-rw-r--r--  MAINTAINERS | 8
-rw-r--r--  docs/about/deprecated.rst | 17
-rw-r--r--  docs/about/removed-features.rst | 7
-rw-r--r--  docs/conf.py | 18
-rw-r--r--  docs/devel/codebase.rst | 6
-rw-r--r--  docs/devel/index-build.rst | 1
-rw-r--r--  docs/devel/qapi-domain.rst | 670
-rw-r--r--  docs/glossary.rst | 10
-rw-r--r--  docs/interop/qemu-qmp-ref.rst | 1
-rw-r--r--  docs/sphinx-static/theme_overrides.css | 98
-rw-r--r--  docs/sphinx/compat.py | 230
-rw-r--r--  docs/sphinx/qapi_domain.py | 931
-rw-r--r--  docs/sphinx/qapidoc.py | 913
-rw-r--r--  docs/sphinx/qapidoc_legacy.py | 440
-rw-r--r--  docs/system/ppc/amigang.rst | 17
-rw-r--r--  docs/system/ppc/embedded.rst | 1
-rw-r--r--  docs/system/ppc/powernv.rst | 7
-rw-r--r--  hw/intc/pnv_xive.c | 10
-rw-r--r--  hw/intc/pnv_xive2.c | 166
-rw-r--r--  hw/intc/spapr_xive.c | 8
-rw-r--r--  hw/intc/trace-events | 6
-rw-r--r--  hw/intc/xive.c | 205
-rw-r--r--  hw/intc/xive2.c | 690
-rw-r--r--  hw/pci-host/pnv_phb4_pec.c | 55
-rw-r--r--  hw/ppc/Kconfig | 9
-rw-r--r--  hw/ppc/amigaone.c | 284
-rw-r--r--  hw/ppc/meson.build | 3
-rw-r--r--  hw/ppc/pnv.c | 150
-rw-r--r--  hw/ppc/pnv_bmc.c | 28
-rw-r--r--  hw/ppc/pnv_homer.c | 230
-rw-r--r--  hw/ppc/pnv_lpc.c | 89
-rw-r--r--  hw/ppc/pnv_occ.c | 672
-rw-r--r--  hw/ppc/ppc.c | 11
-rw-r--r--  hw/ppc/ppc405.h | 186
-rw-r--r--  hw/ppc/ppc405_boards.c | 520
-rw-r--r--  hw/ppc/ppc405_uc.c | 1216
-rw-r--r--  hw/ppc/sam460ex.c | 2
-rw-r--r--  hw/ppc/spapr.c | 80
-rw-r--r--  hw/ppc/spapr_caps.c | 44
-rw-r--r--  hw/ppc/spapr_cpu_core.c | 2
-rw-r--r--  hw/ppc/spapr_hcall.c | 29
-rw-r--r--  hw/ppc/spapr_nested.c | 119
-rw-r--r--  hw/ppc/virtex_ml507.c | 2
-rw-r--r--  hw/s390x/s390-virtio-ccw.c | 1
-rw-r--r--  hw/ssi/pnv_spi.c | 366
-rw-r--r--  hw/vfio/common.c | 9
-rw-r--r--  hw/vfio/igd.c | 529
-rw-r--r--  hw/vfio/iommufd.c | 1
-rw-r--r--  hw/vfio/meson.build | 27
-rw-r--r--  hw/vfio/migration-multifd.c | 15
-rw-r--r--  hw/vfio/migration.c | 1
-rw-r--r--  hw/vfio/pci-quirks.c | 53
-rw-r--r--  hw/vfio/pci.c | 35
-rw-r--r--  hw/vfio/pci.h | 11
-rw-r--r--  hw/vfio/spapr.c | 4
-rw-r--r--  include/exec/ram_addr.h | 3
-rw-r--r--  include/hw/pci-host/pnv_phb4.h | 5
-rw-r--r--  include/hw/ppc/pnv.h | 6
-rw-r--r--  include/hw/ppc/pnv_homer.h | 12
-rw-r--r--  include/hw/ppc/pnv_occ.h | 9
-rw-r--r--  include/hw/ppc/pnv_pnor.h | 6
-rw-r--r--  include/hw/ppc/pnv_xscom.h | 4
-rw-r--r--  include/hw/ppc/spapr.h | 7
-rw-r--r--  include/hw/ppc/spapr_nested.h | 67
-rw-r--r--  include/hw/ppc/xive.h | 41
-rw-r--r--  include/hw/ppc/xive2.h | 24
-rw-r--r--  include/hw/ppc/xive2_regs.h | 17
-rw-r--r--  include/hw/ppc/xive_regs.h | 25
-rw-r--r--  include/hw/ssi/pnv_spi.h | 7
-rw-r--r--  include/system/hostmem.h | 3
-rw-r--r--  pc-bios/README | 17
-rw-r--r--  pc-bios/meson.build | 1
-rw-r--r--  pc-bios/pnv-pnor.bin | bin 0 -> 139264 bytes
-rw-r--r--  pc-bios/skiboot.lid | bin 2527328 -> 2592960 bytes
-rw-r--r--  pc-bios/slof.bin | bin 995000 -> 996184 bytes
-rw-r--r--  qapi/qapi-schema.json | 2
m---------  roms/skiboot | 0
-rw-r--r--  scripts/qapi/main.py | 24
-rw-r--r--  scripts/qapi/parser.py | 123
-rw-r--r--  scripts/qapi/source.py | 4
-rw-r--r--  target/ppc/cpu.c | 45
-rw-r--r--  target/ppc/cpu.h | 14
-rw-r--r--  target/ppc/cpu_init.c | 48
-rw-r--r--  target/ppc/excp_helper.c | 842
-rw-r--r--  target/ppc/helper.h | 4
-rw-r--r--  target/ppc/internal.h | 8
-rw-r--r--  target/ppc/kvm.c | 12
-rw-r--r--  target/ppc/kvm_ppc.h | 12
-rw-r--r--  target/ppc/machine.c | 3
-rw-r--r--  target/ppc/meson.build | 1
-rw-r--r--  target/ppc/misc_helper.c | 63
-rw-r--r--  target/ppc/mmu-radix64.c | 14
-rw-r--r--  target/ppc/spr_common.h | 4
-rw-r--r--  target/ppc/tcg-excp_helper.c | 851
-rw-r--r--  target/ppc/translate.c | 28
-rw-r--r--  tests/functional/meson.build | 1
-rwxr-xr-x  tests/functional/test_ppc_405.py | 37
-rw-r--r--  tests/qapi-schema/doc-good.out | 10
-rwxr-xr-x  tests/qapi-schema/test-qapi.py | 2
-rw-r--r--  tests/qtest/m48t59-test.c | 5
-rw-r--r--  tests/qtest/meson.build | 4
-rw-r--r--  tests/qtest/pnv-spi-seeprom-test.c | 2
-rw-r--r--  tests/qtest/pnv-xive2-common.h | 1
-rw-r--r--  tests/qtest/pnv-xive2-flush-sync.c | 6
-rw-r--r--  tests/qtest/pnv-xive2-nvpg_bar.c | 152
-rw-r--r--  tests/qtest/pnv-xive2-test.c | 249
-rw-r--r--  trace/control-target.c | 2
-rw-r--r--  trace/meson.build | 4
108 files changed, 7494 insertions, 4580 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index e34de42..31b395f 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1409,12 +1409,6 @@ F: hw/openrisc/openrisc_sim.c
PowerPC Machines
----------------
-405 (ref405ep)
-L: qemu-ppc@nongnu.org
-S: Orphan
-F: hw/ppc/ppc405*
-F: tests/functional/test_ppc_405.py
-
Bamboo
L: qemu-ppc@nongnu.org
S: Orphan
@@ -1545,6 +1539,7 @@ F: include/hw/ppc/pnv*
F: include/hw/pci-host/pnv*
F: include/hw/ssi/pnv_spi*
F: pc-bios/skiboot.lid
+F: pc-bios/pnv-pnor.bin
F: tests/qtest/pnv*
F: tests/functional/test_ppc64_powernv.py
@@ -4327,6 +4322,7 @@ S: Orphan
F: po/*.po
Sphinx documentation configuration and build machinery
+M: John Snow <jsnow@redhat.com>
M: Peter Maydell <peter.maydell@linaro.org>
S: Maintained
F: docs/conf.py
diff --git a/docs/about/deprecated.rst b/docs/about/deprecated.rst
index 589951b..e2b4f07 100644
--- a/docs/about/deprecated.rst
+++ b/docs/about/deprecated.rst
@@ -266,6 +266,15 @@ in the QEMU object model anymore. ``Sun-UltraSparc-IIIi+`` and
but for consistency these will get removed in a future release, too.
Use ``Sun-UltraSparc-IIIi-plus`` and ``Sun-UltraSparc-IV-plus`` instead.
+PPC 405 CPUs (since 10.0)
+'''''''''''''''''''''''''
+
+The PPC 405 CPU has no known users and the ``ref405ep`` machine was
+removed in QEMU 10.0. Since the IBM POWER [8-11] processors use an
+embedded 405 for power management (OCC) and other internal tasks, it
+is theoretically possible to use QEMU to model them. Let's keep the
+CPU implementation for a while before removing all support.
+
System emulator machines
------------------------
@@ -277,14 +286,6 @@ deprecated; use the new name ``dtb-randomness`` instead. The new name
better reflects the way this property affects all random data within
the device tree blob, not just the ``kaslr-seed`` node.
-PPC 405 ``ref405ep`` machine (since 9.1)
-''''''''''''''''''''''''''''''''''''''''
-
-The ``ref405ep`` machine and PPC 405 CPU have no known users, firmware
-images are not available, OpenWRT dropped support in 2019, U-Boot in
-2017, Linux also is dropping support in 2024. It is time to let go of
-this ancient hardware and focus on newer CPUs and platforms.
-
Big-Endian variants of MicroBlaze ``petalogix-ml605`` and ``xlnx-zynqmp-pmu`` machines (since 9.2)
''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
diff --git a/docs/about/removed-features.rst b/docs/about/removed-features.rst
index 156c0c2..2527a91 100644
--- a/docs/about/removed-features.rst
+++ b/docs/about/removed-features.rst
@@ -1064,6 +1064,13 @@ for all machine types using the PXA2xx and OMAP2 SoCs. We are also
dropping the ``cheetah`` OMAP1 board, because we don't have any
test images for it and don't know of anybody who does.
+ppc ``ref405ep`` machine (removed in 10.0)
+''''''''''''''''''''''''''''''''''''''''''
+
+This machine was removed because the PPC 405 CPU has no known users,
+firmware images are not available, OpenWRT dropped support in 2019,
+U-Boot in 2017, and Linux in 2024.
+
linux-user mode CPUs
--------------------
diff --git a/docs/conf.py b/docs/conf.py
index 31bb9a3..a3f9fa6 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -60,7 +60,14 @@ needs_sphinx = '3.4.3'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
-extensions = ['kerneldoc', 'qmp_lexer', 'hxtool', 'depfile', 'qapidoc']
+extensions = [
+ 'depfile',
+ 'hxtool',
+ 'kerneldoc',
+ 'qapi_domain',
+ 'qapidoc',
+ 'qmp_lexer',
+]
if sphinx.version_info[:3] > (4, 0, 0):
tags.add('sphinx4')
@@ -146,6 +153,15 @@ rst_epilog = ".. |CONFDIR| replace:: ``" + confdir + "``\n"
with open(os.path.join(qemu_docdir, 'defs.rst.inc')) as f:
rst_epilog += f.read()
+
+# Normally, the QAPI domain is picky about what field lists you use to
+# describe a QAPI entity. If you'd like to use arbitrary additional
+# fields in source documentation, add them here.
+qapi_allowed_fields = {
+ "see also",
+}
+
+
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
diff --git a/docs/devel/codebase.rst b/docs/devel/codebase.rst
index 4039875..1b09953 100644
--- a/docs/devel/codebase.rst
+++ b/docs/devel/codebase.rst
@@ -23,7 +23,7 @@ Some of the main QEMU subsystems are:
- `Devices<device-emulation>` & Board models
- `Documentation <documentation-root>`
- `GDB support<GDB usage>`
-- `Migration<migration>`
+- :ref:`Migration<migration>`
- `Monitor<QEMU monitor>`
- :ref:`QOM (QEMU Object Model)<qom>`
- `System mode<System emulation>`
@@ -112,7 +112,7 @@ yet, so sometimes the source code is all you have.
* `libdecnumber <https://gitlab.com/qemu-project/qemu/-/tree/master/libdecnumber>`_:
Import of gcc library, used to implement decimal number arithmetic.
* `migration <https://gitlab.com/qemu-project/qemu/-/tree/master/migration>`__:
- `Migration framework <migration>`.
+ :ref:`Migration framework <migration>`.
* `monitor <https://gitlab.com/qemu-project/qemu/-/tree/master/monitor>`_:
`Monitor <QEMU monitor>` implementation (HMP & QMP).
* `nbd <https://gitlab.com/qemu-project/qemu/-/tree/master/nbd>`_:
@@ -193,7 +193,7 @@ yet, so sometimes the source code is all you have.
- `lcitool <https://gitlab.com/qemu-project/qemu/-/tree/master/tests/lcitool>`_:
Generate dockerfiles for CI containers.
- `migration <https://gitlab.com/qemu-project/qemu/-/tree/master/tests/migration>`_:
- Test scripts and data for `Migration framework <migration>`.
+ Test scripts and data for :ref:`Migration framework <migration>`.
- `multiboot <https://gitlab.com/qemu-project/qemu/-/tree/master/tests/multiboot>`_:
Test multiboot functionality for x86_64/i386.
- `qapi-schema <https://gitlab.com/qemu-project/qemu/-/tree/master/tests/qapi-schema>`_:
diff --git a/docs/devel/index-build.rst b/docs/devel/index-build.rst
index 0745c81..3f3cb21 100644
--- a/docs/devel/index-build.rst
+++ b/docs/devel/index-build.rst
@@ -12,4 +12,5 @@ some of the basics if you are adding new files and targets to the build.
kconfig
docs
qapi-code-gen
+ qapi-domain
control-flow-integrity
diff --git a/docs/devel/qapi-domain.rst b/docs/devel/qapi-domain.rst
new file mode 100644
index 0000000..1475870
--- /dev/null
+++ b/docs/devel/qapi-domain.rst
@@ -0,0 +1,670 @@
+======================
+The Sphinx QAPI Domain
+======================
+
+An extension to the `rST syntax
+<https://www.sphinx-doc.org/en/master/usage/restructuredtext/basics.html>`_
+in Sphinx is provided by the QAPI Domain, located in
+``docs/sphinx/qapi_domain.py``. This extension is analogous to the
+`Python Domain
+<https://www.sphinx-doc.org/en/master/usage/domains/python.html>`_
+included with Sphinx, but provides special directives and roles
+specifically for annotating and documenting QAPI definitions.
+
+A `Domain
+<https://www.sphinx-doc.org/en/master/usage/domains/index.html>`_
+provides a set of special rST directives and cross-referencing roles to
+Sphinx for understanding rST markup written to document a specific
+language. By itself, this QAPI extension is only sufficient to parse rST
+markup written by hand; the `autodoc
+<https://www.sphinx-doc.org/en/master/usage/extensions/autodoc.html>`_
+functionality is provided elsewhere, in ``docs/sphinx/qapidoc.py``, by
+the "Transmogrifier".
+
+It is not expected that any developer or documentation writer would
+ever need to write *or* read these special rST forms. However, in the
+event that something needs to be debugged, knowing the syntax of the
+domain is quite handy. This reference may also be useful as a guide for
+understanding the QAPI Domain extension code itself. Although most of
+these forms will not be needed for documentation writing purposes,
+understanding the cross-referencing syntax *will* be helpful when
+writing rST documentation elsewhere, or for enriching the body of
+QAPIDoc blocks themselves.
+
+
+Concepts
+========
+
+The QAPI Domain itself provides no mechanisms for reading the QAPI
+Schema or generating documentation from code that exists. It is merely
+the rST syntax used to describe things. For instance, the Sphinx Python
+domain adds syntax like ``:py:func:`` for describing Python functions in
+documentation, but it's the autodoc module that is responsible for
+reading Python code and generating such syntax. QAPI is analogous here:
+qapidoc.py is responsible for reading the QAPI Schema and generating rST
+syntax, and qapi_domain.py is responsible for translating that special
+syntax and providing APIs for Sphinx internals.
+
+In other words:
+
+qapi_domain.py adds syntax like ``.. qapi:command::`` to Sphinx, and
+qapidoc.py transforms the documentation in ``qapi/*.json`` into rST
+using directives defined by the domain.
+
+Or even shorter:
+
+``:py:`` is to ``:qapi:`` as *autodoc* is to *qapidoc*.
+
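+For a concrete (and intentionally simplified) sketch of that division of
+labor, a schema doc block such as::
+
+    ##
+    # @job-pause:
+    #
+    # Pause an active job.
+    #
+    # @id: The job identifier.
+    #
+    # Since: 3.0
+    ##
+
+is transformed by qapidoc.py into rST that uses the domain's directives,
+roughly::
+
+    .. qapi:command:: job-pause
+       :since: 3.0
+
+       Pause an active job.
+
+       :arg string id: The job identifier.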
+
+Info Field Lists
+================
+
+`Field lists
+<https://www.sphinx-doc.org/en/master/usage/restructuredtext/basics.html#field-lists>`_
+are a standard syntax in reStructuredText. Sphinx `extends that syntax
+<https://www.sphinx-doc.org/en/master/usage/domains/python.html#info-field-lists>`_
+to give certain field list entries special meaning and parsing to, for
+example, add cross-references. The QAPI Domain takes advantage of this
+field list extension to document things like Arguments, Members, Values,
+and so on.
+
+The special parsing and handling of info field lists in Sphinx is provided by
+three main classes: Field, GroupedField, and TypedField. The behavior
+and formatting for each configured field list entry in the domain
+changes depending on which class is used.
+
+Field:
+ * Creates an ungrouped field: i.e., each entry will create its own
+ section and they will not be combined.
+ * May *optionally* support an argument.
+ * May apply cross-reference roles to *either* the argument *or* the
+ content body, both, or neither.
+
+This is used primarily for entries which are not expected to be
+repeated, i.e., items that may show up at most once. The QAPI
+domain uses this class for the "Errors" section.
+
+GroupedField:
+ * Creates a grouped field: i.e. multiple adjacent entries will be
+ merged into one section, and the content will form a bulleted list.
+ * *Must* take an argument.
+ * May optionally apply a cross-reference role to the argument, but not
+ the body.
+ * Can be configured to remove the bulleted list if there is only a
+ single entry.
+ * All items will be generated with the form: "argument -- body"
+
+This is used for entries which are expected to be repeated, but aren't
+expected to have two arguments, i.e. types without names, or names
+without types. The QAPI domain uses this class for features, returns,
+and enum values.
+
+TypedField:
+ * Creates a grouped, typed field. Multiple adjacent entries will be
+ merged into one section, and the content will form a bulleted list.
+ * *Must* take at least one argument, but supports up to two -
+ nominally, a name and a type.
+ * May optionally apply a cross-reference role to the type or the name
+ argument, but not the body.
+ * Can be configured to remove the bulleted list if there is only a
+ single entry.
+ * All items will be generated with the form "name (type) -- body"
+
+This is used for entries that are expected to be repeated and will have
+a name, a type, and a description. The QAPI domain uses this class for
+arguments, alternatives, and members. Wherever type names are referenced
+below, they must be a valid, documented type that will be
+cross-referenced in the HTML output; or one of the built-in JSON types
+(string, number, int, boolean, null, value, q_empty).
+
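+For orientation, the class used for a given field determines both the
+source syntax and the rendered shape. A condensed sketch, using a
+hypothetical command::
+
+    .. qapi:command:: x-example
+
+       :arg string name: a TypedField entry; rendered as "name (string) -- ...".
+       :return int: a GroupedField entry; rendered as "int -- ...".
+       :error: a Field entry; a free-form body with no argument.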
+
+``:feat:``
+----------
+
+Document a feature attached to a QAPI definition.
+
+:availability: This field list is available in the body of Command,
+ Event, Enum, Object and Alternate directives.
+:syntax: ``:feat name: Lorem ipsum, dolor sit amet...``
+:type: `sphinx.util.docfields.GroupedField
+ <https://pydoc.dev/sphinx/latest/sphinx.util.docfields.GroupedField.html?private=1>`_
+
+Example::
+
+ .. qapi:object:: BlockdevOptionsVirtioBlkVhostVdpa
+ :since: 7.2
+ :ifcond: CONFIG_BLKIO
+
+ Driver specific block device options for the virtio-blk-vhost-vdpa
+ backend.
+
+ :memb string path: path to the vhost-vdpa character device.
+ :feat fdset: Member ``path`` supports the special "/dev/fdset/N" path
+ (since 8.1)
+
+
+``:arg:``
+---------
+
+Document an argument to a QAPI command.
+
+:availability: This field list is only available in the body of the
+ Command directive.
+:syntax: ``:arg type name: description``
+:type: `sphinx.util.docfields.TypedField
+ <https://pydoc.dev/sphinx/latest/sphinx.util.docfields.TypedField.html?private=1>`_
+
+
+Example::
+
+ .. qapi:command:: job-pause
+ :since: 3.0
+
+ Pause an active job.
+
+ This command returns immediately after marking the active job for
+ pausing. Pausing an already paused job is an error.
+
+ The job will pause as soon as possible, which means transitioning
+ into the PAUSED state if it was RUNNING, or into STANDBY if it was
+ READY. The corresponding JOB_STATUS_CHANGE event will be emitted.
+
+ Cancelling a paused job automatically resumes it.
+
+ :arg string id: The job identifier.
+
+
+``:error:``
+-----------
+
+Document the error condition(s) of a QAPI command.
+
+:availability: This field list is only available in the body of the
+ Command directive.
+:syntax: ``:error: Lorem ipsum dolor sit amet ...``
+:type: `sphinx.util.docfields.Field
+ <https://pydoc.dev/sphinx/latest/sphinx.util.docfields.Field.html?private=1>`_
+
+The format of the ``:error:`` field list description is free-form rST.
+The alternative spelling ``:errors:`` is also permitted, and is strictly
+analogous.
+
+Example::
+
+ .. qapi:command:: block-job-set-speed
+ :since: 1.1
+
+ Set maximum speed for a background block operation.
+
+ This command can only be issued when there is an active block job.
+
+ Throttling can be disabled by setting the speed to 0.
+
+ :arg string device: The job identifier. This used to be a device
+ name (hence the name of the parameter), but since QEMU 2.7 it
+ can have other values.
+ :arg int speed: the maximum speed, in bytes per second, or 0 for
+ unlimited. Defaults to 0.
+ :error:
+ - If no background operation is active on this device,
+ DeviceNotActive
+
+
+``:return:``
+-------------
+
+Document the return type(s) and value(s) of a QAPI command.
+
+:availability: This field list is only available in the body of the
+ Command directive.
+:syntax: ``:return type: Lorem ipsum dolor sit amet ...``
+:type: `sphinx.util.docfields.GroupedField
+ <https://pydoc.dev/sphinx/latest/sphinx.util.docfields.GroupedField.html?private=1>`_
+
+
+Example::
+
+ .. qapi:command:: query-replay
+ :since: 5.2
+
+ Retrieve the record/replay information. It includes current
+ instruction count which may be used for ``replay-break`` and
+ ``replay-seek`` commands.
+
+ :return ReplayInfo: record/replay information.
+
+ .. qmp-example::
+
+ -> { "execute": "query-replay" }
+ <- { "return": {
+ "mode": "play", "filename": "log.rr", "icount": 220414 }
+ }
+
+
+``:value:``
+-----------
+
+Document a possible value for a QAPI enum.
+
+:availability: This field list is only available in the body of the Enum
+ directive.
+:syntax: ``:value name: Lorem ipsum, dolor sit amet ...``
+:type: `sphinx.util.docfields.GroupedField
+ <https://pydoc.dev/sphinx/latest/sphinx.util.docfields.GroupedField.html?private=1>`_
+
+Example::
+
+ .. qapi:enum:: QapiErrorClass
+ :since: 1.2
+
+ QEMU error classes
+
+ :value GenericError: this is used for errors that don't require a specific
+ error class. This should be the default case for most errors
+ :value CommandNotFound: the requested command has not been found
+ :value DeviceNotActive: a device has failed to become active
+ :value DeviceNotFound: the requested device has not been found
+ :value KVMMissingCap: the requested operation can't be fulfilled because a
+ required KVM capability is missing
+
+
+``:alt:``
+------------
+
+Document a possible branch for a QAPI alternate.
+
+:availability: This field list is only available in the body of the
+ Alternate directive.
+:syntax: ``:alt type name: Lorem ipsum, dolor sit amet ...``
+:type: `sphinx.util.docfields.TypedField
+ <https://pydoc.dev/sphinx/latest/sphinx.util.docfields.TypedField.html?private=1>`_
+
+As a limitation of Sphinx, we must document the "name" of the branch in
+addition to the type, even though this information is not visible on the
+wire in the QMP protocol format. This limitation *may* be lifted at a
+future date.
+
+Example::
+
+ .. qapi:alternate:: StrOrNull
+ :since: 2.10
+
+ This is a string value or the explicit lack of a string (null
+ pointer in C). Intended for cases when 'optional absent' already
+ has a different meaning.
+
+ :alt string s: the string value
+ :alt null n: no string value
+
+
+``:memb:``
+----------
+
+Document a member of an Event or Object.
+
+:availability: This field list is available in the body of Event or
+ Object directives.
+:syntax: ``:memb type name: Lorem ipsum, dolor sit amet ...``
+:type: `sphinx.util.docfields.TypedField
+ <https://pydoc.dev/sphinx/latest/sphinx.util.docfields.TypedField.html?private=1>`_
+
+This is fundamentally the same as ``:arg:`` and ``:alt:``, but uses the
+"Members" phrasing for Events and Objects (Structs and Unions).
+
+Example::
+
+ .. qapi:event:: JOB_STATUS_CHANGE
+ :since: 3.0
+
+ Emitted when a job transitions to a different status.
+
+ :memb string id: The job identifier
+ :memb JobStatus status: The new job status
+
+
+Arbitrary field lists
+---------------------
+
+Other field list names, while valid rST syntax, are prohibited inside of
+QAPI directives to help prevent accidental misspellings of info field
+list names. If you want to add a new arbitrary "non-value-added" field
+list to QAPI documentation, you must add the field name to the allow
+list in ``docs/conf.py``.
+
+For example::
+
+ qapi_allowed_fields = {
+ "see also",
+ }
+
+This will allow you to add arbitrary field lists in QAPI directives::
+
+ .. qapi:command:: x-fake-command
+
+ :see also: Lorem ipsum, dolor sit amet ...
+
+
+Cross-references
+================
+
+Cross-reference `roles
+<https://www.sphinx-doc.org/en/master/usage/restructuredtext/roles.html>`_
+in the QAPI domain are modeled closely after the `Python
+cross-referencing syntax
+<https://www.sphinx-doc.org/en/master/usage/domains/python.html#cross-referencing-python-objects>`_.
+
+QAPI definitions can be referenced using the standard `any
+<https://www.sphinx-doc.org/en/master/usage/referencing.html#role-any>`_
+role cross-reference syntax, such as with ```query-blockstats```. In
+the event that disambiguation is needed, cross-references can also be
+written using a number of explicit cross-reference roles:
+
+* ``:qapi:mod:`block-core``` -- Reference a QAPI module. The link will
+ take you to the beginning of that section in the documentation.
+* ``:qapi:cmd:`query-block``` -- Reference a QAPI command.
+* ``:qapi:event:`JOB_STATUS_CHANGE``` -- Reference a QAPI event.
+* ``:qapi:enum:`QapiErrorClass``` -- Reference a QAPI enum.
+* ``:qapi:obj:`BlockdevOptionsVirtioBlkVhostVdpa``` -- Reference a QAPI
+ object (struct or union).
+* ``:qapi:alt:`StrOrNull``` -- Reference a QAPI alternate.
+* ``:qapi:type:`BlockDirtyInfo``` -- Reference *any* QAPI type; this
+ excludes modules, commands, and events.
+* ``:qapi:any:`block-job-set-speed``` -- Reference absolutely any QAPI entity.
+
+Type arguments in info field lists are converted into references as if
+you had used the ``:qapi:type:`` role. All of the special syntax below
+applies to both info field lists and standalone explicit
+cross-references.
+
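+For example, an (illustrative) argument entry such as::
+
+    :arg BitmapSyncMode mode: when to synchronize the bitmap.
+
+creates a link to ``BitmapSyncMode`` exactly as if you had written
+``:qapi:type:`BitmapSyncMode``` yourself.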
+
+Type decorations
+----------------
+
+Type names in references can be surrounded by brackets, like
+``[typename]``, to indicate an array of that type. The cross-reference
+will apply only to the type name between the brackets. For example,
+``:qapi:type:`[Qcow2BitmapInfoFlags]``` renders to:
+:qapi:type:`[Qcow2BitmapInfoFlags]`
+
+To indicate an optional argument/member in a field list, the type name
+can be suffixed with ``?``. The cross-reference will be transformed to
+"type, Optional" with the link applying only to the type name. For
+example, ``:qapi:type:`BitmapSyncMode?``` renders to:
+:qapi:type:`BitmapSyncMode?`
+
+
+Namespaces
+----------
+
+Mimicking the `Python domain target specification syntax
+<https://www.sphinx-doc.org/en/master/usage/domains/python.html#target-specification>`_,
+QAPI allows you to specify the fully qualified path for a data
+type. QAPI enforces globally unique names, so it's unlikely you'll need
+this specific feature, but it may be extended in the near future to
+allow referencing identically named commands and data types from
+different utilities; e.g., QEMU Storage Daemon vs. QMP.
+
+* A module can be explicitly provided;
+ ``:qapi:type:`block-core.BitmapSyncMode``` will render to:
+ :qapi:type:`block-core.BitmapSyncMode`
+* If you don't want to display the "fully qualified" name, it can be
+ prefixed with a tilde; ``:qapi:type:`~block-core.BitmapSyncMode```
+ will render to: :qapi:type:`~block-core.BitmapSyncMode`
+
+
+Custom link text
+----------------
+
+The name of a cross-reference link can be explicitly overridden like
+`most stock Sphinx references
+<https://www.sphinx-doc.org/en/master/usage/referencing.html#syntax>`_
+using the ``custom text <target>`` syntax.
+
+For example, ``:qapi:cmd:`Merge dirty bitmaps
+<block-dirty-bitmap-merge>``` will render as: :qapi:cmd:`Merge dirty
+bitmaps <block-dirty-bitmap-merge>`
+
+
+Directives
+==========
+
+The QAPI domain adds a number of custom directives for documenting
+various QAPI/QMP entities. The syntax is plain rST, and follows this
+general format::
+
+ .. qapi:directive:: argument
+ :option:
+ :another-option: with an argument
+
+ Content body, arbitrary rST is allowed here.
+
+
+Sphinx standard options
+-----------------------
+
+All QAPI directives inherit a number of `standard options
+<https://www.sphinx-doc.org/en/master/usage/domains/index.html#basic-markup>`_
+from Sphinx's ObjectDescription class.
+
+The dashed spellings of the options below were added in Sphinx 7.2; the
+undashed spellings are currently retained as aliases, but will be
+removed in a future version.
+
+* ``:no-index:`` and ``:noindex:`` -- Do not add this item into the
+ Index, and do not make it available for cross-referencing.
+* ``:no-index-entry:`` and ``:noindexentry:`` -- Do not add this item
+ into the Index, but allow it to be cross-referenced.
+* ``:no-contents-entry:`` and ``:nocontentsentry:`` -- Exclude this item
+ from the Table of Contents.
+* ``:no-typesetting:`` -- Create TOC, Index and cross-referencing
+ entities, but don't actually display the content.
+
+
+QAPI standard options
+---------------------
+
+All QAPI directives -- *except* for module -- support these common options.
+
+* ``:module: modname`` -- Borrowed from the Python domain, this option allows
+ you to override the module association of a given definition.
+* ``:since: x.y`` -- Allows the documenting of "Since" information, which is
+ displayed in the signature bar.
+* ``:ifcond: CONDITION`` -- Allows the documenting of conditional availability
+ information, which is displayed in an eyecatch just below the
+ signature bar.
+* ``:deprecated:`` -- Adds an eyecatch just below the signature bar that
+ advertises that this definition is deprecated and should be avoided.
+* ``:unstable:`` -- Adds an eyecatch just below the signature bar that
+ advertises that this definition is unstable and should not be used in
+ production code.
+
+
+qapi:module
+-----------
+
+The ``qapi:module`` directive marks the start of a QAPI module. It may have
+a content body, but it can be omitted. All subsequent QAPI directives
+are associated with the most recent module; this affects their "fully
+qualified" name, but has no other effect.
+
+Example::
+
+ .. qapi:module:: block-core
+
+ Welcome to the block-core module!
+
+Will be rendered as:
+
+.. qapi:module:: block-core
+ :noindex:
+
+ Welcome to the block-core module!
+
+
+qapi:command
+------------
+
+This directive documents a QMP command. It may use any of the standard
+Sphinx or QAPI options, and the documentation body may contain
+``:arg:``, ``:feat:``, ``:error:``, or ``:return:`` info field list
+entries.
+
+Example::
+
+ .. qapi:command:: x-fake-command
+ :since: 42.0
+ :unstable:
+
+ This command is fake, so it can't hurt you!
+
+ :arg int foo: Your favorite number.
+ :arg string? bar: Your favorite season.
+ :return [string]: A lovely computer-written poem for you.
+
+
+Will be rendered as:
+
+ .. qapi:command:: x-fake-command
+ :noindex:
+ :since: 42.0
+ :unstable:
+
+ This command is fake, so it can't hurt you!
+
+ :arg int foo: Your favorite number.
+ :arg string? bar: Your favorite season.
+ :return [string]: A lovely computer-written poem for you.
+
+
+qapi:event
+----------
+
+This directive documents a QMP event. It may use any of the standard
+Sphinx or QAPI options, and the documentation body may contain
+``:memb:`` or ``:feat:`` info field list entries.
+
+Example::
+
+ .. qapi:event:: COMPUTER_IS_RUINED
+ :since: 0.1
+ :deprecated:
+
+ This event is emitted when your computer is *extremely* ruined.
+
+ :memb string reason: Diagnostics as to what caused your computer to
+ be ruined.
+ :feat sadness: When present, the diagnostic message will also
+ explain how sad the computer is as a result of your wrongdoings.
+
+Will be rendered as:
+
+.. qapi:event:: COMPUTER_IS_RUINED
+ :noindex:
+ :since: 0.1
+ :deprecated:
+
+ This event is emitted when your computer is *extremely* ruined.
+
+ :memb string reason: Diagnostics as to what caused your computer to
+ be ruined.
+ :feat sadness: When present, the diagnostic message will also explain
+ how sad the computer is as a result of your wrongdoings.
+
+
+qapi:enum
+---------
+
+This directive documents a QAPI enum. It may use any of the standard
+Sphinx or QAPI options, and the documentation body may contain
+``:value:`` or ``:feat:`` info field list entries.
+
+Example::
+
+ .. qapi:enum:: Mood
+ :ifcond: LIB_PERSONALITY
+
+ This enum represents your virtual machine's current mood!
+
+ :value Happy: Your VM is content and well-fed.
+ :value Hungry: Your VM needs food.
+ :value Melancholic: Your VM is experiencing existential angst.
+ :value Petulant: Your VM is throwing a temper tantrum.
+
+Will be rendered as:
+
+.. qapi:enum:: Mood
+ :noindex:
+ :ifcond: LIB_PERSONALITY
+
+ This enum represents your virtual machine's current mood!
+
+ :value Happy: Your VM is content and well-fed.
+ :value Hungry: Your VM needs food.
+ :value Melancholic: Your VM is experiencing existential angst.
+ :value Petulant: Your VM is throwing a temper tantrum.
+
+
+qapi:object
+-----------
+
+This directive documents a QAPI structure or union and represents a QMP
+object. It may use any of the standard Sphinx or QAPI options, and the
+documentation body may contain ``:memb:`` or ``:feat:`` info field list
+entries.
+
+Example::
+
+ .. qapi:object:: BigBlobOfStuff
+
+ This object has a bunch of disparate and unrelated things in it.
+
+ :memb int Birthday: Your birthday, represented in seconds since the
+ UNIX epoch.
+ :memb [string] Fav-Foods: A list of your favorite foods.
+ :memb boolean? Bizarre-Docs: True if the documentation reference
+ should be strange.
+
+Will be rendered as:
+
+.. qapi:object:: BigBlobOfStuff
+ :noindex:
+
+ This object has a bunch of disparate and unrelated things in it.
+
+ :memb int Birthday: Your birthday, represented in seconds since the
+ UNIX epoch.
+ :memb [string] Fav-Foods: A list of your favorite foods.
+ :memb boolean? Bizarre-Docs: True if the documentation reference
+ should be strange.
+
+
+qapi:alternate
+--------------
+
+This directive documents a QAPI alternate. It may use any of the
+standard Sphinx or QAPI options, and the documentation body may contain
+``:alt:`` or ``:feat:`` info field list entries.
+
+Example::
+
+ .. qapi:alternate:: ErrorCode
+
+ This alternate represents an Error Code from the VM.
+
+ :alt int ec: An error code, like the type you're used to.
+ :alt string em: An expletive-laced error message, if your
+ computer is feeling particularly cranky and tired of your
+ antics.
+
+Will be rendered as:
+
+.. qapi:alternate:: ErrorCode
+ :noindex:
+
+ This alternate represents an Error Code from the VM.
+
+ :alt int ec: An error code, like the type you're used to.
+ :alt string em: An expletive-laced error message, if your
+ computer is feeling particularly cranky and tired of your
+ antics.
diff --git a/docs/glossary.rst b/docs/glossary.rst
index 693d985..4fa044b 100644
--- a/docs/glossary.rst
+++ b/docs/glossary.rst
@@ -120,7 +120,7 @@ Migration
---------
QEMU can save and restore the execution of a virtual machine between different
-host systems. This is provided by the `Migration framework<migration>`.
+host systems. This is provided by the :ref:`Migration framework<migration>`.
NBD
---
@@ -212,14 +212,14 @@ machine emulator and virtualizer.
QOM
---
-`QEMU Object Model <qom>` is an object oriented API used to define various
-devices and hardware in the QEMU codebase.
+:ref:`QEMU Object Model <qom>` is an object oriented API used to define
+various devices and hardware in the QEMU codebase.
Record/replay
-------------
-`Record/replay <replay>` is a feature of QEMU allowing to have a deterministic
-and reproducible execution of a virtual machine.
+:ref:`Record/replay <replay>` is a feature of QEMU that allows
+deterministic and reproducible execution of a virtual machine.
Rust
----
diff --git a/docs/interop/qemu-qmp-ref.rst b/docs/interop/qemu-qmp-ref.rst
index f94614a..e95eeac 100644
--- a/docs/interop/qemu-qmp-ref.rst
+++ b/docs/interop/qemu-qmp-ref.rst
@@ -7,3 +7,4 @@ QEMU QMP Reference Manual
:depth: 3
.. qapi-doc:: qapi/qapi-schema.json
+ :transmogrify:
diff --git a/docs/sphinx-static/theme_overrides.css b/docs/sphinx-static/theme_overrides.css
index 965ecac..b225bf7 100644
--- a/docs/sphinx-static/theme_overrides.css
+++ b/docs/sphinx-static/theme_overrides.css
@@ -18,8 +18,8 @@ h1, h2, .rst-content .toctree-wrapper p.caption, h3, h4, h5, h6, legend {
.rst-content dl:not(.docutils) dt {
border-top: none;
- border-left: solid 3px #ccc;
- background-color: #f0f0f0;
+ border-left: solid 5px #bcc6d2;
+ background-color: #eaedf1;
color: black;
}
@@ -208,3 +208,97 @@ div[class^="highlight"] pre {
color: inherit;
}
}
+
+/* QAPI domain theming */
+
+/* most content in a QAPI object definition should not eclipse about
+ 80ch, but nested field lists are explicitly exempt due to their
+ two-column nature */
+.qapi dd *:not(dl) {
+ max-width: 80ch;
+}
+
+/* but the content column itself should still be less than ~80ch. */
+.qapi .field-list dd {
+ max-width: 80ch;
+}
+
+.qapi-infopips {
+ margin-bottom: 1em;
+}
+
+.qapi-infopip {
+ display: inline-block;
+ padding: 0em 0.5em 0em 0.5em;
+ margin: 0.25em;
+}
+
+.qapi-deprecated,.qapi-unstable {
+ background-color: #fffef5;
+ border: solid #fff176 6px;
+ font-weight: bold;
+ padding: 8px;
+ border-radius: 15px;
+ margin: 5px;
+}
+
+.qapi-unstable::before {
+ content: '🚧 ';
+}
+
+.qapi-deprecated::before {
+ content: '⚠️ ';
+}
+
+.qapi-ifcond::before {
+ /* gaze ye into the crystal ball to determine feature availability */
+ content: '🔮 ';
+}
+
+.qapi-ifcond {
+ background-color: #f9f5ff;
+ border: solid #dac2ff 6px;
+ padding: 8px;
+ border-radius: 15px;
+ margin: 5px;
+}
+
+/* code blocks */
+.qapi div[class^="highlight"] {
+ width: fit-content;
+ background-color: #fffafd;
+ border: 2px solid #ffe1f3;
+}
+
+/* note, warning, etc. */
+.qapi .admonition {
+ width: fit-content;
+}
+
+/* pad the top of the field-list so the text doesn't start directly at
+ the top border; primarily for the field list labels, but adjust the
+ field bodies as well for parity. */
+dl.field-list > dt:first-of-type, dl.field-list > dd:first-of-type {
+ padding-top: 0.3em;
+}
+
+dl.field-list > dt:last-of-type, dl.field-list > dd:last-of-type {
+ padding-bottom: 0.3em;
+}
+
+/* pad the field list labels so they don't crash into the border */
+dl.field-list > dt {
+ padding-left: 0.5em;
+ padding-right: 0.5em;
+}
+
+/* Add a little padding between field list sections */
+dl.field-list > dd:not(:last-child) {
+ padding-bottom: 1em;
+}
+
+/* Sphinx 3.x: unresolved xrefs */
+.rst-content *:not(a) > code.xref {
+ font-weight: 400;
+ color: #333333;
+}
diff --git a/docs/sphinx/compat.py b/docs/sphinx/compat.py
new file mode 100644
index 0000000..9cf7fe0
--- /dev/null
+++ b/docs/sphinx/compat.py
@@ -0,0 +1,230 @@
+"""
+Sphinx cross-version compatibility goop
+"""
+
+import re
+from typing import (
+ TYPE_CHECKING,
+ Any,
+ Callable,
+ Optional,
+ Type,
+)
+
+from docutils import nodes
+from docutils.nodes import Element, Node, Text
+from docutils.statemachine import StringList
+
+import sphinx
+from sphinx import addnodes, util
+from sphinx.directives import ObjectDescription
+from sphinx.environment import BuildEnvironment
+from sphinx.roles import XRefRole
+from sphinx.util import docfields
+from sphinx.util.docutils import (
+ ReferenceRole,
+ SphinxDirective,
+ switch_source_input,
+)
+from sphinx.util.typing import TextlikeNode
+
+
+MAKE_XREF_WORKAROUND = sphinx.version_info[:3] < (4, 1, 0)
+
+
+SpaceNode: Callable[[str], Node]
+KeywordNode: Callable[[str, str], Node]
+
+if sphinx.version_info[:3] >= (4, 0, 0):
+ SpaceNode = addnodes.desc_sig_space
+ KeywordNode = addnodes.desc_sig_keyword
+else:
+ SpaceNode = Text
+ KeywordNode = addnodes.desc_annotation
+
+
+def nested_parse_with_titles(
+ directive: SphinxDirective, content_node: Element
+) -> None:
+ """
+ This helper preserves error parsing context across sphinx versions.
+ """
+
+ # necessary so that the child nodes get the right source/line set
+ content_node.document = directive.state.document
+
+ try:
+ # Modern sphinx (6.2.0+) supports proper offsetting for
+ # nested parse error context management
+ util.nodes.nested_parse_with_titles(
+ directive.state,
+ directive.content,
+ content_node,
+ content_offset=directive.content_offset,
+ )
+ except TypeError:
+ # No content_offset argument. Fall back to SSI method.
+ with switch_source_input(directive.state, directive.content):
+ util.nodes.nested_parse_with_titles(
+ directive.state, directive.content, content_node
+ )
+
+
+# ###########################################
+# xref compatibility hacks for Sphinx < 4.1 #
+# ###########################################
+
+# When we require >= Sphinx 4.1, the following function and the
+# subsequent 3 compatibility classes can be removed. Anywhere in
+# qapi_domain that uses one of these Compat* types can be switched to
+# using the garden-variety lib-provided classes with no trickery.
+
+
+def _compat_make_xref( # pylint: disable=unused-argument
+ self: sphinx.util.docfields.Field,
+ rolename: str,
+ domain: str,
+ target: str,
+ innernode: Type[TextlikeNode] = addnodes.literal_emphasis,
+ contnode: Optional[Node] = None,
+ env: Optional[BuildEnvironment] = None,
+ inliner: Any = None,
+ location: Any = None,
+) -> Node:
+ """
+ Compatibility workaround for Sphinx versions prior to 4.1.0.
+
+ Older sphinx versions do not use the domain's XRefRole for parsing
+ and formatting cross-references, so we need to perform this magick
+ ourselves to avoid needing to write the parser/formatter in two
+ separate places.
+
+ This workaround isn't brick-for-brick compatible with modern Sphinx
+ versions, because we do not have access to the parent directive's
+ state during this parsing like we do in more modern versions.
+
+ It's no worse than what pre-Sphinx 4.1.0 does, so... oh well!
+ """
+
+ # Yes, this function is gross. Pre-4.1 support is a miracle.
+ # pylint: disable=too-many-locals
+
+ assert env
+ # Note: Sphinx's own code ignores the type warning here, too.
+ if not rolename:
+ return contnode or innernode(target, target) # type: ignore[call-arg]
+
+ # Get the role instance, but don't *execute it* - we lack the
+ # correct state to do so. Instead, we'll just use its public
+ # methods to do our reference formatting, and emulate the rest.
+ role = env.get_domain(domain).roles[rolename]
+ assert isinstance(role, XRefRole)
+
+ # XRefRole features not supported by this compatibility shim;
+ # these were not supported in Sphinx 3.x either, so nothing of
+ # value is really lost.
+ assert not target.startswith("!")
+ assert not re.match(ReferenceRole.explicit_title_re, target)
+ assert not role.lowercase
+ assert not role.fix_parens
+
+ # Code below based mostly on sphinx.roles.XRefRole; run() and
+ # create_xref_node()
+ options = {
+ "refdoc": env.docname,
+ "refdomain": domain,
+ "reftype": rolename,
+ "refexplicit": False,
+ "refwarn": role.warn_dangling,
+ }
+ refnode = role.nodeclass(target, **options)
+ title, target = role.process_link(env, refnode, False, target, target)
+ refnode["reftarget"] = target
+ classes = ["xref", domain, f"{domain}-{rolename}"]
+ refnode += role.innernodeclass(target, title, classes=classes)
+
+ # This is the very gross part of the hack. Normally,
+ # result_nodes takes a document object to which we would pass
+ # self.inliner.document. Prior to Sphinx 4.1, we don't *have* an
+ # inliner to pass, so we have nothing to pass here. However, the
+ # actual implementation of role.result_nodes in this case
+ # doesn't actually use that argument, so this winds up being
+ # ... fine. Rest easy at night knowing this code only runs under
+ # old versions of Sphinx, so at least it won't change in the
+ # future on us and lead to surprising new failures.
+ # Gross, I know.
+ result_nodes, _messages = role.result_nodes(
+ None, # type: ignore
+ env,
+ refnode,
+ is_ref=True,
+ )
+ return nodes.inline(target, "", *result_nodes)
+
+
+class CompatField(docfields.Field):
+ if MAKE_XREF_WORKAROUND:
+ make_xref = _compat_make_xref
+
+
+class CompatGroupedField(docfields.GroupedField):
+ if MAKE_XREF_WORKAROUND:
+ make_xref = _compat_make_xref
+
+
+class CompatTypedField(docfields.TypedField):
+ if MAKE_XREF_WORKAROUND:
+ make_xref = _compat_make_xref
+
+
+# ################################################################
+# Nested parsing error location fix for Sphinx 5.3.0 < x < 6.2.0 #
+# ################################################################
+
+# When we require Sphinx 4.x, the TYPE_CHECKING hack where we avoid
+# subscripting ObjectDescription at runtime can be removed in favor of
+# just always subscripting the class.
+
+# When we require Sphinx > 6.2.0, the rest of this compatibility hack
+# can be dropped and QAPIObject can just inherit directly from
+# ObjectDescription[Signature].
+
+SOURCE_LOCATION_FIX = (5, 3, 0) <= sphinx.version_info[:3] < (6, 2, 0)
+
+Signature = str
+
+
+if TYPE_CHECKING:
+ _BaseClass = ObjectDescription[Signature]
+else:
+ _BaseClass = ObjectDescription
+
+
+class ParserFix(_BaseClass):
+
+ _temp_content: StringList
+ _temp_offset: int
+ _temp_node: Optional[addnodes.desc_content]
+
+ def before_content(self) -> None:
+ # Work around a sphinx bug and parse the content ourselves.
+ self._temp_content = self.content
+ self._temp_offset = self.content_offset
+ self._temp_node = None
+
+ if SOURCE_LOCATION_FIX:
+ self._temp_node = addnodes.desc_content()
+ self.state.nested_parse(
+ self.content, self.content_offset, self._temp_node
+ )
+            # Sphinx will try to parse the content block itself;
+            # give it nothingness to parse instead.
+ self.content = StringList()
+ self.content_offset = 0
+
+ def transform_content(self, content_node: addnodes.desc_content) -> None:
+ # Sphinx workaround: Inject our parsed content and restore state.
+ if self._temp_node:
+ content_node += self._temp_node.children
+ self.content = self._temp_content
+ self.content_offset = self._temp_offset
diff --git a/docs/sphinx/qapi_domain.py b/docs/sphinx/qapi_domain.py
new file mode 100644
index 0000000..7ff618d
--- /dev/null
+++ b/docs/sphinx/qapi_domain.py
@@ -0,0 +1,931 @@
+"""
+QAPI domain extension.
+"""
+
+# The best laid plans of mice and men, ...
+# pylint: disable=too-many-lines
+
+from __future__ import annotations
+
+from typing import (
+ TYPE_CHECKING,
+ AbstractSet,
+ Any,
+ Dict,
+ Iterable,
+ List,
+ NamedTuple,
+ Optional,
+ Tuple,
+ Union,
+ cast,
+)
+
+from docutils import nodes
+from docutils.parsers.rst import directives
+
+from compat import (
+ CompatField,
+ CompatGroupedField,
+ CompatTypedField,
+ KeywordNode,
+ ParserFix,
+ Signature,
+ SpaceNode,
+)
+from sphinx import addnodes
+from sphinx.addnodes import desc_signature, pending_xref
+from sphinx.directives import ObjectDescription
+from sphinx.domains import (
+ Domain,
+ Index,
+ IndexEntry,
+ ObjType,
+)
+from sphinx.locale import _, __
+from sphinx.roles import XRefRole
+from sphinx.util import logging
+from sphinx.util.nodes import make_id, make_refnode
+
+
+if TYPE_CHECKING:
+ from docutils.nodes import Element, Node
+
+ from sphinx.application import Sphinx
+ from sphinx.builders import Builder
+ from sphinx.environment import BuildEnvironment
+ from sphinx.util.typing import OptionSpec
+
+logger = logging.getLogger(__name__)
+
+
+def _unpack_field(
+ field: nodes.Node,
+) -> Tuple[nodes.field_name, nodes.field_body]:
+ """
+ docutils helper: unpack a field node in a type-safe manner.
+ """
+ assert isinstance(field, nodes.field)
+ assert len(field.children) == 2
+ assert isinstance(field.children[0], nodes.field_name)
+ assert isinstance(field.children[1], nodes.field_body)
+ return (field.children[0], field.children[1])
+
+
+class ObjectEntry(NamedTuple):
+ docname: str
+ node_id: str
+ objtype: str
+ aliased: bool
+
+
+class QAPIXRefRole(XRefRole):
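+    """
+    Cross-reference role for QAPI definitions.
+
+    Handles the ``~``, ``?`` and ``[typename]`` reference decorations
+    described in docs/devel/qapi-domain.rst.
+    """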
+
+ def process_link(
+ self,
+ env: BuildEnvironment,
+ refnode: Element,
+ has_explicit_title: bool,
+ title: str,
+ target: str,
+ ) -> tuple[str, str]:
+ refnode["qapi:module"] = env.ref_context.get("qapi:module")
+
+ # Cross-references that begin with a tilde adjust the title to
+ # only show the reference without a leading module, even if one
+ # was provided. This is a Sphinx-standard syntax; give it
+ # priority over QAPI-specific type markup below.
+ hide_module = False
+ if target.startswith("~"):
+ hide_module = True
+ target = target[1:]
+
+ # Type names that end with "?" are considered optional
+ # arguments and should be documented as such, but it's not
+ # part of the xref itself.
+ if target.endswith("?"):
+ refnode["qapi:optional"] = True
+ target = target[:-1]
+
+        # Type names wrapped in brackets denote lists. Strip the
+ # brackets and remember to add them back later.
+ if target.startswith("[") and target.endswith("]"):
+ refnode["qapi:array"] = True
+ target = target[1:-1]
+
+ if has_explicit_title:
+ # Don't mess with the title at all if it was explicitly set.
+ # Explicit title syntax for references is e.g.
+ # :qapi:type:`target <explicit title>`
+ # and this explicit title overrides everything else here.
+ return title, target
+
+ title = target
+ if hide_module:
+ title = target.split(".")[-1]
+
+ return title, target
+
+ def result_nodes(
+ self,
+ document: nodes.document,
+ env: BuildEnvironment,
+ node: Element,
+ is_ref: bool,
+ ) -> Tuple[List[nodes.Node], List[nodes.system_message]]:
+
+ # node here is the pending_xref node (or whatever nodeclass was
+ # configured at XRefRole class instantiation time).
+ results: List[nodes.Node] = [node]
+
+ if node.get("qapi:array"):
+ results.insert(0, nodes.literal("[", "["))
+ results.append(nodes.literal("]", "]"))
+
+ if node.get("qapi:optional"):
+ results.append(nodes.Text(", "))
+ results.append(nodes.emphasis("?", "optional"))
+
+ return results, []
+
+
+class QAPIDescription(ParserFix):
+ """
+ Generic QAPI description.
+
+ This is meant to be an abstract class, not instantiated
+ directly. This class handles the abstract details of indexing, the
+ TOC, and reference targets for QAPI descriptions.
+ """
+
+ def handle_signature(self, sig: str, signode: desc_signature) -> Signature:
+ # Do nothing. The return value here is the "name" of the entity
+ # being documented; for QAPI, this is the same as the
+ # "signature", which is just a name.
+
+ # Normally this method must also populate signode with nodes to
+ # render the signature; here we do nothing instead - the
+ # subclasses will handle this.
+ return sig
+
+ def get_index_text(self, name: Signature) -> Tuple[str, str]:
+ """Return the text for the index entry of the object."""
+
+ # NB: this is used for the global index, not the QAPI index.
+ return ("single", f"{name} (QMP {self.objtype})")
+
+ def add_target_and_index(
+ self, name: Signature, sig: str, signode: desc_signature
+ ) -> None:
+ # name is the return value of handle_signature.
+ # sig is the original, raw text argument to handle_signature.
+ # For QAPI, these are identical, currently.
+
+ assert self.objtype
+
+ # If we're documenting a module, don't include the module as
+ # part of the FQN.
+ modname = ""
+ if self.objtype != "module":
+ modname = self.options.get(
+ "module", self.env.ref_context.get("qapi:module")
+ )
+ fullname = (modname + "." if modname else "") + name
+
+ node_id = make_id(
+ self.env, self.state.document, self.objtype, fullname
+ )
+ signode["ids"].append(node_id)
+
+ self.state.document.note_explicit_target(signode)
+ domain = cast(QAPIDomain, self.env.get_domain("qapi"))
+ domain.note_object(fullname, self.objtype, node_id, location=signode)
+
+ if "no-index-entry" not in self.options:
+ arity, indextext = self.get_index_text(name)
+ assert self.indexnode is not None
+ if indextext:
+ self.indexnode["entries"].append(
+ (arity, indextext, node_id, "", None)
+ )
+
+ def _object_hierarchy_parts(
+ self, sig_node: desc_signature
+ ) -> Tuple[str, ...]:
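+        # Used by Sphinx's TOC machinery to nest this definition under
+        # its parents; e.g. ("block-core", "query-block") for a command.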
+ if "fullname" not in sig_node:
+ return ()
+ modname = sig_node.get("module")
+ fullname = sig_node["fullname"]
+
+ if modname:
+ return (modname, *fullname.split("."))
+
+ return tuple(fullname.split("."))
+
+ def _toc_entry_name(self, sig_node: desc_signature) -> str:
+ # This controls the name in the TOC and on the sidebar.
+
+ # This is the return type of _object_hierarchy_parts().
+ toc_parts = cast(Tuple[str, ...], sig_node.get("_toc_parts", ()))
+ if not toc_parts:
+ return ""
+
+ config = self.env.app.config
+ *parents, name = toc_parts
+ if config.toc_object_entries_show_parents == "domain":
+ return sig_node.get("fullname", name)
+ if config.toc_object_entries_show_parents == "hide":
+ return name
+ if config.toc_object_entries_show_parents == "all":
+ return ".".join(parents + [name])
+ return ""
+
+
+class QAPIObject(QAPIDescription):
+ """
+ Description of a generic QAPI object.
+
+ It's not used directly, but is instead subclassed by specific directives.
+ """
+
+ # Inherit some standard options from Sphinx's ObjectDescription
+ option_spec: OptionSpec = ( # type:ignore[misc]
+ ObjectDescription.option_spec.copy()
+ )
+ option_spec.update(
+ {
+ # Borrowed from the Python domain:
+ "module": directives.unchanged, # Override contextual module name
+ # These are QAPI originals:
+ "since": directives.unchanged,
+ "ifcond": directives.unchanged,
+ "deprecated": directives.flag,
+ "unstable": directives.flag,
+ }
+ )
+
+ doc_field_types = [
+ # :feat name: descr
+ CompatGroupedField(
+ "feature",
+ label=_("Features"),
+ names=("feat",),
+ can_collapse=False,
+ ),
+ ]
+
+ def get_signature_prefix(self) -> List[nodes.Node]:
+ """Return a prefix to put before the object name in the signature."""
+ assert self.objtype
+ return [
+ KeywordNode("", self.objtype.title()),
+ SpaceNode(" "),
+ ]
+
+ def get_signature_suffix(self) -> List[nodes.Node]:
+ """Return a suffix to put after the object name in the signature."""
+ ret: List[nodes.Node] = []
+
+ if "since" in self.options:
+ ret += [
+ SpaceNode(" "),
+ addnodes.desc_sig_element(
+ "", f"(Since: {self.options['since']})"
+ ),
+ ]
+
+ return ret
+
+ def handle_signature(self, sig: str, signode: desc_signature) -> Signature:
+ """
+ Transform a QAPI definition name into RST nodes.
+
+ This method was originally intended for handling function
+ signatures. In the QAPI domain, however, we only pass the
+ definition name as the directive argument and handle everything
+ else in the content body with field lists.
+
+ As such, the only argument here is "sig", which is just the QAPI
+ definition name.
+ """
+ modname = self.options.get(
+ "module", self.env.ref_context.get("qapi:module")
+ )
+
+ signode["fullname"] = sig
+ signode["module"] = modname
+ sig_prefix = self.get_signature_prefix()
+ if sig_prefix:
+ signode += addnodes.desc_annotation(
+ str(sig_prefix), "", *sig_prefix
+ )
+ signode += addnodes.desc_name(sig, sig)
+ signode += self.get_signature_suffix()
+
+ return sig
+
+ def _add_infopips(self, contentnode: addnodes.desc_content) -> None:
+ # Add various eye-catches and things that go below the signature
+ # bar, but precede the user-defined content.
+ infopips = nodes.container()
+ infopips.attributes["classes"].append("qapi-infopips")
+
+ def _add_pip(
+ source: str, content: Union[str, List[nodes.Node]], classname: str
+ ) -> None:
+ node = nodes.container(source)
+ if isinstance(content, str):
+ node.append(nodes.Text(content))
+ else:
+ node.extend(content)
+ node.attributes["classes"].extend(["qapi-infopip", classname])
+ infopips.append(node)
+
+ if "deprecated" in self.options:
+ _add_pip(
+ ":deprecated:",
+ f"This {self.objtype} is deprecated.",
+ "qapi-deprecated",
+ )
+
+ if "unstable" in self.options:
+ _add_pip(
+ ":unstable:",
+ f"This {self.objtype} is unstable/experimental.",
+ "qapi-unstable",
+ )
+
+ if self.options.get("ifcond", ""):
+ ifcond = self.options["ifcond"]
+ _add_pip(
+ f":ifcond: {ifcond}",
+ [
+ nodes.emphasis("", "Availability"),
+ nodes.Text(": "),
+ nodes.literal(ifcond, ifcond),
+ ],
+ "qapi-ifcond",
+ )
+
+ if infopips.children:
+ contentnode.insert(0, infopips)
+
+ def _validate_field(self, field: nodes.field) -> None:
+ """Validate field lists in this QAPI Object Description."""
+ name, _ = _unpack_field(field)
+ allowed_fields = set(self.env.app.config.qapi_allowed_fields)
+
+ field_label = name.astext()
+ if field_label in allowed_fields:
+ # Explicitly allowed field list name, OK.
+ return
+
+ try:
+ # split into field type and argument (if provided)
+ # e.g. `:arg type name: descr` is
+ # field_type = "arg", field_arg = "type name".
+ field_type, field_arg = field_label.split(None, 1)
+ except ValueError:
+ # No arguments provided
+ field_type = field_label
+ field_arg = ""
+
+ typemap = self.get_field_type_map()
+ if field_type in typemap:
+ # This is a special docfield, yet-to-be-processed. Catch
+ # correct names, but incorrect arguments. This mismatch WILL
+ # cause Sphinx to render this field incorrectly (without a
+ # warning), which is never what we want.
+ typedesc = typemap[field_type][0]
+ if typedesc.has_arg != bool(field_arg):
+ msg = f"docfield field list type {field_type!r} "
+ if typedesc.has_arg:
+ msg += "requires an argument."
+ else:
+ msg += "takes no arguments."
+ logger.warning(msg, location=field)
+ else:
+ # This is unrecognized entirely. It's valid rST to use
+ # arbitrary fields, but let's ensure the documentation
+ # writer has done this intentionally.
+ valid = ", ".join(sorted(set(typemap) | allowed_fields))
+ msg = (
+ f"Unrecognized field list name {field_label!r}.\n"
+ f"Valid fields for qapi:{self.objtype} are: {valid}\n"
+ "\n"
+ "If this usage is intentional, please add it to "
+ "'qapi_allowed_fields' in docs/conf.py."
+ )
+ logger.warning(msg, location=field)
+
+ def transform_content(self, content_node: addnodes.desc_content) -> None:
+ # This hook runs after before_content and the nested parse, but
+ # before the DocFieldTransformer is executed.
+ super().transform_content(content_node)
+
+ self._add_infopips(content_node)
+
+ # Validate field lists.
+ for child in content_node:
+ if isinstance(child, nodes.field_list):
+ for field in child.children:
+ assert isinstance(field, nodes.field)
+ self._validate_field(field)
+
+
+class SpecialTypedField(CompatTypedField):
+ def make_field(self, *args: Any, **kwargs: Any) -> nodes.field:
+ ret = super().make_field(*args, **kwargs)
+
+ # Look for the characteristic " -- " text node that Sphinx
+ # inserts for each TypedField entry ...
+ for node in ret.traverse(lambda n: str(n) == " -- "):
+ par = node.parent
+ if par.children[0].astext() != "q_dummy":
+ continue
+
+ # If the first node's text is q_dummy, this is a dummy
+ # field we want to strip down to just its contents.
+ del par.children[:-1]
+
+ return ret
+
+
+class QAPICommand(QAPIObject):
+ """Description of a QAPI Command."""
+
+ doc_field_types = QAPIObject.doc_field_types.copy()
+ doc_field_types.extend(
+ [
+ # :arg TypeName ArgName: descr
+ SpecialTypedField(
+ "argument",
+ label=_("Arguments"),
+ names=("arg",),
+ typerolename="type",
+ can_collapse=False,
+ ),
+ # :error: descr
+ CompatField(
+ "error",
+ label=_("Errors"),
+ names=("error", "errors"),
+ has_arg=False,
+ ),
+ # :return TypeName: descr
+ CompatGroupedField(
+ "returnvalue",
+ label=_("Return"),
+ rolename="type",
+ names=("return",),
+ can_collapse=True,
+ ),
+ ]
+ )
+
+
+class QAPIEnum(QAPIObject):
+ """Description of a QAPI Enum."""
+
+ doc_field_types = QAPIObject.doc_field_types.copy()
+ doc_field_types.extend(
+ [
+ # :value name: descr
+ CompatGroupedField(
+ "value",
+ label=_("Values"),
+ names=("value",),
+ can_collapse=False,
+ )
+ ]
+ )
+
+
+class QAPIAlternate(QAPIObject):
+ """Description of a QAPI Alternate."""
+
+ doc_field_types = QAPIObject.doc_field_types.copy()
+ doc_field_types.extend(
+ [
+ # :alt type name: descr
+ CompatTypedField(
+ "alternative",
+ label=_("Alternatives"),
+ names=("alt",),
+ typerolename="type",
+ can_collapse=False,
+ ),
+ ]
+ )
+
+
+class QAPIObjectWithMembers(QAPIObject):
+ """Base class for Events/Structs/Unions"""
+
+ doc_field_types = QAPIObject.doc_field_types.copy()
+ doc_field_types.extend(
+ [
+ # :member type name: descr
+ SpecialTypedField(
+ "member",
+ label=_("Members"),
+ names=("memb",),
+ typerolename="type",
+ can_collapse=False,
+ ),
+ ]
+ )
+
+
+class QAPIEvent(QAPIObjectWithMembers):
+ # pylint: disable=too-many-ancestors
+ """Description of a QAPI Event."""
+
+
+class QAPIJSONObject(QAPIObjectWithMembers):
+ # pylint: disable=too-many-ancestors
+ """Description of a QAPI Object: structs and unions."""
+
+
+class QAPIModule(QAPIDescription):
+ """
+ Directive to mark description of a new module.
+
+ This directive doesn't generate any special formatting, and is just
+ a pass-through for the content body. Named section titles are
+ allowed in the content body.
+
+ Use this directive to create entries for the QAPI module in the
+ global index and the QAPI index, as well as to associate subsequent
+ definitions with the module they are defined in for purposes of
+ search and QAPI index organization.
+
+ :arg: The name of the module.
+ :opt no-index: Don't add cross-reference targets or index entries.
+ :opt no-typesetting: Don't render the content body (but preserve any
+ cross-reference target IDs in the squelched output.)
+
+ Example::
+
+ .. qapi:module:: block-core
+ :no-index:
+ :no-typesetting:
+
+ Lorem ipsum, dolor sit amet ...
+ """
+
+ def run(self) -> List[Node]:
+ modname = self.arguments[0].strip()
+ self.env.ref_context["qapi:module"] = modname
+ ret = super().run()
+
+ # ObjectDescription always creates a visible signature bar. We
+ # want module items to be "invisible", however.
+
+ # Extract the content body of the directive:
+ assert isinstance(ret[-1], addnodes.desc)
+ desc_node = ret.pop(-1)
+ assert isinstance(desc_node.children[1], addnodes.desc_content)
+ ret.extend(desc_node.children[1].children)
+
+ # Re-home node_ids so anchor refs still work:
+ node_ids: List[str]
+ if node_ids := [
+ node_id
+ for el in desc_node.children[0].traverse(nodes.Element)
+ for node_id in cast(List[str], el.get("ids", ()))
+ ]:
+ target_node = nodes.target(ids=node_ids)
+ ret.insert(1, target_node)
+
+ return ret
+
+
+class QAPIIndex(Index):
+ """
+ Index subclass to provide the QAPI definition index.
+ """
+
+ # pylint: disable=too-few-public-methods
+
+ name = "index"
+ localname = _("QAPI Index")
+ shortname = _("QAPI Index")
+
+ def generate(
+ self,
+ docnames: Optional[Iterable[str]] = None,
+ ) -> Tuple[List[Tuple[str, List[IndexEntry]]], bool]:
+ assert isinstance(self.domain, QAPIDomain)
+ content: Dict[str, List[IndexEntry]] = {}
+ collapse = False
+
+ # list of all object (name, ObjectEntry) pairs, sorted by name
+ # (ignoring the module)
+ objects = sorted(
+ self.domain.objects.items(),
+ key=lambda x: x[0].split(".")[-1].lower(),
+ )
+
+ for objname, obj in objects:
+ if docnames and obj.docname not in docnames:
+ continue
+
+ # Strip the module name out:
+ objname = objname.split(".")[-1]
+
+ # Add an alphabetical entry:
+ entries = content.setdefault(objname[0].upper(), [])
+ entries.append(
+ IndexEntry(
+ objname, 0, obj.docname, obj.node_id, obj.objtype, "", ""
+ )
+ )
+
+ # Add a categorical entry:
+ category = obj.objtype.title() + "s"
+ entries = content.setdefault(category, [])
+ entries.append(
+ IndexEntry(objname, 0, obj.docname, obj.node_id, "", "", "")
+ )
+
+ # alphabetically sort categories; type names first, ABC entries last.
+ sorted_content = sorted(
+ content.items(),
+ key=lambda x: (len(x[0]) == 1, x[0]),
+ )
+ return sorted_content, collapse
+
+
+class QAPIDomain(Domain):
+ """QAPI language domain."""
+
+ name = "qapi"
+ label = "QAPI"
+
+ # This table associates cross-reference object types (key) with an
+ # ObjType instance, which defines the valid cross-reference roles
+ # for each object type.
+ #
+ # e.g., the :qapi:type: cross-reference role can refer to enum,
+ # struct, union, or alternate objects; but :qapi:obj: can refer to
+ # anything. Each object also gets its own targeted cross-reference role.
+ object_types: Dict[str, ObjType] = {
+ "module": ObjType(_("module"), "mod", "any"),
+ "command": ObjType(_("command"), "cmd", "any"),
+ "event": ObjType(_("event"), "event", "any"),
+ "enum": ObjType(_("enum"), "enum", "type", "any"),
+ "object": ObjType(_("object"), "obj", "type", "any"),
+ "alternate": ObjType(_("alternate"), "alt", "type", "any"),
+ }
+
+ # Each of these provides a rST directive,
+ # e.g. .. qapi:module:: block-core
+ directives = {
+ "module": QAPIModule,
+ "command": QAPICommand,
+ "event": QAPIEvent,
+ "enum": QAPIEnum,
+ "object": QAPIJSONObject,
+ "alternate": QAPIAlternate,
+ }
+
+ # These are all cross-reference roles; e.g.
+ # :qapi:cmd:`query-block`. The keys correlate to the names used in
+ # the object_types table values above.
+ roles = {
+ "mod": QAPIXRefRole(),
+ "cmd": QAPIXRefRole(),
+ "event": QAPIXRefRole(),
+ "enum": QAPIXRefRole(),
+ "obj": QAPIXRefRole(), # specifically structs and unions.
+ "alt": QAPIXRefRole(),
+ # reference any data type (excludes modules, commands, events)
+ "type": QAPIXRefRole(),
+ "any": QAPIXRefRole(), # reference *any* type of QAPI object.
+ }
+
+ # Moved into the data property at runtime;
+ # this is the internal index of reference-able objects.
+ initial_data: Dict[str, Dict[str, Tuple[Any]]] = {
+ "objects": {}, # fullname -> ObjectEntry
+ }
+
+ # Index pages to generate; each entry is an Index class.
+ indices = [
+ QAPIIndex,
+ ]
+
+ @property
+ def objects(self) -> Dict[str, ObjectEntry]:
+ ret = self.data.setdefault("objects", {})
+ return ret # type: ignore[no-any-return]
+
+ def note_object(
+ self,
+ name: str,
+ objtype: str,
+ node_id: str,
+ aliased: bool = False,
+ location: Any = None,
+ ) -> None:
+ """Note a QAPI object for cross reference."""
+ if name in self.objects:
+ other = self.objects[name]
+ if other.aliased and aliased is False:
+ # The original definition found. Override it!
+ pass
+ elif other.aliased is False and aliased:
+ # The original definition is already registered.
+ return
+ else:
+ # duplicated
+ logger.warning(
+ __(
+ "duplicate object description of %s, "
+ "other instance in %s, use :no-index: for one of them"
+ ),
+ name,
+ other.docname,
+ location=location,
+ )
+ self.objects[name] = ObjectEntry(
+ self.env.docname, node_id, objtype, aliased
+ )
+
+ def clear_doc(self, docname: str) -> None:
+ for fullname, obj in list(self.objects.items()):
+ if obj.docname == docname:
+ del self.objects[fullname]
+
+ def merge_domaindata(
+ self, docnames: AbstractSet[str], otherdata: Dict[str, Any]
+ ) -> None:
+ for fullname, obj in otherdata["objects"].items():
+ if obj.docname in docnames:
+ # Sphinx's own python domain doesn't appear to bother to
+ # check for collisions. Assert they don't happen and
+ # we'll fix it if/when the case arises.
+ assert fullname not in self.objects, (
+ "bug - collision on merge?"
+ f" {fullname=} {obj=} {self.objects[fullname]=}"
+ )
+ self.objects[fullname] = obj
+
+ def find_obj(
+ self, modname: str, name: str, typ: Optional[str]
+ ) -> list[tuple[str, ObjectEntry]]:
+ """
+ Find a QAPI object for "name", perhaps using the given module.
+
+ Returns a list of (name, object entry) tuples.
+
+ :param modname: The current module context (if any!)
+ under which we are searching.
+ :param name: The name of the x-ref to resolve;
+ may or may not include a leading module.
+ :param typ: The role name of the x-ref we're resolving, if provided.
+ (This is absent for "any" lookups.)
+ """
+ if not name:
+ return []
+
+ names: list[str] = []
+ matches: list[tuple[str, ObjectEntry]] = []
+
+ fullname = name
+ if "." in fullname:
+ # We're searching for a fully qualified reference;
+ # ignore the contextual module.
+ pass
+ elif modname:
+ # We're searching for something from somewhere;
+ # try searching the current module first.
+ # e.g. :qapi:cmd:`query-block` or `query-block` is being searched.
+ fullname = f"{modname}.{name}"
+
+ if typ is None:
+ # type isn't specified, this is a generic xref.
+ # search *all* qapi-specific object types.
+ objtypes: List[str] = list(self.object_types)
+ else:
+ # type is specified and will be a role (e.g. obj, mod, cmd)
+ # convert this to eligible object types (e.g. command, module)
+ # using the QAPIDomain.object_types table.
+ objtypes = self.objtypes_for_role(typ, [])
+
+ if name in self.objects and self.objects[name].objtype in objtypes:
+ names = [name]
+ elif (
+ fullname in self.objects
+ and self.objects[fullname].objtype in objtypes
+ ):
+ names = [fullname]
+ else:
+ # exact match wasn't found; e.g. we are searching for
+ # `query-block` from a different (or no) module.
+ searchname = "." + name
+ names = [
+ oname
+ for oname in self.objects
+ if oname.endswith(searchname)
+ and self.objects[oname].objtype in objtypes
+ ]
+
+ matches = [(oname, self.objects[oname]) for oname in names]
+ if len(matches) > 1:
+ matches = [m for m in matches if not m[1].aliased]
+ return matches
+
+ def resolve_xref(
+ self,
+ env: BuildEnvironment,
+ fromdocname: str,
+ builder: Builder,
+ typ: str,
+ target: str,
+ node: pending_xref,
+ contnode: Element,
+ ) -> nodes.reference | None:
+ modname = node.get("qapi:module")
+ matches = self.find_obj(modname, target, typ)
+
+ if not matches:
+ # Normally, we could pass warn_dangling=True to QAPIXRefRole(),
+ # but that will trigger on references to these built-in types,
+ # which we'd like to ignore instead.
+
+ # Take care of that warning here instead, so long as the
+ # reference isn't to one of our built-in core types.
+ if target not in (
+ "string",
+ "number",
+ "int",
+ "boolean",
+ "null",
+ "value",
+ "q_empty",
+ ):
+ logger.warning(
+ __("qapi:%s reference target not found: %r"),
+ typ,
+ target,
+ type="ref",
+ subtype="qapi",
+ location=node,
+ )
+ return None
+
+ if len(matches) > 1:
+ logger.warning(
+ __("more than one target found for cross-reference %r: %s"),
+ target,
+ ", ".join(match[0] for match in matches),
+ type="ref",
+ subtype="qapi",
+ location=node,
+ )
+
+ name, obj = matches[0]
+ return make_refnode(
+ builder, fromdocname, obj.docname, obj.node_id, contnode, name
+ )
+
+ def resolve_any_xref(
+ self,
+ env: BuildEnvironment,
+ fromdocname: str,
+ builder: Builder,
+ target: str,
+ node: pending_xref,
+ contnode: Element,
+ ) -> List[Tuple[str, nodes.reference]]:
+ results: List[Tuple[str, nodes.reference]] = []
+ matches = self.find_obj(node.get("qapi:module"), target, None)
+ for name, obj in matches:
+ rolename = self.role_for_objtype(obj.objtype)
+ assert rolename is not None
+ role = f"qapi:{rolename}"
+ refnode = make_refnode(
+ builder, fromdocname, obj.docname, obj.node_id, contnode, name
+ )
+ results.append((role, refnode))
+ return results
+
+
+def setup(app: Sphinx) -> Dict[str, Any]:
+ app.setup_extension("sphinx.directives")
+ app.add_config_value(
+ "qapi_allowed_fields",
+ set(),
+ "env", # Setting impacts parsing phase
+ types=set,
+ )
+ app.add_domain(QAPIDomain)
+
+ return {
+ "version": "1.0",
+ "env_version": 1,
+ "parallel_read_safe": True,
+ "parallel_write_safe": True,
+ }
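
For context, enabling the domain from a project's docs/conf.py could look like the sketch below; the extra field name is illustrative only and not a value taken from this series::

    # Illustrative conf.py sketch; "see also" is a hypothetical
    # project-specific field name, not something defined by this patch.
    extensions = ["qapi_domain"]

    # Field-list names (beyond the built-in :arg:/:memb:/:feat:/... docfields)
    # that QAPIObject._validate_field() accepts without a warning:
    qapi_allowed_fields = {
        "see also",        # hypothetical extra field
    }
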
diff --git a/docs/sphinx/qapidoc.py b/docs/sphinx/qapidoc.py
index 61997fd..432fef0 100644
--- a/docs/sphinx/qapidoc.py
+++ b/docs/sphinx/qapidoc.py
@@ -2,6 +2,7 @@
#
# QEMU qapidoc QAPI file parsing extension
#
+# Copyright (c) 2024-2025 Red Hat
# Copyright (c) 2020 Linaro
#
# This work is licensed under the terms of the GNU GPLv2 or later.
@@ -24,440 +25,431 @@ The Sphinx documentation on writing extensions is at:
https://www.sphinx-doc.org/en/master/development/index.html
"""
+from __future__ import annotations
+
+__version__ = "2.0"
+
+from contextlib import contextmanager
import os
+from pathlib import Path
import re
import sys
-import textwrap
-from typing import List
+from typing import TYPE_CHECKING
from docutils import nodes
-from docutils.parsers.rst import Directive, directives
-from docutils.statemachine import ViewList
-from qapi.error import QAPIError, QAPISemError
-from qapi.gen import QAPISchemaVisitor
-from qapi.schema import QAPISchema
-
+from docutils.parsers.rst import directives
+from docutils.statemachine import StringList
+from qapi.error import QAPIError
+from qapi.parser import QAPIDoc
+from qapi.schema import (
+ QAPISchema,
+ QAPISchemaArrayType,
+ QAPISchemaCommand,
+ QAPISchemaDefinition,
+ QAPISchemaEnumMember,
+ QAPISchemaEvent,
+ QAPISchemaFeature,
+ QAPISchemaMember,
+ QAPISchemaObjectType,
+ QAPISchemaObjectTypeMember,
+ QAPISchemaType,
+ QAPISchemaVisitor,
+)
+from qapi.source import QAPISourceInfo
+
+from qapidoc_legacy import QAPISchemaGenRSTVisitor # type: ignore
from sphinx import addnodes
from sphinx.directives.code import CodeBlock
from sphinx.errors import ExtensionError
-from sphinx.util.docutils import switch_source_input
+from sphinx.util import logging
+from sphinx.util.docutils import SphinxDirective, switch_source_input
from sphinx.util.nodes import nested_parse_with_titles
-__version__ = "1.0"
+if TYPE_CHECKING:
+ from typing import (
+ Any,
+ Generator,
+ List,
+ Optional,
+ Sequence,
+ Union,
+ )
+ from sphinx.application import Sphinx
+ from sphinx.util.typing import ExtensionMetadata
-def dedent(text: str) -> str:
- # Adjust indentation to make description text parse as paragraph.
- lines = text.splitlines(True)
- if re.match(r"\s+", lines[0]):
- # First line is indented; description started on the line after
- # the name. dedent the whole block.
- return textwrap.dedent(text)
+logger = logging.getLogger(__name__)
- # Descr started on same line. Dedent line 2+.
- return lines[0] + textwrap.dedent("".join(lines[1:]))
+class Transmogrifier:
+ # pylint: disable=too-many-public-methods
-# Disable black auto-formatter until re-enabled:
-# fmt: off
+ # Field names used for different entity types:
+ field_types = {
+ "enum": "value",
+ "struct": "memb",
+ "union": "memb",
+ "event": "memb",
+ "command": "arg",
+ "alternate": "alt",
+ }
+ def __init__(self) -> None:
+ self._curr_ent: Optional[QAPISchemaDefinition] = None
+ self._result = StringList()
+ self.indent = 0
-class QAPISchemaGenRSTVisitor(QAPISchemaVisitor):
- """A QAPI schema visitor which generates docutils/Sphinx nodes
+ @property
+ def result(self) -> StringList:
+ return self._result
- This class builds up a tree of docutils/Sphinx nodes corresponding
- to documentation for the various QAPI objects. To use it, first
- create a QAPISchemaGenRSTVisitor object, and call its
- visit_begin() method. Then you can call one of the two methods
- 'freeform' (to add documentation for a freeform documentation
- chunk) or 'symbol' (to add documentation for a QAPI symbol). These
- will cause the visitor to build up the tree of document
- nodes. Once you've added all the documentation via 'freeform' and
- 'symbol' method calls, you can call 'get_document_nodes' to get
- the final list of document nodes (in a form suitable for returning
- from a Sphinx directive's 'run' method).
- """
- def __init__(self, sphinx_directive):
- self._cur_doc = None
- self._sphinx_directive = sphinx_directive
- self._top_node = nodes.section()
- self._active_headings = [self._top_node]
-
- def _make_dlitem(self, term, defn):
- """Return a dlitem node with the specified term and definition.
-
- term should be a list of Text and literal nodes.
- defn should be one of:
- - a string, which will be handed to _parse_text_into_node
- - a list of Text and literal nodes, which will be put into
- a paragraph node
- """
- dlitem = nodes.definition_list_item()
- dlterm = nodes.term('', '', *term)
- dlitem += dlterm
- if defn:
- dldef = nodes.definition()
- if isinstance(defn, list):
- dldef += nodes.paragraph('', '', *defn)
- else:
- self._parse_text_into_node(defn, dldef)
- dlitem += dldef
- return dlitem
-
- def _make_section(self, title):
- """Return a section node with optional title"""
- section = nodes.section(ids=[self._sphinx_directive.new_serialno()])
- if title:
- section += nodes.title(title, title)
- return section
-
- def _nodes_for_ifcond(self, ifcond, with_if=True):
- """Return list of Text, literal nodes for the ifcond
-
- Return a list which gives text like ' (If: condition)'.
- If with_if is False, we don't return the "(If: " and ")".
- """
+ @property
+ def entity(self) -> QAPISchemaDefinition:
+ assert self._curr_ent is not None
+ return self._curr_ent
- doc = ifcond.docgen()
- if not doc:
- return []
- doc = nodes.literal('', doc)
- if not with_if:
- return [doc]
+ @property
+ def member_field_type(self) -> str:
+ return self.field_types[self.entity.meta]
- nodelist = [nodes.Text(' ('), nodes.strong('', 'If: ')]
- nodelist.append(doc)
- nodelist.append(nodes.Text(')'))
- return nodelist
+ # General-purpose rST generation functions
- def _nodes_for_one_member(self, member):
- """Return list of Text, literal nodes for this member
+ def get_indent(self) -> str:
+ return " " * self.indent
- Return a list of doctree nodes which give text like
- 'name: type (optional) (If: ...)' suitable for use as the
- 'term' part of a definition list item.
- """
- term = [nodes.literal('', member.name)]
- if member.type.doc_type():
- term.append(nodes.Text(': '))
- term.append(nodes.literal('', member.type.doc_type()))
- if member.optional:
- term.append(nodes.Text(' (optional)'))
- if member.ifcond.is_present():
- term.extend(self._nodes_for_ifcond(member.ifcond))
- return term
-
- def _nodes_for_variant_when(self, branches, variant):
- """Return list of Text, literal nodes for variant 'when' clause
-
- Return a list of doctree nodes which give text like
- 'when tagname is variant (If: ...)' suitable for use in
- the 'branches' part of a definition list.
- """
- term = [nodes.Text(' when '),
- nodes.literal('', branches.tag_member.name),
- nodes.Text(' is '),
- nodes.literal('', '"%s"' % variant.name)]
- if variant.ifcond.is_present():
- term.extend(self._nodes_for_ifcond(variant.ifcond))
- return term
-
- def _nodes_for_members(self, doc, what, base=None, branches=None):
- """Return list of doctree nodes for the table of members"""
- dlnode = nodes.definition_list()
- for section in doc.args.values():
- term = self._nodes_for_one_member(section.member)
- # TODO drop fallbacks when undocumented members are outlawed
- if section.text:
- defn = dedent(section.text)
- else:
- defn = [nodes.Text('Not documented')]
+ @contextmanager
+ def indented(self) -> Generator[None]:
+ self.indent += 1
+ try:
+ yield
+ finally:
+ self.indent -= 1
- dlnode += self._make_dlitem(term, defn)
+ def add_line_raw(self, line: str, source: str, *lineno: int) -> None:
+ """Append one line of generated reST to the output."""
- if base:
- dlnode += self._make_dlitem([nodes.Text('The members of '),
- nodes.literal('', base.doc_type())],
- None)
+ # NB: Sphinx uses zero-indexed lines; subtract one.
+ lineno = tuple((n - 1 for n in lineno))
- if branches:
- for v in branches.variants:
- if v.type.name == 'q_empty':
- continue
- assert not v.type.is_implicit()
- term = [nodes.Text('The members of '),
- nodes.literal('', v.type.doc_type())]
- term.extend(self._nodes_for_variant_when(branches, v))
- dlnode += self._make_dlitem(term, None)
-
- if not dlnode.children:
- return []
-
- section = self._make_section(what)
- section += dlnode
- return [section]
-
- def _nodes_for_enum_values(self, doc):
- """Return list of doctree nodes for the table of enum values"""
- seen_item = False
- dlnode = nodes.definition_list()
- for section in doc.args.values():
- termtext = [nodes.literal('', section.member.name)]
- if section.member.ifcond.is_present():
- termtext.extend(self._nodes_for_ifcond(section.member.ifcond))
- # TODO drop fallbacks when undocumented members are outlawed
- if section.text:
- defn = dedent(section.text)
- else:
- defn = [nodes.Text('Not documented')]
-
- dlnode += self._make_dlitem(termtext, defn)
- seen_item = True
-
- if not seen_item:
- return []
-
- section = self._make_section('Values')
- section += dlnode
- return [section]
-
- def _nodes_for_arguments(self, doc, arg_type):
- """Return list of doctree nodes for the arguments section"""
- if arg_type and not arg_type.is_implicit():
- assert not doc.args
- section = self._make_section('Arguments')
- dlnode = nodes.definition_list()
- dlnode += self._make_dlitem(
- [nodes.Text('The members of '),
- nodes.literal('', arg_type.name)],
- None)
- section += dlnode
- return [section]
-
- return self._nodes_for_members(doc, 'Arguments')
-
- def _nodes_for_features(self, doc):
- """Return list of doctree nodes for the table of features"""
- seen_item = False
- dlnode = nodes.definition_list()
- for section in doc.features.values():
- dlnode += self._make_dlitem(
- [nodes.literal('', section.member.name)], dedent(section.text))
- seen_item = True
-
- if not seen_item:
- return []
-
- section = self._make_section('Features')
- section += dlnode
- return [section]
-
- def _nodes_for_sections(self, doc):
- """Return list of doctree nodes for additional sections"""
- nodelist = []
- for section in doc.sections:
- if section.tag and section.tag == 'TODO':
- # Hide TODO: sections
- continue
-
- if not section.tag:
- # Sphinx cannot handle sectionless titles;
- # Instead, just append the results to the prior section.
- container = nodes.container()
- self._parse_text_into_node(section.text, container)
- nodelist += container.children
- continue
-
- snode = self._make_section(section.tag)
- self._parse_text_into_node(dedent(section.text), snode)
- nodelist.append(snode)
- return nodelist
-
- def _nodes_for_if_section(self, ifcond):
- """Return list of doctree nodes for the "If" section"""
- nodelist = []
- if ifcond.is_present():
- snode = self._make_section('If')
- snode += nodes.paragraph(
- '', '', *self._nodes_for_ifcond(ifcond, with_if=False)
+ if line.strip():
+ # not a blank line
+ self._result.append(
+ self.get_indent() + line.rstrip("\n"), source, *lineno
)
- nodelist.append(snode)
- return nodelist
-
- def _add_doc(self, typ, sections):
- """Add documentation for a command/object/enum...
-
- We assume we're documenting the thing defined in self._cur_doc.
- typ is the type of thing being added ("Command", "Object", etc)
-
- sections is a list of nodes for sections to add to the definition.
- """
-
- doc = self._cur_doc
- snode = nodes.section(ids=[self._sphinx_directive.new_serialno()])
- snode += nodes.title('', '', *[nodes.literal(doc.symbol, doc.symbol),
- nodes.Text(' (' + typ + ')')])
- self._parse_text_into_node(doc.body.text, snode)
- for s in sections:
- if s is not None:
- snode += s
- self._add_node_to_current_heading(snode)
-
- def visit_enum_type(self, name, info, ifcond, features, members, prefix):
- doc = self._cur_doc
- self._add_doc('Enum',
- self._nodes_for_enum_values(doc)
- + self._nodes_for_features(doc)
- + self._nodes_for_sections(doc)
- + self._nodes_for_if_section(ifcond))
-
- def visit_object_type(self, name, info, ifcond, features,
- base, members, branches):
- doc = self._cur_doc
- if base and base.is_implicit():
- base = None
- self._add_doc('Object',
- self._nodes_for_members(doc, 'Members', base, branches)
- + self._nodes_for_features(doc)
- + self._nodes_for_sections(doc)
- + self._nodes_for_if_section(ifcond))
-
- def visit_alternate_type(self, name, info, ifcond, features,
- alternatives):
- doc = self._cur_doc
- self._add_doc('Alternate',
- self._nodes_for_members(doc, 'Members')
- + self._nodes_for_features(doc)
- + self._nodes_for_sections(doc)
- + self._nodes_for_if_section(ifcond))
-
- def visit_command(self, name, info, ifcond, features, arg_type,
- ret_type, gen, success_response, boxed, allow_oob,
- allow_preconfig, coroutine):
- doc = self._cur_doc
- self._add_doc('Command',
- self._nodes_for_arguments(doc, arg_type)
- + self._nodes_for_features(doc)
- + self._nodes_for_sections(doc)
- + self._nodes_for_if_section(ifcond))
-
- def visit_event(self, name, info, ifcond, features, arg_type, boxed):
- doc = self._cur_doc
- self._add_doc('Event',
- self._nodes_for_arguments(doc, arg_type)
- + self._nodes_for_features(doc)
- + self._nodes_for_sections(doc)
- + self._nodes_for_if_section(ifcond))
-
- def symbol(self, doc, entity):
- """Add documentation for one symbol to the document tree
-
- This is the main entry point which causes us to add documentation
- nodes for a symbol (which could be a 'command', 'object', 'event',
- etc). We do this by calling 'visit' on the schema entity, which
- will then call back into one of our visit_* methods, depending
- on what kind of thing this symbol is.
- """
- self._cur_doc = doc
- entity.visit(self)
- self._cur_doc = None
-
- def _start_new_heading(self, heading, level):
- """Start a new heading at the specified heading level
+ else:
+ self._result.append("", source, *lineno)
+
+ def add_line(self, content: str, info: QAPISourceInfo) -> None:
+ # NB: We *require* an info object; this works out OK because we
+ # don't document built-in objects that don't have
+ # one. Everything else should.
+ self.add_line_raw(content, info.fname, info.line)
+
+ def add_lines(
+ self,
+ content: str,
+ info: QAPISourceInfo,
+ ) -> None:
+ lines = content.splitlines(True)
+ for i, line in enumerate(lines):
+ self.add_line_raw(line, info.fname, info.line + i)
+
+ def ensure_blank_line(self) -> None:
+ # Empty document -- no blank line required.
+ if not self._result:
+ return
+
+ # Last line isn't blank, add one.
+ if self._result[-1].strip(): # pylint: disable=no-member
+ fname, line = self._result.info(-1)
+ assert isinstance(line, int)
+ # New blank line is credited to one-after the current last line.
+ # +2: correct for zero/one index, then increment by one.
+ self.add_line_raw("", fname, line + 2)
+
+ def add_field(
+ self,
+ kind: str,
+ name: str,
+ body: str,
+ info: QAPISourceInfo,
+ typ: Optional[str] = None,
+ ) -> None:
+ if typ:
+ text = f":{kind} {typ} {name}: {body}"
+ else:
+ text = f":{kind} {name}: {body}"
+ self.add_lines(text, info)
+
+ def format_type(
+ self, ent: Union[QAPISchemaDefinition, QAPISchemaMember]
+ ) -> Optional[str]:
+ if isinstance(ent, (QAPISchemaEnumMember, QAPISchemaFeature)):
+ return None
+
+ qapi_type = ent
+ optional = False
+ if isinstance(ent, QAPISchemaObjectTypeMember):
+ qapi_type = ent.type
+ optional = ent.optional
+
+ if isinstance(qapi_type, QAPISchemaArrayType):
+ ret = f"[{qapi_type.element_type.doc_type()}]"
+ else:
+ assert isinstance(qapi_type, QAPISchemaType)
+ tmp = qapi_type.doc_type()
+ assert tmp
+ ret = tmp
+ if optional:
+ ret += "?"
+
+ return ret
+
+ def generate_field(
+ self,
+ kind: str,
+ member: QAPISchemaMember,
+ body: str,
+ info: QAPISourceInfo,
+ ) -> None:
+ typ = self.format_type(member)
+ self.add_field(kind, member.name, body, info, typ)
+
+ # Transmogrification helpers
+
+ def visit_paragraph(self, section: QAPIDoc.Section) -> None:
+ # Squelch empty paragraphs.
+ if not section.text:
+ return
+
+ self.ensure_blank_line()
+ self.add_lines(section.text, section.info)
+ self.ensure_blank_line()
+
+ def visit_member(self, section: QAPIDoc.ArgSection) -> None:
+ # FIXME: ifcond for members
+ # TODO: features for members (documented at entity-level,
+ # but sometimes defined per-member. Should we add such
+ # information to member descriptions when we can?)
+ assert section.member
+ self.generate_field(
+ self.member_field_type,
+ section.member,
+ # TODO drop fallbacks when undocumented members are outlawed
+ section.text if section.text else "Not documented",
+ section.info,
+ )
- Create a new section whose title is 'heading' and which is placed
- in the docutils node tree as a child of the most recent level-1
- heading. Subsequent document sections (commands, freeform doc chunks,
- etc) will be placed as children of this new heading section.
+ def visit_feature(self, section: QAPIDoc.ArgSection) -> None:
+ # FIXME - ifcond for features is not handled at all yet!
+ # Proposal: decorate the right-hand column with some graphical
+ # element to indicate conditional availability?
+ assert section.text # Guaranteed by parser.py
+ assert section.member
+
+ self.generate_field("feat", section.member, section.text, section.info)
+
+ def visit_returns(self, section: QAPIDoc.Section) -> None:
+ assert isinstance(self.entity, QAPISchemaCommand)
+ rtype = self.entity.ret_type
+ # q_empty can produce None, but we won't be documenting anything
+ # without an explicit return statement in the doc block, and we
+ # should not have any such explicit statements when there is no
+ # return value.
+ assert rtype
+
+ typ = self.format_type(rtype)
+ assert typ
+ assert section.text
+ self.add_field("return", typ, section.text, section.info)
+
+ def visit_errors(self, section: QAPIDoc.Section) -> None:
+ # FIXME: the formatting for errors may be inconsistent and may
+ # or may not require different newline placement to ensure
+ # proper rendering as a nested list.
+ self.add_lines(f":error:\n{section.text}", section.info)
+
+ def preamble(self, ent: QAPISchemaDefinition) -> None:
"""
- if len(self._active_headings) < level:
- raise QAPISemError(self._cur_doc.info,
- 'Level %d subheading found outside a '
- 'level %d heading'
- % (level, level - 1))
- snode = self._make_section(heading)
- self._active_headings[level - 1] += snode
- self._active_headings = self._active_headings[:level]
- self._active_headings.append(snode)
- return snode
-
- def _add_node_to_current_heading(self, node):
- """Add the node to whatever the current active heading is"""
- self._active_headings[-1] += node
-
- def freeform(self, doc):
- """Add a piece of 'freeform' documentation to the document tree
-
- A 'freeform' document chunk doesn't relate to any particular
- symbol (for instance, it could be an introduction).
-
- If the freeform document starts with a line of the form
- '= Heading text', this is a section or subsection heading, with
- the heading level indicated by the number of '=' signs.
+ Generate option lines for QAPI entity directives.
"""
+ if ent.doc and ent.doc.since:
+ assert ent.doc.since.kind == QAPIDoc.Kind.SINCE
+ # Generated from the entity's docblock; info location is exact.
+ self.add_line(f":since: {ent.doc.since.text}", ent.doc.since.info)
+
+ if ent.ifcond.is_present():
+ doc = ent.ifcond.docgen()
+ assert ent.info
+ # Generated from entity definition; info location is approximate.
+ self.add_line(f":ifcond: {doc}", ent.info)
+
+ # Hoist special features such as :deprecated: and :unstable:
+ # into the options block for the entity. If, in the future, new
+ # special features are added, qapi-domain will chirp about
+ # unrecognized options and fail until they are handled in
+ # qapi-domain.
+ for feat in ent.features:
+ if feat.is_special():
+ # FIXME: handle ifcond if present. How to display that
+ # information is TBD.
+ # Generated from entity def; info location is approximate.
+ assert feat.info
+ self.add_line(f":{feat.name}:", feat.info)
+
+ self.ensure_blank_line()
+
+ def _insert_member_pointer(self, ent: QAPISchemaDefinition) -> None:
+
+ def _get_target(
+ ent: QAPISchemaDefinition,
+ ) -> Optional[QAPISchemaDefinition]:
+ if isinstance(ent, (QAPISchemaCommand, QAPISchemaEvent)):
+ return ent.arg_type
+ if isinstance(ent, QAPISchemaObjectType):
+ return ent.base
+ return None
+
+ target = _get_target(ent)
+ if target is not None and not target.is_implicit():
+ assert ent.info
+ self.add_field(
+ self.member_field_type,
+ "q_dummy",
+ f"The members of :qapi:type:`{target.name}`.",
+ ent.info,
+ "q_dummy",
+ )
- # QAPIDoc documentation says free-form documentation blocks
- # must have only a body section, nothing else.
- assert not doc.sections
- assert not doc.args
- assert not doc.features
- self._cur_doc = doc
-
- text = doc.body.text
- if re.match(r'=+ ', text):
- # Section/subsection heading (if present, will always be
- # the first line of the block)
- (heading, _, text) = text.partition('\n')
- (leader, _, heading) = heading.partition(' ')
- node = self._start_new_heading(heading, len(leader))
- if text == '':
- return
- else:
- node = nodes.container()
-
- self._parse_text_into_node(text, node)
- self._cur_doc = None
+ if isinstance(ent, QAPISchemaObjectType) and ent.branches is not None:
+ for variant in ent.branches.variants:
+ if variant.type.name == "q_empty":
+ continue
+ assert ent.info
+ self.add_field(
+ self.member_field_type,
+ "q_dummy",
+ f" When ``{ent.branches.tag_member.name}`` is "
+ f"``{variant.name}``: "
+ f"The members of :qapi:type:`{variant.type.name}`.",
+ ent.info,
+ "q_dummy",
+ )
+
+ def visit_sections(self, ent: QAPISchemaDefinition) -> None:
+ sections = ent.doc.all_sections if ent.doc else []
+
+ # Determine the index location at which we should generate
+ # documentation for "The members of ..." pointers. This should
+ # go at the end of the members section(s) if any. Note that
+ # index 0 is assumed to be a plain intro section, even if it is
+ # empty; and that a members section if present will always
+ # immediately follow the opening PLAIN section.
+ gen_index = 1
+ if len(sections) > 1:
+ while sections[gen_index].kind == QAPIDoc.Kind.MEMBER:
+ gen_index += 1
+ if gen_index >= len(sections):
+ break
+
+ # Add sections in source order:
+ for i, section in enumerate(sections):
+ # @var is translated to ``var``:
+ section.text = re.sub(r"@([\w-]+)", r"``\1``", section.text)
+
+ if section.kind == QAPIDoc.Kind.PLAIN:
+ self.visit_paragraph(section)
+ elif section.kind == QAPIDoc.Kind.MEMBER:
+ assert isinstance(section, QAPIDoc.ArgSection)
+ self.visit_member(section)
+ elif section.kind == QAPIDoc.Kind.FEATURE:
+ assert isinstance(section, QAPIDoc.ArgSection)
+ self.visit_feature(section)
+ elif section.kind in (QAPIDoc.Kind.SINCE, QAPIDoc.Kind.TODO):
+ # Since is handled in preamble, TODO is skipped intentionally.
+ pass
+ elif section.kind == QAPIDoc.Kind.RETURNS:
+ self.visit_returns(section)
+ elif section.kind == QAPIDoc.Kind.ERRORS:
+ self.visit_errors(section)
+ else:
+ assert False
+
+ # Generate "The members of ..." entries if necessary:
+ if i == gen_index - 1:
+ self._insert_member_pointer(ent)
+
+ self.ensure_blank_line()
+
+ # Transmogrification core methods
+
+ def visit_module(self, path: str) -> None:
+ name = Path(path).stem
+ # module directives are credited to the first line of a module file.
+ self.add_line_raw(f".. qapi:module:: {name}", path, 1)
+ self.ensure_blank_line()
+
+ def visit_freeform(self, doc: QAPIDoc) -> None:
+ # TODO: Once the old qapidoc transformer is deprecated, freeform
+ # sections can be updated to pure rST, and this transform removed.
+ #
+ # For now, translate our micro-format into rST. Code adapted
+ # from Peter Maydell's freeform().
+
+ assert len(doc.all_sections) == 1, doc.all_sections
+ body = doc.all_sections[0]
+ text = body.text
+ info = doc.info
+
+ if re.match(r"=+ ", text):
+ # Section/subsection heading (if present, will always be the
+ # first line of the block)
+ (heading, _, text) = text.partition("\n")
+ (leader, _, heading) = heading.partition(" ")
+ # Implicit +1 for heading in the containing .rst doc
+ level = len(leader) + 1
+
+ # https://www.sphinx-doc.org/en/master/usage/restructuredtext/basics.html#sections
+ markers = ' #*=_^"'
+ overline = level <= 2
+ marker = markers[level]
+
+ self.ensure_blank_line()
+ # This credits all 2 or 3 lines to the single source line.
+ if overline:
+ self.add_line(marker * len(heading), info)
+ self.add_line(heading, info)
+ self.add_line(marker * len(heading), info)
+ self.ensure_blank_line()
+
+ # Eat blank line(s) and advance info
+ trimmed = text.lstrip("\n")
+ info = info.next_line(len(text) - len(trimmed) + 1)
+ text = trimmed
+
+ self.add_lines(text, info)
+ self.ensure_blank_line()
+
+ def visit_entity(self, ent: QAPISchemaDefinition) -> None:
+ assert ent.info
- def _parse_text_into_node(self, doctext, node):
- """Parse a chunk of QAPI-doc-format text into the node
+ try:
+ self._curr_ent = ent
- The doc comment can contain most inline rST markup, including
- bulleted and enumerated lists.
- As an extra permitted piece of markup, @var will be turned
- into ``var``.
- """
+ # Squish structs and unions together into an "object" directive.
+ meta = ent.meta
+ if meta in ("struct", "union"):
+ meta = "object"
- # Handle the "@var means ``var`` case
- doctext = re.sub(r'@([\w-]+)', r'``\1``', doctext)
-
- rstlist = ViewList()
- for line in doctext.splitlines():
- # The reported line number will always be that of the start line
- # of the doc comment, rather than the actual location of the error.
- # Being more precise would require overhaul of the QAPIDoc class
- # to track lines more exactly within all the sub-parts of the doc
- # comment, as well as counting lines here.
- rstlist.append(line, self._cur_doc.info.fname,
- self._cur_doc.info.line)
- # Append a blank line -- in some cases rST syntax errors get
- # attributed to the line after one with actual text, and if there
- # isn't anything in the ViewList corresponding to that then Sphinx
- # 1.6's AutodocReporter will then misidentify the source/line location
- # in the error message (usually attributing it to the top-level
- # .rst file rather than the offending .json file). The extra blank
- # line won't affect the rendered output.
- rstlist.append("", self._cur_doc.info.fname, self._cur_doc.info.line)
- self._sphinx_directive.do_parse(rstlist, node)
-
- def get_document_nodes(self):
- """Return the list of docutils nodes which make up the document"""
- return self._top_node.children
-
-
-# Turn the black formatter on for the rest of the file.
-# fmt: on
+ # This line gets credited to the start of the /definition/.
+ self.add_line(f".. qapi:{meta}:: {ent.name}", ent.info)
+ with self.indented():
+ self.preamble(ent)
+ self.visit_sections(ent)
+ finally:
+ self._curr_ent = None
class QAPISchemaGenDepVisitor(QAPISchemaVisitor):
@@ -468,22 +460,22 @@ class QAPISchemaGenDepVisitor(QAPISchemaVisitor):
schema file associated with each module in the QAPI input.
"""
- def __init__(self, env, qapidir):
+ def __init__(self, env: Any, qapidir: str) -> None:
self._env = env
self._qapidir = qapidir
- def visit_module(self, name):
+ def visit_module(self, name: str) -> None:
if name != "./builtin":
qapifile = self._qapidir + "/" + name
self._env.note_dependency(os.path.abspath(qapifile))
super().visit_module(name)
-class NestedDirective(Directive):
- def run(self):
+class NestedDirective(SphinxDirective):
+ def run(self) -> Sequence[nodes.Node]:
raise NotImplementedError
- def do_parse(self, rstlist, node):
+ def do_parse(self, rstlist: StringList, node: nodes.Node) -> None:
"""
Parse rST source lines and add them to the specified node
@@ -502,18 +494,104 @@ class QAPIDocDirective(NestedDirective):
required_argument = 1
optional_arguments = 1
- option_spec = {"qapifile": directives.unchanged_required}
+ option_spec = {
+ "qapifile": directives.unchanged_required,
+ "transmogrify": directives.flag,
+ }
has_content = False
- def new_serialno(self):
+ def new_serialno(self) -> str:
"""Return a unique new ID string suitable for use as a node's ID"""
env = self.state.document.settings.env
return "qapidoc-%d" % env.new_serialno("qapidoc")
- def run(self):
+ def transmogrify(self, schema: QAPISchema) -> nodes.Element:
+ logger.info("Transmogrifying QAPI to rST ...")
+ vis = Transmogrifier()
+ modules = set()
+
+ for doc in schema.docs:
+ module_source = doc.info.fname
+ if module_source not in modules:
+ vis.visit_module(module_source)
+ modules.add(module_source)
+
+ if doc.symbol:
+ ent = schema.lookup_entity(doc.symbol)
+ assert isinstance(ent, QAPISchemaDefinition)
+ vis.visit_entity(ent)
+ else:
+ vis.visit_freeform(doc)
+
+ logger.info("Transmogrification complete.")
+
+ contentnode = nodes.section()
+ content = vis.result
+ titles_allowed = True
+
+ logger.info("Transmogrifier running nested parse ...")
+ with switch_source_input(self.state, content):
+ if titles_allowed:
+ node: nodes.Element = nodes.section()
+ node.document = self.state.document
+ nested_parse_with_titles(self.state, content, contentnode)
+ else:
+ node = nodes.paragraph()
+ node.document = self.state.document
+ self.state.nested_parse(content, 0, contentnode)
+ logger.info("Transmogrifier's nested parse completed.")
+
+ if self.env.app.verbosity >= 2 or os.environ.get("DEBUG"):
+ argname = "_".join(Path(self.arguments[0]).parts)
+ name = Path(argname).stem + ".ir"
+ self.write_intermediate(content, name)
+
+ sys.stdout.flush()
+ return contentnode
+
+ def write_intermediate(self, content: StringList, filename: str) -> None:
+ logger.info(
+ "writing intermediate rST for '%s' to '%s'",
+ self.arguments[0],
+ filename,
+ )
+
+ srctree = Path(self.env.app.config.qapidoc_srctree).resolve()
+ outlines = []
+ lcol_width = 0
+
+ for i, line in enumerate(content):
+ src, lineno = content.info(i)
+ srcpath = Path(src).resolve()
+ srcpath = srcpath.relative_to(srctree)
+
+ lcol = f"{srcpath}:{lineno:04d}"
+ lcol_width = max(lcol_width, len(lcol))
+ outlines.append((lcol, line))
+
+ with open(filename, "w", encoding="UTF-8") as outfile:
+ for lcol, rcol in outlines:
+ outfile.write(lcol.rjust(lcol_width))
+ outfile.write(" |")
+ if rcol:
+ outfile.write(f" {rcol}")
+ outfile.write("\n")
+
+ def legacy(self, schema: QAPISchema) -> nodes.Element:
+ vis = QAPISchemaGenRSTVisitor(self)
+ vis.visit_begin(schema)
+ for doc in schema.docs:
+ if doc.symbol:
+ vis.symbol(doc, schema.lookup_entity(doc.symbol))
+ else:
+ vis.freeform(doc)
+ return vis.get_document_node() # type: ignore
+
+ def run(self) -> Sequence[nodes.Node]:
env = self.state.document.settings.env
qapifile = env.config.qapidoc_srctree + "/" + self.arguments[0]
qapidir = os.path.dirname(qapifile)
+ transmogrify = "transmogrify" in self.options
try:
schema = QAPISchema(qapifile)
@@ -521,20 +599,18 @@ class QAPIDocDirective(NestedDirective):
# First tell Sphinx about all the schema files that the
# output documentation depends on (including 'qapifile' itself)
schema.visit(QAPISchemaGenDepVisitor(env, qapidir))
-
- vis = QAPISchemaGenRSTVisitor(self)
- vis.visit_begin(schema)
- for doc in schema.docs:
- if doc.symbol:
- vis.symbol(doc, schema.lookup_entity(doc.symbol))
- else:
- vis.freeform(doc)
- return vis.get_document_nodes()
except QAPIError as err:
# Launder QAPI parse errors into Sphinx extension errors
# so they are displayed nicely to the user
raise ExtensionError(str(err)) from err
+ if transmogrify:
+ contentnode = self.transmogrify(schema)
+ else:
+ contentnode = self.legacy(schema)
+
+ return contentnode.children
+
class QMPExample(CodeBlock, NestedDirective):
"""
@@ -585,7 +661,7 @@ class QMPExample(CodeBlock, NestedDirective):
)
return node
- def admonition_wrap(self, *content) -> List[nodes.Node]:
+ def admonition_wrap(self, *content: nodes.Node) -> List[nodes.Node]:
title = "Example:"
if "title" in self.options:
title = f"{title} {self.options['title']}"
@@ -631,8 +707,9 @@ class QMPExample(CodeBlock, NestedDirective):
return self.admonition_wrap(*content_nodes)
-def setup(app):
+def setup(app: Sphinx) -> ExtensionMetadata:
"""Register qapi-doc directive with Sphinx"""
+ app.setup_extension("qapi_domain")
app.add_config_value("qapidoc_srctree", None, "env")
app.add_directive("qapi-doc", QAPIDocDirective)
app.add_directive("qmp-example", QMPExample)
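
As a rough illustration of the intermediate rST produced by the Transmogrifier defined in the hunk above, the hand-driven sketch below uses only its low-level emitters. The schema file name, line numbers, and field contents are invented, and it assumes the Transmogrifier class from this patch is importable (for example, with docs/sphinx/ on the Python path)::

    # Illustration only: emit a small piece of the intermediate rST that
    # the Transmogrifier normally builds from a parsed schema.
    from qapidoc import Transmogrifier   # assumes docs/sphinx/ is importable

    vis = Transmogrifier()
    vis.add_line_raw(".. qapi:command:: example-query", "example.json", 1)
    with vis.indented():
        vis.add_line_raw(":since: 9.0", "example.json", 2)
        vis.ensure_blank_line()
        vis.add_line_raw(":arg str name: Object to look up.", "example.json", 4)

    # The StringList handed to the nested parse in transmogrify():
    print("\n".join(vis.result))
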
diff --git a/docs/sphinx/qapidoc_legacy.py b/docs/sphinx/qapidoc_legacy.py
new file mode 100644
index 0000000..13520f4
--- /dev/null
+++ b/docs/sphinx/qapidoc_legacy.py
@@ -0,0 +1,440 @@
+# coding=utf-8
+# type: ignore
+#
+# QEMU qapidoc QAPI file parsing extension
+#
+# Copyright (c) 2020 Linaro
+#
+# This work is licensed under the terms of the GNU GPLv2 or later.
+# See the COPYING file in the top-level directory.
+
+"""
+qapidoc is a Sphinx extension that implements the qapi-doc directive
+
+The purpose of this extension is to read the documentation comments
+in QAPI schema files, and insert them all into the current document.
+
+It implements one new rST directive, "qapi-doc::".
+Each qapi-doc:: directive takes one argument, which is the
+pathname of the schema file to process, relative to the source tree.
+
+The docs/conf.py file must set the qapidoc_srctree config value to
+the root of the QEMU source tree.
+
+The Sphinx documentation on writing extensions is at:
+https://www.sphinx-doc.org/en/master/development/index.html
+"""
+
+import re
+import textwrap
+
+from docutils import nodes
+from docutils.statemachine import ViewList
+from qapi.error import QAPISemError
+from qapi.gen import QAPISchemaVisitor
+from qapi.parser import QAPIDoc
+
+
+def dedent(text: str) -> str:
+ # Adjust indentation to make description text parse as paragraph.
+
+ lines = text.splitlines(True)
+ if re.match(r"\s+", lines[0]):
+ # First line is indented; description started on the line after
+ # the name. dedent the whole block.
+ return textwrap.dedent(text)
+
+ # Descr started on same line. Dedent line 2+.
+ return lines[0] + textwrap.dedent("".join(lines[1:]))
+
+
+class QAPISchemaGenRSTVisitor(QAPISchemaVisitor):
+ """A QAPI schema visitor which generates docutils/Sphinx nodes
+
+ This class builds up a tree of docutils/Sphinx nodes corresponding
+ to documentation for the various QAPI objects. To use it, first
+ create a QAPISchemaGenRSTVisitor object, and call its
+ visit_begin() method. Then you can call one of the two methods
+ 'freeform' (to add documentation for a freeform documentation
+ chunk) or 'symbol' (to add documentation for a QAPI symbol). These
+ will cause the visitor to build up the tree of document
+ nodes. Once you've added all the documentation via 'freeform' and
+ 'symbol' method calls, you can call 'get_document_node' to get
+ the root section node containing them (its children are suitable
+ for returning from a Sphinx directive's 'run' method).
+ """
+ def __init__(self, sphinx_directive):
+ self._cur_doc = None
+ self._sphinx_directive = sphinx_directive
+ self._top_node = nodes.section()
+ self._active_headings = [self._top_node]
+
+ def _make_dlitem(self, term, defn):
+ """Return a dlitem node with the specified term and definition.
+
+ term should be a list of Text and literal nodes.
+ defn should be one of:
+ - a string, which will be handed to _parse_text_into_node
+ - a list of Text and literal nodes, which will be put into
+ a paragraph node
+ """
+ dlitem = nodes.definition_list_item()
+ dlterm = nodes.term('', '', *term)
+ dlitem += dlterm
+ if defn:
+ dldef = nodes.definition()
+ if isinstance(defn, list):
+ dldef += nodes.paragraph('', '', *defn)
+ else:
+ self._parse_text_into_node(defn, dldef)
+ dlitem += dldef
+ return dlitem
+
+ def _make_section(self, title):
+ """Return a section node with optional title"""
+ section = nodes.section(ids=[self._sphinx_directive.new_serialno()])
+ if title:
+ section += nodes.title(title, title)
+ return section
+
+ def _nodes_for_ifcond(self, ifcond, with_if=True):
+ """Return list of Text, literal nodes for the ifcond
+
+ Return a list which gives text like ' (If: condition)'.
+ If with_if is False, we don't return the "(If: " and ")".
+ """
+
+ doc = ifcond.docgen()
+ if not doc:
+ return []
+ doc = nodes.literal('', doc)
+ if not with_if:
+ return [doc]
+
+ nodelist = [nodes.Text(' ('), nodes.strong('', 'If: ')]
+ nodelist.append(doc)
+ nodelist.append(nodes.Text(')'))
+ return nodelist
+
+ def _nodes_for_one_member(self, member):
+ """Return list of Text, literal nodes for this member
+
+ Return a list of doctree nodes which give text like
+ 'name: type (optional) (If: ...)' suitable for use as the
+ 'term' part of a definition list item.
+ """
+ term = [nodes.literal('', member.name)]
+ if member.type.doc_type():
+ term.append(nodes.Text(': '))
+ term.append(nodes.literal('', member.type.doc_type()))
+ if member.optional:
+ term.append(nodes.Text(' (optional)'))
+ if member.ifcond.is_present():
+ term.extend(self._nodes_for_ifcond(member.ifcond))
+ return term
+
+ def _nodes_for_variant_when(self, branches, variant):
+ """Return list of Text, literal nodes for variant 'when' clause
+
+ Return a list of doctree nodes which give text like
+ 'when tagname is variant (If: ...)' suitable for use in
+ the 'branches' part of a definition list.
+ """
+ term = [nodes.Text(' when '),
+ nodes.literal('', branches.tag_member.name),
+ nodes.Text(' is '),
+ nodes.literal('', '"%s"' % variant.name)]
+ if variant.ifcond.is_present():
+ term.extend(self._nodes_for_ifcond(variant.ifcond))
+ return term
+
+ def _nodes_for_members(self, doc, what, base=None, branches=None):
+ """Return list of doctree nodes for the table of members"""
+ dlnode = nodes.definition_list()
+ for section in doc.args.values():
+ term = self._nodes_for_one_member(section.member)
+ # TODO drop fallbacks when undocumented members are outlawed
+ if section.text:
+ defn = dedent(section.text)
+ else:
+ defn = [nodes.Text('Not documented')]
+
+ dlnode += self._make_dlitem(term, defn)
+
+ if base:
+ dlnode += self._make_dlitem([nodes.Text('The members of '),
+ nodes.literal('', base.doc_type())],
+ None)
+
+ if branches:
+ for v in branches.variants:
+ if v.type.name == 'q_empty':
+ continue
+ assert not v.type.is_implicit()
+ term = [nodes.Text('The members of '),
+ nodes.literal('', v.type.doc_type())]
+ term.extend(self._nodes_for_variant_when(branches, v))
+ dlnode += self._make_dlitem(term, None)
+
+ if not dlnode.children:
+ return []
+
+ section = self._make_section(what)
+ section += dlnode
+ return [section]
+
+ def _nodes_for_enum_values(self, doc):
+ """Return list of doctree nodes for the table of enum values"""
+ seen_item = False
+ dlnode = nodes.definition_list()
+ for section in doc.args.values():
+ termtext = [nodes.literal('', section.member.name)]
+ if section.member.ifcond.is_present():
+ termtext.extend(self._nodes_for_ifcond(section.member.ifcond))
+ # TODO drop fallbacks when undocumented members are outlawed
+ if section.text:
+ defn = dedent(section.text)
+ else:
+ defn = [nodes.Text('Not documented')]
+
+ dlnode += self._make_dlitem(termtext, defn)
+ seen_item = True
+
+ if not seen_item:
+ return []
+
+ section = self._make_section('Values')
+ section += dlnode
+ return [section]
+
+ def _nodes_for_arguments(self, doc, arg_type):
+ """Return list of doctree nodes for the arguments section"""
+ if arg_type and not arg_type.is_implicit():
+ assert not doc.args
+ section = self._make_section('Arguments')
+ dlnode = nodes.definition_list()
+ dlnode += self._make_dlitem(
+ [nodes.Text('The members of '),
+ nodes.literal('', arg_type.name)],
+ None)
+ section += dlnode
+ return [section]
+
+ return self._nodes_for_members(doc, 'Arguments')
+
+ def _nodes_for_features(self, doc):
+ """Return list of doctree nodes for the table of features"""
+ seen_item = False
+ dlnode = nodes.definition_list()
+ for section in doc.features.values():
+ dlnode += self._make_dlitem(
+ [nodes.literal('', section.member.name)], dedent(section.text))
+ seen_item = True
+
+ if not seen_item:
+ return []
+
+ section = self._make_section('Features')
+ section += dlnode
+ return [section]
+
+ def _nodes_for_sections(self, doc):
+ """Return list of doctree nodes for additional sections"""
+ nodelist = []
+ for section in doc.sections:
+ if section.kind == QAPIDoc.Kind.TODO:
+ # Hide TODO: sections
+ continue
+
+ if section.kind == QAPIDoc.Kind.PLAIN:
+ # Sphinx cannot handle sectionless titles;
+ # Instead, just append the results to the prior section.
+ container = nodes.container()
+ self._parse_text_into_node(section.text, container)
+ nodelist += container.children
+ continue
+
+ snode = self._make_section(section.kind.name.title())
+ self._parse_text_into_node(dedent(section.text), snode)
+ nodelist.append(snode)
+ return nodelist
+
+ def _nodes_for_if_section(self, ifcond):
+ """Return list of doctree nodes for the "If" section"""
+ nodelist = []
+ if ifcond.is_present():
+ snode = self._make_section('If')
+ snode += nodes.paragraph(
+ '', '', *self._nodes_for_ifcond(ifcond, with_if=False)
+ )
+ nodelist.append(snode)
+ return nodelist
+
+ def _add_doc(self, typ, sections):
+ """Add documentation for a command/object/enum...
+
+ We assume we're documenting the thing defined in self._cur_doc.
+ typ is the type of thing being added ("Command", "Object", etc)
+
+ sections is a list of nodes for sections to add to the definition.
+ """
+
+ doc = self._cur_doc
+ snode = nodes.section(ids=[self._sphinx_directive.new_serialno()])
+ snode += nodes.title('', '', *[nodes.literal(doc.symbol, doc.symbol),
+ nodes.Text(' (' + typ + ')')])
+ self._parse_text_into_node(doc.body.text, snode)
+ for s in sections:
+ if s is not None:
+ snode += s
+ self._add_node_to_current_heading(snode)
+
+ def visit_enum_type(self, name, info, ifcond, features, members, prefix):
+ doc = self._cur_doc
+ self._add_doc('Enum',
+ self._nodes_for_enum_values(doc)
+ + self._nodes_for_features(doc)
+ + self._nodes_for_sections(doc)
+ + self._nodes_for_if_section(ifcond))
+
+ def visit_object_type(self, name, info, ifcond, features,
+ base, members, branches):
+ doc = self._cur_doc
+ if base and base.is_implicit():
+ base = None
+ self._add_doc('Object',
+ self._nodes_for_members(doc, 'Members', base, branches)
+ + self._nodes_for_features(doc)
+ + self._nodes_for_sections(doc)
+ + self._nodes_for_if_section(ifcond))
+
+ def visit_alternate_type(self, name, info, ifcond, features,
+ alternatives):
+ doc = self._cur_doc
+ self._add_doc('Alternate',
+ self._nodes_for_members(doc, 'Members')
+ + self._nodes_for_features(doc)
+ + self._nodes_for_sections(doc)
+ + self._nodes_for_if_section(ifcond))
+
+ def visit_command(self, name, info, ifcond, features, arg_type,
+ ret_type, gen, success_response, boxed, allow_oob,
+ allow_preconfig, coroutine):
+ doc = self._cur_doc
+ self._add_doc('Command',
+ self._nodes_for_arguments(doc, arg_type)
+ + self._nodes_for_features(doc)
+ + self._nodes_for_sections(doc)
+ + self._nodes_for_if_section(ifcond))
+
+ def visit_event(self, name, info, ifcond, features, arg_type, boxed):
+ doc = self._cur_doc
+ self._add_doc('Event',
+ self._nodes_for_arguments(doc, arg_type)
+ + self._nodes_for_features(doc)
+ + self._nodes_for_sections(doc)
+ + self._nodes_for_if_section(ifcond))
+
+ def symbol(self, doc, entity):
+ """Add documentation for one symbol to the document tree
+
+ This is the main entry point which causes us to add documentation
+ nodes for a symbol (which could be a 'command', 'object', 'event',
+ etc). We do this by calling 'visit' on the schema entity, which
+ will then call back into one of our visit_* methods, depending
+ on what kind of thing this symbol is.
+ """
+ self._cur_doc = doc
+ entity.visit(self)
+ self._cur_doc = None
+
+ def _start_new_heading(self, heading, level):
+ """Start a new heading at the specified heading level
+
+ Create a new section whose title is 'heading' and which is placed
+ in the docutils node tree as a child of the most recent level-1
+ heading. Subsequent document sections (commands, freeform doc chunks,
+ etc) will be placed as children of this new heading section.
+ """
+ if len(self._active_headings) < level:
+ raise QAPISemError(self._cur_doc.info,
+ 'Level %d subheading found outside a '
+ 'level %d heading'
+ % (level, level - 1))
+ snode = self._make_section(heading)
+ self._active_headings[level - 1] += snode
+ self._active_headings = self._active_headings[:level]
+ self._active_headings.append(snode)
+ return snode
+
+ def _add_node_to_current_heading(self, node):
+ """Add the node to whatever the current active heading is"""
+ self._active_headings[-1] += node
+
+ def freeform(self, doc):
+ """Add a piece of 'freeform' documentation to the document tree
+
+ A 'freeform' document chunk doesn't relate to any particular
+ symbol (for instance, it could be an introduction).
+
+ If the freeform document starts with a line of the form
+ '= Heading text', this is a section or subsection heading, with
+ the heading level indicated by the number of '=' signs.
+ """
+
+ # QAPIDoc documentation says free-form documentation blocks
+ # must have only a body section, nothing else.
+ assert not doc.sections
+ assert not doc.args
+ assert not doc.features
+ self._cur_doc = doc
+
+ text = doc.body.text
+ if re.match(r'=+ ', text):
+ # Section/subsection heading (if present, will always be
+ # the first line of the block)
+ (heading, _, text) = text.partition('\n')
+ (leader, _, heading) = heading.partition(' ')
+ node = self._start_new_heading(heading, len(leader))
+ if text == '':
+ return
+ else:
+ node = nodes.container()
+
+ self._parse_text_into_node(text, node)
+ self._cur_doc = None
+
+ def _parse_text_into_node(self, doctext, node):
+ """Parse a chunk of QAPI-doc-format text into the node
+
+ The doc comment can contain most inline rST markup, including
+ bulleted and enumerated lists.
+ As an extra permitted piece of markup, @var will be turned
+ into ``var``.
+ """
+
+        # Handle the "@var means ``var``" case
+ doctext = re.sub(r'@([\w-]+)', r'``\1``', doctext)
+
+ rstlist = ViewList()
+ for line in doctext.splitlines():
+ # The reported line number will always be that of the start line
+ # of the doc comment, rather than the actual location of the error.
+ # Being more precise would require overhaul of the QAPIDoc class
+ # to track lines more exactly within all the sub-parts of the doc
+ # comment, as well as counting lines here.
+ rstlist.append(line, self._cur_doc.info.fname,
+ self._cur_doc.info.line)
+ # Append a blank line -- in some cases rST syntax errors get
+ # attributed to the line after one with actual text, and if there
+ # isn't anything in the ViewList corresponding to that then Sphinx
+ # 1.6's AutodocReporter will then misidentify the source/line location
+ # in the error message (usually attributing it to the top-level
+ # .rst file rather than the offending .json file). The extra blank
+ # line won't affect the rendered output.
+ rstlist.append("", self._cur_doc.info.fname, self._cur_doc.info.line)
+ self._sphinx_directive.do_parse(rstlist, node)
+
+ def get_document_node(self):
+ """Return the root docutils node which makes up the document"""
+ return self._top_node
diff --git a/docs/system/ppc/amigang.rst b/docs/system/ppc/amigang.rst
index e2c9cb7..21bb14e 100644
--- a/docs/system/ppc/amigang.rst
+++ b/docs/system/ppc/amigang.rst
@@ -21,6 +21,7 @@ Emulated devices
* VIA VT82C686B south bridge
* PCI VGA compatible card (guests may need other card instead)
* PS/2 keyboard and mouse
+ * 4 KiB NVRAM (use ``-drive if=mtd,format=raw,file=nvram.bin`` to keep contents persistent)
Firmware
--------
@@ -54,14 +55,14 @@ To boot the system run:
-cdrom "A1 Linux Net Installer.iso" \
-device ati-vga,model=rv100,romfile=VGABIOS-lgpl-latest.bin
-From the firmware menu that appears select ``Boot sequence`` →
-``Amiga Multiboot Options`` and set ``Boot device 1`` to
-``Onboard VIA IDE CDROM``. Then hit escape until the main screen appears again,
-hit escape once more and from the exit menu that appears select either
-``Save settings and exit`` or ``Use settings for this session only``. It may
-take a long time loading the kernel into memory but eventually it boots and the
-installer becomes visible. The ``ati-vga`` RV100 emulation is not
-complete yet so only frame buffer works, DRM and 3D is not available.
+If a firmware menu appears, select ``Boot sequence`` → ``Amiga Multiboot Options``
+and set ``Boot device 1`` to ``Onboard VIA IDE CDROM``. Then hit escape until
+the main screen appears again, hit escape once more and from the exit menu that
+appears select either ``Save settings and exit`` or ``Use settings for this
+session only``. It may take a long time loading the kernel into memory but
+eventually it boots and the installer becomes visible. The ``ati-vga`` RV100
+emulation is not complete yet, so only the frame buffer works; DRM and 3D are
+not available.
Genesi/bPlan Pegasos II (``pegasos2``)
======================================
diff --git a/docs/system/ppc/embedded.rst b/docs/system/ppc/embedded.rst
index af3b3d9..5cb7d98 100644
--- a/docs/system/ppc/embedded.rst
+++ b/docs/system/ppc/embedded.rst
@@ -4,6 +4,5 @@ Embedded family boards
- ``bamboo`` bamboo
- ``mpc8544ds`` mpc8544ds
- ``ppce500`` generic paravirt e500 platform
-- ``ref405ep`` ref405ep
- ``sam460ex`` aCube Sam460ex
- ``virtex-ml507`` Xilinx Virtex ML507 reference design
diff --git a/docs/system/ppc/powernv.rst b/docs/system/ppc/powernv.rst
index de7a807..f3ec2cc 100644
--- a/docs/system/ppc/powernv.rst
+++ b/docs/system/ppc/powernv.rst
@@ -195,6 +195,13 @@ Use a MTD drive to add a PNOR to the machine, and get a NVRAM :
-drive file=./witherspoon.pnor,format=raw,if=mtd
+If no MTD drive is provided, the powernv platform creates a default
+PNOR device backed by the small formatted image in pc-bios/pnv-pnor.bin,
+opened read-only (PNOR changes persist across reboots but not across
+invocations of QEMU). If defaults are disabled, an erased 128 MB PNOR is
+provided instead, which skiboot will probably not recognize since it is
+not formatted.
+
Maintainer contact information
------------------------------
diff --git a/hw/intc/pnv_xive.c b/hw/intc/pnv_xive.c
index b755ddf..ccbe95a 100644
--- a/hw/intc/pnv_xive.c
+++ b/hw/intc/pnv_xive.c
@@ -1,10 +1,9 @@
/*
* QEMU PowerPC XIVE interrupt controller model
*
- * Copyright (c) 2017-2019, IBM Corporation.
+ * Copyright (c) 2017-2024, IBM Corporation.
*
- * This code is licensed under the GPL version 2 or later. See the
- * COPYING file in the top-level directory.
+ * SPDX-License-Identifier: GPL-2.0-or-later
*/
#include "qemu/osdep.h"
@@ -473,7 +472,7 @@ static bool pnv_xive_is_cpu_enabled(PnvXive *xive, PowerPCCPU *cpu)
static int pnv_xive_match_nvt(XivePresenter *xptr, uint8_t format,
uint8_t nvt_blk, uint32_t nvt_idx,
- bool cam_ignore, uint8_t priority,
+ bool crowd, bool cam_ignore, uint8_t priority,
uint32_t logic_serv, XiveTCTXMatch *match)
{
PnvXive *xive = PNV_XIVE(xptr);
@@ -500,7 +499,8 @@ static int pnv_xive_match_nvt(XivePresenter *xptr, uint8_t format,
* Check the thread context CAM lines and record matches.
*/
ring = xive_presenter_tctx_match(xptr, tctx, format, nvt_blk,
- nvt_idx, cam_ignore, logic_serv);
+ nvt_idx, cam_ignore,
+ logic_serv);
/*
* Save the context and follow on to catch duplicates, that we
* don't support yet.
diff --git a/hw/intc/pnv_xive2.c b/hw/intc/pnv_xive2.c
index 9ed7594..0b81dad 100644
--- a/hw/intc/pnv_xive2.c
+++ b/hw/intc/pnv_xive2.c
@@ -1,10 +1,9 @@
/*
* QEMU PowerPC XIVE2 interrupt controller model (POWER10)
*
- * Copyright (c) 2019-2022, IBM Corporation.
+ * Copyright (c) 2019-2024, IBM Corporation.
*
- * This code is licensed under the GPL version 2 or later. See the
- * COPYING file in the top-level directory.
+ * SPDX-License-Identifier: GPL-2.0-or-later
*/
#include "qemu/osdep.h"
@@ -625,7 +624,7 @@ static bool pnv_xive2_is_cpu_enabled(PnvXive2 *xive, PowerPCCPU *cpu)
static int pnv_xive2_match_nvt(XivePresenter *xptr, uint8_t format,
uint8_t nvt_blk, uint32_t nvt_idx,
- bool cam_ignore, uint8_t priority,
+ bool crowd, bool cam_ignore, uint8_t priority,
uint32_t logic_serv, XiveTCTXMatch *match)
{
PnvXive2 *xive = PNV_XIVE2(xptr);
@@ -656,25 +655,38 @@ static int pnv_xive2_match_nvt(XivePresenter *xptr, uint8_t format,
logic_serv);
} else {
ring = xive2_presenter_tctx_match(xptr, tctx, format, nvt_blk,
- nvt_idx, cam_ignore,
- logic_serv);
+ nvt_idx, crowd, cam_ignore,
+ logic_serv);
}
- /*
- * Save the context and follow on to catch duplicates,
- * that we don't support yet.
- */
if (ring != -1) {
- if (match->tctx) {
+ /*
+ * For VP-specific match, finding more than one is a
+ * problem. For group notification, it's possible.
+ */
+ if (!cam_ignore && match->tctx) {
qemu_log_mask(LOG_GUEST_ERROR, "XIVE: already found a "
"thread context NVT %x/%x\n",
nvt_blk, nvt_idx);
- return false;
+ /* Should set a FIR if we ever model it */
+ return -1;
+ }
+ /*
+ * For a group notification, we need to know if the
+ * match is precluded first by checking the current
+ * thread priority. If the interrupt can be delivered,
+ * we always notify the first match (for now).
+ */
+ if (cam_ignore &&
+ xive2_tm_irq_precluded(tctx, ring, priority)) {
+ match->precluded = true;
+ } else {
+ if (!match->tctx) {
+ match->ring = ring;
+ match->tctx = tctx;
+ }
+ count++;
}
-
- match->ring = ring;
- match->tctx = tctx;
- count++;
}
}
}
@@ -693,6 +705,47 @@ static uint32_t pnv_xive2_presenter_get_config(XivePresenter *xptr)
return cfg;
}
+static int pnv_xive2_broadcast(XivePresenter *xptr,
+ uint8_t nvt_blk, uint32_t nvt_idx,
+ bool crowd, bool ignore, uint8_t priority)
+{
+ PnvXive2 *xive = PNV_XIVE2(xptr);
+ PnvChip *chip = xive->chip;
+ int i, j;
+ bool gen1_tima_os =
+ xive->cq_regs[CQ_XIVE_CFG >> 3] & CQ_XIVE_CFG_GEN1_TIMA_OS;
+
+ for (i = 0; i < chip->nr_cores; i++) {
+ PnvCore *pc = chip->cores[i];
+ CPUCore *cc = CPU_CORE(pc);
+
+ for (j = 0; j < cc->nr_threads; j++) {
+ PowerPCCPU *cpu = pc->threads[j];
+ XiveTCTX *tctx;
+ int ring;
+
+ if (!pnv_xive2_is_cpu_enabled(xive, cpu)) {
+ continue;
+ }
+
+ tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);
+
+ if (gen1_tima_os) {
+ ring = xive_presenter_tctx_match(xptr, tctx, 0, nvt_blk,
+ nvt_idx, ignore, 0);
+ } else {
+ ring = xive2_presenter_tctx_match(xptr, tctx, 0, nvt_blk,
+ nvt_idx, crowd, ignore, 0);
+ }
+
+ if (ring != -1) {
+ xive2_tm_set_lsmfb(tctx, ring, priority);
+ }
+ }
+ }
+ return 0;
+}
+
static uint8_t pnv_xive2_get_block_id(Xive2Router *xrtr)
{
return pnv_xive2_block_id(PNV_XIVE2(xrtr));
@@ -2149,21 +2202,40 @@ static const MemoryRegionOps pnv_xive2_tm_ops = {
},
};
-static uint64_t pnv_xive2_nvc_read(void *opaque, hwaddr offset,
+static uint64_t pnv_xive2_nvc_read(void *opaque, hwaddr addr,
unsigned size)
{
PnvXive2 *xive = PNV_XIVE2(opaque);
+ XivePresenter *xptr = XIVE_PRESENTER(xive);
+    uint32_t page = addr >> xive->nvc_shift;
+ uint16_t op = addr & 0xFFF;
+ uint8_t blk = pnv_xive2_block_id(xive);
- xive2_error(xive, "NVC: invalid read @%"HWADDR_PRIx, offset);
- return -1;
+ if (size != 2) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid nvc load size %d\n",
+ size);
+ return -1;
+ }
+
+ return xive2_presenter_nvgc_backlog_op(xptr, true, blk, page, op, 1);
}
-static void pnv_xive2_nvc_write(void *opaque, hwaddr offset,
+static void pnv_xive2_nvc_write(void *opaque, hwaddr addr,
uint64_t val, unsigned size)
{
PnvXive2 *xive = PNV_XIVE2(opaque);
+ XivePresenter *xptr = XIVE_PRESENTER(xive);
+ uint32_t page = addr >> xive->nvc_shift;
+ uint16_t op = addr & 0xFFF;
+ uint8_t blk = pnv_xive2_block_id(xive);
- xive2_error(xive, "NVC: invalid write @%"HWADDR_PRIx, offset);
+ if (size != 1) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid nvc write size %d\n",
+ size);
+ return;
+ }
+
+ (void)xive2_presenter_nvgc_backlog_op(xptr, true, blk, page, op, val);
}
static const MemoryRegionOps pnv_xive2_nvc_ops = {
@@ -2171,30 +2243,63 @@ static const MemoryRegionOps pnv_xive2_nvc_ops = {
.write = pnv_xive2_nvc_write,
.endianness = DEVICE_BIG_ENDIAN,
.valid = {
- .min_access_size = 8,
+ .min_access_size = 1,
.max_access_size = 8,
},
.impl = {
- .min_access_size = 8,
+ .min_access_size = 1,
.max_access_size = 8,
},
};
-static uint64_t pnv_xive2_nvpg_read(void *opaque, hwaddr offset,
+static uint64_t pnv_xive2_nvpg_read(void *opaque, hwaddr addr,
unsigned size)
{
PnvXive2 *xive = PNV_XIVE2(opaque);
+ XivePresenter *xptr = XIVE_PRESENTER(xive);
+ uint32_t page = addr >> xive->nvpg_shift;
+ uint16_t op = addr & 0xFFF;
+ uint32_t index = page >> 1;
+ uint8_t blk = pnv_xive2_block_id(xive);
- xive2_error(xive, "NVPG: invalid read @%"HWADDR_PRIx, offset);
- return -1;
+ if (size != 2) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid nvpg load size %d\n",
+ size);
+ return -1;
+ }
+
+ if (page % 2) {
+ /* odd page - NVG */
+ return xive2_presenter_nvgc_backlog_op(xptr, false, blk, index, op, 1);
+ } else {
+ /* even page - NVP */
+ return xive2_presenter_nvp_backlog_op(xptr, blk, index, op);
+ }
}
-static void pnv_xive2_nvpg_write(void *opaque, hwaddr offset,
+static void pnv_xive2_nvpg_write(void *opaque, hwaddr addr,
uint64_t val, unsigned size)
{
PnvXive2 *xive = PNV_XIVE2(opaque);
+ XivePresenter *xptr = XIVE_PRESENTER(xive);
+ uint32_t page = addr >> xive->nvpg_shift;
+ uint16_t op = addr & 0xFFF;
+ uint32_t index = page >> 1;
+ uint8_t blk = pnv_xive2_block_id(xive);
- xive2_error(xive, "NVPG: invalid write @%"HWADDR_PRIx, offset);
+ if (size != 1) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid nvpg write size %d\n",
+ size);
+ return;
+ }
+
+ if (page % 2) {
+ /* odd page - NVG */
+ (void)xive2_presenter_nvgc_backlog_op(xptr, false, blk, index, op, val);
+ } else {
+ /* even page - NVP */
+ (void)xive2_presenter_nvp_backlog_op(xptr, blk, index, op);
+ }
}
static const MemoryRegionOps pnv_xive2_nvpg_ops = {
@@ -2202,11 +2307,11 @@ static const MemoryRegionOps pnv_xive2_nvpg_ops = {
.write = pnv_xive2_nvpg_write,
.endianness = DEVICE_BIG_ENDIAN,
.valid = {
- .min_access_size = 8,
+ .min_access_size = 1,
.max_access_size = 8,
},
.impl = {
- .min_access_size = 8,
+ .min_access_size = 1,
.max_access_size = 8,
},
};
@@ -2432,6 +2537,7 @@ static void pnv_xive2_class_init(ObjectClass *klass, void *data)
xpc->match_nvt = pnv_xive2_match_nvt;
xpc->get_config = pnv_xive2_presenter_get_config;
+ xpc->broadcast = pnv_xive2_broadcast;
};
static const TypeInfo pnv_xive2_info = {
diff --git a/hw/intc/spapr_xive.c b/hw/intc/spapr_xive.c
index a764c0b..ce734b0 100644
--- a/hw/intc/spapr_xive.c
+++ b/hw/intc/spapr_xive.c
@@ -1,10 +1,9 @@
/*
* QEMU PowerPC sPAPR XIVE interrupt controller model
*
- * Copyright (c) 2017-2018, IBM Corporation.
+ * Copyright (c) 2017-2024, IBM Corporation.
*
- * This code is licensed under the GPL version 2 or later. See the
- * COPYING file in the top-level directory.
+ * SPDX-License-Identifier: GPL-2.0-or-later
*/
#include "qemu/osdep.h"
@@ -431,7 +430,8 @@ static int spapr_xive_write_nvt(XiveRouter *xrtr, uint8_t nvt_blk,
static int spapr_xive_match_nvt(XivePresenter *xptr, uint8_t format,
uint8_t nvt_blk, uint32_t nvt_idx,
- bool cam_ignore, uint8_t priority,
+ bool crowd, bool cam_ignore,
+ uint8_t priority,
uint32_t logic_serv, XiveTCTXMatch *match)
{
CPUState *cs;
diff --git a/hw/intc/trace-events b/hw/intc/trace-events
index 913197a..0ba9a02 100644
--- a/hw/intc/trace-events
+++ b/hw/intc/trace-events
@@ -283,9 +283,13 @@ xive_router_end_notify(uint8_t end_blk, uint32_t end_idx, uint32_t end_data) "EN
xive_router_end_escalate(uint8_t end_blk, uint32_t end_idx, uint8_t esc_blk, uint32_t esc_idx, uint32_t end_data) "END 0x%02x/0x%04x -> escalate END 0x%02x/0x%04x data 0x%08x"
xive_tctx_tm_write(uint32_t index, uint64_t offset, unsigned int size, uint64_t value) "target=%d @0x%"PRIx64" sz=%d val=0x%" PRIx64
xive_tctx_tm_read(uint32_t index, uint64_t offset, unsigned int size, uint64_t value) "target=%d @0x%"PRIx64" sz=%d val=0x%" PRIx64
-xive_presenter_notify(uint8_t nvt_blk, uint32_t nvt_idx, uint8_t ring) "found NVT 0x%x/0x%x ring=0x%x"
+xive_presenter_notify(uint8_t nvt_blk, uint32_t nvt_idx, uint8_t ring, uint8_t group_level) "found NVT 0x%x/0x%x ring=0x%x group_level=%d"
xive_end_source_read(uint8_t end_blk, uint32_t end_idx, uint64_t addr) "END 0x%x/0x%x @0x%"PRIx64
+# xive2.c
+xive_nvp_backlog_op(uint8_t blk, uint32_t idx, uint8_t op, uint8_t priority, uint8_t rc) "NVP 0x%x/0x%x operation=%d priority=%d rc=%d"
+xive_nvgc_backlog_op(bool c, uint8_t blk, uint32_t idx, uint8_t op, uint8_t priority, uint32_t rc) "NVGC crowd=%d 0x%x/0x%x operation=%d priority=%d rc=%d"
+
# pnv_xive.c
pnv_xive_ic_hw_trigger(uint64_t addr, uint64_t val) "@0x%"PRIx64" val=0x%"PRIx64
diff --git a/hw/intc/xive.c b/hw/intc/xive.c
index 139cfdf..c77df2c 100644
--- a/hw/intc/xive.c
+++ b/hw/intc/xive.c
@@ -3,8 +3,7 @@
*
* Copyright (c) 2017-2018, IBM Corporation.
*
- * This code is licensed under the GPL version 2 or later. See the
- * COPYING file in the top-level directory.
+ * SPDX-License-Identifier: GPL-2.0-or-later
*/
#include "qemu/osdep.h"
@@ -27,28 +26,6 @@
* XIVE Thread Interrupt Management context
*/
-/*
- * Convert an Interrupt Pending Buffer (IPB) register to a Pending
- * Interrupt Priority Register (PIPR), which contains the priority of
- * the most favored pending notification.
- */
-static uint8_t ipb_to_pipr(uint8_t ibp)
-{
- return ibp ? clz32((uint32_t)ibp << 24) : 0xff;
-}
-
-static uint8_t exception_mask(uint8_t ring)
-{
- switch (ring) {
- case TM_QW1_OS:
- return TM_QW1_NSR_EO;
- case TM_QW3_HV_PHYS:
- return TM_QW3_NSR_HE;
- default:
- g_assert_not_reached();
- }
-}
-
static qemu_irq xive_tctx_output(XiveTCTX *tctx, uint8_t ring)
{
switch (ring) {
@@ -68,11 +45,10 @@ static uint64_t xive_tctx_accept(XiveTCTX *tctx, uint8_t ring)
{
uint8_t *regs = &tctx->regs[ring];
uint8_t nsr = regs[TM_NSR];
- uint8_t mask = exception_mask(ring);
qemu_irq_lower(xive_tctx_output(tctx, ring));
- if (regs[TM_NSR] & mask) {
+ if (regs[TM_NSR] != 0) {
uint8_t cppr = regs[TM_PIPR];
uint8_t alt_ring;
uint8_t *alt_regs;
@@ -87,11 +63,18 @@ static uint64_t xive_tctx_accept(XiveTCTX *tctx, uint8_t ring)
regs[TM_CPPR] = cppr;
- /* Reset the pending buffer bit */
- alt_regs[TM_IPB] &= ~xive_priority_to_ipb(cppr);
+ /*
+ * If the interrupt was for a specific VP, reset the pending
+ * buffer bit, otherwise clear the logical server indicator
+ */
+ if (regs[TM_NSR] & TM_NSR_GRP_LVL) {
+ regs[TM_NSR] &= ~TM_NSR_GRP_LVL;
+ } else {
+ alt_regs[TM_IPB] &= ~xive_priority_to_ipb(cppr);
+ }
- /* Drop Exception bit */
- regs[TM_NSR] &= ~mask;
+ /* Drop the exception bit and any group/crowd */
+ regs[TM_NSR] = 0;
trace_xive_tctx_accept(tctx->cs->cpu_index, alt_ring,
alt_regs[TM_IPB], regs[TM_PIPR],
@@ -101,7 +84,7 @@ static uint64_t xive_tctx_accept(XiveTCTX *tctx, uint8_t ring)
return ((uint64_t)nsr << 8) | regs[TM_CPPR];
}
-static void xive_tctx_notify(XiveTCTX *tctx, uint8_t ring)
+void xive_tctx_notify(XiveTCTX *tctx, uint8_t ring, uint8_t group_level)
{
/* HV_POOL ring uses HV_PHYS NSR, CPPR and PIPR registers */
uint8_t alt_ring = (ring == TM_QW2_HV_POOL) ? TM_QW3_HV_PHYS : ring;
@@ -111,13 +94,13 @@ static void xive_tctx_notify(XiveTCTX *tctx, uint8_t ring)
if (alt_regs[TM_PIPR] < alt_regs[TM_CPPR]) {
switch (ring) {
case TM_QW1_OS:
- regs[TM_NSR] |= TM_QW1_NSR_EO;
+ regs[TM_NSR] = TM_QW1_NSR_EO | (group_level & 0x3F);
break;
case TM_QW2_HV_POOL:
- alt_regs[TM_NSR] = (TM_QW3_NSR_HE_POOL << 6);
+ alt_regs[TM_NSR] = (TM_QW3_NSR_HE_POOL << 6) | (group_level & 0x3F);
break;
case TM_QW3_HV_PHYS:
- regs[TM_NSR] |= (TM_QW3_NSR_HE_PHYS << 6);
+ regs[TM_NSR] = (TM_QW3_NSR_HE_PHYS << 6) | (group_level & 0x3F);
break;
default:
g_assert_not_reached();
@@ -159,7 +142,7 @@ static void xive_tctx_set_cppr(XiveTCTX *tctx, uint8_t ring, uint8_t cppr)
* Recompute the PIPR based on local pending interrupts. The PHYS
* ring must take the minimum of both the PHYS and POOL PIPR values.
*/
- pipr_min = ipb_to_pipr(regs[TM_IPB]);
+ pipr_min = xive_ipb_to_pipr(regs[TM_IPB]);
ring_min = ring;
/* PHYS updates also depend on POOL values */
@@ -169,7 +152,7 @@ static void xive_tctx_set_cppr(XiveTCTX *tctx, uint8_t ring, uint8_t cppr)
/* POOL values only matter if POOL ctx is valid */
if (pool_regs[TM_WORD2] & 0x80) {
- uint8_t pool_pipr = ipb_to_pipr(pool_regs[TM_IPB]);
+ uint8_t pool_pipr = xive_ipb_to_pipr(pool_regs[TM_IPB]);
/*
* Determine highest priority interrupt and
@@ -185,17 +168,27 @@ static void xive_tctx_set_cppr(XiveTCTX *tctx, uint8_t ring, uint8_t cppr)
regs[TM_PIPR] = pipr_min;
/* CPPR has changed, check if we need to raise a pending exception */
- xive_tctx_notify(tctx, ring_min);
+ xive_tctx_notify(tctx, ring_min, 0);
}
-void xive_tctx_ipb_update(XiveTCTX *tctx, uint8_t ring, uint8_t ipb)
-{
+void xive_tctx_pipr_update(XiveTCTX *tctx, uint8_t ring, uint8_t priority,
+ uint8_t group_level)
+{
+ /* HV_POOL ring uses HV_PHYS NSR, CPPR and PIPR registers */
+ uint8_t alt_ring = (ring == TM_QW2_HV_POOL) ? TM_QW3_HV_PHYS : ring;
+ uint8_t *alt_regs = &tctx->regs[alt_ring];
uint8_t *regs = &tctx->regs[ring];
- regs[TM_IPB] |= ipb;
- regs[TM_PIPR] = ipb_to_pipr(regs[TM_IPB]);
- xive_tctx_notify(tctx, ring);
-}
+ if (group_level == 0) {
+ /* VP-specific */
+ regs[TM_IPB] |= xive_priority_to_ipb(priority);
+ alt_regs[TM_PIPR] = xive_ipb_to_pipr(regs[TM_IPB]);
+ } else {
+ /* VP-group */
+ alt_regs[TM_PIPR] = xive_priority_to_pipr(priority);
+ }
+ xive_tctx_notify(tctx, ring, group_level);
+}
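The VP-specific branch above ORs a priority bit into the IPB and then derives the PIPR from it. A minimal standalone sketch of that encoding, as an illustration only (assuming priority p maps to IPB bit 0x80 >> p and that the PIPR is the most favored, i.e. numerically smallest, pending priority; __builtin_clz stands in for QEMU's clz32):

#include <stdio.h>
#include <stdint.h>

/*
 * Standalone illustration of the assumed priority <-> IPB <-> PIPR
 * encoding: priority p pends bit (0x80 >> p) in the IPB, and the PIPR
 * is the most favored (numerically smallest) pending priority.
 */
static uint8_t priority_to_ipb(uint8_t priority)
{
    return priority > 7 ? 0 : 0x80 >> priority;
}

static uint8_t ipb_to_pipr(uint8_t ipb)
{
    /* __builtin_clz stands in for QEMU's clz32() */
    return ipb ? __builtin_clz((uint32_t)ipb << 24) : 0xff;
}

int main(void)
{
    uint8_t ipb = 0;

    ipb |= priority_to_ipb(5);    /* pend priority 5 */
    ipb |= priority_to_ipb(3);    /* pend priority 3 */
    printf("IPB=0x%02x PIPR=%d\n", ipb, ipb_to_pipr(ipb));    /* PIPR=3 */
    return 0;
}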
/*
* XIVE Thread Interrupt Management Area (TIMA)
@@ -411,13 +404,13 @@ static void xive_tm_set_os_lgs(XivePresenter *xptr, XiveTCTX *tctx,
}
/*
- * Adjust the IPB to allow a CPU to process event queues of other
+ * Adjust the PIPR to allow a CPU to process event queues of other
* priorities during one physical interrupt cycle.
*/
static void xive_tm_set_os_pending(XivePresenter *xptr, XiveTCTX *tctx,
hwaddr offset, uint64_t value, unsigned size)
{
- xive_tctx_ipb_update(tctx, TM_QW1_OS, xive_priority_to_ipb(value & 0xff));
+ xive_tctx_pipr_update(tctx, TM_QW1_OS, value & 0xff, 0);
}
static void xive_os_cam_decode(uint32_t cam, uint8_t *nvt_blk,
@@ -495,16 +488,20 @@ static void xive_tctx_need_resend(XiveRouter *xrtr, XiveTCTX *tctx,
/* Reset the NVT value */
nvt.w4 = xive_set_field32(NVT_W4_IPB, nvt.w4, 0);
xive_router_write_nvt(xrtr, nvt_blk, nvt_idx, &nvt, 4);
+
+ uint8_t *regs = &tctx->regs[TM_QW1_OS];
+ regs[TM_IPB] |= ipb;
}
+
/*
- * Always call xive_tctx_ipb_update(). Even if there were no
+ * Always call xive_tctx_pipr_update(). Even if there were no
* escalation triggered, there could be a pending interrupt which
* was saved when the context was pulled and that we need to take
* into account by recalculating the PIPR (which is not
* saved/restored).
* It will also raise the External interrupt signal if needed.
*/
- xive_tctx_ipb_update(tctx, TM_QW1_OS, ipb);
+ xive_tctx_pipr_update(tctx, TM_QW1_OS, 0xFF, 0); /* fxb */
}
/*
@@ -592,7 +589,7 @@ static const XiveTmOp xive2_tm_operations[] = {
* MMIOs below 2K : raw values and special operations without side
* effects
*/
- { XIVE_TM_OS_PAGE, TM_QW1_OS + TM_CPPR, 1, xive_tm_set_os_cppr,
+ { XIVE_TM_OS_PAGE, TM_QW1_OS + TM_CPPR, 1, xive2_tm_set_os_cppr,
NULL },
{ XIVE_TM_HV_PAGE, TM_QW1_OS + TM_WORD2, 4, xive2_tm_push_os_ctx,
NULL },
@@ -600,7 +597,7 @@ static const XiveTmOp xive2_tm_operations[] = {
NULL },
{ XIVE_TM_OS_PAGE, TM_QW1_OS + TM_LGS, 1, xive_tm_set_os_lgs,
NULL },
- { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_CPPR, 1, xive_tm_set_hv_cppr,
+ { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_CPPR, 1, xive2_tm_set_hv_cppr,
NULL },
{ XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, xive_tm_vt_push,
NULL },
@@ -841,9 +838,9 @@ void xive_tctx_reset(XiveTCTX *tctx)
* CPPR is first set.
*/
tctx->regs[TM_QW1_OS + TM_PIPR] =
- ipb_to_pipr(tctx->regs[TM_QW1_OS + TM_IPB]);
+ xive_ipb_to_pipr(tctx->regs[TM_QW1_OS + TM_IPB]);
tctx->regs[TM_QW3_HV_PHYS + TM_PIPR] =
- ipb_to_pipr(tctx->regs[TM_QW3_HV_PHYS + TM_IPB]);
+ xive_ipb_to_pipr(tctx->regs[TM_QW3_HV_PHYS + TM_IPB]);
}
static void xive_tctx_realize(DeviceState *dev, Error **errp)
@@ -1658,6 +1655,54 @@ static uint32_t xive_tctx_hw_cam_line(XivePresenter *xptr, XiveTCTX *tctx)
return xive_nvt_cam_line(blk, 1 << 7 | (pir & 0x7f));
}
+uint32_t xive_get_vpgroup_size(uint32_t nvp_index)
+{
+ /*
+ * Group size is a power of 2. The position of the first 0
+ * (starting with the least significant bits) in the NVP index
+ * gives the size of the group.
+ */
+ return 1 << (ctz32(~nvp_index) + 1);
+}
+
+static uint8_t xive_get_group_level(bool crowd, bool ignore,
+ uint32_t nvp_blk, uint32_t nvp_index)
+{
+ uint8_t level;
+
+ if (!ignore) {
+ g_assert(!crowd);
+ return 0;
+ }
+
+ level = (ctz32(~nvp_index) + 1) & 0b1111;
+ if (crowd) {
+ uint32_t blk;
+
+ /* crowd level is bit position of first 0 from the right in nvp_blk */
+ blk = ctz32(~nvp_blk) + 1;
+
+ /*
+ * Supported crowd sizes are 2^1, 2^2, and 2^4. 2^3 is not supported.
+ * HW will encode level 4 as the value 3. See xive2_pgofnext().
+ */
+ switch (level) {
+ case 1:
+ case 2:
+ break;
+ case 4:
+ blk = 3;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ /* Crowd level bits reside in upper 2 bits of the 6 bit group level */
+ level |= blk << 4;
+ }
+ return level;
+}
+
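Both helpers above derive the group geometry from the number of trailing 1-bits in an NVP index. A minimal standalone sketch applying the same formula, as an illustration only (__builtin_ctz stands in for QEMU's ctz32 and the sample indexes are hypothetical):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    /* Hypothetical NVP indexes with 0, 1, 2 and 3 trailing 1-bits */
    uint32_t samples[] = { 0x100, 0x101, 0x103, 0x107 };

    for (int i = 0; i < 4; i++) {
        uint32_t idx = samples[i];
        /*
         * Same formula as xive_get_vpgroup_size() and
         * xive_get_group_level(); __builtin_ctz stands in for ctz32().
         */
        int level = __builtin_ctz(~idx) + 1;

        printf("index 0x%03x -> group level %d, group size %d\n",
               idx, level, 1 << level);
    }
    return 0;
}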
/*
* The thread context register words are in big-endian format.
*/
@@ -1724,31 +1769,41 @@ int xive_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx,
/*
* This is our simple Xive Presenter Engine model. It is merged in the
* Router as it does not require an extra object.
- *
- * It receives notification requests sent by the IVRE to find one
- * matching NVT (or more) dispatched on the processor threads. In case
- * of a single NVT notification, the process is abbreviated and the
- * thread is signaled if a match is found. In case of a logical server
- * notification (bits ignored at the end of the NVT identifier), the
- * IVPE and IVRE select a winning thread using different filters. This
- * involves 2 or 3 exchanges on the PowerBus that the model does not
- * support.
- *
- * The parameters represent what is sent on the PowerBus
*/
bool xive_presenter_notify(XiveFabric *xfb, uint8_t format,
uint8_t nvt_blk, uint32_t nvt_idx,
- bool cam_ignore, uint8_t priority,
- uint32_t logic_serv)
+ bool crowd, bool cam_ignore, uint8_t priority,
+ uint32_t logic_serv, bool *precluded)
{
XiveFabricClass *xfc = XIVE_FABRIC_GET_CLASS(xfb);
- XiveTCTXMatch match = { .tctx = NULL, .ring = 0 };
+ XiveTCTXMatch match = { .tctx = NULL, .ring = 0, .precluded = false };
+ uint8_t group_level;
int count;
/*
- * Ask the machine to scan the interrupt controllers for a match
+ * Ask the machine to scan the interrupt controllers for a match.
+ *
+ * For VP-specific notification, we expect at most one match and
+ * one call to the presenters is all we need (abbreviated notify
+ * sequence documented by the architecture).
+ *
+ * For VP-group notification, match_nvt() is the equivalent of the
+ * "histogram" and "poll" commands sent to the power bus to the
+ * presenters. 'count' could be more than one, but we always
+ * select the first match for now. 'precluded' tells if (at least)
+ * one thread matches but can't take the interrupt now because
+ * it's running at a more favored priority. We return the
+ * information to the router so that it can take appropriate
+ * actions (backlog, escalation, broadcast, etc...)
+ *
+ * If we were to implement a better way of dispatching the
+ * interrupt in case of multiple matches (instead of the first
+ * match), we would need a heuristic to elect a thread (for
+ * example, the hardware keeps track of an 'age' in the TIMA) and
+ * a new command to the presenters (the equivalent of the "assign"
+     * power bus command in the documented full notify sequence).
*/
- count = xfc->match_nvt(xfb, format, nvt_blk, nvt_idx, cam_ignore,
+ count = xfc->match_nvt(xfb, format, nvt_blk, nvt_idx, crowd, cam_ignore,
priority, logic_serv, &match);
if (count < 0) {
return false;
@@ -1756,9 +1811,11 @@ bool xive_presenter_notify(XiveFabric *xfb, uint8_t format,
/* handle CPU exception delivery */
if (count) {
- trace_xive_presenter_notify(nvt_blk, nvt_idx, match.ring);
- xive_tctx_ipb_update(match.tctx, match.ring,
- xive_priority_to_ipb(priority));
+ group_level = xive_get_group_level(crowd, cam_ignore, nvt_blk, nvt_idx);
+ trace_xive_presenter_notify(nvt_blk, nvt_idx, match.ring, group_level);
+ xive_tctx_pipr_update(match.tctx, match.ring, priority, group_level);
+ } else {
+ *precluded = match.precluded;
}
return !!count;
@@ -1798,7 +1855,7 @@ void xive_router_end_notify(XiveRouter *xrtr, XiveEAS *eas)
uint8_t nvt_blk;
uint32_t nvt_idx;
XiveNVT nvt;
- bool found;
+ bool found, precluded;
uint8_t end_blk = xive_get_field64(EAS_END_BLOCK, eas->w);
uint32_t end_idx = xive_get_field64(EAS_END_INDEX, eas->w);
@@ -1879,10 +1936,12 @@ void xive_router_end_notify(XiveRouter *xrtr, XiveEAS *eas)
}
found = xive_presenter_notify(xrtr->xfb, format, nvt_blk, nvt_idx,
+ false /* crowd */,
xive_get_field32(END_W7_F0_IGNORE, end.w7),
priority,
- xive_get_field32(END_W7_F1_LOG_SERVER_ID, end.w7));
-
+ xive_get_field32(END_W7_F1_LOG_SERVER_ID, end.w7),
+ &precluded);
+ /* we don't support VP-group notification on P9, so precluded is not used */
/* TODO: Auto EOI. */
if (found) {
diff --git a/hw/intc/xive2.c b/hw/intc/xive2.c
index fc5aed3..f8ef615 100644
--- a/hw/intc/xive2.c
+++ b/hw/intc/xive2.c
@@ -1,10 +1,9 @@
/*
* QEMU PowerPC XIVE2 interrupt controller model (POWER10)
*
- * Copyright (c) 2019-2022, IBM Corporation..
+ * Copyright (c) 2019-2024, IBM Corporation.
*
- * This code is licensed under the GPL version 2 or later. See the
- * COPYING file in the top-level directory.
+ * SPDX-License-Identifier: GPL-2.0-or-later
*/
#include "qemu/osdep.h"
@@ -18,6 +17,7 @@
#include "hw/ppc/xive.h"
#include "hw/ppc/xive2.h"
#include "hw/ppc/xive2_regs.h"
+#include "trace.h"
uint32_t xive2_router_get_config(Xive2Router *xrtr)
{
@@ -54,7 +54,8 @@ static uint32_t xive2_nvgc_get_backlog(Xive2Nvgc *nvgc, uint8_t priority)
/*
* The per-priority backlog counters are 24-bit and the structure
- * is stored in big endian
+     * is stored in big endian. The NVGC structure is 32 bytes long, so
+     * the 24 bytes starting at w2 hold 8 priorities at 24 bits each.
*/
ptr = (uint8_t *)&nvgc->w2 + priority * 3;
for (i = 0; i < 3; i++, ptr++) {
@@ -63,6 +64,117 @@ static uint32_t xive2_nvgc_get_backlog(Xive2Nvgc *nvgc, uint8_t priority)
return val;
}
+static void xive2_nvgc_set_backlog(Xive2Nvgc *nvgc, uint8_t priority,
+ uint32_t val)
+{
+ uint8_t *ptr, i;
+ uint32_t shift;
+
+ if (priority > 7) {
+ return;
+ }
+
+ if (val > 0xFFFFFF) {
+ val = 0xFFFFFF;
+ }
+ /*
+ * The per-priority backlog counters are 24-bit and the structure
+ * is stored in big endian
+ */
+ ptr = (uint8_t *)&nvgc->w2 + priority * 3;
+ for (i = 0; i < 3; i++, ptr++) {
+ shift = 8 * (2 - i);
+ *ptr = (val >> shift) & 0xFF;
+ }
+}
+
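The get/set helpers above treat each per-priority counter as 3 big-endian bytes. A standalone sketch of just that byte layout, as an illustration only (a flat 24-byte buffer stands in for the bytes starting at nvgc->w2; this is not the real Xive2Nvgc structure):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/*
 * Illustration of the 24-bit big-endian per-priority backlog counters:
 * 8 priorities * 3 bytes = 24 bytes.
 */
static void backlog_set(uint8_t *buf, unsigned prio, uint32_t val)
{
    uint8_t *ptr = buf + prio * 3;

    if (val > 0xFFFFFF) {
        val = 0xFFFFFF;    /* counters saturate at 24 bits */
    }
    for (int i = 0; i < 3; i++) {
        ptr[i] = (val >> (8 * (2 - i))) & 0xFF;    /* most significant first */
    }
}

static uint32_t backlog_get(const uint8_t *buf, unsigned prio)
{
    const uint8_t *ptr = buf + prio * 3;
    uint32_t val = 0;

    for (int i = 0; i < 3; i++) {
        val = (val << 8) | ptr[i];
    }
    return val;
}

int main(void)
{
    uint8_t counters[8 * 3];

    memset(counters, 0, sizeof(counters));
    backlog_set(counters, 6, 0x123456);
    printf("priority 6 backlog = 0x%06x\n", backlog_get(counters, 6));
    return 0;
}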
+uint64_t xive2_presenter_nvgc_backlog_op(XivePresenter *xptr,
+ bool crowd,
+ uint8_t blk, uint32_t idx,
+ uint16_t offset, uint16_t val)
+{
+ Xive2Router *xrtr = XIVE2_ROUTER(xptr);
+ uint8_t priority = GETFIELD(NVx_BACKLOG_PRIO, offset);
+ uint8_t op = GETFIELD(NVx_BACKLOG_OP, offset);
+ Xive2Nvgc nvgc;
+ uint32_t count, old_count;
+
+ if (xive2_router_get_nvgc(xrtr, crowd, blk, idx, &nvgc)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No %s %x/%x\n",
+ crowd ? "NVC" : "NVG", blk, idx);
+ return -1;
+ }
+ if (!xive2_nvgc_is_valid(&nvgc)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid NVG %x/%x\n", blk, idx);
+ return -1;
+ }
+
+ old_count = xive2_nvgc_get_backlog(&nvgc, priority);
+ count = old_count;
+ /*
+ * op:
+ * 0b00 => increment
+ * 0b01 => decrement
+ * 0b1- => read
+ */
+ if (op == 0b00 || op == 0b01) {
+ if (op == 0b00) {
+ count += val;
+ } else {
+ if (count > val) {
+ count -= val;
+ } else {
+ count = 0;
+ }
+ }
+ xive2_nvgc_set_backlog(&nvgc, priority, count);
+ xive2_router_write_nvgc(xrtr, crowd, blk, idx, &nvgc);
+ }
+ trace_xive_nvgc_backlog_op(crowd, blk, idx, op, priority, old_count);
+ return old_count;
+}
+
+uint64_t xive2_presenter_nvp_backlog_op(XivePresenter *xptr,
+ uint8_t blk, uint32_t idx,
+ uint16_t offset)
+{
+ Xive2Router *xrtr = XIVE2_ROUTER(xptr);
+ uint8_t priority = GETFIELD(NVx_BACKLOG_PRIO, offset);
+ uint8_t op = GETFIELD(NVx_BACKLOG_OP, offset);
+ Xive2Nvp nvp;
+ uint8_t ipb, old_ipb, rc;
+
+ if (xive2_router_get_nvp(xrtr, blk, idx, &nvp)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No NVP %x/%x\n", blk, idx);
+ return -1;
+ }
+ if (!xive2_nvp_is_valid(&nvp)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid NVP %x/%x\n", blk, idx);
+ return -1;
+ }
+
+ old_ipb = xive_get_field32(NVP2_W2_IPB, nvp.w2);
+ ipb = old_ipb;
+ /*
+ * op:
+ * 0b00 => set priority bit
+ * 0b01 => reset priority bit
+ * 0b1- => read
+ */
+ if (op == 0b00 || op == 0b01) {
+ if (op == 0b00) {
+ ipb |= xive_priority_to_ipb(priority);
+ } else {
+ ipb &= ~xive_priority_to_ipb(priority);
+ }
+ nvp.w2 = xive_set_field32(NVP2_W2_IPB, nvp.w2, ipb);
+ xive2_router_write_nvp(xrtr, blk, idx, &nvp, 2);
+ }
+ rc = !!(old_ipb & xive_priority_to_ipb(priority));
+ trace_xive_nvp_backlog_op(blk, idx, op, priority, rc);
+ return rc;
+}
+
void xive2_eas_pic_print_info(Xive2Eas *eas, uint32_t lisn, GString *buf)
{
if (!xive2_eas_is_valid(eas)) {
@@ -114,8 +226,8 @@ void xive2_end_pic_print_info(Xive2End *end, uint32_t end_idx, GString *buf)
uint32_t qsize = xive_get_field32(END2_W3_QSIZE, end->w3);
uint32_t qentries = 1 << (qsize + 10);
- uint32_t nvp_blk = xive_get_field32(END2_W6_VP_BLOCK, end->w6);
- uint32_t nvp_idx = xive_get_field32(END2_W6_VP_OFFSET, end->w6);
+ uint32_t nvx_blk = xive_get_field32(END2_W6_VP_BLOCK, end->w6);
+ uint32_t nvx_idx = xive_get_field32(END2_W6_VP_OFFSET, end->w6);
uint8_t priority = xive_get_field32(END2_W7_F0_PRIORITY, end->w7);
uint8_t pq;
@@ -144,7 +256,7 @@ void xive2_end_pic_print_info(Xive2End *end, uint32_t end_idx, GString *buf)
xive2_end_is_firmware2(end) ? 'F' : '-',
xive2_end_is_ignore(end) ? 'i' : '-',
xive2_end_is_crowd(end) ? 'c' : '-',
- priority, nvp_blk, nvp_idx);
+ priority, nvx_blk, nvx_idx);
if (qaddr_base) {
g_string_append_printf(buf, " eq:@%08"PRIx64"% 6d/%5d ^%d",
@@ -255,6 +367,115 @@ static void xive2_end_enqueue(Xive2End *end, uint32_t data)
end->w1 = xive_set_field32(END2_W1_PAGE_OFF, end->w1, qindex);
}
+static void xive2_pgofnext(uint8_t *nvgc_blk, uint32_t *nvgc_idx,
+ uint8_t next_level)
+{
+ uint32_t mask, next_idx;
+ uint8_t next_blk;
+
+ /*
+ * Adjust the block and index of a VP for the next group/crowd
+ * size (PGofFirst/PGofNext field in the NVP and NVGC structures).
+ *
+ * The 6-bit group level is split into a 2-bit crowd and 4-bit
+ * group levels. Encoding is similar. However, we don't support
+ * crowd size of 8. So a crowd level of 0b11 is bumped to a crowd
+ * size of 16.
+ */
+ next_blk = NVx_CROWD_LVL(next_level);
+ if (next_blk == 3) {
+ next_blk = 4;
+ }
+ mask = (1 << next_blk) - 1;
+ *nvgc_blk &= ~mask;
+ *nvgc_blk |= mask >> 1;
+
+ next_idx = NVx_GROUP_LVL(next_level);
+ mask = (1 << next_idx) - 1;
+ *nvgc_idx &= ~mask;
+ *nvgc_idx |= mask >> 1;
+}
+
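For the index half of the adjustment above, the low group-size bits are cleared and then all but the topmost of them are set again. A minimal sketch with hypothetical values, mirroring only the index handling (the block/crowd handling is analogous):

#include <stdio.h>
#include <stdint.h>

/*
 * Map a VP index to the NVG index of its enclosing group when the
 * group level encodes 'lvl_bits' low bits, mirroring the index
 * handling in xive2_pgofnext() above (illustration only).
 */
static uint32_t group_index(uint32_t idx, unsigned lvl_bits)
{
    uint32_t mask = (1u << lvl_bits) - 1;

    idx &= ~mask;        /* clear the group-size low bits          */
    idx |= mask >> 1;    /* set them back to the 0b0..011..1 shape */
    return idx;
}

int main(void)
{
    uint32_t vp = 0x107;    /* hypothetical VP index */

    /* VP 0x107 belongs to the level-3 group whose index is 0x103 */
    printf("0x%03x -> 0x%03x\n", vp, group_index(vp, 3));
    return 0;
}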
+/*
+ * Scan the group chain and return the highest priority and group
+ * level of pending group interrupts.
+ */
+static uint8_t xive2_presenter_backlog_scan(XivePresenter *xptr,
+ uint8_t nvx_blk, uint32_t nvx_idx,
+ uint8_t first_group,
+ uint8_t *out_level)
+{
+ Xive2Router *xrtr = XIVE2_ROUTER(xptr);
+ uint32_t nvgc_idx;
+ uint32_t current_level, count;
+ uint8_t nvgc_blk, prio;
+ Xive2Nvgc nvgc;
+
+ for (prio = 0; prio <= XIVE_PRIORITY_MAX; prio++) {
+ current_level = first_group & 0x3F;
+ nvgc_blk = nvx_blk;
+ nvgc_idx = nvx_idx;
+
+ while (current_level) {
+ xive2_pgofnext(&nvgc_blk, &nvgc_idx, current_level);
+
+ if (xive2_router_get_nvgc(xrtr, NVx_CROWD_LVL(current_level),
+ nvgc_blk, nvgc_idx, &nvgc)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No NVGC %x/%x\n",
+ nvgc_blk, nvgc_idx);
+ return 0xFF;
+ }
+ if (!xive2_nvgc_is_valid(&nvgc)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid NVGC %x/%x\n",
+ nvgc_blk, nvgc_idx);
+ return 0xFF;
+ }
+
+ count = xive2_nvgc_get_backlog(&nvgc, prio);
+ if (count) {
+ *out_level = current_level;
+ return prio;
+ }
+ current_level = xive_get_field32(NVGC2_W0_PGONEXT, nvgc.w0) & 0x3F;
+ }
+ }
+ return 0xFF;
+}
+
+static void xive2_presenter_backlog_decr(XivePresenter *xptr,
+ uint8_t nvx_blk, uint32_t nvx_idx,
+ uint8_t group_prio,
+ uint8_t group_level)
+{
+ Xive2Router *xrtr = XIVE2_ROUTER(xptr);
+ uint32_t nvgc_idx, count;
+ uint8_t nvgc_blk;
+ Xive2Nvgc nvgc;
+
+ nvgc_blk = nvx_blk;
+ nvgc_idx = nvx_idx;
+ xive2_pgofnext(&nvgc_blk, &nvgc_idx, group_level);
+
+ if (xive2_router_get_nvgc(xrtr, NVx_CROWD_LVL(group_level),
+ nvgc_blk, nvgc_idx, &nvgc)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No NVGC %x/%x\n",
+ nvgc_blk, nvgc_idx);
+ return;
+ }
+ if (!xive2_nvgc_is_valid(&nvgc)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid NVGC %x/%x\n",
+ nvgc_blk, nvgc_idx);
+ return;
+ }
+ count = xive2_nvgc_get_backlog(&nvgc, group_prio);
+ if (!count) {
+ return;
+ }
+ xive2_nvgc_set_backlog(&nvgc, group_prio, count - 1);
+ xive2_router_write_nvgc(xrtr, NVx_CROWD_LVL(group_level),
+ nvgc_blk, nvgc_idx, &nvgc);
+}
+
/*
* XIVE Thread Interrupt Management Area (TIMA) - Gen2 mode
*
@@ -313,7 +534,19 @@ static void xive2_tctx_save_ctx(Xive2Router *xrtr, XiveTCTX *tctx,
nvp.w2 = xive_set_field32(NVP2_W2_IPB, nvp.w2, regs[TM_IPB]);
nvp.w2 = xive_set_field32(NVP2_W2_CPPR, nvp.w2, regs[TM_CPPR]);
- nvp.w2 = xive_set_field32(NVP2_W2_LSMFB, nvp.w2, regs[TM_LSMFB]);
+ if (nvp.w0 & NVP2_W0_L) {
+ /*
+ * Typically not used. If LSMFB is restored with 0, it will
+ * force a backlog rescan
+ */
+ nvp.w2 = xive_set_field32(NVP2_W2_LSMFB, nvp.w2, regs[TM_LSMFB]);
+ }
+ if (nvp.w0 & NVP2_W0_G) {
+ nvp.w2 = xive_set_field32(NVP2_W2_LGS, nvp.w2, regs[TM_LGS]);
+ }
+ if (nvp.w0 & NVP2_W0_T) {
+ nvp.w2 = xive_set_field32(NVP2_W2_T, nvp.w2, regs[TM_T]);
+ }
xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, &nvp, 2);
nvp.w1 = xive_set_field32(NVP2_W1_CO, nvp.w1, 0);
@@ -527,7 +760,9 @@ static uint8_t xive2_tctx_restore_os_ctx(Xive2Router *xrtr, XiveTCTX *tctx,
xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, nvp, 2);
tctx->regs[TM_QW1_OS + TM_CPPR] = cppr;
- /* we don't model LSMFB */
+ tctx->regs[TM_QW1_OS + TM_LSMFB] = xive_get_field32(NVP2_W2_LSMFB, nvp->w2);
+ tctx->regs[TM_QW1_OS + TM_LGS] = xive_get_field32(NVP2_W2_LGS, nvp->w2);
+ tctx->regs[TM_QW1_OS + TM_T] = xive_get_field32(NVP2_W2_T, nvp->w2);
nvp->w1 = xive_set_field32(NVP2_W1_CO, nvp->w1, 1);
nvp->w1 = xive_set_field32(NVP2_W1_CO_THRID_VALID, nvp->w1, 1);
@@ -550,8 +785,15 @@ static void xive2_tctx_need_resend(Xive2Router *xrtr, XiveTCTX *tctx,
uint8_t nvp_blk, uint32_t nvp_idx,
bool do_restore)
{
- Xive2Nvp nvp;
+ XivePresenter *xptr = XIVE_PRESENTER(xrtr);
uint8_t ipb;
+ uint8_t backlog_level;
+ uint8_t group_level;
+ uint8_t first_group;
+ uint8_t backlog_prio;
+ uint8_t group_prio;
+ uint8_t *regs = &tctx->regs[TM_QW1_OS];
+ Xive2Nvp nvp;
/*
* Grab the associated thread interrupt context registers in the
@@ -580,15 +822,29 @@ static void xive2_tctx_need_resend(Xive2Router *xrtr, XiveTCTX *tctx,
nvp.w2 = xive_set_field32(NVP2_W2_IPB, nvp.w2, 0);
xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, &nvp, 2);
}
+ regs[TM_IPB] |= ipb;
+ backlog_prio = xive_ipb_to_pipr(ipb);
+ backlog_level = 0;
+
+ first_group = xive_get_field32(NVP2_W0_PGOFIRST, nvp.w0);
+ if (first_group && regs[TM_LSMFB] < backlog_prio) {
+ group_prio = xive2_presenter_backlog_scan(xptr, nvp_blk, nvp_idx,
+ first_group, &group_level);
+ regs[TM_LSMFB] = group_prio;
+ if (regs[TM_LGS] && group_prio < backlog_prio) {
+ /* VP can take a group interrupt */
+ xive2_presenter_backlog_decr(xptr, nvp_blk, nvp_idx,
+ group_prio, group_level);
+ backlog_prio = group_prio;
+ backlog_level = group_level;
+ }
+ }
+
/*
- * Always call xive_tctx_ipb_update(). Even if there were no
- * escalation triggered, there could be a pending interrupt which
- * was saved when the context was pulled and that we need to take
- * into account by recalculating the PIPR (which is not
- * saved/restored).
- * It will also raise the External interrupt signal if needed.
+ * Compute the PIPR based on the restored state.
+ * It will raise the External interrupt signal if needed.
*/
- xive_tctx_ipb_update(tctx, TM_QW1_OS, ipb);
+ xive_tctx_pipr_update(tctx, TM_QW1_OS, backlog_prio, backlog_level);
}
/*
@@ -630,6 +886,172 @@ void xive2_tm_push_os_ctx(XivePresenter *xptr, XiveTCTX *tctx,
}
}
+static int xive2_tctx_get_nvp_indexes(XiveTCTX *tctx, uint8_t ring,
+ uint32_t *nvp_blk, uint32_t *nvp_idx)
+{
+ uint32_t w2, cam;
+
+ w2 = xive_tctx_word2(&tctx->regs[ring]);
+ switch (ring) {
+ case TM_QW1_OS:
+ if (!(be32_to_cpu(w2) & TM2_QW1W2_VO)) {
+ return -1;
+ }
+ cam = xive_get_field32(TM2_QW1W2_OS_CAM, w2);
+ break;
+ case TM_QW2_HV_POOL:
+ if (!(be32_to_cpu(w2) & TM2_QW2W2_VP)) {
+ return -1;
+ }
+ cam = xive_get_field32(TM2_QW2W2_POOL_CAM, w2);
+ break;
+ case TM_QW3_HV_PHYS:
+ if (!(be32_to_cpu(w2) & TM2_QW3W2_VT)) {
+ return -1;
+ }
+ cam = xive2_tctx_hw_cam_line(tctx->xptr, tctx);
+ break;
+ default:
+ return -1;
+ }
+ *nvp_blk = xive2_nvp_blk(cam);
+ *nvp_idx = xive2_nvp_idx(cam);
+ return 0;
+}
+
+static void xive2_tctx_set_cppr(XiveTCTX *tctx, uint8_t ring, uint8_t cppr)
+{
+ uint8_t *regs = &tctx->regs[ring];
+ Xive2Router *xrtr = XIVE2_ROUTER(tctx->xptr);
+ uint8_t old_cppr, backlog_prio, first_group, group_level = 0;
+ uint8_t pipr_min, lsmfb_min, ring_min;
+ bool group_enabled;
+ uint32_t nvp_blk, nvp_idx;
+ Xive2Nvp nvp;
+ int rc;
+
+ trace_xive_tctx_set_cppr(tctx->cs->cpu_index, ring,
+ regs[TM_IPB], regs[TM_PIPR],
+ cppr, regs[TM_NSR]);
+
+ if (cppr > XIVE_PRIORITY_MAX) {
+ cppr = 0xff;
+ }
+
+ old_cppr = regs[TM_CPPR];
+ regs[TM_CPPR] = cppr;
+
+ /*
+ * Recompute the PIPR based on local pending interrupts. It will
+ * be adjusted below if needed in case of pending group interrupts.
+ */
+ pipr_min = xive_ipb_to_pipr(regs[TM_IPB]);
+ group_enabled = !!regs[TM_LGS];
+ lsmfb_min = (group_enabled) ? regs[TM_LSMFB] : 0xff;
+ ring_min = ring;
+
+ /* PHYS updates also depend on POOL values */
+ if (ring == TM_QW3_HV_PHYS) {
+ uint8_t *pregs = &tctx->regs[TM_QW2_HV_POOL];
+
+ /* POOL values only matter if POOL ctx is valid */
+ if (pregs[TM_WORD2] & 0x80) {
+
+ uint8_t pool_pipr = xive_ipb_to_pipr(pregs[TM_IPB]);
+ uint8_t pool_lsmfb = pregs[TM_LSMFB];
+
+ /*
+ * Determine highest priority interrupt and
+ * remember which ring has it.
+ */
+ if (pool_pipr < pipr_min) {
+ pipr_min = pool_pipr;
+ if (pool_pipr < lsmfb_min) {
+ ring_min = TM_QW2_HV_POOL;
+ }
+ }
+
+ /* Values needed for group priority calculation */
+ if (pregs[TM_LGS] && (pool_lsmfb < lsmfb_min)) {
+ group_enabled = true;
+ lsmfb_min = pool_lsmfb;
+ if (lsmfb_min < pipr_min) {
+ ring_min = TM_QW2_HV_POOL;
+ }
+ }
+ }
+ }
+ regs[TM_PIPR] = pipr_min;
+
+ rc = xive2_tctx_get_nvp_indexes(tctx, ring_min, &nvp_blk, &nvp_idx);
+ if (rc) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: set CPPR on invalid context\n");
+ return;
+ }
+
+ if (cppr < old_cppr) {
+ /*
+ * FIXME: check if there's a group interrupt being presented
+ * and if the new cppr prevents it. If so, then the group
+ * interrupt needs to be re-added to the backlog and
+ * re-triggered (see re-trigger END info in the NVGC
+ * structure)
+ */
+ }
+
+ if (group_enabled &&
+ lsmfb_min < cppr &&
+ lsmfb_min < regs[TM_PIPR]) {
+ /*
+ * Thread has seen a group interrupt with a higher priority
+ * than the new cppr or pending local interrupt. Check the
+ * backlog
+ */
+ if (xive2_router_get_nvp(xrtr, nvp_blk, nvp_idx, &nvp)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No NVP %x/%x\n",
+ nvp_blk, nvp_idx);
+ return;
+ }
+
+ if (!xive2_nvp_is_valid(&nvp)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid NVP %x/%x\n",
+ nvp_blk, nvp_idx);
+ return;
+ }
+
+ first_group = xive_get_field32(NVP2_W0_PGOFIRST, nvp.w0);
+ if (!first_group) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid NVP %x/%x\n",
+ nvp_blk, nvp_idx);
+ return;
+ }
+
+ backlog_prio = xive2_presenter_backlog_scan(tctx->xptr,
+ nvp_blk, nvp_idx,
+ first_group, &group_level);
+ tctx->regs[ring_min + TM_LSMFB] = backlog_prio;
+ if (backlog_prio != 0xFF) {
+ xive2_presenter_backlog_decr(tctx->xptr, nvp_blk, nvp_idx,
+ backlog_prio, group_level);
+ regs[TM_PIPR] = backlog_prio;
+ }
+ }
+ /* CPPR has changed, check if we need to raise a pending exception */
+ xive_tctx_notify(tctx, ring_min, group_level);
+}
+
+void xive2_tm_set_hv_cppr(XivePresenter *xptr, XiveTCTX *tctx,
+ hwaddr offset, uint64_t value, unsigned size)
+{
+ xive2_tctx_set_cppr(tctx, TM_QW3_HV_PHYS, value & 0xff);
+}
+
+void xive2_tm_set_os_cppr(XivePresenter *xptr, XiveTCTX *tctx,
+ hwaddr offset, uint64_t value, unsigned size)
+{
+ xive2_tctx_set_cppr(tctx, TM_QW1_OS, value & 0xff);
+}
+
static void xive2_tctx_set_target(XiveTCTX *tctx, uint8_t ring, uint8_t target)
{
uint8_t *regs = &tctx->regs[ring];
@@ -723,13 +1145,46 @@ int xive2_router_write_nvgc(Xive2Router *xrtr, bool crowd,
return xrc->write_nvgc(xrtr, crowd, nvgc_blk, nvgc_idx, nvgc);
}
+static bool xive2_vp_match_mask(uint32_t cam1, uint32_t cam2,
+ uint32_t vp_mask)
+{
+ return (cam1 & vp_mask) == (cam2 & vp_mask);
+}
+
+static uint8_t xive2_get_vp_block_mask(uint32_t nvt_blk, bool crowd)
+{
+ uint8_t size, block_mask = 0b1111;
+
+ /* 3 supported crowd sizes: 2, 4, 16 */
+ if (crowd) {
+ size = xive_get_vpgroup_size(nvt_blk);
+ if (size == 8) {
+            qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid crowd size of 8\n");
+ return block_mask;
+ }
+ block_mask &= ~(size - 1);
+ }
+ return block_mask;
+}
+
+static uint32_t xive2_get_vp_index_mask(uint32_t nvt_index, bool cam_ignore)
+{
+ uint32_t index_mask = 0xFFFFFF; /* 24 bits */
+
+ if (cam_ignore) {
+ index_mask &= ~(xive_get_vpgroup_size(nvt_index) - 1);
+ }
+ return index_mask;
+}
+
/*
* The thread context register words are in big-endian format.
*/
int xive2_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx,
uint8_t format,
uint8_t nvt_blk, uint32_t nvt_idx,
- bool cam_ignore, uint32_t logic_serv)
+ bool crowd, bool cam_ignore,
+ uint32_t logic_serv)
{
uint32_t cam = xive2_nvp_cam_line(nvt_blk, nvt_idx);
uint32_t qw3w2 = xive_tctx_word2(&tctx->regs[TM_QW3_HV_PHYS]);
@@ -737,44 +1192,51 @@ int xive2_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx,
uint32_t qw1w2 = xive_tctx_word2(&tctx->regs[TM_QW1_OS]);
uint32_t qw0w2 = xive_tctx_word2(&tctx->regs[TM_QW0_USER]);
- /*
- * TODO (PowerNV): ignore mode. The low order bits of the NVT
- * identifier are ignored in the "CAM" match.
- */
+ uint32_t index_mask, vp_mask;
+ uint8_t block_mask;
if (format == 0) {
- if (cam_ignore == true) {
- /*
- * F=0 & i=1: Logical server notification (bits ignored at
- * the end of the NVT identifier)
- */
- qemu_log_mask(LOG_UNIMP, "XIVE: no support for LS NVT %x/%x\n",
- nvt_blk, nvt_idx);
- return -1;
- }
+ /*
+ * i=0: Specific NVT notification
+ * i=1: VP-group notification (bits ignored at the end of the
+ * NVT identifier)
+ */
+ block_mask = xive2_get_vp_block_mask(nvt_blk, crowd);
+ index_mask = xive2_get_vp_index_mask(nvt_idx, cam_ignore);
+ vp_mask = xive2_nvp_cam_line(block_mask, index_mask);
- /* F=0 & i=0: Specific NVT notification */
+ /* For VP-group notifications, threads with LGS=0 are excluded */
/* PHYS ring */
if ((be32_to_cpu(qw3w2) & TM2_QW3W2_VT) &&
- cam == xive2_tctx_hw_cam_line(xptr, tctx)) {
+ !(cam_ignore && tctx->regs[TM_QW3_HV_PHYS + TM_LGS] == 0) &&
+ xive2_vp_match_mask(cam,
+ xive2_tctx_hw_cam_line(xptr, tctx),
+ vp_mask)) {
return TM_QW3_HV_PHYS;
}
/* HV POOL ring */
if ((be32_to_cpu(qw2w2) & TM2_QW2W2_VP) &&
- cam == xive_get_field32(TM2_QW2W2_POOL_CAM, qw2w2)) {
+ !(cam_ignore && tctx->regs[TM_QW2_HV_POOL + TM_LGS] == 0) &&
+ xive2_vp_match_mask(cam,
+ xive_get_field32(TM2_QW2W2_POOL_CAM, qw2w2),
+ vp_mask)) {
return TM_QW2_HV_POOL;
}
/* OS ring */
if ((be32_to_cpu(qw1w2) & TM2_QW1W2_VO) &&
- cam == xive_get_field32(TM2_QW1W2_OS_CAM, qw1w2)) {
+ !(cam_ignore && tctx->regs[TM_QW1_OS + TM_LGS] == 0) &&
+ xive2_vp_match_mask(cam,
+ xive_get_field32(TM2_QW1W2_OS_CAM, qw1w2),
+ vp_mask)) {
return TM_QW1_OS;
}
} else {
/* F=1 : User level Event-Based Branch (EBB) notification */
+ /* FIXME: what if cam_ignore and LGS = 0 ? */
/* USER ring */
if ((be32_to_cpu(qw1w2) & TM2_QW1W2_VO) &&
(cam == xive_get_field32(TM2_QW1W2_OS_CAM, qw1w2)) &&
@@ -786,6 +1248,37 @@ int xive2_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx,
return -1;
}
+bool xive2_tm_irq_precluded(XiveTCTX *tctx, int ring, uint8_t priority)
+{
+ /* HV_POOL ring uses HV_PHYS NSR, CPPR and PIPR registers */
+ uint8_t alt_ring = (ring == TM_QW2_HV_POOL) ? TM_QW3_HV_PHYS : ring;
+ uint8_t *alt_regs = &tctx->regs[alt_ring];
+
+ /*
+ * The xive2_presenter_tctx_match() above tells if there's a match
+ * but for VP-group notification, we still need to look at the
+ * priority to know if the thread can take the interrupt now or if
+ * it is precluded.
+ */
+ if (priority < alt_regs[TM_CPPR]) {
+ return false;
+ }
+ return true;
+}
+
+void xive2_tm_set_lsmfb(XiveTCTX *tctx, int ring, uint8_t priority)
+{
+ uint8_t *regs = &tctx->regs[ring];
+
+ /*
+ * Called by the router during a VP-group notification when the
+ * thread matches but can't take the interrupt because it's
+ * already running at a more favored priority. It then stores the
+ * new interrupt priority in the LSMFB field.
+ */
+ regs[TM_LSMFB] = priority;
+}
+
static void xive2_router_realize(DeviceState *dev, Error **errp)
{
Xive2Router *xrtr = XIVE2_ROUTER(dev);
@@ -825,10 +1318,9 @@ static void xive2_router_end_notify(Xive2Router *xrtr, uint8_t end_blk,
Xive2End end;
uint8_t priority;
uint8_t format;
- bool found;
- Xive2Nvp nvp;
- uint8_t nvp_blk;
- uint32_t nvp_idx;
+ bool found, precluded;
+ uint8_t nvx_blk;
+ uint32_t nvx_idx;
/* END cache lookup */
if (xive2_router_get_end(xrtr, end_blk, end_idx, &end)) {
@@ -843,6 +1335,12 @@ static void xive2_router_end_notify(Xive2Router *xrtr, uint8_t end_blk,
return;
}
+    if (xive2_end_is_crowd(&end) && !xive2_end_is_ignore(&end)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "XIVE: invalid END, 'crowd' bit requires 'ignore' bit\n");
+ return;
+ }
+
if (xive2_end_is_enqueue(&end)) {
xive2_end_enqueue(&end, end_data);
/* Enqueuing event data modifies the EQ toggle and index */
@@ -887,26 +1385,14 @@ static void xive2_router_end_notify(Xive2Router *xrtr, uint8_t end_blk,
/*
* Follows IVPE notification
*/
- nvp_blk = xive_get_field32(END2_W6_VP_BLOCK, end.w6);
- nvp_idx = xive_get_field32(END2_W6_VP_OFFSET, end.w6);
+ nvx_blk = xive_get_field32(END2_W6_VP_BLOCK, end.w6);
+ nvx_idx = xive_get_field32(END2_W6_VP_OFFSET, end.w6);
- /* NVP cache lookup */
- if (xive2_router_get_nvp(xrtr, nvp_blk, nvp_idx, &nvp)) {
- qemu_log_mask(LOG_GUEST_ERROR, "XIVE: no NVP %x/%x\n",
- nvp_blk, nvp_idx);
- return;
- }
-
- if (!xive2_nvp_is_valid(&nvp)) {
- qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVP %x/%x is invalid\n",
- nvp_blk, nvp_idx);
- return;
- }
-
- found = xive_presenter_notify(xrtr->xfb, format, nvp_blk, nvp_idx,
- xive2_end_is_ignore(&end),
+ found = xive_presenter_notify(xrtr->xfb, format, nvx_blk, nvx_idx,
+ xive2_end_is_crowd(&end), xive2_end_is_ignore(&end),
priority,
- xive_get_field32(END2_W7_F1_LOG_SERVER_ID, end.w7));
+ xive_get_field32(END2_W7_F1_LOG_SERVER_ID, end.w7),
+ &precluded);
/* TODO: Auto EOI. */
@@ -917,10 +1403,9 @@ static void xive2_router_end_notify(Xive2Router *xrtr, uint8_t end_blk,
/*
* If no matching NVP is dispatched on a HW thread :
* - specific VP: update the NVP structure if backlog is activated
- * - logical server : forward request to IVPE (not supported)
+ * - VP-group: update the backlog counter for that priority in the NVG
*/
if (xive2_end_is_backlog(&end)) {
- uint8_t ipb;
if (format == 1) {
qemu_log_mask(LOG_GUEST_ERROR,
@@ -929,19 +1414,82 @@ static void xive2_router_end_notify(Xive2Router *xrtr, uint8_t end_blk,
return;
}
- /*
- * Record the IPB in the associated NVP structure for later
- * use. The presenter will resend the interrupt when the vCPU
- * is dispatched again on a HW thread.
- */
- ipb = xive_get_field32(NVP2_W2_IPB, nvp.w2) |
- xive_priority_to_ipb(priority);
- nvp.w2 = xive_set_field32(NVP2_W2_IPB, nvp.w2, ipb);
- xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, &nvp, 2);
+ if (!xive2_end_is_ignore(&end)) {
+ uint8_t ipb;
+ Xive2Nvp nvp;
- /*
- * On HW, follows a "Broadcast Backlog" to IVPEs
- */
+ /* NVP cache lookup */
+ if (xive2_router_get_nvp(xrtr, nvx_blk, nvx_idx, &nvp)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: no NVP %x/%x\n",
+ nvx_blk, nvx_idx);
+ return;
+ }
+
+ if (!xive2_nvp_is_valid(&nvp)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVP %x/%x is invalid\n",
+ nvx_blk, nvx_idx);
+ return;
+ }
+
+ /*
+ * Record the IPB in the associated NVP structure for later
+ * use. The presenter will resend the interrupt when the vCPU
+ * is dispatched again on a HW thread.
+ */
+ ipb = xive_get_field32(NVP2_W2_IPB, nvp.w2) |
+ xive_priority_to_ipb(priority);
+ nvp.w2 = xive_set_field32(NVP2_W2_IPB, nvp.w2, ipb);
+ xive2_router_write_nvp(xrtr, nvx_blk, nvx_idx, &nvp, 2);
+ } else {
+ Xive2Nvgc nvgc;
+ uint32_t backlog;
+ bool crowd;
+
+ crowd = xive2_end_is_crowd(&end);
+
+ /*
+ * For groups and crowds, the per-priority backlog
+ * counters are stored in the NVG/NVC structures
+ */
+ if (xive2_router_get_nvgc(xrtr, crowd,
+ nvx_blk, nvx_idx, &nvgc)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: no %s %x/%x\n",
+ crowd ? "NVC" : "NVG", nvx_blk, nvx_idx);
+ return;
+ }
+
+ if (!xive2_nvgc_is_valid(&nvgc)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVG %x/%x is invalid\n",
+ nvx_blk, nvx_idx);
+ return;
+ }
+
+ /*
+ * Increment the backlog counter for that priority.
+             * We only call broadcast the first time the counter is
+             * incremented. The broadcast operation sets the LSMFB field in
+             * the TIMA of the relevant threads so that they know an
+             * interrupt is pending.
+ */
+ backlog = xive2_nvgc_get_backlog(&nvgc, priority) + 1;
+ xive2_nvgc_set_backlog(&nvgc, priority, backlog);
+ xive2_router_write_nvgc(xrtr, crowd, nvx_blk, nvx_idx, &nvgc);
+
+ if (backlog == 1) {
+ XiveFabricClass *xfc = XIVE_FABRIC_GET_CLASS(xrtr->xfb);
+ xfc->broadcast(xrtr->xfb, nvx_blk, nvx_idx,
+ xive2_end_is_crowd(&end),
+ xive2_end_is_ignore(&end),
+ priority);
+
+ if (!xive2_end_is_precluded_escalation(&end)) {
+ /*
+ * The interrupt will be picked up when the
+ * matching thread lowers its priority level
+ */
+ return;
+ }
+ }
+ }
}
do_escalation:
diff --git a/hw/pci-host/pnv_phb4_pec.c b/hw/pci-host/pnv_phb4_pec.c
index a156839..cb8a7e3 100644
--- a/hw/pci-host/pnv_phb4_pec.c
+++ b/hw/pci-host/pnv_phb4_pec.c
@@ -197,6 +197,9 @@ static PnvPHB *pnv_pec_default_phb_realize(PnvPhb4PecState *pec,
return phb;
}
+#define XPEC_P9_PCI_LANE_CFG PPC_BITMASK(10, 11)
+#define XPEC_P10_PCI_LANE_CFG PPC_BITMASK(0, 1)
+
static void pnv_pec_realize(DeviceState *dev, Error **errp)
{
PnvPhb4PecState *pec = PNV_PHB4_PEC(dev);
@@ -211,6 +214,43 @@ static void pnv_pec_realize(DeviceState *dev, Error **errp)
pec->num_phbs = pecc->num_phbs[pec->index];
+ /* Pervasive chiplet */
+ object_initialize_child(OBJECT(pec), "nest-pervasive-common",
+ &pec->nest_pervasive,
+ TYPE_PNV_NEST_CHIPLET_PERVASIVE);
+ if (!qdev_realize(DEVICE(&pec->nest_pervasive), NULL, errp)) {
+ return;
+ }
+
+ /* Set up pervasive chiplet registers */
+ /*
+     * Most registers are not set up; this just sets the PCI CONF1 link-width
+ * field because skiboot probes it.
+ */
+ if (pecc->version == PNV_PHB4_VERSION) {
+ /*
+         * On P9, PEC2 has configurable 1/2/3-furcation.
+ * Make it trifurcated (x8, x4, x4) to match pnv_pec_num_phbs.
+ */
+ if (pec->index == 2) {
+ pec->nest_pervasive.control_regs.cplt_cfg1 =
+ SETFIELD(XPEC_P9_PCI_LANE_CFG,
+ pec->nest_pervasive.control_regs.cplt_cfg1,
+ 0b10);
+ }
+ } else if (pecc->version == PNV_PHB5_VERSION) {
+ /*
+         * On P10, both PECs have configurable 1/2/3-furcation.
+ * Both are trifurcated to match pnv_phb5_pec_num_stacks.
+ */
+ pec->nest_pervasive.control_regs.cplt_cfg1 =
+ SETFIELD(XPEC_P10_PCI_LANE_CFG,
+ pec->nest_pervasive.control_regs.cplt_cfg1,
+ 0b10);
+ } else {
+ g_assert_not_reached();
+ }
+
/* Create PHBs if running with defaults */
if (defaults_enabled()) {
g_assert(pec->num_phbs <= MAX_PHBS_PER_PEC);
@@ -290,9 +330,16 @@ static const Property pnv_pec_properties[] = {
PnvChip *),
};
+#define XPEC_PCI_CPLT_OFFSET 0x1000000ULL
+
+static uint32_t pnv_pec_xscom_cplt_base(PnvPhb4PecState *pec)
+{
+ return PNV9_XSCOM_PEC_NEST_CPLT_BASE + XPEC_PCI_CPLT_OFFSET * pec->index;
+}
+
static uint32_t pnv_pec_xscom_pci_base(PnvPhb4PecState *pec)
{
- return PNV9_XSCOM_PEC_PCI_BASE + 0x1000000 * pec->index;
+ return PNV9_XSCOM_PEC_PCI_BASE + XPEC_PCI_CPLT_OFFSET * pec->index;
}
static uint32_t pnv_pec_xscom_nest_base(PnvPhb4PecState *pec)
@@ -321,6 +368,7 @@ static void pnv_pec_class_init(ObjectClass *klass, void *data)
device_class_set_props(dc, pnv_pec_properties);
dc->user_creatable = false;
+ pecc->xscom_cplt_base = pnv_pec_xscom_cplt_base;
pecc->xscom_nest_base = pnv_pec_xscom_nest_base;
pecc->xscom_pci_base = pnv_pec_xscom_pci_base;
pecc->xscom_nest_size = PNV9_XSCOM_PEC_NEST_SIZE;
@@ -349,6 +397,10 @@ static const TypeInfo pnv_pec_type_info = {
/*
* POWER10 definitions
*/
+static uint32_t pnv_phb5_pec_xscom_cplt_base(PnvPhb4PecState *pec)
+{
+ return PNV10_XSCOM_PEC_NEST_CPLT_BASE + XPEC_PCI_CPLT_OFFSET * pec->index;
+}
static uint32_t pnv_phb5_pec_xscom_pci_base(PnvPhb4PecState *pec)
{
@@ -373,6 +425,7 @@ static void pnv_phb5_pec_class_init(ObjectClass *klass, void *data)
static const char compat[] = "ibm,power10-pbcq";
static const char stk_compat[] = "ibm,power10-phb-stack";
+ pecc->xscom_cplt_base = pnv_phb5_pec_xscom_cplt_base;
pecc->xscom_nest_base = pnv_phb5_pec_xscom_nest_base;
pecc->xscom_pci_base = pnv_phb5_pec_xscom_pci_base;
pecc->xscom_nest_size = PNV10_XSCOM_PEC_NEST_SIZE;
diff --git a/hw/ppc/Kconfig b/hw/ppc/Kconfig
index b44d91b..ced6bbc 100644
--- a/hw/ppc/Kconfig
+++ b/hw/ppc/Kconfig
@@ -44,15 +44,6 @@ config POWERNV
select SSI_M25P80
select PNV_SPI
-config PPC405
- bool
- default y
- depends on PPC
- select M48T59
- select PFLASH_CFI02
- select PPC4XX
- select SERIAL_MM
-
config PPC440
bool
default y
diff --git a/hw/ppc/amigaone.c b/hw/ppc/amigaone.c
index b027922..4835121 100644
--- a/hw/ppc/amigaone.c
+++ b/hw/ppc/amigaone.c
@@ -21,12 +21,26 @@
#include "hw/ide/pci.h"
#include "hw/i2c/smbus_eeprom.h"
#include "hw/ppc/ppc.h"
+#include "system/block-backend.h"
#include "system/qtest.h"
#include "system/reset.h"
#include "kvm_ppc.h"
+#include "elf.h"
+
+#include <zlib.h> /* for crc32 */
#define BUS_FREQ_HZ 100000000
+#define INITRD_MIN_ADDR 0x600000
+#define INIT_RAM_ADDR 0x40000000
+
+#define PCI_HIGH_ADDR 0x80000000
+#define PCI_HIGH_SIZE 0x7d000000
+#define PCI_LOW_ADDR 0xfd000000
+#define PCI_LOW_SIZE 0xe0000
+
+#define ARTICIA_ADDR 0xfe000000
+
/*
* Firmware binary available at
* https://www.hyperion-entertainment.com/index.php/downloads?view=files&parent=28
@@ -41,20 +55,202 @@
/* AmigaOS calls this routine from ROM, use this if no firmware loaded */
static const char dummy_fw[] = {
- 0x38, 0x00, 0x00, 0x08, /* li r0,8 */
- 0x7c, 0x09, 0x03, 0xa6, /* mtctr r0 */
- 0x54, 0x63, 0xf8, 0x7e, /* srwi r3,r3,1 */
- 0x42, 0x00, 0xff, 0xfc, /* bdnz 0x8 */
+ 0x54, 0x63, 0xc2, 0x3e, /* srwi r3,r3,8 */
0x7c, 0x63, 0x18, 0xf8, /* not r3,r3 */
0x4e, 0x80, 0x00, 0x20, /* blr */
};
+#define NVRAM_ADDR 0xfd0e0000
+#define NVRAM_SIZE (4 * KiB)
+
+static char default_env[] =
+ "baudrate=115200\0"
+ "stdout=vga\0"
+ "stdin=ps2kbd\0"
+ "bootcmd=boota; menu; run menuboot_cmd\0"
+ "boot1=ide\0"
+ "boot2=cdrom\0"
+ "boota_timeout=3\0"
+ "ide_doreset=on\0"
+ "pci_irqa=9\0"
+ "pci_irqa_select=level\0"
+ "pci_irqb=10\0"
+ "pci_irqb_select=level\0"
+ "pci_irqc=11\0"
+ "pci_irqc_select=level\0"
+ "pci_irqd=7\0"
+ "pci_irqd_select=level\0"
+ "a1ide_irq=1111\0"
+ "a1ide_xfer=FFFF\0";
+#define CRC32_DEFAULT_ENV 0xb5548481
+#define CRC32_ALL_ZEROS 0x603b0489
+
+#define TYPE_A1_NVRAM "a1-nvram"
+OBJECT_DECLARE_SIMPLE_TYPE(A1NVRAMState, A1_NVRAM)
+
+struct A1NVRAMState {
+ SysBusDevice parent_obj;
+
+ MemoryRegion mr;
+ BlockBackend *blk;
+};
+
+static uint64_t nvram_read(void *opaque, hwaddr addr, unsigned int size)
+{
+ /* read callback not used because of romd mode */
+ g_assert_not_reached();
+}
+
+static void nvram_write(void *opaque, hwaddr addr, uint64_t val,
+ unsigned int size)
+{
+ A1NVRAMState *s = opaque;
+ uint8_t *p = memory_region_get_ram_ptr(&s->mr);
+
+ p[addr] = val;
+ if (s->blk) {
+ blk_pwrite(s->blk, addr, 1, &val, 0);
+ }
+}
+
+static const MemoryRegionOps nvram_ops = {
+ .read = nvram_read,
+ .write = nvram_write,
+ .endianness = DEVICE_BIG_ENDIAN,
+ .impl = {
+ .min_access_size = 1,
+ .max_access_size = 1,
+ },
+};
+
+static void nvram_realize(DeviceState *dev, Error **errp)
+{
+ A1NVRAMState *s = A1_NVRAM(dev);
+ void *p;
+ uint32_t crc, *c;
+
+ memory_region_init_rom_device(&s->mr, NULL, &nvram_ops, s, "nvram",
+ NVRAM_SIZE, &error_fatal);
+ sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->mr);
+ c = p = memory_region_get_ram_ptr(&s->mr);
+ if (s->blk) {
+ if (blk_getlength(s->blk) != NVRAM_SIZE) {
+ error_setg(errp, "NVRAM backing file size must be %" PRId64 "bytes",
+ NVRAM_SIZE);
+ return;
+ }
+ blk_set_perm(s->blk, BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE,
+ BLK_PERM_ALL, &error_fatal);
+ if (blk_pread(s->blk, 0, NVRAM_SIZE, p, 0) < 0) {
+ error_setg(errp, "Cannot read NVRAM contents from backing file");
+ return;
+ }
+ }
+ crc = crc32(0, p + 4, NVRAM_SIZE - 4);
+    if (crc == CRC32_ALL_ZEROS) { /* If env is uninitialized, set default */
+ *c = cpu_to_be32(CRC32_DEFAULT_ENV);
+ /* Also copies terminating \0 as env is terminated by \0\0 */
+ memcpy(p + 4, default_env, sizeof(default_env));
+ if (s->blk) {
+ blk_pwrite(s->blk, 0, sizeof(crc) + sizeof(default_env), p, 0);
+ }
+ return;
+ }
+ if (*c == 0) {
+ *c = cpu_to_be32(crc32(0, p + 4, NVRAM_SIZE - 4));
+ if (s->blk) {
+ blk_pwrite(s->blk, 0, 4, p, 0);
+ }
+ }
+ if (be32_to_cpu(*c) != crc) {
+ warn_report("NVRAM checksum mismatch");
+ }
+}
+
+static const Property nvram_properties[] = {
+ DEFINE_PROP_DRIVE("drive", A1NVRAMState, blk),
+};
+
+static void nvram_class_init(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+
+ dc->realize = nvram_realize;
+ device_class_set_props(dc, nvram_properties);
+}
+
+static const TypeInfo nvram_types[] = {
+ {
+ .name = TYPE_A1_NVRAM,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(A1NVRAMState),
+ .class_init = nvram_class_init,
+ },
+};
+DEFINE_TYPES(nvram_types)
+
+struct boot_info {
+ hwaddr entry;
+ hwaddr stack;
+ hwaddr bd_info;
+ hwaddr initrd_start;
+ hwaddr initrd_end;
+ hwaddr cmdline_start;
+ hwaddr cmdline_end;
+};
+
+/* Board info struct from U-Boot */
+struct bd_info {
+ uint32_t bi_memstart;
+ uint32_t bi_memsize;
+ uint32_t bi_flashstart;
+ uint32_t bi_flashsize;
+ uint32_t bi_flashoffset;
+ uint32_t bi_sramstart;
+ uint32_t bi_sramsize;
+ uint32_t bi_bootflags;
+ uint32_t bi_ip_addr;
+ uint8_t bi_enetaddr[6];
+ uint16_t bi_ethspeed;
+ uint32_t bi_intfreq;
+ uint32_t bi_busfreq;
+ uint32_t bi_baudrate;
+} QEMU_PACKED;
+
+static void create_bd_info(hwaddr addr, ram_addr_t ram_size)
+{
+ struct bd_info *bd = g_new0(struct bd_info, 1);
+
+ bd->bi_memsize = cpu_to_be32(ram_size);
+ bd->bi_flashstart = cpu_to_be32(PROM_ADDR);
+ bd->bi_flashsize = cpu_to_be32(1); /* match what U-Boot detects */
+ bd->bi_bootflags = cpu_to_be32(1);
+ bd->bi_intfreq = cpu_to_be32(11.5 * BUS_FREQ_HZ);
+ bd->bi_busfreq = cpu_to_be32(BUS_FREQ_HZ);
+ bd->bi_baudrate = cpu_to_be32(115200);
+
+ cpu_physical_memory_write(addr, bd, sizeof(*bd));
+}
+
static void amigaone_cpu_reset(void *opaque)
{
PowerPCCPU *cpu = opaque;
+ CPUPPCState *env = &cpu->env;
cpu_reset(CPU(cpu));
- cpu_ppc_tb_reset(&cpu->env);
+ if (env->load_info) {
+ struct boot_info *bi = env->load_info;
+
+ env->gpr[1] = bi->stack;
+ env->gpr[2] = 1024;
+ env->gpr[3] = bi->bd_info;
+ env->gpr[4] = bi->initrd_start;
+ env->gpr[5] = bi->initrd_end;
+ env->gpr[6] = bi->cmdline_start;
+ env->gpr[7] = bi->cmdline_end;
+ env->nip = bi->entry;
+ }
+ cpu_ppc_tb_reset(env);
}
static void fix_spd_data(uint8_t *spd)
@@ -75,7 +271,9 @@ static void amigaone_init(MachineState *machine)
DeviceState *dev;
I2CBus *i2c_bus;
uint8_t *spd_data;
- int i;
+ DriveInfo *di;
+ hwaddr loadaddr;
+ struct boot_info *bi = NULL;
/* init CPU */
cpu = POWERPC_CPU(cpu_create(machine->cpu_type));
@@ -97,9 +295,19 @@ static void amigaone_init(MachineState *machine)
/* Firmware uses this area for startup */
mr = g_new(MemoryRegion, 1);
memory_region_init_ram(mr, NULL, "init-cache", 32 * KiB, &error_fatal);
- memory_region_add_subregion(get_system_memory(), 0x40000000, mr);
+ memory_region_add_subregion(get_system_memory(), INIT_RAM_ADDR, mr);
}
+ /* nvram */
+ dev = qdev_new(TYPE_A1_NVRAM);
+ di = drive_get(IF_MTD, 0, 0);
+ if (di) {
+ qdev_prop_set_drive(dev, "drive", blk_by_legacy_dinfo(di));
+ }
+ sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
+ memory_region_add_subregion(get_system_memory(), NVRAM_ADDR,
+ sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 0));
+
/* allocate and load firmware */
rom = g_new(MemoryRegion, 1);
memory_region_init_rom(rom, NULL, "rom", PROM_SIZE, &error_fatal);
@@ -122,7 +330,7 @@ static void amigaone_init(MachineState *machine)
}
/* Articia S */
- dev = sysbus_create_simple(TYPE_ARTICIA, 0xfe000000, NULL);
+ dev = sysbus_create_simple(TYPE_ARTICIA, ARTICIA_ADDR, NULL);
i2c_bus = I2C_BUS(qdev_get_child_bus(dev, "smbus"));
if (machine->ram_size > 512 * MiB) {
@@ -139,12 +347,12 @@ static void amigaone_init(MachineState *machine)
pci_mem = sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 1);
mr = g_new(MemoryRegion, 1);
memory_region_init_alias(mr, OBJECT(dev), "pci-mem-low", pci_mem,
- 0, 0x1000000);
- memory_region_add_subregion(get_system_memory(), 0xfd000000, mr);
+ 0, PCI_LOW_SIZE);
+ memory_region_add_subregion(get_system_memory(), PCI_LOW_ADDR, mr);
mr = g_new(MemoryRegion, 1);
memory_region_init_alias(mr, OBJECT(dev), "pci-mem-high", pci_mem,
- 0x80000000, 0x7d000000);
- memory_region_add_subregion(get_system_memory(), 0x80000000, mr);
+ PCI_HIGH_ADDR, PCI_HIGH_SIZE);
+ memory_region_add_subregion(get_system_memory(), PCI_HIGH_ADDR, mr);
pci_bus = PCI_BUS(qdev_get_child_bus(dev, "pci.0"));
/* VIA VT82c686B South Bridge (multifunction PCI device) */
@@ -156,12 +364,62 @@ static void amigaone_init(MachineState *machine)
qdev_connect_gpio_out_named(DEVICE(via), "intr", 0,
qdev_get_gpio_in(DEVICE(cpu),
PPC6xx_INPUT_INT));
- for (i = 0; i < PCI_NUM_PINS; i++) {
+ for (int i = 0; i < PCI_NUM_PINS; i++) {
qdev_connect_gpio_out(dev, i, qdev_get_gpio_in_named(DEVICE(via),
"pirq", i));
}
pci_ide_create_devs(PCI_DEVICE(object_resolve_path_component(via, "ide")));
pci_vga_init(pci_bus);
+
+ if (!machine->kernel_filename) {
+ return;
+ }
+
+ /* handle -kernel, -initrd, -append options and emulate U-Boot */
+ bi = g_new0(struct boot_info, 1);
+ cpu->env.load_info = bi;
+
+ loadaddr = MIN(machine->ram_size, 256 * MiB);
+ bi->bd_info = loadaddr - 8 * MiB;
+ create_bd_info(bi->bd_info, machine->ram_size);
+ bi->stack = bi->bd_info - 64 * KiB - 8;
+
+ if (machine->kernel_cmdline && machine->kernel_cmdline[0]) {
+ size_t len = strlen(machine->kernel_cmdline);
+
+ loadaddr = bi->bd_info + 1 * MiB;
+ cpu_physical_memory_write(loadaddr, machine->kernel_cmdline, len + 1);
+ bi->cmdline_start = loadaddr;
+ bi->cmdline_end = loadaddr + len + 1; /* including terminating '\0' */
+ }
+
+ sz = load_elf(machine->kernel_filename, NULL, NULL, NULL,
+ &bi->entry, &loadaddr, NULL, NULL,
+ ELFDATA2MSB, PPC_ELF_MACHINE, 0, 0);
+ if (sz <= 0) {
+ sz = load_uimage(machine->kernel_filename, &bi->entry, &loadaddr,
+ NULL, NULL, NULL);
+ }
+ if (sz <= 0) {
+ error_report("Could not load kernel '%s'",
+ machine->kernel_filename);
+ exit(1);
+ }
+ loadaddr += sz;
+
+ if (machine->initrd_filename) {
+ loadaddr = ROUND_UP(loadaddr + 4 * MiB, 4 * KiB);
+ loadaddr = MAX(loadaddr, INITRD_MIN_ADDR);
+ sz = load_image_targphys(machine->initrd_filename, loadaddr,
+ bi->bd_info - loadaddr);
+ if (sz <= 0) {
+ error_report("Could not load initrd '%s'",
+ machine->initrd_filename);
+ exit(1);
+ }
+ bi->initrd_start = loadaddr;
+ bi->initrd_end = loadaddr + sz;
+ }
}
static void amigaone_machine_init(MachineClass *mc)
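
For testing the NVRAM device added above, a backing image has to match the layout
nvram_realize() expects: 4 KiB total, a big-endian CRC32 of bytes 4..4095 in the
first word, and NUL-separated "name=value" environment strings ending in an extra
NUL. A minimal host-side sketch (not QEMU code; the output file name and the
environment contents are arbitrary examples):

/* Build a blank 4 KiB AmigaOne-style NVRAM image; link with -lz. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <zlib.h>

#define NVRAM_SIZE 4096

int main(void)
{
    /* adjacent literals concatenate, the implicit terminator gives "\0\0" */
    static const char env[] = "baudrate=115200\0" "stdout=vga\0";
    uint8_t img[NVRAM_SIZE] = { 0 };
    uint32_t crc;

    memcpy(img + 4, env, sizeof(env));
    crc = crc32(0, img + 4, NVRAM_SIZE - 4);
    img[0] = crc >> 24; img[1] = crc >> 16;     /* store big-endian */
    img[2] = crc >> 8;  img[3] = crc;

    FILE *f = fopen("a1-nvram.img", "wb");      /* file name is arbitrary */
    if (!f || fwrite(img, 1, sizeof(img), f) != sizeof(img)) {
        perror("a1-nvram.img");
        return 1;
    }
    fclose(f);
    return 0;
}

The machine code above fetches the drive with drive_get(IF_MTD, 0, 0), so such an
image would be attached with something like -drive if=mtd,format=raw,file=a1-nvram.img.
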
diff --git a/hw/ppc/meson.build b/hw/ppc/meson.build
index 7cd9189..9893f8a 100644
--- a/hw/ppc/meson.build
+++ b/hw/ppc/meson.build
@@ -57,9 +57,6 @@ ppc_ss.add(when: 'CONFIG_POWERNV', if_true: files(
'pnv_n1_chiplet.c',
))
# PowerPC 4xx boards
-ppc_ss.add(when: 'CONFIG_PPC405', if_true: files(
- 'ppc405_boards.c',
- 'ppc405_uc.c'))
ppc_ss.add(when: 'CONFIG_PPC440', if_true: files(
'ppc440_bamboo.c',
'ppc440_uc.c'))
diff --git a/hw/ppc/pnv.c b/hw/ppc/pnv.c
index 8760750..5936537 100644
--- a/hw/ppc/pnv.c
+++ b/hw/ppc/pnv.c
@@ -1,7 +1,9 @@
/*
* QEMU PowerPC PowerNV machine model
*
- * Copyright (c) 2016, IBM Corporation.
+ * Copyright (c) 2016-2024, IBM Corporation.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
@@ -64,6 +66,8 @@
#define FW_LOAD_ADDR 0x0
#define FW_MAX_SIZE (16 * MiB)
+#define PNOR_FILE_NAME "pnv-pnor.bin"
+
#define KERNEL_LOAD_ADDR 0x20000000
#define KERNEL_MAX_SIZE (128 * MiB)
#define INITRD_LOAD_ADDR 0x28000000
@@ -941,7 +945,7 @@ static void pnv_init(MachineState *machine)
uint64_t chip_ram_start = 0;
int i;
char *chip_typename;
- DriveInfo *pnor = drive_get(IF_MTD, 0, 0);
+ DriveInfo *pnor;
DeviceState *dev;
if (kvm_enabled()) {
@@ -971,6 +975,18 @@ static void pnv_init(MachineState *machine)
* Create our simple PNOR device
*/
dev = qdev_new(TYPE_PNV_PNOR);
+ pnor = drive_get(IF_MTD, 0, 0);
+ if (!pnor && defaults_enabled()) {
+ fw_filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, PNOR_FILE_NAME);
+ if (!fw_filename) {
+ warn_report("Could not find PNOR '%s'", PNOR_FILE_NAME);
+ } else {
+ QemuOpts *opts;
+ opts = drive_add(IF_MTD, -1, fw_filename, "format=raw,readonly=on");
+ pnor = drive_new(opts, IF_MTD, &error_fatal);
+ g_free(fw_filename);
+ }
+ }
if (pnor) {
qdev_prop_set_drive(dev, "drive", blk_by_legacy_dinfo(pnor));
}
@@ -1555,7 +1571,21 @@ static void pnv_chip_power8_realize(DeviceState *dev, Error **errp)
return;
}
+ /* HOMER (must be created before OCC) */
+ object_property_set_link(OBJECT(&chip8->homer), "chip", OBJECT(chip),
+ &error_abort);
+ if (!qdev_realize(DEVICE(&chip8->homer), NULL, errp)) {
+ return;
+ }
+ /* Homer Xscom region */
+ pnv_xscom_add_subregion(chip, PNV_XSCOM_PBA_BASE, &chip8->homer.pba_regs);
+ /* Homer RAM region */
+ memory_region_add_subregion(get_system_memory(), chip8->homer.base,
+ &chip8->homer.mem);
+
/* Create the simplified OCC model */
+ object_property_set_link(OBJECT(&chip8->occ), "homer",
+ OBJECT(&chip8->homer), &error_abort);
if (!qdev_realize(DEVICE(&chip8->occ), NULL, errp)) {
return;
}
@@ -1567,19 +1597,6 @@ static void pnv_chip_power8_realize(DeviceState *dev, Error **errp)
memory_region_add_subregion(get_system_memory(), PNV_OCC_SENSOR_BASE(chip),
&chip8->occ.sram_regs);
- /* HOMER */
- object_property_set_link(OBJECT(&chip8->homer), "chip", OBJECT(chip),
- &error_abort);
- if (!qdev_realize(DEVICE(&chip8->homer), NULL, errp)) {
- return;
- }
- /* Homer Xscom region */
- pnv_xscom_add_subregion(chip, PNV_XSCOM_PBA_BASE, &chip8->homer.pba_regs);
-
- /* Homer mmio region */
- memory_region_add_subregion(get_system_memory(), PNV_HOMER_BASE(chip),
- &chip8->homer.regs);
-
/* PHB controllers */
for (i = 0; i < chip8->num_phbs; i++) {
PnvPHB *phb = chip8->phbs[i];
@@ -1753,6 +1770,7 @@ static void pnv_chip_power9_pec_realize(PnvChip *chip, Error **errp)
for (i = 0; i < chip->num_pecs; i++) {
PnvPhb4PecState *pec = &chip9->pecs[i];
PnvPhb4PecClass *pecc = PNV_PHB4_PEC_GET_CLASS(pec);
+ uint32_t pec_cplt_base;
uint32_t pec_nest_base;
uint32_t pec_pci_base;
@@ -1765,9 +1783,12 @@ static void pnv_chip_power9_pec_realize(PnvChip *chip, Error **errp)
return;
}
+ pec_cplt_base = pecc->xscom_cplt_base(pec);
pec_nest_base = pecc->xscom_nest_base(pec);
pec_pci_base = pecc->xscom_pci_base(pec);
+ pnv_xscom_add_subregion(chip, pec_cplt_base,
+ &pec->nest_pervasive.xscom_ctrl_regs_mr);
pnv_xscom_add_subregion(chip, pec_nest_base, &pec->nest_regs_mr);
pnv_xscom_add_subregion(chip, pec_pci_base, &pec->pci_regs_mr);
}
@@ -1859,18 +1880,6 @@ static void pnv_chip_power9_realize(DeviceState *dev, Error **errp)
pnv_xscom_add_subregion(chip, PNV9_XSCOM_CHIPTOD_BASE,
&chip9->chiptod.xscom_regs);
- /* Create the simplified OCC model */
- if (!qdev_realize(DEVICE(&chip9->occ), NULL, errp)) {
- return;
- }
- pnv_xscom_add_subregion(chip, PNV9_XSCOM_OCC_BASE, &chip9->occ.xscom_regs);
- qdev_connect_gpio_out(DEVICE(&chip9->occ), 0, qdev_get_gpio_in(
- DEVICE(psi9), PSIHB9_IRQ_OCC));
-
- /* OCC SRAM model */
- memory_region_add_subregion(get_system_memory(), PNV9_OCC_SENSOR_BASE(chip),
- &chip9->occ.sram_regs);
-
/* SBE */
if (!qdev_realize(DEVICE(&chip9->sbe), NULL, errp)) {
return;
@@ -1882,7 +1891,7 @@ static void pnv_chip_power9_realize(DeviceState *dev, Error **errp)
qdev_connect_gpio_out(DEVICE(&chip9->sbe), 0, qdev_get_gpio_in(
DEVICE(psi9), PSIHB9_IRQ_PSU));
- /* HOMER */
+ /* HOMER (must be created before OCC) */
object_property_set_link(OBJECT(&chip9->homer), "chip", OBJECT(chip),
&error_abort);
if (!qdev_realize(DEVICE(&chip9->homer), NULL, errp)) {
@@ -1890,10 +1899,23 @@ static void pnv_chip_power9_realize(DeviceState *dev, Error **errp)
}
/* Homer Xscom region */
pnv_xscom_add_subregion(chip, PNV9_XSCOM_PBA_BASE, &chip9->homer.pba_regs);
+ /* Homer RAM region */
+ memory_region_add_subregion(get_system_memory(), chip9->homer.base,
+ &chip9->homer.mem);
+
+ /* Create the simplified OCC model */
+ object_property_set_link(OBJECT(&chip9->occ), "homer",
+ OBJECT(&chip9->homer), &error_abort);
+ if (!qdev_realize(DEVICE(&chip9->occ), NULL, errp)) {
+ return;
+ }
+ pnv_xscom_add_subregion(chip, PNV9_XSCOM_OCC_BASE, &chip9->occ.xscom_regs);
+ qdev_connect_gpio_out(DEVICE(&chip9->occ), 0, qdev_get_gpio_in(
+ DEVICE(psi9), PSIHB9_IRQ_OCC));
- /* Homer mmio region */
- memory_region_add_subregion(get_system_memory(), PNV9_HOMER_BASE(chip),
- &chip9->homer.regs);
+ /* OCC SRAM model */
+ memory_region_add_subregion(get_system_memory(), PNV9_OCC_SENSOR_BASE(chip),
+ &chip9->occ.sram_regs);
/* PEC PHBs */
pnv_chip_power9_pec_realize(chip, &local_err);
@@ -2027,6 +2049,7 @@ static void pnv_chip_power10_phb_realize(PnvChip *chip, Error **errp)
for (i = 0; i < chip->num_pecs; i++) {
PnvPhb4PecState *pec = &chip10->pecs[i];
PnvPhb4PecClass *pecc = PNV_PHB4_PEC_GET_CLASS(pec);
+ uint32_t pec_cplt_base;
uint32_t pec_nest_base;
uint32_t pec_pci_base;
@@ -2039,9 +2062,12 @@ static void pnv_chip_power10_phb_realize(PnvChip *chip, Error **errp)
return;
}
+ pec_cplt_base = pecc->xscom_cplt_base(pec);
pec_nest_base = pecc->xscom_nest_base(pec);
pec_pci_base = pecc->xscom_pci_base(pec);
+ pnv_xscom_add_subregion(chip, pec_cplt_base,
+ &pec->nest_pervasive.xscom_ctrl_regs_mr);
pnv_xscom_add_subregion(chip, pec_nest_base, &pec->nest_regs_mr);
pnv_xscom_add_subregion(chip, pec_pci_base, &pec->pci_regs_mr);
}
@@ -2136,7 +2162,22 @@ static void pnv_chip_power10_realize(DeviceState *dev, Error **errp)
pnv_xscom_add_subregion(chip, PNV10_XSCOM_CHIPTOD_BASE,
&chip10->chiptod.xscom_regs);
+ /* HOMER (must be created before OCC) */
+ object_property_set_link(OBJECT(&chip10->homer), "chip", OBJECT(chip),
+ &error_abort);
+ if (!qdev_realize(DEVICE(&chip10->homer), NULL, errp)) {
+ return;
+ }
+ /* Homer Xscom region */
+ pnv_xscom_add_subregion(chip, PNV10_XSCOM_PBA_BASE,
+ &chip10->homer.pba_regs);
+ /* Homer RAM region */
+ memory_region_add_subregion(get_system_memory(), chip10->homer.base,
+ &chip10->homer.mem);
+
/* Create the simplified OCC model */
+ object_property_set_link(OBJECT(&chip10->occ), "homer",
+ OBJECT(&chip10->homer), &error_abort);
if (!qdev_realize(DEVICE(&chip10->occ), NULL, errp)) {
return;
}
@@ -2161,20 +2202,6 @@ static void pnv_chip_power10_realize(DeviceState *dev, Error **errp)
qdev_connect_gpio_out(DEVICE(&chip10->sbe), 0, qdev_get_gpio_in(
DEVICE(&chip10->psi), PSIHB9_IRQ_PSU));
- /* HOMER */
- object_property_set_link(OBJECT(&chip10->homer), "chip", OBJECT(chip),
- &error_abort);
- if (!qdev_realize(DEVICE(&chip10->homer), NULL, errp)) {
- return;
- }
- /* Homer Xscom region */
- pnv_xscom_add_subregion(chip, PNV10_XSCOM_PBA_BASE,
- &chip10->homer.pba_regs);
-
- /* Homer mmio region */
- memory_region_add_subregion(get_system_memory(), PNV10_HOMER_BASE(chip),
- &chip10->homer.regs);
-
/* N1 chiplet */
if (!qdev_realize(DEVICE(&chip10->n1_chiplet), NULL, errp)) {
return;
@@ -2225,6 +2252,8 @@ static void pnv_chip_power10_realize(DeviceState *dev, Error **errp)
/* pib_spic[2] connected to 25csm04 which implements 1 byte transfer */
object_property_set_int(OBJECT(&chip10->pib_spic[i]), "transfer_len",
(i == 2) ? 1 : 4, &error_fatal);
+ object_property_set_int(OBJECT(&chip10->pib_spic[i]), "chip-id",
+ chip->chip_id, &error_fatal);
if (!sysbus_realize(SYS_BUS_DEVICE(OBJECT
(&chip10->pib_spic[i])), errp)) {
return;
@@ -2581,7 +2610,7 @@ static void pnv_pic_print_info(InterruptStatsProvider *obj, GString *buf)
static int pnv_match_nvt(XiveFabric *xfb, uint8_t format,
uint8_t nvt_blk, uint32_t nvt_idx,
- bool cam_ignore, uint8_t priority,
+ bool crowd, bool cam_ignore, uint8_t priority,
uint32_t logic_serv,
XiveTCTXMatch *match)
{
@@ -2595,8 +2624,8 @@ static int pnv_match_nvt(XiveFabric *xfb, uint8_t format,
XivePresenterClass *xpc = XIVE_PRESENTER_GET_CLASS(xptr);
int count;
- count = xpc->match_nvt(xptr, format, nvt_blk, nvt_idx, cam_ignore,
- priority, logic_serv, match);
+ count = xpc->match_nvt(xptr, format, nvt_blk, nvt_idx, crowd,
+ cam_ignore, priority, logic_serv, match);
if (count < 0) {
return count;
@@ -2610,7 +2639,7 @@ static int pnv_match_nvt(XiveFabric *xfb, uint8_t format,
static int pnv10_xive_match_nvt(XiveFabric *xfb, uint8_t format,
uint8_t nvt_blk, uint32_t nvt_idx,
- bool cam_ignore, uint8_t priority,
+ bool crowd, bool cam_ignore, uint8_t priority,
uint32_t logic_serv,
XiveTCTXMatch *match)
{
@@ -2624,8 +2653,8 @@ static int pnv10_xive_match_nvt(XiveFabric *xfb, uint8_t format,
XivePresenterClass *xpc = XIVE_PRESENTER_GET_CLASS(xptr);
int count;
- count = xpc->match_nvt(xptr, format, nvt_blk, nvt_idx, cam_ignore,
- priority, logic_serv, match);
+ count = xpc->match_nvt(xptr, format, nvt_blk, nvt_idx, crowd,
+ cam_ignore, priority, logic_serv, match);
if (count < 0) {
return count;
@@ -2637,6 +2666,24 @@ static int pnv10_xive_match_nvt(XiveFabric *xfb, uint8_t format,
return total_count;
}
+static int pnv10_xive_broadcast(XiveFabric *xfb,
+ uint8_t nvt_blk, uint32_t nvt_idx,
+ bool crowd, bool cam_ignore,
+ uint8_t priority)
+{
+ PnvMachineState *pnv = PNV_MACHINE(xfb);
+ int i;
+
+ for (i = 0; i < pnv->num_chips; i++) {
+ Pnv10Chip *chip10 = PNV10_CHIP(pnv->chips[i]);
+ XivePresenter *xptr = XIVE_PRESENTER(&chip10->xive);
+ XivePresenterClass *xpc = XIVE_PRESENTER_GET_CLASS(xptr);
+
+ xpc->broadcast(xptr, nvt_blk, nvt_idx, crowd, cam_ignore, priority);
+ }
+ return 0;
+}
+
static bool pnv_machine_get_big_core(Object *obj, Error **errp)
{
PnvMachineState *pnv = PNV_MACHINE(obj);
@@ -2770,6 +2817,7 @@ static void pnv_machine_p10_common_class_init(ObjectClass *oc, void *data)
pmc->dt_power_mgt = pnv_dt_power_mgt;
xfc->match_nvt = pnv10_xive_match_nvt;
+ xfc->broadcast = pnv10_xive_broadcast;
machine_class_allow_dynamic_sysbus_dev(mc, TYPE_PNV_PHB);
}
diff --git a/hw/ppc/pnv_bmc.c b/hw/ppc/pnv_bmc.c
index 0c1274d..811ba3d 100644
--- a/hw/ppc/pnv_bmc.c
+++ b/hw/ppc/pnv_bmc.c
@@ -251,10 +251,38 @@ static const IPMINetfn hiomap_netfn = {
void pnv_bmc_set_pnor(IPMIBmc *bmc, PnvPnor *pnor)
{
+ uint32_t pnor_size = pnor->size;
+ uint32_t pnor_addr = PNOR_SPI_OFFSET;
+
if (!pnv_bmc_is_simulator(bmc)) {
return;
}
+ /*
+ * The HIOMAP protocol uses block units and 16-bit addressing.
+     * Reject addresses and sizes that would overflow or are misaligned.
+ */
+ if (pnor_addr >= 1U << (BLOCK_SHIFT + 16)) {
+ warn_report("PNOR address is larger than 2^%d, disabling PNOR",
+ BLOCK_SHIFT + 16);
+ return;
+ }
+ if (pnor_addr & ((1U << BLOCK_SHIFT) - 1)) {
+ warn_report("PNOR address is not aligned to 2^%d, disabling PNOR",
+ BLOCK_SHIFT);
+ return;
+ }
+ if (pnor_size > 1U << (BLOCK_SHIFT + 16)) {
+ warn_report("PNOR size is larger than 2^%d, disabling PNOR",
+ BLOCK_SHIFT + 16);
+ return;
+ }
+ if (pnor_size & ((1U << BLOCK_SHIFT) - 1)) {
+ warn_report("PNOR size is not aligned to 2^%d, disabling PNOR",
+ BLOCK_SHIFT);
+ return;
+ }
+
object_ref(OBJECT(pnor));
object_property_add_const_link(OBJECT(bmc), "pnor", OBJECT(pnor));
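
The checks above bound the PNOR to what HIOMAP's block-unit, 16-bit addressing can
describe. A standalone sketch of the same limit, assuming BLOCK_SHIFT is 12 (4 KiB
blocks) purely for illustration -- the real value lives in hw/ppc/pnv_bmc.c:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BLOCK_SHIFT 12  /* assumed block size of 4 KiB for the example */

static bool hiomap_range_ok(uint32_t addr, uint32_t size)
{
    uint64_t limit = 1ULL << (BLOCK_SHIFT + 16);   /* 16-bit block addressing */
    uint32_t block_mask = (1U << BLOCK_SHIFT) - 1;

    return addr < limit && size <= limit &&
           (addr & block_mask) == 0 && (size & block_mask) == 0;
}

int main(void)
{
    /* a 64 MiB PNOR at offset 0 fits, a 512 MiB one exceeds the window */
    printf("%d %d\n", hiomap_range_ok(0, 64 << 20),
           hiomap_range_ok(0, 512 << 20));
    return 0;
}
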
diff --git a/hw/ppc/pnv_homer.c b/hw/ppc/pnv_homer.c
index a1d83c8..18a53a8 100644
--- a/hw/ppc/pnv_homer.c
+++ b/hw/ppc/pnv_homer.c
@@ -29,94 +29,6 @@
#include "hw/ppc/pnv_homer.h"
#include "hw/ppc/pnv_xscom.h"
-
-static bool core_max_array(PnvHomer *homer, hwaddr addr)
-{
- int i;
- PnvHomerClass *hmrc = PNV_HOMER_GET_CLASS(homer);
-
- for (i = 0; i <= homer->chip->nr_cores; i++) {
- if (addr == (hmrc->core_max_base + i)) {
- return true;
- }
- }
- return false;
-}
-
-/* P8 Pstate table */
-
-#define PNV8_OCC_PSTATE_VERSION 0x1f8001
-#define PNV8_OCC_PSTATE_MIN 0x1f8003
-#define PNV8_OCC_PSTATE_VALID 0x1f8000
-#define PNV8_OCC_PSTATE_THROTTLE 0x1f8002
-#define PNV8_OCC_PSTATE_NOM 0x1f8004
-#define PNV8_OCC_PSTATE_TURBO 0x1f8005
-#define PNV8_OCC_PSTATE_ULTRA_TURBO 0x1f8006
-#define PNV8_OCC_PSTATE_DATA 0x1f8008
-#define PNV8_OCC_PSTATE_ID_ZERO 0x1f8010
-#define PNV8_OCC_PSTATE_ID_ONE 0x1f8018
-#define PNV8_OCC_PSTATE_ID_TWO 0x1f8020
-#define PNV8_OCC_VDD_VOLTAGE_IDENTIFIER 0x1f8012
-#define PNV8_OCC_VCS_VOLTAGE_IDENTIFIER 0x1f8013
-#define PNV8_OCC_PSTATE_ZERO_FREQUENCY 0x1f8014
-#define PNV8_OCC_PSTATE_ONE_FREQUENCY 0x1f801c
-#define PNV8_OCC_PSTATE_TWO_FREQUENCY 0x1f8024
-#define PNV8_CORE_MAX_BASE 0x1f8810
-
-
-static uint64_t pnv_power8_homer_read(void *opaque, hwaddr addr,
- unsigned size)
-{
- PnvHomer *homer = PNV_HOMER(opaque);
-
- switch (addr) {
- case PNV8_OCC_PSTATE_VERSION:
- case PNV8_OCC_PSTATE_MIN:
- case PNV8_OCC_PSTATE_ID_ZERO:
- return 0;
- case PNV8_OCC_PSTATE_VALID:
- case PNV8_OCC_PSTATE_THROTTLE:
- case PNV8_OCC_PSTATE_NOM:
- case PNV8_OCC_PSTATE_TURBO:
- case PNV8_OCC_PSTATE_ID_ONE:
- case PNV8_OCC_VDD_VOLTAGE_IDENTIFIER:
- case PNV8_OCC_VCS_VOLTAGE_IDENTIFIER:
- return 1;
- case PNV8_OCC_PSTATE_ULTRA_TURBO:
- case PNV8_OCC_PSTATE_ID_TWO:
- return 2;
- case PNV8_OCC_PSTATE_DATA:
- return 0x1000000000000000;
- /* P8 frequency for 0, 1, and 2 pstates */
- case PNV8_OCC_PSTATE_ZERO_FREQUENCY:
- case PNV8_OCC_PSTATE_ONE_FREQUENCY:
- case PNV8_OCC_PSTATE_TWO_FREQUENCY:
- return 3000;
- }
- /* pstate table core max array */
- if (core_max_array(homer, addr)) {
- return 1;
- }
- return 0;
-}
-
-static void pnv_power8_homer_write(void *opaque, hwaddr addr,
- uint64_t val, unsigned size)
-{
- /* callback function defined to homer write */
- return;
-}
-
-static const MemoryRegionOps pnv_power8_homer_ops = {
- .read = pnv_power8_homer_read,
- .write = pnv_power8_homer_write,
- .valid.min_access_size = 1,
- .valid.max_access_size = 8,
- .impl.min_access_size = 1,
- .impl.max_access_size = 8,
- .endianness = DEVICE_BIG_ENDIAN,
-};
-
/* P8 PBA BARs */
#define PBA_BAR0 0x00
#define PBA_BAR1 0x01
@@ -131,16 +43,16 @@ static uint64_t pnv_homer_power8_pba_read(void *opaque, hwaddr addr,
unsigned size)
{
PnvHomer *homer = PNV_HOMER(opaque);
- PnvChip *chip = homer->chip;
+ PnvHomerClass *hmrc = PNV_HOMER_GET_CLASS(homer);
uint32_t reg = addr >> 3;
uint64_t val = 0;
switch (reg) {
case PBA_BAR0:
- val = PNV_HOMER_BASE(chip);
+ val = homer->base;
break;
case PBA_BARMASK0: /* P8 homer region mask */
- val = (PNV_HOMER_SIZE - 1) & 0x300000;
+ val = (hmrc->size - 1) & 0x300000;
break;
case PBA_BAR3: /* P8 occ common area */
val = PNV_OCC_COMMON_AREA_BASE;
@@ -172,15 +84,19 @@ static const MemoryRegionOps pnv_homer_power8_pba_ops = {
.endianness = DEVICE_BIG_ENDIAN,
};
+static hwaddr pnv_homer_power8_get_base(PnvChip *chip)
+{
+ return PNV_HOMER_BASE(chip);
+}
+
static void pnv_homer_power8_class_init(ObjectClass *klass, void *data)
{
PnvHomerClass *homer = PNV_HOMER_CLASS(klass);
+ homer->get_base = pnv_homer_power8_get_base;
+ homer->size = PNV_HOMER_SIZE;
homer->pba_size = PNV_XSCOM_PBA_SIZE;
homer->pba_ops = &pnv_homer_power8_pba_ops;
- homer->homer_size = PNV_HOMER_SIZE;
- homer->homer_ops = &pnv_power8_homer_ops;
- homer->core_max_base = PNV8_CORE_MAX_BASE;
}
static const TypeInfo pnv_homer_power8_type_info = {
@@ -190,100 +106,20 @@ static const TypeInfo pnv_homer_power8_type_info = {
.class_init = pnv_homer_power8_class_init,
};
-/* P9 Pstate table */
-
-#define PNV9_OCC_PSTATE_ID_ZERO 0xe2018
-#define PNV9_OCC_PSTATE_ID_ONE 0xe2020
-#define PNV9_OCC_PSTATE_ID_TWO 0xe2028
-#define PNV9_OCC_PSTATE_DATA 0xe2000
-#define PNV9_OCC_PSTATE_DATA_AREA 0xe2008
-#define PNV9_OCC_PSTATE_MIN 0xe2003
-#define PNV9_OCC_PSTATE_NOM 0xe2004
-#define PNV9_OCC_PSTATE_TURBO 0xe2005
-#define PNV9_OCC_PSTATE_ULTRA_TURBO 0xe2818
-#define PNV9_OCC_MAX_PSTATE_ULTRA_TURBO 0xe2006
-#define PNV9_OCC_PSTATE_MAJOR_VERSION 0xe2001
-#define PNV9_OCC_OPAL_RUNTIME_DATA 0xe2b85
-#define PNV9_CHIP_HOMER_IMAGE_POINTER 0x200008
-#define PNV9_CHIP_HOMER_BASE 0x0
-#define PNV9_OCC_PSTATE_ZERO_FREQUENCY 0xe201c
-#define PNV9_OCC_PSTATE_ONE_FREQUENCY 0xe2024
-#define PNV9_OCC_PSTATE_TWO_FREQUENCY 0xe202c
-#define PNV9_OCC_ROLE_MASTER_OR_SLAVE 0xe2002
-#define PNV9_CORE_MAX_BASE 0xe2819
-
-
-static uint64_t pnv_power9_homer_read(void *opaque, hwaddr addr,
- unsigned size)
-{
- PnvHomer *homer = PNV_HOMER(opaque);
-
- switch (addr) {
- case PNV9_OCC_MAX_PSTATE_ULTRA_TURBO:
- case PNV9_OCC_PSTATE_ID_ZERO:
- return 0;
- case PNV9_OCC_PSTATE_DATA:
- case PNV9_OCC_ROLE_MASTER_OR_SLAVE:
- case PNV9_OCC_PSTATE_NOM:
- case PNV9_OCC_PSTATE_TURBO:
- case PNV9_OCC_PSTATE_ID_ONE:
- case PNV9_OCC_PSTATE_ULTRA_TURBO:
- case PNV9_OCC_OPAL_RUNTIME_DATA:
- return 1;
- case PNV9_OCC_PSTATE_MIN:
- case PNV9_OCC_PSTATE_ID_TWO:
- return 2;
-
- /* 3000 khz frequency for 0, 1, and 2 pstates */
- case PNV9_OCC_PSTATE_ZERO_FREQUENCY:
- case PNV9_OCC_PSTATE_ONE_FREQUENCY:
- case PNV9_OCC_PSTATE_TWO_FREQUENCY:
- return 3000;
- case PNV9_OCC_PSTATE_MAJOR_VERSION:
- return 0x90;
- case PNV9_CHIP_HOMER_BASE:
- case PNV9_OCC_PSTATE_DATA_AREA:
- case PNV9_CHIP_HOMER_IMAGE_POINTER:
- return 0x1000000000000000;
- }
- /* pstate table core max array */
- if (core_max_array(homer, addr)) {
- return 1;
- }
- return 0;
-}
-
-static void pnv_power9_homer_write(void *opaque, hwaddr addr,
- uint64_t val, unsigned size)
-{
- /* callback function defined to homer write */
- return;
-}
-
-static const MemoryRegionOps pnv_power9_homer_ops = {
- .read = pnv_power9_homer_read,
- .write = pnv_power9_homer_write,
- .valid.min_access_size = 1,
- .valid.max_access_size = 8,
- .impl.min_access_size = 1,
- .impl.max_access_size = 8,
- .endianness = DEVICE_BIG_ENDIAN,
-};
-
static uint64_t pnv_homer_power9_pba_read(void *opaque, hwaddr addr,
unsigned size)
{
PnvHomer *homer = PNV_HOMER(opaque);
- PnvChip *chip = homer->chip;
+ PnvHomerClass *hmrc = PNV_HOMER_GET_CLASS(homer);
uint32_t reg = addr >> 3;
uint64_t val = 0;
switch (reg) {
case PBA_BAR0:
- val = PNV9_HOMER_BASE(chip);
+ val = homer->base;
break;
case PBA_BARMASK0: /* P9 homer region mask */
- val = (PNV9_HOMER_SIZE - 1) & 0x300000;
+ val = (hmrc->size - 1) & 0x300000;
break;
case PBA_BAR2: /* P9 occ common area */
val = PNV9_OCC_COMMON_AREA_BASE;
@@ -315,15 +151,19 @@ static const MemoryRegionOps pnv_homer_power9_pba_ops = {
.endianness = DEVICE_BIG_ENDIAN,
};
+static hwaddr pnv_homer_power9_get_base(PnvChip *chip)
+{
+ return PNV9_HOMER_BASE(chip);
+}
+
static void pnv_homer_power9_class_init(ObjectClass *klass, void *data)
{
PnvHomerClass *homer = PNV_HOMER_CLASS(klass);
+ homer->get_base = pnv_homer_power9_get_base;
+ homer->size = PNV_HOMER_SIZE;
homer->pba_size = PNV9_XSCOM_PBA_SIZE;
homer->pba_ops = &pnv_homer_power9_pba_ops;
- homer->homer_size = PNV9_HOMER_SIZE;
- homer->homer_ops = &pnv_power9_homer_ops;
- homer->core_max_base = PNV9_CORE_MAX_BASE;
}
static const TypeInfo pnv_homer_power9_type_info = {
@@ -337,16 +177,16 @@ static uint64_t pnv_homer_power10_pba_read(void *opaque, hwaddr addr,
unsigned size)
{
PnvHomer *homer = PNV_HOMER(opaque);
- PnvChip *chip = homer->chip;
+ PnvHomerClass *hmrc = PNV_HOMER_GET_CLASS(homer);
uint32_t reg = addr >> 3;
uint64_t val = 0;
switch (reg) {
case PBA_BAR0:
- val = PNV10_HOMER_BASE(chip);
+ val = homer->base;
break;
case PBA_BARMASK0: /* P10 homer region mask */
- val = (PNV10_HOMER_SIZE - 1) & 0x300000;
+ val = (hmrc->size - 1) & 0x300000;
break;
case PBA_BAR2: /* P10 occ common area */
val = PNV10_OCC_COMMON_AREA_BASE;
@@ -378,15 +218,19 @@ static const MemoryRegionOps pnv_homer_power10_pba_ops = {
.endianness = DEVICE_BIG_ENDIAN,
};
+static hwaddr pnv_homer_power10_get_base(PnvChip *chip)
+{
+ return PNV10_HOMER_BASE(chip);
+}
+
static void pnv_homer_power10_class_init(ObjectClass *klass, void *data)
{
PnvHomerClass *homer = PNV_HOMER_CLASS(klass);
+ homer->get_base = pnv_homer_power10_get_base;
+ homer->size = PNV_HOMER_SIZE;
homer->pba_size = PNV10_XSCOM_PBA_SIZE;
homer->pba_ops = &pnv_homer_power10_pba_ops;
- homer->homer_size = PNV10_HOMER_SIZE;
- homer->homer_ops = &pnv_power9_homer_ops; /* TODO */
- homer->core_max_base = PNV9_CORE_MAX_BASE;
}
static const TypeInfo pnv_homer_power10_type_info = {
@@ -400,16 +244,22 @@ static void pnv_homer_realize(DeviceState *dev, Error **errp)
{
PnvHomer *homer = PNV_HOMER(dev);
PnvHomerClass *hmrc = PNV_HOMER_GET_CLASS(homer);
+ char homer_str[32];
assert(homer->chip);
pnv_xscom_region_init(&homer->pba_regs, OBJECT(dev), hmrc->pba_ops,
homer, "xscom-pba", hmrc->pba_size);
- /* homer region */
- memory_region_init_io(&homer->regs, OBJECT(dev),
- hmrc->homer_ops, homer, "homer-main-memory",
- hmrc->homer_size);
+ /* Homer RAM region */
+ homer->base = hmrc->get_base(homer->chip);
+
+ snprintf(homer_str, sizeof(homer_str), "homer-chip%d-memory",
+ homer->chip->chip_id);
+ if (!memory_region_init_ram(&homer->mem, OBJECT(homer),
+ homer_str, hmrc->size, errp)) {
+ return;
+ }
}
static const Property pnv_homer_properties[] = {
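
The PBA_BARMASK0 readbacks above report the HOMER region size through a 0x300000
mask. A tiny sketch of that computation, assuming a 4 MiB HOMER region (the value
of PNV_HOMER_SIZE is an assumption here):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t homer_size = 0x400000;                     /* assumed 4 MiB */
    uint64_t barmask0 = (homer_size - 1) & 0x300000;    /* keep bits 20-21 */

    printf("BARMASK0 = 0x%" PRIx64 "\n", barmask0);     /* prints 0x300000 */
    return 0;
}
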
diff --git a/hw/ppc/pnv_lpc.c b/hw/ppc/pnv_lpc.c
index 0480a60..d812dc8 100644
--- a/hw/ppc/pnv_lpc.c
+++ b/hw/ppc/pnv_lpc.c
@@ -85,7 +85,7 @@ enum {
#define ISA_IO_SIZE 0x00010000
#define ISA_MEM_SIZE 0x10000000
-#define ISA_FW_SIZE 0x10000000
+#define ISA_FW_SIZE 0x100000000
#define LPC_IO_OPB_ADDR 0xd0010000
#define LPC_IO_OPB_SIZE 0x00010000
#define LPC_MEM_OPB_ADDR 0xe0000000
@@ -353,6 +353,8 @@ static const MemoryRegionOps pnv_lpc_xscom_ops = {
.endianness = DEVICE_BIG_ENDIAN,
};
+static void pnv_lpc_opb_noresponse(PnvLpcController *lpc);
+
static uint64_t pnv_lpc_mmio_read(void *opaque, hwaddr addr, unsigned size)
{
PnvLpcController *lpc = PNV_LPC(opaque);
@@ -376,6 +378,7 @@ static uint64_t pnv_lpc_mmio_read(void *opaque, hwaddr addr, unsigned size)
}
if (result != MEMTX_OK) {
+ pnv_lpc_opb_noresponse(lpc);
qemu_log_mask(LOG_GUEST_ERROR, "OPB read failed at @0x%"
HWADDR_PRIx "\n", addr);
}
@@ -406,6 +409,7 @@ static void pnv_lpc_mmio_write(void *opaque, hwaddr addr,
}
if (result != MEMTX_OK) {
+ pnv_lpc_opb_noresponse(lpc);
qemu_log_mask(LOG_GUEST_ERROR, "OPB write failed at @0x%"
HWADDR_PRIx "\n", addr);
}
@@ -456,46 +460,18 @@ static void pnv_lpc_eval_irqs(PnvLpcController *lpc)
{
uint32_t active_irqs = 0;
- if (lpc->lpc_hc_irqstat & PPC_BITMASK32(16, 31)) {
- qemu_log_mask(LOG_UNIMP, "LPC HC Unimplemented irqs in IRQSTAT: "
- "0x%08"PRIx32"\n", lpc->lpc_hc_irqstat);
- }
-
- if (lpc->lpc_hc_irqser_ctrl & LPC_HC_IRQSER_EN) {
- active_irqs = lpc->lpc_hc_irqstat & lpc->lpc_hc_irqmask;
+ active_irqs = lpc->lpc_hc_irqstat & lpc->lpc_hc_irqmask;
+ if (!(lpc->lpc_hc_irqser_ctrl & LPC_HC_IRQSER_EN)) {
+ active_irqs &= ~LPC_HC_IRQ_SERIRQ_ALL;
}
/* Reflect the interrupt */
- if (!lpc->psi_has_serirq) {
+ if (lpc->psi_has_serirq) {
/*
- * POWER8 ORs all irqs together (also with LPCHC internal interrupt
- * sources) and outputs a single line that raises the PSI LPCHC irq
- * which then latches an OPB IRQ status register that sends the irq
- * to PSI.
- *
- * We don't honor the polarity register, it's pointless and unused
- * anyway
- */
- if (active_irqs) {
- lpc->opb_irq_input |= OPB_MASTER_IRQ_LPC;
- } else {
- lpc->opb_irq_input &= ~OPB_MASTER_IRQ_LPC;
- }
-
- /* Update OPB internal latch */
- lpc->opb_irq_stat |= lpc->opb_irq_input & lpc->opb_irq_mask;
-
- qemu_set_irq(lpc->psi_irq_lpchc, lpc->opb_irq_stat != 0);
- } else {
- /*
- * POWER9 and POWER10 have routing fields in OPB master registers that
+ * POWER9 and later have routing fields in OPB master registers that
* send LPC irqs to 4 output lines that raise the PSI SERIRQ irqs.
* These don't appear to get latched into an OPB register like the
* LPCHC irqs.
- *
- * POWER9 LPC controller internal irqs still go via the OPB
- * and LPCHC PSI irqs like P8, but we have no such internal sources
- * modelled yet.
*/
bool serirq_out[4] = { false, false, false, false };
int irq;
@@ -510,7 +486,39 @@ static void pnv_lpc_eval_irqs(PnvLpcController *lpc)
qemu_set_irq(lpc->psi_irq_serirq[1], serirq_out[1]);
qemu_set_irq(lpc->psi_irq_serirq[2], serirq_out[2]);
qemu_set_irq(lpc->psi_irq_serirq[3], serirq_out[3]);
+
+ /*
+ * POWER9 and later LPC controller internal irqs still go via the OPB
+ * and LPCHC PSI irqs like P8, so take the SERIRQs out and continue.
+ */
+ active_irqs &= ~LPC_HC_IRQ_SERIRQ_ALL;
}
+
+ /*
+ * POWER8 ORs all irqs together (also with LPCHC internal interrupt
+ * sources) and outputs a single line that raises the PSI LPCHC irq
+ * which then latches an OPB IRQ status register that sends the irq
+ * to PSI.
+ *
+     * We don't honor the polarity register; it's pointless and unused
+     * anyway.
+ */
+ if (active_irqs) {
+ lpc->opb_irq_input |= OPB_MASTER_IRQ_LPC;
+ } else {
+ lpc->opb_irq_input &= ~OPB_MASTER_IRQ_LPC;
+ }
+
+ /* Update OPB internal latch */
+ lpc->opb_irq_stat |= lpc->opb_irq_input & lpc->opb_irq_mask;
+
+ qemu_set_irq(lpc->psi_irq_lpchc, lpc->opb_irq_stat != 0);
+}
+
+static void pnv_lpc_opb_noresponse(PnvLpcController *lpc)
+{
+ lpc->lpc_hc_irqstat |= LPC_HC_IRQ_SYNC_NORESP_ERR;
+ pnv_lpc_eval_irqs(lpc);
}
static uint64_t lpc_hc_read(void *opaque, hwaddr addr, unsigned size)
@@ -553,10 +561,13 @@ static void lpc_hc_write(void *opaque, hwaddr addr, uint64_t val,
switch (addr) {
case LPC_HC_FW_SEG_IDSEL:
- /* XXX Actually figure out how that works as this impact
- * memory regions/aliases
+ /*
+ * ISA FW "devices" are modeled as 16x256MB windows into a
+ * 4GB LPC FW address space.
*/
+ val &= 0xf; /* Selects device 0-15 */
lpc->lpc_hc_fw_seg_idsel = val;
+ memory_region_set_alias_offset(&lpc->opb_isa_fw, val * LPC_FW_OPB_SIZE);
break;
case LPC_HC_FW_RD_ACC_SIZE:
lpc->lpc_hc_fw_rd_acc_size = val;
@@ -790,9 +801,9 @@ static void pnv_lpc_realize(DeviceState *dev, Error **errp)
memory_region_init(&lpc->opb_mr, OBJECT(dev), "lpc-opb", 0x100000000ull);
address_space_init(&lpc->opb_as, &lpc->opb_mr, "lpc-opb");
- /* Create ISA IO and Mem space regions which are the root of
- * the ISA bus (ie, ISA address spaces). We don't create a
- * separate one for FW which we alias to memory.
+ /*
+ * Create ISA IO, Mem, and FW space regions which are the root of
+ * the ISA bus (ie, ISA address spaces).
*/
memory_region_init(&lpc->isa_io, OBJECT(dev), "isa-io", ISA_IO_SIZE);
memory_region_init(&lpc->isa_mem, OBJECT(dev), "isa-mem", ISA_MEM_SIZE);
diff --git a/hw/ppc/pnv_occ.c b/hw/ppc/pnv_occ.c
index 48123ce..bda6b23 100644
--- a/hw/ppc/pnv_occ.c
+++ b/hw/ppc/pnv_occ.c
@@ -24,40 +24,53 @@
#include "hw/irq.h"
#include "hw/qdev-properties.h"
#include "hw/ppc/pnv.h"
+#include "hw/ppc/pnv_chip.h"
#include "hw/ppc/pnv_xscom.h"
#include "hw/ppc/pnv_occ.h"
+#define P8_HOMER_OPAL_DATA_OFFSET 0x1F8000
+#define P9_HOMER_OPAL_DATA_OFFSET 0x0E2000
+
#define OCB_OCI_OCCMISC 0x4020
#define OCB_OCI_OCCMISC_AND 0x4021
#define OCB_OCI_OCCMISC_OR 0x4022
+#define OCCMISC_PSI_IRQ PPC_BIT(0)
+#define OCCMISC_IRQ_SHMEM PPC_BIT(3)
/* OCC sensors */
-#define OCC_SENSOR_DATA_BLOCK_OFFSET 0x580000
-#define OCC_SENSOR_DATA_VALID 0x580001
-#define OCC_SENSOR_DATA_VERSION 0x580002
-#define OCC_SENSOR_DATA_READING_VERSION 0x580004
-#define OCC_SENSOR_DATA_NR_SENSORS 0x580008
-#define OCC_SENSOR_DATA_NAMES_OFFSET 0x580010
-#define OCC_SENSOR_DATA_READING_PING_OFFSET 0x580014
-#define OCC_SENSOR_DATA_READING_PONG_OFFSET 0x58000c
-#define OCC_SENSOR_DATA_NAME_LENGTH 0x58000d
-#define OCC_SENSOR_NAME_STRUCTURE_TYPE 0x580023
-#define OCC_SENSOR_LOC_CORE 0x580022
-#define OCC_SENSOR_LOC_GPU 0x580020
-#define OCC_SENSOR_TYPE_POWER 0x580003
-#define OCC_SENSOR_NAME 0x580005
-#define HWMON_SENSORS_MASK 0x58001e
-#define SLW_IMAGE_BASE 0x0
+#define OCC_SENSOR_DATA_BLOCK_OFFSET 0x0000
+#define OCC_SENSOR_DATA_VALID 0x0001
+#define OCC_SENSOR_DATA_VERSION 0x0002
+#define OCC_SENSOR_DATA_READING_VERSION 0x0004
+#define OCC_SENSOR_DATA_NR_SENSORS 0x0008
+#define OCC_SENSOR_DATA_NAMES_OFFSET 0x0010
+#define OCC_SENSOR_DATA_READING_PING_OFFSET 0x0014
+#define OCC_SENSOR_DATA_READING_PONG_OFFSET 0x000c
+#define OCC_SENSOR_DATA_NAME_LENGTH 0x000d
+#define OCC_SENSOR_NAME_STRUCTURE_TYPE 0x0023
+#define OCC_SENSOR_LOC_CORE 0x0022
+#define OCC_SENSOR_LOC_GPU 0x0020
+#define OCC_SENSOR_TYPE_POWER 0x0003
+#define OCC_SENSOR_NAME 0x0005
+#define HWMON_SENSORS_MASK 0x001e
static void pnv_occ_set_misc(PnvOCC *occ, uint64_t val)
{
- bool irq_state;
-
- val &= 0xffff000000000000ull;
+ val &= PPC_BITMASK(0, 18); /* Mask out unimplemented bits */
occ->occmisc = val;
- irq_state = !!(val >> 63);
- qemu_set_irq(occ->psi_irq, irq_state);
+
+    /*
+     * The OCCMISC IRQ bit triggers the interrupt on a 0->1 edge, but it is
+     * not clear how PSI handles that, so it is modelled as level-triggered
+     * here, which is not really correct (skiboot is okay with it, though).
+     */
+ qemu_set_irq(occ->psi_irq, !!(val & OCCMISC_PSI_IRQ));
+}
+
+static void pnv_occ_raise_msg_irq(PnvOCC *occ)
+{
+ pnv_occ_set_misc(occ, occ->occmisc | OCCMISC_PSI_IRQ | OCCMISC_IRQ_SHMEM);
}
static uint64_t pnv_occ_power8_xscom_read(void *opaque, hwaddr addr,
@@ -129,8 +142,6 @@ static uint64_t pnv_occ_common_area_read(void *opaque, hwaddr addr,
case HWMON_SENSORS_MASK:
case OCC_SENSOR_LOC_GPU:
return 0x8e00;
- case SLW_IMAGE_BASE:
- return 0x1000000000000000;
}
return 0;
}
@@ -165,7 +176,11 @@ const MemoryRegionOps pnv_occ_sram_ops = {
static void pnv_occ_power8_class_init(ObjectClass *klass, void *data)
{
PnvOCCClass *poc = PNV_OCC_CLASS(klass);
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ dc->desc = "PowerNV OCC Controller (POWER8)";
+ poc->opal_shared_memory_offset = P8_HOMER_OPAL_DATA_OFFSET;
+ poc->opal_shared_memory_version = 0x02;
poc->xscom_size = PNV_XSCOM_OCC_SIZE;
poc->xscom_ops = &pnv_occ_power8_xscom_ops;
}
@@ -238,8 +253,11 @@ static void pnv_occ_power9_class_init(ObjectClass *klass, void *data)
DeviceClass *dc = DEVICE_CLASS(klass);
dc->desc = "PowerNV OCC Controller (POWER9)";
+ poc->opal_shared_memory_offset = P9_HOMER_OPAL_DATA_OFFSET;
+ poc->opal_shared_memory_version = 0x90;
poc->xscom_size = PNV9_XSCOM_OCC_SIZE;
poc->xscom_ops = &pnv_occ_power9_xscom_ops;
+ assert(!dc->user_creatable);
}
static const TypeInfo pnv_occ_power9_type_info = {
@@ -251,21 +269,50 @@ static const TypeInfo pnv_occ_power9_type_info = {
static void pnv_occ_power10_class_init(ObjectClass *klass, void *data)
{
+ PnvOCCClass *poc = PNV_OCC_CLASS(klass);
DeviceClass *dc = DEVICE_CLASS(klass);
dc->desc = "PowerNV OCC Controller (POWER10)";
+ poc->opal_shared_memory_offset = P9_HOMER_OPAL_DATA_OFFSET;
+ poc->opal_shared_memory_version = 0xA0;
+ poc->xscom_size = PNV9_XSCOM_OCC_SIZE;
+ poc->xscom_ops = &pnv_occ_power9_xscom_ops;
+ assert(!dc->user_creatable);
}
static const TypeInfo pnv_occ_power10_type_info = {
.name = TYPE_PNV10_OCC,
- .parent = TYPE_PNV9_OCC,
+ .parent = TYPE_PNV_OCC,
.class_init = pnv_occ_power10_class_init,
};
+static bool occ_init_homer_memory(PnvOCC *occ, Error **errp);
+static bool occ_model_tick(PnvOCC *occ);
+
+/* Relatively arbitrary */
+#define OCC_POLL_MS 100
+
+static void occ_state_machine_timer(void *opaque)
+{
+ PnvOCC *occ = opaque;
+ uint64_t next = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + OCC_POLL_MS;
+
+ if (occ_model_tick(occ)) {
+ timer_mod(&occ->state_machine_timer, next);
+ }
+}
+
static void pnv_occ_realize(DeviceState *dev, Error **errp)
{
PnvOCC *occ = PNV_OCC(dev);
PnvOCCClass *poc = PNV_OCC_GET_CLASS(occ);
+ PnvHomer *homer = occ->homer;
+
+ assert(homer);
+
+ if (!occ_init_homer_memory(occ, errp)) {
+ return;
+ }
occ->occmisc = 0;
@@ -279,14 +326,22 @@ static void pnv_occ_realize(DeviceState *dev, Error **errp)
PNV_OCC_SENSOR_DATA_BLOCK_SIZE);
qdev_init_gpio_out(dev, &occ->psi_irq, 1);
+
+ timer_init_ms(&occ->state_machine_timer, QEMU_CLOCK_VIRTUAL,
+ occ_state_machine_timer, occ);
+ timer_mod(&occ->state_machine_timer, OCC_POLL_MS);
}
+static const Property pnv_occ_properties[] = {
+ DEFINE_PROP_LINK("homer", PnvOCC, homer, TYPE_PNV_HOMER, PnvHomer *),
+};
+
static void pnv_occ_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = pnv_occ_realize;
- dc->desc = "PowerNV OCC Controller";
+ device_class_set_props(dc, pnv_occ_properties);
dc->user_creatable = false;
}
@@ -308,3 +363,570 @@ static void pnv_occ_register_types(void)
}
type_init(pnv_occ_register_types);
+
+/* From skiboot/hw/occ.c with tab to space conversion */
+/* OCC Communication Area for PStates */
+
+#define OPAL_DYNAMIC_DATA_OFFSET 0x0B80
+/* relative to HOMER_OPAL_DATA_OFFSET */
+
+#define MAX_PSTATES 256
+#define MAX_P8_CORES 12
+#define MAX_P9_CORES 24
+#define MAX_P10_CORES 32
+
+#define MAX_OPAL_CMD_DATA_LENGTH 4090
+#define MAX_OCC_RSP_DATA_LENGTH 8698
+
+#define P8_PIR_CORE_MASK 0xFFF8
+#define P9_PIR_QUAD_MASK 0xFFF0
+#define P10_PIR_CHIP_MASK 0x0000
+#define FREQ_MAX_IN_DOMAIN 0
+#define FREQ_MOST_RECENTLY_SET 1
+
+#define u8 uint8_t
+#define s8 int8_t
+#define u16 uint16_t
+#define s16 int16_t
+#define u32 uint32_t
+#define s32 int32_t
+#define u64 uint64_t
+#define s64 int64_t
+#define __be16 uint16_t
+#define __be32 uint32_t
+#ifndef __packed
+#define __packed QEMU_PACKED
+#endif /* !__packed */
+
+/**
+ * OCC-OPAL Shared Memory Region
+ *
+ * Reference document :
+ * https://github.com/open-power/docs/blob/master/occ/OCC_OpenPwr_FW_Interfaces.pdf
+ *
+ * Supported layout versions:
+ * - 0x01, 0x02 : P8
+ * https://github.com/open-power/occ/blob/master_p8/src/occ/proc/proc_pstate.h
+ *
+ * - 0x90 : P9
+ * https://github.com/open-power/occ/blob/master/src/occ_405/proc/proc_pstate.h
+ * In 0x90 the data is separated into :-
+ * -- Static Data (struct occ_pstate_table): Data is written once by OCC
+ * -- Dynamic Data (struct occ_dynamic_data): Data is updated at runtime
+ *
+ * struct occ_pstate_table - Pstate table layout
+ * @valid: Indicates if data is valid
+ * @version: Layout version [Major/Minor]
+ * @v2.throttle: Reason for limiting the max pstate
+ * @v9.occ_role: OCC role (Master/Slave)
+ * @v#.pstate_min: Minimum pstate ever allowed
+ * @v#.pstate_nom: Nominal pstate
+ * @v#.pstate_turbo: Maximum turbo pstate
+ * @v#.pstate_ultra_turbo: Maximum ultra turbo pstate and the maximum
+ * pstate ever allowed
+ * @v#.pstates: Pstate-id and frequency list from Pmax to Pmin
+ * @v#.pstates.id: Pstate-id
+ * @v#.pstates.flags: Pstate-flag(reserved)
+ * @v2.pstates.vdd: Voltage Identifier
+ * @v2.pstates.vcs: Voltage Identifier
+ * @v#.pstates.freq_khz: Frequency in KHz
+ * @v#.core_max[1..N]: Max pstate with N active cores
+ * @spare/reserved/pad: Unused data
+ */
+struct occ_pstate_table {
+ u8 valid;
+ u8 version;
+ union __packed {
+ struct __packed { /* Version 0x01 and 0x02 */
+ u8 throttle;
+ s8 pstate_min;
+ s8 pstate_nom;
+ s8 pstate_turbo;
+ s8 pstate_ultra_turbo;
+ u8 spare;
+ u64 reserved;
+ struct __packed {
+ s8 id;
+ u8 flags;
+ u8 vdd;
+ u8 vcs;
+ __be32 freq_khz;
+ } pstates[MAX_PSTATES];
+ s8 core_max[MAX_P8_CORES];
+ u8 pad[100];
+ } v2;
+ struct __packed { /* Version 0x90 */
+ u8 occ_role;
+ u8 pstate_min;
+ u8 pstate_nom;
+ u8 pstate_turbo;
+ u8 pstate_ultra_turbo;
+ u8 spare;
+ u64 reserved1;
+ u64 reserved2;
+ struct __packed {
+ u8 id;
+ u8 flags;
+ u16 reserved;
+ __be32 freq_khz;
+ } pstates[MAX_PSTATES];
+ u8 core_max[MAX_P9_CORES];
+ u8 pad[56];
+ } v9;
+ struct __packed { /* Version 0xA0 */
+ u8 occ_role;
+ u8 pstate_min;
+ u8 pstate_fixed_freq;
+ u8 pstate_base;
+ u8 pstate_ultra_turbo;
+ u8 pstate_fmax;
+ u8 minor;
+ u8 pstate_bottom_throttle;
+ u8 spare;
+ u8 spare1;
+ u32 reserved_32;
+ u64 reserved_64;
+ struct __packed {
+ u8 id;
+ u8 valid;
+ u16 reserved;
+ __be32 freq_khz;
+ } pstates[MAX_PSTATES];
+ u8 core_max[MAX_P10_CORES];
+ u8 pad[48];
+ } v10;
+ };
+} __packed;
+
+/**
+ * OPAL-OCC Command Response Interface
+ *
+ * OPAL-OCC Command Buffer
+ *
+ * ---------------------------------------------------------------------
+ * | OPAL | Cmd | OPAL | | Cmd Data | Cmd Data | OPAL |
+ * | Cmd | Request | OCC | Reserved | Length | Length | Cmd |
+ * | Flags | ID | Cmd | | (MSB) | (LSB) | Data... |
+ * ---------------------------------------------------------------------
+ * | ….OPAL Command Data up to max of Cmd Data Length 4090 bytes |
+ * | |
+ * ---------------------------------------------------------------------
+ *
+ * OPAL Command Flag
+ *
+ * -----------------------------------------------------------------
+ * | Bit 7 | Bit 6 | Bit 5 | Bit 4 | Bit 3 | Bit 2 | Bit 1 | Bit 0 |
+ * | (msb) | | | | | | | (lsb) |
+ * -----------------------------------------------------------------
+ * |Cmd | | | | | | | |
+ * |Ready | | | | | | | |
+ * -----------------------------------------------------------------
+ *
+ * struct opal_command_buffer - Defines the layout of OPAL command buffer
+ * @flag: Provides general status of the command
+ * @request_id: Token to identify request
+ * @cmd: Command sent
+ * @data_size: Command data length
+ * @data: Command specific data
+ * @spare: Unused byte
+ */
+struct opal_command_buffer {
+ u8 flag;
+ u8 request_id;
+ u8 cmd;
+ u8 spare;
+ __be16 data_size;
+ u8 data[MAX_OPAL_CMD_DATA_LENGTH];
+} __packed;
+
+/**
+ * OPAL-OCC Response Buffer
+ *
+ * ---------------------------------------------------------------------
+ * | OCC | Cmd | OPAL | Response | Rsp Data | Rsp Data | OPAL |
+ * | Rsp | Request | OCC | Status | Length | Length | Rsp |
+ * | Flags | ID | Cmd | | (MSB) | (LSB) | Data... |
+ * ---------------------------------------------------------------------
+ * | ….OPAL Response Data up to max of Rsp Data Length 8698 bytes |
+ * | |
+ * ---------------------------------------------------------------------
+ *
+ * OCC Response Flag
+ *
+ * -----------------------------------------------------------------
+ * | Bit 7 | Bit 6 | Bit 5 | Bit 4 | Bit 3 | Bit 2 | Bit 1 | Bit 0 |
+ * | (msb) | | | | | | | (lsb) |
+ * -----------------------------------------------------------------
+ * | | | | | | |OCC in | Rsp |
+ * | | | | | | |progress|Ready |
+ * -----------------------------------------------------------------
+ *
+ * struct occ_response_buffer - Defines the layout of OCC response buffer
+ * @flag: Provides general status of the response
+ * @request_id: Token to identify request
+ * @cmd: Command requested
+ * @status: Indicates success/failure status of
+ * the command
+ * @data_size: Response data length
+ * @data: Response specific data
+ */
+struct occ_response_buffer {
+ u8 flag;
+ u8 request_id;
+ u8 cmd;
+ u8 status;
+ __be16 data_size;
+ u8 data[MAX_OCC_RSP_DATA_LENGTH];
+} __packed;
+
+/**
+ * OCC-OPAL Shared Memory Interface Dynamic Data Vx90
+ *
+ * struct occ_dynamic_data - Contains runtime attributes
+ * @occ_state: Current state of OCC
+ * @major_version: Major version number
+ * @minor_version: Minor version number (backwards compatible)
+ * Version 1 indicates GPU presence populated
+ * @gpus_present: Bitmask of GPUs present (on systems where GPU
+ * presence is detected through APSS)
+ * @cpu_throttle: Reason for limiting the max pstate
+ * @mem_throttle: Reason for throttling memory
+ * @quick_pwr_drop: Indicates if QPD is asserted
+ * @pwr_shifting_ratio: Indicates the current percentage of power to
+ * take away from the CPU vs GPU when shifting
+ * power to maintain a power cap. Value of 100
+ * means take all power from CPU.
+ * @pwr_cap_type: Indicates type of power cap in effect
+ * @hard_min_pwr_cap: Hard minimum system power cap in Watts.
+ * Guaranteed unless hardware failure
+ * @max_pwr_cap: Maximum allowed system power cap in Watts
+ * @cur_pwr_cap: Current system power cap
+ * @soft_min_pwr_cap: Soft powercap minimum. OCC may or may not be
+ * able to maintain this
+ * @spare/reserved: Unused data
+ * @cmd: Opal Command Buffer
+ * @rsp: OCC Response Buffer
+ */
+struct occ_dynamic_data {
+ u8 occ_state;
+ u8 major_version;
+ u8 minor_version;
+ u8 gpus_present;
+ union __packed {
+ struct __packed { /* Version 0x90 */
+ u8 spare1;
+ } v9;
+ struct __packed { /* Version 0xA0 */
+ u8 wof_enabled;
+ } v10;
+ };
+ u8 cpu_throttle;
+ u8 mem_throttle;
+ u8 quick_pwr_drop;
+ u8 pwr_shifting_ratio;
+ u8 pwr_cap_type;
+ __be16 hard_min_pwr_cap;
+ __be16 max_pwr_cap;
+ __be16 cur_pwr_cap;
+ __be16 soft_min_pwr_cap;
+ u8 pad[110];
+ struct opal_command_buffer cmd;
+ struct occ_response_buffer rsp;
+} __packed;
+
+enum occ_response_status {
+ OCC_RSP_SUCCESS = 0x00,
+ OCC_RSP_INVALID_COMMAND = 0x11,
+ OCC_RSP_INVALID_CMD_DATA_LENGTH = 0x12,
+ OCC_RSP_INVALID_DATA = 0x13,
+ OCC_RSP_INTERNAL_ERROR = 0x15,
+};
+
+#define OCC_ROLE_SLAVE 0x00
+#define OCC_ROLE_MASTER 0x01
+
+#define OCC_FLAG_RSP_READY 0x01
+#define OCC_FLAG_CMD_IN_PROGRESS 0x02
+#define OPAL_FLAG_CMD_READY 0x80
+
+#define PCAP_MAX_POWER_W 100
+#define PCAP_SOFT_MIN_POWER_W 20
+#define PCAP_HARD_MIN_POWER_W 10
+
+static bool occ_write_static_data(PnvOCC *occ,
+ struct occ_pstate_table *static_data,
+ Error **errp)
+{
+ PnvOCCClass *poc = PNV_OCC_GET_CLASS(occ);
+ PnvHomer *homer = occ->homer;
+ hwaddr static_addr = homer->base + poc->opal_shared_memory_offset;
+ MemTxResult ret;
+
+ ret = address_space_write(&address_space_memory, static_addr,
+ MEMTXATTRS_UNSPECIFIED, static_data,
+ sizeof(*static_data));
+ if (ret != MEMTX_OK) {
+ error_setg(errp, "OCC: cannot write OCC-OPAL static data");
+ return false;
+ }
+
+ return true;
+}
+
+static bool occ_read_dynamic_data(PnvOCC *occ,
+ struct occ_dynamic_data *dynamic_data,
+ Error **errp)
+{
+ PnvOCCClass *poc = PNV_OCC_GET_CLASS(occ);
+ PnvHomer *homer = occ->homer;
+ hwaddr static_addr = homer->base + poc->opal_shared_memory_offset;
+ hwaddr dynamic_addr = static_addr + OPAL_DYNAMIC_DATA_OFFSET;
+ MemTxResult ret;
+
+ ret = address_space_read(&address_space_memory, dynamic_addr,
+ MEMTXATTRS_UNSPECIFIED, dynamic_data,
+ sizeof(*dynamic_data));
+ if (ret != MEMTX_OK) {
+ error_setg(errp, "OCC: cannot read OCC-OPAL dynamic data");
+ return false;
+ }
+
+ return true;
+}
+
+static bool occ_write_dynamic_data(PnvOCC *occ,
+ struct occ_dynamic_data *dynamic_data,
+ Error **errp)
+{
+ PnvOCCClass *poc = PNV_OCC_GET_CLASS(occ);
+ PnvHomer *homer = occ->homer;
+ hwaddr static_addr = homer->base + poc->opal_shared_memory_offset;
+ hwaddr dynamic_addr = static_addr + OPAL_DYNAMIC_DATA_OFFSET;
+ MemTxResult ret;
+
+ ret = address_space_write(&address_space_memory, dynamic_addr,
+ MEMTXATTRS_UNSPECIFIED, dynamic_data,
+ sizeof(*dynamic_data));
+ if (ret != MEMTX_OK) {
+ error_setg(errp, "OCC: cannot write OCC-OPAL dynamic data");
+ return false;
+ }
+
+ return true;
+}
+
+static bool occ_opal_send_response(PnvOCC *occ,
+ struct occ_dynamic_data *dynamic_data,
+ enum occ_response_status status,
+ uint8_t *data, uint16_t datalen)
+{
+ struct opal_command_buffer *cmd = &dynamic_data->cmd;
+ struct occ_response_buffer *rsp = &dynamic_data->rsp;
+
+ rsp->request_id = cmd->request_id;
+ rsp->cmd = cmd->cmd;
+ rsp->status = status;
+ rsp->data_size = cpu_to_be16(datalen);
+ if (datalen) {
+ memcpy(rsp->data, data, datalen);
+ }
+ if (!occ_write_dynamic_data(occ, dynamic_data, NULL)) {
+ return false;
+ }
+ /* Would be a memory barrier here */
+ rsp->flag = OCC_FLAG_RSP_READY;
+ cmd->flag = 0;
+ if (!occ_write_dynamic_data(occ, dynamic_data, NULL)) {
+ return false;
+ }
+
+ pnv_occ_raise_msg_irq(occ);
+
+ return true;
+}
+
+/* Returns error status */
+static bool occ_opal_process_command(PnvOCC *occ,
+ struct occ_dynamic_data *dynamic_data)
+{
+ struct opal_command_buffer *cmd = &dynamic_data->cmd;
+ struct occ_response_buffer *rsp = &dynamic_data->rsp;
+
+ if (rsp->flag == 0) {
+ /* Spend one "tick" in the in-progress state */
+ rsp->flag = OCC_FLAG_CMD_IN_PROGRESS;
+ return occ_write_dynamic_data(occ, dynamic_data, NULL);
+ } else if (rsp->flag != OCC_FLAG_CMD_IN_PROGRESS) {
+ return occ_opal_send_response(occ, dynamic_data,
+ OCC_RSP_INTERNAL_ERROR,
+ NULL, 0);
+ }
+
+ switch (cmd->cmd) {
+ case 0xD1: { /* SET_POWER_CAP */
+ uint16_t data;
+ if (be16_to_cpu(cmd->data_size) != 2) {
+ return occ_opal_send_response(occ, dynamic_data,
+ OCC_RSP_INVALID_CMD_DATA_LENGTH,
+ (uint8_t *)&dynamic_data->cur_pwr_cap,
+ 2);
+ }
+ data = be16_to_cpu(*(uint16_t *)cmd->data);
+ if (data == 0) { /* clear power cap */
+ dynamic_data->pwr_cap_type = 0x00; /* none */
+ data = PCAP_MAX_POWER_W;
+ } else {
+ dynamic_data->pwr_cap_type = 0x02; /* user set in-band */
+ if (data < PCAP_HARD_MIN_POWER_W) {
+ data = PCAP_HARD_MIN_POWER_W;
+ } else if (data > PCAP_MAX_POWER_W) {
+ data = PCAP_MAX_POWER_W;
+ }
+ }
+ dynamic_data->cur_pwr_cap = cpu_to_be16(data);
+ return occ_opal_send_response(occ, dynamic_data,
+ OCC_RSP_SUCCESS,
+ (uint8_t *)&dynamic_data->cur_pwr_cap, 2);
+ }
+
+ default:
+ return occ_opal_send_response(occ, dynamic_data,
+ OCC_RSP_INVALID_COMMAND,
+ NULL, 0);
+ }
+ g_assert_not_reached();
+}
+
+static bool occ_model_tick(PnvOCC *occ)
+{
+ struct occ_dynamic_data dynamic_data;
+
+ if (!occ_read_dynamic_data(occ, &dynamic_data, NULL)) {
+        /* Can't move the OCC state field to SAFE because we can't map it! */
+ qemu_log("OCC: failed to read HOMER data, shutting down OCC\n");
+ return false;
+ }
+ if (dynamic_data.cmd.flag == OPAL_FLAG_CMD_READY) {
+ if (!occ_opal_process_command(occ, &dynamic_data)) {
+ qemu_log("OCC: failed to write HOMER data, shutting down OCC\n");
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static bool occ_init_homer_memory(PnvOCC *occ, Error **errp)
+{
+ PnvOCCClass *poc = PNV_OCC_GET_CLASS(occ);
+ PnvHomer *homer = occ->homer;
+ PnvChip *chip = homer->chip;
+ struct occ_pstate_table static_data;
+ struct occ_dynamic_data dynamic_data;
+ int i;
+
+ memset(&static_data, 0, sizeof(static_data));
+ static_data.valid = 1;
+ static_data.version = poc->opal_shared_memory_version;
+ switch (poc->opal_shared_memory_version) {
+ case 0x02:
+ static_data.v2.throttle = 0;
+ static_data.v2.pstate_min = -2;
+ static_data.v2.pstate_nom = -1;
+ static_data.v2.pstate_turbo = -1;
+ static_data.v2.pstate_ultra_turbo = 0;
+ static_data.v2.pstates[0].id = 0;
+ static_data.v2.pstates[0].freq_khz = cpu_to_be32(4000000);
+ static_data.v2.pstates[1].id = -1;
+ static_data.v2.pstates[1].freq_khz = cpu_to_be32(3000000);
+ static_data.v2.pstates[2].id = -2;
+ static_data.v2.pstates[2].freq_khz = cpu_to_be32(2000000);
+ for (i = 0; i < chip->nr_cores; i++) {
+ static_data.v2.core_max[i] = 1;
+ }
+ break;
+ case 0x90:
+ if (chip->chip_id == 0) {
+ static_data.v9.occ_role = OCC_ROLE_MASTER;
+ } else {
+ static_data.v9.occ_role = OCC_ROLE_SLAVE;
+ }
+ static_data.v9.pstate_min = 2;
+ static_data.v9.pstate_nom = 1;
+ static_data.v9.pstate_turbo = 1;
+ static_data.v9.pstate_ultra_turbo = 0;
+ static_data.v9.pstates[0].id = 0;
+ static_data.v9.pstates[0].freq_khz = cpu_to_be32(4000000);
+ static_data.v9.pstates[1].id = 1;
+ static_data.v9.pstates[1].freq_khz = cpu_to_be32(3000000);
+ static_data.v9.pstates[2].id = 2;
+ static_data.v9.pstates[2].freq_khz = cpu_to_be32(2000000);
+ for (i = 0; i < chip->nr_cores; i++) {
+ static_data.v9.core_max[i] = 1;
+ }
+ break;
+ case 0xA0:
+ if (chip->chip_id == 0) {
+ static_data.v10.occ_role = OCC_ROLE_MASTER;
+ } else {
+ static_data.v10.occ_role = OCC_ROLE_SLAVE;
+ }
+ static_data.v10.pstate_min = 4;
+ static_data.v10.pstate_fixed_freq = 3;
+ static_data.v10.pstate_base = 2;
+ static_data.v10.pstate_ultra_turbo = 0;
+ static_data.v10.pstate_fmax = 1;
+ static_data.v10.minor = 0x01;
+ static_data.v10.pstates[0].valid = 1;
+ static_data.v10.pstates[0].id = 0;
+ static_data.v10.pstates[0].freq_khz = cpu_to_be32(4200000);
+ static_data.v10.pstates[1].valid = 1;
+ static_data.v10.pstates[1].id = 1;
+ static_data.v10.pstates[1].freq_khz = cpu_to_be32(4000000);
+ static_data.v10.pstates[2].valid = 1;
+ static_data.v10.pstates[2].id = 2;
+ static_data.v10.pstates[2].freq_khz = cpu_to_be32(3800000);
+ static_data.v10.pstates[3].valid = 1;
+ static_data.v10.pstates[3].id = 3;
+ static_data.v10.pstates[3].freq_khz = cpu_to_be32(3000000);
+ static_data.v10.pstates[4].valid = 1;
+ static_data.v10.pstates[4].id = 4;
+ static_data.v10.pstates[4].freq_khz = cpu_to_be32(2000000);
+ for (i = 0; i < chip->nr_cores; i++) {
+ static_data.v10.core_max[i] = 1;
+ }
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ if (!occ_write_static_data(occ, &static_data, errp)) {
+ return false;
+ }
+
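+ /*
+ * Default dynamic data: OCC reported active, current power cap at
+ * PCAP_MAX_POWER_W and no user cap type set.
+ */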
+ memset(&dynamic_data, 0, sizeof(dynamic_data));
+ dynamic_data.occ_state = 0x3; /* active */
+ dynamic_data.major_version = 0x0;
+ dynamic_data.hard_min_pwr_cap = cpu_to_be16(PCAP_HARD_MIN_POWER_W);
+ dynamic_data.max_pwr_cap = cpu_to_be16(PCAP_MAX_POWER_W);
+ dynamic_data.cur_pwr_cap = cpu_to_be16(PCAP_MAX_POWER_W);
+ dynamic_data.soft_min_pwr_cap = cpu_to_be16(PCAP_SOFT_MIN_POWER_W);
+ switch (poc->opal_shared_memory_version) {
+ case 0xA0:
+ dynamic_data.minor_version = 0x1;
+ dynamic_data.v10.wof_enabled = 0x1;
+ break;
+ case 0x90:
+ dynamic_data.minor_version = 0x1;
+ break;
+ case 0x02:
+ dynamic_data.minor_version = 0x0;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ if (!occ_write_dynamic_data(occ, &dynamic_data, errp)) {
+ return false;
+ }
+
+ return true;
+}
diff --git a/hw/ppc/ppc.c b/hw/ppc/ppc.c
index 90e3db5..3a80931 100644
--- a/hw/ppc/ppc.c
+++ b/hw/ppc/ppc.c
@@ -1123,16 +1123,21 @@ void cpu_ppc_tb_reset(CPUPPCState *env)
timer_del(tb_env->hdecr_timer);
ppc_set_irq(cpu, PPC_INTERRUPT_HDECR, 0);
tb_env->hdecr_next = 0;
+ _cpu_ppc_store_hdecr(cpu, 0, 0, 0, 64);
}
/*
* There is a bug in Linux 2.4 kernels:
* if a decrementer exception is pending when it enables msr_ee at startup,
* it's not ready to handle it...
+ *
+ * On machine reset, this is called before icount is reset, so for
+ * icount-mode, setting TB registers using now == qemu_clock_get_ns()
+ * results in them being garbage after icount is reset. Use an
+ * explicit now == 0 to get a consistent reset state.
*/
- cpu_ppc_store_decr(env, -1);
- cpu_ppc_store_hdecr(env, -1);
- cpu_ppc_store_purr(env, 0x0000000000000000ULL);
+ _cpu_ppc_store_decr(cpu, 0, 0, -1, 64);
+ _cpu_ppc_store_purr(env, 0, 0);
}
void cpu_ppc_tb_free(CPUPPCState *env)
diff --git a/hw/ppc/ppc405.h b/hw/ppc/ppc405.h
deleted file mode 100644
index 9a43126..0000000
--- a/hw/ppc/ppc405.h
+++ /dev/null
@@ -1,186 +0,0 @@
-/*
- * QEMU PowerPC 405 shared definitions
- *
- * Copyright (c) 2007 Jocelyn Mayer
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
- */
-
-#ifndef PPC405_H
-#define PPC405_H
-
-#include "qom/object.h"
-#include "hw/ppc/ppc4xx.h"
-#include "hw/intc/ppc-uic.h"
-#include "hw/i2c/ppc4xx_i2c.h"
-
-/* PLB to OPB bridge */
-#define TYPE_PPC405_POB "ppc405-pob"
-OBJECT_DECLARE_SIMPLE_TYPE(Ppc405PobState, PPC405_POB);
-struct Ppc405PobState {
- Ppc4xxDcrDeviceState parent_obj;
-
- uint32_t bear;
- uint32_t besr0;
- uint32_t besr1;
-};
-
-/* OPB arbitrer */
-#define TYPE_PPC405_OPBA "ppc405-opba"
-OBJECT_DECLARE_SIMPLE_TYPE(Ppc405OpbaState, PPC405_OPBA);
-struct Ppc405OpbaState {
- SysBusDevice parent_obj;
-
- MemoryRegion io;
- uint8_t cr;
- uint8_t pr;
-};
-
-/* DMA controller */
-#define TYPE_PPC405_DMA "ppc405-dma"
-OBJECT_DECLARE_SIMPLE_TYPE(Ppc405DmaState, PPC405_DMA);
-struct Ppc405DmaState {
- Ppc4xxDcrDeviceState parent_obj;
-
- qemu_irq irqs[4];
- uint32_t cr[4];
- uint32_t ct[4];
- uint32_t da[4];
- uint32_t sa[4];
- uint32_t sg[4];
- uint32_t sr;
- uint32_t sgc;
- uint32_t slp;
- uint32_t pol;
-};
-
-/* GPIO */
-#define TYPE_PPC405_GPIO "ppc405-gpio"
-OBJECT_DECLARE_SIMPLE_TYPE(Ppc405GpioState, PPC405_GPIO);
-struct Ppc405GpioState {
- SysBusDevice parent_obj;
-
- MemoryRegion io;
- uint32_t or;
- uint32_t tcr;
- uint32_t osrh;
- uint32_t osrl;
- uint32_t tsrh;
- uint32_t tsrl;
- uint32_t odr;
- uint32_t ir;
- uint32_t rr1;
- uint32_t isr1h;
- uint32_t isr1l;
-};
-
-/* On Chip Memory */
-#define TYPE_PPC405_OCM "ppc405-ocm"
-OBJECT_DECLARE_SIMPLE_TYPE(Ppc405OcmState, PPC405_OCM);
-struct Ppc405OcmState {
- Ppc4xxDcrDeviceState parent_obj;
-
- MemoryRegion ram;
- MemoryRegion isarc_ram;
- MemoryRegion dsarc_ram;
- uint32_t isarc;
- uint32_t isacntl;
- uint32_t dsarc;
- uint32_t dsacntl;
-};
-
-/* General purpose timers */
-#define TYPE_PPC405_GPT "ppc405-gpt"
-OBJECT_DECLARE_SIMPLE_TYPE(Ppc405GptState, PPC405_GPT);
-struct Ppc405GptState {
- SysBusDevice parent_obj;
-
- MemoryRegion iomem;
-
- int64_t tb_offset;
- uint32_t tb_freq;
- QEMUTimer *timer;
- qemu_irq irqs[5];
- uint32_t oe;
- uint32_t ol;
- uint32_t im;
- uint32_t is;
- uint32_t ie;
- uint32_t comp[5];
- uint32_t mask[5];
-};
-
-#define TYPE_PPC405_CPC "ppc405-cpc"
-OBJECT_DECLARE_SIMPLE_TYPE(Ppc405CpcState, PPC405_CPC);
-
-enum {
- PPC405EP_CPU_CLK = 0,
- PPC405EP_PLB_CLK = 1,
- PPC405EP_OPB_CLK = 2,
- PPC405EP_EBC_CLK = 3,
- PPC405EP_MAL_CLK = 4,
- PPC405EP_PCI_CLK = 5,
- PPC405EP_UART0_CLK = 6,
- PPC405EP_UART1_CLK = 7,
- PPC405EP_CLK_NB = 8,
-};
-
-struct Ppc405CpcState {
- Ppc4xxDcrDeviceState parent_obj;
-
- uint32_t sysclk;
- clk_setup_t clk_setup[PPC405EP_CLK_NB];
- uint32_t boot;
- uint32_t epctl;
- uint32_t pllmr[2];
- uint32_t ucr;
- uint32_t srr;
- uint32_t jtagid;
- uint32_t pci;
- /* Clock and power management */
- uint32_t er;
- uint32_t fr;
- uint32_t sr;
-};
-
-#define TYPE_PPC405_SOC "ppc405-soc"
-OBJECT_DECLARE_SIMPLE_TYPE(Ppc405SoCState, PPC405_SOC);
-
-struct Ppc405SoCState {
- /* Private */
- DeviceState parent_obj;
-
- /* Public */
- PowerPCCPU cpu;
- PPCUIC uic;
- Ppc405CpcState cpc;
- Ppc405GptState gpt;
- Ppc405OcmState ocm;
- Ppc405GpioState gpio;
- Ppc405DmaState dma;
- PPC4xxI2CState i2c;
- Ppc4xxEbcState ebc;
- Ppc405OpbaState opba;
- Ppc405PobState pob;
- Ppc4xxPlbState plb;
- Ppc4xxMalState mal;
- Ppc4xxSdramDdrState sdram;
-};
-
-#endif /* PPC405_H */
diff --git a/hw/ppc/ppc405_boards.c b/hw/ppc/ppc405_boards.c
deleted file mode 100644
index 969cac3..0000000
--- a/hw/ppc/ppc405_boards.c
+++ /dev/null
@@ -1,520 +0,0 @@
-/*
- * QEMU PowerPC 405 evaluation boards emulation
- *
- * Copyright (c) 2007 Jocelyn Mayer
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
- */
-
-#include "qemu/osdep.h"
-#include "qemu/units.h"
-#include "qapi/error.h"
-#include "qemu/datadir.h"
-#include "cpu.h"
-#include "hw/ppc/ppc.h"
-#include "hw/qdev-properties.h"
-#include "hw/sysbus.h"
-#include "ppc405.h"
-#include "hw/rtc/m48t59.h"
-#include "hw/block/flash.h"
-#include "system/qtest.h"
-#include "system/reset.h"
-#include "system/block-backend.h"
-#include "hw/boards.h"
-#include "qemu/error-report.h"
-#include "hw/loader.h"
-#include "qemu/cutils.h"
-#include "elf.h"
-
-#define BIOS_FILENAME "ppc405_rom.bin"
-#define BIOS_SIZE (2 * MiB)
-
-#define KERNEL_LOAD_ADDR 0x01000000
-#define INITRD_LOAD_ADDR 0x01800000
-
-#define PPC405EP_SDRAM_BASE 0x00000000
-#define PPC405EP_SRAM_BASE 0xFFF00000
-#define PPC405EP_SRAM_SIZE (512 * KiB)
-
-#define USE_FLASH_BIOS
-
-#define TYPE_PPC405_MACHINE MACHINE_TYPE_NAME("ppc405")
-OBJECT_DECLARE_SIMPLE_TYPE(Ppc405MachineState, PPC405_MACHINE);
-
-struct Ppc405MachineState {
- /* Private */
- MachineState parent_obj;
- /* Public */
-
- Ppc405SoCState soc;
-};
-
-/* CPU reset handler when booting directly from a loaded kernel */
-static struct boot_info {
- uint32_t entry;
- uint32_t bdloc;
- uint32_t initrd_base;
- uint32_t initrd_size;
- uint32_t cmdline_base;
- uint32_t cmdline_size;
-} boot_info;
-
-static void main_cpu_reset(void *opaque)
-{
- PowerPCCPU *cpu = opaque;
- CPUPPCState *env = &cpu->env;
- struct boot_info *bi = env->load_info;
-
- cpu_reset(CPU(cpu));
-
- /* stack: top of sram */
- env->gpr[1] = PPC405EP_SRAM_BASE + PPC405EP_SRAM_SIZE - 8;
-
- /* Tune our boot state */
- env->gpr[3] = bi->bdloc;
- env->gpr[4] = bi->initrd_base;
- env->gpr[5] = bi->initrd_base + bi->initrd_size;
- env->gpr[6] = bi->cmdline_base;
- env->gpr[7] = bi->cmdline_size;
-
- env->nip = bi->entry;
-}
-
-/* Bootinfo as set-up by u-boot */
-typedef struct {
- uint32_t bi_memstart;
- uint32_t bi_memsize;
- uint32_t bi_flashstart;
- uint32_t bi_flashsize;
- uint32_t bi_flashoffset; /* 0x10 */
- uint32_t bi_sramstart;
- uint32_t bi_sramsize;
- uint32_t bi_bootflags;
- uint32_t bi_ipaddr; /* 0x20 */
- uint8_t bi_enetaddr[6];
- uint16_t bi_ethspeed;
- uint32_t bi_intfreq;
- uint32_t bi_busfreq; /* 0x30 */
- uint32_t bi_baudrate;
- uint8_t bi_s_version[4];
- uint8_t bi_r_version[32];
- uint32_t bi_procfreq;
- uint32_t bi_plb_busfreq;
- uint32_t bi_pci_busfreq;
- uint8_t bi_pci_enetaddr[6];
- uint8_t bi_pci_enetaddr2[6]; /* PPC405EP specific */
- uint32_t bi_opbfreq;
- uint32_t bi_iic_fast[2];
-} ppc4xx_bd_info_t;
-
-static void ppc405_set_default_bootinfo(ppc4xx_bd_info_t *bd,
- ram_addr_t ram_size)
-{
- memset(bd, 0, sizeof(*bd));
-
- bd->bi_memstart = PPC405EP_SDRAM_BASE;
- bd->bi_memsize = ram_size;
- bd->bi_sramstart = PPC405EP_SRAM_BASE;
- bd->bi_sramsize = PPC405EP_SRAM_SIZE;
- bd->bi_bootflags = 0;
- bd->bi_intfreq = 133333333;
- bd->bi_busfreq = 33333333;
- bd->bi_baudrate = 115200;
- bd->bi_s_version[0] = 'Q';
- bd->bi_s_version[1] = 'M';
- bd->bi_s_version[2] = 'U';
- bd->bi_s_version[3] = '\0';
- bd->bi_r_version[0] = 'Q';
- bd->bi_r_version[1] = 'E';
- bd->bi_r_version[2] = 'M';
- bd->bi_r_version[3] = 'U';
- bd->bi_r_version[4] = '\0';
- bd->bi_procfreq = 133333333;
- bd->bi_plb_busfreq = 33333333;
- bd->bi_pci_busfreq = 33333333;
- bd->bi_opbfreq = 33333333;
-}
-
-static ram_addr_t __ppc405_set_bootinfo(CPUPPCState *env, ppc4xx_bd_info_t *bd)
-{
- CPUState *cs = env_cpu(env);
- ram_addr_t bdloc;
- int i, n;
-
- /* We put the bd structure at the top of memory */
- if (bd->bi_memsize >= 0x01000000UL) {
- bdloc = 0x01000000UL - sizeof(ppc4xx_bd_info_t);
- } else {
- bdloc = bd->bi_memsize - sizeof(ppc4xx_bd_info_t);
- }
- stl_be_phys(cs->as, bdloc + 0x00, bd->bi_memstart);
- stl_be_phys(cs->as, bdloc + 0x04, bd->bi_memsize);
- stl_be_phys(cs->as, bdloc + 0x08, bd->bi_flashstart);
- stl_be_phys(cs->as, bdloc + 0x0C, bd->bi_flashsize);
- stl_be_phys(cs->as, bdloc + 0x10, bd->bi_flashoffset);
- stl_be_phys(cs->as, bdloc + 0x14, bd->bi_sramstart);
- stl_be_phys(cs->as, bdloc + 0x18, bd->bi_sramsize);
- stl_be_phys(cs->as, bdloc + 0x1C, bd->bi_bootflags);
- stl_be_phys(cs->as, bdloc + 0x20, bd->bi_ipaddr);
- for (i = 0; i < 6; i++) {
- stb_phys(cs->as, bdloc + 0x24 + i, bd->bi_enetaddr[i]);
- }
- stw_be_phys(cs->as, bdloc + 0x2A, bd->bi_ethspeed);
- stl_be_phys(cs->as, bdloc + 0x2C, bd->bi_intfreq);
- stl_be_phys(cs->as, bdloc + 0x30, bd->bi_busfreq);
- stl_be_phys(cs->as, bdloc + 0x34, bd->bi_baudrate);
- for (i = 0; i < 4; i++) {
- stb_phys(cs->as, bdloc + 0x38 + i, bd->bi_s_version[i]);
- }
- for (i = 0; i < 32; i++) {
- stb_phys(cs->as, bdloc + 0x3C + i, bd->bi_r_version[i]);
- }
- stl_be_phys(cs->as, bdloc + 0x5C, bd->bi_procfreq);
- stl_be_phys(cs->as, bdloc + 0x60, bd->bi_plb_busfreq);
- stl_be_phys(cs->as, bdloc + 0x64, bd->bi_pci_busfreq);
- for (i = 0; i < 6; i++) {
- stb_phys(cs->as, bdloc + 0x68 + i, bd->bi_pci_enetaddr[i]);
- }
- n = 0x70; /* includes 2 bytes hole */
- for (i = 0; i < 6; i++) {
- stb_phys(cs->as, bdloc + n++, bd->bi_pci_enetaddr2[i]);
- }
- stl_be_phys(cs->as, bdloc + n, bd->bi_opbfreq);
- n += 4;
- for (i = 0; i < 2; i++) {
- stl_be_phys(cs->as, bdloc + n, bd->bi_iic_fast[i]);
- n += 4;
- }
-
- return bdloc;
-}
-
-static ram_addr_t ppc405_set_bootinfo(CPUPPCState *env, ram_addr_t ram_size)
-{
- ppc4xx_bd_info_t bd;
-
- memset(&bd, 0, sizeof(bd));
-
- ppc405_set_default_bootinfo(&bd, ram_size);
-
- return __ppc405_set_bootinfo(env, &bd);
-}
-
-static void boot_from_kernel(MachineState *machine, PowerPCCPU *cpu)
-{
- CPUPPCState *env = &cpu->env;
- hwaddr boot_entry;
- hwaddr kernel_base;
- int kernel_size;
- hwaddr initrd_base;
- int initrd_size;
- ram_addr_t bdloc;
- int len;
-
- bdloc = ppc405_set_bootinfo(env, machine->ram_size);
- boot_info.bdloc = bdloc;
-
- kernel_size = load_elf(machine->kernel_filename, NULL, NULL, NULL,
- &boot_entry, &kernel_base, NULL, NULL,
- ELFDATA2MSB, PPC_ELF_MACHINE, 0, 0);
- if (kernel_size < 0) {
- error_report("Could not load kernel '%s' : %s",
- machine->kernel_filename, load_elf_strerror(kernel_size));
- exit(1);
- }
- boot_info.entry = boot_entry;
-
- /* load initrd */
- if (machine->initrd_filename) {
- initrd_base = INITRD_LOAD_ADDR;
- initrd_size = load_image_targphys(machine->initrd_filename, initrd_base,
- machine->ram_size - initrd_base);
- if (initrd_size < 0) {
- error_report("could not load initial ram disk '%s'",
- machine->initrd_filename);
- exit(1);
- }
-
- boot_info.initrd_base = initrd_base;
- boot_info.initrd_size = initrd_size;
- }
-
- if (machine->kernel_cmdline) {
- len = strlen(machine->kernel_cmdline);
- bdloc -= ((len + 255) & ~255);
- cpu_physical_memory_write(bdloc, machine->kernel_cmdline, len + 1);
- boot_info.cmdline_base = bdloc;
- boot_info.cmdline_size = bdloc + len;
- }
-
- /* Install our custom reset handler to start from Linux */
- qemu_register_reset(main_cpu_reset, cpu);
- env->load_info = &boot_info;
-}
-
-static void ppc405_init(MachineState *machine)
-{
- Ppc405MachineState *ppc405 = PPC405_MACHINE(machine);
- const char *kernel_filename = machine->kernel_filename;
- MemoryRegion *sysmem = get_system_memory();
-
- object_initialize_child(OBJECT(machine), "soc", &ppc405->soc,
- TYPE_PPC405_SOC);
- object_property_set_link(OBJECT(&ppc405->soc), "dram",
- OBJECT(machine->ram), &error_abort);
- object_property_set_uint(OBJECT(&ppc405->soc), "sys-clk", 33333333,
- &error_abort);
- qdev_realize(DEVICE(&ppc405->soc), NULL, &error_fatal);
-
- /* allocate and load BIOS */
- if (machine->firmware) {
- MemoryRegion *bios = g_new(MemoryRegion, 1);
- g_autofree char *filename = qemu_find_file(QEMU_FILE_TYPE_BIOS,
- machine->firmware);
- long bios_size;
-
- memory_region_init_rom(bios, NULL, "ef405ep.bios", BIOS_SIZE,
- &error_fatal);
-
- if (!filename) {
- error_report("Could not find firmware '%s'", machine->firmware);
- exit(1);
- }
-
- bios_size = load_image_size(filename,
- memory_region_get_ram_ptr(bios),
- BIOS_SIZE);
- if (bios_size < 0) {
- error_report("Could not load PowerPC BIOS '%s'", machine->firmware);
- exit(1);
- }
-
- bios_size = (bios_size + 0xfff) & ~0xfff;
- memory_region_add_subregion(sysmem, (uint32_t)(-bios_size), bios);
- }
-
- /* Load kernel and initrd using U-Boot images */
- if (kernel_filename && machine->firmware) {
- target_ulong kernel_base, initrd_base;
- long kernel_size, initrd_size;
-
- kernel_base = KERNEL_LOAD_ADDR;
- kernel_size = load_image_targphys(kernel_filename, kernel_base,
- machine->ram_size - kernel_base);
- if (kernel_size < 0) {
- error_report("could not load kernel '%s'", kernel_filename);
- exit(1);
- }
-
- /* load initrd */
- if (machine->initrd_filename) {
- initrd_base = INITRD_LOAD_ADDR;
- initrd_size = load_image_targphys(machine->initrd_filename,
- initrd_base,
- machine->ram_size - initrd_base);
- if (initrd_size < 0) {
- error_report("could not load initial ram disk '%s'",
- machine->initrd_filename);
- exit(1);
- }
- }
-
- /* Load ELF kernel and rootfs.cpio */
- } else if (kernel_filename && !machine->firmware) {
- ppc4xx_sdram_ddr_enable(&ppc405->soc.sdram);
- boot_from_kernel(machine, &ppc405->soc.cpu);
- }
-}
-
-static void ppc405_machine_class_init(ObjectClass *oc, void *data)
-{
- MachineClass *mc = MACHINE_CLASS(oc);
-
- mc->desc = "PPC405 generic machine";
- mc->init = ppc405_init;
- mc->default_ram_size = 128 * MiB;
- mc->default_ram_id = "ppc405.ram";
- mc->deprecation_reason = "machine is old and unmaintained";
-}
-
-static const TypeInfo ppc405_machine_type = {
- .name = TYPE_PPC405_MACHINE,
- .parent = TYPE_MACHINE,
- .instance_size = sizeof(Ppc405MachineState),
- .class_init = ppc405_machine_class_init,
- .abstract = true,
-};
-
-/*****************************************************************************/
-/* PPC405EP reference board (IBM) */
-/*
- * Standalone board with:
- * - PowerPC 405EP CPU
- * - SDRAM (0x00000000)
- * - Flash (0xFFF80000)
- * - SRAM (0xFFF00000)
- * - NVRAM (0xF0000000)
- * - FPGA (0xF0300000)
- */
-
-#define PPC405EP_NVRAM_BASE 0xF0000000
-#define PPC405EP_FPGA_BASE 0xF0300000
-#define PPC405EP_FLASH_BASE 0xFFF80000
-
-#define TYPE_REF405EP_FPGA "ref405ep-fpga"
-OBJECT_DECLARE_SIMPLE_TYPE(Ref405epFpgaState, REF405EP_FPGA);
-struct Ref405epFpgaState {
- SysBusDevice parent_obj;
-
- MemoryRegion iomem;
-
- uint8_t reg0;
- uint8_t reg1;
-};
-
-static uint64_t ref405ep_fpga_readb(void *opaque, hwaddr addr, unsigned size)
-{
- Ref405epFpgaState *fpga = opaque;
- uint32_t ret;
-
- switch (addr) {
- case 0x0:
- ret = fpga->reg0;
- break;
- case 0x1:
- ret = fpga->reg1;
- break;
- default:
- ret = 0;
- break;
- }
-
- return ret;
-}
-
-static void ref405ep_fpga_writeb(void *opaque, hwaddr addr, uint64_t value,
- unsigned size)
-{
- Ref405epFpgaState *fpga = opaque;
-
- switch (addr) {
- case 0x0:
- /* Read only */
- break;
- case 0x1:
- fpga->reg1 = value;
- break;
- default:
- break;
- }
-}
-
-static const MemoryRegionOps ref405ep_fpga_ops = {
- .read = ref405ep_fpga_readb,
- .write = ref405ep_fpga_writeb,
- .impl.min_access_size = 1,
- .impl.max_access_size = 1,
- .valid.min_access_size = 1,
- .valid.max_access_size = 4,
- .endianness = DEVICE_BIG_ENDIAN,
-};
-
-static void ref405ep_fpga_reset(DeviceState *dev)
-{
- Ref405epFpgaState *fpga = REF405EP_FPGA(dev);
-
- fpga->reg0 = 0x00;
- fpga->reg1 = 0x0F;
-}
-
-static void ref405ep_fpga_realize(DeviceState *dev, Error **errp)
-{
- Ref405epFpgaState *s = REF405EP_FPGA(dev);
-
- memory_region_init_io(&s->iomem, OBJECT(s), &ref405ep_fpga_ops, s,
- "fpga", 0x00000100);
- sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->iomem);
-}
-
-static void ref405ep_fpga_class_init(ObjectClass *oc, void *data)
-{
- DeviceClass *dc = DEVICE_CLASS(oc);
-
- dc->realize = ref405ep_fpga_realize;
- device_class_set_legacy_reset(dc, ref405ep_fpga_reset);
- /* Reason: only works as part of a ppc405 board */
- dc->user_creatable = false;
-}
-
-static const TypeInfo ref405ep_fpga_type = {
- .name = TYPE_REF405EP_FPGA,
- .parent = TYPE_SYS_BUS_DEVICE,
- .instance_size = sizeof(Ref405epFpgaState),
- .class_init = ref405ep_fpga_class_init,
-};
-
-static void ref405ep_init(MachineState *machine)
-{
- DeviceState *dev;
- SysBusDevice *s;
- MemoryRegion *sram = g_new(MemoryRegion, 1);
-
- ppc405_init(machine);
-
- /* allocate SRAM */
- memory_region_init_ram(sram, NULL, "ref405ep.sram", PPC405EP_SRAM_SIZE,
- &error_fatal);
- memory_region_add_subregion(get_system_memory(), PPC405EP_SRAM_BASE, sram);
-
- /* Register FPGA */
- dev = qdev_new(TYPE_REF405EP_FPGA);
- object_property_add_child(OBJECT(machine), "fpga", OBJECT(dev));
- sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
- sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, PPC405EP_FPGA_BASE);
-
- /* Register NVRAM */
- dev = qdev_new("sysbus-m48t08");
- qdev_prop_set_int32(dev, "base-year", 1968);
- s = SYS_BUS_DEVICE(dev);
- sysbus_realize_and_unref(s, &error_fatal);
- sysbus_mmio_map(s, 0, PPC405EP_NVRAM_BASE);
-}
-
-static void ref405ep_class_init(ObjectClass *oc, void *data)
-{
- MachineClass *mc = MACHINE_CLASS(oc);
-
- mc->desc = "ref405ep";
- mc->init = ref405ep_init;
-}
-
-static const TypeInfo ref405ep_type = {
- .name = MACHINE_TYPE_NAME("ref405ep"),
- .parent = TYPE_PPC405_MACHINE,
- .class_init = ref405ep_class_init,
-};
-
-static void ppc405_machine_init(void)
-{
- type_register_static(&ppc405_machine_type);
- type_register_static(&ref405ep_type);
- type_register_static(&ref405ep_fpga_type);
-}
-
-type_init(ppc405_machine_init)
diff --git a/hw/ppc/ppc405_uc.c b/hw/ppc/ppc405_uc.c
deleted file mode 100644
index 8250824..0000000
--- a/hw/ppc/ppc405_uc.c
+++ /dev/null
@@ -1,1216 +0,0 @@
-/*
- * QEMU PowerPC 405 embedded processors emulation
- *
- * Copyright (c) 2007 Jocelyn Mayer
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
- */
-
-#include "qemu/osdep.h"
-#include "qemu/units.h"
-#include "qapi/error.h"
-#include "qemu/log.h"
-#include "cpu.h"
-#include "hw/ppc/ppc.h"
-#include "hw/i2c/ppc4xx_i2c.h"
-#include "hw/irq.h"
-#include "hw/qdev-properties.h"
-#include "ppc405.h"
-#include "hw/char/serial-mm.h"
-#include "qemu/timer.h"
-#include "system/reset.h"
-#include "system/system.h"
-#include "exec/address-spaces.h"
-#include "hw/intc/ppc-uic.h"
-#include "trace.h"
-
-/*****************************************************************************/
-/* Shared peripherals */
-
-/*****************************************************************************/
-/* PLB to OPB bridge */
-enum {
- POB0_BESR0 = 0x0A0,
- POB0_BESR1 = 0x0A2,
- POB0_BEAR = 0x0A4,
-};
-
-static uint32_t dcr_read_pob(void *opaque, int dcrn)
-{
- Ppc405PobState *pob = opaque;
- uint32_t ret;
-
- switch (dcrn) {
- case POB0_BEAR:
- ret = pob->bear;
- break;
- case POB0_BESR0:
- ret = pob->besr0;
- break;
- case POB0_BESR1:
- ret = pob->besr1;
- break;
- default:
- /* Avoid gcc warning */
- ret = 0;
- break;
- }
-
- return ret;
-}
-
-static void dcr_write_pob(void *opaque, int dcrn, uint32_t val)
-{
- Ppc405PobState *pob = opaque;
-
- switch (dcrn) {
- case POB0_BEAR:
- /* Read only */
- break;
- case POB0_BESR0:
- /* Write-clear */
- pob->besr0 &= ~val;
- break;
- case POB0_BESR1:
- /* Write-clear */
- pob->besr1 &= ~val;
- break;
- }
-}
-
-static void ppc405_pob_reset(DeviceState *dev)
-{
- Ppc405PobState *pob = PPC405_POB(dev);
-
- /* No error */
- pob->bear = 0x00000000;
- pob->besr0 = 0x0000000;
- pob->besr1 = 0x0000000;
-}
-
-static void ppc405_pob_realize(DeviceState *dev, Error **errp)
-{
- Ppc405PobState *pob = PPC405_POB(dev);
- Ppc4xxDcrDeviceState *dcr = PPC4xx_DCR_DEVICE(dev);
-
- ppc4xx_dcr_register(dcr, POB0_BEAR, pob, &dcr_read_pob, &dcr_write_pob);
- ppc4xx_dcr_register(dcr, POB0_BESR0, pob, &dcr_read_pob, &dcr_write_pob);
- ppc4xx_dcr_register(dcr, POB0_BESR1, pob, &dcr_read_pob, &dcr_write_pob);
-}
-
-static void ppc405_pob_class_init(ObjectClass *oc, void *data)
-{
- DeviceClass *dc = DEVICE_CLASS(oc);
-
- dc->realize = ppc405_pob_realize;
- device_class_set_legacy_reset(dc, ppc405_pob_reset);
- /* Reason: only works as function of a ppc4xx SoC */
- dc->user_creatable = false;
-}
-
-/*****************************************************************************/
-/* OPB arbitrer */
-static uint64_t opba_readb(void *opaque, hwaddr addr, unsigned size)
-{
- Ppc405OpbaState *opba = opaque;
- uint32_t ret;
-
- switch (addr) {
- case 0x00:
- ret = opba->cr;
- break;
- case 0x01:
- ret = opba->pr;
- break;
- default:
- ret = 0x00;
- break;
- }
-
- trace_opba_readb(addr, ret);
- return ret;
-}
-
-static void opba_writeb(void *opaque, hwaddr addr, uint64_t value,
- unsigned size)
-{
- Ppc405OpbaState *opba = opaque;
-
- trace_opba_writeb(addr, value);
-
- switch (addr) {
- case 0x00:
- opba->cr = value & 0xF8;
- break;
- case 0x01:
- opba->pr = value & 0xFF;
- break;
- default:
- break;
- }
-}
-static const MemoryRegionOps opba_ops = {
- .read = opba_readb,
- .write = opba_writeb,
- .impl.min_access_size = 1,
- .impl.max_access_size = 1,
- .valid.min_access_size = 1,
- .valid.max_access_size = 4,
- .endianness = DEVICE_BIG_ENDIAN,
-};
-
-static void ppc405_opba_reset(DeviceState *dev)
-{
- Ppc405OpbaState *opba = PPC405_OPBA(dev);
-
- opba->cr = 0x00; /* No dynamic priorities - park disabled */
- opba->pr = 0x11;
-}
-
-static void ppc405_opba_realize(DeviceState *dev, Error **errp)
-{
- Ppc405OpbaState *s = PPC405_OPBA(dev);
-
- memory_region_init_io(&s->io, OBJECT(s), &opba_ops, s, "opba", 2);
- sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->io);
-}
-
-static void ppc405_opba_class_init(ObjectClass *oc, void *data)
-{
- DeviceClass *dc = DEVICE_CLASS(oc);
-
- dc->realize = ppc405_opba_realize;
- device_class_set_legacy_reset(dc, ppc405_opba_reset);
- /* Reason: only works as function of a ppc4xx SoC */
- dc->user_creatable = false;
-}
-
-/*****************************************************************************/
-/* Code decompression controller */
-/* XXX: TODO */
-
-/*****************************************************************************/
-/* DMA controller */
-enum {
- DMA0_CR0 = 0x100,
- DMA0_CT0 = 0x101,
- DMA0_DA0 = 0x102,
- DMA0_SA0 = 0x103,
- DMA0_SG0 = 0x104,
- DMA0_CR1 = 0x108,
- DMA0_CT1 = 0x109,
- DMA0_DA1 = 0x10A,
- DMA0_SA1 = 0x10B,
- DMA0_SG1 = 0x10C,
- DMA0_CR2 = 0x110,
- DMA0_CT2 = 0x111,
- DMA0_DA2 = 0x112,
- DMA0_SA2 = 0x113,
- DMA0_SG2 = 0x114,
- DMA0_CR3 = 0x118,
- DMA0_CT3 = 0x119,
- DMA0_DA3 = 0x11A,
- DMA0_SA3 = 0x11B,
- DMA0_SG3 = 0x11C,
- DMA0_SR = 0x120,
- DMA0_SGC = 0x123,
- DMA0_SLP = 0x125,
- DMA0_POL = 0x126,
-};
-
-static uint32_t dcr_read_dma(void *opaque, int dcrn)
-{
- return 0;
-}
-
-static void dcr_write_dma(void *opaque, int dcrn, uint32_t val)
-{
-}
-
-static void ppc405_dma_reset(DeviceState *dev)
-{
- Ppc405DmaState *dma = PPC405_DMA(dev);
- int i;
-
- for (i = 0; i < 4; i++) {
- dma->cr[i] = 0x00000000;
- dma->ct[i] = 0x00000000;
- dma->da[i] = 0x00000000;
- dma->sa[i] = 0x00000000;
- dma->sg[i] = 0x00000000;
- }
- dma->sr = 0x00000000;
- dma->sgc = 0x00000000;
- dma->slp = 0x7C000000;
- dma->pol = 0x00000000;
-}
-
-static void ppc405_dma_realize(DeviceState *dev, Error **errp)
-{
- Ppc405DmaState *dma = PPC405_DMA(dev);
- Ppc4xxDcrDeviceState *dcr = PPC4xx_DCR_DEVICE(dev);
- int i;
-
- for (i = 0; i < ARRAY_SIZE(dma->irqs); i++) {
- sysbus_init_irq(SYS_BUS_DEVICE(dma), &dma->irqs[i]);
- }
-
- ppc4xx_dcr_register(dcr, DMA0_CR0, dma, &dcr_read_dma, &dcr_write_dma);
- ppc4xx_dcr_register(dcr, DMA0_CT0, dma, &dcr_read_dma, &dcr_write_dma);
- ppc4xx_dcr_register(dcr, DMA0_DA0, dma, &dcr_read_dma, &dcr_write_dma);
- ppc4xx_dcr_register(dcr, DMA0_SA0, dma, &dcr_read_dma, &dcr_write_dma);
- ppc4xx_dcr_register(dcr, DMA0_SG0, dma, &dcr_read_dma, &dcr_write_dma);
- ppc4xx_dcr_register(dcr, DMA0_CR1, dma, &dcr_read_dma, &dcr_write_dma);
- ppc4xx_dcr_register(dcr, DMA0_CT1, dma, &dcr_read_dma, &dcr_write_dma);
- ppc4xx_dcr_register(dcr, DMA0_DA1, dma, &dcr_read_dma, &dcr_write_dma);
- ppc4xx_dcr_register(dcr, DMA0_SA1, dma, &dcr_read_dma, &dcr_write_dma);
- ppc4xx_dcr_register(dcr, DMA0_SG1, dma, &dcr_read_dma, &dcr_write_dma);
- ppc4xx_dcr_register(dcr, DMA0_CR2, dma, &dcr_read_dma, &dcr_write_dma);
- ppc4xx_dcr_register(dcr, DMA0_CT2, dma, &dcr_read_dma, &dcr_write_dma);
- ppc4xx_dcr_register(dcr, DMA0_DA2, dma, &dcr_read_dma, &dcr_write_dma);
- ppc4xx_dcr_register(dcr, DMA0_SA2, dma, &dcr_read_dma, &dcr_write_dma);
- ppc4xx_dcr_register(dcr, DMA0_SG2, dma, &dcr_read_dma, &dcr_write_dma);
- ppc4xx_dcr_register(dcr, DMA0_CR3, dma, &dcr_read_dma, &dcr_write_dma);
- ppc4xx_dcr_register(dcr, DMA0_CT3, dma, &dcr_read_dma, &dcr_write_dma);
- ppc4xx_dcr_register(dcr, DMA0_DA3, dma, &dcr_read_dma, &dcr_write_dma);
- ppc4xx_dcr_register(dcr, DMA0_SA3, dma, &dcr_read_dma, &dcr_write_dma);
- ppc4xx_dcr_register(dcr, DMA0_SG3, dma, &dcr_read_dma, &dcr_write_dma);
- ppc4xx_dcr_register(dcr, DMA0_SR, dma, &dcr_read_dma, &dcr_write_dma);
- ppc4xx_dcr_register(dcr, DMA0_SGC, dma, &dcr_read_dma, &dcr_write_dma);
- ppc4xx_dcr_register(dcr, DMA0_SLP, dma, &dcr_read_dma, &dcr_write_dma);
- ppc4xx_dcr_register(dcr, DMA0_POL, dma, &dcr_read_dma, &dcr_write_dma);
-}
-
-static void ppc405_dma_class_init(ObjectClass *oc, void *data)
-{
- DeviceClass *dc = DEVICE_CLASS(oc);
-
- dc->realize = ppc405_dma_realize;
- device_class_set_legacy_reset(dc, ppc405_dma_reset);
- /* Reason: only works as function of a ppc4xx SoC */
- dc->user_creatable = false;
-}
-
-/*****************************************************************************/
-/* GPIO */
-static uint64_t ppc405_gpio_read(void *opaque, hwaddr addr, unsigned size)
-{
- trace_ppc405_gpio_read(addr, size);
- return 0;
-}
-
-static void ppc405_gpio_write(void *opaque, hwaddr addr, uint64_t value,
- unsigned size)
-{
- trace_ppc405_gpio_write(addr, size, value);
-}
-
-static const MemoryRegionOps ppc405_gpio_ops = {
- .read = ppc405_gpio_read,
- .write = ppc405_gpio_write,
- .endianness = DEVICE_NATIVE_ENDIAN,
-};
-
-static void ppc405_gpio_realize(DeviceState *dev, Error **errp)
-{
- Ppc405GpioState *s = PPC405_GPIO(dev);
-
- memory_region_init_io(&s->io, OBJECT(s), &ppc405_gpio_ops, s, "gpio",
- 0x38);
- sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->io);
-}
-
-static void ppc405_gpio_class_init(ObjectClass *oc, void *data)
-{
- DeviceClass *dc = DEVICE_CLASS(oc);
-
- dc->realize = ppc405_gpio_realize;
- /* Reason: only works as function of a ppc4xx SoC */
- dc->user_creatable = false;
-}
-
-/*****************************************************************************/
-/* On Chip Memory */
-enum {
- OCM0_ISARC = 0x018,
- OCM0_ISACNTL = 0x019,
- OCM0_DSARC = 0x01A,
- OCM0_DSACNTL = 0x01B,
-};
-
-static void ocm_update_mappings(Ppc405OcmState *ocm,
- uint32_t isarc, uint32_t isacntl,
- uint32_t dsarc, uint32_t dsacntl)
-{
- trace_ocm_update_mappings(isarc, isacntl, dsarc, dsacntl, ocm->isarc,
- ocm->isacntl, ocm->dsarc, ocm->dsacntl);
-
- if (ocm->isarc != isarc ||
- (ocm->isacntl & 0x80000000) != (isacntl & 0x80000000)) {
- if (ocm->isacntl & 0x80000000) {
- /* Unmap previously assigned memory region */
- trace_ocm_unmap("ISA", ocm->isarc);
- memory_region_del_subregion(get_system_memory(), &ocm->isarc_ram);
- }
- if (isacntl & 0x80000000) {
- /* Map new instruction memory region */
- trace_ocm_map("ISA", isarc);
- memory_region_add_subregion(get_system_memory(), isarc,
- &ocm->isarc_ram);
- }
- }
- if (ocm->dsarc != dsarc ||
- (ocm->dsacntl & 0x80000000) != (dsacntl & 0x80000000)) {
- if (ocm->dsacntl & 0x80000000) {
- /* Beware not to unmap the region we just mapped */
- if (!(isacntl & 0x80000000) || ocm->dsarc != isarc) {
- /* Unmap previously assigned memory region */
- trace_ocm_unmap("DSA", ocm->dsarc);
- memory_region_del_subregion(get_system_memory(),
- &ocm->dsarc_ram);
- }
- }
- if (dsacntl & 0x80000000) {
- /* Beware not to remap the region we just mapped */
- if (!(isacntl & 0x80000000) || dsarc != isarc) {
- /* Map new data memory region */
- trace_ocm_map("DSA", dsarc);
- memory_region_add_subregion(get_system_memory(), dsarc,
- &ocm->dsarc_ram);
- }
- }
- }
-}
-
-static uint32_t dcr_read_ocm(void *opaque, int dcrn)
-{
- Ppc405OcmState *ocm = opaque;
- uint32_t ret;
-
- switch (dcrn) {
- case OCM0_ISARC:
- ret = ocm->isarc;
- break;
- case OCM0_ISACNTL:
- ret = ocm->isacntl;
- break;
- case OCM0_DSARC:
- ret = ocm->dsarc;
- break;
- case OCM0_DSACNTL:
- ret = ocm->dsacntl;
- break;
- default:
- ret = 0;
- break;
- }
-
- return ret;
-}
-
-static void dcr_write_ocm(void *opaque, int dcrn, uint32_t val)
-{
- Ppc405OcmState *ocm = opaque;
- uint32_t isarc, dsarc, isacntl, dsacntl;
-
- isarc = ocm->isarc;
- dsarc = ocm->dsarc;
- isacntl = ocm->isacntl;
- dsacntl = ocm->dsacntl;
- switch (dcrn) {
- case OCM0_ISARC:
- isarc = val & 0xFC000000;
- break;
- case OCM0_ISACNTL:
- isacntl = val & 0xC0000000;
- break;
- case OCM0_DSARC:
- isarc = val & 0xFC000000;
- break;
- case OCM0_DSACNTL:
- isacntl = val & 0xC0000000;
- break;
- }
- ocm_update_mappings(ocm, isarc, isacntl, dsarc, dsacntl);
- ocm->isarc = isarc;
- ocm->dsarc = dsarc;
- ocm->isacntl = isacntl;
- ocm->dsacntl = dsacntl;
-}
-
-static void ppc405_ocm_reset(DeviceState *dev)
-{
- Ppc405OcmState *ocm = PPC405_OCM(dev);
- uint32_t isarc, dsarc, isacntl, dsacntl;
-
- isarc = 0x00000000;
- isacntl = 0x00000000;
- dsarc = 0x00000000;
- dsacntl = 0x00000000;
- ocm_update_mappings(ocm, isarc, isacntl, dsarc, dsacntl);
- ocm->isarc = isarc;
- ocm->dsarc = dsarc;
- ocm->isacntl = isacntl;
- ocm->dsacntl = dsacntl;
-}
-
-static void ppc405_ocm_realize(DeviceState *dev, Error **errp)
-{
- Ppc405OcmState *ocm = PPC405_OCM(dev);
- Ppc4xxDcrDeviceState *dcr = PPC4xx_DCR_DEVICE(dev);
-
- /* XXX: Size is 4096 or 0x04000000 */
- memory_region_init_ram(&ocm->isarc_ram, OBJECT(ocm), "ppc405.ocm", 4 * KiB,
- &error_fatal);
- memory_region_init_alias(&ocm->dsarc_ram, OBJECT(ocm), "ppc405.dsarc",
- &ocm->isarc_ram, 0, 4 * KiB);
-
- ppc4xx_dcr_register(dcr, OCM0_ISARC, ocm, &dcr_read_ocm, &dcr_write_ocm);
- ppc4xx_dcr_register(dcr, OCM0_ISACNTL, ocm, &dcr_read_ocm, &dcr_write_ocm);
- ppc4xx_dcr_register(dcr, OCM0_DSARC, ocm, &dcr_read_ocm, &dcr_write_ocm);
- ppc4xx_dcr_register(dcr, OCM0_DSACNTL, ocm, &dcr_read_ocm, &dcr_write_ocm);
-}
-
-static void ppc405_ocm_class_init(ObjectClass *oc, void *data)
-{
- DeviceClass *dc = DEVICE_CLASS(oc);
-
- dc->realize = ppc405_ocm_realize;
- device_class_set_legacy_reset(dc, ppc405_ocm_reset);
- /* Reason: only works as function of a ppc4xx SoC */
- dc->user_creatable = false;
-}
-
-/*****************************************************************************/
-/* General purpose timers */
-static int ppc4xx_gpt_compare(Ppc405GptState *gpt, int n)
-{
- /* XXX: TODO */
- return 0;
-}
-
-static void ppc4xx_gpt_set_output(Ppc405GptState *gpt, int n, int level)
-{
- /* XXX: TODO */
-}
-
-static void ppc4xx_gpt_set_outputs(Ppc405GptState *gpt)
-{
- uint32_t mask;
- int i;
-
- mask = 0x80000000;
- for (i = 0; i < 5; i++) {
- if (gpt->oe & mask) {
- /* Output is enabled */
- if (ppc4xx_gpt_compare(gpt, i)) {
- /* Comparison is OK */
- ppc4xx_gpt_set_output(gpt, i, gpt->ol & mask);
- } else {
- /* Comparison is KO */
- ppc4xx_gpt_set_output(gpt, i, gpt->ol & mask ? 0 : 1);
- }
- }
- mask = mask >> 1;
- }
-}
-
-static void ppc4xx_gpt_set_irqs(Ppc405GptState *gpt)
-{
- uint32_t mask;
- int i;
-
- mask = 0x00008000;
- for (i = 0; i < 5; i++) {
- if (gpt->is & gpt->im & mask) {
- qemu_irq_raise(gpt->irqs[i]);
- } else {
- qemu_irq_lower(gpt->irqs[i]);
- }
- mask = mask >> 1;
- }
-}
-
-static void ppc4xx_gpt_compute_timer(Ppc405GptState *gpt)
-{
- /* XXX: TODO */
-}
-
-static uint64_t ppc4xx_gpt_read(void *opaque, hwaddr addr, unsigned size)
-{
- Ppc405GptState *gpt = opaque;
- uint32_t ret;
- int idx;
-
- trace_ppc4xx_gpt_read(addr, size);
-
- switch (addr) {
- case 0x00:
- /* Time base counter */
- ret = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + gpt->tb_offset,
- gpt->tb_freq, NANOSECONDS_PER_SECOND);
- break;
- case 0x10:
- /* Output enable */
- ret = gpt->oe;
- break;
- case 0x14:
- /* Output level */
- ret = gpt->ol;
- break;
- case 0x18:
- /* Interrupt mask */
- ret = gpt->im;
- break;
- case 0x1C:
- case 0x20:
- /* Interrupt status */
- ret = gpt->is;
- break;
- case 0x24:
- /* Interrupt enable */
- ret = gpt->ie;
- break;
- case 0x80 ... 0x90:
- /* Compare timer */
- idx = (addr - 0x80) >> 2;
- ret = gpt->comp[idx];
- break;
- case 0xC0 ... 0xD0:
- /* Compare mask */
- idx = (addr - 0xC0) >> 2;
- ret = gpt->mask[idx];
- break;
- default:
- ret = -1;
- break;
- }
-
- return ret;
-}
-
-static void ppc4xx_gpt_write(void *opaque, hwaddr addr, uint64_t value,
- unsigned size)
-{
- Ppc405GptState *gpt = opaque;
- int idx;
-
- trace_ppc4xx_gpt_write(addr, size, value);
-
- switch (addr) {
- case 0x00:
- /* Time base counter */
- gpt->tb_offset = muldiv64(value, NANOSECONDS_PER_SECOND, gpt->tb_freq)
- - qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
- ppc4xx_gpt_compute_timer(gpt);
- break;
- case 0x10:
- /* Output enable */
- gpt->oe = value & 0xF8000000;
- ppc4xx_gpt_set_outputs(gpt);
- break;
- case 0x14:
- /* Output level */
- gpt->ol = value & 0xF8000000;
- ppc4xx_gpt_set_outputs(gpt);
- break;
- case 0x18:
- /* Interrupt mask */
- gpt->im = value & 0x0000F800;
- break;
- case 0x1C:
- /* Interrupt status set */
- gpt->is |= value & 0x0000F800;
- ppc4xx_gpt_set_irqs(gpt);
- break;
- case 0x20:
- /* Interrupt status clear */
- gpt->is &= ~(value & 0x0000F800);
- ppc4xx_gpt_set_irqs(gpt);
- break;
- case 0x24:
- /* Interrupt enable */
- gpt->ie = value & 0x0000F800;
- ppc4xx_gpt_set_irqs(gpt);
- break;
- case 0x80 ... 0x90:
- /* Compare timer */
- idx = (addr - 0x80) >> 2;
- gpt->comp[idx] = value & 0xF8000000;
- ppc4xx_gpt_compute_timer(gpt);
- break;
- case 0xC0 ... 0xD0:
- /* Compare mask */
- idx = (addr - 0xC0) >> 2;
- gpt->mask[idx] = value & 0xF8000000;
- ppc4xx_gpt_compute_timer(gpt);
- break;
- }
-}
-
-static const MemoryRegionOps gpt_ops = {
- .read = ppc4xx_gpt_read,
- .write = ppc4xx_gpt_write,
- .valid.min_access_size = 4,
- .valid.max_access_size = 4,
- .endianness = DEVICE_NATIVE_ENDIAN,
-};
-
-static void ppc4xx_gpt_cb(void *opaque)
-{
- Ppc405GptState *gpt = opaque;
-
- ppc4xx_gpt_set_irqs(gpt);
- ppc4xx_gpt_set_outputs(gpt);
- ppc4xx_gpt_compute_timer(gpt);
-}
-
-static void ppc405_gpt_reset(DeviceState *dev)
-{
- Ppc405GptState *gpt = PPC405_GPT(dev);
- int i;
-
- timer_del(gpt->timer);
- gpt->oe = 0x00000000;
- gpt->ol = 0x00000000;
- gpt->im = 0x00000000;
- gpt->is = 0x00000000;
- gpt->ie = 0x00000000;
- for (i = 0; i < 5; i++) {
- gpt->comp[i] = 0x00000000;
- gpt->mask[i] = 0x00000000;
- }
-}
-
-static void ppc405_gpt_realize(DeviceState *dev, Error **errp)
-{
- Ppc405GptState *s = PPC405_GPT(dev);
- SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
- int i;
-
- s->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &ppc4xx_gpt_cb, s);
- memory_region_init_io(&s->iomem, OBJECT(s), &gpt_ops, s, "gpt", 0xd4);
- sysbus_init_mmio(sbd, &s->iomem);
-
- for (i = 0; i < ARRAY_SIZE(s->irqs); i++) {
- sysbus_init_irq(sbd, &s->irqs[i]);
- }
-}
-
-static void ppc405_gpt_finalize(Object *obj)
-{
- /* timer will be NULL if the GPT wasn't realized */
- if (PPC405_GPT(obj)->timer) {
- timer_del(PPC405_GPT(obj)->timer);
- }
-}
-
-static void ppc405_gpt_class_init(ObjectClass *oc, void *data)
-{
- DeviceClass *dc = DEVICE_CLASS(oc);
-
- dc->realize = ppc405_gpt_realize;
- device_class_set_legacy_reset(dc, ppc405_gpt_reset);
- /* Reason: only works as function of a ppc4xx SoC */
- dc->user_creatable = false;
-}
-
-/*****************************************************************************/
-/* PowerPC 405EP */
-/* CPU control */
-enum {
- PPC405EP_CPC0_PLLMR0 = 0x0F0,
- PPC405EP_CPC0_BOOT = 0x0F1,
- PPC405EP_CPC0_EPCTL = 0x0F3,
- PPC405EP_CPC0_PLLMR1 = 0x0F4,
- PPC405EP_CPC0_UCR = 0x0F5,
- PPC405EP_CPC0_SRR = 0x0F6,
- PPC405EP_CPC0_JTAGID = 0x0F7,
- PPC405EP_CPC0_PCI = 0x0F9,
-#if 0
- PPC405EP_CPC0_ER = xxx,
- PPC405EP_CPC0_FR = xxx,
- PPC405EP_CPC0_SR = xxx,
-#endif
-};
-
-static void ppc405ep_compute_clocks(Ppc405CpcState *cpc)
-{
- uint32_t CPU_clk, PLB_clk, OPB_clk, EBC_clk, MAL_clk, PCI_clk;
- uint32_t UART0_clk, UART1_clk;
- uint64_t VCO_out, PLL_out;
- int M, D;
-
- VCO_out = 0;
- if ((cpc->pllmr[1] & 0x80000000) && !(cpc->pllmr[1] & 0x40000000)) {
- M = (((cpc->pllmr[1] >> 20) - 1) & 0xF) + 1; /* FBMUL */
- trace_ppc405ep_clocks_compute("FBMUL", (cpc->pllmr[1] >> 20) & 0xF, M);
- D = 8 - ((cpc->pllmr[1] >> 16) & 0x7); /* FWDA */
- trace_ppc405ep_clocks_compute("FWDA", (cpc->pllmr[1] >> 16) & 0x7, D);
- VCO_out = (uint64_t)cpc->sysclk * M * D;
- if (VCO_out < 500000000UL || VCO_out > 1000000000UL) {
- /* Error - unlock the PLL */
- qemu_log_mask(LOG_GUEST_ERROR, "VCO out of range %" PRIu64 "\n",
- VCO_out);
-#if 0
- cpc->pllmr[1] &= ~0x80000000;
- goto pll_bypass;
-#endif
- }
- PLL_out = VCO_out / D;
- /* Pretend the PLL is locked */
- cpc->boot |= 0x00000001;
- } else {
-#if 0
- pll_bypass:
-#endif
- PLL_out = cpc->sysclk;
- if (cpc->pllmr[1] & 0x40000000) {
- /* Pretend the PLL is not locked */
- cpc->boot &= ~0x00000001;
- }
- }
- /* Now, compute all other clocks */
- D = ((cpc->pllmr[0] >> 20) & 0x3) + 1; /* CCDV */
- trace_ppc405ep_clocks_compute("CCDV", (cpc->pllmr[0] >> 20) & 0x3, D);
- CPU_clk = PLL_out / D;
- D = ((cpc->pllmr[0] >> 16) & 0x3) + 1; /* CBDV */
- trace_ppc405ep_clocks_compute("CBDV", (cpc->pllmr[0] >> 16) & 0x3, D);
- PLB_clk = CPU_clk / D;
- D = ((cpc->pllmr[0] >> 12) & 0x3) + 1; /* OPDV */
- trace_ppc405ep_clocks_compute("OPDV", (cpc->pllmr[0] >> 12) & 0x3, D);
- OPB_clk = PLB_clk / D;
- D = ((cpc->pllmr[0] >> 8) & 0x3) + 2; /* EPDV */
- trace_ppc405ep_clocks_compute("EPDV", (cpc->pllmr[0] >> 8) & 0x3, D);
- EBC_clk = PLB_clk / D;
- D = ((cpc->pllmr[0] >> 4) & 0x3) + 1; /* MPDV */
- trace_ppc405ep_clocks_compute("MPDV", (cpc->pllmr[0] >> 4) & 0x3, D);
- MAL_clk = PLB_clk / D;
- D = (cpc->pllmr[0] & 0x3) + 1; /* PPDV */
- trace_ppc405ep_clocks_compute("PPDV", cpc->pllmr[0] & 0x3, D);
- PCI_clk = PLB_clk / D;
- D = ((cpc->ucr - 1) & 0x7F) + 1; /* U0DIV */
- trace_ppc405ep_clocks_compute("U0DIV", cpc->ucr & 0x7F, D);
- UART0_clk = PLL_out / D;
- D = (((cpc->ucr >> 8) - 1) & 0x7F) + 1; /* U1DIV */
- trace_ppc405ep_clocks_compute("U1DIV", (cpc->ucr >> 8) & 0x7F, D);
- UART1_clk = PLL_out / D;
-
- if (trace_event_get_state_backends(TRACE_PPC405EP_CLOCKS_SETUP)) {
- g_autofree char *trace = g_strdup_printf(
- "Setup PPC405EP clocks - sysclk %" PRIu32 " VCO %" PRIu64
- " PLL out %" PRIu64 " Hz\n"
- "CPU %" PRIu32 " PLB %" PRIu32 " OPB %" PRIu32 " EBC %" PRIu32
- " MAL %" PRIu32 " PCI %" PRIu32 " UART0 %" PRIu32
- " UART1 %" PRIu32 "\n",
- cpc->sysclk, VCO_out, PLL_out,
- CPU_clk, PLB_clk, OPB_clk, EBC_clk, MAL_clk, PCI_clk,
- UART0_clk, UART1_clk);
- trace_ppc405ep_clocks_setup(trace);
- }
-
- /* Setup CPU clocks */
- clk_setup(&cpc->clk_setup[PPC405EP_CPU_CLK], CPU_clk);
- /* Setup PLB clock */
- clk_setup(&cpc->clk_setup[PPC405EP_PLB_CLK], PLB_clk);
- /* Setup OPB clock */
- clk_setup(&cpc->clk_setup[PPC405EP_OPB_CLK], OPB_clk);
- /* Setup external clock */
- clk_setup(&cpc->clk_setup[PPC405EP_EBC_CLK], EBC_clk);
- /* Setup MAL clock */
- clk_setup(&cpc->clk_setup[PPC405EP_MAL_CLK], MAL_clk);
- /* Setup PCI clock */
- clk_setup(&cpc->clk_setup[PPC405EP_PCI_CLK], PCI_clk);
- /* Setup UART0 clock */
- clk_setup(&cpc->clk_setup[PPC405EP_UART0_CLK], UART0_clk);
- /* Setup UART1 clock */
- clk_setup(&cpc->clk_setup[PPC405EP_UART1_CLK], UART1_clk);
-}
-
-static uint32_t dcr_read_epcpc(void *opaque, int dcrn)
-{
- Ppc405CpcState *cpc = opaque;
- uint32_t ret;
-
- switch (dcrn) {
- case PPC405EP_CPC0_BOOT:
- ret = cpc->boot;
- break;
- case PPC405EP_CPC0_EPCTL:
- ret = cpc->epctl;
- break;
- case PPC405EP_CPC0_PLLMR0:
- ret = cpc->pllmr[0];
- break;
- case PPC405EP_CPC0_PLLMR1:
- ret = cpc->pllmr[1];
- break;
- case PPC405EP_CPC0_UCR:
- ret = cpc->ucr;
- break;
- case PPC405EP_CPC0_SRR:
- ret = cpc->srr;
- break;
- case PPC405EP_CPC0_JTAGID:
- ret = cpc->jtagid;
- break;
- case PPC405EP_CPC0_PCI:
- ret = cpc->pci;
- break;
- default:
- /* Avoid gcc warning */
- ret = 0;
- break;
- }
-
- return ret;
-}
-
-static void dcr_write_epcpc(void *opaque, int dcrn, uint32_t val)
-{
- Ppc405CpcState *cpc = opaque;
-
- switch (dcrn) {
- case PPC405EP_CPC0_BOOT:
- /* Read-only register */
- break;
- case PPC405EP_CPC0_EPCTL:
- /* Don't care for now */
- cpc->epctl = val & 0xC00000F3;
- break;
- case PPC405EP_CPC0_PLLMR0:
- cpc->pllmr[0] = val & 0x00633333;
- ppc405ep_compute_clocks(cpc);
- break;
- case PPC405EP_CPC0_PLLMR1:
- cpc->pllmr[1] = val & 0xC0F73FFF;
- ppc405ep_compute_clocks(cpc);
- break;
- case PPC405EP_CPC0_UCR:
- /* UART control - don't care for now */
- cpc->ucr = val & 0x003F7F7F;
- break;
- case PPC405EP_CPC0_SRR:
- cpc->srr = val;
- break;
- case PPC405EP_CPC0_JTAGID:
- /* Read-only */
- break;
- case PPC405EP_CPC0_PCI:
- cpc->pci = val;
- break;
- }
-}
-
-static void ppc405_cpc_reset(DeviceState *dev)
-{
- Ppc405CpcState *cpc = PPC405_CPC(dev);
-
- cpc->boot = 0x00000010; /* Boot from PCI - IIC EEPROM disabled */
- cpc->epctl = 0x00000000;
- cpc->pllmr[0] = 0x00021002;
- cpc->pllmr[1] = 0x80a552be;
- cpc->ucr = 0x00004646;
- cpc->srr = 0x00040000;
- cpc->pci = 0x00000000;
- cpc->er = 0x00000000;
- cpc->fr = 0x00000000;
- cpc->sr = 0x00000000;
- cpc->jtagid = 0x20267049;
- ppc405ep_compute_clocks(cpc);
-}
-
-/* XXX: sysclk should be between 25 and 100 MHz */
-static void ppc405_cpc_realize(DeviceState *dev, Error **errp)
-{
- Ppc405CpcState *cpc = PPC405_CPC(dev);
- Ppc4xxDcrDeviceState *dcr = PPC4xx_DCR_DEVICE(dev);
-
- assert(dcr->cpu);
- cpc->clk_setup[PPC405EP_CPU_CLK].cb =
- ppc_40x_timers_init(&dcr->cpu->env, cpc->sysclk, PPC_INTERRUPT_PIT);
- cpc->clk_setup[PPC405EP_CPU_CLK].opaque = &dcr->cpu->env;
-
- ppc4xx_dcr_register(dcr, PPC405EP_CPC0_BOOT, cpc,
- &dcr_read_epcpc, &dcr_write_epcpc);
- ppc4xx_dcr_register(dcr, PPC405EP_CPC0_EPCTL, cpc,
- &dcr_read_epcpc, &dcr_write_epcpc);
- ppc4xx_dcr_register(dcr, PPC405EP_CPC0_PLLMR0, cpc,
- &dcr_read_epcpc, &dcr_write_epcpc);
- ppc4xx_dcr_register(dcr, PPC405EP_CPC0_PLLMR1, cpc,
- &dcr_read_epcpc, &dcr_write_epcpc);
- ppc4xx_dcr_register(dcr, PPC405EP_CPC0_UCR, cpc,
- &dcr_read_epcpc, &dcr_write_epcpc);
- ppc4xx_dcr_register(dcr, PPC405EP_CPC0_SRR, cpc,
- &dcr_read_epcpc, &dcr_write_epcpc);
- ppc4xx_dcr_register(dcr, PPC405EP_CPC0_JTAGID, cpc,
- &dcr_read_epcpc, &dcr_write_epcpc);
- ppc4xx_dcr_register(dcr, PPC405EP_CPC0_PCI, cpc,
- &dcr_read_epcpc, &dcr_write_epcpc);
-}
-
-static const Property ppc405_cpc_properties[] = {
- DEFINE_PROP_UINT32("sys-clk", Ppc405CpcState, sysclk, 0),
-};
-
-static void ppc405_cpc_class_init(ObjectClass *oc, void *data)
-{
- DeviceClass *dc = DEVICE_CLASS(oc);
-
- dc->realize = ppc405_cpc_realize;
- device_class_set_legacy_reset(dc, ppc405_cpc_reset);
- /* Reason: only works as function of a ppc4xx SoC */
- dc->user_creatable = false;
- device_class_set_props(dc, ppc405_cpc_properties);
-}
-
-/* PPC405_SOC */
-
-static void ppc405_soc_instance_init(Object *obj)
-{
- Ppc405SoCState *s = PPC405_SOC(obj);
-
- object_initialize_child(obj, "cpu", &s->cpu,
- POWERPC_CPU_TYPE_NAME("405ep"));
-
- object_initialize_child(obj, "uic", &s->uic, TYPE_PPC_UIC);
-
- object_initialize_child(obj, "cpc", &s->cpc, TYPE_PPC405_CPC);
- object_property_add_alias(obj, "sys-clk", OBJECT(&s->cpc), "sys-clk");
-
- object_initialize_child(obj, "gpt", &s->gpt, TYPE_PPC405_GPT);
-
- object_initialize_child(obj, "ocm", &s->ocm, TYPE_PPC405_OCM);
-
- object_initialize_child(obj, "gpio", &s->gpio, TYPE_PPC405_GPIO);
-
- object_initialize_child(obj, "dma", &s->dma, TYPE_PPC405_DMA);
-
- object_initialize_child(obj, "i2c", &s->i2c, TYPE_PPC4xx_I2C);
-
- object_initialize_child(obj, "ebc", &s->ebc, TYPE_PPC4xx_EBC);
-
- object_initialize_child(obj, "opba", &s->opba, TYPE_PPC405_OPBA);
-
- object_initialize_child(obj, "pob", &s->pob, TYPE_PPC405_POB);
-
- object_initialize_child(obj, "plb", &s->plb, TYPE_PPC4xx_PLB);
-
- object_initialize_child(obj, "mal", &s->mal, TYPE_PPC4xx_MAL);
-
- object_initialize_child(obj, "sdram", &s->sdram, TYPE_PPC4xx_SDRAM_DDR);
- object_property_add_alias(obj, "dram", OBJECT(&s->sdram), "dram");
-}
-
-static void ppc405_reset(void *opaque)
-{
- cpu_reset(CPU(opaque));
-}
-
-static void ppc405_soc_realize(DeviceState *dev, Error **errp)
-{
- Ppc405SoCState *s = PPC405_SOC(dev);
- CPUPPCState *env;
- SysBusDevice *sbd;
- int i;
-
- /* init CPUs */
- if (!qdev_realize(DEVICE(&s->cpu), NULL, errp)) {
- return;
- }
- qemu_register_reset(ppc405_reset, &s->cpu);
-
- env = &s->cpu.env;
-
- ppc_dcr_init(env, NULL, NULL);
-
- /* CPU control */
- if (!ppc4xx_dcr_realize(PPC4xx_DCR_DEVICE(&s->cpc), &s->cpu, errp)) {
- return;
- }
-
- /* PLB arbitrer */
- if (!ppc4xx_dcr_realize(PPC4xx_DCR_DEVICE(&s->plb), &s->cpu, errp)) {
- return;
- }
-
- /* PLB to OPB bridge */
- if (!ppc4xx_dcr_realize(PPC4xx_DCR_DEVICE(&s->pob), &s->cpu, errp)) {
- return;
- }
-
- /* OBP arbitrer */
- sbd = SYS_BUS_DEVICE(&s->opba);
- if (!sysbus_realize(sbd, errp)) {
- return;
- }
- sysbus_mmio_map(sbd, 0, 0xef600600);
-
- /* Universal interrupt controller */
- if (!ppc4xx_dcr_realize(PPC4xx_DCR_DEVICE(&s->uic), &s->cpu, errp)) {
- return;
- }
- sbd = SYS_BUS_DEVICE(&s->uic);
- sysbus_connect_irq(sbd, PPCUIC_OUTPUT_INT,
- qdev_get_gpio_in(DEVICE(&s->cpu), PPC40x_INPUT_INT));
- sysbus_connect_irq(sbd, PPCUIC_OUTPUT_CINT,
- qdev_get_gpio_in(DEVICE(&s->cpu), PPC40x_INPUT_CINT));
-
- /* SDRAM controller */
- /*
- * We use the 440 DDR SDRAM controller which has more regs and features
- * but it's compatible enough for now
- */
- object_property_set_int(OBJECT(&s->sdram), "nbanks", 2, &error_abort);
- if (!ppc4xx_dcr_realize(PPC4xx_DCR_DEVICE(&s->sdram), &s->cpu, errp)) {
- return;
- }
- /* XXX 405EP has no ECC interrupt */
- sysbus_connect_irq(SYS_BUS_DEVICE(&s->sdram), 0,
- qdev_get_gpio_in(DEVICE(&s->uic), 17));
-
- /* External bus controller */
- if (!ppc4xx_dcr_realize(PPC4xx_DCR_DEVICE(&s->ebc), &s->cpu, errp)) {
- return;
- }
-
- /* DMA controller */
- if (!ppc4xx_dcr_realize(PPC4xx_DCR_DEVICE(&s->dma), &s->cpu, errp)) {
- return;
- }
- sbd = SYS_BUS_DEVICE(&s->dma);
- for (i = 0; i < ARRAY_SIZE(s->dma.irqs); i++) {
- sysbus_connect_irq(sbd, i, qdev_get_gpio_in(DEVICE(&s->uic), 5 + i));
- }
-
- /* I2C controller */
- sbd = SYS_BUS_DEVICE(&s->i2c);
- if (!sysbus_realize(sbd, errp)) {
- return;
- }
- sysbus_mmio_map(sbd, 0, 0xef600500);
- sysbus_connect_irq(sbd, 0, qdev_get_gpio_in(DEVICE(&s->uic), 2));
-
- /* GPIO */
- sbd = SYS_BUS_DEVICE(&s->gpio);
- if (!sysbus_realize(sbd, errp)) {
- return;
- }
- sysbus_mmio_map(sbd, 0, 0xef600700);
-
- /* Serial ports */
- if (serial_hd(0) != NULL) {
- serial_mm_init(get_system_memory(), 0xef600300, 0,
- qdev_get_gpio_in(DEVICE(&s->uic), 0),
- PPC_SERIAL_MM_BAUDBASE, serial_hd(0),
- DEVICE_BIG_ENDIAN);
- }
- if (serial_hd(1) != NULL) {
- serial_mm_init(get_system_memory(), 0xef600400, 0,
- qdev_get_gpio_in(DEVICE(&s->uic), 1),
- PPC_SERIAL_MM_BAUDBASE, serial_hd(1),
- DEVICE_BIG_ENDIAN);
- }
-
- /* OCM */
- if (!ppc4xx_dcr_realize(PPC4xx_DCR_DEVICE(&s->ocm), &s->cpu, errp)) {
- return;
- }
-
- /* GPT */
- sbd = SYS_BUS_DEVICE(&s->gpt);
- if (!sysbus_realize(sbd, errp)) {
- return;
- }
- sysbus_mmio_map(sbd, 0, 0xef600000);
- for (i = 0; i < ARRAY_SIZE(s->gpt.irqs); i++) {
- sysbus_connect_irq(sbd, i, qdev_get_gpio_in(DEVICE(&s->uic), 19 + i));
- }
-
- /* MAL */
- object_property_set_int(OBJECT(&s->mal), "txc-num", 4, &error_abort);
- object_property_set_int(OBJECT(&s->mal), "rxc-num", 2, &error_abort);
- if (!ppc4xx_dcr_realize(PPC4xx_DCR_DEVICE(&s->mal), &s->cpu, errp)) {
- return;
- }
- sbd = SYS_BUS_DEVICE(&s->mal);
- for (i = 0; i < ARRAY_SIZE(s->mal.irqs); i++) {
- sysbus_connect_irq(sbd, i, qdev_get_gpio_in(DEVICE(&s->uic), 11 + i));
- }
-
- /* Ethernet */
- /* Uses UIC IRQs 9, 15, 17 */
-}
-
-static void ppc405_soc_class_init(ObjectClass *oc, void *data)
-{
- DeviceClass *dc = DEVICE_CLASS(oc);
-
- dc->realize = ppc405_soc_realize;
- /* Reason: only works as part of a ppc405 board/machine */
- dc->user_creatable = false;
-}
-
-static const TypeInfo ppc405_types[] = {
- {
- .name = TYPE_PPC405_POB,
- .parent = TYPE_PPC4xx_DCR_DEVICE,
- .instance_size = sizeof(Ppc405PobState),
- .class_init = ppc405_pob_class_init,
- }, {
- .name = TYPE_PPC405_OPBA,
- .parent = TYPE_SYS_BUS_DEVICE,
- .instance_size = sizeof(Ppc405OpbaState),
- .class_init = ppc405_opba_class_init,
- }, {
- .name = TYPE_PPC405_DMA,
- .parent = TYPE_PPC4xx_DCR_DEVICE,
- .instance_size = sizeof(Ppc405DmaState),
- .class_init = ppc405_dma_class_init,
- }, {
- .name = TYPE_PPC405_GPIO,
- .parent = TYPE_SYS_BUS_DEVICE,
- .instance_size = sizeof(Ppc405GpioState),
- .class_init = ppc405_gpio_class_init,
- }, {
- .name = TYPE_PPC405_OCM,
- .parent = TYPE_PPC4xx_DCR_DEVICE,
- .instance_size = sizeof(Ppc405OcmState),
- .class_init = ppc405_ocm_class_init,
- }, {
- .name = TYPE_PPC405_GPT,
- .parent = TYPE_SYS_BUS_DEVICE,
- .instance_size = sizeof(Ppc405GptState),
- .instance_finalize = ppc405_gpt_finalize,
- .class_init = ppc405_gpt_class_init,
- }, {
- .name = TYPE_PPC405_CPC,
- .parent = TYPE_PPC4xx_DCR_DEVICE,
- .instance_size = sizeof(Ppc405CpcState),
- .class_init = ppc405_cpc_class_init,
- }, {
- .name = TYPE_PPC405_SOC,
- .parent = TYPE_DEVICE,
- .instance_size = sizeof(Ppc405SoCState),
- .instance_init = ppc405_soc_instance_init,
- .class_init = ppc405_soc_class_init,
- }
-};
-
-DEFINE_TYPES(ppc405_types)
diff --git a/hw/ppc/sam460ex.c b/hw/ppc/sam460ex.c
index 3ecae6a..7dc3b30 100644
--- a/hw/ppc/sam460ex.c
+++ b/hw/ppc/sam460ex.c
@@ -234,7 +234,7 @@ static void main_cpu_reset(void *opaque)
/* Create a mapping for the kernel. */
booke_set_tlb(&env->tlb.tlbe[0], 0, 0, 1 << 31);
- env->gpr[6] = tswap32(EPAPR_MAGIC);
+ env->gpr[6] = EPAPR_MAGIC;
env->gpr[7] = (16 * MiB) - 8; /* bi->ima_size; */
} else {
diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c
index c15340a..a415e51 100644
--- a/hw/ppc/spapr.c
+++ b/hw/ppc/spapr.c
@@ -4,6 +4,9 @@
* Copyright (c) 2004-2007 Fabrice Bellard
* Copyright (c) 2007 Jocelyn Mayer
* Copyright (c) 2010 David Gibson, IBM Corporation.
+ * Copyright (c) 2010-2024, IBM Corporation.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
@@ -243,7 +246,7 @@ static void spapr_dt_pa_features(SpaprMachineState *spapr,
0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 48 - 53 */
/* 54: DecFP, 56: DecI, 58: SHA */
0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 54 - 59 */
- /* 60: NM atomic, 62: RNG */
+ /* 60: NM atomic, 62: RNG, 64: DAWR1 (ISA 3.1) */
0x80, 0x00, 0x80, 0x00, 0x00, 0x00, /* 60 - 65 */
/* 68: DEXCR[SBHE|IBRTPDUS|SRAPD|NPHIE|PHIE] */
0x00, 0x00, 0xce, 0x00, 0x00, 0x00, /* 66 - 71 */
@@ -292,6 +295,9 @@ static void spapr_dt_pa_features(SpaprMachineState *spapr,
* in pa-features. So hide it from them. */
pa_features[40 + 2] &= ~0x80; /* Radix MMU */
}
+ if (spapr_get_cap(spapr, SPAPR_CAP_DAWR1)) {
+ pa_features[66] |= 0x80;
+ }
_FDT((fdt_setprop(fdt, offset, "ibm,pa-features", pa_features, pa_size)));
}
@@ -1399,11 +1405,34 @@ static bool spapr_get_pate(PPCVirtualHypervisor *vhyp, PowerPCCPU *cpu,
}
}
-#define HPTE(_table, _i) (void *)(((uint64_t *)(_table)) + ((_i) * 2))
-#define HPTE_VALID(_hpte) (tswap64(*((uint64_t *)(_hpte))) & HPTE64_V_VALID)
-#define HPTE_DIRTY(_hpte) (tswap64(*((uint64_t *)(_hpte))) & HPTE64_V_HPTE_DIRTY)
-#define CLEAN_HPTE(_hpte) ((*(uint64_t *)(_hpte)) &= tswap64(~HPTE64_V_HPTE_DIRTY))
-#define DIRTY_HPTE(_hpte) ((*(uint64_t *)(_hpte)) |= tswap64(HPTE64_V_HPTE_DIRTY))
+static uint64_t *hpte_get_ptr(SpaprMachineState *s, unsigned index)
+{
+ uint64_t *table = s->htab;
+
+ return &table[2 * index];
+}
+
+static bool hpte_is_valid(SpaprMachineState *s, unsigned index)
+{
+ return ldq_be_p(hpte_get_ptr(s, index)) & HPTE64_V_VALID;
+}
+
+static bool hpte_is_dirty(SpaprMachineState *s, unsigned index)
+{
+ return ldq_be_p(hpte_get_ptr(s, index)) & HPTE64_V_HPTE_DIRTY;
+}
+
+static void hpte_set_clean(SpaprMachineState *s, unsigned index)
+{
+ stq_be_p(hpte_get_ptr(s, index),
+ ldq_be_p(hpte_get_ptr(s, index)) & ~HPTE64_V_HPTE_DIRTY);
+}
+
+static void hpte_set_dirty(SpaprMachineState *s, unsigned index)
+{
+ stq_be_p(hpte_get_ptr(s, index),
+ ldq_be_p(hpte_get_ptr(s, index)) | HPTE64_V_HPTE_DIRTY);
+}
/*
* Get the fd to access the kernel htab, re-opening it if necessary
@@ -1614,7 +1643,7 @@ int spapr_reallocate_hpt(SpaprMachineState *spapr, int shift, Error **errp)
spapr->htab_shift = shift;
for (i = 0; i < size / HASH_PTE_SIZE_64; i++) {
- DIRTY_HPTE(HPTE(spapr->htab, i));
+ hpte_set_dirty(spapr, i);
}
}
/* We're setting up a hash table, so that means we're not radix */
@@ -2137,6 +2166,7 @@ static const VMStateDescription vmstate_spapr = {
&vmstate_spapr_cap_rpt_invalidate,
&vmstate_spapr_cap_ail_mode_3,
&vmstate_spapr_cap_nested_papr,
+ &vmstate_spapr_cap_dawr1,
NULL
}
};
@@ -2171,7 +2201,7 @@ static void htab_save_chunk(QEMUFile *f, SpaprMachineState *spapr,
qemu_put_be32(f, chunkstart);
qemu_put_be16(f, n_valid);
qemu_put_be16(f, n_invalid);
- qemu_put_buffer(f, HPTE(spapr->htab, chunkstart),
+ qemu_put_buffer(f, (void *)hpte_get_ptr(spapr, chunkstart),
HASH_PTE_SIZE_64 * n_valid);
}
@@ -2197,16 +2227,16 @@ static void htab_save_first_pass(QEMUFile *f, SpaprMachineState *spapr,
/* Consume invalid HPTEs */
while ((index < htabslots)
- && !HPTE_VALID(HPTE(spapr->htab, index))) {
- CLEAN_HPTE(HPTE(spapr->htab, index));
+ && !hpte_is_valid(spapr, index)) {
+ hpte_set_clean(spapr, index);
index++;
}
/* Consume valid HPTEs */
chunkstart = index;
while ((index < htabslots) && (index - chunkstart < USHRT_MAX)
- && HPTE_VALID(HPTE(spapr->htab, index))) {
- CLEAN_HPTE(HPTE(spapr->htab, index));
+ && hpte_is_valid(spapr, index)) {
+ hpte_set_clean(spapr, index);
index++;
}
@@ -2246,7 +2276,7 @@ static int htab_save_later_pass(QEMUFile *f, SpaprMachineState *spapr,
/* Consume non-dirty HPTEs */
while ((index < htabslots)
- && !HPTE_DIRTY(HPTE(spapr->htab, index))) {
+ && !hpte_is_dirty(spapr, index)) {
index++;
examined++;
}
@@ -2254,9 +2284,9 @@ static int htab_save_later_pass(QEMUFile *f, SpaprMachineState *spapr,
chunkstart = index;
/* Consume valid dirty HPTEs */
while ((index < htabslots) && (index - chunkstart < USHRT_MAX)
- && HPTE_DIRTY(HPTE(spapr->htab, index))
- && HPTE_VALID(HPTE(spapr->htab, index))) {
- CLEAN_HPTE(HPTE(spapr->htab, index));
+ && hpte_is_dirty(spapr, index)
+ && hpte_is_valid(spapr, index)) {
+ hpte_set_clean(spapr, index);
index++;
examined++;
}
@@ -2264,9 +2294,9 @@ static int htab_save_later_pass(QEMUFile *f, SpaprMachineState *spapr,
invalidstart = index;
/* Consume invalid dirty HPTEs */
while ((index < htabslots) && (index - invalidstart < USHRT_MAX)
- && HPTE_DIRTY(HPTE(spapr->htab, index))
- && !HPTE_VALID(HPTE(spapr->htab, index))) {
- CLEAN_HPTE(HPTE(spapr->htab, index));
+ && hpte_is_dirty(spapr, index)
+ && !hpte_is_valid(spapr, index)) {
+ hpte_set_clean(spapr, index);
index++;
examined++;
}
@@ -2448,11 +2478,11 @@ static int htab_load(QEMUFile *f, void *opaque, int version_id)
if (spapr->htab) {
if (n_valid) {
- qemu_get_buffer(f, HPTE(spapr->htab, index),
+ qemu_get_buffer(f, (void *)hpte_get_ptr(spapr, index),
HASH_PTE_SIZE_64 * n_valid);
}
if (n_invalid) {
- memset(HPTE(spapr->htab, index + n_valid), 0,
+ memset(hpte_get_ptr(spapr, index + n_valid), 0,
HASH_PTE_SIZE_64 * n_invalid);
}
} else {
@@ -2887,6 +2917,9 @@ static void spapr_machine_init(MachineState *machine)
spapr_ovec_set(spapr->ov5, OV5_XIVE_EXPLOIT);
}
+ qemu_guest_getrandom_nofail(&spapr->hashpkey_val,
+ sizeof(spapr->hashpkey_val));
+
/* init CPUs */
spapr_init_cpus(spapr);
@@ -4436,7 +4469,7 @@ static void spapr_pic_print_info(InterruptStatsProvider *obj, GString *buf)
*/
static int spapr_match_nvt(XiveFabric *xfb, uint8_t format,
uint8_t nvt_blk, uint32_t nvt_idx,
- bool cam_ignore, uint8_t priority,
+ bool crowd, bool cam_ignore, uint8_t priority,
uint32_t logic_serv, XiveTCTXMatch *match)
{
SpaprMachineState *spapr = SPAPR_MACHINE(xfb);
@@ -4444,7 +4477,7 @@ static int spapr_match_nvt(XiveFabric *xfb, uint8_t format,
XivePresenterClass *xpc = XIVE_PRESENTER_GET_CLASS(xptr);
int count;
- count = xpc->match_nvt(xptr, format, nvt_blk, nvt_idx, cam_ignore,
+ count = xpc->match_nvt(xptr, format, nvt_blk, nvt_idx, crowd, cam_ignore,
priority, logic_serv, match);
if (count < 0) {
return count;
@@ -4654,6 +4687,7 @@ static void spapr_machine_class_init(ObjectClass *oc, void *data)
smc->default_caps.caps[SPAPR_CAP_CCF_ASSIST] = SPAPR_CAP_ON;
smc->default_caps.caps[SPAPR_CAP_FWNMI] = SPAPR_CAP_ON;
smc->default_caps.caps[SPAPR_CAP_RPT_INVALIDATE] = SPAPR_CAP_OFF;
+ smc->default_caps.caps[SPAPR_CAP_DAWR1] = SPAPR_CAP_ON;
/*
* This cap specifies whether the AIL 3 mode for
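
The hunk above replaces the open-coded HPTE()/HPTE_VALID()/HPTE_DIRTY() tswap64 macros with helpers built on ldq_be_p()/stq_be_p(), making the big-endian layout of the in-memory hash table explicit instead of relying on target byte-swapping. A minimal standalone sketch of the same load/store pattern, with hand-rolled helpers and placeholder bit values standing in for QEMU's bswap.h accessors and the real mmu-hash64.h definitions:

    #include <stdint.h>
    #include <stdio.h>

    /* Placeholder bit values for illustration, not the mmu-hash64.h ones. */
    #define HPTE_VALID 0x1ULL
    #define HPTE_DIRTY 0x2ULL

    /*
     * Load/store a 64-bit big-endian value regardless of host endianness,
     * which is what ldq_be_p()/stq_be_p() provide inside QEMU.
     */
    static uint64_t ldq_be(const void *p)
    {
        const uint8_t *b = p;
        uint64_t v = 0;
        for (int i = 0; i < 8; i++) {
            v = (v << 8) | b[i];
        }
        return v;
    }

    static void stq_be(void *p, uint64_t v)
    {
        uint8_t *b = p;
        for (int i = 7; i >= 0; i--) {
            b[i] = v & 0xff;
            v >>= 8;
        }
    }

    int main(void)
    {
        uint64_t htab[2] = { 0 };   /* one HPTE: two big-endian doublewords */

        stq_be(&htab[0], HPTE_VALID | HPTE_DIRTY);          /* mark valid + dirty */
        stq_be(&htab[0], ldq_be(&htab[0]) & ~HPTE_DIRTY);   /* clean it again */
        printf("valid=%d dirty=%d\n",
               !!(ldq_be(&htab[0]) & HPTE_VALID),
               !!(ldq_be(&htab[0]) & HPTE_DIRTY));
        return 0;
    }
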
diff --git a/hw/ppc/spapr_caps.c b/hw/ppc/spapr_caps.c
index 904bff8..815c94e 100644
--- a/hw/ppc/spapr_caps.c
+++ b/hw/ppc/spapr_caps.c
@@ -34,6 +34,7 @@
#include "kvm_ppc.h"
#include "migration/vmstate.h"
#include "system/tcg.h"
+#include "system/hostmem.h"
#include "hw/ppc/spapr.h"
@@ -696,6 +697,34 @@ static void cap_ail_mode_3_apply(SpaprMachineState *spapr,
}
}
+static void cap_dawr1_apply(SpaprMachineState *spapr, uint8_t val,
+ Error **errp)
+{
+ ERRP_GUARD();
+
+ if (!val) {
+ return; /* Disabled by default */
+ }
+
+ if (!ppc_type_check_compat(MACHINE(spapr)->cpu_type,
+ CPU_POWERPC_LOGICAL_3_10, 0,
+ spapr->max_compat_pvr)) {
+ error_setg(errp, "DAWR1 supported only on POWER10 and later CPUs");
+ error_append_hint(errp, "Try appending -machine cap-dawr1=off\n");
+ return;
+ }
+
+ if (kvm_enabled()) {
+ if (!kvmppc_has_cap_dawr1()) {
+ error_setg(errp, "DAWR1 not supported by KVM.");
+ error_append_hint(errp, "Try appending -machine cap-dawr1=off");
+ } else if (kvmppc_set_cap_dawr1(val) < 0) {
+ error_setg(errp, "Error enabling cap-dawr1 with KVM.");
+ error_append_hint(errp, "Try appending -machine cap-dawr1=off");
+ }
+ }
+}
+
SpaprCapabilityInfo capability_table[SPAPR_CAP_NUM] = {
[SPAPR_CAP_HTM] = {
.name = "htm",
@@ -831,6 +860,15 @@ SpaprCapabilityInfo capability_table[SPAPR_CAP_NUM] = {
.type = "bool",
.apply = cap_ail_mode_3_apply,
},
+ [SPAPR_CAP_DAWR1] = {
+ .name = "dawr1",
+ .description = "Allow 2nd Data Address Watchpoint Register (DAWR1)",
+ .index = SPAPR_CAP_DAWR1,
+ .get = spapr_cap_get_bool,
+ .set = spapr_cap_set_bool,
+ .type = "bool",
+ .apply = cap_dawr1_apply,
+ },
};
static SpaprCapabilities default_caps_with_cpu(SpaprMachineState *spapr,
@@ -841,6 +879,11 @@ static SpaprCapabilities default_caps_with_cpu(SpaprMachineState *spapr,
caps = smc->default_caps;
+ if (!ppc_type_check_compat(cputype, CPU_POWERPC_LOGICAL_3_10,
+ 0, spapr->max_compat_pvr)) {
+ caps.caps[SPAPR_CAP_DAWR1] = SPAPR_CAP_OFF;
+ }
+
if (!ppc_type_check_compat(cputype, CPU_POWERPC_LOGICAL_3_00,
0, spapr->max_compat_pvr)) {
caps.caps[SPAPR_CAP_LARGE_DECREMENTER] = SPAPR_CAP_OFF;
@@ -975,6 +1018,7 @@ SPAPR_CAP_MIG_STATE(ccf_assist, SPAPR_CAP_CCF_ASSIST);
SPAPR_CAP_MIG_STATE(fwnmi, SPAPR_CAP_FWNMI);
SPAPR_CAP_MIG_STATE(rpt_invalidate, SPAPR_CAP_RPT_INVALIDATE);
SPAPR_CAP_MIG_STATE(ail_mode_3, SPAPR_CAP_AIL_MODE_3);
+SPAPR_CAP_MIG_STATE(dawr1, SPAPR_CAP_DAWR1);
void spapr_caps_init(SpaprMachineState *spapr)
{
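
cap_dawr1_apply() above follows the established spapr capability pattern: a capability_table entry wires up bool get/set accessors and an apply hook that validates the requested value against the CPU compatibility level and the accelerator, failing with a hint when it cannot be honoured. A compressed, self-contained sketch of that table-plus-apply-hook shape (the struct, field names and the hard-coded CPU check are illustrative, not the QEMU types):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    typedef struct Cap {
        const char *name;
        bool value;                              /* requested setting */
        /* Validate the requested value; return false and set *why on failure. */
        bool (*apply)(bool value, const char **why);
    } Cap;

    static bool cap_dawr1_apply(bool value, const char **why)
    {
        bool cpu_is_power10_or_later = false;    /* assumed host/CPU state */

        if (!value) {
            return true;                         /* disabled: nothing to check */
        }
        if (!cpu_is_power10_or_later) {
            *why = "DAWR1 supported only on POWER10 and later CPUs "
                   "(try cap-dawr1=off)";
            return false;
        }
        return true;
    }

    int main(void)
    {
        Cap caps[] = {
            { .name = "dawr1", .value = true, .apply = cap_dawr1_apply },
        };

        for (size_t i = 0; i < sizeof(caps) / sizeof(caps[0]); i++) {
            const char *why = NULL;
            if (!caps[i].apply(caps[i].value, &why)) {
                fprintf(stderr, "cap-%s: %s\n", caps[i].name, why);
                return 1;
            }
        }
        return 0;
    }

On the command line the new capability is toggled like the existing spapr caps, e.g. -machine pseries,cap-dawr1=off, which is also what the hint text emitted above suggests.
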
diff --git a/hw/ppc/spapr_cpu_core.c b/hw/ppc/spapr_cpu_core.c
index 9e0e064..0671d9e 100644
--- a/hw/ppc/spapr_cpu_core.c
+++ b/hw/ppc/spapr_cpu_core.c
@@ -273,6 +273,8 @@ static bool spapr_realize_vcpu(PowerPCCPU *cpu, SpaprMachineState *spapr,
env->spr_cb[SPR_PIR].default_value = cs->cpu_index;
env->spr_cb[SPR_TIR].default_value = thread_index;
+ env->spr_cb[SPR_HASHPKEYR].default_value = spapr->hashpkey_val;
+
cpu_ppc_set_1lpar(cpu);
/* Set time-base frequency to 512 MHz. vhyp must be set first. */
diff --git a/hw/ppc/spapr_hcall.c b/hw/ppc/spapr_hcall.c
index f987ff3..406aea4 100644
--- a/hw/ppc/spapr_hcall.c
+++ b/hw/ppc/spapr_hcall.c
@@ -580,6 +580,8 @@ static target_ulong h_confer(PowerPCCPU *cpu, SpaprMachineState *spapr,
CPUState *cs = CPU(cpu);
SpaprCpuState *spapr_cpu;
+ assert(tcg_enabled()); /* KVM will have handled this */
+
/*
* -1 means confer to all other CPUs without dispatch counter check,
* otherwise it's a targeted confer.
@@ -820,11 +822,12 @@ static target_ulong h_set_mode_resource_set_ciabr(PowerPCCPU *cpu,
return H_SUCCESS;
}
-static target_ulong h_set_mode_resource_set_dawr0(PowerPCCPU *cpu,
- SpaprMachineState *spapr,
- target_ulong mflags,
- target_ulong value1,
- target_ulong value2)
+static target_ulong h_set_mode_resource_set_dawr(PowerPCCPU *cpu,
+ SpaprMachineState *spapr,
+ target_ulong mflags,
+ target_ulong resource,
+ target_ulong value1,
+ target_ulong value2)
{
CPUPPCState *env = &cpu->env;
@@ -837,8 +840,15 @@ static target_ulong h_set_mode_resource_set_dawr0(PowerPCCPU *cpu,
return H_P4;
}
- ppc_store_dawr0(env, value1);
- ppc_store_dawrx0(env, value2);
+ if (resource == H_SET_MODE_RESOURCE_SET_DAWR0) {
+ ppc_store_dawr0(env, value1);
+ ppc_store_dawrx0(env, value2);
+ } else if (resource == H_SET_MODE_RESOURCE_SET_DAWR1) {
+ ppc_store_dawr1(env, value1);
+ ppc_store_dawrx1(env, value2);
+ } else {
+ g_assert_not_reached();
+ }
return H_SUCCESS;
}
@@ -917,8 +927,9 @@ static target_ulong h_set_mode(PowerPCCPU *cpu, SpaprMachineState *spapr,
args[3]);
break;
case H_SET_MODE_RESOURCE_SET_DAWR0:
- ret = h_set_mode_resource_set_dawr0(cpu, spapr, args[0], args[2],
- args[3]);
+ case H_SET_MODE_RESOURCE_SET_DAWR1:
+ ret = h_set_mode_resource_set_dawr(cpu, spapr, args[0], args[1],
+ args[2], args[3]);
break;
case H_SET_MODE_RESOURCE_LE:
ret = h_set_mode_resource_le(cpu, spapr, args[0], args[2], args[3]);
diff --git a/hw/ppc/spapr_nested.c b/hw/ppc/spapr_nested.c
index 23958c6..201f629 100644
--- a/hw/ppc/spapr_nested.c
+++ b/hw/ppc/spapr_nested.c
@@ -65,10 +65,9 @@ static
SpaprMachineStateNestedGuest *spapr_get_nested_guest(SpaprMachineState *spapr,
target_ulong guestid)
{
- SpaprMachineStateNestedGuest *guest;
-
- guest = g_hash_table_lookup(spapr->nested.guests, GINT_TO_POINTER(guestid));
- return guest;
+ return spapr->nested.guests ?
+ g_hash_table_lookup(spapr->nested.guests,
+ GINT_TO_POINTER(guestid)) : NULL;
}
bool spapr_get_pate_nested_papr(SpaprMachineState *spapr, PowerPCCPU *cpu,
@@ -594,26 +593,37 @@ static bool spapr_nested_vcpu_check(SpaprMachineStateNestedGuest *guest,
return false;
}
-static void *get_vcpu_state_ptr(SpaprMachineStateNestedGuest *guest,
- target_ulong vcpuid)
+static void *get_vcpu_state_ptr(SpaprMachineState *spapr,
+ SpaprMachineStateNestedGuest *guest,
+ target_ulong vcpuid)
{
assert(spapr_nested_vcpu_check(guest, vcpuid, false));
return &guest->vcpus[vcpuid].state;
}
-static void *get_vcpu_ptr(SpaprMachineStateNestedGuest *guest,
- target_ulong vcpuid)
+static void *get_vcpu_ptr(SpaprMachineState *spapr,
+ SpaprMachineStateNestedGuest *guest,
+ target_ulong vcpuid)
{
assert(spapr_nested_vcpu_check(guest, vcpuid, false));
return &guest->vcpus[vcpuid];
}
-static void *get_guest_ptr(SpaprMachineStateNestedGuest *guest,
+static void *get_guest_ptr(SpaprMachineState *spapr,
+ SpaprMachineStateNestedGuest *guest,
target_ulong vcpuid)
{
return guest; /* for GSBE_NESTED */
}
+static void *get_machine_ptr(SpaprMachineState *spapr,
+ SpaprMachineStateNestedGuest *guest,
+ target_ulong vcpuid)
+{
+ /* ignore guest and vcpuid for this */
+ return &spapr->nested;
+}
+
/*
* set=1 means the L1 is trying to set some state
* set=0 means the L1 is trying to get some state
@@ -1013,7 +1023,15 @@ struct guest_state_element_type guest_state_element_types[] = {
GSBE_NESTED_VCPU(GSB_VCPU_OUT_BUFFER, 0x10, runbufout, copy_state_runbuf),
GSBE_NESTED_VCPU(GSB_VCPU_OUT_BUF_MIN_SZ, 0x8, runbufout, out_buf_min_size),
GSBE_NESTED_VCPU(GSB_VCPU_HDEC_EXPIRY_TB, 0x8, hdecr_expiry_tb,
- copy_state_hdecr)
+ copy_state_hdecr),
+ GSBE_NESTED_MACHINE_DW(GSB_L0_GUEST_HEAP_INUSE, l0_guest_heap_inuse),
+ GSBE_NESTED_MACHINE_DW(GSB_L0_GUEST_HEAP_MAX, l0_guest_heap_max),
+ GSBE_NESTED_MACHINE_DW(GSB_L0_GUEST_PGTABLE_SIZE_INUSE,
+ l0_guest_pgtable_size_inuse),
+ GSBE_NESTED_MACHINE_DW(GSB_L0_GUEST_PGTABLE_SIZE_MAX,
+ l0_guest_pgtable_size_max),
+ GSBE_NESTED_MACHINE_DW(GSB_L0_GUEST_PGTABLE_RECLAIMED,
+ l0_guest_pgtable_reclaimed),
};
void spapr_nested_gsb_init(void)
@@ -1031,8 +1049,13 @@ void spapr_nested_gsb_init(void)
else if (type->id >= GSB_VCPU_IN_BUFFER)
/* 0x0c00 - 0xf000 Thread + RW */
type->flags = 0;
+ else if (type->id >= GSB_L0_GUEST_HEAP_INUSE)
+ /* 0x0800 - 0x0804 Host-wide counters + RO */
+ type->flags = GUEST_STATE_ELEMENT_TYPE_FLAG_HOST_WIDE |
+ GUEST_STATE_ELEMENT_TYPE_FLAG_READ_ONLY;
else if (type->id >= GSB_VCPU_LPVR)
- /* 0x0003 - 0x0bff Guest + RW */
+ /* 0x0003 - 0x07ff Guest + RW */
type->flags = GUEST_STATE_ELEMENT_TYPE_FLAG_GUEST_WIDE;
else if (type->id >= GSB_HV_VCPU_STATE_SIZE)
/* 0x0001 - 0x0002 Guest + RO */
@@ -1139,18 +1162,26 @@ static bool guest_state_request_check(struct guest_state_request *gsr)
return false;
}
- if (type->flags & GUEST_STATE_ELEMENT_TYPE_FLAG_GUEST_WIDE) {
+ if (type->flags & GUEST_STATE_ELEMENT_TYPE_FLAG_HOST_WIDE) {
+ /* Host-wide elements can't be combined with other element types */
+ if (!(gsr->flags & GUEST_STATE_REQUEST_HOST_WIDE)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "trying to get/set a host wide "
+ "Element ID:%04x.\n", id);
+ return false;
+ }
+ } else if (type->flags & GUEST_STATE_ELEMENT_TYPE_FLAG_GUEST_WIDE) {
/* guest wide element type */
if (!(gsr->flags & GUEST_STATE_REQUEST_GUEST_WIDE)) {
- qemu_log_mask(LOG_GUEST_ERROR, "trying to set a guest wide "
+ qemu_log_mask(LOG_GUEST_ERROR, "trying to get/set a guest wide "
"Element ID:%04x.\n", id);
return false;
}
} else {
/* thread wide element type */
- if (gsr->flags & GUEST_STATE_REQUEST_GUEST_WIDE) {
- qemu_log_mask(LOG_GUEST_ERROR, "trying to set a thread wide "
- "Element ID:%04x.\n", id);
+ if (gsr->flags & (GUEST_STATE_REQUEST_GUEST_WIDE |
+ GUEST_STATE_REQUEST_HOST_WIDE)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "trying to get/set a thread wide"
+ " Element ID:%04x.\n", id);
return false;
}
}
@@ -1419,7 +1450,8 @@ static target_ulong h_guest_create_vcpu(PowerPCCPU *cpu,
return H_SUCCESS;
}
-static target_ulong getset_state(SpaprMachineStateNestedGuest *guest,
+static target_ulong getset_state(SpaprMachineState *spapr,
+ SpaprMachineStateNestedGuest *guest,
uint64_t vcpuid,
struct guest_state_request *gsr)
{
@@ -1452,7 +1484,7 @@ static target_ulong getset_state(SpaprMachineStateNestedGuest *guest,
/* Get pointer to guest data to get/set */
if (type->location && type->copy) {
- ptr = type->location(guest, vcpuid);
+ ptr = type->location(spapr, guest, vcpuid);
assert(ptr);
if (!~(type->mask) && is_gsr_invalid(gsr, element, type)) {
return H_INVALID_ELEMENT_VALUE;
@@ -1469,6 +1501,7 @@ next_element:
}
static target_ulong map_and_getset_state(PowerPCCPU *cpu,
+ SpaprMachineState *spapr,
SpaprMachineStateNestedGuest *guest,
uint64_t vcpuid,
struct guest_state_request *gsr)
@@ -1492,7 +1525,7 @@ static target_ulong map_and_getset_state(PowerPCCPU *cpu,
goto out1;
}
- rc = getset_state(guest, vcpuid, gsr);
+ rc = getset_state(spapr, guest, vcpuid, gsr);
out1:
address_space_unmap(CPU(cpu)->as, gsr->gsb, len, is_write, len);
@@ -1510,27 +1543,46 @@ static target_ulong h_guest_getset_state(PowerPCCPU *cpu,
target_ulong buf = args[3];
target_ulong buflen = args[4];
struct guest_state_request gsr;
- SpaprMachineStateNestedGuest *guest;
+ SpaprMachineStateNestedGuest *guest = NULL;
- guest = spapr_get_nested_guest(spapr, lpid);
- if (!guest) {
- return H_P2;
- }
gsr.buf = buf;
assert(buflen <= GSB_MAX_BUF_SIZE);
gsr.len = buflen;
gsr.flags = 0;
- if (flags & H_GUEST_GETSET_STATE_FLAG_GUEST_WIDE) {
+
+ /* Works for both get/set state */
+ if ((flags & H_GUEST_GET_STATE_FLAGS_GUEST_WIDE) ||
+ (flags & H_GUEST_SET_STATE_FLAGS_GUEST_WIDE)) {
gsr.flags |= GUEST_STATE_REQUEST_GUEST_WIDE;
}
- if (flags & ~H_GUEST_GETSET_STATE_FLAG_GUEST_WIDE) {
- return H_PARAMETER; /* flag not supported yet */
- }
if (set) {
+ if (flags & ~H_GUEST_SET_STATE_FLAGS_MASK) {
+ return H_PARAMETER;
+ }
gsr.flags |= GUEST_STATE_REQUEST_SET;
+ } else {
+ /*
+ * Reject reserved flag bits, and reject requests that set both
+ * the GUEST-wide and HOST-wide bits at once.
+ */
+ if ((flags & ~H_GUEST_GET_STATE_FLAGS_MASK) ||
+ (flags == H_GUEST_GET_STATE_FLAGS_MASK)) {
+ return H_PARAMETER;
+ }
+
+ if (flags & H_GUEST_GET_STATE_FLAGS_HOST_WIDE) {
+ gsr.flags |= GUEST_STATE_REQUEST_HOST_WIDE;
+ }
+ }
+
+ if (!(gsr.flags & GUEST_STATE_REQUEST_HOST_WIDE)) {
+ guest = spapr_get_nested_guest(spapr, lpid);
+ if (!guest) {
+ return H_P2;
+ }
}
- return map_and_getset_state(cpu, guest, vcpuid, &gsr);
+ return map_and_getset_state(cpu, spapr, guest, vcpuid, &gsr);
}
static target_ulong h_guest_set_state(PowerPCCPU *cpu,
@@ -1641,7 +1693,8 @@ static int get_exit_ids(uint64_t srr0, uint16_t ids[16])
return nr;
}
-static void exit_process_output_buffer(PowerPCCPU *cpu,
+static void exit_process_output_buffer(SpaprMachineState *spapr,
+ PowerPCCPU *cpu,
SpaprMachineStateNestedGuest *guest,
target_ulong vcpuid,
target_ulong *r3)
@@ -1679,7 +1732,7 @@ static void exit_process_output_buffer(PowerPCCPU *cpu,
gsr.gsb = gsb;
gsr.len = VCPU_OUT_BUF_MIN_SZ;
gsr.flags = 0; /* get + never guest wide */
- getset_state(guest, vcpuid, &gsr);
+ getset_state(spapr, guest, vcpuid, &gsr);
address_space_unmap(CPU(cpu)->as, gsb, len, true, len);
return;
@@ -1705,7 +1758,7 @@ void spapr_exit_nested_papr(SpaprMachineState *spapr, PowerPCCPU *cpu, int excp)
exit_nested_store_l2(cpu, excp, vcpu);
/* do the output buffer for run_vcpu*/
- exit_process_output_buffer(cpu, guest, vcpuid, &r3_return);
+ exit_process_output_buffer(spapr, cpu, guest, vcpuid, &r3_return);
assert(env->spr[SPR_LPIDR] != 0);
nested_load_state(cpu, spapr_cpu->nested_host_state);
@@ -1820,7 +1873,7 @@ static target_ulong h_guest_run_vcpu(PowerPCCPU *cpu,
gsr.buf = vcpu->runbufin.addr;
gsr.len = vcpu->runbufin.size;
gsr.flags = GUEST_STATE_REQUEST_SET; /* Thread wide + writing */
- rc = map_and_getset_state(cpu, guest, vcpuid, &gsr);
+ rc = map_and_getset_state(cpu, spapr, guest, vcpuid, &gsr);
if (rc == H_SUCCESS) {
nested_papr_run_vcpu(cpu, lpid, vcpu);
} else {
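
The spapr_nested.c changes above thread SpaprMachineState through every guest-state element location() callback so that the new host-wide elements (GSB_L0_GUEST_*) can resolve against machine-level counters via get_machine_ptr(), while guest-wide and per-vCPU elements keep resolving as before. A minimal sketch of that dispatch-table shape, with made-up structures and element IDs that only echo the ranges in the comments above, not the QEMU definitions:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef struct Machine { uint64_t heap_inuse; } Machine;
    typedef struct Guest   { uint64_t lpcr; }       Guest;
    typedef struct Vcpu    { uint64_t gpr0; }       Vcpu;

    static Vcpu vcpus[2] = { { .gpr0 = 3 }, { .gpr0 = 4 } };

    /* Each element type decides where its backing storage lives. */
    typedef struct ElementType {
        uint16_t id;
        void *(*location)(Machine *m, Guest *g, unsigned vcpuid);
    } ElementType;

    static void *loc_machine(Machine *m, Guest *g, unsigned vcpuid)
    {
        (void)g; (void)vcpuid;      /* host-wide: guest and vcpu are ignored */
        return &m->heap_inuse;
    }

    static void *loc_guest(Machine *m, Guest *g, unsigned vcpuid)
    {
        (void)m; (void)vcpuid;
        return &g->lpcr;
    }

    static void *loc_vcpu(Machine *m, Guest *g, unsigned vcpuid)
    {
        (void)m; (void)g;
        return &vcpus[vcpuid].gpr0;
    }

    static const ElementType types[] = {
        { 0x0800, loc_machine },    /* host-wide counter */
        { 0x0003, loc_guest },      /* guest-wide */
        { 0x0c00, loc_vcpu },       /* per-thread */
    };

    int main(void)
    {
        Machine m = { .heap_inuse = 42 };
        Guest g = { .lpcr = 7 };

        for (size_t i = 0; i < sizeof(types) / sizeof(types[0]); i++) {
            uint64_t *p = types[i].location(&m, &g, 1);
            printf("id 0x%04x -> %llu\n", (unsigned)types[i].id,
                   (unsigned long long)*p);
        }
        return 0;
    }
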
diff --git a/hw/ppc/virtex_ml507.c b/hw/ppc/virtex_ml507.c
index a01354d..17115be 100644
--- a/hw/ppc/virtex_ml507.c
+++ b/hw/ppc/virtex_ml507.c
@@ -119,7 +119,7 @@ static void main_cpu_reset(void *opaque)
/* Create a mapping spanning the 32bit addr space. */
booke_set_tlb(&env->tlb.tlbe[0], 0, 0, 1U << 31);
booke_set_tlb(&env->tlb.tlbe[1], 0x80000000, 0x80000000, 1U << 31);
- env->gpr[6] = tswap32(EPAPR_MAGIC);
+ env->gpr[6] = EPAPR_MAGIC;
env->gpr[7] = bi->ima_size;
}
diff --git a/hw/s390x/s390-virtio-ccw.c b/hw/s390x/s390-virtio-ccw.c
index a9b3db1..75b3218 100644
--- a/hw/s390x/s390-virtio-ccw.c
+++ b/hw/s390x/s390-virtio-ccw.c
@@ -41,6 +41,7 @@
#include "hw/s390x/tod.h"
#include "system/system.h"
#include "system/cpus.h"
+#include "system/hostmem.h"
#include "target/s390x/kvm/pv.h"
#include "migration/blocker.h"
#include "qapi/visitor.h"
diff --git a/hw/ssi/pnv_spi.c b/hw/ssi/pnv_spi.c
index 15e25bd..1260703 100644
--- a/hw/ssi/pnv_spi.c
+++ b/hw/ssi/pnv_spi.c
@@ -19,6 +19,8 @@
#define PNV_SPI_OPCODE_LO_NIBBLE(x) (x & 0x0F)
#define PNV_SPI_MASKED_OPCODE(x) (x & 0xF0)
+#define PNV_SPI_FIFO_SIZE 16
+#define RDR_MATCH_FAILURE_LIMIT 16
/*
* Macro from include/hw/ppc/fdt.h
@@ -35,48 +37,14 @@
} \
} while (0)
-/* PnvXferBuffer */
-typedef struct PnvXferBuffer {
-
- uint32_t len;
- uint8_t *data;
-
-} PnvXferBuffer;
-
-/* pnv_spi_xfer_buffer_methods */
-static PnvXferBuffer *pnv_spi_xfer_buffer_new(void)
-{
- PnvXferBuffer *payload = g_malloc0(sizeof(*payload));
-
- return payload;
-}
-
-static void pnv_spi_xfer_buffer_free(PnvXferBuffer *payload)
-{
- g_free(payload->data);
- g_free(payload);
-}
-
-static uint8_t *pnv_spi_xfer_buffer_write_ptr(PnvXferBuffer *payload,
- uint32_t offset, uint32_t length)
-{
- if (payload->len < (offset + length)) {
- payload->len = offset + length;
- payload->data = g_realloc(payload->data, payload->len);
- }
- return &payload->data[offset];
-}
-
static bool does_rdr_match(PnvSpi *s)
{
/*
* According to spec, the mask bits that are 0 are compared and the
* bits that are 1 are ignored.
*/
- uint16_t rdr_match_mask = GETFIELD(SPI_MM_RDR_MATCH_MASK,
- s->regs[SPI_MM_REG]);
- uint16_t rdr_match_val = GETFIELD(SPI_MM_RDR_MATCH_VAL,
- s->regs[SPI_MM_REG]);
+ uint16_t rdr_match_mask = GETFIELD(SPI_MM_RDR_MATCH_MASK, s->regs[SPI_MM_REG]);
+ uint16_t rdr_match_val = GETFIELD(SPI_MM_RDR_MATCH_VAL, s->regs[SPI_MM_REG]);
if ((~rdr_match_mask & rdr_match_val) == ((~rdr_match_mask) &
GETFIELD(PPC_BITMASK(48, 63), s->regs[SPI_RCV_DATA_REG]))) {
@@ -107,8 +75,8 @@ static uint8_t get_from_offset(PnvSpi *s, uint8_t offset)
return byte;
}
-static uint8_t read_from_frame(PnvSpi *s, uint8_t *read_buf, uint8_t nr_bytes,
- uint8_t ecc_count, uint8_t shift_in_count)
+static uint8_t read_from_frame(PnvSpi *s, uint8_t nr_bytes, uint8_t ecc_count,
+ uint8_t shift_in_count)
{
uint8_t byte;
int count = 0;
@@ -118,20 +86,24 @@ static uint8_t read_from_frame(PnvSpi *s, uint8_t *read_buf, uint8_t nr_bytes,
if ((ecc_count != 0) &&
(shift_in_count == (PNV_SPI_REG_SIZE + ecc_count))) {
shift_in_count = 0;
- } else {
- byte = read_buf[count];
+ } else if (!fifo8_is_empty(&s->rx_fifo)) {
+ byte = fifo8_pop(&s->rx_fifo);
trace_pnv_spi_shift_rx(byte, count);
s->regs[SPI_RCV_DATA_REG] = (s->regs[SPI_RCV_DATA_REG] << 8) | byte;
+ } else {
+ qemu_log_mask(LOG_GUEST_ERROR, "pnv_spi: Reading empty RX_FIFO\n");
}
count++;
} /* end of while */
return shift_in_count;
}
-static void spi_response(PnvSpi *s, int bits, PnvXferBuffer *rsp_payload)
+static void spi_response(PnvSpi *s)
{
uint8_t ecc_count;
uint8_t shift_in_count;
+ uint32_t rx_len;
+ int i;
/*
* Processing here must handle:
@@ -144,13 +116,14 @@ static void spi_response(PnvSpi *s, int bits, PnvXferBuffer *rsp_payload)
* First check that the response payload is the exact same
* number of bytes as the request payload was
*/
- if (rsp_payload->len != (s->N1_bytes + s->N2_bytes)) {
+ rx_len = fifo8_num_used(&s->rx_fifo);
+ if (rx_len != (s->N1_bytes + s->N2_bytes)) {
qemu_log_mask(LOG_GUEST_ERROR, "Invalid response payload size in "
"bytes, expected %d, got %d\n",
- (s->N1_bytes + s->N2_bytes), rsp_payload->len);
+ (s->N1_bytes + s->N2_bytes), rx_len);
} else {
uint8_t ecc_control;
- trace_pnv_spi_rx_received(rsp_payload->len);
+ trace_pnv_spi_rx_received(rx_len);
trace_pnv_spi_log_Ncounts(s->N1_bits, s->N1_bytes, s->N1_tx,
s->N1_rx, s->N2_bits, s->N2_bytes, s->N2_tx, s->N2_rx);
/*
@@ -175,15 +148,23 @@ static void spi_response(PnvSpi *s, int bits, PnvXferBuffer *rsp_payload)
/* Handle the N1 portion of the frame first */
if (s->N1_rx != 0) {
trace_pnv_spi_rx_read_N1frame();
- shift_in_count = read_from_frame(s, &rsp_payload->data[0],
- s->N1_bytes, ecc_count, shift_in_count);
+ shift_in_count = read_from_frame(s, s->N1_bytes, ecc_count, shift_in_count);
}
/* Handle the N2 portion of the frame */
if (s->N2_rx != 0) {
+ /* Drain the N1 bytes from rx_fifo if they were not consumed above */
+ if (s->N1_rx == 0) {
+ for (i = 0; i < s->N1_bytes; i++) {
+ if (!fifo8_is_empty(&s->rx_fifo)) {
+ fifo8_pop(&s->rx_fifo);
+ } else {
+ qemu_log_mask(LOG_GUEST_ERROR, "pnv_spi: Reading empty"
+ " RX_FIFO\n");
+ }
+ }
+ }
trace_pnv_spi_rx_read_N2frame();
- shift_in_count = read_from_frame(s,
- &rsp_payload->data[s->N1_bytes], s->N2_bytes,
- ecc_count, shift_in_count);
+ shift_in_count = read_from_frame(s, s->N2_bytes, ecc_count, shift_in_count);
}
if ((s->N1_rx + s->N2_rx) > 0) {
/*
@@ -210,48 +191,41 @@ static void spi_response(PnvSpi *s, int bits, PnvXferBuffer *rsp_payload)
} /* end of else */
} /* end of spi_response() */
-static void transfer(PnvSpi *s, PnvXferBuffer *payload)
+static void transfer(PnvSpi *s)
{
- uint32_t tx;
- uint32_t rx;
- PnvXferBuffer *rsp_payload = NULL;
+ uint32_t tx, rx, payload_len;
+ uint8_t rx_byte;
- rsp_payload = pnv_spi_xfer_buffer_new();
- if (!rsp_payload) {
- return;
- }
- for (int offset = 0; offset < payload->len; offset += s->transfer_len) {
+ payload_len = fifo8_num_used(&s->tx_fifo);
+ for (int offset = 0; offset < payload_len; offset += s->transfer_len) {
tx = 0;
for (int i = 0; i < s->transfer_len; i++) {
- if ((offset + i) >= payload->len) {
+ if ((offset + i) >= payload_len) {
tx <<= 8;
+ } else if (!fifo8_is_empty(&s->tx_fifo)) {
+ tx = (tx << 8) | fifo8_pop(&s->tx_fifo);
} else {
- tx = (tx << 8) | payload->data[offset + i];
+ qemu_log_mask(LOG_GUEST_ERROR, "pnv_spi: TX_FIFO underflow\n");
}
}
rx = ssi_transfer(s->ssi_bus, tx);
for (int i = 0; i < s->transfer_len; i++) {
- if ((offset + i) >= payload->len) {
+ if ((offset + i) >= payload_len) {
+ break;
+ }
+ rx_byte = (rx >> (8 * (s->transfer_len - 1) - i * 8)) & 0xFF;
+ if (!fifo8_is_full(&s->rx_fifo)) {
+ fifo8_push(&s->rx_fifo, rx_byte);
+ } else {
+ qemu_log_mask(LOG_GUEST_ERROR, "pnv_spi: RX_FIFO is full\n");
break;
}
- *(pnv_spi_xfer_buffer_write_ptr(rsp_payload, rsp_payload->len, 1)) =
- (rx >> (8 * (s->transfer_len - 1) - i * 8)) & 0xFF;
}
}
- spi_response(s, s->N1_bits, rsp_payload);
- pnv_spi_xfer_buffer_free(rsp_payload);
-}
-
-static inline uint8_t get_seq_index(PnvSpi *s)
-{
- return GETFIELD(SPI_STS_SEQ_INDEX, s->status);
-}
-
-static inline void next_sequencer_fsm(PnvSpi *s)
-{
- uint8_t seq_index = get_seq_index(s);
- s->status = SETFIELD(SPI_STS_SEQ_INDEX, s->status, (seq_index + 1));
- s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_INDEX_INCREMENT);
+ spi_response(s);
+ /* Reset both FIFOs for the next frame */
+ fifo8_reset(&s->tx_fifo);
+ fifo8_reset(&s->rx_fifo);
}
/*
@@ -310,13 +284,11 @@ static void calculate_N1(PnvSpi *s, uint8_t opcode)
* If Forced Implicit mode and count control doesn't
* indicate transmit then reset the tx count to 0
*/
- if (GETFIELD(SPI_CTR_CFG_N1_CTRL_B2,
- s->regs[SPI_CTR_CFG_REG]) == 0) {
+ if (GETFIELD(SPI_CTR_CFG_N1_CTRL_B2, s->regs[SPI_CTR_CFG_REG]) == 0) {
s->N1_tx = 0;
}
/* If rx count control for N1 is set, load the rx value */
- if (GETFIELD(SPI_CTR_CFG_N1_CTRL_B3,
- s->regs[SPI_CTR_CFG_REG]) == 1) {
+ if (GETFIELD(SPI_CTR_CFG_N1_CTRL_B3, s->regs[SPI_CTR_CFG_REG]) == 1) {
s->N1_rx = s->N1_bytes;
}
}
@@ -328,8 +300,7 @@ static void calculate_N1(PnvSpi *s, uint8_t opcode)
* cap the size at a max of 64 bits or 72 bits and set the sequencer FSM
* error bit.
*/
- uint8_t ecc_control = GETFIELD(SPI_CLK_CFG_ECC_CTRL,
- s->regs[SPI_CLK_CFG_REG]);
+ uint8_t ecc_control = GETFIELD(SPI_CLK_CFG_ECC_CTRL, s->regs[SPI_CLK_CFG_REG]);
if (ecc_control == 0 || ecc_control == 2) {
if (s->N1_bytes > (PNV_SPI_REG_SIZE + 1)) {
qemu_log_mask(LOG_GUEST_ERROR, "Unsupported N1 shift size when "
@@ -340,8 +311,7 @@ static void calculate_N1(PnvSpi *s, uint8_t opcode)
}
} else if (s->N1_bytes > PNV_SPI_REG_SIZE) {
qemu_log_mask(LOG_GUEST_ERROR, "Unsupported N1 shift size, "
- "bytes = 0x%x, bits = 0x%x\n",
- s->N1_bytes, s->N1_bits);
+ "bytes = 0x%x, bits = 0x%x\n", s->N1_bytes, s->N1_bits);
s->N1_bytes = PNV_SPI_REG_SIZE;
s->N1_bits = s->N1_bytes * 8;
}
@@ -350,19 +320,10 @@ static void calculate_N1(PnvSpi *s, uint8_t opcode)
/*
* Shift_N1 operation handler method
*/
-static bool operation_shiftn1(PnvSpi *s, uint8_t opcode,
- PnvXferBuffer **payload, bool send_n1_alone)
+static bool operation_shiftn1(PnvSpi *s, uint8_t opcode, bool send_n1_alone)
{
uint8_t n1_count;
bool stop = false;
-
- /*
- * If there isn't a current payload left over from a stopped sequence
- * create a new one.
- */
- if (*payload == NULL) {
- *payload = pnv_spi_xfer_buffer_new();
- }
/*
* Use a combination of N1 counters to build the N1 portion of the
* transmit payload.
@@ -413,9 +374,13 @@ static bool operation_shiftn1(PnvSpi *s, uint8_t opcode,
*/
uint8_t n1_byte = 0x00;
n1_byte = get_from_offset(s, n1_count);
- trace_pnv_spi_tx_append("n1_byte", n1_byte, n1_count);
- *(pnv_spi_xfer_buffer_write_ptr(*payload, (*payload)->len, 1)) =
- n1_byte;
+ if (!fifo8_is_full(&s->tx_fifo)) {
+ trace_pnv_spi_tx_append("n1_byte", n1_byte, n1_count);
+ fifo8_push(&s->tx_fifo, n1_byte);
+ } else {
+ qemu_log_mask(LOG_GUEST_ERROR, "pnv_spi: TX_FIFO is full\n");
+ break;
+ }
} else {
/*
* We hit a shift_n1 opcode TX but the TDR is empty, tell the
@@ -436,16 +401,17 @@ static bool operation_shiftn1(PnvSpi *s, uint8_t opcode,
* - we are receiving and the RDR is empty so we allow the operation
* to proceed.
*/
- if ((s->N1_rx != 0) && (GETFIELD(SPI_STS_RDR_FULL,
- s->status) == 1)) {
+ if ((s->N1_rx != 0) && (GETFIELD(SPI_STS_RDR_FULL, s->status) == 1)) {
trace_pnv_spi_sequencer_stop_requested("shift N1"
"set for receive but RDR is full");
stop = true;
break;
- } else {
+ } else if (!fifo8_is_full(&s->tx_fifo)) {
trace_pnv_spi_tx_append_FF("n1_byte");
- *(pnv_spi_xfer_buffer_write_ptr(*payload, (*payload)->len, 1))
- = 0xff;
+ fifo8_push(&s->tx_fifo, 0xff);
+ } else {
+ qemu_log_mask(LOG_GUEST_ERROR, "pnv_spi: TX_FIFO is full\n");
+ break;
}
}
n1_count++;
@@ -486,15 +452,13 @@ static bool operation_shiftn1(PnvSpi *s, uint8_t opcode,
*/
if (send_n1_alone && !stop) {
/* We have a TX and a full TDR or an RX and an empty RDR */
- trace_pnv_spi_tx_request("Shifting N1 frame", (*payload)->len);
- transfer(s, *payload);
+ trace_pnv_spi_tx_request("Shifting N1 frame", fifo8_num_used(&s->tx_fifo));
+ transfer(s);
/* The N1 frame shift is complete so reset the N1 counters */
s->N2_bits = 0;
s->N2_bytes = 0;
s->N2_tx = 0;
s->N2_rx = 0;
- pnv_spi_xfer_buffer_free(*payload);
- *payload = NULL;
}
return stop;
} /* end of operation_shiftn1() */
@@ -552,13 +516,11 @@ static void calculate_N2(PnvSpi *s, uint8_t opcode)
* If Forced Implicit mode and count control doesn't
* indicate a receive then reset the rx count to 0
*/
- if (GETFIELD(SPI_CTR_CFG_N2_CTRL_B3,
- s->regs[SPI_CTR_CFG_REG]) == 0) {
+ if (GETFIELD(SPI_CTR_CFG_N2_CTRL_B3, s->regs[SPI_CTR_CFG_REG]) == 0) {
s->N2_rx = 0;
}
/* If tx count control for N2 is set, load the tx value */
- if (GETFIELD(SPI_CTR_CFG_N2_CTRL_B2,
- s->regs[SPI_CTR_CFG_REG]) == 1) {
+ if (GETFIELD(SPI_CTR_CFG_N2_CTRL_B2, s->regs[SPI_CTR_CFG_REG]) == 1) {
s->N2_tx = s->N2_bytes;
}
}
@@ -571,8 +533,7 @@ static void calculate_N2(PnvSpi *s, uint8_t opcode)
* cap the size at a max of 64 bits or 72 bits and set the sequencer FSM
* error bit.
*/
- uint8_t ecc_control = GETFIELD(SPI_CLK_CFG_ECC_CTRL,
- s->regs[SPI_CLK_CFG_REG]);
+ uint8_t ecc_control = GETFIELD(SPI_CLK_CFG_ECC_CTRL, s->regs[SPI_CLK_CFG_REG]);
if (ecc_control == 0 || ecc_control == 2) {
if (s->N2_bytes > (PNV_SPI_REG_SIZE + 1)) {
/* Unsupported N2 shift size when ECC enabled */
@@ -590,19 +551,10 @@ static void calculate_N2(PnvSpi *s, uint8_t opcode)
* Shift_N2 operation handler method
*/
-static bool operation_shiftn2(PnvSpi *s, uint8_t opcode,
- PnvXferBuffer **payload)
+static bool operation_shiftn2(PnvSpi *s, uint8_t opcode)
{
uint8_t n2_count;
bool stop = false;
-
- /*
- * If there isn't a current payload left over from a stopped sequence
- * create a new one.
- */
- if (*payload == NULL) {
- *payload = pnv_spi_xfer_buffer_new();
- }
/*
* Use a combination of N2 counters to build the N2 portion of the
* transmit payload.
@@ -629,44 +581,47 @@ static bool operation_shiftn2(PnvSpi *s, uint8_t opcode,
* code continue will end up building the payload twice in the same
* buffer since RDR full causes a sequence stop and restart.
*/
- if ((s->N2_rx != 0) &&
- (GETFIELD(SPI_STS_RDR_FULL, s->status) == 1)) {
+ if ((s->N2_rx != 0) && (GETFIELD(SPI_STS_RDR_FULL, s->status) == 1)) {
trace_pnv_spi_sequencer_stop_requested("shift N2 set"
"for receive but RDR is full");
stop = true;
break;
}
- if ((s->N2_tx != 0) && ((s->N1_tx + n2_count) <
- PNV_SPI_REG_SIZE)) {
+ if ((s->N2_tx != 0) && ((s->N1_tx + n2_count) < PNV_SPI_REG_SIZE)) {
/* Always append data for the N2 segment if it is set for TX */
uint8_t n2_byte = 0x00;
n2_byte = get_from_offset(s, (s->N1_tx + n2_count));
- trace_pnv_spi_tx_append("n2_byte", n2_byte, (s->N1_tx + n2_count));
- *(pnv_spi_xfer_buffer_write_ptr(*payload, (*payload)->len, 1))
- = n2_byte;
- } else {
+ if (!fifo8_is_full(&s->tx_fifo)) {
+ trace_pnv_spi_tx_append("n2_byte", n2_byte, (s->N1_tx + n2_count));
+ fifo8_push(&s->tx_fifo, n2_byte);
+ } else {
+ qemu_log_mask(LOG_GUEST_ERROR, "pnv_spi: TX_FIFO is full\n");
+ break;
+ }
+ } else if (!fifo8_is_full(&s->tx_fifo)) {
/*
* Regardless of whether or not N2 is set for TX or RX, we need
* the number of bytes in the payload to match the overall length
* of the operation.
*/
trace_pnv_spi_tx_append_FF("n2_byte");
- *(pnv_spi_xfer_buffer_write_ptr(*payload, (*payload)->len, 1))
- = 0xff;
+ fifo8_push(&s->tx_fifo, 0xff);
+ } else {
+ qemu_log_mask(LOG_GUEST_ERROR, "pnv_spi: TX_FIFO is full\n");
+ break;
}
n2_count++;
} /* end of while */
if (!stop) {
/* We have a TX and a full TDR or an RX and an empty RDR */
- trace_pnv_spi_tx_request("Shifting N2 frame", (*payload)->len);
- transfer(s, *payload);
+ trace_pnv_spi_tx_request("Shifting N2 frame", fifo8_num_used(&s->tx_fifo));
+ transfer(s);
/*
* If we are doing an N2 TX and the TDR is full we need to clear the
* TDR_full status. Do this here instead of up in the loop above so we
* don't log the message in every loop iteration.
*/
- if ((s->N2_tx != 0) &&
- (GETFIELD(SPI_STS_TDR_FULL, s->status) == 1)) {
+ if ((s->N2_tx != 0) && (GETFIELD(SPI_STS_TDR_FULL, s->status) == 1)) {
s->status = SETFIELD(SPI_STS_TDR_FULL, s->status, 0);
}
/*
@@ -682,8 +637,6 @@ static bool operation_shiftn2(PnvSpi *s, uint8_t opcode,
s->N1_bytes = 0;
s->N1_tx = 0;
s->N1_rx = 0;
- pnv_spi_xfer_buffer_free(*payload);
- *payload = NULL;
}
return stop;
} /* end of operation_shiftn2()*/
@@ -700,21 +653,9 @@ static void operation_sequencer(PnvSpi *s)
bool stop = false; /* Flag to stop the sequencer */
uint8_t opcode = 0;
uint8_t masked_opcode = 0;
+ uint8_t seq_index;
/*
- * PnvXferBuffer for containing the payload of the SPI frame.
- * This is a static because there are cases where a sequence has to stop
- * and wait for the target application to unload the RDR. If this occurs
- * during a sequence where N1 is not sent alone and instead combined with
- * N2 since the N1 tx length + the N2 tx length is less than the size of
- * the TDR.
- */
- static PnvXferBuffer *payload;
-
- if (payload == NULL) {
- payload = pnv_spi_xfer_buffer_new();
- }
- /*
* Clear the sequencer FSM error bit - general_SPI_status[3]
* before starting a sequence.
*/
@@ -727,11 +668,16 @@ static void operation_sequencer(PnvSpi *s)
s->status = SETFIELD(SPI_STS_SEQ_INDEX, s->status, 0);
}
/*
+ * The SPI_STS_SEQ_INDEX field of the status register is cached in the
+ * seq_index variable and written back at the end of operation_sequencer().
+ */
+ seq_index = GETFIELD(SPI_STS_SEQ_INDEX, s->status);
+ /*
* There are only 8 possible operation IDs to iterate through though
* some operations may cause more than one frame to be sequenced.
*/
- while (get_seq_index(s) < NUM_SEQ_OPS) {
- opcode = s->seq_op[get_seq_index(s)];
+ while (seq_index < NUM_SEQ_OPS) {
+ opcode = s->seq_op[seq_index];
/* Set sequencer state to decode */
s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_DECODE);
/*
@@ -748,7 +694,7 @@ static void operation_sequencer(PnvSpi *s)
case SEQ_OP_STOP:
s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_EXECUTE);
/* A stop operation in any position stops the sequencer */
- trace_pnv_spi_sequencer_op("STOP", get_seq_index(s));
+ trace_pnv_spi_sequencer_op("STOP", seq_index);
stop = true;
s->status = SETFIELD(SPI_STS_SHIFTER_FSM, s->status, FSM_IDLE);
@@ -759,7 +705,7 @@ static void operation_sequencer(PnvSpi *s)
case SEQ_OP_SELECT_SLAVE:
s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_EXECUTE);
- trace_pnv_spi_sequencer_op("SELECT_SLAVE", get_seq_index(s));
+ trace_pnv_spi_sequencer_op("SELECT_SLAVE", seq_index);
/*
* This device currently only supports a single responder
* connection at position 0. De-selecting a responder is fine
@@ -770,15 +716,12 @@ static void operation_sequencer(PnvSpi *s)
if (s->responder_select == 0) {
trace_pnv_spi_shifter_done();
qemu_set_irq(s->cs_line[0], 1);
- s->status = SETFIELD(SPI_STS_SEQ_INDEX, s->status,
- (get_seq_index(s) + 1));
+ seq_index++;
s->status = SETFIELD(SPI_STS_SHIFTER_FSM, s->status, FSM_DONE);
} else if (s->responder_select != 1) {
qemu_log_mask(LOG_GUEST_ERROR, "Slave selection other than 1 "
- "not supported, select = 0x%x\n",
- s->responder_select);
- trace_pnv_spi_sequencer_stop_requested("invalid "
- "responder select");
+ "not supported, select = 0x%x\n", s->responder_select);
+ trace_pnv_spi_sequencer_stop_requested("invalid responder select");
s->status = SETFIELD(SPI_STS_SHIFTER_FSM, s->status, FSM_IDLE);
stop = true;
} else {
@@ -798,13 +741,15 @@ static void operation_sequencer(PnvSpi *s)
* applies once a valid responder select has occurred.
*/
s->shift_n1_done = false;
- next_sequencer_fsm(s);
+ seq_index++;
+ s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status,
+ SEQ_STATE_INDEX_INCREMENT);
}
break;
case SEQ_OP_SHIFT_N1:
s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_EXECUTE);
- trace_pnv_spi_sequencer_op("SHIFT_N1", get_seq_index(s));
+ trace_pnv_spi_sequencer_op("SHIFT_N1", seq_index);
/*
* Only allow a shift_n1 when the state is not IDLE or DONE.
* In either of those two cases the sequencer is not in a proper
@@ -836,13 +781,13 @@ static void operation_sequencer(PnvSpi *s)
* transmission to the responder without requiring a refill of
* the TDR between the two operations.
*/
- if (PNV_SPI_MASKED_OPCODE(s->seq_op[get_seq_index(s) + 1])
- == SEQ_OP_SHIFT_N2) {
+ if ((seq_index != 7) &&
+ PNV_SPI_MASKED_OPCODE(s->seq_op[(seq_index + 1)]) ==
+ SEQ_OP_SHIFT_N2) {
send_n1_alone = false;
}
- s->status = SETFIELD(SPI_STS_SHIFTER_FSM, s->status,
- FSM_SHIFT_N1);
- stop = operation_shiftn1(s, opcode, &payload, send_n1_alone);
+ s->status = SETFIELD(SPI_STS_SHIFTER_FSM, s->status, FSM_SHIFT_N1);
+ stop = operation_shiftn1(s, opcode, send_n1_alone);
if (stop) {
/*
* The operation code says to stop, this can occur if:
@@ -859,27 +804,27 @@ static void operation_sequencer(PnvSpi *s)
s->shift_n1_done = true;
s->status = SETFIELD(SPI_STS_SHIFTER_FSM, s->status,
FSM_SHIFT_N2);
- s->status = SETFIELD(SPI_STS_SEQ_INDEX, s->status,
- (get_seq_index(s) + 1));
+ seq_index++;
} else {
/*
* This is case (1) or (2) so the sequencer needs to
* wait and NOT go to the next sequence yet.
*/
- s->status = SETFIELD(SPI_STS_SHIFTER_FSM, s->status,
- FSM_WAIT);
+ s->status = SETFIELD(SPI_STS_SHIFTER_FSM, s->status, FSM_WAIT);
}
} else {
/* Ok to move on to the next index */
s->shift_n1_done = true;
- next_sequencer_fsm(s);
+ seq_index++;
+ s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status,
+ SEQ_STATE_INDEX_INCREMENT);
}
}
break;
case SEQ_OP_SHIFT_N2:
s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_EXECUTE);
- trace_pnv_spi_sequencer_op("SHIFT_N2", get_seq_index(s));
+ trace_pnv_spi_sequencer_op("SHIFT_N2", seq_index);
if (!s->shift_n1_done) {
qemu_log_mask(LOG_GUEST_ERROR, "Shift_N2 is not allowed if a "
"Shift_N1 is not done, shifter state = 0x%llx",
@@ -890,31 +835,30 @@ static void operation_sequencer(PnvSpi *s)
* error bit 3 (general_SPI_status[3]) in status reg.
*/
s->status = SETFIELD(SPI_STS_GEN_STATUS_B3, s->status, 1);
- trace_pnv_spi_sequencer_stop_requested("shift_n2 "
- "w/no shift_n1 done");
+ trace_pnv_spi_sequencer_stop_requested("shift_n2 w/no shift_n1 done");
stop = true;
} else {
/* Ok to do a Shift_N2 */
- s->status = SETFIELD(SPI_STS_SHIFTER_FSM, s->status,
- FSM_SHIFT_N2);
- stop = operation_shiftn2(s, opcode, &payload);
+ s->status = SETFIELD(SPI_STS_SHIFTER_FSM, s->status, FSM_SHIFT_N2);
+ stop = operation_shiftn2(s, opcode);
/*
* If the operation code says to stop set the shifter state to
* wait and stop
*/
if (stop) {
- s->status = SETFIELD(SPI_STS_SHIFTER_FSM, s->status,
- FSM_WAIT);
+ s->status = SETFIELD(SPI_STS_SHIFTER_FSM, s->status, FSM_WAIT);
} else {
/* Ok to move on to the next index */
- next_sequencer_fsm(s);
+ seq_index++;
+ s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status,
+ SEQ_STATE_INDEX_INCREMENT);
}
}
break;
case SEQ_OP_BRANCH_IFNEQ_RDR:
s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_EXECUTE);
- trace_pnv_spi_sequencer_op("BRANCH_IFNEQ_RDR", get_seq_index(s));
+ trace_pnv_spi_sequencer_op("BRANCH_IFNEQ_RDR", seq_index);
/*
* The memory mapping register RDR match value is compared against
* the 16 rightmost bytes of the RDR (potentially with masking).
@@ -929,16 +873,26 @@ static void operation_sequencer(PnvSpi *s)
rdr_matched = does_rdr_match(s);
if (rdr_matched) {
trace_pnv_spi_RDR_match("success");
+ s->fail_count = 0;
/* A match occurred, increment the sequencer index. */
- next_sequencer_fsm(s);
+ seq_index++;
+ s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status,
+ SEQ_STATE_INDEX_INCREMENT);
} else {
trace_pnv_spi_RDR_match("failed");
+ s->fail_count++;
/*
* Branch the sequencer to the index coded into the op
* code.
*/
- s->status = SETFIELD(SPI_STS_SEQ_INDEX, s->status,
- PNV_SPI_OPCODE_LO_NIBBLE(opcode));
+ seq_index = PNV_SPI_OPCODE_LO_NIBBLE(opcode);
+ }
+ if (s->fail_count >= RDR_MATCH_FAILURE_LIMIT) {
+ qemu_log_mask(LOG_GUEST_ERROR, "pnv_spi: RDR match failure"
+ " limit crossed %d times hence requesting "
+ "sequencer to stop.\n",
+ RDR_MATCH_FAILURE_LIMIT);
+ stop = true;
}
/*
* Regardless of where the branch ended up we want the
@@ -957,12 +911,13 @@ static void operation_sequencer(PnvSpi *s)
case SEQ_OP_TRANSFER_TDR:
s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_EXECUTE);
qemu_log_mask(LOG_GUEST_ERROR, "Transfer TDR is not supported\n");
- next_sequencer_fsm(s);
+ seq_index++;
+ s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_INDEX_INCREMENT);
break;
case SEQ_OP_BRANCH_IFNEQ_INC_1:
s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_EXECUTE);
- trace_pnv_spi_sequencer_op("BRANCH_IFNEQ_INC_1", get_seq_index(s));
+ trace_pnv_spi_sequencer_op("BRANCH_IFNEQ_INC_1", seq_index);
/*
* The spec says the loop should execute count compare + 1 times.
* However we learned from engineering that we really only loop
@@ -976,18 +931,19 @@ static void operation_sequencer(PnvSpi *s)
* mask off all but the first three bits so we don't try to
* access beyond the sequencer_operation_reg boundary.
*/
- s->status = SETFIELD(SPI_STS_SEQ_INDEX, s->status,
- PNV_SPI_OPCODE_LO_NIBBLE(opcode));
+ seq_index = PNV_SPI_OPCODE_LO_NIBBLE(opcode);
s->loop_counter_1++;
} else {
/* Continue to next index if loop counter is reached */
- next_sequencer_fsm(s);
+ seq_index++;
+ s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status,
+ SEQ_STATE_INDEX_INCREMENT);
}
break;
case SEQ_OP_BRANCH_IFNEQ_INC_2:
s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_EXECUTE);
- trace_pnv_spi_sequencer_op("BRANCH_IFNEQ_INC_2", get_seq_index(s));
+ trace_pnv_spi_sequencer_op("BRANCH_IFNEQ_INC_2", seq_index);
uint8_t condition2 = GETFIELD(SPI_CTR_CFG_CMP2,
s->regs[SPI_CTR_CFG_REG]);
/*
@@ -1002,19 +958,21 @@ static void operation_sequencer(PnvSpi *s)
* mask off all but the first three bits so we don't try to
* access beyond the sequencer_operation_reg boundary.
*/
- s->status = SETFIELD(SPI_STS_SEQ_INDEX,
- s->status, PNV_SPI_OPCODE_LO_NIBBLE(opcode));
+ seq_index = PNV_SPI_OPCODE_LO_NIBBLE(opcode);
s->loop_counter_2++;
} else {
/* Continue to next index if loop counter is reached */
- next_sequencer_fsm(s);
+ seq_index++;
+ s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status,
+ SEQ_STATE_INDEX_INCREMENT);
}
break;
default:
s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_EXECUTE);
/* Ignore unsupported operations. */
- next_sequencer_fsm(s);
+ seq_index++;
+ s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_INDEX_INCREMENT);
break;
} /* end of switch */
/*
@@ -1022,10 +980,10 @@ static void operation_sequencer(PnvSpi *s)
* we need to go ahead and end things as if there was a STOP at the
* end.
*/
- if (get_seq_index(s) == NUM_SEQ_OPS) {
+ if (seq_index == NUM_SEQ_OPS) {
/* All 8 opcodes completed, sequencer idling */
s->status = SETFIELD(SPI_STS_SHIFTER_FSM, s->status, FSM_IDLE);
- s->status = SETFIELD(SPI_STS_SEQ_INDEX, s->status, 0);
+ seq_index = 0;
s->loop_counter_1 = 0;
s->loop_counter_2 = 0;
s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_IDLE);
@@ -1036,6 +994,8 @@ static void operation_sequencer(PnvSpi *s)
break;
}
} /* end of while */
+ /* Update the sequencer index field in the status register. */
+ s->status = SETFIELD(SPI_STS_SEQ_INDEX, s->status, seq_index);
return;
} /* end of operation_sequencer() */
@@ -1197,18 +1157,22 @@ static const MemoryRegionOps pnv_spi_xscom_ops = {
static const Property pnv_spi_properties[] = {
DEFINE_PROP_UINT32("spic_num", PnvSpi, spic_num, 0),
+ DEFINE_PROP_UINT32("chip-id", PnvSpi, chip_id, 0),
DEFINE_PROP_UINT8("transfer_len", PnvSpi, transfer_len, 4),
};
static void pnv_spi_realize(DeviceState *dev, Error **errp)
{
PnvSpi *s = PNV_SPI(dev);
- g_autofree char *name = g_strdup_printf(TYPE_PNV_SPI_BUS ".%d",
- s->spic_num);
+ g_autofree char *name = g_strdup_printf("chip%d." TYPE_PNV_SPI_BUS ".%d",
+ s->chip_id, s->spic_num);
s->ssi_bus = ssi_create_bus(dev, name);
s->cs_line = g_new0(qemu_irq, 1);
qdev_init_gpio_out_named(DEVICE(s), s->cs_line, "cs", 1);
+ fifo8_create(&s->tx_fifo, PNV_SPI_FIFO_SIZE);
+ fifo8_create(&s->rx_fifo, PNV_SPI_FIFO_SIZE);
+
/* spi scoms */
pnv_xscom_region_init(&s->xscom_spic_regs, OBJECT(s), &pnv_spi_xscom_ops,
s, "xscom-spi", PNV10_XSCOM_PIB_SPIC_SIZE);
diff --git a/hw/vfio/common.c b/hw/vfio/common.c
index 7a4010e..1a0d929 100644
--- a/hw/vfio/common.c
+++ b/hw/vfio/common.c
@@ -30,6 +30,7 @@
#include "exec/address-spaces.h"
#include "exec/memory.h"
#include "exec/ram_addr.h"
+#include "exec/target_page.h"
#include "hw/hw.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
@@ -42,6 +43,7 @@
#include "migration/misc.h"
#include "migration/blocker.h"
#include "migration/qemu-file.h"
+#include "system/tcg.h"
#include "system/tpm.h"
VFIODeviceList vfio_device_list =
@@ -392,13 +394,14 @@ static void vfio_register_ram_discard_listener(VFIOContainerBase *bcontainer,
MemoryRegionSection *section)
{
RamDiscardManager *rdm = memory_region_get_ram_discard_manager(section->mr);
+ int target_page_size = qemu_target_page_size();
VFIORamDiscardListener *vrdl;
/* Ignore some corner cases not relevant in practice. */
- g_assert(QEMU_IS_ALIGNED(section->offset_within_region, TARGET_PAGE_SIZE));
+ g_assert(QEMU_IS_ALIGNED(section->offset_within_region, target_page_size));
g_assert(QEMU_IS_ALIGNED(section->offset_within_address_space,
- TARGET_PAGE_SIZE));
- g_assert(QEMU_IS_ALIGNED(int128_get64(section->size), TARGET_PAGE_SIZE));
+ target_page_size));
+ g_assert(QEMU_IS_ALIGNED(int128_get64(section->size), target_page_size));
vrdl = g_new0(VFIORamDiscardListener, 1);
vrdl->bcontainer = bcontainer;
diff --git a/hw/vfio/igd.c b/hw/vfio/igd.c
index b1a237e..265fffc 100644
--- a/hw/vfio/igd.c
+++ b/hw/vfio/igd.c
@@ -15,6 +15,7 @@
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "qapi/qmp/qerror.h"
+#include "hw/boards.h"
#include "hw/hw.h"
#include "hw/nvram/fw_cfg.h"
#include "pci.h"
@@ -106,40 +107,15 @@ static int igd_gen(VFIOPCIDevice *vdev)
return -1;
}
-typedef struct VFIOIGDQuirk {
- struct VFIOPCIDevice *vdev;
- uint32_t index;
- uint64_t bdsm;
-} VFIOIGDQuirk;
-
+#define IGD_ASLS 0xfc /* ASL Storage Register */
#define IGD_GMCH 0x50 /* Graphics Control Register */
#define IGD_BDSM 0x5c /* Base Data of Stolen Memory */
#define IGD_BDSM_GEN11 0xc0 /* Base Data of Stolen Memory of gen 11 and later */
#define IGD_GMCH_GEN6_GMS_SHIFT 3 /* SNB_GMCH in i915 */
#define IGD_GMCH_GEN6_GMS_MASK 0x1f
-#define IGD_GMCH_GEN6_GGMS_SHIFT 8
-#define IGD_GMCH_GEN6_GGMS_MASK 0x3
#define IGD_GMCH_GEN8_GMS_SHIFT 8 /* BDW_GMCH in i915 */
#define IGD_GMCH_GEN8_GMS_MASK 0xff
-#define IGD_GMCH_GEN8_GGMS_SHIFT 6
-#define IGD_GMCH_GEN8_GGMS_MASK 0x3
-
-static uint64_t igd_gtt_memory_size(int gen, uint16_t gmch)
-{
- uint64_t ggms;
-
- if (gen < 8) {
- ggms = (gmch >> IGD_GMCH_GEN6_GGMS_SHIFT) & IGD_GMCH_GEN6_GGMS_MASK;
- } else {
- ggms = (gmch >> IGD_GMCH_GEN8_GGMS_SHIFT) & IGD_GMCH_GEN8_GGMS_MASK;
- if (ggms != 0) {
- ggms = 1ULL << ggms;
- }
- }
-
- return ggms * MiB;
-}
static uint64_t igd_stolen_memory_size(int gen, uint32_t gmch)
{
@@ -165,6 +141,82 @@ static uint64_t igd_stolen_memory_size(int gen, uint32_t gmch)
}
/*
+ * The OpRegion includes the Video BIOS Table, which seems important for
+ * telling the driver what sort of outputs it has. Without this, the device
+ * may work in the guest, but we may not get output. This also requires BIOS
+ * support to reserve and populate a section of guest memory sufficient for
+ * the table and to write the base address of that memory to the ASLS register
+ * of the IGD device.
+ */
+static bool vfio_pci_igd_opregion_init(VFIOPCIDevice *vdev,
+ struct vfio_region_info *info,
+ Error **errp)
+{
+ int ret;
+
+ vdev->igd_opregion = g_malloc0(info->size);
+ ret = pread(vdev->vbasedev.fd, vdev->igd_opregion,
+ info->size, info->offset);
+ if (ret != info->size) {
+ error_setg(errp, "failed to read IGD OpRegion");
+ g_free(vdev->igd_opregion);
+ vdev->igd_opregion = NULL;
+ return false;
+ }
+
+ /*
+ * Provide fw_cfg with a copy of the OpRegion which the VM firmware is to
+ * allocate 32bit reserved memory for, copy these contents into, and write
+ * the reserved memory base address to the device ASLS register at 0xFC.
+ * Alignment of this reserved region seems flexible, but using a 4k page
+ * alignment seems to work well. This interface assumes a single IGD
+ * device, which may be at VM address 00:02.0 in legacy mode or another
+ * address in UPT mode.
+ *
+ * NB, there may be future use cases discovered where the VM should have
+ * direct interaction with the host OpRegion, in which case the write to
+ * the ASLS register would trigger MemoryRegion setup to enable that.
+ */
+ fw_cfg_add_file(fw_cfg_find(), "etc/igd-opregion",
+ vdev->igd_opregion, info->size);
+
+ trace_vfio_pci_igd_opregion_enabled(vdev->vbasedev.name);
+
+ pci_set_long(vdev->pdev.config + IGD_ASLS, 0);
+ pci_set_long(vdev->pdev.wmask + IGD_ASLS, ~0);
+ pci_set_long(vdev->emulated_config_bits + IGD_ASLS, ~0);
+
+ return true;
+}
+
+static bool vfio_pci_igd_setup_opregion(VFIOPCIDevice *vdev, Error **errp)
+{
+ g_autofree struct vfio_region_info *opregion = NULL;
+ int ret;
+
+ /* Hotplugging is not supported for opregion access */
+ if (vdev->pdev.qdev.hotplugged) {
+ error_setg(errp, "IGD OpRegion is not supported on hotplugged device");
+ return false;
+ }
+
+ ret = vfio_get_dev_region_info(&vdev->vbasedev,
+ VFIO_REGION_TYPE_PCI_VENDOR_TYPE | PCI_VENDOR_ID_INTEL,
+ VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION, &opregion);
+ if (ret) {
+ error_setg_errno(errp, -ret,
+ "Device does not supports IGD OpRegion feature");
+ return false;
+ }
+
+ if (!vfio_pci_igd_opregion_init(vdev, opregion, errp)) {
+ return false;
+ }
+
+ return true;
+}
+
+/*
* The rather short list of registers that we copy from the host devices.
* The LPC/ISA bridge values are definitely needed to support the vBIOS, the
* host bridge values may or may not be needed depending on the guest OS.
@@ -300,129 +352,72 @@ static int vfio_pci_igd_lpc_init(VFIOPCIDevice *vdev,
return ret;
}
-/*
- * IGD Gen8 and newer support up to 8MB for the GTT and use a 64bit PTE
- * entry, older IGDs use 2MB and 32bit. Each PTE maps a 4k page. Therefore
- * we either have 2M/4k * 4 = 2k or 8M/4k * 8 = 16k as the maximum iobar index
- * for programming the GTT.
- *
- * See linux:include/drm/i915_drm.h for shift and mask values.
- */
-static int vfio_igd_gtt_max(VFIOPCIDevice *vdev)
-{
- uint32_t gmch = vfio_pci_read_config(&vdev->pdev, IGD_GMCH, sizeof(gmch));
- int gen = igd_gen(vdev);
- uint64_t ggms_size = igd_gtt_memory_size(gen, gmch);
-
- return (ggms_size / (4 * KiB)) * (gen < 8 ? 4 : 8);
-}
-
-/*
- * The IGD ROM will make use of stolen memory (GGMS) for support of VESA modes.
- * Somehow the host stolen memory range is used for this, but how the ROM gets
- * it is a mystery, perhaps it's hardcoded into the ROM. Thankfully though, it
- * reprograms the GTT through the IOBAR where we can trap it and transpose the
- * programming to the VM allocated buffer. That buffer gets reserved by the VM
- * firmware via the fw_cfg entry added below. Here we're just monitoring the
- * IOBAR address and data registers to detect a write sequence targeting the
- * GTTADR. This code is developed by observed behavior and doesn't have a
- * direct spec reference, unfortunately.
- */
-static uint64_t vfio_igd_quirk_data_read(void *opaque,
- hwaddr addr, unsigned size)
-{
- VFIOIGDQuirk *igd = opaque;
- VFIOPCIDevice *vdev = igd->vdev;
-
- igd->index = ~0;
-
- return vfio_region_read(&vdev->bars[4].region, addr + 4, size);
-}
-
-static void vfio_igd_quirk_data_write(void *opaque, hwaddr addr,
- uint64_t data, unsigned size)
+static bool vfio_pci_igd_setup_lpc_bridge(VFIOPCIDevice *vdev, Error **errp)
{
- VFIOIGDQuirk *igd = opaque;
- VFIOPCIDevice *vdev = igd->vdev;
- uint64_t val = data;
- int gen = igd_gen(vdev);
+ g_autofree struct vfio_region_info *host = NULL;
+ g_autofree struct vfio_region_info *lpc = NULL;
+ PCIDevice *lpc_bridge;
+ int ret;
/*
- * Programming the GGMS starts at index 0x1 and uses every 4th index (ie.
- * 0x1, 0x5, 0x9, 0xd,...). For pre-Gen8 each 4-byte write is a whole PTE
- * entry, with 0th bit enable set. For Gen8 and up, PTEs are 64bit, so
- * entries 0x5 & 0xd are the high dword, in our case zero. Each PTE points
- * to a 4k page, which we translate to a page from the VM allocated region,
- * pointed to by the BDSM register. If this is not set, we fail.
- *
- * We trap writes to the full configured GTT size, but we typically only
- * see the vBIOS writing up to (nearly) the 1MB barrier. In fact it often
- * seems to miss the last entry for an even 1MB GTT. Doing a gratuitous
- * write of that last entry does work, but is hopefully unnecessary since
- * we clear the previous GTT on initialization.
+ * Copying IDs or creating new devices is not supported on hotplug
*/
- if ((igd->index % 4 == 1) && igd->index < vfio_igd_gtt_max(vdev)) {
- if (gen < 8 || (igd->index % 8 == 1)) {
- uint64_t base;
-
- if (gen < 11) {
- base = pci_get_long(vdev->pdev.config + IGD_BDSM);
- } else {
- base = pci_get_quad(vdev->pdev.config + IGD_BDSM_GEN11);
- }
- if (!base) {
- hw_error("vfio-igd: Guest attempted to program IGD GTT before "
- "BIOS reserved stolen memory. Unsupported BIOS?");
- }
-
- val = data - igd->bdsm + base;
- } else {
- val = 0; /* upper 32bits of pte, we only enable below 4G PTEs */
- }
-
- trace_vfio_pci_igd_bar4_write(vdev->vbasedev.name,
- igd->index, data, val);
+ if (vdev->pdev.qdev.hotplugged) {
+ error_setg(errp, "IGD LPC is not supported on hotplugged device");
+ return false;
}
- vfio_region_write(&vdev->bars[4].region, addr + 4, val, size);
-
- igd->index = ~0;
-}
-
-static const MemoryRegionOps vfio_igd_data_quirk = {
- .read = vfio_igd_quirk_data_read,
- .write = vfio_igd_quirk_data_write,
- .endianness = DEVICE_LITTLE_ENDIAN,
-};
-
-static uint64_t vfio_igd_quirk_index_read(void *opaque,
- hwaddr addr, unsigned size)
-{
- VFIOIGDQuirk *igd = opaque;
- VFIOPCIDevice *vdev = igd->vdev;
+ /*
+ * We need to create an LPC/ISA bridge at PCI bus address 00:1f.0 that we
+ * can stuff host values into, so if there's already one there and it's not
+ * one we can hack on, this quirk is no-go. Sorry Q35.
+ */
+ lpc_bridge = pci_find_device(pci_device_root_bus(&vdev->pdev),
+ 0, PCI_DEVFN(0x1f, 0));
+ if (lpc_bridge && !object_dynamic_cast(OBJECT(lpc_bridge),
+ "vfio-pci-igd-lpc-bridge")) {
+ error_setg(errp,
+ "Cannot create LPC bridge due to existing device at 1f.0");
+ return false;
+ }
- igd->index = ~0;
+ /*
+ * Check whether we have all the vfio device specific regions to
+ * support the LPC quirk (added in Linux v4.6).
+ */
+ ret = vfio_get_dev_region_info(&vdev->vbasedev,
+ VFIO_REGION_TYPE_PCI_VENDOR_TYPE | PCI_VENDOR_ID_INTEL,
+ VFIO_REGION_SUBTYPE_INTEL_IGD_LPC_CFG, &lpc);
+ if (ret) {
+ error_setg(errp, "IGD LPC bridge access is not supported by kernel");
+ return false;
+ }
- return vfio_region_read(&vdev->bars[4].region, addr, size);
-}
+ ret = vfio_get_dev_region_info(&vdev->vbasedev,
+ VFIO_REGION_TYPE_PCI_VENDOR_TYPE | PCI_VENDOR_ID_INTEL,
+ VFIO_REGION_SUBTYPE_INTEL_IGD_HOST_CFG, &host);
+ if (ret) {
+ error_setg(errp, "IGD host bridge access is not supported by kernel");
+ return false;
+ }
-static void vfio_igd_quirk_index_write(void *opaque, hwaddr addr,
- uint64_t data, unsigned size)
-{
- VFIOIGDQuirk *igd = opaque;
- VFIOPCIDevice *vdev = igd->vdev;
+ /* Create/modify LPC bridge */
+ ret = vfio_pci_igd_lpc_init(vdev, lpc);
+ if (ret) {
+ error_setg(errp, "Failed to create/modify LPC bridge for IGD");
+ return false;
+ }
- igd->index = data;
+ /* Stuff some host values into the VM PCI host bridge */
+ ret = vfio_pci_igd_host_init(vdev, host);
+ if (ret) {
+ error_setg(errp, "Failed to modify host bridge for IGD");
+ return false;
+ }
- vfio_region_write(&vdev->bars[4].region, addr, data, size);
+ return true;
}
-static const MemoryRegionOps vfio_igd_index_quirk = {
- .read = vfio_igd_quirk_index_read,
- .write = vfio_igd_quirk_index_write,
- .endianness = DEVICE_LITTLE_ENDIAN,
-};
-
#define IGD_GGC_MMIO_OFFSET 0x108040
#define IGD_BDSM_MMIO_OFFSET 0x1080C0
@@ -438,9 +433,7 @@ void vfio_probe_igd_bar0_quirk(VFIOPCIDevice *vdev, int nr)
* bus address.
*/
if (!vfio_pci_is(vdev, PCI_VENDOR_ID_INTEL, PCI_ANY_ID) ||
- !vfio_is_vga(vdev) || nr != 0 ||
- &vdev->pdev != pci_find_device(pci_device_root_bus(&vdev->pdev),
- 0, PCI_DEVFN(0x2, 0))) {
+ !vfio_is_vga(vdev) || nr != 0) {
return;
}
@@ -488,20 +481,13 @@ void vfio_probe_igd_bar0_quirk(VFIOPCIDevice *vdev, int nr)
QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, bdsm_quirk, next);
}
-void vfio_probe_igd_bar4_quirk(VFIOPCIDevice *vdev, int nr)
+static bool vfio_pci_igd_config_quirk(VFIOPCIDevice *vdev, Error **errp)
{
- g_autofree struct vfio_region_info *rom = NULL;
- g_autofree struct vfio_region_info *opregion = NULL;
- g_autofree struct vfio_region_info *host = NULL;
- g_autofree struct vfio_region_info *lpc = NULL;
- VFIOQuirk *quirk;
- VFIOIGDQuirk *igd;
- PCIDevice *lpc_bridge;
- int i, ret, gen;
- uint64_t ggms_size, gms_size;
+ int ret, gen;
+ uint64_t gms_size;
uint64_t *bdsm_size;
uint32_t gmch;
- uint16_t cmd_orig, cmd;
+ bool legacy_mode_enabled = false;
Error *err = NULL;
/*
@@ -510,24 +496,8 @@ void vfio_probe_igd_bar4_quirk(VFIOPCIDevice *vdev, int nr)
* PCI bus address.
*/
if (!vfio_pci_is(vdev, PCI_VENDOR_ID_INTEL, PCI_ANY_ID) ||
- !vfio_is_vga(vdev) || nr != 4 ||
- &vdev->pdev != pci_find_device(pci_device_root_bus(&vdev->pdev),
- 0, PCI_DEVFN(0x2, 0))) {
- return;
- }
-
- /*
- * We need to create an LPC/ISA bridge at PCI bus address 00:1f.0 that we
- * can stuff host values into, so if there's already one there and it's not
- * one we can hack on, legacy mode is no-go. Sorry Q35.
- */
- lpc_bridge = pci_find_device(pci_device_root_bus(&vdev->pdev),
- 0, PCI_DEVFN(0x1f, 0));
- if (lpc_bridge && !object_dynamic_cast(OBJECT(lpc_bridge),
- "vfio-pci-igd-lpc-bridge")) {
- error_report("IGD device %s cannot support legacy mode due to existing "
- "devices at address 1f.0", vdev->vbasedev.name);
- return;
+ !vfio_is_vga(vdev)) {
+ return true;
}
/*
@@ -539,126 +509,77 @@ void vfio_probe_igd_bar4_quirk(VFIOPCIDevice *vdev, int nr)
if (gen == -1) {
error_report("IGD device %s is unsupported in legacy mode, "
"try SandyBridge or newer", vdev->vbasedev.name);
- return;
- }
-
- /*
- * Most of what we're doing here is to enable the ROM to run, so if
- * there's no ROM, there's no point in setting up this quirk.
- * NB. We only seem to get BIOS ROMs, so a UEFI VM would need CSM support.
- */
- ret = vfio_get_region_info(&vdev->vbasedev,
- VFIO_PCI_ROM_REGION_INDEX, &rom);
- if ((ret || !rom->size) && !vdev->pdev.romfile) {
- error_report("IGD device %s has no ROM, legacy mode disabled",
- vdev->vbasedev.name);
- return;
- }
-
- /*
- * Ignore the hotplug corner case, mark the ROM failed, we can't
- * create the devices we need for legacy mode in the hotplug scenario.
- */
- if (vdev->pdev.qdev.hotplugged) {
- error_report("IGD device %s hotplugged, ROM disabled, "
- "legacy mode disabled", vdev->vbasedev.name);
- vdev->rom_read_failed = true;
- return;
- }
-
- /*
- * Check whether we have all the vfio device specific regions to
- * support legacy mode (added in Linux v4.6). If not, bail.
- */
- ret = vfio_get_dev_region_info(&vdev->vbasedev,
- VFIO_REGION_TYPE_PCI_VENDOR_TYPE | PCI_VENDOR_ID_INTEL,
- VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION, &opregion);
- if (ret) {
- error_report("IGD device %s does not support OpRegion access,"
- "legacy mode disabled", vdev->vbasedev.name);
- return;
- }
-
- ret = vfio_get_dev_region_info(&vdev->vbasedev,
- VFIO_REGION_TYPE_PCI_VENDOR_TYPE | PCI_VENDOR_ID_INTEL,
- VFIO_REGION_SUBTYPE_INTEL_IGD_HOST_CFG, &host);
- if (ret) {
- error_report("IGD device %s does not support host bridge access,"
- "legacy mode disabled", vdev->vbasedev.name);
- return;
- }
-
- ret = vfio_get_dev_region_info(&vdev->vbasedev,
- VFIO_REGION_TYPE_PCI_VENDOR_TYPE | PCI_VENDOR_ID_INTEL,
- VFIO_REGION_SUBTYPE_INTEL_IGD_LPC_CFG, &lpc);
- if (ret) {
- error_report("IGD device %s does not support LPC bridge access,"
- "legacy mode disabled", vdev->vbasedev.name);
- return;
+ return true;
}
gmch = vfio_pci_read_config(&vdev->pdev, IGD_GMCH, 4);
/*
- * If IGD VGA Disable is clear (expected) and VGA is not already enabled,
- * try to enable it. Probably shouldn't be using legacy mode without VGA,
- * but also no point in us enabling VGA if disabled in hardware.
+ * For backward compatibility, enable legacy mode when
+ * - Machine type is i440fx (pc_piix)
+ * - IGD device is at guest BDF 00:02.0
+ * - Not manually disabled by x-igd-legacy-mode=off
*/
- if (!(gmch & 0x2) && !vdev->vga && !vfio_populate_vga(vdev, &err)) {
- error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
- error_report("IGD device %s failed to enable VGA access, "
- "legacy mode disabled", vdev->vbasedev.name);
- return;
- }
+ if ((vdev->igd_legacy_mode != ON_OFF_AUTO_OFF) &&
+ !strcmp(MACHINE_GET_CLASS(qdev_get_machine())->family, "pc_piix") &&
+ (&vdev->pdev == pci_find_device(pci_device_root_bus(&vdev->pdev),
+ 0, PCI_DEVFN(0x2, 0)))) {
+ /*
+ * IGD legacy mode requires:
+ * - VBIOS in ROM BAR or file
+ * - VGA IO/MMIO ranges are claimed by IGD
+ * - OpRegion
+ * - Same LPC bridge and Host bridge VID/DID/SVID/SSID as host
+ */
+ g_autofree struct vfio_region_info *rom = NULL;
+
+ legacy_mode_enabled = true;
+ info_report("IGD legacy mode enabled, "
+ "use x-igd-legacy-mode=off to disable it if unwanted.");
+
+ /*
+ * Most of what we're doing here is to enable the ROM to run, so if
+ * there's no ROM, there's no point in setting up this quirk.
+ * NB. We only seem to get BIOS ROMs, so a UEFI VM would need CSM support.
+ */
+ ret = vfio_get_region_info(&vdev->vbasedev,
+ VFIO_PCI_ROM_REGION_INDEX, &rom);
+ if ((ret || !rom->size) && !vdev->pdev.romfile) {
+ error_setg(&err, "Device has no ROM");
+ goto error;
+ }
- /* Create our LPC/ISA bridge */
- ret = vfio_pci_igd_lpc_init(vdev, lpc);
- if (ret) {
- error_report("IGD device %s failed to create LPC bridge, "
- "legacy mode disabled", vdev->vbasedev.name);
- return;
- }
+ /*
+ * If IGD VGA Disable is clear (expected) and VGA is not already
+ * enabled, try to enable it. Probably shouldn't be using legacy mode
+ * without VGA, but also no point in us enabling VGA if disabled in
+ * hardware.
+ */
+ if (!(gmch & 0x2) && !vdev->vga && !vfio_populate_vga(vdev, &err)) {
+ error_setg(&err, "Unable to enable VGA access");
+ goto error;
+ }
- /* Stuff some host values into the VM PCI host bridge */
- ret = vfio_pci_igd_host_init(vdev, host);
- if (ret) {
- error_report("IGD device %s failed to modify host bridge, "
- "legacy mode disabled", vdev->vbasedev.name);
- return;
+ /* Enable OpRegion and LPC bridge quirk */
+ vdev->features |= VFIO_FEATURE_ENABLE_IGD_OPREGION;
+ vdev->features |= VFIO_FEATURE_ENABLE_IGD_LPC;
+ } else if (vdev->igd_legacy_mode == ON_OFF_AUTO_ON) {
+ error_setg(&err,
+ "Machine is not i440fx or assigned BDF is not 00:02.0");
+ goto error;
}
/* Setup OpRegion access */
- if (!vfio_pci_igd_opregion_init(vdev, opregion, &err)) {
- error_append_hint(&err, "IGD legacy mode disabled\n");
- error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
- return;
+ if ((vdev->features & VFIO_FEATURE_ENABLE_IGD_OPREGION) &&
+ !vfio_pci_igd_setup_opregion(vdev, errp)) {
+ goto error;
}
- /* Setup our quirk to munge GTT addresses to the VM allocated buffer */
- quirk = vfio_quirk_alloc(2);
- igd = quirk->data = g_malloc0(sizeof(*igd));
- igd->vdev = vdev;
- igd->index = ~0;
- if (gen < 11) {
- igd->bdsm = vfio_pci_read_config(&vdev->pdev, IGD_BDSM, 4);
- } else {
- igd->bdsm = vfio_pci_read_config(&vdev->pdev, IGD_BDSM_GEN11, 4);
- igd->bdsm |=
- (uint64_t)vfio_pci_read_config(&vdev->pdev, IGD_BDSM_GEN11 + 4, 4) << 32;
- }
- igd->bdsm &= ~((1 * MiB) - 1); /* 1MB aligned */
-
- memory_region_init_io(&quirk->mem[0], OBJECT(vdev), &vfio_igd_index_quirk,
- igd, "vfio-igd-index-quirk", 4);
- memory_region_add_subregion_overlap(vdev->bars[nr].region.mem,
- 0, &quirk->mem[0], 1);
-
- memory_region_init_io(&quirk->mem[1], OBJECT(vdev), &vfio_igd_data_quirk,
- igd, "vfio-igd-data-quirk", 4);
- memory_region_add_subregion_overlap(vdev->bars[nr].region.mem,
- 4, &quirk->mem[1], 1);
-
- QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next);
+ /* Setup LPC bridge / Host bridge PCI IDs */
+ if ((vdev->features & VFIO_FEATURE_ENABLE_IGD_LPC) &&
+ !vfio_pci_igd_setup_lpc_bridge(vdev, errp)) {
+ goto error;
+ }
/*
* Allow user to override dsm size using x-igd-gms option, in multiples of
@@ -685,7 +606,6 @@ void vfio_probe_igd_bar4_quirk(VFIOPCIDevice *vdev, int nr)
}
}
- ggms_size = igd_gtt_memory_size(gen, gmch);
gms_size = igd_stolen_memory_size(gen, gmch);
/*
@@ -697,7 +617,7 @@ void vfio_probe_igd_bar4_quirk(VFIOPCIDevice *vdev, int nr)
* config offset 0x5C.
*/
bdsm_size = g_malloc(sizeof(*bdsm_size));
- *bdsm_size = cpu_to_le64(ggms_size + gms_size);
+ *bdsm_size = cpu_to_le64(gms_size);
fw_cfg_add_file(fw_cfg_find(), "etc/igd-bdsm-size",
bdsm_size, sizeof(*bdsm_size));
@@ -717,37 +637,46 @@ void vfio_probe_igd_bar4_quirk(VFIOPCIDevice *vdev, int nr)
pci_set_quad(vdev->emulated_config_bits + IGD_BDSM_GEN11, ~0);
}
+ trace_vfio_pci_igd_bdsm_enabled(vdev->vbasedev.name, (gms_size / MiB));
+
+ return true;
+
+error:
/*
- * This IOBAR gives us access to GTTADR, which allows us to write to
- * the GTT itself. So let's go ahead and write zero to all the GTT
- * entries to avoid spurious DMA faults. Be sure I/O access is enabled
- * before talking to the device.
+ * When legacy mode is implicitly enabled, continue on error to
+ * preserve backward compatibility.
*/
- if (pread(vdev->vbasedev.fd, &cmd_orig, sizeof(cmd_orig),
- vdev->config_offset + PCI_COMMAND) != sizeof(cmd_orig)) {
- error_report("IGD device %s - failed to read PCI command register",
- vdev->vbasedev.name);
+ if (legacy_mode_enabled && (vdev->igd_legacy_mode == ON_OFF_AUTO_AUTO)) {
+ error_report_err(err);
+ error_report("IGD legacy mode disabled");
+ return true;
}
- cmd = cmd_orig | PCI_COMMAND_IO;
+ error_propagate(errp, err);
+ return false;
+}
- if (pwrite(vdev->vbasedev.fd, &cmd, sizeof(cmd),
- vdev->config_offset + PCI_COMMAND) != sizeof(cmd)) {
- error_report("IGD device %s - failed to write PCI command register",
- vdev->vbasedev.name);
+/*
+ * KVMGT/GVT-g vGPU exposes an emulated OpRegion. So far, users have to specify
+ * x-igd-opregion=on to enable access to it.
+ * TODO: Check VID/DID and enable opregion access automatically
+ */
+static bool vfio_pci_kvmgt_config_quirk(VFIOPCIDevice *vdev, Error **errp)
+{
+ if ((vdev->features & VFIO_FEATURE_ENABLE_IGD_OPREGION) &&
+ !vfio_pci_igd_setup_opregion(vdev, errp)) {
+ return false;
}
- for (i = 1; i < vfio_igd_gtt_max(vdev); i += 4) {
- vfio_region_write(&vdev->bars[4].region, 0, i, 4);
- vfio_region_write(&vdev->bars[4].region, 4, 0, 4);
- }
+ return true;
+}
- if (pwrite(vdev->vbasedev.fd, &cmd_orig, sizeof(cmd_orig),
- vdev->config_offset + PCI_COMMAND) != sizeof(cmd_orig)) {
- error_report("IGD device %s - failed to restore PCI command register",
- vdev->vbasedev.name);
+bool vfio_probe_igd_config_quirk(VFIOPCIDevice *vdev, Error **errp)
+{
+ /* KVMGT/GVT-g vGPU is exposed as mdev */
+ if (vdev->vbasedev.mdev) {
+ return vfio_pci_kvmgt_config_quirk(vdev, errp);
}
- trace_vfio_pci_igd_bdsm_enabled(vdev->vbasedev.name,
- (ggms_size + gms_size) / MiB);
+ return vfio_pci_igd_config_quirk(vdev, errp);
}
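
The rewritten vfio_pci_igd_config_quirk() above reduces to a tri-state decision: legacy mode is auto-enabled only for an i440fx guest with the IGD at 00:02.0, an explicit x-igd-legacy-mode=on turns unmet prerequisites into hard errors, and the implicit (auto) case merely reports the problem and carries on. A minimal plain-C sketch of that behaviour, with illustrative types and names rather than QEMU API:

    #include <stdbool.h>
    #include <stdio.h>

    typedef enum { LEGACY_AUTO, LEGACY_ON, LEGACY_OFF } LegacyMode;

    /* Returns false only for a fatal configuration error. */
    static bool setup_igd_legacy(LegacyMode requested, bool machine_is_i440fx,
                                 bool igd_at_00_02_0, bool prerequisites_ok)
    {
        bool eligible = machine_is_i440fx && igd_at_00_02_0;

        if (requested == LEGACY_OFF ||
            (requested == LEGACY_AUTO && !eligible)) {
            return true;                 /* legacy mode simply not used */
        }
        if (!eligible) {                 /* here requested == LEGACY_ON */
            fprintf(stderr, "machine is not i440fx or BDF is not 00:02.0\n");
            return false;
        }
        if (!prerequisites_ok) {         /* no ROM, no VGA, no OpRegion, ... */
            if (requested == LEGACY_AUTO) {
                fprintf(stderr, "IGD legacy mode disabled\n");
                return true;             /* implicitly enabled: degrade */
            }
            return false;                /* explicitly requested: fatal */
        }
        return true;                     /* legacy mode enabled */
    }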
diff --git a/hw/vfio/iommufd.c b/hw/vfio/iommufd.c
index df61edf..42c8412 100644
--- a/hw/vfio/iommufd.c
+++ b/hw/vfio/iommufd.c
@@ -25,7 +25,6 @@
#include "qemu/cutils.h"
#include "qemu/chardev_open.h"
#include "pci.h"
-#include "exec/ram_addr.h"
static int iommufd_cdev_map(const VFIOContainerBase *bcontainer, hwaddr iova,
ram_addr_t size, void *vaddr, bool readonly)
diff --git a/hw/vfio/meson.build b/hw/vfio/meson.build
index 260d65f..a8939c8 100644
--- a/hw/vfio/meson.build
+++ b/hw/vfio/meson.build
@@ -1,27 +1,32 @@
vfio_ss = ss.source_set()
vfio_ss.add(files(
- 'helpers.c',
'common.c',
- 'container-base.c',
'container.c',
- 'migration.c',
- 'migration-multifd.c',
- 'cpr.c',
))
vfio_ss.add(when: 'CONFIG_PSERIES', if_true: files('spapr.c'))
-vfio_ss.add(when: 'CONFIG_IOMMUFD', if_true: files(
- 'iommufd.c',
-))
vfio_ss.add(when: 'CONFIG_VFIO_PCI', if_true: files(
- 'display.c',
'pci-quirks.c',
'pci.c',
))
vfio_ss.add(when: 'CONFIG_VFIO_CCW', if_true: files('ccw.c'))
vfio_ss.add(when: 'CONFIG_VFIO_PLATFORM', if_true: files('platform.c'))
-vfio_ss.add(when: 'CONFIG_VFIO_XGMAC', if_true: files('calxeda-xgmac.c'))
-vfio_ss.add(when: 'CONFIG_VFIO_AMD_XGBE', if_true: files('amd-xgbe.c'))
vfio_ss.add(when: 'CONFIG_VFIO_AP', if_true: files('ap.c'))
vfio_ss.add(when: 'CONFIG_VFIO_IGD', if_true: files('igd.c'))
specific_ss.add_all(when: 'CONFIG_VFIO', if_true: vfio_ss)
+
+system_ss.add(when: 'CONFIG_VFIO_XGMAC', if_true: files('calxeda-xgmac.c'))
+system_ss.add(when: 'CONFIG_VFIO_AMD_XGBE', if_true: files('amd-xgbe.c'))
+system_ss.add(when: 'CONFIG_VFIO', if_true: files(
+ 'helpers.c',
+ 'container-base.c',
+ 'migration.c',
+ 'migration-multifd.c',
+ 'cpr.c',
+))
+system_ss.add(when: ['CONFIG_VFIO', 'CONFIG_IOMMUFD'], if_true: files(
+ 'iommufd.c',
+))
+system_ss.add(when: 'CONFIG_VFIO_PCI', if_true: files(
+ 'display.c',
+))
diff --git a/hw/vfio/migration-multifd.c b/hw/vfio/migration-multifd.c
index 2337247..378f6f3 100644
--- a/hw/vfio/migration-multifd.c
+++ b/hw/vfio/migration-multifd.c
@@ -13,6 +13,7 @@
#include "hw/vfio/vfio-common.h"
#include "migration/misc.h"
#include "qapi/error.h"
+#include "qemu/bswap.h"
#include "qemu/error-report.h"
#include "qemu/lockable.h"
#include "qemu/main-loop.h"
@@ -155,12 +156,16 @@ bool vfio_multifd_load_state_buffer(void *opaque, char *data, size_t data_size,
return false;
}
+ packet->version = be32_to_cpu(packet->version);
if (packet->version != VFIO_DEVICE_STATE_PACKET_VER_CURRENT) {
error_setg(errp, "%s: packet has unknown version %" PRIu32,
vbasedev->name, packet->version);
return false;
}
+ packet->idx = be32_to_cpu(packet->idx);
+ packet->flags = be32_to_cpu(packet->flags);
+
if (packet->idx == UINT32_MAX) {
error_setg(errp, "%s: packet index is invalid", vbasedev->name);
return false;
@@ -558,9 +563,9 @@ vfio_save_complete_precopy_thread_config_state(VFIODevice *vbasedev,
packet_len = sizeof(*packet) + bioc->usage;
packet = g_malloc0(packet_len);
- packet->version = VFIO_DEVICE_STATE_PACKET_VER_CURRENT;
- packet->idx = idx;
- packet->flags = VFIO_DEVICE_STATE_CONFIG_STATE;
+ packet->version = cpu_to_be32(VFIO_DEVICE_STATE_PACKET_VER_CURRENT);
+ packet->idx = cpu_to_be32(idx);
+ packet->flags = cpu_to_be32(VFIO_DEVICE_STATE_CONFIG_STATE);
memcpy(&packet->data, bioc->data, bioc->usage);
if (!multifd_queue_device_state(idstr, instance_id,
@@ -610,7 +615,7 @@ vfio_multifd_save_complete_precopy_thread(SaveLiveCompletePrecopyThreadData *d,
}
packet = g_malloc0(sizeof(*packet) + migration->data_buffer_size);
- packet->version = VFIO_DEVICE_STATE_PACKET_VER_CURRENT;
+ packet->version = cpu_to_be32(VFIO_DEVICE_STATE_PACKET_VER_CURRENT);
for (idx = 0; ; idx++) {
ssize_t data_size;
@@ -631,7 +636,7 @@ vfio_multifd_save_complete_precopy_thread(SaveLiveCompletePrecopyThreadData *d,
break;
}
- packet->idx = idx;
+ packet->idx = cpu_to_be32(idx);
packet_size = sizeof(*packet) + data_size;
if (!multifd_queue_device_state(d->idstr, d->instance_id,
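
The multifd hunks above pin the device-state packet header to a big-endian wire format, converting each field with cpu_to_be32()/be32_to_cpu() right at the send and receive boundaries. A self-contained sketch of that convert-at-the-boundary pattern, using POSIX htonl()/ntohl() instead of QEMU's helpers (the struct and function names are illustrative only):

    #include <arpa/inet.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* Hypothetical packet header; every field is big-endian on the wire. */
    struct pkt_hdr {
        uint32_t version;
        uint32_t idx;
        uint32_t flags;
    };

    static void hdr_to_wire(struct pkt_hdr *h, uint32_t version, uint32_t idx,
                            uint32_t flags)
    {
        h->version = htonl(version);   /* convert once, just before sending */
        h->idx = htonl(idx);
        h->flags = htonl(flags);
    }

    static bool hdr_check_version(const struct pkt_hdr *h, uint32_t expected)
    {
        /* convert back to host order before interpreting any field */
        return ntohl(h->version) == expected;
    }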
diff --git a/hw/vfio/migration.c b/hw/vfio/migration.c
index 416643d..fbff46c 100644
--- a/hw/vfio/migration.c
+++ b/hw/vfio/migration.c
@@ -27,7 +27,6 @@
#include "qapi/error.h"
#include "qapi/qapi-events-vfio.h"
#include "exec/ramlist.h"
-#include "exec/ram_addr.h"
#include "pci.h"
#include "trace.h"
#include "hw/hw.h"
diff --git a/hw/vfio/pci-quirks.c b/hw/vfio/pci-quirks.c
index c53591f..3f00225 100644
--- a/hw/vfio/pci-quirks.c
+++ b/hw/vfio/pci-quirks.c
@@ -403,7 +403,7 @@ static void vfio_probe_ati_bar4_quirk(VFIOPCIDevice *vdev, int nr)
/* This windows doesn't seem to be used except by legacy VGA code */
if (!vfio_pci_is(vdev, PCI_VENDOR_ID_ATI, PCI_ANY_ID) ||
- !vdev->vga || nr != 4) {
+ !vdev->vga || nr != 4 || !vdev->bars[4].ioport) {
return;
}
@@ -1114,59 +1114,19 @@ static void vfio_probe_rtl8168_bar2_quirk(VFIOPCIDevice *vdev, int nr)
trace_vfio_quirk_rtl8168_probe(vdev->vbasedev.name);
}
-#define IGD_ASLS 0xfc /* ASL Storage Register */
-
/*
- * The OpRegion includes the Video BIOS Table, which seems important for
- * telling the driver what sort of outputs it has. Without this, the device
- * may work in the guest, but we may not get output. This also requires BIOS
- * support to reserve and populate a section of guest memory sufficient for
- * the table and to write the base address of that memory to the ASLS register
- * of the IGD device.
+ * Common quirk probe entry points.
*/
-bool vfio_pci_igd_opregion_init(VFIOPCIDevice *vdev,
- struct vfio_region_info *info, Error **errp)
+bool vfio_config_quirk_setup(VFIOPCIDevice *vdev, Error **errp)
{
- int ret;
-
- vdev->igd_opregion = g_malloc0(info->size);
- ret = pread(vdev->vbasedev.fd, vdev->igd_opregion,
- info->size, info->offset);
- if (ret != info->size) {
- error_setg(errp, "failed to read IGD OpRegion");
- g_free(vdev->igd_opregion);
- vdev->igd_opregion = NULL;
+#ifdef CONFIG_VFIO_IGD
+ if (!vfio_probe_igd_config_quirk(vdev, errp)) {
return false;
}
-
- /*
- * Provide fw_cfg with a copy of the OpRegion which the VM firmware is to
- * allocate 32bit reserved memory for, copy these contents into, and write
- * the reserved memory base address to the device ASLS register at 0xFC.
- * Alignment of this reserved region seems flexible, but using a 4k page
- * alignment seems to work well. This interface assumes a single IGD
- * device, which may be at VM address 00:02.0 in legacy mode or another
- * address in UPT mode.
- *
- * NB, there may be future use cases discovered where the VM should have
- * direct interaction with the host OpRegion, in which case the write to
- * the ASLS register would trigger MemoryRegion setup to enable that.
- */
- fw_cfg_add_file(fw_cfg_find(), "etc/igd-opregion",
- vdev->igd_opregion, info->size);
-
- trace_vfio_pci_igd_opregion_enabled(vdev->vbasedev.name);
-
- pci_set_long(vdev->pdev.config + IGD_ASLS, 0);
- pci_set_long(vdev->pdev.wmask + IGD_ASLS, ~0);
- pci_set_long(vdev->emulated_config_bits + IGD_ASLS, ~0);
-
+#endif
return true;
}
-/*
- * Common quirk probe entry points.
- */
void vfio_vga_quirk_setup(VFIOPCIDevice *vdev)
{
vfio_vga_probe_ati_3c3_quirk(vdev);
@@ -1215,7 +1175,6 @@ void vfio_bar_quirk_setup(VFIOPCIDevice *vdev, int nr)
vfio_probe_rtl8168_bar2_quirk(vdev, nr);
#ifdef CONFIG_VFIO_IGD
vfio_probe_igd_bar0_quirk(vdev, nr);
- vfio_probe_igd_bar4_quirk(vdev, nr);
#endif
}
diff --git a/hw/vfio/pci.c b/hw/vfio/pci.c
index fdbc158..7f1532f 100644
--- a/hw/vfio/pci.c
+++ b/hw/vfio/pci.c
@@ -3128,6 +3128,10 @@ static void vfio_realize(PCIDevice *pdev, Error **errp)
goto out_unset_idev;
}
+ if (!vfio_config_quirk_setup(vdev, errp)) {
+ goto out_unset_idev;
+ }
+
if (vdev->vga) {
vfio_vga_quirk_setup(vdev);
}
@@ -3136,31 +3140,6 @@ static void vfio_realize(PCIDevice *pdev, Error **errp)
vfio_bar_quirk_setup(vdev, i);
}
- if (!vdev->igd_opregion &&
- vdev->features & VFIO_FEATURE_ENABLE_IGD_OPREGION) {
- g_autofree struct vfio_region_info *opregion = NULL;
-
- if (vdev->pdev.qdev.hotplugged) {
- error_setg(errp,
- "cannot support IGD OpRegion feature on hotplugged "
- "device");
- goto out_unset_idev;
- }
-
- ret = vfio_get_dev_region_info(vbasedev,
- VFIO_REGION_TYPE_PCI_VENDOR_TYPE | PCI_VENDOR_ID_INTEL,
- VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION, &opregion);
- if (ret) {
- error_setg_errno(errp, -ret,
- "does not support requested IGD OpRegion feature");
- goto out_unset_idev;
- }
-
- if (!vfio_pci_igd_opregion_init(vdev, opregion, errp)) {
- goto out_unset_idev;
- }
- }
-
/* QEMU emulates all of MSI & MSIX */
if (pdev->cap_present & QEMU_PCI_CAP_MSIX) {
memset(vdev->emulated_config_bits + pdev->msix_cap, 0xff,
@@ -3381,6 +3360,10 @@ static const Property vfio_pci_dev_properties[] = {
VFIO_FEATURE_ENABLE_REQ_BIT, true),
DEFINE_PROP_BIT("x-igd-opregion", VFIOPCIDevice, features,
VFIO_FEATURE_ENABLE_IGD_OPREGION_BIT, false),
+ DEFINE_PROP_BIT("x-igd-lpc", VFIOPCIDevice, features,
+ VFIO_FEATURE_ENABLE_IGD_LPC_BIT, false),
+ DEFINE_PROP_ON_OFF_AUTO("x-igd-legacy-mode", VFIOPCIDevice,
+ igd_legacy_mode, ON_OFF_AUTO_AUTO),
DEFINE_PROP_ON_OFF_AUTO("enable-migration", VFIOPCIDevice,
vbasedev.enable_migration, ON_OFF_AUTO_AUTO),
DEFINE_PROP("x-migration-multifd-transfer", VFIOPCIDevice,
@@ -3549,7 +3532,7 @@ static void vfio_pci_dev_class_init(ObjectClass *klass, void *data)
object_class_property_set_description(klass, /* 9.1 */
"x-device-dirty-page-tracking",
"Disable device dirty page tracking and use "
- "container-based dirty page tracking (DEBUG)");
+ "container-based dirty page tracking");
object_class_property_set_description(klass, /* 9.1 */
"migration-events",
"Emit VFIO migration QAPI event when a VFIO device "
diff --git a/hw/vfio/pci.h b/hw/vfio/pci.h
index d638c78..d94ecab 100644
--- a/hw/vfio/pci.h
+++ b/hw/vfio/pci.h
@@ -154,10 +154,14 @@ struct VFIOPCIDevice {
#define VFIO_FEATURE_ENABLE_IGD_OPREGION_BIT 2
#define VFIO_FEATURE_ENABLE_IGD_OPREGION \
(1 << VFIO_FEATURE_ENABLE_IGD_OPREGION_BIT)
+#define VFIO_FEATURE_ENABLE_IGD_LPC_BIT 3
+#define VFIO_FEATURE_ENABLE_IGD_LPC \
+ (1 << VFIO_FEATURE_ENABLE_IGD_LPC_BIT)
OnOffAuto display;
uint32_t display_xres;
uint32_t display_yres;
int32_t bootindex;
+ OnOffAuto igd_legacy_mode;
uint32_t igd_gms;
OffAutoPCIBAR msix_relo;
uint8_t nv_gpudirect_clique;
@@ -204,6 +208,7 @@ uint64_t vfio_vga_read(void *opaque, hwaddr addr, unsigned size);
void vfio_vga_write(void *opaque, hwaddr addr, uint64_t data, unsigned size);
bool vfio_opt_rom_in_denylist(VFIOPCIDevice *vdev);
+bool vfio_config_quirk_setup(VFIOPCIDevice *vdev, Error **errp);
void vfio_vga_quirk_setup(VFIOPCIDevice *vdev);
void vfio_vga_quirk_exit(VFIOPCIDevice *vdev);
void vfio_vga_quirk_finalize(VFIOPCIDevice *vdev);
@@ -215,7 +220,7 @@ bool vfio_add_virt_caps(VFIOPCIDevice *vdev, Error **errp);
void vfio_quirk_reset(VFIOPCIDevice *vdev);
VFIOQuirk *vfio_quirk_alloc(int nr_mem);
void vfio_probe_igd_bar0_quirk(VFIOPCIDevice *vdev, int nr);
-void vfio_probe_igd_bar4_quirk(VFIOPCIDevice *vdev, int nr);
+bool vfio_probe_igd_config_quirk(VFIOPCIDevice *vdev, Error **errp);
extern const PropertyInfo qdev_prop_nv_gpudirect_clique;
@@ -227,10 +232,6 @@ int vfio_pci_get_pci_hot_reset_info(VFIOPCIDevice *vdev,
bool vfio_populate_vga(VFIOPCIDevice *vdev, Error **errp);
-bool vfio_pci_igd_opregion_init(VFIOPCIDevice *vdev,
- struct vfio_region_info *info,
- Error **errp);
-
void vfio_display_reset(VFIOPCIDevice *vdev);
bool vfio_display_probe(VFIOPCIDevice *vdev, Error **errp);
void vfio_display_finalize(VFIOPCIDevice *vdev);
diff --git a/hw/vfio/spapr.c b/hw/vfio/spapr.c
index ad4c499..1a5d161 100644
--- a/hw/vfio/spapr.c
+++ b/hw/vfio/spapr.c
@@ -11,10 +11,8 @@
#include "qemu/osdep.h"
#include <sys/ioctl.h>
#include <linux/vfio.h>
-#ifdef CONFIG_KVM
-#include <linux/kvm.h>
-#endif
#include "system/kvm.h"
+#include "system/hostmem.h"
#include "exec/address-spaces.h"
#include "hw/vfio/vfio-common.h"
diff --git a/include/exec/ram_addr.h b/include/exec/ram_addr.h
index 3d8df4e..e4c28fb 100644
--- a/include/exec/ram_addr.h
+++ b/include/exec/ram_addr.h
@@ -102,9 +102,6 @@ static inline unsigned long int ramblock_recv_bitmap_offset(void *host_addr,
bool ramblock_is_pmem(RAMBlock *rb);
-long qemu_minrampagesize(void);
-long qemu_maxrampagesize(void);
-
/**
* qemu_ram_alloc_from_file,
* qemu_ram_alloc_from_fd: Allocate a ram block from the specified backing
diff --git a/include/hw/pci-host/pnv_phb4.h b/include/hw/pci-host/pnv_phb4.h
index 8abee78..8a80c0c 100644
--- a/include/hw/pci-host/pnv_phb4.h
+++ b/include/hw/pci-host/pnv_phb4.h
@@ -13,6 +13,7 @@
#include "hw/pci-host/pnv_phb.h"
#include "hw/pci/pci_bus.h"
#include "hw/ppc/pnv.h"
+#include "hw/ppc/pnv_nest_pervasive.h"
#include "hw/ppc/xive.h"
#include "qom/object.h"
@@ -174,6 +175,9 @@ struct PnvPhb4PecState {
uint32_t index;
uint32_t chip_id;
+ /* Pervasive chiplet control */
+ PnvNestChipletPervasive nest_pervasive;
+
/* Nest registers, excuding per-stack */
#define PHB4_PEC_NEST_REGS_COUNT 0xf
uint64_t nest_regs[PHB4_PEC_NEST_REGS_COUNT];
@@ -196,6 +200,7 @@ struct PnvPhb4PecState {
struct PnvPhb4PecClass {
DeviceClass parent_class;
+ uint32_t (*xscom_cplt_base)(PnvPhb4PecState *pec);
uint32_t (*xscom_nest_base)(PnvPhb4PecState *pec);
uint32_t xscom_nest_size;
uint32_t (*xscom_pci_base)(PnvPhb4PecState *pec);
diff --git a/include/hw/ppc/pnv.h b/include/hw/ppc/pnv.h
index fcb6699..d8fca07 100644
--- a/include/hw/ppc/pnv.h
+++ b/include/hw/ppc/pnv.h
@@ -205,9 +205,8 @@ void pnv_bmc_set_pnor(IPMIBmc *bmc, PnvPnor *pnor);
#define PNV9_OCC_SENSOR_BASE(chip) (PNV9_OCC_COMMON_AREA_BASE + \
PNV_OCC_SENSOR_DATA_BLOCK_BASE((chip)->chip_id))
-#define PNV9_HOMER_SIZE 0x0000000000400000ull
#define PNV9_HOMER_BASE(chip) \
- (0x203ffd800000ull + ((uint64_t)(chip)->chip_id) * PNV9_HOMER_SIZE)
+ (0x203ffd800000ull + ((uint64_t)(chip)->chip_id) * PNV_HOMER_SIZE)
/*
* POWER10 MMIO base addresses - 16TB stride per chip
@@ -250,8 +249,7 @@ void pnv_bmc_set_pnor(IPMIBmc *bmc, PnvPnor *pnor);
#define PNV10_OCC_SENSOR_BASE(chip) (PNV10_OCC_COMMON_AREA_BASE + \
PNV_OCC_SENSOR_DATA_BLOCK_BASE((chip)->chip_id))
-#define PNV10_HOMER_SIZE 0x0000000000400000ull
#define PNV10_HOMER_BASE(chip) \
- (0x300ffd800000ll + ((uint64_t)(chip)->chip_id) * PNV10_HOMER_SIZE)
+ (0x300ffd800000ll + ((uint64_t)(chip)->chip_id) * PNV_HOMER_SIZE)
#endif /* PPC_PNV_H */
diff --git a/include/hw/ppc/pnv_homer.h b/include/hw/ppc/pnv_homer.h
index b1c5d49..a6f2710 100644
--- a/include/hw/ppc/pnv_homer.h
+++ b/include/hw/ppc/pnv_homer.h
@@ -41,19 +41,21 @@ struct PnvHomer {
PnvChip *chip;
MemoryRegion pba_regs;
- MemoryRegion regs;
+ MemoryRegion mem;
+ hwaddr base;
};
struct PnvHomerClass {
DeviceClass parent_class;
+ /* Get base address of HOMER memory */
+ hwaddr (*get_base)(PnvChip *chip);
+ /* Size of HOMER memory */
+ int size;
+
int pba_size;
const MemoryRegionOps *pba_ops;
- int homer_size;
- const MemoryRegionOps *homer_ops;
-
- hwaddr core_max_base;
};
#endif /* PPC_PNV_HOMER_H */
diff --git a/include/hw/ppc/pnv_occ.h b/include/hw/ppc/pnv_occ.h
index df32124..3ec42de 100644
--- a/include/hw/ppc/pnv_occ.h
+++ b/include/hw/ppc/pnv_occ.h
@@ -41,11 +41,17 @@ DECLARE_INSTANCE_CHECKER(PnvOCC, PNV10_OCC, TYPE_PNV10_OCC)
struct PnvOCC {
DeviceState xd;
+ /* OCC dynamic model is driven by this timer. */
+ QEMUTimer state_machine_timer;
+
/* OCC Misc interrupt */
uint64_t occmisc;
qemu_irq psi_irq;
+ /* OCCs operate on regions of HOMER memory */
+ PnvHomer *homer;
+
MemoryRegion xscom_regs;
MemoryRegion sram_regs;
};
@@ -53,6 +59,9 @@ struct PnvOCC {
struct PnvOCCClass {
DeviceClass parent_class;
+ hwaddr opal_shared_memory_offset; /* offset in HOMER */
+ uint8_t opal_shared_memory_version;
+
int xscom_size;
const MemoryRegionOps *xscom_ops;
};
diff --git a/include/hw/ppc/pnv_pnor.h b/include/hw/ppc/pnv_pnor.h
index 2e37ac8..19c2d64 100644
--- a/include/hw/ppc/pnv_pnor.h
+++ b/include/hw/ppc/pnv_pnor.h
@@ -13,9 +13,11 @@
#include "hw/sysbus.h"
/*
- * PNOR offset on the LPC FW address space
+ * PNOR offset in the LPC FW address space. For now this should be 0,
+ * because skiboot 7.1 has a bug where accesses with IDSEL > 0 (LPC FW
+ * address > 256MB) are not performed correctly.
*/
-#define PNOR_SPI_OFFSET 0x0c000000UL
+#define PNOR_SPI_OFFSET 0x00000000UL
#define TYPE_PNV_PNOR "pnv-pnor"
OBJECT_DECLARE_SIMPLE_TYPE(PnvPnor, PNV_PNOR)
diff --git a/include/hw/ppc/pnv_xscom.h b/include/hw/ppc/pnv_xscom.h
index 648388a..a927aea 100644
--- a/include/hw/ppc/pnv_xscom.h
+++ b/include/hw/ppc/pnv_xscom.h
@@ -126,6 +126,8 @@ struct PnvXScomInterfaceClass {
#define PNV9_XSCOM_PEC_PCI_BASE 0xd010800
#define PNV9_XSCOM_PEC_PCI_SIZE 0x200
+#define PNV9_XSCOM_PEC_NEST_CPLT_BASE 0x0d000000
+
/* XSCOM PCI "pass-through" window to PHB SCOM */
#define PNV9_XSCOM_PEC_PCI_STK0 0x100
#define PNV9_XSCOM_PEC_PCI_STK1 0x140
@@ -197,6 +199,8 @@ struct PnvXScomInterfaceClass {
#define PNV10_XSCOM_PEC_NEST_BASE 0x3011800 /* index goes downwards ... */
#define PNV10_XSCOM_PEC_NEST_SIZE 0x100
+#define PNV10_XSCOM_PEC_NEST_CPLT_BASE 0x08000000
+
#define PNV10_XSCOM_PEC_PCI_BASE 0x8010800 /* index goes upwards ... */
#define PNV10_XSCOM_PEC_PCI_SIZE 0x200
diff --git a/include/hw/ppc/spapr.h b/include/hw/ppc/spapr.h
index a6c0547..39bd5bd 100644
--- a/include/hw/ppc/spapr.h
+++ b/include/hw/ppc/spapr.h
@@ -83,8 +83,10 @@ typedef enum {
#define SPAPR_CAP_AIL_MODE_3 0x0C
/* Nested PAPR */
#define SPAPR_CAP_NESTED_PAPR 0x0D
+/* DAWR1 */
+#define SPAPR_CAP_DAWR1 0x0E
/* Num Caps */
-#define SPAPR_CAP_NUM (SPAPR_CAP_NESTED_PAPR + 1)
+#define SPAPR_CAP_NUM (SPAPR_CAP_DAWR1 + 1)
/*
* Capability Values
@@ -201,6 +203,7 @@ struct SpaprMachineState {
uint32_t fdt_initial_size;
void *fdt_blob;
uint8_t fdt_rng_seed[32];
+ uint64_t hashpkey_val;
long kernel_size;
bool kernel_le;
uint64_t kernel_addr;
@@ -406,6 +409,7 @@ struct SpaprMachineState {
#define H_SET_MODE_RESOURCE_SET_DAWR0 2
#define H_SET_MODE_RESOURCE_ADDR_TRANS_MODE 3
#define H_SET_MODE_RESOURCE_LE 4
+#define H_SET_MODE_RESOURCE_SET_DAWR1 5
/* Flags for H_SET_MODE_RESOURCE_LE */
#define H_SET_MODE_ENDIAN_BIG 0
@@ -1003,6 +1007,7 @@ extern const VMStateDescription vmstate_spapr_cap_fwnmi;
extern const VMStateDescription vmstate_spapr_cap_rpt_invalidate;
extern const VMStateDescription vmstate_spapr_cap_ail_mode_3;
extern const VMStateDescription vmstate_spapr_wdt;
+extern const VMStateDescription vmstate_spapr_cap_dawr1;
static inline uint8_t spapr_get_cap(SpaprMachineState *spapr, int cap)
{
diff --git a/include/hw/ppc/spapr_nested.h b/include/hw/ppc/spapr_nested.h
index e420220..f7be0d5 100644
--- a/include/hw/ppc/spapr_nested.h
+++ b/include/hw/ppc/spapr_nested.h
@@ -11,7 +11,13 @@
#define GSB_TB_OFFSET 0x0004 /* Timebase Offset */
#define GSB_PART_SCOPED_PAGETBL 0x0005 /* Partition Scoped Page Table */
#define GSB_PROCESS_TBL 0x0006 /* Process Table */
- /* RESERVED 0x0007 - 0x0BFF */
+ /* RESERVED 0x0007 - 0x07FF */
+#define GSB_L0_GUEST_HEAP_INUSE 0x0800 /* Guest Management Heap Size */
+#define GSB_L0_GUEST_HEAP_MAX 0x0801 /* Guest Management Heap Max Size */
+#define GSB_L0_GUEST_PGTABLE_SIZE_INUSE 0x0802 /* Guest Pagetable Size */
+#define GSB_L0_GUEST_PGTABLE_SIZE_MAX 0x0803 /* Guest Pagetable Max Size */
+#define GSB_L0_GUEST_PGTABLE_RECLAIMED 0x0804 /* Pagetable Reclaim in bytes */
+ /* RESERVED 0x0805 - 0xBFF */
#define GSB_VCPU_IN_BUFFER 0x0C00 /* Run VCPU Input Buffer */
#define GSB_VCPU_OUT_BUFFER 0x0C01 /* Run VCPU Out Buffer */
#define GSB_VCPU_VPA 0x0C02 /* HRA to Guest VCPU VPA */
@@ -196,6 +202,38 @@ typedef struct SpaprMachineStateNested {
#define NESTED_API_PAPR 2
bool capabilities_set;
uint32_t pvr_base;
+
+ /**
+ * l0_guest_heap_inuse: The currently used bytes in the Hypervisor's Guest
+ * Management Space associated with the Host Partition.
+ **/
+ uint64_t l0_guest_heap_inuse;
+
+ /**
+ * l0_guest_heap_max: The maximum bytes available in the Hypervisor's Guest
+ * Management Space associated with the Host Partition.
+ **/
+ uint64_t l0_guest_heap_max;
+
+ /**
+ * l0_guest_pgtable_size_inuse: The currently used bytes in the
+ * Hypervisor's Guest Page Table Management Space associated with the
+ * Host Partition.
+ **/
+ uint64_t l0_guest_pgtable_size_inuse;
+
+ /**
+ * l0_guest_pgtable_size_max: The maximum bytes available in the
+ * Hypervisor's Guest Page Table Management Space associated with the
+ * Host Partition.
+ **/
+ uint64_t l0_guest_pgtable_size_max;
+
+ /**
+ * l0_guest_pgtable_reclaimed: The amount of space in bytes that has been
+ * reclaimed due to overcommit in the Hypervisor's Guest Page Table
+ * Management Space associated with the Host Partition.
+ **/
+ uint64_t l0_guest_pgtable_reclaimed;
+
GHashTable *guests;
} SpaprMachineStateNested;
@@ -229,9 +267,15 @@ typedef struct SpaprMachineStateNestedGuest {
#define HVMASK_HDEXCR 0x00000000FFFFFFFF
#define HVMASK_TB_OFFSET 0x000000FFFFFFFFFF
#define GSB_MAX_BUF_SIZE (1024 * 1024)
-#define H_GUEST_GETSET_STATE_FLAG_GUEST_WIDE 0x8000000000000000
-#define GUEST_STATE_REQUEST_GUEST_WIDE 0x1
-#define GUEST_STATE_REQUEST_SET 0x2
+#define H_GUEST_GET_STATE_FLAGS_MASK 0xC000000000000000ULL
+#define H_GUEST_SET_STATE_FLAGS_MASK 0x8000000000000000ULL
+#define H_GUEST_SET_STATE_FLAGS_GUEST_WIDE 0x8000000000000000ULL
+#define H_GUEST_GET_STATE_FLAGS_GUEST_WIDE 0x8000000000000000ULL
+#define H_GUEST_GET_STATE_FLAGS_HOST_WIDE 0x4000000000000000ULL
+
+#define GUEST_STATE_REQUEST_GUEST_WIDE 0x1
+#define GUEST_STATE_REQUEST_HOST_WIDE 0x2
+#define GUEST_STATE_REQUEST_SET 0x4
/*
* As per ISA v3.1B, following bits are reserved:
@@ -251,6 +295,15 @@ typedef struct SpaprMachineStateNestedGuest {
.copy = (c) \
}
+#define GSBE_NESTED_MACHINE_DW(i, f) { \
+ .id = (i), \
+ .size = 8, \
+ .location = get_machine_ptr, \
+ .offset = offsetof(struct SpaprMachineStateNested, f), \
+ .copy = copy_state_8to8, \
+ .mask = HVMASK_DEFAULT \
+}
+
#define GSBE_NESTED(i, sz, f, c) { \
.id = (i), \
.size = (sz), \
@@ -509,9 +562,11 @@ struct guest_state_element_type {
uint16_t id;
int size;
#define GUEST_STATE_ELEMENT_TYPE_FLAG_GUEST_WIDE 0x1
-#define GUEST_STATE_ELEMENT_TYPE_FLAG_READ_ONLY 0x2
+#define GUEST_STATE_ELEMENT_TYPE_FLAG_HOST_WIDE 0x2
+#define GUEST_STATE_ELEMENT_TYPE_FLAG_READ_ONLY 0x4
uint16_t flags;
- void *(*location)(SpaprMachineStateNestedGuest *, target_ulong);
+ void *(*location)(struct SpaprMachineState *, SpaprMachineStateNestedGuest *,
+ target_ulong);
size_t offset;
void (*copy)(void *, void *, bool);
uint64_t mask;
diff --git a/include/hw/ppc/xive.h b/include/hw/ppc/xive.h
index ea5d03a..538f438 100644
--- a/include/hw/ppc/xive.h
+++ b/include/hw/ppc/xive.h
@@ -130,11 +130,9 @@
* TCTX Thread interrupt Context
*
*
- * Copyright (c) 2017-2018, IBM Corporation.
- *
- * This code is licensed under the GPL version 2 or later. See the
- * COPYING file in the top-level directory.
+ * Copyright (c) 2017-2024, IBM Corporation.
*
+ * SPDX-License-Identifier: GPL-2.0-or-later
*/
#ifndef PPC_XIVE_H
@@ -424,6 +422,7 @@ void xive_router_end_notify(XiveRouter *xrtr, XiveEAS *eas);
typedef struct XiveTCTXMatch {
XiveTCTX *tctx;
uint8_t ring;
+ bool precluded;
} XiveTCTXMatch;
#define TYPE_XIVE_PRESENTER "xive-presenter"
@@ -439,10 +438,13 @@ struct XivePresenterClass {
InterfaceClass parent;
int (*match_nvt)(XivePresenter *xptr, uint8_t format,
uint8_t nvt_blk, uint32_t nvt_idx,
- bool cam_ignore, uint8_t priority,
+ bool crowd, bool cam_ignore, uint8_t priority,
uint32_t logic_serv, XiveTCTXMatch *match);
bool (*in_kernel)(const XivePresenter *xptr);
uint32_t (*get_config)(XivePresenter *xptr);
+ int (*broadcast)(XivePresenter *xptr,
+ uint8_t nvt_blk, uint32_t nvt_idx,
+ bool crowd, bool cam_ignore, uint8_t priority);
};
int xive_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx,
@@ -451,8 +453,10 @@ int xive_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx,
bool cam_ignore, uint32_t logic_serv);
bool xive_presenter_notify(XiveFabric *xfb, uint8_t format,
uint8_t nvt_blk, uint32_t nvt_idx,
- bool cam_ignore, uint8_t priority,
- uint32_t logic_serv);
+ bool crowd, bool cam_ignore, uint8_t priority,
+ uint32_t logic_serv, bool *precluded);
+
+uint32_t xive_get_vpgroup_size(uint32_t nvp_index);
/*
* XIVE Fabric (Interface between Interrupt Controller and Machine)
@@ -469,8 +473,10 @@ struct XiveFabricClass {
InterfaceClass parent;
int (*match_nvt)(XiveFabric *xfb, uint8_t format,
uint8_t nvt_blk, uint32_t nvt_idx,
- bool cam_ignore, uint8_t priority,
+ bool crowd, bool cam_ignore, uint8_t priority,
uint32_t logic_serv, XiveTCTXMatch *match);
+ int (*broadcast)(XiveFabric *xfb, uint8_t nvt_blk, uint32_t nvt_idx,
+ bool crowd, bool cam_ignore, uint8_t priority);
};
/*
@@ -510,6 +516,21 @@ static inline uint8_t xive_priority_to_ipb(uint8_t priority)
0 : 1 << (XIVE_PRIORITY_MAX - priority);
}
+static inline uint8_t xive_priority_to_pipr(uint8_t priority)
+{
+ return priority > XIVE_PRIORITY_MAX ? 0xFF : priority;
+}
+
+/*
+ * Convert an Interrupt Pending Buffer (IPB) register to a Pending
+ * Interrupt Priority Register (PIPR), which contains the priority of
+ * the most favored pending notification.
+ */
+static inline uint8_t xive_ipb_to_pipr(uint8_t ibp)
+{
+ return ibp ? clz32((uint32_t)ibp << 24) : 0xff;
+}
+
/*
* XIVE Thread Interrupt Management Aera (TIMA)
*
@@ -532,8 +553,10 @@ void xive_tctx_pic_print_info(XiveTCTX *tctx, GString *buf);
Object *xive_tctx_create(Object *cpu, XivePresenter *xptr, Error **errp);
void xive_tctx_reset(XiveTCTX *tctx);
void xive_tctx_destroy(XiveTCTX *tctx);
-void xive_tctx_ipb_update(XiveTCTX *tctx, uint8_t ring, uint8_t ipb);
+void xive_tctx_pipr_update(XiveTCTX *tctx, uint8_t ring, uint8_t priority,
+ uint8_t group_level);
void xive_tctx_reset_signal(XiveTCTX *tctx, uint8_t ring);
+void xive_tctx_notify(XiveTCTX *tctx, uint8_t ring, uint8_t group_level);
/*
* KVM XIVE device helpers
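
The new xive_ipb_to_pipr() helper above derives the PIPR from the IPB by counting leading zero bits, i.e. by finding the lowest-numbered (most favored) pending priority. A standalone illustration in plain C, where __builtin_clz() stands in for QEMU's clz32() and XIVE_PRIORITY_MAX is assumed to be 7:

    #include <stdint.h>
    #include <stdio.h>

    static uint8_t ipb_to_pipr(uint8_t ipb)
    {
        /* priority of the most favored pending notification, 0xff if none */
        return ipb ? __builtin_clz((uint32_t)ipb << 24) : 0xff;
    }

    int main(void)
    {
        uint8_t ipb = 1 << (7 - 3);                /* mark priority 3 pending */
        printf("pipr = %d\n", ipb_to_pipr(ipb));   /* prints "pipr = 3" */
        return 0;
    }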
diff --git a/include/hw/ppc/xive2.h b/include/hw/ppc/xive2.h
index 5bccf41..8cdf819 100644
--- a/include/hw/ppc/xive2.h
+++ b/include/hw/ppc/xive2.h
@@ -1,11 +1,9 @@
/*
* QEMU PowerPC XIVE2 interrupt controller model (POWER10)
*
- * Copyright (c) 2019-2022, IBM Corporation.
- *
- * This code is licensed under the GPL version 2 or later. See the
- * COPYING file in the top-level directory.
+ * Copyright (c) 2019-2024, IBM Corporation.
*
+ * SPDX-License-Identifier: GPL-2.0-or-later
*/
#ifndef PPC_XIVE2_H
@@ -90,7 +88,17 @@ void xive2_router_notify(XiveNotifier *xn, uint32_t lisn, bool pq_checked);
int xive2_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx,
uint8_t format,
uint8_t nvt_blk, uint32_t nvt_idx,
- bool cam_ignore, uint32_t logic_serv);
+ bool crowd, bool cam_ignore,
+ uint32_t logic_serv);
+
+uint64_t xive2_presenter_nvp_backlog_op(XivePresenter *xptr,
+ uint8_t blk, uint32_t idx,
+ uint16_t offset);
+
+uint64_t xive2_presenter_nvgc_backlog_op(XivePresenter *xptr,
+ bool crowd,
+ uint8_t blk, uint32_t idx,
+ uint16_t offset, uint16_t val);
/*
* XIVE2 END ESBs (POWER10)
@@ -115,12 +123,18 @@ typedef struct Xive2EndSource {
* XIVE2 Thread Interrupt Management Area (POWER10)
*/
+void xive2_tm_set_hv_cppr(XivePresenter *xptr, XiveTCTX *tctx,
+ hwaddr offset, uint64_t value, unsigned size);
+void xive2_tm_set_os_cppr(XivePresenter *xptr, XiveTCTX *tctx,
+ hwaddr offset, uint64_t value, unsigned size);
void xive2_tm_push_os_ctx(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset,
uint64_t value, unsigned size);
uint64_t xive2_tm_pull_os_ctx(XivePresenter *xptr, XiveTCTX *tctx,
hwaddr offset, unsigned size);
void xive2_tm_pull_os_ctx_ol(XivePresenter *xptr, XiveTCTX *tctx,
hwaddr offset, uint64_t value, unsigned size);
+bool xive2_tm_irq_precluded(XiveTCTX *tctx, int ring, uint8_t priority);
+void xive2_tm_set_lsmfb(XiveTCTX *tctx, int ring, uint8_t priority);
void xive2_tm_set_hv_target(XivePresenter *xptr, XiveTCTX *tctx,
hwaddr offset, uint64_t value, unsigned size);
void xive2_tm_pull_phys_ctx_ol(XivePresenter *xptr, XiveTCTX *tctx,
diff --git a/include/hw/ppc/xive2_regs.h b/include/hw/ppc/xive2_regs.h
index 1d00c8d..b11395c 100644
--- a/include/hw/ppc/xive2_regs.h
+++ b/include/hw/ppc/xive2_regs.h
@@ -1,10 +1,9 @@
/*
* QEMU PowerPC XIVE2 internal structure definitions (POWER10)
*
- * Copyright (c) 2019-2022, IBM Corporation.
+ * Copyright (c) 2019-2024, IBM Corporation.
*
- * This code is licensed under the GPL version 2 or later. See the
- * COPYING file in the top-level directory.
+ * SPDX-License-Identifier: GPL-2.0-or-later
*/
#ifndef PPC_XIVE2_REGS_H
@@ -152,6 +151,9 @@ typedef struct Xive2Nvp {
uint32_t w0;
#define NVP2_W0_VALID PPC_BIT32(0)
#define NVP2_W0_HW PPC_BIT32(7)
+#define NVP2_W0_L PPC_BIT32(8)
+#define NVP2_W0_G PPC_BIT32(9)
+#define NVP2_W0_T PPC_BIT32(10)
#define NVP2_W0_ESC_END PPC_BIT32(25) /* 'N' bit 0:ESB 1:END */
#define NVP2_W0_PGOFIRST PPC_BITMASK32(26, 31)
uint32_t w1;
@@ -163,6 +165,8 @@ typedef struct Xive2Nvp {
#define NVP2_W2_CPPR PPC_BITMASK32(0, 7)
#define NVP2_W2_IPB PPC_BITMASK32(8, 15)
#define NVP2_W2_LSMFB PPC_BITMASK32(16, 23)
+#define NVP2_W2_T PPC_BIT32(27)
+#define NVP2_W2_LGS PPC_BITMASK32(28, 31)
uint32_t w3;
uint32_t w4;
#define NVP2_W4_ESC_ESB_BLOCK PPC_BITMASK32(0, 3) /* N:0 */
@@ -229,4 +233,11 @@ typedef struct Xive2Nvgc {
void xive2_nvgc_pic_print_info(Xive2Nvgc *nvgc, uint32_t nvgc_idx,
GString *buf);
+#define NVx_BACKLOG_OP PPC_BITMASK(52, 53)
+#define NVx_BACKLOG_PRIO PPC_BITMASK(57, 59)
+
+/* split the 6-bit crowd/group level */
+#define NVx_CROWD_LVL(level) ((level >> 4) & 0b11)
+#define NVx_GROUP_LVL(level) (level & 0b1111)
+
#endif /* PPC_XIVE2_REGS_H */
diff --git a/include/hw/ppc/xive_regs.h b/include/hw/ppc/xive_regs.h
index 326327f..54bc6c5 100644
--- a/include/hw/ppc/xive_regs.h
+++ b/include/hw/ppc/xive_regs.h
@@ -7,10 +7,9 @@
* access to the different fields.
*
*
- * Copyright (c) 2016-2018, IBM Corporation.
+ * Copyright (c) 2016-2024, IBM Corporation.
*
- * This code is licensed under the GPL version 2 or later. See the
- * COPYING file in the top-level directory.
+ * SPDX-License-Identifier: GPL-2.0-or-later
*/
#ifndef PPC_XIVE_REGS_H
@@ -146,7 +145,14 @@
#define TM_SPC_PULL_PHYS_CTX_OL 0xc38 /* Pull phys ctx to odd cache line */
/* XXX more... */
-/* NSR fields for the various QW ack types */
+/*
+ * NSR fields for the various QW ack types
+ *
+ * P10 has an extra bit in QW3 for the group level instead of the
+ * reserved 'i' bit. Since the 'i' bit is not used and we don't support
+ * group interrupts on P9, we use the P10 definition for the group level
+ * so that we can have common macros for the NSR.
+ */
#define TM_QW0_NSR_EB PPC_BIT8(0)
#define TM_QW1_NSR_EO PPC_BIT8(0)
#define TM_QW3_NSR_HE PPC_BITMASK8(0, 1)
@@ -154,8 +160,15 @@
#define TM_QW3_NSR_HE_POOL 1
#define TM_QW3_NSR_HE_PHYS 2
#define TM_QW3_NSR_HE_LSI 3
-#define TM_QW3_NSR_I PPC_BIT8(2)
-#define TM_QW3_NSR_GRP_LVL PPC_BIT8(3, 7)
+#define TM_NSR_GRP_LVL PPC_BITMASK8(2, 7)
+/*
+ * On P10, the format of the 6-bit group level is: 2 bits for the
+ * crowd size and 4 bits for the group size. Since group/crowd size is
+ * always a power of 2, we encode the log. For example, group_level=4
+ * means crowd size = 0 and group size = 16 (2^4).
+ * The same encoding is used in the NVP and NVGC structures for the
+ * PGoFirst and PGoNext fields.
+ */
/*
* EAS (Event Assignment Structure)
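
A quick standalone check of the 6-bit crowd/group level encoding documented above, mirroring the NVx_CROWD_LVL()/NVx_GROUP_LVL() split added to xive2_regs.h (the example value is arbitrary):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint8_t level = 0x14;                         /* 0b010100 */
        unsigned crowd = 1u << ((level >> 4) & 0x3);  /* 2^1 = 2  */
        unsigned group = 1u << (level & 0xf);         /* 2^4 = 16 */
        printf("crowd size %u, group size %u\n", crowd, group);
        return 0;
    }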
diff --git a/include/hw/ssi/pnv_spi.h b/include/hw/ssi/pnv_spi.h
index 8815f67..c591a06 100644
--- a/include/hw/ssi/pnv_spi.h
+++ b/include/hw/ssi/pnv_spi.h
@@ -23,6 +23,7 @@
#include "hw/ssi/ssi.h"
#include "hw/sysbus.h"
+#include "qemu/fifo8.h"
#define TYPE_PNV_SPI "pnv-spi"
OBJECT_DECLARE_SIMPLE_TYPE(PnvSpi, PNV_SPI)
@@ -30,15 +31,19 @@ OBJECT_DECLARE_SIMPLE_TYPE(PnvSpi, PNV_SPI)
#define PNV_SPI_REG_SIZE 8
#define PNV_SPI_REGS 7
-#define TYPE_PNV_SPI_BUS "pnv-spi-bus"
+#define TYPE_PNV_SPI_BUS "spi"
typedef struct PnvSpi {
SysBusDevice parent_obj;
SSIBus *ssi_bus;
qemu_irq *cs_line;
MemoryRegion xscom_spic_regs;
+ Fifo8 tx_fifo;
+ Fifo8 rx_fifo;
+ uint8_t fail_count; /* RDR Match failure counter */
/* SPI object number */
uint32_t spic_num;
+ uint32_t chip_id;
uint8_t transfer_len;
uint8_t responder_select;
/* To verify if shift_n1 happens prior to shift_n2 */
diff --git a/include/system/hostmem.h b/include/system/hostmem.h
index 5c21ca5..62642e6 100644
--- a/include/system/hostmem.h
+++ b/include/system/hostmem.h
@@ -93,4 +93,7 @@ bool host_memory_backend_is_mapped(HostMemoryBackend *backend);
size_t host_memory_backend_pagesize(HostMemoryBackend *memdev);
char *host_memory_backend_get_name(HostMemoryBackend *backend);
+long qemu_minrampagesize(void);
+long qemu_maxrampagesize(void);
+
#endif
diff --git a/pc-bios/README b/pc-bios/README
index 700dcaa..f0f13e1 100644
--- a/pc-bios/README
+++ b/pc-bios/README
@@ -13,8 +13,8 @@
- SLOF (Slimline Open Firmware) is a free IEEE 1275 Open Firmware
implementation for certain IBM POWER hardware. The sources are at
- https://github.com/aik/SLOF, and the image currently in qemu is
- built from git tag qemu-slof-20230918.
+ https://gitlab.com/slof/slof, and the image currently in qemu is
+ built from git tag qemu-slof-20241106.
- VOF (Virtual Open Firmware) is a minimalistic firmware to work with
-machine pseries,x-vof=on. When enabled, the firmware acts as a slim shim and
@@ -43,6 +43,19 @@
run an hypervisor OS or simply a host OS on the "baremetal"
platform, also known as the PowerNV (Non-Virtualized) platform.
+- pnv-pnor.bin is a non-volatile RAM image used by PowerNV, which stores
+ NVRAM BIOS settings among other things. This image was created with the
+ following command (the ffspart tool can be found in the skiboot source tree):
+
+ ffspart -s 0x1000 -c 34 -i pnv-pnor.in -p pnv-pnor.bin
+
+ Where pnv-pnor.in contains the two lines (no leading whitespace):
+
+ NVRAM,0x01000,0x00020000,,,/dev/zero
+ VERSION,0x21000,0x00001000,,,/dev/zero
+
+ skiboot is then booted once to format the NVRAM partition.
+
- QemuMacDrivers (https://github.com/ozbenh/QemuMacDrivers) is a project to
provide virtualised drivers for PPC MacOS guests.
diff --git a/pc-bios/meson.build b/pc-bios/meson.build
index 51e95cc..34d6616 100644
--- a/pc-bios/meson.build
+++ b/pc-bios/meson.build
@@ -70,6 +70,7 @@ blobs = [
's390-ccw.img',
'slof.bin',
'skiboot.lid',
+ 'pnv-pnor.bin',
'palcode-clipper',
'u-boot.e500',
'u-boot-sam460-20100605.bin',
diff --git a/pc-bios/pnv-pnor.bin b/pc-bios/pnv-pnor.bin
new file mode 100644
index 0000000..3e6f700
--- /dev/null
+++ b/pc-bios/pnv-pnor.bin
Binary files differ
diff --git a/pc-bios/skiboot.lid b/pc-bios/skiboot.lid
index 906bd51..ffc77ee 100644
--- a/pc-bios/skiboot.lid
+++ b/pc-bios/skiboot.lid
Binary files differ
diff --git a/pc-bios/slof.bin b/pc-bios/slof.bin
index 27fed09..4314e17 100644
--- a/pc-bios/slof.bin
+++ b/pc-bios/slof.bin
Binary files differ
diff --git a/qapi/qapi-schema.json b/qapi/qapi-schema.json
index 2877aff..4475e81 100644
--- a/qapi/qapi-schema.json
+++ b/qapi/qapi-schema.json
@@ -5,6 +5,8 @@
#
# This document describes all commands currently supported by QMP.
#
+# For locating a particular item, please see the `qapi-index`.
+#
# Most of the time their usage is exactly the same as in the user
# Monitor, this means that any other document which also describe
# commands (the manpage, QEMU's manual, etc) can and should be
diff --git a/roms/skiboot b/roms/skiboot
-Subproject 24a7eb35966d93455520bc2debdd7954314b638
+Subproject 785a5e3070a86e18521e62fe202b87209de30fa
diff --git a/scripts/qapi/main.py b/scripts/qapi/main.py
index 5b4679a..0e2a6ae 100644
--- a/scripts/qapi/main.py
+++ b/scripts/qapi/main.py
@@ -31,34 +31,28 @@ def create_backend(path: str) -> QAPIBackend:
module_path, dot, class_name = path.rpartition('.')
if not dot:
- print("argument of -B must be of the form MODULE.CLASS",
- file=sys.stderr)
- sys.exit(1)
+ raise QAPIError("argument of -B must be of the form MODULE.CLASS")
try:
mod = import_module(module_path)
except Exception as ex:
- print(f"unable to import '{module_path}': {ex}", file=sys.stderr)
- sys.exit(1)
+ raise QAPIError(f"unable to import '{module_path}': {ex}") from ex
try:
klass = getattr(mod, class_name)
- except AttributeError:
- print(f"module '{module_path}' has no class '{class_name}'",
- file=sys.stderr)
- sys.exit(1)
+ except AttributeError as ex:
+ raise QAPIError(
+ f"module '{module_path}' has no class '{class_name}'") from ex
try:
backend = klass()
except Exception as ex:
- print(f"backend '{path}' cannot be instantiated: {ex}",
- file=sys.stderr)
- sys.exit(1)
+ raise QAPIError(
+ f"backend '{path}' cannot be instantiated: {ex}") from ex
if not isinstance(backend, QAPIBackend):
- print(f"backend '{path}' must be an instance of QAPIBackend",
- file=sys.stderr)
- sys.exit(1)
+ raise QAPIError(
+ f"backend '{path}' must be an instance of QAPIBackend")
return backend
diff --git a/scripts/qapi/parser.py b/scripts/qapi/parser.py
index 64f0bb8..949d9e8 100644
--- a/scripts/qapi/parser.py
+++ b/scripts/qapi/parser.py
@@ -14,6 +14,7 @@
# This work is licensed under the terms of the GNU GPL, version 2.
# See the COPYING file in the top-level directory.
+import enum
import os
import re
from typing import (
@@ -574,7 +575,10 @@ class QAPISchemaParser:
)
raise QAPIParseError(self, emsg)
- doc.new_tagged_section(self.info, match.group(1))
+ doc.new_tagged_section(
+ self.info,
+ QAPIDoc.Kind.from_string(match.group(1))
+ )
text = line[match.end():]
if text:
doc.append_line(text)
@@ -585,7 +589,7 @@ class QAPISchemaParser:
self,
"unexpected '=' markup in definition documentation")
else:
- # tag-less paragraph
+ # plain paragraph
doc.ensure_untagged_section(self.info)
doc.append_line(line)
line = self.get_doc_paragraph(doc)
@@ -634,23 +638,51 @@ class QAPIDoc:
Free-form documentation blocks consist only of a body section.
"""
+ class Kind(enum.Enum):
+ PLAIN = 0
+ MEMBER = 1
+ FEATURE = 2
+ RETURNS = 3
+ ERRORS = 4
+ SINCE = 5
+ TODO = 6
+
+ @staticmethod
+ def from_string(kind: str) -> 'QAPIDoc.Kind':
+ return QAPIDoc.Kind[kind.upper()]
+
+ def __str__(self) -> str:
+ return self.name.title()
+
class Section:
# pylint: disable=too-few-public-methods
- def __init__(self, info: QAPISourceInfo,
- tag: Optional[str] = None):
+ def __init__(
+ self,
+ info: QAPISourceInfo,
+ kind: 'QAPIDoc.Kind',
+ ):
# section source info, i.e. where it begins
self.info = info
- # section tag, if any ('Returns', '@name', ...)
- self.tag = tag
+ # section kind
+ self.kind = kind
# section text without tag
self.text = ''
+ def __repr__(self) -> str:
+ return f"<QAPIDoc.Section kind={self.kind!r} text={self.text!r}>"
+
def append_line(self, line: str) -> None:
self.text += line + '\n'
class ArgSection(Section):
- def __init__(self, info: QAPISourceInfo, tag: str):
- super().__init__(info, tag)
+ def __init__(
+ self,
+ info: QAPISourceInfo,
+ kind: 'QAPIDoc.Kind',
+ name: str
+ ):
+ super().__init__(info, kind)
+ self.name = name
self.member: Optional['QAPISchemaMember'] = None
def connect(self, member: 'QAPISchemaMember') -> None:
@@ -662,7 +694,9 @@ class QAPIDoc:
# definition doc's symbol, None for free-form doc
self.symbol: Optional[str] = symbol
# the sections in textual order
- self.all_sections: List[QAPIDoc.Section] = [QAPIDoc.Section(info)]
+ self.all_sections: List[QAPIDoc.Section] = [
+ QAPIDoc.Section(info, QAPIDoc.Kind.PLAIN)
+ ]
# the body section
self.body: Optional[QAPIDoc.Section] = self.all_sections[0]
# dicts mapping parameter/feature names to their description
@@ -679,55 +713,71 @@ class QAPIDoc:
def end(self) -> None:
for section in self.all_sections:
section.text = section.text.strip('\n')
- if section.tag is not None and section.text == '':
+ if section.kind != QAPIDoc.Kind.PLAIN and section.text == '':
raise QAPISemError(
- section.info, "text required after '%s:'" % section.tag)
+ section.info, "text required after '%s:'" % section.kind)
def ensure_untagged_section(self, info: QAPISourceInfo) -> None:
- if self.all_sections and not self.all_sections[-1].tag:
+ kind = QAPIDoc.Kind.PLAIN
+
+ if self.all_sections and self.all_sections[-1].kind == kind:
# extend current section
- self.all_sections[-1].text += '\n'
+ section = self.all_sections[-1]
+ if not section.text:
+ # Section is empty so far; update info to start *here*.
+ section.info = info
+ section.text += '\n'
return
+
# start new section
- section = self.Section(info)
+ section = self.Section(info, kind)
self.sections.append(section)
self.all_sections.append(section)
- def new_tagged_section(self, info: QAPISourceInfo, tag: str) -> None:
- section = self.Section(info, tag)
- if tag == 'Returns':
+ def new_tagged_section(
+ self,
+ info: QAPISourceInfo,
+ kind: 'QAPIDoc.Kind',
+ ) -> None:
+ section = self.Section(info, kind)
+ if kind == QAPIDoc.Kind.RETURNS:
if self.returns:
raise QAPISemError(
- info, "duplicated '%s' section" % tag)
+ info, "duplicated '%s' section" % kind)
self.returns = section
- elif tag == 'Errors':
+ elif kind == QAPIDoc.Kind.ERRORS:
if self.errors:
raise QAPISemError(
- info, "duplicated '%s' section" % tag)
+ info, "duplicated '%s' section" % kind)
self.errors = section
- elif tag == 'Since':
+ elif kind == QAPIDoc.Kind.SINCE:
if self.since:
raise QAPISemError(
- info, "duplicated '%s' section" % tag)
+ info, "duplicated '%s' section" % kind)
self.since = section
self.sections.append(section)
self.all_sections.append(section)
- def _new_description(self, info: QAPISourceInfo, name: str,
- desc: Dict[str, ArgSection]) -> None:
+ def _new_description(
+ self,
+ info: QAPISourceInfo,
+ name: str,
+ kind: 'QAPIDoc.Kind',
+ desc: Dict[str, ArgSection]
+ ) -> None:
if not name:
raise QAPISemError(info, "invalid parameter name")
if name in desc:
raise QAPISemError(info, "'%s' parameter name duplicated" % name)
- section = self.ArgSection(info, '@' + name)
+ section = self.ArgSection(info, kind, name)
self.all_sections.append(section)
desc[name] = section
def new_argument(self, info: QAPISourceInfo, name: str) -> None:
- self._new_description(info, name, self.args)
+ self._new_description(info, name, QAPIDoc.Kind.MEMBER, self.args)
def new_feature(self, info: QAPISourceInfo, name: str) -> None:
- self._new_description(info, name, self.features)
+ self._new_description(info, name, QAPIDoc.Kind.FEATURE, self.features)
def append_line(self, line: str) -> None:
self.all_sections[-1].append_line(line)
@@ -739,8 +789,23 @@ class QAPIDoc:
raise QAPISemError(member.info,
"%s '%s' lacks documentation"
% (member.role, member.name))
- self.args[member.name] = QAPIDoc.ArgSection(
- self.info, '@' + member.name)
+ # Insert stub documentation section for missing member docs.
+ # TODO: drop when undocumented members are outlawed
+
+ section = QAPIDoc.ArgSection(
+ self.info, QAPIDoc.Kind.MEMBER, member.name)
+ self.args[member.name] = section
+
+ # Determine where to insert stub doc - it should go at the
+ # end of the members section(s), if any. Note that index 0
+ # is assumed to be an untagged intro section, even if it is
+ # empty.
+ index = 1
+ if len(self.all_sections) > 1:
+ while self.all_sections[index].kind == QAPIDoc.Kind.MEMBER:
+ index += 1
+ self.all_sections.insert(index, section)
+
self.args[member.name].connect(member)
def connect_feature(self, feature: 'QAPISchemaFeature') -> None:
diff --git a/scripts/qapi/source.py b/scripts/qapi/source.py
index 7b379fd..ffdc3f4 100644
--- a/scripts/qapi/source.py
+++ b/scripts/qapi/source.py
@@ -47,9 +47,9 @@ class QAPISourceInfo:
self.defn_meta = meta
self.defn_name = name
- def next_line(self: T) -> T:
+ def next_line(self: T, n: int = 1) -> T:
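+ # Return a copy of this source info advanced by n lines (1 by default).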
info = copy.copy(self)
- info.line += 1
+ info.line += n
return info
def loc(self) -> str:
diff --git a/target/ppc/cpu.c b/target/ppc/cpu.c
index d148cd7..bfcc695 100644
--- a/target/ppc/cpu.c
+++ b/target/ppc/cpu.c
@@ -130,11 +130,13 @@ void ppc_store_ciabr(CPUPPCState *env, target_ulong val)
ppc_update_ciabr(env);
}
-void ppc_update_daw0(CPUPPCState *env)
+void ppc_update_daw(CPUPPCState *env, int rid)
{
CPUState *cs = env_cpu(env);
- target_ulong deaw = env->spr[SPR_DAWR0] & PPC_BITMASK(0, 60);
- uint32_t dawrx = env->spr[SPR_DAWRX0];
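+ /* rid selects the watchpoint pair: 0 -> DAWR0/DAWRX0, 1 -> DAWR1/DAWRX1 */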
+ int spr_dawr = rid ? SPR_DAWR1 : SPR_DAWR0;
+ int spr_dawrx = rid ? SPR_DAWRX1 : SPR_DAWRX0;
+ target_ulong deaw = env->spr[spr_dawr] & PPC_BITMASK(0, 60);
+ uint32_t dawrx = env->spr[spr_dawrx];
int mrd = extract32(dawrx, PPC_BIT_NR(48), 54 - 48);
bool dw = extract32(dawrx, PPC_BIT_NR(57), 1);
bool dr = extract32(dawrx, PPC_BIT_NR(58), 1);
@@ -144,9 +146,9 @@ void ppc_update_daw0(CPUPPCState *env)
vaddr len;
int flags;
- if (env->dawr0_watchpoint) {
- cpu_watchpoint_remove_by_ref(cs, env->dawr0_watchpoint);
- env->dawr0_watchpoint = NULL;
+ if (env->dawr_watchpoint[rid]) {
+ cpu_watchpoint_remove_by_ref(cs, env->dawr_watchpoint[rid]);
+ env->dawr_watchpoint[rid] = NULL;
}
if (!dr && !dw) {
@@ -166,28 +168,45 @@ void ppc_update_daw0(CPUPPCState *env)
flags |= BP_MEM_WRITE;
}
- cpu_watchpoint_insert(cs, deaw, len, flags, &env->dawr0_watchpoint);
+ cpu_watchpoint_insert(cs, deaw, len, flags, &env->dawr_watchpoint[rid]);
}
void ppc_store_dawr0(CPUPPCState *env, target_ulong val)
{
env->spr[SPR_DAWR0] = val;
- ppc_update_daw0(env);
+ ppc_update_daw(env, 0);
}
-void ppc_store_dawrx0(CPUPPCState *env, uint32_t val)
+static void ppc_store_dawrx(CPUPPCState *env, uint32_t val, int rid)
{
int hrammc = extract32(val, PPC_BIT_NR(56), 1);
if (hrammc) {
/* This might be done with a second watchpoint at the xor of DEAW[0] */
- qemu_log_mask(LOG_UNIMP, "%s: DAWRX0[HRAMMC] is unimplemented\n",
- __func__);
+ qemu_log_mask(LOG_UNIMP, "%s: DAWRX%d[HRAMMC] is unimplemented\n",
+ __func__, rid);
}
- env->spr[SPR_DAWRX0] = val;
- ppc_update_daw0(env);
+ env->spr[rid ? SPR_DAWRX1 : SPR_DAWRX0] = val;
+ ppc_update_daw(env, rid);
+}
+
+void ppc_store_dawrx0(CPUPPCState *env, uint32_t val)
+{
+ ppc_store_dawrx(env, val, 0);
+}
+
+void ppc_store_dawr1(CPUPPCState *env, target_ulong val)
+{
+ env->spr[SPR_DAWR1] = val;
+ ppc_update_daw(env, 1);
+}
+
+void ppc_store_dawrx1(CPUPPCState *env, uint32_t val)
+{
+ ppc_store_dawrx(env, val, 1);
}
+
#endif
#endif
diff --git a/target/ppc/cpu.h b/target/ppc/cpu.h
index 0b8b4c0..efab54a 100644
--- a/target/ppc/cpu.h
+++ b/target/ppc/cpu.h
@@ -1260,7 +1260,7 @@ struct CPUArchState {
#if defined(TARGET_PPC64)
ppc_slb_t slb[MAX_SLB_ENTRIES]; /* PowerPC 64 SLB area */
struct CPUBreakpoint *ciabr_breakpoint;
- struct CPUWatchpoint *dawr0_watchpoint;
+ struct CPUWatchpoint *dawr_watchpoint[2];
#endif
target_ulong sr[32]; /* segment registers */
uint32_t nb_BATs; /* number of BATs */
@@ -1589,9 +1589,11 @@ void ppc_store_sdr1(CPUPPCState *env, target_ulong value);
void ppc_store_lpcr(PowerPCCPU *cpu, target_ulong val);
void ppc_update_ciabr(CPUPPCState *env);
void ppc_store_ciabr(CPUPPCState *env, target_ulong value);
-void ppc_update_daw0(CPUPPCState *env);
+void ppc_update_daw(CPUPPCState *env, int rid);
void ppc_store_dawr0(CPUPPCState *env, target_ulong value);
void ppc_store_dawrx0(CPUPPCState *env, uint32_t value);
+void ppc_store_dawr1(CPUPPCState *env, target_ulong value);
+void ppc_store_dawrx1(CPUPPCState *env, uint32_t value);
#endif /* !defined(CONFIG_USER_ONLY) */
void ppc_store_msr(CPUPPCState *env, target_ulong value);
@@ -2091,6 +2093,7 @@ void ppc_compat_add_property(Object *obj, const char *name,
#define SPR_VTB (0x351)
#define SPR_LDBAR (0x352)
#define SPR_MMCRC (0x353)
+#define SPR_PMSR (0x355)
#define SPR_PSSCR (0x357)
#define SPR_440_INV0 (0x370)
#define SPR_440_INV1 (0x371)
@@ -2098,8 +2101,10 @@ void ppc_compat_add_property(Object *obj, const char *name,
#define SPR_440_INV2 (0x372)
#define SPR_TRIG2 (0x372)
#define SPR_440_INV3 (0x373)
+#define SPR_PMCR (0x374)
#define SPR_440_ITV0 (0x374)
#define SPR_440_ITV1 (0x375)
+#define SPR_RWMR (0x375)
#define SPR_440_ITV2 (0x376)
#define SPR_440_ITV3 (0x377)
#define SPR_440_CCR1 (0x378)
@@ -2752,11 +2757,6 @@ static inline void cpu_get_tb_cpu_state(CPUPPCState *env, vaddr *pc,
}
#endif
-G_NORETURN void raise_exception(CPUPPCState *env, uint32_t exception);
-G_NORETURN void raise_exception_ra(CPUPPCState *env, uint32_t exception,
- uintptr_t raddr);
-G_NORETURN void raise_exception_err(CPUPPCState *env, uint32_t exception,
- uint32_t error_code);
G_NORETURN void raise_exception_err_ra(CPUPPCState *env, uint32_t exception,
uint32_t error_code, uintptr_t raddr);
diff --git a/target/ppc/cpu_init.c b/target/ppc/cpu_init.c
index 1780cab..8b590e7 100644
--- a/target/ppc/cpu_init.c
+++ b/target/ppc/cpu_init.c
@@ -922,6 +922,18 @@ static void register_BookE206_sprs(CPUPPCState *env, uint32_t mas_mask,
#endif
}
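+/* Alternate Time Base (ATBL/ATBU): readable at any privilege level, not writable */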
+static void register_atb_sprs(CPUPPCState *env)
+{
+ spr_register(env, SPR_ATBL, "ATBL",
+ &spr_read_atbl, SPR_NOACCESS,
+ &spr_read_atbl, SPR_NOACCESS,
+ 0x00000000);
+ spr_register(env, SPR_ATBU, "ATBU",
+ &spr_read_atbu, SPR_NOACCESS,
+ &spr_read_atbu, SPR_NOACCESS,
+ 0x00000000);
+}
+
/* SPR specific to PowerPC 440 implementation */
static void register_440_sprs(CPUPPCState *env)
{
@@ -2911,6 +2923,11 @@ static void init_proc_e500(CPUPPCState *env, int version)
register_BookE206_sprs(env, 0x000000DF, tlbncfg, mmucfg);
register_usprgh_sprs(env);
+ if (version != fsl_e500v1) {
+ /* e500v1 has no support for alternate timebase */
+ register_atb_sprs(env);
+ }
+
spr_register(env, SPR_HID0, "HID0",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
@@ -5172,6 +5189,20 @@ static void register_book3s_207_dbg_sprs(CPUPPCState *env)
KVM_REG_PPC_CIABR, 0x00000000);
}
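+/* ISA v3.1 (POWER10) debug facility: a second watchpoint register pair, DAWR1/DAWRX1 */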
+static void register_book3s_310_dbg_sprs(CPUPPCState *env)
+{
+ spr_register_kvm_hv(env, SPR_DAWR1, "DAWR1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_dawr1,
+ KVM_REG_PPC_DAWR1, 0x00000000);
+ spr_register_kvm_hv(env, SPR_DAWRX1, "DAWRX1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_dawrx1,
+ KVM_REG_PPC_DAWRX1, 0x00000000);
+}
+
static void register_970_dbg_sprs(CPUPPCState *env)
{
/* Breakpoints */
@@ -5773,6 +5804,11 @@ static void register_power9_book4_sprs(CPUPPCState *env)
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
KVM_REG_PPC_WORT, 0);
+ spr_register_hv(env, SPR_RWMR, "RWMR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
#endif
}
@@ -6451,6 +6487,17 @@ static void register_power9_common_sprs(CPUPPCState *env)
spr_read_generic, spr_write_generic,
KVM_REG_PPC_PSSCR, 0);
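+ /* Power management SPRs: PMSR is hypervisor read-only, PMCR is hypervisor read/write */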
+ spr_register_hv(env, SPR_PMSR, "PMSR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_pmsr, SPR_NOACCESS,
+ 0);
+ spr_register_hv(env, SPR_PMCR, "PMCR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_pmcr,
+ PPC_BIT(63)); /* Version 1 (POWER9/10) */
+
}
static void init_proc_POWER9(CPUPPCState *env)
@@ -6568,6 +6615,7 @@ static void init_proc_POWER10(CPUPPCState *env)
{
register_power9_common_sprs(env);
register_HEIR64_spr(env);
+ register_book3s_310_dbg_sprs(env);
register_power10_hash_sprs(env);
register_power10_dexcr_sprs(env);
register_power10_pmu_sup_sprs(env);
diff --git a/target/ppc/excp_helper.c b/target/ppc/excp_helper.c
index fde9912..44e19aa 100644
--- a/target/ppc/excp_helper.c
+++ b/target/ppc/excp_helper.c
@@ -19,6 +19,7 @@
#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "qemu/log.h"
+#include "system/tcg.h"
#include "system/system.h"
#include "system/runstate.h"
#include "cpu.h"
@@ -29,12 +30,6 @@
#include "trace.h"
-#ifdef CONFIG_TCG
-#include "system/tcg.h"
-#include "exec/helper-proto.h"
-#include "exec/cpu_ldst.h"
-#endif
-
/*****************************************************************************/
/* Exception processing */
#ifndef CONFIG_USER_ONLY
@@ -136,27 +131,6 @@ static void dump_hcall(CPUPPCState *env)
env->nip);
}
-#ifdef CONFIG_TCG
-/* Return true iff byteswap is needed to load instruction */
-static inline bool insn_need_byteswap(CPUArchState *env)
-{
- /* SYSTEM builds TARGET_BIG_ENDIAN. Need to swap when MSR[LE] is set */
- return !!(env->msr & ((target_ulong)1 << MSR_LE));
-}
-
-static uint32_t ppc_ldl_code(CPUArchState *env, target_ulong addr)
-{
- uint32_t insn = cpu_ldl_code(env, addr);
-
- if (insn_need_byteswap(env)) {
- insn = bswap32(insn);
- }
-
- return insn;
-}
-
-#endif
-
static void ppc_excp_debug_sw_tlb(CPUPPCState *env, int excp)
{
const char *es;
@@ -420,57 +394,14 @@ static void powerpc_set_excp_state(PowerPCCPU *cpu, target_ulong vector,
env->reserve_addr = -1;
}
-#ifdef CONFIG_TCG
-/*
- * This stops the machine and logs CPU state without killing QEMU (like
- * cpu_abort()) because it is often a guest error as opposed to a QEMU error,
- * so the machine can still be debugged.
- */
-static G_NORETURN void powerpc_checkstop(CPUPPCState *env, const char *reason)
-{
- CPUState *cs = env_cpu(env);
- FILE *f;
-
- f = qemu_log_trylock();
- if (f) {
- fprintf(f, "Entering checkstop state: %s\n", reason);
- cpu_dump_state(cs, f, CPU_DUMP_FPU | CPU_DUMP_CCOP);
- qemu_log_unlock(f);
- }
-
- /*
- * This stops the machine and logs CPU state without killing QEMU
- * (like cpu_abort()) so the machine can still be debugged (because
- * it is often a guest error).
- */
- qemu_system_guest_panicked(NULL);
- cpu_loop_exit_noexc(cs);
-}
-
-#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
-void helper_attn(CPUPPCState *env)
-{
- /* POWER attn is unprivileged when enabled by HID, otherwise illegal */
- if ((*env->check_attn)(env)) {
- powerpc_checkstop(env, "host executed attn");
- } else {
- raise_exception_err(env, POWERPC_EXCP_HV_EMU,
- POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
- }
-}
-#endif
-#endif /* CONFIG_TCG */
-
static void powerpc_mcheck_checkstop(CPUPPCState *env)
{
/* KVM guests always have MSR[ME] enabled */
-#ifdef CONFIG_TCG
if (FIELD_EX64(env->msr, MSR, ME)) {
return;
}
-
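+ /* powerpc_checkstop() is TCG-only; KVM guests never run with MSR[ME] clear */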
+ assert(tcg_enabled());
powerpc_checkstop(env, "machine check with MSR[ME]=0");
-#endif
}
static void powerpc_excp_40x(PowerPCCPU *cpu, int excp)
@@ -1620,7 +1551,7 @@ static inline void powerpc_excp_books(PowerPCCPU *cpu, int excp)
}
#endif /* TARGET_PPC64 */
-static void powerpc_excp(PowerPCCPU *cpu, int excp)
+void powerpc_excp(PowerPCCPU *cpu, int excp)
{
CPUPPCState *env = &cpu->env;
@@ -2552,770 +2483,3 @@ bool ppc_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
}
#endif /* !CONFIG_USER_ONLY */
-
-/*****************************************************************************/
-/* Exceptions processing helpers */
-
-void raise_exception_err_ra(CPUPPCState *env, uint32_t exception,
- uint32_t error_code, uintptr_t raddr)
-{
- CPUState *cs = env_cpu(env);
-
- cs->exception_index = exception;
- env->error_code = error_code;
- cpu_loop_exit_restore(cs, raddr);
-}
-
-void raise_exception_err(CPUPPCState *env, uint32_t exception,
- uint32_t error_code)
-{
- raise_exception_err_ra(env, exception, error_code, 0);
-}
-
-void raise_exception(CPUPPCState *env, uint32_t exception)
-{
- raise_exception_err_ra(env, exception, 0, 0);
-}
-
-void raise_exception_ra(CPUPPCState *env, uint32_t exception,
- uintptr_t raddr)
-{
- raise_exception_err_ra(env, exception, 0, raddr);
-}
-
-#ifdef CONFIG_TCG
-void helper_raise_exception_err(CPUPPCState *env, uint32_t exception,
- uint32_t error_code)
-{
- raise_exception_err_ra(env, exception, error_code, 0);
-}
-
-void helper_raise_exception(CPUPPCState *env, uint32_t exception)
-{
- raise_exception_err_ra(env, exception, 0, 0);
-}
-
-#ifndef CONFIG_USER_ONLY
-void helper_store_msr(CPUPPCState *env, target_ulong val)
-{
- uint32_t excp = hreg_store_msr(env, val, 0);
-
- if (excp != 0) {
- cpu_interrupt_exittb(env_cpu(env));
- raise_exception(env, excp);
- }
-}
-
-void helper_ppc_maybe_interrupt(CPUPPCState *env)
-{
- ppc_maybe_interrupt(env);
-}
-
-#ifdef TARGET_PPC64
-void helper_scv(CPUPPCState *env, uint32_t lev)
-{
- if (env->spr[SPR_FSCR] & (1ull << FSCR_SCV)) {
- raise_exception_err(env, POWERPC_EXCP_SYSCALL_VECTORED, lev);
- } else {
- raise_exception_err(env, POWERPC_EXCP_FU, FSCR_IC_SCV);
- }
-}
-
-void helper_pminsn(CPUPPCState *env, uint32_t insn)
-{
- CPUState *cs = env_cpu(env);
-
- cs->halted = 1;
-
- /* Condition for waking up at 0x100 */
- env->resume_as_sreset = (insn != PPC_PM_STOP) ||
- (env->spr[SPR_PSSCR] & PSSCR_EC);
-
- /* HDECR is not to wake from PM state, it may have already fired */
- if (env->resume_as_sreset) {
- PowerPCCPU *cpu = env_archcpu(env);
- ppc_set_irq(cpu, PPC_INTERRUPT_HDECR, 0);
- }
-
- ppc_maybe_interrupt(env);
-}
-#endif /* TARGET_PPC64 */
-
-static void do_rfi(CPUPPCState *env, target_ulong nip, target_ulong msr)
-{
- /* MSR:POW cannot be set by any form of rfi */
- msr &= ~(1ULL << MSR_POW);
-
- /* MSR:TGPR cannot be set by any form of rfi */
- if (env->flags & POWERPC_FLAG_TGPR)
- msr &= ~(1ULL << MSR_TGPR);
-
-#ifdef TARGET_PPC64
- /* Switching to 32-bit ? Crop the nip */
- if (!msr_is_64bit(env, msr)) {
- nip = (uint32_t)nip;
- }
-#else
- nip = (uint32_t)nip;
-#endif
- /* XXX: beware: this is false if VLE is supported */
- env->nip = nip & ~((target_ulong)0x00000003);
- hreg_store_msr(env, msr, 1);
- trace_ppc_excp_rfi(env->nip, env->msr);
- /*
- * No need to raise an exception here, as rfi is always the last
- * insn of a TB
- */
- cpu_interrupt_exittb(env_cpu(env));
- /* Reset the reservation */
- env->reserve_addr = -1;
-
- /* Context synchronizing: check if TCG TLB needs flush */
- check_tlb_flush(env, false);
-}
-
-void helper_rfi(CPUPPCState *env)
-{
- do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1] & 0xfffffffful);
-}
-
-#ifdef TARGET_PPC64
-void helper_rfid(CPUPPCState *env)
-{
- /*
- * The architecture defines a number of rules for which bits can
- * change but in practice, we handle this in hreg_store_msr()
- * which will be called by do_rfi(), so there is no need to filter
- * here
- */
- do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1]);
-}
-
-void helper_rfscv(CPUPPCState *env)
-{
- do_rfi(env, env->lr, env->ctr);
-}
-
-void helper_hrfid(CPUPPCState *env)
-{
- do_rfi(env, env->spr[SPR_HSRR0], env->spr[SPR_HSRR1]);
-}
-
-void helper_rfebb(CPUPPCState *env, target_ulong s)
-{
- target_ulong msr = env->msr;
-
- /*
- * Handling of BESCR bits 32:33 according to PowerISA v3.1:
- *
- * "If BESCR 32:33 != 0b00 the instruction is treated as if
- * the instruction form were invalid."
- */
- if (env->spr[SPR_BESCR] & BESCR_INVALID) {
- raise_exception_err(env, POWERPC_EXCP_PROGRAM,
- POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
- }
-
- env->nip = env->spr[SPR_EBBRR];
-
- /* Switching to 32-bit ? Crop the nip */
- if (!msr_is_64bit(env, msr)) {
- env->nip = (uint32_t)env->spr[SPR_EBBRR];
- }
-
- if (s) {
- env->spr[SPR_BESCR] |= BESCR_GE;
- } else {
- env->spr[SPR_BESCR] &= ~BESCR_GE;
- }
-}
-
-/*
- * Triggers or queues an 'ebb_excp' EBB exception. All checks
- * but FSCR, HFSCR and msr_pr must be done beforehand.
- *
- * PowerISA v3.1 isn't clear about whether an EBB should be
- * postponed or cancelled if the EBB facility is unavailable.
- * Our assumption here is that the EBB is cancelled if both
- * FSCR and HFSCR EBB facilities aren't available.
- */
-static void do_ebb(CPUPPCState *env, int ebb_excp)
-{
- PowerPCCPU *cpu = env_archcpu(env);
-
- /*
- * FSCR_EBB and FSCR_IC_EBB are the same bits used with
- * HFSCR.
- */
- helper_fscr_facility_check(env, FSCR_EBB, 0, FSCR_IC_EBB);
- helper_hfscr_facility_check(env, FSCR_EBB, "EBB", FSCR_IC_EBB);
-
- if (ebb_excp == POWERPC_EXCP_PERFM_EBB) {
- env->spr[SPR_BESCR] |= BESCR_PMEO;
- } else if (ebb_excp == POWERPC_EXCP_EXTERNAL_EBB) {
- env->spr[SPR_BESCR] |= BESCR_EEO;
- }
-
- if (FIELD_EX64(env->msr, MSR, PR)) {
- powerpc_excp(cpu, ebb_excp);
- } else {
- ppc_set_irq(cpu, PPC_INTERRUPT_EBB, 1);
- }
-}
-
-void raise_ebb_perfm_exception(CPUPPCState *env)
-{
- bool perfm_ebb_enabled = env->spr[SPR_POWER_MMCR0] & MMCR0_EBE &&
- env->spr[SPR_BESCR] & BESCR_PME &&
- env->spr[SPR_BESCR] & BESCR_GE;
-
- if (!perfm_ebb_enabled) {
- return;
- }
-
- do_ebb(env, POWERPC_EXCP_PERFM_EBB);
-}
-#endif /* TARGET_PPC64 */
-
-/*****************************************************************************/
-/* Embedded PowerPC specific helpers */
-void helper_40x_rfci(CPUPPCState *env)
-{
- do_rfi(env, env->spr[SPR_40x_SRR2], env->spr[SPR_40x_SRR3]);
-}
-
-void helper_rfci(CPUPPCState *env)
-{
- do_rfi(env, env->spr[SPR_BOOKE_CSRR0], env->spr[SPR_BOOKE_CSRR1]);
-}
-
-void helper_rfdi(CPUPPCState *env)
-{
- /* FIXME: choose CSRR1 or DSRR1 based on cpu type */
- do_rfi(env, env->spr[SPR_BOOKE_DSRR0], env->spr[SPR_BOOKE_DSRR1]);
-}
-
-void helper_rfmci(CPUPPCState *env)
-{
- /* FIXME: choose CSRR1 or MCSRR1 based on cpu type */
- do_rfi(env, env->spr[SPR_BOOKE_MCSRR0], env->spr[SPR_BOOKE_MCSRR1]);
-}
-#endif /* !CONFIG_USER_ONLY */
-
-void helper_TW(CPUPPCState *env, target_ulong arg1, target_ulong arg2,
- uint32_t flags)
-{
- if (!likely(!(((int32_t)arg1 < (int32_t)arg2 && (flags & 0x10)) ||
- ((int32_t)arg1 > (int32_t)arg2 && (flags & 0x08)) ||
- ((int32_t)arg1 == (int32_t)arg2 && (flags & 0x04)) ||
- ((uint32_t)arg1 < (uint32_t)arg2 && (flags & 0x02)) ||
- ((uint32_t)arg1 > (uint32_t)arg2 && (flags & 0x01))))) {
- raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
- POWERPC_EXCP_TRAP, GETPC());
- }
-}
-
-#ifdef TARGET_PPC64
-void helper_TD(CPUPPCState *env, target_ulong arg1, target_ulong arg2,
- uint32_t flags)
-{
- if (!likely(!(((int64_t)arg1 < (int64_t)arg2 && (flags & 0x10)) ||
- ((int64_t)arg1 > (int64_t)arg2 && (flags & 0x08)) ||
- ((int64_t)arg1 == (int64_t)arg2 && (flags & 0x04)) ||
- ((uint64_t)arg1 < (uint64_t)arg2 && (flags & 0x02)) ||
- ((uint64_t)arg1 > (uint64_t)arg2 && (flags & 0x01))))) {
- raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
- POWERPC_EXCP_TRAP, GETPC());
- }
-}
-#endif /* TARGET_PPC64 */
-
-static uint32_t helper_SIMON_LIKE_32_64(uint32_t x, uint64_t key, uint32_t lane)
-{
- const uint16_t c = 0xfffc;
- const uint64_t z0 = 0xfa2561cdf44ac398ULL;
- uint16_t z = 0, temp;
- uint16_t k[32], eff_k[32], xleft[33], xright[33], fxleft[32];
-
- for (int i = 3; i >= 0; i--) {
- k[i] = key & 0xffff;
- key >>= 16;
- }
- xleft[0] = x & 0xffff;
- xright[0] = (x >> 16) & 0xffff;
-
- for (int i = 0; i < 28; i++) {
- z = (z0 >> (63 - i)) & 1;
- temp = ror16(k[i + 3], 3) ^ k[i + 1];
- k[i + 4] = c ^ z ^ k[i] ^ temp ^ ror16(temp, 1);
- }
-
- for (int i = 0; i < 8; i++) {
- eff_k[4 * i + 0] = k[4 * i + ((0 + lane) % 4)];
- eff_k[4 * i + 1] = k[4 * i + ((1 + lane) % 4)];
- eff_k[4 * i + 2] = k[4 * i + ((2 + lane) % 4)];
- eff_k[4 * i + 3] = k[4 * i + ((3 + lane) % 4)];
- }
-
- for (int i = 0; i < 32; i++) {
- fxleft[i] = (rol16(xleft[i], 1) &
- rol16(xleft[i], 8)) ^ rol16(xleft[i], 2);
- xleft[i + 1] = xright[i] ^ fxleft[i] ^ eff_k[i];
- xright[i + 1] = xleft[i];
- }
-
- return (((uint32_t)xright[32]) << 16) | xleft[32];
-}
-
-static uint64_t hash_digest(uint64_t ra, uint64_t rb, uint64_t key)
-{
- uint64_t stage0_h = 0ULL, stage0_l = 0ULL;
- uint64_t stage1_h, stage1_l;
-
- for (int i = 0; i < 4; i++) {
- stage0_h |= ror64(rb & 0xff, 8 * (2 * i + 1));
- stage0_h |= ((ra >> 32) & 0xff) << (8 * 2 * i);
- stage0_l |= ror64((rb >> 32) & 0xff, 8 * (2 * i + 1));
- stage0_l |= (ra & 0xff) << (8 * 2 * i);
- rb >>= 8;
- ra >>= 8;
- }
-
- stage1_h = (uint64_t)helper_SIMON_LIKE_32_64(stage0_h >> 32, key, 0) << 32;
- stage1_h |= helper_SIMON_LIKE_32_64(stage0_h, key, 1);
- stage1_l = (uint64_t)helper_SIMON_LIKE_32_64(stage0_l >> 32, key, 2) << 32;
- stage1_l |= helper_SIMON_LIKE_32_64(stage0_l, key, 3);
-
- return stage1_h ^ stage1_l;
-}
-
-static void do_hash(CPUPPCState *env, target_ulong ea, target_ulong ra,
- target_ulong rb, uint64_t key, bool store)
-{
- uint64_t calculated_hash = hash_digest(ra, rb, key), loaded_hash;
-
- if (store) {
- cpu_stq_data_ra(env, ea, calculated_hash, GETPC());
- } else {
- loaded_hash = cpu_ldq_data_ra(env, ea, GETPC());
- if (loaded_hash != calculated_hash) {
- raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
- POWERPC_EXCP_TRAP, GETPC());
- }
- }
-}
-
-#include "qemu/guest-random.h"
-
-#ifdef TARGET_PPC64
-#define HELPER_HASH(op, key, store, dexcr_aspect) \
-void helper_##op(CPUPPCState *env, target_ulong ea, target_ulong ra, \
- target_ulong rb) \
-{ \
- if (env->msr & R_MSR_PR_MASK) { \
- if (!(env->spr[SPR_DEXCR] & R_DEXCR_PRO_##dexcr_aspect##_MASK || \
- env->spr[SPR_HDEXCR] & R_HDEXCR_ENF_##dexcr_aspect##_MASK)) \
- return; \
- } else if (!(env->msr & R_MSR_HV_MASK)) { \
- if (!(env->spr[SPR_DEXCR] & R_DEXCR_PNH_##dexcr_aspect##_MASK || \
- env->spr[SPR_HDEXCR] & R_HDEXCR_ENF_##dexcr_aspect##_MASK)) \
- return; \
- } else if (!(env->msr & R_MSR_S_MASK)) { \
- if (!(env->spr[SPR_HDEXCR] & R_HDEXCR_HNU_##dexcr_aspect##_MASK)) \
- return; \
- } \
- \
- do_hash(env, ea, ra, rb, key, store); \
-}
-#else
-#define HELPER_HASH(op, key, store, dexcr_aspect) \
-void helper_##op(CPUPPCState *env, target_ulong ea, target_ulong ra, \
- target_ulong rb) \
-{ \
- do_hash(env, ea, ra, rb, key, store); \
-}
-#endif /* TARGET_PPC64 */
-
-HELPER_HASH(HASHST, env->spr[SPR_HASHKEYR], true, NPHIE)
-HELPER_HASH(HASHCHK, env->spr[SPR_HASHKEYR], false, NPHIE)
-HELPER_HASH(HASHSTP, env->spr[SPR_HASHPKEYR], true, PHIE)
-HELPER_HASH(HASHCHKP, env->spr[SPR_HASHPKEYR], false, PHIE)
-
-#ifndef CONFIG_USER_ONLY
-/* Embedded.Processor Control */
-static int dbell2irq(target_ulong rb)
-{
- int msg = rb & DBELL_TYPE_MASK;
- int irq = -1;
-
- switch (msg) {
- case DBELL_TYPE_DBELL:
- irq = PPC_INTERRUPT_DOORBELL;
- break;
- case DBELL_TYPE_DBELL_CRIT:
- irq = PPC_INTERRUPT_CDOORBELL;
- break;
- case DBELL_TYPE_G_DBELL:
- case DBELL_TYPE_G_DBELL_CRIT:
- case DBELL_TYPE_G_DBELL_MC:
- /* XXX implement */
- default:
- break;
- }
-
- return irq;
-}
-
-void helper_msgclr(CPUPPCState *env, target_ulong rb)
-{
- int irq = dbell2irq(rb);
-
- if (irq < 0) {
- return;
- }
-
- ppc_set_irq(env_archcpu(env), irq, 0);
-}
-
-void helper_msgsnd(target_ulong rb)
-{
- int irq = dbell2irq(rb);
- int pir = rb & DBELL_PIRTAG_MASK;
- CPUState *cs;
-
- if (irq < 0) {
- return;
- }
-
- bql_lock();
- CPU_FOREACH(cs) {
- PowerPCCPU *cpu = POWERPC_CPU(cs);
- CPUPPCState *cenv = &cpu->env;
-
- if ((rb & DBELL_BRDCAST_MASK) || (cenv->spr[SPR_BOOKE_PIR] == pir)) {
- ppc_set_irq(cpu, irq, 1);
- }
- }
- bql_unlock();
-}
-
-/* Server Processor Control */
-
-static bool dbell_type_server(target_ulong rb)
-{
- /*
- * A Directed Hypervisor Doorbell message is sent only if the
- * message type is 5. All other types are reserved and the
- * instruction is a no-op
- */
- return (rb & DBELL_TYPE_MASK) == DBELL_TYPE_DBELL_SERVER;
-}
-
-static inline bool dbell_bcast_core(target_ulong rb)
-{
- return (rb & DBELL_BRDCAST_MASK) == DBELL_BRDCAST_CORE;
-}
-
-static inline bool dbell_bcast_subproc(target_ulong rb)
-{
- return (rb & DBELL_BRDCAST_MASK) == DBELL_BRDCAST_SUBPROC;
-}
-
-/*
- * Send an interrupt to a thread in the same core as env).
- */
-static void msgsnd_core_tir(CPUPPCState *env, uint32_t target_tir, int irq)
-{
- PowerPCCPU *cpu = env_archcpu(env);
- CPUState *cs = env_cpu(env);
-
- if (ppc_cpu_lpar_single_threaded(cs)) {
- if (target_tir == 0) {
- ppc_set_irq(cpu, irq, 1);
- }
- } else {
- CPUState *ccs;
-
- /* Does iothread need to be locked for walking CPU list? */
- bql_lock();
- THREAD_SIBLING_FOREACH(cs, ccs) {
- PowerPCCPU *ccpu = POWERPC_CPU(ccs);
- if (target_tir == ppc_cpu_tir(ccpu)) {
- ppc_set_irq(ccpu, irq, 1);
- break;
- }
- }
- bql_unlock();
- }
-}
-
-void helper_book3s_msgclr(CPUPPCState *env, target_ulong rb)
-{
- if (!dbell_type_server(rb)) {
- return;
- }
-
- ppc_set_irq(env_archcpu(env), PPC_INTERRUPT_HDOORBELL, 0);
-}
-
-void helper_book3s_msgsnd(CPUPPCState *env, target_ulong rb)
-{
- int pir = rb & DBELL_PROCIDTAG_MASK;
- bool brdcast = false;
- CPUState *cs, *ccs;
- PowerPCCPU *cpu;
-
- if (!dbell_type_server(rb)) {
- return;
- }
-
- /* POWER8 msgsnd is like msgsndp (targets a thread within core) */
- if (!(env->insns_flags2 & PPC2_ISA300)) {
- msgsnd_core_tir(env, rb & PPC_BITMASK(57, 63), PPC_INTERRUPT_HDOORBELL);
- return;
- }
-
- /* POWER9 and later msgsnd is a global (targets any thread) */
- cpu = ppc_get_vcpu_by_pir(pir);
- if (!cpu) {
- return;
- }
- cs = CPU(cpu);
-
- if (dbell_bcast_core(rb) || (dbell_bcast_subproc(rb) &&
- (env->flags & POWERPC_FLAG_SMT_1LPAR))) {
- brdcast = true;
- }
-
- if (ppc_cpu_core_single_threaded(cs) || !brdcast) {
- ppc_set_irq(cpu, PPC_INTERRUPT_HDOORBELL, 1);
- return;
- }
-
- /*
- * Why is bql needed for walking CPU list? Answer seems to be because ppc
- * irq handling needs it, but ppc_set_irq takes the lock itself if needed,
- * so could this be removed?
- */
- bql_lock();
- THREAD_SIBLING_FOREACH(cs, ccs) {
- ppc_set_irq(POWERPC_CPU(ccs), PPC_INTERRUPT_HDOORBELL, 1);
- }
- bql_unlock();
-}
-
-#ifdef TARGET_PPC64
-void helper_book3s_msgclrp(CPUPPCState *env, target_ulong rb)
-{
- helper_hfscr_facility_check(env, HFSCR_MSGP, "msgclrp", HFSCR_IC_MSGP);
-
- if (!dbell_type_server(rb)) {
- return;
- }
-
- ppc_set_irq(env_archcpu(env), PPC_INTERRUPT_DOORBELL, 0);
-}
-
-/*
- * sends a message to another thread on the same
- * multi-threaded processor
- */
-void helper_book3s_msgsndp(CPUPPCState *env, target_ulong rb)
-{
- helper_hfscr_facility_check(env, HFSCR_MSGP, "msgsndp", HFSCR_IC_MSGP);
-
- if (!dbell_type_server(rb)) {
- return;
- }
-
- msgsnd_core_tir(env, rb & PPC_BITMASK(57, 63), PPC_INTERRUPT_DOORBELL);
-}
-#endif /* TARGET_PPC64 */
-
-/* Single-step tracing */
-void helper_book3s_trace(CPUPPCState *env, target_ulong prev_ip)
-{
- uint32_t error_code = 0;
- if (env->insns_flags2 & PPC2_ISA207S) {
- /* Load/store reporting, SRR1[35, 36] and SDAR, are not implemented. */
- env->spr[SPR_POWER_SIAR] = prev_ip;
- error_code = PPC_BIT(33);
- }
- raise_exception_err(env, POWERPC_EXCP_TRACE, error_code);
-}
-
-void ppc_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
- MMUAccessType access_type,
- int mmu_idx, uintptr_t retaddr)
-{
- CPUPPCState *env = cpu_env(cs);
- uint32_t insn;
-
- /* Restore state and reload the insn we executed, for filling in DSISR. */
- cpu_restore_state(cs, retaddr);
- insn = ppc_ldl_code(env, env->nip);
-
- switch (env->mmu_model) {
- case POWERPC_MMU_SOFT_4xx:
- env->spr[SPR_40x_DEAR] = vaddr;
- break;
- case POWERPC_MMU_BOOKE:
- case POWERPC_MMU_BOOKE206:
- env->spr[SPR_BOOKE_DEAR] = vaddr;
- break;
- default:
- env->spr[SPR_DAR] = vaddr;
- break;
- }
-
- cs->exception_index = POWERPC_EXCP_ALIGN;
- env->error_code = insn & 0x03FF0000;
- cpu_loop_exit(cs);
-}
-
-void ppc_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
- vaddr vaddr, unsigned size,
- MMUAccessType access_type,
- int mmu_idx, MemTxAttrs attrs,
- MemTxResult response, uintptr_t retaddr)
-{
- CPUPPCState *env = cpu_env(cs);
-
- switch (env->excp_model) {
-#if defined(TARGET_PPC64)
- case POWERPC_EXCP_POWER8:
- case POWERPC_EXCP_POWER9:
- case POWERPC_EXCP_POWER10:
- case POWERPC_EXCP_POWER11:
- /*
- * Machine check codes can be found in processor User Manual or
- * Linux or skiboot source.
- */
- if (access_type == MMU_DATA_LOAD) {
- env->spr[SPR_DAR] = vaddr;
- env->spr[SPR_DSISR] = PPC_BIT(57);
- env->error_code = PPC_BIT(42);
-
- } else if (access_type == MMU_DATA_STORE) {
- /*
- * MCE for stores in POWER is asynchronous so hardware does
- * not set DAR, but QEMU can do better.
- */
- env->spr[SPR_DAR] = vaddr;
- env->error_code = PPC_BIT(36) | PPC_BIT(43) | PPC_BIT(45);
- env->error_code |= PPC_BIT(42);
-
- } else { /* Fetch */
- /*
- * is_prefix_insn_excp() tests !PPC_BIT(42) to avoid fetching
- * the instruction, so that must always be clear for fetches.
- */
- env->error_code = PPC_BIT(36) | PPC_BIT(44) | PPC_BIT(45);
- }
- break;
-#endif
- default:
- /*
- * TODO: Check behaviour for other CPUs, for now do nothing.
- * Could add a basic MCE even if real hardware ignores.
- */
- return;
- }
-
- cs->exception_index = POWERPC_EXCP_MCHECK;
- cpu_loop_exit_restore(cs, retaddr);
-}
-
-void ppc_cpu_debug_excp_handler(CPUState *cs)
-{
-#if defined(TARGET_PPC64)
- CPUPPCState *env = cpu_env(cs);
-
- if (env->insns_flags2 & PPC2_ISA207S) {
- if (cs->watchpoint_hit) {
- if (cs->watchpoint_hit->flags & BP_CPU) {
- env->spr[SPR_DAR] = cs->watchpoint_hit->hitaddr;
- env->spr[SPR_DSISR] = PPC_BIT(41);
- cs->watchpoint_hit = NULL;
- raise_exception(env, POWERPC_EXCP_DSI);
- }
- cs->watchpoint_hit = NULL;
- } else if (cpu_breakpoint_test(cs, env->nip, BP_CPU)) {
- raise_exception_err(env, POWERPC_EXCP_TRACE,
- PPC_BIT(33) | PPC_BIT(43));
- }
- }
-#endif
-}
-
-bool ppc_cpu_debug_check_breakpoint(CPUState *cs)
-{
-#if defined(TARGET_PPC64)
- CPUPPCState *env = cpu_env(cs);
-
- if (env->insns_flags2 & PPC2_ISA207S) {
- target_ulong priv;
-
- priv = env->spr[SPR_CIABR] & PPC_BITMASK(62, 63);
- switch (priv) {
- case 0x1: /* problem */
- return env->msr & ((target_ulong)1 << MSR_PR);
- case 0x2: /* supervisor */
- return (!(env->msr & ((target_ulong)1 << MSR_PR)) &&
- !(env->msr & ((target_ulong)1 << MSR_HV)));
- case 0x3: /* hypervisor */
- return (!(env->msr & ((target_ulong)1 << MSR_PR)) &&
- (env->msr & ((target_ulong)1 << MSR_HV)));
- default:
- g_assert_not_reached();
- }
- }
-#endif
-
- return false;
-}
-
-bool ppc_cpu_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp)
-{
-#if defined(TARGET_PPC64)
- CPUPPCState *env = cpu_env(cs);
-
- if (env->insns_flags2 & PPC2_ISA207S) {
- if (wp == env->dawr0_watchpoint) {
- uint32_t dawrx = env->spr[SPR_DAWRX0];
- bool wt = extract32(dawrx, PPC_BIT_NR(59), 1);
- bool wti = extract32(dawrx, PPC_BIT_NR(60), 1);
- bool hv = extract32(dawrx, PPC_BIT_NR(61), 1);
- bool sv = extract32(dawrx, PPC_BIT_NR(62), 1);
- bool pr = extract32(dawrx, PPC_BIT_NR(62), 1);
-
- if ((env->msr & ((target_ulong)1 << MSR_PR)) && !pr) {
- return false;
- } else if ((env->msr & ((target_ulong)1 << MSR_HV)) && !hv) {
- return false;
- } else if (!sv) {
- return false;
- }
-
- if (!wti) {
- if (env->msr & ((target_ulong)1 << MSR_DR)) {
- if (!wt) {
- return false;
- }
- } else {
- if (wt) {
- return false;
- }
- }
- }
-
- return true;
- }
- }
-#endif
-
- return false;
-}
-
-#endif /* !CONFIG_USER_ONLY */
-#endif /* CONFIG_TCG */
diff --git a/target/ppc/helper.h b/target/ppc/helper.h
index 5a77e76..ca414f2 100644
--- a/target/ppc/helper.h
+++ b/target/ppc/helper.h
@@ -28,6 +28,8 @@ DEF_HELPER_2(store_pcr, void, env, tl)
DEF_HELPER_2(store_ciabr, void, env, tl)
DEF_HELPER_2(store_dawr0, void, env, tl)
DEF_HELPER_2(store_dawrx0, void, env, tl)
+DEF_HELPER_2(store_dawr1, void, env, tl)
+DEF_HELPER_2(store_dawrx1, void, env, tl)
DEF_HELPER_2(store_mmcr0, void, env, tl)
DEF_HELPER_2(store_mmcr1, void, env, tl)
DEF_HELPER_2(store_mmcrA, void, env, tl)
@@ -733,6 +735,8 @@ DEF_HELPER_2(store_tfmr, void, env, tl)
DEF_HELPER_FLAGS_2(store_sprc, TCG_CALL_NO_RWG, void, env, tl)
DEF_HELPER_FLAGS_1(load_sprd, TCG_CALL_NO_RWG_SE, tl, env)
DEF_HELPER_FLAGS_2(store_sprd, TCG_CALL_NO_RWG, void, env, tl)
+DEF_HELPER_FLAGS_1(load_pmsr, TCG_CALL_NO_RWG_SE, tl, env)
+DEF_HELPER_FLAGS_2(store_pmcr, TCG_CALL_NO_RWG, void, env, tl)
#endif
DEF_HELPER_2(store_sdr1, void, env, tl)
DEF_HELPER_2(store_pidr, void, env, tl)
diff --git a/target/ppc/internal.h b/target/ppc/internal.h
index 20fb2ec..9012d38 100644
--- a/target/ppc/internal.h
+++ b/target/ppc/internal.h
@@ -268,6 +268,8 @@ static inline void pte_invalidate(target_ulong *pte0)
#define PTE_PTEM_MASK 0x7FFFFFBF
#define PTE_CHECK_MASK (TARGET_PAGE_MASK | 0x7B)
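+/* Fetch one instruction, byteswapping it when MSR[LE] is set (defined in tcg-excp_helper.c) */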
+uint32_t ppc_ldl_code(CPUArchState *env, target_ulong addr);
+
#ifdef CONFIG_USER_ONLY
void ppc_cpu_record_sigsegv(CPUState *cs, vaddr addr,
MMUAccessType access_type,
@@ -287,7 +289,11 @@ void ppc_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
void ppc_cpu_debug_excp_handler(CPUState *cs);
bool ppc_cpu_debug_check_breakpoint(CPUState *cs);
bool ppc_cpu_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp);
-#endif
+
+G_NORETURN void powerpc_checkstop(CPUPPCState *env, const char *reason);
+void powerpc_excp(PowerPCCPU *cpu, int excp);
+
+#endif /* !CONFIG_USER_ONLY */
FIELD(GER_MSK, XMSK, 0, 4)
FIELD(GER_MSK, YMSK, 4, 4)
diff --git a/target/ppc/kvm.c b/target/ppc/kvm.c
index 216638d..992356c 100644
--- a/target/ppc/kvm.c
+++ b/target/ppc/kvm.c
@@ -92,6 +92,7 @@ static int cap_large_decr;
static int cap_fwnmi;
static int cap_rpt_invalidate;
static int cap_ail_mode_3;
+static int cap_dawr1;
#ifdef CONFIG_PSERIES
static int cap_papr;
@@ -152,6 +153,7 @@ int kvm_arch_init(MachineState *ms, KVMState *s)
cap_ppc_nested_kvm_hv = kvm_vm_check_extension(s, KVM_CAP_PPC_NESTED_HV);
cap_large_decr = kvmppc_get_dec_bits();
cap_fwnmi = kvm_vm_check_extension(s, KVM_CAP_PPC_FWNMI);
+ cap_dawr1 = kvm_vm_check_extension(s, KVM_CAP_PPC_DAWR1);
/*
* Note: setting it to false because there is not such capability
* in KVM at this moment.
@@ -2114,6 +2116,16 @@ int kvmppc_set_fwnmi(PowerPCCPU *cpu)
return kvm_vcpu_enable_cap(cs, KVM_CAP_PPC_FWNMI, 0);
}
+bool kvmppc_has_cap_dawr1(void)
+{
+ return !!cap_dawr1;
+}
+
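+/* Toggle guest access to the second DAWR via the KVM_CAP_PPC_DAWR1 VM capability */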
+int kvmppc_set_cap_dawr1(int enable)
+{
+ return kvm_vm_enable_cap(kvm_state, KVM_CAP_PPC_DAWR1, 0, enable);
+}
+
int kvmppc_smt_threads(void)
{
return cap_ppc_smt ? cap_ppc_smt : 1;
diff --git a/target/ppc/kvm_ppc.h b/target/ppc/kvm_ppc.h
index 1d8cb76..a8768c1 100644
--- a/target/ppc/kvm_ppc.h
+++ b/target/ppc/kvm_ppc.h
@@ -68,6 +68,8 @@ bool kvmppc_has_cap_htm(void);
bool kvmppc_has_cap_mmu_radix(void);
bool kvmppc_has_cap_mmu_hash_v3(void);
bool kvmppc_has_cap_xive(void);
+bool kvmppc_has_cap_dawr1(void);
+int kvmppc_set_cap_dawr1(int enable);
int kvmppc_get_cap_safe_cache(void);
int kvmppc_get_cap_safe_bounds_check(void);
int kvmppc_get_cap_safe_indirect_branch(void);
@@ -377,6 +379,16 @@ static inline bool kvmppc_has_cap_xive(void)
return false;
}
+static inline bool kvmppc_has_cap_dawr1(void)
+{
+ return false;
+}
+
+static inline int kvmppc_set_cap_dawr1(int enable)
+{
+ abort();
+}
+
static inline int kvmppc_get_cap_safe_cache(void)
{
return 0;
diff --git a/target/ppc/machine.c b/target/ppc/machine.c
index 0bd7ae6..98df5b4 100644
--- a/target/ppc/machine.c
+++ b/target/ppc/machine.c
@@ -264,7 +264,8 @@ static int cpu_post_load(void *opaque, int version_id)
/* Re-set breaks based on regs */
#if defined(TARGET_PPC64)
ppc_update_ciabr(env);
- ppc_update_daw0(env);
+ ppc_update_daw(env, 0);
+ ppc_update_daw(env, 1);
#endif
/*
* TCG needs to re-start the decrementer timer and/or raise the
diff --git a/target/ppc/meson.build b/target/ppc/meson.build
index db3b7a0..8eed1fa 100644
--- a/target/ppc/meson.build
+++ b/target/ppc/meson.build
@@ -14,6 +14,7 @@ ppc_ss.add(when: 'CONFIG_TCG', if_true: files(
'int_helper.c',
'mem_helper.c',
'misc_helper.c',
+ 'tcg-excp_helper.c',
'timebase_helper.c',
'translate.c',
'power8-pmu.c',
diff --git a/target/ppc/misc_helper.c b/target/ppc/misc_helper.c
index e379da6..2d9512c 100644
--- a/target/ppc/misc_helper.c
+++ b/target/ppc/misc_helper.c
@@ -234,6 +234,16 @@ void helper_store_dawrx0(CPUPPCState *env, target_ulong value)
ppc_store_dawrx0(env, value);
}
+void helper_store_dawr1(CPUPPCState *env, target_ulong value)
+{
+ ppc_store_dawr1(env, value);
+}
+
+void helper_store_dawrx1(CPUPPCState *env, target_ulong value)
+{
+ ppc_store_dawrx1(env, value);
+}
+
/*
* DPDES register is shared. Each bit reflects the state of the
* doorbell interrupt of a thread of the same core.
@@ -377,6 +387,59 @@ void helper_store_sprd(CPUPPCState *env, target_ulong val)
break;
}
}
+
+target_ulong helper_load_pmsr(CPUPPCState *env)
+{
+ target_ulong lowerps = extract64(env->spr[SPR_PMCR], PPC_BIT_NR(15), 8);
+ target_ulong val = 0;
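+ /* Report version 1 and echo PMCR's requested lower Pstate as the local and global actual Pstate */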
+
+ val |= PPC_BIT(63); /* version 0x1 (POWER9/10) */
+ /* Pmin = 0 */
+ /* XXX: Pmax should be 3 on POWER9 */
+ val |= 4ULL << PPC_BIT_NR(31); /* Pmax */
+ val |= lowerps << PPC_BIT_NR(15); /* Local actual Pstate */
+ val |= lowerps << PPC_BIT_NR(7); /* Global actual Pstate */
+
+ return val;
+}
+
+static void ppc_set_pmcr(PowerPCCPU *cpu, target_ulong val)
+{
+ cpu->env.spr[SPR_PMCR] = val;
+}
+
+void helper_store_pmcr(CPUPPCState *env, target_ulong val)
+{
+ PowerPCCPU *cpu = env_archcpu(env);
+ CPUState *cs = env_cpu(env);
+ CPUState *ccs;
+
+ /* Force the version field to 0x1; guest writes cannot change it */
+ val &= ~PPC_BITMASK(60, 63);
+ val |= PPC_BIT(63);
+
+ val &= ~PPC_BITMASK(0, 7); /* UpperPS ignored */
+ if (val & PPC_BITMASK(16, 59)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "Non-zero PMCR reserved bits "
+ TARGET_FMT_lx"\n", val);
+ val &= ~PPC_BITMASK(16, 59);
+ }
+
+ /* In LPAR-per-thread mode only this thread is updated; otherwise PMCR is shared by all threads of the core */
+ if (ppc_cpu_lpar_single_threaded(cs)) {
+ ppc_set_pmcr(cpu, val);
+ return;
+ }
+
+ /* Does iothread need to be locked for walking CPU list? */
+ bql_lock();
+ THREAD_SIBLING_FOREACH(cs, ccs) {
+ PowerPCCPU *ccpu = POWERPC_CPU(ccs);
+ ppc_set_pmcr(ccpu, val);
+ }
+ bql_unlock();
+}
+
#endif /* defined(TARGET_PPC64) */
void helper_store_pidr(CPUPPCState *env, target_ulong val)
diff --git a/target/ppc/mmu-radix64.c b/target/ppc/mmu-radix64.c
index 1d3d9e1..461eda4 100644
--- a/target/ppc/mmu-radix64.c
+++ b/target/ppc/mmu-radix64.c
@@ -571,6 +571,20 @@ static int ppc_radix64_process_scoped_xlate(PowerPCCPU *cpu,
prtbe0 = ldq_phys(cs->as, h_raddr);
}
+ /*
+ * Some Linux kernels use a zero process-table entry for PID != 0 kernel
+ * contexts that have no userspace, so that NULL dereferences fault: using
+ * PIDR=0 for the kernel would cause the Q0 page table to be used to
+ * translate Q3 as well. Check for that case here to avoid reporting an
+ * invalid configuration.
+ */
+ if (unlikely(!prtbe0)) {
+ if (guest_visible) {
+ ppc_radix64_raise_si(cpu, access_type, eaddr, DSISR_R_BADCONFIG);
+ }
+ return 1;
+ }
+
/* Walk Radix Tree from Process Table Entry to Convert EA to RA */
*g_page_size = PRTBE_R_GET_RTS(prtbe0);
base_addr = prtbe0 & PRTBE_R_RPDB;
diff --git a/target/ppc/spr_common.h b/target/ppc/spr_common.h
index 01aff44..84c910c 100644
--- a/target/ppc/spr_common.h
+++ b/target/ppc/spr_common.h
@@ -165,6 +165,8 @@ void spr_write_cfar(DisasContext *ctx, int sprn, int gprn);
void spr_write_ciabr(DisasContext *ctx, int sprn, int gprn);
void spr_write_dawr0(DisasContext *ctx, int sprn, int gprn);
void spr_write_dawrx0(DisasContext *ctx, int sprn, int gprn);
+void spr_write_dawr1(DisasContext *ctx, int sprn, int gprn);
+void spr_write_dawrx1(DisasContext *ctx, int sprn, int gprn);
void spr_write_ureg(DisasContext *ctx, int sprn, int gprn);
void spr_read_purr(DisasContext *ctx, int gprn, int sprn);
void spr_write_purr(DisasContext *ctx, int sprn, int gprn);
@@ -204,6 +206,8 @@ void spr_write_hmer(DisasContext *ctx, int sprn, int gprn);
void spr_read_tfmr(DisasContext *ctx, int gprn, int sprn);
void spr_write_tfmr(DisasContext *ctx, int sprn, int gprn);
void spr_write_lpcr(DisasContext *ctx, int sprn, int gprn);
+void spr_read_pmsr(DisasContext *ctx, int gprn, int sprn);
+void spr_write_pmcr(DisasContext *ctx, int sprn, int gprn);
void spr_read_dexcr_ureg(DisasContext *ctx, int gprn, int sprn);
void spr_read_ppr32(DisasContext *ctx, int sprn, int gprn);
void spr_write_ppr32(DisasContext *ctx, int sprn, int gprn);
diff --git a/target/ppc/tcg-excp_helper.c b/target/ppc/tcg-excp_helper.c
new file mode 100644
index 0000000..5a189dc
--- /dev/null
+++ b/target/ppc/tcg-excp_helper.c
@@ -0,0 +1,851 @@
+/*
+ * PowerPC exception emulation helpers for QEMU (TCG specific)
+ *
+ * Copyright (c) 2003-2007 Jocelyn Mayer
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/osdep.h"
+#include "qemu/main-loop.h"
+#include "qemu/log.h"
+#include "exec/cpu_ldst.h"
+#include "exec/exec-all.h"
+#include "exec/helper-proto.h"
+#include "system/runstate.h"
+
+#include "helper_regs.h"
+#include "hw/ppc/ppc.h"
+#include "internal.h"
+#include "cpu.h"
+#include "trace.h"
+
+/*****************************************************************************/
+/* Exceptions processing helpers */
+
+void raise_exception_err_ra(CPUPPCState *env, uint32_t exception,
+ uint32_t error_code, uintptr_t raddr)
+{
+ CPUState *cs = env_cpu(env);
+
+ cs->exception_index = exception;
+ env->error_code = error_code;
+ cpu_loop_exit_restore(cs, raddr);
+}
+
+void helper_raise_exception_err(CPUPPCState *env, uint32_t exception,
+ uint32_t error_code)
+{
+ raise_exception_err_ra(env, exception, error_code, 0);
+}
+
+void helper_raise_exception(CPUPPCState *env, uint32_t exception)
+{
+ raise_exception_err_ra(env, exception, 0, 0);
+}
+
+#ifndef CONFIG_USER_ONLY
+
+static G_NORETURN void raise_exception_err(CPUPPCState *env, uint32_t exception,
+ uint32_t error_code)
+{
+ raise_exception_err_ra(env, exception, error_code, 0);
+}
+
+static G_NORETURN void raise_exception(CPUPPCState *env, uint32_t exception)
+{
+ raise_exception_err_ra(env, exception, 0, 0);
+}
+
+#endif /* !CONFIG_USER_ONLY */
+
+void helper_TW(CPUPPCState *env, target_ulong arg1, target_ulong arg2,
+ uint32_t flags)
+{
+ if (!likely(!(((int32_t)arg1 < (int32_t)arg2 && (flags & 0x10)) ||
+ ((int32_t)arg1 > (int32_t)arg2 && (flags & 0x08)) ||
+ ((int32_t)arg1 == (int32_t)arg2 && (flags & 0x04)) ||
+ ((uint32_t)arg1 < (uint32_t)arg2 && (flags & 0x02)) ||
+ ((uint32_t)arg1 > (uint32_t)arg2 && (flags & 0x01))))) {
+ raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
+ POWERPC_EXCP_TRAP, GETPC());
+ }
+}
+
+#ifdef TARGET_PPC64
+void helper_TD(CPUPPCState *env, target_ulong arg1, target_ulong arg2,
+ uint32_t flags)
+{
+ if (!likely(!(((int64_t)arg1 < (int64_t)arg2 && (flags & 0x10)) ||
+ ((int64_t)arg1 > (int64_t)arg2 && (flags & 0x08)) ||
+ ((int64_t)arg1 == (int64_t)arg2 && (flags & 0x04)) ||
+ ((uint64_t)arg1 < (uint64_t)arg2 && (flags & 0x02)) ||
+ ((uint64_t)arg1 > (uint64_t)arg2 && (flags & 0x01))))) {
+ raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
+ POWERPC_EXCP_TRAP, GETPC());
+ }
+}
+#endif /* TARGET_PPC64 */
+
+static uint32_t helper_SIMON_LIKE_32_64(uint32_t x, uint64_t key, uint32_t lane)
+{
+ const uint16_t c = 0xfffc;
+ const uint64_t z0 = 0xfa2561cdf44ac398ULL;
+ uint16_t z = 0, temp;
+ uint16_t k[32], eff_k[32], xleft[33], xright[33], fxleft[32];
+
+ for (int i = 3; i >= 0; i--) {
+ k[i] = key & 0xffff;
+ key >>= 16;
+ }
+ xleft[0] = x & 0xffff;
+ xright[0] = (x >> 16) & 0xffff;
+
+ for (int i = 0; i < 28; i++) {
+ z = (z0 >> (63 - i)) & 1;
+ temp = ror16(k[i + 3], 3) ^ k[i + 1];
+ k[i + 4] = c ^ z ^ k[i] ^ temp ^ ror16(temp, 1);
+ }
+
+ for (int i = 0; i < 8; i++) {
+ eff_k[4 * i + 0] = k[4 * i + ((0 + lane) % 4)];
+ eff_k[4 * i + 1] = k[4 * i + ((1 + lane) % 4)];
+ eff_k[4 * i + 2] = k[4 * i + ((2 + lane) % 4)];
+ eff_k[4 * i + 3] = k[4 * i + ((3 + lane) % 4)];
+ }
+
+ for (int i = 0; i < 32; i++) {
+ fxleft[i] = (rol16(xleft[i], 1) &
+ rol16(xleft[i], 8)) ^ rol16(xleft[i], 2);
+ xleft[i + 1] = xright[i] ^ fxleft[i] ^ eff_k[i];
+ xright[i + 1] = xleft[i];
+ }
+
+ return (((uint32_t)xright[32]) << 16) | xleft[32];
+}
+
+static uint64_t hash_digest(uint64_t ra, uint64_t rb, uint64_t key)
+{
+ uint64_t stage0_h = 0ULL, stage0_l = 0ULL;
+ uint64_t stage1_h, stage1_l;
+
+ for (int i = 0; i < 4; i++) {
+ stage0_h |= ror64(rb & 0xff, 8 * (2 * i + 1));
+ stage0_h |= ((ra >> 32) & 0xff) << (8 * 2 * i);
+ stage0_l |= ror64((rb >> 32) & 0xff, 8 * (2 * i + 1));
+ stage0_l |= (ra & 0xff) << (8 * 2 * i);
+ rb >>= 8;
+ ra >>= 8;
+ }
+
+ stage1_h = (uint64_t)helper_SIMON_LIKE_32_64(stage0_h >> 32, key, 0) << 32;
+ stage1_h |= helper_SIMON_LIKE_32_64(stage0_h, key, 1);
+ stage1_l = (uint64_t)helper_SIMON_LIKE_32_64(stage0_l >> 32, key, 2) << 32;
+ stage1_l |= helper_SIMON_LIKE_32_64(stage0_l, key, 3);
+
+ return stage1_h ^ stage1_l;
+}
+
+static void do_hash(CPUPPCState *env, target_ulong ea, target_ulong ra,
+ target_ulong rb, uint64_t key, bool store)
+{
+ uint64_t calculated_hash = hash_digest(ra, rb, key), loaded_hash;
+
+ if (store) {
+ cpu_stq_data_ra(env, ea, calculated_hash, GETPC());
+ } else {
+ loaded_hash = cpu_ldq_data_ra(env, ea, GETPC());
+ if (loaded_hash != calculated_hash) {
+ raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
+ POWERPC_EXCP_TRAP, GETPC());
+ }
+ }
+}
+
+#include "qemu/guest-random.h"
+
+#ifdef TARGET_PPC64
+#define HELPER_HASH(op, key, store, dexcr_aspect) \
+void helper_##op(CPUPPCState *env, target_ulong ea, target_ulong ra, \
+ target_ulong rb) \
+{ \
+ if (env->msr & R_MSR_PR_MASK) { \
+ if (!(env->spr[SPR_DEXCR] & R_DEXCR_PRO_##dexcr_aspect##_MASK || \
+ env->spr[SPR_HDEXCR] & R_HDEXCR_ENF_##dexcr_aspect##_MASK)) \
+ return; \
+ } else if (!(env->msr & R_MSR_HV_MASK)) { \
+ if (!(env->spr[SPR_DEXCR] & R_DEXCR_PNH_##dexcr_aspect##_MASK || \
+ env->spr[SPR_HDEXCR] & R_HDEXCR_ENF_##dexcr_aspect##_MASK)) \
+ return; \
+ } else if (!(env->msr & R_MSR_S_MASK)) { \
+ if (!(env->spr[SPR_HDEXCR] & R_HDEXCR_HNU_##dexcr_aspect##_MASK)) \
+ return; \
+ } \
+ \
+ do_hash(env, ea, ra, rb, key, store); \
+}
+#else
+#define HELPER_HASH(op, key, store, dexcr_aspect) \
+void helper_##op(CPUPPCState *env, target_ulong ea, target_ulong ra, \
+ target_ulong rb) \
+{ \
+ do_hash(env, ea, ra, rb, key, store); \
+}
+#endif /* TARGET_PPC64 */
+
+HELPER_HASH(HASHST, env->spr[SPR_HASHKEYR], true, NPHIE)
+HELPER_HASH(HASHCHK, env->spr[SPR_HASHKEYR], false, NPHIE)
+HELPER_HASH(HASHSTP, env->spr[SPR_HASHPKEYR], true, PHIE)
+HELPER_HASH(HASHCHKP, env->spr[SPR_HASHPKEYR], false, PHIE)
+
+#ifndef CONFIG_USER_ONLY
+
+void ppc_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
+ MMUAccessType access_type,
+ int mmu_idx, uintptr_t retaddr)
+{
+ CPUPPCState *env = cpu_env(cs);
+ uint32_t insn;
+
+ /* Restore state and reload the insn we executed, for filling in DSISR. */
+ cpu_restore_state(cs, retaddr);
+ insn = ppc_ldl_code(env, env->nip);
+
+ switch (env->mmu_model) {
+ case POWERPC_MMU_SOFT_4xx:
+ env->spr[SPR_40x_DEAR] = vaddr;
+ break;
+ case POWERPC_MMU_BOOKE:
+ case POWERPC_MMU_BOOKE206:
+ env->spr[SPR_BOOKE_DEAR] = vaddr;
+ break;
+ default:
+ env->spr[SPR_DAR] = vaddr;
+ break;
+ }
+
+ cs->exception_index = POWERPC_EXCP_ALIGN;
+ env->error_code = insn & 0x03FF0000;
+ cpu_loop_exit(cs);
+}
+
+void ppc_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
+ vaddr vaddr, unsigned size,
+ MMUAccessType access_type,
+ int mmu_idx, MemTxAttrs attrs,
+ MemTxResult response, uintptr_t retaddr)
+{
+ CPUPPCState *env = cpu_env(cs);
+
+ switch (env->excp_model) {
+#if defined(TARGET_PPC64)
+ case POWERPC_EXCP_POWER8:
+ case POWERPC_EXCP_POWER9:
+ case POWERPC_EXCP_POWER10:
+ case POWERPC_EXCP_POWER11:
+ /*
+ * Machine check codes can be found in processor User Manual or
+ * Linux or skiboot source.
+ */
+ if (access_type == MMU_DATA_LOAD) {
+ env->spr[SPR_DAR] = vaddr;
+ env->spr[SPR_DSISR] = PPC_BIT(57);
+ env->error_code = PPC_BIT(42);
+
+ } else if (access_type == MMU_DATA_STORE) {
+ /*
+ * MCE for stores in POWER is asynchronous so hardware does
+ * not set DAR, but QEMU can do better.
+ */
+ env->spr[SPR_DAR] = vaddr;
+ env->error_code = PPC_BIT(36) | PPC_BIT(43) | PPC_BIT(45);
+ env->error_code |= PPC_BIT(42);
+
+ } else { /* Fetch */
+ /*
+ * is_prefix_insn_excp() tests !PPC_BIT(42) to avoid fetching
+ * the instruction, so that must always be clear for fetches.
+ */
+ env->error_code = PPC_BIT(36) | PPC_BIT(44) | PPC_BIT(45);
+ }
+ break;
+#endif
+ default:
+ /*
+ * TODO: Check behaviour for other CPUs, for now do nothing.
+ * Could add a basic MCE even if real hardware ignores.
+ */
+ return;
+ }
+
+ cs->exception_index = POWERPC_EXCP_MCHECK;
+ cpu_loop_exit_restore(cs, retaddr);
+}
+
+void ppc_cpu_debug_excp_handler(CPUState *cs)
+{
+#if defined(TARGET_PPC64)
+ CPUPPCState *env = cpu_env(cs);
+
+ if (env->insns_flags2 & PPC2_ISA207S) {
+ if (cs->watchpoint_hit) {
+ if (cs->watchpoint_hit->flags & BP_CPU) {
+ env->spr[SPR_DAR] = cs->watchpoint_hit->hitaddr;
+ env->spr[SPR_DSISR] = PPC_BIT(41);
+ cs->watchpoint_hit = NULL;
+ raise_exception(env, POWERPC_EXCP_DSI);
+ }
+ cs->watchpoint_hit = NULL;
+ } else if (cpu_breakpoint_test(cs, env->nip, BP_CPU)) {
+ raise_exception_err(env, POWERPC_EXCP_TRACE,
+ PPC_BIT(33) | PPC_BIT(43));
+ }
+ }
+#endif
+}
+
+bool ppc_cpu_debug_check_breakpoint(CPUState *cs)
+{
+#if defined(TARGET_PPC64)
+ CPUPPCState *env = cpu_env(cs);
+
+ if (env->insns_flags2 & PPC2_ISA207S) {
+ target_ulong priv;
+
+ priv = env->spr[SPR_CIABR] & PPC_BITMASK(62, 63);
+ switch (priv) {
+ case 0x1: /* problem */
+ return env->msr & ((target_ulong)1 << MSR_PR);
+ case 0x2: /* supervisor */
+ return (!(env->msr & ((target_ulong)1 << MSR_PR)) &&
+ !(env->msr & ((target_ulong)1 << MSR_HV)));
+ case 0x3: /* hypervisor */
+ return (!(env->msr & ((target_ulong)1 << MSR_PR)) &&
+ (env->msr & ((target_ulong)1 << MSR_HV)));
+ default:
+ g_assert_not_reached();
+ }
+ }
+#endif
+
+ return false;
+}
+
+bool ppc_cpu_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp)
+{
+#if defined(TARGET_PPC64)
+ CPUPPCState *env = cpu_env(cs);
+ bool wt, wti, hv, sv, pr;
+ uint32_t dawrx;
+
+ if ((env->insns_flags2 & PPC2_ISA207S) &&
+ (wp == env->dawr_watchpoint[0])) {
+ dawrx = env->spr[SPR_DAWRX0];
+ } else if ((env->insns_flags2 & PPC2_ISA310) &&
+ (wp == env->dawr_watchpoint[1])) {
+ dawrx = env->spr[SPR_DAWRX1];
+ } else {
+ return false;
+ }
+
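+ /* DAWRX bits 59-63 select the match conditions (WT, WTI) and the HV/SV/PR privilege mask */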
+ wt = extract32(dawrx, PPC_BIT_NR(59), 1);
+ wti = extract32(dawrx, PPC_BIT_NR(60), 1);
+ hv = extract32(dawrx, PPC_BIT_NR(61), 1);
+ sv = extract32(dawrx, PPC_BIT_NR(62), 1);
+ pr = extract32(dawrx, PPC_BIT_NR(63), 1);
+
+ if ((env->msr & ((target_ulong)1 << MSR_PR)) && !pr) {
+ return false;
+ } else if ((env->msr & ((target_ulong)1 << MSR_HV)) && !hv) {
+ return false;
+ } else if (!sv) {
+ return false;
+ }
+
+ if (!wti) {
+ if (env->msr & ((target_ulong)1 << MSR_DR)) {
+ return wt;
+ } else {
+ return !wt;
+ }
+ }
+
+ return true;
+#endif
+
+ return false;
+}
+
+/*
+ * This stops the machine and logs CPU state without killing QEMU (like
+ * cpu_abort()) because it is often a guest error as opposed to a QEMU error,
+ * so the machine can still be debugged.
+ */
+G_NORETURN void powerpc_checkstop(CPUPPCState *env, const char *reason)
+{
+ CPUState *cs = env_cpu(env);
+ FILE *f;
+
+ f = qemu_log_trylock();
+ if (f) {
+ fprintf(f, "Entering checkstop state: %s\n", reason);
+ cpu_dump_state(cs, f, CPU_DUMP_FPU | CPU_DUMP_CCOP);
+ qemu_log_unlock(f);
+ }
+
+ /*
+ * This stops the machine and logs CPU state without killing QEMU
+ * (like cpu_abort()) so the machine can still be debugged (because
+ * it is often a guest error).
+ */
+ qemu_system_guest_panicked(NULL);
+ cpu_loop_exit_noexc(cs);
+}
+
+/* Return true iff byteswap is needed to load instruction */
+static inline bool insn_need_byteswap(CPUArchState *env)
+{
+ /* SYSTEM builds TARGET_BIG_ENDIAN. Need to swap when MSR[LE] is set */
+ return !!(env->msr & ((target_ulong)1 << MSR_LE));
+}
+
+uint32_t ppc_ldl_code(CPUArchState *env, target_ulong addr)
+{
+ uint32_t insn = cpu_ldl_code(env, addr);
+
+ if (insn_need_byteswap(env)) {
+ insn = bswap32(insn);
+ }
+
+ return insn;
+}
+
+#if defined(TARGET_PPC64)
+void helper_attn(CPUPPCState *env)
+{
+ /* POWER attn is unprivileged when enabled by HID, otherwise illegal */
+ if ((*env->check_attn)(env)) {
+ powerpc_checkstop(env, "host executed attn");
+ } else {
+ raise_exception_err(env, POWERPC_EXCP_HV_EMU,
+ POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
+ }
+}
+
+void helper_scv(CPUPPCState *env, uint32_t lev)
+{
+ if (env->spr[SPR_FSCR] & (1ull << FSCR_SCV)) {
+ raise_exception_err(env, POWERPC_EXCP_SYSCALL_VECTORED, lev);
+ } else {
+ raise_exception_err(env, POWERPC_EXCP_FU, FSCR_IC_SCV);
+ }
+}
+
+void helper_pminsn(CPUPPCState *env, uint32_t insn)
+{
+ CPUState *cs = env_cpu(env);
+
+ cs->halted = 1;
+
+ /* Condition for waking up at 0x100 */
+ env->resume_as_sreset = (insn != PPC_PM_STOP) ||
+ (env->spr[SPR_PSSCR] & PSSCR_EC);
+
+ /* HDECR is not supposed to wake from the PM state; it may have already fired */
+ if (env->resume_as_sreset) {
+ PowerPCCPU *cpu = env_archcpu(env);
+ ppc_set_irq(cpu, PPC_INTERRUPT_HDECR, 0);
+ }
+
+ ppc_maybe_interrupt(env);
+}
+
+#endif /* TARGET_PPC64 */
+void helper_store_msr(CPUPPCState *env, target_ulong val)
+{
+ uint32_t excp = hreg_store_msr(env, val, 0);
+
+ if (excp != 0) {
+ cpu_interrupt_exittb(env_cpu(env));
+ raise_exception(env, excp);
+ }
+}
+
+void helper_ppc_maybe_interrupt(CPUPPCState *env)
+{
+ ppc_maybe_interrupt(env);
+}
+
+static void do_rfi(CPUPPCState *env, target_ulong nip, target_ulong msr)
+{
+ /* MSR:POW cannot be set by any form of rfi */
+ msr &= ~(1ULL << MSR_POW);
+
+ /* MSR:TGPR cannot be set by any form of rfi */
+ if (env->flags & POWERPC_FLAG_TGPR) {
+ msr &= ~(1ULL << MSR_TGPR);
+ }
+
+#ifdef TARGET_PPC64
+ /* Switching to 32-bit ? Crop the nip */
+ if (!msr_is_64bit(env, msr)) {
+ nip = (uint32_t)nip;
+ }
+#else
+ nip = (uint32_t)nip;
+#endif
+ /* XXX: beware: this is false if VLE is supported */
+ env->nip = nip & ~((target_ulong)0x00000003);
+ hreg_store_msr(env, msr, 1);
+ trace_ppc_excp_rfi(env->nip, env->msr);
+ /*
+ * No need to raise an exception here, as rfi is always the last
+ * insn of a TB
+ */
+ cpu_interrupt_exittb(env_cpu(env));
+ /* Reset the reservation */
+ env->reserve_addr = -1;
+
+ /* Context synchronizing: check if TCG TLB needs flush */
+ check_tlb_flush(env, false);
+}
+
+void helper_rfi(CPUPPCState *env)
+{
+ do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1] & 0xfffffffful);
+}
+
+#ifdef TARGET_PPC64
+void helper_rfid(CPUPPCState *env)
+{
+ /*
+ * The architecture defines a number of rules for which bits can
+ * change but in practice, we handle this in hreg_store_msr()
+ * which will be called by do_rfi(), so there is no need to filter
+ * here
+ */
+ do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1]);
+}
+
+void helper_rfscv(CPUPPCState *env)
+{
+ do_rfi(env, env->lr, env->ctr);
+}
+
+void helper_hrfid(CPUPPCState *env)
+{
+ do_rfi(env, env->spr[SPR_HSRR0], env->spr[SPR_HSRR1]);
+}
+
+void helper_rfebb(CPUPPCState *env, target_ulong s)
+{
+ target_ulong msr = env->msr;
+
+ /*
+ * Handling of BESCR bits 32:33 according to PowerISA v3.1:
+ *
+ * "If BESCR 32:33 != 0b00 the instruction is treated as if
+ * the instruction form were invalid."
+ */
+ if (env->spr[SPR_BESCR] & BESCR_INVALID) {
+ raise_exception_err(env, POWERPC_EXCP_PROGRAM,
+ POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
+ }
+
+ env->nip = env->spr[SPR_EBBRR];
+
+ /* Switching to 32-bit ? Crop the nip */
+ if (!msr_is_64bit(env, msr)) {
+ env->nip = (uint32_t)env->spr[SPR_EBBRR];
+ }
+
+ if (s) {
+ env->spr[SPR_BESCR] |= BESCR_GE;
+ } else {
+ env->spr[SPR_BESCR] &= ~BESCR_GE;
+ }
+}
+
+/*
+ * Triggers or queues an 'ebb_excp' EBB exception. All checks
+ * but FSCR, HFSCR and msr_pr must be done beforehand.
+ *
+ * PowerISA v3.1 isn't clear about whether an EBB should be
+ * postponed or cancelled if the EBB facility is unavailable.
+ * Our assumption here is that the EBB is cancelled if both
+ * FSCR and HFSCR EBB facilities aren't available.
+ */
+static void do_ebb(CPUPPCState *env, int ebb_excp)
+{
+ PowerPCCPU *cpu = env_archcpu(env);
+
+ /*
+ * FSCR_EBB and FSCR_IC_EBB are the same bits used with
+ * HFSCR.
+ */
+ helper_fscr_facility_check(env, FSCR_EBB, 0, FSCR_IC_EBB);
+ helper_hfscr_facility_check(env, FSCR_EBB, "EBB", FSCR_IC_EBB);
+
+ if (ebb_excp == POWERPC_EXCP_PERFM_EBB) {
+ env->spr[SPR_BESCR] |= BESCR_PMEO;
+ } else if (ebb_excp == POWERPC_EXCP_EXTERNAL_EBB) {
+ env->spr[SPR_BESCR] |= BESCR_EEO;
+ }
+
+ if (FIELD_EX64(env->msr, MSR, PR)) {
+ powerpc_excp(cpu, ebb_excp);
+ } else {
+ ppc_set_irq(cpu, PPC_INTERRUPT_EBB, 1);
+ }
+}
+
+void raise_ebb_perfm_exception(CPUPPCState *env)
+{
+ bool perfm_ebb_enabled = env->spr[SPR_POWER_MMCR0] & MMCR0_EBE &&
+ env->spr[SPR_BESCR] & BESCR_PME &&
+ env->spr[SPR_BESCR] & BESCR_GE;
+
+ if (!perfm_ebb_enabled) {
+ return;
+ }
+
+ do_ebb(env, POWERPC_EXCP_PERFM_EBB);
+}
+#endif /* TARGET_PPC64 */
+
+/*****************************************************************************/
+/* Embedded PowerPC specific helpers */
+void helper_40x_rfci(CPUPPCState *env)
+{
+ do_rfi(env, env->spr[SPR_40x_SRR2], env->spr[SPR_40x_SRR3]);
+}
+
+void helper_rfci(CPUPPCState *env)
+{
+ do_rfi(env, env->spr[SPR_BOOKE_CSRR0], env->spr[SPR_BOOKE_CSRR1]);
+}
+
+void helper_rfdi(CPUPPCState *env)
+{
+ /* FIXME: choose CSRR1 or DSRR1 based on cpu type */
+ do_rfi(env, env->spr[SPR_BOOKE_DSRR0], env->spr[SPR_BOOKE_DSRR1]);
+}
+
+void helper_rfmci(CPUPPCState *env)
+{
+ /* FIXME: choose CSRR1 or MCSRR1 based on cpu type */
+ do_rfi(env, env->spr[SPR_BOOKE_MCSRR0], env->spr[SPR_BOOKE_MCSRR1]);
+}
+
+/* Embedded.Processor Control */
+static int dbell2irq(target_ulong rb)
+{
+ int msg = rb & DBELL_TYPE_MASK;
+ int irq = -1;
+
+ switch (msg) {
+ case DBELL_TYPE_DBELL:
+ irq = PPC_INTERRUPT_DOORBELL;
+ break;
+ case DBELL_TYPE_DBELL_CRIT:
+ irq = PPC_INTERRUPT_CDOORBELL;
+ break;
+ case DBELL_TYPE_G_DBELL:
+ case DBELL_TYPE_G_DBELL_CRIT:
+ case DBELL_TYPE_G_DBELL_MC:
+ /* XXX implement */
+ default:
+ break;
+ }
+
+ return irq;
+}
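+
+/*
+ * For example, a msgsnd whose rb encodes DBELL_TYPE_DBELL maps to
+ * PPC_INTERRUPT_DOORBELL, while the guest doorbell types fall through
+ * to -1, so helper_msgclr()/helper_msgsnd() below treat them as
+ * no-ops.
+ */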
+
+void helper_msgclr(CPUPPCState *env, target_ulong rb)
+{
+ int irq = dbell2irq(rb);
+
+ if (irq < 0) {
+ return;
+ }
+
+ ppc_set_irq(env_archcpu(env), irq, 0);
+}
+
+void helper_msgsnd(target_ulong rb)
+{
+ int irq = dbell2irq(rb);
+ int pir = rb & DBELL_PIRTAG_MASK;
+ CPUState *cs;
+
+ if (irq < 0) {
+ return;
+ }
+
+ bql_lock();
+ CPU_FOREACH(cs) {
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ CPUPPCState *cenv = &cpu->env;
+
+ if ((rb & DBELL_BRDCAST_MASK) || (cenv->spr[SPR_BOOKE_PIR] == pir)) {
+ ppc_set_irq(cpu, irq, 1);
+ }
+ }
+ bql_unlock();
+}
+
+/* Server Processor Control */
+
+static bool dbell_type_server(target_ulong rb)
+{
+ /*
+ * A Directed Hypervisor Doorbell message is sent only if the
+ * message type is 5. All other types are reserved and the
+ * instruction is a no-op
+ */
+ return (rb & DBELL_TYPE_MASK) == DBELL_TYPE_DBELL_SERVER;
+}
+
+static inline bool dbell_bcast_core(target_ulong rb)
+{
+ return (rb & DBELL_BRDCAST_MASK) == DBELL_BRDCAST_CORE;
+}
+
+static inline bool dbell_bcast_subproc(target_ulong rb)
+{
+ return (rb & DBELL_BRDCAST_MASK) == DBELL_BRDCAST_SUBPROC;
+}
+
+/*
+ * Send an interrupt to a thread in the same core as env.
+ */
+static void msgsnd_core_tir(CPUPPCState *env, uint32_t target_tir, int irq)
+{
+ PowerPCCPU *cpu = env_archcpu(env);
+ CPUState *cs = env_cpu(env);
+
+ if (ppc_cpu_lpar_single_threaded(cs)) {
+ if (target_tir == 0) {
+ ppc_set_irq(cpu, irq, 1);
+ }
+ } else {
+ CPUState *ccs;
+
+ /* Does the iothread need to be locked for walking the CPU list? */
+ bql_lock();
+ THREAD_SIBLING_FOREACH(cs, ccs) {
+ PowerPCCPU *ccpu = POWERPC_CPU(ccs);
+ if (target_tir == ppc_cpu_tir(ccpu)) {
+ ppc_set_irq(ccpu, irq, 1);
+ break;
+ }
+ }
+ bql_unlock();
+ }
+}
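+
+/*
+ * Callers such as helper_book3s_msgsndp() below pass rb bits 57:63 as
+ * the target TIR; on a single-threaded LPAR only TIR 0 exists, hence
+ * the direct ppc_set_irq() in that case.
+ */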
+
+void helper_book3s_msgclr(CPUPPCState *env, target_ulong rb)
+{
+ if (!dbell_type_server(rb)) {
+ return;
+ }
+
+ ppc_set_irq(env_archcpu(env), PPC_INTERRUPT_HDOORBELL, 0);
+}
+
+void helper_book3s_msgsnd(CPUPPCState *env, target_ulong rb)
+{
+ int pir = rb & DBELL_PROCIDTAG_MASK;
+ bool brdcast = false;
+ CPUState *cs, *ccs;
+ PowerPCCPU *cpu;
+
+ if (!dbell_type_server(rb)) {
+ return;
+ }
+
+ /* POWER8 msgsnd is like msgsndp (targets a thread within the core) */
+ if (!(env->insns_flags2 & PPC2_ISA300)) {
+ msgsnd_core_tir(env, rb & PPC_BITMASK(57, 63), PPC_INTERRUPT_HDOORBELL);
+ return;
+ }
+
+ /* POWER9 and later msgsnd is global (it can target any thread) */
+ cpu = ppc_get_vcpu_by_pir(pir);
+ if (!cpu) {
+ return;
+ }
+ cs = CPU(cpu);
+
+ if (dbell_bcast_core(rb) || (dbell_bcast_subproc(rb) &&
+ (env->flags & POWERPC_FLAG_SMT_1LPAR))) {
+ brdcast = true;
+ }
+
+ if (ppc_cpu_core_single_threaded(cs) || !brdcast) {
+ ppc_set_irq(cpu, PPC_INTERRUPT_HDOORBELL, 1);
+ return;
+ }
+
+ /*
+ * Why is the BQL needed for walking the CPU list? It seems to be
+ * because ppc irq handling needs it, but ppc_set_irq takes the lock
+ * itself if needed, so could this be removed?
+ */
+ bql_lock();
+ THREAD_SIBLING_FOREACH(cs, ccs) {
+ ppc_set_irq(POWERPC_CPU(ccs), PPC_INTERRUPT_HDOORBELL, 1);
+ }
+ bql_unlock();
+}
+
+#ifdef TARGET_PPC64
+void helper_book3s_msgclrp(CPUPPCState *env, target_ulong rb)
+{
+ helper_hfscr_facility_check(env, HFSCR_MSGP, "msgclrp", HFSCR_IC_MSGP);
+
+ if (!dbell_type_server(rb)) {
+ return;
+ }
+
+ ppc_set_irq(env_archcpu(env), PPC_INTERRUPT_DOORBELL, 0);
+}
+
+/*
+ * Send a message to another thread on the same multi-threaded
+ * processor.
+ */
+void helper_book3s_msgsndp(CPUPPCState *env, target_ulong rb)
+{
+ helper_hfscr_facility_check(env, HFSCR_MSGP, "msgsndp", HFSCR_IC_MSGP);
+
+ if (!dbell_type_server(rb)) {
+ return;
+ }
+
+ msgsnd_core_tir(env, rb & PPC_BITMASK(57, 63), PPC_INTERRUPT_DOORBELL);
+}
+#endif /* TARGET_PPC64 */
+
+/* Single-step tracing */
+void helper_book3s_trace(CPUPPCState *env, target_ulong prev_ip)
+{
+ uint32_t error_code = 0;
+ if (env->insns_flags2 & PPC2_ISA207S) {
+ /* Load/store reporting (SRR1[35, 36] and SDAR) is not implemented. */
+ env->spr[SPR_POWER_SIAR] = prev_ip;
+ error_code = PPC_BIT(33);
+ }
+ raise_exception_err(env, POWERPC_EXCP_TRACE, error_code);
+}
+#endif /* !CONFIG_USER_ONLY */
diff --git a/target/ppc/translate.c b/target/ppc/translate.c
index 80638ab..a52cbc8 100644
--- a/target/ppc/translate.c
+++ b/target/ppc/translate.c
@@ -637,6 +637,18 @@ void spr_write_dawrx0(DisasContext *ctx, int sprn, int gprn)
translator_io_start(&ctx->base);
gen_helper_store_dawrx0(tcg_env, cpu_gpr[gprn]);
}
+
+void spr_write_dawr1(DisasContext *ctx, int sprn, int gprn)
+{
+ translator_io_start(&ctx->base);
+ gen_helper_store_dawr1(tcg_env, cpu_gpr[gprn]);
+}
+
+void spr_write_dawrx1(DisasContext *ctx, int sprn, int gprn)
+{
+ translator_io_start(&ctx->base);
+ gen_helper_store_dawrx1(tcg_env, cpu_gpr[gprn]);
+}
#endif /* defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY) */
/* CTR */
@@ -1326,6 +1338,22 @@ void spr_write_lpcr(DisasContext *ctx, int sprn, int gprn)
translator_io_start(&ctx->base);
gen_helper_store_lpcr(tcg_env, cpu_gpr[gprn]);
}
+
+void spr_read_pmsr(DisasContext *ctx, int gprn, int sprn)
+{
+ translator_io_start(&ctx->base);
+ gen_helper_load_pmsr(cpu_gpr[gprn], tcg_env);
+}
+
+void spr_write_pmcr(DisasContext *ctx, int sprn, int gprn)
+{
+ if (!gen_serialize_core_lpar(ctx)) {
+ return;
+ }
+ translator_io_start(&ctx->base);
+ gen_helper_store_pmcr(tcg_env, cpu_gpr[gprn]);
+}
+
#endif /* !defined(CONFIG_USER_ONLY) */
void spr_read_tar(DisasContext *ctx, int gprn, int sprn)
diff --git a/tests/functional/meson.build b/tests/functional/meson.build
index e78560a..74f8414 100644
--- a/tests/functional/meson.build
+++ b/tests/functional/meson.build
@@ -207,7 +207,6 @@ tests_ppc_system_quick = [
]
tests_ppc_system_thorough = [
- 'ppc_405',
'ppc_40p',
'ppc_amiga',
'ppc_bamboo',
diff --git a/tests/functional/test_ppc_405.py b/tests/functional/test_ppc_405.py
deleted file mode 100755
index 9851c03..0000000
--- a/tests/functional/test_ppc_405.py
+++ /dev/null
@@ -1,37 +0,0 @@
-#!/usr/bin/env python3
-#
-# Test that the U-Boot firmware boots on ppc 405 machines and check the console
-#
-# Copyright (c) 2021 Red Hat, Inc.
-#
-# This work is licensed under the terms of the GNU GPL, version 2 or
-# later. See the COPYING file in the top-level directory.
-
-from qemu_test import QemuSystemTest, Asset
-from qemu_test import wait_for_console_pattern
-from qemu_test import exec_command_and_wait_for_pattern
-
-class Ppc405Machine(QemuSystemTest):
-
- timeout = 90
-
- ASSET_UBOOT = Asset(
- ('https://gitlab.com/huth/u-boot/-/raw/taihu-2021-10-09/'
- 'u-boot-taihu.bin'),
- 'a076bb6cdeaafa406330e51e074b66d8878d9036d67d4caa0137be03ee4c112c')
-
- def do_test_ppc405(self):
- file_path = self.ASSET_UBOOT.fetch()
- self.vm.set_console(console_index=1)
- self.vm.add_args('-bios', file_path)
- self.vm.launch()
- wait_for_console_pattern(self, 'AMCC PPC405EP Evaluation Board')
- exec_command_and_wait_for_pattern(self, 'reset', 'AMCC PowerPC 405EP')
-
- def test_ppc_ref405ep(self):
- self.require_accelerator("tcg")
- self.set_machine('ref405ep')
- self.do_test_ppc405()
-
-if __name__ == '__main__':
- QemuSystemTest.main()
diff --git a/tests/qapi-schema/doc-good.out b/tests/qapi-schema/doc-good.out
index 0a9da3e..5773f1d 100644
--- a/tests/qapi-schema/doc-good.out
+++ b/tests/qapi-schema/doc-good.out
@@ -113,7 +113,7 @@ The _one_ {and only}, description on the same line
Also _one_ {and only}
feature=enum-member-feat
a member feature
- section=None
+ section=Plain
@two is undocumented
doc symbol=Base
body=
@@ -171,15 +171,15 @@ description starts on the same line
a feature
feature=cmd-feat2
another feature
- section=None
+ section=Plain
.. note:: @arg3 is undocumented
section=Returns
@Object
section=Errors
some
- section=TODO
+ section=Todo
frobnicate
- section=None
+ section=Plain
.. admonition:: Notes
- Lorem ipsum dolor sit amet
@@ -212,7 +212,7 @@ If you're bored enough to read this, go see a video of boxed cats
a feature
feature=cmd-feat2
another feature
- section=None
+ section=Plain
.. qmp-example::
-> "this example"
diff --git a/tests/qapi-schema/test-qapi.py b/tests/qapi-schema/test-qapi.py
index 8fe951c..4be9302 100755
--- a/tests/qapi-schema/test-qapi.py
+++ b/tests/qapi-schema/test-qapi.py
@@ -122,7 +122,7 @@ def test_frontend(fname):
for feat, section in doc.features.items():
print(' feature=%s\n%s' % (feat, section.text))
for section in doc.sections:
- print(' section=%s\n%s' % (section.tag, section.text))
+ print(' section=%s\n%s' % (section.kind, section.text))
def open_test_result(dir_name, file_name, update):
diff --git a/tests/qtest/m48t59-test.c b/tests/qtest/m48t59-test.c
index 605797a..1e39a0e 100644
--- a/tests/qtest/m48t59-test.c
+++ b/tests/qtest/m48t59-test.c
@@ -247,11 +247,6 @@ static void base_setup(void)
base_year = 1968;
base_machine = "SS-5";
use_mmio = true;
- } else if (g_str_equal(arch, "ppc") || g_str_equal(arch, "ppc64")) {
- base = 0xF0000000;
- base_year = 1968;
- base_machine = "ref405ep";
- use_mmio = true;
} else {
g_assert_not_reached();
}
diff --git a/tests/qtest/meson.build b/tests/qtest/meson.build
index 9e5380b..5a8c1f1 100644
--- a/tests/qtest/meson.build
+++ b/tests/qtest/meson.build
@@ -171,7 +171,6 @@ qtests_mips64el = qtests_mips
qtests_ppc = \
qtests_filter + \
(config_all_devices.has_key('CONFIG_ISA_TESTDEV') ? ['endianness-test'] : []) + \
- (config_all_devices.has_key('CONFIG_M48T59') ? ['m48t59-test'] : []) + \
(config_all_accel.has_key('CONFIG_TCG') ? ['prom-env-test'] : []) + \
(config_all_accel.has_key('CONFIG_TCG') ? ['boot-serial-test'] : []) + \
['boot-order-test']
@@ -370,7 +369,8 @@ qtests = {
'ivshmem-test': [rt, '../../contrib/ivshmem-server/ivshmem-server.c'],
'migration-test': migration_files + migration_tls_files,
'pxe-test': files('boot-sector.c'),
- 'pnv-xive2-test': files('pnv-xive2-common.c', 'pnv-xive2-flush-sync.c'),
+ 'pnv-xive2-test': files('pnv-xive2-common.c', 'pnv-xive2-flush-sync.c',
+ 'pnv-xive2-nvpg_bar.c'),
'qos-test': [chardev, io, qos_test_ss.apply({}).sources()],
'tpm-crb-swtpm-test': [io, tpmemu_files],
'tpm-crb-test': [io, tpmemu_files],
diff --git a/tests/qtest/pnv-spi-seeprom-test.c b/tests/qtest/pnv-spi-seeprom-test.c
index 57f20af..600493c 100644
--- a/tests/qtest/pnv-spi-seeprom-test.c
+++ b/tests/qtest/pnv-spi-seeprom-test.c
@@ -92,7 +92,7 @@ static void test_spi_seeprom(const void *data)
qts = qtest_initf("-machine powernv10 -smp 2,cores=2,"
"threads=1 -accel tcg,thread=single -nographic "
"-blockdev node-name=pib_spic2,driver=file,"
- "filename=%s -device 25csm04,bus=pnv-spi-bus.2,cs=0,"
+ "filename=%s -device 25csm04,bus=chip0.spi.2,cs=0,"
"drive=pib_spic2", tmp_path);
spi_seeprom_transaction(qts, chip);
qtest_quit(qts);
diff --git a/tests/qtest/pnv-xive2-common.h b/tests/qtest/pnv-xive2-common.h
index 9ae3477..2077c05 100644
--- a/tests/qtest/pnv-xive2-common.h
+++ b/tests/qtest/pnv-xive2-common.h
@@ -107,5 +107,6 @@ extern void set_end(QTestState *qts, uint32_t index, uint32_t nvp_index,
void test_flush_sync_inject(QTestState *qts);
+void test_nvpg_bar(QTestState *qts);
#endif /* TEST_PNV_XIVE2_COMMON_H */
diff --git a/tests/qtest/pnv-xive2-flush-sync.c b/tests/qtest/pnv-xive2-flush-sync.c
index 3b32446..142826b 100644
--- a/tests/qtest/pnv-xive2-flush-sync.c
+++ b/tests/qtest/pnv-xive2-flush-sync.c
@@ -178,14 +178,14 @@ void test_flush_sync_inject(QTestState *qts)
int test_nr;
uint8_t byte;
- printf("# ============================================================\n");
- printf("# Starting cache flush/queue sync injection tests...\n");
+ g_test_message("=========================================================");
+ g_test_message("Starting cache flush/queue sync injection tests...");
for (test_nr = 0; test_nr < sizeof(xive_inject_tests);
test_nr++) {
int op_type = xive_inject_tests[test_nr];
- printf("# Running test %d\n", test_nr);
+ g_test_message("Running test %d", test_nr);
/* start with status byte set to 0 */
clr_sync(qts, src_pir, ic_topo_id, op_type);
diff --git a/tests/qtest/pnv-xive2-nvpg_bar.c b/tests/qtest/pnv-xive2-nvpg_bar.c
new file mode 100644
index 0000000..6ac8d36
--- /dev/null
+++ b/tests/qtest/pnv-xive2-nvpg_bar.c
@@ -0,0 +1,152 @@
+/*
+ * QTest testcase for PowerNV 10 interrupt controller (xive2)
+ * - Test NVPG BAR MMIO operations
+ *
+ * Copyright (c) 2024, IBM Corporation.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#include "qemu/osdep.h"
+#include "libqtest.h"
+
+#include "pnv-xive2-common.h"
+
+#define NVPG_BACKLOG_OP_SHIFT 10
+#define NVPG_BACKLOG_PRIO_SHIFT 4
+
+#define XIVE_PRIORITY_MAX 7
+
+enum NVx {
+ NVP,
+ NVG,
+ NVC
+};
+
+typedef enum {
+ INCR_STORE = 0b100,
+ INCR_LOAD = 0b000,
+ DECR_STORE = 0b101,
+ DECR_LOAD = 0b001,
+ READ_x = 0b010,
+ READ_y = 0b011,
+} backlog_op;
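+
+/*
+ * For example, INCR_STORE (0b100) at priority 2 encodes as
+ * offset = (0b00 << NVPG_BACKLOG_OP_SHIFT) | (2 << NVPG_BACKLOG_PRIO_SHIFT)
+ * = 0x20, and bit 2 of the op being set makes it a byte store of the
+ * delta rather than a word load of the counter (see nvpg_backlog_op()).
+ */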
+
+static uint32_t nvpg_backlog_op(QTestState *qts, backlog_op op,
+ enum NVx type, uint64_t index,
+ uint8_t priority, uint8_t delta)
+{
+ uint64_t addr, offset;
+ uint32_t count = 0;
+
+ switch (type) {
+ case NVP:
+ addr = XIVE_NVPG_ADDR + (index << (XIVE_PAGE_SHIFT + 1));
+ break;
+ case NVG:
+ addr = XIVE_NVPG_ADDR + (index << (XIVE_PAGE_SHIFT + 1)) +
+ (1 << XIVE_PAGE_SHIFT);
+ break;
+ case NVC:
+ addr = XIVE_NVC_ADDR + (index << XIVE_PAGE_SHIFT);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ offset = (op & 0b11) << NVPG_BACKLOG_OP_SHIFT;
+ offset |= priority << NVPG_BACKLOG_PRIO_SHIFT;
+ if (op >> 2) {
+ qtest_writeb(qts, addr + offset, delta);
+ } else {
+ count = qtest_readw(qts, addr + offset);
+ }
+ return count;
+}
+
+void test_nvpg_bar(QTestState *qts)
+{
+ uint32_t nvp_target = 0x11;
+ uint32_t group_target = 0x17; /* size 16 */
+ uint32_t vp_irq = 33, group_irq = 47;
+ uint32_t vp_end = 3, group_end = 97;
+ uint32_t vp_irq_data = 0x33333333;
+ uint32_t group_irq_data = 0x66666666;
+ uint8_t vp_priority = 0, group_priority = 5;
+ uint32_t vp_count[XIVE_PRIORITY_MAX + 1] = { 0 };
+ uint32_t group_count[XIVE_PRIORITY_MAX + 1] = { 0 };
+ uint32_t count, delta;
+ uint8_t i;
+
+ g_test_message("=========================================================");
+ g_test_message("Testing NVPG BAR operations");
+
+ set_nvg(qts, group_target, 0);
+ set_nvp(qts, nvp_target, 0x04);
+ set_nvp(qts, group_target, 0x04);
+
+ /*
+ * Setup: trigger a VP-specific interrupt and a group interrupt
+ * so that the backlog counters are initialized to something
+ * other than 0 for at least one priority level.
+ */
+ set_eas(qts, vp_irq, vp_end, vp_irq_data);
+ set_end(qts, vp_end, nvp_target, vp_priority, false /* group */);
+
+ set_eas(qts, group_irq, group_end, group_irq_data);
+ set_end(qts, group_end, group_target, group_priority, true /* group */);
+
+ get_esb(qts, vp_irq, XIVE_EOI_PAGE, XIVE_ESB_SET_PQ_00);
+ set_esb(qts, vp_irq, XIVE_TRIGGER_PAGE, 0, 0);
+ vp_count[vp_priority]++;
+
+ get_esb(qts, group_irq, XIVE_EOI_PAGE, XIVE_ESB_SET_PQ_00);
+ set_esb(qts, group_irq, XIVE_TRIGGER_PAGE, 0, 0);
+ group_count[group_priority]++;
+
+ /* check the initial counters */
+ for (i = 0; i <= XIVE_PRIORITY_MAX; i++) {
+ count = nvpg_backlog_op(qts, READ_x, NVP, nvp_target, i, 0);
+ g_assert_cmpuint(count, ==, vp_count[i]);
+
+ count = nvpg_backlog_op(qts, READ_y, NVG, group_target, i, 0);
+ g_assert_cmpuint(count, ==, group_count[i]);
+ }
+
+ /* do a few ops on the VP. Counter can only be 0 or 1 */
+ vp_priority = 2;
+ delta = 7;
+ nvpg_backlog_op(qts, INCR_STORE, NVP, nvp_target, vp_priority, delta);
+ vp_count[vp_priority] = 1;
+ count = nvpg_backlog_op(qts, INCR_LOAD, NVP, nvp_target, vp_priority, 0);
+ g_assert_cmpuint(count, ==, vp_count[vp_priority]);
+ count = nvpg_backlog_op(qts, READ_y, NVP, nvp_target, vp_priority, 0);
+ g_assert_cmpuint(count, ==, vp_count[vp_priority]);
+
+ count = nvpg_backlog_op(qts, DECR_LOAD, NVP, nvp_target, vp_priority, 0);
+ g_assert_cmpuint(count, ==, vp_count[vp_priority]);
+ vp_count[vp_priority] = 0;
+ nvpg_backlog_op(qts, DECR_STORE, NVP, nvp_target, vp_priority, delta);
+ count = nvpg_backlog_op(qts, READ_x, NVP, nvp_target, vp_priority, 0);
+ g_assert_cmpuint(count, ==, vp_count[vp_priority]);
+
+ /* do a few ops on the group */
+ group_priority = 2;
+ delta = 9;
+ /* can't go negative */
+ nvpg_backlog_op(qts, DECR_STORE, NVG, group_target, group_priority, delta);
+ count = nvpg_backlog_op(qts, READ_y, NVG, group_target, group_priority, 0);
+ g_assert_cmpuint(count, ==, 0);
+ nvpg_backlog_op(qts, INCR_STORE, NVG, group_target, group_priority, delta);
+ group_count[group_priority] += delta;
+ count = nvpg_backlog_op(qts, INCR_LOAD, NVG, group_target,
+ group_priority, delta);
+ g_assert_cmpuint(count, ==, group_count[group_priority]);
+ group_count[group_priority]++;
+
+ count = nvpg_backlog_op(qts, DECR_LOAD, NVG, group_target,
+ group_priority, delta);
+ g_assert_cmpuint(count, ==, group_count[group_priority]);
+ group_count[group_priority]--;
+ count = nvpg_backlog_op(qts, READ_x, NVG, group_target, group_priority, 0);
+ g_assert_cmpuint(count, ==, group_count[group_priority]);
+}
diff --git a/tests/qtest/pnv-xive2-test.c b/tests/qtest/pnv-xive2-test.c
index dd19e88..5313d4e 100644
--- a/tests/qtest/pnv-xive2-test.c
+++ b/tests/qtest/pnv-xive2-test.c
@@ -2,6 +2,9 @@
* QTest testcase for PowerNV 10 interrupt controller (xive2)
* - Test irq to hardware thread
* - Test 'Pull Thread Context to Odd Thread Reporting Line'
+ * - Test irq to hardware group
+ * - Test irq to hardware group going through backlog
+ * - Test irq to pool thread
*
* Copyright (c) 2024, IBM Corporation.
*
@@ -218,8 +221,8 @@ static void test_hw_irq(QTestState *qts)
uint16_t reg16;
uint8_t pq, nsr, cppr;
- printf("# ============================================================\n");
- printf("# Testing irq %d to hardware thread %d\n", irq, target_pir);
+ g_test_message("=========================================================");
+ g_test_message("Testing irq %d to hardware thread %d", irq, target_pir);
/* irq config */
set_eas(qts, irq, end_index, irq_data);
@@ -264,6 +267,79 @@ static void test_hw_irq(QTestState *qts)
g_assert_cmphex(cppr, ==, 0xFF);
}
+static void test_pool_irq(QTestState *qts)
+{
+ uint32_t irq = 2;
+ uint32_t irq_data = 0x600d0d06;
+ uint32_t end_index = 5;
+ uint32_t target_pir = 1;
+ uint32_t target_nvp = 0x100 + target_pir;
+ uint8_t priority = 5;
+ uint32_t reg32;
+ uint16_t reg16;
+ uint8_t pq, nsr, cppr, ipb;
+
+ g_test_message("=========================================================");
+ g_test_message("Testing irq %d to pool thread %d", irq, target_pir);
+
+ /* irq config */
+ set_eas(qts, irq, end_index, irq_data);
+ set_end(qts, end_index, target_nvp, priority, false /* group */);
+
+ /* enable and trigger irq */
+ get_esb(qts, irq, XIVE_EOI_PAGE, XIVE_ESB_SET_PQ_00);
+ set_esb(qts, irq, XIVE_TRIGGER_PAGE, 0, 0);
+
+ /* check irq is raised on cpu */
+ pq = get_esb(qts, irq, XIVE_EOI_PAGE, XIVE_ESB_GET);
+ g_assert_cmpuint(pq, ==, XIVE_ESB_PENDING);
+
+ /* check TIMA values in the PHYS ring (shared by POOL ring) */
+ reg32 = get_tima32(qts, target_pir, TM_QW3_HV_PHYS + TM_WORD0);
+ nsr = reg32 >> 24;
+ cppr = (reg32 >> 16) & 0xFF;
+ g_assert_cmphex(nsr, ==, 0x40);
+ g_assert_cmphex(cppr, ==, 0xFF);
+
+ /* check TIMA values in the POOL ring */
+ reg32 = get_tima32(qts, target_pir, TM_QW2_HV_POOL + TM_WORD0);
+ nsr = reg32 >> 24;
+ cppr = (reg32 >> 16) & 0xFF;
+ ipb = (reg32 >> 8) & 0xFF;
+ g_assert_cmphex(nsr, ==, 0);
+ g_assert_cmphex(cppr, ==, 0);
+ g_assert_cmphex(ipb, ==, 0x80 >> priority);
+
+ /* ack the irq */
+ reg16 = get_tima16(qts, target_pir, TM_SPC_ACK_HV_REG);
+ nsr = reg16 >> 8;
+ cppr = reg16 & 0xFF;
+ g_assert_cmphex(nsr, ==, 0x40);
+ g_assert_cmphex(cppr, ==, priority);
+
+ /* check irq data is what was configured */
+ reg32 = qtest_readl(qts, xive_get_queue_addr(end_index));
+ g_assert_cmphex((reg32 & 0x7fffffff), ==, (irq_data & 0x7fffffff));
+
+ /* check IPB is cleared in the POOL ring */
+ reg32 = get_tima32(qts, target_pir, TM_QW2_HV_POOL + TM_WORD0);
+ ipb = (reg32 >> 8) & 0xFF;
+ g_assert_cmphex(ipb, ==, 0);
+
+ /* End Of Interrupt */
+ set_esb(qts, irq, XIVE_EOI_PAGE, XIVE_ESB_STORE_EOI, 0);
+ pq = get_esb(qts, irq, XIVE_EOI_PAGE, XIVE_ESB_GET);
+ g_assert_cmpuint(pq, ==, XIVE_ESB_RESET);
+
+ /* reset CPPR */
+ set_tima8(qts, target_pir, TM_QW3_HV_PHYS + TM_CPPR, 0xFF);
+ reg32 = get_tima32(qts, target_pir, TM_QW3_HV_PHYS + TM_WORD0);
+ nsr = reg32 >> 24;
+ cppr = (reg32 >> 16) & 0xFF;
+ g_assert_cmphex(nsr, ==, 0x00);
+ g_assert_cmphex(cppr, ==, 0xFF);
+}
+
#define XIVE_ODD_CL 0x80
static void test_pull_thread_ctx_to_odd_thread_cl(QTestState *qts)
{
@@ -276,8 +352,9 @@ static void test_pull_thread_ctx_to_odd_thread_cl(QTestState *qts)
uint32_t cl_word;
uint32_t word2;
- printf("# ============================================================\n");
- printf("# Testing 'Pull Thread Context to Odd Thread Reporting Line'\n");
+ g_test_message("=========================================================");
+ g_test_message("Testing 'Pull Thread Context to Odd Thread Reporting " \
+ "Line'");
/* clear odd cache line prior to pull operation */
memset(cl_pair, 0, sizeof(cl_pair));
@@ -315,6 +392,158 @@ static void test_pull_thread_ctx_to_odd_thread_cl(QTestState *qts)
word2 = get_tima32(qts, target_pir, TM_QW3_HV_PHYS + TM_WORD2);
g_assert_cmphex(xive_get_field32(TM_QW3W2_VT, word2), ==, 0);
}
+
+static void test_hw_group_irq(QTestState *qts)
+{
+ uint32_t irq = 100;
+ uint32_t irq_data = 0xdeadbeef;
+ uint32_t end_index = 23;
+ uint32_t chosen_one;
+ uint32_t target_nvp = 0x81; /* group size = 4 */
+ uint8_t priority = 6;
+ uint32_t reg32;
+ uint16_t reg16;
+ uint8_t pq, nsr, cppr;
+
+ g_test_message("=========================================================");
+ g_test_message("Testing irq %d to hardware group of size 4", irq);
+
+ /* irq config */
+ set_eas(qts, irq, end_index, irq_data);
+ set_end(qts, end_index, target_nvp, priority, true /* group */);
+
+ /* enable and trigger irq */
+ get_esb(qts, irq, XIVE_EOI_PAGE, XIVE_ESB_SET_PQ_00);
+ set_esb(qts, irq, XIVE_TRIGGER_PAGE, 0, 0);
+
+ /* check irq is raised on cpu */
+ pq = get_esb(qts, irq, XIVE_EOI_PAGE, XIVE_ESB_GET);
+ g_assert_cmpuint(pq, ==, XIVE_ESB_PENDING);
+
+ /* find the targeted vCPU */
+ for (chosen_one = 0; chosen_one < SMT; chosen_one++) {
+ reg32 = get_tima32(qts, chosen_one, TM_QW3_HV_PHYS + TM_WORD0);
+ nsr = reg32 >> 24;
+ if (nsr == 0x82) {
+ break;
+ }
+ }
+ g_assert_cmphex(chosen_one, <, SMT);
+ cppr = (reg32 >> 16) & 0xFF;
+ g_assert_cmphex(nsr, ==, 0x82);
+ g_assert_cmphex(cppr, ==, 0xFF);
+
+ /* ack the irq */
+ reg16 = get_tima16(qts, chosen_one, TM_SPC_ACK_HV_REG);
+ nsr = reg16 >> 8;
+ cppr = reg16 & 0xFF;
+ g_assert_cmphex(nsr, ==, 0x82);
+ g_assert_cmphex(cppr, ==, priority);
+
+ /* check irq data is what was configured */
+ reg32 = qtest_readl(qts, xive_get_queue_addr(end_index));
+ g_assert_cmphex((reg32 & 0x7fffffff), ==, (irq_data & 0x7fffffff));
+
+ /* End Of Interrupt */
+ set_esb(qts, irq, XIVE_EOI_PAGE, XIVE_ESB_STORE_EOI, 0);
+ pq = get_esb(qts, irq, XIVE_EOI_PAGE, XIVE_ESB_GET);
+ g_assert_cmpuint(pq, ==, XIVE_ESB_RESET);
+
+ /* reset CPPR */
+ set_tima8(qts, chosen_one, TM_QW3_HV_PHYS + TM_CPPR, 0xFF);
+ reg32 = get_tima32(qts, chosen_one, TM_QW3_HV_PHYS + TM_WORD0);
+ nsr = reg32 >> 24;
+ cppr = (reg32 >> 16) & 0xFF;
+ g_assert_cmphex(nsr, ==, 0x00);
+ g_assert_cmphex(cppr, ==, 0xFF);
+}
+
+static void test_hw_group_irq_backlog(QTestState *qts)
+{
+ uint32_t irq = 31;
+ uint32_t irq_data = 0x01234567;
+ uint32_t end_index = 129;
+ uint32_t target_nvp = 0x81; /* group size = 4 */
+ uint32_t chosen_one = 3;
+ uint8_t blocking_priority, priority = 3;
+ uint32_t reg32;
+ uint16_t reg16;
+ uint8_t pq, nsr, cppr, lsmfb, i;
+
+ g_test_message("=========================================================");
+ g_test_message("Testing irq %d to hardware group of size 4 going " \
+ "through backlog",
+ irq);
+
+ /*
+ * set current priority of all threads in the group to something
+ * higher than what we're about to trigger
+ */
+ blocking_priority = priority - 1;
+ for (i = 0; i < SMT; i++) {
+ set_tima8(qts, i, TM_QW3_HV_PHYS + TM_CPPR, blocking_priority);
+ }
+
+ /* irq config */
+ set_eas(qts, irq, end_index, irq_data);
+ set_end(qts, end_index, target_nvp, priority, true /* group */);
+
+ /* enable and trigger irq */
+ get_esb(qts, irq, XIVE_EOI_PAGE, XIVE_ESB_SET_PQ_00);
+ set_esb(qts, irq, XIVE_TRIGGER_PAGE, 0, 0);
+
+ /* check irq is raised on cpu */
+ pq = get_esb(qts, irq, XIVE_EOI_PAGE, XIVE_ESB_GET);
+ g_assert_cmpuint(pq, ==, XIVE_ESB_PENDING);
+
+ /* check no interrupt is pending on any of the possible targets */
+ for (i = 0; i < SMT; i++) {
+ reg32 = get_tima32(qts, i, TM_QW3_HV_PHYS + TM_WORD0);
+ nsr = reg32 >> 24;
+ cppr = (reg32 >> 16) & 0xFF;
+ lsmfb = reg32 & 0xFF;
+ g_assert_cmphex(nsr, ==, 0x0);
+ g_assert_cmphex(cppr, ==, blocking_priority);
+ g_assert_cmphex(lsmfb, ==, priority);
+ }
+
+ /* lower priority of one thread */
+ set_tima8(qts, chosen_one, TM_QW3_HV_PHYS + TM_CPPR, priority + 1);
+
+ /* check backlogged interrupt is presented */
+ reg32 = get_tima32(qts, chosen_one, TM_QW3_HV_PHYS + TM_WORD0);
+ nsr = reg32 >> 24;
+ cppr = (reg32 >> 16) & 0xFF;
+ g_assert_cmphex(nsr, ==, 0x82);
+ g_assert_cmphex(cppr, ==, priority + 1);
+
+ /* ack the irq */
+ reg16 = get_tima16(qts, chosen_one, TM_SPC_ACK_HV_REG);
+ nsr = reg16 >> 8;
+ cppr = reg16 & 0xFF;
+ g_assert_cmphex(nsr, ==, 0x82);
+ g_assert_cmphex(cppr, ==, priority);
+
+ /* check irq data is what was configured */
+ reg32 = qtest_readl(qts, xive_get_queue_addr(end_index));
+ g_assert_cmphex((reg32 & 0x7fffffff), ==, (irq_data & 0x7fffffff));
+
+ /* End Of Interrupt */
+ set_esb(qts, irq, XIVE_EOI_PAGE, XIVE_ESB_STORE_EOI, 0);
+ pq = get_esb(qts, irq, XIVE_EOI_PAGE, XIVE_ESB_GET);
+ g_assert_cmpuint(pq, ==, XIVE_ESB_RESET);
+
+ /* reset CPPR */
+ set_tima8(qts, chosen_one, TM_QW3_HV_PHYS + TM_CPPR, 0xFF);
+ reg32 = get_tima32(qts, chosen_one, TM_QW3_HV_PHYS + TM_WORD0);
+ nsr = reg32 >> 24;
+ cppr = (reg32 >> 16) & 0xFF;
+ lsmfb = reg32 & 0xFF;
+ g_assert_cmphex(nsr, ==, 0x00);
+ g_assert_cmphex(cppr, ==, 0xFF);
+ g_assert_cmphex(lsmfb, ==, 0xFF);
+}
+
static void test_xive(void)
{
QTestState *qts;
@@ -331,8 +560,20 @@ static void test_xive(void)
test_pull_thread_ctx_to_odd_thread_cl(qts);
reset_state(qts);
+ test_pool_irq(qts);
+
+ reset_state(qts);
+ test_hw_group_irq(qts);
+
+ reset_state(qts);
+ test_hw_group_irq_backlog(qts);
+
+ reset_state(qts);
test_flush_sync_inject(qts);
+ reset_state(qts);
+ test_nvpg_bar(qts);
+
qtest_quit(qts);
}
diff --git a/trace/control-target.c b/trace/control-target.c
index d58e84f..57ceac2 100644
--- a/trace/control-target.c
+++ b/trace/control-target.c
@@ -8,8 +8,6 @@
*/
#include "qemu/osdep.h"
-#include "qemu/lockable.h"
-#include "cpu.h"
#include "trace/control.h"
diff --git a/trace/meson.build b/trace/meson.build
index c3412dc..3df4549 100644
--- a/trace/meson.build
+++ b/trace/meson.build
@@ -1,6 +1,4 @@
-system_ss.add(files('trace-hmp-cmds.c'))
-
-specific_ss.add(files('control-target.c'))
+system_ss.add(files('control-target.c', 'trace-hmp-cmds.c'))
trace_events_files = []
foreach item : [ '.' ] + trace_events_subdirs + qapi_trace_events