Diffstat (limited to 'rust/qemu-api')
29 files changed, 6619 insertions, 0 deletions
diff --git a/rust/qemu-api/.gitignore b/rust/qemu-api/.gitignore new file mode 100644 index 0000000..df6c216 --- /dev/null +++ b/rust/qemu-api/.gitignore @@ -0,0 +1,2 @@ +# Ignore generated bindings file overrides. +/src/bindings.inc.rs diff --git a/rust/qemu-api/Cargo.toml b/rust/qemu-api/Cargo.toml new file mode 100644 index 0000000..db7000d --- /dev/null +++ b/rust/qemu-api/Cargo.toml @@ -0,0 +1,28 @@ +[package] +name = "qemu_api" +version = "0.1.0" +authors = ["Manos Pitsidianakis <manos.pitsidianakis@linaro.org>"] +description = "Rust bindings for QEMU" +readme = "README.md" +resolver = "2" +publish = false + +edition.workspace = true +homepage.workspace = true +license.workspace = true +repository.workspace = true +rust-version.workspace = true + +[dependencies] +qemu_api_macros = { path = "../qemu-api-macros" } +anyhow = "~1.0" +libc = "0.2.162" +foreign = "~0.3.1" + +[features] +default = ["debug_cell"] +allocator = [] +debug_cell = [] + +[lints] +workspace = true diff --git a/rust/qemu-api/README.md b/rust/qemu-api/README.md new file mode 100644 index 0000000..ed1b7ab --- /dev/null +++ b/rust/qemu-api/README.md @@ -0,0 +1,19 @@ +# QEMU bindings and API wrappers + +This library exports helper Rust types, Rust macros and C FFI bindings for internal QEMU APIs. + +The C bindings can be generated with `bindgen`, using this build target: + +```console +$ make bindings.inc.rs +``` + +## Generate Rust documentation + +Common Cargo tasks can be performed from the QEMU build directory + +```console +$ make clippy +$ make rustfmt +$ make rustdoc +``` diff --git a/rust/qemu-api/build.rs b/rust/qemu-api/build.rs new file mode 100644 index 0000000..7849486 --- /dev/null +++ b/rust/qemu-api/build.rs @@ -0,0 +1,41 @@ +// Copyright 2024, Linaro Limited +// Author(s): Manos Pitsidianakis <manos.pitsidianakis@linaro.org> +// SPDX-License-Identifier: GPL-2.0-or-later + +#[cfg(unix)] +use std::os::unix::fs::symlink as symlink_file; +#[cfg(windows)] +use std::os::windows::fs::symlink_file; +use std::{env, fs::remove_file, io::Result, path::Path}; + +fn main() -> Result<()> { + // Placing bindings.inc.rs in the source directory is supported + // but not documented or encouraged. + let path = env::var("MESON_BUILD_ROOT") + .unwrap_or_else(|_| format!("{}/src", env!("CARGO_MANIFEST_DIR"))); + + let file = format!("{path}/rust/qemu-api/bindings.inc.rs"); + let file = Path::new(&file); + if !Path::new(&file).exists() { + panic!(concat!( + "\n", + " No generated C bindings found! Maybe you wanted one of\n", + " `make clippy`, `make rustfmt`, `make rustdoc`?\n", + "\n", + " For other uses of `cargo`, start a subshell with\n", + " `pyvenv/bin/meson devenv`, or point MESON_BUILD_ROOT to\n", + " the top of the build tree." 
+ )); + } + + let out_dir = env::var("OUT_DIR").unwrap(); + let dest_path = format!("{out_dir}/bindings.inc.rs"); + let dest_path = Path::new(&dest_path); + if dest_path.symlink_metadata().is_ok() { + remove_file(dest_path)?; + } + symlink_file(file, dest_path)?; + + println!("cargo:rerun-if-changed=build.rs"); + Ok(()) +} diff --git a/rust/qemu-api/meson.build b/rust/qemu-api/meson.build new file mode 100644 index 0000000..a090297 --- /dev/null +++ b/rust/qemu-api/meson.build @@ -0,0 +1,114 @@ +_qemu_api_cfg = run_command(rustc_args, + '--config-headers', config_host_h, '--features', files('Cargo.toml'), + capture: true, check: true).stdout().strip().splitlines() + +# _qemu_api_cfg += ['--cfg', 'feature="allocator"'] +if get_option('debug_mutex') + _qemu_api_cfg += ['--cfg', 'feature="debug_cell"'] +endif + +c_enums = [ + 'DeviceCategory', + 'GpioPolarity', + 'MachineInitPhase', + 'MemoryDeviceInfoKind', + 'MigrationPolicy', + 'MigrationPriority', + 'QEMUChrEvent', + 'QEMUClockType', + 'ResetType', + 'device_endian', + 'module_init_type', +] +_qemu_api_bindgen_args = [] +foreach enum : c_enums + _qemu_api_bindgen_args += ['--rustified-enum', enum] +endforeach +c_bitfields = [ + 'ClockEvent', + 'VMStateFlags', +] +foreach enum : c_bitfields + _qemu_api_bindgen_args += ['--bitfield-enum', enum] +endforeach + +# TODO: Remove this comment when the clang/libclang mismatch issue is solved. +# +# Rust bindings generation with `bindgen` might fail in some cases where the +# detected `libclang` does not match the expected `clang` version/target. In +# this case you must pass the path to `clang` and `libclang` to your build +# command invocation using the environment variables CLANG_PATH and +# LIBCLANG_PATH +_qemu_api_bindings_inc_rs = rust.bindgen( + input: 'wrapper.h', + dependencies: common_ss.all_dependencies(), + output: 'bindings.inc.rs', + include_directories: bindings_incdir, + bindgen_version: ['>=0.60.0'], + args: bindgen_args_common + _qemu_api_bindgen_args, + ) + +_qemu_api_rs = static_library( + 'qemu_api', + structured_sources( + [ + 'src/lib.rs', + 'src/assertions.rs', + 'src/bindings.rs', + 'src/bitops.rs', + 'src/callbacks.rs', + 'src/cell.rs', + 'src/chardev.rs', + 'src/errno.rs', + 'src/error.rs', + 'src/irq.rs', + 'src/log.rs', + 'src/memory.rs', + 'src/module.rs', + 'src/prelude.rs', + 'src/qdev.rs', + 'src/qom.rs', + 'src/sysbus.rs', + 'src/timer.rs', + 'src/uninit.rs', + 'src/vmstate.rs', + 'src/zeroable.rs', + ], + {'.' : _qemu_api_bindings_inc_rs}, + ), + override_options: ['rust_std=2021', 'build.rust_std=2021'], + rust_abi: 'rust', + rust_args: _qemu_api_cfg, + dependencies: [anyhow_rs, foreign_rs, libc_rs, qemu_api_macros, qemuutil_rs, + qom, hwcore, chardev, migration], +) + +rust.test('rust-qemu-api-tests', _qemu_api_rs, + suite: ['unit', 'rust']) + +qemu_api = declare_dependency(link_with: [_qemu_api_rs], + dependencies: [qemu_api_macros, qom, hwcore, chardev, migration]) + +# Doctests are essentially integration tests, so they need the same dependencies. +# Note that running them requires the object files for C code, so place them +# in a separate suite that is run by the "build" CI jobs rather than "check". 
+rust.doctest('rust-qemu-api-doctests', + _qemu_api_rs, + protocol: 'rust', + dependencies: qemu_api, + suite: ['doc', 'rust']) + +test('rust-qemu-api-integration', + executable( + 'rust-qemu-api-integration', + files('tests/tests.rs', 'tests/vmstate_tests.rs'), + override_options: ['rust_std=2021', 'build.rust_std=2021'], + rust_args: ['--test'], + install: false, + dependencies: [qemu_api]), + args: [ + '--test', '--test-threads', '1', + '--format', 'pretty', + ], + protocol: 'rust', + suite: ['unit', 'rust']) diff --git a/rust/qemu-api/src/assertions.rs b/rust/qemu-api/src/assertions.rs new file mode 100644 index 0000000..a2d38c8 --- /dev/null +++ b/rust/qemu-api/src/assertions.rs @@ -0,0 +1,152 @@ +// Copyright 2024, Red Hat Inc. +// Author(s): Paolo Bonzini <pbonzini@redhat.com> +// SPDX-License-Identifier: GPL-2.0-or-later + +#![doc(hidden)] +//! This module provides macros to check the equality of types and +//! the type of `struct` fields. This can be useful to ensure that +//! types match the expectations of C code. +//! +//! Documentation is hidden because it only exposes macros, which +//! are exported directly from `qemu_api`. + +// Based on https://stackoverflow.com/questions/64251852/x/70978292#70978292 +// (stackoverflow answers are released under MIT license). + +#[doc(hidden)] +pub trait EqType { + type Itself; +} + +impl<T> EqType for T { + type Itself = T; +} + +/// Assert that two types are the same. +/// +/// # Examples +/// +/// ``` +/// # use qemu_api::assert_same_type; +/// # use std::ops::Deref; +/// assert_same_type!(u32, u32); +/// assert_same_type!(<Box<u32> as Deref>::Target, u32); +/// ``` +/// +/// Different types will cause a compile failure +/// +/// ```compile_fail +/// # use qemu_api::assert_same_type; +/// assert_same_type!(&Box<u32>, &u32); +/// ``` +#[macro_export] +macro_rules! assert_same_type { + ($t1:ty, $t2:ty) => { + const _: () = { + #[allow(unused)] + fn assert_same_type(v: $t1) { + fn types_must_be_equal<T, U>(_: T) + where + T: $crate::assertions::EqType<Itself = U>, + { + } + types_must_be_equal::<_, $t2>(v); + } + }; + }; +} + +/// Assert that a field of a struct has the given type. +/// +/// # Examples +/// +/// ``` +/// # use qemu_api::assert_field_type; +/// pub struct A { +/// field1: u32, +/// } +/// +/// assert_field_type!(A, field1, u32); +/// ``` +/// +/// Different types will cause a compile failure +/// +/// ```compile_fail +/// # use qemu_api::assert_field_type; +/// # pub struct A { field1: u32 } +/// assert_field_type!(A, field1, i32); +/// ``` +#[macro_export] +macro_rules! assert_field_type { + (@internal $param_name:ident, $ti:ty, $t:ty, $($field:tt)*) => { + const _: () = { + #[allow(unused)] + fn assert_field_type($param_name: &$t) { + fn types_must_be_equal<T, U>(_: &T) + where + T: $crate::assertions::EqType<Itself = U>, + { + } + types_must_be_equal::<_, $ti>(&$($field)*); + } + }; + }; + + ($t:ty, $i:tt, $ti:ty) => { + $crate::assert_field_type!(@internal v, $ti, $t, v.$i); + }; + + ($t:ty, $i:tt, $ti:ty, num = $num:ident) => { + $crate::assert_field_type!(@internal v, $ti, $t, v.$i[0]); + }; +} + +/// Assert that an expression matches a pattern. This can also be +/// useful to compare enums that do not implement `Eq`. +/// +/// # Examples +/// +/// ``` +/// # use qemu_api::assert_match; +/// // JoinHandle does not implement `Eq`, therefore the result +/// // does not either. +/// let result: Result<std::thread::JoinHandle<()>, u32> = Err(42); +/// assert_match!(result, Err(42)); +/// ``` +#[macro_export] +macro_rules! 
assert_match { + ($a:expr, $b:pat) => { + assert!( + match $a { + $b => true, + _ => false, + }, + "{} = {:?} does not match {}", + stringify!($a), + $a, + stringify!($b) + ); + }; +} + +/// Assert at compile time that an expression is true. This is similar +/// to `const { assert!(...); }` but it works outside functions, as well as +/// on versions of Rust before 1.79. +/// +/// # Examples +/// +/// ``` +/// # use qemu_api::static_assert; +/// static_assert!("abc".len() == 3); +/// ``` +/// +/// ```compile_fail +/// # use qemu_api::static_assert; +/// static_assert!("abc".len() == 2); // does not compile +/// ``` +#[macro_export] +macro_rules! static_assert { + ($x:expr) => { + const _: () = assert!($x); + }; +} diff --git a/rust/qemu-api/src/bindings.rs b/rust/qemu-api/src/bindings.rs new file mode 100644 index 0000000..057de4b --- /dev/null +++ b/rust/qemu-api/src/bindings.rs @@ -0,0 +1,56 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +#![allow( + dead_code, + improper_ctypes_definitions, + improper_ctypes, + non_camel_case_types, + non_snake_case, + non_upper_case_globals, + unsafe_op_in_unsafe_fn, + clippy::pedantic, + clippy::restriction, + clippy::style, + clippy::missing_const_for_fn, + clippy::ptr_offset_with_cast, + clippy::useless_transmute, + clippy::missing_safety_doc +)] + +//! `bindgen`-generated declarations. + +#[cfg(MESON)] +include!("bindings.inc.rs"); + +#[cfg(not(MESON))] +include!(concat!(env!("OUT_DIR"), "/bindings.inc.rs")); + +// SAFETY: these are implemented in C; the bindings need to assert that the +// BQL is taken, either directly or via `BqlCell` and `BqlRefCell`. +// When bindings for character devices are introduced, this can be +// moved to the Opaque<> wrapper in src/chardev.rs. +unsafe impl Send for CharBackend {} +unsafe impl Sync for CharBackend {} + +// SAFETY: this is a pure data struct +unsafe impl Send for CoalescedMemoryRange {} +unsafe impl Sync for CoalescedMemoryRange {} + +// SAFETY: these are constants and vtables; the Send and Sync requirements +// are deferred to the unsafe callbacks that they contain +unsafe impl Send for MemoryRegionOps {} +unsafe impl Sync for MemoryRegionOps {} + +unsafe impl Send for Property {} +unsafe impl Sync for Property {} + +unsafe impl Send for TypeInfo {} +unsafe impl Sync for TypeInfo {} + +unsafe impl Send for VMStateDescription {} +unsafe impl Sync for VMStateDescription {} + +unsafe impl Send for VMStateField {} +unsafe impl Sync for VMStateField {} + +unsafe impl Send for VMStateInfo {} +unsafe impl Sync for VMStateInfo {} diff --git a/rust/qemu-api/src/bitops.rs b/rust/qemu-api/src/bitops.rs new file mode 100644 index 0000000..b1e3a53 --- /dev/null +++ b/rust/qemu-api/src/bitops.rs @@ -0,0 +1,119 @@ +// Copyright (C) 2024 Intel Corporation. +// Author(s): Zhao Liu <zhao1.liu@intel.com> +// SPDX-License-Identifier: GPL-2.0-or-later + +//! This module provides bit operation extensions to integer types. +//! It is usually included via the `qemu_api` prelude. 
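+//!
+//! A minimal usage sketch, assuming `IntegerExt` is in scope (it is normally
+//! re-exported through the `qemu_api` prelude):
+//!
+//! ```
+//! # use qemu_api::bitops::IntegerExt;
+//! let reg: u32 = 0;
+//! let reg = reg.deposit(8, 4, 0xau32); // store 0xa into bits [8, 12)
+//! assert_eq!(reg.extract(8, 4), 0xa);
+//! assert_eq!(u32::mask(8, 4), 0xf00);
+//! ```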
+ +use std::ops::{ + Add, AddAssign, BitAnd, BitAndAssign, BitOr, BitOrAssign, BitXor, BitXorAssign, Div, DivAssign, + Mul, MulAssign, Not, Rem, RemAssign, Shl, ShlAssign, Shr, ShrAssign, +}; + +/// Trait for extensions to integer types +pub trait IntegerExt: + Add<Self, Output = Self> + AddAssign<Self> + + BitAnd<Self, Output = Self> + BitAndAssign<Self> + + BitOr<Self, Output = Self> + BitOrAssign<Self> + + BitXor<Self, Output = Self> + BitXorAssign<Self> + + Copy + + Div<Self, Output = Self> + DivAssign<Self> + + Eq + + Mul<Self, Output = Self> + MulAssign<Self> + + Not<Output = Self> + Ord + PartialOrd + + Rem<Self, Output = Self> + RemAssign<Self> + + Shl<Self, Output = Self> + ShlAssign<Self> + + Shl<u32, Output = Self> + ShlAssign<u32> + // add more as needed + Shr<Self, Output = Self> + ShrAssign<Self> + + Shr<u32, Output = Self> + ShrAssign<u32> // add more as needed +{ + const BITS: u32; + const MAX: Self; + const MIN: Self; + const ONE: Self; + const ZERO: Self; + + #[inline] + #[must_use] + fn bit(start: u32) -> Self + { + debug_assert!(start < Self::BITS); + + Self::ONE << start + } + + #[inline] + #[must_use] + fn mask(start: u32, length: u32) -> Self + { + /* FIXME: Implement a more elegant check with error handling support? */ + debug_assert!(start < Self::BITS && length > 0 && length <= Self::BITS - start); + + (Self::MAX >> (Self::BITS - length)) << start + } + + #[inline] + #[must_use] + fn deposit<U: IntegerExt>(self, start: u32, length: u32, + fieldval: U) -> Self + where Self: From<U> + { + debug_assert!(length <= U::BITS); + + let mask = Self::mask(start, length); + (self & !mask) | ((Self::from(fieldval) << start) & mask) + } + + #[inline] + #[must_use] + fn extract(self, start: u32, length: u32) -> Self + { + let mask = Self::mask(start, length); + (self & mask) >> start + } +} + +macro_rules! impl_num_ext { + ($type:ty) => { + impl IntegerExt for $type { + const BITS: u32 = <$type>::BITS; + const MAX: Self = <$type>::MAX; + const MIN: Self = <$type>::MIN; + const ONE: Self = 1; + const ZERO: Self = 0; + } + }; +} + +impl_num_ext!(u8); +impl_num_ext!(u16); +impl_num_ext!(u32); +impl_num_ext!(u64); + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_deposit() { + assert_eq!(15u32.deposit(8, 8, 1u32), 256 + 15); + assert_eq!(15u32.deposit(8, 1, 255u8), 256 + 15); + } + + #[test] + fn test_extract() { + assert_eq!(15u32.extract(2, 4), 3); + } + + #[test] + fn test_bit() { + assert_eq!(u8::bit(7), 128); + assert_eq!(u32::bit(16), 0x10000); + } + + #[test] + fn test_mask() { + assert_eq!(u8::mask(7, 1), 128); + assert_eq!(u32::mask(8, 8), 0xff00); + } +} diff --git a/rust/qemu-api/src/callbacks.rs b/rust/qemu-api/src/callbacks.rs new file mode 100644 index 0000000..9642a16 --- /dev/null +++ b/rust/qemu-api/src/callbacks.rs @@ -0,0 +1,241 @@ +// SPDX-License-Identifier: MIT + +//! Utility functions to deal with callbacks from C to Rust. + +use std::{mem, ptr::NonNull}; + +/// Trait for functions (types implementing [`Fn`]) that can be used as +/// callbacks. These include both zero-capture closures and function pointers. +/// +/// In Rust, calling a function through the `Fn` trait normally requires a +/// `self` parameter, even though for zero-sized functions (including function +/// pointers) the type itself contains all necessary information to call the +/// function. This trait provides a `call` function that doesn't require `self`, +/// allowing zero-sized functions to be called using only their type. 
+/// +/// This enables zero-sized functions to be passed entirely through generic +/// parameters and resolved at compile-time. A typical use is a function +/// receiving an unused parameter of generic type `F` and calling it via +/// `F::call` or passing it to another function via `func::<F>`. +/// +/// QEMU uses this trick to create wrappers to C callbacks. The wrappers +/// are needed to convert an opaque `*mut c_void` into a Rust reference, +/// but they only have a single opaque that they can use. The `FnCall` +/// trait makes it possible to use that opaque for `self` or any other +/// reference: +/// +/// ```ignore +/// // The compiler creates a new `rust_bh_cb` wrapper for each function +/// // passed to `qemu_bh_schedule_oneshot` below. +/// unsafe extern "C" fn rust_bh_cb<T, F: for<'a> FnCall<(&'a T,)>>( +/// opaque: *mut c_void, +/// ) { +/// // SAFETY: the opaque was passed as a reference to `T`. +/// F::call((unsafe { &*(opaque.cast::<T>()) }, )) +/// } +/// +/// // The `_f` parameter is unused but it helps the compiler build the appropriate `F`. +/// // Using a reference allows usage in const context. +/// fn qemu_bh_schedule_oneshot<T, F: for<'a> FnCall<(&'a T,)>>(_f: &F, opaque: &T) { +/// let cb: unsafe extern "C" fn(*mut c_void) = rust_bh_cb::<T, F>; +/// unsafe { +/// bindings::qemu_bh_schedule_oneshot(cb, opaque as *const T as *const c_void as *mut c_void) +/// } +/// } +/// ``` +/// +/// Each wrapper is a separate instance of `rust_bh_cb` and is therefore +/// compiled to a separate function ("monomorphization"). If you wanted +/// to pass `self` as the opaque value, the generic parameters would be +/// `rust_bh_cb::<Self, F>`. +/// +/// `Args` is a tuple type whose types are the arguments of the function, +/// while `R` is the returned type. +/// +/// # Examples +/// +/// ``` +/// # use qemu_api::callbacks::FnCall; +/// fn call_it<F: for<'a> FnCall<(&'a str,), String>>(_f: &F, s: &str) -> String { +/// F::call((s,)) +/// } +/// +/// let s: String = call_it(&str::to_owned, "hello world"); +/// assert_eq!(s, "hello world"); +/// ``` +/// +/// Note that the compiler will produce a different version of `call_it` for +/// each function that is passed to it. Therefore the argument is not really +/// used, except to decide what is `F` and what `F::call` does. +/// +/// Attempting to pass a non-zero-sized closure causes a compile-time failure: +/// +/// ```compile_fail +/// # use qemu_api::callbacks::FnCall; +/// # fn call_it<'a, F: FnCall<(&'a str,), String>>(_f: &F, s: &'a str) -> String { +/// # F::call((s,)) +/// # } +/// let x: &'static str = "goodbye world"; +/// call_it(&move |_| String::from(x), "hello workd"); +/// ``` +/// +/// `()` can be used to indicate "no function": +/// +/// ``` +/// # use qemu_api::callbacks::FnCall; +/// fn optional<F: for<'a> FnCall<(&'a str,), String>>(_f: &F, s: &str) -> Option<String> { +/// if F::IS_SOME { +/// Some(F::call((s,))) +/// } else { +/// None +/// } +/// } +/// +/// assert!(optional(&(), "hello world").is_none()); +/// ``` +/// +/// Invoking `F::call` will then be a run-time error. +/// +/// ```should_panic +/// # use qemu_api::callbacks::FnCall; +/// # fn call_it<F: for<'a> FnCall<(&'a str,), String>>(_f: &F, s: &str) -> String { +/// # F::call((s,)) +/// # } +/// let s: String = call_it(&(), "hello world"); // panics +/// ``` +/// +/// # Safety +/// +/// Because `Self` is a zero-sized type, all instances of the type are +/// equivalent. 
However, in addition to this, `Self` must have no invariants +/// that could be violated by creating a reference to it. +/// +/// This is always true for zero-capture closures and function pointers, as long +/// as the code is able to name the function in the first place. +pub unsafe trait FnCall<Args, R = ()>: 'static + Sync + Sized { + /// Referring to this internal constant asserts that the `Self` type is + /// zero-sized. Can be replaced by an inline const expression in + /// Rust 1.79.0+. + const ASSERT_ZERO_SIZED: () = { assert!(mem::size_of::<Self>() == 0) }; + + /// Referring to this constant asserts that the `Self` type is an actual + /// function type, which can be used to catch incorrect use of `()` + /// at compile time. + /// + /// # Examples + /// + /// ```compile_fail + /// # use qemu_api::callbacks::FnCall; + /// fn call_it<F: for<'a> FnCall<(&'a str,), String>>(_f: &F, s: &str) -> String { + /// let _: () = F::ASSERT_IS_SOME; + /// F::call((s,)) + /// } + /// + /// let s: String = call_it((), "hello world"); // does not compile + /// ``` + /// + /// Note that this can be more simply `const { assert!(F::IS_SOME) }` in + /// Rust 1.79.0 or newer. + const ASSERT_IS_SOME: () = { assert!(Self::IS_SOME) }; + + /// `true` if `Self` is an actual function type and not `()`. + /// + /// # Examples + /// + /// You can use `IS_SOME` to catch this at compile time: + /// + /// ```compile_fail + /// # use qemu_api::callbacks::FnCall; + /// fn call_it<F: for<'a> FnCall<(&'a str,), String>>(_f: &F, s: &str) -> String { + /// const { assert!(F::IS_SOME) } + /// F::call((s,)) + /// } + /// + /// let s: String = call_it((), "hello world"); // does not compile + /// ``` + const IS_SOME: bool; + + /// `false` if `Self` is an actual function type, `true` if it is `()`. + fn is_none() -> bool { + !Self::IS_SOME + } + + /// `true` if `Self` is an actual function type, `false` if it is `()`. + fn is_some() -> bool { + Self::IS_SOME + } + + /// Call the function with the arguments in args. + fn call(a: Args) -> R; +} + +/// `()` acts as a "null" callback. Using `()` and `function` is nicer +/// than `None` and `Some(function)`, because the compiler is unable to +/// infer the type of just `None`. Therefore, the trait itself acts as the +/// option type, with functions [`FnCall::is_some`] and [`FnCall::is_none`]. +unsafe impl<Args, R> FnCall<Args, R> for () { + const IS_SOME: bool = false; + + /// Call the function with the arguments in args. + fn call(_a: Args) -> R { + panic!("callback not specified") + } +} + +macro_rules! impl_call { + ($($args:ident,)* ) => ( + // SAFETY: because each function is treated as a separate type, + // accessing `FnCall` is only possible in code that would be + // allowed to call the function. + unsafe impl<F, $($args,)* R> FnCall<($($args,)*), R> for F + where + F: 'static + Sync + Sized + Fn($($args, )*) -> R, + { + const IS_SOME: bool = true; + + #[inline(always)] + fn call(a: ($($args,)*)) -> R { + let _: () = Self::ASSERT_ZERO_SIZED; + + // SAFETY: the safety of this method is the condition for implementing + // `FnCall`. As to the `NonNull` idiom to create a zero-sized type, + // see https://github.com/rust-lang/libs-team/issues/292. 
+ let f: &'static F = unsafe { &*NonNull::<Self>::dangling().as_ptr() }; + let ($($args,)*) = a; + f($($args,)*) + } + } + ) +} + +impl_call!(_1, _2, _3, _4, _5,); +impl_call!(_1, _2, _3, _4,); +impl_call!(_1, _2, _3,); +impl_call!(_1, _2,); +impl_call!(_1,); +impl_call!(); + +#[cfg(test)] +mod tests { + use super::*; + + // The `_f` parameter is unused but it helps the compiler infer `F`. + fn do_test_call<'a, F: FnCall<(&'a str,), String>>(_f: &F) -> String { + F::call(("hello world",)) + } + + #[test] + fn test_call() { + assert_eq!(do_test_call(&str::to_owned), "hello world") + } + + // The `_f` parameter is unused but it helps the compiler infer `F`. + fn do_test_is_some<'a, F: FnCall<(&'a str,), String>>(_f: &F) { + assert!(F::is_some()); + } + + #[test] + fn test_is_some() { + do_test_is_some(&str::to_owned); + } +} diff --git a/rust/qemu-api/src/cell.rs b/rust/qemu-api/src/cell.rs new file mode 100644 index 0000000..27063b0 --- /dev/null +++ b/rust/qemu-api/src/cell.rs @@ -0,0 +1,1101 @@ +// SPDX-License-Identifier: MIT +// +// This file is based on library/core/src/cell.rs from +// Rust 1.82.0. +// +// Permission is hereby granted, free of charge, to any +// person obtaining a copy of this software and associated +// documentation files (the "Software"), to deal in the +// Software without restriction, including without +// limitation the rights to use, copy, modify, merge, +// publish, distribute, sublicense, and/or sell copies of +// the Software, and to permit persons to whom the Software +// is furnished to do so, subject to the following +// conditions: +// +// The above copyright notice and this permission notice +// shall be included in all copies or substantial portions +// of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. + +//! QEMU-specific mutable containers +//! +//! Rust memory safety is based on this rule: Given an object `T`, it is only +//! possible to have one of the following: +//! +//! - Having several immutable references (`&T`) to the object (also known as +//! **aliasing**). +//! - Having one mutable reference (`&mut T`) to the object (also known as +//! **mutability**). +//! +//! This is enforced by the Rust compiler. However, there are situations where +//! this rule is not flexible enough. Sometimes it is required to have multiple +//! references to an object and yet mutate it. In particular, QEMU objects +//! usually have their pointer shared with the "outside world very early in +//! their lifetime", for example when they create their +//! [`MemoryRegion`s](crate::bindings::MemoryRegion). Therefore, individual +//! parts of a device must be made mutable in a controlled manner; this module +//! provides the tools to do so. +//! +//! ## Cell types +//! +//! [`BqlCell<T>`] and [`BqlRefCell<T>`] allow doing this via the Big QEMU Lock. +//! While they are essentially the same single-threaded primitives that are +//! available in `std::cell`, the BQL allows them to be used from a +//! multi-threaded context and to share references across threads, while +//! maintaining Rust's safety guarantees. 
For this reason, unlike +//! their `std::cell` counterparts, `BqlCell` and `BqlRefCell` implement the +//! `Sync` trait. +//! +//! BQL checks are performed in debug builds but can be optimized away in +//! release builds, providing runtime safety during development with no overhead +//! in production. +//! +//! The two provide different ways of handling interior mutability. +//! `BqlRefCell` is best suited for data that is primarily accessed by the +//! device's own methods, where multiple reads and writes can be grouped within +//! a single borrow and a mutable reference can be passed around. Instead, +//! [`BqlCell`] is a better choice when sharing small pieces of data with +//! external code (especially C code), because it provides simple get/set +//! operations that can be used one at a time. +//! +//! Warning: While `BqlCell` and `BqlRefCell` are similar to their `std::cell` +//! counterparts, they are not interchangeable. Using `std::cell` types in +//! QEMU device implementations is usually incorrect and can lead to +//! thread-safety issues. +//! +//! ### Example +//! +//! ``` +//! # use qemu_api::prelude::*; +//! # use qemu_api::{cell::BqlRefCell, irq::InterruptSource, irq::IRQState}; +//! # use qemu_api::{sysbus::SysBusDevice, qom::Owned, qom::ParentField}; +//! # const N_GPIOS: usize = 8; +//! # struct PL061Registers { /* ... */ } +//! # unsafe impl ObjectType for PL061State { +//! # type Class = <SysBusDevice as ObjectType>::Class; +//! # const TYPE_NAME: &'static std::ffi::CStr = c"pl061"; +//! # } +//! struct PL061State { +//! parent_obj: ParentField<SysBusDevice>, +//! +//! // Configuration is read-only after initialization +//! pullups: u32, +//! pulldowns: u32, +//! +//! // Single values shared with C code use BqlCell, in this case via InterruptSource +//! out: [InterruptSource; N_GPIOS], +//! interrupt: InterruptSource, +//! +//! // Larger state accessed by device methods uses BqlRefCell or Mutex +//! registers: BqlRefCell<PL061Registers>, +//! } +//! ``` +//! +//! ### `BqlCell<T>` +//! +//! [`BqlCell<T>`] implements interior mutability by moving values in and out of +//! the cell. That is, an `&mut T` to the inner value can never be obtained as +//! long as the cell is shared. The value itself cannot be directly obtained +//! without copying it, cloning it, or replacing it with something else. This +//! type provides the following methods, all of which can be called only while +//! the BQL is held: +//! +//! - For types that implement [`Copy`], the [`get`](BqlCell::get) method +//! retrieves the current interior value by duplicating it. +//! - For types that implement [`Default`], the [`take`](BqlCell::take) method +//! replaces the current interior value with [`Default::default()`] and +//! returns the replaced value. +//! - All types have: +//! - [`replace`](BqlCell::replace): replaces the current interior value and +//! returns the replaced value. +//! - [`set`](BqlCell::set): this method replaces the interior value, +//! dropping the replaced value. +//! +//! ### `BqlRefCell<T>` +//! +//! [`BqlRefCell<T>`] uses Rust's lifetimes to implement "dynamic borrowing", a +//! process whereby one can claim temporary, exclusive, mutable access to the +//! inner value: +//! +//! ```ignore +//! fn clear_interrupts(&self, val: u32) { +//! // A mutable borrow gives read-write access to the registers +//! let mut regs = self.registers.borrow_mut(); +//! let old = regs.interrupt_status(); +//! regs.update_interrupt_status(old & !val); +//! } +//! ``` +//! +//! 
Borrows for `BqlRefCell<T>`s are tracked at _runtime_, unlike Rust's native +//! reference types which are entirely tracked statically, at compile time. +//! Multiple immutable borrows are allowed via [`borrow`](BqlRefCell::borrow), +//! or a single mutable borrow via [`borrow_mut`](BqlRefCell::borrow_mut). The +//! thread will panic if these rules are violated or if the BQL is not held. +//! +//! ## Opaque wrappers +//! +//! The cell types from the previous section are useful at the boundaries +//! of code that requires interior mutability. When writing glue code that +//! interacts directly with C structs, however, it is useful to operate +//! at a lower level. +//! +//! C functions often violate Rust's fundamental assumptions about memory +//! safety by modifying memory even if it is shared. Furthermore, C structs +//! often start their life uninitialized and may be populated lazily. +//! +//! For this reason, this module provides the [`Opaque<T>`] type to opt out +//! of Rust's usual guarantees about the wrapped type. Access to the wrapped +//! value is always through raw pointers, obtained via methods like +//! [`as_mut_ptr()`](Opaque::as_mut_ptr) and [`as_ptr()`](Opaque::as_ptr). These +//! pointers can then be passed to C functions or dereferenced; both actions +//! require `unsafe` blocks, making it clear where safety guarantees must be +//! manually verified. For example +//! +//! ```ignore +//! unsafe { +//! let state = Opaque::<MyStruct>::uninit(); +//! qemu_struct_init(state.as_mut_ptr()); +//! } +//! ``` +//! +//! [`Opaque<T>`] will usually be wrapped one level further, so that +//! bridge methods can be added to the wrapper: +//! +//! ```ignore +//! pub struct MyStruct(Opaque<bindings::MyStruct>); +//! +//! impl MyStruct { +//! fn new() -> Pin<Box<MyStruct>> { +//! let result = Box::pin(unsafe { Opaque::uninit() }); +//! unsafe { qemu_struct_init(result.as_mut_ptr()) }; +//! result +//! } +//! } +//! ``` +//! +//! This pattern of wrapping bindgen-generated types in [`Opaque<T>`] provides +//! several advantages: +//! +//! * The choice of traits to be implemented is not limited by the +//! bindgen-generated code. For example, [`Drop`] can be added without +//! disabling [`Copy`] on the underlying bindgen type +//! +//! * [`Send`] and [`Sync`] implementations can be controlled by the wrapper +//! type rather than being automatically derived from the C struct's layout +//! +//! * Methods can be implemented in a separate crate from the bindgen-generated +//! bindings +//! +//! * [`Debug`](std::fmt::Debug) and [`Display`](std::fmt::Display) +//! implementations can be customized to be more readable than the raw C +//! struct representation +//! +//! The [`Opaque<T>`] type does not include BQL validation; it is possible to +//! assert in the code that the right lock is taken, to use it together +//! with a custom lock guard type, or to let C code take the lock, as +//! appropriate. It is also possible to use it with non-thread-safe +//! types, since by default (unlike [`BqlCell`] and [`BqlRefCell`] +//! it is neither `Sync` nor `Send`. +//! +//! While [`Opaque<T>`] is necessary for C interop, it should be used sparingly +//! and only at FFI boundaries. For QEMU-specific types that need interior +//! mutability, prefer [`BqlCell`] or [`BqlRefCell`]. 
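+//!
+//! As a minimal sketch of `BqlCell` usage (mirroring the per-method doctests
+//! below, with `bql_start_test()` standing in for the BQL):
+//!
+//! ```
+//! use qemu_api::cell::BqlCell;
+//! # qemu_api::cell::bql_start_test();
+//!
+//! let c = BqlCell::new(5);
+//! c.set(10);
+//! assert_eq!(c.replace(3), 10);
+//! assert_eq!(c.get(), 3);
+//! ```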
+ +use std::{ + cell::{Cell, UnsafeCell}, + cmp::Ordering, + fmt, + marker::{PhantomData, PhantomPinned}, + mem::{self, MaybeUninit}, + ops::{Deref, DerefMut}, + ptr::NonNull, +}; + +use crate::bindings; + +/// An internal function that is used by doctests. +pub fn bql_start_test() { + // SAFETY: integration tests are run with --test-threads=1, while + // unit tests and doctests are not multithreaded and do not have + // any BQL-protected data. Just set bql_locked to true. + unsafe { + bindings::rust_bql_mock_lock(); + } +} + +pub fn bql_locked() -> bool { + // SAFETY: the function does nothing but return a thread-local bool + unsafe { bindings::bql_locked() } +} + +fn bql_block_unlock(increase: bool) { + // SAFETY: this only adjusts a counter + unsafe { + bindings::bql_block_unlock(increase); + } +} + +/// A mutable memory location that is protected by the Big QEMU Lock. +/// +/// # Memory layout +/// +/// `BqlCell<T>` has the same in-memory representation as its inner type `T`. +#[repr(transparent)] +pub struct BqlCell<T> { + value: UnsafeCell<T>, +} + +// SAFETY: Same as for std::sync::Mutex. In the end this *is* a Mutex, +// except it is stored out-of-line +unsafe impl<T: Send> Send for BqlCell<T> {} +unsafe impl<T: Send> Sync for BqlCell<T> {} + +impl<T: Copy> Clone for BqlCell<T> { + #[inline] + fn clone(&self) -> BqlCell<T> { + BqlCell::new(self.get()) + } +} + +impl<T: Default> Default for BqlCell<T> { + /// Creates a `BqlCell<T>`, with the `Default` value for T. + #[inline] + fn default() -> BqlCell<T> { + BqlCell::new(Default::default()) + } +} + +impl<T: PartialEq + Copy> PartialEq for BqlCell<T> { + #[inline] + fn eq(&self, other: &BqlCell<T>) -> bool { + self.get() == other.get() + } +} + +impl<T: Eq + Copy> Eq for BqlCell<T> {} + +impl<T: PartialOrd + Copy> PartialOrd for BqlCell<T> { + #[inline] + fn partial_cmp(&self, other: &BqlCell<T>) -> Option<Ordering> { + self.get().partial_cmp(&other.get()) + } +} + +impl<T: Ord + Copy> Ord for BqlCell<T> { + #[inline] + fn cmp(&self, other: &BqlCell<T>) -> Ordering { + self.get().cmp(&other.get()) + } +} + +impl<T> From<T> for BqlCell<T> { + /// Creates a new `BqlCell<T>` containing the given value. + fn from(t: T) -> BqlCell<T> { + BqlCell::new(t) + } +} + +impl<T: fmt::Debug + Copy> fmt::Debug for BqlCell<T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.get().fmt(f) + } +} + +impl<T: fmt::Display + Copy> fmt::Display for BqlCell<T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.get().fmt(f) + } +} + +impl<T> BqlCell<T> { + /// Creates a new `BqlCell` containing the given value. + /// + /// # Examples + /// + /// ``` + /// use qemu_api::cell::BqlCell; + /// # qemu_api::cell::bql_start_test(); + /// + /// let c = BqlCell::new(5); + /// ``` + #[inline] + pub const fn new(value: T) -> BqlCell<T> { + BqlCell { + value: UnsafeCell::new(value), + } + } + + /// Sets the contained value. + /// + /// # Examples + /// + /// ``` + /// use qemu_api::cell::BqlCell; + /// # qemu_api::cell::bql_start_test(); + /// + /// let c = BqlCell::new(5); + /// + /// c.set(10); + /// ``` + #[inline] + pub fn set(&self, val: T) { + self.replace(val); + } + + /// Replaces the contained value with `val`, and returns the old contained + /// value. 
+ /// + /// # Examples + /// + /// ``` + /// use qemu_api::cell::BqlCell; + /// # qemu_api::cell::bql_start_test(); + /// + /// let cell = BqlCell::new(5); + /// assert_eq!(cell.get(), 5); + /// assert_eq!(cell.replace(10), 5); + /// assert_eq!(cell.get(), 10); + /// ``` + #[inline] + pub fn replace(&self, val: T) -> T { + assert!(bql_locked()); + // SAFETY: This can cause data races if called from multiple threads, + // but it won't happen as long as C code accesses the value + // under BQL protection only. + mem::replace(unsafe { &mut *self.value.get() }, val) + } + + /// Unwraps the value, consuming the cell. + /// + /// # Examples + /// + /// ``` + /// use qemu_api::cell::BqlCell; + /// # qemu_api::cell::bql_start_test(); + /// + /// let c = BqlCell::new(5); + /// let five = c.into_inner(); + /// + /// assert_eq!(five, 5); + /// ``` + pub fn into_inner(self) -> T { + assert!(bql_locked()); + self.value.into_inner() + } +} + +impl<T: Copy> BqlCell<T> { + /// Returns a copy of the contained value. + /// + /// # Examples + /// + /// ``` + /// use qemu_api::cell::BqlCell; + /// # qemu_api::cell::bql_start_test(); + /// + /// let c = BqlCell::new(5); + /// + /// let five = c.get(); + /// ``` + #[inline] + pub fn get(&self) -> T { + assert!(bql_locked()); + // SAFETY: This can cause data races if called from multiple threads, + // but it won't happen as long as C code accesses the value + // under BQL protection only. + unsafe { *self.value.get() } + } +} + +impl<T> BqlCell<T> { + /// Returns a raw pointer to the underlying data in this cell. + /// + /// # Examples + /// + /// ``` + /// use qemu_api::cell::BqlCell; + /// # qemu_api::cell::bql_start_test(); + /// + /// let c = BqlCell::new(5); + /// + /// let ptr = c.as_ptr(); + /// ``` + #[inline] + pub const fn as_ptr(&self) -> *mut T { + self.value.get() + } +} + +impl<T: Default> BqlCell<T> { + /// Takes the value of the cell, leaving `Default::default()` in its place. + /// + /// # Examples + /// + /// ``` + /// use qemu_api::cell::BqlCell; + /// # qemu_api::cell::bql_start_test(); + /// + /// let c = BqlCell::new(5); + /// let five = c.take(); + /// + /// assert_eq!(five, 5); + /// assert_eq!(c.into_inner(), 0); + /// ``` + pub fn take(&self) -> T { + self.replace(Default::default()) + } +} + +/// A mutable memory location with dynamically checked borrow rules, +/// protected by the Big QEMU Lock. +/// +/// See the [module-level documentation](self) for more. +/// +/// # Memory layout +/// +/// `BqlRefCell<T>` starts with the same in-memory representation as its +/// inner type `T`. +#[repr(C)] +pub struct BqlRefCell<T> { + // It is important that this is the first field (which is not the case + // for std::cell::BqlRefCell), so that we can use offset_of! on it. + // UnsafeCell and repr(C) both prevent usage of niches. + value: UnsafeCell<T>, + borrow: Cell<BorrowFlag>, + // Stores the location of the earliest currently active borrow. + // This gets updated whenever we go from having zero borrows + // to having a single borrow. When a borrow occurs, this gets included + // in the panic message + #[cfg(feature = "debug_cell")] + borrowed_at: Cell<Option<&'static std::panic::Location<'static>>>, +} + +// Positive values represent the number of `BqlRef` active. Negative values +// represent the number of `BqlRefMut` active. Right now QEMU's implementation +// does not allow to create `BqlRefMut`s that refer to distinct, nonoverlapping +// components of a `BqlRefCell` (e.g., different ranges of a slice). 
+// +// `BqlRef` and `BqlRefMut` are both two words in size, and so there will likely +// never be enough `BqlRef`s or `BqlRefMut`s in existence to overflow half of +// the `usize` range. Thus, a `BorrowFlag` will probably never overflow or +// underflow. However, this is not a guarantee, as a pathological program could +// repeatedly create and then mem::forget `BqlRef`s or `BqlRefMut`s. Thus, all +// code must explicitly check for overflow and underflow in order to avoid +// unsafety, or at least behave correctly in the event that overflow or +// underflow happens (e.g., see BorrowRef::new). +type BorrowFlag = isize; +const UNUSED: BorrowFlag = 0; + +#[inline(always)] +const fn is_writing(x: BorrowFlag) -> bool { + x < UNUSED +} + +#[inline(always)] +const fn is_reading(x: BorrowFlag) -> bool { + x > UNUSED +} + +impl<T> BqlRefCell<T> { + /// Creates a new `BqlRefCell` containing `value`. + /// + /// # Examples + /// + /// ``` + /// use qemu_api::cell::BqlRefCell; + /// + /// let c = BqlRefCell::new(5); + /// ``` + #[inline] + pub const fn new(value: T) -> BqlRefCell<T> { + BqlRefCell { + value: UnsafeCell::new(value), + borrow: Cell::new(UNUSED), + #[cfg(feature = "debug_cell")] + borrowed_at: Cell::new(None), + } + } +} + +// This ensures the panicking code is outlined from `borrow_mut` for +// `BqlRefCell`. +#[inline(never)] +#[cold] +#[cfg(feature = "debug_cell")] +fn panic_already_borrowed(source: &Cell<Option<&'static std::panic::Location<'static>>>) -> ! { + // If a borrow occurred, then we must already have an outstanding borrow, + // so `borrowed_at` will be `Some` + panic!("already borrowed at {:?}", source.take().unwrap()) +} + +#[inline(never)] +#[cold] +#[cfg(not(feature = "debug_cell"))] +fn panic_already_borrowed() -> ! { + panic!("already borrowed") +} + +impl<T> BqlRefCell<T> { + #[inline] + #[allow(clippy::unused_self)] + fn panic_already_borrowed(&self) -> ! { + #[cfg(feature = "debug_cell")] + { + panic_already_borrowed(&self.borrowed_at) + } + #[cfg(not(feature = "debug_cell"))] + { + panic_already_borrowed() + } + } + + /// Immutably borrows the wrapped value. + /// + /// The borrow lasts until the returned `BqlRef` exits scope. Multiple + /// immutable borrows can be taken out at the same time. + /// + /// # Panics + /// + /// Panics if the value is currently mutably borrowed. + /// + /// # Examples + /// + /// ``` + /// use qemu_api::cell::BqlRefCell; + /// # qemu_api::cell::bql_start_test(); + /// + /// let c = BqlRefCell::new(5); + /// + /// let borrowed_five = c.borrow(); + /// let borrowed_five2 = c.borrow(); + /// ``` + /// + /// An example of panic: + /// + /// ```should_panic + /// use qemu_api::cell::BqlRefCell; + /// # qemu_api::cell::bql_start_test(); + /// + /// let c = BqlRefCell::new(5); + /// + /// let m = c.borrow_mut(); + /// let b = c.borrow(); // this causes a panic + /// ``` + #[inline] + #[track_caller] + pub fn borrow(&self) -> BqlRef<'_, T> { + if let Some(b) = BorrowRef::new(&self.borrow) { + // `borrowed_at` is always the *first* active borrow + if b.borrow.get() == 1 { + #[cfg(feature = "debug_cell")] + self.borrowed_at.set(Some(std::panic::Location::caller())); + } + + bql_block_unlock(true); + + // SAFETY: `BorrowRef` ensures that there is only immutable access + // to the value while borrowed. + let value = unsafe { NonNull::new_unchecked(self.value.get()) }; + BqlRef { value, borrow: b } + } else { + self.panic_already_borrowed() + } + } + + /// Mutably borrows the wrapped value. 
+ /// + /// The borrow lasts until the returned `BqlRefMut` or all `BqlRefMut`s + /// derived from it exit scope. The value cannot be borrowed while this + /// borrow is active. + /// + /// # Panics + /// + /// Panics if the value is currently borrowed. + /// + /// # Examples + /// + /// ``` + /// use qemu_api::cell::BqlRefCell; + /// # qemu_api::cell::bql_start_test(); + /// + /// let c = BqlRefCell::new("hello".to_owned()); + /// + /// *c.borrow_mut() = "bonjour".to_owned(); + /// + /// assert_eq!(&*c.borrow(), "bonjour"); + /// ``` + /// + /// An example of panic: + /// + /// ```should_panic + /// use qemu_api::cell::BqlRefCell; + /// # qemu_api::cell::bql_start_test(); + /// + /// let c = BqlRefCell::new(5); + /// let m = c.borrow(); + /// + /// let b = c.borrow_mut(); // this causes a panic + /// ``` + #[inline] + #[track_caller] + pub fn borrow_mut(&self) -> BqlRefMut<'_, T> { + if let Some(b) = BorrowRefMut::new(&self.borrow) { + #[cfg(feature = "debug_cell")] + { + self.borrowed_at.set(Some(std::panic::Location::caller())); + } + + // SAFETY: this only adjusts a counter + bql_block_unlock(true); + + // SAFETY: `BorrowRefMut` guarantees unique access. + let value = unsafe { NonNull::new_unchecked(self.value.get()) }; + BqlRefMut { + value, + _borrow: b, + marker: PhantomData, + } + } else { + self.panic_already_borrowed() + } + } + + /// Returns a raw pointer to the underlying data in this cell. + /// + /// # Examples + /// + /// ``` + /// use qemu_api::cell::BqlRefCell; + /// + /// let c = BqlRefCell::new(5); + /// + /// let ptr = c.as_ptr(); + /// ``` + #[inline] + pub const fn as_ptr(&self) -> *mut T { + self.value.get() + } +} + +// SAFETY: Same as for std::sync::Mutex. In the end this is a Mutex that is +// stored out-of-line. Even though BqlRefCell includes Cells, they are +// themselves protected by the Big QEMU Lock. Furtheremore, the Big QEMU +// Lock cannot be released while any borrows is active. +unsafe impl<T> Send for BqlRefCell<T> where T: Send {} +unsafe impl<T> Sync for BqlRefCell<T> {} + +impl<T: Clone> Clone for BqlRefCell<T> { + /// # Panics + /// + /// Panics if the value is currently mutably borrowed. + #[inline] + #[track_caller] + fn clone(&self) -> BqlRefCell<T> { + BqlRefCell::new(self.borrow().clone()) + } + + /// # Panics + /// + /// Panics if `source` is currently mutably borrowed. + #[inline] + #[track_caller] + fn clone_from(&mut self, source: &Self) { + self.value.get_mut().clone_from(&source.borrow()) + } +} + +impl<T: Default> Default for BqlRefCell<T> { + /// Creates a `BqlRefCell<T>`, with the `Default` value for T. + #[inline] + fn default() -> BqlRefCell<T> { + BqlRefCell::new(Default::default()) + } +} + +impl<T: PartialEq> PartialEq for BqlRefCell<T> { + /// # Panics + /// + /// Panics if the value in either `BqlRefCell` is currently mutably + /// borrowed. + #[inline] + fn eq(&self, other: &BqlRefCell<T>) -> bool { + *self.borrow() == *other.borrow() + } +} + +impl<T: Eq> Eq for BqlRefCell<T> {} + +impl<T: PartialOrd> PartialOrd for BqlRefCell<T> { + /// # Panics + /// + /// Panics if the value in either `BqlRefCell` is currently mutably + /// borrowed. + #[inline] + fn partial_cmp(&self, other: &BqlRefCell<T>) -> Option<Ordering> { + self.borrow().partial_cmp(&*other.borrow()) + } +} + +impl<T: Ord> Ord for BqlRefCell<T> { + /// # Panics + /// + /// Panics if the value in either `BqlRefCell` is currently mutably + /// borrowed. 
+ #[inline] + fn cmp(&self, other: &BqlRefCell<T>) -> Ordering { + self.borrow().cmp(&*other.borrow()) + } +} + +impl<T> From<T> for BqlRefCell<T> { + /// Creates a new `BqlRefCell<T>` containing the given value. + fn from(t: T) -> BqlRefCell<T> { + BqlRefCell::new(t) + } +} + +struct BorrowRef<'b> { + borrow: &'b Cell<BorrowFlag>, +} + +impl<'b> BorrowRef<'b> { + #[inline] + fn new(borrow: &'b Cell<BorrowFlag>) -> Option<BorrowRef<'b>> { + let b = borrow.get().wrapping_add(1); + if !is_reading(b) { + // Incrementing borrow can result in a non-reading value (<= 0) in these cases: + // 1. It was < 0, i.e. there are writing borrows, so we can't allow a read + // borrow due to Rust's reference aliasing rules + // 2. It was isize::MAX (the max amount of reading borrows) and it overflowed + // into isize::MIN (the max amount of writing borrows) so we can't allow an + // additional read borrow because isize can't represent so many read borrows + // (this can only happen if you mem::forget more than a small constant amount + // of `BqlRef`s, which is not good practice) + None + } else { + // Incrementing borrow can result in a reading value (> 0) in these cases: + // 1. It was = 0, i.e. it wasn't borrowed, and we are taking the first read + // borrow + // 2. It was > 0 and < isize::MAX, i.e. there were read borrows, and isize is + // large enough to represent having one more read borrow + borrow.set(b); + Some(BorrowRef { borrow }) + } + } +} + +impl Drop for BorrowRef<'_> { + #[inline] + fn drop(&mut self) { + let borrow = self.borrow.get(); + debug_assert!(is_reading(borrow)); + self.borrow.set(borrow - 1); + bql_block_unlock(false) + } +} + +impl Clone for BorrowRef<'_> { + #[inline] + fn clone(&self) -> Self { + BorrowRef::new(self.borrow).unwrap() + } +} + +/// Wraps a borrowed reference to a value in a `BqlRefCell` box. +/// A wrapper type for an immutably borrowed value from a `BqlRefCell<T>`. +/// +/// See the [module-level documentation](self) for more. +pub struct BqlRef<'b, T: 'b> { + // NB: we use a pointer instead of `&'b T` to avoid `noalias` violations, because a + // `BqlRef` argument doesn't hold immutability for its whole scope, only until it drops. + // `NonNull` is also covariant over `T`, just like we would have with `&T`. + value: NonNull<T>, + borrow: BorrowRef<'b>, +} + +impl<T> Deref for BqlRef<'_, T> { + type Target = T; + + #[inline] + fn deref(&self) -> &T { + // SAFETY: the value is accessible as long as we hold our borrow. + unsafe { self.value.as_ref() } + } +} + +impl<'b, T> BqlRef<'b, T> { + /// Copies a `BqlRef`. + /// + /// The `BqlRefCell` is already immutably borrowed, so this cannot fail. + /// + /// This is an associated function that needs to be used as + /// `BqlRef::clone(...)`. A `Clone` implementation or a method would + /// interfere with the widespread use of `r.borrow().clone()` to clone + /// the contents of a `BqlRefCell`. 
+ #[must_use] + #[inline] + #[allow(clippy::should_implement_trait)] + pub fn clone(orig: &BqlRef<'b, T>) -> BqlRef<'b, T> { + BqlRef { + value: orig.value, + borrow: orig.borrow.clone(), + } + } +} + +impl<T: fmt::Debug> fmt::Debug for BqlRef<'_, T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + (**self).fmt(f) + } +} + +impl<T: fmt::Display> fmt::Display for BqlRef<'_, T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + (**self).fmt(f) + } +} + +struct BorrowRefMut<'b> { + borrow: &'b Cell<BorrowFlag>, +} + +impl<'b> BorrowRefMut<'b> { + #[inline] + fn new(borrow: &'b Cell<BorrowFlag>) -> Option<BorrowRefMut<'b>> { + // There must currently be no existing references when borrow_mut() is + // called, so we explicitly only allow going from UNUSED to UNUSED - 1. + match borrow.get() { + UNUSED => { + borrow.set(UNUSED - 1); + Some(BorrowRefMut { borrow }) + } + _ => None, + } + } +} + +impl Drop for BorrowRefMut<'_> { + #[inline] + fn drop(&mut self) { + let borrow = self.borrow.get(); + debug_assert!(is_writing(borrow)); + self.borrow.set(borrow + 1); + bql_block_unlock(false) + } +} + +/// A wrapper type for a mutably borrowed value from a `BqlRefCell<T>`. +/// +/// See the [module-level documentation](self) for more. +pub struct BqlRefMut<'b, T: 'b> { + // NB: we use a pointer instead of `&'b mut T` to avoid `noalias` violations, because a + // `BqlRefMut` argument doesn't hold exclusivity for its whole scope, only until it drops. + value: NonNull<T>, + _borrow: BorrowRefMut<'b>, + // `NonNull` is covariant over `T`, so we need to reintroduce invariance. + marker: PhantomData<&'b mut T>, +} + +impl<T> Deref for BqlRefMut<'_, T> { + type Target = T; + + #[inline] + fn deref(&self) -> &T { + // SAFETY: the value is accessible as long as we hold our borrow. + unsafe { self.value.as_ref() } + } +} + +impl<T> DerefMut for BqlRefMut<'_, T> { + #[inline] + fn deref_mut(&mut self) -> &mut T { + // SAFETY: the value is accessible as long as we hold our borrow. + unsafe { self.value.as_mut() } + } +} + +impl<T: fmt::Debug> fmt::Debug for BqlRefMut<'_, T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + (**self).fmt(f) + } +} + +impl<T: fmt::Display> fmt::Display for BqlRefMut<'_, T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + (**self).fmt(f) + } +} + +/// Stores an opaque value that is shared with C code. +/// +/// Often, C structs can changed when calling a C function even if they are +/// behind a shared Rust reference, or they can be initialized lazily and have +/// invalid bit patterns (e.g. `3` for a [`bool`]). This goes against Rust's +/// strict aliasing rules, which normally prevent mutation through shared +/// references. +/// +/// Wrapping the struct with `Opaque<T>` ensures that the Rust compiler does not +/// assume the usual constraints that Rust structs require, and allows using +/// shared references on the Rust side. +/// +/// `Opaque<T>` is `#[repr(transparent)]`, so that it matches the memory layout +/// of `T`. +#[repr(transparent)] +pub struct Opaque<T> { + value: UnsafeCell<MaybeUninit<T>>, + // PhantomPinned also allows multiple references to the `Opaque<T>`, i.e. + // one `&mut Opaque<T>` can coexist with a `&mut T` or any number of `&T`; + // see https://docs.rs/pinned-aliasable/latest/pinned_aliasable/. 
+ _pin: PhantomPinned, +} + +impl<T> Opaque<T> { + /// Creates a new shared reference from a C pointer + /// + /// # Safety + /// + /// The pointer must be valid, though it need not point to a valid value. + pub unsafe fn from_raw<'a>(ptr: *mut T) -> &'a Self { + let ptr = NonNull::new(ptr).unwrap().cast::<Self>(); + // SAFETY: Self is a transparent wrapper over T + unsafe { ptr.as_ref() } + } + + /// Creates a new opaque object with uninitialized contents. + /// + /// # Safety + /// + /// Ultimately the pointer to the returned value will be dereferenced + /// in another `unsafe` block, for example when passing it to a C function, + /// but the functions containing the dereference are usually safe. The + /// value returned from `uninit()` must be initialized and pinned before + /// calling them. + #[allow(clippy::missing_const_for_fn)] + pub unsafe fn uninit() -> Self { + Self { + value: UnsafeCell::new(MaybeUninit::uninit()), + _pin: PhantomPinned, + } + } + + /// Creates a new opaque object with zeroed contents. + /// + /// # Safety + /// + /// Ultimately the pointer to the returned value will be dereferenced + /// in another `unsafe` block, for example when passing it to a C function, + /// but the functions containing the dereference are usually safe. The + /// value returned from `uninit()` must be pinned (and possibly initialized) + /// before calling them. + #[allow(clippy::missing_const_for_fn)] + pub unsafe fn zeroed() -> Self { + Self { + value: UnsafeCell::new(MaybeUninit::zeroed()), + _pin: PhantomPinned, + } + } + + /// Returns a raw mutable pointer to the opaque data. + pub const fn as_mut_ptr(&self) -> *mut T { + UnsafeCell::get(&self.value).cast() + } + + /// Returns a raw pointer to the opaque data. + pub const fn as_ptr(&self) -> *const T { + self.as_mut_ptr().cast_const() + } + + /// Returns a raw pointer to the opaque data that can be passed to a + /// C function as `void *`. + pub const fn as_void_ptr(&self) -> *mut std::ffi::c_void { + UnsafeCell::get(&self.value).cast() + } + + /// Converts a raw pointer to the wrapped type. + pub const fn raw_get(slot: *mut Self) -> *mut T { + // Compare with Linux's raw_get method, which goes through an UnsafeCell + // because it takes a *const Self instead. + slot.cast() + } +} + +impl<T> fmt::Debug for Opaque<T> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let mut name: String = "Opaque<".to_string(); + name += std::any::type_name::<T>(); + name += ">"; + f.debug_tuple(&name).field(&self.as_ptr()).finish() + } +} + +impl<T: Default> Opaque<T> { + /// Creates a new opaque object with default contents. + /// + /// # Safety + /// + /// Ultimately the pointer to the returned value will be dereferenced + /// in another `unsafe` block, for example when passing it to a C function, + /// but the functions containing the dereference are usually safe. The + /// value returned from `uninit()` must be pinned before calling them. + pub unsafe fn new() -> Self { + Self { + value: UnsafeCell::new(MaybeUninit::new(T::default())), + _pin: PhantomPinned, + } + } +} + +/// Annotates [`Self`] as a transparent wrapper for another type. +/// +/// Usually defined via the [`qemu_api_macros::Wrapper`] derive macro. 
+/// +/// # Examples +/// +/// ``` +/// # use std::mem::ManuallyDrop; +/// # use qemu_api::cell::Wrapper; +/// #[repr(transparent)] +/// pub struct Example { +/// inner: ManuallyDrop<String>, +/// } +/// +/// unsafe impl Wrapper for Example { +/// type Wrapped = String; +/// } +/// ``` +/// +/// # Safety +/// +/// `Self` must be a `#[repr(transparent)]` wrapper for the `Wrapped` type, +/// whether directly or indirectly. +/// +/// # Methods +/// +/// By convention, types that implement Wrapper also implement the following +/// methods: +/// +/// ```ignore +/// pub const unsafe fn from_raw<'a>(value: *mut Self::Wrapped) -> &'a Self; +/// pub const unsafe fn as_mut_ptr(&self) -> *mut Self::Wrapped; +/// pub const unsafe fn as_ptr(&self) -> *const Self::Wrapped; +/// pub const unsafe fn raw_get(slot: *mut Self) -> *const Self::Wrapped; +/// ``` +/// +/// They are not defined here to allow them to be `const`. +pub unsafe trait Wrapper { + type Wrapped; +} + +unsafe impl<T> Wrapper for Opaque<T> { + type Wrapped = T; +} diff --git a/rust/qemu-api/src/chardev.rs b/rust/qemu-api/src/chardev.rs new file mode 100644 index 0000000..6e0590d --- /dev/null +++ b/rust/qemu-api/src/chardev.rs @@ -0,0 +1,260 @@ +// Copyright 2024 Red Hat, Inc. +// Author(s): Paolo Bonzini <pbonzini@redhat.com> +// SPDX-License-Identifier: GPL-2.0-or-later + +//! Bindings for character devices +//! +//! Character devices in QEMU can run under the big QEMU lock or in a separate +//! `GMainContext`. Here we only support the former, because the bindings +//! enforce that the BQL is taken whenever the functions in [`CharBackend`] are +//! called. + +use std::{ + ffi::{c_int, c_void, CStr}, + fmt::{self, Debug}, + io::{self, ErrorKind, Write}, + marker::PhantomPinned, + ptr::addr_of_mut, + slice, +}; + +use crate::{ + bindings, + callbacks::FnCall, + cell::{BqlRefMut, Opaque}, + prelude::*, +}; + +/// A safe wrapper around [`bindings::Chardev`]. +#[repr(transparent)] +#[derive(qemu_api_macros::Wrapper)] +pub struct Chardev(Opaque<bindings::Chardev>); + +pub type ChardevClass = bindings::ChardevClass; +pub type Event = bindings::QEMUChrEvent; + +/// A safe wrapper around [`bindings::CharBackend`], denoting the character +/// back-end that is used for example by a device. Compared to the +/// underlying C struct it adds BQL protection, and is marked as pinned +/// because the QOM object ([`bindings::Chardev`]) contains a pointer to +/// the `CharBackend`. 
+pub struct CharBackend { + inner: BqlRefCell<bindings::CharBackend>, + _pin: PhantomPinned, +} + +impl Write for BqlRefMut<'_, bindings::CharBackend> { + fn flush(&mut self) -> io::Result<()> { + Ok(()) + } + + fn write(&mut self, buf: &[u8]) -> io::Result<usize> { + let chr: &mut bindings::CharBackend = self; + + let len = buf.len().try_into().unwrap(); + let r = unsafe { bindings::qemu_chr_fe_write(addr_of_mut!(*chr), buf.as_ptr(), len) }; + errno::into_io_result(r).map(|cnt| cnt as usize) + } + + fn write_all(&mut self, buf: &[u8]) -> io::Result<()> { + let chr: &mut bindings::CharBackend = self; + + let len = buf.len().try_into().unwrap(); + let r = unsafe { bindings::qemu_chr_fe_write_all(addr_of_mut!(*chr), buf.as_ptr(), len) }; + errno::into_io_result(r).and_then(|cnt| { + if cnt as usize == buf.len() { + Ok(()) + } else { + Err(ErrorKind::WriteZero.into()) + } + }) + } +} + +impl Debug for CharBackend { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + // SAFETY: accessed just to print the values + let chr = self.inner.as_ptr(); + Debug::fmt(unsafe { &*chr }, f) + } +} + +// FIXME: use something like PinnedDrop from the pinned_init crate +impl Drop for CharBackend { + fn drop(&mut self) { + self.disable_handlers(); + } +} + +impl CharBackend { + /// Enable the front-end's character device handlers, if there is an + /// associated `Chardev`. + pub fn enable_handlers< + 'chardev, + 'owner: 'chardev, + T, + CanReceiveFn: for<'a> FnCall<(&'a T,), u32>, + ReceiveFn: for<'a, 'b> FnCall<(&'a T, &'b [u8])>, + EventFn: for<'a> FnCall<(&'a T, Event)>, + >( + // When "self" is dropped, the handlers are automatically disabled. + // However, this is not necessarily true if the owner is dropped. + // So require the owner to outlive the character device. 
+ &'chardev self, + owner: &'owner T, + _can_receive: CanReceiveFn, + _receive: ReceiveFn, + _event: EventFn, + ) { + unsafe extern "C" fn rust_can_receive_cb<T, F: for<'a> FnCall<(&'a T,), u32>>( + opaque: *mut c_void, + ) -> c_int { + // SAFETY: the values are safe according to the contract of + // enable_handlers() and qemu_chr_fe_set_handlers() + let owner: &T = unsafe { &*(opaque.cast::<T>()) }; + let r = F::call((owner,)); + r.try_into().unwrap() + } + + unsafe extern "C" fn rust_receive_cb<T, F: for<'a, 'b> FnCall<(&'a T, &'b [u8])>>( + opaque: *mut c_void, + buf: *const u8, + size: c_int, + ) { + // SAFETY: the values are safe according to the contract of + // enable_handlers() and qemu_chr_fe_set_handlers() + let owner: &T = unsafe { &*(opaque.cast::<T>()) }; + let buf = unsafe { slice::from_raw_parts(buf, size.try_into().unwrap()) }; + F::call((owner, buf)) + } + + unsafe extern "C" fn rust_event_cb<T, F: for<'a> FnCall<(&'a T, Event)>>( + opaque: *mut c_void, + event: Event, + ) { + // SAFETY: the values are safe according to the contract of + // enable_handlers() and qemu_chr_fe_set_handlers() + let owner: &T = unsafe { &*(opaque.cast::<T>()) }; + F::call((owner, event)) + } + + let _: () = CanReceiveFn::ASSERT_IS_SOME; + let receive_cb: Option<unsafe extern "C" fn(*mut c_void, *const u8, c_int)> = + if ReceiveFn::is_some() { + Some(rust_receive_cb::<T, ReceiveFn>) + } else { + None + }; + let event_cb: Option<unsafe extern "C" fn(*mut c_void, Event)> = if EventFn::is_some() { + Some(rust_event_cb::<T, EventFn>) + } else { + None + }; + + let mut chr = self.inner.borrow_mut(); + // SAFETY: the borrow promises that the BQL is taken + unsafe { + bindings::qemu_chr_fe_set_handlers( + addr_of_mut!(*chr), + Some(rust_can_receive_cb::<T, CanReceiveFn>), + receive_cb, + event_cb, + None, + (owner as *const T).cast_mut().cast::<c_void>(), + core::ptr::null_mut(), + true, + ); + } + } + + /// Disable the front-end's character device handlers. + pub fn disable_handlers(&self) { + let mut chr = self.inner.borrow_mut(); + // SAFETY: the borrow promises that the BQL is taken + unsafe { + bindings::qemu_chr_fe_set_handlers( + addr_of_mut!(*chr), + None, + None, + None, + None, + core::ptr::null_mut(), + core::ptr::null_mut(), + true, + ); + } + } + + /// Notify that the frontend is ready to receive data. + pub fn accept_input(&self) { + let mut chr = self.inner.borrow_mut(); + // SAFETY: the borrow promises that the BQL is taken + unsafe { bindings::qemu_chr_fe_accept_input(addr_of_mut!(*chr)) } + } + + /// Temporarily borrow the character device, allowing it to be used + /// as an implementor of `Write`. Note that it is not valid to drop + /// the big QEMU lock while the character device is borrowed, as + /// that might cause C code to write to the character device. + pub fn borrow_mut(&self) -> impl Write + '_ { + self.inner.borrow_mut() + } + + /// Send a continuous stream of zero bits on the line if `enabled` is + /// true, or a short stream if `enabled` is false. + pub fn send_break(&self, long: bool) -> io::Result<()> { + let mut chr = self.inner.borrow_mut(); + let mut duration: c_int = long.into(); + // SAFETY: the borrow promises that the BQL is taken + let r = unsafe { + bindings::qemu_chr_fe_ioctl( + addr_of_mut!(*chr), + bindings::CHR_IOCTL_SERIAL_SET_BREAK as i32, + addr_of_mut!(duration).cast::<c_void>(), + ) + }; + + errno::into_io_result(r).map(|_| ()) + } + + /// Write data to a character backend from the front end. 
This function
+    /// will send data from the front end to the back end. Unlike
+    /// `write_all`, this function does not block: it returns after a partial
+    /// write if the back end cannot consume all of the data.
+    ///
+    /// Returns the number of bytes consumed (0 if there is no associated
+    /// Chardev) or an error.
+    pub fn write(&self, buf: &[u8]) -> io::Result<usize> {
+        let len = buf.len().try_into().unwrap();
+        // SAFETY: qemu_chr_fe_write is thread-safe
+        let r = unsafe { bindings::qemu_chr_fe_write(self.inner.as_ptr(), buf.as_ptr(), len) };
+        errno::into_io_result(r).map(|cnt| cnt as usize)
+    }
+
+    /// Write data to a character backend from the front end. This function
+    /// will send data from the front end to the back end. Unlike
+    /// `write`, this function will block if the back end cannot
+    /// consume all of the data attempted to be written.
+    ///
+    /// Returns `Ok(())` once all of the data has been consumed, or an error
+    /// otherwise (a `WriteZero` error if the back end consumed only part of
+    /// the data, for example because there is no associated Chardev).
+    pub fn write_all(&self, buf: &[u8]) -> io::Result<()> {
+        let len = buf.len().try_into().unwrap();
+        // SAFETY: qemu_chr_fe_write_all is thread-safe
+        let r = unsafe { bindings::qemu_chr_fe_write_all(self.inner.as_ptr(), buf.as_ptr(), len) };
+        errno::into_io_result(r).and_then(|cnt| {
+            if cnt as usize == buf.len() {
+                Ok(())
+            } else {
+                Err(ErrorKind::WriteZero.into())
+            }
+        })
+    }
+}
+
+unsafe impl ObjectType for Chardev {
+    type Class = ChardevClass;
+    const TYPE_NAME: &'static CStr =
+        unsafe { CStr::from_bytes_with_nul_unchecked(bindings::TYPE_CHARDEV) };
+}
+qom_isa!(Chardev: Object);
diff --git a/rust/qemu-api/src/errno.rs b/rust/qemu-api/src/errno.rs
new file mode 100644
index 0000000..18d1014
--- /dev/null
+++ b/rust/qemu-api/src/errno.rs
@@ -0,0 +1,345 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+//! Utility functions to convert `errno` to and from
+//! [`io::Error`]/[`io::Result`]
+//!
+//! QEMU C functions often have a "positive success/negative `errno`" calling
+//! convention. This module provides functions to portably convert an integer
+//! into an [`io::Result`] and back.
+
+use std::{convert::TryFrom, io, io::ErrorKind};
+
+/// An `errno` value that can be converted into an [`io::Error`]
+pub struct Errno(pub u16);
+
+// On Unix, from_raw_os_error takes an errno value and OS errors
+// are printed using strerror. On Windows however it takes a
+// GetLastError() value; therefore we need to convert errno values
+// into io::Error by hand. This is the same mapping that the
+// standard library uses to retrieve the kind of OS errors
+// (`std::sys::pal::unix::decode_error_kind`).
+impl From<Errno> for ErrorKind {
+    fn from(value: Errno) -> ErrorKind {
+        use ErrorKind::*;
+        let Errno(errno) = value;
+        match i32::from(errno) {
+            libc::EPERM | libc::EACCES => PermissionDenied,
+            libc::ENOENT => NotFound,
+            libc::EINTR => Interrupted,
+            x if x == libc::EAGAIN || x == libc::EWOULDBLOCK => WouldBlock,
+            libc::ENOMEM => OutOfMemory,
+            libc::EEXIST => AlreadyExists,
+            libc::EINVAL => InvalidInput,
+            libc::EPIPE => BrokenPipe,
+            libc::EADDRINUSE => AddrInUse,
+            libc::EADDRNOTAVAIL => AddrNotAvailable,
+            libc::ECONNABORTED => ConnectionAborted,
+            libc::ECONNREFUSED => ConnectionRefused,
+            libc::ECONNRESET => ConnectionReset,
+            libc::ENOTCONN => NotConnected,
+            libc::ENOTSUP => Unsupported,
+            libc::ETIMEDOUT => TimedOut,
+            _ => Other,
+        }
+    }
+}
+
+// This is used on Windows for all io::Errors, but also on Unix if the
+// io::Error does not have a raw OS error. This is the reversed
+// mapping of the above; EIO is returned for unknown ErrorKinds.
+impl From<io::ErrorKind> for Errno { + fn from(value: io::ErrorKind) -> Errno { + use ErrorKind::*; + let errno = match value { + // can be both EPERM or EACCES :( pick one + PermissionDenied => libc::EPERM, + NotFound => libc::ENOENT, + Interrupted => libc::EINTR, + WouldBlock => libc::EAGAIN, + OutOfMemory => libc::ENOMEM, + AlreadyExists => libc::EEXIST, + InvalidInput => libc::EINVAL, + BrokenPipe => libc::EPIPE, + AddrInUse => libc::EADDRINUSE, + AddrNotAvailable => libc::EADDRNOTAVAIL, + ConnectionAborted => libc::ECONNABORTED, + ConnectionRefused => libc::ECONNREFUSED, + ConnectionReset => libc::ECONNRESET, + NotConnected => libc::ENOTCONN, + Unsupported => libc::ENOTSUP, + TimedOut => libc::ETIMEDOUT, + _ => libc::EIO, + }; + Errno(errno as u16) + } +} + +impl From<Errno> for io::Error { + #[cfg(unix)] + fn from(value: Errno) -> io::Error { + let Errno(errno) = value; + io::Error::from_raw_os_error(errno.into()) + } + + #[cfg(windows)] + fn from(value: Errno) -> io::Error { + let error_kind: ErrorKind = value.into(); + error_kind.into() + } +} + +impl From<io::Error> for Errno { + fn from(value: io::Error) -> Errno { + if cfg!(unix) { + if let Some(errno) = value.raw_os_error() { + return Errno(u16::try_from(errno).unwrap()); + } + } + value.kind().into() + } +} + +/// Internal traits; used to enable [`into_io_result`] and [`into_neg_errno`] +/// for the "right" set of types. +mod traits { + use super::Errno; + + /// A signed type that can be converted into an + /// [`io::Result`](std::io::Result) + pub trait GetErrno { + /// Unsigned variant of `Self`, used as the type for the `Ok` case. + type Out; + + /// Return `Ok(self)` if positive, `Err(Errno(-self))` if negative + fn into_errno_result(self) -> Result<Self::Out, Errno>; + } + + /// A type that can be taken out of an [`io::Result`](std::io::Result) and + /// converted into "positive success/negative `errno`" convention. + pub trait MergeErrno { + /// Signed variant of `Self`, used as the return type of + /// [`into_neg_errno`](super::into_neg_errno). + type Out: From<u16> + std::ops::Neg<Output = Self::Out>; + + /// Return `self`, asserting that it is in range + fn map_ok(self) -> Self::Out; + } + + macro_rules! get_errno { + ($t:ty, $out:ty) => { + impl GetErrno for $t { + type Out = $out; + fn into_errno_result(self) -> Result<Self::Out, Errno> { + match self { + 0.. => Ok(self as $out), + -65535..=-1 => Err(Errno(-self as u16)), + _ => panic!("{self} is not a negative errno"), + } + } + } + }; + } + + get_errno!(i32, u32); + get_errno!(i64, u64); + get_errno!(isize, usize); + + macro_rules! merge_errno { + ($t:ty, $out:ty) => { + impl MergeErrno for $t { + type Out = $out; + fn map_ok(self) -> Self::Out { + self.try_into().unwrap() + } + } + }; + } + + merge_errno!(u8, i32); + merge_errno!(u16, i32); + merge_errno!(u32, i32); + merge_errno!(u64, i64); + + impl MergeErrno for () { + type Out = i32; + fn map_ok(self) -> i32 { + 0 + } + } +} + +use traits::{GetErrno, MergeErrno}; + +/// Convert an integer value into a [`io::Result`]. +/// +/// Positive values are turned into an `Ok` result; negative values +/// are interpreted as negated `errno` and turned into an `Err`. 
+/// +/// ``` +/// # use qemu_api::errno::into_io_result; +/// # use std::io::ErrorKind; +/// let ok = into_io_result(1i32).unwrap(); +/// assert_eq!(ok, 1u32); +/// +/// let err = into_io_result(-1i32).unwrap_err(); // -EPERM +/// assert_eq!(err.kind(), ErrorKind::PermissionDenied); +/// ``` +/// +/// # Panics +/// +/// Since the result is an unsigned integer, negative values must +/// be close to 0; values that are too far away are considered +/// likely overflows and will panic: +/// +/// ```should_panic +/// # use qemu_api::errno::into_io_result; +/// # #[allow(dead_code)] +/// let err = into_io_result(-0x1234_5678i32); // panic +/// ``` +pub fn into_io_result<T: GetErrno>(value: T) -> io::Result<T::Out> { + value.into_errno_result().map_err(Into::into) +} + +/// Convert a [`Result`] into an integer value, using negative `errno` +/// values to report errors. +/// +/// ``` +/// # use qemu_api::errno::into_neg_errno; +/// # use std::io::{self, ErrorKind}; +/// let ok: io::Result<()> = Ok(()); +/// assert_eq!(into_neg_errno(ok), 0); +/// +/// let err: io::Result<()> = Err(ErrorKind::InvalidInput.into()); +/// assert_eq!(into_neg_errno(err), -22); // -EINVAL +/// ``` +/// +/// Since this module also provides the ability to convert [`io::Error`] +/// to an `errno` value, [`io::Result`] is the most commonly used type +/// for the argument of this function: +/// +/// # Panics +/// +/// Since the result is a signed integer, integer `Ok` values must remain +/// positive: +/// +/// ```should_panic +/// # use qemu_api::errno::into_neg_errno; +/// # use std::io; +/// let err: io::Result<u32> = Ok(0x8899_AABB); +/// into_neg_errno(err) // panic +/// # ; +/// ``` +pub fn into_neg_errno<T: MergeErrno, E: Into<Errno>>(value: Result<T, E>) -> T::Out { + match value { + Ok(x) => x.map_ok(), + Err(err) => -T::Out::from(err.into().0), + } +} + +#[cfg(test)] +mod tests { + use std::io::ErrorKind; + + use super::*; + use crate::assert_match; + + #[test] + pub fn test_from_u8() { + let ok: io::Result<_> = Ok(42u8); + assert_eq!(into_neg_errno(ok), 42); + + let err: io::Result<u8> = Err(io::ErrorKind::PermissionDenied.into()); + assert_eq!(into_neg_errno(err), -1); + + if cfg!(unix) { + let os_err: io::Result<u8> = Err(io::Error::from_raw_os_error(10)); + assert_eq!(into_neg_errno(os_err), -10); + } + } + + #[test] + pub fn test_from_u16() { + let ok: io::Result<_> = Ok(1234u16); + assert_eq!(into_neg_errno(ok), 1234); + + let err: io::Result<u16> = Err(io::ErrorKind::PermissionDenied.into()); + assert_eq!(into_neg_errno(err), -1); + + if cfg!(unix) { + let os_err: io::Result<u16> = Err(io::Error::from_raw_os_error(10)); + assert_eq!(into_neg_errno(os_err), -10); + } + } + + #[test] + pub fn test_i32() { + assert_match!(into_io_result(1234i32), Ok(1234)); + + let err = into_io_result(-1i32).unwrap_err(); + #[cfg(unix)] + assert_match!(err.raw_os_error(), Some(1)); + assert_match!(err.kind(), ErrorKind::PermissionDenied); + } + + #[test] + pub fn test_from_u32() { + let ok: io::Result<_> = Ok(1234u32); + assert_eq!(into_neg_errno(ok), 1234); + + let err: io::Result<u32> = Err(io::ErrorKind::PermissionDenied.into()); + assert_eq!(into_neg_errno(err), -1); + + if cfg!(unix) { + let os_err: io::Result<u32> = Err(io::Error::from_raw_os_error(10)); + assert_eq!(into_neg_errno(os_err), -10); + } + } + + #[test] + pub fn test_i64() { + assert_match!(into_io_result(1234i64), Ok(1234)); + + let err = into_io_result(-22i64).unwrap_err(); + #[cfg(unix)] + assert_match!(err.raw_os_error(), Some(22)); + 
assert_match!(err.kind(), ErrorKind::InvalidInput); + } + + #[test] + pub fn test_from_u64() { + let ok: io::Result<_> = Ok(1234u64); + assert_eq!(into_neg_errno(ok), 1234); + + let err: io::Result<u64> = Err(io::ErrorKind::InvalidInput.into()); + assert_eq!(into_neg_errno(err), -22); + + if cfg!(unix) { + let os_err: io::Result<u64> = Err(io::Error::from_raw_os_error(6)); + assert_eq!(into_neg_errno(os_err), -6); + } + } + + #[test] + pub fn test_isize() { + assert_match!(into_io_result(1234isize), Ok(1234)); + + let err = into_io_result(-4isize).unwrap_err(); + #[cfg(unix)] + assert_match!(err.raw_os_error(), Some(4)); + assert_match!(err.kind(), ErrorKind::Interrupted); + } + + #[test] + pub fn test_from_unit() { + let ok: io::Result<_> = Ok(()); + assert_eq!(into_neg_errno(ok), 0); + + let err: io::Result<()> = Err(io::ErrorKind::OutOfMemory.into()); + assert_eq!(into_neg_errno(err), -12); + + if cfg!(unix) { + let os_err: io::Result<()> = Err(io::Error::from_raw_os_error(2)); + assert_eq!(into_neg_errno(os_err), -2); + } + } +} diff --git a/rust/qemu-api/src/error.rs b/rust/qemu-api/src/error.rs new file mode 100644 index 0000000..e114fc4 --- /dev/null +++ b/rust/qemu-api/src/error.rs @@ -0,0 +1,416 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +//! Error propagation for QEMU Rust code +//! +//! This module contains [`Error`], the bridge between Rust errors and +//! [`Result`](std::result::Result)s and QEMU's C [`Error`](bindings::Error) +//! struct. +//! +//! For FFI code, [`Error`] provides functions to simplify conversion between +//! the Rust ([`Result<>`](std::result::Result)) and C (`Error**`) conventions: +//! +//! * [`ok_or_propagate`](crate::Error::ok_or_propagate), +//! [`bool_or_propagate`](crate::Error::bool_or_propagate), +//! [`ptr_or_propagate`](crate::Error::ptr_or_propagate) can be used to build +//! a C return value while also propagating an error condition +//! +//! * [`err_or_else`](crate::Error::err_or_else) and +//! [`err_or_unit`](crate::Error::err_or_unit) can be used to build a `Result` +//! +//! This module is most commonly used at the boundary between C and Rust code; +//! other code will usually access it through the +//! [`qemu_api::Result`](crate::Result) type alias, and will use the +//! [`std::error::Error`] interface to let C errors participate in Rust's error +//! handling functionality. +//! +//! Rust code can also create use this module to create an error object that +//! will be passed up to C code, though in most cases this will be done +//! transparently through the `?` operator. Errors can be constructed from a +//! simple error string, from an [`anyhow::Error`] to pass any other Rust error +//! type up to C code, or from a combination of the two. +//! +//! The third case, corresponding to [`Error::with_error`], is the only one that +//! requires mentioning [`qemu_api::Error`](crate::Error) explicitly. Similar +//! to how QEMU's C code handles errno values, the string and the +//! `anyhow::Error` object will be concatenated with `:` as the separator. 
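+//!
+//! For example, a C-callable entry point could be written as in the
+//! following sketch; the `frobnicate` functions are hypothetical:
+//!
+//! ```ignore
+//! use qemu_api::{bindings, Error, Result};
+//!
+//! fn frobnicate(value: u32) -> Result<()> {
+//!     if value > 31 {
+//!         return Err(format!("invalid value {value}").into());
+//!     }
+//!     Ok(())
+//! }
+//!
+//! unsafe extern "C" fn qemu_rs_frobnicate(value: u32, errp: *mut *mut bindings::Error) -> bool {
+//!     // SAFETY: errp is received from C code and is a valid Error** destination.
+//!     unsafe { Error::bool_or_propagate(frobnicate(value), errp) }
+//! }
+//! ```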
+ +use std::{ + borrow::Cow, + ffi::{c_char, c_int, c_void, CStr}, + fmt::{self, Display}, + panic, ptr, +}; + +use foreign::{prelude::*, OwnedPointer}; + +use crate::bindings; + +pub type Result<T> = std::result::Result<T, Error>; + +#[derive(Debug)] +pub struct Error { + msg: Option<Cow<'static, str>>, + /// Appends the print string of the error to the msg if not None + cause: Option<anyhow::Error>, + file: &'static str, + line: u32, +} + +impl std::error::Error for Error { + fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { + self.cause.as_ref().map(AsRef::as_ref) + } + + #[allow(deprecated)] + fn description(&self) -> &str { + self.msg + .as_deref() + .or_else(|| self.cause.as_deref().map(std::error::Error::description)) + .expect("no message nor cause?") + } +} + +impl Display for Error { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let mut prefix = ""; + if let Some(ref msg) = self.msg { + write!(f, "{msg}")?; + prefix = ": "; + } + if let Some(ref cause) = self.cause { + write!(f, "{prefix}{cause}")?; + } else if prefix.is_empty() { + panic!("no message nor cause?"); + } + Ok(()) + } +} + +impl From<String> for Error { + #[track_caller] + fn from(msg: String) -> Self { + let location = panic::Location::caller(); + Error { + msg: Some(Cow::Owned(msg)), + cause: None, + file: location.file(), + line: location.line(), + } + } +} + +impl From<&'static str> for Error { + #[track_caller] + fn from(msg: &'static str) -> Self { + let location = panic::Location::caller(); + Error { + msg: Some(Cow::Borrowed(msg)), + cause: None, + file: location.file(), + line: location.line(), + } + } +} + +impl From<anyhow::Error> for Error { + #[track_caller] + fn from(error: anyhow::Error) -> Self { + let location = panic::Location::caller(); + Error { + msg: None, + cause: Some(error), + file: location.file(), + line: location.line(), + } + } +} + +impl Error { + /// Create a new error, prepending `msg` to the + /// description of `cause` + #[track_caller] + pub fn with_error(msg: impl Into<Cow<'static, str>>, cause: impl Into<anyhow::Error>) -> Self { + let location = panic::Location::caller(); + Error { + msg: Some(msg.into()), + cause: Some(cause.into()), + file: location.file(), + line: location.line(), + } + } + + /// Consume a result, returning `false` if it is an error and + /// `true` if it is successful. The error is propagated into + /// `errp` like the C API `error_propagate` would do. + /// + /// # Safety + /// + /// `errp` must be a valid argument to `error_propagate`; + /// typically it is received from C code and need not be + /// checked further at the Rust↔C boundary. + pub unsafe fn bool_or_propagate(result: Result<()>, errp: *mut *mut bindings::Error) -> bool { + // SAFETY: caller guarantees errp is valid + unsafe { Self::ok_or_propagate(result, errp) }.is_some() + } + + /// Consume a result, returning a `NULL` pointer if it is an error and + /// a C representation of the contents if it is successful. This is + /// similar to the C API `error_propagate`, but it panics if `*errp` + /// is not `NULL`. + /// + /// # Safety + /// + /// `errp` must be a valid argument to `error_propagate`; + /// typically it is received from C code and need not be + /// checked further at the Rust↔C boundary. + /// + /// See [`propagate`](Error::propagate) for more information. 
+ #[must_use] + pub unsafe fn ptr_or_propagate<T: CloneToForeign>( + result: Result<T>, + errp: *mut *mut bindings::Error, + ) -> *mut T::Foreign { + // SAFETY: caller guarantees errp is valid + unsafe { Self::ok_or_propagate(result, errp) }.clone_to_foreign_ptr() + } + + /// Consume a result in the same way as `self.ok()`, but also propagate + /// a possible error into `errp`. This is similar to the C API + /// `error_propagate`, but it panics if `*errp` is not `NULL`. + /// + /// # Safety + /// + /// `errp` must be a valid argument to `error_propagate`; + /// typically it is received from C code and need not be + /// checked further at the Rust↔C boundary. + /// + /// See [`propagate`](Error::propagate) for more information. + pub unsafe fn ok_or_propagate<T>( + result: Result<T>, + errp: *mut *mut bindings::Error, + ) -> Option<T> { + result.map_err(|err| unsafe { err.propagate(errp) }).ok() + } + + /// Equivalent of the C function `error_propagate`. Fill `*errp` + /// with the information container in `self` if `errp` is not NULL; + /// then consume it. + /// + /// This is similar to the C API `error_propagate`, but it panics if + /// `*errp` is not `NULL`. + /// + /// # Safety + /// + /// `errp` must be a valid argument to `error_propagate`; it can be + /// `NULL` or it can point to any of: + /// * `error_abort` + /// * `error_fatal` + /// * a local variable of (C) type `Error *` + /// + /// Typically `errp` is received from C code and need not be + /// checked further at the Rust↔C boundary. + pub unsafe fn propagate(self, errp: *mut *mut bindings::Error) { + if errp.is_null() { + return; + } + + // SAFETY: caller guarantees that errp and *errp are valid + unsafe { + assert_eq!(*errp, ptr::null_mut()); + bindings::error_propagate(errp, self.clone_to_foreign_ptr()); + } + } + + /// Convert a C `Error*` into a Rust `Result`, using + /// `Ok(())` if `c_error` is NULL. Free the `Error*`. + /// + /// # Safety + /// + /// `c_error` must be `NULL` or valid; typically it was initialized + /// with `ptr::null_mut()` and passed by reference to a C function. + pub unsafe fn err_or_unit(c_error: *mut bindings::Error) -> Result<()> { + // SAFETY: caller guarantees c_error is valid + unsafe { Self::err_or_else(c_error, || ()) } + } + + /// Convert a C `Error*` into a Rust `Result`, calling `f()` to + /// obtain an `Ok` value if `c_error` is NULL. Free the `Error*`. + /// + /// # Safety + /// + /// `c_error` must be `NULL` or point to a valid C [`struct + /// Error`](bindings::Error); typically it was initialized with + /// `ptr::null_mut()` and passed by reference to a C function. 
+ pub unsafe fn err_or_else<T, F: FnOnce() -> T>( + c_error: *mut bindings::Error, + f: F, + ) -> Result<T> { + // SAFETY: caller guarantees c_error is valid + let err = unsafe { Option::<Self>::from_foreign(c_error) }; + match err { + None => Ok(f()), + Some(err) => Err(err), + } + } +} + +impl FreeForeign for Error { + type Foreign = bindings::Error; + + unsafe fn free_foreign(p: *mut bindings::Error) { + // SAFETY: caller guarantees p is valid + unsafe { + bindings::error_free(p); + } + } +} + +impl CloneToForeign for Error { + fn clone_to_foreign(&self) -> OwnedPointer<Self> { + // SAFETY: all arguments are controlled by this function + unsafe { + let err: *mut c_void = libc::malloc(std::mem::size_of::<bindings::Error>()); + let err: &mut bindings::Error = &mut *err.cast(); + *err = bindings::Error { + msg: format!("{self}").clone_to_foreign_ptr(), + err_class: bindings::ERROR_CLASS_GENERIC_ERROR, + src_len: self.file.len() as c_int, + src: self.file.as_ptr().cast::<c_char>(), + line: self.line as c_int, + func: ptr::null_mut(), + hint: ptr::null_mut(), + }; + OwnedPointer::new(err) + } + } +} + +impl FromForeign for Error { + unsafe fn cloned_from_foreign(c_error: *const bindings::Error) -> Self { + // SAFETY: caller guarantees c_error is valid + unsafe { + let error = &*c_error; + let file = if error.src_len < 0 { + // NUL-terminated + CStr::from_ptr(error.src).to_str() + } else { + // Can become str::from_utf8 with Rust 1.87.0 + std::str::from_utf8(std::slice::from_raw_parts( + &*error.src.cast::<u8>(), + error.src_len as usize, + )) + }; + + Error { + msg: FromForeign::cloned_from_foreign(error.msg), + cause: None, + file: file.unwrap(), + line: error.line as u32, + } + } + } +} + +#[cfg(test)] +mod tests { + use std::ffi::CStr; + + use anyhow::anyhow; + use foreign::OwnedPointer; + + use super::*; + use crate::{assert_match, bindings}; + + #[track_caller] + fn error_for_test(msg: &CStr) -> OwnedPointer<Error> { + // SAFETY: all arguments are controlled by this function + let location = panic::Location::caller(); + unsafe { + let err: *mut c_void = libc::malloc(std::mem::size_of::<bindings::Error>()); + let err: &mut bindings::Error = &mut *err.cast(); + *err = bindings::Error { + msg: msg.clone_to_foreign_ptr(), + err_class: bindings::ERROR_CLASS_GENERIC_ERROR, + src_len: location.file().len() as c_int, + src: location.file().as_ptr().cast::<c_char>(), + line: location.line() as c_int, + func: ptr::null_mut(), + hint: ptr::null_mut(), + }; + OwnedPointer::new(err) + } + } + + unsafe fn error_get_pretty<'a>(local_err: *mut bindings::Error) -> &'a CStr { + unsafe { CStr::from_ptr(bindings::error_get_pretty(local_err)) } + } + + #[test] + #[allow(deprecated)] + fn test_description() { + use std::error::Error; + + assert_eq!(super::Error::from("msg").description(), "msg"); + assert_eq!(super::Error::from("msg".to_owned()).description(), "msg"); + } + + #[test] + fn test_display() { + assert_eq!(&*format!("{}", Error::from("msg")), "msg"); + assert_eq!(&*format!("{}", Error::from("msg".to_owned())), "msg"); + assert_eq!(&*format!("{}", Error::from(anyhow!("msg"))), "msg"); + + assert_eq!( + &*format!("{}", Error::with_error("msg", anyhow!("cause"))), + "msg: cause" + ); + } + + #[test] + fn test_bool_or_propagate() { + unsafe { + let mut local_err: *mut bindings::Error = ptr::null_mut(); + + assert!(Error::bool_or_propagate(Ok(()), &mut local_err)); + assert_eq!(local_err, ptr::null_mut()); + + let my_err = Error::from("msg"); + assert!(!Error::bool_or_propagate(Err(my_err), &mut 
local_err)); + assert_ne!(local_err, ptr::null_mut()); + assert_eq!(error_get_pretty(local_err), c"msg"); + bindings::error_free(local_err); + } + } + + #[test] + fn test_ptr_or_propagate() { + unsafe { + let mut local_err: *mut bindings::Error = ptr::null_mut(); + + let ret = Error::ptr_or_propagate(Ok("abc".to_owned()), &mut local_err); + assert_eq!(String::from_foreign(ret), "abc"); + assert_eq!(local_err, ptr::null_mut()); + + let my_err = Error::from("msg"); + assert_eq!( + Error::ptr_or_propagate(Err::<String, _>(my_err), &mut local_err), + ptr::null_mut() + ); + assert_ne!(local_err, ptr::null_mut()); + assert_eq!(error_get_pretty(local_err), c"msg"); + bindings::error_free(local_err); + } + } + + #[test] + fn test_err_or_unit() { + unsafe { + let result = Error::err_or_unit(ptr::null_mut()); + assert_match!(result, Ok(())); + + let err = error_for_test(c"msg"); + let err = Error::err_or_unit(err.into_inner()).unwrap_err(); + assert_eq!(&*format!("{err}"), "msg"); + } + } +} diff --git a/rust/qemu-api/src/irq.rs b/rust/qemu-api/src/irq.rs new file mode 100644 index 0000000..1526e6f --- /dev/null +++ b/rust/qemu-api/src/irq.rs @@ -0,0 +1,115 @@ +// Copyright 2024 Red Hat, Inc. +// Author(s): Paolo Bonzini <pbonzini@redhat.com> +// SPDX-License-Identifier: GPL-2.0-or-later + +//! Bindings for interrupt sources + +use std::{ + ffi::{c_int, CStr}, + marker::PhantomData, + ptr, +}; + +use crate::{ + bindings::{self, qemu_set_irq}, + cell::Opaque, + prelude::*, + qom::ObjectClass, +}; + +/// An opaque wrapper around [`bindings::IRQState`]. +#[repr(transparent)] +#[derive(Debug, qemu_api_macros::Wrapper)] +pub struct IRQState(Opaque<bindings::IRQState>); + +/// Interrupt sources are used by devices to pass changes to a value (typically +/// a boolean). The interrupt sink is usually an interrupt controller or +/// GPIO controller. +/// +/// As far as devices are concerned, interrupt sources are always active-high: +/// for example, `InterruptSource<bool>`'s [`raise`](InterruptSource::raise) +/// method sends a `true` value to the sink. If the guest has to see a +/// different polarity, that change is performed by the board between the +/// device and the interrupt controller. +/// +/// Interrupts are implemented as a pointer to the interrupt "sink", which has +/// type [`IRQState`]. A device exposes its source as a QOM link property using +/// a function such as [`SysBusDeviceMethods::init_irq`], and +/// initially leaves the pointer to a NULL value, representing an unconnected +/// interrupt. To connect it, whoever creates the device fills the pointer with +/// the sink's `IRQState *`, for example using `sysbus_connect_irq`. Because +/// devices are generally shared objects, interrupt sources are an example of +/// the interior mutability pattern. +/// +/// Interrupt sources can only be triggered under the Big QEMU Lock; `BqlCell` +/// allows access from whatever thread has it. +#[derive(Debug)] +#[repr(transparent)] +pub struct InterruptSource<T = bool> +where + c_int: From<T>, +{ + cell: BqlCell<*mut bindings::IRQState>, + _marker: PhantomData<T>, +} + +// SAFETY: the implementation asserts via `BqlCell` that the BQL is taken +unsafe impl<T> Sync for InterruptSource<T> where c_int: From<T> {} + +impl InterruptSource<bool> { + /// Send a low (`false`) value to the interrupt sink. + pub fn lower(&self) { + self.set(false); + } + + /// Send a high-low pulse to the interrupt sink. 
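+    ///
+    /// A sketch of typical use, assuming a device with a hypothetical
+    /// `irq: InterruptSource` field; this raises and then immediately
+    /// lowers the line:
+    ///
+    /// ```ignore
+    /// device.irq.pulse();
+    /// ```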
+ pub fn pulse(&self) { + self.set(true); + self.set(false); + } + + /// Send a high (`true`) value to the interrupt sink. + pub fn raise(&self) { + self.set(true); + } +} + +impl<T> InterruptSource<T> +where + c_int: From<T>, +{ + /// Send `level` to the interrupt sink. + pub fn set(&self, level: T) { + let ptr = self.cell.get(); + // SAFETY: the pointer is retrieved under the BQL and remains valid + // until the BQL is released, which is after qemu_set_irq() is entered. + unsafe { + qemu_set_irq(ptr, level.into()); + } + } + + pub(crate) const fn as_ptr(&self) -> *mut *mut bindings::IRQState { + self.cell.as_ptr() + } + + pub(crate) const fn slice_as_ptr(slice: &[Self]) -> *mut *mut bindings::IRQState { + assert!(!slice.is_empty()); + slice[0].as_ptr() + } +} + +impl Default for InterruptSource { + fn default() -> Self { + InterruptSource { + cell: BqlCell::new(ptr::null_mut()), + _marker: PhantomData, + } + } +} + +unsafe impl ObjectType for IRQState { + type Class = ObjectClass; + const TYPE_NAME: &'static CStr = + unsafe { CStr::from_bytes_with_nul_unchecked(bindings::TYPE_IRQ) }; +} +qom_isa!(IRQState: Object); diff --git a/rust/qemu-api/src/lib.rs b/rust/qemu-api/src/lib.rs new file mode 100644 index 0000000..86dcd8e --- /dev/null +++ b/rust/qemu-api/src/lib.rs @@ -0,0 +1,170 @@ +// Copyright 2024, Linaro Limited +// Author(s): Manos Pitsidianakis <manos.pitsidianakis@linaro.org> +// SPDX-License-Identifier: GPL-2.0-or-later + +#![cfg_attr(not(MESON), doc = include_str!("../README.md"))] +#![deny(clippy::missing_const_for_fn)] + +#[rustfmt::skip] +pub mod bindings; + +// preserve one-item-per-"use" syntax, it is clearer +// for prelude-like modules +#[rustfmt::skip] +pub mod prelude; + +pub mod assertions; +pub mod bitops; +pub mod callbacks; +pub mod cell; +pub mod chardev; +pub mod errno; +pub mod error; +pub mod irq; +pub mod log; +pub mod memory; +pub mod module; +pub mod qdev; +pub mod qom; +pub mod sysbus; +pub mod timer; +pub mod uninit; +pub mod vmstate; +pub mod zeroable; + +use std::{ + alloc::{GlobalAlloc, Layout}, + ffi::c_void, +}; + +pub use error::{Error, Result}; + +#[cfg(HAVE_GLIB_WITH_ALIGNED_ALLOC)] +extern "C" { + fn g_aligned_alloc0( + n_blocks: bindings::gsize, + n_block_bytes: bindings::gsize, + alignment: bindings::gsize, + ) -> bindings::gpointer; + fn g_aligned_free(mem: bindings::gpointer); +} + +#[cfg(not(HAVE_GLIB_WITH_ALIGNED_ALLOC))] +extern "C" { + fn qemu_memalign(alignment: usize, size: usize) -> *mut c_void; + fn qemu_vfree(ptr: *mut c_void); +} + +extern "C" { + fn g_malloc0(n_bytes: bindings::gsize) -> bindings::gpointer; + fn g_free(mem: bindings::gpointer); +} + +/// An allocator that uses the same allocator as QEMU in C. +/// +/// It is enabled by default with the `allocator` feature. +/// +/// To set it up manually as a global allocator in your crate: +/// +/// ```ignore +/// use qemu_api::QemuAllocator; +/// +/// #[global_allocator] +/// static GLOBAL: QemuAllocator = QemuAllocator::new(); +/// ``` +#[derive(Clone, Copy, Debug)] +#[repr(C)] +pub struct QemuAllocator { + _unused: [u8; 0], +} + +#[cfg_attr(all(feature = "allocator", not(test)), global_allocator)] +pub static GLOBAL: QemuAllocator = QemuAllocator::new(); + +impl QemuAllocator { + // From the glibc documentation, on GNU systems, malloc guarantees 16-byte + // alignment on 64-bit systems and 8-byte alignment on 32-bit systems. See + // https://www.gnu.org/software/libc/manual/html_node/Malloc-Examples.html. + // This alignment guarantee also applies to Windows and Android. 
On Darwin + // and OpenBSD, the alignment is 16 bytes on both 64-bit and 32-bit systems. + #[cfg(all( + target_pointer_width = "32", + not(any(target_os = "macos", target_os = "openbsd")) + ))] + pub const DEFAULT_ALIGNMENT_BYTES: Option<usize> = Some(8); + #[cfg(all( + target_pointer_width = "64", + not(any(target_os = "macos", target_os = "openbsd")) + ))] + pub const DEFAULT_ALIGNMENT_BYTES: Option<usize> = Some(16); + #[cfg(all( + any(target_pointer_width = "32", target_pointer_width = "64"), + any(target_os = "macos", target_os = "openbsd") + ))] + pub const DEFAULT_ALIGNMENT_BYTES: Option<usize> = Some(16); + #[cfg(not(any(target_pointer_width = "32", target_pointer_width = "64")))] + pub const DEFAULT_ALIGNMENT_BYTES: Option<usize> = None; + + pub const fn new() -> Self { + Self { _unused: [] } + } +} + +impl Default for QemuAllocator { + fn default() -> Self { + Self::new() + } +} + +// Sanity check. +const _: [(); 8] = [(); ::core::mem::size_of::<*mut c_void>()]; + +unsafe impl GlobalAlloc for QemuAllocator { + unsafe fn alloc(&self, layout: Layout) -> *mut u8 { + if matches!(Self::DEFAULT_ALIGNMENT_BYTES, Some(default) if default.checked_rem(layout.align()) == Some(0)) + { + // SAFETY: g_malloc0() is safe to call. + unsafe { g_malloc0(layout.size().try_into().unwrap()).cast::<u8>() } + } else { + #[cfg(HAVE_GLIB_WITH_ALIGNED_ALLOC)] + { + // SAFETY: g_aligned_alloc0() is safe to call. + unsafe { + g_aligned_alloc0( + layout.size().try_into().unwrap(), + 1, + layout.align().try_into().unwrap(), + ) + .cast::<u8>() + } + } + #[cfg(not(HAVE_GLIB_WITH_ALIGNED_ALLOC))] + { + // SAFETY: qemu_memalign() is safe to call. + unsafe { qemu_memalign(layout.align(), layout.size()).cast::<u8>() } + } + } + } + + unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) { + if matches!(Self::DEFAULT_ALIGNMENT_BYTES, Some(default) if default.checked_rem(layout.align()) == Some(0)) + { + // SAFETY: `ptr` must have been allocated by Self::alloc thus a valid + // glib-allocated pointer, so `g_free`ing is safe. + unsafe { g_free(ptr.cast::<_>()) } + } else { + #[cfg(HAVE_GLIB_WITH_ALIGNED_ALLOC)] + { + // SAFETY: `ptr` must have been allocated by Self::alloc thus a valid aligned + // glib-allocated pointer, so `g_aligned_free`ing is safe. + unsafe { g_aligned_free(ptr.cast::<_>()) } + } + #[cfg(not(HAVE_GLIB_WITH_ALIGNED_ALLOC))] + { + // SAFETY: `ptr` must have been allocated by Self::alloc thus a valid aligned + // glib-allocated pointer, so `qemu_vfree`ing is safe. + unsafe { qemu_vfree(ptr.cast::<_>()) } + } + } + } +} diff --git a/rust/qemu-api/src/log.rs b/rust/qemu-api/src/log.rs new file mode 100644 index 0000000..d6c3d6c --- /dev/null +++ b/rust/qemu-api/src/log.rs @@ -0,0 +1,73 @@ +// Copyright 2025 Bernhard Beschow <shentey@gmail.com> +// SPDX-License-Identifier: GPL-2.0-or-later + +//! Bindings for QEMU's logging infrastructure + +#[repr(u32)] +/// Represents specific error categories within QEMU's logging system. +/// +/// The `Log` enum provides a Rust abstraction for logging errors, corresponding +/// to a subset of the error categories defined in the C implementation. +pub enum Log { + /// Log invalid access caused by the guest. + /// Corresponds to `LOG_GUEST_ERROR` in the C implementation. + GuestError = crate::bindings::LOG_GUEST_ERROR, + + /// Log guest access of unimplemented functionality. + /// Corresponds to `LOG_UNIMP` in the C implementation. + Unimp = crate::bindings::LOG_UNIMP, +} + +/// A macro to log messages conditionally based on a provided mask. 
+/// +/// The `log_mask_ln` macro checks whether the given mask matches the current +/// log level and, if so, formats and logs the message. It is the Rust +/// counterpart of the `qemu_log_mask()` macro in the C implementation. +/// +/// # Parameters +/// +/// - `$mask`: A log level mask. This should be a variant of the `Log` enum. +/// - `$fmt`: A format string following the syntax and rules of the `format!` +/// macro. It specifies the structure of the log message. +/// - `$args`: Optional arguments to be interpolated into the format string. +/// +/// # Example +/// +/// ``` +/// use qemu_api::{log::Log, log_mask_ln}; +/// +/// let error_address = 0xbad; +/// log_mask_ln!(Log::GuestError, "Address 0x{error_address:x} out of range"); +/// ``` +/// +/// It is also possible to use printf-style formatting, as well as having a +/// trailing `,`: +/// +/// ``` +/// use qemu_api::{log::Log, log_mask_ln}; +/// +/// let error_address = 0xbad; +/// log_mask_ln!( +/// Log::GuestError, +/// "Address 0x{:x} out of range", +/// error_address, +/// ); +/// ``` +#[macro_export] +macro_rules! log_mask_ln { + ($mask:expr, $fmt:tt $($args:tt)*) => {{ + // Type assertion to enforce type `Log` for $mask + let _: Log = $mask; + + if unsafe { + (::qemu_api::bindings::qemu_loglevel & ($mask as std::os::raw::c_int)) != 0 + } { + let formatted_string = format!("{}\n", format_args!($fmt $($args)*)); + let c_string = std::ffi::CString::new(formatted_string).unwrap(); + + unsafe { + ::qemu_api::bindings::qemu_log(c_string.as_ptr()); + } + } + }}; +} diff --git a/rust/qemu-api/src/memory.rs b/rust/qemu-api/src/memory.rs new file mode 100644 index 0000000..e40fad6 --- /dev/null +++ b/rust/qemu-api/src/memory.rs @@ -0,0 +1,204 @@ +// Copyright 2024 Red Hat, Inc. +// Author(s): Paolo Bonzini <pbonzini@redhat.com> +// SPDX-License-Identifier: GPL-2.0-or-later + +//! Bindings for `MemoryRegion`, `MemoryRegionOps` and `MemTxAttrs` + +use std::{ + ffi::{c_uint, c_void, CStr, CString}, + marker::PhantomData, +}; + +pub use bindings::{hwaddr, MemTxAttrs}; + +use crate::{ + bindings::{self, device_endian, memory_region_init_io}, + callbacks::FnCall, + cell::Opaque, + prelude::*, + uninit::MaybeUninitField, + zeroable::Zeroable, +}; + +pub struct MemoryRegionOps<T>( + bindings::MemoryRegionOps, + // Note: quite often you'll see PhantomData<fn(&T)> mentioned when discussing + // covariance and contravariance; you don't need any of those to understand + // this usage of PhantomData. Quite simply, MemoryRegionOps<T> *logically* + // holds callbacks that take an argument of type &T, except the type is erased + // before the callback is stored in the bindings::MemoryRegionOps field. + // The argument of PhantomData is a function pointer in order to represent + // that relationship; while that will also provide desirable and safe variance + // for T, variance is not the point but just a consequence. + PhantomData<fn(&T)>, +); + +// SAFETY: When a *const T is passed to the callbacks, the call itself +// is done in a thread-safe manner. The invocation is okay as long as +// T itself is `Sync`. 
+unsafe impl<T: Sync> Sync for MemoryRegionOps<T> {} + +#[derive(Clone)] +pub struct MemoryRegionOpsBuilder<T>(bindings::MemoryRegionOps, PhantomData<fn(&T)>); + +unsafe extern "C" fn memory_region_ops_read_cb<T, F: for<'a> FnCall<(&'a T, hwaddr, u32), u64>>( + opaque: *mut c_void, + addr: hwaddr, + size: c_uint, +) -> u64 { + F::call((unsafe { &*(opaque.cast::<T>()) }, addr, size)) +} + +unsafe extern "C" fn memory_region_ops_write_cb<T, F: for<'a> FnCall<(&'a T, hwaddr, u64, u32)>>( + opaque: *mut c_void, + addr: hwaddr, + data: u64, + size: c_uint, +) { + F::call((unsafe { &*(opaque.cast::<T>()) }, addr, data, size)) +} + +impl<T> MemoryRegionOpsBuilder<T> { + #[must_use] + pub const fn read<F: for<'a> FnCall<(&'a T, hwaddr, u32), u64>>(mut self, _f: &F) -> Self { + self.0.read = Some(memory_region_ops_read_cb::<T, F>); + self + } + + #[must_use] + pub const fn write<F: for<'a> FnCall<(&'a T, hwaddr, u64, u32)>>(mut self, _f: &F) -> Self { + self.0.write = Some(memory_region_ops_write_cb::<T, F>); + self + } + + #[must_use] + pub const fn big_endian(mut self) -> Self { + self.0.endianness = device_endian::DEVICE_BIG_ENDIAN; + self + } + + #[must_use] + pub const fn little_endian(mut self) -> Self { + self.0.endianness = device_endian::DEVICE_LITTLE_ENDIAN; + self + } + + #[must_use] + pub const fn native_endian(mut self) -> Self { + self.0.endianness = device_endian::DEVICE_NATIVE_ENDIAN; + self + } + + #[must_use] + pub const fn valid_sizes(mut self, min: u32, max: u32) -> Self { + self.0.valid.min_access_size = min; + self.0.valid.max_access_size = max; + self + } + + #[must_use] + pub const fn valid_unaligned(mut self) -> Self { + self.0.valid.unaligned = true; + self + } + + #[must_use] + pub const fn impl_sizes(mut self, min: u32, max: u32) -> Self { + self.0.impl_.min_access_size = min; + self.0.impl_.max_access_size = max; + self + } + + #[must_use] + pub const fn impl_unaligned(mut self) -> Self { + self.0.impl_.unaligned = true; + self + } + + #[must_use] + pub const fn build(self) -> MemoryRegionOps<T> { + MemoryRegionOps::<T>(self.0, PhantomData) + } + + #[must_use] + pub const fn new() -> Self { + Self(bindings::MemoryRegionOps::ZERO, PhantomData) + } +} + +impl<T> Default for MemoryRegionOpsBuilder<T> { + fn default() -> Self { + Self::new() + } +} + +/// A safe wrapper around [`bindings::MemoryRegion`]. +#[repr(transparent)] +#[derive(qemu_api_macros::Wrapper)] +pub struct MemoryRegion(Opaque<bindings::MemoryRegion>); + +unsafe impl Send for MemoryRegion {} +unsafe impl Sync for MemoryRegion {} + +impl MemoryRegion { + // inline to ensure that it is not included in tests, which only + // link to hwcore and qom. FIXME: inlining is actually the opposite + // of what we want, since this is the type-erased version of the + // init_io function below. Look into splitting the qemu_api crate. 
+ #[inline(always)] + unsafe fn do_init_io( + slot: *mut bindings::MemoryRegion, + owner: *mut bindings::Object, + ops: &'static bindings::MemoryRegionOps, + name: &'static str, + size: u64, + ) { + unsafe { + let cstr = CString::new(name).unwrap(); + memory_region_init_io( + slot, + owner, + ops, + owner.cast::<c_void>(), + cstr.as_ptr(), + size, + ); + } + } + + pub fn init_io<T: IsA<Object>>( + this: &mut MaybeUninitField<'_, T, Self>, + ops: &'static MemoryRegionOps<T>, + name: &'static str, + size: u64, + ) { + unsafe { + Self::do_init_io( + this.as_mut_ptr().cast(), + MaybeUninitField::parent_mut(this).cast(), + &ops.0, + name, + size, + ); + } + } +} + +unsafe impl ObjectType for MemoryRegion { + type Class = bindings::MemoryRegionClass; + const TYPE_NAME: &'static CStr = + unsafe { CStr::from_bytes_with_nul_unchecked(bindings::TYPE_MEMORY_REGION) }; +} +qom_isa!(MemoryRegion: Object); + +/// A special `MemTxAttrs` constant, used to indicate that no memory +/// attributes are specified. +/// +/// Bus masters which don't specify any attributes will get this, +/// which has all attribute bits clear except the topmost one +/// (so that we can distinguish "all attributes deliberately clear" +/// from "didn't specify" if necessary). +pub const MEMTXATTRS_UNSPECIFIED: MemTxAttrs = MemTxAttrs { + unspecified: true, + ..Zeroable::ZERO +}; diff --git a/rust/qemu-api/src/module.rs b/rust/qemu-api/src/module.rs new file mode 100644 index 0000000..fa5cea3 --- /dev/null +++ b/rust/qemu-api/src/module.rs @@ -0,0 +1,43 @@ +// Copyright 2024, Linaro Limited +// Author(s): Manos Pitsidianakis <manos.pitsidianakis@linaro.org> +// SPDX-License-Identifier: GPL-2.0-or-later + +//! Macro to register blocks of code that run as QEMU starts up. + +#[macro_export] +macro_rules! module_init { + ($type:ident => $body:block) => { + const _: () = { + #[used] + #[cfg_attr( + not(any(target_vendor = "apple", target_os = "windows")), + link_section = ".init_array" + )] + #[cfg_attr(target_vendor = "apple", link_section = "__DATA,__mod_init_func")] + #[cfg_attr(target_os = "windows", link_section = ".CRT$XCU")] + pub static LOAD_MODULE: extern "C" fn() = { + extern "C" fn init_fn() { + $body + } + + extern "C" fn ctor_fn() { + unsafe { + $crate::bindings::register_module_init( + Some(init_fn), + $crate::bindings::module_init_type::$type, + ); + } + } + + ctor_fn + }; + }; + }; + + // shortcut because it's quite common that $body needs unsafe {} + ($type:ident => unsafe $body:block) => { + $crate::module_init! { + $type => { unsafe { $body } } + } + }; +} diff --git a/rust/qemu-api/src/prelude.rs b/rust/qemu-api/src/prelude.rs new file mode 100644 index 0000000..8f9e23e --- /dev/null +++ b/rust/qemu-api/src/prelude.rs @@ -0,0 +1,31 @@ +// Copyright 2024 Red Hat, Inc. +// Author(s): Paolo Bonzini <pbonzini@redhat.com> +// SPDX-License-Identifier: GPL-2.0-or-later + +//! Commonly used traits and types for QEMU. 
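+//!
+//! Typically glob-imported by device code (a sketch):
+//!
+//! ```ignore
+//! use qemu_api::prelude::*;
+//! ```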
+
+pub use crate::bitops::IntegerExt;
+
+pub use crate::cell::BqlCell;
+pub use crate::cell::BqlRefCell;
+
+pub use crate::errno;
+
+pub use crate::log_mask_ln;
+
+pub use crate::qdev::DeviceMethods;
+
+pub use crate::qom::InterfaceType;
+pub use crate::qom::IsA;
+pub use crate::qom::Object;
+pub use crate::qom::ObjectCast;
+pub use crate::qom::ObjectDeref;
+pub use crate::qom::ObjectClassMethods;
+pub use crate::qom::ObjectMethods;
+pub use crate::qom::ObjectType;
+
+pub use crate::qom_isa;
+
+pub use crate::sysbus::SysBusDeviceMethods;
+
+pub use crate::vmstate::VMState;
diff --git a/rust/qemu-api/src/qdev.rs b/rust/qemu-api/src/qdev.rs
new file mode 100644
index 0000000..36f02fb
--- /dev/null
+++ b/rust/qemu-api/src/qdev.rs
@@ -0,0 +1,410 @@
+// Copyright 2024, Linaro Limited
+// Author(s): Manos Pitsidianakis <manos.pitsidianakis@linaro.org>
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+//! Bindings to create devices and access device functionality from Rust.
+
+use std::{
+    ffi::{c_int, c_void, CStr, CString},
+    ptr::NonNull,
+};
+
+pub use bindings::{ClockEvent, DeviceClass, Property, ResetType};
+
+use crate::{
+    bindings::{self, qdev_init_gpio_in, qdev_init_gpio_out, ResettableClass},
+    callbacks::FnCall,
+    cell::{bql_locked, Opaque},
+    chardev::Chardev,
+    error::{Error, Result},
+    irq::InterruptSource,
+    prelude::*,
+    qom::{ObjectClass, ObjectImpl, Owned, ParentInit},
+    vmstate::VMStateDescription,
+};
+
+/// A safe wrapper around [`bindings::Clock`].
+#[repr(transparent)]
+#[derive(Debug, qemu_api_macros::Wrapper)]
+pub struct Clock(Opaque<bindings::Clock>);
+
+unsafe impl Send for Clock {}
+unsafe impl Sync for Clock {}
+
+/// A safe wrapper around [`bindings::DeviceState`].
+#[repr(transparent)]
+#[derive(Debug, qemu_api_macros::Wrapper)]
+pub struct DeviceState(Opaque<bindings::DeviceState>);
+
+unsafe impl Send for DeviceState {}
+unsafe impl Sync for DeviceState {}
+
+/// Trait providing the contents of the `ResettablePhases` struct,
+/// which is part of the QOM `Resettable` interface.
+pub trait ResettablePhasesImpl {
+    /// If not None, this is called when the object enters reset. It
+    /// can reset local state of the object, but it must not do anything that
+    /// has a side-effect on other objects, such as raising or lowering an
+    /// [`InterruptSource`], or reading or writing guest memory. It takes the
+    /// reset's type as argument.
+    const ENTER: Option<fn(&Self, ResetType)> = None;
+
+    /// If not None, this is called to hold the object in reset, once every
+    /// object in the system which is being reset has had its
+    /// `ResettablePhasesImpl::ENTER` method called. At this point devices
+    /// can do actions that affect other objects.
+    ///
+    /// If in doubt, implement this method.
+    const HOLD: Option<fn(&Self, ResetType)> = None;
+
+    /// If not None, this phase is called when the object leaves the reset
+    /// state. Actions affecting other objects are permitted.
+    const EXIT: Option<fn(&Self, ResetType)> = None;
+}
+
+/// # Safety
+///
+/// We expect the FFI user of this function to pass a valid pointer that
+/// can be downcasted to type `T`. We also expect the device is
+/// readable/writeable from one thread at any time.
+unsafe extern "C" fn rust_resettable_enter_fn<T: ResettablePhasesImpl>( + obj: *mut bindings::Object, + typ: ResetType, +) { + let state = NonNull::new(obj).unwrap().cast::<T>(); + T::ENTER.unwrap()(unsafe { state.as_ref() }, typ); +} + +/// # Safety +/// +/// We expect the FFI user of this function to pass a valid pointer that +/// can be downcasted to type `T`. We also expect the device is +/// readable/writeable from one thread at any time. +unsafe extern "C" fn rust_resettable_hold_fn<T: ResettablePhasesImpl>( + obj: *mut bindings::Object, + typ: ResetType, +) { + let state = NonNull::new(obj).unwrap().cast::<T>(); + T::HOLD.unwrap()(unsafe { state.as_ref() }, typ); +} + +/// # Safety +/// +/// We expect the FFI user of this function to pass a valid pointer that +/// can be downcasted to type `T`. We also expect the device is +/// readable/writeable from one thread at any time. +unsafe extern "C" fn rust_resettable_exit_fn<T: ResettablePhasesImpl>( + obj: *mut bindings::Object, + typ: ResetType, +) { + let state = NonNull::new(obj).unwrap().cast::<T>(); + T::EXIT.unwrap()(unsafe { state.as_ref() }, typ); +} + +/// Trait providing the contents of [`DeviceClass`]. +pub trait DeviceImpl: ObjectImpl + ResettablePhasesImpl + IsA<DeviceState> { + /// _Realization_ is the second stage of device creation. It contains + /// all operations that depend on device properties and can fail (note: + /// this is not yet supported for Rust devices). + /// + /// If not `None`, the parent class's `realize` method is overridden + /// with the function pointed to by `REALIZE`. + const REALIZE: Option<fn(&Self) -> Result<()>> = None; + + /// An array providing the properties that the user can set on the + /// device. Not a `const` because referencing statics in constants + /// is unstable until Rust 1.83.0. + fn properties() -> &'static [Property] { + &[] + } + + /// A `VMStateDescription` providing the migration format for the device + /// Not a `const` because referencing statics in constants is unstable + /// until Rust 1.83.0. + fn vmsd() -> Option<&'static VMStateDescription> { + None + } +} + +/// # Safety +/// +/// This function is only called through the QOM machinery and +/// used by `DeviceClass::class_init`. +/// We expect the FFI user of this function to pass a valid pointer that +/// can be downcasted to type `T`. We also expect the device is +/// readable/writeable from one thread at any time. +unsafe extern "C" fn rust_realize_fn<T: DeviceImpl>( + dev: *mut bindings::DeviceState, + errp: *mut *mut bindings::Error, +) { + let state = NonNull::new(dev).unwrap().cast::<T>(); + let result = T::REALIZE.unwrap()(unsafe { state.as_ref() }); + unsafe { + Error::ok_or_propagate(result, errp); + } +} + +unsafe impl InterfaceType for ResettableClass { + const TYPE_NAME: &'static CStr = + unsafe { CStr::from_bytes_with_nul_unchecked(bindings::TYPE_RESETTABLE_INTERFACE) }; +} + +impl ResettableClass { + /// Fill in the virtual methods of `ResettableClass` based on the + /// definitions in the `ResettablePhasesImpl` trait. 
+ pub fn class_init<T: ResettablePhasesImpl>(&mut self) { + if <T as ResettablePhasesImpl>::ENTER.is_some() { + self.phases.enter = Some(rust_resettable_enter_fn::<T>); + } + if <T as ResettablePhasesImpl>::HOLD.is_some() { + self.phases.hold = Some(rust_resettable_hold_fn::<T>); + } + if <T as ResettablePhasesImpl>::EXIT.is_some() { + self.phases.exit = Some(rust_resettable_exit_fn::<T>); + } + } +} + +impl DeviceClass { + /// Fill in the virtual methods of `DeviceClass` based on the definitions in + /// the `DeviceImpl` trait. + pub fn class_init<T: DeviceImpl>(&mut self) { + if <T as DeviceImpl>::REALIZE.is_some() { + self.realize = Some(rust_realize_fn::<T>); + } + if let Some(vmsd) = <T as DeviceImpl>::vmsd() { + self.vmsd = vmsd; + } + let prop = <T as DeviceImpl>::properties(); + if !prop.is_empty() { + unsafe { + bindings::device_class_set_props_n(self, prop.as_ptr(), prop.len()); + } + } + + ResettableClass::cast::<DeviceState>(self).class_init::<T>(); + self.parent_class.class_init::<T>(); + } +} + +#[macro_export] +macro_rules! define_property { + ($name:expr, $state:ty, $field:ident, $prop:expr, $type:ty, bit = $bitnr:expr, default = $defval:expr$(,)*) => { + $crate::bindings::Property { + // use associated function syntax for type checking + name: ::std::ffi::CStr::as_ptr($name), + info: $prop, + offset: ::std::mem::offset_of!($state, $field) as isize, + bitnr: $bitnr, + set_default: true, + defval: $crate::bindings::Property__bindgen_ty_1 { u: $defval as u64 }, + ..$crate::zeroable::Zeroable::ZERO + } + }; + ($name:expr, $state:ty, $field:ident, $prop:expr, $type:ty, default = $defval:expr$(,)*) => { + $crate::bindings::Property { + // use associated function syntax for type checking + name: ::std::ffi::CStr::as_ptr($name), + info: $prop, + offset: ::std::mem::offset_of!($state, $field) as isize, + set_default: true, + defval: $crate::bindings::Property__bindgen_ty_1 { u: $defval as u64 }, + ..$crate::zeroable::Zeroable::ZERO + } + }; + ($name:expr, $state:ty, $field:ident, $prop:expr, $type:ty$(,)*) => { + $crate::bindings::Property { + // use associated function syntax for type checking + name: ::std::ffi::CStr::as_ptr($name), + info: $prop, + offset: ::std::mem::offset_of!($state, $field) as isize, + set_default: false, + ..$crate::zeroable::Zeroable::ZERO + } + }; +} + +#[macro_export] +macro_rules! declare_properties { + ($ident:ident, $($prop:expr),*$(,)*) => { + pub static $ident: [$crate::bindings::Property; { + let mut len = 0; + $({ + _ = stringify!($prop); + len += 1; + })* + len + }] = [ + $($prop),*, + ]; + }; +} + +unsafe impl ObjectType for DeviceState { + type Class = DeviceClass; + const TYPE_NAME: &'static CStr = + unsafe { CStr::from_bytes_with_nul_unchecked(bindings::TYPE_DEVICE) }; +} +qom_isa!(DeviceState: Object); + +/// Initialization methods take a [`ParentInit`] and can be called as +/// associated functions. +impl DeviceState { + /// Add an input clock named `name`. Invoke the callback with + /// `self` as the first parameter for the events that are requested. + /// + /// The resulting clock is added as a child of `self`, but it also + /// stays alive until after `Drop::drop` is called because C code + /// keeps an extra reference to it until `device_finalize()` calls + /// `qdev_finalize_clocklist()`. Therefore (unlike most cases in + /// which Rust code has a reference to a child object) it would be + /// possible for this function to return a `&Clock` too. 
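+    ///
+    /// A sketch of typical use from a device's instance-init code; the
+    /// device type, clock name and callback are hypothetical:
+    ///
+    /// ```ignore
+    /// let clock = DeviceState::init_clock_in(
+    ///     &mut this,
+    ///     "clk",
+    ///     &Self::clock_update,
+    ///     ClockEvent::ClockUpdate,
+    /// );
+    /// ```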
+ #[inline] + pub fn init_clock_in<T: DeviceImpl, F: for<'a> FnCall<(&'a T, ClockEvent)>>( + this: &mut ParentInit<T>, + name: &str, + _cb: &F, + events: ClockEvent, + ) -> Owned<Clock> + where + T::ParentType: IsA<DeviceState>, + { + fn do_init_clock_in( + dev: &DeviceState, + name: &str, + cb: Option<unsafe extern "C" fn(*mut c_void, ClockEvent)>, + events: ClockEvent, + ) -> Owned<Clock> { + assert!(bql_locked()); + + // SAFETY: the clock is heap allocated, but qdev_init_clock_in() + // does not gift the reference to its caller; so use Owned::from to + // add one. The callback is disabled automatically when the clock + // is unparented, which happens before the device is finalized. + unsafe { + let cstr = CString::new(name).unwrap(); + let clk = bindings::qdev_init_clock_in( + dev.0.as_mut_ptr(), + cstr.as_ptr(), + cb, + dev.0.as_void_ptr(), + events.0, + ); + + let clk: &Clock = Clock::from_raw(clk); + Owned::from(clk) + } + } + + let cb: Option<unsafe extern "C" fn(*mut c_void, ClockEvent)> = if F::is_some() { + unsafe extern "C" fn rust_clock_cb<T, F: for<'a> FnCall<(&'a T, ClockEvent)>>( + opaque: *mut c_void, + event: ClockEvent, + ) { + // SAFETY: the opaque is "this", which is indeed a pointer to T + F::call((unsafe { &*(opaque.cast::<T>()) }, event)) + } + Some(rust_clock_cb::<T, F>) + } else { + None + }; + + do_init_clock_in(unsafe { this.upcast_mut() }, name, cb, events) + } + + /// Add an output clock named `name`. + /// + /// The resulting clock is added as a child of `self`, but it also + /// stays alive until after `Drop::drop` is called because C code + /// keeps an extra reference to it until `device_finalize()` calls + /// `qdev_finalize_clocklist()`. Therefore (unlike most cases in + /// which Rust code has a reference to a child object) it would be + /// possible for this function to return a `&Clock` too. + #[inline] + pub fn init_clock_out<T: DeviceImpl>(this: &mut ParentInit<T>, name: &str) -> Owned<Clock> + where + T::ParentType: IsA<DeviceState>, + { + unsafe { + let cstr = CString::new(name).unwrap(); + let dev: &mut DeviceState = this.upcast_mut(); + let clk = bindings::qdev_init_clock_out(dev.0.as_mut_ptr(), cstr.as_ptr()); + + let clk: &Clock = Clock::from_raw(clk); + Owned::from(clk) + } + } +} + +/// Trait for methods exposed by the [`DeviceState`] class. The methods can be +/// called on all objects that have the trait `IsA<DeviceState>`. +/// +/// The trait should only be used through the blanket implementation, +/// which guarantees safety via `IsA`. 
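+///
+/// # Examples
+///
+/// A sketch of going through the blanket implementation; `MyDevice` (an
+/// `IsA<DeviceState>` type) and its `irq_handler` method are placeholders:
+///
+/// ```ignore
+/// fn post_init(dev: &MyDevice) {
+///     // Resolved through the blanket impl of `DeviceMethods` for `&MyDevice`.
+///     dev.init_gpio_in(1, MyDevice::irq_handler);
+/// }
+/// ```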
+pub trait DeviceMethods: ObjectDeref +where + Self::Target: IsA<DeviceState>, +{ + fn prop_set_chr(&self, propname: &str, chr: &Owned<Chardev>) { + assert!(bql_locked()); + let c_propname = CString::new(propname).unwrap(); + let chr: &Chardev = chr; + unsafe { + bindings::qdev_prop_set_chr( + self.upcast().as_mut_ptr(), + c_propname.as_ptr(), + chr.as_mut_ptr(), + ); + } + } + + fn init_gpio_in<F: for<'a> FnCall<(&'a Self::Target, u32, u32)>>( + &self, + num_lines: u32, + _cb: F, + ) { + fn do_init_gpio_in( + dev: &DeviceState, + num_lines: u32, + gpio_in_cb: unsafe extern "C" fn(*mut c_void, c_int, c_int), + ) { + unsafe { + qdev_init_gpio_in(dev.as_mut_ptr(), Some(gpio_in_cb), num_lines as c_int); + } + } + + let _: () = F::ASSERT_IS_SOME; + unsafe extern "C" fn rust_irq_handler<T, F: for<'a> FnCall<(&'a T, u32, u32)>>( + opaque: *mut c_void, + line: c_int, + level: c_int, + ) { + // SAFETY: the opaque was passed as a reference to `T` + F::call((unsafe { &*(opaque.cast::<T>()) }, line as u32, level as u32)) + } + + let gpio_in_cb: unsafe extern "C" fn(*mut c_void, c_int, c_int) = + rust_irq_handler::<Self::Target, F>; + + do_init_gpio_in(self.upcast(), num_lines, gpio_in_cb); + } + + fn init_gpio_out(&self, pins: &[InterruptSource]) { + unsafe { + qdev_init_gpio_out( + self.upcast().as_mut_ptr(), + InterruptSource::slice_as_ptr(pins), + pins.len() as c_int, + ); + } + } +} + +impl<R: ObjectDeref> DeviceMethods for R where R::Target: IsA<DeviceState> {} + +unsafe impl ObjectType for Clock { + type Class = ObjectClass; + const TYPE_NAME: &'static CStr = + unsafe { CStr::from_bytes_with_nul_unchecked(bindings::TYPE_CLOCK) }; +} +qom_isa!(Clock: Object); diff --git a/rust/qemu-api/src/qom.rs b/rust/qemu-api/src/qom.rs new file mode 100644 index 0000000..e20ee01 --- /dev/null +++ b/rust/qemu-api/src/qom.rs @@ -0,0 +1,950 @@ +// Copyright 2024, Linaro Limited +// Author(s): Manos Pitsidianakis <manos.pitsidianakis@linaro.org> +// SPDX-License-Identifier: GPL-2.0-or-later + +//! Bindings to access QOM functionality from Rust. +//! +//! The QEMU Object Model (QOM) provides inheritance and dynamic typing for QEMU +//! devices. This module makes QOM's features available in Rust through three +//! main mechanisms: +//! +//! * Automatic creation and registration of `TypeInfo` for classes that are +//! written in Rust, as well as mapping between Rust traits and QOM vtables. +//! +//! * Type-safe casting between parent and child classes, through the [`IsA`] +//! trait and methods such as [`upcast`](ObjectCast::upcast) and +//! [`downcast`](ObjectCast::downcast). +//! +//! * Automatic delegation of parent class methods to child classes. When a +//! trait uses [`IsA`] as a bound, its contents become available to all child +//! classes through blanket implementations. This works both for class methods +//! and for instance methods accessed through references or smart pointers. +//! +//! # Structure of a class +//! +//! A leaf class only needs a struct holding instance state. The struct must +//! implement the [`ObjectType`] and [`IsA`] traits, as well as any `*Impl` +//! traits that exist for its superclasses. +//! +//! If a class has subclasses, it will also provide a struct for instance data, +//! with the same characteristics as for concrete classes, but it also needs +//! additional components to support virtual methods: +//! +//! * a struct for class data, for example `DeviceClass`. This corresponds to +//! the C "class struct" and holds the vtable that is used by instances of the +//! 
class and its subclasses. It must start with its parent's class struct.
+//!
+//! * a trait for virtual method implementations, for example `DeviceImpl`.
+//!   Child classes implement this trait to provide their own behavior for
+//!   virtual methods. The trait's methods take `&self` to access instance data.
+//!   The traits have the appropriate specialization of `IsA<>` as a supertrait,
+//!   for example `IsA<DeviceState>` for `DeviceImpl`.
+//!
+//! * a trait for instance methods, for example `DeviceMethods`. This trait is
+//!   automatically implemented for any reference or smart pointer to a device
+//!   instance. It calls into the vtable and provides access, across all
+//!   subclasses, to the methods defined for the class.
+//!
+//! * optionally, a trait for class methods, for example `DeviceClassMethods`.
+//!   This provides access to class-wide functionality that doesn't depend on
+//!   instance data. Like instance methods, these are automatically inherited by
+//!   child classes.
+//!
+//! # Class structures
+//!
+//! Each QOM class that has virtual methods describes them in a
+//! _class struct_. Class structs include a parent field corresponding
+//! to the vtable of the parent class, all the way up to [`ObjectClass`].
+//!
+//! As mentioned above, virtual methods are defined via traits such as
+//! `DeviceImpl`. Class structs do not define any trait but, conventionally,
+//! all of them have a `class_init` method to initialize the virtual methods
+//! based on the trait and then call the same method on the superclass.
+//!
+//! ```ignore
+//! impl YourSubclassClass
+//! {
+//!     pub fn class_init<T: YourSubclassImpl>(&mut self) {
+//!         ...
+//!         self.parent_class.class_init::<T>();
+//!     }
+//! }
+//! ```
+//!
+//! If a class implements a QOM interface, its `class_init` function must also
+//! contain, for each interface, an extra forwarding call as follows:
+//!
+//! ```ignore
+//! ResettableClass::cast::<Self>(self).class_init::<Self>();
+//! ```
+//!
+//! These `class_init` functions are methods on the class rather than a trait,
+//! because the bound on `T` (`DeviceImpl` in this case) will change for every
+//! class struct. The functions are pointed to by the
+//! [`ObjectImpl::CLASS_INIT`] function pointer. While there is no default
+//! implementation, in most cases it will be enough to write it as follows:
+//!
+//! ```ignore
+//! const CLASS_INIT: fn(&mut Self::Class) = Self::Class::class_init::<Self>;
+//! ```
+//!
+//! This design incurs a small amount of code duplication but, by not using
+//! traits, it allows the flexibility of implementing bindings in any crate,
+//! without running into violations of the orphan rules for traits.
+
+use std::{
+    ffi::{c_void, CStr},
+    fmt,
+    marker::PhantomData,
+    mem::{ManuallyDrop, MaybeUninit},
+    ops::{Deref, DerefMut},
+    ptr::NonNull,
+};
+
+pub use bindings::ObjectClass;
+
+use crate::{
+    bindings::{
+        self, object_class_dynamic_cast, object_dynamic_cast, object_get_class,
+        object_get_typename, object_new, object_ref, object_unref, TypeInfo,
+    },
+    cell::{bql_locked, Opaque},
+};
+
+/// A safe wrapper around [`bindings::Object`].
+#[repr(transparent)]
+#[derive(Debug, qemu_api_macros::Wrapper)]
+pub struct Object(Opaque<bindings::Object>);
+
+unsafe impl Send for Object {}
+unsafe impl Sync for Object {}
+
+/// Marker trait: `Self` can be statically upcasted to `P` (i.e. `P` is a direct
+/// or indirect parent of `Self`).
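+///
+/// Generic code can rely on an `IsA` bound to accept any subclass of a given
+/// type; a minimal sketch using only items from this module:
+///
+/// ```ignore
+/// fn typename_of<T: IsA<Object>>(obj: &T) -> std::borrow::Cow<'_, str> {
+///     obj.typename()
+/// }
+/// ```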
+/// +/// # Safety +/// +/// The struct `Self` must be `#[repr(C)]` and must begin, directly or +/// indirectly, with a field of type `P`. This ensures that invalid casts, +/// which rely on `IsA<>` for static checking, are rejected at compile time. +pub unsafe trait IsA<P: ObjectType>: ObjectType {} + +// SAFETY: it is always safe to cast to your own type +unsafe impl<T: ObjectType> IsA<T> for T {} + +/// Macro to mark superclasses of QOM classes. This enables type-safe +/// up- and downcasting. +/// +/// # Safety +/// +/// This macro is a thin wrapper around the [`IsA`] trait and performs +/// no checking whatsoever of what is declared. It is the caller's +/// responsibility to have $struct begin, directly or indirectly, with +/// a field of type `$parent`. +#[macro_export] +macro_rules! qom_isa { + ($struct:ty : $($parent:ty),* ) => { + $( + // SAFETY: it is the caller responsibility to have $parent as the + // first field + unsafe impl $crate::qom::IsA<$parent> for $struct {} + + impl AsRef<$parent> for $struct { + fn as_ref(&self) -> &$parent { + // SAFETY: follows the same rules as for IsA<U>, which is + // declared above. + let ptr: *const Self = self; + unsafe { &*ptr.cast::<$parent>() } + } + } + )* + }; +} + +/// This is the same as [`ManuallyDrop<T>`](std::mem::ManuallyDrop), though +/// it hides the standard methods of `ManuallyDrop`. +/// +/// The first field of an `ObjectType` must be of type `ParentField<T>`. +/// (Technically, this is only necessary if there is at least one Rust +/// superclass in the hierarchy). This is to ensure that the parent field is +/// dropped after the subclass; this drop order is enforced by the C +/// `object_deinit` function. +/// +/// # Examples +/// +/// ```ignore +/// #[repr(C)] +/// #[derive(qemu_api_macros::Object)] +/// pub struct MyDevice { +/// parent: ParentField<DeviceState>, +/// ... +/// } +/// ``` +#[derive(Debug)] +#[repr(transparent)] +pub struct ParentField<T: ObjectType>(std::mem::ManuallyDrop<T>); + +impl<T: ObjectType> Deref for ParentField<T> { + type Target = T; + + #[inline(always)] + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl<T: ObjectType> DerefMut for ParentField<T> { + #[inline(always)] + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + +impl<T: fmt::Display + ObjectType> fmt::Display for ParentField<T> { + #[inline(always)] + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { + self.0.fmt(f) + } +} + +/// This struct knows that the superclasses of the object have already been +/// initialized. +/// +/// The declaration of `ParentInit` is.. *"a kind of magic"*. It uses a +/// technique that is found in several crates, the main ones probably being +/// `ghost-cell` (in fact it was introduced by the [`GhostCell` paper](https://plv.mpi-sws.org/rustbelt/ghostcell/)) +/// and `generativity`. +/// +/// The `PhantomData` makes the `ParentInit` type *invariant* with respect to +/// the lifetime argument `'init`. This, together with the `for<'...>` in +/// `[ParentInit::with]`, block any attempt of the compiler to be creative when +/// operating on types of type `ParentInit` and to extend their lifetimes. In +/// particular, it ensures that the `ParentInit` cannot be made to outlive the +/// `rust_instance_init()` function that creates it, and therefore that the +/// `&'init T` reference is valid. 
+/// +/// This implementation of the same concept, without the QOM baggage, can help +/// understanding the effect: +/// +/// ``` +/// use std::marker::PhantomData; +/// +/// #[derive(PartialEq, Eq)] +/// pub struct Jail<'closure, T: Copy>(&'closure T, PhantomData<fn(&'closure ()) -> &'closure ()>); +/// +/// impl<'closure, T: Copy> Jail<'closure, T> { +/// fn get(&self) -> T { +/// *self.0 +/// } +/// +/// #[inline] +/// fn with<U>(v: T, f: impl for<'id> FnOnce(Jail<'id, T>) -> U) -> U { +/// let parent_init = Jail(&v, PhantomData); +/// f(parent_init) +/// } +/// } +/// ``` +/// +/// It's impossible to escape the `Jail`; `token1` cannot be moved out of the +/// closure: +/// +/// ```ignore +/// let x = 42; +/// let escape = Jail::with(&x, |token1| { +/// println!("{}", token1.get()); +/// // fails to compile... +/// token1 +/// }); +/// // ... so you cannot do this: +/// println!("{}", escape.get()); +/// ``` +/// +/// Likewise, in the QOM case the `ParentInit` cannot be moved out of +/// `instance_init()`. Without this trick it would be possible to stash a +/// `ParentInit` and use it later to access uninitialized memory. +/// +/// Here is another example, showing how separately-created "identities" stay +/// isolated: +/// +/// ```ignore +/// impl<'closure, T: Copy> Clone for Jail<'closure, T> { +/// fn clone(&self) -> Jail<'closure, T> { +/// Jail(self.0, PhantomData) +/// } +/// } +/// +/// fn main() { +/// Jail::with(42, |token1| { +/// // this works and returns true: the clone has the same "identity" +/// println!("{}", token1 == token1.clone()); +/// Jail::with(42, |token2| { +/// // here the outer token remains accessible... +/// println!("{}", token1.get()); +/// // ... but the two are separate: this fails to compile: +/// println!("{}", token1 == token2); +/// }); +/// }); +/// } +/// ``` +pub struct ParentInit<'init, T>( + &'init mut MaybeUninit<T>, + PhantomData<fn(&'init ()) -> &'init ()>, +); + +impl<'init, T> ParentInit<'init, T> { + #[inline] + pub fn with(obj: &'init mut MaybeUninit<T>, f: impl for<'id> FnOnce(ParentInit<'id, T>)) { + let parent_init = ParentInit(obj, PhantomData); + f(parent_init) + } +} + +impl<T: ObjectType> ParentInit<'_, T> { + /// Return the receiver as a mutable raw pointer to Object. + /// + /// # Safety + /// + /// Fields beyond `Object` could be uninitialized and it's your + /// responsibility to avoid that they're used when the pointer is + /// dereferenced, either directly or through a cast. + pub fn as_object_mut_ptr(&self) -> *mut bindings::Object { + self.as_object_ptr().cast_mut() + } + + /// Return the receiver as a mutable raw pointer to Object. + /// + /// # Safety + /// + /// Fields beyond `Object` could be uninitialized and it's your + /// responsibility to avoid that they're used when the pointer is + /// dereferenced, either directly or through a cast. + pub fn as_object_ptr(&self) -> *const bindings::Object { + self.0.as_ptr().cast() + } +} + +impl<'a, T: ObjectImpl> ParentInit<'a, T> { + /// Convert from a derived type to one of its parent types, which + /// have already been initialized. + /// + /// # Safety + /// + /// Structurally this is always a safe operation; the [`IsA`] trait + /// provides static verification trait that `Self` dereferences to `U` or + /// a child of `U`, and only parent types of `T` are allowed. + /// + /// However, while the fields of the resulting reference are initialized, + /// calls might use uninitialized fields of the subclass. It is your + /// responsibility to avoid this. 
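+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch; `init_clock_out` in this crate uses the mutable
+    /// variant below in exactly this way:
+    ///
+    /// ```ignore
+    /// // T::ParentType is IsA<DeviceState>, so the DeviceState portion of
+    /// // the object is already initialized and may be used.
+    /// let dev: &DeviceState = unsafe { this.upcast() };
+    /// ```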
+ pub unsafe fn upcast<U: ObjectType>(&self) -> &'a U + where + T::ParentType: IsA<U>, + { + // SAFETY: soundness is declared via IsA<U>, which is an unsafe trait; + // the parent has been initialized before `instance_init `is called + unsafe { &*(self.0.as_ptr().cast::<U>()) } + } + + /// Convert from a derived type to one of its parent types, which + /// have already been initialized. + /// + /// # Safety + /// + /// Structurally this is always a safe operation; the [`IsA`] trait + /// provides static verification trait that `Self` dereferences to `U` or + /// a child of `U`, and only parent types of `T` are allowed. + /// + /// However, while the fields of the resulting reference are initialized, + /// calls might use uninitialized fields of the subclass. It is your + /// responsibility to avoid this. + pub unsafe fn upcast_mut<U: ObjectType>(&mut self) -> &'a mut U + where + T::ParentType: IsA<U>, + { + // SAFETY: soundness is declared via IsA<U>, which is an unsafe trait; + // the parent has been initialized before `instance_init `is called + unsafe { &mut *(self.0.as_mut_ptr().cast::<U>()) } + } +} + +impl<T> Deref for ParentInit<'_, T> { + type Target = MaybeUninit<T>; + + fn deref(&self) -> &Self::Target { + self.0 + } +} + +impl<T> DerefMut for ParentInit<'_, T> { + fn deref_mut(&mut self) -> &mut Self::Target { + self.0 + } +} + +unsafe extern "C" fn rust_instance_init<T: ObjectImpl>(obj: *mut bindings::Object) { + let mut state = NonNull::new(obj).unwrap().cast::<MaybeUninit<T>>(); + + // SAFETY: obj is an instance of T, since rust_instance_init<T> + // is called from QOM core as the instance_init function + // for class T + unsafe { + ParentInit::with(state.as_mut(), |parent_init| { + T::INSTANCE_INIT.unwrap()(parent_init); + }); + } +} + +unsafe extern "C" fn rust_instance_post_init<T: ObjectImpl>(obj: *mut bindings::Object) { + let state = NonNull::new(obj).unwrap().cast::<T>(); + // SAFETY: obj is an instance of T, since rust_instance_post_init<T> + // is called from QOM core as the instance_post_init function + // for class T + T::INSTANCE_POST_INIT.unwrap()(unsafe { state.as_ref() }); +} + +unsafe extern "C" fn rust_class_init<T: ObjectType + ObjectImpl>( + klass: *mut ObjectClass, + _data: *const c_void, +) { + let mut klass = NonNull::new(klass) + .unwrap() + .cast::<<T as ObjectType>::Class>(); + // SAFETY: klass is a T::Class, since rust_class_init<T> + // is called from QOM core as the class_init function + // for class T + <T as ObjectImpl>::CLASS_INIT(unsafe { klass.as_mut() }) +} + +unsafe extern "C" fn drop_object<T: ObjectImpl>(obj: *mut bindings::Object) { + // SAFETY: obj is an instance of T, since drop_object<T> is called + // from the QOM core function object_deinit() as the instance_finalize + // function for class T. Note that while object_deinit() will drop the + // superclass field separately after this function returns, `T` must + // implement the unsafe trait ObjectType; the safety rules for the + // trait mandate that the parent field is manually dropped. + unsafe { std::ptr::drop_in_place(obj.cast::<T>()) } +} + +/// Trait exposed by all structs corresponding to QOM objects. 
+/// +/// # Safety +/// +/// For classes declared in C: +/// +/// - `Class` and `TYPE` must match the data in the `TypeInfo`; +/// +/// - the first field of the struct must be of the instance type corresponding +/// to the superclass, as declared in the `TypeInfo` +/// +/// - likewise, the first field of the `Class` struct must be of the class type +/// corresponding to the superclass +/// +/// For classes declared in Rust and implementing [`ObjectImpl`]: +/// +/// - the struct must be `#[repr(C)]`; +/// +/// - the first field of the struct must be of type +/// [`ParentField<T>`](ParentField), where `T` is the parent type +/// [`ObjectImpl::ParentType`] +/// +/// - the first field of the `Class` must be of the class struct corresponding +/// to the superclass, which is `ObjectImpl::ParentType::Class`. `ParentField` +/// is not needed here. +/// +/// In both cases, having a separate class type is not necessary if the subclass +/// does not add any field. +pub unsafe trait ObjectType: Sized { + /// The QOM class object corresponding to this struct. This is used + /// to automatically generate a `class_init` method. + type Class; + + /// The name of the type, which can be passed to `object_new()` to + /// generate an instance of this type. + const TYPE_NAME: &'static CStr; + + /// Return the receiver as an Object. This is always safe, even + /// if this type represents an interface. + fn as_object(&self) -> &Object { + unsafe { &*self.as_ptr().cast() } + } + + /// Return the receiver as a const raw pointer to Object. + /// This is preferable to `as_object_mut_ptr()` if a C + /// function only needs a `const Object *`. + fn as_object_ptr(&self) -> *const bindings::Object { + self.as_object().as_ptr() + } + + /// Return the receiver as a mutable raw pointer to Object. + /// + /// # Safety + /// + /// This cast is always safe, but because the result is mutable + /// and the incoming reference is not, this should only be used + /// for calls to C functions, and only if needed. + unsafe fn as_object_mut_ptr(&self) -> *mut bindings::Object { + self.as_object().as_mut_ptr() + } +} + +/// Trait exposed by all structs corresponding to QOM interfaces. +/// Unlike `ObjectType`, it is implemented on the class type (which provides +/// the vtable for the interfaces). +/// +/// # Safety +/// +/// `TYPE` must match the contents of the `TypeInfo` as found in the C code; +/// right now, interfaces can only be declared in C. +pub unsafe trait InterfaceType: Sized { + /// The name of the type, which can be passed to + /// `object_class_dynamic_cast()` to obtain the pointer to the vtable + /// for this interface. + const TYPE_NAME: &'static CStr; + + /// Return the vtable for the interface; `U` is the type that + /// lists the interface in its `TypeInfo`. + /// + /// # Examples + /// + /// This function is usually called by a `class_init` method in `U::Class`. + /// For example, `DeviceClass::class_init<T>` initializes its `Resettable` + /// interface as follows: + /// + /// ```ignore + /// ResettableClass::cast::<DeviceState>(self).class_init::<T>(); + /// ``` + /// + /// where `T` is the concrete subclass that is being initialized. + /// + /// # Panics + /// + /// Panic if the incoming argument if `T` does not implement the interface. 
+ fn cast<U: ObjectType>(klass: &mut U::Class) -> &mut Self { + unsafe { + // SAFETY: upcasting to ObjectClass is always valid, and the + // return type is either NULL or the argument itself + let result: *mut Self = object_class_dynamic_cast( + (klass as *mut U::Class).cast(), + Self::TYPE_NAME.as_ptr(), + ) + .cast(); + result.as_mut().unwrap() + } + } +} + +/// This trait provides safe casting operations for QOM objects to raw pointers, +/// to be used for example for FFI. The trait can be applied to any kind of +/// reference or smart pointers, and enforces correctness through the [`IsA`] +/// trait. +pub trait ObjectDeref: Deref +where + Self::Target: ObjectType, +{ + /// Convert to a const Rust pointer, to be used for example for FFI. + /// The target pointer type must be the type of `self` or a superclass + fn as_ptr<U: ObjectType>(&self) -> *const U + where + Self::Target: IsA<U>, + { + let ptr: *const Self::Target = self.deref(); + ptr.cast::<U>() + } + + /// Convert to a mutable Rust pointer, to be used for example for FFI. + /// The target pointer type must be the type of `self` or a superclass. + /// Used to implement interior mutability for objects. + /// + /// # Safety + /// + /// This method is safe because only the actual dereference of the pointer + /// has to be unsafe. Bindings to C APIs will use it a lot, but care has + /// to be taken because it overrides the const-ness of `&self`. + fn as_mut_ptr<U: ObjectType>(&self) -> *mut U + where + Self::Target: IsA<U>, + { + #[allow(clippy::as_ptr_cast_mut)] + { + self.as_ptr::<U>().cast_mut() + } + } +} + +/// Trait that adds extra functionality for `&T` where `T` is a QOM +/// object type. Allows conversion to/from C objects in generic code. +pub trait ObjectCast: ObjectDeref + Copy +where + Self::Target: ObjectType, +{ + /// Safely convert from a derived type to one of its parent types. + /// + /// This is always safe; the [`IsA`] trait provides static verification + /// trait that `Self` dereferences to `U` or a child of `U`. + fn upcast<'a, U: ObjectType>(self) -> &'a U + where + Self::Target: IsA<U>, + Self: 'a, + { + // SAFETY: soundness is declared via IsA<U>, which is an unsafe trait + unsafe { self.unsafe_cast::<U>() } + } + + /// Attempt to convert to a derived type. + /// + /// Returns `None` if the object is not actually of type `U`. This is + /// verified at runtime by checking the object's type information. + fn downcast<'a, U: IsA<Self::Target>>(self) -> Option<&'a U> + where + Self: 'a, + { + self.dynamic_cast::<U>() + } + + /// Attempt to convert between any two types in the QOM hierarchy. + /// + /// Returns `None` if the object is not actually of type `U`. This is + /// verified at runtime by checking the object's type information. + fn dynamic_cast<'a, U: ObjectType>(self) -> Option<&'a U> + where + Self: 'a, + { + unsafe { + // SAFETY: upcasting to Object is always valid, and the + // return type is either NULL or the argument itself + let result: *const U = + object_dynamic_cast(self.as_object_mut_ptr(), U::TYPE_NAME.as_ptr()).cast(); + + result.as_ref() + } + } + + /// Convert to any QOM type without verification. + /// + /// # Safety + /// + /// What safety? You need to know yourself that the cast is correct; only + /// use when performance is paramount. It is still better than a raw + /// pointer `cast()`, which does not even check that you remain in the + /// realm of QOM `ObjectType`s. + /// + /// `unsafe_cast::<Object>()` is always safe. 
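+    ///
+    /// # Examples
+    ///
+    /// A sketch contrasting the unchecked cast with the checked ones; `dev`
+    /// is assumed to be a `&DeviceState` that is known to wrap a
+    /// hypothetical `MyDevice`:
+    ///
+    /// ```ignore
+    /// let obj: &Object = dev.upcast();                       // checked at compile time
+    /// let maybe: Option<&MyDevice> = dev.downcast();         // checked at run time
+    /// let surely: &MyDevice = unsafe { dev.unsafe_cast() };  // no check at all
+    /// ```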
+ unsafe fn unsafe_cast<'a, U: ObjectType>(self) -> &'a U + where + Self: 'a, + { + unsafe { &*(self.as_ptr::<Self::Target>().cast::<U>()) } + } +} + +impl<T: ObjectType> ObjectDeref for &T {} +impl<T: ObjectType> ObjectCast for &T {} + +impl<T: ObjectType> ObjectDeref for &mut T {} + +/// Trait a type must implement to be registered with QEMU. +pub trait ObjectImpl: ObjectType + IsA<Object> { + /// The parent of the type. This should match the first field of the + /// struct that implements `ObjectImpl`, minus the `ParentField<_>` wrapper. + type ParentType: ObjectType; + + /// Whether the object can be instantiated + const ABSTRACT: bool = false; + + /// Function that is called to initialize an object. The parent class will + /// have already been initialized so the type is only responsible for + /// initializing its own members. + /// + /// FIXME: The argument is not really a valid reference. `&mut + /// MaybeUninit<Self>` would be a better description. + const INSTANCE_INIT: Option<unsafe fn(ParentInit<Self>)> = None; + + /// Function that is called to finish initialization of an object, once + /// `INSTANCE_INIT` functions have been called. + const INSTANCE_POST_INIT: Option<fn(&Self)> = None; + + /// Called on descendant classes after all parent class initialization + /// has occurred, but before the class itself is initialized. This + /// is only useful if a class is not a leaf, and can be used to undo + /// the effects of copying the contents of the parent's class struct + /// to the descendants. + const CLASS_BASE_INIT: Option< + unsafe extern "C" fn(klass: *mut ObjectClass, data: *const c_void), + > = None; + + const TYPE_INFO: TypeInfo = TypeInfo { + name: Self::TYPE_NAME.as_ptr(), + parent: Self::ParentType::TYPE_NAME.as_ptr(), + instance_size: core::mem::size_of::<Self>(), + instance_align: core::mem::align_of::<Self>(), + instance_init: match Self::INSTANCE_INIT { + None => None, + Some(_) => Some(rust_instance_init::<Self>), + }, + instance_post_init: match Self::INSTANCE_POST_INIT { + None => None, + Some(_) => Some(rust_instance_post_init::<Self>), + }, + instance_finalize: Some(drop_object::<Self>), + abstract_: Self::ABSTRACT, + class_size: core::mem::size_of::<Self::Class>(), + class_init: Some(rust_class_init::<Self>), + class_base_init: Self::CLASS_BASE_INIT, + class_data: core::ptr::null(), + interfaces: core::ptr::null(), + }; + + // methods on ObjectClass + const UNPARENT: Option<fn(&Self)> = None; + + /// Store into the argument the virtual method implementations + /// for `Self`. On entry, the virtual method pointers are set to + /// the default values coming from the parent classes; the function + /// can change them to override virtual methods of a parent class. + /// + /// Usually defined simply as `Self::Class::class_init::<Self>`; + /// however a default implementation cannot be included here, because the + /// bounds that the `Self::Class::class_init` method places on `Self` are + /// not known in advance. + /// + /// # Safety + /// + /// While `klass`'s parent class is initialized on entry, the other fields + /// are all zero; it is therefore assumed that all fields in `T` can be + /// zeroed, otherwise it would not be possible to provide the class as a + /// `&mut T`. TODO: it may be possible to add an unsafe trait that checks + /// that all fields *after the parent class* (but not the parent class + /// itself) are Zeroable. This unsafe trait can be added via a derive + /// macro. 
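+    ///
+    /// # Examples
+    ///
+    /// A sketch for a hypothetical `MyDevice`; `Self::init` is a placeholder
+    /// `instance_init` function and only the relevant items are shown:
+    ///
+    /// ```ignore
+    /// impl ObjectImpl for MyDevice {
+    ///     type ParentType = DeviceState;
+    ///     const INSTANCE_INIT: Option<unsafe fn(ParentInit<Self>)> = Some(Self::init);
+    ///     const CLASS_INIT: fn(&mut Self::Class) = Self::Class::class_init::<Self>;
+    /// }
+    /// ```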
+ const CLASS_INIT: fn(&mut Self::Class); +} + +/// # Safety +/// +/// We expect the FFI user of this function to pass a valid pointer that +/// can be downcasted to type `T`. We also expect the device is +/// readable/writeable from one thread at any time. +unsafe extern "C" fn rust_unparent_fn<T: ObjectImpl>(dev: *mut bindings::Object) { + let state = NonNull::new(dev).unwrap().cast::<T>(); + T::UNPARENT.unwrap()(unsafe { state.as_ref() }); +} + +impl ObjectClass { + /// Fill in the virtual methods of `ObjectClass` based on the definitions in + /// the `ObjectImpl` trait. + pub fn class_init<T: ObjectImpl>(&mut self) { + if <T as ObjectImpl>::UNPARENT.is_some() { + self.unparent = Some(rust_unparent_fn::<T>); + } + } +} + +unsafe impl ObjectType for Object { + type Class = ObjectClass; + const TYPE_NAME: &'static CStr = + unsafe { CStr::from_bytes_with_nul_unchecked(bindings::TYPE_OBJECT) }; +} + +/// A reference-counted pointer to a QOM object. +/// +/// `Owned<T>` wraps `T` with automatic reference counting. It increases the +/// reference count when created via [`Owned::from`] or cloned, and decreases +/// it when dropped. This ensures that the reference count remains elevated +/// as long as any `Owned<T>` references to it exist. +/// +/// `Owned<T>` can be used for two reasons: +/// * because the lifetime of the QOM object is unknown and someone else could +/// take a reference (similar to `Arc<T>`, for example): in this case, the +/// object can escape and outlive the Rust struct that contains the `Owned<T>` +/// field; +/// +/// * to ensure that the object stays alive until after `Drop::drop` is called +/// on the Rust struct: in this case, the object will always die together with +/// the Rust struct that contains the `Owned<T>` field. +/// +/// Child properties are an example of the second case: in C, an object that +/// is created with `object_initialize_child` will die *before* +/// `instance_finalize` is called, whereas Rust expects the struct to have valid +/// contents when `Drop::drop` is called. Therefore Rust structs that have +/// child properties need to keep a reference to the child object. Right now +/// this can be done with `Owned<T>`; in the future one might have a separate +/// `Child<'parent, T>` smart pointer that keeps a reference to a `T`, like +/// `Owned`, but does not allow cloning. +/// +/// Note that dropping an `Owned<T>` requires the big QEMU lock to be taken. +#[repr(transparent)] +#[derive(PartialEq, Eq, Hash, PartialOrd, Ord)] +pub struct Owned<T: ObjectType>(NonNull<T>); + +// The following rationale for safety is taken from Linux's kernel::sync::Arc. + +// SAFETY: It is safe to send `Owned<T>` to another thread when the underlying +// `T` is `Sync` because it effectively means sharing `&T` (which is safe +// because `T` is `Sync`); additionally, it needs `T` to be `Send` because any +// thread that has an `Owned<T>` may ultimately access `T` using a +// mutable reference when the reference count reaches zero and `T` is dropped. +unsafe impl<T: ObjectType + Send + Sync> Send for Owned<T> {} + +// SAFETY: It is safe to send `&Owned<T>` to another thread when the underlying +// `T` is `Sync` because it effectively means sharing `&T` (which is safe +// because `T` is `Sync`); additionally, it needs `T` to be `Send` because any +// thread that has a `&Owned<T>` may clone it and get an `Owned<T>` on that +// thread, so the thread may ultimately access `T` using a mutable reference +// when the reference count reaches zero and `T` is dropped. 
+unsafe impl<T: ObjectType + Sync + Send> Sync for Owned<T> {} + +impl<T: ObjectType> Owned<T> { + /// Convert a raw C pointer into an owned reference to the QOM + /// object it points to. The object's reference count will be + /// decreased when the `Owned` is dropped. + /// + /// # Panics + /// + /// Panics if `ptr` is NULL. + /// + /// # Safety + /// + /// The caller must indeed own a reference to the QOM object. + /// The object must not be embedded in another unless the outer + /// object is guaranteed to have a longer lifetime. + /// + /// A raw pointer obtained via [`Owned::into_raw()`] can always be passed + /// back to `from_raw()` (assuming the original `Owned` was valid!), + /// since the owned reference remains there between the calls to + /// `into_raw()` and `from_raw()`. + pub unsafe fn from_raw(ptr: *const T) -> Self { + // SAFETY NOTE: while NonNull requires a mutable pointer, only + // Deref is implemented so the pointer passed to from_raw + // remains const + Owned(NonNull::new(ptr.cast_mut()).unwrap()) + } + + /// Obtain a raw C pointer from a reference. `src` is consumed + /// and the reference is leaked. + #[allow(clippy::missing_const_for_fn)] + pub fn into_raw(src: Owned<T>) -> *mut T { + let src = ManuallyDrop::new(src); + src.0.as_ptr() + } + + /// Increase the reference count of a QOM object and return + /// a new owned reference to it. + /// + /// # Safety + /// + /// The object must not be embedded in another, unless the outer + /// object is guaranteed to have a longer lifetime. + pub unsafe fn from(obj: &T) -> Self { + unsafe { + object_ref(obj.as_object_mut_ptr().cast::<c_void>()); + + // SAFETY NOTE: while NonNull requires a mutable pointer, only + // Deref is implemented so the reference passed to from_raw + // remains shared + Owned(NonNull::new_unchecked(obj.as_mut_ptr())) + } + } +} + +impl<T: ObjectType> Clone for Owned<T> { + fn clone(&self) -> Self { + // SAFETY: creation method is unsafe; whoever calls it has + // responsibility that the pointer is valid, and remains valid + // throughout the lifetime of the `Owned<T>` and its clones. + unsafe { Owned::from(self.deref()) } + } +} + +impl<T: ObjectType> Deref for Owned<T> { + type Target = T; + + fn deref(&self) -> &Self::Target { + // SAFETY: creation method is unsafe; whoever calls it has + // responsibility that the pointer is valid, and remains valid + // throughout the lifetime of the `Owned<T>` and its clones. + // With that guarantee, reference counting ensures that + // the object remains alive. + unsafe { &*self.0.as_ptr() } + } +} +impl<T: ObjectType> ObjectDeref for Owned<T> {} + +impl<T: ObjectType> Drop for Owned<T> { + fn drop(&mut self) { + assert!(bql_locked()); + // SAFETY: creation method is unsafe, and whoever calls it has + // responsibility that the pointer is valid, and remains valid + // throughout the lifetime of the `Owned<T>` and its clones. + unsafe { + object_unref(self.as_object_mut_ptr().cast::<c_void>()); + } + } +} + +impl<T: IsA<Object>> fmt::Debug for Owned<T> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.deref().debug_fmt(f) + } +} + +/// Trait for class methods exposed by the Object class. The methods can be +/// called on all objects that have the trait `IsA<Object>`. 
+/// +/// The trait should only be used through the blanket implementation, +/// which guarantees safety via `IsA` +pub trait ObjectClassMethods: IsA<Object> { + /// Return a new reference counted instance of this class + fn new() -> Owned<Self> { + assert!(bql_locked()); + // SAFETY: the object created by object_new is allocated on + // the heap and has a reference count of 1 + unsafe { + let raw_obj = object_new(Self::TYPE_NAME.as_ptr()); + let obj = Object::from_raw(raw_obj).unsafe_cast::<Self>(); + Owned::from_raw(obj) + } + } +} + +/// Trait for methods exposed by the Object class. The methods can be +/// called on all objects that have the trait `IsA<Object>`. +/// +/// The trait should only be used through the blanket implementation, +/// which guarantees safety via `IsA` +pub trait ObjectMethods: ObjectDeref +where + Self::Target: IsA<Object>, +{ + /// Return the name of the type of `self` + fn typename(&self) -> std::borrow::Cow<'_, str> { + let obj = self.upcast::<Object>(); + // SAFETY: safety of this is the requirement for implementing IsA + // The result of the C API has static lifetime + unsafe { + let p = object_get_typename(obj.as_mut_ptr()); + CStr::from_ptr(p).to_string_lossy() + } + } + + fn get_class(&self) -> &'static <Self::Target as ObjectType>::Class { + let obj = self.upcast::<Object>(); + + // SAFETY: all objects can call object_get_class; the actual class + // type is guaranteed by the implementation of `ObjectType` and + // `ObjectImpl`. + let klass: &'static <Self::Target as ObjectType>::Class = + unsafe { &*object_get_class(obj.as_mut_ptr()).cast() }; + + klass + } + + /// Convenience function for implementing the Debug trait + fn debug_fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_tuple(&self.typename()) + .field(&(self as *const Self)) + .finish() + } +} + +impl<T> ObjectClassMethods for T where T: IsA<Object> {} +impl<R: ObjectDeref> ObjectMethods for R where R::Target: IsA<Object> {} diff --git a/rust/qemu-api/src/sysbus.rs b/rust/qemu-api/src/sysbus.rs new file mode 100644 index 0000000..e92502a --- /dev/null +++ b/rust/qemu-api/src/sysbus.rs @@ -0,0 +1,122 @@ +// Copyright 2024 Red Hat, Inc. +// Author(s): Paolo Bonzini <pbonzini@redhat.com> +// SPDX-License-Identifier: GPL-2.0-or-later + +//! Bindings to access `sysbus` functionality from Rust. + +use std::{ffi::CStr, ptr::addr_of_mut}; + +pub use bindings::SysBusDeviceClass; + +use crate::{ + bindings, + cell::{bql_locked, Opaque}, + irq::{IRQState, InterruptSource}, + memory::MemoryRegion, + prelude::*, + qdev::{DeviceImpl, DeviceState}, + qom::Owned, +}; + +/// A safe wrapper around [`bindings::SysBusDevice`]. +#[repr(transparent)] +#[derive(Debug, qemu_api_macros::Wrapper)] +pub struct SysBusDevice(Opaque<bindings::SysBusDevice>); + +unsafe impl Send for SysBusDevice {} +unsafe impl Sync for SysBusDevice {} + +unsafe impl ObjectType for SysBusDevice { + type Class = SysBusDeviceClass; + const TYPE_NAME: &'static CStr = + unsafe { CStr::from_bytes_with_nul_unchecked(bindings::TYPE_SYS_BUS_DEVICE) }; +} +qom_isa!(SysBusDevice: DeviceState, Object); + +// TODO: add virtual methods +pub trait SysBusDeviceImpl: DeviceImpl + IsA<SysBusDevice> {} + +impl SysBusDeviceClass { + /// Fill in the virtual methods of `SysBusDeviceClass` based on the + /// definitions in the `SysBusDeviceImpl` trait. + pub fn class_init<T: SysBusDeviceImpl>(self: &mut SysBusDeviceClass) { + self.parent_class.class_init::<T>(); + } +} + +/// Trait for methods of [`SysBusDevice`] and its subclasses. 
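+///
+/// A sketch of typical usage from device initialization code, assuming a
+/// hypothetical device with `iomem` and `irq` fields:
+///
+/// ```ignore
+/// // MMIO region 0 and output IRQ 0 are defined by the order of these calls.
+/// self.init_mmio(&self.iomem);
+/// self.init_irq(&self.irq);
+/// ```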
+pub trait SysBusDeviceMethods: ObjectDeref +where + Self::Target: IsA<SysBusDevice>, +{ + /// Expose a memory region to the board so that it can give it an address + /// in guest memory. Note that the ordering of calls to `init_mmio` is + /// important, since whoever creates the sysbus device will refer to the + /// region with a number that corresponds to the order of calls to + /// `init_mmio`. + fn init_mmio(&self, iomem: &MemoryRegion) { + assert!(bql_locked()); + unsafe { + bindings::sysbus_init_mmio(self.upcast().as_mut_ptr(), iomem.as_mut_ptr()); + } + } + + /// Expose an interrupt source outside the device as a qdev GPIO output. + /// Note that the ordering of calls to `init_irq` is important, since + /// whoever creates the sysbus device will refer to the interrupts with + /// a number that corresponds to the order of calls to `init_irq`. + fn init_irq(&self, irq: &InterruptSource) { + assert!(bql_locked()); + unsafe { + bindings::sysbus_init_irq(self.upcast().as_mut_ptr(), irq.as_ptr()); + } + } + + // TODO: do we want a type like GuestAddress here? + fn mmio_addr(&self, id: u32) -> Option<u64> { + assert!(bql_locked()); + // SAFETY: the BQL ensures that no one else writes to sbd.mmio[], and + // the SysBusDevice must be initialized to get an IsA<SysBusDevice>. + let sbd = unsafe { *self.upcast().as_ptr() }; + let id: usize = id.try_into().unwrap(); + if sbd.mmio[id].memory.is_null() { + None + } else { + Some(sbd.mmio[id].addr) + } + } + + // TODO: do we want a type like GuestAddress here? + fn mmio_map(&self, id: u32, addr: u64) { + assert!(bql_locked()); + let id: i32 = id.try_into().unwrap(); + unsafe { + bindings::sysbus_mmio_map(self.upcast().as_mut_ptr(), id, addr); + } + } + + // Owned<> is used here because sysbus_connect_irq (via + // object_property_set_link) adds a reference to the IRQState, + // which can prolong its life + fn connect_irq(&self, id: u32, irq: &Owned<IRQState>) { + assert!(bql_locked()); + let id: i32 = id.try_into().unwrap(); + let irq: &IRQState = irq; + unsafe { + bindings::sysbus_connect_irq(self.upcast().as_mut_ptr(), id, irq.as_mut_ptr()); + } + } + + fn sysbus_realize(&self) { + // TODO: return an Error + assert!(bql_locked()); + unsafe { + bindings::sysbus_realize( + self.upcast().as_mut_ptr(), + addr_of_mut!(bindings::error_fatal), + ); + } + } +} + +impl<R: ObjectDeref> SysBusDeviceMethods for R where R::Target: IsA<SysBusDevice> {} diff --git a/rust/qemu-api/src/timer.rs b/rust/qemu-api/src/timer.rs new file mode 100644 index 0000000..0a2d111 --- /dev/null +++ b/rust/qemu-api/src/timer.rs @@ -0,0 +1,125 @@ +// Copyright (C) 2024 Intel Corporation. +// Author(s): Zhao Liu <zhao1.liu@intel.com> +// SPDX-License-Identifier: GPL-2.0-or-later + +use std::{ + ffi::{c_int, c_void}, + pin::Pin, +}; + +use crate::{ + bindings::{self, qemu_clock_get_ns, timer_del, timer_init_full, timer_mod, QEMUClockType}, + callbacks::FnCall, + cell::Opaque, +}; + +/// A safe wrapper around [`bindings::QEMUTimer`]. 
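+///
+/// # Examples
+///
+/// A minimal sketch of arming a timer; the pinned `timer` binding, the
+/// `MyState` type, its `on_timeout` callback and the `state` reference passed
+/// as the opaque are all placeholders:
+///
+/// ```ignore
+/// // `timer` is assumed to be a `Pin<&mut Timer>` owned by the device state.
+/// timer.as_mut().init_full(None, CLOCK_VIRTUAL, Timer::NS, 0, MyState::on_timeout, state);
+/// timer.modify(CLOCK_VIRTUAL.get_ns() + NANOSECONDS_PER_SECOND);
+/// ```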
+#[repr(transparent)] +#[derive(Debug, qemu_api_macros::Wrapper)] +pub struct Timer(Opaque<bindings::QEMUTimer>); + +unsafe impl Send for Timer {} +unsafe impl Sync for Timer {} + +#[repr(transparent)] +#[derive(qemu_api_macros::Wrapper)] +pub struct TimerListGroup(Opaque<bindings::QEMUTimerListGroup>); + +unsafe impl Send for TimerListGroup {} +unsafe impl Sync for TimerListGroup {} + +impl Timer { + pub const MS: u32 = bindings::SCALE_MS; + pub const US: u32 = bindings::SCALE_US; + pub const NS: u32 = bindings::SCALE_NS; + + /// Create a `Timer` struct without initializing it. + /// + /// # Safety + /// + /// The timer must be initialized before it is armed with + /// [`modify`](Self::modify). + pub unsafe fn new() -> Self { + // SAFETY: requirements relayed to callers of Timer::new + Self(unsafe { Opaque::zeroed() }) + } + + /// Create a new timer with the given attributes. + pub fn init_full<'timer, 'opaque: 'timer, T, F>( + self: Pin<&'timer mut Self>, + timer_list_group: Option<&TimerListGroup>, + clk_type: ClockType, + scale: u32, + attributes: u32, + _cb: F, + opaque: &'opaque T, + ) where + F: for<'a> FnCall<(&'a T,)>, + { + let _: () = F::ASSERT_IS_SOME; + + /// timer expiration callback + unsafe extern "C" fn rust_timer_handler<T, F: for<'a> FnCall<(&'a T,)>>( + opaque: *mut c_void, + ) { + // SAFETY: the opaque was passed as a reference to `T`. + F::call((unsafe { &*(opaque.cast::<T>()) },)) + } + + let timer_cb: unsafe extern "C" fn(*mut c_void) = rust_timer_handler::<T, F>; + + // SAFETY: the opaque outlives the timer + unsafe { + timer_init_full( + self.as_mut_ptr(), + if let Some(g) = timer_list_group { + g as *const TimerListGroup as *mut _ + } else { + ::core::ptr::null_mut() + }, + clk_type.id, + scale as c_int, + attributes as c_int, + Some(timer_cb), + (opaque as *const T).cast::<c_void>().cast_mut(), + ) + } + } + + pub fn modify(&self, expire_time: u64) { + // SAFETY: the only way to obtain a Timer safely is via methods that + // take a Pin<&mut Self>, therefore the timer is pinned + unsafe { timer_mod(self.as_mut_ptr(), expire_time as i64) } + } + + pub fn delete(&self) { + // SAFETY: the only way to obtain a Timer safely is via methods that + // take a Pin<&mut Self>, therefore the timer is pinned + unsafe { timer_del(self.as_mut_ptr()) } + } +} + +// FIXME: use something like PinnedDrop from the pinned_init crate +impl Drop for Timer { + fn drop(&mut self) { + self.delete() + } +} + +pub struct ClockType { + id: QEMUClockType, +} + +impl ClockType { + pub fn get_ns(&self) -> u64 { + // SAFETY: cannot be created outside this module, therefore id + // is valid + (unsafe { qemu_clock_get_ns(self.id) }) as u64 + } +} + +pub const CLOCK_VIRTUAL: ClockType = ClockType { + id: QEMUClockType::QEMU_CLOCK_VIRTUAL, +}; + +pub const NANOSECONDS_PER_SECOND: u64 = 1000000000; diff --git a/rust/qemu-api/src/uninit.rs b/rust/qemu-api/src/uninit.rs new file mode 100644 index 0000000..04123b4 --- /dev/null +++ b/rust/qemu-api/src/uninit.rs @@ -0,0 +1,85 @@ +//! Access fields of a [`MaybeUninit`] + +use std::{ + mem::MaybeUninit, + ops::{Deref, DerefMut}, +}; + +pub struct MaybeUninitField<'a, T, U> { + parent: &'a mut MaybeUninit<T>, + child: *mut U, +} + +impl<'a, T, U> MaybeUninitField<'a, T, U> { + #[doc(hidden)] + pub fn new(parent: &'a mut MaybeUninit<T>, child: *mut U) -> Self { + MaybeUninitField { parent, child } + } + + /// Return a constant pointer to the containing object of the field. 
+ /// + /// Because the `MaybeUninitField` remembers the containing object, + /// it is possible to use it in foreign APIs that initialize the + /// child. + pub fn parent(f: &Self) -> *const T { + f.parent.as_ptr() + } + + /// Return a mutable pointer to the containing object. + /// + /// Because the `MaybeUninitField` remembers the containing object, + /// it is possible to use it in foreign APIs that initialize the + /// child. + pub fn parent_mut(f: &mut Self) -> *mut T { + f.parent.as_mut_ptr() + } +} + +impl<'a, T, U> Deref for MaybeUninitField<'a, T, U> { + type Target = MaybeUninit<U>; + + fn deref(&self) -> &MaybeUninit<U> { + // SAFETY: self.child was obtained by dereferencing a valid mutable + // reference; the content of the memory may be invalid or uninitialized + // but MaybeUninit<_> makes no assumption on it + unsafe { &*(self.child.cast()) } + } +} + +impl<'a, T, U> DerefMut for MaybeUninitField<'a, T, U> { + fn deref_mut(&mut self) -> &mut MaybeUninit<U> { + // SAFETY: self.child was obtained by dereferencing a valid mutable + // reference; the content of the memory may be invalid or uninitialized + // but MaybeUninit<_> makes no assumption on it + unsafe { &mut *(self.child.cast()) } + } +} + +/// ``` +/// #[derive(Debug)] +/// struct S { +/// x: u32, +/// y: u32, +/// } +/// +/// # use std::mem::MaybeUninit; +/// # use qemu_api::{assert_match, uninit_field_mut}; +/// +/// let mut s: MaybeUninit<S> = MaybeUninit::zeroed(); +/// uninit_field_mut!(s, x).write(5); +/// let s = unsafe { s.assume_init() }; +/// assert_match!(s, S { x: 5, y: 0 }); +/// ``` +#[macro_export] +macro_rules! uninit_field_mut { + ($container:expr, $($field:tt)+) => {{ + let container__: &mut ::std::mem::MaybeUninit<_> = &mut $container; + let container_ptr__ = container__.as_mut_ptr(); + + // SAFETY: the container is not used directly, only through a MaybeUninit<>, + // so the safety is delegated to the caller and to final invocation of + // assume_init() + let target__ = unsafe { std::ptr::addr_of_mut!((*container_ptr__).$($field)+) }; + $crate::uninit::MaybeUninitField::new(container__, target__) + }}; +} diff --git a/rust/qemu-api/src/vmstate.rs b/rust/qemu-api/src/vmstate.rs new file mode 100644 index 0000000..812f390 --- /dev/null +++ b/rust/qemu-api/src/vmstate.rs @@ -0,0 +1,604 @@ +// Copyright 2024, Linaro Limited +// Author(s): Manos Pitsidianakis <manos.pitsidianakis@linaro.org> +// SPDX-License-Identifier: GPL-2.0-or-later + +//! Helper macros to declare migration state for device models. +//! +//! This module includes four families of macros: +//! +//! * [`vmstate_unused!`](crate::vmstate_unused) and +//! [`vmstate_of!`](crate::vmstate_of), which are used to express the +//! migration format for a struct. This is based on the [`VMState`] trait, +//! which is defined by all migratable types. +//! +//! * [`impl_vmstate_forward`](crate::impl_vmstate_forward) and +//! [`impl_vmstate_bitsized`](crate::impl_vmstate_bitsized), which help with +//! the definition of the [`VMState`] trait (respectively for transparent +//! structs and for `bilge`-defined types) +//! +//! * helper macros to declare a device model state struct, in particular +//! [`vmstate_subsections`](crate::vmstate_subsections) and +//! [`vmstate_fields`](crate::vmstate_fields). +//! +//! * direct equivalents to the C macros declared in +//! `include/migration/vmstate.h`. These are not type-safe and only provide +//! functionality that is missing from `vmstate_of!`. 
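+//!
+//! As a sketch, for a hypothetical device state `MyState` with fields
+//! `flag: bool` and `regs: [u32; 4]`, a migration description could be
+//! written as follows (the `VMStateDescription` field names come from the
+//! generated bindings):
+//!
+//! ```ignore
+//! static VMSTATE_MYSTATE: VMStateDescription = VMStateDescription {
+//!     name: c"mystate".as_ptr(),
+//!     version_id: 1,
+//!     minimum_version_id: 1,
+//!     fields: vmstate_fields! {
+//!         vmstate_of!(MyState, flag),
+//!         vmstate_of!(MyState, regs),
+//!     },
+//!     ..Zeroable::ZERO
+//! };
+//! ```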
+ +use core::{marker::PhantomData, mem, ptr::NonNull}; +use std::ffi::{c_int, c_void}; + +pub use crate::bindings::{VMStateDescription, VMStateField}; +use crate::{ + bindings::VMStateFlags, callbacks::FnCall, prelude::*, qom::Owned, zeroable::Zeroable, +}; + +/// This macro is used to call a function with a generic argument bound +/// to the type of a field. The function must take a +/// [`PhantomData`]`<T>` argument; `T` is the type of +/// field `$field` in the `$typ` type. +/// +/// # Examples +/// +/// ``` +/// # use qemu_api::call_func_with_field; +/// # use core::marker::PhantomData; +/// const fn size_of_field<T>(_: PhantomData<T>) -> usize { +/// std::mem::size_of::<T>() +/// } +/// +/// struct Foo { +/// x: u16, +/// }; +/// // calls size_of_field::<u16>() +/// assert_eq!(call_func_with_field!(size_of_field, Foo, x), 2); +/// ``` +#[macro_export] +macro_rules! call_func_with_field { + // Based on the answer by user steffahn (Frank Steffahn) at + // https://users.rust-lang.org/t/inferring-type-of-field/122857 + // and used under MIT license + ($func:expr, $typ:ty, $($field:tt).+) => { + $func(loop { + #![allow(unreachable_code)] + const fn phantom__<T>(_: &T) -> ::core::marker::PhantomData<T> { ::core::marker::PhantomData } + // Unreachable code is exempt from checks on uninitialized values. + // Use that trick to infer the type of this PhantomData. + break ::core::marker::PhantomData; + break phantom__(&{ let value__: $typ; value__.$($field).+ }); + }) + }; +} + +/// Workaround for lack of `const_refs_static`: references to global variables +/// can be included in a `static`, but not in a `const`; unfortunately, this +/// is exactly what would go in the `VMStateField`'s `info` member. +/// +/// This enum contains the contents of the `VMStateField`'s `info` member, +/// but as an `enum` instead of a pointer. +#[allow(non_camel_case_types)] +pub enum VMStateFieldType { + null, + vmstate_info_bool, + vmstate_info_int8, + vmstate_info_int16, + vmstate_info_int32, + vmstate_info_int64, + vmstate_info_uint8, + vmstate_info_uint16, + vmstate_info_uint32, + vmstate_info_uint64, + vmstate_info_timer, +} + +/// Workaround for lack of `const_refs_static`. Converts a `VMStateFieldType` +/// to a `*const VMStateInfo`, for inclusion in a `VMStateField`. +#[macro_export] +macro_rules! 
info_enum_to_ref { + ($e:expr) => { + unsafe { + match $e { + $crate::vmstate::VMStateFieldType::null => ::core::ptr::null(), + $crate::vmstate::VMStateFieldType::vmstate_info_bool => { + ::core::ptr::addr_of!($crate::bindings::vmstate_info_bool) + } + $crate::vmstate::VMStateFieldType::vmstate_info_int8 => { + ::core::ptr::addr_of!($crate::bindings::vmstate_info_int8) + } + $crate::vmstate::VMStateFieldType::vmstate_info_int16 => { + ::core::ptr::addr_of!($crate::bindings::vmstate_info_int16) + } + $crate::vmstate::VMStateFieldType::vmstate_info_int32 => { + ::core::ptr::addr_of!($crate::bindings::vmstate_info_int32) + } + $crate::vmstate::VMStateFieldType::vmstate_info_int64 => { + ::core::ptr::addr_of!($crate::bindings::vmstate_info_int64) + } + $crate::vmstate::VMStateFieldType::vmstate_info_uint8 => { + ::core::ptr::addr_of!($crate::bindings::vmstate_info_uint8) + } + $crate::vmstate::VMStateFieldType::vmstate_info_uint16 => { + ::core::ptr::addr_of!($crate::bindings::vmstate_info_uint16) + } + $crate::vmstate::VMStateFieldType::vmstate_info_uint32 => { + ::core::ptr::addr_of!($crate::bindings::vmstate_info_uint32) + } + $crate::vmstate::VMStateFieldType::vmstate_info_uint64 => { + ::core::ptr::addr_of!($crate::bindings::vmstate_info_uint64) + } + $crate::vmstate::VMStateFieldType::vmstate_info_timer => { + ::core::ptr::addr_of!($crate::bindings::vmstate_info_timer) + } + } + } + }; +} + +/// A trait for types that can be included in a device's migration stream. It +/// provides the base contents of a `VMStateField` (minus the name and offset). +/// +/// # Safety +/// +/// The contents of this trait go straight into structs that are parsed by C +/// code and used to introspect into other structs. Generally, you don't need +/// to implement it except via macros that do it for you, such as +/// `impl_vmstate_bitsized!`. +pub unsafe trait VMState { + /// The `info` member of a `VMStateField` is a pointer and as such cannot + /// yet be included in the [`BASE`](VMState::BASE) associated constant; + /// this is only allowed by Rust 1.83.0 and newer. For now, include the + /// member as an enum which is stored in a separate constant. + const SCALAR_TYPE: VMStateFieldType = VMStateFieldType::null; + + /// The base contents of a `VMStateField` (minus the name and offset) for + /// the type that is implementing the trait. + const BASE: VMStateField; + + /// A flag that is added to another field's `VMStateField` to specify the + /// length's type in a variable-sized array. If this is not a supported + /// type for the length (i.e. if it is not `u8`, `u16`, `u32`), using it + /// in a call to [`vmstate_of!`](crate::vmstate_of) will cause a + /// compile-time error. + const VARRAY_FLAG: VMStateFlags = { + panic!("invalid type for variable-sized array"); + }; +} + +/// Internal utility function to retrieve a type's `VMStateFieldType`; +/// used by [`vmstate_of!`](crate::vmstate_of). +pub const fn vmstate_scalar_type<T: VMState>(_: PhantomData<T>) -> VMStateFieldType { + T::SCALAR_TYPE +} + +/// Internal utility function to retrieve a type's `VMStateField`; +/// used by [`vmstate_of!`](crate::vmstate_of). +pub const fn vmstate_base<T: VMState>(_: PhantomData<T>) -> VMStateField { + T::BASE +} + +/// Internal utility function to retrieve a type's `VMStateFlags` when it +/// is used as the element count of a `VMSTATE_VARRAY`; used by +/// [`vmstate_of!`](crate::vmstate_of). 
+pub const fn vmstate_varray_flag<T: VMState>(_: PhantomData<T>) -> VMStateFlags { + T::VARRAY_FLAG +} + +/// Return the `VMStateField` for a field of a struct. The field must be +/// visible in the current scope. +/// +/// Only a limited set of types is supported out of the box: +/// * scalar types (integer and `bool`) +/// * the C struct `QEMUTimer` +/// * a transparent wrapper for any of the above (`Cell`, `UnsafeCell`, +/// [`BqlCell`], [`BqlRefCell`] +/// * a raw pointer to any of the above +/// * a `NonNull` pointer, a `Box` or an [`Owned`] for any of the above +/// * an array of any of the above +/// +/// In order to support other types, the trait `VMState` must be implemented +/// for them. The macros +/// [`impl_vmstate_bitsized!`](crate::impl_vmstate_bitsized) +/// and [`impl_vmstate_forward!`](crate::impl_vmstate_forward) help with this. +#[macro_export] +macro_rules! vmstate_of { + ($struct_name:ty, $field_name:ident $([0 .. $num:ident $(* $factor:expr)?])? $(, $test_fn:expr)? $(,)?) => { + $crate::bindings::VMStateField { + name: ::core::concat!(::core::stringify!($field_name), "\0") + .as_bytes() + .as_ptr() as *const ::std::os::raw::c_char, + offset: ::std::mem::offset_of!($struct_name, $field_name), + $(num_offset: ::std::mem::offset_of!($struct_name, $num),)? + $(field_exists: $crate::vmstate_exist_fn!($struct_name, $test_fn),)? + // The calls to `call_func_with_field!` are the magic that + // computes most of the VMStateField from the type of the field. + info: $crate::info_enum_to_ref!($crate::call_func_with_field!( + $crate::vmstate::vmstate_scalar_type, + $struct_name, + $field_name + )), + ..$crate::call_func_with_field!( + $crate::vmstate::vmstate_base, + $struct_name, + $field_name + )$(.with_varray_flag($crate::call_func_with_field!( + $crate::vmstate::vmstate_varray_flag, + $struct_name, + $num)) + $(.with_varray_multiply($factor))?)? + } + }; +} + +impl VMStateFlags { + const VMS_VARRAY_FLAGS: VMStateFlags = VMStateFlags( + VMStateFlags::VMS_VARRAY_INT32.0 + | VMStateFlags::VMS_VARRAY_UINT8.0 + | VMStateFlags::VMS_VARRAY_UINT16.0 + | VMStateFlags::VMS_VARRAY_UINT32.0, + ); +} + +// Add a couple builder-style methods to VMStateField, allowing +// easy derivation of VMStateField constants from other types. +impl VMStateField { + #[must_use] + pub const fn with_version_id(mut self, version_id: i32) -> Self { + assert!(version_id >= 0); + self.version_id = version_id; + self + } + + #[must_use] + pub const fn with_array_flag(mut self, num: usize) -> Self { + assert!(num <= 0x7FFF_FFFFusize); + assert!((self.flags.0 & VMStateFlags::VMS_ARRAY.0) == 0); + assert!((self.flags.0 & VMStateFlags::VMS_VARRAY_FLAGS.0) == 0); + if (self.flags.0 & VMStateFlags::VMS_POINTER.0) != 0 { + self.flags = VMStateFlags(self.flags.0 & !VMStateFlags::VMS_POINTER.0); + self.flags = VMStateFlags(self.flags.0 | VMStateFlags::VMS_ARRAY_OF_POINTER.0); + // VMS_ARRAY_OF_POINTER flag stores the size of pointer. + // FIXME: *const, *mut, NonNull and Box<> have the same size as usize. + // Resize if more smart pointers are supported. 
+ self.size = std::mem::size_of::<usize>(); + } + self.flags = VMStateFlags(self.flags.0 & !VMStateFlags::VMS_SINGLE.0); + self.flags = VMStateFlags(self.flags.0 | VMStateFlags::VMS_ARRAY.0); + self.num = num as i32; + self + } + + #[must_use] + pub const fn with_pointer_flag(mut self) -> Self { + assert!((self.flags.0 & VMStateFlags::VMS_POINTER.0) == 0); + self.flags = VMStateFlags(self.flags.0 | VMStateFlags::VMS_POINTER.0); + self + } + + #[must_use] + pub const fn with_varray_flag_unchecked(mut self, flag: VMStateFlags) -> VMStateField { + self.flags = VMStateFlags(self.flags.0 & !VMStateFlags::VMS_ARRAY.0); + self.flags = VMStateFlags(self.flags.0 | flag.0); + self.num = 0; // varray uses num_offset instead of num. + self + } + + #[must_use] + #[allow(unused_mut)] + pub const fn with_varray_flag(mut self, flag: VMStateFlags) -> VMStateField { + assert!((self.flags.0 & VMStateFlags::VMS_ARRAY.0) != 0); + self.with_varray_flag_unchecked(flag) + } + + #[must_use] + pub const fn with_varray_multiply(mut self, num: u32) -> VMStateField { + assert!(num <= 0x7FFF_FFFFu32); + self.flags = VMStateFlags(self.flags.0 | VMStateFlags::VMS_MULTIPLY_ELEMENTS.0); + self.num = num as i32; + self + } +} + +/// This macro can be used (by just passing it a type) to forward the `VMState` +/// trait to the first field of a tuple. This is a workaround for lack of +/// support of nested [`offset_of`](core::mem::offset_of) until Rust 1.82.0. +/// +/// # Examples +/// +/// ``` +/// # use qemu_api::impl_vmstate_forward; +/// pub struct Fifo([u8; 16]); +/// impl_vmstate_forward!(Fifo); +/// ``` +#[macro_export] +macro_rules! impl_vmstate_forward { + // This is similar to impl_vmstate_transparent below, but it + // uses the same trick as vmstate_of! to obtain the type of + // the first field of the tuple + ($tuple:ty) => { + unsafe impl $crate::vmstate::VMState for $tuple { + const SCALAR_TYPE: $crate::vmstate::VMStateFieldType = + $crate::call_func_with_field!($crate::vmstate::vmstate_scalar_type, $tuple, 0); + const BASE: $crate::bindings::VMStateField = + $crate::call_func_with_field!($crate::vmstate::vmstate_base, $tuple, 0); + } + }; +} + +// Transparent wrappers: just use the internal type + +macro_rules! impl_vmstate_transparent { + ($type:ty where $base:tt: VMState $($where:tt)*) => { + unsafe impl<$base> VMState for $type where $base: VMState $($where)* { + const SCALAR_TYPE: VMStateFieldType = <$base as VMState>::SCALAR_TYPE; + const BASE: VMStateField = VMStateField { + size: mem::size_of::<$type>(), + ..<$base as VMState>::BASE + }; + const VARRAY_FLAG: VMStateFlags = <$base as VMState>::VARRAY_FLAG; + } + }; +} + +impl_vmstate_transparent!(std::cell::Cell<T> where T: VMState); +impl_vmstate_transparent!(std::cell::UnsafeCell<T> where T: VMState); +impl_vmstate_transparent!(std::pin::Pin<T> where T: VMState); +impl_vmstate_transparent!(crate::cell::BqlCell<T> where T: VMState); +impl_vmstate_transparent!(crate::cell::BqlRefCell<T> where T: VMState); +impl_vmstate_transparent!(crate::cell::Opaque<T> where T: VMState); + +#[macro_export] +macro_rules! 
impl_vmstate_bitsized { + ($type:ty) => { + unsafe impl $crate::vmstate::VMState for $type { + const SCALAR_TYPE: $crate::vmstate::VMStateFieldType = + <<<$type as ::bilge::prelude::Bitsized>::ArbitraryInt + as ::bilge::prelude::Number>::UnderlyingType + as $crate::vmstate::VMState>::SCALAR_TYPE; + const BASE: $crate::bindings::VMStateField = + <<<$type as ::bilge::prelude::Bitsized>::ArbitraryInt + as ::bilge::prelude::Number>::UnderlyingType + as $crate::vmstate::VMState>::BASE; + const VARRAY_FLAG: $crate::bindings::VMStateFlags = + <<<$type as ::bilge::prelude::Bitsized>::ArbitraryInt + as ::bilge::prelude::Number>::UnderlyingType + as $crate::vmstate::VMState>::VARRAY_FLAG; + } + }; +} + +// Scalar types using predefined VMStateInfos + +macro_rules! impl_vmstate_scalar { + ($info:ident, $type:ty$(, $varray_flag:ident)?) => { + unsafe impl VMState for $type { + const SCALAR_TYPE: VMStateFieldType = VMStateFieldType::$info; + const BASE: VMStateField = VMStateField { + size: mem::size_of::<$type>(), + flags: VMStateFlags::VMS_SINGLE, + ..Zeroable::ZERO + }; + $(const VARRAY_FLAG: VMStateFlags = VMStateFlags::$varray_flag;)? + } + }; +} + +impl_vmstate_scalar!(vmstate_info_bool, bool); +impl_vmstate_scalar!(vmstate_info_int8, i8); +impl_vmstate_scalar!(vmstate_info_int16, i16); +impl_vmstate_scalar!(vmstate_info_int32, i32); +impl_vmstate_scalar!(vmstate_info_int64, i64); +impl_vmstate_scalar!(vmstate_info_uint8, u8, VMS_VARRAY_UINT8); +impl_vmstate_scalar!(vmstate_info_uint16, u16, VMS_VARRAY_UINT16); +impl_vmstate_scalar!(vmstate_info_uint32, u32, VMS_VARRAY_UINT32); +impl_vmstate_scalar!(vmstate_info_uint64, u64); +impl_vmstate_scalar!(vmstate_info_timer, crate::timer::Timer); + +// Pointer types using the underlying type's VMState plus VMS_POINTER +// Note that references are not supported, though references to cells +// could be allowed. + +macro_rules! impl_vmstate_pointer { + ($type:ty where $base:tt: VMState $($where:tt)*) => { + unsafe impl<$base> VMState for $type where $base: VMState $($where)* { + const SCALAR_TYPE: VMStateFieldType = <T as VMState>::SCALAR_TYPE; + const BASE: VMStateField = <$base as VMState>::BASE.with_pointer_flag(); + } + }; +} + +impl_vmstate_pointer!(*const T where T: VMState); +impl_vmstate_pointer!(*mut T where T: VMState); +impl_vmstate_pointer!(NonNull<T> where T: VMState); + +// Unlike C pointers, Box is always non-null therefore there is no need +// to specify VMS_ALLOC. +impl_vmstate_pointer!(Box<T> where T: VMState); +impl_vmstate_pointer!(Owned<T> where T: VMState + ObjectType); + +// Arrays using the underlying type's VMState plus +// VMS_ARRAY/VMS_ARRAY_OF_POINTER + +unsafe impl<T: VMState, const N: usize> VMState for [T; N] { + const SCALAR_TYPE: VMStateFieldType = <T as VMState>::SCALAR_TYPE; + const BASE: VMStateField = <T as VMState>::BASE.with_array_flag(N); +} + +#[doc(alias = "VMSTATE_UNUSED")] +#[macro_export] +macro_rules! vmstate_unused { + ($size:expr) => {{ + $crate::bindings::VMStateField { + name: c"unused".as_ptr(), + size: $size, + info: unsafe { ::core::ptr::addr_of!($crate::bindings::vmstate_info_unused_buffer) }, + flags: $crate::bindings::VMStateFlags::VMS_BUFFER, + ..$crate::zeroable::Zeroable::ZERO + } + }}; +} + +pub extern "C" fn rust_vms_test_field_exists<T, F: for<'a> FnCall<(&'a T, u8), bool>>( + opaque: *mut c_void, + version_id: c_int, +) -> bool { + // SAFETY: the opaque was passed as a reference to `T`. 
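    // Editorial note: this function is the C-callable trampoline used by
    // `vmstate_exist_fn!` below. The migration core hands back the opaque
    // pointer to the struct being migrated; it is cast back to `&T` and
    // forwarded, together with the version id, to the statically-known
    // callback `F` (a zero-sized `FnCall` implementor).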
+ let owner: &T = unsafe { &*(opaque.cast::<T>()) }; + let version: u8 = version_id.try_into().unwrap(); + F::call((owner, version)) +} + +pub type VMSFieldExistCb = unsafe extern "C" fn( + opaque: *mut std::os::raw::c_void, + version_id: std::os::raw::c_int, +) -> bool; + +#[macro_export] +macro_rules! vmstate_exist_fn { + ($struct_name:ty, $test_fn:expr) => {{ + const fn test_cb_builder__<T, F: for<'a> $crate::callbacks::FnCall<(&'a T, u8), bool>>( + _phantom: ::core::marker::PhantomData<F>, + ) -> $crate::vmstate::VMSFieldExistCb { + let _: () = F::ASSERT_IS_SOME; + $crate::vmstate::rust_vms_test_field_exists::<T, F> + } + + const fn phantom__<T>(_: &T) -> ::core::marker::PhantomData<T> { + ::core::marker::PhantomData + } + Some(test_cb_builder__::<$struct_name, _>(phantom__(&$test_fn))) + }}; +} + +// FIXME: including the `vmsd` field in a `const` is not possible without +// the const_refs_static feature (stabilized in Rust 1.83.0). Without it, +// it is not possible to use VMS_STRUCT in a transparent manner using +// `vmstate_of!`. While VMSTATE_CLOCK can at least try to be type-safe, +// VMSTATE_STRUCT includes $type only for documentation purposes; it +// is checked against $field_name and $struct_name, but not against $vmsd +// which is what really would matter. +#[doc(alias = "VMSTATE_STRUCT")] +#[macro_export] +macro_rules! vmstate_struct { + ($struct_name:ty, $field_name:ident $([0 .. $num:ident $(* $factor:expr)?])?, $vmsd:expr, $type:ty $(, $test_fn:expr)? $(,)?) => { + $crate::bindings::VMStateField { + name: ::core::concat!(::core::stringify!($field_name), "\0") + .as_bytes() + .as_ptr() as *const ::std::os::raw::c_char, + $(num_offset: ::std::mem::offset_of!($struct_name, $num),)? + offset: { + $crate::assert_field_type!($struct_name, $field_name, $type $(, num = $num)?); + ::std::mem::offset_of!($struct_name, $field_name) + }, + size: ::core::mem::size_of::<$type>(), + flags: $crate::bindings::VMStateFlags::VMS_STRUCT, + vmsd: $vmsd, + $(field_exists: $crate::vmstate_exist_fn!($struct_name, $test_fn),)? + ..$crate::zeroable::Zeroable::ZERO + } $(.with_varray_flag_unchecked( + $crate::call_func_with_field!( + $crate::vmstate::vmstate_varray_flag, + $struct_name, + $num + ) + ) + $(.with_varray_multiply($factor))?)? + }; +} + +#[doc(alias = "VMSTATE_CLOCK")] +#[macro_export] +macro_rules! vmstate_clock { + ($struct_name:ty, $field_name:ident $([0 .. $num:ident $(* $factor:expr)?])?) => {{ + $crate::bindings::VMStateField { + name: ::core::concat!(::core::stringify!($field_name), "\0") + .as_bytes() + .as_ptr() as *const ::std::os::raw::c_char, + offset: { + $crate::assert_field_type!( + $struct_name, + $field_name, + $crate::qom::Owned<$crate::qdev::Clock> $(, num = $num)? + ); + ::std::mem::offset_of!($struct_name, $field_name) + }, + size: ::core::mem::size_of::<*const $crate::qdev::Clock>(), + flags: $crate::bindings::VMStateFlags( + $crate::bindings::VMStateFlags::VMS_STRUCT.0 + | $crate::bindings::VMStateFlags::VMS_POINTER.0, + ), + vmsd: unsafe { ::core::ptr::addr_of!($crate::bindings::vmstate_clock) }, + ..$crate::zeroable::Zeroable::ZERO + } $(.with_varray_flag_unchecked( + $crate::call_func_with_field!( + $crate::vmstate::vmstate_varray_flag, + $struct_name, + $num + ) + ) + $(.with_varray_multiply($factor))?)? + }}; +} + +/// Helper macro to declare a list of +/// ([`VMStateField`](`crate::bindings::VMStateField`)) into a static and return +/// a pointer to the array of values it created. +#[macro_export] +macro_rules! 
vmstate_fields { + ($($field:expr),*$(,)*) => {{ + static _FIELDS: &[$crate::bindings::VMStateField] = &[ + $($field),*, + $crate::bindings::VMStateField { + flags: $crate::bindings::VMStateFlags::VMS_END, + ..$crate::zeroable::Zeroable::ZERO + } + ]; + _FIELDS.as_ptr() + }} +} + +#[doc(alias = "VMSTATE_VALIDATE")] +#[macro_export] +macro_rules! vmstate_validate { + ($struct_name:ty, $test_name:expr, $test_fn:expr $(,)?) => { + $crate::bindings::VMStateField { + name: ::std::ffi::CStr::as_ptr($test_name), + field_exists: $crate::vmstate_exist_fn!($struct_name, $test_fn), + flags: $crate::bindings::VMStateFlags( + $crate::bindings::VMStateFlags::VMS_MUST_EXIST.0 + | $crate::bindings::VMStateFlags::VMS_ARRAY.0, + ), + num: 0, // 0 elements: no data, only run test_fn callback + ..$crate::zeroable::Zeroable::ZERO + } + }; +} + +/// A transparent wrapper type for the `subsections` field of +/// [`VMStateDescription`]. +/// +/// This is necessary to be able to declare subsection descriptions as statics, +/// because the only way to implement `Sync` for a foreign type (and `*const` +/// pointers are foreign types in Rust) is to create a wrapper struct and +/// `unsafe impl Sync` for it. +/// +/// This struct is used in the +/// [`vm_state_subsections`](crate::vmstate_subsections) macro implementation. +#[repr(transparent)] +pub struct VMStateSubsectionsWrapper(pub &'static [*const crate::bindings::VMStateDescription]); + +unsafe impl Sync for VMStateSubsectionsWrapper {} + +/// Helper macro to declare a list of subsections ([`VMStateDescription`]) +/// into a static and return a pointer to the array of pointers it created. +#[macro_export] +macro_rules! vmstate_subsections { + ($($subsection:expr),*$(,)*) => {{ + static _SUBSECTIONS: $crate::vmstate::VMStateSubsectionsWrapper = $crate::vmstate::VMStateSubsectionsWrapper(&[ + $({ + static _SUBSECTION: $crate::bindings::VMStateDescription = $subsection; + ::core::ptr::addr_of!(_SUBSECTION) + }),*, + ::core::ptr::null() + ]); + _SUBSECTIONS.0.as_ptr() + }} +} diff --git a/rust/qemu-api/src/zeroable.rs b/rust/qemu-api/src/zeroable.rs new file mode 100644 index 0000000..d8239d0 --- /dev/null +++ b/rust/qemu-api/src/zeroable.rs @@ -0,0 +1,37 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +//! Defines a trait for structs that can be safely initialized with zero bytes. + +/// Encapsulates the requirement that +/// `MaybeUninit::<Self>::zeroed().assume_init()` does not cause undefined +/// behavior. +/// +/// # Safety +/// +/// Do not add this trait to a type unless all-zeroes is a valid value for the +/// type. In particular, raw pointers can be zero, but references and +/// `NonNull<T>` cannot. +pub unsafe trait Zeroable: Default { + /// Return a value of Self whose memory representation consists of all + /// zeroes, with the possible exclusion of padding bytes. 
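    // Editorial note: `ZERO` is what makes the functional-update syntax used
    // throughout this series possible, e.g. (hypothetical field list):
    //
    //     VMStateField { name: c"x".as_ptr(), size: 4, ..Zeroable::ZERO }
    //
    // so that every member that is not spelled out explicitly is left
    // zero-initialized.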
+ const ZERO: Self = unsafe { ::core::mem::MaybeUninit::<Self>::zeroed().assume_init() }; +} + +// bindgen does not derive Default here +#[allow(clippy::derivable_impls)] +impl Default for crate::bindings::VMStateFlags { + fn default() -> Self { + Self(0) + } +} + +unsafe impl Zeroable for crate::bindings::Property__bindgen_ty_1 {} +unsafe impl Zeroable for crate::bindings::Property {} +unsafe impl Zeroable for crate::bindings::VMStateFlags {} +unsafe impl Zeroable for crate::bindings::VMStateField {} +unsafe impl Zeroable for crate::bindings::VMStateDescription {} +unsafe impl Zeroable for crate::bindings::MemoryRegionOps__bindgen_ty_1 {} +unsafe impl Zeroable for crate::bindings::MemoryRegionOps__bindgen_ty_2 {} +unsafe impl Zeroable for crate::bindings::MemoryRegionOps {} +unsafe impl Zeroable for crate::bindings::MemTxAttrs {} +unsafe impl Zeroable for crate::bindings::CharBackend {} diff --git a/rust/qemu-api/tests/tests.rs b/rust/qemu-api/tests/tests.rs new file mode 100644 index 0000000..a658a49 --- /dev/null +++ b/rust/qemu-api/tests/tests.rs @@ -0,0 +1,180 @@ +// Copyright 2024, Linaro Limited +// Author(s): Manos Pitsidianakis <manos.pitsidianakis@linaro.org> +// SPDX-License-Identifier: GPL-2.0-or-later + +use std::{ffi::CStr, ptr::addr_of}; + +use qemu_api::{ + bindings::{module_call_init, module_init_type, qdev_prop_bool}, + cell::{self, BqlCell}, + declare_properties, define_property, + prelude::*, + qdev::{DeviceImpl, DeviceState, Property, ResettablePhasesImpl}, + qom::{ObjectImpl, ParentField}, + sysbus::SysBusDevice, + vmstate::VMStateDescription, + zeroable::Zeroable, +}; + +mod vmstate_tests; + +// Test that macros can compile. +pub static VMSTATE: VMStateDescription = VMStateDescription { + name: c"name".as_ptr(), + unmigratable: true, + ..Zeroable::ZERO +}; + +#[repr(C)] +#[derive(qemu_api_macros::Object)] +pub struct DummyState { + parent: ParentField<DeviceState>, + migrate_clock: bool, +} + +qom_isa!(DummyState: Object, DeviceState); + +pub struct DummyClass { + parent_class: <DeviceState as ObjectType>::Class, +} + +impl DummyClass { + pub fn class_init<T: DeviceImpl>(self: &mut DummyClass) { + self.parent_class.class_init::<T>(); + } +} + +declare_properties! 
{ + DUMMY_PROPERTIES, + define_property!( + c"migrate-clk", + DummyState, + migrate_clock, + unsafe { &qdev_prop_bool }, + bool + ), +} + +unsafe impl ObjectType for DummyState { + type Class = DummyClass; + const TYPE_NAME: &'static CStr = c"dummy"; +} + +impl ObjectImpl for DummyState { + type ParentType = DeviceState; + const ABSTRACT: bool = false; + const CLASS_INIT: fn(&mut DummyClass) = DummyClass::class_init::<Self>; +} + +impl ResettablePhasesImpl for DummyState {} + +impl DeviceImpl for DummyState { + fn properties() -> &'static [Property] { + &DUMMY_PROPERTIES + } + fn vmsd() -> Option<&'static VMStateDescription> { + Some(&VMSTATE) + } +} + +#[repr(C)] +#[derive(qemu_api_macros::Object)] +pub struct DummyChildState { + parent: ParentField<DummyState>, +} + +qom_isa!(DummyChildState: Object, DeviceState, DummyState); + +pub struct DummyChildClass { + parent_class: <DummyState as ObjectType>::Class, +} + +unsafe impl ObjectType for DummyChildState { + type Class = DummyChildClass; + const TYPE_NAME: &'static CStr = c"dummy_child"; +} + +impl ObjectImpl for DummyChildState { + type ParentType = DummyState; + const ABSTRACT: bool = false; + const CLASS_INIT: fn(&mut DummyChildClass) = DummyChildClass::class_init::<Self>; +} + +impl ResettablePhasesImpl for DummyChildState {} +impl DeviceImpl for DummyChildState {} + +impl DummyChildClass { + pub fn class_init<T: DeviceImpl>(self: &mut DummyChildClass) { + self.parent_class.class_init::<T>(); + } +} + +fn init_qom() { + static ONCE: BqlCell<bool> = BqlCell::new(false); + + cell::bql_start_test(); + if !ONCE.get() { + unsafe { + module_call_init(module_init_type::MODULE_INIT_QOM); + } + ONCE.set(true); + } +} + +#[test] +/// Create and immediately drop an instance. +fn test_object_new() { + init_qom(); + drop(DummyState::new()); + drop(DummyChildState::new()); +} + +#[test] +#[allow(clippy::redundant_clone)] +/// Create, clone and then drop an instance. +fn test_clone() { + init_qom(); + let p = DummyState::new(); + assert_eq!(p.clone().typename(), "dummy"); + drop(p); +} + +#[test] +/// Try invoking a method on an object. +fn test_typename() { + init_qom(); + let p = DummyState::new(); + assert_eq!(p.typename(), "dummy"); +} + +// a note on all "cast" tests: usually, especially for downcasts the desired +// class would be placed on the right, for example: +// +// let sbd_ref = p.dynamic_cast::<SysBusDevice>(); +// +// Here I am doing the opposite to check that the resulting type is correct. + +#[test] +#[allow(clippy::shadow_unrelated)] +/// Test casts on shared references. +fn test_cast() { + init_qom(); + let p = DummyState::new(); + let p_ptr: *mut DummyState = p.as_mut_ptr(); + let p_ref: &mut DummyState = unsafe { &mut *p_ptr }; + + let obj_ref: &Object = p_ref.upcast(); + assert_eq!(addr_of!(*obj_ref), p_ptr.cast()); + + let sbd_ref: Option<&SysBusDevice> = obj_ref.dynamic_cast(); + assert!(sbd_ref.is_none()); + + let dev_ref: Option<&DeviceState> = obj_ref.downcast(); + assert_eq!(addr_of!(*dev_ref.unwrap()), p_ptr.cast()); + + // SAFETY: the cast is wrong, but the value is only used for comparison + unsafe { + let sbd_ref: &SysBusDevice = obj_ref.unsafe_cast(); + assert_eq!(addr_of!(*sbd_ref), p_ptr.cast()); + } +} diff --git a/rust/qemu-api/tests/vmstate_tests.rs b/rust/qemu-api/tests/vmstate_tests.rs new file mode 100644 index 0000000..bded836 --- /dev/null +++ b/rust/qemu-api/tests/vmstate_tests.rs @@ -0,0 +1,505 @@ +// Copyright (C) 2025 Intel Corporation. 
+// Author(s): Zhao Liu <zhao1.liu@intel.com> +// SPDX-License-Identifier: GPL-2.0-or-later + +use std::{ + ffi::{c_void, CStr}, + mem::size_of, + ptr::NonNull, + slice, +}; + +use qemu_api::{ + bindings::{ + vmstate_info_bool, vmstate_info_int32, vmstate_info_int64, vmstate_info_int8, + vmstate_info_uint64, vmstate_info_uint8, vmstate_info_unused_buffer, VMStateFlags, + }, + cell::{BqlCell, Opaque}, + impl_vmstate_forward, + vmstate::{VMStateDescription, VMStateField}, + vmstate_fields, vmstate_of, vmstate_struct, vmstate_unused, vmstate_validate, + zeroable::Zeroable, +}; + +const FOO_ARRAY_MAX: usize = 3; + +// =========================== Test VMSTATE_FOOA =========================== +// Test the use cases of the vmstate macro, corresponding to the following C +// macro variants: +// * VMSTATE_FOOA: +// - VMSTATE_U16 +// - VMSTATE_UNUSED +// - VMSTATE_VARRAY_UINT16_UNSAFE +// - VMSTATE_VARRAY_MULTIPLY +#[repr(C)] +#[derive(Default)] +struct FooA { + arr: [u8; FOO_ARRAY_MAX], + num: u16, + arr_mul: [i8; FOO_ARRAY_MAX], + num_mul: u32, + elem: i8, +} + +static VMSTATE_FOOA: VMStateDescription = VMStateDescription { + name: c"foo_a".as_ptr(), + version_id: 1, + minimum_version_id: 1, + fields: vmstate_fields! { + vmstate_of!(FooA, elem), + vmstate_unused!(size_of::<i64>()), + vmstate_of!(FooA, arr[0 .. num]).with_version_id(0), + vmstate_of!(FooA, arr_mul[0 .. num_mul * 16]), + }, + ..Zeroable::ZERO +}; + +#[test] +fn test_vmstate_uint16() { + let foo_fields: &[VMStateField] = unsafe { slice::from_raw_parts(VMSTATE_FOOA.fields, 5) }; + + // 1st VMStateField ("elem") in VMSTATE_FOOA (corresponding to VMSTATE_UINT16) + assert_eq!( + unsafe { CStr::from_ptr(foo_fields[0].name) }.to_bytes_with_nul(), + b"elem\0" + ); + assert_eq!(foo_fields[0].offset, 16); + assert_eq!(foo_fields[0].num_offset, 0); + assert_eq!(foo_fields[0].info, unsafe { &vmstate_info_int8 }); + assert_eq!(foo_fields[0].version_id, 0); + assert_eq!(foo_fields[0].size, 1); + assert_eq!(foo_fields[0].num, 0); + assert_eq!(foo_fields[0].flags, VMStateFlags::VMS_SINGLE); + assert!(foo_fields[0].vmsd.is_null()); + assert!(foo_fields[0].field_exists.is_none()); +} + +#[test] +fn test_vmstate_unused() { + let foo_fields: &[VMStateField] = unsafe { slice::from_raw_parts(VMSTATE_FOOA.fields, 5) }; + + // 2nd VMStateField ("unused") in VMSTATE_FOOA (corresponding to VMSTATE_UNUSED) + assert_eq!( + unsafe { CStr::from_ptr(foo_fields[1].name) }.to_bytes_with_nul(), + b"unused\0" + ); + assert_eq!(foo_fields[1].offset, 0); + assert_eq!(foo_fields[1].num_offset, 0); + assert_eq!(foo_fields[1].info, unsafe { &vmstate_info_unused_buffer }); + assert_eq!(foo_fields[1].version_id, 0); + assert_eq!(foo_fields[1].size, 8); + assert_eq!(foo_fields[1].num, 0); + assert_eq!(foo_fields[1].flags, VMStateFlags::VMS_BUFFER); + assert!(foo_fields[1].vmsd.is_null()); + assert!(foo_fields[1].field_exists.is_none()); +} + +#[test] +fn test_vmstate_varray_uint16_unsafe() { + let foo_fields: &[VMStateField] = unsafe { slice::from_raw_parts(VMSTATE_FOOA.fields, 5) }; + + // 3rd VMStateField ("arr") in VMSTATE_FOOA (corresponding to + // VMSTATE_VARRAY_UINT16_UNSAFE) + assert_eq!( + unsafe { CStr::from_ptr(foo_fields[2].name) }.to_bytes_with_nul(), + b"arr\0" + ); + assert_eq!(foo_fields[2].offset, 0); + assert_eq!(foo_fields[2].num_offset, 4); + assert_eq!(foo_fields[2].info, unsafe { &vmstate_info_uint8 }); + assert_eq!(foo_fields[2].version_id, 0); + assert_eq!(foo_fields[2].size, 1); + assert_eq!(foo_fields[2].num, 0); + assert_eq!(foo_fields[2].flags, 
VMStateFlags::VMS_VARRAY_UINT16); + assert!(foo_fields[2].vmsd.is_null()); + assert!(foo_fields[2].field_exists.is_none()); +} + +#[test] +fn test_vmstate_varray_multiply() { + let foo_fields: &[VMStateField] = unsafe { slice::from_raw_parts(VMSTATE_FOOA.fields, 5) }; + + // 4th VMStateField ("arr_mul") in VMSTATE_FOOA (corresponding to + // VMSTATE_VARRAY_MULTIPLY) + assert_eq!( + unsafe { CStr::from_ptr(foo_fields[3].name) }.to_bytes_with_nul(), + b"arr_mul\0" + ); + assert_eq!(foo_fields[3].offset, 6); + assert_eq!(foo_fields[3].num_offset, 12); + assert_eq!(foo_fields[3].info, unsafe { &vmstate_info_int8 }); + assert_eq!(foo_fields[3].version_id, 0); + assert_eq!(foo_fields[3].size, 1); + assert_eq!(foo_fields[3].num, 16); + assert_eq!( + foo_fields[3].flags.0, + VMStateFlags::VMS_VARRAY_UINT32.0 | VMStateFlags::VMS_MULTIPLY_ELEMENTS.0 + ); + assert!(foo_fields[3].vmsd.is_null()); + assert!(foo_fields[3].field_exists.is_none()); + + // The last VMStateField in VMSTATE_FOOA. + assert_eq!(foo_fields[4].flags, VMStateFlags::VMS_END); +} + +// =========================== Test VMSTATE_FOOB =========================== +// Test the use cases of the vmstate macro, corresponding to the following C +// macro variants: +// * VMSTATE_FOOB: +// - VMSTATE_BOOL_V +// - VMSTATE_U64 +// - VMSTATE_STRUCT_VARRAY_UINT8 +// - (no C version) MULTIPLY variant of VMSTATE_STRUCT_VARRAY_UINT32 +// - VMSTATE_ARRAY +// - VMSTATE_STRUCT_VARRAY_UINT8 with BqlCell wrapper & test_fn +#[repr(C)] +#[derive(Default)] +struct FooB { + arr_a: [FooA; FOO_ARRAY_MAX], + num_a: u8, + arr_a_mul: [FooA; FOO_ARRAY_MAX], + num_a_mul: u32, + wrap: BqlCell<u64>, + val: bool, + // FIXME: Use Timer array. Now we can't since it's hard to link savevm.c to test. + arr_i64: [i64; FOO_ARRAY_MAX], + arr_a_wrap: [FooA; FOO_ARRAY_MAX], + num_a_wrap: BqlCell<u32>, +} + +fn validate_foob(_state: &FooB, _version_id: u8) -> bool { + true +} + +static VMSTATE_FOOB: VMStateDescription = VMStateDescription { + name: c"foo_b".as_ptr(), + version_id: 2, + minimum_version_id: 1, + fields: vmstate_fields! { + vmstate_of!(FooB, val).with_version_id(2), + vmstate_of!(FooB, wrap), + vmstate_struct!(FooB, arr_a[0 .. num_a], &VMSTATE_FOOA, FooA).with_version_id(1), + vmstate_struct!(FooB, arr_a_mul[0 .. num_a_mul * 32], &VMSTATE_FOOA, FooA).with_version_id(2), + vmstate_of!(FooB, arr_i64), + vmstate_struct!(FooB, arr_a_wrap[0 .. 
num_a_wrap], &VMSTATE_FOOA, FooA, validate_foob), + }, + ..Zeroable::ZERO +}; + +#[test] +fn test_vmstate_bool_v() { + let foo_fields: &[VMStateField] = unsafe { slice::from_raw_parts(VMSTATE_FOOB.fields, 7) }; + + // 1st VMStateField ("val") in VMSTATE_FOOB (corresponding to VMSTATE_BOOL_V) + assert_eq!( + unsafe { CStr::from_ptr(foo_fields[0].name) }.to_bytes_with_nul(), + b"val\0" + ); + assert_eq!(foo_fields[0].offset, 136); + assert_eq!(foo_fields[0].num_offset, 0); + assert_eq!(foo_fields[0].info, unsafe { &vmstate_info_bool }); + assert_eq!(foo_fields[0].version_id, 2); + assert_eq!(foo_fields[0].size, 1); + assert_eq!(foo_fields[0].num, 0); + assert_eq!(foo_fields[0].flags, VMStateFlags::VMS_SINGLE); + assert!(foo_fields[0].vmsd.is_null()); + assert!(foo_fields[0].field_exists.is_none()); +} + +#[test] +fn test_vmstate_uint64() { + let foo_fields: &[VMStateField] = unsafe { slice::from_raw_parts(VMSTATE_FOOB.fields, 7) }; + + // 2nd VMStateField ("wrap") in VMSTATE_FOOB (corresponding to VMSTATE_U64) + assert_eq!( + unsafe { CStr::from_ptr(foo_fields[1].name) }.to_bytes_with_nul(), + b"wrap\0" + ); + assert_eq!(foo_fields[1].offset, 128); + assert_eq!(foo_fields[1].num_offset, 0); + assert_eq!(foo_fields[1].info, unsafe { &vmstate_info_uint64 }); + assert_eq!(foo_fields[1].version_id, 0); + assert_eq!(foo_fields[1].size, 8); + assert_eq!(foo_fields[1].num, 0); + assert_eq!(foo_fields[1].flags, VMStateFlags::VMS_SINGLE); + assert!(foo_fields[1].vmsd.is_null()); + assert!(foo_fields[1].field_exists.is_none()); +} + +#[test] +fn test_vmstate_struct_varray_uint8() { + let foo_fields: &[VMStateField] = unsafe { slice::from_raw_parts(VMSTATE_FOOB.fields, 7) }; + + // 3rd VMStateField ("arr_a") in VMSTATE_FOOB (corresponding to + // VMSTATE_STRUCT_VARRAY_UINT8) + assert_eq!( + unsafe { CStr::from_ptr(foo_fields[2].name) }.to_bytes_with_nul(), + b"arr_a\0" + ); + assert_eq!(foo_fields[2].offset, 0); + assert_eq!(foo_fields[2].num_offset, 60); + assert!(foo_fields[2].info.is_null()); // VMSTATE_STRUCT_VARRAY_UINT8 doesn't set info field. + assert_eq!(foo_fields[2].version_id, 1); + assert_eq!(foo_fields[2].size, 20); + assert_eq!(foo_fields[2].num, 0); + assert_eq!( + foo_fields[2].flags.0, + VMStateFlags::VMS_STRUCT.0 | VMStateFlags::VMS_VARRAY_UINT8.0 + ); + assert_eq!(foo_fields[2].vmsd, &VMSTATE_FOOA); + assert!(foo_fields[2].field_exists.is_none()); +} + +#[test] +fn test_vmstate_struct_varray_uint32_multiply() { + let foo_fields: &[VMStateField] = unsafe { slice::from_raw_parts(VMSTATE_FOOB.fields, 7) }; + + // 4th VMStateField ("arr_a_mul") in VMSTATE_FOOB (corresponding to + // (no C version) MULTIPLY variant of VMSTATE_STRUCT_VARRAY_UINT32) + assert_eq!( + unsafe { CStr::from_ptr(foo_fields[3].name) }.to_bytes_with_nul(), + b"arr_a_mul\0" + ); + assert_eq!(foo_fields[3].offset, 64); + assert_eq!(foo_fields[3].num_offset, 124); + assert!(foo_fields[3].info.is_null()); // VMSTATE_STRUCT_VARRAY_UINT8 doesn't set info field. 
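    // Editorial note: for this entry `size` is size_of::<FooA>() (20 bytes
    // with the #[repr(C)] layout above) and `num` holds the constant
    // multiplier 32 from `[0 .. num_a_mul * 32]`; the serialized element
    // count is the runtime value of `num_a_mul`, read through `num_offset`,
    // multiplied by that constant.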
+ assert_eq!(foo_fields[3].version_id, 2); + assert_eq!(foo_fields[3].size, 20); + assert_eq!(foo_fields[3].num, 32); + assert_eq!( + foo_fields[3].flags.0, + VMStateFlags::VMS_STRUCT.0 + | VMStateFlags::VMS_VARRAY_UINT32.0 + | VMStateFlags::VMS_MULTIPLY_ELEMENTS.0 + ); + assert_eq!(foo_fields[3].vmsd, &VMSTATE_FOOA); + assert!(foo_fields[3].field_exists.is_none()); +} + +#[test] +fn test_vmstate_macro_array() { + let foo_fields: &[VMStateField] = unsafe { slice::from_raw_parts(VMSTATE_FOOB.fields, 7) }; + + // 5th VMStateField ("arr_i64") in VMSTATE_FOOB (corresponding to + // VMSTATE_ARRAY) + assert_eq!( + unsafe { CStr::from_ptr(foo_fields[4].name) }.to_bytes_with_nul(), + b"arr_i64\0" + ); + assert_eq!(foo_fields[4].offset, 144); + assert_eq!(foo_fields[4].num_offset, 0); + assert_eq!(foo_fields[4].info, unsafe { &vmstate_info_int64 }); + assert_eq!(foo_fields[4].version_id, 0); + assert_eq!(foo_fields[4].size, 8); + assert_eq!(foo_fields[4].num, FOO_ARRAY_MAX as i32); + assert_eq!(foo_fields[4].flags, VMStateFlags::VMS_ARRAY); + assert!(foo_fields[4].vmsd.is_null()); + assert!(foo_fields[4].field_exists.is_none()); +} + +#[test] +fn test_vmstate_struct_varray_uint8_wrapper() { + let foo_fields: &[VMStateField] = unsafe { slice::from_raw_parts(VMSTATE_FOOB.fields, 7) }; + let mut foo_b: FooB = Default::default(); + let foo_b_p = std::ptr::addr_of_mut!(foo_b).cast::<c_void>(); + + // 6th VMStateField ("arr_a_wrap") in VMSTATE_FOOB (corresponding to + // VMSTATE_STRUCT_VARRAY_UINT8). Other fields are checked in + // test_vmstate_struct_varray_uint8. + assert_eq!( + unsafe { CStr::from_ptr(foo_fields[5].name) }.to_bytes_with_nul(), + b"arr_a_wrap\0" + ); + assert_eq!(foo_fields[5].num_offset, 228); + assert!(unsafe { foo_fields[5].field_exists.unwrap()(foo_b_p, 0) }); + + // The last VMStateField in VMSTATE_FOOB. + assert_eq!(foo_fields[6].flags, VMStateFlags::VMS_END); +} + +// =========================== Test VMSTATE_FOOC =========================== +// Test the use cases of the vmstate macro, corresponding to the following C +// macro variants: +// * VMSTATE_FOOC: +// - VMSTATE_POINTER +// - VMSTATE_ARRAY_OF_POINTER +struct FooCWrapper([Opaque<*mut u8>; FOO_ARRAY_MAX]); // Though Opaque<> array is almost impossible. + +impl_vmstate_forward!(FooCWrapper); + +#[repr(C)] +struct FooC { + ptr: *const i32, + ptr_a: NonNull<FooA>, + arr_ptr: [Box<u8>; FOO_ARRAY_MAX], + arr_ptr_wrap: FooCWrapper, +} + +static VMSTATE_FOOC: VMStateDescription = VMStateDescription { + name: c"foo_c".as_ptr(), + version_id: 3, + minimum_version_id: 1, + fields: vmstate_fields! { + vmstate_of!(FooC, ptr).with_version_id(2), + // FIXME: Currently vmstate_struct doesn't support the pointer to structure. 
+ // VMSTATE_STRUCT_POINTER: vmstate_struct!(FooC, ptr_a, VMSTATE_FOOA, NonNull<FooA>) + vmstate_unused!(size_of::<NonNull<FooA>>()), + vmstate_of!(FooC, arr_ptr), + vmstate_of!(FooC, arr_ptr_wrap), + }, + ..Zeroable::ZERO +}; + +const PTR_SIZE: usize = size_of::<*mut ()>(); + +#[test] +fn test_vmstate_pointer() { + let foo_fields: &[VMStateField] = unsafe { slice::from_raw_parts(VMSTATE_FOOC.fields, 6) }; + + // 1st VMStateField ("ptr") in VMSTATE_FOOC (corresponding to VMSTATE_POINTER) + assert_eq!( + unsafe { CStr::from_ptr(foo_fields[0].name) }.to_bytes_with_nul(), + b"ptr\0" + ); + assert_eq!(foo_fields[0].offset, 0); + assert_eq!(foo_fields[0].num_offset, 0); + assert_eq!(foo_fields[0].info, unsafe { &vmstate_info_int32 }); + assert_eq!(foo_fields[0].version_id, 2); + assert_eq!(foo_fields[0].size, 4); + assert_eq!(foo_fields[0].num, 0); + assert_eq!( + foo_fields[0].flags.0, + VMStateFlags::VMS_SINGLE.0 | VMStateFlags::VMS_POINTER.0 + ); + assert!(foo_fields[0].vmsd.is_null()); + assert!(foo_fields[0].field_exists.is_none()); +} + +#[test] +fn test_vmstate_macro_array_of_pointer() { + let foo_fields: &[VMStateField] = unsafe { slice::from_raw_parts(VMSTATE_FOOC.fields, 6) }; + + // 3rd VMStateField ("arr_ptr") in VMSTATE_FOOC (corresponding to + // VMSTATE_ARRAY_OF_POINTER) + assert_eq!( + unsafe { CStr::from_ptr(foo_fields[2].name) }.to_bytes_with_nul(), + b"arr_ptr\0" + ); + assert_eq!(foo_fields[2].offset, 2 * PTR_SIZE); + assert_eq!(foo_fields[2].num_offset, 0); + assert_eq!(foo_fields[2].info, unsafe { &vmstate_info_uint8 }); + assert_eq!(foo_fields[2].version_id, 0); + assert_eq!(foo_fields[2].size, PTR_SIZE); + assert_eq!(foo_fields[2].num, FOO_ARRAY_MAX as i32); + assert_eq!( + foo_fields[2].flags.0, + VMStateFlags::VMS_ARRAY.0 | VMStateFlags::VMS_ARRAY_OF_POINTER.0 + ); + assert!(foo_fields[2].vmsd.is_null()); + assert!(foo_fields[2].field_exists.is_none()); +} + +#[test] +fn test_vmstate_macro_array_of_pointer_wrapped() { + let foo_fields: &[VMStateField] = unsafe { slice::from_raw_parts(VMSTATE_FOOC.fields, 6) }; + + // 4th VMStateField ("arr_ptr_wrap") in VMSTATE_FOOC (corresponding to + // VMSTATE_ARRAY_OF_POINTER) + assert_eq!( + unsafe { CStr::from_ptr(foo_fields[3].name) }.to_bytes_with_nul(), + b"arr_ptr_wrap\0" + ); + assert_eq!(foo_fields[3].offset, (FOO_ARRAY_MAX + 2) * PTR_SIZE); + assert_eq!(foo_fields[3].num_offset, 0); + assert_eq!(foo_fields[3].info, unsafe { &vmstate_info_uint8 }); + assert_eq!(foo_fields[3].version_id, 0); + assert_eq!(foo_fields[3].size, PTR_SIZE); + assert_eq!(foo_fields[3].num, FOO_ARRAY_MAX as i32); + assert_eq!( + foo_fields[3].flags.0, + VMStateFlags::VMS_ARRAY.0 | VMStateFlags::VMS_ARRAY_OF_POINTER.0 + ); + assert!(foo_fields[3].vmsd.is_null()); + assert!(foo_fields[3].field_exists.is_none()); + + // The last VMStateField in VMSTATE_FOOC. + assert_eq!(foo_fields[4].flags, VMStateFlags::VMS_END); +} + +// =========================== Test VMSTATE_FOOD =========================== +// Test the use cases of the vmstate macro, corresponding to the following C +// macro variants: +// * VMSTATE_FOOD: +// - VMSTATE_VALIDATE + +// Add more member fields when vmstate_of/vmstate_struct support "test" +// parameter. 
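// Editorial note: the three callbacks below deliberately exercise the
// different callable shapes accepted by `vmstate_validate!` (via
// `vmstate_exist_fn!` / `FnCall`): an inherent method taking `&self`, an
// associated function, and a free function.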
+struct FooD; + +impl FooD { + fn validate_food_0(&self, _version_id: u8) -> bool { + true + } + + fn validate_food_1(_state: &FooD, _version_id: u8) -> bool { + false + } +} + +fn validate_food_2(_state: &FooD, _version_id: u8) -> bool { + true +} + +static VMSTATE_FOOD: VMStateDescription = VMStateDescription { + name: c"foo_d".as_ptr(), + version_id: 3, + minimum_version_id: 1, + fields: vmstate_fields! { + vmstate_validate!(FooD, c"foo_d_0", FooD::validate_food_0), + vmstate_validate!(FooD, c"foo_d_1", FooD::validate_food_1), + vmstate_validate!(FooD, c"foo_d_2", validate_food_2), + }, + ..Zeroable::ZERO +}; + +#[test] +fn test_vmstate_validate() { + let foo_fields: &[VMStateField] = unsafe { slice::from_raw_parts(VMSTATE_FOOD.fields, 4) }; + let mut foo_d = FooD; + let foo_d_p = std::ptr::addr_of_mut!(foo_d).cast::<c_void>(); + + // 1st VMStateField in VMSTATE_FOOD + assert_eq!( + unsafe { CStr::from_ptr(foo_fields[0].name) }.to_bytes_with_nul(), + b"foo_d_0\0" + ); + assert_eq!(foo_fields[0].offset, 0); + assert_eq!(foo_fields[0].num_offset, 0); + assert!(foo_fields[0].info.is_null()); + assert_eq!(foo_fields[0].version_id, 0); + assert_eq!(foo_fields[0].size, 0); + assert_eq!(foo_fields[0].num, 0); + assert_eq!( + foo_fields[0].flags.0, + VMStateFlags::VMS_ARRAY.0 | VMStateFlags::VMS_MUST_EXIST.0 + ); + assert!(foo_fields[0].vmsd.is_null()); + assert!(unsafe { foo_fields[0].field_exists.unwrap()(foo_d_p, 0) }); + + // 2nd VMStateField in VMSTATE_FOOD + assert_eq!( + unsafe { CStr::from_ptr(foo_fields[1].name) }.to_bytes_with_nul(), + b"foo_d_1\0" + ); + assert!(!unsafe { foo_fields[1].field_exists.unwrap()(foo_d_p, 1) }); + + // 3rd VMStateField in VMSTATE_FOOD + assert_eq!( + unsafe { CStr::from_ptr(foo_fields[2].name) }.to_bytes_with_nul(), + b"foo_d_2\0" + ); + assert!(unsafe { foo_fields[2].field_exists.unwrap()(foo_d_p, 2) }); + + // The last VMStateField in VMSTATE_FOOD. + assert_eq!(foo_fields[3].flags, VMStateFlags::VMS_END); +} diff --git a/rust/qemu-api/wrapper.h b/rust/qemu-api/wrapper.h new file mode 100644 index 0000000..15a1b19 --- /dev/null +++ b/rust/qemu-api/wrapper.h @@ -0,0 +1,71 @@ +/* + * QEMU System Emulator + * + * Copyright (c) 2024 Linaro Ltd. + * + * Authors: Manos Pitsidianakis <manos.pitsidianakis@linaro.org> + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + + +/* + * This header file is meant to be used as input to the `bindgen` application + * in order to generate C FFI compatible Rust bindings. 
+ */ + +#ifndef __CLANG_STDATOMIC_H +#define __CLANG_STDATOMIC_H +/* + * Fix potential missing stdatomic.h error in case bindgen does not insert the + * correct libclang header paths on its own. We do not use stdatomic.h symbols + * in QEMU code, so it's fine to declare dummy types instead. + */ +typedef enum memory_order { + memory_order_relaxed, + memory_order_consume, + memory_order_acquire, + memory_order_release, + memory_order_acq_rel, + memory_order_seq_cst, +} memory_order; +#endif /* __CLANG_STDATOMIC_H */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "qemu/log-for-trace.h" +#include "qemu/module.h" +#include "qemu-io.h" +#include "system/system.h" +#include "hw/sysbus.h" +#include "system/memory.h" +#include "chardev/char-fe.h" +#include "hw/clock.h" +#include "hw/qdev-clock.h" +#include "hw/qdev-properties.h" +#include "hw/qdev-properties-system.h" +#include "hw/irq.h" +#include "qapi/error.h" +#include "qapi/error-internal.h" +#include "migration/vmstate.h" +#include "chardev/char-serial.h" +#include "exec/memattrs.h" +#include "qemu/timer.h" +#include "system/address-spaces.h" +#include "hw/char/pl011.h" |