Merge "UPSTREAM: Revert "aarch64: Use cpu_capacity to determine clustering instead"" into main
diff --git a/.gitignore b/.gitignore
index 6d002d5..f6f1267 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,4 +1,5 @@
target/
+target.tmp/
**/__pycache__
**/*.rs.bk
**/*.sw[po]
diff --git a/Android.bp b/Android.bp
index de32722..77c2eeb 100644
--- a/Android.bp
+++ b/Android.bp
@@ -1,5 +1,6 @@
// This file is generated by cargo_embargo.
-// Do not modify this file as most changes will be overridden on upgrade.
+// Do not modify this file after the first "rust_*" or "genrule" module
+// because the changes will be overridden on upgrade.
// Content before the first "rust_*" or "genrule" module is preserved.
package {
@@ -45,6 +46,8 @@
edition: "2021",
features: [
"android-sparse",
+ "android_display",
+ "android_display_stub",
"audio",
"balloon",
"config-file",
@@ -74,7 +77,6 @@
"libcros_async",
"libcros_tracing",
"libcrosvm_cli",
- "libdata_model",
"libdevices",
"libdisk",
"libgdbstub",
@@ -134,6 +136,10 @@
},
android: {
shared_libs: [
+ // TODO(b/332677108): remove libc++ when display service is rewritten in rust.
+ "libc++",
+ "libbinder_ndk",
+ "libnativewindow",
"libprocessgroup",
],
},
@@ -178,6 +184,8 @@
edition: "2021",
features: [
"android-sparse",
+ "android_display",
+ "android_display_stub",
"audio",
"balloon",
"config-file",
@@ -207,7 +215,6 @@
"libcros_async",
"libcros_tracing",
"libcrosvm_cli",
- "libdata_model",
"libdevices",
"libdisk",
"libgdbstub",
@@ -309,7 +316,6 @@
},
},
apex_available: [
- "//apex_available:platform",
"com.android.virt",
],
defaults_visibility: [
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 402c508..8463fe4 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -33,7 +33,7 @@
### Commit Messages
As for commit messages, we follow
-[ChromeOS's guideline](https://chromium.googlesource.com/chromiumos/docs/+/HEAD/contributing.md#commit-messages)
+[ChromeOS's guideline](https://www.chromium.org/chromium-os/developer-library/guides/development/contributing/#commit-messages)
in general.
Here is an example of a good commit message:
diff --git a/Cargo.lock b/Cargo.lock
index 464485f..59faa9f 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -9,7 +9,6 @@
"arch",
"base",
"cros_fdt",
- "data_model",
"devices",
"gdbstub",
"gdbstub_arch",
@@ -39,6 +38,21 @@
]
[[package]]
+name = "addr2line"
+version = "0.21.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8a30b2e23b9e17a9f90641c7ab1549cd9b44f296d3ccbf309d2863cfe398a0cb"
+dependencies = [
+ "gimli",
+]
+
+[[package]]
+name = "adler"
+version = "1.0.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe"
+
+[[package]]
name = "aho-corasick"
version = "0.7.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -83,6 +97,7 @@
"jail",
"kernel_cmdline",
"libc",
+ "metrics",
"minijail",
"power_monitor",
"remain",
@@ -217,6 +232,21 @@
checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa"
[[package]]
+name = "backtrace"
+version = "0.3.69"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2089b7e3f35b9dd2d0ed921ead4f6d318c27680d4a5bd167b3ee120edb105837"
+dependencies = [
+ "addr2line",
+ "cc",
+ "cfg-if",
+ "libc",
+ "miniz_oxide",
+ "object",
+ "rustc-demangle",
+]
+
+[[package]]
name = "balloon_control"
version = "0.1.0"
dependencies = [
@@ -231,7 +261,6 @@
"base_event_token_derive",
"cfg-if",
"chrono",
- "data_model",
"env_logger",
"libc",
"libtest-mimic",
@@ -426,11 +455,12 @@
[[package]]
name = "cc"
-version = "1.0.73"
+version = "1.0.90"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2fff2a6927b3bb87f9595d67196a70493f627687a71d87a0d692242c33f58c11"
+checksum = "8cd6604a82acf3039f1144f54b8eb34e91ffba622051189e71b781822d5ee1f5"
dependencies = [
"jobserver",
+ "libc",
]
[[package]]
@@ -449,16 +479,19 @@
checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
[[package]]
-name = "chrono"
-version = "0.4.19"
+name = "cfg_aliases"
+version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "670ad68c9088c2a963aaa298cb369688cf3f9465ce5e2d4ca10e6e0098a1ce73"
+checksum = "fd16c4719339c4530435d38e511904438d07cce7950afa3718a84ac36c10e89e"
+
+[[package]]
+name = "chrono"
+version = "0.4.35"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8eaf5903dcbc0a39312feb77df2ff4c76387d591b9fc7b04a238dcf8bb62639a"
dependencies = [
- "libc",
- "num-integer",
"num-traits",
"serde",
- "winapi",
]
[[package]]
@@ -636,6 +669,7 @@
"sync",
"tempfile",
"thiserror",
+ "tokio",
"win_util",
"winapi",
]
@@ -645,6 +679,7 @@
version = "0.1.0"
dependencies = [
"anyhow",
+ "indexmap",
"remain",
"thiserror",
]
@@ -741,7 +776,6 @@
"crosvm_cli",
"crosvm_plugin",
"ctrlc",
- "data_model",
"devices",
"disk",
"document-features",
@@ -859,7 +893,7 @@
checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7"
[[package]]
-name = "crypto_product"
+name = "crypto_generic"
version = "0.1.0"
dependencies = [
"anyhow",
@@ -1142,6 +1176,7 @@
"log",
"prebuilts",
"rand",
+ "readclock",
"serde",
"serde_json",
"shlex",
@@ -1187,7 +1222,6 @@
"bitflags 2.4.0",
"cros_tracing",
"crossbeam-utils",
- "data_model",
"enumn",
"libc",
"remain",
@@ -1321,6 +1355,12 @@
]
[[package]]
+name = "gimli"
+version = "0.28.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4271d37baee1b8c7e4b708028c57d816cf9d2434acb33a549475f78c181f6253"
+
+[[package]]
name = "glob"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -1336,7 +1376,6 @@
"cc",
"cfg-if",
"cros_tracing",
- "data_model",
"euclid",
"libc",
"linux_input_sys",
@@ -1592,9 +1631,9 @@
[[package]]
name = "libc"
-version = "0.2.150"
+version = "0.2.153"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "89d92a4743f9a61002fae18374ed11e7973f530cb3a3255fb354818118b2203c"
+checksum = "9c198f91728a82281a64e1f4f9eeb25d82cb32a5de251c6bd1b5154d63a8e7bd"
[[package]]
name = "libcras"
@@ -1800,6 +1839,7 @@
"base",
"cfg-if",
"chrono",
+ "metrics_events",
"metrics_generic",
"serde",
"sync",
@@ -1807,16 +1847,30 @@
]
[[package]]
+name = "metrics_events"
+version = "0.1.0"
+dependencies = [
+ "anyhow",
+ "cfg-if",
+ "metrics_events_generic",
+ "serde",
+ "win_util",
+]
+
+[[package]]
+name = "metrics_events_generic"
+version = "0.1.0"
+dependencies = [
+ "serde",
+]
+
+[[package]]
name = "metrics_generic"
version = "0.1.0"
dependencies = [
"anyhow",
"base",
- "cfg-if",
- "proto_build_tools",
- "protobuf",
- "serde",
- "win_util",
+ "metrics_events",
]
[[package]]
@@ -1844,6 +1898,26 @@
checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a"
[[package]]
+name = "miniz_oxide"
+version = "0.7.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9d811f3e15f28568be3407c8e7fdb6514c1cda3cb30683f15b6a1a1dc4ea14a7"
+dependencies = [
+ "adler",
+]
+
+[[package]]
+name = "mio"
+version = "0.8.11"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a4a650543ca06a924e8b371db273b2756685faae30f8487da1b56505a8f78b0c"
+dependencies = [
+ "libc",
+ "wasi",
+ "windows-sys 0.48.0",
+]
+
+[[package]]
name = "named-lock"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -1910,6 +1984,18 @@
"bitflags 2.4.0",
"cfg-if",
"libc",
+]
+
+[[package]]
+name = "nix"
+version = "0.28.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ab2156c4fce2f8df6c499cc1c763e4394b7482525bf2a9701c9d79d215f519e4"
+dependencies = [
+ "bitflags 2.4.0",
+ "cfg-if",
+ "cfg_aliases",
+ "libc",
"memoffset 0.9.0",
]
@@ -1924,16 +2010,6 @@
]
[[package]]
-name = "num-integer"
-version = "0.1.45"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "225d3389fb3509a24c93f5c29eb6bde2586b98d9f016636dff58d7c6f7569cd9"
-dependencies = [
- "autocfg",
- "num-traits",
-]
-
-[[package]]
name = "num-traits"
version = "0.2.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -1962,6 +2038,15 @@
]
[[package]]
+name = "object"
+version = "0.32.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a6a622008b6e321afc04970976f62ee297fdbaa6f95318ca343e3eebb9648441"
+dependencies = [
+ "memchr",
+]
+
+[[package]]
name = "once_cell"
version = "1.17.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -2093,7 +2178,6 @@
"base",
"cfg-if",
"cros_tracing_types",
- "data_model",
"openssl",
"proto_build_tools",
"protobuf",
@@ -2104,9 +2188,9 @@
[[package]]
name = "pin-project-lite"
-version = "0.2.9"
+version = "0.2.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e0a7ae3ac2f1173085d398531c705756c94a4c56843785df85a60c1a0afac116"
+checksum = "8afb450f006bf6385ca15ef45d71d2288452bc3683ce2e2cacc0d18e4be60b58"
[[package]]
name = "pin-utils"
@@ -2388,7 +2472,6 @@
"arch",
"base",
"cros_fdt",
- "data_model",
"devices",
"gdbstub",
"gdbstub_arch",
@@ -2408,6 +2491,12 @@
]
[[package]]
+name = "rustc-demangle"
+version = "0.1.23"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76"
+
+[[package]]
name = "rustc-hash"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -2435,7 +2524,7 @@
"cfg-if",
"libc",
"log",
- "nix 0.27.1",
+ "nix 0.28.0",
"pkg-config",
"remain",
"thiserror",
@@ -2574,6 +2663,16 @@
checksum = "2fd0db749597d91ff862fd1d55ea87f7855a744a8425a64695b6fca237d1dad1"
[[package]]
+name = "socket2"
+version = "0.4.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9f7916fc008ca5542385b89a3d3ce689953c143e9304a9bf8beec1de48994c0d"
+dependencies = [
+ "libc",
+ "winapi",
+]
+
+[[package]]
name = "static_assertions"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -2593,10 +2692,10 @@
"base",
"cfg-if",
"cros_tracing",
- "data_model",
"jail",
"libc",
"libtest-mimic",
+ "metrics",
"num_cpus",
"once_cell",
"remain",
@@ -2738,6 +2837,22 @@
checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c"
[[package]]
+name = "tokio"
+version = "1.29.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "532826ff75199d5833b9d2c5fe410f29235e25704ee5f0ef599fb51c21f4a4da"
+dependencies = [
+ "autocfg",
+ "backtrace",
+ "libc",
+ "mio",
+ "num_cpus",
+ "pin-project-lite",
+ "socket2",
+ "windows-sys 0.48.0",
+]
+
+[[package]]
name = "toml"
version = "0.5.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -2751,7 +2866,6 @@
version = "0.1.0"
dependencies = [
"base",
- "data_model",
"rand",
"serde",
"serde_json",
@@ -2925,7 +3039,7 @@
"balloon_control",
"base",
"cfg-if",
- "data_model",
+ "crypto_generic",
"gdbstub",
"gdbstub_arch",
"hypervisor",
@@ -2981,7 +3095,6 @@
"base",
"bitflags 2.4.0",
"cfg-if",
- "data_model",
"enumn",
"libc",
"remain",
@@ -3127,12 +3240,12 @@
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5a3e1820f08b8513f676f7ab6c1f99ff312fb97b553d30ff4dd86f9f15728aa7"
dependencies = [
- "windows_aarch64_gnullvm",
+ "windows_aarch64_gnullvm 0.42.1",
"windows_aarch64_msvc 0.42.1",
"windows_i686_gnu 0.42.1",
"windows_i686_msvc 0.42.1",
"windows_x86_64_gnu 0.42.1",
- "windows_x86_64_gnullvm",
+ "windows_x86_64_gnullvm 0.42.1",
"windows_x86_64_msvc 0.42.1",
]
@@ -3142,7 +3255,16 @@
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0"
dependencies = [
- "windows-targets",
+ "windows-targets 0.42.1",
+]
+
+[[package]]
+name = "windows-sys"
+version = "0.48.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9"
+dependencies = [
+ "windows-targets 0.48.5",
]
[[package]]
@@ -3151,22 +3273,43 @@
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8e2522491fbfcd58cc84d47aeb2958948c4b8982e9a2d8a2a35bbaed431390e7"
dependencies = [
- "windows_aarch64_gnullvm",
+ "windows_aarch64_gnullvm 0.42.1",
"windows_aarch64_msvc 0.42.1",
"windows_i686_gnu 0.42.1",
"windows_i686_msvc 0.42.1",
"windows_x86_64_gnu 0.42.1",
- "windows_x86_64_gnullvm",
+ "windows_x86_64_gnullvm 0.42.1",
"windows_x86_64_msvc 0.42.1",
]
[[package]]
+name = "windows-targets"
+version = "0.48.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c"
+dependencies = [
+ "windows_aarch64_gnullvm 0.48.5",
+ "windows_aarch64_msvc 0.48.5",
+ "windows_i686_gnu 0.48.5",
+ "windows_i686_msvc 0.48.5",
+ "windows_x86_64_gnu 0.48.5",
+ "windows_x86_64_gnullvm 0.48.5",
+ "windows_x86_64_msvc 0.48.5",
+]
+
+[[package]]
name = "windows_aarch64_gnullvm"
version = "0.42.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8c9864e83243fdec7fc9c5444389dcbbfd258f745e7853198f365e3c4968a608"
[[package]]
+name = "windows_aarch64_gnullvm"
+version = "0.48.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8"
+
+[[package]]
name = "windows_aarch64_msvc"
version = "0.39.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -3179,6 +3322,12 @@
checksum = "4c8b1b673ffc16c47a9ff48570a9d85e25d265735c503681332589af6253c6c7"
[[package]]
+name = "windows_aarch64_msvc"
+version = "0.48.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc"
+
+[[package]]
name = "windows_i686_gnu"
version = "0.39.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -3191,6 +3340,12 @@
checksum = "de3887528ad530ba7bdbb1faa8275ec7a1155a45ffa57c37993960277145d640"
[[package]]
+name = "windows_i686_gnu"
+version = "0.48.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e"
+
+[[package]]
name = "windows_i686_msvc"
version = "0.39.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -3203,6 +3358,12 @@
checksum = "bf4d1122317eddd6ff351aa852118a2418ad4214e6613a50e0191f7004372605"
[[package]]
+name = "windows_i686_msvc"
+version = "0.48.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406"
+
+[[package]]
name = "windows_x86_64_gnu"
version = "0.39.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -3215,12 +3376,24 @@
checksum = "c1040f221285e17ebccbc2591ffdc2d44ee1f9186324dd3e84e99ac68d699c45"
[[package]]
+name = "windows_x86_64_gnu"
+version = "0.48.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e"
+
+[[package]]
name = "windows_x86_64_gnullvm"
version = "0.42.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "628bfdf232daa22b0d64fdb62b09fcc36bb01f05a3939e20ab73aaf9470d0463"
[[package]]
+name = "windows_x86_64_gnullvm"
+version = "0.48.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc"
+
+[[package]]
name = "windows_x86_64_msvc"
version = "0.39.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -3233,6 +3406,12 @@
checksum = "447660ad36a13288b1db4d4248e857b510e8c3a225c822ba4fb748c0aafecffd"
[[package]]
+name = "windows_x86_64_msvc"
+version = "0.48.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538"
+
+[[package]]
name = "wio"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
diff --git a/Cargo.toml b/Cargo.toml
index 615f93c..1e3b7a8 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -72,6 +72,7 @@
"linux_input_sys",
"media/ffmpeg",
"metrics",
+ "metrics_events",
"net_sys",
"net_util",
"power_monitor",
@@ -155,6 +156,9 @@
## Enables PCI hotplug. Only available on Linux, and currently only for x86/x86-64.
pci-hotplug = ["devices/pci-hotplug", "vm_control/pci-hotplug"]
+## Enables virtio-pvclock. Only available on Linux, and currently only for x86/x86-64.
+pvclock = ["devices/pvclock"]
+
## Enables the use of the qcow format for block devices.
qcow = ["disk/qcow"]
@@ -167,6 +171,9 @@
## Enables collection of VM statistics.
stats = ["devices/stats"]
+## Supports tokio as an asynchronous executor.
+tokio = ["cros_async/tokio"]
+
## Enables USB host device passthrough via an emulated XHCI controller.
## USB is supported only on unix/linux. The feature is a no-op on windows.
usb = ["devices/usb"]
@@ -206,6 +213,14 @@
# see rutabaga_gfx/Cargo.toml for instructions on building with enabled.
vulkano = ["rutabaga_gfx/vulkano"]
+# Enables the GPU display backend for Android. The backend uses Android surface as the backing
+# store.
+android_display = ["devices/android_display"]
+
+# Stub implementation of the Android display backend. This is only used for building and testing
+# the Android display backend on a non-Android target.
+android_display_stub = ["devices/android_display_stub"]
+
#! ### Video features
#!
#! See [Video Device](https://crosvm.dev/book/devices/video.html) for more information.
@@ -336,6 +351,7 @@
"registered_events",
"slirp",
"swap",
+ "tokio",
"trace_marker",
"vaapi",
"video-decoder",
@@ -349,6 +365,8 @@
## All features that are compiled and tested for aarch64
all-aarch64 = [
"all-default",
+ "android_display",
+ "android_display_stub",
"gunyah",
]
@@ -360,7 +378,10 @@
## All features that are compiled and tested for x86_64
all-x86_64 = [
"all-default",
+ "android_display",
+ "android_display_stub",
"plugin",
+ "pvclock",
"scudo"
]
@@ -407,7 +428,6 @@
cros_tracing = { path = "cros_tracing" }
crosvm_cli = { path = "crosvm_cli" }
crosvm_plugin = { path = "crosvm_plugin", optional = true }
-data_model = { path ="common/data_model" }
devices = { path = "devices" }
disk = { path = "disk" }
document-features = { version = "0.2", optional = true }
@@ -434,7 +454,7 @@
remain = "*"
resources = { path = "resources" }
scudo = { version = "0.1", optional = true }
-serde = "*"
+serde = { version = "*", features = ["rc"] }
serde_json = "*"
serde_keyvalue = { path = "serde_keyvalue", features = ["argh_derive"] }
smallvec = "1.6.1"
diff --git a/OWNERS b/OWNERS
index 9185006..6d92e49 100644
--- a/OWNERS
+++ b/OWNERS
@@ -1,26 +1,33 @@
+# Automatically assign a reviewer
+crosvm-reviews@google.com
+
+# Please use the above alias to automatically select an owner for review.
+# All owners below are marked as #{LAST_RESORT_SUGGESTION} so that
+# they won't be suggested by gerrit.
+
# ChromeOS Team
-acourbot@chromium.org
-denniskempin@google.com
-dtor@chromium.org
-dverkamp@chromium.org
-keiichiw@chromium.org
-paulhsia@chromium.org
-stevensd@chromium.org
-takayas@chromium.org
-uekawa@chromium.org
+acourbot@chromium.org #{LAST_RESORT_SUGGESTION}
+denniskempin@google.com #{LAST_RESORT_SUGGESTION}
+dtor@chromium.org #{LAST_RESORT_SUGGESTION}
+dverkamp@chromium.org #{LAST_RESORT_SUGGESTION}
+keiichiw@chromium.org #{LAST_RESORT_SUGGESTION}
+paulhsia@chromium.org #{LAST_RESORT_SUGGESTION}
+stevensd@chromium.org #{LAST_RESORT_SUGGESTION}
+takayas@chromium.org #{LAST_RESORT_SUGGESTION}
+uekawa@chromium.org #{LAST_RESORT_SUGGESTION}
# Cloud Android Team
-fmayle@google.com
+fmayle@google.com #{LAST_RESORT_SUGGESTION}
# For Windows
-auradkar@google.com
-nkgold@google.com
-rizhang@google.com
+auradkar@google.com #{LAST_RESORT_SUGGESTION}
+nkgold@google.com #{LAST_RESORT_SUGGESTION}
+rizhang@google.com #{LAST_RESORT_SUGGESTION}
# Bots
-crosvm-bot@crosvm-packages.iam.gserviceaccount.com
-recipe-mega-autoroller@chops-service-accounts.iam.gserviceaccount.com
-crosvm-luci-ci-builder@crosvm-infra.iam.gserviceaccount.com
+crosvm-bot@crosvm-packages.iam.gserviceaccount.com #{LAST_RESORT_SUGGESTION}
+recipe-mega-autoroller@chops-service-accounts.iam.gserviceaccount.com #{LAST_RESORT_SUGGESTION}
+crosvm-luci-ci-builder@crosvm-infra.iam.gserviceaccount.com #{LAST_RESORT_SUGGESTION}
# Changes to Cargo.lock will require additional reviews by the crosvm council.
per-file Cargo.lock = set noparent
diff --git a/aarch64/Android.bp b/aarch64/Android.bp
index cca58ab..e51fc5c 100644
--- a/aarch64/Android.bp
+++ b/aarch64/Android.bp
@@ -1,5 +1,6 @@
// This file is generated by cargo_embargo.
-// Do not modify this file as most changes will be overridden on upgrade.
+// Do not modify this file after the first "rust_*" or "genrule" module
+// because the changes will be overridden on upgrade.
// Content before the first "rust_*" or "genrule" module is preserved.
package {
@@ -29,7 +30,6 @@
"libarch",
"libbase_rust",
"libcros_fdt",
- "libdata_model",
"libdevices",
"libgdbstub",
"libgdbstub_arch",
diff --git a/aarch64/Cargo.toml b/aarch64/Cargo.toml
index e6d1a5a..3ffcb05 100644
--- a/aarch64/Cargo.toml
+++ b/aarch64/Cargo.toml
@@ -11,7 +11,6 @@
[dependencies]
arch = { path = "../arch" }
cros_fdt = { path = "../cros_fdt" }
-data_model = { path = "../common/data_model" }
devices = { path = "../devices" }
gdbstub = { version = "0.7.0", optional = true }
gdbstub_arch = { version = "0.3.0", optional = true }
diff --git a/aarch64/src/fdt.rs b/aarch64/src/fdt.rs
index 820ffc1..b01ffa1 100644
--- a/aarch64/src/fdt.rs
+++ b/aarch64/src/fdt.rs
@@ -8,11 +8,11 @@
use std::path::PathBuf;
use arch::apply_device_tree_overlays;
+use arch::serial::SerialDeviceInfo;
use arch::CpuSet;
use arch::DtbOverlay;
#[cfg(any(target_os = "android", target_os = "linux"))]
use arch::PlatformBusResources;
-use arch::SERIAL_ADDR;
use cros_fdt::Error;
use cros_fdt::Fdt;
use cros_fdt::Result;
@@ -44,9 +44,6 @@
use crate::AARCH64_RTC_IRQ;
use crate::AARCH64_RTC_SIZE;
// These are serial device related constants.
-use crate::AARCH64_SERIAL_1_3_IRQ;
-use crate::AARCH64_SERIAL_2_4_IRQ;
-use crate::AARCH64_SERIAL_SIZE;
use crate::AARCH64_SERIAL_SPEED;
use crate::AARCH64_VIRTFREQ_BASE;
use crate::AARCH64_VIRTFREQ_SIZE;
@@ -136,6 +133,7 @@
fn create_cpu_nodes(
fdt: &mut Fdt,
num_cpus: u32,
+ cpu_mpidr_generator: &impl Fn(usize) -> Option<u64>,
cpu_clusters: Vec<CpuSet>,
cpu_capacity: BTreeMap<usize, u32>,
dynamic_power_coefficient: BTreeMap<usize, u32>,
@@ -147,14 +145,18 @@
cpus_node.set_prop("#size-cells", 0x0u32)?;
for cpu_id in 0..num_cpus {
- let cpu_name = format!("cpu@{:x}", cpu_id);
+ let reg = u32::try_from(
+ cpu_mpidr_generator(cpu_id.try_into().unwrap()).ok_or(Error::PropertyValueInvalid)?,
+ )
+ .map_err(|_| Error::PropertyValueTooLarge)?;
+ let cpu_name = format!("cpu@{:x}", reg);
let cpu_node = cpus_node.subnode_mut(&cpu_name)?;
cpu_node.set_prop("device_type", "cpu")?;
- cpu_node.set_prop("compatible", "arm,arm-v8")?;
+ cpu_node.set_prop("compatible", "arm,armv8")?;
if num_cpus > 1 {
cpu_node.set_prop("enable-method", "psci")?;
}
- cpu_node.set_prop("reg", cpu_id)?;
+ cpu_node.set_prop("reg", reg)?;
cpu_node.set_prop("phandle", PHANDLE_CPU0 + cpu_id)?;
if let Some(pwr_coefficient) = dynamic_power_coefficient.get(&(cpu_id as usize)) {
@@ -261,8 +263,8 @@
Ok(())
}
-fn create_serial_node(fdt: &mut Fdt, addr: u64, irq: u32) -> Result<()> {
- let serial_reg_prop = [addr, AARCH64_SERIAL_SIZE];
+fn create_serial_node(fdt: &mut Fdt, addr: u64, size: u64, irq: u32) -> Result<()> {
+ let serial_reg_prop = [addr, size];
let irq = [GIC_FDT_IRQ_TYPE_SPI, irq, IRQ_TYPE_EDGE_RISING];
let serial_node = fdt
@@ -276,14 +278,10 @@
Ok(())
}
-fn create_serial_nodes(fdt: &mut Fdt) -> Result<()> {
- // Note that SERIAL_ADDR contains the I/O port addresses conventionally used
- // for serial ports on x86. This uses the same addresses (but on the MMIO bus)
- // to simplify the shared serial code.
- create_serial_node(fdt, SERIAL_ADDR[0], AARCH64_SERIAL_1_3_IRQ)?;
- create_serial_node(fdt, SERIAL_ADDR[1], AARCH64_SERIAL_2_4_IRQ)?;
- create_serial_node(fdt, SERIAL_ADDR[2], AARCH64_SERIAL_1_3_IRQ)?;
- create_serial_node(fdt, SERIAL_ADDR[3], AARCH64_SERIAL_2_4_IRQ)?;
+fn create_serial_nodes(fdt: &mut Fdt, serial_devices: &[SerialDeviceInfo]) -> Result<()> {
+ for dev in serial_devices {
+ create_serial_node(fdt, dev.address, dev.size, dev.irq)?;
+ }
Ok(())
}
@@ -320,12 +318,15 @@
fdt: &mut Fdt,
cmdline: &str,
initrd: Option<(GuestAddress, usize)>,
+ stdout_path: Option<&str>,
) -> Result<()> {
let chosen_node = fdt.root_mut().subnode_mut("chosen")?;
chosen_node.set_prop("linux,pci-probe-only", 1u32)?;
chosen_node.set_prop("bootargs", cmdline)?;
- // Used by android bootloader for boot console output
- chosen_node.set_prop("stdout-path", format!("/U6_16550A@{:x}", SERIAL_ADDR[0]))?;
+ if let Some(stdout_path) = stdout_path {
+ // Used by android bootloader for boot console output
+ chosen_node.set_prop("stdout-path", stdout_path)?;
+ }
let mut kaslr_seed_bytes = [0u8; 8];
OsRng.fill_bytes(&mut kaslr_seed_bytes);
@@ -631,6 +632,7 @@
PlatformBusResources,
>,
num_cpus: u32,
+ cpu_mpidr_generator: &impl Fn(usize) -> Option<u64>,
cpu_clusters: Vec<CpuSet>,
cpu_capacity: BTreeMap<usize, u32>,
cpu_frequencies: BTreeMap<usize, Vec<u32>>,
@@ -649,6 +651,7 @@
vm_generator: &impl Fn(&mut Fdt, &BTreeMap<&str, u32>) -> cros_fdt::Result<()>,
dynamic_power_coefficient: BTreeMap<usize, u32>,
device_tree_overlays: Vec<DtbOverlay>,
+ serial_devices: &[SerialDeviceInfo],
) -> Result<()> {
let mut fdt = Fdt::new(&[]);
let mut phandles_key_cache = Vec::new();
@@ -664,7 +667,10 @@
if let Some(android_fstab) = android_fstab {
arch::android::create_android_fdt(&mut fdt, android_fstab)?;
}
- create_chosen_node(&mut fdt, cmdline, initrd)?;
+ let stdout_path = serial_devices
+ .first()
+ .map(|first_serial| format!("/U6_16550A@{:x}", first_serial.address));
+ create_chosen_node(&mut fdt, cmdline, initrd, stdout_path.as_deref())?;
create_config_node(&mut fdt, image)?;
create_memory_node(&mut fdt, guest_mem)?;
let dma_pool_phandle = match swiotlb {
@@ -678,6 +684,7 @@
create_cpu_nodes(
&mut fdt,
num_cpus,
+ cpu_mpidr_generator,
cpu_clusters,
cpu_capacity,
dynamic_power_coefficient,
@@ -688,7 +695,7 @@
if use_pmu {
create_pmu_node(&mut fdt, num_cpus)?;
}
- create_serial_nodes(&mut fdt)?;
+ create_serial_nodes(&mut fdt, serial_devices)?;
create_psci_node(&mut fdt, &psci_version)?;
create_pci_nodes(&mut fdt, pci_irqs, pci_cfg, pci_ranges, dma_pool_phandle)?;
create_rtc_node(&mut fdt)?;
diff --git a/aarch64/src/lib.rs b/aarch64/src/lib.rs
index 4154445..f7de776 100644
--- a/aarch64/src/lib.rs
+++ b/aarch64/src/lib.rs
@@ -173,8 +173,6 @@
}
}
-// Serial device requires 8 bytes of registers;
-const AARCH64_SERIAL_SIZE: u64 = 0x8;
// This was the speed kvmtool used, not sure if it matters.
const AARCH64_SERIAL_SPEED: u32 = 1843200;
// The serial device gets the first interrupt line
@@ -374,6 +372,12 @@
block_size as u64
}
+fn get_vcpu_mpidr_aff<Vcpu: VcpuAArch64>(vcpus: &[Vcpu], index: usize) -> Option<u64> {
+ const MPIDR_AFF_MASK: u64 = 0xff_00ff_ffff;
+
+ Some(vcpus.get(index)?.get_mpidr().ok()? & MPIDR_AFF_MASK)
+}
+
impl arch::LinuxArch for AArch64 {
type Error = Error;
@@ -508,6 +512,7 @@
&payload,
fdt_offset,
components.hv_cfg.protection_type,
+ components.boot_cpu,
)
};
has_pvtime &= vcpu.has_pvtime_support();
@@ -518,7 +523,7 @@
// Initialize Vcpus after all Vcpu objects have been created.
for (vcpu_id, vcpu) in vcpus.iter().enumerate() {
- vcpu.init(&Self::vcpu_features(vcpu_id, use_pmu))
+ vcpu.init(&Self::vcpu_features(vcpu_id, use_pmu, components.boot_cpu))
.map_err(Error::VcpuInit)?;
}
@@ -632,11 +637,11 @@
let com_evt_1_3 = devices::IrqEdgeEvent::new().map_err(Error::CreateEvent)?;
let com_evt_2_4 = devices::IrqEdgeEvent::new().map_err(Error::CreateEvent)?;
- arch::add_serial_devices(
+ let serial_devices = arch::add_serial_devices(
components.hv_cfg.protection_type,
&mmio_bus,
- com_evt_1_3.get_trigger(),
- com_evt_2_4.get_trigger(),
+ (AARCH64_SERIAL_1_3_IRQ, com_evt_1_3.get_trigger()),
+ (AARCH64_SERIAL_2_4_IRQ, com_evt_2_4.get_trigger()),
serial_parameters,
serial_jail,
#[cfg(feature = "swap")]
@@ -695,7 +700,7 @@
}
let mut cmdline = Self::get_base_linux_cmdline();
- get_serial_cmdline(&mut cmdline, serial_parameters, "mmio")
+ get_serial_cmdline(&mut cmdline, serial_parameters, "mmio", &serial_devices)
.map_err(Error::GetSerialCmdline)?;
for param in components.extra_kernel_params {
cmdline.insert_str(¶m).map_err(Error::Cmdline)?;
@@ -768,6 +773,7 @@
&pci_ranges,
dev_resources,
vcpu_count as u32,
+ &|n| get_vcpu_mpidr_aff(&vcpus, n),
components.cpu_clusters,
components.cpu_capacity,
components.cpu_frequencies,
@@ -791,6 +797,7 @@
&|writer, phandles| vm.create_fdt(writer, phandles),
components.dynamic_power_coefficient,
device_tree_overlays,
+ &serial_devices,
)
.map_err(Error::CreateFdt)?;
@@ -1058,13 +1065,13 @@
///
/// * `vcpu_id` - The VM's index for `vcpu`.
/// * `use_pmu` - Should `vcpu` be configured to use the Performance Monitor Unit.
- fn vcpu_features(vcpu_id: usize, use_pmu: bool) -> Vec<VcpuFeature> {
+ fn vcpu_features(vcpu_id: usize, use_pmu: bool, boot_cpu: usize) -> Vec<VcpuFeature> {
let mut features = vec![VcpuFeature::PsciV0_2];
if use_pmu {
features.push(VcpuFeature::PmuV3);
}
// Non-boot cpus are powered off initially
- if vcpu_id != 0 {
+ if vcpu_id != boot_cpu {
features.push(VcpuFeature::PowerOff);
}
@@ -1081,6 +1088,7 @@
payload: &PayloadType,
fdt_address: GuestAddress,
protection_type: ProtectionType,
+ boot_cpu: usize,
) -> VcpuInitAArch64 {
let mut regs: BTreeMap<VcpuRegAArch64, u64> = Default::default();
@@ -1089,7 +1097,7 @@
regs.insert(VcpuRegAArch64::Pstate, pstate);
// Other cpus are powered off initially
- if vcpu_id == 0 {
+ if vcpu_id == boot_cpu {
let entry_addr = if protection_type.loads_firmware() {
Some(AARCH64_PROTECTED_VM_FW_START)
} else if protection_type.runs_firmware() {
@@ -1140,7 +1148,7 @@
let fdt_address = GuestAddress(0x1234);
let prot = ProtectionType::Unprotected;
- let vcpu_init = AArch64::vcpu_init(0, &payload, fdt_address, prot);
+ let vcpu_init = AArch64::vcpu_init(0, &payload, fdt_address, prot, 0);
// PC: kernel image entry point
assert_eq!(vcpu_init.regs.get(&VcpuRegAArch64::Pc), Some(&0x8080_0000));
@@ -1158,7 +1166,7 @@
let fdt_address = GuestAddress(0x1234);
let prot = ProtectionType::Unprotected;
- let vcpu_init = AArch64::vcpu_init(0, &payload, fdt_address, prot);
+ let vcpu_init = AArch64::vcpu_init(0, &payload, fdt_address, prot, 0);
// PC: bios image entry point
assert_eq!(vcpu_init.regs.get(&VcpuRegAArch64::Pc), Some(&0x8020_0000));
@@ -1177,7 +1185,7 @@
let fdt_address = GuestAddress(0x1234);
let prot = ProtectionType::Protected;
- let vcpu_init = AArch64::vcpu_init(0, &payload, fdt_address, prot);
+ let vcpu_init = AArch64::vcpu_init(0, &payload, fdt_address, prot, 0);
// The hypervisor provides the initial value of PC, so PC should not be present in the
// vcpu_init register map.
diff --git a/acpi_tables/Android.bp b/acpi_tables/Android.bp
index ae09cfb..f39bca6 100644
--- a/acpi_tables/Android.bp
+++ b/acpi_tables/Android.bp
@@ -1,5 +1,6 @@
// This file is generated by cargo_embargo.
-// Do not modify this file as most changes will be overridden on upgrade.
+// Do not modify this file after the first "rust_*" or "genrule" module
+// because the changes will be overridden on upgrade.
// Content before the first "rust_*" or "genrule" module is preserved.
package {
diff --git a/android-merge-2-cargo-embargo.sh b/android-merge-2-cargo-embargo.sh
index 95063c5..ceaaa1d 100755
--- a/android-merge-2-cargo-embargo.sh
+++ b/android-merge-2-cargo-embargo.sh
@@ -24,15 +24,15 @@
exit 1
fi
-if ! (dpkg -l meson); then
- echo 'Error: "meson" not found. Please install.' >&2
- exit 1
-fi
-
-if ! (dpkg -l protobuf-compiler); then
- echo 'Error: "protobuf-compiler" not found. Please install.' >&2
- exit 1
-fi
+# If there is a need to verify the installation of some packages, add them to the "pkges" list here.
+pkges='meson protobuf-compiler'
+for pkg in $pkges; do
+ result="$(dpkg-query -W --showformat='${db:Status-Status}' "$pkg" 2>&1)"
+ if [ ! $? = 0 ] || [ ! "$result" = installed ]; then
+ echo $pkg' not found. Please install.' >&2
+ exit 1
+ fi
+done
# Use the specific rust version that crosvm upstream expects.
#
@@ -40,7 +40,7 @@
#
# TODO: Consider using android's prebuilt rust binaries. Currently doesn't work
# because they try to incorrectly use system clang and llvm.
-RUST_TOOLCHAIN="1.68.2"
+RUST_TOOLCHAIN="1.73.0"
rustup which --toolchain $RUST_TOOLCHAIN cargo || \
rustup toolchain install $RUST_TOOLCHAIN
CARGO_BIN="$(dirname $(rustup which --toolchain $RUST_TOOLCHAIN cargo))"
@@ -67,7 +67,3 @@
# cargo_embargo runs. This didn't happen with cargo2android.py because it
# ignored the lock file.
git restore Cargo.lock
-
-# Fix workstation specific path in "metrics" crate's generated files.
-# TODO(b/232150148): Find a better solution for protobuf generated files.
-sed --in-place 's/path = ".*\/out/path = "./' vendor/generic/metrics/src/out/generated.rs
diff --git a/arch/Android.bp b/arch/Android.bp
index 4c3e684..738e275 100644
--- a/arch/Android.bp
+++ b/arch/Android.bp
@@ -1,5 +1,6 @@
// This file is generated by cargo_embargo.
-// Do not modify this file as most changes will be overridden on upgrade.
+// Do not modify this file after the first "rust_*" or "genrule" module
+// because the changes will be overridden on upgrade.
// Content before the first "rust_*" or "genrule" module is preserved.
package {
@@ -44,6 +45,7 @@
"libjail",
"libkernel_cmdline",
"liblibc",
+ "libmetrics",
"libminijail_rust",
"libpower_monitor",
"libresources",
@@ -88,6 +90,7 @@
"libjail",
"libkernel_cmdline",
"liblibc",
+ "libmetrics",
"libminijail_rust",
"libpower_monitor",
"libresources",
diff --git a/arch/Cargo.toml b/arch/Cargo.toml
index f362ff4..8fb2999 100644
--- a/arch/Cargo.toml
+++ b/arch/Cargo.toml
@@ -25,6 +25,7 @@
jail = { path = "../jail" }
kernel_cmdline = { path = "../kernel_cmdline" }
libc = "*"
+metrics = { path = "../metrics" }
resources = { path = "../resources" }
remain = "*"
serde = { version = "*", features = [ "derive"] }
diff --git a/arch/src/lib.rs b/arch/src/lib.rs
index 65ae770..de7a193 100644
--- a/arch/src/lib.rs
+++ b/arch/src/lib.rs
@@ -331,6 +331,7 @@
pub ac_adapter: bool,
pub acpi_sdts: Vec<SDT>,
pub android_fstab: Option<File>,
+ pub boot_cpu: usize,
pub bootorder_fw_cfg_blob: Vec<u8>,
#[cfg(target_arch = "x86_64")]
pub break_linux_pci_config_io: bool,
@@ -739,6 +740,7 @@
let mut keep_rds = device.keep_rds();
syslog::push_descriptors(&mut keep_rds);
cros_tracing::push_descriptors!(&mut keep_rds);
+ metrics::push_descriptors(&mut keep_rds);
device
.register_device_capabilities()
@@ -820,6 +822,7 @@
let mut keep_rds = device.keep_rds();
syslog::push_descriptors(&mut keep_rds);
cros_tracing::push_descriptors!(&mut keep_rds);
+ metrics::push_descriptors(&mut keep_rds);
let irq_num = resources
.allocate_irq()
@@ -1147,6 +1150,7 @@
let mut keep_rds = device.keep_rds();
syslog::push_descriptors(&mut keep_rds);
cros_tracing::push_descriptors!(&mut keep_rds);
+ metrics::push_descriptors(&mut keep_rds);
keep_rds.append(&mut vm.get_memory().as_raw_descriptors());
let ranges = io_ranges.remove(&dev_idx).unwrap_or_default();
diff --git a/arch/src/serial.rs b/arch/src/serial.rs
index 5c4e739..0040f22 100644
--- a/arch/src/serial.rs
+++ b/arch/src/serial.rs
@@ -83,6 +83,19 @@
/// Address for Serial ports in x86
pub const SERIAL_ADDR: [u64; 4] = [0x3f8, 0x2f8, 0x3e8, 0x2e8];
+/// Information about a serial device (16550-style UART) created by `add_serial_devices()`.
+pub struct SerialDeviceInfo {
+ /// Address of the device on the bus.
+ /// This is the I/O bus on x86 machines and MMIO otherwise.
+ pub address: u64,
+
+ /// Size of the device's address space on the bus.
+ pub size: u64,
+
+ /// IRQ number of the device.
+ pub irq: u32,
+}
+
/// Adds serial devices to the provided bus based on the serial parameters given.
///
/// Only devices with hardware type `SerialHardware::Serial` are added by this function.
@@ -91,20 +104,21 @@
///
/// * `protection_type` - VM protection mode.
/// * `io_bus` - Bus to add the devices to
-/// * `com_evt_1_3` - event for com1 and com3
-/// * `com_evt_1_4` - event for com2 and com4
+/// * `com_evt_1_3` - irq and event for com1 and com3
+/// * `com_evt_1_4` - irq and event for com2 and com4
/// * `serial_parameters` - definitions of serial parameter configurations.
/// * `serial_jail` - minijail object cloned for use with each serial device. All four of the
/// traditional PC-style serial ports (COM1-COM4) must be specified.
pub fn add_serial_devices(
protection_type: ProtectionType,
io_bus: &Bus,
- com_evt_1_3: &Event,
- com_evt_2_4: &Event,
+ com_evt_1_3: (u32, &Event),
+ com_evt_2_4: (u32, &Event),
serial_parameters: &BTreeMap<(SerialHardware, u8), SerialParameters>,
#[cfg_attr(windows, allow(unused_variables))] serial_jail: Option<Minijail>,
#[cfg(feature = "swap")] swap_controller: &mut Option<swap::SwapController>,
-) -> std::result::Result<(), DeviceRegistrationError> {
+) -> std::result::Result<Vec<SerialDeviceInfo>, DeviceRegistrationError> {
+ let mut devices = Vec::new();
for com_num in 0..=3 {
let com_evt = match com_num {
0 => &com_evt_1_3,
@@ -114,6 +128,8 @@
_ => &com_evt_1_3,
};
+ let (irq, com_evt) = (com_evt.0, com_evt.1);
+
let param = serial_parameters
.get(&(SerialHardware::Serial, com_num + 1))
.ok_or(DeviceRegistrationError::MissingRequiredSerialDevice(
@@ -143,19 +159,22 @@
#[cfg(windows)]
let serial_jail = None;
- sys::add_serial_device(
- com_num as usize,
+ let com = sys::add_serial_device(
com,
param,
serial_jail,
preserved_descriptors,
- io_bus,
#[cfg(feature = "swap")]
swap_controller,
)?;
+
+ let address = SERIAL_ADDR[usize::from(com_num)];
+ let size = 0x8; // 16550 UART uses 8 bytes of address space.
+ io_bus.insert(com, address, size).unwrap();
+ devices.push(SerialDeviceInfo { address, size, irq })
}
- Ok(())
+ Ok(devices)
}
#[sorted]
@@ -177,6 +196,7 @@
cmdline: &mut kernel_cmdline::Cmdline,
serial_parameters: &BTreeMap<(SerialHardware, u8), SerialParameters>,
serial_io_type: &str,
+ serial_devices: &[SerialDeviceInfo],
) -> GetSerialCmdlineResult<()> {
for serial_parameter in serial_parameters
.iter()
@@ -205,11 +225,11 @@
.next()
{
Some((SerialHardware::Serial, num)) => {
- if let Some(addr) = SERIAL_ADDR.get(*num as usize - 1) {
+ if let Some(serial_device) = serial_devices.get(*num as usize - 1) {
cmdline
.insert(
"earlycon",
- &format!("uart8250,{},0x{:x}", serial_io_type, addr),
+ &format!("uart8250,{},0x{:x}", serial_io_type, serial_device.address),
)
.map_err(GetSerialCmdlineError::KernelCmdline)?;
}
@@ -225,6 +245,7 @@
#[cfg(test)]
mod tests {
+ use devices::BusType;
use kernel_cmdline::Cmdline;
use super::*;
@@ -233,9 +254,23 @@
fn get_serial_cmdline_default() {
let mut cmdline = Cmdline::new(4096);
let mut serial_parameters = BTreeMap::new();
+ let io_bus = Bus::new(BusType::Io);
+ let evt1_3 = Event::new().unwrap();
+ let evt2_4 = Event::new().unwrap();
set_default_serial_parameters(&mut serial_parameters, false);
- get_serial_cmdline(&mut cmdline, &serial_parameters, "io")
+ let serial_devices = add_serial_devices(
+ ProtectionType::Unprotected,
+ &io_bus,
+ (4, &evt1_3),
+ (3, &evt2_4),
+ &serial_parameters,
+ None,
+ #[cfg(feature = "swap")]
+ &mut None,
+ )
+ .unwrap();
+ get_serial_cmdline(&mut cmdline, &serial_parameters, "io", &serial_devices)
.expect("get_serial_cmdline failed");
let cmdline_str = cmdline.as_str();
@@ -246,6 +281,9 @@
fn get_serial_cmdline_virtio_console() {
let mut cmdline = Cmdline::new(4096);
let mut serial_parameters = BTreeMap::new();
+ let io_bus = Bus::new(BusType::Io);
+ let evt1_3 = Event::new().unwrap();
+ let evt2_4 = Event::new().unwrap();
// Add a virtio-console device with console=true.
serial_parameters.insert(
@@ -267,7 +305,18 @@
);
set_default_serial_parameters(&mut serial_parameters, false);
- get_serial_cmdline(&mut cmdline, &serial_parameters, "io")
+ let serial_devices = add_serial_devices(
+ ProtectionType::Unprotected,
+ &io_bus,
+ (4, &evt1_3),
+ (3, &evt2_4),
+ &serial_parameters,
+ None,
+ #[cfg(feature = "swap")]
+ &mut None,
+ )
+ .unwrap();
+ get_serial_cmdline(&mut cmdline, &serial_parameters, "io", &serial_devices)
.expect("get_serial_cmdline failed");
let cmdline_str = cmdline.as_str();
@@ -278,6 +327,9 @@
fn get_serial_cmdline_virtio_console_serial_earlycon() {
let mut cmdline = Cmdline::new(4096);
let mut serial_parameters = BTreeMap::new();
+ let io_bus = Bus::new(BusType::Io);
+ let evt1_3 = Event::new().unwrap();
+ let evt2_4 = Event::new().unwrap();
// Add a virtio-console device with console=true.
serial_parameters.insert(
@@ -318,7 +370,18 @@
);
set_default_serial_parameters(&mut serial_parameters, false);
- get_serial_cmdline(&mut cmdline, &serial_parameters, "io")
+ let serial_devices = add_serial_devices(
+ ProtectionType::Unprotected,
+ &io_bus,
+ (4, &evt1_3),
+ (3, &evt2_4),
+ &serial_parameters,
+ None,
+ #[cfg(feature = "swap")]
+ &mut None,
+ )
+ .unwrap();
+ get_serial_cmdline(&mut cmdline, &serial_parameters, "io", &serial_devices)
.expect("get_serial_cmdline failed");
let cmdline_str = cmdline.as_str();
@@ -330,6 +393,9 @@
fn get_serial_cmdline_virtio_console_invalid_earlycon() {
let mut cmdline = Cmdline::new(4096);
let mut serial_parameters = BTreeMap::new();
+ let io_bus = Bus::new(BusType::Io);
+ let evt1_3 = Event::new().unwrap();
+ let evt2_4 = Event::new().unwrap();
// Try to add a virtio-console device with earlycon=true (unsupported).
serial_parameters.insert(
@@ -351,7 +417,18 @@
);
set_default_serial_parameters(&mut serial_parameters, false);
- get_serial_cmdline(&mut cmdline, &serial_parameters, "io")
+ let serial_devices = add_serial_devices(
+ ProtectionType::Unprotected,
+ &io_bus,
+ (4, &evt1_3),
+ (3, &evt2_4),
+ &serial_parameters,
+ None,
+ #[cfg(feature = "swap")]
+ &mut None,
+ )
+ .unwrap();
+ get_serial_cmdline(&mut cmdline, &serial_parameters, "io", &serial_devices)
.expect_err("get_serial_cmdline succeeded");
}
}
diff --git a/arch/src/serial/sys/linux.rs b/arch/src/serial/sys/linux.rs
index c13a503..dc412a3 100644
--- a/arch/src/serial/sys/linux.rs
+++ b/arch/src/serial/sys/linux.rs
@@ -6,25 +6,21 @@
use base::RawDescriptor;
use devices::serial_device::SerialParameters;
-use devices::Bus;
use devices::BusDevice;
use devices::ProxyDevice;
use devices::Serial;
use minijail::Minijail;
use sync::Mutex;
-use crate::serial::SERIAL_ADDR;
use crate::DeviceRegistrationError;
pub fn add_serial_device(
- com_num: usize,
com: Serial,
_serial_parameters: &SerialParameters,
serial_jail: Option<Minijail>,
preserved_descriptors: Vec<RawDescriptor>,
- io_bus: &Bus,
#[cfg(feature = "swap")] swap_controller: &mut Option<swap::SwapController>,
-) -> std::result::Result<(), DeviceRegistrationError> {
+) -> std::result::Result<Arc<Mutex<dyn BusDevice>>, DeviceRegistrationError> {
let com: Arc<Mutex<dyn BusDevice>> = if let Some(serial_jail) = serial_jail {
Arc::new(Mutex::new(
ProxyDevice::new(
@@ -39,6 +35,5 @@
} else {
Arc::new(Mutex::new(com))
};
- io_bus.insert(com, SERIAL_ADDR[com_num], 0x8).unwrap();
- Ok(())
+ Ok(com)
}
diff --git a/arch/src/serial/sys/windows.rs b/arch/src/serial/sys/windows.rs
index c9b894c..c9ceff7 100644
--- a/arch/src/serial/sys/windows.rs
+++ b/arch/src/serial/sys/windows.rs
@@ -13,12 +13,11 @@
use base::Result;
use devices::serial_device::SerialParameters;
use devices::serial_device::SerialType;
-use devices::Bus;
+use devices::BusDevice;
use devices::Serial;
use jail::FakeMinijailStub as Minijail;
use sync::Mutex;
-use crate::serial::SERIAL_ADDR;
use crate::DeviceRegistrationError;
/// A type for queueing input bytes to a serial device that abstracts if the device is local or part
@@ -46,51 +45,45 @@
}
pub fn add_serial_device(
- com_num: usize,
com: Serial,
serial_params: &SerialParameters,
serial_jail: Option<Minijail>,
_preserved_descriptors: Vec<RawDescriptor>,
- io_bus: &Bus,
-) -> std::result::Result<(), DeviceRegistrationError> {
- match serial_jail {
- Some(_) => (),
- None => {
- let com = Arc::new(Mutex::new(com));
- io_bus
- .insert(com.clone(), SERIAL_ADDR[com_num], 0x8)
- .unwrap();
+) -> std::result::Result<Arc<Mutex<dyn BusDevice>>, DeviceRegistrationError> {
+ assert!(serial_jail.is_none());
- if !serial_params.stdin {
- if let SerialType::SystemSerialType = serial_params.type_ {
- let mut in_pipe_result = com
- .lock()
- .system_params
- .in_stream
- .as_ref()
- .unwrap()
- .try_clone();
- thread::spawn(move || {
- let serial_input = SerialInput::new_local(com);
- let in_pipe = in_pipe_result.as_mut().unwrap();
+ let com = Arc::new(Mutex::new(com));
- let mut buffer: [u8; 255] = [0; 255];
- loop {
- // Safe because we are reading bytes.
- let bytes = in_pipe.read(&mut buffer).unwrap_or(0);
- if bytes > 0 {
- serial_input.queue_input_bytes(&buffer[0..bytes]).unwrap();
- }
- // We can't issue blocking reads here and overlapped I/O is
- // incompatible with the call site where writes to this pipe are being
- // made, so instead we issue a small wait to prevent us from hogging
- // the CPU. This 20ms delay while typing doesn't seem to be noticeable.
- thread::sleep(Duration::from_millis(20));
- }
- });
+ if !serial_params.stdin {
+ if let SerialType::SystemSerialType = serial_params.type_ {
+ let mut in_pipe_result = com
+ .lock()
+ .system_params
+ .in_stream
+ .as_ref()
+ .unwrap()
+ .try_clone();
+ let com = com.clone();
+ thread::spawn(move || {
+ let serial_input = SerialInput::new_local(com);
+ let in_pipe = in_pipe_result.as_mut().unwrap();
+
+ let mut buffer: [u8; 255] = [0; 255];
+ loop {
+ // Safe because we are reading bytes.
+ let bytes = in_pipe.read(&mut buffer).unwrap_or(0);
+ if bytes > 0 {
+ serial_input.queue_input_bytes(&buffer[0..bytes]).unwrap();
+ }
+ // We can't issue blocking reads here and overlapped I/O is
+ // incompatible with the call site where writes to this pipe are being
+ // made, so instead we issue a small wait to prevent us from hogging
+ // the CPU. This 20ms delay while typing doesn't seem to be noticeable.
+ thread::sleep(Duration::from_millis(20));
}
- }
+ });
}
}
- Ok(())
+
+ Ok(com)
}
diff --git a/arch/src/sys/linux.rs b/arch/src/sys/linux.rs
index 4289aea..f536535 100644
--- a/arch/src/sys/linux.rs
+++ b/arch/src/sys/linux.rs
@@ -93,6 +93,7 @@
let mut keep_rds = goldfish_bat.keep_rds();
syslog::push_descriptors(&mut keep_rds);
cros_tracing::push_descriptors!(&mut keep_rds);
+ metrics::push_descriptors(&mut keep_rds);
mmio_bus
.insert(
Arc::new(Mutex::new(
@@ -188,6 +189,7 @@
let mut keep_rds = device.keep_rds();
syslog::push_descriptors(&mut keep_rds);
cros_tracing::push_descriptors!(&mut keep_rds);
+ metrics::push_descriptors(&mut keep_rds);
let irqs = device
.get_platform_irqs()
diff --git a/argh_helpers/Android.bp b/argh_helpers/Android.bp
index 95ec352..33024d1 100644
--- a/argh_helpers/Android.bp
+++ b/argh_helpers/Android.bp
@@ -1,5 +1,6 @@
// This file is generated by cargo_embargo.
-// Do not modify this file as most changes will be overridden on upgrade.
+// Do not modify this file after the first "rust_*" or "genrule" module
+// because the changes will be overridden on upgrade.
// Content before the first "rust_*" or "genrule" module is preserved.
package {
diff --git a/audio_util/Android.bp b/audio_util/Android.bp
index afc8937..4000343 100644
--- a/audio_util/Android.bp
+++ b/audio_util/Android.bp
@@ -1,5 +1,6 @@
// This file is generated by cargo_embargo.
-// Do not modify this file as most changes will be overridden on upgrade.
+// Do not modify this file after the first "rust_*" or "genrule" module
+// because the changes will be overridden on upgrade.
// Content before the first "rust_*" or "genrule" module is preserved.
package {
diff --git a/base/Android.bp b/base/Android.bp
index 0c3d646..4a871fb 100644
--- a/base/Android.bp
+++ b/base/Android.bp
@@ -1,5 +1,6 @@
// This file is generated by cargo_embargo.
-// Do not modify this file as most changes will be overridden on upgrade.
+// Do not modify this file after the first "rust_*" or "genrule" module
+// because the changes will be overridden on upgrade.
// Content before the first "rust_*" or "genrule" module is preserved.
package {
@@ -29,7 +30,6 @@
"libaudio_streams",
"libcfg_if",
"libchrono",
- "libdata_model",
"libenv_logger",
"liblibc",
"liblog_rust",
@@ -88,7 +88,6 @@
"libbase_rust",
"libcfg_if",
"libchrono",
- "libdata_model",
"libenv_logger",
"liblibc",
"liblog_rust",
@@ -128,7 +127,6 @@
"libbase_rust",
"libcfg_if",
"libchrono",
- "libdata_model",
"libenv_logger",
"liblibc",
"liblog_rust",
@@ -168,7 +166,6 @@
"libbase_rust",
"libcfg_if",
"libchrono",
- "libdata_model",
"libenv_logger",
"liblibc",
"liblog_rust",
@@ -221,7 +218,6 @@
"libaudio_streams",
"libcfg_if",
"libchrono",
- "libdata_model",
"libenv_logger",
"liblibc",
"liblog_rust",
diff --git a/base/Cargo.toml b/base/Cargo.toml
index 6180265..8105688 100644
--- a/base/Cargo.toml
+++ b/base/Cargo.toml
@@ -18,11 +18,10 @@
[dependencies]
audio_streams = { path = "../common/audio_streams" } # provided by ebuild
base_event_token_derive = { path = "base_event_token_derive", version = "*" }
-data_model = { path = "../common/data_model" } # provided by ebuild
sync = { path = "../common/sync" } # provided by ebuild
cfg-if = "*"
-chrono = { version = "0.4.19", features = ["clock"], default-features = false }
+chrono = { version = "0.4.34", features = ["now"], default-features = false }
env_logger = { version = "0.9.0", default-features = false }
libc = "*"
log = "0.4"
diff --git a/base/base_event_token_derive/Android.bp b/base/base_event_token_derive/Android.bp
index 92a8f2b..7f9b086 100644
--- a/base/base_event_token_derive/Android.bp
+++ b/base/base_event_token_derive/Android.bp
@@ -1,5 +1,6 @@
// This file is generated by cargo_embargo.
-// Do not modify this file as most changes will be overridden on upgrade.
+// Do not modify this file after the first "rust_*" or "genrule" module
+// because the changes will be overridden on upgrade.
// Content before the first "rust_*" or "genrule" module is preserved.
package {
diff --git a/base/src/lib.rs b/base/src/lib.rs
index f4e9af8..2b21514 100644
--- a/base/src/lib.rs
+++ b/base/src/lib.rs
@@ -15,6 +15,7 @@
mod iobuf;
mod mmap;
mod notifiers;
+mod periodic_logger;
mod shm;
pub mod syslog;
pub mod test_utils;
diff --git a/base/src/mmap.rs b/base/src/mmap.rs
index 6654d07..8095135 100644
--- a/base/src/mmap.rs
+++ b/base/src/mmap.rs
@@ -13,6 +13,7 @@
use std::ptr::write_volatile;
use std::sync::atomic::fence;
use std::sync::atomic::Ordering;
+use std::sync::OnceLock;
use remain::sorted;
use serde::Deserialize;
@@ -29,6 +30,44 @@
use crate::VolatileMemoryResult;
use crate::VolatileSlice;
+static CACHELINE_SIZE: OnceLock<usize> = OnceLock::new();
+
+#[allow(unused_assignments)]
+fn get_cacheline_size_once() -> usize {
+ let mut assume_reason: &str = "unknown";
+ cfg_if::cfg_if! {
+ if #[cfg(all(any(target_os = "android", target_os = "linux"), not(target_env = "musl")))] {
+ // SAFETY:
+ // Safe because we check the return value for errors or unsupported requests
+ let linesize = unsafe { libc::sysconf(libc::_SC_LEVEL1_DCACHE_LINESIZE) };
+ if linesize > 0 {
+ return linesize as usize;
+ } else {
+ assume_reason = "sysconf cacheline size query failed";
+ }
+ } else {
+ assume_reason = "cacheline size query not implemented for platform/arch";
+ }
+ }
+
+ let assumed_size = 64;
+ log::debug!(
+ "assuming cacheline_size={}; reason: {}.",
+ assumed_size,
+ assume_reason
+ );
+ assumed_size
+}
+
+/// Returns the system's effective cacheline size (e.g. the granularity at which arch-specific
+/// cacheline management, such as with the clflush instruction, is expected to occur).
+#[inline(always)]
+fn get_cacheline_size() -> usize {
+ let size = *CACHELINE_SIZE.get_or_init(get_cacheline_size_once);
+ assert!(size > 0);
+ size
+}
+
#[sorted]
#[derive(Debug, thiserror::Error)]
pub enum Error {
@@ -44,6 +83,8 @@
InvalidOffset,
#[error("requested memory range spans past the end of the region: offset={0} count={1} region_size={2}")]
InvalidRange(usize, usize, usize),
+ #[error("operation is not implemented on platform/architecture: {0}")]
+ NotImplemented(&'static str),
#[error("requested memory is not page aligned")]
NotPageAligned,
#[error("failed to read from file to memory: {0}")]
@@ -132,6 +173,27 @@
pub(crate) _file_descriptor: Option<SafeDescriptor>,
}
+#[inline(always)]
+unsafe fn flush_one(_addr: *const u8) -> Result<()> {
+ cfg_if::cfg_if! {
+ if #[cfg(target_arch = "x86_64")] {
+ // As per table 11-7 of the SDM, processors are not required to
+ // snoop UC mappings, so flush the target to memory.
+ // SAFETY: assumes that the caller has supplied a valid address.
+ unsafe { core::arch::x86_64::_mm_clflush(_addr) };
+ Ok(())
+ } else if #[cfg(target_arch = "aarch64")] {
+ // Data cache clean by VA to PoC.
+ std::arch::asm!("DC CVAC, {x}", x = in(reg) _addr);
+ Ok(())
+ } else if #[cfg(target_arch = "arm")] {
+ Err(Error::NotImplemented("Userspace cannot flush to PoC"))
+ } else {
+ Err(Error::NotImplemented("Cache flush not implemented"))
+ }
+ }
+}
+
impl MemoryMapping {
pub fn write_slice(&self, buf: &[u8], offset: usize) -> Result<usize> {
match self.mapping.size().checked_sub(offset) {
@@ -294,42 +356,63 @@
self.mapping.msync()
}
- /// Flush memory which the guest may be accessing through an uncached mapping.
+ /// Flush a region of the MemoryMapping from the system's caching hierarchy.
+ /// There are several uses for flushing:
///
- /// Reads via an uncached mapping can bypass the cache and directly access main
- /// memory. This is outside the memory model of Rust, which means that even with
- /// proper synchronization, guest reads via an uncached mapping might not see
- /// updates from the host. As such, it is necessary to perform architectural
- /// cache maintainance to flush the host writes to main memory.
+ /// * Cached memory which the guest may be reading through an uncached mapping:
///
- /// Note that this does not support writable uncached guest mappings, as doing so
- /// requires invalidating the cache, not flushing the cache.
+ /// Guest reads via an uncached mapping can bypass the cache and directly access main
+ /// memory. This is outside the memory model of Rust, which means that even with proper
+ /// synchronization, guest reads via an uncached mapping might not see updates from the
+ /// host. As such, it is necessary to perform architectural cache maintenance to flush the
+ /// host writes to main memory.
+ ///
+ /// Note that this does not support writable uncached guest mappings, as doing so
+ /// requires invalidating the cache, not flushing the cache.
+ ///
+ /// * Uncached memory which the guest may be writing through a cached mapping:
+ ///
+ /// Guest writes via a cached mapping of a host's uncached memory may never make it to
+ /// system/device memory prior to being read. In such cases, explicit flushing of the cached
+ /// writes is necessary, since other managers of the host's uncached mapping (e.g. DRM) see
+ /// no need to flush, as they believe all writes would explicitly bypass the caches.
///
/// Currently only supported on x86_64 and aarch64. Cannot be supported on 32-bit arm.
- pub fn flush_uncached_guest_mapping(&self, offset: usize) {
- if offset > self.mapping.size() {
- return;
+ pub fn flush_region(&self, offset: usize, len: usize) -> Result<()> {
+ let addr: *const u8 = self.as_ptr();
+ let size = self.size();
+
+ // disallow overflow/wrapping ranges and subregion extending beyond mapped range
+ if usize::MAX - size < addr as usize || offset >= size || size - offset < len {
+ return Err(Error::InvalidRange(offset, len, size));
}
- // SAFETY: We checked that offset is within the mapping, and flushing
- // the cache doesn't affect any rust safety properties.
- unsafe {
- #[allow(unused)]
- let target = self.mapping.as_ptr().add(offset);
- cfg_if::cfg_if! {
- if #[cfg(target_arch = "x86_64")] {
- // As per table 11-7 of the SDM, processors are not required to
- // snoop UC mappings, so flush the target to memory.
- core::arch::x86_64::_mm_clflush(target);
- } else if #[cfg(target_arch = "aarch64")] {
- // Data cache clean by VA to PoC.
- std::arch::asm!("DC CVAC, {x}", x = in(reg) target);
- } else if #[cfg(target_arch = "arm")] {
- panic!("Userspace cannot flush to PoC");
- } else {
- unimplemented!("Cache flush not implemented")
- }
- }
+
+ // SAFETY:
+ // Safe because already validated that `next` will be an address in the mapping:
+ // * mapped region is non-wrapping
+ // * subregion is bounded within the mapped region
+ let mut next: *const u8 = unsafe { addr.add(offset) };
+
+ let cacheline_size = get_cacheline_size();
+ let cacheline_count = len.div_ceil(cacheline_size);
+
+ for _ in 0..cacheline_count {
+ // SAFETY:
+ // Safe because `next` is guaranteed to be within the mapped region (see earlier
+ // validations), and flushing the cache doesn't affect any rust safety properties.
+ unsafe { flush_one(next)? };
+
+ // SAFETY:
+ // Safe because we never use next if it goes out of the mapped region or overflows its
+ // storage type (based on earlier validations and the loop bounds).
+ next = unsafe { next.add(cacheline_size) };
}
+ Ok(())
+ }
+
+ /// Flush all backing memory for a mapping in an arch-specific manner (see `flush_region()`).
+ pub fn flush_all(&self) -> Result<()> {
+ self.flush_region(0, self.size())
}
}
diff --git a/base/src/periodic_logger.rs b/base/src/periodic_logger.rs
new file mode 100644
index 0000000..7fd1a04
--- /dev/null
+++ b/base/src/periodic_logger.rs
@@ -0,0 +1,232 @@
+// Copyright 2024 The ChromiumOS Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// TODO(b/318439696): Remove once it is used
+#![allow(dead_code)]
+
+use std::collections::HashMap;
+use std::fmt::Write;
+use std::sync::atomic::AtomicU32;
+use std::sync::atomic::Ordering;
+use std::sync::Arc;
+use std::sync::RwLock;
+use std::time::Duration;
+
+use thiserror::Error as ThisError;
+
+use crate::EventToken;
+use crate::Timer;
+use crate::TimerTrait;
+use crate::WaitContext;
+use crate::WorkerThread;
+
+/// Utility class that helps count and log high frequency events periodically.
+pub struct PeriodicLogger {
+ // Name that is printed out to differentiate this logger from other `PeriodicLogger`s
+ name: String,
+ // Interval to log
+ interval: Duration,
+ // Map of event counters that are periodically logged
+ counters: Arc<RwLock<HashMap<String, AtomicU32>>>,
+ // The periodic logger thread
+ worker_thread: Option<WorkerThread<Result<(), PeriodicLoggerError>>>,
+}
+
+impl PeriodicLogger {
+ pub fn new(name: String, interval: Duration) -> Self {
+ PeriodicLogger {
+ name,
+ interval,
+ counters: Arc::new(RwLock::new(HashMap::new())),
+ worker_thread: None,
+ }
+ }
+
+ /// Add a new event item to be counted.
+ pub fn add_counter_item(&self, name: String) -> Result<(), PeriodicLoggerError> {
+ // This write lock will likely be acquired infrequently.
+ let mut counters_write_lock = self
+ .counters
+ .write()
+ .map_err(|e| PeriodicLoggerError::WriteLockError(e.to_string()))?;
+
+ if counters_write_lock.contains_key(&name) {
+ return Err(PeriodicLoggerError::CounterAlreadyExist(name));
+ }
+
+ counters_write_lock.insert(name, AtomicU32::new(0));
+ Ok(())
+ }
+
+ /// Increment event counter by an `amount`
+ pub fn increment_counter(&self, name: String, amount: u32) -> Result<(), PeriodicLoggerError> {
+ match self.counters.read() {
+ Ok(counters_map) => {
+ if let Some(atomic_counter) = counters_map.get(&name) {
+ atomic_counter.fetch_add(amount, Ordering::Relaxed);
+ Ok(())
+ } else {
+ Err(PeriodicLoggerError::CounterDoesNotExist(name))
+ }
+ }
+ Err(e) => Err(PeriodicLoggerError::ReadLockError(e.to_string())),
+ }
+ }
+
+ /// Starts a thread that will log the count of events within a `self.interval` time period.
+ /// All counters will be reset to 0 after logging.
+ pub fn start_logging_thread(&mut self) -> Result<(), PeriodicLoggerError> {
+ if self.worker_thread.is_some() {
+ return Err(PeriodicLoggerError::ThreadAlreadyStarted);
+ }
+
+ #[derive(EventToken)]
+ enum Token {
+ Exit,
+ PeriodicLog,
+ }
+
+ let cloned_counter = self.counters.clone();
+ let interval_copy = self.interval;
+ let name_copy = self.name.clone();
+ self.worker_thread = Some(WorkerThread::start(
+ format!("PeriodicLogger_{}", self.name),
+ move |kill_evt| {
+ let mut timer = Timer::new().map_err(PeriodicLoggerError::TimerNewError)?;
+ timer
+ .reset(interval_copy, Some(interval_copy))
+ .map_err(PeriodicLoggerError::TimerResetError)?;
+
+ let wait_ctx = WaitContext::build_with(&[
+ (&kill_evt, Token::Exit),
+ (&timer, Token::PeriodicLog),
+ ])
+ .map_err(PeriodicLoggerError::WaitContextBuildError)?;
+
+ 'outer: loop {
+ let events = wait_ctx.wait().expect("wait failed");
+ for event in events.iter().filter(|e| e.is_readable) {
+ match event.token {
+ Token::Exit => {
+ break 'outer;
+ }
+ Token::PeriodicLog => {
+ let counter_map = cloned_counter.read().map_err(|e| {
+ PeriodicLoggerError::ReadLockError(e.to_string())
+ })?;
+
+ let mut logged_string =
+ format!("{} {:?}:", name_copy, interval_copy);
+ for (counter_name, counter_value) in counter_map.iter() {
+ let value = counter_value.swap(0, Ordering::Relaxed);
+ let _ =
+ write!(logged_string, "\n {}: {}", counter_name, value);
+ }
+
+ // Log all counters
+ crate::info!("{}", logged_string);
+ }
+ }
+ }
+ }
+ Ok(())
+ },
+ ));
+
+ Ok(())
+ }
+}
+
+#[derive(Debug, ThisError, PartialEq)]
+pub enum PeriodicLoggerError {
+ #[error("Periodic logger thread already started.")]
+ ThreadAlreadyStarted,
+ #[error("Failed to acquire write lock: {0}")]
+ WriteLockError(String),
+ #[error("Failed to acquire read lock: {0}")]
+ ReadLockError(String),
+ #[error("Counter already exists: {0}")]
+ CounterAlreadyExist(String),
+ #[error("Counter does not exist: {0}")]
+ CounterDoesNotExist(String),
+ #[error("Failed to build WaitContext: {0}")]
+ WaitContextBuildError(crate::Error),
+ #[error("Failed to wait on WaitContext: {0}")]
+ WaitContextWaitError(crate::Error),
+ #[error("Failed to reset Timer: {0}")]
+ TimerResetError(crate::Error),
+ #[error("Failed to initialize Timer: {0}")]
+ TimerNewError(crate::Error),
+}
+
+#[cfg(test)]
+mod tests {
+ use std::thread;
+
+ use super::*;
+
+ #[test]
+ fn periodic_add() {
+ let periodic_logger = PeriodicLogger::new("test".to_string(), Duration::from_secs(3));
+ periodic_logger
+ .add_counter_item("counter_1".to_string())
+ .unwrap();
+ periodic_logger
+ .increment_counter("counter_1".to_string(), 2)
+ .unwrap();
+ periodic_logger
+ .increment_counter("counter_1".to_string(), 5)
+ .unwrap();
+
+ assert_eq!(periodic_logger.counters.read().unwrap().len(), 1);
+ assert_eq!(
+ periodic_logger
+ .counters
+ .read()
+ .unwrap()
+ .get("counter_1")
+ .unwrap()
+ .load(Ordering::Relaxed),
+ 7
+ );
+ }
+
+ #[test]
+ fn worker_thread_cannot_start_twice() {
+ let mut periodic_logger = PeriodicLogger::new("test".to_string(), Duration::from_secs(3));
+ assert!(periodic_logger.start_logging_thread().is_ok());
+ assert!(periodic_logger.start_logging_thread().is_err());
+ }
+
+ #[test]
+ fn add_same_counter_item_twice_return_err() {
+ let periodic_logger = PeriodicLogger::new("test".to_string(), Duration::from_secs(3));
+ assert!(periodic_logger
+ .add_counter_item("counter_1".to_string())
+ .is_ok());
+ assert_eq!(
+ periodic_logger.add_counter_item("counter_1".to_string()),
+ Err(PeriodicLoggerError::CounterAlreadyExist(
+ "counter_1".to_string()
+ ))
+ );
+ }
+
+ /// Ignored because this is intended to be run locally
+ #[ignore]
+ #[test]
+ fn periodic_logger_smoke_test() {
+ let mut periodic_logger = PeriodicLogger::new("test".to_string(), Duration::from_secs(3));
+ periodic_logger
+ .add_counter_item("counter_1".to_string())
+ .unwrap();
+
+ periodic_logger.start_logging_thread().unwrap();
+ periodic_logger
+ .increment_counter("counter_1".to_string(), 5)
+ .unwrap();
+
+ thread::sleep(Duration::from_secs(5));
+ }
+}
diff --git a/base/src/sys/linux/event.rs b/base/src/sys/linux/event.rs
index 8559e7a..4d174a2 100644
--- a/base/src/sys/linux/event.rs
+++ b/base/src/sys/linux/event.rs
@@ -14,7 +14,6 @@
use serde::Deserialize;
use serde::Serialize;
-use super::duration_to_timespec;
use super::errno_result;
use super::RawDescriptor;
use super::Result;
@@ -22,6 +21,7 @@
use crate::descriptor::FromRawDescriptor;
use crate::descriptor::IntoRawDescriptor;
use crate::descriptor::SafeDescriptor;
+use crate::unix::duration_to_timespec;
use crate::EventWaitResult;
/// A safe wrapper around a Linux eventfd (man 2 eventfd).
diff --git a/base/src/sys/linux/mod.rs b/base/src/sys/linux/mod.rs
index bd72d5e..aa91e60 100644
--- a/base/src/sys/linux/mod.rs
+++ b/base/src/sys/linux/mod.rs
@@ -503,17 +503,6 @@
fds.revents & libc::POLLIN != 0
}
-/// Return a timespec filed with the specified Duration `duration`.
-#[allow(clippy::useless_conversion)]
-pub fn duration_to_timespec(duration: Duration) -> libc::timespec {
- // nsec always fits in i32 because subsec_nanos is defined to be less than one billion.
- let nsec = duration.subsec_nanos() as i32;
- libc::timespec {
- tv_sec: duration.as_secs() as libc::time_t,
- tv_nsec: nsec.into(),
- }
-}
-
/// Return the maximum Duration that can be used with libc::timespec.
pub fn max_timeout() -> Duration {
Duration::new(libc::time_t::max_value() as u64, 999999999)
diff --git a/base/src/sys/linux/signal.rs b/base/src/sys/linux/signal.rs
index 4ef4334..596a24d 100644
--- a/base/src/sys/linux/signal.rs
+++ b/base/src/sys/linux/signal.rs
@@ -37,13 +37,13 @@
use remain::sorted;
use thiserror::Error;
-use super::duration_to_timespec;
use super::errno_result;
use super::Error as ErrnoError;
use super::Pid;
use super::Result;
use crate::handle_eintr_errno;
use crate::handle_eintr_rc;
+use crate::unix::duration_to_timespec;
#[sorted]
#[derive(Error, Debug)]
diff --git a/base/src/sys/linux/timer.rs b/base/src/sys/linux/timer.rs
index afd6cd8..7652183 100644
--- a/base/src/sys/linux/timer.rs
+++ b/base/src/sys/linux/timer.rs
@@ -19,13 +19,13 @@
use super::super::errno_result;
use super::super::Error;
use super::super::Result;
-use super::duration_to_timespec;
use crate::descriptor::AsRawDescriptor;
use crate::descriptor::FromRawDescriptor;
use crate::descriptor::SafeDescriptor;
use crate::handle_eintr_errno;
use crate::timer::Timer;
use crate::timer::TimerTrait;
+use crate::unix::duration_to_timespec;
impl AsRawFd for Timer {
fn as_raw_fd(&self) -> RawFd {
diff --git a/base/src/sys/macos/event.rs b/base/src/sys/macos/event.rs
new file mode 100644
index 0000000..90d6330
--- /dev/null
+++ b/base/src/sys/macos/event.rs
@@ -0,0 +1,145 @@
+// Copyright 2024 The ChromiumOS Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+use std::ptr;
+use std::time::Duration;
+
+use crate::descriptor::AsRawDescriptor;
+use crate::descriptor::FromRawDescriptor;
+use crate::errno::errno_result;
+use crate::errno::Result;
+use crate::event::EventWaitResult;
+use crate::sys::unix::RawDescriptor;
+use crate::unix::duration_to_timespec;
+use crate::SafeDescriptor;
+
+#[derive(Debug, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
+pub struct PlatformEvent {
+ // TODO(schuffelen): Implement a more complete kqueue abstraction?
+ queue: SafeDescriptor,
+}
+
+// Only accepts the subset of parameters we actually use
+fn make_kevent(filter: i16, flags: u16, fflags: u32) -> libc::kevent {
+ libc::kevent {
+ ident: 0, /* hopefully not global? */
+ filter,
+ flags,
+ fflags,
+ data: 0,
+ udata: ptr::null_mut(),
+ }
+}
+
+impl PlatformEvent {
+ pub fn new() -> Result<PlatformEvent> {
+ // SAFETY: Trivially safe
+ let raw_queue = unsafe { libc::kqueue() };
+ if raw_queue < 0 {
+ return crate::errno::errno_result();
+ }
+ // SAFETY: Tested whether it was a valid file descriptor
+ let queue = unsafe { SafeDescriptor::from_raw_descriptor(raw_queue) };
+ let event = PlatformEvent { queue };
+ let reg = make_kevent(
+ libc::EVFILT_USER,
+ libc::EV_ADD | libc::EV_CLEAR,
+ libc::NOTE_FFNOP,
+ );
+ event.kevent(&[reg], &mut [], None)?;
+ Ok(event)
+ }
+
+ fn kevent(
+ &self,
+ changelist: &[libc::kevent],
+ eventlist: &mut [libc::kevent],
+ timeout: Option<Duration>,
+ ) -> Result<libc::c_int> {
+ let timespec = timeout.map(duration_to_timespec);
+ // SAFETY: `queue` is a valid kqueue, `changelist` and `eventlist` are supplied with lengths
+ // based on valid slices
+ let res = unsafe {
+ libc::kevent(
+ self.queue.as_raw_descriptor(),
+ changelist.as_ptr(),
+ changelist.len() as i32,
+ eventlist.as_mut_ptr(),
+ eventlist.len() as i32,
+ if let Some(timeout) = timespec {
+ &timeout
+ } else {
+ ptr::null()
+ },
+ )
+ };
+ if res < 0 {
+ errno_result()
+ } else {
+ Ok(res)
+ }
+ }
+
+ pub fn signal(&self) -> Result<()> {
+ let event = make_kevent(libc::EVFILT_USER, 0, libc::NOTE_TRIGGER);
+ self.kevent(&[event], &mut [], None)?;
+ Ok(())
+ }
+
+ pub fn wait(&self) -> Result<()> {
+ let mut event = [make_kevent(0, 0, 0)];
+ self.kevent(&[], &mut event[..], None)?;
+ Ok(())
+ }
+
+ pub fn wait_timeout(&self, timeout: Duration) -> Result<EventWaitResult> {
+ let mut event = [make_kevent(0, 0, 0)];
+ if self.kevent(&[], &mut event[..], Some(timeout))? == 0 {
+ Ok(EventWaitResult::TimedOut)
+ } else {
+ Ok(EventWaitResult::Signaled)
+ }
+ }
+
+ pub fn reset(&self) -> Result<()> {
+ self.wait_timeout(Duration::ZERO)?;
+ Ok(())
+ }
+
+ pub fn try_clone(&self) -> Result<PlatformEvent> {
+ self.queue.try_clone().map(|queue| PlatformEvent { queue })
+ }
+}
+
+impl crate::AsRawDescriptor for PlatformEvent {
+ fn as_raw_descriptor(&self) -> RawDescriptor {
+ self.queue.as_raw_descriptor()
+ }
+}
+
+impl crate::FromRawDescriptor for PlatformEvent {
+ unsafe fn from_raw_descriptor(descriptor: RawDescriptor) -> Self {
+ PlatformEvent {
+ queue: SafeDescriptor::from_raw_descriptor(descriptor),
+ }
+ }
+}
+
+impl crate::IntoRawDescriptor for PlatformEvent {
+ fn into_raw_descriptor(self) -> RawDescriptor {
+ self.queue.into_raw_descriptor()
+ }
+}
+
+impl From<PlatformEvent> for crate::SafeDescriptor {
+ fn from(evt: PlatformEvent) -> Self {
+ evt.queue
+ }
+}
+
+impl From<SafeDescriptor> for PlatformEvent {
+ fn from(queue: SafeDescriptor) -> Self {
+ PlatformEvent { queue }
+ }
+}
diff --git a/base/src/sys/macos/mod.rs b/base/src/sys/macos/mod.rs
index 5c4d4bb..254cc30 100644
--- a/base/src/sys/macos/mod.rs
+++ b/base/src/sys/macos/mod.rs
@@ -10,8 +10,10 @@
use crate::unix::Pid;
use crate::MmapError;
+mod event;
mod net;
+pub(crate) use event::PlatformEvent;
pub(in crate::sys) use libc::sendmsg;
pub(in crate::sys) use net::sockaddr_un;
pub(in crate::sys) use net::sockaddrv4_to_lib_c;
@@ -100,63 +102,6 @@
pub struct MemoryMappingArena {}
-#[derive(Debug, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
-pub struct PlatformEvent {}
-
-impl PlatformEvent {
- pub fn new() -> crate::errno::Result<PlatformEvent> {
- todo!();
- }
- pub fn signal(&self) -> crate::errno::Result<()> {
- todo!();
- }
- pub fn wait(&self) -> crate::errno::Result<()> {
- todo!();
- }
- pub fn wait_timeout(
- &self,
- _timeout: std::time::Duration,
- ) -> crate::errno::Result<crate::event::EventWaitResult> {
- todo!();
- }
- pub fn reset(&self) -> crate::errno::Result<()> {
- todo!();
- }
- pub fn try_clone(&self) -> crate::errno::Result<PlatformEvent> {
- todo!();
- }
-}
-
-impl crate::AsRawDescriptor for PlatformEvent {
- fn as_raw_descriptor(&self) -> RawDescriptor {
- todo!();
- }
-}
-
-impl crate::FromRawDescriptor for PlatformEvent {
- unsafe fn from_raw_descriptor(_descriptor: RawDescriptor) -> Self {
- todo!();
- }
-}
-
-impl crate::IntoRawDescriptor for PlatformEvent {
- fn into_raw_descriptor(self) -> RawDescriptor {
- todo!();
- }
-}
-
-impl From<PlatformEvent> for crate::SafeDescriptor {
- fn from(_evt: PlatformEvent) -> Self {
- todo!();
- }
-}
-
-impl From<crate::SafeDescriptor> for PlatformEvent {
- fn from(_evt: crate::SafeDescriptor) -> Self {
- todo!();
- }
-}
-
#[derive(Debug)]
pub struct MemoryMapping {}
diff --git a/base/src/sys/unix/descriptor.rs b/base/src/sys/unix/descriptor.rs
index 26a9cd2..fd97efe 100644
--- a/base/src/sys/unix/descriptor.rs
+++ b/base/src/sys/unix/descriptor.rs
@@ -32,24 +32,25 @@
pub const INVALID_DESCRIPTOR: RawDescriptor = -1;
-/// Clones `descriptor`, returning a new `RawDescriptor` that refers to the same open file
-/// description as `descriptor`. The cloned descriptor will have the `FD_CLOEXEC` flag set but will
-/// not share any other file descriptor flags with `descriptor`.
-pub fn clone_descriptor(descriptor: &dyn AsRawDescriptor) -> Result<RawDescriptor> {
- clone_fd(&descriptor.as_raw_descriptor())
+/// Clones `descriptor`, returning a new `SafeDescriptor` that refers to the same file
+/// `descriptor`. The cloned descriptor will have the `FD_CLOEXEC` flag set but will not share any
+/// other file descriptor flags with `descriptor`.
+pub fn clone_descriptor(descriptor: &(impl AsRawDescriptor + ?Sized)) -> Result<SafeDescriptor> {
+ clone_fd(descriptor.as_raw_descriptor())
}
-/// Clones `fd`, returning a new file descriptor that refers to the same open file description as
-/// `fd`. The cloned fd will have the `FD_CLOEXEC` flag set but will not share any other file
-/// descriptor flags with `fd`.
-fn clone_fd(fd: &dyn AsRawFd) -> Result<RawFd> {
+/// Clones `fd`, returning a new file descriptor that refers to the same open file as `fd`. The
+/// cloned fd will have the `FD_CLOEXEC` flag set but will not share any other file descriptor
+/// flags with `fd`.
+fn clone_fd(fd: RawFd) -> Result<SafeDescriptor> {
// SAFETY:
// Safe because this doesn't modify any memory and we check the return value.
- let ret = unsafe { libc::fcntl(fd.as_raw_fd(), libc::F_DUPFD_CLOEXEC, 0) };
+ let ret = unsafe { libc::fcntl(fd, libc::F_DUPFD_CLOEXEC, 0) };
if ret < 0 {
errno_result()
} else {
- Ok(ret)
+ // SAFETY: We just dup'd the FD and so have exclusive access.
+ Ok(unsafe { SafeDescriptor::from_raw_descriptor(ret) })
}
}
@@ -108,9 +109,7 @@
type Error = std::io::Error;
fn try_from(fd: &dyn AsRawFd) -> std::result::Result<Self, Self::Error> {
- Ok(SafeDescriptor {
- descriptor: clone_fd(fd)?,
- })
+ Ok(clone_fd(fd.as_raw_fd())?)
}
}
diff --git a/base/src/sys/unix/mod.rs b/base/src/sys/unix/mod.rs
index 1ebeb87..3bd39a9 100644
--- a/base/src/sys/unix/mod.rs
+++ b/base/src/sys/unix/mod.rs
@@ -13,6 +13,7 @@
mod sock_ctrl_msg;
mod stream_channel;
pub mod system_info;
+mod time;
pub mod tube;
pub use descriptor::*;
@@ -24,6 +25,7 @@
pub use system_info::iov_max;
pub use system_info::number_of_logical_cores;
pub use system_info::pagesize;
+pub use time::duration_to_timespec;
/// Process identifier.
pub type Pid = libc::pid_t;
diff --git a/base/src/sys/unix/time.rs b/base/src/sys/unix/time.rs
new file mode 100644
index 0000000..57ca780
--- /dev/null
+++ b/base/src/sys/unix/time.rs
@@ -0,0 +1,18 @@
+// Copyright 2024 The ChromiumOS Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+use std::time::Duration;
+
+use libc::timespec;
+
+/// Return a timespec filled with the specified Duration `duration`.
+#[allow(clippy::useless_conversion)]
+pub fn duration_to_timespec(duration: Duration) -> timespec {
+ // nsec always fits in i32 because subsec_nanos is defined to be less than one billion.
+ let nsec = duration.subsec_nanos() as i32;
+ timespec {
+ tv_sec: duration.as_secs() as libc::time_t,
+ tv_nsec: nsec.into(),
+ }
+}
diff --git a/base/src/sys/windows/tube.rs b/base/src/sys/windows/tube.rs
index ed61c2f..b653508 100644
--- a/base/src/sys/windows/tube.rs
+++ b/base/src/sys/windows/tube.rs
@@ -384,6 +384,18 @@
}
}
+impl CloseNotifier for SendTube {
+ fn get_close_notifier(&self) -> &dyn AsRawDescriptor {
+ self.0.get_close_notifier()
+ }
+}
+
+impl CloseNotifier for RecvTube {
+ fn get_close_notifier(&self) -> &dyn AsRawDescriptor {
+ self.0.get_close_notifier()
+ }
+}
+
/// A request to duplicate a handle to a target process.
#[derive(Serialize, Deserialize, Debug)]
pub struct DuplicateHandleRequest {
diff --git a/base/src/test_utils.rs b/base/src/test_utils.rs
index 248822a..cdd596d 100644
--- a/base/src/test_utils.rs
+++ b/base/src/test_utils.rs
@@ -43,3 +43,25 @@
panic!("This test need to be run as root or with passwordless sudo.");
}
}
+
+/// Assert repeatedly until it's true
+///
+/// Runs the provided `$cond` closure until it returns true. If it does not return true after
+/// `$tries` times, it will panic.
+/// There is no delay between polls, but the `$cond` can sleep as needed.
+#[macro_export]
+macro_rules! poll_assert {
+ ($tries: tt, $cond:expr) => {
+ $crate::test_utils::poll_assert_impl(stringify!($cond), $tries, $cond)
+ };
+}
+
+/// Implementation of [poll_assert]
+pub fn poll_assert_impl(msg: &'static str, tries: usize, poll_fn: impl Fn() -> bool) {
+ for _ in 0..tries {
+ if poll_fn() {
+ return;
+ }
+ }
+ panic!("Still failing after {} tries: {}", tries, msg);
+}
diff --git a/bit_field/Android.bp b/bit_field/Android.bp
index d6f35fb..3873a8f 100644
--- a/bit_field/Android.bp
+++ b/bit_field/Android.bp
@@ -1,5 +1,6 @@
// This file is generated by cargo_embargo.
-// Do not modify this file as most changes will be overridden on upgrade.
+// Do not modify this file after the first "rust_*" or "genrule" module
+// because the changes will be overridden on upgrade.
// Content before the first "rust_*" or "genrule" module is preserved.
package {
diff --git a/bit_field/bit_field_derive/Android.bp b/bit_field/bit_field_derive/Android.bp
index 34c8592..fb5b59d 100644
--- a/bit_field/bit_field_derive/Android.bp
+++ b/bit_field/bit_field_derive/Android.bp
@@ -1,5 +1,6 @@
// This file is generated by cargo_embargo.
-// Do not modify this file as most changes will be overridden on upgrade.
+// Do not modify this file after the first "rust_*" or "genrule" module
+// because the changes will be overridden on upgrade.
// Content before the first "rust_*" or "genrule" module is preserved.
package {
diff --git a/broker_ipc/Android.bp b/broker_ipc/Android.bp
index d78d468..8540908 100644
--- a/broker_ipc/Android.bp
+++ b/broker_ipc/Android.bp
@@ -1,5 +1,6 @@
// This file is generated by cargo_embargo.
-// Do not modify this file as most changes will be overridden on upgrade.
+// Do not modify this file after the first "rust_*" or "genrule" module
+// because the changes will be overridden on upgrade.
// Content before the first "rust_*" or "genrule" module is preserved.
package {
diff --git a/broker_ipc/src/lib.rs b/broker_ipc/src/lib.rs
index 4f39661..71a7313 100644
--- a/broker_ipc/src/lib.rs
+++ b/broker_ipc/src/lib.rs
@@ -17,7 +17,7 @@
use base::FromRawDescriptor;
use base::IntoRawDescriptor;
use base::SafeDescriptor;
-use base::Tube;
+use base::SendTube;
#[cfg(feature = "process-invariants")]
pub use broker_ipc_product::init_broker_process_invariants;
use broker_ipc_product::init_child_crash_reporting;
@@ -33,7 +33,7 @@
pub struct CommonChildStartupArgs {
log_args: LogArgs,
syslog_file: Option<SafeDescriptor>,
- metrics_tube: Option<Tube>,
+ metrics_tube: Option<SendTube>,
product_attrs: ProductAttributes,
}
@@ -44,7 +44,7 @@
syslog_path: Option<PathBuf>,
#[cfg(feature = "crash-report")] _crash_attrs: crash_report::CrashReportAttributes,
#[cfg(feature = "process-invariants")] _process_invariants: EmulatorProcessInvariants,
- metrics_tube: Option<Tube>,
+ metrics_tube: Option<SendTube>,
) -> anyhow::Result<Self> {
Ok(Self {
log_args: log_args.clone(),
diff --git a/cargo2android_defaults.bp b/cargo2android_defaults.bp
index 682184e..80b397c 100644
--- a/cargo2android_defaults.bp
+++ b/cargo2android_defaults.bp
@@ -24,7 +24,6 @@
},
},
apex_available: [
- "//apex_available:platform",
"com.android.virt",
],
defaults_visibility: [
diff --git a/cargo_embargo.json b/cargo_embargo.json
index fa0d0a4..777ce8b 100644
--- a/cargo_embargo.json
+++ b/cargo_embargo.json
@@ -1,6 +1,12 @@
{
"tests": true,
"features": [
+ "android_display",
+ // "android_display_stub" is enabled only to allow the cargo build to succeed as
+ // part of cargo_embargo. Patch files are used to remove it from the
+ // generated Android.bp files (so that the real display backend implementation is
+ // used).
+ "android_display_stub",
"android-sparse",
"audio",
"balloon",
@@ -185,9 +191,6 @@
"kvm_sys": {
"no_presubmit": true
},
- "metrics": {
- "copy_out": true
- },
"net_util": {
"no_presubmit": true
},
diff --git a/common/audio_streams/Android.bp b/common/audio_streams/Android.bp
index 1eed2c7..9d535c0 100644
--- a/common/audio_streams/Android.bp
+++ b/common/audio_streams/Android.bp
@@ -1,5 +1,6 @@
// This file is generated by cargo_embargo.
-// Do not modify this file as most changes will be overridden on upgrade.
+// Do not modify this file after the first "rust_*" or "genrule" module
+// because the changes will be overridden on upgrade.
// Content before the first "rust_*" or "genrule" module is preserved.
package {
diff --git a/common/balloon_control/Android.bp b/common/balloon_control/Android.bp
index e6ddd56..561c23b 100644
--- a/common/balloon_control/Android.bp
+++ b/common/balloon_control/Android.bp
@@ -1,5 +1,6 @@
// This file is generated by cargo_embargo.
-// Do not modify this file as most changes will be overridden on upgrade.
+// Do not modify this file after the first "rust_*" or "genrule" module
+// because the changes will be overridden on upgrade.
// Content before the first "rust_*" or "genrule" module is preserved.
package {
diff --git a/common/data_model/Android.bp b/common/data_model/Android.bp
index 78d8ba2..461e626 100644
--- a/common/data_model/Android.bp
+++ b/common/data_model/Android.bp
@@ -1,5 +1,6 @@
// This file is generated by cargo_embargo.
-// Do not modify this file as most changes will be overridden on upgrade.
+// Do not modify this file after the first "rust_*" or "genrule" module
+// because the changes will be overridden on upgrade.
// Content before the first "rust_*" or "genrule" module is preserved.
package {
diff --git a/common/data_model/src/lib.rs b/common/data_model/src/lib.rs
index 29720ee..05c2e84 100644
--- a/common/data_model/src/lib.rs
+++ b/common/data_model/src/lib.rs
@@ -2,34 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-use std::io;
-use std::mem::size_of;
-use std::mem::MaybeUninit;
-use std::slice::from_raw_parts_mut;
-
-use zerocopy::AsBytes;
-use zerocopy::FromBytes;
-use zerocopy::FromZeroes;
-use zerocopy::Ref;
-
-pub fn zerocopy_from_reader<R: io::Read, T: AsBytes + FromBytes + FromZeroes>(
- mut read: R,
-) -> io::Result<T> {
- let mut out = T::new_zeroed();
- read.read_exact(out.as_bytes_mut())?;
- Ok(out)
-}
-
-pub fn zerocopy_from_mut_slice<T: FromBytes + AsBytes>(data: &mut [u8]) -> Option<&mut T> {
- let lv: Ref<&mut [u8], T> = Ref::new(data)?;
- Some(lv.into_mut())
-}
-
-pub fn zerocopy_from_slice<T: FromBytes>(data: &[u8]) -> Option<&T> {
- let lv: Ref<&[u8], T> = Ref::new(data)?;
- Some(lv.into_ref())
-}
-
pub mod endian;
pub use crate::endian::*;
diff --git a/common/sync/Android.bp b/common/sync/Android.bp
index d683033..af6260c 100644
--- a/common/sync/Android.bp
+++ b/common/sync/Android.bp
@@ -1,5 +1,6 @@
// This file is generated by cargo_embargo.
-// Do not modify this file as most changes will be overridden on upgrade.
+// Do not modify this file after the first "rust_*" or "genrule" module
+// because the changes will be overridden on upgrade.
// Content before the first "rust_*" or "genrule" module is preserved.
package {
diff --git a/cros_async/Android.bp b/cros_async/Android.bp
index 6acddaf..a744469 100644
--- a/cros_async/Android.bp
+++ b/cros_async/Android.bp
@@ -1,5 +1,6 @@
// This file is generated by cargo_embargo.
-// Do not modify this file as most changes will be overridden on upgrade.
+// Do not modify this file after the first "rust_*" or "genrule" module
+// because the changes will be overridden on upgrade.
// Content before the first "rust_*" or "genrule" module is preserved.
package {
diff --git a/cros_async/Cargo.toml b/cros_async/Cargo.toml
index 3db4888..fb25c40 100644
--- a/cros_async/Cargo.toml
+++ b/cros_async/Cargo.toml
@@ -4,6 +4,9 @@
authors = ["The ChromiumOS Authors"]
edition = "2021"
+[features]
+tokio = ["dep:tokio"]
+
[dependencies]
async-trait = "0.1.36"
async-task = "4"
@@ -23,6 +26,7 @@
serde = "*"
serde_keyvalue = { path = "../serde_keyvalue", features = ["argh_derive"] } # provided by ebuild
static_assertions = "1.1"
+tokio = { version = "1.29.1", optional = true, features = ["net", "rt-multi-thread"] }
[target.'cfg(any(target_os = "android", target_os = "linux"))'.dependencies]
io_uring = { path = "../io_uring" } # provided by ebuild
diff --git a/cros_async/src/common_executor.rs b/cros_async/src/common_executor.rs
index 862f466..afaee4b 100644
--- a/cros_async/src/common_executor.rs
+++ b/cros_async/src/common_executor.rs
@@ -27,7 +27,10 @@
use crate::AsyncResult;
use crate::BlockingPool;
use crate::DetachedTasks;
+use crate::ExecutorTrait;
+use crate::IntoAsync;
use crate::IoSource;
+use crate::TaskHandle;
/// Abstraction for IO backends.
pub trait Reactor: Send + Sync + Sized {
@@ -62,6 +65,8 @@
ex: &Arc<RawExecutor<Self>>,
f: F,
) -> AsyncResult<IoSource<F>>;
+
+ fn wrap_task_handle<R>(task: RawTaskHandle<Self, R>) -> TaskHandle<R>;
}
// Indicates the executor is either within or about to make a `Reactor::wait_for_work` call. When a
@@ -105,59 +110,7 @@
}
}
- pub fn new_source<F: AsRawDescriptor>(self: &Arc<Self>, f: F) -> AsyncResult<IoSource<F>> {
- self.reactor.new_source(self, f)
- }
-
- pub fn spawn<F>(self: &Arc<Self>, f: F) -> TaskHandle<Re, F::Output>
- where
- F: Future + Send + 'static,
- F::Output: Send + 'static,
- {
- let raw = Arc::downgrade(self);
- let schedule = move |runnable| {
- if let Some(r) = raw.upgrade() {
- r.queue.push_back(runnable);
- r.wake();
- }
- };
- let (runnable, task) = async_task::spawn(f, schedule);
- runnable.schedule();
- TaskHandle {
- task,
- raw: Arc::downgrade(self),
- }
- }
-
- pub fn spawn_local<F>(self: &Arc<Self>, f: F) -> TaskHandle<Re, F::Output>
- where
- F: Future + 'static,
- F::Output: 'static,
- {
- let raw = Arc::downgrade(self);
- let schedule = move |runnable| {
- if let Some(r) = raw.upgrade() {
- r.queue.push_back(runnable);
- r.wake();
- }
- };
- let (runnable, task) = async_task::spawn_local(f, schedule);
- runnable.schedule();
- TaskHandle {
- task,
- raw: Arc::downgrade(self),
- }
- }
-
- pub fn spawn_blocking<F, R>(self: &Arc<Self>, f: F) -> TaskHandle<Re, R>
- where
- F: FnOnce() -> R + Send + 'static,
- R: Send + 'static,
- {
- self.spawn(self.blocking_pool.spawn(f))
- }
-
- fn run<F: Future>(&self, cx: &mut Context, done: F) -> AsyncResult<F::Output> {
+ fn run_internal<F: Future>(&self, cx: &mut Context, done: F) -> AsyncResult<F::Output> {
self.reactor.on_thread_start();
pin_mut!(done);
@@ -193,12 +146,66 @@
.map_err(AsyncError::Io)?;
}
}
+}
- pub fn run_until<F: Future>(self: &Arc<Self>, f: F) -> AsyncResult<F::Output> {
+impl<Re: Reactor + 'static> ExecutorTrait for Arc<RawExecutor<Re>> {
+ fn async_from<'a, F: IntoAsync + 'a>(&self, f: F) -> AsyncResult<IoSource<F>> {
+ self.reactor.new_source(self, f)
+ }
+
+ fn spawn<F>(&self, f: F) -> TaskHandle<F::Output>
+ where
+ F: Future + Send + 'static,
+ F::Output: Send + 'static,
+ {
+ let raw = Arc::downgrade(self);
+ let schedule = move |runnable| {
+ if let Some(r) = raw.upgrade() {
+ r.queue.push_back(runnable);
+ r.wake();
+ }
+ };
+ let (runnable, task) = async_task::spawn(f, schedule);
+ runnable.schedule();
+ Re::wrap_task_handle(RawTaskHandle {
+ task,
+ raw: Arc::downgrade(self),
+ })
+ }
+
+ fn spawn_local<F>(&self, f: F) -> TaskHandle<F::Output>
+ where
+ F: Future + 'static,
+ F::Output: 'static,
+ {
+ let raw = Arc::downgrade(self);
+ let schedule = move |runnable| {
+ if let Some(r) = raw.upgrade() {
+ r.queue.push_back(runnable);
+ r.wake();
+ }
+ };
+ let (runnable, task) = async_task::spawn_local(f, schedule);
+ runnable.schedule();
+ Re::wrap_task_handle(RawTaskHandle {
+ task,
+ raw: Arc::downgrade(self),
+ })
+ }
+
+ fn spawn_blocking<F, R>(&self, f: F) -> TaskHandle<R>
+ where
+ F: FnOnce() -> R + Send + 'static,
+ R: Send + 'static,
+ {
+ self.spawn(self.blocking_pool.spawn(f))
+ }
+
+ fn run_until<F: Future>(&self, f: F) -> AsyncResult<F::Output> {
let waker = super::waker::new_waker(Arc::downgrade(self));
let mut ctx = Context::from_waker(&waker);
- self.run(&mut ctx, f)
+ self.run_internal(&mut ctx, f)
}
}
@@ -222,18 +229,18 @@
let waker = noop_waker();
let mut cx = Context::from_waker(&waker);
- if let Err(e) = self.run(&mut cx, final_future) {
+ if let Err(e) = self.run_internal(&mut cx, final_future) {
warn!("Failed to drive RawExecutor to completion: {}", e);
}
}
}
-pub struct TaskHandle<Re: Reactor + 'static, R> {
+pub struct RawTaskHandle<Re: Reactor + 'static, R> {
task: Task<R>,
raw: Weak<RawExecutor<Re>>,
}
-impl<Re: Reactor, R: Send + 'static> TaskHandle<Re, R> {
+impl<Re: Reactor, R: Send + 'static> RawTaskHandle<Re, R> {
pub fn detach(self) {
if let Some(raw) = self.raw.upgrade() {
raw.detached_tasks.lock().push(self.task);
@@ -245,7 +252,7 @@
}
}
-impl<Re: Reactor, R: 'static> Future for TaskHandle<Re, R> {
+impl<Re: Reactor, R: 'static> Future for RawTaskHandle<Re, R> {
type Output = R;
fn poll(
diff --git a/cros_async/src/executor.rs b/cros_async/src/executor.rs
index 6246216..35e4095 100644
--- a/cros_async/src/executor.rs
+++ b/cros_async/src/executor.rs
@@ -1,28 +1,46 @@
-// Copyright 2020 The ChromiumOS Authors
+// Copyright 2024 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-use crate::sys::ExecutorKindSys;
+use std::future::Future;
+use std::pin::Pin;
+use std::sync::Arc;
-#[derive(
- Clone,
- Copy,
- Debug,
- PartialEq,
- Eq,
- serde::Serialize,
- serde::Deserialize,
- serde_keyvalue::FromKeyValues,
-)]
-#[serde(deny_unknown_fields, rename_all = "kebab-case", untagged)]
-pub enum ExecutorKind {
- SysVariants(ExecutorKindSys),
+#[cfg(any(target_os = "android", target_os = "linux"))]
+use base::warn;
+#[cfg(any(target_os = "android", target_os = "linux"))]
+use base::AsRawDescriptors;
+#[cfg(any(target_os = "android", target_os = "linux"))]
+use base::RawDescriptor;
+use once_cell::sync::OnceCell;
+use serde::Deserialize;
+use serde_keyvalue::argh::FromArgValue;
+use serde_keyvalue::ErrorKind;
+use serde_keyvalue::KeyValueDeserializer;
+
+use crate::common_executor;
+use crate::common_executor::RawExecutor;
+#[cfg(any(target_os = "android", target_os = "linux"))]
+use crate::sys::linux;
+#[cfg(windows)]
+use crate::sys::windows;
+use crate::sys::ExecutorKindSys;
+use crate::AsyncResult;
+use crate::IntoAsync;
+use crate::IoSource;
+
+cfg_if::cfg_if! {
+ if #[cfg(feature = "tokio")] {
+ use crate::tokio_executor::TokioExecutor;
+ use crate::tokio_executor::TokioTaskHandle;
+ }
}
-impl Default for ExecutorKind {
- fn default() -> ExecutorKind {
- ExecutorKind::SysVariants(ExecutorKindSys::default())
- }
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+pub enum ExecutorKind {
+ SysVariants(ExecutorKindSys),
+ #[cfg(feature = "tokio")]
+ Tokio,
}
impl From<ExecutorKindSys> for ExecutorKind {
@@ -31,11 +49,618 @@
}
}
-// TODO: schuffelen - Remove after adding a platform-independent Executor
-impl From<ExecutorKind> for ExecutorKindSys {
- fn from(e: ExecutorKind) -> ExecutorKindSys {
- match e {
- ExecutorKind::SysVariants(inner) => inner,
+/// If set, [`ExecutorKind::default()`] returns the value of `DEFAULT_EXECUTOR_KIND`.
+/// If not set, [`ExecutorKind::default()`] returns a statically-chosen default value, and
+/// [`ExecutorKind::default()`] initializes `DEFAULT_EXECUTOR_KIND` with that value.
+static DEFAULT_EXECUTOR_KIND: OnceCell<ExecutorKind> = OnceCell::new();
+
+impl Default for ExecutorKind {
+ fn default() -> Self {
+ #[cfg(any(target_os = "android", target_os = "linux"))]
+ let default_fn = || ExecutorKindSys::Fd.into();
+ #[cfg(windows)]
+ let default_fn = || ExecutorKindSys::Handle.into();
+ *DEFAULT_EXECUTOR_KIND.get_or_init(default_fn)
+ }
+}
+
+/// The error type for [`Executor::set_default_executor_kind()`].
+#[derive(thiserror::Error, Debug)]
+pub enum SetDefaultExecutorKindError {
+ /// The default executor kind is set more than once.
+ #[error("The default executor kind is already set to {0:?}")]
+ SetMoreThanOnce(ExecutorKind),
+
+ #[cfg(any(target_os = "android", target_os = "linux"))]
+ /// io_uring is unavailable. The reason might be the lack of the kernel support,
+ /// but is not limited to that.
+ #[error("io_uring is unavailable: {0}")]
+ UringUnavailable(linux::uring_executor::Error),
+}
+
+impl FromArgValue for ExecutorKind {
+ fn from_arg_value(value: &str) -> std::result::Result<ExecutorKind, String> {
+ // `from_arg_value` returns a `String` as error, but our deserializer API defines its own
+ // error type. Perform parsing from a closure so we can easily map returned errors.
+ let builder = move || {
+ let mut des = KeyValueDeserializer::from(value);
+
+ let kind: ExecutorKind = match (des.parse_identifier()?, des.next_char()) {
+ #[cfg(any(target_os = "android", target_os = "linux"))]
+ ("epoll", None) => ExecutorKindSys::Fd.into(),
+ #[cfg(any(target_os = "android", target_os = "linux"))]
+ ("uring", None) => ExecutorKindSys::Uring.into(),
+ #[cfg(windows)]
+ ("handle", None) => ExecutorKindSys::Handle.into(),
+ #[cfg(windows)]
+ ("overlapped", None) => ExecutorKindSys::Overlapped { concurrency: None }.into(),
+ #[cfg(windows)]
+ ("overlapped", Some(',')) => {
+ if des.parse_identifier()? != "concurrency" {
+ let kind = ErrorKind::SerdeError("expected `concurrency`".to_string());
+ return Err(des.error_here(kind));
+ }
+ if des.next_char() != Some('=') {
+ return Err(des.error_here(ErrorKind::ExpectedEqual));
+ }
+ let concurrency = des.parse_number()?;
+ ExecutorKindSys::Overlapped {
+ concurrency: Some(concurrency),
+ }
+ .into()
+ }
+ #[cfg(feature = "tokio")]
+ ("tokio", None) => ExecutorKind::Tokio,
+ (_identifier, _next) => {
+ let kind = ErrorKind::SerdeError("unexpected kind".to_string());
+ return Err(des.error_here(kind));
+ }
+ };
+ des.finish()?;
+ Ok(kind)
+ };
+
+ builder().map_err(|e| e.to_string())
+ }
+}
+
+impl serde::Serialize for ExecutorKind {
+ fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+ where
+ S: serde::Serializer,
+ {
+ match self {
+ ExecutorKind::SysVariants(sv) => sv.serialize(serializer),
+ #[cfg(feature = "tokio")]
+ ExecutorKind::Tokio => "tokio".serialize(serializer),
+ }
+ }
+}
+
+impl<'de> Deserialize<'de> for ExecutorKind {
+ fn deserialize<D>(deserializer: D) -> Result<ExecutorKind, D::Error>
+ where
+ D: serde::Deserializer<'de>,
+ {
+ base::error!("ExecutorKind::deserialize");
+ let string = String::deserialize(deserializer)?;
+ ExecutorKind::from_arg_value(&string).map_err(serde::de::Error::custom)
+ }
+}
+
+/// Reference to a task managed by the executor.
+///
+/// Dropping a `TaskHandle` attempts to cancel the associated task. Call `detach` to allow it to
+/// continue running the background.
+///
+/// `await`ing the `TaskHandle` waits for the task to finish and yields its result.
+pub enum TaskHandle<R> {
+ #[cfg(any(target_os = "android", target_os = "linux"))]
+ Fd(common_executor::RawTaskHandle<linux::EpollReactor, R>),
+ #[cfg(any(target_os = "android", target_os = "linux"))]
+ Uring(common_executor::RawTaskHandle<linux::UringReactor, R>),
+ #[cfg(windows)]
+ Handle(common_executor::RawTaskHandle<windows::HandleReactor, R>),
+ #[cfg(feature = "tokio")]
+ Tokio(TokioTaskHandle<R>),
+}
+
+impl<R: Send + 'static> TaskHandle<R> {
+ pub fn detach(self) {
+ match self {
+ #[cfg(any(target_os = "android", target_os = "linux"))]
+ TaskHandle::Fd(f) => f.detach(),
+ #[cfg(any(target_os = "android", target_os = "linux"))]
+ TaskHandle::Uring(u) => u.detach(),
+ #[cfg(windows)]
+ TaskHandle::Handle(h) => h.detach(),
+ #[cfg(feature = "tokio")]
+ TaskHandle::Tokio(t) => t.detach(),
+ }
+ }
+
+ // Cancel the task and wait for it to stop. Returns the result of the task if it was already
+ // finished.
+ pub async fn cancel(self) -> Option<R> {
+ match self {
+ #[cfg(any(target_os = "android", target_os = "linux"))]
+ TaskHandle::Fd(f) => f.cancel().await,
+ #[cfg(any(target_os = "android", target_os = "linux"))]
+ TaskHandle::Uring(u) => u.cancel().await,
+ #[cfg(windows)]
+ TaskHandle::Handle(h) => h.cancel().await,
+ #[cfg(feature = "tokio")]
+ TaskHandle::Tokio(t) => t.cancel().await,
+ }
+ }
+}
+
+impl<R: 'static> Future for TaskHandle<R> {
+ type Output = R;
+
+ fn poll(self: Pin<&mut Self>, cx: &mut std::task::Context) -> std::task::Poll<Self::Output> {
+ match self.get_mut() {
+ #[cfg(any(target_os = "android", target_os = "linux"))]
+ TaskHandle::Fd(f) => Pin::new(f).poll(cx),
+ #[cfg(any(target_os = "android", target_os = "linux"))]
+ TaskHandle::Uring(u) => Pin::new(u).poll(cx),
+ #[cfg(windows)]
+ TaskHandle::Handle(h) => Pin::new(h).poll(cx),
+ #[cfg(feature = "tokio")]
+ TaskHandle::Tokio(t) => Pin::new(t).poll(cx),
+ }
+ }
+}
+
+pub(crate) trait ExecutorTrait {
+ fn async_from<'a, F: IntoAsync + 'a>(&self, f: F) -> AsyncResult<IoSource<F>>;
+
+ fn spawn<F>(&self, f: F) -> TaskHandle<F::Output>
+ where
+ F: Future + Send + 'static,
+ F::Output: Send + 'static;
+
+ fn spawn_blocking<F, R>(&self, f: F) -> TaskHandle<R>
+ where
+ F: FnOnce() -> R + Send + 'static,
+ R: Send + 'static;
+
+ fn spawn_local<F>(&self, f: F) -> TaskHandle<F::Output>
+ where
+ F: Future + 'static,
+ F::Output: 'static;
+
+ fn run_until<F: Future>(&self, f: F) -> AsyncResult<F::Output>;
+}
+
+/// An executor for scheduling tasks that poll futures to completion.
+///
+/// All asynchronous operations must run within an executor, which is capable of spawning futures as
+/// tasks. This executor also provides a mechanism for performing asynchronous I/O operations.
+///
+/// The returned type is a cheap, clonable handle to the underlying executor. Cloning it will only
+/// create a new reference, not a new executor.
+///
+/// Note that language limitations (trait objects can have <=1 non auto trait) require this to be
+/// represented on the POSIX side as an enum, rather than a trait. This leads to some code &
+/// interface duplication, but as far as we understand that is unavoidable.
+///
+/// See <https://chromium-review.googlesource.com/c/chromiumos/platform/crosvm/+/2571401/2..6/cros_async/src/executor.rs#b75>
+/// for further details.
+///
+/// # Examples
+///
+/// Concurrently wait for multiple files to become readable/writable and then read/write the data.
+///
+/// ```
+/// use std::cmp::min;
+/// use std::error::Error;
+/// use std::fs::{File, OpenOptions};
+///
+/// use cros_async::{AsyncResult, Executor, IoSource, complete3};
+/// const CHUNK_SIZE: usize = 32;
+///
+/// // Write all bytes from `data` to `f`.
+/// async fn write_file(f: &IoSource<File>, mut data: Vec<u8>) -> AsyncResult<()> {
+/// while data.len() > 0 {
+/// let (count, mut buf) = f.write_from_vec(None, data).await?;
+///
+/// data = buf.split_off(count);
+/// }
+///
+/// Ok(())
+/// }
+///
+/// // Transfer `len` bytes of data from `from` to `to`.
+/// async fn transfer_data(
+/// from: IoSource<File>,
+/// to: IoSource<File>,
+/// len: usize,
+/// ) -> AsyncResult<usize> {
+/// let mut rem = len;
+///
+/// while rem > 0 {
+/// let buf = vec![0u8; min(rem, CHUNK_SIZE)];
+/// let (count, mut data) = from.read_to_vec(None, buf).await?;
+///
+/// if count == 0 {
+/// // End of file. Return the number of bytes transferred.
+/// return Ok(len - rem);
+/// }
+///
+/// data.truncate(count);
+/// write_file(&to, data).await?;
+///
+/// rem = rem.saturating_sub(count);
+/// }
+///
+/// Ok(len)
+/// }
+///
+/// #[cfg(any(target_os = "android", target_os = "linux"))]
+/// # fn do_it() -> Result<(), Box<dyn Error>> {
+/// let ex = Executor::new()?;
+///
+/// let (rx, tx) = base::linux::pipe()?;
+/// let zero = File::open("/dev/zero")?;
+/// let zero_bytes = CHUNK_SIZE * 7;
+/// let zero_to_pipe = transfer_data(
+/// ex.async_from(zero)?,
+/// ex.async_from(tx.try_clone()?)?,
+/// zero_bytes,
+/// );
+///
+/// let rand = File::open("/dev/urandom")?;
+/// let rand_bytes = CHUNK_SIZE * 19;
+/// let rand_to_pipe = transfer_data(ex.async_from(rand)?, ex.async_from(tx)?, rand_bytes);
+///
+/// let null = OpenOptions::new().write(true).open("/dev/null")?;
+/// let null_bytes = zero_bytes + rand_bytes;
+/// let pipe_to_null = transfer_data(ex.async_from(rx)?, ex.async_from(null)?, null_bytes);
+///
+/// ex.run_until(complete3(
+/// async { assert_eq!(pipe_to_null.await.unwrap(), null_bytes) },
+/// async { assert_eq!(zero_to_pipe.await.unwrap(), zero_bytes) },
+/// async { assert_eq!(rand_to_pipe.await.unwrap(), rand_bytes) },
+/// ))?;
+///
+/// # Ok(())
+/// # }
+/// #[cfg(any(target_os = "android", target_os = "linux"))]
+/// # do_it().unwrap();
+/// ```
+#[derive(Clone)]
+pub enum Executor {
+ #[cfg(any(target_os = "android", target_os = "linux"))]
+ Fd(Arc<RawExecutor<linux::EpollReactor>>),
+ #[cfg(any(target_os = "android", target_os = "linux"))]
+ Uring(Arc<RawExecutor<linux::UringReactor>>),
+ #[cfg(windows)]
+ Handle(Arc<RawExecutor<windows::HandleReactor>>),
+ #[cfg(windows)]
+ Overlapped(Arc<RawExecutor<windows::HandleReactor>>),
+ #[cfg(feature = "tokio")]
+ Tokio(TokioExecutor),
+}
+
+impl Executor {
+ /// Create a new `Executor`.
+ pub fn new() -> AsyncResult<Self> {
+ Executor::with_executor_kind(ExecutorKind::default())
+ }
+
+ /// Create a new `Executor` of the given `ExecutorKind`.
+ pub fn with_executor_kind(kind: ExecutorKind) -> AsyncResult<Self> {
+ Ok(match kind {
+ #[cfg(any(target_os = "android", target_os = "linux"))]
+ ExecutorKind::SysVariants(ExecutorKindSys::Fd) => Executor::Fd(RawExecutor::new()?),
+ #[cfg(any(target_os = "android", target_os = "linux"))]
+ ExecutorKind::SysVariants(ExecutorKindSys::Uring) => {
+ Executor::Uring(RawExecutor::new()?)
+ }
+ #[cfg(windows)]
+ ExecutorKind::SysVariants(ExecutorKindSys::Handle) => {
+ Executor::Handle(RawExecutor::new()?)
+ }
+ #[cfg(windows)]
+ ExecutorKind::SysVariants(ExecutorKindSys::Overlapped { concurrency }) => {
+ let reactor = match concurrency {
+ Some(concurrency) => windows::HandleReactor::new_with(concurrency)?,
+ None => windows::HandleReactor::new()?,
+ };
+ Executor::Overlapped(RawExecutor::new_with(reactor)?)
+ }
+ #[cfg(feature = "tokio")]
+ ExecutorKind::Tokio => Executor::Tokio(TokioExecutor::new()?),
+ })
+ }
+
+ /// Set the default ExecutorKind for [`Self::new()`]. This call is effective only once.
+ pub fn set_default_executor_kind(
+ executor_kind: ExecutorKind,
+ ) -> Result<(), SetDefaultExecutorKindError> {
+ #[cfg(any(target_os = "android", target_os = "linux"))]
+ if executor_kind == ExecutorKind::SysVariants(ExecutorKindSys::Uring) {
+ linux::uring_executor::check_uring_availability()
+ .map_err(SetDefaultExecutorKindError::UringUnavailable)?;
+ if !crate::is_uring_stable() {
+ warn!(
+ "Enabling io_uring executor on the kernel version where io_uring is unstable"
+ );
+ }
+ }
+ DEFAULT_EXECUTOR_KIND.set(executor_kind).map_err(|_|
+ // `expect` succeeds since this closure runs only when DEFAULT_EXECUTOR_KIND is set.
+ SetDefaultExecutorKindError::SetMoreThanOnce(
+ *DEFAULT_EXECUTOR_KIND
+ .get()
+ .expect("Failed to get DEFAULT_EXECUTOR_KIND"),
+ ))
+ }
+
+ /// Create a new `IoSource<F>` associated with `self`. Callers may then use the returned
+ /// `IoSource` to directly start async operations without needing a separate reference to the
+ /// executor.
+ pub fn async_from<'a, F: IntoAsync + 'a>(&self, f: F) -> AsyncResult<IoSource<F>> {
+ match self {
+ #[cfg(any(target_os = "android", target_os = "linux"))]
+ Executor::Fd(ex) => ex.async_from(f),
+ #[cfg(any(target_os = "android", target_os = "linux"))]
+ Executor::Uring(ex) => ex.async_from(f),
+ #[cfg(windows)]
+ Executor::Handle(ex) => ex.async_from(f),
+ #[cfg(windows)]
+ Executor::Overlapped(ex) => ex.async_from(f),
+ #[cfg(feature = "tokio")]
+ Executor::Tokio(ex) => ex.async_from(f),
+ }
+ }
+
+ /// Create a new overlapped `IoSource<F>` associated with `self`. Callers may then use the
+ /// If the executor is not overlapped, then Handle source is returned.
+ /// returned `IoSource` to directly start async operations without needing a separate reference
+ /// to the executor.
+ #[cfg(windows)]
+ pub fn async_overlapped_from<'a, F: IntoAsync + 'a>(&self, f: F) -> AsyncResult<IoSource<F>> {
+ match self {
+ Executor::Overlapped(ex) => Ok(IoSource::Overlapped(windows::OverlappedSource::new(
+ f, ex, false,
+ )?)),
+ _ => self.async_from(f),
+ }
+ }
+
+ /// Spawn a new future for this executor to run to completion. Callers may use the returned
+ /// `TaskHandle` to await on the result of `f`. Dropping the returned `TaskHandle` will cancel
+ /// `f`, preventing it from being polled again. To drop a `TaskHandle` without canceling the
+ /// future associated with it use `TaskHandle::detach`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # use cros_async::AsyncResult;
+ /// # fn example_spawn() -> AsyncResult<()> {
+ /// # use std::thread;
+ ///
+ /// # use cros_async::Executor;
+ /// use futures::executor::block_on;
+ ///
+ /// # let ex = Executor::new()?;
+ ///
+ /// # // Spawn a thread that runs the executor.
+ /// # let ex2 = ex.clone();
+ /// # thread::spawn(move || ex2.run());
+ ///
+ /// let task = ex.spawn(async { 7 + 13 });
+ ///
+ /// let result = block_on(task);
+ /// assert_eq!(result, 20);
+ /// # Ok(())
+ /// # }
+ ///
+ /// # example_spawn().unwrap();
+ /// ```
+ pub fn spawn<F>(&self, f: F) -> TaskHandle<F::Output>
+ where
+ F: Future + Send + 'static,
+ F::Output: Send + 'static,
+ {
+ match self {
+ #[cfg(any(target_os = "android", target_os = "linux"))]
+ Executor::Fd(ex) => ex.spawn(f),
+ #[cfg(any(target_os = "android", target_os = "linux"))]
+ Executor::Uring(ex) => ex.spawn(f),
+ #[cfg(windows)]
+ Executor::Handle(ex) => ex.spawn(f),
+ #[cfg(windows)]
+ Executor::Overlapped(ex) => ex.spawn(f),
+ #[cfg(feature = "tokio")]
+ Executor::Tokio(ex) => ex.spawn(f),
+ }
+ }
+
+ /// Spawn a thread-local task for this executor to drive to completion. Like `spawn` but without
+ /// requiring `Send` on `F` or `F::Output`. This method should only be called from the same
+ /// thread where `run()` or `run_until()` is called.
+ ///
+ /// # Panics
+ ///
+ /// `Executor::run` and `Executor::run_util` will panic if they try to poll a future that was
+ /// added by calling `spawn_local` from a different thread.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # use cros_async::AsyncResult;
+ /// # fn example_spawn_local() -> AsyncResult<()> {
+ /// # use cros_async::Executor;
+ ///
+ /// # let ex = Executor::new()?;
+ ///
+ /// let task = ex.spawn_local(async { 7 + 13 });
+ ///
+ /// let result = ex.run_until(task)?;
+ /// assert_eq!(result, 20);
+ /// Ok(())
+ /// # }
+ ///
+ /// # example_spawn_local().unwrap();
+ /// ```
+ pub fn spawn_local<F>(&self, f: F) -> TaskHandle<F::Output>
+ where
+ F: Future + 'static,
+ F::Output: 'static,
+ {
+ match self {
+ #[cfg(any(target_os = "android", target_os = "linux"))]
+ Executor::Fd(ex) => ex.spawn_local(f),
+ #[cfg(any(target_os = "android", target_os = "linux"))]
+ Executor::Uring(ex) => ex.spawn_local(f),
+ #[cfg(windows)]
+ Executor::Handle(ex) => ex.spawn_local(f),
+ #[cfg(windows)]
+ Executor::Overlapped(ex) => ex.spawn_local(f),
+ #[cfg(feature = "tokio")]
+ Executor::Tokio(ex) => ex.spawn_local(f),
+ }
+ }
+
+ /// Run the provided closure on a dedicated thread where blocking is allowed.
+ ///
+ /// Callers may `await` on the returned `TaskHandle` to wait for the result of `f`. Dropping
+ /// the returned `TaskHandle` may not cancel the operation if it was already started on a
+ /// worker thread.
+ ///
+ /// # Panics
+ ///
+ /// `await`ing the `TaskHandle` after the `Executor` is dropped will panic if the work was not
+ /// already completed.
+ ///
+ /// # Examples
+ ///
+ /// ```edition2018
+ /// # use cros_async::Executor;
+ ///
+ /// # async fn do_it(ex: &Executor) {
+ /// let res = ex.spawn_blocking(move || {
+ /// // Do some CPU-intensive or blocking work here.
+ ///
+ /// 42
+ /// }).await;
+ ///
+ /// assert_eq!(res, 42);
+ /// # }
+ ///
+ /// # let ex = Executor::new().unwrap();
+ /// # ex.run_until(do_it(&ex)).unwrap();
+ /// ```
+ pub fn spawn_blocking<F, R>(&self, f: F) -> TaskHandle<R>
+ where
+ F: FnOnce() -> R + Send + 'static,
+ R: Send + 'static,
+ {
+ match self {
+ #[cfg(any(target_os = "android", target_os = "linux"))]
+ Executor::Fd(ex) => ex.spawn_blocking(f),
+ #[cfg(any(target_os = "android", target_os = "linux"))]
+ Executor::Uring(ex) => ex.spawn_blocking(f),
+ #[cfg(windows)]
+ Executor::Handle(ex) => ex.spawn_blocking(f),
+ #[cfg(windows)]
+ Executor::Overlapped(ex) => ex.spawn_blocking(f),
+ #[cfg(feature = "tokio")]
+ Executor::Tokio(ex) => ex.spawn_blocking(f),
+ }
+ }
+
+ /// Run the executor indefinitely, driving all spawned futures to completion. This method will
+ /// block the current thread and only return in the case of an error.
+ ///
+ /// # Panics
+ ///
+ /// Once this method has been called on a thread, it may only be called on that thread from that
+ /// point on. Attempting to call it from another thread will panic.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # use cros_async::AsyncResult;
+ /// # fn example_run() -> AsyncResult<()> {
+ /// use std::thread;
+ ///
+ /// use cros_async::Executor;
+ /// use futures::executor::block_on;
+ ///
+ /// let ex = Executor::new()?;
+ ///
+ /// // Spawn a thread that runs the executor.
+ /// let ex2 = ex.clone();
+ /// thread::spawn(move || ex2.run());
+ ///
+ /// let task = ex.spawn(async { 7 + 13 });
+ ///
+ /// let result = block_on(task);
+ /// assert_eq!(result, 20);
+ /// # Ok(())
+ /// # }
+ ///
+ /// # example_run().unwrap();
+ /// ```
+ pub fn run(&self) -> AsyncResult<()> {
+ self.run_until(std::future::pending())
+ }
+
+ /// Drive all futures spawned in this executor until `f` completes. This method will block the
+ /// current thread only until `f` is complete and there may still be unfinished futures in the
+ /// executor.
+ ///
+ /// # Panics
+ ///
+ /// Once this method has been called on a thread, from then onwards it may only be called on
+ /// that thread. Attempting to call it from another thread will panic.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # use cros_async::AsyncResult;
+ /// # fn example_run_until() -> AsyncResult<()> {
+ /// use cros_async::Executor;
+ ///
+ /// let ex = Executor::new()?;
+ ///
+ /// let task = ex.spawn_local(async { 7 + 13 });
+ ///
+ /// let result = ex.run_until(task)?;
+ /// assert_eq!(result, 20);
+ /// # Ok(())
+ /// # }
+ ///
+ /// # example_run_until().unwrap();
+ /// ```
+ pub fn run_until<F: Future>(&self, f: F) -> AsyncResult<F::Output> {
+ match self {
+ #[cfg(any(target_os = "android", target_os = "linux"))]
+ Executor::Fd(ex) => ex.run_until(f),
+ #[cfg(any(target_os = "android", target_os = "linux"))]
+ Executor::Uring(ex) => ex.run_until(f),
+ #[cfg(windows)]
+ Executor::Handle(ex) => ex.run_until(f),
+ #[cfg(windows)]
+ Executor::Overlapped(ex) => ex.run_until(f),
+ #[cfg(feature = "tokio")]
+ Executor::Tokio(ex) => ex.run_until(f),
+ }
+ }
+}
+
+#[cfg(any(target_os = "android", target_os = "linux"))]
+impl AsRawDescriptors for Executor {
+ fn as_raw_descriptors(&self) -> Vec<RawDescriptor> {
+ match self {
+ Executor::Fd(ex) => ex.as_raw_descriptors(),
+ Executor::Uring(ex) => ex.as_raw_descriptors(),
+ #[cfg(feature = "tokio")]
+ Executor::Tokio(ex) => ex.as_raw_descriptors(),
}
}
}
diff --git a/cros_async/src/io_source.rs b/cros_async/src/io_source.rs
index c61a337..b46b2aa 100644
--- a/cros_async/src/io_source.rs
+++ b/cros_async/src/io_source.rs
@@ -10,6 +10,8 @@
use crate::sys::linux::PollSource;
#[cfg(any(target_os = "android", target_os = "linux"))]
use crate::sys::linux::UringSource;
+#[cfg(feature = "tokio")]
+use crate::sys::platform::tokio_source::TokioSource;
#[cfg(windows)]
use crate::sys::windows::HandleSource;
#[cfg(windows)]
@@ -29,6 +31,8 @@
Handle(HandleSource<F>),
#[cfg(windows)]
Overlapped(OverlappedSource<F>),
+ #[cfg(feature = "tokio")]
+ Tokio(TokioSource<F>),
}
static_assertions::assert_impl_all!(IoSource<std::fs::File>: Send, Sync);
@@ -47,6 +51,8 @@
IoSource::Handle(x) => HandleSource::$method(x, $($args),*).await,
#[cfg(windows)]
IoSource::Overlapped(x) => OverlappedSource::$method(x, $($args),*).await,
+ #[cfg(feature = "tokio")]
+ IoSource::Tokio(x) => TokioSource::$method(x, $($args),*).await,
}
};
}
@@ -65,6 +71,8 @@
IoSource::Handle(x) => HandleSource::$method(x, $($args),*),
#[cfg(windows)]
IoSource::Overlapped(x) => OverlappedSource::$method(x, $($args),*),
+ #[cfg(feature = "tokio")]
+ IoSource::Tokio(x) => TokioSource::$method(x, $($args),*),
}
};
}
@@ -214,7 +222,7 @@
}
let f = tmpfile_with_contents("data".as_bytes());
- let ex = Executor::with_executor_kind(kind.into()).unwrap();
+ let ex = Executor::with_executor_kind(kind).unwrap();
let source = ex.async_from(f).unwrap();
ex.run_until(go(source)).unwrap();
}
@@ -232,7 +240,7 @@
}
let mut f = tmpfile_with_contents(&[]);
- let ex = Executor::with_executor_kind(kind.into()).unwrap();
+ let ex = Executor::with_executor_kind(kind).unwrap();
let source = ex.async_from(f.try_clone().unwrap()).unwrap();
ex.run_until(go(source)).unwrap();
@@ -266,7 +274,7 @@
}
let f = tmpfile_with_contents("data".as_bytes());
- let ex = Executor::with_executor_kind(kind.into()).unwrap();
+ let ex = Executor::with_executor_kind(kind).unwrap();
let source = ex.async_from(f).unwrap();
ex.run_until(go(source)).unwrap();
}
@@ -292,7 +300,7 @@
}
let mut f = tmpfile_with_contents(&[]);
- let ex = Executor::with_executor_kind(kind.into()).unwrap();
+ let ex = Executor::with_executor_kind(kind).unwrap();
let source = ex.async_from(f.try_clone().unwrap()).unwrap();
ex.run_until(go(source)).unwrap();
@@ -315,7 +323,7 @@
}
let f = tempfile::tempfile().unwrap();
- let ex = Executor::with_executor_kind(kind.into()).unwrap();
+ let ex = Executor::with_executor_kind(kind).unwrap();
let source = ex.async_from(f).unwrap();
ex.run_until(go(source)).unwrap();
@@ -346,7 +354,7 @@
f.write_all(&[0xBB; 32]).unwrap();
f.rewind().unwrap();
- let ex = Executor::with_executor_kind(kind.into()).unwrap();
+ let ex = Executor::with_executor_kind(kind).unwrap();
let source = ex.async_from(f).unwrap();
ex.run_until(go(source)).unwrap();
@@ -369,7 +377,7 @@
}
let f = tempfile::tempfile().unwrap();
- let ex = Executor::with_executor_kind(kind.into()).unwrap();
+ let ex = Executor::with_executor_kind(kind).unwrap();
let source = ex.async_from(f).unwrap();
ex.run_until(go(source)).unwrap();
@@ -393,7 +401,7 @@
f.write_all(&[0xffu8; 32]).unwrap();
f.rewind().unwrap();
- let ex = Executor::with_executor_kind(kind.into()).unwrap();
+ let ex = Executor::with_executor_kind(kind).unwrap();
let source = ex.async_from(f).unwrap();
ex.run_until(go(source)).unwrap();
@@ -419,7 +427,7 @@
}
let mut f = tempfile::tempfile().unwrap();
- let ex = Executor::with_executor_kind(kind.into()).unwrap();
+ let ex = Executor::with_executor_kind(kind).unwrap();
let source = ex.async_from(f.try_clone().unwrap()).unwrap();
ex.run_until(go(source)).unwrap();
diff --git a/cros_async/src/lib.rs b/cros_async/src/lib.rs
index d61e089..071317d 100644
--- a/cros_async/src/lib.rs
+++ b/cros_async/src/lib.rs
@@ -57,6 +57,7 @@
mod async_types;
pub mod audio_streams_async;
mod blocking;
+mod common_executor;
mod complete;
mod event;
mod executor;
@@ -67,12 +68,9 @@
mod select;
pub mod sync;
pub mod sys;
-#[cfg(any(target_os = "android", target_os = "linux"))]
-pub use sys::linux::uring_executor::is_uring_stable;
-pub use sys::Executor;
-pub use sys::TaskHandle;
-mod common_executor;
mod timer;
+#[cfg(feature = "tokio")]
+mod tokio_executor;
mod waker;
use std::future::Future;
@@ -89,7 +87,10 @@
pub use blocking::CancellableBlockingPool;
pub use blocking::TimeoutAction;
pub use event::EventAsync;
+pub use executor::Executor;
pub use executor::ExecutorKind;
+pub(crate) use executor::ExecutorTrait;
+pub use executor::TaskHandle;
#[cfg(windows)]
pub use futures::executor::block_on;
use futures::stream::FuturesUnordered;
@@ -104,6 +105,8 @@
pub use mem::VecIoWrapper;
use remain::sorted;
pub use select::SelectResult;
+#[cfg(any(target_os = "android", target_os = "linux"))]
+pub use sys::linux::uring_executor::is_uring_stable;
use thiserror::Error as ThisError;
pub use timer::TimerAsync;
diff --git a/cros_async/src/sys.rs b/cros_async/src/sys.rs
index 74be597..ff1ab59 100644
--- a/cros_async/src/sys.rs
+++ b/cros_async/src/sys.rs
@@ -14,7 +14,4 @@
pub use platform::async_types;
pub use platform::event;
-pub use platform::executor::Executor;
pub use platform::executor::ExecutorKindSys;
-pub use platform::executor::SetDefaultExecutorKindError;
-pub use platform::executor::TaskHandle;
diff --git a/cros_async/src/sys/linux.rs b/cros_async/src/sys/linux.rs
index 02aa718..fcaf87e 100644
--- a/cros_async/src/sys/linux.rs
+++ b/cros_async/src/sys/linux.rs
@@ -9,13 +9,17 @@
pub mod fd_executor;
pub mod poll_source;
mod timer;
+#[cfg(feature = "tokio")]
+pub mod tokio_source;
pub mod uring_executor;
pub mod uring_source;
pub use error::AsyncErrorSys;
pub use executor::ExecutorKindSys;
+pub(crate) use fd_executor::EpollReactor;
pub use poll_source::Error as PollSourceError;
pub use poll_source::PollSource;
+pub(crate) use uring_executor::UringReactor;
pub use uring_source::UringSource;
use crate::Error;
diff --git a/cros_async/src/sys/linux/error.rs b/cros_async/src/sys/linux/error.rs
index 48e492c..83e946d 100644
--- a/cros_async/src/sys/linux/error.rs
+++ b/cros_async/src/sys/linux/error.rs
@@ -9,6 +9,9 @@
pub enum AsyncErrorSys {
#[error("Poll source error: {0}")]
Poll(#[from] super::poll_source::Error),
+ #[cfg(feature = "tokio")]
+ #[error("Tokio source error: {0}")]
+ Tokio(#[from] super::tokio_source::Error),
#[error("Uring source error: {0}")]
Uring(#[from] super::uring_executor::Error),
}
@@ -17,6 +20,8 @@
fn from(err: AsyncErrorSys) -> Self {
match err {
AsyncErrorSys::Poll(e) => e.into(),
+ #[cfg(feature = "tokio")]
+ AsyncErrorSys::Tokio(e) => e.into(),
AsyncErrorSys::Uring(e) => e.into(),
}
}
diff --git a/cros_async/src/sys/linux/event.rs b/cros_async/src/sys/linux/event.rs
index 46c5e42..a937974 100644
--- a/cros_async/src/sys/linux/event.rs
+++ b/cros_async/src/sys/linux/event.rs
@@ -39,12 +39,13 @@
use super::*;
use crate::common_executor::RawExecutor;
use crate::sys::linux::uring_executor::is_uring_stable;
+ use crate::ExecutorTrait;
pub(crate) fn new_poll(
event: Event,
ex: &Arc<RawExecutor<EpollReactor>>,
) -> AsyncResult<EventAsync> {
- ex.new_source(event)
+ ex.async_from(event)
.map(|io_source| EventAsync { io_source })
}
@@ -52,7 +53,7 @@
event: Event,
ex: &Arc<RawExecutor<UringReactor>>,
) -> AsyncResult<EventAsync> {
- ex.new_source(event)
+ ex.async_from(event)
.map(|io_source| EventAsync { io_source })
}
diff --git a/cros_async/src/sys/linux/executor.rs b/cros_async/src/sys/linux/executor.rs
index 455636e..a08ff25 100644
--- a/cros_async/src/sys/linux/executor.rs
+++ b/cros_async/src/sys/linux/executor.rs
@@ -2,132 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-use std::future::Future;
-use std::pin::Pin;
-use std::sync::Arc;
-
-use base::debug;
-use base::warn;
-use base::AsRawDescriptors;
-use base::RawDescriptor;
-use once_cell::sync::OnceCell;
use serde::Deserialize;
use serde::Serialize;
-use thiserror::Error as ThisError;
-
-use super::fd_executor::EpollReactor;
-use super::uring_executor::check_uring_availability;
-use super::uring_executor::is_uring_stable;
-use super::uring_executor::Error as UringError;
-use super::uring_executor::UringReactor;
-use crate::common_executor;
-use crate::common_executor::RawExecutor;
-use crate::AsyncResult;
-use crate::IntoAsync;
-use crate::IoSource;
-
-/// An executor for scheduling tasks that poll futures to completion.
-///
-/// All asynchronous operations must run within an executor, which is capable of spawning futures as
-/// tasks. This executor also provides a mechanism for performing asynchronous I/O operations.
-///
-/// The returned type is a cheap, clonable handle to the underlying executor. Cloning it will only
-/// create a new reference, not a new executor.
-///
-/// Note that language limitations (trait objects can have <=1 non auto trait) require this to be
-/// represented on the POSIX side as an enum, rather than a trait. This leads to some code &
-/// interface duplication, but as far as we understand that is unavoidable.
-///
-/// See <https://chromium-review.googlesource.com/c/chromiumos/platform/crosvm/+/2571401/2..6/cros_async/src/executor.rs#b75>
-/// for further details.
-///
-/// # Examples
-///
-/// Concurrently wait for multiple files to become readable/writable and then read/write the data.
-///
-/// ```
-/// use std::cmp::min;
-/// use std::error::Error;
-/// use std::fs::{File, OpenOptions};
-///
-/// use cros_async::{AsyncResult, Executor, IoSource, complete3};
-/// const CHUNK_SIZE: usize = 32;
-///
-/// // Write all bytes from `data` to `f`.
-/// async fn write_file(f: &IoSource<File>, mut data: Vec<u8>) -> AsyncResult<()> {
-/// while data.len() > 0 {
-/// let (count, mut buf) = f.write_from_vec(None, data).await?;
-///
-/// data = buf.split_off(count);
-/// }
-///
-/// Ok(())
-/// }
-///
-/// // Transfer `len` bytes of data from `from` to `to`.
-/// async fn transfer_data(
-/// from: IoSource<File>,
-/// to: IoSource<File>,
-/// len: usize,
-/// ) -> AsyncResult<usize> {
-/// let mut rem = len;
-///
-/// while rem > 0 {
-/// let buf = vec![0u8; min(rem, CHUNK_SIZE)];
-/// let (count, mut data) = from.read_to_vec(None, buf).await?;
-///
-/// if count == 0 {
-/// // End of file. Return the number of bytes transferred.
-/// return Ok(len - rem);
-/// }
-///
-/// data.truncate(count);
-/// write_file(&to, data).await?;
-///
-/// rem = rem.saturating_sub(count);
-/// }
-///
-/// Ok(len)
-/// }
-///
-/// #[cfg(any(target_os = "android", target_os = "linux"))]
-/// # fn do_it() -> Result<(), Box<dyn Error>> {
-/// let ex = Executor::new()?;
-///
-/// let (rx, tx) = base::linux::pipe()?;
-/// let zero = File::open("/dev/zero")?;
-/// let zero_bytes = CHUNK_SIZE * 7;
-/// let zero_to_pipe = transfer_data(
-/// ex.async_from(zero)?,
-/// ex.async_from(tx.try_clone()?)?,
-/// zero_bytes,
-/// );
-///
-/// let rand = File::open("/dev/urandom")?;
-/// let rand_bytes = CHUNK_SIZE * 19;
-/// let rand_to_pipe = transfer_data(ex.async_from(rand)?, ex.async_from(tx)?, rand_bytes);
-///
-/// let null = OpenOptions::new().write(true).open("/dev/null")?;
-/// let null_bytes = zero_bytes + rand_bytes;
-/// let pipe_to_null = transfer_data(ex.async_from(rx)?, ex.async_from(null)?, null_bytes);
-///
-/// ex.run_until(complete3(
-/// async { assert_eq!(pipe_to_null.await.unwrap(), null_bytes) },
-/// async { assert_eq!(zero_to_pipe.await.unwrap(), zero_bytes) },
-/// async { assert_eq!(rand_to_pipe.await.unwrap(), rand_bytes) },
-/// ))?;
-///
-/// # Ok(())
-/// # }
-/// #[cfg(any(target_os = "android", target_os = "linux"))]
-/// # do_it().unwrap();
-/// ```
-
-#[derive(Clone)]
-pub enum Executor {
- Uring(Arc<RawExecutor<UringReactor>>),
- Fd(Arc<RawExecutor<EpollReactor>>),
-}
/// An enum to express the kind of the backend of `Executor`
#[derive(
@@ -140,317 +16,3 @@
#[serde(rename = "epoll")]
Fd,
}
-
-/// If set, [`ExecutorKindSys::default()`] returns the value of `DEFAULT_EXECUTOR_KIND`.
-/// If not set, [`ExecutorKindSys::default()`] returns a statically-chosen default value, and
-/// [`ExecutorKindSys::default()`] initializes `DEFAULT_EXECUTOR_KIND` with that value.
-static DEFAULT_EXECUTOR_KIND: OnceCell<ExecutorKindSys> = OnceCell::new();
-
-impl Default for ExecutorKindSys {
- fn default() -> Self {
- *DEFAULT_EXECUTOR_KIND.get_or_init(|| ExecutorKindSys::Fd)
- }
-}
-
-/// The error type for [`Executor::set_default_executor_kind()`].
-#[derive(Debug, ThisError)]
-pub enum SetDefaultExecutorKindError {
- /// The default executor kind is set more than once.
- #[error("The default executor kind is already set to {0:?}")]
- SetMoreThanOnce(ExecutorKindSys),
-
- /// io_uring is unavailable. The reason might be the lack of the kernel support,
- /// but is not limited to that.
- #[error("io_uring is unavailable: {0}")]
- UringUnavailable(UringError),
-}
-
-/// Reference to a task managed by the executor.
-///
-/// Dropping a `TaskHandle` attempts to cancel the associated task. Call `detach` to allow it to
-/// continue running the background.
-///
-/// `await`ing the `TaskHandle` waits for the task to finish and yields its result.
-pub enum TaskHandle<R> {
- Uring(common_executor::TaskHandle<UringReactor, R>),
- Fd(common_executor::TaskHandle<EpollReactor, R>),
-}
-
-impl<R: Send + 'static> TaskHandle<R> {
- pub fn detach(self) {
- match self {
- TaskHandle::Uring(x) => x.detach(),
- TaskHandle::Fd(x) => x.detach(),
- }
- }
-
- // Cancel the task and wait for it to stop. Returns the result of the task if it was already
- // finished.
- pub async fn cancel(self) -> Option<R> {
- match self {
- TaskHandle::Uring(x) => x.cancel().await,
- TaskHandle::Fd(x) => x.cancel().await,
- }
- }
-}
-
-impl<R: 'static> Future for TaskHandle<R> {
- type Output = R;
-
- fn poll(self: Pin<&mut Self>, cx: &mut std::task::Context) -> std::task::Poll<Self::Output> {
- match self.get_mut() {
- TaskHandle::Uring(x) => Pin::new(x).poll(cx),
- TaskHandle::Fd(x) => Pin::new(x).poll(cx),
- }
- }
-}
-
-impl Executor {
- /// Create a new `Executor`.
- pub fn new() -> AsyncResult<Self> {
- Executor::with_executor_kind(ExecutorKindSys::default())
- }
-
- /// Create a new `Executor` of the given `ExecutorKind`.
- pub fn with_executor_kind(kind: ExecutorKindSys) -> AsyncResult<Self> {
- match kind {
- ExecutorKindSys::Uring => RawExecutor::new().map(Executor::Uring),
- ExecutorKindSys::Fd => RawExecutor::new().map(Executor::Fd),
- }
- }
-
- /// Set the default ExecutorKind for [`Self::new()`]. This call is effective only once.
- /// If a call is the first call, it sets the default, and `set_default_executor_kind`
- /// returns `Ok(())`. Otherwise, it returns `SetDefaultExecutorKindError::SetMoreThanOnce`
- /// which contains the existing ExecutorKind value configured by the first call.
- pub fn set_default_executor_kind(
- executor_kind: ExecutorKindSys,
- ) -> Result<(), SetDefaultExecutorKindError> {
- if executor_kind == ExecutorKindSys::Uring {
- check_uring_availability().map_err(SetDefaultExecutorKindError::UringUnavailable)?;
- if !is_uring_stable() {
- warn!(
- "Enabling io_uring executor on the kernel version where io_uring is unstable"
- );
- }
- }
-
- debug!("setting the default executor to {:?}", executor_kind);
- DEFAULT_EXECUTOR_KIND.set(executor_kind).map_err(|_|
- // `expect` succeeds since this closure runs only when DEFAULT_EXECUTOR_KIND is set.
- SetDefaultExecutorKindError::SetMoreThanOnce(
- *DEFAULT_EXECUTOR_KIND
- .get()
- .expect("Failed to get DEFAULT_EXECUTOR_KIND"),
- ))
- }
-
- /// Create a new `IoSource<F>` associated with `self`. Callers may then use the returned
- /// `IoSource` to directly start async operations without needing a separate reference to the
- /// executor.
- pub fn async_from<'a, F: IntoAsync + 'a>(&self, f: F) -> AsyncResult<IoSource<F>> {
- match self {
- Executor::Uring(ex) => ex.new_source(f),
- Executor::Fd(ex) => ex.new_source(f),
- }
- }
-
- /// Spawn a new future for this executor to run to completion. Callers may use the returned
- /// `TaskHandle` to await on the result of `f`. Dropping the returned `TaskHandle` will cancel
- /// `f`, preventing it from being polled again. To drop a `TaskHandle` without canceling the
- /// future associated with it use `TaskHandle::detach`.
- ///
- /// # Examples
- ///
- /// ```
- /// # use cros_async::AsyncResult;
- /// # fn example_spawn() -> AsyncResult<()> {
- /// # use std::thread;
- ///
- /// # use cros_async::Executor;
- /// use futures::executor::block_on;
- ///
- /// # let ex = Executor::new()?;
- ///
- /// # // Spawn a thread that runs the executor.
- /// # let ex2 = ex.clone();
- /// # thread::spawn(move || ex2.run());
- ///
- /// let task = ex.spawn(async { 7 + 13 });
- ///
- /// let result = block_on(task);
- /// assert_eq!(result, 20);
- /// # Ok(())
- /// # }
- ///
- /// # example_spawn().unwrap();
- /// ```
- pub fn spawn<F>(&self, f: F) -> TaskHandle<F::Output>
- where
- F: Future + Send + 'static,
- F::Output: Send + 'static,
- {
- match self {
- Executor::Uring(ex) => TaskHandle::Uring(ex.spawn(f)),
- Executor::Fd(ex) => TaskHandle::Fd(ex.spawn(f)),
- }
- }
-
- /// Spawn a thread-local task for this executor to drive to completion. Like `spawn` but without
- /// requiring `Send` on `F` or `F::Output`. This method should only be called from the same
- /// thread where `run()` or `run_until()` is called.
- ///
- /// # Panics
- ///
- /// `Executor::run` and `Executor::run_until` will panic if they try to poll a future that was
- /// added by calling `spawn_local` from a different thread.
- ///
- /// # Examples
- ///
- /// ```
- /// # use cros_async::AsyncResult;
- /// # fn example_spawn_local() -> AsyncResult<()> {
- /// # use cros_async::Executor;
- ///
- /// # let ex = Executor::new()?;
- ///
- /// let task = ex.spawn_local(async { 7 + 13 });
- ///
- /// let result = ex.run_until(task)?;
- /// assert_eq!(result, 20);
- /// # Ok(())
- /// # }
- ///
- /// # example_spawn_local().unwrap();
- /// ```
- pub fn spawn_local<F>(&self, f: F) -> TaskHandle<F::Output>
- where
- F: Future + 'static,
- F::Output: 'static,
- {
- match self {
- Executor::Uring(ex) => TaskHandle::Uring(ex.spawn_local(f)),
- Executor::Fd(ex) => TaskHandle::Fd(ex.spawn_local(f)),
- }
- }
-
- /// Run the provided closure on a dedicated thread where blocking is allowed.
- ///
- /// Callers may `await` on the returned `TaskHandle` to wait for the result of `f`. Dropping
- /// the returned `TaskHandle` may not cancel the operation if it was already started on a
- /// worker thread.
- ///
- /// # Panics
- ///
- /// `await`ing the `TaskHandle` after the `Executor` is dropped will panic if the work was not
- /// already completed.
- ///
- /// # Examples
- ///
- /// ```edition2018
- /// # use cros_async::Executor;
- ///
- /// # async fn do_it(ex: &Executor) {
- /// let res = ex.spawn_blocking(move || {
- /// // Do some CPU-intensive or blocking work here.
- ///
- /// 42
- /// }).await;
- ///
- /// assert_eq!(res, 42);
- /// # }
- ///
- /// # let ex = Executor::new().unwrap();
- /// # ex.run_until(do_it(&ex)).unwrap();
- /// ```
- pub fn spawn_blocking<F, R>(&self, f: F) -> TaskHandle<R>
- where
- F: FnOnce() -> R + Send + 'static,
- R: Send + 'static,
- {
- match self {
- Executor::Uring(ex) => TaskHandle::Uring(ex.spawn_blocking(f)),
- Executor::Fd(ex) => TaskHandle::Fd(ex.spawn_blocking(f)),
- }
- }
-
- /// Run the executor indefinitely, driving all spawned futures to completion. This method will
- /// block the current thread and only return in the case of an error.
- ///
- /// # Panics
- ///
- /// Once this method has been called on a thread, it may only be called on that thread from that
- /// point on. Attempting to call it from another thread will panic.
- ///
- /// # Examples
- ///
- /// ```
- /// # use cros_async::AsyncResult;
- /// # fn example_run() -> AsyncResult<()> {
- /// use std::thread;
- ///
- /// use cros_async::Executor;
- /// use futures::executor::block_on;
- ///
- /// let ex = Executor::new()?;
- ///
- /// // Spawn a thread that runs the executor.
- /// let ex2 = ex.clone();
- /// thread::spawn(move || ex2.run());
- ///
- /// let task = ex.spawn(async { 7 + 13 });
- ///
- /// let result = block_on(task);
- /// assert_eq!(result, 20);
- /// # Ok(())
- /// # }
- ///
- /// # example_run().unwrap();
- /// ```
- pub fn run(&self) -> AsyncResult<()> {
- self.run_until(std::future::pending())
- }
-
- /// Drive all futures spawned in this executor until `f` completes. This method will block the
- /// current thread only until `f` is complete and there may still be unfinished futures in the
- /// executor.
- ///
- /// # Panics
- ///
- /// Once this method has been called on a thread, from then onwards it may only be called on
- /// that thread. Attempting to call it from another thread will panic.
- ///
- /// # Examples
- ///
- /// ```
- /// # use cros_async::AsyncResult;
- /// # fn example_run_until() -> AsyncResult<()> {
- /// use cros_async::Executor;
- ///
- /// let ex = Executor::new()?;
- ///
- /// let task = ex.spawn_local(async { 7 + 13 });
- ///
- /// let result = ex.run_until(task)?;
- /// assert_eq!(result, 20);
- /// # Ok(())
- /// # }
- ///
- /// # example_run_until().unwrap();
- /// ```
- pub fn run_until<F: Future>(&self, f: F) -> AsyncResult<F::Output> {
- match self {
- Executor::Uring(ex) => Ok(ex.run_until(f)?),
- Executor::Fd(ex) => Ok(ex.run_until(f)?),
- }
- }
-}
-
-impl AsRawDescriptors for Executor {
- fn as_raw_descriptors(&self) -> Vec<RawDescriptor> {
- match self {
- Executor::Uring(ex) => ex.as_raw_descriptors(),
- Executor::Fd(ex) => ex.as_raw_descriptors(),
- }
- }
-}
diff --git a/cros_async/src/sys/linux/fd_executor.rs b/cros_async/src/sys/linux/fd_executor.rs
index 495ef85..a0a0d1d 100644
--- a/cros_async/src/sys/linux/fd_executor.rs
+++ b/cros_async/src/sys/linux/fd_executor.rs
@@ -27,10 +27,12 @@
use thiserror::Error as ThisError;
use crate::common_executor::RawExecutor;
+use crate::common_executor::RawTaskHandle;
use crate::common_executor::Reactor;
use crate::waker::WakerToken;
use crate::AsyncResult;
use crate::IoSource;
+use crate::TaskHandle;
#[sorted]
#[derive(Debug, ThisError)]
@@ -378,6 +380,10 @@
) -> AsyncResult<IoSource<F>> {
Ok(IoSource::Epoll(super::PollSource::new(f, ex)?))
}
+
+ fn wrap_task_handle<R>(task: RawTaskHandle<EpollReactor, R>) -> TaskHandle<R> {
+ TaskHandle::Fd(task)
+ }
}
impl AsRawDescriptors for EpollReactor {
@@ -401,6 +407,7 @@
use super::*;
use crate::BlockingPool;
+ use crate::ExecutorTrait;
#[test]
fn test_it() {
diff --git a/cros_async/src/sys/linux/poll_source.rs b/cros_async/src/sys/linux/poll_source.rs
index ec6ca18..b7c1c15 100644
--- a/cros_async/src/sys/linux/poll_source.rs
+++ b/cros_async/src/sys/linux/poll_source.rs
@@ -385,6 +385,7 @@
use std::fs::File;
use super::*;
+ use crate::ExecutorTrait;
#[test]
fn memory_leak() {
diff --git a/cros_async/src/sys/linux/timer.rs b/cros_async/src/sys/linux/timer.rs
index 056bc41..8acefdf 100644
--- a/cros_async/src/sys/linux/timer.rs
+++ b/cros_async/src/sys/linux/timer.rs
@@ -36,13 +36,14 @@
use crate::common_executor::RawExecutor;
use crate::sys::linux::uring_executor::is_uring_stable;
use crate::Executor;
+ use crate::ExecutorTrait;
impl TimerAsync<Timer> {
pub(crate) fn new_poll(
timer: Timer,
ex: &Arc<RawExecutor<EpollReactor>>,
) -> AsyncResult<TimerAsync<Timer>> {
- ex.new_source(timer)
+ ex.async_from(timer)
.map(|io_source| TimerAsync { io_source })
}
@@ -50,7 +51,7 @@
timer: Timer,
ex: &Arc<RawExecutor<UringReactor>>,
) -> AsyncResult<TimerAsync<Timer>> {
- ex.new_source(timer)
+ ex.async_from(timer)
.map(|io_source| TimerAsync { io_source })
}
}
diff --git a/cros_async/src/sys/linux/tokio_source.rs b/cros_async/src/sys/linux/tokio_source.rs
new file mode 100644
index 0000000..4a84712
--- /dev/null
+++ b/cros_async/src/sys/linux/tokio_source.rs
@@ -0,0 +1,405 @@
+// Copyright 2024 The ChromiumOS Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+use std::io;
+use std::os::fd::AsRawFd;
+use std::os::fd::OwnedFd;
+use std::os::fd::RawFd;
+use std::sync::Arc;
+
+use base::add_fd_flags;
+use base::clone_descriptor;
+use base::linux::fallocate;
+use base::linux::FallocateMode;
+use base::AsRawDescriptor;
+use base::VolatileSlice;
+use tokio::io::unix::AsyncFd;
+
+use crate::mem::MemRegion;
+use crate::AsyncError;
+use crate::AsyncResult;
+use crate::BackingMemory;
+
+#[derive(Debug, thiserror::Error)]
+pub enum Error {
+ #[error("Failed to copy the FD for the polling context: '{0}'")]
+ DuplicatingFd(base::Error),
+ #[error("Failed to punch hole in file: '{0}'.")]
+ Fallocate(base::Error),
+ #[error("Failed to fdatasync: '{0}'")]
+ Fdatasync(io::Error),
+ #[error("Failed to fsync: '{0}'")]
+ Fsync(io::Error),
+ #[error("Failed to join task: '{0}'")]
+ Join(tokio::task::JoinError),
+ #[error("Cannot wait on file descriptor")]
+ NonWaitable,
+ #[error("Failed to read: '{0}'")]
+ Read(io::Error),
+ #[error("Failed to set nonblocking: '{0}'")]
+ SettingNonBlocking(base::Error),
+ #[error("Tokio Async FD error: '{0}'")]
+ TokioAsyncFd(io::Error),
+ #[error("Failed to write: '{0}'")]
+ Write(io::Error),
+}
+
+impl From<Error> for io::Error {
+ fn from(e: Error) -> Self {
+ use Error::*;
+ match e {
+ DuplicatingFd(e) => e.into(),
+ Fallocate(e) => e.into(),
+ Fdatasync(e) => e,
+ Fsync(e) => e,
+ Join(e) => io::Error::new(io::ErrorKind::Other, e),
+ NonWaitable => io::Error::new(io::ErrorKind::Other, e),
+ Read(e) => e,
+ SettingNonBlocking(e) => e.into(),
+ TokioAsyncFd(e) => e,
+ Write(e) => e,
+ }
+ }
+}
+
+enum FdType {
+ Async(AsyncFd<Arc<OwnedFd>>),
+ Blocking(Arc<OwnedFd>),
+}
+
+impl AsRawFd for FdType {
+ fn as_raw_fd(&self) -> RawFd {
+ match self {
+ FdType::Async(async_fd) => async_fd.as_raw_fd(),
+ FdType::Blocking(blocking) => blocking.as_raw_fd(),
+ }
+ }
+}
+
+impl From<Error> for AsyncError {
+ fn from(e: Error) -> AsyncError {
+ AsyncError::SysVariants(e.into())
+ }
+}
+
+fn do_fdatasync(raw: Arc<OwnedFd>) -> io::Result<()> {
+ let fd = raw.as_raw_fd();
+ // SAFETY: we partially own `raw`
+ match unsafe { libc::fdatasync(fd) } {
+ 0 => Ok(()),
+ _ => Err(io::Error::last_os_error()),
+ }
+}
+
+fn do_fsync(raw: Arc<OwnedFd>) -> io::Result<()> {
+ let fd = raw.as_raw_fd();
+ // SAFETY: we partially own `raw`
+ match unsafe { libc::fsync(fd) } {
+ 0 => Ok(()),
+ _ => Err(io::Error::last_os_error()),
+ }
+}
+
+fn do_read_to_mem(
+ raw: Arc<OwnedFd>,
+ file_offset: Option<u64>,
+ io_vecs: &Vec<VolatileSlice>,
+) -> io::Result<usize> {
+ let ptr = io_vecs.as_ptr() as *const libc::iovec;
+ let len = io_vecs.len() as i32;
+ let fd = raw.as_raw_fd();
+ let res = match file_offset {
+ // SAFETY: we partially own `raw`, `io_vecs` is validated
+ Some(off) => unsafe { libc::preadv64(fd, ptr, len, off as libc::off64_t) },
+ // SAFETY: we partially own `raw`, `io_vecs` is validated
+ None => unsafe { libc::readv(fd, ptr, len) },
+ };
+ match res {
+ r if r >= 0 => Ok(res as usize),
+ _ => Err(io::Error::last_os_error()),
+ }
+}
+fn do_read_to_vec(
+ raw: Arc<OwnedFd>,
+ file_offset: Option<u64>,
+ vec: &mut Vec<u8>,
+) -> io::Result<usize> {
+ let fd = raw.as_raw_fd();
+ let ptr = vec.as_mut_ptr() as *mut libc::c_void;
+ let res = match file_offset {
+ // SAFETY: we partially own `raw`, `ptr` has space up to vec.len()
+ Some(off) => unsafe { libc::pread64(fd, ptr, vec.len(), off as libc::off64_t) },
+ // SAFETY: we partially own `raw`, `ptr` has space up to vec.len()
+ None => unsafe { libc::read(fd, ptr, vec.len()) },
+ };
+ match res {
+ r if r >= 0 => Ok(res as usize),
+ _ => Err(io::Error::last_os_error()),
+ }
+}
+
+fn do_write_from_vec(
+ raw: Arc<OwnedFd>,
+ file_offset: Option<u64>,
+ vec: &Vec<u8>,
+) -> io::Result<usize> {
+ let fd = raw.as_raw_fd();
+ let ptr = vec.as_ptr() as *const libc::c_void;
+ let res = match file_offset {
+ // SAFETY: we partially own `raw`, `ptr` has data up to vec.len()
+ Some(off) => unsafe { libc::pwrite64(fd, ptr, vec.len(), off as libc::off64_t) },
+ // SAFETY: we partially own `raw`, `ptr` has data up to vec.len()
+ None => unsafe { libc::write(fd, ptr, vec.len()) },
+ };
+ match res {
+ r if r >= 0 => Ok(res as usize),
+ _ => Err(io::Error::last_os_error()),
+ }
+}
+
+fn do_write_from_mem(
+ raw: Arc<OwnedFd>,
+ file_offset: Option<u64>,
+ io_vecs: &Vec<VolatileSlice>,
+) -> io::Result<usize> {
+ let ptr = io_vecs.as_ptr() as *const libc::iovec;
+ let len = io_vecs.len() as i32;
+ let fd = raw.as_raw_fd();
+ let res = match file_offset {
+ // SAFETY: we partially own `raw`, `io_vecs` is validated
+ Some(off) => unsafe { libc::pwritev64(fd, ptr, len, off as libc::off64_t) },
+ // SAFETY: we partially own `raw`, `io_vecs` is validated
+ None => unsafe { libc::writev(fd, ptr, len) },
+ };
+ match res {
+ r if r >= 0 => Ok(res as usize),
+ _ => Err(io::Error::last_os_error()),
+ }
+}
+
+pub struct TokioSource<T> {
+ fd: FdType,
+ inner: T,
+ runtime: tokio::runtime::Handle,
+}
+impl<T: AsRawDescriptor> TokioSource<T> {
+ pub fn new(inner: T, runtime: tokio::runtime::Handle) -> Result<TokioSource<T>, Error> {
+ let _guard = runtime.enter(); // Required for AsyncFd
+ let safe_fd = clone_descriptor(&inner).map_err(Error::DuplicatingFd)?;
+ let fd_arc: Arc<OwnedFd> = Arc::new(safe_fd.into());
+ let fd = match AsyncFd::new(fd_arc.clone()) {
+ Ok(async_fd) => {
+ add_fd_flags(async_fd.get_ref().as_raw_descriptor(), libc::O_NONBLOCK)
+ .map_err(Error::SettingNonBlocking)?;
+ FdType::Async(async_fd)
+ }
+ Err(e) if e.kind() == io::ErrorKind::PermissionDenied => FdType::Blocking(fd_arc),
+ Err(e) => return Err(Error::TokioAsyncFd(e)),
+ };
+ Ok(TokioSource { fd, inner, runtime })
+ }
+
+ pub fn as_source(&self) -> &T {
+ &self.inner
+ }
+
+ pub fn as_source_mut(&mut self) -> &mut T {
+ &mut self.inner
+ }
+
+ fn clone_fd(&self) -> Arc<OwnedFd> {
+ match &self.fd {
+ FdType::Async(async_fd) => async_fd.get_ref().clone(),
+ FdType::Blocking(blocking) => blocking.clone(),
+ }
+ }
+
+ pub async fn fdatasync(&self) -> AsyncResult<()> {
+ let fd = self.clone_fd();
+ Ok(self
+ .runtime
+ .spawn_blocking(move || do_fdatasync(fd))
+ .await
+ .map_err(Error::Join)?
+ .map_err(Error::Fdatasync)?)
+ }
+
+ pub async fn fsync(&self) -> AsyncResult<()> {
+ let fd = self.clone_fd();
+ Ok(self
+ .runtime
+ .spawn_blocking(move || do_fsync(fd))
+ .await
+ .map_err(Error::Join)?
+ .map_err(Error::Fsync)?)
+ }
+
+ pub fn into_source(self) -> T {
+ self.inner
+ }
+
+ pub async fn read_to_vec(
+ &self,
+ file_offset: Option<u64>,
+ mut vec: Vec<u8>,
+ ) -> AsyncResult<(usize, Vec<u8>)> {
+ Ok(match &self.fd {
+ FdType::Async(async_fd) => {
+ let res = async_fd
+ .async_io(tokio::io::Interest::READABLE, |fd| {
+ do_read_to_vec(fd.clone(), file_offset, &mut vec)
+ })
+ .await
+ .map_err(AsyncError::Io)?;
+ (res, vec)
+ }
+ FdType::Blocking(blocking) => {
+ let fd = blocking.clone();
+ self.runtime
+ .spawn_blocking(move || {
+ let size = do_read_to_vec(fd, file_offset, &mut vec)?;
+ Ok((size, vec))
+ })
+ .await
+ .map_err(Error::Join)?
+ .map_err(Error::Read)?
+ }
+ })
+ }
+
+ pub async fn read_to_mem(
+ &self,
+ file_offset: Option<u64>,
+ mem: Arc<dyn BackingMemory + Send + Sync>,
+ mem_offsets: impl IntoIterator<Item = MemRegion>,
+ ) -> AsyncResult<usize> {
+ let mem_offsets_vec: Vec<MemRegion> = mem_offsets.into_iter().collect();
+ Ok(match &self.fd {
+ FdType::Async(async_fd) => {
+ let iovecs = mem_offsets_vec
+ .into_iter()
+ .filter_map(|mem_range| mem.get_volatile_slice(mem_range).ok())
+ .collect::<Vec<VolatileSlice>>();
+ async_fd
+ .async_io(tokio::io::Interest::READABLE, |fd| {
+ do_read_to_mem(fd.clone(), file_offset, &iovecs)
+ })
+ .await
+ .map_err(AsyncError::Io)?
+ }
+ FdType::Blocking(blocking) => {
+ let fd = blocking.clone();
+ self.runtime
+ .spawn_blocking(move || {
+ let iovecs = mem_offsets_vec
+ .into_iter()
+ .filter_map(|mem_range| mem.get_volatile_slice(mem_range).ok())
+ .collect::<Vec<VolatileSlice>>();
+ do_read_to_mem(fd, file_offset, &iovecs)
+ })
+ .await
+ .map_err(Error::Join)?
+ .map_err(Error::Read)?
+ }
+ })
+ }
+
+ pub async fn punch_hole(&self, file_offset: u64, len: u64) -> AsyncResult<()> {
+ let fd = self.clone_fd();
+ Ok(self
+ .runtime
+ .spawn_blocking(move || fallocate(&*fd, FallocateMode::PunchHole, file_offset, len))
+ .await
+ .map_err(Error::Join)?
+ .map_err(Error::Fallocate)?)
+ }
+
+ pub async fn wait_readable(&self) -> AsyncResult<()> {
+ match &self.fd {
+ FdType::Async(async_fd) => async_fd
+ .readable()
+ .await
+ .map_err(crate::AsyncError::Io)?
+ .retain_ready(),
+ FdType::Blocking(_) => return Err(Error::NonWaitable.into()),
+ }
+ Ok(())
+ }
+
+ pub async fn write_from_mem(
+ &self,
+ file_offset: Option<u64>,
+ mem: Arc<dyn BackingMemory + Send + Sync>,
+ mem_offsets: impl IntoIterator<Item = MemRegion>,
+ ) -> AsyncResult<usize> {
+ let mem_offsets_vec: Vec<MemRegion> = mem_offsets.into_iter().collect();
+ Ok(match &self.fd {
+ FdType::Async(async_fd) => {
+ let iovecs = mem_offsets_vec
+ .into_iter()
+ .filter_map(|mem_range| mem.get_volatile_slice(mem_range).ok())
+ .collect::<Vec<VolatileSlice>>();
+ async_fd
+ .async_io(tokio::io::Interest::WRITABLE, |fd| {
+ do_write_from_mem(fd.clone(), file_offset, &iovecs)
+ })
+ .await
+ .map_err(AsyncError::Io)?
+ }
+ FdType::Blocking(blocking) => {
+ let fd = blocking.clone();
+ self.runtime
+ .spawn_blocking(move || {
+ let iovecs = mem_offsets_vec
+ .into_iter()
+ .filter_map(|mem_range| mem.get_volatile_slice(mem_range).ok())
+ .collect::<Vec<VolatileSlice>>();
+ do_write_from_mem(fd, file_offset, &iovecs.clone())
+ })
+ .await
+ .map_err(Error::Join)?
+ .map_err(Error::Read)?
+ }
+ })
+ }
+
+ pub async fn write_from_vec(
+ &self,
+ file_offset: Option<u64>,
+ vec: Vec<u8>,
+ ) -> AsyncResult<(usize, Vec<u8>)> {
+ Ok(match &self.fd {
+ FdType::Async(async_fd) => {
+ let res = async_fd
+ .async_io(tokio::io::Interest::WRITABLE, |fd| {
+ do_write_from_vec(fd.clone(), file_offset, &vec)
+ })
+ .await
+ .map_err(AsyncError::Io)?;
+ (res, vec)
+ }
+ FdType::Blocking(blocking) => {
+ let fd = blocking.clone();
+ self.runtime
+ .spawn_blocking(move || {
+ let size = do_write_from_vec(fd.clone(), file_offset, &vec)?;
+ Ok((size, vec))
+ })
+ .await
+ .map_err(Error::Join)?
+ .map_err(Error::Read)?
+ }
+ })
+ }
+
+ pub async fn write_zeroes_at(&self, file_offset: u64, len: u64) -> AsyncResult<()> {
+ let fd = self.clone_fd();
+ Ok(self
+ .runtime
+ .spawn_blocking(move || fallocate(&*fd, FallocateMode::ZeroRange, file_offset, len))
+ .await
+ .map_err(Error::Join)?
+ .map_err(Error::Fallocate)?)
+ }
+}
diff --git a/cros_async/src/sys/linux/uring_executor.rs b/cros_async/src/sys/linux/uring_executor.rs
index ab50cf6..ff18dff 100644
--- a/cros_async/src/sys/linux/uring_executor.rs
+++ b/cros_async/src/sys/linux/uring_executor.rs
@@ -77,6 +77,7 @@
use thiserror::Error as ThisError;
use crate::common_executor::RawExecutor;
+use crate::common_executor::RawTaskHandle;
use crate::common_executor::Reactor;
use crate::mem::BackingMemory;
use crate::waker::WakerToken;
@@ -85,6 +86,7 @@
use crate::AsyncResult;
use crate::IoSource;
use crate::MemRegion;
+use crate::TaskHandle;
#[sorted]
#[derive(Debug, ThisError)]
@@ -786,6 +788,10 @@
) -> AsyncResult<IoSource<F>> {
Ok(IoSource::Uring(super::UringSource::new(f, ex)?))
}
+
+ fn wrap_task_handle<R>(task: RawTaskHandle<UringReactor, R>) -> TaskHandle<R> {
+ TaskHandle::Uring(task)
+ }
}
impl AsRawDescriptor for UringReactor {
@@ -895,6 +901,7 @@
use crate::mem::MemRegion;
use crate::mem::VecIoWrapper;
use crate::BlockingPool;
+ use crate::ExecutorTrait;
// A future that returns ready when the uring queue is empty.
struct UringQueueEmpty<'a> {
diff --git a/cros_async/src/sys/linux/uring_source.rs b/cros_async/src/sys/linux/uring_source.rs
index b0b76f2..d01762a 100644
--- a/cros_async/src/sys/linux/uring_source.rs
+++ b/cros_async/src/sys/linux/uring_source.rs
@@ -204,6 +204,7 @@
use super::*;
use crate::sys::linux::ExecutorKindSys;
use crate::Executor;
+ use crate::ExecutorTrait;
use crate::IoSource;
async fn read_u64<T: AsRawDescriptor>(source: &UringSource<T>) -> u64 {
@@ -385,7 +386,7 @@
waker: None,
}));
- let uring_ex = Executor::with_executor_kind(ExecutorKindSys::Uring).unwrap();
+ let uring_ex = Executor::with_executor_kind(ExecutorKindSys::Uring.into()).unwrap();
let f = File::open("/dev/zero").unwrap();
let source = uring_ex.async_from(f).unwrap();
@@ -394,7 +395,7 @@
};
let handle = std::thread::spawn(move || uring_ex.run_until(quit));
- let poll_ex = Executor::with_executor_kind(ExecutorKindSys::Fd).unwrap();
+ let poll_ex = Executor::with_executor_kind(ExecutorKindSys::Fd.into()).unwrap();
poll_ex.run_until(go(source)).unwrap();
state.lock().wake();
@@ -420,7 +421,7 @@
waker: None,
}));
- let poll_ex = Executor::with_executor_kind(ExecutorKindSys::Fd).unwrap();
+ let poll_ex = Executor::with_executor_kind(ExecutorKindSys::Fd.into()).unwrap();
let f = File::open("/dev/zero").unwrap();
let source = poll_ex.async_from(f).unwrap();
@@ -429,7 +430,7 @@
};
let handle = std::thread::spawn(move || poll_ex.run_until(quit));
- let uring_ex = Executor::with_executor_kind(ExecutorKindSys::Uring).unwrap();
+ let uring_ex = Executor::with_executor_kind(ExecutorKindSys::Uring.into()).unwrap();
uring_ex.run_until(go(source)).unwrap();
state.lock().wake();
diff --git a/cros_async/src/sys/windows.rs b/cros_async/src/sys/windows.rs
index b040309..315426f 100644
--- a/cros_async/src/sys/windows.rs
+++ b/cros_async/src/sys/windows.rs
@@ -11,6 +11,8 @@
mod io_completion_port;
pub mod overlapped_source;
mod timer;
+#[cfg(feature = "tokio")]
+pub mod tokio_source;
pub mod wait_for_handle;
pub use error::AsyncErrorSys;
diff --git a/cros_async/src/sys/windows/error.rs b/cros_async/src/sys/windows/error.rs
index f043745..74c4c04 100644
--- a/cros_async/src/sys/windows/error.rs
+++ b/cros_async/src/sys/windows/error.rs
@@ -13,6 +13,9 @@
HandleSource(#[from] super::handle_source::Error),
#[error("An error with a handle source: {0}")]
OverlappedSource(#[from] super::overlapped_source::Error),
+ #[cfg(feature = "tokio")]
+ #[error("Tokio source error: {0}")]
+ Tokio(#[from] super::tokio_source::Error),
}
impl From<AsyncErrorSys> for io::Error {
@@ -21,6 +24,8 @@
AsyncErrorSys::HandleExecutor(e) => e.into(),
AsyncErrorSys::HandleSource(e) => e.into(),
AsyncErrorSys::OverlappedSource(e) => e.into(),
+ #[cfg(feature = "tokio")]
+ AsyncErrorSys::Tokio(e) => e.into(),
}
}
}
diff --git a/cros_async/src/sys/windows/executor.rs b/cros_async/src/sys/windows/executor.rs
index 4c78233..31ba4bf 100644
--- a/cros_async/src/sys/windows/executor.rs
+++ b/cros_async/src/sys/windows/executor.rs
@@ -2,405 +2,25 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-use std::future::Future;
-use std::pin::Pin;
-use std::sync::Arc;
-
-use once_cell::sync::OnceCell;
-use serde::Deserialize;
-use serde::Serialize;
-use thiserror::Error as ThisError;
-
-use super::HandleReactor;
-use crate::common_executor;
-use crate::common_executor::RawExecutor;
-use crate::AsyncResult;
-use crate::IntoAsync;
-use crate::IoSource;
-
-pub const DEFAULT_IO_CONCURRENCY: u32 = 1;
-
-/// An executor for scheduling tasks that poll futures to completion.
-///
-/// All asynchronous operations must run within an executor, which is capable of spawning futures as
-/// tasks. This executor also provides a mechanism for performing asynchronous I/O operations.
-///
-/// The returned type is a cheap, clonable handle to the underlying executor. Cloning it will only
-/// create a new reference, not a new executor.
-///
-/// Note that language limitations (trait objects can have <=1 non auto trait) require this to be
-/// represented on the POSIX side as an enum, rather than a trait. This leads to some code &
-/// interface duplication, but as far as we understand that is unavoidable.
-///
-/// See <https://chromium-review.googlesource.com/c/chromiumos/platform/crosvm/+/2571401/2..6/cros_async/src/executor.rs#b75>
-/// for further details.
-///
-/// # Examples
-///
-/// Concurrently wait for multiple files to become readable/writable and then read/write the data.
-///
-/// ```
-/// use std::cmp::min;
-/// use std::error::Error;
-/// use std::fs::{File, OpenOptions};
-///
-/// use cros_async::{AsyncResult, Executor, IoSource, complete3};
-/// const CHUNK_SIZE: usize = 32;
-///
-/// // Write all bytes from `data` to `f`.
-/// async fn write_file(f: &IoSource<File>, mut data: Vec<u8>) -> AsyncResult<()> {
-/// while data.len() > 0 {
-/// let (count, mut buf) = f.write_from_vec(Some(0), data).await?;
-///
-/// data = buf.split_off(count);
-/// }
-///
-/// Ok(())
-/// }
-///
-/// // Transfer `len` bytes of data from `from` to `to`.
-/// async fn transfer_data(
-/// from: IoSource<File>,
-/// to: IoSource<File>,
-/// len: usize,
-/// ) -> AsyncResult<usize> {
-/// let mut rem = len;
-///
-/// while rem > 0 {
-/// let buf = vec![0u8; min(rem, CHUNK_SIZE)];
-/// let (count, mut data) = from.read_to_vec(Some(0), buf).await?;
-///
-/// if count == 0 {
-/// // End of file. Return the number of bytes transferred.
-/// return Ok(len - rem);
-/// }
-///
-/// data.truncate(count);
-/// write_file(&*to, data).await?;
-///
-/// rem = rem.saturating_sub(count);
-/// }
-///
-/// Ok(len)
-/// }
-///
-/// #[cfg(any(target_os = "android", target_os = "linux"))]
-/// # fn do_it() -> Result<(), Box<dyn Error>> {
-/// let ex = Executor::new()?;
-///
-/// let (rx, tx) = base::pipe()?;
-/// let zero = File::open("/dev/zero")?;
-/// let zero_bytes = CHUNK_SIZE * 7;
-/// let zero_to_pipe = transfer_data(
-/// ex.async_from(zero)?,
-/// ex.async_from(tx.try_clone()?)?,
-/// zero_bytes,
-/// );
-///
-/// let rand = File::open("/dev/urandom")?;
-/// let rand_bytes = CHUNK_SIZE * 19;
-/// let rand_to_pipe = transfer_data(ex.async_from(rand)?, ex.async_from(tx)?, rand_bytes);
-///
-/// let null = OpenOptions::new().write(true).open("/dev/null")?;
-/// let null_bytes = zero_bytes + rand_bytes;
-/// let pipe_to_null = transfer_data(ex.async_from(rx)?, ex.async_from(null)?, null_bytes);
-///
-/// ex.run_until(complete3(
-/// async { assert_eq!(pipe_to_null.await.unwrap(), null_bytes) },
-/// async { assert_eq!(zero_to_pipe.await.unwrap(), zero_bytes) },
-/// async { assert_eq!(rand_to_pipe.await.unwrap(), rand_bytes) },
-/// ))?;
-///
-/// # Ok(())
-/// # }
-/// #[cfg(any(target_os = "android", target_os = "linux"))]
-/// # do_it().unwrap();
-/// ```
-
-#[derive(Clone)]
-pub enum Executor {
- Handle(Arc<RawExecutor<HandleReactor>>),
- Overlapped(Arc<RawExecutor<HandleReactor>>),
-}
-
/// An enum to express the kind of the backend of `Executor`
-#[derive(
- Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize, serde_keyvalue::FromKeyValues,
-)]
+#[derive(Clone, Copy, Debug, PartialEq, Eq, serde::Deserialize, serde_keyvalue::FromKeyValues)]
#[serde(deny_unknown_fields, rename_all = "kebab-case")]
pub enum ExecutorKindSys {
Handle,
- Overlapped,
+ Overlapped { concurrency: Option<u32> },
}
-/// If set, [`Executor::new()`] is created with `ExecutorKindSys` of `DEFAULT_EXECUTOR_KIND`.
-static DEFAULT_EXECUTOR_KIND: OnceCell<ExecutorKindSys> = OnceCell::new();
-
-impl Default for ExecutorKindSys {
- fn default() -> Self {
- DEFAULT_EXECUTOR_KIND
- .get()
- .copied()
- .unwrap_or(ExecutorKindSys::Handle)
- }
-}
-
-/// The error type for [`Executor::set_default_executor_kind()`].
-#[derive(ThisError, Debug)]
-pub enum SetDefaultExecutorKindError {
- /// The default executor kind is set more than once.
- #[error("The default executor kind is already set to {0:?}")]
- SetMoreThanOnce(ExecutorKindSys),
-}
-
-/// Reference to a task managed by the executor.
-///
-/// Dropping a `TaskHandle` attempts to cancel the associated task. Call `detach` to allow it to
-/// continue running the background.
-///
-/// `await`ing the `TaskHandle` waits for the task to finish and yields its result.
-pub enum TaskHandle<R> {
- Handle(common_executor::TaskHandle<HandleReactor, R>),
-}
-
-impl<R: Send + 'static> TaskHandle<R> {
- pub fn detach(self) {
- match self {
- TaskHandle::Handle(x) => x.detach(),
- }
- }
-
- // Cancel the task and wait for it to stop. Returns the result of the task if it was already
- // finished.
- pub async fn cancel(self) -> Option<R> {
- match self {
- TaskHandle::Handle(x) => x.cancel().await,
- }
- }
-}
-
-impl<R: 'static> Future for TaskHandle<R> {
- type Output = R;
-
- fn poll(self: Pin<&mut Self>, cx: &mut std::task::Context) -> std::task::Poll<Self::Output> {
- match self.get_mut() {
- TaskHandle::Handle(x) => Pin::new(x).poll(cx),
- }
- }
-}
-
-impl Executor {
- /// Create a new `Executor`.
- pub fn new() -> AsyncResult<Self> {
- Executor::with_executor_kind(ExecutorKindSys::default())
- }
-
- /// Create a new `Executor` of the given `ExecutorKindSys`.
- pub fn with_executor_kind(kind: ExecutorKindSys) -> AsyncResult<Self> {
- match kind {
- ExecutorKindSys::Handle => Ok(Executor::Handle(RawExecutor::<HandleReactor>::new()?)),
- ExecutorKindSys::Overlapped => {
- Ok(Executor::Overlapped(RawExecutor::<HandleReactor>::new()?))
- }
- }
- }
-
- /// Create a new `Executor` of the given `ExecutorKind`.
- pub fn with_kind_and_concurrency(kind: ExecutorKindSys, concurrency: u32) -> AsyncResult<Self> {
- match kind {
- ExecutorKindSys::Handle => Ok(Executor::Handle(RawExecutor::<HandleReactor>::new()?)),
- ExecutorKindSys::Overlapped => Ok(Executor::Overlapped(
- RawExecutor::<HandleReactor>::new_with(HandleReactor::new_with(concurrency)?)?,
- )),
- }
- }
-
- /// Create a new `IoSource<F>` associated with `self`. Callers may then use the returned
- /// `IoSource` to directly start async operations without needing a separate reference to the
- /// executor.
- pub fn async_from<'a, F: IntoAsync + 'a>(&self, f: F) -> AsyncResult<IoSource<F>> {
- match self {
- Executor::Handle(ex) => ex.new_source(f),
- Executor::Overlapped(ex) => ex.new_source(f),
- }
- }
-
- /// Create a new overlapped `IoSource<F>` associated with `self`. Callers may then use the
- /// If the executor is not overlapped, then Handle source is returned.
- /// returned `IoSource` to directly start async operations without needing a separate reference
- /// to the executor.
- pub fn async_overlapped_from<'a, F: IntoAsync + 'a>(&self, f: F) -> AsyncResult<IoSource<F>> {
- match self {
- Executor::Handle(ex) => ex.new_source(f),
- Executor::Overlapped(ex) => Ok(IoSource::Overlapped(super::OverlappedSource::new(
- f, ex, false,
- )?)),
- }
- }
-
- /// Set the default ExecutorKind for [`Self::new()`]. This call is effective only once.
- /// If a call is the first call, it sets the default, and `set_default_executor_kind`
- /// returns `Ok(())`. Otherwise, it returns `SetDefaultExecutorKindError::SetMoreThanOnce`
- /// which contains the existing ExecutorKind value configured by the first call.
- pub fn set_default_executor_kind(
- executor_kind: ExecutorKindSys,
- ) -> Result<(), SetDefaultExecutorKindError> {
- DEFAULT_EXECUTOR_KIND.set(executor_kind).map_err(|_|
- // `expect` succeeds since this closure runs only when DEFAULT_EXECUTOR_KIND is set.
- SetDefaultExecutorKindError::SetMoreThanOnce(
- *DEFAULT_EXECUTOR_KIND
- .get()
- .expect("Failed to get DEFAULT_EXECUTOR_KIND"),
- ))
- }
-
- /// Spawn a new future for this executor to run to completion. Callers may use the returned
- /// `TaskHandle` to await on the result of `f`. Dropping the returned `TaskHandle` will cancel
- /// `f`, preventing it from being polled again. To drop a `TaskHandle` without canceling the
- /// future associated with it use `TaskHandle::detach`.
- ///
- /// # Examples
- ///
- /// ```
- /// # use cros_async::AsyncResult;
- /// # fn example_spawn() -> AsyncResult<()> {
- /// # use std::thread;
- ///
- /// # use cros_async::Executor;
- /// use futures::executor::block_on;
- ///
- /// # let ex = Executor::new()?;
- ///
- /// # // Spawn a thread that runs the executor.
- /// # let ex2 = ex.clone();
- /// # thread::spawn(move || ex2.run());
- ///
- /// let task = ex.spawn(async { 7 + 13 });
- ///
- /// let result = block_on(task);
- /// assert_eq!(result, 20);
- /// # Ok(())
- /// # }
- ///
- /// # example_spawn().unwrap();
- /// ```
- pub fn spawn<F>(&self, f: F) -> TaskHandle<F::Output>
+impl serde::Serialize for ExecutorKindSys {
+ fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
- F: Future + Send + 'static,
- F::Output: Send + 'static,
+ S: serde::Serializer,
{
- match self {
- Executor::Handle(ex) => TaskHandle::Handle(ex.spawn(f)),
- Executor::Overlapped(ex) => TaskHandle::Handle(ex.spawn(f)),
- }
- }
-
- /// Spawn a thread-local task for this executor to drive to completion. Like `spawn` but without
- /// requiring `Send` on `F` or `F::Output`. This method should only be called from the same
- /// thread where `run()` or `run_until()` is called.
- ///
- /// # Panics
- ///
- /// `Executor::run` and `Executor::run_util` will panic if they try to poll a future that was
- /// added by calling `spawn_local` from a different thread.
- ///
- /// # Examples
- ///
- /// ```
- /// # use cros_async::AsyncResult;
- /// # fn example_spawn_local() -> AsyncResult<()> {
- /// # use cros_async::Executor;
- ///
- /// # let ex = Executor::new()?;
- ///
- /// let task = ex.spawn_local(async { 7 + 13 });
- ///
- /// let result = ex.run_until(task)?;
- /// assert_eq!(result, 20);
- /// # Ok(())
- /// # }
- ///
- /// # example_spawn_local().unwrap();
- /// ```
- pub fn spawn_local<F>(&self, f: F) -> TaskHandle<F::Output>
- where
- F: Future + 'static,
- F::Output: 'static,
- {
- match self {
- Executor::Handle(ex) => TaskHandle::Handle(ex.spawn_local(f)),
- Executor::Overlapped(ex) => TaskHandle::Handle(ex.spawn_local(f)),
- }
- }
-
- /// Run the executor indefinitely, driving all spawned futures to completion. This method will
- /// block the current thread and only return in the case of an error.
- ///
- /// # Panics
- ///
- /// Once this method has been called on a thread, it may only be called on that thread from that
- /// point on. Attempting to call it from another thread will panic.
- ///
- /// # Examples
- ///
- /// ```
- /// # use cros_async::AsyncResult;
- /// # fn example_run() -> AsyncResult<()> {
- /// use std::thread;
- ///
- /// use cros_async::Executor;
- /// use futures::executor::block_on;
- ///
- /// let ex = Executor::new()?;
- ///
- /// // Spawn a thread that runs the executor.
- /// let ex2 = ex.clone();
- /// thread::spawn(move || ex2.run());
- ///
- /// let task = ex.spawn(async { 7 + 13 });
- ///
- /// let result = block_on(task);
- /// assert_eq!(result, 20);
- /// # Ok(())
- /// # }
- ///
- /// # example_run().unwrap();
- /// ```
- pub fn run(&self) -> AsyncResult<()> {
- self.run_until(std::future::pending())
- }
-
- /// Drive all futures spawned in this executor until `f` completes. This method will block the
- /// current thread only until `f` is complete and there may still be unfinished futures in the
- /// executor.
- ///
- /// # Panics
- ///
- /// Once this method has been called on a thread, from then onwards it may only be called on
- /// that thread. Attempting to call it from another thread will panic.
- ///
- /// # Examples
- ///
- /// ```
- /// # use cros_async::AsyncResult;
- /// # fn example_run_until() -> AsyncResult<()> {
- /// use cros_async::Executor;
- ///
- /// let ex = Executor::new()?;
- ///
- /// let task = ex.spawn_local(async { 7 + 13 });
- ///
- /// let result = ex.run_until(task)?;
- /// assert_eq!(result, 20);
- /// # Ok(())
- /// # }
- ///
- /// # example_run_until().unwrap();
- /// ```
- pub fn run_until<F: Future>(&self, f: F) -> AsyncResult<F::Output> {
- match self {
- Executor::Handle(ex) => Ok(ex.run_until(f)?),
- Executor::Overlapped(ex) => Ok(ex.run_until(f)?),
- }
+ serializer.serialize_str(&match self {
+ ExecutorKindSys::Handle => "handle".to_string(),
+ ExecutorKindSys::Overlapped { concurrency: None } => "overlapped".to_string(),
+ ExecutorKindSys::Overlapped {
+ concurrency: Some(n),
+ } => format!("overlapped,concurrency={}", n),
+ })
}
}
diff --git a/cros_async/src/sys/windows/handle_executor.rs b/cros_async/src/sys/windows/handle_executor.rs
index 7bb00c4..c5c4fe5 100644
--- a/cros_async/src/sys/windows/handle_executor.rs
+++ b/cros_async/src/sys/windows/handle_executor.rs
@@ -26,7 +26,7 @@
use crate::common_executor;
use crate::common_executor::RawExecutor;
-use crate::sys::windows::executor::DEFAULT_IO_CONCURRENCY;
+use crate::common_executor::RawTaskHandle;
use crate::sys::windows::io_completion_port::CompletionPacket;
use crate::sys::windows::io_completion_port::IoCompletionPort;
use crate::waker::WakerToken;
@@ -34,6 +34,9 @@
use crate::AsyncError;
use crate::AsyncResult;
use crate::IoSource;
+use crate::TaskHandle;
+
+const DEFAULT_IO_CONCURRENCY: u32 = 1;
#[derive(Debug, ThisError)]
pub enum Error {
@@ -92,7 +95,7 @@
})
}
- fn new() -> Result<Self> {
+ pub fn new() -> Result<Self> {
Self::new_with(DEFAULT_IO_CONCURRENCY)
}
@@ -214,6 +217,10 @@
) -> AsyncResult<IoSource<F>> {
Ok(IoSource::Handle(super::HandleSource::new(f)?))
}
+
+ fn wrap_task_handle<R>(task: RawTaskHandle<HandleReactor, R>) -> TaskHandle<R> {
+ TaskHandle::Handle(task)
+ }
}
/// Represents a handle that has been registered for overlapped operations with a specific executor.
@@ -343,6 +350,7 @@
use futures::StreamExt;
use crate::BlockingPool;
+ use crate::ExecutorTrait;
#[test]
fn run_future() {
diff --git a/cros_async/src/sys/windows/handle_source.rs b/cros_async/src/sys/windows/handle_source.rs
index cc9f18e..f688567 100644
--- a/cros_async/src/sys/windows/handle_source.rs
+++ b/cros_async/src/sys/windows/handle_source.rs
@@ -400,6 +400,7 @@
use super::super::HandleReactor;
use super::*;
use crate::common_executor::RawExecutor;
+ use crate::ExecutorTrait;
#[cfg_attr(all(target_os = "windows", target_env = "gnu"), ignore)]
#[test]
diff --git a/cros_async/src/sys/windows/overlapped_source.rs b/cros_async/src/sys/windows/overlapped_source.rs
index dc8ca9b..0650187 100644
--- a/cros_async/src/sys/windows/overlapped_source.rs
+++ b/cros_async/src/sys/windows/overlapped_source.rs
@@ -418,6 +418,7 @@
use super::*;
use crate::mem::VecIoWrapper;
+ use crate::ExecutorTrait;
fn tempfile_path() -> (PathBuf, TempDir) {
let dir = tempfile::TempDir::new().unwrap();
diff --git a/cros_async/src/sys/windows/tokio_source.rs b/cros_async/src/sys/windows/tokio_source.rs
new file mode 100644
index 0000000..676edb7
--- /dev/null
+++ b/cros_async/src/sys/windows/tokio_source.rs
@@ -0,0 +1,291 @@
+// Copyright 2022 The ChromiumOS Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+use std::fs::File;
+use std::io;
+use std::io::Read;
+use std::io::Seek;
+use std::io::SeekFrom;
+use std::io::Write;
+use std::mem::ManuallyDrop;
+use std::sync::Arc;
+
+use base::AsRawDescriptor;
+use base::FileReadWriteAtVolatile;
+use base::FileReadWriteVolatile;
+use base::FromRawDescriptor;
+use base::PunchHole;
+use base::VolatileSlice;
+use base::WriteZeroesAt;
+use smallvec::SmallVec;
+use sync::Mutex;
+
+use crate::mem::MemRegion;
+use crate::AsyncError;
+use crate::AsyncResult;
+use crate::BackingMemory;
+
+#[derive(Debug, thiserror::Error)]
+pub enum Error {
+ #[error("An error occurred trying to seek: {0}.")]
+ IoSeekError(io::Error),
+ #[error("An error occurred trying to read: {0}.")]
+ IoReadError(io::Error),
+ #[error("An error occurred trying to write: {0}.")]
+ IoWriteError(io::Error),
+ #[error("An error occurred trying to flush: {0}.")]
+ IoFlushError(io::Error),
+ #[error("An error occurred trying to punch hole: {0}.")]
+ IoPunchHoleError(io::Error),
+ #[error("An error occurred trying to write zeroes: {0}.")]
+ IoWriteZeroesError(io::Error),
+ #[error("Failed to join task: '{0}'")]
+ Join(tokio::task::JoinError),
+ #[error("An error occurred trying to duplicate source handles: {0}.")]
+ HandleDuplicationFailed(io::Error),
+ #[error("An error occurred trying to wait on source handles: {0}.")]
+ HandleWaitFailed(base::Error),
+ #[error("An error occurred trying to get a VolatileSlice into BackingMemory: {0}.")]
+ BackingMemoryVolatileSliceFetchFailed(crate::mem::Error),
+ #[error("TokioSource is gone, so no handles are available to fulfill the IO request.")]
+ NoTokioSource,
+ #[error("Operation on TokioSource is cancelled.")]
+ OperationCancelled,
+ #[error("Operation on TokioSource was aborted (unexpected).")]
+ OperationAborted,
+}
+
+impl From<Error> for AsyncError {
+ fn from(e: Error) -> AsyncError {
+ AsyncError::SysVariants(e.into())
+ }
+}
+
+impl From<Error> for io::Error {
+ fn from(e: Error) -> Self {
+ use Error::*;
+ match e {
+ IoSeekError(e) => e,
+ IoReadError(e) => e,
+ IoWriteError(e) => e,
+ IoFlushError(e) => e,
+ IoPunchHoleError(e) => e,
+ IoWriteZeroesError(e) => e,
+ Join(e) => io::Error::new(io::ErrorKind::Other, e),
+ HandleDuplicationFailed(e) => e,
+ HandleWaitFailed(e) => e.into(),
+ BackingMemoryVolatileSliceFetchFailed(e) => io::Error::new(io::ErrorKind::Other, e),
+ NoTokioSource => io::Error::new(io::ErrorKind::Other, NoTokioSource),
+ OperationCancelled => io::Error::new(io::ErrorKind::Interrupted, OperationCancelled),
+ OperationAborted => io::Error::new(io::ErrorKind::Interrupted, OperationAborted),
+ }
+ }
+}
+
+pub type Result<T> = std::result::Result<T, Error>;
+
+pub struct TokioSource<T: AsRawDescriptor> {
+ source: Option<T>,
+ source_file: Arc<Mutex<Option<ManuallyDrop<File>>>>,
+ runtime: tokio::runtime::Handle,
+}
+
+impl<T: AsRawDescriptor> TokioSource<T> {
+ pub(crate) fn new(source: T, runtime: tokio::runtime::Handle) -> Result<TokioSource<T>> {
+ let descriptor = source.as_raw_descriptor();
+ // SAFETY: The Drop implementation makes sure `source` outlives `source_file`.
+ let source_file = unsafe { ManuallyDrop::new(File::from_raw_descriptor(descriptor)) };
+ Ok(Self {
+ source: Some(source),
+ source_file: Arc::new(Mutex::new(Some(source_file))),
+ runtime,
+ })
+ }
+ #[inline]
+ fn get_slices(
+ mem: &Arc<dyn BackingMemory + Send + Sync>,
+ mem_offsets: Vec<MemRegion>,
+ ) -> Result<SmallVec<[VolatileSlice<'_>; 16]>> {
+ mem_offsets
+ .into_iter()
+ .map(|region| {
+ mem.get_volatile_slice(region)
+ .map_err(Error::BackingMemoryVolatileSliceFetchFailed)
+ })
+ .collect::<Result<SmallVec<[VolatileSlice; 16]>>>()
+ }
+ pub fn as_source(&self) -> &T {
+ self.source.as_ref().unwrap()
+ }
+ pub fn as_source_mut(&mut self) -> &mut T {
+ self.source.as_mut().unwrap()
+ }
+ pub async fn fdatasync(&self) -> AsyncResult<()> {
+ // TODO(b/282003931): Fall back to regular fsync.
+ self.fsync().await
+ }
+ pub async fn fsync(&self) -> AsyncResult<()> {
+ let source_file = self.source_file.clone();
+ Ok(self
+ .runtime
+ .spawn_blocking(move || {
+ source_file
+ .lock()
+ .as_mut()
+ .ok_or(Error::OperationCancelled)?
+ .flush()
+ .map_err(Error::IoFlushError)
+ })
+ .await
+ .map_err(Error::Join)??)
+ }
+ pub fn into_source(mut self) -> T {
+ self.source_file.lock().take();
+ self.source.take().unwrap()
+ }
+ pub async fn punch_hole(&self, file_offset: u64, len: u64) -> AsyncResult<()> {
+ let source_file = self.source_file.clone();
+ Ok(self
+ .runtime
+ .spawn_blocking(move || {
+ source_file
+ .lock()
+ .as_mut()
+ .ok_or(Error::OperationCancelled)?
+ .punch_hole(file_offset, len)
+ .map_err(Error::IoPunchHoleError)
+ })
+ .await
+ .map_err(Error::Join)??)
+ }
+ pub async fn read_to_mem(
+ &self,
+ file_offset: Option<u64>,
+ mem: Arc<dyn BackingMemory + Send + Sync>,
+ mem_offsets: impl IntoIterator<Item = MemRegion>,
+ ) -> AsyncResult<usize> {
+ let mem_offsets = mem_offsets.into_iter().collect();
+ let source_file = self.source_file.clone();
+ Ok(self
+ .runtime
+ .spawn_blocking(move || {
+ let mut file_lock = source_file.lock();
+ let file = file_lock.as_mut().ok_or(Error::OperationCancelled)?;
+ let memory_slices = Self::get_slices(&mem, mem_offsets)?;
+ match file_offset {
+ Some(file_offset) => file
+ .read_vectored_at_volatile(memory_slices.as_slice(), file_offset)
+ .map_err(Error::IoReadError),
+ None => file
+ .read_vectored_volatile(memory_slices.as_slice())
+ .map_err(Error::IoReadError),
+ }
+ })
+ .await
+ .map_err(Error::Join)??)
+ }
+ pub async fn read_to_vec(
+ &self,
+ file_offset: Option<u64>,
+ mut vec: Vec<u8>,
+ ) -> AsyncResult<(usize, Vec<u8>)> {
+ let source_file = self.source_file.clone();
+ Ok(self
+ .runtime
+ .spawn_blocking(move || {
+ let mut file_lock = source_file.lock();
+ let file = file_lock.as_mut().ok_or(Error::OperationCancelled)?;
+ if let Some(file_offset) = file_offset {
+ file.seek(SeekFrom::Start(file_offset))
+ .map_err(Error::IoSeekError)?;
+ }
+ Ok::<(usize, Vec<u8>), Error>((
+ file.read(vec.as_mut_slice()).map_err(Error::IoReadError)?,
+ vec,
+ ))
+ })
+ .await
+ .map_err(Error::Join)??)
+ }
+ pub async fn wait_readable(&self) -> AsyncResult<()> {
+ unimplemented!();
+ }
+ pub async fn wait_for_handle(&self) -> AsyncResult<()> {
+ let waiter = super::wait_for_handle::WaitForHandle::new(self.source.as_ref().unwrap());
+ Ok(waiter.await?)
+ }
+ pub async fn write_from_mem(
+ &self,
+ file_offset: Option<u64>,
+ mem: Arc<dyn BackingMemory + Send + Sync>,
+ mem_offsets: impl IntoIterator<Item = MemRegion>,
+ ) -> AsyncResult<usize> {
+ let mem_offsets = mem_offsets.into_iter().collect();
+ let source_file = self.source_file.clone();
+ Ok(self
+ .runtime
+ .spawn_blocking(move || {
+ let mut file_lock = source_file.lock();
+ let file = file_lock.as_mut().ok_or(Error::OperationCancelled)?;
+ let memory_slices = Self::get_slices(&mem, mem_offsets)?;
+ match file_offset {
+ Some(file_offset) => file
+ .write_vectored_at_volatile(memory_slices.as_slice(), file_offset)
+ .map_err(Error::IoWriteError),
+ None => file
+ .write_vectored_volatile(memory_slices.as_slice())
+ .map_err(Error::IoWriteError),
+ }
+ })
+ .await
+ .map_err(Error::Join)??)
+ }
+ pub async fn write_from_vec(
+ &self,
+ file_offset: Option<u64>,
+ vec: Vec<u8>,
+ ) -> AsyncResult<(usize, Vec<u8>)> {
+ let source_file = self.source_file.clone();
+ Ok(self
+ .runtime
+ .spawn_blocking(move || {
+ let mut file_lock = source_file.lock();
+ let file = file_lock.as_mut().ok_or(Error::OperationCancelled)?;
+ if let Some(file_offset) = file_offset {
+ file.seek(SeekFrom::Start(file_offset))
+ .map_err(Error::IoSeekError)?;
+ }
+ Ok::<(usize, Vec<u8>), Error>((
+ file.write(vec.as_slice()).map_err(Error::IoWriteError)?,
+ vec,
+ ))
+ })
+ .await
+ .map_err(Error::Join)??)
+ }
+ pub async fn write_zeroes_at(&self, file_offset: u64, len: u64) -> AsyncResult<()> {
+ let source_file = self.source_file.clone();
+ Ok(self
+ .runtime
+ .spawn_blocking(move || {
+ // ZeroRange calls `punch_hole` which doesn't extend the File size if it needs to.
+ // Will fix if it becomes a problem.
+ source_file
+ .lock()
+ .as_mut()
+ .ok_or(Error::OperationCancelled)?
+ .write_zeroes_at(file_offset, len as usize)
+ .map_err(Error::IoWriteZeroesError)
+ .map(|_| ())
+ })
+ .await
+ .map_err(Error::Join)??)
+ }
+}
+impl<T: AsRawDescriptor> Drop for TokioSource<T> {
+ fn drop(&mut self) {
+ let mut source_file = self.source_file.lock();
+ source_file.take();
+ }
+}
diff --git a/cros_async/src/tokio_executor.rs b/cros_async/src/tokio_executor.rs
new file mode 100644
index 0000000..b84fcca
--- /dev/null
+++ b/cros_async/src/tokio_executor.rs
@@ -0,0 +1,170 @@
+// Copyright 2023 The ChromiumOS Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+use std::future::Future;
+use std::pin::Pin;
+use std::sync::Arc;
+use std::sync::OnceLock;
+
+use base::AsRawDescriptors;
+use base::RawDescriptor;
+use tokio::runtime::Runtime;
+use tokio::task::LocalSet;
+
+use crate::sys::platform::tokio_source::TokioSource;
+use crate::AsyncError;
+use crate::AsyncResult;
+use crate::ExecutorTrait;
+use crate::IntoAsync;
+use crate::IoSource;
+use crate::TaskHandle;
+
+mod send_wrapper {
+ use std::thread;
+
+ #[derive(Clone)]
+ pub(super) struct SendWrapper<T> {
+ instance: T,
+ thread_id: thread::ThreadId,
+ }
+
+ impl<T> SendWrapper<T> {
+ pub(super) fn new(instance: T) -> SendWrapper<T> {
+ SendWrapper {
+ instance,
+ thread_id: thread::current().id(),
+ }
+ }
+ }
+
+ // SAFETY: panics when the value is accessed on the wrong thread.
+ unsafe impl<T> Send for SendWrapper<T> {}
+ // SAFETY: panics when the value is accessed on the wrong thread.
+ unsafe impl<T> Sync for SendWrapper<T> {}
+
+ impl<T> Drop for SendWrapper<T> {
+ fn drop(&mut self) {
+ if self.thread_id != thread::current().id() {
+ panic!("SendWrapper value was dropped on the wrong thread");
+ }
+ }
+ }
+
+ impl<T> std::ops::Deref for SendWrapper<T> {
+ type Target = T;
+
+ fn deref(&self) -> &T {
+ if self.thread_id != thread::current().id() {
+ panic!("SendWrapper value was accessed on the wrong thread");
+ }
+ &self.instance
+ }
+ }
+}
+
+#[derive(Clone)]
+pub struct TokioExecutor {
+ runtime: Arc<Runtime>,
+ local_set: Arc<OnceLock<send_wrapper::SendWrapper<LocalSet>>>,
+}
+
+impl TokioExecutor {
+ pub fn new() -> AsyncResult<Self> {
+ Ok(TokioExecutor {
+ runtime: Arc::new(Runtime::new().map_err(AsyncError::Io)?),
+ local_set: Arc::new(OnceLock::new()),
+ })
+ }
+}
+
+impl ExecutorTrait for TokioExecutor {
+ fn async_from<'a, F: IntoAsync + 'a>(&self, f: F) -> AsyncResult<IoSource<F>> {
+ Ok(IoSource::Tokio(TokioSource::new(
+ f,
+ self.runtime.handle().clone(),
+ )?))
+ }
+
+ fn run_until<F: Future>(&self, f: F) -> AsyncResult<F::Output> {
+ let local_set = self
+ .local_set
+ .get_or_init(|| send_wrapper::SendWrapper::new(LocalSet::new()));
+ Ok(self
+ .runtime
+ .block_on(async { local_set.run_until(f).await }))
+ }
+
+ fn spawn<F>(&self, f: F) -> TaskHandle<F::Output>
+ where
+ F: Future + Send + 'static,
+ F::Output: Send + 'static,
+ {
+ TaskHandle::Tokio(TokioTaskHandle {
+ join_handle: Some(self.runtime.spawn(f)),
+ })
+ }
+
+ fn spawn_blocking<F, R>(&self, f: F) -> TaskHandle<R>
+ where
+ F: FnOnce() -> R + Send + 'static,
+ R: Send + 'static,
+ {
+ TaskHandle::Tokio(TokioTaskHandle {
+ join_handle: Some(self.runtime.spawn_blocking(f)),
+ })
+ }
+
+ fn spawn_local<F>(&self, f: F) -> TaskHandle<F::Output>
+ where
+ F: Future + 'static,
+ F::Output: 'static,
+ {
+ let local_set = self
+ .local_set
+ .get_or_init(|| send_wrapper::SendWrapper::new(LocalSet::new()));
+ TaskHandle::Tokio(TokioTaskHandle {
+ join_handle: Some(local_set.spawn_local(f)),
+ })
+ }
+}
+
+impl AsRawDescriptors for TokioExecutor {
+ fn as_raw_descriptors(&self) -> Vec<RawDescriptor> {
+ todo!();
+ }
+}
+
+pub struct TokioTaskHandle<T> {
+ join_handle: Option<tokio::task::JoinHandle<T>>,
+}
+impl<R> TokioTaskHandle<R> {
+ pub async fn cancel(mut self) -> Option<R> {
+ match self.join_handle.take() {
+ Some(handle) => {
+ handle.abort();
+ handle.await.ok()
+ }
+ None => None,
+ }
+ }
+ pub fn detach(mut self) {
+ self.join_handle.take();
+ }
+}
+impl<R: 'static> Future for TokioTaskHandle<R> {
+ type Output = R;
+ fn poll(self: Pin<&mut Self>, cx: &mut std::task::Context) -> std::task::Poll<Self::Output> {
+ let self_mut = self.get_mut();
+ Pin::new(self_mut.join_handle.as_mut().unwrap())
+ .poll(cx)
+ .map(|v| v.unwrap())
+ }
+}
+impl<T> std::ops::Drop for TokioTaskHandle<T> {
+ fn drop(&mut self) {
+ if let Some(handle) = self.join_handle.take() {
+ handle.abort()
+ }
+ }
+}
diff --git a/cros_async/tests/executor.rs b/cros_async/tests/executor.rs
index dd26976..84b009c 100644
--- a/cros_async/tests/executor.rs
+++ b/cros_async/tests/executor.rs
@@ -22,7 +22,7 @@
#[test]
fn cancel_pending_task() {
for kind in all_kinds() {
- let ex = Executor::with_executor_kind(kind.into()).unwrap();
+ let ex = Executor::with_executor_kind(kind).unwrap();
let task = ex.spawn(std::future::pending::<()>());
assert_eq!(ex.run_until(task.cancel()).unwrap(), None);
}
@@ -34,7 +34,7 @@
#[test]
fn cancel_ready_task() {
for kind in all_kinds() {
- let ex = Executor::with_executor_kind(kind.into()).unwrap();
+ let ex = Executor::with_executor_kind(kind).unwrap();
let (s, r) = futures::channel::oneshot::channel();
let mut s = Some(s);
let task = ex.spawn(futures::future::poll_fn(move |_| {
diff --git a/cros_fdt/Android.bp b/cros_fdt/Android.bp
index 8e1ef9c..73c4d15 100644
--- a/cros_fdt/Android.bp
+++ b/cros_fdt/Android.bp
@@ -1,5 +1,6 @@
// This file is generated by cargo_embargo.
-// Do not modify this file as most changes will be overridden on upgrade.
+// Do not modify this file after the first "rust_*" or "genrule" module
+// because the changes will be overridden on upgrade.
// Content before the first "rust_*" or "genrule" module is preserved.
package {
@@ -27,6 +28,7 @@
edition: "2021",
rustlibs: [
"libanyhow",
+ "libindexmap",
"libthiserror",
],
proc_macros: ["libremain"],
@@ -43,6 +45,7 @@
edition: "2021",
rustlibs: [
"libanyhow",
+ "libindexmap",
"libthiserror",
],
proc_macros: ["libremain"],
diff --git a/cros_fdt/Cargo.toml b/cros_fdt/Cargo.toml
index a66784f..836e6c8 100644
--- a/cros_fdt/Cargo.toml
+++ b/cros_fdt/Cargo.toml
@@ -6,5 +6,6 @@
[dependencies]
anyhow = "*"
+indexmap = "*"
remain = "*"
thiserror = "1.0.20"
diff --git a/cros_fdt/src/fdt.rs b/cros_fdt/src/fdt.rs
index c69968c..fba4c52 100644
--- a/cros_fdt/src/fdt.rs
+++ b/cros_fdt/src/fdt.rs
@@ -5,11 +5,12 @@
//! This module writes Flattened Devicetree blobs as defined here:
//! <https://devicetree-specification.readthedocs.io/en/stable/flattened-format.html>
-use std::collections::btree_map::Entry;
use std::collections::BTreeMap;
use std::convert::TryInto;
use std::io;
+use indexmap::map::Entry;
+use indexmap::IndexMap;
use remain::sorted;
use thiserror::Error as ThisError;
@@ -322,8 +323,8 @@
pub struct FdtNode {
/// Node name
pub(crate) name: String,
- pub(crate) props: BTreeMap<String, Vec<u8>>,
- pub(crate) subnodes: BTreeMap<String, FdtNode>,
+ pub(crate) props: IndexMap<String, Vec<u8>>,
+ pub(crate) subnodes: IndexMap<String, FdtNode>,
}
impl FdtNode {
@@ -331,8 +332,8 @@
// node or property names do not satisfy devicetree naming criteria.
pub(crate) fn new(
name: String,
- props: BTreeMap<String, Vec<u8>>,
- subnodes: BTreeMap<String, FdtNode>,
+ props: IndexMap<String, Vec<u8>>,
+ subnodes: IndexMap<String, FdtNode>,
) -> Result<Self> {
if !is_valid_node_name(&name) {
return Err(Error::InvalidName(name));
@@ -372,8 +373,8 @@
consume(input, name_nbytes + align_pad_len(name_nbytes, SIZE_U32))?;
// Node properties and subnodes
- let mut props = BTreeMap::new();
- let mut subnodes = BTreeMap::new();
+ let mut props = IndexMap::new();
+ let mut subnodes = IndexMap::new();
let mut encountered_subnode = false; // Properties must appear before subnodes
loop {
@@ -1174,24 +1175,6 @@
}
#[test]
- fn fdt_iter_nodes() {
- let mut root = FdtNode::empty("").unwrap();
- let node_a = root.subnode_mut("A").unwrap();
- node_a.subnode_mut("B").unwrap();
- node_a.subnode_mut("A").unwrap();
-
- let mut root_subnodes = root.iter_subnodes();
- let node_a = root_subnodes.next().unwrap();
- assert_eq!(node_a.name, "A");
- assert!(root_subnodes.next().is_none());
-
- let mut node_a_subnodes = node_a.iter_subnodes();
- assert_eq!(node_a_subnodes.next().unwrap().name, "A");
- assert_eq!(node_a_subnodes.next().unwrap().name, "B");
- assert!(node_a_subnodes.next().is_none());
- }
-
- #[test]
fn fdt_get_node() {
let fdt = Fdt::new(&[]);
assert!(fdt.get_node("/").is_some());
@@ -1440,18 +1423,87 @@
}
#[test]
- fn prop_order() {
- let expected_bytes: &[u8] = &[
+ fn node_order() {
+ let expected: &[u8] = &[
0xd0, 0x0d, 0xfe, 0xed, // 0000: magic (0xd00dfeed)
- 0x00, 0x00, 0x00, 0x84, // 0004: totalsize (0x84)
+ 0x00, 0x00, 0x00, 0x9C, // 0004: totalsize (0x9C)
0x00, 0x00, 0x00, 0x38, // 0008: off_dt_struct (0x38)
- 0x00, 0x00, 0x00, 0x78, // 000C: off_dt_strings (0x78)
+ 0x00, 0x00, 0x00, 0x9C, // 000C: off_dt_strings (0x9C)
0x00, 0x00, 0x00, 0x28, // 0010: off_mem_rsvmap (0x28)
0x00, 0x00, 0x00, 0x11, // 0014: version (0x11 = 17)
0x00, 0x00, 0x00, 0x10, // 0018: last_comp_version (0x10 = 16)
0x00, 0x00, 0x00, 0x00, // 001C: boot_cpuid_phys (0)
- 0x00, 0x00, 0x00, 0x0C, // 0020: size_dt_strings (0x0C)
- 0x00, 0x00, 0x00, 0x40, // 0024: size_dt_struct (0x40)
+ 0x00, 0x00, 0x00, 0x00, // 0020: size_dt_strings (0x00)
+ 0x00, 0x00, 0x00, 0x64, // 0024: size_dt_struct (0x64)
+ 0x00, 0x00, 0x00, 0x00, // 0028: rsvmap terminator (address = 0 high)
+ 0x00, 0x00, 0x00, 0x00, // 002C: rsvmap terminator (address = 0 low)
+ 0x00, 0x00, 0x00, 0x00, // 0030: rsvmap terminator (size = 0 high)
+ 0x00, 0x00, 0x00, 0x00, // 0034: rsvmap terminator (size = 0 low)
+ 0x00, 0x00, 0x00, 0x01, // 0038: FDT_BEGIN_NODE
+ 0x00, 0x00, 0x00, 0x00, // 003C: node name ("") + padding
+ 0x00, 0x00, 0x00, 0x01, // 0040: FDT_BEGIN_NODE
+ b'B', 0x00, 0x00, 0x00, // 0044: node name ("B") + padding
+ 0x00, 0x00, 0x00, 0x02, // 0048: FDT_END_NODE
+ 0x00, 0x00, 0x00, 0x01, // 004C: FDT_BEGIN_NODE
+ b'A', 0x00, 0x00, 0x00, // 0050: node name ("A") + padding
+ 0x00, 0x00, 0x00, 0x02, // 0054: FDT_END_NODE
+ 0x00, 0x00, 0x00, 0x01, // 0058: FDT_BEGIN_NODE
+ b'C', 0x00, 0x00, 0x00, // 005C: node name ("C") + padding
+ 0x00, 0x00, 0x00, 0x01, // 0060: FDT_BEGIN_NODE
+ b'D', 0x00, 0x00, 0x00, // 0064: node name ("D") + padding
+ 0x00, 0x00, 0x00, 0x02, // 0068: FDT_END_NODE
+ 0x00, 0x00, 0x00, 0x01, // 006C: FDT_BEGIN_NODE
+ b'E', 0x00, 0x00, 0x00, // 0070: node name ("E") + padding
+ 0x00, 0x00, 0x00, 0x02, // 0074: FDT_END_NODE
+ 0x00, 0x00, 0x00, 0x01, // 0078: FDT_BEGIN_NODE
+ b'B', 0x00, 0x00, 0x00, // 007C: node name ("B") + padding
+ 0x00, 0x00, 0x00, 0x02, // 0080: FDT_END_NODE
+ 0x00, 0x00, 0x00, 0x01, // 0084: FDT_BEGIN_NODE
+ b'F', 0x00, 0x00, 0x00, // 0088: node name ("F") + padding
+ 0x00, 0x00, 0x00, 0x02, // 008C: FDT_END_NODE
+ 0x00, 0x00, 0x00, 0x02, // 0090: FDT_END_NODE
+ 0x00, 0x00, 0x00, 0x02, // 0094: FDT_END_NODE
+ 0x00, 0x00, 0x00, 0x09, // 0098: FDT_END
+ ];
+
+ let mut fdt = Fdt::new(&[]);
+ let root = fdt.root_mut();
+ let root_subnode_names = ["B", "A", "C"];
+ let node_c_subnode_names = ["D", "E", "B", "F"];
+ for n in root_subnode_names {
+ root.subnode_mut(n).unwrap();
+ }
+ let node_c = root.subnode_mut("C").unwrap();
+ for n in node_c_subnode_names {
+ node_c.subnode_mut(n).unwrap();
+ }
+
+ assert!(root
+ .iter_subnodes()
+ .zip(root_subnode_names)
+ .all(|(sn, n)| sn.name == n));
+ assert!(root
+ .subnode("C")
+ .unwrap()
+ .iter_subnodes()
+ .zip(node_c_subnode_names)
+ .all(|(sn, n)| sn.name == n));
+ assert_eq!(fdt.finish().unwrap(), expected);
+ }
+
+ #[test]
+ fn prop_order() {
+ let expected: &[u8] = &[
+ 0xd0, 0x0d, 0xfe, 0xed, // 0000: magic (0xd00dfeed)
+ 0x00, 0x00, 0x00, 0x98, // 0004: totalsize (0x98)
+ 0x00, 0x00, 0x00, 0x38, // 0008: off_dt_struct (0x38)
+ 0x00, 0x00, 0x00, 0x88, // 000C: off_dt_strings (0x88)
+ 0x00, 0x00, 0x00, 0x28, // 0010: off_mem_rsvmap (0x28)
+ 0x00, 0x00, 0x00, 0x11, // 0014: version (0x11 = 17)
+ 0x00, 0x00, 0x00, 0x10, // 0018: last_comp_version (0x10 = 16)
+ 0x00, 0x00, 0x00, 0x00, // 001C: boot_cpuid_phys (0)
+ 0x00, 0x00, 0x00, 0x10, // 0020: size_dt_strings (0x10)
+ 0x00, 0x00, 0x00, 0x50, // 0024: size_dt_struct (0x50)
0x00, 0x00, 0x00, 0x00, // 0028: rsvmap terminator (address = 0 high)
0x00, 0x00, 0x00, 0x00, // 002C: rsvmap terminator (address = 0 low)
0x00, 0x00, 0x00, 0x00, // 0030: rsvmap terminator (size = 0 high)
@@ -1461,35 +1513,39 @@
0x00, 0x00, 0x00, 0x03, // 0040: FDT_PROP (u32)
0x00, 0x00, 0x00, 0x04, // 0044: prop len (4)
0x00, 0x00, 0x00, 0x00, // 0048: prop nameoff (0x00)
- 0x00, 0x00, 0x00, 0x01, // 004C: prop u32 value (0x1)
+ 0x76, 0x61, 0x6c, 0x00, // 004C: prop string value ("val")
0x00, 0x00, 0x00, 0x03, // 0050: FDT_PROP (u32)
0x00, 0x00, 0x00, 0x04, // 0054: prop len (4)
0x00, 0x00, 0x00, 0x04, // 0058: prop nameoff (0x04)
0x00, 0x00, 0x00, 0x02, // 005C: prop u32 high (0x2)
0x00, 0x00, 0x00, 0x03, // 0060: FDT_PROP (u32)
0x00, 0x00, 0x00, 0x04, // 0064: prop len (4)
- 0x00, 0x00, 0x00, 0x08, // 0068: prop nameoff (0x22)
- 0x76, 0x61, 0x6c, 0x00, // 006C: prop string value ("val")
- 0x00, 0x00, 0x00, 0x02, // 0070: FDT_END_NODE
- 0x00, 0x00, 0x00, 0x09, // 0074: FDT_END
- b'a', b'b', b'c', 0x00, // 0078: strings + 0x00: "abc"
- b'd', b'e', b'f', 0x00, // 007C: strings + 0x04: "def"
- b'g', b'h', b'i', 0x00, // 0080: strings + 0x08: "ghi"
+ 0x00, 0x00, 0x00, 0x08, // 0068: prop nameoff (0x08)
+ 0x00, 0x00, 0x00, 0x01, // 006C: prop u32 value (0x1)
+ 0x00, 0x00, 0x00, 0x03, // 0070: FDT_PROP (u32)
+ 0x00, 0x00, 0x00, 0x04, // 0074: prop len (4)
+ 0x00, 0x00, 0x00, 0x0C, // 0078: prop nameoff (0x0C)
+ 0x00, 0x00, 0x00, 0x03, // 007C: prop u32 value (0x3)
+ 0x00, 0x00, 0x00, 0x02, // 0080: FDT_END_NODE
+ 0x00, 0x00, 0x00, 0x09, // 0084: FDT_END
+ b'g', b'h', b'i', 0x00, // 0088: strings + 0x00: "ghi"
+ b'd', b'e', b'f', 0x00, // 008C: strings + 0x04: "def"
+ b'a', b'b', b'c', 0x00, // 0090: strings + 0x08: "abc"
+ b'b', b'c', b'd', 0x00, // 0094: strings + 0x0C: "bcd"
];
let mut fdt = Fdt::new(&[]);
let root_node = fdt.root_mut();
- root_node.set_prop("abc", 1u32).unwrap();
- root_node.set_prop("def", 2u32).unwrap();
root_node.set_prop("ghi", "val").unwrap();
- assert_eq!(fdt.finish().unwrap(), expected_bytes);
+ root_node.set_prop("def", 2u32).unwrap();
+ root_node.set_prop("abc", 1u32).unwrap();
+ root_node.set_prop("bcd", 3u32).unwrap();
- let mut fdt = Fdt::new(&[]);
- let root_node = fdt.root_mut();
- root_node.set_prop("ghi", "val").unwrap();
- root_node.set_prop("def", 2u32).unwrap();
- root_node.set_prop("abc", 1u32).unwrap();
- assert_eq!(fdt.finish().unwrap(), expected_bytes);
+ assert_eq!(
+ root_node.prop_names().collect::<Vec<_>>(),
+ ["ghi", "def", "abc", "bcd"]
+ );
+ assert_eq!(fdt.finish().unwrap(), expected);
}
#[test]
diff --git a/cros_fdt/test-files/base.dtb b/cros_fdt/test-files/base.dtb
index c0884ed..84736d2 100644
--- a/cros_fdt/test-files/base.dtb
+++ b/cros_fdt/test-files/base.dtb
Binary files differ
diff --git a/cros_fdt/test-files/base.dts b/cros_fdt/test-files/base.dts
index abb90ce..6b081a4 100644
--- a/cros_fdt/test-files/base.dts
+++ b/cros_fdt/test-files/base.dts
@@ -17,14 +17,14 @@
cpu@0 {
device_type = "cpu";
- compatible = "arm,arm-v8";
+ compatible = "arm,armv8";
enable-method = "psci";
reg = <0x00>;
};
cpu@1 {
device_type = "cpu";
- compatible = "arm,arm-v8";
+ compatible = "arm,armv8";
enable-method = "psci";
reg = <0x01>;
};
diff --git a/cros_tracing/Android.bp b/cros_tracing/Android.bp
index ed18c8d..b132421 100644
--- a/cros_tracing/Android.bp
+++ b/cros_tracing/Android.bp
@@ -1,5 +1,6 @@
// This file is generated by cargo_embargo.
-// Do not modify this file as most changes will be overridden on upgrade.
+// Do not modify this file after the first "rust_*" or "genrule" module
+// because the changes will be overridden on upgrade.
// Content before the first "rust_*" or "genrule" module is preserved.
package {
diff --git a/cros_tracing_types/Android.bp b/cros_tracing_types/Android.bp
index ca29fd7..fb9a25b 100644
--- a/cros_tracing_types/Android.bp
+++ b/cros_tracing_types/Android.bp
@@ -1,5 +1,6 @@
// This file is generated by cargo_embargo.
-// Do not modify this file as most changes will be overridden on upgrade.
+// Do not modify this file after the first "rust_*" or "genrule" module
+// because the changes will be overridden on upgrade.
// Content before the first "rust_*" or "genrule" module is preserved.
package {
diff --git a/crosvm_cli/Android.bp b/crosvm_cli/Android.bp
index 3ce8499..ba35698 100644
--- a/crosvm_cli/Android.bp
+++ b/crosvm_cli/Android.bp
@@ -1,5 +1,6 @@
// This file is generated by cargo_embargo.
-// Do not modify this file as most changes will be overridden on upgrade.
+// Do not modify this file after the first "rust_*" or "genrule" module
+// because the changes will be overridden on upgrade.
// Content before the first "rust_*" or "genrule" module is preserved.
package {
diff --git a/crosvm_control/Android.bp b/crosvm_control/Android.bp
index bf87a30..abed499 100644
--- a/crosvm_control/Android.bp
+++ b/crosvm_control/Android.bp
@@ -1,5 +1,6 @@
// This file is generated by cargo_embargo.
-// Do not modify this file as most changes will be overridden on upgrade.
+// Do not modify this file after the first "rust_*" or "genrule" module
+// because the changes will be overridden on upgrade.
// Content before the first "rust_*" or "genrule" module is preserved.
package {
diff --git a/crosvm_control/src/lib.rs b/crosvm_control/src/lib.rs
index a122bb7..11368ec 100644
--- a/crosvm_control/src/lib.rs
+++ b/crosvm_control/src/lib.rs
@@ -438,9 +438,13 @@
///
/// # Safety
///
-/// Function is unsafe due to raw pointer usage - a null pointer could be passed in. Usage of
-/// !raw_pointer.is_null() checks should prevent unsafe behavior but the caller should ensure no
-/// null pointers are passed.
+/// Function is unsafe due to raw pointer usage.
+/// Trivial !raw_pointer.is_null() checks prevent some unsafe behavior, but the caller should
+/// ensure no null pointers are passed into the function.
+///
+/// The safety requirements for `socket_path` and `dev_path` are the same as the ones from
+/// `CStr::from_ptr()`. If `out_port` is non-null, it must point to a writable 1-byte
+/// region.
#[no_mangle]
pub unsafe extern "C" fn crosvm_client_usb_attach(
socket_path: *const c_char,
@@ -475,6 +479,63 @@
.unwrap_or(false)
}
+/// Attaches a u2f security key to crosvm instance whose control socket is listening on
+/// `socket_path`.
+///
+///
+/// # Arguments
+///
+/// * `socket_path` - Path to the crosvm control socket
+/// * `hidraw_path` - Path to the hidraw device of the security key (like `/dev/hidraw0`)
+/// * `out_port` - (optional) internal port will be written here if provided.
+///
+/// The function returns true on success or false if an error occurred.
+///
+/// # Safety
+///
+/// Function is unsafe due to raw pointer usage.
+/// Trivial !raw_pointer.is_null() checks prevent some unsafe behavior, but the caller should
+/// ensure no null pointers are passed into the function.
+///
+/// The safety requirements for `socket_path` and `hidraw_path` are the same as the ones from
+/// `CStr::from_ptr()`. If `out_port` is non-null, it must point to a writable 1-byte
+/// region.
+#[no_mangle]
+pub unsafe extern "C" fn crosvm_client_security_key_attach(
+ socket_path: *const c_char,
+ hidraw_path: *const c_char,
+ out_port: *mut u8,
+) -> bool {
+ catch_unwind(|| {
+ if let Some(socket_path) = validate_socket_path(socket_path) {
+ if hidraw_path.is_null() {
+ return false;
+ }
+ let hidraw_path = Path::new(
+ // SAFETY: just checked that `hidraw_path` is not null.
+ unsafe { CStr::from_ptr(hidraw_path) }
+ .to_str()
+ .unwrap_or(""),
+ );
+
+ if let Ok(UsbControlResult::Ok { port }) =
+ do_security_key_attach(socket_path, hidraw_path)
+ {
+ if !out_port.is_null() {
+ // SAFETY: trivially safe
+ unsafe { *out_port = port };
+ }
+ true
+ } else {
+ false
+ }
+ } else {
+ false
+ }
+ })
+ .unwrap_or(false)
+}
+
/// Detaches an USB device from crosvm instance whose control socket is listening on `socket_path`.
/// `port` determines device to be detached.
///
diff --git a/devices/Android.bp b/devices/Android.bp
index de3f88c..efed696 100644
--- a/devices/Android.bp
+++ b/devices/Android.bp
@@ -1,5 +1,6 @@
// This file is generated by cargo_embargo.
-// Do not modify this file as most changes will be overridden on upgrade.
+// Do not modify this file after the first "rust_*" or "genrule" module
+// because the changes will be overridden on upgrade.
// Content before the first "rust_*" or "genrule" module is preserved.
package {
@@ -26,6 +27,8 @@
},
edition: "2021",
features: [
+ "android_display",
+ "android_display_stub",
"audio",
"balloon",
"geniezone",
@@ -65,6 +68,7 @@
"liblibc",
"liblinux_input_sys",
"libmemoffset",
+ "libmetrics",
"libminijail_rust",
"libnamed_lock",
"libnet_sys",
@@ -115,6 +119,8 @@
},
edition: "2021",
features: [
+ "android_display",
+ "android_display_stub",
"audio",
"balloon",
"geniezone",
@@ -155,6 +161,7 @@
"liblibc",
"liblinux_input_sys",
"libmemoffset",
+ "libmetrics",
"libminijail_rust",
"libnamed_lock",
"libnet_sys",
@@ -200,6 +207,8 @@
srcs: ["src/lib.rs"],
edition: "2021",
features: [
+ "android_display",
+ "android_display_stub",
"audio",
"balloon",
"geniezone",
@@ -237,6 +246,7 @@
"liblibc",
"liblinux_input_sys",
"libmemoffset",
+ "libmetrics",
"libminijail_rust",
"libnet_sys",
"libnet_util",
diff --git a/devices/Cargo.toml b/devices/Cargo.toml
index 8e066a9..de3b798 100644
--- a/devices/Cargo.toml
+++ b/devices/Cargo.toml
@@ -5,6 +5,8 @@
edition = "2021"
[features]
+android_display = ["gpu_display/android_display"]
+android_display_stub = ["gpu_display/android_display_stub"]
arc_quota = ["dbus", "protobuf", "system_api"]
audio = []
audio_cras = ["libcras"]
@@ -13,6 +15,7 @@
gunyah = []
libvda-stub = ["libvda/libvda-stub"]
net = []
+pvclock = []
geniezone = []
usb = []
vaapi = ["cros-codecs/vaapi", "crc32fast"]
@@ -44,7 +47,7 @@
base = { path = "../base" }
bit_field = { path = "../bit_field" }
cfg-if = "1.0.0"
-chrono = { version = "0.4.19", features = [ "serde", "clock" ], default-features = false }
+chrono = { version = "0.4.34", features = [ "serde", "now" ], default-features = false }
crc32fast = { version = "1.2.1", optional = true }
cros_async = { path = "../cros_async" }
cros-codecs = { version = "0.0.4", optional = true }
@@ -63,6 +66,7 @@
libvda = { path = "../media/libvda", optional = true }
linux_input_sys = { path = "../linux_input_sys" }
memoffset = { version = "0.6" }
+metrics = { path = "../metrics" }
net_util = { path = "../net_util" }
num-traits = "0.2"
once_cell = "1.7.2"
@@ -72,7 +76,7 @@
rand = "0.8"
remain = "*"
resources = { path = "../resources" }
-serde = { version = "1", features = [ "derive" ] }
+serde = { version = "1", features = [ "derive", "rc" ] }
serde_json = "1"
serde_keyvalue = { path = "../serde_keyvalue", features = ["argh_derive"] }
smallvec = "1.6.1"
@@ -99,7 +103,6 @@
[target.'cfg(windows)'.dependencies]
broker_ipc = { path = "../broker_ipc" }
-metrics = { path = "../metrics" }
tube_transporter = { path = "../tube_transporter" }
win_audio = { path = "../win_audio"}
win_util = { path = "../win_util"}
diff --git a/devices/src/acpi.rs b/devices/src/acpi.rs
index e4657dc..7244e02 100644
--- a/devices/src/acpi.rs
+++ b/devices/src/acpi.rs
@@ -7,6 +7,7 @@
use std::sync::atomic::AtomicBool;
use std::sync::atomic::Ordering;
use std::sync::Arc;
+use std::time::Instant;
use acpi_tables::aml;
use acpi_tables::aml::Aml;
@@ -23,6 +24,8 @@
use base::VmEventType;
use base::WaitContext;
use base::WorkerThread;
+use metrics::log_metric;
+use metrics::MetricEventType;
use serde::Deserialize;
use serde::Serialize;
use sync::Mutex;
@@ -663,38 +666,50 @@
active: AtomicBool,
vm_control_tube: Arc<Mutex<Tube>>,
pm_config: Arc<Mutex<PmConfig>>,
- debug_label: String,
+ metrics_event: MetricEventType,
+ armed_time: Arc<Mutex<Instant>>,
}
impl PmWakeupEvent {
pub fn new(
vm_control_tube: Arc<Mutex<Tube>>,
pm_config: Arc<Mutex<PmConfig>>,
- debug_label: String,
+ metrics_event: MetricEventType,
) -> Self {
Self {
active: AtomicBool::new(false),
vm_control_tube,
pm_config,
- debug_label,
+ metrics_event,
+ // Not actually armed, but simpler than wrapping with an Option.
+ armed_time: Arc::new(Mutex::new(Instant::now())),
}
}
pub fn trigger_wakeup(&self) -> anyhow::Result<()> {
if self.active.load(Ordering::SeqCst) && self.pm_config.lock().should_trigger_pme() {
+ let elapsed = self.armed_time.lock().elapsed().as_millis();
+ log_metric(
+ self.metrics_event.clone(),
+ elapsed.try_into().unwrap_or(i64::MAX),
+ );
+
let tube = self.vm_control_tube.lock();
tube.send(&VmRequest::Gpe(PM_WAKEUP_GPIO))
- .with_context(|| format!("{} failed to send pme", self.debug_label))?;
+ .with_context(|| format!("{:?} failed to send pme", self.metrics_event))?;
match tube.recv::<VmResponse>() {
Ok(VmResponse::Ok) => (),
- e => bail!("{} pme failure {:?}", self.debug_label, e),
+ e => bail!("{:?} pme failure {:?}", self.metrics_event, e),
}
}
Ok(())
}
pub fn set_active(&self, active: bool) {
- self.active.store(active, Ordering::SeqCst)
+ self.active.store(active, Ordering::SeqCst);
+ if active {
+ *self.armed_time.lock() = Instant::now();
+ }
}
}
diff --git a/devices/src/cmos.rs b/devices/src/cmos.rs
index 9c1db3b..0f99c16 100644
--- a/devices/src/cmos.rs
+++ b/devices/src/cmos.rs
@@ -5,6 +5,7 @@
use std::cmp::min;
use std::sync::Arc;
use std::time::Duration;
+use std::time::Instant;
use anyhow::anyhow;
use anyhow::Context;
@@ -24,6 +25,8 @@
use chrono::TimeZone;
use chrono::Timelike;
use chrono::Utc;
+use metrics::log_metric;
+use metrics::MetricEventType;
use serde::Deserialize;
use serde::Serialize;
use sync::Mutex;
@@ -86,17 +89,35 @@
alarm_fn: Option<AlarmFn>,
#[serde(skip_serializing)] // skip serializing the worker thread
worker: Option<WorkerThread<AlarmFn>>,
+ #[serde(skip_serializing)] // skip serializing the armed time
+ armed_time: Option<Arc<Mutex<Instant>>>,
}
struct AlarmFn {
irq: IrqEdgeEvent,
vm_control: Tube,
+ armed_time: Arc<Mutex<Instant>>,
}
impl AlarmFn {
+ fn new(irq: IrqEdgeEvent, vm_control: Tube) -> Self {
+ Self {
+ irq,
+ vm_control,
+ // Not actually armed, but simpler than wrapping with an Option.
+ armed_time: Arc::new(Mutex::new(Instant::now())),
+ }
+ }
+
fn fire(&self) -> anyhow::Result<()> {
self.irq.trigger().context("failed to trigger irq")?;
+ let elapsed = self.armed_time.lock().elapsed().as_millis();
+ log_metric(
+ MetricEventType::RtcWakeup,
+ elapsed.try_into().unwrap_or(i64::MAX),
+ );
+
// The Linux kernel expects wakeups to come via ACPI when ACPI is enabled. There's
// no real way to determine that here, so just send this unconditionally.
self.vm_control
@@ -125,7 +146,7 @@
mem_below_4g,
mem_above_4g,
now_fn,
- Some(AlarmFn { irq, vm_control }),
+ Some(AlarmFn::new(irq, vm_control)),
)
}
@@ -153,6 +174,7 @@
data[0x5c] = (high_mem >> 8) as u8;
data[0x5d] = (high_mem >> 16) as u8;
+ let armed_time = alarm_fn.as_ref().map(|a| a.armed_time.clone());
Ok(Cmos {
index: 0,
data,
@@ -161,6 +183,7 @@
alarm_time: None,
alarm_fn,
worker: None,
+ armed_time,
})
}
@@ -202,6 +225,9 @@
if self.alarm_fn.is_some() {
self.spawn_worker();
}
+ if let Some(armed_time) = self.armed_time.as_ref() {
+ *armed_time.lock() = Instant::now();
+ }
let duration = target
.signed_duration_since(now)
@@ -262,12 +288,10 @@
}
fn alarm_from_registers(year: i32, data: &[u8; DATA_LEN]) -> Option<DateTime<Utc>> {
- Utc.ymd_opt(
+ Utc.with_ymd_and_hms(
year,
from_bcd(data[RTC_REG_ALARM_MONTH as usize])?,
from_bcd(data[RTC_REG_ALARM_DAY as usize])?,
- )
- .and_hms_opt(
from_bcd(data[RTC_REG_ALARM_HOUR as usize])?,
from_bcd(data[RTC_REG_ALARM_MIN as usize])?,
from_bcd(data[RTC_REG_ALARM_SEC as usize])?,
@@ -418,8 +442,6 @@
#[cfg(test)]
mod tests {
- use chrono::NaiveDateTime;
-
use super::*;
use crate::suspendable_tests;
@@ -473,7 +495,7 @@
}
fn timestamp_to_datetime(timestamp: i64) -> DateTime<Utc> {
- Utc.from_utc_datetime(&NaiveDateTime::from_timestamp_opt(timestamp, 0).unwrap())
+ DateTime::from_timestamp(timestamp, 0).unwrap()
}
fn test_now_party_like_its_1999() -> DateTime<Utc> {
@@ -694,10 +716,7 @@
fn cmos_sleep_wake() {
// 2000-01-02T03:04:05+00:00
let now_fn = || timestamp_to_datetime(946782245);
- let alarm_fn = AlarmFn {
- irq: IrqEdgeEvent::new().unwrap(),
- vm_control: Tube::pair().unwrap().0,
- };
+ let alarm_fn = AlarmFn::new(IrqEdgeEvent::new().unwrap(), Tube::pair().unwrap().0);
let mut cmos = Cmos::new_inner(1024, 0, now_fn, Some(alarm_fn)).unwrap();
// A date later this year
diff --git a/devices/src/lib.rs b/devices/src/lib.rs
index a134a95..f6dfab0 100644
--- a/devices/src/lib.rs
+++ b/devices/src/lib.rs
@@ -252,6 +252,9 @@
}
}
+// Use 64MB chunks when writing the memory snapshot (if encryption is used).
+const MEMORY_SNAP_ENCRYPTED_CHUNK_SIZE_BYTES: usize = 1024 * 1024 * 64;
+
async fn snapshot_handler(
snapshot_writer: vm_control::SnapshotWriter,
guest_memory: &GuestMemory,
@@ -262,7 +265,11 @@
// VM & devices are stopped.
let guest_memory_metadata = unsafe {
guest_memory
- .snapshot(&mut snapshot_writer.raw_fragment("mem")?, compress_memory)
+ .snapshot(
+ &mut snapshot_writer
+ .raw_fragment_with_chunk_size("mem", MEMORY_SNAP_ENCRYPTED_CHUNK_SIZE_BYTES)?,
+ compress_memory,
+ )
.context("failed to snapshot memory")?
};
snapshot_writer.write_fragment("mem_metadata", &guest_memory_metadata)?;
diff --git a/devices/src/pci/pci_configuration.rs b/devices/src/pci/pci_configuration.rs
index 3559082..e7a1b91 100644
--- a/devices/src/pci/pci_configuration.rs
+++ b/devices/src/pci/pci_configuration.rs
@@ -833,7 +833,12 @@
.write_obj_volatile(value, reg_offset)
.expect("bad register offset");
}
- mmio_mapping.flush_uncached_guest_mapping(reg_offset)
+ if let Err(err) = mmio_mapping.flush_region(reg_offset, 4) {
+ error!(
+ "failed to flush write to pci mmio register ({}): {}",
+ reg_idx, err
+ );
+ }
}
}
@@ -1017,7 +1022,12 @@
mapping
.write_obj_volatile(new_val, offset)
.expect("memcpy failed");
- mapping.flush_uncached_guest_mapping(offset);
+ if let Err(err) = mapping.flush_region(offset, 4) {
+ error!(
+ "failed to flush write to pci cap in mmio register ({}): {}",
+ reg_idx, err
+ );
+ }
}
}
diff --git a/devices/src/pci/pci_root.rs b/devices/src/pci/pci_root.rs
index a029bae..b6d8d03 100644
--- a/devices/src/pci/pci_root.rs
+++ b/devices/src/pci/pci_root.rs
@@ -579,7 +579,9 @@
mapping
.write_obj_volatile(val, reg_offset)
.expect("memcpy failed");
- mapping.flush_uncached_guest_mapping(reg_offset);
+ if let Err(err) = mapping.flush_region(reg_offset, 4) {
+ error!("failed to flush write to mfd bit: {}", err);
+ }
}
}
}
diff --git a/devices/src/serial_device.rs b/devices/src/serial_device.rs
index 20454b5..dd47711 100644
--- a/devices/src/serial_device.rs
+++ b/devices/src/serial_device.rs
@@ -188,6 +188,7 @@
let evt = evt.try_clone().map_err(Error::CloneEvent)?;
keep_rds.push(evt.as_raw_descriptor());
cros_tracing::push_descriptors!(keep_rds);
+ metrics::push_descriptors(keep_rds);
let input: Option<Box<dyn SerialInput>> = if let Some(input_path) = &self.input {
let input_path = input_path.as_path();
diff --git a/devices/src/usb/backend/device.rs b/devices/src/usb/backend/device.rs
index a7086e0..c1231a4 100644
--- a/devices/src/usb/backend/device.rs
+++ b/devices/src/usb/backend/device.rs
@@ -1,4 +1,4 @@
-// Copyright 2023 The ChromiumOS Authors
+// Copyright 2024 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -29,6 +29,8 @@
use crate::usb::backend::endpoint::UsbEndpoint;
use crate::usb::backend::error::Error;
use crate::usb::backend::error::Result;
+use crate::usb::backend::fido_backend::fido_passthrough::FidoPassthroughDevice;
+use crate::usb::backend::fido_backend::transfer::FidoTransfer;
use crate::usb::backend::host_backend::host_device::HostDevice;
use crate::usb::backend::transfer::BackendTransfer;
use crate::usb::backend::transfer::BackendTransferHandle;
@@ -47,14 +49,19 @@
use crate::utils::EventLoop;
use crate::utils::FailHandle;
+/// This enum defines different USB backend implementations that we support. Each implementation
+/// needs to implement the `BackendDevice` trait as we dispatch on the enum based on the type.
+/// Each concrete implementation can take care of setting up the device-specific configurations.
pub enum BackendDeviceType {
// Real device on the host, backed by usbdevfs
HostDevice(HostDevice),
+ // Virtual security key implementation
+ FidoDevice(FidoPassthroughDevice),
}
impl AsRawDescriptor for BackendDeviceType {
fn as_raw_descriptor(&self) -> RawDescriptor {
- multi_dispatch!(self, BackendDeviceType, HostDevice, as_raw_descriptor)
+ multi_dispatch!(self, BackendDeviceType, HostDevice FidoDevice, as_raw_descriptor)
}
}
@@ -66,7 +73,7 @@
multi_dispatch!(
self,
BackendDeviceType,
- HostDevice,
+ HostDevice FidoDevice,
submit_backend_transfer,
transfer
)
@@ -76,7 +83,7 @@
multi_dispatch!(
self,
BackendDeviceType,
- HostDevice,
+ HostDevice FidoDevice,
detach_event_handler,
event_loop
)
@@ -86,7 +93,7 @@
multi_dispatch!(
self,
BackendDeviceType,
- HostDevice,
+ HostDevice FidoDevice,
request_transfer_buffer,
size
)
@@ -101,7 +108,7 @@
multi_dispatch!(
self,
BackendDeviceType,
- HostDevice,
+ HostDevice FidoDevice,
build_bulk_transfer,
ep_addr,
transfer_buffer,
@@ -117,7 +124,7 @@
multi_dispatch!(
self,
BackendDeviceType,
- HostDevice,
+ HostDevice FidoDevice,
build_interrupt_transfer,
ep_addr,
transfer_buffer
@@ -128,20 +135,20 @@
multi_dispatch!(
self,
BackendDeviceType,
- HostDevice,
+ HostDevice FidoDevice,
get_control_transfer_state
)
}
fn get_device_state(&mut self) -> Arc<RwLock<DeviceState>> {
- multi_dispatch!(self, BackendDeviceType, HostDevice, get_device_state)
+ multi_dispatch!(self, BackendDeviceType, HostDevice FidoDevice, get_device_state)
}
fn get_active_config_descriptor(&mut self) -> Result<ConfigDescriptorTree> {
multi_dispatch!(
self,
BackendDeviceType,
- HostDevice,
+ HostDevice FidoDevice,
get_active_config_descriptor
)
}
@@ -150,7 +157,7 @@
multi_dispatch!(
self,
BackendDeviceType,
- HostDevice,
+ HostDevice FidoDevice,
get_config_descriptor,
config
)
@@ -160,17 +167,17 @@
multi_dispatch!(
self,
BackendDeviceType,
- HostDevice,
+ HostDevice FidoDevice,
get_config_descriptor_by_index,
config_index
)
}
- fn get_device_descriptor_tree(&mut self) -> DeviceDescriptorTree {
+ fn get_device_descriptor_tree(&mut self) -> Result<DeviceDescriptorTree> {
multi_dispatch!(
self,
BackendDeviceType,
- HostDevice,
+ HostDevice FidoDevice,
get_device_descriptor_tree
)
}
@@ -179,7 +186,7 @@
multi_dispatch!(
self,
BackendDeviceType,
- HostDevice,
+ HostDevice FidoDevice,
get_active_configuration
)
}
@@ -188,7 +195,7 @@
multi_dispatch!(
self,
BackendDeviceType,
- HostDevice,
+ HostDevice FidoDevice,
set_active_configuration,
config
)
@@ -198,7 +205,7 @@
multi_dispatch!(
self,
BackendDeviceType,
- HostDevice,
+ HostDevice FidoDevice,
clear_feature,
value,
index
@@ -209,7 +216,7 @@
multi_dispatch!(
self,
BackendDeviceType,
- HostDevice,
+ HostDevice FidoDevice,
create_endpoints,
config_descriptor
)
@@ -218,34 +225,34 @@
impl XhciBackendDevice for BackendDeviceType {
fn get_backend_type(&self) -> BackendType {
- multi_dispatch!(self, BackendDeviceType, HostDevice, get_backend_type)
+ multi_dispatch!(self, BackendDeviceType, HostDevice FidoDevice, get_backend_type)
}
fn get_vid(&self) -> u16 {
- multi_dispatch!(self, BackendDeviceType, HostDevice, get_vid)
+ multi_dispatch!(self, BackendDeviceType, HostDevice FidoDevice, get_vid)
}
fn get_pid(&self) -> u16 {
- multi_dispatch!(self, BackendDeviceType, HostDevice, get_pid)
+ multi_dispatch!(self, BackendDeviceType, HostDevice FidoDevice, get_pid)
}
fn set_address(&mut self, address: UsbDeviceAddress) {
- multi_dispatch!(self, BackendDeviceType, HostDevice, set_address, address)
+ multi_dispatch!(self, BackendDeviceType, HostDevice FidoDevice, set_address, address)
}
fn reset(&mut self) -> Result<()> {
- multi_dispatch!(self, BackendDeviceType, HostDevice, reset)
+ multi_dispatch!(self, BackendDeviceType, HostDevice FidoDevice, reset)
}
fn get_speed(&self) -> Option<DeviceSpeed> {
- multi_dispatch!(self, BackendDeviceType, HostDevice, get_speed)
+ multi_dispatch!(self, BackendDeviceType, HostDevice FidoDevice, get_speed)
}
fn alloc_streams(&self, ep: u8, num_streams: u16) -> Result<()> {
multi_dispatch!(
self,
BackendDeviceType,
- HostDevice,
+ HostDevice FidoDevice,
alloc_streams,
ep,
num_streams
@@ -253,11 +260,11 @@
}
fn free_streams(&self, ep: u8) -> Result<()> {
- multi_dispatch!(self, BackendDeviceType, HostDevice, free_streams, ep)
+ multi_dispatch!(self, BackendDeviceType, HostDevice FidoDevice, free_streams, ep)
}
fn stop(&mut self) {
- multi_dispatch!(self, BackendDeviceType, HostDevice, stop)
+ multi_dispatch!(self, BackendDeviceType, HostDevice FidoDevice, stop)
}
}
@@ -281,16 +288,6 @@
}
}
-impl Drop for BackendDeviceType {
- fn drop(&mut self) {
- match self {
- BackendDeviceType::HostDevice(host_device) => {
- host_device.release_interfaces();
- }
- }
- }
-}
-
impl BackendDeviceType {
// Check for requests that should be intercepted and handled in a generic way
// rather than passed directly to the backend device for device-specific implementations.
@@ -343,10 +340,6 @@
ControlRequestDataPhaseTransferDirection::HostToDevice,
) => {
usb_trace!("handling set interface");
- // Right now we only have one backend device type so the match statement is a bit
- // dry but as we add new backend types the compiler will make sure to warn we need
- // to take care of them in this match statement. Non-host backend device might not
- // need to set interfaces so we'll have to add a catch-all skip case.
match self {
BackendDeviceType::HostDevice(host_device) => match host_device.set_interface(
control_request_setup.index as u8,
@@ -358,6 +351,10 @@
(TransferStatus::Stalled, 0)
}
},
+ _ => {
+ // Nothing to do for non-host devices
+ (TransferStatus::Completed, 0)
+ }
}
}
(
@@ -401,6 +398,26 @@
}
}
}
+ BackendDeviceType::FidoDevice(fido_passthrough) => {
+ match fido_passthrough
+ .get_config_descriptor_by_index(control_request_setup.value as u8)
+ {
+ Ok(descriptor_tree) => {
+ let device_descriptor =
+ fido_passthrough.get_device_descriptor_tree()?;
+ let offset = descriptor_tree.offset();
+ let data = device_descriptor.raw()
+ [offset..offset + descriptor_tree.wTotalLength as usize]
+ .to_vec();
+ let bytes = buffer.write(&data).map_err(Error::WriteBuffer)?;
+ (TransferStatus::Completed, bytes as u32)
+ }
+ Err(e) => {
+ error!("get fido descriptor error: {}", e);
+ (TransferStatus::Stalled, 0)
+ }
+ }
+ }
}
} else {
return Ok(false);
@@ -453,12 +470,16 @@
buffer
};
- // TODO(morg): this match can be abstracted once we have more backends
+ // TODO(morg): Refactor this code so it doesn't need to match on each implementation type
let mut control_transfer = match self {
BackendDeviceType::HostDevice(_) => BackendTransferType::HostDevice(
Transfer::new_control(TransferBuffer::Vector(control_buffer))
.map_err(Error::CreateTransfer)?,
),
+ BackendDeviceType::FidoDevice(_) => BackendTransferType::FidoDevice(FidoTransfer::new(
+ 0,
+ TransferBuffer::Vector(control_buffer),
+ )),
};
let tmp_transfer = xhci_transfer.clone();
@@ -612,11 +633,9 @@
// It's a standard, set_config, device request.
usb_trace!("set_config: {}", config);
- match self {
- BackendDeviceType::HostDevice(host_device) => {
- host_device.release_interfaces();
- }
- };
+ if let BackendDeviceType::HostDevice(host_device) = self {
+ host_device.release_interfaces();
+ }
let cur_config = match self.get_active_configuration() {
Ok(c) => Some(c),
@@ -645,11 +664,9 @@
let config_descriptor = self.get_config_descriptor(config)?;
- match self {
- BackendDeviceType::HostDevice(host_device) => {
- host_device.claim_interfaces(&config_descriptor);
- }
- };
+ if let BackendDeviceType::HostDevice(host_device) = self {
+ host_device.claim_interfaces(&config_descriptor);
+ }
self.create_endpoints(&config_descriptor)?;
Ok(TransferStatus::Completed)
@@ -781,7 +798,7 @@
/// Gets a specific device config descriptor tree by index.
fn get_config_descriptor_by_index(&mut self, config_index: u8) -> Result<ConfigDescriptorTree>;
/// Gets the device descriptor tree.
- fn get_device_descriptor_tree(&mut self) -> DeviceDescriptorTree;
+ fn get_device_descriptor_tree(&mut self) -> Result<DeviceDescriptorTree>;
/// Gets the device current active configuration.
fn get_active_configuration(&mut self) -> Result<u8>;
/// Sets the device active configuration.
diff --git a/devices/src/usb/backend/device_provider.rs b/devices/src/usb/backend/device_provider.rs
index e7319c9..ce1b6e7 100644
--- a/devices/src/usb/backend/device_provider.rs
+++ b/devices/src/usb/backend/device_provider.rs
@@ -24,6 +24,7 @@
use crate::usb::backend::device::DeviceState;
use crate::usb::backend::error::Error;
use crate::usb::backend::error::Result;
+use crate::usb::backend::fido_backend::fido_provider::attach_security_key;
use crate::usb::backend::host_backend::host_backend_device_provider::attach_host_backend_device;
use crate::usb::xhci::usb_hub::UsbHub;
use crate::usb::xhci::xhci_backend_device::XhciBackendDevice;
@@ -231,6 +232,56 @@
}
}
+ fn handle_attach_security_key(&self, hidraw: File) -> UsbControlResult {
+ let (fido_device, event_handler) = match attach_security_key(
+ hidraw,
+ self.event_loop.clone(),
+ DeviceState::new(self.fail_handle.clone(), self.job_queue.clone()),
+ ) {
+ Ok((fido_device, event_handler)) => (fido_device, event_handler),
+ Err(e) => {
+ error!(
+ "could not create a virtual fido device from the given file: {}",
+ e
+ );
+ return UsbControlResult::NoSuchDevice;
+ }
+ };
+
+ if let Err(e) = self.event_loop.add_event(
+ &*fido_device.lock(),
+ EventType::Read,
+ Arc::downgrade(&event_handler),
+ ) {
+ error!("failed to add fido device to event handler: {}", e);
+ return UsbControlResult::FailedToOpenDevice;
+ }
+
+ let device_ctx = DeviceContext {
+ event_handler,
+ device: fido_device.clone(),
+ };
+
+ // Reset the device to make sure it's in a usable state.
+ // Resetting it also stops polling on the FD, since we only poll when there is an active
+ // transaction.
+ if let Err(e) = fido_device.lock().reset() {
+ error!("failed to reset fido device after attach: {:?}", e);
+ }
+
+ let port = self.usb_hub.connect_backend(fido_device);
+ match port {
+ Ok(port) => {
+ self.devices.lock().insert(port, device_ctx);
+ UsbControlResult::Ok { port }
+ }
+ Err(e) => {
+ error!("failed to connect device to hub: {}", e);
+ UsbControlResult::NoAvailablePort
+ }
+ }
+ }
+
fn handle_list_devices(&self, ports: [u8; USB_CONTROL_MAX_PORTS]) -> UsbControlResult {
let mut devices: [UsbControlAttachedDevice; USB_CONTROL_MAX_PORTS] = Default::default();
for (result_index, &port_id) in ports.iter().enumerate() {
@@ -258,6 +309,7 @@
let cmd = tube.recv().map_err(Error::ReadControlTube)?;
let result = match cmd {
UsbControlCommand::AttachDevice { file } => self.handle_attach_device(file),
+ UsbControlCommand::AttachSecurityKey { file } => self.handle_attach_security_key(file),
UsbControlCommand::DetachDevice { port } => self.handle_detach_device(port),
UsbControlCommand::ListDevice { ports } => self.handle_list_devices(ports),
};
diff --git a/devices/src/usb/backend/error.rs b/devices/src/usb/backend/error.rs
index 3e35da4..becb66d 100644
--- a/devices/src/usb/backend/error.rs
+++ b/devices/src/usb/backend/error.rs
@@ -7,6 +7,7 @@
use thiserror::Error;
use usb_util::Error as UsbUtilError;
+use crate::usb::backend::fido_backend::error::Error as FidoError;
use crate::usb::xhci::scatter_gather_buffer::Error as BufferError;
use crate::usb::xhci::xhci_transfer::Error as XhciTransferError;
use crate::utils::Error as UtilsError;
@@ -30,6 +31,8 @@
CreateBuffer(XhciTransferError),
#[error("failed to create control tube: {0}")]
CreateControlTube(TubeError),
+ #[error("failed to create fido backend device: {0}")]
+ CreateFidoBackendDevice(FidoError),
#[error("failed to create host backend usb device: {0}")]
CreateHostUsbDevice(UsbUtilError),
#[error("failed to create libusb context: {0}")]
@@ -85,6 +88,8 @@
TransferComplete(XhciTransferError),
#[error("failed to cancel transfer: {0}")]
TransferHandle(UsbUtilError),
+ #[error("transfer has already completed when being cancelled")]
+ TransferHandleAlreadyComplete,
#[error("failed to write buffer: {0}")]
WriteBuffer(BufferError),
#[error("failed to write control tube: {0}")]
diff --git a/devices/src/usb/backend/fido_backend/constants.rs b/devices/src/usb/backend/fido_backend/constants.rs
new file mode 100644
index 0000000..89d704c
--- /dev/null
+++ b/devices/src/usb/backend/fido_backend/constants.rs
@@ -0,0 +1,150 @@
+// Copyright 2024 The ChromiumOS Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+use usb_util::DescriptorType;
+
+// How long it takes for the security key to become inactive and time out all previously pending
+// transactions since last activity.
+pub const TRANSACTION_TIMEOUT_MILLIS: u64 = 120_000;
+
+// How long to wait before timing out and canceling a USB transfer from the guest if the host
+// security key is unresponsive.
+pub const USB_TRANSFER_TIMEOUT_MILLIS: u64 = 5_000;
+
+// 5ms is the default USB interrupt polling rate according to specs.
+pub const USB_POLL_RATE_MILLIS: u64 = 5;
+
+// Some applications expect a very short RTT when handling packets between host key and guest.
+// NOTE(review): 50_000ns is 50 microseconds, not "half a millisecond" — confirm the intended rate.
+pub const PACKET_POLL_RATE_NANOS: u64 = 50_000;
+
+// Total max number of transactions we can hold in our key. Any more transactions will push older
+// transactions away from the stack.
+pub const MAX_TRANSACTIONS: usize = 4;
+
+// Max number of incoming packets still to be processed by the guest
+pub const U2FHID_MAX_IN_PENDING: usize = 32;
+
+pub const U2FHID_PACKET_SIZE: usize = 64;
+pub const PACKET_INIT_HEADER_SIZE: usize = 7;
+pub const PACKET_CONT_HEADER_SIZE: usize = 5;
+pub const PACKET_INIT_DATA_SIZE: usize = U2FHID_PACKET_SIZE - PACKET_INIT_HEADER_SIZE;
+pub const PACKET_CONT_DATA_SIZE: usize = U2FHID_PACKET_SIZE - PACKET_CONT_HEADER_SIZE;
+pub const BROADCAST_CID: u32 = 0xFFFFFFFF;
+
+pub const NONCE_SIZE: usize = 8;
+pub const EMPTY_NONCE: [u8; NONCE_SIZE] = [0u8; NONCE_SIZE];
+
+// It's a valid init packet only if the 7th bit of the cmd field is set
+pub const PACKET_INIT_VALID_CMD: u8 = 0b1000_0000;
+pub const U2FHID_ERROR_CMD: u8 = 0xBF;
+
+pub const U2FHID_CONTROL_ENDPOINT: u8 = 0x00;
+pub const U2FHID_IN_ENDPOINT: u8 = 0x81;
+pub const U2FHID_OUT_ENDPOINT: u8 = 0x01;
+
+// Generic HID commands
+pub const HID_GET_IDLE: u8 = 0x02;
+pub const HID_SET_IDLE: u8 = 0x0A;
+pub const HID_GET_REPORT_DESC: u8 = 0x22;
+
+pub const HID_MAX_DESCRIPTOR_SIZE: usize = 4096;
+
+// Descriptor data taken from: https://github.com/gl-sergei/u2f-token/blob/master/src/usb-hid.c
+// With minor modifications for our own PID and VID and other strings
+pub const U2FHID_DEVICE_DESC: &[u8] = &[
+ 18,
+ DescriptorType::Device as u8,
+ 0x10,
+ 0x01,
+ 0x00,
+ 0x00,
+ 0x00,
+ 0x40,
+ // Google Vendor ID
+ 0xd1,
+ 0x18,
+ // Unique Product ID
+ 0xd0,
+ 0xf1,
+ 0x00,
+ 0x01,
+ 0,
+ 0,
+ 0,
+ 1,
+];
+
+pub const HID_REPORT_DESC_HEADER: &[u8] = &[
+ 0x06, 0xd0, 0xf1, // Usage Page (FIDO)
+ 0x09, 0x01, // Usage (FIDO)
+];
+
+pub const U2FHID_CONFIG_DESC: &[u8] = &[
+ 9,
+ DescriptorType::Configuration as u8,
+ /* Configuration Descriptor. */
+ 41,
+ 0x00, /* wTotalLength. */
+ 0x01, /* bNumInterfaces. */
+ 0x01, /* bConfigurationValue. */
+ 0, /* iConfiguration. */
+ 0x80, /* bmAttributes. */
+    15,   /* bMaxPower: 15 x 2mA units = 30mA (comment previously claimed 100mA). */
+ /* Interface Descriptor. */
+ 9, /* bLength: Interface Descriptor size */
+ DescriptorType::Interface as u8,
+ 0, /* bInterfaceNumber: Number of Interface */
+ 0x00, /* bAlternateSetting: Alternate setting */
+ 0x02, /* bNumEndpoints: Two endpoints used */
+ 0x03, /* bInterfaceClass: HID */
+ 0x00, /* bInterfaceSubClass: no boot */
+ 0x00, /* bInterfaceProtocol: 0=none */
+ 0x00, /* iInterface */
+ /* HID Descriptor. */
+ 9, /* bLength: HID Descriptor size */
+ 0x21, /* bDescriptorType: HID */
+ 0x10,
+ 0x01, /* bcdHID: HID Class Spec release number */
+ 0x00, /* bCountryCode: Hardware target country */
+ 0x01, /* bNumDescriptors: Number of HID class descriptors to follow */
+ 0x22, /* bDescriptorType */
+ 0x22,
+ 0, /* wItemLength: Total length of Report descriptor */
+ /* Endpoint IN1 Descriptor */
+ 7, /* bLength: Endpoint Descriptor size */
+ DescriptorType::Endpoint as u8,
+ 0x81, /* bEndpointAddress: (IN1) */
+ 0x03, /* bmAttributes: Interrupt */
+ 0x40,
+ 0x00, /* wMaxPacketSize: 64 */
+ 0x05, /* bInterval (5ms) */
+ /* Endpoint OUT1 Descriptor */
+ 7, /* bLength: Endpoint Descriptor size */
+ DescriptorType::Endpoint as u8,
+ 0x01, /* bEndpointAddress: (OUT1) */
+ 0x03, /* bmAttributes: Interrupt */
+ 0x40,
+ 0x00, /* wMaxPacketSize: 64 */
+ 0x05, /* bInterval (5ms) */
+];
+
+pub const HID_REPORT_DESC: &[u8] = &[
+ 0x06, 0xd0, 0xf1, /* USAGE_PAGE (FIDO Alliance) */
+    0x09, 0x01, /* USAGE (U2F Authenticator Device, FIDO usage page) */
+ 0xa1, 0x01, /* COLLECTION (Application) */
+ 0x09, 0x20, /* USAGE (Input report data) */
+ 0x15, 0x00, /* LOGICAL_MINIMUM (0) */
+ 0x26, 0xff, 0x00, /* LOGICAL_MAXIMUM (255) */
+ 0x75, 0x08, /* REPORT_SIZE (8) */
+ 0x95, 0x40, /* REPORT_COUNT (64) */
+ 0x81, 0x02, /* INPUT (Data,Var,Abs); Modifier byte */
+ 0x09, 0x21, /* USAGE (Output report data) */
+ 0x15, 0x00, /* LOGICAL_MINIMUM (0) */
+ 0x26, 0xff, 0x00, /* LOGICAL_MAXIMUM (255) */
+ 0x75, 0x08, /* REPORT_SIZE (8) */
+ 0x95, 0x40, /* REPORT_COUNT (64) */
+ 0x91, 0x02, /* OUTPUT (Data,Var,Abs); Modifier byte */
+ 0xc0, /* END_COLLECTION */
+];
diff --git a/devices/src/usb/backend/fido_backend/error.rs b/devices/src/usb/backend/fido_backend/error.rs
new file mode 100644
index 0000000..5e3423e
--- /dev/null
+++ b/devices/src/usb/backend/fido_backend/error.rs
@@ -0,0 +1,51 @@
+// Copyright 2024 The ChromiumOS Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+use std::io::Error as IOError;
+
+use remain::sorted;
+use thiserror::Error;
+
+use crate::utils::Error as UtilsError;
+
+#[sorted]
+#[derive(Error, Debug)]
+pub enum Error {
+ #[error("Failed to arm {name} timer: {error:#}")]
+ CannotArmPollTimer { name: String, error: base::Error },
+ #[error("Failed to clear {name} timer: {error:#}")]
+ CannotClearPollTimer { name: String, error: base::Error },
+ #[error("Cannot convert the u2f init packet from bytes")]
+ CannotConvertInitPacketFromBytes,
+ #[error("Cannot create the poll timer")]
+ CannotCreatePollTimer(base::Error),
+ #[error("Cannot extract cid value from packet bytes")]
+ CannotExtractCidFromBytes,
+ #[error("Pending fido transfer reference has been lost.")]
+ FidoTransferLost,
+ #[error("The fido device is in an inconsistent state")]
+ InconsistentFidoDeviceState,
+ #[error("Invalid data buffer size")]
+ InvalidDataBufferSize,
+ #[error("The given hidraw device is not a security key")]
+ InvalidHidrawDevice,
+ #[error("The u2f init packet is invalid")]
+ InvalidInitPacket,
+ #[error("The u2f init packet contains invalid data size for the nonce")]
+ InvalidNonceSize,
+ #[error("Pending packet queue is full and cannot process more host packets")]
+ PendingInQueueFull,
+ #[error("Failed to read packet from hidraw device")]
+ ReadHidrawDevice(IOError),
+ #[error("Cannot start fido device queue")]
+ StartAsyncFidoQueue(UtilsError),
+ #[error("Unsupported TransferBuffer type")]
+ UnsupportedTransferBufferType,
+ #[error("Failed to wait context on poll thread")]
+ WaitContextFailed(anyhow::Error),
+ #[error("Failed to write to hidraw device")]
+ WriteHidrawDevice(IOError),
+}
+
+pub type Result<T> = std::result::Result<T, Error>;
diff --git a/devices/src/usb/backend/fido_backend/fido_device.rs b/devices/src/usb/backend/fido_backend/fido_device.rs
new file mode 100644
index 0000000..43e1a1d
--- /dev/null
+++ b/devices/src/usb/backend/fido_backend/fido_device.rs
@@ -0,0 +1,315 @@
+// Copyright 2024 The ChromiumOS Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+use std::fs::File;
+use std::io::Error as IOError;
+use std::io::ErrorKind;
+use std::io::Write;
+use std::sync::Arc;
+
+use base::debug;
+use base::error;
+use base::warn;
+use base::AsRawDescriptor;
+use base::EventType;
+use base::RawDescriptor;
+use sync::Mutex;
+use zerocopy::FromBytes;
+use zerocopy::FromZeroes;
+
+use crate::usb::backend::fido_backend::constants;
+use crate::usb::backend::fido_backend::error::Error;
+use crate::usb::backend::fido_backend::error::Result;
+use crate::usb::backend::fido_backend::fido_guest::FidoGuestKey;
+use crate::usb::backend::fido_backend::fido_transaction::TransactionManager;
+use crate::usb::backend::fido_backend::hid_utils::verify_is_fido_device;
+use crate::usb::backend::fido_backend::poll_thread::PollTimer;
+use crate::utils::EventLoop;
+
+#[derive(FromZeroes, FromBytes, Debug)]
+#[repr(C)]
+pub struct InitPacket {
+ cid: u32,
+ cmd: u8,
+ bcnth: u8,
+ bcntl: u8,
+ data: [u8; constants::PACKET_INIT_DATA_SIZE],
+}
+
+impl InitPacket {
+ pub fn extract_cid(bytes: [u8; constants::U2FHID_PACKET_SIZE]) -> Result<u32> {
+ // cid is the first 4 bytes so we don't need to worry about anything else in the bytes
+ // buffer, we can just read from prefix.
+ FromBytes::read_from_prefix(&bytes[..]).ok_or_else(|| Error::CannotExtractCidFromBytes)
+ }
+
+ fn is_valid(bytes: [u8; constants::U2FHID_PACKET_SIZE]) -> bool {
+ (bytes[4] & constants::PACKET_INIT_VALID_CMD) != 0
+ }
+
+ pub fn from_bytes(bytes: [u8; constants::U2FHID_PACKET_SIZE]) -> Result<InitPacket> {
+ if !InitPacket::is_valid(bytes) {
+ return Err(Error::InvalidInitPacket);
+ }
+
+ InitPacket::read_from(&bytes[..]).ok_or_else(|| Error::CannotConvertInitPacketFromBytes)
+ }
+
+ pub fn bcnt(&self) -> u16 {
+ (self.bcnth as u16) << 8 | (self.bcntl as u16)
+ }
+}
+
+/// A virtual representation of a FidoDevice emulated on the Host.
+pub struct FidoDevice {
+ /// Guest representation of the virtual security key device
+ pub guest_key: Arc<Mutex<FidoGuestKey>>,
+ /// The `TransactionManager` which handles starting and stopping u2f transactions
+ pub transaction_manager: Arc<Mutex<TransactionManager>>,
+ /// Marks whether the current device is active in a transaction. If it is not active, the fd
+ /// polling event loop does not handle the device fd monitoring.
+ pub is_active: bool,
+ /// Marks whether the device has been lost. In case the FD stops being responsive we signal
+ /// that the device is lost and any further transaction will return a failure.
+ pub is_device_lost: bool,
+ /// Backend provider event loop to attach/detach the monitored fd.
+ event_loop: Arc<EventLoop>,
+ /// Timer to poll for active USB transfers
+ pub transfer_timer: PollTimer,
+ /// fd of the actual hidraw device
+ pub fd: Arc<Mutex<File>>,
+}
+
+impl AsRawDescriptor for FidoDevice {
+ fn as_raw_descriptor(&self) -> RawDescriptor {
+ self.fd.lock().as_raw_descriptor()
+ }
+}
+
+impl FidoDevice {
+ pub fn new(hidraw: File, event_loop: Arc<EventLoop>) -> Result<FidoDevice> {
+ verify_is_fido_device(&hidraw)?;
+ let timer = PollTimer::new(
+ "USB transfer timer".to_string(),
+ std::time::Duration::from_millis(constants::USB_POLL_RATE_MILLIS),
+ )?;
+ Ok(FidoDevice {
+ guest_key: Arc::new(Mutex::new(FidoGuestKey::new()?)),
+ transaction_manager: Arc::new(Mutex::new(TransactionManager::new()?)),
+ is_active: false,
+ is_device_lost: false,
+ event_loop,
+ transfer_timer: timer,
+ fd: Arc::new(Mutex::new(hidraw)),
+ })
+ }
+
+ /// Sets the device active state. If the device becomes active, it toggles polling on the file
+ /// descriptor for the host hid device. If the devices becomes inactive, it stops polling.
+ /// In case of error, it's not possible to recover so we just log the warning and continue.
+ pub fn set_active(&mut self, active: bool) {
+ if self.is_active && !active {
+ if let Err(e) = self.event_loop.pause_event_for_descriptor(self) {
+ error!("Could not deactivate polling of host device: {}", e);
+ }
+ } else if !self.is_active && active {
+ if let Err(e) = self
+ .event_loop
+ .resume_event_for_descriptor(self, EventType::Read)
+ {
+ error!(
+ "Could not resume polling of host device, transactions will be lost: {}",
+ e
+ );
+ }
+ }
+
+ self.is_active = active;
+ }
+
+ /// Starts a new transaction from a given init packet.
+ pub fn start_transaction(&mut self, packet: &InitPacket) -> Result<()> {
+ let nonce = if packet.cid == constants::BROADCAST_CID {
+ packet.data[..constants::NONCE_SIZE]
+ .try_into()
+ .map_err(|_| Error::InvalidNonceSize)?
+ } else {
+ constants::EMPTY_NONCE
+ };
+
+ // Start a transaction and the expiration timer if necessary
+ if self
+ .transaction_manager
+ .lock()
+ .start_transaction(packet.cid, nonce)
+ {
+ // Enable the timer that polls for transactions to expire
+ self.transaction_manager.lock().transaction_timer.arm()?;
+ }
+
+ // Transition the low level device to active for a response from the host
+ self.set_active(true);
+ Ok(())
+ }
+
+ /// Receives a low-level request from the host device. It means we read data from the actual
+ /// key on the host.
+ pub fn recv_from_host(&mut self, packet: [u8; constants::U2FHID_PACKET_SIZE]) -> Result<()> {
+ let cid = InitPacket::extract_cid(packet)?;
+ let transaction_opt = if cid == constants::BROADCAST_CID {
+ match InitPacket::from_bytes(packet) {
+ Ok(packet) => {
+ // This is a special case, in case of an error message we return to the
+ // latest broadcast transaction without nonce checking.
+ if packet.cmd == constants::U2FHID_ERROR_CMD {
+ self.transaction_manager.lock().get_transaction(cid)
+ // Otherwise we verify that the nonce matches the right transaction.
+ } else {
+ let nonce = packet.data[..constants::NONCE_SIZE]
+ .try_into()
+ .map_err(|_| Error::InvalidNonceSize)?;
+ self.transaction_manager
+ .lock()
+ .get_transaction_from_nonce(nonce)
+ }
+ }
+ _ => {
+ // Drop init transaction with bad init packet
+ return Ok(());
+ }
+ }
+ } else {
+ self.transaction_manager.lock().get_transaction(cid)
+ };
+
+ let transaction = match transaction_opt {
+ Some(t) => t,
+ None => {
+ debug!("Ignoring non-started transaction");
+ return Ok(());
+ }
+ };
+
+ match InitPacket::from_bytes(packet) {
+ Ok(packet) => {
+ if packet.cid == constants::BROADCAST_CID {
+ let nonce = &packet.data[..constants::NONCE_SIZE];
+ if transaction.nonce != nonce {
+ // In case of an error command we can let it through, otherwise we drop the
+ // response.
+ if packet.cmd != constants::U2FHID_ERROR_CMD {
+ warn!(
+ "u2f: received a broadcast transaction with mismatched nonce.\
+ Ignoring transaction."
+ );
+ return Ok(());
+ }
+ }
+ }
+ self.transaction_manager.lock().update_transaction(
+ cid,
+ packet.bcnt(),
+ constants::PACKET_INIT_DATA_SIZE as u16,
+ );
+ }
+ // It's not an init packet, it means it's a continuation packet
+ Err(Error::InvalidInitPacket) => {
+ self.transaction_manager.lock().update_transaction(
+ cid,
+ transaction.resp_bcnt,
+ transaction.resp_size + constants::PACKET_CONT_DATA_SIZE as u16,
+ );
+ }
+ Err(e) => {
+ error!(
+ "u2f: received an invalid transaction state: {:?}. Ignoring transaction.",
+ e
+ );
+ return Ok(());
+ }
+ }
+
+ // Fetch the transaction again to check if we are done processing it or if we should wait
+ // for more continuation packets.
+ let transaction = match self.transaction_manager.lock().get_transaction(cid) {
+ Some(t) => t,
+ None => {
+ error!(
+ "We lost a transaction on the way. This is a bug. (cid: {})",
+ cid
+ );
+ return Ok(());
+ }
+ };
+ // Check for the end of the transaction
+ if transaction.resp_size >= transaction.resp_bcnt {
+ if self
+ .transaction_manager
+ .lock()
+ .close_transaction(transaction.cid)
+ {
+ // Resets the device as inactive, since we're not waiting for more data to come
+ // from the host.
+ self.set_active(false);
+ }
+ }
+
+ let mut guest_key = self.guest_key.lock();
+ if guest_key.pending_in_packets.is_empty() {
+ // We start polling waiting to send the data back to the guest.
+ if let Err(e) = guest_key.timer.arm() {
+ error!(
+ "Unable to start U2F guest key timer. U2F packets may be lost. {}",
+ e
+ );
+ }
+ }
+ guest_key.pending_in_packets.push_back(packet);
+
+ Ok(())
+ }
+
+ /// Receives a request from the guest device to write into the actual device on the host.
+ pub fn recv_from_guest(
+ &mut self,
+ packet: [u8; constants::U2FHID_PACKET_SIZE],
+ ) -> Result<usize> {
+ // The first byte in the host packet request is the HID report request ID as required by
+ // the Linux kernel. The real request data starts from the second byte, so we need to
+ // allocate one extra byte in our write buffer.
+ // See: https://docs.kernel.org/hid/hidraw.html#write
+ let mut host_packet = vec![0; constants::U2FHID_PACKET_SIZE + 1];
+
+ match InitPacket::from_bytes(packet) {
+ Ok(init_packet) => {
+ self.start_transaction(&init_packet)?;
+ }
+ Err(Error::InvalidInitPacket) => {
+ // It's not an init packet, so we don't start a transaction.
+ }
+ Err(e) => {
+ warn!("Received malformed or invalid u2f-hid init packet, request will be dropped");
+ return Err(e);
+ }
+ }
+
+ host_packet[1..].copy_from_slice(&packet);
+
+ let written = self
+ .fd
+ .lock()
+ .write(&host_packet)
+ .map_err(Error::WriteHidrawDevice)?;
+
+ if written != host_packet.len() {
+ return Err(Error::WriteHidrawDevice(IOError::new(
+ ErrorKind::Other,
+ "Wrote too few bytes to hidraw device.",
+ )));
+ }
+
+ // we subtract 1 because we added 1 extra byte to the host packet
+ Ok(host_packet.len() - 1)
+ }
+}
diff --git a/devices/src/usb/backend/fido_backend/fido_guest.rs b/devices/src/usb/backend/fido_backend/fido_guest.rs
new file mode 100644
index 0000000..5d927a7
--- /dev/null
+++ b/devices/src/usb/backend/fido_backend/fido_guest.rs
@@ -0,0 +1,132 @@
+// Copyright 2023 The ChromiumOS Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+use std::collections::VecDeque;
+
+use base::error;
+use usb_util::TransferBuffer;
+
+use crate::usb::backend::fido_backend::constants;
+use crate::usb::backend::fido_backend::error::Error;
+use crate::usb::backend::fido_backend::error::Result;
+use crate::usb::backend::fido_backend::poll_thread::PollTimer;
+use crate::usb::backend::fido_backend::transfer::FidoTransfer;
+
+/// `FidoGuestKey` is the struct representation of a virtual fido device as seen by the guest VM.
+/// It takes care of bubbling up transactions from the host into the guest and show a
+/// representation of the device's state into the guest.
+pub struct FidoGuestKey {
+ /// Queue of packets already processed by the host that need to be sent to the guest.
+ pub pending_in_packets: VecDeque<[u8; constants::U2FHID_PACKET_SIZE]>,
+ /// HID Idle state of the security key.
+ pub idle: u8,
+ /// Timer used to poll to periodically send packets to pending USB transfers.
+ pub timer: PollTimer,
+}
+
+impl FidoGuestKey {
+ pub fn new() -> Result<Self> {
+ let timer = PollTimer::new(
+ "guest packet timer".to_string(),
+ std::time::Duration::from_nanos(constants::PACKET_POLL_RATE_NANOS),
+ )?;
+ Ok(FidoGuestKey {
+ pending_in_packets: VecDeque::with_capacity(constants::U2FHID_MAX_IN_PENDING),
+ idle: 1,
+ timer,
+ })
+ }
+
+ /// Resets the guest key representation, stopping the poll and clearing the packet queue.
+ pub fn reset(&mut self) {
+ self.pending_in_packets.clear();
+ if let Err(e) = self.timer.clear() {
+ error!("Unable to clear guest key timer, silently failing. {}", e);
+ }
+ }
+
+ /// Sends data to the guest by associating a given transfer to the oldest packet in the queue.
+ /// If the data from the host hasn't been read yet (the packet queue is empty), it returns the
+ /// same transfer back to the caller, unmodified.
+ pub fn return_data_to_guest(
+ &mut self,
+ transfer_opt: Option<FidoTransfer>,
+ ) -> Result<Option<FidoTransfer>> {
+ // If this happens, it means we passed around an empty reference to a
+ // non existing transfer that was already cancelled and removed.
+ let mut transfer = transfer_opt.ok_or(Error::FidoTransferLost)?;
+ match self.pending_in_packets.pop_front() {
+ Some(packet) => {
+ transfer.buffer = TransferBuffer::Vector(packet.to_vec());
+ transfer.actual_length = packet.len();
+ transfer.complete_transfer();
+ Ok(None)
+ }
+ None => {
+ // Pending queue is empty, nothing to do so we return the original transfer without
+ // consuming it.
+ Ok(Some(transfer))
+ }
+ }
+ }
+}
+
+#[cfg(test)]
+mod tests {
+
+ use std::sync::Arc;
+
+ use sync::Mutex;
+ use usb_util::TransferBuffer;
+ use usb_util::TransferStatus;
+
+ use crate::usb::backend::fido_backend::constants::U2FHID_PACKET_SIZE;
+ use crate::usb::backend::fido_backend::fido_guest::FidoGuestKey;
+ use crate::usb::backend::fido_backend::transfer::FidoTransfer;
+ use crate::usb::backend::transfer::BackendTransfer;
+ use crate::usb::backend::transfer::BackendTransferType;
+
+ #[test]
+ fn test_reset() {
+ let mut fido_key = FidoGuestKey::new().unwrap();
+ let fake_packet = [0; U2FHID_PACKET_SIZE];
+
+ fido_key.pending_in_packets.push_back(fake_packet);
+ assert_eq!(fido_key.pending_in_packets.len(), 1);
+ fido_key.reset();
+ assert_eq!(fido_key.pending_in_packets.len(), 0);
+ }
+
+ #[test]
+ fn test_return_data_to_guest_no_packet_retry() {
+ let mut fido_key = FidoGuestKey::new().unwrap();
+ let transfer_buffer = TransferBuffer::Vector(vec![0u8; U2FHID_PACKET_SIZE]);
+ let fake_transfer = FidoTransfer::new(1, transfer_buffer);
+
+ let returned_transfer = fido_key.return_data_to_guest(Some(fake_transfer)).unwrap();
+ assert!(returned_transfer.is_some());
+ }
+
+ #[test]
+ fn test_return_data_to_guest_success() {
+ let mut fido_key = FidoGuestKey::new().unwrap();
+ let fake_packet = [5; U2FHID_PACKET_SIZE];
+ let transfer_buffer = TransferBuffer::Vector(vec![0u8; U2FHID_PACKET_SIZE]);
+ let mut fake_transfer = FidoTransfer::new(1, transfer_buffer);
+
+ let callback_outer = Arc::new(Mutex::new(false));
+ let callback_inner = callback_outer.clone();
+
+ fake_transfer.set_callback(move |t: BackendTransferType| {
+ assert_eq!(t.actual_length(), U2FHID_PACKET_SIZE);
+ assert!(t.status() == TransferStatus::Completed);
+ *callback_inner.lock() = true;
+ });
+ fido_key.pending_in_packets.push_back(fake_packet);
+
+ let returned_transfer = fido_key.return_data_to_guest(Some(fake_transfer)).unwrap();
+ assert!(returned_transfer.is_none());
+ assert!(*callback_outer.lock());
+ }
+}
diff --git a/devices/src/usb/backend/fido_backend/fido_passthrough.rs b/devices/src/usb/backend/fido_backend/fido_passthrough.rs
new file mode 100644
index 0000000..7e7a64b
--- /dev/null
+++ b/devices/src/usb/backend/fido_backend/fido_passthrough.rs
@@ -0,0 +1,566 @@
+// Copyright 2024 The ChromiumOS Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+use std::collections::VecDeque;
+use std::io::Error as IOError;
+use std::io::ErrorKind;
+use std::io::Read;
+use std::sync::Arc;
+use std::sync::RwLock;
+
+use base::debug;
+use base::error;
+use base::AsRawDescriptor;
+use base::Event;
+use base::RawDescriptor;
+use base::WorkerThread;
+use sync::Mutex;
+use usb_util::parse_usbfs_descriptors;
+use usb_util::ConfigDescriptorTree;
+use usb_util::ControlRequestDataPhaseTransferDirection;
+use usb_util::ControlRequestRecipient;
+use usb_util::ControlRequestType;
+use usb_util::DescriptorType;
+use usb_util::DeviceDescriptorTree;
+use usb_util::DeviceSpeed;
+use usb_util::EndpointDirection;
+use usb_util::EndpointType;
+use usb_util::Error as UsbUtilError;
+use usb_util::TransferBuffer;
+use usb_util::TransferStatus;
+use usb_util::UsbRequestSetup;
+use zerocopy::AsBytes;
+use zerocopy::FromBytes;
+
+use crate::usb::backend::device::BackendDevice;
+use crate::usb::backend::device::DeviceState;
+use crate::usb::backend::endpoint::ControlEndpointState;
+use crate::usb::backend::endpoint::UsbEndpoint;
+use crate::usb::backend::error::Error as BackendError;
+use crate::usb::backend::error::Result as BackendResult;
+use crate::usb::backend::fido_backend::constants;
+use crate::usb::backend::fido_backend::error::Error;
+use crate::usb::backend::fido_backend::error::Result;
+use crate::usb::backend::fido_backend::fido_device::FidoDevice;
+use crate::usb::backend::fido_backend::poll_thread::poll_for_pending_packets;
+use crate::usb::backend::fido_backend::transfer::FidoTransfer;
+use crate::usb::backend::fido_backend::transfer::FidoTransferHandle;
+use crate::usb::backend::transfer::BackendTransferHandle;
+use crate::usb::backend::transfer::BackendTransferType;
+use crate::usb::backend::transfer::ControlTransferState;
+use crate::usb::backend::transfer::GenericTransferHandle;
+use crate::usb::xhci::xhci_backend_device::BackendType;
+use crate::usb::xhci::xhci_backend_device::UsbDeviceAddress;
+use crate::usb::xhci::xhci_backend_device::XhciBackendDevice;
+use crate::utils::AsyncJobQueue;
+use crate::utils::EventLoop;
+
+/// Host-level fido passthrough device that handles USB operations and relays them to the
+/// appropriate virtual fido device.
+pub struct FidoPassthroughDevice {
+ /// The virtual FIDO device implementation.
+ device: Arc<Mutex<FidoDevice>>,
+ /// The state of the device as seen by the backend provider.
+ state: Arc<RwLock<DeviceState>>,
+ /// The state of the control transfer exchange with the xhci layer.
+ control_transfer_state: Arc<RwLock<ControlTransferState>>,
+ transfer_job_queue: Arc<AsyncJobQueue>,
+ kill_evt: Event,
+ worker_thread: Option<WorkerThread<()>>,
+ pending_in_transfers:
+ Arc<Mutex<VecDeque<(FidoTransferHandle, Arc<Mutex<Option<FidoTransfer>>>)>>>,
+}
+
+impl FidoPassthroughDevice {
+ pub fn new(
+ device: Arc<Mutex<FidoDevice>>,
+ state: DeviceState,
+ event_loop: Arc<EventLoop>,
+ ) -> Result<Self> {
+ let control_transfer_state = ControlTransferState {
+ ctl_ep_state: ControlEndpointState::SetupStage,
+ control_request_setup: UsbRequestSetup::new(0, 0, 0, 0, 0),
+ executed: false,
+ };
+ let job_queue = AsyncJobQueue::init(&event_loop).map_err(Error::StartAsyncFidoQueue)?;
+ Ok(FidoPassthroughDevice {
+ device,
+ state: Arc::new(RwLock::new(state)),
+ control_transfer_state: Arc::new(RwLock::new(control_transfer_state)),
+ transfer_job_queue: job_queue,
+ kill_evt: Event::new().unwrap(),
+ worker_thread: None,
+ pending_in_transfers: Arc::new(Mutex::new(VecDeque::new())),
+ })
+ }
+
+ /// This function is called from the low-level event handler when the monitored `fd` is ready
+ /// to transmit data from the host to the guest.
+ pub fn read_hidraw_file(&mut self) -> Result<()> {
+ let mut device = self.device.lock();
+ // Device has already stopped working, just return early.
+ if device.is_device_lost {
+ return Ok(());
+ }
+ if !device.is_active {
+ // We should NEVER be polling on the fd and wake up if no transactions have been
+ // initiated from the guest first.
+ error!("Fido device received fd poll event from inactive device. This is a bug.");
+ return Err(Error::InconsistentFidoDeviceState);
+ }
+
+ let mut packet = vec![0; constants::U2FHID_PACKET_SIZE * 2];
+
+ if device.guest_key.lock().pending_in_packets.len() >= constants::U2FHID_MAX_IN_PENDING {
+ return Err(Error::PendingInQueueFull);
+ }
+
+ let read_result = device.fd.lock().read(&mut packet);
+ match read_result {
+ Ok(n) => {
+ // We read too much, the device is misbehaving
+ if n != constants::U2FHID_PACKET_SIZE {
+ return Err(Error::ReadHidrawDevice(IOError::new(
+ ErrorKind::Other,
+ format!("Read too many bytes ({n}), the hidraw device is misbehaving."),
+ )));
+ }
+ // This is safe because we just checked the size of n is exactly U2FHID_PACKET_SIZE
+ device
+ .recv_from_host(packet[..constants::U2FHID_PACKET_SIZE].try_into().unwrap())?;
+ }
+ Err(e) => {
+ error!("U2F hidraw read error: {e:#}, resetting and detaching device",);
+ device.set_active(false);
+ device.is_device_lost = true;
+ return Err(Error::ReadHidrawDevice(e));
+ }
+ }
+ Ok(())
+ }
+
+ /// This function is called by a queued job to handle all communication related to USB control
+ /// transfer packets between the guest and the virtual security key.
+ pub fn handle_control(
+ transfer: &mut FidoTransfer,
+ device: &Arc<Mutex<FidoDevice>>,
+ ) -> Result<()> {
+ transfer.actual_length = 0;
+ let request_setup = match &transfer.buffer {
+ TransferBuffer::Vector(v) => {
+ UsbRequestSetup::read_from_prefix(v).ok_or_else(|| Error::InvalidDataBufferSize)?
+ }
+ _ => {
+ return Err(Error::UnsupportedTransferBufferType);
+ }
+ };
+
+ let mut request_setup_out = request_setup.as_bytes().to_vec();
+ let is_device_to_host =
+ request_setup.get_direction() == ControlRequestDataPhaseTransferDirection::DeviceToHost;
+ let descriptor_type = (request_setup.value >> 8) as u8;
+
+ // Get Device Descriptor request
+ if descriptor_type == (DescriptorType::Device as u8) && is_device_to_host {
+ // If the descriptor is larger than the actual requested data, we only allocate space
+ // for the request size. This is common for USB3 control setup to request only the
+ // initial 8 bytes instead of the full descriptor.
+ let buf_size = std::cmp::min(
+ request_setup.length.into(),
+ constants::U2FHID_DEVICE_DESC.len(),
+ );
+ let mut buffer: Vec<u8> = constants::U2FHID_DEVICE_DESC[..buf_size].to_vec();
+ transfer.actual_length = buffer.len();
+ request_setup_out.append(&mut buffer);
+ }
+
+ if request_setup.get_recipient() == ControlRequestRecipient::Interface {
+ // It's a request for the HID report descriptor
+ if is_device_to_host && descriptor_type == constants::HID_GET_REPORT_DESC {
+ let mut buffer: Vec<u8> = constants::HID_REPORT_DESC.to_vec();
+ transfer.actual_length = buffer.len();
+ request_setup_out.append(&mut buffer);
+ }
+ }
+
+ if request_setup.get_type() == ControlRequestType::Class {
+ match request_setup.request {
+ constants::HID_GET_IDLE => {
+ let mut buffer: Vec<u8> = vec![0u8, 1];
+ buffer[0] = device.lock().guest_key.lock().idle;
+ transfer.actual_length = 1;
+ request_setup_out.append(&mut buffer);
+ }
+ constants::HID_SET_IDLE => {
+ device.lock().guest_key.lock().idle = (request_setup.value >> 8) as u8;
+ }
+ _ => {
+ debug!(
+ "Received unsupported setup request code of Class type: {}",
+ request_setup.request
+ );
+ }
+ }
+ }
+
+ // Store the response
+ transfer.buffer = TransferBuffer::Vector(request_setup_out);
+ Ok(())
+ }
+
+ /// This function is called by a queued job to handle all USB OUT requests from the guest down
+ /// to the host by writing the given `FidoTransfer` data into the hidraw file.
+ pub fn handle_interrupt_out(
+ transfer: &mut FidoTransfer,
+ device: &Arc<Mutex<FidoDevice>>,
+ ) -> Result<()> {
+ let mut packet = [0u8; constants::U2FHID_PACKET_SIZE];
+ let buffer = match &transfer.buffer {
+ TransferBuffer::Vector(v) => v,
+ _ => {
+ return Err(Error::UnsupportedTransferBufferType);
+ }
+ };
+ if buffer.len() > constants::U2FHID_PACKET_SIZE {
+ error!(
+ "Buffer size is bigger than u2f-hid packet size: {}",
+ buffer.len()
+ );
+ return Err(Error::InvalidDataBufferSize);
+ }
+ packet.copy_from_slice(buffer);
+ let written = device.lock().recv_from_guest(packet)?;
+ transfer.actual_length = written;
+ Ok(())
+ }
+}
+
+impl Drop for FidoPassthroughDevice {
+ fn drop(&mut self) {
+ self.device.lock().is_device_lost = true;
+ if let Err(e) = self.kill_evt.signal() {
+ error!(
+ "Failed to send signal to stop poll worker thread, \
+ it might have already stopped. {e:#}"
+ );
+ }
+ }
+}
+
+impl AsRawDescriptor for FidoPassthroughDevice {
+ fn as_raw_descriptor(&self) -> RawDescriptor {
+ self.device.lock().as_raw_descriptor()
+ }
+}
+
+impl BackendDevice for FidoPassthroughDevice {
+ fn submit_backend_transfer(
+ &mut self,
+ transfer: BackendTransferType,
+ ) -> BackendResult<BackendTransferHandle> {
+ let transfer = match transfer {
+ BackendTransferType::FidoDevice(transfer) => transfer,
+ _ => return Err(BackendError::MalformedBackendTransfer),
+ };
+
+ let endpoint = transfer.endpoint;
+ let arc_transfer = Arc::new(Mutex::new(Some(transfer)));
+ let cancel_handle = FidoTransferHandle {
+ weak_transfer: Arc::downgrade(&arc_transfer),
+ };
+
+ match endpoint {
+ constants::U2FHID_CONTROL_ENDPOINT => {
+ let arc_transfer_local = arc_transfer.clone();
+ let fido_device = self.device.clone();
+ self.transfer_job_queue
+ .queue_job(move || {
+ let mut lock = arc_transfer_local.lock();
+ match lock.take() {
+ Some(mut transfer) => {
+ if let Err(e) = FidoPassthroughDevice::handle_control(
+ &mut transfer,
+ &fido_device,
+ ) {
+ error!(
+ "Fido device handle control failed, cancelling transfer:\
+ {e:#}"
+ );
+ drop(lock);
+ if let Err(e) = cancel_handle.cancel() {
+ error!(
+ "Failed to cancel transfer, dropping request: {e:#}"
+ );
+ return;
+ }
+ }
+ transfer.complete_transfer();
+ }
+ None => {
+ error!(
+ "USB transfer disappeared in handle_control. Dropping request."
+ );
+ }
+ }
+ })
+ .map_err(BackendError::QueueAsyncJob)?;
+ }
+ constants::U2FHID_OUT_ENDPOINT => {
+ let arc_transfer_local = arc_transfer.clone();
+ let fido_device = self.device.clone();
+ self.transfer_job_queue
+ .queue_job(move || {
+ let mut lock = arc_transfer_local.lock();
+ match lock.take() {
+ Some(mut transfer) => {
+ if let Err(e) = FidoPassthroughDevice::handle_interrupt_out(
+ &mut transfer,
+ &fido_device,
+ ) {
+ error!(
+ "Fido device handle interrupt out failed,\
+ cancelling transfer: {e:#}"
+ );
+ drop(lock);
+ if let Err(e) = cancel_handle.cancel() {
+ error!(
+ "Failed to cancel transfer, dropping request: {e:#}"
+ );
+ return;
+ }
+ }
+ transfer.complete_transfer();
+ }
+ None => {
+ error!("Interrupt out transfer disappeared. Dropping request.");
+ }
+ }
+ })
+ .map_err(BackendError::QueueAsyncJob)?;
+ }
+ constants::U2FHID_IN_ENDPOINT => {
+ let handle = FidoTransferHandle {
+ weak_transfer: Arc::downgrade(&arc_transfer.clone()),
+ };
+ self.pending_in_transfers
+ .lock()
+ .push_back((handle, arc_transfer.clone()));
+
+ // Make sure to arm the timer for both transfer and host packet polling as we wait
+ // for transaction requests to be fulfilled by the host or xhci transfer to time
+ // out.
+ if let Err(e) = self.device.lock().guest_key.lock().timer.arm() {
+ error!("Unable to start U2F guest key timer. U2F packets may be lost. {e:#}");
+ }
+ if let Err(e) = self.device.lock().transfer_timer.arm() {
+ error!("Unable to start transfer poll timer. Transfers might stall. {e:#}");
+ }
+ }
+ _ => {
+ error!("Wrong endpoint requested: {endpoint}");
+ return Err(BackendError::MalformedBackendTransfer);
+ }
+ }
+
+ // Start the worker thread if it hasn't been created yet
+ if self.worker_thread.is_none()
+ && (endpoint == constants::U2FHID_IN_ENDPOINT
+ || endpoint == constants::U2FHID_OUT_ENDPOINT)
+ {
+ let device = self.device.clone();
+ let pending_in_transfers = self.pending_in_transfers.clone();
+ self.worker_thread = Some(WorkerThread::start("fido poll thread", move |kill_evt| {
+ if let Err(e) = poll_for_pending_packets(device, pending_in_transfers, kill_evt) {
+ error!("Poll worker thread errored: {e:#}");
+ }
+ }));
+ }
+
+ let cancel_handle = FidoTransferHandle {
+ weak_transfer: Arc::downgrade(&arc_transfer),
+ };
+ Ok(BackendTransferHandle::new(cancel_handle))
+ }
+
+ fn detach_event_handler(&self, _event_loop: &Arc<EventLoop>) -> BackendResult<()> {
+ self.device.lock().set_active(false);
+ Ok(())
+ }
+
+ fn request_transfer_buffer(&mut self, size: usize) -> TransferBuffer {
+ TransferBuffer::Vector(vec![0u8; size])
+ }
+
+ fn build_bulk_transfer(
+ &mut self,
+ _ep_addr: u8,
+ _transfer_buffer: TransferBuffer,
+ _stream_id: Option<u16>,
+ ) -> BackendResult<BackendTransferType> {
+ // Fido devices don't support bulk transfer requests
+ Err(BackendError::MalformedBackendTransfer)
+ }
+
+ fn build_interrupt_transfer(
+ &mut self,
+ ep_addr: u8,
+ transfer_buffer: TransferBuffer,
+ ) -> BackendResult<BackendTransferType> {
+ Ok(BackendTransferType::FidoDevice(FidoTransfer::new(
+ ep_addr,
+ transfer_buffer,
+ )))
+ }
+
+ fn get_control_transfer_state(&mut self) -> Arc<RwLock<ControlTransferState>> {
+ self.control_transfer_state.clone()
+ }
+
+ fn get_device_state(&mut self) -> Arc<RwLock<DeviceState>> {
+ self.state.clone()
+ }
+
+ fn get_active_config_descriptor(&mut self) -> BackendResult<ConfigDescriptorTree> {
+ // There is only a config descriptor for u2f virtual keys.
+ self.get_config_descriptor_by_index(0)
+ }
+
+ fn get_config_descriptor(&mut self, config: u8) -> BackendResult<ConfigDescriptorTree> {
+ let device_descriptor = self.get_device_descriptor_tree()?;
+ if let Some(config_descriptor) = device_descriptor.get_config_descriptor(config) {
+ return Ok(config_descriptor.clone());
+ }
+ Err(BackendError::GetConfigDescriptor(
+ UsbUtilError::DescriptorParse,
+ ))
+ }
+
+ fn get_config_descriptor_by_index(
+ &mut self,
+ config_index: u8,
+ ) -> BackendResult<ConfigDescriptorTree> {
+ let device_descriptor = self.get_device_descriptor_tree()?;
+ if let Some(config_descriptor) =
+ device_descriptor.get_config_descriptor_by_index(config_index)
+ {
+ return Ok(config_descriptor.clone());
+ }
+ Err(BackendError::GetConfigDescriptor(
+ UsbUtilError::DescriptorParse,
+ ))
+ }
+
+ fn get_device_descriptor_tree(&mut self) -> BackendResult<DeviceDescriptorTree> {
+ // Skip the first two fields of length and descriptor type as we don't need them in our
+ // DeviceDescriptor structure.
+ let mut descbuf: Vec<u8> = constants::U2FHID_DEVICE_DESC.to_vec();
+ let mut configbuf: Vec<u8> = constants::U2FHID_CONFIG_DESC.to_vec();
+ descbuf.append(&mut configbuf);
+ parse_usbfs_descriptors(&descbuf).map_err(BackendError::GetDeviceDescriptor)
+ }
+
+ fn get_active_configuration(&mut self) -> BackendResult<u8> {
+ let descriptor_tree = self.get_device_descriptor_tree()?;
+ if descriptor_tree.bNumConfigurations != 1 {
+ error!(
+ "Fido devices should only have one configuration, found {}",
+ descriptor_tree.bNumConfigurations
+ );
+ } else if let Some(config_descriptor) = descriptor_tree.get_config_descriptor_by_index(0) {
+ return Ok(config_descriptor.bConfigurationValue);
+ }
+ Err(BackendError::GetActiveConfig(UsbUtilError::DescriptorParse))
+ }
+
+ fn set_active_configuration(&mut self, config: u8) -> BackendResult<()> {
+ // Fido devices only have one configuration so we should do nothing here.
+ // Return an error if the configuration number is unexpected.
+ if config != 0 {
+ error!(
+ "Requested to set fido active configuration of {config}, but only 0 is allowed."
+ );
+ return Err(BackendError::BadBackendProviderState);
+ }
+ Ok(())
+ }
+
+ fn clear_feature(&mut self, _value: u16, _index: u16) -> BackendResult<TransferStatus> {
+ // Nothing to do here, just return.
+ Ok(TransferStatus::Completed)
+ }
+
+ fn create_endpoints(&mut self, _config_descriptor: &ConfigDescriptorTree) -> BackendResult<()> {
+ let mut endpoints = Vec::new();
+ let device_state = self.get_device_state();
+ // We ignore the config descriptor because u2f-hid endpoints are already defined by the
+ // protocol and are unchanging.
+ // Endpoint 1 (OUT)
+ endpoints.push(UsbEndpoint::new(
+ device_state.read().unwrap().fail_handle.clone(),
+ device_state.read().unwrap().job_queue.clone(),
+ 1,
+ EndpointDirection::HostToDevice,
+ EndpointType::Interrupt,
+ ));
+ // Endpoint 1 (IN)
+ endpoints.push(UsbEndpoint::new(
+ device_state.read().unwrap().fail_handle.clone(),
+ device_state.read().unwrap().job_queue.clone(),
+ 1,
+ EndpointDirection::DeviceToHost,
+ EndpointType::Interrupt,
+ ));
+ device_state.write().unwrap().endpoints = endpoints;
+ Ok(())
+ }
+}
+
+impl XhciBackendDevice for FidoPassthroughDevice {
+ fn get_backend_type(&self) -> BackendType {
+ BackendType::Usb2
+ }
+
+ fn get_vid(&self) -> u16 {
+ // Google vendor ID
+ 0x18d1
+ }
+
+ fn get_pid(&self) -> u16 {
+ // Unique Product ID
+ 0xf1d0
+ }
+
+ fn set_address(&mut self, _address: UsbDeviceAddress) {
+ // Nothing to do here
+ }
+
+ fn reset(&mut self) -> BackendResult<()> {
+ let mut device_lock = self.device.lock();
+ device_lock.set_active(false);
+ device_lock.guest_key.lock().reset();
+ device_lock.transaction_manager.lock().reset();
+ Ok(())
+ }
+
+ fn get_speed(&self) -> Option<DeviceSpeed> {
+ Some(DeviceSpeed::Full)
+ }
+
+ fn alloc_streams(&self, _ep: u8, _num_streams: u16) -> BackendResult<()> {
+ // FIDO devices don't support bulk/streams so we ignore this request.
+ Ok(())
+ }
+
+ fn free_streams(&self, _ep: u8) -> BackendResult<()> {
+ // FIDO devices don't support bulk/streams so we ignore this request.
+ Ok(())
+ }
+
+ fn stop(&mut self) {
+ // Transition the FIDO device into inactive mode and mark device as lost.
+ // The FIDO device cannot error on reset so we can unwrap safely.
+ self.reset().unwrap();
+ self.device.lock().is_device_lost = true;
+ }
+}
diff --git a/devices/src/usb/backend/fido_backend/fido_provider.rs b/devices/src/usb/backend/fido_backend/fido_provider.rs
new file mode 100644
index 0000000..78b2808
--- /dev/null
+++ b/devices/src/usb/backend/fido_backend/fido_provider.rs
@@ -0,0 +1,40 @@
+// Copyright 2024 The ChromiumOS Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+use std::fs::File;
+use std::sync::Arc;
+
+use sync::Mutex;
+
+use crate::usb::backend::device::BackendDeviceType;
+use crate::usb::backend::device::DeviceState;
+use crate::usb::backend::error::Error;
+use crate::usb::backend::error::Result;
+use crate::usb::backend::fido_backend::fido_device::FidoDevice;
+use crate::usb::backend::fido_backend::fido_passthrough::FidoPassthroughDevice;
+use crate::usb::backend::utils::UsbUtilEventHandler;
+use crate::utils::EventHandler;
+use crate::utils::EventLoop;
+
+/// Utility function to attach a security key device to the backend provider. It initializes a
+/// `FidoPassthroughDevice` and returns it with its `EventHandler` to the backend.
+pub fn attach_security_key(
+ hidraw: File,
+ event_loop: Arc<EventLoop>,
+ device_state: DeviceState,
+) -> Result<(Arc<Mutex<BackendDeviceType>>, Arc<dyn EventHandler>)> {
+ let device =
+ FidoDevice::new(hidraw, event_loop.clone()).map_err(Error::CreateFidoBackendDevice)?;
+ let passthrough_device =
+ FidoPassthroughDevice::new(Arc::new(Mutex::new(device)), device_state, event_loop)
+ .map_err(Error::CreateFidoBackendDevice)?;
+ let device_impl = BackendDeviceType::FidoDevice(passthrough_device);
+ let arc_mutex_device = Arc::new(Mutex::new(device_impl));
+
+ let event_handler: Arc<dyn EventHandler> = Arc::new(UsbUtilEventHandler {
+ device: arc_mutex_device.clone(),
+ });
+
+ Ok((arc_mutex_device, event_handler))
+}
diff --git a/devices/src/usb/backend/fido_backend/fido_transaction.rs b/devices/src/usb/backend/fido_backend/fido_transaction.rs
new file mode 100644
index 0000000..b896bcb
--- /dev/null
+++ b/devices/src/usb/backend/fido_backend/fido_transaction.rs
@@ -0,0 +1,333 @@
+// Copyright 2024 The ChromiumOS Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+use std::collections::VecDeque;
+use std::time::Instant;
+
+use base::error;
+use base::warn;
+
+cfg_if::cfg_if! {
+ if #[cfg(test)] {
+ use base::FakeClock as Clock;
+ } else {
+ use base::Clock;
+ }
+}
+
+use crate::usb::backend::fido_backend::constants;
+use crate::usb::backend::fido_backend::error::Result;
+use crate::usb::backend::fido_backend::poll_thread::PollTimer;
+
+/// Struct representation of a u2f-hid transaction according to the U2FHID protocol standard.
+#[derive(Clone, Copy, Debug)]
+pub struct FidoTransaction {
+ /// Client ID of the transaction
+ pub cid: u32,
+ /// BCNT of the response.
+ pub resp_bcnt: u16,
+ /// Total size of the response.
+ pub resp_size: u16,
+ /// Unique nonce for broadcast transactions.
+ /// The nonce size is 8 bytes, if no nonce is given it's empty
+ pub nonce: [u8; constants::NONCE_SIZE],
+ /// Timestamp of the transaction submission time.
+ submission_time: Instant,
+}
+
+/// Struct to keep track of all active transactions. It cycles through them, starts, stops and
+/// removes outdated ones as they expire.
+pub struct TransactionManager {
+ /// Sorted (by age) list of transactions.
+ transactions: VecDeque<FidoTransaction>,
+ /// Timestamp of the latest transaction.
+ last_transaction_time: Instant,
+ /// Timer used to poll for expired transactions.
+ pub transaction_timer: PollTimer,
+ /// Clock representation, overridden for testing.
+ clock: Clock,
+}
+
+impl TransactionManager {
+ pub fn new() -> Result<TransactionManager> {
+ let timer = PollTimer::new(
+ "transaction timer".to_string(),
+ // Transactions expire after 120 seconds, polling a tenth of the time
+ // sounds acceptable
+ std::time::Duration::from_millis(constants::TRANSACTION_TIMEOUT_MILLIS / 10),
+ )?;
+ let clock = Clock::new();
+ Ok(TransactionManager {
+ transactions: VecDeque::new(),
+ last_transaction_time: clock.now(),
+ clock,
+ transaction_timer: timer,
+ })
+ }
+
+ pub fn pop_transaction(&mut self) -> Option<FidoTransaction> {
+ self.transactions.pop_front()
+ }
+
+ /// Attempts to close a transaction if it exists. Otherwise it silently drops it.
+ /// It returns true to signal that there's no more transactions active and the device can
+ /// return to an idle state.
+ pub fn close_transaction(&mut self, cid: u32) -> bool {
+ match self.transactions.iter().position(|t| t.cid == cid) {
+ Some(index) => {
+ self.transactions.remove(index);
+ }
+ None => {
+ warn!(
+ "Tried to close a transaction that does not exist. Silently dropping request."
+ );
+ }
+ };
+
+ if self.transactions.is_empty() {
+ return true;
+ }
+ false
+ }
+
+ /// Starts a new transaction in the queue. Returns true if it is the first transaction,
+ /// signaling that the device would have to transition from idle to active state.
+ pub fn start_transaction(&mut self, cid: u32, nonce: [u8; constants::NONCE_SIZE]) -> bool {
+ let transaction = FidoTransaction {
+ cid,
+ resp_bcnt: 0,
+ resp_size: 0,
+ nonce,
+ submission_time: self.clock.now(),
+ };
+
+ // Remove the oldest transaction
+ if self.transactions.len() >= constants::MAX_TRANSACTIONS {
+ let _ = self.pop_transaction();
+ }
+ self.last_transaction_time = transaction.submission_time;
+ self.transactions.push_back(transaction);
+ if self.transactions.len() == 1 {
+ return true;
+ }
+ false
+ }
+
+ /// Tests the transaction expiration time. If the latest transaction time is beyond the
+ /// acceptable timeout, it removes all transactions and signals to reset the device (returns
+ /// true).
+ pub fn expire_transactions(&mut self) -> bool {
+ // We have no transactions pending, so we can just return true
+ if self.transactions.is_empty() {
+ return true;
+ }
+
+ // The transaction manager resets if transactions took too long. We use duration_since
+ // instead of elapsed so we can work with fake clocks in tests.
+ if self
+ .clock
+ .now()
+ .duration_since(self.last_transaction_time)
+ .as_millis()
+ >= constants::TRANSACTION_TIMEOUT_MILLIS.into()
+ {
+ self.reset();
+ return true;
+ }
+ false
+ }
+
+ /// Resets the `TransactionManager`, dropping all pending transactions.
+ pub fn reset(&mut self) {
+ self.transactions = VecDeque::new();
+ self.last_transaction_time = self.clock.now();
+ if let Err(e) = self.transaction_timer.clear() {
+ error!(
+ "Unable to clear transaction manager timer, silently failing. {}",
+ e
+ );
+ }
+ }
+
+ /// Updates the bcnt and size of the first transaction that matches the given CID.
+ pub fn update_transaction(&mut self, cid: u32, resp_bcnt: u16, resp_size: u16) {
+ let index = match self
+ .transactions
+ .iter()
+ .position(|t: &FidoTransaction| t.cid == cid)
+ {
+ Some(index) => index,
+ None => {
+ warn!(
+ "No u2f transaction found with (cid {}) in the list. Skipping.",
+ cid
+ );
+ return;
+ }
+ };
+ match self.transactions.get_mut(index) {
+ Some(t_ref) => {
+ t_ref.resp_bcnt = resp_bcnt;
+ t_ref.resp_size = resp_size;
+ }
+ None => {
+ error!(
+ "A u2f transaction was found at index {} but now is gone. This is a bug.",
+ index
+ );
+ }
+ };
+ }
+
+ /// Returns the first transaction that matches the given CID.
+ pub fn get_transaction(&mut self, cid: u32) -> Option<FidoTransaction> {
+ let index = match self
+ .transactions
+ .iter()
+ .position(|t: &FidoTransaction| t.cid == cid)
+ {
+ Some(index) => index,
+ None => {
+ return None;
+ }
+ };
+ match self.transactions.get(index) {
+ Some(t_ref) => Some(*t_ref),
+ None => {
+ error!(
+ "A u2f transaction was found at index {} but now is gone. This is a bug.",
+ index
+ );
+ None
+ }
+ }
+ }
+
+ /// Returns the first broadcast transaction that matches the given nonce.
+ pub fn get_transaction_from_nonce(
+ &mut self,
+ nonce: [u8; constants::NONCE_SIZE],
+ ) -> Option<FidoTransaction> {
+ let index =
+ match self.transactions.iter().position(|t: &FidoTransaction| {
+ t.cid == constants::BROADCAST_CID && t.nonce == nonce
+ }) {
+ Some(index) => index,
+ None => {
+ return None;
+ }
+ };
+ match self.transactions.get(index) {
+ Some(t_ref) => Some(*t_ref),
+ None => {
+ error!(
+ "A u2f transaction was found at index {} but now is gone. This is a bug.",
+ index
+ );
+ None
+ }
+ }
+ }
+}
+
+#[cfg(test)]
+mod tests {
+
+ use crate::usb::backend::fido_backend::constants::EMPTY_NONCE;
+ use crate::usb::backend::fido_backend::constants::MAX_TRANSACTIONS;
+ use crate::usb::backend::fido_backend::constants::TRANSACTION_TIMEOUT_MILLIS;
+ use crate::usb::backend::fido_backend::fido_transaction::TransactionManager;
+
+ #[test]
+ fn test_start_transaction() {
+ let mut manager = TransactionManager::new().unwrap();
+ let cid = 1234;
+
+ assert!(manager.start_transaction(cid, EMPTY_NONCE));
+ assert_eq!(manager.transactions.len(), 1);
+ assert_eq!(manager.last_transaction_time, manager.clock.now());
+
+ manager.clock.add_ns(100);
+
+ assert!(!manager.start_transaction(cid, EMPTY_NONCE));
+ assert_eq!(manager.transactions.len(), 2);
+ assert_eq!(manager.last_transaction_time, manager.clock.now());
+
+ manager.reset();
+
+ // We check that we silently drop old transactions once we go over the MAX_TRANSACTIONS
+ // limit.
+ for _ in 0..MAX_TRANSACTIONS + 1 {
+ manager.start_transaction(cid, EMPTY_NONCE);
+ }
+
+ assert_eq!(manager.transactions.len(), MAX_TRANSACTIONS);
+ }
+
+ #[test]
+ fn test_pop_transaction() {
+ let mut manager = TransactionManager::new().unwrap();
+ let cid1 = 1234;
+ let cid2 = 5678;
+
+ manager.start_transaction(cid1, EMPTY_NONCE);
+ manager.start_transaction(cid2, EMPTY_NONCE);
+
+ let popped_transaction = manager.pop_transaction().unwrap();
+
+ assert_eq!(popped_transaction.cid, cid1);
+ }
+
+ #[test]
+ fn test_close_transaction() {
+ let mut manager = TransactionManager::new().unwrap();
+ let cid1 = 1234;
+ let cid2 = 5678;
+
+ manager.start_transaction(cid1, EMPTY_NONCE);
+ manager.start_transaction(cid2, EMPTY_NONCE);
+
+ assert!(!manager.close_transaction(cid2));
+ // We run this a second time to test it doesn't error out when closing already closed
+ // transactions.
+ assert!(!manager.close_transaction(cid2));
+ assert_eq!(manager.transactions.len(), 1);
+ assert!(manager.close_transaction(cid1));
+ }
+
+ #[test]
+ fn test_update_transaction() {
+ let mut manager = TransactionManager::new().unwrap();
+ let cid = 1234;
+ let bcnt = 17;
+ let size = 56;
+
+ manager.start_transaction(cid, EMPTY_NONCE);
+ manager.update_transaction(cid, bcnt, size);
+
+ let transaction = manager.get_transaction(cid).unwrap();
+
+ assert_eq!(transaction.resp_bcnt, bcnt);
+ assert_eq!(transaction.resp_size, size);
+ }
+
+ #[test]
+ fn test_expire_transactions() {
+ let mut manager = TransactionManager::new().unwrap();
+ let cid = 1234;
+
+ // No transactions, so it defaults to true
+ assert!(manager.expire_transactions());
+
+ manager.start_transaction(cid, EMPTY_NONCE);
+ assert!(!manager.expire_transactions());
+
+ // Advance clock beyond expiration time, convert milliseconds to nanoseconds
+ manager
+ .clock
+ .add_ns(TRANSACTION_TIMEOUT_MILLIS * 1000000 + 1);
+ assert!(manager.expire_transactions());
+ assert_eq!(manager.transactions.len(), 0);
+ }
+}
diff --git a/devices/src/usb/backend/fido_backend/hid_utils.rs b/devices/src/usb/backend/fido_backend/hid_utils.rs
new file mode 100644
index 0000000..597521b
--- /dev/null
+++ b/devices/src/usb/backend/fido_backend/hid_utils.rs
@@ -0,0 +1,72 @@
+// Copyright 2024 The ChromiumOS Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+use std::fs::File;
+use std::os::raw::c_int;
+
+use base::handle_eintr_errno;
+use base::ioctl_ior_nr;
+
+use crate::usb::backend::fido_backend::constants;
+use crate::usb::backend::fido_backend::error::Error;
+use crate::usb::backend::fido_backend::error::Result;
+
+#[repr(C)]
+#[derive(Clone)]
+pub struct HidrawReportDescriptor {
+ pub size: u32,
+ pub value: [u8; constants::HID_MAX_DESCRIPTOR_SIZE],
+}
+
+pub const HID_IO_TYPE: u32 = 'H' as u32;
+
+ioctl_ior_nr!(HIDIOCGRDESCSIZE, HID_IO_TYPE, 0x01, c_int);
+ioctl_ior_nr!(HIDIOCGRDESC, HID_IO_TYPE, 0x02, HidrawReportDescriptor);
+
+/// Verifies that the given `hidraw` file handle is a valid FIDO device.
+/// In case it is not, it returns an `InvalidHidrawDevice` error.
+pub fn verify_is_fido_device(hidraw: &File) -> Result<()> {
+ let mut desc_size: c_int = 0;
+ // SAFETY:
+ // Safe because:
+ // - We check the return value after the call.
+// - ioctl(HIDIOCGRDESCSIZE) does not hold the descriptor after the call.
+ unsafe {
+ let ret = handle_eintr_errno!(base::ioctl_with_mut_ref(
+ hidraw,
+ HIDIOCGRDESCSIZE(),
+ &mut desc_size
+ ));
+ if ret < 0 || (desc_size as usize) < constants::HID_REPORT_DESC_HEADER.len() {
+ return Err(Error::InvalidHidrawDevice);
+ }
+ }
+
+ let mut descriptor = HidrawReportDescriptor {
+ size: desc_size as u32,
+ value: [0; constants::HID_MAX_DESCRIPTOR_SIZE],
+ };
+
+ // SAFETY:
+ // Safe because:
+ // - We check the return value after the call.
+ // - ioctl(HIDIOCGRDESC) does not hold the descriptor after the call.
+ unsafe {
+ let ret = handle_eintr_errno!(base::ioctl_with_mut_ref(
+ hidraw,
+ HIDIOCGRDESC(),
+ &mut descriptor
+ ));
+ if ret < 0 {
+ return Err(Error::InvalidHidrawDevice);
+ }
+ }
+
+ if descriptor.value[..constants::HID_REPORT_DESC_HEADER.len()]
+ != *constants::HID_REPORT_DESC_HEADER
+ {
+ return Err(Error::InvalidHidrawDevice);
+ }
+ Ok(())
+}
diff --git a/devices/src/usb/backend/fido_backend/mod.rs b/devices/src/usb/backend/fido_backend/mod.rs
new file mode 100644
index 0000000..cc5a1b7
--- /dev/null
+++ b/devices/src/usb/backend/fido_backend/mod.rs
@@ -0,0 +1,14 @@
+// Copyright 2024 The ChromiumOS Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+mod constants;
+pub mod error;
+pub mod fido_device;
+pub mod fido_guest;
+pub mod fido_passthrough;
+pub mod fido_provider;
+pub mod fido_transaction;
+pub mod hid_utils;
+pub mod poll_thread;
+pub mod transfer;
diff --git a/devices/src/usb/backend/fido_backend/poll_thread.rs b/devices/src/usb/backend/fido_backend/poll_thread.rs
new file mode 100644
index 0000000..87ff3cb
--- /dev/null
+++ b/devices/src/usb/backend/fido_backend/poll_thread.rs
@@ -0,0 +1,314 @@
+// Copyright 2024 The ChromiumOS Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+//! This file contains all functions and structs used to handle polling operations for the fido
+//! backend device.
+
+use std::collections::VecDeque;
+use std::sync::Arc;
+use std::time::Duration;
+
+use anyhow::Context;
+use base::debug;
+use base::error;
+use base::AsRawDescriptor;
+use base::Event;
+use base::EventToken;
+use base::RawDescriptor;
+use base::Timer;
+use base::TimerTrait;
+use base::WaitContext;
+use sync::Mutex;
+use usb_util::TransferStatus;
+
+use crate::usb::backend::fido_backend::error::Error;
+use crate::usb::backend::fido_backend::error::Result;
+use crate::usb::backend::fido_backend::fido_device::FidoDevice;
+use crate::usb::backend::fido_backend::transfer::FidoTransfer;
+use crate::usb::backend::fido_backend::transfer::FidoTransferHandle;
+use crate::usb::backend::transfer::BackendTransfer;
+use crate::usb::backend::transfer::GenericTransferHandle;
+
+#[derive(EventToken)]
+enum Token {
+ TransactionPollTimer,
+ TransferPollTimer,
+ PacketPollTimer,
+ Kill,
+}
+
+/// PollTimer is a wrapper around the crosvm-provided `Timer` struct with a focus on maintaining a
+/// regular interval with easy `arm()` and `clear()` methods to start and stop the timer
+/// transparently from the interval.
+pub struct PollTimer {
+ name: String,
+ timer: Timer,
+ interval: Duration,
+}
+
+impl PollTimer {
+ pub fn new(name: String, interval: Duration) -> Result<Self> {
+ let timer = Timer::new().map_err(Error::CannotCreatePollTimer)?;
+ Ok(PollTimer {
+ name,
+ timer,
+ interval,
+ })
+ }
+
+ /// Arms the timer with its initialized interval.
+ pub fn arm(&mut self) -> Result<()> {
+ self.timer
+ .reset(self.interval, None)
+ .map_err(|error| Error::CannotArmPollTimer {
+ name: self.name.clone(),
+ error,
+ })
+ }
+
+ /// Clears the timer, disarming it.
+ pub fn clear(&mut self) -> Result<()> {
+ self.timer
+ .clear()
+ .map_err(|error| Error::CannotClearPollTimer {
+ name: self.name.clone(),
+ error,
+ })
+ }
+}
+
+impl AsRawDescriptor for PollTimer {
+ fn as_raw_descriptor(&self) -> RawDescriptor {
+ self.timer.as_raw_descriptor()
+ }
+}
+
+/// This function is the main poll thread. It periodically wakes up to emulate a USB interrupt
+/// (poll) device behavior. It takes care of three different poll timers:
+/// - `PacketPollTimer`: periodically polls for available USB transfers waiting for data
+/// - `TransferPollTimer`: times out USB transfers that stay pending for too long without data
+/// - `TransactionPollTimer`: puts the security key device to sleep when transactions time out
+pub fn poll_for_pending_packets(
+ device: Arc<Mutex<FidoDevice>>,
+ pending_in_transfers: Arc<
+ Mutex<VecDeque<(FidoTransferHandle, Arc<Mutex<Option<FidoTransfer>>>)>>,
+ >,
+ kill_evt: Event,
+) -> Result<()> {
+ let device_lock = device.lock();
+ let wait_ctx: WaitContext<Token> = WaitContext::build_with(&[
+ (&device_lock.guest_key.lock().timer, Token::PacketPollTimer),
+ (&device_lock.transfer_timer, Token::TransferPollTimer),
+ (
+ &device_lock.transaction_manager.lock().transaction_timer,
+ Token::TransactionPollTimer,
+ ),
+ (&kill_evt, Token::Kill),
+ ])
+ .context("poll worker context failed")
+ .map_err(Error::WaitContextFailed)?;
+ drop(device_lock);
+
+ loop {
+ let events = wait_ctx
+ .wait()
+ .context("wait failed")
+ .map_err(Error::WaitContextFailed)?;
+ for event in events.iter().filter(|e| e.is_readable) {
+ match event.token {
+ // This timer checks that we have u2f host packets pending, waiting to be sent to
+ // the guest, and that we have a valid USB transfer from the guest waiting for
+ // data.
+ Token::PacketPollTimer => {
+ handle_packet_poll(&device, &pending_in_transfers)?;
+ // If there are still transfers waiting in the queue we continue polling.
+ if packet_timer_needs_rearm(&device, &pending_in_transfers) {
+ device.lock().guest_key.lock().timer.arm()?;
+ }
+ }
+ // This timer takes care of expiring USB transfers from the guest as they time out
+ // waiting for data from the host. It is the equivalent of a USB interrupt poll
+ // thread.
+ Token::TransferPollTimer => {
+ let mut transfers_lock = pending_in_transfers.lock();
+
+ transfers_lock.retain(process_pending_transfer);
+
+ // If the device has died, we need to tell the first pending transfer
+ // that the device has been lost at the xhci level, so we can safely detach the
+ // device from the guest.
+ if device.lock().is_device_lost {
+ let (_, transfer_opt) = match transfers_lock.pop_front() {
+ Some(tuple) => tuple,
+ None => {
+ // No pending transfers waiting for data, so we do nothing.
+ continue;
+ }
+ };
+ signal_device_lost(transfer_opt.lock().take());
+ return Ok(());
+ }
+
+ // If we still have pending transfers waiting, we keep polling, otherwise we
+ // stop.
+ if transfers_lock.len() > 0 {
+ device.lock().transfer_timer.arm()?;
+ } else {
+ device.lock().transfer_timer.clear()?;
+ }
+ }
+ // This timer takes care of timing out u2f transactions that haven't seen any
+ // activity from either guest or host for a long-enough time.
+ Token::TransactionPollTimer => {
+ // If transactions aren't expired, re-arm
+ if !device
+ .lock()
+ .transaction_manager
+ .lock()
+ .expire_transactions()
+ {
+ device
+ .lock()
+ .transaction_manager
+ .lock()
+ .transaction_timer
+ .arm()?;
+ }
+ }
+ Token::Kill => {
+                    debug!("Fido poll thread exited successfully.");
+ return Ok(());
+ }
+ }
+ }
+ }
+}
+
+/// Handles polling for available data to send back to the guest.
+fn handle_packet_poll(
+ device: &Arc<Mutex<FidoDevice>>,
+ pending_in_transfers: &Arc<
+ Mutex<VecDeque<(FidoTransferHandle, Arc<Mutex<Option<FidoTransfer>>>)>>,
+ >,
+) -> Result<()> {
+ if device.lock().is_device_lost {
+ // Rather than erroring here, we just return Ok as the case of a device being lost is
+ // handled by the transfer timer.
+ return Ok(());
+ }
+ let mut transfers_lock = pending_in_transfers.lock();
+
+ // Process and remove expired or cancelled transfers
+ transfers_lock.retain(process_pending_transfer);
+
+ if transfers_lock.is_empty() {
+ // We cannot do anything, the active transfers got pruned.
+ // Return Ok() and let the poll thread handle the missing packets.
+ return Ok(());
+ }
+
+ // Fetch first available transfer from the pending list and its fail handle.
+ let (_, transfer_opt) = match transfers_lock.pop_front() {
+ Some(tuple) => tuple,
+ None => {
+ // No pending transfers waiting for data, so we do nothing.
+ return Ok(());
+ }
+ };
+ drop(transfers_lock);
+
+ let mut transfer_lock = transfer_opt.lock();
+ let transfer = transfer_lock.take();
+
+ // Obtain the next packet from the guest key and send it to the guest
+ match device
+ .lock()
+ .guest_key
+ .lock()
+ .return_data_to_guest(transfer)?
+ {
+ None => {
+ // The transfer was successful, nothing to do.
+ Ok(())
+ }
+ transfer => {
+ // We received our transfer back, it means there's no data available to return to the
+ // guest.
+ *transfer_lock = transfer;
+ drop(transfer_lock);
+ let cancel_handle = FidoTransferHandle {
+ weak_transfer: Arc::downgrade(&transfer_opt),
+ };
+
+ // Put the transfer back into the pending queue, we can try again later.
+ pending_in_transfers
+ .lock()
+ .push_front((cancel_handle, transfer_opt));
+ Ok(())
+ }
+ }
+}
+
+/// Filter function used to check for expired or cancelled transfers. It is called over each
+/// USB transfer waiting in the pending queue. Returns true if the given transfer is still valid,
+/// otherwise false.
+fn process_pending_transfer(
+ transfer_handle_pair: &(FidoTransferHandle, Arc<Mutex<Option<FidoTransfer>>>),
+) -> bool {
+ let mut lock = transfer_handle_pair.1.lock();
+ let transfer = match lock.take() {
+ Some(t) => {
+ // The transfer has already been cancelled. We report back to the xhci level and remove
+ // it.
+ if t.status() == TransferStatus::Cancelled {
+ t.complete_transfer();
+ return false;
+ }
+ // The transfer has expired, we cancel it and report back to the xhci level.
+ if t.timeout_expired() {
+ if let Err(e) = transfer_handle_pair.0.cancel() {
+ error!("Failed to properly cancel IN transfer, dropping the request: {e:#}");
+ return false;
+ }
+ t.complete_transfer();
+ return false;
+ }
+ Some(t)
+ }
+ None => {
+ // Transfer has already been removed so we can skip it.
+ return false;
+ }
+ };
+ *lock = transfer;
+
+ true
+}
+
+/// Signals to the current transfer that the underlying device has been lost and the xhci layer
+/// should recover by detaching the FIDO backend.
+fn signal_device_lost(transfer_opt: Option<FidoTransfer>) {
+ if let Some(mut transfer) = transfer_opt {
+ transfer.signal_device_lost();
+ transfer.complete_transfer();
+ }
+}
+
+/// Checks whether we should re-arm the packet poll timer or not.
+fn packet_timer_needs_rearm(
+ device: &Arc<Mutex<FidoDevice>>,
+ pending_in_transfers: &Arc<
+ Mutex<VecDeque<(FidoTransferHandle, Arc<Mutex<Option<FidoTransfer>>>)>>,
+ >,
+) -> bool {
+ let transfers_lock = pending_in_transfers.lock();
+ if transfers_lock.is_empty() {
+ // If there are no transfers pending, it means that some packet got stuck or lost,
+ // so we just reset the entire device state since no one is waiting for a
+ // response from the xhci level anyway.
+ device.lock().guest_key.lock().reset();
+ return false;
+ }
+ true
+}
diff --git a/devices/src/usb/backend/fido_backend/transfer.rs b/devices/src/usb/backend/fido_backend/transfer.rs
new file mode 100644
index 0000000..edb1ef3
--- /dev/null
+++ b/devices/src/usb/backend/fido_backend/transfer.rs
@@ -0,0 +1,124 @@
+// Copyright 2024 The ChromiumOS Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+use std::sync::Weak;
+use std::time::Instant;
+
+use base::error;
+use base::Clock;
+use sync::Mutex;
+use usb_util::TransferBuffer;
+use usb_util::TransferStatus;
+
+use crate::usb::backend::error::Error as BackendError;
+use crate::usb::backend::error::Result as BackendResult;
+use crate::usb::backend::fido_backend::constants::USB_TRANSFER_TIMEOUT_MILLIS;
+use crate::usb::backend::transfer::BackendTransfer;
+use crate::usb::backend::transfer::BackendTransferType;
+use crate::usb::backend::transfer::GenericTransferHandle;
+
+/// Implementation of a generic USB transfer for the FIDO backend. It implements common USB
+/// transfer functionality since it cannot rely on the transfer structures provided by the
+/// usb_utils crate as the FIDO backend does not use usbdevfs to communicate with the host.
+pub struct FidoTransfer {
+ /// TransferBuffer structure with either a request or response data from the guest/host.
+ pub buffer: TransferBuffer,
+ /// Status of the transfer, used by the xhci layer for a successful completion.
+ status: TransferStatus,
+ /// Actual length of the transfer, as per USB specs.
+ pub actual_length: usize,
+ /// USB endpoint associated with this transfer.
+ pub endpoint: u8,
+ /// Timestamp of the transfer submission time.
+ submission_time: Instant,
+ /// Callback to be executed once the transfer has completed, to signal the xhci layer.
+ pub callback: Option<Box<dyn Fn(FidoTransfer) + Send + Sync>>,
+}
+
+impl FidoTransfer {
+ pub fn new(endpoint: u8, buffer: TransferBuffer) -> FidoTransfer {
+ let clock = Clock::new();
+ FidoTransfer {
+ buffer,
+ status: TransferStatus::Error, // Default to error
+ actual_length: 0,
+ endpoint,
+ submission_time: clock.now(),
+ callback: None,
+ }
+ }
+
+ /// Called when the device is lost and we need to signal to the xhci layer that the transfer
+ /// cannot continue and the device should be detached.
+ pub fn signal_device_lost(&mut self) {
+ self.status = TransferStatus::NoDevice;
+ }
+
+ /// Checks if the current transfer should time out or not
+ pub fn timeout_expired(&self) -> bool {
+ self.submission_time.elapsed().as_millis() >= USB_TRANSFER_TIMEOUT_MILLIS.into()
+ }
+
+ /// Finalizes the transfer by setting the right status and then calling the callback to signal
+ /// the xhci layer.
+ pub fn complete_transfer(mut self) {
+ // The default status is "Error". Unless it was explicitly set to Cancel or NoDevice,
+ // we can just transition it to Completed instead.
+ if self.status == TransferStatus::Error {
+ self.status = TransferStatus::Completed;
+ }
+
+ if let Some(cb) = self.callback.take() {
+ cb(self);
+ }
+ }
+}
+
+impl BackendTransfer for FidoTransfer {
+ fn status(&self) -> TransferStatus {
+ self.status
+ }
+
+ fn actual_length(&self) -> usize {
+ self.actual_length
+ }
+
+ fn buffer(&self) -> &TransferBuffer {
+ &self.buffer
+ }
+
+ fn set_callback<C: 'static + Fn(BackendTransferType) + Send + Sync>(&mut self, cb: C) {
+ let callback = move |t: FidoTransfer| cb(BackendTransferType::FidoDevice(t));
+ self.callback = Some(Box::new(callback));
+ }
+}
+
+/// Implementation of a cancel handler for `FidoTransfer`
+pub struct FidoTransferHandle {
+ pub weak_transfer: Weak<Mutex<Option<FidoTransfer>>>,
+}
+
+impl GenericTransferHandle for FidoTransferHandle {
+ fn cancel(&self) -> BackendResult<()> {
+ let rc_transfer = match self.weak_transfer.upgrade() {
+ None => {
+ return Err(BackendError::TransferHandleAlreadyComplete);
+ }
+ Some(rc_transfer) => rc_transfer,
+ };
+
+ let mut lock = rc_transfer.lock();
+
+ let mut transfer = match lock.take() {
+ Some(t) => t,
+ None => {
+ error!("Transfer has already been lost while being cancelled. Ignore");
+ return Err(BackendError::TransferHandleAlreadyComplete);
+ }
+ };
+ transfer.status = TransferStatus::Cancelled;
+ *lock = Some(transfer);
+ Ok(())
+ }
+}
diff --git a/devices/src/usb/backend/host_backend/host_device.rs b/devices/src/usb/backend/host_backend/host_device.rs
index 6bc1eab..c24eb5b 100644
--- a/devices/src/usb/backend/host_backend/host_device.rs
+++ b/devices/src/usb/backend/host_backend/host_device.rs
@@ -89,7 +89,7 @@
let config_descriptor = self.get_config_descriptor_by_index(descriptor_index)?;
- let device_descriptor = self.get_device_descriptor_tree();
+ let device_descriptor = self.get_device_descriptor_tree()?;
let config_start = config_descriptor.offset();
let config_end = config_start + config_descriptor.wTotalLength as usize;
let mut descriptor_data = device_descriptor.raw()[config_start..config_end].to_vec();
@@ -160,6 +160,12 @@
}
}
+impl Drop for HostDevice {
+ fn drop(&mut self) {
+ self.release_interfaces();
+ }
+}
+
impl AsRawDescriptor for HostDevice {
fn as_raw_descriptor(&self) -> RawDescriptor {
self.device.lock().as_raw_descriptor()
@@ -184,6 +190,7 @@
.submit_transfer(transfer)
.map_err(Error::CreateTransfer)
.map(BackendTransferHandle::new),
+ _ => Err(Error::MalformedBackendTransfer),
}
}
@@ -249,8 +256,8 @@
.map_err(Error::GetConfigDescriptor)
}
- fn get_device_descriptor_tree(&mut self) -> DeviceDescriptorTree {
- self.device.lock().get_device_descriptor_tree().clone()
+ fn get_device_descriptor_tree(&mut self) -> Result<DeviceDescriptorTree> {
+ Ok(self.device.lock().get_device_descriptor_tree().clone())
}
fn get_active_configuration(&mut self) -> Result<u8> {
diff --git a/devices/src/usb/backend/mod.rs b/devices/src/usb/backend/mod.rs
index 6b4ab51..5690d89 100644
--- a/devices/src/usb/backend/mod.rs
+++ b/devices/src/usb/backend/mod.rs
@@ -6,6 +6,7 @@
pub mod device_provider;
pub mod endpoint;
pub mod error;
+pub mod fido_backend;
pub mod host_backend;
pub mod transfer;
pub mod utils;
diff --git a/devices/src/usb/backend/transfer.rs b/devices/src/usb/backend/transfer.rs
index cd7a006..4229d3d 100644
--- a/devices/src/usb/backend/transfer.rs
+++ b/devices/src/usb/backend/transfer.rs
@@ -9,6 +9,7 @@
use crate::usb::backend::endpoint::ControlEndpointState;
use crate::usb::backend::error::Result;
+use crate::usb::backend::fido_backend::transfer::FidoTransfer;
/// BackendTransferHandle is a wrapper structure around a generic transfer handle whose
/// implementation depends on the backend type that is being used.
@@ -30,6 +31,7 @@
pub enum BackendTransferType {
HostDevice(Transfer),
+ FidoDevice(FidoTransfer),
}
/// The backend transfer trait implemention is the interface of a generic transfer structure that
@@ -46,22 +48,26 @@
fn set_callback<C: 'static + Fn(BackendTransferType) + Send + Sync>(&mut self, cb: C);
}
+// TODO(morg): refactor with multi_dispatch
impl BackendTransfer for BackendTransferType {
fn status(&self) -> TransferStatus {
match self {
BackendTransferType::HostDevice(transfer) => BackendTransfer::status(transfer),
+ BackendTransferType::FidoDevice(transfer) => BackendTransfer::status(transfer),
}
}
fn actual_length(&self) -> usize {
match self {
BackendTransferType::HostDevice(transfer) => BackendTransfer::actual_length(transfer),
+ BackendTransferType::FidoDevice(transfer) => BackendTransfer::actual_length(transfer),
}
}
fn buffer(&self) -> &TransferBuffer {
match self {
BackendTransferType::HostDevice(transfer) => BackendTransfer::buffer(transfer),
+ BackendTransferType::FidoDevice(transfer) => BackendTransfer::buffer(transfer),
}
}
@@ -70,6 +76,9 @@
BackendTransferType::HostDevice(transfer) => {
BackendTransfer::set_callback(transfer, cb)
}
+ BackendTransferType::FidoDevice(transfer) => {
+ BackendTransfer::set_callback(transfer, cb)
+ }
}
}
}
diff --git a/devices/src/usb/backend/utils.rs b/devices/src/usb/backend/utils.rs
index d324e9d..ce2dde3 100644
--- a/devices/src/usb/backend/utils.rs
+++ b/devices/src/usb/backend/utils.rs
@@ -72,6 +72,9 @@
.lock()
.poll_transfers()
.context("UsbUtilEventHandler poll_transfers failed"),
+ BackendDeviceType::FidoDevice(fido_device) => fido_device
+ .read_hidraw_file()
+ .context("FidoDeviceEventHandler failed to read hidraw device"),
}
}
}
diff --git a/devices/src/vfio.rs b/devices/src/vfio.rs
index 6f96979..53efbeb 100644
--- a/devices/src/vfio.rs
+++ b/devices/src/vfio.rs
@@ -98,6 +98,8 @@
OpenContainer(io::Error),
#[error("failed to open {1} group: {0}")]
OpenGroup(io::Error, String),
+ #[error("failed to read {1} link: {0}")]
+ ReadLink(io::Error, PathBuf),
#[error("resources error: {0}")]
Resources(ResourcesError),
#[error("unknown vfio device type (flags: {0:#x})")]
@@ -780,7 +782,9 @@
let mut uuid_path = PathBuf::new();
uuid_path.push(sysfspath);
uuid_path.push("iommu_group");
- let group_path = uuid_path.read_link().map_err(|_| VfioError::InvalidPath)?;
+ let group_path = uuid_path
+ .read_link()
+ .map_err(|e| VfioError::ReadLink(e, uuid_path))?;
let group_osstr = group_path.file_name().ok_or(VfioError::InvalidPath)?;
let group_str = group_osstr.to_str().ok_or(VfioError::InvalidPath)?;
let group_id = group_str
diff --git a/devices/src/virtcpufreq.rs b/devices/src/virtcpufreq.rs
index 4ea5dd0..b896096 100644
--- a/devices/src/virtcpufreq.rs
+++ b/devices/src/virtcpufreq.rs
@@ -17,6 +17,9 @@
use crate::DeviceId;
use crate::Suspendable;
+const CPUFREQ_GOV_SCALE_FACTOR_DEFAULT: u32 = 100;
+const CPUFREQ_GOV_SCALE_FACTOR_SCHEDUTIL: u32 = 80;
+
const SCHED_FLAG_RESET_ON_FORK: u64 = 0x1;
const SCHED_FLAG_KEEP_POLICY: u64 = 0x08;
const SCHED_FLAG_KEEP_PARAMS: u64 = 0x10;
@@ -28,6 +31,7 @@
cpu_fmax: u32,
cpu_capacity: u32,
pcpu: u32,
+ util_factor: u32,
}
fn get_cpu_info(cpu_id: u32, property: &str) -> Result<u32, Error> {
@@ -38,6 +42,11 @@
.map_err(|_| Error::new(libc::EINVAL))
}
+fn get_cpu_info_str(cpu_id: u32, property: &str) -> Result<String, Error> {
+ let path = format!("/sys/devices/system/cpu/cpu{cpu_id}/{property}");
+ std::fs::read_to_string(path).map_err(|_| Error::new(libc::EINVAL))
+}
+
fn get_cpu_capacity(cpu_id: u32) -> Result<u32, Error> {
get_cpu_info(cpu_id, "cpu_capacity")
}
@@ -50,15 +59,25 @@
get_cpu_info(cpu_id, "cpufreq/scaling_cur_freq")
}
+fn get_cpu_util_factor(cpu_id: u32) -> Result<u32, Error> {
+ let gov = get_cpu_info_str(cpu_id, "cpufreq/scaling_governor")?;
+ match gov.trim() {
+ "schedutil" => Ok(CPUFREQ_GOV_SCALE_FACTOR_SCHEDUTIL),
+ _ => Ok(CPUFREQ_GOV_SCALE_FACTOR_DEFAULT),
+ }
+}
+
impl VirtCpufreq {
pub fn new(pcpu: u32, _socket: Option<Arc<Mutex<UnixStream>>>) -> Self {
let cpu_capacity = get_cpu_capacity(pcpu).expect("Error reading capacity");
let cpu_fmax = get_cpu_maxfreq_khz(pcpu).expect("Error reading max freq");
+ let util_factor = get_cpu_util_factor(pcpu).expect("Error getting util factor");
VirtCpufreq {
cpu_fmax,
cpu_capacity,
pcpu,
+ util_factor,
}
}
}
@@ -104,8 +123,8 @@
}
};
- // Undo 25% margin applied by schedutil governor to cpufreq.
- let cpu_cap_scaled = self.cpu_capacity * 80 / 100;
+ // Util margin depends on the cpufreq governor on the host
+ let cpu_cap_scaled = self.cpu_capacity * self.util_factor / CPUFREQ_GOV_SCALE_FACTOR_DEFAULT;
let util = cpu_cap_scaled * freq / self.cpu_fmax;
let mut sched_attr = sched_attr::default();
diff --git a/devices/src/virtio/async_device.rs b/devices/src/virtio/async_device.rs
index 5e38324..ff9c454 100644
--- a/devices/src/virtio/async_device.rs
+++ b/devices/src/virtio/async_device.rs
@@ -78,6 +78,7 @@
///
/// Returns `true` if the queue was running, `false` if it wasn't.
pub fn stop(&mut self) -> AsyncResult<bool> {
+ // TODO: schuffelen - All callers should use stop_async instead.
match std::mem::replace(self, AsyncQueueState::Broken) {
AsyncQueueState::Running((task, ex, handle)) => {
// Abort the task and run it to completion to retrieve the queue's resource.
@@ -92,4 +93,25 @@
}
}
}
+ /// Stops a previously started queue.
+ ///
+ /// The executor on which the task has been started will be run if needed in order to retrieve
+ /// the queue's resource.
+ ///
+ /// Returns `true` if the queue was running, `false` if it wasn't.
+ pub async fn stop_async(&mut self) -> AsyncResult<bool> {
+ match std::mem::replace(self, AsyncQueueState::Broken) {
+ AsyncQueueState::Running((task, _, handle)) => {
+ // Abort the task and run it to completion to retrieve the queue's resource.
+ handle.abort();
+ let resource = task.await;
+ *self = AsyncQueueState::Stopped(resource);
+ Ok(true)
+ }
+ state => {
+ *self = state;
+ Ok(false)
+ }
+ }
+ }
}
diff --git a/devices/src/virtio/balloon.rs b/devices/src/virtio/balloon.rs
index b354148..461f8d9 100644
--- a/devices/src/virtio/balloon.rs
+++ b/devices/src/virtio/balloon.rs
@@ -1576,11 +1576,9 @@
self.start_worker(mem, interrupt, queues)
}
- fn reset(&mut self) -> bool {
- if let StoppedWorker::AlreadyStopped = self.stop_worker() {
- return false;
- }
- true
+ fn reset(&mut self) -> anyhow::Result<()> {
+ let _worker = self.stop_worker();
+ Ok(())
}
fn virtio_sleep(&mut self) -> anyhow::Result<Option<BTreeMap<usize, Queue>>> {
diff --git a/devices/src/virtio/block/asynchronous.rs b/devices/src/virtio/block/asynchronous.rs
index dea1a08..813149d 100644
--- a/devices/src/virtio/block/asynchronous.rs
+++ b/devices/src/virtio/block/asynchronous.rs
@@ -1132,18 +1132,16 @@
Ok(())
}
- fn reset(&mut self) -> bool {
- let mut success = false;
+ fn reset(&mut self) -> anyhow::Result<()> {
while let Some((_, (worker_thread, _))) = self.worker_threads.pop_first() {
let (disk_image, control_tube) = worker_thread.stop();
self.disk_image = Some(disk_image);
if let Some(control_tube) = control_tube {
self.control_tube = Some(control_tube);
}
- success = true;
}
self.activated_queues.clear();
- success
+ Ok(())
}
fn virtio_sleep(&mut self) -> anyhow::Result<Option<BTreeMap<usize, Queue>>> {
@@ -1655,7 +1653,7 @@
);
// reset and assert resources are got back
- assert!(b.reset(), "reset should succeed");
+ assert!(b.reset().is_ok(), "reset should succeed");
assert!(
b.disk_image.is_some(),
"BlockAsync should have a disk image"
diff --git a/devices/src/virtio/block/mod.rs b/devices/src/virtio/block/mod.rs
index d52c4fe..b06f311 100644
--- a/devices/src/virtio/block/mod.rs
+++ b/devices/src/virtio/block/mod.rs
@@ -102,6 +102,7 @@
deserialize_with = "deserialize_disk_id"
)]
pub id: Option<[u8; DISK_ID_LEN]>,
+ // Deprecated: Use async_executor=overlapped[concurrency=N]"
// camel_case variant allowed for backward compatibility.
#[cfg(windows)]
#[serde(
@@ -451,6 +452,51 @@
pci_address: None,
}
);
+ let params = from_block_arg("/some/path.img,async-executor=overlapped").unwrap();
+ assert_eq!(
+ params,
+ DiskOption {
+ path: "/some/path.img".into(),
+ read_only: false,
+ root: false,
+ sparse: true,
+ direct: false,
+ block_size: 512,
+ id: None,
+ io_concurrency: NonZeroU32::new(1).unwrap(),
+ multiple_workers: false,
+ async_executor: Some(ExecutorKindSys::Overlapped { concurrency: None }.into()),
+ packed_queue: false,
+ bootindex: None,
+ pci_address: None,
+ }
+ );
+ let params =
+ from_block_arg("/some/path.img,async-executor=\"overlapped,concurrency=4\"")
+ .unwrap();
+ assert_eq!(
+ params,
+ DiskOption {
+ path: "/some/path.img".into(),
+ read_only: false,
+ root: false,
+ sparse: true,
+ direct: false,
+ block_size: 512,
+ id: None,
+ io_concurrency: NonZeroU32::new(1).unwrap(),
+ multiple_workers: false,
+ async_executor: Some(
+ ExecutorKindSys::Overlapped {
+ concurrency: Some(4)
+ }
+ .into()
+ ),
+ packed_queue: false,
+ bootindex: None,
+ pci_address: None,
+ }
+ );
}
// id
diff --git a/devices/src/virtio/block/sys/linux.rs b/devices/src/virtio/block/sys/linux.rs
index 3cf7796..56b3949 100644
--- a/devices/src/virtio/block/sys/linux.rs
+++ b/devices/src/virtio/block/sys/linux.rs
@@ -61,7 +61,6 @@
impl BlockAsync {
pub fn create_executor(&self) -> Executor {
- Executor::with_executor_kind(self.executor_kind.into())
- .expect("Failed to create an executor")
+ Executor::with_executor_kind(self.executor_kind).expect("Failed to create an executor")
}
}
diff --git a/devices/src/virtio/block/sys/windows.rs b/devices/src/virtio/block/sys/windows.rs
index cebadbb..7b66a1a 100644
--- a/devices/src/virtio/block/sys/windows.rs
+++ b/devices/src/virtio/block/sys/windows.rs
@@ -10,6 +10,7 @@
use base::warn;
use cros_async::sys::windows::ExecutorKindSys;
use cros_async::Executor;
+use cros_async::ExecutorKind;
use winapi::um::winbase::FILE_FLAG_NO_BUFFERING;
use winapi::um::winbase::FILE_FLAG_OVERLAPPED;
use winapi::um::winnt::FILE_SHARE_READ;
@@ -38,7 +39,13 @@
flags |= FILE_FLAG_NO_BUFFERING;
}
- if self.async_executor == Some(ExecutorKindSys::Overlapped.into()) {
+ let is_overlapped = matches!(
+ self.async_executor,
+ Some(ExecutorKind::SysVariants(
+ ExecutorKindSys::Overlapped { .. }
+ ))
+ );
+ if is_overlapped {
warn!("Opening disk file for overlapped IO");
flags |= FILE_FLAG_OVERLAPPED;
}
@@ -50,10 +57,7 @@
let file = open_option
.open(&self.path)
.context("Failed to open disk file")?;
- let image_type = disk::detect_image_type(
- &file,
- self.async_executor == Some(ExecutorKindSys::Overlapped.into()),
- )?;
+ let image_type = disk::detect_image_type(&file, is_overlapped)?;
Ok(disk::create_disk_file_of_type(
file,
self.sparse,
@@ -66,7 +70,12 @@
impl BlockAsync {
pub fn create_executor(&self) -> Executor {
- Executor::with_kind_and_concurrency(self.executor_kind.into(), self.io_concurrency)
- .expect("Failed to create an executor")
+ let mut kind = self.executor_kind;
+ if let ExecutorKind::SysVariants(ExecutorKindSys::Overlapped { concurrency }) = &mut kind {
+ if concurrency.is_none() {
+ *concurrency = Some(self.io_concurrency);
+ }
+ }
+ Executor::with_executor_kind(kind).expect("Failed to create an executor")
}
}
diff --git a/devices/src/virtio/console.rs b/devices/src/virtio/console.rs
index fdc1e0b..2ade6f1 100644
--- a/devices/src/virtio/console.rs
+++ b/devices/src/virtio/console.rs
@@ -394,7 +394,7 @@
self.pci_address
}
- fn reset(&mut self) -> bool {
+ fn reset(&mut self) -> anyhow::Result<()> {
if let Some(input_thread) = self.input_thread.take() {
self.input = Some(input_thread.stop());
}
@@ -406,9 +406,8 @@
.input
.map_or(VecDeque::new(), |arc_mutex| arc_mutex.lock().clone());
self.output = Some(worker.output);
- return true;
}
- false
+ Ok(())
}
fn virtio_sleep(&mut self) -> anyhow::Result<Option<BTreeMap<usize, Queue>>> {
@@ -492,3 +491,64 @@
Ok(())
}
}
+
+#[cfg(test)]
+mod tests {
+ #[cfg(windows)]
+ use base::windows::named_pipes;
+ use tempfile::tempfile;
+ use vm_memory::GuestAddress;
+
+ use super::*;
+ use crate::suspendable_virtio_tests;
+
+ struct ConsoleContext {
+ #[cfg(windows)]
+ input_peer: named_pipes::PipeConnection,
+ }
+
+ fn modify_device(_context: &mut ConsoleContext, b: &mut Console) {
+ b.input_buffer.push_back(0);
+ }
+
+ fn create_device() -> (ConsoleContext, Console) {
+ #[cfg(any(target_os = "android", target_os = "linux"))]
+ let (input, context) = (Box::new(tempfile().unwrap()), ConsoleContext {});
+ #[cfg(windows)]
+ let (input, context) = {
+ let (x, y) = named_pipes::pair(
+ &named_pipes::FramingMode::Byte,
+ &named_pipes::BlockingMode::NoWait,
+ 0,
+ )
+ .unwrap();
+ (Box::new(x), ConsoleContext { input_peer: y })
+ };
+
+ let output = Box::new(tempfile().unwrap());
+ (
+ context,
+ Console::new(
+ hypervisor::ProtectionType::Unprotected,
+ Some(input),
+ Some(output),
+ Vec::new(),
+ None,
+ ),
+ )
+ }
+
+ suspendable_virtio_tests!(console, create_device, 2, modify_device);
+
+ #[test]
+ fn test_inactive_sleep_resume() {
+ let (_ctx, device) = &mut create_device();
+ let sleep_result = device.virtio_sleep().expect("failed to sleep");
+ assert!(sleep_result.is_none());
+ device.virtio_snapshot().expect("failed to snapshot");
+ device.virtio_wake(None).expect("failed to wake");
+ // Make sure the input and output haven't been dropped.
+ assert!(device.input.is_some());
+ assert!(device.output.is_some());
+ }
+}
diff --git a/devices/src/virtio/console/asynchronous.rs b/devices/src/virtio/console/asynchronous.rs
index 626a22c..b98480b 100644
--- a/devices/src/virtio/console/asynchronous.rs
+++ b/devices/src/virtio/console/asynchronous.rs
@@ -296,6 +296,20 @@
1 + self.extra_ports.len()
}
+ /// Returns the maximum number of queues supported by this device.
+ pub fn max_queues(&self) -> usize {
+ // The port 0 receive and transmit queues always exist;
+ // other queues only exist if VIRTIO_CONSOLE_F_MULTIPORT is set.
+ if self.is_multi_port() {
+ let port_num = self.max_ports();
+
+ // Extra 1 is for control port; each port has two queues (tx & rx)
+ (port_num + 1) * 2
+ } else {
+ 2
+ }
+ }
+
/// Return the reference of the console port by port_id
fn get_console_port(&mut self, port_id: usize) -> anyhow::Result<&mut ConsolePort> {
match port_id {
@@ -411,15 +425,10 @@
}
}
-enum VirtioConsoleState {
- Stopped(ConsoleDevice),
- Running(WorkerThread<anyhow::Result<ConsoleDevice>>),
- Broken,
-}
-
/// Virtio console device.
pub struct AsyncConsole {
- state: VirtioConsoleState,
+ console_device: Option<ConsoleDevice>,
+ worker_thread: Option<WorkerThread<anyhow::Result<ConsoleDevice>>>,
base_features: u64,
keep_descriptors: Vec<Descriptor>,
pci_address: Option<PciAddress>,
@@ -437,7 +446,7 @@
) -> AsyncConsole {
let pci_address = options.pci_address;
AsyncConsole {
- state: VirtioConsoleState::Stopped(ConsoleDevice::new(
+ console_device: Some(ConsoleDevice::new(
protection_type,
evt,
input,
@@ -446,6 +455,7 @@
options,
Default::default(),
)),
+ worker_thread: None,
base_features: base_features(protection_type),
keep_descriptors: keep_rds.iter().copied().map(Descriptor).collect(),
pci_address,
@@ -503,49 +513,40 @@
return Err(anyhow!("expected 2 queues, got {}", queues.len()));
}
- // Reset the device if it was already running.
- if matches!(self.state, VirtioConsoleState::Running { .. }) {
- self.reset();
- }
-
- let state = std::mem::replace(&mut self.state, VirtioConsoleState::Broken);
- let console = match state {
- VirtioConsoleState::Running { .. } => {
- return Err(anyhow!("device should not be running here. This is a bug."));
- }
- VirtioConsoleState::Stopped(console) => console,
- VirtioConsoleState::Broken => {
- return Err(anyhow!("device is broken and cannot be activated"));
- }
- };
+ let console = self.console_device.take().context("no console_device")?;
let ex = Executor::new().expect("failed to create an executor");
let receive_queue = queues.remove(&0).unwrap();
let transmit_queue = queues.remove(&1).unwrap();
- self.state =
- VirtioConsoleState::Running(WorkerThread::start("v_console", move |kill_evt| {
- let mut console = console;
- let receive_queue = Arc::new(Mutex::new(receive_queue));
- let transmit_queue = Arc::new(Mutex::new(transmit_queue));
+ self.worker_thread = Some(WorkerThread::start("v_console", move |kill_evt| {
+ let mut console = console;
+ let receive_queue = Arc::new(Mutex::new(receive_queue));
+ let transmit_queue = Arc::new(Mutex::new(transmit_queue));
- // Start transmit queue of port 0
- console.start_queue(&ex, 0, receive_queue, interrupt.clone())?;
- // Start receive queue of port 0
- console.start_queue(&ex, 1, transmit_queue, interrupt.clone())?;
+ // Start transmit queue of port 0
+ console.start_queue(&ex, 0, receive_queue, interrupt.clone())?;
+ // Start receive queue of port 0
+ console.start_queue(&ex, 1, transmit_queue, interrupt.clone())?;
- // Run until the kill event is signaled and cancel all tasks.
- ex.run_until(async {
- async_utils::await_and_exit(&ex, kill_evt).await?;
- let port = &mut console.port0;
- if let Some(input) = port.input.as_mut() {
- input.stop().context("failed to stop rx queue")?;
- }
- port.output.stop().context("failed to stop tx queue")?;
+ // Run until the kill event is signaled and cancel all tasks.
+ ex.run_until(async {
+ async_utils::await_and_exit(&ex, kill_evt).await?;
+ let port = &mut console.port0;
+ if let Some(input) = port.input.as_mut() {
+ input
+ .stop_async()
+ .await
+ .context("failed to stop rx queue")?;
+ }
+ port.output
+ .stop_async()
+ .await
+ .context("failed to stop tx queue")?;
- Ok(console)
- })?
- }));
+ Ok(console)
+ })?
+ }));
Ok(())
}
@@ -554,29 +555,11 @@
self.pci_address
}
- fn reset(&mut self) -> bool {
- match std::mem::replace(&mut self.state, VirtioConsoleState::Broken) {
- // Stopped console is already in reset state.
- state @ VirtioConsoleState::Stopped(_) => {
- self.state = state;
- true
- }
- // Stop the worker thread and go back to `Stopped` state.
- VirtioConsoleState::Running(worker_thread) => {
- let thread_res = worker_thread.stop();
- match thread_res {
- Ok(console) => {
- self.state = VirtioConsoleState::Stopped(console);
- true
- }
- Err(e) => {
- error!("worker thread returned an error: {}", e);
- false
- }
- }
- }
- // We are broken and cannot reset properly.
- VirtioConsoleState::Broken => false,
+ fn reset(&mut self) -> anyhow::Result<()> {
+ if let Some(worker_thread) = self.worker_thread.take() {
+ let console = worker_thread.stop()?;
+ self.console_device = Some(console);
}
+ Ok(())
}
}
diff --git a/devices/src/virtio/fs/passthrough.rs b/devices/src/virtio/fs/passthrough.rs
index 4218e90..cc58110 100644
--- a/devices/src/virtio/fs/passthrough.rs
+++ b/devices/src/virtio/fs/passthrough.rs
@@ -39,7 +39,6 @@
use base::FromRawDescriptor;
use base::Protection;
use base::RawDescriptor;
-use data_model::zerocopy_from_reader;
use fuse::filesystem::Context;
use fuse::filesystem::DirectoryIterator;
use fuse::filesystem::Entry;
@@ -1308,7 +1307,7 @@
#[cfg_attr(not(feature = "arc_quota"), allow(unused_variables))] ctx: Context,
inode: Inode,
handle: Handle,
- r: R,
+ mut r: R,
) -> io::Result<IoctlReply> {
let data: Arc<dyn AsRawDescriptor> = if self.zero_message_open.load(Ordering::Relaxed) {
self.find_inode(inode)?
@@ -1316,7 +1315,8 @@
self.find_handle(handle, inode)?
};
- let in_attr: fsxattr = zerocopy_from_reader(r)?;
+ let mut in_attr = fsxattr::new_zeroed();
+ r.read_exact(in_attr.as_bytes_mut())?;
#[cfg(feature = "arc_quota")]
let st = stat(&*data)?;
@@ -1399,7 +1399,7 @@
#[cfg_attr(not(feature = "arc_quota"), allow(unused_variables))] ctx: Context,
inode: Inode,
handle: Handle,
- r: R,
+ mut r: R,
) -> io::Result<IoctlReply> {
let data: Arc<dyn AsRawDescriptor> = if self.zero_message_open.load(Ordering::Relaxed) {
self.find_inode(inode)?
@@ -1408,7 +1408,8 @@
};
// The ioctl encoding is a long but the parameter is actually an int.
- let in_flags: c_int = zerocopy_from_reader(r)?;
+ let mut in_flags: c_int = 0;
+ r.read_exact(in_flags.as_bytes_mut())?;
#[cfg(feature = "arc_quota")]
let st = stat(&*data)?;
@@ -1522,7 +1523,8 @@
data
};
- let mut arg: fsverity_enable_arg = zerocopy_from_reader(&mut r)?;
+ let mut arg = fsverity_enable_arg::new_zeroed();
+ r.read_exact(arg.as_bytes_mut())?;
let mut salt;
if arg.salt_size > 0 {
@@ -1565,7 +1567,7 @@
&self,
inode: Inode,
handle: Handle,
- r: R,
+ mut r: R,
out_size: u32,
) -> io::Result<IoctlReply> {
let data: Arc<dyn AsRawDescriptor> = if self.zero_message_open.load(Ordering::Relaxed) {
@@ -1574,7 +1576,8 @@
self.find_handle(handle, inode)?
};
- let digest: fsverity_digest = zerocopy_from_reader(r)?;
+ let mut digest = fsverity_digest::new_zeroed();
+ r.read_exact(digest.as_bytes_mut())?;
// Taken from fs/verity/fsverity_private.h.
const FS_VERITY_MAX_DIGEST_SIZE: u16 = 64;
diff --git a/devices/src/virtio/gpu/mod.rs b/devices/src/virtio/gpu/mod.rs
index 0e2ee75..c26bc0a 100644
--- a/devices/src/virtio/gpu/mod.rs
+++ b/devices/src/virtio/gpu/mod.rs
@@ -68,6 +68,7 @@
pub use self::protocol::VIRTIO_GPU_F_RESOURCE_BLOB;
pub use self::protocol::VIRTIO_GPU_F_RESOURCE_UUID;
pub use self::protocol::VIRTIO_GPU_F_VIRGL;
+pub use self::protocol::VIRTIO_GPU_MAX_SCANOUTS;
pub use self::protocol::VIRTIO_GPU_SHM_ID_HOST_VISIBLE;
use self::protocol::*;
use self::virtio_gpu::to_rutabaga_descriptor;
@@ -1118,7 +1119,13 @@
Stub,
#[cfg(windows)]
/// Open a window using WinAPI.
- WinApi(WinDisplayProperties),
+ WinApi,
+ #[cfg(feature = "android_display")]
+ /// The display buffer is backed by an Android surface. The surface is set via an AIDL service
+ /// that the backend hosts. Currently, the AIDL service is registered to the service manager
+ /// using the name given here. The entity holding the surface is expected to locate the service
+ /// via this name, and pass the surface to it.
+ Android(String),
}
impl DisplayBackend {
@@ -1134,11 +1141,10 @@
DisplayBackend::X(display) => GpuDisplay::open_x(display.as_deref()),
DisplayBackend::Stub => GpuDisplay::open_stub(),
#[cfg(windows)]
- DisplayBackend::WinApi(display_properties) => match wndproc_thread.take() {
+ DisplayBackend::WinApi => match wndproc_thread.take() {
Some(wndproc_thread) => GpuDisplay::open_winapi(
wndproc_thread,
/* win_metrics= */ None,
- display_properties.clone(),
gpu_display_wait_descriptor_ctrl,
None,
),
@@ -1147,6 +1153,8 @@
Err(GpuDisplayError::Allocate)
}
},
+ #[cfg(feature = "android_display")]
+ DisplayBackend::Android(service_name) => GpuDisplay::open_android(service_name),
}
}
}
diff --git a/devices/src/virtio/gpu/parameters.rs b/devices/src/virtio/gpu/parameters.rs
index bbca096..3e97627 100644
--- a/devices/src/virtio/gpu/parameters.rs
+++ b/devices/src/virtio/gpu/parameters.rs
@@ -16,6 +16,7 @@
use super::GpuMode;
use super::GpuWsi;
+use crate::virtio::gpu::VIRTIO_GPU_MAX_SCANOUTS;
use crate::PciAddress;
mod serde_capset_mask {
@@ -41,6 +42,8 @@
pub struct GpuParameters {
#[serde(rename = "backend")]
pub mode: GpuMode,
+ #[serde(default = "default_max_num_displays")]
+ pub max_num_displays: u32,
#[serde(rename = "displays")]
pub display_params: Vec<DisplayParameters>,
// `width` and `height` are supported for CLI backwards compatibility.
@@ -81,6 +84,7 @@
impl Default for GpuParameters {
fn default() -> Self {
GpuParameters {
+ max_num_displays: default_max_num_displays(),
display_params: vec![],
__width_compat: None,
__height_compat: None,
@@ -109,6 +113,10 @@
}
}
+fn default_max_num_displays() -> u32 {
+ VIRTIO_GPU_MAX_SCANOUTS as u32
+}
+
#[cfg(test)]
mod tests {
use serde_json::*;
diff --git a/devices/src/virtio/gpu/virtio_gpu.rs b/devices/src/virtio/gpu/virtio_gpu.rs
index a296afb..c453fcd 100644
--- a/devices/src/virtio/gpu/virtio_gpu.rs
+++ b/devices/src/virtio/gpu/virtio_gpu.rs
@@ -46,6 +46,7 @@
use serde::Deserialize;
use serde::Serialize;
use sync::Mutex;
+use vm_control::gpu::DisplayMode;
use vm_control::gpu::DisplayParameters;
use vm_control::gpu::GpuControlCommand;
use vm_control::gpu::GpuControlResult;
@@ -285,11 +286,17 @@
let mut display = display.borrow_mut();
+ let display_params =
+ self.display_params
+ .clone()
+ .unwrap_or(DisplayParameters::default_with_mode(DisplayMode::Windowed(
+ self.width,
+ self.height,
+ )));
let surface_id = display.create_surface(
self.parent_surface_id,
self.scanout_id,
- self.width,
- self.height,
+ &display_params,
self.scanout_type,
)?;
diff --git a/devices/src/virtio/input/mod.rs b/devices/src/virtio/input/mod.rs
index 4404f5e..146bf0c 100644
--- a/devices/src/virtio/input/mod.rs
+++ b/devices/src/virtio/input/mod.rs
@@ -604,13 +604,12 @@
Ok(())
}
- fn reset(&mut self) -> bool {
+ fn reset(&mut self) -> anyhow::Result<()> {
if let Some(worker_thread) = self.worker_thread.take() {
let worker = worker_thread.stop();
self.source = Some(worker.event_source);
- return true;
}
- false
+ Ok(())
}
fn virtio_sleep(&mut self) -> anyhow::Result<Option<BTreeMap<usize, Queue>>> {
diff --git a/devices/src/virtio/interrupt.rs b/devices/src/virtio/interrupt.rs
index ddbf552..8fc6b85 100644
--- a/devices/src/virtio/interrupt.rs
+++ b/devices/src/virtio/interrupt.rs
@@ -253,6 +253,26 @@
)
}
+ #[cfg(test)]
+ pub fn new_for_test_with_msix() -> Interrupt {
+ let (_, unused_config_tube) = base::Tube::pair().unwrap();
+ let msix_vectors = 2;
+ let msix_cfg = MsixConfig::new(
+ msix_vectors,
+ unused_config_tube,
+ 0,
+ "test_device".to_owned(),
+ );
+
+ Interrupt::new(
+ IrqLevelEvent::new().unwrap(),
+ Some(Arc::new(Mutex::new(msix_cfg))),
+ msix_vectors,
+ #[cfg(target_arch = "x86_64")]
+ None,
+ )
+ }
+
/// Get a reference to the interrupt event.
pub fn get_interrupt_evt(&self) -> &Event {
match &self.inner.as_ref().transport {
diff --git a/devices/src/virtio/mod.rs b/devices/src/virtio/mod.rs
index 7474d17..4cd17d6 100644
--- a/devices/src/virtio/mod.rs
+++ b/devices/src/virtio/mod.rs
@@ -16,7 +16,7 @@
mod iommu;
#[cfg(feature = "net")]
pub mod net;
-#[cfg(target_arch = "x86_64")]
+#[cfg(feature = "pvclock")]
pub mod pvclock;
mod queue;
mod rng;
@@ -38,6 +38,7 @@
#[cfg(feature = "audio")]
pub mod snd;
pub mod vhost;
+pub mod vhost_user_frontend;
pub mod vsock;
#[cfg(feature = "balloon")]
@@ -96,6 +97,7 @@
pub use self::tpm::Tpm;
#[cfg(feature = "vtpm")]
pub use self::tpm::TpmBackend;
+pub use self::vhost_user_frontend::VhostUserFrontend;
#[cfg(any(feature = "video-decoder", feature = "video-encoder"))]
pub use self::video::VideoDevice;
pub use self::virtio_device::SharedMemoryMapper;
@@ -107,6 +109,8 @@
pub use self::virtio_pci_device::VirtioPciCap;
pub use self::virtio_pci_device::VirtioPciDevice;
pub use self::virtio_pci_device::VirtioPciShmCap;
+#[cfg(feature = "pvclock")]
+pub use self::DeviceType::Pvclock;
cfg_if::cfg_if! {
if #[cfg(any(target_os = "android", target_os = "linux"))] {
diff --git a/devices/src/virtio/net.rs b/devices/src/virtio/net.rs
index 6fc3053..7f0acb1 100644
--- a/devices/src/virtio/net.rs
+++ b/devices/src/virtio/net.rs
@@ -810,13 +810,13 @@
Ok(())
}
- fn reset(&mut self) -> bool {
+ fn reset(&mut self) -> anyhow::Result<()> {
for worker_thread in self.worker_threads.drain(..) {
let worker = worker_thread.stop();
self.taps.push(worker.tap);
}
- true
+ Ok(())
}
}
diff --git a/devices/src/virtio/pmem.rs b/devices/src/virtio/pmem.rs
index 8b0761d..df5f456 100644
--- a/devices/src/virtio/pmem.rs
+++ b/devices/src/virtio/pmem.rs
@@ -328,13 +328,12 @@
Ok(())
}
- fn reset(&mut self) -> bool {
+ fn reset(&mut self) -> anyhow::Result<()> {
if let Some(worker_thread) = self.worker_thread.take() {
let (_queue, pmem_device_tube) = worker_thread.stop();
self.pmem_device_tube = Some(pmem_device_tube);
- return true;
}
- false
+ Ok(())
}
fn virtio_sleep(&mut self) -> anyhow::Result<Option<BTreeMap<usize, Queue>>> {
diff --git a/devices/src/virtio/pvclock.rs b/devices/src/virtio/pvclock.rs
index b3b919a..a0820c5 100644
--- a/devices/src/virtio/pvclock.rs
+++ b/devices/src/virtio/pvclock.rs
@@ -4,13 +4,48 @@
//! Virtio version of a linux pvclock clocksource.
//!
-//! See the driver source here:
+//! Driver source is here:
//! <https://android.googlesource.com/kernel/common/+/ebaa2c516811825b141de844cee7a38653058ef5/drivers/virtio/virtio_pvclock.c>
//!
-//! For more information about this device, please visit <go/virtio-pvclock>.
+//! # Background
+//!
+//! Userland applications often rely on CLOCK_MONOTONIC to be relatively continuous.
+//! Large jumps can signal problems (e.g., triggering Android watchdogs).
+//! This assumption breaks down in virtualized environments, where a VM's suspension isn't
+//! inherently linked to the guest kernel's concept of "suspend".
+//! Since fixing all userland code is impractical, virtio-pvclock allows the VMM and guest kernel
+//! to collaborate on emulating the expected clock behavior around suspend/resume.
+//!
+//! # How it works
+//!
+//! ## Core functions of virtio-pvclock device:
+//!
+//! 1. Adjusts hardware clocksource offsets to make the guest clocks appear suspended when the VM is
+//! suspended.
+//! - This is achieved through the pvclock mechanism implemented in x86 KVM used by kvm-clock.
+//! 2. Provides the guest kernel with the duration of VM suspension, allowing the guest to adjust
+//! its clocks accordingly.
+//! - Since the offset between the CLOCK_MONOTONIC and CLOCK_BOOTTIME is maintained by the guest
+//! kernel, applying the adjustment is the guest driver's responsibility.
+//!
+//! ## Expected guest clock behaviors when virtio-pvclock is enabled
+//!
+//! - Monotonicity of CLOCK_MONOTONIC and CLOCK_BOOTTIME is maintained.
+//! - CLOCK_MONOTONIC will not include the time passed while crosvm is suspended, from crosvm's
+//!   run-mode perspective.
+//! - CLOCK_BOOTTIME will be adjusted to include the time passed while crosvm is suspended.
+//!
+//! # Why it is needed
+//!
+//! Because the existing solution does not cover some expectations we need.
+//!
+//! kvm-clock lets the host manage the offsets of CLOCK_MONOTONIC.
+//! However, it doesn't address the difference between CLOCK_BOOTTIME and CLOCK_MONOTONIC related
+//! to the host's suspend/resume, as it is mainly designed to keep CLOCK_REALTIME in sync.
use std::arch::x86_64::_rdtsc;
use std::collections::BTreeMap;
+use std::mem::replace;
use std::mem::size_of;
use std::sync::atomic::AtomicU64;
use std::sync::atomic::Ordering;
@@ -75,10 +110,14 @@
const VIRTIO_PVCLOCK_CLOCKSOURCE_RATING: u32 = 450;
+// The config structure exposed to the guest to tell it how much suspend time should be
+// injected into the guest's CLOCK_BOOTTIME.
#[derive(Debug, Clone, Copy, Default, AsBytes, FromZeroes, FromBytes)]
+#[allow(non_camel_case_types)]
#[repr(C)]
struct virtio_pvclock_config {
- // Number of nanoseconds the VM has been suspended without guest suspension.
+ // Total duration the VM has been paused while the guest kernel is not in the suspended state
+ // (from the power management and timekeeping perspective).
suspend_time_ns: Le64,
// Device-suggested rating of the pvclock clocksource.
clocksource_rating: Le32,
@@ -86,6 +125,7 @@
}
#[derive(Debug, Clone, Copy, Default, FromZeroes, FromBytes, AsBytes)]
+#[allow(non_camel_case_types)]
#[repr(C)]
struct virtio_pvclock_set_pvclock_page_req {
// Physical address of pvclock page.
@@ -200,94 +240,153 @@
}
}
-/// Entry struct for the virtio-pvclock device.
-///
-/// Handles MMIO communication, and activating the PvClockWorker thread.
-pub struct PvClock {
+/// Serializable part of the [PvClock] struct which will be used by the virtio_snapshot / restore.
+#[derive(Serialize, Deserialize)]
+struct PvClockState {
tsc_frequency: u64,
- suspend_tube: Option<Tube>,
+ /// If the device is sleeping, a [PvClockWorkerSnapshot] that can re-create the worker
+ /// will be stored here. (We can't just store the worker itself as it contains an object
+ /// tree with references to [GuestMemory].)
+ paused_main_worker: Option<PvClockWorkerSnapshot>,
/// The total time the vm has been suspended, this is in an `Arc<AtomicU64>>` because it's set
/// by the PvClockWorker thread but read by PvClock from the mmio bus in the main thread.
total_suspend_ns: Arc<AtomicU64>,
features: u64,
acked_features: u64,
- worker_thread: Option<WorkerThread<WorkerReturn>>,
- /// If the device is sleeping, a [PvClockWorkerSnapshot] that can re-create the worker
- /// will be stored here. (We can't just store the worker itself as it contains an object
- /// tree with references to [GuestMemory].)
- paused_worker: Option<PvClockWorkerSnapshot>,
}
-/// Snapshotted form of the [PvClock] device.
-#[derive(Serialize, Deserialize)]
-struct PvClockSnapshot {
- tsc_frequency: u64,
- paused_worker: Option<PvClockWorkerSnapshot>,
- total_suspend_ns: u64,
- features: u64,
- acked_features: u64,
+/// An enum to keep dynamic state of pvclock workers in a type safe manner.
+enum PvClockWorkerState {
+ /// Idle means no worker is running.
+ /// This tube is for communicating with this device from the crosvm threads.
+ Idle(Tube),
+    /// A stub worker to respond to pvclock commands when the device is not activated yet.
+ Stub(WorkerThread<StubWorkerReturn>),
+    /// A main worker to respond to pvclock commands while the device is active.
+ Main(WorkerThread<MainWorkerReturn>),
+ /// None is used only for handling transitional state between the states above.
+ None,
+}
+
+/// A struct that represents virtio-pvclock device.
+pub struct PvClock {
+ state: PvClockState,
+ worker_state: PvClockWorkerState,
}
impl PvClock {
pub fn new(base_features: u64, tsc_frequency: u64, suspend_tube: Tube) -> Self {
- PvClock {
+ let state = PvClockState {
tsc_frequency,
- suspend_tube: Some(suspend_tube),
+ paused_main_worker: None,
total_suspend_ns: Arc::new(AtomicU64::new(0)),
features: base_features
| 1 << VIRTIO_PVCLOCK_F_TSC_STABLE
| 1 << VIRTIO_PVCLOCK_F_INJECT_SLEEP
| 1 << VIRTIO_PVCLOCK_F_CLOCKSOURCE_RATING,
acked_features: 0,
- worker_thread: None,
- paused_worker: None,
+ };
+ PvClock {
+ state,
+ worker_state: PvClockWorkerState::Idle(suspend_tube),
}
}
fn get_config(&self) -> virtio_pvclock_config {
virtio_pvclock_config {
- suspend_time_ns: self.total_suspend_ns.load(Ordering::SeqCst).into(),
+ suspend_time_ns: self.state.total_suspend_ns.load(Ordering::SeqCst).into(),
clocksource_rating: VIRTIO_PVCLOCK_CLOCKSOURCE_RATING.into(),
padding: 0,
}
}
- fn start_worker(
+ /// Use switch_to_*_worker unless needed to keep the state transition consistent
+ fn start_main_worker(
&mut self,
interrupt: Interrupt,
- mut queues: BTreeMap<usize, Queue>,
pvclock_worker: PvClockWorker,
+ mut queues: BTreeMap<usize, Queue>,
) -> anyhow::Result<()> {
- if queues.len() != QUEUE_SIZES.len() {
- return Err(anyhow!(
- "expected {} queues, got {}",
- QUEUE_SIZES.len(),
- queues.len()
+ let last_state = replace(&mut self.worker_state, PvClockWorkerState::None);
+ if let PvClockWorkerState::Idle(suspend_tube) = last_state {
+ if queues.len() != QUEUE_SIZES.len() {
+ return Err(anyhow!(
+ "expected {} queues, got {}",
+ QUEUE_SIZES.len(),
+ queues.len()
+ ));
+ }
+ let set_pvclock_page_queue = queues.remove(&0).unwrap();
+ self.worker_state = PvClockWorkerState::Main(WorkerThread::start(
+ "virtio_pvclock".to_string(),
+ move |kill_evt| {
+ run_main_worker(
+ pvclock_worker,
+ set_pvclock_page_queue,
+ suspend_tube,
+ interrupt,
+ kill_evt,
+ )
+ },
));
+ } else {
+ panic!("Invalid state transition");
}
-
- let set_pvclock_page_queue = queues.remove(&0).unwrap();
-
- let suspend_tube = self
- .suspend_tube
- .take()
- .ok_or(anyhow!("suspend tube should not be None"))?;
-
- self.worker_thread = Some(WorkerThread::start(
- "virtio_pvclock".to_string(),
- move |kill_evt| {
- run_worker(
- pvclock_worker,
- set_pvclock_page_queue,
- suspend_tube,
- interrupt,
- kill_evt,
- )
- },
- ));
-
Ok(())
}
+
+ /// Use switch_to_*_worker unless needed to keep the state transition consistent
+ fn start_stub_worker(&mut self) {
+ let last_state = replace(&mut self.worker_state, PvClockWorkerState::None);
+ self.worker_state = if let PvClockWorkerState::Idle(suspend_tube) = last_state {
+ PvClockWorkerState::Stub(WorkerThread::start(
+ "virtio_pvclock_stub".to_string(),
+ move |kill_evt| run_stub_worker(suspend_tube, kill_evt),
+ ))
+ } else {
+ panic!("Invalid state transition");
+ };
+ }
+
+ /// Use switch_to_*_worker unless needed to keep the state transition consistent
+ fn stop_stub_worker(&mut self) {
+ let last_state = replace(&mut self.worker_state, PvClockWorkerState::None);
+ self.worker_state = if let PvClockWorkerState::Stub(stub_worker_thread) = last_state {
+ let stub_worker_ret = stub_worker_thread.stop();
+ PvClockWorkerState::Idle(stub_worker_ret.suspend_tube)
+ } else {
+ panic!("Invalid state transition");
+ }
+ }
+
+ /// Use switch_to_*_worker unless needed to keep the state transition consistent
+ fn stop_main_worker(&mut self) {
+ let last_state = replace(&mut self.worker_state, PvClockWorkerState::None);
+ if let PvClockWorkerState::Main(main_worker_thread) = last_state {
+ let main_worker_ret = main_worker_thread.stop();
+ self.worker_state = PvClockWorkerState::Idle(main_worker_ret.suspend_tube);
+ let mut queues = BTreeMap::new();
+ queues.insert(0, main_worker_ret.set_pvclock_page_queue);
+ self.state.paused_main_worker = Some(main_worker_ret.worker.into());
+ } else {
+ panic!("Invalid state transition");
+ }
+ }
+
+ fn switch_to_stub_worker(&mut self) {
+ self.stop_main_worker();
+ self.start_stub_worker();
+ }
+
+ fn switch_to_main_worker(
+ &mut self,
+ interrupt: Interrupt,
+ pvclock_worker: PvClockWorker,
+ queues: BTreeMap<usize, Queue>,
+ ) -> anyhow::Result<()> {
+ self.stop_stub_worker();
+ self.start_main_worker(interrupt, pvclock_worker, queues)
+ }
}
/// Represents a moment in time including the TSC counter value at that time.
@@ -329,7 +428,7 @@
suspend_time: Option<PvclockInstant>,
// The total time the vm has been suspended, this is in an Arc<AtomicU64>> because it's set
// by the PvClockWorker thread but read by PvClock from the mmio bus in the main thread.
- total_suspend_ns: Arc<AtomicU64>,
+ total_injected_ns: Arc<AtomicU64>,
// The total change in the TSC value over suspensions.
total_suspend_tsc_delta: u64,
// Pvclock shared data.
@@ -338,11 +437,11 @@
}
impl PvClockWorker {
- pub fn new(tsc_frequency: u64, total_suspend_ns: Arc<AtomicU64>, mem: GuestMemory) -> Self {
+ pub fn new(tsc_frequency: u64, total_injected_ns: Arc<AtomicU64>, mem: GuestMemory) -> Self {
PvClockWorker {
tsc_frequency,
suspend_time: None,
- total_suspend_ns,
+ total_injected_ns,
total_suspend_tsc_delta: 0,
pvclock_shared_data: None,
mem,
@@ -351,14 +450,14 @@
fn from_snapshot(
tsc_frequency: u64,
- total_suspend_ns: Arc<AtomicU64>,
+ total_injected_ns: Arc<AtomicU64>,
snap: PvClockWorkerSnapshot,
mem: GuestMemory,
) -> Self {
PvClockWorker {
tsc_frequency,
suspend_time: snap.suspend_time,
- total_suspend_ns,
+ total_injected_ns,
total_suspend_tsc_delta: snap.total_suspend_tsc_delta,
pvclock_shared_data: snap
.pvclock_shared_data_base_address
@@ -454,7 +553,10 @@
// SAFETY:
// Safe because _rdtsc takes no arguments, and we trust _rdtsc to not modify
// any other memory.
- unsafe { _rdtsc() } - suspend_time.tsc_value,
+ // NB: This calculation may wrap around, as TSC can be reset to zero when
+ // the device has resumed from the "deep" suspend state (it may not happen for
+ // s2idle cases). It also happens when the tsc value itself wraps.
+ unsafe { _rdtsc() }.wrapping_sub(suspend_time.tsc_value),
)
} else {
return Err(Error::new(libc::ENOTSUP))
@@ -462,7 +564,10 @@
};
// update the total tsc delta during all suspends
- self.total_suspend_tsc_delta += this_suspend_tsc_delta;
+        // NB: This calculation may wrap around, as the total suspend time can exceed the u64 range.
+ self.total_suspend_tsc_delta = self
+ .total_suspend_tsc_delta
+ .wrapping_add(this_suspend_tsc_delta);
// save tsc_suspended_delta to shared memory
self.pvclock_shared_data
@@ -478,7 +583,7 @@
);
// update total suspend ns
- self.total_suspend_ns
+ self.total_injected_ns
.fetch_add(this_suspend_duration.as_nanos() as u64, Ordering::SeqCst);
Ok(())
@@ -523,20 +628,77 @@
Error::new(libc::EFAULT)
}
-struct WorkerReturn {
+struct StubWorkerReturn {
+ suspend_tube: Tube,
+}
+
+/// A stub worker to respond to any requests when the device is inactive.
+fn run_stub_worker(suspend_tube: Tube, kill_evt: Event) -> StubWorkerReturn {
+ #[derive(EventToken, Debug)]
+ enum Token {
+ SomePvClockRequest,
+ Kill,
+ }
+ let wait_ctx: WaitContext<Token> = match WaitContext::build_with(&[
+ (suspend_tube.get_read_notifier(), Token::SomePvClockRequest),
+ // TODO(b/242743502): Can also close on Tube closure for Unix once CloseNotifier is
+ // implemented for Tube.
+ #[cfg(windows)]
+ (suspend_tube.get_close_notifier(), Token::Kill),
+ (&kill_evt, Token::Kill),
+ ]) {
+ Ok(wait_ctx) => wait_ctx,
+ Err(e) => {
+ error!("failed creating WaitContext: {}", e);
+ return StubWorkerReturn { suspend_tube };
+ }
+ };
+ 'wait: loop {
+ let events = match wait_ctx.wait() {
+ Ok(v) => v,
+ Err(e) => {
+ error!("failed polling for events: {}", e);
+ break;
+ }
+ };
+ for event in events.iter().filter(|e| e.is_readable) {
+ match event.token {
+ Token::SomePvClockRequest => {
+ match suspend_tube.recv::<PvClockCommand>() {
+ Ok(req) => req,
+ Err(e) => {
+ error!("failed to receive request: {}", e);
+ continue;
+ }
+ };
+ if let Err(e) = suspend_tube.send(&PvClockCommandResponse::DeviceInactive) {
+ error!("error sending PvClockCommandResponse: {}", e);
+ }
+ }
+ Token::Kill => {
+ break 'wait;
+ }
+ }
+ }
+ }
+ StubWorkerReturn { suspend_tube }
+}
+
+struct MainWorkerReturn {
worker: PvClockWorker,
set_pvclock_page_queue: Queue,
suspend_tube: Tube,
}
// TODO(b/237300012): asyncify this device.
-fn run_worker(
+/// A worker to process PvClockCommand requests
+fn run_main_worker(
mut worker: PvClockWorker,
mut set_pvclock_page_queue: Queue,
suspend_tube: Tube,
interrupt: Interrupt,
kill_evt: Event,
-) -> WorkerReturn {
+) -> MainWorkerReturn {
#[derive(EventToken)]
enum Token {
SetPvClockPageQueue,
@@ -557,7 +719,7 @@
Ok(pc) => pc,
Err(e) => {
error!("failed creating WaitContext: {}", e);
- return WorkerReturn {
+ return MainWorkerReturn {
suspend_tube,
set_pvclock_page_queue,
worker,
@@ -570,7 +732,7 @@
.is_err()
{
error!("failed creating WaitContext");
- return WorkerReturn {
+ return MainWorkerReturn {
suspend_tube,
set_pvclock_page_queue,
worker,
@@ -686,7 +848,7 @@
}
}
- WorkerReturn {
+ MainWorkerReturn {
suspend_tube,
set_pvclock_page_queue,
worker,
@@ -695,7 +857,11 @@
impl VirtioDevice for PvClock {
fn keep_rds(&self) -> Vec<RawDescriptor> {
- vec![self.suspend_tube.as_ref().unwrap().as_raw_descriptor()]
+ if let PvClockWorkerState::Idle(suspend_tube) = &self.worker_state {
+ vec![suspend_tube.as_raw_descriptor()]
+ } else {
+ Vec::new()
+ }
}
fn device_type(&self) -> DeviceType {
@@ -707,15 +873,15 @@
}
fn features(&self) -> u64 {
- self.features
+ self.state.features
}
fn ack_features(&mut self, mut value: u64) {
- if value & !self.features != 0 {
+ if value & !self.features() != 0 {
warn!("virtio-pvclock got unknown feature ack {:x}", value);
- value &= self.features;
+ value &= self.features();
}
- self.acked_features |= value;
+ self.state.acked_features |= value;
}
fn read_config(&self, offset: u64, data: &mut [u8]) {
@@ -736,28 +902,25 @@
interrupt: Interrupt,
queues: BTreeMap<usize, Queue>,
) -> anyhow::Result<()> {
- let tsc_frequency = self.tsc_frequency;
- let total_suspend_ns = self.total_suspend_ns.clone();
+ let tsc_frequency = self.state.tsc_frequency;
+ let total_suspend_ns = self.state.total_suspend_ns.clone();
let worker = PvClockWorker::new(tsc_frequency, total_suspend_ns, mem);
- self.start_worker(interrupt, queues, worker)
+ self.switch_to_main_worker(interrupt, worker, queues)
}
- fn reset(&mut self) -> bool {
- if let Some(worker_thread) = self.worker_thread.take() {
- let worker_ret = worker_thread.stop();
- self.suspend_tube = Some(worker_ret.suspend_tube);
- return true;
- }
- false
+ fn reset(&mut self) -> Result<()> {
+ self.switch_to_stub_worker();
+ Ok(())
}
fn virtio_sleep(&mut self) -> anyhow::Result<Option<BTreeMap<usize, Queue>>> {
- if let Some(worker_thread) = self.worker_thread.take() {
- let worker_ret = worker_thread.stop();
+ let last_state = replace(&mut self.worker_state, PvClockWorkerState::None);
+ if let PvClockWorkerState::Main(main_worker_thread) = last_state {
+ let main_worker_ret = main_worker_thread.stop();
let mut queues = BTreeMap::new();
- queues.insert(0, worker_ret.set_pvclock_page_queue);
- self.suspend_tube = Some(worker_ret.suspend_tube);
- self.paused_worker = Some(worker_ret.worker.into());
+ queues.insert(0, main_worker_ret.set_pvclock_page_queue);
+ self.worker_state = PvClockWorkerState::Idle(main_worker_ret.suspend_tube);
+ self.state.paused_main_worker = Some(main_worker_ret.worker.into());
Ok(Some(queues))
} else {
Ok(None)
@@ -770,53 +933,46 @@
) -> anyhow::Result<()> {
if let Some((mem, interrupt, queues)) = queues_state {
let worker_snap = self
- .paused_worker
+ .state
+ .paused_main_worker
.take()
.ok_or(anyhow!("a sleeping pvclock must have a paused worker"))?;
let worker = PvClockWorker::from_snapshot(
- self.tsc_frequency,
- self.total_suspend_ns.clone(),
+ self.state.tsc_frequency,
+ self.state.total_suspend_ns.clone(),
worker_snap,
mem,
);
- self.start_worker(interrupt, queues, worker)?;
+ // Use unchecked as no worker is running at this point
+ self.start_main_worker(interrupt, worker, queues)?;
}
Ok(())
}
fn virtio_snapshot(&mut self) -> anyhow::Result<serde_json::Value> {
- serde_json::to_value(PvClockSnapshot {
- features: self.features,
- acked_features: self.acked_features,
- total_suspend_ns: self.total_suspend_ns.load(Ordering::SeqCst),
- tsc_frequency: self.tsc_frequency,
- paused_worker: self.paused_worker.clone(),
- })
- .context("failed to serialize PvClockSnapshot")
+ serde_json::to_value(&self.state).context("failed to serialize PvClockState")
}
fn virtio_restore(&mut self, data: serde_json::Value) -> anyhow::Result<()> {
- let snap: PvClockSnapshot = serde_json::from_value(data).context("error deserializing")?;
- if snap.features != self.features {
+ let state: PvClockState = serde_json::from_value(data).context("error deserializing")?;
+ if state.features != self.features() {
bail!(
"expected virtio_features to match, but they did not. Live: {:?}, snapshot {:?}",
- self.features,
- snap.features,
+ self.features(),
+ state.features,
);
}
- self.acked_features = snap.acked_features;
- self.total_suspend_ns
- .store(snap.total_suspend_ns, Ordering::SeqCst);
- self.paused_worker = snap.paused_worker;
-
// TODO(b/291346907): we assume that the TSC frequency has NOT changed
// since the snapshot was made. Assuming we have not moved machines,
// this is a reasonable assumption. We don't verify the frequency
// because TSC calibration noisy.
- self.tsc_frequency = snap.tsc_frequency;
-
+ self.state = state;
Ok(())
}
+
+ fn on_device_sandboxed(&mut self) {
+ self.start_stub_worker();
+ }
}
#[cfg(test)]
@@ -830,16 +986,28 @@
Interrupt::new_for_test()
}
- fn create_sleeping_device() -> (PvClock, GuestMemory, Tube) {
- let mem = GuestMemory::new(&[(GuestAddress(0), 0x10000)]).unwrap();
- let (_host_tube, device_tube) = Tube::pair().unwrap();
+ fn create_pvclock_device() -> (Tube, PvClock) {
+ let (host_tube, device_tube) = Tube::pair().unwrap();
let mut pvclock_device = PvClock::new(0, 1e9 as u64, device_tube);
+ // Simulate the device initialization to start the stub thread.
+ // In the real case, on_device_sandboxed will be called after the device is sandboxed
+ // (or at some point during the device initializtion when the sandbox is disabled) to
+ // allow devices to use multi-threads (as spawning new threads before sandboxing is
+ // prohibited because of the minijail's restriction).
+ pvclock_device.on_device_sandboxed();
+
+ (host_tube, pvclock_device)
+ }
+
+ fn create_sleeping_device() -> (PvClock, GuestMemory, Tube) {
+ let (_host_tube, mut pvclock_device) = create_pvclock_device();
+
// The queue won't actually be used, so passing one that isn't
// fully configured is fine.
let mut fake_queue = QueueConfig::new(TEST_QUEUE_SIZE, 0);
fake_queue.set_ready(true);
-
+ let mem = GuestMemory::new(&[(GuestAddress(0), 0x10000)]).unwrap();
pvclock_device
.activate(
mem.clone(),
@@ -847,7 +1015,6 @@
BTreeMap::from([(0, fake_queue.activate(&mem, Event::new().unwrap()).unwrap())]),
)
.expect("activate should succeed");
-
let queues = pvclock_device
.virtio_sleep()
.expect("sleep should succeed")
@@ -857,8 +1024,7 @@
queues.get(&0).expect("queue must be present").size(),
TEST_QUEUE_SIZE
);
- assert!(pvclock_device.paused_worker.is_some());
-
+ assert!(pvclock_device.state.paused_main_worker.is_some());
(pvclock_device, mem, _host_tube)
}
@@ -873,7 +1039,15 @@
pvclock_device
.virtio_wake(Some(queues_state))
.expect("wake should succeed");
- assert!(pvclock_device.paused_worker.is_none());
+ assert!(pvclock_device.state.paused_main_worker.is_none());
+ }
+
+ #[test]
+ fn test_command_response_when_inactive() {
+ let (host_tube, _pvclock_device) = create_pvclock_device();
+ assert!(host_tube.send(&PvClockCommand::Suspend).is_ok());
+ let res = host_tube.recv::<PvClockCommandResponse>();
+ assert!(matches!(res, Ok(PvClockCommandResponse::DeviceInactive)));
}
#[test]
@@ -890,14 +1064,18 @@
// Store a test value we can look for later in the test to verify
// we're restoring properties.
pvclock_device
+ .state
.total_suspend_ns
.store(test_suspend_ns, Ordering::SeqCst);
let snap = pvclock_device.virtio_snapshot().unwrap();
- pvclock_device.total_suspend_ns.store(0, Ordering::SeqCst);
+ pvclock_device
+ .state
+ .total_suspend_ns
+ .store(0, Ordering::SeqCst);
pvclock_device.virtio_restore(snap).unwrap();
assert_eq!(
- pvclock_device.total_suspend_ns.load(Ordering::SeqCst),
+ pvclock_device.state.total_suspend_ns.load(Ordering::SeqCst),
test_suspend_ns
);
diff --git a/devices/src/virtio/rng.rs b/devices/src/virtio/rng.rs
index 3d1e341..3bb52df 100644
--- a/devices/src/virtio/rng.rs
+++ b/devices/src/virtio/rng.rs
@@ -6,6 +6,7 @@
use std::io::Write;
use anyhow::anyhow;
+use anyhow::Context;
use base::error;
use base::warn;
use base::Event;
@@ -15,8 +16,6 @@
use base::WorkerThread;
use rand::rngs::OsRng;
use rand::RngCore;
-use remain::sorted;
-use thiserror::Error;
use vm_memory::GuestMemory;
use super::DeviceType;
@@ -27,10 +26,8 @@
const QUEUE_SIZE: u16 = 256;
const QUEUE_SIZES: &[u16] = &[QUEUE_SIZE];
-#[sorted]
-#[derive(Error, Debug)]
-pub enum RngError {}
-pub type Result<T> = std::result::Result<T, RngError>;
+// Chosen to match the Linux guest driver RNG buffer refill size.
+const CHUNK_SIZE: usize = 64;
struct Worker {
interrupt: Interrupt,
@@ -38,33 +35,33 @@
}
impl Worker {
- fn process_queue(&mut self) -> bool {
- let queue = &mut self.queue;
-
+ fn process_queue(&mut self) {
+ let mut rand_bytes = [0u8; CHUNK_SIZE];
let mut needs_interrupt = false;
- while let Some(mut avail_desc) = queue.pop() {
+
+ while let Some(mut avail_desc) = self.queue.pop() {
let writer = &mut avail_desc.writer;
- let avail_bytes = writer.available_bytes();
-
- let mut rand_bytes = vec![0u8; avail_bytes];
- OsRng.fill_bytes(&mut rand_bytes);
-
- let written_size = match writer.write_all(&rand_bytes) {
- Ok(_) => rand_bytes.len(),
- Err(e) => {
+ while writer.available_bytes() > 0 {
+ let chunk_size = writer.available_bytes().min(CHUNK_SIZE);
+ let chunk = &mut rand_bytes[..chunk_size];
+ OsRng.fill_bytes(chunk);
+ if let Err(e) = writer.write_all(chunk) {
warn!("Failed to write random data to the guest: {}", e);
- 0usize
+ break;
}
- };
+ }
- queue.add_used(avail_desc, written_size as u32);
+ let written_size = writer.bytes_written();
+ self.queue.add_used(avail_desc, written_size as u32);
needs_interrupt = true;
}
- needs_interrupt
+ if needs_interrupt {
+ self.queue.trigger_interrupt(&self.interrupt);
+ }
}
- fn run(mut self, kill_evt: Event) -> anyhow::Result<Vec<Queue>> {
+ fn run(&mut self, kill_evt: Event) -> anyhow::Result<()> {
#[derive(EventToken)]
enum Token {
QueueAvailable,
@@ -72,43 +69,29 @@
Kill,
}
- let wait_ctx: WaitContext<Token> = match WaitContext::build_with(&[
+ let wait_ctx = WaitContext::build_with(&[
(self.queue.event(), Token::QueueAvailable),
(&kill_evt, Token::Kill),
- ]) {
- Ok(pc) => pc,
- Err(e) => {
- return Err(anyhow!("failed creating WaitContext: {}", e));
- }
- };
+ ])
+ .context("failed creating WaitContext")?;
+
if let Some(resample_evt) = self.interrupt.get_resample_evt() {
- if wait_ctx
+ wait_ctx
.add(resample_evt, Token::InterruptResample)
- .is_err()
- {
- return Err(anyhow!("failed adding resample event to WaitContext."));
- }
+ .context("failed adding resample event to WaitContext.")?;
}
- 'wait: loop {
- let events = match wait_ctx.wait() {
- Ok(v) => v,
- Err(e) => {
- error!("failed polling for events: {}", e);
- break;
- }
- };
-
- let mut needs_interrupt = false;
- let mut exiting = false;
+ let mut exiting = false;
+ while !exiting {
+ let events = wait_ctx.wait().context("failed polling for events")?;
for event in events.iter().filter(|e| e.is_readable) {
match event.token {
Token::QueueAvailable => {
- if let Err(e) = self.queue.event().wait() {
- error!("failed reading queue Event: {}", e);
- break 'wait;
- }
- needs_interrupt |= self.process_queue();
+ self.queue
+ .event()
+ .wait()
+ .context("failed reading queue Event")?;
+ self.process_queue();
}
Token::InterruptResample => {
self.interrupt.interrupt_resample();
@@ -116,26 +99,21 @@
Token::Kill => exiting = true,
}
}
- if needs_interrupt {
- self.queue.trigger_interrupt(&self.interrupt);
- }
- if exiting {
- break;
- }
}
- Ok(vec![self.queue])
+
+ Ok(())
}
}
/// Virtio device for exposing entropy to the guest OS through virtio.
pub struct Rng {
- worker_thread: Option<WorkerThread<anyhow::Result<Vec<Queue>>>>,
+ worker_thread: Option<WorkerThread<Worker>>,
virtio_features: u64,
}
impl Rng {
/// Create a new virtio rng device that gets random data from /dev/urandom.
- pub fn new(virtio_features: u64) -> Result<Rng> {
+ pub fn new(virtio_features: u64) -> anyhow::Result<Rng> {
Ok(Rng {
worker_thread: None,
virtio_features,
@@ -173,28 +151,27 @@
let queue = queues.remove(&0).unwrap();
self.worker_thread = Some(WorkerThread::start("v_rng", move |kill_evt| {
- let worker = Worker { interrupt, queue };
- worker.run(kill_evt)
+ let mut worker = Worker { interrupt, queue };
+ if let Err(e) = worker.run(kill_evt) {
+ error!("rng worker thread failed: {:#}", e);
+ }
+ worker
}));
Ok(())
}
- fn reset(&mut self) -> bool {
+ fn reset(&mut self) -> anyhow::Result<()> {
if let Some(worker_thread) = self.worker_thread.take() {
- if let Err(e) = worker_thread.stop() {
- error!("rng worker failed: {:#}", e);
- return false;
- }
- return true;
+ let _worker = worker_thread.stop();
}
- false
+ Ok(())
}
fn virtio_sleep(&mut self) -> anyhow::Result<Option<BTreeMap<usize, Queue>>> {
if let Some(worker_thread) = self.worker_thread.take() {
- let queues = worker_thread.stop()?;
- return Ok(Some(BTreeMap::from_iter(queues.into_iter().enumerate())));
+ let worker = worker_thread.stop();
+ return Ok(Some(BTreeMap::from([(0, worker.queue)])));
}
Ok(None)
}
diff --git a/devices/src/virtio/scsi/device.rs b/devices/src/virtio/scsi/device.rs
index e028cf9..4a8576e 100644
--- a/devices/src/virtio/scsi/device.rs
+++ b/devices/src/virtio/scsi/device.rs
@@ -678,8 +678,8 @@
let intr = interrupt.clone();
let worker_thread = WorkerThread::start("v_scsi_ctrlq", move |kill_evt| {
- let ex = Executor::with_executor_kind(executor_kind.into())
- .expect("Failed to create an executor");
+ let ex =
+ Executor::with_executor_kind(executor_kind).expect("Failed to create an executor");
if let Err(err) = ex
.run_until(run_worker(
&ex,
@@ -701,7 +701,7 @@
let interrupt = interrupt.clone();
let worker_thread =
WorkerThread::start(format!("v_scsi_req_{}", i + 2), move |kill_evt| {
- let ex = Executor::with_executor_kind(executor_kind.into())
+ let ex = Executor::with_executor_kind(executor_kind)
.expect("Failed to create an executor");
let async_logical_unit = targets
.0
diff --git a/devices/src/virtio/snd/common_backend/mod.rs b/devices/src/virtio/snd/common_backend/mod.rs
index ac01df2..f15508e 100644
--- a/devices/src/virtio/snd/common_backend/mod.rs
+++ b/devices/src/virtio/snd/common_backend/mod.rs
@@ -471,12 +471,12 @@
Ok(())
}
- fn reset(&mut self) -> bool {
+ fn reset(&mut self) -> anyhow::Result<()> {
if let Some(worker_thread) = self.worker_thread.take() {
let _ = worker_thread.stop();
}
- true
+ Ok(())
}
fn virtio_sleep(&mut self) -> anyhow::Result<Option<BTreeMap<usize, Queue>>> {
diff --git a/devices/src/virtio/snd/vios_backend/mod.rs b/devices/src/virtio/snd/vios_backend/mod.rs
index 4cafeaa..e8b84c9 100644
--- a/devices/src/virtio/snd/vios_backend/mod.rs
+++ b/devices/src/virtio/snd/vios_backend/mod.rs
@@ -172,18 +172,16 @@
Ok(())
}
- fn reset(&mut self) -> bool {
- let mut ret = true;
-
+ fn reset(&mut self) -> anyhow::Result<()> {
if let Some(worker_thread) = self.worker_thread.take() {
- let worker_status = worker_thread.stop();
- ret = worker_status.is_ok();
+ let worker = worker_thread.stop();
+ self.vios_client
+ .lock()
+ .stop_bg_thread()
+ .context("failed to stop VioS Client background thread")?;
+ let _worker = worker.context("failed to stop worker_thread")?;
}
- if let Err(e) = self.vios_client.lock().stop_bg_thread() {
- error!("virtio-snd: Failed to stop vios background thread: {}", e);
- ret = false;
- }
- ret
+ Ok(())
}
fn virtio_sleep(&mut self) -> anyhow::Result<Option<BTreeMap<usize, Queue>>> {
diff --git a/devices/src/virtio/vhost/net.rs b/devices/src/virtio/vhost/net.rs
index 08b67bb..39ccbb3 100644
--- a/devices/src/virtio/vhost/net.rs
+++ b/devices/src/virtio/vhost/net.rs
@@ -315,16 +315,15 @@
}
}
- fn reset(&mut self) -> bool {
+ fn reset(&mut self) -> anyhow::Result<()> {
if let Some(worker_thread) = self.worker_thread.take() {
let (worker, tap) = worker_thread.stop();
self.vhost_net_handle = Some(worker.vhost_handle);
self.tap = Some(tap);
self.vhost_interrupt = Some(worker.vhost_interrupt);
self.response_tube = worker.response_tube;
- return true;
}
- false
+ Ok(())
}
}
diff --git a/devices/src/virtio/vhost/user/README.md b/devices/src/virtio/vhost/user/README.md
index 978cff9..90daf7b 100644
--- a/devices/src/virtio/vhost/user/README.md
+++ b/devices/src/virtio/vhost/user/README.md
@@ -4,8 +4,8 @@
## Code Locations
-- [`vmm`](./vmm/) - Implements vhost-user vmm device; i.e. vhost-user master.
-- [`device`](./device/) - Implements vhost-user device backend; i.e. vhost-user slave.
+- [`vmm`](./vmm/) - Implements vhost-user vmm device; i.e. vhost-user frontend.
+- [`device`](./device/) - Implements vhost-user device backend; i.e. vhost-user backend.
## Usage
diff --git a/devices/src/virtio/vhost/user/device/block.rs b/devices/src/virtio/vhost/user/device/block.rs
index 804baca..df3def6 100644
--- a/devices/src/virtio/vhost/user/device/block.rs
+++ b/devices/src/virtio/vhost/user/device/block.rs
@@ -14,13 +14,12 @@
pub use sys::Options;
use vm_memory::GuestMemory;
use vmm_vhost::message::*;
-use vmm_vhost::VhostUserSlaveReqHandler;
use crate::virtio;
use crate::virtio::block::asynchronous::BlockAsync;
use crate::virtio::vhost::user::device::handler::DeviceRequestHandler;
-use crate::virtio::vhost::user::device::handler::VhostUserBackend;
-use crate::virtio::vhost::user::device::VhostUserDevice;
+use crate::virtio::vhost::user::device::handler::VhostUserDevice;
+use crate::virtio::vhost::user::device::VhostUserDeviceBuilder;
use crate::virtio::Interrupt;
use crate::virtio::VirtioDevice;
@@ -43,15 +42,8 @@
acked_protocol_features: u64,
}
-impl VhostUserDevice for BlockAsync {
- fn max_queue_num(&self) -> usize {
- NUM_QUEUES as usize
- }
-
- fn into_req_handler(
- self: Box<Self>,
- _ex: &Executor,
- ) -> anyhow::Result<Box<dyn VhostUserSlaveReqHandler>> {
+impl VhostUserDeviceBuilder for BlockAsync {
+ fn build(self: Box<Self>, _ex: &Executor) -> anyhow::Result<Box<dyn vmm_vhost::Backend>> {
let avail_features = self.features() | 1 << VHOST_USER_F_PROTOCOL_FEATURES;
let backend = BlockBackend {
inner: self,
@@ -59,12 +51,12 @@
acked_features: 0,
acked_protocol_features: VhostUserProtocolFeatures::empty(),
};
- let handler = DeviceRequestHandler::new(Box::new(backend));
+ let handler = DeviceRequestHandler::new(backend);
Ok(Box::new(handler))
}
}
-impl VhostUserBackend for BlockBackend {
+impl VhostUserDevice for BlockBackend {
fn max_queue_num(&self) -> usize {
NUM_QUEUES as usize
}
@@ -91,7 +83,7 @@
fn protocol_features(&self) -> VhostUserProtocolFeatures {
VhostUserProtocolFeatures::CONFIG
| VhostUserProtocolFeatures::MQ
- | VhostUserProtocolFeatures::SLAVE_REQ
+ | VhostUserProtocolFeatures::BACKEND_REQ
}
fn ack_protocol_features(&mut self, features: u64) -> anyhow::Result<()> {
@@ -111,7 +103,9 @@
}
fn reset(&mut self) {
- self.inner.reset();
+ if let Err(e) = self.inner.reset() {
+ base::error!("reset failed: {:#}", e);
+ }
}
fn start_queue(
@@ -132,8 +126,7 @@
// TODO: This assumes that `reset` only stops workers which might not be true in the
// future. Consider moving the `reset` code into a `stop_all_workers` method or, maybe,
// make `stop_queue` implicitly stop a worker thread when there is no active queue.
- self.inner.reset();
- Ok(())
+ self.inner.reset()
}
fn snapshot(&self) -> anyhow::Result<Vec<u8>> {
diff --git a/devices/src/virtio/vhost/user/device/block/sys/windows.rs b/devices/src/virtio/vhost/user/device/block/sys/windows.rs
index 55254c5..0aa5670 100644
--- a/devices/src/virtio/vhost/user/device/block/sys/windows.rs
+++ b/devices/src/virtio/vhost/user/device/block/sys/windows.rs
@@ -29,7 +29,7 @@
use crate::virtio::vhost::user::device::handler::sys::windows::read_from_tube_transporter;
use crate::virtio::vhost::user::device::handler::sys::windows::run_handler;
use crate::virtio::vhost::user::device::VhostUserDevice;
-use crate::virtio::vhost::user::VhostUserBackend;
+use crate::virtio::vhost::user::VhostUserDeviceBuilder;
use crate::virtio::BlockAsync;
#[derive(FromArgs, Debug)]
@@ -65,12 +65,12 @@
let _raise_timer_resolution =
enable_high_res_timers().context("failed to set timer resolution")?;
- info!("using {} IO handles.", disk_option.io_concurrency.get());
+ info!("using {:?} executor.", disk_option.async_executor);
let kind = disk_option
.async_executor
.unwrap_or(ExecutorKindSys::Handle.into());
- let ex = Executor::with_executor_kind(kind.into()).context("failed to create executor")?;
+ let ex = Executor::with_executor_kind(kind).context("failed to create executor")?;
let block = Box::new(BlockAsync::new(
base_features(ProtectionType::Unprotected),
@@ -92,7 +92,7 @@
// }
// This is basically the event loop.
- let handler = block.into_req_handler(&ex)?;
+ let handler = block.build(&ex)?;
info!("vhost-user disk device ready, starting run loop...");
if let Err(e) = ex.run_until(run_handler(handler, vhost_user_tube, exit_event, &ex)) {
diff --git a/devices/src/virtio/vhost/user/device/console.rs b/devices/src/virtio/vhost/user/device/console.rs
index c70842d..4f3e0dd 100644
--- a/devices/src/virtio/vhost/user/device/console.rs
+++ b/devices/src/virtio/vhost/user/device/console.rs
@@ -19,7 +19,6 @@
use sync::Mutex;
use vm_memory::GuestMemory;
use vmm_vhost::message::VhostUserProtocolFeatures;
-use vmm_vhost::VhostUserSlaveReqHandler;
use vmm_vhost::VHOST_USER_F_PROTOCOL_FEATURES;
use zerocopy::AsBytes;
@@ -30,10 +29,10 @@
use crate::virtio::copy_config;
use crate::virtio::vhost::user::device::handler::DeviceRequestHandler;
use crate::virtio::vhost::user::device::handler::Error as DeviceError;
-use crate::virtio::vhost::user::device::handler::VhostUserBackend;
+use crate::virtio::vhost::user::device::handler::VhostUserDevice;
use crate::virtio::vhost::user::device::listener::sys::VhostUserListener;
use crate::virtio::vhost::user::device::listener::VhostUserListenerTrait;
-use crate::virtio::vhost::user::device::VhostUserDevice;
+use crate::virtio::vhost::user::device::VhostUserDeviceBuilder;
use crate::virtio::Interrupt;
use crate::virtio::Queue;
use crate::SerialHardware;
@@ -60,24 +59,8 @@
}
}
-impl VhostUserDevice for VhostUserConsoleDevice {
- fn max_queue_num(&self) -> usize {
- // The port 0 receive and transmit queues always exist;
- // other queues only exist if VIRTIO_CONSOLE_F_MULTIPORT is set.
- if self.console.is_multi_port() {
- let port_num = self.console.max_ports();
-
- // Extra 1 is for control port; each port has two queues (tx & rx)
- (port_num + 1) * 2
- } else {
- 2
- }
- }
-
- fn into_req_handler(
- self: Box<Self>,
- ex: &Executor,
- ) -> anyhow::Result<Box<dyn VhostUserSlaveReqHandler>> {
+impl VhostUserDeviceBuilder for VhostUserConsoleDevice {
+ fn build(self: Box<Self>, ex: &Executor) -> anyhow::Result<Box<dyn vmm_vhost::Backend>> {
if self.raw_stdin {
// Set stdin() to raw mode so we can send over individual keystrokes unbuffered
std::io::stdin()
@@ -85,7 +68,7 @@
.context("failed to set terminal in raw mode")?;
}
- let queue_num = self.max_queue_num();
+ let queue_num = self.console.max_queues();
let active_queues = vec![None; queue_num];
let backend = ConsoleBackend {
@@ -96,7 +79,7 @@
active_queues,
};
- let handler = DeviceRequestHandler::new(Box::new(backend));
+ let handler = DeviceRequestHandler::new(backend);
Ok(Box::new(handler))
}
}
@@ -109,9 +92,9 @@
active_queues: Vec<Option<Arc<Mutex<Queue>>>>,
}
-impl VhostUserBackend for ConsoleBackend {
+impl VhostUserDevice for ConsoleBackend {
fn max_queue_num(&self) -> usize {
- self.device.max_queue_num()
+ self.device.console.max_queues()
}
fn features(&self) -> u64 {
diff --git a/devices/src/virtio/vhost/user/device/fs.rs b/devices/src/virtio/vhost/user/device/fs.rs
index c523ae8..874856a 100644
--- a/devices/src/virtio/vhost/user/device/fs.rs
+++ b/devices/src/virtio/vhost/user/device/fs.rs
@@ -38,7 +38,7 @@
use crate::virtio::fs::process_fs_queue;
use crate::virtio::fs::Config;
use crate::virtio::vhost::user::device::handler::Error as DeviceError;
-use crate::virtio::vhost::user::device::handler::VhostUserBackend;
+use crate::virtio::vhost::user::device::handler::VhostUserDevice;
use crate::virtio::vhost::user::device::handler::WorkerState;
use crate::virtio::Interrupt;
use crate::virtio::Queue;
@@ -117,7 +117,7 @@
}
}
-impl VhostUserBackend for FsBackend {
+impl VhostUserDevice for FsBackend {
fn max_queue_num(&self) -> usize {
MAX_QUEUE_NUM
}
diff --git a/devices/src/virtio/vhost/user/device/fs/sys/linux.rs b/devices/src/virtio/vhost/user/device/fs/sys/linux.rs
index 5df3932..7236bb1 100644
--- a/devices/src/virtio/vhost/user/device/fs/sys/linux.rs
+++ b/devices/src/virtio/vhost/user/device/fs/sys/linux.rs
@@ -97,13 +97,14 @@
/// Returns an error if the given `args` is invalid or the device fails to run.
pub fn start_device(opts: Options) -> anyhow::Result<()> {
let ex = Executor::new().context("Failed to create executor")?;
- let fs_device = Box::new(FsBackend::new(&ex, &opts.tag, opts.cfg)?);
+ let fs_device = FsBackend::new(&ex, &opts.tag, opts.cfg)?;
let mut keep_rds = fs_device.keep_rds.clone();
let listener = VhostUserListener::new_socket(&opts.socket, Some(&mut keep_rds))?;
base::syslog::push_descriptors(&mut keep_rds);
cros_tracing::push_descriptors!(&mut keep_rds);
+ metrics::push_descriptors(&mut keep_rds);
let pid = jail_and_fork(
keep_rds,
diff --git a/devices/src/virtio/vhost/user/device/gpu.rs b/devices/src/virtio/vhost/user/device/gpu.rs
index c7e1c2f..f2cd560 100644
--- a/devices/src/virtio/vhost/user/device/gpu.rs
+++ b/devices/src/virtio/vhost/user/device/gpu.rs
@@ -29,7 +29,7 @@
use crate::virtio::gpu::QueueReader;
use crate::virtio::vhost::user::device::handler::Error as DeviceError;
use crate::virtio::vhost::user::device::handler::VhostBackendReqConnection;
-use crate::virtio::vhost::user::device::handler::VhostUserBackend;
+use crate::virtio::vhost::user::device::handler::VhostUserDevice;
use crate::virtio::vhost::user::device::handler::WorkerState;
use crate::virtio::DescriptorChain;
use crate::virtio::Gpu;
@@ -94,7 +94,7 @@
shmem_mapper: Arc<Mutex<Option<Box<dyn SharedMemoryMapper>>>>,
}
-impl VhostUserBackend for GpuBackend {
+impl VhostUserDevice for GpuBackend {
fn max_queue_num(&self) -> usize {
MAX_QUEUE_NUM
}
@@ -114,7 +114,7 @@
fn protocol_features(&self) -> VhostUserProtocolFeatures {
VhostUserProtocolFeatures::CONFIG
- | VhostUserProtocolFeatures::SLAVE_REQ
+ | VhostUserProtocolFeatures::BACKEND_REQ
| VhostUserProtocolFeatures::MQ
| VhostUserProtocolFeatures::SHARED_MEMORY_REGIONS
}
diff --git a/devices/src/virtio/vhost/user/device/gpu/sys/linux.rs b/devices/src/virtio/vhost/user/device/gpu/sys/linux.rs
index dc43e0e..57df8cf 100644
--- a/devices/src/virtio/vhost/user/device/gpu/sys/linux.rs
+++ b/devices/src/virtio/vhost/user/device/gpu/sys/linux.rs
@@ -12,7 +12,6 @@
use argh::FromArgs;
use base::clone_descriptor;
use base::error;
-use base::FromRawDescriptor;
use base::SafeDescriptor;
use base::Tube;
use base::UnixSeqpacketListener;
@@ -98,13 +97,7 @@
// Start handling the display.
let display = clone_descriptor(&*state.borrow_mut().display().borrow())
- .map(|fd| {
- AsyncWrapper::new(
- // SAFETY:
- // Safe because we just created this fd.
- unsafe { SafeDescriptor::from_raw_descriptor(fd) },
- )
- })
+ .map(AsyncWrapper::new)
.context("failed to clone inner WaitContext for gpu display")
.and_then(|ctx| {
self.ex
@@ -244,7 +237,7 @@
None,
)));
- let backend = Box::new(GpuBackend {
+ let backend = GpuBackend {
ex: ex.clone(),
gpu,
resource_bridges,
@@ -254,7 +247,7 @@
queue_workers: Default::default(),
platform_workers: Default::default(),
shmem_mapper: Arc::new(Mutex::new(None)),
- });
+ };
// Run until the backend is finished.
let _ = ex.run_until(listener.run_backend(backend, &ex))?;
diff --git a/devices/src/virtio/vhost/user/device/gpu/sys/windows.rs b/devices/src/virtio/vhost/user/device/gpu/sys/windows.rs
index bd59997..f445621 100644
--- a/devices/src/virtio/vhost/user/device/gpu/sys/windows.rs
+++ b/devices/src/virtio/vhost/user/device/gpu/sys/windows.rs
@@ -286,9 +286,7 @@
.push(GpuDisplayParameters::default());
}
- let display_backends = vec![virtio::DisplayBackend::WinApi(
- (&config.params.display_params[0]).into(),
- )];
+ let display_backends = vec![virtio::DisplayBackend::WinApi];
let mut gpu_params = config.params.clone();
@@ -312,7 +310,7 @@
let ex = Executor::new().context("failed to create executor")?;
- let backend = Box::new(GpuBackend {
+ let backend = GpuBackend {
ex: ex.clone(),
gpu,
resource_bridges: Default::default(),
@@ -322,7 +320,7 @@
queue_workers: Default::default(),
platform_workers: Default::default(),
shmem_mapper: Arc::new(Mutex::new(None)),
- });
+ };
let handler = DeviceRequestHandler::new(backend);
diff --git a/devices/src/virtio/vhost/user/device/handler.rs b/devices/src/virtio/vhost/user/device/handler.rs
index 97dd9eb..daa39a0 100644
--- a/devices/src/virtio/vhost/user/device/handler.rs
+++ b/devices/src/virtio/vhost/user/device/handler.rs
@@ -5,12 +5,12 @@
//! Library for implementing vhost-user device executables.
//!
//! This crate provides
-//! * `VhostUserBackend` trait, which is a collection of methods to handle vhost-user requests, and
+//! * `VhostUserDevice` trait, which is a collection of methods to handle vhost-user requests, and
//! * `DeviceRequestHandler` struct, which makes a connection to a VMM and starts an event loop.
//!
//! They are expected to be used as follows:
//!
-//! 1. Define a struct and implement `VhostUserBackend` for it.
+//! 1. Define a struct and implement `VhostUserDevice` for it.
//! 2. Create a `DeviceRequestHandler` with the backend struct.
//! 3. Drive the `DeviceRequestHandler::run` async fn with an executor.
//!
@@ -19,7 +19,7 @@
//! /* fields */
//! }
//!
-//! impl VhostUserBackend for MyBackend {
+//! impl VhostUserDevice for MyBackend {
//! /* implement methods */
//! }
//!
@@ -37,12 +37,12 @@
//! ```
// Implementation note:
// This code lets us take advantage of the vmm_vhost low level implementation of the vhost user
-// protocol. DeviceRequestHandler implements the VhostUserSlaveReqHandler trait from vmm_vhost,
-// and includes some common code for setting up guest memory and managing partially configured
-// vrings. DeviceRequestHandler::run watches the vhost-user socket and then calls handle_request()
-// when it becomes readable. handle_request() reads and parses the message and then calls one of the
-// VhostUserSlaveReqHandler trait methods. These dispatch back to the supplied VhostUserBackend
-// implementation (this is what our devices implement).
+// protocol. DeviceRequestHandler implements the Backend trait from vmm_vhost, and includes some
+// common code for setting up guest memory and managing partially configured vrings.
+// DeviceRequestHandler::run watches the vhost-user socket and then calls handle_request() when it
+// becomes readable. handle_request() reads and parses the message and then calls one of the
+// Backend trait methods. These dispatch back to the supplied VhostUserDevice implementation (this
+// is what our devices implement).
pub(super) mod sys;
@@ -89,13 +89,12 @@
use vmm_vhost::message::VhostUserSingleMemoryRegion;
use vmm_vhost::message::VhostUserVringAddrFlags;
use vmm_vhost::message::VhostUserVringState;
+use vmm_vhost::BackendReq;
use vmm_vhost::Connection;
use vmm_vhost::Error as VhostError;
+use vmm_vhost::Frontend;
+use vmm_vhost::FrontendClient;
use vmm_vhost::Result as VhostResult;
-use vmm_vhost::Slave;
-use vmm_vhost::SlaveReq;
-use vmm_vhost::VhostUserMasterReqHandler;
-use vmm_vhost::VhostUserSlaveReqHandler;
use vmm_vhost::VHOST_USER_F_PROTOCOL_FEATURES;
use crate::virtio::Interrupt;
@@ -122,8 +121,11 @@
Err(VhostError::InvalidMessage)
}
-/// Trait for vhost-user backend.
-pub trait VhostUserBackend {
+/// Trait for vhost-user devices. Analogous to the `VirtioDevice` trait.
+///
+/// In contrast with [[vmm_vhost::Backend]], which closely matches the vhost-user spec, this trait
+/// is designed to follow crosvm conventions for implementing devices.
+pub trait VhostUserDevice {
/// The maximum number of queues that this backend can manage.
fn max_queue_num(&self) -> usize;
@@ -181,13 +183,13 @@
/// The backend is given an `Arc` instead of full ownership so that the framework can also use
/// the connection.
///
- /// This method will be called when `VhostUserProtocolFeatures::SLAVE_REQ` is
+ /// This method will be called when `VhostUserProtocolFeatures::BACKEND_REQ` is
/// negotiated.
fn set_backend_req_connection(&mut self, _conn: Arc<VhostBackendReqConnection>) {
error!("set_backend_req_connection is not implemented");
}
- /// Used to stop non queue workers that `VhostUserBackend::stop_queue` can't stop. May or may
+ /// Used to stop non queue workers that `VhostUserDevice::stop_queue` can't stop. May or may
/// not also stop all queue workers.
fn stop_non_queue_workers(&mut self) -> anyhow::Result<()> {
error!("sleep not implemented for vhost user device");
@@ -354,13 +356,13 @@
}
}
-/// A request handler for devices implementing `VhostUserBackend`.
-pub struct DeviceRequestHandler {
+/// An adapter that implements `vmm_vhost::Backend` for any type implementing `VhostUserDevice`.
+pub struct DeviceRequestHandler<T: VhostUserDevice> {
vrings: Vec<Vring>,
owned: bool,
vmm_maps: Option<Vec<MappingInfo>>,
mem: Option<GuestMemory>,
- backend: Box<dyn VhostUserBackend>,
+ backend: T,
backend_req_connection: Arc<Mutex<VhostBackendReqConnectionState>>,
}
@@ -370,9 +372,9 @@
backend: Vec<u8>,
}
-impl DeviceRequestHandler {
+impl<T: VhostUserDevice> DeviceRequestHandler<T> {
/// Creates a vhost-user handler instance for `backend`.
- pub(crate) fn new(backend: Box<dyn VhostUserBackend>) -> Self {
+ pub(crate) fn new(backend: T) -> Self {
let mut vrings = Vec::with_capacity(backend.max_queue_num());
for _ in 0..backend.max_queue_num() {
vrings.push(Vring::new(Queue::MAX_SIZE, backend.features()));
@@ -391,7 +393,19 @@
}
}
-impl VhostUserSlaveReqHandler for DeviceRequestHandler {
+impl<T: VhostUserDevice> AsRef<T> for DeviceRequestHandler<T> {
+ fn as_ref(&self) -> &T {
+ &self.backend
+ }
+}
+
+impl<T: VhostUserDevice> AsMut<T> for DeviceRequestHandler<T> {
+ fn as_mut(&mut self) -> &mut T {
+ &mut self.backend
+ }
+}
+
+impl<T: VhostUserDevice> vmm_vhost::Backend for DeviceRequestHandler<T> {
fn set_owner(&mut self) -> VhostResult<()> {
if self.owned {
return Err(VhostError::InvalidOperation);
@@ -566,7 +580,7 @@
Ok(queue) => queue,
Err(e) => {
error!("failed to activate vring: {:#}", e);
- return Err(VhostError::SlaveInternalError);
+ return Err(VhostError::BackendInternalError);
}
};
@@ -577,7 +591,7 @@
.start_queue(index as usize, queue, mem, doorbell)
{
error!("Failed to start queue {}: {}", index, e);
- return Err(VhostError::SlaveInternalError);
+ return Err(VhostError::BackendInternalError);
}
Ok(())
@@ -621,10 +635,9 @@
return Err(VhostError::InvalidOperation);
}
- // Slave must not pass data to/from the backend until ring is
- // enabled by VHOST_USER_SET_VRING_ENABLE with parameter 1,
- // or after it has been disabled by VHOST_USER_SET_VRING_ENABLE
- // with parameter 0.
+ // Backend must not pass data to/from the ring until ring is enabled by
+ // VHOST_USER_SET_VRING_ENABLE with parameter 1, or after it has been disabled by
+ // VHOST_USER_SET_VRING_ENABLE with parameter 0.
self.vrings[index as usize].enabled = enable;
Ok(())
@@ -651,9 +664,9 @@
Ok(())
}
- fn set_slave_req_fd(&mut self, ep: Connection<SlaveReq>) {
+ fn set_backend_req_fd(&mut self, ep: Connection<BackendReq>) {
let conn = Arc::new(VhostBackendReqConnection::new(
- Slave::new(ep),
+ FrontendClient::new(ep),
self.backend.get_shared_memory_region().map(|r| r.id),
));
@@ -729,12 +742,12 @@
fn wake(&mut self) -> VhostResult<()> {
for (index, vring) in self.vrings.iter_mut().enumerate() {
if let Some(queue) = vring.paused_queue.take() {
- let mem = self.mem.clone().ok_or(VhostError::SlaveInternalError)?;
+ let mem = self.mem.clone().ok_or(VhostError::BackendInternalError)?;
let doorbell = vring.doorbell.clone().expect("Failed to clone doorbell");
if let Err(e) = self.backend.start_queue(index, queue, mem, doorbell) {
error!("Failed to start queue {}: {}", index, e);
- return Err(VhostError::SlaveInternalError);
+ return Err(VhostError::BackendInternalError);
}
}
}
@@ -821,7 +834,7 @@
/// Keeps track of Vhost user backend request connection.
pub struct VhostBackendReqConnection {
- conn: Arc<Mutex<Slave>>,
+ conn: Arc<Mutex<FrontendClient>>,
shmem_info: Mutex<Option<ShmemInfo>>,
}
@@ -832,7 +845,7 @@
}
impl VhostBackendReqConnection {
- pub fn new(conn: Slave, shmid: Option<u8>) -> Self {
+ pub fn new(conn: FrontendClient, shmid: Option<u8>) -> Self {
let shmem_info = Mutex::new(shmid.map(|shmid| ShmemInfo {
shmid,
mapped_regions: BTreeMap::new(),
@@ -868,7 +881,7 @@
}
struct VhostShmemMapper {
- conn: Arc<Mutex<Slave>>,
+ conn: Arc<Mutex<FrontendClient>>,
shmem_info: ShmemInfo,
}
@@ -988,15 +1001,17 @@
use anyhow::anyhow;
use anyhow::bail;
use base::Event;
- use vmm_vhost::SlaveReqHandler;
- use vmm_vhost::VhostUserSlaveReqHandler;
+ use vmm_vhost::BackendServer;
+ use vmm_vhost::FrontendReq;
use zerocopy::AsBytes;
use zerocopy::FromBytes;
use zerocopy::FromZeroes;
use super::sys::test_helpers;
use super::*;
- use crate::virtio::vhost::user::vmm::VhostUserHandler;
+ use crate::virtio::vhost_user_frontend::VhostUserFrontend;
+ use crate::virtio::DeviceType;
+ use crate::virtio::VirtioDevice;
#[derive(Clone, Copy, Debug, PartialEq, Eq, AsBytes, FromZeroes, FromBytes)]
#[repr(C, packed(4))]
@@ -1012,6 +1027,8 @@
acked_features: u64,
acked_protocol_features: VhostUserProtocolFeatures,
active_queues: Vec<Option<Queue>>,
+ allow_backend_req: bool,
+ backend_conn: Option<Arc<VhostBackendReqConnection>>,
}
impl FakeBackend {
@@ -1025,11 +1042,13 @@
acked_features: 0,
acked_protocol_features: VhostUserProtocolFeatures::empty(),
active_queues,
+ allow_backend_req: false,
+ backend_conn: None,
}
}
}
- impl VhostUserBackend for FakeBackend {
+ impl VhostUserDevice for FakeBackend {
fn max_queue_num(&self) -> usize {
Self::MAX_QUEUE_NUM
}
@@ -1055,7 +1074,11 @@
}
fn protocol_features(&self) -> VhostUserProtocolFeatures {
- VhostUserProtocolFeatures::CONFIG
+ let mut features = VhostUserProtocolFeatures::CONFIG;
+ if self.allow_backend_req {
+ features |= VhostUserProtocolFeatures::BACKEND_REQ;
+ }
+ features
}
fn ack_protocol_features(&mut self, features: u64) -> anyhow::Result<()> {
@@ -1094,10 +1117,24 @@
.take()
.ok_or(Error::WorkerNotFound)?)
}
+
+ fn set_backend_req_connection(&mut self, conn: Arc<VhostBackendReqConnection>) {
+ self.backend_conn = Some(conn);
+ }
}
#[test]
fn test_vhost_user_activate() {
+ test_vhost_user_activate_parameterized(false);
+ }
+
+ #[test]
+ #[cfg(not(windows))] // Windows requries more complex connection setup.
+ fn test_vhost_user_activate_with_backend_req() {
+ test_vhost_user_activate_parameterized(true);
+ }
+
+ fn test_vhost_user_activate_parameterized(allow_backend_req: bool) {
const QUEUES_NUM: usize = 2;
let (dev, vmm) = test_helpers::setup();
@@ -1105,103 +1142,170 @@
let vmm_bar = Arc::new(Barrier::new(2));
let dev_bar = vmm_bar.clone();
- let (tx, rx) = channel();
+ let (ready_tx, ready_rx) = channel();
+ let (shutdown_tx, shutdown_rx) = channel();
std::thread::spawn(move || {
// VMM side
- rx.recv().unwrap(); // Ensure the device is ready.
+ ready_rx.recv().unwrap(); // Ensure the device is ready.
- let allow_features = 1 << VHOST_USER_F_PROTOCOL_FEATURES;
- let allow_protocol_features = VhostUserProtocolFeatures::CONFIG;
let connection = test_helpers::connect(vmm);
- let mut vmm_handler =
- VhostUserHandler::new(connection, allow_features, allow_protocol_features).unwrap();
+ let mut vmm_device =
+ VhostUserFrontend::new(DeviceType::Console, 0, connection, None, None).unwrap();
println!("read_config");
let mut buf = vec![0; std::mem::size_of::<FakeConfig>()];
- vmm_handler.read_config(0, &mut buf).unwrap();
+ vmm_device.read_config(0, &mut buf);
// Check if the obtained config data is correct.
let config = FakeConfig::read_from(buf.as_bytes()).unwrap();
assert_eq!(config, FAKE_CONFIG_DATA);
- println!("set_mem_table");
- let mem = GuestMemory::new(&[(GuestAddress(0x0), 0x10000)]).unwrap();
- vmm_handler.set_mem_table(&mem).unwrap();
+ let activate = |vmm_device: &mut VhostUserFrontend| {
+ let mem = GuestMemory::new(&[(GuestAddress(0x0), 0x10000)]).unwrap();
+ let interrupt = Interrupt::new_for_test_with_msix();
- for idx in 0..QUEUES_NUM {
- println!("activate_mem_table: queue_index={}", idx);
- let mut queue = QueueConfig::new(0x10, 0);
- queue.set_ready(true);
- let queue = queue
- .activate(&mem, Event::new().unwrap())
- .expect("QueueConfig::activate");
- let irqfd = Event::new().unwrap();
+ let mut queues = BTreeMap::new();
+ for idx in 0..QUEUES_NUM {
+ let mut queue = QueueConfig::new(0x10, 0);
+ queue.set_ready(true);
+ let queue = queue
+ .activate(&mem, Event::new().unwrap())
+ .expect("QueueConfig::activate");
+ queues.insert(idx, queue);
+ }
- vmm_handler
- .activate_vring(&mem, idx, &queue, &irqfd)
+ println!("activate");
+ vmm_device
+ .activate(mem.clone(), interrupt.clone(), queues)
.unwrap();
- }
+ };
- vmm_handler.sleep().unwrap();
+ activate(&mut vmm_device);
- vmm_handler.wake().unwrap();
+ println!("reset");
+ let reset_result = vmm_device.reset();
+ assert!(
+ reset_result.is_ok(),
+ "reset failed: {:#}",
+ reset_result.unwrap_err()
+ );
+
+ activate(&mut vmm_device);
+
+ println!("virtio_sleep");
+ vmm_device.virtio_sleep().unwrap();
+
+ println!("virtio_wake");
+ vmm_device.virtio_wake(None).unwrap();
+
+ println!("wait for shutdown signal");
+ shutdown_rx.recv().unwrap();
// The VMM side is supposed to stop before the device side.
- drop(vmm_handler);
+ println!("drop");
+ drop(vmm_device);
vmm_bar.wait();
});
// Device side
- let handler = DeviceRequestHandler::new(Box::new(FakeBackend::new()));
+ let mut handler = DeviceRequestHandler::new(FakeBackend::new());
+ handler.as_mut().allow_backend_req = allow_backend_req;
// Notify listener is ready.
- tx.send(()).unwrap();
+ ready_tx.send(()).unwrap();
let mut req_handler = test_helpers::listen(dev, handler);
- // VhostUserHandler::new()
- handle_request(&mut req_handler).expect("set_owner");
- handle_request(&mut req_handler).expect("get_features");
- handle_request(&mut req_handler).expect("set_features");
- handle_request(&mut req_handler).expect("get_protocol_features");
- handle_request(&mut req_handler).expect("set_protocol_features");
-
- // VhostUserHandler::read_config()
- handle_request(&mut req_handler).expect("get_config");
-
- // VhostUserHandler::set_mem_table()
- handle_request(&mut req_handler).expect("set_mem_table");
-
- for _ in 0..QUEUES_NUM {
- // VhostUserHandler::activate_vring()
- handle_request(&mut req_handler).expect("set_vring_num");
- handle_request(&mut req_handler).expect("set_vring_addr");
- handle_request(&mut req_handler).expect("set_vring_base");
- handle_request(&mut req_handler).expect("set_vring_call");
- handle_request(&mut req_handler).expect("set_vring_kick");
- handle_request(&mut req_handler).expect("set_vring_enable");
+ // VhostUserFrontend::new()
+ handle_request(&mut req_handler, FrontendReq::SET_OWNER).unwrap();
+ handle_request(&mut req_handler, FrontendReq::GET_FEATURES).unwrap();
+ handle_request(&mut req_handler, FrontendReq::SET_FEATURES).unwrap();
+ handle_request(&mut req_handler, FrontendReq::GET_PROTOCOL_FEATURES).unwrap();
+ handle_request(&mut req_handler, FrontendReq::SET_PROTOCOL_FEATURES).unwrap();
+ if allow_backend_req {
+ handle_request(&mut req_handler, FrontendReq::SET_BACKEND_REQ_FD).unwrap();
}
- // sleep
- handle_request(&mut req_handler).expect("sleep");
+ // VhostUserFrontend::read_config()
+ handle_request(&mut req_handler, FrontendReq::GET_CONFIG).unwrap();
- // wake
- handle_request(&mut req_handler).expect("wake");
+ // VhostUserFrontend::activate()
+ handle_request(&mut req_handler, FrontendReq::SET_MEM_TABLE).unwrap();
+ for _ in 0..QUEUES_NUM {
+ handle_request(&mut req_handler, FrontendReq::SET_VRING_NUM).unwrap();
+ handle_request(&mut req_handler, FrontendReq::SET_VRING_ADDR).unwrap();
+ handle_request(&mut req_handler, FrontendReq::SET_VRING_BASE).unwrap();
+ handle_request(&mut req_handler, FrontendReq::SET_VRING_CALL).unwrap();
+ handle_request(&mut req_handler, FrontendReq::SET_VRING_KICK).unwrap();
+ handle_request(&mut req_handler, FrontendReq::SET_VRING_ENABLE).unwrap();
+ }
+ // VhostUserFrontend::reset()
+ for _ in 0..QUEUES_NUM {
+ handle_request(&mut req_handler, FrontendReq::SET_VRING_ENABLE).unwrap();
+ handle_request(&mut req_handler, FrontendReq::GET_VRING_BASE).unwrap();
+ }
+
+ // VhostUserFrontend::activate()
+ handle_request(&mut req_handler, FrontendReq::SET_MEM_TABLE).unwrap();
+ for _ in 0..QUEUES_NUM {
+ handle_request(&mut req_handler, FrontendReq::SET_VRING_NUM).unwrap();
+ handle_request(&mut req_handler, FrontendReq::SET_VRING_ADDR).unwrap();
+ handle_request(&mut req_handler, FrontendReq::SET_VRING_BASE).unwrap();
+ handle_request(&mut req_handler, FrontendReq::SET_VRING_CALL).unwrap();
+ handle_request(&mut req_handler, FrontendReq::SET_VRING_KICK).unwrap();
+ handle_request(&mut req_handler, FrontendReq::SET_VRING_ENABLE).unwrap();
+ }
+
+ if allow_backend_req {
+ // Make sure the connection still works even after reset/reactivate.
+ req_handler
+ .as_ref()
+ .as_ref()
+ .backend_conn
+ .as_ref()
+ .expect("backend_conn missing")
+ .send_config_changed()
+ .expect("send_config_changed failed");
+ }
+
+ // VhostUserFrontend::virtio_sleep()
+ handle_request(&mut req_handler, FrontendReq::SLEEP).unwrap();
+
+ // VhostUserFrontend::virtio_wake()
+ handle_request(&mut req_handler, FrontendReq::WAKE).unwrap();
+
+ if allow_backend_req {
+ // Make sure the connection still works even after sleep/wake.
+ req_handler
+ .as_ref()
+ .as_ref()
+ .backend_conn
+ .as_ref()
+ .expect("backend_conn missing")
+ .send_config_changed()
+ .expect("send_config_changed failed");
+ }
+
+ // Ask the client to shutdown, then wait to it to finish.
+ shutdown_tx.send(()).unwrap();
dev_bar.wait();
- match handle_request(&mut req_handler) {
+ // Verify recv_header fails with `ClientExit` after the client has disconnected.
+ match req_handler.recv_header() {
Err(VhostError::ClientExit) => (),
r => panic!("expected Err(ClientExit) but got {:?}", r),
}
}
- fn handle_request<S: VhostUserSlaveReqHandler>(
- handler: &mut SlaveReqHandler<S>,
+ fn handle_request<S: vmm_vhost::Backend>(
+ handler: &mut BackendServer<S>,
+ expected_message_type: FrontendReq,
) -> Result<(), VhostError> {
let (hdr, files) = handler.recv_header()?;
+ assert_eq!(hdr.get_code(), Ok(expected_message_type));
handler.process_message(hdr, files)
}
}
diff --git a/devices/src/virtio/vhost/user/device/handler/sys/linux.rs b/devices/src/virtio/vhost/user/device/handler/sys/linux.rs
index c3cb364..cf8d0ee 100644
--- a/devices/src/virtio/vhost/user/device/handler/sys/linux.rs
+++ b/devices/src/virtio/vhost/user/device/handler/sys/linux.rs
@@ -9,16 +9,15 @@
use base::SafeDescriptor;
use cros_async::AsyncWrapper;
use cros_async::Executor;
+use vmm_vhost::BackendServer;
use vmm_vhost::Error as VhostError;
-use vmm_vhost::SlaveReqHandler;
-use vmm_vhost::VhostUserSlaveReqHandler;
/// Performs the run loop for an already-constructor request handler.
-pub async fn run_handler<S>(mut req_handler: SlaveReqHandler<S>, ex: &Executor) -> Result<()>
+pub async fn run_handler<S>(mut backend_server: BackendServer<S>, ex: &Executor) -> Result<()>
where
- S: VhostUserSlaveReqHandler,
+ S: vmm_vhost::Backend,
{
- let h = SafeDescriptor::try_from(&req_handler as &dyn AsRawDescriptor)
+ let h = SafeDescriptor::try_from(&backend_server as &dyn AsRawDescriptor)
.map(AsyncWrapper::new)
.context("failed to get safe descriptor for handler")?;
let handler_source = ex
@@ -30,7 +29,7 @@
.wait_readable()
.await
.context("failed to wait for the handler to become readable")?;
- let (hdr, files) = match req_handler.recv_header() {
+ let (hdr, files) = match backend_server.recv_header() {
Ok((hdr, files)) => (hdr, files),
Err(VhostError::ClientExit) => {
info!("vhost-user connection closed");
@@ -42,13 +41,13 @@
}
};
- if req_handler.needs_wait_for_payload(&hdr) {
+ if backend_server.needs_wait_for_payload(&hdr) {
handler_source
.wait_readable()
.await
.context("failed to wait for the handler to become readable")?;
}
- req_handler.process_message(hdr, files)?;
+ backend_server.process_message(hdr, files)?;
}
}
@@ -57,10 +56,9 @@
use std::os::unix::net::UnixStream;
use tempfile::TempDir;
- use vmm_vhost::connection::socket::SocketListener;
use vmm_vhost::connection::Listener;
- use vmm_vhost::SlaveReqHandler;
- use vmm_vhost::VhostUserSlaveReqHandler;
+ use vmm_vhost::unix::SocketListener;
+ use vmm_vhost::BackendServer;
pub(crate) fn setup() -> (SocketListener, TempDir) {
let dir = tempfile::Builder::new()
@@ -80,11 +78,11 @@
UnixStream::connect(path).unwrap()
}
- pub(crate) fn listen<S: VhostUserSlaveReqHandler>(
+ pub(crate) fn listen<S: vmm_vhost::Backend>(
mut listener: SocketListener,
handler: S,
- ) -> SlaveReqHandler<S> {
+ ) -> BackendServer<S> {
let connection = listener.accept().unwrap().unwrap();
- SlaveReqHandler::new(connection, handler)
+ BackendServer::new(connection, handler)
}
}
diff --git a/devices/src/virtio/vhost/user/device/handler/sys/windows.rs b/devices/src/virtio/vhost/user/device/handler/sys/windows.rs
index d5aa5cb..f7b7718 100644
--- a/devices/src/virtio/vhost/user/device/handler/sys/windows.rs
+++ b/devices/src/virtio/vhost/user/device/handler/sys/windows.rs
@@ -20,12 +20,9 @@
use futures::FutureExt;
use tube_transporter::TubeTransferDataList;
use tube_transporter::TubeTransporterReader;
-use vmm_vhost::message::MasterReq;
+use vmm_vhost::message::FrontendReq;
use vmm_vhost::message::VhostUserMsgHeader;
-use vmm_vhost::SlaveReqHandler;
-use vmm_vhost::VhostUserSlaveReqHandler;
-
-use crate::virtio::vhost::user::device::handler::DeviceRequestHandler;
+use vmm_vhost::BackendServer;
pub fn read_from_tube_transporter(
raw_transport_tube: RawDescriptor,
@@ -48,7 +45,7 @@
}
pub async fn run_handler(
- handler: Box<dyn VhostUserSlaveReqHandler>,
+ handler: Box<dyn vmm_vhost::Backend>,
vhost_user_tube: Tube,
exit_event: Event,
ex: &Executor,
@@ -62,7 +59,7 @@
.context("failed to create an async event")?;
let exit_event = EventAsync::new(exit_event, ex).context("failed to create an async event")?;
- let mut req_handler = SlaveReqHandler::from_stream(vhost_user_tube, handler);
+ let mut backend_server = BackendServer::from_stream(vhost_user_tube, handler);
let read_event_fut = read_event.next_val().fuse();
let close_event_fut = close_event.next_val().fuse();
@@ -71,26 +68,26 @@
pin_mut!(close_event_fut);
pin_mut!(exit_event_fut);
- let mut pending_header: Option<(VhostUserMsgHeader<MasterReq>, Vec<std::fs::File>)> = None;
+ let mut pending_header: Option<(VhostUserMsgHeader<FrontendReq>, Vec<std::fs::File>)> = None;
loop {
select! {
_read_res = read_event_fut => {
match pending_header.take() {
None => {
- let (hdr, files) = req_handler
+ let (hdr, files) = backend_server
.recv_header()
.context("failed to handle a vhost-user request")?;
- if req_handler.needs_wait_for_payload(&hdr) {
+ if backend_server.needs_wait_for_payload(&hdr) {
// Wait for the message body being notified.
pending_header = Some((hdr, files));
} else {
- req_handler
+ backend_server
.process_message(hdr, files)
.context("failed to handle a vhost-user request")?;
}
}
Some((hdr, files)) => {
- req_handler
+ backend_server
.process_message(hdr, files)
.context("failed to handle a vhost-user request")?;
}
@@ -114,9 +111,8 @@
#[cfg(test)]
pub mod test_helpers {
use base::Tube;
- use vmm_vhost::message::MasterReq;
- use vmm_vhost::SlaveReqHandler;
- use vmm_vhost::VhostUserSlaveReqHandler;
+ use vmm_vhost::message::FrontendReq;
+ use vmm_vhost::BackendServer;
pub(crate) fn setup() -> (Tube, Tube) {
Tube::pair().unwrap()
@@ -126,10 +122,7 @@
tube
}
- pub(crate) fn listen<S: VhostUserSlaveReqHandler>(
- dev_tube: Tube,
- handler: S,
- ) -> SlaveReqHandler<S> {
- SlaveReqHandler::from_stream(dev_tube, handler)
+ pub(crate) fn listen<S: vmm_vhost::Backend>(dev_tube: Tube, handler: S) -> BackendServer<S> {
+ BackendServer::from_stream(dev_tube, handler)
}
}
diff --git a/devices/src/virtio/vhost/user/device/listener.rs b/devices/src/virtio/vhost/user/device/listener.rs
index 9abade1..22becbd 100644
--- a/devices/src/virtio/vhost/user/device/listener.rs
+++ b/devices/src/virtio/vhost/user/device/listener.rs
@@ -10,11 +10,10 @@
use cros_async::Executor;
use futures::Future;
pub use sys::VhostUserListener;
-use vmm_vhost::VhostUserSlaveReqHandler;
use crate::virtio::vhost::user::device::handler::DeviceRequestHandler;
-use crate::virtio::vhost::user::device::handler::VhostUserBackend;
-use crate::virtio::vhost::user::VhostUserDevice;
+use crate::virtio::vhost::user::device::handler::VhostUserDevice;
+use crate::virtio::vhost::user::VhostUserDeviceBuilder;
/// Trait that the platform-specific type `VhostUserListener` needs to implement. It contains all
/// the methods that are ok to call from non-platform specific code.
@@ -22,12 +21,10 @@
/// Creates a VhostUserListener from `path`, which is a platform-specific string describing how
/// to establish the vhost-user channel. For instance, it can be a path to a socket.
///
- /// `max_num_queues` is the maximum number of queues we will supports through this channel.
/// `keep_rds` is a vector of `RawDescriptor`s to which the descriptors needed for this listener
/// to operate properly will be added if it is `Some()`.
fn new(
path: &str,
- max_num_queues: usize,
keep_rds: Option<&mut Vec<RawDescriptor>>,
) -> anyhow::Result<VhostUserListener>;
@@ -49,11 +46,11 @@
None
}
- /// Returns a `Future` that processes requests for a `VhostUserSlaveReqHandler`. The future
- /// exits when the front-end side disconnects or an error occurs.
+ /// Returns a `Future` that processes requests for `handler`. The future exits when the
+ /// front-end side disconnects or an error occurs.
fn run_req_handler<'e>(
self,
- handler: Box<dyn VhostUserSlaveReqHandler>,
+ handler: Box<dyn vmm_vhost::Backend>,
ex: &'e Executor,
) -> Pin<Box<dyn Future<Output = anyhow::Result<()>> + 'e>>;
@@ -63,7 +60,7 @@
/// This is a legacy way to run devices - prefer `run_device`.
fn run_backend<'e>(
self,
- backend: Box<dyn VhostUserBackend>,
+ backend: impl VhostUserDevice + 'static,
ex: &'e Executor,
) -> Pin<Box<dyn Future<Output = anyhow::Result<()>> + 'e>>
where
@@ -74,10 +71,10 @@
/// Start processing requests for a `VhostUserDevice` on `listener`. Returns when the front-end
/// side disconnects or an error occurs.
- fn run_device(self, ex: Executor, device: Box<dyn VhostUserDevice>) -> anyhow::Result<()>
+ fn run_device(self, ex: Executor, device: Box<dyn VhostUserDeviceBuilder>) -> anyhow::Result<()>
where
Self: Sized,
{
- ex.run_until(self.run_req_handler(device.into_req_handler(&ex).unwrap(), &ex))?
+ ex.run_until(self.run_req_handler(device.build(&ex).unwrap(), &ex))?
}
}
diff --git a/devices/src/virtio/vhost/user/device/listener/sys/linux.rs b/devices/src/virtio/vhost/user/device/listener/sys/linux.rs
index c6bec2d..a8a444a 100644
--- a/devices/src/virtio/vhost/user/device/listener/sys/linux.rs
+++ b/devices/src/virtio/vhost/user/device/listener/sys/linux.rs
@@ -11,10 +11,9 @@
use cros_async::Executor;
use futures::Future;
use futures::FutureExt;
-use vmm_vhost::connection::socket::SocketListener;
use vmm_vhost::connection::Listener;
-use vmm_vhost::SlaveReqHandler;
-use vmm_vhost::VhostUserSlaveReqHandler;
+use vmm_vhost::unix::SocketListener;
+use vmm_vhost::BackendServer;
use crate::virtio::vhost::user::device::handler::sys::linux::run_handler;
use crate::virtio::vhost::user::device::listener::VhostUserListenerTrait;
@@ -41,10 +40,10 @@
}
/// Attaches to an already bound socket via `listener` and handles incoming messages from the
-/// VMM, which are dispatched to the device backend via the `VhostUserBackend` trait methods.
+/// VMM, which are dispatched to the device backend via the `VhostUserDevice` trait methods.
async fn run_with_handler(
mut listener: SocketListener,
- handler: Box<dyn VhostUserSlaveReqHandler>,
+ handler: Box<dyn vmm_vhost::Backend>,
ex: &Executor,
) -> anyhow::Result<()> {
listener.set_nonblocking(true)?;
@@ -58,7 +57,7 @@
.context("failed to accept an incoming connection")?
{
Some(connection) => {
- let req_handler = SlaveReqHandler::new(connection, handler);
+ let req_handler = BackendServer::new(connection, handler);
return run_handler(req_handler, ex).await;
}
None => {
@@ -80,20 +79,13 @@
///
/// `keep_rds` can be specified to retrieve the raw descriptors that must be preserved for this
/// listener to keep working after forking.
- fn new(
- path: &str,
- _max_num_queues: usize,
- keep_rds: Option<&mut Vec<RawDescriptor>>,
- ) -> anyhow::Result<Self> {
+ fn new(path: &str, keep_rds: Option<&mut Vec<RawDescriptor>>) -> anyhow::Result<Self> {
Self::new_socket(path, keep_rds)
}
- /// Returns a future that runs a `VhostUserSlaveReqHandler` using this listener.
- ///
- /// `ex` is the executor on which the request handler can schedule its own tasks.
fn run_req_handler<'e>(
self,
- handler: Box<dyn VhostUserSlaveReqHandler>,
+ handler: Box<dyn vmm_vhost::Backend>,
ex: &'e Executor,
) -> Pin<Box<dyn Future<Output = anyhow::Result<()>> + 'e>> {
async { run_with_handler(self.0, handler, ex).await }.boxed_local()
diff --git a/devices/src/virtio/vhost/user/device/listener/sys/windows.rs b/devices/src/virtio/vhost/user/device/listener/sys/windows.rs
index 40c5931..890ec10 100644
--- a/devices/src/virtio/vhost/user/device/listener/sys/windows.rs
+++ b/devices/src/virtio/vhost/user/device/listener/sys/windows.rs
@@ -7,9 +7,8 @@
use base::RawDescriptor;
use cros_async::Executor;
use futures::Future;
-use vmm_vhost::VhostUserSlaveReqHandler;
-use crate::virtio::vhost::user::device::handler::VhostUserBackend;
+use crate::virtio::vhost::user::device::handler::VhostUserDevice;
use crate::virtio::vhost::user::device::listener::VhostUserListenerTrait;
/// TODO implement this. On Windows the `vhost_user_tube` can be provided through the `path`
@@ -18,17 +17,13 @@
pub struct VhostUserListener;
impl VhostUserListenerTrait for VhostUserListener {
- fn new(
- _path: &str,
- _max_num_queues: usize,
- _keep_rds: Option<&mut Vec<RawDescriptor>>,
- ) -> anyhow::Result<Self> {
+ fn new(_path: &str, _keep_rds: Option<&mut Vec<RawDescriptor>>) -> anyhow::Result<Self> {
todo!()
}
fn run_req_handler<'e>(
self,
- _handler: Box<dyn VhostUserSlaveReqHandler>,
+ _handler: Box<dyn vmm_vhost::Backend>,
_ex: &'e Executor,
) -> Pin<Box<dyn Future<Output = anyhow::Result<()>> + 'e>> {
todo!()
diff --git a/devices/src/virtio/vhost/user/device/mod.rs b/devices/src/virtio/vhost/user/device/mod.rs
index 609a9ca..03af3d1 100644
--- a/devices/src/virtio/vhost/user/device/mod.rs
+++ b/devices/src/virtio/vhost/user/device/mod.rs
@@ -15,13 +15,12 @@
pub use block::run_block_device;
pub use block::Options as BlockOptions;
use cros_async::Executor;
-use cros_async::ExecutorKind;
#[cfg(feature = "gpu")]
pub use gpu::run_gpu_device;
#[cfg(feature = "gpu")]
pub use gpu::Options as GpuOptions;
pub use handler::VhostBackendReqConnectionState;
-pub use handler::VhostUserBackend;
+pub use handler::VhostUserDevice;
pub use listener::sys::VhostUserListener;
pub use listener::VhostUserListenerTrait;
#[cfg(feature = "net")]
@@ -34,7 +33,6 @@
pub use snd::run_snd_device;
#[cfg(feature = "audio")]
pub use snd::Options as SndOptions;
-use vmm_vhost::VhostUserSlaveReqHandler;
cfg_if::cfg_if! {
if #[cfg(any(target_os = "android", target_os = "linux"))] {
@@ -53,30 +51,21 @@
}
}
-/// A trait for vhost-user devices.
+/// A trait for not-yet-built vhost-user devices.
///
-/// Upon being given an [[Executor]], a device can be converted into a
-/// [[VhostUserSlaveReqHandler]], which can then process the requests from the front-end.
+/// Upon being given an [[Executor]], a builder can be converted into a [[vmm_vhost::Backend]],
+/// which can then process the requests from the front-end.
///
-/// We don't build request handlers directly to ensure that the device starts to process queues in
-/// the jailed process, not in the main process. [[VhostUserDevice::into_req_handler()]] is called
-/// only after jailing, which ensures that any operations by the request handler is done in the
-/// jailed process.
-pub trait VhostUserDevice {
- /// The maximum number of queues that this device can manage.
- fn max_queue_num(&self) -> usize;
-
- /// Turn this device into a vhost-user request handler that will run the device.
+/// We don't build the device directly to ensure that the device only starts threads in the jailed
+/// process, not in the main process. [[VhostUserDeviceBuilder::build()]] is called only after
+/// jailing, which ensures that any operations by the device are done in the jailed process.
+///
+/// TODO: Ideally this would return a [[VhostUserDevice]] instead of [[vmm_vhost::Backend]]. Only
+/// the vhost-user vhost-vsock device uses the latter and it can probably be migrated to
+/// [[VhostUserDevice]].
+pub trait VhostUserDeviceBuilder {
+ /// Create the vhost-user device.
///
/// `ex` is an executor the device can use to schedule its tasks.
- fn into_req_handler(
- self: Box<Self>,
- ex: &Executor,
- ) -> anyhow::Result<Box<dyn VhostUserSlaveReqHandler>>;
-
- /// The preferred ExecutorKind of an Executor to accept by
- /// [`VhostUserDevice::into_req_handler()`].
- fn executor_kind(&self) -> Option<ExecutorKind> {
- None
- }
+ fn build(self: Box<Self>, ex: &Executor) -> anyhow::Result<Box<dyn vmm_vhost::Backend>>;
}
diff --git a/devices/src/virtio/vhost/user/device/net.rs b/devices/src/virtio/vhost/user/device/net.rs
index dd51a80..1ed1e06 100644
--- a/devices/src/virtio/vhost/user/device/net.rs
+++ b/devices/src/virtio/vhost/user/device/net.rs
@@ -34,8 +34,8 @@
use crate::virtio::net::virtio_features_to_tap_offload;
use crate::virtio::vhost::user::device::handler::DeviceRequestHandler;
use crate::virtio::vhost::user::device::handler::Error as DeviceError;
-use crate::virtio::vhost::user::device::handler::VhostUserBackend;
-use crate::virtio::vhost::user::VhostUserDevice;
+use crate::virtio::vhost::user::device::handler::VhostUserDevice;
+use crate::virtio::vhost::user::VhostUserDeviceBuilder;
use crate::virtio::Interrupt;
use crate::virtio::Queue;
@@ -142,7 +142,7 @@
}
}
-impl<T: 'static> VhostUserBackend for NetBackend<T>
+impl<T: 'static> VhostUserDevice for NetBackend<T>
where
T: TapT + IntoAsync,
{
@@ -241,22 +241,15 @@
}
}
-impl<T> VhostUserDevice for NetBackend<T>
+impl<T> VhostUserDeviceBuilder for NetBackend<T>
where
T: TapT + IntoAsync + 'static,
{
- fn max_queue_num(&self) -> usize {
- MAX_QUEUE_NUM
- }
-
- fn into_req_handler(
- self: Box<Self>,
- ex: &Executor,
- ) -> anyhow::Result<Box<dyn vmm_vhost::VhostUserSlaveReqHandler>> {
+ fn build(self: Box<Self>, ex: &Executor) -> anyhow::Result<Box<dyn vmm_vhost::Backend>> {
NET_EXECUTOR.with(|thread_ex| {
let _ = thread_ex.set(ex.clone());
});
- let handler = DeviceRequestHandler::new(self);
+ let handler = DeviceRequestHandler::new(*self);
Ok(Box::new(handler))
}
diff --git a/devices/src/virtio/vhost/user/device/net/sys/linux.rs b/devices/src/virtio/vhost/user/device/net/sys/linux.rs
index 34c15f1..aacc20d 100644
--- a/devices/src/virtio/vhost/user/device/net/sys/linux.rs
+++ b/devices/src/virtio/vhost/user/device/net/sys/linux.rs
@@ -35,7 +35,7 @@
use crate::virtio::net::process_rx;
use crate::virtio::net::validate_and_configure_tap;
use crate::virtio::net::NetError;
-use crate::virtio::vhost::user::device::handler::VhostUserBackend;
+use crate::virtio::vhost::user::device::handler::VhostUserDevice;
use crate::virtio::vhost::user::device::listener::sys::VhostUserListener;
use crate::virtio::vhost::user::device::listener::VhostUserListenerTrait;
use crate::virtio::vhost::user::device::net::run_ctrl_queue;
@@ -176,7 +176,7 @@
queue
}
-/// Platform specific impl of VhostUserBackend::start_queue.
+/// Platform specific impl of VhostUserDevice::start_queue.
pub(in crate::virtio::vhost::user::device::net) fn start_queue<T: 'static + IntoAsync + TapT>(
backend: &mut NetBackend<T>,
idx: usize,
@@ -331,7 +331,7 @@
let listener = VhostUserListener::new_socket(&socket, None)?;
// run_until() returns an Result<Result<..>> which the ? operator lets us
// flatten.
- ex.run_until(listener.run_backend(Box::new(backend), &ex))?
+ ex.run_until(listener.run_backend(backend, &ex))?
}));
}
};
diff --git a/devices/src/virtio/vhost/user/device/net/sys/windows.rs b/devices/src/virtio/vhost/user/device/net/sys/windows.rs
index a3bcf4c..150e5bd 100644
--- a/devices/src/virtio/vhost/user/device/net/sys/windows.rs
+++ b/devices/src/virtio/vhost/user/device/net/sys/windows.rs
@@ -50,7 +50,7 @@
use crate::virtio::vhost::user::device::handler::sys::windows::read_from_tube_transporter;
use crate::virtio::vhost::user::device::handler::sys::windows::run_handler;
use crate::virtio::vhost::user::device::handler::DeviceRequestHandler;
-use crate::virtio::vhost::user::device::handler::VhostUserBackend;
+use crate::virtio::vhost::user::device::handler::VhostUserDevice;
use crate::virtio::vhost::user::device::handler::WorkerState;
use crate::virtio::vhost::user::device::net::run_ctrl_queue;
use crate::virtio::vhost::user::device::net::run_tx_queue;
@@ -167,7 +167,7 @@
queue
}
-/// Platform specific impl of VhostUserBackend::start_queue.
+/// Platform specific impl of VhostUserDevice::start_queue.
pub(in crate::virtio::vhost::user::device::net) fn start_queue<T: 'static + IntoAsync + TapT>(
backend: &mut NetBackend<T>,
idx: usize,
@@ -307,13 +307,11 @@
let exit_event = bootstrap_tube.recv::<Event>()?;
// We only have one net device for now.
- let dev = Box::new(
- NetBackend::<net_util::Slirp>::new_slirp(
- net_backend_config.guest_pipe,
- net_backend_config.slirp_kill_event,
- )
- .unwrap(),
- );
+ let dev = NetBackend::<net_util::Slirp>::new_slirp(
+ net_backend_config.guest_pipe,
+ net_backend_config.slirp_kill_event,
+ )
+ .unwrap();
let handler = DeviceRequestHandler::new(dev);
diff --git a/devices/src/virtio/vhost/user/device/snd.rs b/devices/src/virtio/vhost/user/device/snd.rs
index 363def4..e54436a 100644
--- a/devices/src/virtio/vhost/user/device/snd.rs
+++ b/devices/src/virtio/vhost/user/device/snd.rs
@@ -49,9 +49,9 @@
use crate::virtio::snd::parameters::Parameters;
use crate::virtio::vhost::user::device::handler::DeviceRequestHandler;
use crate::virtio::vhost::user::device::handler::Error as DeviceError;
-use crate::virtio::vhost::user::device::handler::VhostUserBackend;
+use crate::virtio::vhost::user::device::handler::VhostUserDevice;
use crate::virtio::vhost::user::device::handler::WorkerState;
-use crate::virtio::vhost::user::VhostUserDevice;
+use crate::virtio::vhost::user::VhostUserDeviceBuilder;
use crate::virtio::Interrupt;
use crate::virtio::Queue;
@@ -133,21 +133,14 @@
}
}
-impl VhostUserDevice for SndBackend {
- fn max_queue_num(&self) -> usize {
- MAX_QUEUE_NUM
- }
-
- fn into_req_handler(
- self: Box<Self>,
- _ex: &Executor,
- ) -> anyhow::Result<Box<dyn vmm_vhost::VhostUserSlaveReqHandler>> {
- let handler = DeviceRequestHandler::new(self);
+impl VhostUserDeviceBuilder for SndBackend {
+ fn build(self: Box<Self>, _ex: &Executor) -> anyhow::Result<Box<dyn vmm_vhost::Backend>> {
+ let handler = DeviceRequestHandler::new(*self);
Ok(Box::new(handler))
}
}
-impl VhostUserBackend for SndBackend {
+impl VhostUserDevice for SndBackend {
fn max_queue_num(&self) -> usize {
MAX_QUEUE_NUM
}
diff --git a/devices/src/virtio/vhost/user/device/snd/sys/windows.rs b/devices/src/virtio/vhost/user/device/snd/sys/windows.rs
index 1c3996d..1a06c9b 100644
--- a/devices/src/virtio/vhost/user/device/snd/sys/windows.rs
+++ b/devices/src/virtio/vhost/user/device/snd/sys/windows.rs
@@ -23,7 +23,7 @@
use crate::virtio::vhost::user::device::handler::sys::windows::run_handler;
use crate::virtio::vhost::user::device::snd::SndBackend;
use crate::virtio::vhost::user::device::snd::SND_EXECUTOR;
-use crate::virtio::vhost::user::device::VhostUserDevice;
+use crate::virtio::vhost::user::VhostUserDeviceBuilder;
pub mod generic;
pub use generic as product;
@@ -111,7 +111,7 @@
warn!("Failed to set audio thread to real time: {}", e);
};
- let handler = snd_device.into_req_handler(&ex)?;
+ let handler = snd_device.build(&ex)?;
info!("vhost-user snd device ready, starting run loop...");
if let Err(e) = ex.run_until(run_handler(
diff --git a/devices/src/virtio/vhost/user/device/vsock.rs b/devices/src/virtio/vhost/user/device/vsock.rs
index 04e7d7d..25bb9c3 100644
--- a/devices/src/virtio/vhost/user/device/vsock.rs
+++ b/devices/src/virtio/vhost/user/device/vsock.rs
@@ -22,7 +22,7 @@
use vhost::Vsock;
use vm_memory::GuestMemory;
use vmm_vhost::connection::Connection;
-use vmm_vhost::message::SlaveReq;
+use vmm_vhost::message::BackendReq;
use vmm_vhost::message::VhostSharedMemoryRegion;
use vmm_vhost::message::VhostUserConfigFlags;
use vmm_vhost::message::VhostUserInflight;
@@ -33,7 +33,6 @@
use vmm_vhost::message::VhostUserVringState;
use vmm_vhost::Error;
use vmm_vhost::Result;
-use vmm_vhost::VhostUserSlaveReqHandler;
use vmm_vhost::VHOST_USER_F_PROTOCOL_FEATURES;
use zerocopy::AsBytes;
@@ -41,7 +40,7 @@
use crate::virtio::vhost::user::device::handler::vmm_va_to_gpa;
use crate::virtio::vhost::user::device::handler::MappingInfo;
use crate::virtio::vhost::user::device::handler::VhostUserRegularOps;
-use crate::virtio::vhost::user::VhostUserDevice;
+use crate::virtio::vhost::user::VhostUserDeviceBuilder;
use crate::virtio::vhost::user::VhostUserListener;
use crate::virtio::vhost::user::VhostUserListenerTrait;
use crate::virtio::Queue;
@@ -91,15 +90,8 @@
}
}
-impl VhostUserDevice for VhostUserVsockDevice {
- fn max_queue_num(&self) -> usize {
- NUM_QUEUES
- }
-
- fn into_req_handler(
- self: Box<Self>,
- _ex: &Executor,
- ) -> anyhow::Result<Box<dyn vmm_vhost::VhostUserSlaveReqHandler>> {
+impl VhostUserDeviceBuilder for VhostUserVsockDevice {
+ fn build(self: Box<Self>, _ex: &Executor) -> anyhow::Result<Box<dyn vmm_vhost::Backend>> {
let backend = VsockBackend {
queues: [
QueueConfig::new(Queue::MAX_SIZE, 0),
@@ -121,11 +113,11 @@
use vhost::Error::*;
match err {
IoctlError(e) => Error::ReqHandlerError(e),
- _ => Error::SlaveInternalError,
+ _ => Error::BackendInternalError,
}
}
-impl VhostUserSlaveReqHandler for VsockBackend {
+impl vmm_vhost::Backend for VsockBackend {
fn set_owner(&mut self) -> Result<()> {
self.handle.set_owner().map_err(convert_vhost_error)
}
@@ -400,9 +392,9 @@
Err(Error::InvalidOperation)
}
- fn set_slave_req_fd(&mut self, _vu_req: Connection<SlaveReq>) {
- // We didn't set VhostUserProtocolFeatures::SLAVE_REQ
- unreachable!("unexpected set_slave_req_fd");
+ fn set_backend_req_fd(&mut self, _vu_req: Connection<BackendReq>) {
+ // We didn't set VhostUserProtocolFeatures::BACKEND_REQ
+ unreachable!("unexpected set_backend_req_fd");
}
fn get_inflight_fd(
diff --git a/devices/src/virtio/vhost/user/device/wl.rs b/devices/src/virtio/vhost/user/device/wl.rs
index f1cdf36..0d3cf9b 100644
--- a/devices/src/virtio/vhost/user/device/wl.rs
+++ b/devices/src/virtio/vhost/user/device/wl.rs
@@ -18,7 +18,6 @@
use base::clone_descriptor;
use base::error;
use base::warn;
-use base::FromRawDescriptor;
use base::SafeDescriptor;
use base::Tube;
use base::UnixSeqpacket;
@@ -41,7 +40,7 @@
use crate::virtio::vhost::user::device::handler::Error as DeviceError;
use crate::virtio::vhost::user::device::handler::VhostBackendReqConnection;
use crate::virtio::vhost::user::device::handler::VhostBackendReqConnectionState;
-use crate::virtio::vhost::user::device::handler::VhostUserBackend;
+use crate::virtio::vhost::user::device::handler::VhostUserDevice;
use crate::virtio::vhost::user::device::handler::WorkerState;
use crate::virtio::vhost::user::device::listener::sys::VhostUserListener;
use crate::virtio::vhost::user::device::listener::VhostUserListenerTrait;
@@ -141,7 +140,7 @@
}
}
-impl VhostUserBackend for WlBackend {
+impl VhostUserDevice for WlBackend {
fn max_queue_num(&self) -> usize {
NUM_QUEUES
}
@@ -176,7 +175,7 @@
}
fn protocol_features(&self) -> VhostUserProtocolFeatures {
- VhostUserProtocolFeatures::SLAVE_REQ | VhostUserProtocolFeatures::SHARED_MEMORY_REGIONS
+ VhostUserProtocolFeatures::BACKEND_REQ | VhostUserProtocolFeatures::SHARED_MEMORY_REGIONS
}
fn ack_protocol_features(&mut self, features: u64) -> anyhow::Result<()> {
@@ -268,11 +267,7 @@
let queue_task = match idx {
0 => {
let wlstate_ctx = clone_descriptor(wlstate.borrow().wait_ctx())
- .map(|fd| {
- // SAFETY:
- // Safe because we just created this fd.
- AsyncWrapper::new(unsafe { SafeDescriptor::from_raw_descriptor(fd) })
- })
+ .map(AsyncWrapper::new)
.context("failed to clone inner WaitContext for WlState")
.and_then(|ctx| {
self.ex
@@ -407,7 +402,7 @@
let listener = VhostUserListener::new_socket(&socket, None)?;
- let backend = Box::new(WlBackend::new(&ex, wayland_paths, resource_bridge));
+ let backend = WlBackend::new(&ex, wayland_paths, resource_bridge);
// run_until() returns an Result<Result<..>> which the ? operator lets us flatten.
ex.run_until(listener.run_backend(backend, &ex))?
}
diff --git a/devices/src/virtio/vhost/user/mod.rs b/devices/src/virtio/vhost/user/mod.rs
index 638d207..622cf64 100644
--- a/devices/src/virtio/vhost/user/mod.rs
+++ b/devices/src/virtio/vhost/user/mod.rs
@@ -3,7 +3,6 @@
// found in the LICENSE file.
pub mod device;
-pub mod vmm;
use std::fmt::Debug;
diff --git a/devices/src/virtio/vhost/user/snapshot_extensions.md b/devices/src/virtio/vhost/user/snapshot_extensions.md
new file mode 100644
index 0000000..2b20446
--- /dev/null
+++ b/devices/src/virtio/vhost/user/snapshot_extensions.md
@@ -0,0 +1,121 @@
+# vhost-user protocol extensions: sleep/wake/snapshot/restore
+
+WORK IN PROGRESS
+
+Documentation for the vhost-user protocol extensions added to crosvm as part of the snapshot-restore
+project. Written in the style of https://qemu-project.gitlab.io/qemu/interop/vhost-user.html so that
+we can send it upstream as a proposal.
+
+These extensions might be redundant with the VHOST_USER_PROTOCOL_F_DEVICE_STATE features recently
+added to the spec.
+
+## Protocol features
+
+TODO: Include a protocol feature for backends to advertise snapshotting support.
+
+## Front-end message types
+
+### VHOST_USER_SLEEP
+
+id: 1000 (temporary)
+
+equivalent ioctl: N/A
+
+request payload: N/A
+
+reply payload: i8
+
+Backend should stop all active queues. If the backend interacts with resources on the host, e.g. if
+it writes to a socket, it is expected that all activity with those resources stops before the
+VHOST_USER_SLEEP response is sent. This requirement allows other host side processes to snapshot
+their own state without the risk of race conditions. For example, if a virtio-blk device flushed
+pending writes after VHOST_USER_SLEEP, then a disk image snapshot taken by the VMM could be missing data.
+
+The first byte of the response should be 1 to indicate success or 0 to indicate failure.
+
+### VHOST_USER_WAKE
+
+id: 1001 (temporary)
+
+equivalent ioctl: N/A
+
+request payload: N/A
+
+reply payload: i8
+
+Backend should start all active queues and may restart any interactions with host side resources.
+
+The first byte of the response should be 1 to indicate success or 0 to indicate failure.
+
+### VHOST_USER_SNAPSHOT
+
+id: 1002 (temporary)
+
+equivalent ioctl: N/A
+
+request payload: N/A
+
+reply payload: i8, followed by (payload size - 1) bytes of opaque snapshot data
+
+Backend should create a snapshot of all state needed to perform a restore.
+
+The first byte of the response should be 1 to indicate success or 0 to indicate failure. The rest of
+the response is the snapshot bytes, which are opaque from the perspective of the frontend.
+
+### VHOST_USER_RESTORE
+
+id: 1003 (temporary)
+
+equivalent ioctl: N/A
+
+request payload: (payload size) bytes of opaque snapshot data
+
+reply payload: i8
+
+Backend should restore itself to the state of the snapshot provided in the request payload. The request
+will contain the exact same bytes returned from a previous VHOST_USER_SNAPSHOT request.
+
+The frontend must send the VHOST_USER_SET_MEM_TABLE request before VHOST_USER_RESTORE so that the
+backend has enough information to perform the vring restore.
+
+The event file descriptors for adding buffers to the vrings (normally passed via
+VHOST_USER_SET_VRING_KICK) are included in the ancillary data. The index of the file descriptor in
+the ancillary data is the index of the queue it belongs to.
+
+The first byte of the response should be 1 to indicate success or 0 to indicate failure.
+
+## Snapshot-Restore
+
+TODO: write an overview for the feature
+
+### Frontend
+
+Snapshot sequence:
+
+1. Frontend connects to vhost-user devices.
+1. ... proceed as usual ...
+1. For each vhost-user device
+ - Frontend sends VHOST_USER_SLEEP request.
+1. For each vhost-user device
+ - Frontend sends VHOST_USER_SNAPSHOT request and saves the response payload somewhere.
+1. For each vhost-user device
+ - Frontend sends VHOST_USER_WAKE request.
+1. ... proceed as usual ...
+
+Restore sequence:
+
+1. Frontend connects to vhost-user devices.
+1. For each vhost-user device
+ - Frontend sends VHOST_USER_SLEEP request.
+1. For each vhost-user device
+ - Frontend sends VHOST_USER_SET_MEM_TABLE request.
+ - For every queue that was active at the time of snapshotting, frontend sends a
+ VHOST_USER_SET_VRING_CALL request for that queue.
+ - Frontend sends VHOST_USER_RESTORE request.
+1. For each vhost-user device
+ - Frontend sends VHOST_USER_WAKE request.
+1. ... proceed as usual ...
+
+### Backend
+
+TODO: anything interesting to write here?
diff --git a/devices/src/virtio/vhost/user/vmm/handler.rs b/devices/src/virtio/vhost/user/vmm/handler.rs
deleted file mode 100644
index 7ed77f0..0000000
--- a/devices/src/virtio/vhost/user/vmm/handler.rs
+++ /dev/null
@@ -1,568 +0,0 @@
-// Copyright 2021 The ChromiumOS Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-mod sys;
-pub(crate) mod worker;
-
-use std::collections::BTreeMap;
-
-use base::error;
-use base::info;
-use base::trace;
-use base::AsRawDescriptor;
-use base::Event;
-use base::Protection;
-use base::SafeDescriptor;
-use base::WorkerThread;
-use hypervisor::MemCacheType;
-use vm_control::VmMemorySource;
-use vm_memory::GuestMemory;
-use vmm_vhost::message::VhostUserConfigFlags;
-use vmm_vhost::message::VhostUserExternalMapMsg;
-use vmm_vhost::message::VhostUserGpuMapMsg;
-use vmm_vhost::message::VhostUserProtocolFeatures;
-use vmm_vhost::message::VhostUserShmemMapMsg;
-use vmm_vhost::message::VhostUserShmemUnmapMsg;
-use vmm_vhost::HandlerResult;
-use vmm_vhost::Master;
-use vmm_vhost::MasterReqHandler;
-use vmm_vhost::VhostUserMasterReqHandler;
-use vmm_vhost::VhostUserMemoryRegionInfo;
-use vmm_vhost::VringConfigData;
-use vmm_vhost::VHOST_USER_F_PROTOCOL_FEATURES;
-
-use crate::virtio::vhost::user::vmm::handler::sys::create_backend_req_handler;
-use crate::virtio::vhost::user::vmm::Connection;
-use crate::virtio::vhost::user::vmm::Error;
-use crate::virtio::vhost::user::vmm::Result;
-use crate::virtio::Interrupt;
-use crate::virtio::Queue;
-use crate::virtio::SharedMemoryMapper;
-use crate::virtio::SharedMemoryRegion;
-
-type BackendReqHandler = MasterReqHandler<BackendReqHandlerImpl>;
-
-pub struct VhostUserHandler {
- vu: Master,
- pub avail_features: u64,
- acked_features: u64,
- protocol_features: VhostUserProtocolFeatures,
- backend_req_handler: Option<BackendReqHandler>,
- // Shared memory region info. IPC result from backend is saved with outer Option.
- shmem_region: Option<Option<SharedMemoryRegion>>,
-}
-
-impl VhostUserHandler {
- /// Creates a `VhostUserHandler` instance with features and protocol features initialized.
- pub fn new(
- connection: Connection,
- allow_features: u64,
- allow_protocol_features: VhostUserProtocolFeatures,
- ) -> Result<Self> {
- #[cfg(windows)]
- let backend_pid = connection.target_pid();
-
- let mut vu = Master::from_stream(connection);
-
- vu.set_owner().map_err(Error::SetOwner)?;
-
- let avail_features = allow_features & vu.get_features().map_err(Error::GetFeatures)?;
- let mut acked_features = 0;
-
- let mut protocol_features = VhostUserProtocolFeatures::empty();
- if avail_features & 1 << VHOST_USER_F_PROTOCOL_FEATURES != 0 {
- // The vhost-user backend supports VHOST_USER_F_PROTOCOL_FEATURES; enable it.
- vu.set_features(1 << VHOST_USER_F_PROTOCOL_FEATURES)
- .map_err(Error::SetFeatures)?;
- acked_features |= 1 << VHOST_USER_F_PROTOCOL_FEATURES;
-
- let avail_protocol_features = vu
- .get_protocol_features()
- .map_err(Error::GetProtocolFeatures)?;
- protocol_features = allow_protocol_features & avail_protocol_features;
- vu.set_protocol_features(protocol_features)
- .map_err(Error::SetProtocolFeatures)?;
- }
-
- // if protocol feature `VhostUserProtocolFeatures::SLAVE_REQ` is negotiated.
- let backend_req_handler =
- if protocol_features.contains(VhostUserProtocolFeatures::SLAVE_REQ) {
- let mut handler = create_backend_req_handler(
- BackendReqHandlerImpl {
- interrupt: None,
- shared_mapper_state: None,
- },
- #[cfg(windows)]
- backend_pid,
- )?;
- vu.set_slave_request_fd(&handler.take_tx_descriptor())
- .map_err(Error::SetDeviceRequestChannel)?;
- Some(handler)
- } else {
- None
- };
-
- Ok(VhostUserHandler {
- vu,
- avail_features,
- acked_features,
- protocol_features,
- backend_req_handler,
- shmem_region: None,
- })
- }
-
- /// Returns the maximum number of queues supported by the backend, or `None` if the MQ protocol
- /// feature was not negotiated.
- pub fn num_queues(&self) -> Result<Option<usize>> {
- if self
- .protocol_features
- .contains(VhostUserProtocolFeatures::MQ)
- {
- trace!("backend supports VHOST_USER_PROTOCOL_F_MQ");
- let num_queues = self.vu.get_queue_num().map_err(Error::GetQueueNum)?;
- trace!("VHOST_USER_GET_QUEUE_NUM returned {num_queues}");
- Ok(Some(num_queues as usize))
- } else {
- trace!("backend does not support VHOST_USER_PROTOCOL_F_MQ");
- Ok(None)
- }
- }
-
- /// Enables a set of features.
- pub fn ack_features(&mut self, ack_features: u64) -> Result<()> {
- let features = (ack_features & self.avail_features) | self.acked_features;
- self.vu.set_features(features).map_err(Error::SetFeatures)?;
- self.acked_features = features;
- Ok(())
- }
-
- /// Gets the device configuration space at `offset` and writes it into `data`.
- pub fn read_config(&mut self, offset: u64, data: &mut [u8]) -> Result<()> {
- let (_, config) = self
- .vu
- .get_config(
- offset
- .try_into()
- .map_err(|_| Error::InvalidConfigOffset(offset))?,
- data.len()
- .try_into()
- .map_err(|_| Error::InvalidConfigLen(data.len()))?,
- VhostUserConfigFlags::WRITABLE,
- data,
- )
- .map_err(Error::GetConfig)?;
- data.copy_from_slice(&config);
- Ok(())
- }
-
- /// Writes `data` into the device configuration space at `offset`.
- pub fn write_config(&mut self, offset: u64, data: &[u8]) -> Result<()> {
- self.vu
- .set_config(
- offset
- .try_into()
- .map_err(|_| Error::InvalidConfigOffset(offset))?,
- VhostUserConfigFlags::empty(),
- data,
- )
- .map_err(Error::SetConfig)
- }
-
- /// Sets the memory map regions so it can translate the vring addresses.
- pub fn set_mem_table(&mut self, mem: &GuestMemory) -> Result<()> {
- let regions: Vec<_> = mem
- .regions()
- .map(|region| VhostUserMemoryRegionInfo {
- guest_phys_addr: region.guest_addr.0,
- memory_size: region.size as u64,
- userspace_addr: region.host_addr as u64,
- mmap_offset: region.shm_offset,
- mmap_handle: region.shm.as_raw_descriptor(),
- })
- .collect();
-
- self.vu
- .set_mem_table(regions.as_slice())
- .map_err(Error::SetMemTable)?;
-
- Ok(())
- }
-
- /// Activates a vring for the given `queue`.
- pub fn activate_vring(
- &mut self,
- mem: &GuestMemory,
- queue_index: usize,
- queue: &Queue,
- irqfd: &Event,
- ) -> Result<()> {
- self.vu
- .set_vring_num(queue_index, queue.size())
- .map_err(Error::SetVringNum)?;
-
- let config_data = VringConfigData {
- queue_size: queue.size(),
- flags: 0u32,
- desc_table_addr: mem
- .get_host_address(queue.desc_table())
- .map_err(Error::GetHostAddress)? as u64,
- used_ring_addr: mem
- .get_host_address(queue.used_ring())
- .map_err(Error::GetHostAddress)? as u64,
- avail_ring_addr: mem
- .get_host_address(queue.avail_ring())
- .map_err(Error::GetHostAddress)? as u64,
- log_addr: None,
- };
- self.vu
- .set_vring_addr(queue_index, &config_data)
- .map_err(Error::SetVringAddr)?;
-
- self.vu
- .set_vring_base(queue_index, 0)
- .map_err(Error::SetVringBase)?;
-
- self.vu
- .set_vring_call(queue_index, irqfd)
- .map_err(Error::SetVringCall)?;
- self.vu
- .set_vring_kick(queue_index, queue.event())
- .map_err(Error::SetVringKick)?;
-
- // Per protocol documentation, `VHOST_USER_SET_VRING_ENABLE` should be sent only when
- // `VHOST_USER_F_PROTOCOL_FEATURES` has been negotiated.
- if self.acked_features & 1 << VHOST_USER_F_PROTOCOL_FEATURES != 0 {
- self.vu
- .set_vring_enable(queue_index, true)
- .map_err(Error::SetVringEnable)?;
- }
-
- Ok(())
- }
-
- /// Activates vrings.
- pub fn activate(
- &mut self,
- mem: GuestMemory,
- interrupt: Interrupt,
- queues: BTreeMap<usize, Queue>,
- label: &str,
- ) -> Result<WorkerThread<()>> {
- self.set_mem_table(&mem)?;
-
- let msix_config_opt = interrupt
- .get_msix_config()
- .as_ref()
- .ok_or(Error::MsixConfigUnavailable)?;
- let msix_config = msix_config_opt.lock();
-
- let non_msix_evt = Event::new().map_err(Error::CreateEvent)?;
- for (&queue_index, queue) in queues.iter() {
- let irqfd = msix_config
- .get_irqfd(queue.vector() as usize)
- .unwrap_or(&non_msix_evt);
- self.activate_vring(&mem, queue_index, queue, irqfd)?;
- }
-
- drop(msix_config);
-
- self.start_worker(interrupt, label, mem, non_msix_evt)
- }
-
- /// Deactivates all vrings.
- pub fn reset(&mut self, queues_num: usize) -> Result<()> {
- for queue_index in 0..queues_num {
- if self.acked_features & 1 << VHOST_USER_F_PROTOCOL_FEATURES != 0 {
- self.vu
- .set_vring_enable(queue_index, false)
- .map_err(Error::SetVringEnable)?;
- }
- self.vu
- .get_vring_base(queue_index)
- .map_err(Error::GetVringBase)?;
- }
- Ok(())
- }
-
- pub fn get_shared_memory_region(&mut self) -> Result<Option<SharedMemoryRegion>> {
- if !self
- .protocol_features
- .contains(VhostUserProtocolFeatures::SHARED_MEMORY_REGIONS)
- {
- return Ok(None);
- }
- if let Some(r) = self.shmem_region.as_ref() {
- return Ok(r.clone());
- }
- let regions = self
- .vu
- .get_shared_memory_regions()
- .map_err(Error::ShmemRegions)?;
- let region = match regions.len() {
- 0 => None,
- 1 => Some(SharedMemoryRegion {
- id: regions[0].id,
- length: regions[0].length,
- }),
- n => return Err(Error::TooManyShmemRegions(n)),
- };
-
- self.shmem_region = Some(region.clone());
- Ok(region)
- }
-
- pub fn set_shared_memory_mapper(&mut self, mapper: Box<dyn SharedMemoryMapper>) -> Result<()> {
- // Return error if backend request handler is not available. This indicates
- // that `VhostUserProtocolFeatures::SLAVE_REQ` is not negotiated.
- let backend_req_handler =
- self.backend_req_handler
- .as_mut()
- .ok_or(Error::ProtocolFeatureNotNegoiated(
- VhostUserProtocolFeatures::SLAVE_REQ,
- ))?;
-
- // The virtio framework will only call this if get_shared_memory_region returned a region
- let shmid = self
- .shmem_region
- .clone()
- .flatten()
- .expect("missing shmid")
- .id;
-
- backend_req_handler
- .backend_mut()
- .set_shared_mapper_state(SharedMapperState { mapper, shmid });
- Ok(())
- }
-
- /// Sends a message to the device process to stop worker futures/threads
- pub fn sleep(&mut self) -> Result<()> {
- self.vu.sleep().map_err(Error::Sleep)?;
- Ok(())
- }
-
- /// Sends a message to the device process to start up worker futures/threads.
- pub fn wake(&mut self) -> Result<()> {
- self.vu.wake().map_err(Error::Wake)
- }
-
- /// Sends a snapshot request to the device and it should respond with the device's serialized
- /// state.
- pub fn snapshot(&self) -> Result<serde_json::Value> {
- let snapshot_bytes = self.vu.snapshot().map_err(Error::Snapshot)?;
- serde_json::to_value(snapshot_bytes).map_err(Error::SliceToSerdeValue)
- }
-
- /// Sends a restore request with a payload of serialized snapshotted data and queue_evts to the
- /// device process so that it can revive its state and wire up the queue_evts again.
- pub fn restore(
- &mut self,
- data: serde_json::Value,
- queue_evts: Option<Vec<Event>>,
- ) -> Result<()> {
- let data_bytes: Vec<u8> = serde_json::from_value(data).map_err(Error::SerdeValueToSlice)?;
- self.vu
- .restore(data_bytes.as_slice(), queue_evts)
- .map_err(Error::Restore)
- }
-
- /// Rewire up irqfds. Meant to be called right before `restore` and should only be called
- /// if the device is asleep.
- pub fn restore_irqfd(&self, queue_index: usize, irqfd: &Event) -> Result<()> {
- self.vu
- .set_vring_call(queue_index, irqfd)
- .map_err(Error::SetVringCall)
- }
-
- /// Helper to start up the worker thread that will be used with handling interrupts and requests
- /// from the device process.
- pub fn start_worker(
- &mut self,
- interrupt: Interrupt,
- label: &str,
- mem: GuestMemory,
- non_msix_evt: Event,
- ) -> Result<WorkerThread<()>> {
- let label = format!("vhost_user_virtio_{}", label);
-
- let mut backend_req_handler = self.backend_req_handler.take();
- if let Some(handler) = &mut backend_req_handler {
- // Using unwrap here to get the mutex protected value
- handler.backend_mut().set_interrupt(interrupt.clone());
- }
-
- Ok(WorkerThread::start(label.clone(), move |kill_evt| {
- let mut worker = worker::Worker {
- mem,
- kill_evt,
- non_msix_evt,
- backend_req_handler,
- };
-
- if let Err(e) = worker.run(interrupt) {
- error!("failed to start {} worker: {}", label, e);
- }
- }))
- }
-}
-
-struct SharedMapperState {
- mapper: Box<dyn SharedMemoryMapper>,
- shmid: u8,
-}
-
-pub struct BackendReqHandlerImpl {
- interrupt: Option<Interrupt>,
- shared_mapper_state: Option<SharedMapperState>,
-}
-
-impl BackendReqHandlerImpl {
- fn set_interrupt(&mut self, interrupt: Interrupt) {
- self.interrupt = Some(interrupt);
- }
-
- fn set_shared_mapper_state(&mut self, shared_mapper_state: SharedMapperState) {
- self.shared_mapper_state = Some(shared_mapper_state);
- }
-}
-
-impl VhostUserMasterReqHandler for BackendReqHandlerImpl {
- fn shmem_map(
- &mut self,
- req: &VhostUserShmemMapMsg,
- fd: &dyn AsRawDescriptor,
- ) -> HandlerResult<u64> {
- let shared_mapper_state = self
- .shared_mapper_state
- .as_mut()
- .ok_or_else(|| std::io::Error::from_raw_os_error(libc::EINVAL))?;
- if req.shmid != shared_mapper_state.shmid {
- error!(
- "bad shmid {}, expected {}",
- req.shmid, shared_mapper_state.shmid
- );
- return Err(std::io::Error::from_raw_os_error(libc::EINVAL));
- }
- match shared_mapper_state.mapper.add_mapping(
- VmMemorySource::Descriptor {
- descriptor: SafeDescriptor::try_from(fd)
- .map_err(|_| std::io::Error::from_raw_os_error(libc::EIO))?,
- offset: req.fd_offset,
- size: req.len,
- },
- req.shm_offset,
- Protection::from(req.flags),
- MemCacheType::CacheCoherent,
- ) {
- Ok(()) => Ok(0),
- Err(e) => {
- error!("failed to create mapping {:?}", e);
- Err(std::io::Error::from_raw_os_error(libc::EINVAL))
- }
- }
- }
-
- fn shmem_unmap(&mut self, req: &VhostUserShmemUnmapMsg) -> HandlerResult<u64> {
- let shared_mapper_state = self
- .shared_mapper_state
- .as_mut()
- .ok_or_else(|| std::io::Error::from_raw_os_error(libc::EINVAL))?;
- if req.shmid != shared_mapper_state.shmid {
- error!(
- "bad shmid {}, expected {}",
- req.shmid, shared_mapper_state.shmid
- );
- return Err(std::io::Error::from_raw_os_error(libc::EINVAL));
- }
- match shared_mapper_state.mapper.remove_mapping(req.shm_offset) {
- Ok(()) => Ok(0),
- Err(e) => {
- error!("failed to remove mapping {:?}", e);
- Err(std::io::Error::from_raw_os_error(libc::EINVAL))
- }
- }
- }
-
- fn gpu_map(
- &mut self,
- req: &VhostUserGpuMapMsg,
- descriptor: &dyn AsRawDescriptor,
- ) -> HandlerResult<u64> {
- let shared_mapper_state = self
- .shared_mapper_state
- .as_mut()
- .ok_or_else(|| std::io::Error::from_raw_os_error(libc::EINVAL))?;
- if req.shmid != shared_mapper_state.shmid {
- error!(
- "bad shmid {}, expected {}",
- req.shmid, shared_mapper_state.shmid
- );
- return Err(std::io::Error::from_raw_os_error(libc::EINVAL));
- }
- match shared_mapper_state.mapper.add_mapping(
- VmMemorySource::Vulkan {
- descriptor: SafeDescriptor::try_from(descriptor)
- .map_err(|_| std::io::Error::from_raw_os_error(libc::EIO))?,
- handle_type: req.handle_type,
- memory_idx: req.memory_idx,
- device_uuid: req.device_uuid,
- driver_uuid: req.driver_uuid,
- size: req.len,
- },
- req.shm_offset,
- Protection::read_write(),
- MemCacheType::CacheCoherent,
- ) {
- Ok(()) => Ok(0),
- Err(e) => {
- error!("failed to create mapping {:?}", e);
- Err(std::io::Error::from_raw_os_error(libc::EINVAL))
- }
- }
- }
-
- fn external_map(&mut self, req: &VhostUserExternalMapMsg) -> HandlerResult<u64> {
- let shared_mapper_state = self
- .shared_mapper_state
- .as_mut()
- .ok_or_else(|| std::io::Error::from_raw_os_error(libc::EINVAL))?;
- if req.shmid != shared_mapper_state.shmid {
- error!(
- "bad shmid {}, expected {}",
- req.shmid, shared_mapper_state.shmid
- );
- return Err(std::io::Error::from_raw_os_error(libc::EINVAL));
- }
- match shared_mapper_state.mapper.add_mapping(
- VmMemorySource::ExternalMapping {
- ptr: req.ptr,
- size: req.len,
- },
- req.shm_offset,
- Protection::read_write(),
- MemCacheType::CacheCoherent,
- ) {
- Ok(()) => Ok(0),
- Err(e) => {
- error!("failed to create mapping {:?}", e);
- Err(std::io::Error::from_raw_os_error(libc::EINVAL))
- }
- }
- }
-
- fn handle_config_change(&mut self) -> HandlerResult<u64> {
- info!("Handle Config Change called");
- match &self.interrupt {
- Some(interrupt) => {
- interrupt.signal_config_changed();
- Ok(0)
- }
- None => {
- error!("cannot send interrupt");
- Err(std::io::Error::from_raw_os_error(libc::ENOSYS))
- }
- }
- }
-}
diff --git a/devices/src/virtio/vhost/user/vmm/handler/sys/linux.rs b/devices/src/virtio/vhost/user/vmm/handler/sys/linux.rs
deleted file mode 100644
index 5ebc0eb..0000000
--- a/devices/src/virtio/vhost/user/vmm/handler/sys/linux.rs
+++ /dev/null
@@ -1,59 +0,0 @@
-// Copyright 2022 The ChromiumOS Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-use anyhow::bail;
-use anyhow::Context;
-use anyhow::Result;
-use base::info;
-use base::AsRawDescriptor;
-use base::SafeDescriptor;
-use cros_async::AsyncWrapper;
-use cros_async::Executor;
-use vmm_vhost::Error as VhostError;
-use vmm_vhost::MasterReqHandler;
-
-use crate::virtio::vhost::user::vmm::handler::BackendReqHandler;
-use crate::virtio::vhost::user::vmm::handler::BackendReqHandlerImpl;
-use crate::virtio::vhost::user::vmm::Error;
-use crate::virtio::vhost::user::vmm::Result as VhostResult;
-
-pub fn create_backend_req_handler(h: BackendReqHandlerImpl) -> VhostResult<BackendReqHandler> {
- let handler = MasterReqHandler::with_stream(h).map_err(Error::CreateBackendReqHandler)?;
- Ok(handler)
-}
-
-pub async fn run_backend_request_handler(
- handler: Option<BackendReqHandler>,
- ex: &Executor,
-) -> Result<()> {
- let mut handler = match handler {
- Some(h) => h,
- None => std::future::pending().await,
- };
-
- let h = SafeDescriptor::try_from(&handler as &dyn AsRawDescriptor)
- .map(AsyncWrapper::new)
- .context("failed to get safe descriptor for handler")?;
- let handler_source = ex
- .async_from(h)
- .context("failed to create an async source")?;
-
- loop {
- handler_source
- .wait_readable()
- .await
- .context("failed to wait for the handler to become readable")?;
- match handler.handle_request() {
- Ok(_) => (),
- Err(VhostError::ClientExit) => {
- info!("vhost-user connection closed");
- // Exit as the client closed the connection.
- return Ok(());
- }
- Err(e) => {
- bail!("failed to handle a vhost-user request: {}", e);
- }
- };
- }
-}
diff --git a/devices/src/virtio/vhost/user/vmm/handler/worker.rs b/devices/src/virtio/vhost/user/vmm/handler/worker.rs
deleted file mode 100644
index 7bfc686..0000000
--- a/devices/src/virtio/vhost/user/vmm/handler/worker.rs
+++ /dev/null
@@ -1,81 +0,0 @@
-// Copyright 2021 The ChromiumOS Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-use base::Event;
-use cros_async::select4;
-use cros_async::EventAsync;
-use cros_async::Executor;
-use cros_async::SelectResult;
-use futures::pin_mut;
-use vm_memory::GuestMemory;
-
-use crate::virtio::async_utils;
-use crate::virtio::vhost::user::vmm::handler::sys::run_backend_request_handler;
-use crate::virtio::vhost::user::vmm::handler::BackendReqHandler;
-use crate::virtio::Interrupt;
-use crate::virtio::VIRTIO_MSI_NO_VECTOR;
-
-pub struct Worker {
- pub mem: GuestMemory,
- pub kill_evt: Event,
- pub non_msix_evt: Event,
- pub backend_req_handler: Option<BackendReqHandler>,
-}
-
-impl Worker {
- // Runs asynchronous tasks.
- pub fn run(&mut self, interrupt: Interrupt) -> Result<(), String> {
- let ex = Executor::new().expect("failed to create an executor");
-
- let non_msix_evt = self
- .non_msix_evt
- .try_clone()
- .expect("failed to clone non_msix_evt");
- let handle_non_msix_evt = handle_non_msix_evt(&ex, non_msix_evt, interrupt.clone());
- pin_mut!(handle_non_msix_evt);
-
- let resample = async_utils::handle_irq_resample(&ex, interrupt);
- pin_mut!(resample);
-
- let kill_evt = self.kill_evt.try_clone().expect("failed to clone kill_evt");
- let kill = async_utils::await_and_exit(&ex, kill_evt);
- pin_mut!(kill);
-
- let req_handler = run_backend_request_handler(self.backend_req_handler.take(), &ex);
- pin_mut!(req_handler);
-
- match ex.run_until(select4(handle_non_msix_evt, resample, kill, req_handler)) {
- Ok((non_msix_evt_result, resample_res, _, backend_result)) => {
- if let SelectResult::Finished(Err(e)) = non_msix_evt_result {
- return Err(format!("non msix event failure: {:#}", e));
- }
- if let SelectResult::Finished(Err(e)) = resample_res {
- return Err(format!("failed to resample a irq value: {:?}", e));
- }
- if let SelectResult::Finished(Err(e)) = backend_result {
- return Err(format!("backend request failure: {:#}", e));
- }
- Ok(())
- }
- Err(e) => Err(e.to_string()),
- }
- }
-}
-
-// The vhost-user protocol allows the backend to signal events, but for non-MSI-X devices,
-// a device must also update the interrupt status mask. `handle_non_msix_evt` proxies events
-// from the vhost-user backend to update the status mask.
-async fn handle_non_msix_evt(
- ex: &Executor,
- non_msix_evt: Event,
- interrupt: Interrupt,
-) -> Result<(), String> {
- let event_async =
- EventAsync::new(non_msix_evt, ex).expect("failed to create async non_msix_evt");
- loop {
- let _ = event_async.next_val().await;
- // The parameter vector of signal_used_queue is used only when msix is enabled.
- interrupt.signal_used_queue(VIRTIO_MSI_NO_VECTOR);
- }
-}
diff --git a/devices/src/virtio/vhost/user/vmm/virtio_device.rs b/devices/src/virtio/vhost/user/vmm/virtio_device.rs
deleted file mode 100644
index 53ea83b..0000000
--- a/devices/src/virtio/vhost/user/vmm/virtio_device.rs
+++ /dev/null
@@ -1,340 +0,0 @@
-// Copyright 2022 The ChromiumOS Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-//! VirtioDevice implementation for the VMM side of a vhost-user connection.
-
-use std::cell::RefCell;
-use std::collections::BTreeMap;
-use std::sync::Arc;
-
-use anyhow::Context;
-use base::error;
-use base::trace;
-use base::Event;
-use base::RawDescriptor;
-use base::WorkerThread;
-use serde_json::Value;
-use sync::Mutex;
-use vm_memory::GuestMemory;
-use vmm_vhost::message::VhostUserProtocolFeatures;
-use vmm_vhost::VHOST_USER_F_PROTOCOL_FEATURES;
-
-use crate::pci::MsixConfig;
-use crate::virtio::copy_config;
-use crate::virtio::device_constants::VIRTIO_DEVICE_TYPE_SPECIFIC_FEATURES_MASK;
-use crate::virtio::vhost::user::vmm::Connection;
-use crate::virtio::vhost::user::vmm::Result;
-use crate::virtio::vhost::user::vmm::VhostUserHandler;
-use crate::virtio::DeviceType;
-use crate::virtio::Interrupt;
-use crate::virtio::Queue;
-use crate::virtio::QueueConfig;
-use crate::virtio::SharedMemoryMapper;
-use crate::virtio::SharedMemoryRegion;
-use crate::virtio::VirtioDevice;
-use crate::PciAddress;
-
-pub struct VhostUserVirtioDevice {
- device_type: DeviceType,
- worker_thread: Option<WorkerThread<()>>,
- handler: RefCell<VhostUserHandler>,
- queue_sizes: Vec<u16>,
- cfg: Option<Vec<u8>>,
- expose_shmem_descriptors_with_viommu: bool,
- pci_address: Option<PciAddress>,
-}
-
-// Returns the largest power of two that is less than or equal to `val`.
-fn power_of_two_le(val: u16) -> Option<u16> {
- if val == 0 {
- None
- } else if val.is_power_of_two() {
- Some(val)
- } else {
- val.checked_next_power_of_two()
- .map(|next_pow_two| next_pow_two / 2)
- }
-}
-
-impl VhostUserVirtioDevice {
- /// Create a new VirtioDevice for a vhost-user device frontend.
- ///
- /// # Arguments
- ///
- /// - `device_type`: virtio device type
- /// - `base_features`: base virtio device features (e.g. `VIRTIO_F_VERSION_1`)
- /// - `connection`: connection to the device backend
- /// - `max_queue_size`: maximum number of entries in each queue (default: [`Queue::MAX_SIZE`])
- pub fn new(
- device_type: DeviceType,
- base_features: u64,
- connection: Connection,
- max_queue_size: Option<u16>,
- pci_address: Option<PciAddress>,
- ) -> Result<VhostUserVirtioDevice> {
- VhostUserVirtioDevice::new_internal(
- connection,
- device_type,
- max_queue_size,
- base_features,
- None, // cfg
- pci_address,
- )
- }
-
- /// Create a new VirtioDevice for a vhost-user device frontend.
- ///
- /// # Arguments
- ///
- /// - `connection`: connection to the device backend
- /// - `device_type`: virtio device type
- /// - `max_queue_size`: maximum number of entries in each queue (default: [`Queue::MAX_SIZE`])
- /// - `base_features`: base virtio device features (e.g. `VIRTIO_F_VERSION_1`)
- /// - `cfg`: bytes to return for the virtio configuration space (queried from device if not
- /// specified)
- pub(crate) fn new_internal(
- connection: Connection,
- device_type: DeviceType,
- max_queue_size: Option<u16>,
- base_features: u64,
- cfg: Option<&[u8]>,
- pci_address: Option<PciAddress>,
- ) -> Result<VhostUserVirtioDevice> {
- let allow_features = VIRTIO_DEVICE_TYPE_SPECIFIC_FEATURES_MASK
- | base_features
- | 1 << VHOST_USER_F_PROTOCOL_FEATURES;
-
- let mut allow_protocol_features = VhostUserProtocolFeatures::CONFIG
- | VhostUserProtocolFeatures::MQ
- | VhostUserProtocolFeatures::SLAVE_REQ;
-
- // HACK: the crosvm vhost-user GPU backend supports the non-standard
- // VHOST_USER_PROTOCOL_FEATURE_SHARED_MEMORY_REGIONS. This should either be standardized
- // (and enabled for all device types) or removed.
- let expose_shmem_descriptors_with_viommu = if device_type == DeviceType::Gpu {
- allow_protocol_features |= VhostUserProtocolFeatures::SHARED_MEMORY_REGIONS;
- true
- } else {
- false
- };
-
- let handler = VhostUserHandler::new(connection, allow_features, allow_protocol_features)?;
-
- // If the device supports VHOST_USER_PROTOCOL_F_MQ, use VHOST_USER_GET_QUEUE_NUM to
- // determine the number of queues supported. Otherwise, use the minimum number of queues
- // required by the spec for this device type.
- let num_queues = handler
- .num_queues()?
- .unwrap_or_else(|| device_type.min_queues());
-
- // Clamp the maximum queue size to the largest power of 2 <= max_queue_size.
- let max_queue_size = max_queue_size
- .and_then(power_of_two_le)
- .unwrap_or(Queue::MAX_SIZE);
-
- trace!(
- "vhost-user {device_type} frontend with {num_queues} queues x {max_queue_size} entries\
- {}",
- if let Some(pci_address) = pci_address {
- format!(" pci-address {pci_address}")
- } else {
- "".to_string()
- }
- );
-
- let queue_sizes = vec![max_queue_size; num_queues];
-
- Ok(VhostUserVirtioDevice {
- device_type,
- worker_thread: None,
- handler: RefCell::new(handler),
- queue_sizes,
- cfg: cfg.map(|cfg| cfg.to_vec()),
- expose_shmem_descriptors_with_viommu,
- pci_address,
- })
- }
-}
-
-impl VirtioDevice for VhostUserVirtioDevice {
- fn keep_rds(&self) -> Vec<RawDescriptor> {
- Vec::new()
- }
-
- fn device_type(&self) -> DeviceType {
- self.device_type
- }
-
- fn queue_max_sizes(&self) -> &[u16] {
- &self.queue_sizes
- }
-
- fn features(&self) -> u64 {
- self.handler.borrow().avail_features
- }
-
- fn ack_features(&mut self, features: u64) {
- if let Err(e) = self.handler.borrow_mut().ack_features(features) {
- error!("failed to enable features 0x{:x}: {}", features, e);
- }
- }
-
- fn read_config(&self, offset: u64, data: &mut [u8]) {
- if let Some(cfg) = &self.cfg {
- copy_config(data, 0, cfg, offset);
- } else if let Err(e) = self.handler.borrow_mut().read_config(offset, data) {
- error!("failed to read config: {}", e);
- }
- }
-
- fn write_config(&mut self, offset: u64, data: &[u8]) {
- if let Err(e) = self.handler.borrow_mut().write_config(offset, data) {
- error!("failed to write config: {}", e);
- }
- }
-
- fn activate(
- &mut self,
- mem: GuestMemory,
- interrupt: Interrupt,
- queues: BTreeMap<usize, Queue>,
- ) -> anyhow::Result<()> {
- let worker_thread = self
- .handler
- .borrow_mut()
- .activate(mem, interrupt, queues, &format!("{}", self.device_type))
- .context("failed to activate queues")?;
- self.worker_thread = Some(worker_thread);
- Ok(())
- }
-
- fn reset(&mut self) -> bool {
- if let Err(e) = self.handler.borrow_mut().reset(self.queue_sizes.len()) {
- error!("Failed to reset device: {}", e);
- false
- } else {
- true
- }
- }
-
- fn pci_address(&self) -> Option<PciAddress> {
- self.pci_address
- }
-
- fn get_shared_memory_region(&self) -> Option<SharedMemoryRegion> {
- match self.handler.borrow_mut().get_shared_memory_region() {
- Ok(r) => r,
- Err(e) => {
- error!("Failed to get shared memory regions {}", e);
- None
- }
- }
- }
-
- fn set_shared_memory_mapper(&mut self, mapper: Box<dyn SharedMemoryMapper>) {
- if let Err(e) = self.handler.borrow_mut().set_shared_memory_mapper(mapper) {
- error!("Error setting shared memory mapper {}", e);
- }
- }
-
- fn expose_shmem_descriptors_with_viommu(&self) -> bool {
- self.expose_shmem_descriptors_with_viommu
- }
-
- fn virtio_sleep(&mut self) -> anyhow::Result<Option<BTreeMap<usize, Queue>>> {
- self.handler
- .borrow_mut()
- .sleep()
- .context("Failed to sleep device.")?;
-
- // Vhost user devices won't return queues on sleep, so return an empty Vec so that
- // VirtioPciDevice can set the sleep state properly.
- Ok(Some(BTreeMap::new()))
- }
-
- fn virtio_wake(
- &mut self,
- // Vhost user doesn't need to pass queue_states back to the device process, since it will
- // already have it.
- _queues_state: Option<(GuestMemory, Interrupt, BTreeMap<usize, Queue>)>,
- ) -> anyhow::Result<()> {
- self.handler
- .borrow_mut()
- .wake()
- .context("Failed to wake device.")
- }
-
- fn virtio_snapshot(&mut self) -> anyhow::Result<Value> {
- self.handler
- .borrow_mut()
- .snapshot()
- .context("failed to snapshot vu device")
- }
-
- fn virtio_restore(&mut self, _data: Value) -> anyhow::Result<()> {
- panic!("virtio_restore should not be called for vhost-user devices.")
- }
-
- fn is_vhost_user(&self) -> bool {
- true
- }
-
- fn vhost_user_restore(
- &mut self,
- data: Value,
- queue_configs: &[QueueConfig],
- queue_evts: Option<Vec<Event>>,
- interrupt: Option<Interrupt>,
- mem: GuestMemory,
- msix_config: &Arc<Mutex<MsixConfig>>,
- device_activated: bool,
- ) -> anyhow::Result<()> {
- // Other aspects of the restore operation will depend on the mem table
- // being set.
- self.handler.borrow_mut().set_mem_table(&mem)?;
-
- if device_activated {
- let non_msix_evt = Event::new().context("Failed to create event")?;
- queue_configs
- .iter()
- .enumerate()
- .filter(|(_, q)| q.ready())
- .try_for_each(|(queue_index, queue)| {
- let msix_lock = msix_config.lock();
- let irqfd = msix_lock
- .get_irqfd(queue.vector() as usize)
- .unwrap_or(&non_msix_evt);
-
- self.handler
- .borrow_mut()
- .restore_irqfd(queue_index, irqfd)
- .context("Failed to restore irqfd")?;
-
- Ok::<(), anyhow::Error>(())
- })?;
-
- anyhow::ensure!(
- self.worker_thread.is_none(),
- "self.worker_thread is some, but that should not be possible since only cold restore \
- is supported."
- );
- self.worker_thread = Some(
- self.handler
- .borrow_mut()
- .start_worker(
- interrupt.expect(
- "Interrupt doesn't exist. This shouldn't \
- happen since the device is activated.",
- ),
- &format!("{}", self.device_type),
- mem,
- non_msix_evt,
- )
- .context("Failed to start worker on restore.")?,
- );
- }
-
- Ok(self.handler.borrow_mut().restore(data, queue_evts)?)
- }
-}
diff --git a/devices/src/virtio/vhost/user/vmm/mod.rs b/devices/src/virtio/vhost_user_frontend/error.rs
similarity index 85%
rename from devices/src/virtio/vhost/user/vmm/mod.rs
rename to devices/src/virtio/vhost_user_frontend/error.rs
index 8aee196..1b2f5e9 100644
--- a/devices/src/virtio/vhost/user/vmm/mod.rs
+++ b/devices/src/virtio/vhost_user_frontend/error.rs
@@ -2,27 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-mod fs;
-mod handler;
-mod virtio_device;
-
use remain::sorted;
use thiserror::Error as ThisError;
-pub use virtio_device::VhostUserVirtioDevice;
use vm_memory::GuestMemoryError;
use vmm_vhost::message::VhostUserProtocolFeatures;
use vmm_vhost::Error as VhostError;
-pub use self::handler::VhostUserHandler;
-
-cfg_if::cfg_if! {
- if #[cfg(any(target_os = "android", target_os = "linux"))] {
- pub type Connection = std::os::unix::net::UnixStream;
- } else if #[cfg(windows)] {
- pub type Connection = base::Tube;
- }
-}
-
#[sorted]
#[derive(ThisError, Debug)]
pub enum Error {
@@ -53,12 +38,6 @@
/// Failed to get vring base offset.
#[error("failed to get vring base offset: {0}")]
GetVringBase(VhostError),
- /// Invalid config length is given.
- #[error("invalid config length is given: {0}")]
- InvalidConfigLen(usize),
- /// Invalid config offset is given.
- #[error("invalid config offset is given: {0}")]
- InvalidConfigOffset(u64),
/// MSI-X config is unavailable.
#[error("MSI-X config is unavailable")]
MsixConfigUnavailable,
@@ -127,9 +106,6 @@
/// Failed to connect socket.
#[error("failed to connect socket: {0}")]
SocketConnect(std::io::Error),
- /// Failed to create Master from a UDS path.
- #[error("failed to connect to device socket while creating instance: {0}")]
- SocketConnectOnMasterCreate(VhostError),
/// Failed to spawn worker thread.
#[error("failed to spawn worker: {0}")]
SpawnWorker(std::io::Error),
diff --git a/devices/src/virtio/vhost/user/vmm/fs.rs b/devices/src/virtio/vhost_user_frontend/fs.rs
similarity index 79%
rename from devices/src/virtio/vhost/user/vmm/fs.rs
rename to devices/src/virtio/vhost_user_frontend/fs.rs
index 4c32de9..e4ba05f 100644
--- a/devices/src/virtio/vhost/user/vmm/fs.rs
+++ b/devices/src/virtio/vhost_user_frontend/fs.rs
@@ -7,19 +7,18 @@
use zerocopy::AsBytes;
use crate::virtio::device_constants::fs::FS_MAX_TAG_LEN;
-use crate::virtio::vhost::user::vmm::Connection;
-use crate::virtio::vhost::user::vmm::Error;
-use crate::virtio::vhost::user::vmm::Result;
-use crate::virtio::vhost::user::vmm::VhostUserVirtioDevice;
+use crate::virtio::vhost_user_frontend::Error;
+use crate::virtio::vhost_user_frontend::Result;
+use crate::virtio::vhost_user_frontend::VhostUserFrontend;
use crate::virtio::DeviceType;
-impl VhostUserVirtioDevice {
+impl VhostUserFrontend {
pub fn new_fs(
base_features: u64,
- connection: Connection,
+ connection: vmm_vhost::SystemStream,
max_queue_size: Option<u16>,
tag: Option<&str>,
- ) -> Result<VhostUserVirtioDevice> {
+ ) -> Result<VhostUserFrontend> {
let cfg = if let Some(tag) = tag {
if tag.len() > FS_MAX_TAG_LEN {
return Err(Error::TagTooLong {
@@ -44,7 +43,7 @@
None
};
- VhostUserVirtioDevice::new_internal(
+ VhostUserFrontend::new_internal(
connection,
DeviceType::Fs,
max_queue_size,
diff --git a/devices/src/virtio/vhost_user_frontend/handler.rs b/devices/src/virtio/vhost_user_frontend/handler.rs
new file mode 100644
index 0000000..da02a58
--- /dev/null
+++ b/devices/src/virtio/vhost_user_frontend/handler.rs
@@ -0,0 +1,193 @@
+// Copyright 2021 The ChromiumOS Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+use base::error;
+use base::info;
+use base::AsRawDescriptor;
+use base::Protection;
+use base::SafeDescriptor;
+use hypervisor::MemCacheType;
+use vm_control::VmMemorySource;
+use vmm_vhost::message::VhostUserExternalMapMsg;
+use vmm_vhost::message::VhostUserGpuMapMsg;
+use vmm_vhost::message::VhostUserShmemMapMsg;
+use vmm_vhost::message::VhostUserShmemUnmapMsg;
+use vmm_vhost::Frontend;
+use vmm_vhost::FrontendServer;
+use vmm_vhost::HandlerResult;
+
+use crate::virtio::Interrupt;
+use crate::virtio::SharedMemoryMapper;
+
+pub(crate) type BackendReqHandler = FrontendServer<BackendReqHandlerImpl>;
+
+struct SharedMapperState {
+ mapper: Box<dyn SharedMemoryMapper>,
+ shmid: u8,
+}
+
+pub struct BackendReqHandlerImpl {
+ interrupt: Option<Interrupt>,
+ shared_mapper_state: Option<SharedMapperState>,
+}
+
+impl BackendReqHandlerImpl {
+ pub(crate) fn new() -> Self {
+ BackendReqHandlerImpl {
+ interrupt: None,
+ shared_mapper_state: None,
+ }
+ }
+
+ pub(crate) fn set_interrupt(&mut self, interrupt: Interrupt) {
+ self.interrupt = Some(interrupt);
+ }
+
+ pub(crate) fn set_shared_mapper_state(
+ &mut self,
+ mapper: Box<dyn SharedMemoryMapper>,
+ shmid: u8,
+ ) {
+ self.shared_mapper_state = Some(SharedMapperState { mapper, shmid });
+ }
+}
+
+impl Frontend for BackendReqHandlerImpl {
+ fn shmem_map(
+ &mut self,
+ req: &VhostUserShmemMapMsg,
+ fd: &dyn AsRawDescriptor,
+ ) -> HandlerResult<u64> {
+ let shared_mapper_state = self
+ .shared_mapper_state
+ .as_mut()
+ .ok_or_else(|| std::io::Error::from_raw_os_error(libc::EINVAL))?;
+ if req.shmid != shared_mapper_state.shmid {
+ error!(
+ "bad shmid {}, expected {}",
+ req.shmid, shared_mapper_state.shmid
+ );
+ return Err(std::io::Error::from_raw_os_error(libc::EINVAL));
+ }
+ match shared_mapper_state.mapper.add_mapping(
+ VmMemorySource::Descriptor {
+ descriptor: SafeDescriptor::try_from(fd)
+ .map_err(|_| std::io::Error::from_raw_os_error(libc::EIO))?,
+ offset: req.fd_offset,
+ size: req.len,
+ },
+ req.shm_offset,
+ Protection::from(req.flags),
+ MemCacheType::CacheCoherent,
+ ) {
+ Ok(()) => Ok(0),
+ Err(e) => {
+ error!("failed to create mapping {:?}", e);
+ Err(std::io::Error::from_raw_os_error(libc::EINVAL))
+ }
+ }
+ }
+
+ fn shmem_unmap(&mut self, req: &VhostUserShmemUnmapMsg) -> HandlerResult<u64> {
+ let shared_mapper_state = self
+ .shared_mapper_state
+ .as_mut()
+ .ok_or_else(|| std::io::Error::from_raw_os_error(libc::EINVAL))?;
+ if req.shmid != shared_mapper_state.shmid {
+ error!(
+ "bad shmid {}, expected {}",
+ req.shmid, shared_mapper_state.shmid
+ );
+ return Err(std::io::Error::from_raw_os_error(libc::EINVAL));
+ }
+ match shared_mapper_state.mapper.remove_mapping(req.shm_offset) {
+ Ok(()) => Ok(0),
+ Err(e) => {
+ error!("failed to remove mapping {:?}", e);
+ Err(std::io::Error::from_raw_os_error(libc::EINVAL))
+ }
+ }
+ }
+
+ fn gpu_map(
+ &mut self,
+ req: &VhostUserGpuMapMsg,
+ descriptor: &dyn AsRawDescriptor,
+ ) -> HandlerResult<u64> {
+ let shared_mapper_state = self
+ .shared_mapper_state
+ .as_mut()
+ .ok_or_else(|| std::io::Error::from_raw_os_error(libc::EINVAL))?;
+ if req.shmid != shared_mapper_state.shmid {
+ error!(
+ "bad shmid {}, expected {}",
+ req.shmid, shared_mapper_state.shmid
+ );
+ return Err(std::io::Error::from_raw_os_error(libc::EINVAL));
+ }
+ match shared_mapper_state.mapper.add_mapping(
+ VmMemorySource::Vulkan {
+ descriptor: SafeDescriptor::try_from(descriptor)
+ .map_err(|_| std::io::Error::from_raw_os_error(libc::EIO))?,
+ handle_type: req.handle_type,
+ memory_idx: req.memory_idx,
+ device_uuid: req.device_uuid,
+ driver_uuid: req.driver_uuid,
+ size: req.len,
+ },
+ req.shm_offset,
+ Protection::read_write(),
+ MemCacheType::CacheCoherent,
+ ) {
+ Ok(()) => Ok(0),
+ Err(e) => {
+ error!("failed to create mapping {:?}", e);
+ Err(std::io::Error::from_raw_os_error(libc::EINVAL))
+ }
+ }
+ }
+
+ fn external_map(&mut self, req: &VhostUserExternalMapMsg) -> HandlerResult<u64> {
+ let shared_mapper_state = self
+ .shared_mapper_state
+ .as_mut()
+ .ok_or_else(|| std::io::Error::from_raw_os_error(libc::EINVAL))?;
+ if req.shmid != shared_mapper_state.shmid {
+ error!(
+ "bad shmid {}, expected {}",
+ req.shmid, shared_mapper_state.shmid
+ );
+ return Err(std::io::Error::from_raw_os_error(libc::EINVAL));
+ }
+ match shared_mapper_state.mapper.add_mapping(
+ VmMemorySource::ExternalMapping {
+ ptr: req.ptr,
+ size: req.len,
+ },
+ req.shm_offset,
+ Protection::read_write(),
+ MemCacheType::CacheCoherent,
+ ) {
+ Ok(()) => Ok(0),
+ Err(e) => {
+ error!("failed to create mapping {:?}", e);
+ Err(std::io::Error::from_raw_os_error(libc::EINVAL))
+ }
+ }
+ }
+
+ fn handle_config_change(&mut self) -> HandlerResult<u64> {
+ info!("Handle Config Change called");
+ match &self.interrupt {
+ Some(interrupt) => {
+ interrupt.signal_config_changed();
+ Ok(0)
+ }
+ None => {
+ error!("cannot send interrupt");
+ Err(std::io::Error::from_raw_os_error(libc::ENOSYS))
+ }
+ }
+ }
+}
diff --git a/devices/src/virtio/vhost_user_frontend/mod.rs b/devices/src/virtio/vhost_user_frontend/mod.rs
new file mode 100644
index 0000000..37f7dd5
--- /dev/null
+++ b/devices/src/virtio/vhost_user_frontend/mod.rs
@@ -0,0 +1,631 @@
+// Copyright 2022 The ChromiumOS Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+//! VirtioDevice implementation for the VMM side of a vhost-user connection.
+
+mod error;
+mod fs;
+mod handler;
+mod sys;
+mod worker;
+
+use std::cell::RefCell;
+use std::collections::BTreeMap;
+use std::sync::Arc;
+
+use anyhow::Context;
+use base::error;
+use base::trace;
+use base::AsRawDescriptor;
+use base::Event;
+use base::RawDescriptor;
+use base::WorkerThread;
+use serde_json::Value;
+use sync::Mutex;
+use vm_memory::GuestMemory;
+use vmm_vhost::message::VhostUserConfigFlags;
+use vmm_vhost::message::VhostUserProtocolFeatures;
+use vmm_vhost::BackendClient;
+use vmm_vhost::VhostUserMemoryRegionInfo;
+use vmm_vhost::VringConfigData;
+use vmm_vhost::VHOST_USER_F_PROTOCOL_FEATURES;
+
+use crate::pci::MsixConfig;
+use crate::virtio::copy_config;
+use crate::virtio::device_constants::VIRTIO_DEVICE_TYPE_SPECIFIC_FEATURES_MASK;
+use crate::virtio::vhost_user_frontend::error::Error;
+use crate::virtio::vhost_user_frontend::error::Result;
+use crate::virtio::vhost_user_frontend::handler::BackendReqHandler;
+use crate::virtio::vhost_user_frontend::handler::BackendReqHandlerImpl;
+use crate::virtio::vhost_user_frontend::sys::create_backend_req_handler;
+use crate::virtio::vhost_user_frontend::worker::Worker;
+use crate::virtio::DeviceType;
+use crate::virtio::Interrupt;
+use crate::virtio::Queue;
+use crate::virtio::QueueConfig;
+use crate::virtio::SharedMemoryMapper;
+use crate::virtio::SharedMemoryRegion;
+use crate::virtio::VirtioDevice;
+use crate::PciAddress;
+
+pub struct VhostUserFrontend {
+ device_type: DeviceType,
+ worker_thread: Option<WorkerThread<Option<BackendReqHandler>>>,
+
+ backend_client: BackendClient,
+ avail_features: u64,
+ acked_features: u64,
+ protocol_features: VhostUserProtocolFeatures,
+ // `backend_req_handler` is only present if the backend supports BACKEND_REQ. `worker_thread`
+ // takes ownership of `backend_req_handler` when it starts. The worker thread will always
+    // return ownership of the handler when stopped.
+ backend_req_handler: Option<BackendReqHandler>,
+ // Shared memory region info. IPC result from backend is saved with outer Option.
+ shmem_region: RefCell<Option<Option<SharedMemoryRegion>>>,
+
+ queue_sizes: Vec<u16>,
+ cfg: Option<Vec<u8>>,
+ expose_shmem_descriptors_with_viommu: bool,
+ pci_address: Option<PciAddress>,
+}
+
+// Returns the largest power of two that is less than or equal to `val`.
+fn power_of_two_le(val: u16) -> Option<u16> {
+ if val == 0 {
+ None
+ } else if val.is_power_of_two() {
+ Some(val)
+ } else {
+ val.checked_next_power_of_two()
+ .map(|next_pow_two| next_pow_two / 2)
+ }
+}
+
+impl VhostUserFrontend {
+ /// Create a new VirtioDevice for a vhost-user device frontend.
+ ///
+ /// # Arguments
+ ///
+ /// - `device_type`: virtio device type
+ /// - `base_features`: base virtio device features (e.g. `VIRTIO_F_VERSION_1`)
+ /// - `connection`: connection to the device backend
+ /// - `max_queue_size`: maximum number of entries in each queue (default: [`Queue::MAX_SIZE`])
+ pub fn new(
+ device_type: DeviceType,
+ base_features: u64,
+ connection: vmm_vhost::SystemStream,
+ max_queue_size: Option<u16>,
+ pci_address: Option<PciAddress>,
+ ) -> Result<VhostUserFrontend> {
+ VhostUserFrontend::new_internal(
+ connection,
+ device_type,
+ max_queue_size,
+ base_features,
+ None, // cfg
+ pci_address,
+ )
+ }
+
+ /// Create a new VirtioDevice for a vhost-user device frontend.
+ ///
+ /// # Arguments
+ ///
+ /// - `connection`: connection to the device backend
+ /// - `device_type`: virtio device type
+ /// - `max_queue_size`: maximum number of entries in each queue (default: [`Queue::MAX_SIZE`])
+ /// - `base_features`: base virtio device features (e.g. `VIRTIO_F_VERSION_1`)
+ /// - `cfg`: bytes to return for the virtio configuration space (queried from device if not
+ /// specified)
+ pub(crate) fn new_internal(
+ connection: vmm_vhost::SystemStream,
+ device_type: DeviceType,
+ max_queue_size: Option<u16>,
+ mut base_features: u64,
+ cfg: Option<&[u8]>,
+ pci_address: Option<PciAddress>,
+ ) -> Result<VhostUserFrontend> {
+ // Don't allow packed queues even if requested. We don't handle them properly yet at the
+ // protocol layer.
+ // TODO: b/331466964 - Remove once packed queue support is added to BackendClient.
+ if base_features & (1 << virtio_sys::virtio_config::VIRTIO_F_RING_PACKED) != 0 {
+ base_features &= !(1 << virtio_sys::virtio_config::VIRTIO_F_RING_PACKED);
+ base::warn!(
+ "VIRTIO_F_RING_PACKED requested, but not yet supported by vhost-user frontend. \
+ Automatically disabled."
+ );
+ }
+
+ #[cfg(windows)]
+ let backend_pid = connection.target_pid();
+
+ let mut backend_client = BackendClient::from_stream(connection);
+
+ backend_client.set_owner().map_err(Error::SetOwner)?;
+
+ let allow_features = VIRTIO_DEVICE_TYPE_SPECIFIC_FEATURES_MASK
+ | base_features
+ | 1 << VHOST_USER_F_PROTOCOL_FEATURES;
+ let avail_features =
+ allow_features & backend_client.get_features().map_err(Error::GetFeatures)?;
+ let mut acked_features = 0;
+
+ let mut allow_protocol_features = VhostUserProtocolFeatures::CONFIG
+ | VhostUserProtocolFeatures::MQ
+ | VhostUserProtocolFeatures::BACKEND_REQ;
+
+ // HACK: the crosvm vhost-user GPU backend supports the non-standard
+ // VHOST_USER_PROTOCOL_FEATURE_SHARED_MEMORY_REGIONS. This should either be standardized
+ // (and enabled for all device types) or removed.
+ let expose_shmem_descriptors_with_viommu = if device_type == DeviceType::Gpu {
+ allow_protocol_features |= VhostUserProtocolFeatures::SHARED_MEMORY_REGIONS;
+ true
+ } else {
+ false
+ };
+
+ let mut protocol_features = VhostUserProtocolFeatures::empty();
+ if avail_features & 1 << VHOST_USER_F_PROTOCOL_FEATURES != 0 {
+ // The vhost-user backend supports VHOST_USER_F_PROTOCOL_FEATURES; enable it.
+ backend_client
+ .set_features(1 << VHOST_USER_F_PROTOCOL_FEATURES)
+ .map_err(Error::SetFeatures)?;
+ acked_features |= 1 << VHOST_USER_F_PROTOCOL_FEATURES;
+
+ let avail_protocol_features = backend_client
+ .get_protocol_features()
+ .map_err(Error::GetProtocolFeatures)?;
+ protocol_features = allow_protocol_features & avail_protocol_features;
+ backend_client
+ .set_protocol_features(protocol_features)
+ .map_err(Error::SetProtocolFeatures)?;
+ }
+
+ // if protocol feature `VhostUserProtocolFeatures::BACKEND_REQ` is negotiated.
+ let backend_req_handler =
+ if protocol_features.contains(VhostUserProtocolFeatures::BACKEND_REQ) {
+ let (handler, tx_fd) = create_backend_req_handler(
+ BackendReqHandlerImpl::new(),
+ #[cfg(windows)]
+ backend_pid,
+ )?;
+ backend_client
+ .set_backend_req_fd(&tx_fd)
+ .map_err(Error::SetDeviceRequestChannel)?;
+ Some(handler)
+ } else {
+ None
+ };
+
+ // If the device supports VHOST_USER_PROTOCOL_F_MQ, use VHOST_USER_GET_QUEUE_NUM to
+ // determine the number of queues supported. Otherwise, use the minimum number of queues
+ // required by the spec for this device type.
+ let num_queues = if protocol_features.contains(VhostUserProtocolFeatures::MQ) {
+ trace!("backend supports VHOST_USER_PROTOCOL_F_MQ");
+ let num_queues = backend_client.get_queue_num().map_err(Error::GetQueueNum)?;
+ trace!("VHOST_USER_GET_QUEUE_NUM returned {num_queues}");
+ num_queues as usize
+ } else {
+ trace!("backend does not support VHOST_USER_PROTOCOL_F_MQ");
+ device_type.min_queues()
+ };
+
+ // Clamp the maximum queue size to the largest power of 2 <= max_queue_size.
+ let max_queue_size = max_queue_size
+ .and_then(power_of_two_le)
+ .unwrap_or(Queue::MAX_SIZE);
+
+ trace!(
+ "vhost-user {device_type} frontend with {num_queues} queues x {max_queue_size} entries\
+ {}",
+ if let Some(pci_address) = pci_address {
+ format!(" pci-address {pci_address}")
+ } else {
+ "".to_string()
+ }
+ );
+
+ let queue_sizes = vec![max_queue_size; num_queues];
+
+ Ok(VhostUserFrontend {
+ device_type,
+ worker_thread: None,
+ backend_client,
+ avail_features,
+ acked_features,
+ protocol_features,
+ backend_req_handler,
+ shmem_region: RefCell::new(None),
+ queue_sizes,
+ cfg: cfg.map(|cfg| cfg.to_vec()),
+ expose_shmem_descriptors_with_viommu,
+ pci_address,
+ })
+ }
+
+ fn set_mem_table(&mut self, mem: &GuestMemory) -> Result<()> {
+ let regions: Vec<_> = mem
+ .regions()
+ .map(|region| VhostUserMemoryRegionInfo {
+ guest_phys_addr: region.guest_addr.0,
+ memory_size: region.size as u64,
+ userspace_addr: region.host_addr as u64,
+ mmap_offset: region.shm_offset,
+ mmap_handle: region.shm.as_raw_descriptor(),
+ })
+ .collect();
+
+ self.backend_client
+ .set_mem_table(regions.as_slice())
+ .map_err(Error::SetMemTable)?;
+
+ Ok(())
+ }
+
+ /// Activates a vring for the given `queue`.
+ fn activate_vring(
+ &mut self,
+ mem: &GuestMemory,
+ queue_index: usize,
+ queue: &Queue,
+ irqfd: &Event,
+ ) -> Result<()> {
+ self.backend_client
+ .set_vring_num(queue_index, queue.size())
+ .map_err(Error::SetVringNum)?;
+
+ let config_data = VringConfigData {
+ queue_size: queue.size(),
+ flags: 0u32,
+ desc_table_addr: mem
+ .get_host_address(queue.desc_table())
+ .map_err(Error::GetHostAddress)? as u64,
+ used_ring_addr: mem
+ .get_host_address(queue.used_ring())
+ .map_err(Error::GetHostAddress)? as u64,
+ avail_ring_addr: mem
+ .get_host_address(queue.avail_ring())
+ .map_err(Error::GetHostAddress)? as u64,
+ log_addr: None,
+ };
+ self.backend_client
+ .set_vring_addr(queue_index, &config_data)
+ .map_err(Error::SetVringAddr)?;
+
+ self.backend_client
+ .set_vring_base(queue_index, 0)
+ .map_err(Error::SetVringBase)?;
+
+ self.backend_client
+ .set_vring_call(queue_index, irqfd)
+ .map_err(Error::SetVringCall)?;
+ self.backend_client
+ .set_vring_kick(queue_index, queue.event())
+ .map_err(Error::SetVringKick)?;
+
+ // Per protocol documentation, `VHOST_USER_SET_VRING_ENABLE` should be sent only when
+ // `VHOST_USER_F_PROTOCOL_FEATURES` has been negotiated.
+ if self.acked_features & 1 << VHOST_USER_F_PROTOCOL_FEATURES != 0 {
+ self.backend_client
+ .set_vring_enable(queue_index, true)
+ .map_err(Error::SetVringEnable)?;
+ }
+
+ Ok(())
+ }
+
+ /// Helper to start up the worker thread that will be used with handling interrupts and requests
+ /// from the device process.
+ fn start_worker(&mut self, interrupt: Interrupt, non_msix_evt: Event) {
+ assert!(
+ self.worker_thread.is_none(),
+ "BUG: attempted to start worker twice"
+ );
+
+ let label = format!("vhost_user_virtio_{}", self.device_type);
+
+ let mut backend_req_handler = self.backend_req_handler.take();
+ if let Some(handler) = &mut backend_req_handler {
+            // Give the handler a copy of the interrupt so it can signal the guest.
+ handler.frontend_mut().set_interrupt(interrupt.clone());
+ }
+
+ self.worker_thread = Some(WorkerThread::start(label.clone(), move |kill_evt| {
+ let ex = cros_async::Executor::new().expect("failed to create an executor");
+ let ex2 = ex.clone();
+ ex.run_until(async {
+ let mut worker = Worker {
+ kill_evt,
+ non_msix_evt,
+ backend_req_handler,
+ };
+ if let Err(e) = worker.run(&ex2, interrupt).await {
+ error!("failed to run {} worker: {:#}", label, e);
+ }
+ worker.backend_req_handler
+ })
+ .expect("run_until failed")
+ }));
+ }
+}
+
+impl VirtioDevice for VhostUserFrontend {
+ fn keep_rds(&self) -> Vec<RawDescriptor> {
+ Vec::new()
+ }
+
+ fn device_type(&self) -> DeviceType {
+ self.device_type
+ }
+
+ fn queue_max_sizes(&self) -> &[u16] {
+ &self.queue_sizes
+ }
+
+ fn features(&self) -> u64 {
+ self.avail_features
+ }
+
+ fn ack_features(&mut self, features: u64) {
+ let features = (features & self.avail_features) | self.acked_features;
+ if let Err(e) = self
+ .backend_client
+ .set_features(features)
+ .map_err(Error::SetFeatures)
+ {
+ error!("failed to enable features 0x{:x}: {}", features, e);
+ return;
+ }
+ self.acked_features = features;
+ }
+
+ fn read_config(&self, offset: u64, data: &mut [u8]) {
+ if let Some(cfg) = &self.cfg {
+ copy_config(data, 0, cfg, offset);
+ return;
+ }
+
+ let Ok(offset) = offset.try_into() else {
+ error!("failed to read config: invalid config offset is given: {offset}");
+ return;
+ };
+ let Ok(data_len) = data.len().try_into() else {
+ error!(
+ "failed to read config: invalid config length is given: {}",
+ data.len()
+ );
+ return;
+ };
+ let (_, config) = match self.backend_client.get_config(
+ offset,
+ data_len,
+ VhostUserConfigFlags::WRITABLE,
+ data,
+ ) {
+ Ok(x) => x,
+ Err(e) => {
+ error!("failed to read config: {}", Error::GetConfig(e));
+ return;
+ }
+ };
+ data.copy_from_slice(&config);
+ }
+
+ fn write_config(&mut self, offset: u64, data: &[u8]) {
+ let Ok(offset) = offset.try_into() else {
+ error!("failed to write config: invalid config offset is given: {offset}");
+ return;
+ };
+ if let Err(e) = self
+ .backend_client
+ .set_config(offset, VhostUserConfigFlags::empty(), data)
+ .map_err(Error::SetConfig)
+ {
+ error!("failed to write config: {}", e);
+ }
+ }
+
+ fn activate(
+ &mut self,
+ mem: GuestMemory,
+ interrupt: Interrupt,
+ queues: BTreeMap<usize, Queue>,
+ ) -> anyhow::Result<()> {
+ self.set_mem_table(&mem)?;
+
+ let msix_config_opt = interrupt
+ .get_msix_config()
+ .as_ref()
+ .ok_or(Error::MsixConfigUnavailable)?;
+ let msix_config = msix_config_opt.lock();
+
+ let non_msix_evt = Event::new().map_err(Error::CreateEvent)?;
+ for (&queue_index, queue) in queues.iter() {
+ let irqfd = msix_config
+ .get_irqfd(queue.vector() as usize)
+ .unwrap_or(&non_msix_evt);
+ self.activate_vring(&mem, queue_index, queue, irqfd)?;
+ }
+
+ drop(msix_config);
+
+ self.start_worker(interrupt, non_msix_evt);
+ Ok(())
+ }
+
+ fn reset(&mut self) -> anyhow::Result<()> {
+ for queue_index in 0..self.queue_sizes.len() {
+ if self.acked_features & 1 << VHOST_USER_F_PROTOCOL_FEATURES != 0 {
+ self.backend_client
+ .set_vring_enable(queue_index, false)
+ .context("set_vring_enable failed during reset")?;
+ }
+ let _vring_base = self
+ .backend_client
+ .get_vring_base(queue_index)
+ .context("get_vring_base failed during reset")?;
+ }
+
+ if let Some(w) = self.worker_thread.take() {
+ self.backend_req_handler = w.stop();
+ }
+
+ Ok(())
+ }
+
+ fn pci_address(&self) -> Option<PciAddress> {
+ self.pci_address
+ }
+
+ fn get_shared_memory_region(&self) -> Option<SharedMemoryRegion> {
+ if !self
+ .protocol_features
+ .contains(VhostUserProtocolFeatures::SHARED_MEMORY_REGIONS)
+ {
+ return None;
+ }
+ if let Some(r) = self.shmem_region.borrow().as_ref() {
+ return r.clone();
+ }
+ let regions = match self
+ .backend_client
+ .get_shared_memory_regions()
+ .map_err(Error::ShmemRegions)
+ {
+ Ok(x) => x,
+ Err(e) => {
+ error!("Failed to get shared memory regions {}", e);
+ return None;
+ }
+ };
+ let region = match regions.len() {
+ 0 => None,
+ 1 => Some(SharedMemoryRegion {
+ id: regions[0].id,
+ length: regions[0].length,
+ }),
+ n => {
+ error!(
+ "Failed to get shared memory regions {}",
+ Error::TooManyShmemRegions(n)
+ );
+ return None;
+ }
+ };
+
+ *self.shmem_region.borrow_mut() = Some(region.clone());
+ region
+ }
+
+ fn set_shared_memory_mapper(&mut self, mapper: Box<dyn SharedMemoryMapper>) {
+ // Return error if backend request handler is not available. This indicates
+ // that `VhostUserProtocolFeatures::BACKEND_REQ` is not negotiated.
+ let Some(backend_req_handler) = self.backend_req_handler.as_mut() else {
+ error!(
+ "Error setting shared memory mapper {}",
+ Error::ProtocolFeatureNotNegoiated(VhostUserProtocolFeatures::BACKEND_REQ)
+ );
+ return;
+ };
+
+ // The virtio framework will only call this if get_shared_memory_region returned a region
+ let shmid = self
+ .shmem_region
+ .borrow()
+ .clone()
+ .flatten()
+ .expect("missing shmid")
+ .id;
+
+ backend_req_handler
+ .frontend_mut()
+ .set_shared_mapper_state(mapper, shmid);
+ }
+
+ fn expose_shmem_descriptors_with_viommu(&self) -> bool {
+ self.expose_shmem_descriptors_with_viommu
+ }
+
+ fn virtio_sleep(&mut self) -> anyhow::Result<Option<BTreeMap<usize, Queue>>> {
+ self.backend_client.sleep().map_err(Error::Sleep)?;
+
+ // Vhost user devices won't return queues on sleep, so return an empty Vec so that
+ // VirtioPciDevice can set the sleep state properly.
+ Ok(Some(BTreeMap::new()))
+ }
+
+ fn virtio_wake(
+ &mut self,
+ // Vhost user doesn't need to pass queue_states back to the device process, since it will
+ // already have it.
+ _queues_state: Option<(GuestMemory, Interrupt, BTreeMap<usize, Queue>)>,
+ ) -> anyhow::Result<()> {
+ self.backend_client.wake().map_err(Error::Wake)?;
+ Ok(())
+ }
+
+ fn virtio_snapshot(&mut self) -> anyhow::Result<Value> {
+ let snapshot_bytes = self.backend_client.snapshot().map_err(Error::Snapshot)?;
+ Ok(serde_json::to_value(snapshot_bytes).map_err(Error::SliceToSerdeValue)?)
+ }
+
+ fn virtio_restore(&mut self, _data: Value) -> anyhow::Result<()> {
+ panic!("virtio_restore should not be called for vhost-user devices.")
+ }
+
+ fn is_vhost_user(&self) -> bool {
+ true
+ }
+
+ fn vhost_user_restore(
+ &mut self,
+ data: Value,
+ queue_configs: &[QueueConfig],
+ queue_evts: Option<Vec<Event>>,
+ interrupt: Option<Interrupt>,
+ mem: GuestMemory,
+ msix_config: &Arc<Mutex<MsixConfig>>,
+ device_activated: bool,
+ ) -> anyhow::Result<()> {
+ // Other aspects of the restore operation will depend on the mem table
+ // being set.
+ self.set_mem_table(&mem)?;
+
+ if device_activated {
+ let non_msix_evt = Event::new().context("Failed to create event")?;
+ queue_configs
+ .iter()
+ .enumerate()
+ .filter(|(_, q)| q.ready())
+ .try_for_each(|(queue_index, queue)| {
+ let msix_lock = msix_config.lock();
+ let irqfd = msix_lock
+ .get_irqfd(queue.vector() as usize)
+ .unwrap_or(&non_msix_evt);
+
+ self.backend_client
+ .set_vring_call(queue_index, irqfd)
+ .map_err(Error::SetVringCall)
+ .context("Failed to restore irqfd")?;
+
+ Ok::<(), anyhow::Error>(())
+ })?;
+
+ self.start_worker(
+ interrupt.expect(
+ "Interrupt doesn't exist. This shouldn't \
+ happen since the device is activated.",
+ ),
+ non_msix_evt,
+ );
+ }
+
+ let data_bytes: Vec<u8> = serde_json::from_value(data).map_err(Error::SerdeValueToSlice)?;
+ self.backend_client
+ .restore(data_bytes.as_slice(), queue_evts)
+ .map_err(Error::Restore)?;
+
+ Ok(())
+ }
+}
diff --git a/devices/src/virtio/vhost/user/vmm/handler/sys.rs b/devices/src/virtio/vhost_user_frontend/sys.rs
similarity index 61%
rename from devices/src/virtio/vhost/user/vmm/handler/sys.rs
rename to devices/src/virtio/vhost_user_frontend/sys.rs
index c147da9..1fb85b2 100644
--- a/devices/src/virtio/vhost/user/vmm/handler/sys.rs
+++ b/devices/src/virtio/vhost_user_frontend/sys.rs
@@ -3,15 +3,14 @@
// found in the LICENSE file.
cfg_if::cfg_if! {
- if #[cfg(any(target_os = "android", target_os = "linux"))] {
- mod linux;
- pub(super) use self::linux::*;
- use linux as platform;
+ if #[cfg(unix)] {
+ mod unix;
+ use unix as platform;
} else if #[cfg(windows)] {
mod windows;
- pub(super) use self::windows::*;
use windows as platform;
}
}
+pub(in super::super) use platform::create_backend_req_handler;
pub(super) use platform::run_backend_request_handler;
diff --git a/devices/src/virtio/vhost_user_frontend/sys/unix.rs b/devices/src/virtio/vhost_user_frontend/sys/unix.rs
new file mode 100644
index 0000000..5d18de3
--- /dev/null
+++ b/devices/src/virtio/vhost_user_frontend/sys/unix.rs
@@ -0,0 +1,69 @@
+// Copyright 2022 The ChromiumOS Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+use std::pin::pin;
+
+use anyhow::bail;
+use anyhow::Context;
+use anyhow::Result;
+use base::info;
+use base::AsRawDescriptor;
+use base::SafeDescriptor;
+use cros_async::AsyncWrapper;
+use cros_async::Executor;
+use futures::channel::oneshot;
+use futures::future::FutureExt;
+use vmm_vhost::Error as VhostError;
+use vmm_vhost::FrontendServer;
+
+use crate::virtio::vhost_user_frontend::handler::BackendReqHandler;
+use crate::virtio::vhost_user_frontend::handler::BackendReqHandlerImpl;
+use crate::virtio::vhost_user_frontend::Error;
+use crate::virtio::vhost_user_frontend::Result as VhostResult;
+
+pub fn create_backend_req_handler(
+ h: BackendReqHandlerImpl,
+) -> VhostResult<(BackendReqHandler, SafeDescriptor)> {
+ FrontendServer::with_stream(h).map_err(Error::CreateBackendReqHandler)
+}
+
+/// Process requests from the backend.
+///
+/// If `stop_rx` is sent a value, the function will exit at a well defined point so that
+/// `run_backend_request_handler` can be re-invoked to resume processing the connection.
+pub async fn run_backend_request_handler(
+ ex: &Executor,
+ handler: &mut BackendReqHandler,
+ mut stop_rx: oneshot::Receiver<()>,
+) -> Result<()> {
+ let h = SafeDescriptor::try_from(handler as &dyn AsRawDescriptor)
+ .map(AsyncWrapper::new)
+ .context("failed to get safe descriptor for handler")?;
+ let handler_source = ex
+ .async_from(h)
+ .context("failed to create an async source")?;
+
+ let mut wait_readable_future = pin!(handler_source.wait_readable().fuse());
+
+ loop {
+ futures::select_biased! {
+ _ = stop_rx => return Ok(()),
+ r = wait_readable_future => {
+ r.context("failed to wait for the handler to become readable")?;
+ match handler.handle_request() {
+ Ok(_) => (),
+ Err(VhostError::ClientExit) => {
+ info!("vhost-user connection closed");
+ // Exit as the client closed the connection.
+ return Ok(());
+ }
+ Err(e) => {
+ bail!("failed to handle a vhost-user request: {}", e);
+ }
+ };
+ wait_readable_future.set(handler_source.wait_readable().fuse());
+ }
+ };
+ }
+}
diff --git a/devices/src/virtio/vhost/user/vmm/handler/sys/windows.rs b/devices/src/virtio/vhost_user_frontend/sys/windows.rs
similarity index 66%
rename from devices/src/virtio/vhost/user/vmm/handler/sys/windows.rs
rename to devices/src/virtio/vhost_user_frontend/sys/windows.rs
index 3432d4c..fa3fc7c 100644
--- a/devices/src/virtio/vhost/user/vmm/handler/sys/windows.rs
+++ b/devices/src/virtio/vhost_user_frontend/sys/windows.rs
@@ -7,42 +7,38 @@
use base::info;
use base::CloseNotifier;
use base::ReadNotifier;
+use base::SafeDescriptor;
use base::Tube;
use cros_async::EventAsync;
use cros_async::Executor;
+use futures::channel::oneshot;
use futures::pin_mut;
-use futures::select;
+use futures::select_biased;
use futures::FutureExt;
-use vmm_vhost::connection::TubePlatformConnection;
-use vmm_vhost::message::MasterReq;
use vmm_vhost::message::VhostUserProtocolFeatures;
-use vmm_vhost::Master;
-use vmm_vhost::MasterReqHandler;
-use crate::virtio::vhost::user::vmm::handler::BackendReqHandler;
-use crate::virtio::vhost::user::vmm::handler::BackendReqHandlerImpl;
-use crate::virtio::vhost::user::vmm::Error;
-use crate::virtio::vhost::user::vmm::Result as VhostResult;
+use crate::virtio::vhost_user_frontend::handler::BackendReqHandler;
+use crate::virtio::vhost_user_frontend::handler::BackendReqHandlerImpl;
+use crate::virtio::vhost_user_frontend::Error;
+use crate::virtio::vhost_user_frontend::Result as VhostResult;
pub fn create_backend_req_handler(
h: BackendReqHandlerImpl,
backend_pid: Option<u32>,
-) -> VhostResult<BackendReqHandler> {
+) -> VhostResult<(BackendReqHandler, SafeDescriptor)> {
let backend_pid = backend_pid.expect("tube needs target pid for backend requests");
- let mut handler =
- MasterReqHandler::with_tube(h, backend_pid).map_err(Error::CreateBackendReqHandler)?;
- Ok(handler)
+ vmm_vhost::FrontendServer::with_tube(h, backend_pid).map_err(Error::CreateBackendReqHandler)
}
+/// Process requests from the backend.
+///
+/// If `stop_rx` is sent a value, the function will exit at a well defined point so that
+/// `run_backend_request_handler` can be re-invoked to resume processing the connection.
pub async fn run_backend_request_handler(
- handler: Option<BackendReqHandler>,
ex: &Executor,
+ handler: &mut BackendReqHandler,
+ mut stop_rx: oneshot::Receiver<()>,
) -> Result<()> {
- let mut handler = match handler {
- Some(h) => h,
- None => std::future::pending().await,
- };
-
let read_notifier = handler.get_read_notifier();
let close_notifier = handler.get_close_notifier();
@@ -57,7 +53,8 @@
pin_mut!(close_event_fut);
loop {
- select! {
+ select_biased! {
+ _ = stop_rx => return Ok(()),
_read_res = read_event_fut => {
handler
.handle_request()
diff --git a/devices/src/virtio/vhost_user_frontend/worker.rs b/devices/src/virtio/vhost_user_frontend/worker.rs
new file mode 100644
index 0000000..4412e07
--- /dev/null
+++ b/devices/src/virtio/vhost_user_frontend/worker.rs
@@ -0,0 +1,85 @@
+// Copyright 2021 The ChromiumOS Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+use std::pin::pin;
+
+use anyhow::Context;
+use base::Event;
+use cros_async::EventAsync;
+use cros_async::Executor;
+use futures::channel::oneshot;
+use futures::select_biased;
+use futures::FutureExt;
+
+use crate::virtio::async_utils;
+use crate::virtio::vhost_user_frontend::handler::BackendReqHandler;
+use crate::virtio::vhost_user_frontend::sys::run_backend_request_handler;
+use crate::virtio::Interrupt;
+use crate::virtio::VIRTIO_MSI_NO_VECTOR;
+
+pub struct Worker {
+ pub kill_evt: Event,
+ pub non_msix_evt: Event,
+ pub backend_req_handler: Option<BackendReqHandler>,
+}
+
+impl Worker {
+ // Runs asynchronous tasks.
+ pub async fn run(&mut self, ex: &Executor, interrupt: Interrupt) -> anyhow::Result<()> {
+ let non_msix_evt = self
+ .non_msix_evt
+ .try_clone()
+ .expect("failed to clone non_msix_evt");
+ let mut handle_non_msix_evt =
+ pin!(handle_non_msix_evt(ex, non_msix_evt, interrupt.clone()).fuse());
+
+ let mut resample = pin!(async_utils::handle_irq_resample(ex, interrupt).fuse());
+
+ let kill_evt = self.kill_evt.try_clone().expect("failed to clone kill_evt");
+ let mut kill = pin!(async_utils::await_and_exit(ex, kill_evt).fuse());
+
+ let (stop_tx, stop_rx) = oneshot::channel();
+ let mut req_handler = pin!(if let Some(backend_req_handler) =
+ self.backend_req_handler.as_mut()
+ {
+ run_backend_request_handler(ex, backend_req_handler, stop_rx)
+ .fuse()
+ .left_future()
+ } else {
+ stop_rx.map(|_| Ok(())).right_future()
+ }
+ .fuse());
+
+ select_biased! {
+ r = kill => {
+ r.context("failed to wait on the kill event")?;
+ // Stop req_handler cooperatively.
+ let _ = stop_tx.send(());
+ req_handler.await.context("backend request failure on stop")?;
+ }
+ r = handle_non_msix_evt => r.context("non msix event failure")?,
+ r = resample => r.context("failed to resample a irq value")?,
+ r = req_handler => r.context("backend request failure")?,
+ }
+
+ Ok(())
+ }
+}
+
+// The vhost-user protocol allows the backend to signal events, but for non-MSI-X devices,
+// a device must also update the interrupt status mask. `handle_non_msix_evt` proxies events
+// from the vhost-user backend to update the status mask.
+async fn handle_non_msix_evt(
+ ex: &Executor,
+ non_msix_evt: Event,
+ interrupt: Interrupt,
+) -> anyhow::Result<()> {
+ let event_async =
+ EventAsync::new(non_msix_evt, ex).expect("failed to create async non_msix_evt");
+ loop {
+ let _ = event_async.next_val().await;
+ // The parameter vector of signal_used_queue is used only when msix is enabled.
+ interrupt.signal_used_queue(VIRTIO_MSI_NO_VECTOR);
+ }
+}
diff --git a/devices/src/virtio/video/decoder/backend/mod.rs b/devices/src/virtio/video/decoder/backend/mod.rs
index 6e80f76..cf8236e 100644
--- a/devices/src/virtio/video/decoder/backend/mod.rs
+++ b/devices/src/virtio/video/decoder/backend/mod.rs
@@ -153,10 +153,8 @@
mod tests {
use std::time::Duration;
- use base::FromRawDescriptor;
use base::MappedRegion;
use base::MemoryMappingBuilder;
- use base::SafeDescriptor;
use base::SharedMemory;
use base::WaitContext;
@@ -233,11 +231,7 @@
#[allow(dead_code)]
pub fn build_object_handle(mem: &SharedMemory) -> GuestResourceHandle {
GuestResourceHandle::VirtioObject(VirtioObjectHandle {
- // SAFETY:
- // Safe because we are taking ownership of a just-duplicated FD.
- desc: unsafe {
- SafeDescriptor::from_raw_descriptor(base::clone_descriptor(mem).unwrap())
- },
+ desc: base::clone_descriptor(mem).unwrap(),
modifier: 0,
})
}
@@ -247,11 +241,7 @@
#[allow(dead_code)]
pub fn build_guest_mem_handle(mem: &SharedMemory) -> GuestResourceHandle {
GuestResourceHandle::GuestPages(GuestMemHandle {
- // SAFETY:
- // Safe because we are taking ownership of a just-duplicated FD.
- desc: unsafe {
- SafeDescriptor::from_raw_descriptor(base::clone_descriptor(mem).unwrap())
- },
+ desc: base::clone_descriptor(mem).unwrap(),
mem_areas: vec![GuestMemArea {
offset: 0,
length: mem.size() as usize,
diff --git a/devices/src/virtio/video/resource.rs b/devices/src/virtio/video/resource.rs
index 00202ab..4d5192a 100644
--- a/devices/src/virtio/video/resource.rs
+++ b/devices/src/virtio/video/resource.rs
@@ -232,11 +232,8 @@
let guest_region = mem
.shm_region(GuestAddress(addr))
.map_err(GuestMemResourceCreationError::CantGetShmRegion)?;
- let desc = base::clone_descriptor(guest_region)
- .map_err(GuestMemResourceCreationError::DescriptorCloneError)?;
- // SAFETY:
- // Safe because we are the sole owner of the duplicated descriptor.
- unsafe { SafeDescriptor::from_raw_descriptor(desc) }
+ base::clone_descriptor(guest_region)
+ .map_err(GuestMemResourceCreationError::DescriptorCloneError)?
}
};
@@ -377,7 +374,6 @@
#[cfg(test)]
mod tests {
use base::MappedRegion;
- use base::SafeDescriptor;
use base::SharedMemory;
use super::*;
@@ -415,10 +411,7 @@
// Create the `GuestMemHandle` we will try to map and retrieve the data from.
let mem_handle = GuestResourceHandle::GuestPages(GuestMemHandle {
- // SAFETY: descriptor is expected to be valid
- desc: unsafe {
- SafeDescriptor::from_raw_descriptor(base::clone_descriptor(&mem).unwrap())
- },
+ desc: base::clone_descriptor(&mem).unwrap(),
mem_areas: page_order
.iter()
.map(|&page| GuestMemArea {
diff --git a/devices/src/virtio/video/worker.rs b/devices/src/virtio/video/worker.rs
index 32bd15f..aa40ef8 100644
--- a/devices/src/virtio/video/worker.rs
+++ b/devices/src/virtio/video/worker.rs
@@ -11,8 +11,6 @@
use base::error;
use base::info;
use base::Event;
-use base::FromRawDescriptor;
-use base::SafeDescriptor;
use base::WaitContext;
use cros_async::select3;
use cros_async::AsyncWrapper;
@@ -403,12 +401,7 @@
let device_wait_ctx = WaitContext::new().map_err(Error::WaitContextCreationFailed)?;
let device_evt = ex
.async_from(AsyncWrapper::new(
- clone_descriptor(&device_wait_ctx)
- .map(|fd|
- // SAFETY:
- // Safe because we just created this fd.
- unsafe { SafeDescriptor::from_raw_descriptor(fd) })
- .map_err(Error::CloneDescriptorFailed)?,
+ clone_descriptor(&device_wait_ctx).map_err(Error::CloneDescriptorFailed)?,
))
.map_err(Error::EventAsyncCreationFailed)?;
diff --git a/devices/src/virtio/virtio_device.rs b/devices/src/virtio/virtio_device.rs
index 6d7d4bc..295a050 100644
--- a/devices/src/virtio/virtio_device.rs
+++ b/devices/src/virtio/virtio_device.rs
@@ -7,6 +7,7 @@
#[cfg(target_arch = "x86_64")]
use acpi_tables::sdt::SDT;
+use anyhow::anyhow;
use anyhow::Result;
use base::Event;
use base::Protection;
@@ -119,10 +120,10 @@
/// Optionally deactivates this device. If the reset method is
/// not able to reset the virtio device, or the virtio device model doesn't
- /// implement the reset method, a false value is returned to indicate
- /// the reset is not successful. Otherwise a true value should be returned.
- fn reset(&mut self) -> bool {
- false
+ /// implement the reset method, an `Err` value is returned to indicate
+ /// the reset is not successful. Otherwise `Ok(())` should be returned.
+ fn reset(&mut self) -> Result<()> {
+ Err(anyhow!("reset not implemented for {}", self.debug_label()))
}
/// Returns any additional BAR configuration required by the device.
@@ -321,27 +322,12 @@
}
#[test]
- fn test_sleep_snapshot() {
+ fn test_unactivated_sleep_snapshot_wake() {
let (_ctx, mut device) = $dev();
- let mem = memory();
- let interrupt = interrupt();
- let queues = create_queues(
- $num_queues,
- device
- .queue_max_sizes()
- .first()
- .cloned()
- .expect("missing queue size"),
- &mem,
- );
- device
- .activate(mem.clone(), interrupt.clone(), queues)
- .expect("failed to activate");
- device
- .virtio_sleep()
- .expect("failed to sleep")
- .expect("missing queues while sleeping");
+ let sleep_result = device.virtio_sleep().expect("failed to sleep");
+ assert!(sleep_result.is_none());
device.virtio_snapshot().expect("failed to snapshot");
+ device.virtio_wake(None).expect("failed to wake");
}
#[test]
diff --git a/devices/src/virtio/virtio_mmio_device.rs b/devices/src/virtio/virtio_mmio_device.rs
index b3c1350..02f68e4 100644
--- a/devices/src/virtio/virtio_mmio_device.rs
+++ b/devices/src/virtio/virtio_mmio_device.rs
@@ -332,14 +332,18 @@
}
// Device has been reset by the driver
- if self.device_activated && self.is_reset_requested() && self.device.reset() {
- self.device_activated = false;
- // reset queues
- self.queues.iter_mut().for_each(QueueConfig::reset);
- // select queue 0 by default
- self.queue_select = 0;
- // reset interrupt
- self.interrupt = None;
+ if self.device_activated && self.is_reset_requested() {
+ if let Err(e) = self.device.reset() {
+ error!("failed to reset {} device: {:#}", self.debug_label(), e);
+ } else {
+ self.device_activated = false;
+ // reset queues
+ self.queues.iter_mut().for_each(QueueConfig::reset);
+ // select queue 0 by default
+ self.queue_select = 0;
+ // reset interrupt
+ self.interrupt = None;
+ }
}
}
diff --git a/devices/src/virtio/virtio_pci_device.rs b/devices/src/virtio/virtio_pci_device.rs
index 49cb9f1..7dbbc74 100644
--- a/devices/src/virtio/virtio_pci_device.rs
+++ b/devices/src/virtio/virtio_pci_device.rs
@@ -23,6 +23,8 @@
use hypervisor::Datamatch;
use hypervisor::MemCacheType;
use libc::ERANGE;
+#[cfg(target_arch = "x86_64")]
+use metrics::MetricEventType;
use resources::Alloc;
use resources::AllocOptions;
use resources::SystemAllocator;
@@ -548,7 +550,9 @@
Some(PmWakeupEvent::new(
self.vm_control_tube.clone(),
self.pm_config.clone(),
- self.device.debug_label(),
+ MetricEventType::VirtioWakeup {
+ virtio_id: self.device.device_type() as u32,
+ },
)),
);
self.interrupt = Some(interrupt.clone());
@@ -900,14 +904,18 @@
}
// Device has been reset by the driver
- if self.device_activated && self.is_reset_requested() && self.device.reset() {
- self.device_activated = false;
- // reset queues
- self.queues.iter_mut().for_each(QueueConfig::reset);
- // select queue 0 by default
- self.common_config.queue_select = 0;
- if let Err(e) = self.unregister_ioevents() {
- error!("failed to unregister ioevents: {:#}", e);
+ if self.device_activated && self.is_reset_requested() {
+ if let Err(e) = self.device.reset() {
+ error!("failed to reset {} device: {:#}", self.debug_label(), e);
+ } else {
+ self.device_activated = false;
+ // reset queues
+ self.queues.iter_mut().for_each(QueueConfig::reset);
+ // select queue 0 by default
+ self.common_config.queue_select = 0;
+ if let Err(e) = self.unregister_ioevents() {
+ error!("failed to unregister ioevents: {:#}", e);
+ }
}
}
}
@@ -1308,7 +1316,9 @@
Some(PmWakeupEvent::new(
self.vm_control_tube.clone(),
self.pm_config.clone(),
- self.device.debug_label(),
+ MetricEventType::VirtioWakeup {
+ virtio_id: self.device.device_type() as u32,
+ },
)),
));
}
diff --git a/devices/src/virtio/wl.rs b/devices/src/virtio/wl.rs
index 22a9518..8f1e0d4 100644
--- a/devices/src/virtio/wl.rs
+++ b/devices/src/virtio/wl.rs
@@ -75,6 +75,10 @@
use base::FromRawDescriptor;
#[cfg(feature = "gpu")]
use base::IntoRawDescriptor;
+#[cfg(feature = "minigbm")]
+use base::MemoryMappingBuilder;
+#[cfg(feature = "minigbm")]
+use base::MmapError;
use base::Protection;
use base::RawDescriptor;
use base::Result;
@@ -93,6 +97,8 @@
use libc::EBADF;
#[cfg(feature = "minigbm")]
use libc::EINVAL;
+#[cfg(feature = "minigbm")]
+use libc::ENOSYS;
use remain::sorted;
use resources::address_allocator::AddressAllocator;
use resources::AddressRange;
@@ -113,6 +119,10 @@
use rutabaga_gfx::RutabagaGrallocFlags;
#[cfg(feature = "minigbm")]
use rutabaga_gfx::RutabagaIntoRawDescriptor;
+#[cfg(feature = "minigbm")]
+use rutabaga_gfx::RUTABAGA_MAP_CACHE_CACHED;
+#[cfg(feature = "minigbm")]
+use rutabaga_gfx::RUTABAGA_MAP_CACHE_MASK;
use thiserror::Error as ThisError;
use vm_control::VmMemorySource;
use vm_memory::GuestAddress;
@@ -190,6 +200,10 @@
#[cfg(feature = "minigbm")]
const DMA_BUF_IOCTL_BASE: c_uint = 0x62;
+#[cfg(feature = "minigbm")]
+const DMA_BUF_SYNC_WRITE: c_uint = 0x2;
+#[cfg(feature = "minigbm")]
+const DMA_BUF_SYNC_END: c_uint = 0x4;
#[cfg(feature = "minigbm")]
#[repr(C)]
@@ -797,6 +811,8 @@
slot: Option<(u64 /* offset */, VmRequester)>,
#[cfg(feature = "minigbm")]
is_dmabuf: bool,
+ #[cfg(feature = "minigbm")]
+ map_info: u32,
fence: Option<File>,
is_fence: bool,
}
@@ -820,6 +836,25 @@
}
}
+#[cfg(feature = "minigbm")]
+fn flush_shared_memory(shared_memory: &SharedMemory) -> Result<()> {
+ let mmap = match MemoryMappingBuilder::new(shared_memory.size as usize)
+ .from_shared_memory(shared_memory)
+ .build()
+ {
+ Ok(v) => v,
+ Err(_) => return Err(Error::new(EINVAL)),
+ };
+ if let Err(err) = mmap.flush_all() {
+ base::error!("failed to flush shared memory: {}", err);
+ return match err {
+ MmapError::NotImplemented(_) => Err(Error::new(ENOSYS)),
+ _ => Err(Error::new(EINVAL)),
+ };
+ }
+ Ok(())
+}
+
impl WlVfd {
fn connect<P: AsRef<Path>>(path: P) -> WlResult<WlVfd> {
let socket = UnixStream::connect(path).map_err(WlError::SocketConnect)?;
@@ -864,6 +899,7 @@
vfd.guest_shared_memory = Some(vfd_shm);
vfd.slot = Some((offset, vm));
vfd.is_dmabuf = true;
+ vfd.map_info = reqs.map_info;
Ok((vfd, desc))
}
@@ -881,10 +917,25 @@
// SAFETY:
// Safe as descriptor is a valid dmabuf and incorrect flags will return an error.
if unsafe { ioctl_with_ref(descriptor, DMA_BUF_IOCTL_SYNC(), &sync) } < 0 {
- Err(WlError::DmabufSync(io::Error::last_os_error()))
- } else {
- Ok(())
+ return Err(WlError::DmabufSync(io::Error::last_os_error()));
}
+
+ // virtio-wl kernel driver always maps dmabufs with WB memory type, regardless of
+ // the host memory type (which is wrong). However, to avoid changing the protocol,
+ // assume that all guest writes are cached and ensure clflush-like ops on all mapped
+ // cachelines if the host mapping is not cached.
+ const END_WRITE_MASK: u32 = DMA_BUF_SYNC_WRITE | DMA_BUF_SYNC_END;
+ if (flags & END_WRITE_MASK) == END_WRITE_MASK
+ && (self.map_info & RUTABAGA_MAP_CACHE_MASK) != RUTABAGA_MAP_CACHE_CACHED
+ {
+ if let Err(err) = flush_shared_memory(descriptor) {
+ base::warn!("failed to flush cached dmabuf mapping: {:?}", err);
+ return Err(WlError::DmabufSync(io::Error::from_raw_os_error(
+ err.errno(),
+ )));
+ }
+ }
+ Ok(())
}
None => Err(WlError::DmabufSync(io::Error::from_raw_os_error(EBADF))),
}
diff --git a/devices/src/vmwdt.rs b/devices/src/vmwdt.rs
index cf1a374..25eb67b 100644
--- a/devices/src/vmwdt.rs
+++ b/devices/src/vmwdt.rs
@@ -343,6 +343,7 @@
mod tests {
use std::thread::sleep;
+ use base::poll_assert;
use base::Tube;
use super::*;
@@ -378,14 +379,13 @@
vmwdt_locked[0].next_expiration_interval_ms
};
- sleep(Duration::from_millis(100));
-
- // Verify that our timer expired and the next_expiration_interval_ms changed
- let vmwdt_locked = device.vm_wdts.lock();
- assert_eq!(
- vmwdt_locked[0].next_expiration_interval_ms != next_expiration_ms,
- true
- );
+ // Poll multiple times as we don't get a signal when the watchdog thread has run.
+ poll_assert!(10, || {
+ sleep(Duration::from_millis(50));
+ let vmwdt_locked = device.vm_wdts.lock();
+ // Verify that our timer expired and the next_expiration_interval_ms changed
+ vmwdt_locked[0].next_expiration_interval_ms != next_expiration_ms
+ });
}
#[test]
@@ -404,16 +404,13 @@
// the function get_guest_time() returns 0
device.vm_wdts.lock()[0].last_guest_time_ms = -100;
- sleep(Duration::from_millis(100));
-
- // Verify that our timer expired and the next_expiration_interval_ms changed
- match vm_evt_rdtube.recv::<VmEventType>() {
- Ok(vm_event) => {
- assert!(vm_event == VmEventType::WatchdogReset);
+ // Poll multiple times as we don't get a signal when the watchdog thread has run.
+ poll_assert!(10, || {
+ sleep(Duration::from_millis(50));
+ match vm_evt_rdtube.recv::<VmEventType>() {
+ Ok(vm_event) => vm_event == VmEventType::WatchdogReset,
+ Err(_e) => false,
}
- Err(_e) => {
- panic!();
- }
- };
+ });
}
}
diff --git a/devices/tests/irqchip/userspace.rs b/devices/tests/irqchip/userspace.rs
index c4599bf..7f521f6 100644
--- a/devices/tests/irqchip/userspace.rs
+++ b/devices/tests/irqchip/userspace.rs
@@ -4,6 +4,7 @@
#![cfg(target_arch = "x86_64")]
+use std::collections::BTreeMap;
use std::sync::Arc;
use std::thread;
use std::time::Duration;
@@ -46,7 +47,6 @@
use hypervisor::Level;
use hypervisor::PicSelect;
use hypervisor::PitRWMode;
-use hypervisor::Register;
use hypervisor::Regs;
use hypervisor::Sregs;
use hypervisor::TriggerMode;
@@ -752,19 +752,19 @@
fn set_debugregs(&self, _debugregs: &DebugRegs) -> Result<()> {
unimplemented!()
}
- fn get_xcrs(&self) -> Result<Vec<Register>> {
+ fn get_xcrs(&self) -> Result<BTreeMap<u32, u64>> {
unimplemented!()
}
- fn set_xcrs(&self, _xcrs: &[Register]) -> Result<()> {
+ fn set_xcr(&self, _xcr_index: u32, _value: u64) -> Result<()> {
unimplemented!()
}
- fn get_msrs(&self, _msrs: &mut Vec<Register>) -> Result<()> {
+ fn get_msr(&self, _msr_index: u32) -> Result<u64> {
unimplemented!()
}
- fn get_all_msrs(&self) -> Result<Vec<Register>> {
+ fn get_all_msrs(&self) -> Result<BTreeMap<u32, u64>> {
unimplemented!()
}
- fn set_msrs(&self, _msrs: &[Register]) -> Result<()> {
+ fn set_msr(&self, _msr_index: u32, _value: u64) -> Result<()> {
unimplemented!()
}
fn set_cpuid(&self, _cpuid: &CpuId) -> Result<()> {
@@ -779,15 +779,6 @@
fn set_guest_debug(&self, _addrs: &[GuestAddress], _enable_singlestep: bool) -> Result<()> {
unimplemented!()
}
- fn get_tsc_offset(&self) -> Result<u64> {
- unimplemented!()
- }
- fn set_tsc_offset(&self, _offset: u64) -> Result<()> {
- unimplemented!()
- }
- fn set_tsc_value(&self, _value: u64) -> Result<()> {
- unimplemented!()
- }
fn snapshot(&self) -> anyhow::Result<VcpuSnapshot> {
unimplemented!()
}
diff --git a/disk/Android.bp b/disk/Android.bp
index 4cbe5b3..4e4b37f 100644
--- a/disk/Android.bp
+++ b/disk/Android.bp
@@ -1,5 +1,6 @@
// This file is generated by cargo_embargo.
-// Do not modify this file as most changes will be overridden on upgrade.
+// Do not modify this file after the first "rust_*" or "genrule" module
+// because the changes will be overridden on upgrade.
// Content before the first "rust_*" or "genrule" module is preserved.
package {
diff --git a/disk/src/android_sparse.rs b/disk/src/android_sparse.rs
index e9fa76a..204b9fa 100644
--- a/disk/src/android_sparse.rs
+++ b/disk/src/android_sparse.rs
@@ -24,7 +24,6 @@
use cros_async::BackingMemory;
use cros_async::Executor;
use cros_async::IoSource;
-use data_model::zerocopy_from_reader;
use data_model::Le16;
use data_model::Le32;
use remain::sorted;
@@ -111,13 +110,15 @@
chunks: BTreeMap<u64, ChunkWithSize>,
}
-fn parse_chunk<T: Read + Seek>(mut input: &mut T, blk_sz: u64) -> Result<Option<ChunkWithSize>> {
+fn parse_chunk<T: Read + Seek>(input: &mut T, blk_sz: u64) -> Result<Option<ChunkWithSize>> {
const HEADER_SIZE: usize = mem::size_of::<ChunkHeader>();
let current_offset = input
.stream_position()
.map_err(Error::ReadSpecificationError)?;
- let chunk_header: ChunkHeader =
- zerocopy_from_reader(&mut input).map_err(Error::ReadSpecificationError)?;
+ let mut chunk_header = ChunkHeader::new_zeroed();
+ input
+ .read_exact(chunk_header.as_bytes_mut())
+ .map_err(Error::ReadSpecificationError)?;
let chunk_body_size = (chunk_header.total_sz.to_native() as usize)
.checked_sub(HEADER_SIZE)
.ok_or(Error::InvalidSpecification(format!(
@@ -166,8 +167,9 @@
pub fn from_file(mut file: File) -> Result<AndroidSparse> {
file.seek(SeekFrom::Start(0))
.map_err(Error::ReadSpecificationError)?;
- let sparse_header: SparseHeader =
- zerocopy_from_reader(&mut file).map_err(Error::ReadSpecificationError)?;
+ let mut sparse_header = SparseHeader::new_zeroed();
+ file.read_exact(sparse_header.as_bytes_mut())
+ .map_err(Error::ReadSpecificationError)?;
if sparse_header.magic != SPARSE_HEADER_MAGIC {
return Err(Error::InvalidSpecification(format!(
"Header did not match magic constant. Expected {:x}, was {:x}",
diff --git a/disk/src/qcow/mod.rs b/disk/src/qcow/mod.rs
index e23a362..96fbc0e 100644
--- a/disk/src/qcow/mod.rs
+++ b/disk/src/qcow/mod.rs
@@ -287,10 +287,10 @@
}
// L2 blocks are always one cluster long. They contain cluster_size/sizeof(u64) addresses.
let l2_size: u32 = cluster_size / size_of::<u64>() as u32;
- let num_clusters: u32 = div_round_up_u64(size, u64::from(cluster_size)) as u32;
- let num_l2_clusters: u32 = div_round_up_u32(num_clusters, l2_size);
- let l1_clusters: u32 = div_round_up_u32(num_l2_clusters, cluster_size);
- let header_clusters = div_round_up_u32(size_of::<QcowHeader>() as u32, cluster_size);
+ let num_clusters: u32 = size.div_ceil(u64::from(cluster_size)) as u32;
+ let num_l2_clusters: u32 = num_clusters.div_ceil(l2_size);
+ let l1_clusters: u32 = num_l2_clusters.div_ceil(cluster_size);
+ let header_clusters = (size_of::<QcowHeader>() as u32).div_ceil(cluster_size);
Ok(QcowHeader {
magic: QCOW_MAGIC,
version: 3,
@@ -317,10 +317,7 @@
num_clusters + l1_clusters + num_l2_clusters + header_clusters,
) as u32;
// The refcount table needs to store the offset of each refcount cluster.
- div_round_up_u32(
- max_refcount_clusters * size_of::<u64>() as u32,
- cluster_size,
- )
+ (max_refcount_clusters * size_of::<u64>() as u32).div_ceil(cluster_size)
},
nb_snapshots: 0,
snapshots_offset: 0,
@@ -389,8 +386,8 @@
fn max_refcount_clusters(refcount_order: u32, cluster_size: u32, num_clusters: u32) -> u64 {
// Use u64 as the product of the u32 inputs can overflow.
let refcount_bytes = (0x01 << refcount_order as u64) / 8;
- let for_data = div_round_up_u64(num_clusters as u64 * refcount_bytes, cluster_size as u64);
- let for_refcounts = div_round_up_u64(for_data * refcount_bytes, cluster_size as u64);
+ let for_data = (u64::from(num_clusters) * refcount_bytes).div_ceil(u64::from(cluster_size));
+ let for_refcounts = (for_data * refcount_bytes).div_ceil(u64::from(cluster_size));
for_data + for_refcounts
}
@@ -532,10 +529,10 @@
}
let l2_size = cluster_size / size_of::<u64>() as u64;
- let num_clusters = div_round_up_u64(header.size, cluster_size);
- let num_l2_clusters = div_round_up_u64(num_clusters, l2_size);
- let l1_clusters = div_round_up_u64(num_l2_clusters, cluster_size);
- let header_clusters = div_round_up_u64(size_of::<QcowHeader>() as u64, cluster_size);
+ let num_clusters = header.size.div_ceil(cluster_size);
+ let num_l2_clusters = num_clusters.div_ceil(l2_size);
+ let l1_clusters = num_l2_clusters.div_ceil(cluster_size);
+ let header_clusters = (size_of::<QcowHeader>() as u64).div_ceil(cluster_size);
if num_l2_clusters > MAX_RAM_POINTER_TABLE_SIZE {
return Err(Error::TooManyL1Entries(num_l2_clusters));
}
@@ -549,7 +546,7 @@
.map_err(Error::ReadingHeader)?,
);
- let num_clusters = div_round_up_u64(header.size, cluster_size);
+ let num_clusters = header.size.div_ceil(cluster_size);
let refcount_clusters = max_refcount_clusters(
header.refcount_order,
cluster_size as u32,
@@ -737,7 +734,7 @@
header: QcowHeader,
cluster_size: u64,
) -> Result<()> {
- let l1_clusters = div_round_up_u64(header.l1_size as u64, cluster_size);
+ let l1_clusters = u64::from(header.l1_size).div_ceil(cluster_size);
let l1_table_offset = header.l1_table_offset;
for i in 0..l1_clusters {
add_ref(refcounts, cluster_size, l1_table_offset + i * cluster_size)?;
@@ -810,7 +807,7 @@
refblock_clusters: u64,
pointers_per_cluster: u64,
) -> Result<Vec<u64>> {
- let refcount_table_entries = div_round_up_u64(refblock_clusters, pointers_per_cluster);
+ let refcount_table_entries = refblock_clusters.div_ceil(pointers_per_cluster);
let mut ref_table = vec![0; refcount_table_entries as usize];
let mut first_free_cluster: u64 = 0;
for refblock_addr in &mut ref_table {
@@ -899,24 +896,21 @@
.len();
let refcount_bits = 1u64 << header.refcount_order;
- let refcount_bytes = div_round_up_u64(refcount_bits, 8);
+ let refcount_bytes = refcount_bits.div_ceil(8);
let refcount_block_entries = cluster_size / refcount_bytes;
let pointers_per_cluster = cluster_size / size_of::<u64>() as u64;
- let data_clusters = div_round_up_u64(header.size, cluster_size);
- let l2_clusters = div_round_up_u64(data_clusters, pointers_per_cluster);
- let l1_clusters = div_round_up_u64(l2_clusters, cluster_size);
- let header_clusters = div_round_up_u64(size_of::<QcowHeader>() as u64, cluster_size);
+ let data_clusters = header.size.div_ceil(cluster_size);
+ let l2_clusters = data_clusters.div_ceil(pointers_per_cluster);
+ let l1_clusters = l2_clusters.div_ceil(cluster_size);
+ let header_clusters = (size_of::<QcowHeader>() as u64).div_ceil(cluster_size);
let max_clusters = data_clusters + l2_clusters + l1_clusters + header_clusters;
let mut max_valid_cluster_index = max_clusters;
- let refblock_clusters = div_round_up_u64(max_valid_cluster_index, refcount_block_entries);
- let reftable_clusters = div_round_up_u64(refblock_clusters, pointers_per_cluster);
+ let refblock_clusters = max_valid_cluster_index.div_ceil(refcount_block_entries);
+ let reftable_clusters = refblock_clusters.div_ceil(pointers_per_cluster);
// Account for refblocks and the ref table size needed to address them.
- let refblocks_for_refs = div_round_up_u64(
- refblock_clusters + reftable_clusters,
- refcount_block_entries,
- );
- let reftable_clusters_for_refs =
- div_round_up_u64(refblocks_for_refs, refcount_block_entries);
+ let refblocks_for_refs =
+ (refblock_clusters + reftable_clusters).div_ceil(refcount_block_entries);
+ let reftable_clusters_for_refs = refblocks_for_refs.div_ceil(refcount_block_entries);
max_valid_cluster_index += refblock_clusters + reftable_clusters;
max_valid_cluster_index += refblocks_for_refs + reftable_clusters_for_refs;
@@ -1630,16 +1624,6 @@
Ok(())
}
-// Ceiling of the division of `dividend`/`divisor`.
-fn div_round_up_u64(dividend: u64, divisor: u64) -> u64 {
- dividend / divisor + u64::from(dividend % divisor != 0)
-}
-
-// Ceiling of the division of `dividend`/`divisor`.
-fn div_round_up_u32(dividend: u32, divisor: u32) -> u32 {
- dividend / divisor + u32::from(dividend % divisor != 0)
-}
-
#[cfg(test)]
mod tests {
use std::fs::OpenOptions;
diff --git a/docs/book/src/SUMMARY.md b/docs/book/src/SUMMARY.md
index 4daa932..03612a2 100644
--- a/docs/book/src/SUMMARY.md
+++ b/docs/book/src/SUMMARY.md
@@ -26,6 +26,7 @@
- [USB](./devices/usb.md)
- [Wayland](./devices/wayland.md)
- [Video (experimental)](./devices/video.md)
+ - [Virtual U2F Passthrough](./devices/virtual_u2f.md)
- [Vhost-user](./devices/vhost_user.md)
- [Tracing](./tracing.md)
- [Integration](./integration/index.md)
diff --git a/docs/book/src/appendix/seccomp.md b/docs/book/src/appendix/seccomp.md
index 1e7b539..0befc68 100644
--- a/docs/book/src/appendix/seccomp.md
+++ b/docs/book/src/appendix/seccomp.md
@@ -6,7 +6,7 @@
device per architecture. Each device requires a unique set of syscalls to accomplish their function
and each architecture has slightly different naming for similar syscalls. The ChromeOS docs have a
useful
-[listing of syscalls](https://chromium.googlesource.com/chromiumos/docs/+/HEAD/constants/syscalls.md).
+[listing of syscalls](https://www.chromium.org/chromium-os/developer-library/reference/linux-constants/syscalls/).
The seccomp policies are compiled from `.policy` source files into BPF bytecode by
[`jail/build.rs`](https://chromium.googlesource.com/crosvm/crosvm/+/refs/heads/main/jail/build.rs)
diff --git a/docs/book/src/contributing/style_guide_platform_specific_code.md b/docs/book/src/contributing/style_guide_platform_specific_code.md
index 9fe444e..357d2e2 100644
--- a/docs/book/src/contributing/style_guide_platform_specific_code.md
+++ b/docs/book/src/contributing/style_guide_platform_specific_code.md
@@ -261,6 +261,38 @@
}
```
+### Exception: dispatch enums (trait-object like enums) should NOT be split
+
+Dispatch enums (enums which are pretending to be trait objects) should NOT be split as shown above.
+This is because these enums just forward method calls verbatim and don't have any meaningful
+cross-platform code. As such, there is no benefit to splitting the enum. Here is an acceptable example:
+
+```rust
+enum MyDispatcher {
+ #[cfg(windows)]
+ WinType(ImplForWindows),
+ #[cfg(unix)]
+ UnixType(ImplForUnix),
+}
+
+impl MyDispatcher {
+ fn foo(&self) {
+ match self {
+ #[cfg(windows)]
+ MyDispatcher::WinType(t) => t.foo(),
+ #[cfg(unix)]
+ MyDispatcher::UnixType(t) => t.foo(),
+ }
+ }
+}
+```
+
+## Errors
+
+Inlining all platform specific error values is ok. This is an exception to the [enum](#enum) to keep
+error handling simple. Organize platform independent errors first and then platform specific errors
+ordered by the target os name i.e. "linux" first and "windows" later.
+
## Code blocks and functions
If a code block or a function has little platform independent code and the bulk of the code is
@@ -385,12 +417,6 @@
}
```
-## Errors
-
-Inlining all platform specific error values is ok. This is an exception to the [enum](#enum) to keep
-error handling simple. Organize platform independent errors first and then platform specific errors
-ordered by the target os name i.e. "linux" first and "windows" later.
-
## Platform specific symbols
If a platform exports symbols that are specific to the platform only and are not exported by all
diff --git a/docs/book/src/devices/virtual_u2f.md b/docs/book/src/devices/virtual_u2f.md
new file mode 100644
index 0000000..db9b6bf
--- /dev/null
+++ b/docs/book/src/devices/virtual_u2f.md
@@ -0,0 +1,59 @@
+# Virtual U2F Passthrough
+
+crosvm supports sharing a single [u2f](https://en.wikipedia.org/wiki/Universal_2nd_Factor) USB
+device between the host and the guest. Unlike normal [USB](usb.md) devices, which must be
+exclusively attached to one VM, it is possible to share a single security key between multiple VMs
+and the host in a non-exclusive manner using the `attach_key` command.
+
+A generic hardware security key that supports the fido1/u2f protocol should appear as a
+`/dev/hidraw` interface on the host, like this:
+
+```shell
+$ lsusb
+Bus 004 Device 001: ID 1d6b:0003 Linux Foundation 3.0 root hub
+Bus 003 Device 018: ID 1050:0407 Yubico.com YubiKey OTP+FIDO+CCID
+Bus 003 Device 001: ID 1d6b:0002 Linux Foundation 2.0 root hub
+$ ls /dev/hidraw*
+/dev/hidraw0 /dev/hidraw1
+```
+
+In this example, the physical YubiKey presents both a keyboard interface (`/dev/hidraw0`) and a
+u2f-hid interface (`/dev/hidraw1`). Crosvm supports passing the `/dev/hidraw1` interface to the
+guest via the `crosvm usb attach_key` command.
+
+First, start crosvm making sure to specify a control socket:
+
+```shell
+$ crosvm run -s /run/crosvm.sock ${USUAL_CROSVM_ARGS}
+```
+
+Since the virtual u2f device is surfaced as a generic HID device, make sure your guest kernel is
+built with support for HID devices. Specifically it needs CONFIG_HID, CONFIG_HIDRAW,
+CONFIG_HID_GENERIC, and CONFIG_USB_HID enabled.
+
+Once the VM is launched, attach the security key with the following command on the host:
+
+```shell
+$ crosvm usb attach_key /dev/hidraw1 /run/crosvm.sock
+ok 1
+```
+
+The virtual security key will show up inside the guest as a Google USB device with Product and
+Vendor IDs as `18d1:f1d0`:
+
+```shell
+$ lsusb
+Bus 002 Device 001: ID 1d6b:0003 Linux Foundation 3.0 root hub
+Bus 001 Device 002: ID 18d1:f1d0 Google Inc.
+Bus 001 Device 001: ID 1d6b:0002 Linux Foundation 2.0 root hub
+```
+
+You can verify that the correct hidraw device has been created in the `/dev/` tree:
+
+```shell
+$ ls /dev/hidraw*
+/dev/hidraw0
+```
+
+The device should now be usable as a u2f-supported security key both inside the guest and on the host.
+It can also be attached to other crosvm instances at the same time too.
diff --git a/docs/book/src/integration/chromeos.md b/docs/book/src/integration/chromeos.md
index 8c5a239..34d8499 100644
--- a/docs/book/src/integration/chromeos.md
+++ b/docs/book/src/integration/chromeos.md
@@ -23,7 +23,7 @@
`cros_workon` package. The full package name is `chromeos-base/crosvm`.
The developer guide section on
-[Make your Changes](https://chromium.googlesource.com/chromiumos/docs/+/main/developer_guide.md#make-your-changes)
+[Make your Changes](https://www.chromium.org/chromium-os/developer-library/guides/development/developer-guide/#make-your-changes)
applies to crosvm as well. You can specify the development version to be built with cros_workon, and
build with cros build-packages. Consecutive builds without changes to dependency can be done with
emerge.
@@ -120,7 +120,7 @@
Your change need to be merged into [chromiumos/platform/crosvm] to cherry-pick it to a release
branch. You should follow
-[ChromiumOS Merge Workflow](https://chromium.googlesource.com/chromiumos/docs/+/HEAD/work_on_branch.md)
+[ChromiumOS Merge Workflow](https://www.chromium.org/chromium-os/developer-library/guides/development/work-on-branch/)
to cherry-pick your changes. Since changes are merged from [crosvm/crosvm] to
[chromiumos/platform/crosvm] through [the merge process](#the-merge-process), you can't use gerrit
to cherry-pick your changes but need to use git command locally.
diff --git a/docs/book/src/testing/index.md b/docs/book/src/testing/index.md
index e4355a1..0441e06 100644
--- a/docs/book/src/testing/index.md
+++ b/docs/book/src/testing/index.md
@@ -19,7 +19,8 @@
- Avoid accessing kernel devices
- Avoid global state in unit tests
-This allows us to execute unit tests for any platform using emulators such as qemu-static or wine64.
+This allows us to execute unit tests for any platform using emulators such as qemu-user-static or
+wine64.
### Documentation tests
@@ -79,17 +80,17 @@
The platforms below can all be tested using `tools/run_tests -p $platform`. The table indicates how
these tests are executed:
-| Platform | Build | Unit Tests | Integration Tests | E2E Tests |
-| :-------------------------- | :---: | :---------------------------: | :---------------: | :-------: |
-| x86_64 (linux) | ✅ | ✅ | ✅ | ✅ |
-| aarch64 (linux) | ✅ | ✅ (qemu-static[^qemu-static]) | ✅ (qemu[^qemu]) | ❌ |
-| armhf (linux) | ✅ | ✅ (qemu-static[^qemu-static]) | ❌ | ❌ |
-| mingw64[^windows] (linux) | 🚧 | 🚧 (wine64) | ❌ | ❌ |
-| mingw64[^windows] (windows) | 🚧 | 🚧 | 🚧 | ❌ |
+| Platform | Build | Unit Tests | Integration Tests | E2E Tests |
+| :-------------------------- | :---: | :-----------------------: | :---------------: | :-------: |
+| x86_64 (linux) | ✅ | ✅ | ✅ | ✅ |
+| aarch64 (linux) | ✅ | ✅ (qemu-user[^qemu-user]) | ✅ (qemu[^qemu]) | ❌ |
+| armhf (linux) | ✅ | ✅ (qemu-user[^qemu-user]) | ❌ | ❌ |
+| mingw64[^windows] (linux) | 🚧 | 🚧 (wine64) | ❌ | ❌ |
+| mingw64[^windows] (windows) | 🚧 | 🚧 | 🚧 | ❌ |
Crosvm CI will use the same configuration as `tools/run_tests`.
-[^qemu-static]: qemu-static-aarch64 or qemu-static-arm translate instructions into x86 and executes them on the
+[^qemu-user]: qemu-aarch64-static or qemu-arm-static translate instructions into x86 and execute them on the
host kernel. This works well for unit tests, but will fail when interacting with platform
specific kernel features.
diff --git a/e2e_tests/fixture/Cargo.toml b/e2e_tests/fixture/Cargo.toml
index b094f8c..966e895 100644
--- a/e2e_tests/fixture/Cargo.toml
+++ b/e2e_tests/fixture/Cargo.toml
@@ -20,3 +20,4 @@
delegate = {path = "../guest_under_test/rootfs/delegate"}
serde = {version = "*", features = ["derive"]}
serde_json = "*"
+readclock = { path = "../guest_under_test/rootfs/readclock" }
diff --git a/e2e_tests/fixture/src/vm.rs b/e2e_tests/fixture/src/vm.rs
index 55adc52..ee51978 100644
--- a/e2e_tests/fixture/src/vm.rs
+++ b/e2e_tests/fixture/src/vm.rs
@@ -25,6 +25,7 @@
use log::info;
use log::Level;
use prebuilts::download_file;
+use readclock::ClockValues;
use url::Url;
use crate::sys::SerialArgs;
@@ -405,7 +406,27 @@
TestVm::new_generic(TestVmSys::append_config_args, cfg, false)
}
- pub fn new_cold_restore(cfg: Config) -> Result<TestVm> {
+ /// Create `TestVm` from a snapshot, using `--restore` but NOT `--suspended`.
+ pub fn new_restore(cfg: Config) -> Result<TestVm> {
+ let mut vm = TestVm::new_generic_restore(TestVmSys::append_config_args, cfg, false)?;
+ // Send a resume request to wait for the restore to finish.
+ // We don't want to return from this function until the restore is complete, otherwise it
+ // will be difficult to differentiate between a slow restore and a slow response from the
+ // guest.
+ let vm = run_with_timeout(
+ move || {
+ vm.resume_full().expect("failed to resume after VM restore");
+ vm
+ },
+ Duration::from_secs(60),
+ )
+ .expect("VM restore timeout");
+
+ Ok(vm)
+ }
+
+ /// Create `TestVm` from a snapshot, using `--restore` AND `--suspended`.
+ pub fn new_restore_suspended(cfg: Config) -> Result<TestVm> {
TestVm::new_generic_restore(TestVmSys::append_config_args, cfg, false)
}
@@ -610,6 +631,13 @@
self.sys
.crosvm_command("swap", vec![command.to_string()], self.sudo)
}
+
+ pub fn guest_clock_values(&mut self) -> Result<ClockValues> {
+ let output = self
+ .exec_in_guest("readclock")
+ .context("Failed to execute readclock binary")?;
+ serde_json::from_str(&output.stdout).context("Failed to parse result")
+ }
}
impl Drop for TestVm {
diff --git a/e2e_tests/guest_under_test/initramfs/init.sh b/e2e_tests/guest_under_test/initramfs/init.sh
index de8e772..d40e5ea 100644
--- a/e2e_tests/guest_under_test/initramfs/init.sh
+++ b/e2e_tests/guest_under_test/initramfs/init.sh
@@ -11,7 +11,7 @@
fi
}
-mount -t proc none /proc
+mount -t proc /proc -t proc
mount -t sysfs none /sys
mount -t devtmpfs none /dev
diff --git a/e2e_tests/guest_under_test/kernel/common.config b/e2e_tests/guest_under_test/kernel/common.config
index 45f095b..3d0a646 100644
--- a/e2e_tests/guest_under_test/kernel/common.config
+++ b/e2e_tests/guest_under_test/kernel/common.config
@@ -15,11 +15,16 @@
CONFIG_SHMEM=y
CONFIG_SYSVIPC=y
+# Enable /proc/config.gz
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+
# Virtio devices
CONFIG_VIRTIO_VSOCKETS=y
CONFIG_VIRTIO_PCI=y
CONFIG_VIRTIO_PMEM=y
CONFIG_VIRTIO_BALLOON=y
+CONFIG_VIRTIO_PVCLOCK=y
CONFIG_VSOCKETS=y
# Networking
diff --git a/e2e_tests/guest_under_test/rootfs/readclock/Android.bp b/e2e_tests/guest_under_test/rootfs/readclock/Android.bp
index af297e0..0cc1cc5 100644
--- a/e2e_tests/guest_under_test/rootfs/readclock/Android.bp
+++ b/e2e_tests/guest_under_test/rootfs/readclock/Android.bp
@@ -1,5 +1,6 @@
// This file is generated by cargo_embargo.
-// Do not modify this file as most changes will be overridden on upgrade.
+// Do not modify this file after the first "rust_*" or "genrule" module
+// because the changes will be overridden on upgrade.
// Content before the first "rust_*" or "genrule" module is preserved.
package {
diff --git a/e2e_tests/tests/boot.rs b/e2e_tests/tests/boot.rs
index 14d98ce..86b83da 100644
--- a/e2e_tests/tests/boot.rs
+++ b/e2e_tests/tests/boot.rs
@@ -58,9 +58,13 @@
assert_eq!(vm.exec_in_guest("echo 42").unwrap().stdout.trim(), "42");
}
+/*
+ * VCPU-level suspend/resume tests (which do NOT suspend the devices)
+ */
+
#[cfg(any(target_os = "android", target_os = "linux"))]
#[test]
-fn boot_test_suspend_resume() {
+fn vcpu_suspend_resume_succeeds() {
// There is no easy way for us to check if the VM is actually suspended. But at
// least exercise the code-path.
let mut vm = TestVm::new(Config::new()).unwrap();
@@ -71,7 +75,43 @@
#[cfg(any(target_os = "android", target_os = "linux"))]
#[test]
-fn boot_test_suspend_resume_full() {
+fn vcpu_suspend_resume_succeeds_with_pvclock() {
+ // There is no easy way for us to check if the VM is actually suspended. But at
+ // least exercise the code-path.
+ let mut config = Config::new();
+ config = config.extra_args(vec!["--pvclock".to_string()]);
+ let mut vm = TestVm::new(config).unwrap();
+ vm.suspend().unwrap();
+ vm.resume().unwrap();
+ assert_eq!(vm.exec_in_guest("echo 42").unwrap().stdout.trim(), "42");
+}
+
+/*
+ * Full suspend/resume tests (which suspend the devices and vcpus)
+ */
+
+#[cfg(any(target_os = "android", target_os = "linux"))]
+#[test]
+fn full_suspend_resume_test_suspend_resume_full() {
+ // There is no easy way for us to check if the VM is actually suspended. But at
+ // least exercise the code-path.
+ let mut config = Config::new();
+ config = config.with_stdout_hardware("legacy-virtio-console");
+    // Why is this test called "full"? Can anyone explain...?
+ config = config.extra_args(vec![
+ "--no-usb".to_string(),
+ "--no-balloon".to_string(),
+ "--no-rng".to_string(),
+ ]);
+ let mut vm = TestVm::new(config).unwrap();
+ vm.suspend_full().unwrap();
+ vm.resume_full().unwrap();
+ assert_eq!(vm.exec_in_guest("echo 42").unwrap().stdout.trim(), "42");
+}
+
+#[cfg(any(target_os = "android", target_os = "linux"))]
+#[test]
+fn full_suspend_resume_with_pvclock() {
// There is no easy way for us to check if the VM is actually suspended. But at
// least exercise the code-path.
let mut config = Config::new();
@@ -80,8 +120,8 @@
"--no-usb".to_string(),
"--no-balloon".to_string(),
"--no-rng".to_string(),
+ "--pvclock".to_string(),
]);
-
let mut vm = TestVm::new(config).unwrap();
vm.suspend_full().unwrap();
vm.resume_full().unwrap();
@@ -90,6 +130,75 @@
#[cfg(any(target_os = "android", target_os = "linux"))]
#[test]
+fn vcpu_suspend_resume_with_pvclock_adjusts_guest_clocks() {
+ use readclock::ClockValues;
+
+ // SUSPEND_DURATION defines how long the VM should be suspended
+ const SUSPEND_DURATION: Duration = Duration::from_secs(2);
+ const ALLOWANCE: Duration = Duration::from_secs(1);
+
+ // Launch a VM with pvclock option
+ let mut config = Config::new();
+ config = config.with_stdout_hardware("legacy-virtio-console");
+ config = config.extra_args(vec![
+ "--no-usb".to_string(),
+ "--no-balloon".to_string(),
+ "--no-rng".to_string(),
+ "--pvclock".to_string(),
+ ]);
+ let mut vm = TestVm::new(config).unwrap();
+
+ // Mount the proc fs
+ vm.exec_in_guest("mount proc /proc -t proc").unwrap();
+ // Ensure that the kernel has virtio-pvclock
+ assert_eq!(
+ vm.exec_in_guest("cat /proc/config.gz | gunzip | grep '^CONFIG_VIRTIO_PVCLOCK'")
+ .unwrap()
+ .stdout
+ .trim(),
+ "CONFIG_VIRTIO_PVCLOCK=y"
+ );
+
+ let guest_clocks_before = vm.guest_clock_values().unwrap();
+ let host_clocks_before = ClockValues::now();
+ vm.suspend().unwrap();
+ println!("Sleeping {SUSPEND_DURATION:?}...");
+ std::thread::sleep(SUSPEND_DURATION);
+ vm.resume().unwrap();
+ // Sleep a bit, to give the guest a chance to move the CLOCK_BOOTTIME value forward.
+ std::thread::sleep(SUSPEND_DURATION);
+ let guest_clocks_after = vm.guest_clock_values().unwrap();
+ let host_clocks_after = ClockValues::now();
+ // Calculating in f64 since the result may be negative
+ let guest_mono_diff = guest_clocks_after.clock_monotonic().as_secs_f64()
+ - guest_clocks_before.clock_monotonic().as_secs_f64();
+ let guest_boot_diff = guest_clocks_after.clock_boottime().as_secs_f64()
+ - guest_clocks_before.clock_boottime().as_secs_f64();
+ let host_boot_diff = host_clocks_after.clock_boottime().as_secs_f64()
+ - host_clocks_before.clock_boottime().as_secs_f64();
+
+ assert!(host_boot_diff > SUSPEND_DURATION.as_secs_f64());
+    // Although BOOTTIME and MONOTONIC behavior varies in general due to real-world factors
+    // such as the kernel implementation, the virtualization platform, and hardware issues,
+    // when virtio-pvclock is in use crosvm makes its best effort to maintain the following
+    // invariants so that the guest's userland stays consistent:
+
+    // Invariant 1: The guest's MONOTONIC clock behaves as if it is stopped while the VM is
+    // suspended, in terms of crosvm's VM instance running state. In other words, the guest's
+    // monotonic difference is smaller than the host's "real" elapsed time by SUSPEND_DURATION.
+ let monotonic_error = guest_mono_diff + SUSPEND_DURATION.as_secs_f64() - host_boot_diff;
+ assert!(monotonic_error < ALLOWANCE.as_secs_f64());
+
+    // Invariant 2: Subtracting the guest's MONOTONIC from the guest's BOOTTIME should be
+    // equal to the total duration that the VM was in the "suspended" state, as noted
+    // in Invariant 1.
+ let guest_suspend_duration = guest_boot_diff - guest_mono_diff;
+ let boottime_error = (guest_suspend_duration - SUSPEND_DURATION.as_secs_f64()).abs();
+ assert!(boottime_error < ALLOWANCE.as_secs_f64());
+}
+
+#[cfg(any(target_os = "android", target_os = "linux"))]
+#[test]
fn boot_test_vm_disable_sandbox() {
let mut vm = TestVm::new(Config::new().disable_sandbox()).unwrap();
assert_eq!(vm.exec_in_guest("echo 42").unwrap().stdout.trim(), "42");
diff --git a/e2e_tests/tests/pci_hotplug.rs b/e2e_tests/tests/pci_hotplug.rs
index 5e313cb..b0371f7 100644
--- a/e2e_tests/tests/pci_hotplug.rs
+++ b/e2e_tests/tests/pci_hotplug.rs
@@ -201,6 +201,7 @@
/// Checks tap hotplug works with a device added, removed, then added again.
#[test]
+#[ignore = "b/333090169 test is flaky"]
fn tap_hotplug_add_remove_add() {
call_test_with_sudo("tap_hotplug_add_remove_add_impl");
}
@@ -262,6 +263,7 @@
/// Checks tap hotplug works with a device added, removed, then rapidly added again.
#[test]
+#[ignore = "b/333090169 test is flaky"]
fn tap_hotplug_add_remove_rapid_add() {
call_test_with_sudo("tap_hotplug_add_remove_rapid_add_impl");
}
diff --git a/e2e_tests/tests/suspend_resume.rs b/e2e_tests/tests/suspend_resume.rs
index f893d2c..c814d39 100644
--- a/e2e_tests/tests/suspend_resume.rs
+++ b/e2e_tests/tests/suspend_resume.rs
@@ -119,7 +119,7 @@
// shut down VM
drop(vm);
// Start up VM with cold restore.
- let mut vm = TestVm::new_cold_restore(new_config().extra_args(vec![
+ let mut vm = TestVm::new_restore_suspended(new_config().extra_args(vec![
"--restore".to_string(),
snap1_path.to_str().unwrap().to_string(),
"--suspended".to_string(),
@@ -213,7 +213,7 @@
snap_path.to_str().unwrap().to_string(),
"--no-usb".to_string(),
]);
- let _vm = TestVm::new_cold_restore(config).unwrap();
+ let _vm = TestVm::new_restore(config).unwrap();
}
fn create_net_config(socket: &Path) -> VuConfig {
diff --git a/e2e_tests/tests/vsock.rs b/e2e_tests/tests/vsock.rs
index 6415428..dfdb13c 100644
--- a/e2e_tests/tests/vsock.rs
+++ b/e2e_tests/tests/vsock.rs
@@ -89,7 +89,7 @@
])
.with_stdout_hardware("legacy-virtio-console");
drop(vm);
- vm = TestVm::new_cold_restore(config).unwrap();
+ vm = TestVm::new_restore(config).unwrap();
host_to_guest_connection(&mut vm, guest_cid, guest_port);
}
@@ -119,7 +119,7 @@
])
.with_stdout_hardware("legacy-virtio-console");
drop(vm);
- vm = TestVm::new_cold_restore(config.disable_sandbox()).unwrap();
+ vm = TestVm::new_restore(config.disable_sandbox()).unwrap();
host_to_guest_connection(&mut vm, guest_cid, guest_port);
}
@@ -199,7 +199,7 @@
])
.with_stdout_hardware("legacy-virtio-console");
drop(vm);
- vm = TestVm::new_cold_restore(config).unwrap();
+ vm = TestVm::new_restore(config).unwrap();
guest_to_host_connection(&mut vm, host_port);
}
@@ -230,7 +230,7 @@
])
.with_stdout_hardware("legacy-virtio-console");
drop(vm);
- vm = TestVm::new_cold_restore(config.disable_sandbox()).unwrap();
+ vm = TestVm::new_restore(config.disable_sandbox()).unwrap();
guest_to_host_connection(&mut vm, host_port);
}
@@ -277,6 +277,7 @@
}
#[test]
+#[ignore = "b/333090069 test is flaky"]
fn vhost_user_host_to_guest() {
let guest_port = generate_vhost_port();
let guest_cid = generate_guest_cid();
@@ -295,6 +296,7 @@
}
#[test]
+#[ignore = "b/333090069 test is flaky"]
fn vhost_user_host_to_guest_with_devices() {
let guest_port = generate_vhost_port();
let guest_cid = generate_guest_cid();
diff --git a/fuse/Android.bp b/fuse/Android.bp
index d3544dd..5d6c6fd 100644
--- a/fuse/Android.bp
+++ b/fuse/Android.bp
@@ -1,5 +1,6 @@
// This file is generated by cargo_embargo.
-// Do not modify this file as most changes will be overridden on upgrade.
+// Do not modify this file after the first "rust_*" or "genrule" module
+// because the changes will be overridden on upgrade.
// Content before the first "rust_*" or "genrule" module is preserved.
package {
@@ -30,7 +31,6 @@
"libbitflags",
"libcros_tracing",
"libcrossbeam_utils",
- "libdata_model",
"liblibc",
"libthiserror",
"libzerocopy",
@@ -55,7 +55,6 @@
"libbitflags",
"libcros_tracing",
"libcrossbeam_utils",
- "libdata_model",
"liblibc",
"libthiserror",
"libzerocopy",
diff --git a/fuse/Cargo.toml b/fuse/Cargo.toml
index 84e5a4a..e256534 100644
--- a/fuse/Cargo.toml
+++ b/fuse/Cargo.toml
@@ -12,7 +12,6 @@
bitflags = "2.2.1"
crossbeam-utils = "0.8"
cros_tracing = { path = "../cros_tracing" }
-data_model = { path = "../common/data_model" }
enumn = "0.1.0"
libc = { version = "*", features = ["extra_traits"] }
remain = "0.2"
diff --git a/fuse/src/server.rs b/fuse/src/server.rs
index 1d6f494..3b988bf 100644
--- a/fuse/src/server.rs
+++ b/fuse/src/server.rs
@@ -15,10 +15,9 @@
use base::error;
use base::pagesize;
use base::Protection;
-use data_model::zerocopy_from_reader;
-use data_model::zerocopy_from_slice;
use zerocopy::AsBytes;
-use zerocopy::Unalign;
+use zerocopy::FromBytes;
+use zerocopy::FromZeroes;
use crate::filesystem::Context;
use crate::filesystem::DirEntry;
@@ -39,7 +38,14 @@
const SELINUX_XATTR_CSTR: &[u8] = b"security.selinux\0";
/// A trait for reading from the underlying FUSE endpoint.
-pub trait Reader: io::Read {}
+pub trait Reader: io::Read {
+ fn read_struct<T: AsBytes + FromBytes + FromZeroes>(&mut self) -> Result<T> {
+ let mut out = T::new_zeroed();
+ self.read_exact(out.as_bytes_mut())
+ .map_err(Error::DecodeMessage)?;
+ Ok(out)
+ }
+}
impl<R: Reader> Reader for &'_ mut R {}
@@ -152,7 +158,7 @@
w: W,
mapper: M,
) -> Result<usize> {
- let in_header: InHeader = zerocopy_from_reader(&mut r).map_err(Error::DecodeMessage)?;
+ let in_header: InHeader = r.read_struct()?;
cros_tracing::trace_simple_print!("fuse server: handle_message: in_header={:?}", in_header);
if in_header.len
@@ -247,7 +253,7 @@
}
fn forget<R: Reader>(&self, in_header: InHeader, mut r: R) -> Result<usize> {
- let ForgetIn { nlookup } = zerocopy_from_reader(&mut r).map_err(Error::DecodeMessage)?;
+ let ForgetIn { nlookup } = r.read_struct()?;
self.fs
.forget(Context::from(in_header), in_header.nodeid.into(), nlookup);
@@ -261,7 +267,7 @@
flags,
dummy: _,
fh,
- } = zerocopy_from_reader(&mut r).map_err(Error::DecodeMessage)?;
+ } = r.read_struct()?;
let handle = if (flags & GETATTR_FH) != 0 {
Some(fh.into())
@@ -287,7 +293,7 @@
}
fn setattr<R: Reader, W: Writer>(&self, in_header: InHeader, mut r: R, w: W) -> Result<usize> {
- let setattr_in: SetattrIn = zerocopy_from_reader(&mut r).map_err(Error::DecodeMessage)?;
+ let setattr_in: SetattrIn = r.read_struct()?;
let handle = if setattr_in.valid & FATTR_FH != 0 {
Some(setattr_in.fh.into())
@@ -374,7 +380,7 @@
fn mknod<R: Reader, W: Writer>(&self, in_header: InHeader, mut r: R, w: W) -> Result<usize> {
let MknodIn {
mode, rdev, umask, ..
- } = zerocopy_from_reader(&mut r).map_err(Error::DecodeMessage)?;
+ } = r.read_struct()?;
let buflen = (in_header.len as usize)
.checked_sub(size_of::<InHeader>())
@@ -412,7 +418,7 @@
}
fn mkdir<R: Reader, W: Writer>(&self, in_header: InHeader, mut r: R, w: W) -> Result<usize> {
- let MkdirIn { mode, umask } = zerocopy_from_reader(&mut r).map_err(Error::DecodeMessage)?;
+ let MkdirIn { mode, umask } = r.read_struct()?;
let buflen = (in_header.len as usize)
.checked_sub(size_of::<InHeader>())
@@ -454,8 +460,7 @@
mut r: R,
w: W,
) -> Result<usize> {
- let ChromeOsTmpfileIn { mode, umask } =
- zerocopy_from_reader(&mut r).map_err(Error::DecodeMessage)?;
+ let ChromeOsTmpfileIn { mode, umask } = r.read_struct()?;
let len = (in_header.len as usize)
.checked_sub(size_of::<InHeader>())
@@ -559,14 +564,13 @@
}
fn rename<R: Reader, W: Writer>(&self, in_header: InHeader, mut r: R, w: W) -> Result<usize> {
- let RenameIn { newdir } = zerocopy_from_reader(&mut r).map_err(Error::DecodeMessage)?;
+ let RenameIn { newdir } = r.read_struct()?;
self.do_rename(in_header, size_of::<RenameIn>(), newdir, 0, r, w)
}
fn rename2<R: Reader, W: Writer>(&self, in_header: InHeader, mut r: R, w: W) -> Result<usize> {
- let Rename2In { newdir, flags, .. } =
- zerocopy_from_reader(&mut r).map_err(Error::DecodeMessage)?;
+ let Rename2In { newdir, flags, .. } = r.read_struct()?;
#[allow(clippy::unnecessary_cast)]
let flags = flags & (libc::RENAME_EXCHANGE | libc::RENAME_NOREPLACE) as u32;
@@ -575,7 +579,7 @@
}
fn link<R: Reader, W: Writer>(&self, in_header: InHeader, mut r: R, w: W) -> Result<usize> {
- let LinkIn { oldnodeid } = zerocopy_from_reader(&mut r).map_err(Error::DecodeMessage)?;
+ let LinkIn { oldnodeid } = r.read_struct()?;
let namelen = (in_header.len as usize)
.checked_sub(size_of::<InHeader>())
@@ -601,7 +605,7 @@
}
fn open<R: Reader, W: Writer>(&self, in_header: InHeader, mut r: R, w: W) -> Result<usize> {
- let OpenIn { flags, .. } = zerocopy_from_reader(&mut r).map_err(Error::DecodeMessage)?;
+ let OpenIn { flags, .. } = r.read_struct()?;
match self
.fs
@@ -634,7 +638,7 @@
lock_owner,
flags,
..
- } = zerocopy_from_reader(&mut r).map_err(Error::DecodeMessage)?;
+ } = r.read_struct()?;
if size > self.fs.max_buffer_size() {
return reply_error(
@@ -694,7 +698,7 @@
lock_owner,
flags,
..
- } = zerocopy_from_reader(&mut r).map_err(Error::DecodeMessage)?;
+ } = r.read_struct()?;
if size > self.fs.max_buffer_size() {
return reply_error(
@@ -751,7 +755,7 @@
flags,
release_flags,
lock_owner,
- } = zerocopy_from_reader(&mut r).map_err(Error::DecodeMessage)?;
+ } = r.read_struct()?;
let flush = release_flags & RELEASE_FLUSH != 0;
let flock_release = release_flags & RELEASE_FLOCK_UNLOCK != 0;
@@ -778,7 +782,7 @@
fn fsync<R: Reader, W: Writer>(&self, in_header: InHeader, mut r: R, w: W) -> Result<usize> {
let FsyncIn {
fh, fsync_flags, ..
- } = zerocopy_from_reader(&mut r).map_err(Error::DecodeMessage)?;
+ } = r.read_struct()?;
let datasync = fsync_flags & 0x1 != 0;
match self.fs.fsync(
@@ -793,8 +797,7 @@
}
fn setxattr<R: Reader, W: Writer>(&self, in_header: InHeader, mut r: R, w: W) -> Result<usize> {
- let SetxattrIn { size, flags } =
- zerocopy_from_reader(&mut r).map_err(Error::DecodeMessage)?;
+ let SetxattrIn { size, flags } = r.read_struct()?;
// The name and value and encoded one after another and separated by a '\0' character.
let len = (in_header.len as usize)
@@ -831,7 +834,7 @@
}
fn getxattr<R: Reader, W: Writer>(&self, in_header: InHeader, mut r: R, w: W) -> Result<usize> {
- let GetxattrIn { size, .. } = zerocopy_from_reader(&mut r).map_err(Error::DecodeMessage)?;
+ let GetxattrIn { size, .. } = r.read_struct()?;
let namelen = (in_header.len as usize)
.checked_sub(size_of::<InHeader>())
@@ -874,7 +877,7 @@
mut r: R,
w: W,
) -> Result<usize> {
- let GetxattrIn { size, .. } = zerocopy_from_reader(&mut r).map_err(Error::DecodeMessage)?;
+ let GetxattrIn { size, .. } = r.read_struct()?;
if size > self.fs.max_buffer_size() {
return reply_error(
@@ -932,7 +935,7 @@
unused: _,
padding: _,
lock_owner,
- } = zerocopy_from_reader(&mut r).map_err(Error::DecodeMessage)?;
+ } = r.read_struct()?;
match self.fs.flush(
Context::from(in_header),
@@ -952,7 +955,7 @@
minor,
max_readahead,
flags,
- } = zerocopy_from_reader(&mut r).map_err(Error::DecodeMessage)?;
+ } = r.read_struct()?;
if major < KERNEL_VERSION {
error!("Unsupported fuse protocol version: {}.{}", major, minor);
@@ -990,7 +993,7 @@
if (FsOptions::from_bits_truncate(u64::from(flags)) & FsOptions::INIT_EXT).is_empty() {
InitInExt::default()
} else {
- zerocopy_from_reader(&mut r).map_err(Error::DecodeMessage)?
+ r.read_struct()?
};
// These fuse features are supported by this server by default.
@@ -1052,7 +1055,7 @@
}
fn opendir<R: Reader, W: Writer>(&self, in_header: InHeader, mut r: R, w: W) -> Result<usize> {
- let OpenIn { flags, .. } = zerocopy_from_reader(&mut r).map_err(Error::DecodeMessage)?;
+ let OpenIn { flags, .. } = r.read_struct()?;
match self
.fs
@@ -1079,7 +1082,7 @@
) -> Result<usize> {
let ReadIn {
fh, offset, size, ..
- } = zerocopy_from_reader(&mut r).map_err(Error::DecodeMessage)?;
+ } = r.read_struct()?;
if size > self.fs.max_buffer_size() {
return reply_error(
@@ -1171,7 +1174,7 @@
cros_tracing::trace_simple_print!("fuse server: readdirplus: in_header={:?}", in_header);
let ReadIn {
fh, offset, size, ..
- } = zerocopy_from_reader(&mut r).map_err(Error::DecodeMessage)?;
+ } = r.read_struct()?;
if size > self.fs.max_buffer_size() {
return reply_error(
@@ -1258,8 +1261,7 @@
mut r: R,
w: W,
) -> Result<usize> {
- let ReleaseIn { fh, flags, .. } =
- zerocopy_from_reader(&mut r).map_err(Error::DecodeMessage)?;
+ let ReleaseIn { fh, flags, .. } = r.read_struct()?;
match self.fs.releasedir(
Context::from(in_header),
@@ -1275,7 +1277,7 @@
fn fsyncdir<R: Reader, W: Writer>(&self, in_header: InHeader, mut r: R, w: W) -> Result<usize> {
let FsyncIn {
fh, fsync_flags, ..
- } = zerocopy_from_reader(&mut r).map_err(Error::DecodeMessage)?;
+ } = r.read_struct()?;
let datasync = fsync_flags & 0x1 != 0;
match self.fs.fsyncdir(
@@ -1314,7 +1316,7 @@
}
fn access<R: Reader, W: Writer>(&self, in_header: InHeader, mut r: R, w: W) -> Result<usize> {
- let AccessIn { mask, .. } = zerocopy_from_reader(&mut r).map_err(Error::DecodeMessage)?;
+ let AccessIn { mask, .. } = r.read_struct()?;
match self
.fs
@@ -1328,7 +1330,7 @@
fn create<R: Reader, W: Writer>(&self, in_header: InHeader, mut r: R, w: W) -> Result<usize> {
let CreateIn {
flags, mode, umask, ..
- } = zerocopy_from_reader(&mut r).map_err(Error::DecodeMessage)?;
+ } = r.read_struct()?;
let buflen = (in_header.len as usize)
.checked_sub(size_of::<InHeader>())
@@ -1414,7 +1416,7 @@
arg,
in_size,
out_size,
- } = zerocopy_from_reader(&mut r).map_err(Error::DecodeMessage)?;
+ } = r.read_struct()?;
let res = self.fs.ioctl(
in_header.into(),
@@ -1466,8 +1468,7 @@
mut r: R,
w: W,
) -> Result<usize> {
- let BatchForgetIn { count, .. } =
- zerocopy_from_reader(&mut r).map_err(Error::DecodeMessage)?;
+ let BatchForgetIn { count, .. } = r.read_struct()?;
if let Some(size) = (count as usize).checked_mul(size_of::<ForgetOne>()) {
if size > self.fs.max_buffer_size() as usize {
@@ -1487,11 +1488,8 @@
let mut requests = Vec::with_capacity(count as usize);
for _ in 0..count {
- requests.push(
- zerocopy_from_reader(&mut r)
- .map(|f: ForgetOne| (f.nodeid.into(), f.nlookup))
- .map_err(Error::DecodeMessage)?,
- );
+ let f: ForgetOne = r.read_struct()?;
+ requests.push((f.nodeid.into(), f.nlookup));
}
self.fs.batch_forget(Context::from(in_header), requests);
@@ -1512,7 +1510,7 @@
length,
mode,
..
- } = zerocopy_from_reader(&mut r).map_err(Error::DecodeMessage)?;
+ } = r.read_struct()?;
match self.fs.fallocate(
Context::from(in_header),
@@ -1549,7 +1547,7 @@
off_dst,
len,
flags,
- } = zerocopy_from_reader(&mut r).map_err(Error::DecodeMessage)?;
+ } = r.read_struct()?;
match self.fs.copy_file_range(
Context::from(in_header),
@@ -1592,7 +1590,7 @@
len,
flags,
moffset,
- } = zerocopy_from_reader(&mut r).map_err(Error::DecodeMessage)?;
+ } = r.read_struct()?;
let flags = SetUpMappingFlags::from_bits_truncate(flags);
let mut prot = 0;
@@ -1643,8 +1641,7 @@
W: Writer,
M: Mapper,
{
- let RemoveMappingIn { count } =
- zerocopy_from_reader(&mut r).map_err(Error::DecodeMessage)?;
+ let RemoveMappingIn { count } = r.read_struct()?;
// `FUSE_REMOVEMAPPING_MAX_ENTRY` is defined as
// `PAGE_SIZE / sizeof(struct fuse_removemapping_one)` in /kernel/include/uapi/linux/fuse.h.
@@ -1660,7 +1657,8 @@
let mut msgs = Vec::with_capacity(count as usize);
for _ in 0..(count as usize) {
- msgs.push(zerocopy_from_reader(&mut r).map_err(Error::DecodeMessage)?);
+ let msg: RemoveMappingOne = r.read_struct()?;
+ msgs.push(msg);
}
match self.fs.remove_mapping(&msgs, mapper) {
@@ -1677,7 +1675,7 @@
) -> Result<usize> {
let CreateIn {
flags, mode, umask, ..
- } = zerocopy_from_reader(&mut r).map_err(Error::DecodeMessage)?;
+ } = r.read_struct()?;
let buflen = (in_header.len as usize)
.checked_sub(size_of::<InHeader>())
@@ -1963,34 +1961,32 @@
// Because the security context data block may have been preceded by variable-length strings,
// `SecctxHeader` and the subsequent `Secctx` structs may not be correctly byte-aligned
// within `buf`.
- let secctx_header: &Unalign<SecctxHeader> =
- zerocopy_from_slice(&buf[0..size_of::<SecctxHeader>()]).ok_or(Error::DecodeMessage(
- io::Error::from_raw_os_error(libc::EINVAL),
- ))?;
+ let secctx_header = SecctxHeader::read_from_prefix(buf).ok_or(Error::DecodeMessage(
+ io::Error::from_raw_os_error(libc::EINVAL),
+ ))?;
// FUSE 7.38 introduced a generic request extension with the same structure as `SecctxHeader`.
// A `nr_secctx` value above `MAX_NR_SECCTX` indicates that this data block does not contain
// any security context information.
- if secctx_header.get().nr_secctx > MAX_NR_SECCTX {
+ if secctx_header.nr_secctx > MAX_NR_SECCTX {
return Ok(None);
}
let mut cur_secctx_pos = size_of::<SecctxHeader>();
- for _ in 0..secctx_header.get().nr_secctx {
+ for _ in 0..secctx_header.nr_secctx {
// `SecctxHeader.size` denotes the total size for the `SecctxHeader`, each of the
// `nr_secctx` `Secctx` structs along with the corresponding context name and value,
// and any additional padding.
if (cur_secctx_pos + size_of::<Secctx>()) > buf.len()
- || (cur_secctx_pos + size_of::<Secctx>()) > secctx_header.get().size as usize
+ || (cur_secctx_pos + size_of::<Secctx>()) > secctx_header.size as usize
{
return Err(Error::InvalidHeaderLength);
}
- let secctx: &Unalign<Secctx> =
- zerocopy_from_slice(&buf[cur_secctx_pos..(cur_secctx_pos + size_of::<Secctx>())])
- .ok_or(Error::DecodeMessage(io::Error::from_raw_os_error(
- libc::EINVAL,
- )))?;
+ let secctx =
+ Secctx::read_from(&buf[cur_secctx_pos..(cur_secctx_pos + size_of::<Secctx>())]).ok_or(
+ Error::DecodeMessage(io::Error::from_raw_os_error(libc::EINVAL)),
+ )?;
cur_secctx_pos += size_of::<Secctx>();
@@ -2008,13 +2004,13 @@
let value = secctx_data[1];
cur_secctx_pos += name.to_bytes_with_nul().len() + value.to_bytes_with_nul().len();
- if cur_secctx_pos > secctx_header.get().size as usize {
+ if cur_secctx_pos > secctx_header.size as usize {
return Err(Error::InvalidHeaderLength);
}
// `Secctx.size` contains the size of the security context value (not including the
// corresponding context name).
- if value.to_bytes_with_nul().len() as u32 != secctx.get().size {
+ if value.to_bytes_with_nul().len() as u32 != secctx.size {
return Err(Error::InvalidHeaderLength);
}
@@ -2030,7 +2026,7 @@
.checked_add(7)
.map(|l| l & !7)
.ok_or_else(|| Error::InvalidHeaderLength)?;
- if padded_secctx_size != secctx_header.get().size as usize {
+ if padded_secctx_size != secctx_header.size as usize {
return Err(Error::InvalidHeaderLength);
}
diff --git a/gpu_display/Android.bp b/gpu_display/Android.bp
index 9c6fe90..e96b71d 100644
--- a/gpu_display/Android.bp
+++ b/gpu_display/Android.bp
@@ -1,5 +1,6 @@
// This file is generated by cargo_embargo.
-// Do not modify this file as most changes will be overridden on upgrade.
+// Do not modify this file after the first "rust_*" or "genrule" module
+// because the changes will be overridden on upgrade.
// Content before the first "rust_*" or "genrule" module is preserved.
// cargo2android.py limitations:
@@ -26,11 +27,13 @@
cargo_pkg_version: "0.1.0",
srcs: ["src/lib.rs"],
edition: "2021",
+ features: [
+ "android_display",
+ ],
rustlibs: [
"libanyhow",
"libbase_rust",
"libcfg_if",
- "libdata_model",
"liblibc",
"liblinux_input_sys",
"libserde",
@@ -46,11 +49,18 @@
target: {
host: {
shared_libs: ["libwayland_client"],
+ features: ["android_display_stub"],
},
android: {
static_libs: [
"libwayland_client_static",
"libffi",
+ "libcrosvm_android_display_client",
+ ],
+ shared_libs: [
+ "libc++",
+ "libbinder_ndk",
+ "libnativewindow",
],
},
},
@@ -86,7 +96,6 @@
},
},
apex_available: [
- "//apex_available:platform",
"com.android.virt",
],
}
diff --git a/gpu_display/Cargo.toml b/gpu_display/Cargo.toml
index 49b9f38..651e638 100644
--- a/gpu_display/Cargo.toml
+++ b/gpu_display/Cargo.toml
@@ -8,10 +8,15 @@
x = []
kiwi = []
vulkan_display = [ "vulkano", "ash", "rand", "protos", "protobuf", "euclid", "smallvec"]
+# Enables the GPU display backend for Android. The backend uses an Android surface as the backing
+# store.
+android_display = []
+# Stub implementation of the Android display backend. This is only used for building and testing the
+# Android display backend on a non-Android target.
+android_display_stub = []
[dependencies]
anyhow = "*"
-data_model = { path = "../common/data_model" }
libc = "*"
base = { path = "../base" }
linux_input_sys = { path = "../linux_input_sys" }
diff --git a/gpu_display/cargo2android.bp b/gpu_display/cargo2android.bp
index c6f8393..054f9a3 100644
--- a/gpu_display/cargo2android.bp
+++ b/gpu_display/cargo2android.bp
@@ -28,7 +28,6 @@
},
},
apex_available: [
- "//apex_available:platform",
"com.android.virt",
],
}
diff --git a/gpu_display/examples/simple.rs b/gpu_display/examples/simple.rs
index 9741dc4..afc5b1f 100644
--- a/gpu_display/examples/simple.rs
+++ b/gpu_display/examples/simple.rs
@@ -9,6 +9,8 @@
use anyhow::Context;
use anyhow::Result;
use gpu_display::*;
+ use vm_control::gpu::DisplayMode;
+ use vm_control::gpu::DisplayParameters;
pub fn run() -> Result<()> {
let mut disp = GpuDisplay::open_wayland(None::<&str>).context("open_wayland")?;
@@ -16,8 +18,7 @@
.create_surface(
None,
/* scanout_id= */ Some(0),
- 1280,
- 1024,
+ &DisplayParameters::default_with_mode(DisplayMode::Windowed(1280, 1024)),
SurfaceType::Scanout,
)
.context("create_surface")?;
diff --git a/gpu_display/examples/simple_open.rs b/gpu_display/examples/simple_open.rs
index 73bdef7..1bf96a0 100644
--- a/gpu_display/examples/simple_open.rs
+++ b/gpu_display/examples/simple_open.rs
@@ -8,6 +8,8 @@
use anyhow::Result;
use gpu_display::GpuDisplay;
use gpu_display::SurfaceType;
+use vm_control::gpu::DisplayMode;
+use vm_control::gpu::DisplayParameters;
fn run() -> Result<()> {
let mut disp = GpuDisplay::open_x(None::<&str>).context("open_x")?;
@@ -15,8 +17,7 @@
.create_surface(
None,
/* scanout_id= */ Some(0),
- 1280,
- 1024,
+ &DisplayParameters::default_with_mode(DisplayMode::Windowed(1280, 1024)),
SurfaceType::Scanout,
)
.context("create_surface")?;
diff --git a/gpu_display/patches/Android.bp.patch b/gpu_display/patches/Android.bp.patch
index 2285463..1a42124 100644
--- a/gpu_display/patches/Android.bp.patch
+++ b/gpu_display/patches/Android.bp.patch
@@ -1,8 +1,16 @@
diff --git a/gpu_display/Android.bp b/gpu_display/Android.bp
-index 3ce915ad..9d5ce4aa 100644
+index dc5db8549..18a04722e 100644
--- a/gpu_display/Android.bp
+++ b/gpu_display/Android.bp
-@@ -36,5 +36,17 @@ rust_library {
+@@ -27,7 +27,6 @@ rust_library {
+ edition: "2021",
+ features: [
+ "android_display",
+- "android_display_stub",
+ ],
+ rustlibs: [
+ "libanyhow",
+@@ -43,7 +42,26 @@ rust_library {
],
proc_macros: ["libremain"],
static_libs: ["libdisplay_wl"],
@@ -12,12 +20,21 @@
+ target: {
+ host: {
+ shared_libs: ["libwayland_client"],
++ features: ["android_display_stub"],
+ },
+ android: {
+ static_libs: [
+ "libwayland_client_static",
+ "libffi",
++ "libcrosvm_android_display_client",
++ ],
++ shared_libs: [
++ "libc++",
++ "libbinder_ndk",
++ "libnativewindow",
+ ],
+ },
+ },
}
+
+ cc_library_static {
diff --git a/gpu_display/src/event_device.rs b/gpu_display/src/event_device.rs
index 5a6594e..f048078 100644
--- a/gpu_display/src/event_device.rs
+++ b/gpu_display/src/event_device.rs
@@ -5,6 +5,7 @@
use std::collections::VecDeque;
use std::fmt;
use std::io;
+use std::io::Read;
use std::io::Write;
use std::iter::ExactSizeIterator;
@@ -12,12 +13,12 @@
use base::RawDescriptor;
use base::ReadNotifier;
use base::StreamChannel;
-use data_model::zerocopy_from_reader;
use linux_input_sys::virtio_input_event;
use linux_input_sys::InputEventDecoder;
use serde::Deserialize;
use serde::Serialize;
use zerocopy::AsBytes;
+use zerocopy::FromZeroes;
const EVENT_SIZE: usize = virtio_input_event::SIZE;
const EVENT_BUFFER_LEN_MAX: usize = 64 * EVENT_SIZE;
@@ -148,7 +149,9 @@
}
pub fn recv_event_encoded(&self) -> io::Result<virtio_input_event> {
- zerocopy_from_reader::<_, virtio_input_event>(&self.event_socket)
+ let mut event = virtio_input_event::new_zeroed();
+ (&self.event_socket).read_exact(event.as_bytes_mut())?;
+ Ok(event)
}
}
diff --git a/gpu_display/src/generated/xlib.rs b/gpu_display/src/generated/xlib.rs
index 2448a68..4f7f512 100644
--- a/gpu_display/src/generated/xlib.rs
+++ b/gpu_display/src/generated/xlib.rs
@@ -2688,6 +2688,13 @@
arg3: ::std::os::raw::c_long,
) -> ::std::os::raw::c_int;
}
+extern "C" {
+ pub fn XStoreName(
+ arg1: *mut Display,
+ arg2: Window,
+ arg3: *const ::std::os::raw::c_char,
+ ) -> ::std::os::raw::c_int;
+}
#[repr(C)]
#[derive(Copy, Clone)]
pub struct XSizeHints {
diff --git a/gpu_display/src/generated/xlib_generator.sh b/gpu_display/src/generated/xlib_generator.sh
index e26ec98..8730c7b 100755
--- a/gpu_display/src/generated/xlib_generator.sh
+++ b/gpu_display/src/generated/xlib_generator.sh
@@ -56,6 +56,7 @@
--allowlist-function XShmGetEventBase \
--allowlist-function XShmPutImage \
--allowlist-function XShmQueryExtension \
+ --allowlist-function XStoreName \
--allowlist-var 'XK_.*' \
--allowlist-var ButtonPress \
--allowlist-var ButtonPressMask \
diff --git a/gpu_display/src/gpu_display_android.rs b/gpu_display/src/gpu_display_android.rs
new file mode 100644
index 0000000..5ccef07
--- /dev/null
+++ b/gpu_display/src/gpu_display_android.rs
@@ -0,0 +1,240 @@
+// Copyright 2024 The ChromiumOS Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+use std::ffi::c_char;
+use std::ffi::CStr;
+use std::ffi::CString;
+use std::panic::catch_unwind;
+use std::process::abort;
+use std::ptr::NonNull;
+use std::rc::Rc;
+use std::slice;
+
+use base::error;
+use base::AsRawDescriptor;
+use base::Event;
+use base::RawDescriptor;
+use base::VolatileSlice;
+use vm_control::gpu::DisplayParameters;
+
+use crate::DisplayT;
+use crate::GpuDisplayError;
+use crate::GpuDisplayFramebuffer;
+use crate::GpuDisplayResult;
+use crate::GpuDisplaySurface;
+use crate::SurfaceType;
+use crate::SysDisplayT;
+
+// Opaque blob
+#[repr(C)]
+pub(crate) struct AndroidDisplayContext {
+ _data: [u8; 0],
+ _marker: core::marker::PhantomData<(*mut u8, core::marker::PhantomPinned)>,
+}
+
+// Opaque blob
+#[repr(C)]
+pub(crate) struct ANativeWindow {
+ _data: [u8; 0],
+ _marker: core::marker::PhantomData<(*mut u8, core::marker::PhantomPinned)>,
+}
+
+// Should be the same as ANativeWindow_Buffer in android/native_window.h
+// Note that this struct is part of NDK; guaranteed to be stable, so we use it directly across the
+// FFI.
+#[repr(C)]
+pub(crate) struct ANativeWindow_Buffer {
+ width: i32,
+ height: i32,
+ stride: i32, // in number of pixels, NOT bytes
+ format: i32,
+ bits: *mut u8,
+ reserved: [u32; 6],
+}
+
+pub(crate) type ErrorCallback = unsafe extern "C" fn(message: *const c_char);
+
+extern "C" {
+ /// Constructs an AndroidDisplayContext for this backend. This always returns a valid (i.e.
+ /// non-null) handle to the context. The `name` parameter is from the crosvm command line and the
+ /// client of crosvm will use it to locate and communicate to the AndroidDisplayContext. For
+ /// example, this can be a path to UNIX domain socket where a RPC binder server listens on.
+ /// `error_callback` is a function pointer to an error reporting function, and will be used by
+ /// this and other functions below when something goes wrong. The returned context should be
+ /// destroyed by calling `destroy_android_display_context` if this backend is no longer in use.
+ fn create_android_display_context(
+ name: *const c_char,
+ error_callback: ErrorCallback,
+ ) -> *mut AndroidDisplayContext;
+
+ /// Destroys the AndroidDisplayContext created from `create_android_display_context`.
+ fn destroy_android_display_context(self_: *mut AndroidDisplayContext);
+
+ /// Creates an Android Surface (also known as a Window) of the given size. If the surface
+ /// can't be created for whatever reason, null pointer is returned, in which case we shouldn't
+ /// proceed further.
+ fn create_android_surface(
+ ctx: *mut AndroidDisplayContext,
+ width: u32,
+ height: u32,
+ ) -> *mut ANativeWindow;
+
+ /// Destroys the Android surface created from `create_android_surface`.
+ #[allow(dead_code)]
+ fn destroy_android_surface(ctx: *mut AndroidDisplayContext, surface: *mut ANativeWindow);
+
+ /// Obtains one buffer from the given Android Surface. The information about the buffer (buffer
+ /// address, size, stride, etc) is reported via the `ANativeWindow_Buffer` struct. It shouldn't
+ /// be null. The size of the buffer is guaranteed to be bigger than (height * stride * 4) bytes.
+ /// This function locks the buffer for the client, which means the caller has the exclusive
+ /// access to the buffer until it is returned back to Android display stack (surfaceflinger) by
+ /// calling `post_android_surface_buffer`. This function may fail (in which case false is
+ /// returned), then the caller shouldn't try to read `out_buffer` or use the buffer in any way.
+ fn get_android_surface_buffer(
+ ctx: *mut AndroidDisplayContext,
+ surface: *mut ANativeWindow,
+ out_buffer: *mut ANativeWindow_Buffer,
+ ) -> bool;
+
+ /// Posts the buffer obtained from `get_android_surface_buffer` to the Android display system
+ /// so that it can be displayed on the screen. Once this is called, the caller shouldn't use
+ /// the buffer any more.
+ fn post_android_surface_buffer(ctx: *mut AndroidDisplayContext, surface: *mut ANativeWindow);
+}
+
+unsafe extern "C" fn error_callback(message: *const c_char) {
+ catch_unwind(|| {
+ error!(
+ "{}",
+ // SAFETY: message is null terminated
+ unsafe { CStr::from_ptr(message) }.to_string_lossy()
+ )
+ })
+ .unwrap_or_else(|_| abort())
+}
+
+struct AndroidDisplayContextWrapper(NonNull<AndroidDisplayContext>);
+
+impl Drop for AndroidDisplayContextWrapper {
+ fn drop(&mut self) {
+ // SAFETY: this object is constructed from create_android_display_context
+ unsafe { destroy_android_display_context(self.0.as_ptr()) };
+ }
+}
+
+impl Default for ANativeWindow_Buffer {
+ fn default() -> Self {
+ Self {
+ width: 0,
+ height: 0,
+ stride: 0,
+ format: 0,
+ bits: std::ptr::null_mut(),
+ reserved: [0u32; 6],
+ }
+ }
+}
+
+impl From<ANativeWindow_Buffer> for GpuDisplayFramebuffer<'_> {
+ fn from(anb: ANativeWindow_Buffer) -> Self {
+ // TODO: check anb.format to see if it's ARGB8888?
+ // TODO: infer bpp from anb.format?
+ const BYTES_PER_PIXEL: u32 = 4;
+ let stride_bytes = BYTES_PER_PIXEL * u32::try_from(anb.stride).unwrap();
+ let buffer_size = stride_bytes * u32::try_from(anb.height).unwrap();
+ let buffer =
+ // SAFETY: get_android_surface_buffer guarantees that bits points to a valid buffer and
+ // the buffer remains available until post_android_surface_buffer is called.
+ unsafe { slice::from_raw_parts_mut(anb.bits, buffer_size.try_into().unwrap()) };
+ Self::new(VolatileSlice::new(buffer), stride_bytes, BYTES_PER_PIXEL)
+ }
+}
+
+struct AndroidSurface {
+ context: Rc<AndroidDisplayContextWrapper>,
+ surface: NonNull<ANativeWindow>,
+}
+
+impl GpuDisplaySurface for AndroidSurface {
+ fn framebuffer(&mut self) -> Option<GpuDisplayFramebuffer> {
+ let mut anb = ANativeWindow_Buffer::default();
+ // SAFETY: context and surface are opaque handles and buf is used as the out parameter to
+ // hold the return values.
+ let success = unsafe {
+ get_android_surface_buffer(
+ self.context.0.as_ptr(),
+ self.surface.as_ptr(),
+ &mut anb as *mut ANativeWindow_Buffer,
+ )
+ };
+ if success {
+ Some(anb.into())
+ } else {
+ None
+ }
+ }
+
+ fn flip(&mut self) {
+ // SAFETY: context and surface are opaque handles.
+ unsafe { post_android_surface_buffer(self.context.0.as_ptr(), self.surface.as_ptr()) }
+ }
+}
+
+pub struct DisplayAndroid {
+ context: Rc<AndroidDisplayContextWrapper>,
+ /// This event is never triggered and is used solely to fulfill AsRawDescriptor.
+ event: Event,
+}
+
+impl DisplayAndroid {
+ pub fn new(name: &str) -> GpuDisplayResult<DisplayAndroid> {
+ let name = CString::new(name).unwrap();
+ let context = NonNull::new(
+ // SAFETY: name is a valid NUL-terminated string and is not leaked outside of this function
+ unsafe { create_android_display_context(name.as_ptr(), error_callback) },
+ )
+ .ok_or(GpuDisplayError::Unsupported)?;
+ let context = AndroidDisplayContextWrapper(context);
+ let event = Event::new().map_err(|_| GpuDisplayError::CreateEvent)?;
+ Ok(DisplayAndroid {
+ context: context.into(),
+ event,
+ })
+ }
+}
+
+impl DisplayT for DisplayAndroid {
+ fn create_surface(
+ &mut self,
+ parent_surface_id: Option<u32>,
+ _surface_id: u32,
+ _scanout_id: Option<u32>,
+ display_params: &DisplayParameters,
+ _surf_type: SurfaceType,
+ ) -> GpuDisplayResult<Box<dyn GpuDisplaySurface>> {
+ if parent_surface_id.is_some() {
+ return Err(GpuDisplayError::Unsupported);
+ }
+
+ let (requested_width, requested_height) = display_params.get_virtual_display_size();
+ // SAFETY: context is an opaque handle.
+ let surface = NonNull::new(unsafe {
+ create_android_surface(self.context.0.as_ptr(), requested_width, requested_height)
+ })
+ .ok_or(GpuDisplayError::CreateSurface)?;
+
+ Ok(Box::new(AndroidSurface {
+ context: self.context.clone(),
+ surface,
+ }))
+ }
+}
+
+impl SysDisplayT for DisplayAndroid {}
+
+impl AsRawDescriptor for DisplayAndroid {
+ fn as_raw_descriptor(&self) -> RawDescriptor {
+ self.event.as_raw_descriptor()
+ }
+}
diff --git a/gpu_display/src/gpu_display_android_stub.rs b/gpu_display/src/gpu_display_android_stub.rs
new file mode 100644
index 0000000..5484c40
--- /dev/null
+++ b/gpu_display/src/gpu_display_android_stub.rs
@@ -0,0 +1,63 @@
+// Copyright 2024 The ChromiumOS Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+//! Stub implementation of the native interface of libcrosvm_android_display_client
+//!
+//! This implementation is used to enable the gpu display backend for Android to be compiled
+//! without libcrosvm_android_display_client available. It is only used for testing purposes and
+//! not functional at runtime.
+
+use std::ffi::c_char;
+
+use crate::gpu_display_android::ANativeWindow;
+use crate::gpu_display_android::ANativeWindow_Buffer;
+use crate::gpu_display_android::AndroidDisplayContext;
+use crate::gpu_display_android::ErrorCallback;
+
+#[no_mangle]
+extern "C" fn create_android_display_context(
+ _name: *const c_char,
+ _error_callback: ErrorCallback,
+) -> *mut AndroidDisplayContext {
+ unimplemented!();
+}
+
+#[no_mangle]
+extern "C" fn destroy_android_display_context(_ctx: *mut AndroidDisplayContext) {
+ unimplemented!();
+}
+
+#[no_mangle]
+extern "C" fn create_android_surface(
+ _ctx: *mut AndroidDisplayContext,
+ _width: u32,
+ _height: u32,
+) -> *mut ANativeWindow {
+ unimplemented!();
+}
+
+#[no_mangle]
+extern "C" fn destroy_android_surface(
+ _ctx: *mut AndroidDisplayContext,
+ _surface: *mut ANativeWindow,
+) {
+ unimplemented!();
+}
+
+#[no_mangle]
+extern "C" fn get_android_surface_buffer(
+ _ctx: *mut AndroidDisplayContext,
+ _surface: *mut ANativeWindow,
+ _out_buffer: *mut ANativeWindow_Buffer,
+) -> bool {
+    unimplemented!();
+}
+
+#[no_mangle]
+extern "C" fn post_android_surface_buffer(
+ _ctx: *mut AndroidDisplayContext,
+ _surface: *mut ANativeWindow,
+) {
+ unimplemented!();
+}
diff --git a/gpu_display/src/gpu_display_stub.rs b/gpu_display/src/gpu_display_stub.rs
index 5340356..123ab74 100644
--- a/gpu_display/src/gpu_display_stub.rs
+++ b/gpu_display/src/gpu_display_stub.rs
@@ -6,6 +6,7 @@
use base::Event;
use base::RawDescriptor;
use base::VolatileSlice;
+use vm_control::gpu::DisplayParameters;
use crate::DisplayT;
use crate::GpuDisplayError;
@@ -103,14 +104,14 @@
parent_surface_id: Option<u32>,
_surface_id: u32,
_scanout_id: Option<u32>,
- width: u32,
- height: u32,
+ display_params: &DisplayParameters,
_surf_type: SurfaceType,
) -> GpuDisplayResult<Box<dyn GpuDisplaySurface>> {
if parent_surface_id.is_some() {
return Err(GpuDisplayError::Unsupported);
}
+ let (width, height) = display_params.get_virtual_display_size();
Ok(Box::new(StubSurface {
width,
height,
diff --git a/gpu_display/src/gpu_display_win/mod.rs b/gpu_display/src/gpu_display_win/mod.rs
index ebe2dd7..f88e864 100644
--- a/gpu_display/src/gpu_display_win/mod.rs
+++ b/gpu_display/src/gpu_display_win/mod.rs
@@ -36,14 +36,10 @@
use base::RawDescriptor;
use base::ReadNotifier;
use base::SendTube;
-use euclid::size2;
-use euclid::Size2D;
-use math_util::Size2DCheckedCast;
use metrics::sys::windows::Metrics;
pub use surface::Surface;
use sync::Mutex;
use sync::Waitable;
-use vm_control::gpu::DisplayMode;
use vm_control::gpu::DisplayParameters;
use vm_control::ModifyWaitContext;
use window_message_processor::DisplaySendToWndProc;
@@ -72,28 +68,6 @@
pub struct VirtualDisplaySpace;
pub struct HostWindowSpace;
-#[derive(Clone)]
-pub struct DisplayProperties {
- pub start_hidden: bool,
- pub is_fullscreen: bool,
- pub window_width: u32,
- pub window_height: u32,
-}
-
-impl From<&DisplayParameters> for DisplayProperties {
- fn from(params: &DisplayParameters) -> Self {
- let is_fullscreen = matches!(params.mode, DisplayMode::BorderlessFullScreen(_));
- let (window_width, window_height) = params.get_window_size();
-
- Self {
- start_hidden: params.hidden,
- is_fullscreen,
- window_width,
- window_height,
- }
- }
-}
-
pub enum VulkanDisplayWrapper {
Uninitialized,
#[cfg(feature = "vulkan_display")]
@@ -104,7 +78,6 @@
wndproc_thread: Rc<WindowProcedureThread>,
close_requested_event: Event,
win_metrics: Option<Weak<Metrics>>,
- display_properties: DisplayProperties,
is_surface_created: bool,
#[allow(dead_code)]
gpu_display_wait_descriptor_ctrl: SendTube,
@@ -118,7 +91,6 @@
pub fn new(
wndproc_thread: WindowProcedureThread,
win_metrics: Option<Weak<Metrics>>,
- display_properties: DisplayProperties,
gpu_display_wait_descriptor_ctrl: SendTube,
vulkan_display_create_params: Option<VulkanCreateParams>,
) -> Result<DisplayWin, GpuDisplayError> {
@@ -133,7 +105,6 @@
wndproc_thread: Rc::new(wndproc_thread),
close_requested_event,
win_metrics,
- display_properties,
is_surface_created: false,
gpu_display_wait_descriptor_ctrl,
event_device_wait_descriptor_requests: Vec::new(),
@@ -148,10 +119,10 @@
&mut self,
surface_id: u32,
scanout_id: u32,
- virtual_display_size: Size2D<i32, VirtualDisplaySpace>,
+ display_params: &DisplayParameters,
) -> Result<Arc<Mutex<VulkanDisplayWrapper>>> {
+ let display_params_clone = display_params.clone();
let metrics = self.win_metrics.clone();
- let display_properties = self.display_properties.clone();
#[cfg(feature = "vulkan_display")]
let vulkan_create_params = self.vulkan_display_create_params.clone();
// This function should not return until surface creation finishes. Besides, we would like
@@ -216,9 +187,8 @@
Surface::new(
surface_id,
window,
- &virtual_display_size,
metrics,
- &display_properties,
+ &display_params_clone,
display_event_dispatcher,
vulkan_display,
)
@@ -299,8 +269,7 @@
parent_surface_id: Option<u32>,
surface_id: u32,
scanout_id: Option<u32>,
- virtual_display_width: u32,
- virtual_display_height: u32,
+ display_params: &DisplayParameters,
surface_type: SurfaceType,
) -> GpuDisplayResult<Box<dyn GpuDisplaySurface>> {
if parent_surface_id.is_some() {
@@ -316,7 +285,7 @@
let vulkan_display = match self.create_surface_internal(
surface_id,
scanout_id.expect("scanout id is required"),
- size2(virtual_display_width, virtual_display_height).checked_cast(),
+ display_params,
) {
Err(e) => {
error!("Failed to create surface: {:?}", e);
@@ -562,6 +531,7 @@
let wndproc_thread_builder = {
let mut wndproc_thread_builder = wndproc_thread_builder;
wndproc_thread_builder
+ .set_max_num_windows(1)
.set_display_tube(None)
.set_ime_tube(Some(_device_ime_tube));
wndproc_thread_builder
diff --git a/gpu_display/src/gpu_display_win/surface.rs b/gpu_display/src/gpu_display_win/surface.rs
index be58118..6c7491f 100644
--- a/gpu_display/src/gpu_display_win/surface.rs
+++ b/gpu_display/src/gpu_display_win/surface.rs
@@ -24,6 +24,8 @@
use euclid::Size2D;
use metrics::sys::windows::Metrics;
use sync::Mutex;
+use vm_control::gpu::DisplayMode;
+use vm_control::gpu::DisplayParameters;
use win_util::keys_down;
use winapi::shared::minwindef::HIWORD;
use winapi::shared::minwindef::LOWORD;
@@ -48,10 +50,8 @@
use super::window_message_processor::WindowMessage;
use super::window_message_processor::WindowPosMessage;
use super::window_message_processor::HANDLE_WINDOW_MESSAGE_TIMEOUT;
-use super::DisplayProperties;
use super::HostWindowSpace;
use super::MouseMode;
-use super::VirtualDisplaySpace;
use super::VulkanDisplayWrapper;
use crate::EventDeviceKind;
@@ -102,6 +102,29 @@
}
}
+#[allow(dead_code)]
+#[derive(Clone)]
+pub(crate) struct DisplayProperties {
+ pub start_hidden: bool,
+ pub is_fullscreen: bool,
+ pub window_width: u32,
+ pub window_height: u32,
+}
+
+impl From<&DisplayParameters> for DisplayProperties {
+ fn from(params: &DisplayParameters) -> Self {
+ let is_fullscreen = matches!(params.mode, DisplayMode::BorderlessFullScreen(_));
+ let (window_width, window_height) = params.get_window_size();
+
+ Self {
+ start_hidden: params.hidden,
+ is_fullscreen,
+ window_width,
+ window_height,
+ }
+ }
+}
+
pub struct Surface {
surface_id: u32,
mouse_input: MouseInputManager,
@@ -116,9 +139,8 @@
pub fn new(
surface_id: u32,
window: &GuiWindow,
- virtual_display_size: &Size2D<i32, VirtualDisplaySpace>,
_metrics: Option<Weak<Metrics>>,
- display_properties: &DisplayProperties,
+ display_params: &DisplayParameters,
resources: SurfaceResources,
vulkan_display: Arc<Mutex<VulkanDisplayWrapper>>,
) -> Result<Self> {
@@ -130,8 +152,12 @@
);
let initial_host_viewport_size = window.get_client_rect().context(CONTEXT_MESSAGE)?.size;
+ let virtual_display_size = {
+ let (width, height) = display_params.get_virtual_display_size();
+ size2(width, height).checked_cast()
+ };
let virtual_display_manager =
- VirtualDisplayManager::new(&initial_host_viewport_size, virtual_display_size);
+ VirtualDisplayManager::new(&initial_host_viewport_size, &virtual_display_size);
// This will make gfxstream initialize the child window to which it will render.
update_virtual_display_projection(
vulkan_display.lock(),
@@ -156,7 +182,7 @@
mouse_input,
window_manager: WindowManager::new(
window,
- display_properties,
+ &display_params.into(),
initial_host_viewport_size,
gpu_main_display_tube.clone(),
)
diff --git a/gpu_display/src/gpu_display_win/window_manager.rs b/gpu_display/src/gpu_display_win/window_manager.rs
index 0067185..f9ca33d 100644
--- a/gpu_display/src/gpu_display_win/window_manager.rs
+++ b/gpu_display/src/gpu_display_win/window_manager.rs
@@ -8,9 +8,9 @@
use base::Tube;
use super::math_util::Size;
+use super::surface::DisplayProperties;
use super::window::GuiWindow;
use super::window_message_processor::WindowPosMessage;
-use super::DisplayProperties;
pub(crate) struct NoopWindowManager {}
diff --git a/gpu_display/src/gpu_display_win/window_procedure_thread.rs b/gpu_display/src/gpu_display_win/window_procedure_thread.rs
index b84d69b..84253e7 100644
--- a/gpu_display/src/gpu_display_win/window_procedure_thread.rs
+++ b/gpu_display/src/gpu_display_win/window_procedure_thread.rs
@@ -62,11 +62,6 @@
// The default app icon id, which is defined in crosvm-manifest.rc.
const APP_ICON_ID: u16 = 1;
-// The number of GUI windows to pre-create when booting KiwiVM. At most this
-// number of guest displays and host windows can be used concurrently.
-// TODO(b/314984693): The service or the config file should specify this number.
-const MAX_NUM_WINDOWS: usize = 1;
-
#[derive(Debug)]
enum MessageLoopState {
/// The initial state.
@@ -241,13 +236,14 @@
// We don't implement Default for WindowProcedureThreadBuilder so that the builder function
// is the only way to create WindowProcedureThreadBuilder.
WindowProcedureThreadBuilder {
+ max_num_windows: 1,
display_tube: None,
#[cfg(feature = "kiwi")]
ime_tube: None,
}
}
- fn start_thread(gpu_main_display_tube: Option<Tube>) -> Result<Self> {
+ fn start_thread(max_num_windows: u32, gpu_main_display_tube: Option<Tube>) -> Result<Self> {
let (message_router_handle_sender, message_router_handle_receiver) = channel();
let message_loop_state = Arc::new(AtomicI32::new(MessageLoopState::NotStarted as i32));
let close_requested_event = Event::new().unwrap();
@@ -262,6 +258,7 @@
.spawn(move || {
match close_requested_event_clone.try_clone() {
Ok(close_requested_event) => Self::run_message_loop(
+ max_num_windows,
message_router_handle_sender,
message_loop_state_clone,
gpu_main_display_tube,
@@ -337,6 +334,7 @@
}
fn run_message_loop(
+ max_num_windows: u32,
message_router_handle_sender: Sender<Result<u32>>,
message_loop_state: Arc<AtomicI32>,
gpu_main_display_tube: Option<Tube>,
@@ -346,14 +344,16 @@
// SAFETY:
// Safe because the dispatcher will take care of the lifetime of the `MessageOnlyWindow` and
// `GuiWindow` objects.
- match unsafe { Self::create_windows() }.and_then(|(message_router_window, gui_windows)| {
- WindowMessageDispatcher::new(
- message_router_window,
- gui_windows,
- gpu_main_display_tube.clone(),
- close_requested_event,
- )
- }) {
+ match unsafe { Self::create_windows(max_num_windows) }.and_then(
+ |(message_router_window, gui_windows)| {
+ WindowMessageDispatcher::new(
+ message_router_window,
+ gui_windows,
+ gpu_main_display_tube.clone(),
+ close_requested_event,
+ )
+ },
+ ) {
Ok(dispatcher) => {
info!("WndProc thread entering message loop");
message_loop_state.store(MessageLoopState::Running as i32, Ordering::SeqCst);
@@ -542,7 +542,7 @@
/// # Safety
/// The owner of the returned window objects is responsible for dropping them before we finish
/// processing `WM_NCDESTROY`, because the window handle will become invalid afterwards.
- unsafe fn create_windows() -> Result<(MessageOnlyWindow, Vec<GuiWindow>)> {
+ unsafe fn create_windows(max_num_windows: u32) -> Result<(MessageOnlyWindow, Vec<GuiWindow>)> {
let message_router_window = MessageOnlyWindow::new(
/* class_name */
Self::get_window_class_name::<MessageOnlyWindow>()
@@ -559,10 +559,10 @@
// window may use the background brush to clear the gfxstream window client area when
// drawing occurs. This caused the screen flickering issue during resizing.
// See b/197786842 for details.
- let mut gui_windows = Vec::with_capacity(MAX_NUM_WINDOWS);
- for scanout_id in 0..MAX_NUM_WINDOWS {
+ let mut gui_windows = Vec::with_capacity(max_num_windows as usize);
+ for scanout_id in 0..max_num_windows {
gui_windows.push(GuiWindow::new(
- scanout_id as u32,
+ scanout_id,
/* class_name */
Self::get_window_class_name::<GuiWindow>()
.with_context(|| {
@@ -650,12 +650,18 @@
#[derive(Deserialize, Serialize)]
pub struct WindowProcedureThreadBuilder {
+ max_num_windows: u32,
display_tube: Option<Tube>,
#[cfg(feature = "kiwi")]
ime_tube: Option<Tube>,
}
impl WindowProcedureThreadBuilder {
+ pub fn set_max_num_windows(&mut self, max_num_windows: u32) -> &mut Self {
+ self.max_num_windows = max_num_windows;
+ self
+ }
+
pub fn set_display_tube(&mut self, display_tube: Option<Tube>) -> &mut Self {
self.display_tube = display_tube;
self
@@ -675,10 +681,16 @@
pub fn start_thread(self) -> Result<WindowProcedureThread> {
cfg_if::cfg_if! {
if #[cfg(feature = "kiwi")] {
- let ime_tube = self.ime_tube.ok_or_else(|| anyhow!("The ime tube is not set."))?;
- WindowProcedureThread::start_thread(self.display_tube, ime_tube)
+ let ime_tube = self
+ .ime_tube
+ .ok_or_else(|| anyhow!("The ime tube is not set."))?;
+ WindowProcedureThread::start_thread(
+ self.max_num_windows,
+ self.display_tube,
+ ime_tube,
+ )
} else {
- WindowProcedureThread::start_thread(None)
+ WindowProcedureThread::start_thread(self.max_num_windows, None)
}
}
}
diff --git a/gpu_display/src/gpu_display_wl.rs b/gpu_display/src/gpu_display_wl.rs
index 61a7f9b..c163ee7 100644
--- a/gpu_display/src/gpu_display_wl.rs
+++ b/gpu_display/src/gpu_display_wl.rs
@@ -32,6 +32,7 @@
use dwl::*;
use linux_input_sys::virtio_input_event;
use sync::Waitable;
+use vm_control::gpu::DisplayParameters;
use crate::DisplayExternalResourceImport;
use crate::DisplayT;
@@ -376,12 +377,12 @@
parent_surface_id: Option<u32>,
surface_id: u32,
scanout_id: Option<u32>,
- width: u32,
- height: u32,
+ display_params: &DisplayParameters,
surf_type: SurfaceType,
) -> GpuDisplayResult<Box<dyn GpuDisplaySurface>> {
let parent_id = parent_surface_id.unwrap_or(0);
+ let (width, height) = display_params.get_virtual_display_size();
let row_size = width * BYTES_PER_PIXEL;
let fb_size = row_size * height;
let buffer_size = round_up_to_page_size(fb_size as usize * BUFFER_COUNT);
diff --git a/gpu_display/src/gpu_display_x.rs b/gpu_display/src/gpu_display_x.rs
index 56b68cd..f7560a1 100644
--- a/gpu_display/src/gpu_display_x.rs
+++ b/gpu_display/src/gpu_display_x.rs
@@ -34,6 +34,7 @@
use libc::IPC_PRIVATE;
use libc::IPC_RMID;
use linux_input_sys::virtio_input_event;
+use vm_control::gpu::DisplayParameters;
use crate::keycode_converter::KeycodeTranslator;
use crate::keycode_converter::KeycodeTypes;
@@ -699,8 +700,7 @@
parent_surface_id: Option<u32>,
_surface_id: u32,
_scanout_id: Option<u32>,
- width: u32,
- height: u32,
+ display_params: &DisplayParameters,
_surf_type: SurfaceType,
) -> GpuDisplayResult<Box<dyn GpuDisplaySurface>> {
if parent_surface_id.is_some() {
@@ -710,6 +710,7 @@
// TODO(b/315870313): Add safety comment
#[allow(clippy::undocumented_unsafe_blocks)]
unsafe {
+ let (width, height) = display_params.get_virtual_display_size();
let depth = xlib::XDefaultDepthOfScreen(self.screen.as_ptr()) as u32;
let black_pixel = xlib::XBlackPixelOfScreen(self.screen.as_ptr());
@@ -726,6 +727,12 @@
black_pixel,
);
+ xlib::XStoreName(
+ self.display.as_ptr(),
+ window,
+ CStr::from_bytes_with_nul(b"crosvm\0").unwrap().as_ptr(),
+ );
+
let gc = xlib::XCreateGC(self.display.as_ptr(), window, 0, null_mut());
// Because the event is from an extension, its type must be calculated dynamically.
diff --git a/gpu_display/src/lib.rs b/gpu_display/src/lib.rs
index 76b9afe..49656fb 100644
--- a/gpu_display/src/lib.rs
+++ b/gpu_display/src/lib.rs
@@ -22,11 +22,16 @@
use serde::Serialize;
use sync::Waitable;
use thiserror::Error;
+use vm_control::gpu::DisplayParameters;
use vm_control::gpu::MouseMode;
#[cfg(feature = "vulkan_display")]
use vulkano::VulkanLibrary;
mod event_device;
+#[cfg(feature = "android_display")]
+mod gpu_display_android;
+#[cfg(feature = "android_display_stub")]
+mod gpu_display_android_stub;
mod gpu_display_stub;
#[cfg(windows)]
mod gpu_display_win;
@@ -43,8 +48,6 @@
pub use event_device::EventDevice;
pub use event_device::EventDeviceKind;
#[cfg(windows)]
-pub use gpu_display_win::DisplayProperties as WinDisplayProperties;
-#[cfg(windows)]
pub use gpu_display_win::WindowProcedureThread;
#[cfg(windows)]
pub use gpu_display_win::WindowProcedureThreadBuilder;
@@ -96,6 +99,9 @@
/// Failed to import a buffer to the compositor.
#[error("failed to import a buffer to the compositor")]
FailedImport,
+ /// Android display service name is invalid.
+ #[error("invalid Android display service name: {0}")]
+ InvalidAndroidDisplayServiceName(String),
/// The import ID is invalid.
#[error("invalid import ID")]
InvalidImportId,
@@ -312,8 +318,7 @@
parent_surface_id: Option<u32>,
surface_id: u32,
scanout_id: Option<u32>,
- width: u32,
- height: u32,
+ display_params: &DisplayParameters,
surf_type: SurfaceType,
) -> GpuDisplayResult<Box<dyn GpuDisplaySurface>>;
@@ -440,6 +445,27 @@
Err(GpuDisplayError::Unsupported)
}
+ pub fn open_android(service_name: &str) -> GpuDisplayResult<GpuDisplay> {
+ let _ = service_name;
+ #[cfg(feature = "android_display")]
+ {
+ let display = gpu_display_android::DisplayAndroid::new(service_name)?;
+
+ let wait_ctx = WaitContext::new()?;
+ wait_ctx.add(&display, DisplayEventToken::Display)?;
+
+ Ok(GpuDisplay {
+ inner: Box::new(display),
+ next_id: 1,
+ event_devices: Default::default(),
+ surfaces: Default::default(),
+ wait_ctx,
+ })
+ }
+ #[cfg(not(feature = "android_display"))]
+ Err(GpuDisplayError::Unsupported)
+ }
+
pub fn open_stub() -> GpuDisplayResult<GpuDisplay> {
let display = gpu_display_stub::DisplayStub::new()?;
let wait_ctx = WaitContext::new()?;
@@ -536,8 +562,7 @@
&mut self,
parent_surface_id: Option<u32>,
scanout_id: Option<u32>,
- width: u32,
- height: u32,
+ display_params: &DisplayParameters,
surf_type: SurfaceType,
) -> GpuDisplayResult<u32> {
if let Some(parent_id) = parent_surface_id {
@@ -551,8 +576,7 @@
parent_surface_id,
new_surface_id,
scanout_id,
- width,
- height,
+ display_params,
surf_type,
)?;
diff --git a/gpu_display/src/sys/windows.rs b/gpu_display/src/sys/windows.rs
index 96ad106..bb9feb4 100644
--- a/gpu_display/src/sys/windows.rs
+++ b/gpu_display/src/sys/windows.rs
@@ -11,7 +11,6 @@
use base::WaitContext;
use metrics::sys::windows::Metrics;
-use crate::gpu_display_win::DisplayProperties;
use crate::gpu_display_win::DisplayWin;
use crate::DisplayEventToken;
use crate::DisplayT;
@@ -65,7 +64,6 @@
fn open_winapi(
wndproc_thread: WindowProcedureThread,
win_metrics: Option<Weak<Metrics>>,
- display_properties: DisplayProperties,
gpu_display_wait_descriptor_ctrl: SendTube,
vulkan_display_create_params: Option<VulkanCreateParams>,
) -> GpuDisplayResult<GpuDisplay>;
@@ -75,14 +73,12 @@
fn open_winapi(
wndproc_thread: WindowProcedureThread,
win_metrics: Option<Weak<Metrics>>,
- display_properties: DisplayProperties,
gpu_display_wait_descriptor_ctrl: SendTube,
vulkan_display_create_params: Option<VulkanCreateParams>,
) -> GpuDisplayResult<GpuDisplay> {
let display = DisplayWin::new(
wndproc_thread,
win_metrics,
- display_properties,
gpu_display_wait_descriptor_ctrl,
vulkan_display_create_params,
)?;
diff --git a/hypervisor/Android.bp b/hypervisor/Android.bp
index a4409ba..896f91e 100644
--- a/hypervisor/Android.bp
+++ b/hypervisor/Android.bp
@@ -1,14 +1,11 @@
// This file is generated by cargo_embargo.
-// Do not modify this file as most changes will be overridden on upgrade.
+// Do not modify this file after the first "rust_*" or "genrule" module
+// because the changes will be overridden on upgrade.
// Content before the first "rust_*" or "genrule" module is preserved.
package {
- // See: http://go/android-license-faq
- // A large-scale-change added 'default_applicable_licenses' to import
- // all of the 'license_kinds' from "external_crosvm_license"
- // to get the below license kinds:
- // SPDX-license-identifier-BSD
default_applicable_licenses: ["external_crosvm_license"],
+ default_team: "trendy_team_foundation_security_rust_pkvm_",
}
rust_test {
@@ -103,6 +100,52 @@
}
rust_test {
+ name: "hypervisor_test_tests_hypervisor_virtualization",
+ defaults: ["crosvm_inner_defaults"],
+ host_supported: true,
+ crate_name: "hypervisor_virtualization",
+ cargo_env_compat: true,
+ cargo_pkg_version: "0.1.0",
+ srcs: ["tests/hypervisor_virtualization.rs"],
+ test_suites: ["general-tests"],
+ auto_gen_config: true,
+ test_options: {
+ unit_test: false,
+ },
+ edition: "2021",
+ features: [
+ "gdb",
+ "gdbstub",
+ "gdbstub_arch",
+ "geniezone",
+ "gunyah",
+ ],
+ rustlibs: [
+ "libanyhow",
+ "libbase_rust",
+ "libbit_field",
+ "libbitflags",
+ "libcros_fdt",
+ "libdata_model",
+ "libdowncast_rs",
+ "libfnv",
+ "libgdbstub",
+ "libgdbstub_arch",
+ "libhypervisor",
+ "libkvm",
+ "libkvm_sys",
+ "liblibc",
+ "libmemoffset",
+ "libonce_cell",
+ "libserde",
+ "libserde_json",
+ "libsync_rust",
+ "libvm_memory",
+ ],
+ proc_macros: ["libenumn"],
+}
+
+rust_test {
name: "hypervisor_test_tests_kvm_main",
defaults: ["crosvm_inner_defaults"],
host_supported: true,
diff --git a/hypervisor/src/aarch64.rs b/hypervisor/src/aarch64.rs
index 14ed523..c838196 100644
--- a/hypervisor/src/aarch64.rs
+++ b/hypervisor/src/aarch64.rs
@@ -114,6 +114,23 @@
/// Gets the value of a register on this VCPU.
fn get_one_reg(&self, reg_id: VcpuRegAArch64) -> Result<u64>;
+ /// Sets the value of a Neon vector register (V0-V31) on this VCPU.
+ fn set_vector_reg(&self, reg_num: u8, data: u128) -> Result<()>;
+
+ /// Gets the value of a Neon vector register (V0-V31) on this VCPU.
+ fn get_vector_reg(&self, reg_num: u8) -> Result<u128>;
+
+ /// Gets the value of MPIDR_EL1 on this VCPU.
+ fn get_mpidr(&self) -> Result<u64> {
+ const RES1: u64 = 1 << 31;
+
+ // Assume that MPIDR_EL1.{U,MT} = {0,0}.
+
+ let aff = u64::try_from(self.id()).unwrap();
+
+ Ok(RES1 | aff)
+ }
+
/// Gets the current PSCI version.
fn get_psci_version(&self) -> Result<PsciVersion>;
diff --git a/hypervisor/src/geniezone/mod.rs b/hypervisor/src/geniezone/mod.rs
index 07bdeeb..3440b63 100644
--- a/hypervisor/src/geniezone/mod.rs
+++ b/hypervisor/src/geniezone/mod.rs
@@ -445,6 +445,14 @@
self.get_one_geniezone_reg_u64(GeniezoneVcpuRegister::from(reg_id))
}
+ fn set_vector_reg(&self, _reg_num: u8, _data: u128) -> Result<()> {
+ unimplemented!()
+ }
+
+ fn get_vector_reg(&self, _reg_num: u8) -> Result<u128> {
+ unimplemented!()
+ }
+
fn get_psci_version(&self) -> Result<PsciVersion> {
Ok(PSCI_0_2)
}
diff --git a/hypervisor/src/gunyah/aarch64.rs b/hypervisor/src/gunyah/aarch64.rs
index 3133d01..c40c363 100644
--- a/hypervisor/src/gunyah/aarch64.rs
+++ b/hypervisor/src/gunyah/aarch64.rs
@@ -213,6 +213,14 @@
Err(Error::new(ENOTSUP))
}
+ fn set_vector_reg(&self, _reg_num: u8, _data: u128) -> Result<()> {
+ unimplemented!()
+ }
+
+ fn get_vector_reg(&self, _reg_num: u8) -> Result<u128> {
+ unimplemented!()
+ }
+
fn get_psci_version(&self) -> Result<PsciVersion> {
Ok(PSCI_0_2)
}
diff --git a/hypervisor/src/haxm/vcpu.rs b/hypervisor/src/haxm/vcpu.rs
index 9161899..700cbb0 100644
--- a/hypervisor/src/haxm/vcpu.rs
+++ b/hypervisor/src/haxm/vcpu.rs
@@ -5,6 +5,7 @@
use core::ffi::c_void;
use std::arch::x86_64::CpuidResult;
use std::cmp::min;
+use std::collections::BTreeMap;
use std::intrinsics::copy_nonoverlapping;
use std::mem::size_of;
@@ -27,9 +28,6 @@
use vm_memory::GuestAddress;
use super::*;
-use crate::get_tsc_offset_from_msr;
-use crate::set_tsc_offset_via_msr;
-use crate::set_tsc_value_via_msr;
use crate::CpuId;
use crate::CpuIdEntry;
use crate::DebugRegs;
@@ -38,7 +36,6 @@
use crate::HypervHypercall;
use crate::IoOperation;
use crate::IoParams;
-use crate::Register;
use crate::Regs;
use crate::Segment;
use crate::Sregs;
@@ -112,13 +109,7 @@
}
// Also read efer MSR
- let mut efer = vec![Register {
- id: IA32_EFER,
- value: 0,
- }];
-
- self.get_msrs(&mut efer)?;
- state._efer = efer[0].value as u32;
+ state._efer = self.get_msr(IA32_EFER)? as u32;
Ok(VcpuState { state })
}
@@ -131,12 +122,7 @@
}
// Also set efer MSR
- let efer = vec![Register {
- id: IA32_EFER,
- value: state.state._efer as u64,
- }];
-
- self.set_msrs(&efer)
+ self.set_msr(IA32_EFER, state.state._efer as u64)
}
}
@@ -481,78 +467,53 @@
}
/// Gets the VCPU extended control registers.
- fn get_xcrs(&self) -> Result<Vec<Register>> {
+ fn get_xcrs(&self) -> Result<BTreeMap<u32, u64>> {
+ // Haxm does not support getting XCRs
+ Err(Error::new(libc::ENXIO))
+ }
+
+ /// Sets a VCPU extended control register.
+ fn set_xcr(&self, _xcr_index: u32, _value: u64) -> Result<()> {
// Haxm does not support setting XCRs
Err(Error::new(libc::ENXIO))
}
- /// Sets the VCPU extended control registers.
- fn set_xcrs(&self, _xcrs: &[Register]) -> Result<()> {
- // Haxm does not support setting XCRs
- Err(Error::new(libc::ENXIO))
- }
+ /// Gets the value of one model-specific register.
+ fn get_msr(&self, msr_index: u32) -> Result<u64> {
+ let mut msr_data = hax_msr_data {
+ nr_msr: 1,
+ ..Default::default()
+ };
+ msr_data.entries[0].entry = u64::from(msr_index);
- /// Gets the model-specific registers. `msrs` specifies the MSR indexes to be queried, and
- /// on success contains their indexes and values.
- fn get_msrs(&self, msrs: &mut Vec<Register>) -> Result<()> {
- // HAX_VCPU_IOCTL_GET_MSRS only allows you to set HAX_MAX_MSR_ARRAY-1 msrs at a time
- // TODO (b/163811378): the fact that you can only set HAX_MAX_MSR_ARRAY-1 seems like a
- // bug with HAXM, since the entries array itself is HAX_MAX_MSR_ARRAY long
- for chunk in msrs.chunks_mut((HAX_MAX_MSR_ARRAY - 1) as usize) {
- let chunk_size = chunk.len();
- let hax_chunk: Vec<vmx_msr> = chunk.iter().map(vmx_msr::from).collect();
-
- let mut msr_data = hax_msr_data {
- nr_msr: chunk_size as u16,
- ..Default::default()
- };
-
- // Copy chunk into msr_data
- msr_data.entries[..chunk_size].copy_from_slice(&hax_chunk);
-
- // TODO(b/315998194): Add safety comment
- #[allow(clippy::undocumented_unsafe_blocks)]
- let ret = unsafe { ioctl_with_mut_ref(self, HAX_VCPU_IOCTL_GET_MSRS(), &mut msr_data) };
- if ret != 0 {
- return errno_result();
- }
-
- // copy values we got from kernel
- for (i, item) in chunk.iter_mut().enumerate().take(chunk_size) {
- item.value = msr_data.entries[i].value;
- }
+ // TODO(b/315998194): Add safety comment
+ #[allow(clippy::undocumented_unsafe_blocks)]
+ let ret = unsafe { ioctl_with_mut_ref(self, HAX_VCPU_IOCTL_GET_MSRS(), &mut msr_data) };
+ if ret != 0 {
+ return errno_result();
}
- Ok(())
+ Ok(msr_data.entries[0].value)
}
- fn get_all_msrs(&self) -> Result<Vec<Register>> {
+ fn get_all_msrs(&self) -> Result<BTreeMap<u32, u64>> {
Err(Error::new(EOPNOTSUPP))
}
- /// Sets the model-specific registers.
- fn set_msrs(&self, msrs: &[Register]) -> Result<()> {
- // HAX_VCPU_IOCTL_GET_MSRS only allows you to set HAX_MAX_MSR_ARRAY-1 msrs at a time
- // TODO (b/163811378): the fact that you can only set HAX_MAX_MSR_ARRAY-1 seems like a
- // bug with HAXM, since the entries array itself is HAX_MAX_MSR_ARRAY long
- for chunk in msrs.chunks((HAX_MAX_MSR_ARRAY - 1) as usize) {
- let chunk_size = chunk.len();
- let hax_chunk: Vec<vmx_msr> = chunk.iter().map(vmx_msr::from).collect();
+ /// Sets the value of one model-specific register.
+ fn set_msr(&self, msr_index: u32, value: u64) -> Result<()> {
+ let mut msr_data = hax_msr_data {
+ nr_msr: 1,
+ ..Default::default()
+ };
+ msr_data.entries[0].entry = u64::from(msr_index);
+ msr_data.entries[0].value = value;
- let mut msr_data = hax_msr_data {
- nr_msr: chunk_size as u16,
- ..Default::default()
- };
-
- // Copy chunk into msr_data
- msr_data.entries[..chunk_size].copy_from_slice(&hax_chunk);
-
- // TODO(b/315998194): Add safety comment
- #[allow(clippy::undocumented_unsafe_blocks)]
- let ret = unsafe { ioctl_with_mut_ref(self, HAX_VCPU_IOCTL_SET_MSRS(), &mut msr_data) };
- if ret != 0 {
- return errno_result();
- }
+ // TODO(b/315998194): Add safety comment
+ #[allow(clippy::undocumented_unsafe_blocks)]
+ let ret = unsafe { ioctl_with_mut_ref(self, HAX_VCPU_IOCTL_SET_MSRS(), &mut msr_data) };
+ if ret != 0 {
+ return errno_result();
}
Ok(())
@@ -606,16 +567,6 @@
Err(Error::new(ENOENT))
}
- fn get_tsc_offset(&self) -> Result<u64> {
- // Use the default MSR-based implementation
- get_tsc_offset_from_msr(self)
- }
-
- fn set_tsc_offset(&self, offset: u64) -> Result<()> {
- // Use the default MSR-based implementation
- set_tsc_offset_via_msr(self, offset)
- }
-
fn restore_timekeeping(&self, _host_tsc_reference_moment: u64, tsc_offset: u64) -> Result<()> {
// HAXM sets TSC_OFFSET based on what we set TSC to; however, it does
// not yet handle syncing. This means it computes
@@ -630,10 +581,6 @@
// offset directly.)
self.set_tsc_offset(tsc_offset)
}
-
- fn set_tsc_value(&self, value: u64) -> Result<()> {
- set_tsc_value_via_msr(self, value)
- }
}
struct VcpuState {
@@ -1004,24 +951,6 @@
}
}
-impl From<&vmx_msr> for Register {
- fn from(item: &vmx_msr) -> Register {
- Register {
- id: item.entry as u32,
- value: item.value,
- }
- }
-}
-
-impl From<&Register> for vmx_msr {
- fn from(item: &Register) -> vmx_msr {
- vmx_msr {
- entry: item.id as u64,
- value: item.value,
- }
- }
-}
-
// TODO(b:241252288): Enable tests disabled with dummy feature flag - enable_haxm_tests.
#[cfg(test)]
#[cfg(feature = "enable_haxm_tests")]
@@ -1063,42 +992,25 @@
}
#[test]
- fn set_many_msrs() {
+ fn set_msr() {
let haxm = Haxm::new().expect("failed to instantiate HAXM");
let mem =
GuestMemory::new(&[(GuestAddress(0), 0x1000)]).expect("failed to create guest memory");
let vm = HaxmVm::new(&haxm, mem).expect("failed to create vm");
let vcpu = vm.create_vcpu(0).expect("failed to create vcpu");
- let mut registers: Vec<Register> = Vec::new();
- for id in 0x300..0x3ff {
- registers.push(Register {
- id: 38,
- value: id as u64,
- });
- }
-
- vcpu.set_msrs(®isters).expect("failed to set registers");
+ vcpu.set_msr(38, 0x300).expect("failed to set MSR");
}
#[test]
- fn get_many_msrs() {
+ fn get_msr() {
let haxm = Haxm::new().expect("failed to instantiate HAXM");
let mem =
GuestMemory::new(&[(GuestAddress(0), 0x1000)]).expect("failed to create guest memory");
let vm = HaxmVm::new(&haxm, mem).expect("failed to create vm");
let vcpu = vm.create_vcpu(0).expect("failed to create vcpu");
- let mut registers: Vec<Register> = Vec::new();
- for id in 0x300..0x3ff {
- registers.push(Register {
- id: 38,
- value: id as u64,
- });
- }
-
- vcpu.get_msrs(&mut registers)
- .expect("failed to get registers");
+ let _value = vcpu.get_msr(38).expect("failed to get MSR");
}
#[test]
@@ -1148,21 +1060,17 @@
assert_eq!(sregs.efer, EFER_LMA | EFER_LME);
// IA32_EFER register value should match
- let mut efer_reg = vec![Register {
- id: IA32_EFER,
- value: 0,
- }];
- vcpu.get_msrs(&mut efer_reg).expect("failed to get msrs");
- assert_eq!(efer_reg[0].value, EFER_LMA | EFER_LME);
+ let efer = vcpu.get_msr(IA32_EFER).expect("failed to get msr");
+ assert_eq!(efer, EFER_LMA | EFER_LME);
// Enable SCE via set_msrs
- efer_reg[0].value |= EFER_SCE;
- vcpu.set_msrs(&efer_reg).expect("failed to set msrs");
+ vcpu.set_msr(IA32_EFER, efer | EFER_SCE)
+ .expect("failed to set msr");
// Verify that setting stuck
let sregs = vcpu.get_sregs().expect("failed to get sregs");
assert_eq!(sregs.efer, EFER_SCE | EFER_LME | EFER_LMA);
- vcpu.get_msrs(&mut efer_reg).expect("failed to get msrs");
- assert_eq!(efer_reg[0].value, EFER_SCE | EFER_LME | EFER_LMA);
+        let new_efer = vcpu.get_msr(IA32_EFER).expect("failed to get msr");
+ assert_eq!(new_efer, EFER_SCE | EFER_LME | EFER_LMA);
}
}
diff --git a/hypervisor/src/kvm/aarch64.rs b/hypervisor/src/kvm/aarch64.rs
index 19c4302..5d1a2b8 100644
--- a/hypervisor/src/kvm/aarch64.rs
+++ b/hypervisor/src/kvm/aarch64.rs
@@ -257,6 +257,10 @@
self.set_one_kvm_reg(kvm_reg_id, data.to_ne_bytes().as_slice())
}
+ fn set_one_kvm_reg_u128(&self, kvm_reg_id: KvmVcpuRegister, data: u128) -> Result<()> {
+ self.set_one_kvm_reg(kvm_reg_id, data.to_ne_bytes().as_slice())
+ }
+
fn set_one_kvm_reg(&self, kvm_reg_id: KvmVcpuRegister, data: &[u8]) -> Result<()> {
let onereg = kvm_one_reg {
id: kvm_reg_id.into(),
@@ -281,6 +285,12 @@
Ok(u64::from_ne_bytes(bytes))
}
+ fn get_one_kvm_reg_u128(&self, kvm_reg_id: KvmVcpuRegister) -> Result<u128> {
+ let mut bytes = 0u128.to_ne_bytes();
+ self.get_one_kvm_reg(kvm_reg_id, bytes.as_mut_slice())?;
+ Ok(u128::from_ne_bytes(bytes))
+ }
+
fn get_one_kvm_reg(&self, kvm_reg_id: KvmVcpuRegister, data: &mut [u8]) -> Result<()> {
let onereg = kvm_one_reg {
id: kvm_reg_id.into(),
@@ -307,22 +317,12 @@
self.set_one_kvm_reg(kvm_reg_id, data.to_ne_bytes().as_slice())
}
- fn set_one_kvm_reg_u128(&self, kvm_reg_id: KvmVcpuRegister, data: u128) -> Result<()> {
- self.set_one_kvm_reg(kvm_reg_id, data.to_ne_bytes().as_slice())
- }
-
fn get_one_kvm_reg_u32(&self, kvm_reg_id: KvmVcpuRegister) -> Result<u32> {
let mut bytes = 0u32.to_ne_bytes();
self.get_one_kvm_reg(kvm_reg_id, bytes.as_mut_slice())?;
Ok(u32::from_ne_bytes(bytes))
}
- fn get_one_kvm_reg_u128(&self, kvm_reg_id: KvmVcpuRegister) -> Result<u128> {
- let mut bytes = 0u128.to_ne_bytes();
- self.get_one_kvm_reg(kvm_reg_id, bytes.as_mut_slice())?;
- Ok(u128::from_ne_bytes(bytes))
- }
-
/// Retrieves the value of the currently active "version" of a multiplexed registers.
fn demux_register(&self, reg: &<GdbArch as Arch>::RegId) -> Result<Option<KvmVcpuRegister>> {
match *reg {
@@ -381,12 +381,24 @@
}
impl KvmVcpuRegister {
+ pub const MPIDR_EL1: Self = Self::from_encoding(0b11, 0b000, 0b0000, 0b0000, 0b101);
+
// Firmware pseudo-registers are part of the ARM KVM interface:
// https://docs.kernel.org/virt/kvm/arm/hypercalls.html
pub const PSCI_VERSION: Self = Self::Firmware(0);
pub const SMCCC_ARCH_WORKAROUND_1: Self = Self::Firmware(1);
pub const SMCCC_ARCH_WORKAROUND_2: Self = Self::Firmware(2);
pub const SMCCC_ARCH_WORKAROUND_3: Self = Self::Firmware(3);
+
+ const fn from_encoding(op0: u8, op1: u8, crn: u8, crm: u8, op2: u8) -> Self {
+ let op0 = (op0 as u16 & 0b11) << 14;
+ let op1 = (op1 as u16 & 0b111) << 11;
+ let crn = (crn as u16 & 0b1111) << 7;
+ let crm = (crm as u16 & 0b1111) << 3;
+ let op2 = op2 as u16 & 0b111;
+
+ Self::System(op0 | op1 | crn | crm | op2)
+ }
}
/// Gives the `u64` register ID expected by the `GET_ONE_REG`/`SET_ONE_REG` ioctl API.
@@ -680,6 +692,24 @@
self.get_one_kvm_reg_u64(KvmVcpuRegister::from(reg_id))
}
+ fn set_vector_reg(&self, reg_num: u8, data: u128) -> Result<()> {
+ if reg_num > 31 {
+ return Err(Error::new(EINVAL));
+ }
+ self.set_one_kvm_reg_u128(KvmVcpuRegister::V(reg_num), data)
+ }
+
+ fn get_vector_reg(&self, reg_num: u8) -> Result<u128> {
+ if reg_num > 31 {
+ return Err(Error::new(EINVAL));
+ }
+ self.get_one_kvm_reg_u128(KvmVcpuRegister::V(reg_num))
+ }
+
+ fn get_mpidr(&self) -> Result<u64> {
+ self.get_one_kvm_reg_u64(KvmVcpuRegister::MPIDR_EL1)
+ }
+
fn get_psci_version(&self) -> Result<PsciVersion> {
let version = if let Ok(v) = self.get_one_kvm_reg_u64(KvmVcpuRegister::PSCI_VERSION) {
let v = u32::try_from(v).map_err(|_| Error::new(EINVAL))?;
@@ -782,7 +812,7 @@
self.set_one_kvm_reg_u64(KvmVcpuRegister::Pstate, pstate)?;
for (i, reg) in regs.v.iter().enumerate() {
let n = u8::try_from(i).expect("invalid Vn general purpose register index");
- self.set_one_kvm_reg_u128(KvmVcpuRegister::V(n), *reg)?;
+ self.set_vector_reg(n, *reg)?;
}
self.set_one_kvm_reg_u32(KvmVcpuRegister::Fpcr, regs.fpcr)?;
self.set_one_kvm_reg_u32(KvmVcpuRegister::Fpsr, regs.fpsr)?;
@@ -806,7 +836,7 @@
regs.cpsr = self.get_one_kvm_reg_u64(KvmVcpuRegister::Pstate)? as u32;
for (i, reg) in regs.v.iter_mut().enumerate() {
let n = u8::try_from(i).expect("invalid Vn general purpose register index");
- *reg = self.get_one_kvm_reg_u128(KvmVcpuRegister::V(n))?;
+ *reg = self.get_vector_reg(n)?;
}
regs.fpcr = self.get_one_kvm_reg_u32(KvmVcpuRegister::Fpcr)?;
regs.fpsr = self.get_one_kvm_reg_u32(KvmVcpuRegister::Fpsr)?;
diff --git a/hypervisor/src/kvm/x86_64.rs b/hypervisor/src/kvm/x86_64.rs
index 3ccb123..65f3701 100644
--- a/hypervisor/src/kvm/x86_64.rs
+++ b/hypervisor/src/kvm/x86_64.rs
@@ -3,6 +3,7 @@
// found in the LICENSE file.
use std::arch::x86_64::CpuidResult;
+use std::collections::BTreeMap;
use base::errno_result;
use base::error;
@@ -30,10 +31,7 @@
use super::Kvm;
use super::KvmVcpu;
use super::KvmVm;
-use crate::get_tsc_offset_from_msr;
use crate::host_phys_addr_bits;
-use crate::set_tsc_offset_via_msr;
-use crate::set_tsc_value_via_msr;
use crate::ClockState;
use crate::CpuId;
use crate::CpuIdEntry;
@@ -51,7 +49,6 @@
use crate::PitChannelState;
use crate::PitState;
use crate::ProtectionType;
-use crate::Register;
use crate::Regs;
use crate::Segment;
use crate::Sregs;
@@ -883,25 +880,36 @@
}
}
- fn get_xcrs(&self) -> Result<Vec<Register>> {
+ fn get_xcrs(&self) -> Result<BTreeMap<u32, u64>> {
let mut regs: kvm_xcrs = Default::default();
// SAFETY:
// Safe because we know that our file is a VCPU fd, we know the kernel will only write the
// correct amount of memory to our pointer, and we verify the return result.
let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_XCRS(), &mut regs) };
- if ret == 0 {
- Ok(from_kvm_xcrs(®s))
- } else {
- errno_result()
+ if ret < 0 {
+ return errno_result();
}
+
+ Ok(regs
+ .xcrs
+ .iter()
+ .take(regs.nr_xcrs as usize)
+ .map(|kvm_xcr| (kvm_xcr.xcr, kvm_xcr.value))
+ .collect())
}
- fn set_xcrs(&self, xcrs: &[Register]) -> Result<()> {
- let xcrs = to_kvm_xcrs(xcrs);
+ fn set_xcr(&self, xcr_index: u32, value: u64) -> Result<()> {
+ let mut kvm_xcr = kvm_xcrs {
+ nr_xcrs: 1,
+ ..Default::default()
+ };
+ kvm_xcr.xcrs[0].xcr = xcr_index;
+ kvm_xcr.xcrs[0].value = value;
+
let ret = {
// SAFETY:
// Here we trust the kernel not to read past the end of the kvm_xcrs struct.
- unsafe { ioctl_with_ref(self, KVM_SET_XCRS(), &xcrs) }
+ unsafe { ioctl_with_ref(self, KVM_SET_XCRS(), &kvm_xcr) }
};
if ret == 0 {
Ok(())
@@ -910,78 +918,117 @@
}
}
- fn get_msrs(&self, vec: &mut Vec<Register>) -> Result<()> {
- let msrs = to_kvm_msrs(vec);
+ fn get_msr(&self, msr_index: u32) -> Result<u64> {
+ let mut msrs = vec_with_array_field::<kvm_msrs, kvm_msr_entry>(1);
+ msrs[0].nmsrs = 1;
+
+ // SAFETY: We initialize a one-element array using `vec_with_array_field` above.
+ unsafe {
+ let msr_entries = msrs[0].entries.as_mut_slice(1);
+ msr_entries[0].index = msr_index;
+ }
+
let ret = {
// SAFETY:
// Here we trust the kernel not to read or write past the end of the kvm_msrs struct.
unsafe { ioctl_with_ref(self, KVM_GET_MSRS(), &msrs[0]) }
};
- // KVM_GET_MSRS actually returns the number of msr entries written.
if ret < 0 {
return errno_result();
}
+
+ // KVM_GET_MSRS returns the number of msr entries written.
+ if ret != 1 {
+ return Err(base::Error::new(libc::ENOENT));
+ }
+
// SAFETY:
// Safe because we trust the kernel to return the correct array length on success.
- let entries = unsafe {
- let count = ret as usize;
- assert!(count <= vec.len());
- msrs[0].entries.as_slice(count)
+ let value = unsafe {
+ let msr_entries = msrs[0].entries.as_slice(1);
+ msr_entries[0].data
};
- vec.truncate(0);
- vec.extend(entries.iter().map(|e| Register {
- id: e.index,
- value: e.data,
- }));
- Ok(())
+
+ Ok(value)
}
- fn get_all_msrs(&self) -> Result<Vec<Register>> {
- let mut msrs: Vec<_> = self
- .kvm
- .get_msr_index_list()?
- .into_iter()
- .map(|i| Register { id: i, value: 0 })
- .collect();
- let count = msrs.len();
- self.get_msrs(&mut msrs)?;
- if msrs.len() != count {
+ fn get_all_msrs(&self) -> Result<BTreeMap<u32, u64>> {
+ let msr_index_list = self.kvm.get_msr_index_list()?;
+ let mut kvm_msrs = vec_with_array_field::<kvm_msrs, kvm_msr_entry>(msr_index_list.len());
+ kvm_msrs[0].nmsrs = msr_index_list.len() as u32;
+ // SAFETY:
+ // Mapping the unsized array to a slice is unsafe because the length isn't known.
+ // Providing the length used to create the struct guarantees the entire slice is valid.
+ unsafe {
+ kvm_msrs[0]
+ .entries
+ .as_mut_slice(msr_index_list.len())
+ .iter_mut()
+ .zip(msr_index_list.iter())
+ .for_each(|(msr_entry, msr_index)| msr_entry.index = *msr_index);
+ }
+
+ let ret = {
+ // SAFETY:
+ // Here we trust the kernel not to read or write past the end of the kvm_msrs struct.
+ unsafe { ioctl_with_ref(self, KVM_GET_MSRS(), &kvm_msrs[0]) }
+ };
+ if ret < 0 {
+ return errno_result();
+ }
+
+ // KVM_GET_MSRS returns the number of msr entries written.
+ let count = ret as usize;
+ if count != msr_index_list.len() {
error!(
"failed to get all MSRs: requested {}, got {}",
+ msr_index_list.len(),
count,
- msrs.len()
);
return Err(base::Error::new(libc::EPERM));
}
+
+ // SAFETY:
+ // Safe because we trust the kernel to return the correct array length on success.
+ let msrs = unsafe {
+ BTreeMap::from_iter(
+ kvm_msrs[0]
+ .entries
+ .as_slice(count)
+ .iter()
+ .map(|kvm_msr| (kvm_msr.index, kvm_msr.data)),
+ )
+ };
+
Ok(msrs)
}
- fn set_msrs(&self, vec: &[Register]) -> Result<()> {
- let msrs = to_kvm_msrs(vec);
+ fn set_msr(&self, msr_index: u32, value: u64) -> Result<()> {
+ let mut kvm_msrs = vec_with_array_field::<kvm_msrs, kvm_msr_entry>(1);
+ kvm_msrs[0].nmsrs = 1;
+
+ // SAFETY: We initialize a one-element array using `vec_with_array_field` above.
+ unsafe {
+ let msr_entries = kvm_msrs[0].entries.as_mut_slice(1);
+ msr_entries[0].index = msr_index;
+ msr_entries[0].data = value;
+ }
+
let ret = {
// SAFETY:
// Here we trust the kernel not to read past the end of the kvm_msrs struct.
- unsafe { ioctl_with_ref(self, KVM_SET_MSRS(), &msrs[0]) }
+ unsafe { ioctl_with_ref(self, KVM_SET_MSRS(), &kvm_msrs[0]) }
};
- // KVM_SET_MSRS actually returns the number of msr entries written.
if ret < 0 {
return errno_result();
}
- let num_set = ret as usize;
- if num_set != vec.len() {
- if let Some(register) = vec.get(num_set) {
- error!(
- "failed to set MSR {:#x?} to {:#x?}",
- register.id, register.value
- );
- } else {
- error!(
- "unexpected KVM_SET_MSRS return value {num_set} (nmsrs={})",
- vec.len()
- );
- }
+
+ // KVM_SET_MSRS returns the number of msr entries written.
+ if ret != 1 {
+ error!("failed to set MSR {:#x} to {:#x}", msr_index, value);
return Err(base::Error::new(libc::EPERM));
}
+
Ok(())
}
@@ -1049,31 +1096,9 @@
Err(Error::new(ENXIO))
}
- fn set_tsc_value(&self, value: u64) -> Result<()> {
- set_tsc_value_via_msr(self, value)
- }
-
- fn restore_timekeeping(&self, _host_tsc_reference_moment: u64, tsc_offset: u64) -> Result<()> {
- // In theory, KVM requires no extra handling beyond restoring the TSC
- // MSR, which happens separately because TSC is in the all MSR list for
- // KVM; however, we found that when we don't directly restore the offset
- // timekeeping inside the guest goes haywire. We suspect that when KVM
- // is using pvclock (which we do), it doesn't want anyone else messing
- // with the guest's TSC. Long term, we should consider using
- // KVM_GET_CLOCK & KVM_SET_CLOCK instead. (We've also observed that
- // saving/restoring TSC_KHZ somehow fixes this issue as well. Further
- // research is required.)
- self.set_tsc_offset(tsc_offset)
- }
-
- fn get_tsc_offset(&self) -> Result<u64> {
- // Use the default MSR-based implementation
- get_tsc_offset_from_msr(self)
- }
-
- fn set_tsc_offset(&self, offset: u64) -> Result<()> {
- // Use the default MSR-based implementation
- set_tsc_offset_via_msr(self, offset)
+ fn restore_timekeeping(&self, _host_tsc_reference_moment: u64, _tsc_offset: u64) -> Result<()> {
+ // On KVM, the TSC MSR is restored as part of SET_MSRS, and no further action is required.
+ Ok(())
}
}
@@ -1117,25 +1142,14 @@
///
/// See the documentation for The kvm_run structure, and for KVM_GET_LAPIC.
pub fn get_apic_base(&self) -> Result<u64> {
- let mut apic_base = vec![Register {
- id: MSR_IA32_APICBASE,
- value: 0,
- }];
- self.get_msrs(&mut apic_base)?;
- match apic_base.get(0) {
- Some(base) => Ok(base.value),
- None => Err(Error::new(EIO)),
- }
+ self.get_msr(MSR_IA32_APICBASE)
}
/// X86 specific call to set the value of the APIC_BASE MSR.
///
/// See the documentation for The kvm_run structure, and for KVM_GET_LAPIC.
pub fn set_apic_base(&self, apic_base: u64) -> Result<()> {
- self.set_msrs(&[Register {
- id: MSR_IA32_APICBASE,
- value: apic_base,
- }])
+ self.set_msr(MSR_IA32_APICBASE, apic_base)
}
/// Call to get pending interrupts acknowledged by the APIC but not yet injected into the CPU.
@@ -1786,53 +1800,6 @@
}
}
-fn from_kvm_xcrs(r: &kvm_xcrs) -> Vec<Register> {
- r.xcrs
- .iter()
- .take(r.nr_xcrs as usize)
- .map(|x| Register {
- id: x.xcr,
- value: x.value,
- })
- .collect()
-}
-
-fn to_kvm_xcrs(r: &[Register]) -> kvm_xcrs {
- let mut kvm = kvm_xcrs {
- nr_xcrs: r.len() as u32,
- ..Default::default()
- };
- for (i, &xcr) in r.iter().enumerate() {
- kvm.xcrs[i].xcr = xcr.id;
- kvm.xcrs[i].value = xcr.value;
- }
- kvm
-}
-
-fn to_kvm_msrs(vec: &[Register]) -> Vec<kvm_msrs> {
- let vec: Vec<kvm_msr_entry> = vec
- .iter()
- .map(|e| kvm_msr_entry {
- index: e.id,
- data: e.value,
- ..Default::default()
- })
- .collect();
-
- let mut msrs = vec_with_array_field::<kvm_msrs, kvm_msr_entry>(vec.len());
- // SAFETY:
- // Mapping the unsized array to a slice is unsafe because the length isn't known.
- // Providing the length used to create the struct guarantees the entire slice is valid.
- unsafe {
- msrs[0]
- .entries
- .as_mut_slice(vec.len())
- .copy_from_slice(&vec);
- }
- msrs[0].nmsrs = vec.len() as u32;
- msrs
-}
-
#[cfg(test)]
mod tests {
use super::*;
diff --git a/hypervisor/src/whpx/vcpu.rs b/hypervisor/src/whpx/vcpu.rs
index b3826ef..fd3e93b 100644
--- a/hypervisor/src/whpx/vcpu.rs
+++ b/hypervisor/src/whpx/vcpu.rs
@@ -4,6 +4,7 @@
use core::ffi::c_void;
use std::arch::x86_64::CpuidResult;
+use std::collections::BTreeMap;
use std::convert::TryInto;
use std::mem::size_of;
use std::sync::Arc;
@@ -20,9 +21,6 @@
use super::types::*;
use super::*;
-use crate::get_tsc_offset_from_msr;
-use crate::set_tsc_offset_via_msr;
-use crate::set_tsc_value_via_msr;
use crate::CpuId;
use crate::CpuIdEntry;
use crate::DebugRegs;
@@ -30,7 +28,6 @@
use crate::HypervHypercall;
use crate::IoOperation;
use crate::IoParams;
-use crate::Register;
use crate::Regs;
use crate::Sregs;
use crate::Vcpu;
@@ -1083,83 +1080,71 @@
}
/// Gets the VCPU extended control registers.
- fn get_xcrs(&self) -> Result<Vec<Register>> {
- const REG_NAMES: [WHV_REGISTER_NAME; 1] = [WHV_REGISTER_NAME_WHvX64RegisterXCr0];
- let mut xcrs: [WHV_REGISTER_VALUE; 1] = Default::default();
+ fn get_xcrs(&self) -> Result<BTreeMap<u32, u64>> {
+ const REG_NAME: WHV_REGISTER_NAME = WHV_REGISTER_NAME_WHvX64RegisterXCr0;
+ let mut reg_value = WHV_REGISTER_VALUE::default();
// safe because we have enough space for all the registers in whpx_regs
check_whpx!(unsafe {
WHvGetVirtualProcessorRegisters(
self.vm_partition.partition,
self.index,
- ®_NAMES as *const WHV_REGISTER_NAME,
- REG_NAMES.len() as u32,
- xcrs.as_mut_ptr(),
+ ®_NAME,
+ /* RegisterCount */ 1,
+ &mut reg_value,
)
})?;
- let reg = Register {
- id: 0, // whpx only supports xcr0
- // safe because the union value, reg64, is safe to pull out assuming
- // kernel filled in the xcrs properly.
- value: unsafe { xcrs[0].Reg64 },
- };
- Ok(vec![reg])
+
+ // safe because the union value, reg64, is safe to pull out assuming
+ // kernel filled in the xcrs properly.
+ let xcr0 = unsafe { reg_value.Reg64 };
+
+ // whpx only supports xcr0
+ let xcrs = BTreeMap::from([(0, xcr0)]);
+ Ok(xcrs)
}
- /// Sets the VCPU extended control registers.
- fn set_xcrs(&self, xcrs: &[Register]) -> Result<()> {
- const REG_NAMES: [WHV_REGISTER_NAME; 1] = [WHV_REGISTER_NAME_WHvX64RegisterXCr0];
- let whpx_xcrs = xcrs
- .iter()
- .filter_map(|reg| match reg.id {
- 0 => Some(WHV_REGISTER_VALUE { Reg64: reg.value }),
- _ => None,
- })
- .collect::<Vec<WHV_REGISTER_VALUE>>();
- if !whpx_xcrs.is_empty() {
- // safe because we have enough space for all the registers in whpx_xcrs
- check_whpx!(unsafe {
- WHvSetVirtualProcessorRegisters(
- self.vm_partition.partition,
- self.index,
- ®_NAMES as *const WHV_REGISTER_NAME,
- REG_NAMES.len() as u32,
- whpx_xcrs.as_ptr(),
- )
- })
- } else {
+ /// Sets a VCPU extended control register.
+ fn set_xcr(&self, xcr_index: u32, value: u64) -> Result<()> {
+ if xcr_index != 0 {
// invalid xcr register provided
- Err(Error::new(ENXIO))
+ return Err(Error::new(EINVAL));
}
+
+ const REG_NAME: WHV_REGISTER_NAME = WHV_REGISTER_NAME_WHvX64RegisterXCr0;
+ let reg_value = WHV_REGISTER_VALUE { Reg64: value };
+ // safe because we have enough space for all the registers in whpx_xcrs
+ check_whpx!(unsafe {
+ WHvSetVirtualProcessorRegisters(
+ self.vm_partition.partition,
+ self.index,
+ ®_NAME,
+ /* RegisterCount */ 1,
+ ®_value,
+ )
+ })
}
- /// Gets the model-specific registers. `msrs` specifies the MSR indexes to be queried, and
- /// on success contains their indexes and values.
- fn get_msrs(&self, msrs: &mut Vec<Register>) -> Result<()> {
- let msr_names = get_msr_names(msrs);
- let mut buffer: Vec<WHV_REGISTER_VALUE> = vec![Default::default(); msr_names.len()];
+ /// Gets the value of a single model-specific register.
+ fn get_msr(&self, msr_index: u32) -> Result<u64> {
+ let msr_name = get_msr_name(msr_index).ok_or(Error::new(libc::ENOENT))?;
+ let mut msr_value = WHV_REGISTER_VALUE::default();
// safe because we have enough space for all the registers in whpx_regs
check_whpx!(unsafe {
WHvGetVirtualProcessorRegisters(
self.vm_partition.partition,
self.index,
- msr_names.as_ptr(),
- msr_names.len() as u32,
- buffer.as_mut_ptr(),
+ &msr_name,
+ /* RegisterCount */ 1,
+ &mut msr_value,
)
})?;
- msrs.retain(|&msr| VALID_MSRS.contains_key(&msr.id));
- if buffer.len() != msrs.len() {
- panic!("mismatch of valid whpx msr registers and returned registers");
- }
- for (i, msr) in msrs.iter_mut().enumerate() {
- // safe because Reg64 will be a valid union value for all msrs
- msr.value = unsafe { buffer[i].Reg64 };
- }
- Ok(())
+ // safe because Reg64 will be a valid union value
+ let value = unsafe { msr_value.Reg64 };
+ Ok(value)
}
- fn get_all_msrs(&self) -> Result<Vec<Register>> {
+ fn get_all_msrs(&self) -> Result<BTreeMap<u32, u64>> {
// Note that some members of VALID_MSRS cannot be fetched from WHPX with
// WHvGetVirtualProcessorRegisters per the HTLFS, so we enumerate all of
// permitted MSRs here.
@@ -1172,73 +1157,41 @@
// handled by the generic x86_64 VCPU snapshot/restore. Non snapshot
// consumers should use get/set_tsc_adjust to access the adjust register
// if needed.
- let mut registers = vec![
- Register {
- id: MSR_EFER,
- ..Default::default()
- },
- Register {
- id: MSR_KERNEL_GS_BASE,
- ..Default::default()
- },
- Register {
- id: MSR_APIC_BASE,
- ..Default::default()
- },
- Register {
- id: MSR_SYSENTER_CS,
- ..Default::default()
- },
- Register {
- id: MSR_SYSENTER_EIP,
- ..Default::default()
- },
- Register {
- id: MSR_SYSENTER_ESP,
- ..Default::default()
- },
- Register {
- id: MSR_STAR,
- ..Default::default()
- },
- Register {
- id: MSR_LSTAR,
- ..Default::default()
- },
- Register {
- id: MSR_CSTAR,
- ..Default::default()
- },
- Register {
- id: MSR_SFMASK,
- ..Default::default()
- },
+ const MSRS_TO_SAVE: &[u32] = &[
+ MSR_EFER,
+ MSR_KERNEL_GS_BASE,
+ MSR_APIC_BASE,
+ MSR_SYSENTER_CS,
+ MSR_SYSENTER_EIP,
+ MSR_SYSENTER_ESP,
+ MSR_STAR,
+ MSR_LSTAR,
+ MSR_CSTAR,
+ MSR_SFMASK,
];
- self.get_msrs(&mut registers)?;
+
+ let registers = MSRS_TO_SAVE
+ .iter()
+ .map(|msr_index| {
+ let value = self.get_msr(*msr_index)?;
+ Ok((*msr_index, value))
+ })
+ .collect::<Result<BTreeMap<u32, u64>>>()?;
+
Ok(registers)
}
- /// Sets the model-specific registers.
- fn set_msrs(&self, msrs: &[Register]) -> Result<()> {
- let msr_names = get_msr_names(msrs);
- let whpx_msrs = msrs
- .iter()
- .filter_map(|msr| {
- if VALID_MSRS.contains_key(&msr.id) {
- Some(WHV_REGISTER_VALUE { Reg64: msr.value })
- } else {
- None
- }
- })
- .collect::<Vec<WHV_REGISTER_VALUE>>();
-
+ /// Sets the value of a single model-specific register.
+ fn set_msr(&self, msr_index: u32, value: u64) -> Result<()> {
+ let msr_name = get_msr_name(msr_index).ok_or(Error::new(libc::ENOENT))?;
+ let msr_value = WHV_REGISTER_VALUE { Reg64: value };
check_whpx!(unsafe {
WHvSetVirtualProcessorRegisters(
self.vm_partition.partition,
self.index,
- msr_names.as_ptr(),
- msr_names.len() as u32,
- whpx_msrs.as_ptr(),
+ &msr_name,
+ /* RegisterCount */ 1,
+ &msr_value,
)
})
}
@@ -1311,23 +1264,6 @@
Err(Error::new(ENOENT))
}
- fn get_tsc_offset(&self) -> Result<u64> {
- // Note: WHV_REGISTER_NAME_WHvX64RegisterTscVirtualOffset register appears to no longer be
- // supported, so we use the MSR path. (It also didn't work in 19H2 either, always returning
- // zero on get.)
- get_tsc_offset_from_msr(self)
- }
-
- fn set_tsc_offset(&self, offset: u64) -> Result<()> {
- // Note: WHV_REGISTER_NAME_WHvX64RegisterTscVirtualOffset register appears to no longer be
- // supported, so we use the MSR path.
- set_tsc_offset_via_msr(self, offset)
- }
-
- fn set_tsc_value(&self, value: u64) -> Result<()> {
- set_tsc_value_via_msr(self, value)
- }
-
fn restore_timekeeping(&self, host_tsc_reference_moment: u64, tsc_offset: u64) -> Result<()> {
// Set the guest TSC such that it has the same TSC_OFFSET as it did at
// the moment it was snapshotted. This is required for virtio-pvclock
@@ -1338,10 +1274,8 @@
}
}
-fn get_msr_names(msrs: &[Register]) -> Vec<WHV_REGISTER_NAME> {
- msrs.iter()
-        .filter_map(|reg| VALID_MSRS.get(&reg.id).copied())
- .collect::<Vec<WHV_REGISTER_NAME>>()
+fn get_msr_name(msr_index: u32) -> Option<WHV_REGISTER_NAME> {
+ VALID_MSRS.get(&msr_index).copied()
}
// run calls are tested with the integration tests since the full vcpu needs to be setup for it.
@@ -1503,15 +1437,14 @@
return;
}
- let mut xcrs = vcpu.get_xcrs().unwrap();
- xcrs[0].value = 1;
- vcpu.set_xcrs(&xcrs).unwrap();
- let xcrs2 = vcpu.get_xcrs().unwrap();
- assert_eq!(xcrs[0].value, xcrs2[0].value);
+ vcpu.set_xcr(0, 1).unwrap();
+ let xcrs = vcpu.get_xcrs().unwrap();
+ let xcr0 = xcrs.get(&0).unwrap();
+ assert_eq!(*xcr0, 1);
}
#[test]
- fn set_msrs() {
+ fn set_msr() {
if !Whpx::is_enabled() {
return;
}
@@ -1521,21 +1454,14 @@
let vm = new_vm(cpu_count, mem);
let vcpu = vm.create_vcpu(0).expect("failed to create vcpu");
- let mut msrs = vec![Register {
- id: MSR_KERNEL_GS_BASE,
- value: 42,
- }];
- vcpu.set_msrs(&msrs).unwrap();
+ vcpu.set_msr(MSR_KERNEL_GS_BASE, 42).unwrap();
- msrs[0].value = 0;
- vcpu.get_msrs(&mut msrs).unwrap();
- assert_eq!(msrs.len(), 1);
- assert_eq!(msrs[0].id, MSR_KERNEL_GS_BASE);
- assert_eq!(msrs[0].value, 42);
+ let gs_base = vcpu.get_msr(MSR_KERNEL_GS_BASE).unwrap();
+ assert_eq!(gs_base, 42);
}
#[test]
- fn get_msrs() {
+ fn get_msr() {
if !Whpx::is_enabled() {
return;
}
@@ -1545,20 +1471,12 @@
let vm = new_vm(cpu_count, mem);
let vcpu = vm.create_vcpu(0).expect("failed to create vcpu");
- let mut msrs = vec![
- // This one should succeed
- Register {
- id: MSR_TSC,
- ..Default::default()
- },
- // This one will fail to fetch
- Register {
- id: MSR_TSC + 1,
- ..Default::default()
- },
- ];
- vcpu.get_msrs(&mut msrs).unwrap();
- assert_eq!(msrs.len(), 1);
+ // This one should succeed
+ let _value = vcpu.get_msr(MSR_TSC).unwrap();
+
+ // This one will fail to fetch
+ vcpu.get_msr(MSR_TSC + 1)
+ .expect_err("invalid MSR index should fail");
}
#[test]
@@ -1598,22 +1516,18 @@
assert_eq!(sregs.cr0 & X86_CR0_PG, X86_CR0_PG);
assert_eq!(sregs.cr4 & X86_CR4_PAE, X86_CR4_PAE);
- let mut efer_reg = vec![Register {
- id: MSR_EFER,
- value: 0,
- }];
- vcpu.get_msrs(&mut efer_reg).expect("failed to get msrs");
- assert_eq!(efer_reg[0].value, EFER_LMA | EFER_LME);
+ let efer = vcpu.get_msr(MSR_EFER).expect("failed to get msr");
+ assert_eq!(efer, EFER_LMA | EFER_LME);
// Enable SCE via set_msrs
- efer_reg[0].value |= EFER_SCE;
- vcpu.set_msrs(&efer_reg).expect("failed to set msrs");
+ vcpu.set_msr(MSR_EFER, efer | EFER_SCE)
+ .expect("failed to set msr");
// Verify that setting stuck
let sregs = vcpu.get_sregs().expect("failed to get sregs");
assert_eq!(sregs.efer, EFER_SCE | EFER_LME | EFER_LMA);
- vcpu.get_msrs(&mut efer_reg).expect("failed to get msrs");
- assert_eq!(efer_reg[0].value, EFER_SCE | EFER_LME | EFER_LMA);
+ let new_efer = vcpu.get_msr(MSR_EFER).expect("failed to get msr");
+ assert_eq!(new_efer, EFER_SCE | EFER_LME | EFER_LMA);
}
#[test]
@@ -1665,7 +1579,7 @@
// Our MSR buffer is init'ed to zeros in the registers. The APIC base will be non-zero, so
// by asserting that we know the MSR fetch actually did get us data.
- let apic_base = all_msrs.iter().find(|reg| reg.id == MSR_APIC_BASE).unwrap();
- assert_ne!(apic_base.value, 0);
+ let apic_base = all_msrs.get(&MSR_APIC_BASE).unwrap();
+ assert_ne!(*apic_base, 0);
}
}
diff --git a/hypervisor/src/x86_64.rs b/hypervisor/src/x86_64.rs
index 20a99c3..fb8a774 100644
--- a/hypervisor/src/x86_64.rs
+++ b/hypervisor/src/x86_64.rs
@@ -5,9 +5,8 @@
use std::arch::x86_64::CpuidResult;
#[cfg(any(unix, feature = "haxm", feature = "whpx"))]
use std::arch::x86_64::__cpuid;
-#[cfg(any(unix, feature = "haxm", feature = "whpx"))]
use std::arch::x86_64::_rdtsc;
-use std::collections::HashMap;
+use std::collections::BTreeMap;
use std::collections::HashSet;
use anyhow::Context;
@@ -111,10 +110,10 @@
fn set_debugregs(&self, debugregs: &DebugRegs) -> Result<()>;
/// Gets the VCPU extended control registers.
- fn get_xcrs(&self) -> Result<Vec<Register>>;
+ fn get_xcrs(&self) -> Result<BTreeMap<u32, u64>>;
- /// Sets the VCPU extended control registers.
- fn set_xcrs(&self, xcrs: &[Register]) -> Result<()>;
+ /// Sets a VCPU extended control register.
+ fn set_xcr(&self, xcr: u32, value: u64) -> Result<()>;
/// Gets the VCPU x87 FPU, MMX, XMM, YMM and MXCSR registers.
fn get_xsave(&self) -> Result<Xsave>;
@@ -130,15 +129,14 @@
/// snapshotting.
fn set_interrupt_state(&self, data: serde_json::Value) -> Result<()>;
- /// Gets the model-specific registers. `msrs` specifies the MSR indexes to be queried, and
- /// on success contains their indexes and values.
- fn get_msrs(&self, msrs: &mut Vec<Register>) -> Result<()>;
+ /// Gets a single model-specific register's value.
+ fn get_msr(&self, msr_index: u32) -> Result<u64>;
/// Gets the model-specific registers. Returns all the MSRs for the VCPU.
- fn get_all_msrs(&self) -> Result<Vec<Register>>;
+ fn get_all_msrs(&self) -> Result<BTreeMap<u32, u64>>;
- /// Sets the model-specific registers.
- fn set_msrs(&self, msrs: &[Register]) -> Result<()>;
+ /// Sets a single model-specific register's value.
+ fn set_msr(&self, msr_index: u32, value: u64) -> Result<()>;
/// Sets up the data returned by the CPUID instruction.
fn set_cpuid(&self, cpuid: &CpuId) -> Result<()>;
@@ -154,15 +152,62 @@
/// will then set the appropriate registers on the vcpu.
fn handle_cpuid(&mut self, entry: &CpuIdEntry) -> Result<()>;
- /// Get the guest->host TSC offset
- fn get_tsc_offset(&self) -> Result<u64>;
+ /// Gets the guest->host TSC offset.
+ ///
+ /// The default implementation uses [`VcpuX86_64::get_msr()`] to read the guest TSC.
+ fn get_tsc_offset(&self) -> Result<u64> {
+ // SAFETY:
+ // Safe because _rdtsc takes no arguments
+ let host_before_tsc = unsafe { _rdtsc() };
- /// Set the guest->host TSC offset
- fn set_tsc_offset(&self, offset: u64) -> Result<()>;
+ // get guest TSC value from our hypervisor
+ let guest_tsc = self.get_msr(crate::MSR_IA32_TSC)?;
+
+ // SAFETY:
+ // Safe because _rdtsc takes no arguments
+ let host_after_tsc = unsafe { _rdtsc() };
+
+ // Average the before and after host tsc to get the best value
+ let host_tsc = ((host_before_tsc as u128 + host_after_tsc as u128) / 2) as u64;
+
+ Ok(guest_tsc.wrapping_sub(host_tsc))
+ }
+
+ /// Sets the guest->host TSC offset.
+ ///
+ /// The default implementation uses [`VcpuX86_64::set_tsc_value()`] to set the TSC value.
+ ///
+ /// It sets TSC_OFFSET (VMCS / CB field) by setting the TSC MSR to the current
+ /// host TSC value plus the desired offset. We rely on the fact that hypervisors
+ /// determine the value of TSC_OFFSET by computing TSC_OFFSET = new_tsc_value
+ /// - _rdtsc() = _rdtsc() + offset - _rdtsc() ~= offset. Note that the ~= is
+ /// important: this is an approximate operation, because the two _rdtsc() calls
+ /// are separated by at least a few ticks.
+ ///
+ /// Note: TSC_OFFSET, host TSC, guest TSC, and TSC MSR are all different
+ /// concepts.
+ /// * When a guest executes rdtsc, the value (guest TSC) returned is host_tsc * TSC_MULTIPLIER +
+ /// TSC_OFFSET + TSC_ADJUST.
+ /// * The TSC MSR is a special MSR that when written to by the host, will cause TSC_OFFSET to be
+ /// set accordingly by the hypervisor.
+ /// * When the guest *writes* to TSC MSR, it actually changes the TSC_ADJUST MSR *for the
+    ///   guest*. Generally this only happens if the guest is trying to re-zero or synchronize
+ /// TSCs.
+ fn set_tsc_offset(&self, offset: u64) -> Result<()> {
+ // SAFETY: _rdtsc takes no arguments.
+ let host_tsc = unsafe { _rdtsc() };
+ self.set_tsc_value(host_tsc.wrapping_add(offset))
+ }
/// Sets the guest TSC exactly to the provided value.
- /// Required for snapshotting.
- fn set_tsc_value(&self, value: u64) -> Result<()>;
+ ///
+ /// The default implementation sets the guest's TSC by writing the value to the MSR directly.
+ ///
+ /// See [`VcpuX86_64::set_tsc_offset()`] for an explanation of how this value is actually read
+ /// by the guest after being set.
+ fn set_tsc_value(&self, value: u64) -> Result<()> {
+ self.set_msr(crate::MSR_IA32_TSC, value)
+ }
/// Some hypervisors require special handling to restore timekeeping when
/// a snapshot is restored. They are provided with a host TSC reference
@@ -213,19 +258,16 @@
self.set_regs(&snapshot.regs)?;
self.set_sregs(&snapshot.sregs)?;
self.set_debugregs(&snapshot.debug_regs)?;
- self.set_xcrs(&snapshot.xcrs)?;
-
- let mut msrs = HashMap::new();
- for reg in self.get_all_msrs()? {
- msrs.insert(reg.id, reg.value);
+ for (xcr_index, value) in &snapshot.xcrs {
+ self.set_xcr(*xcr_index, *value)?;
}
- for &msr in snapshot.msrs.iter() {
- if Some(&msr.value) == msrs.get(&msr.id) {
+ for (msr_index, value) in snapshot.msrs.iter() {
+ if self.get_msr(*msr_index) == Ok(*value) {
continue; // no need to set MSR since the values are the same.
}
- if let Err(e) = self.set_msrs(&[msr]) {
- if msr_allowlist.contains(&msr.id) {
+ if let Err(e) = self.set_msr(*msr_index, *value) {
+ if msr_allowlist.contains(msr_index) {
warn!(
"Failed to set MSR. MSR might not be supported in this kernel. Err: {}",
e
@@ -252,8 +294,8 @@
regs: Regs,
sregs: Sregs,
debug_regs: DebugRegs,
- xcrs: Vec<Register>,
- msrs: Vec<Register>,
+ xcrs: BTreeMap<u32, u64>,
+ msrs: BTreeMap<u32, u64>,
xsave: Xsave,
hypervisor_data: serde_json::Value,
tsc_offset: u64,
@@ -264,69 +306,6 @@
// TSC MSR
pub const MSR_IA32_TSC: u32 = 0x00000010;
-/// Implementation of get_tsc_offset that uses VcpuX86_64::get_msrs.
-#[cfg(any(unix, feature = "haxm", feature = "whpx"))]
-pub(crate) fn get_tsc_offset_from_msr(vcpu: &impl VcpuX86_64) -> Result<u64> {
- let mut regs = vec![Register {
- id: crate::MSR_IA32_TSC,
- value: 0,
- }];
-
- // SAFETY:
- // Safe because _rdtsc takes no arguments
- let host_before_tsc = unsafe { _rdtsc() };
-
- // get guest TSC value from our hypervisor
- vcpu.get_msrs(&mut regs)?;
-
- // SAFETY:
- // Safe because _rdtsc takes no arguments
- let host_after_tsc = unsafe { _rdtsc() };
-
- // Average the before and after host tsc to get the best value
- let host_tsc = ((host_before_tsc as u128 + host_after_tsc as u128) / 2) as u64;
-
- Ok(regs[0].value.wrapping_sub(host_tsc))
-}
-
-/// Implementation of set_tsc_offset that uses VcpuX86_64::get_msrs.
-///
-/// It sets TSC_OFFSET (VMCS / CB field) by setting the TSC MSR to the current
-/// host TSC value plus the desired offset. We rely on the fact that hypervisors
-/// determine the value of TSC_OFFSET by computing TSC_OFFSET = new_tsc_value
-/// - _rdtsc() = _rdtsc() + offset - _rdtsc() ~= offset. Note that the ~= is
-/// important: this is an approximate operation, because the two _rdtsc() calls
-/// are separated by at least a few ticks.
-///
-/// Note: TSC_OFFSET, host TSC, guest TSC, and TSC MSR are all different
-/// concepts.
-/// * When a guest executes rdtsc, the value (guest TSC) returned is host_tsc * TSC_MULTIPLIER +
-/// TSC_OFFSET + TSC_ADJUST.
-/// * The TSC MSR is a special MSR that when written to by the host, will cause TSC_OFFSET to be set
-/// accordingly by the hypervisor.
-/// * When the guest *writes* to TSC MSR, it actually changes the TSC_ADJUST MSR *for the guest*.
-/// Generally this is only happens if the guest is trying to re-zero or synchronize TSCs.
-#[cfg(any(unix, feature = "haxm", feature = "whpx"))]
-pub(crate) fn set_tsc_offset_via_msr(vcpu: &impl VcpuX86_64, offset: u64) -> Result<()> {
- // SAFETY: _rdtsc takes no arguments.
- let host_tsc = unsafe { _rdtsc() };
- set_tsc_value_via_msr(vcpu, host_tsc.wrapping_add(offset))
-}
-
-/// Sets the guest's TSC by writing the value to the MSR directly. See
-/// [`set_tsc_offset_via_msr`] for an explanation of how this value is actually
-/// read by the guest after being set.
-#[cfg(any(unix, feature = "haxm", feature = "whpx"))]
-pub(crate) fn set_tsc_value_via_msr(vcpu: &impl VcpuX86_64, value: u64) -> Result<()> {
- let regs = vec![Register {
- id: crate::MSR_IA32_TSC,
- value,
- }];
-
- // set guest TSC value from our hypervisor
-    vcpu.set_msrs(&regs)
-}
-
/// Gets host cpu max physical address bits.
#[cfg(any(unix, feature = "haxm", feature = "whpx"))]
pub(crate) fn host_phys_addr_bits() -> u8 {
@@ -355,7 +334,7 @@
pub fpu: Fpu,
/// Machine-specific registers.
- pub msrs: Vec<Register>,
+ pub msrs: BTreeMap<u32, u64>,
}
/// Hold the CPU feature configurations that are needed to setup a vCPU.
@@ -1002,13 +981,6 @@
pub dr7: u64,
}
-/// State of one VCPU register. Currently used for MSRs and XCRs.
-#[derive(Debug, Default, Copy, Clone, Serialize, Deserialize)]
-pub struct Register {
- pub id: u32,
- pub value: u64,
-}
-
/// The hybrid type for intel hybrid CPU.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub enum CpuHybridType {
diff --git a/hypervisor/tests/hypervisor_virtualization.rs b/hypervisor/tests/hypervisor_virtualization.rs
new file mode 100644
index 0000000..3a1440a
--- /dev/null
+++ b/hypervisor/tests/hypervisor_virtualization.rs
@@ -0,0 +1,111 @@
+// Copyright 2024 The ChromiumOS Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#![cfg(target_arch = "x86_64")]
+#![cfg(any(feature = "whpx", feature = "gvm", feature = "haxm", unix))]
+
+use hypervisor::*;
+use vm_memory::GuestAddress;
+use vm_memory::GuestMemory;
+
+#[test]
+#[cfg(any(target_os = "android", target_os = "linux"))]
+fn test_kvm_minimal_virtualization() {
+ use hypervisor::kvm::*;
+ test_minimal_virtualization(|guest_mem| {
+ let kvm = Kvm::new().expect("failed to create kvm");
+ let vm = KvmVm::new(&kvm, guest_mem, Default::default()).expect("failed to create vm");
+ (kvm, vm)
+ });
+}
+
+#[test]
+#[cfg(all(windows, feature = "haxm"))]
+fn test_haxm_minimal_virtualization() {
+ use hypervisor::haxm::*;
+ test_minimal_virtualization(|guest_mem| {
+ let haxm = Haxm::new().expect("failed to create haxm");
+ let vm = HaxmVm::new(&haxm, guest_mem).expect("failed to create vm");
+ (haxm, vm)
+ });
+}
+
+#[test]
+#[cfg(feature = "gvm")]
+fn test_gvm_minimal_virtualization() {
+ use hypervisor::gvm::*;
+ test_minimal_virtualization(|guest_mem| {
+ let gvm = Gvm::new().expect("failed to create gvm");
+ let vm = GvmVm::new(&gvm, guest_mem).expect("failed to create vm");
+ (gvm, vm)
+ });
+}
+
+#[test]
+#[cfg(all(windows, feature = "whpx"))]
+fn test_whpx_minimal_virtualization() {
+ use hypervisor::whpx::*;
+ test_minimal_virtualization(|guest_mem| {
+ let whpx = Whpx::new().expect("failed to create whpx");
+ let vm = WhpxVm::new(&whpx, 1, guest_mem, CpuId::new(0), false, None)
+ .expect("failed to create vm");
+ (whpx, vm)
+ });
+}
+
+// This runs a minimal program under virtualization.
+// It should require only the ability to execute instructions under virtualization, physical
+// memory, the ability to get and set some guest VM registers, and intercepting HLT.
+fn test_minimal_virtualization<CreateVm, HypervisorT, VmT>(create_vm: CreateVm)
+where
+ CreateVm: FnOnce(GuestMemory) -> (HypervisorT, VmT),
+ HypervisorT: Hypervisor,
+ VmT: VmX86_64,
+{
+ /*
+    0x0000000000000000:  01 D8                      add ax, bx
+ 0x0000000000000002: F4 hlt
+ */
+
+ let code: [u8; 3] = [0x01, 0xd8, 0xf4];
+ let mem_size = 0x2000;
+ let load_addr = GuestAddress(0x1000);
+
+ let guest_mem =
+ GuestMemory::new(&[(GuestAddress(0), mem_size)]).expect("failed to create guest mem");
+ guest_mem
+ .write_at_addr(&code[..], load_addr)
+ .expect("failed to write to guest memory");
+
+ let (_, vm) = create_vm(guest_mem);
+ let mut vcpu = vm.create_vcpu(0).expect("new vcpu failed");
+ let mut vcpu_sregs = vcpu.get_sregs().expect("get sregs failed");
+ vcpu_sregs.cs.base = 0;
+ vcpu_sregs.cs.selector = 0;
+
+ vcpu.set_sregs(&vcpu_sregs).expect("set sregs failed");
+
+ let vcpu_regs = Regs {
+ rip: load_addr.offset(),
+ rflags: 2,
+ rax: 1,
+ rbx: 2,
+ ..Default::default()
+ };
+ vcpu.set_regs(&vcpu_regs).expect("set regs failed");
+
+ loop {
+ match vcpu.run().expect("run failed") {
+ VcpuExit::Hlt => {
+ break;
+ }
+ // Continue on external interrupt or signal
+ VcpuExit::Intr => continue,
+ r => panic!("unexpected exit reason: {:?}", r),
+ }
+ }
+
+ let regs: Regs = vcpu.get_regs().expect("failed to get regs");
+ assert_eq!(regs.rax, 3);
+}
diff --git a/hypervisor/tests/kvm/x86_64.rs b/hypervisor/tests/kvm/x86_64.rs
index c83d5de..b8b4515 100644
--- a/hypervisor/tests/kvm/x86_64.rs
+++ b/hypervisor/tests/kvm/x86_64.rs
@@ -25,7 +25,6 @@
use hypervisor::PitRWMode;
use hypervisor::PitRWState;
use hypervisor::PitState;
-use hypervisor::Register;
use hypervisor::TriggerMode;
use hypervisor::Vm;
use hypervisor::VmCap;
@@ -365,68 +364,51 @@
let gm = GuestMemory::new(&[(GuestAddress(0), 0x10000)]).unwrap();
let vm = KvmVm::new(&kvm, gm, Default::default()).unwrap();
let vcpu = vm.create_vcpu(0).unwrap();
- let mut xcrs = vcpu.get_xcrs().unwrap();
- xcrs[0].value = 1;
- vcpu.set_xcrs(&xcrs).unwrap();
- let xcrs2 = vcpu.get_xcrs().unwrap();
- assert_eq!(xcrs[0].value, xcrs2[0].value);
+ vcpu.set_xcr(0, 1).unwrap();
+ let xcrs = vcpu.get_xcrs().unwrap();
+ let xcr0 = xcrs.get(&0).unwrap();
+ assert_eq!(*xcr0, 1);
}
#[test]
-fn get_msrs() {
+fn get_msr() {
let kvm = Kvm::new().unwrap();
let gm = GuestMemory::new(&[(GuestAddress(0), 0x10000)]).unwrap();
let vm = KvmVm::new(&kvm, gm, Default::default()).unwrap();
let vcpu = vm.create_vcpu(0).unwrap();
- let mut msrs = vec![
- // This one should succeed
- Register {
- id: 0x0000011e,
- ..Default::default()
- },
- // This one will fail to fetch
- Register {
- id: 0xffffffff,
- ..Default::default()
- },
- ];
- vcpu.get_msrs(&mut msrs).unwrap();
- assert_eq!(msrs.len(), 1);
+
+ // This one should succeed
+ let _value = vcpu.get_msr(0x0000011e).unwrap();
+
+ // This one will fail to fetch
+ vcpu.get_msr(0xffffffff)
+ .expect_err("invalid MSR index should fail");
}
#[test]
-fn set_msrs() {
+fn set_msr() {
let kvm = Kvm::new().unwrap();
let gm = GuestMemory::new(&[(GuestAddress(0), 0x10000)]).unwrap();
let vm = KvmVm::new(&kvm, gm, Default::default()).unwrap();
let vcpu = vm.create_vcpu(0).unwrap();
const MSR_TSC_AUX: u32 = 0xc0000103;
- let mut msrs = vec![Register {
- id: MSR_TSC_AUX,
- value: 42,
- }];
- vcpu.set_msrs(&msrs).unwrap();
-
- msrs[0].value = 0;
- vcpu.get_msrs(&mut msrs).unwrap();
- assert_eq!(msrs.len(), 1);
- assert_eq!(msrs[0].id, MSR_TSC_AUX);
- assert_eq!(msrs[0].value, 42);
+ vcpu.set_msr(MSR_TSC_AUX, 42).unwrap();
+ let msr_tsc_aux = vcpu.get_msr(MSR_TSC_AUX).unwrap();
+ assert_eq!(msr_tsc_aux, 42);
}
#[test]
-fn set_msrs_unsupported() {
+fn set_msr_unsupported() {
let kvm = Kvm::new().unwrap();
let gm = GuestMemory::new(&[(GuestAddress(0), 0x10000)]).unwrap();
let vm = KvmVm::new(&kvm, gm, Default::default()).unwrap();
let vcpu = vm.create_vcpu(0).unwrap();
- let msrs = vec![Register {
- id: u32::MAX,
- value: u64::MAX,
- }];
- assert_eq!(vcpu.set_msrs(&msrs), Err(base::Error::new(libc::EPERM)));
+ assert_eq!(
+ vcpu.set_msr(u32::MAX, u64::MAX),
+ Err(base::Error::new(libc::EPERM))
+ );
}
#[test]
diff --git a/hypervisor/tests/tsc_offsets.rs b/hypervisor/tests/tsc_offsets.rs
index f270486..d35e327 100644
--- a/hypervisor/tests/tsc_offsets.rs
+++ b/hypervisor/tests/tsc_offsets.rs
@@ -177,11 +177,8 @@
vcpu.set_regs(&vcpu_regs).expect("set regs failed");
if let Some(value) = set_msr {
- vcpu.set_msrs(&[Register {
- id: 0x00000010,
- value,
- }])
- .expect("set_msrs should not fail");
+ vcpu.set_msr(0x00000010, value)
+ .expect("set_msr should not fail");
}
if let Some(offset) = set_offset {
diff --git a/infra/README.recipes.md b/infra/README.recipes.md
index 948c1a7..d85ef15 100644
--- a/infra/README.recipes.md
+++ b/infra/README.recipes.md
@@ -174,19 +174,19 @@
— **def [RunSteps](/infra/recipes/update_chromeos_merges.py#14)(api):**
-[depot_tools/recipe_modules/bot_update]: https://chromium.googlesource.com/chromium/tools/depot_tools.git/+/29e08c1737507596dbc222b74a274b53137a23e3/recipes/README.recipes.md#recipe_modules-bot_update
-[depot_tools/recipe_modules/depot_tools]: https://chromium.googlesource.com/chromium/tools/depot_tools.git/+/29e08c1737507596dbc222b74a274b53137a23e3/recipes/README.recipes.md#recipe_modules-depot_tools
-[depot_tools/recipe_modules/gclient]: https://chromium.googlesource.com/chromium/tools/depot_tools.git/+/29e08c1737507596dbc222b74a274b53137a23e3/recipes/README.recipes.md#recipe_modules-gclient
-[depot_tools/recipe_modules/git]: https://chromium.googlesource.com/chromium/tools/depot_tools.git/+/29e08c1737507596dbc222b74a274b53137a23e3/recipes/README.recipes.md#recipe_modules-git
-[depot_tools/recipe_modules/gsutil]: https://chromium.googlesource.com/chromium/tools/depot_tools.git/+/29e08c1737507596dbc222b74a274b53137a23e3/recipes/README.recipes.md#recipe_modules-gsutil
-[recipe_engine/recipe_modules/buildbucket]: https://chromium.googlesource.com/infra/luci/recipes-py.git/+/0010a25ddf783692836331ed6461ff39ffa1ef71/README.recipes.md#recipe_modules-buildbucket
-[recipe_engine/recipe_modules/cipd]: https://chromium.googlesource.com/infra/luci/recipes-py.git/+/0010a25ddf783692836331ed6461ff39ffa1ef71/README.recipes.md#recipe_modules-cipd
-[recipe_engine/recipe_modules/context]: https://chromium.googlesource.com/infra/luci/recipes-py.git/+/0010a25ddf783692836331ed6461ff39ffa1ef71/README.recipes.md#recipe_modules-context
-[recipe_engine/recipe_modules/file]: https://chromium.googlesource.com/infra/luci/recipes-py.git/+/0010a25ddf783692836331ed6461ff39ffa1ef71/README.recipes.md#recipe_modules-file
-[recipe_engine/recipe_modules/json]: https://chromium.googlesource.com/infra/luci/recipes-py.git/+/0010a25ddf783692836331ed6461ff39ffa1ef71/README.recipes.md#recipe_modules-json
-[recipe_engine/recipe_modules/path]: https://chromium.googlesource.com/infra/luci/recipes-py.git/+/0010a25ddf783692836331ed6461ff39ffa1ef71/README.recipes.md#recipe_modules-path
-[recipe_engine/recipe_modules/platform]: https://chromium.googlesource.com/infra/luci/recipes-py.git/+/0010a25ddf783692836331ed6461ff39ffa1ef71/README.recipes.md#recipe_modules-platform
-[recipe_engine/recipe_modules/properties]: https://chromium.googlesource.com/infra/luci/recipes-py.git/+/0010a25ddf783692836331ed6461ff39ffa1ef71/README.recipes.md#recipe_modules-properties
-[recipe_engine/recipe_modules/raw_io]: https://chromium.googlesource.com/infra/luci/recipes-py.git/+/0010a25ddf783692836331ed6461ff39ffa1ef71/README.recipes.md#recipe_modules-raw_io
-[recipe_engine/recipe_modules/step]: https://chromium.googlesource.com/infra/luci/recipes-py.git/+/0010a25ddf783692836331ed6461ff39ffa1ef71/README.recipes.md#recipe_modules-step
-[recipe_engine/wkt/RecipeApi]: https://chromium.googlesource.com/infra/luci/recipes-py.git/+/0010a25ddf783692836331ed6461ff39ffa1ef71/recipe_engine/recipe_api.py#473
+[depot_tools/recipe_modules/bot_update]: https://chromium.googlesource.com/chromium/tools/depot_tools.git/+/5a584405cae6e1fd08a6054ce9ba61b645b4d591/recipes/README.recipes.md#recipe_modules-bot_update
+[depot_tools/recipe_modules/depot_tools]: https://chromium.googlesource.com/chromium/tools/depot_tools.git/+/5a584405cae6e1fd08a6054ce9ba61b645b4d591/recipes/README.recipes.md#recipe_modules-depot_tools
+[depot_tools/recipe_modules/gclient]: https://chromium.googlesource.com/chromium/tools/depot_tools.git/+/5a584405cae6e1fd08a6054ce9ba61b645b4d591/recipes/README.recipes.md#recipe_modules-gclient
+[depot_tools/recipe_modules/git]: https://chromium.googlesource.com/chromium/tools/depot_tools.git/+/5a584405cae6e1fd08a6054ce9ba61b645b4d591/recipes/README.recipes.md#recipe_modules-git
+[depot_tools/recipe_modules/gsutil]: https://chromium.googlesource.com/chromium/tools/depot_tools.git/+/5a584405cae6e1fd08a6054ce9ba61b645b4d591/recipes/README.recipes.md#recipe_modules-gsutil
+[recipe_engine/recipe_modules/buildbucket]: https://chromium.googlesource.com/infra/luci/recipes-py.git/+/f967fa68fd4bd3e76140de7ffa449fa3a45a72f4/README.recipes.md#recipe_modules-buildbucket
+[recipe_engine/recipe_modules/cipd]: https://chromium.googlesource.com/infra/luci/recipes-py.git/+/f967fa68fd4bd3e76140de7ffa449fa3a45a72f4/README.recipes.md#recipe_modules-cipd
+[recipe_engine/recipe_modules/context]: https://chromium.googlesource.com/infra/luci/recipes-py.git/+/f967fa68fd4bd3e76140de7ffa449fa3a45a72f4/README.recipes.md#recipe_modules-context
+[recipe_engine/recipe_modules/file]: https://chromium.googlesource.com/infra/luci/recipes-py.git/+/f967fa68fd4bd3e76140de7ffa449fa3a45a72f4/README.recipes.md#recipe_modules-file
+[recipe_engine/recipe_modules/json]: https://chromium.googlesource.com/infra/luci/recipes-py.git/+/f967fa68fd4bd3e76140de7ffa449fa3a45a72f4/README.recipes.md#recipe_modules-json
+[recipe_engine/recipe_modules/path]: https://chromium.googlesource.com/infra/luci/recipes-py.git/+/f967fa68fd4bd3e76140de7ffa449fa3a45a72f4/README.recipes.md#recipe_modules-path
+[recipe_engine/recipe_modules/platform]: https://chromium.googlesource.com/infra/luci/recipes-py.git/+/f967fa68fd4bd3e76140de7ffa449fa3a45a72f4/README.recipes.md#recipe_modules-platform
+[recipe_engine/recipe_modules/properties]: https://chromium.googlesource.com/infra/luci/recipes-py.git/+/f967fa68fd4bd3e76140de7ffa449fa3a45a72f4/README.recipes.md#recipe_modules-properties
+[recipe_engine/recipe_modules/raw_io]: https://chromium.googlesource.com/infra/luci/recipes-py.git/+/f967fa68fd4bd3e76140de7ffa449fa3a45a72f4/README.recipes.md#recipe_modules-raw_io
+[recipe_engine/recipe_modules/step]: https://chromium.googlesource.com/infra/luci/recipes-py.git/+/f967fa68fd4bd3e76140de7ffa449fa3a45a72f4/README.recipes.md#recipe_modules-step
+[recipe_engine/wkt/RecipeApi]: https://chromium.googlesource.com/infra/luci/recipes-py.git/+/f967fa68fd4bd3e76140de7ffa449fa3a45a72f4/recipe_engine/recipe_api.py#471
diff --git a/infra/config/generated/commit-queue.cfg b/infra/config/generated/commit-queue.cfg
index f867d10..9f5a89f 100644
--- a/infra/config/generated/commit-queue.cfg
+++ b/infra/config/generated/commit-queue.cfg
@@ -2,7 +2,7 @@
# Do not modify manually.
#
# For the schema of this file, see Config message:
-# https://luci-config.appspot.com/schemas/projects:commit-queue.cfg
+# https://config.luci.app/schemas/projects:commit-queue.cfg
cq_status_host: "chromium-cq-status.appspot.com"
config_groups {
@@ -18,7 +18,6 @@
gerrit_cq_ability {
committer_list: "googlers"
committer_list: "project-crosvm-committers"
- allow_submit_with_open_deps: true
}
tryjob {
builders {
diff --git a/infra/config/generated/cr-buildbucket.cfg b/infra/config/generated/cr-buildbucket.cfg
index 359a39e..b3670dd 100644
--- a/infra/config/generated/cr-buildbucket.cfg
+++ b/infra/config/generated/cr-buildbucket.cfg
@@ -2,7 +2,7 @@
# Do not modify manually.
#
# For the schema of this file, see BuildbucketCfg message:
-# https://luci-config.appspot.com/schemas/projects:buildbucket.cfg
+# https://config.luci.app/schemas/projects:buildbucket.cfg
buckets {
name: "ci"
diff --git a/infra/config/generated/luci-logdog.cfg b/infra/config/generated/luci-logdog.cfg
index 6f24f44..20993b6 100644
--- a/infra/config/generated/luci-logdog.cfg
+++ b/infra/config/generated/luci-logdog.cfg
@@ -2,7 +2,7 @@
# Do not modify manually.
#
# For the schema of this file, see ProjectConfig message:
-# https://luci-config.appspot.com/schemas/projects:luci-logdog.cfg
+# https://config.luci.app/schemas/projects:luci-logdog.cfg
reader_auth_groups: "all"
writer_auth_groups: "luci-logdog-chromium-writers"
diff --git a/infra/config/generated/luci-milo.cfg b/infra/config/generated/luci-milo.cfg
index 205104f..69cd44c 100644
--- a/infra/config/generated/luci-milo.cfg
+++ b/infra/config/generated/luci-milo.cfg
@@ -2,7 +2,7 @@
# Do not modify manually.
#
# For the schema of this file, see Project message:
-# https://luci-config.appspot.com/schemas/projects:luci-milo.cfg
+# https://config.luci.app/schemas/projects:luci-milo.cfg
consoles {
id: "Postsubmit"
diff --git a/infra/config/generated/luci-notify.cfg b/infra/config/generated/luci-notify.cfg
index 3c61e5f..435cd58 100644
--- a/infra/config/generated/luci-notify.cfg
+++ b/infra/config/generated/luci-notify.cfg
@@ -2,7 +2,7 @@
# Do not modify manually.
#
# For the schema of this file, see ProjectConfig message:
-# https://luci-config.appspot.com/schemas/projects:luci-notify.cfg
+# https://config.luci.app/schemas/projects:luci-notify.cfg
notifiers {
notifications {
diff --git a/infra/config/generated/luci-scheduler.cfg b/infra/config/generated/luci-scheduler.cfg
index 2c0819f..24079c0 100644
--- a/infra/config/generated/luci-scheduler.cfg
+++ b/infra/config/generated/luci-scheduler.cfg
@@ -2,7 +2,7 @@
# Do not modify manually.
#
# For the schema of this file, see ProjectConfig message:
-# https://luci-config.appspot.com/schemas/projects:luci-scheduler.cfg
+# https://config.luci.app/schemas/projects:luci-scheduler.cfg
job {
id: "build_docs"
diff --git a/infra/config/generated/project.cfg b/infra/config/generated/project.cfg
index 1a83d97..d7c8046 100644
--- a/infra/config/generated/project.cfg
+++ b/infra/config/generated/project.cfg
@@ -2,12 +2,12 @@
# Do not modify manually.
#
# For the schema of this file, see ProjectCfg message:
-# https://luci-config.appspot.com/schemas/projects:project.cfg
+# https://config.luci.app/schemas/projects:project.cfg
name: "crosvm"
access: "group:all"
lucicfg {
- version: "1.39.15"
+ version: "1.43.5"
package_dir: ".."
config_dir: "generated"
entry_point: "main.star"
diff --git a/infra/config/generated/realms.cfg b/infra/config/generated/realms.cfg
index 728e440..ad6a1a9 100644
--- a/infra/config/generated/realms.cfg
+++ b/infra/config/generated/realms.cfg
@@ -2,11 +2,15 @@
# Do not modify manually.
#
# For the schema of this file, see RealmsCfg message:
-# https://luci-config.appspot.com/schemas/projects:realms.cfg
+# https://config.luci.app/schemas/projects:realms.cfg
realms {
name: "@root"
bindings {
+ role: "role/buildbucket.owner"
+ principals: "group:mdb/crosvm-acl-luci-admin"
+ }
+ bindings {
role: "role/buildbucket.reader"
principals: "group:all"
}
diff --git a/infra/config/main.star b/infra/config/main.star
index 1e6416c..fa6f316 100755
--- a/infra/config/main.star
+++ b/infra/config/main.star
@@ -28,6 +28,7 @@
"role/swarming.poolOwner",
"role/swarming.poolUser",
"role/swarming.taskTriggerer",
+ "role/buildbucket.owner",
],
groups = "mdb/crosvm-acl-luci-admin",
),
@@ -123,8 +124,6 @@
repo = "https://chromium.googlesource.com/crosvm/crosvm",
refs = ["refs/heads/.+"], # will watch all branches
),
- # Allows us to submit chains of commits with a single CQ run.
- allow_submit_with_open_deps = True,
)
# Console showing all postsubmit verify builders
diff --git a/infra/config/recipes.cfg b/infra/config/recipes.cfg
index c86e671..ec7d374 100644
--- a/infra/config/recipes.cfg
+++ b/infra/config/recipes.cfg
@@ -20,12 +20,12 @@
"deps": {
"depot_tools": {
"branch": "refs/heads/main",
- "revision": "29e08c1737507596dbc222b74a274b53137a23e3",
+ "revision": "5a584405cae6e1fd08a6054ce9ba61b645b4d591",
"url": "https://chromium.googlesource.com/chromium/tools/depot_tools.git"
},
"recipe_engine": {
"branch": "refs/heads/main",
- "revision": "0010a25ddf783692836331ed6461ff39ffa1ef71",
+ "revision": "f967fa68fd4bd3e76140de7ffa449fa3a45a72f4",
"url": "https://chromium.googlesource.com/infra/luci/recipes-py.git"
}
},
diff --git a/io_uring/Android.bp b/io_uring/Android.bp
index 9786b34..dc4efe1 100644
--- a/io_uring/Android.bp
+++ b/io_uring/Android.bp
@@ -1,5 +1,6 @@
// This file is generated by cargo_embargo.
-// Do not modify this file as most changes will be overridden on upgrade.
+// Do not modify this file after the first "rust_*" or "genrule" module
+// because the changes will be overridden on upgrade.
// Content before the first "rust_*" or "genrule" module is preserved.
package {
diff --git a/jail/Android.bp b/jail/Android.bp
index 765f027..dd72dcb 100644
--- a/jail/Android.bp
+++ b/jail/Android.bp
@@ -1,5 +1,6 @@
// This file is generated by cargo_embargo.
-// Do not modify this file as most changes will be overridden on upgrade.
+// Do not modify this file after the first "rust_*" or "genrule" module
+// because the changes will be overridden on upgrade.
// Content before the first "rust_*" or "genrule" module is preserved.
package {
diff --git a/jail/seccomp/aarch64/xhci_device.policy b/jail/seccomp/aarch64/xhci_device.policy
index b61e29c..c70a123 100644
--- a/jail/seccomp/aarch64/xhci_device.policy
+++ b/jail/seccomp/aarch64/xhci_device.policy
@@ -11,6 +11,7 @@
faccessat2: 1
newfstatat: 1
timerfd_create: 1
+timerfd_settime: 1
getsockname: 1
openat: 1
setsockopt: 1
@@ -34,7 +35,9 @@
# 0x8008551c == USBDEVFS_ALLOC_STREAMS
# 0x8008551d == USBDEVFS_FREE_STREAMS
# 0x80085504 == USBDEVFS_SETINTERFACE
-ioctl: arg1 == 0xc0185500 || arg1 == 0x8038550a || arg1 == 0x8004551a || arg1 == 0x4008550d || arg1 == 0x8004550f || arg1 == 0x80045510 || arg1 == 0x80045515 || arg1 == 0x550b || arg1 == 0x5514 || arg1 == 0x80045505 || arg1 == 0x8108551b || arg1 == 0x40085511 || arg1 == 0x80185520 || arg1 == 0x551f || arg1 == 0x8008551c || arg1 == 0x8008551d || arg1 == 0x80085504
+# 0x80044801 == HIDIOCGRDESCSIZE
+# 0x90044802 == HIDIOCGRDESC
+ioctl: arg1 == 0xc0185500 || arg1 == 0x8038550a || arg1 == 0x8004551a || arg1 == 0x4008550d || arg1 == 0x8004550f || arg1 == 0x80045510 || arg1 == 0x80045515 || arg1 == 0x550b || arg1 == 0x5514 || arg1 == 0x80045505 || arg1 == 0x8108551b || arg1 == 0x40085511 || arg1 == 0x80185520 || arg1 == 0x551f || arg1 == 0x8008551c || arg1 == 0x8008551d || arg1 == 0x80085504 || arg1 == 0x80044801 || arg1 == 0x90044802
fstat: 1
getrandom: 1
prctl: arg0 == PR_SET_NAME
diff --git a/jail/seccomp/arm/xhci_device.policy b/jail/seccomp/arm/xhci_device.policy
index e782f03..51080d3 100644
--- a/jail/seccomp/arm/xhci_device.policy
+++ b/jail/seccomp/arm/xhci_device.policy
@@ -12,6 +12,7 @@
faccessat: 1
faccessat2: 1
timerfd_create: 1
+timerfd_settime: 1
getsockname: 1
pipe: 1
setsockopt: 1
@@ -37,7 +38,9 @@
# 0x8008551c == USBDEVFS_ALLOC_STREAMS
# 0x8008551d == USBDEVFS_FREE_STREAMS
# 0x80085504 == USBDEVFS_SETINTERFACE
-ioctl: arg1 == 0xc0105500 || arg1 == 0x802c550a || arg1 == 0x8004551a || arg1 == 0x4004550d || arg1 == 0x8004550f || arg1 == 0x80045510 || arg1 == 0x80045515 || arg1 == 0x550b || arg1 == 0x5514 || arg1 == 0x80045505 || arg1 == 0x8108551b || arg1 == 0x40085511 || arg1 == 0x80185520 || arg1 == 0x551f || arg1 == 0x8008551c || arg1 == 0x8008551d || arg1 == 0x80085504
+# 0x80044801 == HIDIOCGRDESCSIZE
+# 0x90044802 == HIDIOCGRDESC
+ioctl: arg1 == 0xc0105500 || arg1 == 0x802c550a || arg1 == 0x8004551a || arg1 == 0x4004550d || arg1 == 0x8004550f || arg1 == 0x80045510 || arg1 == 0x80045515 || arg1 == 0x550b || arg1 == 0x5514 || arg1 == 0x80045505 || arg1 == 0x8108551b || arg1 == 0x40085511 || arg1 == 0x80185520 || arg1 == 0x551f || arg1 == 0x8008551c || arg1 == 0x8008551d || arg1 == 0x80085504 || arg1 == 0x80044801 || arg1 == 0x90044802
fstat: 1
fstat64: 1
fstatat64: 1
diff --git a/jail/seccomp/x86_64/gpu_device.policy b/jail/seccomp/x86_64/gpu_device.policy
index 57a31c6..343949b 100644
--- a/jail/seccomp/x86_64/gpu_device.policy
+++ b/jail/seccomp/x86_64/gpu_device.policy
@@ -4,5 +4,5 @@
@include /usr/share/policy/crosvm/gpu_common.policy
-socket: arg0 == AF_UNIX && arg1 == SOCK_STREAM|SOCK_CLOEXEC && arg2 == 0
+socket: arg0 == AF_UNIX && arg1 in SOCK_STREAM|SOCK_CLOEXEC|SOCK_NONBLOCK && arg2 == 0
clone: arg0 & CLONE_THREAD
diff --git a/jail/seccomp/x86_64/xhci_device.policy b/jail/seccomp/x86_64/xhci_device.policy
index fdba71b..f727442 100644
--- a/jail/seccomp/x86_64/xhci_device.policy
+++ b/jail/seccomp/x86_64/xhci_device.policy
@@ -6,6 +6,7 @@
lstat: 1
timerfd_create: 1
+timerfd_settime: 1
name_to_handle_at: 1
access: 1
faccessat: 1
@@ -38,7 +39,9 @@
# 0x8008551c == USBDEVFS_ALLOC_STREAMS
# 0x8008551d == USBDEVFS_FREE_STREAMS
# 0x80085504 == USBDEVFS_SETINTERFACE
-ioctl: arg1 == 0xc0185500 || arg1 == 0x41045508 || arg1 == 0x8004550f || arg1 == 0x4008550d || arg1 == 0x8004551a || arg1 == 0x550b || arg1 == 0x80045510 || arg1 == 0x80045515 || arg1 == 0x8038550a || arg1 == 0x5514 || arg1 == 0x80045505 || arg1 == 0x8108551b || arg1 == 0x40085511 || arg1 == 0x80185520 || arg1 == 0x551f || arg1 == 0x8008551c || arg1 == 0x8008551d || arg1 == 0x80085504
+# 0x80044801 == HIDIOCGRDESCSIZE
+# 0x90044802 == HIDIOCGRDESC
+ioctl: arg1 == 0xc0185500 || arg1 == 0x41045508 || arg1 == 0x8004550f || arg1 == 0x4008550d || arg1 == 0x8004551a || arg1 == 0x550b || arg1 == 0x80045510 || arg1 == 0x80045515 || arg1 == 0x8038550a || arg1 == 0x5514 || arg1 == 0x80045505 || arg1 == 0x8108551b || arg1 == 0x40085511 || arg1 == 0x80185520 || arg1 == 0x551f || arg1 == 0x8008551c || arg1 == 0x8008551d || arg1 == 0x80085504 || arg1 == 0x80044801 || arg1 == 0x90044802
fstat: 1
newfstatat: 1
getrandom: 1
diff --git a/jail/src/helpers.rs b/jail/src/helpers.rs
index 6ec0853..29ee6b4 100644
--- a/jail/src/helpers.rs
+++ b/jail/src/helpers.rs
@@ -383,6 +383,7 @@
"/lib64",
"/usr/share/drirc.d",
"/usr/share/glvnd",
+ "/usr/share/libdrm",
"/usr/share/vulkan",
],
)?;
diff --git a/kernel_cmdline/Android.bp b/kernel_cmdline/Android.bp
index eb80e58..7fcb237 100644
--- a/kernel_cmdline/Android.bp
+++ b/kernel_cmdline/Android.bp
@@ -1,5 +1,6 @@
// This file is generated by cargo_embargo.
-// Do not modify this file as most changes will be overridden on upgrade.
+// Do not modify this file after the first "rust_*" or "genrule" module
+// because the changes will be overridden on upgrade.
// Content before the first "rust_*" or "genrule" module is preserved.
package {
diff --git a/kernel_loader/Android.bp b/kernel_loader/Android.bp
index 0499835..d3fc0df 100644
--- a/kernel_loader/Android.bp
+++ b/kernel_loader/Android.bp
@@ -1,5 +1,6 @@
// This file is generated by cargo_embargo.
-// Do not modify this file as most changes will be overridden on upgrade.
+// Do not modify this file after the first "rust_*" or "genrule" module
+// because the changes will be overridden on upgrade.
// Content before the first "rust_*" or "genrule" module is preserved.
package {
diff --git a/kvm/Android.bp b/kvm/Android.bp
index 4d2ad43..d87e60f 100644
--- a/kvm/Android.bp
+++ b/kvm/Android.bp
@@ -1,5 +1,6 @@
// This file is generated by cargo_embargo.
-// Do not modify this file as most changes will be overridden on upgrade.
+// Do not modify this file after the first "rust_*" or "genrule" module
+// because the changes will be overridden on upgrade.
// Content before the first "rust_*" or "genrule" module is preserved.
package {
diff --git a/kvm_sys/Android.bp b/kvm_sys/Android.bp
index a0d5357..e693e6b 100644
--- a/kvm_sys/Android.bp
+++ b/kvm_sys/Android.bp
@@ -1,5 +1,6 @@
// This file is generated by cargo_embargo.
-// Do not modify this file as most changes will be overridden on upgrade.
+// Do not modify this file after the first "rust_*" or "genrule" module
+// because the changes will be overridden on upgrade.
// Content before the first "rust_*" or "genrule" module is preserved.
package {
diff --git a/libcras_stub/Android.bp b/libcras_stub/Android.bp
index 3f8f7d4..dd00bda 100644
--- a/libcras_stub/Android.bp
+++ b/libcras_stub/Android.bp
@@ -1,5 +1,6 @@
// This file is generated by cargo_embargo.
-// Do not modify this file as most changes will be overridden on upgrade.
+// Do not modify this file after the first "rust_*" or "genrule" module
+// because the changes will be overridden on upgrade.
// Content before the first "rust_*" or "genrule" module is preserved.
package {
diff --git a/linux_input_sys/Android.bp b/linux_input_sys/Android.bp
index 3e8284e..44ee82c 100644
--- a/linux_input_sys/Android.bp
+++ b/linux_input_sys/Android.bp
@@ -1,5 +1,6 @@
// This file is generated by cargo_embargo.
-// Do not modify this file as most changes will be overridden on upgrade.
+// Do not modify this file after the first "rust_*" or "genrule" module
+// because the changes will be overridden on upgrade.
// Content before the first "rust_*" or "genrule" module is preserved.
package {
diff --git a/metrics/Android.bp b/metrics/Android.bp
index 4fae533..275ec22 100644
--- a/metrics/Android.bp
+++ b/metrics/Android.bp
@@ -1,5 +1,6 @@
// This file is generated by cargo_embargo.
-// Do not modify this file as most changes will be overridden on upgrade.
+// Do not modify this file after the first "rust_*" or "genrule" module
+// because the changes will be overridden on upgrade.
// Content before the first "rust_*" or "genrule" module is preserved.
package {
@@ -24,10 +25,12 @@
"libanyhow",
"libbase_rust",
"libcfg_if",
+ "libmetrics_events",
"libmetrics_generic",
"libserde",
"libsync_rust",
],
+ aliases: ["metrics_generic:metrics_product"],
}
rust_test {
@@ -48,8 +51,10 @@
"libanyhow",
"libbase_rust",
"libcfg_if",
+ "libmetrics_events",
"libmetrics_generic",
"libserde",
"libsync_rust",
],
+ aliases: ["metrics_generic:metrics_product"],
}
diff --git a/metrics/Cargo.toml b/metrics/Cargo.toml
index 06e5018..078509c 100644
--- a/metrics/Cargo.toml
+++ b/metrics/Cargo.toml
@@ -10,10 +10,9 @@
cfg-if = "*"
serde = { version = "1", features = ["derive"] }
sync = { path = "../common/sync" }
-# ANDROID: b/329312851 - Switch back to metrics_product when that bug is fixed.
-# metrics_product = { path = "../vendor/generic/metrics", package = "metrics_generic" }
-metrics_generic = { path = "../vendor/generic/metrics", package = "metrics_generic" }
+metrics_events = { path = "../metrics_events" }
+metrics_product = { path = "../vendor/generic/metrics", package = "metrics_generic" }
[target.'cfg(windows)'.dependencies]
-chrono = { version = "0.4.19", default-features = false }
+chrono = { version = "0.4.34", default-features = false, features = ["now"] }
winapi = { version = "*" }
diff --git a/metrics/src/controller.rs b/metrics/src/controller.rs
index c3eb6fd..ac64e92 100644
--- a/metrics/src/controller.rs
+++ b/metrics/src/controller.rs
@@ -6,16 +6,14 @@
use anyhow::Result;
use base::info;
-use base::warn;
use base::EventToken;
-use base::Tube;
+use base::RecvTube;
-use crate::MetricsRequest;
use crate::RequestHandler;
/// Runs the metrics controller.
pub struct MetricsController {
- pub(crate) agents: Vec<Tube>,
+ pub(crate) agents: Vec<RecvTube>,
handler: RequestHandler,
pub(crate) closed_tubes: usize,
}
@@ -30,7 +28,7 @@
}
impl MetricsController {
- pub fn new(agents: Vec<Tube>) -> Self {
+ pub fn new(agents: Vec<RecvTube>) -> Self {
Self {
agents,
handler: RequestHandler::new(),
@@ -46,13 +44,8 @@
}
/// Handles a tube that has indicated it has data ready to read.
- pub(crate) fn on_tube_readable(&self, client: &Tube) {
- match client.recv::<MetricsRequest>() {
- Ok(req) => self.handler.handle_request(req),
- Err(e) => {
- warn!("unexpected error receiving agent metrics request: {}", e)
- }
- }
+ pub(crate) fn on_tube_readable(&self, client: &RecvTube) {
+ self.handler.handle_tube_readable(client)
}
/// Handles a closed connection, and returns a bool indicating
diff --git a/metrics/src/lib.rs b/metrics/src/lib.rs
index ccedde4..a996d82 100644
--- a/metrics/src/lib.rs
+++ b/metrics/src/lib.rs
@@ -16,11 +16,8 @@
pub mod sys;
pub use controller::MetricsController;
-// ANDROID: b/329312851 - Switch back to metrics_product when that bug is fixed.
-// pub use metrics_product::MetricEventType;
-// pub use metrics_product::*;
-pub use metrics_generic::MetricEventType;
-pub use metrics_generic::*;
+pub use metrics_events::MetricEventType;
+pub use metrics_product::*;
pub type RequestHandler = MetricsRequestHandler;
diff --git a/metrics/src/sys.rs b/metrics/src/sys.rs
index e0696cf..64f09e4 100644
--- a/metrics/src/sys.rs
+++ b/metrics/src/sys.rs
@@ -6,6 +6,7 @@
if #[cfg(windows)] {
pub mod windows;
pub use windows::*;
+ pub use metrics_events::sys::windows::*;
} else if #[cfg(any(target_os = "android", target_os = "linux"))] {
pub(crate) mod linux;
}
diff --git a/metrics/src/sys/windows/system_metrics.rs b/metrics/src/sys/windows/system_metrics.rs
index 46402e1..1f3572d 100644
--- a/metrics/src/sys/windows/system_metrics.rs
+++ b/metrics/src/sys/windows/system_metrics.rs
@@ -22,7 +22,7 @@
use base::SafeDescriptor;
use base::WaitContext;
use chrono::DateTime;
-use chrono::Local;
+use chrono::Utc;
use winapi::shared::minwindef::DWORD;
use winapi::shared::minwindef::FILETIME;
use winapi::um::processthreadsapi::GetProcessTimes;
@@ -224,15 +224,15 @@
let updated_io = match *io_record {
Some(io) => ProcessIoRecord {
current: new_io,
- current_time: Local::now(),
+ current_time: Utc::now(),
last_upload: io.last_upload,
last_upload_time: io.last_upload_time,
},
None => ProcessIoRecord {
current: new_io,
- current_time: Local::now(),
+ current_time: Utc::now(),
last_upload: new_io,
- last_upload_time: Local::now(),
+ last_upload_time: Utc::now(),
},
};
*io_record = Some(updated_io);
@@ -530,9 +530,9 @@
#[derive(Copy, Clone)]
struct ProcessIoRecord {
current: ProcessIo,
- current_time: DateTime<Local>,
+ current_time: DateTime<Utc>,
last_upload: ProcessIo,
- last_upload_time: DateTime<Local>,
+ last_upload_time: DateTime<Utc>,
}
#[derive(Copy, Clone)]
diff --git a/metrics_events/Android.bp b/metrics_events/Android.bp
new file mode 100644
index 0000000..87a087e
--- /dev/null
+++ b/metrics_events/Android.bp
@@ -0,0 +1,26 @@
+// This file is generated by cargo_embargo.
+// Do not modify this file after the first "rust_*" or "genrule" module
+// because the changes will be overridden on upgrade.
+// Content before the first "rust_*" or "genrule" module is preserved.
+
+package {
+ default_applicable_licenses: ["external_crosvm_license"],
+}
+
+rust_library {
+ name: "libmetrics_events",
+ defaults: ["crosvm_inner_defaults"],
+ host_supported: true,
+ crate_name: "metrics_events",
+ cargo_env_compat: true,
+ cargo_pkg_version: "0.1.0",
+ srcs: ["src/lib.rs"],
+ edition: "2021",
+ rustlibs: [
+ "libanyhow",
+ "libcfg_if",
+ "libmetrics_events_generic",
+ "libserde",
+ ],
+ aliases: ["metrics_events_generic:metrics_events_product"],
+}
diff --git a/metrics_events/Cargo.toml b/metrics_events/Cargo.toml
new file mode 100644
index 0000000..498b09a
--- /dev/null
+++ b/metrics_events/Cargo.toml
@@ -0,0 +1,14 @@
+[package]
+name = "metrics_events"
+version = "0.1.0"
+authors = ["The ChromiumOS Authors"]
+edition = "2021"
+
+[dependencies]
+anyhow = "*"
+cfg-if = "*"
+serde = { version = "1", features = ["derive"] }
+metrics_events_product = { path = "../vendor/generic/metrics_events", package = "metrics_events_generic" }
+
+[target.'cfg(windows)'.dependencies]
+win_util = { path = "../win_util" }
diff --git a/metrics_events/src/event_types.rs b/metrics_events/src/event_types.rs
new file mode 100644
index 0000000..f1fde88
--- /dev/null
+++ b/metrics_events/src/event_types.rs
@@ -0,0 +1,54 @@
+// Copyright 2024 The ChromiumOS Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+use metrics_events_product::MetricEventType as VendorMetricEventType;
+use serde::Deserialize;
+use serde::Serialize;
+
+#[cfg(windows)]
+use crate::sys::windows::WaveFormatDetails;
+
+// TODO(mikehoyle): Create a way to generate these directly from the
+// proto for a single source-of-truth.
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub enum MetricEventType {
+ CpuUsage,
+ MemoryUsage,
+ Fps,
+ JankyFps,
+ NetworkTxRate,
+ NetworkRxRate,
+ Interrupts,
+ FrameTime,
+ EmulatorGraphicsFreeze,
+ EmulatorGraphicsUnfreeze,
+ EmulatorGfxstreamVkAbortReason,
+ ChildProcessExit {
+ exit_code: u32,
+ #[cfg(windows)]
+ process_type: win_util::ProcessType,
+ },
+ ReadIo,
+ WriteIo,
+ #[cfg(windows)]
+ AudioFormatRequestOk(WaveFormatDetails),
+ #[cfg(windows)]
+ AudioFormatModifiedOk(WaveFormatDetails),
+ #[cfg(windows)]
+ AudioFormatFailed(WaveFormatDetails),
+ TscCoresOutOfSync,
+ NetworkTxRateSummarized,
+ NetworkRxRateSummarized,
+ DllLoaded(String),
+ GraphicsHangRenderThread,
+ GraphicsHangSyncThread,
+ AudioNoopStreamForced,
+ AudioPlaybackError,
+ RtcWakeup,
+ VirtioWakeup {
+ virtio_id: u32,
+ },
+ Other(i64),
+ Vendor(VendorMetricEventType),
+}
diff --git a/metrics_events/src/lib.rs b/metrics_events/src/lib.rs
new file mode 100644
index 0000000..3411cfb
--- /dev/null
+++ b/metrics_events/src/lib.rs
@@ -0,0 +1,10 @@
+// Copyright 2024 The ChromiumOS Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+mod event_types;
+pub mod sys;
+
+pub use event_types::MetricEventType;
+pub use metrics_events_product::MetricEventType as VendorMetricEventType;
+pub use metrics_events_product::RecordDetails;
diff --git a/vendor/generic/metrics/src/sys.rs b/metrics_events/src/sys.rs
similarity index 100%
rename from vendor/generic/metrics/src/sys.rs
rename to metrics_events/src/sys.rs
diff --git a/metrics_events/src/sys/windows.rs b/metrics_events/src/sys/windows.rs
new file mode 100644
index 0000000..0b972f9
--- /dev/null
+++ b/metrics_events/src/sys/windows.rs
@@ -0,0 +1,76 @@
+// Copyright 2024 The ChromiumOS Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+use serde::Deserialize;
+use serde::Serialize;
+
+#[derive(Clone, Debug, Serialize, Deserialize, Default)]
+pub struct WaveFormatDetails {
+ // Format requested by WASAPI `GetMixFormat` system call.
+ pub requested: Option<WaveFormat>,
+ // Originally the requested wave format that's modified by the emulator. Only
+ // populated if the emulator decides the requested wave format should not be
+ // used.
+ pub modified: Option<WaveFormat>,
+ // Format that is valid and closest matching to the modified format, if the
+ // modified was rejected. Should only be populated if modified is also
+ // non-null and was rejected by WASAPI `IsFormatSupported` system call.
+ pub closest_matched: Option<WaveFormat>,
+}
+
+// Defines the format of waveformat audio data. This information is used by
+// WASAPI to determine how to process the audio playback data coming from the
+// emulator.
+//
+// The fields in the structure come from WAVEFORMATEXTENSIBLE of win32 api.
+// https://docs.microsoft.com/en-us/windows/win32/api/mmreg/ns-mmreg-waveformatextensible
+#[derive(Clone, Debug, Serialize, Deserialize, Eq, PartialEq)]
+pub struct WaveFormat {
+ // Ex. 65534 (Maps to WAVE_FORMAT_EXTENSIBLE)
+ pub format_tag: i32,
+ // Number of channels.
+ pub channels: i32,
+ // Sample rate in Hz. Ex: 48000
+ pub samples_per_sec: i32,
+ // Required average data-transfer rate for the format tag. Usually this will
+ // be samples_per_sec * block_align, since the format tag is usually
+ // WAVE_FORMAT_IEEE_FLOAT or it's extensible and SubFormat is
+ // KSDATAFORMAT_SUBTYPE_IEEE_FLOAT.
+ pub avg_bytes_per_sec: i32,
+ // Minimum atomic unit of data based on the format_tag. Usually this will
+ // just be bits_per_samples * channels.
+ pub block_align: i32,
+ // Bits used per sample. Must be a multiple of 8.
+ pub bits_per_sample: i32,
+ // Size in bytes of extra information appended to WAVEFORMATEX struct.
+ pub size_bytes: i32,
+
+ // The next fields are part of the WAVEFORMATEXTENSIBLE struct. They will only
+ // be non-null if format_tag is WAVE_FORMAT_EXTENSIBLE.
+
+ // Bit depth. Can be any value. Ex. bits_per_sample is 24,
+ // but samples is 20. Note: This value is a union, so it could mean something
+ // slightly different, but most likely won't. Refer to doc for more info.
+ pub samples: Option<i32>,
+ // Bitmask mapping channels in stream to speaker positions.
+ // Ex. 3 ( SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT )
+ pub channel_mask: Option<i64>,
+ // Similar to format_tag, but for WAVEFORMATEXTENSIBLE structs.
+ pub sub_format: Option<WaveFormatSubFormat>,
+}
+
+// Subformat GUID mapping:
+// https://github.com/retep998/winapi-rs/blob/2f76bdea3a79817ccfab496fbd1786d5a697387b/src/shared/ksmedia.rs
+#[derive(Clone, Debug, Serialize, Deserialize, Eq, PartialEq)]
+pub enum WaveFormatSubFormat {
+ Invalid,
+ Analog,
+ Pcm,
+ IeeeFloat,
+ Drm,
+ ALaw,
+ MuLaw,
+ Adpcm,
+ Mpeg,
+}
diff --git a/net_sys/Android.bp b/net_sys/Android.bp
index c874f12..be808c6 100644
--- a/net_sys/Android.bp
+++ b/net_sys/Android.bp
@@ -1,5 +1,6 @@
// This file is generated by cargo_embargo.
-// Do not modify this file as most changes will be overridden on upgrade.
+// Do not modify this file after the first "rust_*" or "genrule" module
+// because the changes will be overridden on upgrade.
// Content before the first "rust_*" or "genrule" module is preserved.
package {
diff --git a/net_util/Android.bp b/net_util/Android.bp
index ded81b0..af80e93 100644
--- a/net_util/Android.bp
+++ b/net_util/Android.bp
@@ -1,5 +1,6 @@
// This file is generated by cargo_embargo.
-// Do not modify this file as most changes will be overridden on upgrade.
+// Do not modify this file after the first "rust_*" or "genrule" module
+// because the changes will be overridden on upgrade.
// Content before the first "rust_*" or "genrule" module is preserved.
package {
diff --git a/patches/Android.bp.patch b/patches/Android.bp.patch
index eb61676..b66e5b0 100644
--- a/patches/Android.bp.patch
+++ b/patches/Android.bp.patch
@@ -1,16 +1,16 @@
-diff --git b/Android.bp a/Android.bp
-index d3d46248..bc1191a5 100644
---- b/Android.bp
-+++ a/Android.bp
+diff --git a/Android.bp b/Android.bp
+index 5df1c8cdc..7bacdd74d 100644
+--- a/Android.bp
++++ b/Android.bp
@@ -36,6 +36,7 @@ rust_binary {
name: "crosvm",
- defaults: ["crosvm_defaults"],
+ defaults: ["crosvm_inner_defaults"],
host_supported: true,
+ prefer_rlib: true,
crate_name: "crosvm",
cargo_env_compat: true,
- srcs: ["src/main.rs"],
-@@ -52,7 +52,6 @@ rust_binary {
+ cargo_pkg_version: "0.1.0",
+@@ -53,7 +54,6 @@ rust_binary {
"gdbstub_arch",
"geniezone",
"gfxstream",
@@ -18,8 +18,19 @@
"gpu",
"gpu_display",
"gunyah",
-@@ -107,6 +108,18 @@ rust_binary {
- "libthiserror",
+@@ -134,6 +134,10 @@ rust_binary {
+ },
+ android: {
+ shared_libs: [
++ // TODO(b/332677108): remove libc++ when display service is rewritten in rust.
++ "libc++",
++ "libbinder_ndk",
++ "libnativewindow",
+ "libprocessgroup",
+ ],
+ },
+@@ -142,6 +146,18 @@ rust_binary {
+ "libprocessgroup",
],
},
+ linux_bionic_arm64: {
@@ -37,7 +48,7 @@
},
ld_flags: [
"-Wl,--rpath,\\$$ORIGIN",
-@@ -191,7 +190,6 @@ rust_test {
+@@ -176,7 +192,6 @@ rust_test {
"gdbstub_arch",
"geniezone",
"gfxstream",
diff --git a/perfetto/Cargo.toml b/perfetto/Cargo.toml
index f28d3ff..13b052b 100644
--- a/perfetto/Cargo.toml
+++ b/perfetto/Cargo.toml
@@ -17,7 +17,6 @@
base = { path = "../base" }
cfg-if = "1.0.0"
cros_tracing_types = { path = "../cros_tracing_types" }
-data_model = { path = "../common/data_model" }
openssl = { version = "*", optional = true }
protobuf = "3.2"
serde = { version = "1", features = [ "derive" ] }
diff --git a/power_monitor/Android.bp b/power_monitor/Android.bp
index 84d59fd..6dad7a4 100644
--- a/power_monitor/Android.bp
+++ b/power_monitor/Android.bp
@@ -1,5 +1,6 @@
// This file is generated by cargo_embargo.
-// Do not modify this file as most changes will be overridden on upgrade.
+// Do not modify this file after the first "rust_*" or "genrule" module
+// because the changes will be overridden on upgrade.
// Content before the first "rust_*" or "genrule" module is preserved.
package {
diff --git a/power_monitor/src/lib.rs b/power_monitor/src/lib.rs
index 5327cce..75c8d47 100644
--- a/power_monitor/src/lib.rs
+++ b/power_monitor/src/lib.rs
@@ -17,6 +17,7 @@
pub battery: Option<BatteryData>,
}
+#[derive(Clone, Copy)]
pub struct BatteryData {
pub status: BatteryStatus,
pub percent: u32,
@@ -30,6 +31,7 @@
pub charge_full: u32,
}
+#[derive(Clone, Copy)]
pub enum BatteryStatus {
Unknown,
Charging,
diff --git a/power_monitor/src/powerd/mod.rs b/power_monitor/src/powerd/mod.rs
index 26d6fda..39fe6b6 100644
--- a/power_monitor/src/powerd/mod.rs
+++ b/power_monitor/src/powerd/mod.rs
@@ -96,6 +96,7 @@
pub struct DBusMonitor {
connection: Connection,
connection_fd: RawFd,
+ previous_data: Option<BatteryData>,
}
impl DBusMonitor {
@@ -126,10 +127,19 @@
Ok(Box::new(Self {
connection,
connection_fd: fds[0],
+ previous_data: None,
}))
}
}
+fn denoise_value(new_val: u32, prev_val: u32, margin: f64) -> u32 {
+ if new_val.abs_diff(prev_val) as f64 / prev_val.min(new_val).max(1) as f64 >= margin {
+ new_val
+ } else {
+ prev_val
+ }
+}
+
impl PowerMonitor for DBusMonitor {
/// Returns the newest pending `PowerData` message, if any.
/// Callers should poll `PowerMonitor` to determine when messages are available.
@@ -172,6 +182,7 @@
_ => last,
});
+ let previous_data = self.previous_data.take();
match newest_message {
Some(message) => {
let data_bytes: Vec<u8> = message.read1().map_err(DBusMonitorError::DBusRead)?;
@@ -179,7 +190,22 @@
props
.merge_from_bytes(&data_bytes)
.map_err(DBusMonitorError::ConvertProtobuf)?;
- Ok(Some(props.into()))
+ let mut data: PowerData = props.into();
+ if let (Some(new_data), Some(previous)) = (data.battery.as_mut(), previous_data) {
+ // The raw information from powerd signals isn't really that useful to
+ // the guest. Voltage/current are volatile values, so the .0333 hZ
+ // snapshot provided by powerd isn't particularly meaningful. We do
+ // need to provide *something*, but we might as well make it less noisy
+ // to avoid having the guest try to process mostly useless information.
+ // charge_counter is potentially useful to the guest, but it doesn't
+ // need to be higher precision than battery.percent.
+ new_data.voltage = denoise_value(new_data.voltage, previous.voltage, 0.1);
+ new_data.current = denoise_value(new_data.current, previous.current, 0.1);
+ new_data.charge_counter =
+ denoise_value(new_data.charge_counter, previous.charge_counter, 0.01);
+ }
+ self.previous_data = data.battery;
+ Ok(Some(data))
}
None => Ok(None),
}
diff --git a/protos/Android.bp b/protos/Android.bp
index fd0ab02..3c72b0e 100644
--- a/protos/Android.bp
+++ b/protos/Android.bp
@@ -1,5 +1,6 @@
// This file is generated by cargo_embargo.
-// Do not modify this file as most changes will be overridden on upgrade.
+// Do not modify this file after the first "rust_*" or "genrule" module
+// because the changes will be overridden on upgrade.
// Content before the first "rust_*" or "genrule" module is preserved.
package {
@@ -38,7 +39,6 @@
source_stem: "cdisk_spec",
host_supported: true,
apex_available: [
- "//apex_available:platform",
"com.android.virt",
],
}
@@ -50,7 +50,6 @@
source_stem: "registered_events",
host_supported: true,
apex_available: [
- "//apex_available:platform",
"com.android.virt",
],
}
diff --git a/protos/cargo2android_protobuf.bp b/protos/cargo2android_protobuf.bp
index 927f87f..40b0d54 100644
--- a/protos/cargo2android_protobuf.bp
+++ b/protos/cargo2android_protobuf.bp
@@ -5,7 +5,6 @@
source_stem: "cdisk_spec",
host_supported: true,
apex_available: [
- "//apex_available:platform",
"com.android.virt",
],
}
@@ -17,7 +16,6 @@
source_stem: "registered_events",
host_supported: true,
apex_available: [
- "//apex_available:platform",
"com.android.virt",
],
}
diff --git a/resources/Android.bp b/resources/Android.bp
index 5ebf2ca..a7c7354 100644
--- a/resources/Android.bp
+++ b/resources/Android.bp
@@ -1,5 +1,6 @@
// This file is generated by cargo_embargo.
-// Do not modify this file as most changes will be overridden on upgrade.
+// Do not modify this file after the first "rust_*" or "genrule" module
+// because the changes will be overridden on upgrade.
// Content before the first "rust_*" or "genrule" module is preserved.
package {
diff --git a/riscv64/Android.bp b/riscv64/Android.bp
index f5d190f..d964f79 100644
--- a/riscv64/Android.bp
+++ b/riscv64/Android.bp
@@ -1,5 +1,6 @@
// This file is generated by cargo_embargo.
-// Do not modify this file as most changes will be overridden on upgrade.
+// Do not modify this file after the first "rust_*" or "genrule" module
+// because the changes will be overridden on upgrade.
// Content before the first "rust_*" or "genrule" module is preserved.
package {
@@ -24,7 +25,6 @@
"libarch",
"libbase_rust",
"libcros_fdt",
- "libdata_model",
"libdevices",
"libgdbstub",
"libgdbstub_arch",
diff --git a/riscv64/Cargo.toml b/riscv64/Cargo.toml
index fda931b..b7255df 100644
--- a/riscv64/Cargo.toml
+++ b/riscv64/Cargo.toml
@@ -10,7 +10,6 @@
[dependencies]
arch = { path = "../arch" }
cros_fdt = { path = "../cros_fdt" }
-data_model = { path = "../common/data_model" }
devices = { path = "../devices" }
gdbstub = { version = "0.7.0", optional = true }
gdbstub_arch = { version = "0.3.0", optional = true }
diff --git a/riscv64/src/lib.rs b/riscv64/src/lib.rs
index 08dbf56..8472b96 100644
--- a/riscv64/src/lib.rs
+++ b/riscv64/src/lib.rs
@@ -213,11 +213,12 @@
let com_evt_1_3 = Event::new().map_err(Error::CreateEvent)?;
let com_evt_2_4 = Event::new().map_err(Error::CreateEvent)?;
- arch::add_serial_devices(
+ let serial_devices = arch::add_serial_devices(
components.hv_cfg.protection_type,
&mmio_bus,
- &com_evt_1_3,
- &com_evt_2_4,
+ // TODO: the IRQ numbers are bogus since the events aren't actually wired up
+ (0, &com_evt_1_3),
+ (0, &com_evt_2_4),
serial_parameters,
serial_jail,
#[cfg(feature = "swap")]
@@ -284,7 +285,7 @@
.insert(pci_bus, RISCV64_PCI_CFG_BASE, RISCV64_PCI_CFG_SIZE)
.map_err(Error::RegisterPci)?;
- get_serial_cmdline(&mut cmdline, serial_parameters, "mmio")
+ get_serial_cmdline(&mut cmdline, serial_parameters, "mmio", &serial_devices)
.map_err(Error::GetSerialCmdline)?;
for param in components.extra_kernel_params {
cmdline.insert_str(¶m).map_err(Error::Cmdline)?;
diff --git a/rutabaga_gfx/Android.bp b/rutabaga_gfx/Android.bp
index 6bd5aa0..b355aa2 100644
--- a/rutabaga_gfx/Android.bp
+++ b/rutabaga_gfx/Android.bp
@@ -1,5 +1,6 @@
// This file is generated by cargo_embargo.
-// Do not modify this file as most changes will be overridden on upgrade.
+// Do not modify this file after the first "rust_*" or "genrule" module
+// because the changes will be overridden on upgrade.
// Content before the first "rust_*" or "genrule" module is preserved.
package {
@@ -37,14 +38,25 @@
"gfxstream_unstable",
],
shared_libs: [
+ "libc++",
+ "libbase",
+ ],
+ target: {
+ android: {
+ shared_libs: [
+ "libnativewindow",
+ "libcutils",
+ "liblog",
+ ],
+ },
+ },
+ static_libs: [
"libepoxy",
"libgbm",
+ "libdrm",
"libgfxstream_backend",
"libvirglrenderer",
],
- static_libs: [
- "libdrm",
- ],
}
rust_library {
diff --git a/rutabaga_gfx/Cargo.toml b/rutabaga_gfx/Cargo.toml
index 99172e3..3b1f33e 100644
--- a/rutabaga_gfx/Cargo.toml
+++ b/rutabaga_gfx/Cargo.toml
@@ -29,7 +29,7 @@
# vulkano = { version = "0.31.0", optional = true }
[target.'cfg(any(target_os = "android", target_os = "linux"))'.dependencies]
-nix = { version = "0.27.1", features = ["event", "feature", "fs", "mman", "socket", "uio", "ioctl"] }
+nix = { version = "0.28", features = ["event", "feature", "fs", "mman", "socket", "uio", "ioctl"] }
[target.'cfg(windows)'.dependencies]
winapi = {version = "0.3", features = ["winnt", "handleapi", "processthreadsapi", "winbase"]}
diff --git a/rutabaga_gfx/patches/Android.bp.patch b/rutabaga_gfx/patches/Android.bp.patch
index f3f178a..b6e9586 100644
--- a/rutabaga_gfx/patches/Android.bp.patch
+++ b/rutabaga_gfx/patches/Android.bp.patch
@@ -1,5 +1,5 @@
diff --git a/rutabaga_gfx/Android.bp b/rutabaga_gfx/Android.bp
-index 3384aee72..6bd5aa066 100644
+index 3384aee72..0c64f2ef2 100644
--- a/rutabaga_gfx/Android.bp
+++ b/rutabaga_gfx/Android.bp
@@ -22,7 +22,6 @@ rust_library {
@@ -10,7 +10,7 @@
"virgl_renderer",
],
rustlibs: [
-@@ -34,12 +33,46 @@ rust_library {
+@@ -34,14 +33,59 @@ rust_library {
"libzerocopy",
],
proc_macros: ["libremain"],
@@ -19,16 +19,27 @@
+ ],
shared_libs: [
- "libdrm",
++ "libc++",
++ "libbase",
++ ],
++ target: {
++ android: {
++ shared_libs: [
++ "libnativewindow",
++ "libcutils",
++ "liblog",
++ ],
++ },
++ },
++ static_libs: [
"libepoxy",
"libgbm",
++ "libdrm",
+ "libgfxstream_backend",
"libvirglrenderer",
],
-+ static_libs: [
-+ "libdrm",
-+ ],
-+}
-+
+ }
+
+rust_library {
+ name: "librutabaga_gfx_gfxstream",
+ defaults: ["crosvm_inner_defaults"],
@@ -55,10 +66,12 @@
+ shared_libs: [
+ "libgfxstream_backend",
+ ],
- }
-
++}
++
rust_test {
-@@ -58,7 +91,6 @@ rust_test {
+ name: "rutabaga_gfx_test_src_lib",
+ defaults: ["crosvm_inner_defaults"],
+@@ -58,7 +102,6 @@ rust_test {
edition: "2021",
features: [
"gfxstream",
@@ -66,7 +79,7 @@
"virgl_renderer",
],
rustlibs: [
-@@ -71,9 +103,12 @@ rust_test {
+@@ -71,9 +114,12 @@ rust_test {
],
proc_macros: ["libremain"],
shared_libs: [
diff --git a/rutabaga_gfx/src/cross_domain/sys/linux.rs b/rutabaga_gfx/src/cross_domain/sys/linux.rs
index 3f3d2d7..6aaef8c 100644
--- a/rutabaga_gfx/src/cross_domain/sys/linux.rs
+++ b/rutabaga_gfx/src/cross_domain/sys/linux.rs
@@ -17,8 +17,8 @@
use nix::fcntl::FcntlArg;
use nix::sys::epoll::EpollCreateFlags;
use nix::sys::epoll::EpollFlags;
-use nix::sys::eventfd::eventfd;
use nix::sys::eventfd::EfdFlags;
+use nix::sys::eventfd::EventFd;
use nix::sys::socket::connect;
use nix::sys::socket::recvmsg;
use nix::sys::socket::sendmsg;
@@ -212,10 +212,8 @@
}
let (raw_read_pipe, raw_write_pipe) = pipe()?;
- // SAFETY: Safe because we have created the pipe above and is valid.
- let read_pipe = unsafe { File::from_raw_descriptor(raw_read_pipe) };
- // SAFETY: Safe because we have created the pipe above and is valid.
- let write_pipe = unsafe { File::from_raw_descriptor(raw_write_pipe) };
+ let read_pipe = File::from(raw_read_pipe);
+ let write_pipe = File::from(raw_write_pipe);
*descriptor = write_pipe.as_raw_descriptor();
let read_pipe_id: u32 = add_item(
@@ -257,11 +255,12 @@
}
}
-pub type Sender = File;
+pub type Sender = EventFd;
+// TODO: Receiver should be EventFd as well, but there is no way to clone a nix EventFd.
pub type Receiver = File;
pub fn channel_signal(sender: &Sender) -> RutabagaResult<()> {
- write(sender.as_raw_fd(), &1u64.to_ne_bytes())?;
+ sender.write(1)?;
Ok(())
}
@@ -276,13 +275,13 @@
}
pub fn write_volatile(file: &File, opaque_data: &[u8]) -> RutabagaResult<()> {
- write(file.as_raw_fd(), opaque_data)?;
+ write(file.as_fd(), opaque_data)?;
Ok(())
}
pub fn channel() -> RutabagaResult<(Sender, Receiver)> {
- let sender: File = eventfd(0, EfdFlags::empty())?.into();
- let receiver = sender.try_clone()?;
+ let sender = EventFd::from_flags(EfdFlags::empty())?;
+ let receiver = sender.as_fd().try_clone_to_owned()?.into();
Ok((sender, receiver))
}
diff --git a/rutabaga_gfx/src/rutabaga_os/memory_mapping.rs b/rutabaga_gfx/src/rutabaga_os/memory_mapping.rs
index d15fe81..6fadec2 100644
--- a/rutabaga_gfx/src/rutabaga_os/memory_mapping.rs
+++ b/rutabaga_gfx/src/rutabaga_os/memory_mapping.rs
@@ -23,7 +23,7 @@
pub fn as_rutabaga_mapping(&self) -> RutabagaMapping {
RutabagaMapping {
- ptr: self.mapping.addr as u64,
+ ptr: self.mapping.addr.as_ptr() as u64,
size: self.mapping.size as u64,
}
}
diff --git a/rutabaga_gfx/src/rutabaga_os/sys/linux/memory_mapping.rs b/rutabaga_gfx/src/rutabaga_os/sys/linux/memory_mapping.rs
index 7eeb33f..3503b72 100644
--- a/rutabaga_gfx/src/rutabaga_os/sys/linux/memory_mapping.rs
+++ b/rutabaga_gfx/src/rutabaga_os/sys/linux/memory_mapping.rs
@@ -3,6 +3,8 @@
// found in the LICENSE file.
use std::num::NonZeroUsize;
+use std::os::fd::AsFd;
+use std::ptr::NonNull;
use libc::c_void;
use nix::sys::mman::mmap;
@@ -22,7 +24,7 @@
/// RAII semantics including munmap when no longer needed.
#[derive(Debug)]
pub struct MemoryMapping {
- pub addr: *mut c_void,
+ pub addr: NonNull<c_void>,
pub size: usize,
}
@@ -60,7 +62,7 @@
non_zero_size,
prot,
MapFlags::MAP_SHARED,
- Some(descriptor),
+ descriptor.as_fd(),
0,
)?
};
diff --git a/rutabaga_gfx/src/rutabaga_os/sys/stub/memory_mapping.rs b/rutabaga_gfx/src/rutabaga_os/sys/stub/memory_mapping.rs
index 384bd65..6f7cc28 100644
--- a/rutabaga_gfx/src/rutabaga_os/sys/stub/memory_mapping.rs
+++ b/rutabaga_gfx/src/rutabaga_os/sys/stub/memory_mapping.rs
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+use std::ptr::NonNull;
+
use libc::c_void;
use crate::rutabaga_os::SafeDescriptor;
@@ -12,7 +14,7 @@
/// RAII semantics including munmap when no longer needed.
#[derive(Debug)]
pub struct MemoryMapping {
- pub addr: *mut c_void,
+ pub addr: NonNull<c_void>,
pub size: usize,
}
diff --git a/rutabaga_gfx/src/rutabaga_os/sys/windows/memory_mapping.rs b/rutabaga_gfx/src/rutabaga_os/sys/windows/memory_mapping.rs
index 384bd65..6f7cc28 100644
--- a/rutabaga_gfx/src/rutabaga_os/sys/windows/memory_mapping.rs
+++ b/rutabaga_gfx/src/rutabaga_os/sys/windows/memory_mapping.rs
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+use std::ptr::NonNull;
+
use libc::c_void;
use crate::rutabaga_os::SafeDescriptor;
@@ -12,7 +14,7 @@
/// RAII semantics including munmap when no longer needed.
#[derive(Debug)]
pub struct MemoryMapping {
- pub addr: *mut c_void,
+ pub addr: NonNull<c_void>,
pub size: usize,
}
diff --git a/serde_keyvalue/Android.bp b/serde_keyvalue/Android.bp
index c313641..92080a4 100644
--- a/serde_keyvalue/Android.bp
+++ b/serde_keyvalue/Android.bp
@@ -1,5 +1,6 @@
// This file is generated by cargo_embargo.
-// Do not modify this file as most changes will be overridden on upgrade.
+// Do not modify this file after the first "rust_*" or "genrule" module
+// because the changes will be overridden on upgrade.
// Content before the first "rust_*" or "genrule" module is preserved.
package {
diff --git a/serde_keyvalue/serde_keyvalue_derive/Android.bp b/serde_keyvalue/serde_keyvalue_derive/Android.bp
index 7d43764..15d9f0f 100644
--- a/serde_keyvalue/serde_keyvalue_derive/Android.bp
+++ b/serde_keyvalue/serde_keyvalue_derive/Android.bp
@@ -1,5 +1,6 @@
// This file is generated by cargo_embargo.
-// Do not modify this file as most changes will be overridden on upgrade.
+// Do not modify this file after the first "rust_*" or "genrule" module
+// because the changes will be overridden on upgrade.
// Content before the first "rust_*" or "genrule" module is preserved.
package {
diff --git a/src/crosvm/cmdline.rs b/src/crosvm/cmdline.rs
index ca87706..c6cf27e 100644
--- a/src/crosvm/cmdline.rs
+++ b/src/crosvm/cmdline.rs
@@ -133,9 +133,6 @@
BalloonStats(BalloonStatsCommand),
#[cfg(feature = "balloon")]
BalloonWs(BalloonWsCommand),
- // TODO(b/288432539): remove once concierge is migrated
- #[cfg(feature = "balloon")]
- BalloonWss(BalloonWsCommand),
Battery(BatteryCommand),
#[cfg(feature = "composite-disk")]
CreateComposite(CreateCompositeCommand),
@@ -292,7 +289,11 @@
#[derive(FromArgs)]
#[argh(subcommand, name = "resume")]
-/// Resumes the crosvm instance
+/// Resumes the crosvm instance. No-op if already running. When starting crosvm with `--restore`,
+/// this command can be used to wait until the restore is complete
+// Implementation note: All the restore work happens before crosvm becomes able to process incoming
+// commands, so really all commands can be used to wait for restore to complete, but few are side
+// effect free.
pub struct ResumeCommand {
#[argh(positional, arg_name = "VM_SOCKET")]
/// VM Socket path
@@ -622,6 +623,7 @@
#[argh(subcommand)]
pub enum UsbSubCommand {
Attach(UsbAttachCommand),
+ SecurityKeyAttach(UsbAttachKeyCommand),
Detach(UsbDetachCommand),
List(UsbListCommand),
}
@@ -645,6 +647,18 @@
}
#[derive(FromArgs)]
+/// Attach security key device
+#[argh(subcommand, name = "attach_key")]
+pub struct UsbAttachKeyCommand {
+ #[argh(positional)]
+ /// security key hidraw device path
+ pub dev_path: String,
+ #[argh(positional, arg_name = "VM_SOCKET")]
+ /// VM Socket path
+ pub socket_path: String,
+}
+
+#[derive(FromArgs)]
/// Detach usb device
#[argh(subcommand, name = "detach")]
pub struct UsbDetachCommand {
@@ -726,6 +740,9 @@
#[argh(switch)]
/// compress the ram snapshot.
pub compress_memory: bool,
+ #[argh(switch, arg_name = "encrypt")]
+ /// whether the snapshot should be encrypted
+ pub encrypt: bool,
}
#[derive(FromArgs)]
@@ -738,6 +755,9 @@
#[argh(positional, arg_name = "VM_SOCKET")]
/// VM Socket path
pub socket_path: String,
+ /// true to require an encrypted snapshot
+ #[argh(switch, arg_name = "require_encrypted")]
+ pub require_encrypted: bool,
}
#[derive(FromArgs)]
@@ -930,6 +950,12 @@
/// path to user provided ACPI table
pub acpi_table: Vec<PathBuf>,
+ #[cfg(feature = "android_display")]
+ #[argh(option, arg_name = "NAME")]
+ #[merge(strategy = overwrite_option)]
+ /// name that the Android display backend will be registered to the service manager.
+ pub android_display_service: Option<String>,
+
#[argh(option)]
#[serde(skip)] // TODO(b/255223604)
#[merge(strategy = overwrite_option)]
@@ -942,43 +968,41 @@
#[serde(skip)] // TODO(b/255223604)
pub async_executor: Option<ExecutorKind>,
+ #[cfg(feature = "balloon")]
#[argh(option, arg_name = "N")]
#[serde(skip)] // TODO(b/255223604)
#[merge(strategy = overwrite_option)]
/// amount to bias balance of memory between host and guest as the balloon inflates, in mib.
pub balloon_bias_mib: Option<i64>,
+ #[cfg(feature = "balloon")]
#[argh(option, arg_name = "PATH")]
#[serde(skip)] // TODO(b/255223604)
#[merge(strategy = overwrite_option)]
/// path for balloon controller socket.
pub balloon_control: Option<PathBuf>,
+ #[cfg(feature = "balloon")]
#[argh(switch)]
#[serde(skip)] // TODO(b/255223604)
#[merge(strategy = overwrite_option)]
/// enable page reporting in balloon.
pub balloon_page_reporting: Option<bool>,
+ #[cfg(feature = "balloon")]
#[argh(option)]
#[serde(skip)] // TODO(b/255223604)
#[merge(strategy = overwrite_option)]
/// set number of WS bins to use (default = 4).
pub balloon_ws_num_bins: Option<u8>,
+ #[cfg(feature = "balloon")]
#[argh(switch)]
#[serde(skip)] // TODO(b/255223604)
#[merge(strategy = overwrite_option)]
/// enable working set reporting in balloon.
pub balloon_ws_reporting: Option<bool>,
- // TODO(b/288432539): remove once concierge is migrated
- #[argh(switch)]
- #[serde(skip)] // TODO(b/255223604)
- #[merge(strategy = overwrite_option)]
- /// enable working set reporting in balloon.
- pub balloon_wss_reporting: Option<bool>,
-
#[argh(option)]
/// comma separated key=value pairs for setting up battery
/// device
@@ -1139,6 +1163,7 @@
/// core-types=[atom=[0,1],core=[2,3]] - set vCPU 0 and
/// vCPU 1 as intel Atom type, also set vCPU 2 and vCPU 3
/// as intel Core type.
+ /// boot-cpu=NUM - Select vCPU to boot from. (default: 0) (aarch64 only)
pub cpus: Option<CpuOptions>,
#[cfg(feature = "crash-report")]
@@ -1313,9 +1338,13 @@
/// Possible key values:
/// backend=(2d|virglrenderer|gfxstream) - Which backend to
/// use for virtio-gpu (determining rendering protocol)
+ /// max_num_displays=INT - The maximum number of concurrent
+ /// virtual displays in this VM. This must not exceed
+ /// VIRTIO_GPU_MAX_SCANOUTS (i.e. 16).
/// displays=[[GpuDisplayParameters]] - The list of virtual
- /// displays to create. See the possible key values for
- /// GpuDisplayParameters in the section below.
+ /// displays to create when booting this VM. Displays may
+ /// be hotplugged after booting. See the possible key
+ /// values for GpuDisplayParameters in the section below.
/// context-types=LIST - The list of supported context
/// types, separated by ':' (default: no contexts enabled)
/// width=INT - The width of the virtual display connected
@@ -1441,6 +1470,7 @@
#[merge(strategy = overwrite_option)]
pub hypervisor: Option<HypervisorKind>,
+ #[cfg(feature = "balloon")]
#[argh(option, arg_name = "N")]
#[serde(skip)] // TODO(b/255223604)
#[merge(strategy = overwrite_option)]
@@ -1638,6 +1668,7 @@
/// netmask for VM subnet
pub netmask: Option<std::net::Ipv4Addr>,
+ #[cfg(feature = "balloon")]
#[argh(switch)]
#[serde(skip)] // TODO(b/255223604)
#[merge(strategy = overwrite_option)]
@@ -1851,11 +1882,11 @@
/// [--pstore <path=PATH,size=SIZE>]
pub pstore: Option<Pstore>,
- #[cfg(windows)]
#[argh(switch)]
#[serde(skip)] // TODO(b/255223604)
#[merge(strategy = overwrite_option)]
/// enable virtio-pvclock.
+ /// Only available when crosvm is built with feature 'pvclock'.
pub pvclock: Option<bool>,
#[argh(option, long = "restore", arg_name = "PATH")]
@@ -2139,6 +2170,7 @@
/// (EXPERIMENTAL) enable split-irqchip support
pub split_irqchip: Option<bool>,
+ #[cfg(feature = "balloon")]
#[argh(switch)]
#[serde(skip)] // TODO(b/255223604)
#[merge(strategy = overwrite_option)]
@@ -2584,6 +2616,7 @@
{
let cpus = cmd.cpus.unwrap_or_default();
cfg.vcpu_count = cpus.num_cores;
+ cfg.boot_cpu = cpus.boot_cpu.unwrap_or_default();
// Only allow deprecated `--cpu-cluster` option only if `--cpu clusters=[...]` is not
// used.
@@ -2806,6 +2839,7 @@
pmem.read_only = read_only;
cfg.pmem_devices.push(pmem);
}
+ cfg.pvclock = cmd.pvclock.unwrap_or_default();
#[cfg(windows)]
{
@@ -2825,7 +2859,6 @@
cfg.process_invariants_data_size = cmd.process_invariants_size;
}
- cfg.pvclock = cmd.pvclock.unwrap_or_default();
#[cfg(windows)]
{
cfg.service_pipe_name = cmd.service_pipe_name;
@@ -2874,8 +2907,6 @@
cfg.socket_path = Some(socket_path);
}
- cfg.balloon_control = cmd.balloon_control;
-
cfg.vsock = cmd.vsock;
// Legacy vsock options.
@@ -3095,12 +3126,24 @@
cfg.usb = !cmd.no_usb.unwrap_or_default();
cfg.rng = !cmd.no_rng.unwrap_or_default();
- cfg.balloon = !cmd.no_balloon.unwrap_or_default();
- cfg.balloon_page_reporting = cmd.balloon_page_reporting.unwrap_or_default();
- cfg.balloon_ws_num_bins = cmd.balloon_ws_num_bins.unwrap_or(4);
- cfg.balloon_ws_reporting = cmd.balloon_ws_reporting.unwrap_or_default()
- // TODO(b/288432539): remove once concierge is migrated
- | cmd.balloon_wss_reporting.unwrap_or_default();
+
+ #[cfg(feature = "balloon")]
+ {
+ cfg.balloon = !cmd.no_balloon.unwrap_or_default();
+
+ // cfg.balloon_bias is in bytes.
+ if let Some(b) = cmd.balloon_bias_mib {
+ cfg.balloon_bias = b * 1024 * 1024;
+ }
+
+ cfg.balloon_control = cmd.balloon_control;
+ cfg.balloon_page_reporting = cmd.balloon_page_reporting.unwrap_or_default();
+ cfg.balloon_ws_num_bins = cmd.balloon_ws_num_bins.unwrap_or(4);
+ cfg.balloon_ws_reporting = cmd.balloon_ws_reporting.unwrap_or_default();
+ cfg.strict_balloon = cmd.strict_balloon.unwrap_or_default();
+ cfg.init_memory = cmd.init_mem;
+ }
+
#[cfg(feature = "audio")]
{
cfg.virtio_snds = cmd.virtio_snd;
@@ -3118,6 +3161,11 @@
.get_or_insert_with(Default::default)
.display_params
.extend(cmd.gpu_display.into_iter().map(|p| p.0));
+
+ #[cfg(feature = "android_display")]
+ {
+ cfg.android_display_service = cmd.android_display_service;
+ }
}
#[cfg(windows)]
@@ -3363,11 +3411,6 @@
cfg.pci_hotplug_slots = cmd.pci_hotplug_slots;
}
- // cfg.balloon_bias is in bytes.
- if let Some(b) = cmd.balloon_bias_mib {
- cfg.balloon_bias = b * 1024 * 1024;
- }
-
cfg.vhost_user = cmd.vhost_user;
// Convert an option from `VhostUserOption` to `VhostUserFrontendOption` with the given
@@ -3420,10 +3463,6 @@
cfg.file_backed_mappings = cmd.file_backed_mapping;
- cfg.init_memory = cmd.init_mem;
-
- cfg.strict_balloon = cmd.strict_balloon.unwrap_or_default();
-
#[cfg(target_os = "android")]
{
cfg.task_profiles = cmd.task_profiles;
diff --git a/src/crosvm/config.rs b/src/crosvm/config.rs
index c18824d..6dc60c9 100644
--- a/src/crosvm/config.rs
+++ b/src/crosvm/config.rs
@@ -86,6 +86,7 @@
const MAX_PCIE_ECAM_SIZE: u64 = ONE_MB * 256;
// by default, if enabled, the balloon WS features will use 4 bins.
+#[cfg(feature = "balloon")]
const VIRTIO_BALLOON_WS_DEFAULT_NUM_BINS: u8 = 4;
/// Indicates the location and kind of executable kernel for a VM.
@@ -134,6 +135,9 @@
/// Core Type of CPUs.
#[cfg(target_arch = "x86_64")]
pub core_types: Option<CpuCoreType>,
+ /// Select which CPU to boot from.
+ #[serde(default)]
+ pub boot_cpu: Option<usize>,
}
/// Device tree overlay configuration.
@@ -634,19 +638,28 @@
#[cfg(all(target_arch = "x86_64", unix))]
pub ac_adapter: bool,
pub acpi_tables: Vec<PathBuf>,
+ #[cfg(feature = "android_display")]
+ pub android_display_service: Option<String>,
pub android_fstab: Option<PathBuf>,
pub async_executor: Option<ExecutorKind>,
+ #[cfg(feature = "balloon")]
pub balloon: bool,
+ #[cfg(feature = "balloon")]
pub balloon_bias: i64,
+ #[cfg(feature = "balloon")]
pub balloon_control: Option<PathBuf>,
+ #[cfg(feature = "balloon")]
pub balloon_page_reporting: bool,
+ #[cfg(feature = "balloon")]
pub balloon_ws_num_bins: u8,
+ #[cfg(feature = "balloon")]
pub balloon_ws_reporting: bool,
pub battery_config: Option<BatteryConfig>,
#[cfg(windows)]
pub block_control_tube: Vec<Tube>,
#[cfg(windows)]
pub block_vhost_user_tube: Vec<Tube>,
+ pub boot_cpu: usize,
#[cfg(target_arch = "x86_64")]
pub break_linux_pci_config_io: bool,
#[cfg(windows)]
@@ -700,6 +713,7 @@
pub host_guid: Option<String>,
pub hugepages: bool,
pub hypervisor: Option<HypervisorKind>,
+ #[cfg(feature = "balloon")]
pub init_memory: Option<u64>,
pub initrd_path: Option<PathBuf>,
#[cfg(all(windows, feature = "gpu"))]
@@ -754,7 +768,6 @@
pub product_version: Option<String>,
pub protection_type: ProtectionType,
pub pstore: Option<Pstore>,
- #[cfg(windows)]
pub pvclock: bool,
/// Must be `Some` iff `protection_type == ProtectionType::UnprotectedWithFirmware`.
pub pvm_fw: Option<PathBuf>,
@@ -778,6 +791,7 @@
pub socket_path: Option<PathBuf>,
#[cfg(feature = "audio")]
pub sound: Option<PathBuf>,
+ #[cfg(feature = "balloon")]
pub strict_balloon: bool,
pub stub_pci_devices: Vec<StubPciParameters>,
pub suspended: bool,
@@ -834,15 +848,24 @@
#[cfg(all(target_arch = "x86_64", unix))]
ac_adapter: false,
acpi_tables: Vec::new(),
+ #[cfg(feature = "android_display")]
+ android_display_service: None,
android_fstab: None,
async_executor: None,
+ #[cfg(feature = "balloon")]
balloon: true,
+ #[cfg(feature = "balloon")]
balloon_bias: 0,
+ #[cfg(feature = "balloon")]
balloon_control: None,
+ #[cfg(feature = "balloon")]
balloon_page_reporting: false,
+ #[cfg(feature = "balloon")]
balloon_ws_num_bins: VIRTIO_BALLOON_WS_DEFAULT_NUM_BINS,
+ #[cfg(feature = "balloon")]
balloon_ws_reporting: false,
battery_config: None,
+ boot_cpu: 0,
#[cfg(windows)]
block_control_tube: Vec::new(),
#[cfg(windows)]
@@ -904,6 +927,7 @@
product_channel: None,
hugepages: false,
hypervisor: None,
+ #[cfg(feature = "balloon")]
init_memory: None,
initrd_path: None,
#[cfg(all(windows, feature = "gpu"))]
@@ -958,7 +982,6 @@
product_name: None,
protection_type: ProtectionType::Unprotected,
pstore: None,
- #[cfg(windows)]
pvclock: false,
pvm_fw: None,
restore_path: None,
@@ -979,6 +1002,7 @@
socket_path: None,
#[cfg(feature = "audio")]
sound: None,
+ #[cfg(feature = "balloon")]
strict_balloon: false,
stub_pci_devices: Vec::new(),
suspended: false,
@@ -1102,6 +1126,11 @@
}
}
+ if cfg.boot_cpu >= cfg.vcpu_count.unwrap_or(1) {
+ log::warn!("boot_cpu selection cannot be higher than vCPUs available, defaulting to 0");
+ cfg.boot_cpu = 0;
+ }
+
#[cfg(all(
any(target_arch = "arm", target_arch = "aarch64"),
any(target_os = "android", target_os = "linux")
@@ -1173,12 +1202,15 @@
}
}
- if !cfg.balloon && cfg.balloon_control.is_some() {
- return Err("'balloon-control' requires enabled balloon".to_string());
- }
+ #[cfg(feature = "balloon")]
+ {
+ if !cfg.balloon && cfg.balloon_control.is_some() {
+ return Err("'balloon-control' requires enabled balloon".to_string());
+ }
- if !cfg.balloon && cfg.balloon_page_reporting {
- return Err("'balloon_page_reporting' requires enabled balloon".to_string());
+ if !cfg.balloon && cfg.balloon_page_reporting {
+ return Err("'balloon_page_reporting' requires enabled balloon".to_string());
+ }
}
#[cfg(any(target_os = "android", target_os = "linux"))]
diff --git a/src/crosvm/gpu_config.rs b/src/crosvm/gpu_config.rs
index 650f4ae..c8ef821 100644
--- a/src/crosvm/gpu_config.rs
+++ b/src/crosvm/gpu_config.rs
@@ -2,10 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#[cfg(feature = "kiwi")]
-use base::warn;
-#[cfg(feature = "kiwi")]
-use battlestar::process_invariants;
+use devices::virtio::gpu::VIRTIO_GPU_MAX_SCANOUTS;
use devices::virtio::GpuDisplayMode;
use devices::virtio::GpuDisplayParameters;
#[cfg(feature = "gfxstream")]
@@ -103,37 +100,27 @@
));
}
+ if gpu_parameters.max_num_displays < 1
+ || gpu_parameters.max_num_displays > VIRTIO_GPU_MAX_SCANOUTS as u32
+ {
+ return Err(format!(
+ "`max_num_displays` must be in range [1, {}]",
+ VIRTIO_GPU_MAX_SCANOUTS
+ ));
+ }
+ if gpu_parameters.display_params.len() as u32 > gpu_parameters.max_num_displays {
+ return Err(format!(
+ "Provided more `display_params` ({}) than `max_num_displays` ({})",
+ gpu_parameters.display_params.len(),
+ gpu_parameters.max_num_displays
+ ));
+ }
+
// Add a default display if no display is specified.
if gpu_parameters.display_params.is_empty() {
gpu_parameters.display_params.push(Default::default());
}
- // Process invariants are not written to the static `PROCESS_INVARIANTS` yet, so instead of
- // calling `phenotype!(kiwi_emulator_feature, get_enable_4k_uhd_resolution)`, we have to
- // load it by ourselves.
- // TODO(b/276909432): The BSS should read the experiment flags and specify the virtual
- // display size, and then we can remove this workaround.
- #[cfg(feature = "kiwi")]
- let is_4k_uhd_enabled = match process_invariants::load_invariants(
- &cfg.process_invariants_data_handle,
- &cfg.process_invariants_data_size,
- ) {
- Ok(invariants) => invariants
- .get_flag_snapshot()
- .get_features()
- .kiwi_emulator_feature
- .clone()
- .unwrap_or_default()
- .get_enable_4k_uhd_resolution(),
- Err(e) => {
- warn!(
- "Failed to load process invariants, will not enable 4k UHD: {}",
- e
- );
- false
- }
- };
- #[cfg(not(feature = "kiwi"))]
let is_4k_uhd_enabled = false;
let (width, height) =
gpu_parameters.display_params[0].get_virtual_display_size_4k_uhd(is_4k_uhd_enabled);
@@ -172,6 +159,81 @@
}
#[test]
+ fn parse_gpu_options_max_num_displays() {
+ {
+ let gpu_params = parse_gpu_options("").unwrap();
+ assert_eq!(gpu_params.max_num_displays, VIRTIO_GPU_MAX_SCANOUTS as u32);
+ }
+ {
+ let gpu_params = parse_gpu_options("max-num-displays=5").unwrap();
+ assert_eq!(gpu_params.max_num_displays, 5);
+ }
+ {
+ let command = crate::crosvm::cmdline::RunCommand::from_args(
+ &[],
+ &["--gpu", "max-num-displays=0", "/dev/null"],
+ )
+ .unwrap();
+ assert!(Config::try_from(command).is_err());
+ }
+ {
+ let command = crate::crosvm::cmdline::RunCommand::from_args(
+ &[],
+ &[
+ "--gpu",
+ format!("max-num-displays={}", VIRTIO_GPU_MAX_SCANOUTS + 1).as_str(),
+ "/dev/null",
+ ],
+ )
+ .unwrap();
+ assert!(Config::try_from(command).is_err());
+ }
+ // TODO(b/332910955): Remove the single display restriction on Windows and enable this test.
+ #[cfg(any(target_os = "android", target_os = "linux"))]
+ {
+ let command = crate::crosvm::cmdline::RunCommand::from_args(
+ &[],
+ &[
+ "--gpu",
+ "max-num-displays=1,displays=[[mode=windowed[1920,1080]],\
+ [mode=windowed[1280,720]]]",
+ "/dev/null",
+ ],
+ )
+ .unwrap();
+ assert!(Config::try_from(command).is_err());
+ }
+ #[cfg(any(target_os = "android", target_os = "linux"))]
+ {
+ let config: Config = crate::crosvm::cmdline::RunCommand::from_args(
+ &[],
+ &[
+ "--gpu",
+ "max-num-displays=3,displays=[[mode=windowed[1920,1080]],\
+ [mode=windowed[1280,720]]]",
+ "/dev/null",
+ ],
+ )
+ .unwrap()
+ .try_into()
+ .unwrap();
+
+ let gpu_params = config.gpu_parameters.unwrap();
+
+ assert_eq!(gpu_params.max_num_displays, 3);
+ assert_eq!(gpu_params.display_params.len(), 2);
+ assert_eq!(
+ gpu_params.display_params[0].mode,
+ GpuDisplayMode::Windowed(1920, 1080)
+ );
+ assert_eq!(
+ gpu_params.display_params[1].mode,
+ GpuDisplayMode::Windowed(1280, 720)
+ );
+ }
+ }
+
+ #[test]
fn parse_gpu_options_mode() {
use devices::virtio::gpu::GpuMode;
diff --git a/src/crosvm/plugin/process.rs b/src/crosvm/plugin/process.rs
index 8616c91..0084523 100644
--- a/src/crosvm/plugin/process.rs
+++ b/src/crosvm/plugin/process.rs
@@ -29,7 +29,6 @@
use base::ScmSocket;
use base::SharedMemory;
use base::SIGRTMIN;
-use data_model::zerocopy_from_slice;
use kvm::dirty_log_bitmap_size;
use kvm::Datamatch;
use kvm::IoeventAddress;
@@ -37,6 +36,10 @@
use kvm::IrqSource;
use kvm::PicId;
use kvm::Vm;
+use kvm_sys::kvm_clock_data;
+use kvm_sys::kvm_ioapic_state;
+use kvm_sys::kvm_pic_state;
+use kvm_sys::kvm_pit_state2;
use libc::pid_t;
use libc::waitpid;
use libc::EINVAL;
@@ -56,6 +59,7 @@
use sync::Mutex;
use vm_memory::GuestAddress;
use zerocopy::AsBytes;
+use zerocopy::FromBytes;
use super::*;
@@ -79,22 +83,25 @@
state: &[u8],
) -> SysResult<()> {
match state_set.enum_value().map_err(|_| SysError::new(EINVAL))? {
- main_request::StateSet::PIC0 => vm.set_pic_state(
- PicId::Primary,
- zerocopy_from_slice(state).ok_or(SysError::new(EINVAL))?,
- ),
- main_request::StateSet::PIC1 => vm.set_pic_state(
- PicId::Secondary,
- zerocopy_from_slice(state).ok_or(SysError::new(EINVAL))?,
- ),
+ main_request::StateSet::PIC0 => {
+ let pic_state = kvm_pic_state::read_from(state).ok_or(SysError::new(EINVAL))?;
+ vm.set_pic_state(PicId::Primary, &pic_state)
+ }
+ main_request::StateSet::PIC1 => {
+ let pic_state = kvm_pic_state::read_from(state).ok_or(SysError::new(EINVAL))?;
+ vm.set_pic_state(PicId::Secondary, &pic_state)
+ }
main_request::StateSet::IOAPIC => {
- vm.set_ioapic_state(zerocopy_from_slice(state).ok_or(SysError::new(EINVAL))?)
+ let ioapic_state = kvm_ioapic_state::read_from(state).ok_or(SysError::new(EINVAL))?;
+ vm.set_ioapic_state(&ioapic_state)
}
main_request::StateSet::PIT => {
- vm.set_pit_state(zerocopy_from_slice(state).ok_or(SysError::new(EINVAL))?)
+ let pit_state = kvm_pit_state2::read_from(state).ok_or(SysError::new(EINVAL))?;
+ vm.set_pit_state(&pit_state)
}
main_request::StateSet::CLOCK => {
- vm.set_clock(zerocopy_from_slice(state).ok_or(SysError::new(EINVAL))?)
+ let clock_data = kvm_clock_data::read_from(state).ok_or(SysError::new(EINVAL))?;
+ vm.set_clock(&clock_data)
}
}
}
diff --git a/src/crosvm/plugin/vcpu.rs b/src/crosvm/plugin/vcpu.rs
index 2f0b78f..cfad4cf 100644
--- a/src/crosvm/plugin/vcpu.rs
+++ b/src/crosvm/plugin/vcpu.rs
@@ -19,13 +19,19 @@
use base::error;
use base::LayoutAllocation;
-use data_model::zerocopy_from_slice;
use kvm::CpuId;
use kvm::Vcpu;
+use kvm_sys::kvm_debugregs;
use kvm_sys::kvm_enable_cap;
+use kvm_sys::kvm_fpu;
+use kvm_sys::kvm_lapic_state;
+use kvm_sys::kvm_mp_state;
use kvm_sys::kvm_msr_entry;
use kvm_sys::kvm_msrs;
use kvm_sys::kvm_regs;
+use kvm_sys::kvm_sregs;
+use kvm_sys::kvm_vcpu_events;
+use kvm_sys::kvm_xcrs;
use kvm_sys::KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
use libc::EINVAL;
use libc::ENOENT;
@@ -40,6 +46,7 @@
use static_assertions::const_assert;
use sync::Mutex;
use zerocopy::AsBytes;
+use zerocopy::FromBytes;
use super::*;
@@ -111,28 +118,36 @@
fn set_vcpu_state(vcpu: &Vcpu, state_set: vcpu_request::StateSet, state: &[u8]) -> SysResult<()> {
match state_set {
vcpu_request::StateSet::REGS => {
- vcpu.set_regs(zerocopy_from_slice(state).ok_or(SysError::new(EINVAL))?)
+ let regs = kvm_regs::read_from(state).ok_or(SysError::new(EINVAL))?;
+ vcpu.set_regs(®s)
}
vcpu_request::StateSet::SREGS => {
- vcpu.set_sregs(zerocopy_from_slice(state).ok_or(SysError::new(EINVAL))?)
+ let sregs = kvm_sregs::read_from(state).ok_or(SysError::new(EINVAL))?;
+ vcpu.set_sregs(&sregs)
}
vcpu_request::StateSet::FPU => {
- vcpu.set_fpu(zerocopy_from_slice(state).ok_or(SysError::new(EINVAL))?)
+ let fpu = kvm_fpu::read_from(state).ok_or(SysError::new(EINVAL))?;
+ vcpu.set_fpu(&fpu)
}
vcpu_request::StateSet::DEBUGREGS => {
- vcpu.set_debugregs(zerocopy_from_slice(state).ok_or(SysError::new(EINVAL))?)
+ let debugregs = kvm_debugregs::read_from(state).ok_or(SysError::new(EINVAL))?;
+ vcpu.set_debugregs(&debugregs)
}
vcpu_request::StateSet::XCREGS => {
- vcpu.set_xcrs(zerocopy_from_slice(state).ok_or(SysError::new(EINVAL))?)
+ let xcrs = kvm_xcrs::read_from(state).ok_or(SysError::new(EINVAL))?;
+ vcpu.set_xcrs(&xcrs)
}
vcpu_request::StateSet::LAPIC => {
- vcpu.set_lapic(zerocopy_from_slice(state).ok_or(SysError::new(EINVAL))?)
+ let lapic_state = kvm_lapic_state::read_from(state).ok_or(SysError::new(EINVAL))?;
+ vcpu.set_lapic(&lapic_state)
}
vcpu_request::StateSet::MP => {
- vcpu.set_mp_state(zerocopy_from_slice(state).ok_or(SysError::new(EINVAL))?)
+ let mp_state = kvm_mp_state::read_from(state).ok_or(SysError::new(EINVAL))?;
+ vcpu.set_mp_state(&mp_state)
}
vcpu_request::StateSet::EVENTS => {
- vcpu.set_vcpu_events(zerocopy_from_slice(state).ok_or(SysError::new(EINVAL))?)
+ let vcpu_events = kvm_vcpu_events::read_from(state).ok_or(SysError::new(EINVAL))?;
+ vcpu.set_vcpu_events(&vcpu_events)
}
}
}
diff --git a/src/crosvm/sys/linux.rs b/src/crosvm/sys/linux.rs
index c47cb2a..f94096c 100644
--- a/src/crosvm/sys/linux.rs
+++ b/src/crosvm/sys/linux.rs
@@ -75,6 +75,8 @@
use device_helpers::*;
use devices::create_devices_worker_thread;
use devices::serial_device::SerialHardware;
+#[cfg(feature = "pvclock")]
+use devices::tsc::get_tsc_sync_mitigations;
use devices::vfio::VfioCommonSetup;
use devices::vfio::VfioCommonTrait;
#[cfg(feature = "gpu")]
@@ -162,6 +164,7 @@
#[cfg(feature = "pci-hotplug")]
use jail_warden::PermissiveJailWarden;
use libc;
+use metrics::MetricsController;
use minijail::Minijail;
#[cfg(feature = "pci-hotplug")]
use pci_hotplug_manager::PciHotPlugManager;
@@ -226,6 +229,7 @@
#[cfg(feature = "gpu")] render_server_fd: Option<SafeDescriptor>,
#[cfg(feature = "gpu")] has_vfio_gfx_device: bool,
#[cfg(feature = "registered_events")] registered_evt_q: &SendTube,
+ #[cfg(feature = "pvclock")] pvclock_device_tube: Option<Tube>,
) -> DeviceResult<Vec<VirtioDeviceStub>> {
let mut devs = Vec::new();
@@ -402,6 +406,27 @@
devs.push(create_rng_device(cfg.protection_type, &cfg.jail_config)?);
}
+ #[cfg(feature = "pvclock")]
+ if let Some(suspend_tube) = pvclock_device_tube {
+ let tsc_state = devices::tsc::tsc_state()?;
+ let tsc_sync_mitigations =
+ get_tsc_sync_mitigations(&tsc_state, cfg.vcpu_count.unwrap_or(1));
+ if tsc_state.core_grouping.size() > 1 {
+ // Host TSCs are not in sync. Log what mitigations are applied.
+ warn!(
+ "Host TSCs are not in sync, applying the following mitigations: {:?}",
+ tsc_sync_mitigations
+ );
+ }
+ devs.push(create_pvclock_device(
+ cfg.protection_type,
+ &cfg.jail_config,
+ tsc_state.frequency,
+ suspend_tube,
+ )?);
+ info!("virtio-pvclock is enabled for this vm");
+ }
+
#[cfg(feature = "vtpm")]
{
if cfg.vtpm_proxy {
@@ -716,6 +741,7 @@
#[cfg(feature = "gpu")] render_server_fd: Option<SafeDescriptor>,
iova_max_addr: &mut Option<u64>,
#[cfg(feature = "registered_events")] registered_evt_q: &SendTube,
+ #[cfg(feature = "pvclock")] pvclock_device_tube: Option<Tube>,
) -> DeviceResult<Vec<(Box<dyn BusDeviceObj>, Option<Minijail>)>> {
let mut devices: Vec<(Box<dyn BusDeviceObj>, Option<Minijail>)> = Vec::new();
#[cfg(feature = "balloon")]
@@ -860,6 +886,8 @@
has_vfio_gfx_device,
#[cfg(feature = "registered_events")]
registered_evt_q,
+ #[cfg(feature = "pvclock")]
+ pvclock_device_tube,
)?;
for stub in stubs {
@@ -1279,6 +1307,7 @@
#[cfg(target_arch = "x86_64")]
pci_low_start: cfg.pci_low_start,
dynamic_power_coefficient: cfg.dynamic_power_coefficient.clone(),
+ boot_cpu: cfg.boot_cpu,
})
}
@@ -1607,7 +1636,7 @@
pub fn run_config(cfg: Config) -> Result<ExitState> {
if let Some(async_executor) = cfg.async_executor {
- Executor::set_default_executor_kind(async_executor.into())
+ Executor::set_default_executor_kind(async_executor)
.context("Failed to set the default async executor")?;
}
@@ -1653,6 +1682,10 @@
// access to those files will not be possible.
info!("crosvm entering multiprocess mode");
}
+
+ let (metrics_send, metrics_recv) = Tube::directional_pair().context("metrics tube")?;
+ metrics::initialize(metrics_send);
+
#[cfg(all(feature = "pci-hotplug", feature = "swap"))]
let swap_device_helper = match &swap_controller {
Some(swap_controller) => Some(swap_controller.create_device_helper()?),
@@ -1860,6 +1893,19 @@
BTreeMap::new();
let mut iova_max_addr: Option<u64> = None;
+ // pvclock gets a tube for handling suspend/resume requests from the main thread.
+ #[cfg(feature = "pvclock")]
+ let (pvclock_host_tube, pvclock_device_tube) = if cfg.pvclock {
+ let (host, device) = Tube::pair().context("failed to create tube")?;
+ (Some(host), Some(device))
+ } else {
+ (None, None)
+ };
+ #[cfg(not(feature = "pvclock"))]
+ if cfg.pvclock {
+ bail!("pvclock device is only supported when crosvm is built with the 'pvclock' feature");
+ }
+
#[cfg(feature = "registered_events")]
let (reg_evt_wrtube, reg_evt_rdtube) =
Tube::directional_pair().context("failed to create registered event tube")?;
@@ -1891,6 +1937,8 @@
&mut iova_max_addr,
#[cfg(feature = "registered_events")]
®_evt_wrtube,
+ #[cfg(feature = "pvclock")]
+ pvclock_device_tube,
)?;
#[cfg(feature = "pci-hotplug")]
@@ -2166,6 +2214,9 @@
#[cfg(feature = "registered_events")]
reg_evt_rdtube,
guest_suspended_cvar,
+ #[cfg(feature = "pvclock")]
+ pvclock_host_tube,
+ metrics_recv,
)
}
@@ -2700,6 +2751,24 @@
}
}
+#[cfg(feature = "pvclock")]
+fn send_pvclock_cmd(tube: &Tube, command: PvClockCommand) -> Result<()> {
+ tube.send(&command)
+ .with_context(|| format!("failed to send pvclock command {:?}", command))?;
+ let resp = tube
+ .recv::<PvClockCommandResponse>()
+ .context("failed to receive pvclock command response")?;
+ if let PvClockCommandResponse::Err(e) = resp {
+ bail!("pvclock encountered error on {:?}: {}", command, e);
+ }
+ if let PvClockCommandResponse::DeviceInactive = resp {
+ warn!("Tried to send {command:?} but pvclock device was inactive");
+ } else {
+ info!("{command:?} completed with {resp:?}");
+ }
+ Ok(())
+}
+
#[cfg(target_arch = "x86_64")]
fn handle_hotplug_command<V: VmArch, Vcpu: VcpuArch>(
linux: &mut RunnableLinuxVm<V, Vcpu>,
@@ -2776,6 +2845,8 @@
vm_memory_handler_control: &'a Tube,
#[cfg(feature = "registered_events")]
registered_evt_tubes: &'a mut HashMap<RegisteredEvent, HashSet<AddressedProtoTube>>,
+ #[cfg(feature = "pvclock")]
+ pvclock_host_tube: Option<Arc<Tube>>,
}
fn process_vm_request<V: VmArch + 'static, Vcpu: VcpuArch + 'static>(
@@ -2941,10 +3012,8 @@
.restore(image, state.linux.vcpu_count)
},
);
-
- // For non s2idle guest suspension we are done
- if let VmRequest::SuspendVcpus = request {
- if state.cfg.force_s2idle {
+ if state.cfg.force_s2idle {
+ if let VmRequest::SuspendVcpus = request {
suspend_requested = true;
// Spawn s2idle wait thread.
@@ -2967,6 +3036,26 @@
})
.context("failed to spawn s2idle_wait thread")?;
}
+ } else {
+ // if not doing s2idle, the guest clock should
+ // behave as the host does, so let the guest
+ // know about the suspend / resume via
+ // virtio-pvclock.
+ #[cfg(feature = "pvclock")]
+ if let Some(ref pvclock_host_tube) = state.pvclock_host_tube {
+ let cmd = match request {
+ VmRequest::SuspendVcpus => Some(PvClockCommand::Suspend),
+ VmRequest::ResumeVcpus => Some(PvClockCommand::Resume),
+ _ => None,
+ };
+ if let Some(cmd) = cmd {
+ if let Err(e) = send_pvclock_cmd(pvclock_host_tube, cmd.clone()) {
+ error!("{:?} command failed: {:#}", cmd, e);
+ } else {
+ info!("{:?} command successfully processed", cmd);
+ }
+ }
+ }
}
response
}
@@ -3193,6 +3282,8 @@
mut swap_controller: Option<SwapController>,
#[cfg(feature = "registered_events")] reg_evt_rdtube: RecvTube,
guest_suspended_cvar: Option<Arc<(Mutex<bool>, Condvar)>>,
+ #[cfg(feature = "pvclock")] pvclock_host_tube: Option<Tube>,
+ metrics_tube: RecvTube,
) -> Result<ExitState> {
#[derive(EventToken)]
enum Token {
@@ -3370,6 +3461,9 @@
(run_mode, run_mode)
};
+ #[cfg(feature = "pvclock")]
+ let pvclock_host_tube = pvclock_host_tube.map(Arc::new);
+
// Architecture-specific code must supply a vcpu_init element for each VCPU.
assert_eq!(vcpus.len(), linux.vcpu_init.len());
@@ -3522,6 +3616,7 @@
.try_box_clone()?
.restore(image, linux.vcpu_count)
},
+ /* require_encrypted= */ false,
)?;
// Allow the vCPUs to start for real.
vcpu::kick_all_vcpus(
@@ -3538,6 +3633,21 @@
.context("static device setup complete")?;
}
+ let metrics_thread = if metrics::is_initialized() {
+ Some(
+ std::thread::Builder::new()
+ .name("metrics_thread".into())
+ .spawn(move || {
+ if let Err(e) = MetricsController::new(vec![metrics_tube]).run() {
+ error!("Metrics controller error: {:?}", e);
+ }
+ })
+ .context("metrics thread failed")?,
+ )
+ } else {
+ None
+ };
+
let mut exit_state = ExitState::Stop;
let mut pvpanic_code = PvPanicCode::Unknown;
#[cfg(feature = "registered_events")]
@@ -3728,6 +3838,8 @@
vm_memory_handler_control: &vm_memory_handler_control,
#[cfg(feature = "registered_events")]
registered_evt_tubes: &mut registered_evt_tubes,
+ #[cfg(feature = "pvclock")]
+ pvclock_host_tube: pvclock_host_tube.clone(),
};
let (exit_requested, mut ids_to_remove, add_tubes) =
process_vm_control_event(&mut state, id, socket)?;
@@ -3865,6 +3977,20 @@
// control sockets are closed when this function exits.
mem::drop(linux);
+ // Drop the hotplug manager to tell the warden process to exit before we try to join
+ // the metrics thread.
+ #[cfg(feature = "pci-hotplug")]
+ mem::drop(hotplug_manager);
+
+ // All our children should have exited by now, so closing our fd should
+ // terminate metrics. Then join so that everything gets flushed.
+ metrics::get_destructor().cleanup();
+ if let Some(metrics_thread) = metrics_thread {
+ if let Err(e) = metrics_thread.join() {
+ error!("failed to join metrics thread: {:?}", e);
+ }
+ }
+
stdin()
.set_canon_mode()
.expect("failed to restore canonical mode for terminal");
@@ -4310,6 +4436,7 @@
base::syslog::push_descriptors(&mut keep_rds);
cros_tracing::push_descriptors!(&mut keep_rds);
+ metrics::push_descriptors(&mut keep_rds);
let jail_type = VirtioDeviceType::VhostUser;
@@ -4327,14 +4454,13 @@
let device = params
.create_vhost_user_device(&mut keep_rds)
.context("failed to create vhost-user device")?;
- let mut listener = VhostUserListener::new(vhost, device.max_queue_num(), Some(&mut keep_rds))
+ let mut listener = VhostUserListener::new(vhost, Some(&mut keep_rds))
.context("failed to create the vhost listener")?;
let parent_resources = listener.take_parent_process_resources();
// Executor must be created before jail in order to prevent the jailed process from creating
// unrestricted io_urings.
- let ex = Executor::with_executor_kind(device.executor_kind().unwrap_or_default().into())
- .context("Failed to create an Executor")?;
+ let ex = Executor::new().context("Failed to create an Executor")?;
keep_rds.extend(ex.as_raw_descriptors());
// Deduplicate the FDs since minijail expects them to be unique.
@@ -4449,7 +4575,7 @@
pub fn start_devices(opts: DevicesCommand) -> anyhow::Result<()> {
if let Some(async_executor) = opts.async_executor {
- Executor::set_default_executor_kind(async_executor.into())
+ Executor::set_default_executor_kind(async_executor)
.context("Failed to set the default async executor")?;
}
diff --git a/src/crosvm/sys/linux/device_helpers.rs b/src/crosvm/sys/linux/device_helpers.rs
index 3644c89..72dbbd0 100644
--- a/src/crosvm/sys/linux/device_helpers.rs
+++ b/src/crosvm/sys/linux/device_helpers.rs
@@ -35,14 +35,15 @@
use devices::virtio::ipc_memory_mapper::CreateIpcMapperRet;
use devices::virtio::memory_mapper::BasicMemoryMapper;
use devices::virtio::memory_mapper::MemoryMapperTrait;
+#[cfg(feature = "pvclock")]
+use devices::virtio::pvclock::PvClock;
use devices::virtio::scsi::ScsiOption;
#[cfg(feature = "audio")]
use devices::virtio::snd::parameters::Parameters as SndParameters;
use devices::virtio::vfio_wrapper::VfioWrapper;
-use devices::virtio::vhost::user::vmm::VhostUserVirtioDevice;
#[cfg(feature = "net")]
use devices::virtio::vhost::user::NetBackend;
-use devices::virtio::vhost::user::VhostUserDevice;
+use devices::virtio::vhost::user::VhostUserDeviceBuilder;
use devices::virtio::vhost::user::VhostUserVsockDevice;
use devices::virtio::vsock::VsockConfig;
#[cfg(feature = "balloon")]
@@ -54,6 +55,7 @@
use devices::virtio::NetParameters;
#[cfg(feature = "net")]
use devices::virtio::NetParametersMode;
+use devices::virtio::VhostUserFrontend;
use devices::virtio::VirtioDevice;
use devices::virtio::VirtioDeviceType;
use devices::BusDeviceObj;
@@ -192,7 +194,7 @@
fn create_vhost_user_device(
self,
_keep_rds: &mut Vec<RawDescriptor>,
- ) -> anyhow::Result<Box<dyn VhostUserDevice>> {
+ ) -> anyhow::Result<Box<dyn VhostUserDeviceBuilder>> {
unimplemented!()
}
@@ -270,7 +272,7 @@
fn create_vhost_user_device(
self,
keep_rds: &mut Vec<RawDescriptor>,
- ) -> anyhow::Result<Box<dyn VhostUserDevice>> {
+ ) -> anyhow::Result<Box<dyn VhostUserDeviceBuilder>> {
let disk = self.disk;
let disk_image = disk.open()?;
let base_features = virtio::base_features(ProtectionType::Unprotected);
@@ -334,7 +336,7 @@
protection_type: ProtectionType,
opt: &VhostUserFrontendOption,
) -> DeviceResult {
- let dev = VhostUserVirtioDevice::new(
+ let dev = VhostUserFrontend::new(
opt.type_,
virtio::base_features(protection_type),
vhost_user_connection(&opt.socket)?,
@@ -354,7 +356,7 @@
protection_type: ProtectionType,
option: &VhostUserFsOption,
) -> DeviceResult {
- let dev = VhostUserVirtioDevice::new_fs(
+ let dev = VhostUserFrontend::new_fs(
virtio::base_features(protection_type),
vhost_user_connection(&option.socket)?,
option.max_queue_size,
@@ -673,6 +675,25 @@
})
}
+#[cfg(feature = "pvclock")]
+pub fn create_pvclock_device(
+ protection_type: ProtectionType,
+ jail_config: &Option<JailConfig>,
+ tsc_frequency: u64,
+ suspend_tube: Tube,
+) -> DeviceResult {
+ let dev = PvClock::new(
+ virtio::base_features(protection_type),
+ tsc_frequency,
+ suspend_tube,
+ );
+
+ Ok(VirtioDeviceStub {
+ dev: Box::new(dev),
+ jail: simple_jail(jail_config, "pvclock_device")?,
+ })
+}
+
#[cfg(feature = "net")]
impl VirtioDeviceBuilder for &NetParameters {
const NAME: &'static str = "net";
@@ -731,7 +752,7 @@
fn create_vhost_user_device(
self,
keep_rds: &mut Vec<RawDescriptor>,
- ) -> anyhow::Result<Box<dyn VhostUserDevice>> {
+ ) -> anyhow::Result<Box<dyn VhostUserDeviceBuilder>> {
let vq_pairs = self.vq_pairs.unwrap_or(1);
let multi_vq = vq_pairs > 1 && self.vhost_net.is_none();
let (tap, _mac) = create_tap_for_net_device(&self.mode, multi_vq)?;
@@ -936,7 +957,7 @@
fn create_vhost_user_device(
self,
keep_rds: &mut Vec<RawDescriptor>,
- ) -> anyhow::Result<Box<dyn VhostUserDevice>> {
+ ) -> anyhow::Result<Box<dyn VhostUserDeviceBuilder>> {
let vsock_device = VhostUserVsockDevice::new(self.cid, &self.vhost_device)?;
keep_rds.push(vsock_device.as_raw_descriptor());
@@ -1230,7 +1251,7 @@
fn create_vhost_user_device(
self,
keep_rds: &mut Vec<RawDescriptor>,
- ) -> anyhow::Result<Box<dyn VhostUserDevice>> {
+ ) -> anyhow::Result<Box<dyn VhostUserDeviceBuilder>> {
Ok(Box::new(virtio::vhost::user::create_vu_console_device(
self, keep_rds,
)?))
diff --git a/src/crosvm/sys/linux/gpu.rs b/src/crosvm/sys/linux/gpu.rs
index 0cb158a..64879a6 100644
--- a/src/crosvm/sys/linux/gpu.rs
+++ b/src/crosvm/sys/linux/gpu.rs
@@ -125,6 +125,11 @@
virtio::DisplayBackend::Stub,
];
+ #[cfg(feature = "android_display")]
+ if let Some(service_name) = &cfg.android_display_service {
+ display_backends.insert(0, virtio::DisplayBackend::Android(service_name.to_string()));
+ }
+
// Use the unnamed socket for GPU display screens.
if let Some(socket_path) = cfg.wayland_socket_paths.get("") {
display_backends.insert(
@@ -188,7 +193,7 @@
socket_path.display(),
)
})?;
- jail.mount_bind(dir, dir, true)?;
+ jail.mount(dir, dir, "", (libc::MS_BIND | libc::MS_REC) as usize)?;
}
Some(jail)
diff --git a/src/crosvm/sys/linux/jail_warden.rs b/src/crosvm/sys/linux/jail_warden.rs
index 3cd9e34..cee109e 100644
--- a/src/crosvm/sys/linux/jail_warden.rs
+++ b/src/crosvm/sys/linux/jail_warden.rs
@@ -88,6 +88,7 @@
let mut keep_rds = Vec::new();
syslog::push_descriptors(&mut keep_rds);
cros_tracing::push_descriptors!(&mut keep_rds);
+ metrics::push_descriptors(&mut keep_rds);
let (main_tube, worker_tube) = Tube::pair()?;
keep_rds.push(worker_tube.as_raw_descriptor());
#[cfg(feature = "swap")]
@@ -201,6 +202,7 @@
let mut keep_rds = vec![];
syslog::push_descriptors(&mut keep_rds);
cros_tracing::push_descriptors!(&mut keep_rds);
+ metrics::push_descriptors(&mut keep_rds);
keep_rds.extend(pci_device.keep_rds());
let proxy_device_primitive = ChildProcIntf::new(
pci_device,
diff --git a/src/crosvm/sys/windows/broker.rs b/src/crosvm/sys/windows/broker.rs
index 55f8d28..c5ba518 100644
--- a/src/crosvm/sys/windows/broker.rs
+++ b/src/crosvm/sys/windows/broker.rs
@@ -102,8 +102,6 @@
use gpu_display::WindowProcedureThread;
#[cfg(feature = "gpu")]
use gpu_display::WindowProcedureThreadBuilder;
-use metrics::protos::event_details::EmulatorChildProcessExitDetails;
-use metrics::protos::event_details::RecordDetails;
use metrics::MetricEventType;
#[cfg(all(feature = "net", feature = "slirp"))]
use net_util::slirp::sys::windows::SlirpStartupConfig;
@@ -399,12 +397,10 @@
if self.process_type != ProcessType::Metrics {
let exit_code = self.child.wait();
if let Ok(Some(exit_code)) = exit_code {
- let mut details = RecordDetails::new();
- let mut exit_details = EmulatorChildProcessExitDetails::new();
- exit_details.set_exit_code(exit_code as u32);
- exit_details.set_process_type(self.process_type.into());
- details.emulator_child_process_exit_details = Some(exit_details).into();
- metrics::log_event_with_details(MetricEventType::ChildProcessExit, &details);
+ metrics::log_event(MetricEventType::ChildProcessExit {
+ exit_code: exit_code as u32,
+ process_type: self.process_type,
+ });
} else {
error!(
"Failed to log exit code for process: {:?}, couldn't get exit code",
@@ -466,10 +462,11 @@
/// IMPORTANT NOTE: The metrics process must receive the client (second) end
/// of the Tube pair in order to allow the connection to be properly shut
/// down without data loss.
-fn metrics_tube_pair(metric_tubes: &mut Vec<Tube>) -> Result<Tube> {
+fn metrics_tube_pair(metric_tubes: &mut Vec<RecvTube>) -> Result<SendTube> {
// TODO(nkgold): as written, this Tube pair won't handle ancillary data properly because the
// PIDs are not set properly at each end; however, we don't plan to send ancillary data.
- let (t1, t2) = Tube::pair().exit_context(Exit::CreateTube, "failed to create tube")?;
+ let (t1, t2) =
+ Tube::directional_pair().exit_context(Exit::CreateTube, "failed to create tube")?;
metric_tubes.push(t2);
Ok(t1)
}
@@ -1193,7 +1190,7 @@
exit_events: &mut Vec<Event>,
wait_ctx: &mut WaitContext<Token>,
main_child: &mut ChildProcess,
- metric_tubes: &mut Vec<Tube>,
+ metric_tubes: &mut Vec<RecvTube>,
#[cfg(feature = "process-invariants")] process_invariants: &EmulatorProcessInvariants,
) -> Result<Vec<ChildProcess>> {
let mut block_children = Vec::new();
@@ -1421,7 +1418,7 @@
wait_ctx: &mut WaitContext<Token>,
cfg: &mut Config,
log_args: &LogArgs,
- metric_tubes: &mut Vec<Tube>,
+ metric_tubes: &mut Vec<RecvTube>,
#[cfg(feature = "process-invariants")] process_invariants: &EmulatorProcessInvariants,
) -> Result<(ChildProcess, ChildProcess)> {
let (host_pipe, guest_pipe) = named_pipes::pair_with_buffer_size(
@@ -1609,7 +1606,7 @@
main_child: &mut ChildProcess,
children: &mut HashMap<u32, ChildCleanup>,
wait_ctx: &mut WaitContext<Token>,
- metric_tubes: &mut Vec<Tube>,
+ metric_tubes: &mut Vec<RecvTube>,
#[cfg(feature = "process-invariants")] process_invariants: &EmulatorProcessInvariants,
) -> Result<ChildProcess> {
// Extract the backend config from the sound config, so it can run elsewhere.
@@ -1802,7 +1799,7 @@
main_child: &mut ChildProcess,
children: &mut HashMap<u32, ChildCleanup>,
wait_ctx: &mut WaitContext<Token>,
- metric_tubes: &mut Vec<Tube>,
+ metric_tubes: &mut Vec<RecvTube>,
wndproc_thread_builder: WindowProcedureThreadBuilder,
#[cfg(feature = "process-invariants")] process_invariants: &EmulatorProcessInvariants,
) -> Result<ChildProcess> {
diff --git a/src/main.rs b/src/main.rs
index 4c8fd56..c65fab9 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -22,7 +22,6 @@
use base::syslog::LogArgs;
use base::syslog::LogConfig;
use cmdline::RunCommand;
-use cmdline::UsbAttachCommand;
mod crosvm;
use crosvm::cmdline;
#[cfg(feature = "plugin")]
@@ -66,6 +65,7 @@
use vm_control::client::do_net_add;
#[cfg(feature = "pci-hotplug")]
use vm_control::client::do_net_remove;
+use vm_control::client::do_security_key_attach;
use vm_control::client::do_swap_status;
use vm_control::client::do_usb_attach;
use vm_control::client::do_usb_detach;
@@ -503,7 +503,7 @@
fn start_device(opts: cmdline::DeviceCommand) -> std::result::Result<(), ()> {
if let Some(async_executor) = opts.async_executor {
- cros_async::Executor::set_default_executor_kind(async_executor.into())
+ cros_async::Executor::set_default_executor_kind(async_executor)
.map_err(|e| error!("Failed to set the default async executor: {:#}", e))?;
}
@@ -583,12 +583,18 @@
}
}
-fn usb_attach(cmd: UsbAttachCommand) -> ModifyUsbResult<UsbControlResult> {
+fn usb_attach(cmd: cmdline::UsbAttachCommand) -> ModifyUsbResult<UsbControlResult> {
let dev_path = Path::new(&cmd.dev_path);
do_usb_attach(cmd.socket_path, dev_path)
}
+fn security_key_attach(cmd: cmdline::UsbAttachKeyCommand) -> ModifyUsbResult<UsbControlResult> {
+ let dev_path = Path::new(&cmd.dev_path);
+
+ do_security_key_attach(cmd.socket_path, dev_path)
+}
+
fn usb_detach(cmd: cmdline::UsbDetachCommand) -> ModifyUsbResult<UsbControlResult> {
do_usb_detach(cmd.socket_path, cmd.port)
}
@@ -600,6 +606,7 @@
fn modify_usb(cmd: cmdline::UsbCommand) -> std::result::Result<(), ()> {
let result = match cmd.command {
cmdline::UsbSubCommand::Attach(cmd) => usb_attach(cmd),
+ cmdline::UsbSubCommand::SecurityKeyAttach(cmd) => security_key_attach(cmd),
cmdline::UsbSubCommand::Detach(cmd) => usb_detach(cmd),
cmdline::UsbSubCommand::List(cmd) => usb_list(cmd),
};
@@ -622,12 +629,14 @@
let req = VmRequest::Snapshot(SnapshotCommand::Take {
snapshot_path: take_cmd.snapshot_path,
compress_memory: take_cmd.compress_memory,
+ encrypt: take_cmd.encrypt,
});
(take_cmd.socket_path, req)
}
Restore(path) => {
let req = VmRequest::Restore(RestoreCommand::Apply {
restore_path: path.snapshot_path,
+ require_encrypted: path.require_encrypted,
});
(path.socket_path, req)
}
@@ -786,11 +795,6 @@
CrossPlatformCommands::BalloonWs(cmd) => {
balloon_ws(cmd).map_err(|_| anyhow!("balloon_ws subcommand failed"))
}
- // TODO(b/288432539): remove once concierge is migrated
- #[cfg(feature = "balloon")]
- CrossPlatformCommands::BalloonWss(cmd) => {
- balloon_ws(cmd).map_err(|_| anyhow!("balloon_ws subcommand failed"))
- }
CrossPlatformCommands::Battery(cmd) => {
modify_battery(cmd).map_err(|_| anyhow!("battery subcommand failed"))
}
diff --git a/src/sys/windows.rs b/src/sys/windows.rs
index 4f4399c..beb2a5a 100644
--- a/src/sys/windows.rs
+++ b/src/sys/windows.rs
@@ -285,7 +285,7 @@
type DeviceResult<T = VirtioDeviceStub> = Result<T>;
fn create_vhost_user_block_device(cfg: &Config, disk_device_tube: Tube) -> DeviceResult {
- let dev = virtio::vhost::user::vmm::VhostUserVirtioDevice::new(
+ let dev = virtio::VhostUserFrontend::new(
virtio::DeviceType::Block,
virtio::base_features(cfg.protection_type),
disk_device_tube,
@@ -323,7 +323,7 @@
#[cfg(feature = "gpu")]
fn create_vhost_user_gpu_device(base_features: u64, vhost_user_tube: Tube) -> DeviceResult {
- let dev = virtio::vhost::user::vmm::VhostUserVirtioDevice::new(
+ let dev = virtio::VhostUserFrontend::new(
virtio::DeviceType::Gpu,
base_features,
vhost_user_tube,
@@ -359,7 +359,7 @@
#[cfg(feature = "audio")]
fn create_vhost_user_snd_device(base_features: u64, vhost_user_tube: Tube) -> DeviceResult {
- let dev = virtio::vhost::user::vmm::VhostUserVirtioDevice::new(
+ let dev = virtio::VhostUserFrontend::new(
virtio::DeviceType::Sound,
base_features,
vhost_user_tube,
@@ -414,7 +414,7 @@
#[cfg(feature = "slirp")]
fn create_vhost_user_net_device(cfg: &Config, net_device_tube: Tube) -> DeviceResult {
let features = virtio::base_features(cfg.protection_type);
- let dev = virtio::vhost::user::vmm::VhostUserVirtioDevice::new(
+ let dev = virtio::VhostUserFrontend::new(
virtio::DeviceType::Net,
features,
net_device_tube,
@@ -1481,6 +1481,7 @@
.try_box_clone()?
.restore(image, guest_os.vcpu_count)
},
+ /* require_encrypted= */ false,
)?;
// Allow the vCPUs to start for real.
kick_all_vcpus(
@@ -2098,6 +2099,7 @@
dynamic_power_coefficient: cfg.dynamic_power_coefficient.clone(),
#[cfg(target_arch = "x86_64")]
break_linux_pci_config_io: cfg.break_linux_pci_config_io,
+ boot_cpu: cfg.boot_cpu,
})
}
diff --git a/src/sys/windows/main.rs b/src/sys/windows/main.rs
index 4436077..7775b5e 100644
--- a/src/sys/windows/main.rs
+++ b/src/sys/windows/main.rs
@@ -22,8 +22,6 @@
use crosvm_cli::sys::windows::exit::Exit;
use crosvm_cli::sys::windows::exit::ExitContext;
use crosvm_cli::sys::windows::exit::ExitContextAnyhow;
-use metrics::protos::event_details::EmulatorDllDetails;
-use metrics::protos::event_details::RecordDetails;
use metrics::MetricEventType;
#[cfg(feature = "slirp")]
use net_util::slirp::sys::windows::SlirpStartupConfig;
@@ -110,11 +108,7 @@
}
fn report_dll_loaded(dll_name: String) {
- let mut dll_load_details = EmulatorDllDetails::new();
- dll_load_details.set_dll_base_name(dll_name);
- let mut details = RecordDetails::new();
- details.emulator_dll_details = Some(dll_load_details).into();
- metrics::log_event_with_details(MetricEventType::DllLoaded, &details);
+ metrics::log_event(MetricEventType::DllLoaded(dll_name));
}
pub fn get_library_watcher(
diff --git a/swap/Android.bp b/swap/Android.bp
index a4bd094..742feb7 100644
--- a/swap/Android.bp
+++ b/swap/Android.bp
@@ -1,5 +1,6 @@
// This file is generated by cargo_embargo.
-// Do not modify this file as most changes will be overridden on upgrade.
+// Do not modify this file after the first "rust_*" or "genrule" module
+// because the changes will be overridden on upgrade.
// Content before the first "rust_*" or "genrule" module is preserved.
package {
@@ -25,9 +26,9 @@
"libbase_rust",
"libcfg_if",
"libcros_tracing",
- "libdata_model",
"libjail",
"liblibc",
+ "libmetrics",
"libnum_cpus",
"libonce_cell",
"libserde",
diff --git a/swap/Cargo.toml b/swap/Cargo.toml
index 62eb9f0..86d7e55 100644
--- a/swap/Cargo.toml
+++ b/swap/Cargo.toml
@@ -21,8 +21,8 @@
base = { path = "../base" }
cfg-if = "*"
cros_tracing = { path = "../cros_tracing" }
-data_model = { path = "../common/data_model" }
jail = { path = "../jail" }
+metrics = { path = "../metrics" }
num_cpus = "*"
once_cell = "*"
remain = "*"
diff --git a/swap/src/controller.rs b/swap/src/controller.rs
index 8322ddb..928c2ac 100644
--- a/swap/src/controller.rs
+++ b/swap/src/controller.rs
@@ -197,6 +197,7 @@
syslog::push_descriptors(&mut keep_rds);
cros_tracing::push_descriptors!(&mut keep_rds);
+ metrics::push_descriptors(&mut keep_rds);
keep_rds.extend(guest_memory.as_raw_descriptors());
keep_rds.extend(uffd_factory.as_raw_descriptors());
diff --git a/swap/src/processes.rs b/swap/src/processes.rs
index 0d89169..b0fef61 100644
--- a/swap/src/processes.rs
+++ b/swap/src/processes.rs
@@ -6,11 +6,13 @@
use std::fs::read_to_string;
use std::num::ParseIntError;
+use std::path::Path;
use std::str::FromStr;
use std::thread::sleep;
use std::time::Duration;
use anyhow::anyhow;
+use anyhow::bail;
use anyhow::Context;
use anyhow::Result;
use base::linux::getpid;
@@ -41,13 +43,20 @@
///
/// This must be called from the main process.
pub fn freeze_child_processes(monitor_pid: Pid) -> Result<ProcessesGuard> {
- let guard = ProcessesGuard {
+ let mut guard = ProcessesGuard {
pids: load_descendants(getpid(), monitor_pid)?,
};
- guard.stop_the_world().context("stop the world")?;
+ for _ in 0..3 {
+ guard.stop_the_world().context("stop the world")?;
+ let pids_after = load_descendants(getpid(), monitor_pid)?;
+ if pids_after == guard.pids {
+ return Ok(guard);
+ }
+ guard.pids = pids_after;
+ }
- Ok(guard)
+ bail!("new processes forked while freezing");
}
impl ProcessesGuard {
@@ -139,10 +148,9 @@
chars.next()
}
-fn wait_process_stopped(pid: Pid) -> Result<()> {
- let process_stat_path = format!("/proc/{}/stat", pid);
+fn wait_for_task_stopped(task_path: &Path) -> Result<()> {
for _ in 0..10 {
- let stat = read_to_string(&process_stat_path).context("read process status")?;
+ let stat = read_to_string(task_path.join("stat")).context("read process status")?;
if let Some(state) = parse_process_state(&stat) {
if state == 'T' {
return Ok(());
@@ -153,6 +161,14 @@
Err(anyhow!("time out"))
}
+fn wait_process_stopped(pid: Pid) -> Result<()> {
+ let all_tasks = std::fs::read_dir(format!("/proc/{}/task", pid)).context("read tasks")?;
+ for task in all_tasks {
+ wait_for_task_stopped(&task.context("read task entry")?.path()).context("wait for task")?;
+ }
+ Ok(())
+}
+
#[cfg(test)]
mod tests {
use super::*;
diff --git a/swap/src/userfaultfd.rs b/swap/src/userfaultfd.rs
index 41b7c41..10b9df6 100644
--- a/swap/src/userfaultfd.rs
+++ b/swap/src/userfaultfd.rs
@@ -25,6 +25,7 @@
use base::AsRawDescriptor;
use base::AsRawDescriptors;
use base::FromRawDescriptor;
+use base::IntoRawDescriptor;
use base::MappedRegion;
use base::MemoryMapping;
use base::MemoryMappingBuilder;
@@ -396,7 +397,7 @@
pub fn try_clone(&self) -> Result<Self> {
let dup_desc = base::clone_descriptor(self).map_err(Error::Clone)?;
// SAFETY: no one owns dup_desc.
- let uffd = unsafe { Self::from_raw_descriptor(dup_desc) };
+ let uffd = Self::from(unsafe { Uffd::from_raw_fd(dup_desc.into_raw_descriptor()) });
Ok(uffd)
}
}
diff --git a/third_party/vmm_vhost/.buildkite/pipeline.yml b/third_party/vmm_vhost/.buildkite/pipeline.yml
deleted file mode 100644
index 0e77e1f..0000000
--- a/third_party/vmm_vhost/.buildkite/pipeline.yml
+++ /dev/null
@@ -1,17 +0,0 @@
-# Copyright 2021 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE-BSD-Google file.
-
-steps:
- - label: "clippy-x86-custom"
- commands:
- - cargo clippy --all-features --all-targets --workspace -- -D warnings
- retry:
- automatic: false
- agents:
- platform: x86_64.metal
- os: linux
- plugins:
- - docker#v3.0.1:
- image: "rustvmm/dev:v12"
- always-pull: true
diff --git a/third_party/vmm_vhost/.cargo/config b/third_party/vmm_vhost/.cargo/config
deleted file mode 100644
index bf8523e..0000000
--- a/third_party/vmm_vhost/.cargo/config
+++ /dev/null
@@ -1,5 +0,0 @@
-# This workaround is needed because the linker is unable to find __addtf3,
-# __multf3 and __subtf3.
-# Related issue: https://github.com/rust-lang/compiler-builtins/issues/201
-[target.aarch64-unknown-linux-musl]
-rustflags = [ "-C", "target-feature=+crt-static", "-C", "link-arg=-lgcc"]
diff --git a/third_party/vmm_vhost/.github/dependabot.yml b/third_party/vmm_vhost/.github/dependabot.yml
deleted file mode 100644
index 4fcd556..0000000
--- a/third_party/vmm_vhost/.github/dependabot.yml
+++ /dev/null
@@ -1,7 +0,0 @@
-version: 2
-updates:
-- package-ecosystem: gitsubmodule
- directory: "/"
- schedule:
- interval: daily
- open-pull-requests-limit: 10
diff --git a/third_party/vmm_vhost/Android.bp b/third_party/vmm_vhost/Android.bp
index 27d85b0..13f9871 100644
--- a/third_party/vmm_vhost/Android.bp
+++ b/third_party/vmm_vhost/Android.bp
@@ -1,5 +1,6 @@
// This file is generated by cargo_embargo.
-// Do not modify this file as most changes will be overridden on upgrade.
+// Do not modify this file after the first "rust_*" or "genrule" module
+// because the changes will be overridden on upgrade.
// Content before the first "rust_*" or "genrule" module is preserved.
package {
@@ -56,7 +57,6 @@
"libbase_rust",
"libbitflags",
"libcfg_if",
- "libdata_model",
"liblibc",
"libtempfile",
"libthiserror",
@@ -88,7 +88,6 @@
"libbase_rust",
"libbitflags",
"libcfg_if",
- "libdata_model",
"liblibc",
"libtempfile",
"libthiserror",
diff --git a/third_party/vmm_vhost/Cargo.toml b/third_party/vmm_vhost/Cargo.toml
index 9309618..18ff132 100644
--- a/third_party/vmm_vhost/Cargo.toml
+++ b/third_party/vmm_vhost/Cargo.toml
@@ -18,7 +18,6 @@
base = { path = "../../base" }
bitflags = "2.3"
cfg-if = "1.0.0"
-data_model = { path = "../../common/data_model" }
enumn = "0.1.0"
libc = ">=0.2.39"
remain = "*"
diff --git a/third_party/vmm_vhost/README.md b/third_party/vmm_vhost/README.md
index 227aa14..34141ab 100644
--- a/third_party/vmm_vhost/README.md
+++ b/third_party/vmm_vhost/README.md
@@ -12,6 +12,6 @@
establish virtqueue sharing with a user space process on the same host. It uses communication over a
Unix domain socket to share file descriptors in the ancillary data of the message.
-The protocol defines two sides of the communication, master and slave. Master is the application
-that shares its virtqueues, slave is the consumer of the virtqueues. Master and slave can be either
-a client (i.e. connecting) or server (listening) in the socket communication.
+The protocol defines two sides of the communication, frontend and backend. Frontend is the
+application that shares its virtqueues, backend is the consumer of the virtqueues. Frontend and
+backend can be either a client (i.e. connecting) or server (listening) in the socket communication.
diff --git a/third_party/vmm_vhost/src/master.rs b/third_party/vmm_vhost/src/backend_client.rs
similarity index 75%
rename from third_party/vmm_vhost/src/master.rs
rename to third_party/vmm_vhost/src/backend_client.rs
index 9fc2eb7..b1bbfa6 100644
--- a/third_party/vmm_vhost/src/master.rs
+++ b/third_party/vmm_vhost/src/backend_client.rs
@@ -1,8 +1,6 @@
// Copyright (C) 2019 Alibaba Cloud Computing. All rights reserved.
// SPDX-License-Identifier: Apache-2.0
-//! Traits and Struct for vhost-user master.
-
use std::fs::File;
use std::mem;
use std::path::Path;
@@ -12,7 +10,6 @@
use base::Event;
use base::RawDescriptor;
use base::INVALID_DESCRIPTOR;
-use data_model::zerocopy_from_reader;
use zerocopy::AsBytes;
use zerocopy::FromBytes;
@@ -22,16 +19,15 @@
use crate::message::*;
use crate::Connection;
use crate::Error as VhostUserError;
-use crate::MasterReq;
+use crate::FrontendReq;
use crate::Result as VhostUserResult;
use crate::Result;
use crate::SystemStream;
/// Client for a vhost-user device. The API is a thin abstraction over the vhost-user protocol.
-pub struct Master {
- // Used to send requests to the slave.
- main_sock: Connection<MasterReq>,
- // Cached virtio features from the slave.
+pub struct BackendClient {
+ connection: Connection<FrontendReq>,
+ // Cached virtio features from the backend.
virtio_features: u64,
// Cached acked virtio features from the driver.
acked_virtio_features: u64,
@@ -39,23 +35,23 @@
acked_protocol_features: u64,
}
-impl Master {
+impl BackendClient {
/// Create a new instance from a Unix stream socket.
pub fn from_stream(sock: SystemStream) -> Self {
Self::new(Connection::from(sock))
}
/// Create a new instance.
- fn new(ep: Connection<MasterReq>) -> Self {
- Master {
- main_sock: ep,
+ fn new(connection: Connection<FrontendReq>) -> Self {
+ BackendClient {
+ connection,
virtio_features: 0,
acked_virtio_features: 0,
acked_protocol_features: 0,
}
}
- /// Create a new vhost-user master connection.
+ /// Create a new instance.
///
/// Will retry as the backend may not be ready to accept the connection.
///
@@ -86,7 +82,7 @@
/// Get a bitmask of supported virtio/vhost features.
pub fn get_features(&mut self) -> Result<u64> {
- let hdr = self.send_request_header(MasterReq::GET_FEATURES, None)?;
+ let hdr = self.send_request_header(FrontendReq::GET_FEATURES, None)?;
let val = self.recv_reply::<VhostUserU64>(&hdr)?;
self.virtio_features = val.value;
Ok(self.virtio_features)
@@ -96,7 +92,7 @@
/// This should be a subset of supported features from get_features().
pub fn set_features(&mut self, features: u64) -> Result<()> {
let val = VhostUserU64::new(features);
- let hdr = self.send_request_with_body(MasterReq::SET_FEATURES, &val, None)?;
+ let hdr = self.send_request_with_body(FrontendReq::SET_FEATURES, &val, None)?;
self.acked_virtio_features = features & self.virtio_features;
self.wait_for_ack(&hdr)
}
@@ -104,18 +100,18 @@
/// Set the current process as the owner of the vhost backend.
/// This must be run before any other vhost commands.
pub fn set_owner(&self) -> Result<()> {
- let hdr = self.send_request_header(MasterReq::SET_OWNER, None)?;
+ let hdr = self.send_request_header(FrontendReq::SET_OWNER, None)?;
self.wait_for_ack(&hdr)
}
/// Used to be sent to request disabling all rings
/// This is no longer used.
pub fn reset_owner(&self) -> Result<()> {
- let hdr = self.send_request_header(MasterReq::RESET_OWNER, None)?;
+ let hdr = self.send_request_header(FrontendReq::RESET_OWNER, None)?;
self.wait_for_ack(&hdr)
}
- /// Set the memory map regions on the slave so it can translate the vring
+ /// Set the memory map regions on the backend so it can translate the vring
/// addresses. In the ancillary data there is an array of file descriptors
pub fn set_mem_table(&self, regions: &[VhostUserMemoryRegionInfo]) -> Result<()> {
if regions.is_empty() || regions.len() > MAX_ATTACHED_FD_ENTRIES {
@@ -139,7 +135,7 @@
let body = VhostUserMemory::new(ctx.regions.len() as u32);
let hdr = self.send_request_with_payload(
- MasterReq::SET_MEM_TABLE,
+ FrontendReq::SET_MEM_TABLE,
&body,
ctx.regions.as_bytes(),
Some(ctx.fds.as_slice()),
@@ -158,7 +154,7 @@
}
let _ = self.send_request_with_body(
- MasterReq::SET_LOG_BASE,
+ FrontendReq::SET_LOG_BASE,
&val,
fd.as_ref().map(std::slice::from_ref),
)?;
@@ -169,14 +165,14 @@
/// Specify an event file descriptor to signal on log write.
pub fn set_log_fd(&self, fd: RawDescriptor) -> Result<()> {
let fds = [fd];
- let hdr = self.send_request_header(MasterReq::SET_LOG_FD, Some(&fds))?;
+ let hdr = self.send_request_header(FrontendReq::SET_LOG_FD, Some(&fds))?;
self.wait_for_ack(&hdr)
}
/// Set the number of descriptors in the vring.
pub fn set_vring_num(&self, queue_index: usize, num: u16) -> Result<()> {
let val = VhostUserVringState::new(queue_index as u32, num.into());
- let hdr = self.send_request_with_body(MasterReq::SET_VRING_NUM, &val, None)?;
+ let hdr = self.send_request_with_body(FrontendReq::SET_VRING_NUM, &val, None)?;
self.wait_for_ack(&hdr)
}
@@ -187,21 +183,23 @@
}
let val = VhostUserVringAddr::from_config_data(queue_index as u32, config_data);
- let hdr = self.send_request_with_body(MasterReq::SET_VRING_ADDR, &val, None)?;
+ let hdr = self.send_request_with_body(FrontendReq::SET_VRING_ADDR, &val, None)?;
self.wait_for_ack(&hdr)
}
/// Set the first index to look for available descriptors.
+ // TODO: b/331466964 - Arguments and message format are wrong for packed queues.
pub fn set_vring_base(&self, queue_index: usize, base: u16) -> Result<()> {
let val = VhostUserVringState::new(queue_index as u32, base.into());
- let hdr = self.send_request_with_body(MasterReq::SET_VRING_BASE, &val, None)?;
+ let hdr = self.send_request_with_body(FrontendReq::SET_VRING_BASE, &val, None)?;
self.wait_for_ack(&hdr)
}
/// Get the available vring base offset.
+ // TODO: b/331466964 - Return type is wrong for packed queues.
pub fn get_vring_base(&self, queue_index: usize) -> Result<u32> {
let req = VhostUserVringState::new(queue_index as u32, 0);
- let hdr = self.send_request_with_body(MasterReq::GET_VRING_BASE, &req, None)?;
+ let hdr = self.send_request_with_body(FrontendReq::GET_VRING_BASE, &req, None)?;
let reply = self.recv_reply::<VhostUserVringState>(&hdr)?;
Ok(reply.num)
}
@@ -213,7 +211,7 @@
/// will be used instead of waiting for the call.
pub fn set_vring_call(&self, queue_index: usize, event: &Event) -> Result<()> {
let hdr = self.send_fd_for_vring(
- MasterReq::SET_VRING_CALL,
+ FrontendReq::SET_VRING_CALL,
queue_index,
event.as_raw_descriptor(),
)?;
@@ -228,7 +226,7 @@
/// should be used instead of waiting for a kick.
pub fn set_vring_kick(&self, queue_index: usize, event: &Event) -> Result<()> {
let hdr = self.send_fd_for_vring(
- MasterReq::SET_VRING_KICK,
+ FrontendReq::SET_VRING_KICK,
queue_index,
event.as_raw_descriptor(),
)?;
@@ -241,7 +239,7 @@
/// is set when there is no file descriptor in the ancillary data.
pub fn set_vring_err(&self, queue_index: usize, event: &Event) -> Result<()> {
let hdr = self.send_fd_for_vring(
- MasterReq::SET_VRING_ERR,
+ FrontendReq::SET_VRING_ERR,
queue_index,
event.as_raw_descriptor(),
)?;
@@ -250,7 +248,7 @@
/// Put the device to sleep.
pub fn sleep(&self) -> Result<()> {
- let hdr = self.send_request_header(MasterReq::SLEEP, None)?;
+ let hdr = self.send_request_header(FrontendReq::SLEEP, None)?;
let reply = self.recv_reply::<VhostUserSuccess>(&hdr)?;
if !reply.success() {
Err(VhostUserError::SleepError(anyhow!(
@@ -263,7 +261,7 @@
/// Wake the device up.
pub fn wake(&self) -> Result<()> {
- let hdr = self.send_request_header(MasterReq::WAKE, None)?;
+ let hdr = self.send_request_header(FrontendReq::WAKE, None)?;
let reply = self.recv_reply::<VhostUserSuccess>(&hdr)?;
if !reply.success() {
Err(VhostUserError::WakeError(anyhow!(
@@ -276,7 +274,7 @@
/// Snapshot the device and receive serialized state of the device.
pub fn snapshot(&self) -> Result<Vec<u8>> {
- let hdr = self.send_request_header(MasterReq::SNAPSHOT, None)?;
+ let hdr = self.send_request_header(FrontendReq::SNAPSHOT, None)?;
let (success_msg, buf_reply, _) = self.recv_reply_with_payload::<VhostUserSuccess>(&hdr)?;
if !success_msg.success() {
Err(VhostUserError::SnapshotError(anyhow!(
@@ -299,7 +297,7 @@
});
let hdr = self.send_request_with_payload(
- MasterReq::RESTORE,
+ FrontendReq::RESTORE,
&body,
data_bytes,
queue_evt_fds.as_deref(),
@@ -319,7 +317,7 @@
if self.virtio_features & 1 << VHOST_USER_F_PROTOCOL_FEATURES == 0 {
return Err(VhostUserError::InvalidOperation);
}
- let hdr = self.send_request_header(MasterReq::GET_PROTOCOL_FEATURES, None)?;
+ let hdr = self.send_request_header(FrontendReq::GET_PROTOCOL_FEATURES, None)?;
let val = self.recv_reply::<VhostUserU64>(&hdr)?;
// Should we support forward compatibility?
// If so just mask out unrecognized flags instead of return errors.
@@ -335,12 +333,12 @@
return Err(VhostUserError::InvalidOperation);
}
if features.contains(VhostUserProtocolFeatures::SHARED_MEMORY_REGIONS)
- && !features.contains(VhostUserProtocolFeatures::SLAVE_REQ)
+ && !features.contains(VhostUserProtocolFeatures::BACKEND_REQ)
{
return Err(VhostUserError::FeatureMismatch);
}
let val = VhostUserU64::new(features.bits());
- let hdr = self.send_request_with_body(MasterReq::SET_PROTOCOL_FEATURES, &val, None)?;
+ let hdr = self.send_request_with_body(FrontendReq::SET_PROTOCOL_FEATURES, &val, None)?;
// Don't wait for ACK here because the protocol feature negotiation process hasn't been
// completed yet.
self.acked_protocol_features = features.bits();
@@ -353,7 +351,7 @@
return Err(VhostUserError::InvalidOperation);
}
- let hdr = self.send_request_header(MasterReq::GET_QUEUE_NUM, None)?;
+ let hdr = self.send_request_header(FrontendReq::GET_QUEUE_NUM, None)?;
let val = self.recv_reply::<VhostUserU64>(&hdr)?;
if val.value > VHOST_USER_MAX_VRINGS {
return Err(VhostUserError::InvalidMessage);
@@ -361,9 +359,9 @@
Ok(val.value)
}
- /// Signal slave to enable or disable corresponding vring.
+ /// Signal backend to enable or disable corresponding vring.
///
- /// Slave must not pass data to/from the backend until ring is enabled by
+ /// Backend must not pass data to/from the ring until ring is enabled by
/// VHOST_USER_SET_VRING_ENABLE with parameter 1, or after it has been
/// disabled by VHOST_USER_SET_VRING_ENABLE with parameter 0.
pub fn set_vring_enable(&self, queue_index: usize, enable: bool) -> Result<()> {
@@ -373,7 +371,7 @@
}
let val = VhostUserVringState::new(queue_index as u32, enable.into());
- let hdr = self.send_request_with_body(MasterReq::SET_VRING_ENABLE, &val, None)?;
+ let hdr = self.send_request_with_body(FrontendReq::SET_VRING_ENABLE, &val, None)?;
self.wait_for_ack(&hdr)
}
@@ -396,15 +394,15 @@
}
// vhost-user spec states that:
- // "Master payload: virtio device config space"
- // "Slave payload: virtio device config space"
- let hdr = self.send_request_with_payload(MasterReq::GET_CONFIG, &body, buf, None)?;
+ // "Request payload: virtio device config space"
+ // "Reply payload: virtio device config space"
+ let hdr = self.send_request_with_payload(FrontendReq::GET_CONFIG, &body, buf, None)?;
let (body_reply, buf_reply, rfds) =
self.recv_reply_with_payload::<VhostUserConfig>(&hdr)?;
if !rfds.is_empty() {
return Err(VhostUserError::InvalidMessage);
} else if body_reply.size == 0 {
- return Err(VhostUserError::SlaveInternalError);
+ return Err(VhostUserError::BackendInternalError);
} else if body_reply.size != body.size
|| body_reply.size as usize != buf.len()
|| body_reply.offset != body.offset
@@ -434,17 +432,17 @@
return Err(VhostUserError::InvalidOperation);
}
- let hdr = self.send_request_with_payload(MasterReq::SET_CONFIG, &body, buf, None)?;
+ let hdr = self.send_request_with_payload(FrontendReq::SET_CONFIG, &body, buf, None)?;
self.wait_for_ack(&hdr)
}
- /// Setup slave communication channel.
- pub fn set_slave_request_fd(&self, fd: &dyn AsRawDescriptor) -> Result<()> {
- if self.acked_protocol_features & VhostUserProtocolFeatures::SLAVE_REQ.bits() == 0 {
+ /// Setup backend communication channel.
+ pub fn set_backend_req_fd(&self, fd: &dyn AsRawDescriptor) -> Result<()> {
+ if self.acked_protocol_features & VhostUserProtocolFeatures::BACKEND_REQ.bits() == 0 {
return Err(VhostUserError::InvalidOperation);
}
let fds = [fd.as_raw_descriptor()];
- let hdr = self.send_request_header(MasterReq::SET_SLAVE_REQ_FD, Some(&fds))?;
+ let hdr = self.send_request_header(FrontendReq::SET_BACKEND_REQ_FD, Some(&fds))?;
self.wait_for_ack(&hdr)
}
@@ -457,7 +455,7 @@
return Err(VhostUserError::InvalidOperation);
}
- let hdr = self.send_request_with_body(MasterReq::GET_INFLIGHT_FD, inflight, None)?;
+ let hdr = self.send_request_with_body(FrontendReq::GET_INFLIGHT_FD, inflight, None)?;
let (inflight, files) = self.recv_reply_with_files::<VhostUserInflight>(&hdr)?;
match into_single_file(files) {
@@ -480,7 +478,8 @@
return Err(VhostUserError::InvalidParam);
}
- let hdr = self.send_request_with_body(MasterReq::SET_INFLIGHT_FD, inflight, Some(&[fd]))?;
+ let hdr =
+ self.send_request_with_body(FrontendReq::SET_INFLIGHT_FD, inflight, Some(&[fd]))?;
self.wait_for_ack(&hdr)
}
@@ -491,7 +490,7 @@
return Err(VhostUserError::InvalidOperation);
}
- let hdr = self.send_request_header(MasterReq::GET_MAX_MEM_SLOTS, None)?;
+ let hdr = self.send_request_header(FrontendReq::GET_MAX_MEM_SLOTS, None)?;
let val = self.recv_reply::<VhostUserU64>(&hdr)?;
Ok(val.value)
@@ -515,7 +514,7 @@
region.mmap_offset,
);
let fds = [region.mmap_handle];
- let hdr = self.send_request_with_body(MasterReq::ADD_MEM_REG, &body, Some(&fds))?;
+ let hdr = self.send_request_with_body(FrontendReq::ADD_MEM_REG, &body, Some(&fds))?;
self.wait_for_ack(&hdr)
}
@@ -535,13 +534,13 @@
region.userspace_addr,
region.mmap_offset,
);
- let hdr = self.send_request_with_body(MasterReq::REM_MEM_REG, &body, None)?;
+ let hdr = self.send_request_with_body(FrontendReq::REM_MEM_REG, &body, None)?;
self.wait_for_ack(&hdr)
}
/// Gets the shared memory regions used by the device.
pub fn get_shared_memory_regions(&self) -> Result<Vec<VhostSharedMemoryRegion>> {
- let hdr = self.send_request_header(MasterReq::GET_SHARED_MEMORY_REGIONS, None)?;
+ let hdr = self.send_request_header(FrontendReq::GET_SHARED_MEMORY_REGIONS, None)?;
let (body_reply, buf_reply, rfds) = self.recv_reply_with_payload::<VhostUserU64>(&hdr)?;
let struct_size = mem::size_of::<VhostSharedMemoryRegion>();
if !rfds.is_empty() || buf_reply.len() != body_reply.value as usize * struct_size {
@@ -552,7 +551,8 @@
for _ in 0..body_reply.value {
regions.push(
// Can't fail because the input is the correct size.
- zerocopy_from_reader(&buf_reply[offset..(offset + struct_size)]).unwrap(),
+ VhostSharedMemoryRegion::read_from(&buf_reply[offset..(offset + struct_size)])
+ .unwrap(),
);
offset += struct_size;
}
@@ -561,32 +561,32 @@
fn send_request_header(
&self,
- code: MasterReq,
+ code: FrontendReq,
fds: Option<&[RawDescriptor]>,
- ) -> VhostUserResult<VhostUserMsgHeader<MasterReq>> {
+ ) -> VhostUserResult<VhostUserMsgHeader<FrontendReq>> {
let hdr = self.new_request_header(code, 0);
- self.main_sock.send_header_only_message(&hdr, fds)?;
+ self.connection.send_header_only_message(&hdr, fds)?;
Ok(hdr)
}
fn send_request_with_body<T: Sized + AsBytes>(
&self,
- code: MasterReq,
+ code: FrontendReq,
msg: &T,
fds: Option<&[RawDescriptor]>,
- ) -> VhostUserResult<VhostUserMsgHeader<MasterReq>> {
+ ) -> VhostUserResult<VhostUserMsgHeader<FrontendReq>> {
let hdr = self.new_request_header(code, mem::size_of::<T>() as u32);
- self.main_sock.send_message(&hdr, msg, fds)?;
+ self.connection.send_message(&hdr, msg, fds)?;
Ok(hdr)
}
fn send_request_with_payload<T: Sized + AsBytes>(
&self,
- code: MasterReq,
+ code: FrontendReq,
msg: &T,
payload: &[u8],
fds: Option<&[RawDescriptor]>,
- ) -> VhostUserResult<VhostUserMsgHeader<MasterReq>> {
+ ) -> VhostUserResult<VhostUserMsgHeader<FrontendReq>> {
if let Some(fd_arr) = fds {
if fd_arr.len() > MAX_ATTACHED_FD_ENTRIES {
return Err(VhostUserError::InvalidParam);
@@ -599,34 +599,34 @@
code,
len.try_into().map_err(VhostUserError::InvalidCastToInt)?,
);
- self.main_sock
+ self.connection
.send_message_with_payload(&hdr, msg, payload, fds)?;
Ok(hdr)
}
fn send_fd_for_vring(
&self,
- code: MasterReq,
+ code: FrontendReq,
queue_index: usize,
fd: RawDescriptor,
- ) -> VhostUserResult<VhostUserMsgHeader<MasterReq>> {
+ ) -> VhostUserResult<VhostUserMsgHeader<FrontendReq>> {
// Bits (0-7) of the payload contain the vring index. Bit 8 is the invalid FD flag.
// This flag is set when there is no file descriptor in the ancillary data. This signals
// that polling will be used instead of waiting for the call.
let msg = VhostUserU64::new(queue_index as u64);
let hdr = self.new_request_header(code, mem::size_of::<VhostUserU64>() as u32);
- self.main_sock.send_message(&hdr, &msg, Some(&[fd]))?;
+ self.connection.send_message(&hdr, &msg, Some(&[fd]))?;
Ok(hdr)
}
fn recv_reply<T: Sized + FromBytes + AsBytes + Default + VhostUserMsgValidator>(
&self,
- hdr: &VhostUserMsgHeader<MasterReq>,
+ hdr: &VhostUserMsgHeader<FrontendReq>,
) -> VhostUserResult<T> {
if hdr.is_reply() {
return Err(VhostUserError::InvalidParam);
}
- let (reply, body, rfds) = self.main_sock.recv_message::<T>()?;
+ let (reply, body, rfds) = self.connection.recv_message::<T>()?;
if !reply.is_reply_for(hdr) || !rfds.is_empty() || !body.is_valid() {
return Err(VhostUserError::InvalidMessage);
}
@@ -635,13 +635,13 @@
fn recv_reply_with_files<T: Sized + AsBytes + FromBytes + Default + VhostUserMsgValidator>(
&self,
- hdr: &VhostUserMsgHeader<MasterReq>,
+ hdr: &VhostUserMsgHeader<FrontendReq>,
) -> VhostUserResult<(T, Vec<File>)> {
if hdr.is_reply() {
return Err(VhostUserError::InvalidParam);
}
- let (reply, body, files) = self.main_sock.recv_message::<T>()?;
+ let (reply, body, files) = self.connection.recv_message::<T>()?;
if !reply.is_reply_for(hdr) || files.is_empty() || !body.is_valid() {
return Err(VhostUserError::InvalidMessage);
}
@@ -650,13 +650,13 @@
fn recv_reply_with_payload<T: Sized + AsBytes + FromBytes + Default + VhostUserMsgValidator>(
&self,
- hdr: &VhostUserMsgHeader<MasterReq>,
+ hdr: &VhostUserMsgHeader<FrontendReq>,
) -> VhostUserResult<(T, Vec<u8>, Vec<File>)> {
if hdr.is_reply() {
return Err(VhostUserError::InvalidParam);
}
- let (reply, body, buf, files) = self.main_sock.recv_message_with_payload::<T>()?;
+ let (reply, body, buf, files) = self.connection.recv_message_with_payload::<T>()?;
if !reply.is_reply_for(hdr) || !files.is_empty() || !body.is_valid() {
return Err(VhostUserError::InvalidMessage);
}
@@ -664,19 +664,19 @@
Ok((body, buf, files))
}
- fn wait_for_ack(&self, hdr: &VhostUserMsgHeader<MasterReq>) -> VhostUserResult<()> {
+ fn wait_for_ack(&self, hdr: &VhostUserMsgHeader<FrontendReq>) -> VhostUserResult<()> {
if self.acked_protocol_features & VhostUserProtocolFeatures::REPLY_ACK.bits() == 0
|| !hdr.is_need_reply()
{
return Ok(());
}
- let (reply, body, rfds) = self.main_sock.recv_message::<VhostUserU64>()?;
+ let (reply, body, rfds) = self.connection.recv_message::<VhostUserU64>()?;
if !reply.is_reply_for(hdr) || !rfds.is_empty() || !body.is_valid() {
return Err(VhostUserError::InvalidMessage);
}
if body.value != 0 {
- return Err(VhostUserError::SlaveInternalError);
+ return Err(VhostUserError::BackendInternalError);
}
Ok(())
}
@@ -686,13 +686,17 @@
}
#[inline]
- fn new_request_header(&self, request: MasterReq, size: u32) -> VhostUserMsgHeader<MasterReq> {
+ fn new_request_header(
+ &self,
+ request: FrontendReq,
+ size: u32,
+ ) -> VhostUserMsgHeader<FrontendReq> {
VhostUserMsgHeader::new(request, 0x1, size)
}
}
// TODO(b/221882601): likely need pairs of RDs and/or SharedMemory to represent mmaps on Windows.
-/// Context object to pass guest memory configuration to Master::set_mem_table().
+/// Context object to pass guest memory configuration to BackendClient::set_mem_table().
struct VhostUserMemoryContext {
regions: VhostUserMemoryPayload,
fds: Vec<RawDescriptor>,
@@ -720,27 +724,27 @@
use tempfile::tempfile;
use super::*;
- use crate::connection::tests::create_pair;
+ use crate::tests::create_pair;
const BUFFER_SIZE: usize = 0x1001;
#[test]
- fn create_master() {
- let (master, slave) = create_pair();
+ fn create_backend_client() {
+ let (backend_client, peer) = create_pair();
- assert!(master.main_sock.as_raw_descriptor() != INVALID_DESCRIPTOR);
+ assert!(backend_client.connection.as_raw_descriptor() != INVALID_DESCRIPTOR);
// Send two messages continuously
- master.set_owner().unwrap();
- master.reset_owner().unwrap();
+ backend_client.set_owner().unwrap();
+ backend_client.reset_owner().unwrap();
- let (hdr, rfds) = slave.recv_header().unwrap();
- assert_eq!(hdr.get_code(), Ok(MasterReq::SET_OWNER));
+ let (hdr, rfds) = peer.recv_header().unwrap();
+ assert_eq!(hdr.get_code(), Ok(FrontendReq::SET_OWNER));
assert_eq!(hdr.get_size(), 0);
assert_eq!(hdr.get_version(), 0x1);
assert!(rfds.is_empty());
- let (hdr, rfds) = slave.recv_header().unwrap();
- assert_eq!(hdr.get_code(), Ok(MasterReq::RESET_OWNER));
+ let (hdr, rfds) = peer.recv_header().unwrap();
+ assert_eq!(hdr.get_code(), Ok(FrontendReq::RESET_OWNER));
assert_eq!(hdr.get_size(), 0);
assert_eq!(hdr.get_version(), 0x1);
assert!(rfds.is_empty());
@@ -748,286 +752,286 @@
#[test]
fn test_features() {
- let (mut master, peer) = create_pair();
+ let (mut backend_client, peer) = create_pair();
- master.set_owner().unwrap();
+ backend_client.set_owner().unwrap();
let (hdr, rfds) = peer.recv_header().unwrap();
- assert_eq!(hdr.get_code(), Ok(MasterReq::SET_OWNER));
+ assert_eq!(hdr.get_code(), Ok(FrontendReq::SET_OWNER));
assert_eq!(hdr.get_size(), 0);
assert_eq!(hdr.get_version(), 0x1);
assert!(rfds.is_empty());
- let hdr = VhostUserMsgHeader::new(MasterReq::GET_FEATURES, 0x4, 8);
+ let hdr = VhostUserMsgHeader::new(FrontendReq::GET_FEATURES, 0x4, 8);
let msg = VhostUserU64::new(0x15);
peer.send_message(&hdr, &msg, None).unwrap();
- let features = master.get_features().unwrap();
+ let features = backend_client.get_features().unwrap();
assert_eq!(features, 0x15u64);
let (_hdr, rfds) = peer.recv_header().unwrap();
assert!(rfds.is_empty());
- let hdr = VhostUserMsgHeader::new(MasterReq::SET_FEATURES, 0x4, 8);
+ let hdr = VhostUserMsgHeader::new(FrontendReq::SET_FEATURES, 0x4, 8);
let msg = VhostUserU64::new(0x15);
peer.send_message(&hdr, &msg, None).unwrap();
- master.set_features(0x15).unwrap();
+ backend_client.set_features(0x15).unwrap();
let (_hdr, msg, rfds) = peer.recv_message::<VhostUserU64>().unwrap();
assert!(rfds.is_empty());
let val = msg.value;
assert_eq!(val, 0x15);
- let hdr = VhostUserMsgHeader::new(MasterReq::GET_FEATURES, 0x4, 8);
+ let hdr = VhostUserMsgHeader::new(FrontendReq::GET_FEATURES, 0x4, 8);
let msg = 0x15u32;
peer.send_message(&hdr, &msg, None).unwrap();
- assert!(master.get_features().is_err());
+ assert!(backend_client.get_features().is_err());
}
#[test]
fn test_protocol_features() {
- let (mut master, peer) = create_pair();
+ let (mut backend_client, peer) = create_pair();
- master.set_owner().unwrap();
+ backend_client.set_owner().unwrap();
let (hdr, rfds) = peer.recv_header().unwrap();
- assert_eq!(hdr.get_code(), Ok(MasterReq::SET_OWNER));
+ assert_eq!(hdr.get_code(), Ok(FrontendReq::SET_OWNER));
assert!(rfds.is_empty());
- assert!(master.get_protocol_features().is_err());
- assert!(master
+ assert!(backend_client.get_protocol_features().is_err());
+ assert!(backend_client
.set_protocol_features(VhostUserProtocolFeatures::all())
.is_err());
let vfeatures = 0x15 | 1 << VHOST_USER_F_PROTOCOL_FEATURES;
- let hdr = VhostUserMsgHeader::new(MasterReq::GET_FEATURES, 0x4, 8);
+ let hdr = VhostUserMsgHeader::new(FrontendReq::GET_FEATURES, 0x4, 8);
let msg = VhostUserU64::new(vfeatures);
peer.send_message(&hdr, &msg, None).unwrap();
- let features = master.get_features().unwrap();
+ let features = backend_client.get_features().unwrap();
assert_eq!(features, vfeatures);
let (_hdr, rfds) = peer.recv_header().unwrap();
assert!(rfds.is_empty());
- master.set_features(vfeatures).unwrap();
+ backend_client.set_features(vfeatures).unwrap();
let (_hdr, msg, rfds) = peer.recv_message::<VhostUserU64>().unwrap();
assert!(rfds.is_empty());
let val = msg.value;
assert_eq!(val, vfeatures);
let pfeatures = VhostUserProtocolFeatures::all();
- let hdr = VhostUserMsgHeader::new(MasterReq::GET_PROTOCOL_FEATURES, 0x4, 8);
+ let hdr = VhostUserMsgHeader::new(FrontendReq::GET_PROTOCOL_FEATURES, 0x4, 8);
let msg = VhostUserU64::new(pfeatures.bits());
peer.send_message(&hdr, &msg, None).unwrap();
- let features = master.get_protocol_features().unwrap();
+ let features = backend_client.get_protocol_features().unwrap();
assert_eq!(features, pfeatures);
let (_hdr, rfds) = peer.recv_header().unwrap();
assert!(rfds.is_empty());
- master.set_protocol_features(pfeatures).unwrap();
+ backend_client.set_protocol_features(pfeatures).unwrap();
let (_hdr, msg, rfds) = peer.recv_message::<VhostUserU64>().unwrap();
assert!(rfds.is_empty());
let val = msg.value;
assert_eq!(val, pfeatures.bits());
- let hdr = VhostUserMsgHeader::new(MasterReq::SET_PROTOCOL_FEATURES, 0x4, 8);
+ let hdr = VhostUserMsgHeader::new(FrontendReq::SET_PROTOCOL_FEATURES, 0x4, 8);
let msg = VhostUserU64::new(pfeatures.bits());
peer.send_message(&hdr, &msg, None).unwrap();
- assert!(master.get_protocol_features().is_err());
+ assert!(backend_client.get_protocol_features().is_err());
}
#[test]
- fn test_master_set_config_negative() {
- let (mut master, _peer) = create_pair();
+ fn test_backend_client_set_config_negative() {
+ let (mut backend_client, _peer) = create_pair();
let buf = vec![0x0; BUFFER_SIZE];
- master
+ backend_client
.set_config(0x100, VhostUserConfigFlags::WRITABLE, &buf[0..4])
.unwrap_err();
- master.virtio_features = 0xffff_ffff;
- master.acked_virtio_features = 0xffff_ffff;
- master.acked_protocol_features = 0xffff_ffff;
+ backend_client.virtio_features = 0xffff_ffff;
+ backend_client.acked_virtio_features = 0xffff_ffff;
+ backend_client.acked_protocol_features = 0xffff_ffff;
- master
+ backend_client
.set_config(0, VhostUserConfigFlags::WRITABLE, &buf[0..4])
.unwrap();
- master
+ backend_client
.set_config(
VHOST_USER_CONFIG_SIZE,
VhostUserConfigFlags::WRITABLE,
&buf[0..4],
)
.unwrap_err();
- master
+ backend_client
.set_config(0x1000, VhostUserConfigFlags::WRITABLE, &buf[0..4])
.unwrap_err();
- master
+ backend_client
.set_config(
0x100,
VhostUserConfigFlags::from_bits_retain(0xffff_ffff),
&buf[0..4],
)
.unwrap_err();
- master
+ backend_client
.set_config(VHOST_USER_CONFIG_SIZE, VhostUserConfigFlags::WRITABLE, &buf)
.unwrap_err();
- master
+ backend_client
.set_config(VHOST_USER_CONFIG_SIZE, VhostUserConfigFlags::WRITABLE, &[])
.unwrap_err();
}
- fn create_pair2() -> (Master, Connection<MasterReq>) {
- let (mut master, peer) = create_pair();
+ fn create_pair2() -> (BackendClient, Connection<FrontendReq>) {
+ let (mut backend_client, peer) = create_pair();
- master.virtio_features = 0xffff_ffff;
- master.acked_virtio_features = 0xffff_ffff;
- master.acked_protocol_features = 0xffff_ffff;
+ backend_client.virtio_features = 0xffff_ffff;
+ backend_client.acked_virtio_features = 0xffff_ffff;
+ backend_client.acked_protocol_features = 0xffff_ffff;
- (master, peer)
+ (backend_client, peer)
}
#[test]
- fn test_master_get_config_negative0() {
- let (master, peer) = create_pair2();
+ fn test_backend_client_get_config_negative0() {
+ let (backend_client, peer) = create_pair2();
let buf = vec![0x0; BUFFER_SIZE];
- let mut hdr = VhostUserMsgHeader::new(MasterReq::GET_CONFIG, 0x4, 16);
+ let mut hdr = VhostUserMsgHeader::new(FrontendReq::GET_CONFIG, 0x4, 16);
let msg = VhostUserConfig::new(0x100, 4, VhostUserConfigFlags::empty());
peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None)
.unwrap();
- assert!(master
+ assert!(backend_client
.get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
.is_ok());
- hdr.set_code(MasterReq::GET_FEATURES);
+ hdr.set_code(FrontendReq::GET_FEATURES);
peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None)
.unwrap();
- assert!(master
+ assert!(backend_client
.get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
.is_err());
- hdr.set_code(MasterReq::GET_CONFIG);
+ hdr.set_code(FrontendReq::GET_CONFIG);
}
#[test]
- fn test_master_get_config_negative1() {
- let (master, peer) = create_pair2();
+ fn test_backend_client_get_config_negative1() {
+ let (backend_client, peer) = create_pair2();
let buf = vec![0x0; BUFFER_SIZE];
- let mut hdr = VhostUserMsgHeader::new(MasterReq::GET_CONFIG, 0x4, 16);
+ let mut hdr = VhostUserMsgHeader::new(FrontendReq::GET_CONFIG, 0x4, 16);
let msg = VhostUserConfig::new(0x100, 4, VhostUserConfigFlags::empty());
peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None)
.unwrap();
- assert!(master
+ assert!(backend_client
.get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
.is_ok());
hdr.set_reply(false);
peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None)
.unwrap();
- assert!(master
+ assert!(backend_client
.get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
.is_err());
}
#[test]
- fn test_master_get_config_negative2() {
- let (master, peer) = create_pair2();
+ fn test_backend_client_get_config_negative2() {
+ let (backend_client, peer) = create_pair2();
let buf = vec![0x0; BUFFER_SIZE];
- let hdr = VhostUserMsgHeader::new(MasterReq::GET_CONFIG, 0x4, 16);
+ let hdr = VhostUserMsgHeader::new(FrontendReq::GET_CONFIG, 0x4, 16);
let msg = VhostUserConfig::new(0x100, 4, VhostUserConfigFlags::empty());
peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None)
.unwrap();
- assert!(master
+ assert!(backend_client
.get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
.is_ok());
}
#[test]
- fn test_master_get_config_negative3() {
- let (master, peer) = create_pair2();
+ fn test_backend_client_get_config_negative3() {
+ let (backend_client, peer) = create_pair2();
let buf = vec![0x0; BUFFER_SIZE];
- let hdr = VhostUserMsgHeader::new(MasterReq::GET_CONFIG, 0x4, 16);
+ let hdr = VhostUserMsgHeader::new(FrontendReq::GET_CONFIG, 0x4, 16);
let mut msg = VhostUserConfig::new(0x100, 4, VhostUserConfigFlags::empty());
peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None)
.unwrap();
- assert!(master
+ assert!(backend_client
.get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
.is_ok());
msg.offset = 0;
peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None)
.unwrap();
- assert!(master
+ assert!(backend_client
.get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
.is_err());
}
#[test]
- fn test_master_get_config_negative4() {
- let (master, peer) = create_pair2();
+ fn test_backend_client_get_config_negative4() {
+ let (backend_client, peer) = create_pair2();
let buf = vec![0x0; BUFFER_SIZE];
- let hdr = VhostUserMsgHeader::new(MasterReq::GET_CONFIG, 0x4, 16);
+ let hdr = VhostUserMsgHeader::new(FrontendReq::GET_CONFIG, 0x4, 16);
let mut msg = VhostUserConfig::new(0x100, 4, VhostUserConfigFlags::empty());
peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None)
.unwrap();
- assert!(master
+ assert!(backend_client
.get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
.is_ok());
msg.offset = 0x101;
peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None)
.unwrap();
- assert!(master
+ assert!(backend_client
.get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
.is_err());
}
#[test]
- fn test_master_get_config_negative5() {
- let (master, peer) = create_pair2();
+ fn test_backend_client_get_config_negative5() {
+ let (backend_client, peer) = create_pair2();
let buf = vec![0x0; BUFFER_SIZE];
- let hdr = VhostUserMsgHeader::new(MasterReq::GET_CONFIG, 0x4, 16);
+ let hdr = VhostUserMsgHeader::new(FrontendReq::GET_CONFIG, 0x4, 16);
let mut msg = VhostUserConfig::new(0x100, 4, VhostUserConfigFlags::empty());
peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None)
.unwrap();
- assert!(master
+ assert!(backend_client
.get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
.is_ok());
msg.offset = (BUFFER_SIZE) as u32;
peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None)
.unwrap();
- assert!(master
+ assert!(backend_client
.get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
.is_err());
}
#[test]
- fn test_master_get_config_negative6() {
- let (master, peer) = create_pair2();
+ fn test_backend_client_get_config_negative6() {
+ let (backend_client, peer) = create_pair2();
let buf = vec![0x0; BUFFER_SIZE];
- let hdr = VhostUserMsgHeader::new(MasterReq::GET_CONFIG, 0x4, 16);
+ let hdr = VhostUserMsgHeader::new(FrontendReq::GET_CONFIG, 0x4, 16);
let mut msg = VhostUserConfig::new(0x100, 4, VhostUserConfigFlags::empty());
peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None)
.unwrap();
- assert!(master
+ assert!(backend_client
.get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
.is_ok());
msg.size = 6;
peer.send_message_with_payload(&hdr, &msg, &buf[0..6], None)
.unwrap();
- assert!(master
+ assert!(backend_client
.get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
.is_err());
}
#[test]
fn test_maset_set_mem_table_failure() {
- let (master, _peer) = create_pair2();
+ let (backend_client, _peer) = create_pair2();
// set_mem_table() with 0 regions is invalid
- master.set_mem_table(&[]).unwrap_err();
+ backend_client.set_mem_table(&[]).unwrap_err();
// set_mem_table() with more than MAX_ATTACHED_FD_ENTRIES is invalid
let files: Vec<File> = (0..MAX_ATTACHED_FD_ENTRIES + 1)
@@ -1043,6 +1047,6 @@
mmap_handle: f.as_raw_descriptor(),
})
.collect();
- master.set_mem_table(&tables).unwrap_err();
+ backend_client.set_mem_table(&tables).unwrap_err();
}
}
diff --git a/third_party/vmm_vhost/src/slave_req_handler.rs b/third_party/vmm_vhost/src/backend_server.rs
similarity index 71%
rename from third_party/vmm_vhost/src/slave_req_handler.rs
rename to third_party/vmm_vhost/src/backend_server.rs
index 706b415..29e4acc 100644
--- a/third_party/vmm_vhost/src/slave_req_handler.rs
+++ b/third_party/vmm_vhost/src/backend_server.rs
@@ -11,32 +11,21 @@
use zerocopy::FromBytes;
use zerocopy::Ref;
-use crate::connection::to_system_stream;
use crate::into_single_file;
use crate::message::*;
+use crate::to_system_stream;
+use crate::BackendReq;
use crate::Connection;
use crate::Error;
-use crate::MasterReq;
+use crate::FrontendReq;
use crate::Result;
-use crate::SlaveReq;
use crate::SystemStream;
-/// Services provided to the master by the slave.
+/// Trait for vhost-user backends.
///
-/// The [VhostUserSlaveReqHandler] trait defines the services provided to the master by the slave.
-/// The vhost-user specification defines a master communication channel, by which masters could
-/// request services from slaves. The [VhostUserSlaveReqHandler] trait defines services provided by
-/// slaves, and it's used both on the master side and slave side.
-///
-/// - on the master side, a stub forwarder implementing [VhostUserSlaveReqHandler] will proxy
-/// service requests to slaves.
-/// - on the slave side, the [SlaveReqHandler] will forward service requests to a handler
-/// implementing [VhostUserSlaveReqHandler].
-///
-/// [VhostUserSlaveReqHandler]: trait.VhostUserSlaveReqHandler.html
-/// [SlaveReqHandler]: struct.SlaveReqHandler.html
+/// Each method corresponds to a vhost-user protocol method. See the specification for details.
#[allow(missing_docs)]
-pub trait VhostUserSlaveReqHandler {
+pub trait Backend {
fn set_owner(&mut self) -> Result<()>;
fn reset_owner(&mut self) -> Result<()>;
fn get_features(&mut self) -> Result<u64>;
@@ -52,7 +41,9 @@
available: u64,
log: u64,
) -> Result<()>;
+ // TODO: b/331466964 - Argument type is wrong for packed queues.
fn set_vring_base(&mut self, index: u32, base: u32) -> Result<()>;
+ // TODO: b/331466964 - Return type is wrong for packed queues.
fn get_vring_base(&mut self, index: u32) -> Result<VhostUserVringState>;
fn set_vring_kick(&mut self, index: u8, fd: Option<File>) -> Result<()>;
fn set_vring_call(&mut self, index: u8, fd: Option<File>) -> Result<()>;
@@ -69,7 +60,7 @@
flags: VhostUserConfigFlags,
) -> Result<Vec<u8>>;
fn set_config(&mut self, offset: u32, buf: &[u8], flags: VhostUserConfigFlags) -> Result<()>;
- fn set_slave_req_fd(&mut self, _vu_req: Connection<SlaveReq>) {}
+ fn set_backend_req_fd(&mut self, _vu_req: Connection<BackendReq>) {}
fn get_inflight_fd(
&mut self,
inflight: &VhostUserInflight,
@@ -89,9 +80,9 @@
fn restore(&mut self, data_bytes: &[u8], queue_evts: Vec<File>) -> Result<()>;
}
-impl<T> VhostUserSlaveReqHandler for T
+impl<T> Backend for T
where
- T: AsMut<dyn VhostUserSlaveReqHandler>,
+ T: AsMut<dyn Backend>,
{
fn set_owner(&mut self) -> Result<()> {
self.as_mut().set_owner()
@@ -179,8 +170,8 @@
self.as_mut().set_config(offset, buf, flags)
}
- fn set_slave_req_fd(&mut self, vu_req: Connection<SlaveReq>) {
- self.as_mut().set_slave_req_fd(vu_req)
+ fn set_backend_req_fd(&mut self, vu_req: Connection<BackendReq>) {
+ self.as_mut().set_backend_req_fd(vu_req)
}
fn get_inflight_fd(
@@ -227,127 +218,10 @@
}
}
-/// Abstracts |Connection| related operations for vhost-user slave implementations.
-pub struct SlaveReqHelper {
+/// Handles requests from a vhost-user connection by dispatching them to [`Backend`] methods.
+pub struct BackendServer<S: Backend> {
/// Underlying connection for communication.
- connection: Connection<MasterReq>,
-
- /// Sending ack for messages without payload.
- reply_ack_enabled: bool,
-}
-
-impl SlaveReqHelper {
- /// Creates a new |SlaveReqHelper| instance with an |Connection| underneath it.
- pub fn new(connection: Connection<MasterReq>) -> Self {
- SlaveReqHelper {
- connection,
- reply_ack_enabled: false,
- }
- }
-
- fn new_reply_header<T: Sized>(
- &self,
- req: &VhostUserMsgHeader<MasterReq>,
- payload_size: usize,
- ) -> Result<VhostUserMsgHeader<MasterReq>> {
- Ok(VhostUserMsgHeader::new(
- req.get_code().map_err(|_| Error::InvalidMessage)?,
- VhostUserHeaderFlag::REPLY.bits(),
- (mem::size_of::<T>()
- .checked_add(payload_size)
- .ok_or(Error::OversizedMsg)?)
- .try_into()
- .map_err(Error::InvalidCastToInt)?,
- ))
- }
-
- /// Sends reply back to Vhost Master in response to a message.
- pub fn send_ack_message(
- &mut self,
- req: &VhostUserMsgHeader<MasterReq>,
- success: bool,
- ) -> Result<()> {
- if self.reply_ack_enabled && req.is_need_reply() {
- let hdr: VhostUserMsgHeader<MasterReq> =
- self.new_reply_header::<VhostUserU64>(req, 0)?;
- let val = if success { 0 } else { 1 };
- let msg = VhostUserU64::new(val);
- self.connection.send_message(&hdr, &msg, None)?;
- }
- Ok(())
- }
-
- fn send_reply_message<T: Sized + AsBytes>(
- &mut self,
- req: &VhostUserMsgHeader<MasterReq>,
- msg: &T,
- ) -> Result<()> {
- let hdr = self.new_reply_header::<T>(req, 0)?;
- self.connection.send_message(&hdr, msg, None)?;
- Ok(())
- }
-
- fn send_reply_with_payload<T: Sized + AsBytes>(
- &mut self,
- req: &VhostUserMsgHeader<MasterReq>,
- msg: &T,
- payload: &[u8],
- ) -> Result<()> {
- let hdr = self.new_reply_header::<T>(req, payload.len())?;
- self.connection
- .send_message_with_payload(&hdr, msg, payload, None)?;
- Ok(())
- }
-
- /// Parses an incoming |SET_VRING_KICK| or |SET_VRING_CALL| message into a
- /// Vring number and an fd.
- pub fn handle_vring_fd_request(
- &mut self,
- buf: &[u8],
- files: Vec<File>,
- ) -> Result<(u8, Option<File>)> {
- let msg = VhostUserU64::read_from_prefix(buf).ok_or(Error::InvalidMessage)?;
- if !msg.is_valid() {
- return Err(Error::InvalidMessage);
- }
-
- // Bits (0-7) of the payload contain the vring index. Bit 8 is the
- // invalid FD flag (VHOST_USER_VRING_NOFD_MASK).
- // This bit is set when there is no file descriptor
- // in the ancillary data. This signals that polling will be used
- // instead of waiting for the call.
- // If Bit 8 is unset, the data must contain a file descriptor.
- let has_fd = (msg.value & 0x100u64) == 0;
-
- let file = into_single_file(files);
-
- if has_fd && file.is_none() || !has_fd && file.is_some() {
- return Err(Error::InvalidMessage);
- }
-
- Ok((msg.value as u8, file))
- }
-}
-
-impl AsRawDescriptor for SlaveReqHelper {
- fn as_raw_descriptor(&self) -> RawDescriptor {
- self.connection.as_raw_descriptor()
- }
-}
-
-/// Server to handle service requests from masters from the master communication channel.
-///
-/// The [SlaveReqHandler] acts as a server on the slave side, to handle service requests from
-/// masters on the master communication channel. It's actually a proxy invoking the registered
-/// handler implementing [VhostUserSlaveReqHandler] to do the real work.
-///
-/// The lifetime of the SlaveReqHandler object should be the same as the underline Unix Domain
-/// Socket, so it gets simpler to recover from disconnect.
-///
-/// [VhostUserSlaveReqHandler]: trait.VhostUserSlaveReqHandler.html
-/// [SlaveReqHandler]: struct.SlaveReqHandler.html
-pub struct SlaveReqHandler<S: VhostUserSlaveReqHandler> {
- slave_req_helper: SlaveReqHelper,
+ connection: Connection<FrontendReq>,
// the vhost-user backend device object
backend: S,
@@ -355,31 +229,34 @@
acked_virtio_features: u64,
protocol_features: VhostUserProtocolFeatures,
acked_protocol_features: u64,
+
+ /// Sending ack for messages without payload.
+ reply_ack_enabled: bool,
}
-impl<S: VhostUserSlaveReqHandler> SlaveReqHandler<S> {
- /// Create a vhost-user slave connection from a connected socket.
+impl<S: Backend> BackendServer<S> {
+ /// Create a backend server from a connected socket.
pub fn from_stream(socket: SystemStream, backend: S) -> Self {
Self::new(Connection::from(socket), backend)
}
}
-impl<S: VhostUserSlaveReqHandler> AsRef<S> for SlaveReqHandler<S> {
+impl<S: Backend> AsRef<S> for BackendServer<S> {
fn as_ref(&self) -> &S {
&self.backend
}
}
-impl<S: VhostUserSlaveReqHandler> SlaveReqHandler<S> {
- /// Create a vhost-user slave connection.
- pub fn new(connection: Connection<MasterReq>, backend: S) -> Self {
- SlaveReqHandler {
- slave_req_helper: SlaveReqHelper::new(connection),
+impl<S: Backend> BackendServer<S> {
+ pub fn new(connection: Connection<FrontendReq>, backend: S) -> Self {
+ BackendServer {
+ connection,
backend,
virtio_features: 0,
acked_virtio_features: 0,
protocol_features: VhostUserProtocolFeatures::empty(),
acked_protocol_features: 0,
+ reply_ack_enabled: false,
}
}
@@ -397,17 +274,17 @@
/// 5. Receives optional payloads.
/// 6. Processes the message.
///
- /// This method [`SlaveReqHandler::recv_header()`] is in charge of the step (1) and (2),
- /// [`SlaveReqHandler::needs_wait_for_payload()`] is (3), and
- /// [`SlaveReqHandler::process_message()`] is (5) and (6).
- /// We need to have the three method separately for multi-platform supports;
- /// [`SlaveReqHandler::recv_header()`] and [`SlaveReqHandler::process_message()`] need to be
- /// separated because the way of waiting for incoming messages differs between Unix and Windows
- /// so it's the caller's responsibility to wait before [`SlaveReqHandler::process_message()`].
+ /// This method [`BackendServer::recv_header()`] is in charge of the step (1) and (2),
+ /// [`BackendServer::needs_wait_for_payload()`] is (3), and
+ /// [`BackendServer::process_message()`] is (5) and (6). We need to have the three methods
+ /// separately for multi-platform support; [`BackendServer::recv_header()`] and
+ /// [`BackendServer::process_message()`] need to be separated because the way of waiting for
+ /// incoming messages differs between Unix and Windows so it's the caller's responsibility to
+ /// wait before [`BackendServer::process_message()`].
///
/// Note that some vhost-user protocol variant such as VVU doesn't assume stream mode. In this
/// case, a message header and its body are sent together so the step (4) is skipped. We handle
- /// this case in [`SlaveReqHandler::needs_wait_for_payload()`].
+ /// this case in [`BackendServer::needs_wait_for_payload()`].
///
/// The following pseudo code describes how a caller should process incoming vhost-user
/// messages:
@@ -418,19 +295,19 @@
/// connection.wait_readable().unwrap();
///
/// // (1) and (2)
- /// let (hdr, files) = slave_req_handler.recv_header();
+ /// let (hdr, files) = backend_server.recv_header();
///
/// // (3)
- /// if slave_req_handler.needs_wait_for_payload(&hdr) {
+ /// if backend_server.needs_wait_for_payload(&hdr) {
/// // (4) block until a payload comes if needed.
/// connection.wait_readable().unwrap();
/// }
///
/// // (5) and (6)
- /// slave_req_handler.process_message(&hdr, &files).unwrap();
+ /// backend_server.process_message(&hdr, &files).unwrap();
/// }
/// ```
- pub fn recv_header(&mut self) -> Result<(VhostUserMsgHeader<MasterReq>, Vec<File>)> {
+ pub fn recv_header(&mut self) -> Result<(VhostUserMsgHeader<FrontendReq>, Vec<File>)> {
// The underlying communication channel is a Unix domain socket in
// stream mode, and recvmsg() is a little tricky here. To successfully
// receive attached file descriptors, we need to receive messages and
@@ -440,7 +317,7 @@
// . recv optional message body and payload according size field in
// message header
// . validate message body and optional payload
- let (hdr, files) = match self.slave_req_helper.connection.recv_header() {
+ let (hdr, files) = match self.connection.recv_header() {
Ok((hdr, files)) => (hdr, files),
Err(Error::Disconnect) => {
// If the client closed the connection before sending a header, this should be
@@ -458,10 +335,10 @@
}
/// Returns whether the caller needs to wait for the incoming message before calling
- /// [`SlaveReqHandler::process_message`].
+ /// [`BackendServer::process_message`].
///
- /// See [`SlaveReqHandler::recv_header`]'s doc comment for the usage.
- pub fn needs_wait_for_payload(&self, hdr: &VhostUserMsgHeader<MasterReq>) -> bool {
+ /// See [`BackendServer::recv_header`]'s doc comment for the usage.
+ pub fn needs_wait_for_payload(&self, hdr: &VhostUserMsgHeader<FrontendReq>) -> bool {
// Since the vhost-user protocol uses stream mode, we need to wait until an additional
// payload is available if exists.
hdr.get_size() != 0
@@ -469,64 +346,77 @@
/// Main entrance to request from the communication channel.
///
- /// Receive and handle one incoming request message from the vmm.
- /// See [`SlaveReqHandler::recv_header`]'s doc comment for the usage.
+ /// Receive and handle one incoming request message from the frontend.
+ /// See [`BackendServer::recv_header`]'s doc comment for the usage.
///
/// # Return:
/// * - `Ok(())`: one request was successfully handled.
- /// * - `Err(ClientExit)`: the vmm closed the connection properly. This isn't an actual failure.
+ /// * - `Err(ClientExit)`: the frontend closed the connection properly. This isn't an actual
+ /// failure.
/// * - `Err(Disconnect)`: the connection was closed unexpectedly.
/// * - `Err(InvalidMessage)`: the vmm sent a illegal message.
/// * - other errors: failed to handle a request.
pub fn process_message(
&mut self,
- hdr: VhostUserMsgHeader<MasterReq>,
+ hdr: VhostUserMsgHeader<FrontendReq>,
files: Vec<File>,
) -> Result<()> {
- let buf = self.slave_req_helper.connection.recv_body_bytes(&hdr)?;
+ let buf = self.connection.recv_body_bytes(&hdr)?;
let size = buf.len();
match hdr.get_code() {
- Ok(MasterReq::SET_OWNER) => {
+ Ok(FrontendReq::SET_OWNER) => {
self.check_request_size(&hdr, size, 0)?;
let res = self.backend.set_owner();
- self.slave_req_helper.send_ack_message(&hdr, res.is_ok())?;
+ self.send_ack_message(&hdr, res.is_ok())?;
res?;
}
- Ok(MasterReq::RESET_OWNER) => {
+ Ok(FrontendReq::RESET_OWNER) => {
self.check_request_size(&hdr, size, 0)?;
let res = self.backend.reset_owner();
- self.slave_req_helper.send_ack_message(&hdr, res.is_ok())?;
+ self.send_ack_message(&hdr, res.is_ok())?;
res?;
}
- Ok(MasterReq::GET_FEATURES) => {
+ Ok(FrontendReq::GET_FEATURES) => {
self.check_request_size(&hdr, size, 0)?;
- let features = self.backend.get_features()?;
+ let mut features = self.backend.get_features()?;
+
+ // Don't advertise packed queues even if the device does. We don't handle them
+ // properly yet at the protocol layer.
+ // TODO: b/331466964 - Remove once support is added.
+ features &= !(1 << VIRTIO_F_RING_PACKED);
+
let msg = VhostUserU64::new(features);
- self.slave_req_helper.send_reply_message(&hdr, &msg)?;
+ self.send_reply_message(&hdr, &msg)?;
self.virtio_features = features;
self.update_reply_ack_flag();
}
- Ok(MasterReq::SET_FEATURES) => {
- let msg = self.extract_request_body::<VhostUserU64>(&hdr, size, &buf)?;
+ Ok(FrontendReq::SET_FEATURES) => {
+ let mut msg = self.extract_request_body::<VhostUserU64>(&hdr, size, &buf)?;
+
+ // Don't allow packed queues even if the device does. We don't handle them
+ // properly yet at the protocol layer.
+ // TODO: b/331466964 - Remove once support is added.
+ msg.value &= !(1 << VIRTIO_F_RING_PACKED);
+
let res = self.backend.set_features(msg.value);
self.acked_virtio_features = msg.value;
self.update_reply_ack_flag();
- self.slave_req_helper.send_ack_message(&hdr, res.is_ok())?;
+ self.send_ack_message(&hdr, res.is_ok())?;
res?;
}
- Ok(MasterReq::SET_MEM_TABLE) => {
+ Ok(FrontendReq::SET_MEM_TABLE) => {
let res = self.set_mem_table(&hdr, size, &buf, files);
- self.slave_req_helper.send_ack_message(&hdr, res.is_ok())?;
+ self.send_ack_message(&hdr, res.is_ok())?;
res?;
}
- Ok(MasterReq::SET_VRING_NUM) => {
+ Ok(FrontendReq::SET_VRING_NUM) => {
let msg = self.extract_request_body::<VhostUserVringState>(&hdr, size, &buf)?;
let res = self.backend.set_vring_num(msg.index, msg.num);
- self.slave_req_helper.send_ack_message(&hdr, res.is_ok())?;
+ self.send_ack_message(&hdr, res.is_ok())?;
res?;
}
- Ok(MasterReq::SET_VRING_ADDR) => {
+ Ok(FrontendReq::SET_VRING_ADDR) => {
let msg = self.extract_request_body::<VhostUserVringAddr>(&hdr, size, &buf)?;
let flags = match VhostUserVringAddrFlags::from_bits(msg.flags) {
Some(val) => val,
@@ -540,67 +430,67 @@
msg.available,
msg.log,
);
- self.slave_req_helper.send_ack_message(&hdr, res.is_ok())?;
+ self.send_ack_message(&hdr, res.is_ok())?;
res?;
}
- Ok(MasterReq::SET_VRING_BASE) => {
+ Ok(FrontendReq::SET_VRING_BASE) => {
let msg = self.extract_request_body::<VhostUserVringState>(&hdr, size, &buf)?;
let res = self.backend.set_vring_base(msg.index, msg.num);
- self.slave_req_helper.send_ack_message(&hdr, res.is_ok())?;
+ self.send_ack_message(&hdr, res.is_ok())?;
res?;
}
- Ok(MasterReq::GET_VRING_BASE) => {
+ Ok(FrontendReq::GET_VRING_BASE) => {
let msg = self.extract_request_body::<VhostUserVringState>(&hdr, size, &buf)?;
let reply = self.backend.get_vring_base(msg.index)?;
- self.slave_req_helper.send_reply_message(&hdr, &reply)?;
+ self.send_reply_message(&hdr, &reply)?;
}
- Ok(MasterReq::SET_VRING_CALL) => {
+ Ok(FrontendReq::SET_VRING_CALL) => {
self.check_request_size(&hdr, size, mem::size_of::<VhostUserU64>())?;
let (index, file) = self.handle_vring_fd_request(&buf, files)?;
let res = self.backend.set_vring_call(index, file);
- self.slave_req_helper.send_ack_message(&hdr, res.is_ok())?;
+ self.send_ack_message(&hdr, res.is_ok())?;
res?;
}
- Ok(MasterReq::SET_VRING_KICK) => {
+ Ok(FrontendReq::SET_VRING_KICK) => {
self.check_request_size(&hdr, size, mem::size_of::<VhostUserU64>())?;
let (index, file) = self.handle_vring_fd_request(&buf, files)?;
let res = self.backend.set_vring_kick(index, file);
- self.slave_req_helper.send_ack_message(&hdr, res.is_ok())?;
+ self.send_ack_message(&hdr, res.is_ok())?;
res?;
}
- Ok(MasterReq::SET_VRING_ERR) => {
+ Ok(FrontendReq::SET_VRING_ERR) => {
self.check_request_size(&hdr, size, mem::size_of::<VhostUserU64>())?;
let (index, file) = self.handle_vring_fd_request(&buf, files)?;
let res = self.backend.set_vring_err(index, file);
- self.slave_req_helper.send_ack_message(&hdr, res.is_ok())?;
+ self.send_ack_message(&hdr, res.is_ok())?;
res?;
}
- Ok(MasterReq::GET_PROTOCOL_FEATURES) => {
+ Ok(FrontendReq::GET_PROTOCOL_FEATURES) => {
self.check_request_size(&hdr, size, 0)?;
let features = self.backend.get_protocol_features()?;
let msg = VhostUserU64::new(features.bits());
- self.slave_req_helper.send_reply_message(&hdr, &msg)?;
+ self.send_reply_message(&hdr, &msg)?;
self.protocol_features = features;
self.update_reply_ack_flag();
}
- Ok(MasterReq::SET_PROTOCOL_FEATURES) => {
+ Ok(FrontendReq::SET_PROTOCOL_FEATURES) => {
let msg = self.extract_request_body::<VhostUserU64>(&hdr, size, &buf)?;
let res = self.backend.set_protocol_features(msg.value);
self.acked_protocol_features = msg.value;
self.update_reply_ack_flag();
- self.slave_req_helper.send_ack_message(&hdr, res.is_ok())?;
+ self.send_ack_message(&hdr, res.is_ok())?;
res?;
}
- Ok(MasterReq::GET_QUEUE_NUM) => {
+ Ok(FrontendReq::GET_QUEUE_NUM) => {
if self.acked_protocol_features & VhostUserProtocolFeatures::MQ.bits() == 0 {
return Err(Error::InvalidOperation);
}
self.check_request_size(&hdr, size, 0)?;
let num = self.backend.get_queue_num()?;
let msg = VhostUserU64::new(num);
- self.slave_req_helper.send_reply_message(&hdr, &msg)?;
+ self.send_reply_message(&hdr, &msg)?;
}
- Ok(MasterReq::SET_VRING_ENABLE) => {
+ Ok(FrontendReq::SET_VRING_ENABLE) => {
let msg = self.extract_request_body::<VhostUserVringState>(&hdr, size, &buf)?;
if self.acked_virtio_features & 1 << VHOST_USER_F_PROTOCOL_FEATURES == 0 {
return Err(Error::InvalidOperation);
@@ -612,35 +502,36 @@
};
let res = self.backend.set_vring_enable(msg.index, enable);
- self.slave_req_helper.send_ack_message(&hdr, res.is_ok())?;
+ self.send_ack_message(&hdr, res.is_ok())?;
res?;
}
- Ok(MasterReq::GET_CONFIG) => {
+ Ok(FrontendReq::GET_CONFIG) => {
if self.acked_protocol_features & VhostUserProtocolFeatures::CONFIG.bits() == 0 {
return Err(Error::InvalidOperation);
}
self.check_request_size(&hdr, size, hdr.get_size() as usize)?;
self.get_config(&hdr, &buf)?;
}
- Ok(MasterReq::SET_CONFIG) => {
+ Ok(FrontendReq::SET_CONFIG) => {
if self.acked_protocol_features & VhostUserProtocolFeatures::CONFIG.bits() == 0 {
return Err(Error::InvalidOperation);
}
self.check_request_size(&hdr, size, hdr.get_size() as usize)?;
let res = self.set_config(&buf);
- self.slave_req_helper.send_ack_message(&hdr, res.is_ok())?;
+ self.send_ack_message(&hdr, res.is_ok())?;
res?;
}
- Ok(MasterReq::SET_SLAVE_REQ_FD) => {
- if self.acked_protocol_features & VhostUserProtocolFeatures::SLAVE_REQ.bits() == 0 {
+ Ok(FrontendReq::SET_BACKEND_REQ_FD) => {
+ if self.acked_protocol_features & VhostUserProtocolFeatures::BACKEND_REQ.bits() == 0
+ {
return Err(Error::InvalidOperation);
}
self.check_request_size(&hdr, size, hdr.get_size() as usize)?;
- let res = self.set_slave_req_fd(files);
- self.slave_req_helper.send_ack_message(&hdr, res.is_ok())?;
+ let res = self.set_backend_req_fd(files);
+ self.send_ack_message(&hdr, res.is_ok())?;
res?;
}
- Ok(MasterReq::GET_INFLIGHT_FD) => {
+ Ok(FrontendReq::GET_INFLIGHT_FD) => {
if self.acked_protocol_features & VhostUserProtocolFeatures::INFLIGHT_SHMFD.bits()
== 0
{
@@ -649,16 +540,14 @@
let msg = self.extract_request_body::<VhostUserInflight>(&hdr, size, &buf)?;
let (inflight, file) = self.backend.get_inflight_fd(&msg)?;
- let reply_hdr = self
- .slave_req_helper
- .new_reply_header::<VhostUserInflight>(&hdr, 0)?;
- self.slave_req_helper.connection.send_message(
+ let reply_hdr = self.new_reply_header::<VhostUserInflight>(&hdr, 0)?;
+ self.connection.send_message(
&reply_hdr,
&inflight,
Some(&[file.as_raw_descriptor()]),
)?;
}
- Ok(MasterReq::SET_INFLIGHT_FD) => {
+ Ok(FrontendReq::SET_INFLIGHT_FD) => {
if self.acked_protocol_features & VhostUserProtocolFeatures::INFLIGHT_SHMFD.bits()
== 0
{
@@ -667,10 +556,10 @@
let file = into_single_file(files).ok_or(Error::IncorrectFds)?;
let msg = self.extract_request_body::<VhostUserInflight>(&hdr, size, &buf)?;
let res = self.backend.set_inflight_fd(&msg, file);
- self.slave_req_helper.send_ack_message(&hdr, res.is_ok())?;
+ self.send_ack_message(&hdr, res.is_ok())?;
res?;
}
- Ok(MasterReq::GET_MAX_MEM_SLOTS) => {
+ Ok(FrontendReq::GET_MAX_MEM_SLOTS) => {
if self.acked_protocol_features
& VhostUserProtocolFeatures::CONFIGURE_MEM_SLOTS.bits()
== 0
@@ -680,9 +569,9 @@
self.check_request_size(&hdr, size, 0)?;
let num = self.backend.get_max_mem_slots()?;
let msg = VhostUserU64::new(num);
- self.slave_req_helper.send_reply_message(&hdr, &msg)?;
+ self.send_reply_message(&hdr, &msg)?;
}
- Ok(MasterReq::ADD_MEM_REG) => {
+ Ok(FrontendReq::ADD_MEM_REG) => {
if self.acked_protocol_features
& VhostUserProtocolFeatures::CONFIGURE_MEM_SLOTS.bits()
== 0
@@ -693,10 +582,10 @@
let msg =
self.extract_request_body::<VhostUserSingleMemoryRegion>(&hdr, size, &buf)?;
let res = self.backend.add_mem_region(&msg, file);
- self.slave_req_helper.send_ack_message(&hdr, res.is_ok())?;
+ self.send_ack_message(&hdr, res.is_ok())?;
res?;
}
- Ok(MasterReq::REM_MEM_REG) => {
+ Ok(FrontendReq::REM_MEM_REG) => {
if self.acked_protocol_features
& VhostUserProtocolFeatures::CONFIGURE_MEM_SLOTS.bits()
== 0
@@ -707,30 +596,29 @@
let msg =
self.extract_request_body::<VhostUserSingleMemoryRegion>(&hdr, size, &buf)?;
let res = self.backend.remove_mem_region(&msg);
- self.slave_req_helper.send_ack_message(&hdr, res.is_ok())?;
+ self.send_ack_message(&hdr, res.is_ok())?;
res?;
}
- Ok(MasterReq::GET_SHARED_MEMORY_REGIONS) => {
+ Ok(FrontendReq::GET_SHARED_MEMORY_REGIONS) => {
let regions = self.backend.get_shared_memory_regions()?;
let mut buf = Vec::new();
let msg = VhostUserU64::new(regions.len() as u64);
for r in regions {
buf.extend_from_slice(r.as_bytes())
}
- self.slave_req_helper
- .send_reply_with_payload(&hdr, &msg, buf.as_slice())?;
+ self.send_reply_with_payload(&hdr, &msg, buf.as_slice())?;
}
- Ok(MasterReq::SLEEP) => {
+ Ok(FrontendReq::SLEEP) => {
let res = self.backend.sleep();
let msg = VhostUserSuccess::new(res.is_ok());
- self.slave_req_helper.send_reply_message(&hdr, &msg)?;
+ self.send_reply_message(&hdr, &msg)?;
}
- Ok(MasterReq::WAKE) => {
+ Ok(FrontendReq::WAKE) => {
let res = self.backend.wake();
let msg = VhostUserSuccess::new(res.is_ok());
- self.slave_req_helper.send_reply_message(&hdr, &msg)?;
+ self.send_reply_message(&hdr, &msg)?;
}
- Ok(MasterReq::SNAPSHOT) => {
+ Ok(FrontendReq::SNAPSHOT) => {
let (success_msg, payload) = match self.backend.snapshot() {
Ok(snapshot_payload) => (VhostUserSuccess::new(true), snapshot_payload),
Err(e) => {
@@ -738,16 +626,12 @@
(VhostUserSuccess::new(false), Vec::new())
}
};
- self.slave_req_helper.send_reply_with_payload(
- &hdr,
- &success_msg,
- payload.as_slice(),
- )?;
+ self.send_reply_with_payload(&hdr, &success_msg, payload.as_slice())?;
}
- Ok(MasterReq::RESTORE) => {
+ Ok(FrontendReq::RESTORE) => {
let res = self.backend.restore(buf.as_slice(), files);
let msg = VhostUserSuccess::new(res.is_ok());
- self.slave_req_helper.send_reply_message(&hdr, &msg)?;
+ self.send_reply_message(&hdr, &msg)?;
}
_ => {
return Err(Error::InvalidMessage);
@@ -756,9 +640,63 @@
Ok(())
}
+ fn new_reply_header<T: Sized>(
+ &self,
+ req: &VhostUserMsgHeader<FrontendReq>,
+ payload_size: usize,
+ ) -> Result<VhostUserMsgHeader<FrontendReq>> {
+ Ok(VhostUserMsgHeader::new(
+ req.get_code().map_err(|_| Error::InvalidMessage)?,
+ VhostUserHeaderFlag::REPLY.bits(),
+ (mem::size_of::<T>()
+ .checked_add(payload_size)
+ .ok_or(Error::OversizedMsg)?)
+ .try_into()
+ .map_err(Error::InvalidCastToInt)?,
+ ))
+ }
+
+ /// Sends reply back to Vhost frontend in response to a message.
+ fn send_ack_message(
+ &mut self,
+ req: &VhostUserMsgHeader<FrontendReq>,
+ success: bool,
+ ) -> Result<()> {
+ if self.reply_ack_enabled && req.is_need_reply() {
+ let hdr: VhostUserMsgHeader<FrontendReq> =
+ self.new_reply_header::<VhostUserU64>(req, 0)?;
+ let val = if success { 0 } else { 1 };
+ let msg = VhostUserU64::new(val);
+ self.connection.send_message(&hdr, &msg, None)?;
+ }
+ Ok(())
+ }
+
+ fn send_reply_message<T: Sized + AsBytes>(
+ &mut self,
+ req: &VhostUserMsgHeader<FrontendReq>,
+ msg: &T,
+ ) -> Result<()> {
+ let hdr = self.new_reply_header::<T>(req, 0)?;
+ self.connection.send_message(&hdr, msg, None)?;
+ Ok(())
+ }
+
+ fn send_reply_with_payload<T: Sized + AsBytes>(
+ &mut self,
+ req: &VhostUserMsgHeader<FrontendReq>,
+ msg: &T,
+ payload: &[u8],
+ ) -> Result<()> {
+ let hdr = self.new_reply_header::<T>(req, payload.len())?;
+ self.connection
+ .send_message_with_payload(&hdr, msg, payload, None)?;
+ Ok(())
+ }
+
fn set_mem_table(
&mut self,
- hdr: &VhostUserMsgHeader<MasterReq>,
+ hdr: &VhostUserMsgHeader<FrontendReq>,
size: usize,
buf: &[u8],
files: Vec<File>,
@@ -795,7 +733,7 @@
self.backend.set_mem_table(®ions, files)
}
- fn get_config(&mut self, hdr: &VhostUserMsgHeader<MasterReq>, buf: &[u8]) -> Result<()> {
+ fn get_config(&mut self, hdr: &VhostUserMsgHeader<FrontendReq>, buf: &[u8]) -> Result<()> {
let (msg, payload) =
Ref::<_, VhostUserConfig>::new_from_prefix(buf).ok_or(Error::InvalidMessage)?;
if !msg.is_valid() {
@@ -810,22 +748,20 @@
};
let res = self.backend.get_config(msg.offset, msg.size, flags);
- // vhost-user slave's payload size MUST match master's request
- // on success, uses zero length of payload to indicate an error
- // to vhost-user master.
+ // The response payload size MUST match the request payload size on success. A zero length
+ // response is used to indicate an error.
match res {
Ok(ref buf) if buf.len() == msg.size as usize => {
let reply = VhostUserConfig::new(msg.offset, buf.len() as u32, flags);
- self.slave_req_helper
- .send_reply_with_payload(hdr, &reply, buf.as_slice())?;
+ self.send_reply_with_payload(hdr, &reply, buf.as_slice())?;
}
Ok(_) => {
let reply = VhostUserConfig::new(msg.offset, 0, flags);
- self.slave_req_helper.send_reply_message(hdr, &reply)?;
+ self.send_reply_message(hdr, &reply)?;
}
Err(_) => {
let reply = VhostUserConfig::new(msg.offset, 0, flags);
- self.slave_req_helper.send_reply_message(hdr, &reply)?;
+ self.send_reply_message(hdr, &reply)?;
}
}
Ok(())
@@ -848,27 +784,48 @@
self.backend.set_config(msg.offset, payload, flags)
}
- fn set_slave_req_fd(&mut self, files: Vec<File>) -> Result<()> {
+ fn set_backend_req_fd(&mut self, files: Vec<File>) -> Result<()> {
let file = into_single_file(files).ok_or(Error::InvalidMessage)?;
let fd = file.into();
// SAFETY: Safe because the protocol promises the file represents the appropriate file type
// for the platform.
let stream = unsafe { to_system_stream(fd) }?;
- self.backend.set_slave_req_fd(Connection::from(stream));
+ self.backend.set_backend_req_fd(Connection::from(stream));
Ok(())
}
+ /// Parses an incoming |SET_VRING_KICK| or |SET_VRING_CALL| message into a
+ /// Vring number and an fd.
fn handle_vring_fd_request(
&mut self,
buf: &[u8],
files: Vec<File>,
) -> Result<(u8, Option<File>)> {
- self.slave_req_helper.handle_vring_fd_request(buf, files)
+ let msg = VhostUserU64::read_from_prefix(buf).ok_or(Error::InvalidMessage)?;
+ if !msg.is_valid() {
+ return Err(Error::InvalidMessage);
+ }
+
+ // Bits (0-7) of the payload contain the vring index. Bit 8 is the
+ // invalid FD flag (VHOST_USER_VRING_NOFD_MASK).
+ // This bit is set when there is no file descriptor
+ // in the ancillary data. This signals that polling will be used
+ // instead of waiting for the call.
+ // If Bit 8 is unset, the data must contain a file descriptor.
+ let has_fd = (msg.value & 0x100u64) == 0;
+
+ let file = into_single_file(files);
+
+ if has_fd && file.is_none() || !has_fd && file.is_some() {
+ return Err(Error::InvalidMessage);
+ }
+
+ Ok((msg.value as u8, file))
}
fn check_request_size(
&self,
- hdr: &VhostUserMsgHeader<MasterReq>,
+ hdr: &VhostUserMsgHeader<FrontendReq>,
size: usize,
expected: usize,
) -> Result<()> {
@@ -884,20 +841,20 @@
fn check_attached_files(
&self,
- hdr: &VhostUserMsgHeader<MasterReq>,
+ hdr: &VhostUserMsgHeader<FrontendReq>,
files: &[File],
) -> Result<()> {
match hdr.get_code() {
- Ok(MasterReq::SET_MEM_TABLE)
- | Ok(MasterReq::SET_VRING_CALL)
- | Ok(MasterReq::SET_VRING_KICK)
- | Ok(MasterReq::SET_VRING_ERR)
- | Ok(MasterReq::SET_LOG_BASE)
- | Ok(MasterReq::SET_LOG_FD)
- | Ok(MasterReq::SET_SLAVE_REQ_FD)
- | Ok(MasterReq::SET_INFLIGHT_FD)
- | Ok(MasterReq::RESTORE)
- | Ok(MasterReq::ADD_MEM_REG) => Ok(()),
+ Ok(FrontendReq::SET_MEM_TABLE)
+ | Ok(FrontendReq::SET_VRING_CALL)
+ | Ok(FrontendReq::SET_VRING_KICK)
+ | Ok(FrontendReq::SET_VRING_ERR)
+ | Ok(FrontendReq::SET_LOG_BASE)
+ | Ok(FrontendReq::SET_LOG_FD)
+ | Ok(FrontendReq::SET_BACKEND_REQ_FD)
+ | Ok(FrontendReq::SET_INFLIGHT_FD)
+ | Ok(FrontendReq::RESTORE)
+ | Ok(FrontendReq::ADD_MEM_REG) => Ok(()),
Err(_) => Err(Error::InvalidMessage),
_ if !files.is_empty() => Err(Error::InvalidMessage),
_ => Ok(()),
@@ -906,7 +863,7 @@
fn extract_request_body<T: Sized + FromBytes + VhostUserMsgValidator>(
&self,
- hdr: &VhostUserMsgHeader<MasterReq>,
+ hdr: &VhostUserMsgHeader<FrontendReq>,
size: usize,
buf: &[u8],
) -> Result<T> {
@@ -918,17 +875,16 @@
fn update_reply_ack_flag(&mut self) {
let pflag = VhostUserProtocolFeatures::REPLY_ACK;
- self.slave_req_helper.reply_ack_enabled =
- (self.virtio_features & 1 << VHOST_USER_F_PROTOCOL_FEATURES) != 0
- && self.protocol_features.contains(pflag)
- && (self.acked_protocol_features & pflag.bits()) != 0;
+ self.reply_ack_enabled = (self.virtio_features & 1 << VHOST_USER_F_PROTOCOL_FEATURES) != 0
+ && self.protocol_features.contains(pflag)
+ && (self.acked_protocol_features & pflag.bits()) != 0;
}
}
-impl<S: VhostUserSlaveReqHandler> AsRawDescriptor for SlaveReqHandler<S> {
+impl<S: Backend> AsRawDescriptor for BackendServer<S> {
fn as_raw_descriptor(&self) -> RawDescriptor {
// TODO(b/221882601): figure out if this used for polling.
- self.slave_req_helper.connection.as_raw_descriptor()
+ self.connection.as_raw_descriptor()
}
}
@@ -937,16 +893,16 @@
use base::INVALID_DESCRIPTOR;
use super::*;
- use crate::dummy_slave::DummySlaveReqHandler;
+ use crate::test_backend::TestBackend;
use crate::Connection;
use crate::SystemStream;
#[test]
- fn test_slave_req_handler_new() {
+ fn test_backend_server_new() {
let (p1, _p2) = SystemStream::pair().unwrap();
let connection = Connection::from(p1);
- let backend = DummySlaveReqHandler::new();
- let handler = SlaveReqHandler::new(connection, backend);
+ let backend = TestBackend::new();
+ let handler = BackendServer::new(connection, backend);
assert!(handler.as_raw_descriptor() != INVALID_DESCRIPTOR);
}
diff --git a/third_party/vmm_vhost/src/connection.rs b/third_party/vmm_vhost/src/connection.rs
index f0a79ca..4824a85 100644
--- a/third_party/vmm_vhost/src/connection.rs
+++ b/third_party/vmm_vhost/src/connection.rs
@@ -3,19 +3,6 @@
//! Common data structures for listener and connection.
-cfg_if::cfg_if! {
- if #[cfg(unix)] {
- pub mod socket;
- pub use socket::to_system_stream;
- mod unix;
- } else if #[cfg(windows)] {
- mod tube;
- pub use tube::TubePlatformConnection;
- pub use tube::to_system_stream;
- mod windows;
- }
-}
-
use std::fs::File;
use std::io::IoSliceMut;
use std::mem;
@@ -27,7 +14,7 @@
use zerocopy::FromBytes;
use crate::connection::Req;
-use crate::message::MasterReq;
+use crate::message::FrontendReq;
use crate::message::*;
use crate::sys::PlatformConnection;
use crate::Error;
@@ -37,7 +24,7 @@
/// Listener for accepting connections.
pub trait Listener: Sized {
/// Accept an incoming connection.
- fn accept(&mut self) -> Result<Option<Connection<MasterReq>>>;
+ fn accept(&mut self) -> Result<Option<Connection<FrontendReq>>>;
/// Change blocking status on the listener.
fn set_nonblocking(&self, block: bool) -> Result<()>;
@@ -264,33 +251,30 @@
use super::*;
use crate::message::VhostUserEmptyMessage;
use crate::message::VhostUserU64;
-
- cfg_if::cfg_if! {
- if #[cfg(unix)] {
- pub(crate) use super::unix::tests::*;
- } else if #[cfg(windows)] {
- pub(crate) use super::windows::tests::*;
- }
- }
+ use crate::tests::create_connection_pair;
#[test]
fn send_header_only() {
- let (master, slave) = create_connection_pair();
- let hdr1 = VhostUserMsgHeader::new(MasterReq::GET_FEATURES, 0, 0);
- master.send_header_only_message(&hdr1, None).unwrap();
- let (hdr2, _, files) = slave.recv_message::<VhostUserEmptyMessage>().unwrap();
+ let (client_connection, server_connection) = create_connection_pair();
+ let hdr1 = VhostUserMsgHeader::new(FrontendReq::GET_FEATURES, 0, 0);
+ client_connection
+ .send_header_only_message(&hdr1, None)
+ .unwrap();
+ let (hdr2, _, files) = server_connection
+ .recv_message::<VhostUserEmptyMessage>()
+ .unwrap();
assert_eq!(hdr1, hdr2);
assert!(files.is_empty());
}
#[test]
fn send_data() {
- let (master, slave) = create_connection_pair();
- let hdr1 = VhostUserMsgHeader::new(MasterReq::SET_FEATURES, 0, 8);
- master
+ let (client_connection, server_connection) = create_connection_pair();
+ let hdr1 = VhostUserMsgHeader::new(FrontendReq::SET_FEATURES, 0, 8);
+ client_connection
.send_message(&hdr1, &VhostUserU64::new(0xf00dbeefdeadf00d), None)
.unwrap();
- let (hdr2, body, files) = slave.recv_message::<VhostUserU64>().unwrap();
+ let (hdr2, body, files) = server_connection.recv_message::<VhostUserU64>().unwrap();
assert_eq!(hdr1, hdr2);
let value = body.value;
assert_eq!(value, 0xf00dbeefdeadf00d);
@@ -299,18 +283,20 @@
#[test]
fn send_fd() {
- let (master, slave) = create_connection_pair();
+ let (client_connection, server_connection) = create_connection_pair();
let mut fd = tempfile().unwrap();
write!(fd, "test").unwrap();
// Normal case for sending/receiving file descriptors
- let hdr1 = VhostUserMsgHeader::new(MasterReq::SET_MEM_TABLE, 0, 0);
- master
+ let hdr1 = VhostUserMsgHeader::new(FrontendReq::SET_MEM_TABLE, 0, 0);
+ client_connection
.send_header_only_message(&hdr1, Some(&[fd.as_raw_descriptor()]))
.unwrap();
- let (hdr2, _, files) = slave.recv_message::<VhostUserEmptyMessage>().unwrap();
+ let (hdr2, _, files) = server_connection
+ .recv_message::<VhostUserEmptyMessage>()
+ .unwrap();
assert_eq!(hdr1, hdr2);
assert_eq!(files.len(), 1);
let mut file = &files[0];
diff --git a/third_party/vmm_vhost/src/connection/socket.rs b/third_party/vmm_vhost/src/connection/socket.rs
deleted file mode 100644
index dbd768f..0000000
--- a/third_party/vmm_vhost/src/connection/socket.rs
+++ /dev/null
@@ -1,308 +0,0 @@
-// Copyright 2021 The Chromium OS Authors. All rights reserved.
-// SPDX-License-Identifier: Apache-2.0
-
-//! Structs for Unix Domain Socket listener and connection.
-
-use std::any::Any;
-use std::fs::File;
-use std::io::ErrorKind;
-use std::io::IoSlice;
-use std::io::IoSliceMut;
-use std::path::Path;
-use std::path::PathBuf;
-
-use base::AsRawDescriptor;
-use base::RawDescriptor;
-use base::SafeDescriptor;
-use base::ScmSocket;
-
-use crate::connection::Listener;
-use crate::message::*;
-use crate::unix::SystemListener;
-use crate::Connection;
-use crate::Error;
-use crate::Result;
-use crate::SystemStream;
-
-/// Unix domain socket listener for accepting incoming connections.
-pub struct SocketListener {
- fd: SystemListener,
- drop_path: Option<Box<dyn Any>>,
-}
-
-impl SocketListener {
- /// Create a unix domain socket listener.
- ///
- /// # Return:
- /// * - the new SocketListener object on success.
- /// * - SocketError: failed to create listener socket.
- pub fn new<P: AsRef<Path>>(path: P, unlink: bool) -> Result<Self> {
- if unlink {
- let _ = std::fs::remove_file(&path);
- }
- let fd = SystemListener::bind(&path).map_err(Error::SocketError)?;
-
- struct DropPath {
- path: PathBuf,
- }
-
- impl Drop for DropPath {
- fn drop(&mut self) {
- let _ = std::fs::remove_file(&self.path);
- }
- }
-
- Ok(SocketListener {
- fd,
- drop_path: Some(Box::new(DropPath {
- path: path.as_ref().to_owned(),
- })),
- })
- }
-
- /// Take and return the resources that the parent process needs to keep alive as long as the
- /// child process lives, in case of incoming fork.
- pub fn take_resources_for_parent(&mut self) -> Option<Box<dyn Any>> {
- self.drop_path.take()
- }
-}
-
-impl Listener for SocketListener {
- /// Accept an incoming connection.
- ///
- /// # Return:
- /// * - Some(SystemListener): new SystemListener object if new incoming connection is available.
- /// * - None: no incoming connection available.
- /// * - SocketError: errors from accept().
- fn accept(&mut self) -> Result<Option<Connection<MasterReq>>> {
- loop {
- match self.fd.accept() {
- Ok((stream, _addr)) => {
- return Ok(Some(Connection::from(stream)));
- }
- Err(e) => {
- match e.kind() {
- // No incoming connection available.
- ErrorKind::WouldBlock => return Ok(None),
- // New connection closed by peer.
- ErrorKind::ConnectionAborted => return Ok(None),
- // Interrupted by signals, retry
- ErrorKind::Interrupted => continue,
- _ => return Err(Error::SocketError(e)),
- }
- }
- }
- }
- }
-
- /// Change blocking status on the listener.
- ///
- /// # Return:
- /// * - () on success.
- /// * - SocketError: failure from set_nonblocking().
- fn set_nonblocking(&self, block: bool) -> Result<()> {
- self.fd.set_nonblocking(block).map_err(Error::SocketError)
- }
-}
-
-impl AsRawDescriptor for SocketListener {
- fn as_raw_descriptor(&self) -> RawDescriptor {
- self.fd.as_raw_descriptor()
- }
-}
-
-/// Unix domain socket based vhost-user connection.
-pub struct SocketPlatformConnection {
- sock: ScmSocket<SystemStream>,
-}
-
-// TODO: Switch to TryFrom to avoid the unwrap.
-impl From<SystemStream> for SocketPlatformConnection {
- fn from(sock: SystemStream) -> Self {
- Self {
- sock: sock.try_into().unwrap(),
- }
- }
-}
-
-// Advance the internal cursor of the slices.
-// This is same with a nightly API `IoSlice::advance_slices` but for `&[u8]`.
-fn advance_slices(bufs: &mut &mut [&[u8]], mut count: usize) {
- use std::mem::take;
-
- let mut idx = 0;
- for b in bufs.iter() {
- if count < b.len() {
- break;
- }
- count -= b.len();
- idx += 1;
- }
- *bufs = &mut take(bufs)[idx..];
- if !bufs.is_empty() {
- bufs[0] = &bufs[0][count..];
- }
-}
-
-impl SocketPlatformConnection {
- /// Create a new stream by connecting to server at `str`.
- ///
- /// # Return:
- /// * - the new SocketPlatformConnection object on success.
- /// * - SocketConnect: failed to connect to peer.
- pub fn connect<P: AsRef<Path>>(path: P) -> Result<Self> {
- let sock = SystemStream::connect(path).map_err(Error::SocketConnect)?;
- Ok(Self::from(sock))
- }
-
- /// Sends all bytes from scatter-gather vectors with optional attached file descriptors. Will
- /// loop until all data has been transfered.
- ///
- /// # TODO
- /// This function takes a slice of `&[u8]` instead of `IoSlice` because the internal
- /// cursor needs to be moved by `advance_slices()`.
- /// Once `IoSlice::advance_slices()` becomes stable, this should be updated.
- /// <https://github.com/rust-lang/rust/issues/62726>.
- fn send_iovec_all(
- &self,
- mut iovs: &mut [&[u8]],
- mut fds: Option<&[RawDescriptor]>,
- ) -> Result<()> {
- // Guarantee that `iovs` becomes empty if it doesn't contain any data.
- advance_slices(&mut iovs, 0);
-
- while !iovs.is_empty() {
- let iovec: Vec<_> = iovs.iter_mut().map(|i| IoSlice::new(i)).collect();
- match self.sock.send_vectored_with_fds(&iovec, fds.unwrap_or(&[])) {
- Ok(n) => {
- fds = None;
- advance_slices(&mut iovs, n);
- }
- Err(e) => match e.kind() {
- ErrorKind::WouldBlock | ErrorKind::Interrupted => {}
- _ => return Err(Error::SocketError(e)),
- },
- }
- }
- Ok(())
- }
-
- /// Sends a single message over the socket with optional attached file descriptors.
- ///
- /// - `hdr`: vhost message header
- /// - `body`: vhost message body (may be empty to send a header-only message)
- /// - `payload`: additional bytes to append to `body` (may be empty)
- pub fn send_message(
- &self,
- hdr: &[u8],
- body: &[u8],
- payload: &[u8],
- fds: Option<&[RawDescriptor]>,
- ) -> Result<()> {
- let mut iobufs = [hdr, body, payload];
- self.send_iovec_all(&mut iobufs, fds)
- }
-
- /// Reads bytes from the socket into the given scatter/gather vectors with optional attached
- /// file.
- ///
- /// The underlying communication channel is a Unix domain socket in STREAM mode. It's a little
- /// tricky to pass file descriptors through such a communication channel. Let's assume that a
- /// sender sending a message with some file descriptors attached. To successfully receive those
- /// attached file descriptors, the receiver must obey following rules:
- /// 1) file descriptors are attached to a message.
- /// 2) message(packet) boundaries must be respected on the receive side.
- /// In other words, recvmsg() operations must not cross the packet boundary, otherwise the
- /// attached file descriptors will get lost.
- /// Note that this function wraps received file descriptors as `File`.
- ///
- /// # Return:
- /// * - (number of bytes received, [received files]) on success
- /// * - Disconnect: the connection is closed.
- /// * - SocketRetry: temporary error caused by signals or short of resources.
- /// * - SocketBroken: the underline socket is broken.
- /// * - SocketError: other socket related errors.
- pub fn recv_into_bufs(
- &self,
- bufs: &mut [IoSliceMut],
- allow_fd: bool,
- ) -> Result<(usize, Option<Vec<File>>)> {
- let max_fds = if allow_fd { MAX_ATTACHED_FD_ENTRIES } else { 0 };
- let (bytes, fds) = self.sock.recv_vectored_with_fds(bufs, max_fds)?;
-
- // 0-bytes indicates that the connection is closed.
- if bytes == 0 {
- return Err(Error::Disconnect);
- }
-
- let files = if fds.is_empty() {
- None
- } else {
- Some(fds.into_iter().map(File::from).collect())
- };
-
- Ok((bytes, files))
- }
-}
-
-impl AsRawDescriptor for SocketPlatformConnection {
- fn as_raw_descriptor(&self) -> RawDescriptor {
- self.sock.as_raw_descriptor()
- }
-}
-
-impl AsMut<SystemStream> for SocketPlatformConnection {
- fn as_mut(&mut self) -> &mut SystemStream {
- self.sock.inner_mut()
- }
-}
-
-/// Convert a `SafeDescriptor` to a `UnixStream`.
-///
-/// # Safety
-///
-/// `file` must represent a unix domain socket.
-pub unsafe fn to_system_stream(fd: SafeDescriptor) -> Result<SystemStream> {
- Ok(fd.into())
-}
-
-#[cfg(test)]
-mod tests {
- use super::*;
- use crate::connection::unix::tests::temp_dir;
-
- #[test]
- fn create_listener() {
- let dir = temp_dir();
- let mut path = dir.path().to_owned();
- path.push("sock");
- let listener = SocketListener::new(&path, true).unwrap();
-
- assert!(listener.as_raw_descriptor() > 0);
- }
-
- #[test]
- fn accept_connection() {
- let dir = temp_dir();
- let mut path = dir.path().to_owned();
- path.push("sock");
- let mut listener = SocketListener::new(&path, true).unwrap();
- listener.set_nonblocking(true).unwrap();
-
- // accept on a fd without incoming connection
- let conn = listener.accept().unwrap();
- assert!(conn.is_none());
- }
-
- #[test]
- fn test_advance_slices() {
- // Test case from https://doc.rust-lang.org/std/io/struct.IoSlice.html#method.advance_slices
- let buf1 = [1; 8];
- let buf2 = [2; 16];
- let buf3 = [3; 8];
- let mut bufs = &mut [&buf1[..], &buf2[..], &buf3[..]][..];
- advance_slices(&mut bufs, 10);
- assert_eq!(bufs[0], [2; 14].as_ref());
- assert_eq!(bufs[1], [3; 8].as_ref());
- }
-}
diff --git a/third_party/vmm_vhost/src/connection/tube.rs b/third_party/vmm_vhost/src/connection/tube.rs
deleted file mode 100644
index b22fa93..0000000
--- a/third_party/vmm_vhost/src/connection/tube.rs
+++ /dev/null
@@ -1,185 +0,0 @@
-// Copyright 2022 The Chromium OS Authors. All rights reserved.
-// SPDX-License-Identifier: Apache-2.0
-
-//! Structs for Tube based connection. Listeners are not used with Tubes, since they are
-//! essentially fancy socket pairs.
-
-use std::cmp::min;
-use std::fs::File;
-use std::io::IoSliceMut;
-use std::path::Path;
-use std::ptr::copy_nonoverlapping;
-
-use base::AsRawDescriptor;
-use base::FromRawDescriptor;
-use base::RawDescriptor;
-use base::SafeDescriptor;
-use base::Tube;
-use serde::Deserialize;
-use serde::Serialize;
-use tube_transporter::packed_tube;
-
-use crate::Error;
-use crate::Result;
-use crate::SystemStream;
-
-#[derive(Serialize, Deserialize)]
-struct RawDescriptorContainer {
- #[serde(with = "base::with_raw_descriptor")]
- rd: RawDescriptor,
-}
-
-#[derive(Serialize, Deserialize)]
-struct Message {
- rds: Vec<RawDescriptorContainer>,
- data: Vec<u8>,
-}
-
-/// Tube based vhost-user connection.
-pub struct TubePlatformConnection {
- tube: Tube,
-}
-
-impl TubePlatformConnection {
- pub(crate) fn get_tube(&self) -> &Tube {
- &self.tube
- }
-}
-
-impl From<Tube> for TubePlatformConnection {
- fn from(tube: Tube) -> Self {
- Self { tube }
- }
-}
-
-impl TubePlatformConnection {
- pub fn connect<P: AsRef<Path>>(_path: P) -> Result<Self> {
- unimplemented!("connections not supported on Tubes")
- }
-
- /// Sends a single message over the socket with optional attached file descriptors.
- ///
- /// - `hdr`: vhost message header
- /// - `body`: vhost message body (may be empty to send a header-only message)
- /// - `payload`: additional bytes to append to `body` (may be empty)
- pub fn send_message(
- &self,
- hdr: &[u8],
- body: &[u8],
- payload: &[u8],
- rds: Option<&[RawDescriptor]>,
- ) -> Result<()> {
- let hdr_msg = Message {
- rds: rds
- .unwrap_or(&[])
- .iter()
- .map(|rd| RawDescriptorContainer { rd: *rd })
- .collect(),
- data: hdr.to_vec(),
- };
-
- let mut body_data = Vec::with_capacity(body.len() + payload.len());
- body_data.extend_from_slice(body);
- body_data.extend_from_slice(payload);
- let body_msg = Message {
- rds: Vec::new(),
- data: body_data,
- };
-
- // We send the header and the body separately here. This is necessary on Windows. Otherwise
- // the recv side cannot read the header independently (the transport is message oriented).
- self.tube.send(&hdr_msg)?;
- if !body_msg.data.is_empty() {
- self.tube.send(&body_msg)?;
- }
-
- Ok(())
- }
-
- /// Reads bytes from the tube into the given scatter/gather vectors with optional attached
- /// file.
- ///
- /// The underlying communication channel is a Tube. Providing too little recv buffer space will
- /// cause data to get dropped (with an error). This is tricky to fix with Tube backing our
- /// transport layer, and as far as we can tell, is not exercised in practice.
- ///
- /// # Return:
- /// * - (number of bytes received, [received files]) on success
- /// * - RecvBufferTooSmall: Input bufs is too small for the received buffer.
- /// * - TubeError: tube related errors.
- pub fn recv_into_bufs(
- &self,
- bufs: &mut [IoSliceMut],
- _allow_rds: bool,
- ) -> Result<(usize, Option<Vec<File>>)> {
- // TODO(b/221882601): implement "allow_rds"
-
- let msg: Message = self.tube.recv()?;
-
- let files = match msg.rds.len() {
- 0 => None,
- _ => Some(
- msg.rds
- .iter()
- .map(|r|
- // SAFETY:
- // Safe because we own r.rd and it is guaranteed valid.
- unsafe { File::from_raw_descriptor(r.rd) })
- .collect::<Vec<File>>(),
- ),
- };
-
- let mut bytes_read = 0;
- for dest_iov in bufs.iter_mut() {
- if bytes_read >= msg.data.len() {
- // We've read all the available data into the iovecs.
- break;
- }
-
- let copy_count = min(dest_iov.len(), msg.data.len() - bytes_read);
-
- // SAFETY:
- // Safe because:
- // 1) msg.data and dest_iov do not overlap.
- // 2) copy_count is bounded by dest_iov's length and msg.data.len() so we can't
- // overrun.
- unsafe {
- copy_nonoverlapping(
- msg.data.as_ptr().add(bytes_read),
- dest_iov.as_mut_ptr(),
- copy_count,
- )
- };
- bytes_read += copy_count;
- }
-
- if bytes_read != msg.data.len() {
- // User didn't supply enough iov space.
- return Err(Error::RecvBufferTooSmall {
- got: bytes_read,
- want: msg.data.len(),
- });
- }
-
- Ok((bytes_read, files))
- }
-}
-
-/// Convert a`SafeDescriptor` to a `Tube`.
-///
-/// # Safety
-///
-/// `fd` must represent a packed tube.
-pub unsafe fn to_system_stream(fd: SafeDescriptor) -> Result<SystemStream> {
- // SAFETY: Safe because the file represents a packed tube.
- let tube = unsafe { packed_tube::unpack(fd).expect("unpacked Tube") };
- Ok(tube)
-}
-
-impl AsRawDescriptor for TubePlatformConnection {
- /// WARNING: this function does not return a waitable descriptor! Use base::ReadNotifier
- /// instead.
- fn as_raw_descriptor(&self) -> RawDescriptor {
- self.tube.as_raw_descriptor()
- }
-}
diff --git a/third_party/vmm_vhost/src/connection/unix.rs b/third_party/vmm_vhost/src/connection/unix.rs
deleted file mode 100644
index f64bde8..0000000
--- a/third_party/vmm_vhost/src/connection/unix.rs
+++ /dev/null
@@ -1,77 +0,0 @@
-// Copyright 2022 The Chromium OS Authors. All rights reserved.
-// SPDX-License-Identifier: Apache-2.0
-
-//! Unix specific code that keeps rest of the code in the crate platform independent.
-
-#[cfg(test)]
-pub(crate) mod tests {
- use tempfile::Builder;
- use tempfile::TempDir;
-
- use crate::connection::socket::SocketListener;
- use crate::connection::Listener;
- use crate::master::Master;
- use crate::message::MasterReq;
- use crate::slave_req_handler::SlaveReqHandler;
- use crate::slave_req_handler::VhostUserSlaveReqHandler;
- use crate::Connection;
-
- pub(crate) fn temp_dir() -> TempDir {
- Builder::new().prefix("/tmp/vhost_test").tempdir().unwrap()
- }
-
- pub(crate) fn create_pair() -> (Master, Connection<MasterReq>) {
- let dir = temp_dir();
- let mut path = dir.path().to_owned();
- path.push("sock");
- let mut listener = SocketListener::new(&path, true).unwrap();
- listener.set_nonblocking(true).unwrap();
- let master = Master::connect(path).unwrap();
- let slave = listener.accept().unwrap().unwrap();
- (master, slave)
- }
-
- pub(crate) fn create_connection_pair() -> (Connection<MasterReq>, Connection<MasterReq>) {
- let dir = temp_dir();
- let mut path = dir.path().to_owned();
- path.push("sock");
- let mut listener = SocketListener::new(&path, true).unwrap();
- listener.set_nonblocking(true).unwrap();
- let master = Connection::<MasterReq>::connect(path).unwrap();
- let slave = listener.accept().unwrap().unwrap();
- (master, slave)
- }
-
- pub(crate) fn create_master_slave_pair<S>(backend: S) -> (Master, SlaveReqHandler<S>)
- where
- S: VhostUserSlaveReqHandler,
- {
- let dir = Builder::new().prefix("/tmp/vhost_test").tempdir().unwrap();
- let mut path = dir.path().to_owned();
- path.push("sock");
- let mut listener = SocketListener::new(&path, true).unwrap();
- let master = Master::connect(&path).unwrap();
- let connection = listener.accept().unwrap().unwrap();
- let req_handler = SlaveReqHandler::new(connection, backend);
- (master, req_handler)
- }
-
- // Create failures don't happen on using Tubes because there is no "connection". (The channel is
- // already up when we invoke this library.)
- #[test]
- fn test_create_failure() {
- let dir = temp_dir();
- let mut path = dir.path().to_owned();
- path.push("sock");
- let _ = SocketListener::new(&path, true).unwrap();
- let _ = SocketListener::new(&path, false).is_err();
- assert!(Master::connect(&path).is_err());
-
- let mut listener = SocketListener::new(&path, true).unwrap();
- assert!(SocketListener::new(&path, false).is_err());
- listener.set_nonblocking(true).unwrap();
-
- let _master = Master::connect(&path).unwrap();
- let _slave = listener.accept().unwrap().unwrap();
- }
-}
diff --git a/third_party/vmm_vhost/src/connection/windows.rs b/third_party/vmm_vhost/src/connection/windows.rs
deleted file mode 100644
index 96037e0..0000000
--- a/third_party/vmm_vhost/src/connection/windows.rs
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright 2022 The Chromium OS Authors. All rights reserved.
-// SPDX-License-Identifier: Apache-2.0
-
-//! Windows specific code that keeps rest of the code in the crate platform independent.
-
-#[cfg(test)]
-pub(crate) mod tests {
- use crate::master::Master;
- use crate::message::MasterReq;
- use crate::slave_req_handler::SlaveReqHandler;
- use crate::slave_req_handler::VhostUserSlaveReqHandler;
- use crate::Connection;
- use crate::SystemStream;
-
- pub(crate) fn create_pair() -> (Master, Connection<MasterReq>) {
- let (master_tube, slave_tube) = SystemStream::pair().unwrap();
- let master = Master::from_stream(master_tube);
- (master, Connection::from(slave_tube))
- }
-
- pub(crate) fn create_connection_pair() -> (Connection<MasterReq>, Connection<MasterReq>) {
- let (master_tube, slave_tube) = SystemStream::pair().unwrap();
- let master = Connection::<MasterReq>::from(master_tube);
- (master, Connection::from(slave_tube))
- }
-
- pub(crate) fn create_master_slave_pair<S>(backend: S) -> (Master, SlaveReqHandler<S>)
- where
- S: VhostUserSlaveReqHandler,
- {
- let (master_tube, slave_tube) = SystemStream::pair().unwrap();
- let master = Master::from_stream(master_tube);
- (
- master,
- SlaveReqHandler::<S>::from_stream(slave_tube, backend),
- )
- }
-}
diff --git a/third_party/vmm_vhost/src/frontend_client.rs b/third_party/vmm_vhost/src/frontend_client.rs
new file mode 100644
index 0000000..f32dc70
--- /dev/null
+++ b/third_party/vmm_vhost/src/frontend_client.rs
@@ -0,0 +1,159 @@
+// Copyright (C) 2020 Alibaba Cloud. All rights reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+use std::mem;
+use std::string::ToString;
+
+use base::AsRawDescriptor;
+use base::RawDescriptor;
+use zerocopy::AsBytes;
+
+use crate::message::*;
+use crate::BackendReq;
+use crate::Connection;
+use crate::Error;
+use crate::Frontend;
+use crate::HandlerResult;
+use crate::Result;
+use crate::SystemStream;
+
+/// Client for a vhost-user frontend. Allows a backend to send requests to the frontend.
+pub struct FrontendClient {
+ sock: Connection<BackendReq>,
+
+ // Protocol feature VHOST_USER_PROTOCOL_F_REPLY_ACK has been negotiated.
+ reply_ack_negotiated: bool,
+
+ // whether the connection has encountered any failure
+ error: Option<i32>,
+}
+
+impl FrontendClient {
+ /// Create a new instance from the given connection.
+ pub fn new(ep: Connection<BackendReq>) -> Self {
+ FrontendClient {
+ sock: ep,
+ reply_ack_negotiated: false,
+ error: None,
+ }
+ }
+
+ /// Create a new instance from a `SystemStream` object.
+ pub fn from_stream(connection: SystemStream) -> Self {
+ Self::new(Connection::from(connection))
+ }
+
+ fn send_message<T>(
+ &mut self,
+ request: BackendReq,
+ msg: &T,
+ fds: Option<&[RawDescriptor]>,
+ ) -> HandlerResult<u64>
+ where
+ T: AsBytes,
+ {
+ let len = mem::size_of::<T>();
+ let mut hdr = VhostUserMsgHeader::new(request, 0, len as u32);
+ if self.reply_ack_negotiated {
+ hdr.set_need_reply(true);
+ }
+ self.sock
+ .send_message(&hdr, msg, fds)
+ .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e.to_string()))?;
+
+ self.wait_for_reply(&hdr)
+ .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e.to_string()))
+ }
+
+ fn wait_for_reply(&mut self, hdr: &VhostUserMsgHeader<BackendReq>) -> Result<u64> {
+ let code = hdr.get_code().map_err(|_| Error::InvalidMessage)?;
+ if code != BackendReq::SHMEM_MAP
+ && code != BackendReq::SHMEM_UNMAP
+ && code != BackendReq::GPU_MAP
+ && code != BackendReq::EXTERNAL_MAP
+ && !self.reply_ack_negotiated
+ {
+ return Ok(0);
+ }
+
+ let (reply, body, rfds) = self.sock.recv_message::<VhostUserU64>()?;
+ if !reply.is_reply_for(hdr) || !rfds.is_empty() || !body.is_valid() {
+ return Err(Error::InvalidMessage);
+ }
+ if body.value != 0 {
+ return Err(Error::FrontendInternalError);
+ }
+
+ Ok(body.value)
+ }
+
+ /// Set the negotiation state of the `VHOST_USER_PROTOCOL_F_REPLY_ACK` protocol feature.
+ ///
+ /// When the `VHOST_USER_PROTOCOL_F_REPLY_ACK` protocol feature has been negotiated, the
+ /// "REPLY_ACK" flag will be set in the message header for every backend to frontend request
+ /// message.
+ pub fn set_reply_ack_flag(&mut self, enable: bool) {
+ self.reply_ack_negotiated = enable;
+ }
+
+ /// Mark connection as failed with specified error code.
+ pub fn set_failed(&mut self, error: i32) {
+ self.error = Some(error);
+ }
+}
+
+impl Frontend for FrontendClient {
+ /// Handle shared memory region mapping requests.
+ fn shmem_map(
+ &mut self,
+ req: &VhostUserShmemMapMsg,
+ fd: &dyn AsRawDescriptor,
+ ) -> HandlerResult<u64> {
+ self.send_message(BackendReq::SHMEM_MAP, req, Some(&[fd.as_raw_descriptor()]))
+ }
+
+ /// Handle shared memory region unmapping requests.
+ fn shmem_unmap(&mut self, req: &VhostUserShmemUnmapMsg) -> HandlerResult<u64> {
+ self.send_message(BackendReq::SHMEM_UNMAP, req, None)
+ }
+
+ /// Handle config change requests.
+ fn handle_config_change(&mut self) -> HandlerResult<u64> {
+ self.send_message(BackendReq::CONFIG_CHANGE_MSG, &VhostUserEmptyMessage, None)
+ }
+
+ /// Handle GPU shared memory region mapping requests.
+ fn gpu_map(
+ &mut self,
+ req: &VhostUserGpuMapMsg,
+ descriptor: &dyn AsRawDescriptor,
+ ) -> HandlerResult<u64> {
+ self.send_message(
+ BackendReq::GPU_MAP,
+ req,
+ Some(&[descriptor.as_raw_descriptor()]),
+ )
+ }
+
+ /// Handle external memory region mapping requests.
+ fn external_map(&mut self, req: &VhostUserExternalMapMsg) -> HandlerResult<u64> {
+ self.send_message(BackendReq::EXTERNAL_MAP, req, None)
+ }
+}
+
+#[cfg(test)]
+mod tests {
+
+ use super::*;
+ use crate::SystemStream;
+
+ #[test]
+ fn test_backend_req_set_failed() {
+ let (p1, _p2) = SystemStream::pair().unwrap();
+ let mut frontend_client = FrontendClient::from_stream(p1);
+
+ assert!(frontend_client.error.is_none());
+ frontend_client.set_failed(libc::EAGAIN);
+ assert_eq!(frontend_client.error, Some(libc::EAGAIN));
+ }
+}
diff --git a/third_party/vmm_vhost/src/frontend_server.rs b/third_party/vmm_vhost/src/frontend_server.rs
new file mode 100644
index 0000000..4973083
--- /dev/null
+++ b/third_party/vmm_vhost/src/frontend_server.rs
@@ -0,0 +1,243 @@
+// Copyright (C) 2019-2021 Alibaba Cloud. All rights reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+use std::fs::File;
+use std::mem;
+
+use base::AsRawDescriptor;
+
+use crate::message::*;
+use crate::BackendReq;
+use crate::Connection;
+use crate::Error;
+use crate::HandlerResult;
+use crate::Result;
+use crate::SystemStream;
+
+/// Trait for vhost-user frontends to respond to requests from the backend.
+///
+/// Each method corresponds to a vhost-user protocol method. See the specification for details.
+pub trait Frontend {
+ /// Handle device configuration change notifications.
+ fn handle_config_change(&mut self) -> HandlerResult<u64> {
+ Err(std::io::Error::from_raw_os_error(libc::ENOSYS))
+ }
+
+ /// Handle shared memory region mapping requests.
+ fn shmem_map(
+ &mut self,
+ _req: &VhostUserShmemMapMsg,
+ _fd: &dyn AsRawDescriptor,
+ ) -> HandlerResult<u64> {
+ Err(std::io::Error::from_raw_os_error(libc::ENOSYS))
+ }
+
+ /// Handle shared memory region unmapping requests.
+ fn shmem_unmap(&mut self, _req: &VhostUserShmemUnmapMsg) -> HandlerResult<u64> {
+ Err(std::io::Error::from_raw_os_error(libc::ENOSYS))
+ }
+
+ // fn handle_iotlb_msg(&mut self, iotlb: VhostUserIotlb);
+ // fn handle_vring_host_notifier(&mut self, area: VhostUserVringArea, fd: RawDescriptor);
+
+ /// Handle GPU shared memory region mapping requests.
+ fn gpu_map(
+ &mut self,
+ _req: &VhostUserGpuMapMsg,
+ _descriptor: &dyn AsRawDescriptor,
+ ) -> HandlerResult<u64> {
+ Err(std::io::Error::from_raw_os_error(libc::ENOSYS))
+ }
+
+ /// Handle external memory region mapping requests.
+ fn external_map(&mut self, _req: &VhostUserExternalMapMsg) -> HandlerResult<u64> {
+ Err(std::io::Error::from_raw_os_error(libc::ENOSYS))
+ }
+}
+
+/// Handles requests from a vhost-user backend connection by dispatching them to [Frontend]
+/// methods.
+pub struct FrontendServer<S: Frontend> {
+ // underlying Unix domain socket for communication
+ pub(crate) sub_sock: Connection<BackendReq>,
+ // Protocol feature VHOST_USER_PROTOCOL_F_REPLY_ACK has been negotiated.
+ reply_ack_negotiated: bool,
+
+ frontend: S,
+}
+
+impl<S: Frontend> FrontendServer<S> {
+ /// Create a server to handle requests from `stream`.
+ pub(crate) fn new(frontend: S, stream: SystemStream) -> Result<Self> {
+ Ok(FrontendServer {
+ sub_sock: Connection::from(stream),
+ reply_ack_negotiated: false,
+ frontend,
+ })
+ }
+
+ /// Set the negotiation state of the `VHOST_USER_PROTOCOL_F_REPLY_ACK` protocol feature.
+ ///
+ /// When the `VHOST_USER_PROTOCOL_F_REPLY_ACK` protocol feature has been negotiated,
+ /// the "REPLY_ACK" flag will be set in the message header for every request message.
+ pub fn set_reply_ack_flag(&mut self, enable: bool) {
+ self.reply_ack_negotiated = enable;
+ }
+
+ /// Get the underlying frontend
+ pub fn frontend_mut(&mut self) -> &mut S {
+ &mut self.frontend
+ }
+
+ /// Process the next received request.
+ ///
+ /// The caller needs to:
+ /// - serialize calls to this function
+    /// - decide what to do when an error happens
+    /// - optionally recover from failure
+ pub fn handle_request(&mut self) -> Result<u64> {
+ // The underlying communication channel is a Unix domain socket in
+ // stream mode, and recvmsg() is a little tricky here. To successfully
+ // receive attached file descriptors, we need to receive messages and
+ // corresponding attached file descriptors in this way:
+        // . recv message header and optional attached file
+ // . validate message header
+ // . recv optional message body and payload according size field in
+ // message header
+ // . validate message body and optional payload
+ let (hdr, files) = self.sub_sock.recv_header()?;
+ self.check_attached_files(&hdr, &files)?;
+ let buf = self.sub_sock.recv_body_bytes(&hdr)?;
+ let size = buf.len();
+
+ let res = match hdr.get_code() {
+ Ok(BackendReq::CONFIG_CHANGE_MSG) => {
+ self.check_msg_size(&hdr, size, 0)?;
+ self.frontend
+ .handle_config_change()
+ .map_err(Error::ReqHandlerError)
+ }
+ Ok(BackendReq::SHMEM_MAP) => {
+ let msg = self.extract_msg_body::<VhostUserShmemMapMsg>(&hdr, size, &buf)?;
+ // check_attached_files() has validated files
+ self.frontend
+ .shmem_map(&msg, &files[0])
+ .map_err(Error::ReqHandlerError)
+ }
+ Ok(BackendReq::SHMEM_UNMAP) => {
+ let msg = self.extract_msg_body::<VhostUserShmemUnmapMsg>(&hdr, size, &buf)?;
+ self.frontend
+ .shmem_unmap(&msg)
+ .map_err(Error::ReqHandlerError)
+ }
+ Ok(BackendReq::GPU_MAP) => {
+ let msg = self.extract_msg_body::<VhostUserGpuMapMsg>(&hdr, size, &buf)?;
+ // check_attached_files() has validated files
+ self.frontend
+ .gpu_map(&msg, &files[0])
+ .map_err(Error::ReqHandlerError)
+ }
+ Ok(BackendReq::EXTERNAL_MAP) => {
+ let msg = self.extract_msg_body::<VhostUserExternalMapMsg>(&hdr, size, &buf)?;
+ self.frontend
+ .external_map(&msg)
+ .map_err(Error::ReqHandlerError)
+ }
+ _ => Err(Error::InvalidMessage),
+ };
+
+ self.send_reply(&hdr, &res)?;
+
+ res
+ }
+
+ fn check_msg_size(
+ &self,
+ hdr: &VhostUserMsgHeader<BackendReq>,
+ size: usize,
+ expected: usize,
+ ) -> Result<()> {
+ if hdr.get_size() as usize != expected
+ || hdr.is_reply()
+ || hdr.get_version() != 0x1
+ || size != expected
+ {
+ return Err(Error::InvalidMessage);
+ }
+ Ok(())
+ }
+
+ fn check_attached_files(
+ &self,
+ hdr: &VhostUserMsgHeader<BackendReq>,
+ files: &[File],
+ ) -> Result<()> {
+ let expected_num_files = match hdr.get_code().map_err(|_| Error::InvalidMessage)? {
+ // Expect a single file is passed.
+ BackendReq::SHMEM_MAP | BackendReq::GPU_MAP => 1,
+ _ => 0,
+ };
+
+ if files.len() == expected_num_files {
+ Ok(())
+ } else {
+ Err(Error::InvalidMessage)
+ }
+ }
+
+ fn extract_msg_body<T: Sized + VhostUserMsgValidator>(
+ &self,
+ hdr: &VhostUserMsgHeader<BackendReq>,
+ size: usize,
+ buf: &[u8],
+ ) -> Result<T> {
+ self.check_msg_size(hdr, size, mem::size_of::<T>())?;
+ // SAFETY: above check ensures that buf is `T` sized.
+ let msg = unsafe { std::ptr::read_unaligned(buf.as_ptr() as *const T) };
+ if !msg.is_valid() {
+ return Err(Error::InvalidMessage);
+ }
+ Ok(msg)
+ }
+
+ fn new_reply_header<T: Sized>(
+ &self,
+ req: &VhostUserMsgHeader<BackendReq>,
+ ) -> Result<VhostUserMsgHeader<BackendReq>> {
+ Ok(VhostUserMsgHeader::new(
+ req.get_code().map_err(|_| Error::InvalidMessage)?,
+ VhostUserHeaderFlag::REPLY.bits(),
+ mem::size_of::<T>() as u32,
+ ))
+ }
+
+ fn send_reply(
+ &mut self,
+ req: &VhostUserMsgHeader<BackendReq>,
+ res: &Result<u64>,
+ ) -> Result<()> {
+ let code = req.get_code().map_err(|_| Error::InvalidMessage)?;
+ if code == BackendReq::SHMEM_MAP
+ || code == BackendReq::SHMEM_UNMAP
+ || code == BackendReq::GPU_MAP
+ || code == BackendReq::EXTERNAL_MAP
+ || (self.reply_ack_negotiated && req.is_need_reply())
+ {
+ let hdr = self.new_reply_header::<VhostUserU64>(req)?;
+ let def_err = libc::EINVAL;
+ let val = match res {
+ Ok(n) => *n,
+ Err(e) => match e {
+ Error::ReqHandlerError(ioerr) => match ioerr.raw_os_error() {
+ Some(rawerr) => -rawerr as u64,
+ None => -def_err as u64,
+ },
+ _ => -def_err as u64,
+ },
+ };
+ let msg = VhostUserU64::new(val);
+ self.sub_sock.send_message(&hdr, &msg, None)?;
+ }
+ Ok(())
+ }
+}
diff --git a/third_party/vmm_vhost/src/lib.rs b/third_party/vmm_vhost/src/lib.rs
index 6db01f6..5c36f42 100644
--- a/third_party/vmm_vhost/src/lib.rs
+++ b/third_party/vmm_vhost/src/lib.rs
@@ -25,9 +25,9 @@
//! Later Vhost-user protocol is introduced to complement the ioctl interface used to control the
//! vhost implementation in the Linux kernel. It implements the control plane needed to establish
//! virtqueues sharing with a user space process on the same host. It uses communication over a
-//! Unix domain socket to share file descriptors in the ancillary data of the message.
-//! The protocol defines 2 sides of the communication, master and slave. Master is the application
-//! that shares its virtqueues. Slave is the consumer of the virtqueues. Master and slave can be
+//! Unix domain socket to share file descriptors in the ancillary data of the message. The protocol
+//! defines 2 sides of the communication, frontend and backend. Frontend is the application that
+//! shares its virtqueues. Backend is the consumer of the virtqueues. Frontend and backend can be
//! either a client (i.e. connecting) or server (listening) in the socket communication.
use std::fs::File;
@@ -47,27 +47,29 @@
mod sys;
pub use connection::Connection;
-pub use message::MasterReq;
-pub use message::SlaveReq;
+pub use message::BackendReq;
+pub use message::FrontendReq;
pub use sys::SystemStream;
pub use sys::*;
-pub(crate) mod master;
-pub use self::master::Master;
-mod master_req_handler;
-pub use self::master_req_handler::VhostUserMasterReqHandler;
-mod slave_proxy;
-mod slave_req_handler;
-pub use self::master_req_handler::MasterReqHandler;
-pub use self::slave_proxy::Slave;
-pub use self::slave_req_handler::SlaveReqHandler;
-pub use self::slave_req_handler::SlaveReqHelper;
-pub use self::slave_req_handler::VhostUserSlaveReqHandler;
+pub(crate) mod backend_client;
+pub use backend_client::BackendClient;
+mod frontend_server;
+pub use self::frontend_server::Frontend;
+mod backend_server;
+mod frontend_client;
+pub use self::backend_server::Backend;
+pub use self::backend_server::BackendServer;
+pub use self::frontend_client::FrontendClient;
+pub use self::frontend_server::FrontendServer;
/// Errors for vhost-user operations
#[sorted]
#[derive(Debug, ThisError)]
pub enum Error {
+ /// Failure from the backend side.
+ #[error("backend internal error")]
+ BackendInternalError,
/// client exited properly.
#[error("client exited properly")]
ClientExit,
@@ -81,6 +83,9 @@
/// Virtio/protocol features mismatch.
#[error("virtio features mismatch")]
FeatureMismatch,
+ /// Failure from the frontend side.
+ #[error("frontend Internal error")]
+ FrontendInternalError,
/// Fd array in question is too big or too small
#[error("wrong number of attached fds")]
IncorrectFds,
@@ -96,9 +101,6 @@
/// Invalid parameters.
#[error("invalid parameters")]
InvalidParam,
- /// Failure from the master side.
- #[error("master Internal error")]
- MasterInternalError,
/// Message is too large
#[error("oversized message")]
OversizedMsg,
@@ -122,9 +124,6 @@
/// Failure to serialize data.
#[error("failed to serialize data")]
SerializationFailed,
- /// Failure from the slave side.
- #[error("slave internal error")]
- SlaveInternalError,
/// Failure to run device specific sleep.
#[error("Failed to run device specific sleep: {0}")]
SleepError(anyhow::Error),
@@ -230,7 +229,7 @@
}
#[cfg(test)]
-mod dummy_slave;
+mod test_backend;
#[cfg(test)]
mod tests {
@@ -242,15 +241,17 @@
use tempfile::tempfile;
use super::*;
- use crate::connection::tests::*;
- use crate::dummy_slave::DummySlaveReqHandler;
- use crate::dummy_slave::VIRTIO_FEATURES;
use crate::message::*;
+ pub(crate) use crate::sys::tests::create_client_server_pair;
+ pub(crate) use crate::sys::tests::create_connection_pair;
+ pub(crate) use crate::sys::tests::create_pair;
+ use crate::test_backend::TestBackend;
+ use crate::test_backend::VIRTIO_FEATURES;
use crate::VhostUserMemoryRegionInfo;
use crate::VringConfigData;
/// Utility function to process a header and a message together.
- fn handle_request(h: &mut SlaveReqHandler<DummySlaveReqHandler>) -> Result<()> {
+ fn handle_request(h: &mut BackendServer<TestBackend>) -> Result<()> {
// We assume that a header comes together with message body in tests so we don't wait before
// calling `process_message()`.
let (hdr, files) = h.recv_header()?;
@@ -258,150 +259,156 @@
}
#[test]
- fn create_dummy_slave() {
- let mut slave = DummySlaveReqHandler::new();
+ fn create_test_backend() {
+ let mut backend = TestBackend::new();
- slave.set_owner().unwrap();
- assert!(slave.set_owner().is_err());
+ backend.set_owner().unwrap();
+ assert!(backend.set_owner().is_err());
}
#[test]
fn test_set_owner() {
- let slave_be = DummySlaveReqHandler::new();
- let (master, mut slave) = create_master_slave_pair(slave_be);
+ let test_backend = TestBackend::new();
+ let (backend_client, mut backend_server) = create_client_server_pair(test_backend);
- assert!(!slave.as_ref().owned);
- master.set_owner().unwrap();
- handle_request(&mut slave).unwrap();
- assert!(slave.as_ref().owned);
- master.set_owner().unwrap();
- assert!(handle_request(&mut slave).is_err());
- assert!(slave.as_ref().owned);
+ assert!(!backend_server.as_ref().owned);
+ backend_client.set_owner().unwrap();
+ handle_request(&mut backend_server).unwrap();
+ assert!(backend_server.as_ref().owned);
+ backend_client.set_owner().unwrap();
+ assert!(handle_request(&mut backend_server).is_err());
+ assert!(backend_server.as_ref().owned);
}
#[test]
fn test_set_features() {
let mbar = Arc::new(Barrier::new(2));
let sbar = mbar.clone();
- let slave_be = DummySlaveReqHandler::new();
- let (mut master, mut slave) = create_master_slave_pair(slave_be);
+ let test_backend = TestBackend::new();
+ let (mut backend_client, mut backend_server) = create_client_server_pair(test_backend);
thread::spawn(move || {
- handle_request(&mut slave).unwrap();
- assert!(slave.as_ref().owned);
+ handle_request(&mut backend_server).unwrap();
+ assert!(backend_server.as_ref().owned);
- handle_request(&mut slave).unwrap();
- handle_request(&mut slave).unwrap();
- assert_eq!(slave.as_ref().acked_features, VIRTIO_FEATURES & !0x1);
-
- handle_request(&mut slave).unwrap();
- handle_request(&mut slave).unwrap();
+ handle_request(&mut backend_server).unwrap();
+ handle_request(&mut backend_server).unwrap();
assert_eq!(
- slave.as_ref().acked_protocol_features,
+ backend_server.as_ref().acked_features,
+ VIRTIO_FEATURES & !0x1
+ );
+
+ handle_request(&mut backend_server).unwrap();
+ handle_request(&mut backend_server).unwrap();
+ assert_eq!(
+ backend_server.as_ref().acked_protocol_features,
VhostUserProtocolFeatures::all().bits()
);
sbar.wait();
});
- master.set_owner().unwrap();
+ backend_client.set_owner().unwrap();
// set virtio features
- let features = master.get_features().unwrap();
+ let features = backend_client.get_features().unwrap();
assert_eq!(features, VIRTIO_FEATURES);
- master.set_features(VIRTIO_FEATURES & !0x1).unwrap();
+ backend_client.set_features(VIRTIO_FEATURES & !0x1).unwrap();
// set vhost protocol features
- let features = master.get_protocol_features().unwrap();
+ let features = backend_client.get_protocol_features().unwrap();
assert_eq!(features.bits(), VhostUserProtocolFeatures::all().bits());
- master.set_protocol_features(features).unwrap();
+ backend_client.set_protocol_features(features).unwrap();
mbar.wait();
}
#[test]
- fn test_master_slave_process() {
+ fn test_client_server_process() {
let mbar = Arc::new(Barrier::new(2));
let sbar = mbar.clone();
- let slave_be = DummySlaveReqHandler::new();
- let (mut master, mut slave) = create_master_slave_pair(slave_be);
+ let test_backend = TestBackend::new();
+ let (mut backend_client, mut backend_server) = create_client_server_pair(test_backend);
thread::spawn(move || {
// set_own()
- handle_request(&mut slave).unwrap();
- assert!(slave.as_ref().owned);
+ handle_request(&mut backend_server).unwrap();
+ assert!(backend_server.as_ref().owned);
// get/set_features()
- handle_request(&mut slave).unwrap();
- handle_request(&mut slave).unwrap();
- assert_eq!(slave.as_ref().acked_features, VIRTIO_FEATURES & !0x1);
-
- handle_request(&mut slave).unwrap();
- handle_request(&mut slave).unwrap();
+ handle_request(&mut backend_server).unwrap();
+ handle_request(&mut backend_server).unwrap();
assert_eq!(
- slave.as_ref().acked_protocol_features,
+ backend_server.as_ref().acked_features,
+ VIRTIO_FEATURES & !0x1
+ );
+
+ handle_request(&mut backend_server).unwrap();
+ handle_request(&mut backend_server).unwrap();
+ assert_eq!(
+ backend_server.as_ref().acked_protocol_features,
VhostUserProtocolFeatures::all().bits()
);
// get_inflight_fd()
- handle_request(&mut slave).unwrap();
+ handle_request(&mut backend_server).unwrap();
// set_inflight_fd()
- handle_request(&mut slave).unwrap();
+ handle_request(&mut backend_server).unwrap();
// get_queue_num()
- handle_request(&mut slave).unwrap();
+ handle_request(&mut backend_server).unwrap();
// set_mem_table()
- handle_request(&mut slave).unwrap();
+ handle_request(&mut backend_server).unwrap();
// get/set_config()
- handle_request(&mut slave).unwrap();
- handle_request(&mut slave).unwrap();
+ handle_request(&mut backend_server).unwrap();
+ handle_request(&mut backend_server).unwrap();
- // set_slave_request_fd
- handle_request(&mut slave).unwrap();
+ // set_backend_req_fd
+ handle_request(&mut backend_server).unwrap();
// set_vring_enable
- handle_request(&mut slave).unwrap();
+ handle_request(&mut backend_server).unwrap();
// set_log_base,set_log_fd()
- handle_request(&mut slave).unwrap_err();
- handle_request(&mut slave).unwrap_err();
+ handle_request(&mut backend_server).unwrap_err();
+ handle_request(&mut backend_server).unwrap_err();
// set_vring_xxx
- handle_request(&mut slave).unwrap();
- handle_request(&mut slave).unwrap();
- handle_request(&mut slave).unwrap();
- handle_request(&mut slave).unwrap();
- handle_request(&mut slave).unwrap();
- handle_request(&mut slave).unwrap();
+ handle_request(&mut backend_server).unwrap();
+ handle_request(&mut backend_server).unwrap();
+ handle_request(&mut backend_server).unwrap();
+ handle_request(&mut backend_server).unwrap();
+ handle_request(&mut backend_server).unwrap();
+ handle_request(&mut backend_server).unwrap();
// get_max_mem_slots()
- handle_request(&mut slave).unwrap();
+ handle_request(&mut backend_server).unwrap();
// add_mem_region()
- handle_request(&mut slave).unwrap();
+ handle_request(&mut backend_server).unwrap();
// remove_mem_region()
- handle_request(&mut slave).unwrap();
+ handle_request(&mut backend_server).unwrap();
sbar.wait();
});
- master.set_owner().unwrap();
+ backend_client.set_owner().unwrap();
// set virtio features
- let features = master.get_features().unwrap();
+ let features = backend_client.get_features().unwrap();
assert_eq!(features, VIRTIO_FEATURES);
- master.set_features(VIRTIO_FEATURES & !0x1).unwrap();
+ backend_client.set_features(VIRTIO_FEATURES & !0x1).unwrap();
// set vhost protocol features
- let features = master.get_protocol_features().unwrap();
+ let features = backend_client.get_protocol_features().unwrap();
assert_eq!(features.bits(), VhostUserProtocolFeatures::all().bits());
- master.set_protocol_features(features).unwrap();
+ backend_client.set_protocol_features(features).unwrap();
// Retrieve inflight I/O tracking information
- let (inflight_info, inflight_file) = master
+ let (inflight_info, inflight_file) = backend_client
.get_inflight_fd(&VhostUserInflight {
num_queues: 2,
queue_size: 256,
@@ -409,11 +416,11 @@
})
.unwrap();
// Set the buffer back to the backend
- master
+ backend_client
.set_inflight_fd(&inflight_info, inflight_file.as_raw_descriptor())
.unwrap();
- let num = master.get_queue_num().unwrap();
+ let num = backend_client.get_queue_num().unwrap();
assert_eq!(num, 2);
let event = base::Event::new().unwrap();
@@ -424,13 +431,13 @@
mmap_offset: 0,
mmap_handle: event.as_raw_descriptor(),
}];
- master.set_mem_table(&mem).unwrap();
+ backend_client.set_mem_table(&mem).unwrap();
- master
+ backend_client
.set_config(0x100, VhostUserConfigFlags::WRITABLE, &[0xa5u8])
.unwrap();
let buf = [0x0u8; 4];
- let (reply_body, reply_payload) = master
+ let (reply_body, reply_payload) = backend_client
.get_config(0x100, 4, VhostUserConfigFlags::empty(), &buf)
.unwrap();
let offset = reply_body.offset;
@@ -448,17 +455,19 @@
#[cfg(unix)]
let descriptor = base::Event::new().unwrap();
- master.set_slave_request_fd(&descriptor).unwrap();
- master.set_vring_enable(0, true).unwrap();
+ backend_client.set_backend_req_fd(&descriptor).unwrap();
+ backend_client.set_vring_enable(0, true).unwrap();
// unimplemented yet
- master
+ backend_client
.set_log_base(0, Some(event.as_raw_descriptor()))
.unwrap();
- master.set_log_fd(event.as_raw_descriptor()).unwrap();
+ backend_client
+ .set_log_fd(event.as_raw_descriptor())
+ .unwrap();
- master.set_vring_num(0, 256).unwrap();
- master.set_vring_base(0, 0).unwrap();
+ backend_client.set_vring_num(0, 256).unwrap();
+ backend_client.set_vring_base(0, 0).unwrap();
let config = VringConfigData {
queue_size: 128,
flags: VhostUserVringAddrFlags::VHOST_VRING_F_LOG.bits(),
@@ -467,12 +476,12 @@
avail_ring_addr: 0x3000,
log_addr: Some(0x4000),
};
- master.set_vring_addr(0, &config).unwrap();
- master.set_vring_call(0, &event).unwrap();
- master.set_vring_kick(0, &event).unwrap();
- master.set_vring_err(0, &event).unwrap();
+ backend_client.set_vring_addr(0, &config).unwrap();
+ backend_client.set_vring_call(0, &event).unwrap();
+ backend_client.set_vring_kick(0, &event).unwrap();
+ backend_client.set_vring_err(0, &event).unwrap();
- let max_mem_slots = master.get_max_mem_slots().unwrap();
+ let max_mem_slots = backend_client.get_max_mem_slots().unwrap();
assert_eq!(max_mem_slots, 32);
let region_file = tempfile().unwrap();
@@ -483,9 +492,9 @@
mmap_offset: 0,
mmap_handle: region_file.as_raw_descriptor(),
};
- master.add_mem_region(®ion).unwrap();
+ backend_client.add_mem_region(®ion).unwrap();
- master.remove_mem_region(®ion).unwrap();
+ backend_client.remove_mem_region(®ion).unwrap();
mbar.wait();
}
diff --git a/third_party/vmm_vhost/src/master_req_handler.rs b/third_party/vmm_vhost/src/master_req_handler.rs
deleted file mode 100644
index 9e722e0..0000000
--- a/third_party/vmm_vhost/src/master_req_handler.rs
+++ /dev/null
@@ -1,347 +0,0 @@
-// Copyright (C) 2019-2021 Alibaba Cloud. All rights reserved.
-// SPDX-License-Identifier: Apache-2.0
-
-cfg_if::cfg_if! {
- if #[cfg(unix)] {
- mod unix;
- } else if #[cfg(windows)] {
- mod windows;
- }
-}
-
-use std::fs::File;
-use std::mem;
-
-use base::AsRawDescriptor;
-use base::SafeDescriptor;
-
-use crate::message::*;
-use crate::Connection;
-use crate::Error;
-use crate::HandlerResult;
-use crate::Result;
-use crate::SlaveReq;
-use crate::SystemStream;
-
-/// Define services provided by masters for the slave communication channel.
-///
-/// The vhost-user specification defines a slave communication channel, by which slaves could
-/// request services from masters. The [VhostUserMasterReqHandler] trait defines services provided
-/// by masters, and it's used both on the master side and slave side.
-/// - on the slave side, a stub forwarder implementing [VhostUserMasterReqHandler] will proxy
-/// service requests to masters. The [Slave] is an example stub forwarder.
-/// - on the master side, the [MasterReqHandler] will forward service requests to a handler
-/// implementing [VhostUserMasterReqHandler].
-///
-/// [VhostUserMasterReqHandler]: trait.VhostUserMasterReqHandler.html
-/// [MasterReqHandler]: struct.MasterReqHandler.html
-/// [Slave]: struct.Slave.html
-pub trait VhostUserMasterReqHandler {
- /// Handle device configuration change notifications.
- fn handle_config_change(&mut self) -> HandlerResult<u64> {
- Err(std::io::Error::from_raw_os_error(libc::ENOSYS))
- }
-
- /// Handle shared memory region mapping requests.
- fn shmem_map(
- &mut self,
- _req: &VhostUserShmemMapMsg,
- _fd: &dyn AsRawDescriptor,
- ) -> HandlerResult<u64> {
- Err(std::io::Error::from_raw_os_error(libc::ENOSYS))
- }
-
- /// Handle shared memory region unmapping requests.
- fn shmem_unmap(&mut self, _req: &VhostUserShmemUnmapMsg) -> HandlerResult<u64> {
- Err(std::io::Error::from_raw_os_error(libc::ENOSYS))
- }
-
- /// Handle virtio-fs map file requests.
- fn fs_slave_map(
- &mut self,
- _fs: &VhostUserFSSlaveMsg,
- _fd: &dyn AsRawDescriptor,
- ) -> HandlerResult<u64> {
- Err(std::io::Error::from_raw_os_error(libc::ENOSYS))
- }
-
- /// Handle virtio-fs unmap file requests.
- fn fs_slave_unmap(&mut self, _fs: &VhostUserFSSlaveMsg) -> HandlerResult<u64> {
- Err(std::io::Error::from_raw_os_error(libc::ENOSYS))
- }
-
- /// Handle virtio-fs sync file requests.
- fn fs_slave_sync(&mut self, _fs: &VhostUserFSSlaveMsg) -> HandlerResult<u64> {
- Err(std::io::Error::from_raw_os_error(libc::ENOSYS))
- }
-
- /// Handle virtio-fs file IO requests.
- fn fs_slave_io(
- &mut self,
- _fs: &VhostUserFSSlaveMsg,
- _fd: &dyn AsRawDescriptor,
- ) -> HandlerResult<u64> {
- Err(std::io::Error::from_raw_os_error(libc::ENOSYS))
- }
-
- // fn handle_iotlb_msg(&mut self, iotlb: VhostUserIotlb);
- // fn handle_vring_host_notifier(&mut self, area: VhostUserVringArea, fd: RawDescriptor);
-
- /// Handle GPU shared memory region mapping requests.
- fn gpu_map(
- &mut self,
- _req: &VhostUserGpuMapMsg,
- _descriptor: &dyn AsRawDescriptor,
- ) -> HandlerResult<u64> {
- Err(std::io::Error::from_raw_os_error(libc::ENOSYS))
- }
-
- /// Handle external memory region mapping requests.
- fn external_map(&mut self, _req: &VhostUserExternalMapMsg) -> HandlerResult<u64> {
- Err(std::io::Error::from_raw_os_error(libc::ENOSYS))
- }
-}
-
-/// The [MasterReqHandler] acts as a server on the master side, to handle service requests from
-/// slaves on the slave communication channel. It's actually a proxy invoking the registered
-/// handler implementing [VhostUserMasterReqHandler] to do the real work.
-///
-/// [MasterReqHandler]: struct.MasterReqHandler.html
-/// [VhostUserMasterReqHandler]: trait.VhostUserMasterReqHandler.html
-///
-/// Server to handle service requests from slaves from the slave communication channel.
-pub struct MasterReqHandler<S: VhostUserMasterReqHandler> {
- // underlying Unix domain socket for communication
- sub_sock: Connection<SlaveReq>,
- tx_sock: Option<SystemStream>,
- // Serializes tx_sock for passing to the backend.
- serialize_tx: Box<dyn Fn(SystemStream) -> SafeDescriptor + Send>,
- // Protocol feature VHOST_USER_PROTOCOL_F_REPLY_ACK has been negotiated.
- reply_ack_negotiated: bool,
-
- /// the VirtIO backend device object
- backend: S,
-}
-
-impl<S: VhostUserMasterReqHandler> MasterReqHandler<S> {
- /// Create a server to handle service requests from slaves on the slave communication channel.
- ///
- /// This opens a pair of connected anonymous sockets to form the slave communication channel.
- /// The socket fd returned by [Self::take_tx_descriptor()] should be sent to the slave by
- /// [Master::set_slave_request_fd()].
- ///
- /// [Self::take_tx_descriptor()]: struct.MasterReqHandler.html#method.take_tx_descriptor
- /// [Master::set_slave_request_fd()]: struct.Master.html#method.set_slave_request_fd
- pub fn new(
- backend: S,
- serialize_tx: Box<dyn Fn(SystemStream) -> SafeDescriptor + Send>,
- ) -> Result<Self> {
- let (tx, rx) = SystemStream::pair()?;
-
- Ok(MasterReqHandler {
- sub_sock: Connection::from(rx),
- tx_sock: Some(tx),
- serialize_tx,
- reply_ack_negotiated: false,
- backend,
- })
- }
-
- /// Get the descriptor for the slave to communication with the master.
- ///
- /// The caller owns the descriptor. The returned descriptor should be sent to the slave by
- /// [Master::set_slave_request_fd()].
- ///
- /// [Master::set_slave_request_fd()]: struct.Master.html#method.set_slave_request_fd
- pub fn take_tx_descriptor(&mut self) -> SafeDescriptor {
- (self.serialize_tx)(self.tx_sock.take().expect("tx_sock should have a value"))
- }
-
- /// Set the negotiation state of the `VHOST_USER_PROTOCOL_F_REPLY_ACK` protocol feature.
- ///
- /// When the `VHOST_USER_PROTOCOL_F_REPLY_ACK` protocol feature has been negotiated,
- /// the "REPLY_ACK" flag will be set in the message header for every slave to master request
- /// message.
- pub fn set_reply_ack_flag(&mut self, enable: bool) {
- self.reply_ack_negotiated = enable;
- }
-
- /// Get the underlying backend device
- pub fn backend_mut(&mut self) -> &mut S {
- &mut self.backend
- }
-
- /// Main entrance to server slave request from the slave communication channel.
- ///
- /// The caller needs to:
- /// - serialize calls to this function
- /// - decide what to do when errer happens
- /// - optional recover from failure
- pub fn handle_request(&mut self) -> Result<u64> {
- // The underlying communication channel is a Unix domain socket in
- // stream mode, and recvmsg() is a little tricky here. To successfully
- // receive attached file descriptors, we need to receive messages and
- // corresponding attached file descriptors in this way:
- // . recv messsage header and optional attached file
- // . validate message header
- // . recv optional message body and payload according size field in
- // message header
- // . validate message body and optional payload
- let (hdr, files) = self.sub_sock.recv_header()?;
- self.check_attached_files(&hdr, &files)?;
- let buf = self.sub_sock.recv_body_bytes(&hdr)?;
- let size = buf.len();
-
- let res = match hdr.get_code() {
- Ok(SlaveReq::CONFIG_CHANGE_MSG) => {
- self.check_msg_size(&hdr, size, 0)?;
- self.backend
- .handle_config_change()
- .map_err(Error::ReqHandlerError)
- }
- Ok(SlaveReq::SHMEM_MAP) => {
- let msg = self.extract_msg_body::<VhostUserShmemMapMsg>(&hdr, size, &buf)?;
- // check_attached_files() has validated files
- self.backend
- .shmem_map(&msg, &files[0])
- .map_err(Error::ReqHandlerError)
- }
- Ok(SlaveReq::SHMEM_UNMAP) => {
- let msg = self.extract_msg_body::<VhostUserShmemUnmapMsg>(&hdr, size, &buf)?;
- self.backend
- .shmem_unmap(&msg)
- .map_err(Error::ReqHandlerError)
- }
- Ok(SlaveReq::FS_MAP) => {
- let msg = self.extract_msg_body::<VhostUserFSSlaveMsg>(&hdr, size, &buf)?;
- // check_attached_files() has validated files
- self.backend
- .fs_slave_map(&msg, &files[0])
- .map_err(Error::ReqHandlerError)
- }
- Ok(SlaveReq::FS_UNMAP) => {
- let msg = self.extract_msg_body::<VhostUserFSSlaveMsg>(&hdr, size, &buf)?;
- self.backend
- .fs_slave_unmap(&msg)
- .map_err(Error::ReqHandlerError)
- }
- Ok(SlaveReq::FS_SYNC) => {
- let msg = self.extract_msg_body::<VhostUserFSSlaveMsg>(&hdr, size, &buf)?;
- self.backend
- .fs_slave_sync(&msg)
- .map_err(Error::ReqHandlerError)
- }
- Ok(SlaveReq::FS_IO) => {
- let msg = self.extract_msg_body::<VhostUserFSSlaveMsg>(&hdr, size, &buf)?;
- // check_attached_files() has validated files
- self.backend
- .fs_slave_io(&msg, &files[0])
- .map_err(Error::ReqHandlerError)
- }
- Ok(SlaveReq::GPU_MAP) => {
- let msg = self.extract_msg_body::<VhostUserGpuMapMsg>(&hdr, size, &buf)?;
- // check_attached_files() has validated files
- self.backend
- .gpu_map(&msg, &files[0])
- .map_err(Error::ReqHandlerError)
- }
- Ok(SlaveReq::EXTERNAL_MAP) => {
- let msg = self.extract_msg_body::<VhostUserExternalMapMsg>(&hdr, size, &buf)?;
- self.backend
- .external_map(&msg)
- .map_err(Error::ReqHandlerError)
- }
- _ => Err(Error::InvalidMessage),
- };
-
- self.send_reply(&hdr, &res)?;
-
- res
- }
-
- fn check_msg_size(
- &self,
- hdr: &VhostUserMsgHeader<SlaveReq>,
- size: usize,
- expected: usize,
- ) -> Result<()> {
- if hdr.get_size() as usize != expected
- || hdr.is_reply()
- || hdr.get_version() != 0x1
- || size != expected
- {
- return Err(Error::InvalidMessage);
- }
- Ok(())
- }
-
- fn check_attached_files(
- &self,
- hdr: &VhostUserMsgHeader<SlaveReq>,
- files: &[File],
- ) -> Result<()> {
- let expected_num_files = match hdr.get_code().map_err(|_| Error::InvalidMessage)? {
- // Expect a single file is passed.
- SlaveReq::SHMEM_MAP | SlaveReq::FS_MAP | SlaveReq::FS_IO | SlaveReq::GPU_MAP => 1,
- _ => 0,
- };
-
- if files.len() == expected_num_files {
- Ok(())
- } else {
- Err(Error::InvalidMessage)
- }
- }
-
- fn extract_msg_body<T: Sized + VhostUserMsgValidator>(
- &self,
- hdr: &VhostUserMsgHeader<SlaveReq>,
- size: usize,
- buf: &[u8],
- ) -> Result<T> {
- self.check_msg_size(hdr, size, mem::size_of::<T>())?;
- // SAFETY: above check ensures that buf is `T` sized.
- let msg = unsafe { std::ptr::read_unaligned(buf.as_ptr() as *const T) };
- if !msg.is_valid() {
- return Err(Error::InvalidMessage);
- }
- Ok(msg)
- }
-
- fn new_reply_header<T: Sized>(
- &self,
- req: &VhostUserMsgHeader<SlaveReq>,
- ) -> Result<VhostUserMsgHeader<SlaveReq>> {
- Ok(VhostUserMsgHeader::new(
- req.get_code().map_err(|_| Error::InvalidMessage)?,
- VhostUserHeaderFlag::REPLY.bits(),
- mem::size_of::<T>() as u32,
- ))
- }
-
- fn send_reply(&mut self, req: &VhostUserMsgHeader<SlaveReq>, res: &Result<u64>) -> Result<()> {
- let code = req.get_code().map_err(|_| Error::InvalidMessage)?;
- if code == SlaveReq::SHMEM_MAP
- || code == SlaveReq::SHMEM_UNMAP
- || code == SlaveReq::GPU_MAP
- || code == SlaveReq::EXTERNAL_MAP
- || (self.reply_ack_negotiated && req.is_need_reply())
- {
- let hdr = self.new_reply_header::<VhostUserU64>(req)?;
- let def_err = libc::EINVAL;
- let val = match res {
- Ok(n) => *n,
- Err(e) => match e {
- Error::ReqHandlerError(ioerr) => match ioerr.raw_os_error() {
- Some(rawerr) => -rawerr as u64,
- None => -def_err as u64,
- },
- _ => -def_err as u64,
- },
- };
- let msg = VhostUserU64::new(val);
- self.sub_sock.send_message(&hdr, &msg, None)?;
- }
- Ok(())
- }
-}
diff --git a/third_party/vmm_vhost/src/master_req_handler/unix.rs b/third_party/vmm_vhost/src/master_req_handler/unix.rs
deleted file mode 100644
index 4cc2a09..0000000
--- a/third_party/vmm_vhost/src/master_req_handler/unix.rs
+++ /dev/null
@@ -1,143 +0,0 @@
-// Copyright 2022 The Chromium OS Authors. All rights reserved.
-// SPDX-License-Identifier: Apache-2.0
-
-//! Unix specific code that keeps rest of the code in the crate platform independent.
-
-use std::os::unix::io::IntoRawFd;
-
-use base::AsRawDescriptor;
-use base::FromRawDescriptor;
-use base::RawDescriptor;
-use base::SafeDescriptor;
-
-use crate::master_req_handler::MasterReqHandler;
-use crate::Result;
-use crate::VhostUserMasterReqHandler;
-
-impl<S: VhostUserMasterReqHandler> AsRawDescriptor for MasterReqHandler<S> {
- /// Used for polling.
- fn as_raw_descriptor(&self) -> RawDescriptor {
- self.sub_sock.as_raw_descriptor()
- }
-}
-
-impl<S: VhostUserMasterReqHandler> MasterReqHandler<S> {
- /// Create a `MasterReqHandler` that uses a Unix stream internally.
- pub fn with_stream(backend: S) -> Result<Self> {
- Self::new(
- backend,
- Box::new(|stream|
- // SAFETY:
- // Safe because we own the raw fd.
- unsafe {
- SafeDescriptor::from_raw_descriptor(stream.into_raw_fd())
- }),
- )
- }
-}
-
-#[cfg(test)]
-mod tests {
- use base::AsRawDescriptor;
- use base::Descriptor;
- use base::FromRawDescriptor;
- use base::INVALID_DESCRIPTOR;
-
- use super::*;
- use crate::message::VhostUserFSSlaveMsg;
- use crate::HandlerResult;
- use crate::Slave;
- use crate::SystemStream;
- use crate::VhostUserMasterReqHandler;
-
- struct MockMasterReqHandler {}
-
- impl VhostUserMasterReqHandler for MockMasterReqHandler {
- /// Handle virtio-fs map file requests from the slave.
- fn fs_slave_map(
- &mut self,
- _fs: &VhostUserFSSlaveMsg,
- _fd: &dyn AsRawDescriptor,
- ) -> HandlerResult<u64> {
- Ok(0)
- }
-
- /// Handle virtio-fs unmap file requests from the slave.
- fn fs_slave_unmap(&mut self, _fs: &VhostUserFSSlaveMsg) -> HandlerResult<u64> {
- Err(std::io::Error::from_raw_os_error(libc::ENOSYS))
- }
- }
-
- #[test]
- fn test_new_master_req_handler() {
- let backend = MockMasterReqHandler {};
- let mut handler = MasterReqHandler::with_stream(backend).unwrap();
-
- let tx_descriptor = handler.take_tx_descriptor();
- assert!(tx_descriptor.as_raw_descriptor() >= 0);
- assert!(handler.as_raw_descriptor() != INVALID_DESCRIPTOR);
- }
-
- #[test]
- fn test_master_slave_req_handler() {
- let backend = MockMasterReqHandler {};
- let mut handler = MasterReqHandler::with_stream(backend).unwrap();
-
- let tx_descriptor = handler.take_tx_descriptor();
- // SAFETY: return value of dup is checked.
- let fd = unsafe { libc::dup(tx_descriptor.as_raw_descriptor()) };
- if fd < 0 {
- panic!("failed to duplicated tx fd!");
- }
- // SAFETY: fd is created above and is valid
- let stream = unsafe { SystemStream::from_raw_descriptor(fd) };
- let mut fs_cache = Slave::from_stream(stream);
-
- std::thread::spawn(move || {
- let res = handler.handle_request().unwrap();
- assert_eq!(res, 0);
- handler.handle_request().unwrap_err();
- });
-
- fs_cache
- .fs_slave_map(&VhostUserFSSlaveMsg::default(), &Descriptor(fd))
- .unwrap();
- // When REPLY_ACK has not been negotiated, the master has no way to detect failure from
- // slave side.
- fs_cache
- .fs_slave_unmap(&VhostUserFSSlaveMsg::default())
- .unwrap();
- }
-
- #[test]
- fn test_master_slave_req_handler_with_ack() {
- let backend = MockMasterReqHandler {};
- let mut handler = MasterReqHandler::with_stream(backend).unwrap();
- handler.set_reply_ack_flag(true);
-
- let tx_descriptor = handler.take_tx_descriptor();
- // SAFETY: return value of dup is checked.
- let fd = unsafe { libc::dup(tx_descriptor.as_raw_descriptor()) };
- if fd < 0 {
- panic!("failed to duplicated tx fd!");
- }
-
- // SAFETY: fd is created above and is valid
- let stream = unsafe { SystemStream::from_raw_descriptor(fd) };
- let mut fs_cache = Slave::from_stream(stream);
-
- std::thread::spawn(move || {
- let res = handler.handle_request().unwrap();
- assert_eq!(res, 0);
- handler.handle_request().unwrap_err();
- });
-
- fs_cache.set_reply_ack_flag(true);
- fs_cache
- .fs_slave_map(&VhostUserFSSlaveMsg::default(), &Descriptor(fd))
- .unwrap();
- fs_cache
- .fs_slave_unmap(&VhostUserFSSlaveMsg::default())
- .unwrap_err();
- }
-}
diff --git a/third_party/vmm_vhost/src/master_req_handler/windows.rs b/third_party/vmm_vhost/src/master_req_handler/windows.rs
deleted file mode 100644
index e01a174..0000000
--- a/third_party/vmm_vhost/src/master_req_handler/windows.rs
+++ /dev/null
@@ -1,145 +0,0 @@
-// Copyright 2022 The Chromium OS Authors. All rights reserved.
-// SPDX-License-Identifier: Apache-2.0
-
-//! Windows specific code that keeps rest of the code in the crate platform independent.
-
-use base::AsRawDescriptor;
-use base::CloseNotifier;
-use base::ReadNotifier;
-use tube_transporter::packed_tube;
-
-use crate::master_req_handler::MasterReqHandler;
-use crate::Result;
-use crate::VhostUserMasterReqHandler;
-
-impl<S: VhostUserMasterReqHandler> MasterReqHandler<S> {
- /// Create a `MasterReqHandler` that uses a Tube internally. Must specify the backend process
- /// which will receive the Tube.
- pub fn with_tube(backend: S, backend_pid: u32) -> Result<Self> {
- Self::new(
- backend,
- Box::new(move |tube|
- // SAFETY:
- // Safe because we expect the tube to be unpacked in the other process.
- unsafe {
- packed_tube::pack(tube, backend_pid).expect("packed tube")
- }),
- )
- }
-}
-
-impl<S: VhostUserMasterReqHandler> ReadNotifier for MasterReqHandler<S> {
- /// Used for polling.
- fn get_read_notifier(&self) -> &dyn AsRawDescriptor {
- self.sub_sock.0.get_tube().get_read_notifier()
- }
-}
-
-impl<S: VhostUserMasterReqHandler> CloseNotifier for MasterReqHandler<S> {
- /// Used for closing.
- fn get_close_notifier(&self) -> &dyn AsRawDescriptor {
- self.sub_sock.0.get_tube().get_close_notifier()
- }
-}
-
-#[cfg(test)]
-mod tests {
- use base::AsRawDescriptor;
- use base::Descriptor;
- use base::INVALID_DESCRIPTOR;
-
- use super::*;
- use crate::message::VhostUserFSSlaveMsg;
- use crate::HandlerResult;
- use crate::Slave;
- use crate::VhostUserMasterReqHandler;
-
- struct MockMasterReqHandler {}
-
- impl VhostUserMasterReqHandler for MockMasterReqHandler {
- /// Handle virtio-fs map file requests from the slave.
- fn fs_slave_map(
- &mut self,
- _fs: &VhostUserFSSlaveMsg,
- _fd: &dyn AsRawDescriptor,
- ) -> HandlerResult<u64> {
- Ok(0)
- }
-
- /// Handle virtio-fs unmap file requests from the slave.
- fn fs_slave_unmap(&mut self, _fs: &VhostUserFSSlaveMsg) -> HandlerResult<u64> {
- Err(std::io::Error::from_raw_os_error(libc::ENOSYS))
- }
- }
-
- #[test]
- fn test_new_master_req_handler() {
- let backend = MockMasterReqHandler {};
- let handler = MasterReqHandler::with_tube(backend, std::process::id()).unwrap();
-
- assert!(handler.get_read_notifier().as_raw_descriptor() != INVALID_DESCRIPTOR);
- assert!(handler.get_close_notifier().as_raw_descriptor() != INVALID_DESCRIPTOR);
- }
-
- #[test]
- fn test_master_slave_req_handler() {
- let backend = MockMasterReqHandler {};
- let mut handler = MasterReqHandler::with_tube(backend, std::process::id()).unwrap();
-
- let event = base::Event::new().unwrap();
- let tx_descriptor = handler.take_tx_descriptor();
- // SAFETY:
- // Safe because we only do it once.
- let stream = unsafe { packed_tube::unpack(tx_descriptor).unwrap() };
- let mut fs_cache = Slave::from_stream(stream);
-
- std::thread::spawn(move || {
- let res = handler.handle_request().unwrap();
- assert_eq!(res, 0);
- handler.handle_request().unwrap_err();
- });
-
- fs_cache
- .fs_slave_map(
- &VhostUserFSSlaveMsg::default(),
- &Descriptor(event.as_raw_descriptor()),
- )
- .unwrap();
- // When REPLY_ACK has not been negotiated, the master has no way to detect failure from
- // slave side.
- fs_cache
- .fs_slave_unmap(&VhostUserFSSlaveMsg::default())
- .unwrap();
- }
-
- #[test]
- fn test_master_slave_req_handler_with_ack() {
- let backend = MockMasterReqHandler {};
- let mut handler = MasterReqHandler::with_tube(backend, std::process::id()).unwrap();
- handler.set_reply_ack_flag(true);
-
- let event = base::Event::new().unwrap();
- let tx_descriptor = handler.take_tx_descriptor();
- // SAFETY:
- // Safe because we only do it once.
- let stream = unsafe { packed_tube::unpack(tx_descriptor).unwrap() };
- let mut fs_cache = Slave::from_stream(stream);
-
- std::thread::spawn(move || {
- let res = handler.handle_request().unwrap();
- assert_eq!(res, 0);
- handler.handle_request().unwrap_err();
- });
-
- fs_cache.set_reply_ack_flag(true);
- fs_cache
- .fs_slave_map(
- &VhostUserFSSlaveMsg::default(),
- &Descriptor(event.as_raw_descriptor()),
- )
- .unwrap();
- fs_cache
- .fs_slave_unmap(&VhostUserFSSlaveMsg::default())
- .unwrap_err();
- }
-}
diff --git a/third_party/vmm_vhost/src/message.rs b/third_party/vmm_vhost/src/message.rs
index 6273709..7f50c08 100644
--- a/third_party/vmm_vhost/src/message.rs
+++ b/third_party/vmm_vhost/src/message.rs
@@ -37,7 +37,7 @@
/// Maximum number of vrings supported.
pub const VHOST_USER_MAX_VRINGS: u64 = 0x8000u64;
-/// Used for the payload in Vhost Master messages.
+/// Message type. Either [[FrontendReq]] or [[BackendReq]].
pub trait Req:
Clone + Copy + Debug + PartialEq + Eq + PartialOrd + Ord + Into<u32> + TryFrom<u32> + Send + Sync
{
@@ -51,19 +51,23 @@
InvalidValue(u32),
}
-/// Type of requests sending from masters to slaves.
+/// Type of requests sent to the backend.
+///
+/// These are called "front-end message types" in the spec, so we call them `FrontendReq` here even
+/// though it is somewhat confusing that the `BackendClient` sends `FrontendReq`s to a
+/// `BackendServer`.
#[repr(u32)]
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, enumn::N)]
-pub enum MasterReq {
+pub enum FrontendReq {
/// Get from the underlying vhost implementation the features bit mask.
GET_FEATURES = 1,
/// Enable features in the underlying vhost implementation using a bit mask.
SET_FEATURES = 2,
- /// Set the current Master as an owner of the session.
+ /// Set the current frontend as an owner of the session.
SET_OWNER = 3,
/// No longer used.
RESET_OWNER = 4,
- /// Set the memory map regions on the slave so it can translate the vring addresses.
+ /// Set the memory map regions on the backend so it can translate the vring addresses.
SET_MEM_TABLE = 5,
/// Set logging shared memory space.
SET_LOG_BASE = 6,
@@ -89,15 +93,15 @@
SET_PROTOCOL_FEATURES = 16,
/// Query how many queues the backend supports.
GET_QUEUE_NUM = 17,
- /// Signal slave to enable or disable corresponding vring.
+ /// Signal backend to enable or disable corresponding vring.
SET_VRING_ENABLE = 18,
/// Ask vhost user backend to broadcast a fake RARP to notify the migration is terminated
/// for guest that does not support GUEST_ANNOUNCE.
SEND_RARP = 19,
/// Set host MTU value exposed to the guest.
NET_SET_MTU = 20,
- /// Set the socket file descriptor for slave initiated requests.
- SET_SLAVE_REQ_FD = 21,
+ /// Set the socket file descriptor for backend initiated requests.
+ SET_BACKEND_REQ_FD = 21,
/// Send IOTLB messages with struct vhost_iotlb_msg as payload.
IOTLB_MSG = 22,
/// Set the endianness of a VQ for legacy devices.
@@ -110,15 +114,15 @@
CREATE_CRYPTO_SESSION = 26,
/// Close a session for crypto operation.
CLOSE_CRYPTO_SESSION = 27,
- /// Advise slave that a migration with postcopy enabled is underway.
+ /// Advise backend that a migration with postcopy enabled is underway.
POSTCOPY_ADVISE = 28,
- /// Advise slave that a transition to postcopy mode has happened.
+ /// Advise backend that a transition to postcopy mode has happened.
POSTCOPY_LISTEN = 29,
/// Advise that postcopy migration has now completed.
POSTCOPY_END = 30,
- /// Get a shared buffer from slave.
+ /// Get a shared buffer from backend.
GET_INFLIGHT_FD = 31,
- /// Send the shared inflight buffer back to slave.
+ /// Send the shared inflight buffer back to backend.
SET_INFLIGHT_FD = 32,
/// Sets the GPU protocol socket file descriptor.
GPU_SET_SOCKET = 33,
@@ -140,38 +144,44 @@
/// Query the backend for its device status as defined in the VIRTIO
/// specification.
GET_STATUS = 40,
- /// Get a list of the device's shared memory regions.
- GET_SHARED_MEMORY_REGIONS = 41,
+
+ // Non-standard message types.
/// Stop all queue handlers and save each queue state.
- SLEEP = 42,
+ SLEEP = 1000,
/// Start up all queue handlers with their saved queue state.
- WAKE = 43,
+ WAKE = 1001,
/// Request serialized state of vhost process.
- SNAPSHOT = 44,
+ SNAPSHOT = 1002,
/// Request to restore state of vhost process.
- RESTORE = 45,
+ RESTORE = 1003,
+ /// Get a list of the device's shared memory regions.
+ GET_SHARED_MEMORY_REGIONS = 1004,
}
-impl From<MasterReq> for u32 {
- fn from(req: MasterReq) -> u32 {
+impl From<FrontendReq> for u32 {
+ fn from(req: FrontendReq) -> u32 {
req as u32
}
}
-impl Req for MasterReq {}
+impl Req for FrontendReq {}
-impl TryFrom<u32> for MasterReq {
+impl TryFrom<u32> for FrontendReq {
type Error = ReqError;
fn try_from(value: u32) -> Result<Self, Self::Error> {
- MasterReq::n(value).ok_or(ReqError::InvalidValue(value))
+ FrontendReq::n(value).ok_or(ReqError::InvalidValue(value))
}
}
-/// Type of requests sending from slaves to masters.
+/// Type of requests sent from backends to frontends.
+///
+/// These are called "back-end message types" in the spec, so we call them `BackendReq` here
+/// even though it is somewhat confusing that the `FrontendClient` sends `BackendReq`s to a
+/// `FrontendServer`.
#[repr(u32)]
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, enumn::N)]
-pub enum SlaveReq {
+pub enum BackendReq {
/// Send IOTLB messages with struct vhost_iotlb_msg as payload.
IOTLB_MSG = 1,
/// Notify that the virtio device's configuration space has changed.
@@ -182,37 +192,39 @@
VRING_CALL = 4,
/// Indicate that an error occurred on the specific vring.
VRING_ERR = 5,
+
+ // Non-standard message types.
/// Indicates a request to map a fd into a shared memory region.
- SHMEM_MAP = 6,
+ SHMEM_MAP = 1000,
/// Indicates a request to unmap part of a shared memory region.
- SHMEM_UNMAP = 7,
+ SHMEM_UNMAP = 1001,
/// Virtio-fs draft: map file content into the window.
- FS_MAP = 8,
+ DEPRECATED__FS_MAP = 1002,
/// Virtio-fs draft: unmap file content from the window.
- FS_UNMAP = 9,
+ DEPRECATED__FS_UNMAP = 1003,
/// Virtio-fs draft: sync file content.
- FS_SYNC = 10,
+ DEPRECATED__FS_SYNC = 1004,
/// Virtio-fs draft: perform a read/write from an fd directly to GPA.
- FS_IO = 11,
+ DEPRECATED__FS_IO = 1005,
/// Indicates a request to map GPU memory into a shared memory region.
- GPU_MAP = 12,
+ GPU_MAP = 1006,
/// Indicates a request to map external memory into a shared memory region.
- EXTERNAL_MAP = 13,
+ EXTERNAL_MAP = 1007,
}
-impl From<SlaveReq> for u32 {
- fn from(req: SlaveReq) -> u32 {
+impl From<BackendReq> for u32 {
+ fn from(req: BackendReq) -> u32 {
req as u32
}
}
-impl Req for SlaveReq {}
+impl Req for BackendReq {}
-impl TryFrom<u32> for SlaveReq {
+impl TryFrom<u32> for BackendReq {
type Error = ReqError;
fn try_from(value: u32) -> Result<Self, Self::Error> {
- SlaveReq::n(value).ok_or(ReqError::InvalidValue(value))
+ BackendReq::n(value).ok_or(ReqError::InvalidValue(value))
}
}
@@ -382,6 +394,8 @@
}
}
+pub const VIRTIO_F_RING_PACKED: u32 = 34;
+
/// Virtio feature flag for the vhost-user protocol features.
pub const VHOST_USER_F_PROTOCOL_FEATURES: u32 = 30;
@@ -401,19 +415,19 @@
const REPLY_ACK = 0x0000_0008;
/// Support setting MTU for virtio-net devices.
const MTU = 0x0000_0010;
- /// Allow the slave to send requests to the master by an optional communication channel.
- const SLAVE_REQ = 0x0000_0020;
- /// Support setting slave endian by SET_VRING_ENDIAN.
+ /// Allow the backend to send requests to the frontend by an optional communication channel.
+ const BACKEND_REQ = 0x0000_0020;
+ /// Support setting backend endian by SET_VRING_ENDIAN.
const CROSS_ENDIAN = 0x0000_0040;
/// Support crypto operations.
const CRYPTO_SESSION = 0x0000_0080;
- /// Support sending userfault_fd from slaves to masters.
+ /// Support sending userfault_fd from backends to frontends.
const PAGEFAULT = 0x0000_0100;
/// Support Virtio device configuration.
const CONFIG = 0x0000_0200;
- /// Allow the slave to send fds (at most 8 descriptors in each message) to the master.
- const SLAVE_SEND_FD = 0x0000_0400;
- /// Allow the slave to register a host notifier.
+ /// Allow the backend to send fds (at most 8 descriptors in each message) to the frontend.
+ const BACKEND_SEND_FD = 0x0000_0400;
+ /// Allow the backend to register a host notifier.
const HOST_NOTIFIER = 0x0000_0800;
/// Support inflight shmfd.
const INFLIGHT_SHMFD = 0x0000_1000;
@@ -713,9 +727,9 @@
#[derive(Copy, Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
#[repr(transparent)]
pub struct VhostUserConfigFlags: u32 {
- /// Vhost master messages used for writeable fields.
+ /// Vhost frontend messages used for writeable fields.
const WRITABLE = 0x1;
- /// Vhost master messages used for live migration.
+ /// Vhost frontend messages used for live migration.
const LIVE_MIGRATION = 0x2;
}
}
@@ -830,67 +844,6 @@
}
*/
-/// Flags for virtio-fs slave messages.
-#[repr(transparent)]
-#[derive(
- AsBytes,
- FromZeroes,
- FromBytes,
- Copy,
- Clone,
- Debug,
- Default,
- Eq,
- Hash,
- Ord,
- PartialEq,
- PartialOrd,
-)]
-pub struct VhostUserFSSlaveMsgFlags(u64);
-
-// Bit mask for flags in virtio-fs slave messages
-bitflags! {
- impl VhostUserFSSlaveMsgFlags: u64 {
- /// Empty permission.
- const EMPTY = 0x0;
- /// Read permission.
- const MAP_R = 0x1;
- /// Write permission.
- const MAP_W = 0x2;
- }
-}
-
-/// Max entries in one virtio-fs slave request.
-pub const VHOST_USER_FS_SLAVE_ENTRIES: usize = 8;
-
-/// Slave request message to update the MMIO window.
-#[repr(packed)]
-#[derive(Default, Copy, Clone, AsBytes, FromZeroes, FromBytes)]
-pub struct VhostUserFSSlaveMsg {
- /// File offset.
- pub fd_offset: [u64; VHOST_USER_FS_SLAVE_ENTRIES],
- /// Offset into the DAX window.
- pub cache_offset: [u64; VHOST_USER_FS_SLAVE_ENTRIES],
- /// Size of region to map.
- pub len: [u64; VHOST_USER_FS_SLAVE_ENTRIES],
- /// Flags for the mmap operation
- pub flags: [VhostUserFSSlaveMsgFlags; VHOST_USER_FS_SLAVE_ENTRIES],
-}
-
-impl VhostUserMsgValidator for VhostUserFSSlaveMsg {
- fn is_valid(&self) -> bool {
- for i in 0..VHOST_USER_FS_SLAVE_ENTRIES {
- if ({ self.flags[i] }.bits() & !VhostUserFSSlaveMsgFlags::all().bits()) != 0
- || self.fd_offset[i].checked_add(self.len[i]).is_none()
- || self.cache_offset[i].checked_add(self.len[i]).is_none()
- {
- return false;
- }
- }
- true
- }
-}
-
/// Flags for SHMEM_MAP messages.
#[repr(transparent)]
#[derive(
@@ -942,7 +895,7 @@
}
}
-/// Slave request message to map a file into a shared memory region.
+/// Backend request message to map a file into a shared memory region.
#[repr(C, packed)]
#[derive(Default, Copy, Clone, AsBytes, FromZeroes, FromBytes)]
pub struct VhostUserShmemMapMsg {
@@ -961,7 +914,7 @@
impl VhostUserMsgValidator for VhostUserShmemMapMsg {
fn is_valid(&self) -> bool {
- (self.flags.bits() & !VhostUserFSSlaveMsgFlags::all().bits() as u8) == 0
+ (self.flags.bits() & !VhostUserShmemMapMsgFlags::all().bits()) == 0
&& self.fd_offset.checked_add(self.len).is_some()
&& self.shm_offset.checked_add(self.len).is_some()
}
@@ -987,7 +940,7 @@
}
}
-/// Slave request message to map GPU memory into a shared memory region.
+/// Backend request message to map GPU memory into a shared memory region.
#[repr(C, packed)]
#[derive(Default, Copy, Clone, AsBytes, FromZeroes, FromBytes)]
pub struct VhostUserGpuMapMsg {
@@ -1038,7 +991,7 @@
}
}
-/// Slave request message to map external memory into a shared memory region.
+/// Backend request message to map external memory into a shared memory region.
#[repr(C, packed)]
#[derive(Default, Copy, Clone, AsBytes, FromZeroes, FromBytes)]
pub struct VhostUserExternalMapMsg {
@@ -1072,7 +1025,7 @@
}
}
-/// Slave request message to unmap part of a shared memory region.
+/// Backend request message to unmap part of a shared memory region.
#[repr(C, packed)]
#[derive(Default, Copy, Clone, FromZeroes, FromBytes, AsBytes)]
pub struct VhostUserShmemUnmapMsg {
@@ -1262,31 +1215,31 @@
use super::*;
#[test]
- fn check_master_request_code() {
- MasterReq::try_from(0).expect_err("invalid value");
- MasterReq::try_from(46).expect_err("invalid value");
- MasterReq::try_from(10000).expect_err("invalid value");
+ fn check_frontend_request_code() {
+ FrontendReq::try_from(0).expect_err("invalid value");
+ FrontendReq::try_from(46).expect_err("invalid value");
+ FrontendReq::try_from(10000).expect_err("invalid value");
- let code = MasterReq::try_from(MasterReq::GET_FEATURES as u32).unwrap();
+ let code = FrontendReq::try_from(FrontendReq::GET_FEATURES as u32).unwrap();
assert_eq!(code, code.clone());
}
#[test]
- fn check_slave_request_code() {
- SlaveReq::try_from(0).expect_err("invalid value");
- SlaveReq::try_from(14).expect_err("invalid value");
- SlaveReq::try_from(10000).expect_err("invalid value");
+ fn check_backend_request_code() {
+ BackendReq::try_from(0).expect_err("invalid value");
+ BackendReq::try_from(14).expect_err("invalid value");
+ BackendReq::try_from(10000).expect_err("invalid value");
- let code = SlaveReq::try_from(SlaveReq::CONFIG_CHANGE_MSG as u32).unwrap();
+ let code = BackendReq::try_from(BackendReq::CONFIG_CHANGE_MSG as u32).unwrap();
assert_eq!(code, code.clone());
}
#[test]
fn msg_header_ops() {
- let mut hdr = VhostUserMsgHeader::new(MasterReq::GET_FEATURES, 0, 0x100);
- assert_eq!(hdr.get_code(), Ok(MasterReq::GET_FEATURES));
- hdr.set_code(MasterReq::SET_FEATURES);
- assert_eq!(hdr.get_code(), Ok(MasterReq::SET_FEATURES));
+ let mut hdr = VhostUserMsgHeader::new(FrontendReq::GET_FEATURES, 0, 0x100);
+ assert_eq!(hdr.get_code(), Ok(FrontendReq::GET_FEATURES));
+ hdr.set_code(FrontendReq::SET_FEATURES);
+ assert_eq!(hdr.get_code(), Ok(FrontendReq::SET_FEATURES));
assert_eq!(hdr.get_version(), 0x1);
@@ -1512,21 +1465,4 @@
msg.flags |= 0x4;
assert!(!msg.is_valid());
}
-
- #[test]
- fn test_vhost_user_fs_slave() {
- let mut fs_slave = VhostUserFSSlaveMsg::default();
-
- assert!(fs_slave.is_valid());
-
- fs_slave.fd_offset[0] = 0xffff_ffff_ffff_ffff;
- fs_slave.len[0] = 0x1;
- assert!(!fs_slave.is_valid());
-
- assert_ne!(
- VhostUserFSSlaveMsgFlags::MAP_R,
- VhostUserFSSlaveMsgFlags::MAP_W
- );
- assert_eq!(VhostUserFSSlaveMsgFlags::EMPTY.bits(), 0);
- }
}
diff --git a/third_party/vmm_vhost/src/slave_proxy.rs b/third_party/vmm_vhost/src/slave_proxy.rs
deleted file mode 100644
index 6ae2933..0000000
--- a/third_party/vmm_vhost/src/slave_proxy.rs
+++ /dev/null
@@ -1,226 +0,0 @@
-// Copyright (C) 2020 Alibaba Cloud. All rights reserved.
-// SPDX-License-Identifier: Apache-2.0
-
-use std::mem;
-use std::string::ToString;
-
-use base::AsRawDescriptor;
-use base::RawDescriptor;
-use zerocopy::AsBytes;
-
-use crate::message::*;
-use crate::Connection;
-use crate::Error;
-use crate::HandlerResult;
-use crate::Result;
-use crate::SlaveReq;
-use crate::SystemStream;
-use crate::VhostUserMasterReqHandler;
-
-/// Request proxy to send slave requests to the master through the slave communication channel.
-///
-/// The [Slave] acts as a message proxy to forward slave requests to the master through the
-/// vhost-user slave communication channel. The forwarded messages will be handled by the
-/// [MasterReqHandler] server.
-///
-/// [Slave]: struct.Slave.html
-/// [MasterReqHandler]: struct.MasterReqHandler.html
-pub struct Slave {
- sock: Connection<SlaveReq>,
-
- // Protocol feature VHOST_USER_PROTOCOL_F_REPLY_ACK has been negotiated.
- reply_ack_negotiated: bool,
-
- // whether the connection has encountered any failure
- error: Option<i32>,
-}
-
-impl Slave {
- /// Constructs a new slave proxy from the given connection.
- pub fn new(ep: Connection<SlaveReq>) -> Self {
- Slave {
- sock: ep,
- reply_ack_negotiated: false,
- error: None,
- }
- }
-
- /// Create a new instance from a `SystemStream` object.
- pub fn from_stream(sock: SystemStream) -> Self {
- Self::new(Connection::from(sock))
- }
-
- fn send_message<T>(
- &mut self,
- request: SlaveReq,
- msg: &T,
- fds: Option<&[RawDescriptor]>,
- ) -> HandlerResult<u64>
- where
- T: AsBytes,
- {
- let len = mem::size_of::<T>();
- let mut hdr = VhostUserMsgHeader::new(request, 0, len as u32);
- if self.reply_ack_negotiated {
- hdr.set_need_reply(true);
- }
- self.sock
- .send_message(&hdr, msg, fds)
- .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e.to_string()))?;
-
- self.wait_for_reply(&hdr)
- .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e.to_string()))
- }
-
- fn wait_for_reply(&mut self, hdr: &VhostUserMsgHeader<SlaveReq>) -> Result<u64> {
- let code = hdr.get_code().map_err(|_| Error::InvalidMessage)?;
- if code != SlaveReq::SHMEM_MAP
- && code != SlaveReq::SHMEM_UNMAP
- && code != SlaveReq::GPU_MAP
- && code != SlaveReq::EXTERNAL_MAP
- && !self.reply_ack_negotiated
- {
- return Ok(0);
- }
-
- let (reply, body, rfds) = self.sock.recv_message::<VhostUserU64>()?;
- if !reply.is_reply_for(hdr) || !rfds.is_empty() || !body.is_valid() {
- return Err(Error::InvalidMessage);
- }
- if body.value != 0 {
- return Err(Error::MasterInternalError);
- }
-
- Ok(body.value)
- }
-
- /// Set the negotiation state of the `VHOST_USER_PROTOCOL_F_REPLY_ACK` protocol feature.
- ///
- /// When the `VHOST_USER_PROTOCOL_F_REPLY_ACK` protocol feature has been negotiated,
- /// the "REPLY_ACK" flag will be set in the message header for every slave to master request
- /// message.
- pub fn set_reply_ack_flag(&mut self, enable: bool) {
- self.reply_ack_negotiated = enable;
- }
-
- /// Mark connection as failed with specified error code.
- pub fn set_failed(&mut self, error: i32) {
- self.error = Some(error);
- }
-}
-
-impl VhostUserMasterReqHandler for Slave {
- /// Handle shared memory region mapping requests.
- fn shmem_map(
- &mut self,
- req: &VhostUserShmemMapMsg,
- fd: &dyn AsRawDescriptor,
- ) -> HandlerResult<u64> {
- self.send_message(SlaveReq::SHMEM_MAP, req, Some(&[fd.as_raw_descriptor()]))
- }
-
- /// Handle shared memory region unmapping requests.
- fn shmem_unmap(&mut self, req: &VhostUserShmemUnmapMsg) -> HandlerResult<u64> {
- self.send_message(SlaveReq::SHMEM_UNMAP, req, None)
- }
-
- /// Handle config change requests.
- fn handle_config_change(&mut self) -> HandlerResult<u64> {
- self.send_message(SlaveReq::CONFIG_CHANGE_MSG, &VhostUserEmptyMessage, None)
- }
-
- /// Forward vhost-user-fs map file requests to the slave.
- fn fs_slave_map(
- &mut self,
- fs: &VhostUserFSSlaveMsg,
- fd: &dyn AsRawDescriptor,
- ) -> HandlerResult<u64> {
- self.send_message(SlaveReq::FS_MAP, fs, Some(&[fd.as_raw_descriptor()]))
- }
-
- /// Forward vhost-user-fs unmap file requests to the master.
- fn fs_slave_unmap(&mut self, fs: &VhostUserFSSlaveMsg) -> HandlerResult<u64> {
- self.send_message(SlaveReq::FS_UNMAP, fs, None)
- }
-
- /// Handle GPU shared memory region mapping requests.
- fn gpu_map(
- &mut self,
- req: &VhostUserGpuMapMsg,
- descriptor: &dyn AsRawDescriptor,
- ) -> HandlerResult<u64> {
- self.send_message(
- SlaveReq::GPU_MAP,
- req,
- Some(&[descriptor.as_raw_descriptor()]),
- )
- }
-
- /// Handle external memory region mapping requests.
- fn external_map(&mut self, req: &VhostUserExternalMapMsg) -> HandlerResult<u64> {
- self.send_message(SlaveReq::EXTERNAL_MAP, req, None)
- }
-}
-
-#[cfg(test)]
-mod tests {
-
- use super::*;
- use crate::SystemStream;
-
- #[test]
- fn test_slave_req_set_failed() {
- let (p1, _p2) = SystemStream::pair().unwrap();
- let mut fs_cache = Slave::from_stream(p1);
-
- assert!(fs_cache.error.is_none());
- fs_cache.set_failed(libc::EAGAIN);
- assert_eq!(fs_cache.error, Some(libc::EAGAIN));
- }
-
- #[test]
- fn test_slave_recv_negative() {
- let (p1, p2) = SystemStream::pair().unwrap();
- let mut fs_cache = Slave::from_stream(p1);
- let master = Connection::from(p2);
-
- let len = mem::size_of::<VhostUserU64>();
- let mut hdr = VhostUserMsgHeader::new(
- SlaveReq::FS_MAP,
- VhostUserHeaderFlag::REPLY.bits(),
- len as u32,
- );
- let body = VhostUserU64::new(0);
-
- master
- .send_message(&hdr, &body, Some(&[master.as_raw_descriptor()]))
- .unwrap();
- fs_cache
- .fs_slave_map(&VhostUserFSSlaveMsg::default(), &master)
- .unwrap();
-
- fs_cache.set_reply_ack_flag(true);
- fs_cache
- .fs_slave_map(&VhostUserFSSlaveMsg::default(), &master)
- .unwrap_err();
-
- hdr.set_code(SlaveReq::FS_UNMAP);
- master.send_message(&hdr, &body, None).unwrap();
- fs_cache
- .fs_slave_map(&VhostUserFSSlaveMsg::default(), &master)
- .unwrap_err();
- hdr.set_code(SlaveReq::FS_MAP);
-
- let body = VhostUserU64::new(1);
- master.send_message(&hdr, &body, None).unwrap();
- fs_cache
- .fs_slave_map(&VhostUserFSSlaveMsg::default(), &master)
- .unwrap_err();
-
- let body = VhostUserU64::new(0);
- master.send_message(&hdr, &body, None).unwrap();
- fs_cache
- .fs_slave_map(&VhostUserFSSlaveMsg::default(), &master)
- .unwrap();
- }
-}
diff --git a/third_party/vmm_vhost/src/sys.rs b/third_party/vmm_vhost/src/sys.rs
index 16f8616..8229c59 100644
--- a/third_party/vmm_vhost/src/sys.rs
+++ b/third_party/vmm_vhost/src/sys.rs
@@ -15,5 +15,13 @@
}
}
+pub use platform::to_system_stream;
pub(crate) use platform::PlatformConnection;
pub use platform::SystemStream;
+
+#[cfg(test)]
+pub(crate) mod tests {
+ pub(crate) use super::platform::tests::create_client_server_pair;
+ pub(crate) use super::platform::tests::create_connection_pair;
+ pub(crate) use super::platform::tests::create_pair;
+}
diff --git a/third_party/vmm_vhost/src/sys/unix.rs b/third_party/vmm_vhost/src/sys/unix.rs
index 18d9708..7475cc8 100644
--- a/third_party/vmm_vhost/src/sys/unix.rs
+++ b/third_party/vmm_vhost/src/sys/unix.rs
@@ -3,8 +3,30 @@
//! Unix specific code that keeps rest of the code in the crate platform independent.
+use std::any::Any;
+use std::fs::File;
+use std::io::ErrorKind;
+use std::io::IoSlice;
+use std::io::IoSliceMut;
+use std::os::fd::OwnedFd;
use std::os::unix::net::UnixListener;
use std::os::unix::net::UnixStream;
+use std::path::Path;
+use std::path::PathBuf;
+
+use base::AsRawDescriptor;
+use base::RawDescriptor;
+use base::SafeDescriptor;
+use base::ScmSocket;
+
+use crate::connection::Listener;
+use crate::frontend_server::FrontendServer;
+use crate::message::FrontendReq;
+use crate::message::MAX_ATTACHED_FD_ENTRIES;
+use crate::Connection;
+use crate::Error;
+use crate::Frontend;
+use crate::Result;
/// Alias to enable platform independent code.
pub type SystemListener = UnixListener;
@@ -12,4 +34,375 @@
/// Alias to enable platform independent code.
pub type SystemStream = UnixStream;
-pub(crate) use crate::connection::socket::SocketPlatformConnection as PlatformConnection;
+pub use SocketPlatformConnection as PlatformConnection;
+
+/// Unix domain socket listener for accepting incoming connections.
+pub struct SocketListener {
+ fd: SystemListener,
+ drop_path: Option<Box<dyn Any>>,
+}
+
+impl SocketListener {
+ /// Create a unix domain socket listener.
+ ///
+ /// # Return:
+ /// * - the new SocketListener object on success.
+ /// * - SocketError: failed to create listener socket.
+ pub fn new<P: AsRef<Path>>(path: P, unlink: bool) -> Result<Self> {
+ if unlink {
+ let _ = std::fs::remove_file(&path);
+ }
+ let fd = SystemListener::bind(&path).map_err(Error::SocketError)?;
+
+ struct DropPath {
+ path: PathBuf,
+ }
+
+ impl Drop for DropPath {
+ fn drop(&mut self) {
+ let _ = std::fs::remove_file(&self.path);
+ }
+ }
+
+ Ok(SocketListener {
+ fd,
+ drop_path: Some(Box::new(DropPath {
+ path: path.as_ref().to_owned(),
+ })),
+ })
+ }
+
+ /// Take and return the resources that the parent process needs to keep alive as long as the
+ /// child process lives, in case of incoming fork.
+ pub fn take_resources_for_parent(&mut self) -> Option<Box<dyn Any>> {
+ self.drop_path.take()
+ }
+}
+
+impl Listener for SocketListener {
+ /// Accept an incoming connection.
+ ///
+ /// # Return:
+ /// * - Some(SystemListener): new SystemListener object if new incoming connection is available.
+ /// * - None: no incoming connection available.
+ /// * - SocketError: errors from accept().
+ fn accept(&mut self) -> Result<Option<Connection<FrontendReq>>> {
+ loop {
+ match self.fd.accept() {
+ Ok((stream, _addr)) => {
+ return Ok(Some(Connection::from(stream)));
+ }
+ Err(e) => {
+ match e.kind() {
+ // No incoming connection available.
+ ErrorKind::WouldBlock => return Ok(None),
+ // New connection closed by peer.
+ ErrorKind::ConnectionAborted => return Ok(None),
+ // Interrupted by signals, retry
+ ErrorKind::Interrupted => continue,
+ _ => return Err(Error::SocketError(e)),
+ }
+ }
+ }
+ }
+ }
+
+ /// Change blocking status on the listener.
+ ///
+ /// # Return:
+ /// * - () on success.
+ /// * - SocketError: failure from set_nonblocking().
+ fn set_nonblocking(&self, block: bool) -> Result<()> {
+ self.fd.set_nonblocking(block).map_err(Error::SocketError)
+ }
+}
+
+impl AsRawDescriptor for SocketListener {
+ fn as_raw_descriptor(&self) -> RawDescriptor {
+ self.fd.as_raw_descriptor()
+ }
+}
+
+/// Unix domain socket based vhost-user connection.
+pub struct SocketPlatformConnection {
+ sock: ScmSocket<SystemStream>,
+}
+
+// TODO: Switch to TryFrom to avoid the unwrap.
+impl From<SystemStream> for SocketPlatformConnection {
+ fn from(sock: SystemStream) -> Self {
+ Self {
+ sock: sock.try_into().unwrap(),
+ }
+ }
+}
+
+// Advance the internal cursor of the slices.
+// This is same with a nightly API `IoSlice::advance_slices` but for `&[u8]`.
+fn advance_slices(bufs: &mut &mut [&[u8]], mut count: usize) {
+ use std::mem::take;
+
+ let mut idx = 0;
+ for b in bufs.iter() {
+ if count < b.len() {
+ break;
+ }
+ count -= b.len();
+ idx += 1;
+ }
+ *bufs = &mut take(bufs)[idx..];
+ if !bufs.is_empty() {
+ bufs[0] = &bufs[0][count..];
+ }
+}
+
+impl SocketPlatformConnection {
+ /// Create a new stream by connecting to server at `str`.
+ ///
+ /// # Return:
+ /// * - the new SocketPlatformConnection object on success.
+ /// * - SocketConnect: failed to connect to peer.
+ pub fn connect<P: AsRef<Path>>(path: P) -> Result<Self> {
+ let sock = SystemStream::connect(path).map_err(Error::SocketConnect)?;
+ Ok(Self::from(sock))
+ }
+
+ /// Sends all bytes from scatter-gather vectors with optional attached file descriptors. Will
+ /// loop until all data has been transfered.
+ ///
+ /// # TODO
+ /// This function takes a slice of `&[u8]` instead of `IoSlice` because the internal
+ /// cursor needs to be moved by `advance_slices()`.
+ /// Once `IoSlice::advance_slices()` becomes stable, this should be updated.
+ /// <https://github.com/rust-lang/rust/issues/62726>.
+ fn send_iovec_all(
+ &self,
+ mut iovs: &mut [&[u8]],
+ mut fds: Option<&[RawDescriptor]>,
+ ) -> Result<()> {
+ // Guarantee that `iovs` becomes empty if it doesn't contain any data.
+ advance_slices(&mut iovs, 0);
+
+ while !iovs.is_empty() {
+ let iovec: Vec<_> = iovs.iter_mut().map(|i| IoSlice::new(i)).collect();
+ match self.sock.send_vectored_with_fds(&iovec, fds.unwrap_or(&[])) {
+ Ok(n) => {
+ fds = None;
+ advance_slices(&mut iovs, n);
+ }
+ Err(e) => match e.kind() {
+ ErrorKind::WouldBlock | ErrorKind::Interrupted => {}
+ _ => return Err(Error::SocketError(e)),
+ },
+ }
+ }
+ Ok(())
+ }
+
+ /// Sends a single message over the socket with optional attached file descriptors.
+ ///
+ /// - `hdr`: vhost message header
+ /// - `body`: vhost message body (may be empty to send a header-only message)
+ /// - `payload`: additional bytes to append to `body` (may be empty)
+ pub fn send_message(
+ &self,
+ hdr: &[u8],
+ body: &[u8],
+ payload: &[u8],
+ fds: Option<&[RawDescriptor]>,
+ ) -> Result<()> {
+ let mut iobufs = [hdr, body, payload];
+ self.send_iovec_all(&mut iobufs, fds)
+ }
+
+ /// Reads bytes from the socket into the given scatter/gather vectors with optional attached
+ /// file.
+ ///
+ /// The underlying communication channel is a Unix domain socket in STREAM mode. It's a little
+ /// tricky to pass file descriptors through such a communication channel. Let's assume that a
+ /// sender sending a message with some file descriptors attached. To successfully receive those
+ /// attached file descriptors, the receiver must obey following rules:
+ /// 1) file descriptors are attached to a message.
+ /// 2) message(packet) boundaries must be respected on the receive side.
+ /// In other words, recvmsg() operations must not cross the packet boundary, otherwise the
+ /// attached file descriptors will get lost.
+ /// Note that this function wraps received file descriptors as `File`.
+ ///
+ /// # Return:
+ /// * - (number of bytes received, [received files]) on success
+ /// * - Disconnect: the connection is closed.
+ /// * - SocketRetry: temporary error caused by signals or short of resources.
+ /// * - SocketBroken: the underline socket is broken.
+ /// * - SocketError: other socket related errors.
+ pub fn recv_into_bufs(
+ &self,
+ bufs: &mut [IoSliceMut],
+ allow_fd: bool,
+ ) -> Result<(usize, Option<Vec<File>>)> {
+ let max_fds = if allow_fd { MAX_ATTACHED_FD_ENTRIES } else { 0 };
+ let (bytes, fds) = self.sock.recv_vectored_with_fds(bufs, max_fds)?;
+
+ // 0-bytes indicates that the connection is closed.
+ if bytes == 0 {
+ return Err(Error::Disconnect);
+ }
+
+ let files = if fds.is_empty() {
+ None
+ } else {
+ Some(fds.into_iter().map(File::from).collect())
+ };
+
+ Ok((bytes, files))
+ }
+}
+
+impl AsRawDescriptor for SocketPlatformConnection {
+ fn as_raw_descriptor(&self) -> RawDescriptor {
+ self.sock.as_raw_descriptor()
+ }
+}
+
+impl AsMut<SystemStream> for SocketPlatformConnection {
+ fn as_mut(&mut self) -> &mut SystemStream {
+ self.sock.inner_mut()
+ }
+}
+
+/// Convert a `SafeDescriptor` to a `UnixStream`.
+///
+/// # Safety
+///
+/// `file` must represent a unix domain socket.
+pub unsafe fn to_system_stream(fd: SafeDescriptor) -> Result<SystemStream> {
+ Ok(fd.into())
+}
+
+impl<S: Frontend> AsRawDescriptor for FrontendServer<S> {
+ /// Used for polling.
+ fn as_raw_descriptor(&self) -> RawDescriptor {
+ self.sub_sock.as_raw_descriptor()
+ }
+}
+
+impl<S: Frontend> FrontendServer<S> {
+ /// Create a `FrontendServer` that uses a Unix stream internally.
+ ///
+ /// The returned `SafeDescriptor` is the client side of the stream and should be sent to the
+ /// backend using [BackendClient::set_slave_request_fd()].
+ ///
+ /// [BackendClient::set_slave_request_fd()]: struct.BackendClient.html#method.set_slave_request_fd
+ pub fn with_stream(backend: S) -> Result<(Self, SafeDescriptor)> {
+ let (tx, rx) = SystemStream::pair()?;
+ Ok((
+ Self::new(backend, rx)?,
+ SafeDescriptor::from(OwnedFd::from(tx)),
+ ))
+ }
+}
+
+#[cfg(test)]
+pub(crate) mod tests {
+ use tempfile::Builder;
+ use tempfile::TempDir;
+
+ use super::*;
+ use crate::backend_client::BackendClient;
+ use crate::backend_server::Backend;
+ use crate::backend_server::BackendServer;
+ use crate::connection::Listener;
+ use crate::message::FrontendReq;
+ use crate::Connection;
+
+ pub(crate) fn temp_dir() -> TempDir {
+ Builder::new().prefix("/tmp/vhost_test").tempdir().unwrap()
+ }
+
+ pub(crate) fn create_pair() -> (BackendClient, Connection<FrontendReq>) {
+ let dir = temp_dir();
+ let mut path = dir.path().to_owned();
+ path.push("sock");
+ let mut listener = SocketListener::new(&path, true).unwrap();
+ listener.set_nonblocking(true).unwrap();
+ let backend_client = BackendClient::connect(path).unwrap();
+ let server_connection = listener.accept().unwrap().unwrap();
+ (backend_client, server_connection)
+ }
+
+ pub(crate) fn create_connection_pair() -> (Connection<FrontendReq>, Connection<FrontendReq>) {
+ let dir = temp_dir();
+ let mut path = dir.path().to_owned();
+ path.push("sock");
+ let mut listener = SocketListener::new(&path, true).unwrap();
+ listener.set_nonblocking(true).unwrap();
+ let client_connection = Connection::<FrontendReq>::connect(path).unwrap();
+ let server_connection = listener.accept().unwrap().unwrap();
+ (client_connection, server_connection)
+ }
+
+ pub(crate) fn create_client_server_pair<S>(backend: S) -> (BackendClient, BackendServer<S>)
+ where
+ S: Backend,
+ {
+ let dir = Builder::new().prefix("/tmp/vhost_test").tempdir().unwrap();
+ let mut path = dir.path().to_owned();
+ path.push("sock");
+ let mut listener = SocketListener::new(&path, true).unwrap();
+ let backend_client = BackendClient::connect(&path).unwrap();
+ let connection = listener.accept().unwrap().unwrap();
+ let req_handler = BackendServer::new(connection, backend);
+ (backend_client, req_handler)
+ }
+
+ #[test]
+ fn create_listener() {
+ let dir = temp_dir();
+ let mut path = dir.path().to_owned();
+ path.push("sock");
+ let listener = SocketListener::new(&path, true).unwrap();
+
+ assert!(listener.as_raw_descriptor() > 0);
+ }
+
+ #[test]
+ fn accept_connection() {
+ let dir = temp_dir();
+ let mut path = dir.path().to_owned();
+ path.push("sock");
+ let mut listener = SocketListener::new(&path, true).unwrap();
+ listener.set_nonblocking(true).unwrap();
+
+ // accept on a fd without incoming connection
+ let conn = listener.accept().unwrap();
+ assert!(conn.is_none());
+ }
+
+ #[test]
+ fn test_create_failure() {
+ let dir = temp_dir();
+ let mut path = dir.path().to_owned();
+ path.push("sock");
+ let _ = SocketListener::new(&path, true).unwrap();
+ let _ = SocketListener::new(&path, false).is_err();
+ assert!(BackendClient::connect(&path).is_err());
+
+ let mut listener = SocketListener::new(&path, true).unwrap();
+ assert!(SocketListener::new(&path, false).is_err());
+ listener.set_nonblocking(true).unwrap();
+
+ let _backend_client = BackendClient::connect(&path).unwrap();
+ let _server_connection = listener.accept().unwrap().unwrap();
+ }
+
+ #[test]
+ fn test_advance_slices() {
+ // Test case from https://doc.rust-lang.org/std/io/struct.IoSlice.html#method.advance_slices
+ let buf1 = [1; 8];
+ let buf2 = [2; 16];
+ let buf3 = [3; 8];
+ let mut bufs = &mut [&buf1[..], &buf2[..], &buf3[..]][..];
+ advance_slices(&mut bufs, 10);
+ assert_eq!(bufs[0], [2; 14].as_ref());
+ assert_eq!(bufs[1], [3; 8].as_ref());
+ }
+}
diff --git a/third_party/vmm_vhost/src/sys/windows.rs b/third_party/vmm_vhost/src/sys/windows.rs
index 3408fba..801dfb0 100644
--- a/third_party/vmm_vhost/src/sys/windows.rs
+++ b/third_party/vmm_vhost/src/sys/windows.rs
@@ -3,9 +3,255 @@
//! Windows specific code that keeps rest of the code in the crate platform independent.
+use std::cmp::min;
+use std::fs::File;
+use std::io::IoSliceMut;
+use std::path::Path;
+use std::ptr::copy_nonoverlapping;
+
+use base::AsRawDescriptor;
+use base::CloseNotifier;
+use base::FromRawDescriptor;
+use base::RawDescriptor;
+use base::ReadNotifier;
+use base::SafeDescriptor;
use base::Tube;
+use serde::Deserialize;
+use serde::Serialize;
+use tube_transporter::packed_tube;
+
+use crate::Error;
+use crate::Frontend;
+use crate::FrontendServer;
+use crate::Result;
/// Alias to enable platform independent code.
pub type SystemStream = Tube;
-pub(crate) use crate::connection::TubePlatformConnection as PlatformConnection;
+pub use TubePlatformConnection as PlatformConnection;
+
+#[derive(Serialize, Deserialize)]
+struct RawDescriptorContainer {
+ #[serde(with = "base::with_raw_descriptor")]
+ rd: RawDescriptor,
+}
+
+#[derive(Serialize, Deserialize)]
+struct Message {
+ rds: Vec<RawDescriptorContainer>,
+ data: Vec<u8>,
+}
+
+/// Tube based vhost-user connection.
+pub struct TubePlatformConnection {
+ tube: Tube,
+}
+
+impl TubePlatformConnection {
+ pub(crate) fn get_tube(&self) -> &Tube {
+ &self.tube
+ }
+}
+
+impl From<Tube> for TubePlatformConnection {
+ fn from(tube: Tube) -> Self {
+ Self { tube }
+ }
+}
+
+impl TubePlatformConnection {
+ pub fn connect<P: AsRef<Path>>(_path: P) -> Result<Self> {
+ unimplemented!("connections not supported on Tubes")
+ }
+
+ /// Sends a single message over the socket with optional attached file descriptors.
+ ///
+ /// - `hdr`: vhost message header
+ /// - `body`: vhost message body (may be empty to send a header-only message)
+ /// - `payload`: additional bytes to append to `body` (may be empty)
+ pub fn send_message(
+ &self,
+ hdr: &[u8],
+ body: &[u8],
+ payload: &[u8],
+ rds: Option<&[RawDescriptor]>,
+ ) -> Result<()> {
+ let hdr_msg = Message {
+ rds: rds
+ .unwrap_or(&[])
+ .iter()
+ .map(|rd| RawDescriptorContainer { rd: *rd })
+ .collect(),
+ data: hdr.to_vec(),
+ };
+
+ let mut body_data = Vec::with_capacity(body.len() + payload.len());
+ body_data.extend_from_slice(body);
+ body_data.extend_from_slice(payload);
+ let body_msg = Message {
+ rds: Vec::new(),
+ data: body_data,
+ };
+
+ // We send the header and the body separately here. This is necessary on Windows. Otherwise
+ // the recv side cannot read the header independently (the transport is message oriented).
+ self.tube.send(&hdr_msg)?;
+ if !body_msg.data.is_empty() {
+ self.tube.send(&body_msg)?;
+ }
+
+ Ok(())
+ }
+
+ /// Reads bytes from the tube into the given scatter/gather vectors with optional attached
+ /// file.
+ ///
+ /// The underlying communication channel is a Tube. Providing too little recv buffer space will
+ /// cause data to get dropped (with an error). This is tricky to fix with Tube backing our
+ /// transport layer, and as far as we can tell, is not exercised in practice.
+ ///
+ /// # Return:
+ /// * - (number of bytes received, [received files]) on success
+ /// * - RecvBufferTooSmall: Input bufs is too small for the received buffer.
+ /// * - TubeError: tube related errors.
+ pub fn recv_into_bufs(
+ &self,
+ bufs: &mut [IoSliceMut],
+ _allow_rds: bool,
+ ) -> Result<(usize, Option<Vec<File>>)> {
+ // TODO(b/221882601): implement "allow_rds"
+
+ let msg: Message = self.tube.recv()?;
+
+ let files = match msg.rds.len() {
+ 0 => None,
+ _ => Some(
+ msg.rds
+ .iter()
+ .map(|r|
+ // SAFETY:
+ // Safe because we own r.rd and it is guaranteed valid.
+ unsafe { File::from_raw_descriptor(r.rd) })
+ .collect::<Vec<File>>(),
+ ),
+ };
+
+ let mut bytes_read = 0;
+ for dest_iov in bufs.iter_mut() {
+ if bytes_read >= msg.data.len() {
+ // We've read all the available data into the iovecs.
+ break;
+ }
+
+ let copy_count = min(dest_iov.len(), msg.data.len() - bytes_read);
+
+ // SAFETY:
+ // Safe because:
+ // 1) msg.data and dest_iov do not overlap.
+ // 2) copy_count is bounded by dest_iov's length and msg.data.len() so we can't
+ // overrun.
+ unsafe {
+ copy_nonoverlapping(
+ msg.data.as_ptr().add(bytes_read),
+ dest_iov.as_mut_ptr(),
+ copy_count,
+ )
+ };
+ bytes_read += copy_count;
+ }
+
+ if bytes_read != msg.data.len() {
+ // User didn't supply enough iov space.
+ return Err(Error::RecvBufferTooSmall {
+ got: bytes_read,
+ want: msg.data.len(),
+ });
+ }
+
+ Ok((bytes_read, files))
+ }
+}
+
+/// Convert a`SafeDescriptor` to a `Tube`.
+///
+/// # Safety
+///
+/// `fd` must represent a packed tube.
+pub unsafe fn to_system_stream(fd: SafeDescriptor) -> Result<SystemStream> {
+ // SAFETY: Safe because the file represents a packed tube.
+ let tube = unsafe { packed_tube::unpack(fd).expect("unpacked Tube") };
+ Ok(tube)
+}
+
+impl AsRawDescriptor for TubePlatformConnection {
+ /// WARNING: this function does not return a waitable descriptor! Use base::ReadNotifier
+ /// instead.
+ fn as_raw_descriptor(&self) -> RawDescriptor {
+ self.tube.as_raw_descriptor()
+ }
+}
+
+impl<S: Frontend> FrontendServer<S> {
+ /// Create a `FrontendServer` that uses a Tube internally. Must specify the backend process
+ /// which will receive the Tube.
+ ///
+ /// The returned `SafeDescriptor` is the client side of the tube and should be sent to the
+ /// backend using [BackendClient::set_slave_request_fd()].
+ ///
+ /// [BackendClient::set_slave_request_fd()]: struct.BackendClient.html#method.set_slave_request_fd
+ pub fn with_tube(backend: S, backend_pid: u32) -> Result<(Self, SafeDescriptor)> {
+ let (tx, rx) = SystemStream::pair()?;
+ // SAFETY:
+ // Safe because we expect the tube to be unpacked in the other process.
+ let tx = unsafe { packed_tube::pack(tx, backend_pid).expect("packed tube") };
+ Ok((Self::new(backend, rx)?, tx))
+ }
+}
+
+impl<S: Frontend> ReadNotifier for FrontendServer<S> {
+ /// Used for polling.
+ fn get_read_notifier(&self) -> &dyn AsRawDescriptor {
+ self.sub_sock.0.get_tube().get_read_notifier()
+ }
+}
+
+impl<S: Frontend> CloseNotifier for FrontendServer<S> {
+ /// Used for closing.
+ fn get_close_notifier(&self) -> &dyn AsRawDescriptor {
+ self.sub_sock.0.get_tube().get_close_notifier()
+ }
+}
+
+#[cfg(test)]
+pub(crate) mod tests {
+ use crate::backend_client::BackendClient;
+ use crate::backend_server::Backend;
+ use crate::backend_server::BackendServer;
+ use crate::message::FrontendReq;
+ use crate::Connection;
+ use crate::SystemStream;
+
+ pub(crate) fn create_pair() -> (BackendClient, Connection<FrontendReq>) {
+ let (client_tube, server_tube) = SystemStream::pair().unwrap();
+ let backend_client = BackendClient::from_stream(client_tube);
+ (backend_client, Connection::from(server_tube))
+ }
+
+ pub(crate) fn create_connection_pair() -> (Connection<FrontendReq>, Connection<FrontendReq>) {
+ let (client_tube, server_tube) = SystemStream::pair().unwrap();
+ let backend_connection = Connection::<FrontendReq>::from(client_tube);
+ (backend_connection, Connection::from(server_tube))
+ }
+
+ pub(crate) fn create_client_server_pair<S>(backend: S) -> (BackendClient, BackendServer<S>)
+ where
+ S: Backend,
+ {
+ let (client_tube, server_tube) = SystemStream::pair().unwrap();
+ let backend_client = BackendClient::from_stream(client_tube);
+ (
+ backend_client,
+ BackendServer::<S>::from_stream(server_tube, backend),
+ )
+ }
+}
diff --git a/third_party/vmm_vhost/src/dummy_slave.rs b/third_party/vmm_vhost/src/test_backend.rs
similarity index 93%
rename from third_party/vmm_vhost/src/dummy_slave.rs
rename to third_party/vmm_vhost/src/test_backend.rs
index 789ebdb..5a63726 100644
--- a/third_party/vmm_vhost/src/dummy_slave.rs
+++ b/third_party/vmm_vhost/src/test_backend.rs
@@ -4,9 +4,9 @@
use std::fs::File;
use crate::message::*;
+use crate::Backend;
use crate::Error;
use crate::Result;
-use crate::VhostUserSlaveReqHandler;
pub const MAX_QUEUE_NUM: usize = 2;
pub const MAX_VRING_NUM: usize = 256;
@@ -14,7 +14,7 @@
pub const VIRTIO_FEATURES: u64 = 0x40000003;
#[derive(Default)]
-pub struct DummySlaveReqHandler {
+pub struct TestBackend {
pub owned: bool,
pub features_acked: bool,
pub acked_features: u64,
@@ -30,16 +30,16 @@
pub inflight_file: Option<File>,
}
-impl DummySlaveReqHandler {
+impl TestBackend {
pub fn new() -> Self {
- DummySlaveReqHandler {
+ TestBackend {
queue_num: MAX_QUEUE_NUM,
..Default::default()
}
}
}
-impl VhostUserSlaveReqHandler for DummySlaveReqHandler {
+impl Backend for TestBackend {
fn set_owner(&mut self) -> Result<()> {
if self.owned {
return Err(Error::InvalidOperation);
@@ -174,10 +174,10 @@
}
fn set_protocol_features(&mut self, features: u64) -> Result<()> {
- // Note: slave that reported VHOST_USER_F_PROTOCOL_FEATURES must
+ // Note: Backend that reported VHOST_USER_F_PROTOCOL_FEATURES must
// support this message even before VHOST_USER_SET_FEATURES was
// called.
- // What happens if the master calls set_features() with
+ // What happens if the frontend calls set_features() with
// VHOST_USER_F_PROTOCOL_FEATURES cleared after calling this
// interface?
self.acked_protocol_features = features;
@@ -196,11 +196,6 @@
} else if index as usize >= self.queue_num || index as usize > self.queue_num {
return Err(Error::InvalidParam);
}
-
- // Slave must not pass data to/from the backend until ring is
- // enabled by VHOST_USER_SET_VRING_ENABLE with parameter 1,
- // or after it has been disabled by VHOST_USER_SET_VRING_ENABLE
- // with parameter 0.
self.vring_enabled[index as usize] = enable;
Ok(())
}
diff --git a/tools/chromeos/merge_bot b/tools/chromeos/merge_bot
index 1e4a9ae..08268a5 100755
--- a/tools/chromeos/merge_bot
+++ b/tools/chromeos/merge_bot
@@ -212,6 +212,11 @@
if not commits:
print("Nothing to merge.")
return (0, False)
+ else:
+ commit_authors = git_log(f"HEAD..{revision}", "--pretty=%an").lines()
+ if all(map(lambda x: x == "recipe-roller", commit_authors)):
+ print("All commits are from recipe roller, don't merge yet")
+ return (0, False)
# Create a merge commit for each batch
batches = list(batched(commits, max_size)) if max_size > 0 else [commits]
diff --git a/tools/impl/cros_container/Dockerfile b/tools/impl/cros_container/Dockerfile
index 9bf4de6..4ba7d10 100644
--- a/tools/impl/cros_container/Dockerfile
+++ b/tools/impl/cros_container/Dockerfile
@@ -38,4 +38,4 @@
RUN --security=insecure cros_sdk --create && rm /home/crosvmdev/chromiumos/.cache/sdks/*
RUN --security=insecure cros_sdk setup_board --board=${BOARD}
-RUN --security=insecure cros_sdk emerge-${BOARD} --update --deep -j$(nproc) crosvm
+RUN --security=insecure cros_sdk emerge-${BOARD} --update --deep -j$(nproc) chromeos-base/crosvm
diff --git a/tube_transporter/Cargo.toml b/tube_transporter/Cargo.toml
index e69b8a7..7759b32 100644
--- a/tube_transporter/Cargo.toml
+++ b/tube_transporter/Cargo.toml
@@ -7,7 +7,6 @@
[dependencies]
base = { path = "../base" }
-data_model = { path = "../common/data_model" }
rand = "0.8"
thiserror = "1.0.20"
serde = { version = "1", features = [ "derive" ] }
diff --git a/usb_sys/Android.bp b/usb_sys/Android.bp
index d5d60f8..f78cbf7 100644
--- a/usb_sys/Android.bp
+++ b/usb_sys/Android.bp
@@ -1,5 +1,6 @@
// This file is generated by cargo_embargo.
-// Do not modify this file as most changes will be overridden on upgrade.
+// Do not modify this file after the first "rust_*" or "genrule" module
+// because the changes will be overridden on upgrade.
// Content before the first "rust_*" or "genrule" module is preserved.
package {
diff --git a/usb_util/Android.bp b/usb_util/Android.bp
index c397910..dcd10ce 100644
--- a/usb_util/Android.bp
+++ b/usb_util/Android.bp
@@ -1,5 +1,6 @@
// This file is generated by cargo_embargo.
-// Do not modify this file as most changes will be overridden on upgrade.
+// Do not modify this file after the first "rust_*" or "genrule" module
+// because the changes will be overridden on upgrade.
// Content before the first "rust_*" or "genrule" module is preserved.
package {
diff --git a/vendor/generic/anti_tamper/Android.bp b/vendor/generic/anti_tamper/Android.bp
index 9065f3a..b13eaa1 100644
--- a/vendor/generic/anti_tamper/Android.bp
+++ b/vendor/generic/anti_tamper/Android.bp
@@ -1,5 +1,6 @@
// This file is generated by cargo_embargo.
-// Do not modify this file as most changes will be overridden on upgrade.
+// Do not modify this file after the first "rust_*" or "genrule" module
+// because the changes will be overridden on upgrade.
// Content before the first "rust_*" or "genrule" module is preserved.
package {
diff --git a/vendor/generic/broker_ipc/Android.bp b/vendor/generic/broker_ipc/Android.bp
index 13fce80..bec0ceb 100644
--- a/vendor/generic/broker_ipc/Android.bp
+++ b/vendor/generic/broker_ipc/Android.bp
@@ -1,5 +1,6 @@
// This file is generated by cargo_embargo.
-// Do not modify this file as most changes will be overridden on upgrade.
+// Do not modify this file after the first "rust_*" or "genrule" module
+// because the changes will be overridden on upgrade.
// Content before the first "rust_*" or "genrule" module is preserved.
package {
diff --git a/vendor/generic/crash_report/Android.bp b/vendor/generic/crash_report/Android.bp
index 00a636f..23becbc 100644
--- a/vendor/generic/crash_report/Android.bp
+++ b/vendor/generic/crash_report/Android.bp
@@ -1,5 +1,6 @@
// This file is generated by cargo_embargo.
-// Do not modify this file as most changes will be overridden on upgrade.
+// Do not modify this file after the first "rust_*" or "genrule" module
+// because the changes will be overridden on upgrade.
// Content before the first "rust_*" or "genrule" module is preserved.
package {
diff --git a/vendor/generic/crypto/Android.bp b/vendor/generic/crypto/Android.bp
index 4df25f4..15db079 100644
--- a/vendor/generic/crypto/Android.bp
+++ b/vendor/generic/crypto/Android.bp
@@ -1,5 +1,6 @@
// This file is generated by cargo_embargo.
-// Do not modify this file as most changes will be overridden on upgrade.
+// Do not modify this file after the first "rust_*" or "genrule" module
+// because the changes will be overridden on upgrade.
// Content before the first "rust_*" or "genrule" module is preserved.
package {
@@ -12,10 +13,10 @@
}
rust_library {
- name: "libcrypto_product",
+ name: "libcrypto_generic",
defaults: ["crosvm_inner_defaults"],
host_supported: true,
- crate_name: "crypto_product",
+ crate_name: "crypto_generic",
cargo_env_compat: true,
cargo_pkg_version: "0.1.0",
srcs: ["src/lib.rs"],
diff --git a/vendor/generic/crypto/Cargo.toml b/vendor/generic/crypto/Cargo.toml
index 42bc5ad..db728c1 100644
--- a/vendor/generic/crypto/Cargo.toml
+++ b/vendor/generic/crypto/Cargo.toml
@@ -1,5 +1,5 @@
[package]
-name = "crypto_product"
+name = "crypto_generic"
version = "0.1.0"
authors = ["The ChromiumOS Authors"]
edition = "2021"
diff --git a/vendor/generic/crypto/src/always_panic_impl.rs b/vendor/generic/crypto/src/always_panic_impl.rs
index e4e7c05..9752a75 100644
--- a/vendor/generic/crypto/src/always_panic_impl.rs
+++ b/vendor/generic/crypto/src/always_panic_impl.rs
@@ -20,13 +20,13 @@
impl<T: Write> CryptWriter<T> {
/// Creates a new writer using an internally randomly generated key.
- fn new(_inner_writable: T, _chunk_size_bytes: usize) -> anyhow::Result<Box<Self>> {
+ pub fn new(_inner_writable: T, _chunk_size_bytes: usize) -> anyhow::Result<Box<Self>> {
panic!("no crypto support was compiled in this build");
}
/// Creates a new writer using the provided key and encrypted chunk size. Generally, larger
/// chunks are more performant but have buffering cost of O(chunk_size).
- fn new_from_key(
+ pub fn new_from_key(
_inner_writable: T,
_chunk_size_bytes: usize,
_key: &CryptKey,
@@ -56,12 +56,12 @@
{
/// Given a newly opened file previously written by a `CryptWriter`, extracts the encryption key
/// used to write the file.
- fn extract_key(_inner_readable: T) -> anyhow::Result<CryptKey> {
+ pub fn extract_key(_inner_readable: T) -> anyhow::Result<CryptKey> {
panic!("no crypto support was compiled in this build");
}
/// Creates a CryptReader over a file given a key.
- fn from_file_and_key(_inner_readable: T, _key: &CryptKey) -> anyhow::Result<Box<Self>> {
+ pub fn from_file_and_key(_inner_readable: T, _key: &CryptKey) -> anyhow::Result<Box<Self>> {
panic!("no crypto support was compiled in this build");
}
}
diff --git a/vendor/generic/crypto/src/lib.rs b/vendor/generic/crypto/src/lib.rs
index 0ce5035..2a2efe1 100644
--- a/vendor/generic/crypto/src/lib.rs
+++ b/vendor/generic/crypto/src/lib.rs
@@ -21,8 +21,6 @@
///
/// Note: there may be multiple copies of this trait because we want to restrict the internals
/// to access only within this crate.
-///
-/// WARNING: Under no circumstances should Display or Debug be implemented for this type.
#[derive(Clone, Default, Serialize, Deserialize)]
#[repr(transparent)]
pub struct CryptKey {
@@ -44,7 +42,7 @@
}
impl Debug for SecureByteVec {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
- f.write_str("Debug: SecureByteVec")
+ f.write_str("debug: SecureByteVec")
}
}
diff --git a/vendor/generic/metrics/Android.bp b/vendor/generic/metrics/Android.bp
index e208771..42a6702 100644
--- a/vendor/generic/metrics/Android.bp
+++ b/vendor/generic/metrics/Android.bp
@@ -1,5 +1,6 @@
// This file is generated by cargo_embargo.
-// Do not modify this file as most changes will be overridden on upgrade.
+// Do not modify this file after the first "rust_*" or "genrule" module
+// because the changes will be overridden on upgrade.
// Content before the first "rust_*" or "genrule" module is preserved.
package {
@@ -24,8 +25,6 @@
rustlibs: [
"libanyhow",
"libbase_rust",
- "libcfg_if",
- "libprotobuf",
- "libserde",
+ "libmetrics_events",
],
}
diff --git a/vendor/generic/metrics/Cargo.toml b/vendor/generic/metrics/Cargo.toml
index b7dd68d..b5f57f8 100644
--- a/vendor/generic/metrics/Cargo.toml
+++ b/vendor/generic/metrics/Cargo.toml
@@ -12,12 +12,4 @@
[dependencies]
anyhow = "*"
base = { path = "../../../base" }
-cfg-if = "*"
-protobuf = "3.2"
-serde = { version = "1", features = ["derive"] }
-
-[target.'cfg(windows)'.dependencies]
-win_util = { path = "../../../win_util" }
-
-[build-dependencies]
-proto_build_tools = { path = "../../../proto_build_tools" }
+metrics_events = { path = "../../../metrics_events" }
diff --git a/vendor/generic/metrics/build.rs b/vendor/generic/metrics/build.rs
deleted file mode 100644
index bf1fe61..0000000
--- a/vendor/generic/metrics/build.rs
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2022 The ChromiumOS Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-use std::env;
-use std::path::PathBuf;
-
-fn main() {
- let manifest_dir = std::env::var("CARGO_MANIFEST_DIR").unwrap();
-
- #[cfg(debug_assertions)]
- println!(
- "cargo:rustc-link-search={}\\..\\..\\..\\libs\\debug\\",
- manifest_dir
- );
- #[cfg(not(debug_assertions))]
- println!(
- "cargo:rustc-link-search={}\\..\\..\\..\\libs\\release\\",
- manifest_dir
- );
-
- build_protos(&PathBuf::from(manifest_dir));
-}
-
-fn build_protos(manifest_dir: &PathBuf) {
- let mut event_details_path = manifest_dir.to_owned();
- event_details_path.extend(["protos", "event_details.proto"]);
-
- let mut out_dir = PathBuf::from(env::var("OUT_DIR").expect("OUT_DIR env does not exist."));
- // ANDROID: b/259142784 - we remove metrics_out subdir b/c cargo2android
- // out_dir.push("metrics_protos");
- proto_build_tools::build_protos(&out_dir, &[event_details_path]);
-}
diff --git a/vendor/generic/metrics/protos/event_details.proto b/vendor/generic/metrics/protos/event_details.proto
deleted file mode 100644
index 60e46f4..0000000
--- a/vendor/generic/metrics/protos/event_details.proto
+++ /dev/null
@@ -1,105 +0,0 @@
-// Provides data structures for additional details for metrics events.
-syntax = "proto2";
-
-message RecordDetails {
- reserved 1 to 11, 14 to 18;
- // Additional details about an unexpected exit of a child process within
- // the emulator.
- optional EmulatorChildProcessExitDetails emulator_child_process_exit_details =
- 12;
- // Additional details about wave formats from the Window's host system.
- optional WaveFormatDetails wave_format_details = 13;
- optional EmulatorDllDetails emulator_dll_details = 19;
-}
-
-message WaveFormatDetails {
- // Format requested by WASAPI `GetMixFormat` system call.
- optional WaveFormat requested = 1;
- // Originally the requested wave format that's modified by the emulator. Only
- // populated if the emulator decides the requested wave format should not be
- // used.
- optional WaveFormat modified = 2;
- // Format that is valid and closest matching to the modified format, if the
- // modified was rejected. Should only be populated if modified is also
- // non-null and was rejected by WASAPI `IsFormatSupported` system call.
- optional WaveFormat closest_matched = 3;
-}
-
-// Defines the format of waveformat audio data. This information is used by
-// WASAPI to determine how to process the audio playback data coming from the
-// emulator.
-//
-// The fields in the structure come from WAVEFORMATEXTENSIBLE of win32 api.
-// https://docs.microsoft.com/en-us/windows/win32/api/mmreg/ns-mmreg-waveformatextensible
-message WaveFormat {
- // Ex. 65534 (Maps to WAVE_FORMAT_EXTENSIBLE)
- optional int32 format_tag = 1;
- // Number of channels.
- optional int32 channels = 2;
- // Sample rate in Hz. Ex: 48000
- optional int32 samples_per_sec = 3;
- // Required average data-transfer rate for the format tag. Usually this will
- // be samples_per_sec * block_align, since the format tag is usually
- // WAVE_FORMAT_IEEE_FLOAT or it's extensible and SubFormat is
- // KSDATAFORMAT_SUBTYPE_IEEE_FLOAT.
- optional int32 avg_bytes_per_sec = 4;
- // Minimum atomic unit of data based on the format_tag. Usually this will
- // just be bits_per_samples * channels.
- optional int32 block_align = 5;
- // Bits used per sample. Must be a multiple of 8.
- optional int32 bits_per_sample = 6;
- // Size in bytes of extra information appended to WAVEFORMATEX struct.
- optional int32 size_bytes = 7;
-
- // The next fields are part of the WAVEFORMATEXTENSIBLE struct. They will only
- // be non-null if format_tag is WAVE_FORMAT_EXTENSIBLE.
-
- // Bit depth. Can be any value. Ex. bits_per_sample is 24,
- // but samples is 20. Note: This value is a union, so it could mean something
- // slightly different, but most likely won't. Refer to doc for more info.
- optional int32 samples = 8;
- // Bitmask mapping channels in stream to speaker positions.
- // Ex. 3 ( SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT )
- optional int64 channel_mask = 9;
- // Similar to format_tag, but for WAVEFORMATEXTENSIBLE structs.
- optional WaveFormatSubFormat sub_format = 10;
-
- // Subformat GUID mapping:
- // https://github.com/retep998/winapi-rs/blob/2f76bdea3a79817ccfab496fbd1786d5a697387b/src/shared/ksmedia.rs
- enum WaveFormatSubFormat {
- KSDATAFORMAT_SUBTYPE_INVALID = 0;
- KSDATAFORMAT_SUBTYPE_ANALOG = 1;
- KSDATAFORMAT_SUBTYPE_PCM = 2;
- KSDATAFORMAT_SUBTYPE_IEEE_FLOAT = 3;
- KSDATAFORMAT_SUBTYPE_DRM = 4;
- KSDATAFORMAT_SUBTYPE_ALAW = 5;
- KSDATAFORMAT_SUBTYPE_MULAW = 6;
- KSDATAFORMAT_SUBTYPE_ADPCM = 7;
- KSDATAFORMAT_SUBTYPE_MPEG = 8;
- }
-}
-
-enum EmulatorProcessType {
- PROCESS_TYPE_UNKNOWN = 0;
- PROCESS_TYPE_MAIN = 1;
- PROCESS_TYPE_BLOCK = 2;
- PROCESS_TYPE_METRICS = 3;
- PROCESS_TYPE_NET = 4;
- PROCESS_TYPE_SLIRP = 5;
- PROCESS_TYPE_GPU = 6;
- PROCESS_TYPE_SOUND = 7;
- PROCESS_TYPE_BROKER = 8;
- PROCESS_TYPE_SPU = 9;
-}
-
-message EmulatorChildProcessExitDetails {
- // The Windows exit code of the child process
- optional uint32 exit_code = 1;
- // The process identifier, as defined by the ProcessType enum in the
- // emulator code.
- optional EmulatorProcessType process_type = 2;
-}
-
-message EmulatorDllDetails {
- optional string dll_base_name = 1;
-}
diff --git a/vendor/generic/metrics/src/client.rs b/vendor/generic/metrics/src/client.rs
index 380cca5..f7e691a 100644
--- a/vendor/generic/metrics/src/client.rs
+++ b/vendor/generic/metrics/src/client.rs
@@ -2,17 +2,21 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-use base::Tube;
+use base::RawDescriptor;
+use base::SendTube;
+use metrics_events::MetricEventType;
+use metrics_events::RecordDetails;
-use crate::protos::event_details::RecordDetails;
-use crate::MetricEventType;
use crate::MetricsClientDestructor;
/// This interface exists to be used and re-implemented by downstream forks. Updates shouldn't be
/// done without ensuring they won't cause breakages in dependent codebases.
-pub fn initialize(_: Tube) {}
+pub fn initialize(_: SendTube) {}
#[cfg(test)]
-pub fn force_initialize(_: Tube) {}
+pub fn force_initialize(_: SendTube) {}
+
+pub fn push_descriptors(_: &mut Vec<RawDescriptor>) {}
+
pub fn get_destructor() -> MetricsClientDestructor {
MetricsClientDestructor::new(|| {})
}
diff --git a/vendor/generic/metrics/src/event_types.rs b/vendor/generic/metrics/src/event_types.rs
deleted file mode 100644
index 64ee5ed..0000000
--- a/vendor/generic/metrics/src/event_types.rs
+++ /dev/null
@@ -1,110 +0,0 @@
-// Copyright 2022 The ChromiumOS Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-use std::convert::From;
-use std::convert::TryFrom;
-
-use anyhow::Error;
-use serde::Deserialize;
-use serde::Serialize;
-
-// TODO(mikehoyle): Create a way to generate these directly from the
-// proto for a single source-of-truth.
-#[derive(Copy, Clone, Debug, Serialize, Deserialize)]
-pub enum MetricEventType {
- CpuUsage,
- MemoryUsage,
- Fps,
- JankyFps,
- NetworkTxRate,
- NetworkRxRate,
- Interrupts,
- FrameTime,
- EmulatorGraphicsFreeze,
- EmulatorGraphicsUnfreeze,
- EmulatorGfxstreamVkAbortReason,
- ChildProcessExit,
- ReadIo,
- WriteIo,
- AudioFormatRequestOk,
- AudioFormatModifiedOk,
- AudioFormatFailed,
- TscCoresOutOfSync,
- NetworkTxRateSummarized,
- NetworkRxRateSummarized,
- DllLoaded,
- GraphicsHangRenderThread,
- GraphicsHangSyncThread,
- AudioNoopStreamForced,
- AudioPlaybackError,
- Other(i64),
-}
-
-impl From<MetricEventType> for i64 {
- fn from(event_code: MetricEventType) -> Self {
- match event_code {
- MetricEventType::CpuUsage => 10001,
- MetricEventType::MemoryUsage => 10002,
- MetricEventType::Fps => 10003,
- MetricEventType::JankyFps => 10004,
- MetricEventType::NetworkTxRate => 10005,
- MetricEventType::NetworkRxRate => 10006,
- MetricEventType::Interrupts => 10007,
- MetricEventType::FrameTime => 10008,
- MetricEventType::EmulatorGraphicsFreeze => 10009,
- MetricEventType::EmulatorGraphicsUnfreeze => 10010,
- MetricEventType::EmulatorGfxstreamVkAbortReason => 10011,
- MetricEventType::ChildProcessExit => 10012,
- MetricEventType::ReadIo => 10013,
- MetricEventType::WriteIo => 10014,
- MetricEventType::AudioFormatRequestOk => 10015,
- MetricEventType::AudioFormatModifiedOk => 10016,
- MetricEventType::AudioFormatFailed => 10017,
- MetricEventType::TscCoresOutOfSync => 10018,
- MetricEventType::NetworkTxRateSummarized => 10019,
- MetricEventType::NetworkRxRateSummarized => 10020,
- MetricEventType::DllLoaded => 10021,
- MetricEventType::GraphicsHangRenderThread => 10024,
- MetricEventType::GraphicsHangSyncThread => 10026,
- MetricEventType::AudioNoopStreamForced => 10038,
- MetricEventType::AudioPlaybackError => 10039,
- MetricEventType::Other(code) => code,
- }
- }
-}
-
-impl TryFrom<i64> for MetricEventType {
- type Error = Error;
-
- fn try_from(event_code: i64) -> Result<Self, Self::Error> {
- match event_code {
- 10001 => Ok(MetricEventType::CpuUsage),
- 10002 => Ok(MetricEventType::MemoryUsage),
- 10003 => Ok(MetricEventType::Fps),
- 10004 => Ok(MetricEventType::JankyFps),
- 10005 => Ok(MetricEventType::NetworkTxRate),
- 10006 => Ok(MetricEventType::NetworkRxRate),
- 10007 => Ok(MetricEventType::Interrupts),
- 10008 => Ok(MetricEventType::FrameTime),
- 10009 => Ok(MetricEventType::EmulatorGraphicsFreeze),
- 10010 => Ok(MetricEventType::EmulatorGraphicsUnfreeze),
- 10011 => Ok(MetricEventType::EmulatorGfxstreamVkAbortReason),
- 10012 => Ok(MetricEventType::ChildProcessExit),
- 10013 => Ok(MetricEventType::ReadIo),
- 10014 => Ok(MetricEventType::WriteIo),
- 10015 => Ok(MetricEventType::AudioFormatRequestOk),
- 10016 => Ok(MetricEventType::AudioFormatModifiedOk),
- 10017 => Ok(MetricEventType::AudioFormatFailed),
- 10018 => Ok(MetricEventType::TscCoresOutOfSync),
- 10019 => Ok(MetricEventType::NetworkTxRateSummarized),
- 10020 => Ok(MetricEventType::NetworkRxRateSummarized),
- 10021 => Ok(MetricEventType::DllLoaded),
- 10024 => Ok(MetricEventType::GraphicsHangRenderThread),
- 10026 => Ok(MetricEventType::GraphicsHangSyncThread),
- 10038 => Ok(MetricEventType::AudioNoopStreamForced),
- 10039 => Ok(MetricEventType::AudioPlaybackError),
- _ => Ok(MetricEventType::Other(event_code)),
- }
- }
-}
diff --git a/vendor/generic/metrics/src/lib.rs b/vendor/generic/metrics/src/lib.rs
index 45c30d3..da5c812 100644
--- a/vendor/generic/metrics/src/lib.rs
+++ b/vendor/generic/metrics/src/lib.rs
@@ -6,16 +6,9 @@
//! to log metrics.
mod client;
-mod event_types;
-mod metrics_requests;
mod periodic_logger;
mod request_handler;
-mod sys;
-pub mod protos {
- // ANDROID: b/259142784 - we remove metrics_out subdir b/c cargo2android
- include!(concat!(env!("OUT_DIR"), "/generated.rs"));
-}
mod metrics_cleanup;
use std::time::Duration;
@@ -32,12 +25,11 @@
pub use client::log_histogram_metric;
pub use client::log_metric;
pub use client::merge_session_invariants;
+pub use client::push_descriptors;
pub use client::set_auth_token;
pub use client::set_graphics_api;
pub use client::set_package_name;
-pub use event_types::MetricEventType;
pub use metrics_cleanup::MetricsClientDestructor;
-pub use metrics_requests::MetricsRequest;
pub use periodic_logger::PeriodicLogger;
pub use request_handler::MetricsRequestHandler;
diff --git a/vendor/generic/metrics/src/metrics_requests.rs b/vendor/generic/metrics/src/metrics_requests.rs
deleted file mode 100644
index 33b77f2..0000000
--- a/vendor/generic/metrics/src/metrics_requests.rs
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright 2022 The ChromiumOS Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-//! Structs used to transport log requests between client processes and the logging controller
-
-use serde::Deserialize;
-use serde::Serialize;
-
-use crate::MetricEventType;
-
-#[derive(Serialize, Deserialize, Debug)]
-pub struct LogMetric {
- pub event_code: MetricEventType,
- pub value: i64,
-}
-
-#[derive(Serialize, Deserialize, Debug)]
-pub struct LogDescriptor {
- pub event_code: MetricEventType,
- pub descriptor: i64,
-}
-
-#[derive(Serialize, Deserialize, Debug)]
-pub struct LogHighFrequencyDescriptorMetric {
- pub event_code: MetricEventType,
- pub descriptor: i64,
- pub step: i64,
-}
-
-#[derive(Serialize, Deserialize, Debug)]
-pub struct EventWithSerializedDetails {
- pub event_code: MetricEventType,
- pub serialized_details: Box<[u8]>,
-}
-
-#[derive(Serialize, Deserialize, Debug)]
-pub enum MetricsRequest {
- LogDescriptor(LogDescriptor),
- LogEvent(MetricEventType),
- LogMetric(LogMetric),
- LogHistogram(LogMetric),
- SetAuthToken(String),
- SetGraphicsApi(String),
- SetPackageName(String),
- MergeSessionInvariants(Vec<u8>),
- LogHighFrequencyDescriptorMetric(LogHighFrequencyDescriptorMetric),
- LogEventWithSerializedDetails(EventWithSerializedDetails),
-}
diff --git a/vendor/generic/metrics/src/out/event_details.rs b/vendor/generic/metrics/src/out/event_details.rs
deleted file mode 100644
index a44b05c..0000000
--- a/vendor/generic/metrics/src/out/event_details.rs
+++ /dev/null
@@ -1,1408 +0,0 @@
-// This file is generated by rust-protobuf 3.2.0. Do not edit
-// .proto file is parsed by protoc 3.21.12
-// @generated
-
-// https://github.com/rust-lang/rust-clippy/issues/702
-#![allow(unknown_lints)]
-#![allow(clippy::all)]
-
-#![allow(unused_attributes)]
-#![cfg_attr(rustfmt, rustfmt::skip)]
-
-#![allow(box_pointers)]
-#![allow(dead_code)]
-#![allow(missing_docs)]
-#![allow(non_camel_case_types)]
-#![allow(non_snake_case)]
-#![allow(non_upper_case_globals)]
-#![allow(trivial_casts)]
-#![allow(unused_results)]
-#![allow(unused_mut)]
-
-//! Generated file from `event_details.proto`
-
-/// Generated files are compatible only with the same version
-/// of protobuf runtime.
-const _PROTOBUF_VERSION_CHECK: () = ::protobuf::VERSION_3_2_0;
-
-#[derive(PartialEq,Clone,Default,Debug)]
-// @@protoc_insertion_point(message:RecordDetails)
-pub struct RecordDetails {
- // message fields
- // @@protoc_insertion_point(field:RecordDetails.emulator_child_process_exit_details)
- pub emulator_child_process_exit_details: ::protobuf::MessageField<EmulatorChildProcessExitDetails>,
- // @@protoc_insertion_point(field:RecordDetails.wave_format_details)
- pub wave_format_details: ::protobuf::MessageField<WaveFormatDetails>,
- // @@protoc_insertion_point(field:RecordDetails.emulator_dll_details)
- pub emulator_dll_details: ::protobuf::MessageField<EmulatorDllDetails>,
- // special fields
- // @@protoc_insertion_point(special_field:RecordDetails.special_fields)
- pub special_fields: ::protobuf::SpecialFields,
-}
-
-impl<'a> ::std::default::Default for &'a RecordDetails {
- fn default() -> &'a RecordDetails {
- <RecordDetails as ::protobuf::Message>::default_instance()
- }
-}
-
-impl RecordDetails {
- pub fn new() -> RecordDetails {
- ::std::default::Default::default()
- }
-
- fn generated_message_descriptor_data() -> ::protobuf::reflect::GeneratedMessageDescriptorData {
- let mut fields = ::std::vec::Vec::with_capacity(3);
- let mut oneofs = ::std::vec::Vec::with_capacity(0);
- fields.push(::protobuf::reflect::rt::v2::make_message_field_accessor::<_, EmulatorChildProcessExitDetails>(
- "emulator_child_process_exit_details",
- |m: &RecordDetails| { &m.emulator_child_process_exit_details },
- |m: &mut RecordDetails| { &mut m.emulator_child_process_exit_details },
- ));
- fields.push(::protobuf::reflect::rt::v2::make_message_field_accessor::<_, WaveFormatDetails>(
- "wave_format_details",
- |m: &RecordDetails| { &m.wave_format_details },
- |m: &mut RecordDetails| { &mut m.wave_format_details },
- ));
- fields.push(::protobuf::reflect::rt::v2::make_message_field_accessor::<_, EmulatorDllDetails>(
- "emulator_dll_details",
- |m: &RecordDetails| { &m.emulator_dll_details },
- |m: &mut RecordDetails| { &mut m.emulator_dll_details },
- ));
- ::protobuf::reflect::GeneratedMessageDescriptorData::new_2::<RecordDetails>(
- "RecordDetails",
- fields,
- oneofs,
- )
- }
-}
-
-impl ::protobuf::Message for RecordDetails {
- const NAME: &'static str = "RecordDetails";
-
- fn is_initialized(&self) -> bool {
- true
- }
-
- fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::Result<()> {
- while let Some(tag) = is.read_raw_tag_or_eof()? {
- match tag {
- 98 => {
- ::protobuf::rt::read_singular_message_into_field(is, &mut self.emulator_child_process_exit_details)?;
- },
- 106 => {
- ::protobuf::rt::read_singular_message_into_field(is, &mut self.wave_format_details)?;
- },
- 154 => {
- ::protobuf::rt::read_singular_message_into_field(is, &mut self.emulator_dll_details)?;
- },
- tag => {
- ::protobuf::rt::read_unknown_or_skip_group(tag, is, self.special_fields.mut_unknown_fields())?;
- },
- };
- }
- ::std::result::Result::Ok(())
- }
-
- // Compute sizes of nested messages
- #[allow(unused_variables)]
- fn compute_size(&self) -> u64 {
- let mut my_size = 0;
- if let Some(v) = self.emulator_child_process_exit_details.as_ref() {
- let len = v.compute_size();
- my_size += 1 + ::protobuf::rt::compute_raw_varint64_size(len) + len;
- }
- if let Some(v) = self.wave_format_details.as_ref() {
- let len = v.compute_size();
- my_size += 1 + ::protobuf::rt::compute_raw_varint64_size(len) + len;
- }
- if let Some(v) = self.emulator_dll_details.as_ref() {
- let len = v.compute_size();
- my_size += 2 + ::protobuf::rt::compute_raw_varint64_size(len) + len;
- }
- my_size += ::protobuf::rt::unknown_fields_size(self.special_fields.unknown_fields());
- self.special_fields.cached_size().set(my_size as u32);
- my_size
- }
-
- fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::Result<()> {
- if let Some(v) = self.emulator_child_process_exit_details.as_ref() {
- ::protobuf::rt::write_message_field_with_cached_size(12, v, os)?;
- }
- if let Some(v) = self.wave_format_details.as_ref() {
- ::protobuf::rt::write_message_field_with_cached_size(13, v, os)?;
- }
- if let Some(v) = self.emulator_dll_details.as_ref() {
- ::protobuf::rt::write_message_field_with_cached_size(19, v, os)?;
- }
- os.write_unknown_fields(self.special_fields.unknown_fields())?;
- ::std::result::Result::Ok(())
- }
-
- fn special_fields(&self) -> &::protobuf::SpecialFields {
- &self.special_fields
- }
-
- fn mut_special_fields(&mut self) -> &mut ::protobuf::SpecialFields {
- &mut self.special_fields
- }
-
- fn new() -> RecordDetails {
- RecordDetails::new()
- }
-
- fn clear(&mut self) {
- self.emulator_child_process_exit_details.clear();
- self.wave_format_details.clear();
- self.emulator_dll_details.clear();
- self.special_fields.clear();
- }
-
- fn default_instance() -> &'static RecordDetails {
- static instance: RecordDetails = RecordDetails {
- emulator_child_process_exit_details: ::protobuf::MessageField::none(),
- wave_format_details: ::protobuf::MessageField::none(),
- emulator_dll_details: ::protobuf::MessageField::none(),
- special_fields: ::protobuf::SpecialFields::new(),
- };
- &instance
- }
-}
-
-impl ::protobuf::MessageFull for RecordDetails {
- fn descriptor() -> ::protobuf::reflect::MessageDescriptor {
- static descriptor: ::protobuf::rt::Lazy<::protobuf::reflect::MessageDescriptor> = ::protobuf::rt::Lazy::new();
- descriptor.get(|| file_descriptor().message_by_package_relative_name("RecordDetails").unwrap()).clone()
- }
-}
-
-impl ::std::fmt::Display for RecordDetails {
- fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
- ::protobuf::text_format::fmt(self, f)
- }
-}
-
-impl ::protobuf::reflect::ProtobufValue for RecordDetails {
- type RuntimeType = ::protobuf::reflect::rt::RuntimeTypeMessage<Self>;
-}
-
-#[derive(PartialEq,Clone,Default,Debug)]
-// @@protoc_insertion_point(message:WaveFormatDetails)
-pub struct WaveFormatDetails {
- // message fields
- // @@protoc_insertion_point(field:WaveFormatDetails.requested)
- pub requested: ::protobuf::MessageField<WaveFormat>,
- // @@protoc_insertion_point(field:WaveFormatDetails.modified)
- pub modified: ::protobuf::MessageField<WaveFormat>,
- // @@protoc_insertion_point(field:WaveFormatDetails.closest_matched)
- pub closest_matched: ::protobuf::MessageField<WaveFormat>,
- // special fields
- // @@protoc_insertion_point(special_field:WaveFormatDetails.special_fields)
- pub special_fields: ::protobuf::SpecialFields,
-}
-
-impl<'a> ::std::default::Default for &'a WaveFormatDetails {
- fn default() -> &'a WaveFormatDetails {
- <WaveFormatDetails as ::protobuf::Message>::default_instance()
- }
-}
-
-impl WaveFormatDetails {
- pub fn new() -> WaveFormatDetails {
- ::std::default::Default::default()
- }
-
- fn generated_message_descriptor_data() -> ::protobuf::reflect::GeneratedMessageDescriptorData {
- let mut fields = ::std::vec::Vec::with_capacity(3);
- let mut oneofs = ::std::vec::Vec::with_capacity(0);
- fields.push(::protobuf::reflect::rt::v2::make_message_field_accessor::<_, WaveFormat>(
- "requested",
- |m: &WaveFormatDetails| { &m.requested },
- |m: &mut WaveFormatDetails| { &mut m.requested },
- ));
- fields.push(::protobuf::reflect::rt::v2::make_message_field_accessor::<_, WaveFormat>(
- "modified",
- |m: &WaveFormatDetails| { &m.modified },
- |m: &mut WaveFormatDetails| { &mut m.modified },
- ));
- fields.push(::protobuf::reflect::rt::v2::make_message_field_accessor::<_, WaveFormat>(
- "closest_matched",
- |m: &WaveFormatDetails| { &m.closest_matched },
- |m: &mut WaveFormatDetails| { &mut m.closest_matched },
- ));
- ::protobuf::reflect::GeneratedMessageDescriptorData::new_2::<WaveFormatDetails>(
- "WaveFormatDetails",
- fields,
- oneofs,
- )
- }
-}
-
-impl ::protobuf::Message for WaveFormatDetails {
- const NAME: &'static str = "WaveFormatDetails";
-
- fn is_initialized(&self) -> bool {
- true
- }
-
- fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::Result<()> {
- while let Some(tag) = is.read_raw_tag_or_eof()? {
- match tag {
- 10 => {
- ::protobuf::rt::read_singular_message_into_field(is, &mut self.requested)?;
- },
- 18 => {
- ::protobuf::rt::read_singular_message_into_field(is, &mut self.modified)?;
- },
- 26 => {
- ::protobuf::rt::read_singular_message_into_field(is, &mut self.closest_matched)?;
- },
- tag => {
- ::protobuf::rt::read_unknown_or_skip_group(tag, is, self.special_fields.mut_unknown_fields())?;
- },
- };
- }
- ::std::result::Result::Ok(())
- }
-
- // Compute sizes of nested messages
- #[allow(unused_variables)]
- fn compute_size(&self) -> u64 {
- let mut my_size = 0;
- if let Some(v) = self.requested.as_ref() {
- let len = v.compute_size();
- my_size += 1 + ::protobuf::rt::compute_raw_varint64_size(len) + len;
- }
- if let Some(v) = self.modified.as_ref() {
- let len = v.compute_size();
- my_size += 1 + ::protobuf::rt::compute_raw_varint64_size(len) + len;
- }
- if let Some(v) = self.closest_matched.as_ref() {
- let len = v.compute_size();
- my_size += 1 + ::protobuf::rt::compute_raw_varint64_size(len) + len;
- }
- my_size += ::protobuf::rt::unknown_fields_size(self.special_fields.unknown_fields());
- self.special_fields.cached_size().set(my_size as u32);
- my_size
- }
-
- fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::Result<()> {
- if let Some(v) = self.requested.as_ref() {
- ::protobuf::rt::write_message_field_with_cached_size(1, v, os)?;
- }
- if let Some(v) = self.modified.as_ref() {
- ::protobuf::rt::write_message_field_with_cached_size(2, v, os)?;
- }
- if let Some(v) = self.closest_matched.as_ref() {
- ::protobuf::rt::write_message_field_with_cached_size(3, v, os)?;
- }
- os.write_unknown_fields(self.special_fields.unknown_fields())?;
- ::std::result::Result::Ok(())
- }
-
- fn special_fields(&self) -> &::protobuf::SpecialFields {
- &self.special_fields
- }
-
- fn mut_special_fields(&mut self) -> &mut ::protobuf::SpecialFields {
- &mut self.special_fields
- }
-
- fn new() -> WaveFormatDetails {
- WaveFormatDetails::new()
- }
-
- fn clear(&mut self) {
- self.requested.clear();
- self.modified.clear();
- self.closest_matched.clear();
- self.special_fields.clear();
- }
-
- fn default_instance() -> &'static WaveFormatDetails {
- static instance: WaveFormatDetails = WaveFormatDetails {
- requested: ::protobuf::MessageField::none(),
- modified: ::protobuf::MessageField::none(),
- closest_matched: ::protobuf::MessageField::none(),
- special_fields: ::protobuf::SpecialFields::new(),
- };
- &instance
- }
-}
-
-impl ::protobuf::MessageFull for WaveFormatDetails {
- fn descriptor() -> ::protobuf::reflect::MessageDescriptor {
- static descriptor: ::protobuf::rt::Lazy<::protobuf::reflect::MessageDescriptor> = ::protobuf::rt::Lazy::new();
- descriptor.get(|| file_descriptor().message_by_package_relative_name("WaveFormatDetails").unwrap()).clone()
- }
-}
-
-impl ::std::fmt::Display for WaveFormatDetails {
- fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
- ::protobuf::text_format::fmt(self, f)
- }
-}
-
-impl ::protobuf::reflect::ProtobufValue for WaveFormatDetails {
- type RuntimeType = ::protobuf::reflect::rt::RuntimeTypeMessage<Self>;
-}
-
-#[derive(PartialEq,Clone,Default,Debug)]
-// @@protoc_insertion_point(message:WaveFormat)
-pub struct WaveFormat {
- // message fields
- // @@protoc_insertion_point(field:WaveFormat.format_tag)
- pub format_tag: ::std::option::Option<i32>,
- // @@protoc_insertion_point(field:WaveFormat.channels)
- pub channels: ::std::option::Option<i32>,
- // @@protoc_insertion_point(field:WaveFormat.samples_per_sec)
- pub samples_per_sec: ::std::option::Option<i32>,
- // @@protoc_insertion_point(field:WaveFormat.avg_bytes_per_sec)
- pub avg_bytes_per_sec: ::std::option::Option<i32>,
- // @@protoc_insertion_point(field:WaveFormat.block_align)
- pub block_align: ::std::option::Option<i32>,
- // @@protoc_insertion_point(field:WaveFormat.bits_per_sample)
- pub bits_per_sample: ::std::option::Option<i32>,
- // @@protoc_insertion_point(field:WaveFormat.size_bytes)
- pub size_bytes: ::std::option::Option<i32>,
- // @@protoc_insertion_point(field:WaveFormat.samples)
- pub samples: ::std::option::Option<i32>,
- // @@protoc_insertion_point(field:WaveFormat.channel_mask)
- pub channel_mask: ::std::option::Option<i64>,
- // @@protoc_insertion_point(field:WaveFormat.sub_format)
- pub sub_format: ::std::option::Option<::protobuf::EnumOrUnknown<wave_format::WaveFormatSubFormat>>,
- // special fields
- // @@protoc_insertion_point(special_field:WaveFormat.special_fields)
- pub special_fields: ::protobuf::SpecialFields,
-}
-
-impl<'a> ::std::default::Default for &'a WaveFormat {
- fn default() -> &'a WaveFormat {
- <WaveFormat as ::protobuf::Message>::default_instance()
- }
-}
-
-impl WaveFormat {
- pub fn new() -> WaveFormat {
- ::std::default::Default::default()
- }
-
- // optional int32 format_tag = 1;
-
- pub fn format_tag(&self) -> i32 {
- self.format_tag.unwrap_or(0)
- }
-
- pub fn clear_format_tag(&mut self) {
- self.format_tag = ::std::option::Option::None;
- }
-
- pub fn has_format_tag(&self) -> bool {
- self.format_tag.is_some()
- }
-
- // Param is passed by value, moved
- pub fn set_format_tag(&mut self, v: i32) {
- self.format_tag = ::std::option::Option::Some(v);
- }
-
- // optional int32 channels = 2;
-
- pub fn channels(&self) -> i32 {
- self.channels.unwrap_or(0)
- }
-
- pub fn clear_channels(&mut self) {
- self.channels = ::std::option::Option::None;
- }
-
- pub fn has_channels(&self) -> bool {
- self.channels.is_some()
- }
-
- // Param is passed by value, moved
- pub fn set_channels(&mut self, v: i32) {
- self.channels = ::std::option::Option::Some(v);
- }
-
- // optional int32 samples_per_sec = 3;
-
- pub fn samples_per_sec(&self) -> i32 {
- self.samples_per_sec.unwrap_or(0)
- }
-
- pub fn clear_samples_per_sec(&mut self) {
- self.samples_per_sec = ::std::option::Option::None;
- }
-
- pub fn has_samples_per_sec(&self) -> bool {
- self.samples_per_sec.is_some()
- }
-
- // Param is passed by value, moved
- pub fn set_samples_per_sec(&mut self, v: i32) {
- self.samples_per_sec = ::std::option::Option::Some(v);
- }
-
- // optional int32 avg_bytes_per_sec = 4;
-
- pub fn avg_bytes_per_sec(&self) -> i32 {
- self.avg_bytes_per_sec.unwrap_or(0)
- }
-
- pub fn clear_avg_bytes_per_sec(&mut self) {
- self.avg_bytes_per_sec = ::std::option::Option::None;
- }
-
- pub fn has_avg_bytes_per_sec(&self) -> bool {
- self.avg_bytes_per_sec.is_some()
- }
-
- // Param is passed by value, moved
- pub fn set_avg_bytes_per_sec(&mut self, v: i32) {
- self.avg_bytes_per_sec = ::std::option::Option::Some(v);
- }
-
- // optional int32 block_align = 5;
-
- pub fn block_align(&self) -> i32 {
- self.block_align.unwrap_or(0)
- }
-
- pub fn clear_block_align(&mut self) {
- self.block_align = ::std::option::Option::None;
- }
-
- pub fn has_block_align(&self) -> bool {
- self.block_align.is_some()
- }
-
- // Param is passed by value, moved
- pub fn set_block_align(&mut self, v: i32) {
- self.block_align = ::std::option::Option::Some(v);
- }
-
- // optional int32 bits_per_sample = 6;
-
- pub fn bits_per_sample(&self) -> i32 {
- self.bits_per_sample.unwrap_or(0)
- }
-
- pub fn clear_bits_per_sample(&mut self) {
- self.bits_per_sample = ::std::option::Option::None;
- }
-
- pub fn has_bits_per_sample(&self) -> bool {
- self.bits_per_sample.is_some()
- }
-
- // Param is passed by value, moved
- pub fn set_bits_per_sample(&mut self, v: i32) {
- self.bits_per_sample = ::std::option::Option::Some(v);
- }
-
- // optional int32 size_bytes = 7;
-
- pub fn size_bytes(&self) -> i32 {
- self.size_bytes.unwrap_or(0)
- }
-
- pub fn clear_size_bytes(&mut self) {
- self.size_bytes = ::std::option::Option::None;
- }
-
- pub fn has_size_bytes(&self) -> bool {
- self.size_bytes.is_some()
- }
-
- // Param is passed by value, moved
- pub fn set_size_bytes(&mut self, v: i32) {
- self.size_bytes = ::std::option::Option::Some(v);
- }
-
- // optional int32 samples = 8;
-
- pub fn samples(&self) -> i32 {
- self.samples.unwrap_or(0)
- }
-
- pub fn clear_samples(&mut self) {
- self.samples = ::std::option::Option::None;
- }
-
- pub fn has_samples(&self) -> bool {
- self.samples.is_some()
- }
-
- // Param is passed by value, moved
- pub fn set_samples(&mut self, v: i32) {
- self.samples = ::std::option::Option::Some(v);
- }
-
- // optional int64 channel_mask = 9;
-
- pub fn channel_mask(&self) -> i64 {
- self.channel_mask.unwrap_or(0)
- }
-
- pub fn clear_channel_mask(&mut self) {
- self.channel_mask = ::std::option::Option::None;
- }
-
- pub fn has_channel_mask(&self) -> bool {
- self.channel_mask.is_some()
- }
-
- // Param is passed by value, moved
- pub fn set_channel_mask(&mut self, v: i64) {
- self.channel_mask = ::std::option::Option::Some(v);
- }
-
- // optional .WaveFormat.WaveFormatSubFormat sub_format = 10;
-
- pub fn sub_format(&self) -> wave_format::WaveFormatSubFormat {
- match self.sub_format {
- Some(e) => e.enum_value_or(wave_format::WaveFormatSubFormat::KSDATAFORMAT_SUBTYPE_INVALID),
- None => wave_format::WaveFormatSubFormat::KSDATAFORMAT_SUBTYPE_INVALID,
- }
- }
-
- pub fn clear_sub_format(&mut self) {
- self.sub_format = ::std::option::Option::None;
- }
-
- pub fn has_sub_format(&self) -> bool {
- self.sub_format.is_some()
- }
-
- // Param is passed by value, moved
- pub fn set_sub_format(&mut self, v: wave_format::WaveFormatSubFormat) {
- self.sub_format = ::std::option::Option::Some(::protobuf::EnumOrUnknown::new(v));
- }
-
- fn generated_message_descriptor_data() -> ::protobuf::reflect::GeneratedMessageDescriptorData {
- let mut fields = ::std::vec::Vec::with_capacity(10);
- let mut oneofs = ::std::vec::Vec::with_capacity(0);
- fields.push(::protobuf::reflect::rt::v2::make_option_accessor::<_, _>(
- "format_tag",
- |m: &WaveFormat| { &m.format_tag },
- |m: &mut WaveFormat| { &mut m.format_tag },
- ));
- fields.push(::protobuf::reflect::rt::v2::make_option_accessor::<_, _>(
- "channels",
- |m: &WaveFormat| { &m.channels },
- |m: &mut WaveFormat| { &mut m.channels },
- ));
- fields.push(::protobuf::reflect::rt::v2::make_option_accessor::<_, _>(
- "samples_per_sec",
- |m: &WaveFormat| { &m.samples_per_sec },
- |m: &mut WaveFormat| { &mut m.samples_per_sec },
- ));
- fields.push(::protobuf::reflect::rt::v2::make_option_accessor::<_, _>(
- "avg_bytes_per_sec",
- |m: &WaveFormat| { &m.avg_bytes_per_sec },
- |m: &mut WaveFormat| { &mut m.avg_bytes_per_sec },
- ));
- fields.push(::protobuf::reflect::rt::v2::make_option_accessor::<_, _>(
- "block_align",
- |m: &WaveFormat| { &m.block_align },
- |m: &mut WaveFormat| { &mut m.block_align },
- ));
- fields.push(::protobuf::reflect::rt::v2::make_option_accessor::<_, _>(
- "bits_per_sample",
- |m: &WaveFormat| { &m.bits_per_sample },
- |m: &mut WaveFormat| { &mut m.bits_per_sample },
- ));
- fields.push(::protobuf::reflect::rt::v2::make_option_accessor::<_, _>(
- "size_bytes",
- |m: &WaveFormat| { &m.size_bytes },
- |m: &mut WaveFormat| { &mut m.size_bytes },
- ));
- fields.push(::protobuf::reflect::rt::v2::make_option_accessor::<_, _>(
- "samples",
- |m: &WaveFormat| { &m.samples },
- |m: &mut WaveFormat| { &mut m.samples },
- ));
- fields.push(::protobuf::reflect::rt::v2::make_option_accessor::<_, _>(
- "channel_mask",
- |m: &WaveFormat| { &m.channel_mask },
- |m: &mut WaveFormat| { &mut m.channel_mask },
- ));
- fields.push(::protobuf::reflect::rt::v2::make_option_accessor::<_, _>(
- "sub_format",
- |m: &WaveFormat| { &m.sub_format },
- |m: &mut WaveFormat| { &mut m.sub_format },
- ));
- ::protobuf::reflect::GeneratedMessageDescriptorData::new_2::<WaveFormat>(
- "WaveFormat",
- fields,
- oneofs,
- )
- }
-}
-
-impl ::protobuf::Message for WaveFormat {
- const NAME: &'static str = "WaveFormat";
-
- fn is_initialized(&self) -> bool {
- true
- }
-
- fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::Result<()> {
- while let Some(tag) = is.read_raw_tag_or_eof()? {
- match tag {
- 8 => {
- self.format_tag = ::std::option::Option::Some(is.read_int32()?);
- },
- 16 => {
- self.channels = ::std::option::Option::Some(is.read_int32()?);
- },
- 24 => {
- self.samples_per_sec = ::std::option::Option::Some(is.read_int32()?);
- },
- 32 => {
- self.avg_bytes_per_sec = ::std::option::Option::Some(is.read_int32()?);
- },
- 40 => {
- self.block_align = ::std::option::Option::Some(is.read_int32()?);
- },
- 48 => {
- self.bits_per_sample = ::std::option::Option::Some(is.read_int32()?);
- },
- 56 => {
- self.size_bytes = ::std::option::Option::Some(is.read_int32()?);
- },
- 64 => {
- self.samples = ::std::option::Option::Some(is.read_int32()?);
- },
- 72 => {
- self.channel_mask = ::std::option::Option::Some(is.read_int64()?);
- },
- 80 => {
- self.sub_format = ::std::option::Option::Some(is.read_enum_or_unknown()?);
- },
- tag => {
- ::protobuf::rt::read_unknown_or_skip_group(tag, is, self.special_fields.mut_unknown_fields())?;
- },
- };
- }
- ::std::result::Result::Ok(())
- }
-
- // Compute sizes of nested messages
- #[allow(unused_variables)]
- fn compute_size(&self) -> u64 {
- let mut my_size = 0;
- if let Some(v) = self.format_tag {
- my_size += ::protobuf::rt::int32_size(1, v);
- }
- if let Some(v) = self.channels {
- my_size += ::protobuf::rt::int32_size(2, v);
- }
- if let Some(v) = self.samples_per_sec {
- my_size += ::protobuf::rt::int32_size(3, v);
- }
- if let Some(v) = self.avg_bytes_per_sec {
- my_size += ::protobuf::rt::int32_size(4, v);
- }
- if let Some(v) = self.block_align {
- my_size += ::protobuf::rt::int32_size(5, v);
- }
- if let Some(v) = self.bits_per_sample {
- my_size += ::protobuf::rt::int32_size(6, v);
- }
- if let Some(v) = self.size_bytes {
- my_size += ::protobuf::rt::int32_size(7, v);
- }
- if let Some(v) = self.samples {
- my_size += ::protobuf::rt::int32_size(8, v);
- }
- if let Some(v) = self.channel_mask {
- my_size += ::protobuf::rt::int64_size(9, v);
- }
- if let Some(v) = self.sub_format {
- my_size += ::protobuf::rt::int32_size(10, v.value());
- }
- my_size += ::protobuf::rt::unknown_fields_size(self.special_fields.unknown_fields());
- self.special_fields.cached_size().set(my_size as u32);
- my_size
- }
-
- fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::Result<()> {
- if let Some(v) = self.format_tag {
- os.write_int32(1, v)?;
- }
- if let Some(v) = self.channels {
- os.write_int32(2, v)?;
- }
- if let Some(v) = self.samples_per_sec {
- os.write_int32(3, v)?;
- }
- if let Some(v) = self.avg_bytes_per_sec {
- os.write_int32(4, v)?;
- }
- if let Some(v) = self.block_align {
- os.write_int32(5, v)?;
- }
- if let Some(v) = self.bits_per_sample {
- os.write_int32(6, v)?;
- }
- if let Some(v) = self.size_bytes {
- os.write_int32(7, v)?;
- }
- if let Some(v) = self.samples {
- os.write_int32(8, v)?;
- }
- if let Some(v) = self.channel_mask {
- os.write_int64(9, v)?;
- }
- if let Some(v) = self.sub_format {
- os.write_enum(10, ::protobuf::EnumOrUnknown::value(&v))?;
- }
- os.write_unknown_fields(self.special_fields.unknown_fields())?;
- ::std::result::Result::Ok(())
- }
-
- fn special_fields(&self) -> &::protobuf::SpecialFields {
- &self.special_fields
- }
-
- fn mut_special_fields(&mut self) -> &mut ::protobuf::SpecialFields {
- &mut self.special_fields
- }
-
- fn new() -> WaveFormat {
- WaveFormat::new()
- }
-
- fn clear(&mut self) {
- self.format_tag = ::std::option::Option::None;
- self.channels = ::std::option::Option::None;
- self.samples_per_sec = ::std::option::Option::None;
- self.avg_bytes_per_sec = ::std::option::Option::None;
- self.block_align = ::std::option::Option::None;
- self.bits_per_sample = ::std::option::Option::None;
- self.size_bytes = ::std::option::Option::None;
- self.samples = ::std::option::Option::None;
- self.channel_mask = ::std::option::Option::None;
- self.sub_format = ::std::option::Option::None;
- self.special_fields.clear();
- }
-
- fn default_instance() -> &'static WaveFormat {
- static instance: WaveFormat = WaveFormat {
- format_tag: ::std::option::Option::None,
- channels: ::std::option::Option::None,
- samples_per_sec: ::std::option::Option::None,
- avg_bytes_per_sec: ::std::option::Option::None,
- block_align: ::std::option::Option::None,
- bits_per_sample: ::std::option::Option::None,
- size_bytes: ::std::option::Option::None,
- samples: ::std::option::Option::None,
- channel_mask: ::std::option::Option::None,
- sub_format: ::std::option::Option::None,
- special_fields: ::protobuf::SpecialFields::new(),
- };
- &instance
- }
-}
-
-impl ::protobuf::MessageFull for WaveFormat {
- fn descriptor() -> ::protobuf::reflect::MessageDescriptor {
- static descriptor: ::protobuf::rt::Lazy<::protobuf::reflect::MessageDescriptor> = ::protobuf::rt::Lazy::new();
- descriptor.get(|| file_descriptor().message_by_package_relative_name("WaveFormat").unwrap()).clone()
- }
-}
-
-impl ::std::fmt::Display for WaveFormat {
- fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
- ::protobuf::text_format::fmt(self, f)
- }
-}
-
-impl ::protobuf::reflect::ProtobufValue for WaveFormat {
- type RuntimeType = ::protobuf::reflect::rt::RuntimeTypeMessage<Self>;
-}
-
-/// Nested message and enums of message `WaveFormat`
-pub mod wave_format {
- #[derive(Clone,Copy,PartialEq,Eq,Debug,Hash)]
- // @@protoc_insertion_point(enum:WaveFormat.WaveFormatSubFormat)
- pub enum WaveFormatSubFormat {
- // @@protoc_insertion_point(enum_value:WaveFormat.WaveFormatSubFormat.KSDATAFORMAT_SUBTYPE_INVALID)
- KSDATAFORMAT_SUBTYPE_INVALID = 0,
- // @@protoc_insertion_point(enum_value:WaveFormat.WaveFormatSubFormat.KSDATAFORMAT_SUBTYPE_ANALOG)
- KSDATAFORMAT_SUBTYPE_ANALOG = 1,
- // @@protoc_insertion_point(enum_value:WaveFormat.WaveFormatSubFormat.KSDATAFORMAT_SUBTYPE_PCM)
- KSDATAFORMAT_SUBTYPE_PCM = 2,
- // @@protoc_insertion_point(enum_value:WaveFormat.WaveFormatSubFormat.KSDATAFORMAT_SUBTYPE_IEEE_FLOAT)
- KSDATAFORMAT_SUBTYPE_IEEE_FLOAT = 3,
- // @@protoc_insertion_point(enum_value:WaveFormat.WaveFormatSubFormat.KSDATAFORMAT_SUBTYPE_DRM)
- KSDATAFORMAT_SUBTYPE_DRM = 4,
- // @@protoc_insertion_point(enum_value:WaveFormat.WaveFormatSubFormat.KSDATAFORMAT_SUBTYPE_ALAW)
- KSDATAFORMAT_SUBTYPE_ALAW = 5,
- // @@protoc_insertion_point(enum_value:WaveFormat.WaveFormatSubFormat.KSDATAFORMAT_SUBTYPE_MULAW)
- KSDATAFORMAT_SUBTYPE_MULAW = 6,
- // @@protoc_insertion_point(enum_value:WaveFormat.WaveFormatSubFormat.KSDATAFORMAT_SUBTYPE_ADPCM)
- KSDATAFORMAT_SUBTYPE_ADPCM = 7,
- // @@protoc_insertion_point(enum_value:WaveFormat.WaveFormatSubFormat.KSDATAFORMAT_SUBTYPE_MPEG)
- KSDATAFORMAT_SUBTYPE_MPEG = 8,
- }
-
- impl ::protobuf::Enum for WaveFormatSubFormat {
- const NAME: &'static str = "WaveFormatSubFormat";
-
- fn value(&self) -> i32 {
- *self as i32
- }
-
- fn from_i32(value: i32) -> ::std::option::Option<WaveFormatSubFormat> {
- match value {
- 0 => ::std::option::Option::Some(WaveFormatSubFormat::KSDATAFORMAT_SUBTYPE_INVALID),
- 1 => ::std::option::Option::Some(WaveFormatSubFormat::KSDATAFORMAT_SUBTYPE_ANALOG),
- 2 => ::std::option::Option::Some(WaveFormatSubFormat::KSDATAFORMAT_SUBTYPE_PCM),
- 3 => ::std::option::Option::Some(WaveFormatSubFormat::KSDATAFORMAT_SUBTYPE_IEEE_FLOAT),
- 4 => ::std::option::Option::Some(WaveFormatSubFormat::KSDATAFORMAT_SUBTYPE_DRM),
- 5 => ::std::option::Option::Some(WaveFormatSubFormat::KSDATAFORMAT_SUBTYPE_ALAW),
- 6 => ::std::option::Option::Some(WaveFormatSubFormat::KSDATAFORMAT_SUBTYPE_MULAW),
- 7 => ::std::option::Option::Some(WaveFormatSubFormat::KSDATAFORMAT_SUBTYPE_ADPCM),
- 8 => ::std::option::Option::Some(WaveFormatSubFormat::KSDATAFORMAT_SUBTYPE_MPEG),
- _ => ::std::option::Option::None
- }
- }
-
- const VALUES: &'static [WaveFormatSubFormat] = &[
- WaveFormatSubFormat::KSDATAFORMAT_SUBTYPE_INVALID,
- WaveFormatSubFormat::KSDATAFORMAT_SUBTYPE_ANALOG,
- WaveFormatSubFormat::KSDATAFORMAT_SUBTYPE_PCM,
- WaveFormatSubFormat::KSDATAFORMAT_SUBTYPE_IEEE_FLOAT,
- WaveFormatSubFormat::KSDATAFORMAT_SUBTYPE_DRM,
- WaveFormatSubFormat::KSDATAFORMAT_SUBTYPE_ALAW,
- WaveFormatSubFormat::KSDATAFORMAT_SUBTYPE_MULAW,
- WaveFormatSubFormat::KSDATAFORMAT_SUBTYPE_ADPCM,
- WaveFormatSubFormat::KSDATAFORMAT_SUBTYPE_MPEG,
- ];
- }
-
- impl ::protobuf::EnumFull for WaveFormatSubFormat {
- fn enum_descriptor() -> ::protobuf::reflect::EnumDescriptor {
- static descriptor: ::protobuf::rt::Lazy<::protobuf::reflect::EnumDescriptor> = ::protobuf::rt::Lazy::new();
- descriptor.get(|| super::file_descriptor().enum_by_package_relative_name("WaveFormat.WaveFormatSubFormat").unwrap()).clone()
- }
-
- fn descriptor(&self) -> ::protobuf::reflect::EnumValueDescriptor {
- let index = *self as usize;
- Self::enum_descriptor().value_by_index(index)
- }
- }
-
- impl ::std::default::Default for WaveFormatSubFormat {
- fn default() -> Self {
- WaveFormatSubFormat::KSDATAFORMAT_SUBTYPE_INVALID
- }
- }
-
- impl WaveFormatSubFormat {
- pub(in super) fn generated_enum_descriptor_data() -> ::protobuf::reflect::GeneratedEnumDescriptorData {
- ::protobuf::reflect::GeneratedEnumDescriptorData::new::<WaveFormatSubFormat>("WaveFormat.WaveFormatSubFormat")
- }
- }
-}
-
-#[derive(PartialEq,Clone,Default,Debug)]
-// @@protoc_insertion_point(message:EmulatorChildProcessExitDetails)
-pub struct EmulatorChildProcessExitDetails {
- // message fields
- // @@protoc_insertion_point(field:EmulatorChildProcessExitDetails.exit_code)
- pub exit_code: ::std::option::Option<u32>,
- // @@protoc_insertion_point(field:EmulatorChildProcessExitDetails.process_type)
- pub process_type: ::std::option::Option<::protobuf::EnumOrUnknown<EmulatorProcessType>>,
- // special fields
- // @@protoc_insertion_point(special_field:EmulatorChildProcessExitDetails.special_fields)
- pub special_fields: ::protobuf::SpecialFields,
-}
-
-impl<'a> ::std::default::Default for &'a EmulatorChildProcessExitDetails {
- fn default() -> &'a EmulatorChildProcessExitDetails {
- <EmulatorChildProcessExitDetails as ::protobuf::Message>::default_instance()
- }
-}
-
-impl EmulatorChildProcessExitDetails {
- pub fn new() -> EmulatorChildProcessExitDetails {
- ::std::default::Default::default()
- }
-
- // optional uint32 exit_code = 1;
-
- pub fn exit_code(&self) -> u32 {
- self.exit_code.unwrap_or(0)
- }
-
- pub fn clear_exit_code(&mut self) {
- self.exit_code = ::std::option::Option::None;
- }
-
- pub fn has_exit_code(&self) -> bool {
- self.exit_code.is_some()
- }
-
- // Param is passed by value, moved
- pub fn set_exit_code(&mut self, v: u32) {
- self.exit_code = ::std::option::Option::Some(v);
- }
-
- // optional .EmulatorProcessType process_type = 2;
-
- pub fn process_type(&self) -> EmulatorProcessType {
- match self.process_type {
- Some(e) => e.enum_value_or(EmulatorProcessType::PROCESS_TYPE_UNKNOWN),
- None => EmulatorProcessType::PROCESS_TYPE_UNKNOWN,
- }
- }
-
- pub fn clear_process_type(&mut self) {
- self.process_type = ::std::option::Option::None;
- }
-
- pub fn has_process_type(&self) -> bool {
- self.process_type.is_some()
- }
-
- // Param is passed by value, moved
- pub fn set_process_type(&mut self, v: EmulatorProcessType) {
- self.process_type = ::std::option::Option::Some(::protobuf::EnumOrUnknown::new(v));
- }
-
- fn generated_message_descriptor_data() -> ::protobuf::reflect::GeneratedMessageDescriptorData {
- let mut fields = ::std::vec::Vec::with_capacity(2);
- let mut oneofs = ::std::vec::Vec::with_capacity(0);
- fields.push(::protobuf::reflect::rt::v2::make_option_accessor::<_, _>(
- "exit_code",
- |m: &EmulatorChildProcessExitDetails| { &m.exit_code },
- |m: &mut EmulatorChildProcessExitDetails| { &mut m.exit_code },
- ));
- fields.push(::protobuf::reflect::rt::v2::make_option_accessor::<_, _>(
- "process_type",
- |m: &EmulatorChildProcessExitDetails| { &m.process_type },
- |m: &mut EmulatorChildProcessExitDetails| { &mut m.process_type },
- ));
- ::protobuf::reflect::GeneratedMessageDescriptorData::new_2::<EmulatorChildProcessExitDetails>(
- "EmulatorChildProcessExitDetails",
- fields,
- oneofs,
- )
- }
-}
-
-impl ::protobuf::Message for EmulatorChildProcessExitDetails {
- const NAME: &'static str = "EmulatorChildProcessExitDetails";
-
- fn is_initialized(&self) -> bool {
- true
- }
-
- fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::Result<()> {
- while let Some(tag) = is.read_raw_tag_or_eof()? {
- match tag {
- 8 => {
- self.exit_code = ::std::option::Option::Some(is.read_uint32()?);
- },
- 16 => {
- self.process_type = ::std::option::Option::Some(is.read_enum_or_unknown()?);
- },
- tag => {
- ::protobuf::rt::read_unknown_or_skip_group(tag, is, self.special_fields.mut_unknown_fields())?;
- },
- };
- }
- ::std::result::Result::Ok(())
- }
-
- // Compute sizes of nested messages
- #[allow(unused_variables)]
- fn compute_size(&self) -> u64 {
- let mut my_size = 0;
- if let Some(v) = self.exit_code {
- my_size += ::protobuf::rt::uint32_size(1, v);
- }
- if let Some(v) = self.process_type {
- my_size += ::protobuf::rt::int32_size(2, v.value());
- }
- my_size += ::protobuf::rt::unknown_fields_size(self.special_fields.unknown_fields());
- self.special_fields.cached_size().set(my_size as u32);
- my_size
- }
-
- fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::Result<()> {
- if let Some(v) = self.exit_code {
- os.write_uint32(1, v)?;
- }
- if let Some(v) = self.process_type {
- os.write_enum(2, ::protobuf::EnumOrUnknown::value(&v))?;
- }
- os.write_unknown_fields(self.special_fields.unknown_fields())?;
- ::std::result::Result::Ok(())
- }
-
- fn special_fields(&self) -> &::protobuf::SpecialFields {
- &self.special_fields
- }
-
- fn mut_special_fields(&mut self) -> &mut ::protobuf::SpecialFields {
- &mut self.special_fields
- }
-
- fn new() -> EmulatorChildProcessExitDetails {
- EmulatorChildProcessExitDetails::new()
- }
-
- fn clear(&mut self) {
- self.exit_code = ::std::option::Option::None;
- self.process_type = ::std::option::Option::None;
- self.special_fields.clear();
- }
-
- fn default_instance() -> &'static EmulatorChildProcessExitDetails {
- static instance: EmulatorChildProcessExitDetails = EmulatorChildProcessExitDetails {
- exit_code: ::std::option::Option::None,
- process_type: ::std::option::Option::None,
- special_fields: ::protobuf::SpecialFields::new(),
- };
- &instance
- }
-}
-
-impl ::protobuf::MessageFull for EmulatorChildProcessExitDetails {
- fn descriptor() -> ::protobuf::reflect::MessageDescriptor {
- static descriptor: ::protobuf::rt::Lazy<::protobuf::reflect::MessageDescriptor> = ::protobuf::rt::Lazy::new();
- descriptor.get(|| file_descriptor().message_by_package_relative_name("EmulatorChildProcessExitDetails").unwrap()).clone()
- }
-}
-
-impl ::std::fmt::Display for EmulatorChildProcessExitDetails {
- fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
- ::protobuf::text_format::fmt(self, f)
- }
-}
-
-impl ::protobuf::reflect::ProtobufValue for EmulatorChildProcessExitDetails {
- type RuntimeType = ::protobuf::reflect::rt::RuntimeTypeMessage<Self>;
-}
-
-#[derive(PartialEq,Clone,Default,Debug)]
-// @@protoc_insertion_point(message:EmulatorDllDetails)
-pub struct EmulatorDllDetails {
- // message fields
- // @@protoc_insertion_point(field:EmulatorDllDetails.dll_base_name)
- pub dll_base_name: ::std::option::Option<::std::string::String>,
- // special fields
- // @@protoc_insertion_point(special_field:EmulatorDllDetails.special_fields)
- pub special_fields: ::protobuf::SpecialFields,
-}
-
-impl<'a> ::std::default::Default for &'a EmulatorDllDetails {
- fn default() -> &'a EmulatorDllDetails {
- <EmulatorDllDetails as ::protobuf::Message>::default_instance()
- }
-}
-
-impl EmulatorDllDetails {
- pub fn new() -> EmulatorDllDetails {
- ::std::default::Default::default()
- }
-
- // optional string dll_base_name = 1;
-
- pub fn dll_base_name(&self) -> &str {
- match self.dll_base_name.as_ref() {
- Some(v) => v,
- None => "",
- }
- }
-
- pub fn clear_dll_base_name(&mut self) {
- self.dll_base_name = ::std::option::Option::None;
- }
-
- pub fn has_dll_base_name(&self) -> bool {
- self.dll_base_name.is_some()
- }
-
- // Param is passed by value, moved
- pub fn set_dll_base_name(&mut self, v: ::std::string::String) {
- self.dll_base_name = ::std::option::Option::Some(v);
- }
-
- // Mutable pointer to the field.
- // If field is not initialized, it is initialized with default value first.
- pub fn mut_dll_base_name(&mut self) -> &mut ::std::string::String {
- if self.dll_base_name.is_none() {
- self.dll_base_name = ::std::option::Option::Some(::std::string::String::new());
- }
- self.dll_base_name.as_mut().unwrap()
- }
-
- // Take field
- pub fn take_dll_base_name(&mut self) -> ::std::string::String {
- self.dll_base_name.take().unwrap_or_else(|| ::std::string::String::new())
- }
-
- fn generated_message_descriptor_data() -> ::protobuf::reflect::GeneratedMessageDescriptorData {
- let mut fields = ::std::vec::Vec::with_capacity(1);
- let mut oneofs = ::std::vec::Vec::with_capacity(0);
- fields.push(::protobuf::reflect::rt::v2::make_option_accessor::<_, _>(
- "dll_base_name",
- |m: &EmulatorDllDetails| { &m.dll_base_name },
- |m: &mut EmulatorDllDetails| { &mut m.dll_base_name },
- ));
- ::protobuf::reflect::GeneratedMessageDescriptorData::new_2::<EmulatorDllDetails>(
- "EmulatorDllDetails",
- fields,
- oneofs,
- )
- }
-}
-
-impl ::protobuf::Message for EmulatorDllDetails {
- const NAME: &'static str = "EmulatorDllDetails";
-
- fn is_initialized(&self) -> bool {
- true
- }
-
- fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::Result<()> {
- while let Some(tag) = is.read_raw_tag_or_eof()? {
- match tag {
- 10 => {
- self.dll_base_name = ::std::option::Option::Some(is.read_string()?);
- },
- tag => {
- ::protobuf::rt::read_unknown_or_skip_group(tag, is, self.special_fields.mut_unknown_fields())?;
- },
- };
- }
- ::std::result::Result::Ok(())
- }
-
- // Compute sizes of nested messages
- #[allow(unused_variables)]
- fn compute_size(&self) -> u64 {
- let mut my_size = 0;
- if let Some(v) = self.dll_base_name.as_ref() {
- my_size += ::protobuf::rt::string_size(1, &v);
- }
- my_size += ::protobuf::rt::unknown_fields_size(self.special_fields.unknown_fields());
- self.special_fields.cached_size().set(my_size as u32);
- my_size
- }
-
- fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::Result<()> {
- if let Some(v) = self.dll_base_name.as_ref() {
- os.write_string(1, v)?;
- }
- os.write_unknown_fields(self.special_fields.unknown_fields())?;
- ::std::result::Result::Ok(())
- }
-
- fn special_fields(&self) -> &::protobuf::SpecialFields {
- &self.special_fields
- }
-
- fn mut_special_fields(&mut self) -> &mut ::protobuf::SpecialFields {
- &mut self.special_fields
- }
-
- fn new() -> EmulatorDllDetails {
- EmulatorDllDetails::new()
- }
-
- fn clear(&mut self) {
- self.dll_base_name = ::std::option::Option::None;
- self.special_fields.clear();
- }
-
- fn default_instance() -> &'static EmulatorDllDetails {
- static instance: EmulatorDllDetails = EmulatorDllDetails {
- dll_base_name: ::std::option::Option::None,
- special_fields: ::protobuf::SpecialFields::new(),
- };
- &instance
- }
-}
-
-impl ::protobuf::MessageFull for EmulatorDllDetails {
- fn descriptor() -> ::protobuf::reflect::MessageDescriptor {
- static descriptor: ::protobuf::rt::Lazy<::protobuf::reflect::MessageDescriptor> = ::protobuf::rt::Lazy::new();
- descriptor.get(|| file_descriptor().message_by_package_relative_name("EmulatorDllDetails").unwrap()).clone()
- }
-}
-
-impl ::std::fmt::Display for EmulatorDllDetails {
- fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
- ::protobuf::text_format::fmt(self, f)
- }
-}
-
-impl ::protobuf::reflect::ProtobufValue for EmulatorDllDetails {
- type RuntimeType = ::protobuf::reflect::rt::RuntimeTypeMessage<Self>;
-}
-
-#[derive(Clone,Copy,PartialEq,Eq,Debug,Hash)]
-// @@protoc_insertion_point(enum:EmulatorProcessType)
-pub enum EmulatorProcessType {
- // @@protoc_insertion_point(enum_value:EmulatorProcessType.PROCESS_TYPE_UNKNOWN)
- PROCESS_TYPE_UNKNOWN = 0,
- // @@protoc_insertion_point(enum_value:EmulatorProcessType.PROCESS_TYPE_MAIN)
- PROCESS_TYPE_MAIN = 1,
- // @@protoc_insertion_point(enum_value:EmulatorProcessType.PROCESS_TYPE_BLOCK)
- PROCESS_TYPE_BLOCK = 2,
- // @@protoc_insertion_point(enum_value:EmulatorProcessType.PROCESS_TYPE_METRICS)
- PROCESS_TYPE_METRICS = 3,
- // @@protoc_insertion_point(enum_value:EmulatorProcessType.PROCESS_TYPE_NET)
- PROCESS_TYPE_NET = 4,
- // @@protoc_insertion_point(enum_value:EmulatorProcessType.PROCESS_TYPE_SLIRP)
- PROCESS_TYPE_SLIRP = 5,
- // @@protoc_insertion_point(enum_value:EmulatorProcessType.PROCESS_TYPE_GPU)
- PROCESS_TYPE_GPU = 6,
- // @@protoc_insertion_point(enum_value:EmulatorProcessType.PROCESS_TYPE_SOUND)
- PROCESS_TYPE_SOUND = 7,
- // @@protoc_insertion_point(enum_value:EmulatorProcessType.PROCESS_TYPE_BROKER)
- PROCESS_TYPE_BROKER = 8,
- // @@protoc_insertion_point(enum_value:EmulatorProcessType.PROCESS_TYPE_SPU)
- PROCESS_TYPE_SPU = 9,
-}
-
-impl ::protobuf::Enum for EmulatorProcessType {
- const NAME: &'static str = "EmulatorProcessType";
-
- fn value(&self) -> i32 {
- *self as i32
- }
-
- fn from_i32(value: i32) -> ::std::option::Option<EmulatorProcessType> {
- match value {
- 0 => ::std::option::Option::Some(EmulatorProcessType::PROCESS_TYPE_UNKNOWN),
- 1 => ::std::option::Option::Some(EmulatorProcessType::PROCESS_TYPE_MAIN),
- 2 => ::std::option::Option::Some(EmulatorProcessType::PROCESS_TYPE_BLOCK),
- 3 => ::std::option::Option::Some(EmulatorProcessType::PROCESS_TYPE_METRICS),
- 4 => ::std::option::Option::Some(EmulatorProcessType::PROCESS_TYPE_NET),
- 5 => ::std::option::Option::Some(EmulatorProcessType::PROCESS_TYPE_SLIRP),
- 6 => ::std::option::Option::Some(EmulatorProcessType::PROCESS_TYPE_GPU),
- 7 => ::std::option::Option::Some(EmulatorProcessType::PROCESS_TYPE_SOUND),
- 8 => ::std::option::Option::Some(EmulatorProcessType::PROCESS_TYPE_BROKER),
- 9 => ::std::option::Option::Some(EmulatorProcessType::PROCESS_TYPE_SPU),
- _ => ::std::option::Option::None
- }
- }
-
- const VALUES: &'static [EmulatorProcessType] = &[
- EmulatorProcessType::PROCESS_TYPE_UNKNOWN,
- EmulatorProcessType::PROCESS_TYPE_MAIN,
- EmulatorProcessType::PROCESS_TYPE_BLOCK,
- EmulatorProcessType::PROCESS_TYPE_METRICS,
- EmulatorProcessType::PROCESS_TYPE_NET,
- EmulatorProcessType::PROCESS_TYPE_SLIRP,
- EmulatorProcessType::PROCESS_TYPE_GPU,
- EmulatorProcessType::PROCESS_TYPE_SOUND,
- EmulatorProcessType::PROCESS_TYPE_BROKER,
- EmulatorProcessType::PROCESS_TYPE_SPU,
- ];
-}
-
-impl ::protobuf::EnumFull for EmulatorProcessType {
- fn enum_descriptor() -> ::protobuf::reflect::EnumDescriptor {
- static descriptor: ::protobuf::rt::Lazy<::protobuf::reflect::EnumDescriptor> = ::protobuf::rt::Lazy::new();
- descriptor.get(|| file_descriptor().enum_by_package_relative_name("EmulatorProcessType").unwrap()).clone()
- }
-
- fn descriptor(&self) -> ::protobuf::reflect::EnumValueDescriptor {
- let index = *self as usize;
- Self::enum_descriptor().value_by_index(index)
- }
-}
-
-impl ::std::default::Default for EmulatorProcessType {
- fn default() -> Self {
- EmulatorProcessType::PROCESS_TYPE_UNKNOWN
- }
-}
-
-impl EmulatorProcessType {
- fn generated_enum_descriptor_data() -> ::protobuf::reflect::GeneratedEnumDescriptorData {
- ::protobuf::reflect::GeneratedEnumDescriptorData::new::<EmulatorProcessType>("EmulatorProcessType")
- }
-}
-
-static file_descriptor_proto_data: &'static [u8] = b"\
- \n\x13event_details.proto\"\x96\x02\n\rRecordDetails\x12n\n#emulator_chi\
- ld_process_exit_details\x18\x0c\x20\x01(\x0b2\x20.EmulatorChildProcessEx\
- itDetailsR\x1femulatorChildProcessExitDetails\x12B\n\x13wave_format_deta\
- ils\x18\r\x20\x01(\x0b2\x12.WaveFormatDetailsR\x11waveFormatDetails\x12E\
- \n\x14emulator_dll_details\x18\x13\x20\x01(\x0b2\x13.EmulatorDllDetailsR\
- \x12emulatorDllDetailsJ\x04\x08\x01\x10\x0cJ\x04\x08\x0e\x10\x13\"\x9d\
- \x01\n\x11WaveFormatDetails\x12)\n\trequested\x18\x01\x20\x01(\x0b2\x0b.\
- WaveFormatR\trequested\x12'\n\x08modified\x18\x02\x20\x01(\x0b2\x0b.Wave\
- FormatR\x08modified\x124\n\x0fclosest_matched\x18\x03\x20\x01(\x0b2\x0b.\
- WaveFormatR\x0eclosestMatched\"\xb9\x05\n\nWaveFormat\x12\x1d\n\nformat_\
- tag\x18\x01\x20\x01(\x05R\tformatTag\x12\x1a\n\x08channels\x18\x02\x20\
- \x01(\x05R\x08channels\x12&\n\x0fsamples_per_sec\x18\x03\x20\x01(\x05R\r\
- samplesPerSec\x12)\n\x11avg_bytes_per_sec\x18\x04\x20\x01(\x05R\x0eavgBy\
- tesPerSec\x12\x1f\n\x0bblock_align\x18\x05\x20\x01(\x05R\nblockAlign\x12\
- &\n\x0fbits_per_sample\x18\x06\x20\x01(\x05R\rbitsPerSample\x12\x1d\n\ns\
- ize_bytes\x18\x07\x20\x01(\x05R\tsizeBytes\x12\x18\n\x07samples\x18\x08\
- \x20\x01(\x05R\x07samples\x12!\n\x0cchannel_mask\x18\t\x20\x01(\x03R\x0b\
- channelMask\x12>\n\nsub_format\x18\n\x20\x01(\x0e2\x1f.WaveFormat.WaveFo\
- rmatSubFormatR\tsubFormat\"\xb7\x02\n\x13WaveFormatSubFormat\x12\x20\n\
- \x1cKSDATAFORMAT_SUBTYPE_INVALID\x10\0\x12\x1f\n\x1bKSDATAFORMAT_SUBTYPE\
- _ANALOG\x10\x01\x12\x1c\n\x18KSDATAFORMAT_SUBTYPE_PCM\x10\x02\x12#\n\x1f\
- KSDATAFORMAT_SUBTYPE_IEEE_FLOAT\x10\x03\x12\x1c\n\x18KSDATAFORMAT_SUBTYP\
- E_DRM\x10\x04\x12\x1d\n\x19KSDATAFORMAT_SUBTYPE_ALAW\x10\x05\x12\x1e\n\
- \x1aKSDATAFORMAT_SUBTYPE_MULAW\x10\x06\x12\x1e\n\x1aKSDATAFORMAT_SUBTYPE\
- _ADPCM\x10\x07\x12\x1d\n\x19KSDATAFORMAT_SUBTYPE_MPEG\x10\x08\"w\n\x1fEm\
- ulatorChildProcessExitDetails\x12\x1b\n\texit_code\x18\x01\x20\x01(\rR\
- \x08exitCode\x127\n\x0cprocess_type\x18\x02\x20\x01(\x0e2\x14.EmulatorPr\
- ocessTypeR\x0bprocessType\"8\n\x12EmulatorDllDetails\x12\"\n\rdll_base_n\
- ame\x18\x01\x20\x01(\tR\x0bdllBaseName*\x83\x02\n\x13EmulatorProcessType\
- \x12\x18\n\x14PROCESS_TYPE_UNKNOWN\x10\0\x12\x15\n\x11PROCESS_TYPE_MAIN\
- \x10\x01\x12\x16\n\x12PROCESS_TYPE_BLOCK\x10\x02\x12\x18\n\x14PROCESS_TY\
- PE_METRICS\x10\x03\x12\x14\n\x10PROCESS_TYPE_NET\x10\x04\x12\x16\n\x12PR\
- OCESS_TYPE_SLIRP\x10\x05\x12\x14\n\x10PROCESS_TYPE_GPU\x10\x06\x12\x16\n\
- \x12PROCESS_TYPE_SOUND\x10\x07\x12\x17\n\x13PROCESS_TYPE_BROKER\x10\x08\
- \x12\x14\n\x10PROCESS_TYPE_SPU\x10\t\
-";
-
-/// `FileDescriptorProto` object which was a source for this generated file
-fn file_descriptor_proto() -> &'static ::protobuf::descriptor::FileDescriptorProto {
- static file_descriptor_proto_lazy: ::protobuf::rt::Lazy<::protobuf::descriptor::FileDescriptorProto> = ::protobuf::rt::Lazy::new();
- file_descriptor_proto_lazy.get(|| {
- ::protobuf::Message::parse_from_bytes(file_descriptor_proto_data).unwrap()
- })
-}
-
-/// `FileDescriptor` object which allows dynamic access to files
-pub fn file_descriptor() -> &'static ::protobuf::reflect::FileDescriptor {
- static generated_file_descriptor_lazy: ::protobuf::rt::Lazy<::protobuf::reflect::GeneratedFileDescriptor> = ::protobuf::rt::Lazy::new();
- static file_descriptor: ::protobuf::rt::Lazy<::protobuf::reflect::FileDescriptor> = ::protobuf::rt::Lazy::new();
- file_descriptor.get(|| {
- let generated_file_descriptor = generated_file_descriptor_lazy.get(|| {
- let mut deps = ::std::vec::Vec::with_capacity(0);
- let mut messages = ::std::vec::Vec::with_capacity(5);
- messages.push(RecordDetails::generated_message_descriptor_data());
- messages.push(WaveFormatDetails::generated_message_descriptor_data());
- messages.push(WaveFormat::generated_message_descriptor_data());
- messages.push(EmulatorChildProcessExitDetails::generated_message_descriptor_data());
- messages.push(EmulatorDllDetails::generated_message_descriptor_data());
- let mut enums = ::std::vec::Vec::with_capacity(2);
- enums.push(EmulatorProcessType::generated_enum_descriptor_data());
- enums.push(wave_format::WaveFormatSubFormat::generated_enum_descriptor_data());
- ::protobuf::reflect::GeneratedFileDescriptor::new_generated(
- file_descriptor_proto(),
- deps,
- messages,
- enums,
- )
- });
- ::protobuf::reflect::FileDescriptor::new_generated_2(generated_file_descriptor)
- })
-}
diff --git a/vendor/generic/metrics/src/out/generated.rs b/vendor/generic/metrics/src/out/generated.rs
deleted file mode 100644
index c7dc617..0000000
--- a/vendor/generic/metrics/src/out/generated.rs
+++ /dev/null
@@ -1,2 +0,0 @@
-#[path = "./event_details.rs"]
-pub mod event_details;
diff --git a/vendor/generic/metrics/src/out/mod.rs b/vendor/generic/metrics/src/out/mod.rs
deleted file mode 100644
index 2db7abd..0000000
--- a/vendor/generic/metrics/src/out/mod.rs
+++ /dev/null
@@ -1,3 +0,0 @@
-// @generated
-
-pub mod event_details;
diff --git a/vendor/generic/metrics/src/periodic_logger.rs b/vendor/generic/metrics/src/periodic_logger.rs
index 4093078..b6469f6 100644
--- a/vendor/generic/metrics/src/periodic_logger.rs
+++ b/vendor/generic/metrics/src/periodic_logger.rs
@@ -5,7 +5,7 @@
use std::result::Result;
use std::time::Duration;
-use crate::MetricEventType;
+use metrics_events::MetricEventType;
/// A logging struct meant for use in tracking and periodically
/// logging a single metric. The metric is aggregated over the
diff --git a/vendor/generic/metrics/src/request_handler.rs b/vendor/generic/metrics/src/request_handler.rs
index 7ed300d..7520c70 100644
--- a/vendor/generic/metrics/src/request_handler.rs
+++ b/vendor/generic/metrics/src/request_handler.rs
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-use crate::metrics_requests::MetricsRequest;
+use base::RecvTube;
#[derive(Default)]
pub struct MetricsRequestHandler;
@@ -10,6 +10,8 @@
pub fn new() -> Self {
MetricsRequestHandler
}
- pub fn handle_request(&self, _req: MetricsRequest) {}
+ pub fn handle_tube_readable(&self, _tube: &RecvTube) {
+ unreachable!();
+ }
pub fn shutdown(&self) {}
}
diff --git a/vendor/generic/metrics/src/sys/windows.rs b/vendor/generic/metrics/src/sys/windows.rs
deleted file mode 100644
index 0aedb5c..0000000
--- a/vendor/generic/metrics/src/sys/windows.rs
+++ /dev/null
@@ -1,24 +0,0 @@
-// Copyright 2022 The ChromiumOS Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-use win_util::ProcessType;
-
-use crate::protos::event_details::EmulatorProcessType;
-
-impl From<ProcessType> for EmulatorProcessType {
- fn from(process_type: ProcessType) -> Self {
- match process_type {
- ProcessType::Block => EmulatorProcessType::PROCESS_TYPE_BLOCK,
- ProcessType::Main => EmulatorProcessType::PROCESS_TYPE_MAIN,
- ProcessType::Metrics => EmulatorProcessType::PROCESS_TYPE_METRICS,
- ProcessType::Net => EmulatorProcessType::PROCESS_TYPE_NET,
- ProcessType::Slirp => EmulatorProcessType::PROCESS_TYPE_SLIRP,
- ProcessType::Gpu => EmulatorProcessType::PROCESS_TYPE_GPU,
- ProcessType::Snd => EmulatorProcessType::PROCESS_TYPE_SOUND,
- ProcessType::Broker => EmulatorProcessType::PROCESS_TYPE_BROKER,
- ProcessType::Spu => EmulatorProcessType::PROCESS_TYPE_SPU,
- ProcessType::UnknownType => panic!("Unknown process type found"),
- }
- }
-}
diff --git a/vendor/generic/metrics_events/Android.bp b/vendor/generic/metrics_events/Android.bp
new file mode 100644
index 0000000..90ea97a
--- /dev/null
+++ b/vendor/generic/metrics_events/Android.bp
@@ -0,0 +1,20 @@
+// This file is generated by cargo_embargo.
+// Do not modify this file after the first "rust_*" or "genrule" module
+// because the changes will be overridden on upgrade.
+// Content before the first "rust_*" or "genrule" module is preserved.
+
+package {
+ default_applicable_licenses: ["external_crosvm_license"],
+}
+
+rust_library {
+ name: "libmetrics_events_generic",
+ defaults: ["crosvm_inner_defaults"],
+ host_supported: true,
+ crate_name: "metrics_events_generic",
+ cargo_env_compat: true,
+ cargo_pkg_version: "0.1.0",
+ srcs: ["src/lib.rs"],
+ edition: "2021",
+ rustlibs: ["libserde"],
+}
diff --git a/vendor/generic/metrics_events/Cargo.toml b/vendor/generic/metrics_events/Cargo.toml
new file mode 100644
index 0000000..19d3a5b
--- /dev/null
+++ b/vendor/generic/metrics_events/Cargo.toml
@@ -0,0 +1,8 @@
+[package]
+name = "metrics_events_generic"
+version = "0.1.0"
+authors = ["The ChromiumOS Authors"]
+edition = "2021"
+
+[dependencies]
+serde = { version = "1", features = ["derive"] }
diff --git a/vendor/generic/metrics_events/src/lib.rs b/vendor/generic/metrics_events/src/lib.rs
new file mode 100644
index 0000000..85a7075
--- /dev/null
+++ b/vendor/generic/metrics_events/src/lib.rs
@@ -0,0 +1,18 @@
+// Copyright 2024 The ChromiumOS Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+use serde::Deserialize;
+use serde::Serialize;
+
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub enum MetricEventType {
+ // No events should ever be added to this enum - all events defined in
+ // upstream CrosVM should be added to the metrics_event package. Downstream
+ // projects can replace the generic metrics_event package if they need
+ // downstream only events.
+}
+
+pub struct RecordDetails {
+ // Similar to above, this is for downstream projects.
+}
diff --git a/vendor/generic/vm_control/Android.bp b/vendor/generic/vm_control/Android.bp
index 56b56b9..1371389 100644
--- a/vendor/generic/vm_control/Android.bp
+++ b/vendor/generic/vm_control/Android.bp
@@ -1,5 +1,6 @@
// This file is generated by cargo_embargo.
-// Do not modify this file as most changes will be overridden on upgrade.
+// Do not modify this file after the first "rust_*" or "genrule" module
+// because the changes will be overridden on upgrade.
// Content before the first "rust_*" or "genrule" module is preserved.
package {
diff --git a/vfio_sys/Android.bp b/vfio_sys/Android.bp
index 2f2ddde..a8024da 100644
--- a/vfio_sys/Android.bp
+++ b/vfio_sys/Android.bp
@@ -1,5 +1,6 @@
// This file is generated by cargo_embargo.
-// Do not modify this file as most changes will be overridden on upgrade.
+// Do not modify this file after the first "rust_*" or "genrule" module
+// because the changes will be overridden on upgrade.
// Content before the first "rust_*" or "genrule" module is preserved.
package {
diff --git a/vhost/Android.bp b/vhost/Android.bp
index 1522e3e..86146a2 100644
--- a/vhost/Android.bp
+++ b/vhost/Android.bp
@@ -1,5 +1,6 @@
// This file is generated by cargo_embargo.
-// Do not modify this file as most changes will be overridden on upgrade.
+// Do not modify this file after the first "rust_*" or "genrule" module
+// because the changes will be overridden on upgrade.
// Content before the first "rust_*" or "genrule" module is preserved.
package {
diff --git a/virtio_sys/Android.bp b/virtio_sys/Android.bp
index 92f33a5..da9f5ce 100644
--- a/virtio_sys/Android.bp
+++ b/virtio_sys/Android.bp
@@ -1,5 +1,6 @@
// This file is generated by cargo_embargo.
-// Do not modify this file as most changes will be overridden on upgrade.
+// Do not modify this file after the first "rust_*" or "genrule" module
+// because the changes will be overridden on upgrade.
// Content before the first "rust_*" or "genrule" module is preserved.
package {
diff --git a/vm_control/Android.bp b/vm_control/Android.bp
index fb45dd5..ff9274c 100644
--- a/vm_control/Android.bp
+++ b/vm_control/Android.bp
@@ -1,5 +1,6 @@
// This file is generated by cargo_embargo.
-// Do not modify this file as most changes will be overridden on upgrade.
+// Do not modify this file after the first "rust_*" or "genrule" module
+// because the changes will be overridden on upgrade.
// Content before the first "rust_*" or "genrule" module is preserved.
package {
@@ -32,7 +33,7 @@
"libballoon_control",
"libbase_rust",
"libcfg_if",
- "libdata_model",
+ "libcrypto_generic",
"libgdbstub",
"libgdbstub_arch",
"libhypervisor",
@@ -50,6 +51,7 @@
"libvm_memory",
],
proc_macros: ["libremain"],
+ aliases: ["crypto_generic:crypto"],
visibility: ["//packages/modules/Virtualization/virtualizationmanager"],
}
@@ -79,7 +81,7 @@
"libballoon_control",
"libbase_rust",
"libcfg_if",
- "libdata_model",
+ "libcrypto_generic",
"libgdbstub",
"libgdbstub_arch",
"libhypervisor",
@@ -97,4 +99,5 @@
"libvm_memory",
],
proc_macros: ["libremain"],
+ aliases: ["crypto_generic:crypto"],
}
diff --git a/vm_control/Cargo.toml b/vm_control/Cargo.toml
index eb42acd..6c2e482 100644
--- a/vm_control/Cargo.toml
+++ b/vm_control/Cargo.toml
@@ -17,7 +17,7 @@
balloon_control = { path = "../common/balloon_control" }
base = { path = "../base" }
cfg-if = "*"
-data_model = { path = "../common/data_model" }
+crypto = { path = "../vendor/generic/crypto", package = "crypto_generic" }
gdbstub = { version = "0.7.0", optional = true }
gdbstub_arch = { version = "0.3.0", optional = true }
hypervisor = { path = "../hypervisor" }
diff --git a/vm_control/src/client.rs b/vm_control/src/client.rs
index b8ca6a3..67e7405 100644
--- a/vm_control/src/client.rs
+++ b/vm_control/src/client.rs
@@ -120,6 +120,22 @@
}
}
+pub fn do_security_key_attach<T: AsRef<Path> + std::fmt::Debug>(
+ socket_path: T,
+ dev_path: &Path,
+) -> ModifyUsbResult<UsbControlResult> {
+ let usb_file = open_file_or_duplicate(dev_path, OpenOptions::new().read(true).write(true))
+ .map_err(|e| ModifyUsbError::FailedToOpenDevice(dev_path.into(), e))?;
+
+ let request = VmRequest::UsbCommand(UsbControlCommand::AttachSecurityKey { file: usb_file });
+ let response =
+ handle_request(&request, socket_path).map_err(|_| ModifyUsbError::SocketFailed)?;
+ match response {
+ VmResponse::UsbResponse(usb_resp) => Ok(usb_resp),
+ r => Err(ModifyUsbError::UnexpectedResponse(r)),
+ }
+}
+
pub fn do_usb_detach<T: AsRef<Path> + std::fmt::Debug>(
socket_path: T,
port: u8,
diff --git a/vm_control/src/lib.rs b/vm_control/src/lib.rs
index e19b04f..9c989b4 100644
--- a/vm_control/src/lib.rs
+++ b/vm_control/src/lib.rs
@@ -233,6 +233,10 @@
#[serde(with = "with_as_descriptor")]
file: File,
},
+ AttachSecurityKey {
+ #[serde(with = "with_as_descriptor")]
+ file: File,
+ },
DetachDevice {
port: u8,
},
@@ -316,13 +320,17 @@
Take {
snapshot_path: PathBuf,
compress_memory: bool,
+ encrypt: bool,
},
}
/// Commands for restore feature
#[derive(Serialize, Deserialize, Debug)]
pub enum RestoreCommand {
- Apply { restore_path: PathBuf },
+ Apply {
+ restore_path: PathBuf,
+ require_encrypted: bool,
+ },
}
/// Commands for actions on devices and the devices control thread.
@@ -1253,7 +1261,7 @@
}
/// Message for communicating a suspend or resume to the virtio-pvclock device.
-#[derive(Serialize, Deserialize, Debug)]
+#[derive(Serialize, Deserialize, Debug, Clone)]
pub enum PvClockCommand {
Suspend,
Resume,
@@ -1263,6 +1271,7 @@
#[derive(Serialize, Deserialize, Debug)]
pub enum PvClockCommandResponse {
Ok,
+ DeviceInactive,
Err(SysError),
}
@@ -1959,6 +1968,7 @@
VmRequest::Snapshot(SnapshotCommand::Take {
ref snapshot_path,
compress_memory,
+ encrypt,
}) => {
info!("Starting crosvm snapshot");
match do_snapshot(
@@ -1969,6 +1979,7 @@
vcpu_size,
snapshot_irqchip,
compress_memory,
+ encrypt,
) {
Ok(()) => {
info!("Finished crosvm snapshot successfully");
@@ -1980,7 +1991,10 @@
}
}
}
- VmRequest::Restore(RestoreCommand::Apply { ref restore_path }) => {
+ VmRequest::Restore(RestoreCommand::Apply {
+ ref restore_path,
+ require_encrypted,
+ }) => {
info!("Starting crosvm restore");
match do_restore(
restore_path.clone(),
@@ -1990,6 +2004,7 @@
device_control_tube,
vcpu_size,
restore_irqchip,
+ require_encrypted,
) {
Ok(()) => {
info!("Finished crosvm restore successfully");
@@ -2026,6 +2041,7 @@
vcpu_size: usize,
snapshot_irqchip: impl Fn() -> anyhow::Result<serde_json::Value>,
compress_memory: bool,
+ encrypt: bool,
) -> anyhow::Result<()> {
let _vcpu_guard = VcpuSuspendGuard::new(&kick_vcpus, vcpu_size)?;
let _device_guard = DeviceSleepGuard::new(device_control_tube)?;
@@ -2073,7 +2089,7 @@
}
info!("flushed IRQs in {} iterations", flush_attempts);
- let snapshot_writer = SnapshotWriter::new(snapshot_path)?;
+ let snapshot_writer = SnapshotWriter::new(snapshot_path, encrypt)?;
// Snapshot Vcpus
info!("VCPUs snapshotting...");
@@ -2129,11 +2145,12 @@
device_control_tube: &Tube,
vcpu_size: usize,
mut restore_irqchip: impl FnMut(serde_json::Value) -> anyhow::Result<()>,
+ require_encrypted: bool,
) -> anyhow::Result<()> {
let _guard = VcpuSuspendGuard::new(&kick_vcpus, vcpu_size);
let _devices_guard = DeviceSleepGuard::new(device_control_tube)?;
- let snapshot_reader = SnapshotReader::new(restore_path)?;
+ let snapshot_reader = SnapshotReader::new(restore_path, require_encrypted)?;
// Restore IrqChip
let irq_snapshot: serde_json::Value = snapshot_reader.read_fragment("irqchip")?;
diff --git a/vm_control/src/snapshot_format.rs b/vm_control/src/snapshot_format.rs
index 1f3d74f..0e5a4c4 100644
--- a/vm_control/src/snapshot_format.rs
+++ b/vm_control/src/snapshot_format.rs
@@ -2,13 +2,20 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+use std::fmt::Debug;
+use std::fmt::Formatter;
use std::fs::File;
use std::io::Read;
use std::io::Write;
+use std::path::Path;
use std::path::PathBuf;
use anyhow::Context;
use anyhow::Result;
+use crypto::CryptKey;
+
+// Use 4kB encrypted chunks by default (if encryption is used).
+const DEFAULT_ENCRYPTED_CHUNK_SIZE_BYTES: usize = 1024 * 4;
/// Writer of serialized VM snapshots.
///
@@ -18,25 +25,68 @@
/// In the current implementation, fragments are files and namespaces are directories, but the API
/// is kept abstract so that we can potentially support something like a single file archive
/// output.
-#[derive(Clone, Debug, serde::Serialize, serde::Deserialize)]
+#[derive(Clone, serde::Serialize, serde::Deserialize)]
pub struct SnapshotWriter {
dir: PathBuf,
+ /// If encryption is used, the plaintext key will be stored here.
+ key: Option<CryptKey>,
+}
+
+impl Debug for SnapshotWriter {
+ fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
+ f.debug_struct("SnapshotWriter")
+ .field("dir", &format!("{:?}", self.dir))
+ .field("key", if self.key.is_some() { &"Some" } else { &"None" })
+ .finish()
+ }
}
impl SnapshotWriter {
/// Creates a new `SnapshotWriter` that will writes its data to a dir at `root`. The path must
- /// not exist yet.
+ /// not exist yet. If encryption is desired, set encrypt (Note: only supported downstream on
+ /// Windows).
// TODO(b/268094487): If the snapshot fails, we leave incomplete snapshot files at the
// requested path. Consider building up the snapshot dir somewhere else and moving it into
// place at the end.
- pub fn new(root: PathBuf) -> Result<Self> {
+ pub fn new(root: PathBuf, encrypt: bool) -> Result<Self> {
std::fs::create_dir(&root)
.with_context(|| format!("failed to create snapshot root dir: {}", root.display()))?;
- Ok(Self { dir: root })
+
+ if encrypt {
+ let key = crypto::generate_random_key();
+ // Creating an empty CryptWriter will still write header information
+ // to the file, and that header information is what we need. This
+ // ensures we use a single key for *all* snapshot files.
+ let mut writer = crypto::CryptWriter::new_from_key(
+ File::create(root.join("enc_metadata")).context("failed to create enc_metadata")?,
+ 1024,
+ &key,
+ )
+ .context("failed to create enc_metadata writer")?;
+ writer.flush().context("flush of enc_metadata failed")?;
+ return Ok(Self {
+ dir: root,
+ key: Some(key),
+ });
+ }
+
+ Ok(Self {
+ dir: root,
+ key: None,
+ })
}
/// Creates a snapshot fragment and get access to the `Write` impl representing it.
pub fn raw_fragment(&self, name: &str) -> Result<Box<dyn Write>> {
+ self.raw_fragment_with_chunk_size(name, DEFAULT_ENCRYPTED_CHUNK_SIZE_BYTES)
+ }
+
+ /// When encryption is used, allows direct control of the encrypted chunk size.
+ pub fn raw_fragment_with_chunk_size(
+ &self,
+ name: &str,
+ chunk_size_bytes: usize,
+ ) -> Result<Box<dyn Write>> {
let path = self.dir.join(name);
let file = File::options()
.write(true)
@@ -48,6 +98,15 @@
path.display()
)
})?;
+
+ if let Some(key) = self.key.as_ref() {
+ return Ok(Box::new(crypto::CryptWriter::new_from_key(
+ file,
+ chunk_size_bytes,
+ key,
+ )?));
+ }
+
Ok(Box::new(file))
}
@@ -69,20 +128,50 @@
dir.display()
)
})?;
- Ok(Self { dir })
+ Ok(Self {
+ dir,
+ key: self.key.clone(),
+ })
}
}
/// Reads snapshots created by `SnapshotWriter`.
-#[derive(Clone, Debug, serde::Serialize, serde::Deserialize)]
+#[derive(Clone, serde::Serialize, serde::Deserialize)]
pub struct SnapshotReader {
dir: PathBuf,
+ /// If encryption is used, the plaintext key will be stored here.
+ key: Option<CryptKey>,
+}
+
+impl Debug for SnapshotReader {
+ fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
+ f.debug_struct("SnapshotReader")
+ .field("dir", &format!("{:?}", self.dir))
+ .field("key", if self.key.is_some() { &"Some" } else { &"None" })
+ .finish()
+ }
}
impl SnapshotReader {
- /// Reads a snapshot at `root`.
- pub fn new(root: PathBuf) -> Result<Self> {
- Ok(Self { dir: root })
+ /// Reads a snapshot at `root`. Set require_encrypted to require an encrypted snapshot.
+ pub fn new(root: PathBuf, require_encrypted: bool) -> Result<Self> {
+ let enc_metadata_path = root.join("enc_metadata");
+ if Path::exists(&enc_metadata_path) {
+ let key = Some(
+ crypto::CryptReader::extract_key(
+ File::open(&enc_metadata_path).context("failed to open encryption metadata")?,
+ )
+ .context("failed to load snapshot key")?,
+ );
+ return Ok(Self { dir: root, key });
+ } else if require_encrypted {
+ return Err(anyhow::anyhow!("snapshot was not encrypted"));
+ }
+
+ Ok(Self {
+ dir: root,
+ key: None,
+ })
}
/// Gets access to a `Read` impl that represents a fragment.
@@ -94,6 +183,10 @@
path.display()
)
})?;
+ if let Some(key) = self.key.as_ref() {
+ return Ok(Box::new(crypto::CryptReader::from_file_and_key(file, key)?));
+ }
+
Ok(Box::new(file))
}
@@ -121,7 +214,10 @@
/// Open a namespace.
pub fn namespace(&self, name: &str) -> Result<Self> {
let dir = self.dir.join(name);
- Ok(Self { dir })
+ Ok(Self {
+ dir,
+ key: self.key.clone(),
+ })
}
/// Reads the names of all child namespaces
diff --git a/vm_memory/Android.bp b/vm_memory/Android.bp
index 461e85a..5ab3db8 100644
--- a/vm_memory/Android.bp
+++ b/vm_memory/Android.bp
@@ -1,5 +1,6 @@
// This file is generated by cargo_embargo.
-// Do not modify this file as most changes will be overridden on upgrade.
+// Do not modify this file after the first "rust_*" or "genrule" module
+// because the changes will be overridden on upgrade.
// Content before the first "rust_*" or "genrule" module is preserved.
package {
diff --git a/win_audio/src/win_audio_impl/mod.rs b/win_audio/src/win_audio_impl/mod.rs
index 7b87047..1840e51 100644
--- a/win_audio/src/win_audio_impl/mod.rs
+++ b/win_audio/src/win_audio_impl/mod.rs
@@ -1801,7 +1801,7 @@
use std::thread;
use cros_async::Executor;
- use metrics::MetricEventType;
+ use metrics::sys::WaveFormatDetails;
use winapi::shared::ksmedia::KSDATAFORMAT_SUBTYPE_IEEE_FLOAT;
use winapi::shared::mmreg::WAVEFORMATEX;
use winapi::shared::mmreg::WAVEFORMATEXTENSIBLE;
@@ -2038,8 +2038,8 @@
assert!(check_format(
&audio_client,
&format,
- WaveFormatDetailsProto::new(),
- MetricEventType::AudioFormatRequestOk,
+ WaveFormatDetails::default(),
+ AudioFormatEventType::RequestOk,
)
.is_ok());
@@ -2067,8 +2067,8 @@
assert!(check_format(
&audio_client,
&format,
- WaveFormatDetailsProto::new(),
- MetricEventType::AudioFormatRequestOk,
+ WaveFormatDetails::default(),
+ AudioFormatEventType::RequestOk,
)
.is_ok());
@@ -2097,8 +2097,8 @@
assert!(check_format(
&audio_client,
&format,
- WaveFormatDetailsProto::new(),
- MetricEventType::AudioFormatRequestOk,
+ WaveFormatDetails::default(),
+ AudioFormatEventType::RequestOk,
)
.is_err());
}
diff --git a/win_audio/src/win_audio_impl/wave_format.rs b/win_audio/src/win_audio_impl/wave_format.rs
index 9fd0eaf..0e6ed69 100644
--- a/win_audio/src/win_audio_impl/wave_format.rs
+++ b/win_audio/src/win_audio_impl/wave_format.rs
@@ -11,10 +11,9 @@
use base::info;
use base::warn;
use base::Error;
-use metrics::protos::event_details::wave_format::WaveFormatSubFormat;
-use metrics::protos::event_details::RecordDetails;
-use metrics::protos::event_details::WaveFormat;
-use metrics::protos::event_details::WaveFormatDetails;
+use metrics::sys::WaveFormat as WaveFormatMetric;
+use metrics::sys::WaveFormatDetails as WaveFormatDetailsMetric;
+use metrics::sys::WaveFormatSubFormat as WaveFormatSubFormatMetric;
use metrics::MetricEventType;
use winapi::shared::guiddef::IsEqualGUID;
use winapi::shared::guiddef::GUID;
@@ -46,10 +45,6 @@
use crate::MONO_CHANNEL_COUNT;
use crate::STEREO_CHANNEL_COUNT;
-pub type WaveFormatDetailsProto = WaveFormatDetails;
-pub type WaveFormatProto = WaveFormat;
-pub type SubFormatProto = WaveFormatSubFormat;
-
/// Wrapper around `WAVEFORMATEX` and `WAVEFORMATEXTENSIBLE` to hide some of the unsafe calls
/// that could be made.
pub enum WaveAudioFormat {
@@ -59,6 +54,12 @@
WaveFormatExtensible(WAVEFORMATEXTENSIBLE),
}
+pub(crate) enum AudioFormatEventType {
+ RequestOk,
+ ModifiedOk,
+ Failed,
+}
+
impl WaveAudioFormat {
/// Wraps a WAVEFORMATEX pointer to make it's use more safe.
///
@@ -389,59 +390,51 @@
}
}
-impl From<&WaveAudioFormat> for WaveFormatProto {
- fn from(format: &WaveAudioFormat) -> WaveFormatProto {
- let mut wave_format_proto = WaveFormatProto::new();
-
+impl From<&WaveAudioFormat> for WaveFormatMetric {
+ fn from(format: &WaveAudioFormat) -> WaveFormatMetric {
match format {
- WaveAudioFormat::WaveFormat(wave_format) => {
- wave_format_proto.set_format_tag(wave_format.wFormatTag.into());
- wave_format_proto.set_channels(wave_format.nChannels.into());
- wave_format_proto.set_samples_per_sec(
- wave_format
- .nSamplesPerSec
- .try_into()
- .expect("Failed to cast nSamplesPerSec to i32"),
- );
- wave_format_proto.set_avg_bytes_per_sec(
- wave_format
- .nAvgBytesPerSec
- .try_into()
- .expect("Failed to cast nAvgBytesPerSec"),
- );
- wave_format_proto.set_block_align(wave_format.nBlockAlign.into());
- wave_format_proto.set_bits_per_sample(wave_format.wBitsPerSample.into());
- wave_format_proto.set_size_bytes(wave_format.cbSize.into());
- }
+ WaveAudioFormat::WaveFormat(wave_format) => WaveFormatMetric {
+ format_tag: wave_format.wFormatTag.into(),
+ channels: wave_format.nChannels.into(),
+ samples_per_sec: wave_format
+ .nSamplesPerSec
+ .try_into()
+ .expect("Failed to cast nSamplesPerSec to i32"),
+ avg_bytes_per_sec: wave_format
+ .nAvgBytesPerSec
+ .try_into()
+ .expect("Failed to cast nAvgBytesPerSec"),
+ block_align: wave_format.nBlockAlign.into(),
+ bits_per_sample: wave_format.wBitsPerSample.into(),
+ size_bytes: wave_format.cbSize.into(),
+ samples: None,
+ channel_mask: None,
+ sub_format: None,
+ },
WaveAudioFormat::WaveFormatExtensible(wave_format_extensible) => {
- wave_format_proto.set_format_tag(wave_format_extensible.Format.wFormatTag.into());
- wave_format_proto.set_channels(wave_format_extensible.Format.nChannels.into());
- wave_format_proto.set_samples_per_sec(
- wave_format_extensible
+ let sub_format = wave_format_extensible.SubFormat;
+ WaveFormatMetric {
+ format_tag: wave_format_extensible.Format.wFormatTag.into(),
+ channels: wave_format_extensible.Format.nChannels.into(),
+ samples_per_sec: wave_format_extensible
.Format
.nSamplesPerSec
.try_into()
.expect("Failed to cast nSamplesPerSec to i32"),
- );
- wave_format_proto.set_avg_bytes_per_sec(
- wave_format_extensible
+ avg_bytes_per_sec: wave_format_extensible
.Format
.nAvgBytesPerSec
.try_into()
.expect("Failed to cast nAvgBytesPerSec"),
- );
- wave_format_proto.set_block_align(wave_format_extensible.Format.nBlockAlign.into());
- wave_format_proto
- .set_bits_per_sample(wave_format_extensible.Format.wBitsPerSample.into());
- wave_format_proto.set_size_bytes(wave_format_extensible.Format.cbSize.into());
- wave_format_proto.set_samples(wave_format_extensible.Samples.into());
- wave_format_proto.set_channel_mask(wave_format_extensible.dwChannelMask.into());
- let sub_format = wave_format_extensible.SubFormat;
- wave_format_proto.set_sub_format(GuidWrapper(&sub_format).into());
+ block_align: wave_format_extensible.Format.nBlockAlign.into(),
+ bits_per_sample: wave_format_extensible.Format.wBitsPerSample.into(),
+ size_bytes: wave_format_extensible.Format.cbSize.into(),
+ samples: Some(wave_format_extensible.Samples.into()),
+ channel_mask: Some(wave_format_extensible.dwChannelMask.into()),
+ sub_format: Some(GuidWrapper(&sub_format).into()),
+ }
}
}
-
- wave_format_proto
}
}
@@ -465,18 +458,18 @@
WaveAudioFormat::new(format_ptr)
};
- let mut wave_format_details = WaveFormatDetailsProto::new();
- let mut event_code = MetricEventType::AudioFormatRequestOk;
- wave_format_details.requested = Some(WaveFormatProto::from(&format)).into();
+ let mut wave_format_details = WaveFormatDetailsMetric::default();
+ let mut event_code = AudioFormatEventType::RequestOk;
+ wave_format_details.requested = Some(WaveFormatMetric::from(&format));
info!("Printing mix format from `GetMixFormat`:\n{:?}", format);
const BIT_DEPTH: usize = 32;
format.modify_mix_format(BIT_DEPTH, KSDATAFORMAT_SUBTYPE_IEEE_FLOAT);
- let modified_wave_format = Some(WaveFormatProto::from(&format)).into();
+ let modified_wave_format = Some(WaveFormatMetric::from(&format));
if modified_wave_format != wave_format_details.requested {
wave_format_details.modified = modified_wave_format;
- event_code = MetricEventType::AudioFormatModifiedOk;
+ event_code = AudioFormatEventType::ModifiedOk;
}
info!("Audio Engine Mix Format Used: \n{:?}", format);
@@ -491,8 +484,8 @@
pub(crate) fn check_format(
audio_client: &IAudioClient,
format: &WaveAudioFormat,
- mut wave_format_details: WaveFormatDetailsProto,
- event_code: MetricEventType,
+ mut wave_format_details: WaveFormatDetailsMetric,
+ event_code: AudioFormatEventType,
) -> Result<(), WinAudioError> {
let mut closest_match_format: *mut WAVEFORMATEX = std::ptr::null_mut();
// SAFETY: All values passed into `IsFormatSupport` is owned by us and we will
@@ -511,8 +504,7 @@
// SAFETY: If the `hr` value is `S_FALSE`, then `IsFormatSupported` must've
// given us a closest match.
let closest_match_enum = unsafe { WaveAudioFormat::new(closest_match_format) };
- wave_format_details.closest_matched =
- Some(WaveFormatProto::from(&closest_match_enum)).into();
+ wave_format_details.closest_matched = Some(WaveFormatMetric::from(&closest_match_enum));
error!(
"Current audio format not supported, the closest format is:\n{:?}",
@@ -526,7 +518,7 @@
let last_error = Error::last();
// TODO:(b/253509368): Only upload for audio rendering, since these metrics can't
// differentiate between rendering and capture.
- upload_metrics(wave_format_details, MetricEventType::AudioFormatFailed);
+ upload_metrics(wave_format_details, AudioFormatEventType::Failed);
Err(WinAudioError::WindowsError(hr, last_error))
} else {
@@ -536,38 +528,38 @@
}
}
-fn upload_metrics(
- wave_format_details: WaveFormatDetailsProto,
- metrics_event_code: MetricEventType,
-) {
- let mut details = RecordDetails::new();
- details.wave_format_details = Some(wave_format_details).into();
- metrics::log_event_with_details(metrics_event_code, &details);
+fn upload_metrics(details: WaveFormatDetailsMetric, event_type: AudioFormatEventType) {
+ let event = match event_type {
+ AudioFormatEventType::RequestOk => MetricEventType::AudioFormatRequestOk(details),
+ AudioFormatEventType::ModifiedOk => MetricEventType::AudioFormatModifiedOk(details),
+ AudioFormatEventType::Failed => MetricEventType::AudioFormatFailed(details),
+ };
+ metrics::log_event(event);
}
struct GuidWrapper<'a>(&'a GUID);
-impl<'a> From<GuidWrapper<'a>> for SubFormatProto {
- fn from(guid: GuidWrapper) -> SubFormatProto {
+impl<'a> From<GuidWrapper<'a>> for WaveFormatSubFormatMetric {
+ fn from(guid: GuidWrapper) -> WaveFormatSubFormatMetric {
let guid = guid.0;
if IsEqualGUID(guid, &KSDATAFORMAT_SUBTYPE_ANALOG) {
- SubFormatProto::KSDATAFORMAT_SUBTYPE_ANALOG
+ WaveFormatSubFormatMetric::Analog
} else if IsEqualGUID(guid, &KSDATAFORMAT_SUBTYPE_PCM) {
- SubFormatProto::KSDATAFORMAT_SUBTYPE_PCM
+ WaveFormatSubFormatMetric::Pcm
} else if IsEqualGUID(guid, &KSDATAFORMAT_SUBTYPE_IEEE_FLOAT) {
- SubFormatProto::KSDATAFORMAT_SUBTYPE_IEEE_FLOAT
+ WaveFormatSubFormatMetric::IeeeFloat
} else if IsEqualGUID(guid, &KSDATAFORMAT_SUBTYPE_DRM) {
- SubFormatProto::KSDATAFORMAT_SUBTYPE_DRM
+ WaveFormatSubFormatMetric::Drm
} else if IsEqualGUID(guid, &KSDATAFORMAT_SUBTYPE_ALAW) {
- SubFormatProto::KSDATAFORMAT_SUBTYPE_ALAW
+ WaveFormatSubFormatMetric::ALaw
} else if IsEqualGUID(guid, &KSDATAFORMAT_SUBTYPE_MULAW) {
- SubFormatProto::KSDATAFORMAT_SUBTYPE_MULAW
+ WaveFormatSubFormatMetric::MuLaw
} else if IsEqualGUID(guid, &KSDATAFORMAT_SUBTYPE_ADPCM) {
- SubFormatProto::KSDATAFORMAT_SUBTYPE_ADPCM
+ WaveFormatSubFormatMetric::Adpcm
} else if IsEqualGUID(guid, &KSDATAFORMAT_SUBTYPE_MPEG) {
- SubFormatProto::KSDATAFORMAT_SUBTYPE_MPEG
+ WaveFormatSubFormatMetric::Mpeg
} else {
- SubFormatProto::KSDATAFORMAT_SUBTYPE_INVALID
+ WaveFormatSubFormatMetric::Invalid
}
}
}
@@ -1150,18 +1142,22 @@
unsafe { WaveAudioFormat::new((&wave_format) as *const _ as *mut WAVEFORMATEX) };
// Testing the `into`.
- let wave_format_proto = WaveFormatProto::from(&wave_audio_format);
+ let wave_format_metric = WaveFormatMetric::from(&wave_audio_format);
- let mut expected = WaveFormatProto::new();
- expected.set_format_tag(WAVE_FORMAT_PCM.into());
- expected.set_channels(2);
- expected.set_samples_per_sec(48000);
- expected.set_avg_bytes_per_sec(192000);
- expected.set_block_align(4);
- expected.set_bits_per_sample(16);
- expected.set_size_bytes(0);
+ let expected = WaveFormatMetric {
+ format_tag: WAVE_FORMAT_PCM.into(),
+ channels: 2,
+ samples_per_sec: 48000,
+ avg_bytes_per_sec: 192000,
+ block_align: 4,
+ bits_per_sample: 16,
+ size_bytes: 0,
+ samples: None,
+ channel_mask: None,
+ sub_format: None,
+ };
- assert_eq!(wave_format_proto, expected);
+ assert_eq!(wave_format_metric, expected);
}
#[test]
@@ -1188,20 +1184,21 @@
};
// Testing the `into`.
- let wave_format_proto = WaveFormatProto::from(&wave_audio_format);
+ let wave_format_metric = WaveFormatMetric::from(&wave_audio_format);
- let mut expected = WaveFormatProto::new();
- expected.set_format_tag(WAVE_FORMAT_EXTENSIBLE.into());
- expected.set_channels(2);
- expected.set_samples_per_sec(48000);
- expected.set_avg_bytes_per_sec(8 * 48000);
- expected.set_block_align(8);
- expected.set_bits_per_sample(32);
- expected.set_size_bytes(22);
- expected.set_samples(32);
- expected.set_channel_mask((SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT) as i64);
- expected.set_sub_format(GuidWrapper(&KSDATAFORMAT_SUBTYPE_IEEE_FLOAT).into());
+ let expected = WaveFormatMetric {
+ format_tag: WAVE_FORMAT_EXTENSIBLE.into(),
+ channels: 2,
+ samples_per_sec: 48000,
+ avg_bytes_per_sec: 8 * 48000,
+ block_align: 8,
+ bits_per_sample: 32,
+ size_bytes: 22,
+ samples: Some(32),
+ channel_mask: Some((SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT) as i64),
+ sub_format: Some(WaveFormatSubFormatMetric::IeeeFloat),
+ };
- assert_eq!(wave_format_proto, expected);
+ assert_eq!(wave_format_metric, expected);
}
}
diff --git a/x86_64/Android.bp b/x86_64/Android.bp
index 266c603..4c882f2 100644
--- a/x86_64/Android.bp
+++ b/x86_64/Android.bp
@@ -1,5 +1,6 @@
// This file is generated by cargo_embargo.
-// Do not modify this file as most changes will be overridden on upgrade.
+// Do not modify this file after the first "rust_*" or "genrule" module
+// because the changes will be overridden on upgrade.
// Content before the first "rust_*" or "genrule" module is preserved.
package {
diff --git a/x86_64/Cargo.toml b/x86_64/Cargo.toml
index ed80c02..963848b 100644
--- a/x86_64/Cargo.toml
+++ b/x86_64/Cargo.toml
@@ -14,7 +14,7 @@
arch = { path = "../arch" }
anyhow = "*"
cfg-if = "1.0.0"
-chrono = { version = "0.4.19", default-features = false }
+chrono = { version = "0.4.34", default-features = false }
cros_fdt = { path = "../cros_fdt" }
devices = { path = "../devices" }
gdbstub_arch = { version = "0.3.0", optional = true }
diff --git a/x86_64/src/acpi.rs b/x86_64/src/acpi.rs
index 23087eb..bd25e8f 100644
--- a/x86_64/src/acpi.rs
+++ b/x86_64/src/acpi.rs
@@ -408,7 +408,7 @@
_space_id: ADR_SPACE_SYSTEM_IO,
_bit_width: 8,
_bit_offset: 0,
- _access_width: 8,
+ _access_width: 1,
_address: reset_port.into(),
},
);
diff --git a/x86_64/src/lib.rs b/x86_64/src/lib.rs
index c526ea4..6202e24 100644
--- a/x86_64/src/lib.rs
+++ b/x86_64/src/lib.rs
@@ -52,6 +52,7 @@
use acpi_tables::sdt::SDT;
use anyhow::Context;
use arch::get_serial_cmdline;
+use arch::serial::SerialDeviceInfo;
use arch::CpuSet;
use arch::DtbOverlay;
use arch::GetSerialCmdlineError;
@@ -872,7 +873,7 @@
} else {
None
};
- Self::setup_serial_devices(
+ let serial_devices = Self::setup_serial_devices(
components.hv_cfg.protection_type,
irq_chip.as_irq_chip_mut(),
&io_bus,
@@ -988,7 +989,7 @@
let mut cmdline = Self::get_base_linux_cmdline();
- get_serial_cmdline(&mut cmdline, serial_parameters, "io")
+ get_serial_cmdline(&mut cmdline, serial_parameters, "io", &serial_devices)
.map_err(Error::GetSerialCmdline)?;
for param in components.extra_kernel_params {
@@ -1003,8 +1004,8 @@
let pci_start = read_pci_mmio_before_32bit().start;
let mut vcpu_init = vec![VcpuInitX86_64::default(); vcpu_count];
+ let mut msrs = BTreeMap::new();
- let mut msrs;
match components.vm_image {
VmImage::Bios(ref mut bios) => {
// Allow a bios to hardcode CMDLINE_OFFSET and read the kernel command line from it.
@@ -1015,7 +1016,7 @@
)
.map_err(Error::LoadCmdline)?;
Self::load_bios(&mem, bios)?;
- msrs = regs::default_msrs();
+ regs::set_default_msrs(&mut msrs);
// The default values for `Regs` and `Sregs` already set up the reset vector.
}
VmImage::Kernel(ref mut kernel_image) => {
@@ -1038,8 +1039,8 @@
vcpu_init[0].regs.rsp = BOOT_STACK_POINTER;
vcpu_init[0].regs.rsi = ZERO_PAGE_OFFSET;
- msrs = regs::long_mode_msrs();
- msrs.append(&mut regs::mtrr_msrs(&vm, pci_start));
+ regs::set_long_mode_msrs(&mut msrs);
+ regs::set_mtrr_msrs(&mut msrs, &vm, pci_start);
// Set up long mode and enable paging.
regs::configure_segments_and_sregs(&mem, &mut vcpu_init[0].sregs)
@@ -1110,23 +1111,25 @@
let vcpu_supported_var_mtrrs = regs::vcpu_supported_variable_mtrrs(vcpu);
let num_var_mtrrs = regs::count_variable_mtrrs(&vcpu_init.msrs);
- let msrs = if num_var_mtrrs > vcpu_supported_var_mtrrs {
+ let skip_mtrr_msrs = if num_var_mtrrs > vcpu_supported_var_mtrrs {
warn!(
"Too many variable MTRR entries ({} required, {} supported),
please check pci_start addr, guest with pass through device may be very slow",
num_var_mtrrs, vcpu_supported_var_mtrrs,
);
// Filter out the MTRR entries from the MSR list.
- vcpu_init
- .msrs
- .into_iter()
- .filter(|&msr| !regs::is_mtrr_msr(msr.id))
- .collect()
+ true
} else {
- vcpu_init.msrs
+ false
};
- vcpu.set_msrs(&msrs).map_err(Error::SetupMsrs)?;
+ for (msr_index, value) in vcpu_init.msrs.into_iter() {
+ if skip_mtrr_msrs && regs::is_mtrr_msr(msr_index) {
+ continue;
+ }
+
+ vcpu.set_msr(msr_index, value).map_err(Error::SetupMsrs)?;
+ }
interrupts::set_lint(vcpu_id, irq_chip).map_err(Error::SetLint)?;
@@ -2172,14 +2175,13 @@
))
}
- /// Sets up the serial devices for this platform. Returns the serial port number and serial
- /// device to be used for stdout
+ /// Sets up the serial devices for this platform. Returns a list of configured serial devices.
///
/// # Arguments
///
/// * - `irq_chip` the IrqChip object for registering irq events
/// * - `io_bus` the I/O bus to add the devices to
- /// * - `serial_parmaters` - definitions for how the serial devices should be configured
+ /// * - `serial_parameters` - definitions for how the serial devices should be configured
pub fn setup_serial_devices(
protection_type: ProtectionType,
irq_chip: &mut dyn IrqChip,
@@ -2187,15 +2189,15 @@
serial_parameters: &BTreeMap<(SerialHardware, u8), SerialParameters>,
serial_jail: Option<Minijail>,
#[cfg(feature = "swap")] swap_controller: &mut Option<swap::SwapController>,
- ) -> Result<()> {
+ ) -> Result<Vec<SerialDeviceInfo>> {
let com_evt_1_3 = devices::IrqEdgeEvent::new().map_err(Error::CreateEvent)?;
let com_evt_2_4 = devices::IrqEdgeEvent::new().map_err(Error::CreateEvent)?;
- arch::add_serial_devices(
+ let serial_devices = arch::add_serial_devices(
protection_type,
io_bus,
- com_evt_1_3.get_trigger(),
- com_evt_2_4.get_trigger(),
+ (X86_64_SERIAL_1_3_IRQ, com_evt_1_3.get_trigger()),
+ (X86_64_SERIAL_2_4_IRQ, com_evt_2_4.get_trigger()),
serial_parameters,
serial_jail,
#[cfg(feature = "swap")]
@@ -2215,7 +2217,7 @@
.register_edge_irq_event(X86_64_SERIAL_2_4_IRQ, &com_evt_2_4, source)
.map_err(Error::RegisterIrqfd)?;
- Ok(())
+ Ok(serial_devices)
}
fn setup_debugcon_devices(
diff --git a/x86_64/src/regs.rs b/x86_64/src/regs.rs
index 2dc48b8..cbdd189 100644
--- a/x86_64/src/regs.rs
+++ b/x86_64/src/regs.rs
@@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+use std::collections::BTreeMap;
use std::mem;
use std::result;
use base::warn;
-use hypervisor::Register;
use hypervisor::Sregs;
use hypervisor::VcpuX86_64;
use hypervisor::Vm;
@@ -98,15 +98,12 @@
/// Returns the number of variable MTRR entries supported by `vcpu`.
pub fn vcpu_supported_variable_mtrrs(vcpu: &dyn VcpuX86_64) -> usize {
// Get VAR MTRR num from MSR_MTRRcap
- let mut msrs = vec![Register {
- id: crate::msr_index::MSR_MTRRcap,
- ..Default::default()
- }];
- if vcpu.get_msrs(&mut msrs).is_err() {
- warn!("get msrs fail, guest with pass through device may be very slow");
- 0
- } else {
- (msrs[0].value & VAR_MTRR_NUM_MASK) as usize
+ match vcpu.get_msr(crate::msr_index::MSR_MTRRcap) {
+ Ok(value) => (value & VAR_MTRR_NUM_MASK) as usize,
+ Err(_e) => {
+ warn!("failed to get MSR_MTRRcap, guests with passthrough devices may be very slow");
+ 0
+ }
}
}
@@ -119,105 +116,64 @@
}
/// Returns the count of variable MTRR entries specified by the list of `msrs`.
-pub fn count_variable_mtrrs(msrs: &[Register]) -> usize {
+pub fn count_variable_mtrrs(msrs: &BTreeMap<u32, u64>) -> usize {
// Each variable MTRR takes up two MSRs (base + mask), so divide by 2. This will also count the
// MTRRdefType entry, but that is only one extra and the division truncates, so it won't affect
// the final count.
- msrs.iter().filter(|msr| is_mtrr_msr(msr.id)).count() / 2
+ msrs.keys().filter(|&msr| is_mtrr_msr(*msr)).count() / 2
}
/// Returns a set of MSRs containing the MTRR configuration.
-pub fn mtrr_msrs(vm: &dyn Vm, pci_start: u64) -> Vec<Register> {
+pub fn set_mtrr_msrs(msrs: &mut BTreeMap<u32, u64>, vm: &dyn Vm, pci_start: u64) {
// Set pci_start .. 4G as UC
// all others are set to default WB
let pci_len = (1 << 32) - pci_start;
let vecs = get_mtrr_pairs(pci_start, pci_len);
- let mut entries = Vec::new();
-
let phys_mask: u64 = (1 << vm.get_guest_phys_addr_bits()) - 1;
for (idx, (base, len)) in vecs.iter().enumerate() {
let reg_idx = idx as u32 * 2;
- entries.push(Register {
- id: MTRR_PHYS_BASE_MSR + reg_idx,
- value: base | MTRR_MEMTYPE_UC as u64,
- });
+ msrs.insert(MTRR_PHYS_BASE_MSR + reg_idx, base | MTRR_MEMTYPE_UC as u64);
let mask: u64 = len.wrapping_neg() & phys_mask | MTRR_VAR_VALID;
- entries.push(Register {
- id: MTRR_PHYS_MASK_MSR + reg_idx,
- value: mask,
- });
+ msrs.insert(MTRR_PHYS_MASK_MSR + reg_idx, mask);
}
// Disable fixed MTRRs and enable variable MTRRs, set default type as WB
- entries.push(Register {
- id: crate::msr_index::MSR_MTRRdefType,
- value: MTRR_ENABLE | MTRR_MEMTYPE_WB as u64,
- });
- entries
+ msrs.insert(
+ crate::msr_index::MSR_MTRRdefType,
+ MTRR_ENABLE | MTRR_MEMTYPE_WB as u64,
+ );
}
/// Returns the default value of MSRs at reset.
///
/// Currently only sets IA32_TSC to 0.
-pub fn default_msrs() -> Vec<Register> {
- vec![
- Register {
- id: crate::msr_index::MSR_IA32_TSC,
- value: 0x0,
- },
- Register {
- id: crate::msr_index::MSR_IA32_MISC_ENABLE,
- value: crate::msr_index::MSR_IA32_MISC_ENABLE_FAST_STRING as u64,
- },
- ]
+pub fn set_default_msrs(msrs: &mut BTreeMap<u32, u64>) {
+ msrs.insert(crate::msr_index::MSR_IA32_TSC, 0x0);
+ msrs.insert(
+ crate::msr_index::MSR_IA32_MISC_ENABLE,
+ crate::msr_index::MSR_IA32_MISC_ENABLE_FAST_STRING as u64,
+ );
}
/// Configure Model specific registers for long (64-bit) mode.
-pub fn long_mode_msrs() -> Vec<Register> {
- vec![
- Register {
- id: crate::msr_index::MSR_IA32_SYSENTER_CS,
- value: 0x0,
- },
- Register {
- id: crate::msr_index::MSR_IA32_SYSENTER_ESP,
- value: 0x0,
- },
- Register {
- id: crate::msr_index::MSR_IA32_SYSENTER_EIP,
- value: 0x0,
- },
- // x86_64 specific msrs, we only run on x86_64 not x86
- Register {
- id: crate::msr_index::MSR_STAR,
- value: 0x0,
- },
- Register {
- id: crate::msr_index::MSR_CSTAR,
- value: 0x0,
- },
- Register {
- id: crate::msr_index::MSR_KERNEL_GS_BASE,
- value: 0x0,
- },
- Register {
- id: crate::msr_index::MSR_SYSCALL_MASK,
- value: 0x0,
- },
- Register {
- id: crate::msr_index::MSR_LSTAR,
- value: 0x0,
- },
- // end of x86_64 specific code
- Register {
- id: crate::msr_index::MSR_IA32_TSC,
- value: 0x0,
- },
- Register {
- id: crate::msr_index::MSR_IA32_MISC_ENABLE,
- value: crate::msr_index::MSR_IA32_MISC_ENABLE_FAST_STRING as u64,
- },
- ]
+pub fn set_long_mode_msrs(msrs: &mut BTreeMap<u32, u64>) {
+ msrs.insert(crate::msr_index::MSR_IA32_SYSENTER_CS, 0x0);
+ msrs.insert(crate::msr_index::MSR_IA32_SYSENTER_ESP, 0x0);
+ msrs.insert(crate::msr_index::MSR_IA32_SYSENTER_EIP, 0x0);
+
+ // x86_64 specific msrs, we only run on x86_64 not x86
+ msrs.insert(crate::msr_index::MSR_STAR, 0x0);
+ msrs.insert(crate::msr_index::MSR_CSTAR, 0x0);
+ msrs.insert(crate::msr_index::MSR_KERNEL_GS_BASE, 0x0);
+ msrs.insert(crate::msr_index::MSR_SYSCALL_MASK, 0x0);
+ msrs.insert(crate::msr_index::MSR_LSTAR, 0x0);
+ // end of x86_64 specific code
+
+ msrs.insert(crate::msr_index::MSR_IA32_TSC, 0x0);
+ msrs.insert(
+ crate::msr_index::MSR_IA32_MISC_ENABLE,
+ crate::msr_index::MSR_IA32_MISC_ENABLE_FAST_STRING as u64,
+ );
}
const X86_CR0_PE: u64 = 0x1;
diff --git a/x86_64/tests/integration/main.rs b/x86_64/tests/integration/main.rs
index 4948f59..9c617b2 100644
--- a/x86_64/tests/integration/main.rs
+++ b/x86_64/tests/integration/main.rs
@@ -45,8 +45,8 @@
use x86_64::read_pci_mmio_before_32bit;
use x86_64::read_pcie_cfg_mmio;
use x86_64::regs::configure_segments_and_sregs;
-use x86_64::regs::long_mode_msrs;
-use x86_64::regs::mtrr_msrs;
+use x86_64::regs::set_long_mode_msrs;
+use x86_64::regs::set_mtrr_msrs;
use x86_64::regs::setup_page_tables;
use x86_64::smbios;
use x86_64::X8664arch;
@@ -281,9 +281,12 @@
setup_cpuid(&hyp, &irq_chip, &vcpu, 0, 1, cpu_config).unwrap();
}
- let mut msrs = long_mode_msrs();
- msrs.append(&mut mtrr_msrs(&vm, read_pci_mmio_before_32bit().start));
- vcpu.set_msrs(&msrs).unwrap();
+ let mut msrs = BTreeMap::new();
+ set_long_mode_msrs(&mut msrs);
+ set_mtrr_msrs(&mut msrs, &vm, read_pci_mmio_before_32bit().start);
+ for (msr_index, value) in msrs {
+ vcpu.set_msr(msr_index, value).unwrap();
+ }
let mut vcpu_regs = Regs {
rip: start_addr.offset(),