From 52f19c427681cc7f1d59c50ee0473353ef38d852 Mon Sep 17 00:00:00 2001 From: Brendan Allan Date: Wed, 3 Sep 2025 18:37:09 +0800 Subject: [PATCH 01/20] instant recording crate --- Cargo.lock | 8 ++++++++ crates/instant-recording/Cargo.toml | 13 +++++++++++++ crates/instant-recording/src/lib.rs | 15 +++++++++++++++ 3 files changed, 36 insertions(+) create mode 100644 crates/instant-recording/Cargo.toml create mode 100644 crates/instant-recording/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index f86cdc3c68..3c4b420b74 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1204,6 +1204,14 @@ dependencies = [ "wgpu", ] +[[package]] +name = "cap-instant-recording" +version = "0.1.0" +dependencies = [ + "windows 0.60.0", + "windows-core 0.60.1", +] + [[package]] name = "cap-media" version = "0.1.0" diff --git a/crates/instant-recording/Cargo.toml b/crates/instant-recording/Cargo.toml new file mode 100644 index 0000000000..f155f4b723 --- /dev/null +++ b/crates/instant-recording/Cargo.toml @@ -0,0 +1,13 @@ +[package] +name = "cap-instant-recording" +version = "0.1.0" +edition = "2024" + +[dependencies] + +[target.'cfg(windows)'.dependencies] +windows-core = { workspace = true } +windows = { workspace = true, features = ["Win32_System_Performance"] } + +[lints] +workspace = true diff --git a/crates/instant-recording/src/lib.rs b/crates/instant-recording/src/lib.rs new file mode 100644 index 0000000000..eb56e45cbd --- /dev/null +++ b/crates/instant-recording/src/lib.rs @@ -0,0 +1,15 @@ +use std::time::{Instant, SystemTime}; + +struct MultiSourceTimestamp { + instant: Instant, + system_time: SystemTime, +} + +impl MultiSourceTimestamp { + pub fn now() -> Self { + Self { + instant: Instant::now(), + system_time: SystemTime::now(), + } + } +} From 3cbe0fbadf97f01c7c14e399060b671df417c597 Mon Sep 17 00:00:00 2001 From: Brendan Allan Date: Wed, 3 Sep 2025 21:11:47 +0800 Subject: [PATCH 02/20] StartSourceTimestamp --- crates/recording/src/capture_pipeline.rs | 25 +++++++++++++++++++++++- 
1 file changed, 24 insertions(+), 1 deletion(-) diff --git a/crates/recording/src/capture_pipeline.rs b/crates/recording/src/capture_pipeline.rs index 30fc96f14e..d06dc14abb 100644 --- a/crates/recording/src/capture_pipeline.rs +++ b/crates/recording/src/capture_pipeline.rs @@ -14,9 +14,32 @@ use std::{ future::Future, path::PathBuf, sync::{Arc, atomic::AtomicBool}, - time::SystemTime, + time::{Instant, SystemTime}, }; +pub enum SourceTimestamp { + Instant(Instant), + SystemTime(SystemTime), + #[cfg(windows)] + PerformanceCounter(u64), +} + +struct StartSourceTimestamp { + instant: Instant, + system_time: SystemTime, + #[cfg(windows)] + performance_counter: u64, +} + +impl StartSourceTimestamp { + fn new() -> Self { + Self { + instant: Instant::now(), + system_time: SystemTime::now(), + } + } +} + pub trait MakeCapturePipeline: ScreenCaptureFormat + std::fmt::Debug + 'static { fn make_studio_mode_pipeline( builder: PipelineBuilder, From eb57961b5b9e059daee8a724fc200179f591fd01 Mon Sep 17 00:00:00 2001 From: Brendan Allan Date: Mon, 8 Sep 2025 22:16:28 +0800 Subject: [PATCH 03/20] SourceTimestamp --- Cargo.lock | 125 +-- Cargo.toml | 2 +- crates/camera-directshow/examples/cli.rs | 4 +- crates/camera-directshow/src/lib.rs | 19 +- crates/camera-mediafoundation/Cargo.toml | 2 + crates/camera-mediafoundation/src/lib.rs | 35 +- crates/camera-windows/examples/cli.rs | 7 +- crates/camera-windows/src/lib.rs | 12 +- crates/camera/src/lib.rs | 4 +- crates/camera/src/windows.rs | 4 +- crates/media-info/src/lib.rs | 28 +- crates/recording/Cargo.toml | 3 +- crates/recording/examples/recording-cli.rs | 4 +- crates/recording/src/capture_pipeline.rs | 191 +++- crates/recording/src/cursor.rs | 9 +- crates/recording/src/feeds/camera.rs | 13 +- crates/recording/src/feeds/microphone.rs | 3 + crates/recording/src/instant_recording.rs | 11 +- crates/recording/src/sources/audio_input.rs | 50 +- crates/recording/src/sources/audio_mixer.rs | 953 +++++++++++++++++- 
crates/recording/src/sources/camera.rs | 35 +- crates/recording/src/sources/mod.rs | 1 + .../recording/src/sources/new_audio_mixer.rs | 539 ++++++++++ .../src/sources/screen_capture/mod.rs | 17 +- .../src/sources/screen_capture/windows.rs | 42 +- crates/recording/src/studio_recording.rs | 88 +- 26 files changed, 1881 insertions(+), 320 deletions(-) create mode 100644 crates/recording/src/sources/new_audio_mixer.rs diff --git a/Cargo.lock b/Cargo.lock index 3c4b420b74..7949c27a9f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -866,7 +866,7 @@ name = "cap-audio" version = "0.1.0" dependencies = [ "cidre 0.11.0", - "cpal 0.15.3 (git+https://github.com/RustAudio/cpal?rev=f43d36e55494993bbbde3299af0c53e5cdf4d4cf)", + "cpal 0.15.3 (git+https://github.com/CapSoftware/cpal?rev=75a365a24507)", "ffmpeg-next", "tokio", ] @@ -954,7 +954,7 @@ name = "cap-cpal-ffmpeg" version = "0.1.0" dependencies = [ "cap-ffmpeg-utils", - "cpal 0.15.3 (git+https://github.com/RustAudio/cpal?rev=f43d36e55494993bbbde3299af0c53e5cdf4d4cf)", + "cpal 0.15.3 (git+https://github.com/CapSoftware/cpal?rev=75a365a24507)", "ffmpeg-next", ] @@ -1006,7 +1006,7 @@ dependencies = [ "cocoa 0.26.1", "core-foundation 0.10.1", "core-graphics 0.24.0", - "cpal 0.15.3 (git+https://github.com/RustAudio/cpal?rev=f43d36e55494993bbbde3299af0c53e5cdf4d4cf)", + "cpal 0.15.3 (git+https://github.com/CapSoftware/cpal?rev=75a365a24507)", "device_query", "dirs", "dotenvy_macro", @@ -1083,7 +1083,7 @@ dependencies = [ "cap-media-info", "cap-project", "cap-rendering", - "cpal 0.15.3 (git+https://github.com/RustAudio/cpal?rev=f43d36e55494993bbbde3299af0c53e5cdf4d4cf)", + "cpal 0.15.3 (git+https://github.com/CapSoftware/cpal?rev=75a365a24507)", "ffmpeg-next", "flume", "futures", @@ -1229,7 +1229,7 @@ name = "cap-media-info" version = "0.1.0" dependencies = [ "cap-ffmpeg-utils", - "cpal 0.15.3 (git+https://github.com/RustAudio/cpal?rev=f43d36e55494993bbbde3299af0c53e5cdf4d4cf)", + "cpal 0.15.3 
(git+https://github.com/CapSoftware/cpal?rev=75a365a24507)", "ffmpeg-next", "thiserror 1.0.69", ] @@ -1294,7 +1294,7 @@ dependencies = [ "chrono", "cidre 0.11.0", "cocoa 0.26.1", - "cpal 0.15.3 (git+https://github.com/RustAudio/cpal?rev=f43d36e55494993bbbde3299af0c53e5cdf4d4cf)", + "cpal 0.15.3 (git+https://github.com/CapSoftware/cpal?rev=75a365a24507)", "device_query", "either", "ffmpeg-next", @@ -2038,7 +2038,7 @@ dependencies = [ [[package]] name = "cpal" version = "0.15.3" -source = "git+https://github.com/RustAudio/cpal?rev=f43d36e55494993bbbde3299af0c53e5cdf4d4cf#f43d36e55494993bbbde3299af0c53e5cdf4d4cf" +source = "git+https://github.com/CapSoftware/cpal?rev=75a365a24507#75a365a24507038fbd23b43534a75d3071a92b7a" dependencies = [ "alsa", "core-foundation-sys", @@ -2242,13 +2242,13 @@ checksum = "5c297a1c74b71ae29df00c3e22dd9534821d60eb9af5a0192823fa2acea70c2a" [[package]] name = "dbus" -version = "0.9.7" +version = "0.9.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1bb21987b9fb1613058ba3843121dd18b163b254d8a6e797e144cbac14d96d1b" +checksum = "190b6255e8ab55a7b568df5a883e9497edc3e4821c06396612048b430e5ad1e9" dependencies = [ "libc", "libdbus-sys", - "winapi", + "windows-sys 0.59.0", ] [[package]] @@ -3146,16 +3146,6 @@ dependencies = [ "version_check", ] -[[package]] -name = "gethostname" -version = "0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0176e0459c2e4a1fe232f984bca6890e681076abb9934f6cea7c326f3fc47818" -dependencies = [ - "libc", - "windows-targets 0.48.5", -] - [[package]] name = "gethostname" version = "1.0.2" @@ -4418,9 +4408,9 @@ checksum = "1171693293099992e19cddea4e8b849964e9846f4acee11b3948bcc337be8776" [[package]] name = "libdbus-sys" -version = "0.2.5" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06085512b750d640299b79be4bad3d2fa90a9c00b1fd9e1b46364f66f0485c72" +checksum = 
"5cbe856efeb50e4681f010e9aaa2bf0a644e10139e54cde10fc83a307c23bd9f" dependencies = [ "pkg-config", ] @@ -4653,6 +4643,15 @@ dependencies = [ "syn 2.0.104", ] +[[package]] +name = "matchers" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558" +dependencies = [ + "regex-automata 0.1.10", +] + [[package]] name = "matches" version = "0.1.10" @@ -6599,8 +6598,17 @@ checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" dependencies = [ "aho-corasick", "memchr", - "regex-automata", - "regex-syntax", + "regex-automata 0.4.9", + "regex-syntax 0.8.5", +] + +[[package]] +name = "regex-automata" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" +dependencies = [ + "regex-syntax 0.6.29", ] [[package]] @@ -6611,9 +6619,15 @@ checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908" dependencies = [ "aho-corasick", "memchr", - "regex-syntax", + "regex-syntax 0.8.5", ] +[[package]] +name = "regex-syntax" +version = "0.6.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" + [[package]] name = "regex-syntax" version = "0.8.5" @@ -6800,13 +6814,12 @@ checksum = "6c20b6793b5c2fa6553b250154b78d6d0db37e72700ae35fad9387a46f487c97" [[package]] name = "rust-ini" -version = "0.21.1" +version = "0.21.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e310ef0e1b6eeb79169a1171daf9abcb87a2e17c03bee2c4bb100b55c75409f" +checksum = "796e8d2b6696392a43bea58116b667fb4c29727dc5abd27d6acf338bb4f688c7" dependencies = [ "cfg-if", "ordered-multimap", - "trim-in-place", ] [[package]] @@ -6977,7 +6990,7 @@ dependencies = [ name = "scap-cpal" version = "0.1.0" dependencies = [ - "cpal 0.15.3 
(git+https://github.com/RustAudio/cpal?rev=f43d36e55494993bbbde3299af0c53e5cdf4d4cf)", + "cpal 0.15.3 (git+https://github.com/CapSoftware/cpal?rev=75a365a24507)", "thiserror 1.0.69", ] @@ -6998,7 +7011,7 @@ version = "0.1.0" dependencies = [ "cap-ffmpeg-utils", "cidre 0.11.0", - "cpal 0.15.3 (git+https://github.com/RustAudio/cpal?rev=f43d36e55494993bbbde3299af0c53e5cdf4d4cf)", + "cpal 0.15.3 (git+https://github.com/CapSoftware/cpal?rev=75a365a24507)", "ffmpeg-next", "futures", "scap-cpal", @@ -8084,9 +8097,9 @@ dependencies = [ [[package]] name = "tao" -version = "0.34.2" +version = "0.34.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4daa814018fecdfb977b59a094df4bd43b42e8e21f88fddfc05807e6f46efaaf" +checksum = "959469667dbcea91e5485fc48ba7dd6023face91bb0f1a14681a70f99847c3f7" dependencies = [ "bitflags 2.9.1", "block2 0.6.1", @@ -8152,9 +8165,9 @@ checksum = "61c41af27dd6d1e27b1b16b489db798443478cef1f06a660c96db617ba5de3b1" [[package]] name = "tauri" -version = "2.8.4" +version = "2.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d545ccf7b60dcd44e07c6fb5aeb09140966f0aabd5d2aa14a6821df7bc99348" +checksum = "d4d1d3b3dc4c101ac989fd7db77e045cc6d91a25349cd410455cb5c57d510c1c" dependencies = [ "anyhow", "bytes", @@ -8207,9 +8220,9 @@ dependencies = [ [[package]] name = "tauri-build" -version = "2.4.0" +version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67945dbaf8920dbe3a1e56721a419a0c3d085254ab24cff5b9ad55e2b0016e0b" +checksum = "9c432ccc9ff661803dab74c6cd78de11026a578a9307610bbc39d3c55be7943f" dependencies = [ "anyhow", "cargo_toml", @@ -8318,9 +8331,9 @@ dependencies = [ [[package]] name = "tauri-plugin-deep-link" -version = "2.4.2" +version = "2.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d430110d4ee102a9b673d3c03ff48098c80fe8ca71ba1ff52d8a5919538a1a6" +checksum = 
"cd67112fb1131834c2a7398ffcba520dbbf62c17de3b10329acd1a3554b1a9bb" dependencies = [ "dunce", "plist", @@ -8339,9 +8352,9 @@ dependencies = [ [[package]] name = "tauri-plugin-dialog" -version = "2.3.3" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ee5a3c416dc59d7d9aa0de5490a82d6e201c67ffe97388979d77b69b08cda40" +checksum = "0beee42a4002bc695550599b011728d9dfabf82f767f134754ed6655e434824e" dependencies = [ "log", "raw-window-handle", @@ -8477,7 +8490,7 @@ version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77a1c77ebf6f20417ab2a74e8c310820ba52151406d0c80fbcea7df232e3f6ba" dependencies = [ - "gethostname 1.0.2", + "gethostname", "log", "os_info", "serde", @@ -8537,9 +8550,9 @@ dependencies = [ [[package]] name = "tauri-plugin-single-instance" -version = "2.3.3" +version = "2.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "236043404a4d1502ed7cce11a8ec88ea1e85597eec9887b4701bb10b66b13b6e" +checksum = "fb9cac815bf11c4a80fb498666bcdad66d65b89e3ae24669e47806febb76389c" dependencies = [ "serde", "serde_json", @@ -9236,10 +9249,14 @@ version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e8189decb5ac0fa7bc8b96b7cb9b2701d60d48805aca84a238004d665fcc4008" dependencies = [ + "matchers", "nu-ansi-term", + "once_cell", + "regex", "sharded-slab", "smallvec", "thread_local", + "tracing", "tracing-core", "tracing-log", ] @@ -9278,12 +9295,6 @@ dependencies = [ "petgraph", ] -[[package]] -name = "trim-in-place" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "343e926fc669bc8cde4fa3129ab681c63671bae288b1f1081ceee6d9d37904fc" - [[package]] name = "try-lock" version = "0.2.5" @@ -10978,20 +10989,20 @@ dependencies = [ [[package]] name = "x11rb" -version = "0.13.1" +version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "5d91ffca73ee7f68ce055750bf9f6eca0780b8c85eff9bc046a3b0da41755e12" +checksum = "9993aa5be5a26815fe2c3eacfc1fde061fc1a1f094bf1ad2a18bf9c495dd7414" dependencies = [ - "gethostname 0.4.3", - "rustix 0.38.44", + "gethostname", + "rustix 1.0.8", "x11rb-protocol", ] [[package]] name = "x11rb-protocol" -version = "0.13.1" +version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec107c4503ea0b4a98ef47356329af139c0a4f7750e621cf2973cd3385ebcb3d" +checksum = "ea6fc2961e4ef194dcbfe56bb845534d0dc8098940c7e5c012a258bfec6701bd" [[package]] name = "xattr" @@ -11231,9 +11242,9 @@ dependencies = [ [[package]] name = "zip" -version = "4.5.0" +version = "4.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8835eb39822904d39cb19465de1159e05d371973f0c6df3a365ad50565ddc8b9" +checksum = "caa8cd6af31c3b31c6631b8f483848b91589021b28fffe50adada48d4f4d2ed1" dependencies = [ "arbitrary", "crc32fast", diff --git a/Cargo.toml b/Cargo.toml index 46cd816eaf..401232358c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -6,7 +6,7 @@ members = ["apps/cli", "apps/desktop/src-tauri", "crates/*"] anyhow = { version = "1.0.86" } # This includes a currently-unreleased fix that ensures the audio stream is actually # stopped and released on drop on macOS -cpal = { git = "https://github.com/RustAudio/cpal", rev = "f43d36e55494993bbbde3299af0c53e5cdf4d4cf" } +cpal = { git = "https://github.com/CapSoftware/cpal", rev = "75a365a24507" } ffmpeg = { package = "ffmpeg-next", git = "https://github.com/CapSoftware/rust-ffmpeg", rev = "49db1fede112" } tokio = { version = "1.39.3", features = [ "macros", diff --git a/crates/camera-directshow/examples/cli.rs b/crates/camera-directshow/examples/cli.rs index bad9b5761b..d712c1a655 100644 --- a/crates/camera-directshow/examples/cli.rs +++ b/crates/camera-directshow/examples/cli.rs @@ -115,8 +115,8 @@ mod windows { &selected_format.media_type, Box::new(|frame| { unsafe { 
dbg!(frame.sample.GetActualDataLength()) }; - dbg!(frame.media_type.subtype_str()); - dbg!(frame.reference_time); + // dbg!(frame.media_type.subtype_str()); + // dbg!(frame.reference_time); dbg!(frame.timestamp); }), ) diff --git a/crates/camera-directshow/src/lib.rs b/crates/camera-directshow/src/lib.rs index bfb468667a..41ce055c59 100644 --- a/crates/camera-directshow/src/lib.rs +++ b/crates/camera-directshow/src/lib.rs @@ -21,6 +21,7 @@ use windows::{ }, System::{ Com::{StructuredStorage::IPropertyBag, *}, + Performance::QueryPerformanceCounter, Variant::{VARIANT, VT_BSTR}, }, }, @@ -790,8 +791,8 @@ impl<'a> IEnumPins_Impl for PinEnumerator_Impl<'a> { pub struct CallbackData<'a> { pub sample: &'a IMediaSample, pub media_type: &'a AMMediaType, - pub reference_time: Instant, pub timestamp: Duration, + pub perf_counter: i64, } pub type SinkCallback = Box; @@ -994,6 +995,9 @@ impl IMemInputPin_Impl for SinkInputPin_Impl { &self, psample: windows_core::Ref<'_, windows::Win32::Media::DirectShow::IMediaSample>, ) -> windows_core::Result<()> { + let mut perf_counter = 0; + unsafe { QueryPerformanceCounter(&mut perf_counter)? 
}; + let Some(psample) = psample.as_ref() else { return Ok(()); }; @@ -1022,20 +1026,13 @@ impl IMemInputPin_Impl for SinkInputPin_Impl { let mut start_time = 0; let mut end_time = 0; - let mut timestamp = unsafe { psample.GetTime(&mut start_time, &mut end_time) } - .ok() - .map(|_| Duration::from_micros(start_time as u64 / 10)); - - let mut first_ref_time = self.first_ref_time.borrow_mut(); - let first_ref_time = first_ref_time.get_or_insert(Instant::now()); - - let timestamp = timestamp.get_or_insert(Instant::now() - *first_ref_time); + unsafe { psample.GetTime(&mut start_time, &mut end_time) }?; (self.callback.borrow_mut())(CallbackData { sample: psample, media_type: &media_type, - reference_time: *first_ref_time, - timestamp: *timestamp, + timestamp: Duration::from_micros(start_time as u64 / 10), + perf_counter, }); Ok(()) diff --git a/crates/camera-mediafoundation/Cargo.toml b/crates/camera-mediafoundation/Cargo.toml index db4970a4fb..239ce520c4 100644 --- a/crates/camera-mediafoundation/Cargo.toml +++ b/crates/camera-mediafoundation/Cargo.toml @@ -15,6 +15,8 @@ windows = { workspace = true, features = [ "Win32_Media_MediaFoundation", "Win32_Media_DirectShow", "Win32_System_Com", + "Win32_System_WinRT", + "Win32_System_Performance", ] } [dev-dependencies] diff --git a/crates/camera-mediafoundation/src/lib.rs b/crates/camera-mediafoundation/src/lib.rs index 175120a5c5..e4335d9634 100644 --- a/crates/camera-mediafoundation/src/lib.rs +++ b/crates/camera-mediafoundation/src/lib.rs @@ -13,13 +13,16 @@ use std::{ Mutex, mpsc::{Receiver, Sender, channel}, }, - time::{Duration, Instant}, + time::Duration, }; use tracing::error; use windows::Win32::{ Foundation::{S_FALSE, *}, Media::MediaFoundation::*, - System::Com::{CLSCTX_INPROC_SERVER, CoCreateInstance, CoInitialize}, + System::{ + Com::{CLSCTX_INPROC_SERVER, CoCreateInstance, CoInitialize}, + Performance::QueryPerformanceCounter, + }, }; use windows_core::{ComObjectInner, Interface, PWSTR, implement}; @@ -542,9 
+545,8 @@ fn get_device_model_id(device_id: &str) -> Option { pub struct CallbackData { pub sample: IMFSample, - pub reference_time: Instant, pub timestamp: Duration, - pub capture_begin_time: Instant, + pub perf_counter: i64, } #[implement(IMFCaptureEngineOnSampleCallback, IMFCaptureEngineOnEventCallback)] @@ -555,6 +557,9 @@ struct VideoCallback { impl IMFCaptureEngineOnSampleCallback_Impl for VideoCallback_Impl { fn OnSample(&self, psample: windows_core::Ref<'_, IMFSample>) -> windows_core::Result<()> { + let mut perf_counter = 0; + unsafe { QueryPerformanceCounter(&mut perf_counter)? }; + let Some(sample) = psample.as_ref() else { return Ok(()); }; @@ -563,28 +568,12 @@ impl IMFCaptureEngineOnSampleCallback_Impl for VideoCallback_Impl { return Ok(()); }; - let reference_time = Instant::now(); - let mf_time_now = Duration::from_micros(unsafe { MFGetSystemTime() / 10 } as u64); - - let raw_time_stamp = unsafe { sample.GetSampleTime() }.unwrap_or(0); - let timestamp = Duration::from_micros((raw_time_stamp / 10) as u64); - - let raw_capture_begin_time = - unsafe { sample.GetUINT64(&MFSampleExtension_DeviceReferenceSystemTime) } - .or_else( - // retry, it's what chromium does /shrug - |_| unsafe { sample.GetUINT64(&MFSampleExtension_DeviceReferenceSystemTime) }, - ) - .unwrap_or(unsafe { MFGetSystemTime() } as u64); - - let capture_begin_time = - reference_time + Duration::from_micros(raw_capture_begin_time / 10) - mf_time_now; + let sample_time = unsafe { sample.GetSampleTime() }?; (callback)(CallbackData { sample: sample.clone(), - reference_time, - timestamp, - capture_begin_time, + timestamp: Duration::from_micros(sample_time as u64 / 10), + perf_counter, }); Ok(()) diff --git a/crates/camera-windows/examples/cli.rs b/crates/camera-windows/examples/cli.rs index cf73abca5c..76b241bb7e 100644 --- a/crates/camera-windows/examples/cli.rs +++ b/crates/camera-windows/examples/cli.rs @@ -32,7 +32,12 @@ mod windows { let Ok(bytes) = frame.bytes() else { return; }; - 
dbg!(bytes.len(), frame.pixel_format); + dbg!( + bytes.len(), + frame.pixel_format, + frame.timestamp, + frame.perf_counter + ); }) .unwrap(); diff --git a/crates/camera-windows/src/lib.rs b/crates/camera-windows/src/lib.rs index 9214e10978..51500743f3 100644 --- a/crates/camera-windows/src/lib.rs +++ b/crates/camera-windows/src/lib.rs @@ -98,8 +98,8 @@ impl VideoDeviceInfo { height: format.height() as usize, pixel_format: format.pixel_format, timestamp: data.timestamp, - reference_time: data.reference_time, - capture_begin_time: Some(data.capture_begin_time), + perf_counter: data.perf_counter, + // capture_begin_time: Some(data.capture_begin_time), }) } }), @@ -128,8 +128,8 @@ impl VideoDeviceInfo { width: video_info.bmiHeader.biWidth as usize, height: video_info.bmiHeader.biHeight as usize, timestamp: data.timestamp, - reference_time: data.reference_time, - capture_begin_time: None, + perf_counter: data.perf_counter, + // capture_begin_time: None, }); }), )?; @@ -178,9 +178,9 @@ pub struct Frame { pub pixel_format: PixelFormat, pub width: usize, pub height: usize, - pub reference_time: Instant, + // pub reference_time: Instant, pub timestamp: Duration, - pub capture_begin_time: Option, + pub perf_counter: i64, inner: FrameInner, } diff --git a/crates/camera/src/lib.rs b/crates/camera/src/lib.rs index a1af62c6f3..5745060a76 100644 --- a/crates/camera/src/lib.rs +++ b/crates/camera/src/lib.rs @@ -190,9 +190,9 @@ pub enum StartCapturingError { #[derive(Debug)] pub struct CapturedFrame { native: NativeCapturedFrame, - pub reference_time: Instant, + // pub reference_time: Instant, pub timestamp: Duration, - pub capture_begin_time: Option, + // pub capture_begin_time: Option, } impl CapturedFrame { diff --git a/crates/camera/src/windows.rs b/crates/camera/src/windows.rs index 9b7771efde..d76909a5df 100644 --- a/crates/camera/src/windows.rs +++ b/crates/camera/src/windows.rs @@ -73,8 +73,8 @@ pub(super) fn start_capturing_impl( Ok(WindowsCaptureHandle { inner: 
device.start_capturing(format.native(), move |frame| { callback(CapturedFrame { - reference_time: frame.reference_time, - capture_begin_time: frame.capture_begin_time, + // reference_time: frame.reference_time, + // capture_begin_time: frame.capture_begin_time, timestamp: frame.timestamp, native: NativeCapturedFrame(frame), }); diff --git a/crates/media-info/src/lib.rs b/crates/media-info/src/lib.rs index 9ea1d1b1f0..217a836ea0 100644 --- a/crates/media-info/src/lib.rs +++ b/crates/media-info/src/lib.rs @@ -27,23 +27,34 @@ pub enum AudioInfoError { impl AudioInfo { pub const MAX_AUDIO_CHANNELS: u16 = 2; - pub fn new( + pub const fn new( sample_format: Sample, sample_rate: u32, channel_count: u16, ) -> Result { - Self::channel_layout_raw(channel_count) - .ok_or(AudioInfoError::ChannelLayout(channel_count))?; + if Self::channel_layout_raw(channel_count).is_none() { + return Err(AudioInfoError::ChannelLayout(channel_count)); + } Ok(Self { sample_format, sample_rate, - channels: channel_count.into(), + channels: channel_count as usize, time_base: FFRational(1, 1_000_000), buffer_size: 1024, }) } + pub const fn new_raw(sample_format: Sample, sample_rate: u32, channel_count: u16) -> Self { + Self { + sample_format, + sample_rate, + channels: channel_count as usize, + time_base: FFRational(1, 1_000_000), + buffer_size: 1024, + } + } + pub fn from_stream_config(config: &SupportedStreamConfig) -> Self { let sample_format = ffmpeg_sample_format_for(config.sample_format()).unwrap(); let buffer_size = match config.buffer_size() { @@ -78,7 +89,7 @@ impl AudioInfo { }) } - fn channel_layout_raw(channels: u16) -> Option { + const fn channel_layout_raw(channels: u16) -> Option { Some(match channels { 1 => ChannelLayout::MONO, 2 => ChannelLayout::STEREO, @@ -94,8 +105,8 @@ impl AudioInfo { self.sample_format.bytes() } - pub fn rate(&self) -> i32 { - self.sample_rate.try_into().unwrap() + pub const fn rate(&self) -> i32 { + self.sample_rate as i32 } pub fn empty_frame(&self, 
sample_count: usize) -> frame::Audio { @@ -105,13 +116,12 @@ impl AudioInfo { frame } - pub fn wrap_frame(&self, data: &[u8], timestamp: i64) -> frame::Audio { + pub fn wrap_frame(&self, data: &[u8]) -> frame::Audio { let sample_size = self.sample_size(); let interleaved_chunk_size = sample_size * self.channels; let samples = data.len() / interleaved_chunk_size; let mut frame = frame::Audio::new(self.sample_format, samples, self.channel_layout()); - frame.set_pts(Some(timestamp)); frame.set_rate(self.sample_rate); if self.channels == 0 { diff --git a/crates/recording/Cargo.toml b/crates/recording/Cargo.toml index 52ac8672a5..7807247b3d 100644 --- a/crates/recording/Cargo.toml +++ b/crates/recording/Cargo.toml @@ -65,6 +65,7 @@ windows = { workspace = true, features = [ "Win32_Foundation", "Win32_Graphics_Gdi", "Win32_UI_WindowsAndMessaging", + "Win32_System_Performance", ] } scap-direct3d = { path = "../scap-direct3d" } scap-ffmpeg = { path = "../scap-ffmpeg" } @@ -72,4 +73,4 @@ scap-cpal = { path = "../scap-cpal" } [dev-dependencies] tempfile = "3.20.0" -tracing-subscriber = { version = "0.3.19", default-features = false } +tracing-subscriber = { version = "0.3.19", features = ["env-filter"] } diff --git a/crates/recording/examples/recording-cli.rs b/crates/recording/examples/recording-cli.rs index fc1c126b22..b2861a9a38 100644 --- a/crates/recording/examples/recording-cli.rs +++ b/crates/recording/examples/recording-cli.rs @@ -22,7 +22,7 @@ pub async fn main() { info!("Recording to directory '{}'", dir.path().display()); - let (handle, _ready_rx) = cap_recording::spawn_studio_recording_actor( + let (handle, _ready_rx) = cap_recording::spawn_instant_recording_actor( "test".to_string(), dir.path().into(), RecordingBaseInputs { @@ -33,7 +33,7 @@ pub async fn main() { camera_feed: None, mic_feed: None, }, - false, + // false, // true, ) .await diff --git a/crates/recording/src/capture_pipeline.rs b/crates/recording/src/capture_pipeline.rs index d06dc14abb..701bc41f40 
100644 --- a/crates/recording/src/capture_pipeline.rs +++ b/crates/recording/src/capture_pipeline.rs @@ -3,41 +3,132 @@ use crate::{ feeds::microphone::MicrophoneFeedLock, pipeline::builder::PipelineBuilder, sources::{ - AudioInputSource, AudioMixer, ScreenCaptureFormat, ScreenCaptureSource, - ScreenCaptureTarget, screen_capture, + AudioInputSource, ScreenCaptureFormat, ScreenCaptureSource, ScreenCaptureTarget, + screen_capture, }, }; use cap_media::MediaError; use cap_media_info::AudioInfo; +use cpal::StreamInstant; use flume::{Receiver, Sender}; use std::{ future::Future, + ops::Add, path::PathBuf, sync::{Arc, atomic::AtomicBool}, - time::{Instant, SystemTime}, + time::{Duration, Instant, SystemTime}, }; +#[cfg(windows)] +mod win { + use windows::Win32::System::Performance::{QueryPerformanceCounter, QueryPerformanceFrequency}; + + use super::*; + + #[derive(Clone, Copy, Debug)] + pub struct PerformanceCounterTimestamp(i64); + + impl PerformanceCounterTimestamp { + pub fn new(value: i64) -> Self { + Self(value) + } + + pub fn duration_since(&self, other: Self) -> Duration { + let mut freq = 0; + unsafe { QueryPerformanceFrequency(&mut freq).unwrap() }; + + Duration::from_secs_f64((self.0 - other.0) as f64 / freq as f64) + } + + pub fn from_cpal(instant: StreamInstant) -> Self { + use cpal::host::wasapi::StreamInstantExt; + + Self(instant.as_performance_counter()) + } + + pub fn now() -> Self { + let mut value = 0; + unsafe { QueryPerformanceCounter(&mut value).unwrap() }; + Self(value) + } + } + + impl Add for PerformanceCounterTimestamp { + type Output = Self; + + fn add(self, rhs: Duration) -> Self::Output { + let mut freq = 0; + unsafe { QueryPerformanceFrequency(&mut freq) }.unwrap(); + Self(self.0 + (rhs.as_secs_f64() * freq as f64) as i64) + } + } +} + +#[cfg(windows)] +pub use win::*; + +#[derive(Clone, Copy, Debug)] pub enum SourceTimestamp { Instant(Instant), SystemTime(SystemTime), #[cfg(windows)] - PerformanceCounter(u64), + 
PerformanceCounter(PerformanceCounterTimestamp), +} + +impl SourceTimestamp { + pub fn duration_since(&self, start: SourceTimestamps) -> Duration { + match self { + Self::Instant(instant) => instant.duration_since(start.instant), + Self::SystemTime(time) => time.duration_since(start.system_time).unwrap(), + #[cfg(windows)] + Self::PerformanceCounter(counter) => counter.duration_since(start.performance_counter), + } + } + + pub fn from_cpal(instant: StreamInstant) -> Self { + #[cfg(windows)] + Self::PerformanceCounter(PerformanceCounterTimestamp::from_cpal(instant)) + } +} + +impl Add for &SourceTimestamp { + type Output = SourceTimestamp; + + fn add(self, rhs: Duration) -> Self::Output { + match *self { + SourceTimestamp::Instant(i) => SourceTimestamp::Instant(i + rhs), + SourceTimestamp::SystemTime(t) => SourceTimestamp::SystemTime(t + rhs), + #[cfg(windows)] + SourceTimestamp::PerformanceCounter(c) => SourceTimestamp::PerformanceCounter(c + rhs), + } + } } -struct StartSourceTimestamp { +#[derive(Clone, Copy, Debug)] +pub struct SourceTimestamps { instant: Instant, system_time: SystemTime, #[cfg(windows)] - performance_counter: u64, + performance_counter: PerformanceCounterTimestamp, } -impl StartSourceTimestamp { - fn new() -> Self { +impl SourceTimestamps { + pub fn now() -> Self { Self { instant: Instant::now(), system_time: SystemTime::now(), + #[cfg(windows)] + performance_counter: PerformanceCounterTimestamp::now(), } } + + pub fn instant(&self) -> Instant { + self.instant + } + + pub fn system_time(&self) -> SystemTime { + self.system_time + } } pub trait MakeCapturePipeline: ScreenCaptureFormat + std::fmt::Debug + 'static { @@ -45,10 +136,10 @@ pub trait MakeCapturePipeline: ScreenCaptureFormat + std::fmt::Debug + 'static { builder: PipelineBuilder, source: ( ScreenCaptureSource, - flume::Receiver<(Self::VideoFormat, f64)>, + flume::Receiver<(Self::VideoFormat, SourceTimestamp)>, ), output_path: PathBuf, - ) -> Result<(PipelineBuilder, flume::Receiver), 
MediaError> + ) -> Result<(PipelineBuilder, flume::Receiver), MediaError> where Self: Sized; @@ -56,10 +147,10 @@ pub trait MakeCapturePipeline: ScreenCaptureFormat + std::fmt::Debug + 'static { builder: PipelineBuilder, source: ( ScreenCaptureSource, - flume::Receiver<(Self::VideoFormat, f64)>, + flume::Receiver<(Self::VideoFormat, SourceTimestamp)>, ), audio: Option>, - system_audio: Option<(Receiver<(ffmpeg::frame::Audio, f64)>, AudioInfo)>, + system_audio: Option<(Receiver<(ffmpeg::frame::Audio, SourceTimestamp)>, AudioInfo)>, output_path: PathBuf, pause_flag: Arc, ) -> impl Future> + Send @@ -73,10 +164,10 @@ impl MakeCapturePipeline for screen_capture::CMSampleBufferCapture { mut builder: PipelineBuilder, source: ( ScreenCaptureSource, - flume::Receiver<(Self::VideoFormat, f64)>, + flume::Receiver<(Self::VideoFormat, SourceTimestamp)>, ), output_path: PathBuf, - ) -> Result<(PipelineBuilder, flume::Receiver), MediaError> { + ) -> Result<(PipelineBuilder, flume::Receiver), MediaError> { let screen_config = source.0.info(); tracing::info!("screen config: {:?}", screen_config); @@ -180,7 +271,7 @@ impl MakeCapturePipeline for screen_capture::CMSampleBufferCapture { let _ = ready.send(Ok(())); let mut time = None; - while let Ok(mut frame) = audio_rx.recv() { + while let Ok((mut frame, duration, start_timestamps)) = audio_rx.recv() { let pts = frame.pts().unwrap(); if let Ok(first_time) = first_frame_rx.try_recv() { @@ -251,10 +342,10 @@ impl MakeCapturePipeline for screen_capture::Direct3DCapture { mut builder: PipelineBuilder, source: ( ScreenCaptureSource, - flume::Receiver<(Self::VideoFormat, f64)>, + flume::Receiver<(Self::VideoFormat, SourceTimestamp)>, ), output_path: PathBuf, - ) -> Result<(PipelineBuilder, flume::Receiver), MediaError> + ) -> Result<(PipelineBuilder, flume::Receiver), MediaError> where Self: Sized, { @@ -281,7 +372,7 @@ impl MakeCapturePipeline for screen_capture::Direct3DCapture { match native_encoder { Ok(encoder) => { - let mut muxer 
= cap_mediafoundation_ffmpeg::H264StreamMuxer::new( + let muxer = cap_mediafoundation_ffmpeg::H264StreamMuxer::new( &mut output, cap_mediafoundation_ffmpeg::MuxerConfig { width: screen_config.width, @@ -405,31 +496,38 @@ impl MakeCapturePipeline for screen_capture::Direct3DCapture { mut builder: PipelineBuilder, source: ( ScreenCaptureSource, - flume::Receiver<(Self::VideoFormat, f64)>, + flume::Receiver<(Self::VideoFormat, SourceTimestamp)>, ), audio: Option>, - system_audio: Option<(Receiver<(ffmpeg::frame::Audio, f64)>, AudioInfo)>, + system_audio: Option<(Receiver<(ffmpeg::frame::Audio, SourceTimestamp)>, AudioInfo)>, output_path: PathBuf, _pause_flag: Arc, ) -> Result where Self: Sized, { + use std::sync::mpsc; + use cap_enc_ffmpeg::{AACEncoder, AudioEncoder}; use windows::Graphics::SizeInt32; + use crate::sources::new_audio_mixer; + cap_mediafoundation_utils::thread_init(); + let start_time = SourceTimestamps::now(); + let (audio_tx, audio_rx) = flume::bounded(64); - let mut audio_mixer = AudioMixer::new(audio_tx); + let mut audio_mixer = new_audio_mixer::AudioMixer::builder(audio_tx); if let Some(system_audio) = system_audio { audio_mixer.add_source(system_audio.1, system_audio.0); } if let Some(audio) = audio { - let sink = audio_mixer.sink(*audio.audio_info()); - let source = AudioInputSource::init(audio, sink.tx, SystemTime::now()); + let (tx, rx) = flume::bounded(32); + audio_mixer.add_source(*audio.audio_info(), rx); + let source = AudioInputSource::init(audio, tx); builder.spawn_source("microphone_capture", source); } @@ -492,7 +590,7 @@ impl MakeCapturePipeline for screen_capture::Direct3DCapture { let audio_encoder = has_audio_sources .then(|| { - AACEncoder::init("mic_audio", AudioMixer::info(), &mut output) + AACEncoder::init("mic_audio", new_audio_mixer::AudioMixer::INFO, &mut output) .map(|v| v.boxed()) .map_err(|e| MediaError::Any(e.to_string().into())) }) @@ -505,14 +603,40 @@ impl MakeCapturePipeline for screen_capture::Direct3DCapture { let 
output = Arc::new(std::sync::Mutex::new(output)); + let (screen_first_tx, screen_first_rx) = mpsc::sync_channel(1); + if let Some(mut audio_encoder) = audio_encoder { builder.spawn_source("audio_mixer", audio_mixer); // let is_done = is_done.clone(); let output = output.clone(); builder.spawn_task("audio_encoding", move |ready| { + let screen_first_offset = loop { + match screen_first_rx.recv_timeout(Duration::from_millis(5)) { + Ok(offset) => { + audio_rx.drain().count(); + break offset; + } + Err(mpsc::RecvTimeoutError::Timeout) => continue, + Err(mpsc::RecvTimeoutError::Disconnected) => return Ok(()), + } + }; + let _ = ready.send(Ok(())); - while let Ok(frame) = audio_rx.recv() { + + while let Ok((mut frame, timestamp)) = audio_rx.recv() { + let ts_offset = timestamp.duration_since(start_time); + // dbg!(ts_offset, frame.samples()); + + let Some(ts_offset) = ts_offset.checked_sub(screen_first_offset) else { + continue; + }; + + let pts = (ts_offset.as_secs_f64() * frame.rate() as f64) as i64; + frame.set_pts(Some(pts)); + + // dbg!(pts); + if let Ok(mut output) = output.lock() { audio_encoder.queue_frame(frame, &mut *output); } @@ -532,6 +656,8 @@ impl MakeCapturePipeline for screen_capture::Direct3DCapture { let _ = ready.send(Ok(())); + let mut screen_first_tx = Some(screen_first_tx); + while let Ok(e) = encoder.get_event() { match e { MediaFoundation::METransformNeedInput => { @@ -544,6 +670,15 @@ impl MakeCapturePipeline for screen_capture::Direct3DCapture { .SystemRelativeTime() .map_err(|e| format!("Frame Time: {e}"))?; + let timestamp = SourceTimestamp::PerformanceCounter( + PerformanceCounterTimestamp::new(frame_time.Duration), + ); + + if let Some(screen_first_tx) = screen_first_tx.take() { + let _ = screen_first_tx + .try_send(timestamp.duration_since(start_time)); + } + encoder .handle_needs_input(frame.texture(), frame_time) .map_err(|e| format!("NeedsInput: {e}"))?; @@ -611,7 +746,7 @@ impl MakeCapturePipeline for screen_capture::Direct3DCapture { 
type ScreenCaptureReturn = ( ScreenCaptureSource, - Receiver<(::VideoFormat, f64)>, + Receiver<(::VideoFormat, SourceTimestamp)>, ); #[cfg(target_os = "macos")] @@ -624,8 +759,7 @@ pub async fn create_screen_capture( capture_target: &ScreenCaptureTarget, force_show_cursor: bool, max_fps: u32, - audio_tx: Option>, - start_time: SystemTime, + audio_tx: Option>, #[cfg(windows)] d3d_device: ::windows::Win32::Graphics::Direct3D11::ID3D11Device, ) -> Result, RecordingError> { let (video_tx, video_rx) = flume::bounded(16); @@ -636,7 +770,6 @@ pub async fn create_screen_capture( max_fps, video_tx, audio_tx, - start_time, tokio::runtime::Handle::current(), #[cfg(windows)] d3d_device, diff --git a/crates/recording/src/cursor.rs b/crates/recording/src/cursor.rs index 7c42933dbf..28b59ebe85 100644 --- a/crates/recording/src/cursor.rs +++ b/crates/recording/src/cursor.rs @@ -5,6 +5,8 @@ use std::{collections::HashMap, path::PathBuf, time::SystemTime}; use tokio::sync::oneshot; use tokio_util::sync::CancellationToken; +use crate::capture_pipeline::SourceTimestamps; + pub struct Cursor { pub file_name: String, pub id: u32, @@ -41,7 +43,7 @@ pub fn spawn_cursor_recorder( cursors_dir: PathBuf, prev_cursors: Cursors, next_cursor_id: u32, - start_time: SystemTime, + start_time: SourceTimestamps, ) -> CursorActor { use cap_utils::spawn_actor; use device_query::{DeviceQuery, DeviceState}; @@ -81,10 +83,7 @@ pub fn spawn_cursor_recorder( break; }; - let Ok(elapsed) = start_time.elapsed() else { - continue; - }; - let elapsed = elapsed.as_secs_f64() * 1000.0; + let elapsed = start_time.instant().elapsed().as_secs_f64() * 1000.0; let mouse_state = device_state.get_mouse(); let cursor_data = get_cursor_data(); diff --git a/crates/recording/src/feeds/camera.rs b/crates/recording/src/feeds/camera.rs index 16660407f6..3453b9fbc0 100644 --- a/crates/recording/src/feeds/camera.rs +++ b/crates/recording/src/feeds/camera.rs @@ -16,13 +16,16 @@ use tracing::{debug, error, trace, warn}; use 
cap_camera_ffmpeg::*; +#[cfg(windows)] +use crate::capture_pipeline::PerformanceCounterTimestamp; +use crate::capture_pipeline::SourceTimestamp; + const CAMERA_INIT_TIMEOUT: Duration = Duration::from_secs(4); #[derive(Clone)] pub struct RawCameraFrame { pub frame: frame::Video, - pub timestamp: Duration, - pub reference_time: Instant, + pub timestamp: SourceTimestamp, } #[derive(Actor)] @@ -282,8 +285,10 @@ async fn setup_camera( let _ = recipient .tell(NewFrame(RawCameraFrame { frame: ff_frame, - timestamp: frame.timestamp, - reference_time: frame.reference_time, + #[cfg(windows)] + timestamp: SourceTimestamp::PerformanceCounter( + PerformanceCounterTimestamp::new(frame.native().perf_counter), + ), })) .try_send(); }) diff --git a/crates/recording/src/feeds/microphone.rs b/crates/recording/src/feeds/microphone.rs index d20b90092b..da0f44eb6f 100644 --- a/crates/recording/src/feeds/microphone.rs +++ b/crates/recording/src/feeds/microphone.rs @@ -1,3 +1,4 @@ +use crate::capture_pipeline::SourceTimestamp; use cap_media_info::{AudioInfo, ffmpeg_sample_format_for}; use cpal::{ Device, InputCallbackInfo, SampleFormat, StreamError, SupportedStreamConfig, @@ -21,6 +22,7 @@ pub struct MicrophoneSamples { pub data: Vec, pub format: SampleFormat, pub info: InputCallbackInfo, + pub timestamp: SourceTimestamp, } #[derive(Actor)] @@ -297,6 +299,7 @@ impl Message for MicrophoneFeed { data: data.bytes().to_vec(), format: data.sample_format(), info: info.clone(), + timestamp: SourceTimestamp::from_cpal(info.timestamp().capture), }) .try_send(); } diff --git a/crates/recording/src/instant_recording.rs b/crates/recording/src/instant_recording.rs index bcd10c60f5..2c2007729a 100644 --- a/crates/recording/src/instant_recording.rs +++ b/crates/recording/src/instant_recording.rs @@ -13,7 +13,9 @@ use tracing::{Instrument, debug, error, info, trace}; use crate::{ ActorError, RecordingBaseInputs, RecordingError, - capture_pipeline::{MakeCapturePipeline, create_screen_capture}, + 
capture_pipeline::{ + MakeCapturePipeline, SourceTimestamp, SourceTimestamps, create_screen_capture, + }, feeds::microphone::MicrophoneFeedLock, pipeline::Pipeline, sources::{ScreenCaptureSource, ScreenCaptureTarget}, @@ -112,10 +114,10 @@ async fn create_pipeline( output_path: PathBuf, screen_source: ( ScreenCaptureSource, - flume::Receiver<(TCaptureFormat::VideoFormat, f64)>, + flume::Receiver<(TCaptureFormat::VideoFormat, SourceTimestamp)>, ), mic_feed: Option>, - system_audio: Option>, + system_audio: Option>, ) -> Result< ( InstantRecordingPipeline, @@ -171,8 +173,6 @@ pub async fn spawn_instant_recording_actor( > { ensure_dir(&recording_dir)?; - let start_time = SystemTime::now(); - let (done_tx, done_rx) = oneshot::channel(); trace!("creating recording actor"); @@ -195,7 +195,6 @@ pub async fn spawn_instant_recording_actor( true, 30, system_audio.0, - start_time, #[cfg(windows)] d3d_device, ) diff --git a/crates/recording/src/sources/audio_input.rs b/crates/recording/src/sources/audio_input.rs index 67416adc98..5b21ad0d93 100644 --- a/crates/recording/src/sources/audio_input.rs +++ b/crates/recording/src/sources/audio_input.rs @@ -1,15 +1,16 @@ use crate::{ + capture_pipeline::SourceTimestamp, feeds::microphone::{self, MicrophoneFeedLock, MicrophoneSamples}, pipeline::{control::Control, task::PipelineSourceTask}, }; use cap_fail::fail; use cap_media::MediaError; use cap_media_info::AudioInfo; -use cpal::{Device, StreamInstant, SupportedStreamConfig}; -use ffmpeg::{frame::Audio as FFAudio, sys::AV_TIME_BASE_Q}; +use cpal::{Device, SupportedStreamConfig}; +use ffmpeg::frame::Audio as FFAudio; use flume::{Receiver, Sender}; use indexmap::IndexMap; -use std::{sync::Arc, time::SystemTime}; +use std::sync::Arc; use tracing::{error, info}; pub type AudioInputDeviceMap = IndexMap; @@ -17,26 +18,15 @@ pub type AudioInputDeviceMap = IndexMap pub struct AudioInputSource { feed: Arc, audio_info: AudioInfo, - tx: Sender<(FFAudio, f64)>, - start_timestamp: 
Option<(StreamInstant, SystemTime)>, - start_time: f64, + tx: Sender<(FFAudio, SourceTimestamp)>, } impl AudioInputSource { - pub fn init( - feed: Arc, - tx: Sender<(FFAudio, f64)>, - start_time: SystemTime, - ) -> Self { + pub fn init(feed: Arc, tx: Sender<(FFAudio, SourceTimestamp)>) -> Self { Self { audio_info: *feed.audio_info(), feed, tx, - start_timestamp: None, - start_time: start_time - .duration_since(SystemTime::UNIX_EPOCH) - .unwrap() - .as_secs_f64(), } } @@ -45,33 +35,9 @@ impl AudioInputSource { } fn process_frame(&mut self, samples: MicrophoneSamples) -> Result<(), MediaError> { - let start_timestamp = match self.start_timestamp { - None => *self - .start_timestamp - .insert((samples.info.timestamp().capture, SystemTime::now())), - Some(v) => v, - }; - - let elapsed = samples - .info - .timestamp() - .capture - .duration_since(&start_timestamp.0) - .unwrap(); - - let timestamp = start_timestamp - .1 - .checked_add(elapsed) - .unwrap() - .duration_since(SystemTime::UNIX_EPOCH) - .unwrap() - .as_secs_f64() - - self.start_time; + let timestamp = SourceTimestamp::from_cpal(samples.info.timestamp().capture); - let frame = self.audio_info.wrap_frame( - &samples.data, - (elapsed.as_secs_f64() * AV_TIME_BASE_Q.den as f64) as i64, - ); + let frame = self.audio_info.wrap_frame(&samples.data); if self.tx.send((frame, timestamp)).is_err() { return Err(MediaError::Any( "Pipeline is unreachable! 
Stopping capture".into(), diff --git a/crates/recording/src/sources/audio_mixer.rs b/crates/recording/src/sources/audio_mixer.rs index 57f8d509ea..d3eb3de507 100644 --- a/crates/recording/src/sources/audio_mixer.rs +++ b/crates/recording/src/sources/audio_mixer.rs @@ -1,34 +1,218 @@ use cap_media_info::AudioInfo; use ffmpeg::sys::AV_TIME_BASE_Q; use flume::{Receiver, Sender}; -use std::time::Duration; -use tracing::{debug, warn}; +use std::collections::VecDeque; +use std::time::{Duration, SystemTime}; +use tracing::{debug, trace, warn}; -use crate::pipeline::task::PipelineSourceTask; +use crate::{ + capture_pipeline::{SourceTimestamp, SourceTimestamps}, + pipeline::task::PipelineSourceTask, +}; + +struct BufferedAudioSource { + rx: Receiver<(ffmpeg::frame::Audio, SourceTimestamp)>, + info: AudioInfo, + buffer: VecDeque<(ffmpeg::frame::Audio, SourceTimestamp)>, + last_processed_timestamp: Option, + last_output_pts: i64, + expected_frame_duration_ms: f64, + total_samples_processed: u64, + timeline_position: Duration, // Track our position in the timeline directly +} + +impl BufferedAudioSource { + fn new(rx: Receiver<(ffmpeg::frame::Audio, SourceTimestamp)>, info: AudioInfo) -> Self { + let expected_frame_duration_ms = 1024.0 / info.rate() as f64 * 1000.0; + + Self { + rx, + info, + buffer: VecDeque::new(), + last_processed_timestamp: None, + last_output_pts: 0, + expected_frame_duration_ms, + total_samples_processed: 0, + timeline_position: Duration::ZERO, + } + } + + fn fill_buffer(&mut self) { + let initial_size = self.buffer.len(); + let mut frames_received = 0; + let is_disconnected = self.rx.is_disconnected(); + + while let Ok((frame, timestamp)) = self.rx.try_recv() { + trace!( + "Received audio frame: {} samples, timestamp: {:?}", + frame.samples(), + timestamp + ); + self.buffer.push_back((frame, timestamp)); + frames_received += 1; + } + + if frames_received > 0 { + trace!( + "Buffer filled: {} new frames, total buffer size: {} -> {}", + frames_received, + 
initial_size, + self.buffer.len() + ); + } else if is_disconnected { + trace!("Receiver disconnected, no more frames will be received"); + } + } + + fn has_sufficient_buffer(&self) -> bool { + self.buffer.len() >= 2 || self.rx.is_disconnected() + } + + fn generate_silent_frame(&self, samples: usize) -> ffmpeg::frame::Audio { + let mut frame = + ffmpeg::frame::Audio::new(self.info.sample_format, samples, self.info.channel_layout()); + + unsafe { + let data = frame.data_mut(0); + let bytes_per_sample = self.info.sample_format.bytes() as usize; + let total_bytes = + samples * self.info.channel_layout().channels() as usize * bytes_per_sample; + std::ptr::write_bytes(data.as_mut_ptr(), 0, total_bytes); + } + + frame + } + + fn generate_initial_silence_if_needed( + &mut self, + _target_time: Duration, + _start_timestamps: SourceTimestamps, + ) -> Vec { + // No longer generate initial silence - let the mixer handle silence generation + Vec::new() + } + + fn process_with_gap_filling( + &mut self, + target_time: Duration, + start_timestamps: SourceTimestamps, + ) -> Vec { + let mut output_frames = Vec::new(); + + // Always process ALL available frames - don't leave them in buffer + while !self.buffer.is_empty() { + let (mut frame, timestamp) = self.buffer.pop_front().unwrap(); + let frame_time = timestamp.duration_since(start_timestamps); + let frame_samples = frame.samples() as u64; + + // Check for gap if we've processed frames before + if let Some(last_ts) = &self.last_processed_timestamp { + let last_time = last_ts.duration_since(start_timestamps); + let expected_next = last_time + + Duration::from_secs_f64(frame_samples as f64 / self.info.rate() as f64); + + // If there's a gap larger than 1.5 frames, fill it with silence + if frame_time > expected_next + Duration::from_millis(30) { + let gap = frame_time - expected_next; + let silent_samples = ((gap.as_secs_f64()) * self.info.rate() as f64) as usize; + + let mut remaining = silent_samples; + while remaining > 0 { + 
let chunk_size = remaining.min(1024); + let mut silent_frame = self.generate_silent_frame(chunk_size); + + let pts = (self.total_samples_processed as f64 / self.info.rate() as f64 + * AV_TIME_BASE_Q.den as f64) as i64; + silent_frame.set_pts(Some(pts)); + + output_frames.push(silent_frame); + self.total_samples_processed += chunk_size as u64; + self.last_output_pts = pts; + remaining -= chunk_size; + } + } + } + + // Process the actual frame + let pts = (self.total_samples_processed as f64 / self.info.rate() as f64 + * AV_TIME_BASE_Q.den as f64) as i64; + frame.set_pts(Some(pts)); + + self.last_output_pts = pts; + self.total_samples_processed += frame_samples; + self.last_processed_timestamp = Some(timestamp); + self.timeline_position = frame_time + + Duration::from_secs_f64(frame_samples as f64 / self.info.rate() as f64); + + output_frames.push(frame); + } + + // If buffer is empty but we've processed frames before, generate silence to maintain continuity + if output_frames.is_empty() && self.last_processed_timestamp.is_some() { + // Calculate how much silence we need based on the time gap + let last_time = self.timeline_position; + if target_time > last_time { + let gap = target_time - last_time; + let silent_samples = ((gap.as_secs_f64()) * self.info.rate() as f64) as usize; + + if silent_samples > 0 { + let mut remaining = silent_samples; + while remaining > 0 { + let chunk_size = remaining.min(1024); + let mut silent_frame = self.generate_silent_frame(chunk_size); + + let pts = (self.total_samples_processed as f64 / self.info.rate() as f64 + * AV_TIME_BASE_Q.den as f64) as i64; + silent_frame.set_pts(Some(pts)); + + output_frames.push(silent_frame); + self.total_samples_processed += chunk_size as u64; + self.last_output_pts = pts; + self.timeline_position += + Duration::from_secs_f64(chunk_size as f64 / self.info.rate() as f64); + remaining -= chunk_size; + } + } + } + } + + output_frames + } +} pub struct AudioMixer { - sources: Vec, - output: Sender, + 
sources: Vec, + output: Sender<(ffmpeg::frame::Audio, Duration, SourceTimestamps)>, + start_timestamps: SourceTimestamps, + output_sample_count: u64, + output_sample_rate: u32, } impl AudioMixer { - pub fn new(output: Sender) -> Self { + pub fn new(output: Sender<(ffmpeg::frame::Audio, Duration, SourceTimestamps)>) -> Self { Self { sources: Vec::new(), output, + start_timestamps: SourceTimestamps::now(), + output_sample_count: 0, + output_sample_rate: 48000, } } pub fn sink(&mut self, info: AudioInfo) -> AudioMixerSink { let (tx, rx) = flume::bounded(32); - self.sources.push(AudioMixerSource { rx, info }); + self.sources.push(BufferedAudioSource::new(rx, info)); AudioMixerSink { tx } } - pub fn add_source(&mut self, info: AudioInfo, rx: Receiver<(ffmpeg::frame::Audio, f64)>) { - self.sources.push(AudioMixerSource { rx, info }) + pub fn add_source( + &mut self, + info: AudioInfo, + rx: Receiver<(ffmpeg::frame::Audio, SourceTimestamp)>, + ) { + self.sources.push(BufferedAudioSource::new(rx, info)); } pub fn has_sources(&self) -> bool { @@ -116,47 +300,156 @@ impl AudioMixer { on_ready(); + let frame_size = 1024usize; + let frame_duration = + Duration::from_secs_f64(frame_size as f64 / self.output_sample_rate as f64); + let mut next_output_time = std::time::Instant::now() + Duration::from_millis(50); let mut filtered = ffmpeg::frame::Audio::empty(); + let mut startup_phase = true; + let mut processing_time = Duration::ZERO; + let mut first_frame_time: Option = None; + loop { if get_is_stopped() { return; } - for (i, source) in self.sources.iter().enumerate() { - loop { - let value = match source.rx.try_recv() { - Ok(v) => v, - Err(flume::TryRecvError::Disconnected) => return, - Err(flume::TryRecvError::Empty) => break, - }; - let frame = &value.0; + // Fill all source buffers + for (i, source) in self.sources.iter_mut().enumerate() { + let buffer_size_before = source.buffer.len(); + source.fill_buffer(); + let buffer_size_after = source.buffer.len(); + + if 
buffer_size_after > buffer_size_before { + trace!( + "Source {}: buffer grew from {} to {} frames", + i, buffer_size_before, buffer_size_after + ); + // Log timing of first frame in buffer + if let Some((_, timestamp)) = source.buffer.front() { + let frame_time = timestamp.duration_since(self.start_timestamps); + trace!( + "Source {}: first buffered frame time: {:.2}ms vs processing_time: {:.2}ms (delta: {:.2}ms)", + i, + frame_time.as_secs_f64() * 1000.0, + processing_time.as_secs_f64() * 1000.0, + (frame_time.as_secs_f64() - processing_time.as_secs_f64()) * 1000.0 + ); + } + } else if source.rx.is_disconnected() { + trace!("Source {}: receiver disconnected", i); + } + } + + // During startup, wait for sufficient initial data + if startup_phase { + let sources_with_data = + self.sources.iter().filter(|s| !s.buffer.is_empty()).count(); + let sources_with_sufficient_buffer = + self.sources.iter().filter(|s| s.buffer.len() >= 2).count(); - abuffers[i].source().add(frame).unwrap(); + // Wait until we have some buffering to avoid underruns + if sources_with_data == 0 { + std::thread::sleep(Duration::from_millis(5)); + continue; + } else if sources_with_sufficient_buffer < sources_with_data { + // We have some data but not enough buffering yet + trace!( + "Startup: waiting for buffer (sources with data: {}, with sufficient buffer: {})", + sources_with_data, sources_with_sufficient_buffer + ); + std::thread::sleep(Duration::from_millis(5)); + continue; } + + startup_phase = false; + debug!( + "Startup complete: {} sources ready with sufficient buffering", + sources_with_data + ); + // Reset next output time after receiving sufficient data + next_output_time = std::time::Instant::now() + Duration::from_millis(20); + } + + let now = std::time::Instant::now(); + + // Check if it's time to produce output + if now >= next_output_time { + // Feed frames to each source's filter input + for (i, source) in self.sources.iter_mut().enumerate() { + // Process ALL frames from buffer 
(including silence generation) + let frames = + source.process_with_gap_filling(processing_time, self.start_timestamps); + + // Add all frames (real or silence) to the filter + for frame in frames { + if let Err(e) = abuffers[i].source().add(&frame) { + warn!("Source {}: Failed to add frame to filter: {:?}", i, e); + } + } + } + + // Update timing for next iteration + processing_time += frame_duration; + next_output_time += frame_duration; } + // Try to get output from the filter graph + let mut frames_output = 0; while abuffersink.sink().frame(&mut filtered).is_ok() { - let adjusted_pts = - filtered.pts().unwrap() as f64 / 48000.0 * AV_TIME_BASE_Q.den as f64; - filtered.set_pts(Some(adjusted_pts as i64)); - if self.output.send(filtered).is_err() { + let output_duration = Duration::from_secs_f64( + self.output_sample_count as f64 / self.output_sample_rate as f64, + ); + + let pts = (output_duration.as_secs_f64() * AV_TIME_BASE_Q.den as f64) as i64; + filtered.set_pts(Some(pts)); + + let sample_count = filtered.samples() as u64; + + trace!( + "Output frame: {} samples, pts: {}, duration: {:?}", + sample_count, pts, output_duration + ); + + if self + .output + .send((filtered, output_duration, self.start_timestamps)) + .is_err() + { warn!("Mixer unable to send output"); return; } - filtered = ffmpeg::frame::Audio::empty() + + self.output_sample_count += sample_count; + frames_output += 1; + filtered = ffmpeg::frame::Audio::empty(); + } + + if frames_output > 0 { + debug!( + "Filter graph produced {} output frames, total samples: {}", + frames_output, self.output_sample_count + ); + } else { + trace!("Filter graph produced no output this cycle"); } - std::thread::sleep(Duration::from_millis(2)) + // Sleep until next output time, but check frequently for new data + let time_until_next = + next_output_time.saturating_duration_since(std::time::Instant::now()); + if time_until_next > Duration::from_millis(2) { + std::thread::sleep(Duration::from_millis(2)); + } } } } pub 
struct AudioMixerSink { - pub tx: flume::Sender<(ffmpeg::frame::Audio, f64)>, + pub tx: flume::Sender<(ffmpeg::frame::Audio, SourceTimestamp)>, } pub struct AudioMixerSource { - rx: flume::Receiver<(ffmpeg::frame::Audio, f64)>, + rx: flume::Receiver<(ffmpeg::frame::Audio, SourceTimestamp)>, info: AudioInfo, } @@ -181,3 +474,611 @@ impl PipelineSourceTask for AudioMixer { Ok(()) } } + +#[cfg(test)] +mod tests { + use super::*; + use std::time::{Duration, Instant, SystemTime}; + + fn create_test_audio_info() -> AudioInfo { + AudioInfo::new( + ffmpeg::format::Sample::F32(ffmpeg::format::sample::Type::Packed), + 48000, + 2, + ) + .unwrap() + } + + fn create_test_frame(samples: usize, info: &AudioInfo) -> ffmpeg::frame::Audio { + let mut frame = + ffmpeg::frame::Audio::new(info.sample_format, samples, info.channel_layout()); + + unsafe { + let data = frame.data_mut(0); + let bytes_per_sample = info.sample_format.bytes() as usize; + let channels = info.channel_layout().channels() as usize; + let total_bytes = samples * channels * bytes_per_sample; + + // Fill with test pattern (non-zero to distinguish from silence) + for i in 0..total_bytes { + data.as_mut_ptr().add(i).write((i % 256) as u8); + } + } + + frame.set_pts(Some(0)); + frame + } + + fn is_silent_frame(frame: &ffmpeg::frame::Audio) -> bool { + unsafe { + let data = frame.data(0); + let bytes_per_sample = frame.format().bytes() as usize; + let channels = frame.channels() as usize; + let total_bytes = frame.samples() as usize * channels * bytes_per_sample; + + for i in 0..total_bytes { + if *data.as_ptr().add(i) != 0 { + return false; + } + } + } + + true + } + + #[test] + fn test_buffered_source_initialization() { + let info = create_test_audio_info(); + let (_tx, rx) = flume::bounded(32); + let source = BufferedAudioSource::new(rx, info); + + assert!(source.buffer.is_empty()); + assert!(source.last_processed_timestamp.is_none()); + assert_eq!(source.total_samples_processed, 0); + 
assert_eq!(source.last_output_pts, 0); + assert_eq!(source.timeline_position, Duration::ZERO); + + // Expected frame duration for 1024 samples at 48kHz + let expected_duration = 1024.0 / 48000.0 * 1000.0; + assert!((source.expected_frame_duration_ms - expected_duration).abs() < 0.001); + } + + #[test] + fn test_fill_buffer() { + let info = create_test_audio_info(); + let (tx, rx) = flume::bounded(32); + let mut source = BufferedAudioSource::new(rx, info.clone()); + + // Send some frames + let timestamp1 = SourceTimestamp::Instant(Instant::now()); + let timestamp2 = SourceTimestamp::Instant(Instant::now() + Duration::from_millis(21)); + let timestamp3 = SourceTimestamp::Instant(Instant::now() + Duration::from_millis(42)); + + tx.send((create_test_frame(1024, &info), timestamp1)) + .unwrap(); + tx.send((create_test_frame(1024, &info), timestamp2)) + .unwrap(); + tx.send((create_test_frame(1024, &info), timestamp3)) + .unwrap(); + + assert_eq!(source.buffer.len(), 0); + + source.fill_buffer(); + + assert_eq!(source.buffer.len(), 3); + } + + #[test] + fn test_sufficient_buffer_detection() { + let info = create_test_audio_info(); + let (tx, rx) = flume::bounded(32); + let mut source = BufferedAudioSource::new(rx, info.clone()); + + // Initially insufficient + assert!(!source.has_sufficient_buffer()); + + // Add one frame - still insufficient + let timestamp = SourceTimestamp::Instant(Instant::now()); + source + .buffer + .push_back((create_test_frame(1024, &info), timestamp)); + assert!(!source.has_sufficient_buffer()); + + // Add second frame - now sufficient + let timestamp2 = SourceTimestamp::Instant(Instant::now() + Duration::from_millis(21)); + source + .buffer + .push_back((create_test_frame(1024, &info), timestamp2)); + assert!(source.has_sufficient_buffer()); + + // Disconnect channel - always sufficient + drop(tx); + source.buffer.clear(); + assert!(source.has_sufficient_buffer()); + } + + #[test] + fn test_silent_frame_generation() { + let info = 
create_test_audio_info(); + let (_tx, rx) = flume::bounded::<(ffmpeg::frame::Audio, SourceTimestamp)>(32); + let source = BufferedAudioSource::new(rx, info.clone()); + + // Test various sizes + for size in [512, 1024, 2048, 4096] { + let frame = source.generate_silent_frame(size); + assert_eq!(frame.samples(), size); + assert!(is_silent_frame(&frame)); + } + } + + #[test] + fn test_gap_detection_and_filling() { + let info = create_test_audio_info(); + let (_tx, rx) = flume::bounded(32); + let mut source = BufferedAudioSource::new(rx, info.clone()); + let start_timestamps = SourceTimestamps::now(); + + // Process first frame + let timestamp1 = SourceTimestamp::Instant(Instant::now()); + source + .buffer + .push_back((create_test_frame(1024, &info), timestamp1)); + + let frames = source.process_with_gap_filling(Duration::from_secs(1), start_timestamps); + assert_eq!(frames.len(), 1); + assert!(!is_silent_frame(&frames[0])); + assert!(source.last_processed_timestamp.is_some()); + + // Create a gap - add frame 100ms later than expected + let timestamp2 = SourceTimestamp::Instant(Instant::now() + Duration::from_millis(121)); + source + .buffer + .push_back((create_test_frame(1024, &info), timestamp2)); + + let frames = source.process_with_gap_filling(Duration::from_secs(2), start_timestamps); + + // Should have silent frames followed by the real frame + assert!(frames.len() > 1); + + // Check that we got silent frames for the gap + for i in 0..frames.len() - 1 { + assert!(is_silent_frame(&frames[i])); + } + + // Last frame should be the real one + assert!(!is_silent_frame(&frames[frames.len() - 1])); + } + + #[test] + fn test_no_gap_when_continuous() { + let info = create_test_audio_info(); + let (_tx, rx) = flume::bounded(32); + let mut source = BufferedAudioSource::new(rx, info.clone()); + let start_timestamps = SourceTimestamps::now(); + + // Process frames with correct timing (21.33ms apart for 1024 samples at 48kHz) + let base_time = Instant::now(); + let 
frame_duration = Duration::from_micros(21333); // 1024/48000 * 1000000 + + for i in 0..5 { + let timestamp = SourceTimestamp::Instant(base_time + frame_duration * i); + source + .buffer + .push_back((create_test_frame(1024, &info), timestamp)); + + let frames = source.process_with_gap_filling(Duration::from_secs(10), start_timestamps); + + // Should only get one frame (no gaps) + assert_eq!(frames.len(), 1); + assert!(!is_silent_frame(&frames[0])); + } + } + + #[test] + fn test_pts_calculation() { + let info = create_test_audio_info(); + let (_tx, rx) = flume::bounded(32); + let mut source = BufferedAudioSource::new(rx, info.clone()); + let start_timestamps = SourceTimestamps::now(); + + // Process multiple frames + for i in 0..3 { + let timestamp = + SourceTimestamp::Instant(Instant::now() + Duration::from_millis(i * 21)); + source + .buffer + .push_back((create_test_frame(1024, &info), timestamp)); + + let frames = source.process_with_gap_filling(Duration::from_secs(10), start_timestamps); + + assert_eq!(frames.len(), 1); + + // Check PTS progression + let expected_pts = (i as u64 * 1024 * 1_000_000) / 48000; // microseconds + let actual_pts = frames[0].pts().unwrap() as u64; + + // PTS should progress correctly + let tolerance = 1000; // 1ms tolerance + assert!( + (actual_pts as i64 - expected_pts as i64).abs() < tolerance, + "PTS mismatch: expected {}, got {}", + expected_pts, + actual_pts + ); + } + } + + #[test] + fn test_large_gap_handling() { + let info = create_test_audio_info(); + let (_tx, rx) = flume::bounded(32); + let mut source = BufferedAudioSource::new(rx, info.clone()); + let start_timestamps = SourceTimestamps::now(); + + // Process first frame + let timestamp1 = SourceTimestamp::Instant(Instant::now()); + source + .buffer + .push_back((create_test_frame(1024, &info), timestamp1)); + source.process_with_gap_filling(Duration::from_secs(1), start_timestamps); + + // Create a 1-second gap + let timestamp2 = SourceTimestamp::Instant(Instant::now() + 
Duration::from_secs(1)); + source + .buffer + .push_back((create_test_frame(1024, &info), timestamp2)); + + let frames = source.process_with_gap_filling(Duration::from_secs(2), start_timestamps); + + // Should have many silent frames (48000 samples for 1 second at 48kHz) + let total_silent_samples: usize = frames[..frames.len() - 1] + .iter() + .map(|f| f.samples() as usize) + .sum(); + + // Should be approximately 48000 samples (1 second) + assert!(total_silent_samples > 45000 && total_silent_samples < 50000); + } + + #[test] + fn test_sample_counting() { + let info = create_test_audio_info(); + let (_tx, rx) = flume::bounded(32); + let mut source = BufferedAudioSource::new(rx, info.clone()); + let start_timestamps = SourceTimestamps::now(); + + // Process frames with different sizes + let sizes = vec![512, 1024, 2048, 1024, 512]; + let mut expected_total = 0u64; + + for (i, size) in sizes.iter().enumerate() { + let timestamp = + SourceTimestamp::Instant(Instant::now() + Duration::from_millis(i as u64 * 20)); + source + .buffer + .push_back((create_test_frame(*size, &info), timestamp)); + + source.process_with_gap_filling(Duration::from_secs(10), start_timestamps); + + expected_total += *size as u64; + assert_eq!(source.total_samples_processed, expected_total); + } + } + + #[test] + fn test_timestamp_types() { + let info = create_test_audio_info(); + let (_tx, rx) = flume::bounded(32); + let mut source = BufferedAudioSource::new(rx, info.clone()); + let start_timestamps = SourceTimestamps::now(); + + // Test with SystemTime timestamp + let timestamp1 = SourceTimestamp::SystemTime(SystemTime::now()); + source + .buffer + .push_back((create_test_frame(1024, &info), timestamp1)); + + let frames = source.process_with_gap_filling(Duration::from_secs(1), start_timestamps); + assert_eq!(frames.len(), 1); + + // Test with Instant timestamp + let timestamp2 = SourceTimestamp::Instant(Instant::now()); + source + .buffer + .push_back((create_test_frame(1024, &info), 
timestamp2)); + + let frames = source.process_with_gap_filling(Duration::from_secs(2), start_timestamps); + assert_eq!(frames.len(), 1); + } + + #[test] + fn test_mixer_initialization() { + let (tx, _rx) = flume::bounded(64); + let mut mixer = AudioMixer::new(tx); + + assert!(!mixer.has_sources()); + assert_eq!(mixer.sources.len(), 0); + assert_eq!(mixer.output_sample_count, 0); + assert_eq!(mixer.output_sample_rate, 48000); + + // Add sources + let info = create_test_audio_info(); + let (_source_tx, source_rx) = flume::bounded(32); + mixer.add_source(info, source_rx); + + assert!(mixer.has_sources()); + assert_eq!(mixer.sources.len(), 1); + } + + #[test] + fn test_mixer_sink_creation() { + let (tx, _rx) = flume::bounded(64); + let mut mixer = AudioMixer::new(tx); + + let info = create_test_audio_info(); + let sink = mixer.sink(info); + + assert!(mixer.has_sources()); + assert_eq!(mixer.sources.len(), 1); + + // Test that sink can send data + let timestamp = SourceTimestamp::Instant(Instant::now()); + let frame = create_test_frame(1024, &info); + sink.tx.send((frame, timestamp)).unwrap(); + } + + #[test] + fn test_continuous_output_without_input() { + let info = create_test_audio_info(); + let (_tx, rx) = flume::bounded(32); + let mut source = BufferedAudioSource::new(rx, info.clone()); + let start_timestamps = SourceTimestamps::now(); + + // Test that we get silence when no input has ever been provided + let processing_time = Duration::from_millis(100); + + // First call should produce no frames (no data, no last timestamp) + let frames = source.process_with_gap_filling(processing_time, start_timestamps); + assert_eq!(frames.len(), 0); + + // After processing once, subsequent calls should maintain timing with silence + // Process first real frame to establish timing + let timestamp1 = SourceTimestamp::Instant(Instant::now()); + source + .buffer + .push_back((create_test_frame(1024, &info), timestamp1)); + + let frames = 
source.process_with_gap_filling(Duration::from_millis(50), start_timestamps); + assert_eq!(frames.len(), 1); + assert!(!is_silent_frame(&frames[0])); + + // Now with no more input, but time advancing, we should get silence + let frames = source.process_with_gap_filling(Duration::from_millis(200), start_timestamps); + + // Should have generated silent frames to fill the gap + assert!(frames.len() > 0); + for frame in &frames { + assert!(is_silent_frame(frame)); + } + } + + #[test] + fn test_source_silence_on_empty_buffer() { + let info = create_test_audio_info(); + let (_tx, rx) = flume::bounded(32); + let mut source = BufferedAudioSource::new(rx, info.clone()); + let start_timestamps = SourceTimestamps::now(); + + // Establish initial timing with a frame + let timestamp = SourceTimestamp::Instant(Instant::now()); + source + .buffer + .push_back((create_test_frame(1024, &info), timestamp)); + + // Process the frame + let frames = source.process_with_gap_filling(Duration::from_millis(50), start_timestamps); + assert_eq!(frames.len(), 1); + assert!(!is_silent_frame(&frames[0])); + + // Now buffer is empty, advance time significantly + let frames = source.process_with_gap_filling(Duration::from_millis(500), start_timestamps); + + // Should produce silent frames for the time gap + let total_samples: usize = frames.iter().map(|f| f.samples() as usize).sum(); + + // ~450ms of silence at 48kHz should be around 21600 samples + assert!(total_samples > 20000 && total_samples < 23000); + + // All frames should be silent + for frame in &frames { + assert!(is_silent_frame(frame)); + } + } + + #[test] + fn test_mixer_output_with_silent_sources() { + // Test that buffered sources produce continuous output when needed + let info = create_test_audio_info(); + let (_tx, rx) = flume::bounded(32); + let mut source = BufferedAudioSource::new(rx, info.clone()); + let start_timestamps = SourceTimestamps::now(); + + // First, establish timing with an initial frame + let timestamp1 = 
SourceTimestamp::Instant(Instant::now()); + source + .buffer + .push_back((create_test_frame(1024, &info), timestamp1)); + + // Process the initial frame + let frames = source.process_with_gap_filling(Duration::from_millis(21), start_timestamps); + assert_eq!(frames.len(), 1); + assert!(!is_silent_frame(&frames[0])); + + // Now simulate continuous time progression without new input + // The source should generate silence to maintain output timing + let mut total_frames = Vec::new(); + let mut current_time = Duration::from_millis(21); + + for _ in 0..10 { + current_time += Duration::from_millis(21); + let frames = source.process_with_gap_filling(current_time, start_timestamps); + total_frames.extend(frames); + } + + // We should have received silent frames to maintain continuous output + assert!( + !total_frames.is_empty(), + "Should have generated silence frames" + ); + + // All generated frames should be silent + for frame in &total_frames { + assert!(is_silent_frame(frame), "Generated frames should be silent"); + } + + // Total samples should roughly match the time progression + let total_samples: usize = total_frames.iter().map(|f| f.samples() as usize).sum(); + let expected_samples = (210.0 / 1000.0 * 48000.0) as usize; // 210ms at 48kHz + assert!( + total_samples > expected_samples * 9 / 10 && total_samples < expected_samples * 11 / 10, + "Sample count should match time progression: got {}, expected ~{}", + total_samples, + expected_samples + ); + } + + #[test] + fn test_initial_silence_generation() { + // Test that sources only generate silence after they've processed actual data + let info = create_test_audio_info(); + let (tx, rx) = flume::bounded(32); + let mut source = BufferedAudioSource::new(rx, info.clone()); + let start_timestamps = SourceTimestamps::now(); + + // First, no silence should be generated without any prior data + let target_time = Duration::from_millis(100); + let frames = source.process_with_gap_filling(target_time, start_timestamps); + 
assert!( + frames.is_empty(), + "Should not generate silence without prior data" + ); + + // Send and process one frame to establish timing + let timestamp = SourceTimestamp::Instant(Instant::now()); + tx.send((create_test_frame(1024, &info), timestamp)) + .unwrap(); + source.fill_buffer(); + + let frames = source.process_with_gap_filling(Duration::from_millis(50), start_timestamps); + assert_eq!(frames.len(), 1, "Should process the buffered frame"); + assert!( + !is_silent_frame(&frames[0]), + "First frame should not be silent" + ); + + // Now with no more input but time advancing, we should get silence + let frames = source.process_with_gap_filling(Duration::from_millis(150), start_timestamps); + assert!( + !frames.is_empty(), + "Should generate silence to fill time gap" + ); + + // All generated frames should be silent + for frame in &frames { + assert!( + is_silent_frame(frame), + "Gap-filling frames should be silent" + ); + } + + // After generating initial silence, last_processed_timestamp should be set + // timeline_position should be updated even without last_processed_timestamp + assert_eq!(source.timeline_position, target_time); + + // Subsequent calls shouldn't generate more initial silence + let frames2 = source.generate_initial_silence_if_needed(target_time, start_timestamps); + assert!( + frames2.is_empty(), + "Should not generate initial silence twice" + ); + } + + #[test] + fn test_mixer_handles_source_disconnection() { + // Test that buffered source handles disconnection gracefully + let info = create_test_audio_info(); + let (source_tx, source_rx) = flume::bounded(32); + let mut source = BufferedAudioSource::new(source_rx, info.clone()); + + // Send one frame + let timestamp = SourceTimestamp::Instant(Instant::now()); + source_tx + .send((create_test_frame(1024, &info), timestamp)) + .unwrap(); + + // Fill buffer to get the frame + source.fill_buffer(); + assert_eq!(source.buffer.len(), 1); + + // Disconnect the source + drop(source_tx); + + // 
Verify disconnection is detected + assert!(source.has_sufficient_buffer()); + + // Should still be able to process the buffered frame + let start_timestamps = SourceTimestamps::now(); + let frames = source.process_with_gap_filling(Duration::from_millis(50), start_timestamps); + assert_eq!(frames.len(), 1); + assert!(!is_silent_frame(&frames[0])); + + // After processing the buffered frame, should generate silence for continuity + let frames = source.process_with_gap_filling(Duration::from_millis(100), start_timestamps); + assert!(!frames.is_empty()); + for frame in &frames { + assert!(is_silent_frame(frame)); + } + } + + #[test] + fn test_continuous_timing_maintenance() { + // Test that sources maintain consistent timing across gaps and silence + let info = create_test_audio_info(); + let (_tx, rx) = flume::bounded(32); + let mut source = BufferedAudioSource::new(rx, info.clone()); + let start_timestamps = SourceTimestamps::now(); + + // Process frames with intermittent data + let base_time = Instant::now(); + + // Frame at t=0 + source.buffer.push_back(( + create_test_frame(1024, &info), + SourceTimestamp::Instant(base_time), + )); + let _frames = source.process_with_gap_filling(Duration::from_millis(21), start_timestamps); + assert_eq!(source.total_samples_processed, 1024); + + // No frame at t=21ms (should generate silence) + let _frames = source.process_with_gap_filling(Duration::from_millis(42), start_timestamps); + // Frames may or may not be generated depending on timing thresholds + + // Frame at t=63ms (after gap) + source.buffer.push_back(( + create_test_frame(1024, &info), + SourceTimestamp::Instant(base_time + Duration::from_millis(63)), + )); + let frames = source.process_with_gap_filling(Duration::from_millis(84), start_timestamps); + + // Should have at least one frame (the real frame, and possibly silence) + assert!(!frames.is_empty()); + + // Verify total samples processed is reasonable + // We processed at least 2 frames (2048 samples minimum) + 
assert!( + source.total_samples_processed >= 2048, + "Should have processed at least 2 frames worth of samples: got {}", + source.total_samples_processed + ); + } +} diff --git a/crates/recording/src/sources/camera.rs b/crates/recording/src/sources/camera.rs index 0580428527..26cdeb3f1f 100644 --- a/crates/recording/src/sources/camera.rs +++ b/crates/recording/src/sources/camera.rs @@ -9,6 +9,7 @@ use tracing::{error, info}; use crate::{ MediaError, + capture_pipeline::SourceTimestamp, feeds::camera::{self, CameraFeedLock, RawCameraFrame}, pipeline::{control::Control, task::PipelineSourceTask}, }; @@ -16,7 +17,7 @@ use crate::{ pub struct CameraSource { feed: Arc, video_info: VideoInfo, - output: Sender<(frame::Video, f64)>, + output: Sender<(frame::Video, SourceTimestamp)>, first_frame_instant: Option, first_frame_timestamp: Option, start_instant: Instant, @@ -25,7 +26,7 @@ pub struct CameraSource { impl CameraSource { pub fn init( feed: Arc, - output: Sender<(frame::Video, f64)>, + output: Sender<(frame::Video, SourceTimestamp)>, start_instant: Instant, ) -> Self { Self { @@ -45,8 +46,8 @@ impl CameraSource { fn process_frame( &self, camera_frame: RawCameraFrame, - first_frame_instant: Instant, - first_frame_timestamp: Duration, + // first_frame_instant: Instant, + // first_frame_timestamp: Duration, ) -> Result<(), MediaError> { let check_skip_send = || { cap_fail::fail_err!("media::sources::camera::skip_send", ()); @@ -58,13 +59,13 @@ impl CameraSource { return Ok(()); } - let relative_timestamp = camera_frame.timestamp - first_frame_timestamp; + // let relative_timestamp = camera_frame.timestamp - first_frame_timestamp; if self .output .send(( camera_frame.frame, - (first_frame_instant + relative_timestamp - self.start_instant).as_secs_f64(), + camera_frame.timestamp, // (first_frame_instant + relative_timestamp - self.start_instant).as_secs_f64(), )) .is_err() { @@ -81,12 +82,10 @@ impl CameraSource { drop(frames_rx); for frame in frames { - let 
first_frame_instant = *self.first_frame_instant.get_or_insert(frame.reference_time); - let first_frame_timestamp = *self.first_frame_timestamp.get_or_insert(frame.timestamp); + // let first_frame_instant = *self.first_frame_instant.get_or_insert(frame.reference_time); + // let first_frame_timestamp = *self.first_frame_timestamp.get_or_insert(frame.timestamp); - if let Err(error) = - self.process_frame(frame, first_frame_instant, first_frame_timestamp) - { + if let Err(error) = self.process_frame(frame) { eprintln!("{error}"); break; } @@ -116,14 +115,12 @@ impl PipelineSourceTask for CameraSource { match control_signal.last() { Some(Control::Play) => match frames.drain().last().or_else(|| frames.recv().ok()) { Some(frame) => { - let first_frame_instant = - *self.first_frame_instant.get_or_insert(frame.reference_time); - let first_frame_timestamp = - *self.first_frame_timestamp.get_or_insert(frame.timestamp); - - if let Err(error) = - self.process_frame(frame, first_frame_instant, first_frame_timestamp) - { + // let first_frame_instant = + // *self.first_frame_instant.get_or_insert(frame.reference_time); + // let first_frame_timestamp = + // *self.first_frame_timestamp.get_or_insert(frame.timestamp); + + if let Err(error) = self.process_frame(frame) { eprintln!("{error}"); break; } diff --git a/crates/recording/src/sources/mod.rs b/crates/recording/src/sources/mod.rs index d83b759113..d3cee5ec26 100644 --- a/crates/recording/src/sources/mod.rs +++ b/crates/recording/src/sources/mod.rs @@ -1,6 +1,7 @@ pub mod audio_input; pub mod audio_mixer; pub mod camera; +pub mod new_audio_mixer; pub mod screen_capture; pub use audio_input::*; diff --git a/crates/recording/src/sources/new_audio_mixer.rs b/crates/recording/src/sources/new_audio_mixer.rs new file mode 100644 index 0000000000..f2a2c79582 --- /dev/null +++ b/crates/recording/src/sources/new_audio_mixer.rs @@ -0,0 +1,539 @@ +use std::{ + collections::VecDeque, + time::{Duration, Instant}, +}; + +use 
cap_media_info::AudioInfo; +use flume::{Receiver, Sender}; +use tracing::debug; + +use crate::{ + capture_pipeline::{SourceTimestamp, SourceTimestamps}, + pipeline::task::PipelineSourceTask, +}; + +struct MixerSource { + rx: Receiver<(ffmpeg::frame::Audio, SourceTimestamp)>, + info: AudioInfo, + buffer: VecDeque<(ffmpeg::frame::Audio, SourceTimestamp)>, + buffer_last: Option<(SourceTimestamp, Duration)>, +} + +pub struct AudioMixerBuilder { + sources: Vec, + output: Sender<(ffmpeg::frame::Audio, SourceTimestamp)>, +} + +impl AudioMixerBuilder { + pub fn new(output: Sender<(ffmpeg::frame::Audio, SourceTimestamp)>) -> Self { + Self { + sources: Vec::new(), + output, + } + } + + pub fn has_sources(&self) -> bool { + !self.sources.is_empty() + } + + pub fn add_source( + &mut self, + info: AudioInfo, + rx: Receiver<(ffmpeg::frame::Audio, SourceTimestamp)>, + ) { + self.sources.push(MixerSource { + info, + rx, + buffer: VecDeque::new(), + buffer_last: None, + }); + } + + pub fn build(self) -> Result { + let mut filter_graph = ffmpeg::filter::Graph::new(); + + let mut abuffers = self + .sources + .iter() + .enumerate() + .map(|(i, source)| { + let info = &source.info; + let args = format!( + "time_base={}:sample_rate={}:sample_fmt={}:channel_layout=0x{:x}", + info.time_base, + info.rate(), + info.sample_format.name(), + info.channel_layout().bits() + ); + + debug!("audio mixer input {i}: {args}"); + + filter_graph.add( + &ffmpeg::filter::find("abuffer").expect("Failed to find abuffer filter"), + &format!("src{i}"), + &args, + ) + }) + .collect::, _>>()?; + + let mut amix = filter_graph.add( + &ffmpeg::filter::find("amix").expect("Failed to find amix filter"), + "amix", + &format!( + "inputs={}:duration=first:dropout_transition=0", + abuffers.len() + ), + )?; + + let aformat_args = "sample_fmts=flt:sample_rates=48000:channel_layouts=stereo"; + + let mut aformat = filter_graph.add( + &ffmpeg::filter::find("aformat").expect("Failed to find aformat filter"), + "aformat", + 
aformat_args, + )?; + + let mut abuffersink = filter_graph.add( + &ffmpeg::filter::find("abuffersink").expect("Failed to find abuffersink filter"), + "sink", + "", + )?; + + for (i, abuffer) in abuffers.iter_mut().enumerate() { + abuffer.link(0, &mut amix, i as u32); + } + + amix.link(0, &mut aformat, 0); + aformat.link(0, &mut abuffersink, 0); + + filter_graph.validate()?; + + Ok(AudioMixer { + sources: self.sources, + samples_out: 0, + output: self.output, + last_tick: None, + filter_graph, + abuffers, + amix, + aformat, + abuffersink, + }) + } +} + +pub struct AudioMixer { + sources: Vec, + samples_out: usize, + output: Sender<(ffmpeg::frame::Audio, SourceTimestamp)>, + last_tick: Option, + + filter_graph: ffmpeg::filter::Graph, + abuffers: Vec, + amix: ffmpeg::filter::Context, + aformat: ffmpeg::filter::Context, + abuffersink: ffmpeg::filter::Context, +} + +impl AudioMixer { + pub const INFO: AudioInfo = AudioInfo::new_raw( + cap_media_info::Sample::F32(cap_media_info::Type::Packed), + 48_000, + 2, + ); + pub const BUFFER_TIMEOUT: Duration = Duration::from_millis(5); + + fn buffer_sources(&mut self, start: SourceTimestamps) { + for source in &mut self.sources { + while let Ok((frame, timestamp)) = source.rx.try_recv() { + // if gap between incoming and last, insert silence + if let Some((buffer_last_timestamp, buffer_last_duration)) = source.buffer_last { + let timestamp_elapsed = timestamp.duration_since(start); + let buffer_last_elapsed = buffer_last_timestamp.duration_since(start); + + if timestamp_elapsed > buffer_last_elapsed { + let elapsed_since_last_frame = timestamp_elapsed - buffer_last_elapsed; + + if elapsed_since_last_frame > buffer_last_duration + && elapsed_since_last_frame - buffer_last_duration + >= Duration::from_millis(1) + { + let gap = timestamp.duration_since(start) + - buffer_last_timestamp.duration_since(start) + - buffer_last_duration; + + debug!("Gap between last buffer frame, inserting {gap:?} of silence"); + + let
silence_samples_needed = + (gap.as_secs_f64()) * source.info.rate() as f64; + let silence_samples_count = silence_samples_needed.ceil() as usize; + + let mut frame = ffmpeg::frame::Audio::new( + source.info.sample_format, + silence_samples_count, + source.info.channel_layout(), + ); + + frame.set_rate(source.info.rate() as u32); + + source.buffer_last = Some(( + &buffer_last_timestamp + buffer_last_duration, + Duration::from_secs_f64( + silence_samples_count as f64 / source.info.rate() as f64, + ), + )); + source.buffer.push_back((frame, &buffer_last_timestamp + buffer_last_duration)); + } + } + } else { + let gap = timestamp.duration_since(start); + if !gap.is_zero() { + let silence_samples_needed = gap.as_secs_f64() * source.info.rate() as f64; + let silence_samples_needed = silence_samples_needed.ceil() as usize; + + debug!("Gap from beginning of stream, inserting {gap:?} of silence"); + + let mut frame = ffmpeg::frame::Audio::new( + source.info.sample_format, + silence_samples_needed, + source.info.channel_layout(), + ); + + frame.set_rate(source.info.rate() as u32); + + let timestamp = SourceTimestamp::Instant(start.instant()); + source.buffer_last = Some(( + timestamp, + Duration::from_secs_f64( + silence_samples_needed as f64 / source.info.rate() as f64, + ), + )); + source.buffer.push_back((frame, timestamp)); + } + } + + source.buffer_last = Some(( + timestamp, + Duration::from_secs_f64(frame.samples() as f64 / frame.rate() as f64), + )); + source.buffer.push_back((frame, timestamp)); + } + } + } + + fn tick(&mut self, start: SourceTimestamps, now: Instant) -> Result<(), ()> { + // if let Some(last_tick) = self.last_tick { + // let time_since_last_tick = now.duration_since(last_tick); + // if time_since_last_tick > Self::BUFFER_TIMEOUT { + // let gap = Self::BUFFER_TIMEOUT; + // for source in &mut self.sources { + // let silence_samples_needed = (gap.as_secs_f64()) * source.info.rate() as f64; + // let silence_samples_count = silence_samples_needed.ceil()
as usize; + + // let mut frame = ffmpeg::frame::Audio::new( + // source.info.sample_format, + // silence_samples_count, + // source.info.channel_layout(), + // ); + // dbg!(source.info.sample_format); + + // frame.set_rate(source.info.rate() as u32); + + // source.buffer_last = Some((SourceTimestamp::Instant(last_tick), gap)); + // source + // .buffer + // .push_back((frame, SourceTimestamp::Instant(last_tick))); + // } + // } + // } else { + // let time_since_last_tick = now.duration_since(start.instant()); + // if time_since_last_tick > Self::BUFFER_TIMEOUT { + // let gap = Self::BUFFER_TIMEOUT; + // for source in &mut self.sources { + // let silence_samples_needed = (gap.as_secs_f64()) * source.info.rate() as f64; + // let silence_samples_count = silence_samples_needed.ceil() as usize; + + // let mut frame = ffmpeg::frame::Audio::new( + // source.info.sample_format, + // silence_samples_count, + // source.info.channel_layout(), + // ); + + // frame.set_rate(source.info.rate() as u32); + + // source.buffer_last = Some((SourceTimestamp::Instant(start.instant()), gap)); + // source + // .buffer + // .push_back((frame, SourceTimestamp::Instant(start.instant()))); + // } + // } + // } + + self.buffer_sources(start); + + for (i, source) in self.sources.iter_mut().enumerate() { + for buffer in source.buffer.drain(..) 
{ + let _ = self.abuffers[i].source().add(&buffer.0); + } + } + + let mut filtered = ffmpeg::frame::Audio::empty(); + while self.abuffersink.sink().frame(&mut filtered).is_ok() { + let timestamp = start.instant() + + Duration::from_secs_f64(self.samples_out as f64 / filtered.rate() as f64); + + self.samples_out += filtered.samples(); + + if self + .output + .send((filtered, SourceTimestamp::Instant(timestamp))) + .is_err() + { + return Err(()); + } + + filtered = ffmpeg::frame::Audio::empty(); + } + + self.last_tick = Some(now); + + Ok(()) + } + + pub fn run(&mut self) { + let start = SourceTimestamps::now(); + + while let Ok(()) = self.tick(start, Instant::now()) { + std::thread::sleep(Duration::from_millis(5)); + } + } + + pub fn builder(output: Sender<(ffmpeg::frame::Audio, SourceTimestamp)>) -> AudioMixerBuilder { + AudioMixerBuilder::new(output) + } +} + +impl PipelineSourceTask for AudioMixerBuilder { + fn run( + &mut self, + ready_signal: crate::pipeline::task::PipelineReadySignal, + mut control_signal: crate::pipeline::control::PipelineControlSignal, + ) -> Result<(), String> { + let start = SourceTimestamps::now(); + + let this = std::mem::replace(self, AudioMixerBuilder::new(self.output.clone())); + + let mut mixer = this.build().map_err(|e| format!("BuildMixer: {e}"))?; + + let _ = ready_signal.send(Ok(())); + + loop { + if control_signal + .last() + .map(|v| matches!(v, crate::pipeline::control::Control::Shutdown)) + .unwrap_or(false) + { + break; + } + + mixer + .tick(start, Instant::now()) + .map_err(|()| format!("Audio mixer tick failed"))?; + + std::thread::sleep(Duration::from_millis(5)); + } + + Ok(()) + } +} + +#[cfg(test)] +mod test { + use super::*; + + const SAMPLE_RATE: u32 = 48_000; + const SOURCE_INFO: AudioInfo = AudioInfo::new_raw( + cap_media_info::Sample::U8(cap_media_info::Type::Packed), + SAMPLE_RATE, + 1, + ); + const ONE_SECOND: Duration = Duration::from_secs(1); + const SAMPLES_SECOND: usize = SOURCE_INFO.rate() as usize; + + 
#[test] + fn mix_sources() { + let (tx, output_rx) = flume::bounded(4); + let mut mixer = AudioMixerBuilder::new(tx); + let start = SourceTimestamps::now(); + + let (tx1, rx) = flume::bounded(4); + mixer.add_source(SOURCE_INFO, rx); + + let (tx2, rx) = flume::bounded(4); + mixer.add_source(SOURCE_INFO, rx); + + let mut mixer = mixer.build().unwrap(); + + tx1.send(( + SOURCE_INFO.wrap_frame(&vec![128, 255, 255, 255]), + SourceTimestamp::Instant(start.instant()), + )) + .unwrap(); + tx2.send(( + SOURCE_INFO.wrap_frame(&vec![128, 128, 1, 255]), + SourceTimestamp::Instant(start.instant()), + )) + .unwrap(); + + let _ = mixer.tick(start, Instant::now()); + + let (frame, _) = output_rx.recv().expect("No output frame"); + + let byte_count = frame.samples() * frame.channels() as usize; + let samples: &[f32] = unsafe { std::slice::from_raw_parts(frame.data(0).as_ptr() as *const f32, byte_count) }; + + assert_eq!(samples[0], 0.0); + assert_eq!(samples[0], samples[1]); + + assert_eq!(samples[4], 0.0); + assert_eq!(samples[4], samples[5]); + } + + mod source_buffer { + use super::*; + + #[test] + fn single_frame() { + let (output_tx, _) = flume::bounded(4); + let mut mixer = AudioMixerBuilder::new(output_tx); + let start = SourceTimestamps::now(); + + let (tx, rx) = flume::bounded(4); + mixer.add_source(SOURCE_INFO, rx); + + let mut mixer = mixer.build().unwrap(); + + tx.send(( + SOURCE_INFO.wrap_frame(&vec![0; SAMPLES_SECOND / 2]), + SourceTimestamp::Instant(start.instant()), + )) + .unwrap(); + + mixer.buffer_sources(start); + + assert_eq!(mixer.sources[0].buffer.len(), 1); + assert!(mixer.sources[0].rx.is_empty()); + } + + #[test] + fn frame_gap() { + let (output_tx, _) = flume::bounded(4); + let mut mixer = AudioMixerBuilder::new(output_tx); + let start = SourceTimestamps::now(); + + let (tx, rx) = flume::bounded(4); + mixer.add_source(SOURCE_INFO, rx); + + let mut mixer = mixer.build().unwrap(); + + tx.send(( + SOURCE_INFO.wrap_frame(&vec![0; SAMPLES_SECOND / 2]), + SourceTimestamp::Instant(start.instant()), + ))
+ .unwrap(); + + tx.send(( + SOURCE_INFO.wrap_frame(&vec![0; SAMPLES_SECOND / 2]), + SourceTimestamp::Instant(start.instant() + ONE_SECOND), + )) + .unwrap(); + + mixer.buffer_sources(start); + + let source = &mixer.sources[0]; + + assert_eq!(source.buffer.len(), 3); + assert!(source.rx.is_empty()); + + assert_eq!(source.buffer[1].1.duration_since(start), ONE_SECOND / 2); + assert_eq!( + source.buffer[1].0.samples(), + SOURCE_INFO.rate() as usize / 2 + ); + } + + #[test] + fn start_gap() { + let (output_tx, _) = flume::bounded(4); + let mut mixer = AudioMixerBuilder::new(output_tx); + let start = SourceTimestamps::now(); + + let (tx, rx) = flume::bounded(4); + mixer.add_source(SOURCE_INFO, rx); + + let mut mixer = mixer.build().unwrap(); + + tx.send(( + SOURCE_INFO.wrap_frame(&vec![0; SAMPLES_SECOND / 2]), + SourceTimestamp::Instant(start.instant() + ONE_SECOND / 2), + )) + .unwrap(); + + mixer.buffer_sources(start); + + let source = &mixer.sources[0]; + + assert_eq!(source.buffer.len(), 2); + assert!(source.rx.is_empty()); + + assert_eq!(source.buffer[0].1.duration_since(start), Duration::ZERO); + assert_eq!( + source.buffer[0].0.samples(), + SOURCE_INFO.rate() as usize / 2 + ); + } + + #[test] + fn after_draining() { + let (output_tx, _) = flume::bounded(4); + let mut mixer = AudioMixerBuilder::new(output_tx); + let start = SourceTimestamps::now(); + + let (tx, rx) = flume::bounded(4); + mixer.add_source(SOURCE_INFO, rx); + + let mut mixer = mixer.build().unwrap(); + + tx.send(( + SOURCE_INFO.wrap_frame(&vec![0; SAMPLES_SECOND / 2]), + SourceTimestamp::Instant(start.instant()), + )) + .unwrap(); + + mixer.buffer_sources(start); + + mixer.sources[0].buffer.clear(); + + tx.send(( + SOURCE_INFO.wrap_frame(&vec![0; SAMPLES_SECOND / 2]), + SourceTimestamp::Instant(start.instant() + ONE_SECOND), + )) + .unwrap(); + + mixer.buffer_sources(start); + + let source = &mixer.sources[0]; + + assert_eq!(source.buffer.len(), 2); + assert!(source.rx.is_empty()); + + let item = 
&source.buffer[0]; + assert_eq!(item.1.duration_since(start), ONE_SECOND / 2); + assert_eq!(item.0.samples(), SOURCE_INFO.rate() as usize / 2); + + let item = &source.buffer[1]; + assert_eq!(item.1.duration_since(start), ONE_SECOND); + assert_eq!(item.0.samples(), SOURCE_INFO.rate() as usize / 2); + } + } +} diff --git a/crates/recording/src/sources/screen_capture/mod.rs b/crates/recording/src/sources/screen_capture/mod.rs index 27075375ea..61f9604f26 100644 --- a/crates/recording/src/sources/screen_capture/mod.rs +++ b/crates/recording/src/sources/screen_capture/mod.rs @@ -8,7 +8,10 @@ use specta::Type; use std::time::SystemTime; use tracing::{error, warn}; -use crate::pipeline::{control::Control, task::PipelineSourceTask}; +use crate::{ + capture_pipeline::{SourceTimestamp, SourceTimestamps}, + pipeline::{control::Control, task::PipelineSourceTask}, +}; #[cfg(windows)] mod windows; @@ -194,9 +197,8 @@ pub struct ScreenCaptureSource { config: Config, video_info: VideoInfo, tokio_handle: tokio::runtime::Handle, - video_tx: Sender<(TCaptureFormat::VideoFormat, f64)>, - audio_tx: Option>, - start_time: SystemTime, + video_tx: Sender<(TCaptureFormat::VideoFormat, SourceTimestamp)>, + audio_tx: Option>, _phantom: std::marker::PhantomData, #[cfg(windows)] d3d_device: ::windows::Win32::Graphics::Direct3D11::ID3D11Device, @@ -236,7 +238,6 @@ impl Clone for ScreenCaptureSource ScreenCaptureSource { target: &ScreenCaptureTarget, show_cursor: bool, max_fps: u32, - video_tx: Sender<(TCaptureFormat::VideoFormat, f64)>, - audio_tx: Option>, - start_time: SystemTime, + video_tx: Sender<(TCaptureFormat::VideoFormat, SourceTimestamp)>, + audio_tx: Option>, tokio_handle: tokio::runtime::Handle, #[cfg(windows)] d3d_device: ::windows::Win32::Graphics::Direct3D11::ID3D11Device, ) -> Result { @@ -404,7 +404,6 @@ impl ScreenCaptureSource { video_tx, audio_tx, tokio_handle, - start_time, _phantom: std::marker::PhantomData, #[cfg(windows)] d3d_device, diff --git 
a/crates/recording/src/sources/screen_capture/windows.rs b/crates/recording/src/sources/screen_capture/windows.rs index 1bed88387c..9bec2c4774 100644 --- a/crates/recording/src/sources/screen_capture/windows.rs +++ b/crates/recording/src/sources/screen_capture/windows.rs @@ -1,6 +1,6 @@ use super::*; +use crate::capture_pipeline::PerformanceCounterTimestamp; use ::windows::{ - Foundation::TimeSpan, Graphics::Capture::GraphicsCaptureItem, Win32::Graphics::Direct3D11::{D3D11_BOX, ID3D11Device}, }; @@ -47,12 +47,11 @@ impl ScreenCaptureFormat for Direct3DCapture { struct FrameHandler { capturer: WeakActorRef, - start_time: SystemTime, frames_dropped: u32, last_cleanup: Instant, last_log: Instant, frame_events: VecDeque<(Instant, bool)>, - video_tx: Sender<(scap_direct3d::Frame, f64)>, + video_tx: Sender<(scap_direct3d::Frame, SourceTimestamp)>, } impl Actor for FrameHandler { @@ -127,12 +126,16 @@ impl Message for FrameHandler { msg: NewFrame, ctx: &mut kameo::prelude::Context, ) -> Self::Reply { - let Ok(elapsed) = msg.display_time.duration_since(self.start_time) else { + let Ok(timestamp) = msg.frame.inner().SystemRelativeTime() else { return; }; - let now = Instant::now(); - let frame_dropped = match self.video_tx.try_send((msg.frame, elapsed.as_secs_f64())) { + let frame_dropped = match self.video_tx.try_send(( + msg.frame, + SourceTimestamp::PerformanceCounter(PerformanceCounterTimestamp::new( + timestamp.Duration, + )), + )) { Err(flume::TrySendError::Disconnected(_)) => { warn!("Pipeline disconnected"); let _ = ctx.actor_ref().stop_gracefully().await; @@ -146,6 +149,8 @@ impl Message for FrameHandler { _ => false, }; + let now = Instant::now(); + self.frame_events.push_back((now, frame_dropped)); if now.duration_since(self.last_cleanup) > Duration::from_millis(100) { @@ -211,7 +216,6 @@ impl PipelineSourceTask for ScreenCaptureSource { let video_tx = self.video_tx.clone(); let audio_tx = self.audio_tx.clone(); - let start_time = self.start_time; let d3d_device 
= self.d3d_device.clone(); // Frame drop rate tracking state @@ -226,7 +230,6 @@ impl PipelineSourceTask for ScreenCaptureSource { let frame_handler = FrameHandler::spawn(FrameHandler { capturer: capturer.downgrade(), video_tx, - start_time, frame_events: Default::default(), frames_dropped: Default::default(), last_cleanup: Instant::now(), @@ -284,7 +287,7 @@ impl PipelineSourceTask for ScreenCaptureSource { let audio_capture = if let Some(audio_tx) = audio_tx { let audio_capture = WindowsAudioCapture::spawn( - WindowsAudioCapture::new(audio_tx, start_time) + WindowsAudioCapture::new(audio_tx) .map_err(SourceError::CreateAudioCapture)?, ); @@ -478,28 +481,15 @@ pub mod audio { impl WindowsAudioCapture { pub fn new( - audio_tx: Sender<(ffmpeg::frame::Audio, f64)>, - start_time: SystemTime, + audio_tx: Sender<(ffmpeg::frame::Audio, SourceTimestamp)>, ) -> Result { - let mut i = 0; let capturer = scap_cpal::create_capturer( - move |data, _: &cpal::InputCallbackInfo, config| { + move |data, info, config| { use scap_ffmpeg::*; - let timestamp = SystemTime::now(); - let mut ff_frame = data.as_ffmpeg(config); - - let Ok(elapsed) = timestamp.duration_since(start_time) else { - warn!("Skipping audio frame {i} as elapsed time is invalid"); - return; - }; - - let rate = ff_frame.rate(); - - ff_frame.set_pts(Some((elapsed.as_secs_f64() * rate as f64) as i64)); + let timestamp = SourceTimestamp::from_cpal(info.timestamp().capture); - let _ = audio_tx.send((ff_frame, elapsed.as_secs_f64())); - i += 1; + let _ = audio_tx.send((data.as_ffmpeg(config), timestamp)); }, move |e| { dbg!(e); diff --git a/crates/recording/src/studio_recording.rs b/crates/recording/src/studio_recording.rs index 4042897095..737eeeefd3 100644 --- a/crates/recording/src/studio_recording.rs +++ b/crates/recording/src/studio_recording.rs @@ -1,6 +1,9 @@ use crate::{ ActorError, MediaError, RecordingBaseInputs, RecordingError, - capture_pipeline::{MakeCapturePipeline, ScreenCaptureMethod, 
create_screen_capture}, + capture_pipeline::{ + MakeCapturePipeline, ScreenCaptureMethod, SourceTimestamp, SourceTimestamps, + create_screen_capture, + }, cursor::{CursorActor, Cursors, spawn_cursor_recorder}, feeds::{camera::CameraFeedLock, microphone::MicrophoneFeedLock}, pipeline::Pipeline, @@ -61,7 +64,7 @@ pub struct StudioRecordingSegment { pub struct PipelineOutput { pub path: PathBuf, - pub first_timestamp_rx: flume::Receiver, + pub first_timestamp_rx: flume::Receiver, } pub struct ScreenPipelineOutput { @@ -70,6 +73,7 @@ pub struct ScreenPipelineOutput { } struct StudioRecordingPipeline { + pub start_time: SourceTimestamps, pub inner: Pipeline, pub screen: ScreenPipelineOutput, pub microphone: Option, @@ -489,38 +493,46 @@ async fn stop_recording( RelativePathBuf::from_path(path.strip_prefix(&actor.recording_dir).unwrap()).unwrap() }; - let recv_timestamp = |pipeline: &PipelineOutput| pipeline.first_timestamp_rx.try_recv().ok(); - let meta = StudioRecordingMeta::MultipleSegments { inner: MultipleSegments { segments: { actor .segments .iter() - .map(|s| MultipleSegment { - display: VideoMeta { - path: make_relative(&s.pipeline.screen.inner.path), - fps: actor.fps, - start_time: recv_timestamp(&s.pipeline.screen.inner), - }, - camera: s.pipeline.camera.as_ref().map(|camera| VideoMeta { - path: make_relative(&camera.inner.path), - fps: camera.fps, - start_time: recv_timestamp(&camera.inner), - }), - mic: s.pipeline.microphone.as_ref().map(|mic| AudioMeta { - path: make_relative(&mic.path), - start_time: recv_timestamp(mic), - }), - cursor: s - .pipeline - .cursor - .as_ref() - .map(|cursor| make_relative(&cursor.output_path)), - system_audio: s.pipeline.system_audio.as_ref().map(|audio| AudioMeta { - path: make_relative(&audio.path), - start_time: recv_timestamp(audio), - }), + .map(|s| { + let recv_timestamp = |pipeline: &PipelineOutput| { + pipeline + .first_timestamp_rx + .try_recv() + .ok() + .map(|v| v.duration_since(s.pipeline.start_time).as_secs_f64()) 
+ }; + + MultipleSegment { + display: VideoMeta { + path: make_relative(&s.pipeline.screen.inner.path), + fps: actor.fps, + start_time: recv_timestamp(&s.pipeline.screen.inner), + }, + camera: s.pipeline.camera.as_ref().map(|camera| VideoMeta { + path: make_relative(&camera.inner.path), + fps: camera.fps, + start_time: recv_timestamp(&camera.inner), + }), + mic: s.pipeline.microphone.as_ref().map(|mic| AudioMeta { + path: make_relative(&mic.path), + start_time: recv_timestamp(mic), + }), + cursor: s + .pipeline + .cursor + .as_ref() + .map(|cursor| make_relative(&cursor.output_path)), + system_audio: s.pipeline.system_audio.as_ref().map(|audio| AudioMeta { + path: make_relative(&audio.path), + start_time: recv_timestamp(audio), + }), + } }) .collect() }, @@ -662,6 +674,8 @@ async fn create_segment_pipeline( ), CreateSegmentPipelineError, > { + let start_time = SourceTimestamps::now(); + let system_audio = if capture_system_audio { let (tx, rx) = flume::bounded(64); (Some(tx), Some(rx)) @@ -684,7 +698,6 @@ async fn create_segment_pipeline( !custom_cursor_capture, 120, system_audio.0, - start_time, #[cfg(windows)] d3d_device, ) @@ -731,7 +744,7 @@ async fn create_segment_pipeline( let microphone = if let Some(mic_feed) = mic_feed { let (tx, rx) = flume::bounded(8); - let mic_source = AudioInputSource::init(mic_feed, tx, start_time); + let mic_source = AudioInputSource::init(mic_feed, tx); let mic_config = mic_source.info(); let output_path = dir.join("audio-input.ogg"); @@ -744,7 +757,7 @@ async fn create_segment_pipeline( pipeline_builder.spawn_source("microphone_capture", mic_source); - let (timestamp_tx, timestamp_rx) = flume::bounded(1); + let (timestamp_tx, first_timestamp_rx) = flume::bounded(1); pipeline_builder.spawn_task("microphone_encoder", move |ready| { let mut timestamp_tx = Some(timestamp_tx); @@ -768,7 +781,7 @@ async fn create_segment_pipeline( Some(PipelineOutput { path: output_path, - first_timestamp_rx: timestamp_rx, + first_timestamp_rx, }) } 
else { None @@ -840,11 +853,11 @@ async fn create_segment_pipeline( } if let Some(start) = start { - frame.0.set_pts(Some( - ((camera_config.time_base.denominator() as f64 - / camera_config.time_base.numerator() as f64) - * (frame.1 - start)) as i64, - )); + // frame.0.set_pts(Some( + // ((camera_config.time_base.denominator() as f64 + // / camera_config.time_base.numerator() as f64) + // * (frame.1 - start)) as i64, + // )); } else { start = Some(frame.1); frame.0.set_pts(Some(0)); @@ -903,6 +916,7 @@ async fn create_segment_pipeline( Ok(( StudioRecordingPipeline { inner: pipeline, + start_time, screen, microphone, camera, From 04c2039e9844243074859576a9f1747fb09671f0 Mon Sep 17 00:00:00 2001 From: Brendan Allan Date: Tue, 9 Sep 2025 02:36:42 +0800 Subject: [PATCH 04/20] base everything on new timestamp system + update audio mixer --- Cargo.lock | 48 +- Cargo.toml | 2 +- crates/camera/src/lib.rs | 2 +- crates/camera/src/macos.rs | 4 +- crates/cpal-ffmpeg/Cargo.toml | 1 - crates/cpal-ffmpeg/src/lib.rs | 3 +- crates/enc-avfoundation/Cargo.toml | 1 - crates/enc-avfoundation/src/mp4.rs | 11 +- crates/enc-ffmpeg/src/audio/aac.rs | 154 +- .../src/audio/buffered_resampler.rs | 581 ++++++++ crates/enc-ffmpeg/src/audio/mod.rs | 1 + crates/ffmpeg-utils/Cargo.toml | 10 - crates/ffmpeg-utils/src/lib.rs | 34 - crates/instant-recording/Cargo.toml | 13 - crates/instant-recording/src/lib.rs | 15 - crates/media-info/Cargo.toml | 1 - crates/media-info/src/lib.rs | 3 +- crates/recording/Cargo.toml | 2 +- crates/recording/src/capture_pipeline.rs | 200 +-- crates/recording/src/cursor.rs | 7 +- crates/recording/src/feeds/camera.rs | 24 +- crates/recording/src/feeds/microphone.rs | 6 +- crates/recording/src/instant_recording.rs | 12 +- crates/recording/src/pipeline/audio_buffer.rs | 8 +- crates/recording/src/sources/audio_input.rs | 8 +- crates/recording/src/sources/audio_mixer.rs | 1318 +++++------------ crates/recording/src/sources/camera.rs | 30 +- crates/recording/src/sources/mod.rs 
| 2 - .../recording/src/sources/new_audio_mixer.rs | 539 ------- .../src/sources/screen_capture/macos.rs | 44 +- .../src/sources/screen_capture/mod.rs | 20 +- crates/recording/src/studio_recording.rs | 31 +- crates/scap-ffmpeg/Cargo.toml | 1 - crates/scap-ffmpeg/src/cpal.rs | 3 +- crates/timestamp/Cargo.toml | 20 + crates/timestamp/src/lib.rs | 92 ++ crates/timestamp/src/macos.rs | 40 + crates/timestamp/src/win.rs | 41 + 38 files changed, 1325 insertions(+), 2007 deletions(-) create mode 100644 crates/enc-ffmpeg/src/audio/buffered_resampler.rs delete mode 100644 crates/ffmpeg-utils/Cargo.toml delete mode 100644 crates/ffmpeg-utils/src/lib.rs delete mode 100644 crates/instant-recording/Cargo.toml delete mode 100644 crates/instant-recording/src/lib.rs delete mode 100644 crates/recording/src/sources/new_audio_mixer.rs create mode 100644 crates/timestamp/Cargo.toml create mode 100644 crates/timestamp/src/lib.rs create mode 100644 crates/timestamp/src/macos.rs create mode 100644 crates/timestamp/src/win.rs diff --git a/Cargo.lock b/Cargo.lock index 7949c27a9f..1bae3f7383 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -866,7 +866,7 @@ name = "cap-audio" version = "0.1.0" dependencies = [ "cidre 0.11.0", - "cpal 0.15.3 (git+https://github.com/CapSoftware/cpal?rev=75a365a24507)", + "cpal 0.15.3 (git+https://github.com/CapSoftware/cpal?rev=3cc779a7b4ca)", "ffmpeg-next", "tokio", ] @@ -953,8 +953,7 @@ dependencies = [ name = "cap-cpal-ffmpeg" version = "0.1.0" dependencies = [ - "cap-ffmpeg-utils", - "cpal 0.15.3 (git+https://github.com/CapSoftware/cpal?rev=75a365a24507)", + "cpal 0.15.3 (git+https://github.com/CapSoftware/cpal?rev=3cc779a7b4ca)", "ffmpeg-next", ] @@ -1006,7 +1005,7 @@ dependencies = [ "cocoa 0.26.1", "core-foundation 0.10.1", "core-graphics 0.24.0", - "cpal 0.15.3 (git+https://github.com/CapSoftware/cpal?rev=75a365a24507)", + "cpal 0.15.3 (git+https://github.com/CapSoftware/cpal?rev=3cc779a7b4ca)", "device_query", "dirs", "dotenvy_macro", @@ 
-1083,7 +1082,7 @@ dependencies = [ "cap-media-info", "cap-project", "cap-rendering", - "cpal 0.15.3 (git+https://github.com/CapSoftware/cpal?rev=75a365a24507)", + "cpal 0.15.3 (git+https://github.com/CapSoftware/cpal?rev=3cc779a7b4ca)", "ffmpeg-next", "flume", "futures", @@ -1100,7 +1099,6 @@ dependencies = [ name = "cap-enc-avfoundation" version = "0.1.0" dependencies = [ - "cap-ffmpeg-utils", "cap-media-info", "cidre 0.11.0", "ffmpeg-next", @@ -1182,13 +1180,6 @@ dependencies = [ "inventory", ] -[[package]] -name = "cap-ffmpeg-utils" -version = "0.1.0" -dependencies = [ - "ffmpeg-next", -] - [[package]] name = "cap-flags" version = "0.1.0" @@ -1204,14 +1195,6 @@ dependencies = [ "wgpu", ] -[[package]] -name = "cap-instant-recording" -version = "0.1.0" -dependencies = [ - "windows 0.60.0", - "windows-core 0.60.1", -] - [[package]] name = "cap-media" version = "0.1.0" @@ -1228,8 +1211,7 @@ dependencies = [ name = "cap-media-info" version = "0.1.0" dependencies = [ - "cap-ffmpeg-utils", - "cpal 0.15.3 (git+https://github.com/CapSoftware/cpal?rev=75a365a24507)", + "cpal 0.15.3 (git+https://github.com/CapSoftware/cpal?rev=3cc779a7b4ca)", "ffmpeg-next", "thiserror 1.0.69", ] @@ -1283,18 +1265,18 @@ dependencies = [ "cap-enc-ffmpeg", "cap-enc-mediafoundation", "cap-fail", - "cap-ffmpeg-utils", "cap-flags", "cap-media", "cap-media-info", "cap-mediafoundation-ffmpeg", "cap-mediafoundation-utils", "cap-project", + "cap-timestamp", "cap-utils", "chrono", "cidre 0.11.0", "cocoa 0.26.1", - "cpal 0.15.3 (git+https://github.com/CapSoftware/cpal?rev=75a365a24507)", + "cpal 0.15.3 (git+https://github.com/CapSoftware/cpal?rev=3cc779a7b4ca)", "device_query", "either", "ffmpeg-next", @@ -1376,6 +1358,15 @@ dependencies = [ "tracing-subscriber", ] +[[package]] +name = "cap-timestamp" +version = "0.1.0" +dependencies = [ + "cidre 0.11.0", + "cpal 0.15.3 (git+https://github.com/CapSoftware/cpal?rev=3cc779a7b4ca)", + "windows 0.60.0", +] + [[package]] name = 
"cap-utils" version = "0.1.0" @@ -2038,7 +2029,7 @@ dependencies = [ [[package]] name = "cpal" version = "0.15.3" -source = "git+https://github.com/CapSoftware/cpal?rev=75a365a24507#75a365a24507038fbd23b43534a75d3071a92b7a" +source = "git+https://github.com/CapSoftware/cpal?rev=3cc779a7b4ca#3cc779a7b4ca51770211f1b7dc19f107978af707" dependencies = [ "alsa", "core-foundation-sys", @@ -6990,7 +6981,7 @@ dependencies = [ name = "scap-cpal" version = "0.1.0" dependencies = [ - "cpal 0.15.3 (git+https://github.com/CapSoftware/cpal?rev=75a365a24507)", + "cpal 0.15.3 (git+https://github.com/CapSoftware/cpal?rev=3cc779a7b4ca)", "thiserror 1.0.69", ] @@ -7009,9 +7000,8 @@ dependencies = [ name = "scap-ffmpeg" version = "0.1.0" dependencies = [ - "cap-ffmpeg-utils", "cidre 0.11.0", - "cpal 0.15.3 (git+https://github.com/CapSoftware/cpal?rev=75a365a24507)", + "cpal 0.15.3 (git+https://github.com/CapSoftware/cpal?rev=3cc779a7b4ca)", "ffmpeg-next", "futures", "scap-cpal", diff --git a/Cargo.toml b/Cargo.toml index 401232358c..27c4104878 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -6,7 +6,7 @@ members = ["apps/cli", "apps/desktop/src-tauri", "crates/*"] anyhow = { version = "1.0.86" } # This includes a currently-unreleased fix that ensures the audio stream is actually # stopped and released on drop on macOS -cpal = { git = "https://github.com/CapSoftware/cpal", rev = "75a365a24507" } +cpal = { git = "https://github.com/CapSoftware/cpal", rev = "3cc779a7b4ca" } ffmpeg = { package = "ffmpeg-next", git = "https://github.com/CapSoftware/rust-ffmpeg", rev = "49db1fede112" } tokio = { version = "1.39.3", features = [ "macros", diff --git a/crates/camera/src/lib.rs b/crates/camera/src/lib.rs index 5745060a76..cec0e04961 100644 --- a/crates/camera/src/lib.rs +++ b/crates/camera/src/lib.rs @@ -3,7 +3,7 @@ use std::{ fmt::{Debug, Display}, ops::Deref, - time::{Duration, Instant}, + time::Duration, }; #[cfg(target_os = "macos")] diff --git 
a/crates/camera/src/macos.rs b/crates/camera/src/macos.rs index bf81e186e3..cb2035030c 100644 --- a/crates/camera/src/macos.rs +++ b/crates/camera/src/macos.rs @@ -105,9 +105,9 @@ pub(super) fn start_capturing_impl( callback(CapturedFrame { native: NativeCapturedFrame(image_buf.retained(), data.sample_buf.retained()), - reference_time: Instant::now(), + // reference_time: Instant::now(), timestamp: data.timestamp, - capture_begin_time: Some(data.capture_begin_time), + // capture_begin_time: Some(data.capture_begin_time), }); }))); diff --git a/crates/cpal-ffmpeg/Cargo.toml b/crates/cpal-ffmpeg/Cargo.toml index f0931bd38c..4ea431bd53 100644 --- a/crates/cpal-ffmpeg/Cargo.toml +++ b/crates/cpal-ffmpeg/Cargo.toml @@ -6,7 +6,6 @@ edition = "2024" [dependencies] cpal.workspace = true ffmpeg.workspace = true -cap-ffmpeg-utils = { path = "../ffmpeg-utils" } [lints] workspace = true diff --git a/crates/cpal-ffmpeg/src/lib.rs b/crates/cpal-ffmpeg/src/lib.rs index 80f90a2325..b09aab297d 100644 --- a/crates/cpal-ffmpeg/src/lib.rs +++ b/crates/cpal-ffmpeg/src/lib.rs @@ -1,4 +1,3 @@ -use cap_ffmpeg_utils::PlanarData; use cpal::{SampleFormat, StreamConfig}; use ffmpeg::format::{Sample, sample}; @@ -32,7 +31,7 @@ impl DataExt for ::cpal::Data { let base = (i as usize) * plane_size; ffmpeg_frame - .plane_data_mut(i as usize) + .data_mut(i as usize) .copy_from_slice(&self.bytes()[base..base + plane_size]); } } else { diff --git a/crates/enc-avfoundation/Cargo.toml b/crates/enc-avfoundation/Cargo.toml index e352610e05..244a3e41a6 100644 --- a/crates/enc-avfoundation/Cargo.toml +++ b/crates/enc-avfoundation/Cargo.toml @@ -5,7 +5,6 @@ edition = "2024" [dependencies] cap-media-info = { path = "../media-info" } -cap-ffmpeg-utils = { path = "../ffmpeg-utils" } ffmpeg.workspace = true thiserror.workspace = true diff --git a/crates/enc-avfoundation/src/mp4.rs b/crates/enc-avfoundation/src/mp4.rs index 2092c7efba..fbb90636bf 100644 --- a/crates/enc-avfoundation/src/mp4.rs +++ 
b/crates/enc-avfoundation/src/mp4.rs @@ -1,7 +1,6 @@ -use cap_ffmpeg_utils::PlanarData; use cap_media_info::{AudioInfo, VideoInfo}; use cidre::{cm::SampleTimingInfo, objc::Obj, *}; -use ffmpeg::{ffi::AV_TIME_BASE_Q, frame}; +use ffmpeg::frame; use std::path::PathBuf; use tracing::{debug, info}; @@ -265,7 +264,7 @@ impl MP4Encoder { if frame.is_planar() { let mut offset = 0; for plane_i in 0..frame.planes() { - let data = frame.plane_data(plane_i); + let data = frame.data(plane_i); block_buf_slice[offset..offset + data.len()] .copy_from_slice(&data[0..frame.samples() * frame.format().bytes()]); offset += data.len(); @@ -277,13 +276,17 @@ impl MP4Encoder { let format_desc = cm::AudioFormatDesc::with_asbd(&audio_desc).map_err(QueueAudioFrameError::Setup)?; - let time = cm::Time::new(frame.pts().unwrap_or(0), AV_TIME_BASE_Q.den); + let time = cm::Time::new(frame.pts().unwrap_or(0), frame.rate() as i32); + + // dbg!(time); let pts = self .start_time .add(self.elapsed_duration) .add(time.sub(self.segment_first_timestamp.unwrap())); + // dbg!(pts); + let buffer = cm::SampleBuf::create( Some(&block_buf), true, diff --git a/crates/enc-ffmpeg/src/audio/aac.rs b/crates/enc-ffmpeg/src/audio/aac.rs index 3df667a388..281185c332 100644 --- a/crates/enc-ffmpeg/src/audio/aac.rs +++ b/crates/enc-ffmpeg/src/audio/aac.rs @@ -5,9 +5,8 @@ use ffmpeg::{ frame, threading::Config, }; -use std::collections::VecDeque; -use crate::AudioEncoder; +use crate::{AudioEncoder, audio::buffered_resampler::BufferedResampler}; #[derive(thiserror::Error, Debug)] pub enum AACEncoderError { @@ -24,9 +23,7 @@ pub struct AACEncoder { tag: &'static str, encoder: encoder::Audio, packet: ffmpeg::Packet, - resampler: Option, - resampled_frame: frame::Audio, - buffer: Vec>, + resampler: BufferedResampler, stream_index: usize, } @@ -75,33 +72,20 @@ impl AACEncoder { output_config.sample_format = Self::SAMPLE_FORMAT; output_config.sample_rate = rate as u32; - let resampler = if ( - input_config.sample_format, - 
input_config.channel_layout(), - input_config.sample_rate, - ) != ( - output_config.sample_format, - output_config.channel_layout(), - output_config.sample_rate, - ) { - Some( - ffmpeg::software::resampler( - ( - input_config.sample_format, - input_config.channel_layout(), - input_config.sample_rate, - ), - ( - output_config.sample_format, - output_config.channel_layout(), - output_config.sample_rate, - ), - ) - .unwrap(), - ) - } else { - None - }; + let resampler = ffmpeg::software::resampler( + ( + input_config.sample_format, + input_config.channel_layout(), + input_config.sample_rate, + ), + ( + output_config.sample_format, + output_config.channel_layout(), + output_config.sample_rate, + ), + ) + .unwrap(); + let resampler = BufferedResampler::new(resampler); encoder.set_bit_rate(Self::OUTPUT_BITRATE); encoder.set_rate(rate); @@ -118,50 +102,19 @@ impl AACEncoder { Ok(Self { tag, - buffer: vec![VecDeque::new(); 2], encoder, stream_index, packet: ffmpeg::Packet::empty(), - resampled_frame: frame::Audio::empty(), resampler, }) } pub fn queue_frame(&mut self, frame: frame::Audio, output: &mut format::context::Output) { - let frame = if let Some(resampler) = &mut self.resampler { - resampler.run(&frame, &mut self.resampled_frame).unwrap(); - &self.resampled_frame - } else { - &frame - }; - - for i in 0..frame.planes() { - self.buffer[i] - .extend(&frame.data(i)[0..frame_size_bytes(frame) / frame.channels() as usize]); - } + self.resampler.add_frame(frame); - let channel_size_bytes = self.encoder.frame_size() as usize * self.encoder.format().bytes(); - - loop { - if self.buffer[0].len() < channel_size_bytes { - break; - } - - let mut frame = frame::Audio::new( - self.encoder.format(), - self.encoder.frame_size() as usize, - self.encoder.channel_layout(), - ); - - for i in 0..frame.planes() { - let bytes = self.buffer[i] - .drain(0..channel_size_bytes) - .collect::>(); - - frame.data_mut(i)[0..channel_size_bytes] - .copy_from_slice(&bytes[0..channel_size_bytes]); - } 
+ let frame_size = self.encoder.frame_size() as usize; + while let Some(frame) = self.resampler.get_frame(frame_size) { self.encoder.send_frame(&frame).unwrap(); self.process_packets(output); @@ -180,69 +133,10 @@ impl AACEncoder { } pub fn finish(&mut self, output: &mut format::context::Output) { - let frame_size_bytes = self.encoder.frame_size() as usize - * self.encoder.channels() as usize - * self.encoder.format().bytes(); - - if let Some(mut resampler) = self.resampler.take() { - while resampler.delay().is_some() { - resampler.flush(&mut self.resampled_frame).unwrap(); - if self.resampled_frame.samples() == 0 { - break; - } - - for i in 0..self.resampled_frame.planes() { - self.buffer[i].extend( - &self.resampled_frame.data(0)[0..self.resampled_frame.samples() - * self.resampled_frame.format().bytes()], - ); - } - - while self.buffer.len() >= frame_size_bytes { - let mut frame = frame::Audio::new( - self.encoder.format(), - self.encoder.frame_size() as usize, - self.encoder.channel_layout(), - ); - - for i in 0..frame.planes() { - let bytes = self.buffer[i] - .drain(0..frame_size_bytes) - .collect::>(); - - frame.data_mut(0)[0..frame_size_bytes].copy_from_slice(&bytes); - } - - self.encoder.send_frame(&frame).unwrap(); - - self.process_packets(output); - } - } - - while !self.buffer[0].is_empty() { - let channel_size_bytes = - (frame_size_bytes / self.encoder.channels() as usize).min(self.buffer[0].len()); - let frame_size = channel_size_bytes / self.encoder.format().bytes(); - - let mut frame = frame::Audio::new( - self.encoder.format(), - frame_size, - self.encoder.channel_layout(), - ); - - for i in 0..frame.planes() { - let bytes = self.buffer[i] - .drain(0..channel_size_bytes) - .collect::>(); - - frame.data_mut(i)[0..channel_size_bytes] - .copy_from_slice(&bytes[0..channel_size_bytes]); - } - - self.encoder.send_frame(&frame).unwrap(); + while let Some(frame) = self.resampler.flush(self.encoder.frame_size() as usize) { + 
self.encoder.send_frame(&frame).unwrap(); - self.process_packets(output); - } + self.process_packets(output); } self.encoder.send_eof().unwrap(); @@ -260,7 +154,3 @@ impl AudioEncoder for AACEncoder { self.finish(output); } } - -fn frame_size_bytes(frame: &frame::Audio) -> usize { - frame.samples() * frame.format().bytes() * frame.channels() as usize -} diff --git a/crates/enc-ffmpeg/src/audio/buffered_resampler.rs b/crates/enc-ffmpeg/src/audio/buffered_resampler.rs new file mode 100644 index 0000000000..f26b29d8f4 --- /dev/null +++ b/crates/enc-ffmpeg/src/audio/buffered_resampler.rs @@ -0,0 +1,581 @@ +use std::collections::VecDeque; + +use ffmpeg::software::resampling; + +/// Consumes audio frames, resamples them, buffers the results, +/// and allows retrieving new frames of any size. +/// When retrieving new frames via `get_frame`, silence will be accounted +/// for if the requested frame size is larger than the latest buffered frame, +/// ensuring that the resulting frame's PTS is always accurate. 
+pub struct BufferedResampler { + resampler: ffmpeg::software::resampling::Context, + buffer: VecDeque<(ffmpeg::frame::Audio, i64)>, + sample_index: usize, +} + +impl BufferedResampler { + pub fn new(resampler: ffmpeg::software::resampling::Context) -> Self { + Self { + resampler, + buffer: VecDeque::new(), + sample_index: 0, + } + } + + fn remaining_samples(&self) -> usize { + let (mut pts, mut remaining_samples) = if let Some(front) = self.buffer.front() { + ( + front.1 + front.0.samples() as i64, + front.0.samples() - self.sample_index, + ) + } else { + return 0; + }; + + for buffer in self.buffer.iter().skip(1) { + // fill in gap + remaining_samples += (buffer.1 - pts) as usize; + remaining_samples += buffer.0.samples(); + pts += buffer.0.samples() as i64; + } + + return remaining_samples; + } + + pub fn output(&self) -> resampling::context::Definition { + *self.resampler.output() + } + + pub fn add_frame(&mut self, frame: ffmpeg::frame::Audio) { + let pts = frame.pts().unwrap(); + + let mut resampled_frame = ffmpeg::frame::Audio::empty(); + + self.resampler.run(&frame, &mut resampled_frame).unwrap(); + + let resampled_pts = + (pts as f64 * (resampled_frame.rate() as f64 / frame.rate() as f64)) as i64; + + let mut next_pts = resampled_pts + resampled_frame.samples() as i64; + + self.buffer.push_back((resampled_frame, resampled_pts)); + + while let Some(_) = self.resampler.delay() { + let mut resampled_frame = ffmpeg::frame::Audio::new( + self.resampler.output().format, + 0, + self.resampler.output().channel_layout, + ); + self.resampler.flush(&mut resampled_frame).unwrap(); + let samples = resampled_frame.samples(); + if samples == 0 { + break; + } + + self.buffer.push_back((resampled_frame, next_pts)); + + next_pts = next_pts + samples as i64; + } + } + + fn get_frame_inner(&mut self, samples: usize) -> Option { + let output = self.output(); + + let mut out_frame = + ffmpeg::frame::Audio::new(output.format, samples, output.channel_layout); + + let mut 
samples_already_written = 0; + let mut current_pts = 0; + + if output.format.is_packed() { + let bytes_per_sample = + output.format.bytes() * output.channel_layout.channels() as usize; + + while let Some((frame, pts)) = self.buffer.pop_front() { + if out_frame.pts().is_none() { + current_pts = pts + self.sample_index as i64; + out_frame.set_pts(Some(current_pts)); + } + + if pts >= current_pts + samples as i64 { + self.buffer.push_front((frame, pts)); + + let dest_range_start_samples = samples_already_written; + let dest_range_end_samples = + dest_range_start_samples + (samples - samples_already_written); + out_frame.data_mut(0)[dest_range_start_samples * bytes_per_sample + ..dest_range_end_samples * bytes_per_sample] + .fill(0); + + break; + } + + if current_pts < pts { + let silence_needed = + ((pts - current_pts) as usize).min(samples - samples_already_written); + + out_frame.data_mut(0)[samples_already_written * bytes_per_sample + ..(samples_already_written + silence_needed) * bytes_per_sample] + .fill(0); + + samples_already_written += silence_needed; + current_pts += silence_needed as i64; + + if samples_already_written >= samples { + self.buffer.push_front((frame, pts)); + break; + } + } + + let sample_index = self.sample_index; + + let src_samples_remaining = frame.samples() - sample_index; + + let samples_to_write = + usize::min(src_samples_remaining, samples - samples_already_written); + + let dest_range_start = samples_already_written * bytes_per_sample; + let dest_range_end = dest_range_start + samples_to_write * bytes_per_sample; + + let src_range_start = sample_index * bytes_per_sample; + let src_range_end = src_range_start + samples_to_write * bytes_per_sample; + + out_frame.data_mut(0)[dest_range_start..dest_range_end] + .copy_from_slice(&frame.data(0)[src_range_start..src_range_end]); + + samples_already_written += samples_to_write; + + self.sample_index += samples_to_write; + + current_pts += samples_to_write as i64; + + if samples_to_write < 
src_samples_remaining { + self.buffer.push_front((frame, pts)); + break; + } else if samples_to_write >= src_samples_remaining { + self.sample_index -= frame.samples(); + } + } + } else { + let channels = output.channel_layout.channels() as usize; + let bytes_per_sample = output.format.bytes(); + + while let Some((frame, pts)) = self.buffer.pop_front() { + if out_frame.pts().is_none() { + current_pts = pts + self.sample_index as i64; + out_frame.set_pts(Some(current_pts)); + } + + if pts >= current_pts + samples as i64 { + self.buffer.push_front((frame, pts)); + + for i in 0..channels { + let dest_range_start_samples = samples_already_written; + let dest_range_end_samples = + dest_range_start_samples + (samples - samples_already_written); + out_frame.data_mut(i)[dest_range_start_samples * bytes_per_sample + ..dest_range_end_samples * bytes_per_sample] + .fill(0); + } + + break; + } + + if current_pts < pts { + let silence_needed = + ((pts - current_pts) as usize).min(samples - samples_already_written); + + for i in 0..channels { + out_frame.data_mut(i)[samples_already_written * bytes_per_sample + ..(samples_already_written + silence_needed) * bytes_per_sample] + .fill(0); + } + + samples_already_written += silence_needed; + current_pts += silence_needed as i64; + + if samples_already_written >= samples { + self.buffer.push_front((frame, pts)); + break; + } + } + + let sample_index = self.sample_index; + + let src_samples_remaining = frame.samples() - sample_index; + + let samples_to_write = + usize::min(src_samples_remaining, samples - samples_already_written); + + let dest_range_start = samples_already_written * bytes_per_sample; + let dest_range_end = dest_range_start + samples_to_write * bytes_per_sample; + + let src_range_start = sample_index * bytes_per_sample; + let src_range_end = src_range_start + samples_to_write * bytes_per_sample; + + for i in 0..channels { + out_frame.data_mut(i)[dest_range_start..dest_range_end] + 
.copy_from_slice(&frame.data(i)[src_range_start..src_range_end]); + } + + samples_already_written += samples_to_write; + + self.sample_index += samples_to_write; + + current_pts += samples_to_write as i64; + + if samples_to_write < src_samples_remaining { + self.buffer.push_front((frame, pts)); + break; + } else if samples_to_write >= src_samples_remaining { + self.sample_index -= frame.samples(); + } + } + } + + Some(out_frame) + } + + pub fn get_frame(&mut self, samples: usize) -> Option { + if self.remaining_samples() < samples { + return None; + } + + self.get_frame_inner(samples) + } + + pub fn flush(&mut self, max_samples: usize) -> Option { + self.get_frame_inner(self.remaining_samples().min(max_samples)) + } +} + +#[cfg(test)] +mod test { + use super::*; + use ffmpeg::{ChannelLayout, format}; + + const IN_RATE: u32 = 100; + + fn create_resampler(out_rate: u32) -> BufferedResampler { + let resampler = ffmpeg::software::resampler( + ( + format::Sample::U8(cap_media_info::Type::Packed), + ChannelLayout::MONO, + IN_RATE, + ), + ( + format::Sample::U8(cap_media_info::Type::Packed), + ChannelLayout::MONO, + out_rate, + ), + ) + .unwrap(); + + BufferedResampler::new(resampler) + } + + fn make_input_frame(samples: usize, pts: i64) -> ffmpeg::frame::Audio { + let mut frame = ffmpeg::frame::Audio::new( + cap_media_info::Sample::U8(cap_media_info::Type::Packed), + samples, + ChannelLayout::MONO, + ); + + frame.data_mut(0).fill(69); + + frame.set_rate(IN_RATE); + frame.set_pts(Some(pts)); + frame + } + + mod resampler { + use super::*; + + #[test] + fn sequential_frames() { + let mut bufferer = create_resampler(200); + + bufferer.add_frame(make_input_frame(100, 0)); + bufferer.add_frame(make_input_frame(100, 100)); + + let sample_sum = bufferer.buffer.iter().map(|f| f.0.samples()).sum::(); + assert_eq!(sample_sum, 400); + + let first = bufferer.buffer.front().unwrap(); + assert_eq!(first.1, 0); + + let last = bufferer.buffer.back().unwrap(); + assert_eq!(last.1 + 
last.0.samples() as i64, 400); + } + + #[test] + fn start_gap() { + let mut bufferer = create_resampler(200); + + bufferer.add_frame(make_input_frame(100, 100)); + bufferer.add_frame(make_input_frame(100, 200)); + + let first = bufferer.buffer.front().unwrap(); + assert_eq!(first.1, 200); + + let last = bufferer.buffer.back().unwrap(); + assert_eq!(last.1 + last.0.samples() as i64, 600); + } + + #[test] + fn middle_gap() { + let mut bufferer = create_resampler(200); + + bufferer.add_frame(make_input_frame(100, 0)); + bufferer.add_frame(make_input_frame(100, 200)); + + let first = bufferer.buffer.front().unwrap(); + assert_eq!(first.1, 0); + + let last = bufferer.buffer.back().unwrap(); + assert_eq!(last.1 + last.0.samples() as i64, 600); + } + } + + mod get_frame { + use super::*; + + #[test] + fn same_format() { + // Tests getting 50 then 50 then 50 + + let mut bufferer = create_resampler(IN_RATE); + + bufferer.add_frame(make_input_frame(100, 0)); + + let out_frame = bufferer.get_frame(50); + assert!(out_frame.is_some()); + let out_frame = out_frame.unwrap(); + + assert_eq!(out_frame.samples(), 50); + assert_eq!(bufferer.sample_index, 50); + assert_eq!(out_frame.pts(), Some(0)); + + let out_frame = bufferer.get_frame(50); + assert!(out_frame.is_some()); + let out_frame = out_frame.unwrap(); + + assert_eq!(out_frame.samples(), 50); + assert_eq!(bufferer.sample_index, 0); + assert_eq!(out_frame.pts(), Some(50)); + + let out_frame = bufferer.get_frame(50); + assert!(out_frame.is_none()); + + // Tests getting 75 then 75 (should fail) then 25 (should succeed) + + let mut bufferer = create_resampler(IN_RATE); + + bufferer.add_frame(make_input_frame(100, 0)); + + let out_frame = bufferer.get_frame(75); + assert!(out_frame.is_some()); + let out_frame = out_frame.unwrap(); + + assert_eq!(out_frame.samples(), 75); + assert_eq!(bufferer.sample_index, 75); + assert_eq!(out_frame.pts(), Some(0)); + + let out_frame = bufferer.get_frame(75); + assert!(out_frame.is_none()); + + 
let out_frame = bufferer.get_frame(25); + assert!(out_frame.is_some()); + let out_frame = out_frame.unwrap(); + + assert_eq!(out_frame.samples(), 25); + assert_eq!(bufferer.sample_index, 0); + assert_eq!(out_frame.pts(), Some(75)); + } + + #[test] + fn different_format() { + let mut bufferer = create_resampler(200); + + bufferer.add_frame(make_input_frame(100, 0)); + + let out_frame = bufferer.get_frame(125); + assert!(out_frame.is_some()); + let out_frame = out_frame.unwrap(); + + assert_eq!(out_frame.samples(), 125); + assert_eq!(bufferer.sample_index, 25); + assert_eq!(out_frame.pts(), Some(0)); + + let out_frame = bufferer.get_frame(100); + assert!(out_frame.is_none()); + + let out_frame = bufferer.get_frame(75); + assert!(out_frame.is_some()); + let out_frame = out_frame.unwrap(); + + assert_eq!(out_frame.samples(), 75); + assert_eq!(bufferer.sample_index, 0); + assert_eq!(out_frame.pts(), Some(125)); + } + + // start gap will never have silence + #[test] + fn start_gap() { + let mut bufferer = create_resampler(IN_RATE); + + bufferer.add_frame(make_input_frame(100, 100)); + + let out_frame = bufferer.get_frame(50); + assert!(out_frame.is_some()); + let out_frame = out_frame.unwrap(); + + assert_eq!(out_frame.samples(), 50); + assert_eq!(bufferer.sample_index, 50); + assert_eq!(out_frame.pts(), Some(100)); + + let out_frame = bufferer.get_frame(50); + assert!(out_frame.is_some()); + let out_frame = out_frame.unwrap(); + + assert_eq!(out_frame.samples(), 50); + assert_eq!(bufferer.sample_index, 0); + assert_eq!(out_frame.pts(), Some(150)); + + let out_frame = bufferer.get_frame(50); + assert!(out_frame.is_none()); + } + + #[test] + fn middle_gap_no_silence() { + let mut bufferer = create_resampler(IN_RATE); + + bufferer.add_frame(make_input_frame(100, 0)); + bufferer.add_frame(make_input_frame(100, 200)); + + let out_frame = bufferer.get_frame(100); + assert!(out_frame.is_some()); + let out_frame = out_frame.unwrap(); + + assert_eq!(out_frame.samples(), 100); + 
assert_eq!(bufferer.sample_index, 0); + assert_eq!(out_frame.pts(), Some(0)); + + let out_frame = bufferer.get_frame(100); + assert!(out_frame.is_some()); + let out_frame = out_frame.unwrap(); + + assert_eq!(out_frame.samples(), 100); + assert_eq!(bufferer.sample_index, 0); + assert_eq!(out_frame.pts(), Some(200)); + + let out_frame = bufferer.get_frame(50); + assert!(out_frame.is_none()); + } + + #[test] + fn middle_gap_expect_silence() { + let mut bufferer = create_resampler(IN_RATE); + + bufferer.add_frame(make_input_frame(100, 0)); + bufferer.add_frame(make_input_frame(100, 200)); + + let out_frame = bufferer.get_frame(150); + assert!(out_frame.is_some()); + let out_frame = out_frame.unwrap(); + + assert_eq!(out_frame.samples(), 150); + assert_eq!(bufferer.sample_index, 0); + assert_eq!(out_frame.pts(), Some(0)); + + let out_frame = bufferer.get_frame(100); + assert!(out_frame.is_some()); + let out_frame = out_frame.unwrap(); + + assert_eq!(out_frame.samples(), 100); + assert_eq!(bufferer.sample_index, 0); + assert_eq!(out_frame.pts(), Some(200)); + + let out_frame = bufferer.get_frame(50); + assert!(out_frame.is_none()); + } + + #[test] + fn middle_gap_start_offset() { + let mut bufferer = create_resampler(IN_RATE); + + bufferer.add_frame(make_input_frame(100, 0)); + bufferer.add_frame(make_input_frame(100, 200)); + + let out_frame = bufferer.get_frame(25); + assert!(out_frame.is_some()); + + let out_frame = bufferer.get_frame(175); + assert!(out_frame.is_some()); + let out_frame = out_frame.unwrap(); + + assert_eq!(out_frame.samples(), 175); + assert_eq!(bufferer.sample_index, 0); + assert_eq!(out_frame.pts(), Some(25)); + + let out_frame = bufferer.get_frame(100); + assert!(out_frame.is_some()); + let out_frame = out_frame.unwrap(); + + assert_eq!(out_frame.samples(), 100); + assert_eq!(bufferer.sample_index, 0); + assert_eq!(out_frame.pts(), Some(200)); + + let out_frame = bufferer.get_frame(50); + assert!(out_frame.is_none()); + } + + #[test] + fn 
middle_gap_overlap() { + let mut bufferer = create_resampler(IN_RATE); + + bufferer.add_frame(make_input_frame(100, 0)); + bufferer.add_frame(make_input_frame(100, 200)); + + let out_frame = bufferer.get_frame(75); + assert!(out_frame.is_some()); + + let out_frame = bufferer.get_frame(175); + assert!(out_frame.is_some()); + let out_frame = out_frame.unwrap(); + + assert_eq!(out_frame.samples(), 175); + assert_eq!(bufferer.sample_index, 50); + assert_eq!(out_frame.pts(), Some(75)); + + let out_frame = bufferer.get_frame(50); + assert!(out_frame.is_some()); + let out_frame = out_frame.unwrap(); + + assert_eq!(out_frame.samples(), 50); + assert_eq!(bufferer.sample_index, 0); + assert_eq!(out_frame.pts(), Some(250)); + + let out_frame = bufferer.get_frame(50); + assert!(out_frame.is_none()); + } + + #[test] + fn many_small_frames() { + let mut bufferer = create_resampler(IN_RATE); + + bufferer.add_frame(make_input_frame(100, 0)); + bufferer.add_frame(make_input_frame(100, 200)); + + for i in 0..8 { + let out_frame = bufferer.get_frame(25); + assert!(out_frame.is_some()); + let out_frame = out_frame.unwrap(); + + assert_eq!(out_frame.samples(), 25); + assert_eq!(out_frame.pts(), Some(i % 4 * 25 + 200 * (i / 4))); + assert_eq!(bufferer.sample_index, ((i as usize + 1) % 4) * 25 % 100); + } + } + } +} diff --git a/crates/enc-ffmpeg/src/audio/mod.rs b/crates/enc-ffmpeg/src/audio/mod.rs index 529c8c28f3..184b460cde 100644 --- a/crates/enc-ffmpeg/src/audio/mod.rs +++ b/crates/enc-ffmpeg/src/audio/mod.rs @@ -1,4 +1,5 @@ mod audio_encoder; +mod buffered_resampler; pub use audio_encoder::*; mod opus; diff --git a/crates/ffmpeg-utils/Cargo.toml b/crates/ffmpeg-utils/Cargo.toml deleted file mode 100644 index b1225b5f2d..0000000000 --- a/crates/ffmpeg-utils/Cargo.toml +++ /dev/null @@ -1,10 +0,0 @@ -[package] -name = "cap-ffmpeg-utils" -version = "0.1.0" -edition = "2024" - -[dependencies] -ffmpeg.workspace = true - -[lints] -workspace = true diff --git 
a/crates/ffmpeg-utils/src/lib.rs b/crates/ffmpeg-utils/src/lib.rs deleted file mode 100644 index 00a2b04050..0000000000 --- a/crates/ffmpeg-utils/src/lib.rs +++ /dev/null @@ -1,34 +0,0 @@ -pub trait PlanarData { - fn plane_data(&self, index: usize) -> &[u8]; - fn plane_data_mut(&mut self, index: usize) -> &mut [u8]; -} - -impl PlanarData for ffmpeg::frame::Audio { - #[inline] - fn plane_data(&self, index: usize) -> &[u8] { - if index >= self.planes() { - panic!("out of bounds"); - } - - unsafe { - std::slice::from_raw_parts( - (*self.as_ptr()).data[index], - (*self.as_ptr()).linesize[0] as usize, - ) - } - } - - #[inline] - fn plane_data_mut(&mut self, index: usize) -> &mut [u8] { - if index >= self.planes() { - panic!("out of bounds"); - } - - unsafe { - std::slice::from_raw_parts_mut( - (*self.as_mut_ptr()).data[index], - (*self.as_ptr()).linesize[0] as usize, - ) - } - } -} diff --git a/crates/instant-recording/Cargo.toml b/crates/instant-recording/Cargo.toml deleted file mode 100644 index f155f4b723..0000000000 --- a/crates/instant-recording/Cargo.toml +++ /dev/null @@ -1,13 +0,0 @@ -[package] -name = "cap-instant-recording" -version = "0.1.0" -edition = "2024" - -[dependencies] - -[target.'cfg(windows)'.dependencies] -windows-core = { workspace = true } -windows = { workspace = true, features = ["Win32_System_Performance"] } - -[lints] -workspace = true diff --git a/crates/instant-recording/src/lib.rs b/crates/instant-recording/src/lib.rs deleted file mode 100644 index eb56e45cbd..0000000000 --- a/crates/instant-recording/src/lib.rs +++ /dev/null @@ -1,15 +0,0 @@ -use std::time::{Instant, SystemTime}; - -struct MultiSourceTimestamp { - instant: Instant, - system_time: SystemTime, -} - -impl MultiSourceTimestamp { - pub fn now() -> Self { - Self { - instant: Instant::now(), - system_time: SystemTime::now(), - } - } -} diff --git a/crates/media-info/Cargo.toml b/crates/media-info/Cargo.toml index f16081663a..b3eb8a6919 100644 --- a/crates/media-info/Cargo.toml 
+++ b/crates/media-info/Cargo.toml @@ -8,7 +8,6 @@ ffmpeg.workspace = true thiserror.workspace = true cpal.workspace = true -cap-ffmpeg-utils = { path = "../ffmpeg-utils" } [lints] workspace = true diff --git a/crates/media-info/src/lib.rs b/crates/media-info/src/lib.rs index 217a836ea0..a4274ab80a 100644 --- a/crates/media-info/src/lib.rs +++ b/crates/media-info/src/lib.rs @@ -1,4 +1,3 @@ -use cap_ffmpeg_utils::*; use cpal::{SampleFormat, SupportedBufferSize, SupportedStreamConfig}; use ffmpeg::frame; pub use ffmpeg::{ @@ -141,7 +140,7 @@ impl AudioInfo { for channel in 0..self.channels { let channel_start = channel * sample_size; let channel_end = channel_start + sample_size; - frame.plane_data_mut(channel)[start..end] + frame.data_mut(channel)[start..end] .copy_from_slice(&interleaved_chunk[channel_start..channel_end]); } } diff --git a/crates/recording/Cargo.toml b/crates/recording/Cargo.toml index 7807247b3d..746448c129 100644 --- a/crates/recording/Cargo.toml +++ b/crates/recording/Cargo.toml @@ -20,7 +20,7 @@ cap-cursor-info = { path = "../cursor-info" } cap-camera = { path = "../camera", features = ["serde", "specta"] } cap-camera-ffmpeg = { path = "../camera-ffmpeg" } cap-enc-ffmpeg = { path = "../enc-ffmpeg" } -cap-ffmpeg-utils = { path = "../ffmpeg-utils" } +cap-timestamp = { path = "../timestamp" } specta.workspace = true tokio.workspace = true diff --git a/crates/recording/src/capture_pipeline.rs b/crates/recording/src/capture_pipeline.rs index 701bc41f40..666e8040e2 100644 --- a/crates/recording/src/capture_pipeline.rs +++ b/crates/recording/src/capture_pipeline.rs @@ -4,142 +4,29 @@ use crate::{ pipeline::builder::PipelineBuilder, sources::{ AudioInputSource, ScreenCaptureFormat, ScreenCaptureSource, ScreenCaptureTarget, - screen_capture, + audio_mixer, screen_capture, }, }; use cap_media::MediaError; use cap_media_info::AudioInfo; -use cpal::StreamInstant; +use cap_timestamp::Timestamp; use flume::{Receiver, Sender}; use std::{ future::Future, - 
ops::Add, path::PathBuf, sync::{Arc, atomic::AtomicBool}, - time::{Duration, Instant, SystemTime}, + time::SystemTime, }; -#[cfg(windows)] -mod win { - use windows::Win32::System::Performance::{QueryPerformanceCounter, QueryPerformanceFrequency}; - - use super::*; - - #[derive(Clone, Copy, Debug)] - pub struct PerformanceCounterTimestamp(i64); - - impl PerformanceCounterTimestamp { - pub fn new(value: i64) -> Self { - Self(value) - } - - pub fn duration_since(&self, other: Self) -> Duration { - let mut freq = 0; - unsafe { QueryPerformanceFrequency(&mut freq).unwrap() }; - - Duration::from_secs_f64((self.0 - other.0) as f64 / freq as f64) - } - - pub fn from_cpal(instant: StreamInstant) -> Self { - use cpal::host::wasapi::StreamInstantExt; - - Self(instant.as_performance_counter()) - } - - pub fn now() -> Self { - let mut value = 0; - unsafe { QueryPerformanceCounter(&mut value).unwrap() }; - Self(value) - } - } - - impl Add for PerformanceCounterTimestamp { - type Output = Self; - - fn add(self, rhs: Duration) -> Self::Output { - let mut freq = 0; - unsafe { QueryPerformanceFrequency(&mut freq) }.unwrap(); - Self(self.0 + (rhs.as_secs_f64() * freq as f64) as i64) - } - } -} - -#[cfg(windows)] -pub use win::*; - -#[derive(Clone, Copy, Debug)] -pub enum SourceTimestamp { - Instant(Instant), - SystemTime(SystemTime), - #[cfg(windows)] - PerformanceCounter(PerformanceCounterTimestamp), -} - -impl SourceTimestamp { - pub fn duration_since(&self, start: SourceTimestamps) -> Duration { - match self { - Self::Instant(instant) => instant.duration_since(start.instant), - Self::SystemTime(time) => time.duration_since(start.system_time).unwrap(), - #[cfg(windows)] - Self::PerformanceCounter(counter) => counter.duration_since(start.performance_counter), - } - } - - pub fn from_cpal(instant: StreamInstant) -> Self { - #[cfg(windows)] - Self::PerformanceCounter(PerformanceCounterTimestamp::from_cpal(instant)) - } -} - -impl Add for &SourceTimestamp { - type Output = 
SourceTimestamp; - - fn add(self, rhs: Duration) -> Self::Output { - match *self { - SourceTimestamp::Instant(i) => SourceTimestamp::Instant(i + rhs), - SourceTimestamp::SystemTime(t) => SourceTimestamp::SystemTime(t + rhs), - #[cfg(windows)] - SourceTimestamp::PerformanceCounter(c) => SourceTimestamp::PerformanceCounter(c + rhs), - } - } -} - -#[derive(Clone, Copy, Debug)] -pub struct SourceTimestamps { - instant: Instant, - system_time: SystemTime, - #[cfg(windows)] - performance_counter: PerformanceCounterTimestamp, -} - -impl SourceTimestamps { - pub fn now() -> Self { - Self { - instant: Instant::now(), - system_time: SystemTime::now(), - #[cfg(windows)] - performance_counter: PerformanceCounterTimestamp::now(), - } - } - - pub fn instant(&self) -> Instant { - self.instant - } - - pub fn system_time(&self) -> SystemTime { - self.system_time - } -} - pub trait MakeCapturePipeline: ScreenCaptureFormat + std::fmt::Debug + 'static { fn make_studio_mode_pipeline( builder: PipelineBuilder, source: ( ScreenCaptureSource, - flume::Receiver<(Self::VideoFormat, SourceTimestamp)>, + flume::Receiver<(Self::VideoFormat, Timestamp)>, ), output_path: PathBuf, - ) -> Result<(PipelineBuilder, flume::Receiver), MediaError> + ) -> Result<(PipelineBuilder, flume::Receiver), MediaError> where Self: Sized; @@ -147,10 +34,10 @@ pub trait MakeCapturePipeline: ScreenCaptureFormat + std::fmt::Debug + 'static { builder: PipelineBuilder, source: ( ScreenCaptureSource, - flume::Receiver<(Self::VideoFormat, SourceTimestamp)>, + flume::Receiver<(Self::VideoFormat, Timestamp)>, ), audio: Option>, - system_audio: Option<(Receiver<(ffmpeg::frame::Audio, SourceTimestamp)>, AudioInfo)>, + system_audio: Option<(Receiver<(ffmpeg::frame::Audio, Timestamp)>, AudioInfo)>, output_path: PathBuf, pause_flag: Arc, ) -> impl Future> + Send @@ -164,10 +51,10 @@ impl MakeCapturePipeline for screen_capture::CMSampleBufferCapture { mut builder: PipelineBuilder, source: ( ScreenCaptureSource, - 
flume::Receiver<(Self::VideoFormat, SourceTimestamp)>, + flume::Receiver<(Self::VideoFormat, Timestamp)>, ), output_path: PathBuf, - ) -> Result<(PipelineBuilder, flume::Receiver), MediaError> { + ) -> Result<(PipelineBuilder, flume::Receiver), MediaError> { let screen_config = source.0.info(); tracing::info!("screen config: {:?}", screen_config); @@ -222,23 +109,26 @@ impl MakeCapturePipeline for screen_capture::CMSampleBufferCapture { mut builder: PipelineBuilder, source: ( ScreenCaptureSource, - flume::Receiver<(Self::VideoFormat, f64)>, + flume::Receiver<(Self::VideoFormat, Timestamp)>, ), audio: Option>, - system_audio: Option<(Receiver<(ffmpeg::frame::Audio, f64)>, AudioInfo)>, + system_audio: Option<(Receiver<(ffmpeg::frame::Audio, Timestamp)>, AudioInfo)>, output_path: PathBuf, pause_flag: Arc, ) -> Result { + let start_time = Timestamps::now(); + let (audio_tx, audio_rx) = flume::bounded(64); - let mut audio_mixer = AudioMixer::new(audio_tx); + let mut audio_mixer = audio_mixer::AudioMixer::builder(audio_tx); if let Some(system_audio) = system_audio { audio_mixer.add_source(system_audio.1, system_audio.0); } if let Some(audio) = audio { - let sink = audio_mixer.sink(*audio.audio_info()); - let source = AudioInputSource::init(audio, sink.tx, SystemTime::now()); + let (tx, rx) = flume::bounded(32); + audio_mixer.add_source(*audio.audio_info(), rx); + let source = AudioInputSource::init(audio, tx); builder.spawn_source("microphone_capture", source); } @@ -249,19 +139,19 @@ impl MakeCapturePipeline for screen_capture::CMSampleBufferCapture { cap_enc_avfoundation::MP4Encoder::init( "mp4", source.0.info(), - has_audio_sources.then_some(AudioMixer::info()), + has_audio_sources.then_some(audio_mixer::AudioMixer::INFO), output_path, Some(1080), ) .map_err(|e| MediaError::Any(e.to_string().into()))?, )); + use cap_timestamp::Timestamps; use cidre::cm; - use ffmpeg::ffi::AV_TIME_BASE_Q; use tracing::error; let (first_frame_tx, mut first_frame_rx) = - 
tokio::sync::oneshot::channel::<(cm::Time, f64)>(); + tokio::sync::oneshot::channel::<(cm::Time, Timestamp)>(); if has_audio_sources { builder.spawn_source("audio_mixer", audio_mixer); @@ -271,9 +161,7 @@ impl MakeCapturePipeline for screen_capture::CMSampleBufferCapture { let _ = ready.send(Ok(())); let mut time = None; - while let Ok((mut frame, duration, start_timestamps)) = audio_rx.recv() { - let pts = frame.pts().unwrap(); - + while let Ok((mut frame, timestamp)) = audio_rx.recv() { if let Ok(first_time) = first_frame_rx.try_recv() { time = Some(first_time); }; @@ -282,14 +170,19 @@ impl MakeCapturePipeline for screen_capture::CMSampleBufferCapture { continue; }; - let elapsed = (pts as f64 / AV_TIME_BASE_Q.den as f64) - time.1; + let ts_offset = timestamp.duration_since(start_time); + let screen_first_offset = time.1.duration_since(start_time); - let time = time.0.add(cm::Time::new( - (elapsed * time.0.scale as f64 + time.1 * time.0.scale as f64) as i64, - time.0.scale, - )); + let Some(ts_offset) = ts_offset.checked_sub(screen_first_offset) else { + continue; + }; + + // dbg!(ts_offset); - frame.set_pts(Some(time.value / (time.scale / AV_TIME_BASE_Q.den) as i64)); + let pts = (ts_offset.as_secs_f64() * frame.rate() as f64) as i64; + frame.set_pts(Some(pts)); + + // dbg!(pts); if let Ok(mut mp4) = mp4.lock() && let Err(e) = mp4.queue_audio_frame(frame) @@ -306,7 +199,7 @@ impl MakeCapturePipeline for screen_capture::CMSampleBufferCapture { let mut first_frame_tx = Some(first_frame_tx); builder.spawn_task("screen_capture_encoder", move |ready| { let _ = ready.send(Ok(())); - while let Ok((frame, unix_time)) = source.1.recv() { + while let Ok((frame, timestamp)) = source.1.recv() { if let Ok(mut mp4) = mp4.lock() { if pause_flag.load(std::sync::atomic::Ordering::Relaxed) { mp4.pause(); @@ -315,7 +208,8 @@ impl MakeCapturePipeline for screen_capture::CMSampleBufferCapture { } if let Some(first_frame_tx) = first_frame_tx.take() { - let _ = 
first_frame_tx.send((frame.pts(), unix_time)); + // dbg!(timestamp); + let _ = first_frame_tx.send((frame.pts(), timestamp)); } mp4.queue_video_frame(frame.as_ref()) @@ -342,10 +236,10 @@ impl MakeCapturePipeline for screen_capture::Direct3DCapture { mut builder: PipelineBuilder, source: ( ScreenCaptureSource, - flume::Receiver<(Self::VideoFormat, SourceTimestamp)>, + flume::Receiver<(Self::VideoFormat, Timestamp)>, ), output_path: PathBuf, - ) -> Result<(PipelineBuilder, flume::Receiver), MediaError> + ) -> Result<(PipelineBuilder, flume::Receiver), MediaError> where Self: Sized, { @@ -496,10 +390,10 @@ impl MakeCapturePipeline for screen_capture::Direct3DCapture { mut builder: PipelineBuilder, source: ( ScreenCaptureSource, - flume::Receiver<(Self::VideoFormat, SourceTimestamp)>, + flume::Receiver<(Self::VideoFormat, Timestamp)>, ), audio: Option>, - system_audio: Option<(Receiver<(ffmpeg::frame::Audio, SourceTimestamp)>, AudioInfo)>, + system_audio: Option<(Receiver<(ffmpeg::frame::Audio, Timestamp)>, AudioInfo)>, output_path: PathBuf, _pause_flag: Arc, ) -> Result @@ -511,14 +405,12 @@ impl MakeCapturePipeline for screen_capture::Direct3DCapture { use cap_enc_ffmpeg::{AACEncoder, AudioEncoder}; use windows::Graphics::SizeInt32; - use crate::sources::new_audio_mixer; - cap_mediafoundation_utils::thread_init(); - let start_time = SourceTimestamps::now(); + let start_time = Timestamps::now(); let (audio_tx, audio_rx) = flume::bounded(64); - let mut audio_mixer = new_audio_mixer::AudioMixer::builder(audio_tx); + let mut audio_mixer = audio_mixer::AudioMixer::builder(audio_tx); if let Some(system_audio) = system_audio { audio_mixer.add_source(system_audio.1, system_audio.0); @@ -590,7 +482,7 @@ impl MakeCapturePipeline for screen_capture::Direct3DCapture { let audio_encoder = has_audio_sources .then(|| { - AACEncoder::init("mic_audio", new_audio_mixer::AudioMixer::INFO, &mut output) + AACEncoder::init("mic_audio", audio_mixer::AudioMixer::INFO, &mut output) .map(|v| 
v.boxed()) .map_err(|e| MediaError::Any(e.to_string().into())) }) @@ -670,7 +562,7 @@ impl MakeCapturePipeline for screen_capture::Direct3DCapture { .SystemRelativeTime() .map_err(|e| format!("Frame Time: {e}"))?; - let timestamp = SourceTimestamp::PerformanceCounter( + let timestamp = Timestamp::PerformanceCounter( PerformanceCounterTimestamp::new(frame_time.Duration), ); @@ -746,7 +638,7 @@ impl MakeCapturePipeline for screen_capture::Direct3DCapture { type ScreenCaptureReturn = ( ScreenCaptureSource, - Receiver<(::VideoFormat, SourceTimestamp)>, + Receiver<(::VideoFormat, Timestamp)>, ); #[cfg(target_os = "macos")] @@ -759,7 +651,8 @@ pub async fn create_screen_capture( capture_target: &ScreenCaptureTarget, force_show_cursor: bool, max_fps: u32, - audio_tx: Option>, + audio_tx: Option>, + start_time: SystemTime, #[cfg(windows)] d3d_device: ::windows::Win32::Graphics::Direct3D11::ID3D11Device, ) -> Result, RecordingError> { let (video_tx, video_rx) = flume::bounded(16); @@ -770,6 +663,7 @@ pub async fn create_screen_capture( max_fps, video_tx, audio_tx, + start_time, tokio::runtime::Handle::current(), #[cfg(windows)] d3d_device, diff --git a/crates/recording/src/cursor.rs b/crates/recording/src/cursor.rs index 28b59ebe85..c17e1c1860 100644 --- a/crates/recording/src/cursor.rs +++ b/crates/recording/src/cursor.rs @@ -1,12 +1,11 @@ use cap_cursor_capture::CursorCropBounds; use cap_cursor_info::CursorShape; use cap_project::{CursorClickEvent, CursorMoveEvent, XY}; -use std::{collections::HashMap, path::PathBuf, time::SystemTime}; +use cap_timestamp::Timestamps; +use std::{collections::HashMap, path::PathBuf}; use tokio::sync::oneshot; use tokio_util::sync::CancellationToken; -use crate::capture_pipeline::SourceTimestamps; - pub struct Cursor { pub file_name: String, pub id: u32, @@ -43,7 +42,7 @@ pub fn spawn_cursor_recorder( cursors_dir: PathBuf, prev_cursors: Cursors, next_cursor_id: u32, - start_time: SourceTimestamps, + start_time: Timestamps, ) -> CursorActor { 
use cap_utils::spawn_actor; use device_query::{DeviceQuery, DeviceState}; diff --git a/crates/recording/src/feeds/camera.rs b/crates/recording/src/feeds/camera.rs index 3453b9fbc0..ccdb3e21d8 100644 --- a/crates/recording/src/feeds/camera.rs +++ b/crates/recording/src/feeds/camera.rs @@ -1,6 +1,8 @@ use cap_camera::CameraInfo; +use cap_camera_ffmpeg::*; use cap_fail::fail_err; use cap_media_info::VideoInfo; +use cap_timestamp::Timestamp; use ffmpeg::frame; use futures::{FutureExt, future::BoxFuture}; use kameo::prelude::*; @@ -9,23 +11,17 @@ use std::{ cmp::Ordering, ops::Deref, sync::mpsc::{self, SyncSender}, - time::{Duration, Instant}, + time::Duration, }; use tokio::{runtime::Runtime, sync::oneshot, task::LocalSet}; use tracing::{debug, error, trace, warn}; -use cap_camera_ffmpeg::*; - -#[cfg(windows)] -use crate::capture_pipeline::PerformanceCounterTimestamp; -use crate::capture_pipeline::SourceTimestamp; - const CAMERA_INIT_TIMEOUT: Duration = Duration::from_secs(4); #[derive(Clone)] pub struct RawCameraFrame { pub frame: frame::Video, - pub timestamp: SourceTimestamp, + pub timestamp: Timestamp, } #[derive(Actor)] @@ -286,8 +282,16 @@ async fn setup_camera( .tell(NewFrame(RawCameraFrame { frame: ff_frame, #[cfg(windows)] - timestamp: SourceTimestamp::PerformanceCounter( - PerformanceCounterTimestamp::new(frame.native().perf_counter), + timestamp: Timestamp::PerformanceCounter(PerformanceCounterTimestamp::new( + frame.native().perf_counter, + )), + #[cfg(target_os = "macos")] + timestamp: Timestamp::MachAbsoluteTime( + cap_timestamp::MachAbsoluteTimestamp::new( + cidre::cm::Clock::convert_host_time_to_sys_units( + frame.native().sample_buf().pts(), + ), + ), ), })) .try_send(); diff --git a/crates/recording/src/feeds/microphone.rs b/crates/recording/src/feeds/microphone.rs index da0f44eb6f..278dc38878 100644 --- a/crates/recording/src/feeds/microphone.rs +++ b/crates/recording/src/feeds/microphone.rs @@ -1,5 +1,5 @@ -use 
crate::capture_pipeline::SourceTimestamp; use cap_media_info::{AudioInfo, ffmpeg_sample_format_for}; +use cap_timestamp::Timestamp; use cpal::{ Device, InputCallbackInfo, SampleFormat, StreamError, SupportedStreamConfig, traits::{DeviceTrait, HostTrait, StreamTrait}, @@ -22,7 +22,7 @@ pub struct MicrophoneSamples { pub data: Vec, pub format: SampleFormat, pub info: InputCallbackInfo, - pub timestamp: SourceTimestamp, + pub timestamp: Timestamp, } #[derive(Actor)] @@ -299,7 +299,7 @@ impl Message for MicrophoneFeed { data: data.bytes().to_vec(), format: data.sample_format(), info: info.clone(), - timestamp: SourceTimestamp::from_cpal(info.timestamp().capture), + timestamp: Timestamp::from_cpal(info.timestamp().capture), }) .try_send(); } diff --git a/crates/recording/src/instant_recording.rs b/crates/recording/src/instant_recording.rs index 2c2007729a..0630d118f8 100644 --- a/crates/recording/src/instant_recording.rs +++ b/crates/recording/src/instant_recording.rs @@ -1,6 +1,7 @@ use cap_media::MediaError; use cap_media_info::{AudioInfo, VideoInfo}; use cap_project::InstantRecordingMeta; +use cap_timestamp::Timestamp; use cap_utils::{ensure_dir, spawn_actor}; use flume::Receiver; use std::{ @@ -13,9 +14,7 @@ use tracing::{Instrument, debug, error, info, trace}; use crate::{ ActorError, RecordingBaseInputs, RecordingError, - capture_pipeline::{ - MakeCapturePipeline, SourceTimestamp, SourceTimestamps, create_screen_capture, - }, + capture_pipeline::{MakeCapturePipeline, create_screen_capture}, feeds::microphone::MicrophoneFeedLock, pipeline::Pipeline, sources::{ScreenCaptureSource, ScreenCaptureTarget}, @@ -114,10 +113,10 @@ async fn create_pipeline( output_path: PathBuf, screen_source: ( ScreenCaptureSource, - flume::Receiver<(TCaptureFormat::VideoFormat, SourceTimestamp)>, + flume::Receiver<(TCaptureFormat::VideoFormat, Timestamp)>, ), mic_feed: Option>, - system_audio: Option>, + system_audio: Option>, ) -> Result< ( InstantRecordingPipeline, @@ -173,6 +172,8 @@ 
pub async fn spawn_instant_recording_actor( > { ensure_dir(&recording_dir)?; + let start_time = SystemTime::now(); + let (done_tx, done_rx) = oneshot::channel(); trace!("creating recording actor"); @@ -195,6 +196,7 @@ pub async fn spawn_instant_recording_actor( true, 30, system_audio.0, + start_time, #[cfg(windows)] d3d_device, ) diff --git a/crates/recording/src/pipeline/audio_buffer.rs b/crates/recording/src/pipeline/audio_buffer.rs index 39045e5cb5..fd3e6442fb 100644 --- a/crates/recording/src/pipeline/audio_buffer.rs +++ b/crates/recording/src/pipeline/audio_buffer.rs @@ -1,5 +1,4 @@ use cap_audio::cast_bytes_to_f32_slice; -use cap_ffmpeg_utils::*; use cap_media_info::AudioInfo; use ffmpeg::encoder; pub use ffmpeg::util::frame::Audio as FFAudio; @@ -47,8 +46,7 @@ impl AudioBuffer { if frame.is_planar() { for channel in 0..self.config.channels { - self.data[channel] - .extend(unsafe { cast_bytes_to_f32_slice(frame.plane_data(channel)) }); + self.data[channel].extend(unsafe { cast_bytes_to_f32_slice(frame.data(channel)) }); } } else { self.data[0].extend(unsafe { @@ -80,7 +78,7 @@ impl AudioBuffer { .drain(0..actual_samples_per_channel) .enumerate() { - self.frame.plane_data_mut(channel)[index * 4..(index + 1) * 4] + self.frame.data_mut(channel)[index * 4..(index + 1) * 4] .copy_from_slice(&byte.to_ne_bytes()); } } @@ -89,7 +87,7 @@ impl AudioBuffer { .drain(0..actual_samples_per_channel * self.config.channels) .enumerate() { - self.frame.plane_data_mut(0)[index * 4..(index + 1) * 4] + self.frame.data_mut(0)[index * 4..(index + 1) * 4] .copy_from_slice(&byte.to_ne_bytes()); } } diff --git a/crates/recording/src/sources/audio_input.rs b/crates/recording/src/sources/audio_input.rs index 5b21ad0d93..af10294e87 100644 --- a/crates/recording/src/sources/audio_input.rs +++ b/crates/recording/src/sources/audio_input.rs @@ -1,11 +1,11 @@ use crate::{ - capture_pipeline::SourceTimestamp, feeds::microphone::{self, MicrophoneFeedLock, MicrophoneSamples}, 
pipeline::{control::Control, task::PipelineSourceTask}, }; use cap_fail::fail; use cap_media::MediaError; use cap_media_info::AudioInfo; +use cap_timestamp::Timestamp; use cpal::{Device, SupportedStreamConfig}; use ffmpeg::frame::Audio as FFAudio; use flume::{Receiver, Sender}; @@ -18,11 +18,11 @@ pub type AudioInputDeviceMap = IndexMap pub struct AudioInputSource { feed: Arc, audio_info: AudioInfo, - tx: Sender<(FFAudio, SourceTimestamp)>, + tx: Sender<(FFAudio, Timestamp)>, } impl AudioInputSource { - pub fn init(feed: Arc, tx: Sender<(FFAudio, SourceTimestamp)>) -> Self { + pub fn init(feed: Arc, tx: Sender<(FFAudio, Timestamp)>) -> Self { Self { audio_info: *feed.audio_info(), feed, @@ -35,7 +35,7 @@ impl AudioInputSource { } fn process_frame(&mut self, samples: MicrophoneSamples) -> Result<(), MediaError> { - let timestamp = SourceTimestamp::from_cpal(samples.info.timestamp().capture); + let timestamp = Timestamp::from_cpal(samples.info.timestamp().capture); let frame = self.audio_info.wrap_frame(&samples.data); if self.tx.send((frame, timestamp)).is_err() { diff --git a/crates/recording/src/sources/audio_mixer.rs b/crates/recording/src/sources/audio_mixer.rs index d3eb3de507..8dff3077cf 100644 --- a/crates/recording/src/sources/audio_mixer.rs +++ b/crates/recording/src/sources/audio_mixer.rs @@ -1,234 +1,53 @@ +use crate::pipeline::task::PipelineSourceTask; use cap_media_info::AudioInfo; -use ffmpeg::sys::AV_TIME_BASE_Q; +use cap_timestamp::{Timestamp, Timestamps}; use flume::{Receiver, Sender}; -use std::collections::VecDeque; -use std::time::{Duration, SystemTime}; -use tracing::{debug, trace, warn}; - -use crate::{ - capture_pipeline::{SourceTimestamp, SourceTimestamps}, - pipeline::task::PipelineSourceTask, +use std::{ + collections::VecDeque, + time::{Duration, Instant}, }; +use tracing::debug; -struct BufferedAudioSource { - rx: Receiver<(ffmpeg::frame::Audio, SourceTimestamp)>, - info: AudioInfo, - buffer: VecDeque<(ffmpeg::frame::Audio, 
SourceTimestamp)>, - last_processed_timestamp: Option, - last_output_pts: i64, - expected_frame_duration_ms: f64, - total_samples_processed: u64, - timeline_position: Duration, // Track our position in the timeline directly -} - -impl BufferedAudioSource { - fn new(rx: Receiver<(ffmpeg::frame::Audio, SourceTimestamp)>, info: AudioInfo) -> Self { - let expected_frame_duration_ms = 1024.0 / info.rate() as f64 * 1000.0; - - Self { - rx, - info, - buffer: VecDeque::new(), - last_processed_timestamp: None, - last_output_pts: 0, - expected_frame_duration_ms, - total_samples_processed: 0, - timeline_position: Duration::ZERO, - } - } - - fn fill_buffer(&mut self) { - let initial_size = self.buffer.len(); - let mut frames_received = 0; - let is_disconnected = self.rx.is_disconnected(); - - while let Ok((frame, timestamp)) = self.rx.try_recv() { - trace!( - "Received audio frame: {} samples, timestamp: {:?}", - frame.samples(), - timestamp - ); - self.buffer.push_back((frame, timestamp)); - frames_received += 1; - } - - if frames_received > 0 { - trace!( - "Buffer filled: {} new frames, total buffer size: {} -> {}", - frames_received, - initial_size, - self.buffer.len() - ); - } else if is_disconnected { - trace!("Receiver disconnected, no more frames will be received"); - } - } - - fn has_sufficient_buffer(&self) -> bool { - self.buffer.len() >= 2 || self.rx.is_disconnected() - } - - fn generate_silent_frame(&self, samples: usize) -> ffmpeg::frame::Audio { - let mut frame = - ffmpeg::frame::Audio::new(self.info.sample_format, samples, self.info.channel_layout()); - - unsafe { - let data = frame.data_mut(0); - let bytes_per_sample = self.info.sample_format.bytes() as usize; - let total_bytes = - samples * self.info.channel_layout().channels() as usize * bytes_per_sample; - std::ptr::write_bytes(data.as_mut_ptr(), 0, total_bytes); - } - - frame - } - - fn generate_initial_silence_if_needed( - &mut self, - _target_time: Duration, - _start_timestamps: SourceTimestamps, - ) -> 
Vec { - // No longer generate initial silence - let the mixer handle silence generation - Vec::new() - } - - fn process_with_gap_filling( - &mut self, - target_time: Duration, - start_timestamps: SourceTimestamps, - ) -> Vec { - let mut output_frames = Vec::new(); - - // Always process ALL available frames - don't leave them in buffer - while !self.buffer.is_empty() { - let (mut frame, timestamp) = self.buffer.pop_front().unwrap(); - let frame_time = timestamp.duration_since(start_timestamps); - let frame_samples = frame.samples() as u64; - - // Check for gap if we've processed frames before - if let Some(last_ts) = &self.last_processed_timestamp { - let last_time = last_ts.duration_since(start_timestamps); - let expected_next = last_time - + Duration::from_secs_f64(frame_samples as f64 / self.info.rate() as f64); - - // If there's a gap larger than 1.5 frames, fill it with silence - if frame_time > expected_next + Duration::from_millis(30) { - let gap = frame_time - expected_next; - let silent_samples = ((gap.as_secs_f64()) * self.info.rate() as f64) as usize; - - let mut remaining = silent_samples; - while remaining > 0 { - let chunk_size = remaining.min(1024); - let mut silent_frame = self.generate_silent_frame(chunk_size); - - let pts = (self.total_samples_processed as f64 / self.info.rate() as f64 - * AV_TIME_BASE_Q.den as f64) as i64; - silent_frame.set_pts(Some(pts)); - - output_frames.push(silent_frame); - self.total_samples_processed += chunk_size as u64; - self.last_output_pts = pts; - remaining -= chunk_size; - } - } - } - - // Process the actual frame - let pts = (self.total_samples_processed as f64 / self.info.rate() as f64 - * AV_TIME_BASE_Q.den as f64) as i64; - frame.set_pts(Some(pts)); - - self.last_output_pts = pts; - self.total_samples_processed += frame_samples; - self.last_processed_timestamp = Some(timestamp); - self.timeline_position = frame_time - + Duration::from_secs_f64(frame_samples as f64 / self.info.rate() as f64); +// Wait TICK_MS for 
frames to arrive +// Assume all sources' frames for that tick have arrived after TICK_MS +// Insert silence where necessary for sources with no frames +// +// Current problem is generating an output timestamp that lines up with the input's timestamp - output_frames.push(frame); - } - - // If buffer is empty but we've processed frames before, generate silence to maintain continuity - if output_frames.is_empty() && self.last_processed_timestamp.is_some() { - // Calculate how much silence we need based on the time gap - let last_time = self.timeline_position; - if target_time > last_time { - let gap = target_time - last_time; - let silent_samples = ((gap.as_secs_f64()) * self.info.rate() as f64) as usize; - - if silent_samples > 0 { - let mut remaining = silent_samples; - while remaining > 0 { - let chunk_size = remaining.min(1024); - let mut silent_frame = self.generate_silent_frame(chunk_size); - - let pts = (self.total_samples_processed as f64 / self.info.rate() as f64 - * AV_TIME_BASE_Q.den as f64) as i64; - silent_frame.set_pts(Some(pts)); - - output_frames.push(silent_frame); - self.total_samples_processed += chunk_size as u64; - self.last_output_pts = pts; - self.timeline_position += - Duration::from_secs_f64(chunk_size as f64 / self.info.rate() as f64); - remaining -= chunk_size; - } - } - } - } - - output_frames - } +struct MixerSource { + rx: Receiver<(ffmpeg::frame::Audio, Timestamp)>, + info: AudioInfo, + buffer: VecDeque<(ffmpeg::frame::Audio, Timestamp)>, + buffer_last: Option<(Timestamp, Duration)>, } -pub struct AudioMixer { - sources: Vec, - output: Sender<(ffmpeg::frame::Audio, Duration, SourceTimestamps)>, - start_timestamps: SourceTimestamps, - output_sample_count: u64, - output_sample_rate: u32, +pub struct AudioMixerBuilder { + sources: Vec, + output: Sender<(ffmpeg::frame::Audio, Timestamp)>, } -impl AudioMixer { - pub fn new(output: Sender<(ffmpeg::frame::Audio, Duration, SourceTimestamps)>) -> Self { +impl AudioMixerBuilder { + pub fn 
new(output: Sender<(ffmpeg::frame::Audio, Timestamp)>) -> Self { Self { sources: Vec::new(), output, - start_timestamps: SourceTimestamps::now(), - output_sample_count: 0, - output_sample_rate: 48000, } } - pub fn sink(&mut self, info: AudioInfo) -> AudioMixerSink { - let (tx, rx) = flume::bounded(32); - - self.sources.push(BufferedAudioSource::new(rx, info)); - - AudioMixerSink { tx } - } - - pub fn add_source( - &mut self, - info: AudioInfo, - rx: Receiver<(ffmpeg::frame::Audio, SourceTimestamp)>, - ) { - self.sources.push(BufferedAudioSource::new(rx, info)); - } - pub fn has_sources(&self) -> bool { !self.sources.is_empty() } - pub fn info() -> AudioInfo { - AudioInfo::new( - ffmpeg::format::Sample::F32(ffmpeg::format::sample::Type::Packed), - 48000, - 2, - ) - .unwrap() + pub fn add_source(&mut self, info: AudioInfo, rx: Receiver<(ffmpeg::frame::Audio, Timestamp)>) { + self.sources.push(MixerSource { + info, + rx, + buffer: VecDeque::new(), + buffer_last: None, + }); } - pub fn run(&mut self, mut get_is_stopped: impl FnMut() -> bool, on_ready: impl FnOnce()) { + pub fn build(self) -> Result { let mut filter_graph = ffmpeg::filter::Graph::new(); let mut abuffers = self @@ -247,45 +66,36 @@ impl AudioMixer { debug!("audio mixer input {i}: {args}"); - filter_graph - .add( - &ffmpeg::filter::find("abuffer").expect("Failed to find abuffer filter"), - &format!("src{i}"), - &args, - ) - .unwrap() + filter_graph.add( + &ffmpeg::filter::find("abuffer").expect("Failed to find abuffer filter"), + &format!("src{i}"), + &args, + ) }) - .collect::>(); - - let mut amix = filter_graph - .add( - &ffmpeg::filter::find("amix").expect("Failed to find amix filter"), - "amix", - &format!( - "inputs={}:duration=first:dropout_transition=0", - abuffers.len() - ), - ) - .unwrap(); + .collect::, _>>()?; + + let mut amix = filter_graph.add( + &ffmpeg::filter::find("amix").expect("Failed to find amix filter"), + "amix", + &format!( + "inputs={}:duration=first:dropout_transition=0", + 
abuffers.len() + ), + )?; let aformat_args = "sample_fmts=flt:sample_rates=48000:channel_layouts=stereo"; - debug!("aformat args: {aformat_args}"); - - let mut aformat = filter_graph - .add( - &ffmpeg::filter::find("aformat").expect("Failed to find aformat filter"), - "aformat", - aformat_args, - ) - .expect("Failed to add aformat filter"); - - let mut abuffersink = filter_graph - .add( - &ffmpeg::filter::find("abuffersink").expect("Failed to find abuffersink filter"), - "sink", - "", - ) - .expect("Failed to add abuffersink filter"); + + let mut aformat = filter_graph.add( + &ffmpeg::filter::find("aformat").expect("Failed to find aformat filter"), + "aformat", + aformat_args, + )?; + + let mut abuffersink = filter_graph.add( + &ffmpeg::filter::find("abuffersink").expect("Failed to find abuffersink filter"), + "sink", + "", + )?; for (i, abuffer) in abuffers.iter_mut().enumerate() { abuffer.link(0, &mut amix, i as u32); @@ -294,791 +104,413 @@ impl AudioMixer { amix.link(0, &mut aformat, 0); aformat.link(0, &mut abuffersink, 0); - filter_graph - .validate() - .expect("Failed to validate filter graph"); - - on_ready(); + filter_graph.validate()?; - let frame_size = 1024usize; - let frame_duration = - Duration::from_secs_f64(frame_size as f64 / self.output_sample_rate as f64); - let mut next_output_time = std::time::Instant::now() + Duration::from_millis(50); - let mut filtered = ffmpeg::frame::Audio::empty(); - let mut startup_phase = true; - let mut processing_time = Duration::ZERO; - let mut first_frame_time: Option = None; + Ok(AudioMixer { + sources: self.sources, + samples_out: 0, + output: self.output, + last_tick: None, + abuffers, + abuffersink, + _filter_graph: filter_graph, + _amix: amix, + _aformat: aformat, + }) + } +} - loop { - if get_is_stopped() { - return; - } +pub struct AudioMixer { + sources: Vec, + samples_out: usize, + output: Sender<(ffmpeg::frame::Audio, Timestamp)>, + last_tick: Option, + // sample_timestamps: VecDeque<(usize, Timestamp)>, + 
abuffers: Vec, + abuffersink: ffmpeg::filter::Context, + _filter_graph: ffmpeg::filter::Graph, + _amix: ffmpeg::filter::Context, + _aformat: ffmpeg::filter::Context, +} - // Fill all source buffers - for (i, source) in self.sources.iter_mut().enumerate() { - let buffer_size_before = source.buffer.len(); - source.fill_buffer(); - let buffer_size_after = source.buffer.len(); - - if buffer_size_after > buffer_size_before { - trace!( - "Source {}: buffer grew from {} to {} frames", - i, buffer_size_before, buffer_size_after - ); - // Log timing of first frame in buffer - if let Some((_, timestamp)) = source.buffer.front() { - let frame_time = timestamp.duration_since(self.start_timestamps); - trace!( - "Source {}: first buffered frame time: {:.2}ms vs processing_time: {:.2}ms (delta: {:.2}ms)", - i, - frame_time.as_secs_f64() * 1000.0, - processing_time.as_secs_f64() * 1000.0, - (frame_time.as_secs_f64() - processing_time.as_secs_f64()) * 1000.0 - ); +impl AudioMixer { + pub const INFO: AudioInfo = AudioInfo::new_raw( + cap_media_info::Sample::F32(cap_media_info::Type::Packed), + 48_000, + 2, + ); + pub const BUFFER_TIMEOUT: Duration = Duration::from_millis(10); + + fn buffer_sources(&mut self, start: Timestamps) { + for source in &mut self.sources { + let rate = source.info.rate(); + + while let Ok((frame, timestamp)) = source.rx.try_recv() { + // dbg!(timestamp.duration_since(start)); + // if gap between incoming and last, insert silence + if let Some((buffer_last_timestamp, buffer_last_duration)) = source.buffer_last { + let timestamp_elapsed = timestamp.duration_since(start); + let buffer_last_elapsed = buffer_last_timestamp.duration_since(start); + + if timestamp_elapsed > buffer_last_elapsed { + let elapsed_since_last_frame = timestamp_elapsed - buffer_last_elapsed; + + if elapsed_since_last_frame < buffer_last_duration + && buffer_last_duration - elapsed_since_last_frame + >= Duration::from_millis(1) + { + let gap = timestamp.duration_since(start) + - 
buffer_last_timestamp.duration_since(start) + - buffer_last_duration; + + debug!("Gap between last buffer frame, inserting {gap:?} of silence"); + + let silence_samples_needed = (gap.as_secs_f64()) * rate as f64; + let silence_samples_count = silence_samples_needed.ceil() as usize; + + let mut frame = ffmpeg::frame::Audio::new( + source.info.sample_format, + silence_samples_count, + source.info.channel_layout(), + ); + + frame.set_rate(source.info.rate() as u32); + + source.buffer_last = Some(( + &buffer_last_timestamp + gap, + Duration::from_secs_f64(silence_samples_count as f64 / rate as f64), + )); + source.buffer.push_back((frame, buffer_last_timestamp)); + } } - } else if source.rx.is_disconnected() { - trace!("Source {}: receiver disconnected", i); - } - } + } else { + let gap = timestamp.duration_since(start); - // During startup, wait for sufficient initial data - if startup_phase { - let sources_with_data = - self.sources.iter().filter(|s| !s.buffer.is_empty()).count(); - let sources_with_sufficient_buffer = - self.sources.iter().filter(|s| s.buffer.len() >= 2).count(); - - // Wait until we have some buffering to avoid underruns - if sources_with_data == 0 { - std::thread::sleep(Duration::from_millis(5)); - continue; - } else if sources_with_sufficient_buffer < sources_with_data { - // We have some data but not enough buffering yet - trace!( - "Startup: waiting for buffer (sources with data: {}, with sufficient buffer: {})", - sources_with_data, sources_with_sufficient_buffer - ); - std::thread::sleep(Duration::from_millis(5)); - continue; - } + if !gap.is_zero() { + debug!("Gap from beginning of stream, inserting {gap:?} of silence"); - startup_phase = false; - debug!( - "Startup complete: {} sources ready with sufficient buffering", - sources_with_data - ); - // Reset next output time after receiving sufficient data - next_output_time = std::time::Instant::now() + Duration::from_millis(20); - } + // TODO: refactor to be one while loop - let now = 
std::time::Instant::now(); + let gap_samples = gap.as_millis() as usize * rate as usize / 1000; + let chunk_size = rate as usize / 200; - // Check if it's time to produce output - if now >= next_output_time { - // Feed frames to each source's filter input - for (i, source) in self.sources.iter_mut().enumerate() { - // Process ALL frames from buffer (including silence generation) - let frames = - source.process_with_gap_filling(processing_time, self.start_timestamps); + let chunks = gap_samples as f64 / chunk_size as f64; - // Add all frames (real or silence) to the filter - for frame in frames { - if let Err(e) = abuffers[i].source().add(&frame) { - warn!("Source {}: Failed to add frame to filter: {:?}", i, e); - } - } - } + let chunk_duration = + Duration::from_secs_f64(chunk_size as f64 / rate as f64); + for i in 0..chunks.floor() as usize { + let mut frame = ffmpeg::frame::Audio::new( + source.info.sample_format, + chunk_size, + source.info.channel_layout(), + ); - // Update timing for next iteration - processing_time += frame_duration; - next_output_time += frame_duration; - } + frame.set_rate(rate as u32); - // Try to get output from the filter graph - let mut frames_output = 0; - while abuffersink.sink().frame(&mut filtered).is_ok() { - let output_duration = Duration::from_secs_f64( - self.output_sample_count as f64 / self.output_sample_rate as f64, - ); + let timestamp = + Timestamp::Instant(start.instant() + chunk_duration * i as u32); + source.buffer_last = Some((timestamp, chunk_duration)); + source.buffer.push_back((frame, timestamp)); + } - let pts = (output_duration.as_secs_f64() * AV_TIME_BASE_Q.den as f64) as i64; - filtered.set_pts(Some(pts)); + let leftover_chunk_size = (chunks.fract() * chunk_size as f64) as usize; - let sample_count = filtered.samples() as u64; + let mut frame = ffmpeg::frame::Audio::new( + source.info.sample_format, + leftover_chunk_size, + source.info.channel_layout(), + ); - trace!( - "Output frame: {} samples, pts: {}, 
duration: {:?}", - sample_count, pts, output_duration - ); + frame.set_rate(rate as u32); - if self - .output - .send((filtered, output_duration, self.start_timestamps)) - .is_err() - { - warn!("Mixer unable to send output"); - return; + let duration = + Duration::from_secs_f64(leftover_chunk_size as f64 / rate as f64); + let timestamp = Timestamp::Instant( + start.instant() + chunk_duration * chunks.floor() as u32 + duration, + ); + source.buffer_last = Some((timestamp, duration)); + source.buffer.push_back((frame, timestamp)); + } } - self.output_sample_count += sample_count; - frames_output += 1; - filtered = ffmpeg::frame::Audio::empty(); + source.buffer_last = Some(( + timestamp, + Duration::from_secs_f64(frame.samples() as f64 / frame.rate() as f64), + )); + source.buffer.push_back((frame, timestamp)); } + } + } - if frames_output > 0 { - debug!( - "Filter graph produced {} output frames, total samples: {}", - frames_output, self.output_sample_count - ); - } else { - trace!("Filter graph produced no output this cycle"); + fn tick(&mut self, start: Timestamps, now: Instant) -> Result<(), ()> { + self.buffer_sources(start); + + for (i, source) in self.sources.iter_mut().enumerate() { + for buffer in source.buffer.drain(..) 
{ + let _ = self.abuffers[i].source().add(&buffer.0); } + } - // Sleep until next output time, but check frequently for new data - let time_until_next = - next_output_time.saturating_duration_since(std::time::Instant::now()); - if time_until_next > Duration::from_millis(2) { - std::thread::sleep(Duration::from_millis(2)); + let mut filtered = ffmpeg::frame::Audio::empty(); + while self.abuffersink.sink().frame(&mut filtered).is_ok() { + let elapsed = Duration::from_secs_f64(self.samples_out as f64 / filtered.rate() as f64); + let timestamp = start.instant() + elapsed; + + self.samples_out += filtered.samples(); + + if self + .output + .send((filtered, Timestamp::Instant(timestamp))) + .is_err() + { + return Err(()); } + + filtered = ffmpeg::frame::Audio::empty(); } + + self.last_tick = Some(now); + + Ok(()) } -} -pub struct AudioMixerSink { - pub tx: flume::Sender<(ffmpeg::frame::Audio, SourceTimestamp)>, -} + pub fn run(&mut self) { + let start = Timestamps::now(); -pub struct AudioMixerSource { - rx: flume::Receiver<(ffmpeg::frame::Audio, SourceTimestamp)>, - info: AudioInfo, + while let Ok(()) = self.tick(start, Instant::now()) { + std::thread::sleep(Duration::from_millis(5)); + } + } + + pub fn builder(output: Sender<(ffmpeg::frame::Audio, Timestamp)>) -> AudioMixerBuilder { + AudioMixerBuilder::new(output) + } } -impl PipelineSourceTask for AudioMixer { +impl PipelineSourceTask for AudioMixerBuilder { fn run( &mut self, ready_signal: crate::pipeline::task::PipelineReadySignal, mut control_signal: crate::pipeline::control::PipelineControlSignal, ) -> Result<(), String> { - self.run( - || { - control_signal - .last() - .map(|v| matches!(v, crate::pipeline::control::Control::Shutdown)) - .unwrap_or(false) - }, - || { - let _ = ready_signal.send(Ok(())); - }, - ); - - Ok(()) - } -} + let start = Timestamps::now(); -#[cfg(test)] -mod tests { - use super::*; - use std::time::{Duration, Instant, SystemTime}; - - fn create_test_audio_info() -> AudioInfo { - 
AudioInfo::new( - ffmpeg::format::Sample::F32(ffmpeg::format::sample::Type::Packed), - 48000, - 2, - ) - .unwrap() - } + let this = std::mem::replace(self, AudioMixerBuilder::new(self.output.clone())); - fn create_test_frame(samples: usize, info: &AudioInfo) -> ffmpeg::frame::Audio { - let mut frame = - ffmpeg::frame::Audio::new(info.sample_format, samples, info.channel_layout()); + let mut mixer = this.build().map_err(|e| format!("BuildMixer: {e}"))?; - unsafe { - let data = frame.data_mut(0); - let bytes_per_sample = info.sample_format.bytes() as usize; - let channels = info.channel_layout().channels() as usize; - let total_bytes = samples * channels * bytes_per_sample; + let _ = ready_signal.send(Ok(())); - // Fill with test pattern (non-zero to distinguish from silence) - for i in 0..total_bytes { - data.as_mut_ptr().add(i).write((i % 256) as u8); + loop { + if control_signal + .last() + .map(|v| matches!(v, crate::pipeline::control::Control::Shutdown)) + .unwrap_or(false) + { + break; } - } - frame.set_pts(Some(0)); - frame - } - - fn is_silent_frame(frame: &ffmpeg::frame::Audio) -> bool { - unsafe { - let data = frame.data(0); - let bytes_per_sample = frame.format().bytes() as usize; - let channels = frame.channels() as usize; - let total_bytes = frame.samples() as usize * channels * bytes_per_sample; + mixer + .tick(start, Instant::now()) + .map_err(|()| format!("Audio mixer tick failed"))?; - for i in 0..total_bytes { - if *data.as_ptr().add(i) != 0 { - return false; - } - } + std::thread::sleep(Duration::from_millis(5)); } - true - } - - #[test] - fn test_buffered_source_initialization() { - let info = create_test_audio_info(); - let (_tx, rx) = flume::bounded(32); - let source = BufferedAudioSource::new(rx, info); - - assert!(source.buffer.is_empty()); - assert!(source.last_processed_timestamp.is_none()); - assert_eq!(source.total_samples_processed, 0); - assert_eq!(source.last_output_pts, 0); - assert_eq!(source.timeline_position, Duration::ZERO); - - // 
Expected frame duration for 1024 samples at 48kHz - let expected_duration = 1024.0 / 48000.0 * 1000.0; - assert!((source.expected_frame_duration_ms - expected_duration).abs() < 0.001); + Ok(()) } +} - #[test] - fn test_fill_buffer() { - let info = create_test_audio_info(); - let (tx, rx) = flume::bounded(32); - let mut source = BufferedAudioSource::new(rx, info.clone()); +#[cfg(test)] +mod test { + use super::*; - // Send some frames - let timestamp1 = SourceTimestamp::Instant(Instant::now()); - let timestamp2 = SourceTimestamp::Instant(Instant::now() + Duration::from_millis(21)); - let timestamp3 = SourceTimestamp::Instant(Instant::now() + Duration::from_millis(42)); + const SAMPLE_RATE: u32 = 48_000; + const SOURCE_INFO: AudioInfo = AudioInfo::new_raw( + cap_media_info::Sample::U8(cap_media_info::Type::Packed), + SAMPLE_RATE, + 1, + ); + const ONE_SECOND: Duration = Duration::from_secs(1); + const SAMPLES_SECOND: usize = SOURCE_INFO.rate() as usize; - tx.send((create_test_frame(1024, &info), timestamp1)) - .unwrap(); - tx.send((create_test_frame(1024, &info), timestamp2)) - .unwrap(); - tx.send((create_test_frame(1024, &info), timestamp3)) - .unwrap(); + #[test] + fn mix_sources() { + let (tx, output_rx) = flume::bounded(4); + let mut mixer = AudioMixerBuilder::new(tx); + let start = Timestamps::now(); + + let (tx1, rx) = flume::bounded(4); + mixer.add_source(SOURCE_INFO, rx); + + let (tx2, rx) = flume::bounded(4); + mixer.add_source(SOURCE_INFO, rx); + + let mut mixer = mixer.build().unwrap(); + + tx1.send(( + SOURCE_INFO.wrap_frame(&vec![128, 255, 255, 255]), + Timestamp::Instant(start.instant()), + )) + .unwrap(); + tx2.send(( + SOURCE_INFO.wrap_frame(&vec![128, 128, 1, 255]), + Timestamp::Instant(start.instant()), + )) + .unwrap(); + + let _ = mixer.tick( + start, + start.instant() + Duration::from_secs_f64(4.0 / SAMPLE_RATE as f64), + ); - assert_eq!(source.buffer.len(), 0); + let (frame, _) = output_rx.recv().expect("No output frame"); - 
source.fill_buffer(); + let byte_count = frame.samples() * frame.channels() as usize; + let samples: &[f32] = unsafe { std::mem::transmute(&frame.data(0)[0..byte_count]) }; - assert_eq!(source.buffer.len(), 3); - } + assert_eq!(samples[0], 0.0); + assert_eq!(samples[0], samples[1]); - #[test] - fn test_sufficient_buffer_detection() { - let info = create_test_audio_info(); - let (tx, rx) = flume::bounded(32); - let mut source = BufferedAudioSource::new(rx, info.clone()); - - // Initially insufficient - assert!(!source.has_sufficient_buffer()); - - // Add one frame - still insufficient - let timestamp = SourceTimestamp::Instant(Instant::now()); - source - .buffer - .push_back((create_test_frame(1024, &info), timestamp)); - assert!(!source.has_sufficient_buffer()); - - // Add second frame - now sufficient - let timestamp2 = SourceTimestamp::Instant(Instant::now() + Duration::from_millis(21)); - source - .buffer - .push_back((create_test_frame(1024, &info), timestamp2)); - assert!(source.has_sufficient_buffer()); - - // Disconnect channel - always sufficient - drop(tx); - source.buffer.clear(); - assert!(source.has_sufficient_buffer()); + assert_eq!(samples[4], 0.0); + assert_eq!(samples[4], samples[5]); } - #[test] - fn test_silent_frame_generation() { - let info = create_test_audio_info(); - let (_tx, rx) = flume::bounded::<(ffmpeg::frame::Audio, SourceTimestamp)>(32); - let source = BufferedAudioSource::new(rx, info.clone()); - - // Test various sizes - for size in [512, 1024, 2048, 4096] { - let frame = source.generate_silent_frame(size); - assert_eq!(frame.samples(), size); - assert!(is_silent_frame(&frame)); - } - } + mod source_buffer { + use super::*; - #[test] - fn test_gap_detection_and_filling() { - let info = create_test_audio_info(); - let (_tx, rx) = flume::bounded(32); - let mut source = BufferedAudioSource::new(rx, info.clone()); - let start_timestamps = SourceTimestamps::now(); - - // Process first frame - let timestamp1 = 
SourceTimestamp::Instant(Instant::now()); - source - .buffer - .push_back((create_test_frame(1024, &info), timestamp1)); - - let frames = source.process_with_gap_filling(Duration::from_secs(1), start_timestamps); - assert_eq!(frames.len(), 1); - assert!(!is_silent_frame(&frames[0])); - assert!(source.last_processed_timestamp.is_some()); - - // Create a gap - add frame 100ms later than expected - let timestamp2 = SourceTimestamp::Instant(Instant::now() + Duration::from_millis(121)); - source - .buffer - .push_back((create_test_frame(1024, &info), timestamp2)); - - let frames = source.process_with_gap_filling(Duration::from_secs(2), start_timestamps); - - // Should have silent frames followed by the real frame - assert!(frames.len() > 1); - - // Check that we got silent frames for the gap - for i in 0..frames.len() - 1 { - assert!(is_silent_frame(&frames[i])); - } + #[test] + fn single_frame() { + let (output_tx, _) = flume::bounded(4); + let mut mixer = AudioMixerBuilder::new(output_tx); + let start = Timestamps::now(); - // Last frame should be the real one - assert!(!is_silent_frame(&frames[frames.len() - 1])); - } + let (tx, rx) = flume::bounded(4); + mixer.add_source(SOURCE_INFO, rx); - #[test] - fn test_no_gap_when_continuous() { - let info = create_test_audio_info(); - let (_tx, rx) = flume::bounded(32); - let mut source = BufferedAudioSource::new(rx, info.clone()); - let start_timestamps = SourceTimestamps::now(); - - // Process frames with correct timing (21.33ms apart for 1024 samples at 48kHz) - let base_time = Instant::now(); - let frame_duration = Duration::from_micros(21333); // 1024/48000 * 1000000 - - for i in 0..5 { - let timestamp = SourceTimestamp::Instant(base_time + frame_duration * i); - source - .buffer - .push_back((create_test_frame(1024, &info), timestamp)); - - let frames = source.process_with_gap_filling(Duration::from_secs(10), start_timestamps); - - // Should only get one frame (no gaps) - assert_eq!(frames.len(), 1); - 
assert!(!is_silent_frame(&frames[0])); - } - } + let mut mixer = mixer.build().unwrap(); - #[test] - fn test_pts_calculation() { - let info = create_test_audio_info(); - let (_tx, rx) = flume::bounded(32); - let mut source = BufferedAudioSource::new(rx, info.clone()); - let start_timestamps = SourceTimestamps::now(); - - // Process multiple frames - for i in 0..3 { - let timestamp = - SourceTimestamp::Instant(Instant::now() + Duration::from_millis(i * 21)); - source - .buffer - .push_back((create_test_frame(1024, &info), timestamp)); - - let frames = source.process_with_gap_filling(Duration::from_secs(10), start_timestamps); - - assert_eq!(frames.len(), 1); - - // Check PTS progression - let expected_pts = (i as u64 * 1024 * 1_000_000) / 48000; // microseconds - let actual_pts = frames[0].pts().unwrap() as u64; - - // PTS should progress correctly - let tolerance = 1000; // 1ms tolerance - assert!( - (actual_pts as i64 - expected_pts as i64).abs() < tolerance, - "PTS mismatch: expected {}, got {}", - expected_pts, - actual_pts - ); - } - } - - #[test] - fn test_large_gap_handling() { - let info = create_test_audio_info(); - let (_tx, rx) = flume::bounded(32); - let mut source = BufferedAudioSource::new(rx, info.clone()); - let start_timestamps = SourceTimestamps::now(); - - // Process first frame - let timestamp1 = SourceTimestamp::Instant(Instant::now()); - source - .buffer - .push_back((create_test_frame(1024, &info), timestamp1)); - source.process_with_gap_filling(Duration::from_secs(1), start_timestamps); - - // Create a 1-second gap - let timestamp2 = SourceTimestamp::Instant(Instant::now() + Duration::from_secs(1)); - source - .buffer - .push_back((create_test_frame(1024, &info), timestamp2)); - - let frames = source.process_with_gap_filling(Duration::from_secs(2), start_timestamps); - - // Should have many silent frames (48000 samples for 1 second at 48kHz) - let total_silent_samples: usize = frames[..frames.len() - 1] - .iter() - .map(|f| f.samples() as 
usize) - .sum(); + tx.send(( + SOURCE_INFO.wrap_frame(&vec![0; SAMPLES_SECOND / 2]), + Timestamp::Instant(start.instant()), + )) + .unwrap(); - // Should be approximately 48000 samples (1 second) - assert!(total_silent_samples > 45000 && total_silent_samples < 50000); - } + mixer.buffer_sources(start); - #[test] - fn test_sample_counting() { - let info = create_test_audio_info(); - let (_tx, rx) = flume::bounded(32); - let mut source = BufferedAudioSource::new(rx, info.clone()); - let start_timestamps = SourceTimestamps::now(); - - // Process frames with different sizes - let sizes = vec![512, 1024, 2048, 1024, 512]; - let mut expected_total = 0u64; - - for (i, size) in sizes.iter().enumerate() { - let timestamp = - SourceTimestamp::Instant(Instant::now() + Duration::from_millis(i as u64 * 20)); - source - .buffer - .push_back((create_test_frame(*size, &info), timestamp)); - - source.process_with_gap_filling(Duration::from_secs(10), start_timestamps); - - expected_total += *size as u64; - assert_eq!(source.total_samples_processed, expected_total); + assert_eq!(mixer.sources[0].buffer.len(), 1); + assert!(mixer.sources[0].rx.is_empty()); } - } - #[test] - fn test_timestamp_types() { - let info = create_test_audio_info(); - let (_tx, rx) = flume::bounded(32); - let mut source = BufferedAudioSource::new(rx, info.clone()); - let start_timestamps = SourceTimestamps::now(); - - // Test with SystemTime timestamp - let timestamp1 = SourceTimestamp::SystemTime(SystemTime::now()); - source - .buffer - .push_back((create_test_frame(1024, &info), timestamp1)); - - let frames = source.process_with_gap_filling(Duration::from_secs(1), start_timestamps); - assert_eq!(frames.len(), 1); - - // Test with Instant timestamp - let timestamp2 = SourceTimestamp::Instant(Instant::now()); - source - .buffer - .push_back((create_test_frame(1024, &info), timestamp2)); - - let frames = source.process_with_gap_filling(Duration::from_secs(2), start_timestamps); - assert_eq!(frames.len(), 1); - } 
+ #[test] + fn frame_gap() { + let (output_tx, _) = flume::bounded(4); + let mut mixer = AudioMixerBuilder::new(output_tx); + let start = Timestamps::now(); - #[test] - fn test_mixer_initialization() { - let (tx, _rx) = flume::bounded(64); - let mut mixer = AudioMixer::new(tx); - - assert!(!mixer.has_sources()); - assert_eq!(mixer.sources.len(), 0); - assert_eq!(mixer.output_sample_count, 0); - assert_eq!(mixer.output_sample_rate, 48000); - - // Add sources - let info = create_test_audio_info(); - let (_source_tx, source_rx) = flume::bounded(32); - mixer.add_source(info, source_rx); - - assert!(mixer.has_sources()); - assert_eq!(mixer.sources.len(), 1); - } + let (tx, rx) = flume::bounded(4); + mixer.add_source(SOURCE_INFO, rx); - #[test] - fn test_mixer_sink_creation() { - let (tx, _rx) = flume::bounded(64); - let mut mixer = AudioMixer::new(tx); + let mut mixer = mixer.build().unwrap(); - let info = create_test_audio_info(); - let sink = mixer.sink(info); + tx.send(( + SOURCE_INFO.wrap_frame(&vec![0; SAMPLES_SECOND / 2]), + Timestamp::Instant(start.instant()), + )) + .unwrap(); - assert!(mixer.has_sources()); - assert_eq!(mixer.sources.len(), 1); + tx.send(( + SOURCE_INFO.wrap_frame(&vec![0; SAMPLES_SECOND / 2]), + Timestamp::Instant(start.instant() + ONE_SECOND), + )) + .unwrap(); - // Test that sink can send data - let timestamp = SourceTimestamp::Instant(Instant::now()); - let frame = create_test_frame(1024, &info); - sink.tx.send((frame, timestamp)).unwrap(); - } + mixer.buffer_sources(start); - #[test] - fn test_continuous_output_without_input() { - let info = create_test_audio_info(); - let (_tx, rx) = flume::bounded(32); - let mut source = BufferedAudioSource::new(rx, info.clone()); - let start_timestamps = SourceTimestamps::now(); - - // Test that we get silence when no input has ever been provided - let processing_time = Duration::from_millis(100); - - // First call should produce no frames (no data, no last timestamp) - let frames = 
source.process_with_gap_filling(processing_time, start_timestamps); - assert_eq!(frames.len(), 0); - - // After processing once, subsequent calls should maintain timing with silence - // Process first real frame to establish timing - let timestamp1 = SourceTimestamp::Instant(Instant::now()); - source - .buffer - .push_back((create_test_frame(1024, &info), timestamp1)); - - let frames = source.process_with_gap_filling(Duration::from_millis(50), start_timestamps); - assert_eq!(frames.len(), 1); - assert!(!is_silent_frame(&frames[0])); - - // Now with no more input, but time advancing, we should get silence - let frames = source.process_with_gap_filling(Duration::from_millis(200), start_timestamps); - - // Should have generated silent frames to fill the gap - assert!(frames.len() > 0); - for frame in &frames { - assert!(is_silent_frame(frame)); - } - } + let source = &mixer.sources[0]; - #[test] - fn test_source_silence_on_empty_buffer() { - let info = create_test_audio_info(); - let (_tx, rx) = flume::bounded(32); - let mut source = BufferedAudioSource::new(rx, info.clone()); - let start_timestamps = SourceTimestamps::now(); - - // Establish initial timing with a frame - let timestamp = SourceTimestamp::Instant(Instant::now()); - source - .buffer - .push_back((create_test_frame(1024, &info), timestamp)); - - // Process the frame - let frames = source.process_with_gap_filling(Duration::from_millis(50), start_timestamps); - assert_eq!(frames.len(), 1); - assert!(!is_silent_frame(&frames[0])); - - // Now buffer is empty, advance time significantly - let frames = source.process_with_gap_filling(Duration::from_millis(500), start_timestamps); - - // Should produce silent frames for the time gap - let total_samples: usize = frames.iter().map(|f| f.samples() as usize).sum(); - - // ~450ms of silence at 48kHz should be around 21600 samples - assert!(total_samples > 20000 && total_samples < 23000); - - // All frames should be silent - for frame in &frames { - 
assert!(is_silent_frame(frame)); - } - } + assert_eq!(source.buffer.len(), 3); + assert!(source.rx.is_empty()); - #[test] - fn test_mixer_output_with_silent_sources() { - // Test that buffered sources produce continuous output when needed - let info = create_test_audio_info(); - let (_tx, rx) = flume::bounded(32); - let mut source = BufferedAudioSource::new(rx, info.clone()); - let start_timestamps = SourceTimestamps::now(); - - // First, establish timing with an initial frame - let timestamp1 = SourceTimestamp::Instant(Instant::now()); - source - .buffer - .push_back((create_test_frame(1024, &info), timestamp1)); - - // Process the initial frame - let frames = source.process_with_gap_filling(Duration::from_millis(21), start_timestamps); - assert_eq!(frames.len(), 1); - assert!(!is_silent_frame(&frames[0])); - - // Now simulate continuous time progression without new input - // The source should generate silence to maintain output timing - let mut total_frames = Vec::new(); - let mut current_time = Duration::from_millis(21); - - for _ in 0..10 { - current_time += Duration::from_millis(21); - let frames = source.process_with_gap_filling(current_time, start_timestamps); - total_frames.extend(frames); + assert_eq!(source.buffer[1].1.duration_since(start), ONE_SECOND / 2); + assert_eq!( + source.buffer[1].0.samples(), + SOURCE_INFO.rate() as usize / 2 + ); } - // We should have received silent frames to maintain continuous output - assert!( - !total_frames.is_empty(), - "Should have generated silence frames" - ); - - // All generated frames should be silent - for frame in &total_frames { - assert!(is_silent_frame(frame), "Generated frames should be silent"); - } + #[test] + fn start_gap() { + let (output_tx, _) = flume::bounded(4); + let mut mixer = AudioMixerBuilder::new(output_tx); + let start = Timestamps::now(); - // Total samples should roughly match the time progression - let total_samples: usize = total_frames.iter().map(|f| f.samples() as usize).sum(); - let 
expected_samples = (210.0 / 1000.0 * 48000.0) as usize; // 210ms at 48kHz - assert!( - total_samples > expected_samples * 9 / 10 && total_samples < expected_samples * 11 / 10, - "Sample count should match time progression: got {}, expected ~{}", - total_samples, - expected_samples - ); - } + let (tx, rx) = flume::bounded(4); + mixer.add_source(SOURCE_INFO, rx); - #[test] - fn test_initial_silence_generation() { - // Test that sources only generate silence after they've processed actual data - let info = create_test_audio_info(); - let (tx, rx) = flume::bounded(32); - let mut source = BufferedAudioSource::new(rx, info.clone()); - let start_timestamps = SourceTimestamps::now(); - - // First, no silence should be generated without any prior data - let target_time = Duration::from_millis(100); - let frames = source.process_with_gap_filling(target_time, start_timestamps); - assert!( - frames.is_empty(), - "Should not generate silence without prior data" - ); + let mut mixer = mixer.build().unwrap(); - // Send and process one frame to establish timing - let timestamp = SourceTimestamp::Instant(Instant::now()); - tx.send((create_test_frame(1024, &info), timestamp)) + tx.send(( + SOURCE_INFO.wrap_frame(&vec![0; SAMPLES_SECOND / 2]), + Timestamp::Instant(start.instant() + ONE_SECOND / 2), + )) .unwrap(); - source.fill_buffer(); - let frames = source.process_with_gap_filling(Duration::from_millis(50), start_timestamps); - assert_eq!(frames.len(), 1, "Should process the buffered frame"); - assert!( - !is_silent_frame(&frames[0]), - "First frame should not be silent" - ); + mixer.buffer_sources(start); - // Now with no more input but time advancing, we should get silence - let frames = source.process_with_gap_filling(Duration::from_millis(150), start_timestamps); - assert!( - !frames.is_empty(), - "Should generate silence to fill time gap" - ); + let source = &mixer.sources[0]; + + assert_eq!(source.buffer.len(), 2); + assert!(source.rx.is_empty()); - // All generated frames 
should be silent - for frame in &frames { - assert!( - is_silent_frame(frame), - "Gap-filling frames should be silent" + assert_eq!(source.buffer[0].1.duration_since(start), Duration::ZERO); + assert_eq!( + source.buffer[0].0.samples(), + SOURCE_INFO.rate() as usize / 2 ); } - // After generating initial silence, last_processed_timestamp should be set - // timeline_position should be updated even without last_processed_timestamp - assert_eq!(source.timeline_position, target_time); + #[test] + fn after_draining() { + let (output_tx, _) = flume::bounded(4); + let mut mixer = AudioMixerBuilder::new(output_tx); + let start = Timestamps::now(); - // Subsequent calls shouldn't generate more initial silence - let frames2 = source.generate_initial_silence_if_needed(target_time, start_timestamps); - assert!( - frames2.is_empty(), - "Should not generate initial silence twice" - ); - } + let (tx, rx) = flume::bounded(4); + mixer.add_source(SOURCE_INFO, rx); - #[test] - fn test_mixer_handles_source_disconnection() { - // Test that buffered source handles disconnection gracefully - let info = create_test_audio_info(); - let (source_tx, source_rx) = flume::bounded(32); - let mut source = BufferedAudioSource::new(source_rx, info.clone()); - - // Send one frame - let timestamp = SourceTimestamp::Instant(Instant::now()); - source_tx - .send((create_test_frame(1024, &info), timestamp)) + let mut mixer = mixer.build().unwrap(); + + tx.send(( + SOURCE_INFO.wrap_frame(&vec![0; SAMPLES_SECOND / 2]), + Timestamp::Instant(start.instant()), + )) .unwrap(); - // Fill buffer to get the frame - source.fill_buffer(); - assert_eq!(source.buffer.len(), 1); + mixer.buffer_sources(start); - // Disconnect the source - drop(source_tx); + mixer.sources[0].buffer.clear(); - // Verify disconnection is detected - assert!(source.has_sufficient_buffer()); + tx.send(( + SOURCE_INFO.wrap_frame(&vec![0; SAMPLES_SECOND / 2]), + Timestamp::Instant(start.instant() + ONE_SECOND), + )) + .unwrap(); - // Should 
still be able to process the buffered frame - let start_timestamps = SourceTimestamps::now(); - let frames = source.process_with_gap_filling(Duration::from_millis(50), start_timestamps); - assert_eq!(frames.len(), 1); - assert!(!is_silent_frame(&frames[0])); + mixer.buffer_sources(start); - // After processing the buffered frame, should generate silence for continuity - let frames = source.process_with_gap_filling(Duration::from_millis(100), start_timestamps); - assert!(!frames.is_empty()); - for frame in &frames { - assert!(is_silent_frame(frame)); - } - } + let source = &mixer.sources[0]; - #[test] - fn test_continuous_timing_maintenance() { - // Test that sources maintain consistent timing across gaps and silence - let info = create_test_audio_info(); - let (_tx, rx) = flume::bounded(32); - let mut source = BufferedAudioSource::new(rx, info.clone()); - let start_timestamps = SourceTimestamps::now(); - - // Process frames with intermittent data - let base_time = Instant::now(); - - // Frame at t=0 - source.buffer.push_back(( - create_test_frame(1024, &info), - SourceTimestamp::Instant(base_time), - )); - let _frames = source.process_with_gap_filling(Duration::from_millis(21), start_timestamps); - assert_eq!(source.total_samples_processed, 1024); - - // No frame at t=21ms (should generate silence) - let _frames = source.process_with_gap_filling(Duration::from_millis(42), start_timestamps); - // Frames may or may not be generated depending on timing thresholds - - // Frame at t=63ms (after gap) - source.buffer.push_back(( - create_test_frame(1024, &info), - SourceTimestamp::Instant(base_time + Duration::from_millis(63)), - )); - let frames = source.process_with_gap_filling(Duration::from_millis(84), start_timestamps); - - // Should have at least one frame (the real frame, and possibly silence) - assert!(!frames.is_empty()); - - // Verify total samples processed is reasonable - // We processed at least 2 frames (2048 samples minimum) - assert!( - 
source.total_samples_processed >= 2048, - "Should have processed at least 2 frames worth of samples: got {}", - source.total_samples_processed - ); + assert_eq!(source.buffer.len(), 2); + assert!(source.rx.is_empty()); + + let item = &source.buffer[0]; + assert_eq!(item.1.duration_since(start), ONE_SECOND / 2); + assert_eq!(item.0.samples(), SOURCE_INFO.rate() as usize / 2); + + let item = &source.buffer[1]; + assert_eq!(item.1.duration_since(start), ONE_SECOND); + assert_eq!(item.0.samples(), SOURCE_INFO.rate() as usize / 2); + } } } diff --git a/crates/recording/src/sources/camera.rs b/crates/recording/src/sources/camera.rs index 26cdeb3f1f..f9da4beb3b 100644 --- a/crates/recording/src/sources/camera.rs +++ b/crates/recording/src/sources/camera.rs @@ -1,41 +1,27 @@ -use cap_media_info::VideoInfo; -use ffmpeg::frame; -use flume::{Receiver, Sender}; -use std::{ - sync::Arc, - time::{Duration, Instant}, -}; -use tracing::{error, info}; - use crate::{ MediaError, - capture_pipeline::SourceTimestamp, feeds::camera::{self, CameraFeedLock, RawCameraFrame}, pipeline::{control::Control, task::PipelineSourceTask}, }; +use cap_media_info::VideoInfo; +use cap_timestamp::Timestamp; +use ffmpeg::frame; +use flume::{Receiver, Sender}; +use std::sync::Arc; +use tracing::{error, info}; pub struct CameraSource { feed: Arc, video_info: VideoInfo, - output: Sender<(frame::Video, SourceTimestamp)>, - first_frame_instant: Option, - first_frame_timestamp: Option, - start_instant: Instant, + output: Sender<(frame::Video, Timestamp)>, } impl CameraSource { - pub fn init( - feed: Arc, - output: Sender<(frame::Video, SourceTimestamp)>, - start_instant: Instant, - ) -> Self { + pub fn init(feed: Arc, output: Sender<(frame::Video, Timestamp)>) -> Self { Self { video_info: *feed.video_info(), feed, output, - first_frame_instant: None, - first_frame_timestamp: None, - start_instant, } } diff --git a/crates/recording/src/sources/mod.rs b/crates/recording/src/sources/mod.rs index 
d3cee5ec26..7dcc47b783 100644 --- a/crates/recording/src/sources/mod.rs +++ b/crates/recording/src/sources/mod.rs @@ -1,10 +1,8 @@ pub mod audio_input; pub mod audio_mixer; pub mod camera; -pub mod new_audio_mixer; pub mod screen_capture; pub use audio_input::*; -pub use audio_mixer::*; pub use camera::*; pub use screen_capture::*; diff --git a/crates/recording/src/sources/new_audio_mixer.rs b/crates/recording/src/sources/new_audio_mixer.rs deleted file mode 100644 index f2a2c79582..0000000000 --- a/crates/recording/src/sources/new_audio_mixer.rs +++ /dev/null @@ -1,539 +0,0 @@ -use std::{ - collections::VecDeque, - time::{Duration, Instant}, -}; - -use cap_media_info::AudioInfo; -use flume::{Receiver, Sender}; -use tracing::debug; - -use crate::{ - capture_pipeline::{SourceTimestamp, SourceTimestamps}, - pipeline::task::PipelineSourceTask, -}; - -struct MixerSource { - rx: Receiver<(ffmpeg::frame::Audio, SourceTimestamp)>, - info: AudioInfo, - buffer: VecDeque<(ffmpeg::frame::Audio, SourceTimestamp)>, - buffer_last: Option<(SourceTimestamp, Duration)>, -} - -pub struct AudioMixerBuilder { - sources: Vec, - output: Sender<(ffmpeg::frame::Audio, SourceTimestamp)>, -} - -impl AudioMixerBuilder { - pub fn new(output: Sender<(ffmpeg::frame::Audio, SourceTimestamp)>) -> Self { - Self { - sources: Vec::new(), - output, - } - } - - pub fn has_sources(&self) -> bool { - !self.sources.is_empty() - } - - pub fn add_source( - &mut self, - info: AudioInfo, - rx: Receiver<(ffmpeg::frame::Audio, SourceTimestamp)>, - ) { - self.sources.push(MixerSource { - info, - rx, - buffer: VecDeque::new(), - buffer_last: None, - }); - } - - pub fn build(self) -> Result { - let mut filter_graph = ffmpeg::filter::Graph::new(); - - let mut abuffers = self - .sources - .iter() - .enumerate() - .map(|(i, source)| { - let info = &source.info; - let args = format!( - "time_base={}:sample_rate={}:sample_fmt={}:channel_layout=0x{:x}", - info.time_base, - info.rate(), - info.sample_format.name(), - 
info.channel_layout().bits() - ); - - debug!("audio mixer input {i}: {args}"); - - filter_graph.add( - &ffmpeg::filter::find("abuffer").expect("Failed to find abuffer filter"), - &format!("src{i}"), - &args, - ) - }) - .collect::, _>>()?; - - let mut amix = filter_graph.add( - &ffmpeg::filter::find("amix").expect("Failed to find amix filter"), - "amix", - &format!( - "inputs={}:duration=first:dropout_transition=0", - abuffers.len() - ), - )?; - - let aformat_args = "sample_fmts=flt:sample_rates=48000:channel_layouts=stereo"; - - let mut aformat = filter_graph.add( - &ffmpeg::filter::find("aformat").expect("Failed to find aformat filter"), - "aformat", - aformat_args, - )?; - - let mut abuffersink = filter_graph.add( - &ffmpeg::filter::find("abuffersink").expect("Failed to find abuffersink filter"), - "sink", - "", - )?; - - for (i, abuffer) in abuffers.iter_mut().enumerate() { - abuffer.link(0, &mut amix, i as u32); - } - - amix.link(0, &mut aformat, 0); - aformat.link(0, &mut abuffersink, 0); - - filter_graph.validate()?; - - Ok(AudioMixer { - sources: self.sources, - samples_out: 0, - output: self.output, - last_tick: None, - filter_graph, - abuffers, - amix, - aformat, - abuffersink, - }) - } -} - -pub struct AudioMixer { - sources: Vec, - samples_out: usize, - output: Sender<(ffmpeg::frame::Audio, SourceTimestamp)>, - last_tick: Option, - - filter_graph: ffmpeg::filter::Graph, - abuffers: Vec, - amix: ffmpeg::filter::Context, - aformat: ffmpeg::filter::Context, - abuffersink: ffmpeg::filter::Context, -} - -impl AudioMixer { - pub const INFO: AudioInfo = AudioInfo::new_raw( - cap_media_info::Sample::F32(cap_media_info::Type::Packed), - 48_000, - 2, - ); - pub const BUFFER_TIMEOUT: Duration = Duration::from_millis(5); - - fn buffer_sources(&mut self, start: SourceTimestamps) { - for source in &mut self.sources { - while let Ok((frame, timestamp)) = source.rx.try_recv() { - // if gap between incoming and last, insert silence - if let Some((buffer_last_timestamp, 
buffer_last_duration)) = source.buffer_last { - let timestamp_elapsed = timestamp.duration_since(start); - let buffer_last_elapsed = buffer_last_timestamp.duration_since(start); - - if timestamp_elapsed > buffer_last_elapsed { - let elapsed_since_last_frame = timestamp_elapsed - buffer_last_elapsed; - - if elapsed_since_last_frame < buffer_last_duration - && buffer_last_duration - elapsed_since_last_frame - >= Duration::from_millis(1) - { - let gap = timestamp.duration_since(start) - - buffer_last_timestamp.duration_since(start) - - buffer_last_duration; - - debug!("Gap between last buffer frame, inserting {gap:?} of silence"); - - let silence_samples_needed = - (gap.as_secs_f64()) * source.info.rate() as f64; - let silence_samples_count = silence_samples_needed.ceil() as usize; - - let mut frame = ffmpeg::frame::Audio::new( - source.info.sample_format, - silence_samples_count, - source.info.channel_layout(), - ); - - frame.set_rate(source.info.rate() as u32); - - source.buffer_last = Some(( - &buffer_last_timestamp + gap, - Duration::from_secs_f64( - silence_samples_count as f64 / source.info.rate() as f64, - ), - )); - source.buffer.push_back((frame, buffer_last_timestamp)); - } - } - } else { - let gap = timestamp.duration_since(start); - if !gap.is_zero() { - let silence_samples_needed = gap.as_secs_f64() * source.info.rate() as f64; - let silence_samples_needed = silence_samples_needed.ceil() as usize; - - debug!("Gap from beginning of stream, inserting {gap:?} of silence"); - - // let mut frame = ffmpeg::frame::Audio::new( - // source.info.sample_format, - // silence_samples_needed, - // source.info.channel_layout(), - // ); - - // frame.set_rate(source.info.rate() as u32); - - // let timestamp = SourceTimestamp::Instant(start.instant()); - // source.buffer_last = Some(( - // timestamp, - // Duration::from_secs_f64( - // silence_samples_needed as f64 / source.info.rate() as f64, - // ), - // )); - // source.buffer.push_back((frame, timestamp)); - } - } - - 
source.buffer_last = Some(( - timestamp, - Duration::from_secs_f64(frame.samples() as f64 / frame.rate() as f64), - )); - source.buffer.push_back((frame, timestamp)); - } - } - } - - fn tick(&mut self, start: SourceTimestamps, now: Instant) -> Result<(), ()> { - // if let Some(last_tick) = self.last_tick { - // let time_since_last_tick = now.duration_since(last_tick); - // if time_since_last_tick > Self::BUFFER_TIMEOUT { - // let gap = Self::BUFFER_TIMEOUT; - // for source in &mut self.sources { - // let silence_samples_needed = (gap.as_secs_f64()) * source.info.rate() as f64; - // let silence_samples_count = silence_samples_needed.ceil() as usize; - - // let mut frame = ffmpeg::frame::Audio::new( - // source.info.sample_format, - // silence_samples_count, - // source.info.channel_layout(), - // ); - // dbg!(source.info.sample_format); - - // frame.set_rate(source.info.rate() as u32); - - // source.buffer_last = Some((SourceTimestamp::Instant(last_tick), gap)); - // source - // .buffer - // .push_back((frame, SourceTimestamp::Instant(last_tick))); - // } - // } - // } else { - // let time_since_last_tick = now.duration_since(start.instant()); - // if time_since_last_tick > Self::BUFFER_TIMEOUT { - // let gap = Self::BUFFER_TIMEOUT; - // for source in &mut self.sources { - // let silence_samples_needed = (gap.as_secs_f64()) * source.info.rate() as f64; - // let silence_samples_count = silence_samples_needed.ceil() as usize; - - // let mut frame = ffmpeg::frame::Audio::new( - // source.info.sample_format, - // silence_samples_count, - // source.info.channel_layout(), - // ); - - // frame.set_rate(source.info.rate() as u32); - - // source.buffer_last = Some((SourceTimestamp::Instant(start.instant()), gap)); - // source - // .buffer - // .push_back((frame, SourceTimestamp::Instant(start.instant()))); - // } - // } - // } - - self.buffer_sources(start); - - for (i, source) in self.sources.iter_mut().enumerate() { - for buffer in source.buffer.drain(..) 
{ - let _ = self.abuffers[i].source().add(&buffer.0); - } - } - - let mut filtered = ffmpeg::frame::Audio::empty(); - while self.abuffersink.sink().frame(&mut filtered).is_ok() { - let timestamp = start.instant() - + Duration::from_secs_f64(self.samples_out as f64 / filtered.rate() as f64); - - self.samples_out += filtered.samples(); - - if self - .output - .send((filtered, SourceTimestamp::Instant(timestamp))) - .is_err() - { - return Err(()); - } - - filtered = ffmpeg::frame::Audio::empty(); - } - - self.last_tick = Some(now); - - Ok(()) - } - - pub fn run(&mut self) { - let start = SourceTimestamps::now(); - - while let Ok(()) = self.tick(start, Instant::now()) { - std::thread::sleep(Duration::from_millis(5)); - } - } - - pub fn builder(output: Sender<(ffmpeg::frame::Audio, SourceTimestamp)>) -> AudioMixerBuilder { - AudioMixerBuilder::new(output) - } -} - -impl PipelineSourceTask for AudioMixerBuilder { - fn run( - &mut self, - ready_signal: crate::pipeline::task::PipelineReadySignal, - mut control_signal: crate::pipeline::control::PipelineControlSignal, - ) -> Result<(), String> { - let start = SourceTimestamps::now(); - - let this = std::mem::replace(self, AudioMixerBuilder::new(self.output.clone())); - - let mut mixer = this.build().map_err(|e| format!("BuildMixer: {e}"))?; - - let _ = ready_signal.send(Ok(())); - - loop { - if control_signal - .last() - .map(|v| matches!(v, crate::pipeline::control::Control::Shutdown)) - .unwrap_or(false) - { - break; - } - - mixer - .tick(start, Instant::now()) - .map_err(|()| format!("Audio mixer tick failed"))?; - - std::thread::sleep(Duration::from_millis(5)); - } - - Ok(()) - } -} - -#[cfg(test)] -mod test { - use super::*; - - const SAMPLE_RATE: u32 = 48_000; - const SOURCE_INFO: AudioInfo = AudioInfo::new_raw( - cap_media_info::Sample::U8(cap_media_info::Type::Packed), - SAMPLE_RATE, - 1, - ); - const ONE_SECOND: Duration = Duration::from_secs(1); - const SAMPLES_SECOND: usize = SOURCE_INFO.rate() as usize; - - 
#[test] - fn mix_sources() { - let (tx, output_rx) = flume::bounded(4); - let mut mixer = AudioMixerBuilder::new(tx); - let start = SourceTimestamps::now(); - - let (tx1, rx) = flume::bounded(4); - mixer.add_source(SOURCE_INFO, rx); - - let (tx2, rx) = flume::bounded(4); - mixer.add_source(SOURCE_INFO, rx); - - let mut mixer = mixer.build().unwrap(); - - tx1.send(( - SOURCE_INFO.wrap_frame(&vec![128, 255, 255, 255]), - SourceTimestamp::Instant(start.instant()), - )) - .unwrap(); - tx2.send(( - SOURCE_INFO.wrap_frame(&vec![128, 128, 1, 255]), - SourceTimestamp::Instant(start.instant()), - )) - .unwrap(); - - let _ = mixer.tick(start); - - let (frame, _) = output_rx.recv().expect("No output frame"); - - let byte_count = frame.samples() * frame.channels() as usize; - let samples: &[f32] = unsafe { std::mem::transmute(&frame.data(0)[0..byte_count]) }; - - assert_eq!(samples[0], 0.0); - assert_eq!(samples[0], samples[1]); - - assert_eq!(samples[4], 0.0); - assert_eq!(samples[4], samples[5]); - } - - mod source_buffer { - use super::*; - - #[test] - fn single_frame() { - let (output_tx, _) = flume::bounded(4); - let mut mixer = AudioMixerBuilder::new(output_tx); - let start = SourceTimestamps::now(); - - let (tx, rx) = flume::bounded(4); - mixer.add_source(SOURCE_INFO, rx); - - let mut mixer = mixer.build().unwrap(); - - tx.send(( - SOURCE_INFO.wrap_frame(&vec![0; SAMPLES_SECOND / 2]), - SourceTimestamp::Instant(start.instant()), - )) - .unwrap(); - - mixer.buffer_sources(start); - - assert_eq!(mixer.sources[0].buffer.len(), 1); - assert!(mixer.sources[0].rx.is_empty()); - } - - #[test] - fn frame_gap() { - let (output_tx, _) = flume::bounded(4); - let mut mixer = AudioMixerBuilder::new(output_tx); - let start = SourceTimestamps::now(); - - let (tx, rx) = flume::bounded(4); - mixer.add_source(SOURCE_INFO, rx); - - let mut mixer = mixer.build().unwrap(); - - tx.send(( - SOURCE_INFO.wrap_frame(&vec![0; SAMPLES_SECOND / 2]), - SourceTimestamp::Instant(start.instant()), - )) 
- .unwrap(); - - tx.send(( - SOURCE_INFO.wrap_frame(&vec![0; SAMPLES_SECOND / 2]), - SourceTimestamp::Instant(start.instant() + ONE_SECOND), - )) - .unwrap(); - - mixer.buffer_sources(start); - - let source = &mixer.sources[0]; - - assert_eq!(source.buffer.len(), 3); - assert!(source.rx.is_empty()); - - assert_eq!(source.buffer[1].1.duration_since(start), ONE_SECOND / 2); - assert_eq!( - source.buffer[1].0.samples(), - SOURCE_INFO.rate() as usize / 2 - ); - } - - #[test] - fn start_gap() { - let (output_tx, _) = flume::bounded(4); - let mut mixer = AudioMixerBuilder::new(output_tx); - let start = SourceTimestamps::now(); - - let (tx, rx) = flume::bounded(4); - mixer.add_source(SOURCE_INFO, rx); - - let mut mixer = mixer.build().unwrap(); - - tx.send(( - SOURCE_INFO.wrap_frame(&vec![0; SAMPLES_SECOND / 2]), - SourceTimestamp::Instant(start.instant() + ONE_SECOND / 2), - )) - .unwrap(); - - mixer.buffer_sources(start); - - let source = &mixer.sources[0]; - - assert_eq!(source.buffer.len(), 2); - assert!(source.rx.is_empty()); - - assert_eq!(source.buffer[0].1.duration_since(start), Duration::ZERO); - assert_eq!( - source.buffer[0].0.samples(), - SOURCE_INFO.rate() as usize / 2 - ); - } - - #[test] - fn after_draining() { - let (output_tx, _) = flume::bounded(4); - let mut mixer = AudioMixerBuilder::new(output_tx); - let start = SourceTimestamps::now(); - - let (tx, rx) = flume::bounded(4); - mixer.add_source(SOURCE_INFO, rx); - - let mut mixer = mixer.build().unwrap(); - - tx.send(( - SOURCE_INFO.wrap_frame(&vec![0; SAMPLES_SECOND / 2]), - SourceTimestamp::Instant(start.instant()), - )) - .unwrap(); - - mixer.buffer_sources(start); - - mixer.sources[0].buffer.clear(); - - tx.send(( - SOURCE_INFO.wrap_frame(&vec![0; SAMPLES_SECOND / 2]), - SourceTimestamp::Instant(start.instant() + ONE_SECOND), - )) - .unwrap(); - - mixer.buffer_sources(start); - - let source = &mixer.sources[0]; - - assert_eq!(source.buffer.len(), 2); - assert!(source.rx.is_empty()); - - let item = 
&source.buffer[0]; - assert_eq!(item.1.duration_since(start), ONE_SECOND / 2); - assert_eq!(item.0.samples(), SOURCE_INFO.rate() as usize / 2); - - let item = &source.buffer[1]; - assert_eq!(item.1.duration_since(start), ONE_SECOND); - assert_eq!(item.0.samples(), SOURCE_INFO.rate() as usize / 2); - } - } -} diff --git a/crates/recording/src/sources/screen_capture/macos.rs b/crates/recording/src/sources/screen_capture/macos.rs index cca201792b..6dfbe997a0 100644 --- a/crates/recording/src/sources/screen_capture/macos.rs +++ b/crates/recording/src/sources/screen_capture/macos.rs @@ -1,5 +1,4 @@ use super::*; -use cap_ffmpeg_utils::PlanarData; use cidre::*; use kameo::prelude::*; @@ -25,11 +24,8 @@ impl ScreenCaptureFormat for CMSampleBufferCapture { #[derive(Actor)] struct FrameHandler { - start_time_unix: f64, - start_cmtime: f64, - start_time_f64: f64, - video_tx: Sender<(arc::R, f64)>, - audio_tx: Option>, + video_tx: Sender<(arc::R, Timestamp)>, + audio_tx: Option>, } impl Message for FrameHandler { @@ -43,9 +39,9 @@ impl Message for FrameHandler { let frame = msg.0; let sample_buffer = frame.sample_buf(); - let frame_time = sample_buffer.pts().value as f64 / sample_buffer.pts().scale as f64; - let unix_timestamp = self.start_time_unix + frame_time - self.start_cmtime; - let relative_time = unix_timestamp - self.start_time_f64; + let timestamp = Timestamp::MachAbsoluteTime(cap_timestamp::MachAbsoluteTimestamp::new( + cm::Clock::convert_host_time_to_sys_units(sample_buffer.pts()), + )); match &frame { scap_screencapturekit::Frame::Screen(frame) => { @@ -62,7 +58,7 @@ impl Message for FrameHandler { if check_skip_send().is_ok() && self .video_tx - .send((sample_buffer.retained(), relative_time)) + .send((sample_buffer.retained(), timestamp)) .is_err() { warn!("Pipeline is unreachable"); @@ -94,14 +90,12 @@ impl Message for FrameHandler { frame.set_rate(48_000); let data_bytes_size = buf_list.list().buffers[0].data_bytes_size; for i in 0..frame.planes() { - 
frame.plane_data_mut(i).copy_from_slice( + frame.data_mut(i).copy_from_slice( &slice[i * data_bytes_size as usize..(i + 1) * data_bytes_size as usize], ); } - frame.set_pts(Some((relative_time * AV_TIME_BASE_Q.den as f64) as i64)); - - let _ = audio_tx.send((frame, relative_time)); + let _ = audio_tx.send((frame, timestamp)); } _ => {} } @@ -128,20 +122,6 @@ impl PipelineSourceTask for ScreenCaptureSource { ready_signal: crate::pipeline::task::PipelineReadySignal, control_signal: crate::pipeline::control::PipelineControlSignal, ) -> Result<(), String> { - let start = std::time::SystemTime::now(); - let start_time_unix = start - .duration_since(std::time::UNIX_EPOCH) - .expect("Time went backwards") - .as_secs_f64(); - let start_cmtime = cidre::cm::Clock::host_time_clock().time(); - let start_cmtime = start_cmtime.value as f64 / start_cmtime.scale as f64; - - let start_time_f64 = self - .start_time - .duration_since(SystemTime::UNIX_EPOCH) - .unwrap() - .as_secs_f64(); - let video_tx = self.video_tx.clone(); let audio_tx = self.audio_tx.clone(); let config = self.config.clone(); @@ -149,13 +129,7 @@ impl PipelineSourceTask for ScreenCaptureSource { self.tokio_handle .block_on(async move { let captures_audio = audio_tx.is_some(); - let frame_handler = FrameHandler::spawn(FrameHandler { - video_tx, - audio_tx, - start_time_unix, - start_cmtime, - start_time_f64, - }); + let frame_handler = FrameHandler::spawn(FrameHandler { video_tx, audio_tx }); let display = Display::from_id(&config.display) .ok_or_else(|| SourceError::NoDisplay(config.display))?; diff --git a/crates/recording/src/sources/screen_capture/mod.rs b/crates/recording/src/sources/screen_capture/mod.rs index 61f9604f26..b0a95525b9 100644 --- a/crates/recording/src/sources/screen_capture/mod.rs +++ b/crates/recording/src/sources/screen_capture/mod.rs @@ -1,6 +1,7 @@ +use crate::pipeline::{control::Control, task::PipelineSourceTask}; use cap_cursor_capture::CursorCropBounds; use cap_media_info::{AudioInfo, 
VideoInfo}; -use ffmpeg::sys::AV_TIME_BASE_Q; +use cap_timestamp::Timestamp; use flume::Sender; use scap_targets::{Display, DisplayId, Window, WindowId, bounds::*}; use serde::{Deserialize, Serialize}; @@ -8,11 +9,6 @@ use specta::Type; use std::time::SystemTime; use tracing::{error, warn}; -use crate::{ - capture_pipeline::{SourceTimestamp, SourceTimestamps}, - pipeline::{control::Control, task::PipelineSourceTask}, -}; - #[cfg(windows)] mod windows; #[cfg(windows)] @@ -197,8 +193,9 @@ pub struct ScreenCaptureSource { config: Config, video_info: VideoInfo, tokio_handle: tokio::runtime::Handle, - video_tx: Sender<(TCaptureFormat::VideoFormat, SourceTimestamp)>, - audio_tx: Option>, + video_tx: Sender<(TCaptureFormat::VideoFormat, Timestamp)>, + audio_tx: Option>, + start_time: SystemTime, _phantom: std::marker::PhantomData, #[cfg(windows)] d3d_device: ::windows::Win32::Graphics::Direct3D11::ID3D11Device, @@ -238,6 +235,7 @@ impl Clone for ScreenCaptureSource ScreenCaptureSource { target: &ScreenCaptureTarget, show_cursor: bool, max_fps: u32, - video_tx: Sender<(TCaptureFormat::VideoFormat, SourceTimestamp)>, - audio_tx: Option>, + video_tx: Sender<(TCaptureFormat::VideoFormat, Timestamp)>, + audio_tx: Option>, + start_time: SystemTime, tokio_handle: tokio::runtime::Handle, #[cfg(windows)] d3d_device: ::windows::Win32::Graphics::Direct3D11::ID3D11Device, ) -> Result { @@ -404,6 +403,7 @@ impl ScreenCaptureSource { video_tx, audio_tx, tokio_handle, + start_time, _phantom: std::marker::PhantomData, #[cfg(windows)] d3d_device, diff --git a/crates/recording/src/studio_recording.rs b/crates/recording/src/studio_recording.rs index 737eeeefd3..a9fec9a20f 100644 --- a/crates/recording/src/studio_recording.rs +++ b/crates/recording/src/studio_recording.rs @@ -1,9 +1,6 @@ use crate::{ ActorError, MediaError, RecordingBaseInputs, RecordingError, - capture_pipeline::{ - MakeCapturePipeline, ScreenCaptureMethod, SourceTimestamp, SourceTimestamps, - create_screen_capture, - }, + 
capture_pipeline::{MakeCapturePipeline, ScreenCaptureMethod, create_screen_capture}, cursor::{CursorActor, Cursors, spawn_cursor_recorder}, feeds::{camera::CameraFeedLock, microphone::MicrophoneFeedLock}, pipeline::Pipeline, @@ -12,6 +9,7 @@ use crate::{ use cap_enc_ffmpeg::{H264Encoder, MP4File, OggFile, OpusEncoder}; use cap_media_info::VideoInfo; use cap_project::{CursorEvents, StudioRecordingMeta}; +use cap_timestamp::{Timestamp, Timestamps}; use cap_utils::spawn_actor; use flume::Receiver; use relative_path::RelativePathBuf; @@ -64,7 +62,7 @@ pub struct StudioRecordingSegment { pub struct PipelineOutput { pub path: PathBuf, - pub first_timestamp_rx: flume::Receiver, + pub first_timestamp_rx: flume::Receiver, } pub struct ScreenPipelineOutput { @@ -73,7 +71,7 @@ pub struct ScreenPipelineOutput { } struct StudioRecordingPipeline { - pub start_time: SourceTimestamps, + pub start_time: Timestamps, pub inner: Pipeline, pub screen: ScreenPipelineOutput, pub microphone: Option, @@ -149,7 +147,7 @@ pub async fn spawn_studio_recording_actor( let cursors_dir = ensure_dir(&content_dir.join("cursors"))?; // TODO: move everything to start_instant - let start_time = SystemTime::now(); + let start_time = Timestamps::now(); let start_instant = Instant::now(); if let Some(camera_feed) = &base_inputs.camera_feed { @@ -167,7 +165,6 @@ pub async fn spawn_studio_recording_actor( base_inputs.clone(), custom_cursor_capture, start_time, - start_instant, ); let index = 0; @@ -575,8 +572,7 @@ struct SegmentPipelineFactory { cursors_dir: PathBuf, base_inputs: RecordingBaseInputs, custom_cursor_capture: bool, - start_time: SystemTime, - start_instant: Instant, + start_time: Timestamps, index: u32, } @@ -587,8 +583,7 @@ impl SegmentPipelineFactory { cursors_dir: PathBuf, base_inputs: RecordingBaseInputs, custom_cursor_capture: bool, - start_time: SystemTime, - start_instant: Instant, + start_time: Timestamps, ) -> Self { Self { segments_dir, @@ -596,7 +591,6 @@ impl SegmentPipelineFactory 
{ base_inputs, custom_cursor_capture, start_time, - start_instant, index: 0, } } @@ -624,7 +618,6 @@ impl SegmentPipelineFactory { next_cursors_id, self.custom_cursor_capture, self.start_time, - self.start_instant, ) .await?; @@ -665,8 +658,7 @@ async fn create_segment_pipeline( prev_cursors: Cursors, next_cursors_id: u32, custom_cursor_capture: bool, - start_time: SystemTime, - start_instant: Instant, + start_time: Timestamps, ) -> Result< ( StudioRecordingPipeline, @@ -674,8 +666,6 @@ async fn create_segment_pipeline( ), CreateSegmentPipelineError, > { - let start_time = SourceTimestamps::now(); - let system_audio = if capture_system_audio { let (tx, rx) = flume::bounded(64); (Some(tx), Some(rx)) @@ -698,6 +688,7 @@ async fn create_segment_pipeline( !custom_cursor_capture, 120, system_audio.0, + start_time.system_time(), #[cfg(windows)] d3d_device, ) @@ -826,7 +817,7 @@ async fn create_segment_pipeline( let camera = if let Some(camera_feed) = camera_feed { let (tx, rx) = flume::bounded(8); - let camera_source = CameraSource::init(camera_feed, tx, start_instant); + let camera_source = CameraSource::init(camera_feed, tx); let camera_config = camera_source.info(); let output_path = dir.join("camera.mp4"); @@ -852,7 +843,7 @@ async fn create_segment_pipeline( timestamp_tx.send(frame.1).unwrap(); } - if let Some(start) = start { + if let Some(_) = start { // frame.0.set_pts(Some( // ((camera_config.time_base.denominator() as f64 // / camera_config.time_base.numerator() as f64) diff --git a/crates/scap-ffmpeg/Cargo.toml b/crates/scap-ffmpeg/Cargo.toml index 4cfee27bda..0c90dba2ae 100644 --- a/crates/scap-ffmpeg/Cargo.toml +++ b/crates/scap-ffmpeg/Cargo.toml @@ -8,7 +8,6 @@ license = "MIT" ffmpeg = { workspace = true } cpal = { workspace = true } scap-cpal = { optional = true, path = "../scap-cpal" } -cap-ffmpeg-utils = { path = "../ffmpeg-utils" } [target.'cfg(windows)'.dependencies] scap-direct3d = { path = "../scap-direct3d" } diff --git 
a/crates/scap-ffmpeg/src/cpal.rs b/crates/scap-ffmpeg/src/cpal.rs index 1da8a8b1bd..0ef265db14 100644 --- a/crates/scap-ffmpeg/src/cpal.rs +++ b/crates/scap-ffmpeg/src/cpal.rs @@ -1,4 +1,3 @@ -use cap_ffmpeg_utils::PlanarData; use cpal::{SampleFormat, StreamConfig}; use ffmpeg::format::{Sample, sample}; @@ -32,7 +31,7 @@ impl DataExt for ::cpal::Data { let base = (i as usize) * plane_size; ffmpeg_frame - .plane_data_mut(i as usize) + .data_mut(i as usize) .copy_from_slice(&self.bytes()[base..base + plane_size]); } } else { diff --git a/crates/timestamp/Cargo.toml b/crates/timestamp/Cargo.toml new file mode 100644 index 0000000000..ea06425822 --- /dev/null +++ b/crates/timestamp/Cargo.toml @@ -0,0 +1,20 @@ +[package] +name = "cap-timestamp" +version = "0.1.0" +edition = "2024" + +[features] + +[dependencies] +cpal = { workspace = true } + +[target.'cfg(target_os = "macos")'.dependencies] +cidre = { workspace = true } + +[target.'cfg(target_os = "windows")'.dependencies] +windows = { workspace = true, features = [ + "Win32_System_Performance", +] } + +[lints] +workspace = true diff --git a/crates/timestamp/src/lib.rs b/crates/timestamp/src/lib.rs new file mode 100644 index 0000000000..2d34220fee --- /dev/null +++ b/crates/timestamp/src/lib.rs @@ -0,0 +1,92 @@ +use std::time::{Duration, Instant, SystemTime}; + +#[cfg(windows)] +mod win; +#[cfg(windows)] +pub use win::*; + +#[cfg(target_os = "macos")] +mod macos; +#[cfg(target_os = "macos")] +pub use macos::*; + +#[derive(Clone, Copy, Debug)] +pub enum Timestamp { + Instant(Instant), + SystemTime(SystemTime), + #[cfg(windows)] + PerformanceCounter(PerformanceCounterTimestamp), + #[cfg(target_os = "macos")] + MachAbsoluteTime(MachAbsoluteTimestamp), +} + +impl Timestamp { + pub fn duration_since(&self, start: Timestamps) -> Duration { + match self { + Self::Instant(instant) => instant.duration_since(start.instant), + Self::SystemTime(time) => time.duration_since(start.system_time).unwrap(), + #[cfg(windows)] + 
Self::PerformanceCounter(counter) => counter.duration_since(start.performance_counter), + #[cfg(target_os = "macos")] + Self::MachAbsoluteTime(time) => time.duration_since(start.mach_absolute_time), + } + } + + pub fn from_cpal(instant: cpal::StreamInstant) -> Self { + #[cfg(windows)] + Self::PerformanceCounter(PerformanceCounterTimestamp::from_cpal(instant)); + #[cfg(target_os = "macos")] + Self::MachAbsoluteTime(MachAbsoluteTimestamp::from_cpal(instant)) + } +} + +impl std::ops::Add for &Timestamp { + type Output = Timestamp; + + fn add(self, rhs: Duration) -> Self::Output { + match *self { + Timestamp::Instant(i) => Timestamp::Instant(i + rhs), + Timestamp::SystemTime(t) => Timestamp::SystemTime(t + rhs), + #[cfg(windows)] + Timestamp::PerformanceCounter(c) => Timestamp::PerformanceCounter(c + rhs), + #[cfg(target_os = "macos")] + Timestamp::MachAbsoluteTime(c) => Timestamp::MachAbsoluteTime(c + rhs), + } + } +} + +#[derive(Clone, Copy, Debug)] +pub struct Timestamps { + instant: Instant, + system_time: SystemTime, + #[cfg(windows)] + performance_counter: PerformanceCounterTimestamp, + #[cfg(target_os = "macos")] + mach_absolute_time: MachAbsoluteTimestamp, +} + +impl Timestamps { + pub fn now() -> Self { + Self { + instant: Instant::now(), + system_time: SystemTime::now(), + #[cfg(windows)] + performance_counter: PerformanceCounterTimestamp::now(), + #[cfg(target_os = "macos")] + mach_absolute_time: MachAbsoluteTimestamp::now(), + } + } + + pub fn instant(&self) -> Instant { + self.instant + } + + pub fn system_time(&self) -> SystemTime { + self.system_time + } + + #[cfg(target_os = "macos")] + pub fn mach_absolute_time(&self) -> MachAbsoluteTimestamp { + self.mach_absolute_time + } +} diff --git a/crates/timestamp/src/macos.rs b/crates/timestamp/src/macos.rs new file mode 100644 index 0000000000..a811a736f7 --- /dev/null +++ b/crates/timestamp/src/macos.rs @@ -0,0 +1,40 @@ +use std::{ops::Add, time::Duration}; + +use cidre::mach::TimeBaseInfo; + 
+#[derive(Clone, Copy, Debug)] +pub struct MachAbsoluteTimestamp(u64); + +impl MachAbsoluteTimestamp { + pub fn new(value: u64) -> Self { + Self(value) + } + + pub fn now() -> Self { + Self(cidre::mach::abs_time()) + } + + pub fn duration_since(&self, other: Self) -> Duration { + let info = TimeBaseInfo::new(); + let freq = info.numer as f64 / info.denom as f64; + + Duration::from_nanos(((self.0 - other.0) as f64 * freq) as u64) + } + + pub fn from_cpal(instant: cpal::StreamInstant) -> Self { + use cpal::host::coreaudio::StreamInstantExt; + + Self(instant.as_host_time()) + } +} + +impl Add for MachAbsoluteTimestamp { + type Output = Self; + + fn add(self, rhs: Duration) -> Self::Output { + let info = TimeBaseInfo::new(); + let freq = info.numer as f64 / info.denom as f64; + + Self((self.0 as f64 * rhs.as_secs_f64() * freq) as u64) + } +} diff --git a/crates/timestamp/src/win.rs b/crates/timestamp/src/win.rs new file mode 100644 index 0000000000..1a88a81769 --- /dev/null +++ b/crates/timestamp/src/win.rs @@ -0,0 +1,41 @@ +use windows::Win32::System::Performance::{QueryPerformanceCounter, QueryPerformanceFrequency}; + +use super::*; + +#[derive(Clone, Copy, Debug)] +pub struct PerformanceCounterTimestamp(i64); + +impl PerformanceCounterTimestamp { + pub fn new(value: i64) -> Self { + Self(value) + } + + pub fn duration_since(&self, other: Self) -> Duration { + let mut freq = 0; + unsafe { QueryPerformanceFrequency(&mut freq).unwrap() }; + + Duration::from_secs_f64((self.0 - other.0) as f64 / freq as f64) + } + + pub fn now() -> Self { + let mut value = 0; + unsafe { QueryPerformanceCounter(&mut value).unwrap() }; + Self(value) + } + + pub fn from_cpal(instant: StreamInstant) -> Self { + use cpal::host::wasapi::StreamInstantExt; + + Self(instant.as_performance_counter()) + } +} + +impl Add for PerformanceCounterTimestamp { + type Output = Self; + + fn add(self, rhs: Duration) -> Self::Output { + let mut freq = 0; + unsafe { QueryPerformanceFrequency(&mut freq) 
}.unwrap(); + Self(self.0 + (rhs.as_secs_f64() * freq as f64) as i64) + } +} From 4d72d1d507295c78c7a90b805d9ba687be00c0ca Mon Sep 17 00:00:00 2001 From: Brendan Allan Date: Tue, 9 Sep 2025 19:28:34 +0800 Subject: [PATCH 05/20] basically flawless timing on macos + huge refactor --- apps/cli/src/record.rs | 21 +- apps/desktop/src-tauri/src/recording.rs | 78 +++--- crates/enc-avfoundation/src/mp4.rs | 21 +- .../src/audio/buffered_resampler.rs | 7 +- crates/enc-ffmpeg/src/audio/opus.rs | 170 +++---------- crates/enc-ffmpeg/src/mux/ogg.rs | 4 + crates/recording/examples/recording-cli.rs | 50 ++-- crates/recording/src/capture_pipeline.rs | 14 +- crates/recording/src/feeds/microphone.rs | 71 +++--- crates/recording/src/instant_recording.rs | 150 ++++++----- crates/recording/src/lib.rs | 7 +- crates/recording/src/pipeline/builder.rs | 6 +- crates/recording/src/pipeline/mod.rs | 4 +- crates/recording/src/sources/audio_mixer.rs | 10 +- .../src/sources/screen_capture/macos.rs | 7 +- crates/recording/src/studio_recording.rs | 236 +++++++++++------- 16 files changed, 434 insertions(+), 422 deletions(-) diff --git a/apps/cli/src/record.rs b/apps/cli/src/record.rs index f05d588869..73137dcb46 100644 --- a/apps/cli/src/record.rs +++ b/apps/cli/src/record.rs @@ -1,4 +1,4 @@ -use cap_recording::screen_capture::ScreenCaptureTarget; +use cap_recording::{screen_capture::ScreenCaptureTarget, studio_recording}; use clap::Args; use scap_targets::{DisplayId, WindowId}; use std::{env::current_dir, path::PathBuf}; @@ -58,19 +58,12 @@ impl RecordStart { .path .unwrap_or_else(|| current_dir().unwrap().join(format!("{id}.cap"))); - let actor = cap_recording::spawn_studio_recording_actor( - id, - path, - cap_recording::RecordingBaseInputs { - capture_target: target_info, - capture_system_audio: self.system_audio, - mic_feed: None, - camera_feed: None, // camera.map(|c| Arc::new(Mutex::new(c))), - }, - false, - ) - .await - .map_err(|e| e.to_string())?; + let actor = 
studio_recording::Actor::builder(path, target_info) + .with_system_audio(self.system_audio) + .with_custom_cursor(false) + .build() + .await + .map_err(|e| e.to_string())?; println!("Recording starting, press Enter to stop"); diff --git a/apps/desktop/src-tauri/src/recording.rs b/apps/desktop/src-tauri/src/recording.rs index ad566014d5..fc68098fff 100644 --- a/apps/desktop/src-tauri/src/recording.rs +++ b/apps/desktop/src-tauri/src/recording.rs @@ -5,10 +5,11 @@ use cap_project::{ ZoomSegment, cursor::CursorEvents, }; use cap_recording::{ - CompletedStudioRecording, RecordingError, RecordingMode, StudioRecordingHandle, + RecordingError, RecordingMode, feeds::{camera, microphone}, - instant_recording::{CompletedInstantRecording, InstantRecordingHandle}, + instant_recording, sources::{CaptureDisplay, CaptureWindow, ScreenCaptureTarget, screen_capture}, + studio_recording, }; use cap_rendering::ProjectRecordingsMeta; use cap_utils::{ensure_dir, spawn_actor}; @@ -40,7 +41,7 @@ use crate::{ pub enum InProgressRecording { Instant { target_name: String, - handle: InstantRecordingHandle, + handle: instant_recording::ActorHandle, progressive_upload: Option, video_upload_info: VideoUploadInfo, inputs: StartRecordingInputs, @@ -48,7 +49,7 @@ pub enum InProgressRecording { }, Studio { target_name: String, - handle: StudioRecordingHandle, + handle: studio_recording::ActorHandle, inputs: StartRecordingInputs, recording_dir: PathBuf, }, @@ -132,25 +133,18 @@ impl InProgressRecording { pub enum CompletedRecording { Instant { - recording: CompletedInstantRecording, + recording: instant_recording::CompletedRecording, target_name: String, progressive_upload: Option, video_upload_info: VideoUploadInfo, }, Studio { - recording: CompletedStudioRecording, + recording: studio_recording::CompletedStudioRecording, target_name: String, }, } impl CompletedRecording { - pub fn id(&self) -> &String { - match self { - Self::Instant { recording, .. 
} => &recording.id, - Self::Studio { recording, .. } => &recording.id, - } - } - pub fn project_path(&self) -> &PathBuf { match self { Self::Instant { recording, .. } => &recording.project_path, @@ -381,25 +375,28 @@ pub async fn start_recording( Err(e) => return Err(e.to_string()), }; - let base_inputs = cap_recording::RecordingBaseInputs { - capture_target: inputs.capture_target.clone(), - capture_system_audio: inputs.capture_system_audio, - mic_feed, - camera_feed, - }; - let (actor, actor_done_rx) = match inputs.mode { RecordingMode::Studio => { - let (handle, actor_done_rx) = cap_recording::spawn_studio_recording_actor( - id.clone(), + let mut builder = studio_recording::Actor::builder( recording_dir.clone(), - base_inputs, + inputs.capture_target.clone(), + ) + .with_system_audio(inputs.capture_system_audio) + .with_custom_cursor( general_settings .map(|s| s.custom_cursor_capture) .unwrap_or_default(), - ) - .await - .map_err(|e| { + ); + + if let Some(camera_feed) = camera_feed { + builder = builder.with_camera_feed(camera_feed); + } + + if let Some(mic_feed) = mic_feed { + builder = builder.with_mic_feed(mic_feed); + } + + let (handle, actor_done_rx) = builder.build().await.map_err(|e| { error!("Failed to spawn studio recording actor: {e}"); e.to_string() })?; @@ -419,17 +416,20 @@ pub async fn start_recording( return Err("Video upload info not found".to_string()); }; - let (handle, actor_done_rx) = - cap_recording::instant_recording::spawn_instant_recording_actor( - id.clone(), - recording_dir.clone(), - base_inputs, - ) - .await - .map_err(|e| { - error!("Failed to spawn studio recording actor: {e}"); - e.to_string() - })?; + let mut builder = instant_recording::Actor::builder( + recording_dir.clone(), + inputs.capture_target.clone(), + ) + .with_system_audio(inputs.capture_system_audio); + + if let Some(mic_feed) = mic_feed { + builder = builder.with_mic_feed(mic_feed); + } + + let (handle, actor_done_rx) = builder.build().await.map_err(|e| { + 
error!("Failed to spawn studio recording actor: {e}"); + e.to_string() + })?; ( InProgressRecording::Instant { @@ -945,7 +945,7 @@ fn generate_zoom_segments_from_clicks_impl( /// Generates zoom segments based on mouse click events during recording. /// Used during the recording completion process. pub fn generate_zoom_segments_from_clicks( - recording: &CompletedStudioRecording, + recording: &studio_recording::CompletedStudioRecording, recordings: &ProjectRecordingsMeta, ) -> Vec { // Build a temporary RecordingMeta so we can use the common implementation @@ -992,7 +992,7 @@ pub fn generate_zoom_segments_for_project( fn project_config_from_recording( app: &AppHandle, - completed_recording: &CompletedStudioRecording, + completed_recording: &studio_recording::CompletedStudioRecording, recordings: &ProjectRecordingsMeta, default_config: Option, ) -> ProjectConfiguration { diff --git a/crates/enc-avfoundation/src/mp4.rs b/crates/enc-avfoundation/src/mp4.rs index fbb90636bf..9f7eeb2929 100644 --- a/crates/enc-avfoundation/src/mp4.rs +++ b/crates/enc-avfoundation/src/mp4.rs @@ -194,6 +194,8 @@ impl MP4Encoder { }) } + /// Expects frames with whatever pts values you like + /// They will be made relative when encoding pub fn queue_video_frame( &mut self, frame: &cidre::cm::SampleBuf, @@ -204,17 +206,16 @@ impl MP4Encoder { let time = frame.pts(); + let new_pts = self + .elapsed_duration + .add(time.sub(self.segment_first_timestamp.unwrap_or(time))); + if !self.is_writing { self.is_writing = true; - self.asset_writer.start_session_at_src_time(time); + self.asset_writer.start_session_at_src_time(new_pts); self.start_time = time; } - let new_pts = self - .start_time - .add(self.elapsed_duration) - .add(time.sub(self.segment_first_timestamp.unwrap_or(time))); - let mut timing = frame.timing_info(0).unwrap(); timing.pts = new_pts; let frame = frame.copy_with_new_timing(&[timing]).unwrap(); @@ -233,8 +234,10 @@ impl MP4Encoder { Ok(()) } + /// Expects frames with pts values 
relative to the first frame's pts + /// in the timebase of 1 / sample rate pub fn queue_audio_frame(&mut self, frame: frame::Audio) -> Result<(), QueueAudioFrameError> { - if self.is_paused { + if self.is_paused || !self.is_writing { return Ok(()); } @@ -278,15 +281,11 @@ impl MP4Encoder { let time = cm::Time::new(frame.pts().unwrap_or(0), frame.rate() as i32); - // dbg!(time); - let pts = self .start_time .add(self.elapsed_duration) .add(time.sub(self.segment_first_timestamp.unwrap())); - // dbg!(pts); - let buffer = cm::SampleBuf::create( Some(&block_buf), true, diff --git a/crates/enc-ffmpeg/src/audio/buffered_resampler.rs b/crates/enc-ffmpeg/src/audio/buffered_resampler.rs index f26b29d8f4..7827db6a2a 100644 --- a/crates/enc-ffmpeg/src/audio/buffered_resampler.rs +++ b/crates/enc-ffmpeg/src/audio/buffered_resampler.rs @@ -245,7 +245,12 @@ impl BufferedResampler { } pub fn flush(&mut self, max_samples: usize) -> Option { - self.get_frame_inner(self.remaining_samples().min(max_samples)) + let remaining_samples = self.remaining_samples(); + if remaining_samples == 0 { + return None; + } + + self.get_frame_inner(remaining_samples.min(max_samples)) } } diff --git a/crates/enc-ffmpeg/src/audio/opus.rs b/crates/enc-ffmpeg/src/audio/opus.rs index aa6265a061..70dde56cc4 100644 --- a/crates/enc-ffmpeg/src/audio/opus.rs +++ b/crates/enc-ffmpeg/src/audio/opus.rs @@ -5,7 +5,8 @@ use ffmpeg::{ frame, threading::Config, }; -use std::collections::VecDeque; + +use crate::audio::buffered_resampler::BufferedResampler; use super::AudioEncoder; @@ -14,9 +15,7 @@ pub struct OpusEncoder { tag: &'static str, encoder: encoder::Audio, packet: ffmpeg::Packet, - resampler: Option, - resampled_frame: frame::Audio, - buffer: VecDeque, + resampler: BufferedResampler, stream_index: usize, } @@ -75,39 +74,26 @@ impl OpusEncoder { output_config.sample_format = Self::SAMPLE_FORMAT; output_config.sample_rate = rate as u32; - let resampler = if ( - input_config.sample_format, - 
input_config.channel_layout(), - input_config.sample_rate, - ) != ( - output_config.sample_format, - output_config.channel_layout(), - output_config.sample_rate, - ) { - Some( - ffmpeg::software::resampler( - ( - input_config.sample_format, - input_config.channel_layout(), - input_config.sample_rate, - ), - ( - output_config.sample_format, - output_config.channel_layout(), - output_config.sample_rate, - ), - ) - .unwrap(), - ) - } else { - None - }; + let resampler = ffmpeg::software::resampler( + ( + input_config.sample_format, + input_config.channel_layout(), + input_config.sample_rate, + ), + ( + output_config.sample_format, + output_config.channel_layout(), + output_config.sample_rate, + ), + ) + .unwrap(); + let resampler = BufferedResampler::new(resampler); encoder.set_bit_rate(Self::OUTPUT_BITRATE); encoder.set_rate(rate); encoder.set_format(output_config.sample_format); encoder.set_channel_layout(output_config.channel_layout()); - encoder.set_time_base(output_config.time_base); + encoder.set_time_base(FFRational(1, output_config.rate())); let encoder = encoder.open()?; @@ -118,68 +104,26 @@ impl OpusEncoder { Ok(Self { tag, - buffer: VecDeque::new(), encoder, stream_index, packet: ffmpeg::Packet::empty(), - resampled_frame: frame::Audio::empty(), resampler, }) } - pub fn queue_frame(&mut self, frame: frame::Audio, output: &mut format::context::Output) { - if let Some(resampler) = &mut self.resampler { - resampler.run(&frame, &mut self.resampled_frame).unwrap(); - - self.buffer - .extend(&self.resampled_frame.data(0)[0..frame_size_bytes(&self.resampled_frame)]); - - loop { - let frame_size_bytes = self.encoder.frame_size() as usize - * self.encoder.channels() as usize - * self.encoder.format().bytes(); - if self.buffer.len() < frame_size_bytes { - break; - } - - let bytes = self.buffer.drain(0..frame_size_bytes).collect::>(); - let mut frame = frame::Audio::new( - self.encoder.format(), - self.encoder.frame_size() as usize, - self.encoder.channel_layout(), - 
); - - frame.data_mut(0)[0..frame_size_bytes].copy_from_slice(&bytes); - - self.encoder.send_frame(&frame).unwrap(); - - self.process_packets(output); - } - } else { - self.buffer - .extend(&frame.data(0)[0..frame_size_bytes(&frame)]); - - loop { - let frame_size_bytes = self.encoder.frame_size() as usize - * self.encoder.channels() as usize - * self.encoder.format().bytes(); - if self.buffer.len() < frame_size_bytes { - break; - } + pub fn input_time_base(&self) -> FFRational { + self.encoder.time_base() + } - let bytes = self.buffer.drain(0..frame_size_bytes).collect::>(); - let mut frame = frame::Audio::new( - self.encoder.format(), - self.encoder.frame_size() as usize, - self.encoder.channel_layout(), - ); + pub fn queue_frame(&mut self, frame: frame::Audio, output: &mut format::context::Output) { + self.resampler.add_frame(frame); - frame.data_mut(0)[0..frame_size_bytes].copy_from_slice(&bytes); + let frame_size = self.encoder.frame_size() as usize; - self.encoder.send_frame(&frame).unwrap(); + while let Some(frame) = self.resampler.get_frame(frame_size) { + self.encoder.send_frame(&frame).unwrap(); - self.process_packets(output); - } + self.process_packets(output); } } @@ -195,60 +139,10 @@ impl OpusEncoder { } pub fn finish(&mut self, output: &mut format::context::Output) { - let frame_size_bytes = self.encoder.frame_size() as usize - * self.encoder.channels() as usize - * self.encoder.format().bytes(); - - if let Some(mut resampler) = self.resampler.take() { - while resampler.delay().is_some() { - resampler.flush(&mut self.resampled_frame).unwrap(); - if self.resampled_frame.samples() == 0 { - break; - } + while let Some(frame) = self.resampler.flush(self.encoder.frame_size() as usize) { + self.encoder.send_frame(&frame).unwrap(); - self.buffer.extend( - &self.resampled_frame.data(0)[0..self.resampled_frame.samples() - * self.resampled_frame.channels() as usize - * self.resampled_frame.format().bytes()], - ); - - while self.buffer.len() >= frame_size_bytes 
{ - let bytes = self.buffer.drain(0..frame_size_bytes).collect::>(); - - let mut frame = frame::Audio::new( - self.encoder.format(), - self.encoder.frame_size() as usize, - self.encoder.channel_layout(), - ); - - frame.data_mut(0)[0..frame_size_bytes].copy_from_slice(&bytes); - - self.encoder.send_frame(&frame).unwrap(); - - self.process_packets(output); - } - } - - while !self.buffer.is_empty() { - let frame_size_bytes = frame_size_bytes.min(self.buffer.len()); - let frame_size = frame_size_bytes - / self.encoder.channels() as usize - / self.encoder.format().bytes(); - - let bytes = self.buffer.drain(0..frame_size_bytes).collect::>(); - - let mut frame = frame::Audio::new( - self.encoder.format(), - frame_size, - self.encoder.channel_layout(), - ); - - frame.data_mut(0)[0..frame_size_bytes].copy_from_slice(&bytes); - - self.encoder.send_frame(&frame).unwrap(); - - self.process_packets(output); - } + self.process_packets(output); } self.encoder.send_eof().unwrap(); @@ -266,7 +160,3 @@ impl AudioEncoder for OpusEncoder { self.finish(output); } } - -fn frame_size_bytes(frame: &frame::Audio) -> usize { - frame.samples() * frame.format().bytes() * frame.channels() as usize -} diff --git a/crates/enc-ffmpeg/src/mux/ogg.rs b/crates/enc-ffmpeg/src/mux/ogg.rs index d45b6b813a..1f63a92e59 100644 --- a/crates/enc-ffmpeg/src/mux/ogg.rs +++ b/crates/enc-ffmpeg/src/mux/ogg.rs @@ -24,6 +24,10 @@ impl OggFile { Ok(Self { encoder, output }) } + pub fn encoder(&self) -> &OpusEncoder { + &self.encoder + } + pub fn queue_frame(&mut self, frame: frame::Audio) { self.encoder.queue_frame(frame, &mut self.output); } diff --git a/crates/recording/examples/recording-cli.rs b/crates/recording/examples/recording-cli.rs index b2861a9a38..6b57647fad 100644 --- a/crates/recording/examples/recording-cli.rs +++ b/crates/recording/examples/recording-cli.rs @@ -1,11 +1,13 @@ -use std::time::Duration; - -use cap_recording::{RecordingBaseInputs, screen_capture::ScreenCaptureTarget}; +use 
cap_recording::{feeds::microphone, screen_capture::ScreenCaptureTarget, *}; +use kameo::Actor; use scap_targets::Display; +use std::{sync::Arc, time::Duration}; use tracing::info; #[tokio::main] pub async fn main() { + unsafe { std::env::set_var("RUST_LOG", "trace") }; + #[cfg(windows)] { use windows::Win32::UI::HiDpi::{PROCESS_PER_MONITOR_DPI_AWARE, SetProcessDpiAwareness}; @@ -22,20 +24,40 @@ pub async fn main() { info!("Recording to directory '{}'", dir.path().display()); - let (handle, _ready_rx) = cap_recording::spawn_instant_recording_actor( - "test".to_string(), + // let camera_feed = CameraFeed::spawn(CameraFeed::default()); + + // camera_feed + // .ask(camera::SetInput { + // id: DeviceOrModelID::from_info(&cap_camera::list_cameras().next().unwrap()), + // }) + // .await + // .unwrap() + // .await + // .unwrap(); + + let (error_tx, _) = flume::bounded(1); + let mic_feed = MicrophoneFeed::spawn(MicrophoneFeed::new(error_tx)); + + mic_feed + .ask(microphone::SetInput { + label: MicrophoneFeed::default().map(|v| v.0).unwrap(), + }) + .await + .unwrap() + .await + .unwrap(); + + // tokio::time::sleep(Duration::from_millis(10)).await; + + let (handle, _ready_rx) = instant_recording::Actor::builder( dir.path().into(), - RecordingBaseInputs { - capture_target: ScreenCaptureTarget::Display { - id: Display::primary().id(), - }, - capture_system_audio: true, - camera_feed: None, - mic_feed: None, + ScreenCaptureTarget::Display { + id: Display::primary().id(), }, - // false, - // true, ) + .with_system_audio(true) + .with_mic_feed(Arc::new(mic_feed.ask(microphone::Lock).await.unwrap())) + .build() .await .unwrap(); diff --git a/crates/recording/src/capture_pipeline.rs b/crates/recording/src/capture_pipeline.rs index 666e8040e2..08acab4379 100644 --- a/crates/recording/src/capture_pipeline.rs +++ b/crates/recording/src/capture_pipeline.rs @@ -75,18 +75,19 @@ impl MakeCapturePipeline for screen_capture::CMSampleBufferCapture { let mut timestamp_tx = Some(timestamp_tx); 
let _ = ready.send(Ok(())); - let Ok(frame) = source.1.recv() else { + let Ok((frame, timestamp)) = source.1.recv() else { return Ok(()); }; if let Some(timestamp_tx) = timestamp_tx.take() { - let _ = timestamp_tx.send(frame.1); + let _ = timestamp_tx.send(timestamp); + let _ = screen_encoder.queue_video_frame(frame.as_ref()); } let result = loop { match source.1.recv() { - Ok(frame) => { - let _ = screen_encoder.queue_video_frame(frame.0.as_ref()); + Ok((frame, _)) => { + let _ = screen_encoder.queue_video_frame(frame.as_ref()); } // Err(RecvTimeoutError::Timeout) => { // break Err("Frame receive timeout".to_string()); @@ -177,13 +178,9 @@ impl MakeCapturePipeline for screen_capture::CMSampleBufferCapture { continue; }; - // dbg!(ts_offset); - let pts = (ts_offset.as_secs_f64() * frame.rate() as f64) as i64; frame.set_pts(Some(pts)); - // dbg!(pts); - if let Ok(mut mp4) = mp4.lock() && let Err(e) = mp4.queue_audio_frame(frame) { @@ -208,7 +205,6 @@ impl MakeCapturePipeline for screen_capture::CMSampleBufferCapture { } if let Some(first_frame_tx) = first_frame_tx.take() { - // dbg!(timestamp); let _ = first_frame_tx.send((frame.pts(), timestamp)); } diff --git a/crates/recording/src/feeds/microphone.rs b/crates/recording/src/feeds/microphone.rs index 278dc38878..7701a5ecaa 100644 --- a/crates/recording/src/feeds/microphone.rs +++ b/crates/recording/src/feeds/microphone.rs @@ -93,44 +93,15 @@ impl MicrophoneFeed { } } + pub fn default() -> Option<(String, Device, SupportedStreamConfig)> { + let host = cpal::default_host(); + host.default_input_device().and_then(get_usable_device) + } + pub fn list() -> MicrophonesMap { let host = cpal::default_host(); let mut device_map = IndexMap::new(); - let get_usable_device = |device: Device| { - device - .supported_input_configs() - .map_err(|error| { - error!( - "Error getting supported input configs for device: {}", - error - ); - error - }) - .ok() - .and_then(|configs| { - let mut configs = configs.collect::>(); - 
configs.sort_by(|a, b| { - b.sample_format() - .sample_size() - .cmp(&a.sample_format().sample_size()) - .then(b.max_sample_rate().cmp(&a.max_sample_rate())) - }); - configs - .into_iter() - .filter(|c| { - c.min_sample_rate().0 <= 48000 && c.max_sample_rate().0 <= 48000 - }) - .find(|c| ffmpeg_sample_format_for(c.sample_format()).is_some()) - }) - .and_then(|config| { - device - .name() - .ok() - .map(|name| (name, device, config.with_max_sample_rate())) - }) - }; - if let Some((name, device, config)) = host.default_input_device().and_then(get_usable_device) { @@ -154,6 +125,38 @@ impl MicrophoneFeed { } } +fn get_usable_device(device: Device) -> Option<(String, Device, SupportedStreamConfig)> { + device + .supported_input_configs() + .map_err(|error| { + error!( + "Error getting supported input configs for device: {}", + error + ); + error + }) + .ok() + .and_then(|configs| { + let mut configs = configs.collect::>(); + configs.sort_by(|a, b| { + b.sample_format() + .sample_size() + .cmp(&a.sample_format().sample_size()) + .then(b.max_sample_rate().cmp(&a.max_sample_rate())) + }); + configs + .into_iter() + .filter(|c| c.min_sample_rate().0 <= 48000 && c.max_sample_rate().0 <= 48000) + .find(|c| ffmpeg_sample_format_for(c.sample_format()).is_some()) + }) + .and_then(|config| { + device + .name() + .ok() + .map(|name| (name, device, config.with_max_sample_rate())) + }) +} + #[derive(Reply)] pub struct MicrophoneFeedLock { actor: ActorRef, diff --git a/crates/recording/src/instant_recording.rs b/crates/recording/src/instant_recording.rs index 0630d118f8..1875462570 100644 --- a/crates/recording/src/instant_recording.rs +++ b/crates/recording/src/instant_recording.rs @@ -1,3 +1,10 @@ +use crate::{ + ActorError, RecordingBaseInputs, RecordingError, + capture_pipeline::{MakeCapturePipeline, create_screen_capture}, + feeds::microphone::MicrophoneFeedLock, + pipeline::RecordingPipeline, + sources::{ScreenCaptureSource, ScreenCaptureTarget}, +}; use cap_media::MediaError; 
use cap_media_info::{AudioInfo, VideoInfo}; use cap_project::InstantRecordingMeta; @@ -12,37 +19,29 @@ use std::{ use tokio::sync::oneshot; use tracing::{Instrument, debug, error, info, trace}; -use crate::{ - ActorError, RecordingBaseInputs, RecordingError, - capture_pipeline::{MakeCapturePipeline, create_screen_capture}, - feeds::microphone::MicrophoneFeedLock, - pipeline::Pipeline, - sources::{ScreenCaptureSource, ScreenCaptureTarget}, -}; - -struct InstantRecordingPipeline { - pub inner: Pipeline, +struct Pipeline { + pub inner: RecordingPipeline, #[allow(unused)] pub output_path: PathBuf, pub pause_flag: Arc, } -enum InstantRecordingActorState { +enum ActorState { Recording { - pipeline: InstantRecordingPipeline, + pipeline: Pipeline, pipeline_done_rx: oneshot::Receiver>, segment_start_time: f64, }, Paused { - pipeline: InstantRecordingPipeline, + pipeline: Pipeline, pipeline_done_rx: oneshot::Receiver>, segment_start_time: f64, }, } #[derive(Clone)] -pub struct InstantRecordingHandle { - ctrl_tx: flume::Sender, +pub struct ActorHandle { + ctrl_tx: flume::Sender, pub capture_target: ScreenCaptureTarget, // pub bounds: Bounds, } @@ -58,32 +57,32 @@ macro_rules! 
send_message { }}; } -impl InstantRecordingHandle { - pub async fn stop(&self) -> Result { - send_message!(self.ctrl_tx, InstantRecordingActorControlMessage::Stop) +impl ActorHandle { + pub async fn stop(&self) -> Result { + send_message!(self.ctrl_tx, ActorControlMessage::Stop) } pub async fn pause(&self) -> Result<(), RecordingError> { - send_message!(self.ctrl_tx, InstantRecordingActorControlMessage::Pause) + send_message!(self.ctrl_tx, ActorControlMessage::Pause) } pub async fn resume(&self) -> Result<(), RecordingError> { - send_message!(self.ctrl_tx, InstantRecordingActorControlMessage::Resume) + send_message!(self.ctrl_tx, ActorControlMessage::Resume) } pub async fn cancel(&self) -> Result<(), RecordingError> { - send_message!(self.ctrl_tx, InstantRecordingActorControlMessage::Cancel) + send_message!(self.ctrl_tx, ActorControlMessage::Cancel) } } -pub enum InstantRecordingActorControlMessage { +pub enum ActorControlMessage { Pause(oneshot::Sender>), Resume(oneshot::Sender>), - Stop(oneshot::Sender>), + Stop(oneshot::Sender>), Cancel(oneshot::Sender>), } -impl std::fmt::Debug for InstantRecordingActorControlMessage { +impl std::fmt::Debug for ActorControlMessage { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { Self::Pause(_) => write!(f, "Pause"), @@ -94,15 +93,13 @@ impl std::fmt::Debug for InstantRecordingActorControlMessage { } } -pub struct InstantRecordingActor { - id: String, +pub struct Actor { recording_dir: PathBuf, capture_target: ScreenCaptureTarget, video_info: VideoInfo, } -pub struct CompletedInstantRecording { - pub id: String, +pub struct CompletedRecording { pub project_path: PathBuf, pub display_source: ScreenCaptureTarget, pub meta: InstantRecordingMeta, @@ -117,13 +114,7 @@ async fn create_pipeline( ), mic_feed: Option>, system_audio: Option>, -) -> Result< - ( - InstantRecordingPipeline, - oneshot::Receiver>, - ), - MediaError, -> { +) -> Result<(Pipeline, oneshot::Receiver>), MediaError> { if let 
Some(mic_feed) = &mic_feed { debug!( "mic audio info: {:#?}", @@ -131,7 +122,7 @@ async fn create_pipeline( ); }; - let pipeline_builder = Pipeline::builder(); + let pipeline_builder = RecordingPipeline::builder(); let pause_flag = Arc::new(AtomicBool::new(false)); let system_audio = system_audio.map(|v| (v, screen_source.0.audio_info())); @@ -150,7 +141,7 @@ async fn create_pipeline( pipeline.play().await?; Ok(( - InstantRecordingPipeline { + Pipeline { inner: pipeline, output_path, pause_flag, @@ -159,13 +150,61 @@ async fn create_pipeline( )) } +impl Actor { + pub fn builder(output: PathBuf, capture_target: ScreenCaptureTarget) -> ActorBuilder { + ActorBuilder::new(output, capture_target) + } +} + +pub struct ActorBuilder { + output_path: PathBuf, + capture_target: ScreenCaptureTarget, + system_audio: bool, + mic_feed: Option>, +} + +impl ActorBuilder { + pub fn new(output: PathBuf, capture_target: ScreenCaptureTarget) -> Self { + Self { + output_path: output, + capture_target, + system_audio: false, + mic_feed: None, + } + } + + pub fn with_system_audio(mut self, system_audio: bool) -> Self { + self.system_audio = system_audio; + self + } + + pub fn with_mic_feed(mut self, mic_feed: Arc) -> Self { + self.mic_feed = Some(mic_feed); + self + } + + pub async fn build( + self, + ) -> Result<(ActorHandle, oneshot::Receiver>), RecordingError> { + spawn_instant_recording_actor( + self.output_path, + RecordingBaseInputs { + capture_target: self.capture_target, + capture_system_audio: self.system_audio, + mic_feed: self.mic_feed, + camera_feed: None, + }, + ) + .await + } +} + pub async fn spawn_instant_recording_actor( - id: String, recording_dir: PathBuf, inputs: RecordingBaseInputs, ) -> Result< ( - InstantRecordingHandle, + ActorHandle, tokio::sync::oneshot::Receiver>, ), RecordingError, @@ -222,14 +261,13 @@ pub async fn spawn_instant_recording_actor( let inputs = inputs.clone(); let video_info = screen_source.info(); async move { - let mut actor = 
InstantRecordingActor { - id, + let mut actor = Actor { recording_dir, capture_target: inputs.capture_target, video_info, }; - let mut state = InstantRecordingActorState::Recording { + let mut state = ActorState::Recording { pipeline, pipeline_done_rx, segment_start_time, @@ -254,7 +292,7 @@ pub async fn spawn_instant_recording_actor( }); Ok(( - InstantRecordingHandle { + ActorHandle { ctrl_tx, capture_target: inputs.capture_target, // bounds: *screen_source.get_bounds(), @@ -281,16 +319,15 @@ macro_rules! send_response { } async fn run_actor_iteration( - state: InstantRecordingActorState, - ctrl_rx: &Receiver, - actor: InstantRecordingActor, -) -> Result, InstantRecordingActorError> -{ - use InstantRecordingActorControlMessage as Msg; - use InstantRecordingActorState as State; + state: ActorState, + ctrl_rx: &Receiver, + actor: Actor, +) -> Result, InstantRecordingActorError> { + use ActorControlMessage as Msg; + use ActorState as State; // Helper function to shutdown pipeline - async fn shutdown(mut pipeline: InstantRecordingPipeline) -> Result<(), RecordingError> { + async fn shutdown(mut pipeline: Pipeline) -> Result<(), RecordingError> { pipeline.inner.shutdown().await?; Ok(()) } @@ -375,7 +412,7 @@ async fn run_actor_iteration( let res = shutdown(pipeline).await; let res = match res { - Ok(_) => stop_recording(actor).await, + Ok(_) => Ok(stop_recording(actor).await), Err(e) => Err(e), }; @@ -432,20 +469,17 @@ async fn run_actor_iteration( }) } -async fn stop_recording( - actor: InstantRecordingActor, -) -> Result { +async fn stop_recording(actor: Actor) -> CompletedRecording { use cap_project::*; - Ok(CompletedInstantRecording { - id: actor.id, + CompletedRecording { project_path: actor.recording_dir.clone(), meta: InstantRecordingMeta { fps: actor.video_info.fps(), sample_rate: None, }, display_source: actor.capture_target, - }) + } } fn current_time_f64() -> f64 { diff --git a/crates/recording/src/lib.rs b/crates/recording/src/lib.rs index 
007599dda2..5842be6715 100644 --- a/crates/recording/src/lib.rs +++ b/crates/recording/src/lib.rs @@ -6,13 +6,8 @@ pub mod pipeline; pub mod sources; pub mod studio_recording; -pub use instant_recording::{ - CompletedInstantRecording, InstantRecordingActor, spawn_instant_recording_actor, -}; +pub use feeds::{camera::CameraFeed, microphone::MicrophoneFeed}; pub use sources::{camera, screen_capture}; -pub use studio_recording::{ - CompletedStudioRecording, StudioRecordingHandle, spawn_studio_recording_actor, -}; use cap_media::MediaError; use feeds::microphone::MicrophoneFeedLock; diff --git a/crates/recording/src/pipeline/builder.rs b/crates/recording/src/pipeline/builder.rs index a1b8f53193..cb3972b1b0 100644 --- a/crates/recording/src/pipeline/builder.rs +++ b/crates/recording/src/pipeline/builder.rs @@ -8,7 +8,7 @@ use tokio::sync::oneshot; use tracing::{error, info}; use crate::pipeline::{ - MediaError, Pipeline, + MediaError, RecordingPipeline, control::ControlBroadcast, task::{PipelineReadySignal, PipelineSourceTask}, }; @@ -107,7 +107,7 @@ impl PipelineBuilder { impl PipelineBuilder { pub async fn build( self, - ) -> Result<(Pipeline, oneshot::Receiver>), MediaError> { + ) -> Result<(RecordingPipeline, oneshot::Receiver>), MediaError> { let Self { control, tasks } = self; if tasks.is_empty() { @@ -154,7 +154,7 @@ impl PipelineBuilder { }); Ok(( - Pipeline { + RecordingPipeline { control, task_handles, is_shutdown: false, diff --git a/crates/recording/src/pipeline/mod.rs b/crates/recording/src/pipeline/mod.rs index 4d2e16c235..f0050b7944 100644 --- a/crates/recording/src/pipeline/mod.rs +++ b/crates/recording/src/pipeline/mod.rs @@ -12,13 +12,13 @@ use crate::MediaError; use builder::PipelineBuilder; use control::{Control, ControlBroadcast, PipelineControlSignal}; -pub struct Pipeline { +pub struct RecordingPipeline { control: ControlBroadcast, task_handles: IndexMap>, is_shutdown: bool, } -impl Pipeline { +impl RecordingPipeline { pub fn builder() -> 
PipelineBuilder { PipelineBuilder::default() } diff --git a/crates/recording/src/sources/audio_mixer.rs b/crates/recording/src/sources/audio_mixer.rs index 8dff3077cf..02690a1ae6 100644 --- a/crates/recording/src/sources/audio_mixer.rs +++ b/crates/recording/src/sources/audio_mixer.rs @@ -146,7 +146,6 @@ impl AudioMixer { let rate = source.info.rate(); while let Ok((frame, timestamp)) = source.rx.try_recv() { - // dbg!(timestamp.duration_since(start)); // if gap between incoming and last, insert silence if let Some((buffer_last_timestamp, buffer_last_duration)) = source.buffer_last { let timestamp_elapsed = timestamp.duration_since(start); @@ -205,6 +204,10 @@ impl AudioMixer { source.info.channel_layout(), ); + for i in 0..frame.planes() { + frame.data_mut(i).fill(0); + } + frame.set_rate(rate as u32); let timestamp = @@ -221,7 +224,12 @@ impl AudioMixer { source.info.channel_layout(), ); + for i in 0..frame.planes() { + frame.data_mut(i).fill(0); + } + frame.set_rate(rate as u32); + frame.data_mut(0).fill(0); let duration = Duration::from_secs_f64(leftover_chunk_size as f64 / rate as f64); diff --git a/crates/recording/src/sources/screen_capture/macos.rs b/crates/recording/src/sources/screen_capture/macos.rs index 6dfbe997a0..476e45dd43 100644 --- a/crates/recording/src/sources/screen_capture/macos.rs +++ b/crates/recording/src/sources/screen_capture/macos.rs @@ -39,9 +39,9 @@ impl Message for FrameHandler { let frame = msg.0; let sample_buffer = frame.sample_buf(); - let timestamp = Timestamp::MachAbsoluteTime(cap_timestamp::MachAbsoluteTimestamp::new( - cm::Clock::convert_host_time_to_sys_units(sample_buffer.pts()), - )); + let mach_timestamp = cm::Clock::convert_host_time_to_sys_units(sample_buffer.pts()); + let timestamp = + Timestamp::MachAbsoluteTime(cap_timestamp::MachAbsoluteTimestamp::new(mach_timestamp)); match &frame { scap_screencapturekit::Frame::Screen(frame) => { @@ -164,6 +164,7 @@ impl PipelineSourceTask for ScreenCaptureSource { .build(); 
settings.set_pixel_format(cv::PixelFormat::_32_BGRA); + settings.set_color_space_name(cg::color_space::names::srgb()); if let Some(crop_bounds) = config.crop_bounds { tracing::info!("crop bounds: {:?}", crop_bounds); diff --git a/crates/recording/src/studio_recording.rs b/crates/recording/src/studio_recording.rs index a9fec9a20f..d86f6a1e09 100644 --- a/crates/recording/src/studio_recording.rs +++ b/crates/recording/src/studio_recording.rs @@ -3,7 +3,7 @@ use crate::{ capture_pipeline::{MakeCapturePipeline, ScreenCaptureMethod, create_screen_capture}, cursor::{CursorActor, Cursors, spawn_cursor_recorder}, feeds::{camera::CameraFeedLock, microphone::MicrophoneFeedLock}, - pipeline::Pipeline, + pipeline::RecordingPipeline, sources::{AudioInputSource, CameraSource, ScreenCaptureFormat, ScreenCaptureTarget}, }; use cap_enc_ffmpeg::{H264Encoder, MP4File, OggFile, OpusEncoder}; @@ -23,9 +23,9 @@ use tokio::sync::oneshot; use tracing::{debug, info, trace}; #[allow(clippy::large_enum_variant)] -enum StudioRecordingActorState { +enum ActorState { Recording { - pipeline: StudioRecordingPipeline, + pipeline: Pipeline, pipeline_done_rx: oneshot::Receiver>, index: u32, segment_start_time: f64, @@ -38,26 +38,23 @@ enum StudioRecordingActorState { }, } -pub enum StudioRecordingActorControlMessage { +pub enum ActorControlMessage { Pause(oneshot::Sender>), Resume(oneshot::Sender>), Stop(oneshot::Sender>), Cancel(oneshot::Sender>), } -pub struct StudioRecordingActor { - id: String, +pub struct Actor { recording_dir: PathBuf, fps: u32, - segments: Vec, - #[allow(unused)] - start_instant: Instant, + segments: Vec, } -pub struct StudioRecordingSegment { +pub struct RecordingSegment { pub start: f64, pub end: f64, - pipeline: StudioRecordingPipeline, + pipeline: Pipeline, } pub struct PipelineOutput { @@ -70,9 +67,9 @@ pub struct ScreenPipelineOutput { pub video_info: VideoInfo, } -struct StudioRecordingPipeline { +struct Pipeline { pub start_time: Timestamps, - pub inner: Pipeline, + 
pub inner: RecordingPipeline, pub screen: ScreenPipelineOutput, pub microphone: Option, pub camera: Option, @@ -86,8 +83,8 @@ struct CursorPipeline { } #[derive(Clone)] -pub struct StudioRecordingHandle { - ctrl_tx: flume::Sender, +pub struct ActorHandle { + ctrl_tx: flume::Sender, pub capture_target: ScreenCaptureTarget, } @@ -102,39 +99,101 @@ macro_rules! send_message { }}; } -impl StudioRecordingHandle { +impl ActorHandle { pub async fn stop(&self) -> Result { - send_message!(self.ctrl_tx, StudioRecordingActorControlMessage::Stop) + send_message!(self.ctrl_tx, ActorControlMessage::Stop) } pub async fn pause(&self) -> Result<(), RecordingError> { - send_message!(self.ctrl_tx, StudioRecordingActorControlMessage::Pause) + send_message!(self.ctrl_tx, ActorControlMessage::Pause) } pub async fn resume(&self) -> Result<(), CreateSegmentPipelineError> { - send_message!(self.ctrl_tx, StudioRecordingActorControlMessage::Resume) + send_message!(self.ctrl_tx, ActorControlMessage::Resume) } pub async fn cancel(&self) -> Result<(), RecordingError> { - send_message!(self.ctrl_tx, StudioRecordingActorControlMessage::Cancel) + send_message!(self.ctrl_tx, ActorControlMessage::Cancel) } } #[derive(Debug, thiserror::Error)] -pub enum SpawnStudioRecordingError { +pub enum SpawnError { #[error("{0}")] Media(#[from] MediaError), #[error("{0}")] PipelineCreationError(#[from] CreateSegmentPipelineError), } -pub async fn spawn_studio_recording_actor( - id: String, +impl Actor { + pub fn builder(output: PathBuf, capture_target: ScreenCaptureTarget) -> ActorBuilder { + ActorBuilder::new(output, capture_target) + } +} + +pub struct ActorBuilder { + output_path: PathBuf, + capture_target: ScreenCaptureTarget, + system_audio: bool, + mic_feed: Option>, + camera_feed: Option>, + custom_cursor: bool, +} + +impl ActorBuilder { + pub fn new(output: PathBuf, capture_target: ScreenCaptureTarget) -> Self { + Self { + output_path: output, + capture_target, + system_audio: false, + mic_feed: None, + 
camera_feed: None, + custom_cursor: false, + } + } + + pub fn with_system_audio(mut self, system_audio: bool) -> Self { + self.system_audio = system_audio; + self + } + + pub fn with_mic_feed(mut self, mic_feed: Arc) -> Self { + self.mic_feed = Some(mic_feed); + self + } + + pub fn with_camera_feed(mut self, camera_feed: Arc) -> Self { + self.camera_feed = Some(camera_feed); + self + } + + pub fn with_custom_cursor(mut self, custom_cursor: bool) -> Self { + self.custom_cursor = custom_cursor; + self + } + + pub async fn build( + self, + ) -> Result<(ActorHandle, oneshot::Receiver>), SpawnError> { + spawn_studio_recording_actor( + self.output_path, + RecordingBaseInputs { + capture_target: self.capture_target, + capture_system_audio: self.system_audio, + mic_feed: self.mic_feed, + camera_feed: self.camera_feed, + }, + self.custom_cursor, + ) + .await + } +} + +async fn spawn_studio_recording_actor( recording_dir: PathBuf, base_inputs: RecordingBaseInputs, custom_cursor_capture: bool, -) -> Result<(StudioRecordingHandle, oneshot::Receiver>), SpawnStudioRecordingError> -{ +) -> Result<(ActorHandle, oneshot::Receiver>), SpawnError> { ensure_dir(&recording_dir)?; let (done_tx, done_rx) = oneshot::channel(); @@ -146,9 +205,7 @@ pub async fn spawn_studio_recording_actor( let segments_dir = ensure_dir(&content_dir.join("segments"))?; let cursors_dir = ensure_dir(&content_dir.join("cursors"))?; - // TODO: move everything to start_instant let start_time = Timestamps::now(); - let start_instant = Instant::now(); if let Some(camera_feed) = &base_inputs.camera_feed { debug!("camera device info: {:#?}", camera_feed.camera_info()); @@ -182,15 +239,13 @@ pub async fn spawn_studio_recording_actor( let fps = pipeline.screen.video_info.fps(); spawn_actor(async move { - let mut actor = StudioRecordingActor { - id, + let mut actor = Actor { recording_dir, fps, segments: Vec::new(), - start_instant, }; - let mut state = StudioRecordingActorState::Recording { + let mut state = 
ActorState::Recording { pipeline, pipeline_done_rx, index, @@ -215,7 +270,7 @@ pub async fn spawn_studio_recording_actor( }); Ok(( - StudioRecordingHandle { + ActorHandle { ctrl_tx, capture_target: base_inputs.capture_target, }, @@ -241,18 +296,18 @@ macro_rules! send_response { } async fn run_actor_iteration( - state: StudioRecordingActorState, - ctrl_rx: &Receiver, - mut actor: StudioRecordingActor, + state: ActorState, + ctrl_rx: &Receiver, + mut actor: Actor, segment_pipeline_factory: &mut SegmentPipelineFactory, -) -> Result, StudioRecordingActorError> { - use StudioRecordingActorControlMessage as Msg; - use StudioRecordingActorState as State; +) -> Result, StudioRecordingActorError> { + use ActorControlMessage as Msg; + use ActorState as State; // Helper function to shutdown pipeline and save cursor data async fn shutdown( - mut pipeline: StudioRecordingPipeline, - actor: &mut StudioRecordingActor, + mut pipeline: Pipeline, + actor: &mut Actor, segment_start_time: f64, ) -> Result<(Cursors, u32), RecordingError> { tracing::info!("pipeline shuting down"); @@ -283,7 +338,7 @@ async fn run_actor_iteration( (Default::default(), 0) }; - actor.segments.push(StudioRecordingSegment { + actor.segments.push(RecordingSegment { start: segment_start_time, end: segment_stop_time, pipeline, @@ -473,15 +528,14 @@ async fn run_actor_iteration( } pub struct CompletedStudioRecording { - pub id: String, pub project_path: PathBuf, pub meta: StudioRecordingMeta, pub cursor_data: cap_project::CursorImages, - pub segments: Vec, + pub segments: Vec, } async fn stop_recording( - actor: StudioRecordingActor, + actor: Actor, cursors: Cursors, ) -> Result { use cap_project::*; @@ -558,7 +612,6 @@ async fn stop_recording( .map_err(RecordingError::from)?; Ok(CompletedStudioRecording { - id: actor.id, project_path: actor.recording_dir.clone(), meta, cursor_data: Default::default(), @@ -599,13 +652,7 @@ impl SegmentPipelineFactory { &mut self, cursors: Cursors, next_cursors_id: u32, - ) -> 
Result< - ( - StudioRecordingPipeline, - oneshot::Receiver>, - ), - CreateSegmentPipelineError, - > { + ) -> Result<(Pipeline, oneshot::Receiver>), CreateSegmentPipelineError> { let result = create_segment_pipeline( &self.segments_dir, &self.cursors_dir, @@ -659,13 +706,7 @@ async fn create_segment_pipeline( next_cursors_id: u32, custom_cursor_capture: bool, start_time: Timestamps, -) -> Result< - ( - StudioRecordingPipeline, - oneshot::Receiver>, - ), - CreateSegmentPipelineError, -> { +) -> Result<(Pipeline, oneshot::Receiver>), CreateSegmentPipelineError> { let system_audio = if capture_system_audio { let (tx, rx) = flume::bounded(64); (Some(tx), Some(rx)) @@ -697,7 +738,7 @@ async fn create_segment_pipeline( let dir = ensure_dir(&segments_dir.join(format!("segment-{index}")))?; - let mut pipeline_builder = Pipeline::builder(); + let mut pipeline_builder = RecordingPipeline::builder(); let screen_output_path = dir.join("display.mp4"); @@ -733,35 +774,46 @@ async fn create_segment_pipeline( }; let microphone = if let Some(mic_feed) = mic_feed { - let (tx, rx) = flume::bounded(8); + let (tx, channel) = flume::bounded(8); let mic_source = AudioInputSource::init(mic_feed, tx); let mic_config = mic_source.info(); let output_path = dir.join("audio-input.ogg"); - let mut mic_encoder = OggFile::init( + let mut output = OggFile::init( output_path.clone(), OpusEncoder::factory("microphone", mic_config), ) .map_err(|e| MediaError::Any(e.to_string().into()))?; + let time_base = output.encoder().input_time_base(); pipeline_builder.spawn_source("microphone_capture", mic_source); let (timestamp_tx, first_timestamp_rx) = flume::bounded(1); pipeline_builder.spawn_task("microphone_encoder", move |ready| { + let mut first_timestamp = None; let mut timestamp_tx = Some(timestamp_tx); let _ = ready.send(Ok(())); - while let Ok(frame) = rx.recv() { + let rate = time_base.denominator() as f64 / time_base.numerator() as f64; + + while let Ok((mut frame, timestamp)) = channel.recv() { if 
let Some(timestamp_tx) = timestamp_tx.take() { - timestamp_tx.send(frame.1).unwrap(); + let _ = timestamp_tx.send(timestamp); } - mic_encoder.queue_frame(frame.0); + let first_timestamp = first_timestamp.get_or_insert(timestamp); + + let elapsed = timestamp.duration_since(start_time) + - first_timestamp.duration_since(start_time); + frame.set_pts(Some((elapsed.as_secs_f64() * rate) as i64)); + + output.queue_frame(frame); } - mic_encoder.finish(); + + output.finish(); Ok(()) }); @@ -783,26 +835,38 @@ async fn create_segment_pipeline( { let output_path = dir.join("system_audio.ogg"); - let mut system_audio_encoder = OggFile::init( + let mut output = OggFile::init( output_path.clone(), OpusEncoder::factory("system_audio", config), ) .map_err(|e| MediaError::Any(e.to_string().into()))?; + let time_base = output.encoder().input_time_base(); + let (timestamp_tx, timestamp_rx) = flume::bounded(1); pipeline_builder.spawn_task("system_audio_encoder", move |ready| { + let mut first_timestamp = None; let mut timestamp_tx = Some(timestamp_tx); let _ = ready.send(Ok(())); - while let Ok(frame) = channel.recv() { + let rate = time_base.denominator() as f64 / time_base.numerator() as f64; + + while let Ok((mut frame, timestamp)) = channel.recv() { if let Some(timestamp_tx) = timestamp_tx.take() { - timestamp_tx.send(frame.1).unwrap(); + let _ = timestamp_tx.send(timestamp); } - system_audio_encoder.queue_frame(frame.0); + let first_timestamp = first_timestamp.get_or_insert(timestamp); + + let elapsed = timestamp.duration_since(start_time) + - first_timestamp.duration_since(start_time); + frame.set_pts(Some((elapsed.as_secs_f64() * rate) as i64)); + + output.queue_frame(frame); } - system_audio_encoder.finish(); + + output.finish(); Ok(()) }); @@ -815,10 +879,11 @@ async fn create_segment_pipeline( }; let camera = if let Some(camera_feed) = camera_feed { - let (tx, rx) = flume::bounded(8); + let (tx, channel) = flume::bounded(8); let camera_source = 
CameraSource::init(camera_feed, tx); let camera_config = camera_source.info(); + let time_base = camera_config.time_base; let output_path = dir.join("camera.mp4"); let mut camera_encoder = MP4File::init( @@ -834,27 +899,24 @@ async fn create_segment_pipeline( let (timestamp_tx, timestamp_rx) = flume::bounded(1); pipeline_builder.spawn_task("camera_encoder", move |ready| { + let mut first_timestamp = None; let mut timestamp_tx = Some(timestamp_tx); let _ = ready.send(Ok(())); - let mut start = None; - while let Ok(mut frame) = rx.recv() { + let rate = time_base.denominator() as f64 / time_base.numerator() as f64; + + while let Ok((mut frame, timestamp)) = channel.recv() { if let Some(timestamp_tx) = timestamp_tx.take() { - timestamp_tx.send(frame.1).unwrap(); + let _ = timestamp_tx.send(timestamp); } - if let Some(_) = start { - // frame.0.set_pts(Some( - // ((camera_config.time_base.denominator() as f64 - // / camera_config.time_base.numerator() as f64) - // * (frame.1 - start)) as i64, - // )); - } else { - start = Some(frame.1); - frame.0.set_pts(Some(0)); - } + let first_timestamp = first_timestamp.get_or_insert(timestamp); + + let elapsed = timestamp.duration_since(start_time) + - first_timestamp.duration_since(start_time); + frame.set_pts(Some((elapsed.as_secs_f64() * rate) as i64)); - camera_encoder.queue_video_frame(frame.0); + camera_encoder.queue_video_frame(frame); } camera_encoder.finish(); Ok(()) @@ -905,7 +967,7 @@ async fn create_segment_pipeline( info!("pipeline playing"); Ok(( - StudioRecordingPipeline { + Pipeline { inner: pipeline, start_time, screen, From a4b8bddfcabeb93805680f17ae46f0904fbc6c12 Mon Sep 17 00:00:00 2001 From: Brendan Allan Date: Tue, 9 Sep 2025 21:03:15 +0800 Subject: [PATCH 06/20] fix build on windows --- crates/recording/examples/recording-cli.rs | 3 ++- crates/recording/src/capture_pipeline.rs | 20 +++++++++---------- crates/recording/src/feeds/camera.rs | 2 ++ crates/recording/src/sources/audio_mixer.rs | 10 ++++++---- 
.../src/sources/screen_capture/windows.rs | 12 +++++------ crates/scap-direct3d/src/lib.rs | 17 +++------------- crates/timestamp/src/lib.rs | 8 ++++++-- crates/timestamp/src/macos.rs | 3 +-- crates/timestamp/src/win.rs | 2 ++ 9 files changed, 36 insertions(+), 41 deletions(-) diff --git a/crates/recording/examples/recording-cli.rs b/crates/recording/examples/recording-cli.rs index 6b57647fad..fffc2bbc0e 100644 --- a/crates/recording/examples/recording-cli.rs +++ b/crates/recording/examples/recording-cli.rs @@ -7,6 +7,7 @@ use tracing::info; #[tokio::main] pub async fn main() { unsafe { std::env::set_var("RUST_LOG", "trace") }; + unsafe { std::env::set_var("RUST_BACKTRACE", "1") }; #[cfg(windows)] { @@ -56,7 +57,7 @@ pub async fn main() { }, ) .with_system_audio(true) - .with_mic_feed(Arc::new(mic_feed.ask(microphone::Lock).await.unwrap())) + // .with_mic_feed(Arc::new(mic_feed.ask(microphone::Lock).await.unwrap())) .build() .await .unwrap(); diff --git a/crates/recording/src/capture_pipeline.rs b/crates/recording/src/capture_pipeline.rs index 08acab4379..395a6bd717 100644 --- a/crates/recording/src/capture_pipeline.rs +++ b/crates/recording/src/capture_pipeline.rs @@ -4,18 +4,18 @@ use crate::{ pipeline::builder::PipelineBuilder, sources::{ AudioInputSource, ScreenCaptureFormat, ScreenCaptureSource, ScreenCaptureTarget, - audio_mixer, screen_capture, + audio_mixer::AudioMixer, screen_capture, }, }; use cap_media::MediaError; use cap_media_info::AudioInfo; -use cap_timestamp::Timestamp; +use cap_timestamp::{Timestamp, Timestamps}; use flume::{Receiver, Sender}; use std::{ future::Future, path::PathBuf, sync::{Arc, atomic::AtomicBool}, - time::SystemTime, + time::{Duration, SystemTime}, }; pub trait MakeCapturePipeline: ScreenCaptureFormat + std::fmt::Debug + 'static { @@ -120,7 +120,7 @@ impl MakeCapturePipeline for screen_capture::CMSampleBufferCapture { let start_time = Timestamps::now(); let (audio_tx, audio_rx) = flume::bounded(64); - let mut audio_mixer = 
audio_mixer::AudioMixer::builder(audio_tx); + let mut audio_mixer = AudioMixer::builder(audio_tx); if let Some(system_audio) = system_audio { audio_mixer.add_source(system_audio.1, system_audio.0); @@ -140,14 +140,13 @@ impl MakeCapturePipeline for screen_capture::CMSampleBufferCapture { cap_enc_avfoundation::MP4Encoder::init( "mp4", source.0.info(), - has_audio_sources.then_some(audio_mixer::AudioMixer::INFO), + has_audio_sources.then_some(AudioMixer::INFO), output_path, Some(1080), ) .map_err(|e| MediaError::Any(e.to_string().into()))?, )); - use cap_timestamp::Timestamps; use cidre::cm; use tracing::error; @@ -406,7 +405,7 @@ impl MakeCapturePipeline for screen_capture::Direct3DCapture { let start_time = Timestamps::now(); let (audio_tx, audio_rx) = flume::bounded(64); - let mut audio_mixer = audio_mixerdioMixer::builder(audio_tx); + let mut audio_mixer = AudioMixer::builder(audio_tx); if let Some(system_audio) = system_audio { audio_mixer.add_source(system_audio.1, system_audio.0); @@ -478,7 +477,7 @@ impl MakeCapturePipeline for screen_capture::Direct3DCapture { let audio_encoder = has_audio_sources .then(|| { - AACEncoder::init("mic_audio", audio_mixerdioMixer::INFO, &mut output) + AACEncoder::init("mic_audio", AudioMixer::INFO, &mut output) .map(|v| v.boxed()) .map_err(|e| MediaError::Any(e.to_string().into())) }) @@ -514,7 +513,6 @@ impl MakeCapturePipeline for screen_capture::Direct3DCapture { while let Ok((mut frame, timestamp)) = audio_rx.recv() { let ts_offset = timestamp.duration_since(start_time); - // dbg!(ts_offset, frame.samples()); let Some(ts_offset) = ts_offset.checked_sub(screen_first_offset) else { continue; @@ -523,8 +521,6 @@ impl MakeCapturePipeline for screen_capture::Direct3DCapture { let pts = (ts_offset.as_secs_f64() * frame.rate() as f64) as i64; frame.set_pts(Some(pts)); - // dbg!(pts); - if let Ok(mut output) = output.lock() { audio_encoder.queue_frame(frame, &mut *output); } @@ -549,6 +545,8 @@ impl MakeCapturePipeline for 
screen_capture::Direct3DCapture { while let Ok(e) = encoder.get_event() { match e { MediaFoundation::METransformNeedInput => { + use cap_timestamp::PerformanceCounterTimestamp; + let Ok((frame, _)) = source.1.recv() else { break; }; diff --git a/crates/recording/src/feeds/camera.rs b/crates/recording/src/feeds/camera.rs index ccdb3e21d8..b805497a84 100644 --- a/crates/recording/src/feeds/camera.rs +++ b/crates/recording/src/feeds/camera.rs @@ -2,6 +2,8 @@ use cap_camera::CameraInfo; use cap_camera_ffmpeg::*; use cap_fail::fail_err; use cap_media_info::VideoInfo; +#[cfg(windows)] +use cap_timestamp::PerformanceCounterTimestamp; use cap_timestamp::Timestamp; use ffmpeg::frame; use futures::{FutureExt, future::BoxFuture}; diff --git a/crates/recording/src/sources/audio_mixer.rs b/crates/recording/src/sources/audio_mixer.rs index 02690a1ae6..0c5dba774b 100644 --- a/crates/recording/src/sources/audio_mixer.rs +++ b/crates/recording/src/sources/audio_mixer.rs @@ -158,9 +158,9 @@ impl AudioMixer { && buffer_last_duration - elapsed_since_last_frame >= Duration::from_millis(1) { - let gap = timestamp.duration_since(start) - - buffer_last_timestamp.duration_since(start) - - buffer_last_duration; + let gap = (buffer_last_timestamp.duration_since(start) + + buffer_last_duration) + - timestamp.duration_since(start); debug!("Gap between last buffer frame, inserting {gap:?} of silence"); @@ -229,7 +229,6 @@ impl AudioMixer { } frame.set_rate(rate as u32); - frame.data_mut(0).fill(0); let duration = Duration::from_secs_f64(leftover_chunk_size as f64 / rate as f64); @@ -241,6 +240,7 @@ impl AudioMixer { } } + // dbg!(frame.samples()); source.buffer_last = Some(( timestamp, Duration::from_secs_f64(frame.samples() as f64 / frame.rate() as f64), @@ -266,6 +266,8 @@ impl AudioMixer { self.samples_out += filtered.samples(); + // dbg!(filtered.samples(), timestamp); + if self .output .send((filtered, Timestamp::Instant(timestamp))) diff --git 
a/crates/recording/src/sources/screen_capture/windows.rs b/crates/recording/src/sources/screen_capture/windows.rs index 9bec2c4774..ea8dbaa38d 100644 --- a/crates/recording/src/sources/screen_capture/windows.rs +++ b/crates/recording/src/sources/screen_capture/windows.rs @@ -1,10 +1,10 @@ use super::*; -use crate::capture_pipeline::PerformanceCounterTimestamp; use ::windows::{ Graphics::Capture::GraphicsCaptureItem, Win32::Graphics::Direct3D11::{D3D11_BOX, ID3D11Device}, }; use cap_fail::fail_err; +use cap_timestamp::PerformanceCounterTimestamp; use cpal::traits::{DeviceTrait, HostTrait}; use kameo::prelude::*; use scap_ffmpeg::*; @@ -51,7 +51,7 @@ struct FrameHandler { last_cleanup: Instant, last_log: Instant, frame_events: VecDeque<(Instant, bool)>, - video_tx: Sender<(scap_direct3d::Frame, SourceTimestamp)>, + video_tx: Sender<(scap_direct3d::Frame, Timestamp)>, } impl Actor for FrameHandler { @@ -132,9 +132,7 @@ impl Message for FrameHandler { let frame_dropped = match self.video_tx.try_send(( msg.frame, - SourceTimestamp::PerformanceCounter(PerformanceCounterTimestamp::new( - timestamp.Duration, - )), + Timestamp::PerformanceCounter(PerformanceCounterTimestamp::new(timestamp.Duration)), )) { Err(flume::TrySendError::Disconnected(_)) => { warn!("Pipeline disconnected"); @@ -481,13 +479,13 @@ pub mod audio { impl WindowsAudioCapture { pub fn new( - audio_tx: Sender<(ffmpeg::frame::Audio, SourceTimestamp)>, + audio_tx: Sender<(ffmpeg::frame::Audio, Timestamp)>, ) -> Result { let capturer = scap_cpal::create_capturer( move |data, info, config| { use scap_ffmpeg::*; - let timestamp = SourceTimestamp::from_cpal(info.timestamp().capture); + let timestamp = Timestamp::from_cpal(info.timestamp().capture); let _ = audio_tx.send((data.as_ffmpeg(config), timestamp)); }, diff --git a/crates/scap-direct3d/src/lib.rs b/crates/scap-direct3d/src/lib.rs index 73edb47a6c..57f594d58d 100644 --- a/crates/scap-direct3d/src/lib.rs +++ b/crates/scap-direct3d/src/lib.rs @@ -3,16 +3,13 
@@ #![cfg(windows)] use std::{ - os::windows::io::AsRawHandle, sync::{ Arc, atomic::{AtomicBool, Ordering}, mpsc::RecvError, }, - thread::JoinHandle, time::Duration, }; - use windows::{ Foundation::{Metadata::ApiInformation, TypedEventHandler}, Graphics::{ @@ -23,7 +20,7 @@ use windows::{ DirectX::{Direct3D11::IDirect3DDevice, DirectXPixelFormat}, }, Win32::{ - Foundation::{HANDLE, HMODULE, LPARAM, S_FALSE, WPARAM}, + Foundation::HMODULE, Graphics::{ Direct3D::D3D_DRIVER_TYPE_HARDWARE, Direct3D11::{ @@ -41,16 +38,8 @@ use windows::{ IDXGIDevice, }, }, - System::{ - Threading::GetThreadId, - WinRT::{ - CreateDispatcherQueueController, DQTAT_COM_NONE, DQTYPE_THREAD_CURRENT, - Direct3D11::{CreateDirect3D11DeviceFromDXGIDevice, IDirect3DDxgiInterfaceAccess}, - DispatcherQueueOptions, RO_INIT_MULTITHREADED, RoInitialize, - }, - }, - UI::WindowsAndMessaging::{ - DispatchMessageW, GetMessageW, MSG, PostThreadMessageW, TranslateMessage, WM_QUIT, + System::WinRT::Direct3D11::{ + CreateDirect3D11DeviceFromDXGIDevice, IDirect3DDxgiInterfaceAccess, }, }, core::{HSTRING, IInspectable, Interface}, diff --git a/crates/timestamp/src/lib.rs b/crates/timestamp/src/lib.rs index 2d34220fee..7790329fe5 100644 --- a/crates/timestamp/src/lib.rs +++ b/crates/timestamp/src/lib.rs @@ -34,9 +34,13 @@ impl Timestamp { pub fn from_cpal(instant: cpal::StreamInstant) -> Self { #[cfg(windows)] - Self::PerformanceCounter(PerformanceCounterTimestamp::from_cpal(instant)); + { + Self::PerformanceCounter(PerformanceCounterTimestamp::from_cpal(instant)) + } #[cfg(target_os = "macos")] - Self::MachAbsoluteTime(MachAbsoluteTimestamp::from_cpal(instant)) + { + Self::MachAbsoluteTime(MachAbsoluteTimestamp::from_cpal(instant)) + } } } diff --git a/crates/timestamp/src/macos.rs b/crates/timestamp/src/macos.rs index a811a736f7..c65b074a45 100644 --- a/crates/timestamp/src/macos.rs +++ b/crates/timestamp/src/macos.rs @@ -1,6 +1,5 @@ -use std::{ops::Add, time::Duration}; - use cidre::mach::TimeBaseInfo; +use 
std::{ops::Add, time::Duration}; #[derive(Clone, Copy, Debug)] pub struct MachAbsoluteTimestamp(u64); diff --git a/crates/timestamp/src/win.rs b/crates/timestamp/src/win.rs index 1a88a81769..3f81a83d84 100644 --- a/crates/timestamp/src/win.rs +++ b/crates/timestamp/src/win.rs @@ -1,3 +1,5 @@ +use cpal::StreamInstant; +use std::{ops::Add, time::Duration}; use windows::Win32::System::Performance::{QueryPerformanceCounter, QueryPerformanceFrequency}; use super::*; From 17169d1d65451b87befb192214eb0586ad9ec3ed Mon Sep 17 00:00:00 2001 From: Brendan Allan Date: Wed, 10 Sep 2025 02:34:32 +0800 Subject: [PATCH 07/20] more fixes --- crates/enc-ffmpeg/src/audio/aac.rs | 2 +- .../src/audio/buffered_resampler.rs | 13 ++++- crates/recording/examples/recording-cli.rs | 22 ++++----- crates/recording/src/capture_pipeline.rs | 33 +++++-------- crates/recording/src/sources/audio_mixer.rs | 48 +++++++++++++------ .../src/sources/screen_capture/windows.rs | 2 + crates/recording/src/studio_recording.rs | 2 +- 7 files changed, 73 insertions(+), 49 deletions(-) diff --git a/crates/enc-ffmpeg/src/audio/aac.rs b/crates/enc-ffmpeg/src/audio/aac.rs index 281185c332..2f97657900 100644 --- a/crates/enc-ffmpeg/src/audio/aac.rs +++ b/crates/enc-ffmpeg/src/audio/aac.rs @@ -91,7 +91,7 @@ impl AACEncoder { encoder.set_rate(rate); encoder.set_format(output_config.sample_format); encoder.set_channel_layout(output_config.channel_layout()); - encoder.set_time_base(output_config.time_base); + encoder.set_time_base(FFRational(1, output_config.rate())); let encoder = encoder.open()?; diff --git a/crates/enc-ffmpeg/src/audio/buffered_resampler.rs b/crates/enc-ffmpeg/src/audio/buffered_resampler.rs index 7827db6a2a..c7706011d9 100644 --- a/crates/enc-ffmpeg/src/audio/buffered_resampler.rs +++ b/crates/enc-ffmpeg/src/audio/buffered_resampler.rs @@ -11,6 +11,8 @@ pub struct BufferedResampler { resampler: ffmpeg::software::resampling::Context, buffer: VecDeque<(ffmpeg::frame::Audio, i64)>, sample_index: 
usize, + // used to account for cases where pts is rounded down instead of up + min_next_pts: Option, } impl BufferedResampler { @@ -19,6 +21,7 @@ impl BufferedResampler { resampler, buffer: VecDeque::new(), sample_index: 0, + min_next_pts: None, } } @@ -46,7 +49,13 @@ impl BufferedResampler { *self.resampler.output() } - pub fn add_frame(&mut self, frame: ffmpeg::frame::Audio) { + pub fn add_frame(&mut self, mut frame: ffmpeg::frame::Audio) { + if let Some(min_next_pts) = self.min_next_pts { + if let Some(pts) = frame.pts() { + frame.set_pts(Some(pts.max(min_next_pts))); + } + } + let pts = frame.pts().unwrap(); let mut resampled_frame = ffmpeg::frame::Audio::empty(); @@ -76,6 +85,8 @@ impl BufferedResampler { next_pts = next_pts + samples as i64; } + + self.min_next_pts = Some(pts + frame.samples() as i64); } fn get_frame_inner(&mut self, samples: usize) -> Option { diff --git a/crates/recording/examples/recording-cli.rs b/crates/recording/examples/recording-cli.rs index fffc2bbc0e..cc9a1cff39 100644 --- a/crates/recording/examples/recording-cli.rs +++ b/crates/recording/examples/recording-cli.rs @@ -36,17 +36,17 @@ pub async fn main() { // .await // .unwrap(); - let (error_tx, _) = flume::bounded(1); - let mic_feed = MicrophoneFeed::spawn(MicrophoneFeed::new(error_tx)); - - mic_feed - .ask(microphone::SetInput { - label: MicrophoneFeed::default().map(|v| v.0).unwrap(), - }) - .await - .unwrap() - .await - .unwrap(); + // let (error_tx, _) = flume::bounded(1); + // let mic_feed = MicrophoneFeed::spawn(MicrophoneFeed::new(error_tx)); + + // mic_feed + // .ask(microphone::SetInput { + // label: MicrophoneFeed::default().map(|v| v.0).unwrap(), + // }) + // .await + // .unwrap() + // .await + // .unwrap(); // tokio::time::sleep(Duration::from_millis(10)).await; diff --git a/crates/recording/src/capture_pipeline.rs b/crates/recording/src/capture_pipeline.rs index 395a6bd717..c72ceabb9e 100644 --- a/crates/recording/src/capture_pipeline.rs +++ 
b/crates/recording/src/capture_pipeline.rs @@ -15,7 +15,7 @@ use std::{ future::Future, path::PathBuf, sync::{Arc, atomic::AtomicBool}, - time::{Duration, SystemTime}, + time::SystemTime, }; pub trait MakeCapturePipeline: ScreenCaptureFormat + std::fmt::Debug + 'static { @@ -395,8 +395,6 @@ impl MakeCapturePipeline for screen_capture::Direct3DCapture { where Self: Sized, { - use std::sync::mpsc; - use cap_enc_ffmpeg::{AACEncoder, AudioEncoder}; use windows::Graphics::SizeInt32; @@ -490,29 +488,21 @@ impl MakeCapturePipeline for screen_capture::Direct3DCapture { let output = Arc::new(std::sync::Mutex::new(output)); - let (screen_first_tx, screen_first_rx) = mpsc::sync_channel(1); + let (first_frame_tx, first_frame_rx) = tokio::sync::oneshot::channel::(); if let Some(mut audio_encoder) = audio_encoder { builder.spawn_source("audio_mixer", audio_mixer); - // let is_done = is_done.clone(); let output = output.clone(); builder.spawn_task("audio_encoding", move |ready| { - let screen_first_offset = loop { - match screen_first_rx.recv_timeout(Duration::from_millis(5)) { - Ok(offset) => { - audio_rx.drain().count(); - break offset; - } - Err(mpsc::RecvTimeoutError::Timeout) => continue, - Err(mpsc::RecvTimeoutError::Disconnected) => return Ok(()), - } - }; - let _ = ready.send(Ok(())); + let time = first_frame_rx.blocking_recv().unwrap(); + let screen_first_offset = time.duration_since(start_time); + while let Ok((mut frame, timestamp)) = audio_rx.recv() { let ts_offset = timestamp.duration_since(start_time); + dbg!(ts_offset); let Some(ts_offset) = ts_offset.checked_sub(screen_first_offset) else { continue; @@ -520,11 +510,13 @@ impl MakeCapturePipeline for screen_capture::Direct3DCapture { let pts = (ts_offset.as_secs_f64() * frame.rate() as f64) as i64; frame.set_pts(Some(pts)); + dbg!(pts); if let Ok(mut output) = output.lock() { - audio_encoder.queue_frame(frame, &mut *output); + audio_encoder.queue_frame(frame, &mut output) } } + Ok(()) }); } @@ -540,7 +532,7 @@ impl 
MakeCapturePipeline for screen_capture::Direct3DCapture { let _ = ready.send(Ok(())); - let mut screen_first_tx = Some(screen_first_tx); + let mut first_frame_tx = Some(first_frame_tx); while let Ok(e) = encoder.get_event() { match e { @@ -560,9 +552,8 @@ impl MakeCapturePipeline for screen_capture::Direct3DCapture { PerformanceCounterTimestamp::new(frame_time.Duration), ); - if let Some(screen_first_tx) = screen_first_tx.take() { - let _ = screen_first_tx - .try_send(timestamp.duration_since(start_time)); + if let Some(first_frame_tx) = first_frame_tx.take() { + let _ = first_frame_tx.send(timestamp); } encoder diff --git a/crates/recording/src/sources/audio_mixer.rs b/crates/recording/src/sources/audio_mixer.rs index 0c5dba774b..a5a4956e6b 100644 --- a/crates/recording/src/sources/audio_mixer.rs +++ b/crates/recording/src/sources/audio_mixer.rs @@ -116,6 +116,8 @@ impl AudioMixerBuilder { _filter_graph: filter_graph, _amix: amix, _aformat: aformat, + start_timestamp: None, + timestamps: Timestamps::now(), }) } } @@ -131,6 +133,8 @@ pub struct AudioMixer { _filter_graph: ffmpeg::filter::Graph, _amix: ffmpeg::filter::Context, _aformat: ffmpeg::filter::Context, + timestamps: Timestamps, + start_timestamp: Option, } impl AudioMixer { @@ -141,15 +145,15 @@ impl AudioMixer { ); pub const BUFFER_TIMEOUT: Duration = Duration::from_millis(10); - fn buffer_sources(&mut self, start: Timestamps) { + fn buffer_sources(&mut self) { for source in &mut self.sources { let rate = source.info.rate(); while let Ok((frame, timestamp)) = source.rx.try_recv() { // if gap between incoming and last, insert silence if let Some((buffer_last_timestamp, buffer_last_duration)) = source.buffer_last { - let timestamp_elapsed = timestamp.duration_since(start); - let buffer_last_elapsed = buffer_last_timestamp.duration_since(start); + let timestamp_elapsed = timestamp.duration_since(self.timestamps); + let buffer_last_elapsed = buffer_last_timestamp.duration_since(self.timestamps); if 
timestamp_elapsed > buffer_last_elapsed { let elapsed_since_last_frame = timestamp_elapsed - buffer_last_elapsed; @@ -158,9 +162,9 @@ impl AudioMixer { && buffer_last_duration - elapsed_since_last_frame >= Duration::from_millis(1) { - let gap = (buffer_last_timestamp.duration_since(start) + let gap = (buffer_last_timestamp.duration_since(self.timestamps) + buffer_last_duration) - - timestamp.duration_since(start); + - timestamp.duration_since(self.timestamps); debug!("Gap between last buffer frame, inserting {gap:?} of silence"); @@ -183,7 +187,7 @@ impl AudioMixer { } } } else { - let gap = timestamp.duration_since(start); + let gap = timestamp.duration_since(self.timestamps); if !gap.is_zero() { debug!("Gap from beginning of stream, inserting {gap:?} of silence"); @@ -210,8 +214,9 @@ impl AudioMixer { frame.set_rate(rate as u32); - let timestamp = - Timestamp::Instant(start.instant() + chunk_duration * i as u32); + let timestamp = Timestamp::Instant( + self.timestamps.instant() + chunk_duration * i as u32, + ); source.buffer_last = Some((timestamp, chunk_duration)); source.buffer.push_back((frame, timestamp)); } @@ -233,14 +238,15 @@ impl AudioMixer { let duration = Duration::from_secs_f64(leftover_chunk_size as f64 / rate as f64); let timestamp = Timestamp::Instant( - start.instant() + chunk_duration * chunks.floor() as u32 + duration, + self.timestamps.instant() + + chunk_duration * chunks.floor() as u32 + + duration, ); source.buffer_last = Some((timestamp, duration)); source.buffer.push_back((frame, timestamp)); } } - // dbg!(frame.samples()); source.buffer_last = Some(( timestamp, Duration::from_secs_f64(frame.samples() as f64 / frame.rate() as f64), @@ -251,7 +257,23 @@ impl AudioMixer { } fn tick(&mut self, start: Timestamps, now: Instant) -> Result<(), ()> { - self.buffer_sources(start); + self.buffer_sources(); + + if self.start_timestamp.is_none() { + self.start_timestamp = self + .sources + .iter() + .filter_map(|s| s.buffer.get(0)) + .min_by(|a, b| { 
+ a.1.duration_since(self.timestamps) + .cmp(&b.1.duration_since(self.timestamps)) + }) + .map(|v| v.1); + } + + let Some(start_timestamp) = self.start_timestamp else { + return Ok(()); + }; for (i, source) in self.sources.iter_mut().enumerate() { for buffer in source.buffer.drain(..) { @@ -262,12 +284,10 @@ impl AudioMixer { let mut filtered = ffmpeg::frame::Audio::empty(); while self.abuffersink.sink().frame(&mut filtered).is_ok() { let elapsed = Duration::from_secs_f64(self.samples_out as f64 / filtered.rate() as f64); - let timestamp = start.instant() + elapsed; + let timestamp = start.instant() + start_timestamp.duration_since(start) + elapsed; self.samples_out += filtered.samples(); - // dbg!(filtered.samples(), timestamp); - if self .output .send((filtered, Timestamp::Instant(timestamp))) diff --git a/crates/recording/src/sources/screen_capture/windows.rs b/crates/recording/src/sources/screen_capture/windows.rs index ea8dbaa38d..44ca916aec 100644 --- a/crates/recording/src/sources/screen_capture/windows.rs +++ b/crates/recording/src/sources/screen_capture/windows.rs @@ -487,6 +487,8 @@ pub mod audio { let timestamp = Timestamp::from_cpal(info.timestamp().capture); + dbg!(timestamp); + let _ = audio_tx.send((data.as_ffmpeg(config), timestamp)); }, move |e| { diff --git a/crates/recording/src/studio_recording.rs b/crates/recording/src/studio_recording.rs index d86f6a1e09..90ebd28904 100644 --- a/crates/recording/src/studio_recording.rs +++ b/crates/recording/src/studio_recording.rs @@ -861,7 +861,7 @@ async fn create_segment_pipeline( let elapsed = timestamp.duration_since(start_time) - first_timestamp.duration_since(start_time); - frame.set_pts(Some((elapsed.as_secs_f64() * rate) as i64)); + frame.set_pts(Some(dbg!(elapsed.as_secs_f64() * rate).round() as i64)); output.queue_frame(frame); } From 1746c94f06382b295cd4f062e5d048053ae4b158 Mon Sep 17 00:00:00 2001 From: Brendan Allan Date: Wed, 10 Sep 2025 18:57:53 +0800 Subject: [PATCH 08/20] make audio mixer 
work with 2 inputs that stop and start at different times --- crates/recording/examples/recording-cli.rs | 34 +-- crates/recording/src/capture_pipeline.rs | 2 - crates/recording/src/sources/audio_mixer.rs | 205 ++++++++++-------- .../src/sources/screen_capture/windows.rs | 2 - crates/recording/src/studio_recording.rs | 2 +- crates/timestamp/src/lib.rs | 30 +++ crates/timestamp/src/macos.rs | 16 +- crates/timestamp/src/win.rs | 17 +- 8 files changed, 189 insertions(+), 119 deletions(-) diff --git a/crates/recording/examples/recording-cli.rs b/crates/recording/examples/recording-cli.rs index cc9a1cff39..d810466377 100644 --- a/crates/recording/examples/recording-cli.rs +++ b/crates/recording/examples/recording-cli.rs @@ -1,7 +1,7 @@ use cap_recording::{feeds::microphone, screen_capture::ScreenCaptureTarget, *}; use kameo::Actor; use scap_targets::Display; -use std::{sync::Arc, time::Duration}; +use std::time::Duration; use tracing::info; #[tokio::main] @@ -36,19 +36,25 @@ pub async fn main() { // .await // .unwrap(); - // let (error_tx, _) = flume::bounded(1); - // let mic_feed = MicrophoneFeed::spawn(MicrophoneFeed::new(error_tx)); - - // mic_feed - // .ask(microphone::SetInput { - // label: MicrophoneFeed::default().map(|v| v.0).unwrap(), - // }) - // .await - // .unwrap() - // .await - // .unwrap(); - - // tokio::time::sleep(Duration::from_millis(10)).await; + let (error_tx, _) = flume::bounded(1); + let mic_feed = MicrophoneFeed::spawn(MicrophoneFeed::new(error_tx)); + + mic_feed + .ask(microphone::SetInput { + label: + // MicrophoneFeed::list() + // .into_iter() + // .find(|(k, _)| k.contains("Focusrite")) + MicrophoneFeed::default() + .map(|v| v.0) + .unwrap(), + }) + .await + .unwrap() + .await + .unwrap(); + + tokio::time::sleep(Duration::from_millis(10)).await; let (handle, _ready_rx) = instant_recording::Actor::builder( dir.path().into(), diff --git a/crates/recording/src/capture_pipeline.rs b/crates/recording/src/capture_pipeline.rs index 
c72ceabb9e..f34b6e9134 100644 --- a/crates/recording/src/capture_pipeline.rs +++ b/crates/recording/src/capture_pipeline.rs @@ -502,7 +502,6 @@ impl MakeCapturePipeline for screen_capture::Direct3DCapture { while let Ok((mut frame, timestamp)) = audio_rx.recv() { let ts_offset = timestamp.duration_since(start_time); - dbg!(ts_offset); let Some(ts_offset) = ts_offset.checked_sub(screen_first_offset) else { continue; @@ -510,7 +509,6 @@ impl MakeCapturePipeline for screen_capture::Direct3DCapture { let pts = (ts_offset.as_secs_f64() * frame.rate() as f64) as i64; frame.set_pts(Some(pts)); - dbg!(pts); if let Ok(mut output) = output.lock() { audio_encoder.queue_frame(frame, &mut output) diff --git a/crates/recording/src/sources/audio_mixer.rs b/crates/recording/src/sources/audio_mixer.rs index a5a4956e6b..dd54eb723f 100644 --- a/crates/recording/src/sources/audio_mixer.rs +++ b/crates/recording/src/sources/audio_mixer.rs @@ -126,7 +126,7 @@ pub struct AudioMixer { sources: Vec, samples_out: usize, output: Sender<(ffmpeg::frame::Audio, Timestamp)>, - last_tick: Option, + last_tick: Option, // sample_timestamps: VecDeque<(usize, Timestamp)>, abuffers: Vec, abuffersink: ffmpeg::filter::Context, @@ -145,10 +145,44 @@ impl AudioMixer { ); pub const BUFFER_TIMEOUT: Duration = Duration::from_millis(10); - fn buffer_sources(&mut self) { + fn buffer_sources(&mut self, now: Timestamp) { for source in &mut self.sources { let rate = source.info.rate(); + if let Some(last) = source.buffer_last { + let last_end = &last.0 + last.1; + if let Some(elapsed_since_last) = now + .duration_since(self.timestamps) + .checked_sub(last_end.duration_since(self.timestamps)) + { + let mut remaining = elapsed_since_last; + while remaining > Self::BUFFER_TIMEOUT { + let chunk_samples = + (Self::BUFFER_TIMEOUT.as_secs_f64() * rate as f64) as usize; + + let mut frame = ffmpeg::frame::Audio::new( + source.info.sample_format, + chunk_samples, + source.info.channel_layout(), + ); + for i in 
0..frame.planes() { + frame.data_mut(i).fill(0); + } + + frame.set_rate(source.info.rate() as u32); + + let timestamp = last_end + (elapsed_since_last - remaining); + source.buffer_last = Some(( + timestamp, + Duration::from_secs_f64(chunk_samples as f64 / rate as f64), + )); + source.buffer.push_back((frame, timestamp)); + + remaining -= Self::BUFFER_TIMEOUT; + } + } + } + while let Ok((frame, timestamp)) = source.rx.try_recv() { // if gap between incoming and last, insert silence if let Some((buffer_last_timestamp, buffer_last_duration)) = source.buffer_last { @@ -158,15 +192,13 @@ impl AudioMixer { if timestamp_elapsed > buffer_last_elapsed { let elapsed_since_last_frame = timestamp_elapsed - buffer_last_elapsed; - if elapsed_since_last_frame < buffer_last_duration - && buffer_last_duration - elapsed_since_last_frame - >= Duration::from_millis(1) + if let Some(diff) = + elapsed_since_last_frame.checked_sub(buffer_last_duration) + && diff >= Duration::from_millis(1) { - let gap = (buffer_last_timestamp.duration_since(self.timestamps) - + buffer_last_duration) - - timestamp.duration_since(self.timestamps); + let gap = diff; - debug!("Gap between last buffer frame, inserting {gap:?} of silence"); + print!("Gap between last buffer frame, inserting {gap:?} of silence"); let silence_samples_needed = (gap.as_secs_f64()) * rate as f64; let silence_samples_count = silence_samples_needed.ceil() as usize; @@ -177,34 +209,60 @@ impl AudioMixer { source.info.channel_layout(), ); + for i in 0..frame.planes() { + frame.data_mut(i).fill(0); + } + frame.set_rate(source.info.rate() as u32); + let timestamp = buffer_last_timestamp + gap; source.buffer_last = Some(( - &buffer_last_timestamp + gap, + timestamp, Duration::from_secs_f64(silence_samples_count as f64 / rate as f64), )); - source.buffer.push_back((frame, buffer_last_timestamp)); + source.buffer.push_back((frame, timestamp)); } } - } else { - let gap = timestamp.duration_since(self.timestamps); + } - if !gap.is_zero() { - 
debug!("Gap from beginning of stream, inserting {gap:?} of silence"); + source.buffer_last = Some(( + timestamp, + Duration::from_secs_f64(frame.samples() as f64 / frame.rate() as f64), + )); + source.buffer.push_back((frame, timestamp)); + } + } - // TODO: refactor to be one while loop + if self.start_timestamp.is_none() { + self.start_timestamp = self + .sources + .iter() + .filter_map(|s| s.buffer.get(0)) + .min_by(|a, b| { + a.1.duration_since(self.timestamps) + .cmp(&b.1.duration_since(self.timestamps)) + }) + .map(|v| v.1); + } - let gap_samples = gap.as_millis() as usize * rate as usize / 1000; - let chunk_size = rate as usize / 200; + if let Some(start_timestamp) = self.start_timestamp { + if let Some(elapsed_since_start) = now + .duration_since(self.timestamps) + .checked_sub(start_timestamp.duration_since(self.timestamps)) + && elapsed_since_start > Self::BUFFER_TIMEOUT + { + for source in &mut self.sources { + if source.buffer_last.is_none() { + let rate = source.info.rate(); - let chunks = gap_samples as f64 / chunk_size as f64; + let mut remaining = elapsed_since_start; + while remaining > Self::BUFFER_TIMEOUT { + let chunk_samples = + (Self::BUFFER_TIMEOUT.as_secs_f64() * rate as f64) as usize; - let chunk_duration = - Duration::from_secs_f64(chunk_size as f64 / rate as f64); - for i in 0..chunks.floor() as usize { let mut frame = ffmpeg::frame::Audio::new( source.info.sample_format, - chunk_size, + chunk_samples, source.info.channel_layout(), ); @@ -212,64 +270,25 @@ impl AudioMixer { frame.data_mut(i).fill(0); } - frame.set_rate(rate as u32); - - let timestamp = Timestamp::Instant( - self.timestamps.instant() + chunk_duration * i as u32, - ); - source.buffer_last = Some((timestamp, chunk_duration)); - source.buffer.push_back((frame, timestamp)); - } - - let leftover_chunk_size = (chunks.fract() * chunk_size as f64) as usize; + frame.set_rate(source.info.rate() as u32); - let mut frame = ffmpeg::frame::Audio::new( - source.info.sample_format, - 
leftover_chunk_size, - source.info.channel_layout(), - ); + let timestamp = start_timestamp + (elapsed_since_start - remaining); + source.buffer_last = Some(( + timestamp, + Duration::from_secs_f64(chunk_samples as f64 / rate as f64), + )); + source.buffer.push_front((frame, timestamp)); - for i in 0..frame.planes() { - frame.data_mut(i).fill(0); + remaining -= Self::BUFFER_TIMEOUT; } - - frame.set_rate(rate as u32); - - let duration = - Duration::from_secs_f64(leftover_chunk_size as f64 / rate as f64); - let timestamp = Timestamp::Instant( - self.timestamps.instant() - + chunk_duration * chunks.floor() as u32 - + duration, - ); - source.buffer_last = Some((timestamp, duration)); - source.buffer.push_back((frame, timestamp)); } } - - source.buffer_last = Some(( - timestamp, - Duration::from_secs_f64(frame.samples() as f64 / frame.rate() as f64), - )); - source.buffer.push_back((frame, timestamp)); } } } - fn tick(&mut self, start: Timestamps, now: Instant) -> Result<(), ()> { - self.buffer_sources(); - - if self.start_timestamp.is_none() { - self.start_timestamp = self - .sources - .iter() - .filter_map(|s| s.buffer.get(0)) - .min_by(|a, b| { - a.1.duration_since(self.timestamps) - .cmp(&b.1.duration_since(self.timestamps)) - }) - .map(|v| v.1); - } + fn tick(&mut self, start: Timestamps, now: Timestamp) -> Result<(), ()> { + self.buffer_sources(now); let Some(start_timestamp) = self.start_timestamp else { return Ok(()); @@ -304,14 +323,6 @@ impl AudioMixer { Ok(()) } - pub fn run(&mut self) { - let start = Timestamps::now(); - - while let Ok(()) = self.tick(start, Instant::now()) { - std::thread::sleep(Duration::from_millis(5)); - } - } - pub fn builder(output: Sender<(ffmpeg::frame::Audio, Timestamp)>) -> AudioMixerBuilder { AudioMixerBuilder::new(output) } @@ -341,7 +352,7 @@ impl PipelineSourceTask for AudioMixerBuilder { } mixer - .tick(start, Instant::now()) + .tick(start, Timestamp::Instant(Instant::now())) .map_err(|()| format!("Audio mixer tick failed"))?; 
std::thread::sleep(Duration::from_millis(5)); @@ -368,7 +379,6 @@ mod test { fn mix_sources() { let (tx, output_rx) = flume::bounded(4); let mut mixer = AudioMixerBuilder::new(tx); - let start = Timestamps::now(); let (tx1, rx) = flume::bounded(4); mixer.add_source(SOURCE_INFO, rx); @@ -377,6 +387,7 @@ mod test { mixer.add_source(SOURCE_INFO, rx); let mut mixer = mixer.build().unwrap(); + let start = mixer.timestamps; tx1.send(( SOURCE_INFO.wrap_frame(&vec![128, 255, 255, 255]), @@ -391,7 +402,7 @@ mod test { let _ = mixer.tick( start, - start.instant() + Duration::from_secs_f64(4.0 / SAMPLE_RATE as f64), + Timestamp::Instant(start.instant() + Duration::from_secs_f64(4.0 / SAMPLE_RATE as f64)), ); let (frame, _) = output_rx.recv().expect("No output frame"); @@ -426,7 +437,7 @@ mod test { )) .unwrap(); - mixer.buffer_sources(start); + mixer.buffer_sources(Timestamp::Instant(start.instant())); assert_eq!(mixer.sources[0].buffer.len(), 1); assert!(mixer.sources[0].rx.is_empty()); @@ -436,7 +447,6 @@ mod test { fn frame_gap() { let (output_tx, _) = flume::bounded(4); let mut mixer = AudioMixerBuilder::new(output_tx); - let start = Timestamps::now(); let (tx, rx) = flume::bounded(4); mixer.add_source(SOURCE_INFO, rx); @@ -445,24 +455,27 @@ mod test { tx.send(( SOURCE_INFO.wrap_frame(&vec![0; SAMPLES_SECOND / 2]), - Timestamp::Instant(start.instant()), + Timestamp::Instant(mixer.timestamps.instant()), )) .unwrap(); tx.send(( SOURCE_INFO.wrap_frame(&vec![0; SAMPLES_SECOND / 2]), - Timestamp::Instant(start.instant() + ONE_SECOND), + Timestamp::Instant(mixer.timestamps.instant() + ONE_SECOND), )) .unwrap(); - mixer.buffer_sources(start); + mixer.buffer_sources(Timestamp::Instant(mixer.timestamps.instant())); let source = &mixer.sources[0]; assert_eq!(source.buffer.len(), 3); assert!(source.rx.is_empty()); - assert_eq!(source.buffer[1].1.duration_since(start), ONE_SECOND / 2); + assert_eq!( + source.buffer[1].1.duration_since(mixer.timestamps), + ONE_SECOND / 2 + ); 
assert_eq!( source.buffer[1].0.samples(), SOURCE_INFO.rate() as usize / 2 @@ -473,12 +486,12 @@ mod test { fn start_gap() { let (output_tx, _) = flume::bounded(4); let mut mixer = AudioMixerBuilder::new(output_tx); - let start = Timestamps::now(); let (tx, rx) = flume::bounded(4); mixer.add_source(SOURCE_INFO, rx); let mut mixer = mixer.build().unwrap(); + let start = mixer.timestamps; tx.send(( SOURCE_INFO.wrap_frame(&vec![0; SAMPLES_SECOND / 2]), @@ -486,14 +499,14 @@ mod test { )) .unwrap(); - mixer.buffer_sources(start); + mixer.buffer_sources(Timestamp::Instant(start.instant())); let source = &mixer.sources[0]; - assert_eq!(source.buffer.len(), 2); + assert_eq!(source.buffer.len(), 1); assert!(source.rx.is_empty()); - assert_eq!(source.buffer[0].1.duration_since(start), Duration::ZERO); + assert_eq!(source.buffer[0].1.duration_since(start), ONE_SECOND / 2); assert_eq!( source.buffer[0].0.samples(), SOURCE_INFO.rate() as usize / 2 @@ -504,12 +517,12 @@ mod test { fn after_draining() { let (output_tx, _) = flume::bounded(4); let mut mixer = AudioMixerBuilder::new(output_tx); - let start = Timestamps::now(); let (tx, rx) = flume::bounded(4); mixer.add_source(SOURCE_INFO, rx); let mut mixer = mixer.build().unwrap(); + let start = mixer.timestamps; tx.send(( SOURCE_INFO.wrap_frame(&vec![0; SAMPLES_SECOND / 2]), @@ -517,7 +530,7 @@ mod test { )) .unwrap(); - mixer.buffer_sources(start); + mixer.buffer_sources(Timestamp::Instant(start.instant())); mixer.sources[0].buffer.clear(); @@ -527,7 +540,7 @@ mod test { )) .unwrap(); - mixer.buffer_sources(start); + mixer.buffer_sources(Timestamp::Instant(start.instant() + ONE_SECOND)); let source = &mixer.sources[0]; diff --git a/crates/recording/src/sources/screen_capture/windows.rs b/crates/recording/src/sources/screen_capture/windows.rs index 44ca916aec..ea8dbaa38d 100644 --- a/crates/recording/src/sources/screen_capture/windows.rs +++ b/crates/recording/src/sources/screen_capture/windows.rs @@ -487,8 +487,6 @@ pub mod 
audio { let timestamp = Timestamp::from_cpal(info.timestamp().capture); - dbg!(timestamp); - let _ = audio_tx.send((data.as_ffmpeg(config), timestamp)); }, move |e| { diff --git a/crates/recording/src/studio_recording.rs b/crates/recording/src/studio_recording.rs index 90ebd28904..082168c95e 100644 --- a/crates/recording/src/studio_recording.rs +++ b/crates/recording/src/studio_recording.rs @@ -861,7 +861,7 @@ async fn create_segment_pipeline( let elapsed = timestamp.duration_since(start_time) - first_timestamp.duration_since(start_time); - frame.set_pts(Some(dbg!(elapsed.as_secs_f64() * rate).round() as i64)); + frame.set_pts(Some((elapsed.as_secs_f64() * rate).round() as i64)); output.queue_frame(frame); } diff --git a/crates/timestamp/src/lib.rs b/crates/timestamp/src/lib.rs index 7790329fe5..abd6d03482 100644 --- a/crates/timestamp/src/lib.rs +++ b/crates/timestamp/src/lib.rs @@ -59,6 +59,36 @@ impl std::ops::Add for &Timestamp { } } +impl std::ops::Add for Timestamp { + type Output = Timestamp; + + fn add(self, rhs: Duration) -> Self::Output { + match self { + Timestamp::Instant(i) => Timestamp::Instant(i + rhs), + Timestamp::SystemTime(t) => Timestamp::SystemTime(t + rhs), + #[cfg(windows)] + Timestamp::PerformanceCounter(c) => Timestamp::PerformanceCounter(c + rhs), + #[cfg(target_os = "macos")] + Timestamp::MachAbsoluteTime(c) => Timestamp::MachAbsoluteTime(c + rhs), + } + } +} + +impl std::ops::Sub for Timestamp { + type Output = Timestamp; + + fn sub(self, rhs: Duration) -> Self::Output { + match self { + Timestamp::Instant(i) => Timestamp::Instant(i - rhs), + Timestamp::SystemTime(t) => Timestamp::SystemTime(t - rhs), + #[cfg(windows)] + Timestamp::PerformanceCounter(c) => Timestamp::PerformanceCounter(c - rhs), + #[cfg(target_os = "macos")] + Timestamp::MachAbsoluteTime(c) => Timestamp::MachAbsoluteTime(c - rhs), + } + } +} + #[derive(Clone, Copy, Debug)] pub struct Timestamps { instant: Instant, diff --git a/crates/timestamp/src/macos.rs 
b/crates/timestamp/src/macos.rs index c65b074a45..a6b50893cd 100644 --- a/crates/timestamp/src/macos.rs +++ b/crates/timestamp/src/macos.rs @@ -1,5 +1,8 @@ use cidre::mach::TimeBaseInfo; -use std::{ops::Add, time::Duration}; +use std::{ + ops::{Add, Sub}, + time::Duration, +}; #[derive(Clone, Copy, Debug)] pub struct MachAbsoluteTimestamp(u64); @@ -37,3 +40,14 @@ impl Add for MachAbsoluteTimestamp { Self((self.0 as f64 * rhs.as_secs_f64() * freq) as u64) } } + +impl Sub for MachAbsoluteTimestamp { + type Output = Self; + + fn sub(self, rhs: Duration) -> Self::Output { + let info = TimeBaseInfo::new(); + let freq = info.numer as f64 / info.denom as f64; + + Self((self.0 as f64 / freq - rhs.as_millis() as f64) as u64) + } +} diff --git a/crates/timestamp/src/win.rs b/crates/timestamp/src/win.rs index 3f81a83d84..c690259d51 100644 --- a/crates/timestamp/src/win.rs +++ b/crates/timestamp/src/win.rs @@ -1,9 +1,10 @@ use cpal::StreamInstant; -use std::{ops::Add, time::Duration}; +use std::{ + ops::{Add, Sub}, + time::Duration, +}; use windows::Win32::System::Performance::{QueryPerformanceCounter, QueryPerformanceFrequency}; -use super::*; - #[derive(Clone, Copy, Debug)] pub struct PerformanceCounterTimestamp(i64); @@ -41,3 +42,13 @@ impl Add for PerformanceCounterTimestamp { Self(self.0 + (rhs.as_secs_f64() * freq as f64) as i64) } } + +impl Sub for PerformanceCounterTimestamp { + type Output = Self; + + fn sub(self, rhs: Duration) -> Self::Output { + let mut freq = 0; + unsafe { QueryPerformanceFrequency(&mut freq) }.unwrap(); + Self(self.0 - (rhs.as_secs_f64() * freq as f64) as i64) + } +} From 8732e438912361bc165103d79197dec8498deb49 Mon Sep 17 00:00:00 2001 From: Brendan Allan Date: Wed, 10 Sep 2025 21:11:59 +0800 Subject: [PATCH 09/20] fix on macos --- crates/recording/examples/recording-cli.rs | 4 +++- crates/recording/src/sources/audio_mixer.rs | 17 ++++++++++------- crates/timestamp/src/macos.rs | 4 ++-- 3 files changed, 15 insertions(+), 10 deletions(-) diff 
--git a/crates/recording/examples/recording-cli.rs b/crates/recording/examples/recording-cli.rs index d810466377..9bdcbf9efd 100644 --- a/crates/recording/examples/recording-cli.rs +++ b/crates/recording/examples/recording-cli.rs @@ -63,7 +63,9 @@ pub async fn main() { }, ) .with_system_audio(true) - // .with_mic_feed(Arc::new(mic_feed.ask(microphone::Lock).await.unwrap())) + .with_mic_feed(std::sync::Arc::new( + mic_feed.ask(microphone::Lock).await.unwrap(), + )) .build() .await .unwrap(); diff --git a/crates/recording/src/sources/audio_mixer.rs b/crates/recording/src/sources/audio_mixer.rs index dd54eb723f..840d4b706c 100644 --- a/crates/recording/src/sources/audio_mixer.rs +++ b/crates/recording/src/sources/audio_mixer.rs @@ -15,7 +15,7 @@ use tracing::debug; // Current problem is generating an output timestamp that lines up with the input's timestamp struct MixerSource { - rx: Receiver<(ffmpeg::frame::Audio, Timestamp)>, + rx: std::iter::Peekable>, info: AudioInfo, buffer: VecDeque<(ffmpeg::frame::Audio, Timestamp)>, buffer_last: Option<(Timestamp, Duration)>, @@ -41,7 +41,7 @@ impl AudioMixerBuilder { pub fn add_source(&mut self, info: AudioInfo, rx: Receiver<(ffmpeg::frame::Audio, Timestamp)>) { self.sources.push(MixerSource { info, - rx, + rx: rx.into_iter().peekable(), buffer: VecDeque::new(), buffer_last: None, }); @@ -143,14 +143,14 @@ impl AudioMixer { 48_000, 2, ); - pub const BUFFER_TIMEOUT: Duration = Duration::from_millis(10); + pub const BUFFER_TIMEOUT: Duration = Duration::from_millis(200); fn buffer_sources(&mut self, now: Timestamp) { for source in &mut self.sources { let rate = source.info.rate(); if let Some(last) = source.buffer_last { - let last_end = &last.0 + last.1; + let last_end = last.0 + last.1; if let Some(elapsed_since_last) = now .duration_since(self.timestamps) .checked_sub(last_end.duration_since(self.timestamps)) @@ -165,13 +165,14 @@ impl AudioMixer { chunk_samples, source.info.channel_layout(), ); + 
frame.set_rate(source.info.rate() as u32); + for i in 0..frame.planes() { frame.data_mut(i).fill(0); } - frame.set_rate(source.info.rate() as u32); - let timestamp = last_end + (elapsed_since_last - remaining); + dbg!(timestamp); source.buffer_last = Some(( timestamp, Duration::from_secs_f64(chunk_samples as f64 / rate as f64), @@ -183,7 +184,7 @@ impl AudioMixer { } } - while let Ok((frame, timestamp)) = source.rx.try_recv() { + while let Some((frame, timestamp)) = source.rx.next() { // if gap between incoming and last, insert silence if let Some((buffer_last_timestamp, buffer_last_duration)) = source.buffer_last { let timestamp_elapsed = timestamp.duration_since(self.timestamps); @@ -216,6 +217,7 @@ impl AudioMixer { frame.set_rate(source.info.rate() as u32); let timestamp = buffer_last_timestamp + gap; + dbg!(timestamp); source.buffer_last = Some(( timestamp, Duration::from_secs_f64(silence_samples_count as f64 / rate as f64), @@ -273,6 +275,7 @@ impl AudioMixer { frame.set_rate(source.info.rate() as u32); let timestamp = start_timestamp + (elapsed_since_start - remaining); + dbg!(timestamp); source.buffer_last = Some(( timestamp, Duration::from_secs_f64(chunk_samples as f64 / rate as f64), diff --git a/crates/timestamp/src/macos.rs b/crates/timestamp/src/macos.rs index a6b50893cd..2571c71ee4 100644 --- a/crates/timestamp/src/macos.rs +++ b/crates/timestamp/src/macos.rs @@ -37,7 +37,7 @@ impl Add for MachAbsoluteTimestamp { let info = TimeBaseInfo::new(); let freq = info.numer as f64 / info.denom as f64; - Self((self.0 as f64 * rhs.as_secs_f64() * freq) as u64) + Self((self.0 as f64 + rhs.as_secs_f64() * freq) as u64) } } @@ -48,6 +48,6 @@ impl Sub for MachAbsoluteTimestamp { let info = TimeBaseInfo::new(); let freq = info.numer as f64 / info.denom as f64; - Self((self.0 as f64 / freq - rhs.as_millis() as f64) as u64) + Self((self.0 as f64 - rhs.as_secs_f64() * freq) as u64) } } From 1526bb34e94ce9118033d8efa67459c9f63f7f5a Mon Sep 17 00:00:00 2001 From: Brendan 
Allan Date: Wed, 10 Sep 2025 22:08:33 +0800 Subject: [PATCH 10/20] no more peek iter :( --- crates/recording/examples/recording-cli.rs | 6 +++--- crates/recording/src/sources/audio_mixer.rs | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/crates/recording/examples/recording-cli.rs b/crates/recording/examples/recording-cli.rs index 9bdcbf9efd..26200723a0 100644 --- a/crates/recording/examples/recording-cli.rs +++ b/crates/recording/examples/recording-cli.rs @@ -63,9 +63,9 @@ pub async fn main() { }, ) .with_system_audio(true) - .with_mic_feed(std::sync::Arc::new( - mic_feed.ask(microphone::Lock).await.unwrap(), - )) + // .with_mic_feed(std::sync::Arc::new( + // mic_feed.ask(microphone::Lock).await.unwrap(), + // )) .build() .await .unwrap(); diff --git a/crates/recording/src/sources/audio_mixer.rs b/crates/recording/src/sources/audio_mixer.rs index 840d4b706c..6a84ba1fad 100644 --- a/crates/recording/src/sources/audio_mixer.rs +++ b/crates/recording/src/sources/audio_mixer.rs @@ -15,7 +15,7 @@ use tracing::debug; // Current problem is generating an output timestamp that lines up with the input's timestamp struct MixerSource { - rx: std::iter::Peekable>, + rx: Receiver<(ffmpeg::frame::Audio, Timestamp)>, info: AudioInfo, buffer: VecDeque<(ffmpeg::frame::Audio, Timestamp)>, buffer_last: Option<(Timestamp, Duration)>, @@ -41,7 +41,7 @@ impl AudioMixerBuilder { pub fn add_source(&mut self, info: AudioInfo, rx: Receiver<(ffmpeg::frame::Audio, Timestamp)>) { self.sources.push(MixerSource { info, - rx: rx.into_iter().peekable(), + rx, buffer: VecDeque::new(), buffer_last: None, }); @@ -184,7 +184,7 @@ impl AudioMixer { } } - while let Some((frame, timestamp)) = source.rx.next() { + while let Ok((frame, timestamp)) = source.rx.try_recv() { // if gap between incoming and last, insert silence if let Some((buffer_last_timestamp, buffer_last_duration)) = source.buffer_last { let timestamp_elapsed = timestamp.duration_since(self.timestamps); From 
fb6b625321800674d850480cd6cf55c13f9bcf12 Mon Sep 17 00:00:00 2001 From: Brendan Allan Date: Wed, 10 Sep 2025 23:45:23 +0800 Subject: [PATCH 11/20] fix audio skip fail on macos --- crates/enc-avfoundation/src/mp4.rs | 2 +- crates/recording/src/sources/audio_mixer.rs | 9 ++----- crates/timestamp/src/macos.rs | 26 +++++++++++++++++---- 3 files changed, 24 insertions(+), 13 deletions(-) diff --git a/crates/enc-avfoundation/src/mp4.rs b/crates/enc-avfoundation/src/mp4.rs index 9f7eeb2929..c56b56b464 100644 --- a/crates/enc-avfoundation/src/mp4.rs +++ b/crates/enc-avfoundation/src/mp4.rs @@ -246,7 +246,7 @@ impl MP4Encoder { }; if !audio_input.is_ready_for_more_media_data() { - return Err(QueueAudioFrameError::NotReady); + return Ok(()); } let audio_desc = cat::audio::StreamBasicDesc::common_f32( diff --git a/crates/recording/src/sources/audio_mixer.rs b/crates/recording/src/sources/audio_mixer.rs index 6a84ba1fad..8e28e8855f 100644 --- a/crates/recording/src/sources/audio_mixer.rs +++ b/crates/recording/src/sources/audio_mixer.rs @@ -156,6 +156,7 @@ impl AudioMixer { .checked_sub(last_end.duration_since(self.timestamps)) { let mut remaining = elapsed_since_last; + while remaining > Self::BUFFER_TIMEOUT { let chunk_samples = (Self::BUFFER_TIMEOUT.as_secs_f64() * rate as f64) as usize; @@ -172,11 +173,7 @@ impl AudioMixer { } let timestamp = last_end + (elapsed_since_last - remaining); - dbg!(timestamp); - source.buffer_last = Some(( - timestamp, - Duration::from_secs_f64(chunk_samples as f64 / rate as f64), - )); + source.buffer_last = Some((timestamp, Self::BUFFER_TIMEOUT)); source.buffer.push_back((frame, timestamp)); remaining -= Self::BUFFER_TIMEOUT; @@ -217,7 +214,6 @@ impl AudioMixer { frame.set_rate(source.info.rate() as u32); let timestamp = buffer_last_timestamp + gap; - dbg!(timestamp); source.buffer_last = Some(( timestamp, Duration::from_secs_f64(silence_samples_count as f64 / rate as f64), @@ -275,7 +271,6 @@ impl AudioMixer { 
frame.set_rate(source.info.rate() as u32); let timestamp = start_timestamp + (elapsed_since_start - remaining); - dbg!(timestamp); source.buffer_last = Some(( timestamp, Duration::from_secs_f64(chunk_samples as f64 / rate as f64), diff --git a/crates/timestamp/src/macos.rs b/crates/timestamp/src/macos.rs index 2571c71ee4..e98834840e 100644 --- a/crates/timestamp/src/macos.rs +++ b/crates/timestamp/src/macos.rs @@ -5,11 +5,14 @@ use std::{ }; #[derive(Clone, Copy, Debug)] -pub struct MachAbsoluteTimestamp(u64); +pub struct MachAbsoluteTimestamp( + // Nanoseconds + u64, +); impl MachAbsoluteTimestamp { - pub fn new(value: u64) -> Self { - Self(value) + pub fn new(nanos: u64) -> Self { + Self(nanos) } pub fn now() -> Self { @@ -37,7 +40,7 @@ impl Add for MachAbsoluteTimestamp { let info = TimeBaseInfo::new(); let freq = info.numer as f64 / info.denom as f64; - Self((self.0 as f64 + rhs.as_secs_f64() * freq) as u64) + Self((self.0 as f64 + rhs.as_nanos() as f64 * freq) as u64) } } @@ -48,6 +51,19 @@ impl Sub for MachAbsoluteTimestamp { let info = TimeBaseInfo::new(); let freq = info.numer as f64 / info.denom as f64; - Self((self.0 as f64 - rhs.as_secs_f64() * freq) as u64) + Self((self.0 as f64 - rhs.as_nanos() as f64 * freq) as u64) + } +} + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn test() { + let a = MachAbsoluteTimestamp::new(0); + + dbg!(MachAbsoluteTimestamp::now()); + dbg!(a + Duration::from_secs(1)); } } From dae62cdfa83eafaf0eab634b013d28862ae05f08 Mon Sep 17 00:00:00 2001 From: Brendan Allan Date: Thu, 11 Sep 2025 00:05:37 +0800 Subject: [PATCH 12/20] update plugin-dialog package --- apps/desktop/package.json | 2 +- pnpm-lock.yaml | 301 ++++++++++++++++++-------------------- 2 files changed, 144 insertions(+), 159 deletions(-) diff --git a/apps/desktop/package.json b/apps/desktop/package.json index f7d299f51f..ebeaf0d13f 100644 --- a/apps/desktop/package.json +++ b/apps/desktop/package.json @@ -41,7 +41,7 @@ "@tauri-apps/api": "2.5.0", 
"@tauri-apps/plugin-clipboard-manager": "^2.3.0", "@tauri-apps/plugin-deep-link": "^2.4.1", - "@tauri-apps/plugin-dialog": "^2.3.2", + "@tauri-apps/plugin-dialog": "^2.4.0", "@tauri-apps/plugin-fs": "^2.4.1", "@tauri-apps/plugin-http": "^2.5.1", "@tauri-apps/plugin-notification": "^2.3.0", diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 7811110fbf..fdd6394702 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -123,8 +123,8 @@ importers: specifier: ^2.4.1 version: 2.4.1 '@tauri-apps/plugin-dialog': - specifier: ^2.3.2 - version: 2.3.2 + specifier: ^2.4.0 + version: 2.4.0 '@tauri-apps/plugin-fs': specifier: ^2.4.1 version: 2.4.1 @@ -921,7 +921,7 @@ importers: version: 18.3.21 '@types/react-dom': specifier: latest - version: 19.1.7(@types/react@18.3.21) + version: 19.1.9(@types/react@18.3.21) dotenv-cli: specifier: latest version: 10.0.0 @@ -974,28 +974,28 @@ importers: version: 0.9.0(tailwindcss@3.4.17(ts-node@10.9.2(@types/node@20.17.43)(typescript@5.8.3))) '@radix-ui/react-dialog': specifier: ^1.0.5 - version: 1.1.13(@types/react-dom@19.1.7(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) + version: 1.1.13(@types/react-dom@19.1.9(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) '@radix-ui/react-dropdown-menu': specifier: ^2.0.6 - version: 2.1.14(@types/react-dom@19.1.7(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) + version: 2.1.14(@types/react-dom@19.1.9(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) '@radix-ui/react-label': specifier: ^2.0.2 - version: 2.1.6(@types/react-dom@19.1.7(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) + version: 2.1.6(@types/react-dom@19.1.9(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) '@radix-ui/react-navigation-menu': specifier: ^1.1.4 - version: 
1.2.12(@types/react-dom@19.1.7(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) + version: 1.2.12(@types/react-dom@19.1.9(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) '@radix-ui/react-popover': specifier: ^1.0.7 - version: 1.1.13(@types/react-dom@19.1.7(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) + version: 1.1.13(@types/react-dom@19.1.9(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) '@radix-ui/react-select': specifier: ^2.2.5 - version: 2.2.5(@types/react-dom@19.1.7(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) + version: 2.2.5(@types/react-dom@19.1.9(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) '@radix-ui/react-slot': specifier: ^1.0.2 version: 1.2.2(@types/react@18.3.21)(react@19.1.0) '@radix-ui/react-switch': specifier: ^1.1.0 - version: 1.2.4(@types/react-dom@19.1.7(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) + version: 1.2.4(@types/react-dom@19.1.9(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) '@tailwindcss/typography': specifier: ^0.5.9 version: 0.5.16(tailwindcss@3.4.17(ts-node@10.9.2(@types/node@20.17.43)(typescript@5.8.3))) @@ -1032,7 +1032,7 @@ importers: version: 18.3.21 '@types/react-dom': specifier: latest - version: 19.1.7(@types/react@18.3.21) + version: 19.1.9(@types/react@18.3.21) '@vitejs/plugin-react': specifier: ^4.0.3 version: 4.4.1(vite@6.3.5(@types/node@20.17.43)(jiti@2.4.2)(terser@5.39.0)(yaml@2.8.1)) @@ -5831,10 +5831,10 @@ packages: react-dom: optional: true - '@storybook/builder-vite@10.0.0-beta.0': - resolution: {integrity: sha512-IGd75RO4YR7ofOZWqND2dYHrKLjwN05+mM1xfrucb2SoQzjbNXvFNVVxtOCBmqsarjmrMKLKHuRgFzU41vs43g==} + '@storybook/builder-vite@10.0.0-beta.2': + resolution: 
{integrity: sha512-H4U+LXXrxXFezTzPc0pfvZA/qjyg9XmgPCGSXa0Q41LH+8UrW9uP42TDVarQkD77jS4Trd7QIFCN64xe34XAoQ==} peerDependencies: - storybook: ^10.0.0-beta.0 + storybook: ^10.0.0-beta.2 vite: ^5.0.0 || ^6.0.0 || ^7.0.0 '@storybook/core@8.6.12': @@ -5845,12 +5845,12 @@ packages: prettier: optional: true - '@storybook/csf-plugin@10.0.0-beta.0': - resolution: {integrity: sha512-2vFHbbnp/yGWt4p53OO5swDVZzmOp3HkJEbhc1zE9BW3YcX0mG8Gh2sd7K0+Io04uoupwJpawTz8VDdKfYkJUA==} + '@storybook/csf-plugin@10.0.0-beta.2': + resolution: {integrity: sha512-OBjwaEdG3OrgsvUncu0AptCOH9u9BDQeIlMChj/QxnlNCy4yTUdPYNJ140mYzl51a20IKi6OTRverLopP7RNUg==} peerDependencies: esbuild: '*' rollup: '*' - storybook: ^10.0.0-beta.0 + storybook: ^10.0.0-beta.2 vite: '*' webpack: '*' peerDependenciesMeta: @@ -6175,8 +6175,8 @@ packages: '@tauri-apps/plugin-deep-link@2.4.1': resolution: {integrity: sha512-I8Bo+spcAKGhIIJ1qN/gapp/Ot3mosQL98znxr975Zn2ODAkUZ++BQ9FnTpR7PDwfIl5ANSGdIW/YU01zVTcJw==} - '@tauri-apps/plugin-dialog@2.3.2': - resolution: {integrity: sha512-cNLo9YeQSC0MF4IgXnotHsqEgJk72MBZLXmQPrLA95qTaaWiiaFQ38hIMdZ6YbGUNkr3oni3EhU+AD5jLHcdUA==} + '@tauri-apps/plugin-dialog@2.4.0': + resolution: {integrity: sha512-OvXkrEBfWwtd8tzVCEXIvRfNEX87qs2jv6SqmVPiHcJjBhSF/GUvjqUNIDmKByb5N8nvDqVUM7+g1sXwdC/S9w==} '@tauri-apps/plugin-fs@2.4.1': resolution: {integrity: sha512-vJlKZVGF3UAFGoIEVT6Oq5L4HGDCD78WmA4uhzitToqYiBKWAvZR61M6zAyQzHqLs0ADemkE4RSy/5sCmZm6ZQ==} @@ -6463,8 +6463,8 @@ packages: peerDependencies: '@types/react': ^18.0.0 - '@types/react-dom@19.1.7': - resolution: {integrity: sha512-i5ZzwYpqjmrKenzkoLM2Ibzt6mAsM7pxB6BCIouEVVmgiqaMj1TjaK7hnA36hbW5aZv20kx7Lw6hWzPWg0Rurw==} + '@types/react-dom@19.1.9': + resolution: {integrity: sha512-qXRuZaOsAdXKFyOhRBg6Lqqc0yay13vN7KrIg4L7N4aaHN68ma9OK3NE1BoDFgFOTfM7zg+3/8+2n8rLUH3OKQ==} peerDependencies: '@types/react': ^19.0.0 @@ -12602,7 +12602,7 @@ packages: superagent@8.1.2: resolution: {integrity: 
sha512-6WTxW1EB6yCxV5VFOIPQruWGHqc3yI7hEmZK6h+pyk69Lk/Ut7rLUY6W/ONF2MjBuGjvmMiIpsrVJ2vjrHlslA==} engines: {node: '>=6.4.0 <13 || >=14'} - deprecated: Please upgrade to superagent v10.2.2+, see release notes at https://github.com/forwardemail/superagent/releases/tag/v10.2.2 - maintenance is supported by Forward Email @ https://forwardemail.net + deprecated: Please upgrade to v9.0.0+ as we have fixed a public vulnerability with formidable dependency. Note that v9.0.0+ requires Node.js v14.18.0+. See https://github.com/ladjs/superagent/pull/1800 for insight. This project is supported and maintained by the team at Forward Email @ https://forwardemail.net supertest@6.3.4: resolution: {integrity: sha512-erY3HFDG0dPnhw4U+udPfrzXa4xhSG+n4rxfRuZWCUvjFWwKl+OxWf/7zk50s84/fAAs7vf5QAb9uRa0cCykxw==} @@ -17388,14 +17388,14 @@ snapshots: '@types/react': 18.3.21 '@types/react-dom': 18.3.7(@types/react@18.3.21) - '@radix-ui/react-arrow@1.1.6(@types/react-dom@19.1.7(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': + '@radix-ui/react-arrow@1.1.6(@types/react-dom@19.1.9(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': dependencies: - '@radix-ui/react-primitive': 2.1.2(@types/react-dom@19.1.7(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) + '@radix-ui/react-primitive': 2.1.2(@types/react-dom@19.1.9(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) react: 19.1.0 react-dom: 19.1.0(react@19.1.0) optionalDependencies: '@types/react': 18.3.21 - '@types/react-dom': 19.1.7(@types/react@18.3.21) + '@types/react-dom': 19.1.9(@types/react@18.3.21) '@radix-ui/react-arrow@1.1.7(@types/react-dom@18.3.7(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': dependencies: @@ -17406,14 +17406,14 @@ snapshots: '@types/react': 18.3.21 '@types/react-dom': 18.3.7(@types/react@18.3.21) - 
'@radix-ui/react-arrow@1.1.7(@types/react-dom@19.1.7(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': + '@radix-ui/react-arrow@1.1.7(@types/react-dom@19.1.9(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': dependencies: - '@radix-ui/react-primitive': 2.1.3(@types/react-dom@19.1.7(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) + '@radix-ui/react-primitive': 2.1.3(@types/react-dom@19.1.9(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) react: 19.1.0 react-dom: 19.1.0(react@19.1.0) optionalDependencies: '@types/react': 18.3.21 - '@types/react-dom': 19.1.7(@types/react@18.3.21) + '@types/react-dom': 19.1.9(@types/react@18.3.21) '@radix-ui/react-collection@1.1.6(@types/react-dom@18.3.7(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': dependencies: @@ -17427,17 +17427,17 @@ snapshots: '@types/react': 18.3.21 '@types/react-dom': 18.3.7(@types/react@18.3.21) - '@radix-ui/react-collection@1.1.6(@types/react-dom@19.1.7(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': + '@radix-ui/react-collection@1.1.6(@types/react-dom@19.1.9(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': dependencies: '@radix-ui/react-compose-refs': 1.1.2(@types/react@18.3.21)(react@19.1.0) '@radix-ui/react-context': 1.1.2(@types/react@18.3.21)(react@19.1.0) - '@radix-ui/react-primitive': 2.1.2(@types/react-dom@19.1.7(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) + '@radix-ui/react-primitive': 2.1.2(@types/react-dom@19.1.9(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) '@radix-ui/react-slot': 1.2.2(@types/react@18.3.21)(react@19.1.0) react: 19.1.0 react-dom: 19.1.0(react@19.1.0) optionalDependencies: '@types/react': 
18.3.21 - '@types/react-dom': 19.1.7(@types/react@18.3.21) + '@types/react-dom': 19.1.9(@types/react@18.3.21) '@radix-ui/react-collection@1.1.7(@types/react-dom@18.3.7(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': dependencies: @@ -17451,17 +17451,17 @@ snapshots: '@types/react': 18.3.21 '@types/react-dom': 18.3.7(@types/react@18.3.21) - '@radix-ui/react-collection@1.1.7(@types/react-dom@19.1.7(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': + '@radix-ui/react-collection@1.1.7(@types/react-dom@19.1.9(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': dependencies: '@radix-ui/react-compose-refs': 1.1.2(@types/react@18.3.21)(react@19.1.0) '@radix-ui/react-context': 1.1.2(@types/react@18.3.21)(react@19.1.0) - '@radix-ui/react-primitive': 2.1.3(@types/react-dom@19.1.7(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) + '@radix-ui/react-primitive': 2.1.3(@types/react-dom@19.1.9(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) '@radix-ui/react-slot': 1.2.3(@types/react@18.3.21)(react@19.1.0) react: 19.1.0 react-dom: 19.1.0(react@19.1.0) optionalDependencies: '@types/react': 18.3.21 - '@types/react-dom': 19.1.7(@types/react@18.3.21) + '@types/react-dom': 19.1.9(@types/react@18.3.21) '@radix-ui/react-compose-refs@1.0.0(react@19.1.0)': dependencies: @@ -17507,18 +17507,18 @@ snapshots: transitivePeerDependencies: - '@types/react' - '@radix-ui/react-dialog@1.1.13(@types/react-dom@19.1.7(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': + '@radix-ui/react-dialog@1.1.13(@types/react-dom@19.1.9(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': dependencies: '@radix-ui/primitive': 1.1.2 '@radix-ui/react-compose-refs': 1.1.2(@types/react@18.3.21)(react@19.1.0) 
'@radix-ui/react-context': 1.1.2(@types/react@18.3.21)(react@19.1.0) - '@radix-ui/react-dismissable-layer': 1.1.9(@types/react-dom@19.1.7(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) + '@radix-ui/react-dismissable-layer': 1.1.9(@types/react-dom@19.1.9(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) '@radix-ui/react-focus-guards': 1.1.2(@types/react@18.3.21)(react@19.1.0) - '@radix-ui/react-focus-scope': 1.1.6(@types/react-dom@19.1.7(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) + '@radix-ui/react-focus-scope': 1.1.6(@types/react-dom@19.1.9(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) '@radix-ui/react-id': 1.1.1(@types/react@18.3.21)(react@19.1.0) - '@radix-ui/react-portal': 1.1.8(@types/react-dom@19.1.7(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) - '@radix-ui/react-presence': 1.1.4(@types/react-dom@19.1.7(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) - '@radix-ui/react-primitive': 2.1.2(@types/react-dom@19.1.7(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) + '@radix-ui/react-portal': 1.1.8(@types/react-dom@19.1.9(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) + '@radix-ui/react-presence': 1.1.4(@types/react-dom@19.1.9(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) + '@radix-ui/react-primitive': 2.1.2(@types/react-dom@19.1.9(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) '@radix-ui/react-slot': 1.2.2(@types/react@18.3.21)(react@19.1.0) '@radix-ui/react-use-controllable-state': 1.2.2(@types/react@18.3.21)(react@19.1.0) aria-hidden: 1.2.4 @@ -17527,7 +17527,7 @@ snapshots: react-remove-scroll: 
2.6.3(@types/react@18.3.21)(react@19.1.0) optionalDependencies: '@types/react': 18.3.21 - '@types/react-dom': 19.1.7(@types/react@18.3.21) + '@types/react-dom': 19.1.9(@types/react@18.3.21) '@radix-ui/react-direction@1.1.1(@types/react@18.3.21)(react@19.1.0)': dependencies: @@ -17559,18 +17559,18 @@ snapshots: '@types/react': 18.3.21 '@types/react-dom': 18.3.7(@types/react@18.3.21) - '@radix-ui/react-dismissable-layer@1.1.10(@types/react-dom@19.1.7(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': + '@radix-ui/react-dismissable-layer@1.1.10(@types/react-dom@19.1.9(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': dependencies: '@radix-ui/primitive': 1.1.2 '@radix-ui/react-compose-refs': 1.1.2(@types/react@18.3.21)(react@19.1.0) - '@radix-ui/react-primitive': 2.1.3(@types/react-dom@19.1.7(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) + '@radix-ui/react-primitive': 2.1.3(@types/react-dom@19.1.9(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) '@radix-ui/react-use-callback-ref': 1.1.1(@types/react@18.3.21)(react@19.1.0) '@radix-ui/react-use-escape-keydown': 1.1.1(@types/react@18.3.21)(react@19.1.0) react: 19.1.0 react-dom: 19.1.0(react@19.1.0) optionalDependencies: '@types/react': 18.3.21 - '@types/react-dom': 19.1.7(@types/react@18.3.21) + '@types/react-dom': 19.1.9(@types/react@18.3.21) '@radix-ui/react-dismissable-layer@1.1.9(@types/react-dom@18.3.7(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': dependencies: @@ -17585,18 +17585,18 @@ snapshots: '@types/react': 18.3.21 '@types/react-dom': 18.3.7(@types/react@18.3.21) - '@radix-ui/react-dismissable-layer@1.1.9(@types/react-dom@19.1.7(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': + 
'@radix-ui/react-dismissable-layer@1.1.9(@types/react-dom@19.1.9(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': dependencies: '@radix-ui/primitive': 1.1.2 '@radix-ui/react-compose-refs': 1.1.2(@types/react@18.3.21)(react@19.1.0) - '@radix-ui/react-primitive': 2.1.2(@types/react-dom@19.1.7(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) + '@radix-ui/react-primitive': 2.1.2(@types/react-dom@19.1.9(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) '@radix-ui/react-use-callback-ref': 1.1.1(@types/react@18.3.21)(react@19.1.0) '@radix-ui/react-use-escape-keydown': 1.1.1(@types/react@18.3.21)(react@19.1.0) react: 19.1.0 react-dom: 19.1.0(react@19.1.0) optionalDependencies: '@types/react': 18.3.21 - '@types/react-dom': 19.1.7(@types/react@18.3.21) + '@types/react-dom': 19.1.9(@types/react@18.3.21) '@radix-ui/react-dropdown-menu@2.1.14(@types/react-dom@18.3.7(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': dependencies: @@ -17613,20 +17613,20 @@ snapshots: '@types/react': 18.3.21 '@types/react-dom': 18.3.7(@types/react@18.3.21) - '@radix-ui/react-dropdown-menu@2.1.14(@types/react-dom@19.1.7(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': + '@radix-ui/react-dropdown-menu@2.1.14(@types/react-dom@19.1.9(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': dependencies: '@radix-ui/primitive': 1.1.2 '@radix-ui/react-compose-refs': 1.1.2(@types/react@18.3.21)(react@19.1.0) '@radix-ui/react-context': 1.1.2(@types/react@18.3.21)(react@19.1.0) '@radix-ui/react-id': 1.1.1(@types/react@18.3.21)(react@19.1.0) - '@radix-ui/react-menu': 2.1.14(@types/react-dom@19.1.7(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) - '@radix-ui/react-primitive': 
2.1.2(@types/react-dom@19.1.7(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) + '@radix-ui/react-menu': 2.1.14(@types/react-dom@19.1.9(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) + '@radix-ui/react-primitive': 2.1.2(@types/react-dom@19.1.9(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) '@radix-ui/react-use-controllable-state': 1.2.2(@types/react@18.3.21)(react@19.1.0) react: 19.1.0 react-dom: 19.1.0(react@19.1.0) optionalDependencies: '@types/react': 18.3.21 - '@types/react-dom': 19.1.7(@types/react@18.3.21) + '@types/react-dom': 19.1.9(@types/react@18.3.21) '@radix-ui/react-focus-guards@1.0.0(react@19.1.0)': dependencies: @@ -17659,16 +17659,16 @@ snapshots: '@types/react': 18.3.21 '@types/react-dom': 18.3.7(@types/react@18.3.21) - '@radix-ui/react-focus-scope@1.1.6(@types/react-dom@19.1.7(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': + '@radix-ui/react-focus-scope@1.1.6(@types/react-dom@19.1.9(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': dependencies: '@radix-ui/react-compose-refs': 1.1.2(@types/react@18.3.21)(react@19.1.0) - '@radix-ui/react-primitive': 2.1.2(@types/react-dom@19.1.7(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) + '@radix-ui/react-primitive': 2.1.2(@types/react-dom@19.1.9(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) '@radix-ui/react-use-callback-ref': 1.1.1(@types/react@18.3.21)(react@19.1.0) react: 19.1.0 react-dom: 19.1.0(react@19.1.0) optionalDependencies: '@types/react': 18.3.21 - '@types/react-dom': 19.1.7(@types/react@18.3.21) + '@types/react-dom': 19.1.9(@types/react@18.3.21) '@radix-ui/react-focus-scope@1.1.7(@types/react-dom@18.3.7(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': 
dependencies: @@ -17681,16 +17681,16 @@ snapshots: '@types/react': 18.3.21 '@types/react-dom': 18.3.7(@types/react@18.3.21) - '@radix-ui/react-focus-scope@1.1.7(@types/react-dom@19.1.7(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': + '@radix-ui/react-focus-scope@1.1.7(@types/react-dom@19.1.9(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': dependencies: '@radix-ui/react-compose-refs': 1.1.2(@types/react@18.3.21)(react@19.1.0) - '@radix-ui/react-primitive': 2.1.3(@types/react-dom@19.1.7(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) + '@radix-ui/react-primitive': 2.1.3(@types/react-dom@19.1.9(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) '@radix-ui/react-use-callback-ref': 1.1.1(@types/react@18.3.21)(react@19.1.0) react: 19.1.0 react-dom: 19.1.0(react@19.1.0) optionalDependencies: '@types/react': 18.3.21 - '@types/react-dom': 19.1.7(@types/react@18.3.21) + '@types/react-dom': 19.1.9(@types/react@18.3.21) '@radix-ui/react-id@1.0.0(react@19.1.0)': dependencies: @@ -17705,14 +17705,14 @@ snapshots: optionalDependencies: '@types/react': 18.3.21 - '@radix-ui/react-label@2.1.6(@types/react-dom@19.1.7(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': + '@radix-ui/react-label@2.1.6(@types/react-dom@19.1.9(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': dependencies: - '@radix-ui/react-primitive': 2.1.2(@types/react-dom@19.1.7(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) + '@radix-ui/react-primitive': 2.1.2(@types/react-dom@19.1.9(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) react: 19.1.0 react-dom: 19.1.0(react@19.1.0) optionalDependencies: '@types/react': 18.3.21 - '@types/react-dom': 19.1.7(@types/react@18.3.21) + 
'@types/react-dom': 19.1.9(@types/react@18.3.21) '@radix-ui/react-menu@2.1.14(@types/react-dom@18.3.7(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': dependencies: @@ -17740,22 +17740,22 @@ snapshots: '@types/react': 18.3.21 '@types/react-dom': 18.3.7(@types/react@18.3.21) - '@radix-ui/react-menu@2.1.14(@types/react-dom@19.1.7(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': + '@radix-ui/react-menu@2.1.14(@types/react-dom@19.1.9(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': dependencies: '@radix-ui/primitive': 1.1.2 - '@radix-ui/react-collection': 1.1.6(@types/react-dom@19.1.7(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) + '@radix-ui/react-collection': 1.1.6(@types/react-dom@19.1.9(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) '@radix-ui/react-compose-refs': 1.1.2(@types/react@18.3.21)(react@19.1.0) '@radix-ui/react-context': 1.1.2(@types/react@18.3.21)(react@19.1.0) '@radix-ui/react-direction': 1.1.1(@types/react@18.3.21)(react@19.1.0) - '@radix-ui/react-dismissable-layer': 1.1.9(@types/react-dom@19.1.7(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) + '@radix-ui/react-dismissable-layer': 1.1.9(@types/react-dom@19.1.9(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) '@radix-ui/react-focus-guards': 1.1.2(@types/react@18.3.21)(react@19.1.0) - '@radix-ui/react-focus-scope': 1.1.6(@types/react-dom@19.1.7(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) + '@radix-ui/react-focus-scope': 1.1.6(@types/react-dom@19.1.9(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) '@radix-ui/react-id': 1.1.1(@types/react@18.3.21)(react@19.1.0) - '@radix-ui/react-popper': 
1.2.6(@types/react-dom@19.1.7(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) - '@radix-ui/react-portal': 1.1.8(@types/react-dom@19.1.7(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) - '@radix-ui/react-presence': 1.1.4(@types/react-dom@19.1.7(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) - '@radix-ui/react-primitive': 2.1.2(@types/react-dom@19.1.7(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) - '@radix-ui/react-roving-focus': 1.1.9(@types/react-dom@19.1.7(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) + '@radix-ui/react-popper': 1.2.6(@types/react-dom@19.1.9(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) + '@radix-ui/react-portal': 1.1.8(@types/react-dom@19.1.9(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) + '@radix-ui/react-presence': 1.1.4(@types/react-dom@19.1.9(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) + '@radix-ui/react-primitive': 2.1.2(@types/react-dom@19.1.9(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) + '@radix-ui/react-roving-focus': 1.1.9(@types/react-dom@19.1.9(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) '@radix-ui/react-slot': 1.2.2(@types/react@18.3.21)(react@19.1.0) '@radix-ui/react-use-callback-ref': 1.1.1(@types/react@18.3.21)(react@19.1.0) aria-hidden: 1.2.4 @@ -17764,43 +17764,43 @@ snapshots: react-remove-scroll: 2.6.3(@types/react@18.3.21)(react@19.1.0) optionalDependencies: '@types/react': 18.3.21 - '@types/react-dom': 19.1.7(@types/react@18.3.21) + '@types/react-dom': 19.1.9(@types/react@18.3.21) - 
'@radix-ui/react-navigation-menu@1.2.12(@types/react-dom@19.1.7(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': + '@radix-ui/react-navigation-menu@1.2.12(@types/react-dom@19.1.9(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': dependencies: '@radix-ui/primitive': 1.1.2 - '@radix-ui/react-collection': 1.1.6(@types/react-dom@19.1.7(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) + '@radix-ui/react-collection': 1.1.6(@types/react-dom@19.1.9(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) '@radix-ui/react-compose-refs': 1.1.2(@types/react@18.3.21)(react@19.1.0) '@radix-ui/react-context': 1.1.2(@types/react@18.3.21)(react@19.1.0) '@radix-ui/react-direction': 1.1.1(@types/react@18.3.21)(react@19.1.0) - '@radix-ui/react-dismissable-layer': 1.1.9(@types/react-dom@19.1.7(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) + '@radix-ui/react-dismissable-layer': 1.1.9(@types/react-dom@19.1.9(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) '@radix-ui/react-id': 1.1.1(@types/react@18.3.21)(react@19.1.0) - '@radix-ui/react-presence': 1.1.4(@types/react-dom@19.1.7(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) - '@radix-ui/react-primitive': 2.1.2(@types/react-dom@19.1.7(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) + '@radix-ui/react-presence': 1.1.4(@types/react-dom@19.1.9(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) + '@radix-ui/react-primitive': 2.1.2(@types/react-dom@19.1.9(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) '@radix-ui/react-use-callback-ref': 1.1.1(@types/react@18.3.21)(react@19.1.0) '@radix-ui/react-use-controllable-state': 
1.2.2(@types/react@18.3.21)(react@19.1.0) '@radix-ui/react-use-layout-effect': 1.1.1(@types/react@18.3.21)(react@19.1.0) '@radix-ui/react-use-previous': 1.1.1(@types/react@18.3.21)(react@19.1.0) - '@radix-ui/react-visually-hidden': 1.2.2(@types/react-dom@19.1.7(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) + '@radix-ui/react-visually-hidden': 1.2.2(@types/react-dom@19.1.9(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) react: 19.1.0 react-dom: 19.1.0(react@19.1.0) optionalDependencies: '@types/react': 18.3.21 - '@types/react-dom': 19.1.7(@types/react@18.3.21) + '@types/react-dom': 19.1.9(@types/react@18.3.21) - '@radix-ui/react-popover@1.1.13(@types/react-dom@19.1.7(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': + '@radix-ui/react-popover@1.1.13(@types/react-dom@19.1.9(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': dependencies: '@radix-ui/primitive': 1.1.2 '@radix-ui/react-compose-refs': 1.1.2(@types/react@18.3.21)(react@19.1.0) '@radix-ui/react-context': 1.1.2(@types/react@18.3.21)(react@19.1.0) - '@radix-ui/react-dismissable-layer': 1.1.9(@types/react-dom@19.1.7(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) + '@radix-ui/react-dismissable-layer': 1.1.9(@types/react-dom@19.1.9(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) '@radix-ui/react-focus-guards': 1.1.2(@types/react@18.3.21)(react@19.1.0) - '@radix-ui/react-focus-scope': 1.1.6(@types/react-dom@19.1.7(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) + '@radix-ui/react-focus-scope': 1.1.6(@types/react-dom@19.1.9(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) '@radix-ui/react-id': 1.1.1(@types/react@18.3.21)(react@19.1.0) - '@radix-ui/react-popper': 
1.2.6(@types/react-dom@19.1.7(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) - '@radix-ui/react-portal': 1.1.8(@types/react-dom@19.1.7(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) - '@radix-ui/react-presence': 1.1.4(@types/react-dom@19.1.7(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) - '@radix-ui/react-primitive': 2.1.2(@types/react-dom@19.1.7(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) + '@radix-ui/react-popper': 1.2.6(@types/react-dom@19.1.9(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) + '@radix-ui/react-portal': 1.1.8(@types/react-dom@19.1.9(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) + '@radix-ui/react-presence': 1.1.4(@types/react-dom@19.1.9(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) + '@radix-ui/react-primitive': 2.1.2(@types/react-dom@19.1.9(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) '@radix-ui/react-slot': 1.2.2(@types/react@18.3.21)(react@19.1.0) '@radix-ui/react-use-controllable-state': 1.2.2(@types/react@18.3.21)(react@19.1.0) aria-hidden: 1.2.4 @@ -17809,7 +17809,7 @@ snapshots: react-remove-scroll: 2.6.3(@types/react@18.3.21)(react@19.1.0) optionalDependencies: '@types/react': 18.3.21 - '@types/react-dom': 19.1.7(@types/react@18.3.21) + '@types/react-dom': 19.1.9(@types/react@18.3.21) '@radix-ui/react-popper@1.2.6(@types/react-dom@18.3.7(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': dependencies: @@ -17829,13 +17829,13 @@ snapshots: '@types/react': 18.3.21 '@types/react-dom': 18.3.7(@types/react@18.3.21) - 
'@radix-ui/react-popper@1.2.6(@types/react-dom@19.1.7(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': + '@radix-ui/react-popper@1.2.6(@types/react-dom@19.1.9(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': dependencies: '@floating-ui/react-dom': 2.1.2(react-dom@19.1.0(react@19.1.0))(react@19.1.0) - '@radix-ui/react-arrow': 1.1.6(@types/react-dom@19.1.7(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) + '@radix-ui/react-arrow': 1.1.6(@types/react-dom@19.1.9(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) '@radix-ui/react-compose-refs': 1.1.2(@types/react@18.3.21)(react@19.1.0) '@radix-ui/react-context': 1.1.2(@types/react@18.3.21)(react@19.1.0) - '@radix-ui/react-primitive': 2.1.2(@types/react-dom@19.1.7(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) + '@radix-ui/react-primitive': 2.1.2(@types/react-dom@19.1.9(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) '@radix-ui/react-use-callback-ref': 1.1.1(@types/react@18.3.21)(react@19.1.0) '@radix-ui/react-use-layout-effect': 1.1.1(@types/react@18.3.21)(react@19.1.0) '@radix-ui/react-use-rect': 1.1.1(@types/react@18.3.21)(react@19.1.0) @@ -17845,7 +17845,7 @@ snapshots: react-dom: 19.1.0(react@19.1.0) optionalDependencies: '@types/react': 18.3.21 - '@types/react-dom': 19.1.7(@types/react@18.3.21) + '@types/react-dom': 19.1.9(@types/react@18.3.21) '@radix-ui/react-popper@1.2.7(@types/react-dom@18.3.7(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': dependencies: @@ -17865,13 +17865,13 @@ snapshots: '@types/react': 18.3.21 '@types/react-dom': 18.3.7(@types/react@18.3.21) - '@radix-ui/react-popper@1.2.7(@types/react-dom@19.1.7(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': + 
'@radix-ui/react-popper@1.2.7(@types/react-dom@19.1.9(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': dependencies: '@floating-ui/react-dom': 2.1.2(react-dom@19.1.0(react@19.1.0))(react@19.1.0) - '@radix-ui/react-arrow': 1.1.7(@types/react-dom@19.1.7(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) + '@radix-ui/react-arrow': 1.1.7(@types/react-dom@19.1.9(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) '@radix-ui/react-compose-refs': 1.1.2(@types/react@18.3.21)(react@19.1.0) '@radix-ui/react-context': 1.1.2(@types/react@18.3.21)(react@19.1.0) - '@radix-ui/react-primitive': 2.1.3(@types/react-dom@19.1.7(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) + '@radix-ui/react-primitive': 2.1.3(@types/react-dom@19.1.9(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) '@radix-ui/react-use-callback-ref': 1.1.1(@types/react@18.3.21)(react@19.1.0) '@radix-ui/react-use-layout-effect': 1.1.1(@types/react@18.3.21)(react@19.1.0) '@radix-ui/react-use-rect': 1.1.1(@types/react@18.3.21)(react@19.1.0) @@ -17881,7 +17881,7 @@ snapshots: react-dom: 19.1.0(react@19.1.0) optionalDependencies: '@types/react': 18.3.21 - '@types/react-dom': 19.1.7(@types/react@18.3.21) + '@types/react-dom': 19.1.9(@types/react@18.3.21) '@radix-ui/react-portal@1.0.0(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': dependencies: @@ -17900,15 +17900,15 @@ snapshots: '@types/react': 18.3.21 '@types/react-dom': 18.3.7(@types/react@18.3.21) - '@radix-ui/react-portal@1.1.8(@types/react-dom@19.1.7(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': + '@radix-ui/react-portal@1.1.8(@types/react-dom@19.1.9(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': dependencies: - '@radix-ui/react-primitive': 
2.1.2(@types/react-dom@19.1.7(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) + '@radix-ui/react-primitive': 2.1.2(@types/react-dom@19.1.9(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) '@radix-ui/react-use-layout-effect': 1.1.1(@types/react@18.3.21)(react@19.1.0) react: 19.1.0 react-dom: 19.1.0(react@19.1.0) optionalDependencies: '@types/react': 18.3.21 - '@types/react-dom': 19.1.7(@types/react@18.3.21) + '@types/react-dom': 19.1.9(@types/react@18.3.21) '@radix-ui/react-portal@1.1.9(@types/react-dom@18.3.7(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': dependencies: @@ -17920,15 +17920,15 @@ snapshots: '@types/react': 18.3.21 '@types/react-dom': 18.3.7(@types/react@18.3.21) - '@radix-ui/react-portal@1.1.9(@types/react-dom@19.1.7(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': + '@radix-ui/react-portal@1.1.9(@types/react-dom@19.1.9(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': dependencies: - '@radix-ui/react-primitive': 2.1.3(@types/react-dom@19.1.7(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) + '@radix-ui/react-primitive': 2.1.3(@types/react-dom@19.1.9(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) '@radix-ui/react-use-layout-effect': 1.1.1(@types/react@18.3.21)(react@19.1.0) react: 19.1.0 react-dom: 19.1.0(react@19.1.0) optionalDependencies: '@types/react': 18.3.21 - '@types/react-dom': 19.1.7(@types/react@18.3.21) + '@types/react-dom': 19.1.9(@types/react@18.3.21) '@radix-ui/react-presence@1.0.0(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': dependencies: @@ -17948,7 +17948,7 @@ snapshots: '@types/react': 18.3.21 '@types/react-dom': 18.3.7(@types/react@18.3.21) - 
'@radix-ui/react-presence@1.1.4(@types/react-dom@19.1.7(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': + '@radix-ui/react-presence@1.1.4(@types/react-dom@19.1.9(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': dependencies: '@radix-ui/react-compose-refs': 1.1.2(@types/react@18.3.21)(react@19.1.0) '@radix-ui/react-use-layout-effect': 1.1.1(@types/react@18.3.21)(react@19.1.0) @@ -17956,7 +17956,7 @@ snapshots: react-dom: 19.1.0(react@19.1.0) optionalDependencies: '@types/react': 18.3.21 - '@types/react-dom': 19.1.7(@types/react@18.3.21) + '@types/react-dom': 19.1.9(@types/react@18.3.21) '@radix-ui/react-primitive@1.0.0(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': dependencies: @@ -17974,14 +17974,14 @@ snapshots: '@types/react': 18.3.21 '@types/react-dom': 18.3.7(@types/react@18.3.21) - '@radix-ui/react-primitive@2.1.2(@types/react-dom@19.1.7(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': + '@radix-ui/react-primitive@2.1.2(@types/react-dom@19.1.9(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': dependencies: '@radix-ui/react-slot': 1.2.2(@types/react@18.3.21)(react@19.1.0) react: 19.1.0 react-dom: 19.1.0(react@19.1.0) optionalDependencies: '@types/react': 18.3.21 - '@types/react-dom': 19.1.7(@types/react@18.3.21) + '@types/react-dom': 19.1.9(@types/react@18.3.21) '@radix-ui/react-primitive@2.1.3(@types/react-dom@18.3.7(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': dependencies: @@ -17992,14 +17992,14 @@ snapshots: '@types/react': 18.3.21 '@types/react-dom': 18.3.7(@types/react@18.3.21) - '@radix-ui/react-primitive@2.1.3(@types/react-dom@19.1.7(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': + 
'@radix-ui/react-primitive@2.1.3(@types/react-dom@19.1.9(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': dependencies: '@radix-ui/react-slot': 1.2.3(@types/react@18.3.21)(react@19.1.0) react: 19.1.0 react-dom: 19.1.0(react@19.1.0) optionalDependencies: '@types/react': 18.3.21 - '@types/react-dom': 19.1.7(@types/react@18.3.21) + '@types/react-dom': 19.1.9(@types/react@18.3.21) '@radix-ui/react-roving-focus@1.1.9(@types/react-dom@18.3.7(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': dependencies: @@ -18018,22 +18018,22 @@ snapshots: '@types/react': 18.3.21 '@types/react-dom': 18.3.7(@types/react@18.3.21) - '@radix-ui/react-roving-focus@1.1.9(@types/react-dom@19.1.7(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': + '@radix-ui/react-roving-focus@1.1.9(@types/react-dom@19.1.9(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': dependencies: '@radix-ui/primitive': 1.1.2 - '@radix-ui/react-collection': 1.1.6(@types/react-dom@19.1.7(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) + '@radix-ui/react-collection': 1.1.6(@types/react-dom@19.1.9(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) '@radix-ui/react-compose-refs': 1.1.2(@types/react@18.3.21)(react@19.1.0) '@radix-ui/react-context': 1.1.2(@types/react@18.3.21)(react@19.1.0) '@radix-ui/react-direction': 1.1.1(@types/react@18.3.21)(react@19.1.0) '@radix-ui/react-id': 1.1.1(@types/react@18.3.21)(react@19.1.0) - '@radix-ui/react-primitive': 2.1.2(@types/react-dom@19.1.7(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) + '@radix-ui/react-primitive': 2.1.2(@types/react-dom@19.1.9(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) '@radix-ui/react-use-callback-ref': 
1.1.1(@types/react@18.3.21)(react@19.1.0) '@radix-ui/react-use-controllable-state': 1.2.2(@types/react@18.3.21)(react@19.1.0) react: 19.1.0 react-dom: 19.1.0(react@19.1.0) optionalDependencies: '@types/react': 18.3.21 - '@types/react-dom': 19.1.7(@types/react@18.3.21) + '@types/react-dom': 19.1.9(@types/react@18.3.21) '@radix-ui/react-select@2.2.5(@types/react-dom@18.3.7(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': dependencies: @@ -18064,34 +18064,34 @@ snapshots: '@types/react': 18.3.21 '@types/react-dom': 18.3.7(@types/react@18.3.21) - '@radix-ui/react-select@2.2.5(@types/react-dom@19.1.7(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': + '@radix-ui/react-select@2.2.5(@types/react-dom@19.1.9(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': dependencies: '@radix-ui/number': 1.1.1 '@radix-ui/primitive': 1.1.2 - '@radix-ui/react-collection': 1.1.7(@types/react-dom@19.1.7(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) + '@radix-ui/react-collection': 1.1.7(@types/react-dom@19.1.9(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) '@radix-ui/react-compose-refs': 1.1.2(@types/react@18.3.21)(react@19.1.0) '@radix-ui/react-context': 1.1.2(@types/react@18.3.21)(react@19.1.0) '@radix-ui/react-direction': 1.1.1(@types/react@18.3.21)(react@19.1.0) - '@radix-ui/react-dismissable-layer': 1.1.10(@types/react-dom@19.1.7(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) + '@radix-ui/react-dismissable-layer': 1.1.10(@types/react-dom@19.1.9(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) '@radix-ui/react-focus-guards': 1.1.2(@types/react@18.3.21)(react@19.1.0) - '@radix-ui/react-focus-scope': 
1.1.7(@types/react-dom@19.1.7(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) + '@radix-ui/react-focus-scope': 1.1.7(@types/react-dom@19.1.9(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) '@radix-ui/react-id': 1.1.1(@types/react@18.3.21)(react@19.1.0) - '@radix-ui/react-popper': 1.2.7(@types/react-dom@19.1.7(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) - '@radix-ui/react-portal': 1.1.9(@types/react-dom@19.1.7(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) - '@radix-ui/react-primitive': 2.1.3(@types/react-dom@19.1.7(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) + '@radix-ui/react-popper': 1.2.7(@types/react-dom@19.1.9(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) + '@radix-ui/react-portal': 1.1.9(@types/react-dom@19.1.9(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) + '@radix-ui/react-primitive': 2.1.3(@types/react-dom@19.1.9(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) '@radix-ui/react-slot': 1.2.3(@types/react@18.3.21)(react@19.1.0) '@radix-ui/react-use-callback-ref': 1.1.1(@types/react@18.3.21)(react@19.1.0) '@radix-ui/react-use-controllable-state': 1.2.2(@types/react@18.3.21)(react@19.1.0) '@radix-ui/react-use-layout-effect': 1.1.1(@types/react@18.3.21)(react@19.1.0) '@radix-ui/react-use-previous': 1.1.1(@types/react@18.3.21)(react@19.1.0) - '@radix-ui/react-visually-hidden': 1.2.3(@types/react-dom@19.1.7(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) + '@radix-ui/react-visually-hidden': 1.2.3(@types/react-dom@19.1.9(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) aria-hidden: 1.2.4 react: 19.1.0 react-dom: 
19.1.0(react@19.1.0) react-remove-scroll: 2.6.3(@types/react@18.3.21)(react@19.1.0) optionalDependencies: '@types/react': 18.3.21 - '@types/react-dom': 19.1.7(@types/react@18.3.21) + '@types/react-dom': 19.1.9(@types/react@18.3.21) '@radix-ui/react-slider@1.3.5(@types/react-dom@18.3.7(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': dependencies: @@ -18132,12 +18132,12 @@ snapshots: optionalDependencies: '@types/react': 18.3.21 - '@radix-ui/react-switch@1.2.4(@types/react-dom@19.1.7(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': + '@radix-ui/react-switch@1.2.4(@types/react-dom@19.1.9(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': dependencies: '@radix-ui/primitive': 1.1.2 '@radix-ui/react-compose-refs': 1.1.2(@types/react@18.3.21)(react@19.1.0) '@radix-ui/react-context': 1.1.2(@types/react@18.3.21)(react@19.1.0) - '@radix-ui/react-primitive': 2.1.2(@types/react-dom@19.1.7(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) + '@radix-ui/react-primitive': 2.1.2(@types/react-dom@19.1.9(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) '@radix-ui/react-use-controllable-state': 1.2.2(@types/react@18.3.21)(react@19.1.0) '@radix-ui/react-use-previous': 1.1.1(@types/react@18.3.21)(react@19.1.0) '@radix-ui/react-use-size': 1.1.1(@types/react@18.3.21)(react@19.1.0) @@ -18145,7 +18145,7 @@ snapshots: react-dom: 19.1.0(react@19.1.0) optionalDependencies: '@types/react': 18.3.21 - '@types/react-dom': 19.1.7(@types/react@18.3.21) + '@types/react-dom': 19.1.9(@types/react@18.3.21) '@radix-ui/react-tooltip@1.2.6(@types/react-dom@18.3.7(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': dependencies: @@ -18252,14 +18252,14 @@ snapshots: '@types/react': 18.3.21 '@types/react-dom': 18.3.7(@types/react@18.3.21) - 
'@radix-ui/react-visually-hidden@1.2.2(@types/react-dom@19.1.7(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': + '@radix-ui/react-visually-hidden@1.2.2(@types/react-dom@19.1.9(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': dependencies: - '@radix-ui/react-primitive': 2.1.2(@types/react-dom@19.1.7(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) + '@radix-ui/react-primitive': 2.1.2(@types/react-dom@19.1.9(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) react: 19.1.0 react-dom: 19.1.0(react@19.1.0) optionalDependencies: '@types/react': 18.3.21 - '@types/react-dom': 19.1.7(@types/react@18.3.21) + '@types/react-dom': 19.1.9(@types/react@18.3.21) '@radix-ui/react-visually-hidden@1.2.3(@types/react-dom@18.3.7(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': dependencies: @@ -18270,14 +18270,14 @@ snapshots: '@types/react': 18.3.21 '@types/react-dom': 18.3.7(@types/react@18.3.21) - '@radix-ui/react-visually-hidden@1.2.3(@types/react-dom@19.1.7(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': + '@radix-ui/react-visually-hidden@1.2.3(@types/react-dom@19.1.9(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': dependencies: - '@radix-ui/react-primitive': 2.1.3(@types/react-dom@19.1.7(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) + '@radix-ui/react-primitive': 2.1.3(@types/react-dom@19.1.9(@types/react@18.3.21))(@types/react@18.3.21)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) react: 19.1.0 react-dom: 19.1.0(react@19.1.0) optionalDependencies: '@types/react': 18.3.21 - '@types/react-dom': 19.1.7(@types/react@18.3.21) + '@types/react-dom': 19.1.9(@types/react@18.3.21) '@radix-ui/rect@1.1.1': {} @@ -19271,9 +19271,9 @@ snapshots: 
react: 19.1.0 react-dom: 19.1.0(react@19.1.0) - '@storybook/builder-vite@10.0.0-beta.0(esbuild@0.25.4)(rollup@4.40.2)(storybook@8.6.12(prettier@3.5.3))(vite@6.3.5(@types/node@22.15.17)(jiti@2.4.2)(terser@5.39.0)(yaml@2.8.1))': + '@storybook/builder-vite@10.0.0-beta.2(esbuild@0.25.4)(rollup@4.40.2)(storybook@8.6.12(prettier@3.5.3))(vite@6.3.5(@types/node@22.15.17)(jiti@2.4.2)(terser@5.39.0)(yaml@2.8.1))': dependencies: - '@storybook/csf-plugin': 10.0.0-beta.0(esbuild@0.25.4)(rollup@4.40.2)(storybook@8.6.12(prettier@3.5.3))(vite@6.3.5(@types/node@22.15.17)(jiti@2.4.2)(terser@5.39.0)(yaml@2.8.1)) + '@storybook/csf-plugin': 10.0.0-beta.2(esbuild@0.25.4)(rollup@4.40.2)(storybook@8.6.12(prettier@3.5.3))(vite@6.3.5(@types/node@22.15.17)(jiti@2.4.2)(terser@5.39.0)(yaml@2.8.1)) storybook: 8.6.12(prettier@3.5.3) ts-dedent: 2.2.0 vite: 6.3.5(@types/node@22.15.17)(jiti@2.4.2)(terser@5.39.0)(yaml@2.8.1) @@ -19303,7 +19303,7 @@ snapshots: - supports-color - utf-8-validate - '@storybook/csf-plugin@10.0.0-beta.0(esbuild@0.25.4)(rollup@4.40.2)(storybook@8.6.12(prettier@3.5.3))(vite@6.3.5(@types/node@22.15.17)(jiti@2.4.2)(terser@5.39.0)(yaml@2.8.1))': + '@storybook/csf-plugin@10.0.0-beta.2(esbuild@0.25.4)(rollup@4.40.2)(storybook@8.6.12(prettier@3.5.3))(vite@6.3.5(@types/node@22.15.17)(jiti@2.4.2)(terser@5.39.0)(yaml@2.8.1))': dependencies: storybook: 8.6.12(prettier@3.5.3) unplugin: 2.3.8 @@ -19625,9 +19625,9 @@ snapshots: dependencies: '@tauri-apps/api': 2.7.0 - '@tauri-apps/plugin-dialog@2.3.2': + '@tauri-apps/plugin-dialog@2.4.0': dependencies: - '@tauri-apps/api': 2.7.0 + '@tauri-apps/api': 2.8.0 '@tauri-apps/plugin-fs@2.4.1': dependencies: @@ -19965,7 +19965,7 @@ snapshots: dependencies: '@types/react': 18.3.21 - '@types/react-dom@19.1.7(@types/react@18.3.21)': + '@types/react-dom@19.1.9(@types/react@18.3.21)': dependencies: '@types/react': 18.3.21 @@ -22278,8 +22278,8 @@ snapshots: '@typescript-eslint/parser': 5.62.0(eslint@8.57.1)(typescript@5.8.3) eslint: 8.57.1 
eslint-import-resolver-node: 0.3.9 - eslint-import-resolver-typescript: 3.10.1(eslint-plugin-import@2.31.0(@typescript-eslint/parser@5.62.0(eslint@8.57.1)(typescript@5.8.3))(eslint@8.57.1))(eslint@8.57.1) - eslint-plugin-import: 2.31.0(@typescript-eslint/parser@5.62.0(eslint@8.57.1)(typescript@5.8.3))(eslint-import-resolver-typescript@3.10.1(eslint-plugin-import@2.31.0(@typescript-eslint/parser@5.62.0(eslint@8.57.1)(typescript@5.8.3))(eslint@8.57.1))(eslint@8.57.1))(eslint@8.57.1) + eslint-import-resolver-typescript: 3.10.1(eslint-plugin-import@2.31.0)(eslint@8.57.1) + eslint-plugin-import: 2.31.0(@typescript-eslint/parser@5.62.0(eslint@8.57.1)(typescript@5.8.3))(eslint-import-resolver-typescript@3.10.1)(eslint@8.57.1) eslint-plugin-jsx-a11y: 6.10.2(eslint@8.57.1) eslint-plugin-react: 7.37.5(eslint@8.57.1) eslint-plugin-react-hooks: 4.6.2(eslint@8.57.1) @@ -22298,7 +22298,7 @@ snapshots: eslint: 9.30.1(jiti@2.4.2) eslint-import-resolver-node: 0.3.9 eslint-import-resolver-typescript: 3.10.1(eslint-plugin-import@2.31.0(eslint@9.30.1(jiti@2.4.2)))(eslint@9.30.1(jiti@2.4.2)) - eslint-plugin-import: 2.31.0(@typescript-eslint/parser@5.62.0(eslint@9.30.1(jiti@2.4.2))(typescript@5.8.3))(eslint-import-resolver-typescript@3.10.1(eslint-plugin-import@2.31.0(eslint@9.30.1(jiti@2.4.2)))(eslint@9.30.1(jiti@2.4.2)))(eslint@9.30.1(jiti@2.4.2)) + eslint-plugin-import: 2.31.0(@typescript-eslint/parser@5.62.0(eslint@9.30.1(jiti@2.4.2))(typescript@5.8.3))(eslint-import-resolver-typescript@3.10.1)(eslint@9.30.1(jiti@2.4.2)) eslint-plugin-jsx-a11y: 6.10.2(eslint@9.30.1(jiti@2.4.2)) eslint-plugin-react: 7.37.5(eslint@9.30.1(jiti@2.4.2)) eslint-plugin-react-hooks: 4.6.2(eslint@9.30.1(jiti@2.4.2)) @@ -22326,21 +22326,6 @@ snapshots: transitivePeerDependencies: - supports-color - eslint-import-resolver-typescript@3.10.1(eslint-plugin-import@2.31.0(@typescript-eslint/parser@5.62.0(eslint@8.57.1)(typescript@5.8.3))(eslint@8.57.1))(eslint@8.57.1): - dependencies: - '@nolyfill/is-core-module': 
1.0.39 - debug: 4.4.0(supports-color@5.5.0) - eslint: 8.57.1 - get-tsconfig: 4.10.0 - is-bun-module: 2.0.0 - stable-hash: 0.0.5 - tinyglobby: 0.2.13 - unrs-resolver: 1.7.2 - optionalDependencies: - eslint-plugin-import: 2.31.0(@typescript-eslint/parser@5.62.0(eslint@8.57.1)(typescript@5.8.3))(eslint-import-resolver-typescript@3.10.1(eslint-plugin-import@2.31.0(@typescript-eslint/parser@5.62.0(eslint@8.57.1)(typescript@5.8.3))(eslint@8.57.1))(eslint@8.57.1))(eslint@8.57.1) - transitivePeerDependencies: - - supports-color - eslint-import-resolver-typescript@3.10.1(eslint-plugin-import@2.31.0(eslint@9.30.1(jiti@2.4.2)))(eslint@9.30.1(jiti@2.4.2)): dependencies: '@nolyfill/is-core-module': 1.0.39 @@ -22352,7 +22337,7 @@ snapshots: tinyglobby: 0.2.13 unrs-resolver: 1.7.2 optionalDependencies: - eslint-plugin-import: 2.31.0(@typescript-eslint/parser@5.62.0(eslint@9.30.1(jiti@2.4.2))(typescript@5.8.3))(eslint-import-resolver-typescript@3.10.1(eslint-plugin-import@2.31.0(eslint@9.30.1(jiti@2.4.2)))(eslint@9.30.1(jiti@2.4.2)))(eslint@9.30.1(jiti@2.4.2)) + eslint-plugin-import: 2.31.0(@typescript-eslint/parser@5.62.0(eslint@9.30.1(jiti@2.4.2))(typescript@5.8.3))(eslint-import-resolver-typescript@3.10.1)(eslint@9.30.1(jiti@2.4.2)) transitivePeerDependencies: - supports-color @@ -22371,14 +22356,14 @@ snapshots: transitivePeerDependencies: - supports-color - eslint-module-utils@2.12.0(@typescript-eslint/parser@5.62.0(eslint@8.57.1)(typescript@5.8.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.10.1(eslint-plugin-import@2.31.0(@typescript-eslint/parser@5.62.0(eslint@8.57.1)(typescript@5.8.3))(eslint@8.57.1))(eslint@8.57.1))(eslint@8.57.1): + eslint-module-utils@2.12.0(@typescript-eslint/parser@5.62.0(eslint@8.57.1)(typescript@5.8.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.10.1)(eslint@8.57.1): dependencies: debug: 3.2.7 optionalDependencies: '@typescript-eslint/parser': 5.62.0(eslint@8.57.1)(typescript@5.8.3) eslint: 
8.57.1 eslint-import-resolver-node: 0.3.9 - eslint-import-resolver-typescript: 3.10.1(eslint-plugin-import@2.31.0(@typescript-eslint/parser@5.62.0(eslint@8.57.1)(typescript@5.8.3))(eslint@8.57.1))(eslint@8.57.1) + eslint-import-resolver-typescript: 3.10.1(eslint-plugin-import@2.31.0)(eslint@8.57.1) transitivePeerDependencies: - supports-color @@ -22404,7 +22389,7 @@ snapshots: transitivePeerDependencies: - supports-color - eslint-plugin-import@2.31.0(@typescript-eslint/parser@5.62.0(eslint@8.57.1)(typescript@5.8.3))(eslint-import-resolver-typescript@3.10.1(eslint-plugin-import@2.31.0(@typescript-eslint/parser@5.62.0(eslint@8.57.1)(typescript@5.8.3))(eslint@8.57.1))(eslint@8.57.1))(eslint@8.57.1): + eslint-plugin-import@2.31.0(@typescript-eslint/parser@5.62.0(eslint@8.57.1)(typescript@5.8.3))(eslint-import-resolver-typescript@3.10.1)(eslint@8.57.1): dependencies: '@rtsao/scc': 1.1.0 array-includes: 3.1.8 @@ -22415,7 +22400,7 @@ snapshots: doctrine: 2.1.0 eslint: 8.57.1 eslint-import-resolver-node: 0.3.9 - eslint-module-utils: 2.12.0(@typescript-eslint/parser@5.62.0(eslint@8.57.1)(typescript@5.8.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.10.1(eslint-plugin-import@2.31.0(@typescript-eslint/parser@5.62.0(eslint@8.57.1)(typescript@5.8.3))(eslint@8.57.1))(eslint@8.57.1))(eslint@8.57.1) + eslint-module-utils: 2.12.0(@typescript-eslint/parser@5.62.0(eslint@8.57.1)(typescript@5.8.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.10.1)(eslint@8.57.1) hasown: 2.0.2 is-core-module: 2.16.1 is-glob: 4.0.3 @@ -22433,7 +22418,7 @@ snapshots: - eslint-import-resolver-webpack - supports-color - eslint-plugin-import@2.31.0(@typescript-eslint/parser@5.62.0(eslint@9.30.1(jiti@2.4.2))(typescript@5.8.3))(eslint-import-resolver-typescript@3.10.1(eslint-plugin-import@2.31.0(eslint@9.30.1(jiti@2.4.2)))(eslint@9.30.1(jiti@2.4.2)))(eslint@9.30.1(jiti@2.4.2)): + 
eslint-plugin-import@2.31.0(@typescript-eslint/parser@5.62.0(eslint@9.30.1(jiti@2.4.2))(typescript@5.8.3))(eslint-import-resolver-typescript@3.10.1)(eslint@9.30.1(jiti@2.4.2)): dependencies: '@rtsao/scc': 1.1.0 array-includes: 3.1.8 @@ -27514,7 +27499,7 @@ snapshots: storybook-solidjs-vite@1.0.0-beta.7(@storybook/test@8.6.12(storybook@8.6.12(prettier@3.5.3)))(esbuild@0.25.4)(rollup@4.40.2)(solid-js@1.9.6)(storybook@8.6.12(prettier@3.5.3))(vite-plugin-solid@2.11.6(@testing-library/jest-dom@6.5.0)(solid-js@1.9.6)(vite@6.3.5(@types/node@22.15.17)(jiti@2.4.2)(terser@5.39.0)(yaml@2.8.1)))(vite@6.3.5(@types/node@22.15.17)(jiti@2.4.2)(terser@5.39.0)(yaml@2.8.1)): dependencies: - '@storybook/builder-vite': 10.0.0-beta.0(esbuild@0.25.4)(rollup@4.40.2)(storybook@8.6.12(prettier@3.5.3))(vite@6.3.5(@types/node@22.15.17)(jiti@2.4.2)(terser@5.39.0)(yaml@2.8.1)) + '@storybook/builder-vite': 10.0.0-beta.2(esbuild@0.25.4)(rollup@4.40.2)(storybook@8.6.12(prettier@3.5.3))(vite@6.3.5(@types/node@22.15.17)(jiti@2.4.2)(terser@5.39.0)(yaml@2.8.1)) '@storybook/types': 9.0.0-alpha.1(storybook@8.6.12(prettier@3.5.3)) magic-string: 0.30.17 solid-js: 1.9.6 From aa4e825e3a5f5b9417b3d6e0382ba47bed5b0df1 Mon Sep 17 00:00:00 2001 From: Brendan Allan Date: Thu, 11 Sep 2025 14:30:43 +0800 Subject: [PATCH 13/20] Update crates/timestamp/src/win.rs Co-authored-by: coderabbitai[bot] <136622811+coderabbitai[bot]@users.noreply.github.com> --- crates/timestamp/src/win.rs | 24 ++++++++++++++++++++---- 1 file changed, 20 insertions(+), 4 deletions(-) diff --git a/crates/timestamp/src/win.rs b/crates/timestamp/src/win.rs index c690259d51..bc017b61e9 100644 --- a/crates/timestamp/src/win.rs +++ b/crates/timestamp/src/win.rs @@ -13,13 +13,29 @@ impl PerformanceCounterTimestamp { Self(value) } - pub fn duration_since(&self, other: Self) -> Duration { - let mut freq = 0; - unsafe { QueryPerformanceFrequency(&mut freq).unwrap() }; +// At the top of crates/timestamp/src/win.rs +use std::sync::OnceLock; + +static 
PERF_FREQ: OnceLock = OnceLock::new(); +#[inline] +fn perf_freq() -> i64 { + *PERF_FREQ.get_or_init(|| { + let mut freq: i64 = 0; + // SAFETY: According to the Windows API docs, QueryPerformanceFrequency + // will succeed on all Windows XP and later systems. + unsafe { QueryPerformanceFrequency(&mut freq) }.unwrap(); + freq + }) +} + +// …later in the same file, replacing the original method: +impl Timestamp { + pub fn duration_since(&self, other: Self) -> Duration { + let freq = perf_freq(); Duration::from_secs_f64((self.0 - other.0) as f64 / freq as f64) } - +} pub fn now() -> Self { let mut value = 0; unsafe { QueryPerformanceCounter(&mut value).unwrap() }; From 5a8c1eae8260e34ff299db7394ff82692deb1dd0 Mon Sep 17 00:00:00 2001 From: Brendan Allan Date: Thu, 11 Sep 2025 14:31:23 +0800 Subject: [PATCH 14/20] Update crates/timestamp/src/win.rs Co-authored-by: coderabbitai[bot] <136622811+coderabbitai[bot]@users.noreply.github.com> --- crates/timestamp/src/win.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/crates/timestamp/src/win.rs b/crates/timestamp/src/win.rs index bc017b61e9..4a9a675958 100644 --- a/crates/timestamp/src/win.rs +++ b/crates/timestamp/src/win.rs @@ -63,8 +63,7 @@ impl Sub for PerformanceCounterTimestamp { type Output = Self; fn sub(self, rhs: Duration) -> Self::Output { - let mut freq = 0; - unsafe { QueryPerformanceFrequency(&mut freq) }.unwrap(); + let freq = perf_freq(); Self(self.0 - (rhs.as_secs_f64() * freq as f64) as i64) } } From e542920f4336d38839061bd52e77a7c9646ac70d Mon Sep 17 00:00:00 2001 From: Brendan Allan Date: Thu, 11 Sep 2025 14:31:34 +0800 Subject: [PATCH 15/20] Update crates/timestamp/src/win.rs Co-authored-by: coderabbitai[bot] <136622811+coderabbitai[bot]@users.noreply.github.com> --- crates/timestamp/src/win.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/crates/timestamp/src/win.rs b/crates/timestamp/src/win.rs index 4a9a675958..fd11035a40 100644 --- 
a/crates/timestamp/src/win.rs +++ b/crates/timestamp/src/win.rs @@ -53,8 +53,7 @@ impl Add for PerformanceCounterTimestamp { type Output = Self; fn add(self, rhs: Duration) -> Self::Output { - let mut freq = 0; - unsafe { QueryPerformanceFrequency(&mut freq) }.unwrap(); + let freq = perf_freq(); Self(self.0 + (rhs.as_secs_f64() * freq as f64) as i64) } } From a955fae4658e05917012950e015f8209ce65f376 Mon Sep 17 00:00:00 2001 From: Brendan Allan Date: Thu, 11 Sep 2025 14:42:45 +0800 Subject: [PATCH 16/20] fix --- crates/timestamp/src/win.rs | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/crates/timestamp/src/win.rs b/crates/timestamp/src/win.rs index fd11035a40..4ceb25b302 100644 --- a/crates/timestamp/src/win.rs +++ b/crates/timestamp/src/win.rs @@ -1,6 +1,7 @@ use cpal::StreamInstant; use std::{ ops::{Add, Sub}, + sync::OnceLock, time::Duration, }; use windows::Win32::System::Performance::{QueryPerformanceCounter, QueryPerformanceFrequency}; @@ -12,9 +13,7 @@ impl PerformanceCounterTimestamp { pub fn new(value: i64) -> Self { Self(value) } - -// At the top of crates/timestamp/src/win.rs -use std::sync::OnceLock; +} static PERF_FREQ: OnceLock = OnceLock::new(); @@ -29,13 +28,12 @@ fn perf_freq() -> i64 { }) } -// …later in the same file, replacing the original method: impl Timestamp { pub fn duration_since(&self, other: Self) -> Duration { let freq = perf_freq(); Duration::from_secs_f64((self.0 - other.0) as f64 / freq as f64) } -} + pub fn now() -> Self { let mut value = 0; unsafe { QueryPerformanceCounter(&mut value).unwrap() }; From e802bb0dfbc80ebf3a96ab167c01a630f6a2520a Mon Sep 17 00:00:00 2001 From: Brendan Allan Date: Thu, 11 Sep 2025 14:51:52 +0800 Subject: [PATCH 17/20] bruh --- crates/timestamp/src/win.rs | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/crates/timestamp/src/win.rs b/crates/timestamp/src/win.rs index 4ceb25b302..119616e0ec 100644 --- a/crates/timestamp/src/win.rs +++ 
b/crates/timestamp/src/win.rs @@ -9,12 +9,6 @@ use windows::Win32::System::Performance::{QueryPerformanceCounter, QueryPerforma #[derive(Clone, Copy, Debug)] pub struct PerformanceCounterTimestamp(i64); -impl PerformanceCounterTimestamp { - pub fn new(value: i64) -> Self { - Self(value) - } -} - static PERF_FREQ: OnceLock = OnceLock::new(); #[inline] @@ -28,7 +22,11 @@ fn perf_freq() -> i64 { }) } -impl Timestamp { +impl PerformanceCounterTimestamp { + pub fn new(value: i64) -> Self { + Self(value) + } + pub fn duration_since(&self, other: Self) -> Duration { let freq = perf_freq(); Duration::from_secs_f64((self.0 - other.0) as f64 / freq as f64) From f82dd5255715e1fb1f71a788a8e76e3de2c350a0 Mon Sep 17 00:00:00 2001 From: Brendan Allan Date: Fri, 12 Sep 2025 21:25:34 +0800 Subject: [PATCH 18/20] more logs --- Cargo.toml | 1 + crates/recording/src/studio_recording.rs | 2 ++ crates/timestamp/src/macos.rs | 6 +++++- 3 files changed, 8 insertions(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index 27c4104878..1d4c223b0a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -67,6 +67,7 @@ unexpected_cfgs = "allow" [workspace.lints.clippy] dbg_macro = "deny" let_underscore_future = "deny" +unchecked_duration_subtraction = "deny" # Optimize for smaller binary size [profile.release] diff --git a/crates/recording/src/studio_recording.rs b/crates/recording/src/studio_recording.rs index 082168c95e..f1f912c531 100644 --- a/crates/recording/src/studio_recording.rs +++ b/crates/recording/src/studio_recording.rs @@ -907,11 +907,13 @@ async fn create_segment_pipeline( while let Ok((mut frame, timestamp)) = channel.recv() { if let Some(timestamp_tx) = timestamp_tx.take() { + dbg!(×tamp_tx); let _ = timestamp_tx.send(timestamp); } let first_timestamp = first_timestamp.get_or_insert(timestamp); + dbg!(timestamp); let elapsed = timestamp.duration_since(start_time) - first_timestamp.duration_since(start_time); frame.set_pts(Some((elapsed.as_secs_f64() * rate) as i64)); diff --git 
a/crates/timestamp/src/macos.rs b/crates/timestamp/src/macos.rs index e98834840e..fd1065ec44 100644 --- a/crates/timestamp/src/macos.rs +++ b/crates/timestamp/src/macos.rs @@ -23,7 +23,11 @@ impl MachAbsoluteTimestamp { let info = TimeBaseInfo::new(); let freq = info.numer as f64 / info.denom as f64; - Duration::from_nanos(((self.0 - other.0) as f64 * freq) as u64) + let Some(diff) = self.0.checked_sub(other.0) else { + return Duration::ZERO; + }; + + Duration::from_nanos((diff as f64 * freq) as u64) } pub fn from_cpal(instant: cpal::StreamInstant) -> Self { From 627f0b968839b637e307efd445a6a503d25d66bf Mon Sep 17 00:00:00 2001 From: Brendan Allan Date: Sun, 14 Sep 2025 20:09:57 +0800 Subject: [PATCH 19/20] unsafe timestamp sub implementation --- crates/timestamp/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/timestamp/src/lib.rs b/crates/timestamp/src/lib.rs index abd6d03482..c5f3ffb7cd 100644 --- a/crates/timestamp/src/lib.rs +++ b/crates/timestamp/src/lib.rs @@ -79,7 +79,7 @@ impl std::ops::Sub for Timestamp { fn sub(self, rhs: Duration) -> Self::Output { match self { - Timestamp::Instant(i) => Timestamp::Instant(i - rhs), + Timestamp::Instant(i) => Timestamp::Instant(i.checked_sub(rhs).unwrap()), Timestamp::SystemTime(t) => Timestamp::SystemTime(t - rhs), #[cfg(windows)] Timestamp::PerformanceCounter(c) => Timestamp::PerformanceCounter(c - rhs), From ac124ede22ca8f1a5b45cc63520d1e86ab1e8a48 Mon Sep 17 00:00:00 2001 From: Brendan Allan Date: Mon, 15 Sep 2025 09:22:27 +0800 Subject: [PATCH 20/20] remove dbg --- crates/recording/src/studio_recording.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/crates/recording/src/studio_recording.rs b/crates/recording/src/studio_recording.rs index f1f912c531..082168c95e 100644 --- a/crates/recording/src/studio_recording.rs +++ b/crates/recording/src/studio_recording.rs @@ -907,13 +907,11 @@ async fn create_segment_pipeline( while let Ok((mut frame, timestamp)) = channel.recv() { if 
let Some(timestamp_tx) = timestamp_tx.take() { - dbg!(×tamp_tx); let _ = timestamp_tx.send(timestamp); } let first_timestamp = first_timestamp.get_or_insert(timestamp); - dbg!(timestamp); let elapsed = timestamp.duration_since(start_time) - first_timestamp.duration_since(start_time); frame.set_pts(Some((elapsed.as_secs_f64() * rate) as i64));