diff --git a/Cargo.lock b/Cargo.lock index 67b6af243..e9f361647 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2603,6 +2603,19 @@ dependencies = [ "tracing", ] +[[package]] +name = "ironrdp-egfx" +version = "0.1.0" +dependencies = [ + "bit_field", + "bitflags 2.10.0", + "ironrdp-core", + "ironrdp-dvc", + "ironrdp-graphics", + "ironrdp-pdu", + "tracing", +] + [[package]] name = "ironrdp-error" version = "0.1.3" @@ -2849,6 +2862,7 @@ dependencies = [ "ironrdp-core", "ironrdp-displaycontrol", "ironrdp-dvc", + "ironrdp-egfx", "ironrdp-fuzzing", "ironrdp-graphics", "ironrdp-input", diff --git a/crates/ironrdp-egfx/CHANGELOG.md b/crates/ironrdp-egfx/CHANGELOG.md new file mode 100644 index 000000000..2a2ac65a4 --- /dev/null +++ b/crates/ironrdp-egfx/CHANGELOG.md @@ -0,0 +1,22 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 
+ +## [Unreleased] + +### Added + +- Initial release +- MS-RDPEGFX PDU types (all 23 PDUs) +- Client-side DVC processor +- Server-side implementation with: + - Multi-surface management (Offscreen Surfaces ADM element) + - Frame tracking with flow control (Unacknowledged Frames ADM element) + - V8/V8.1/V10/V10.1-V10.7 capability negotiation + - AVC420 and AVC444 frame sending + - QoE metrics processing + - Cache import handling + - Resize coordination diff --git a/crates/ironrdp-egfx/Cargo.toml b/crates/ironrdp-egfx/Cargo.toml new file mode 100644 index 000000000..fc42c1334 --- /dev/null +++ b/crates/ironrdp-egfx/Cargo.toml @@ -0,0 +1,28 @@ +[package] +name = "ironrdp-egfx" +version = "0.1.0" +readme = "README.md" +description = "Graphics pipeline dynamic channel extension implementation" +edition.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true +authors.workspace = true +keywords.workspace = true +categories.workspace = true + +[lib] +doctest = false +# test = false + +[dependencies] +bit_field = "0.10" +bitflags = "2.4" +ironrdp-core = { path = "../ironrdp-core", version = "0.1" } # public +ironrdp-dvc = { path = "../ironrdp-dvc", version = "0.5" } # public +ironrdp-graphics = { path = "../ironrdp-graphics", version = "0.7" } # public +ironrdp-pdu = { path = "../ironrdp-pdu", version = "0.7" } # public +tracing = { version = "0.1", features = ["log"] } + +[lints] +workspace = true diff --git a/crates/ironrdp-egfx/LICENSE-APACHE b/crates/ironrdp-egfx/LICENSE-APACHE new file mode 120000 index 000000000..1cd601d0a --- /dev/null +++ b/crates/ironrdp-egfx/LICENSE-APACHE @@ -0,0 +1 @@ +../../LICENSE-APACHE \ No newline at end of file diff --git a/crates/ironrdp-egfx/LICENSE-MIT b/crates/ironrdp-egfx/LICENSE-MIT new file mode 120000 index 000000000..b2cfbdc7b --- /dev/null +++ b/crates/ironrdp-egfx/LICENSE-MIT @@ -0,0 +1 @@ +../../LICENSE-MIT \ No newline at end of file diff --git a/crates/ironrdp-egfx/README.md 
b/crates/ironrdp-egfx/README.md new file mode 100644 index 000000000..44956e3b9 --- /dev/null +++ b/crates/ironrdp-egfx/README.md @@ -0,0 +1,8 @@ +# ironrdp-egfx + +Graphics Pipeline Extension ([MS-RDPEGFX]) implementation for IronRDP. + +Provides PDU types and client/server processors for the Display Pipeline Virtual +Channel Extension, including H.264/AVC420 and AVC444 video streaming support. + +[MS-RDPEGFX]: https://learn.microsoft.com/en-us/openspecs/windows_protocols/ms-rdpegfx/da5c75f9-cd99-450c-98c4-014a496942b0 diff --git a/crates/ironrdp-egfx/src/client.rs b/crates/ironrdp-egfx/src/client.rs new file mode 100644 index 000000000..b40c383d8 --- /dev/null +++ b/crates/ironrdp-egfx/src/client.rs @@ -0,0 +1,74 @@ +use ironrdp_core::{impl_as_any, ReadCursor}; +use ironrdp_dvc::{DvcClientProcessor, DvcMessage, DvcProcessor}; +use ironrdp_graphics::zgfx; +use ironrdp_pdu::{decode_cursor, decode_err, PduResult}; +use tracing::trace; + +use crate::{ + pdu::{CapabilitiesAdvertisePdu, CapabilitiesV8Flags, CapabilitySet, GfxPdu}, + CHANNEL_NAME, +}; + +/// Max capacity to keep for decompressed buffer when cleared. +const MAX_DECOMPRESSED_BUFFER_CAPACITY: usize = 16384; // 16 KiB + +pub trait GraphicsPipelineHandler: Send { + fn capabilities(&self) -> Vec { + vec![CapabilitySet::V8 { + flags: CapabilitiesV8Flags::empty(), + }] + } + + fn handle_pdu(&mut self, pdu: GfxPdu) { + trace!(?pdu); + } +} + +/// A client for the Graphics Pipeline Virtual Channel. 
+pub struct GraphicsPipelineClient { + handler: Box, + decompressor: zgfx::Decompressor, + decompressed_buffer: Vec, +} + +impl GraphicsPipelineClient { + pub fn new(handler: Box) -> Self { + Self { + handler, + decompressor: zgfx::Decompressor::new(), + decompressed_buffer: Vec::new(), + } + } +} + +impl_as_any!(GraphicsPipelineClient); + +impl DvcProcessor for GraphicsPipelineClient { + fn channel_name(&self) -> &str { + CHANNEL_NAME + } + + fn start(&mut self, _channel_id: u32) -> PduResult> { + let pdu = GfxPdu::CapabilitiesAdvertise(CapabilitiesAdvertisePdu(self.handler.capabilities())); + + Ok(vec![Box::new(pdu)]) + } + + fn process(&mut self, _channel_id: u32, payload: &[u8]) -> PduResult> { + self.decompressed_buffer.clear(); + self.decompressed_buffer.shrink_to(MAX_DECOMPRESSED_BUFFER_CAPACITY); + self.decompressor + .decompress(payload, &mut self.decompressed_buffer) + .map_err(|e| decode_err!(e))?; + + let mut cursor = ReadCursor::new(self.decompressed_buffer.as_slice()); + while !cursor.is_empty() { + let pdu = decode_cursor(&mut cursor).map_err(|e| decode_err!(e))?; + self.handler.handle_pdu(pdu); + } + + Ok(vec![]) + } +} + +impl DvcClientProcessor for GraphicsPipelineClient {} diff --git a/crates/ironrdp-egfx/src/lib.rs b/crates/ironrdp-egfx/src/lib.rs new file mode 100644 index 000000000..54cea6638 --- /dev/null +++ b/crates/ironrdp-egfx/src/lib.rs @@ -0,0 +1,8 @@ +#![cfg_attr(doc, doc = include_str!("../README.md"))] +#![doc(html_logo_url = "https://cdnweb.devolutions.net/images/projects/devolutions/logos/devolutions-icon-shadow.svg")] + +pub(crate) const CHANNEL_NAME: &str = "Microsoft::Windows::RDS::Graphics"; + +pub mod client; +pub mod pdu; +pub mod server; diff --git a/crates/ironrdp-egfx/src/pdu/avc.rs b/crates/ironrdp-egfx/src/pdu/avc.rs new file mode 100644 index 000000000..0e991f9be --- /dev/null +++ b/crates/ironrdp-egfx/src/pdu/avc.rs @@ -0,0 +1,554 @@ +use core::fmt; + +use ironrdp_pdu::{ + cast_length, ensure_fixed_part_size, 
ensure_size, geometry::InclusiveRectangle, invalid_field_err, Decode, + DecodeResult, Encode, EncodeResult, ReadCursor, WriteCursor, +}; + +use bit_field::BitField as _; +use bitflags::bitflags; + +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct QuantQuality { + pub quantization_parameter: u8, + pub progressive: bool, + pub quality: u8, +} + +impl QuantQuality { + const NAME: &'static str = "GfxQuantQuality"; + + const FIXED_PART_SIZE: usize = 1 /* data */ + 1 /* quality */; +} + +impl Encode for QuantQuality { + fn encode(&self, dst: &mut WriteCursor<'_>) -> EncodeResult<()> { + ensure_fixed_part_size!(in: dst); + + let mut data = 0u8; + data.set_bits(0..6, self.quantization_parameter); + data.set_bit(7, self.progressive); + dst.write_u8(data); + dst.write_u8(self.quality); + Ok(()) + } + + fn name(&self) -> &'static str { + Self::NAME + } + + fn size(&self) -> usize { + Self::FIXED_PART_SIZE + } +} + +impl<'de> Decode<'de> for QuantQuality { + fn decode(src: &mut ReadCursor<'de>) -> DecodeResult { + ensure_fixed_part_size!(in: src); + + let data = src.read_u8(); + let qp = data.get_bits(0..6); + let progressive = data.get_bit(7); + let quality = src.read_u8(); + Ok(QuantQuality { + quantization_parameter: qp, + progressive, + quality, + }) + } +} + +#[derive(Clone, PartialEq, Eq)] +pub struct Avc420BitmapStream<'a> { + pub rectangles: Vec, + pub quant_qual_vals: Vec, + pub data: &'a [u8], +} + +impl fmt::Debug for Avc420BitmapStream<'_> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Avc420BitmapStream") + .field("rectangles", &self.rectangles) + .field("quant_qual_vals", &self.quant_qual_vals) + .field("data_len", &self.data.len()) + .finish() + } +} + +impl Avc420BitmapStream<'_> { + const NAME: &'static str = "Avc420BitmapStream"; + + const FIXED_PART_SIZE: usize = 4 /* nRect */; +} + +impl Encode for Avc420BitmapStream<'_> { + fn encode(&self, dst: &mut WriteCursor<'_>) -> EncodeResult<()> { + ensure_size!(in: dst, size: 
self.size()); + + // INVARIANT: rectangles.len() == quant_qual_vals.len() + debug_assert_eq!(self.rectangles.len(), self.quant_qual_vals.len()); + + dst.write_u32(cast_length!("len", self.rectangles.len())?); + for rectangle in &self.rectangles { + rectangle.encode(dst)?; + } + for quant_qual_val in &self.quant_qual_vals { + quant_qual_val.encode(dst)?; + } + dst.write_slice(self.data); + + Ok(()) + } + + fn name(&self) -> &'static str { + Self::NAME + } + + fn size(&self) -> usize { + // Each rectangle is 8 bytes and 2 bytes for each quant val + Self::FIXED_PART_SIZE + self.rectangles.len() * 10 + self.data.len() + } +} + +impl<'de> Decode<'de> for Avc420BitmapStream<'de> { + fn decode(src: &mut ReadCursor<'de>) -> DecodeResult { + ensure_fixed_part_size!(in: src); + + let num_regions = src.read_u32(); + #[expect(clippy::as_conversions, reason = "num_regions bounded by practical limits")] + let num_regions_usize = num_regions as usize; + let mut rectangles = Vec::with_capacity(num_regions_usize); + let mut quant_qual_vals = Vec::with_capacity(num_regions_usize); + for _ in 0..num_regions { + rectangles.push(InclusiveRectangle::decode(src)?); + } + for _ in 0..num_regions { + quant_qual_vals.push(QuantQuality::decode(src)?); + } + let data = src.remaining(); + Ok(Avc420BitmapStream { + rectangles, + quant_qual_vals, + data, + }) + } +} + +bitflags! 
{ + #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] + pub struct Encoding: u8 { + const LUMA_AND_CHROMA = 0x00; + const LUMA = 0x01; + const CHROMA = 0x02; + } +} + +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct Avc444BitmapStream<'a> { + pub encoding: Encoding, + pub stream1: Avc420BitmapStream<'a>, + pub stream2: Option>, +} + +impl Avc444BitmapStream<'_> { + const NAME: &'static str = "Avc444BitmapStream"; + + const FIXED_PART_SIZE: usize = 4 /* streamInfo */; +} + +impl Encode for Avc444BitmapStream<'_> { + fn encode(&self, dst: &mut WriteCursor<'_>) -> EncodeResult<()> { + ensure_fixed_part_size!(in: dst); + + let mut stream_info = 0u32; + stream_info.set_bits(0..30, cast_length!("stream1size", self.stream1.size())?); + stream_info.set_bits(30..32, self.encoding.bits().into()); + dst.write_u32(stream_info); + self.stream1.encode(dst)?; + if let Some(stream) = self.stream2.as_ref() { + stream.encode(dst)?; + } + Ok(()) + } + + fn name(&self) -> &'static str { + Self::NAME + } + + fn size(&self) -> usize { + let stream2_size = if let Some(stream) = self.stream2.as_ref() { + stream.size() + } else { + 0 + }; + + Self::FIXED_PART_SIZE + self.stream1.size() + stream2_size + } +} + +impl<'de> Decode<'de> for Avc444BitmapStream<'de> { + fn decode(src: &mut ReadCursor<'de>) -> DecodeResult { + ensure_fixed_part_size!(in: src); + + let stream_info = src.read_u32(); + let stream_len = stream_info.get_bits(0..30); + #[expect(clippy::unwrap_used, reason = "2-bit extraction always fits in u8")] + let encoding_raw: u8 = stream_info.get_bits(30..32).try_into().unwrap(); + // Only 0x00 (LUMA_AND_CHROMA), 0x01 (LUMA), 0x02 (CHROMA) are defined. 
+ if encoding_raw > 2 { + return Err(invalid_field_err!("encoding", "reserved encoding value")); + } + let encoding = Encoding::from_bits_truncate(encoding_raw); + + if stream_len == 0 { + if encoding == Encoding::LUMA_AND_CHROMA { + return Err(invalid_field_err!("encoding", "invalid encoding")); + } + + let stream1 = Avc420BitmapStream::decode(src)?; + Ok(Avc444BitmapStream { + encoding, + stream1, + stream2: None, + }) + } else { + #[expect(clippy::as_conversions, reason = "30-bit value fits in usize")] + let (mut stream1, mut stream2) = src.split_at(stream_len as usize); + let stream1 = Avc420BitmapStream::decode(&mut stream1)?; + let stream2 = if encoding == Encoding::LUMA_AND_CHROMA { + Some(Avc420BitmapStream::decode(&mut stream2)?) + } else { + None + }; + Ok(Avc444BitmapStream { + encoding, + stream1, + stream2, + }) + } + } +} + +// ============================================================================ +// Server-side utilities for H.264/AVC encoding +// ============================================================================ + +/// Region metadata for AVC420 bitmap streams (server-side) +/// +/// Describes a rectangular region within the frame along with its +/// H.264 encoding parameters. 
+/// +/// # Example +/// +/// ``` +/// use ironrdp_egfx::pdu::Avc420Region; +/// +/// // Create a region covering a 1920x1080 frame +/// let region = Avc420Region::full_frame(1920, 1080, 22); +/// assert_eq!(region.left, 0); +/// assert_eq!(region.right, 1919); +/// ``` +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct Avc420Region { + /// Left edge of the region (inclusive) + pub left: u16, + /// Top edge of the region (inclusive) + pub top: u16, + /// Right edge of the region (inclusive) + pub right: u16, + /// Bottom edge of the region (inclusive) + pub bottom: u16, + /// H.264 quantization parameter (0-51, lower = higher quality) + pub quantization_parameter: u8, + /// Quality value (0-100) + pub quality: u8, +} + +impl Avc420Region { + /// Create a region covering the entire frame + /// + /// # Arguments + /// + /// * `width` - Frame width in pixels + /// * `height` - Frame height in pixels + /// * `qp` - H.264 quantization parameter (0-51) + #[must_use] + pub fn full_frame(width: u16, height: u16, qp: u8) -> Self { + Self { + left: 0, + top: 0, + right: width.saturating_sub(1), + bottom: height.saturating_sub(1), + quantization_parameter: qp, + quality: 100, + } + } + + /// Create a region with custom bounds + #[must_use] + pub fn new(left: u16, top: u16, right: u16, bottom: u16, qp: u8, quality: u8) -> Self { + Self { + left, + top, + right, + bottom, + quantization_parameter: qp, + quality, + } + } + + /// Convert to `InclusiveRectangle` for PDU encoding + #[must_use] + pub fn to_rectangle(&self) -> InclusiveRectangle { + InclusiveRectangle { + left: self.left, + top: self.top, + right: self.right, + bottom: self.bottom, + } + } + + /// Convert to `QuantQuality` for PDU encoding + #[must_use] + pub fn to_quant_quality(&self) -> QuantQuality { + QuantQuality { + quantization_parameter: self.quantization_parameter, + progressive: false, + quality: self.quality, + } + } +} + +/// Convert H.264 Annex B format to AVC format +/// +/// MS-RDPEGFX requires AVC 
format (length-prefixed NAL units), +/// but most encoders output Annex B format (start code prefixed). +/// +/// ```text +/// Annex B: 00 00 00 01 00 00 00 01 ... +/// AVC: <4-byte BE length> <4-byte BE length> ... +/// ``` +/// +/// # Arguments +/// +/// * `data` - H.264 bitstream in Annex B format +/// +/// # Returns +/// +/// H.264 bitstream in AVC format with 4-byte big-endian length prefixes +/// +/// # Example +/// +/// ``` +/// use ironrdp_egfx::pdu::annex_b_to_avc; +/// +/// // NAL unit with 3-byte start code +/// let annex_b = [0x00, 0x00, 0x01, 0x67, 0x42, 0x00]; +/// let avc = annex_b_to_avc(&annex_b); +/// // Result: [0x00, 0x00, 0x00, 0x03, 0x67, 0x42, 0x00] +/// assert_eq!(avc[0..4], [0, 0, 0, 3]); // 4-byte length = 3 +/// ``` +#[must_use] +pub fn annex_b_to_avc(data: &[u8]) -> Vec { + let mut result = Vec::with_capacity(data.len()); + let mut i = 0; + + while i < data.len() { + // Find start code (00 00 01 or 00 00 00 01) + let start; + + if i + 4 <= data.len() && data[i..i + 4] == [0, 0, 0, 1] { + start = i + 4; + } else if i + 3 <= data.len() && data[i..i + 3] == [0, 0, 1] { + start = i + 3; + } else { + i += 1; + continue; + } + + // Find next start code or end of data + let mut end = data.len(); + for j in start..data.len().saturating_sub(2) { + if data[j..j + 3] == [0, 0, 1] { + // Could be 3-byte or 4-byte start code + // Check if there's a leading zero (4-byte) + if j > 0 && data[j - 1] == 0 { + end = j - 1; + } else { + end = j; + } + break; + } + } + + // Write length-prefixed NAL unit + let nal_data = &data[start..end]; + if !nal_data.is_empty() { + // NAL units in H.264 are limited to ~4GB (32-bit length), so truncation is not a concern + #[expect( + clippy::cast_possible_truncation, + clippy::as_conversions, + reason = "NAL unit length fits in u32" + )] + let len = nal_data.len() as u32; + result.extend_from_slice(&len.to_be_bytes()); + result.extend_from_slice(nal_data); + } + + // Move to end of current NAL unit; next iteration will 
find the next start code + i = end; + } + + result +} + +/// Align a dimension to 16-pixel boundary +/// +/// H.264 operates on 16x16 macroblocks. This function rounds up +/// a dimension to the nearest multiple of 16. +/// +/// # Example +/// +/// ``` +/// use ironrdp_egfx::pdu::align_to_16; +/// +/// assert_eq!(align_to_16(1920), 1920); // Already aligned +/// assert_eq!(align_to_16(1080), 1088); // Rounded up +/// assert_eq!(align_to_16(1), 16); +/// ``` +#[must_use] +pub const fn align_to_16(dimension: u32) -> u32 { + (dimension + 15) & !15 +} + +/// Create an owned AVC420 bitmap stream from regions and H.264 data +/// +/// This is a helper for server-side frame encoding. It creates +/// the bitmap stream structure that can be embedded in a +/// `WireToSurface1Pdu`. +/// +/// # Arguments +/// +/// * `regions` - List of regions with their encoding parameters +/// * `h264_data` - H.264 encoded data (should be in AVC format, not Annex B) +/// +/// # Returns +/// +/// Encoded `Avc420BitmapStream` as a byte vector +/// +/// # Panics +/// +/// Panics if internal encoding fails (should not happen with valid inputs). 
+#[must_use] +pub fn encode_avc420_bitmap_stream(regions: &[Avc420Region], h264_data: &[u8]) -> Vec { + let rectangles: Vec = regions.iter().map(Avc420Region::to_rectangle).collect(); + + let quant_qual_vals: Vec = regions.iter().map(Avc420Region::to_quant_quality).collect(); + + let stream = Avc420BitmapStream { + rectangles, + quant_qual_vals, + data: h264_data, + }; + + // Calculate size and encode + let size = stream.size(); + let mut buf = vec![0u8; size]; + let mut cursor = WriteCursor::new(&mut buf); + + // This should not fail as we pre-allocated the exact size + stream + .encode(&mut cursor) + .expect("encode_avc420_bitmap_stream: encoding failed"); + + buf +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_avc420_region_full_frame() { + let region = Avc420Region::full_frame(1920, 1080, 22); + assert_eq!(region.left, 0); + assert_eq!(region.top, 0); + assert_eq!(region.right, 1919); + assert_eq!(region.bottom, 1079); + assert_eq!(region.quantization_parameter, 22); + assert_eq!(region.quality, 100); + } + + #[test] + fn test_align_to_16() { + assert_eq!(align_to_16(0), 0); + assert_eq!(align_to_16(1), 16); + assert_eq!(align_to_16(15), 16); + assert_eq!(align_to_16(16), 16); + assert_eq!(align_to_16(17), 32); + assert_eq!(align_to_16(1920), 1920); + assert_eq!(align_to_16(1080), 1088); + } + + #[test] + fn test_annex_b_to_avc_3byte_start() { + // NAL with 3-byte start code: 00 00 01 + let annex_b = [0x00, 0x00, 0x01, 0x67, 0x42, 0x00, 0x1E]; + let avc = annex_b_to_avc(&annex_b); + + // Should be: 4-byte length (4) + NAL data + assert_eq!(avc.len(), 8); + assert_eq!(&avc[0..4], &[0, 0, 0, 4]); // Length = 4 + assert_eq!(&avc[4..8], &[0x67, 0x42, 0x00, 0x1E]); + } + + #[test] + fn test_annex_b_to_avc_4byte_start() { + // NAL with 4-byte start code: 00 00 00 01 + let annex_b = [0x00, 0x00, 0x00, 0x01, 0x67, 0x42, 0x00]; + let avc = annex_b_to_avc(&annex_b); + + assert_eq!(avc.len(), 7); + assert_eq!(&avc[0..4], &[0, 0, 0, 3]); // Length = 3 
+ assert_eq!(&avc[4..7], &[0x67, 0x42, 0x00]); + } + + #[test] + fn test_annex_b_to_avc_multiple_nals() { + // Two NAL units + let annex_b = [ + 0x00, 0x00, 0x00, 0x01, 0x67, 0x42, // SPS + 0x00, 0x00, 0x01, 0x68, 0xCE, // PPS with 3-byte start + ]; + let avc = annex_b_to_avc(&annex_b); + + // First NAL: 4 bytes length + 2 bytes data + // Second NAL: 4 bytes length + 2 bytes data + assert!(avc.len() >= 12); + } + + #[test] + fn test_annex_b_to_avc_empty() { + let avc = annex_b_to_avc(&[]); + assert!(avc.is_empty()); + } + + #[test] + fn test_encode_avc420_bitmap_stream() { + let regions = vec![Avc420Region::full_frame(1920, 1080, 22)]; + let h264_data = [0x00, 0x00, 0x00, 0x01, 0x67]; // Minimal H.264 + + let encoded = encode_avc420_bitmap_stream(®ions, &h264_data); + + // Should have: 4 bytes (nRect=1) + 8 bytes (rectangle) + 2 bytes (quant) + 5 bytes (data) + assert_eq!(encoded.len(), 4 + 8 + 2 + 5); + + // Verify we can decode it back + let mut cursor = ReadCursor::new(&encoded); + let decoded = Avc420BitmapStream::decode(&mut cursor).expect("decode failed"); + + assert_eq!(decoded.rectangles.len(), 1); + assert_eq!(decoded.quant_qual_vals.len(), 1); + assert_eq!(decoded.data, &h264_data); + } +} diff --git a/crates/ironrdp-egfx/src/pdu/cmd.rs b/crates/ironrdp-egfx/src/pdu/cmd.rs new file mode 100644 index 000000000..f783d2243 --- /dev/null +++ b/crates/ironrdp-egfx/src/pdu/cmd.rs @@ -0,0 +1,2087 @@ +use core::fmt; +use core::iter; + +use ironrdp_core::{ + ensure_fixed_part_size, invalid_field_err, Decode, DecodeResult, Encode, EncodeResult, ReadCursor, WriteCursor, +}; +use ironrdp_dvc::DvcEncode; +use ironrdp_pdu::{ + cast_length, ensure_size, gcc::Monitor, geometry::InclusiveRectangle, read_padding, write_padding, DecodeError, +}; + +use bit_field::BitField as _; +use bitflags::bitflags; +use tracing::warn; + +use super::{Color, PixelFormat, Point}; + +const RDPGFX_CMDID_WIRETOSURFACE_1: u16 = 0x0001; +const RDPGFX_CMDID_WIRETOSURFACE_2: u16 = 0x0002; +const 
RDPGFX_CMDID_DELETEENCODINGCONTEXT: u16 = 0x0003; +const RDPGFX_CMDID_SOLIDFILL: u16 = 0x0004; +const RDPGFX_CMDID_SURFACETOSURFACE: u16 = 0x0005; +const RDPGFX_CMDID_SURFACETOCACHE: u16 = 0x0006; +const RDPGFX_CMDID_CACHETOSURFACE: u16 = 0x0007; +const RDPGFX_CMDID_EVICTCACHEENTRY: u16 = 0x0008; +const RDPGFX_CMDID_CREATESURFACE: u16 = 0x0009; +const RDPGFX_CMDID_DELETESURFACE: u16 = 0x000a; +const RDPGFX_CMDID_STARTFRAME: u16 = 0x000b; +const RDPGFX_CMDID_ENDFRAME: u16 = 0x000c; +const RDPGFX_CMDID_FRAMEACKNOWLEDGE: u16 = 0x000d; +const RDPGFX_CMDID_RESETGRAPHICS: u16 = 0x000e; +const RDPGFX_CMDID_MAPSURFACETOOUTPUT: u16 = 0x000f; +const RDPGFX_CMDID_CACHEIMPORTOFFER: u16 = 0x0010; +const RDPGFX_CMDID_CACHEIMPORTREPLY: u16 = 0x0011; +const RDPGFX_CMDID_CAPSADVERTISE: u16 = 0x0012; +const RDPGFX_CMDID_CAPSCONFIRM: u16 = 0x0013; +const RDPGFX_CMDID_MAPSURFACETOWINDOW: u16 = 0x0015; +const RDPGFX_CMDID_QOEFRAMEACKNOWLEDGE: u16 = 0x0016; +const RDPGFX_CMDID_MAPSURFACETOSCALEDOUTPUT: u16 = 0x0017; +const RDPGFX_CMDID_MAPSURFACETOSCALEDWINDOW: u16 = 0x0018; + +const MAX_RESET_GRAPHICS_WIDTH_HEIGHT: u32 = 32_766; +const MONITOR_COUNT_MAX: u32 = 16; +const RESET_GRAPHICS_PDU_SIZE: usize = 340 - GfxPdu::FIXED_PART_SIZE; + +/// Display Pipeline Virtual Channel message (PDU prefixed with `RDPGFX_HEADER`) +/// +/// INVARIANTS: size of encoded inner PDU is always less than `u32::MAX - Self::FIXED_PART_SIZE` +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum GfxPdu { + WireToSurface1(WireToSurface1Pdu), + WireToSurface2(WireToSurface2Pdu), + DeleteEncodingContext(DeleteEncodingContextPdu), + SolidFill(SolidFillPdu), + SurfaceToSurface(SurfaceToSurfacePdu), + SurfaceToCache(SurfaceToCachePdu), + CacheToSurface(CacheToSurfacePdu), + EvictCacheEntry(EvictCacheEntryPdu), + CreateSurface(CreateSurfacePdu), + DeleteSurface(DeleteSurfacePdu), + StartFrame(StartFramePdu), + EndFrame(EndFramePdu), + FrameAcknowledge(FrameAcknowledgePdu), + ResetGraphics(ResetGraphicsPdu), + 
MapSurfaceToOutput(MapSurfaceToOutputPdu), + CacheImportOffer(CacheImportOfferPdu), + CacheImportReply(CacheImportReplyPdu), + CapabilitiesAdvertise(CapabilitiesAdvertisePdu), + CapabilitiesConfirm(CapabilitiesConfirmPdu), + MapSurfaceToWindow(MapSurfaceToWindowPdu), + QoeFrameAcknowledge(QoeFrameAcknowledgePdu), + MapSurfaceToScaledOutput(MapSurfaceToScaledOutputPdu), + MapSurfaceToScaledWindow(MapSurfaceToScaledWindowPdu), +} + +/// 2.2.1.5 RDPGFX_HEADER +/// +/// [2.2.1.5]: +impl GfxPdu { + const NAME: &'static str = "RDPGFX_HEADER"; + + const FIXED_PART_SIZE: usize = 2 /* CmdId */ + 2 /* flags */ + 4 /* Length */; +} + +impl Encode for GfxPdu { + fn encode(&self, dst: &mut WriteCursor<'_>) -> EncodeResult<()> { + ensure_fixed_part_size!(in: dst); + + let (cmdid, payload_length) = match self { + GfxPdu::WireToSurface1(pdu) => (RDPGFX_CMDID_WIRETOSURFACE_1, pdu.size()), + GfxPdu::WireToSurface2(pdu) => (RDPGFX_CMDID_WIRETOSURFACE_2, pdu.size()), + GfxPdu::DeleteEncodingContext(pdu) => (RDPGFX_CMDID_DELETEENCODINGCONTEXT, pdu.size()), + GfxPdu::SolidFill(pdu) => (RDPGFX_CMDID_SOLIDFILL, pdu.size()), + GfxPdu::SurfaceToSurface(pdu) => (RDPGFX_CMDID_SURFACETOSURFACE, pdu.size()), + GfxPdu::SurfaceToCache(pdu) => (RDPGFX_CMDID_SURFACETOCACHE, pdu.size()), + GfxPdu::CacheToSurface(pdu) => (RDPGFX_CMDID_CACHETOSURFACE, pdu.size()), + GfxPdu::EvictCacheEntry(pdu) => (RDPGFX_CMDID_EVICTCACHEENTRY, pdu.size()), + GfxPdu::CreateSurface(pdu) => (RDPGFX_CMDID_CREATESURFACE, pdu.size()), + GfxPdu::DeleteSurface(pdu) => (RDPGFX_CMDID_DELETESURFACE, pdu.size()), + GfxPdu::StartFrame(pdu) => (RDPGFX_CMDID_STARTFRAME, pdu.size()), + GfxPdu::EndFrame(pdu) => (RDPGFX_CMDID_ENDFRAME, pdu.size()), + GfxPdu::FrameAcknowledge(pdu) => (RDPGFX_CMDID_FRAMEACKNOWLEDGE, pdu.size()), + GfxPdu::ResetGraphics(pdu) => (RDPGFX_CMDID_RESETGRAPHICS, pdu.size()), + GfxPdu::MapSurfaceToOutput(pdu) => (RDPGFX_CMDID_MAPSURFACETOOUTPUT, pdu.size()), + GfxPdu::CacheImportOffer(pdu) => 
(RDPGFX_CMDID_CACHEIMPORTOFFER, pdu.size()), + GfxPdu::CacheImportReply(pdu) => (RDPGFX_CMDID_CACHEIMPORTREPLY, pdu.size()), + GfxPdu::CapabilitiesAdvertise(pdu) => (RDPGFX_CMDID_CAPSADVERTISE, pdu.size()), + GfxPdu::CapabilitiesConfirm(pdu) => (RDPGFX_CMDID_CAPSCONFIRM, pdu.size()), + GfxPdu::MapSurfaceToWindow(pdu) => (RDPGFX_CMDID_MAPSURFACETOWINDOW, pdu.size()), + GfxPdu::QoeFrameAcknowledge(pdu) => (RDPGFX_CMDID_QOEFRAMEACKNOWLEDGE, pdu.size()), + GfxPdu::MapSurfaceToScaledOutput(pdu) => (RDPGFX_CMDID_MAPSURFACETOSCALEDOUTPUT, pdu.size()), + GfxPdu::MapSurfaceToScaledWindow(pdu) => (RDPGFX_CMDID_MAPSURFACETOSCALEDWINDOW, pdu.size()), + }; + + // This will never overflow as per invariants. + #[expect(clippy::arithmetic_side_effects, reason = "guaranteed by GfxPdu invariants")] + let pdu_size = payload_length + Self::FIXED_PART_SIZE; + + // Write `RDPGFX_HEADER` fields. + dst.write_u16(cmdid); + dst.write_u16(0); /* flags */ + #[expect(clippy::unwrap_used, reason = "pdu_size bounded by GfxPdu invariants")] + dst.write_u32(pdu_size.try_into().unwrap()); + + match self { + GfxPdu::WireToSurface1(pdu) => pdu.encode(dst), + GfxPdu::WireToSurface2(pdu) => pdu.encode(dst), + GfxPdu::DeleteEncodingContext(pdu) => pdu.encode(dst), + GfxPdu::SolidFill(pdu) => pdu.encode(dst), + GfxPdu::SurfaceToSurface(pdu) => pdu.encode(dst), + GfxPdu::SurfaceToCache(pdu) => pdu.encode(dst), + GfxPdu::CacheToSurface(pdu) => pdu.encode(dst), + GfxPdu::EvictCacheEntry(pdu) => pdu.encode(dst), + GfxPdu::CreateSurface(pdu) => pdu.encode(dst), + GfxPdu::DeleteSurface(pdu) => pdu.encode(dst), + GfxPdu::StartFrame(pdu) => pdu.encode(dst), + GfxPdu::EndFrame(pdu) => pdu.encode(dst), + GfxPdu::FrameAcknowledge(pdu) => pdu.encode(dst), + GfxPdu::ResetGraphics(pdu) => pdu.encode(dst), + GfxPdu::MapSurfaceToOutput(pdu) => pdu.encode(dst), + GfxPdu::CacheImportOffer(pdu) => pdu.encode(dst), + GfxPdu::CacheImportReply(pdu) => pdu.encode(dst), + GfxPdu::CapabilitiesAdvertise(pdu) => pdu.encode(dst), + 
GfxPdu::CapabilitiesConfirm(pdu) => pdu.encode(dst), + GfxPdu::MapSurfaceToWindow(pdu) => pdu.encode(dst), + GfxPdu::QoeFrameAcknowledge(pdu) => pdu.encode(dst), + GfxPdu::MapSurfaceToScaledOutput(pdu) => pdu.encode(dst), + GfxPdu::MapSurfaceToScaledWindow(pdu) => pdu.encode(dst), + }?; + + Ok(()) + } + + fn name(&self) -> &'static str { + Self::NAME + } + + fn size(&self) -> usize { + // As per invariants: This will never overflow. + #[expect(clippy::arithmetic_side_effects, reason = "guaranteed by GfxPdu invariants")] + let size = Self::FIXED_PART_SIZE + + match self { + GfxPdu::WireToSurface1(pdu) => pdu.size(), + GfxPdu::WireToSurface2(pdu) => pdu.size(), + GfxPdu::DeleteEncodingContext(pdu) => pdu.size(), + GfxPdu::SolidFill(pdu) => pdu.size(), + GfxPdu::SurfaceToSurface(pdu) => pdu.size(), + GfxPdu::SurfaceToCache(pdu) => pdu.size(), + GfxPdu::CacheToSurface(pdu) => pdu.size(), + GfxPdu::EvictCacheEntry(pdu) => pdu.size(), + GfxPdu::CreateSurface(pdu) => pdu.size(), + GfxPdu::DeleteSurface(pdu) => pdu.size(), + GfxPdu::StartFrame(pdu) => pdu.size(), + GfxPdu::EndFrame(pdu) => pdu.size(), + GfxPdu::FrameAcknowledge(pdu) => pdu.size(), + GfxPdu::ResetGraphics(pdu) => pdu.size(), + GfxPdu::MapSurfaceToOutput(pdu) => pdu.size(), + GfxPdu::CacheImportOffer(pdu) => pdu.size(), + GfxPdu::CacheImportReply(pdu) => pdu.size(), + GfxPdu::CapabilitiesAdvertise(pdu) => pdu.size(), + GfxPdu::CapabilitiesConfirm(pdu) => pdu.size(), + GfxPdu::MapSurfaceToWindow(pdu) => pdu.size(), + GfxPdu::QoeFrameAcknowledge(pdu) => pdu.size(), + GfxPdu::MapSurfaceToScaledOutput(pdu) => pdu.size(), + GfxPdu::MapSurfaceToScaledWindow(pdu) => pdu.size(), + }; + + size + } +} + +impl DvcEncode for GfxPdu {} + +impl<'de> Decode<'de> for GfxPdu { + fn decode(src: &mut ReadCursor<'de>) -> DecodeResult { + ensure_fixed_part_size!(in: src); + + // Read `RDPGFX_HEADER` fields. 
+ let cmdid = src.read_u16(); + let flags = src.read_u16(); /* flags */ + if flags != 0 { + warn!(?flags, "invalid GFX flag"); + } + let pdu_length = src.read_u32(); + + #[expect(clippy::unwrap_used, reason = "FIXED_PART_SIZE is a small constant")] + let _payload_length = pdu_length + .checked_sub(Self::FIXED_PART_SIZE.try_into().unwrap()) + .ok_or_else(|| invalid_field_err!("Length", "GFX PDU length is too small"))?; + + match cmdid { + RDPGFX_CMDID_WIRETOSURFACE_1 => { + let pdu = WireToSurface1Pdu::decode(src)?; + Ok(GfxPdu::WireToSurface1(pdu)) + } + RDPGFX_CMDID_WIRETOSURFACE_2 => { + let pdu = WireToSurface2Pdu::decode(src)?; + Ok(GfxPdu::WireToSurface2(pdu)) + } + RDPGFX_CMDID_DELETEENCODINGCONTEXT => { + let pdu = DeleteEncodingContextPdu::decode(src)?; + Ok(GfxPdu::DeleteEncodingContext(pdu)) + } + RDPGFX_CMDID_SOLIDFILL => { + let pdu = SolidFillPdu::decode(src)?; + Ok(GfxPdu::SolidFill(pdu)) + } + RDPGFX_CMDID_SURFACETOSURFACE => { + let pdu = SurfaceToSurfacePdu::decode(src)?; + Ok(GfxPdu::SurfaceToSurface(pdu)) + } + RDPGFX_CMDID_SURFACETOCACHE => { + let pdu = SurfaceToCachePdu::decode(src)?; + Ok(GfxPdu::SurfaceToCache(pdu)) + } + RDPGFX_CMDID_CACHETOSURFACE => { + let pdu = CacheToSurfacePdu::decode(src)?; + Ok(GfxPdu::CacheToSurface(pdu)) + } + RDPGFX_CMDID_EVICTCACHEENTRY => { + let pdu = EvictCacheEntryPdu::decode(src)?; + Ok(GfxPdu::EvictCacheEntry(pdu)) + } + RDPGFX_CMDID_CREATESURFACE => { + let pdu = CreateSurfacePdu::decode(src)?; + Ok(GfxPdu::CreateSurface(pdu)) + } + RDPGFX_CMDID_DELETESURFACE => { + let pdu = DeleteSurfacePdu::decode(src)?; + Ok(GfxPdu::DeleteSurface(pdu)) + } + RDPGFX_CMDID_STARTFRAME => { + let pdu = StartFramePdu::decode(src)?; + Ok(GfxPdu::StartFrame(pdu)) + } + RDPGFX_CMDID_ENDFRAME => { + let pdu = EndFramePdu::decode(src)?; + Ok(GfxPdu::EndFrame(pdu)) + } + RDPGFX_CMDID_FRAMEACKNOWLEDGE => { + let pdu = FrameAcknowledgePdu::decode(src)?; + Ok(GfxPdu::FrameAcknowledge(pdu)) + } + RDPGFX_CMDID_RESETGRAPHICS => { + 
let pdu = ResetGraphicsPdu::decode(src)?; + Ok(GfxPdu::ResetGraphics(pdu)) + } + RDPGFX_CMDID_MAPSURFACETOOUTPUT => { + let pdu = MapSurfaceToOutputPdu::decode(src)?; + Ok(GfxPdu::MapSurfaceToOutput(pdu)) + } + RDPGFX_CMDID_CACHEIMPORTOFFER => { + let pdu = CacheImportOfferPdu::decode(src)?; + Ok(GfxPdu::CacheImportOffer(pdu)) + } + RDPGFX_CMDID_CACHEIMPORTREPLY => { + let pdu = CacheImportReplyPdu::decode(src)?; + Ok(GfxPdu::CacheImportReply(pdu)) + } + RDPGFX_CMDID_CAPSADVERTISE => { + let pdu = CapabilitiesAdvertisePdu::decode(src)?; + Ok(GfxPdu::CapabilitiesAdvertise(pdu)) + } + RDPGFX_CMDID_CAPSCONFIRM => { + let pdu = CapabilitiesConfirmPdu::decode(src)?; + Ok(GfxPdu::CapabilitiesConfirm(pdu)) + } + RDPGFX_CMDID_MAPSURFACETOWINDOW => { + let pdu = MapSurfaceToWindowPdu::decode(src)?; + Ok(GfxPdu::MapSurfaceToWindow(pdu)) + } + RDPGFX_CMDID_QOEFRAMEACKNOWLEDGE => { + let pdu = QoeFrameAcknowledgePdu::decode(src)?; + Ok(GfxPdu::QoeFrameAcknowledge(pdu)) + } + RDPGFX_CMDID_MAPSURFACETOSCALEDOUTPUT => { + let pdu = MapSurfaceToScaledOutputPdu::decode(src)?; + Ok(GfxPdu::MapSurfaceToScaledOutput(pdu)) + } + RDPGFX_CMDID_MAPSURFACETOSCALEDWINDOW => { + let pdu = MapSurfaceToScaledWindowPdu::decode(src)?; + Ok(GfxPdu::MapSurfaceToScaledWindow(pdu)) + } + _ => Err(invalid_field_err!("Type", "Unknown GFX PDU type")), + } + } +} + +/// 2.2.2.1 RDPGFX_WIRE_TO_SURFACE_PDU_1 +/// +/// [2.2.2.1]: +#[derive(Clone, PartialEq, Eq)] +pub struct WireToSurface1Pdu { + pub surface_id: u16, + pub codec_id: Codec1Type, + pub pixel_format: PixelFormat, + pub destination_rectangle: InclusiveRectangle, + pub bitmap_data: Vec, +} + +impl fmt::Debug for WireToSurface1Pdu { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("WireToSurface1Pdu") + .field("surface_id", &self.surface_id) + .field("codec_id", &self.codec_id) + .field("pixel_format", &self.pixel_format) + .field("destination_rectangle", &self.destination_rectangle) + .field("bitmap_data_length", 
&self.bitmap_data.len()) + .finish() + } +} + +impl WireToSurface1Pdu { + const NAME: &'static str = "WireToSurface1Pdu"; + + const FIXED_PART_SIZE: usize = 2 /* SurfaceId */ + 2 /* CodecId */ + 1 /* PixelFormat */ + InclusiveRectangle::FIXED_PART_SIZE /* Dest */ + 4 /* BitmapDataLen */; +} + +impl Encode for WireToSurface1Pdu { + fn encode(&self, dst: &mut WriteCursor<'_>) -> EncodeResult<()> { + ensure_size!(in: dst, size: self.size()); + + dst.write_u16(self.surface_id); + dst.write_u16(self.codec_id.into()); + dst.write_u8(self.pixel_format.into()); + self.destination_rectangle.encode(dst)?; + dst.write_u32(cast_length!("BitmapDataLen", self.bitmap_data.len())?); + dst.write_slice(&self.bitmap_data); + Ok(()) + } + + fn name(&self) -> &'static str { + Self::NAME + } + + fn size(&self) -> usize { + Self::FIXED_PART_SIZE + self.bitmap_data.len() + } +} + +impl<'a> Decode<'a> for WireToSurface1Pdu { + fn decode(src: &mut ReadCursor<'a>) -> DecodeResult { + ensure_fixed_part_size!(in: src); + + let surface_id = src.read_u16(); + let codec_id = Codec1Type::try_from(src.read_u16())?; + let pixel_format = PixelFormat::try_from(src.read_u8())?; + let destination_rectangle = InclusiveRectangle::decode(src)?; + let bitmap_data_length = cast_length!("BitmapDataLen", src.read_u32())?; + + ensure_size!(in: src, size: bitmap_data_length); + let bitmap_data = src.read_slice(bitmap_data_length).to_vec(); + + Ok(Self { + surface_id, + codec_id, + pixel_format, + destination_rectangle, + bitmap_data, + }) + } +} + +/// 2.2.2.2 RDPGFX_WIRE_TO_SURFACE_PDU_2 +/// +/// [2.2.2.2]: +#[derive(Clone, PartialEq, Eq)] +pub struct WireToSurface2Pdu { + pub surface_id: u16, + pub codec_id: Codec2Type, + pub codec_context_id: u32, + pub pixel_format: PixelFormat, + pub bitmap_data: Vec, +} + +impl fmt::Debug for WireToSurface2Pdu { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("WireToSurface2Pdu") + .field("surface_id", &self.surface_id) + .field("codec_id", 
&self.codec_id) + .field("codec_context_id", &self.codec_context_id) + .field("pixel_format", &self.pixel_format) + .field("bitmap_data_length", &self.bitmap_data.len()) + .finish() + } +} + +impl WireToSurface2Pdu { + const NAME: &'static str = "WireToSurface2Pdu"; + + const FIXED_PART_SIZE: usize = 2 /* SurfaceId */ + 2 /* CodecId */ + 4 /* ContextId */ + 1 /* PixelFormat */ + 4 /* BitmapDataLen */; +} + +impl Encode for WireToSurface2Pdu { + fn encode(&self, dst: &mut WriteCursor<'_>) -> EncodeResult<()> { + ensure_size!(in: dst, size: self.size()); + + dst.write_u16(self.surface_id); + dst.write_u16(self.codec_id.into()); + dst.write_u32(self.codec_context_id); + dst.write_u8(self.pixel_format.into()); + dst.write_u32(cast_length!("BitmapDataLen", self.bitmap_data.len())?); + dst.write_slice(&self.bitmap_data); + + Ok(()) + } + + fn name(&self) -> &'static str { + Self::NAME + } + + fn size(&self) -> usize { + Self::FIXED_PART_SIZE + self.bitmap_data.len() + } +} + +impl<'a> Decode<'a> for WireToSurface2Pdu { + fn decode(src: &mut ReadCursor<'a>) -> DecodeResult { + ensure_fixed_part_size!(in: src); + + let surface_id = src.read_u16(); + let codec_id = Codec2Type::try_from(src.read_u16())?; + let codec_context_id = src.read_u32(); + let pixel_format = PixelFormat::try_from(src.read_u8())?; + let bitmap_data_length = cast_length!("BitmapDataLen", src.read_u32())?; + + ensure_size!(in: src, size: bitmap_data_length); + let bitmap_data = src.read_slice(bitmap_data_length).to_vec(); + + Ok(Self { + surface_id, + codec_id, + codec_context_id, + pixel_format, + bitmap_data, + }) + } +} + +/// 2.2.2.3 RDPGFX_DELETE_ENCODING_CONTEXT_PDU +/// +/// [2.2.2.3]: +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct DeleteEncodingContextPdu { + pub surface_id: u16, + pub codec_context_id: u32, +} + +impl DeleteEncodingContextPdu { + const NAME: &'static str = "DeleteEncodingContextPdu"; + + const FIXED_PART_SIZE: usize = 2 /* SurfaceId */ + 4 /* CodecContextId */; +} + +impl 
Encode for DeleteEncodingContextPdu { + fn encode(&self, dst: &mut WriteCursor<'_>) -> EncodeResult<()> { + ensure_fixed_part_size!(in: dst); + + dst.write_u16(self.surface_id); + dst.write_u32(self.codec_context_id); + + Ok(()) + } + + fn name(&self) -> &'static str { + Self::NAME + } + + fn size(&self) -> usize { + Self::FIXED_PART_SIZE + } +} + +impl<'a> Decode<'a> for DeleteEncodingContextPdu { + fn decode(src: &mut ReadCursor<'a>) -> DecodeResult { + ensure_fixed_part_size!(in: src); + + let surface_id = src.read_u16(); + let codec_context_id = src.read_u32(); + + Ok(Self { + surface_id, + codec_context_id, + }) + } +} + +/// 2.2.2.4 RDPGFX_SOLID_FILL_PDU +/// +/// [2.2.2.4]: +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct SolidFillPdu { + pub surface_id: u16, + pub fill_pixel: Color, + pub rectangles: Vec, +} + +impl SolidFillPdu { + const NAME: &'static str = "SolidFillPdu"; + + const FIXED_PART_SIZE: usize = 2 /* SurfaceId */ + Color::FIXED_PART_SIZE /* Color */ + 2 /* RectCount */; +} + +impl Encode for SolidFillPdu { + fn encode(&self, dst: &mut WriteCursor<'_>) -> EncodeResult<()> { + ensure_size!(in: dst, size: self.size()); + + dst.write_u16(self.surface_id); + self.fill_pixel.encode(dst)?; + dst.write_u16(cast_length!("nRect", self.rectangles.len())?); + + for rectangle in self.rectangles.iter() { + rectangle.encode(dst)?; + } + + Ok(()) + } + + fn name(&self) -> &'static str { + Self::NAME + } + + fn size(&self) -> usize { + Self::FIXED_PART_SIZE + self.rectangles.iter().map(|r| r.size()).sum::() + } +} + +impl<'a> Decode<'a> for SolidFillPdu { + fn decode(src: &mut ReadCursor<'a>) -> DecodeResult { + ensure_fixed_part_size!(in: src); + + let surface_id = src.read_u16(); + let fill_pixel = Color::decode(src)?; + let rectangles_count = src.read_u16(); + + ensure_size!(in: src, size: usize::from(rectangles_count) * InclusiveRectangle::FIXED_PART_SIZE); + let rectangles = iter::repeat_with(|| InclusiveRectangle::decode(src)) + 
.take(usize::from(rectangles_count)) + .collect::>()?; + + Ok(Self { + surface_id, + fill_pixel, + rectangles, + }) + } +} + +/// 2.2.2.5 RDPGFX_SURFACE_TO_SURFACE_PDU +/// +/// [2.2.2.5]: +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct SurfaceToSurfacePdu { + pub source_surface_id: u16, + pub destination_surface_id: u16, + pub source_rectangle: InclusiveRectangle, + pub destination_points: Vec, +} + +impl SurfaceToSurfacePdu { + const NAME: &'static str = "SurfaceToSurfacePdu"; + + const FIXED_PART_SIZE: usize = 2 /* SourceId */ + 2 /* DestId */ + InclusiveRectangle::FIXED_PART_SIZE /* SourceRect */ + 2 /* DestPointsCount */; +} + +impl Encode for SurfaceToSurfacePdu { + fn encode(&self, dst: &mut WriteCursor<'_>) -> EncodeResult<()> { + ensure_size!(in: dst, size: self.size()); + + dst.write_u16(self.source_surface_id); + dst.write_u16(self.destination_surface_id); + self.source_rectangle.encode(dst)?; + + dst.write_u16(cast_length!("DestinationPoints", self.destination_points.len())?); + for rectangle in self.destination_points.iter() { + rectangle.encode(dst)?; + } + + Ok(()) + } + + fn name(&self) -> &'static str { + Self::NAME + } + + fn size(&self) -> usize { + Self::FIXED_PART_SIZE + self.destination_points.iter().map(|r| r.size()).sum::() + } +} + +impl<'a> Decode<'a> for SurfaceToSurfacePdu { + fn decode(src: &mut ReadCursor<'a>) -> DecodeResult { + ensure_fixed_part_size!(in: src); + + let source_surface_id = src.read_u16(); + let destination_surface_id = src.read_u16(); + let source_rectangle = InclusiveRectangle::decode(src)?; + let destination_points_count = src.read_u16(); + + let destination_points = iter::repeat_with(|| Point::decode(src)) + .take(usize::from(destination_points_count)) + .collect::>()?; + + Ok(Self { + source_surface_id, + destination_surface_id, + source_rectangle, + destination_points, + }) + } +} + +/// 2.2.2.6 RDPGFX_SURFACE_TO_CACHE_PDU +/// +/// [2.2.2.6]: +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct 
SurfaceToCachePdu { + pub surface_id: u16, + pub cache_key: u64, + pub cache_slot: u16, + pub source_rectangle: InclusiveRectangle, +} + +impl SurfaceToCachePdu { + const NAME: &'static str = "SurfaceToCachePdu"; + + const FIXED_PART_SIZE: usize = 2 /* SurfaceId */ + 8 /* CacheKey */ + 2 /* CacheSlot */ + InclusiveRectangle::FIXED_PART_SIZE /* SourceRect */; +} + +impl Encode for SurfaceToCachePdu { + fn encode(&self, dst: &mut WriteCursor<'_>) -> EncodeResult<()> { + ensure_fixed_part_size!(in: dst); + + dst.write_u16(self.surface_id); + dst.write_u64(self.cache_key); + dst.write_u16(self.cache_slot); + self.source_rectangle.encode(dst)?; + + Ok(()) + } + + fn name(&self) -> &'static str { + Self::NAME + } + + fn size(&self) -> usize { + Self::FIXED_PART_SIZE + } +} + +impl<'a> Decode<'a> for SurfaceToCachePdu { + fn decode(src: &mut ReadCursor<'a>) -> DecodeResult { + ensure_fixed_part_size!(in: src); + + let surface_id = src.read_u16(); + let cache_key = src.read_u64(); + let cache_slot = src.read_u16(); + let source_rectangle = InclusiveRectangle::decode(src)?; + + Ok(Self { + surface_id, + cache_key, + cache_slot, + source_rectangle, + }) + } +} + +/// 2.2.2.7 RDPGFX_CACHE_TO_SURFACE_PDU +/// +/// [2.2.2.7]: +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct CacheToSurfacePdu { + pub cache_slot: u16, + pub surface_id: u16, + pub destination_points: Vec, +} + +impl CacheToSurfacePdu { + const NAME: &'static str = "CacheToSurfacePdu"; + + const FIXED_PART_SIZE: usize = 2 /* cache_slot */ + 2 /* surface_id */ + 2 /* npoints */; +} + +impl Encode for CacheToSurfacePdu { + fn encode(&self, dst: &mut WriteCursor<'_>) -> EncodeResult<()> { + ensure_size!(in: dst, size: self.size()); + + dst.write_u16(self.cache_slot); + dst.write_u16(self.surface_id); + dst.write_u16(cast_length!("npoints", self.destination_points.len())?); + for point in self.destination_points.iter() { + point.encode(dst)?; + } + + Ok(()) + } + + fn name(&self) -> &'static str { + Self::NAME + } + 
+ fn size(&self) -> usize { + Self::FIXED_PART_SIZE + self.destination_points.iter().map(|p| p.size()).sum::() + } +} + +impl<'de> Decode<'de> for CacheToSurfacePdu { + fn decode(src: &mut ReadCursor<'de>) -> DecodeResult { + ensure_fixed_part_size!(in: src); + + let cache_slot = src.read_u16(); + let surface_id = src.read_u16(); + let destination_points_count = src.read_u16(); + + let destination_points = iter::repeat_with(|| Point::decode(src)) + .take(usize::from(destination_points_count)) + .collect::>()?; + + Ok(Self { + cache_slot, + surface_id, + destination_points, + }) + } +} + +/// 2.2.2.8 RDPGFX_EVICT_CACHE_ENTRY_PDU +/// +/// [2.2.2.8]: +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct EvictCacheEntryPdu { + pub cache_slot: u16, +} + +impl EvictCacheEntryPdu { + const NAME: &'static str = "EvictCacheEntryPdu"; + + const FIXED_PART_SIZE: usize = 2; +} + +impl Encode for EvictCacheEntryPdu { + fn encode(&self, dst: &mut WriteCursor<'_>) -> EncodeResult<()> { + ensure_fixed_part_size!(in: dst); + + dst.write_u16(self.cache_slot); + + Ok(()) + } + + fn name(&self) -> &'static str { + Self::NAME + } + + fn size(&self) -> usize { + Self::FIXED_PART_SIZE + } +} + +impl<'a> Decode<'a> for EvictCacheEntryPdu { + fn decode(src: &mut ReadCursor<'a>) -> DecodeResult { + ensure_fixed_part_size!(in: src); + + let cache_slot = src.read_u16(); + + Ok(Self { cache_slot }) + } +} + +/// 2.2.2.9 RDPGFX_CREATE_SURFACE_PDU +/// +/// [2.2.2.9]: +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct CreateSurfacePdu { + pub surface_id: u16, + pub width: u16, + pub height: u16, + pub pixel_format: PixelFormat, +} + +impl CreateSurfacePdu { + const NAME: &'static str = "CreateSurfacePdu"; + + const FIXED_PART_SIZE: usize = 2 /* SurfaceId */ + 2 /* Width */ + 2 /* Height */ + 1 /* PixelFormat */; +} + +impl Encode for CreateSurfacePdu { + fn encode(&self, dst: &mut WriteCursor<'_>) -> EncodeResult<()> { + ensure_fixed_part_size!(in: dst); + + dst.write_u16(self.surface_id); + 
dst.write_u16(self.width); + dst.write_u16(self.height); + dst.write_u8(self.pixel_format.into()); + + Ok(()) + } + + fn name(&self) -> &'static str { + Self::NAME + } + + fn size(&self) -> usize { + Self::FIXED_PART_SIZE + } +} + +impl<'a> Decode<'a> for CreateSurfacePdu { + fn decode(src: &mut ReadCursor<'a>) -> DecodeResult { + ensure_fixed_part_size!(in: src); + + let surface_id = src.read_u16(); + let width = src.read_u16(); + let height = src.read_u16(); + let pixel_format = PixelFormat::try_from(src.read_u8())?; + + Ok(Self { + surface_id, + width, + height, + pixel_format, + }) + } +} + +/// 2.2.2.10 RDPGFX_DELETE_SURFACE_PDU +/// +/// [2.2.2.10]: +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct DeleteSurfacePdu { + pub surface_id: u16, +} + +impl DeleteSurfacePdu { + const NAME: &'static str = "DeleteSurfacePdu"; + + const FIXED_PART_SIZE: usize = 2 /* SurfaceId */; +} + +impl Encode for DeleteSurfacePdu { + fn encode(&self, dst: &mut WriteCursor<'_>) -> EncodeResult<()> { + ensure_fixed_part_size!(in: dst); + + dst.write_u16(self.surface_id); + + Ok(()) + } + + fn name(&self) -> &'static str { + Self::NAME + } + + fn size(&self) -> usize { + Self::FIXED_PART_SIZE + } +} + +impl<'a> Decode<'a> for DeleteSurfacePdu { + fn decode(src: &mut ReadCursor<'a>) -> DecodeResult { + ensure_fixed_part_size!(in: src); + + let surface_id = src.read_u16(); + + Ok(Self { surface_id }) + } +} + +/// 2.2.2.11 RDPGFX_START_FRAME_PDU +/// +/// [2.2.2.11]: +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct StartFramePdu { + pub timestamp: Timestamp, + pub frame_id: u32, +} + +impl StartFramePdu { + const NAME: &'static str = "StartFramePdu"; + + const FIXED_PART_SIZE: usize = Timestamp::FIXED_PART_SIZE + 4 /* FrameId */; +} + +impl Encode for StartFramePdu { + fn encode(&self, dst: &mut WriteCursor<'_>) -> EncodeResult<()> { + ensure_fixed_part_size!(in: dst); + + self.timestamp.encode(dst)?; + dst.write_u32(self.frame_id); + + Ok(()) + } + + fn name(&self) -> &'static 
str { + Self::NAME + } + + fn size(&self) -> usize { + Self::FIXED_PART_SIZE + } +} + +impl<'a> Decode<'a> for StartFramePdu { + fn decode(src: &mut ReadCursor<'a>) -> DecodeResult { + ensure_fixed_part_size!(in: src); + + let timestamp = Timestamp::decode(src)?; + let frame_id = src.read_u32(); + + Ok(Self { timestamp, frame_id }) + } +} + +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +pub struct Timestamp { + pub milliseconds: u16, + pub seconds: u8, + pub minutes: u8, + pub hours: u16, +} + +impl Timestamp { + const NAME: &'static str = "GfxTimestamp"; + + const FIXED_PART_SIZE: usize = 4; +} + +impl Encode for Timestamp { + fn encode(&self, dst: &mut WriteCursor<'_>) -> EncodeResult<()> { + ensure_fixed_part_size!(in: dst); + + let mut timestamp: u32 = 0; + + timestamp.set_bits(..10, u32::from(self.milliseconds)); + timestamp.set_bits(10..16, u32::from(self.seconds)); + timestamp.set_bits(16..22, u32::from(self.minutes)); + timestamp.set_bits(22.., u32::from(self.hours)); + + dst.write_u32(timestamp); + + Ok(()) + } + + fn name(&self) -> &'static str { + Self::NAME + } + + fn size(&self) -> usize { + Self::FIXED_PART_SIZE + } +} + +impl<'a> Decode<'a> for Timestamp { + fn decode(src: &mut ReadCursor<'a>) -> DecodeResult { + ensure_fixed_part_size!(in: src); + + let timestamp = src.read_u32(); + + // All these bit extractions are bounded by the bit ranges specified, + // so the conversions will never fail + #[expect(clippy::unwrap_used, reason = "bit field extraction bounded by range")] + let milliseconds = timestamp.get_bits(..10).try_into().unwrap(); + #[expect(clippy::unwrap_used, reason = "bit field extraction bounded by range")] + let seconds = timestamp.get_bits(10..16).try_into().unwrap(); + #[expect(clippy::unwrap_used, reason = "bit field extraction bounded by range")] + let minutes = timestamp.get_bits(16..22).try_into().unwrap(); + #[expect(clippy::unwrap_used, reason = "bit field extraction bounded by range")] + let hours = 
timestamp.get_bits(22..).try_into().unwrap(); + + Ok(Self { + milliseconds, + seconds, + minutes, + hours, + }) + } +} + +/// 2.2.2.12 RDPGFX_END_FRAME_PDU +/// +/// [2.2.2.12]: +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct EndFramePdu { + pub frame_id: u32, +} + +impl EndFramePdu { + const NAME: &'static str = "EndFramePdu"; + + const FIXED_PART_SIZE: usize = 4; +} + +impl Encode for EndFramePdu { + fn encode(&self, dst: &mut WriteCursor<'_>) -> EncodeResult<()> { + ensure_fixed_part_size!(in: dst); + + dst.write_u32(self.frame_id); + + Ok(()) + } + + fn name(&self) -> &'static str { + Self::NAME + } + + fn size(&self) -> usize { + Self::FIXED_PART_SIZE + } +} + +impl<'a> Decode<'a> for EndFramePdu { + fn decode(src: &mut ReadCursor<'a>) -> DecodeResult { + ensure_fixed_part_size!(in: src); + + let frame_id = src.read_u32(); + + Ok(Self { frame_id }) + } +} + +/// 2.2.2.13 RDPGFX_FRAME_ACKNOWLEDGE_PDU +/// +/// [2.2.2.13]: +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct FrameAcknowledgePdu { + pub queue_depth: QueueDepth, + pub frame_id: u32, + pub total_frames_decoded: u32, +} + +impl FrameAcknowledgePdu { + const NAME: &'static str = "FrameAcknowledgePdu"; + + const FIXED_PART_SIZE: usize = 4 /* QueueDepth */ + 4 /* FrameId */ + 4 /* TotalFramesDecoded */; +} + +impl Encode for FrameAcknowledgePdu { + fn encode(&self, dst: &mut WriteCursor<'_>) -> EncodeResult<()> { + ensure_fixed_part_size!(in: dst); + + dst.write_u32(self.queue_depth.to_u32()); + dst.write_u32(self.frame_id); + dst.write_u32(self.total_frames_decoded); + + Ok(()) + } + + fn name(&self) -> &'static str { + Self::NAME + } + + fn size(&self) -> usize { + Self::FIXED_PART_SIZE + } +} + +impl<'a> Decode<'a> for FrameAcknowledgePdu { + fn decode(src: &mut ReadCursor<'a>) -> DecodeResult { + ensure_fixed_part_size!(in: src); + + let queue_depth = QueueDepth::from_u32(src.read_u32()); + let frame_id = src.read_u32(); + let total_frames_decoded = src.read_u32(); + + Ok(Self { + queue_depth, + 
frame_id, + total_frames_decoded, + }) + } +} + +#[repr(u32)] +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +pub enum QueueDepth { + Unavailable, + AvailableBytes(u32), + Suspend, +} + +impl QueueDepth { + pub fn from_u32(v: u32) -> Self { + match v { + 0x0000_0000 => Self::Unavailable, + 0x0000_0001..=0xFFFF_FFFE => Self::AvailableBytes(v), + 0xFFFF_FFFF => Self::Suspend, + } + } + + pub fn to_u32(self) -> u32 { + match self { + Self::Unavailable => 0x0000_0000, + Self::AvailableBytes(v) => v, + Self::Suspend => 0xFFFF_FFFF, + } + } +} + +/// 2.2.2.14 RDPGFX_RESET_GRAPHICS_PDU +/// +/// [2.2.2.14]: +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct ResetGraphicsPdu { + pub width: u32, + pub height: u32, + pub monitors: Vec, +} + +impl ResetGraphicsPdu { + const NAME: &'static str = "ResetGraphicsPdu"; + + const FIXED_PART_SIZE: usize = 4 /* Width */ + 4 /* Height */ + 4 /* nMonitors */; + + fn padding_size(&self) -> usize { + RESET_GRAPHICS_PDU_SIZE - Self::FIXED_PART_SIZE - self.monitors.iter().map(|m| m.size()).sum::() + } +} + +impl Encode for ResetGraphicsPdu { + fn encode(&self, dst: &mut WriteCursor<'_>) -> EncodeResult<()> { + ensure_size!(in: dst, size: self.size()); + + dst.write_u32(self.width); + dst.write_u32(self.height); + dst.write_u32(cast_length!("nMonitors", self.monitors.len())?); + + for monitor in self.monitors.iter() { + monitor.encode(dst)?; + } + + write_padding!(dst, self.padding_size()); + + Ok(()) + } + + fn name(&self) -> &'static str { + Self::NAME + } + + fn size(&self) -> usize { + Self::FIXED_PART_SIZE + self.monitors.iter().map(|m| m.size()).sum::() + self.padding_size() + } +} + +impl<'a> Decode<'a> for ResetGraphicsPdu { + fn decode(src: &mut ReadCursor<'a>) -> DecodeResult { + ensure_fixed_part_size!(in: src); + + let width = src.read_u32(); + if width > MAX_RESET_GRAPHICS_WIDTH_HEIGHT { + return Err(invalid_field_err!("width", "invalid reset graphics width")); + } + + let height = src.read_u32(); + if height > 
MAX_RESET_GRAPHICS_WIDTH_HEIGHT { + return Err(invalid_field_err!("height", "invalid reset graphics height")); + } + + let monitor_count = src.read_u32(); + if monitor_count > MONITOR_COUNT_MAX { + return Err(invalid_field_err!( + "monitor_count", + "invalid reset graphics monitor count" + )); + } + + #[expect(clippy::as_conversions, reason = "monitor_count validated above")] + let monitors = iter::repeat_with(|| Monitor::decode(src)) + .take(monitor_count as usize) + .collect::, _>>()?; + + let pdu = Self { + width, + height, + monitors, + }; + + read_padding!(src, pdu.padding_size()); + + Ok(pdu) + } +} + +/// 2.2.2.15 RDPGFX_MAP_SURFACE_TO_OUTPUT_PDU +/// +/// [2.2.2.15]: +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct MapSurfaceToOutputPdu { + pub surface_id: u16, + pub output_origin_x: u32, + pub output_origin_y: u32, +} + +impl MapSurfaceToOutputPdu { + const NAME: &'static str = "MapSurfaceToOutputPdu"; + + const FIXED_PART_SIZE: usize = 2 /* surfaceId */ + 2 /* reserved */ + 4 /* OutOriginX */ + 4 /* OutOriginY */; +} + +impl Encode for MapSurfaceToOutputPdu { + fn encode(&self, dst: &mut WriteCursor<'_>) -> EncodeResult<()> { + ensure_fixed_part_size!(in: dst); + + dst.write_u16(self.surface_id); + dst.write_u16(0); // reserved + dst.write_u32(self.output_origin_x); + dst.write_u32(self.output_origin_y); + + Ok(()) + } + + fn name(&self) -> &'static str { + Self::NAME + } + + fn size(&self) -> usize { + Self::FIXED_PART_SIZE + } +} + +impl<'a> Decode<'a> for MapSurfaceToOutputPdu { + fn decode(src: &mut ReadCursor<'a>) -> DecodeResult { + ensure_fixed_part_size!(in: src); + + let surface_id = src.read_u16(); + let _reserved = src.read_u16(); + let output_origin_x = src.read_u32(); + let output_origin_y = src.read_u32(); + + Ok(Self { + surface_id, + output_origin_x, + output_origin_y, + }) + } +} + +/// 2.2.2.16 RDPGFX_CACHE_IMPORT_OFFER_PDU +/// +/// [2.2.2.16]: +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct CacheImportOfferPdu { + pub 
cache_entries: Vec, +} + +impl CacheImportOfferPdu { + const NAME: &'static str = "CacheImportOfferPdu"; + + const FIXED_PART_SIZE: usize = 2 /* Count */; +} + +impl Encode for CacheImportOfferPdu { + fn encode(&self, dst: &mut WriteCursor<'_>) -> EncodeResult<()> { + ensure_size!(in: dst, size: self.size()); + + dst.write_u16(cast_length!("Count", self.cache_entries.len())?); + + for e in self.cache_entries.iter() { + e.encode(dst)?; + } + + Ok(()) + } + + fn name(&self) -> &'static str { + Self::NAME + } + + fn size(&self) -> usize { + Self::FIXED_PART_SIZE + self.cache_entries.iter().map(|e| e.size()).sum::() + } +} + +impl<'a> Decode<'a> for CacheImportOfferPdu { + fn decode(src: &mut ReadCursor<'a>) -> DecodeResult { + ensure_fixed_part_size!(in: src); + + let entries_count = src.read_u16(); + + let cache_entries = iter::repeat_with(|| CacheEntryMetadata::decode(src)) + .take(usize::from(entries_count)) + .collect::, _>>()?; + + Ok(Self { cache_entries }) + } +} + +/// 2.2.2.17 RDPGFX_CACHE_IMPORT_REPLY_PDU +/// +/// [2.2.2.17]: +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct CacheImportReplyPdu { + pub cache_slots: Vec, +} + +impl CacheImportReplyPdu { + const NAME: &'static str = "CacheImportReplyPdu"; + + const FIXED_PART_SIZE: usize = 2 /* Count */; +} + +impl Encode for CacheImportReplyPdu { + fn encode(&self, dst: &mut WriteCursor<'_>) -> EncodeResult<()> { + ensure_size!(in: dst, size: self.size()); + + dst.write_u16(cast_length!("Count", self.cache_slots.len())?); + + for cache_slot in self.cache_slots.iter() { + dst.write_u16(*cache_slot); + } + + Ok(()) + } + + fn name(&self) -> &'static str { + Self::NAME + } + + fn size(&self) -> usize { + Self::FIXED_PART_SIZE + self.cache_slots.iter().map(|_| 2).sum::() + } +} + +impl<'a> Decode<'a> for CacheImportReplyPdu { + fn decode(src: &mut ReadCursor<'a>) -> DecodeResult { + ensure_fixed_part_size!(in: src); + + let entries_count = src.read_u16(); + ensure_size!(in: src, size: 2 * 
usize::from(entries_count)); + + let cache_slots = iter::repeat_with(|| src.read_u16()) + .take(usize::from(entries_count)) + .collect(); + + Ok(Self { cache_slots }) + } +} + +/// 2.2.2.16.1 RDPGFX_CACHE_ENTRY_METADATA +/// +/// [2.2.2.16.1]: +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct CacheEntryMetadata { + pub cache_key: u64, + pub bitmap_len: u32, +} + +impl CacheEntryMetadata { + const NAME: &'static str = "CacheEntryMetadata"; + + const FIXED_PART_SIZE: usize = 8 /* cache_key */ + 4 /* bitmap_len */; +} + +impl Encode for CacheEntryMetadata { + fn encode(&self, dst: &mut WriteCursor<'_>) -> EncodeResult<()> { + ensure_size!(in: dst, size: self.size()); + + dst.write_u64(self.cache_key); + dst.write_u32(self.bitmap_len); + + Ok(()) + } + + fn name(&self) -> &'static str { + Self::NAME + } + + fn size(&self) -> usize { + Self::FIXED_PART_SIZE + } +} + +impl<'a> Decode<'a> for CacheEntryMetadata { + fn decode(src: &mut ReadCursor<'a>) -> DecodeResult { + ensure_fixed_part_size!(in: src); + + let cache_key = src.read_u64(); + let bitmap_len = src.read_u32(); + + Ok(Self { cache_key, bitmap_len }) + } +} + +/// 2.2.2.18 RDPGFX_CAPS_ADVERTISE_PDU +/// +/// [2.2.2.18]: +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct CapabilitiesAdvertisePdu(pub Vec); + +impl CapabilitiesAdvertisePdu { + const NAME: &'static str = "CapabilitiesAdvertisePdu"; + + const FIXED_PART_SIZE: usize = 2 /* Count */; +} + +impl Encode for CapabilitiesAdvertisePdu { + fn encode(&self, dst: &mut WriteCursor<'_>) -> EncodeResult<()> { + ensure_size!(in: dst, size: self.size()); + + dst.write_u16(cast_length!("Count", self.0.len())?); + + for capability_set in self.0.iter() { + capability_set.encode(dst)?; + } + + Ok(()) + } + + fn name(&self) -> &'static str { + Self::NAME + } + + fn size(&self) -> usize { + Self::FIXED_PART_SIZE + self.0.iter().map(|c| c.size()).sum::() + } +} + +impl<'a> Decode<'a> for CapabilitiesAdvertisePdu { + fn decode(src: &mut ReadCursor<'a>) -> DecodeResult 
{ + ensure_fixed_part_size!(in: src); + + let capabilities_count = cast_length!("Count", src.read_u16())?; + + ensure_size!(in: src, size: capabilities_count * CapabilitySet::FIXED_PART_SIZE); + + let capabilities = iter::repeat_with(|| CapabilitySet::decode(src)) + .take(capabilities_count) + .collect::>()?; + + Ok(Self(capabilities)) + } +} + +/// 2.2.2.19 RDPGFX_CAPS_CONFIRM_PDU +/// +/// [2.2.2.19]: +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct CapabilitiesConfirmPdu(pub CapabilitySet); + +impl CapabilitiesConfirmPdu { + const NAME: &'static str = "CapabilitiesConfirmPdu"; + + const FIXED_PART_SIZE: usize = 0; +} + +impl Encode for CapabilitiesConfirmPdu { + fn encode(&self, dst: &mut WriteCursor<'_>) -> EncodeResult<()> { + ensure_size!(in: dst, size: self.size()); + + self.0.encode(dst)?; + + Ok(()) + } + + fn name(&self) -> &'static str { + Self::NAME + } + + fn size(&self) -> usize { + self.0.size() + } +} + +impl<'a> Decode<'a> for CapabilitiesConfirmPdu { + fn decode(src: &mut ReadCursor<'a>) -> DecodeResult { + ensure_fixed_part_size!(in: src); + + let cap = CapabilitySet::decode(src)?; + + Ok(Self(cap)) + } +} + +/// 2.2.1.6 RDPGFX_CAPSET +/// +/// [2.2.1.6]: +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum CapabilitySet { + V8 { flags: CapabilitiesV8Flags }, + V8_1 { flags: CapabilitiesV81Flags }, + V10 { flags: CapabilitiesV10Flags }, + V10_1, + V10_2 { flags: CapabilitiesV10Flags }, + V10_3 { flags: CapabilitiesV103Flags }, + V10_4 { flags: CapabilitiesV104Flags }, + V10_5 { flags: CapabilitiesV104Flags }, + V10_6 { flags: CapabilitiesV104Flags }, + V10_6Err { flags: CapabilitiesV104Flags }, + V10_7 { flags: CapabilitiesV107Flags }, + Unknown(Vec), +} + +impl CapabilitySet { + const NAME: &'static str = "GfxCapabilitySet"; + + const FIXED_PART_SIZE: usize = 4 /* version */ + 4 /* capsDataLength */; + + fn version(&self) -> CapabilityVersion { + match self { + CapabilitySet::V8 { .. } => CapabilityVersion::V8, + CapabilitySet::V8_1 { .. 
} => CapabilityVersion::V8_1, + CapabilitySet::V10 { .. } => CapabilityVersion::V10, + CapabilitySet::V10_1 => CapabilityVersion::V10_1, + CapabilitySet::V10_2 { .. } => CapabilityVersion::V10_2, + CapabilitySet::V10_3 { .. } => CapabilityVersion::V10_3, + CapabilitySet::V10_4 { .. } => CapabilityVersion::V10_4, + CapabilitySet::V10_5 { .. } => CapabilityVersion::V10_5, + CapabilitySet::V10_6 { .. } => CapabilityVersion::V10_6, + CapabilitySet::V10_6Err { .. } => CapabilityVersion::V10_6Err, + CapabilitySet::V10_7 { .. } => CapabilityVersion::V10_7, + CapabilitySet::Unknown { .. } => CapabilityVersion::Unknown, + } + } +} + +impl Encode for CapabilitySet { + fn encode(&self, dst: &mut WriteCursor<'_>) -> EncodeResult<()> { + ensure_size!(in: dst, size: self.size()); + + dst.write_u32(self.version().into()); + dst.write_u32(cast_length!("dataLength", self.size() - Self::FIXED_PART_SIZE)?); + + match self { + CapabilitySet::V8 { flags } => dst.write_u32(flags.bits()), + CapabilitySet::V8_1 { flags } => dst.write_u32(flags.bits()), + CapabilitySet::V10 { flags } => dst.write_u32(flags.bits()), + CapabilitySet::V10_1 => dst.write_u128(0), + CapabilitySet::V10_2 { flags } => dst.write_u32(flags.bits()), + CapabilitySet::V10_3 { flags } => dst.write_u32(flags.bits()), + CapabilitySet::V10_4 { flags } => dst.write_u32(flags.bits()), + CapabilitySet::V10_5 { flags } => dst.write_u32(flags.bits()), + CapabilitySet::V10_6 { flags } => dst.write_u32(flags.bits()), + CapabilitySet::V10_6Err { flags } => dst.write_u32(flags.bits()), + CapabilitySet::V10_7 { flags } => dst.write_u32(flags.bits()), + CapabilitySet::Unknown(data) => dst.write_slice(data), + } + + Ok(()) + } + + fn name(&self) -> &'static str { + Self::NAME + } + + fn size(&self) -> usize { + Self::FIXED_PART_SIZE + + match self { + CapabilitySet::V8 { .. } + | CapabilitySet::V8_1 { .. } + | CapabilitySet::V10 { .. } + | CapabilitySet::V10_2 { .. } + | CapabilitySet::V10_3 { .. } + | CapabilitySet::V10_4 { .. 
} + | CapabilitySet::V10_5 { .. } + | CapabilitySet::V10_6 { .. } + | CapabilitySet::V10_6Err { .. } + | CapabilitySet::V10_7 { .. } => 4, + CapabilitySet::V10_1 => 16, + CapabilitySet::Unknown(data) => data.len(), + } + } +} + +impl<'de> Decode<'de> for CapabilitySet { + fn decode(src: &mut ReadCursor<'de>) -> DecodeResult { + ensure_fixed_part_size!(in: src); + + let version = CapabilityVersion::try_from(src.read_u32())?; + let data_length: usize = cast_length!("dataLength", src.read_u32())?; + + ensure_size!(in: src, size: data_length); + let data = src.read_slice(data_length); + let mut cur = ReadCursor::new(data); + + let size = match version { + CapabilityVersion::V8 + | CapabilityVersion::V8_1 + | CapabilityVersion::V10 + | CapabilityVersion::V10_2 + | CapabilityVersion::V10_3 + | CapabilityVersion::V10_4 + | CapabilityVersion::V10_5 + | CapabilityVersion::V10_6 + | CapabilityVersion::V10_6Err + | CapabilityVersion::V10_7 => 4, + CapabilityVersion::V10_1 => 16, + CapabilityVersion::Unknown => 0, + }; + + ensure_size!(in: cur, size: size); + match version { + CapabilityVersion::V8 => Ok(CapabilitySet::V8 { + flags: CapabilitiesV8Flags::from_bits_truncate(cur.read_u32()), + }), + CapabilityVersion::V8_1 => Ok(CapabilitySet::V8_1 { + flags: CapabilitiesV81Flags::from_bits_truncate(cur.read_u32()), + }), + CapabilityVersion::V10 => Ok(CapabilitySet::V10 { + flags: CapabilitiesV10Flags::from_bits_truncate(cur.read_u32()), + }), + CapabilityVersion::V10_1 => { + cur.read_u128(); + + Ok(CapabilitySet::V10_1) + } + CapabilityVersion::V10_2 => Ok(CapabilitySet::V10_2 { + flags: CapabilitiesV10Flags::from_bits_truncate(cur.read_u32()), + }), + CapabilityVersion::V10_3 => Ok(CapabilitySet::V10_3 { + flags: CapabilitiesV103Flags::from_bits_truncate(cur.read_u32()), + }), + CapabilityVersion::V10_4 => Ok(CapabilitySet::V10_4 { + flags: CapabilitiesV104Flags::from_bits_truncate(cur.read_u32()), + }), + CapabilityVersion::V10_5 => Ok(CapabilitySet::V10_5 { + flags: 
CapabilitiesV104Flags::from_bits_truncate(cur.read_u32()), + }), + CapabilityVersion::V10_6 => Ok(CapabilitySet::V10_6 { + flags: CapabilitiesV104Flags::from_bits_truncate(cur.read_u32()), + }), + CapabilityVersion::V10_6Err => Ok(CapabilitySet::V10_6Err { + flags: CapabilitiesV104Flags::from_bits_truncate(cur.read_u32()), + }), + CapabilityVersion::V10_7 => Ok(CapabilitySet::V10_7 { + flags: CapabilitiesV107Flags::from_bits_truncate(cur.read_u32()), + }), + CapabilityVersion::Unknown => Ok(CapabilitySet::Unknown(data.to_vec())), + } + } +} + +#[repr(u32)] +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +pub(crate) enum CapabilityVersion { + V8 = 0x8_0004, + V8_1 = 0x8_0105, + V10 = 0xa_0002, + V10_1 = 0xa_0100, + V10_2 = 0xa_0200, + V10_3 = 0xa_0301, + V10_4 = 0xa_0400, + V10_5 = 0xa_0502, + V10_6 = 0xa_0600, // [MS-RDPEGFX-errata] + V10_6Err = 0xa_0601, // defined similar to FreeRDP to maintain best compatibility + V10_7 = 0xa_0701, + Unknown = 0xa_0702, +} + +impl TryFrom for CapabilityVersion { + type Error = DecodeError; + + fn try_from(value: u32) -> Result { + let res = match value { + 0x8_0004 => CapabilityVersion::V8, + 0x8_0105 => CapabilityVersion::V8_1, + 0xa_0002 => CapabilityVersion::V10, + 0xa_0100 => CapabilityVersion::V10_1, + 0xa_0200 => CapabilityVersion::V10_2, + 0xa_0301 => CapabilityVersion::V10_3, + 0xa_0400 => CapabilityVersion::V10_4, + 0xa_0502 => CapabilityVersion::V10_5, + 0xa_0600 => CapabilityVersion::V10_6, + 0xa_0601 => CapabilityVersion::V10_6Err, + 0xa_0701 => CapabilityVersion::V10_7, + 0xa_0702 => CapabilityVersion::Unknown, + _ => return Err(invalid_field_err!("version", "invalid capability version")), + }; + + Ok(res) + } +} + +impl From for u32 { + #[expect(clippy::as_conversions, reason = "repr(u32) enum discriminant")] + fn from(value: CapabilityVersion) -> Self { + value as u32 + } +} + +bitflags! 
{
    /// 2.2.3.1 RDPGFX_CAPSET_VERSION8
    ///
    /// [2.2.3.1] https://learn.microsoft.com/en-us/openspecs/windows_protocols/ms-rdpegfx/027dd8eb-a066-42e8-ad65-2e0314c4dce5
    #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
    pub struct CapabilitiesV8Flags: u32 {
        const THIN_CLIENT = 0x1;
        const SMALL_CACHE = 0x2;
    }
}

bitflags! {
    /// 2.2.3.2 RDPGFX_CAPSET_VERSION81
    ///
    /// [2.2.3.2] https://learn.microsoft.com/en-us/openspecs/windows_protocols/ms-rdpegfx/487e57cc-cd16-44c4-add8-60b84bf6d9e4
    #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
    pub struct CapabilitiesV81Flags: u32 {
        const THIN_CLIENT = 0x01;
        const SMALL_CACHE = 0x02;
        const AVC420_ENABLED = 0x10;
    }
}

bitflags! {
    /// 2.2.3.3 RDPGFX_CAPSET_VERSION10
    ///
    /// [2.2.3.3] https://learn.microsoft.com/en-us/openspecs/windows_protocols/ms-rdpegfx/d1899912-2b84-4e0d-9e6d-da0fd25d14bc
    #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
    pub struct CapabilitiesV10Flags: u32 {
        const SMALL_CACHE = 0x02;
        const AVC_DISABLED = 0x20;
    }
}

// 2.2.3.4 RDPGFX_CAPSET_VERSION101
//
// [2.2.3.4] https://learn.microsoft.com/en-us/openspecs/windows_protocols/ms-rdpegfx/5985e67e-4080-49a7-85e3-eb3ba0653ff6
// reserved

// 2.2.3.5 RDPGFX_CAPSET_VERSION102
//
// [2.2.3.5] https://learn.microsoft.com/en-us/openspecs/windows_protocols/ms-rdpegfx/a73e87d5-10c3-4d3f-b00c-fd5579570a0b
// same as v10

bitflags! {
    /// 2.2.3.6 RDPGFX_CAPSET_VERSION103
    ///
    /// [2.2.3.6] https://learn.microsoft.com/en-us/openspecs/windows_protocols/ms-rdpegfx/a73e87d5-10c3-4d3f-b00c-fd5579570a0b
    #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
    pub struct CapabilitiesV103Flags: u32 {
        const AVC_DISABLED = 0x20;
        const AVC_THIN_CLIENT = 0x40;
    }
}

bitflags! {
    /// 2.2.3.7 RDPGFX_CAPSET_VERSION104
    ///
    /// [2.2.3.7] https://learn.microsoft.com/en-us/openspecs/windows_protocols/ms-rdpegfx/be5ea8da-44db-478d-b55c-d42d82f11d26
    #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
    pub struct CapabilitiesV104Flags: u32 {
        const SMALL_CACHE = 0x02;
        const AVC_DISABLED = 0x20;
        const AVC_THIN_CLIENT = 0x40;
    }
}

// 2.2.3.8 RDPGFX_CAPSET_VERSION105
//
// [2.2.3.8] https://learn.microsoft.com/en-us/openspecs/windows_protocols/ms-rdpegfx/8fc20f1e-e63e-4b13-a546-22fba213ad83
// same as v104

// 2.2.3.9 RDPGFX_CAPSET_VERSION106
//
// [2.2.3.9] https://learn.microsoft.com/en-us/openspecs/windows_protocols/ms-rdpegfx/8d489900-e903-4778-bb83-691c5ab719d5
// same as v104

bitflags! {
    /// 2.2.3.10 RDPGFX_CAPSET_VERSION107
    ///
    /// [2.2.3.10] https://learn.microsoft.com/en-us/openspecs/windows_protocols/ms-rdpegfx/ba94595b-04de-4fbd-8ee4-89d8ff8f5cf1
    #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
    pub struct CapabilitiesV107Flags: u32 {
        const SMALL_CACHE = 0x02;
        const AVC_DISABLED = 0x20;
        const AVC_THIN_CLIENT = 0x40;
        const SCALEDMAP_DISABLE = 0x80;
    }
}

/// 2.2.2.20 RDPGFX_MAP_SURFACE_TO_WINDOW_PDU
///
/// Maps a surface to a RAIL window on the client.
///
/// [2.2.2.20]:
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct MapSurfaceToWindowPdu {
    pub surface_id: u16,
    pub window_id: u64,
    pub mapped_width: u32,
    pub mapped_height: u32,
}

impl MapSurfaceToWindowPdu {
    const NAME: &'static str = "MapSurfaceToWindowPdu";

    const FIXED_PART_SIZE: usize = 2 /* SurfaceId */ + 8 /* WindowId */ + 4 /* MappedWidth */ + 4 /* MappedHeight */;
}

impl Encode for MapSurfaceToWindowPdu {
    fn encode(&self, dst: &mut WriteCursor<'_>) -> EncodeResult<()> {
        ensure_size!(in: dst, size: self.size());

        dst.write_u16(self.surface_id);
        dst.write_u64(self.window_id);
        dst.write_u32(self.mapped_width);
        dst.write_u32(self.mapped_height);

        Ok(())
    }

    fn name(&self) -> &'static str {
        Self::NAME
    }

    fn size(&self) -> usize {
        Self::FIXED_PART_SIZE
    }
}

impl<'a> Decode<'a> for MapSurfaceToWindowPdu {
    fn decode(src: &mut ReadCursor<'a>) -> DecodeResult<Self> {
        ensure_fixed_part_size!(in: src);

        // Struct-expression fields are evaluated in written order, which
        // matches the wire layout.
        Ok(Self {
            surface_id: src.read_u16(),
            window_id: src.read_u64(),
            mapped_width: src.read_u32(),
            mapped_height: src.read_u32(),
        })
    }
}

/// 2.2.2.21 RDPGFX_QOE_FRAME_ACKNOWLEDGE_PDU
///
/// Client-reported quality-of-experience timings for a decoded frame.
///
/// [2.2.2.21]:
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct QoeFrameAcknowledgePdu {
    pub frame_id: u32,
    pub timestamp: u32,
    pub time_diff_se: u16,
    pub time_diff_dr: u16,
}

impl QoeFrameAcknowledgePdu {
    const NAME: &'static str = "QoeFrameAcknowledgePdu";

    const FIXED_PART_SIZE: usize = 4 /* FrameId */ + 4 /* timestamp */ + 2 /* diffSE */ + 2 /* diffDR */;
}

impl Encode for QoeFrameAcknowledgePdu {
    fn encode(&self, dst: &mut WriteCursor<'_>) -> EncodeResult<()> {
        ensure_fixed_part_size!(in: dst);

        dst.write_u32(self.frame_id);
        dst.write_u32(self.timestamp);
        dst.write_u16(self.time_diff_se);
        dst.write_u16(self.time_diff_dr);

        Ok(())
    }

    fn name(&self) -> &'static str {
        Self::NAME
    }

    fn size(&self) -> usize {
        Self::FIXED_PART_SIZE
    }
}

impl<'a> Decode<'a> for QoeFrameAcknowledgePdu {
    fn decode(src: &mut ReadCursor<'a>) -> DecodeResult<Self> {
        ensure_fixed_part_size!(in: src);

        Ok(Self {
            frame_id: src.read_u32(),
            timestamp: src.read_u32(),
            time_diff_se: src.read_u16(),
            time_diff_dr: src.read_u16(),
        })
    }
}

/// 2.2.2.22 RDPGFX_MAP_SURFACE_TO_SCALED_OUTPUT_PDU
///
/// Maps a surface to a rectangle of the graphics output buffer with scaling.
///
/// [2.2.2.22]:
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct MapSurfaceToScaledOutputPdu {
    pub surface_id: u16,
    pub output_origin_x: u32,
    pub output_origin_y: u32,
    pub target_width: u32,
    pub target_height: u32,
}

impl MapSurfaceToScaledOutputPdu {
const NAME: &'static str = "MapSurfaceToScaledOutputPdu"; + + const FIXED_PART_SIZE: usize = 2 /* SurfaceId */ + 2 /* reserved */ + 4 /* oox */ + 4 /* ooy */ + 4 /* targetWidth */ + 4 /* targetHeight */; +} + +impl Encode for MapSurfaceToScaledOutputPdu { + fn encode(&self, dst: &mut WriteCursor<'_>) -> EncodeResult<()> { + ensure_size!(in: dst, size: self.size()); + + dst.write_u16(self.surface_id); + dst.write_u16(0); // reserved + dst.write_u32(self.output_origin_x); + dst.write_u32(self.output_origin_y); + dst.write_u32(self.target_width); + dst.write_u32(self.target_height); + + Ok(()) + } + + fn name(&self) -> &'static str { + Self::NAME + } + + fn size(&self) -> usize { + Self::FIXED_PART_SIZE + } +} + +impl<'a> Decode<'a> for MapSurfaceToScaledOutputPdu { + fn decode(src: &mut ReadCursor<'a>) -> DecodeResult { + ensure_fixed_part_size!(in: src); + + let surface_id = src.read_u16(); + let _reserved = src.read_u16(); + let output_origin_x = src.read_u32(); + let output_origin_y = src.read_u32(); + let target_width = src.read_u32(); + let target_height = src.read_u32(); + + Ok(Self { + surface_id, + output_origin_x, + output_origin_y, + target_width, + target_height, + }) + } +} + +/// 2.2.2.23 RDPGFX_MAP_SURFACE_TO_SCALED_WINDOW_PDU +/// +/// [2.2.2.23] +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct MapSurfaceToScaledWindowPdu { + pub surface_id: u16, + pub window_id: u64, + pub mapped_width: u32, + pub mapped_height: u32, + pub target_width: u32, + pub target_height: u32, +} + +impl MapSurfaceToScaledWindowPdu { + const NAME: &'static str = "MapSurfaceToScaledWindowPdu"; + + const FIXED_PART_SIZE: usize = 2 /* SurfaceId */ + 8 /* WindowId */ + 4 /* MappedWidth */ + 4 /* MappedHeight */ + 4 /* TargetWidth */ + 4 /* TargetHeight */; +} + +impl Encode for MapSurfaceToScaledWindowPdu { + fn encode(&self, dst: &mut WriteCursor<'_>) -> EncodeResult<()> { + ensure_size!(in: dst, size: self.size()); + + dst.write_u16(self.surface_id); + 
dst.write_u64(self.window_id); + dst.write_u32(self.mapped_width); + dst.write_u32(self.mapped_height); + dst.write_u32(self.target_width); + dst.write_u32(self.target_height); + + Ok(()) + } + + fn name(&self) -> &'static str { + Self::NAME + } + + fn size(&self) -> usize { + Self::FIXED_PART_SIZE + } +} + +impl<'a> Decode<'a> for MapSurfaceToScaledWindowPdu { + fn decode(src: &mut ReadCursor<'a>) -> DecodeResult { + ensure_fixed_part_size!(in: src); + + let surface_id = src.read_u16(); + let window_id = src.read_u64(); + let mapped_width = src.read_u32(); + let mapped_height = src.read_u32(); + let target_width = src.read_u32(); + let target_height = src.read_u32(); + + Ok(Self { + surface_id, + window_id, + mapped_width, + mapped_height, + target_width, + target_height, + }) + } +} + +#[repr(u16)] +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +pub enum Codec1Type { + Uncompressed = 0x0, + RemoteFx = 0x3, + ClearCodec = 0x8, + Planar = 0xa, + Avc420 = 0xb, + Alpha = 0xc, + Avc444 = 0xe, + Avc444v2 = 0xf, +} + +impl TryFrom for Codec1Type { + type Error = DecodeError; + + fn try_from(value: u16) -> Result { + match value { + 0x0 => Ok(Codec1Type::Uncompressed), + 0x3 => Ok(Codec1Type::RemoteFx), + 0x8 => Ok(Codec1Type::ClearCodec), + 0xa => Ok(Codec1Type::Planar), + 0xb => Ok(Codec1Type::Avc420), + 0xc => Ok(Codec1Type::Alpha), + 0xe => Ok(Codec1Type::Avc444), + 0xf => Ok(Codec1Type::Avc444v2), + _ => Err(invalid_field_err!("Codec1Type", "invalid codec type")), + } + } +} + +impl From for u16 { + #[expect(clippy::as_conversions, reason = "repr(u16) enum discriminant")] + fn from(value: Codec1Type) -> Self { + value as u16 + } +} + +#[repr(u16)] +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +pub enum Codec2Type { + RemoteFxProgressive = 0x9, +} + +impl TryFrom for Codec2Type { + type Error = DecodeError; + + fn try_from(value: u16) -> Result { + match value { + 0x9 => Ok(Codec2Type::RemoteFxProgressive), + _ => Err(invalid_field_err!("Codec2Type", "invalid codec 
type")), + } + } +} + +impl From for u16 { + #[expect(clippy::as_conversions, reason = "repr(u16) enum discriminant")] + fn from(value: Codec2Type) -> Self { + value as u16 + } +} diff --git a/crates/ironrdp-egfx/src/pdu/common.rs b/crates/ironrdp-egfx/src/pdu/common.rs new file mode 100644 index 000000000..112683a2e --- /dev/null +++ b/crates/ironrdp-egfx/src/pdu/common.rs @@ -0,0 +1,129 @@ +use ironrdp_pdu::{ + ensure_fixed_part_size, invalid_field_err, Decode, DecodeError, DecodeResult, Encode, EncodeResult, ReadCursor, + WriteCursor, +}; + +/// 2.2.1.1 RDPGFX_POINT16 +/// +/// [2.2.1.1]: +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct Point { + pub x: u16, + pub y: u16, +} + +impl Point { + const NAME: &'static str = "GfxPoint"; + + const FIXED_PART_SIZE: usize = 2 /* X */ + 2 /* Y */; +} + +impl Encode for Point { + fn encode(&self, dst: &mut WriteCursor<'_>) -> EncodeResult<()> { + ensure_fixed_part_size!(in: dst); + + dst.write_u16(self.x); + dst.write_u16(self.y); + + Ok(()) + } + + fn name(&self) -> &'static str { + Self::NAME + } + + fn size(&self) -> usize { + Self::FIXED_PART_SIZE + } +} + +impl<'de> Decode<'de> for Point { + fn decode(src: &mut ReadCursor<'de>) -> DecodeResult { + ensure_fixed_part_size!(in: src); + + let x = src.read_u16(); + let y = src.read_u16(); + + Ok(Self { x, y }) + } +} + +/// 2.2.1.3 RDPGFX_COLOR32 +/// +/// [2.2.1.3]: +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct Color { + pub b: u8, + pub g: u8, + pub r: u8, + pub xa: u8, +} + +impl Color { + const NAME: &'static str = "GfxColor"; + + pub const FIXED_PART_SIZE: usize = 4 /* BGRA */; +} + +impl Encode for Color { + fn encode(&self, dst: &mut WriteCursor<'_>) -> EncodeResult<()> { + ensure_fixed_part_size!(in: dst); + + dst.write_u8(self.b); + dst.write_u8(self.g); + dst.write_u8(self.r); + dst.write_u8(self.xa); + + Ok(()) + } + + fn name(&self) -> &'static str { + Self::NAME + } + + fn size(&self) -> usize { + Self::FIXED_PART_SIZE + } +} + +impl<'de> Decode<'de> 
for Color { + fn decode(src: &mut ReadCursor<'de>) -> DecodeResult { + ensure_fixed_part_size!(in: src); + + let b = src.read_u8(); + let g = src.read_u8(); + let r = src.read_u8(); + let xa = src.read_u8(); + + Ok(Self { b, g, r, xa }) + } +} + +/// 2.2.1.4 RDPGFX_PIXELFORMAT +/// +/// [2.2.1.4]: +#[repr(u8)] +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +pub enum PixelFormat { + XRgb = 0x20, + ARgb = 0x21, +} + +impl TryFrom for PixelFormat { + type Error = DecodeError; + + fn try_from(value: u8) -> Result { + match value { + 0x20 => Ok(PixelFormat::XRgb), + 0x21 => Ok(PixelFormat::ARgb), + _ => Err(invalid_field_err!("PixelFormat", "invalid pixel format")), + } + } +} + +impl From for u8 { + #[expect(clippy::as_conversions, reason = "repr(u8) enum discriminant")] + fn from(value: PixelFormat) -> Self { + value as u8 + } +} diff --git a/crates/ironrdp-egfx/src/pdu/mod.rs b/crates/ironrdp-egfx/src/pdu/mod.rs new file mode 100644 index 000000000..5f0f2b785 --- /dev/null +++ b/crates/ironrdp-egfx/src/pdu/mod.rs @@ -0,0 +1,24 @@ +//! Display Pipeline Virtual Channel Extension PDUs [MS-RDPEGFX][1] implementation. +//! +//! This module provides PDU types for the Graphics Pipeline Extension, including +//! H.264/AVC420 video streaming support. +//! +//! # Server-Side Utilities +//! +//! For server implementations, the following utilities are provided: +//! +//! - [`Avc420Region`] - Region metadata for H.264 frames +//! - [`annex_b_to_avc`] - Convert H.264 Annex B to AVC format +//! - [`align_to_16`] - Align dimensions to H.264 macroblock boundaries +//! - [`encode_avc420_bitmap_stream`] - Create AVC420 bitmap streams +//! +//! 
[1]: https://learn.microsoft.com/en-us/openspecs/windows_protocols/ms-rdpegfx/da5c75f9-cd99-450c-98c4-014a496942b0 + +mod common; +pub use common::*; + +mod cmd; +pub use cmd::*; + +mod avc; +pub use avc::*; diff --git a/crates/ironrdp-egfx/src/server.rs b/crates/ironrdp-egfx/src/server.rs new file mode 100644 index 000000000..bf82bf2cb --- /dev/null +++ b/crates/ironrdp-egfx/src/server.rs @@ -0,0 +1,1156 @@ +//! Server-side EGFX implementation +//! +//! This module provides complete server-side support for the Graphics Pipeline Extension +//! (MS-RDPEGFX), enabling H.264 AVC420/AVC444 video streaming to RDP clients. +//! +//! # Protocol Compliance +//! +//! This implementation follows MS-RDPEGFX specification requirements: +//! +//! - **Capability Negotiation**: Supports V8, V8.1, V10, V10.1-V10.7 +//! - **Surface Management**: Multi-surface support with proper lifecycle +//! - **Frame Flow Control**: Tracks unacknowledged frames per spec +//! - **Codec Support**: AVC420, AVC444, with extensibility for others +//! +//! # Architecture +//! +//! The server follows this message flow: +//! +//! ```text +//! Client Server +//! | | +//! |--- CapabilitiesAdvertise ------------>| +//! | | (negotiate capabilities) +//! |<----------- CapabilitiesConfirm ------| +//! |<----------- ResetGraphics ------------| +//! |<----------- CreateSurface ------------| +//! |<----------- MapSurfaceToOutput -------| +//! | | +//! | (For each frame:) | +//! |<----------- StartFrame ---------------| +//! |<----------- WireToSurface1/2 ---------| (H.264 data) +//! |<----------- EndFrame -----------------| +//! | | +//! |--- FrameAcknowledge ----------------->| (flow control) +//! |--- QoeFrameAcknowledge -------------->| (optional, V10+) +//! ``` +//! +//! # Usage +//! +//! ```ignore +//! use ironrdp_egfx::server::{GraphicsPipelineServer, GraphicsPipelineHandler}; +//! +//! struct MyHandler; +//! +//! impl GraphicsPipelineHandler for MyHandler { +//! 
fn capabilities_advertise(&mut self, caps: &CapabilitiesAdvertisePdu) { +//! // Client sent capabilities +//! } +//! +//! fn on_ready(&mut self, negotiated: &CapabilitySet) { +//! // Server is ready to send frames +//! } +//! } +//! +//! let server = GraphicsPipelineServer::new(Box::new(MyHandler)); +//! ``` + +use std::collections::{HashMap, VecDeque}; +use std::time::Instant; + +use ironrdp_core::{decode, impl_as_any}; +use ironrdp_dvc::{DvcMessage, DvcProcessor, DvcServerProcessor}; +use ironrdp_pdu::gcc::Monitor; +use ironrdp_pdu::geometry::InclusiveRectangle; +use ironrdp_pdu::{decode_err, PduResult}; +use tracing::{debug, trace, warn}; + +use crate::pdu::{ + encode_avc420_bitmap_stream, Avc420BitmapStream, Avc420Region, Avc444BitmapStream, CacheImportOfferPdu, + CacheImportReplyPdu, CapabilitiesAdvertisePdu, CapabilitiesConfirmPdu, CapabilitiesV103Flags, + CapabilitiesV104Flags, CapabilitiesV107Flags, CapabilitiesV10Flags, CapabilitiesV81Flags, CapabilitiesV8Flags, + CapabilitySet, Codec1Type, CreateSurfacePdu, DeleteSurfacePdu, Encoding, EndFramePdu, FrameAcknowledgePdu, GfxPdu, + MapSurfaceToOutputPdu, PixelFormat, QoeFrameAcknowledgePdu, ResetGraphicsPdu, StartFramePdu, Timestamp, + WireToSurface1Pdu, +}; +use crate::CHANNEL_NAME; + +// ============================================================================ +// Constants +// ============================================================================ + +/// Default maximum frames in flight before applying backpressure +const DEFAULT_MAX_FRAMES_IN_FLIGHT: u32 = 3; + +/// Special queue depth value indicating client has disabled acknowledgments +const SUSPEND_FRAME_ACK_QUEUE_DEPTH: u32 = 0xFFFFFFFF; + +// ============================================================================ +// Surface Management +// ============================================================================ + +/// Surface state tracked by server +/// +/// Per MS-RDPEGFX, the server maintains an "Offscreen Surfaces ADM element" 
/// which is a list of surfaces created on the client.
#[derive(Debug, Clone)]
pub struct Surface {
    /// Surface identifier (unique per session)
    pub id: u16,
    /// Surface width in pixels
    pub width: u16,
    /// Surface height in pixels
    pub height: u16,
    /// Pixel format
    pub pixel_format: PixelFormat,
    /// Whether this surface is mapped to an output
    pub is_mapped: bool,
    /// Output X origin (if mapped)
    pub output_origin_x: u32,
    /// Output Y origin (if mapped)
    pub output_origin_y: u32,
}

impl Surface {
    fn new(id: u16, width: u16, height: u16, pixel_format: PixelFormat) -> Self {
        Self {
            id,
            width,
            height,
            pixel_format,
            is_mapped: false,
            output_origin_x: 0,
            output_origin_y: 0,
        }
    }
}

/// Multi-surface management
///
/// Implements the "Offscreen Surfaces ADM element" from MS-RDPEGFX.
#[derive(Debug, Default)]
pub struct Surfaces {
    surfaces: HashMap<u16, Surface>,
    next_surface_id: u16,
}

impl Surfaces {
    /// Create a new surface manager
    pub fn new() -> Self {
        Self::default()
    }

    /// Allocate a new surface ID
    ///
    /// Skips IDs still registered so that a long-lived session whose counter
    /// wraps around `u16::MAX` cannot hand out an ID that is already in use
    /// (the previous implementation only caught this with a `debug_assert!`,
    /// which is compiled out in release builds).
    pub fn allocate_id(&mut self) -> u16 {
        debug_assert!(
            self.surfaces.len() < usize::from(u16::MAX),
            "surface ID space exhausted"
        );
        loop {
            let id = self.next_surface_id;
            self.next_surface_id = self.next_surface_id.wrapping_add(1);
            if !self.surfaces.contains_key(&id) {
                return id;
            }
        }
    }

    /// Register a surface
    pub fn insert(&mut self, surface: Surface) {
        self.surfaces.insert(surface.id, surface);
    }

    /// Remove a surface
    pub fn remove(&mut self, surface_id: u16) -> Option<Surface> {
        self.surfaces.remove(&surface_id)
    }

    /// Get a surface by ID
    pub fn get(&self, surface_id: u16) -> Option<&Surface> {
        self.surfaces.get(&surface_id)
    }

    /// Get a mutable surface by ID
    pub fn get_mut(&mut self, surface_id: u16) -> Option<&mut Surface> {
        self.surfaces.get_mut(&surface_id)
    }

    /// Check if a surface exists
    pub fn contains(&self, surface_id: u16) -> bool {
        self.surfaces.contains_key(&surface_id)
    }

    /// Get all surface IDs
    pub fn surface_ids(&self) -> impl Iterator<Item = u16> + '_ {
        self.surfaces.keys().copied()
    }

    /// Clear all surfaces
    pub fn clear(&mut self) {
        self.surfaces.clear();
    }

    /// Number of surfaces
    pub fn len(&self) -> usize {
        self.surfaces.len()
    }

    /// Check if empty
    pub fn is_empty(&self) -> bool {
        self.surfaces.is_empty()
    }
}

// ============================================================================
// Frame Tracking
// ============================================================================

/// Information about a frame awaiting acknowledgment
///
/// Per MS-RDPEGFX, the server maintains an "Unacknowledged Frames ADM element"
/// which tracks frames sent but not yet acknowledged.
#[derive(Debug, Clone)]
pub struct FrameInfo {
    /// Frame identifier
    pub frame_id: u32,
    /// Frame timestamp
    pub timestamp: Timestamp,
    /// When the frame was sent
    pub sent_at: Instant,
    /// Approximate size in bytes
    pub size_bytes: usize,
}

/// Quality of Experience metrics from client
#[derive(Debug, Clone)]
pub struct QoeMetrics {
    /// Frame ID this relates to
    pub frame_id: u32,
    /// Client timestamp when decode started
    pub timestamp: u32,
    /// Time difference for serial encode (microseconds)
    pub time_diff_se: u16,
    /// Time difference for decode and render (microseconds)
    pub time_diff_dr: u16,
}

/// Frame tracking for flow control
///
/// Implements the "Unacknowledged Frames ADM element" from MS-RDPEGFX.
#[derive(Debug)]
pub struct FrameTracker {
    /// Frames sent but not yet acknowledged
    unacknowledged: HashMap<u32, FrameInfo>,
    /// Last reported client queue depth
    client_queue_depth: u32,
    /// Whether client has suspended acknowledgments
    ack_suspended: bool,
    /// Next frame ID to assign
    next_frame_id: u32,
    /// Maximum frames in flight before backpressure
    max_in_flight: u32,
    /// Total frames sent
    total_sent: u64,
    /// Total frames acknowledged
    total_acked: u64,
}

impl Default for FrameTracker {
    fn default() -> Self {
        Self::new()
    }
}

impl FrameTracker {
    /// Create a new frame tracker
    pub fn new() -> Self {
        Self {
            unacknowledged: HashMap::new(),
            client_queue_depth: 0,
            ack_suspended: false,
            next_frame_id: 0,
            max_in_flight: DEFAULT_MAX_FRAMES_IN_FLIGHT,
            total_sent: 0,
            total_acked: 0,
        }
    }

    /// Set maximum frames in flight
    pub fn set_max_in_flight(&mut self, max: u32) {
        self.max_in_flight = max;
    }

    /// Allocate a new frame ID and track it
    pub fn begin_frame(&mut self, timestamp: Timestamp) -> u32 {
        let frame_id = self.next_frame_id;
        self.next_frame_id = self.next_frame_id.wrapping_add(1);

        self.unacknowledged.insert(
            frame_id,
            FrameInfo {
                frame_id,
                timestamp,
                sent_at: Instant::now(),
                // Filled in later via `set_frame_size` once encoding is done.
                size_bytes: 0,
            },
        );

        self.total_sent += 1;
        frame_id
    }

    /// Update frame size after encoding
    pub fn set_frame_size(&mut self, frame_id: u32, size_bytes: usize) {
        if let Some(info) = self.unacknowledged.get_mut(&frame_id) {
            info.size_bytes = size_bytes;
        }
    }

    /// Handle frame acknowledgment from client
    ///
    /// A `queue_depth` of `SUSPEND_FRAME_ACK_QUEUE_DEPTH` means the client is
    /// suspending acknowledgments; any other value resumes them.
    pub fn acknowledge(&mut self, frame_id: u32, queue_depth: u32) -> Option<FrameInfo> {
        if queue_depth == SUSPEND_FRAME_ACK_QUEUE_DEPTH {
            self.ack_suspended = true;
            self.client_queue_depth = 0;
        } else {
            self.ack_suspended = false;
            self.client_queue_depth = queue_depth;
        }

        let info = self.unacknowledged.remove(&frame_id);
        if info.is_some() {
            self.total_acked += 1;
        }
        info
    }

    /// Number of frames in flight
    #[expect(
        clippy::cast_possible_truncation,
        clippy::as_conversions,
        reason = "frame count will never exceed u32::MAX"
    )]
    pub fn in_flight(&self) -> u32 {
        self.unacknowledged.len() as u32
    }

    /// Check if backpressure should be applied
    pub fn should_backpressure(&self) -> bool {
        !self.ack_suspended && self.in_flight() >= self.max_in_flight
    }

    /// Get client queue depth
    pub fn client_queue_depth(&self) -> u32 {
        self.client_queue_depth
    }

    /// Check if acknowledgments are suspended
    pub fn is_ack_suspended(&self) -> bool {
        self.ack_suspended
    }

    /// Get total frames sent
    pub fn total_sent(&self) -> u64 {
        self.total_sent
    }

    /// Get total frames acknowledged
    pub fn total_acked(&self) -> u64 {
        self.total_acked
    }

    /// Clear all tracking state
    pub fn clear(&mut self) {
        self.unacknowledged.clear();
        self.client_queue_depth = 0;
        self.ack_suspended = false;
    }
}

// ============================================================================
// Capability Negotiation
// ============================================================================

/// Codec capabilities determined from negotiation
#[derive(Debug, Clone, Default)]
pub struct CodecCapabilities {
    /// AVC420 (H.264 4:2:0) is available
    pub avc420: bool,
    /// AVC444 (H.264 4:4:4) is available
    pub avc444: bool,
    /// Small cache mode
    pub small_cache: bool,
    /// Thin client mode
    pub thin_client: bool,
}

impl CodecCapabilities {
    /// Extract codec capabilities from a capability set
    fn from_capability_set(cap: &CapabilitySet) -> Self {
        match cap {
            CapabilitySet::V8 { flags } => Self {
                avc420: false,
                avc444: false,
                small_cache: flags.contains(CapabilitiesV8Flags::SMALL_CACHE),
                thin_client: flags.contains(CapabilitiesV8Flags::THIN_CLIENT),
            },
            CapabilitySet::V8_1 { flags } => Self {
                avc420: flags.contains(CapabilitiesV81Flags::AVC420_ENABLED),
                avc444: false,
                small_cache:
flags.contains(CapabilitiesV81Flags::SMALL_CACHE), + thin_client: flags.contains(CapabilitiesV81Flags::THIN_CLIENT), + }, + CapabilitySet::V10 { flags } | CapabilitySet::V10_2 { flags } => Self { + avc420: !flags.contains(CapabilitiesV10Flags::AVC_DISABLED), + avc444: !flags.contains(CapabilitiesV10Flags::AVC_DISABLED), + small_cache: flags.contains(CapabilitiesV10Flags::SMALL_CACHE), + thin_client: false, + }, + CapabilitySet::V10_1 => Self { + avc420: true, + avc444: true, + small_cache: false, + thin_client: false, + }, + CapabilitySet::V10_3 { flags } => Self { + // V10.3 lacks SMALL_CACHE flag + avc420: !flags.contains(CapabilitiesV103Flags::AVC_DISABLED), + avc444: !flags.contains(CapabilitiesV103Flags::AVC_DISABLED), + small_cache: false, + thin_client: flags.contains(CapabilitiesV103Flags::AVC_THIN_CLIENT), + }, + CapabilitySet::V10_4 { flags } + | CapabilitySet::V10_5 { flags } + | CapabilitySet::V10_6 { flags } + | CapabilitySet::V10_6Err { flags } => Self { + avc420: !flags.contains(CapabilitiesV104Flags::AVC_DISABLED), + avc444: !flags.contains(CapabilitiesV104Flags::AVC_DISABLED), + small_cache: flags.contains(CapabilitiesV104Flags::SMALL_CACHE), + thin_client: flags.contains(CapabilitiesV104Flags::AVC_THIN_CLIENT), + }, + CapabilitySet::V10_7 { flags } => Self { + avc420: !flags.contains(CapabilitiesV107Flags::AVC_DISABLED), + avc444: !flags.contains(CapabilitiesV107Flags::AVC_DISABLED), + small_cache: flags.contains(CapabilitiesV107Flags::SMALL_CACHE), + thin_client: flags.contains(CapabilitiesV107Flags::AVC_THIN_CLIENT), + }, + CapabilitySet::Unknown(_) => Self::default(), + } + } +} + +/// Priority order for capability negotiation (highest to lowest) +fn capability_priority(cap: &CapabilitySet) -> u32 { + match cap { + CapabilitySet::V10_7 { .. } => 12, + CapabilitySet::V10_6Err { .. } => 11, + CapabilitySet::V10_6 { .. } => 10, + CapabilitySet::V10_5 { .. } => 9, + CapabilitySet::V10_4 { .. } => 8, + CapabilitySet::V10_3 { .. 
} => 7, + CapabilitySet::V10_2 { .. } => 6, + CapabilitySet::V10_1 => 5, + CapabilitySet::V10 { .. } => 4, + CapabilitySet::V8_1 { .. } => 3, + CapabilitySet::V8 { .. } => 2, + _ => 0, + } +} + +/// Negotiate the best capability set between client and server +fn negotiate_capabilities(client_caps: &[CapabilitySet], server_caps: &[CapabilitySet]) -> Option { + let mut server_sorted: Vec<_> = server_caps.iter().collect(); + server_sorted.sort_by_key(|cap| core::cmp::Reverse(capability_priority(cap))); + + for server_cap in server_sorted { + for client_cap in client_caps { + if core::mem::discriminant(client_cap) == core::mem::discriminant(server_cap) { + return Some(intersect_flags(client_cap, server_cap)); + } + } + } + + None +} + +/// Intersect flags for matching capability set versions +fn intersect_flags(client: &CapabilitySet, server: &CapabilitySet) -> CapabilitySet { + match (client, server) { + (CapabilitySet::V8 { flags: cf }, CapabilitySet::V8 { flags: sf }) => CapabilitySet::V8 { flags: *cf & *sf }, + (CapabilitySet::V8_1 { flags: cf }, CapabilitySet::V8_1 { flags: sf }) => { + CapabilitySet::V8_1 { flags: *cf & *sf } + } + (CapabilitySet::V10 { flags: cf }, CapabilitySet::V10 { flags: sf }) => CapabilitySet::V10 { flags: *cf & *sf }, + (CapabilitySet::V10_2 { flags: cf }, CapabilitySet::V10_2 { flags: sf }) => { + CapabilitySet::V10_2 { flags: *cf & *sf } + } + (CapabilitySet::V10_3 { flags: cf }, CapabilitySet::V10_3 { flags: sf }) => { + CapabilitySet::V10_3 { flags: *cf & *sf } + } + (CapabilitySet::V10_4 { flags: cf }, CapabilitySet::V10_4 { flags: sf }) => { + CapabilitySet::V10_4 { flags: *cf & *sf } + } + (CapabilitySet::V10_5 { flags: cf }, CapabilitySet::V10_5 { flags: sf }) => { + CapabilitySet::V10_5 { flags: *cf & *sf } + } + (CapabilitySet::V10_6 { flags: cf }, CapabilitySet::V10_6 { flags: sf }) => { + CapabilitySet::V10_6 { flags: *cf & *sf } + } + (CapabilitySet::V10_6Err { flags: cf }, CapabilitySet::V10_6Err { flags: sf }) => { + 
CapabilitySet::V10_6Err { flags: *cf & *sf } + } + (CapabilitySet::V10_7 { flags: cf }, CapabilitySet::V10_7 { flags: sf }) => { + CapabilitySet::V10_7 { flags: *cf & *sf } + } + // V10_1 has no flags; Unknown and mismatched variants return server as-is. + _ => server.clone(), + } +} + +// ============================================================================ +// Handler Trait +// ============================================================================ + +/// Handler trait for server-side EGFX events +/// +/// Implement this trait to receive callbacks when the EGFX channel state changes +/// or when client messages are received. +pub trait GraphicsPipelineHandler: Send { + /// Called when the client advertises its capabilities + /// + /// This is informational - the server will automatically negotiate + /// based on [`preferred_capabilities()`](Self::preferred_capabilities). + fn capabilities_advertise(&mut self, pdu: &CapabilitiesAdvertisePdu); + + /// Called when the EGFX channel is ready to send frames + /// + /// At this point, capability negotiation is complete. + /// The handler should create surfaces and start sending frames. + fn on_ready(&mut self, negotiated: &CapabilitySet); + + /// Called when a frame has been acknowledged by the client + fn on_frame_ack(&mut self, _frame_id: u32, _queue_depth: u32) {} + + /// Called when QoE metrics are received from client (V10+) + fn on_qoe_metrics(&mut self, _metrics: QoeMetrics) {} + + /// Called when a surface is created + fn on_surface_created(&mut self, _surface: &Surface) {} + + /// Called when a surface is deleted + fn on_surface_deleted(&mut self, _surface_id: u16) {} + + /// Called when the EGFX channel is closed + fn on_close(&mut self) {} + + /// Returns the server's preferred capabilities + /// + /// Override this to customize codec support. The default enables + /// AVC420/AVC444 with V10.7 and V8.1 as fallback. 
+ fn preferred_capabilities(&self) -> Vec<CapabilitySet> { + vec![ + CapabilitySet::V10_7 { + flags: CapabilitiesV107Flags::SMALL_CACHE, + }, + CapabilitySet::V10 { + flags: CapabilitiesV10Flags::SMALL_CACHE, + }, + CapabilitySet::V8_1 { + flags: CapabilitiesV81Flags::AVC420_ENABLED | CapabilitiesV81Flags::SMALL_CACHE, + }, + CapabilitySet::V8 { + flags: CapabilitiesV8Flags::SMALL_CACHE, + }, + ] + } + + /// Returns the maximum frames in flight before backpressure + fn max_frames_in_flight(&self) -> u32 { + DEFAULT_MAX_FRAMES_IN_FLIGHT + } + + /// Called when client offers to import cached bitmaps + /// + /// Return the list of cache slot IDs to accept. + /// Default rejects all (returns empty). + fn on_cache_import_offer(&mut self, _offer: &CacheImportOfferPdu) -> Vec<u16> { + vec![] + } +} + +// ============================================================================ +// Server State Machine +// ============================================================================ + +/// Server state machine states +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +enum ServerState { + /// Waiting for client CapabilitiesAdvertise + WaitingForCapabilities, + /// Channel is ready, can send frames + Ready, + /// Performing a resize operation + Resizing, + /// Channel has been closed + Closed, +} + +// ============================================================================ +// Graphics Pipeline Server +// ============================================================================ + +/// Server for the Graphics Pipeline Virtual Channel (EGFX) +/// +/// This server handles capability negotiation, surface management, +/// and H.264 frame transmission to RDP clients per MS-RDPEGFX specification.
+pub struct GraphicsPipelineServer { + handler: Box<dyn GraphicsPipelineHandler>, + + state: ServerState, + negotiated_caps: Option<CapabilitySet>, + codec_caps: CodecCapabilities, + + surfaces: Surfaces, + frames: FrameTracker, + + output_width: u16, + output_height: u16, + output_queue: VecDeque<GfxPdu>, +} + +impl GraphicsPipelineServer { + /// Create a new GraphicsPipelineServer + pub fn new(handler: Box<dyn GraphicsPipelineHandler>) -> Self { + let max_frames = handler.max_frames_in_flight(); + let mut frames = FrameTracker::new(); + frames.set_max_in_flight(max_frames); + + Self { + handler, + state: ServerState::WaitingForCapabilities, + negotiated_caps: None, + codec_caps: CodecCapabilities::default(), + surfaces: Surfaces::new(), + frames, + output_width: 0, + output_height: 0, + output_queue: VecDeque::new(), + } + } + + // ======================================================================== + // State Queries + // ======================================================================== + + /// Check if the server is ready to send frames + #[must_use] + pub fn is_ready(&self) -> bool { + self.state == ServerState::Ready + } + + /// Get the negotiated capability set + #[must_use] + pub fn negotiated_capabilities(&self) -> Option<&CapabilitySet> { + self.negotiated_caps.as_ref() + } + + /// Get codec capabilities determined from negotiation + #[must_use] + pub fn codec_capabilities(&self) -> &CodecCapabilities { + &self.codec_caps + } + + /// Check if AVC420 (H.264 4:2:0) is available + #[must_use] + pub fn supports_avc420(&self) -> bool { + self.codec_caps.avc420 + } + + /// Check if AVC444 (H.264 4:4:4) is available + #[must_use] + pub fn supports_avc444(&self) -> bool { + self.codec_caps.avc444 + } + + /// Get the graphics output buffer dimensions + #[must_use] + pub fn output_dimensions(&self) -> (u16, u16) { + (self.output_width, self.output_height) + } + + // ======================================================================== + // Surface Management + //
======================================================================== + + /// Create a new surface + /// + /// Queues CreateSurface PDU and returns the surface ID. + /// Returns `None` if not ready. + pub fn create_surface(&mut self, width: u16, height: u16) -> Option<u16> { + self.create_surface_with_format(width, height, PixelFormat::XRgb) + } + + /// Create a new surface with specific pixel format + pub fn create_surface_with_format(&mut self, width: u16, height: u16, pixel_format: PixelFormat) -> Option<u16> { + if self.state != ServerState::Ready && self.state != ServerState::Resizing { + return None; + } + + let surface_id = self.surfaces.allocate_id(); + let surface = Surface::new(surface_id, width, height, pixel_format); + + self.output_queue.push_back(GfxPdu::CreateSurface(CreateSurfacePdu { + surface_id, + width, + height, + pixel_format, + })); + + self.handler.on_surface_created(&surface); + self.surfaces.insert(surface); + + debug!(surface_id, width, height, ?pixel_format, "Created surface"); + Some(surface_id) + } + + /// Delete a surface + /// + /// Queues DeleteSurface PDU. Returns `false` if surface doesn't exist.
+ pub fn delete_surface(&mut self, surface_id: u16) -> bool { + if self.surfaces.remove(surface_id).is_none() { + return false; + } + + self.output_queue + .push_back(GfxPdu::DeleteSurface(DeleteSurfacePdu { surface_id })); + + self.handler.on_surface_deleted(surface_id); + debug!(surface_id, "Deleted surface"); + true + } + + /// Map a surface to the graphics output buffer + pub fn map_surface_to_output(&mut self, surface_id: u16, origin_x: u32, origin_y: u32) -> bool { + let Some(surface) = self.surfaces.get_mut(surface_id) else { + return false; + }; + + surface.is_mapped = true; + surface.output_origin_x = origin_x; + surface.output_origin_y = origin_y; + + self.output_queue + .push_back(GfxPdu::MapSurfaceToOutput(MapSurfaceToOutputPdu { + surface_id, + output_origin_x: origin_x, + output_origin_y: origin_y, + })); + + debug!(surface_id, origin_x, origin_y, "Mapped surface to output"); + true + } + + /// Get a surface by ID + #[must_use] + pub fn get_surface(&self, surface_id: u16) -> Option<&Surface> { + self.surfaces.get(surface_id) + } + + /// Get all surface IDs + pub fn surface_ids(&self) -> impl Iterator<Item = u16> + '_ { + self.surfaces.surface_ids() + } + + // ======================================================================== + // Resize Handling + // ======================================================================== + + /// Resize the graphics output buffer + /// + /// This initiates a resize sequence: + /// 1. Sends ResetGraphics with new dimensions + /// 2. Deletes existing surfaces + /// 3. Transitions to Ready state + /// + /// After calling this, create new surfaces for the new dimensions.
+ pub fn resize(&mut self, width: u16, height: u16) { + self.resize_with_monitors(width, height, Vec::new()); + } + + /// Resize with explicit monitor configuration + pub fn resize_with_monitors(&mut self, width: u16, height: u16, monitors: Vec<Monitor>) { + if self.state != ServerState::Ready { + debug!("Cannot resize: not in Ready state"); + return; + } + + // RDPGFX_RESET_GRAPHICS_PDU is fixed at 340 bytes, limiting to 16 monitors. + if monitors.len() > 16 { + warn!( + count = monitors.len(), + "Too many monitors for ResetGraphicsPdu (max 16)" + ); + return; + } + + debug!(width, height, monitors = monitors.len(), "Initiating resize"); + + self.state = ServerState::Resizing; + self.output_width = width; + self.output_height = height; + + let surface_ids: Vec<_> = self.surfaces.surface_ids().collect(); + for id in surface_ids { + self.delete_surface(id); + } + + self.frames.clear(); + + self.output_queue.push_back(GfxPdu::ResetGraphics(ResetGraphicsPdu { + width: u32::from(width), + height: u32::from(height), + monitors, + })); + + self.state = ServerState::Ready; + } + + // ======================================================================== + // Flow Control + // ======================================================================== + + /// Check if backpressure should be applied + /// + /// Returns `true` if too many frames are in flight and the caller + /// should drop or delay new frames.
+ #[must_use] + pub fn should_backpressure(&self) -> bool { + self.frames.should_backpressure() + } + + /// Get the number of frames currently in flight (awaiting ACK) + #[must_use] + pub fn frames_in_flight(&self) -> u32 { + self.frames.in_flight() + } + + /// Get the last reported client queue depth + #[must_use] + pub fn client_queue_depth(&self) -> u32 { + self.frames.client_queue_depth() + } + + /// Set the maximum frames in flight before backpressure + pub fn set_max_frames_in_flight(&mut self, max: u32) { + self.frames.set_max_in_flight(max); + } + + // ======================================================================== + // Frame Sending + // ======================================================================== + + /// Convert timestamp in milliseconds to Timestamp struct + #[expect( + clippy::as_conversions, + reason = "arithmetic results bounded and fit in target types" + )] + fn make_timestamp(timestamp_ms: u32) -> Timestamp { + Timestamp { + milliseconds: (timestamp_ms % 1000) as u16, + seconds: ((timestamp_ms / 1000) % 60) as u8, + minutes: ((timestamp_ms / 60000) % 60) as u8, + hours: ((timestamp_ms / 3600000) % 24) as u16, + } + } + + /// Compute bounding rectangle from regions + fn compute_dest_rect(regions: &[Avc420Region], default_width: u16, default_height: u16) -> InclusiveRectangle { + if let Some(first) = regions.first() { + let mut left = first.left; + let mut top = first.top; + let mut right = first.right; + let mut bottom = first.bottom; + + for r in regions.iter().skip(1) { + left = left.min(r.left); + top = top.min(r.top); + right = right.max(r.right); + bottom = bottom.max(r.bottom); + } + + InclusiveRectangle { + left, + top, + right, + bottom, + } + } else { + InclusiveRectangle { + left: 0, + top: 0, + right: default_width.saturating_sub(1), + bottom: default_height.saturating_sub(1), + } + } + } + + /// Queue an H.264 AVC420 frame for transmission + /// + /// Returns `Some(frame_id)` if queued, `None` if backpressure is 
active, + /// server not ready, or AVC420 not supported. + pub fn send_avc420_frame( + &mut self, + surface_id: u16, + h264_data: &[u8], + regions: &[Avc420Region], + timestamp_ms: u32, + ) -> Option<u32> { + if !self.is_ready() { + return None; + } + if !self.supports_avc420() { + return None; + } + if self.should_backpressure() { + return None; + } + + let surface = self.surfaces.get(surface_id)?; + + let timestamp = Self::make_timestamp(timestamp_ms); + let frame_id = self.frames.begin_frame(timestamp); + + let encoded_stream = encode_avc420_bitmap_stream(regions, h264_data); + let target_rect = Self::compute_dest_rect(regions, surface.width, surface.height); + + // MS-RDPEGFX requires three-PDU sequence per frame + self.output_queue + .push_back(GfxPdu::StartFrame(StartFramePdu { timestamp, frame_id })); + + self.output_queue.push_back(GfxPdu::WireToSurface1(WireToSurface1Pdu { + surface_id, + codec_id: Codec1Type::Avc420, + pixel_format: surface.pixel_format, + destination_rectangle: target_rect, + bitmap_data: encoded_stream, + })); + + self.output_queue.push_back(GfxPdu::EndFrame(EndFramePdu { frame_id })); + + Some(frame_id) + } + + /// Queue an H.264 AVC444 frame for transmission + /// + /// AVC444 uses two streams: luma (Y) and chroma (UV). Set `chroma_data` to + /// `None` for luma-only transmission. + /// + /// Returns `Some(frame_id)` if queued, `None` if not supported or backpressured.
+ pub fn send_avc444_frame( + &mut self, + surface_id: u16, + luma_data: &[u8], + luma_regions: &[Avc420Region], + chroma_data: Option<&[u8]>, + chroma_regions: Option<&[Avc420Region]>, + timestamp_ms: u32, + ) -> Option<u32> { + if !self.is_ready() { + return None; + } + if !self.supports_avc444() { + return None; + } + if self.should_backpressure() { + return None; + } + + let surface = self.surfaces.get(surface_id)?; + + let timestamp = Self::make_timestamp(timestamp_ms); + let frame_id = self.frames.begin_frame(timestamp); + + let luma_rectangles: Vec<_> = luma_regions.iter().map(Avc420Region::to_rectangle).collect(); + let luma_quant_vals: Vec<_> = luma_regions.iter().map(Avc420Region::to_quant_quality).collect(); + + let stream1 = Avc420BitmapStream { + rectangles: luma_rectangles, + quant_qual_vals: luma_quant_vals, + data: luma_data, + }; + + let (encoding, stream2) = if let (Some(chroma), Some(chroma_regs)) = (chroma_data, chroma_regions) { + let chroma_rectangles: Vec<_> = chroma_regs.iter().map(Avc420Region::to_rectangle).collect(); + let chroma_quant_vals: Vec<_> = chroma_regs.iter().map(Avc420Region::to_quant_quality).collect(); + + ( + Encoding::LUMA_AND_CHROMA, + Some(Avc420BitmapStream { + rectangles: chroma_rectangles, + quant_qual_vals: chroma_quant_vals, + data: chroma, + }), + ) + } else { + (Encoding::LUMA, None) + }; + + let avc444_stream = Avc444BitmapStream { + encoding, + stream1, + stream2, + }; + + let encoded_stream = encode_avc444_bitmap_stream(&avc444_stream); + let target_rect = Self::compute_dest_rect(luma_regions, surface.width, surface.height); + + self.output_queue + .push_back(GfxPdu::StartFrame(StartFramePdu { timestamp, frame_id })); + + self.output_queue.push_back(GfxPdu::WireToSurface1(WireToSurface1Pdu { + surface_id, + codec_id: Codec1Type::Avc444, + pixel_format: surface.pixel_format, + destination_rectangle: target_rect, + bitmap_data: encoded_stream, + })); + + self.output_queue.push_back(GfxPdu::EndFrame(EndFramePdu {
frame_id })); + + Some(frame_id) + } + + // ======================================================================== + // Output Management + // ======================================================================== + + /// Drain the output queue and return PDUs to send + /// + /// Call this method to get pending PDUs that need to be sent to the client. + #[expect(clippy::as_conversions, reason = "Box to Box coercion")] + pub fn drain_output(&mut self) -> Vec<DvcMessage> { + self.output_queue + .drain(..) + .map(|pdu| Box::new(pdu) as DvcMessage) + .collect() + } + + /// Check if there are pending PDUs to send + #[must_use] + pub fn has_pending_output(&self) -> bool { + !self.output_queue.is_empty() + } + + // ======================================================================== + // Internal Message Handlers + // ======================================================================== + + fn handle_capabilities_advertise(&mut self, pdu: CapabilitiesAdvertisePdu) { + self.handler.capabilities_advertise(&pdu); + let server_caps = self.handler.preferred_capabilities(); + + // When no version overlaps with server preferences, confirm the client's + // highest-priority capability to avoid confirming a version the client + // did not advertise.
+ let negotiated = negotiate_capabilities(&pdu.0, &server_caps).unwrap_or_else(|| { + warn!("No capability match with server preferences, selecting client's highest version"); + let mut client_sorted = pdu.0.clone(); + client_sorted.sort_by_key(|cap| core::cmp::Reverse(capability_priority(cap))); + client_sorted.into_iter().next().unwrap_or(CapabilitySet::V8 { + flags: CapabilitiesV8Flags::empty(), + }) + }); + + self.codec_caps = CodecCapabilities::from_capability_set(&negotiated); + self.negotiated_caps = Some(negotiated.clone()); + + self.output_queue + .push_back(GfxPdu::CapabilitiesConfirm(CapabilitiesConfirmPdu(negotiated.clone()))); + + self.state = ServerState::Ready; + self.handler.on_ready(&negotiated); + } + + fn handle_frame_acknowledge(&mut self, pdu: FrameAcknowledgePdu) { + let queue_depth = pdu.queue_depth.to_u32(); + + if let Some(info) = self.frames.acknowledge(pdu.frame_id, queue_depth) { + trace!(frame_id = pdu.frame_id, latency = ?info.sent_at.elapsed()); + } + + self.handler.on_frame_ack(pdu.frame_id, queue_depth); + } + + fn handle_qoe_frame_acknowledge(&mut self, pdu: QoeFrameAcknowledgePdu) { + let metrics = QoeMetrics { + frame_id: pdu.frame_id, + timestamp: pdu.timestamp, + time_diff_se: pdu.time_diff_se, + time_diff_dr: pdu.time_diff_dr, + }; + + self.handler.on_qoe_metrics(metrics); + } + + fn handle_cache_import_offer(&mut self, pdu: CacheImportOfferPdu) { + let accepted = self.handler.on_cache_import_offer(&pdu); + + self.output_queue + .push_back(GfxPdu::CacheImportReply(CacheImportReplyPdu { cache_slots: accepted })); + } +} + +impl_as_any!(GraphicsPipelineServer); + +impl DvcProcessor for GraphicsPipelineServer { + fn channel_name(&self) -> &str { + CHANNEL_NAME + } + + fn start(&mut self, _channel_id: u32) -> PduResult<Vec<DvcMessage>> { + // Server waits for client CapabilitiesAdvertise before sending anything + Ok(vec![]) + } + + fn close(&mut self, _channel_id: u32) { + self.state = ServerState::Closed; + self.handler.on_close(); + } + + fn
process(&mut self, _channel_id: u32, payload: &[u8]) -> PduResult<Vec<DvcMessage>> { + let pdu = decode(payload).map_err(|e| decode_err!(e))?; + + match pdu { + GfxPdu::CapabilitiesAdvertise(pdu) => { + self.handle_capabilities_advertise(pdu); + } + GfxPdu::FrameAcknowledge(pdu) => { + self.handle_frame_acknowledge(pdu); + } + GfxPdu::QoeFrameAcknowledge(pdu) => { + self.handle_qoe_frame_acknowledge(pdu); + } + GfxPdu::CacheImportOffer(pdu) => { + self.handle_cache_import_offer(pdu); + } + _ => { + warn!(?pdu, "Unhandled client GFX PDU"); + } + } + + Ok(self.drain_output()) + } +} + +impl DvcServerProcessor for GraphicsPipelineServer {} + +// ============================================================================ +// AVC444 Encoding Helper +// ============================================================================ + +/// Encode an AVC444 bitmap stream to bytes +fn encode_avc444_bitmap_stream(stream: &Avc444BitmapStream<'_>) -> Vec<u8> { + use ironrdp_pdu::{Encode as _, WriteCursor}; + + let size = stream.size(); + let mut buf = vec![0u8; size]; + let mut cursor = WriteCursor::new(&mut buf); + + stream + .encode(&mut cursor) + .expect("encode_avc444_bitmap_stream: encoding failed"); + + buf +} diff --git a/crates/ironrdp-testsuite-core/Cargo.toml b/crates/ironrdp-testsuite-core/Cargo.toml index e2e29a1d3..7c42a291d 100644 --- a/crates/ironrdp-testsuite-core/Cargo.toml +++ b/crates/ironrdp-testsuite-core/Cargo.toml @@ -38,6 +38,7 @@ ironrdp-cliprdr.path = "../ironrdp-cliprdr" ironrdp-connector.path = "../ironrdp-connector" ironrdp-displaycontrol.path = "../ironrdp-displaycontrol" ironrdp-dvc.path = "../ironrdp-dvc" +ironrdp-egfx.path = "../ironrdp-egfx" ironrdp-fuzzing.path = "../ironrdp-fuzzing" ironrdp-graphics.path = "../ironrdp-graphics" ironrdp-input.path = "../ironrdp-input" diff --git a/crates/ironrdp-testsuite-core/tests/egfx/mod.rs b/crates/ironrdp-testsuite-core/tests/egfx/mod.rs new file mode 100644 index 000000000..df4adf9fb --- /dev/null +++
b/crates/ironrdp-testsuite-core/tests/egfx/mod.rs @@ -0,0 +1 @@ +mod server; diff --git a/crates/ironrdp-testsuite-core/tests/egfx/server.rs b/crates/ironrdp-testsuite-core/tests/egfx/server.rs new file mode 100644 index 000000000..dcc192d65 --- /dev/null +++ b/crates/ironrdp-testsuite-core/tests/egfx/server.rs @@ -0,0 +1,249 @@ +use ironrdp_core::{Encode, WriteCursor}; +use ironrdp_dvc::DvcProcessor as _; +use ironrdp_egfx::pdu::{ + Avc420Region, CapabilitiesAdvertisePdu, CapabilitiesV10Flags, CapabilitiesV81Flags, CapabilitiesV8Flags, + CapabilitySet, GfxPdu, +}; +use ironrdp_egfx::server::{GraphicsPipelineHandler, GraphicsPipelineServer, QoeMetrics, Surface}; + +// ============================================================================ +// Test Handler +// ============================================================================ + +struct TestHandler { + ready_called: bool, + negotiated: Option<CapabilitySet>, + frame_acks: Vec<(u32, u32)>, + surfaces_created: Vec<u16>, + surfaces_deleted: Vec<u16>, +} + +impl TestHandler { + fn new() -> Self { + Self { + ready_called: false, + negotiated: None, + frame_acks: Vec::new(), + surfaces_created: Vec::new(), + surfaces_deleted: Vec::new(), + } + } +} + +impl GraphicsPipelineHandler for TestHandler { + fn capabilities_advertise(&mut self, _pdu: &CapabilitiesAdvertisePdu) {} + + fn on_ready(&mut self, negotiated: &CapabilitySet) { + self.ready_called = true; + self.negotiated = Some(negotiated.clone()); + } + + fn on_frame_ack(&mut self, frame_id: u32, queue_depth: u32) { + self.frame_acks.push((frame_id, queue_depth)); + } + + fn on_qoe_metrics(&mut self, _metrics: QoeMetrics) {} + + fn on_surface_created(&mut self, surface: &Surface) { + self.surfaces_created.push(surface.id); + } + + fn on_surface_deleted(&mut self, surface_id: u16) { + self.surfaces_deleted.push(surface_id); + } +} + +// ============================================================================ +// Helper Functions +//
============================================================================ + +/// Encode a PDU to bytes for sending to server's process() method +fn encode_pdu<T: Encode>(pdu: &T) -> Vec<u8> { + let mut buf = vec![0u8; pdu.size()]; + let mut cursor = WriteCursor::new(&mut buf); + pdu.encode(&mut cursor).expect("encode failed"); + buf +} + +// ============================================================================ +// Tests +// ============================================================================ + +#[test] +fn test_server_creation() { + let handler = Box::new(TestHandler::new()); + let server = GraphicsPipelineServer::new(handler); + + assert!(!server.is_ready()); + assert_eq!(server.frames_in_flight(), 0); + assert!(!server.supports_avc420()); + assert!(!server.supports_avc444()); +} + +#[test] +fn test_capability_negotiation_v8() { + let handler = Box::new(TestHandler::new()); + let mut server = GraphicsPipelineServer::new(handler); + + // Simulate client sending CapabilitiesAdvertise + let client_caps_pdu = GfxPdu::CapabilitiesAdvertise(CapabilitiesAdvertisePdu(vec![CapabilitySet::V8 { + flags: CapabilitiesV8Flags::SMALL_CACHE, + }])); + + let payload = encode_pdu(&client_caps_pdu); + let output = server.process(0, &payload).expect("process failed"); + + // Server should be ready now + assert!(server.is_ready()); + + // Should output CapabilitiesConfirm + assert_eq!(output.len(), 1); +} + +#[test] +fn test_capability_negotiation_v81_avc420() { + let handler = Box::new(TestHandler::new()); + let mut server = GraphicsPipelineServer::new(handler); + + let client_caps_pdu = GfxPdu::CapabilitiesAdvertise(CapabilitiesAdvertisePdu(vec![CapabilitySet::V8_1 { + flags: CapabilitiesV81Flags::AVC420_ENABLED | CapabilitiesV81Flags::SMALL_CACHE, + }])); + + let payload = encode_pdu(&client_caps_pdu); + let _output = server.process(0, &payload).expect("process failed"); + + assert!(server.is_ready()); + assert!(server.supports_avc420()); + assert!(!server.supports_avc444()); +}
+ +#[test] +fn test_capability_negotiation_v10_avc444() { + let handler = Box::new(TestHandler::new()); + let mut server = GraphicsPipelineServer::new(handler); + + let client_caps_pdu = GfxPdu::CapabilitiesAdvertise(CapabilitiesAdvertisePdu(vec![CapabilitySet::V10 { + flags: CapabilitiesV10Flags::SMALL_CACHE, + }])); + + let payload = encode_pdu(&client_caps_pdu); + let _output = server.process(0, &payload).expect("process failed"); + + assert!(server.is_ready()); + assert!(server.supports_avc420()); + assert!(server.supports_avc444()); +} + +#[test] +fn test_server_not_ready_before_capabilities() { + let handler = Box::new(TestHandler::new()); + let mut server = GraphicsPipelineServer::new(handler); + + // Server should not accept frames before capability negotiation + let h264_data = vec![0x00, 0x00, 0x00, 0x01, 0x67]; + let regions = vec![Avc420Region::full_frame(1920, 1080, 22)]; + + let result = server.send_avc420_frame(0, &h264_data, ®ions, 0); + assert!(result.is_none()); +} + +#[test] +fn test_surface_lifecycle() { + let handler = Box::new(TestHandler::new()); + let mut server = GraphicsPipelineServer::new(handler); + + // Negotiate capabilities first + let client_caps_pdu = GfxPdu::CapabilitiesAdvertise(CapabilitiesAdvertisePdu(vec![CapabilitySet::V8_1 { + flags: CapabilitiesV81Flags::AVC420_ENABLED, + }])); + let payload = encode_pdu(&client_caps_pdu); + let _output = server.process(0, &payload).expect("process failed"); + + assert!(server.is_ready()); + + // Create surface + let surface_id = server.create_surface(1920, 1080); + assert!(surface_id.is_some()); + let sid = surface_id.unwrap(); + + // Verify surface exists + let surface = server.get_surface(sid); + assert!(surface.is_some()); + assert_eq!(surface.unwrap().width, 1920); + assert_eq!(surface.unwrap().height, 1080); + + // Map to output + assert!(server.map_surface_to_output(sid, 0, 0)); + + // Delete surface + assert!(server.delete_surface(sid)); + assert!(server.get_surface(sid).is_none()); 
+ + // Drain output (should have CreateSurface, MapSurfaceToOutput, DeleteSurface PDUs) + let output = server.drain_output(); + assert_eq!(output.len(), 3); +} + +#[test] +fn test_resize() { + let handler = Box::new(TestHandler::new()); + let mut server = GraphicsPipelineServer::new(handler); + + // Negotiate capabilities + let client_caps_pdu = GfxPdu::CapabilitiesAdvertise(CapabilitiesAdvertisePdu(vec![CapabilitySet::V8 { + flags: CapabilitiesV8Flags::SMALL_CACHE, + }])); + let payload = encode_pdu(&client_caps_pdu); + let _output = server.process(0, &payload).expect("process failed"); + + // Create a surface + let surface_id = server.create_surface(1920, 1080).unwrap(); + + // Resize + server.resize(2560, 1440); + + // Surface should be deleted + assert!(server.get_surface(surface_id).is_none()); + + // Output dimensions should be updated + assert_eq!(server.output_dimensions(), (2560, 1440)); + + // Should have output PDUs + assert!(server.has_pending_output()); +} + +#[test] +fn test_frame_flow_control() { + let handler = Box::new(TestHandler::new()); + let mut server = GraphicsPipelineServer::new(handler); + server.set_max_frames_in_flight(2); + + // Negotiate capabilities with AVC420 + let client_caps_pdu = GfxPdu::CapabilitiesAdvertise(CapabilitiesAdvertisePdu(vec![CapabilitySet::V8_1 { + flags: CapabilitiesV81Flags::AVC420_ENABLED, + }])); + let payload = encode_pdu(&client_caps_pdu); + let _output = server.process(0, &payload).expect("process failed"); + + // Create surface + let surface_id = server.create_surface(1920, 1080).unwrap(); + server.drain_output(); // Clear setup PDUs + + let h264_data = vec![0x00, 0x00, 0x00, 0x01, 0x67]; + let regions = vec![Avc420Region::full_frame(1920, 1080, 22)]; + + // First two frames should succeed + let frame1 = server.send_avc420_frame(surface_id, &h264_data, ®ions, 0); + assert!(frame1.is_some()); + + let frame2 = server.send_avc420_frame(surface_id, &h264_data, ®ions, 16); + assert!(frame2.is_some()); + + // Check 
backpressure is active + assert!(server.should_backpressure()); + assert_eq!(server.frames_in_flight(), 2); + + // Third frame should fail due to backpressure + let frame3 = server.send_avc420_frame(surface_id, &h264_data, ®ions, 33); + assert!(frame3.is_none()); +} diff --git a/crates/ironrdp-testsuite-core/tests/main.rs b/crates/ironrdp-testsuite-core/tests/main.rs index ba011f85c..b8f8e8542 100644 --- a/crates/ironrdp-testsuite-core/tests/main.rs +++ b/crates/ironrdp-testsuite-core/tests/main.rs @@ -15,6 +15,7 @@ mod clipboard; mod displaycontrol; mod dvc; +mod egfx; mod fuzz_regression; mod graphics; mod input;