From d6b9ddc1f387d10c121c57e635227a4e4b69eda9 Mon Sep 17 00:00:00 2001 From: Derek Zhang Date: Tue, 21 Oct 2025 10:58:32 -0400 Subject: [PATCH 001/227] fix --- src/sinks/util/buffer/metrics/normalize.rs | 90 ++++++++++++++++++++-- 1 file changed, 82 insertions(+), 8 deletions(-) diff --git a/src/sinks/util/buffer/metrics/normalize.rs b/src/sinks/util/buffer/metrics/normalize.rs index c244506508f18..e2a4eeea9fe35 100644 --- a/src/sinks/util/buffer/metrics/normalize.rs +++ b/src/sinks/util/buffer/metrics/normalize.rs @@ -15,6 +15,8 @@ use vector_lib::{ }, }; +use tracing::{debug, error, info, trace};s + #[derive(Debug, Snafu, PartialEq, Eq)] pub enum NormalizerError { #[snafu(display("`max_bytes` must be greater than zero"))] @@ -287,16 +289,29 @@ impl CapacityPolicy { pub fn free_item(&mut self, series: &MetricSeries, entry: &MetricEntry) { if self.max_bytes.is_some() { let freed_memory = self.item_size(series, entry); + info!( + message = "Freeing memory for item", + series_name = %series.name, + freed_bytes = freed_memory, + current_memory = self.current_memory, + max_memory = ?self.max_bytes + ); self.remove_memory(freed_memory); } } /// Updates memory tracking. - const fn replace_memory(&mut self, old_bytes: usize, new_bytes: usize) { - self.current_memory = self - .current_memory - .saturating_sub(old_bytes) - .saturating_add(new_bytes); + fn replace_memory(&mut self, old_bytes: usize, new_bytes: usize) { + let new_total = self.current_memory.saturating_sub(old_bytes).saturating_add(new_bytes); + info!( + message = "Updating memory tracking", + old_bytes = old_bytes, + new_bytes = new_bytes, + old_total = self.current_memory, + new_total = new_total, + max_memory = ?self.max_bytes + ); + self.current_memory = new_total; } /// Checks if the current state exceeds memory limits. @@ -318,13 +333,40 @@ impl CapacityPolicy { } /// Returns true if any limits are currently exceeded. 
- const fn needs_eviction(&self, entry_count: usize) -> bool { - self.exceeds_memory_limit() || self.exceeds_entry_limit(entry_count) + fn needs_eviction(&self, entry_count: usize) -> bool { + let memory_exceeded = self.exceeds_memory_limit(); + let entries_exceeded = self.exceeds_entry_limit(entry_count); + + if memory_exceeded || entries_exceeded { + info!( + message = "Eviction needed", + memory_exceeded = memory_exceeded, + entries_exceeded = entries_exceeded, + current_memory = self.current_memory, + max_memory = ?self.max_bytes, + entry_count = entry_count, + max_entries = ?self.max_events + ); + } + + memory_exceeded || entries_exceeded } /// Gets the total memory size of entry/series, excluding LRU cache overhead. pub fn item_size(&self, series: &MetricSeries, entry: &MetricEntry) -> usize { - entry.allocated_bytes() + series.allocated_bytes() + let series_size = series.allocated_bytes(); + let entry_size = entry.allocated_bytes(); + let total_size = series_size + entry_size; + + info!( + message = "Calculating item size", + series_name = %series.name, + series_size = series_size, + entry_size = entry_size, + total_size = total_size + ); + + total_size } } @@ -462,15 +504,38 @@ impl MetricSet { let Some(ref mut capacity_policy) = self.capacity_policy else { return; // No capacity limits configured }; + + info!( + message = "Checking capacity policy", + current_memory = capacity_policy.current_memory(), + max_memory = ?capacity_policy.max_bytes, + current_entries = self.inner.len(), + max_entries = ?capacity_policy.max_events + ); // Keep evicting until we're within limits while capacity_policy.needs_eviction(self.inner.len()) { if let Some((series, entry)) = self.inner.pop_lru() { + info!( + message = "Evicting item due to capacity limits", + series_name = %series.name, + item_size = capacity_policy.item_size(&series, &entry), + current_memory = capacity_policy.current_memory(), + entry_count = self.inner.len() + ); capacity_policy.free_item(&series, 
&entry); } else { break; // No more entries to evict } } + + info!( + message = "After enforcement", + current_memory = capacity_policy.current_memory(), + max_memory = ?capacity_policy.max_bytes, + current_entries = self.inner.len(), + max_entries = ?capacity_policy.max_events + ); } /// Perform TTL cleanup if configured and needed. @@ -523,6 +588,15 @@ impl MetricSet { return; // No capacity limits configured, return immediately }; + info!( + message = "Inserting entry with tracking", + series_name = %series.name, + current_memory = capacity_policy.current_memory(), + max_memory = ?capacity_policy.max_bytes, + current_entries = self.inner.len(), + max_entries = ?capacity_policy.max_events + ); + // Handle differently based on whether we need to track memory if capacity_policy.max_bytes.is_some() { // When tracking memory, we need to calculate sizes before and after From 448d59eb3a2e543a6c1235696daa31698b25f208 Mon Sep 17 00:00:00 2001 From: Derek Zhang Date: Tue, 21 Oct 2025 12:52:10 -0400 Subject: [PATCH 002/227] fix memory tracking final alloc --- lib/vector-core/src/event/metadata.rs | 45 ++++++++- lib/vector-core/src/event/metric/value.rs | 19 ++-- src/sinks/util/buffer/metrics/normalize.rs | 105 +++++++++++++++++++-- 3 files changed, 148 insertions(+), 21 deletions(-) diff --git a/lib/vector-core/src/event/metadata.rs b/lib/vector-core/src/event/metadata.rs index cd9cecbe5501c..e9459e82dd690 100644 --- a/lib/vector-core/src/event/metadata.rs +++ b/lib/vector-core/src/event/metadata.rs @@ -273,10 +273,47 @@ fn default_schema_definition() -> Arc { impl ByteSizeOf for EventMetadata { fn allocated_bytes(&self) -> usize { - // NOTE we don't count the `str` here because it's allocated somewhere - // else. We're just moving around the pointer, which is already captured - // by `ByteSizeOf::size_of`. 
- self.0.finalizers.allocated_bytes() + // Always count the Arc pointer itself + let mut size = std::mem::size_of::>(); + + // If we're the only reference to the Inner, count all its contents too + // This ensures we properly account for shared vs owned memory + if Arc::strong_count(&self.0) == 1 { + // Count the finalizers + size += self.0.finalizers.allocated_bytes(); + + // Count the value which might be a complex object + size += match &self.0.value { + Value::Object(map) => map.allocated_bytes(), + Value::Array(array) => array.allocated_bytes(), + Value::Bytes(s) => s.len(), + _ => 0, // Other value types don't allocate + }; + + // Count the secrets map + size += self.0.secrets.0.len() * (std::mem::size_of::() + 32); // Estimate for each key-value pair + + // Count dropped fields map + size += self.0.dropped_fields.len() * std::mem::size_of::<(String, Value)>(); + + // Count optional fields if present + if let Some(source_id) = &self.0.source_id { + size += std::mem::size_of_val(&**source_id); + } + + if let Some(source_type) = &self.0.source_type { + size += source_type.len(); + } + + if let Some(upstream_id) = &self.0.upstream_id { + size += std::mem::size_of_val(&**upstream_id); + } + + // Count the Inner struct itself + size += std::mem::size_of::(); + } + + size } } diff --git a/lib/vector-core/src/event/metric/value.rs b/lib/vector-core/src/event/metric/value.rs index ce21e80bbec24..de17d47b9e023 100644 --- a/lib/vector-core/src/event/metric/value.rs +++ b/lib/vector-core/src/event/metric/value.rs @@ -360,12 +360,13 @@ impl MetricValue { impl ByteSizeOf for MetricValue { fn allocated_bytes(&self) -> usize { match self { - Self::Counter { .. } | Self::Gauge { .. } => 0, - Self::Set { values } => values.allocated_bytes(), - Self::Distribution { samples, .. } => samples.allocated_bytes(), - Self::AggregatedHistogram { buckets, .. } => buckets.allocated_bytes(), - Self::AggregatedSummary { quantiles, .. 
} => quantiles.allocated_bytes(), - Self::Sketch { sketch } => sketch.allocated_bytes(), + // Account for the f64 value plus enum variant tag + Self::Counter { .. } | Self::Gauge { .. } => std::mem::size_of::(), + Self::Set { values } => values.allocated_bytes() + std::mem::size_of_val(values), + Self::Distribution { samples, .. } => samples.allocated_bytes() + std::mem::size_of_val(samples), + Self::AggregatedHistogram { buckets, .. } => buckets.allocated_bytes() + std::mem::size_of_val(buckets), + Self::AggregatedSummary { quantiles, .. } => quantiles.allocated_bytes() + std::mem::size_of_val(quantiles), + Self::Sketch { sketch } => sketch.allocated_bytes() + std::mem::size_of_val(sketch), } } } @@ -596,7 +597,7 @@ impl PartialEq for Sample { impl ByteSizeOf for Sample { fn allocated_bytes(&self) -> usize { - 0 + std::mem::size_of::() // Count the f64 value and u32 rate } } @@ -670,7 +671,7 @@ impl PartialEq for Bucket { impl ByteSizeOf for Bucket { fn allocated_bytes(&self) -> usize { - 0 + std::mem::size_of::() // Count the f64 upper_limit and u64 count } } @@ -739,6 +740,6 @@ impl Quantile { impl ByteSizeOf for Quantile { fn allocated_bytes(&self) -> usize { - 0 + std::mem::size_of::() // Count the two f64 fields } } diff --git a/src/sinks/util/buffer/metrics/normalize.rs b/src/sinks/util/buffer/metrics/normalize.rs index e2a4eeea9fe35..0a9ca1953539f 100644 --- a/src/sinks/util/buffer/metrics/normalize.rs +++ b/src/sinks/util/buffer/metrics/normalize.rs @@ -15,7 +15,7 @@ use vector_lib::{ }, }; -use tracing::{debug, error, info, trace};s +use tracing::{info}; #[derive(Debug, Snafu, PartialEq, Eq)] pub enum NormalizerError { @@ -207,7 +207,25 @@ pub struct MetricEntry { impl ByteSizeOf for MetricEntry { fn allocated_bytes(&self) -> usize { - self.data.allocated_bytes() + self.metadata.allocated_bytes() + // Calculate the size of the data and metadata + let data_size = self.data.allocated_bytes(); + let metadata_size = self.metadata.allocated_bytes(); + + // 
Include struct overhead - size of self without double-counting fields + // that we already accounted for in their respective allocated_bytes() calls + let struct_size = std::mem::size_of::(); + + let total = data_size + metadata_size + struct_size; + + info!( + message = "Entry allocated_bytes breakdown", + data_size = data_size, + metadata_size = metadata_size, + struct_size = struct_size, + total = total + ); + + total } } @@ -291,7 +309,6 @@ impl CapacityPolicy { let freed_memory = self.item_size(series, entry); info!( message = "Freeing memory for item", - series_name = %series.name, freed_bytes = freed_memory, current_memory = self.current_memory, max_memory = ?self.max_bytes @@ -360,9 +377,11 @@ impl CapacityPolicy { info!( message = "Calculating item size", - series_name = %series.name, + series_name_str = %series.name.name, + series_hash = ?{let mut hasher = std::collections::hash::DefaultHasher::new(); std::hash::Hash::hash(series, &mut hasher); std::hash::Hasher::finish(&hasher)}, series_size = series_size, entry_size = entry_size, + metric_kind = ?entry.data.kind, total_size = total_size ); @@ -432,6 +451,27 @@ pub struct MetricSet { } impl MetricSet { + /// Debugs memory usage across the cache + pub fn debug_memory(&self) -> String { + let total_tracked = self.capacity_policy + .as_ref() + .map(|cp| cp.current_memory()) + .unwrap_or(0); + + // Calculate actual memory usage + let mut actual_usage = 0; + for (series, entry) in self.inner.iter() { + let series_size = series.allocated_bytes(); + let entry_size = entry.allocated_bytes(); + actual_usage += series_size + entry_size; + } + + format!( + "Tracked memory: {}, Actual usage: {}, Items: {}", + total_tracked, actual_usage, self.inner.len() + ) + } + /// Creates a new MetricSet with the given settings. 
pub fn new(settings: MetricSetSettings) -> Self { // Create capacity policy if any capacity limit is set @@ -516,12 +556,17 @@ impl MetricSet { // Keep evicting until we're within limits while capacity_policy.needs_eviction(self.inner.len()) { if let Some((series, entry)) = self.inner.pop_lru() { + let item_size = capacity_policy.item_size(&series, &entry); info!( message = "Evicting item due to capacity limits", - series_name = %series.name, - item_size = capacity_policy.item_size(&series, &entry), + series_name_str = %series.name.name, + series_hash = ?{let mut hasher = std::collections::hash::DefaultHasher::new(); std::hash::Hash::hash(&series, &mut hasher); std::hash::Hasher::finish(&hasher)}, + item_size = item_size, current_memory = capacity_policy.current_memory(), - entry_count = self.inner.len() + memory_limit = ?capacity_policy.max_bytes, + entry_count = self.inner.len(), + entry_limit = ?capacity_policy.max_events, + metric_kind = ?entry.data.kind ); capacity_policy.free_item(&series, &entry); } else { @@ -536,6 +581,9 @@ impl MetricSet { current_entries = self.inner.len(), max_entries = ?capacity_policy.max_events ); + + // Log memory debug info + info!(message = "Memory after enforcement", debug = %self.debug_memory()); } /// Perform TTL cleanup if configured and needed. 
@@ -588,9 +636,45 @@ impl MetricSet { return; // No capacity limits configured, return immediately }; + // Debug the series equality issue + info!( + message = "Inserting series", + series_name = ?series.name, + series_name_str = %series.name.name, // Display the actual string name + series_tags = ?series.tags, + series_hash = ?{let mut hasher = std::collections::hash::DefaultHasher::new(); std::hash::Hash::hash(&series, &mut hasher); std::hash::Hasher::finish(&hasher)} + ); + + // Iterate through entries to check for series with same name + let mut found_similar = false; + for (existing, _) in self.inner.iter() { + if existing.name == series.name && *existing != series { + found_similar = true; + let existing_hash = { + let mut hasher = std::collections::hash::DefaultHasher::new(); + std::hash::Hash::hash(existing, &mut hasher); + std::hash::Hasher::finish(&hasher) + }; + info!( + message = "Series with same name not equal", + existing_name = ?existing.name, + existing_name_str = %existing.name.name, + existing_tags = ?existing.tags, + existing_hash = ?existing_hash, + new_name = ?series.name, + new_name_str = %series.name.name, + new_tags = ?series.tags, + new_hash = ?{let mut hasher = std::collections::hash::DefaultHasher::new(); std::hash::Hash::hash(&series, &mut hasher); std::hash::Hasher::finish(&hasher)} + ); + } + } + + if !found_similar && series.name.name.len() > 0 { + info!(message = "No similar series found with name", name = ?series.name.name, name_str = %series.name.name); + } + info!( message = "Inserting entry with tracking", - series_name = %series.name, current_memory = capacity_policy.current_memory(), max_memory = ?capacity_policy.max_bytes, current_entries = self.inner.len(), @@ -605,9 +689,11 @@ impl MetricSet { if let Some(existing_entry) = self.inner.put(series.clone(), entry) { // If we had an existing entry, calculate its size and adjust memory tracking let existing_size = capacity_policy.item_size(&series, &existing_entry); + 
info!(message = "Found existing entry for series", series_name = ?series.name, series_name_str = %series.name.name); capacity_policy.replace_memory(existing_size, entry_size); } else { // No existing entry, just add the new entry's size + info!(message = "No existing entry for series", series_name = ?series.name, series_name_str = %series.name.name); capacity_policy.replace_memory(0, entry_size); } } else { @@ -617,6 +703,9 @@ impl MetricSet { // Enforce limits after insertion self.enforce_capacity_policy(); + + // Log memory debug info + info!(message = "Memory after insertion", debug = %self.debug_memory()); } /// Consumes this MetricSet and returns a vector of Metric. From e37428cd6d196f676f17cab366105602c95b0289 Mon Sep 17 00:00:00 2001 From: Derek Zhang Date: Wed, 22 Oct 2025 11:42:30 -0400 Subject: [PATCH 003/227] fix --- lib/vector-core/src/event/metadata.rs | 45 ++-------------------- lib/vector-core/src/event/metric/value.rs | 19 +++++---- src/sinks/util/buffer/metrics/normalize.rs | 42 +------------------- 3 files changed, 14 insertions(+), 92 deletions(-) diff --git a/lib/vector-core/src/event/metadata.rs b/lib/vector-core/src/event/metadata.rs index e9459e82dd690..cd9cecbe5501c 100644 --- a/lib/vector-core/src/event/metadata.rs +++ b/lib/vector-core/src/event/metadata.rs @@ -273,47 +273,10 @@ fn default_schema_definition() -> Arc { impl ByteSizeOf for EventMetadata { fn allocated_bytes(&self) -> usize { - // Always count the Arc pointer itself - let mut size = std::mem::size_of::>(); - - // If we're the only reference to the Inner, count all its contents too - // This ensures we properly account for shared vs owned memory - if Arc::strong_count(&self.0) == 1 { - // Count the finalizers - size += self.0.finalizers.allocated_bytes(); - - // Count the value which might be a complex object - size += match &self.0.value { - Value::Object(map) => map.allocated_bytes(), - Value::Array(array) => array.allocated_bytes(), - Value::Bytes(s) => s.len(), - _ => 0, 
// Other value types don't allocate - }; - - // Count the secrets map - size += self.0.secrets.0.len() * (std::mem::size_of::() + 32); // Estimate for each key-value pair - - // Count dropped fields map - size += self.0.dropped_fields.len() * std::mem::size_of::<(String, Value)>(); - - // Count optional fields if present - if let Some(source_id) = &self.0.source_id { - size += std::mem::size_of_val(&**source_id); - } - - if let Some(source_type) = &self.0.source_type { - size += source_type.len(); - } - - if let Some(upstream_id) = &self.0.upstream_id { - size += std::mem::size_of_val(&**upstream_id); - } - - // Count the Inner struct itself - size += std::mem::size_of::(); - } - - size + // NOTE we don't count the `str` here because it's allocated somewhere + // else. We're just moving around the pointer, which is already captured + // by `ByteSizeOf::size_of`. + self.0.finalizers.allocated_bytes() } } diff --git a/lib/vector-core/src/event/metric/value.rs b/lib/vector-core/src/event/metric/value.rs index de17d47b9e023..89b6909eeab47 100644 --- a/lib/vector-core/src/event/metric/value.rs +++ b/lib/vector-core/src/event/metric/value.rs @@ -360,13 +360,12 @@ impl MetricValue { impl ByteSizeOf for MetricValue { fn allocated_bytes(&self) -> usize { match self { - // Account for the f64 value plus enum variant tag - Self::Counter { .. } | Self::Gauge { .. } => std::mem::size_of::(), - Self::Set { values } => values.allocated_bytes() + std::mem::size_of_val(values), - Self::Distribution { samples, .. } => samples.allocated_bytes() + std::mem::size_of_val(samples), - Self::AggregatedHistogram { buckets, .. } => buckets.allocated_bytes() + std::mem::size_of_val(buckets), - Self::AggregatedSummary { quantiles, .. } => quantiles.allocated_bytes() + std::mem::size_of_val(quantiles), - Self::Sketch { sketch } => sketch.allocated_bytes() + std::mem::size_of_val(sketch), + Self::Counter { .. } | Self::Gauge { .. 
} => size_of::(), + Self::Set { values } => values.allocated_bytes() + size_of_val(values), + Self::Distribution { samples, .. } => samples.allocated_bytes() + size_of_val(samples), + Self::AggregatedHistogram { buckets, .. } => buckets.allocated_bytes() + size_of_val(buckets), + Self::AggregatedSummary { quantiles, .. } => quantiles.allocated_bytes() + size_of_val(quantiles), + Self::Sketch { sketch } => sketch.allocated_bytes() + size_of_val(sketch), } } } @@ -597,7 +596,7 @@ impl PartialEq for Sample { impl ByteSizeOf for Sample { fn allocated_bytes(&self) -> usize { - std::mem::size_of::() // Count the f64 value and u32 rate + size_of::() } } @@ -671,7 +670,7 @@ impl PartialEq for Bucket { impl ByteSizeOf for Bucket { fn allocated_bytes(&self) -> usize { - std::mem::size_of::() // Count the f64 upper_limit and u64 count + size_of::() } } @@ -740,6 +739,6 @@ impl Quantile { impl ByteSizeOf for Quantile { fn allocated_bytes(&self) -> usize { - std::mem::size_of::() // Count the two f64 fields + size_of::() } } diff --git a/src/sinks/util/buffer/metrics/normalize.rs b/src/sinks/util/buffer/metrics/normalize.rs index 0a9ca1953539f..6bf69c52e2b3f 100644 --- a/src/sinks/util/buffer/metrics/normalize.rs +++ b/src/sinks/util/buffer/metrics/normalize.rs @@ -213,7 +213,7 @@ impl ByteSizeOf for MetricEntry { // Include struct overhead - size of self without double-counting fields // that we already accounted for in their respective allocated_bytes() calls - let struct_size = std::mem::size_of::(); + let struct_size = size_of::(); let total = data_size + metadata_size + struct_size; @@ -378,10 +378,8 @@ impl CapacityPolicy { info!( message = "Calculating item size", series_name_str = %series.name.name, - series_hash = ?{let mut hasher = std::collections::hash::DefaultHasher::new(); std::hash::Hash::hash(series, &mut hasher); std::hash::Hasher::finish(&hasher)}, series_size = series_size, entry_size = entry_size, - metric_kind = ?entry.data.kind, total_size = total_size ); 
@@ -560,7 +558,6 @@ impl MetricSet { info!( message = "Evicting item due to capacity limits", series_name_str = %series.name.name, - series_hash = ?{let mut hasher = std::collections::hash::DefaultHasher::new(); std::hash::Hash::hash(&series, &mut hasher); std::hash::Hasher::finish(&hasher)}, item_size = item_size, current_memory = capacity_policy.current_memory(), memory_limit = ?capacity_policy.max_bytes, @@ -636,43 +633,6 @@ impl MetricSet { return; // No capacity limits configured, return immediately }; - // Debug the series equality issue - info!( - message = "Inserting series", - series_name = ?series.name, - series_name_str = %series.name.name, // Display the actual string name - series_tags = ?series.tags, - series_hash = ?{let mut hasher = std::collections::hash::DefaultHasher::new(); std::hash::Hash::hash(&series, &mut hasher); std::hash::Hasher::finish(&hasher)} - ); - - // Iterate through entries to check for series with same name - let mut found_similar = false; - for (existing, _) in self.inner.iter() { - if existing.name == series.name && *existing != series { - found_similar = true; - let existing_hash = { - let mut hasher = std::collections::hash::DefaultHasher::new(); - std::hash::Hash::hash(existing, &mut hasher); - std::hash::Hasher::finish(&hasher) - }; - info!( - message = "Series with same name not equal", - existing_name = ?existing.name, - existing_name_str = %existing.name.name, - existing_tags = ?existing.tags, - existing_hash = ?existing_hash, - new_name = ?series.name, - new_name_str = %series.name.name, - new_tags = ?series.tags, - new_hash = ?{let mut hasher = std::collections::hash::DefaultHasher::new(); std::hash::Hash::hash(&series, &mut hasher); std::hash::Hasher::finish(&hasher)} - ); - } - } - - if !found_similar && series.name.name.len() > 0 { - info!(message = "No similar series found with name", name = ?series.name.name, name_str = %series.name.name); - } - info!( message = "Inserting entry with tracking", current_memory = 
capacity_policy.current_memory(), From 8861ced1c1d3feb05410abc1a6d9cffe5384b668 Mon Sep 17 00:00:00 2001 From: Derek Zhang Date: Wed, 22 Oct 2025 14:21:43 -0400 Subject: [PATCH 004/227] change to trace level --- src/sinks/util/buffer/metrics/normalize.rs | 28 +++++++++++----------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/src/sinks/util/buffer/metrics/normalize.rs b/src/sinks/util/buffer/metrics/normalize.rs index 6bf69c52e2b3f..dc1a98d5d77e5 100644 --- a/src/sinks/util/buffer/metrics/normalize.rs +++ b/src/sinks/util/buffer/metrics/normalize.rs @@ -15,7 +15,7 @@ use vector_lib::{ }, }; -use tracing::{info}; +use tracing::{trace}; #[derive(Debug, Snafu, PartialEq, Eq)] pub enum NormalizerError { @@ -217,7 +217,7 @@ impl ByteSizeOf for MetricEntry { let total = data_size + metadata_size + struct_size; - info!( + trace!( message = "Entry allocated_bytes breakdown", data_size = data_size, metadata_size = metadata_size, @@ -307,7 +307,7 @@ impl CapacityPolicy { pub fn free_item(&mut self, series: &MetricSeries, entry: &MetricEntry) { if self.max_bytes.is_some() { let freed_memory = self.item_size(series, entry); - info!( + trace!( message = "Freeing memory for item", freed_bytes = freed_memory, current_memory = self.current_memory, @@ -320,7 +320,7 @@ impl CapacityPolicy { /// Updates memory tracking. 
fn replace_memory(&mut self, old_bytes: usize, new_bytes: usize) { let new_total = self.current_memory.saturating_sub(old_bytes).saturating_add(new_bytes); - info!( + trace!( message = "Updating memory tracking", old_bytes = old_bytes, new_bytes = new_bytes, @@ -355,7 +355,7 @@ impl CapacityPolicy { let entries_exceeded = self.exceeds_entry_limit(entry_count); if memory_exceeded || entries_exceeded { - info!( + trace!( message = "Eviction needed", memory_exceeded = memory_exceeded, entries_exceeded = entries_exceeded, @@ -375,7 +375,7 @@ impl CapacityPolicy { let entry_size = entry.allocated_bytes(); let total_size = series_size + entry_size; - info!( + trace!( message = "Calculating item size", series_name_str = %series.name.name, series_size = series_size, @@ -543,7 +543,7 @@ impl MetricSet { return; // No capacity limits configured }; - info!( + trace!( message = "Checking capacity policy", current_memory = capacity_policy.current_memory(), max_memory = ?capacity_policy.max_bytes, @@ -555,7 +555,7 @@ impl MetricSet { while capacity_policy.needs_eviction(self.inner.len()) { if let Some((series, entry)) = self.inner.pop_lru() { let item_size = capacity_policy.item_size(&series, &entry); - info!( + trace!( message = "Evicting item due to capacity limits", series_name_str = %series.name.name, item_size = item_size, @@ -571,7 +571,7 @@ impl MetricSet { } } - info!( + trace!( message = "After enforcement", current_memory = capacity_policy.current_memory(), max_memory = ?capacity_policy.max_bytes, @@ -580,7 +580,7 @@ impl MetricSet { ); // Log memory debug info - info!(message = "Memory after enforcement", debug = %self.debug_memory()); + trace!(message = "Memory after enforcement", debug = %self.debug_memory()); } /// Perform TTL cleanup if configured and needed. 
@@ -633,7 +633,7 @@ impl MetricSet { return; // No capacity limits configured, return immediately }; - info!( + trace!( message = "Inserting entry with tracking", current_memory = capacity_policy.current_memory(), max_memory = ?capacity_policy.max_bytes, @@ -649,11 +649,11 @@ impl MetricSet { if let Some(existing_entry) = self.inner.put(series.clone(), entry) { // If we had an existing entry, calculate its size and adjust memory tracking let existing_size = capacity_policy.item_size(&series, &existing_entry); - info!(message = "Found existing entry for series", series_name = ?series.name, series_name_str = %series.name.name); + trace!(message = "Found existing entry for series", series_name = ?series.name, series_name_str = %series.name.name); capacity_policy.replace_memory(existing_size, entry_size); } else { // No existing entry, just add the new entry's size - info!(message = "No existing entry for series", series_name = ?series.name, series_name_str = %series.name.name); + trace!(message = "No existing entry for series", series_name = ?series.name, series_name_str = %series.name.name); capacity_policy.replace_memory(0, entry_size); } } else { @@ -665,7 +665,7 @@ impl MetricSet { self.enforce_capacity_policy(); // Log memory debug info - info!(message = "Memory after insertion", debug = %self.debug_memory()); + trace!(message = "Memory after insertion", debug = %self.debug_memory()); } /// Consumes this MetricSet and returns a vector of Metric. 
From dd60a34f756ce23fa2addb1bd5a5d11e66ef4b21 Mon Sep 17 00:00:00 2001 From: Derek Zhang Date: Sat, 25 Oct 2025 02:11:21 -0400 Subject: [PATCH 005/227] get lru --- src/sinks/util/buffer/metrics/normalize.rs | 31 ++++++++++++---------- 1 file changed, 17 insertions(+), 14 deletions(-) diff --git a/src/sinks/util/buffer/metrics/normalize.rs b/src/sinks/util/buffer/metrics/normalize.rs index dc1a98d5d77e5..3930d71407e2c 100644 --- a/src/sinks/util/buffer/metrics/normalize.rs +++ b/src/sinks/util/buffer/metrics/normalize.rs @@ -217,13 +217,13 @@ impl ByteSizeOf for MetricEntry { let total = data_size + metadata_size + struct_size; - trace!( - message = "Entry allocated_bytes breakdown", - data_size = data_size, - metadata_size = metadata_size, - struct_size = struct_size, - total = total - ); + // trace!( + // message = "Entry allocated_bytes breakdown", + // data_size = data_size, + // metadata_size = metadata_size, + // struct_size = struct_size, + // total = total + // ); total } @@ -375,13 +375,13 @@ impl CapacityPolicy { let entry_size = entry.allocated_bytes(); let total_size = series_size + entry_size; - trace!( - message = "Calculating item size", - series_name_str = %series.name.name, - series_size = series_size, - entry_size = entry_size, - total_size = total_size - ); + // trace!( + // message = "Calculating item size", + // series_name_str = %series.name.name, + // series_size = series_size, + // entry_size = entry_size, + // total_size = total_size + // ); total_size } @@ -635,6 +635,7 @@ impl MetricSet { trace!( message = "Inserting entry with tracking", + series_name = ?series.name, current_memory = capacity_policy.current_memory(), max_memory = ?capacity_policy.max_bytes, current_entries = self.inner.len(), @@ -651,10 +652,12 @@ impl MetricSet { let existing_size = capacity_policy.item_size(&series, &existing_entry); trace!(message = "Found existing entry for series", series_name = ?series.name, series_name_str = %series.name.name); 
capacity_policy.replace_memory(existing_size, entry_size); + self.inner.get(&series); } else { // No existing entry, just add the new entry's size trace!(message = "No existing entry for series", series_name = ?series.name, series_name_str = %series.name.name); capacity_policy.replace_memory(0, entry_size); + self.inner.get(&series); } } else { // When not tracking memory (only entry count limits), just put directly From 51bd83ed3ef0a68d46f37040615082b2493f36a2 Mon Sep 17 00:00:00 2001 From: Derek Zhang Date: Sat, 25 Oct 2025 04:16:47 -0400 Subject: [PATCH 006/227] fix log lvl --- src/sinks/util/buffer/metrics/normalize.rs | 28 +++++++++++----------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/src/sinks/util/buffer/metrics/normalize.rs b/src/sinks/util/buffer/metrics/normalize.rs index 3930d71407e2c..6d5cb6a1d0e86 100644 --- a/src/sinks/util/buffer/metrics/normalize.rs +++ b/src/sinks/util/buffer/metrics/normalize.rs @@ -15,7 +15,7 @@ use vector_lib::{ }, }; -use tracing::{trace}; +use tracing::{debug, info}; #[derive(Debug, Snafu, PartialEq, Eq)] pub enum NormalizerError { @@ -217,7 +217,7 @@ impl ByteSizeOf for MetricEntry { let total = data_size + metadata_size + struct_size; - // trace!( + // debug!( // message = "Entry allocated_bytes breakdown", // data_size = data_size, // metadata_size = metadata_size, @@ -307,7 +307,7 @@ impl CapacityPolicy { pub fn free_item(&mut self, series: &MetricSeries, entry: &MetricEntry) { if self.max_bytes.is_some() { let freed_memory = self.item_size(series, entry); - trace!( + debug!( message = "Freeing memory for item", freed_bytes = freed_memory, current_memory = self.current_memory, @@ -320,7 +320,7 @@ impl CapacityPolicy { /// Updates memory tracking. 
fn replace_memory(&mut self, old_bytes: usize, new_bytes: usize) { let new_total = self.current_memory.saturating_sub(old_bytes).saturating_add(new_bytes); - trace!( + debug!( message = "Updating memory tracking", old_bytes = old_bytes, new_bytes = new_bytes, @@ -355,7 +355,7 @@ impl CapacityPolicy { let entries_exceeded = self.exceeds_entry_limit(entry_count); if memory_exceeded || entries_exceeded { - trace!( + info!( message = "Eviction needed", memory_exceeded = memory_exceeded, entries_exceeded = entries_exceeded, @@ -375,7 +375,7 @@ impl CapacityPolicy { let entry_size = entry.allocated_bytes(); let total_size = series_size + entry_size; - // trace!( + // debug!( // message = "Calculating item size", // series_name_str = %series.name.name, // series_size = series_size, @@ -543,7 +543,7 @@ impl MetricSet { return; // No capacity limits configured }; - trace!( + debug!( message = "Checking capacity policy", current_memory = capacity_policy.current_memory(), max_memory = ?capacity_policy.max_bytes, @@ -555,7 +555,7 @@ impl MetricSet { while capacity_policy.needs_eviction(self.inner.len()) { if let Some((series, entry)) = self.inner.pop_lru() { let item_size = capacity_policy.item_size(&series, &entry); - trace!( + info!( message = "Evicting item due to capacity limits", series_name_str = %series.name.name, item_size = item_size, @@ -571,7 +571,7 @@ impl MetricSet { } } - trace!( + info!( message = "After enforcement", current_memory = capacity_policy.current_memory(), max_memory = ?capacity_policy.max_bytes, @@ -580,7 +580,7 @@ impl MetricSet { ); // Log memory debug info - trace!(message = "Memory after enforcement", debug = %self.debug_memory()); + debug!(message = "Memory after enforcement", debug = %self.debug_memory()); } /// Perform TTL cleanup if configured and needed. 
@@ -633,7 +633,7 @@ impl MetricSet { return; // No capacity limits configured, return immediately }; - trace!( + debug!( message = "Inserting entry with tracking", series_name = ?series.name, current_memory = capacity_policy.current_memory(), @@ -650,12 +650,12 @@ impl MetricSet { if let Some(existing_entry) = self.inner.put(series.clone(), entry) { // If we had an existing entry, calculate its size and adjust memory tracking let existing_size = capacity_policy.item_size(&series, &existing_entry); - trace!(message = "Found existing entry for series", series_name = ?series.name, series_name_str = %series.name.name); + debug!(message = "Found existing entry for series", series_name = ?series.name, series_name_str = %series.name.name); capacity_policy.replace_memory(existing_size, entry_size); self.inner.get(&series); } else { // No existing entry, just add the new entry's size - trace!(message = "No existing entry for series", series_name = ?series.name, series_name_str = %series.name.name); + debug!(message = "No existing entry for series", series_name = ?series.name, series_name_str = %series.name.name); capacity_policy.replace_memory(0, entry_size); self.inner.get(&series); } @@ -668,7 +668,7 @@ impl MetricSet { self.enforce_capacity_policy(); // Log memory debug info - trace!(message = "Memory after insertion", debug = %self.debug_memory()); + debug!(message = "Memory after insertion", debug = %self.debug_memory()); } /// Consumes this MetricSet and returns a vector of Metric. 
From f9e948a1548443587d4eb987e9e95fc7d844c395 Mon Sep 17 00:00:00 2001 From: Derek Zhang Date: Wed, 29 Oct 2025 22:30:13 -0400 Subject: [PATCH 007/227] fix --- src/sinks/util/buffer/metrics/normalize.rs | 43 +++++++++++----------- 1 file changed, 21 insertions(+), 22 deletions(-) diff --git a/src/sinks/util/buffer/metrics/normalize.rs b/src/sinks/util/buffer/metrics/normalize.rs index 6d5cb6a1d0e86..78d0475e50e15 100644 --- a/src/sinks/util/buffer/metrics/normalize.rs +++ b/src/sinks/util/buffer/metrics/normalize.rs @@ -370,11 +370,11 @@ impl CapacityPolicy { } /// Gets the total memory size of entry/series, excluding LRU cache overhead. - pub fn item_size(&self, series: &MetricSeries, entry: &MetricEntry) -> usize { - let series_size = series.allocated_bytes(); - let entry_size = entry.allocated_bytes(); - let total_size = series_size + entry_size; - + pub fn item_size(&self, _series: &MetricSeries, _entry: &MetricEntry) -> usize { + // let series_size = series.allocated_bytes(); + // let entry_size = entry.allocated_bytes(); + // let total_size = series_size + entry_size; + // // debug!( // message = "Calculating item size", // series_name_str = %series.name.name, @@ -382,8 +382,9 @@ impl CapacityPolicy { // entry_size = entry_size, // total_size = total_size // ); - - total_size + 200 + // + // total_size } } @@ -580,7 +581,7 @@ impl MetricSet { ); // Log memory debug info - debug!(message = "Memory after enforcement", debug = %self.debug_memory()); + info!(message = "Memory after enforcement", debug = %self.debug_memory()); } /// Perform TTL cleanup if configured and needed. 
@@ -645,20 +646,18 @@ impl MetricSet { // Handle differently based on whether we need to track memory if capacity_policy.max_bytes.is_some() { // When tracking memory, we need to calculate sizes before and after - let entry_size = capacity_policy.item_size(&series, &entry); - - if let Some(existing_entry) = self.inner.put(series.clone(), entry) { - // If we had an existing entry, calculate its size and adjust memory tracking - let existing_size = capacity_policy.item_size(&series, &existing_entry); - debug!(message = "Found existing entry for series", series_name = ?series.name, series_name_str = %series.name.name); - capacity_policy.replace_memory(existing_size, entry_size); - self.inner.get(&series); - } else { - // No existing entry, just add the new entry's size - debug!(message = "No existing entry for series", series_name = ?series.name, series_name_str = %series.name.name); - capacity_policy.replace_memory(0, entry_size); - self.inner.get(&series); - } + // let entry_size = capacity_policy.item_size(&series, &entry); + capacity_policy.replace_memory(0, 200) + // if let Some(existing_entry) = self.inner.put(series, entry) { + // // If we had an existing entry, calculate its size and adjust memory tracking + // let existing_size = capacity_policy.item_size(&series, &existing_entry); + // capacity_policy.replace_memory(existing_size, entry_size); + // self.inner.get(&series); + // } else { + // // No existing entry, just add the new entry's size + // capacity_policy.replace_memory(0, entry_size); + // self.inner.get(&series); + // } } else { // When not tracking memory (only entry count limits), just put directly self.inner.put(series, entry); From e55b610aaccd177cb4bd3d017f8e6a0af33a2481 Mon Sep 17 00:00:00 2001 From: Derek Zhang Date: Thu, 30 Oct 2025 00:01:51 -0400 Subject: [PATCH 008/227] remove some logs --- src/sinks/util/buffer/metrics/normalize.rs | 118 +++------------------ 1 file changed, 17 insertions(+), 101 deletions(-) diff --git 
a/src/sinks/util/buffer/metrics/normalize.rs b/src/sinks/util/buffer/metrics/normalize.rs index 78d0475e50e15..da629339aabb3 100644 --- a/src/sinks/util/buffer/metrics/normalize.rs +++ b/src/sinks/util/buffer/metrics/normalize.rs @@ -15,8 +15,6 @@ use vector_lib::{ }, }; -use tracing::{debug, info}; - #[derive(Debug, Snafu, PartialEq, Eq)] pub enum NormalizerError { #[snafu(display("`max_bytes` must be greater than zero"))] @@ -215,17 +213,7 @@ impl ByteSizeOf for MetricEntry { // that we already accounted for in their respective allocated_bytes() calls let struct_size = size_of::(); - let total = data_size + metadata_size + struct_size; - - // debug!( - // message = "Entry allocated_bytes breakdown", - // data_size = data_size, - // metadata_size = metadata_size, - // struct_size = struct_size, - // total = total - // ); - - total + data_size + metadata_size + struct_size } } @@ -284,7 +272,7 @@ pub struct CapacityPolicy { impl CapacityPolicy { /// Creates a new capacity policy with both memory and entry limits. - pub const fn new(max_bytes: Option, max_events: Option) -> Self { + pub fn new(max_bytes: Option, max_events: Option) -> Self { Self { max_bytes, max_events, @@ -307,28 +295,13 @@ impl CapacityPolicy { pub fn free_item(&mut self, series: &MetricSeries, entry: &MetricEntry) { if self.max_bytes.is_some() { let freed_memory = self.item_size(series, entry); - debug!( - message = "Freeing memory for item", - freed_bytes = freed_memory, - current_memory = self.current_memory, - max_memory = ?self.max_bytes - ); self.remove_memory(freed_memory); } } /// Updates memory tracking. 
fn replace_memory(&mut self, old_bytes: usize, new_bytes: usize) { - let new_total = self.current_memory.saturating_sub(old_bytes).saturating_add(new_bytes); - debug!( - message = "Updating memory tracking", - old_bytes = old_bytes, - new_bytes = new_bytes, - old_total = self.current_memory, - new_total = new_total, - max_memory = ?self.max_bytes - ); - self.current_memory = new_total; + self.current_memory = self.current_memory.saturating_sub(old_bytes).saturating_add(new_bytes);a } /// Checks if the current state exceeds memory limits. @@ -351,40 +324,12 @@ impl CapacityPolicy { /// Returns true if any limits are currently exceeded. fn needs_eviction(&self, entry_count: usize) -> bool { - let memory_exceeded = self.exceeds_memory_limit(); - let entries_exceeded = self.exceeds_entry_limit(entry_count); - - if memory_exceeded || entries_exceeded { - info!( - message = "Eviction needed", - memory_exceeded = memory_exceeded, - entries_exceeded = entries_exceeded, - current_memory = self.current_memory, - max_memory = ?self.max_bytes, - entry_count = entry_count, - max_entries = ?self.max_events - ); - } - - memory_exceeded || entries_exceeded + self.exceeds_memory_limit() || self.exceeds_entry_limit(entry_count) } /// Gets the total memory size of entry/series, excluding LRU cache overhead. 
- pub fn item_size(&self, _series: &MetricSeries, _entry: &MetricEntry) -> usize { - // let series_size = series.allocated_bytes(); - // let entry_size = entry.allocated_bytes(); - // let total_size = series_size + entry_size; - // - // debug!( - // message = "Calculating item size", - // series_name_str = %series.name.name, - // series_size = series_size, - // entry_size = entry_size, - // total_size = total_size - // ); - 200 - // - // total_size + pub fn item_size(&self, series: &MetricSeries, entry: &MetricEntry) -> usize { + series.allocated_bytes() + entry.allocated_bytes() } } @@ -543,43 +488,15 @@ impl MetricSet { let Some(ref mut capacity_policy) = self.capacity_policy else { return; // No capacity limits configured }; - - debug!( - message = "Checking capacity policy", - current_memory = capacity_policy.current_memory(), - max_memory = ?capacity_policy.max_bytes, - current_entries = self.inner.len(), - max_entries = ?capacity_policy.max_events - ); // Keep evicting until we're within limits while capacity_policy.needs_eviction(self.inner.len()) { if let Some((series, entry)) = self.inner.pop_lru() { - let item_size = capacity_policy.item_size(&series, &entry); - info!( - message = "Evicting item due to capacity limits", - series_name_str = %series.name.name, - item_size = item_size, - current_memory = capacity_policy.current_memory(), - memory_limit = ?capacity_policy.max_bytes, - entry_count = self.inner.len(), - entry_limit = ?capacity_policy.max_events, - metric_kind = ?entry.data.kind - ); capacity_policy.free_item(&series, &entry); } else { break; // No more entries to evict } } - - info!( - message = "After enforcement", - current_memory = capacity_policy.current_memory(), - max_memory = ?capacity_policy.max_bytes, - current_entries = self.inner.len(), - max_entries = ?capacity_policy.max_events - ); - // Log memory debug info info!(message = "Memory after enforcement", debug = %self.debug_memory()); } @@ -646,18 +563,17 @@ impl MetricSet { // 
Handle differently based on whether we need to track memory if capacity_policy.max_bytes.is_some() { // When tracking memory, we need to calculate sizes before and after - // let entry_size = capacity_policy.item_size(&series, &entry); - capacity_policy.replace_memory(0, 200) - // if let Some(existing_entry) = self.inner.put(series, entry) { - // // If we had an existing entry, calculate its size and adjust memory tracking - // let existing_size = capacity_policy.item_size(&series, &existing_entry); - // capacity_policy.replace_memory(existing_size, entry_size); - // self.inner.get(&series); - // } else { - // // No existing entry, just add the new entry's size - // capacity_policy.replace_memory(0, entry_size); - // self.inner.get(&series); - // } + let entry_size = capacity_policy.item_size(&series, &entry); + if let Some(existing_entry) = self.inner.put(series.clone(), entry) { + // If we had an existing entry, calculate its size and adjust memory tracking + let existing_size = capacity_policy.item_size(&series, &existing_entry); + capacity_policy.replace_memory(existing_size, entry_size); + self.inner.get(&series); + } else { + // No existing entry, just add the new entry's size + capacity_policy.replace_memory(0, entry_size); + self.inner.get(&series); + } } else { // When not tracking memory (only entry count limits), just put directly self.inner.put(series, entry); From 6f61c494fe172f5cda959d105763426590730959 Mon Sep 17 00:00:00 2001 From: Derek Zhang Date: Thu, 30 Oct 2025 00:04:46 -0400 Subject: [PATCH 009/227] remove some logs --- src/sinks/util/buffer/metrics/normalize.rs | 20 ++++++-------------- 1 file changed, 6 insertions(+), 14 deletions(-) diff --git a/src/sinks/util/buffer/metrics/normalize.rs b/src/sinks/util/buffer/metrics/normalize.rs index da629339aabb3..dbfd11c7c9a96 100644 --- a/src/sinks/util/buffer/metrics/normalize.rs +++ b/src/sinks/util/buffer/metrics/normalize.rs @@ -301,7 +301,7 @@ impl CapacityPolicy { /// Updates memory tracking. 
fn replace_memory(&mut self, old_bytes: usize, new_bytes: usize) { - self.current_memory = self.current_memory.saturating_sub(old_bytes).saturating_add(new_bytes);a + self.current_memory = self.current_memory.saturating_sub(old_bytes).saturating_add(new_bytes); } /// Checks if the current state exceeds memory limits. @@ -323,13 +323,13 @@ impl CapacityPolicy { } /// Returns true if any limits are currently exceeded. - fn needs_eviction(&self, entry_count: usize) -> bool { + const fn needs_eviction(&self, entry_count: usize) -> bool { self.exceeds_memory_limit() || self.exceeds_entry_limit(entry_count) } /// Gets the total memory size of entry/series, excluding LRU cache overhead. pub fn item_size(&self, series: &MetricSeries, entry: &MetricEntry) -> usize { - series.allocated_bytes() + entry.allocated_bytes() + entry.allocated_bytes() + series.allocated_bytes() } } @@ -551,15 +551,6 @@ impl MetricSet { return; // No capacity limits configured, return immediately }; - debug!( - message = "Inserting entry with tracking", - series_name = ?series.name, - current_memory = capacity_policy.current_memory(), - max_memory = ?capacity_policy.max_bytes, - current_entries = self.inner.len(), - max_entries = ?capacity_policy.max_events - ); - // Handle differently based on whether we need to track memory if capacity_policy.max_bytes.is_some() { // When tracking memory, we need to calculate sizes before and after @@ -568,17 +559,18 @@ impl MetricSet { // If we had an existing entry, calculate its size and adjust memory tracking let existing_size = capacity_policy.item_size(&series, &existing_entry); capacity_policy.replace_memory(existing_size, entry_size); - self.inner.get(&series); } else { // No existing entry, just add the new entry's size capacity_policy.replace_memory(0, entry_size); - self.inner.get(&series); } } else { // When not tracking memory (only entry count limits), just put directly self.inner.put(series, entry); } + // Get item; move to back of LRU cache + 
self.inner.get(&series); + // Enforce limits after insertion self.enforce_capacity_policy(); From 41b983f034a0b89a348c92a7fa56cff75eaf605a Mon Sep 17 00:00:00 2001 From: Derek Zhang Date: Thu, 30 Oct 2025 00:21:06 -0400 Subject: [PATCH 010/227] fix clone --- src/sinks/util/buffer/metrics/normalize.rs | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/src/sinks/util/buffer/metrics/normalize.rs b/src/sinks/util/buffer/metrics/normalize.rs index dbfd11c7c9a96..cc1bedbf5efc3 100644 --- a/src/sinks/util/buffer/metrics/normalize.rs +++ b/src/sinks/util/buffer/metrics/normalize.rs @@ -272,7 +272,7 @@ pub struct CapacityPolicy { impl CapacityPolicy { /// Creates a new capacity policy with both memory and entry limits. - pub fn new(max_bytes: Option, max_events: Option) -> Self { + pub const fn new(max_bytes: Option, max_events: Option) -> Self { Self { max_bytes, max_events, @@ -300,8 +300,11 @@ impl CapacityPolicy { } /// Updates memory tracking. - fn replace_memory(&mut self, old_bytes: usize, new_bytes: usize) { - self.current_memory = self.current_memory.saturating_sub(old_bytes).saturating_add(new_bytes); + const fn replace_memory(&mut self, old_bytes: usize, new_bytes: usize) { + self.current_memory = self + .current_memory + .saturating_sub(old_bytes) + .saturating_add(new_bytes); } /// Checks if the current state exceeds memory limits. 
@@ -565,7 +568,7 @@ impl MetricSet { } } else { // When not tracking memory (only entry count limits), just put directly - self.inner.put(series, entry); + self.inner.put(series.clone(), entry); } // Get item; move to back of LRU cache From df4eaee8e3bc5a823f36fdd0c9788854b3940ac9 Mon Sep 17 00:00:00 2001 From: Derek Zhang Date: Thu, 30 Oct 2025 02:08:31 -0400 Subject: [PATCH 011/227] wip --- src/sinks/util/buffer/metrics/normalize.rs | 25 +++++++++++++++ src/transforms/incremental_to_absolute.rs | 37 ++++++++++++++++++++-- 2 files changed, 60 insertions(+), 2 deletions(-) diff --git a/src/sinks/util/buffer/metrics/normalize.rs b/src/sinks/util/buffer/metrics/normalize.rs index cc1bedbf5efc3..cb063e2bb8608 100644 --- a/src/sinks/util/buffer/metrics/normalize.rs +++ b/src/sinks/util/buffer/metrics/normalize.rs @@ -268,6 +268,8 @@ pub struct CapacityPolicy { pub max_events: Option, /// Current memory usage tracking current_memory: usize, + /// Counter for evictions. Used for metrics tacking + eviction_count: usize, } impl CapacityPolicy { @@ -277,6 +279,7 @@ impl CapacityPolicy { max_bytes, max_events, current_memory: 0, + eviction_count: 0, } } @@ -285,6 +288,18 @@ impl CapacityPolicy { self.current_memory } + /// Get the current eviction count + pub const fn eviction_count(&self) -> usize { + self.eviction_count + } + + /// Reset the eviction count and return the previous value + pub fn reset_eviction_count(&mut self) -> usize { + let count = self.eviction_count; + self.eviction_count = 0; + count + } + /// Updates memory tracking when an entry is removed. 
const fn remove_memory(&mut self, bytes: usize) { self.current_memory = self.current_memory.saturating_sub(bytes); @@ -496,6 +511,7 @@ impl MetricSet { while capacity_policy.needs_eviction(self.inner.len()) { if let Some((series, entry)) = self.inner.pop_lru() { capacity_policy.free_item(&series, &entry); + capacity_policy.eviction_count += 1; } else { break; // No more entries to evict } @@ -504,6 +520,15 @@ impl MetricSet { info!(message = "Memory after enforcement", debug = %self.debug_memory()); } + // Method to get eviction count and reset the counter + pub fn get_and_reset_eviction_count(&mut self) -> usize { + if let Some(ref mut capacity_policy) = self.capacity_policy { + capacity_policy.reset_eviction_count() + } else { + 0 + } + } + /// Perform TTL cleanup if configured and needed. fn maybe_cleanup(&mut self) { // Check if cleanup is needed and get the current timestamp in one operation diff --git a/src/transforms/incremental_to_absolute.rs b/src/transforms/incremental_to_absolute.rs index 2107f4722f805..dc0f379632bf1 100644 --- a/src/transforms/incremental_to_absolute.rs +++ b/src/transforms/incremental_to_absolute.rs @@ -1,8 +1,10 @@ -use std::{collections::HashMap, future::ready, pin::Pin, time::Duration}; +use std::{collections::HashMap, pin::Pin, time::Duration}; use futures::{Stream, StreamExt}; use vector_lib::{config::LogNamespace, configurable::configurable_component}; +use crate::internal_events::{IncrementalToAbsoluteMetricsCache}; + use crate::{ config::{DataType, Input, OutputId, TransformConfig, TransformContext, TransformOutput}, event::Event, @@ -81,6 +83,20 @@ impl IncrementalToAbsolute { .make_absolute(event.as_metric().clone()) .map(Event::Metric) } + + // Emit metrics on cache entries, internally tracked size, and eviction count + fn emit_metrics(&mut self) { + // Always emit the entries count + if let Some(cp) = self.data.capacity_policy() { + if cp.max_bytes.is_some() { + emit!(IncrementalToAbsoluteMetricsCache { + size: 
cp.current_memory(); + count: self.data.len(), + evictions: self.data.get_and_reset_eviction_count(), + }); + } + } + } } impl TaskTransform for IncrementalToAbsolute { @@ -92,7 +108,24 @@ impl TaskTransform for IncrementalToAbsolute { Self: 'static, { let mut inner = self; - Box::pin(task.filter_map(move |v| ready(inner.transform_one(v)))) + + // Emit initial metrics + inner.emit_metrics(); + + // Set up periodic metrics emission + let mut interval = tokio::time::interval(Duration::from_secs(60)); + + Box::pin(task + .filter_map(move |v| { + let mut cx = std::task::Context::from_waker(futures::task::noop_waker_ref()); + // Poll the interval and emit metrics if ready + while interval.poll_tick(&mut cx).is_ready() { + inner.emit_metrics(); + } + // Process the event as before + futures::future::ready(inner.transform_one(v)) + }) + ) } } From b0eb04704f79168764543364e71f7811c082a615 Mon Sep 17 00:00:00 2001 From: Derek Zhang Date: Thu, 30 Oct 2025 02:10:08 -0400 Subject: [PATCH 012/227] add metrics --- .../incremental_to_absolute.rs | 27 +++++++++++++++++++ src/transforms/incremental_to_absolute.rs | 6 ++--- 2 files changed, 30 insertions(+), 3 deletions(-) create mode 100644 src/internal_events/incremental_to_absolute.rs diff --git a/src/internal_events/incremental_to_absolute.rs b/src/internal_events/incremental_to_absolute.rs new file mode 100644 index 0000000000000..dadb89ed84b12 --- /dev/null +++ b/src/internal_events/incremental_to_absolute.rs @@ -0,0 +1,27 @@ +use metrics::{counter, gauge}; +use vector_lib::internal_event::InternalEvent; + +/// Emitted to track the current size of the metrics cache in bytes +#[derive(Debug)] +pub struct IncrementalToAbsoluteMetricsCache { + pub size: usize, + pub count: usize, + pub evictions: usize, +} + +impl InternalEvent for IncrementalToAbsoluteMetricsCache { + fn emit(self) { + trace!( + message = "Metrics cache current size.", + size = %self.size, + count = %self.count, + evictions = %self.evictions, + ); + 
gauge!("component_cache_bytes", "component_type" => "transform", "transform_type" => "incremental_to_absolute") + .set(self.size as f64); + gauge!("component_cache_events", "component_type" => "transform", "transform_type" => "incremental_to_absolute") + .set(self.count as f64); + counter!("component_cache_evictions_total", "component_type" => "transform", "transform_type" => "incremental_to_absolute") + .increment(self.evictions as u64); + } +} diff --git a/src/transforms/incremental_to_absolute.rs b/src/transforms/incremental_to_absolute.rs index dc0f379632bf1..15498e9539acf 100644 --- a/src/transforms/incremental_to_absolute.rs +++ b/src/transforms/incremental_to_absolute.rs @@ -3,7 +3,7 @@ use std::{collections::HashMap, pin::Pin, time::Duration}; use futures::{Stream, StreamExt}; use vector_lib::{config::LogNamespace, configurable::configurable_component}; -use crate::internal_events::{IncrementalToAbsoluteMetricsCache}; +use crate::internal_events::incremental_to_absolute::IncrementalToAbsoluteMetricsCache; use crate::{ config::{DataType, Input, OutputId, TransformConfig, TransformContext, TransformOutput}, @@ -90,8 +90,8 @@ impl IncrementalToAbsolute { if let Some(cp) = self.data.capacity_policy() { if cp.max_bytes.is_some() { emit!(IncrementalToAbsoluteMetricsCache { - size: cp.current_memory(); - count: self.data.len(), + size: cp.current_memory(), + count: self.data.len(), evictions: self.data.get_and_reset_eviction_count(), }); } From ea263321da7939678d4f5778cbb27cb7f03ad730 Mon Sep 17 00:00:00 2001 From: Derek Zhang Date: Thu, 30 Oct 2025 10:43:42 -0400 Subject: [PATCH 013/227] use crate --- src/internal_events/incremental_to_absolute.rs | 2 +- src/internal_events/mod.rs | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/src/internal_events/incremental_to_absolute.rs b/src/internal_events/incremental_to_absolute.rs index dadb89ed84b12..139836048219c 100644 --- a/src/internal_events/incremental_to_absolute.rs +++ 
b/src/internal_events/incremental_to_absolute.rs @@ -12,7 +12,7 @@ pub struct IncrementalToAbsoluteMetricsCache { impl InternalEvent for IncrementalToAbsoluteMetricsCache { fn emit(self) { trace!( - message = "Metrics cache current size.", + message = "Metrics cache stats.", size = %self.size, count = %self.count, evictions = %self.evictions, diff --git a/src/internal_events/mod.rs b/src/internal_events/mod.rs index 1647e2b8ff0ca..3aef7d6d83d11 100644 --- a/src/internal_events/mod.rs +++ b/src/internal_events/mod.rs @@ -67,6 +67,8 @@ mod http; pub mod http_client; #[cfg(feature = "sources-utils-http-client")] mod http_client_source; +#[cfg(feature = "transforms-incremental_to_absolute")] +pub(crate) mod incremental_to_absolute; #[cfg(feature = "sinks-influxdb")] mod influxdb; #[cfg(feature = "sources-internal_logs")] From 8577fe791d0b292d5af226952563a34f1f3065c9 Mon Sep 17 00:00:00 2001 From: Derek Zhang Date: Thu, 30 Oct 2025 11:35:20 -0400 Subject: [PATCH 014/227] fix metrics --- .../incremental_to_absolute.rs | 16 +++- src/sinks/util/buffer/metrics/normalize.rs | 95 +++++-------------- src/transforms/incremental_to_absolute.rs | 25 ++--- 3 files changed, 49 insertions(+), 87 deletions(-) diff --git a/src/internal_events/incremental_to_absolute.rs b/src/internal_events/incremental_to_absolute.rs index 139836048219c..76b677cc4a7cd 100644 --- a/src/internal_events/incremental_to_absolute.rs +++ b/src/internal_events/incremental_to_absolute.rs @@ -7,6 +7,7 @@ pub struct IncrementalToAbsoluteMetricsCache { pub size: usize, pub count: usize, pub evictions: usize, + pub has_capacity_policy: bool, } impl InternalEvent for IncrementalToAbsoluteMetricsCache { @@ -16,12 +17,19 @@ impl InternalEvent for IncrementalToAbsoluteMetricsCache { size = %self.size, count = %self.count, evictions = %self.evictions, + has_capacity_policy = %self.has_capacity_policy, ); - gauge!("component_cache_bytes", "component_type" => "transform", "transform_type" => "incremental_to_absolute") - 
.set(self.size as f64); - gauge!("component_cache_events", "component_type" => "transform", "transform_type" => "incremental_to_absolute") + + // Only emit component_cache_bytes if capacity policy is defined + if self.has_capacity_policy { + gauge!("component_cache_bytes") + .set(self.size as f64); + } + + gauge!("component_cache_events") .set(self.count as f64); - counter!("component_cache_evictions_total", "component_type" => "transform", "transform_type" => "incremental_to_absolute") + + counter!("component_cache_evictions_total") .increment(self.evictions as u64); } } diff --git a/src/sinks/util/buffer/metrics/normalize.rs b/src/sinks/util/buffer/metrics/normalize.rs index cb063e2bb8608..c7f8d8d20d81d 100644 --- a/src/sinks/util/buffer/metrics/normalize.rs +++ b/src/sinks/util/buffer/metrics/normalize.rs @@ -268,8 +268,6 @@ pub struct CapacityPolicy { pub max_events: Option, /// Current memory usage tracking current_memory: usize, - /// Counter for evictions. Used for metrics tacking - eviction_count: usize, } impl CapacityPolicy { @@ -279,7 +277,6 @@ impl CapacityPolicy { max_bytes, max_events, current_memory: 0, - eviction_count: 0, } } @@ -288,30 +285,17 @@ impl CapacityPolicy { self.current_memory } - /// Get the current eviction count - pub const fn eviction_count(&self) -> usize { - self.eviction_count - } - - /// Reset the eviction count and return the previous value - pub fn reset_eviction_count(&mut self) -> usize { - let count = self.eviction_count; - self.eviction_count = 0; - count - } /// Updates memory tracking when an entry is removed. const fn remove_memory(&mut self, bytes: usize) { self.current_memory = self.current_memory.saturating_sub(bytes); } - /// Frees the memory for an item if max_bytes is set. - /// Only calculates and tracks memory when max_bytes is specified. + /// Frees the memory for an item, always tracking memory usage. + /// Memory tracking now happens regardless of whether max_bytes is set. 
pub fn free_item(&mut self, series: &MetricSeries, entry: &MetricEntry) { - if self.max_bytes.is_some() { - let freed_memory = self.item_size(series, entry); - self.remove_memory(freed_memory); - } + let freed_memory = self.item_size(series, entry); + self.remove_memory(freed_memory); } /// Updates memory tracking. @@ -410,30 +394,11 @@ pub struct MetricSet { capacity_policy: Option, /// Optional TTL policy for time-based expiration ttl_policy: Option, + /// Counter for evictions. Used for metrics tracking + eviction_count: usize, } impl MetricSet { - /// Debugs memory usage across the cache - pub fn debug_memory(&self) -> String { - let total_tracked = self.capacity_policy - .as_ref() - .map(|cp| cp.current_memory()) - .unwrap_or(0); - - // Calculate actual memory usage - let mut actual_usage = 0; - for (series, entry) in self.inner.iter() { - let series_size = series.allocated_bytes(); - let entry_size = entry.allocated_bytes(); - actual_usage += series_size + entry_size; - } - - format!( - "Tracked memory: {}, Actual usage: {}, Items: {}", - total_tracked, actual_usage, self.inner.len() - ) - } - /// Creates a new MetricSet with the given settings. 
pub fn new(settings: MetricSetSettings) -> Self { // Create capacity policy if any capacity limit is set @@ -461,6 +426,7 @@ impl MetricSet { inner: LruCache::unbounded(), capacity_policy, ttl_policy, + eviction_count: 0, } } @@ -511,22 +477,17 @@ impl MetricSet { while capacity_policy.needs_eviction(self.inner.len()) { if let Some((series, entry)) = self.inner.pop_lru() { capacity_policy.free_item(&series, &entry); - capacity_policy.eviction_count += 1; + self.eviction_count += 1; } else { break; // No more entries to evict } } - // Log memory debug info - info!(message = "Memory after enforcement", debug = %self.debug_memory()); } // Method to get eviction count and reset the counter - pub fn get_and_reset_eviction_count(&mut self) -> usize { - if let Some(ref mut capacity_policy) = self.capacity_policy { - capacity_policy.reset_eviction_count() - } else { - 0 - } + pub fn get_eviction_count(&mut self) -> usize { + let count = self.eviction_count; + count } /// Perform TTL cleanup if configured and needed. 
@@ -564,10 +525,11 @@ impl MetricSet { // Remove expired entries and update memory tracking (if max_bytes is set) for series in expired_keys { - if let Some(entry) = self.inner.pop(&series) - && let Some(ref mut capacity_policy) = self.capacity_policy - { - capacity_policy.free_item(&series, &entry); + if let Some(entry) = self.inner.pop(&series) { + self.eviction_count += 1; + if let Some(ref mut capacity_policy) = self.capacity_policy { + capacity_policy.free_item(&series, &entry); + } } } } @@ -579,21 +541,15 @@ impl MetricSet { return; // No capacity limits configured, return immediately }; - // Handle differently based on whether we need to track memory - if capacity_policy.max_bytes.is_some() { - // When tracking memory, we need to calculate sizes before and after - let entry_size = capacity_policy.item_size(&series, &entry); - if let Some(existing_entry) = self.inner.put(series.clone(), entry) { - // If we had an existing entry, calculate its size and adjust memory tracking - let existing_size = capacity_policy.item_size(&series, &existing_entry); - capacity_policy.replace_memory(existing_size, entry_size); - } else { - // No existing entry, just add the new entry's size - capacity_policy.replace_memory(0, entry_size); - } + // Always track memory when capacity policy exists + let entry_size = capacity_policy.item_size(&series, &entry); + if let Some(existing_entry) = self.inner.put(series.clone(), entry) { + // If we had an existing entry, calculate its size and adjust memory tracking + let existing_size = capacity_policy.item_size(&series, &existing_entry); + capacity_policy.replace_memory(existing_size, entry_size); } else { - // When not tracking memory (only entry count limits), just put directly - self.inner.put(series.clone(), entry); + // No existing entry, just add the new entry's size + capacity_policy.replace_memory(0, entry_size); } // Get item; move to back of LRU cache @@ -601,9 +557,6 @@ impl MetricSet { // Enforce limits after insertion 
self.enforce_capacity_policy(); - - // Log memory debug info - debug!(message = "Memory after insertion", debug = %self.debug_memory()); } /// Consumes this MetricSet and returns a vector of Metric. diff --git a/src/transforms/incremental_to_absolute.rs b/src/transforms/incremental_to_absolute.rs index 15498e9539acf..b29a75bf6e65a 100644 --- a/src/transforms/incremental_to_absolute.rs +++ b/src/transforms/incremental_to_absolute.rs @@ -86,16 +86,17 @@ impl IncrementalToAbsolute { // Emit metrics on cache entries, internally tracked size, and eviction count fn emit_metrics(&mut self) { - // Always emit the entries count - if let Some(cp) = self.data.capacity_policy() { - if cp.max_bytes.is_some() { - emit!(IncrementalToAbsoluteMetricsCache { - size: cp.current_memory(), - count: self.data.len(), - evictions: self.data.get_and_reset_eviction_count(), - }); - } - } + let (size, has_capacity_policy) = match self.data.capacity_policy() { + Some(cp) => (cp.current_memory(), true), + None => (0, false), + }; + + emit!(IncrementalToAbsoluteMetricsCache { + size, + count: self.data.len(), + evictions: self.data.get_eviction_count(), + has_capacity_policy, + }); } } @@ -112,8 +113,8 @@ impl TaskTransform for IncrementalToAbsolute { // Emit initial metrics inner.emit_metrics(); - // Set up periodic metrics emission - let mut interval = tokio::time::interval(Duration::from_secs(60)); + // Set up periodic metrics emission every 2 seconds + let mut interval = tokio::time::interval(Duration::from_secs(2)); Box::pin(task .filter_map(move |v| { From c6b8b96f09e2c0a7b8797b804e73b1c3c004b8e3 Mon Sep 17 00:00:00 2001 From: Derek Zhang Date: Thu, 30 Oct 2025 11:48:01 -0400 Subject: [PATCH 015/227] lint --- lib/vector-core/src/event/metric/value.rs | 8 +++++-- .../incremental_to_absolute.rs | 12 +++------- src/sinks/util/buffer/metrics/normalize.rs | 12 +++++----- src/transforms/incremental_to_absolute.rs | 22 +++++++++---------- 4 files changed, 24 insertions(+), 30 deletions(-) diff 
--git a/lib/vector-core/src/event/metric/value.rs b/lib/vector-core/src/event/metric/value.rs index 89b6909eeab47..cadd30a28f0a8 100644 --- a/lib/vector-core/src/event/metric/value.rs +++ b/lib/vector-core/src/event/metric/value.rs @@ -363,8 +363,12 @@ impl ByteSizeOf for MetricValue { Self::Counter { .. } | Self::Gauge { .. } => size_of::(), Self::Set { values } => values.allocated_bytes() + size_of_val(values), Self::Distribution { samples, .. } => samples.allocated_bytes() + size_of_val(samples), - Self::AggregatedHistogram { buckets, .. } => buckets.allocated_bytes() + size_of_val(buckets), - Self::AggregatedSummary { quantiles, .. } => quantiles.allocated_bytes() + size_of_val(quantiles), + Self::AggregatedHistogram { buckets, .. } => { + buckets.allocated_bytes() + size_of_val(buckets) + } + Self::AggregatedSummary { quantiles, .. } => { + quantiles.allocated_bytes() + size_of_val(quantiles) + } Self::Sketch { sketch } => sketch.allocated_bytes() + size_of_val(sketch), } } diff --git a/src/internal_events/incremental_to_absolute.rs b/src/internal_events/incremental_to_absolute.rs index 76b677cc4a7cd..346bcf21134bc 100644 --- a/src/internal_events/incremental_to_absolute.rs +++ b/src/internal_events/incremental_to_absolute.rs @@ -19,17 +19,11 @@ impl InternalEvent for IncrementalToAbsoluteMetricsCache { evictions = %self.evictions, has_capacity_policy = %self.has_capacity_policy, ); - // Only emit component_cache_bytes if capacity policy is defined if self.has_capacity_policy { - gauge!("component_cache_bytes") - .set(self.size as f64); + gauge!("component_cache_bytes").set(self.size as f64); } - - gauge!("component_cache_events") - .set(self.count as f64); - - counter!("component_cache_evictions_total") - .increment(self.evictions as u64); + gauge!("component_cache_events").set(self.count as f64); + counter!("component_cache_evictions_total").increment(self.evictions as u64); } } diff --git a/src/sinks/util/buffer/metrics/normalize.rs 
b/src/sinks/util/buffer/metrics/normalize.rs index c7f8d8d20d81d..4ac2b61454e17 100644 --- a/src/sinks/util/buffer/metrics/normalize.rs +++ b/src/sinks/util/buffer/metrics/normalize.rs @@ -208,11 +208,11 @@ impl ByteSizeOf for MetricEntry { // Calculate the size of the data and metadata let data_size = self.data.allocated_bytes(); let metadata_size = self.metadata.allocated_bytes(); - + // Include struct overhead - size of self without double-counting fields // that we already accounted for in their respective allocated_bytes() calls let struct_size = size_of::(); - + data_size + metadata_size + struct_size } } @@ -285,7 +285,6 @@ impl CapacityPolicy { self.current_memory } - /// Updates memory tracking when an entry is removed. const fn remove_memory(&mut self, bytes: usize) { self.current_memory = self.current_memory.saturating_sub(bytes); @@ -485,9 +484,8 @@ impl MetricSet { } // Method to get eviction count and reset the counter - pub fn get_eviction_count(&mut self) -> usize { - let count = self.eviction_count; - count + pub const fn get_eviction_count(&mut self) -> usize { + self.eviction_count } /// Perform TTL cleanup if configured and needed. 
@@ -554,7 +552,7 @@ impl MetricSet { // Get item; move to back of LRU cache self.inner.get(&series); - + // Enforce limits after insertion self.enforce_capacity_policy(); } diff --git a/src/transforms/incremental_to_absolute.rs b/src/transforms/incremental_to_absolute.rs index b29a75bf6e65a..320e92efdf872 100644 --- a/src/transforms/incremental_to_absolute.rs +++ b/src/transforms/incremental_to_absolute.rs @@ -90,7 +90,7 @@ impl IncrementalToAbsolute { Some(cp) => (cp.current_memory(), true), None => (0, false), }; - + emit!(IncrementalToAbsoluteMetricsCache { size, count: self.data.len(), @@ -116,17 +116,15 @@ impl TaskTransform for IncrementalToAbsolute { // Set up periodic metrics emission every 2 seconds let mut interval = tokio::time::interval(Duration::from_secs(2)); - Box::pin(task - .filter_map(move |v| { - let mut cx = std::task::Context::from_waker(futures::task::noop_waker_ref()); - // Poll the interval and emit metrics if ready - while interval.poll_tick(&mut cx).is_ready() { - inner.emit_metrics(); - } - // Process the event as before - futures::future::ready(inner.transform_one(v)) - }) - ) + Box::pin(task.filter_map(move |v| { + let mut cx = std::task::Context::from_waker(futures::task::noop_waker_ref()); + // Poll the interval and emit metrics if ready + while interval.poll_tick(&mut cx).is_ready() { + inner.emit_metrics(); + } + // Process the event as before + futures::future::ready(inner.transform_one(v)) + })) } } From 5d70d07c3806f09ddf0597374de4553ed1394399 Mon Sep 17 00:00:00 2001 From: Thomas Date: Thu, 30 Oct 2025 11:51:26 -0400 Subject: [PATCH 016/227] feat(dev): add opentelemetry metrics e2e tests (#24109) * naively copy e2e otel-logs to otel-metrics * try to fix * Remove vector_default.yaml custom mapping * Test all metric types * Update vrl to main after fix was merged * Add additional attributes and verify them in tests * Format + remove Summary support * simplify assert_metric_attributes logic * Revert Cargo.toml changes * Add ci e2e 
trigger * Make test more concise * reuse common Dockerfiles * Use common functions in opentelemetry-logs * Hard code vector_otlp.yaml * Fix CI trigger * Add service to integration.yml matrix * Add and use generic assert_service_name_with * ranem data_type -> test_type --------- Co-authored-by: Pavlos Rontidis --- .github/workflows/changes.yml | 6 +- .github/workflows/ci-integration-review.yml | 7 + .github/workflows/integration.yml | 2 +- Cargo.lock | 2 +- .../collector.Dockerfile | 0 .../telemetrygen.Dockerfile | 0 scripts/e2e/opentelemetry-logs/compose.yaml | 4 +- .../e2e/opentelemetry-metrics/compose.yaml | 125 +++++++ scripts/e2e/opentelemetry-metrics/test.yaml | 26 ++ .../opentelemetry/metrics/collector-sink.yaml | 26 ++ .../metrics/collector-source.yaml | 27 ++ .../opentelemetry/metrics/vector_otlp.yaml | 42 +++ tests/e2e/opentelemetry/logs/mod.rs | 138 ++------ tests/e2e/opentelemetry/metrics/mod.rs | 304 ++++++++++++++++++ tests/e2e/opentelemetry/mod.rs | 115 +++++++ 15 files changed, 707 insertions(+), 117 deletions(-) rename scripts/e2e/{opentelemetry-logs => opentelemetry-common}/collector.Dockerfile (100%) rename scripts/e2e/{opentelemetry-logs => opentelemetry-common}/telemetrygen.Dockerfile (100%) create mode 100644 scripts/e2e/opentelemetry-metrics/compose.yaml create mode 100644 scripts/e2e/opentelemetry-metrics/test.yaml create mode 100644 tests/data/e2e/opentelemetry/metrics/collector-sink.yaml create mode 100644 tests/data/e2e/opentelemetry/metrics/collector-source.yaml create mode 100644 tests/data/e2e/opentelemetry/metrics/vector_otlp.yaml create mode 100644 tests/e2e/opentelemetry/metrics/mod.rs diff --git a/.github/workflows/changes.yml b/.github/workflows/changes.yml index d916a66307274..c68eec6109509 100644 --- a/.github/workflows/changes.yml +++ b/.github/workflows/changes.yml @@ -141,6 +141,8 @@ on: value: ${{ jobs.e2e_tests.outputs.datadog-metrics }} e2e-opentelemetry-logs: value: ${{ jobs.e2e_tests.outputs.opentelemetry-logs }} + 
e2e-opentelemetry-metrics: + value: ${{ jobs.e2e_tests.outputs.opentelemetry-metrics }} int-tests-any: value: ${{ jobs.int_tests.outputs.any }} e2e-tests-any: @@ -400,6 +402,7 @@ jobs: datadog-logs: ${{ steps.filter.outputs.datadog-logs }} datadog-metrics: ${{ steps.filter.outputs.datadog-metrics }} opentelemetry-logs: ${{ steps.filter.outputs.opentelemetry-logs }} + opentelemetry-metrics: ${{ steps.filter.outputs.opentelemetry-metrics }} any: ${{ steps.detect-changes.outputs.any }} steps: - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 @@ -430,7 +433,8 @@ jobs: { "datadog-logs": ${{ steps.filter.outputs.datadog-logs }}, "datadog-metrics": ${{ steps.filter.outputs.datadog-metrics }}, - "opentelemetry-logs": ${{ steps.filter.outputs.opentelemetry-logs }} + "opentelemetry-logs": ${{ steps.filter.outputs.opentelemetry-logs }}, + "opentelemetry-metrics": ${{ steps.filter.outputs.opentelemetry-metrics }} } EOF ) diff --git a/.github/workflows/ci-integration-review.yml b/.github/workflows/ci-integration-review.yml index 014f97b9547e3..8f25c614c3c58 100644 --- a/.github/workflows/ci-integration-review.yml +++ b/.github/workflows/ci-integration-review.yml @@ -173,6 +173,13 @@ jobs: || startsWith(github.event.review.body, '/ci-run-all') }} run: bash scripts/run-integration-test.sh e2e opentelemetry-logs + - name: e2e-opentelemetry-metrics + if: ${{ startsWith(github.event.review.body, '/ci-run-e2e-opentelemetry-metrics') + || startsWith(github.event.review.body, '/ci-run-e2e-all') + || startsWith(github.event.review.body, '/ci-run-all') }} + run: bash scripts/run-integration-test.sh e2e opentelemetry-metrics + + update-pr-status: name: Signal result to PR runs-on: ubuntu-24.04 diff --git a/.github/workflows/integration.yml b/.github/workflows/integration.yml index 2dc938aab9962..805ee63d39b43 100644 --- a/.github/workflows/integration.yml +++ b/.github/workflows/integration.yml @@ -136,7 +136,7 @@ jobs: if: ${{ !failure() && !cancelled() && 
(github.event_name == 'merge_group' || github.event_name == 'workflow_dispatch') }} strategy: matrix: - service: [ "datadog-logs", "datadog-metrics", "opentelemetry-logs" ] + service: [ "datadog-logs", "datadog-metrics", "opentelemetry-logs", "opentelemetry-metrics" ] timeout-minutes: 90 steps: diff --git a/Cargo.lock b/Cargo.lock index 61e4ffce98b44..c04a3cce92cfe 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -12634,7 +12634,7 @@ checksum = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d" [[package]] name = "vrl" version = "0.27.0" -source = "git+https://github.com/vectordotdev/vrl.git?branch=main#dbfeee575e0527f46264667339f37a795362099e" +source = "git+https://github.com/vectordotdev/vrl.git?branch=main#90ea34f09903e3ee85f9b3e4813f44992f6e7c68" dependencies = [ "aes", "aes-siv", diff --git a/scripts/e2e/opentelemetry-logs/collector.Dockerfile b/scripts/e2e/opentelemetry-common/collector.Dockerfile similarity index 100% rename from scripts/e2e/opentelemetry-logs/collector.Dockerfile rename to scripts/e2e/opentelemetry-common/collector.Dockerfile diff --git a/scripts/e2e/opentelemetry-logs/telemetrygen.Dockerfile b/scripts/e2e/opentelemetry-common/telemetrygen.Dockerfile similarity index 100% rename from scripts/e2e/opentelemetry-logs/telemetrygen.Dockerfile rename to scripts/e2e/opentelemetry-common/telemetrygen.Dockerfile diff --git a/scripts/e2e/opentelemetry-logs/compose.yaml b/scripts/e2e/opentelemetry-logs/compose.yaml index 1fb0cb48a6fcd..e947c2293439c 100644 --- a/scripts/e2e/opentelemetry-logs/compose.yaml +++ b/scripts/e2e/opentelemetry-logs/compose.yaml @@ -18,7 +18,7 @@ services: container_name: logs-generator build: context: ../../../ - dockerfile: ./scripts/e2e/opentelemetry-logs/telemetrygen.Dockerfile + dockerfile: ./scripts/e2e/opentelemetry-common/telemetrygen.Dockerfile init: true depends_on: otel-collector-source: @@ -44,7 +44,7 @@ services: container_name: otel-collector-sink build: context: ../../../ - dockerfile: 
./scripts/e2e/opentelemetry-logs/collector.Dockerfile + dockerfile: ./scripts/e2e/opentelemetry-common/collector.Dockerfile args: CONFIG_COLLECTOR_VERSION: ${CONFIG_COLLECTOR_VERSION} init: true diff --git a/scripts/e2e/opentelemetry-metrics/compose.yaml b/scripts/e2e/opentelemetry-metrics/compose.yaml new file mode 100644 index 0000000000000..0652811ff4c27 --- /dev/null +++ b/scripts/e2e/opentelemetry-metrics/compose.yaml @@ -0,0 +1,125 @@ +name: opentelemetry-vector-e2e +services: + otel-collector-source: + container_name: otel-collector-source + image: otel/opentelemetry-collector-contrib:${CONFIG_COLLECTOR_VERSION} + init: true + volumes: + - type: bind + source: ../../../tests/data/e2e/opentelemetry/metrics/collector-source.yaml + target: /etc/otelcol-contrib/config.yaml + read_only: true + ports: + - "${OTEL_COLLECTOR_SOURCE_GRPC_PORT:-4317}:4317" + - "${OTEL_COLLECTOR_SOURCE_HTTP_PORT:-4318}:4318" + command: [ "--config=/etc/otelcol-contrib/config.yaml" ] + + metrics-generator: + container_name: metrics-generator + build: + context: ../../../ + dockerfile: ./scripts/e2e/opentelemetry-common/telemetrygen.Dockerfile + init: true + depends_on: + otel-collector-source: + condition: service_started + vector: + condition: service_started + otel-collector-sink: + condition: service_started + command: + - "-c" + - | + until nc -z otel-collector-source 4318; do + sleep 0.5 + done + # Generate 50 Gauge metrics + telemetrygen metrics \ + --otlp-endpoint=otel-collector-source:4318 \ + --otlp-insecure \ + --otlp-http \ + --metrics=50 \ + --metric-type=Gauge \ + --otlp-metric-name=gauge_metric \ + --telemetry-attributes='metric.type="gauge"' \ + --rate=0 + # Generate 50 Sum metrics + telemetrygen metrics \ + --otlp-endpoint=otel-collector-source:4318 \ + --otlp-insecure \ + --otlp-http \ + --metrics=50 \ + --metric-type=Sum \ + --otlp-metric-name=sum_metric \ + --aggregation-temporality=cumulative \ + --telemetry-attributes='metric.type="sum"' \ + --rate=0 + # Generate 50 
Histogram metrics + telemetrygen metrics \ + --otlp-endpoint=otel-collector-source:4318 \ + --otlp-insecure \ + --otlp-http \ + --metrics=50 \ + --metric-type=Histogram \ + --otlp-metric-name=histogram_metric \ + --aggregation-temporality=cumulative \ + --telemetry-attributes='metric.type="histogram"' \ + --rate=0 + # Generate 50 ExponentialHistogram metrics + telemetrygen metrics \ + --otlp-endpoint=otel-collector-source:4318 \ + --otlp-insecure \ + --otlp-http \ + --metrics=50 \ + --metric-type=ExponentialHistogram \ + --otlp-metric-name=exponential_histogram_metric \ + --aggregation-temporality=cumulative \ + --telemetry-attributes='metric.type="exponential_histogram"' \ + --rate=0 + + otel-collector-sink: + container_name: otel-collector-sink + build: + context: ../../../ + dockerfile: ./scripts/e2e/opentelemetry-common/collector.Dockerfile + args: + CONFIG_COLLECTOR_VERSION: ${CONFIG_COLLECTOR_VERSION} + init: true + user: "0:0" # test only, override special user with root + command: [ "--config", "/etc/otelcol-contrib/config.yaml" ] + volumes: + - type: bind + source: ../../../tests/data/e2e/opentelemetry/metrics/collector-sink.yaml + target: /etc/otelcol-contrib/config.yaml + read_only: true + - type: volume + source: vector_target + target: /output + ports: + - "${OTEL_COLLECTOR_SINK_HTTP_PORT:-5318}:5318" + + vector: + container_name: vector-otel-metrics-e2e + image: ${CONFIG_VECTOR_IMAGE} + init: true + volumes: + - type: bind + source: ../../../tests/data/e2e/opentelemetry/metrics/vector_otlp.yaml + target: /etc/vector/vector.yaml + read_only: true + - type: volume + source: vector_target + target: /output + environment: + - VECTOR_LOG=${VECTOR_LOG:-info} + - FEATURES=e2e-tests-opentelemetry + command: [ "vector", "-c", "/etc/vector/vector.yaml" ] + +volumes: + vector_target: + external: true + +networks: + default: + name: ${VECTOR_NETWORK} + external: true diff --git a/scripts/e2e/opentelemetry-metrics/test.yaml 
b/scripts/e2e/opentelemetry-metrics/test.yaml new file mode 100644 index 0000000000000..3481aa607771e --- /dev/null +++ b/scripts/e2e/opentelemetry-metrics/test.yaml @@ -0,0 +1,26 @@ +features: + - e2e-tests-opentelemetry + +test: "e2e" + +test_filter: "opentelemetry::metrics::" + +runner: + needs_docker_socket: true + env: + OTEL_COLLECTOR_SOURCE_GRPC_PORT: '4317' + OTEL_COLLECTOR_SOURCE_HTTP_PORT: '4318' + OTEL_COLLECTOR_SINK_HTTP_PORT: '5318' + +matrix: + # Determines which `otel/opentelemetry-collector-contrib` version to use + collector_version: [ 'latest' ] + +# Only trigger this integration test if relevant OTEL source/sink files change +paths: + - "src/sources/opentelemetry/**" + - "src/sinks/opentelemetry/**" + - "src/internal_events/opentelemetry_*" + - "tests/e2e/opentelemetry/metrics/**" + - "scripts/e2e/opentelemetry-metrics/**" + - "lib/codecs/src/**/otlp.rs" diff --git a/tests/data/e2e/opentelemetry/metrics/collector-sink.yaml b/tests/data/e2e/opentelemetry/metrics/collector-sink.yaml new file mode 100644 index 0000000000000..120b29ff8e20f --- /dev/null +++ b/tests/data/e2e/opentelemetry/metrics/collector-sink.yaml @@ -0,0 +1,26 @@ +receivers: + otlp: + protocols: + http: + endpoint: "0.0.0.0:5318" + +processors: + batch: { } + +exporters: + debug: { } + file: + path: /output/opentelemetry-metrics/collector-file-exporter.log + rotation: + max_megabytes: 10 + max_days: 1 + +service: + pipelines: + metrics: + receivers: [ otlp ] + processors: [ batch ] + exporters: [ debug, file ] + telemetry: + logs: + level: "debug" diff --git a/tests/data/e2e/opentelemetry/metrics/collector-source.yaml b/tests/data/e2e/opentelemetry/metrics/collector-source.yaml new file mode 100644 index 0000000000000..6f1ba27560e87 --- /dev/null +++ b/tests/data/e2e/opentelemetry/metrics/collector-source.yaml @@ -0,0 +1,27 @@ +receivers: + otlp: + protocols: + http: + endpoint: "0.0.0.0:4318" + grpc: + +processors: + batch: { } + +exporters: + otlp/grpc: + endpoint: vector:4317 + 
tls: + insecure: true + otlphttp/vector: + endpoint: http://vector:4318 + tls: + insecure: true + debug: { } + +service: + pipelines: + metrics: + receivers: [ otlp ] + processors: [ batch ] + exporters: [ debug, otlp/grpc, otlphttp/vector ] diff --git a/tests/data/e2e/opentelemetry/metrics/vector_otlp.yaml b/tests/data/e2e/opentelemetry/metrics/vector_otlp.yaml new file mode 100644 index 0000000000000..0dec0745c3a13 --- /dev/null +++ b/tests/data/e2e/opentelemetry/metrics/vector_otlp.yaml @@ -0,0 +1,42 @@ +sources: + source0: + type: opentelemetry + grpc: + address: 0.0.0.0:4317 + http: + address: 0.0.0.0:4318 + keepalive: + max_connection_age_jitter_factor: 0.1 + max_connection_age_secs: 300 + use_otlp_decoding: true + + internal_metrics: + type: internal_metrics + scrape_interval_secs: 60 + +sinks: + otel_sink: + inputs: + - source0.metrics + type: opentelemetry + protocol: + type: http + uri: http://otel-collector-sink:5318/v1/metrics + encoding: + codec: otlp + + otel_file_sink: + type: file + path: "/output/opentelemetry-metrics/vector-file-sink.log" + inputs: + - source0.metrics + encoding: + codec: json + + metrics_file_sink: + type: file + path: "/output/opentelemetry-metrics/vector-internal-metrics-sink.log" + inputs: + - internal_metrics + encoding: + codec: json diff --git a/tests/e2e/opentelemetry/logs/mod.rs b/tests/e2e/opentelemetry/logs/mod.rs index 66bd3d1edcc69..60c6337f7a5b0 100644 --- a/tests/e2e/opentelemetry/logs/mod.rs +++ b/tests/e2e/opentelemetry/logs/mod.rs @@ -1,80 +1,12 @@ -use prost::Message as ProstMessage; -use prost_reflect::{DescriptorPool, prost::Message as ProstReflectMessage}; -use serde_json::Value as JsonValue; -use std::{io, path::Path, process::Command}; +use vector_lib::opentelemetry::proto::LOGS_REQUEST_MESSAGE_TYPE; use vector_lib::opentelemetry::proto::collector::logs::v1::ExportLogsServiceRequest; use vector_lib::opentelemetry::proto::common::v1::any_value::Value as AnyValueEnum; -use 
vector_lib::opentelemetry::proto::{DESCRIPTOR_BYTES, LOGS_REQUEST_MESSAGE_TYPE};
-use vrl::value::Value as VrlValue;
 
-const EXPECTED_LOG_COUNT: usize = 200; // 100 via gRPC + 100 via HTTP
-
-fn read_file_helper(filename: &str) -> Result<String, io::Error> {
-    let local_path = Path::new("/output/opentelemetry-logs").join(filename);
-    if local_path.exists() {
-        // Running inside the runner container, volume is mounted
-        std::fs::read_to_string(local_path)
-    } else {
-        // Running on host
-        let out = Command::new("docker")
-            .args([
-                "run",
-                "--rm",
-                "-v",
-                "opentelemetry-logs_vector_target:/output",
-                "alpine:3.20",
-                "cat",
-                &format!("/output/{filename}"),
-            ])
-            .output()?;
-
-        if !out.status.success() {
-            return Err(io::Error::other(format!(
-                "docker run failed: {}\n{}",
-                out.status,
-                String::from_utf8_lossy(&out.stderr)
-            )));
-        }
+use crate::opentelemetry::{
+    assert_service_name_with, parse_line_to_export_type_request, read_file_helper,
+};
 
-        Ok(String::from_utf8_lossy(&out.stdout).into_owned())
-    }
-}
-
-fn parse_line_to_export_logs_request(line: &str) -> Result<ExportLogsServiceRequest, String> {
-    // Parse JSON and convert to VRL Value
-    let vrl_value: VrlValue = serde_json::from_str::<JsonValue>(line)
-        .map_err(|e| format!("Failed to parse JSON: {e}"))?
- .into(); - - // Get the message descriptor from the descriptor pool - let descriptor_pool = DescriptorPool::decode(DESCRIPTOR_BYTES) - .map_err(|e| format!("Failed to decode descriptor pool: {e}"))?; - - let message_descriptor = descriptor_pool - .get_message_by_name(LOGS_REQUEST_MESSAGE_TYPE) - .ok_or_else(|| { - format!("Message type '{LOGS_REQUEST_MESSAGE_TYPE}' not found in descriptor pool",) - })?; - - // Encode VRL Value to DynamicMessage using VRL's encode_message with JSON names enabled - let dynamic_message = vrl::protobuf::encode::encode_message( - &message_descriptor, - vrl_value, - &vrl::protobuf::encode::Options { - use_json_names: true, - }, - ) - .map_err(|e| format!("Failed to encode VRL value to protobuf: {e}"))?; - - // Encode DynamicMessage to bytes (using prost 0.13.5) - let mut buf = Vec::new(); - ProstReflectMessage::encode(&dynamic_message, &mut buf) - .map_err(|e| format!("Failed to encode dynamic message to bytes: {e}"))?; - - // Decode bytes into ExportLogsServiceRequest (using prost 0.12.6) - ProstMessage::decode(&buf[..]) - .map_err(|e| format!("Failed to decode ExportLogsServiceRequest: {e}")) -} +const EXPECTED_LOG_COUNT: usize = 200; // 100 via gRPC + 100 via HTTP fn parse_export_logs_request(content: &str) -> Result { // The file may contain multiple lines, each with a JSON object containing an array of resourceLogs @@ -90,9 +22,12 @@ fn parse_export_logs_request(content: &str) -> Result( + LOGS_REQUEST_MESSAGE_TYPE, + line, + ) + .map_err(|e| format!("Line {}: {}", line_num + 1, e))? 
+ .resource_logs, ); } @@ -103,37 +38,6 @@ fn parse_export_logs_request(content: &str) -> Result Result { + // The file may contain multiple lines, each with a JSON object containing an array of + // resourceMetrics + let mut merged_request = ExportMetricsServiceRequest { + resource_metrics: Vec::new(), + }; + + for (line_num, line) in content.lines().enumerate() { + let line = line.trim(); + if line.is_empty() { + continue; + } + + // Merge resource_metrics from this request into the accumulated result + merged_request.resource_metrics.extend( + parse_line_to_export_type_request::( + METRICS_REQUEST_MESSAGE_TYPE, + line, + ) + .map_err(|e| format!("Line {}: {}", line_num + 1, e))? + .resource_metrics, + ); + } + + if merged_request.resource_metrics.is_empty() { + return Err("No resource metrics found in file".to_string()); + } + + Ok(merged_request) +} + +/// Asserts that all metrics have: +/// - A non-empty name +/// - At least one data point +/// - Each data point has a valid timestamp and value +fn assert_metric_data_points(request: &ExportMetricsServiceRequest) { + for (rm_idx, rm) in request.resource_metrics.iter().enumerate() { + for (sm_idx, sm) in rm.scope_metrics.iter().enumerate() { + for (m_idx, metric) in sm.metrics.iter().enumerate() { + let prefix = + format!("resource_metrics[{rm_idx}].scope_metrics[{sm_idx}].metrics[{m_idx}]"); + + // Assert metric has a name + assert!(!metric.name.is_empty(), "{prefix} metric name is empty"); + + // Get data points based on metric type + let data_points_count = match &metric + .data + .as_ref() + .unwrap_or_else(|| panic!("{prefix} has no data")) + { + MetricData::Gauge(Gauge { data_points, .. }) + | MetricData::Sum(Sum { data_points, .. 
}) => { + assert!(!data_points.is_empty(), "{prefix} has no data points"); + for (dp_idx, dp) in data_points.iter().enumerate() { + assert!( + dp.time_unix_nano > 0, + "{prefix}.gauge.data_points[{dp_idx}] has invalid timestamp" + ); + assert!( + dp.value.is_some(), + "{prefix}.gauge.data_points[{dp_idx}] has no value" + ); + } + data_points.len() + } + MetricData::Histogram(histogram) => { + assert!( + !histogram.data_points.is_empty(), + "{prefix} histogram has no data points" + ); + histogram.data_points.len() + } + MetricData::ExponentialHistogram(exp_histogram) => { + assert!( + !exp_histogram.data_points.is_empty(), + "{prefix} exponential histogram has no data points" + ); + exp_histogram.data_points.len() + } + // not supported by telemetrygen + MetricData::Summary(_) => panic!("Unexpected Summary metric"), + }; + + assert!(data_points_count > 0, "{prefix} has zero data points"); + } + } + } +} + +/// Asserts that each metric has the expected telemetry attribute "metric.type" +fn assert_metric_attributes(request: &ExportMetricsServiceRequest) { + for (rm_idx, rm) in request.resource_metrics.iter().enumerate() { + for (sm_idx, sm) in rm.scope_metrics.iter().enumerate() { + for (m_idx, metric) in sm.metrics.iter().enumerate() { + let prefix = + format!("resource_metrics[{rm_idx}].scope_metrics[{sm_idx}].metrics[{m_idx}]"); + + // Get data points and verify attributes + let attrs: Box>> = match metric + .data + .as_ref() + .unwrap_or_else(|| panic!("{prefix} has no data")) + { + MetricData::Gauge(g) => { + assert_eq!(metric.name.as_str(), "gauge_metric"); + Box::new(g.data_points.iter().map(|g| &g.attributes)) + } + MetricData::Sum(s) => { + assert_eq!(metric.name.as_str(), "sum_metric"); + Box::new(s.data_points.iter().map(|s| &s.attributes)) + } + MetricData::Histogram(h) => { + assert_eq!(metric.name.as_str(), "histogram_metric"); + Box::new(h.data_points.iter().map(|h| &h.attributes)) + } + MetricData::ExponentialHistogram(h) => { + 
assert_eq!(metric.name.as_str(), "exponential_histogram_metric"); + Box::new(h.data_points.iter().map(|h| &h.attributes)) + } + // not supported by telemetrygen + MetricData::Summary(_) => panic!("Unexpected Summary metric"), + }; + let expected_attr_value = metric.name.strip_suffix("_metric").unwrap(); + + // Verify gauge and sum data point attributes + for (idx, attributes) in attrs.enumerate() { + let attr = attributes + .iter() + .find(|kv| kv.key == "metric.type") + .unwrap_or_else(|| { + panic!("{prefix}.data_points[{idx}] missing 'metric.type' attribute") + }); + + if let Some(AnyValueEnum::StringValue(s)) = + attr.value.as_ref().and_then(|v| v.value.as_ref()) + { + assert_eq!( + s, expected_attr_value, + "{prefix}.data_points[{idx}] 'metric.type' expected '{expected_attr_value}', got '{s}'" + ); + } else { + panic!("{prefix}.data_points[{idx}] 'metric.type' is not a string value"); + } + } + } + } + } +} + +/// Asserts that metrics have the expected names and counts by type +/// Expected: 100 gauge_metric (50 gRPC + 50 HTTP), 100 sum_metric, 100 histogram_metric, 100 exponential_histogram_metric +fn assert_metric_names_and_types(request: &ExportMetricsServiceRequest) { + use std::collections::HashMap; + + let mut metric_type_counts: HashMap<(&str, &str), usize> = HashMap::new(); + + for rm in &request.resource_metrics { + for sm in &rm.scope_metrics { + for metric in &sm.metrics { + let type_name = match &metric.data { + Some(MetricData::Gauge(_)) => "Gauge", + Some(MetricData::Sum(_)) => "Sum", + Some(MetricData::Histogram(_)) => "Histogram", + Some(MetricData::ExponentialHistogram(_)) => "ExponentialHistogram", + Some(MetricData::Summary(_)) | None => panic!("unexpected MetricData type"), + }; + + *metric_type_counts + .entry((&metric.name, type_name)) + .or_insert(0) += 1; + } + } + } + + // Verify we have exactly 100 of each metric type with the correct name + // (50 via gRPC + 50 via HTTP = 100 total per type) + assert_eq!( + 
metric_type_counts.get(&("gauge_metric", "Gauge")), + Some(&100), + "Expected 100 gauge_metric (Gauge), got {:?}", + metric_type_counts.get(&("gauge_metric", "Gauge")) + ); + + assert_eq!( + metric_type_counts.get(&("sum_metric", "Sum")), + Some(&100), + "Expected 100 sum_metric (Sum), got {:?}", + metric_type_counts.get(&("sum_metric", "Sum")) + ); + + assert_eq!( + metric_type_counts.get(&("histogram_metric", "Histogram")), + Some(&100), + "Expected 100 histogram_metric (Histogram), got {:?}", + metric_type_counts.get(&("histogram_metric", "Histogram")) + ); + + assert_eq!( + metric_type_counts.get(&("exponential_histogram_metric", "ExponentialHistogram")), + Some(&100), + "Expected 100 exponential_histogram_metric (ExponentialHistogram), got {:?}", + metric_type_counts.get(&("exponential_histogram_metric", "ExponentialHistogram")) + ); + + // Verify total count + let total_count: usize = metric_type_counts.values().sum(); + assert_eq!( + total_count, EXPECTED_METRIC_COUNT, + "Total metric count mismatch. 
Breakdown: {:?}", + metric_type_counts + ); +} + +#[test] +fn vector_sink_otel_sink_metrics_match() { + let collector_content = read_file_helper("metrics", "collector-file-exporter.log") + .expect("Failed to read collector file"); + let vector_content = + read_file_helper("metrics", "vector-file-sink.log").expect("Failed to read vector file"); + + let collector_request = parse_export_metrics_request(&collector_content) + .expect("Failed to parse collector metrics as ExportMetricsServiceRequest"); + let vector_request = parse_export_metrics_request(&vector_content) + .expect("Failed to parse vector metrics as ExportMetricsServiceRequest"); + + // Count total data points across all metric types + let count_data_points = |request: &ExportMetricsServiceRequest| -> usize { + request + .resource_metrics + .iter() + .flat_map(|rm| &rm.scope_metrics) + .flat_map(|sm| &sm.metrics) + .map(|m| match &m.data { + Some(MetricData::Gauge(g)) => g.data_points.len(), + Some(MetricData::Sum(s)) => s.data_points.len(), + Some(MetricData::Histogram(h)) => h.data_points.len(), + Some(MetricData::ExponentialHistogram(eh)) => eh.data_points.len(), + Some(MetricData::Summary(_)) => panic!("Unexpected Summary metric"), + None => 0, + }) + .sum() + }; + + let collector_metric_count = count_data_points(&collector_request); + let vector_metric_count = count_data_points(&vector_request); + + assert_eq!( + collector_metric_count, EXPECTED_METRIC_COUNT, + "Collector produced {collector_metric_count} metric data points, expected {EXPECTED_METRIC_COUNT}" + ); + + assert_eq!( + vector_metric_count, EXPECTED_METRIC_COUNT, + "Vector produced {vector_metric_count} metric data points, expected {EXPECTED_METRIC_COUNT}" + ); + + // Verify service.name attribute + assert_service_name_with( + &collector_request.resource_metrics, + "resource_metrics", + "telemetrygen", + |rl| rl.resource.as_ref(), + ); + assert_service_name_with( + &vector_request.resource_metrics, + "resource_metrics", + "telemetrygen", + 
|rl| rl.resource.as_ref(),
+    );
+
+    // Verify metric data points are valid
+    assert_metric_data_points(&collector_request);
+    assert_metric_data_points(&vector_request);
+
+    // Verify metric names and types match expectations
+    assert_metric_names_and_types(&collector_request);
+    assert_metric_names_and_types(&vector_request);
+
+    // Verify metric attributes are correct
+    assert_metric_attributes(&collector_request);
+    assert_metric_attributes(&vector_request);
+
+    // Both collector and Vector receive 400 metrics total (200 via gRPC + 200 via HTTP).
+    // The 200 metrics consist of 50 each of: Gauge, Sum, Histogram, and ExponentialHistogram.
+    // Compare them directly to verify the entire pipeline works correctly.
+    assert_eq!(
+        collector_request, vector_request,
+        "Collector and Vector metric requests should match"
+    );
+}
diff --git a/tests/e2e/opentelemetry/mod.rs b/tests/e2e/opentelemetry/mod.rs
index af2c2c342fa4c..0ce403ce76429 100644
--- a/tests/e2e/opentelemetry/mod.rs
+++ b/tests/e2e/opentelemetry/mod.rs
@@ -1 +1,116 @@
 pub mod logs;
+pub mod metrics;
+
+use std::{io, path::Path, process::Command};
+
+use prost::Message as ProstMessage;
+use prost_reflect::{DescriptorPool, prost::Message as ProstReflectMessage};
+use vector_lib::opentelemetry::proto::{
+    DESCRIPTOR_BYTES, common::v1::any_value::Value as AnyValueEnum, resource::v1::Resource,
+};
+use vrl::value::Value as VrlValue;
+
+fn read_file_helper(data_type: &str, filename: &str) -> Result<String, io::Error> {
+    let local_path = Path::new(&format!("/output/opentelemetry-{data_type}")).join(filename);
+    if local_path.exists() {
+        // Running inside the runner container, volume is mounted
+        std::fs::read_to_string(local_path)
+    } else {
+        // Running on host
+        let out = Command::new("docker")
+            .args([
+                "run",
+                "--rm",
+                "-v",
+                &format!("opentelemetry-{data_type}_vector_target:/output"),
+                "alpine:3.20",
+                "cat",
+                &format!("/output/{filename}"),
+            ])
+            .output()?;
+
+        if !out.status.success() {
+            return
Err(io::Error::other(format!(
+                "docker run failed: {}\n{}",
+                out.status,
+                String::from_utf8_lossy(&out.stderr)
+            )));
+        }
+
+        Ok(String::from_utf8_lossy(&out.stdout).into_owned())
+    }
+}
+
+fn parse_line_to_export_type_request<Message>(
+    request_message_type: &str,
+    line: &str,
+) -> Result<Message, String>
+where
+    Message: ProstMessage + Default,
+{
+    // Parse JSON and convert to VRL Value
+    let vrl_value: VrlValue = serde_json::from_str::<serde_json::Value>(line)
+        .map_err(|e| format!("Failed to parse JSON: {e}"))?
+        .into();
+
+    // Get the message descriptor from the descriptor pool
+    let descriptor_pool = DescriptorPool::decode(DESCRIPTOR_BYTES)
+        .map_err(|e| format!("Failed to decode descriptor pool: {e}"))?;
+
+    let message_descriptor = descriptor_pool
+        .get_message_by_name(request_message_type)
+        .ok_or_else(|| {
+            format!("Message type '{request_message_type}' not found in descriptor pool",)
+        })?;
+
+    // Encode VRL Value to DynamicMessage using VRL's encode_message with JSON names enabled
+    let dynamic_message = vrl::protobuf::encode::encode_message(
+        &message_descriptor,
+        vrl_value,
+        &vrl::protobuf::encode::Options {
+            use_json_names: true,
+        },
+    )
+    .map_err(|e| format!("Failed to encode VRL value to protobuf: {e}"))?;
+
+    // Encode DynamicMessage to bytes (using prost 0.13.5)
+    let mut buf = Vec::new();
+    ProstReflectMessage::encode(&dynamic_message, &mut buf)
+        .map_err(|e| format!("Failed to encode dynamic message to bytes: {e}"))?;
+
+    // Decode bytes into T (using prost 0.12.6)
+    ProstMessage::decode(&buf[..])
+        .map_err(|e| format!("Failed to decode ExportLogsServiceRequest: {e}"))
+}
+
+pub fn assert_service_name_with<ResourceT, F>(
+    request: &[ResourceT],
+    resource_name: &str,
+    expected_name: &str,
+    get_resource: F,
+) where
+    F: Fn(&ResourceT) -> Option<&Resource>,
+{
+    for (i, item) in request.iter().enumerate() {
+        let resource =
+            get_resource(item).unwrap_or_else(|| panic!("{resource_name}[{i}] missing resource"));
+        let service_name_attr = resource
+            .attributes
+            .iter()
.find(|kv| kv.key == "service.name") + .unwrap_or_else(|| panic!("{resource_name}[{i}] missing 'service.name' attribute")); + let actual_value = service_name_attr + .value + .as_ref() + .and_then(|v| v.value.as_ref()) + .unwrap_or_else(|| panic!("{resource_name}[{i}] 'service.name' has no value")); + if let AnyValueEnum::StringValue(s) = actual_value { + assert_eq!( + s, expected_name, + "{resource_name}[{i}] 'service.name' expected '{expected_name}', got '{s}'" + ); + } else { + panic!("{resource_name}[{i}] 'service.name' is not a string value"); + } + } +} From ae8ad712906742fd77f38f58120db6af27cd757b Mon Sep 17 00:00:00 2001 From: Pavlos Rontidis Date: Thu, 30 Oct 2025 12:06:05 -0400 Subject: [PATCH 017/227] fix(dev): multicast_and_unicast_udp_message no longer hangs on macOS (#24112) --- src/sources/socket/mod.rs | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/src/sources/socket/mod.rs b/src/sources/socket/mod.rs index 9b467426d16ba..c06cee8ce1fd5 100644 --- a/src/sources/socket/mod.rs +++ b/src/sources/socket/mod.rs @@ -1453,9 +1453,12 @@ mod test { init_udp_with_config(tx, config).await; drop(guard); - let from = bind_unused_udp_any(); // Send packet to multicast address - let from = send_lines_udp_from(from, multicast_socket_address, ["test".to_string()]); + let _ = send_lines_udp_from( + bind_unused_udp_any(), + multicast_socket_address, + ["test".to_string()], + ); let event = rx.next().await.expect("must receive an event"); assert_eq!( event.as_log()[log_schema().message_key().unwrap().to_string()], @@ -1466,7 +1469,9 @@ mod test { // therefore we connect to `127.0.0.1` instead (the socket is listening at `0.0.0.0`) let to = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), socket_address.port()); // Send packet to unicast address - send_lines_udp_from(from, to, ["test".to_string()]); + // Use a fresh socket - on macOS, a socket bound to 0.0.0.0 that sends to multicast + // cannot subsequently send unicast packets that the 
listener receives + send_lines_udp_from(bind_unused_udp(), to, ["test".to_string()]); let event = rx.next().await.expect("must receive an event"); assert_eq!( event.as_log()[log_schema().message_key().unwrap().to_string()], From 16f99f6a8b6055759f10699cf3e5311dbcfecee9 Mon Sep 17 00:00:00 2001 From: Derek Zhang Date: Thu, 30 Oct 2025 12:15:03 -0400 Subject: [PATCH 018/227] fix eviction count --- src/sinks/util/buffer/metrics/normalize.rs | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/src/sinks/util/buffer/metrics/normalize.rs b/src/sinks/util/buffer/metrics/normalize.rs index 4ac2b61454e17..1dd218be51d86 100644 --- a/src/sinks/util/buffer/metrics/normalize.rs +++ b/src/sinks/util/buffer/metrics/normalize.rs @@ -483,9 +483,11 @@ impl MetricSet { } } - // Method to get eviction count and reset the counter - pub const fn get_eviction_count(&mut self) -> usize { - self.eviction_count + /// Reset the eviction count and return the previous value + pub fn reset_eviction_count(&mut self) -> usize { + let count = self.eviction_count; + self.eviction_count = 0; + count } /// Perform TTL cleanup if configured and needed. From 9c2c93bdc361ca0e13da7ade8844572e231b4e98 Mon Sep 17 00:00:00 2001 From: Derek Zhang Date: Thu, 30 Oct 2025 12:30:37 -0400 Subject: [PATCH 019/227] wip --- ...ncremental_to_absolute_size_tracking.fix.md | 4 ++++ src/sinks/util/buffer/metrics/normalize.rs | 2 +- src/transforms/incremental_to_absolute.rs | 2 +- .../components/sources/internal_metrics.cue | 18 ++++++++++++++++++ .../transforms/incremental_to_absolute.cue | 6 ++++++ 5 files changed, 30 insertions(+), 2 deletions(-) create mode 100644 changelog.d/incremental_to_absolute_size_tracking.fix.md diff --git a/changelog.d/incremental_to_absolute_size_tracking.fix.md b/changelog.d/incremental_to_absolute_size_tracking.fix.md new file mode 100644 index 0000000000000..52030f9d53715 --- /dev/null +++ b/changelog.d/incremental_to_absolute_size_tracking.fix.md @@ -0,0 +1,4 @@ +1. 
Fix memory tracking on MetricSet in incremental_to_absolute transform by accurately calculating metric sizes. Previously, all sizes were being calculated as 0, resulting in no actual tracking +2. Add metrics for incremental_to_absolute to track events, internally tracked cache size, and evictions + +authors: GreyLilac09 diff --git a/src/sinks/util/buffer/metrics/normalize.rs b/src/sinks/util/buffer/metrics/normalize.rs index 1dd218be51d86..54a46dd9cb869 100644 --- a/src/sinks/util/buffer/metrics/normalize.rs +++ b/src/sinks/util/buffer/metrics/normalize.rs @@ -484,7 +484,7 @@ impl MetricSet { } /// Reset the eviction count and return the previous value - pub fn reset_eviction_count(&mut self) -> usize { + pub fn get_and_reset_eviction_count(&mut self) -> usize { let count = self.eviction_count; self.eviction_count = 0; count diff --git a/src/transforms/incremental_to_absolute.rs b/src/transforms/incremental_to_absolute.rs index 320e92efdf872..5040292a98031 100644 --- a/src/transforms/incremental_to_absolute.rs +++ b/src/transforms/incremental_to_absolute.rs @@ -94,7 +94,7 @@ impl IncrementalToAbsolute { emit!(IncrementalToAbsoluteMetricsCache { size, count: self.data.len(), - evictions: self.data.get_eviction_count(), + evictions: self.data.get_and_reset_eviction_count(), has_capacity_policy, }); } diff --git a/website/cue/reference/components/sources/internal_metrics.cue b/website/cue/reference/components/sources/internal_metrics.cue index 7498601b6aa9c..f95c4139db2aa 100644 --- a/website/cue/reference/components/sources/internal_metrics.cue +++ b/website/cue/reference/components/sources/internal_metrics.cue @@ -291,6 +291,24 @@ components: sources: internal_metrics: { default_namespace: "vector" tags: _component_tags } + component_cache_events: { + description: "The number of events in this component's metrics normalizer cache." 
+ type: "gauge" + default_namespace: "vector" + tags: _component_tags + } + component_cache_bytes: { + description: "The number of events in this component's metrics normalizer cache." + type: "gauge" + default_namespace: "vector" + tags: _component_tags + } + component_cache_events: { + description: "The total number of errors encountered by this component." + type: "gauge" + default_namespace: "vector" + tags: _component_tags + } component_discarded_events_total: { description: "The number of events dropped by this component." type: "counter" diff --git a/website/cue/reference/components/transforms/incremental_to_absolute.cue b/website/cue/reference/components/transforms/incremental_to_absolute.cue index 0c3e655ed2938..5b2963b4a10aa 100644 --- a/website/cue/reference/components/transforms/incremental_to_absolute.cue +++ b/website/cue/reference/components/transforms/incremental_to_absolute.cue @@ -179,4 +179,10 @@ components: transforms: incremental_to_absolute: { """ } } + + telemetry: metrics: { + component_cache_bytes: components.transforms.internal_metrics.output.metrics.open_connections + component_cache_evictions_total: components.transforms.internal_metrics.output.metrics.connection_established_total + component_cache_events: components.sources.internal_metrics.output.metrics.connection_shutdown_total + } } From 376d616b567c93d2e3946c5d482008b3e85d63fd Mon Sep 17 00:00:00 2001 From: Derek Zhang Date: Thu, 30 Oct 2025 12:36:27 -0400 Subject: [PATCH 020/227] fix --- changelog.d/incremental_to_absolute_size_tracking.fix.md | 4 ++-- .../cue/reference/components/sources/internal_metrics.cue | 8 ++++---- .../components/transforms/incremental_to_absolute.cue | 6 +++--- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/changelog.d/incremental_to_absolute_size_tracking.fix.md b/changelog.d/incremental_to_absolute_size_tracking.fix.md index 52030f9d53715..f5562ec4a721f 100644 --- a/changelog.d/incremental_to_absolute_size_tracking.fix.md +++ 
b/changelog.d/incremental_to_absolute_size_tracking.fix.md @@ -1,4 +1,4 @@ -1. Fix memory tracking on MetricSet in incremental_to_absolute transform by accurately calculating metric sizes. Previously, all sizes were being calculated as 0, resulting in no actual tracking -2. Add metrics for incremental_to_absolute to track events, internally tracked cache size, and evictions +1. Fix memory tracking on MetricSet in incremental_to_absolute transform by accurately calculating metric sizes. Previously, all sizes were being calculated as 0, resulting in no actual tracking. +2. Add metrics for incremental_to_absolute to track events, internally tracked cache size, and evictions. authors: GreyLilac09 diff --git a/website/cue/reference/components/sources/internal_metrics.cue b/website/cue/reference/components/sources/internal_metrics.cue index f95c4139db2aa..c089b9ff52c5d 100644 --- a/website/cue/reference/components/sources/internal_metrics.cue +++ b/website/cue/reference/components/sources/internal_metrics.cue @@ -298,14 +298,14 @@ components: sources: internal_metrics: { tags: _component_tags } component_cache_bytes: { - description: "The number of events in this component's metrics normalizer cache." + description: "The size in bytes of events in this component's metrics normalizer cache." type: "gauge" default_namespace: "vector" tags: _component_tags } - component_cache_events: { - description: "The total number of errors encountered by this component." - type: "gauge" + component_cache_evictions_total: { + description: "The total number of cache evictions by this component's metrics normalizer cache." 
+ type: "counter" default_namespace: "vector" tags: _component_tags } diff --git a/website/cue/reference/components/transforms/incremental_to_absolute.cue b/website/cue/reference/components/transforms/incremental_to_absolute.cue index 5b2963b4a10aa..153792994a02a 100644 --- a/website/cue/reference/components/transforms/incremental_to_absolute.cue +++ b/website/cue/reference/components/transforms/incremental_to_absolute.cue @@ -181,8 +181,8 @@ components: transforms: incremental_to_absolute: { } telemetry: metrics: { - component_cache_bytes: components.transforms.internal_metrics.output.metrics.open_connections - component_cache_evictions_total: components.transforms.internal_metrics.output.metrics.connection_established_total - component_cache_events: components.sources.internal_metrics.output.metrics.connection_shutdown_total + component_cache_bytes: components.sources.internal_metrics.output.metrics.component_cache_bytes + component_cache_evictions_total: components.sources.internal_metrics.output.metrics.component_cache_evictions_total + component_cache_events: components.sources.internal_metrics.output.metrics.component_cache_events } } From 62efd61d69c14a64656f740ad14974ebdb985fef Mon Sep 17 00:00:00 2001 From: Derek Zhang Date: Thu, 30 Oct 2025 12:38:12 -0400 Subject: [PATCH 021/227] make const --- src/sinks/util/buffer/metrics/normalize.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/sinks/util/buffer/metrics/normalize.rs b/src/sinks/util/buffer/metrics/normalize.rs index 54a46dd9cb869..ba3989516dde0 100644 --- a/src/sinks/util/buffer/metrics/normalize.rs +++ b/src/sinks/util/buffer/metrics/normalize.rs @@ -484,7 +484,7 @@ impl MetricSet { } /// Reset the eviction count and return the previous value - pub fn get_and_reset_eviction_count(&mut self) -> usize { + pub const fn get_and_reset_eviction_count(&mut self) -> usize { let count = self.eviction_count; self.eviction_count = 0; count From d8abed57442322105ad05992368ec98f4c3227f6 
Mon Sep 17 00:00:00 2001 From: Pavlos Rontidis Date: Thu, 30 Oct 2025 13:47:38 -0400 Subject: [PATCH 022/227] chore(ci): parallelize e2e tests (ci-integration-review) (#24115) chore(ci): parallelize e2e tests --- .github/workflows/ci-integration-review.yml | 37 ++++++--------------- 1 file changed, 10 insertions(+), 27 deletions(-) diff --git a/.github/workflows/ci-integration-review.yml b/.github/workflows/ci-integration-review.yml index 8f25c614c3c58..933caab0801ae 100644 --- a/.github/workflows/ci-integration-review.yml +++ b/.github/workflows/ci-integration-review.yml @@ -133,6 +133,11 @@ jobs: - build-test-runner runs-on: ubuntu-24.04-8core timeout-minutes: 30 + strategy: + matrix: + service: [ + "datadog-logs", "datadog-metrics", "opentelemetry-logs", "opentelemetry-metrics" + ] steps: - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: @@ -147,37 +152,15 @@ jobs: - run: bash scripts/environment/prepare.sh --modules=datadog-ci - - name: e2e-datadog-logs - if: ${{ startsWith(github.event.review.body, '/ci-run-e2e-datadog-logs') - || startsWith(github.event.review.body, '/ci-run-e2e-all') - || startsWith(github.event.review.body, '/ci-run-all') }} - uses: nick-fields/retry@ce71cc2ab81d554ebbe88c79ab5975992d79ba08 # v3.0.2 - with: - timeout_minutes: 35 - max_attempts: 3 - command: bash scripts/run-integration-test.sh e2e datadog-logs - - - name: datadog-e2e-metrics - if: ${{ startsWith(github.event.review.body, '/ci-run-e2e-datadog-metrics') - || startsWith(github.event.review.body, '/ci-run-e2e-all') - || startsWith(github.event.review.body, '/ci-run-all') }} + - name: E2E Tests - ${{ matrix.service }} + if: ${{ startsWith(github.event.review.body, '/ci-run-e2e-all') + || startsWith(github.event.review.body, '/ci-run-all') + || startsWith(github.event.review.body, format('/ci-run-e2e-{0}', matrix.service)) }} uses: nick-fields/retry@ce71cc2ab81d554ebbe88c79ab5975992d79ba08 # v3.0.2 with: timeout_minutes: 35 max_attempts: 3 - 
command: bash scripts/run-integration-test.sh e2e datadog-metrics - - - name: e2e-opentelemetry-logs - if: ${{ startsWith(github.event.review.body, '/ci-run-e2e-opentelemetry-logs') - || startsWith(github.event.review.body, '/ci-run-e2e-all') - || startsWith(github.event.review.body, '/ci-run-all') }} - run: bash scripts/run-integration-test.sh e2e opentelemetry-logs - - - name: e2e-opentelemetry-metrics - if: ${{ startsWith(github.event.review.body, '/ci-run-e2e-opentelemetry-metrics') - || startsWith(github.event.review.body, '/ci-run-e2e-all') - || startsWith(github.event.review.body, '/ci-run-all') }} - run: bash scripts/run-integration-test.sh e2e opentelemetry-metrics + command: bash scripts/run-integration-test.sh e2e ${{ matrix.service }} update-pr-status: From e486428e05061d4810ea943250afbe5167e08d97 Mon Sep 17 00:00:00 2001 From: Pavlos Rontidis Date: Thu, 30 Oct 2025 13:22:52 -0400 Subject: [PATCH 023/227] fix(dev): fix journald tests for local macOS (#24114) --- src/sources/journald.rs | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/src/sources/journald.rs b/src/sources/journald.rs index 04a0c5069d945..f5e7169e93cbb 100644 --- a/src/sources/journald.rs +++ b/src/sources/journald.rs @@ -1215,7 +1215,8 @@ mod tests { let source = config.build(cx).await.unwrap(); tokio::spawn(async move { source.await.unwrap() }); - sleep(Duration::from_millis(100)).await; + // Hack: Sleep to ensure journalctl process starts and emits events before shutdown. + sleep(Duration::from_secs(1)).await; shutdown .shutdown_all(Some(Instant::now() + Duration::from_secs(1))) .await; @@ -1407,7 +1408,8 @@ mod tests { // Make sure the checkpointer cursor is empty assert_eq!(checkpointer.get().await.unwrap(), None); - tokio::time::sleep(Duration::from_millis(100)).await; + // Hack: Sleep to ensure journalctl process starts and emits events. + sleep(Duration::from_secs(1)).await; // Acknowledge all the received events. 
let mut count = 0; @@ -1419,7 +1421,7 @@ mod tests { } assert_eq!(count, 8); - tokio::time::sleep(Duration::from_millis(100)).await; + sleep(Duration::from_millis(100)).await; assert_eq!(checkpointer.get().await.unwrap().as_deref(), Some("8")); } From 42f71067ca5a9c9f989578bbce90ab84b503ecaf Mon Sep 17 00:00:00 2001 From: Pavlos Rontidis Date: Thu, 30 Oct 2025 13:49:31 -0400 Subject: [PATCH 024/227] fix(dev): aws-kinesis-firehose tests (#24117) --- src/sources/aws_kinesis_firehose/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/sources/aws_kinesis_firehose/mod.rs b/src/sources/aws_kinesis_firehose/mod.rs index 171484a25766f..158ba14cd8b08 100644 --- a/src/sources/aws_kinesis_firehose/mod.rs +++ b/src/sources/aws_kinesis_firehose/mod.rs @@ -430,7 +430,7 @@ mod tests { let handle = tokio::spawn(async move { send(address, timestamp, records, key, gzip, record_compression).await }); - sleep(Duration::from_millis(100)).await; + sleep(Duration::from_millis(500)).await; handle } From 1c16e266fd571e8ca3e22a6b93b40e97baf2f1e8 Mon Sep 17 00:00:00 2001 From: Derek Zhang Date: Thu, 30 Oct 2025 14:51:04 -0400 Subject: [PATCH 025/227] wip --- src/sinks/util/buffer/metrics/normalize.rs | 238 ++++++++++++++++++++- 1 file changed, 237 insertions(+), 1 deletion(-) diff --git a/src/sinks/util/buffer/metrics/normalize.rs b/src/sinks/util/buffer/metrics/normalize.rs index ba3989516dde0..597714eb56b75 100644 --- a/src/sinks/util/buffer/metrics/normalize.rs +++ b/src/sinks/util/buffer/metrics/normalize.rs @@ -106,7 +106,7 @@ impl NormalizerSettings for DefaultNormalizerSettings { /// Normalizes metrics according to a set of rules. /// /// Depending on the system in which they are being sent to, metrics may have to be modified in order to fit the data -/// model or constraints placed on that system. Typically, this boils down to whether or not the system can accept +/// model or constraints placed on that system. 
Typically, this boils down to whether or not the system can accept /// absolute metrics or incremental metrics: the latest value of a metric, or the delta between the last time the /// metric was observed and now, respective. Other rules may need to be applied, such as dropping metrics of a specific /// type that the system does not support. @@ -719,3 +719,239 @@ impl Default for MetricSet { Self::new(MetricSetSettings::default()) } } + +#[cfg(test)] +mod tests { + use vector_lib::{ + event::{ + Metric, MetricKind, MetricValue + }, + }; + use super::*; + use similar_asserts::assert_eq; + + // Helper function to create a metric with a unique name and value + fn create_test_metric(name: &str, kind: MetricKind, value: MetricValue) -> Metric { + Metric::new( + name, + kind, + value, + ) + } + + #[test] + fn test_metric_set_max_events_limit() { + // Create a MetricSet with a max events limit of 5 + let settings = MetricSetSettings { + max_events: Some(5), + max_bytes: None, + time_to_live: None, + }; + let mut metric_set = MetricSet::new(settings); + + // Push 10 distinct metrics (0-9) + for i in 0..10 { + let metric = create_test_metric( + &format!("test-metric-{}", i), + MetricKind::Incremental, + MetricValue::Counter { + value: i as f64, + },); + metric_set.insert_update(metric); + } + + // Verify we have only 5 metrics in the cache + assert_eq!(metric_set.len(), 5); + + // Verify eviction count is 5 + assert_eq!(metric_set.get_and_reset_eviction_count(), 5); + + // Convert to vec and verify we have 5 metrics + let metrics = metric_set.into_metrics(); + assert_eq!(metrics.len(), 5); + + // Print the metrics for debugging + println!("Metrics after LRU eviction:"); + for (i, metric) in metrics.iter().enumerate() { + println!(" {}: name={} value={:?}", i, metric.name(), metric.value()); + } + + // Collect the metric names - these should be test-metric-5 through test-metric-9 + // since those are the most recently added metrics that should be retained by the LRU cache + 
let mut metric_names = Vec::new(); + for metric in &metrics { + metric_names.push(metric.name().to_string()); + } + + // Check that we have the expected metric names (the 5 most recently added) + for i in 5..10 { + let expected_name = format!("test-metric-{}", i); + assert!( + metric_names.contains(&expected_name), + "Expected to find metric named {} in result set", expected_name + ); + } + } + + #[test] + fn test_metric_set_max_bytes_limit() { + // For simplicity, we'll use a small max bytes (enough for ~3 metrics) + // The exact byte count will depend on implementation details + let max_bytes = 1000; // Small value for testing + + let settings = MetricSetSettings { + max_events: None, + max_bytes: Some(max_bytes), + time_to_live: None, + }; + let mut metric_set = MetricSet::new(settings); + + // Insert metrics until we exceed the max_bytes limit + for i in 0..10 { + let metric = create_test_metric( + &format!("test-metric-{}", i), + MetricKind::Absolute, + MetricValue::Counter { + value: i as f64, + }, + ); + metric_set.insert_update(metric); + } + + // Verify memory usage is less than or equal to max_bytes + let memory_usage = metric_set.weighted_size(); + assert!(memory_usage <= max_bytes as u64, + "Memory usage {} exceeds max_bytes {}", memory_usage, max_bytes); + + // Verify eviction count is positive (exact value depends on implementation) + let eviction_count = metric_set.get_and_reset_eviction_count(); + assert!(eviction_count > 0, "Expected some evictions due to memory limits"); + + // Convert to vec and verify the metrics + let metrics = metric_set.into_metrics(); + + // Print the metrics for debugging + println!("Metrics after memory-based eviction:"); + for (i, metric) in metrics.iter().enumerate() { + println!(" {}: name={} value={:?}", i, metric.name(), metric.value()); + } + + // The size of metrics should be less than 10 due to eviction + assert!(metrics.len() < 10 && metrics.len() > 0, + "Expected some metrics to be evicted, got {} metrics", 
metrics.len());
+
+        // Check for some of the most recently added metrics (they should be retained by LRU eviction)
+        // We can't check for exact indices since memory usage varies, but at least the most recent
+        // metrics should be present
+        let metric_names: Vec<String> = metrics.iter()
+            .map(|m| m.name().to_string())
+            .collect();
+
+        // Check that at least metric-8 and metric-9 are present (the most recently added)
+        let has_recent = metric_names.contains(&"test-metric-9".to_string()) ||
+            metric_names.contains(&"test-metric-8".to_string());
+
+        assert!(has_recent, "Expected at least one of the most recent metrics to be retained");
+    }
+    //
+    // #[test]
+    // fn test_incremental_to_absolute_conversion() {
+    //     let mut metric_set = MetricSet::default();
+    //
+    //     // Create a series of incremental counter metrics with the same series
+    //     let tags = Some(HashMap::from([("host".to_string(), "test-host".to_string())]));
+    //
+    //     // Process a sequence of incremental metrics
+    //     let incremental1 = create_test_metric("test-metric", MetricKind::Incremental, 1.0, tags.clone());
+    //     let absolute1 = metric_set.make_absolute(incremental1.clone()).unwrap();
+    //
+    //     // First metric should be converted to absolute with the same value
+    //     assert_eq!(absolute1.kind(), MetricKind::Absolute);
+    //     match absolute1.value() {
+    //         MetricValue::Counter { value } => assert_eq!(*value, 1.0),
+    //         _ => panic!("Expected counter metric"),
+    //     }
+    //
+    //     // Send a second incremental metric
+    //     let incremental2 = create_test_metric("test-metric", MetricKind::Incremental, 2.0, tags.clone());
+    //     let absolute2 = metric_set.make_absolute(incremental2.clone()).unwrap();
+    //
+    //     // Second metric should be converted to absolute with accumulated value (1.0 + 2.0 = 3.0)
+    //     assert_eq!(absolute2.kind(), MetricKind::Absolute);
+    //     match absolute2.value() {
+    //         MetricValue::Counter { value } => assert_eq!(*value, 3.0),
+    //         _ => panic!("Expected counter metric"),
+    //     }
+    //
+    //     // Verify
gauges are handled correctly + // let gauge_metric = create_gauge_metric("test-gauge", MetricKind::Incremental, 5.0, tags.clone()); + // + // // Process the gauge metric + // let gauge_absolute = metric_set.make_absolute(gauge_metric.clone()).unwrap(); + // + // // Gauge should be converted to absolute with the same value + // assert_eq!(gauge_absolute.kind(), MetricKind::Absolute); + // match gauge_absolute.value() { + // MetricValue::Gauge { value } => assert_eq!(*value, 5.0), + // _ => panic!("Expected gauge metric"), + // } + // } + // + // #[test] + // fn test_absolute_to_incremental_conversion() { + // let mut metric_set = MetricSet::default(); + // + // // Create a series of absolute counter metrics with the same series + // let tags = Some(HashMap::from([("host".to_string(), "test-host".to_string())])); + // + // // Process a sequence of absolute metrics + // let absolute1 = create_test_metric("test-metric", MetricKind::Absolute, 10.0, tags.clone()); + // + // // First metric should be stored but not emitted (returns None) + // let incremental1 = metric_set.make_incremental(absolute1.clone()); + // assert!(incremental1.is_none(), "First absolute metric should not produce an incremental output"); + // + // // Send a second absolute metric with a higher value + // let absolute2 = create_test_metric("test-metric", MetricKind::Absolute, 15.0, tags.clone()); + // let incremental2 = metric_set.make_incremental(absolute2.clone()).unwrap(); + // + // // Second metric should be converted to incremental with the delta (15.0 - 10.0 = 5.0) + // assert_eq!(incremental2.kind(), MetricKind::Incremental); + // match incremental2.value() { + // MetricValue::Counter { value } => assert_eq!(*value, 5.0), + // _ => panic!("Expected counter metric"), + // } + // + // // Send a third absolute metric with a lower value (simulating counter reset) + // let absolute3 = create_test_metric("test-metric", MetricKind::Absolute, 3.0, tags.clone()); + // let incremental3 = 
metric_set.make_incremental(absolute3.clone()).unwrap(); + // + // // Third metric should produce an incremental metric with the new value + // assert_eq!(incremental3.kind(), MetricKind::Incremental); + // match incremental3.value() { + // MetricValue::Counter { value } => assert_eq!(*value, 3.0), + // _ => panic!("Expected counter metric with reset value"), + // } + // + // // Verify gauges are handled correctly + // let gauge_metric = create_gauge_metric("test-gauge", MetricKind::Absolute, 5.0, tags.clone()); + // + // // Process the gauge metric + // // First gauge should be stored but not emitted + // let gauge_incremental1 = metric_set.make_incremental(gauge_metric.clone()); + // assert!(gauge_incremental1.is_none(), "First gauge metric should not produce an incremental output"); + // + // // Send a second gauge metric + // let gauge_metric2 = create_gauge_metric("test-gauge", MetricKind::Absolute, 8.0, tags.clone()); + // + // // Process the second gauge metric + // let gauge_incremental2 = metric_set.make_incremental(gauge_metric2.clone()).unwrap(); + // + // // Gauge should be converted to incremental with the delta (8.0 - 5.0 = 3.0) + // assert_eq!(gauge_incremental2.kind(), MetricKind::Incremental); + // match gauge_incremental2.value() { + // MetricValue::Gauge { value } => assert_eq!(*value, 3.0), + // _ => panic!("Expected gauge metric"), + // } + // } +} \ No newline at end of file From 9daa02e9f370a0595591c04a9127f93f83afe4c4 Mon Sep 17 00:00:00 2001 From: Derek Zhang Date: Thu, 30 Oct 2025 14:53:59 -0400 Subject: [PATCH 026/227] add test --- src/sinks/util/buffer/metrics/normalize.rs | 94 ++++++++++++---------- 1 file changed, 51 insertions(+), 43 deletions(-) diff --git a/src/sinks/util/buffer/metrics/normalize.rs b/src/sinks/util/buffer/metrics/normalize.rs index 597714eb56b75..47ec35b576eee 100644 --- a/src/sinks/util/buffer/metrics/normalize.rs +++ b/src/sinks/util/buffer/metrics/normalize.rs @@ -853,49 +853,57 @@ mod tests { 
assert!(has_recent, "Expected at least one of the most recent metrics to be retained"); } - // - // #[test] - // fn test_incremental_to_absolute_conversion() { - // let mut metric_set = MetricSet::default(); - // - // // Create a series of incremental counter metrics with the same series - // let tags = Some(HashMap::from([("host".to_string(), "test-host".to_string())])); - // - // // Process a sequence of incremental metrics - // let incremental1 = create_test_metric("test-metric", MetricKind::Incremental, 1.0, tags.clone()); - // let absolute1 = metric_set.make_absolute(incremental1.clone()).unwrap(); - // - // // First metric should be converted to absolute with the same value - // assert_eq!(absolute1.kind(), MetricKind::Absolute); - // match absolute1.value() { - // MetricValue::Counter { value } => assert_eq!(*value, 1.0), - // _ => panic!("Expected counter metric"), - // } - // - // // Send a second incremental metric - // let incremental2 = create_test_metric("test-metric", MetricKind::Incremental, 2.0, tags.clone()); - // let absolute2 = metric_set.make_absolute(incremental2.clone()).unwrap(); - // - // // Second metric should be converted to absolute with accumulated value (1.0 + 2.0 = 3.0) - // assert_eq!(absolute2.kind(), MetricKind::Absolute); - // match absolute2.value() { - // MetricValue::Counter { value } => assert_eq!(*value, 3.0), - // _ => panic!("Expected counter metric"), - // } - // - // // Verify gauges are handled correctly - // let gauge_metric = create_gauge_metric("test-gauge", MetricKind::Incremental, 5.0, tags.clone()); - // - // // Process the gauge metric - // let gauge_absolute = metric_set.make_absolute(gauge_metric.clone()).unwrap(); - // - // // Gauge should be converted to absolute with the same value - // assert_eq!(gauge_absolute.kind(), MetricKind::Absolute); - // match gauge_absolute.value() { - // MetricValue::Gauge { value } => assert_eq!(*value, 5.0), - // _ => panic!("Expected gauge metric"), - // } - // } + #[test] + fn 
test_incremental_to_absolute_conversion() { + let mut metric_set = MetricSet::default(); + + // Process a sequence of incremental counter metrics + let incremental1 = create_test_metric( + "test-metric", + MetricKind::Incremental, + MetricValue::Counter { value: 1.0 }, + ); + let absolute1 = metric_set.make_absolute(incremental1.clone()).unwrap(); + + // First metric should be converted to absolute with the same value + assert_eq!(absolute1.kind(), MetricKind::Absolute); + match absolute1.value() { + MetricValue::Counter { value } => assert_eq!(*value, 1.0), + _ => panic!("Expected counter metric"), + } + + // Send a second incremental metric + let incremental2 = create_test_metric( + "test-metric", + MetricKind::Incremental, + MetricValue::Counter { value: 2.0 }, + ); + let absolute2 = metric_set.make_absolute(incremental2.clone()).unwrap(); + + // Second metric should be converted to absolute with accumulated value (1.0 + 2.0 = 3.0) + assert_eq!(absolute2.kind(), MetricKind::Absolute); + match absolute2.value() { + MetricValue::Counter { value } => assert_eq!(*value, 3.0), + _ => panic!("Expected counter metric"), + } + + // Verify gauges are handled correctly + let gauge_metric = create_test_metric( + "test-gauge", + MetricKind::Incremental, + MetricValue::Gauge { value: 5.0 }, + ); + + // Process the gauge metric + let gauge_absolute = metric_set.make_absolute(gauge_metric.clone()).unwrap(); + + // Gauge should be converted to absolute with the same value + assert_eq!(gauge_absolute.kind(), MetricKind::Absolute); + match gauge_absolute.value() { + MetricValue::Gauge { value } => assert_eq!(*value, 5.0), + _ => panic!("Expected gauge metric"), + } + } // // #[test] // fn test_absolute_to_incremental_conversion() { From 086d8f3c928167e5290647a205132e3466549412 Mon Sep 17 00:00:00 2001 From: Pavlos Rontidis Date: Thu, 30 Oct 2025 15:05:57 -0400 Subject: [PATCH 027/227] chore(dev): add always build option to scripts/run-integration-test.sh (#24120) --- 
scripts/run-integration-test.sh | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/scripts/run-integration-test.sh b/scripts/run-integration-test.sh index 689249800be89..f7ea444478c80 100755 --- a/scripts/run-integration-test.sh +++ b/scripts/run-integration-test.sh @@ -32,6 +32,7 @@ Options: -v Increase verbosity; repeat for more (e.g. -vv or -vvv) -e One or more environments to run (repeatable or comma-separated). If provided, these are used as TEST_ENVIRONMENTS instead of auto-discovery. + -b Always build images (disables --reuse-image which is enabled by default) Notes: - All existing two-argument invocations remain compatible: @@ -43,7 +44,8 @@ USAGE # Parse options # Note: options must come before positional args (standard getopts behavior) TEST_ENV="" -while getopts ":hr:v:e:" opt; do +REUSE_IMAGE="--reuse-image" +while getopts ":hr:v:e:b" opt; do case "$opt" in h) usage @@ -62,6 +64,9 @@ while getopts ":hr:v:e:" opt; do e) TEST_ENV="$OPTARG" ;; + b) + REUSE_IMAGE="" + ;; \?) echo "ERROR: unknown option: -$OPTARG" >&2 usage @@ -127,12 +132,12 @@ for TEST_ENV in "${TEST_ENVIRONMENTS[@]}"; do docker run --rm -v vector_target:/output/"${TEST_NAME}" alpine:3.20 \ sh -c "rm -rf /output/${TEST_NAME}/*" - cargo vdev "${VERBOSITY}" "${TEST_TYPE}" start --build-all --reuse-image "${TEST_NAME}" "${TEST_ENV}" + cargo vdev "${VERBOSITY}" "${TEST_TYPE}" start --build-all ${REUSE_IMAGE} "${TEST_NAME}" "${TEST_ENV}" START_RET=$? print_compose_logs_on_failure "$START_RET" if [[ "$START_RET" -eq 0 ]]; then - cargo vdev "${VERBOSITY}" "${TEST_TYPE}" test --retries "$RETRIES" --build-all --reuse-image "${TEST_NAME}" "${TEST_ENV}" + cargo vdev "${VERBOSITY}" "${TEST_TYPE}" test --retries "$RETRIES" --build-all ${REUSE_IMAGE} "${TEST_NAME}" "${TEST_ENV}" RET=$? 
print_compose_logs_on_failure "$RET" @@ -144,7 +149,7 @@ for TEST_ENV in "${TEST_ENVIRONMENTS[@]}"; do fi # Always stop the environment (best effort cleanup) - cargo vdev "${VERBOSITY}" "${TEST_TYPE}" stop --build-all --reuse-image "${TEST_NAME}" || true + cargo vdev "${VERBOSITY}" "${TEST_TYPE}" stop --build-all ${REUSE_IMAGE} "${TEST_NAME}" || true # Exit early on first failure if [[ "$RET" -ne 0 ]]; then From 0a179e112b1ef65dc9c7297e9329a7bb80d34d89 Mon Sep 17 00:00:00 2001 From: Derek Zhang Date: Thu, 30 Oct 2025 15:19:23 -0400 Subject: [PATCH 028/227] wip --- src/sinks/util/buffer/metrics/normalize.rs | 205 +++++++++++---------- 1 file changed, 112 insertions(+), 93 deletions(-) diff --git a/src/sinks/util/buffer/metrics/normalize.rs b/src/sinks/util/buffer/metrics/normalize.rs index 47ec35b576eee..7d37aae4c3eb4 100644 --- a/src/sinks/util/buffer/metrics/normalize.rs +++ b/src/sinks/util/buffer/metrics/normalize.rs @@ -770,12 +770,6 @@ mod tests { let metrics = metric_set.into_metrics(); assert_eq!(metrics.len(), 5); - // Print the metrics for debugging - println!("Metrics after LRU eviction:"); - for (i, metric) in metrics.iter().enumerate() { - println!(" {}: name={} value={:?}", i, metric.name(), metric.value()); - } - // Collect the metric names - these should be test-metric-5 through test-metric-9 // since those are the most recently added metrics that should be retained by the LRU cache let mut metric_names = Vec::new(); @@ -795,9 +789,21 @@ mod tests { #[test] fn test_metric_set_max_bytes_limit() { - // For simplicity, we'll use a small max bytes (enough for ~3 metrics) - // The exact byte count will depend on implementation details - let max_bytes = 1000; // Small value for testing + use std::mem::size_of; + + // First, let's calculate the size of a single test metric to set our max_bytes appropriately + let test_metric = create_test_metric( + "test-size-check", + MetricKind::Absolute, + MetricValue::Counter { value: 1.0 }, + ); + + // Get the raw 
metric size + let (series, entry) = MetricEntry::from_metric(test_metric, None); + let metric_size = entry.allocated_bytes() + series.allocated_bytes() + size_of::() + size_of::(); + + // Set max_bytes to allow exactly 3 metrics + let max_bytes = 3 * metric_size; let settings = MetricSetSettings { max_events: None, @@ -806,8 +812,8 @@ mod tests { }; let mut metric_set = MetricSet::new(settings); - // Insert metrics until we exceed the max_bytes limit - for i in 0..10 { + // Insert 5 metrics (should cause 2 to be evicted due to memory limits) + for i in 0..5 { let metric = create_test_metric( &format!("test-metric-{}", i), MetricKind::Absolute, @@ -823,37 +829,33 @@ mod tests { assert!(memory_usage <= max_bytes as u64, "Memory usage {} exceeds max_bytes {}", memory_usage, max_bytes); - // Verify eviction count is positive (exact value depends on implementation) + // Verify exactly 2 evictions occurred (we added 5 metrics with capacity for 3) let eviction_count = metric_set.get_and_reset_eviction_count(); - assert!(eviction_count > 0, "Expected some evictions due to memory limits"); + assert_eq!(eviction_count, 2, "Expected exactly 2 evictions due to memory limits"); + + // Verify we have exactly 3 metrics remaining + assert_eq!(metric_set.len(), 3, "Expected exactly 3 metrics in the set"); // Convert to vec and verify the metrics let metrics = metric_set.into_metrics(); + assert_eq!(metrics.len(), 3, "Expected exactly 3 metrics after conversion"); - // Print the metrics for debugging - println!("Metrics after memory-based eviction:"); - for (i, metric) in metrics.iter().enumerate() { - println!(" {}: name={} value={:?}", i, metric.name(), metric.value()); - } - - // The size of metrics should be less than 10 due to eviction - assert!(metrics.len() < 10 && metrics.len() > 0, - "Expected some metrics to be evicted, got {} metrics", metrics.len()); - - // Check for some of the most recently added metrics (they should be retained by LRU eviction) - // We can't check for 
exact indices since memory usage varies, but at least the most recent - // metrics should be present + // Collect the metric names and verify the most recent 3 metrics are present (2-4) let metric_names: Vec = metrics.iter() .map(|m| m.name().to_string()) .collect(); - - // Check that at least metric-8 and metric-9 are present (the most recently added) - let has_recent = metric_names.contains(&"test-metric-9".to_string()) || - metric_names.contains(&"test-metric-8".to_string()); - - assert!(has_recent, "Expected at least one of the most recent metrics to be retained"); - } - #[test] + + // Check that metrics 2, 3 and 4 are present (the 3 most recently added) + for i in 2..5 { + let expected_name = format!("test-metric-{}", i); + assert!( + metric_names.contains(&expected_name), + "Expected to find metric named {} in result set", expected_name + ); + } + } + + #[test] fn test_incremental_to_absolute_conversion() { let mut metric_set = MetricSet::default(); @@ -890,7 +892,7 @@ mod tests { // Verify gauges are handled correctly let gauge_metric = create_test_metric( "test-gauge", - MetricKind::Incremental, + MetricKind::Absolute, MetricValue::Gauge { value: 5.0 }, ); @@ -904,62 +906,79 @@ mod tests { _ => panic!("Expected gauge metric"), } } - // - // #[test] - // fn test_absolute_to_incremental_conversion() { - // let mut metric_set = MetricSet::default(); - // - // // Create a series of absolute counter metrics with the same series - // let tags = Some(HashMap::from([("host".to_string(), "test-host".to_string())])); - // - // // Process a sequence of absolute metrics - // let absolute1 = create_test_metric("test-metric", MetricKind::Absolute, 10.0, tags.clone()); - // - // // First metric should be stored but not emitted (returns None) - // let incremental1 = metric_set.make_incremental(absolute1.clone()); - // assert!(incremental1.is_none(), "First absolute metric should not produce an incremental output"); - // - // // Send a second absolute metric with a higher 
value - // let absolute2 = create_test_metric("test-metric", MetricKind::Absolute, 15.0, tags.clone()); - // let incremental2 = metric_set.make_incremental(absolute2.clone()).unwrap(); - // - // // Second metric should be converted to incremental with the delta (15.0 - 10.0 = 5.0) - // assert_eq!(incremental2.kind(), MetricKind::Incremental); - // match incremental2.value() { - // MetricValue::Counter { value } => assert_eq!(*value, 5.0), - // _ => panic!("Expected counter metric"), - // } - // - // // Send a third absolute metric with a lower value (simulating counter reset) - // let absolute3 = create_test_metric("test-metric", MetricKind::Absolute, 3.0, tags.clone()); - // let incremental3 = metric_set.make_incremental(absolute3.clone()).unwrap(); - // - // // Third metric should produce an incremental metric with the new value - // assert_eq!(incremental3.kind(), MetricKind::Incremental); - // match incremental3.value() { - // MetricValue::Counter { value } => assert_eq!(*value, 3.0), - // _ => panic!("Expected counter metric with reset value"), - // } - // - // // Verify gauges are handled correctly - // let gauge_metric = create_gauge_metric("test-gauge", MetricKind::Absolute, 5.0, tags.clone()); - // - // // Process the gauge metric - // // First gauge should be stored but not emitted - // let gauge_incremental1 = metric_set.make_incremental(gauge_metric.clone()); - // assert!(gauge_incremental1.is_none(), "First gauge metric should not produce an incremental output"); - // - // // Send a second gauge metric - // let gauge_metric2 = create_gauge_metric("test-gauge", MetricKind::Absolute, 8.0, tags.clone()); - // - // // Process the second gauge metric - // let gauge_incremental2 = metric_set.make_incremental(gauge_metric2.clone()).unwrap(); - // - // // Gauge should be converted to incremental with the delta (8.0 - 5.0 = 3.0) - // assert_eq!(gauge_incremental2.kind(), MetricKind::Incremental); - // match gauge_incremental2.value() { - // MetricValue::Gauge { 
value } => assert_eq!(*value, 3.0), - // _ => panic!("Expected gauge metric"), - // } - // } + + #[test] + fn test_absolute_to_incremental_conversion() { + let mut metric_set = MetricSet::default(); + + // Process a sequence of absolute counter metrics + let absolute1 = create_test_metric( + "test-metric", + MetricKind::Absolute, + MetricValue::Counter { value: 10.0 }, + ); + + // First metric should be stored but not emitted (returns None) + let incremental1 = metric_set.make_incremental(absolute1.clone()); + assert!(incremental1.is_none(), "First absolute metric should not produce an incremental output"); + + // Send a second absolute metric with a higher value + let absolute2 = create_test_metric( + "test-metric", + MetricKind::Absolute, + MetricValue::Counter { value: 15.0 }, + ); + let incremental2 = metric_set.make_incremental(absolute2.clone()).unwrap(); + + // Second metric should be converted to incremental with the delta (15.0 - 10.0 = 5.0) + assert_eq!(incremental2.kind(), MetricKind::Incremental); + match incremental2.value() { + MetricValue::Counter { value } => assert_eq!(*value, 5.0), + _ => panic!("Expected counter metric"), + } + + // Send a third absolute metric with a lower value (simulating counter reset) + let absolute3 = create_test_metric( + "test-metric", + MetricKind::Absolute, + MetricValue::Counter { value: 3.0 }, + ); + let incremental3 = metric_set.make_incremental(absolute3.clone()).unwrap(); + + // Third metric should be converted to incremental with the new value (3.0) + // The code treats this as a counter reset + assert_eq!(incremental3.kind(), MetricKind::Incremental); + match incremental3.value() { + MetricValue::Counter { value } => assert_eq!(*value, 3.0), + _ => panic!("Expected counter metric with reset value"), + } + + // Verify gauges are handled correctly + let gauge_metric = create_test_metric( + "test-gauge", + MetricKind::Absolute, + MetricValue::Gauge { value: 5.0 }, + ); + + // Process the gauge metric - first gauge 
should be stored but not emitted + let gauge_incremental1 = metric_set.make_incremental(gauge_metric.clone()); + assert!(gauge_incremental1.is_none(), "First gauge metric should not produce an incremental output"); + + // Send a second gauge metric + let gauge_metric2 = create_test_metric( + "test-gauge", + MetricKind::Absolute, + MetricValue::Gauge { value: 8.0 }, + ); + + // Process the second gauge metric + let gauge_incremental2 = metric_set.make_incremental(gauge_metric2.clone()).unwrap(); + + // Gauge should be converted to incremental with the delta (8.0 - 5.0 = 3.0) + assert_eq!(gauge_incremental2.kind(), MetricKind::Incremental); + match gauge_incremental2.value() { + MetricValue::Gauge { value } => assert_eq!(*value, 3.0), + _ => panic!("Expected gauge metric"), + } + } } \ No newline at end of file From cd7409d34bebcdbe03754399b4af79b7e16388ce Mon Sep 17 00:00:00 2001 From: Derek Zhang Date: Thu, 30 Oct 2025 16:16:40 -0400 Subject: [PATCH 029/227] add tests --- src/sinks/util/buffer/metrics/normalize.rs | 172 ++++++--------------- 1 file changed, 44 insertions(+), 128 deletions(-) diff --git a/src/sinks/util/buffer/metrics/normalize.rs b/src/sinks/util/buffer/metrics/normalize.rs index 7d37aae4c3eb4..a368bc48fbc16 100644 --- a/src/sinks/util/buffer/metrics/normalize.rs +++ b/src/sinks/util/buffer/metrics/normalize.rs @@ -722,21 +722,13 @@ impl Default for MetricSet { #[cfg(test)] mod tests { - use vector_lib::{ - event::{ - Metric, MetricKind, MetricValue - }, - }; use super::*; use similar_asserts::assert_eq; + use vector_lib::event::{Metric, MetricKind, MetricValue}; // Helper function to create a metric with a unique name and value fn create_test_metric(name: &str, kind: MetricKind, value: MetricValue) -> Metric { - Metric::new( - name, - kind, - value, - ) + Metric::new(name, kind, value) } #[test] @@ -748,109 +740,41 @@ mod tests { time_to_live: None, }; let mut metric_set = MetricSet::new(settings); - + // Push 10 distinct metrics (0-9) for i in 
0..10 { let metric = create_test_metric( &format!("test-metric-{}", i), MetricKind::Incremental, - MetricValue::Counter { - value: i as f64, - },); + MetricValue::Counter { value: i as f64 }, + ); metric_set.insert_update(metric); } - + // Verify we have only 5 metrics in the cache assert_eq!(metric_set.len(), 5); - + // Verify eviction count is 5 assert_eq!(metric_set.get_and_reset_eviction_count(), 5); - + // Convert to vec and verify we have 5 metrics let metrics = metric_set.into_metrics(); assert_eq!(metrics.len(), 5); - + // Collect the metric names - these should be test-metric-5 through test-metric-9 // since those are the most recently added metrics that should be retained by the LRU cache let mut metric_names = Vec::new(); for metric in &metrics { metric_names.push(metric.name().to_string()); } - + // Check that we have the expected metric names (the 5 most recently added) for i in 5..10 { let expected_name = format!("test-metric-{}", i); assert!( metric_names.contains(&expected_name), - "Expected to find metric named {} in result set", expected_name - ); - } - } - - #[test] - fn test_metric_set_max_bytes_limit() { - use std::mem::size_of; - - // First, let's calculate the size of a single test metric to set our max_bytes appropriately - let test_metric = create_test_metric( - "test-size-check", - MetricKind::Absolute, - MetricValue::Counter { value: 1.0 }, - ); - - // Get the raw metric size - let (series, entry) = MetricEntry::from_metric(test_metric, None); - let metric_size = entry.allocated_bytes() + series.allocated_bytes() + size_of::() + size_of::(); - - // Set max_bytes to allow exactly 3 metrics - let max_bytes = 3 * metric_size; - - let settings = MetricSetSettings { - max_events: None, - max_bytes: Some(max_bytes), - time_to_live: None, - }; - let mut metric_set = MetricSet::new(settings); - - // Insert 5 metrics (should cause 2 to be evicted due to memory limits) - for i in 0..5 { - let metric = create_test_metric( - 
&format!("test-metric-{}", i), - MetricKind::Absolute, - MetricValue::Counter { - value: i as f64, - }, - ); - metric_set.insert_update(metric); - } - - // Verify memory usage is less than or equal to max_bytes - let memory_usage = metric_set.weighted_size(); - assert!(memory_usage <= max_bytes as u64, - "Memory usage {} exceeds max_bytes {}", memory_usage, max_bytes); - - // Verify exactly 2 evictions occurred (we added 5 metrics with capacity for 3) - let eviction_count = metric_set.get_and_reset_eviction_count(); - assert_eq!(eviction_count, 2, "Expected exactly 2 evictions due to memory limits"); - - // Verify we have exactly 3 metrics remaining - assert_eq!(metric_set.len(), 3, "Expected exactly 3 metrics in the set"); - - // Convert to vec and verify the metrics - let metrics = metric_set.into_metrics(); - assert_eq!(metrics.len(), 3, "Expected exactly 3 metrics after conversion"); - - // Collect the metric names and verify the most recent 3 metrics are present (2-4) - let metric_names: Vec = metrics.iter() - .map(|m| m.name().to_string()) - .collect(); - - // Check that metrics 2, 3 and 4 are present (the 3 most recently added) - for i in 2..5 { - let expected_name = format!("test-metric-{}", i); - assert!( - metric_names.contains(&expected_name), - "Expected to find metric named {} in result set", expected_name + "Expected to find metric named {} in result set", + expected_name ); } } @@ -889,19 +813,19 @@ mod tests { _ => panic!("Expected counter metric"), } - // Verify gauges are handled correctly - let gauge_metric = create_test_metric( - "test-gauge", + // Verify absolute metrics are handled correctly + let absolute_test = create_test_metric( + "test-gauge", MetricKind::Absolute, MetricValue::Gauge { value: 5.0 }, ); - // Process the gauge metric - let gauge_absolute = metric_set.make_absolute(gauge_metric.clone()).unwrap(); + // Process the absolute metric + let absolute_test = metric_set.make_absolute(absolute_test.clone()).unwrap(); - // Gauge should 
be converted to absolute with the same value - assert_eq!(gauge_absolute.kind(), MetricKind::Absolute); - match gauge_absolute.value() { + // Absolute metrics should be returned unchanged + assert_eq!(absolute_test.kind(), MetricKind::Absolute); + match absolute_test.value() { MetricValue::Gauge { value } => assert_eq!(*value, 5.0), _ => panic!("Expected gauge metric"), } @@ -920,7 +844,10 @@ mod tests { // First metric should be stored but not emitted (returns None) let incremental1 = metric_set.make_incremental(absolute1.clone()); - assert!(incremental1.is_none(), "First absolute metric should not produce an incremental output"); + assert!( + incremental1.is_none(), + "First absolute metric should not produce an incremental output" + ); // Send a second absolute metric with a higher value let absolute2 = create_test_metric( @@ -943,42 +870,31 @@ mod tests { MetricKind::Absolute, MetricValue::Counter { value: 3.0 }, ); - let incremental3 = metric_set.make_incremental(absolute3.clone()).unwrap(); - - // Third metric should be converted to incremental with the new value (3.0) - // The code treats this as a counter reset - assert_eq!(incremental3.kind(), MetricKind::Incremental); - match incremental3.value() { - MetricValue::Counter { value } => assert_eq!(*value, 3.0), - _ => panic!("Expected counter metric with reset value"), - } - // Verify gauges are handled correctly - let gauge_metric = create_test_metric( - "test-gauge", - MetricKind::Absolute, - MetricValue::Gauge { value: 5.0 }, + // For counter resets, we expect None to be returned + let incremental3 = metric_set.make_incremental(absolute3.clone()); + assert!( + incremental3.is_none(), + "Expected None when counter resets to a lower value" ); - // Process the gauge metric - first gauge should be stored but not emitted - let gauge_incremental1 = metric_set.make_incremental(gauge_metric.clone()); - assert!(gauge_incremental1.is_none(), "First gauge metric should not produce an incremental output"); - - // 
Send a second gauge metric - let gauge_metric2 = create_test_metric( - "test-gauge", - MetricKind::Absolute, - MetricValue::Gauge { value: 8.0 }, + // Verify incremental metrics are handled correctly + let incremental_test = create_test_metric( + "test-counter", + MetricKind::Incremental, + MetricValue::Counter { value: 5.0 }, ); - // Process the second gauge metric - let gauge_incremental2 = metric_set.make_incremental(gauge_metric2.clone()).unwrap(); + // Process the incremental metric + let incremental_result = metric_set + .make_incremental(incremental_test.clone()) + .unwrap(); - // Gauge should be converted to incremental with the delta (8.0 - 5.0 = 3.0) - assert_eq!(gauge_incremental2.kind(), MetricKind::Incremental); - match gauge_incremental2.value() { - MetricValue::Gauge { value } => assert_eq!(*value, 3.0), - _ => panic!("Expected gauge metric"), + // Incremental metrics should be returned unchanged + assert_eq!(incremental_result.kind(), MetricKind::Incremental); + match incremental_result.value() { + MetricValue::Counter { value } => assert_eq!(*value, 5.0), + _ => panic!("Expected counter metric"), } } -} \ No newline at end of file +} From d6421e32e38ed924893cc94ec3959c09fde16c33 Mon Sep 17 00:00:00 2001 From: Thomas Date: Fri, 31 Oct 2025 11:26:35 -0400 Subject: [PATCH 030/227] chore(deps): Update `dd-rust-license-tool` to v1.0.4 (#24122) --- LICENSE-3rdparty.csv | 110 +++++++++++++++++++++++++++++++++ aqua/aqua.yaml | 2 +- scripts/environment/prepare.sh | 2 +- 3 files changed, 112 insertions(+), 2 deletions(-) diff --git a/LICENSE-3rdparty.csv b/LICENSE-3rdparty.csv index c6050f1474c11..6967fbd458500 100644 --- a/LICENSE-3rdparty.csv +++ b/LICENSE-3rdparty.csv @@ -10,8 +10,12 @@ aes-siv,https://github.com/RustCrypto/AEADs,Apache-2.0 OR MIT,RustCrypto Develop ahash,https://github.com/tkaitchuck/ahash,MIT OR Apache-2.0,Tom Kaitchuck aho-corasick,https://github.com/BurntSushi/aho-corasick,Unlicense OR MIT,Andrew Gallant 
alloc-no-stdlib,https://github.com/dropbox/rust-alloc-no-stdlib,BSD-3-Clause,Daniel Reiter Horn +alloc-stdlib,https://github.com/dropbox/rust-alloc-no-stdlib,BSD-3-Clause,Daniel Reiter Horn allocator-api2,https://github.com/zakarumych/allocator-api2,MIT OR Apache-2.0,Zakarum amq-protocol,https://github.com/amqp-rs/amq-protocol,BSD-2-Clause,Marc-Antoine Perennou <%arc-Antoine@Perennou.com> +amq-protocol-tcp,https://github.com/amqp-rs/amq-protocol,BSD-2-Clause,Marc-Antoine Perennou <%arc-Antoine@Perennou.com> +amq-protocol-types,https://github.com/amqp-rs/amq-protocol,BSD-2-Clause,Marc-Antoine Perennou <%arc-Antoine@Perennou.com> +amq-protocol-uri,https://github.com/amqp-rs/amq-protocol,BSD-2-Clause,Marc-Antoine Perennou <%arc-Antoine@Perennou.com> android-tzdata,https://github.com/RumovZ/android-tzdata,MIT OR Apache-2.0,RumovZ android_system_properties,https://github.com/nical/android_system_properties,MIT OR Apache-2.0,Nicolas Silva ansi_term,https://github.com/ogham/rust-ansi-term,MIT,"ogham@bsago.me, Ryan Scheel (Havvy) , Josh Triplett " @@ -25,6 +29,7 @@ apache-avro,https://github.com/apache/avro,Apache-2.0,Apache Avro team , Manish Goregaokar , Simonas Kazlauskas , Brian L. 
Troutwine , Corey Farwell " arc-swap,https://github.com/vorner/arc-swap,MIT OR Apache-2.0,Michal 'vorner' Vaner arr_macro,https://github.com/JoshMcguigan/arr_macro,MIT OR Apache-2.0,Josh Mcguigan +arr_macro_impl,https://github.com/JoshMcguigan/arr_macro,MIT OR Apache-2.0,Josh Mcguigan arrayvec,https://github.com/bluss/arrayvec,MIT OR Apache-2.0,bluss ascii,https://github.com/tomprogrammer/rust-ascii,Apache-2.0 OR MIT,"Thomas Bahn , Torbjørn Birch Moltu , Simon Sapin " async-broadcast,https://github.com/smol-rs/async-broadcast,MIT OR Apache-2.0,"Stjepan Glavina , Yoshua Wuyts , Zeeshan Ali Khan " @@ -33,7 +38,12 @@ async-compression,https://github.com/Nullus157/async-compression,MIT OR Apache-2 async-executor,https://github.com/smol-rs/async-executor,Apache-2.0 OR MIT,Stjepan Glavina async-fs,https://github.com/smol-rs/async-fs,Apache-2.0 OR MIT,Stjepan Glavina async-global-executor,https://github.com/Keruspe/async-global-executor,Apache-2.0 OR MIT,Marc-Antoine Perennou +async-global-executor-trait,https://github.com/amqp-rs/executor-trait,Apache-2.0 OR MIT,Marc-Antoine Perennou async-graphql,https://github.com/async-graphql/async-graphql,MIT OR Apache-2.0,"sunli , Koxiaet" +async-graphql-derive,https://github.com/async-graphql/async-graphql,MIT OR Apache-2.0,"sunli , Koxiaet" +async-graphql-parser,https://github.com/async-graphql/async-graphql,MIT OR Apache-2.0,"sunli , Koxiaet" +async-graphql-value,https://github.com/async-graphql/async-graphql,MIT OR Apache-2.0,"sunli , Koxiaet" +async-graphql-warp,https://github.com/async-graphql/async-graphql,MIT OR Apache-2.0,"sunli , Koxiaet" async-io,https://github.com/smol-rs/async-io,Apache-2.0 OR MIT,Stjepan Glavina async-lock,https://github.com/smol-rs/async-lock,Apache-2.0 OR MIT,Stjepan Glavina async-nats,https://github.com/nats-io/nats.rs,Apache-2.0,"Tomasz Pietrek , Casper Beyer " @@ -43,6 +53,7 @@ async-reactor-trait,https://github.com/amqp-rs/reactor-trait,Apache-2.0 OR MIT,M 
async-recursion,https://github.com/dcchut/async-recursion,MIT OR Apache-2.0,Robert Usher <266585+dcchut@users.noreply.github.com> async-signal,https://github.com/smol-rs/async-signal,Apache-2.0 OR MIT,John Nunley async-stream,https://github.com/tokio-rs/async-stream,MIT,Carl Lerche +async-stream-impl,https://github.com/tokio-rs/async-stream,MIT,Carl Lerche async-task,https://github.com/smol-rs/async-task,Apache-2.0 OR MIT,Stjepan Glavina async-trait,https://github.com/dtolnay/async-trait,MIT OR Apache-2.0,David Tolnay atoi,https://github.com/pacman82/atoi-rs,MIT,Markus Klein @@ -105,6 +116,7 @@ block-padding,https://github.com/RustCrypto/utils,MIT OR Apache-2.0,RustCrypto D blocking,https://github.com/smol-rs/blocking,Apache-2.0 OR MIT,Stjepan Glavina bloomy,https://docs.rs/bloomy/,MIT,"Aleksandr Bezobchuk , Alexis Sellier " bollard,https://github.com/fussybeaver/bollard,Apache-2.0,Bollard contributors +bollard-stubs,https://github.com/fussybeaver/bollard,Apache-2.0,Bollard contributors borrow-or-share,https://github.com/yescallop/borrow-or-share,MIT-0,Scallop Ye brotli,https://github.com/dropbox/rust-brotli,BSD-3-Clause AND MIT,"Daniel Reiter Horn , The Brotli Authors" brotli-decompressor,https://github.com/dropbox/rust-brotli-decompressor,BSD-3-Clause OR MIT,"Daniel Reiter Horn , The Brotli Authors" @@ -112,6 +124,7 @@ bson,https://github.com/mongodb/bson-rust,MIT,"Y. T. 
Chung , bstr,https://github.com/BurntSushi/bstr,MIT OR Apache-2.0,Andrew Gallant bumpalo,https://github.com/fitzgen/bumpalo,MIT OR Apache-2.0,Nick Fitzgerald bytecheck,https://github.com/djkoloski/bytecheck,MIT,David Koloski +bytecheck_derive,https://github.com/djkoloski/bytecheck,MIT,David Koloski bytecount,https://github.com/llogiq/bytecount,Apache-2.0 OR MIT,"Andre Bogus , Joshua Landau " bytemuck,https://github.com/Lokathor/bytemuck,Zlib OR Apache-2.0 OR MIT,Lokathor byteorder,https://github.com/BurntSushi/byteorder,Unlicense OR MIT,Andrew Gallant @@ -131,6 +144,8 @@ charset,https://github.com/hsivonen/charset,MIT OR Apache-2.0,Henri Sivonen +ciborium-io,https://github.com/enarx/ciborium,Apache-2.0,Nathaniel McCallum +ciborium-ll,https://github.com/enarx/ciborium,Apache-2.0,Nathaniel McCallum cidr,https://github.com/stbuehler/rust-cidr,MIT,Stefan Bühler cipher,https://github.com/RustCrypto/traits,MIT OR Apache-2.0,RustCrypto Developers clap,https://github.com/clap-rs/clap,MIT OR Apache-2.0,The clap Authors @@ -145,6 +160,8 @@ colored,https://github.com/mackwic/colored,MPL-2.0,Thomas Wickham community-id,https://github.com/traceflight/rs-community-id,MIT OR Apache-2.0,Julian Wang compact_str,https://github.com/ParkMyCar/compact_str,MIT,Parker Timmerman +compression-codecs,https://github.com/Nullus157/async-compression,MIT OR Apache-2.0,"Wim Looman , Allen Bui " +compression-core,https://github.com/Nullus157/async-compression,MIT OR Apache-2.0,"Wim Looman , Allen Bui " concurrent-queue,https://github.com/smol-rs/concurrent-queue,Apache-2.0 OR MIT,"Stjepan Glavina , Taiki Endo , John Nunley " const-oid,https://github.com/RustCrypto/formats/tree/master/const-oid,Apache-2.0 OR MIT,RustCrypto Developers convert_case,https://github.com/rutrum/convert-case,MIT,David Purdum @@ -154,6 +171,7 @@ cookie-factory,https://github.com/rust-bakery/cookie-factory,MIT,"Geoffroy Coupr cookie_store,https://github.com/pfernie/cookie_store,MIT OR Apache-2.0,Patrick Fernie 
core-foundation,https://github.com/servo/core-foundation-rs,MIT OR Apache-2.0,The Servo Project Developers core-foundation,https://github.com/servo/core-foundation-rs,MIT OR Apache-2.0,The Servo Project Developers +core-foundation-sys,https://github.com/servo/core-foundation-rs,MIT OR Apache-2.0,The Servo Project Developers core2,https://github.com/bbqsrc/core2,Apache-2.0 OR MIT,Brendan Molloy cpufeatures,https://github.com/RustCrypto/utils,MIT OR Apache-2.0,RustCrypto Developers crc,https://github.com/mrhooray/crc-rs,MIT OR Apache-2.0,"Rui Hu , Akhil Velagapudi <4@4khil.com>" @@ -173,11 +191,14 @@ crypto-bigint,https://github.com/RustCrypto/crypto-bigint,Apache-2.0 OR MIT,Rust crypto-common,https://github.com/RustCrypto/traits,MIT OR Apache-2.0,RustCrypto Developers crypto_secretbox,https://github.com/RustCrypto/nacl-compat/tree/master/crypto_secretbox,Apache-2.0 OR MIT,RustCrypto Developers csv,https://github.com/BurntSushi/rust-csv,Unlicense OR MIT,Andrew Gallant +csv-core,https://github.com/BurntSushi/rust-csv,Unlicense OR MIT,Andrew Gallant ctr,https://github.com/RustCrypto/block-modes,MIT OR Apache-2.0,RustCrypto Developers curl-sys,https://github.com/alexcrichton/curl-rust,MIT,Alex Crichton curve25519-dalek,https://github.com/dalek-cryptography/curve25519-dalek/tree/main/curve25519-dalek,BSD-3-Clause,"Isis Lovecruft , Henry de Valence " curve25519-dalek-derive,https://github.com/dalek-cryptography/curve25519-dalek,MIT OR Apache-2.0,The curve25519-dalek-derive Authors darling,https://github.com/TedDriggs/darling,MIT,Ted Driggs +darling_core,https://github.com/TedDriggs/darling,MIT,Ted Driggs +darling_macro,https://github.com/TedDriggs/darling,MIT,Ted Driggs dary_heap,https://github.com/hanmertens/dary_heap,MIT OR Apache-2.0,Han Mertens dashmap,https://github.com/xacrimon/dashmap,MIT,Acrimon data-encoding,https://github.com/ia0/data-encoding,MIT,Julien Cretin @@ -185,6 +206,7 @@ data-url,https://github.com/servo/rust-url,MIT OR Apache-2.0,Simon Sapin 
dbl,https://github.com/RustCrypto/utils,MIT OR Apache-2.0,RustCrypto Developers deadpool,https://github.com/bikeshedder/deadpool,MIT OR Apache-2.0,Michael P. Jung +deadpool-runtime,https://github.com/bikeshedder/deadpool,MIT OR Apache-2.0,Michael P. Jung der,https://github.com/RustCrypto/formats/tree/master/der,Apache-2.0 OR MIT,RustCrypto Developers deranged,https://github.com/jhpratt/deranged,MIT OR Apache-2.0,Jacob Pratt derivative,https://github.com/mcarton/rust-derivative,MIT OR Apache-2.0,mcarton @@ -201,6 +223,7 @@ dns-lookup,https://github.com/keeperofdakeys/dns-lookup,MIT OR Apache-2.0,Josh D doc-comment,https://github.com/GuillaumeGomez/doc-comment,MIT,Guillaume Gomez document-features,https://github.com/slint-ui/document-features,MIT OR Apache-2.0,Slint Developers domain,https://github.com/nlnetlabs/domain,BSD-3-Clause,NLnet Labs +domain-macros,https://github.com/nlnetlabs/domain,BSD-3-Clause,NLnet Labs dotenvy,https://github.com/allan2/dotenvy,MIT,"Noemi Lapresta , Craig Hills , Mike Piccolo , Alice Maz , Sean Griffin , Adam Sharp , Arpad Borsos , Allan Zhang " dyn-clone,https://github.com/dtolnay/dyn-clone,MIT OR Apache-2.0,David Tolnay ecdsa,https://github.com/RustCrypto/signatures/tree/master/ecdsa,Apache-2.0 OR MIT,RustCrypto Developers @@ -215,6 +238,7 @@ endian-type,https://github.com/Lolirofle/endian-type,MIT,Lolirofle enum_dispatch,https://gitlab.com/antonok/enum_dispatch,MIT OR Apache-2.0,Anton Lazarev enumflags2,https://github.com/meithecatte/enumflags2,MIT OR Apache-2.0,"maik klein , Maja Kądziołka " +enumflags2_derive,https://github.com/meithecatte/enumflags2,MIT OR Apache-2.0,"maik klein , Maja Kądziołka " env_logger,https://github.com/env-logger-rs/env_logger,MIT OR Apache-2.0,The Rust Project Developers equivalent,https://github.com/cuviper/equivalent,Apache-2.0 OR MIT,The equivalent Authors erased-serde,https://github.com/dtolnay/erased-serde,MIT OR Apache-2.0,David Tolnay @@ -226,6 +250,7 @@ 
event-listener,https://github.com/smol-rs/event-listener,Apache-2.0 OR MIT,Stjep event-listener,https://github.com/smol-rs/event-listener,Apache-2.0 OR MIT,"Stjepan Glavina , John Nunley " event-listener-strategy,https://github.com/smol-rs/event-listener-strategy,Apache-2.0 OR MIT,John Nunley evmap,https://github.com/jonhoo/rust-evmap,MIT OR Apache-2.0,Jon Gjengset +evmap-derive,https://github.com/jonhoo/rust-evmap,MIT OR Apache-2.0,Jon Gjengset executor-trait,https://github.com/amqp-rs/executor-trait,Apache-2.0 OR MIT,Marc-Antoine Perennou exitcode,https://github.com/benwilber/exitcode,Apache-2.0,Ben Wilber fakedata_generator,https://github.com/kevingimbel/fakedata_generator,MIT,Kevin Gimbel @@ -243,6 +268,8 @@ flume,https://github.com/zesterer/flume,Apache-2.0 OR MIT,Joshua Barretto foldhash,https://github.com/orlp/foldhash,Zlib,Orson Peters foreign-types,https://github.com/sfackler/foreign-types,MIT OR Apache-2.0,Steven Fackler +foreign-types-shared,https://github.com/sfackler/foreign-types,MIT OR Apache-2.0,Steven Fackler +form_urlencoded,https://github.com/servo/rust-url,MIT OR Apache-2.0,The rust-url developers fraction,https://github.com/dnsl48/fraction,MIT OR Apache-2.0,dnsl48 fsevent-sys,https://github.com/octplane/fsevent-rust/tree/master/fsevent-sys,MIT,Pierre Baillet fslock,https://github.com/brunoczim/fslock,MIT,The fslock Authors @@ -283,9 +310,17 @@ hashbag,https://github.com/jonhoo/hashbag,MIT OR Apache-2.0,Jon Gjengset hashlink,https://github.com/kyren/hashlink,MIT OR Apache-2.0,kyren headers,https://github.com/hyperium/headers,MIT,Sean McArthur +headers-core,https://github.com/hyperium/headers,MIT,Sean McArthur heck,https://github.com/withoutboats/heck,MIT OR Apache-2.0,The heck Authors heck,https://github.com/withoutboats/heck,MIT OR Apache-2.0,Without Boats heim,https://github.com/heim-rs/heim,Apache-2.0 OR MIT,svartalf +heim-common,https://github.com/heim-rs/heim,Apache-2.0 OR MIT,svartalf +heim-cpu,https://github.com/heim-rs/heim,Apache-2.0 OR 
MIT,svartalf +heim-disk,https://github.com/heim-rs/heim,Apache-2.0 OR MIT,svartalf +heim-host,https://github.com/heim-rs/heim,Apache-2.0 OR MIT,svartalf +heim-memory,https://github.com/heim-rs/heim,Apache-2.0 OR MIT,svartalf +heim-net,https://github.com/heim-rs/heim,Apache-2.0 OR MIT,svartalf +heim-runtime,https://github.com/heim-rs/heim,Apache-2.0 OR MIT,svartalf hermit-abi,https://github.com/hermit-os/hermit-rs,MIT OR Apache-2.0,Stefan Lankes hex,https://github.com/KokaKiwi/rust-hex,MIT OR Apache-2.0,KokaKiwi hickory-proto,https://github.com/hickory-dns/hickory-dns,MIT OR Apache-2.0,The contributors to Hickory DNS @@ -295,6 +330,7 @@ home,https://github.com/rust-lang/cargo,MIT OR Apache-2.0,Brian Anderson , svartalf " http,https://github.com/hyperium/http,MIT OR Apache-2.0,"Alex Crichton , Carl Lerche , Sean McArthur " http-body,https://github.com/hyperium/http-body,MIT,"Carl Lerche , Lucio Franco , Sean McArthur " +http-body-util,https://github.com/hyperium/http-body,MIT,"Carl Lerche , Lucio Franco , Sean McArthur " http-range-header,https://github.com/MarcusGrass/parse-range-headers,MIT,The http-range-header Authors http-serde,https://gitlab.com/kornelski/http-serde,Apache-2.0 OR MIT,Kornel http-types,https://github.com/http-rs/http-types,MIT OR Apache-2.0,Yoshua Wuyts @@ -323,6 +359,7 @@ icu_properties_data,https://github.com/unicode-org/icu4x,Unicode-3.0,The ICU4X P icu_provider,https://github.com/unicode-org/icu4x,Unicode-3.0,The ICU4X Project Developers icu_provider_macros,https://github.com/unicode-org/icu4x,Unicode-3.0,The ICU4X Project Developers ident_case,https://github.com/TedDriggs/ident_case,MIT OR Apache-2.0,Ted Driggs +idna,https://github.com/servo/rust-url,MIT OR Apache-2.0,The rust-url developers idna_adapter,https://github.com/hsivonen/idna_adapter,Apache-2.0 OR MIT,The rust-url developers indexmap,https://github.com/bluss/indexmap,Apache-2.0 OR MIT,The indexmap Authors indexmap,https://github.com/indexmap-rs/indexmap,Apache-2.0 OR MIT,The 
indexmap Authors @@ -359,11 +396,15 @@ kqueue,https://gitlab.com/rust-kqueue/rust-kqueue,MIT,William Orr , Daniel (dmilith) Dettlaff " krb5-src,https://github.com/MaterializeInc/rust-krb5-src,Apache-2.0,"Materialize, Inc." kube,https://github.com/kube-rs/kube,Apache-2.0,"clux , Natalie Klestrup Röijezon , kazk " +kube-client,https://github.com/kube-rs/kube,Apache-2.0,"clux , Natalie Klestrup Röijezon , kazk " +kube-core,https://github.com/kube-rs/kube,Apache-2.0,"clux , Natalie Klestrup Röijezon , kazk " +kube-runtime,https://github.com/kube-rs/kube,Apache-2.0,"clux , Natalie Klestrup Röijezon , kazk " lalrpop-util,https://github.com/lalrpop/lalrpop,Apache-2.0 OR MIT,Niko Matsakis lapin,https://github.com/amqp-rs/lapin,MIT,"Geoffroy Couprie , Marc-Antoine Perennou " lazy_static,https://github.com/rust-lang-nursery/lazy-static.rs,MIT OR Apache-2.0,Marvin Löbel libc,https://github.com/rust-lang/libc,MIT OR Apache-2.0,The Rust Project Developers libflate,https://github.com/sile/libflate,MIT,Takeru Ohta +libflate_lz77,https://github.com/sile/libflate,MIT,Takeru Ohta libm,https://github.com/rust-lang/libm,MIT OR Apache-2.0,Jorge Aparicio libsqlite3-sys,https://github.com/rusqlite/rusqlite,MIT,The rusqlite developers libz-rs-sys,https://github.com/trifectatechfoundation/zlib-rs,Zlib,The libz-rs-sys Authors @@ -374,11 +415,13 @@ linux-raw-sys,https://github.com/sunfishcode/linux-raw-sys,Apache-2.0 WITH LLVM- listenfd,https://github.com/mitsuhiko/listenfd,Apache-2.0,Armin Ronacher litemap,https://github.com/unicode-org/icu4x,Unicode-3.0,The ICU4X Project Developers litrs,https://github.com/LukasKalbertodt/litrs,MIT OR Apache-2.0,Lukas Kalbertodt +lock_api,https://github.com/Amanieu/parking_lot,MIT OR Apache-2.0,Amanieu d'Antras lockfree-object-pool,https://github.com/EVaillant/lockfree-object-pool,BSL-1.0,Etienne Vaillant log,https://github.com/rust-lang/log,MIT OR Apache-2.0,The Rust Project Developers lru,https://github.com/jeromefroe/lru-rs,MIT,Jerome Froelich 
lru-cache,https://github.com/contain-rs/lru-cache,MIT OR Apache-2.0,Stepan Koltsov lz4,https://github.com/10xGenomics/lz4-rs,MIT,"Jens Heyens , Artem V. Navrotskiy , Patrick Marks " +lz4-sys,https://github.com/10xGenomics/lz4-rs,MIT,"Jens Heyens , Artem V. Navrotskiy , Patrick Marks " lz4_flex,https://github.com/pseitz/lz4_flex,MIT,"Pascal Seitz , Arthur Silva , ticki " macaddr,https://github.com/svartalf/rust-macaddr,Apache-2.0 OR MIT,svartalf mach,https://github.com/fitzgen/mach,BSD-2-Clause,"Nick Fitzgerald , David Cuddeback , Gonzalo Brito Gadeschi " @@ -394,6 +437,7 @@ memmap2,https://github.com/RazrFalcon/memmap2-rs,MIT OR Apache-2.0,"Dan Burkert memoffset,https://github.com/Gilnaa/memoffset,MIT,Gilad Naaman metrics,https://github.com/metrics-rs/metrics,MIT,Toby Lawrence metrics-tracing-context,https://github.com/metrics-rs/metrics,MIT,MOZGIII +metrics-util,https://github.com/metrics-rs/metrics,MIT,Toby Lawrence mime,https://github.com/hyperium/mime,MIT OR Apache-2.0,Sean McArthur mime_guess,https://github.com/abonander/mime_guess,MIT,Austin Bonander minimal-lexical,https://github.com/Alexhuszagh/minimal-lexical,MIT OR Apache-2.0,Alex Huszagh @@ -419,6 +463,7 @@ no-proxy,https://github.com/jdrouet/no-proxy,MIT,Jérémie Drouet nom,https://github.com/Geal/nom,MIT,contact@geoffroycouprie.com nom,https://github.com/rust-bakery/nom,MIT,contact@geoffroycouprie.com +nom-language,https://github.com/rust-bakery/nom,MIT,contact@geoffroycouprie.com nonzero_ext,https://github.com/antifuchs/nonzero_ext,Apache-2.0,Andreas Fuchs notify,https://github.com/notify-rs/notify,CC0-1.0,"Félix Saparelli , Daniel Faust , Aron Heinecke " notify-types,https://github.com/notify-rs/notify,MIT OR Apache-2.0,Daniel Faust @@ -438,6 +483,7 @@ num-rational,https://github.com/rust-num/num-rational,MIT OR Apache-2.0,The Rust num-traits,https://github.com/rust-num/num-traits,MIT OR Apache-2.0,The Rust Project Developers num_cpus,https://github.com/seanmonstar/num_cpus,MIT OR Apache-2.0,Sean 
McArthur num_enum,https://github.com/illicitonion/num_enum,BSD-3-Clause OR MIT OR Apache-2.0,"Daniel Wagner-Hall , Daniel Henry-Mantilla , Vincent Esche " +num_enum_derive,https://github.com/illicitonion/num_enum,BSD-3-Clause OR MIT OR Apache-2.0,"Daniel Wagner-Hall , Daniel Henry-Mantilla , Vincent Esche " num_threads,https://github.com/jhpratt/num_threads,MIT OR Apache-2.0,Jacob Pratt number_prefix,https://github.com/ogham/rust-number-prefix,MIT,Benjamin Sago oauth2,https://github.com/ramosbugs/oauth2-rs,MIT OR Apache-2.0,"Alex Crichton , Florin Lipan , David A. Ramos " @@ -449,6 +495,7 @@ octseq,https://github.com/NLnetLabs/octets/,BSD-3-Clause,NLnet Labs onig,https://github.com/iwillspeak/rust-onig,MIT,"Will Speak , Ivan Ivashchenko " +onig_sys,https://github.com/iwillspeak/rust-onig,MIT,"Will Speak , Ivan Ivashchenko " opaque-debug,https://github.com/RustCrypto/utils,MIT OR Apache-2.0,RustCrypto Developers opendal,https://github.com/apache/opendal,Apache-2.0,Apache OpenDAL openidconnect,https://github.com/ramosbugs/openidconnect-rs,MIT,David A. 
Ramos @@ -465,6 +512,8 @@ pad,https://github.com/ogham/rust-pad,MIT,Ben S parking,https://github.com/smol-rs/parking,Apache-2.0 OR MIT,"Stjepan Glavina , The Rust Project Developers" parking_lot,https://github.com/Amanieu/parking_lot,Apache-2.0 OR MIT,Amanieu d'Antras parking_lot,https://github.com/Amanieu/parking_lot,MIT OR Apache-2.0,Amanieu d'Antras +parking_lot_core,https://github.com/Amanieu/parking_lot,Apache-2.0 OR MIT,Amanieu d'Antras +parking_lot_core,https://github.com/Amanieu/parking_lot,MIT OR Apache-2.0,Amanieu d'Antras parse-size,https://github.com/kennytm/parse-size,MIT,kennytm passt,https://github.com/kevingimbel/passt,MIT OR Apache-2.0,Kevin Gimbel paste,https://github.com/dtolnay/paste,MIT OR Apache-2.0,David Tolnay @@ -472,8 +521,13 @@ pbkdf2,https://github.com/RustCrypto/password-hashes/tree/master/pbkdf2,MIT OR A peeking_take_while,https://github.com/fitzgen/peeking_take_while,MIT OR Apache-2.0,Nick Fitzgerald pem,https://github.com/jcreekmore/pem-rs,MIT,Jonathan Creekmore pem-rfc7468,https://github.com/RustCrypto/formats/tree/master/pem-rfc7468,Apache-2.0 OR MIT,RustCrypto Developers +percent-encoding,https://github.com/servo/rust-url,MIT OR Apache-2.0,The rust-url developers pest,https://github.com/pest-parser/pest,MIT OR Apache-2.0,Dragoș Tiselice +pest_derive,https://github.com/pest-parser/pest,MIT OR Apache-2.0,Dragoș Tiselice +pest_generator,https://github.com/pest-parser/pest,MIT OR Apache-2.0,Dragoș Tiselice +pest_meta,https://github.com/pest-parser/pest,MIT OR Apache-2.0,Dragoș Tiselice phf,https://github.com/rust-phf/rust-phf,MIT,Steven Fackler +phf_shared,https://github.com/rust-phf/rust-phf,MIT,Steven Fackler pin-project,https://github.com/taiki-e/pin-project,Apache-2.0 OR MIT,The pin-project Authors pin-project-internal,https://github.com/taiki-e/pin-project,Apache-2.0 OR MIT,The pin-project-internal Authors pin-project-lite,https://github.com/taiki-e/pin-project-lite,Apache-2.0 OR MIT,The pin-project-lite Authors @@ -499,13 
+553,17 @@ proc-macro-crate,https://github.com/bkchr/proc-macro-crate,MIT OR Apache-2.0,Bas proc-macro-error-attr2,https://github.com/GnomedDev/proc-macro-error-2,MIT OR Apache-2.0,"CreepySkeleton , GnomedDev " proc-macro-error2,https://github.com/GnomedDev/proc-macro-error-2,MIT OR Apache-2.0,"CreepySkeleton , GnomedDev " proc-macro-hack,https://github.com/dtolnay/proc-macro-hack,MIT OR Apache-2.0,David Tolnay +proc-macro-nested,https://github.com/dtolnay/proc-macro-hack,MIT OR Apache-2.0,David Tolnay proc-macro2,https://github.com/dtolnay/proc-macro2,MIT OR Apache-2.0,"David Tolnay , Alex Crichton " proptest,https://github.com/proptest-rs/proptest,MIT OR Apache-2.0,Jason Lingle prost,https://github.com/tokio-rs/prost,Apache-2.0,"Dan Burkert , Lucio Franco , Casper Meijn , Tokio Contributors " +prost-derive,https://github.com/tokio-rs/prost,Apache-2.0,"Dan Burkert , Lucio Franco , Casper Meijn , Tokio Contributors " prost-reflect,https://github.com/andrewhickman/prost-reflect,MIT OR Apache-2.0,Andrew Hickman +prost-types,https://github.com/tokio-rs/prost,Apache-2.0,"Dan Burkert , Lucio Franco , Casper Meijn , Tokio Contributors " psl,https://github.com/addr-rs/psl,MIT OR Apache-2.0,rushmorem psl-types,https://github.com/addr-rs/psl-types,MIT OR Apache-2.0,rushmorem ptr_meta,https://github.com/djkoloski/ptr_meta,MIT,David Koloski +ptr_meta_derive,https://github.com/djkoloski/ptr_meta,MIT,David Koloski publicsuffix,https://github.com/rushmorem/publicsuffix,MIT OR Apache-2.0,rushmorem pulsar,https://github.com/streamnative/pulsar-rs,MIT OR Apache-2.0,"Colin Stearns , Kevin Stenerson , Geoffroy Couprie " quad-rand,https://github.com/not-fl3/quad-rand,MIT,not-fl3 @@ -523,6 +581,7 @@ radium,https://github.com/bitvecto-rs/radium,MIT,"Nika Layzell rand,https://github.com/rust-random/rand,MIT OR Apache-2.0,"The Rand Project Developers, The Rust Project Developers" rand_chacha,https://github.com/rust-random/rand,MIT OR Apache-2.0,"The Rand Project Developers, The Rust 
Project Developers, The CryptoCorrosion Contributors" +rand_core,https://github.com/rust-random/rand,MIT OR Apache-2.0,"The Rand Project Developers, The Rust Project Developers" rand_distr,https://github.com/rust-random/rand_distr,MIT OR Apache-2.0,The Rand Project Developers rand_hc,https://github.com/rust-random/rand,MIT OR Apache-2.0,The Rand Project Developers rand_xorshift,https://github.com/rust-random/rngs,MIT OR Apache-2.0,"The Rand Project Developers, The Rust Project Developers" @@ -530,10 +589,14 @@ ratatui,https://github.com/ratatui/ratatui,MIT,"Florian Dehau , raw-cpuid,https://github.com/gz/rust-cpuid,MIT,Gerd Zellweger raw-window-handle,https://github.com/rust-windowing/raw-window-handle,MIT OR Apache-2.0 OR Zlib,Osspial rdkafka,https://github.com/fede1024/rust-rdkafka,MIT,Federico Giraud +rdkafka-sys,https://github.com/fede1024/rust-rdkafka,MIT,Federico Giraud +reactor-trait,https://github.com/amqp-rs/executor-trait,Apache-2.0 OR MIT,Marc-Antoine Perennou redis,https://github.com/redis-rs/redis-rs,BSD-3-Clause,The redis Authors redox_syscall,https://gitlab.redox-os.org/redox-os/syscall,MIT,Jeremy Soller redox_users,https://gitlab.redox-os.org/redox-os/users,MIT,"Jose Narvaez , Wesley Hershberger " ref-cast,https://github.com/dtolnay/ref-cast,MIT OR Apache-2.0,David Tolnay +ref-cast-impl,https://github.com/dtolnay/ref-cast,MIT OR Apache-2.0,David Tolnay +referencing,https://github.com/Stranger6667/jsonschema,MIT,Dmitry Dygalo regex,https://github.com/rust-lang/regex,MIT OR Apache-2.0,"The Rust Project Developers, Andrew Gallant " regex-automata,https://github.com/rust-lang/regex/tree/master/regex-automata,MIT OR Apache-2.0,"The Rust Project Developers, Andrew Gallant " regex-filtered,https://github.com/ua-parser/uap-rust,BSD-3-Clause,The regex-filtered Authors @@ -542,11 +605,13 @@ regex-syntax,https://github.com/rust-lang/regex/tree/master/regex-syntax,MIT OR rend,https://github.com/djkoloski/rend,MIT,David Koloski 
reqwest,https://github.com/seanmonstar/reqwest,MIT OR Apache-2.0,Sean McArthur reqwest-middleware,https://github.com/TrueLayer/reqwest-middleware,MIT OR Apache-2.0,Rodrigo Gryzinski +reqwest-retry,https://github.com/TrueLayer/reqwest-middleware,MIT OR Apache-2.0,Rodrigo Gryzinski resolv-conf,http://github.com/tailhook/resolv-conf,MIT OR Apache-2.0,paul@colomiets.name retry-policies,https://github.com/TrueLayer/retry-policies,MIT OR Apache-2.0,Luca Palmieri rfc6979,https://github.com/RustCrypto/signatures/tree/master/rfc6979,Apache-2.0 OR MIT,RustCrypto Developers ring,https://github.com/briansmith/ring,Apache-2.0 AND ISC,The ring Authors rkyv,https://github.com/rkyv/rkyv,MIT,David Koloski +rkyv_derive,https://github.com/rkyv/rkyv,MIT,David Koloski rle-decode-fast,https://github.com/WanzenBug/rle-decode-helper,MIT OR Apache-2.0,Moritz Wanzenböck rmp,https://github.com/3Hren/msgpack-rust,MIT,Evgeny Safronov rmp-serde,https://github.com/3Hren/msgpack-rust,MIT,Evgeny Safronov @@ -584,6 +649,7 @@ seahash,https://gitlab.redox-os.org/redox-os/seahash,MIT,"ticki security-framework,https://github.com/kornelski/rust-security-framework,MIT OR Apache-2.0,"Steven Fackler , Kornel " +security-framework-sys,https://github.com/kornelski/rust-security-framework,MIT OR Apache-2.0,"Steven Fackler , Kornel " semver,https://github.com/dtolnay/semver,MIT OR Apache-2.0,David Tolnay semver,https://github.com/steveklabnik/semver,MIT OR Apache-2.0,"Steve Klabnik , The Rust Project Developers" semver-parser,https://github.com/steveklabnik/semver-parser,MIT OR Apache-2.0,Steve Klabnik @@ -591,6 +657,8 @@ serde,https://github.com/serde-rs/serde,MIT OR Apache-2.0,"Erick Tryzelaar serde-value,https://github.com/arcnmx/serde-value,MIT,arcnmx serde_bytes,https://github.com/serde-rs/bytes,MIT OR Apache-2.0,David Tolnay +serde_derive,https://github.com/serde-rs/serde,MIT OR Apache-2.0,"Erick Tryzelaar , David Tolnay " +serde_derive_internals,https://github.com/serde-rs/serde,MIT OR Apache-2.0,"Erick 
Tryzelaar , David Tolnay " serde_json,https://github.com/serde-rs/json,MIT OR Apache-2.0,"Erick Tryzelaar , David Tolnay " serde_nanos,https://github.com/caspervonb/serde_nanos,MIT OR Apache-2.0,Casper Beyer serde_path_to_error,https://github.com/dtolnay/path-to-error,MIT OR Apache-2.0,David Tolnay @@ -608,6 +676,7 @@ sha2,https://github.com/RustCrypto/hashes,MIT OR Apache-2.0,RustCrypto Developer sha3,https://github.com/RustCrypto/hashes,MIT OR Apache-2.0,RustCrypto Developers sharded-slab,https://github.com/hawkw/sharded-slab,MIT,Eliza Weisman signal-hook,https://github.com/vorner/signal-hook,Apache-2.0 OR MIT,"Michal 'vorner' Vaner , Thomas Himmelstoss " +signal-hook-mio,https://github.com/vorner/signal-hook,Apache-2.0 OR MIT,"Michal 'vorner' Vaner , Thomas Himmelstoss " signal-hook-registry,https://github.com/vorner/signal-hook,Apache-2.0 OR MIT,"Michal 'vorner' Vaner , Masaki Hara " signatory,https://github.com/iqlusioninc/crates/tree/main/signatory,Apache-2.0 OR MIT,Tony Arcieri signature,https://github.com/RustCrypto/traits/tree/master/signature,Apache-2.0 OR MIT,RustCrypto Developers @@ -620,6 +689,7 @@ smallvec,https://github.com/servo/rust-smallvec,MIT OR Apache-2.0,The Servo Proj smol,https://github.com/smol-rs/smol,Apache-2.0 OR MIT,Stjepan Glavina smpl_jwt,https://github.com/durch/rust-jwt,MIT,Drazen Urch snafu,https://github.com/shepmaster/snafu,MIT OR Apache-2.0,Jake Goulding +snafu-derive,https://github.com/shepmaster/snafu,MIT OR Apache-2.0,Jake Goulding snap,https://github.com/BurntSushi/rust-snappy,BSD-3-Clause,Andrew Gallant socket2,https://github.com/rust-lang/socket2,MIT OR Apache-2.0,"Alex Crichton , Thomas de Zeeuw " spin,https://github.com/mvdnes/spin-rs,MIT,"Mathijs van de Nes , John Ericson " @@ -627,6 +697,12 @@ spin,https://github.com/mvdnes/spin-rs,MIT,"Mathijs van de Nes spki,https://github.com/RustCrypto/formats/tree/master/spki,Apache-2.0 OR MIT,RustCrypto Developers sqlx,https://github.com/launchbadge/sqlx,MIT OR Apache-2.0,"Ryan 
Leckey , Austin Bonander , Chloe Ross , Daniel Akhterov " +sqlx-core,https://github.com/launchbadge/sqlx,MIT OR Apache-2.0,"Ryan Leckey , Austin Bonander , Chloe Ross , Daniel Akhterov " +sqlx-macros,https://github.com/launchbadge/sqlx,MIT OR Apache-2.0,"Ryan Leckey , Austin Bonander , Chloe Ross , Daniel Akhterov " +sqlx-macros-core,https://github.com/launchbadge/sqlx,MIT OR Apache-2.0,"Ryan Leckey , Austin Bonander , Chloe Ross , Daniel Akhterov " +sqlx-mysql,https://github.com/launchbadge/sqlx,MIT OR Apache-2.0,"Ryan Leckey , Austin Bonander , Chloe Ross , Daniel Akhterov " +sqlx-postgres,https://github.com/launchbadge/sqlx,MIT OR Apache-2.0,"Ryan Leckey , Austin Bonander , Chloe Ross , Daniel Akhterov " +sqlx-sqlite,https://github.com/launchbadge/sqlx,MIT OR Apache-2.0,"Ryan Leckey , Austin Bonander , Chloe Ross , Daniel Akhterov " stable_deref_trait,https://github.com/storyyeller/stable_deref_trait,MIT OR Apache-2.0,Robert Grosse static_assertions,https://github.com/nvzqz/static-assertions-rs,MIT OR Apache-2.0,Nikolai Vazquez static_assertions_next,https://github.com/scuffletv/static-assertions,MIT OR Apache-2.0,Nikolai Vazquez @@ -636,6 +712,7 @@ strip-ansi-escapes,https://github.com/luser/strip-ansi-escapes,Apache-2.0 OR MIT strsim,https://github.com/dguo/strsim-rs,MIT,Danny Guo strsim,https://github.com/rapidfuzz/strsim-rs,MIT,"Danny Guo , maxbachmann " strum,https://github.com/Peternator7/strum,MIT,Peter Glotfelty +strum_macros,https://github.com/Peternator7/strum,MIT,Peter Glotfelty subtle,https://github.com/dalek-cryptography/subtle,BSD-3-Clause,"Isis Lovecruft , Henry de Valence " supports-color,https://github.com/zkat/supports-color,Apache-2.0,Kat Marchán syn,https://github.com/dtolnay/syn,MIT OR Apache-2.0,David Tolnay @@ -645,6 +722,7 @@ sysinfo,https://github.com/GuillaumeGomez/sysinfo,MIT,Guillaume Gomez system-configuration,https://github.com/mullvad/system-configuration-rs,MIT OR Apache-2.0,Mullvad VPN 
+system-configuration-sys,https://github.com/mullvad/system-configuration-rs,MIT OR Apache-2.0,Mullvad VPN tagptr,https://github.com/oliver-giersch/tagptr,MIT OR Apache-2.0,Oliver Giersch take_mut,https://github.com/Sgeo/take_mut,MIT,Sgeo tap,https://github.com/myrrlyn/tap,MIT,"Elliott Linder , myrrlyn " @@ -654,22 +732,28 @@ term,https://github.com/Stebalien/term,MIT OR Apache-2.0,"The Rust Project Devel termcolor,https://github.com/BurntSushi/termcolor,Unlicense OR MIT,Andrew Gallant terminal_size,https://github.com/eminence/terminal-size,MIT OR Apache-2.0,Andrew Chin thiserror,https://github.com/dtolnay/thiserror,MIT OR Apache-2.0,David Tolnay +thiserror-impl,https://github.com/dtolnay/thiserror,MIT OR Apache-2.0,David Tolnay thread_local,https://github.com/Amanieu/thread_local-rs,MIT OR Apache-2.0,Amanieu d'Antras tikv-jemalloc-sys,https://github.com/tikv/jemallocator,MIT OR Apache-2.0,"Alex Crichton , Gonzalo Brito Gadeschi , The TiKV Project Developers" tikv-jemallocator,https://github.com/tikv/jemallocator,MIT OR Apache-2.0,"Alex Crichton , Gonzalo Brito Gadeschi , Simon Sapin , Steven Fackler , The TiKV Project Developers" time,https://github.com/time-rs/time,MIT OR Apache-2.0,"Jacob Pratt , Time contributors" +time-core,https://github.com/time-rs/time,MIT OR Apache-2.0,"Jacob Pratt , Time contributors" +time-macros,https://github.com/time-rs/time,MIT OR Apache-2.0,"Jacob Pratt , Time contributors" tinystr,https://github.com/unicode-org/icu4x,Unicode-3.0,The ICU4X Project Developers tinyvec,https://github.com/Lokathor/tinyvec,Zlib OR Apache-2.0 OR MIT,Lokathor tinyvec_macros,https://github.com/Soveu/tinyvec_macros,MIT OR Apache-2.0 OR Zlib,Soveu tokio,https://github.com/tokio-rs/tokio,MIT,Tokio Contributors tokio-io,https://github.com/tokio-rs/tokio,MIT,Carl Lerche tokio-io-timeout,https://github.com/sfackler/tokio-io-timeout,MIT OR Apache-2.0,Steven Fackler +tokio-macros,https://github.com/tokio-rs/tokio,MIT,Tokio Contributors 
tokio-native-tls,https://github.com/tokio-rs/tls,MIT,Tokio Contributors tokio-openssl,https://github.com/tokio-rs/tokio-openssl,MIT OR Apache-2.0,Alex Crichton tokio-postgres,https://github.com/sfackler/rust-postgres,MIT OR Apache-2.0,Steven Fackler tokio-retry,https://github.com/srijs/rust-tokio-retry,MIT,Sam Rijs tokio-rustls,https://github.com/rustls/tokio-rustls,MIT OR Apache-2.0,The tokio-rustls Authors +tokio-stream,https://github.com/tokio-rs/tokio,MIT,Tokio Contributors tokio-tungstenite,https://github.com/snapview/tokio-tungstenite,MIT,"Daniel Abramov , Alexey Galakhov " +tokio-util,https://github.com/tokio-rs/tokio,MIT,Tokio Contributors tokio-websockets,https://github.com/Gelbpunkt/tokio-websockets,MIT,The tokio-websockets Authors toml,https://github.com/toml-rs/toml,MIT OR Apache-2.0,The toml Authors toml_datetime,https://github.com/toml-rs/toml,MIT OR Apache-2.0,The toml_datetime Authors @@ -681,9 +765,12 @@ toml_writer,https://github.com/toml-rs/toml,MIT OR Apache-2.0,The toml_writer Au tonic,https://github.com/hyperium/tonic,MIT,Lucio Franco tower,https://github.com/tower-rs/tower,MIT,Tower Maintainers tower-http,https://github.com/tower-rs/tower-http,MIT,Tower Maintainers +tower-layer,https://github.com/tower-rs/tower,MIT,Tower Maintainers +tower-service,https://github.com/tower-rs/tower,MIT,Tower Maintainers tracing,https://github.com/tokio-rs/tracing,MIT,"Eliza Weisman , Tokio Contributors " tracing-attributes,https://github.com/tokio-rs/tracing,MIT,"Tokio Contributors , Eliza Weisman , David Barsky " tracing-core,https://github.com/tokio-rs/tracing,MIT,Tokio Contributors +tracing-futures,https://github.com/tokio-rs/tracing,MIT,"Eliza Weisman , Tokio Contributors " tracing-log,https://github.com/tokio-rs/tracing,MIT,Tokio Contributors tracing-serde,https://github.com/tokio-rs/tracing,MIT,Tokio Contributors tracing-subscriber,https://github.com/tokio-rs/tracing,MIT,"Eliza Weisman , David Barsky , Tokio Contributors " @@ -696,11 +783,13 @@ 
tryhard,https://github.com/EmbarkStudios/tryhard,MIT OR Apache-2.0,Embark typed-builder,https://github.com/idanarye/rust-typed-builder,MIT OR Apache-2.0,"IdanArye , Chris Morgan " +typed-builder-macro,https://github.com/idanarye/rust-typed-builder,MIT OR Apache-2.0,"IdanArye , Chris Morgan " typenum,https://github.com/paholg/typenum,MIT OR Apache-2.0,"Paho Lurie-Gregg , Andre Bogus " typespec,https://github.com/azure/azure-sdk-for-rust,MIT,Microsoft typespec_client_core,https://github.com/azure/azure-sdk-for-rust,MIT,Microsoft typespec_macros,https://github.com/azure/azure-sdk-for-rust,MIT,Microsoft typetag,https://github.com/dtolnay/typetag,MIT OR Apache-2.0,David Tolnay +typetag-impl,https://github.com/dtolnay/typetag,MIT OR Apache-2.0,David Tolnay ua-parser,https://github.com/ua-parser/uap-rust,Apache-2.0,The ua-parser Authors ucd-trie,https://github.com/BurntSushi/ucd-generate,MIT OR Apache-2.0,Andrew Gallant unarray,https://github.com/cameron1024/unarray,MIT OR Apache-2.0,The unarray Authors @@ -722,6 +811,7 @@ utf-8,https://github.com/SimonSapin/rust-utf8,MIT OR Apache-2.0,Simon Sapin utf8-width,https://github.com/magiclen/utf8-width,MIT,Magic Len utf8_iter,https://github.com/hsivonen/utf8_iter,Apache-2.0 OR MIT,Henri Sivonen +utf8parse,https://github.com/alacritty/vte,Apache-2.0 OR MIT,"Joe Wilm , Christian Duerr " uuid,https://github.com/uuid-rs/uuid,Apache-2.0 OR MIT,"Ashley Mannix, Dylan DPC, Hunar Roop Kahlon" uuid-simd,https://github.com/Nugine/simd,MIT,The uuid-simd Authors valuable,https://github.com/tokio-rs/valuable,MIT,The valuable Authors @@ -753,12 +843,31 @@ whoami,https://github.com/ardaku/whoami,Apache-2.0 OR BSL-1.0 OR MIT,The whoami widestring,https://github.com/starkat99/widestring-rs,MIT OR Apache-2.0,Kathryn Long widestring,https://github.com/starkat99/widestring-rs,MIT OR Apache-2.0,The widestring Authors winapi,https://github.com/retep998/winapi-rs,MIT OR Apache-2.0,Peter Atashian 
+winapi-i686-pc-windows-gnu,https://github.com/retep998/winapi-rs,MIT OR Apache-2.0,Peter Atashian winapi-util,https://github.com/BurntSushi/winapi-util,Unlicense OR MIT,Andrew Gallant +winapi-x86_64-pc-windows-gnu,https://github.com/retep998/winapi-rs,MIT OR Apache-2.0,Peter Atashian windows,https://github.com/microsoft/windows-rs,MIT OR Apache-2.0,Microsoft windows-collections,https://github.com/microsoft/windows-rs,MIT OR Apache-2.0,The windows-collections Authors +windows-core,https://github.com/microsoft/windows-rs,MIT OR Apache-2.0,Microsoft windows-future,https://github.com/microsoft/windows-rs,MIT OR Apache-2.0,The windows-future Authors +windows-implement,https://github.com/microsoft/windows-rs,MIT OR Apache-2.0,Microsoft +windows-interface,https://github.com/microsoft/windows-rs,MIT OR Apache-2.0,Microsoft +windows-link,https://github.com/microsoft/windows-rs,MIT OR Apache-2.0,Microsoft windows-numerics,https://github.com/microsoft/windows-rs,MIT OR Apache-2.0,The windows-numerics Authors +windows-registry,https://github.com/microsoft/windows-rs,MIT OR Apache-2.0,Microsoft +windows-result,https://github.com/microsoft/windows-rs,MIT OR Apache-2.0,Microsoft windows-service,https://github.com/mullvad/windows-service-rs,MIT OR Apache-2.0,Mullvad VPN +windows-strings,https://github.com/microsoft/windows-rs,MIT OR Apache-2.0,Microsoft +windows-sys,https://github.com/microsoft/windows-rs,MIT OR Apache-2.0,Microsoft +windows-targets,https://github.com/microsoft/windows-rs,MIT OR Apache-2.0,Microsoft +windows_aarch64_gnullvm,https://github.com/microsoft/windows-rs,MIT OR Apache-2.0,Microsoft +windows_aarch64_msvc,https://github.com/microsoft/windows-rs,MIT OR Apache-2.0,Microsoft +windows_i686_gnu,https://github.com/microsoft/windows-rs,MIT OR Apache-2.0,Microsoft +windows_i686_gnullvm,https://github.com/microsoft/windows-rs,MIT OR Apache-2.0,Microsoft +windows_i686_msvc,https://github.com/microsoft/windows-rs,MIT OR Apache-2.0,Microsoft 
+windows_x86_64_gnu,https://github.com/microsoft/windows-rs,MIT OR Apache-2.0,Microsoft +windows_x86_64_gnullvm,https://github.com/microsoft/windows-rs,MIT OR Apache-2.0,Microsoft +windows_x86_64_msvc,https://github.com/microsoft/windows-rs,MIT OR Apache-2.0,Microsoft winnow,https://github.com/winnow-rs/winnow,MIT,The winnow Authors winreg,https://github.com/gentoo90/winreg-rs,MIT,Igor Shaula wit-bindgen-rt,https://github.com/bytecodealliance/wasi-rs,Apache-2.0 WITH LLVM-exception OR Apache-2.0 OR MIT,The wit-bindgen-rt Authors @@ -771,6 +880,7 @@ xxhash-rust,https://github.com/DoumanAsh/xxhash-rust,BSL-1.0,Douman yoke-derive,https://github.com/unicode-org/icu4x,Unicode-3.0,Manish Goregaokar zerocopy,https://github.com/google/zerocopy,BSD-2-Clause OR Apache-2.0 OR MIT,Joshua Liebow-Feeser +zerocopy-derive,https://github.com/google/zerocopy,BSD-2-Clause OR Apache-2.0 OR MIT,Joshua Liebow-Feeser zerofrom,https://github.com/unicode-org/icu4x,Unicode-3.0,Manish Goregaokar zerofrom-derive,https://github.com/unicode-org/icu4x,Unicode-3.0,Manish Goregaokar zeroize,https://github.com/RustCrypto/utils/tree/master/zeroize,Apache-2.0 OR MIT,The RustCrypto Project Developers diff --git a/aqua/aqua.yaml b/aqua/aqua.yaml index 37c8ae623bbe8..7d3dc9f8b956f 100644 --- a/aqua/aqua.yaml +++ b/aqua/aqua.yaml @@ -11,4 +11,4 @@ packages: - name: nextest-rs/nextest/cargo-nextest@cargo-nextest-0.9.47 - name: EmbarkStudios/cargo-deny@0.16.2 - name: foresterre/cargo-msrv@v0.15.1 - - name: crates.io/dd-rust-license-tool@1.0.1 + - name: crates.io/dd-rust-license-tool@1.0.4 diff --git a/scripts/environment/prepare.sh b/scripts/environment/prepare.sh index 44ce753a5c988..368e4287f4bce 100755 --- a/scripts/environment/prepare.sh +++ b/scripts/environment/prepare.sh @@ -176,7 +176,7 @@ fi if contains_module dd-rust-license-tool; then if ! 
dd-rust-license-tool --help &>/dev/null; then - cargo install dd-rust-license-tool --version 1.0.2 --force --locked + cargo install dd-rust-license-tool --version 1.0.4 --force --locked fi fi From 8c5af6208d673f488e980ae866ae881946710e78 Mon Sep 17 00:00:00 2001 From: Thomas Date: Sat, 1 Nov 2025 17:40:32 -0400 Subject: [PATCH 031/227] fix(ci): fix path in datadog-metrics e2e test.yaml (#24127) --- scripts/e2e/datadog-metrics/test.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/e2e/datadog-metrics/test.yaml b/scripts/e2e/datadog-metrics/test.yaml index d1b814fd02cb8..88b119411fff1 100644 --- a/scripts/e2e/datadog-metrics/test.yaml +++ b/scripts/e2e/datadog-metrics/test.yaml @@ -23,6 +23,6 @@ paths: - "src/internal_events/datadog_*" - "src/sinks/datadog/metrics/**" - "src/sinks/util/**" -- "scripts/integration/datadog-e2e/metrics/**" +- "scripts/e2e/datadog-metrics/**" - "tests/e2e/datadog/metrics/**" - "tests/data/e2e/datadog/metrics/**" From bd9b87700a4178decb54547bc158385156ad96f9 Mon Sep 17 00:00:00 2001 From: "silas.u" <41794887+sialais@users.noreply.github.com> Date: Tue, 4 Nov 2025 01:37:00 +0800 Subject: [PATCH 032/227] fix(instrument): Buffer counter underflowed (#23872) (#23973) * fix(instrument): Buffer counter underflowed (#23872) * Update code to be more clearly with to let Co-authored-by: Bruce Guenter * format fix to meet format&clippy --------- Co-authored-by: Bruce Guenter Co-authored-by: Pavlos Rontidis --- .../23872_buffer_counter_underflowed.fix.md | 3 ++ .../src/topology/channel/sender.rs | 42 +++++++++---------- 2 files changed, 22 insertions(+), 23 deletions(-) create mode 100644 changelog.d/23872_buffer_counter_underflowed.fix.md diff --git a/changelog.d/23872_buffer_counter_underflowed.fix.md b/changelog.d/23872_buffer_counter_underflowed.fix.md new file mode 100644 index 0000000000000..cf0a3a4449b16 --- /dev/null +++ b/changelog.d/23872_buffer_counter_underflowed.fix.md @@ -0,0 +1,3 @@ +Fix buffer counter 
underflowed, caused by the counter has not been updated(increase) timely when new event is coming. + +authors: sialais diff --git a/lib/vector-buffers/src/topology/channel/sender.rs b/lib/vector-buffers/src/topology/channel/sender.rs index cdfc41912f05b..e68b868e042e7 100644 --- a/lib/vector-buffers/src/topology/channel/sender.rs +++ b/lib/vector-buffers/src/topology/channel/sender.rs @@ -202,8 +202,14 @@ impl BufferSender { .as_ref() .map(|_| (item.event_count(), item.size_of())); - let mut sent_to_base = true; let mut was_dropped = false; + + if let Some(instrumentation) = self.instrumentation.as_ref() + && let Some((item_count, item_size)) = item_sizing + { + instrumentation + .increment_received_event_count_and_byte_size(item_count as u64, item_size as u64); + } match self.when_full { WhenFull::Block => self.base.send(item).await?, WhenFull::DropNewest => { @@ -213,7 +219,7 @@ impl BufferSender { } WhenFull::Overflow => { if let Some(item) = self.base.try_send(item).await? { - sent_to_base = false; + was_dropped = true; self.overflow .as_mut() .unwrap_or_else(|| unreachable!("overflow must exist")) @@ -223,30 +229,20 @@ impl BufferSender { } } - if (sent_to_base || was_dropped) - && let (Some(send_duration), Some(send_reference)) = - (self.send_duration.as_ref(), send_reference) - { - send_duration.emit(send_reference.elapsed()); - } - if let Some(instrumentation) = self.instrumentation.as_ref() && let Some((item_count, item_size)) = item_sizing + && was_dropped { - if sent_to_base { - instrumentation.increment_received_event_count_and_byte_size( - item_count as u64, - item_size as u64, - ); - } - - if was_dropped { - instrumentation.increment_dropped_event_count_and_byte_size( - item_count as u64, - item_size as u64, - true, - ); - } + instrumentation.increment_dropped_event_count_and_byte_size( + item_count as u64, + item_size as u64, + true, + ); + } + if let Some(send_duration) = self.send_duration.as_ref() + && let Some(send_reference) = send_reference + { 
+ send_duration.emit(send_reference.elapsed()); } Ok(()) From 5cf227e646734e13b47de0d559c353b20aae3461 Mon Sep 17 00:00:00 2001 From: Thomas Date: Mon, 3 Nov 2025 12:37:13 -0500 Subject: [PATCH 033/227] fix(ci): vdev caching (#24126) * don't pull cargo bin dir when cargo-cache is enabled * Add specific cache layer to prepare.sh * Temporarily add github.event_name == 'pull_request' to integration.yml * Revert "Temporarily add github.event_name == 'pull_request' to integration.yml" This reverts commit 55a675c504d38c9de7a5b700e73da44e80af191f. * Hash all files under scripts/environment --- .github/actions/setup/action.yml | 23 ++++++++++++++++++++--- 1 file changed, 20 insertions(+), 3 deletions(-) diff --git a/.github/actions/setup/action.yml b/.github/actions/setup/action.yml index 6a22e716ef990..1405c6461da0a 100644 --- a/.github/actions/setup/action.yml +++ b/.github/actions/setup/action.yml @@ -9,7 +9,7 @@ inputs: cargo-cache: required: false default: true - description: "Cache Cargo registry, index, git DB, and ~/.cargo/bin. Enabled automatically if any Rust tools are requested." + description: "Cache Cargo registry, index, and git DB. Enabled automatically if any Rust tools are requested." 
mold: required: false @@ -124,12 +124,11 @@ runs: EOF shell: bash - - name: Cache Cargo registry + index + - name: Cache Cargo registry, index, and git DB if: ${{ inputs.cargo-cache == 'true' || env.NEEDS_RUST == 'true' }} uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 with: path: | - ~/.cargo/bin/ ~/.cargo/registry/index/ ~/.cargo/registry/cache/ ~/.cargo/git/db/ @@ -210,6 +209,24 @@ runs: if: ${{ inputs.vdev == 'true' }} uses: ./.github/actions/install-vdev + - name: Cache prepare.sh binaries + id: cache-prepare-binaries + uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + with: + path: | + ~/.cargo/bin/cargo-deb + ~/.cargo/bin/cross + ~/.cargo/bin/cargo-nextest + ~/.cargo/bin/cargo-deny + ~/.cargo/bin/cargo-msrv + ~/.cargo/bin/dd-rust-license-tool + ~/.cargo/bin/wasm-pack + /usr/local/bin/markdownlint + /usr/local/bin/datadog-ci + key: ${{ runner.os }}-prepare-binaries-${{ hashFiles('scripts/environment/*') }} + restore-keys: | + ${{ runner.os }}-prepare-binaries- + - name: Run prepare.sh shell: bash run: | From aef66cfae8f68a7006b9c1cebba9ff022e0520da Mon Sep 17 00:00:00 2001 From: Eric Huang Date: Tue, 4 Nov 2025 01:57:01 +0800 Subject: [PATCH 034/227] feat(codecs): bump `avro-rs` crate to improve avro encoding error (#24119) * bump `avro-rs` crate to improve encoding error Signed-off-by: titaneric * add changelog Signed-off-by: titaneric * add newline Signed-off-by: titaneric * build licenses Signed-off-by: titaneric --------- Signed-off-by: titaneric --- Cargo.lock | 243 ++++++++++++++---- LICENSE-3rdparty.csv | 8 +- ...119_improve_avro_encoding_error.feature.md | 3 + lib/codecs/Cargo.toml | 2 +- lib/codecs/src/decoding/format/avro.rs | 9 + 5 files changed, 210 insertions(+), 55 deletions(-) create mode 100644 changelog.d/24119_improve_avro_encoding_error.feature.md diff --git a/Cargo.lock b/Cargo.lock index c04a3cce92cfe..2cf85a4ded1e9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -286,6 +286,30 @@ dependencies = [ 
"uuid", ] +[[package]] +name = "apache-avro" +version = "0.20.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3a033b4ced7c585199fb78ef50fca7fe2f444369ec48080c5fd072efa1a03cc7" +dependencies = [ + "bigdecimal", + "bon", + "digest", + "log", + "miniz_oxide", + "num-bigint", + "quad-rand", + "rand 0.9.2", + "regex-lite", + "serde", + "serde_bytes", + "serde_json", + "strum 0.27.2", + "strum_macros 0.27.2", + "thiserror 2.0.17", + "uuid", +] + [[package]] name = "approx" version = "0.5.1" @@ -1721,6 +1745,20 @@ version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" +[[package]] +name = "bigdecimal" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "560f42649de9fa436b73517378a147ec21f6c997a546581df4b4b31677828934" +dependencies = [ + "autocfg", + "libm", + "num-bigint", + "num-integer", + "num-traits", + "serde", +] + [[package]] name = "bit-set" version = "0.8.0" @@ -1848,7 +1886,7 @@ dependencies = [ "serde_json", "serde_repr", "serde_urlencoded", - "thiserror 2.0.3", + "thiserror 2.0.17", "tokio", "tokio-util", "tower-service", @@ -1869,6 +1907,31 @@ dependencies = [ "serde_with 3.14.0", ] +[[package]] +name = "bon" +version = "3.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ebeb9aaf9329dff6ceb65c689ca3db33dbf15f324909c60e4e5eef5701ce31b1" +dependencies = [ + "bon-macros", + "rustversion", +] + +[[package]] +name = "bon-macros" +version = "3.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77e9d642a7e3a318e37c2c9427b5a6a48aa1ad55dcd986f3034ab2239045a645" +dependencies = [ + "darling 0.21.3", + "ident_case", + "prettyplease 0.2.15", + "proc-macro2 1.0.101", + "quote 1.0.40", + "rustversion", + "syn 2.0.106", +] + [[package]] name = "borrow-or-share" version = "0.2.2" @@ -2297,7 +2360,7 @@ dependencies = [ name = 
"codecs" version = "0.1.0" dependencies = [ - "apache-avro", + "apache-avro 0.20.0", "bytes 1.10.1", "chrono", "csv-core", @@ -2453,7 +2516,7 @@ checksum = "f29222b549d4e3ded127989d523da9e928918d0d0d7f7c1690b439d0d538bae9" dependencies = [ "directories", "serde", - "thiserror 2.0.3", + "thiserror 2.0.17", "toml 0.8.23", ] @@ -2940,6 +3003,16 @@ dependencies = [ "darling_macro 0.20.11", ] +[[package]] +name = "darling" +version = "0.21.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9cdf337090841a411e2a7f3deb9187445851f91b309c0c0a29e05f74a00a48c0" +dependencies = [ + "darling_core 0.21.3", + "darling_macro 0.21.3", +] + [[package]] name = "darling_core" version = "0.13.4" @@ -2968,6 +3041,20 @@ dependencies = [ "syn 2.0.106", ] +[[package]] +name = "darling_core" +version = "0.21.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1247195ecd7e3c85f83c8d2a366e4210d588e802133e1e355180a9870b517ea4" +dependencies = [ + "fnv", + "ident_case", + "proc-macro2 1.0.101", + "quote 1.0.40", + "strsim 0.11.1", + "syn 2.0.106", +] + [[package]] name = "darling_macro" version = "0.13.4" @@ -2990,11 +3077,22 @@ dependencies = [ "syn 2.0.106", ] +[[package]] +name = "darling_macro" +version = "0.21.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d38308df82d1080de0afee5d069fa14b0326a88c14f15c5ccda35b4a6c414c81" +dependencies = [ + "darling_core 0.21.3", + "quote 1.0.40", + "syn 2.0.106", +] + [[package]] name = "dary_heap" -version = "0.3.6" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7762d17f1241643615821a8455a0b2c3e803784b058693d990b11f2dce25a0ca" +checksum = "06d2e3287df1c007e74221c49ca10a95d557349e54b3a75dc2fb14712c751f04" [[package]] name = "dashmap" @@ -3917,6 +4015,12 @@ version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"f81ec6369c545a7d40e4589b5597581fa1c441fe1cce96dd1de43159910a36a2" +[[package]] +name = "foldhash" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77ce24cb58228fbb8aa041425bb1050850ac19177686ea6e0f41a70416f56fdb" + [[package]] name = "foreign-types" version = "0.3.2" @@ -4443,32 +4547,34 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.13.1" +version = "0.14.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33ff8ae62cd3a9102e5637afc8452c55acf3844001bd5374e0b0bd7b6616c038" +checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" dependencies = [ "ahash 0.8.11", + "allocator-api2", ] [[package]] name = "hashbrown" -version = "0.14.5" +version = "0.15.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" +checksum = "bf151400ff0baff5465007dd2f3e717f3fe502074ca563069ce3a6629d07b289" dependencies = [ - "ahash 0.8.11", "allocator-api2", + "equivalent", + "foldhash 0.1.3", ] [[package]] name = "hashbrown" -version = "0.15.2" +version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf151400ff0baff5465007dd2f3e717f3fe502074ca563069ce3a6629d07b289" +checksum = "5419bdc4f6a9207fbeba6d11b604d481addf78ecd10c11ad51e76c2f6482748d" dependencies = [ "allocator-api2", "equivalent", - "foldhash", + "foldhash 0.2.0", ] [[package]] @@ -4692,7 +4798,7 @@ dependencies = [ "rand 0.9.2", "ring", "rustls-pki-types", - "thiserror 2.0.3", + "thiserror 2.0.17", "time", "tinyvec", "tracing 0.1.41", @@ -5893,9 +5999,9 @@ checksum = "6a82ae493e598baaea5209805c49bbf2ea7de956d50d7da0da1164f9c6d28543" [[package]] name = "libflate" -version = "2.0.0" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f7d5654ae1795afc7ff76f4365c2c8791b0feb18e8996a96adad8ffd7c3b2bf" +checksum = 
"249fa21ba2b59e8cbd69e722f5b31e1b466db96c937ae3de23e8b99ead0d1383" dependencies = [ "adler32", "core2", @@ -5906,12 +6012,12 @@ dependencies = [ [[package]] name = "libflate_lz77" -version = "2.0.0" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be5f52fb8c451576ec6b79d3f4deb327398bc05bbdbd99021a6e77a4c855d524" +checksum = "a599cb10a9cd92b1300debcef28da8f70b935ec937f44fcd1b70a7c986a11c5c" dependencies = [ "core2", - "hashbrown 0.13.1", + "hashbrown 0.16.0", "rle-decode-fast", ] @@ -6064,9 +6170,9 @@ checksum = "3a69c0481fc2424cb55795de7da41add33372ea75a94f9b6588ab6a2826dfebc" [[package]] name = "log" -version = "0.4.27" +version = "0.4.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94" +checksum = "34080505efa8e45a4b816c349525ebe327ceaa8559756f0356cba97ef3bf7432" [[package]] name = "loki-logproto" @@ -6221,7 +6327,7 @@ dependencies = [ "memchr", "serde", "simdutf8", - "thiserror 2.0.3", + "thiserror 2.0.17", ] [[package]] @@ -6345,9 +6451,9 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" -version = "0.8.8" +version = "0.8.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3be647b768db090acb35d5ec5db2b0e1f1de11133ca123b9eacf5137868f892a" +checksum = "1fa76a2c86f704bdb222d66965fb3d63269ce38518b83cb0575fca855ebb6316" dependencies = [ "adler2", ] @@ -6830,6 +6936,7 @@ checksum = "a5e44f723f1133c9deac646763579fdb3ac745e418f2a7af9cd0c431da1f20b9" dependencies = [ "num-integer", "num-traits", + "serde", ] [[package]] @@ -8238,9 +8345,9 @@ dependencies = [ [[package]] name = "quad-rand" -version = "0.2.1" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "658fa1faf7a4cc5f057c9ee5ef560f717ad9d8dc66d975267f709624d6e1ab88" +checksum = 
"5a651516ddc9168ebd67b24afd085a718be02f8858fe406591b013d101ce2f40" [[package]] name = "quanta" @@ -8274,7 +8381,7 @@ dependencies = [ "newtype-uuid", "quick-xml 0.37.4", "strip-ansi-escapes", - "thiserror 2.0.3", + "thiserror 2.0.17", "uuid", ] @@ -8333,7 +8440,7 @@ dependencies = [ "rustc-hash", "rustls 0.23.23", "socket2 0.5.10", - "thiserror 2.0.3", + "thiserror 2.0.17", "tokio", "tracing 0.1.41", ] @@ -8352,7 +8459,7 @@ dependencies = [ "rustls 0.23.23", "rustls-pki-types", "slab", - "thiserror 2.0.3", + "thiserror 2.0.17", "tinyvec", "tracing 0.1.41", "web-time", @@ -8713,7 +8820,7 @@ checksum = "dd6f9d3d47bdd2ad6945c5015a226ec6155d0bcdfd8f7cd29f86b71f8de99d2b" dependencies = [ "getrandom 0.2.15", "libredox", - "thiserror 2.0.3", + "thiserror 2.0.17", ] [[package]] @@ -8795,9 +8902,9 @@ dependencies = [ [[package]] name = "regex-lite" -version = "0.1.5" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30b661b2f27137bdbc16f00eda72866a92bb28af1753ffbd56744fb6e2e9cd8e" +checksum = "8d942b98df5e658f56f20d592c7f868833fe38115e65c33003d8cd224b0155da" [[package]] name = "regex-syntax" @@ -9367,9 +9474,9 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.14" +version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ffc183a10b4478d04cbbbfc96d0873219d962dd5accaff2ffbd4ceb7df837f4" +checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d" [[package]] name = "rusty-fork" @@ -9601,10 +9708,11 @@ checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" [[package]] name = "serde" -version = "1.0.219" +version = "1.0.228" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6" +checksum = "9a8e94ea7f378bd32cbbd37198a4a91436180c5bb472411e48b5ec2e2124ae9e" dependencies = [ + "serde_core", "serde_derive", ] @@ -9629,18 +9737,28 @@ dependencies = [ 
[[package]] name = "serde_bytes" -version = "0.11.17" +version = "0.11.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8437fd221bde2d4ca316d61b90e337e9e702b3820b87d63caa9ba6c02bd06d96" +checksum = "a5d440709e79d88e51ac01c4b72fc6cb7314017bb7da9eeff678aa94c10e3ea8" dependencies = [ "serde", + "serde_core", +] + +[[package]] +name = "serde_core" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41d385c7d4ca58e59fc732af25c3983b67ac852c1a25000afe1175de458b67ad" +dependencies = [ + "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.219" +version = "1.0.228" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00" +checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" dependencies = [ "proc-macro2 1.0.101", "quote 1.0.40", @@ -9660,15 +9778,16 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.143" +version = "1.0.145" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d401abef1d108fbd9cbaebc3e46611f4b1021f714a0597a71f41ee463f5f4a5a" +checksum = "402a6f66d8c709116cf22f558eab210f5a50187f702eb4d7e5ef38d9a7f1c79c" dependencies = [ "indexmap 2.11.0", "itoa", "memchr", "ryu", "serde", + "serde_core", ] [[package]] @@ -10220,7 +10339,7 @@ dependencies = [ "serde_json", "sha2", "smallvec", - "thiserror 2.0.3", + "thiserror 2.0.17", "tokio", "tokio-stream", "tracing 0.1.41", @@ -10303,7 +10422,7 @@ dependencies = [ "smallvec", "sqlx-core", "stringprep", - "thiserror 2.0.3", + "thiserror 2.0.17", "tracing 0.1.41", "whoami", ] @@ -10341,7 +10460,7 @@ dependencies = [ "smallvec", "sqlx-core", "stringprep", - "thiserror 2.0.3", + "thiserror 2.0.17", "tracing 0.1.41", "whoami", ] @@ -10366,7 +10485,7 @@ dependencies = [ "serde", "serde_urlencoded", "sqlx-core", - "thiserror 2.0.3", + "thiserror 2.0.17", "tracing 0.1.41", "url", ] @@ 
-10460,6 +10579,12 @@ dependencies = [ "strum_macros 0.26.4", ] +[[package]] +name = "strum" +version = "0.27.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af23d6f6c1a224baef9d3f61e287d2761385a5b88fdab4eb4c6f11aeb54c4bcf" + [[package]] name = "strum_macros" version = "0.25.3" @@ -10486,6 +10611,18 @@ dependencies = [ "syn 2.0.106", ] +[[package]] +name = "strum_macros" +version = "0.27.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7695ce3845ea4b33927c055a39dc438a45b059f7c1b3d91d38d10355fb8cbca7" +dependencies = [ + "heck 0.5.0", + "proc-macro2 1.0.101", + "quote 1.0.40", + "syn 2.0.106", +] + [[package]] name = "subtle" version = "2.5.0" @@ -10755,11 +10892,11 @@ dependencies = [ [[package]] name = "thiserror" -version = "2.0.3" +version = "2.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c006c85c7651b3cf2ada4584faa36773bd07bac24acfb39f3c431b36d7e667aa" +checksum = "f63587ca0f12b72a0600bcba1d40081f830876000bb46dd2337a3051618f4fc8" dependencies = [ - "thiserror-impl 2.0.3", + "thiserror-impl 2.0.17", ] [[package]] @@ -10775,9 +10912,9 @@ dependencies = [ [[package]] name = "thiserror-impl" -version = "2.0.3" +version = "2.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f077553d607adc1caf65430528a576c757a71ed73944b66ebb58ef2bbd243568" +checksum = "3ff15c8ecd7de3849db632e14d18d2571fa09dfc5ed93479bc4485c7a517c913" dependencies = [ "proc-macro2 1.0.101", "quote 1.0.40", @@ -12050,7 +12187,7 @@ dependencies = [ name = "vector" version = "0.51.0" dependencies = [ - "apache-avro", + "apache-avro 0.16.0", "approx", "arc-swap", "arr_macro", @@ -12728,7 +12865,7 @@ dependencies = [ "strip-ansi-escapes", "syslog_loose 0.22.0", "termcolor", - "thiserror 2.0.3", + "thiserror 2.0.17", "tokio", "tracing 0.1.41", "ua-parser", diff --git a/LICENSE-3rdparty.csv b/LICENSE-3rdparty.csv index 6967fbd458500..fbf3097a61c5d 100644 --- 
a/LICENSE-3rdparty.csv +++ b/LICENSE-3rdparty.csv @@ -26,6 +26,7 @@ anstyle-query,https://github.com/rust-cli/anstyle,MIT OR Apache-2.0,The anstyle- anstyle-wincon,https://github.com/rust-cli/anstyle,MIT OR Apache-2.0,The anstyle-wincon Authors anyhow,https://github.com/dtolnay/anyhow,MIT OR Apache-2.0,David Tolnay apache-avro,https://github.com/apache/avro,Apache-2.0,Apache Avro team +apache-avro,https://github.com/apache/avro-rs,Apache-2.0,The apache-avro Authors arbitrary,https://github.com/rust-fuzz/arbitrary,MIT OR Apache-2.0,"The Rust-Fuzz Project Developers, Nick Fitzgerald , Manish Goregaokar , Simonas Kazlauskas , Brian L. Troutwine , Corey Farwell " arc-swap,https://github.com/vorner/arc-swap,MIT OR Apache-2.0,Michal 'vorner' Vaner arr_macro,https://github.com/JoshMcguigan/arr_macro,MIT OR Apache-2.0,Josh Mcguigan @@ -106,6 +107,7 @@ base64,https://github.com/marshallpierce/rust-base64,MIT OR Apache-2.0,"Alice Ma base64,https://github.com/marshallpierce/rust-base64,MIT OR Apache-2.0,Marshall Pierce base64-simd,https://github.com/Nugine/simd,MIT,The base64-simd Authors base64ct,https://github.com/RustCrypto/formats/tree/master/base64ct,Apache-2.0 OR MIT,RustCrypto Developers +bigdecimal,https://github.com/akubera/bigdecimal-rs,MIT OR Apache-2.0,Andrew Kubera bit-set,https://github.com/contain-rs/bit-set,Apache-2.0 OR MIT,Alexis Beingessner bit-vec,https://github.com/contain-rs/bit-vec,Apache-2.0 OR MIT,Alexis Beingessner bitflags,https://github.com/bitflags/bitflags,MIT OR Apache-2.0,The Rust Project Developers @@ -117,6 +119,8 @@ blocking,https://github.com/smol-rs/blocking,Apache-2.0 OR MIT,Stjepan Glavina < bloomy,https://docs.rs/bloomy/,MIT,"Aleksandr Bezobchuk , Alexis Sellier " bollard,https://github.com/fussybeaver/bollard,Apache-2.0,Bollard contributors bollard-stubs,https://github.com/fussybeaver/bollard,Apache-2.0,Bollard contributors +bon,https://github.com/elastio/bon,MIT OR Apache-2.0,The bon Authors 
+bon-macros,https://github.com/elastio/bon,MIT OR Apache-2.0,The bon-macros Authors borrow-or-share,https://github.com/yescallop/borrow-or-share,MIT-0,Scallop Ye brotli,https://github.com/dropbox/rust-brotli,BSD-3-Clause AND MIT,"Daniel Reiter Horn , The Brotli Authors" brotli-decompressor,https://github.com/dropbox/rust-brotli-decompressor,BSD-3-Clause OR MIT,"Daniel Reiter Horn , The Brotli Authors" @@ -547,6 +551,7 @@ postgres-types,https://github.com/sfackler/rust-postgres,MIT OR Apache-2.0,Steve powerfmt,https://github.com/jhpratt/powerfmt,MIT OR Apache-2.0,Jacob Pratt ppv-lite86,https://github.com/cryptocorrosion/cryptocorrosion,MIT OR Apache-2.0,The CryptoCorrosion Contributors prettydiff,https://github.com/romankoblov/prettydiff,MIT,Roman Koblov +prettyplease,https://github.com/dtolnay/prettyplease,MIT OR Apache-2.0,David Tolnay prettytable-rs,https://github.com/phsym/prettytable-rs,BSD-3-Clause,Pierre-Henri Symoneaux primeorder,https://github.com/RustCrypto/elliptic-curves/tree/master/primeorder,Apache-2.0 OR MIT,RustCrypto Developers proc-macro-crate,https://github.com/bkchr/proc-macro-crate,MIT OR Apache-2.0,Bastian Köcher @@ -600,7 +605,7 @@ referencing,https://github.com/Stranger6667/jsonschema,MIT,Dmitry Dygalo " regex-automata,https://github.com/rust-lang/regex/tree/master/regex-automata,MIT OR Apache-2.0,"The Rust Project Developers, Andrew Gallant " regex-filtered,https://github.com/ua-parser/uap-rust,BSD-3-Clause,The regex-filtered Authors -regex-lite,https://github.com/rust-lang/regex/tree/master/regex-lite,MIT OR Apache-2.0,"The Rust Project Developers, Andrew Gallant " +regex-lite,https://github.com/rust-lang/regex,MIT OR Apache-2.0,"The Rust Project Developers, Andrew Gallant " regex-syntax,https://github.com/rust-lang/regex/tree/master/regex-syntax,MIT OR Apache-2.0,"The Rust Project Developers, Andrew Gallant " rend,https://github.com/djkoloski/rend,MIT,David Koloski reqwest,https://github.com/seanmonstar/reqwest,MIT OR Apache-2.0,Sean 
McArthur @@ -657,6 +662,7 @@ serde,https://github.com/serde-rs/serde,MIT OR Apache-2.0,"Erick Tryzelaar serde-value,https://github.com/arcnmx/serde-value,MIT,arcnmx serde_bytes,https://github.com/serde-rs/bytes,MIT OR Apache-2.0,David Tolnay +serde_core,https://github.com/serde-rs/serde,MIT OR Apache-2.0,"Erick Tryzelaar , David Tolnay " serde_derive,https://github.com/serde-rs/serde,MIT OR Apache-2.0,"Erick Tryzelaar , David Tolnay " serde_derive_internals,https://github.com/serde-rs/serde,MIT OR Apache-2.0,"Erick Tryzelaar , David Tolnay " serde_json,https://github.com/serde-rs/json,MIT OR Apache-2.0,"Erick Tryzelaar , David Tolnay " diff --git a/changelog.d/24119_improve_avro_encoding_error.feature.md b/changelog.d/24119_improve_avro_encoding_error.feature.md new file mode 100644 index 0000000000000..d51a9870480f9 --- /dev/null +++ b/changelog.d/24119_improve_avro_encoding_error.feature.md @@ -0,0 +1,3 @@ +Improve Avro encoding error where schema and value are included in the message + +authors: titaneric diff --git a/lib/codecs/Cargo.toml b/lib/codecs/Cargo.toml index 8e4ce532c54b6..741c95b84a895 100644 --- a/lib/codecs/Cargo.toml +++ b/lib/codecs/Cargo.toml @@ -10,7 +10,7 @@ name = "generate-avro-fixtures" path = "tests/bin/generate-avro-fixtures.rs" [dependencies] -apache-avro = { version = "0.16.0", default-features = false } +apache-avro = { version = "0.20.0", default-features = false } bytes.workspace = true chrono.workspace = true csv-core = { version = "0.1.12", default-features = false } diff --git a/lib/codecs/src/decoding/format/avro.rs b/lib/codecs/src/decoding/format/avro.rs index 9a0121a09b495..dc41693ed3fab 100644 --- a/lib/codecs/src/decoding/format/avro.rs +++ b/lib/codecs/src/decoding/format/avro.rs @@ -230,6 +230,15 @@ pub fn try_from(value: AvroValue) -> vector_common::Result { AvroValue::Uuid(uuid) => Ok(VrlValue::from(uuid.as_hyphenated().to_string())), AvroValue::LocalTimestampMillis(ts_millis) => Ok(VrlValue::from(ts_millis)), 
AvroValue::LocalTimestampMicros(ts_micros) => Ok(VrlValue::from(ts_micros)), + AvroValue::BigDecimal(_) => Err(vector_common::Error::from( + "AvroValue::BigDecimal is not supported", + )), + AvroValue::TimestampNanos(_) => Err(vector_common::Error::from( + "AvroValue::TimestampNanos is not supported", + )), + AvroValue::LocalTimestampNanos(_) => Err(vector_common::Error::from( + "AvroValue::LocalTimestampNanos is not supported", + )), } } From 6bf28dd5dbcfbd50a7cd5564eff592df860cfc80 Mon Sep 17 00:00:00 2001 From: Pavlos Rontidis Date: Mon, 3 Nov 2025 15:33:39 -0500 Subject: [PATCH 035/227] chore(ci): add serde, tokio, tracing patterns (#24132) --- .github/dependabot.yml | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/.github/dependabot.yml b/.github/dependabot.yml index a46fd34919f1f..11b5b6a980770 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -41,6 +41,9 @@ updates: patterns: - "futures" - "futures-util" + graphql: + patterns: + - "async-graphql*" metrics: patterns: - "metrics" @@ -49,9 +52,6 @@ updates: netlink: patterns: - "netlink-*" - graphql: - patterns: - - "async-graphql*" phf: patterns: - "phf*" @@ -59,6 +59,14 @@ updates: patterns: - "prost" - "prost-*" + serde: + patterns: + - "serde" + - "serde_*" + tokio: + patterns: + - "tokio" + - "tokio-*" tonic: patterns: - "tonic" @@ -67,6 +75,10 @@ updates: patterns: - "tower" - "tower-*" + tracing: + patterns: + - "tracing" + - "tracing-*" wasm-bindgen: patterns: - "wasm-bindgen-*" From 8bd1ad2c9abac0f92ca9f791d791b27e4992b4e8 Mon Sep 17 00:00:00 2001 From: Pavlos Rontidis Date: Mon, 3 Nov 2025 16:03:46 -0500 Subject: [PATCH 036/227] chore(ci): reorganize integration test files (#24108) * chore(ci): reorganize integration test files * revert nats data move * fix paths to data * fix more paths * amqp fix * Fix logstash int tests shared files paths * Rename scripts/integration -> tests/integration * scripts/integration -> tests/integration in source 
files * Fix scripts->tests * Fix gcp paths * Fix http-client pem path * move nats data --------- Co-authored-by: Thomas --- .github/workflows/changes.yml | 2 +- src/nats.rs | 2 +- .../gcp_chronicle/chronicle_unstructured.rs | 20 +++------ src/sinks/nats/integration_tests.rs | 18 ++++---- src/sources/amqp.rs | 2 +- src/sources/http_client/integration_tests.rs | 2 +- src/sources/logstash.rs | 8 +++- src/sources/nats/integration_tests.rs | 18 ++++---- {scripts => tests}/integration/.gitignore | 0 {scripts => tests}/integration/README.md | 0 .../integration/amqp/config}/compose.yaml | 2 +- .../integration/amqp/config}/test.yaml | 0 .../integration/appsignal/config}/test.yaml | 0 .../integration/aws/config}/compose.yaml | 0 .../integration/aws/config}/test.yaml | 2 +- .../integration/axiom/config}/test.yaml | 2 +- .../integration/azure/config}/compose.yaml | 0 .../integration/azure/config}/test.yaml | 2 +- .../clickhouse/config}/compose.yaml | 0 .../integration/clickhouse/config}/test.yaml | 2 +- .../integration/databend/config}/compose.yaml | 2 +- .../integration/databend/config}/test.yaml | 2 +- .../databend/data}/miniodat/databend/.keep | 0 .../datadog-agent/config}/compose.yaml | 2 +- .../datadog-agent/config}/test.yaml | 2 +- .../datadog-agent/data}/conf.yaml | 0 .../datadog-logs/config}/test.yaml | 2 +- .../datadog-metrics/config}/test.yaml | 2 +- .../datadog-traces/config}/compose.yaml | 0 .../datadog-traces/config}/test.yaml | 2 +- .../integration/dnstap/config}/compose.yaml | 2 +- .../integration/dnstap/config}/test.yaml | 2 +- .../dnstap/data}/Dockerfile | 0 .../dnstap/data}/configure_bind.sh | 0 .../dnstap/data}/db.example.com | 0 .../dnstap/data}/named.conf.local | 0 .../dnstap/data}/named.conf.options.template | 0 .../dnstap/data}/nsupdate.txt | 0 .../dnstap/data}/run_bind.sh | 0 .../integration/docker-logs/config}/test.yaml | 2 +- .../elasticsearch/config}/compose.yaml | 2 +- .../elasticsearch/config}/test.yaml | 2 +- .../eventstoredb/config}/compose.yaml 
| 2 +- .../eventstoredb/config}/test.yaml | 2 +- .../integration/fluent/config}/test.yaml | 2 +- .../fluent/data}/fluent-bit.conf | 0 .../fluent/data}/fluentd-gzip.conf | 0 .../fluent/data}/fluentd-plain.conf | 0 .../integration/gcp/config}/auth.json | 0 .../integration/gcp/config}/compose.yaml | 0 .../integration/gcp/config}/invalidauth.json | 0 .../integration/gcp/config}/public.pem | 0 .../integration/gcp/config}/test.yaml | 2 +- .../greptimedb/config}/compose.yaml | 0 .../integration/greptimedb/config}/test.yaml | 2 +- .../http-client/config}/compose.yaml | 10 ++--- .../integration/http-client/config}/test.yaml | 2 +- .../data}/certs/invalid-ca-cert.pem | 0 .../http-client/data}/serve/logs/bytes | 0 .../http-client/data}/serve/logs/json.json | 0 .../data}/serve/metrics/native.json | 0 .../data}/serve/traces/native.json | 0 .../integration/humio/config}/compose.yaml | 0 .../integration/humio/config}/test.yaml | 2 +- .../integration/influxdb/config}/compose.yaml | 2 +- .../integration/influxdb/config}/test.yaml | 2 +- .../integration/kafka/config}/compose.yaml | 6 +-- .../integration/kafka/config}/test.yaml | 2 +- .../integration/logstash/config}/compose.yaml | 6 +-- .../integration/logstash/config}/test.yaml | 2 +- .../logstash/data}/heartbeat.yml | 0 .../logstash/data}/logstash.conf | 0 .../integration/loki/config}/compose.yaml | 0 .../integration/loki/config}/test.yaml | 2 +- .../integration/mongodb/config}/compose.yaml | 0 .../integration/mongodb/config}/test.yaml | 2 +- .../integration/mqtt/config}/compose.yaml | 0 .../integration/mqtt/config}/test.yaml | 0 .../integration/nats/config}/compose.yaml | 10 ++--- .../integration/nats/config}/test.yaml | 2 +- .../nats => integration/nats/data}/README.md | 0 .../nats/data}/nats-bad.creds | 0 .../nats/data}/nats-client-cert.pem | 0 .../nats/data}/nats-client.key | 0 .../nats/data}/nats-client.pem | 0 .../nats/data}/nats-jetstream.conf | 0 .../nats/data}/nats-jwt.conf | 0 .../nats/data}/nats-nkey.conf | 0 
.../nats/data}/nats-server.key | 0 .../nats/data}/nats-server.pem | 0 .../nats/data}/nats-tls-client-cert.conf | 0 .../nats/data}/nats-tls.conf | 0 .../nats => integration/nats/data}/nats.creds | 0 .../nats => integration/nats/data}/rootCA.pem | 0 .../integration/nginx/config}/compose.yaml | 4 +- .../integration/nginx/config}/test.yaml | 2 +- .../nginx/data}/nginx.conf | 0 .../nginx/data}/nginx_auth_basic.conf | 0 .../opentelemetry/config}/compose.yaml | 2 +- .../opentelemetry/config}/test.yaml | 2 +- .../opentelemetry/data}/config.yaml | 0 .../integration/postgres/config}/compose.yaml | 4 +- .../integration/postgres/config}/test.yaml | 2 +- .../postgres}/data/postgres-init.sh | 0 .../prometheus/config}/compose.yaml | 4 +- .../integration/prometheus/config}/test.yaml | 2 +- .../prometheus}/data/prometheus.yaml | 0 .../integration/pulsar/config}/compose.yaml | 6 +-- .../integration/pulsar/config}/test.yaml | 2 +- .../integration/redis/config}/compose.yaml | 0 .../integration/redis/config}/test.yaml | 2 +- .../shared}/data/host.docker.internal.crt | 0 .../shared}/data/host.docker.internal.key | 0 .../shared}/data/kafka_server_jaas.conf | 0 .../integration/shutdown/config}/compose.yaml | 6 +-- .../integration/shutdown/config}/test.yaml | 0 .../integration/splunk/config}/compose.yaml | 2 +- .../integration/splunk/config}/test.yaml | 2 +- .../splunk/data}/default.yml | 6 +-- .../integration/webhdfs/config}/compose.yaml | 4 +- .../integration/webhdfs/config}/test.yaml | 2 +- .../webhdfs/data}/hadoop.env | 0 vdev/src/commands/integration/mod.rs | 4 +- vdev/src/testing/config.rs | 45 +++++++++++++++---- vdev/src/testing/integration.rs | 2 +- 125 files changed, 149 insertions(+), 122 deletions(-) rename {scripts => tests}/integration/.gitignore (100%) rename {scripts => tests}/integration/README.md (100%) rename {scripts/integration/amqp => tests/integration/amqp/config}/compose.yaml (95%) rename {scripts/integration/amqp => tests/integration/amqp/config}/test.yaml (100%) 
rename {scripts/integration/appsignal => tests/integration/appsignal/config}/test.yaml (100%) rename {scripts/integration/aws => tests/integration/aws/config}/compose.yaml (100%) rename {scripts/integration/aws => tests/integration/aws/config}/test.yaml (96%) rename {scripts/integration/axiom => tests/integration/axiom/config}/test.yaml (92%) rename {scripts/integration/azure => tests/integration/azure/config}/compose.yaml (100%) rename {scripts/integration/azure => tests/integration/azure/config}/test.yaml (92%) rename {scripts/integration/clickhouse => tests/integration/clickhouse/config}/compose.yaml (100%) rename {scripts/integration/clickhouse => tests/integration/clickhouse/config}/test.yaml (90%) rename {scripts/integration/databend => tests/integration/databend/config}/compose.yaml (91%) rename {scripts/integration/databend => tests/integration/databend/config}/test.yaml (91%) rename tests/{data/databend => integration/databend/data}/miniodat/databend/.keep (100%) rename {scripts/integration/datadog-agent => tests/integration/datadog-agent/config}/compose.yaml (90%) rename {scripts/integration/datadog-agent => tests/integration/datadog-agent/config}/test.yaml (93%) rename tests/{data/datadog-agent => integration/datadog-agent/data}/conf.yaml (100%) rename {scripts/integration/datadog-logs => tests/integration/datadog-logs/config}/test.yaml (91%) rename {scripts/integration/datadog-metrics => tests/integration/datadog-metrics/config}/test.yaml (91%) rename {scripts/integration/datadog-traces => tests/integration/datadog-traces/config}/compose.yaml (100%) rename {scripts/integration/datadog-traces => tests/integration/datadog-traces/config}/test.yaml (93%) rename {scripts/integration/dnstap => tests/integration/dnstap/config}/compose.yaml (90%) rename {scripts/integration/dnstap => tests/integration/dnstap/config}/test.yaml (93%) rename tests/{data/dnstap => integration/dnstap/data}/Dockerfile (100%) rename tests/{data/dnstap => 
integration/dnstap/data}/configure_bind.sh (100%) rename tests/{data/dnstap => integration/dnstap/data}/db.example.com (100%) rename tests/{data/dnstap => integration/dnstap/data}/named.conf.local (100%) rename tests/{data/dnstap => integration/dnstap/data}/named.conf.options.template (100%) rename tests/{data/dnstap => integration/dnstap/data}/nsupdate.txt (100%) rename tests/{data/dnstap => integration/dnstap/data}/run_bind.sh (100%) rename {scripts/integration/docker-logs => tests/integration/docker-logs/config}/test.yaml (91%) rename {scripts/integration/elasticsearch => tests/integration/elasticsearch/config}/compose.yaml (94%) rename {scripts/integration/elasticsearch => tests/integration/elasticsearch/config}/test.yaml (93%) rename {scripts/integration/eventstoredb => tests/integration/eventstoredb/config}/compose.yaml (84%) rename {scripts/integration/eventstoredb => tests/integration/eventstoredb/config}/test.yaml (90%) rename {scripts/integration/fluent => tests/integration/fluent/config}/test.yaml (92%) rename tests/{data/fluent => integration/fluent/data}/fluent-bit.conf (100%) rename tests/{data/fluent => integration/fluent/data}/fluentd-gzip.conf (100%) rename tests/{data/fluent => integration/fluent/data}/fluentd-plain.conf (100%) rename {scripts/integration/gcp => tests/integration/gcp/config}/auth.json (100%) rename {scripts/integration/gcp => tests/integration/gcp/config}/compose.yaml (100%) rename {scripts/integration/gcp => tests/integration/gcp/config}/invalidauth.json (100%) rename {scripts/integration/gcp => tests/integration/gcp/config}/public.pem (100%) rename {scripts/integration/gcp => tests/integration/gcp/config}/test.yaml (94%) rename {scripts/integration/greptimedb => tests/integration/greptimedb/config}/compose.yaml (100%) rename {scripts/integration/greptimedb => tests/integration/greptimedb/config}/test.yaml (93%) rename {scripts/integration/http-client => tests/integration/http-client/config}/compose.yaml (61%) rename 
{scripts/integration/http-client => tests/integration/http-client/config}/test.yaml (91%) rename tests/{data/http-client => integration/http-client/data}/certs/invalid-ca-cert.pem (100%) rename tests/{data/http-client => integration/http-client/data}/serve/logs/bytes (100%) rename tests/{data/http-client => integration/http-client/data}/serve/logs/json.json (100%) rename tests/{data/http-client => integration/http-client/data}/serve/metrics/native.json (100%) rename tests/{data/http-client => integration/http-client/data}/serve/traces/native.json (100%) rename {scripts/integration/humio => tests/integration/humio/config}/compose.yaml (100%) rename {scripts/integration/humio => tests/integration/humio/config}/test.yaml (91%) rename {scripts/integration/influxdb => tests/integration/influxdb/config}/compose.yaml (94%) rename {scripts/integration/influxdb => tests/integration/influxdb/config}/test.yaml (93%) rename {scripts/integration/shutdown => tests/integration/kafka/config}/compose.yaml (80%) rename {scripts/integration/kafka => tests/integration/kafka/config}/test.yaml (92%) rename {scripts/integration/logstash => tests/integration/logstash/config}/compose.yaml (60%) rename {scripts/integration/logstash => tests/integration/logstash/config}/test.yaml (91%) rename tests/{data/logstash => integration/logstash/data}/heartbeat.yml (100%) rename tests/{data/logstash => integration/logstash/data}/logstash.conf (100%) rename {scripts/integration/loki => tests/integration/loki/config}/compose.yaml (100%) rename {scripts/integration/loki => tests/integration/loki/config}/test.yaml (91%) rename {scripts/integration/mongodb => tests/integration/mongodb/config}/compose.yaml (100%) rename {scripts/integration/mongodb => tests/integration/mongodb/config}/test.yaml (93%) rename {scripts/integration/mqtt => tests/integration/mqtt/config}/compose.yaml (100%) rename {scripts/integration/mqtt => tests/integration/mqtt/config}/test.yaml (100%) rename {scripts/integration/nats => 
tests/integration/nats/config}/compose.yaml (81%) rename {scripts/integration/nats => tests/integration/nats/config}/test.yaml (95%) rename tests/{data/nats => integration/nats/data}/README.md (100%) rename tests/{data/nats => integration/nats/data}/nats-bad.creds (100%) rename tests/{data/nats => integration/nats/data}/nats-client-cert.pem (100%) rename tests/{data/nats => integration/nats/data}/nats-client.key (100%) rename tests/{data/nats => integration/nats/data}/nats-client.pem (100%) rename tests/{data/nats => integration/nats/data}/nats-jetstream.conf (100%) rename tests/{data/nats => integration/nats/data}/nats-jwt.conf (100%) rename tests/{data/nats => integration/nats/data}/nats-nkey.conf (100%) rename tests/{data/nats => integration/nats/data}/nats-server.key (100%) rename tests/{data/nats => integration/nats/data}/nats-server.pem (100%) rename tests/{data/nats => integration/nats/data}/nats-tls-client-cert.conf (100%) rename tests/{data/nats => integration/nats/data}/nats-tls.conf (100%) rename tests/{data/nats => integration/nats/data}/nats.creds (100%) rename tests/{data/nats => integration/nats/data}/rootCA.pem (100%) rename {scripts/integration/nginx => tests/integration/nginx/config}/compose.yaml (81%) rename {scripts/integration/nginx => tests/integration/nginx/config}/test.yaml (93%) rename tests/{data/nginx => integration/nginx/data}/nginx.conf (100%) rename tests/{data/nginx => integration/nginx/data}/nginx_auth_basic.conf (100%) rename {scripts/integration/opentelemetry => tests/integration/opentelemetry/config}/compose.yaml (71%) rename {scripts/integration/opentelemetry => tests/integration/opentelemetry/config}/test.yaml (91%) rename tests/{data/opentelemetry => integration/opentelemetry/data}/config.yaml (100%) rename {scripts/integration/postgres => tests/integration/postgres/config}/compose.yaml (83%) rename {scripts/integration/postgres => tests/integration/postgres/config}/test.yaml (93%) rename tests/{ => 
integration/postgres}/data/postgres-init.sh (100%) rename {scripts/integration/prometheus => tests/integration/prometheus/config}/compose.yaml (89%) rename {scripts/integration/prometheus => tests/integration/prometheus/config}/test.yaml (92%) rename tests/{ => integration/prometheus}/data/prometheus.yaml (100%) rename {scripts/integration/pulsar => tests/integration/pulsar/config}/compose.yaml (64%) rename {scripts/integration/pulsar => tests/integration/pulsar/config}/test.yaml (91%) rename {scripts/integration/redis => tests/integration/redis/config}/compose.yaml (100%) rename {scripts/integration/redis => tests/integration/redis/config}/test.yaml (93%) rename tests/{ => integration/shared}/data/host.docker.internal.crt (100%) rename tests/{ => integration/shared}/data/host.docker.internal.key (100%) rename tests/{ => integration/shared}/data/kafka_server_jaas.conf (100%) rename {scripts/integration/kafka => tests/integration/shutdown/config}/compose.yaml (80%) rename {scripts/integration/shutdown => tests/integration/shutdown/config}/test.yaml (100%) rename {scripts/integration/splunk => tests/integration/splunk/config}/compose.yaml (83%) rename {scripts/integration/splunk => tests/integration/splunk/config}/test.yaml (93%) rename tests/{data/splunk => integration/splunk/data}/default.yml (93%) rename {scripts/integration/webhdfs => tests/integration/webhdfs/config}/compose.yaml (89%) rename {scripts/integration/webhdfs => tests/integration/webhdfs/config}/test.yaml (91%) rename tests/{data/webhdfs => integration/webhdfs/data}/hadoop.env (100%) diff --git a/.github/workflows/changes.yml b/.github/workflows/changes.yml index c68eec6109509..01919d4cb0b75 100644 --- a/.github/workflows/changes.yml +++ b/.github/workflows/changes.yml @@ -324,7 +324,7 @@ jobs: # creates a yaml file that contains the filters for each integration, # extracted from the output of the `vdev int ci-paths` command, which - # sources the paths from the scripts/integration/.../test.yaml 
files + # sources the paths from the tests/integration/.../config/test.yaml files - name: Create filter rules for integrations run: vdev int ci-paths > int_test_filters.yaml diff --git a/src/nats.rs b/src/nats.rs index d33eadafa1646..715078f966577 100644 --- a/src/nats.rs +++ b/src/nats.rs @@ -266,7 +266,7 @@ mod tests { parse_auth( r#" strategy = "credentials_file" - credentials_file.path = "tests/data/nats/nats.creds" + credentials_file.path = "tests/integration/nats/data/nats.creds" "#, ) .unwrap(); diff --git a/src/sinks/gcp_chronicle/chronicle_unstructured.rs b/src/sinks/gcp_chronicle/chronicle_unstructured.rs index 0262648c18483..399034f33ce54 100644 --- a/src/sinks/gcp_chronicle/chronicle_unstructured.rs +++ b/src/sinks/gcp_chronicle/chronicle_unstructured.rs @@ -718,10 +718,9 @@ mod integration_tests { trace_init(); let log_type = random_string(10); - let (sink, healthcheck) = - config_build(&log_type, "/home/vector/scripts/integration/gcp/auth.json") - .await - .expect("Building sink failed"); + let (sink, healthcheck) = config_build(&log_type, "tests/integration/gcp/config/auth.json") + .await + .expect("Building sink failed"); healthcheck.await.expect("Health check failed"); @@ -749,11 +748,7 @@ mod integration_tests { let log_type = random_string(10); // Test with an auth file that doesnt match the public key sent to the dummy chronicle server. - let sink = config_build( - &log_type, - "/home/vector/scripts/integration/gcp/invalidauth.json", - ) - .await; + let sink = config_build(&log_type, "tests/integration/gcp/config/invalidauth.json").await; assert!(sink.is_err()) } @@ -765,10 +760,9 @@ mod integration_tests { // The chronicle-emulator we are testing against is setup so a `log_type` of "INVALID" // will return a `400 BAD_REQUEST`. 
let log_type = "INVALID"; - let (sink, healthcheck) = - config_build(log_type, "/home/vector/scripts/integration/gcp/auth.json") - .await - .expect("Building sink failed"); + let (sink, healthcheck) = config_build(log_type, "tests/integration/gcp/config/auth.json") + .await + .expect("Building sink failed"); healthcheck.await.expect("Health check failed"); diff --git a/src/sinks/nats/integration_tests.rs b/src/sinks/nats/integration_tests.rs index 434020f812ebe..9e80223ca53db 100644 --- a/src/sinks/nats/integration_tests.rs +++ b/src/sinks/nats/integration_tests.rs @@ -252,7 +252,7 @@ async fn nats_tls_valid() { conf.tls = Some(TlsEnableableConfig { enabled: Some(true), options: TlsConfig { - ca_file: Some("tests/data/nats/rootCA.pem".into()), + ca_file: Some("tests/integration/nats/data/rootCA.pem".into()), ..Default::default() }, }); @@ -293,9 +293,9 @@ async fn nats_tls_client_cert_valid() { conf.tls = Some(TlsEnableableConfig { enabled: Some(true), options: TlsConfig { - ca_file: Some("tests/data/nats/rootCA.pem".into()), - crt_file: Some("tests/data/nats/nats-client.pem".into()), - key_file: Some("tests/data/nats/nats-client.key".into()), + ca_file: Some("tests/integration/nats/data/rootCA.pem".into()), + crt_file: Some("tests/integration/nats/data/nats-client.pem".into()), + key_file: Some("tests/integration/nats/data/nats-client.key".into()), ..Default::default() }, }); @@ -319,7 +319,7 @@ async fn nats_tls_client_cert_invalid() { conf.tls = Some(TlsEnableableConfig { enabled: Some(true), options: TlsConfig { - ca_file: Some("tests/data/nats/rootCA.pem".into()), + ca_file: Some("tests/integration/nats/data/rootCA.pem".into()), ..Default::default() }, }); @@ -343,13 +343,13 @@ async fn nats_tls_jwt_auth_valid() { conf.tls = Some(TlsEnableableConfig { enabled: Some(true), options: TlsConfig { - ca_file: Some("tests/data/nats/rootCA.pem".into()), + ca_file: Some("tests/integration/nats/data/rootCA.pem".into()), ..Default::default() }, }); conf.auth = 
Some(NatsAuthConfig::CredentialsFile { credentials_file: NatsAuthCredentialsFile { - path: "tests/data/nats/nats.creds".into(), + path: "tests/integration/nats/data/nats.creds".into(), }, }); @@ -372,13 +372,13 @@ async fn nats_tls_jwt_auth_invalid() { conf.tls = Some(TlsEnableableConfig { enabled: Some(true), options: TlsConfig { - ca_file: Some("tests/data/nats/rootCA.pem".into()), + ca_file: Some("tests/integration/nats/data/rootCA.pem".into()), ..Default::default() }, }); conf.auth = Some(NatsAuthConfig::CredentialsFile { credentials_file: NatsAuthCredentialsFile { - path: "tests/data/nats/nats-bad.creds".into(), + path: "tests/integration/nats/data/nats-bad.creds".into(), }, }); diff --git a/src/sources/amqp.rs b/src/sources/amqp.rs index 407351c00ed24..b9928606a7341 100644 --- a/src/sources/amqp.rs +++ b/src/sources/amqp.rs @@ -599,7 +599,7 @@ pub mod test { } } -/// Integration tests use the docker compose files in `scripts/integration/docker-compose.amqp.yml`. +/// Integration tests use the docker compose files in `tests/integration/docker-compose.amqp.yml`. 
#[cfg(feature = "amqp-integration-tests")] #[cfg(test)] mod integration_test { diff --git a/src/sources/http_client/integration_tests.rs b/src/sources/http_client/integration_tests.rs index f1f85863ec1c1..f493323708163 100644 --- a/src/sources/http_client/integration_tests.rs +++ b/src/sources/http_client/integration_tests.rs @@ -244,7 +244,7 @@ async fn tls_invalid_ca() { headers: HashMap::new(), method: HttpMethod::Get, tls: Some(TlsConfig { - ca_file: Some("tests/data/http-client/certs/invalid-ca-cert.pem".into()), + ca_file: Some("tests/integration/http-client/data/certs/invalid-ca-cert.pem".into()), ..Default::default() }), auth: None, diff --git a/src/sources/logstash.rs b/src/sources/logstash.rs index c605ae18f6ce5..057822a6dfa4f 100644 --- a/src/sources/logstash.rs +++ b/src/sources/logstash.rs @@ -966,8 +966,12 @@ mod integration_tests { Some(TlsEnableableConfig { enabled: Some(true), options: TlsConfig { - crt_file: Some("tests/data/host.docker.internal.crt".into()), - key_file: Some("tests/data/host.docker.internal.key".into()), + crt_file: Some( + "tests/integration/shared/data/host.docker.internal.crt".into(), + ), + key_file: Some( + "tests/integration/shared/data/host.docker.internal.key".into(), + ), ..Default::default() }, }), diff --git a/src/sources/nats/integration_tests.rs b/src/sources/nats/integration_tests.rs index 8ffd6cabbc176..a3caa7e766c07 100644 --- a/src/sources/nats/integration_tests.rs +++ b/src/sources/nats/integration_tests.rs @@ -280,7 +280,7 @@ async fn nats_tls_valid() { conf.tls = Some(TlsEnableableConfig { enabled: Some(true), options: TlsConfig { - ca_file: Some("tests/data/nats/rootCA.pem".into()), + ca_file: Some("tests/integration/nats/data/rootCA.pem".into()), ..Default::default() }, }); @@ -317,9 +317,9 @@ async fn nats_tls_client_cert_valid() { conf.tls = Some(TlsEnableableConfig { enabled: Some(true), options: TlsConfig { - ca_file: Some("tests/data/nats/rootCA.pem".into()), - crt_file: 
Some("tests/data/nats/nats-client.pem".into()), - key_file: Some("tests/data/nats/nats-client.key".into()), + ca_file: Some("tests/integration/nats/data/rootCA.pem".into()), + crt_file: Some("tests/integration/nats/data/nats-client.pem".into()), + key_file: Some("tests/integration/nats/data/nats-client.key".into()), ..Default::default() }, }); @@ -341,7 +341,7 @@ async fn nats_tls_client_cert_invalid() { conf.tls = Some(TlsEnableableConfig { enabled: Some(true), options: TlsConfig { - ca_file: Some("tests/data/nats/rootCA.pem".into()), + ca_file: Some("tests/integration/nats/data/rootCA.pem".into()), ..Default::default() }, }); @@ -363,13 +363,13 @@ async fn nats_tls_jwt_auth_valid() { conf.tls = Some(TlsEnableableConfig { enabled: Some(true), options: TlsConfig { - ca_file: Some("tests/data/nats/rootCA.pem".into()), + ca_file: Some("tests/integration/nats/data/rootCA.pem".into()), ..Default::default() }, }); conf.auth = Some(NatsAuthConfig::CredentialsFile { credentials_file: NatsAuthCredentialsFile { - path: "tests/data/nats/nats.creds".into(), + path: "tests/integration/nats/data/nats.creds".into(), }, }); @@ -390,13 +390,13 @@ async fn nats_tls_jwt_auth_invalid() { conf.tls = Some(TlsEnableableConfig { enabled: Some(true), options: TlsConfig { - ca_file: Some("tests/data/nats/rootCA.pem".into()), + ca_file: Some("tests/integration/nats/data/rootCA.pem".into()), ..Default::default() }, }); conf.auth = Some(NatsAuthConfig::CredentialsFile { credentials_file: NatsAuthCredentialsFile { - path: "tests/data/nats/nats-bad.creds".into(), + path: "tests/integration/nats/data/nats-bad.creds".into(), }, }); diff --git a/scripts/integration/.gitignore b/tests/integration/.gitignore similarity index 100% rename from scripts/integration/.gitignore rename to tests/integration/.gitignore diff --git a/scripts/integration/README.md b/tests/integration/README.md similarity index 100% rename from scripts/integration/README.md rename to tests/integration/README.md diff --git 
a/scripts/integration/amqp/compose.yaml b/tests/integration/amqp/config/compose.yaml similarity index 95% rename from scripts/integration/amqp/compose.yaml rename to tests/integration/amqp/config/compose.yaml index c6561ae225008..3eb859f93aa60 100644 --- a/scripts/integration/amqp/compose.yaml +++ b/tests/integration/amqp/config/compose.yaml @@ -12,7 +12,7 @@ services: - RABBITMQ_SSL_CACERTFILE=/code/tests/data/ca/intermediate_server/certs/ca-chain.cert.pem - RABBITMQ_SSL_FAIL_IF_NO_PEER_CERT=false volumes: - - ../../..:/code + - ../../../..:/code networks: default: diff --git a/scripts/integration/amqp/test.yaml b/tests/integration/amqp/config/test.yaml similarity index 100% rename from scripts/integration/amqp/test.yaml rename to tests/integration/amqp/config/test.yaml diff --git a/scripts/integration/appsignal/test.yaml b/tests/integration/appsignal/config/test.yaml similarity index 100% rename from scripts/integration/appsignal/test.yaml rename to tests/integration/appsignal/config/test.yaml diff --git a/scripts/integration/aws/compose.yaml b/tests/integration/aws/config/compose.yaml similarity index 100% rename from scripts/integration/aws/compose.yaml rename to tests/integration/aws/config/compose.yaml diff --git a/scripts/integration/aws/test.yaml b/tests/integration/aws/config/test.yaml similarity index 96% rename from scripts/integration/aws/test.yaml rename to tests/integration/aws/config/test.yaml index ba1d901dfc217..956f724978fa3 100644 --- a/scripts/integration/aws/test.yaml +++ b/tests/integration/aws/config/test.yaml @@ -29,4 +29,4 @@ paths: - "src/sinks/aws_*/**" - "src/sinks/util/**" - "src/transforms/aws_*" -- "scripts/integration/aws/**" +- "tests/integration/aws/**" diff --git a/scripts/integration/axiom/test.yaml b/tests/integration/axiom/config/test.yaml similarity index 92% rename from scripts/integration/axiom/test.yaml rename to tests/integration/axiom/config/test.yaml index 1e8c3e1d8eec5..934bd739eeaca 100644 --- 
a/scripts/integration/axiom/test.yaml +++ b/tests/integration/axiom/config/test.yaml @@ -18,4 +18,4 @@ matrix: paths: - "src/sinks/axiom.rs" - "src/sinks/util/**" -- "scripts/integration/axiom/**" +- "tests/integration/axiom/**" diff --git a/scripts/integration/azure/compose.yaml b/tests/integration/azure/config/compose.yaml similarity index 100% rename from scripts/integration/azure/compose.yaml rename to tests/integration/azure/config/compose.yaml diff --git a/scripts/integration/azure/test.yaml b/tests/integration/azure/config/test.yaml similarity index 92% rename from scripts/integration/azure/test.yaml rename to tests/integration/azure/config/test.yaml index de3f8796763a9..191bea7256cea 100644 --- a/scripts/integration/azure/test.yaml +++ b/tests/integration/azure/config/test.yaml @@ -16,4 +16,4 @@ matrix: paths: - "src/sinks/azure_**" - "src/sinks/util/**" -- "scripts/integration/azure/**" +- "tests/integration/azure/**" diff --git a/scripts/integration/clickhouse/compose.yaml b/tests/integration/clickhouse/config/compose.yaml similarity index 100% rename from scripts/integration/clickhouse/compose.yaml rename to tests/integration/clickhouse/config/compose.yaml diff --git a/scripts/integration/clickhouse/test.yaml b/tests/integration/clickhouse/config/test.yaml similarity index 90% rename from scripts/integration/clickhouse/test.yaml rename to tests/integration/clickhouse/config/test.yaml index 7b106b914d4dd..3418d810fa638 100644 --- a/scripts/integration/clickhouse/test.yaml +++ b/tests/integration/clickhouse/config/test.yaml @@ -14,4 +14,4 @@ matrix: paths: - "src/sinks/clickhouse/**" - "src/sinks/util/**" -- "scripts/integration/clickhouse/**" +- "tests/integration/clickhouse/**" diff --git a/scripts/integration/databend/compose.yaml b/tests/integration/databend/config/compose.yaml similarity index 91% rename from scripts/integration/databend/compose.yaml rename to tests/integration/databend/config/compose.yaml index 5304afc507960..213fb20af70fe 100644 --- 
a/scripts/integration/databend/compose.yaml +++ b/tests/integration/databend/config/compose.yaml @@ -5,7 +5,7 @@ services: image: docker.io/minio/minio command: server /data volumes: - - ../../../tests/data/databend/miniodat/:/data + - ../data/miniodat/:/data databend: image: docker.io/datafuselabs/databend:${CONFIG_VERSION} environment: diff --git a/scripts/integration/databend/test.yaml b/tests/integration/databend/config/test.yaml similarity index 91% rename from scripts/integration/databend/test.yaml rename to tests/integration/databend/config/test.yaml index 545813974eef5..eaa9f5ad50d86 100644 --- a/scripts/integration/databend/test.yaml +++ b/tests/integration/databend/config/test.yaml @@ -15,4 +15,4 @@ matrix: paths: - "src/sinks/databend/**" - "src/sinks/util/**" -- "scripts/integration/databend/**" +- "tests/integration/databend/**" diff --git a/tests/data/databend/miniodat/databend/.keep b/tests/integration/databend/data/miniodat/databend/.keep similarity index 100% rename from tests/data/databend/miniodat/databend/.keep rename to tests/integration/databend/data/miniodat/databend/.keep diff --git a/scripts/integration/datadog-agent/compose.yaml b/tests/integration/datadog-agent/config/compose.yaml similarity index 90% rename from scripts/integration/datadog-agent/compose.yaml rename to tests/integration/datadog-agent/config/compose.yaml index 0fe91c2d44c49..4ccf95ff24e00 100644 --- a/scripts/integration/datadog-agent/compose.yaml +++ b/tests/integration/datadog-agent/config/compose.yaml @@ -16,7 +16,7 @@ services: - DD_HOSTNAME=datadog-agent - DD_SERIALIZER_COMPRESSOR_KIND=zstd volumes: - - ../../../tests/data/datadog-agent/conf.yaml:/etc/datadog-agent/conf.d/test.d/conf.yaml + - ../data/conf.yaml:/etc/datadog-agent/conf.d/test.d/conf.yaml datadog-trace-agent: image: docker.io/datadog/agent:7.31.0 environment: diff --git a/scripts/integration/datadog-agent/test.yaml b/tests/integration/datadog-agent/config/test.yaml similarity index 93% rename from 
scripts/integration/datadog-agent/test.yaml rename to tests/integration/datadog-agent/config/test.yaml index 6c4b399fdfb3e..c1d3ec50d7592 100644 --- a/scripts/integration/datadog-agent/test.yaml +++ b/tests/integration/datadog-agent/config/test.yaml @@ -19,4 +19,4 @@ paths: - "src/common/datadog.rs" - "src/internal_events/datadog_*" - "src/sources/datadog_agent/**" -- "scripts/integration/datadog-agent/**" +- "tests/integration/datadog-agent/**" diff --git a/tests/data/datadog-agent/conf.yaml b/tests/integration/datadog-agent/data/conf.yaml similarity index 100% rename from tests/data/datadog-agent/conf.yaml rename to tests/integration/datadog-agent/data/conf.yaml diff --git a/scripts/integration/datadog-logs/test.yaml b/tests/integration/datadog-logs/config/test.yaml similarity index 91% rename from scripts/integration/datadog-logs/test.yaml rename to tests/integration/datadog-logs/config/test.yaml index 7937db5d87158..f10db04ef5661 100644 --- a/scripts/integration/datadog-logs/test.yaml +++ b/tests/integration/datadog-logs/config/test.yaml @@ -17,4 +17,4 @@ paths: - "src/internal_events/datadog_*" - "src/sinks/datadog/logs/**" - "src/sinks/util/**" -- "scripts/integration/datadog-logs/**" +- "tests/integration/datadog-logs/**" diff --git a/scripts/integration/datadog-metrics/test.yaml b/tests/integration/datadog-metrics/config/test.yaml similarity index 91% rename from scripts/integration/datadog-metrics/test.yaml rename to tests/integration/datadog-metrics/config/test.yaml index a45e55a9797f2..55282a361eccb 100644 --- a/scripts/integration/datadog-metrics/test.yaml +++ b/tests/integration/datadog-metrics/config/test.yaml @@ -17,4 +17,4 @@ paths: - "src/internal_events/datadog_*" - "src/sinks/datadog/metrics/**" - "src/sinks/util/**" -- "scripts/integration/datadog-metrics/**" +- "tests/integration/datadog-metrics/**" diff --git a/scripts/integration/datadog-traces/compose.yaml b/tests/integration/datadog-traces/config/compose.yaml similarity index 100% rename 
from scripts/integration/datadog-traces/compose.yaml rename to tests/integration/datadog-traces/config/compose.yaml diff --git a/scripts/integration/datadog-traces/test.yaml b/tests/integration/datadog-traces/config/test.yaml similarity index 93% rename from scripts/integration/datadog-traces/test.yaml rename to tests/integration/datadog-traces/config/test.yaml index 5374a6716caab..2a3f162e61b30 100644 --- a/scripts/integration/datadog-traces/test.yaml +++ b/tests/integration/datadog-traces/config/test.yaml @@ -21,4 +21,4 @@ paths: - "src/internal_events/datadog_*" - "src/sinks/datadog/traces/**" - "src/sinks/util/**" -- "scripts/integration/datadog-traces/**" +- "tests/integration/datadog-traces/**" diff --git a/scripts/integration/dnstap/compose.yaml b/tests/integration/dnstap/config/compose.yaml similarity index 90% rename from scripts/integration/dnstap/compose.yaml rename to tests/integration/dnstap/config/compose.yaml index 00264121cb474..b821e292b1ff8 100644 --- a/scripts/integration/dnstap/compose.yaml +++ b/tests/integration/dnstap/config/compose.yaml @@ -3,7 +3,7 @@ version: "3" services: dnstap: build: - context: ../../../tests/data/dnstap + context: ../data container_name: vector_dnstap hostname: ns.example.com volumes: diff --git a/scripts/integration/dnstap/test.yaml b/tests/integration/dnstap/config/test.yaml similarity index 93% rename from scripts/integration/dnstap/test.yaml rename to tests/integration/dnstap/config/test.yaml index dab2f93e5e2ff..ed0de1d3ea411 100644 --- a/scripts/integration/dnstap/test.yaml +++ b/tests/integration/dnstap/config/test.yaml @@ -19,4 +19,4 @@ paths: - "src/internal_events/dnstap.rs" - "src/sources/dnstap/**" - "src/sources/util/**" -- "scripts/integration/dnstap/**" +- "tests/integration/dnstap/**" diff --git a/tests/data/dnstap/Dockerfile b/tests/integration/dnstap/data/Dockerfile similarity index 100% rename from tests/data/dnstap/Dockerfile rename to tests/integration/dnstap/data/Dockerfile diff --git 
a/tests/data/dnstap/configure_bind.sh b/tests/integration/dnstap/data/configure_bind.sh similarity index 100% rename from tests/data/dnstap/configure_bind.sh rename to tests/integration/dnstap/data/configure_bind.sh diff --git a/tests/data/dnstap/db.example.com b/tests/integration/dnstap/data/db.example.com similarity index 100% rename from tests/data/dnstap/db.example.com rename to tests/integration/dnstap/data/db.example.com diff --git a/tests/data/dnstap/named.conf.local b/tests/integration/dnstap/data/named.conf.local similarity index 100% rename from tests/data/dnstap/named.conf.local rename to tests/integration/dnstap/data/named.conf.local diff --git a/tests/data/dnstap/named.conf.options.template b/tests/integration/dnstap/data/named.conf.options.template similarity index 100% rename from tests/data/dnstap/named.conf.options.template rename to tests/integration/dnstap/data/named.conf.options.template diff --git a/tests/data/dnstap/nsupdate.txt b/tests/integration/dnstap/data/nsupdate.txt similarity index 100% rename from tests/data/dnstap/nsupdate.txt rename to tests/integration/dnstap/data/nsupdate.txt diff --git a/tests/data/dnstap/run_bind.sh b/tests/integration/dnstap/data/run_bind.sh similarity index 100% rename from tests/data/dnstap/run_bind.sh rename to tests/integration/dnstap/data/run_bind.sh diff --git a/scripts/integration/docker-logs/test.yaml b/tests/integration/docker-logs/config/test.yaml similarity index 91% rename from scripts/integration/docker-logs/test.yaml rename to tests/integration/docker-logs/config/test.yaml index 15a432f955c70..0ac64071bd516 100644 --- a/scripts/integration/docker-logs/test.yaml +++ b/tests/integration/docker-logs/config/test.yaml @@ -16,4 +16,4 @@ paths: - "src/internal_events/docker_logs.rs" - "src/sources/docker_logs/**" - "src/sources/util/**" -- "scripts/integration/docker-logs/**" +- "tests/integration/docker-logs/**" diff --git a/scripts/integration/elasticsearch/compose.yaml 
b/tests/integration/elasticsearch/config/compose.yaml similarity index 94% rename from scripts/integration/elasticsearch/compose.yaml rename to tests/integration/elasticsearch/config/compose.yaml index 6db0746fe8b10..4dcb6636bf4cb 100644 --- a/scripts/integration/elasticsearch/compose.yaml +++ b/tests/integration/elasticsearch/config/compose.yaml @@ -24,7 +24,7 @@ services: - ELASTIC_PASSWORD=vector - ES_JAVA_OPTS=-Xms400m -Xmx400m volumes: - - ../../../tests/data/ca:/usr/share/elasticsearch/config/certs:ro + - ../../../data/ca:/usr/share/elasticsearch/config/certs:ro networks: default: diff --git a/scripts/integration/elasticsearch/test.yaml b/tests/integration/elasticsearch/config/test.yaml similarity index 93% rename from scripts/integration/elasticsearch/test.yaml rename to tests/integration/elasticsearch/config/test.yaml index 6b27bca77e0dd..cdf1eedbbb4a7 100644 --- a/scripts/integration/elasticsearch/test.yaml +++ b/tests/integration/elasticsearch/config/test.yaml @@ -18,4 +18,4 @@ matrix: paths: - "src/sinks/elasticsearch/**" - "src/sinks/util/**" -- "scripts/integration/elasticsearch/**" +- "tests/integration/elasticsearch/**" diff --git a/scripts/integration/eventstoredb/compose.yaml b/tests/integration/eventstoredb/config/compose.yaml similarity index 84% rename from scripts/integration/eventstoredb/compose.yaml rename to tests/integration/eventstoredb/config/compose.yaml index eb0ae7b945650..01e6a14ea4117 100644 --- a/scripts/integration/eventstoredb/compose.yaml +++ b/tests/integration/eventstoredb/config/compose.yaml @@ -5,7 +5,7 @@ services: image: docker.io/eventstore/eventstore:${CONFIG_VERSION} command: --insecure --stats-period-sec=1 volumes: - - ../../../tests/data:/etc/vector:ro + - ../../../data:/etc/vector:ro networks: default: diff --git a/scripts/integration/eventstoredb/test.yaml b/tests/integration/eventstoredb/config/test.yaml similarity index 90% rename from scripts/integration/eventstoredb/test.yaml rename to 
tests/integration/eventstoredb/config/test.yaml index 9a734db4881eb..2dab755bfd032 100644 --- a/scripts/integration/eventstoredb/test.yaml +++ b/tests/integration/eventstoredb/config/test.yaml @@ -12,4 +12,4 @@ paths: - "src/internal_events/eventstoredb_metrics.rs" - "src/sources/eventstoredb_metrics/**" - "src/sources/util/**" -- "scripts/integration/eventstoredb/**" +- "tests/integration/eventstoredb/**" diff --git a/scripts/integration/fluent/test.yaml b/tests/integration/fluent/config/test.yaml similarity index 92% rename from scripts/integration/fluent/test.yaml rename to tests/integration/fluent/config/test.yaml index 6593f8379b216..f5f977aa1f637 100644 --- a/scripts/integration/fluent/test.yaml +++ b/tests/integration/fluent/config/test.yaml @@ -17,4 +17,4 @@ paths: - "src/internal_events/fluent.rs" - "src/sources/fluent/**" - "src/sources/util/**" -- "scripts/integration/fluent/**" +- "tests/integration/fluent/**" diff --git a/tests/data/fluent/fluent-bit.conf b/tests/integration/fluent/data/fluent-bit.conf similarity index 100% rename from tests/data/fluent/fluent-bit.conf rename to tests/integration/fluent/data/fluent-bit.conf diff --git a/tests/data/fluent/fluentd-gzip.conf b/tests/integration/fluent/data/fluentd-gzip.conf similarity index 100% rename from tests/data/fluent/fluentd-gzip.conf rename to tests/integration/fluent/data/fluentd-gzip.conf diff --git a/tests/data/fluent/fluentd-plain.conf b/tests/integration/fluent/data/fluentd-plain.conf similarity index 100% rename from tests/data/fluent/fluentd-plain.conf rename to tests/integration/fluent/data/fluentd-plain.conf diff --git a/scripts/integration/gcp/auth.json b/tests/integration/gcp/config/auth.json similarity index 100% rename from scripts/integration/gcp/auth.json rename to tests/integration/gcp/config/auth.json diff --git a/scripts/integration/gcp/compose.yaml b/tests/integration/gcp/config/compose.yaml similarity index 100% rename from scripts/integration/gcp/compose.yaml rename to 
tests/integration/gcp/config/compose.yaml diff --git a/scripts/integration/gcp/invalidauth.json b/tests/integration/gcp/config/invalidauth.json similarity index 100% rename from scripts/integration/gcp/invalidauth.json rename to tests/integration/gcp/config/invalidauth.json diff --git a/scripts/integration/gcp/public.pem b/tests/integration/gcp/config/public.pem similarity index 100% rename from scripts/integration/gcp/public.pem rename to tests/integration/gcp/config/public.pem diff --git a/scripts/integration/gcp/test.yaml b/tests/integration/gcp/config/test.yaml similarity index 94% rename from scripts/integration/gcp/test.yaml rename to tests/integration/gcp/config/test.yaml index 772a416d2162a..be5d1fcd2cc82 100644 --- a/scripts/integration/gcp/test.yaml +++ b/tests/integration/gcp/config/test.yaml @@ -20,4 +20,4 @@ paths: - "src/sinks/gcp/**" - "src/sinks/util/**" - "src/gcp.rs" -- "scripts/integration/gcp/**" +- "tests/integration/gcp/**" diff --git a/scripts/integration/greptimedb/compose.yaml b/tests/integration/greptimedb/config/compose.yaml similarity index 100% rename from scripts/integration/greptimedb/compose.yaml rename to tests/integration/greptimedb/config/compose.yaml diff --git a/scripts/integration/greptimedb/test.yaml b/tests/integration/greptimedb/config/test.yaml similarity index 93% rename from scripts/integration/greptimedb/test.yaml rename to tests/integration/greptimedb/config/test.yaml index eb1754364172a..17593b891df8c 100644 --- a/scripts/integration/greptimedb/test.yaml +++ b/tests/integration/greptimedb/config/test.yaml @@ -18,4 +18,4 @@ matrix: # expressions are evaluated using https://github.com/micromatch/picomatch paths: - "src/sinks/greptimedb/**" -- "scripts/integration/greptimedb/**" +- "tests/integration/greptimedb/**" diff --git a/scripts/integration/http-client/compose.yaml b/tests/integration/http-client/config/compose.yaml similarity index 61% rename from scripts/integration/http-client/compose.yaml rename to 
tests/integration/http-client/config/compose.yaml index 11ad2bc76591a..7ad5dfc47386b 100644 --- a/scripts/integration/http-client/compose.yaml +++ b/tests/integration/http-client/config/compose.yaml @@ -6,7 +6,7 @@ services: command: - /data volumes: - - ../../../tests/data/http-client/serve:/data + - ../data/serve:/data dufs-auth: image: docker.io/sigoden/dufs:${CONFIG_VERSION} command: @@ -16,7 +16,7 @@ services: - basic - /data volumes: - - ../../../tests/data/http-client/serve:/data + - ../data/serve:/data dufs-https: image: docker.io/sigoden/dufs:${CONFIG_VERSION} command: @@ -26,9 +26,9 @@ services: - /certs/ca.key.pem - /data volumes: - - ../../../tests/data/http-client/serve:/data - - ../../../tests/data/ca/intermediate_server/certs/dufs-https-chain.cert.pem:/certs/ca.cert.pem - - ../../../tests/data/ca/intermediate_server/private/dufs-https.key.pem:/certs/ca.key.pem + - ../data/serve:/data + - ../../../data/ca/intermediate_server/certs/dufs-https-chain.cert.pem:/certs/ca.cert.pem + - ../../../data/ca/intermediate_server/private/dufs-https.key.pem:/certs/ca.key.pem networks: default: diff --git a/scripts/integration/http-client/test.yaml b/tests/integration/http-client/config/test.yaml similarity index 91% rename from scripts/integration/http-client/test.yaml rename to tests/integration/http-client/config/test.yaml index 0ae2b49bf2c76..a56570dc5cf81 100644 --- a/scripts/integration/http-client/test.yaml +++ b/tests/integration/http-client/config/test.yaml @@ -16,4 +16,4 @@ matrix: paths: - "src/sources/http_client/**" - "src/sources/util/**" -- "scripts/integration/http-client/**" +- "tests/integration/http-client/**" diff --git a/tests/data/http-client/certs/invalid-ca-cert.pem b/tests/integration/http-client/data/certs/invalid-ca-cert.pem similarity index 100% rename from tests/data/http-client/certs/invalid-ca-cert.pem rename to tests/integration/http-client/data/certs/invalid-ca-cert.pem diff --git a/tests/data/http-client/serve/logs/bytes 
b/tests/integration/http-client/data/serve/logs/bytes similarity index 100% rename from tests/data/http-client/serve/logs/bytes rename to tests/integration/http-client/data/serve/logs/bytes diff --git a/tests/data/http-client/serve/logs/json.json b/tests/integration/http-client/data/serve/logs/json.json similarity index 100% rename from tests/data/http-client/serve/logs/json.json rename to tests/integration/http-client/data/serve/logs/json.json diff --git a/tests/data/http-client/serve/metrics/native.json b/tests/integration/http-client/data/serve/metrics/native.json similarity index 100% rename from tests/data/http-client/serve/metrics/native.json rename to tests/integration/http-client/data/serve/metrics/native.json diff --git a/tests/data/http-client/serve/traces/native.json b/tests/integration/http-client/data/serve/traces/native.json similarity index 100% rename from tests/data/http-client/serve/traces/native.json rename to tests/integration/http-client/data/serve/traces/native.json diff --git a/scripts/integration/humio/compose.yaml b/tests/integration/humio/config/compose.yaml similarity index 100% rename from scripts/integration/humio/compose.yaml rename to tests/integration/humio/config/compose.yaml diff --git a/scripts/integration/humio/test.yaml b/tests/integration/humio/config/test.yaml similarity index 91% rename from scripts/integration/humio/test.yaml rename to tests/integration/humio/config/test.yaml index ad242e622f5b2..a8cdebeba3b19 100644 --- a/scripts/integration/humio/test.yaml +++ b/tests/integration/humio/config/test.yaml @@ -15,4 +15,4 @@ matrix: paths: - "src/sinks/humio/**" - "src/sinks/util/**" -- "scripts/integration/humio/**" +- "tests/integration/humio/**" diff --git a/scripts/integration/influxdb/compose.yaml b/tests/integration/influxdb/config/compose.yaml similarity index 94% rename from scripts/integration/influxdb/compose.yaml rename to tests/integration/influxdb/config/compose.yaml index 7499564740491..b5c31c006ab06 100644 --- 
a/scripts/integration/influxdb/compose.yaml +++ b/tests/integration/influxdb/config/compose.yaml @@ -13,7 +13,7 @@ services: - INFLUXDB_HTTP_HTTPS_CERTIFICATE=/etc/ssl/intermediate_server/certs/influxdb-v1-tls-chain.cert.pem - INFLUXDB_HTTP_HTTPS_PRIVATE_KEY=/etc/ssl/intermediate_server/private/influxdb-v1-tls.key.pem volumes: - - ../../../tests/data/ca:/etc/ssl:ro + - ../../../data/ca:/etc/ssl:ro influxdb-v2: image: docker.io/influxdb:2.0 command: influxd --reporting-disabled diff --git a/scripts/integration/influxdb/test.yaml b/tests/integration/influxdb/config/test.yaml similarity index 93% rename from scripts/integration/influxdb/test.yaml rename to tests/integration/influxdb/config/test.yaml index 3dad78af2e2e8..35bf2f72691bf 100644 --- a/scripts/integration/influxdb/test.yaml +++ b/tests/integration/influxdb/config/test.yaml @@ -17,4 +17,4 @@ paths: - "src/internal_events/influxdb.rs" - "src/sinks/influxdb/**" - "src/sinks/util/**" -- "scripts/integration/influxdb/**" +- "tests/integration/influxdb/**" diff --git a/scripts/integration/shutdown/compose.yaml b/tests/integration/kafka/config/compose.yaml similarity index 80% rename from scripts/integration/shutdown/compose.yaml rename to tests/integration/kafka/config/compose.yaml index 0437b8b866f7a..81a35dd64ec94 100644 --- a/scripts/integration/shutdown/compose.yaml +++ b/tests/integration/kafka/config/compose.yaml @@ -31,9 +31,9 @@ services: - 9092:9092 - 9093:9093 volumes: - - ../../../tests/data/ca/intermediate_server/private/kafka.pass:/etc/kafka/secrets/kafka.pass:ro - - ../../../tests/data/ca/intermediate_server/private/kafka.p12:/etc/kafka/secrets/kafka.p12:ro - - ../../../tests/data/kafka_server_jaas.conf:/etc/kafka/kafka_server_jaas.conf + - ../../../data/ca/intermediate_server/private/kafka.pass:/etc/kafka/secrets/kafka.pass:ro + - ../../../data/ca/intermediate_server/private/kafka.p12:/etc/kafka/secrets/kafka.p12:ro + - ../../shared/data/kafka_server_jaas.conf:/etc/kafka/kafka_server_jaas.conf 
networks: default: diff --git a/scripts/integration/kafka/test.yaml b/tests/integration/kafka/config/test.yaml similarity index 92% rename from scripts/integration/kafka/test.yaml rename to tests/integration/kafka/config/test.yaml index a52131e6e5a1b..9619b4d7ea0f1 100644 --- a/scripts/integration/kafka/test.yaml +++ b/tests/integration/kafka/config/test.yaml @@ -18,4 +18,4 @@ paths: - "src/sources/kafka.rs" - "src/sources/util/**" - "src/kafka.rs" -- "scripts/integration/kafka/**" +- "tests/integration/kafka/**" diff --git a/scripts/integration/logstash/compose.yaml b/tests/integration/logstash/config/compose.yaml similarity index 60% rename from scripts/integration/logstash/compose.yaml rename to tests/integration/logstash/config/compose.yaml index 1e7a17918435b..af024d0855453 100644 --- a/scripts/integration/logstash/compose.yaml +++ b/tests/integration/logstash/config/compose.yaml @@ -5,13 +5,13 @@ services: image: docker.elastic.co/beats/heartbeat:${CONFIG_VERSION} command: -environment=container -strict.perms=false volumes: - - ../../../tests/data/logstash/heartbeat.yml:/usr/share/heartbeat/heartbeat.yml:ro + - ../data/heartbeat.yml:/usr/share/heartbeat/heartbeat.yml:ro logstash: image: docker.elastic.co/logstash/logstash:7.13.1 volumes: - /dev/null:/usr/share/logstash/pipeline/logstash.yml - - ../../../tests/data/host.docker.internal.crt:/tmp/logstash.crt - - ../../../tests/data/logstash/logstash.conf:/usr/share/logstash/pipeline/logstash.conf + - ../../shared/data/host.docker.internal.crt:/tmp/logstash.crt + - ../data/logstash.conf:/usr/share/logstash/pipeline/logstash.conf networks: default: diff --git a/scripts/integration/logstash/test.yaml b/tests/integration/logstash/config/test.yaml similarity index 91% rename from scripts/integration/logstash/test.yaml rename to tests/integration/logstash/config/test.yaml index a73a040b6a996..e0f26afbec7f7 100644 --- a/scripts/integration/logstash/test.yaml +++ b/tests/integration/logstash/config/test.yaml @@ -15,4 
+15,4 @@ matrix: paths: - "src/sources/logstash.rs" - "src/sources/util/**" -- "scripts/integration/logstash/**" +- "tests/integration/logstash/**" diff --git a/tests/data/logstash/heartbeat.yml b/tests/integration/logstash/data/heartbeat.yml similarity index 100% rename from tests/data/logstash/heartbeat.yml rename to tests/integration/logstash/data/heartbeat.yml diff --git a/tests/data/logstash/logstash.conf b/tests/integration/logstash/data/logstash.conf similarity index 100% rename from tests/data/logstash/logstash.conf rename to tests/integration/logstash/data/logstash.conf diff --git a/scripts/integration/loki/compose.yaml b/tests/integration/loki/config/compose.yaml similarity index 100% rename from scripts/integration/loki/compose.yaml rename to tests/integration/loki/config/compose.yaml diff --git a/scripts/integration/loki/test.yaml b/tests/integration/loki/config/test.yaml similarity index 91% rename from scripts/integration/loki/test.yaml rename to tests/integration/loki/config/test.yaml index 60f762dd569b7..9a86d9ce6c29d 100644 --- a/scripts/integration/loki/test.yaml +++ b/tests/integration/loki/config/test.yaml @@ -15,4 +15,4 @@ paths: - "src/internal_events/loki.rs" - "src/sinks/loki/**" - "src/sinks/util/**" -- "scripts/integration/loki/**" +- "tests/integration/loki/**" diff --git a/scripts/integration/mongodb/compose.yaml b/tests/integration/mongodb/config/compose.yaml similarity index 100% rename from scripts/integration/mongodb/compose.yaml rename to tests/integration/mongodb/config/compose.yaml diff --git a/scripts/integration/mongodb/test.yaml b/tests/integration/mongodb/config/test.yaml similarity index 93% rename from scripts/integration/mongodb/test.yaml rename to tests/integration/mongodb/config/test.yaml index 76ddefab16f46..10da02aca38dc 100644 --- a/scripts/integration/mongodb/test.yaml +++ b/tests/integration/mongodb/config/test.yaml @@ -16,4 +16,4 @@ paths: - "src/internal_events/mongodb_metrics.rs" - "src/sources/mongodb_metrics/**" 
- "src/sources/util/**" -- "scripts/integration/mongodb/**" +- "tests/integration/mongodb/**" diff --git a/scripts/integration/mqtt/compose.yaml b/tests/integration/mqtt/config/compose.yaml similarity index 100% rename from scripts/integration/mqtt/compose.yaml rename to tests/integration/mqtt/config/compose.yaml diff --git a/scripts/integration/mqtt/test.yaml b/tests/integration/mqtt/config/test.yaml similarity index 100% rename from scripts/integration/mqtt/test.yaml rename to tests/integration/mqtt/config/test.yaml diff --git a/scripts/integration/nats/compose.yaml b/tests/integration/nats/config/compose.yaml similarity index 81% rename from scripts/integration/nats/compose.yaml rename to tests/integration/nats/config/compose.yaml index 24a0495772ba6..f77020b57afee 100644 --- a/scripts/integration/nats/compose.yaml +++ b/tests/integration/nats/config/compose.yaml @@ -21,35 +21,35 @@ services: - --config - /usr/share/nats/config/nats-nkey.conf volumes: - - ../../../tests/data/nats:/usr/share/nats/config + - ../data:/usr/share/nats/config nats-tls: image: docker.io/library/nats:${CONFIG_VERSION} command: - --config - /usr/share/nats/config/nats-tls.conf volumes: - - ../../../tests/data/nats:/usr/share/nats/config + - ../data:/usr/share/nats/config nats-tls-client-cert: image: docker.io/library/nats:${CONFIG_VERSION} command: - --config - /usr/share/nats/config/nats-tls-client-cert.conf volumes: - - ../../../tests/data/nats:/usr/share/nats/config + - ../data:/usr/share/nats/config nats-jwt: image: docker.io/library/nats:${CONFIG_VERSION} command: - --config - /usr/share/nats/config/nats-jwt.conf volumes: - - ../../../tests/data/nats:/usr/share/nats/config + - ../data:/usr/share/nats/config nats-jetstream-test: image: docker.io/library/nats:${CONFIG_VERSION} command: - --config - /usr/share/nats/config/nats-jetstream.conf volumes: - - ../../../tests/data/nats:/usr/share/nats/config + - ../data:/usr/share/nats/config networks: default: diff --git 
a/scripts/integration/nats/test.yaml b/tests/integration/nats/config/test.yaml similarity index 95% rename from scripts/integration/nats/test.yaml rename to tests/integration/nats/config/test.yaml index 171a1c4ae6a3e..2f67676fa5cd2 100644 --- a/scripts/integration/nats/test.yaml +++ b/tests/integration/nats/config/test.yaml @@ -25,4 +25,4 @@ paths: - "src/sinks/nats/**" - "src/sinks/util/**" - "src/nats.rs" - - "scripts/integration/nats/**" + - "tests/integration/nats/**" diff --git a/tests/data/nats/README.md b/tests/integration/nats/data/README.md similarity index 100% rename from tests/data/nats/README.md rename to tests/integration/nats/data/README.md diff --git a/tests/data/nats/nats-bad.creds b/tests/integration/nats/data/nats-bad.creds similarity index 100% rename from tests/data/nats/nats-bad.creds rename to tests/integration/nats/data/nats-bad.creds diff --git a/tests/data/nats/nats-client-cert.pem b/tests/integration/nats/data/nats-client-cert.pem similarity index 100% rename from tests/data/nats/nats-client-cert.pem rename to tests/integration/nats/data/nats-client-cert.pem diff --git a/tests/data/nats/nats-client.key b/tests/integration/nats/data/nats-client.key similarity index 100% rename from tests/data/nats/nats-client.key rename to tests/integration/nats/data/nats-client.key diff --git a/tests/data/nats/nats-client.pem b/tests/integration/nats/data/nats-client.pem similarity index 100% rename from tests/data/nats/nats-client.pem rename to tests/integration/nats/data/nats-client.pem diff --git a/tests/data/nats/nats-jetstream.conf b/tests/integration/nats/data/nats-jetstream.conf similarity index 100% rename from tests/data/nats/nats-jetstream.conf rename to tests/integration/nats/data/nats-jetstream.conf diff --git a/tests/data/nats/nats-jwt.conf b/tests/integration/nats/data/nats-jwt.conf similarity index 100% rename from tests/data/nats/nats-jwt.conf rename to tests/integration/nats/data/nats-jwt.conf diff --git a/tests/data/nats/nats-nkey.conf 
b/tests/integration/nats/data/nats-nkey.conf similarity index 100% rename from tests/data/nats/nats-nkey.conf rename to tests/integration/nats/data/nats-nkey.conf diff --git a/tests/data/nats/nats-server.key b/tests/integration/nats/data/nats-server.key similarity index 100% rename from tests/data/nats/nats-server.key rename to tests/integration/nats/data/nats-server.key diff --git a/tests/data/nats/nats-server.pem b/tests/integration/nats/data/nats-server.pem similarity index 100% rename from tests/data/nats/nats-server.pem rename to tests/integration/nats/data/nats-server.pem diff --git a/tests/data/nats/nats-tls-client-cert.conf b/tests/integration/nats/data/nats-tls-client-cert.conf similarity index 100% rename from tests/data/nats/nats-tls-client-cert.conf rename to tests/integration/nats/data/nats-tls-client-cert.conf diff --git a/tests/data/nats/nats-tls.conf b/tests/integration/nats/data/nats-tls.conf similarity index 100% rename from tests/data/nats/nats-tls.conf rename to tests/integration/nats/data/nats-tls.conf diff --git a/tests/data/nats/nats.creds b/tests/integration/nats/data/nats.creds similarity index 100% rename from tests/data/nats/nats.creds rename to tests/integration/nats/data/nats.creds diff --git a/tests/data/nats/rootCA.pem b/tests/integration/nats/data/rootCA.pem similarity index 100% rename from tests/data/nats/rootCA.pem rename to tests/integration/nats/data/rootCA.pem diff --git a/scripts/integration/nginx/compose.yaml b/tests/integration/nginx/config/compose.yaml similarity index 81% rename from scripts/integration/nginx/compose.yaml rename to tests/integration/nginx/config/compose.yaml index 78efa46a1c578..3caf63a002f11 100644 --- a/scripts/integration/nginx/compose.yaml +++ b/tests/integration/nginx/config/compose.yaml @@ -11,13 +11,13 @@ services: nginx: image: docker.io/nginx:${CONFIG_VERSION} volumes: - - ../../../tests/data/nginx/:/etc/nginx:ro + - ../data/:/etc/nginx:ro networks: - default nginx-proxy: image: 
docker.io/nginx:${CONFIG_VERSION} volumes: - - ../../../tests/data/nginx/:/etc/nginx:ro + - ../data/:/etc/nginx:ro networks: - proxy diff --git a/scripts/integration/nginx/test.yaml b/tests/integration/nginx/config/test.yaml similarity index 93% rename from scripts/integration/nginx/test.yaml rename to tests/integration/nginx/config/test.yaml index 934873608d5ef..8d9014b657787 100644 --- a/scripts/integration/nginx/test.yaml +++ b/tests/integration/nginx/config/test.yaml @@ -18,4 +18,4 @@ paths: - "src/internal_events/nginx_metrics.rs" - "src/sources/nginx_metrics/**" - "src/sources/util/**" -- "scripts/integration/nginx/**" +- "tests/integration/nginx/**" diff --git a/tests/data/nginx/nginx.conf b/tests/integration/nginx/data/nginx.conf similarity index 100% rename from tests/data/nginx/nginx.conf rename to tests/integration/nginx/data/nginx.conf diff --git a/tests/data/nginx/nginx_auth_basic.conf b/tests/integration/nginx/data/nginx_auth_basic.conf similarity index 100% rename from tests/data/nginx/nginx_auth_basic.conf rename to tests/integration/nginx/data/nginx_auth_basic.conf diff --git a/scripts/integration/opentelemetry/compose.yaml b/tests/integration/opentelemetry/config/compose.yaml similarity index 71% rename from scripts/integration/opentelemetry/compose.yaml rename to tests/integration/opentelemetry/config/compose.yaml index b85cc417cedaf..d8f0c95d20fbc 100644 --- a/scripts/integration/opentelemetry/compose.yaml +++ b/tests/integration/opentelemetry/config/compose.yaml @@ -4,7 +4,7 @@ services: opentelemetry-collector: image: docker.io/otel/opentelemetry-collector-contrib:${CONFIG_VERSION} volumes: - - ../../../tests/data/opentelemetry/config.yaml:/etc/otelcol-contrib/config.yaml + - ../data/config.yaml:/etc/otelcol-contrib/config.yaml networks: default: diff --git a/scripts/integration/opentelemetry/test.yaml b/tests/integration/opentelemetry/config/test.yaml similarity index 91% rename from scripts/integration/opentelemetry/test.yaml rename to 
tests/integration/opentelemetry/config/test.yaml index e586c444affac..91a2968fdfae0 100644 --- a/scripts/integration/opentelemetry/test.yaml +++ b/tests/integration/opentelemetry/config/test.yaml @@ -16,4 +16,4 @@ matrix: paths: - "src/sources/opentelemetry/**" - "src/sources/util/**" -- "scripts/integration/opentelemetry/**" +- "tests/integration/opentelemetry/**" diff --git a/tests/data/opentelemetry/config.yaml b/tests/integration/opentelemetry/data/config.yaml similarity index 100% rename from tests/data/opentelemetry/config.yaml rename to tests/integration/opentelemetry/data/config.yaml diff --git a/scripts/integration/postgres/compose.yaml b/tests/integration/postgres/config/compose.yaml similarity index 83% rename from scripts/integration/postgres/compose.yaml rename to tests/integration/postgres/config/compose.yaml index 8c3ccb12a7913..017e497c81811 100644 --- a/scripts/integration/postgres/compose.yaml +++ b/tests/integration/postgres/config/compose.yaml @@ -9,8 +9,8 @@ services: - POSTGRES_PASSWORD=vector volumes: - socket:/var/run/postgresql - - ../../../tests/data/postgres-init.sh:/postgres-init.sh:ro - - ../../../tests/data/ca:/certs:ro + - ../data/postgres-init.sh:/postgres-init.sh:ro + - ../../../data/ca:/certs:ro volumes: # Use external volume 'postgres_socket' that's shared with the test runner diff --git a/scripts/integration/postgres/test.yaml b/tests/integration/postgres/config/test.yaml similarity index 93% rename from scripts/integration/postgres/test.yaml rename to tests/integration/postgres/config/test.yaml index a89049f545948..85c012a11fa5f 100644 --- a/scripts/integration/postgres/test.yaml +++ b/tests/integration/postgres/config/test.yaml @@ -22,4 +22,4 @@ paths: - "src/sinks/postgres/**" - "src/sources/postgresql_metrics.rs" - "src/sources/util/**" -- "scripts/integration/postgres/**" +- "tests/integration/postgres/**" diff --git a/tests/data/postgres-init.sh b/tests/integration/postgres/data/postgres-init.sh similarity index 100% rename 
from tests/data/postgres-init.sh rename to tests/integration/postgres/data/postgres-init.sh diff --git a/scripts/integration/prometheus/compose.yaml b/tests/integration/prometheus/config/compose.yaml similarity index 89% rename from scripts/integration/prometheus/compose.yaml rename to tests/integration/prometheus/config/compose.yaml index 9f618f8f521f0..a945f730ce834 100644 --- a/scripts/integration/prometheus/compose.yaml +++ b/tests/integration/prometheus/config/compose.yaml @@ -15,12 +15,12 @@ services: - INFLUXDB_HTTP_HTTPS_CERTIFICATE=/etc/ssl/intermediate_server/certs/influxdb-v1-tls-chain.cert.pem - INFLUXDB_HTTP_HTTPS_PRIVATE_KEY=/etc/ssl/intermediate_server/private/influxdb-v1-tls.key.pem volumes: - - ../../../tests/data/ca:/etc/ssl:ro + - ../../../data/ca:/etc/ssl:ro prometheus: image: docker.io/prom/prometheus:${CONFIG_PROMETHEUS} command: --config.file=/etc/vector/prometheus.yaml volumes: - - ../../../tests/data:/etc/vector:ro + - ../data/prometheus.yaml:/etc/vector/prometheus.yaml:ro networks: default: diff --git a/scripts/integration/prometheus/test.yaml b/tests/integration/prometheus/config/test.yaml similarity index 92% rename from scripts/integration/prometheus/test.yaml rename to tests/integration/prometheus/config/test.yaml index d2db2d9282b6b..455338053e2ff 100644 --- a/scripts/integration/prometheus/test.yaml +++ b/tests/integration/prometheus/config/test.yaml @@ -18,4 +18,4 @@ paths: - "src/sources/util/**" - "src/sinks/prometheus/**" - "src/sinks/util/**" -- "scripts/integration/prometheus/**" +- "tests/integration/prometheus/**" diff --git a/tests/data/prometheus.yaml b/tests/integration/prometheus/data/prometheus.yaml similarity index 100% rename from tests/data/prometheus.yaml rename to tests/integration/prometheus/data/prometheus.yaml diff --git a/scripts/integration/pulsar/compose.yaml b/tests/integration/pulsar/config/compose.yaml similarity index 64% rename from scripts/integration/pulsar/compose.yaml rename to 
tests/integration/pulsar/config/compose.yaml index 1e4742c8fc3fe..cca9c9de8d713 100644 --- a/scripts/integration/pulsar/compose.yaml +++ b/tests/integration/pulsar/config/compose.yaml @@ -13,9 +13,9 @@ services: - PULSAR_PREFIX_tlsCertificateFilePath=/etc/pulsar/certs/pulsar.cert.pem - PULSAR_PREFIX_tlsTrustCertsFilePath=/etc/pulsar/certs/ca-chain.cert.pem volumes: - - ../../../tests/data/ca/intermediate_server/private/pulsar.key.pem:/etc/pulsar/certs/pulsar.key.pem:ro - - ../../../tests/data//ca/intermediate_server/certs/pulsar.cert.pem:/etc/pulsar/certs/pulsar.cert.pem:ro - - ../../../tests/data/ca/intermediate_server/certs/ca-chain.cert.pem:/etc/pulsar/certs/ca-chain.cert.pem:ro + - ../../../data/ca/intermediate_server/private/pulsar.key.pem:/etc/pulsar/certs/pulsar.key.pem:ro + - ../../../data/ca/intermediate_server/certs/pulsar.cert.pem:/etc/pulsar/certs/pulsar.cert.pem:ro + - ../../../data/ca/intermediate_server/certs/ca-chain.cert.pem:/etc/pulsar/certs/ca-chain.cert.pem:ro networks: default: diff --git a/scripts/integration/pulsar/test.yaml b/tests/integration/pulsar/config/test.yaml similarity index 91% rename from scripts/integration/pulsar/test.yaml rename to tests/integration/pulsar/config/test.yaml index b629a582d0a62..9d7f33f60dac5 100644 --- a/scripts/integration/pulsar/test.yaml +++ b/tests/integration/pulsar/config/test.yaml @@ -15,4 +15,4 @@ paths: - "src/internal_events/pulsar.rs" - "src/sinks/pulsar/**" - "src/sinks/util/**" -- "scripts/integration/pulsar/**" +- "tests/integration/pulsar/**" diff --git a/scripts/integration/redis/compose.yaml b/tests/integration/redis/config/compose.yaml similarity index 100% rename from scripts/integration/redis/compose.yaml rename to tests/integration/redis/config/compose.yaml diff --git a/scripts/integration/redis/test.yaml b/tests/integration/redis/config/test.yaml similarity index 93% rename from scripts/integration/redis/test.yaml rename to tests/integration/redis/config/test.yaml index 
6b2d9bfa3745a..fdfc54e0446ec 100644 --- a/scripts/integration/redis/test.yaml +++ b/tests/integration/redis/config/test.yaml @@ -18,4 +18,4 @@ paths: - "src/sources/util/**" - "src/sinks/redis.rs" - "src/sinks/util/**" -- "scripts/integration/redis/**" +- "tests/integration/redis/**" diff --git a/tests/data/host.docker.internal.crt b/tests/integration/shared/data/host.docker.internal.crt similarity index 100% rename from tests/data/host.docker.internal.crt rename to tests/integration/shared/data/host.docker.internal.crt diff --git a/tests/data/host.docker.internal.key b/tests/integration/shared/data/host.docker.internal.key similarity index 100% rename from tests/data/host.docker.internal.key rename to tests/integration/shared/data/host.docker.internal.key diff --git a/tests/data/kafka_server_jaas.conf b/tests/integration/shared/data/kafka_server_jaas.conf similarity index 100% rename from tests/data/kafka_server_jaas.conf rename to tests/integration/shared/data/kafka_server_jaas.conf diff --git a/scripts/integration/kafka/compose.yaml b/tests/integration/shutdown/config/compose.yaml similarity index 80% rename from scripts/integration/kafka/compose.yaml rename to tests/integration/shutdown/config/compose.yaml index 0437b8b866f7a..81a35dd64ec94 100644 --- a/scripts/integration/kafka/compose.yaml +++ b/tests/integration/shutdown/config/compose.yaml @@ -31,9 +31,9 @@ services: - 9092:9092 - 9093:9093 volumes: - - ../../../tests/data/ca/intermediate_server/private/kafka.pass:/etc/kafka/secrets/kafka.pass:ro - - ../../../tests/data/ca/intermediate_server/private/kafka.p12:/etc/kafka/secrets/kafka.p12:ro - - ../../../tests/data/kafka_server_jaas.conf:/etc/kafka/kafka_server_jaas.conf + - ../../../data/ca/intermediate_server/private/kafka.pass:/etc/kafka/secrets/kafka.pass:ro + - ../../../data/ca/intermediate_server/private/kafka.p12:/etc/kafka/secrets/kafka.p12:ro + - ../../shared/data/kafka_server_jaas.conf:/etc/kafka/kafka_server_jaas.conf networks: default: diff 
--git a/scripts/integration/shutdown/test.yaml b/tests/integration/shutdown/config/test.yaml similarity index 100% rename from scripts/integration/shutdown/test.yaml rename to tests/integration/shutdown/config/test.yaml diff --git a/scripts/integration/splunk/compose.yaml b/tests/integration/splunk/config/compose.yaml similarity index 83% rename from scripts/integration/splunk/compose.yaml rename to tests/integration/splunk/config/compose.yaml index cfae5c07b545d..a252674d2bffa 100644 --- a/scripts/integration/splunk/compose.yaml +++ b/tests/integration/splunk/config/compose.yaml @@ -8,7 +8,7 @@ services: - SPLUNK_PASSWORD=password - SPLUNK_HEC_TOKEN=abcd1234 volumes: - - ../../../tests/data/splunk/default.yml:/tmp/defaults/default.yml + - ../data/splunk/default.yml:/tmp/defaults/default.yml ports: - 8000:8000 - 8088:8088 diff --git a/scripts/integration/splunk/test.yaml b/tests/integration/splunk/config/test.yaml similarity index 93% rename from scripts/integration/splunk/test.yaml rename to tests/integration/splunk/config/test.yaml index 85de787cbe1e6..71a5c7994fe4a 100644 --- a/scripts/integration/splunk/test.yaml +++ b/tests/integration/splunk/config/test.yaml @@ -18,4 +18,4 @@ paths: - "src/sources/util/**" - "src/sinks/splunk_hec/**" - "src/sinks/util/**" -- "scripts/integration/splunk/**" +- "tests/integration/splunk/**" diff --git a/tests/data/splunk/default.yml b/tests/integration/splunk/data/default.yml similarity index 93% rename from tests/data/splunk/default.yml rename to tests/integration/splunk/data/default.yml index 0a7a05f2eb3f9..dfd83a6360ed0 100644 --- a/tests/data/splunk/default.yml +++ b/tests/integration/splunk/data/default.yml @@ -13,10 +13,10 @@ splunk: useACK: true - key: indexes # https://docs.splunk.com/Documentation/Splunk/latest/Admin/Indexesconf - value: + value: directory: /opt/splunk/etc/system/local - content: - custom_index: + content: + custom_index: homePath: $SPLUNK_DB/custom_index/db coldPath: $SPLUNK_DB/custom_index/colddb 
thawedPath: $SPLUNK_DB/custom_index/thaweddb diff --git a/scripts/integration/webhdfs/compose.yaml b/tests/integration/webhdfs/config/compose.yaml similarity index 89% rename from scripts/integration/webhdfs/compose.yaml rename to tests/integration/webhdfs/config/compose.yaml index 082816fd01e1f..f3b5a51519513 100644 --- a/scripts/integration/webhdfs/compose.yaml +++ b/tests/integration/webhdfs/config/compose.yaml @@ -10,7 +10,7 @@ services: environment: - CLUSTER_NAME=test env_file: - - ../../../tests/data/webhdfs/hadoop.env + - ../data/hadoop.env healthcheck: test: ["CMD", "curl", "-f", "http://localhost:9870"] interval: 5s @@ -24,7 +24,7 @@ services: environment: - SERVICE_PRECONDITION=namenode.local:9870 env_file: - - ../../../tests/data/webhdfs/hadoop.env + - ../data/hadoop.env healthcheck: test: ["CMD", "curl", "-f", "http://localhost:9864"] interval: 5s diff --git a/scripts/integration/webhdfs/test.yaml b/tests/integration/webhdfs/config/test.yaml similarity index 91% rename from scripts/integration/webhdfs/test.yaml rename to tests/integration/webhdfs/config/test.yaml index fc6a3193f9139..f1bd9f88b2ff9 100644 --- a/scripts/integration/webhdfs/test.yaml +++ b/tests/integration/webhdfs/config/test.yaml @@ -14,4 +14,4 @@ matrix: paths: - "src/sinks/webhdfs/**" - "src/sinks/util/**" -- "scripts/integration/webhdfs/**" +- "tests/integration/webhdfs/**" diff --git a/tests/data/webhdfs/hadoop.env b/tests/integration/webhdfs/data/hadoop.env similarity index 100% rename from tests/data/webhdfs/hadoop.env rename to tests/integration/webhdfs/data/hadoop.env diff --git a/vdev/src/commands/integration/mod.rs b/vdev/src/commands/integration/mod.rs index b517fc475a5db..7fc2ea0ec071c 100644 --- a/vdev/src/commands/integration/mod.rs +++ b/vdev/src/commands/integration/mod.rs @@ -2,8 +2,8 @@ crate::cli_subcommands! { r"Manage integration test environments... These test setups are organized into a set of integrations, located in subdirectories -`scripts/integration`. 
For each integration, there is a matrix of environments, described in the -`matrix` setting in the `test.yaml` file contained therein." +`tests/integration`. For each integration, there is a matrix of environments, described in the +`matrix` setting in the `test.yaml` file contained in the `config/` subdirectory." mod show, mod build, diff --git a/vdev/src/testing/config.rs b/vdev/src/testing/config.rs index 68167838de2f2..facae0bb8a460 100644 --- a/vdev/src/testing/config.rs +++ b/vdev/src/testing/config.rs @@ -13,10 +13,22 @@ use serde_yaml::Value; use crate::{app, environment::Environment, util}; const FILE_NAME: &str = "test.yaml"; +const CONFIG_SUBDIR: &str = "config"; pub const INTEGRATION_TESTS_DIR: &str = "integration"; pub const E2E_TESTS_DIR: &str = "e2e"; +/// Returns the base directory and whether to use config subdirectory for the given test type. +/// Integration tests are in tests/integration with config/ subdirectories. +/// E2E tests are in scripts/e2e without config/ subdirectories. 
+fn test_dir_config(root_dir: &str) -> (&'static str, bool) { + if root_dir == INTEGRATION_TESTS_DIR { + ("tests", true) + } else { + ("scripts", false) + } +} + #[derive(Deserialize, Debug)] pub struct RustToolchainRootConfig { pub toolchain: RustToolchainConfig, @@ -205,7 +217,9 @@ impl ComposeTestConfig { } pub fn load(root_dir: &str, integration: &str) -> Result<(PathBuf, Self)> { - let test_dir: PathBuf = [app::path(), "scripts", root_dir, integration] + let (base_dir, use_config_subdir) = test_dir_config(root_dir); + + let test_dir: PathBuf = [app::path(), base_dir, root_dir, integration] .iter() .collect(); @@ -213,16 +227,30 @@ impl ComposeTestConfig { bail!("unknown integration: {}", integration); } - let config = Self::parse_file(&test_dir.join(FILE_NAME))?; - Ok((test_dir, config)) + let config_dir = if use_config_subdir { + test_dir.join(CONFIG_SUBDIR) + } else { + test_dir.clone() + }; + let config = Self::parse_file(&config_dir.join(FILE_NAME))?; + Ok((config_dir, config)) } - fn collect_all_dir(tests_dir: &Path, configs: &mut BTreeMap) -> Result<()> { + fn collect_all_dir( + tests_dir: &Path, + configs: &mut BTreeMap, + use_config_subdir: bool, + ) -> Result<()> { for entry in tests_dir.read_dir()? { let entry = entry?; if entry.path().is_dir() { - let config_file: PathBuf = - [entry.path().to_str().unwrap(), FILE_NAME].iter().collect(); + let config_file: PathBuf = if use_config_subdir { + [entry.path().to_str().unwrap(), CONFIG_SUBDIR, FILE_NAME] + .iter() + .collect() + } else { + [entry.path().to_str().unwrap(), FILE_NAME].iter().collect() + }; if util::exists(&config_file)? 
{ let config = Self::parse_file(&config_file)?; configs.insert(entry.file_name().into_string().unwrap(), config); @@ -235,9 +263,10 @@ impl ComposeTestConfig { pub fn collect_all(root_dir: &str) -> Result> { let mut configs = BTreeMap::new(); - let tests_dir: PathBuf = [app::path(), "scripts", root_dir].iter().collect(); + let (base_dir, use_config_subdir) = test_dir_config(root_dir); + let tests_dir: PathBuf = [app::path(), base_dir, root_dir].iter().collect(); - Self::collect_all_dir(&tests_dir, &mut configs)?; + Self::collect_all_dir(&tests_dir, &mut configs, use_config_subdir)?; Ok(configs) } diff --git a/vdev/src/testing/integration.rs b/vdev/src/testing/integration.rs index ebab1320a99f2..f63c8fdb6366c 100644 --- a/vdev/src/testing/integration.rs +++ b/vdev/src/testing/integration.rs @@ -37,7 +37,7 @@ pub(crate) struct ComposeTestLocalConfig { } impl ComposeTestLocalConfig { - /// Integration tests are located in the `scripts/integration` dir, + /// Integration tests are located in the `tests/integration` dir, /// and are the full feature flag is `all-integration-tests`. 
pub(crate) fn integration() -> Self { Self { From 2ed1eb47e3eb40b9fd4a2cd7832c562b09c40bef Mon Sep 17 00:00:00 2001 From: Pavlos Rontidis Date: Mon, 3 Nov 2025 20:39:45 -0500 Subject: [PATCH 037/227] chore(ci): fix gcp test filter and ignore failing tests (#24134) * chore(ci): fix gcp test filter and ignore failing tests * even more failed on CI --- src/sinks/gcp_chronicle/chronicle_unstructured.rs | 2 ++ src/sources/gcp_pubsub.rs | 6 ++++++ tests/integration/gcp/config/test.yaml | 2 +- 3 files changed, 9 insertions(+), 1 deletion(-) diff --git a/src/sinks/gcp_chronicle/chronicle_unstructured.rs b/src/sinks/gcp_chronicle/chronicle_unstructured.rs index 399034f33ce54..d57ac7f5c3f0e 100644 --- a/src/sinks/gcp_chronicle/chronicle_unstructured.rs +++ b/src/sinks/gcp_chronicle/chronicle_unstructured.rs @@ -713,6 +713,7 @@ mod integration_tests { config(log_type, auth_path).build(cx).await } + #[ignore = "https://github.com/vectordotdev/vector/issues/24133"] #[tokio::test] async fn publish_events() { trace_init(); @@ -753,6 +754,7 @@ mod integration_tests { assert!(sink.is_err()) } + #[ignore = "https://github.com/vectordotdev/vector/issues/24133"] #[tokio::test] async fn publish_invalid_events() { trace_init(); diff --git a/src/sources/gcp_pubsub.rs b/src/sources/gcp_pubsub.rs index eddceab6e7ef5..445d8d8f9caab 100644 --- a/src/sources/gcp_pubsub.rs +++ b/src/sources/gcp_pubsub.rs @@ -877,6 +877,7 @@ mod integration_tests { LazyLock::new(|| format!("{}/v1/projects/{}", *gcp::PUBSUB_ADDRESS, PROJECT)); static ACK_DEADLINE: LazyLock = LazyLock::new(|| Duration::from_secs(10)); // Minimum custom deadline allowed by Pub/Sub + #[ignore = "https://github.com/vectordotdev/vector/issues/24133"] #[tokio::test] async fn oneshot() { assert_source_compliance(&SOURCE_TAGS, async move { @@ -888,6 +889,7 @@ mod integration_tests { .await; } + #[ignore = "https://github.com/vectordotdev/vector/issues/24133"] #[tokio::test] async fn shuts_down_before_data_received() { let (tester, mut 
rx, shutdown) = setup(EventStatus::Delivered).await; @@ -900,6 +902,7 @@ mod integration_tests { assert_eq!(tester.pull_count(1).await, 1); } + #[ignore = "https://github.com/vectordotdev/vector/issues/24133"] #[tokio::test] async fn shuts_down_after_data_received() { assert_source_compliance(&SOURCE_TAGS, async move { @@ -923,6 +926,7 @@ mod integration_tests { .await; } + #[ignore = "https://github.com/vectordotdev/vector/issues/24133"] #[tokio::test] async fn streams_data() { assert_source_compliance(&SOURCE_TAGS, async move { @@ -936,6 +940,7 @@ mod integration_tests { .await; } + #[ignore = "https://github.com/vectordotdev/vector/issues/24133"] #[tokio::test] async fn sends_attributes() { assert_source_compliance(&SOURCE_TAGS, async move { @@ -952,6 +957,7 @@ mod integration_tests { .await; } + #[ignore = "https://github.com/vectordotdev/vector/issues/24133"] #[tokio::test] async fn acks_received() { assert_source_compliance(&SOURCE_TAGS, async move { diff --git a/tests/integration/gcp/config/test.yaml b/tests/integration/gcp/config/test.yaml index be5d1fcd2cc82..b700b8283f831 100644 --- a/tests/integration/gcp/config/test.yaml +++ b/tests/integration/gcp/config/test.yaml @@ -2,7 +2,7 @@ features: - gcp-integration-tests - chronicle-integration-tests -test_filter: '::gcp::' +test_filter: '::gcp' env: EMULATOR_ADDRESS: http://gcloud-pubsub:8681 From 9d50f2d4bfd5fdadf72cf5b06af12b96e2958fac Mon Sep 17 00:00:00 2001 From: Thomas Date: Tue, 4 Nov 2025 13:44:44 -0500 Subject: [PATCH 038/227] chore(releasing): rebuild manifests for 0.51.0 (#24142) chore(releasing): cargo vdev build manifests --- distribution/kubernetes/vector-agent/README.md | 2 +- distribution/kubernetes/vector-agent/configmap.yaml | 2 +- distribution/kubernetes/vector-agent/daemonset.yaml | 4 ++-- distribution/kubernetes/vector-agent/rbac.yaml | 4 ++-- distribution/kubernetes/vector-agent/service-headless.yaml | 2 +- distribution/kubernetes/vector-agent/serviceaccount.yaml | 2 +- 
distribution/kubernetes/vector-aggregator/README.md | 2 +- distribution/kubernetes/vector-aggregator/configmap.yaml | 2 +- .../kubernetes/vector-aggregator/service-headless.yaml | 2 +- distribution/kubernetes/vector-aggregator/service.yaml | 2 +- distribution/kubernetes/vector-aggregator/serviceaccount.yaml | 2 +- distribution/kubernetes/vector-aggregator/statefulset.yaml | 4 ++-- distribution/kubernetes/vector-stateless-aggregator/README.md | 2 +- .../kubernetes/vector-stateless-aggregator/configmap.yaml | 2 +- .../kubernetes/vector-stateless-aggregator/deployment.yaml | 4 ++-- .../vector-stateless-aggregator/service-headless.yaml | 2 +- .../kubernetes/vector-stateless-aggregator/service.yaml | 2 +- .../vector-stateless-aggregator/serviceaccount.yaml | 2 +- 18 files changed, 22 insertions(+), 22 deletions(-) diff --git a/distribution/kubernetes/vector-agent/README.md b/distribution/kubernetes/vector-agent/README.md index f1e50a64b0a42..4df9696bd4d9d 100644 --- a/distribution/kubernetes/vector-agent/README.md +++ b/distribution/kubernetes/vector-agent/README.md @@ -1,6 +1,6 @@ The kubernetes manifests found in this directory have been automatically generated from the [helm chart `vector/vector`](https://github.com/vectordotdev/helm-charts/tree/master/charts/vector) -version 0.46.0 with the following `values.yaml`: +version 0.47.0 with the following `values.yaml`: ```yaml role: Agent diff --git a/distribution/kubernetes/vector-agent/configmap.yaml b/distribution/kubernetes/vector-agent/configmap.yaml index 88a30526cac48..9fed2898f23c9 100644 --- a/distribution/kubernetes/vector-agent/configmap.yaml +++ b/distribution/kubernetes/vector-agent/configmap.yaml @@ -9,7 +9,7 @@ metadata: app.kubernetes.io/name: vector app.kubernetes.io/instance: vector app.kubernetes.io/component: Agent - app.kubernetes.io/version: "0.50.0-distroless-libc" + app.kubernetes.io/version: "0.51.0-distroless-libc" data: agent.yaml: | data_dir: /vector-data-dir diff --git 
a/distribution/kubernetes/vector-agent/daemonset.yaml b/distribution/kubernetes/vector-agent/daemonset.yaml index 6e6d3059aa173..d1b88bb142f57 100644 --- a/distribution/kubernetes/vector-agent/daemonset.yaml +++ b/distribution/kubernetes/vector-agent/daemonset.yaml @@ -9,7 +9,7 @@ metadata: app.kubernetes.io/name: vector app.kubernetes.io/instance: vector app.kubernetes.io/component: Agent - app.kubernetes.io/version: "0.50.0-distroless-libc" + app.kubernetes.io/version: "0.51.0-distroless-libc" spec: selector: matchLabels: @@ -30,7 +30,7 @@ spec: dnsPolicy: ClusterFirst containers: - name: vector - image: "timberio/vector:0.50.0-distroless-libc" + image: "timberio/vector:0.51.0-distroless-libc" imagePullPolicy: IfNotPresent args: - --config-dir diff --git a/distribution/kubernetes/vector-agent/rbac.yaml b/distribution/kubernetes/vector-agent/rbac.yaml index 61e2d56f8778e..6d205e7f63c56 100644 --- a/distribution/kubernetes/vector-agent/rbac.yaml +++ b/distribution/kubernetes/vector-agent/rbac.yaml @@ -10,7 +10,7 @@ metadata: app.kubernetes.io/name: vector app.kubernetes.io/instance: vector app.kubernetes.io/component: Agent - app.kubernetes.io/version: "0.50.0-distroless-libc" + app.kubernetes.io/version: "0.51.0-distroless-libc" rules: - apiGroups: - "" @@ -31,7 +31,7 @@ metadata: app.kubernetes.io/name: vector app.kubernetes.io/instance: vector app.kubernetes.io/component: Agent - app.kubernetes.io/version: "0.50.0-distroless-libc" + app.kubernetes.io/version: "0.51.0-distroless-libc" roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole diff --git a/distribution/kubernetes/vector-agent/service-headless.yaml b/distribution/kubernetes/vector-agent/service-headless.yaml index f7353c743e6a2..b0baeabe79689 100644 --- a/distribution/kubernetes/vector-agent/service-headless.yaml +++ b/distribution/kubernetes/vector-agent/service-headless.yaml @@ -9,7 +9,7 @@ metadata: app.kubernetes.io/name: vector app.kubernetes.io/instance: vector 
app.kubernetes.io/component: Agent - app.kubernetes.io/version: "0.50.0-distroless-libc" + app.kubernetes.io/version: "0.51.0-distroless-libc" annotations: spec: clusterIP: None diff --git a/distribution/kubernetes/vector-agent/serviceaccount.yaml b/distribution/kubernetes/vector-agent/serviceaccount.yaml index eb95e58c49229..479763fa8002d 100644 --- a/distribution/kubernetes/vector-agent/serviceaccount.yaml +++ b/distribution/kubernetes/vector-agent/serviceaccount.yaml @@ -9,5 +9,5 @@ metadata: app.kubernetes.io/name: vector app.kubernetes.io/instance: vector app.kubernetes.io/component: Agent - app.kubernetes.io/version: "0.50.0-distroless-libc" + app.kubernetes.io/version: "0.51.0-distroless-libc" automountServiceAccountToken: true diff --git a/distribution/kubernetes/vector-aggregator/README.md b/distribution/kubernetes/vector-aggregator/README.md index 80dc74083d340..1e07093f77327 100644 --- a/distribution/kubernetes/vector-aggregator/README.md +++ b/distribution/kubernetes/vector-aggregator/README.md @@ -1,6 +1,6 @@ The kubernetes manifests found in this directory have been automatically generated from the [helm chart `vector/vector`](https://github.com/vectordotdev/helm-charts/tree/master/charts/vector) -version 0.46.0 with the following `values.yaml`: +version 0.47.0 with the following `values.yaml`: ```yaml diff --git a/distribution/kubernetes/vector-aggregator/configmap.yaml b/distribution/kubernetes/vector-aggregator/configmap.yaml index 84f37d7728ea5..5da2774211df2 100644 --- a/distribution/kubernetes/vector-aggregator/configmap.yaml +++ b/distribution/kubernetes/vector-aggregator/configmap.yaml @@ -9,7 +9,7 @@ metadata: app.kubernetes.io/name: vector app.kubernetes.io/instance: vector app.kubernetes.io/component: Aggregator - app.kubernetes.io/version: "0.50.0-distroless-libc" + app.kubernetes.io/version: "0.51.0-distroless-libc" data: aggregator.yaml: | data_dir: /vector-data-dir diff --git 
a/distribution/kubernetes/vector-aggregator/service-headless.yaml b/distribution/kubernetes/vector-aggregator/service-headless.yaml index 25e1c5039a6e7..50f3708832c30 100644 --- a/distribution/kubernetes/vector-aggregator/service-headless.yaml +++ b/distribution/kubernetes/vector-aggregator/service-headless.yaml @@ -9,7 +9,7 @@ metadata: app.kubernetes.io/name: vector app.kubernetes.io/instance: vector app.kubernetes.io/component: Aggregator - app.kubernetes.io/version: "0.50.0-distroless-libc" + app.kubernetes.io/version: "0.51.0-distroless-libc" annotations: spec: clusterIP: None diff --git a/distribution/kubernetes/vector-aggregator/service.yaml b/distribution/kubernetes/vector-aggregator/service.yaml index b5d84bf45c413..8866f1c2dfdf3 100644 --- a/distribution/kubernetes/vector-aggregator/service.yaml +++ b/distribution/kubernetes/vector-aggregator/service.yaml @@ -9,7 +9,7 @@ metadata: app.kubernetes.io/name: vector app.kubernetes.io/instance: vector app.kubernetes.io/component: Aggregator - app.kubernetes.io/version: "0.50.0-distroless-libc" + app.kubernetes.io/version: "0.51.0-distroless-libc" annotations: spec: ports: diff --git a/distribution/kubernetes/vector-aggregator/serviceaccount.yaml b/distribution/kubernetes/vector-aggregator/serviceaccount.yaml index ba114d904dc1f..596c835ea3118 100644 --- a/distribution/kubernetes/vector-aggregator/serviceaccount.yaml +++ b/distribution/kubernetes/vector-aggregator/serviceaccount.yaml @@ -9,5 +9,5 @@ metadata: app.kubernetes.io/name: vector app.kubernetes.io/instance: vector app.kubernetes.io/component: Aggregator - app.kubernetes.io/version: "0.50.0-distroless-libc" + app.kubernetes.io/version: "0.51.0-distroless-libc" automountServiceAccountToken: true diff --git a/distribution/kubernetes/vector-aggregator/statefulset.yaml b/distribution/kubernetes/vector-aggregator/statefulset.yaml index b5a9c2944fe53..e0a59ecb2792f 100644 --- a/distribution/kubernetes/vector-aggregator/statefulset.yaml +++ 
b/distribution/kubernetes/vector-aggregator/statefulset.yaml @@ -9,7 +9,7 @@ metadata: app.kubernetes.io/name: vector app.kubernetes.io/instance: vector app.kubernetes.io/component: Aggregator - app.kubernetes.io/version: "0.50.0-distroless-libc" + app.kubernetes.io/version: "0.51.0-distroless-libc" annotations: {} spec: replicas: 1 @@ -34,7 +34,7 @@ spec: dnsPolicy: ClusterFirst containers: - name: vector - image: "timberio/vector:0.50.0-distroless-libc" + image: "timberio/vector:0.51.0-distroless-libc" imagePullPolicy: IfNotPresent args: - --config-dir diff --git a/distribution/kubernetes/vector-stateless-aggregator/README.md b/distribution/kubernetes/vector-stateless-aggregator/README.md index 735fa8cc8333d..6e0faa79156e0 100644 --- a/distribution/kubernetes/vector-stateless-aggregator/README.md +++ b/distribution/kubernetes/vector-stateless-aggregator/README.md @@ -1,6 +1,6 @@ The kubernetes manifests found in this directory have been automatically generated from the [helm chart `vector/vector`](https://github.com/vectordotdev/helm-charts/tree/master/charts/vector) -version 0.46.0 with the following `values.yaml`: +version 0.47.0 with the following `values.yaml`: ```yaml role: Stateless-Aggregator diff --git a/distribution/kubernetes/vector-stateless-aggregator/configmap.yaml b/distribution/kubernetes/vector-stateless-aggregator/configmap.yaml index d9246ba371dcb..b5ea6e39e2dcf 100644 --- a/distribution/kubernetes/vector-stateless-aggregator/configmap.yaml +++ b/distribution/kubernetes/vector-stateless-aggregator/configmap.yaml @@ -9,7 +9,7 @@ metadata: app.kubernetes.io/name: vector app.kubernetes.io/instance: vector app.kubernetes.io/component: Stateless-Aggregator - app.kubernetes.io/version: "0.50.0-distroless-libc" + app.kubernetes.io/version: "0.51.0-distroless-libc" data: aggregator.yaml: | data_dir: /vector-data-dir diff --git a/distribution/kubernetes/vector-stateless-aggregator/deployment.yaml 
b/distribution/kubernetes/vector-stateless-aggregator/deployment.yaml index ad9d539987417..aeccf39e7c839 100644 --- a/distribution/kubernetes/vector-stateless-aggregator/deployment.yaml +++ b/distribution/kubernetes/vector-stateless-aggregator/deployment.yaml @@ -9,7 +9,7 @@ metadata: app.kubernetes.io/name: vector app.kubernetes.io/instance: vector app.kubernetes.io/component: Stateless-Aggregator - app.kubernetes.io/version: "0.50.0-distroless-libc" + app.kubernetes.io/version: "0.51.0-distroless-libc" annotations: {} spec: replicas: 1 @@ -32,7 +32,7 @@ spec: dnsPolicy: ClusterFirst containers: - name: vector - image: "timberio/vector:0.50.0-distroless-libc" + image: "timberio/vector:0.51.0-distroless-libc" imagePullPolicy: IfNotPresent args: - --config-dir diff --git a/distribution/kubernetes/vector-stateless-aggregator/service-headless.yaml b/distribution/kubernetes/vector-stateless-aggregator/service-headless.yaml index cb759eac742d4..0218afcacd0c8 100644 --- a/distribution/kubernetes/vector-stateless-aggregator/service-headless.yaml +++ b/distribution/kubernetes/vector-stateless-aggregator/service-headless.yaml @@ -9,7 +9,7 @@ metadata: app.kubernetes.io/name: vector app.kubernetes.io/instance: vector app.kubernetes.io/component: Stateless-Aggregator - app.kubernetes.io/version: "0.50.0-distroless-libc" + app.kubernetes.io/version: "0.51.0-distroless-libc" annotations: spec: clusterIP: None diff --git a/distribution/kubernetes/vector-stateless-aggregator/service.yaml b/distribution/kubernetes/vector-stateless-aggregator/service.yaml index 49fb889b8a10d..cb4e3b611a5d2 100644 --- a/distribution/kubernetes/vector-stateless-aggregator/service.yaml +++ b/distribution/kubernetes/vector-stateless-aggregator/service.yaml @@ -9,7 +9,7 @@ metadata: app.kubernetes.io/name: vector app.kubernetes.io/instance: vector app.kubernetes.io/component: Stateless-Aggregator - app.kubernetes.io/version: "0.50.0-distroless-libc" + app.kubernetes.io/version: "0.51.0-distroless-libc" 
annotations: spec: ports: diff --git a/distribution/kubernetes/vector-stateless-aggregator/serviceaccount.yaml b/distribution/kubernetes/vector-stateless-aggregator/serviceaccount.yaml index 10aaa571c1b9c..0630b5396a163 100644 --- a/distribution/kubernetes/vector-stateless-aggregator/serviceaccount.yaml +++ b/distribution/kubernetes/vector-stateless-aggregator/serviceaccount.yaml @@ -9,5 +9,5 @@ metadata: app.kubernetes.io/name: vector app.kubernetes.io/instance: vector app.kubernetes.io/component: Stateless-Aggregator - app.kubernetes.io/version: "0.50.0-distroless-libc" + app.kubernetes.io/version: "0.51.0-distroless-libc" automountServiceAccountToken: true From 817be3846e8b932253d3f15ec915e84566675831 Mon Sep 17 00:00:00 2001 From: Pavlos Rontidis Date: Tue, 4 Nov 2025 14:00:32 -0500 Subject: [PATCH 039/227] chore(ci): reorg e2e tests (#24136) * chore(ci): reorg e2e tests * free up space * fmt * improve readme * remove commit from other branch * datadog-logs fixes * fix paths * let all tests run for comment triggers --- .github/workflows/ci-integration-review.yml | 2 + scripts/e2e/README.md | 9 ----- {scripts => tests}/e2e/Dockerfile | 0 tests/e2e/README.md | 38 +++++++++++++++++++ {scripts => tests}/e2e/datadog-logs/README.md | 0 .../e2e/datadog-logs/config}/compose.yaml | 12 +++--- .../e2e/datadog-logs/config}/test.yaml | 7 +--- .../datadog-logs/data}/agent_only.yaml | 0 .../datadog-logs/data}/agent_vector.yaml | 0 .../data}/logs.conf.d/custom_logs.d/conf.yaml | 0 .../datadog-logs/data}/vector.toml | 0 .../e2e/datadog-metrics/README.md | 0 .../e2e/datadog-metrics/config}/compose.yaml | 12 +++--- .../e2e/datadog-metrics/config}/test.yaml | 4 +- .../datadog-metrics/data}/agent_only.yaml | 0 .../datadog-metrics/data}/agent_vector.yaml | 0 .../datadog-metrics/data}/vector.toml | 0 .../dogstatsd_client/Dockerfile | 0 .../dogstatsd_client/client.py | 0 .../dogstatsd_client/requirements.txt | 0 .../opentelemetry-common/collector.Dockerfile | 0 
.../telemetrygen.Dockerfile | 0 .../e2e/opentelemetry-logs/README.md | 0 .../opentelemetry-logs/config}/compose.yaml | 14 +++---- .../e2e/opentelemetry-logs/config}/test.yaml | 3 +- .../data}/collector-sink.yaml | 0 .../data}/collector-source.yaml | 0 .../data}/vector_default.yaml | 0 .../opentelemetry-logs/data}/vector_otlp.yaml | 0 .../config}/compose.yaml | 14 +++---- .../opentelemetry-metrics/config}/test.yaml | 3 +- .../data}/collector-sink.yaml | 0 .../data}/collector-source.yaml | 0 .../data}/vector_otlp.yaml | 0 vdev/src/commands/e2e/mod.rs | 2 +- vdev/src/testing/build.rs | 4 +- vdev/src/testing/config.rs | 11 ++---- vdev/src/testing/integration.rs | 4 +- vdev/src/testing/mod.rs | 6 +-- 39 files changed, 80 insertions(+), 65 deletions(-) delete mode 100644 scripts/e2e/README.md rename {scripts => tests}/e2e/Dockerfile (100%) create mode 100644 tests/e2e/README.md rename {scripts => tests}/e2e/datadog-logs/README.md (100%) rename {scripts/e2e/datadog-logs => tests/e2e/datadog-logs/config}/compose.yaml (90%) rename {scripts/e2e/datadog-logs => tests/e2e/datadog-logs/config}/test.yaml (67%) rename tests/{data/e2e/datadog/logs => e2e/datadog-logs/data}/agent_only.yaml (100%) rename tests/{data/e2e/datadog/logs => e2e/datadog-logs/data}/agent_vector.yaml (100%) rename tests/{data/e2e/datadog/logs => e2e/datadog-logs/data}/logs.conf.d/custom_logs.d/conf.yaml (100%) rename tests/{data/e2e/datadog/logs => e2e/datadog-logs/data}/vector.toml (100%) rename {scripts => tests}/e2e/datadog-metrics/README.md (100%) rename {scripts/e2e/datadog-metrics => tests/e2e/datadog-metrics/config}/compose.yaml (86%) rename {scripts/e2e/datadog-metrics => tests/e2e/datadog-metrics/config}/test.yaml (86%) rename tests/{data/e2e/datadog/metrics => e2e/datadog-metrics/data}/agent_only.yaml (100%) rename tests/{data/e2e/datadog/metrics => e2e/datadog-metrics/data}/agent_vector.yaml (100%) rename tests/{data/e2e/datadog/metrics => e2e/datadog-metrics/data}/vector.toml (100%) rename 
{scripts => tests}/e2e/datadog-metrics/dogstatsd_client/Dockerfile (100%) rename {scripts => tests}/e2e/datadog-metrics/dogstatsd_client/client.py (100%) rename {scripts => tests}/e2e/datadog-metrics/dogstatsd_client/requirements.txt (100%) rename {scripts => tests}/e2e/opentelemetry-common/collector.Dockerfile (100%) rename {scripts => tests}/e2e/opentelemetry-common/telemetrygen.Dockerfile (100%) rename {scripts => tests}/e2e/opentelemetry-logs/README.md (100%) rename {scripts/e2e/opentelemetry-logs => tests/e2e/opentelemetry-logs/config}/compose.yaml (82%) rename {scripts/e2e/opentelemetry-logs => tests/e2e/opentelemetry-logs/config}/test.yaml (89%) rename tests/{data/e2e/opentelemetry/logs => e2e/opentelemetry-logs/data}/collector-sink.yaml (100%) rename tests/{data/e2e/opentelemetry/logs => e2e/opentelemetry-logs/data}/collector-source.yaml (100%) rename tests/{data/e2e/opentelemetry/logs => e2e/opentelemetry-logs/data}/vector_default.yaml (100%) rename tests/{data/e2e/opentelemetry/logs => e2e/opentelemetry-logs/data}/vector_otlp.yaml (100%) rename {scripts/e2e/opentelemetry-metrics => tests/e2e/opentelemetry-metrics/config}/compose.yaml (88%) rename {scripts/e2e/opentelemetry-metrics => tests/e2e/opentelemetry-metrics/config}/test.yaml (87%) rename tests/{data/e2e/opentelemetry/metrics => e2e/opentelemetry-metrics/data}/collector-sink.yaml (100%) rename tests/{data/e2e/opentelemetry/metrics => e2e/opentelemetry-metrics/data}/collector-source.yaml (100%) rename tests/{data/e2e/opentelemetry/metrics => e2e/opentelemetry-metrics/data}/vector_otlp.yaml (100%) diff --git a/.github/workflows/ci-integration-review.yml b/.github/workflows/ci-integration-review.yml index 933caab0801ae..0bc2425927877 100644 --- a/.github/workflows/ci-integration-review.yml +++ b/.github/workflows/ci-integration-review.yml @@ -95,6 +95,7 @@ jobs: runs-on: ubuntu-24.04 timeout-minutes: 90 strategy: + fail-fast: false matrix: service: [ "amqp", "appsignal", "aws", "axiom", "azure", 
"clickhouse", "databend", "datadog-agent", @@ -134,6 +135,7 @@ jobs: runs-on: ubuntu-24.04-8core timeout-minutes: 30 strategy: + fail-fast: false matrix: service: [ "datadog-logs", "datadog-metrics", "opentelemetry-logs", "opentelemetry-metrics" diff --git a/scripts/e2e/README.md b/scripts/e2e/README.md deleted file mode 100644 index 62e7f8e9f64c7..0000000000000 --- a/scripts/e2e/README.md +++ /dev/null @@ -1,9 +0,0 @@ -This directory contains a set of end-to-end test frameworks for vector which are executed by the -`vdev` tool. - -These end-to-end (e2e) tests are executed with the `vdev e2e` subcommand, which behaves -identically to the `vdev integration` subcommand. See the README in the `scripts/integration` -subdirectory for more information. - -The e2e tests are more of a black box test, in which we spin up a full vector instance as one -of the compose services that runs alongside the others. diff --git a/scripts/e2e/Dockerfile b/tests/e2e/Dockerfile similarity index 100% rename from scripts/e2e/Dockerfile rename to tests/e2e/Dockerfile diff --git a/tests/e2e/README.md b/tests/e2e/README.md new file mode 100644 index 0000000000000..da156f25eb8f4 --- /dev/null +++ b/tests/e2e/README.md @@ -0,0 +1,38 @@ +This directory contains a set of end-to-end test frameworks for vector which are executed by the +`vdev` tool. + +Each directory contains: + +1. A `config/` subdirectory with: + - A `compose.yaml` file containing the instructions to `docker compose` or `podman compose` for how + to set up the containers in which to run the tests + - A `test.yaml` file that describes how to run the end-to-end tests, including a matrix of + software versions or other parameters over which the tests will be run +2. 
A `data/` subdirectory (optional) containing test data files, configuration files, and other + resources needed by the test + +You can list these tests with `cargo vdev e2e show`, which provides a list of all the +end-to-end test names followed by the extrapolated matrix of environments. + +Each test can be run using one of the following: + +1. Run a single test environment from the above list with `cargo vdev e2e test NAME ENV` +2. Run all the environments for one test with `cargo vdev e2e test NAME` +3. Run all the steps individually using the `start`, `test`, and then `stop` subcommands with the + same parameters as above (see below). This allows developers to start the environment once and + then repeat the testing step while working on a component. + +```shell +cargo vdev e2e start NAME ENVIRONMENT +cargo vdev e2e test NAME [ENVIRONMENT] +cargo vdev e2e stop NAME [ENVIRONMENT] +``` + +If no environment is named for the `test` and `stop` subcommands, all active environments are used. + +## E2E vs Integration Tests + +The end-to-end (E2E) tests are black box tests that spin up a full Vector instance as one of the +Docker Compose services, running alongside external systems (e.g., Datadog Agent, Splunk, OTEL +collectors). This differs from integration tests, which compile and run Vector within a test runner +container to test individual components or integrations in isolation. 
diff --git a/scripts/e2e/datadog-logs/README.md b/tests/e2e/datadog-logs/README.md similarity index 100% rename from scripts/e2e/datadog-logs/README.md rename to tests/e2e/datadog-logs/README.md diff --git a/scripts/e2e/datadog-logs/compose.yaml b/tests/e2e/datadog-logs/config/compose.yaml similarity index 90% rename from scripts/e2e/datadog-logs/compose.yaml rename to tests/e2e/datadog-logs/config/compose.yaml index ac1cb148134f5..a144abed68cc5 100644 --- a/scripts/e2e/datadog-logs/compose.yaml +++ b/tests/e2e/datadog-logs/config/compose.yaml @@ -36,13 +36,13 @@ services: volumes: # The Agent config file - type: bind - source: ../../../tests/data/e2e/datadog/logs/agent_only.yaml + source: ../data/agent_only.yaml target: /etc/datadog-agent/datadog.yaml read_only: true # The custom logs check - type: bind - source: ../../../tests/data/e2e/datadog/logs/logs.conf.d + source: ../data/logs.conf.d target: /conf.d read_only: true @@ -66,13 +66,13 @@ services: volumes: # The Agent config file - type: bind - source: ../../../tests/data/e2e/datadog/logs/agent_vector.yaml + source: ../data/agent_vector.yaml target: /etc/datadog-agent/datadog.yaml read_only: true # The custom logs check - type: bind - source: ../../../tests/data/e2e/datadog/logs/logs.conf.d + source: ../data/logs.conf.d target: /conf.d read_only: true @@ -96,10 +96,10 @@ services: - "/usr/bin/vector" - "-vvv" - "-c" - - "/home/vector/tests/data/e2e/datadog/logs/vector.toml" + - "/home/vector/tests/e2e/datadog-logs/data/vector.toml" volumes: - type: bind - source: ../../.. + source: ../../../.. target: /home/vector # Receives log data from the `datadog-agent` service. 
Is queried by the test runner diff --git a/scripts/e2e/datadog-logs/test.yaml b/tests/e2e/datadog-logs/config/test.yaml similarity index 67% rename from scripts/e2e/datadog-logs/test.yaml rename to tests/e2e/datadog-logs/config/test.yaml index 0610bb939ec2c..2ce4641b04e96 100644 --- a/scripts/e2e/datadog-logs/test.yaml +++ b/tests/e2e/datadog-logs/config/test.yaml @@ -25,9 +25,4 @@ paths: - "src/internal_events/datadog_*" - "src/sinks/datadog/logs/**" - "src/sinks/util/**" -# NOTE: currently we need the prefix 'e2e' even though it looks redundant, -# because the vdev code does not otherwise have a way to distinguish between -# the other `datadog-logs` int test. -# but once GH issue 18829 is completed, this will become unecessary. -- "scripts/e2e/e2e-datadog-logs/**" -- "tests/data/e2e/datadog/logs/**" +- "tests/e2e/datadog-logs/**" diff --git a/tests/data/e2e/datadog/logs/agent_only.yaml b/tests/e2e/datadog-logs/data/agent_only.yaml similarity index 100% rename from tests/data/e2e/datadog/logs/agent_only.yaml rename to tests/e2e/datadog-logs/data/agent_only.yaml diff --git a/tests/data/e2e/datadog/logs/agent_vector.yaml b/tests/e2e/datadog-logs/data/agent_vector.yaml similarity index 100% rename from tests/data/e2e/datadog/logs/agent_vector.yaml rename to tests/e2e/datadog-logs/data/agent_vector.yaml diff --git a/tests/data/e2e/datadog/logs/logs.conf.d/custom_logs.d/conf.yaml b/tests/e2e/datadog-logs/data/logs.conf.d/custom_logs.d/conf.yaml similarity index 100% rename from tests/data/e2e/datadog/logs/logs.conf.d/custom_logs.d/conf.yaml rename to tests/e2e/datadog-logs/data/logs.conf.d/custom_logs.d/conf.yaml diff --git a/tests/data/e2e/datadog/logs/vector.toml b/tests/e2e/datadog-logs/data/vector.toml similarity index 100% rename from tests/data/e2e/datadog/logs/vector.toml rename to tests/e2e/datadog-logs/data/vector.toml diff --git a/scripts/e2e/datadog-metrics/README.md b/tests/e2e/datadog-metrics/README.md similarity index 100% rename from 
scripts/e2e/datadog-metrics/README.md rename to tests/e2e/datadog-metrics/README.md diff --git a/scripts/e2e/datadog-metrics/compose.yaml b/tests/e2e/datadog-metrics/config/compose.yaml similarity index 86% rename from scripts/e2e/datadog-metrics/compose.yaml rename to tests/e2e/datadog-metrics/config/compose.yaml index 209cfb1785450..d1beb57030c42 100644 --- a/scripts/e2e/datadog-metrics/compose.yaml +++ b/tests/e2e/datadog-metrics/config/compose.yaml @@ -4,7 +4,7 @@ services: # Emits metrics to the Agent only path dogstatsd-client-agent: - build: ./dogstatsd_client + build: ../dogstatsd_client environment: - STATSD_HOST=datadog-agent depends_on: @@ -12,7 +12,7 @@ services: # Emits metrics to the Agent-Vector path dogstatsd-client-vector: - build: ./dogstatsd_client + build: ../dogstatsd_client environment: - STATSD_HOST=datadog-agent-vector depends_on: @@ -28,7 +28,7 @@ services: - DD_HOSTNAME=datadog-agent volumes: # The Agent config file - - ../../../tests/data/e2e/datadog/metrics/agent_only.yaml:/etc/datadog-agent/datadog.yaml + - ../data/agent_only.yaml:/etc/datadog-agent/datadog.yaml # Sends metric data received from the Emitter to the `vector` service datadog-agent-vector: @@ -40,7 +40,7 @@ services: - DD_HOSTNAME=datadog-agent-vector volumes: # The Agent config file - - ../../../tests/data/e2e/datadog/metrics/agent_vector.yaml:/etc/datadog-agent/datadog.yaml + - ../data/agent_vector.yaml:/etc/datadog-agent/datadog.yaml # Receives metric data from the `datadog-agent-vector` service and sends # to the `fakeintake-vector` service. @@ -57,9 +57,9 @@ services: - "/usr/bin/vector" - "-vvv" - "-c" - - "/home/vector/tests/data/e2e/datadog/metrics/vector.toml" + - "/home/vector/tests/e2e/datadog-metrics/data/vector.toml" volumes: - - ../../..:/home/vector + - ../../../..:/home/vector # Receives metric data from the `datadog-agent` service. Is queried by the test runner # which does the validation of consistency with the other fakeintake service. 
diff --git a/scripts/e2e/datadog-metrics/test.yaml b/tests/e2e/datadog-metrics/config/test.yaml similarity index 86% rename from scripts/e2e/datadog-metrics/test.yaml rename to tests/e2e/datadog-metrics/config/test.yaml index 88b119411fff1..0199618fe7f42 100644 --- a/scripts/e2e/datadog-metrics/test.yaml +++ b/tests/e2e/datadog-metrics/config/test.yaml @@ -23,6 +23,4 @@ paths: - "src/internal_events/datadog_*" - "src/sinks/datadog/metrics/**" - "src/sinks/util/**" -- "scripts/e2e/datadog-metrics/**" -- "tests/e2e/datadog/metrics/**" -- "tests/data/e2e/datadog/metrics/**" +- "tests/e2e/datadog-metrics/**" diff --git a/tests/data/e2e/datadog/metrics/agent_only.yaml b/tests/e2e/datadog-metrics/data/agent_only.yaml similarity index 100% rename from tests/data/e2e/datadog/metrics/agent_only.yaml rename to tests/e2e/datadog-metrics/data/agent_only.yaml diff --git a/tests/data/e2e/datadog/metrics/agent_vector.yaml b/tests/e2e/datadog-metrics/data/agent_vector.yaml similarity index 100% rename from tests/data/e2e/datadog/metrics/agent_vector.yaml rename to tests/e2e/datadog-metrics/data/agent_vector.yaml diff --git a/tests/data/e2e/datadog/metrics/vector.toml b/tests/e2e/datadog-metrics/data/vector.toml similarity index 100% rename from tests/data/e2e/datadog/metrics/vector.toml rename to tests/e2e/datadog-metrics/data/vector.toml diff --git a/scripts/e2e/datadog-metrics/dogstatsd_client/Dockerfile b/tests/e2e/datadog-metrics/dogstatsd_client/Dockerfile similarity index 100% rename from scripts/e2e/datadog-metrics/dogstatsd_client/Dockerfile rename to tests/e2e/datadog-metrics/dogstatsd_client/Dockerfile diff --git a/scripts/e2e/datadog-metrics/dogstatsd_client/client.py b/tests/e2e/datadog-metrics/dogstatsd_client/client.py similarity index 100% rename from scripts/e2e/datadog-metrics/dogstatsd_client/client.py rename to tests/e2e/datadog-metrics/dogstatsd_client/client.py diff --git a/scripts/e2e/datadog-metrics/dogstatsd_client/requirements.txt 
b/tests/e2e/datadog-metrics/dogstatsd_client/requirements.txt similarity index 100% rename from scripts/e2e/datadog-metrics/dogstatsd_client/requirements.txt rename to tests/e2e/datadog-metrics/dogstatsd_client/requirements.txt diff --git a/scripts/e2e/opentelemetry-common/collector.Dockerfile b/tests/e2e/opentelemetry-common/collector.Dockerfile similarity index 100% rename from scripts/e2e/opentelemetry-common/collector.Dockerfile rename to tests/e2e/opentelemetry-common/collector.Dockerfile diff --git a/scripts/e2e/opentelemetry-common/telemetrygen.Dockerfile b/tests/e2e/opentelemetry-common/telemetrygen.Dockerfile similarity index 100% rename from scripts/e2e/opentelemetry-common/telemetrygen.Dockerfile rename to tests/e2e/opentelemetry-common/telemetrygen.Dockerfile diff --git a/scripts/e2e/opentelemetry-logs/README.md b/tests/e2e/opentelemetry-logs/README.md similarity index 100% rename from scripts/e2e/opentelemetry-logs/README.md rename to tests/e2e/opentelemetry-logs/README.md diff --git a/scripts/e2e/opentelemetry-logs/compose.yaml b/tests/e2e/opentelemetry-logs/config/compose.yaml similarity index 82% rename from scripts/e2e/opentelemetry-logs/compose.yaml rename to tests/e2e/opentelemetry-logs/config/compose.yaml index e947c2293439c..62b3d48c44aa6 100644 --- a/scripts/e2e/opentelemetry-logs/compose.yaml +++ b/tests/e2e/opentelemetry-logs/config/compose.yaml @@ -6,7 +6,7 @@ services: init: true volumes: - type: bind - source: ../../../tests/data/e2e/opentelemetry/logs/collector-source.yaml + source: ../data/collector-source.yaml target: /etc/otelcol-contrib/config.yaml read_only: true ports: @@ -17,8 +17,8 @@ services: logs-generator: container_name: logs-generator build: - context: ../../../ - dockerfile: ./scripts/e2e/opentelemetry-common/telemetrygen.Dockerfile + context: ../../../.. 
+ dockerfile: ./tests/e2e/opentelemetry-common/telemetrygen.Dockerfile init: true depends_on: otel-collector-source: @@ -43,8 +43,8 @@ services: otel-collector-sink: container_name: otel-collector-sink build: - context: ../../../ - dockerfile: ./scripts/e2e/opentelemetry-common/collector.Dockerfile + context: ../../../.. + dockerfile: ./tests/e2e/opentelemetry-common/collector.Dockerfile args: CONFIG_COLLECTOR_VERSION: ${CONFIG_COLLECTOR_VERSION} init: true @@ -52,7 +52,7 @@ services: command: [ "--config", "/etc/otelcol-contrib/config.yaml" ] volumes: - type: bind - source: ../../../tests/data/e2e/opentelemetry/logs/collector-sink.yaml + source: ../data/collector-sink.yaml target: /etc/otelcol-contrib/config.yaml read_only: true - type: volume @@ -67,7 +67,7 @@ services: init: true volumes: - type: bind - source: ../../../tests/data/e2e/opentelemetry/logs/${CONFIG_VECTOR_CONFIG} + source: ../data/${CONFIG_VECTOR_CONFIG} target: /etc/vector/vector.yaml read_only: true - type: volume diff --git a/scripts/e2e/opentelemetry-logs/test.yaml b/tests/e2e/opentelemetry-logs/config/test.yaml similarity index 89% rename from scripts/e2e/opentelemetry-logs/test.yaml rename to tests/e2e/opentelemetry-logs/config/test.yaml index 13f618cf72a93..54aa7063e632c 100644 --- a/scripts/e2e/opentelemetry-logs/test.yaml +++ b/tests/e2e/opentelemetry-logs/config/test.yaml @@ -22,6 +22,5 @@ paths: - "src/sources/opentelemetry/**" - "src/sinks/opentelemetry/**" - "src/internal_events/opentelemetry_*" - - "tests/e2e/opentelemetry/logs/**" - - "scripts/e2e/opentelemetry-logs/**" + - "tests/e2e/opentelemetry-logs/**" - "lib/codecs/src/**/otlp.rs" diff --git a/tests/data/e2e/opentelemetry/logs/collector-sink.yaml b/tests/e2e/opentelemetry-logs/data/collector-sink.yaml similarity index 100% rename from tests/data/e2e/opentelemetry/logs/collector-sink.yaml rename to tests/e2e/opentelemetry-logs/data/collector-sink.yaml diff --git a/tests/data/e2e/opentelemetry/logs/collector-source.yaml 
b/tests/e2e/opentelemetry-logs/data/collector-source.yaml similarity index 100% rename from tests/data/e2e/opentelemetry/logs/collector-source.yaml rename to tests/e2e/opentelemetry-logs/data/collector-source.yaml diff --git a/tests/data/e2e/opentelemetry/logs/vector_default.yaml b/tests/e2e/opentelemetry-logs/data/vector_default.yaml similarity index 100% rename from tests/data/e2e/opentelemetry/logs/vector_default.yaml rename to tests/e2e/opentelemetry-logs/data/vector_default.yaml diff --git a/tests/data/e2e/opentelemetry/logs/vector_otlp.yaml b/tests/e2e/opentelemetry-logs/data/vector_otlp.yaml similarity index 100% rename from tests/data/e2e/opentelemetry/logs/vector_otlp.yaml rename to tests/e2e/opentelemetry-logs/data/vector_otlp.yaml diff --git a/scripts/e2e/opentelemetry-metrics/compose.yaml b/tests/e2e/opentelemetry-metrics/config/compose.yaml similarity index 88% rename from scripts/e2e/opentelemetry-metrics/compose.yaml rename to tests/e2e/opentelemetry-metrics/config/compose.yaml index 0652811ff4c27..fba9c2a19b247 100644 --- a/scripts/e2e/opentelemetry-metrics/compose.yaml +++ b/tests/e2e/opentelemetry-metrics/config/compose.yaml @@ -6,7 +6,7 @@ services: init: true volumes: - type: bind - source: ../../../tests/data/e2e/opentelemetry/metrics/collector-source.yaml + source: ../data/collector-source.yaml target: /etc/otelcol-contrib/config.yaml read_only: true ports: @@ -17,8 +17,8 @@ services: metrics-generator: container_name: metrics-generator build: - context: ../../../ - dockerfile: ./scripts/e2e/opentelemetry-common/telemetrygen.Dockerfile + context: ../../../.. + dockerfile: ./tests/e2e/opentelemetry-common/telemetrygen.Dockerfile init: true depends_on: otel-collector-source: @@ -80,8 +80,8 @@ services: otel-collector-sink: container_name: otel-collector-sink build: - context: ../../../ - dockerfile: ./scripts/e2e/opentelemetry-common/collector.Dockerfile + context: ../../../.. 
+ dockerfile: ./tests/e2e/opentelemetry-common/collector.Dockerfile args: CONFIG_COLLECTOR_VERSION: ${CONFIG_COLLECTOR_VERSION} init: true @@ -89,7 +89,7 @@ services: command: [ "--config", "/etc/otelcol-contrib/config.yaml" ] volumes: - type: bind - source: ../../../tests/data/e2e/opentelemetry/metrics/collector-sink.yaml + source: ../data/collector-sink.yaml target: /etc/otelcol-contrib/config.yaml read_only: true - type: volume @@ -104,7 +104,7 @@ services: init: true volumes: - type: bind - source: ../../../tests/data/e2e/opentelemetry/metrics/vector_otlp.yaml + source: ../data/vector_otlp.yaml target: /etc/vector/vector.yaml read_only: true - type: volume diff --git a/scripts/e2e/opentelemetry-metrics/test.yaml b/tests/e2e/opentelemetry-metrics/config/test.yaml similarity index 87% rename from scripts/e2e/opentelemetry-metrics/test.yaml rename to tests/e2e/opentelemetry-metrics/config/test.yaml index 3481aa607771e..507cddfd2b21d 100644 --- a/scripts/e2e/opentelemetry-metrics/test.yaml +++ b/tests/e2e/opentelemetry-metrics/config/test.yaml @@ -21,6 +21,5 @@ paths: - "src/sources/opentelemetry/**" - "src/sinks/opentelemetry/**" - "src/internal_events/opentelemetry_*" - - "tests/e2e/opentelemetry/metrics/**" - - "scripts/e2e/opentelemetry-metrics/**" + - "tests/e2e/opentelemetry-metrics/**" - "lib/codecs/src/**/otlp.rs" diff --git a/tests/data/e2e/opentelemetry/metrics/collector-sink.yaml b/tests/e2e/opentelemetry-metrics/data/collector-sink.yaml similarity index 100% rename from tests/data/e2e/opentelemetry/metrics/collector-sink.yaml rename to tests/e2e/opentelemetry-metrics/data/collector-sink.yaml diff --git a/tests/data/e2e/opentelemetry/metrics/collector-source.yaml b/tests/e2e/opentelemetry-metrics/data/collector-source.yaml similarity index 100% rename from tests/data/e2e/opentelemetry/metrics/collector-source.yaml rename to tests/e2e/opentelemetry-metrics/data/collector-source.yaml diff --git a/tests/data/e2e/opentelemetry/metrics/vector_otlp.yaml 
b/tests/e2e/opentelemetry-metrics/data/vector_otlp.yaml similarity index 100% rename from tests/data/e2e/opentelemetry/metrics/vector_otlp.yaml rename to tests/e2e/opentelemetry-metrics/data/vector_otlp.yaml diff --git a/vdev/src/commands/e2e/mod.rs b/vdev/src/commands/e2e/mod.rs index 1e84af9c0e96e..2804d9b2d750a 100644 --- a/vdev/src/commands/e2e/mod.rs +++ b/vdev/src/commands/e2e/mod.rs @@ -2,7 +2,7 @@ crate::cli_subcommands! { r"Manage end-to-end test environments... These test setups are organized into a set of integrations, located in subdirectories -`scripts/e2e`. For each integration, there is a matrix of environments, described in the +`tests/e2e`. For each integration, there is a matrix of environments, described in the `matrix` setting in the `test.yaml` file contained therein." mod show, diff --git a/vdev/src/testing/build.rs b/vdev/src/testing/build.rs index a066239b409b2..bddf44ce8f641 100644 --- a/vdev/src/testing/build.rs +++ b/vdev/src/testing/build.rs @@ -15,7 +15,7 @@ pub const ALL_INTEGRATIONS_FEATURE_FLAG: &str = "all-integration-tests"; /// Construct (but do not run) the `docker build` command for a test-runner image. /// - `image` is the full tag (e.g. `"vector-test-runner-1.86.0:latest"`). -/// - `dockerfile` is the path to the Dockerfile (e.g. `scripts/e2e/Dockerfile`). +/// - `dockerfile` is the path to the Dockerfile (e.g. `tests/e2e/Dockerfile`). /// - `features` controls the `FEATURES` build-arg (pass `None` for an empty list). /// - `build` controls whether to build the Vector binary in the image. 
pub fn prepare_build_command( @@ -59,7 +59,7 @@ pub fn prepare_build_command( command } -/// Build the integration test‐runner image from `scripts/e2e/Dockerfile` +/// Build the integration test‐runner image from `tests/e2e/Dockerfile` pub fn build_integration_image() -> Result<()> { let dockerfile = test_runner_dockerfile(); let image = format!("vector-test-runner-{}", RustToolchainConfig::rust_version()); diff --git a/vdev/src/testing/config.rs b/vdev/src/testing/config.rs index facae0bb8a460..e40e282677a4c 100644 --- a/vdev/src/testing/config.rs +++ b/vdev/src/testing/config.rs @@ -19,14 +19,9 @@ pub const INTEGRATION_TESTS_DIR: &str = "integration"; pub const E2E_TESTS_DIR: &str = "e2e"; /// Returns the base directory and whether to use config subdirectory for the given test type. -/// Integration tests are in tests/integration with config/ subdirectories. -/// E2E tests are in scripts/e2e without config/ subdirectories. -fn test_dir_config(root_dir: &str) -> (&'static str, bool) { - if root_dir == INTEGRATION_TESTS_DIR { - ("tests", true) - } else { - ("scripts", false) - } +/// All tests (integration and E2E) are now in tests/ with config/ subdirectories. +fn test_dir_config(_root_dir: &str) -> (&'static str, bool) { + ("tests", true) } #[derive(Deserialize, Debug)] diff --git a/vdev/src/testing/integration.rs b/vdev/src/testing/integration.rs index f63c8fdb6366c..d28f0eddea5b3 100644 --- a/vdev/src/testing/integration.rs +++ b/vdev/src/testing/integration.rs @@ -47,8 +47,8 @@ impl ComposeTestLocalConfig { } } - /// E2E tests are located in the `scripts/e2e` dir, - /// and are the full feature flag is `all-e2e-tests`. + /// E2E tests are located in the `tests/e2e` dir, + /// and the full feature flag is `all-e2e-tests`. 
pub(crate) fn e2e() -> Self { Self { kind: ComposeTestKind::E2E, diff --git a/vdev/src/testing/mod.rs b/vdev/src/testing/mod.rs index 5d61cf01df51d..105a6efe33cd6 100644 --- a/vdev/src/testing/mod.rs +++ b/vdev/src/testing/mod.rs @@ -9,9 +9,7 @@ pub mod integration; pub mod runner; /// Returns the path to the unified test runner Dockerfile. -/// Both integration and E2E tests use the same Dockerfile at `scripts/e2e/Dockerfile`. +/// Both integration and E2E tests use the same Dockerfile at `tests/e2e/Dockerfile`. pub fn test_runner_dockerfile() -> PathBuf { - [app::path(), "scripts", "e2e", "Dockerfile"] - .iter() - .collect() + [app::path(), "tests", "e2e", "Dockerfile"].iter().collect() } From 7e2b3223565396db8be2dd130a579e3364cf4a7c Mon Sep 17 00:00:00 2001 From: Pavlos Rontidis Date: Tue, 4 Nov 2025 14:53:53 -0500 Subject: [PATCH 040/227] chore(ci): typo fix (#24146) --- .github/workflows/changes.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/changes.yml b/.github/workflows/changes.yml index 01919d4cb0b75..0bd82a9881cb9 100644 --- a/.github/workflows/changes.yml +++ b/.github/workflows/changes.yml @@ -413,7 +413,7 @@ jobs: # creates a yaml file that contains the filters for each test, # extracted from the output of the `vdev int ci-paths` command, which - # sources the paths from the scripts/e2e/.../test.yaml files + # sources the paths from the tests/e2e/.../test.yaml files - name: Create filter rules for e2e tests run: vdev e2e ci-paths > int_test_filters.yaml From 90b395120c694878e2c262cad7ade1c142ef6b7b Mon Sep 17 00:00:00 2001 From: Thomas Date: Tue, 4 Nov 2025 15:09:46 -0500 Subject: [PATCH 041/227] chore(releasing): v0.51.0 (#24145) * chore(releasing): prepare v0.51.0 release (#24131) * chore(releasing): Pinned VRL version to 0.28.0 * chore(releasing): Generated release CUE file * chore(releasing): Updated website/cue/reference/administration/interfaces/kubectl.cue vector version to 0.51.0 * chore(releasing): 
Updated distribution/install.sh vector version to 0.51.0 * chore(releasing): Add 0.51.0 to versions.cue * chore(releasing): Created release md file * fix/improve wording * Apply suggestions from code review * Add release highlights * Add telemetry highlight * Add note to otlp support page * Fix whitespace * Make file source breaking version more cleaer * Merge memory enrichment table highlights * Add upgrade guide * Add config watcher fix to highlights * Rename file to match release date * Update website/content/en/highlights/2025-11-04-0-51-0-upgrade-guide.md * Fix author spellcheck * Specify debug and trace for component_id logs * cue fmt * Remove backtick lang * Update multi-line config block upgrade guide * Remove config watcher highlight * Add backticks to debug and trace * Merge highlights * Revert "Remove backtick lang" This reverts commit 11ff870d2ab66337a9c6a5b7b8a9d43a8456bfd1. * Move VRL breaking change to the bottom * Fix markdown check * Update breaking changes wording * Add VRL info block * Bump Vector version --- Cargo.lock | 12 +- Cargo.toml | 2 +- .../23671_file_sink_truncate.feature.md | 3 - .../23717_vector_test_color.feature.md | 3 - ...748_add_indexer_ack_compression.feature.md | 3 - ...emory_enrichment_expired_output.feature.md | 3 - .../23820_sighup_reload_transforms.feature.md | 3 - .../23820_watch_config_handle_events.fix.md | 3 - .../23838_mqtt_sink_client_cert_auth.fix.md | 3 - changelog.d/23863_memory_tables_tap.fix.md | 3 - ...3865_azure_blob_use_correct_feature.fix.md | 3 - .../23872_buffer_counter_underflowed.fix.md | 3 - ...ource_remove_legacy_checkpoint.breaking.md | 34 -- ...1_improve_journald_source_debug.feature.md | 3 - ...gent_split_metric_namespace.enhancement.md | 4 - .../24014_memory_table_source_reload.fix.md | 3 - .../24026_fix_docker_logs_socket_path.fix.md | 3 - changelog.d/24073_utilization_bounds.fix.md | 3 - .../24080_utilization_on_reload.fix.md | 3 - ...119_improve_avro_encoding_error.feature.md | 3 - 
...c_vector_config_reload_rejected.feature.md | 3 - ..._source_exponential_backoff.enhancement.md | 5 - .../env_var_multiline_rejection.breaking.md | 5 - .../fix_fluent_received_events_count.fix.md | 3 - ...nternal_log_component_id_field.breaking.md | 4 - ...telemetry_source_http_decompression.fix.md | 4 - changelog.d/otlp_decoding.feature.md | 3 - changelog.d/otlp_encoding.feature.md | 4 - ...eus_remote_write_metadata_conflicts.fix.md | 3 - .../prometheus_remote_write_path.feature.md | 3 - .../protobuf_use_json_names.enhancement.md | 5 - changelog.d/utilization.breaking.md | 3 - changelog.d/x86_64_apple_builds.breaking.md | 4 - distribution/install.sh | 2 +- .../en/highlights/2025-09-23-otlp-support.md | 2 + .../2025-11-04-0-51-0-upgrade-guide.md | 115 ++++ website/content/en/releases/0.51.0.md | 4 + .../administration/interfaces/kubectl.cue | 2 +- website/cue/reference/releases/0.51.0.cue | 512 ++++++++++++++++++ website/cue/reference/versions.cue | 1 + 40 files changed, 643 insertions(+), 144 deletions(-) delete mode 100644 changelog.d/23671_file_sink_truncate.feature.md delete mode 100644 changelog.d/23717_vector_test_color.feature.md delete mode 100644 changelog.d/23748_add_indexer_ack_compression.feature.md delete mode 100644 changelog.d/23815_memory_enrichment_expired_output.feature.md delete mode 100644 changelog.d/23820_sighup_reload_transforms.feature.md delete mode 100644 changelog.d/23820_watch_config_handle_events.fix.md delete mode 100644 changelog.d/23838_mqtt_sink_client_cert_auth.fix.md delete mode 100644 changelog.d/23863_memory_tables_tap.fix.md delete mode 100644 changelog.d/23865_azure_blob_use_correct_feature.fix.md delete mode 100644 changelog.d/23872_buffer_counter_underflowed.fix.md delete mode 100644 changelog.d/23874_file_source_remove_legacy_checkpoint.breaking.md delete mode 100644 changelog.d/23941_improve_journald_source_debug.feature.md delete mode 100644 changelog.d/23986_datadog_agent_split_metric_namespace.enhancement.md delete 
mode 100644 changelog.d/24014_memory_table_source_reload.fix.md delete mode 100644 changelog.d/24026_fix_docker_logs_socket_path.fix.md delete mode 100644 changelog.d/24073_utilization_bounds.fix.md delete mode 100644 changelog.d/24080_utilization_on_reload.fix.md delete mode 100644 changelog.d/24119_improve_avro_encoding_error.feature.md delete mode 100644 changelog.d/add_metric_vector_config_reload_rejected.feature.md delete mode 100644 changelog.d/aws_s3_source_exponential_backoff.enhancement.md delete mode 100644 changelog.d/env_var_multiline_rejection.breaking.md delete mode 100644 changelog.d/fix_fluent_received_events_count.fix.md delete mode 100644 changelog.d/internal_log_component_id_field.breaking.md delete mode 100644 changelog.d/opentelemetry_source_http_decompression.fix.md delete mode 100644 changelog.d/otlp_decoding.feature.md delete mode 100644 changelog.d/otlp_encoding.feature.md delete mode 100644 changelog.d/prometheus_remote_write_metadata_conflicts.fix.md delete mode 100644 changelog.d/prometheus_remote_write_path.feature.md delete mode 100644 changelog.d/protobuf_use_json_names.enhancement.md delete mode 100644 changelog.d/utilization.breaking.md delete mode 100644 changelog.d/x86_64_apple_builds.breaking.md create mode 100644 website/content/en/highlights/2025-11-04-0-51-0-upgrade-guide.md create mode 100644 website/content/en/releases/0.51.0.md create mode 100644 website/cue/reference/releases/0.51.0.cue diff --git a/Cargo.lock b/Cargo.lock index 2cf85a4ded1e9..9d07efa9c837d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4981,7 +4981,7 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite", - "socket2 0.5.10", + "socket2 0.4.10", "tokio", "tower-service", "tracing 0.1.41", @@ -8168,7 +8168,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "be769465445e8c1474e9c5dac2018218498557af32d9ed057325ec9a41ae81bf" dependencies = [ "heck 0.5.0", - "itertools 0.14.0", + "itertools 0.10.5", "log", "multimap", 
"once_cell", @@ -8214,7 +8214,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a56d757972c98b346a9b766e3f02746cde6dd1cd1d1d563472929fdd74bec4d" dependencies = [ "anyhow", - "itertools 0.14.0", + "itertools 0.10.5", "proc-macro2 1.0.101", "quote 1.0.40", "syn 2.0.106", @@ -12185,7 +12185,7 @@ dependencies = [ [[package]] name = "vector" -version = "0.51.0" +version = "0.52.0" dependencies = [ "apache-avro 0.16.0", "approx", @@ -12770,8 +12770,8 @@ checksum = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d" [[package]] name = "vrl" -version = "0.27.0" -source = "git+https://github.com/vectordotdev/vrl.git?branch=main#90ea34f09903e3ee85f9b3e4813f44992f6e7c68" +version = "0.28.0" +source = "git+https://github.com/vectordotdev/vrl.git?branch=main#d6cae023d596fdc73bf19501498e2db962fe1d54" dependencies = [ "aes", "aes-siv", diff --git a/Cargo.toml b/Cargo.toml index 14e1e219f1297..518440b77a194 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "vector" -version = "0.51.0" +version = "0.52.0" authors = ["Vector Contributors "] edition = "2024" description = "A lightweight and ultra-fast tool for building observability pipelines" diff --git a/changelog.d/23671_file_sink_truncate.feature.md b/changelog.d/23671_file_sink_truncate.feature.md deleted file mode 100644 index a7b5367aca34a..0000000000000 --- a/changelog.d/23671_file_sink_truncate.feature.md +++ /dev/null @@ -1,3 +0,0 @@ -Added `truncate` options to `file` sink to truncate output files after some time. - -authors: esensar Quad9DNS diff --git a/changelog.d/23717_vector_test_color.feature.md b/changelog.d/23717_vector_test_color.feature.md deleted file mode 100644 index 5b694d9619407..0000000000000 --- a/changelog.d/23717_vector_test_color.feature.md +++ /dev/null @@ -1,3 +0,0 @@ -Disable ANSI color for `vector test` when running non-interactively. 
Honor `--color {auto|always|never}` and `VECTOR_COLOR`; VRL diagnostics no longer include ANSI sequences when color is disabled. - -authors: VanjaRo diff --git a/changelog.d/23748_add_indexer_ack_compression.feature.md b/changelog.d/23748_add_indexer_ack_compression.feature.md deleted file mode 100644 index 5b9068623a09a..0000000000000 --- a/changelog.d/23748_add_indexer_ack_compression.feature.md +++ /dev/null @@ -1,3 +0,0 @@ -Adds proper support for compression of HEC indexer ack queries, using the sink's configured `compression` setting. - -authors: sbalmos diff --git a/changelog.d/23815_memory_enrichment_expired_output.feature.md b/changelog.d/23815_memory_enrichment_expired_output.feature.md deleted file mode 100644 index 6bd44c05abceb..0000000000000 --- a/changelog.d/23815_memory_enrichment_expired_output.feature.md +++ /dev/null @@ -1,3 +0,0 @@ -Added `expired` output to the memory enrichment table source, to export items as they expire in the cache. - -authors: esensar Quad9DNS diff --git a/changelog.d/23820_sighup_reload_transforms.feature.md b/changelog.d/23820_sighup_reload_transforms.feature.md deleted file mode 100644 index 3efef1b42162d..0000000000000 --- a/changelog.d/23820_sighup_reload_transforms.feature.md +++ /dev/null @@ -1,3 +0,0 @@ -On receiving SIGHUP Vector now also reloads transform components with external VRL files. - -authors: nekorro diff --git a/changelog.d/23820_watch_config_handle_events.fix.md b/changelog.d/23820_watch_config_handle_events.fix.md deleted file mode 100644 index 24c92dbbf2277..0000000000000 --- a/changelog.d/23820_watch_config_handle_events.fix.md +++ /dev/null @@ -1,3 +0,0 @@ -The configuration watcher now collects event paths even during the delay period. These were previously ignored and prevented components from reloading. 
- -authors: nekorro diff --git a/changelog.d/23838_mqtt_sink_client_cert_auth.fix.md b/changelog.d/23838_mqtt_sink_client_cert_auth.fix.md deleted file mode 100644 index 4273f5deaf714..0000000000000 --- a/changelog.d/23838_mqtt_sink_client_cert_auth.fix.md +++ /dev/null @@ -1,3 +0,0 @@ -Enable unused TLS settings for perform client authentication by SSL certificate in `mqtt` sink. - -authors: ValentinChernovNTQ diff --git a/changelog.d/23863_memory_tables_tap.fix.md b/changelog.d/23863_memory_tables_tap.fix.md deleted file mode 100644 index e5a171a065cfc..0000000000000 --- a/changelog.d/23863_memory_tables_tap.fix.md +++ /dev/null @@ -1,3 +0,0 @@ -Memory enrichment table's outputs are now visible to the `vector tap` command. - -authors: esensar Quad9DNS diff --git a/changelog.d/23865_azure_blob_use_correct_feature.fix.md b/changelog.d/23865_azure_blob_use_correct_feature.fix.md deleted file mode 100644 index ec0b2ae7cdf85..0000000000000 --- a/changelog.d/23865_azure_blob_use_correct_feature.fix.md +++ /dev/null @@ -1,3 +0,0 @@ -Fixed a panic in the `azure_blob` sink by enabling a missing required crate feature. - -authors: thomasqueirozb diff --git a/changelog.d/23872_buffer_counter_underflowed.fix.md b/changelog.d/23872_buffer_counter_underflowed.fix.md deleted file mode 100644 index cf0a3a4449b16..0000000000000 --- a/changelog.d/23872_buffer_counter_underflowed.fix.md +++ /dev/null @@ -1,3 +0,0 @@ -Fix buffer counter underflowed, caused by the counter has not been updated(increase) timely when new event is coming. 
- -authors: sialais diff --git a/changelog.d/23874_file_source_remove_legacy_checkpoint.breaking.md b/changelog.d/23874_file_source_remove_legacy_checkpoint.breaking.md deleted file mode 100644 index fabb3a8bf852d..0000000000000 --- a/changelog.d/23874_file_source_remove_legacy_checkpoint.breaking.md +++ /dev/null @@ -1,34 +0,0 @@ -* Dropped support for `file` source legacy checkpoints stored in the `checkpoints` folder (Vector `< 0.11`) which is located inside the `data_dir`. -* Removed the legacy checkpoint checksum format (Vector `< 0.15`). -* The intentionally hidden `fingerprint.bytes` option was also removed. - -### How to upgrade - -You can stop reading if you - -* have started using the `file` source on or after version `0.15`, or -* have cleared your `data_dir` on or after version `0.15`, or -* don't care about the file positions and don't care about current state of your checkpoints, meaning you accept that files could be read from the beginning again after the upgrade. - * Vector will re-read all files from the beginning if/when any `checkpoints.json` files nested inside `data_dir` fail to load due to legacy/corrupted data. - -You are only affected if your Vector version is: - -1. `>= 0.11` and `< 0.15`, then your checkpoints are using the legacy checkpoint checksum CRC format. -2. `>= 0.11` and `< 0.15`, then the `checksum` key is present under `checkpoints.fingerprint` in your `checkpoints.json` (instead of `first_lines_checksum`). -3. **or ever was** `< 0.11` and you are using the legacy `checkpoints` folder and/or the `unknown` key is present under `checkpoints.fingerprint` in any `checkpoints.json` files nested inside `data_dir`. - -#### If you are affected by `#1` or `#2` - -Run the `file` source with any version of Vector `>= 0.15`, but strictly before `0.51` and the checkpoints should be automatically updated. 
-For example, if you’re on Vector `0.10` and want to upgrade, keep upgrading Vector until `0.14` and Vector will automatically convert your checkpoints. -When upgrading, we recommend stepping through minor versions as these can each contain breaking changes while Vector is pre-1.0. These breaking changes are noted in their respective upgrade guides. - -Odds are the `file` source automatically converted checkpoints to the new format if you are using a recent version and you are not affected by this at all. - -#### If you are affected by `#3` - -You should manually delete the `unknown` checkpoint records from all `checkpoints.json` files nested inside `data_dir` -and then follow the upgrade guide for `#1` and `#2`. If you were using a recent version of Vector and `unknown` -was present it wasn't being used anyways. - -authors: thomasqueirozb diff --git a/changelog.d/23941_improve_journald_source_debug.feature.md b/changelog.d/23941_improve_journald_source_debug.feature.md deleted file mode 100644 index f996a21295c92..0000000000000 --- a/changelog.d/23941_improve_journald_source_debug.feature.md +++ /dev/null @@ -1,3 +0,0 @@ -The `journald` source now provides better error visibility by capturing and displaying stderr output from the underlying `journalctl` process as warning messages. - -authors: titaneric diff --git a/changelog.d/23986_datadog_agent_split_metric_namespace.enhancement.md b/changelog.d/23986_datadog_agent_split_metric_namespace.enhancement.md deleted file mode 100644 index dbd70462c4bc0..0000000000000 --- a/changelog.d/23986_datadog_agent_split_metric_namespace.enhancement.md +++ /dev/null @@ -1,4 +0,0 @@ -Added a new `split_metric_namespace` option to the `datadog_agent` source to -optionally disable the existing default metric name split behavior. 
- -authors: bruceg diff --git a/changelog.d/24014_memory_table_source_reload.fix.md b/changelog.d/24014_memory_table_source_reload.fix.md deleted file mode 100644 index 56b0403da230e..0000000000000 --- a/changelog.d/24014_memory_table_source_reload.fix.md +++ /dev/null @@ -1,3 +0,0 @@ -Fixed a crash on configuration reload when memory enrichment tables are configured to be used as a source. - -authors: esensar Quad9DNS diff --git a/changelog.d/24026_fix_docker_logs_socket_path.fix.md b/changelog.d/24026_fix_docker_logs_socket_path.fix.md deleted file mode 100644 index a6ef009511bd6..0000000000000 --- a/changelog.d/24026_fix_docker_logs_socket_path.fix.md +++ /dev/null @@ -1,3 +0,0 @@ -Fixed an issue in the `docker_logs` source where the `docker_host` option and `DOCKER_HOST` environment variable were ignored if they started with `unix://` or `npipe://`. In those cases the default location for the Docker socket was used - -authors: titaneric diff --git a/changelog.d/24073_utilization_bounds.fix.md b/changelog.d/24073_utilization_bounds.fix.md deleted file mode 100644 index 2d84b0cb109b4..0000000000000 --- a/changelog.d/24073_utilization_bounds.fix.md +++ /dev/null @@ -1,3 +0,0 @@ -Fixed an issue where utilization could report negative values. This could happen if messages from components were processed too late and were accounted for wrong utilization measurement period. These messages are now moved to the current utilization period, meaning there might be some inaccuracy in the resulting utilization metric, but it was never meant to be precise. - -authors: esensar Quad9DNS diff --git a/changelog.d/24080_utilization_on_reload.fix.md b/changelog.d/24080_utilization_on_reload.fix.md deleted file mode 100644 index de09ebce3752f..0000000000000 --- a/changelog.d/24080_utilization_on_reload.fix.md +++ /dev/null @@ -1,3 +0,0 @@ -Fixed a bug where utilization metric could be lost for changed components on configuration reload. 
- -authors: esensar Quad9DNS diff --git a/changelog.d/24119_improve_avro_encoding_error.feature.md b/changelog.d/24119_improve_avro_encoding_error.feature.md deleted file mode 100644 index d51a9870480f9..0000000000000 --- a/changelog.d/24119_improve_avro_encoding_error.feature.md +++ /dev/null @@ -1,3 +0,0 @@ -Improve Avro encoding error where schema and value are included in the message - -authors: titaneric diff --git a/changelog.d/add_metric_vector_config_reload_rejected.feature.md b/changelog.d/add_metric_vector_config_reload_rejected.feature.md deleted file mode 100644 index 2d2d74e5113ef..0000000000000 --- a/changelog.d/add_metric_vector_config_reload_rejected.feature.md +++ /dev/null @@ -1,3 +0,0 @@ -Vector now emits `config_reload_rejected` and `config_reloaded` counters. - -authors: suikammd diff --git a/changelog.d/aws_s3_source_exponential_backoff.enhancement.md b/changelog.d/aws_s3_source_exponential_backoff.enhancement.md deleted file mode 100644 index fa2b139dde6ba..0000000000000 --- a/changelog.d/aws_s3_source_exponential_backoff.enhancement.md +++ /dev/null @@ -1,5 +0,0 @@ -The `aws_s3` source now uses exponential backoff when retrying failed SQS `receive_message` operations. Previously, the source used a fixed 500ms delay between retries. - -The new behavior starts at 500ms and doubles with each consecutive failure, capping at 30 seconds. This prevents excessive API calls during prolonged AWS SQS outages, invalid IAM permissions, or throttling scenarios, while still being responsive when the service recovers. - -authors: medzin pront diff --git a/changelog.d/env_var_multiline_rejection.breaking.md b/changelog.d/env_var_multiline_rejection.breaking.md deleted file mode 100644 index 8c6ae4608178e..0000000000000 --- a/changelog.d/env_var_multiline_rejection.breaking.md +++ /dev/null @@ -1,5 +0,0 @@ -Environment variable interpolation in configuration files now rejects values containing newline characters. 
This prevents configuration -injection attacks where environment variables could inject malicious multi-line configurations. If you need to inject multi-line -configuration blocks, use a config pre-processing tool like `envsubst` instead. - -authors: pront diff --git a/changelog.d/fix_fluent_received_events_count.fix.md b/changelog.d/fix_fluent_received_events_count.fix.md deleted file mode 100644 index 37d830be6982f..0000000000000 --- a/changelog.d/fix_fluent_received_events_count.fix.md +++ /dev/null @@ -1,3 +0,0 @@ -Fixed duplicate reporting of received event count in the `fluent` source. - -authors: gwenaskell diff --git a/changelog.d/internal_log_component_id_field.breaking.md b/changelog.d/internal_log_component_id_field.breaking.md deleted file mode 100644 index 5237aa144c86c..0000000000000 --- a/changelog.d/internal_log_component_id_field.breaking.md +++ /dev/null @@ -1,4 +0,0 @@ -Vector's internal topology logs now use the `component_id` field name instead of `component` or `key`. -If you are monitoring or filtering Vector's internal logs based on these field names, update your queries to use `component_id`. - -authors: pront diff --git a/changelog.d/opentelemetry_source_http_decompression.fix.md b/changelog.d/opentelemetry_source_http_decompression.fix.md deleted file mode 100644 index ebf4bbd922e96..0000000000000 --- a/changelog.d/opentelemetry_source_http_decompression.fix.md +++ /dev/null @@ -1,4 +0,0 @@ -Fixed a `opentelemetry` source bug where HTTP payloads were not decompressed according to the request headers. -This only applied when `use_otlp_decoding` (recently added) was set to `true`. - -authors: pront diff --git a/changelog.d/otlp_decoding.feature.md b/changelog.d/otlp_decoding.feature.md deleted file mode 100644 index c13f55dee313d..0000000000000 --- a/changelog.d/otlp_decoding.feature.md +++ /dev/null @@ -1,3 +0,0 @@ -Added `otlp` codec for decoding OTLP format to Vector events, complementing the existing OTLP encoder. 
- -authors: pront diff --git a/changelog.d/otlp_encoding.feature.md b/changelog.d/otlp_encoding.feature.md deleted file mode 100644 index 513bcf4379d93..0000000000000 --- a/changelog.d/otlp_encoding.feature.md +++ /dev/null @@ -1,4 +0,0 @@ -Added `otlp` codec for encoding Vector events to OTLP format. -The codec can be used with sinks that support encoding configuration. - -authors: pront diff --git a/changelog.d/prometheus_remote_write_metadata_conflicts.fix.md b/changelog.d/prometheus_remote_write_metadata_conflicts.fix.md deleted file mode 100644 index eeb457c61028d..0000000000000 --- a/changelog.d/prometheus_remote_write_metadata_conflicts.fix.md +++ /dev/null @@ -1,3 +0,0 @@ -The `prometheus_remote_write` source now has a `metadata_conflict_strategy` option so you can determine how to handle conflicting metric metadata. By default, the source continues to reject requests with conflicting metadata (HTTP 400 error) to maintain backwards compatibility. Set `metadata_conflict_strategy` to `ignore` to align with Prometheus/Thanos behavior, which silently ignores metadata conflicts. - -authors: elohmeier diff --git a/changelog.d/prometheus_remote_write_path.feature.md b/changelog.d/prometheus_remote_write_path.feature.md deleted file mode 100644 index 0d8c2522e5b3c..0000000000000 --- a/changelog.d/prometheus_remote_write_path.feature.md +++ /dev/null @@ -1,3 +0,0 @@ -Added `path` configuration option to `prometheus_remote_write` source to allow accepting metrics on custom URL paths instead of only the root path. This enables configuration of endpoints like `/api/v1/write` to match standard Prometheus remote write conventions. 
- -authors: elohmeier diff --git a/changelog.d/protobuf_use_json_names.enhancement.md b/changelog.d/protobuf_use_json_names.enhancement.md deleted file mode 100644 index 2e3a3d30e2b88..0000000000000 --- a/changelog.d/protobuf_use_json_names.enhancement.md +++ /dev/null @@ -1,5 +0,0 @@ -Added `use_json_names` option to protobuf encoding and decoding. -When enabled, the codec uses JSON field names (camelCase) instead of protobuf field names (snake_case). -This is useful when working with data that uses JSON naming conventions. - -authors: pront diff --git a/changelog.d/utilization.breaking.md b/changelog.d/utilization.breaking.md deleted file mode 100644 index e07082da222e1..0000000000000 --- a/changelog.d/utilization.breaking.md +++ /dev/null @@ -1,3 +0,0 @@ -The `utilization` metric is now capped at 4 decimal digit precision. - -authors: pront diff --git a/changelog.d/x86_64_apple_builds.breaking.md b/changelog.d/x86_64_apple_builds.breaking.md deleted file mode 100644 index 275df179c0974..0000000000000 --- a/changelog.d/x86_64_apple_builds.breaking.md +++ /dev/null @@ -1,4 +0,0 @@ -Following [this announcement](https://blog.rust-lang.org/2025/09/18/Rust-1.90.0/#demoting-x86-64-apple-darwin-to-tier-2-with-host-tools), we will no longer publish `x86_64-apple-darwin` builds. -This means we will not be validating if Vector builds and works correctly on that platform. - -authors: pront diff --git a/distribution/install.sh b/distribution/install.sh index b400b41d26862..c3d80833ea432 100755 --- a/distribution/install.sh +++ b/distribution/install.sh @@ -13,7 +13,7 @@ set -u # If PACKAGE_ROOT is unset or empty, default it. PACKAGE_ROOT="${PACKAGE_ROOT:-"https://packages.timber.io/vector"}" # If VECTOR_VERSION is unset or empty, default it. 
-VECTOR_VERSION="${VECTOR_VERSION:-"0.50.0"}" +VECTOR_VERSION="${VECTOR_VERSION:-"0.51.0"}" _divider="--------------------------------------------------------------------------------" _prompt=">>>" _indent=" " diff --git a/website/content/en/highlights/2025-09-23-otlp-support.md b/website/content/en/highlights/2025-09-23-otlp-support.md index b12c08a6ffa5d..fa3a1add99544 100644 --- a/website/content/en/highlights/2025-09-23-otlp-support.md +++ b/website/content/en/highlights/2025-09-23-otlp-support.md @@ -47,6 +47,8 @@ sinks: codec: otlp ``` +The above configuration will only work with Vector versions >= `0.51`. + ## Example Configuration 2 Here is another pipeline configuration that can achieve the same as the above: diff --git a/website/content/en/highlights/2025-11-04-0-51-0-upgrade-guide.md b/website/content/en/highlights/2025-11-04-0-51-0-upgrade-guide.md new file mode 100644 index 0000000000000..c514e2d711cd2 --- /dev/null +++ b/website/content/en/highlights/2025-11-04-0-51-0-upgrade-guide.md @@ -0,0 +1,115 @@ +--- +date: "2025-11-04" +title: "0.51 Upgrade Guide" +description: "An upgrade guide that addresses breaking changes in 0.51.0" +authors: ["thomasqueirozb"] +release: "0.51.0" +hide_on_release_notes: false +badges: + type: breaking change +--- + + +## Vector breaking changes + +1. [Environment variable interpolation security hardening](#env-var-interpolation) +2. [Internal topology logs field name change](#topology-logs-field) +3. [Utilization metric precision cap](#utilization-metric-precision) +4. [Legacy file source fingerprint support dropped](#legacy-fingerprints) +5. [macOS x86_64 builds discontinued](#macos-x86-builds) + +## Vector upgrade guide + +### Environment variable interpolation security hardening {#env-var-interpolation} + +Environment variable interpolation in configuration files now rejects values containing newline characters. 
This prevents configuration injection attacks where environment variables could inject malicious multi-line configurations. + +#### Action needed + +If you need to inject multi-line configuration blocks, use a config pre-processing tool like `envsubst` instead or update your configuration files so that they don't rely on block injections. + +### Internal topology logs field name change {#topology-logs-field} + +Vector's internal topology `debug!` and `trace!` logs now use the `component_id` field name instead of `component` or `key`. + +#### Action needed + +If you are monitoring or filtering Vector's internal logs based on these field names, update your queries to use `component_id`. + +### Utilization metric precision cap {#utilization-metric-precision} + +The `utilization` metric is now capped at 4 decimal digit precision. + +#### Action needed + +No action required. This change improves metric consistency and readability. + +### Legacy file source fingerprint support dropped {#legacy-fingerprints} + +* Dropped support for `file` source legacy checkpoints stored in the `checkpoints` folder (Vector `< 0.11`) which is located inside the `data_dir`. +* Removed the legacy checkpoint checksum format (Vector `< 0.15`). +* The intentionally hidden `fingerprint.bytes` option was also removed. + +You can stop reading this section of the upgrade guide if you + +* have started using the `file` source on or after version `0.15`, or +* have cleared your `data_dir` on or after version `0.15`, or +* don't care about the file positions and don't care about current state of your checkpoints, meaning you accept that files could be read from the beginning again after the upgrade. + * Vector will re-read all files from the beginning if/when any `checkpoints.json` files nested inside `data_dir` fail to load due to legacy/corrupted data. + +You are only affected if your Vector version is: + +1. 
`>= 0.11` and `< 0.15`, then your checkpoints are using the legacy checkpoint checksum CRC format. +2. `>= 0.11` and `< 0.15`, then the `checksum` key is present under `checkpoints.fingerprint` in your `checkpoints.json` (instead of `first_lines_checksum`). +3. **or ever was** `< 0.11` and you are using the legacy `checkpoints` folder and/or the `unknown` key is present under `checkpoints.fingerprint` in any `checkpoints.json` files nested inside `data_dir`. + +#### If you are affected by `#1` or `#2` + +Run the `file` source with any version of Vector `>= 0.15`, but strictly before `0.51` and the checkpoints should be automatically updated. +For example, if you’re on Vector `0.10` and want to upgrade, keep upgrading Vector until `0.14` and Vector will automatically convert your checkpoints. +When upgrading, we recommend stepping through minor versions as these can each contain breaking changes while Vector is pre-1.0. These breaking changes are noted in their respective upgrade guides. + +Odds are the `file` source automatically converted checkpoints to the new format if you are using a recent version and you are not affected by this at all. + +#### If you are affected by `#3` + +You should manually delete the `unknown` checkpoint records from all `checkpoints.json` files nested inside `data_dir` +and then follow the upgrade guide for `#1` and `#2`. If you were using a recent version of Vector and `unknown` +was present it wasn't being used anyways. + +### macOS x86_64 builds discontinued {#macos-x86-builds} + +Following [this announcement](https://blog.rust-lang.org/2025/09/18/Rust-1.90.0/#demoting-x86-64-apple-darwin-to-tier-2-with-host-tools), we will no longer publish `x86_64-apple-darwin` builds. This means we will not be validating if Vector builds and works correctly on that platform. 
+ +#### Action needed + +If you are running Vector on macOS with Intel processors (x86_64), consider migrating to ARM-based macOS hardware or use alternative deployment platforms. + +## VRL breaking changes + +{{< info >}} +VRL version `0.28.0` includes a breaking change to the `find` function. +{{< /info >}} + +### VRL `find` function return value change {#vrl-find-function} + +The return value of the `find` function has been changed from `-1` to `null` when there is no match. This improves consistency with VRL's type system and idiomatic null handling. + +#### Previous Behavior + +```coffee +result = find("hello world", "xyz") +# result = -1 +``` + +#### New Behavior + +```coffee +result = find("hello world", "xyz") +# result = null +``` + +#### Action needed + +Update any code that checks for `-1` as the return value of `find` to check for `null` instead: + diff --git a/website/content/en/releases/0.51.0.md b/website/content/en/releases/0.51.0.md new file mode 100644 index 0000000000000..b30d57830fe9f --- /dev/null +++ b/website/content/en/releases/0.51.0.md @@ -0,0 +1,4 @@ +--- +title: Vector v0.51.0 release notes +weight: 29 +--- diff --git a/website/cue/reference/administration/interfaces/kubectl.cue b/website/cue/reference/administration/interfaces/kubectl.cue index b14690091ed07..e10737bd72906 100644 --- a/website/cue/reference/administration/interfaces/kubectl.cue +++ b/website/cue/reference/administration/interfaces/kubectl.cue @@ -19,7 +19,7 @@ administration: interfaces: kubectl: { role_implementations: [Name=string]: { commands: { _deployment_variant: string - _vector_version: "0.50" + _vector_version: "0.51" _namespace: string | *"vector" _controller_resource_type: string _controller_resource_name: string | *_deployment_variant diff --git a/website/cue/reference/releases/0.51.0.cue b/website/cue/reference/releases/0.51.0.cue new file mode 100644 index 0000000000000..b936c466dcbb8 --- /dev/null +++ b/website/cue/reference/releases/0.51.0.cue @@ -0,0 
+1,512 @@ +package metadata + +releases: "0.51.0": { + date: "2025-11-04" + codename: "" + + whats_next: [] + + description: """ + The Vector team is excited to announce version `0.51.0`! + + Please refer to the [upgrade guide](/highlights/2025-11-04-0-51-0-upgrade-guide) for breaking changes in this release. + + ## Release highlights + + - Enhanced OpenTelemetry Protocol (OTLP) support with the introduction of the `otlp` codec, enabling + bidirectional conversion between Vector events and OTLP format for seamless integration with + OpenTelemetry collectors and instrumentation. + - Improved Vector's internal telemetry with new `config_reload_rejected` and `config_reloaded` counters, + and fixed issues where utilization metrics reported negative values and buffer counters underflowed. + - Enhanced memory enrichment tables with an `expired` output for exporting expired cache items, + and made enrichment table outputs accessible via `vector tap`. + + ## Breaking Changes + + - Environment variable interpolation in configuration files now rejects values containing newline characters. This prevents configuration + injection attacks where environment variables could inject malicious multi-line configurations. If you need to inject multi-line + configuration blocks, use a config pre-processing tool like `envsubst` instead + or update your configuration files so that they don't rely on block injections. + + - Vector's internal topology `debug!` and `trace!` logs now use the `component_id` field name instead of `component` or `key`. + If you are monitoring or filtering Vector's internal logs based on these field names, update your queries to use `component_id`. + + - The `utilization` metric is now capped at 4 decimal digit precision. + + - Support for legacy fingerprints in the `file` source was dropped. Affected users may be + ones that have been running Vector since version 0.14 or earlier. Consult the upgrade guide for more details. 
+ + - Following [this announcement](https://blog.rust-lang.org/2025/09/18/Rust-1.90.0/#demoting-x86-64-apple-darwin-to-tier-2-with-host-tools), we will no longer publish `x86_64-apple-darwin` builds. + This means we will not be validating if Vector builds and works correctly on that platform. + """ + + changelog: [ + { + type: "feat" + description: """ + Added `truncate` options to `file` sink to truncate output files after some time. + """ + contributors: ["esensar", "Quad9DNS"] + }, + { + type: "feat" + description: """ + Disabled ANSI color for `vector test` when running non-interactively. Honor `--color {auto|always|never}` and `VECTOR_COLOR`; VRL diagnostics no longer include ANSI sequences when color is disabled. + """ + contributors: ["VanjaRo"] + }, + { + type: "feat" + description: """ + Added proper support for compression of HEC indexer ack queries, using the sink's configured `compression` setting. + """ + contributors: ["sbalmos"] + }, + { + type: "feat" + description: """ + Added `expired` output to the memory enrichment table source, to export items as they expire in the cache. + """ + contributors: ["esensar", "Quad9DNS"] + }, + { + type: "feat" + description: """ + On receiving SIGHUP Vector now also reloads transform components with external VRL files. + """ + contributors: ["nekorro"] + }, + { + type: "fix" + description: """ + The configuration watcher now collects event paths even during the delay period. These were previously ignored and prevented components from reloading. + """ + contributors: ["nekorro"] + }, + { + type: "fix" + description: """ + Enabled unused TLS settings to perform client authentication by SSL certificate in `mqtt` sink. + """ + contributors: ["ValentinChernovNTQ"] + }, + { + type: "fix" + description: """ + Memory enrichment tables' outputs are now visible to the `vector tap` command. 
+ """ + contributors: ["esensar", "Quad9DNS"] + }, + { + type: "fix" + description: """ + Fixed a panic in the `azure_blob` sink by enabling a missing required crate feature. + """ + contributors: ["thomasqueirozb"] + }, + { + type: "fix" + description: """ + Fixed an issue where the buffer counter underflowed. This was caused by the counter not being increased before a new event was observed. + """ + contributors: ["sialais"] + }, + { + type: "chore" + description: """ + * Dropped support for `file` source legacy checkpoints stored in the `checkpoints` folder (Vector `< 0.11`) which is located inside the `data_dir`. + * Removed the legacy checkpoint checksum format (Vector `< 0.15`). + * The intentionally hidden `fingerprint.bytes` option was also removed. + + ### How to upgrade + + You can stop reading if you + + * have started using the `file` source on or after version `0.15`, or + * have cleared your `data_dir` on or after version `0.15`, or + * don't care about the file positions and don't care about current state of your checkpoints, meaning you accept that files could be read from the beginning again after the upgrade. + * Vector will re-read all files from the beginning if/when any `checkpoints.json` files nested inside `data_dir` fail to load due to legacy/corrupted data. + + You are only affected if your Vector version is: + + 1. `>= 0.11` and `< 0.15`, then your checkpoints are using the legacy checkpoint checksum CRC format. + 2. `>= 0.11` and `< 0.15`, then the `checksum` key is present under `checkpoints.fingerprint` in your `checkpoints.json` (instead of `first_lines_checksum`). + 3. **or ever was** `< 0.11` and you are using the legacy `checkpoints` folder and/or the `unknown` key is present under `checkpoints.fingerprint` in any `checkpoints.json` files nested inside `data_dir`. 
+ + #### If you are affected by `#1` or `#2` + + Run the `file` source with any version of Vector `>= 0.15`, but strictly before `0.51` and the checkpoints should be automatically updated. + For example, if you’re on Vector `0.10` and want to upgrade, keep upgrading Vector until `0.14` and Vector will automatically convert your checkpoints. + When upgrading, we recommend stepping through minor versions as these can each contain breaking changes while Vector is pre-1.0. These breaking changes are noted in their respective upgrade guides. + + Odds are the `file` source automatically converted checkpoints to the new format if you are using a recent version and you are not affected by this at all. + + #### If you are affected by `#3` + + You should manually delete the `unknown` checkpoint records from all `checkpoints.json` files nested inside `data_dir` + and then follow the upgrade guide for `#1` and `#2`. If you were using a recent version of Vector and `unknown` + was present it wasn't being used anyways. + """ + contributors: ["thomasqueirozb"] + }, + { + type: "feat" + description: """ + The `journald` source now provides better error visibility by capturing and displaying stderr output from the underlying `journalctl` process as warning messages. + """ + contributors: ["titaneric"] + }, + { + type: "enhancement" + description: """ + Added a new `split_metric_namespace` option to the `datadog_agent` source to + optionally disable the existing default metric name split behavior. + """ + contributors: ["bruceg"] + }, + { + type: "fix" + description: """ + Fixed a crash on configuration reload when memory enrichment tables are configured to be used as a source. + """ + contributors: ["esensar", "Quad9DNS"] + }, + { + type: "fix" + description: """ + Fixed an issue in the `docker_logs` source where the `docker_host` option and `DOCKER_HOST` environment variable were ignored if they started with `unix://` or `npipe://`. 
In those cases the default location for the Docker socket was used + """ + contributors: ["titaneric"] + }, + { + type: "fix" + description: """ + Fixed an issue where utilization could report negative values. This could happen if messages from components were processed too late and were accounted for wrong utilization measurement period. These messages are now moved to the current utilization period, meaning there might be some inaccuracy in the resulting utilization metric, but it was never meant to be precise. + """ + contributors: ["esensar", "Quad9DNS"] + }, + { + type: "fix" + description: """ + Fixed a bug where utilization metric could be lost for changed components on configuration reload. + """ + contributors: ["esensar", "Quad9DNS"] + }, + { + type: "feat" + description: """ + Improved Avro encoding error. Schema and value are now included in the message + """ + contributors: ["titaneric"] + }, + { + type: "feat" + description: """ + Vector now emits `config_reload_rejected` and `config_reloaded` counters. + """ + contributors: ["suikammd"] + }, + { + type: "enhancement" + description: """ + The `aws_s3` source now uses exponential backoff when retrying failed SQS `receive_message` operations. Previously, the source used a fixed 500ms delay between retries. + + The new behavior starts at 500ms and doubles with each consecutive failure, capping at 30 seconds. This prevents excessive API calls during prolonged AWS SQS outages, invalid IAM permissions, or throttling scenarios, while still being responsive when the service recovers. + """ + contributors: ["medzin", "pront"] + }, + { + type: "chore" + description: """ + Environment variable interpolation in configuration files now rejects values containing newline characters. This prevents configuration + injection attacks where environment variables could inject malicious multi-line configurations. 
If you need to inject multi-line + configuration blocks, use a config pre-processing tool like `envsubst` instead. + """ + contributors: ["pront"] + }, + { + type: "fix" + description: """ + Fixed duplicate reporting of received event count in the `fluent` source. + """ + contributors: ["gwenaskell"] + }, + { + type: "chore" + description: """ + Vector's internal topology `debug!` and `trace!` logs now use the `component_id` field name instead of `component` or `key`. + If you are monitoring or filtering Vector's internal logs based on these field names, update your queries to use `component_id`. + """ + contributors: ["pront"] + }, + { + type: "fix" + description: """ + Fixed an `opentelemetry` source bug where HTTP payloads were not decompressed according to the request headers. + This only applied when `use_otlp_decoding` (recently added) was set to `true`. + """ + contributors: ["pront"] + }, + { + type: "feat" + description: """ + Added `otlp` codec for decoding OTLP format to Vector events, complementing the existing OTLP encoder. + """ + contributors: ["pront"] + }, + { + type: "feat" + description: """ + Added `otlp` codec for encoding Vector events to OTLP format. + The codec can be used with sinks that support encoding configuration. + """ + contributors: ["pront"] + }, + { + type: "fix" + description: """ + The `prometheus_remote_write` source now has a `metadata_conflict_strategy` option so you can determine how to handle conflicting metric metadata. By default, the source continues to reject requests with conflicting metadata (HTTP 400 error) to maintain backwards compatibility. Set `metadata_conflict_strategy` to `ignore` to align with Prometheus/Thanos behavior, which silently ignores metadata conflicts. + """ + contributors: ["elohmeier"] + }, + { + type: "feat" + description: """ + Added `path` configuration option to `prometheus_remote_write` source to allow accepting metrics on custom URL paths instead of only the root path. 
This enables configuration of endpoints like `/api/v1/write` to match standard Prometheus remote write conventions. + """ + contributors: ["elohmeier"] + }, + { + type: "enhancement" + description: """ + Added `use_json_names` option to protobuf encoding and decoding. + When enabled, the codec uses JSON field names (camelCase) instead of protobuf field names (snake_case). + This is useful when working with data that uses JSON naming conventions. + """ + contributors: ["pront"] + }, + { + type: "chore" + description: """ + The `utilization` metric is now capped at 4 decimal digit precision. + """ + contributors: ["pront"] + }, + { + type: "chore" + description: """ + Following [this announcement](https://blog.rust-lang.org/2025/09/18/Rust-1.90.0/#demoting-x86-64-apple-darwin-to-tier-2-with-host-tools), we will no longer publish `x86_64-apple-darwin` builds. + This means we will not be validating if Vector builds and works correctly on that platform. + """ + contributors: ["pront"] + }, + ] + + vrl_changelog: """ + ### [0.28.0 (2025-11-03)] + + #### Breaking Changes & Upgrade Guide + + - The return value of the `find` function has been changed to `null` instead of `-1` if there is no match. + + authors: titaneric (https://github.com/vectordotdev/vrl/pull/1514) + + #### New Features + + - Introduced the `basename` function to get the last component of a path. + + authors: titaneric (https://github.com/vectordotdev/vrl/pull/1531) + - Introduced the `dirname` function to get the directory component of a path. + + authors: titaneric (https://github.com/vectordotdev/vrl/pull/1532) + - Introduced the `split_path` function to split a path into its components. + + authors: titaneric (https://github.com/vectordotdev/vrl/pull/1533) + + #### Enhancements + + - Added optional `http_proxy` and `https_proxy` parameters to `http_request` for setting the proxies used for a request. 
(https://github.com/vectordotdev/vrl/pull/1534) + - Added support for encoding a VRL `Integer` into a protobuf `double` when using `encode_proto` + + authors: thomasqueirozb (https://github.com/vectordotdev/vrl/pull/1545) + + #### Fixes + + - Fixed `parse_glog` to accept space-padded thread-id. (https://github.com/vectordotdev/vrl/pull/1515) + + + ### [0.27.0 (2025-09-18)] + """ + + commits: [ + {sha: "8b25a7e918bfbd2732de8e5f7ab8de5c6becd563", date: "2025-09-19 18:09:13 UTC", description: "add timeout to component features job", pr_number: 23814, scopes: ["ci"], type: "chore", breaking_change: false, author: "Thomas", files_count: 1, insertions_count: 1, deletions_count: 0}, + {sha: "25f0353fa129a9318fb9f3c97cfe2b2facf89940", date: "2025-09-20 00:49:21 UTC", description: "add example for AWS Secrets Manager backend", pr_number: 23548, scopes: ["external"], type: "docs", breaking_change: false, author: "Gary Sassano", files_count: 3, insertions_count: 501, deletions_count: 0}, + {sha: "203b2bcbb0f453939fdcea7175b489c37df54400", date: "2025-09-19 21:11:02 UTC", description: "increase timeouts in file_start_position_server_restart_unfinalized", pr_number: 23812, scopes: ["tests"], type: "chore", breaking_change: false, author: "Thomas", files_count: 1, insertions_count: 3, deletions_count: 3}, + {sha: "4eadd70b7444a9fa6a2dee22bd3f3c7a803dc188", date: "2025-09-19 22:03:27 UTC", description: "run IT suite once", pr_number: 23818, scopes: ["ci"], type: "chore", breaking_change: false, author: "Pavlos Rontidis", files_count: 1, insertions_count: 3, deletions_count: 22}, + {sha: "a70eca4ea9900075ac8954c74238b2742fb244cd", date: "2025-09-19 23:09:01 UTC", description: "consolidate usage of VECTOR_LOG in tests and remove TEST_LOG", pr_number: 23804, scopes: ["dev"], type: "chore", breaking_change: false, author: "Thomas", files_count: 12, insertions_count: 12, deletions_count: 12}, + {sha: "e1cd39c78e439a8cb054aef69782cc00524ddb11", date: "2025-09-20 00:21:33 UTC", 
description: "enable colors when running in nextest", pr_number: 23819, scopes: ["dev"], type: "feat", breaking_change: false, author: "Thomas", files_count: 2, insertions_count: 5, deletions_count: 0}, + {sha: "52049ad615a6c31eda3ca7c45150e2c201c309d0", date: "2025-09-22 18:32:47 UTC", description: "improve indexing for memory table docs", pr_number: 23827, scopes: ["website"], type: "chore", breaking_change: false, author: "Pavlos Rontidis", files_count: 2, insertions_count: 2, deletions_count: 2}, + {sha: "be2dde4a0b4bcc40c5e20aa69b385bf083c1b414", date: "2025-09-22 23:01:23 UTC", description: "fix vector diagram", pr_number: 23830, scopes: ["website"], type: "chore", breaking_change: false, author: "Pavlos Rontidis", files_count: 2, insertions_count: 9667, deletions_count: 1574}, + {sha: "b86a6aa199d0d38cbe86b8dd68a52bb3211c698c", date: "2025-09-23 17:50:06 UTC", description: "minor release template fixes", pr_number: 23831, scopes: ["releasing"], type: "chore", breaking_change: false, author: "Pavlos Rontidis", files_count: 1, insertions_count: 3, deletions_count: 3}, + {sha: "a52a7494adc133c765b9cdcf70ce1cf8fbc504a8", date: "2025-09-24 00:28:50 UTC", description: "add options to truncate files in some conditions", pr_number: 23671, scopes: ["file sink"], type: "feat", breaking_change: false, author: "Ensar Sarajčić", files_count: 5, insertions_count: 162, deletions_count: 36}, + {sha: "8387b5e4be4abb70d90bb419646b4e512ffacabb", date: "2025-09-23 20:10:36 UTC", description: "extract homebrew publishing into a new workflow", pr_number: 23833, scopes: ["ci"], type: "chore", breaking_change: false, author: "Pavlos Rontidis", files_count: 2, insertions_count: 38, deletions_count: 21}, + {sha: "80fee2733787d7468bba971f5766373f2c27cf0d", date: "2025-09-23 22:32:28 UTC", description: "allow manual homebrew runs", pr_number: 23835, scopes: ["ci"], type: "chore", breaking_change: false, author: "Pavlos Rontidis", files_count: 1, insertions_count: 11, deletions_count: 
0}, + {sha: "74380c218d9626f61686a60c10b3b7e7ef907953", date: "2025-09-23 22:44:17 UTC", description: "post release steps", pr_number: 23834, scopes: ["releasing"], type: "chore", breaking_change: false, author: "Pavlos Rontidis", files_count: 51, insertions_count: 554, deletions_count: 110}, + {sha: "56a7af50c8a36bb09843bc8c5b524a2c9ecf46c1", date: "2025-09-24 17:28:23 UTC", description: "fix typo", pr_number: 23841, scopes: ["website"], type: "chore", breaking_change: false, author: "Pavlos Rontidis", files_count: 1, insertions_count: 1, deletions_count: 1}, + {sha: "592dab79583c9f7e221bb00ec38e94b41473081d", date: "2025-09-24 18:03:35 UTC", description: "spellchecker fix", pr_number: 23842, scopes: ["ci"], type: "chore", breaking_change: false, author: "Pavlos Rontidis", files_count: 1, insertions_count: 19, deletions_count: 10}, + {sha: "e6da13867c68dff362263006a8e6350e0bbce1f8", date: "2025-09-24 17:36:32 UTC", description: "homebrew workflow fixes", pr_number: 23836, scopes: ["ci"], type: "chore", breaking_change: false, author: "Pavlos Rontidis", files_count: 3, insertions_count: 10, deletions_count: 9}, + {sha: "c52d405e5a54287c668f20ac95a8a81c3c142236", date: "2025-09-25 06:37:15 UTC", description: "emit config_reload_rejected and config_reloaded counters", pr_number: 23500, scopes: ["config"], type: "feat", breaking_change: false, author: "Suika", files_count: 5, insertions_count: 117, deletions_count: 12}, + {sha: "76dc8b7291e2b3015c5d49f1a3ea6a3247bad97e", date: "2025-09-24 21:35:31 UTC", description: "add .md authors spelling pattern", pr_number: 23843, scopes: ["dev"], type: "fix", breaking_change: false, author: "Thomas", files_count: 2, insertions_count: 3, deletions_count: 14}, + {sha: "d12c8f14ce1af42cfb1e1b38b11115a5ea884b66", date: "2025-09-24 23:40:53 UTC", description: "Expose a public way to load a config from str", pr_number: 23825, scopes: ["config"], type: "chore", breaking_change: false, author: "Rob Blafford", files_count: 5, 
insertions_count: 86, deletions_count: 13}, + {sha: "a7d91b343abeb321ba53919924e633b62671e2ba", date: "2025-09-25 17:36:15 UTC", description: "spread out schedules", pr_number: 23852, scopes: ["ci"], type: "chore", breaking_change: false, author: "Pavlos Rontidis", files_count: 2, insertions_count: 2, deletions_count: 4}, + {sha: "a4ded4a8dfeb8e4ea19bae53c285ffb378f3cc75", date: "2025-09-26 05:14:49 UTC", description: "add expired items output to memory enrichment table", pr_number: 23815, scopes: ["enrichment tables"], type: "feat", breaking_change: false, author: "Ensar Sarajčić", files_count: 7, insertions_count: 287, deletions_count: 107}, + {sha: "fad8439c7051a8a3968b9184f257239e0bc173b7", date: "2025-09-26 01:00:29 UTC", description: "use shared volume in `opentelemetry-logs` E2E test", pr_number: 23854, scopes: ["ci"], type: "chore", breaking_change: false, author: "Pavlos Rontidis", files_count: 10, insertions_count: 90, deletions_count: 108}, + {sha: "bd6a8f51d6c16106de76de3330d27415f53940ad", date: "2025-09-26 17:15:53 UTC", description: "increase e2e timeout", pr_number: 23857, scopes: ["ci"], type: "fix", breaking_change: false, author: "Pavlos Rontidis", files_count: 1, insertions_count: 1, deletions_count: 1}, + {sha: "78935388ac68c7e110fe148b56d564a887f02a11", date: "2025-09-30 09:09:30 UTC", description: "vrl pop() function", pr_number: 23727, scopes: ["external"], type: "docs", breaking_change: false, author: "jlambatl", files_count: 1, insertions_count: 34, deletions_count: 0}, + {sha: "5ce51c046d3e686ed248d9f82800581e5acdc231", date: "2025-09-29 18:28:31 UTC", description: "Document best-practice of not ending with _config in config spec", pr_number: 23866, scopes: ["config"], type: "docs", breaking_change: false, author: "Jesse Szwedko", files_count: 1, insertions_count: 1, deletions_count: 0}, + {sha: "ae9010cb89ce19e9254731214861085b1fd8f82f", date: "2025-09-30 00:37:20 UTC", description: "windows rustup stable not installed by default", 
pr_number: 23868, scopes: ["ci"], type: "fix", breaking_change: false, author: "Thomas", files_count: 2, insertions_count: 21, deletions_count: 8}, + {sha: "a17a1844efffe0ce4b3cf4851991ccc7c2f838fd", date: "2025-09-29 21:40:52 UTC", description: "Remove `_config` suffix from `truncate_config`", pr_number: 23864, scopes: ["file sink"], type: "chore", breaking_change: false, author: "Jesse Szwedko", files_count: 4, insertions_count: 14, deletions_count: 14}, + {sha: "a3ee7ab9854b9ffb4f411733f45811ddbcb9d3e9", date: "2025-09-30 00:48:25 UTC", description: "improvements", pr_number: 23869, scopes: ["external"], type: "docs", breaking_change: false, author: "Pavlos Rontidis", files_count: 91, insertions_count: 459, deletions_count: 488}, + {sha: "4985e40303afa0bd6aabfc4d9aea3a99b855e973", date: "2025-09-30 01:03:24 UTC", description: "use correct reqwest feature", pr_number: 23865, scopes: ["azure_blob sink"], type: "fix", breaking_change: false, author: "Thomas", files_count: 2, insertions_count: 4, deletions_count: 1}, + {sha: "1d86067671a36ea9c35687a11616d18ca9e262f0", date: "2025-09-30 01:14:54 UTC", description: "use rust 1.90", pr_number: 23870, scopes: ["dev"], type: "chore", breaking_change: false, author: "Thomas", files_count: 17, insertions_count: 19, deletions_count: 39}, + {sha: "ba1131964447904a26a871f1b97f8bd4cca8a796", date: "2025-10-01 03:53:31 UTC", description: "tls auth by client cert", pr_number: 23839, scopes: ["mqtt sink"], type: "fix", breaking_change: false, author: "ValentinChernovNTQ", files_count: 3, insertions_count: 5, deletions_count: 1}, + {sha: "e4e01fa3fc2eb799fe66df6163891f6c67a9fa75", date: "2025-09-30 17:05:32 UTC", description: "remove support for x86_64-apple-darwin", pr_number: 23867, scopes: ["releasing"], type: "chore", breaking_change: false, author: "Pavlos Rontidis", files_count: 26, insertions_count: 37, deletions_count: 73}, + {sha: "1a2dccb548e0e2d51b3a96aebe91b10218548b2c", date: "2025-09-30 19:54:28 UTC", description: 
"make fingerprinter buffer internal", pr_number: 23859, scopes: ["file source"], type: "feat", breaking_change: false, author: "Thomas", files_count: 5, insertions_count: 155, deletions_count: 198}, + {sha: "2e605f52128deff9ecd7fada87b102357dca8dd9", date: "2025-09-30 23:44:07 UTC", description: "move e2e.yml logic to integration.yml", pr_number: 23873, scopes: ["ci"], type: "feat", breaking_change: false, author: "Pavlos Rontidis", files_count: 3, insertions_count: 37, deletions_count: 129}, + {sha: "36459cc67e8f41140fdf6e990efddace6334670c", date: "2025-10-01 17:20:30 UTC", description: "Bump tempfile from 3.21.0 to 3.23.0", pr_number: 23889, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 2, insertions_count: 6, deletions_count: 6}, + {sha: "398b81b50b9feb44a5e1fc81e08061edc053ebcc", date: "2025-10-01 17:21:44 UTC", description: "Bump the clap group with 2 updates", pr_number: 23881, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 3, insertions_count: 8, deletions_count: 8}, + {sha: "2855aef0fd5f5693544f3cfc049982c41b1238b9", date: "2025-10-01 21:35:26 UTC", description: "Bump humantime from 2.2.0 to 2.3.0", pr_number: 23895, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 2, insertions_count: 3, deletions_count: 3}, + {sha: "64463fb0b4383c3c7f3cbcb5bc742c19e368e07b", date: "2025-10-01 22:33:22 UTC", description: "update VRL", pr_number: 23903, scopes: ["dev"], type: "chore", breaking_change: false, author: "Pavlos Rontidis", files_count: 2, insertions_count: 12, deletions_count: 7}, + {sha: "28de351c85cfb06e3825551c467e2d7b1b237b8c", date: "2025-10-02 04:34:09 UTC", description: "stop counting received events twice", pr_number: 23900, scopes: ["fluent source"], type: "fix", breaking_change: false, author: "Yoenn Burban", files_count: 2, insertions_count: 3, deletions_count: 1}, + {sha: 
"5156c8b5a13dc1cf8f1c8907106a21f793ebce14", date: "2025-10-02 18:43:29 UTC", description: "Ignore E2E datadog-metrics", pr_number: 23917, scopes: ["ci"], type: "chore", breaking_change: false, author: "Thomas", files_count: 1, insertions_count: 2, deletions_count: 1}, + {sha: "2845c58505b2e6fd78391fa6c6da5deeffb3e31f", date: "2025-10-02 19:09:55 UTC", description: "binstall cargo nextest in int/e2e tests", pr_number: 23913, scopes: ["tests"], type: "feat", breaking_change: false, author: "Thomas", files_count: 2, insertions_count: 12, deletions_count: 4}, + {sha: "1e4920444f357f6f75c98e555c073e00fd670b07", date: "2025-10-02 19:36:54 UTC", description: "0.50.0 release typos", pr_number: 23918, scopes: ["website"], type: "fix", breaking_change: false, author: "Pavlos Rontidis", files_count: 2, insertions_count: 2, deletions_count: 2}, + {sha: "2dfb9fcb96f5198eaf68ba19c873e3b97cdd197e", date: "2025-10-02 20:16:41 UTC", description: "use 8core runners for int tests", pr_number: 23909, scopes: ["ci"], type: "chore", breaking_change: false, author: "Thomas", files_count: 1, insertions_count: 1, deletions_count: 1}, + {sha: "c66f4a34bb2cb72202c7d108e984b5c4baeb9b33", date: "2025-10-02 20:18:40 UTC", description: "Bump aws-smithy-runtime from 1.9.1 to 1.9.2 in the aws group", pr_number: 23879, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 1, insertions_count: 2, deletions_count: 2}, + {sha: "ee390b82fd199b397cae09f3a8303800c501a0d1", date: "2025-10-02 20:19:00 UTC", description: "Bump sysinfo from 0.36.1 to 0.37.1", pr_number: 23892, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 1, insertions_count: 2, deletions_count: 2}, + {sha: "8ff0e90ee298a9937654f601578ea32efe9f58d8", date: "2025-10-02 21:28:45 UTC", description: "Bump amannn/action-semantic-pull-request from 5.5.3 to 6.1.1", pr_number: 23907, scopes: ["ci"], type: "chore", breaking_change: false, author: 
"dependabot[bot]", files_count: 1, insertions_count: 1, deletions_count: 1}, + {sha: "2e2e5bb1e0421a99c1eecf8847d13425c62dd447", date: "2025-10-02 21:28:55 UTC", description: "Bump github/codeql-action from 3.30.0 to 3.30.5", pr_number: 23908, scopes: ["ci"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 1, insertions_count: 1, deletions_count: 1}, + {sha: "0346d5420980e19d447479561af61e7c218e8a07", date: "2025-10-03 02:20:11 UTC", description: "Bump actions/labeler from 5.0.0 to 6.0.1", pr_number: 23905, scopes: ["ci"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 1, insertions_count: 1, deletions_count: 1}, + {sha: "2332719568154106fc5bdc115823e568b5b206ea", date: "2025-10-03 02:37:31 UTC", description: "Bump security-framework from 3.3.0 to 3.5.1", pr_number: 23887, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 2, insertions_count: 10, deletions_count: 10}, + {sha: "9ead2129b552ecfc561f44bcd0aab614cd8984b7", date: "2025-10-02 22:37:36 UTC", description: "datadog-metrics e2e test fixes", pr_number: 23919, scopes: ["ci"], type: "fix", breaking_change: false, author: "Pavlos Rontidis", files_count: 5, insertions_count: 94, deletions_count: 16}, + {sha: "759c6b137a9c740a98bccd9b17371f94e7e84b6c", date: "2025-10-03 02:37:43 UTC", description: "Bump proptest from 1.7.0 to 1.8.0", pr_number: 23890, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 5, insertions_count: 7, deletions_count: 7}, + {sha: "03e8021571f30069b5ee47f1e2340ea5ca2e98bc", date: "2025-10-03 02:39:48 UTC", description: "Bump bytesize from 2.0.1 to 2.1.0", pr_number: 23885, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 1, insertions_count: 2, deletions_count: 2}, + {sha: "4d20c763bc21159008f150db71178a06d09461a4", date: "2025-10-03 02:42:00 UTC", description: "Bump async-nats from 0.42.0 
to 0.43.1", pr_number: 23886, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 1, insertions_count: 4, deletions_count: 3}, + {sha: "f09401d3bf0b84ef0e4a5264aff5c484b03358ec", date: "2025-10-02 23:24:01 UTC", description: "Bump docker/login-action from 3.5.0 to 3.6.0", pr_number: 23904, scopes: ["ci"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 3, insertions_count: 5, deletions_count: 5}, + {sha: "a36ad74e5cdd0d9d254e66a3700ccba560260bce", date: "2025-10-03 03:58:51 UTC", description: "Bump actions/github-script from 7.0.1 to 8.0.0", pr_number: 23906, scopes: ["ci"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 3, insertions_count: 7, deletions_count: 7}, + {sha: "c21ff5b5b19a3c6caf3596b7d1fb85f0b1226bc9", date: "2025-10-03 00:10:12 UTC", description: "batch netlink-* dep updates", pr_number: 23920, scopes: ["deps"], type: "chore", breaking_change: false, author: "Pavlos Rontidis", files_count: 1, insertions_count: 3, deletions_count: 0}, + {sha: "d4f791d8f387da451b34a1cbea05888743ae92b3", date: "2025-10-03 04:21:45 UTC", description: "Bump warp from 0.3.7 to 0.4.2", pr_number: 23683, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 1, insertions_count: 58, deletions_count: 7}, + {sha: "bff362389f2834e506f25a8454968ac1696e90a2", date: "2025-10-03 01:19:01 UTC", description: "bump sysinfo to 0.37.2", pr_number: 23921, scopes: ["deps"], type: "chore", breaking_change: false, author: "Thomas", files_count: 2, insertions_count: 3, deletions_count: 3}, + {sha: "12fd410016520652344b752ad6b6ae84cd21ccf9", date: "2025-10-03 05:40:03 UTC", description: "Bump ossf/scorecard-action from 2.4.2 to 2.4.3", pr_number: 23925, scopes: ["ci"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 1, insertions_count: 1, deletions_count: 1}, + {sha: "7cf189fe4aba3bf29acac9a20025bd9fc7625cfa", 
date: "2025-10-03 05:40:50 UTC", description: "Bump actions/setup-python from 5 to 6", pr_number: 23924, scopes: ["ci"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 1, insertions_count: 1, deletions_count: 1}, + {sha: "9783640c57de3df503465b43c40d6e1e5bb40fd6", date: "2025-10-03 12:18:34 UTC", description: "Bump github/codeql-action from 3.30.5 to 3.30.6", pr_number: 23926, scopes: ["ci"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 1, insertions_count: 1, deletions_count: 1}, + {sha: "a7b4dc34cbc9cfe791dfda6864627d6146059334", date: "2025-10-03 12:21:05 UTC", description: "Bump aws-actions/configure-aws-credentials from 4.3.1 to 5.0.0", pr_number: 23928, scopes: ["ci"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 1, insertions_count: 6, deletions_count: 6}, + {sha: "0fc7111eb9a4393b31cfab4b3b015e8243558944", date: "2025-10-03 20:59:55 UTC", description: "Bump actions/cache from 4.2.4 to 4.3.0", pr_number: 23927, scopes: ["ci"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 6, insertions_count: 6, deletions_count: 6}, + {sha: "f054f9c1371be239eeac85fb79ab20d101cb71df", date: "2025-10-04 00:40:41 UTC", description: "properly enable memory enrichment table for `vector tap`", pr_number: 23863, scopes: ["enrichment tables"], type: "fix", breaking_change: false, author: "Ensar Sarajčić", files_count: 2, insertions_count: 36, deletions_count: 2}, + {sha: "0c652ce808a73501bc7ca3e4a5d1de66e5e42472", date: "2025-10-04 00:32:06 UTC", description: "only run changed integrations in the MQ", pr_number: 23937, scopes: ["ci"], type: "fix", breaking_change: false, author: "Thomas", files_count: 1, insertions_count: 2, deletions_count: 4}, + {sha: "deab79c5172837a44fcd0458c0b2215e6d1a456e", date: "2025-10-07 05:33:16 UTC", description: "fix empty collection rendering by isset", pr_number: 23945, scopes: ["external docs"], type: "docs", 
breaking_change: false, author: "Huang Chen-Yi", files_count: 1, insertions_count: 4, deletions_count: 4}, + {sha: "50f9a8c5dae8506e3ab7a11d61c43afe07982baa", date: "2025-10-07 05:58:05 UTC", description: "add requirement for docker logs source", pr_number: 23944, scopes: ["docker_logs source"], type: "docs", breaking_change: false, author: "Huang Chen-Yi", files_count: 1, insertions_count: 5, deletions_count: 1}, + {sha: "549381ecd2f84a7c3f03866ad3cd0a6decb2c54b", date: "2025-10-06 18:05:25 UTC", description: "bump VRL to latest sha", pr_number: 23947, scopes: ["deps"], type: "chore", breaking_change: false, author: "Pavlos Rontidis", files_count: 1, insertions_count: 4, deletions_count: 4}, + {sha: "a5fb0ecc511ad9fde1c79a68074f641bb916f84c", date: "2025-10-06 18:18:53 UTC", description: "show both author name and handle", pr_number: 23948, scopes: ["website"], type: "chore", breaking_change: false, author: "Pavlos Rontidis", files_count: 1, insertions_count: 15, deletions_count: 1}, + {sha: "8dff36b9c06df052b4158557fd0b82df5aedf5ad", date: "2025-10-07 06:24:52 UTC", description: "fix `external` type in `networks` in docker compose file", pr_number: 23942, scopes: ["integration test"], type: "fix", breaking_change: false, author: "Huang Chen-Yi", files_count: 2, insertions_count: 4, deletions_count: 3}, + {sha: "5aa7244511bb282658a87053033928c1f80fbbc1", date: "2025-10-06 20:03:56 UTC", description: "merge both cue.sh scripts", pr_number: 23951, scopes: ["dev"], type: "chore", breaking_change: false, author: "Thomas", files_count: 4, insertions_count: 32, deletions_count: 105}, + {sha: "5c1e1ee7d0543ab8e4b4af85fca46c6c10140e0f", date: "2025-10-06 22:24:57 UTC", description: "Fix incorrect cue.sh path", pr_number: 23953, scopes: ["dev"], type: "fix", breaking_change: false, author: "Thomas", files_count: 1, insertions_count: 1, deletions_count: 1}, + {sha: "59689d3c5ec29790eb5e109001a0c2993b062a39", date: "2025-10-07 10:34:24 UTC", description: "add new path 
related documents", pr_number: 23935, scopes: ["vrl"], type: "docs", breaking_change: false, author: "Huang Chen-Yi", files_count: 3, insertions_count: 166, deletions_count: 0}, + {sha: "152cc39965b516840cdc2bc7d66dda3e9b97415b", date: "2025-10-06 23:13:29 UTC", description: "remove check-version script", pr_number: 23940, scopes: ["ci"], type: "chore", breaking_change: false, author: "Thomas", files_count: 7, insertions_count: 1, deletions_count: 102}, + {sha: "cd2471ab3ed81cc55b13eb4f094af735a210b61e", date: "2025-10-06 23:24:58 UTC", description: "add highlights to typesense", pr_number: 23952, scopes: ["website"], type: "feat", breaking_change: false, author: "Thomas", files_count: 1, insertions_count: 20, deletions_count: 2}, + {sha: "90f59d5ca899ef4120840cdf2ffd4ddee3232328", date: "2025-10-07 00:05:28 UTC", description: "re-organize and improve aws guides", pr_number: 23954, scopes: ["website"], type: "chore", breaking_change: false, author: "Pavlos Rontidis", files_count: 5, insertions_count: 9, deletions_count: 5}, + {sha: "6b807ef93ce21c70e143c4fb4c67d8baaca14a9b", date: "2025-10-07 00:33:24 UTC", description: "only run tests when change conditions are met", pr_number: 23939, scopes: ["ci"], type: "feat", breaking_change: false, author: "Thomas", files_count: 2, insertions_count: 7, deletions_count: 0}, + {sha: "6a8dccc503d0a9386a83a52c274085d435ceda42", date: "2025-10-08 08:04:42 UTC", description: " respect color flag for tests", pr_number: 23957, scopes: ["unit tests"], type: "feat", breaking_change: false, author: "Ivan Rozhnovskiy", files_count: 7, insertions_count: 56, deletions_count: 31}, + {sha: "515a54850a8b1bbd2bc8b5469d966c65adb16c0a", date: "2025-10-08 17:04:37 UTC", description: " respect color flag for tests", pr_number: 23964, scopes: ["unit tests"], type: "feat", breaking_change: false, author: "Pavlos Rontidis", files_count: 7, insertions_count: 31, deletions_count: 56}, + {sha: "cdb9e3c2ea32101595d5129d980c1c68ac26260d", date: 
"2025-10-08 21:21:05 UTC", description: "run expensive Component Features check weekly", pr_number: 23963, scopes: ["ci"], type: "chore", breaking_change: false, author: "Pavlos Rontidis", files_count: 1, insertions_count: 2, deletions_count: 2}, + {sha: "f56549ca18ed3a6969ee70532eee50f0a85449b9", date: "2025-10-09 09:22:12 UTC", description: "fix warning for kubernetes logs source", pr_number: 23965, scopes: ["kubernetes_logs source"], type: "docs", breaking_change: false, author: "Huang Chen-Yi", files_count: 1, insertions_count: 4, deletions_count: 2}, + {sha: "b18ada85600888142703fe5f8276bc670d9330bc", date: "2025-10-08 21:43:05 UTC", description: "introduce `otlp` encoder ", pr_number: 23850, scopes: ["opentelemetry sink"], type: "feat", breaking_change: false, author: "Pavlos Rontidis", files_count: 48, insertions_count: 585, deletions_count: 110}, + {sha: "2527653b27302989b17666c3534285924c7106b8", date: "2025-10-09 10:30:05 UTC", description: "print error with `Debug` trait to improve the user diagnostic experience", pr_number: 23949, scopes: ["docker_logs source"], type: "feat", breaking_change: false, author: "Huang Chen-Yi", files_count: 1, insertions_count: 1, deletions_count: 1}, + {sha: "4044e43bb7d57cfdbdc5ec1edd0ac356ddc5e73b", date: "2025-10-09 06:30:10 UTC", description: "respect color flag for tests (recreated)", pr_number: 23966, scopes: ["unit tests"], type: "feat", breaking_change: false, author: "Ivan Rozhnovskiy", files_count: 7, insertions_count: 56, deletions_count: 31}, + {sha: "9c0dffb72d42bb12120d6b3af96e3541094268ef", date: "2025-10-08 23:57:58 UTC", description: "separate vector-top into it's own module", pr_number: 23969, scopes: ["dev"], type: "chore", breaking_change: false, author: "Thomas", files_count: 15, insertions_count: 113, deletions_count: 57}, + {sha: "33692fab7dfe897a7e4c9154a559017e4136e981", date: "2025-10-09 01:10:33 UTC", description: "use telemetrygen and delete custom log generator", pr_number: 23968, scopes: 
["tests"], type: "chore", breaking_change: false, author: "Pavlos Rontidis", files_count: 7, insertions_count: 66, deletions_count: 196}, + {sha: "68f0b4cf6a9c5fec461bf7b81617c889bcfc9ebb", date: "2025-10-09 01:18:58 UTC", description: "small vdev improvements and refactor", pr_number: 23912, scopes: ["dev"], type: "chore", breaking_change: false, author: "Thomas", files_count: 2, insertions_count: 12, deletions_count: 17}, + {sha: "3cd1a3135885f8e0007e7b73fb6eca137f02734d", date: "2025-10-10 01:46:36 UTC", description: "remove legacy checksum/fingerprinting", pr_number: 23874, scopes: ["file source"], type: "chore", breaking_change: false, author: "Thomas", files_count: 10, insertions_count: 68, deletions_count: 572}, + {sha: "a9d635a85c83072b744075e7e10cb38be1b06c79", date: "2025-10-10 18:33:07 UTC", description: "use ` instead of \" in aws guide", pr_number: 23983, scopes: ["website"], type: "fix", breaking_change: false, author: "Thomas", files_count: 1, insertions_count: 2, deletions_count: 2}, + {sha: "64cb8fa7af4e67f7df30713654bbd3cdab7869ad", date: "2025-10-10 21:41:54 UTC", description: "add maxwidth format option", pr_number: 23985, scopes: ["dev"], type: "chore", breaking_change: false, author: "Pavlos Rontidis", files_count: 1, insertions_count: 1, deletions_count: 0}, + {sha: "2275c69b7ae5180373f44763bcd9fb7b16025d53", date: "2025-10-10 23:47:29 UTC", description: "latest-vector_default.yaml case was silently failing", pr_number: 23984, scopes: ["ci"], type: "fix", breaking_change: false, author: "Pavlos Rontidis", files_count: 5, insertions_count: 204, deletions_count: 105}, + {sha: "cbfcb8c182442df88b12e5096af2284d68752d82", date: "2025-10-10 21:54:10 UTC", description: "Add control for metric name splitting", pr_number: 23986, scopes: ["datadog_agent source"], type: "enhancement", breaking_change: false, author: "Bruce Guenter", files_count: 5, insertions_count: 498, deletions_count: 369}, + {sha: "a0fd6992cf3a02d7acb9aa7fffcc5999782fde2d", date: 
"2025-10-11 00:48:00 UTC", description: "scripts/run-integration-test.sh must fail early (not skip)", pr_number: 23977, scopes: ["ci"], type: "fix", breaking_change: false, author: "Pavlos Rontidis", files_count: 2, insertions_count: 49, deletions_count: 21}, + {sha: "0b75760095b019d3ae2caed1e596c4ce4dec85fc", date: "2025-10-11 01:58:12 UTC", description: "misc tests now run in parallel", pr_number: 23987, scopes: ["ci"], type: "feat", breaking_change: false, author: "Pavlos Rontidis", files_count: 3, insertions_count: 70, deletions_count: 15}, + {sha: "2e128dc63a7d495c9a320f443643b65a89ccc794", date: "2025-10-11 00:15:39 UTC", description: "Export the `top` function for external reuse", pr_number: 23988, scopes: ["top"], type: "chore", breaking_change: false, author: "Bruce Guenter", files_count: 2, insertions_count: 2, deletions_count: 2}, + {sha: "ab8c4da85996c5c853017e38085a60e49ebb3fc5", date: "2025-10-13 18:02:00 UTC", description: "Add HEC indexer ack query compression", pr_number: 23823, scopes: ["splunk_hec sink"], type: "feat", breaking_change: false, author: "Scott Balmos", files_count: 2, insertions_count: 16, deletions_count: 11}, + {sha: "a121acf807d0acd0fda58d743ffb258094fecb54", date: "2025-10-14 19:35:55 UTC", description: "remove redundant setup steps", pr_number: 23999, scopes: ["ci"], type: "chore", breaking_change: false, author: "Pavlos Rontidis", files_count: 2, insertions_count: 34, deletions_count: 23}, + {sha: "7224315c3336c4d296006ba9997c4f985c6c9ceb", date: "2025-10-14 20:45:13 UTC", description: "add retry delay in sqs::Ingestor", pr_number: 23996, scopes: ["aws_sqs source"], type: "fix", breaking_change: false, author: "Pavlos Rontidis", files_count: 3, insertions_count: 80, deletions_count: 11}, + {sha: "c1b8027680e8f5cd3d8725f889219b159b544d93", date: "2025-10-14 20:49:36 UTC", description: "refactoring - move code out of mod.rs", pr_number: 24000, scopes: ["codecs"], type: "chore", breaking_change: false, author: "Pavlos Rontidis", 
files_count: 6, insertions_count: 811, deletions_count: 662}, + {sha: "e1ecf8e536e3ff424d32c25e5cff9ceac8f4ae27", date: "2025-10-15 01:18:36 UTC", description: "add 'use_json_names' options to protobuf codecs", pr_number: 24002, scopes: ["codecs"], type: "feat", breaking_change: false, author: "Pavlos Rontidis", files_count: 66, insertions_count: 847, deletions_count: 90}, + {sha: "d9e0e3af30f3dbefe50dfab97b8990b5e672e492", date: "2025-10-15 22:46:10 UTC", description: "improve output type sections", pr_number: 24006, scopes: ["website"], type: "chore", breaking_change: false, author: "Pavlos Rontidis", files_count: 16, insertions_count: 32, deletions_count: 32}, + {sha: "45093c9f8cc8769cd08eed6026c6dc7cfff44e77", date: "2025-10-15 22:39:31 UTC", description: "add flattened and unflattened key examples to datadog_search tests", pr_number: 24008, scopes: ["datadog service"], type: "chore", breaking_change: false, author: "Tess Neau", files_count: 1, insertions_count: 12, deletions_count: 0}, + {sha: "f8f23df24c7c0542cc0a935677c42b1fc52f248f", date: "2025-10-16 17:57:40 UTC", description: "enable wrap to help with long strings", pr_number: 24013, scopes: ["vrl playground"], type: "chore", breaking_change: false, author: "Pavlos Rontidis", files_count: 1, insertions_count: 1, deletions_count: 0}, + {sha: "90648c96440bd9bedec5c8a66eec6622e6220d15", date: "2025-10-16 18:33:50 UTC", description: "fix timezone dropdown pop up", pr_number: 24015, scopes: ["vrl playground"], type: "chore", breaking_change: false, author: "Pavlos Rontidis", files_count: 3, insertions_count: 13, deletions_count: 1}, + {sha: "73c468cd2df1b55d81ecafcc046019bdabfbf82b", date: "2025-10-16 18:57:31 UTC", description: "introduce OTLP decoder", pr_number: 24003, scopes: ["codecs"], type: "feat", breaking_change: false, author: "Pavlos Rontidis", files_count: 31, insertions_count: 598, deletions_count: 52}, + {sha: "fcb9dfdcc8f5f40afa3a34f9560831bd647762e3", date: "2025-10-16 22:23:55 UTC", 
description: "add e2e-tests should run filter", pr_number: 24016, scopes: ["ci"], type: "fix", breaking_change: false, author: "Pavlos Rontidis", files_count: 3, insertions_count: 57, deletions_count: 14}, + {sha: "1bed43c2fc907005f01da5ebfa65a8ec38641581", date: "2025-10-16 23:28:49 UTC", description: "remove build directives from datadog compose files", pr_number: 24018, scopes: ["e2e"], type: "chore", breaking_change: false, author: "Pavlos Rontidis", files_count: 2, insertions_count: 0, deletions_count: 4}, + {sha: "dc60da02e07c58ae8d96059b2228c64fcd680c05", date: "2025-10-17 17:58:23 UTC", description: "refactor to avoid temp files and leverage docker APIs", pr_number: 23976, scopes: ["vdev"], type: "chore", breaking_change: false, author: "Pavlos Rontidis", files_count: 41, insertions_count: 408, deletions_count: 238}, + {sha: "778b94446db40ec65df0758cd1c5192d401a9955", date: "2025-10-17 19:46:12 UTC", description: "add signal priority option to OTLP decoder", pr_number: 24019, scopes: ["codecs"], type: "chore", breaking_change: false, author: "Pavlos Rontidis", files_count: 27, insertions_count: 632, deletions_count: 60}, + {sha: "ea91a4d3661362cfdc9b570dfddc761a24556a1f", date: "2025-10-17 20:03:59 UTC", description: "parse_aws_alb_log strict_mode", pr_number: 24021, scopes: ["external docs"], type: "chore", breaking_change: false, author: "Pavlos Rontidis", files_count: 1, insertions_count: 50, deletions_count: 0}, + {sha: "01b736903adf5012a81ecc32ba15cc0d7cdad4d4", date: "2025-10-17 20:25:44 UTC", description: "make workflows run when yml files change", pr_number: 24017, scopes: ["ci"], type: "chore", breaking_change: false, author: "Thomas", files_count: 4, insertions_count: 82, deletions_count: 21}, + {sha: "8c909f2641c25abd9eceebe271fd99584076380a", date: "2025-10-17 23:20:38 UTC", description: "improve internal_log_rate_limit docs", pr_number: 24023, scopes: ["external docs"], type: "chore", breaking_change: false, author: "Pavlos Rontidis", 
files_count: 1, insertions_count: 14, deletions_count: 1}, + {sha: "09bdb9610a09c92c411133630071e5552b0870bf", date: "2025-10-20 20:50:33 UTC", description: "Run deny on nightly schedule", pr_number: 24029, scopes: ["ci"], type: "chore", breaking_change: false, author: "Thomas", files_count: 1, insertions_count: 2, deletions_count: 2}, + {sha: "359fc8a47420d3285a6dd4b83d9a6313a38de50b", date: "2025-10-20 21:07:33 UTC", description: "make labeler action glob ci files correctly", pr_number: 24030, scopes: ["ci"], type: "chore", breaking_change: false, author: "Thomas", files_count: 2, insertions_count: 2, deletions_count: 2}, + {sha: "c6fb99628eeed263a9265af08c378627d63ba36d", date: "2025-10-20 22:15:47 UTC", description: "add setup action", pr_number: 23707, scopes: ["ci"], type: "feat", breaking_change: false, author: "Thomas", files_count: 2, insertions_count: 259, deletions_count: 26}, + {sha: "0c99f1646c58d170f91db1827d9778e60f1dbbd4", date: "2025-10-22 08:39:16 UTC", description: "fix tls how it work", pr_number: 24036, scopes: ["external docs"], type: "docs", breaking_change: false, author: "Eric Huang", files_count: 1, insertions_count: 1, deletions_count: 1}, + {sha: "6f7ef56fab687db65808b24ffb8e54c67ee545f2", date: "2025-10-22 02:57:01 UTC", description: "add path configuration option", pr_number: 23956, scopes: ["prometheus_remote_write source"], type: "feat", breaking_change: false, author: "elohmeier", files_count: 3, insertions_count: 116, deletions_count: 1}, + {sha: "192dd25a3eb21e83912f054ae4a1b4d37cf3d3ba", date: "2025-10-21 22:52:13 UTC", description: "add workflow to build and push test runner image", pr_number: 24042, scopes: ["ci"], type: "feat", breaking_change: false, author: "Pavlos Rontidis", files_count: 2, insertions_count: 78, deletions_count: 0}, + {sha: "e162bda8216e4a9d1d73587b717766b28b08bacc", date: "2025-10-21 23:16:36 UTC", description: "add aggregated test detection outputs to changes.yml", pr_number: 24040, scopes: ["ci"], type: 
"feat", breaking_change: false, author: "Pavlos Rontidis", files_count: 1, insertions_count: 18, deletions_count: 7}, + {sha: "a9d244a7d5f07b3ee50dd8137ae0de8a5814e057", date: "2025-10-22 18:23:51 UTC", description: "fix environment image and add test", pr_number: 24033, scopes: ["dev"], type: "chore", breaking_change: false, author: "Thomas", files_count: 3, insertions_count: 44, deletions_count: 10}, + {sha: "770ae9643cb867b07444b2f9af6da899e1852fb0", date: "2025-10-22 19:57:01 UTC", description: "capture stderr and refactor", pr_number: 24045, scopes: ["vdev"], type: "chore", breaking_change: false, author: "Pavlos Rontidis", files_count: 1, insertions_count: 43, deletions_count: 32}, + {sha: "13f77b9815c56e0355a6cd52801cafb01a8d6a2e", date: "2025-10-23 02:38:29 UTC", description: "prevent crash on config reload with enrichment table sources", pr_number: 24014, scopes: ["enrichment tables"], type: "fix", breaking_change: false, author: "Ensar Sarajčić", files_count: 3, insertions_count: 140, deletions_count: 20}, + {sha: "2ead14508c2e6c235a025263d38626fd6970526b", date: "2025-10-23 08:41:02 UTC", description: "fix docker client with specified socket path", pr_number: 24026, scopes: ["docker_logs source"], type: "fix", breaking_change: false, author: "Eric Huang", files_count: 2, insertions_count: 5, deletions_count: 1}, + {sha: "cd5d44276e77ece2e58ec9cf2d2e25ad9298ccef", date: "2025-10-22 21:05:44 UTC", description: "guides and highlights author/date fixes", pr_number: 24047, scopes: ["website"], type: "chore", breaking_change: false, author: "Pavlos Rontidis", files_count: 10, insertions_count: 68, deletions_count: 9}, + {sha: "61e7bf349c2c5858f96050ab2e82e96704cf1bcb", date: "2025-10-22 21:16:39 UTC", description: "Remove references to soak-builder", pr_number: 24032, scopes: ["dev"], type: "chore", breaking_change: false, author: "Thomas", files_count: 1, insertions_count: 2, deletions_count: 3}, + {sha: "4cd5f675d3a40e648acd405353f1b4d3f631740a", date: 
"2025-10-22 21:48:13 UTC", description: "fix creation dates for a few md files", pr_number: 24048, scopes: ["website"], type: "fix", breaking_change: false, author: "Pavlos Rontidis", files_count: 2, insertions_count: 2, deletions_count: 2}, + {sha: "9b775a572b235d8e89b9f0b554fc6b296274b3d5", date: "2025-10-22 21:53:38 UTC", description: "add --reuse-image flag for CI optimization", pr_number: 24041, scopes: ["vdev"], type: "feat", breaking_change: false, author: "Pavlos Rontidis", files_count: 13, insertions_count: 127, deletions_count: 23}, + {sha: "4c34fc60899d104ed4c571660f6e8e761e6ec719", date: "2025-10-23 10:42:26 UTC", description: "improve error handling for journald source by spawn new stderr handler", pr_number: 23941, scopes: ["journald source"], type: "feat", breaking_change: false, author: "Eric Huang", files_count: 2, insertions_count: 55, deletions_count: 12}, + {sha: "c6a6a85e3b55463db3eada8e400e46daab0c30aa", date: "2025-10-23 01:03:29 UTC", description: "optimize integration tests by reusing test-runner images", pr_number: 24052, scopes: ["ci"], type: "feat", breaking_change: false, author: "Pavlos Rontidis", files_count: 3, insertions_count: 97, deletions_count: 5}, + {sha: "5e792078f7e75f692720adba1fdc21c29d4cb636", date: "2025-10-23 01:37:20 UTC", description: "remove CARGO_NET_GIT_FETCH_WITH_CLI", pr_number: 24055, scopes: ["ci"], type: "chore", breaking_change: false, author: "Thomas", files_count: 6, insertions_count: 0, deletions_count: 18}, + {sha: "6ec5b90bf8cf54ba6a5d1c780eab57651c7acbdd", date: "2025-10-23 18:38:26 UTC", description: "add RUST_BACKTRACE/CARGO_TERM_COLOR to setup action", pr_number: 24056, scopes: ["ci"], type: "feat", breaking_change: false, author: "Thomas", files_count: 2, insertions_count: 8, deletions_count: 2}, + {sha: "4bde7f97513ed48eb851fa26ae21363694ed4683", date: "2025-10-23 18:51:29 UTC", description: "Remove RUST_VERSION from int/e2e Dockerfile", pr_number: 24057, scopes: ["dev"], type: "chore", 
breaking_change: false, author: "Thomas", files_count: 2, insertions_count: 0, deletions_count: 2}, + {sha: "5e1c3bf2768af4130420f06cd742b4111ac2d561", date: "2025-10-24 01:24:55 UTC", description: "ignore conflicting metadata instead of returning HTTP 400", pr_number: 23773, scopes: ["prometheus_remote_write source"], type: "fix", breaking_change: false, author: "elohmeier", files_count: 5, insertions_count: 412, deletions_count: 41}, + {sha: "b08819ef49e221f0542da48291f4c9d84eae8994", date: "2025-10-23 20:15:04 UTC", description: " revert \"Remove RUST_VERSION from int/e2e Dockerfile (#24057)\"", pr_number: 24062, scopes: ["dev"], type: "chore", breaking_change: false, author: "Thomas", files_count: 2, insertions_count: 2, deletions_count: 0}, + {sha: "98a8d4119612c353e4a036e14b9f90030d9e4ae5", date: "2025-10-23 23:36:34 UTC", description: "Add missing step to last needs of integration.yml", pr_number: 24065, scopes: ["ci"], type: "fix", breaking_change: false, author: "Thomas", files_count: 1, insertions_count: 1, deletions_count: 0}, + {sha: "c7889dba2f59e6ba6f54ea532f65a9a759208584", date: "2025-10-24 00:07:13 UTC", description: "update internal_log_rate_limit tags", pr_number: 24050, scopes: ["internal_logs source"], type: "chore", breaking_change: false, author: "Pavlos Rontidis", files_count: 110, insertions_count: 122, deletions_count: 367}, + {sha: "573241a1fe4ed280eb24fefbc3e927d236e4e2a3", date: "2025-10-24 00:29:21 UTC", description: "bump fakeintake version (updated sha)", pr_number: 23922, scopes: ["deps"], type: "chore", breaking_change: false, author: "Pavlos Rontidis", files_count: 1, insertions_count: 2, deletions_count: 2}, + {sha: "b9737f74a94260221298b1e29a06de8208825ad4", date: "2025-10-25 00:51:44 UTC", description: "reload transforms with external VRL on SIGHUP", pr_number: 23898, scopes: ["remap transform"], type: "feat", breaking_change: false, author: "Andrey Shibalov", files_count: 5, insertions_count: 34, deletions_count: 1}, + {sha: 
"f3d26082c81a0dc75c05c1d3dd432278ff3208b4", date: "2025-10-25 05:34:41 UTC", description: "watch-config file events handling", pr_number: 23899, scopes: ["config"], type: "fix", breaking_change: false, author: "Andrey Shibalov", files_count: 3, insertions_count: 32, deletions_count: 9}, + {sha: "52a1c65e96ff295094879634fb14ce33f0d2d6b8", date: "2025-10-24 23:24:02 UTC", description: "fix HTTP not decompressing payloads", pr_number: 24068, scopes: ["opentelemetry source"], type: "fix", breaking_change: false, author: "Pavlos Rontidis", files_count: 5, insertions_count: 99, deletions_count: 59}, + {sha: "825f4a332846a806eaf9584d829a4e4f4241ae54", date: "2025-10-27 17:53:32 UTC", description: "import only if flag is set", pr_number: 24082, scopes: ["prometheus_remote_write source"], type: "fix", breaking_change: false, author: "Thomas", files_count: 1, insertions_count: 2, deletions_count: 2}, + {sha: "834529fb97699cd89ffb9ef41377869e98a91dbe", date: "2025-10-27 19:31:09 UTC", description: "Remove RUST_VERSION from int/e2e Dockerfile", pr_number: 24083, scopes: ["dev"], type: "chore", breaking_change: false, author: "Thomas", files_count: 3, insertions_count: 8, deletions_count: 12}, + {sha: "a1ca14fb704d77d974a52c30bd9c82add879f0b8", date: "2025-10-27 20:16:44 UTC", description: "use builder pattern to avoid large list of arguments", pr_number: 24084, scopes: ["topology"], type: "chore", breaking_change: false, author: "Pavlos Rontidis", files_count: 7, insertions_count: 116, deletions_count: 48}, + {sha: "e618fdfd6b38b981a9cda3fdd3018d28744fe573", date: "2025-10-27 21:19:28 UTC", description: "reuse code from util/http/encoding.rs", pr_number: 24071, scopes: ["datadog_agent source"], type: "chore", breaking_change: false, author: "Pavlos Rontidis", files_count: 8, insertions_count: 25, deletions_count: 36}, + {sha: "e732a6ebddd8640a56eceb2283c343846d5a7621", date: "2025-10-27 22:01:36 UTC", description: "refactor utilization.rs and add tests", pr_number: 24085, 
scopes: ["internal_metrics source"], type: "chore", breaking_change: false, author: "Pavlos Rontidis", files_count: 4, insertions_count: 128, deletions_count: 26}, + {sha: "97c8d28c7765b3cf7dc519d60259703a0a9a0cfb", date: "2025-10-28 00:14:48 UTC", description: "corrects stop logic", pr_number: 24086, scopes: ["vdev"], type: "fix", breaking_change: false, author: "Pavlos Rontidis", files_count: 1, insertions_count: 2, deletions_count: 5}, + {sha: "db89076a80aa7e48c866be9f847020fe66ba3e0c", date: "2025-10-28 00:50:52 UTC", description: "add user facing change explanation in PR template", pr_number: 24070, scopes: ["dev"], type: "chore", breaking_change: false, author: "Thomas", files_count: 1, insertions_count: 2, deletions_count: 0}, + {sha: "e43d490fba57820ea6b39f3f15f4c46a6a815dc2", date: "2025-10-28 00:58:00 UTC", description: "use official squid image", pr_number: 24090, scopes: ["ci"], type: "chore", breaking_change: false, author: "Pavlos Rontidis", files_count: 1, insertions_count: 1, deletions_count: 1}, + {sha: "64aaccac438efa6ab03bfc1f9d9a7dd60698d0c7", date: "2025-10-28 01:01:23 UTC", description: "disable config error log rate limit", pr_number: 24091, scopes: ["config"], type: "chore", breaking_change: false, author: "Pavlos Rontidis", files_count: 2, insertions_count: 2, deletions_count: 2}, + {sha: "22cb8c5d83e469390a4c85fb269434dfd35154de", date: "2025-10-28 06:06:03 UTC", description: "prevent negative utilization on late messages", pr_number: 24073, scopes: ["metrics"], type: "fix", breaking_change: false, author: "Ensar Sarajčić", files_count: 2, insertions_count: 54, deletions_count: 4}, + {sha: "f6be7db2b3d2e357316d3e151ff410f8631118ba", date: "2025-10-28 21:59:44 UTC", description: "Fix local mqtt int test", pr_number: 24096, scopes: ["dev"], type: "fix", breaking_change: false, author: "Thomas", files_count: 2, insertions_count: 2, deletions_count: 0}, + {sha: "11c55214bec4ae90c83dc12b00d817dbb0fb9ccb", date: "2025-10-29 18:21:44 UTC", 
description: "run mqtt int tests", pr_number: 24102, scopes: ["ci"], type: "fix", breaking_change: false, author: "Thomas", files_count: 1, insertions_count: 1, deletions_count: 1}, + {sha: "caf159276d07d8993c4b0a59f51f61696b2f7d23", date: "2025-10-29 18:45:55 UTC", description: "use one dockerfile for e2e and int", pr_number: 24101, scopes: ["vdev"], type: "chore", breaking_change: false, author: "Pavlos Rontidis", files_count: 7, insertions_count: 43, deletions_count: 65}, + {sha: "31e8d2e03703c9ae879533d82e0112ba16b28165", date: "2025-10-29 21:27:08 UTC", description: "cache vdev", pr_number: 24103, scopes: ["ci"], type: "chore", breaking_change: false, author: "Pavlos Rontidis", files_count: 5, insertions_count: 56, deletions_count: 5}, + {sha: "6547b81f2a807f411bd81f65059cdea53e5976ec", date: "2025-10-29 22:56:10 UTC", description: "check modified files only for style", pr_number: 24106, scopes: ["dev"], type: "chore", breaking_change: false, author: "Pavlos Rontidis", files_count: 3, insertions_count: 48, deletions_count: 7}, + {sha: "9fdced84af203e07ba81e8052b634314b8b8e42d", date: "2025-10-30 18:02:29 UTC", description: "run K8s e2e test suite only on MQ", pr_number: 24110, scopes: ["ci"], type: "chore", breaking_change: false, author: "Thomas", files_count: 1, insertions_count: 3, deletions_count: 3}, + {sha: "fb74a1eab52c20b985b63ebd39d013b6f64c56c0", date: "2025-10-30 23:55:36 UTC", description: "prevent utilization metric loss on configuration reload", pr_number: 24080, scopes: ["metrics"], type: "fix", breaking_change: false, author: "Ensar Sarajčić", files_count: 4, insertions_count: 114, deletions_count: 45}, + {sha: "bfaefdc8d4bbf0d97cdc848e020f1559b9f37383", date: "2025-10-30 19:57:41 UTC", description: "vdev build on cache miss", pr_number: 24113, scopes: ["ci"], type: "fix", breaking_change: false, author: "Pavlos Rontidis", files_count: 1, insertions_count: 1, deletions_count: 1}, + {sha: "5d70d07c3806f09ddf0597374de4553ed1394399", date: 
"2025-10-30 19:51:26 UTC", description: "add opentelemetry metrics e2e tests", pr_number: 24109, scopes: ["dev"], type: "feat", breaking_change: false, author: "Thomas", files_count: 15, insertions_count: 707, deletions_count: 117}, + {sha: "ae8ad712906742fd77f38f58120db6af27cd757b", date: "2025-10-30 20:06:05 UTC", description: "multicast_and_unicast_udp_message no longer hangs on macOS", pr_number: 24112, scopes: ["dev"], type: "fix", breaking_change: false, author: "Pavlos Rontidis", files_count: 1, insertions_count: 8, deletions_count: 3}, + {sha: "d8abed57442322105ad05992368ec98f4c3227f6", date: "2025-10-30 21:47:38 UTC", description: "parallelize e2e tests (ci-integration-review)", pr_number: 24115, scopes: ["ci"], type: "chore", breaking_change: false, author: "Pavlos Rontidis", files_count: 1, insertions_count: 10, deletions_count: 27}, + {sha: "e486428e05061d4810ea943250afbe5167e08d97", date: "2025-10-30 21:22:52 UTC", description: "fix journald tests for local macOS", pr_number: 24114, scopes: ["dev"], type: "fix", breaking_change: false, author: "Pavlos Rontidis", files_count: 1, insertions_count: 5, deletions_count: 3}, + {sha: "42f71067ca5a9c9f989578bbce90ab84b503ecaf", date: "2025-10-30 21:49:31 UTC", description: "aws-kinesis-firehose tests", pr_number: 24117, scopes: ["dev"], type: "fix", breaking_change: false, author: "Pavlos Rontidis", files_count: 1, insertions_count: 1, deletions_count: 1}, + {sha: "086d8f3c928167e5290647a205132e3466549412", date: "2025-10-30 23:05:57 UTC", description: "add always build option to scripts/run-integration-test.sh", pr_number: 24120, scopes: ["dev"], type: "chore", breaking_change: false, author: "Pavlos Rontidis", files_count: 1, insertions_count: 9, deletions_count: 4}, + {sha: "d6421e32e38ed924893cc94ec3959c09fde16c33", date: "2025-10-31 19:26:35 UTC", description: "Update `dd-rust-license-tool` to v1.0.4", pr_number: 24122, scopes: ["deps"], type: "chore", breaking_change: false, author: "Thomas", 
files_count: 3, insertions_count: 112, deletions_count: 2}, + {sha: "8c5af6208d673f488e980ae866ae881946710e78", date: "2025-11-02 01:40:32 UTC", description: "fix path in datadog-metrics e2e test.yaml", pr_number: 24127, scopes: ["ci"], type: "fix", breaking_change: false, author: "Thomas", files_count: 1, insertions_count: 1, deletions_count: 1}, + {sha: "bd9b87700a4178decb54547bc158385156ad96f9", date: "2025-11-04 11:37:00 UTC", description: "Buffer counter underflowed (#23872)", pr_number: 23973, scopes: ["instrument"], type: "fix", breaking_change: false, author: "silas.u", files_count: 2, insertions_count: 22, deletions_count: 23}, + {sha: "5cf227e646734e13b47de0d559c353b20aae3461", date: "2025-11-03 22:37:13 UTC", description: "vdev caching", pr_number: 24126, scopes: ["ci"], type: "fix", breaking_change: false, author: "Thomas", files_count: 1, insertions_count: 20, deletions_count: 3}, + {sha: "aef66cfae8f68a7006b9c1cebba9ff022e0520da", date: "2025-11-04 11:57:01 UTC", description: "bump `avro-rs` crate to improve avro encoding error", pr_number: 24119, scopes: ["codecs"], type: "feat", breaking_change: false, author: "Eric Huang", files_count: 5, insertions_count: 210, deletions_count: 55}, + ] +} diff --git a/website/cue/reference/versions.cue b/website/cue/reference/versions.cue index 71f50b879de41..35a03f4948505 100644 --- a/website/cue/reference/versions.cue +++ b/website/cue/reference/versions.cue @@ -2,6 +2,7 @@ package metadata // This has to be maintained manually because there's currently no way to sort versions programmatically versions: [string, ...string] & [ + "0.51.0", "0.50.0", "0.49.0", "0.48.0", From dc09a9a8f06c01897493f301522dbbf95225fcef Mon Sep 17 00:00:00 2001 From: Pavlos Rontidis Date: Tue, 4 Nov 2025 15:13:53 -0500 Subject: [PATCH 042/227] chore(vdev): move all utils in a new utils folder (#24143) * chore(vdev): move all utils in a new utils folder * move remaining files to utils dir * move remaining files to utils dir * fmt --- 
vdev/src/app.rs | 10 +- vdev/src/commands/build/publish_metadata.rs | 6 +- vdev/src/commands/build/vector.rs | 2 +- vdev/src/commands/check/component_docs.rs | 2 +- vdev/src/commands/check/component_features.rs | 2 +- vdev/src/commands/check/markdown.rs | 2 +- vdev/src/commands/check/rust.rs | 2 +- vdev/src/commands/check/scripts.rs | 2 +- vdev/src/commands/compose_tests/show.rs | 2 +- vdev/src/commands/config/find.rs | 2 +- vdev/src/commands/config/set/org.rs | 2 +- vdev/src/commands/config/set/repo.rs | 2 +- vdev/src/commands/crate_versions.rs | 4 +- vdev/src/commands/features.rs | 2 +- vdev/src/commands/info.rs | 2 +- vdev/src/commands/meta/starship.rs | 2 +- vdev/src/commands/release/channel.rs | 2 +- vdev/src/commands/release/github.rs | 4 +- vdev/src/commands/release/homebrew.rs | 2 +- vdev/src/commands/release/prepare.rs | 13 +-- vdev/src/commands/release/push.rs | 2 +- vdev/src/commands/run.rs | 2 +- vdev/src/commands/status.rs | 2 +- vdev/src/commands/test.rs | 2 +- vdev/src/main.rs | 18 +-- vdev/src/testing/build.rs | 8 +- vdev/src/testing/config.rs | 7 +- vdev/src/testing/integration.rs | 4 +- vdev/src/testing/runner.rs | 7 +- vdev/src/util.rs | 110 ------------------ vdev/src/utils/cargo.rs | 38 ++++++ vdev/src/utils/command.rs | 48 ++++++++ vdev/src/{ => utils}/config.rs | 0 vdev/src/{ => utils}/environment.rs | 0 vdev/src/{ => utils}/features.rs | 0 vdev/src/{ => utils}/git.rs | 18 ++- vdev/src/{ => utils}/macros.rs | 0 vdev/src/utils/mod.rs | 32 +++++ vdev/src/utils/paths.rs | 51 ++++++++ vdev/src/{ => utils}/platform.rs | 0 40 files changed, 242 insertions(+), 174 deletions(-) delete mode 100644 vdev/src/util.rs create mode 100644 vdev/src/utils/cargo.rs create mode 100644 vdev/src/utils/command.rs rename vdev/src/{ => utils}/config.rs (100%) rename vdev/src/{ => utils}/environment.rs (100%) rename vdev/src/{ => utils}/features.rs (100%) rename vdev/src/{ => utils}/git.rs (92%) rename vdev/src/{ => utils}/macros.rs (100%) create mode 100644 
vdev/src/utils/mod.rs create mode 100644 vdev/src/utils/paths.rs rename vdev/src/{ => utils}/platform.rs (100%) diff --git a/vdev/src/app.rs b/vdev/src/app.rs index 7511fac95e9ca..1a67832593b52 100644 --- a/vdev/src/app.rs +++ b/vdev/src/app.rs @@ -13,7 +13,7 @@ use anyhow::{Context as _, Result, bail}; use indicatif::{ProgressBar, ProgressStyle}; use log::LevelFilter; -use crate::{config::Config, git, platform, util}; +use crate::utils::{self, config::Config, platform}; // Use the `bash` interpreter included as part of the standard `git` install for our default shell // if nothing is specified in the environment. @@ -50,12 +50,12 @@ pub fn set_repo_dir() -> Result<()> { } pub fn version() -> Result { - let mut version = util::get_version()?; + let mut version = utils::cargo::get_version()?; - let channel = util::get_channel(); + let channel = utils::git::get_channel(); if channel == "release" { - let head = util::git_head()?; + let head = utils::git::git_head()?; if !head.status.success() { let error = String::from_utf8_lossy(&head.stderr); bail!("Error running `git describe`:\n{error}"); @@ -69,7 +69,7 @@ pub fn version() -> Result { // extend version for custom builds if not already } else if channel == "custom" && !version.contains("custom") { - let sha = git::get_git_sha()?; + let sha = utils::git::get_git_sha()?; // use '.' instead of '-' or '_' to avoid issues with rpm and deb package naming // format requirements. 
diff --git a/vdev/src/commands/build/publish_metadata.rs b/vdev/src/commands/build/publish_metadata.rs index 41d1bce66d0b8..c9fd8e1222998 100644 --- a/vdev/src/commands/build/publish_metadata.rs +++ b/vdev/src/commands/build/publish_metadata.rs @@ -1,7 +1,7 @@ use anyhow::Result; use chrono::prelude::*; -use crate::{git, util}; +use crate::utils::{cargo, git}; use std::env; use std::fs::OpenOptions; use std::io::{self, Write}; @@ -19,14 +19,14 @@ pub struct Cli {} impl Cli { pub fn exec(self) -> Result<()> { // Generate the Vector version and build description. - let version = util::get_version()?; + let version = cargo::get_version()?; let git_sha = git::get_git_sha()?; let current_date = Local::now().naive_local().to_string(); let build_desc = format!("{git_sha} {current_date}"); // Figure out what our release channel is. - let channel = util::get_channel(); + let channel = git::get_channel(); let mut output_file: Box = match env::var("GITHUB_OUTPUT") { Ok(file_name) if !file_name.is_empty() => { diff --git a/vdev/src/commands/build/vector.rs b/vdev/src/commands/build/vector.rs index 100357386b2f3..d06a8206f1b48 100644 --- a/vdev/src/commands/build/vector.rs +++ b/vdev/src/commands/build/vector.rs @@ -3,7 +3,7 @@ use std::process::Command; use anyhow::Result; use clap::Args; -use crate::{app::CommandExt as _, platform}; +use crate::{app::CommandExt as _, utils::platform}; /// Build the `vector` executable. 
#[derive(Args, Debug)] diff --git a/vdev/src/commands/check/component_docs.rs b/vdev/src/commands/check/component_docs.rs index fecf76b4d836e..85a8ab280b1d8 100644 --- a/vdev/src/commands/check/component_docs.rs +++ b/vdev/src/commands/check/component_docs.rs @@ -1,4 +1,4 @@ -use crate::git; +use crate::utils::git; use anyhow::{Ok, Result}; /// Check that component documentation is up-to-date diff --git a/vdev/src/commands/check/component_features.rs b/vdev/src/commands/check/component_features.rs index ca8f3398d465d..873ab99d99c65 100644 --- a/vdev/src/commands/check/component_features.rs +++ b/vdev/src/commands/check/component_features.rs @@ -2,7 +2,7 @@ use std::env; use anyhow::Result; -use crate::{app, util::CargoToml}; +use crate::{app, utils::cargo::CargoToml}; const CARGO: &str = "cargo"; const BASE_ARGS: [&str; 5] = [ diff --git a/vdev/src/commands/check/markdown.rs b/vdev/src/commands/check/markdown.rs index d992885d38550..c5009da8e9a81 100644 --- a/vdev/src/commands/check/markdown.rs +++ b/vdev/src/commands/check/markdown.rs @@ -1,7 +1,7 @@ use anyhow::Result; use crate::app; -use crate::git::git_ls_files; +use crate::utils::git::git_ls_files; /// Check that markdown is styled properly #[derive(clap::Args, Debug)] diff --git a/vdev/src/commands/check/rust.rs b/vdev/src/commands/check/rust.rs index 4ae904b080ec6..aaa24365e849e 100644 --- a/vdev/src/commands/check/rust.rs +++ b/vdev/src/commands/check/rust.rs @@ -1,7 +1,7 @@ use anyhow::Result; use std::ffi::OsString; -use crate::{app, git, util::ChainArgs as _}; +use crate::{app, utils::command::ChainArgs as _, utils::git}; /// Check the Rust code for errors #[derive(clap::Args, Debug)] diff --git a/vdev/src/commands/check/scripts.rs b/vdev/src/commands/check/scripts.rs index a2c3778cbeef4..0774be4d38616 100644 --- a/vdev/src/commands/check/scripts.rs +++ b/vdev/src/commands/check/scripts.rs @@ -1,6 +1,6 @@ use anyhow::Result; -use crate::{app, git}; +use crate::{app, utils::git}; /// Check that shell 
scripts do not have common mistakes #[derive(clap::Args, Debug)] diff --git a/vdev/src/commands/compose_tests/show.rs b/vdev/src/commands/compose_tests/show.rs index 0fcc831e6cb3b..ecdaac717f034 100644 --- a/vdev/src/commands/compose_tests/show.rs +++ b/vdev/src/commands/compose_tests/show.rs @@ -1,7 +1,7 @@ use anyhow::Result; use std::collections::HashSet; -use crate::{environment::Environment, testing::config::ComposeTestConfig}; +use crate::{testing::config::ComposeTestConfig, utils::environment::Environment}; use super::active_projects::{find_active_environment, load_active_projects}; diff --git a/vdev/src/commands/config/find.rs b/vdev/src/commands/config/find.rs index 60d1e2b6c192d..25fc3756f5e78 100644 --- a/vdev/src/commands/config/find.rs +++ b/vdev/src/commands/config/find.rs @@ -1,7 +1,7 @@ use anyhow::Result; use clap::Args; -use crate::config; +use crate::utils::config; /// Locate the config file #[derive(Args, Debug)] diff --git a/vdev/src/commands/config/set/org.rs b/vdev/src/commands/config/set/org.rs index fc6e37fb02c4f..e65dab551b5a7 100644 --- a/vdev/src/commands/config/set/org.rs +++ b/vdev/src/commands/config/set/org.rs @@ -1,7 +1,7 @@ use anyhow::Result; use clap::Args; -use crate::{app, config}; +use crate::{app, utils::config}; /// Set the target Datadog org #[derive(Args, Debug)] diff --git a/vdev/src/commands/config/set/repo.rs b/vdev/src/commands/config/set/repo.rs index 521ddc84edac5..fd6fc7ef68807 100644 --- a/vdev/src/commands/config/set/repo.rs +++ b/vdev/src/commands/config/set/repo.rs @@ -1,7 +1,7 @@ use anyhow::Result; use clap::Args; -use crate::{app, config, platform}; +use crate::{app, utils::{config, platform}}; /// Set the path to the Vector repository #[derive(Args, Debug)] diff --git a/vdev/src/commands/crate_versions.rs b/vdev/src/commands/crate_versions.rs index f12442967effa..fd7cbcc4edd92 100644 --- a/vdev/src/commands/crate_versions.rs +++ b/vdev/src/commands/crate_versions.rs @@ -5,7 +5,7 @@ use clap::Args; use 
itertools::Itertools as _; use regex::Regex; -use crate::{app::CommandExt as _, util}; +use crate::{app::CommandExt as _, utils}; /// Show information about crates versions pulled in by all dependencies #[derive(Args, Debug)] @@ -46,7 +46,7 @@ impl Cli { } let width = versions.keys().map(String::len).max().unwrap_or(0).max(7); - if *util::IS_A_TTY { + if *utils::IS_A_TTY { println!("{:width$} Version(s)", "Package"); println!("{:width$} ----------", "-------"); } diff --git a/vdev/src/commands/features.rs b/vdev/src/commands/features.rs index 5445e5d4a2533..657e205bade9c 100644 --- a/vdev/src/commands/features.rs +++ b/vdev/src/commands/features.rs @@ -3,7 +3,7 @@ use std::path::PathBuf; use anyhow::Result; use clap::Args; -use crate::features; +use crate::utils::features; /// Extract the set of features required to run a given config #[derive(Args, Debug)] diff --git a/vdev/src/commands/info.rs b/vdev/src/commands/info.rs index e25992a91ab56..91f2aac6eff75 100644 --- a/vdev/src/commands/info.rs +++ b/vdev/src/commands/info.rs @@ -2,7 +2,7 @@ use anyhow::Result; use clap::Args; use crate::testing::docker::CONTAINER_TOOL; -use crate::{app, config, platform}; +use crate::{app, utils::{config, platform}}; /// Show `vdev` command configuration #[derive(Args, Debug)] diff --git a/vdev/src/commands/meta/starship.rs b/vdev/src/commands/meta/starship.rs index e0d3e12fd6240..5cc663f79da91 100644 --- a/vdev/src/commands/meta/starship.rs +++ b/vdev/src/commands/meta/starship.rs @@ -1,7 +1,7 @@ use anyhow::Result; use clap::Args; -use crate::util::CargoToml; +use crate::utils::cargo::CargoToml; /// Custom Starship prompt plugin #[derive(Args, Debug)] diff --git a/vdev/src/commands/release/channel.rs b/vdev/src/commands/release/channel.rs index 5a21e6f544cad..f0f67375ad98f 100644 --- a/vdev/src/commands/release/channel.rs +++ b/vdev/src/commands/release/channel.rs @@ -1,6 +1,6 @@ use anyhow::Result; -use crate::util::get_channel; +use crate::utils::git::get_channel; /// Provide 
the release channel (release/nightly/custom). /// This command is intended for use only within GitHub build workflows. diff --git a/vdev/src/commands/release/github.rs b/vdev/src/commands/release/github.rs index c529b80635c46..f03211e7cbed7 100644 --- a/vdev/src/commands/release/github.rs +++ b/vdev/src/commands/release/github.rs @@ -1,5 +1,5 @@ use crate::app::CommandExt as _; -use crate::util; +use crate::utils::cargo; use anyhow::{anyhow, Ok, Result}; use glob::glob; use std::process::Command; @@ -20,7 +20,7 @@ impl Cli { .collect::, _>>() .map_err(|e| anyhow!("failed to turn path into string: {:?}", e))?; - let version = util::get_version()?; + let version = cargo::get_version()?; let mut command = Command::new("gh"); command.in_repo(); command.args( diff --git a/vdev/src/commands/release/homebrew.rs b/vdev/src/commands/release/homebrew.rs index a041916ef431f..3191be355ab52 100644 --- a/vdev/src/commands/release/homebrew.rs +++ b/vdev/src/commands/release/homebrew.rs @@ -1,4 +1,4 @@ -use crate::git; +use crate::utils::git; use anyhow::Result; use hex; use reqwest; diff --git a/vdev/src/commands/release/prepare.rs b/vdev/src/commands/release/prepare.rs index cd518a921a24d..a60130c049967 100644 --- a/vdev/src/commands/release/prepare.rs +++ b/vdev/src/commands/release/prepare.rs @@ -1,8 +1,7 @@ #![allow(clippy::print_stdout)] #![allow(clippy::print_stderr)] -use crate::git; -use crate::util::run_command; +use crate::utils::{git, paths}; use anyhow::{anyhow, Result}; use reqwest::blocking::Client; use semver::Version; @@ -15,6 +14,7 @@ use std::process::Command; use std::{env, fs}; use toml::map::Map; use toml::Value; +use crate::utils::command::run_command; const ALPINE_PREFIX: &str = "FROM docker.io/alpine:"; const ALPINE_DOCKERFILE: &str = "distribution/docker/alpine/Dockerfile"; @@ -57,9 +57,8 @@ struct Prepare { impl Cli { pub fn exec(self) -> Result<()> { - - let repo_root = get_repo_root(); - env::set_current_dir(repo_root.clone())?; + let repo_root = 
paths::find_repo_root()?; + env::set_current_dir(&repo_root)?; let prepare = Prepare { new_vector_version: self.version.clone(), @@ -389,10 +388,6 @@ impl Prepare { // FREE FUNCTIONS AFTER THIS LINE -fn get_repo_root() -> PathBuf { - Path::new(env!("CARGO_MANIFEST_DIR")).parent().unwrap().to_path_buf() -} - fn get_latest_version_from_vector_tags() -> Result { let tags = run_command("git tag --list --sort=-v:refname"); let latest_tag = tags diff --git a/vdev/src/commands/release/push.rs b/vdev/src/commands/release/push.rs index 66d3b5fae7fb9..12c41f8e345a5 100644 --- a/vdev/src/commands/release/push.rs +++ b/vdev/src/commands/release/push.rs @@ -2,7 +2,7 @@ use anyhow::Result; use clap::Args; use crate::app; -use crate::git; +use crate::utils::git; use itertools::Itertools; /// Pushes new versions produced by `make release` to the repository diff --git a/vdev/src/commands/run.rs b/vdev/src/commands/run.rs index f259134b28991..fe9092ca4774e 100644 --- a/vdev/src/commands/run.rs +++ b/vdev/src/commands/run.rs @@ -3,7 +3,7 @@ use std::{path::PathBuf, process::Command}; use anyhow::{bail, Result}; use clap::Args; -use crate::{app::CommandExt as _, features}; +use crate::{app::CommandExt as _, utils::features}; /// Run `vector` with the minimum set of features required by the config file #[derive(Args, Debug)] diff --git a/vdev/src/commands/status.rs b/vdev/src/commands/status.rs index 640e02f667723..a4010effc9a26 100644 --- a/vdev/src/commands/status.rs +++ b/vdev/src/commands/status.rs @@ -1,7 +1,7 @@ use anyhow::Result; use clap::Args; -use crate::git; +use crate::utils::git; /// Show information about the current environment #[derive(Args, Debug)] diff --git a/vdev/src/commands/test.rs b/vdev/src/commands/test.rs index 88d277a527f3a..8edaec3a84bf3 100644 --- a/vdev/src/commands/test.rs +++ b/vdev/src/commands/test.rs @@ -2,8 +2,8 @@ use anyhow::Result; use clap::Args; use std::collections::BTreeMap; -use crate::platform; use 
crate::testing::runner::get_agent_test_runner; +use crate::utils::platform; /// Execute tests #[derive(Args, Debug)] diff --git a/vdev/src/main.rs b/vdev/src/main.rs index da9bd1932033e..9bfec21b6f7be 100644 --- a/vdev/src/main.rs +++ b/vdev/src/main.rs @@ -7,18 +7,11 @@ )] #[macro_use] -mod macros; +mod utils; + mod app; mod commands; -mod config; -mod environment; -mod features; -mod git; -mod platform; mod testing; -mod util; - -use std::env; use anyhow::Result; use clap::Parser; @@ -28,13 +21,10 @@ fn main() -> Result<()> { let cli = Cli::parse(); app::set_global_verbosity(cli.verbose.log_level_filter()); - app::set_global_config(config::load()?); + app::set_global_config(utils::config::load()?); let path = if app::config().repo.is_empty() { - env::current_dir() - .expect("Could not determine current directory") - .display() - .to_string() + utils::paths::find_repo_root()?.display().to_string() } else { app::config().repo.clone() }; diff --git a/vdev/src/testing/build.rs b/vdev/src/testing/build.rs index bddf44ce8f641..384accce47da7 100644 --- a/vdev/src/testing/build.rs +++ b/vdev/src/testing/build.rs @@ -6,9 +6,11 @@ use crate::testing::test_runner_dockerfile; use crate::{ app, app::CommandExt, - environment::{Environment, extract_present}, testing::{config::RustToolchainConfig, docker::docker_command}, - util::IS_A_TTY, + utils::{ + self, + environment::{Environment, extract_present}, + }, }; pub const ALL_INTEGRATIONS_FEATURE_FLAG: &str = "all-integration-tests"; @@ -32,7 +34,7 @@ pub fn prepare_build_command( command.current_dir(app::path()); // If we're attached to a TTY, show fancy progress - if *IS_A_TTY { + if *utils::IS_A_TTY { command.args(["--progress", "tty"]); } diff --git a/vdev/src/testing/config.rs b/vdev/src/testing/config.rs index e40e282677a4c..4a1280406ef1a 100644 --- a/vdev/src/testing/config.rs +++ b/vdev/src/testing/config.rs @@ -10,7 +10,10 @@ use itertools::{self, Itertools}; use serde::{Deserialize, Serialize}; use serde_yaml::Value; 
-use crate::{app, environment::Environment, util}; +use crate::{ + app, + utils::{environment::Environment, paths}, +}; const FILE_NAME: &str = "test.yaml"; const CONFIG_SUBDIR: &str = "config"; @@ -246,7 +249,7 @@ impl ComposeTestConfig { } else { [entry.path().to_str().unwrap(), FILE_NAME].iter().collect() }; - if util::exists(&config_file)? { + if paths::exists(&config_file)? { let config = Self::parse_file(&config_file)?; configs.insert(entry.file_name().into_string().unwrap(), config); } diff --git a/vdev/src/testing/integration.rs b/vdev/src/testing/integration.rs index d28f0eddea5b3..71294e7f81b90 100644 --- a/vdev/src/testing/integration.rs +++ b/vdev/src/testing/integration.rs @@ -13,11 +13,11 @@ use super::{ }; use crate::{ app::CommandExt as _, - environment::{Environment, extract_present, rename_environment_keys}, testing::{ build::ALL_INTEGRATIONS_FEATURE_FLAG, docker::{CONTAINER_TOOL, DOCKER_SOCKET}, }, + utils::environment::{Environment, extract_present, rename_environment_keys}, }; const NETWORK_ENV_VAR: &str = "VECTOR_NETWORK"; @@ -377,8 +377,8 @@ mod unix { use super::super::config::ComposeConfig; use crate::{ - environment::{Environment, resolve_placeholders}, testing::config::VolumeMount, + utils::environment::{Environment, resolve_placeholders}, }; /// Unix permissions mask to allow everybody to read a file diff --git a/vdev/src/testing/runner.rs b/vdev/src/testing/runner.rs index 905bc9b395e16..cbe2cfcdfa50b 100644 --- a/vdev/src/testing/runner.rs +++ b/vdev/src/testing/runner.rs @@ -3,14 +3,17 @@ use std::{collections::HashSet, env, process::Command}; use super::config::{IntegrationRunnerConfig, RustToolchainConfig}; use crate::testing::test_runner_dockerfile; +use crate::utils::IS_A_TTY; use crate::{ app::{self, CommandExt as _}, - environment::{Environment, append_environment_variables}, testing::{ build::prepare_build_command, docker::{DOCKER_SOCKET, docker_command}, }, - util::{ChainArgs as _, IS_A_TTY}, + utils::{ + command::ChainArgs as 
_, + environment::{Environment, append_environment_variables}, + }, }; const MOUNT_PATH: &str = "/home/vector"; diff --git a/vdev/src/util.rs b/vdev/src/util.rs deleted file mode 100644 index 1eb8a2b9cc575..0000000000000 --- a/vdev/src/util.rs +++ /dev/null @@ -1,110 +0,0 @@ -#![allow(clippy::print_stderr)] -#![allow(clippy::print_stdout)] - -use std::{ - collections::BTreeMap, - ffi::{OsStr, OsString}, - fmt::Debug, - fs, - io::{ErrorKind, IsTerminal}, - path::Path, - process, - process::{Command, Output}, - sync::LazyLock, -}; - -use anyhow::{Context as _, Result}; -use serde::Deserialize; -use serde_json::Value; - -pub static IS_A_TTY: LazyLock = LazyLock::new(|| std::io::stdout().is_terminal()); - -#[derive(Deserialize)] -pub struct CargoTomlPackage { - pub version: String, -} - -/// The bits of the top-level `Cargo.toml` configuration that `vdev` uses to drive its features. -#[derive(Deserialize)] -pub struct CargoToml { - pub package: CargoTomlPackage, - pub features: BTreeMap, -} - -impl CargoToml { - pub fn load() -> Result { - let text = fs::read_to_string("Cargo.toml").context("Could not read `Cargo.toml`")?; - toml::from_str::(&text).context("Invalid contents in `Cargo.toml`") - } -} - -/// Read the version string from `Cargo.toml` -pub fn read_version() -> Result { - CargoToml::load().map(|cargo| cargo.package.version) -} - -/// Use the version provided by env vars or default to reading from `Cargo.toml`. 
-pub fn get_version() -> Result { - std::env::var("VERSION") - .or_else(|_| std::env::var("VECTOR_VERSION")) - .or_else(|_| read_version()) -} - -pub fn git_head() -> Result { - Command::new("git") - .args(["describe", "--exact-match", "--tags", "HEAD"]) - .output() - .context("Could not execute `git`") -} - -pub fn get_channel() -> String { - std::env::var("CHANNEL").unwrap_or_else(|_| "custom".to_string()) -} - -pub fn exists(path: impl AsRef + Debug) -> Result { - match fs::metadata(path.as_ref()) { - Ok(_) => Ok(true), - Err(error) if error.kind() == ErrorKind::NotFound => Ok(false), - Err(error) => Err(error).context(format!("Could not stat {path:?}")), - } -} - -pub trait ChainArgs { - fn chain_args>(&self, args: impl IntoIterator) -> Vec; -} - -impl> ChainArgs for Vec { - fn chain_args>(&self, args: impl IntoIterator) -> Vec { - self.iter() - .map(Into::into) - .chain(args.into_iter().map(Into::into)) - .collect() - } -} - -impl> ChainArgs for [T] { - fn chain_args>(&self, args: impl IntoIterator) -> Vec { - self.iter() - .map(Into::into) - .chain(args.into_iter().map(Into::into)) - .collect() - } -} - -pub fn run_command(cmd: &str) -> String { - let output = Command::new("sh") - .arg("-c") - .arg(cmd) - .output() - .expect("Failed to execute command"); - - if !output.status.success() { - eprintln!( - "Command failed: {cmd} - Error: {}", - String::from_utf8_lossy(&output.stderr) - ); - process::exit(1); - } - - String::from_utf8_lossy(&output.stdout).to_string() -} diff --git a/vdev/src/utils/cargo.rs b/vdev/src/utils/cargo.rs new file mode 100644 index 0000000000000..f60824315cb42 --- /dev/null +++ b/vdev/src/utils/cargo.rs @@ -0,0 +1,38 @@ +//! 
Cargo.toml parsing and version utilities + +use std::{collections::BTreeMap, fs}; + +use anyhow::{Context, Result}; +use serde::Deserialize; +use serde_json::Value; + +#[derive(Deserialize)] +pub struct CargoTomlPackage { + pub version: String, +} + +/// The bits of the top-level `Cargo.toml` configuration that `vdev` uses to drive its features. +#[derive(Deserialize)] +pub struct CargoToml { + pub package: CargoTomlPackage, + pub features: BTreeMap, +} + +impl CargoToml { + pub fn load() -> Result { + let text = fs::read_to_string("Cargo.toml").context("Could not read `Cargo.toml`")?; + toml::from_str::(&text).context("Invalid contents in `Cargo.toml`") + } +} + +/// Read the version string from `Cargo.toml` +pub fn read_version() -> Result { + CargoToml::load().map(|cargo| cargo.package.version) +} + +/// Use the version provided by env vars or default to reading from `Cargo.toml`. +pub fn get_version() -> Result { + std::env::var("VERSION") + .or_else(|_| std::env::var("VECTOR_VERSION")) + .or_else(|_| read_version()) +} diff --git a/vdev/src/utils/command.rs b/vdev/src/utils/command.rs new file mode 100644 index 0000000000000..00ab3cb82b566 --- /dev/null +++ b/vdev/src/utils/command.rs @@ -0,0 +1,48 @@ +//! 
Command execution utilities + +use std::{ + ffi::{OsStr, OsString}, + process::{self, Command}, +}; + +/// Trait for chaining command arguments +pub trait ChainArgs { + fn chain_args>(&self, args: impl IntoIterator) -> Vec; +} + +impl> ChainArgs for Vec { + fn chain_args>(&self, args: impl IntoIterator) -> Vec { + self.iter() + .map(Into::into) + .chain(args.into_iter().map(Into::into)) + .collect() + } +} + +impl> ChainArgs for [T] { + fn chain_args>(&self, args: impl IntoIterator) -> Vec { + self.iter() + .map(Into::into) + .chain(args.into_iter().map(Into::into)) + .collect() + } +} + +/// Run a shell command and return its output or exit on failure +pub fn run_command(cmd: &str) -> String { + let output = Command::new("sh") + .arg("-c") + .arg(cmd) + .output() + .expect("Failed to execute command"); + + if !output.status.success() { + eprintln!( + "Command failed: {cmd} - Error: {}", + String::from_utf8_lossy(&output.stderr) + ); + process::exit(1); + } + + String::from_utf8_lossy(&output.stdout).to_string() +} diff --git a/vdev/src/config.rs b/vdev/src/utils/config.rs similarity index 100% rename from vdev/src/config.rs rename to vdev/src/utils/config.rs diff --git a/vdev/src/environment.rs b/vdev/src/utils/environment.rs similarity index 100% rename from vdev/src/environment.rs rename to vdev/src/utils/environment.rs diff --git a/vdev/src/features.rs b/vdev/src/utils/features.rs similarity index 100% rename from vdev/src/features.rs rename to vdev/src/utils/features.rs diff --git a/vdev/src/git.rs b/vdev/src/utils/git.rs similarity index 92% rename from vdev/src/git.rs rename to vdev/src/utils/git.rs index 81bad95a84612..54216c76e2da6 100644 --- a/vdev/src/git.rs +++ b/vdev/src/utils/git.rs @@ -1,10 +1,25 @@ +//! 
Git utilities + use std::{collections::HashSet, process::Command}; -use anyhow::{Result, anyhow, bail}; +use anyhow::{Context, Result, anyhow, bail}; use git2::{BranchType, ErrorCode, Repository}; use crate::app::CommandExt as _; +/// Get the git HEAD tag if it exists +pub fn git_head() -> Result { + Command::new("git") + .args(["describe", "--exact-match", "--tags", "HEAD"]) + .output() + .context("Could not execute `git`") +} + +/// Get the release channel from environment or default to "custom" +pub fn get_channel() -> String { + std::env::var("CHANNEL").unwrap_or_else(|_| "custom".to_string()) +} + pub fn current_branch() -> Result { let output = run_and_check_output(&["rev-parse", "--abbrev-ref", "HEAD"])?; Ok(output.trim_end().to_string()) @@ -160,6 +175,7 @@ pub fn branch_exists(branch: &str) -> Result { Ok(exists) } + pub fn checkout_branch(branch_name: &str) -> Result<()> { let _output = run_and_check_output(&["checkout", branch_name])?; Ok(()) diff --git a/vdev/src/macros.rs b/vdev/src/utils/macros.rs similarity index 100% rename from vdev/src/macros.rs rename to vdev/src/utils/macros.rs diff --git a/vdev/src/utils/mod.rs b/vdev/src/utils/mod.rs new file mode 100644 index 0000000000000..33ea82e4491e4 --- /dev/null +++ b/vdev/src/utils/mod.rs @@ -0,0 +1,32 @@ +//! Utility modules for vdev +//! +//! This module provides various utilities organized by functionality: +//! - `paths`: Path operations and repository root detection +//! - `cargo`: Cargo.toml parsing and version management +//! - `git`: Git operations +//! - `command`: Command execution helpers +//! - `config`: Configuration management +//! - `environment`: Environment utilities +//! - `features`: Feature detection and management +//! - `macros`: Utility macros +//! 
- `platform`: Platform-specific utilities + +#![allow(clippy::print_stderr)] +#![allow(clippy::print_stdout)] + +use std::{io::IsTerminal, sync::LazyLock}; + +#[macro_use] +pub mod macros; + +pub mod cargo; +pub mod command; +pub mod config; +pub mod environment; +pub mod features; +pub mod git; +pub mod paths; +pub mod platform; + +/// Check if stdout is connected to a TTY +pub static IS_A_TTY: LazyLock = LazyLock::new(|| std::io::stdout().is_terminal()); diff --git a/vdev/src/utils/paths.rs b/vdev/src/utils/paths.rs new file mode 100644 index 0000000000000..412cf1781b487 --- /dev/null +++ b/vdev/src/utils/paths.rs @@ -0,0 +1,51 @@ +//! Path-related utilities + +use std::{ + env, + fmt::Debug, + fs, + io::ErrorKind, + path::{Path, PathBuf}, +}; + +use anyhow::{Context, Result}; + +/// Find the Vector repository root by searching upward for markers like .git or Cargo.toml +/// with a [workspace] section. +pub fn find_repo_root() -> Result { + let mut current = env::current_dir().context("Could not determine current directory")?; + + loop { + // Check for .git directory (most reliable marker) + if current.join(".git").is_dir() { + return Ok(current); + } + + // Check for Cargo.toml with workspace (Vector's root Cargo.toml has [workspace]) + let cargo_toml = current.join("Cargo.toml"); + if cargo_toml.is_file() + && let Ok(contents) = fs::read_to_string(&cargo_toml) + && contents.contains("[workspace]") + { + return Ok(current); + } + + // Move up one directory + if let Some(parent) = current.parent() { + current = parent.to_path_buf(); + } else { + anyhow::bail!( + "Could not find Vector repository root. Please run vdev from within the Vector repository." 
+ ); + } + } +} + +/// Check if a path exists +pub fn exists(path: impl AsRef + Debug) -> Result { + match fs::metadata(path.as_ref()) { + Ok(_) => Ok(true), + Err(error) if error.kind() == ErrorKind::NotFound => Ok(false), + Err(error) => Err(error).context(format!("Could not stat {path:?}")), + } +} diff --git a/vdev/src/platform.rs b/vdev/src/utils/platform.rs similarity index 100% rename from vdev/src/platform.rs rename to vdev/src/utils/platform.rs From f40ea0942430d160f6bc9e8bafd7080067ded76e Mon Sep 17 00:00:00 2001 From: Rob Blafford Date: Wed, 5 Nov 2025 09:42:11 -0500 Subject: [PATCH 043/227] feat(config): Add an option to prevent interpolation of env vars within config loading process (#23910) * feat(config): Add option to prevent env var interpolation - Adds a new command line flag (also can be configured with the env var VECTOR_DISABLE_ENV_VAR_INTERPOLATION) that when enabled will modify the config loading process to skip over routines that interpolate configuration values with values from the global environment. 
* Add changelog fragment * Update changelog.d/add_disable_interpolate_env_var_switch.feat.md Co-authored-by: Pavlos Rontidis * Conditionally interpolate env in prepare_input * Fix bug where disable_env_var_interpolation value is inversed * Run cargo fmt * rename fragment changelog file * Update changelog.d/add_disable_interpolate_env_var_switch.feature.md Co-authored-by: Bruce Guenter * Use new() as the method to configure a ConfigBuilderLoader * Use disable_env_var_interpolation flag in validate cli * Fix clippy error * Add support for windows builds --------- Co-authored-by: Pavlos Rontidis Co-authored-by: Bruce Guenter --- ...able_interpolate_env_var_switch.feature.md | 3 + src/app.rs | 10 ++ src/cli.rs | 8 ++ src/config/cmd.rs | 22 ++- src/config/loading/config_builder.rs | 15 +- src/config/loading/mod.rs | 133 ++++++++++++------ src/config/loading/secret.rs | 6 +- src/config/mod.rs | 2 +- src/config/unit_test/mod.rs | 13 +- src/graph.rs | 10 +- src/service.rs | 18 ++- src/validate.rs | 15 +- 12 files changed, 186 insertions(+), 69 deletions(-) create mode 100644 changelog.d/add_disable_interpolate_env_var_switch.feature.md diff --git a/changelog.d/add_disable_interpolate_env_var_switch.feature.md b/changelog.d/add_disable_interpolate_env_var_switch.feature.md new file mode 100644 index 0000000000000..ad7bb3eb9d86b --- /dev/null +++ b/changelog.d/add_disable_interpolate_env_var_switch.feature.md @@ -0,0 +1,3 @@ +Added `--disable-env-var-interpolation` CLI option to prevent environment variable interpolation. The `VECTOR_DISABLE_ENV_VAR_INTERPOLATION` environment variable can also be used to disable interpolation. 
+ +authors: graphcareful diff --git a/src/app.rs b/src/app.rs index 55dbd7a70a175..9e8a415608a8f 100644 --- a/src/app.rs +++ b/src/app.rs @@ -82,6 +82,7 @@ impl ApplicationConfig { watcher_conf, opts.require_healthy, opts.allow_empty_config, + !opts.disable_env_var_interpolation, graceful_shutdown_duration, signal_handler, ) @@ -271,6 +272,7 @@ impl Application { signals, topology_controller, allow_empty_config: root_opts.allow_empty_config, + interpolate_env: !root_opts.disable_env_var_interpolation, }) } } @@ -282,6 +284,7 @@ pub struct StartedApplication { pub signals: SignalPair, pub topology_controller: SharedTopologyController, pub allow_empty_config: bool, + pub interpolate_env: bool, } impl StartedApplication { @@ -297,6 +300,7 @@ impl StartedApplication { topology_controller, internal_topologies, allow_empty_config, + interpolate_env, } = self; let mut graceful_crash = UnboundedReceiverStream::new(graceful_crash_receiver); @@ -313,6 +317,7 @@ impl StartedApplication { &config_paths, &mut signal_handler, allow_empty_config, + interpolate_env, ).await { break signal; }, @@ -341,6 +346,7 @@ async fn handle_signal( config_paths: &[ConfigPath], signal_handler: &mut SignalHandler, allow_empty_config: bool, + interpolate_env: bool, ) -> Option { match signal { Ok(SignalTo::ReloadComponents(components_to_reload)) => { @@ -359,6 +365,7 @@ async fn handle_signal( &topology_controller.config_paths, signal_handler, allow_empty_config, + interpolate_env, ) .await; @@ -381,6 +388,7 @@ async fn handle_signal( &topology_controller.config_paths, signal_handler, allow_empty_config, + interpolate_env, ) .await; @@ -539,6 +547,7 @@ pub async fn load_configs( watcher_conf: Option, require_healthy: Option, allow_empty_config: bool, + interpolate_env: bool, graceful_shutdown_duration: Option, signal_handler: &mut SignalHandler, ) -> Result { @@ -558,6 +567,7 @@ pub async fn load_configs( &config_paths, signal_handler, allow_empty_config, + interpolate_env, ) .await 
.map_err(handle_config_errors)?; diff --git a/src/cli.rs b/src/cli.rs index b13ab7d60bdb3..fd7d8d0ac9ce4 100644 --- a/src/cli.rs +++ b/src/cli.rs @@ -137,6 +137,14 @@ pub struct RootOpts { #[arg(short, long, action = ArgAction::Count)] pub quiet: u8, + /// Disable interpolation of environment variables in configuration files. + #[arg( + long, + env = "VECTOR_DISABLE_ENV_VAR_INTERPOLATION", + default_value = "false" + )] + pub disable_env_var_interpolation: bool, + /// Set the logging format #[arg(long, default_value = "text", env = "VECTOR_LOG_FORMAT")] pub log_format: LogFormat, diff --git a/src/config/cmd.rs b/src/config/cmd.rs index 8065b496b5848..4607b5a708249 100644 --- a/src/config/cmd.rs +++ b/src/config/cmd.rs @@ -3,7 +3,9 @@ use std::path::PathBuf; use clap::Parser; use serde_json::Value; -use super::{ConfigBuilder, load_builder_from_paths, load_source_from_paths, process_paths}; +use super::{ + ConfigBuilder, load_builder_from_paths_with_opts, load_source_from_paths, process_paths, +}; use crate::{cli::handle_config_errors, config}; #[derive(Parser, Debug, Clone)] @@ -54,6 +56,14 @@ pub struct Opts { value_delimiter(',') )] pub config_dirs: Vec, + + /// Disable interpolation of environment variables in configuration files. + #[arg( + long, + env = "VECTOR_DISABLE_ENV_VAR_INTERPOLATION", + default_value = "false" + )] + pub disable_env_var_interpolation: bool, } impl Opts { @@ -166,10 +176,12 @@ pub fn cmd(opts: &Opts) -> exitcode::ExitCode { // Start by serializing to a `ConfigBuilder`. This will leverage validation in config // builder fields which we'll use to error out if required. 
let (paths, builder) = match process_paths(&paths) { - Some(paths) => match load_builder_from_paths(&paths) { - Ok(builder) => (paths, builder), - Err(errs) => return handle_config_errors(errs), - }, + Some(paths) => { + match load_builder_from_paths_with_opts(&paths, !opts.disable_env_var_interpolation) { + Ok(builder) => (paths, builder), + Err(errs) => return handle_config_errors(errs), + } + } None => return exitcode::CONFIG, }; diff --git a/src/config/loading/config_builder.rs b/src/config/loading/config_builder.rs index 17aea4ab12515..62be4e9fcfc89 100644 --- a/src/config/loading/config_builder.rs +++ b/src/config/loading/config_builder.rs @@ -12,20 +12,15 @@ use crate::config::{ pub struct ConfigBuilderLoader { builder: ConfigBuilder, secrets: Option>, + interpolate_env: bool, } impl ConfigBuilderLoader { - pub fn new() -> Self { + pub fn new(interpolate_env: bool, secrets: Option>) -> Self { Self { builder: ConfigBuilder::default(), - secrets: None, - } - } - - pub fn with_secrets(secrets: HashMap) -> Self { - Self { - builder: ConfigBuilder::default(), - secrets: Some(secrets), + secrets, + interpolate_env, } } } @@ -33,7 +28,7 @@ impl ConfigBuilderLoader { impl Process for ConfigBuilderLoader { /// Prepares input for a `ConfigBuilder` by interpolating environment variables. 
fn prepare(&mut self, input: R) -> Result> { - let prepared_input = prepare_input(input)?; + let prepared_input = prepare_input(input, self.interpolate_env)?; let prepared_input = self .secrets .as_ref() diff --git a/src/config/loading/mod.rs b/src/config/loading/mod.rs index 46f45228143b6..a7138961b2d10 100644 --- a/src/config/loading/mod.rs +++ b/src/config/loading/mod.rs @@ -123,8 +123,11 @@ pub fn process_paths(config_paths: &[ConfigPath]) -> Option> { Some(paths) } -pub fn load_from_paths(config_paths: &[ConfigPath]) -> Result> { - let builder = load_builder_from_paths(config_paths)?; +pub fn load_from_paths( + config_paths: &[ConfigPath], + interpolate_env: bool, +) -> Result> { + let builder = load_builder_from_paths_with_opts(config_paths, interpolate_env)?; let (config, build_warnings) = builder.build_with_warnings()?; for warning in build_warnings { @@ -141,9 +144,11 @@ pub async fn load_from_paths_with_provider_and_secrets( config_paths: &[ConfigPath], signal_handler: &mut signal::SignalHandler, allow_empty: bool, + interpolate_env: bool, ) -> Result> { // Load secret backends first - let mut secrets_backends_loader = load_secret_backends_from_paths(config_paths)?; + let mut secrets_backends_loader = + load_secret_backends_from_paths_with_opts(config_paths, interpolate_env)?; // And then, if needed, retrieve secrets from configured backends let mut builder = if secrets_backends_loader.has_secrets_to_retrieve() { debug!(message = "Secret placeholders found, retrieving secrets from configured backends."); @@ -151,10 +156,14 @@ pub async fn load_from_paths_with_provider_and_secrets( .retrieve(&mut signal_handler.subscribe()) .await .map_err(|e| vec![e])?; - load_builder_from_paths_with_secrets(config_paths, resolved_secrets)? + load_builder_from_paths_with_opts_with_secrets_and_opts( + config_paths, + resolved_secrets, + interpolate_env, + )? 
} else { debug!(message = "No secret placeholder found, skipping secret resolution."); - load_builder_from_paths(config_paths)? + load_builder_from_paths_with_opts(config_paths, interpolate_env)? }; builder.allow_empty = allow_empty; @@ -184,9 +193,11 @@ pub async fn load_from_str_with_secrets( format: Format, signal_handler: &mut signal::SignalHandler, allow_empty: bool, + interpolate_env: bool, ) -> Result> { // Load secret backends first - let mut secrets_backends_loader = load_secret_backends_from_input(input.as_bytes(), format)?; + let mut secrets_backends_loader = + load_secret_backends_from_input_with_opts(input.as_bytes(), format, interpolate_env)?; // And then, if needed, retrieve secrets from configured backends let mut builder = if secrets_backends_loader.has_secrets_to_retrieve() { debug!(message = "Secret placeholders found, retrieving secrets from configured backends."); @@ -194,10 +205,15 @@ pub async fn load_from_str_with_secrets( .retrieve(&mut signal_handler.subscribe()) .await .map_err(|e| vec![e])?; - load_builder_from_input_with_secrets(input.as_bytes(), format, resolved_secrets)? + load_builder_from_input_with_secrets_and_opts( + input.as_bytes(), + format, + resolved_secrets, + interpolate_env, + )? } else { debug!(message = "No secret placeholder found, skipping secret resolution."); - load_builder_from_input(input.as_bytes(), format)? + load_builder_from_input_with_opts(input.as_bytes(), format, interpolate_env)? }; builder.allow_empty = allow_empty; @@ -260,31 +276,51 @@ where } /// Uses `ConfigBuilderLoader` to process `ConfigPaths`, deserializing to a `ConfigBuilder`. 
-pub fn load_builder_from_paths(config_paths: &[ConfigPath]) -> Result> { - loader_from_paths(ConfigBuilderLoader::new(), config_paths) +pub fn load_builder_from_paths_with_opts( + config_paths: &[ConfigPath], + interpolate_env: bool, +) -> Result> { + loader_from_paths( + ConfigBuilderLoader::new(interpolate_env, None), + config_paths, + ) } -fn load_builder_from_input( +fn load_builder_from_input_with_opts( input: R, format: Format, + interpolate_env: bool, ) -> Result> { - loader_from_input(ConfigBuilderLoader::new(), input, format) + loader_from_input( + ConfigBuilderLoader::new(interpolate_env, None), + input, + format, + ) } /// Uses `ConfigBuilderLoader` to process `ConfigPaths`, performing secret replacement and deserializing to a `ConfigBuilder` -pub fn load_builder_from_paths_with_secrets( +pub fn load_builder_from_paths_with_opts_with_secrets_and_opts( config_paths: &[ConfigPath], secrets: HashMap, + interpolate_env: bool, ) -> Result> { - loader_from_paths(ConfigBuilderLoader::with_secrets(secrets), config_paths) + loader_from_paths( + ConfigBuilderLoader::new(interpolate_env, Some(secrets)), + config_paths, + ) } -fn load_builder_from_input_with_secrets( +fn load_builder_from_input_with_secrets_and_opts( input: R, format: Format, secrets: HashMap, + interpolate_env: bool, ) -> Result> { - loader_from_input(ConfigBuilderLoader::with_secrets(secrets), input, format) + loader_from_input( + ConfigBuilderLoader::new(interpolate_env, Some(secrets)), + input, + format, + ) } /// Uses `SourceLoader` to process `ConfigPaths`, deserializing to a toml `SourceMap`. @@ -295,17 +331,26 @@ pub fn load_source_from_paths( } /// Uses `SecretBackendLoader` to process `ConfigPaths`, deserializing to a `SecretBackends`. 
-pub fn load_secret_backends_from_paths( +pub fn load_secret_backends_from_paths_with_opts( config_paths: &[ConfigPath], + interpolate_env: bool, ) -> Result> { - loader_from_paths(SecretBackendLoader::new(), config_paths) + loader_from_paths( + SecretBackendLoader::new_with_opts(interpolate_env), + config_paths, + ) } -fn load_secret_backends_from_input( +fn load_secret_backends_from_input_with_opts( input: R, format: Format, + interpolate_env: bool, ) -> Result> { - loader_from_input(SecretBackendLoader::new(), input, format) + loader_from_input( + SecretBackendLoader::new_with_opts(interpolate_env), + input, + format, + ) } pub fn load_from_str(input: &str, format: Format) -> Result> { @@ -339,32 +384,40 @@ fn load_from_inputs( } } -pub fn prepare_input(mut input: R) -> Result> { +pub fn prepare_input( + mut input: R, + interpolate_env: bool, +) -> Result> { let mut source_string = String::new(); input .read_to_string(&mut source_string) .map_err(|e| vec![e.to_string()])?; - let mut vars: HashMap = std::env::vars_os() - .filter_map(|(k, v)| match (k.into_string(), v.into_string()) { - (Ok(k), Ok(v)) => Some((k, v)), - _ => None, - }) - .collect(); - - if !vars.contains_key("HOSTNAME") - && let Ok(hostname) = crate::get_hostname() - { - vars.insert("HOSTNAME".into(), hostname); + if interpolate_env { + let mut vars: HashMap = std::env::vars_os() + .filter_map(|(k, v)| match (k.into_string(), v.into_string()) { + (Ok(k), Ok(v)) => Some((k, v)), + _ => None, + }) + .collect(); + + if !vars.contains_key("HOSTNAME") + && let Ok(hostname) = crate::get_hostname() + { + vars.insert("HOSTNAME".into(), hostname); + } + vars::interpolate(&source_string, &vars) + } else { + Ok(source_string) } - vars::interpolate(&source_string, &vars) } pub fn load(input: R, format: Format) -> Result> where T: serde::de::DeserializeOwned, { - let with_vars = prepare_input(input)?; + // Via configurations that load from raw string, skip interpolation of env + let with_vars = 
prepare_input(input, false)?; format::deserialize(&with_vars, format) } @@ -400,7 +453,7 @@ fn default_config_paths() -> Vec { mod tests { use std::path::PathBuf; - use super::load_builder_from_paths; + use super::load_builder_from_paths_with_opts; use crate::config::{ComponentKey, ConfigPath}; #[test] @@ -410,7 +463,7 @@ mod tests { .join("namespacing") .join("success"); let configs = vec![ConfigPath::Dir(path)]; - let builder = load_builder_from_paths(&configs).unwrap(); + let builder = load_builder_from_paths_with_opts(&configs, true).unwrap(); assert!( builder .transforms @@ -436,7 +489,7 @@ mod tests { .join("namespacing") .join("ignore-invalid"); let configs = vec![ConfigPath::Dir(path)]; - load_builder_from_paths(&configs).unwrap(); + load_builder_from_paths_with_opts(&configs, true).unwrap(); } #[test] @@ -446,7 +499,7 @@ mod tests { .join("config-dir") .join("ignore-unknown"); let configs = vec![ConfigPath::Dir(path)]; - load_builder_from_paths(&configs).unwrap(); + load_builder_from_paths_with_opts(&configs, true).unwrap(); } #[test] @@ -456,7 +509,7 @@ mod tests { .join("config-dir") .join("globals"); let configs = vec![ConfigPath::Dir(path)]; - load_builder_from_paths(&configs).unwrap(); + load_builder_from_paths_with_opts(&configs, true).unwrap(); } #[test] @@ -466,6 +519,6 @@ mod tests { .join("config-dir") .join("globals-duplicate"); let configs = vec![ConfigPath::Dir(path)]; - load_builder_from_paths(&configs).unwrap(); + load_builder_from_paths_with_opts(&configs, true).unwrap(); } } diff --git a/src/config/loading/secret.rs b/src/config/loading/secret.rs index 57fac29238ffd..efd0e92173e2b 100644 --- a/src/config/loading/secret.rs +++ b/src/config/loading/secret.rs @@ -42,13 +42,15 @@ pub(crate) struct SecretBackendOuter { pub struct SecretBackendLoader { backends: IndexMap, pub(crate) secret_keys: HashMap>, + interpolate_env: bool, } impl SecretBackendLoader { - pub(crate) fn new() -> Self { + pub(crate) fn new_with_opts(interpolate_env: bool) -> 
Self { Self { backends: IndexMap::new(), secret_keys: HashMap::new(), + interpolate_env, } } @@ -89,7 +91,7 @@ impl SecretBackendLoader { impl Process for SecretBackendLoader { fn prepare(&mut self, input: R) -> Result> { - let config_string = prepare_input(input)?; + let config_string = prepare_input(input, self.interpolate_env)?; // Collect secret placeholders just after env var processing collect_secret_keys(&config_string, &mut self.secret_keys); Ok(config_string) diff --git a/src/config/mod.rs b/src/config/mod.rs index 04f6863b8a7c6..7e566fbba6302 100644 --- a/src/config/mod.rs +++ b/src/config/mod.rs @@ -54,7 +54,7 @@ pub use diff::ConfigDiff; pub use enrichment_table::{EnrichmentTableConfig, EnrichmentTableOuter}; pub use format::{Format, FormatHint}; pub use loading::{ - COLLECTOR, CONFIG_PATHS, load, load_builder_from_paths, load_from_paths, + COLLECTOR, CONFIG_PATHS, load, load_builder_from_paths_with_opts, load_from_paths, load_from_paths_with_provider_and_secrets, load_from_str, load_from_str_with_secrets, load_source_from_paths, merge_path_lists, process_paths, }; diff --git a/src/config/unit_test/mod.rs b/src/config/unit_test/mod.rs index 49646984d3a92..cef38ea6ddce0 100644 --- a/src/config/unit_test/mod.rs +++ b/src/config/unit_test/mod.rs @@ -93,7 +93,7 @@ fn init_log_schema_from_paths( config_paths: &[ConfigPath], deny_if_set: bool, ) -> Result<(), Vec> { - let builder = config::loading::load_builder_from_paths(config_paths)?; + let builder = config::loading::load_builder_from_paths_with_opts(config_paths, true)?; vector_lib::config::init_log_schema(builder.global.log_schema, deny_if_set); Ok(()) } @@ -103,15 +103,20 @@ pub async fn build_unit_tests_main( signal_handler: &mut signal::SignalHandler, ) -> Result, Vec> { init_log_schema_from_paths(paths, false)?; - let mut secrets_backends_loader = loading::load_secret_backends_from_paths(paths)?; + let mut secrets_backends_loader = + loading::load_secret_backends_from_paths_with_opts(paths, true)?; 
let config_builder = if secrets_backends_loader.has_secrets_to_retrieve() { let resolved_secrets = secrets_backends_loader .retrieve(&mut signal_handler.subscribe()) .await .map_err(|e| vec![e])?; - loading::load_builder_from_paths_with_secrets(paths, resolved_secrets)? + loading::load_builder_from_paths_with_opts_with_secrets_and_opts( + paths, + resolved_secrets, + true, + )? } else { - loading::load_builder_from_paths(paths)? + loading::load_builder_from_paths_with_opts(paths, true)? }; build_unit_tests(config_builder).await diff --git a/src/graph.rs b/src/graph.rs index 1b4856036cd2c..cfdcfffb3c6f5 100644 --- a/src/graph.rs +++ b/src/graph.rs @@ -52,6 +52,14 @@ pub struct Opts { /// information on the `mermaid` format. #[arg(id = "format", long, default_value = "dot")] pub format: OutputFormat, + + /// Disable interpolation of environment variables in configuration files. + #[arg( + long, + env = "VECTOR_DISABLE_ENV_VAR_INTERPOLATION", + default_value = "false" + )] + pub disable_env_var_interpolation: bool, } #[derive(clap::ValueEnum, Debug, Clone, Copy, PartialEq, Eq)] @@ -93,7 +101,7 @@ pub(crate) fn cmd(opts: &Opts) -> exitcode::ExitCode { None => return exitcode::CONFIG, }; - let config = match config::load_from_paths(&paths) { + let config = match config::load_from_paths(&paths, !opts.disable_env_var_interpolation) { Ok(config) => config, Err(errs) => { #[allow(clippy::print_stderr)] diff --git a/src/service.rs b/src/service.rs index 08283dba9f05b..5f384b888490d 100644 --- a/src/service.rs +++ b/src/service.rs @@ -54,6 +54,14 @@ struct InstallOpts { value_delimiter(',') )] config_dirs: Vec, + + /// Disable interpolation of environment variables in configuration files. 
+ #[arg( + long, + env = "VECTOR_DISABLE_ENV_VAR_INTERPOLATION", + default_value = "false" + )] + pub disable_env_var_interpolation: bool, } impl InstallOpts { @@ -64,7 +72,8 @@ impl InstallOpts { let current_exe = ::std::env::current_exe().unwrap(); let config_paths = self.config_paths_with_formats(); - let arguments = create_service_arguments(&config_paths).unwrap(); + let arguments = + create_service_arguments(&config_paths, self.disable_env_var_interpolation).unwrap(); ServiceInfo { name: OsString::from(service_name), @@ -251,9 +260,12 @@ fn control_service(service: &ServiceInfo, action: ControlAction) -> exitcode::Ex } } -fn create_service_arguments(config_paths: &[config::ConfigPath]) -> Option> { +fn create_service_arguments( + config_paths: &[config::ConfigPath], + disable_env_var_interpolation: bool, +) -> Option> { let config_paths = config::process_paths(config_paths)?; - match config::load_from_paths(&config_paths) { + match config::load_from_paths(&config_paths, !disable_env_var_interpolation) { Ok(_) => Some( config_paths .iter() diff --git a/src/validate.rs b/src/validate.rs index 608abd9f9d757..01da4e6689ab0 100644 --- a/src/validate.rs +++ b/src/validate.rs @@ -78,6 +78,14 @@ pub struct Opts { value_delimiter(',') )] pub config_dirs: Vec, + + /// Disable interpolation of environment variables in configuration files. 
+ #[arg( + long, + env = "VECTOR_DISABLE_ENV_VAR_INTERPOLATION", + default_value = "false" + )] + pub disable_env_var_interpolation: bool, } impl Opts { @@ -143,9 +151,10 @@ pub fn validate_config(opts: &Opts, fmt: &mut Formatter) -> Option { fmt.title(format!("Failed to load {:?}", &paths_list)); fmt.sub_error(errors); }; - let builder = config::load_builder_from_paths(&paths) - .map_err(&mut report_error) - .ok()?; + let builder = + config::load_builder_from_paths_with_opts(&paths, !opts.disable_env_var_interpolation) + .map_err(&mut report_error) + .ok()?; config::init_log_schema(builder.global.log_schema.clone(), true); // Build From 749fbb078b5fe2fd0083dc731f747ffed9d34c4d Mon Sep 17 00:00:00 2001 From: Pavlos Rontidis Date: Wed, 5 Nov 2025 09:52:22 -0500 Subject: [PATCH 044/227] chore(ci): .dockerignore should exclude target dirs (#24154) * chore(ci): .dockerignore should exclude target dirs * Update .dockerignore Co-authored-by: Thomas --------- Co-authored-by: Thomas --- .dockerignore | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.dockerignore b/.dockerignore index f8bae8f065c5c..02c60ccd5e905 100644 --- a/.dockerignore +++ b/.dockerignore @@ -10,3 +10,6 @@ !rust-toolchain.toml !scripts !vdev + +# Exclude target directories to avoid copying build artifacts +target From b300185da65a6037cc350e871c47fc016f41b912 Mon Sep 17 00:00:00 2001 From: Thomas Date: Wed, 5 Nov 2025 10:54:56 -0500 Subject: [PATCH 045/227] chore(releasing): improve/fix minor release template (#24156) * Fix release.env template * Make cargo vdev build manifests part of the last PR * Fix example command * Reword PR->commit * Add .s * Update Helm charts releasing guide link * Add references to internal releasing doc * Fix newline --- .github/ISSUE_TEMPLATE/minor-release.md | 50 ++++++++++++------------- 1 file changed, 23 insertions(+), 27 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/minor-release.md b/.github/ISSUE_TEMPLATE/minor-release.md index d83bc945879a3..bdf7150d13994
100644 --- a/.github/ISSUE_TEMPLATE/minor-release.md +++ b/.github/ISSUE_TEMPLATE/minor-release.md @@ -11,10 +11,11 @@ labels: "domain: releasing" Note the preparation steps are now automated. First, alter/create release.env ```shell +#!/usr/bin/env bash export NEW_VECTOR_VERSION= # replace this with the actual new version (e.g.: 0.50.0) export NEW_VRL_VERSION= # replace this with the actual new VRL version (e.g.: 0.30.0) export MINOR_VERSION=$(echo "$NEW_VECTOR_VERSION" | cut -d. -f2) -export PREP_BRANCH=prepare-v-0-"${MINOR_VERSION}"-"${NEW_VECTOR_VERSION}"-website +export PREP_BRANCH=prepare-v-"${NEW_VECTOR_VERSION//./-}"-website export RELEASE_BRANCH=v0."${MINOR_VERSION}" ``` @@ -24,7 +25,7 @@ and then source it by running `source ./release.env` ## 1. Manual Steps -- [ ] Cut a new release of [VRL](https://github.com/vectordotdev/vrl) if needed +- [ ] Cut a new release of [VRL](https://github.com/vectordotdev/vrl) if needed. - VRL release steps: https://github.com/vectordotdev/vrl/blob/main/release/README.md ## 2. Automated Steps @@ -37,12 +38,11 @@ cargo vdev release prepare --version "${NEW_VECTOR_VERSION}" --vrl-version "${NE Automated steps include: - [ ] Create a new release branch from master to freeze commits - - `git fetch && git checkout origin/master && git checkout -b "{RELEASE_BRANCH}" && git push -u` + - `git fetch && git checkout origin/master && git checkout -b "${RELEASE_BRANCH}" && git push -u` - [ ] Create a new release preparation branch from `master` - `git checkout -b "${PREP_BRANCH}" && git push -u` - [ ] Pin VRL to latest released version rather than `main` -- [ ] Check if there is a newer version of [Alpine](https://alpinelinux.org/releases/) or - [Debian](https://www.debian.org/releases/) available to update the release images in +- [ ] Check if there is a newer version of [Alpine](https://alpinelinux.org/releases/) or [Debian](https://www.debian.org/releases/) available to update the release images in `distribution/docker/`. 
Update if so. - [ ] Run `cargo vdev build release-cue` to generate a new cue file for the release - [ ] Copy VRL changelogs from the VRL version in the last Vector release as a new changelog entry @@ -60,47 +60,43 @@ Automated steps include: - [ ] Edit `website/cue/reference/releases/"${NEW_VECTOR_VERSION}".cue` - [ ] Add description key to the generated cue file with a description of the release (see previous releases for examples). - - [ ] Ensure any breaking changes are highlighted in the release upgrade guide - - [ ] Ensure any deprecations are highlighted in the release upgrade guide - - [ ] Review generated changelog entries to ensure they are understandable to end-users + - [ ] Ensure any breaking changes are highlighted in the release upgrade guide. + - [ ] Ensure any deprecations are highlighted in the release upgrade guide. + - [ ] Review generated changelog entries to ensure they are understandable to end-users. - [ ] Ensure the date matches the scheduled release date. - [ ] Add a link to pending deprecation items from [DEPRECATIONS.md](https://github.com/vectordotdev/vector/blob/master/docs/DEPRECATIONS.md). -- [ ] PR review & approval +- [ ] PR review & approval. # On the day of release -- [ ] Make sure the release branch is in sync with origin/master and has only one squashed commit with all commits from the prepare branch. If you made a PR from the prepare branch into the release branch this should already be the case +- [ ] Make sure the release branch is in sync with origin/master and has only one squashed commit with all commits from the prepare branch. If you made a PR from the prepare branch into the release branch this should already be the case. - [ ] `git checkout "${RELEASE_BRANCH}"` - - [ ] `git show --stat HEAD` - This should show the squashed prepare commit + - [ ] `git show --stat HEAD` - This should show the squashed prepare commit. - [ ] Ensure release date in `website/cue/reference/releases/0.XX.Y.cue` matches current date. 
- - If this needs to be updated commit and squash it in the release branch + - If this needs to be updated commit and squash it in the release branch. - Follow these steps if the release branch needs to be updated - - [ ] Rebase the release preparation branch on the release branch + - [ ] Rebase the release preparation branch on the release branch. - [ ] Squash the release preparation commits (but not the cherry-picked commits!) to a single commit. This makes it easier to cherry-pick to master after the release. - - [ ] Merge release preparation branch into the release branch + - [ ] Merge release preparation branch into the release branch. - `git switch "${RELEASE_BRANCH}" && git merge --ff-only "${PREP_BRANCH}"` - [ ] Tag new release - [ ] `git tag v"${NEW_VECTOR_VERSION}" -a -m v"${NEW_VECTOR_VERSION}"` - [ ] `git push origin v"${NEW_VECTOR_VERSION}"` -- [ ] Wait for release workflow to complete +- [ ] Wait for release workflow to complete. - Discoverable via [release.yml](https://github.com/vectordotdev/vector/actions/workflows/release.yml) - [ ] Reset the `website` branch to the `HEAD` of the release branch to update https://vector.dev - [ ] `git switch website && git reset --hard origin/"${RELEASE_BRANCH}" && git push` - [ ] Confirm that the release changelog was published to https://vector.dev/releases/ - - The deployment is done by Amplify. You can see - the [deployment logs here](https://dd-corpsite.datadoghq.com/logs?query=service%3Awebsites-vector%20branch%3Awebsite&agg_m=count&agg_m_source=base&agg_t=count&cols=host%2Cservice&fromUser=true&messageDisplay=inline&refresh_mode=sliding&storage=hot&stream_sort=time%2Casc&viz=stream). -- [ ] Release Linux packages. See [`vector-release` usage](https://github.com/DataDog/vector-release#usage). - - Note: the pipeline inputs are the version number `v"${NEW_VECTOR_VERSION}"` and a personal GitHub token. - - [ ] Manually trigger the `trigger-package-release-pipeline-prod-stable` job. 
-- [ ] Release updated Helm chart. See [releasing Helm chart](https://github.com/vectordotdev/helm-charts#releasing). -- [ ] Once Helm chart is released, updated Vector manifests - - Run `cargo vdev build manifests` and open a PR with changes -- [ ] Add docker images to [https://github.com/DataDog/images](https://github.com/DataDog/images/tree/master/vector) to have them available internally. ([Example PR](https://github.com/DataDog/images/pull/7104)) + - Refer to the internal releasing doc to monitor the deployment. +- [ ] Release Linux packages. Refer to the internal releasing doc. +- [ ] Release updated Helm chart. See [releasing Helm chart](https://github.com/vectordotdev/helm-charts/blob/develop/RELEASING.md). +- [ ] Create internal Docker images. Refer to the internal releasing doc. - [ ] Create a new PR with title starting as `chore(releasing):` - - [ ] Cherry-pick any release commits from the release branch that are not on `master`, to `master` + - [ ] Cherry-pick any release commits from the release branch that are not on `master`, to `master`. + - [ ] Run `cargo vdev build manifests` and commit changes. - [ ] Bump the release number in the `Cargo.toml` on master to the next minor release. - - [ ] Also, update `Cargo.lock` with: `cargo update -p vector` + - [ ] Also, update `Cargo.lock` with: `cargo update -p vector`. - [ ] If there is a VRL version update, revert it and make it track the git `main` branch and then run `cargo update -p vrl`. -- [ ] Kick-off post-mortems for any regressions resolved by the release +- [ ] Kick-off post-mortems for any regressions resolved by the release. 
From 6d332b4c48f8fd375cdf417d13a73655f8fa5fee Mon Sep 17 00:00:00 2001 From: Pavlos Rontidis Date: Wed, 5 Nov 2025 13:16:07 -0500 Subject: [PATCH 046/227] chore(dev): refactor ConfigBuilderLoader (tech debt) (#24157) * chore(dev): refactor ConfigBuilderLoader (tech debt) * wip * move tests * revert some changes to loading mod * use default vs new --- src/config/cmd.rs | 9 +- src/config/loading/config_builder.rs | 135 ++++++++++++++++++++- src/config/loading/mod.rs | 172 +++++---------------------- src/config/loading/source.rs | 6 + src/config/mod.rs | 8 +- src/config/unit_test/mod.rs | 19 +-- src/validate.rs | 11 +- 7 files changed, 191 insertions(+), 169 deletions(-) diff --git a/src/config/cmd.rs b/src/config/cmd.rs index 4607b5a708249..21ca17cdff391 100644 --- a/src/config/cmd.rs +++ b/src/config/cmd.rs @@ -3,9 +3,7 @@ use std::path::PathBuf; use clap::Parser; use serde_json::Value; -use super::{ - ConfigBuilder, load_builder_from_paths_with_opts, load_source_from_paths, process_paths, -}; +use super::{ConfigBuilder, load_source_from_paths, loading::ConfigBuilderLoader, process_paths}; use crate::{cli::handle_config_errors, config}; #[derive(Parser, Debug, Clone)] @@ -177,7 +175,10 @@ pub fn cmd(opts: &Opts) -> exitcode::ExitCode { // builder fields which we'll use to error out if required.
let (paths, builder) = match process_paths(&paths) { Some(paths) => { - match load_builder_from_paths_with_opts(&paths, !opts.disable_env_var_interpolation) { + match ConfigBuilderLoader::default() + .interpolate_env(!opts.disable_env_var_interpolation) + .load_from_paths(&paths) + { Ok(builder) => (paths, builder), Err(errs) => return handle_config_errors(errs), } diff --git a/src/config/loading/config_builder.rs b/src/config/loading/config_builder.rs index 62be4e9fcfc89..3e1db5d694032 100644 --- a/src/config/loading/config_builder.rs +++ b/src/config/loading/config_builder.rs @@ -9,6 +9,7 @@ use crate::config::{ TransformOuter, }; +#[derive(Debug)] pub struct ConfigBuilderLoader { builder: ConfigBuilder, secrets: Option>, @@ -16,11 +17,44 @@ pub struct ConfigBuilderLoader { } impl ConfigBuilderLoader { - pub fn new(interpolate_env: bool, secrets: Option>) -> Self { + /// Sets whether to interpolate environment variables in the config. + pub const fn interpolate_env(mut self, interpolate: bool) -> Self { + self.interpolate_env = interpolate; + self + } + + /// Sets the secrets map for secret interpolation. + pub fn secrets(mut self, secrets: HashMap) -> Self { + self.secrets = Some(secrets); + self + } + + /// Builds the ConfigBuilderLoader and loads configuration from the specified paths. + pub fn load_from_paths( + self, + config_paths: &[super::ConfigPath], + ) -> Result> { + super::loader_from_paths(self, config_paths) + } + + /// Builds the ConfigBuilderLoader and loads configuration from an input reader. + pub fn load_from_input( + self, + input: R, + format: super::Format, + ) -> Result> { + super::loader_from_input(self, input, format) + } +} + +impl Default for ConfigBuilderLoader { + /// Creates a new builder with default settings. + /// By default, environment variable interpolation is enabled. 
+ fn default() -> Self { Self { builder: ConfigBuilder::default(), - secrets, - interpolate_env, + secrets: None, + interpolate_env: true, } } } @@ -84,3 +118,98 @@ impl loader::Loader for ConfigBuilderLoader { self.builder } } + +#[cfg(all( + test, + feature = "sinks-elasticsearch", + feature = "transforms-sample", + feature = "sources-demo_logs", + feature = "sinks-console" +))] +mod tests { + use std::path::PathBuf; + + use super::ConfigBuilderLoader; + use crate::config::{ComponentKey, ConfigPath}; + + #[test] + fn load_namespacing_folder() { + let path = PathBuf::from(".") + .join("tests") + .join("namespacing") + .join("success"); + let configs = vec![ConfigPath::Dir(path)]; + let builder = ConfigBuilderLoader::default() + .interpolate_env(true) + .load_from_paths(&configs) + .unwrap(); + assert!( + builder + .transforms + .contains_key(&ComponentKey::from("apache_parser")) + ); + assert!( + builder + .sources + .contains_key(&ComponentKey::from("apache_logs")) + ); + assert!( + builder + .sinks + .contains_key(&ComponentKey::from("es_cluster")) + ); + assert_eq!(builder.tests.len(), 2); + } + + #[test] + fn load_namespacing_ignore_invalid() { + let path = PathBuf::from(".") + .join("tests") + .join("namespacing") + .join("ignore-invalid"); + let configs = vec![ConfigPath::Dir(path)]; + ConfigBuilderLoader::default() + .interpolate_env(true) + .load_from_paths(&configs) + .unwrap(); + } + + #[test] + fn load_directory_ignores_unknown_file_formats() { + let path = PathBuf::from(".") + .join("tests") + .join("config-dir") + .join("ignore-unknown"); + let configs = vec![ConfigPath::Dir(path)]; + ConfigBuilderLoader::default() + .interpolate_env(true) + .load_from_paths(&configs) + .unwrap(); + } + + #[test] + fn load_directory_globals() { + let path = PathBuf::from(".") + .join("tests") + .join("config-dir") + .join("globals"); + let configs = vec![ConfigPath::Dir(path)]; + ConfigBuilderLoader::default() + .interpolate_env(true) + .load_from_paths(&configs) + 
.unwrap(); + } + + #[test] + fn load_directory_globals_duplicates() { + let path = PathBuf::from(".") + .join("tests") + .join("config-dir") + .join("globals-duplicate"); + let configs = vec![ConfigPath::Dir(path)]; + ConfigBuilderLoader::default() + .interpolate_env(true) + .load_from_paths(&configs) + .unwrap(); + } +} diff --git a/src/config/loading/mod.rs b/src/config/loading/mod.rs index a7138961b2d10..595e7a8de1ad0 100644 --- a/src/config/loading/mod.rs +++ b/src/config/loading/mod.rs @@ -11,7 +11,7 @@ use std::{ sync::Mutex, }; -use config_builder::ConfigBuilderLoader; +pub use config_builder::ConfigBuilderLoader; use glob::glob; use loader::process::Process; pub use loader::*; @@ -127,7 +127,9 @@ pub fn load_from_paths( config_paths: &[ConfigPath], interpolate_env: bool, ) -> Result> { - let builder = load_builder_from_paths_with_opts(config_paths, interpolate_env)?; + let builder = ConfigBuilderLoader::default() + .interpolate_env(interpolate_env) + .load_from_paths(config_paths)?; let (config, build_warnings) = builder.build_with_warnings()?; for warning in build_warnings { @@ -156,14 +158,15 @@ pub async fn load_from_paths_with_provider_and_secrets( .retrieve(&mut signal_handler.subscribe()) .await .map_err(|e| vec![e])?; - load_builder_from_paths_with_opts_with_secrets_and_opts( - config_paths, - resolved_secrets, - interpolate_env, - )? + ConfigBuilderLoader::default() + .interpolate_env(interpolate_env) + .secrets(resolved_secrets) + .load_from_paths(config_paths)? } else { debug!(message = "No secret placeholder found, skipping secret resolution."); - load_builder_from_paths_with_opts(config_paths, interpolate_env)? + ConfigBuilderLoader::default() + .interpolate_env(interpolate_env) + .load_from_paths(config_paths)? 
}; builder.allow_empty = allow_empty; @@ -205,15 +208,15 @@ pub async fn load_from_str_with_secrets( .retrieve(&mut signal_handler.subscribe()) .await .map_err(|e| vec![e])?; - load_builder_from_input_with_secrets_and_opts( - input.as_bytes(), - format, - resolved_secrets, - interpolate_env, - )? + ConfigBuilderLoader::default() + .interpolate_env(interpolate_env) + .secrets(resolved_secrets) + .load_from_input(input.as_bytes(), format)? } else { debug!(message = "No secret placeholder found, skipping secret resolution."); - load_builder_from_input_with_opts(input.as_bytes(), format, interpolate_env)? + ConfigBuilderLoader::default() + .interpolate_env(interpolate_env) + .load_from_input(input.as_bytes(), format)? }; builder.allow_empty = allow_empty; @@ -229,7 +232,11 @@ pub async fn load_from_str_with_secrets( Ok(new_config) } -fn loader_from_input(mut loader: L, input: R, format: Format) -> Result> +pub(super) fn loader_from_input( + mut loader: L, + input: R, + format: Format, +) -> Result> where T: serde::de::DeserializeOwned, L: Loader + Process, @@ -239,7 +246,10 @@ where } /// Iterators over `ConfigPaths`, and processes a file/dir according to a provided `Loader`. -fn loader_from_paths(mut loader: L, config_paths: &[ConfigPath]) -> Result> +pub(super) fn loader_from_paths( + mut loader: L, + config_paths: &[ConfigPath], +) -> Result> where T: serde::de::DeserializeOwned, L: Loader + Process, @@ -275,54 +285,6 @@ where } } -/// Uses `ConfigBuilderLoader` to process `ConfigPaths`, deserializing to a `ConfigBuilder`. 
-pub fn load_builder_from_paths_with_opts( - config_paths: &[ConfigPath], - interpolate_env: bool, -) -> Result> { - loader_from_paths( - ConfigBuilderLoader::new(interpolate_env, None), - config_paths, - ) -} - -fn load_builder_from_input_with_opts( - input: R, - format: Format, - interpolate_env: bool, -) -> Result> { - loader_from_input( - ConfigBuilderLoader::new(interpolate_env, None), - input, - format, - ) -} - -/// Uses `ConfigBuilderLoader` to process `ConfigPaths`, performing secret replacement and deserializing to a `ConfigBuilder` -pub fn load_builder_from_paths_with_opts_with_secrets_and_opts( - config_paths: &[ConfigPath], - secrets: HashMap, - interpolate_env: bool, -) -> Result> { - loader_from_paths( - ConfigBuilderLoader::new(interpolate_env, Some(secrets)), - config_paths, - ) -} - -fn load_builder_from_input_with_secrets_and_opts( - input: R, - format: Format, - secrets: HashMap, - interpolate_env: bool, -) -> Result> { - loader_from_input( - ConfigBuilderLoader::new(interpolate_env, Some(secrets)), - input, - format, - ) -} - /// Uses `SourceLoader` to process `ConfigPaths`, deserializing to a toml `SourceMap`. 
pub fn load_source_from_paths( config_paths: &[ConfigPath], @@ -442,83 +404,3 @@ fn default_config_paths() -> Vec { vec![ConfigPath::File(default_path, Some(Format::Yaml))] } - -#[cfg(all( - test, - feature = "sinks-elasticsearch", - feature = "transforms-sample", - feature = "sources-demo_logs", - feature = "sinks-console" -))] -mod tests { - use std::path::PathBuf; - - use super::load_builder_from_paths_with_opts; - use crate::config::{ComponentKey, ConfigPath}; - - #[test] - fn load_namespacing_folder() { - let path = PathBuf::from(".") - .join("tests") - .join("namespacing") - .join("success"); - let configs = vec![ConfigPath::Dir(path)]; - let builder = load_builder_from_paths_with_opts(&configs, true).unwrap(); - assert!( - builder - .transforms - .contains_key(&ComponentKey::from("apache_parser")) - ); - assert!( - builder - .sources - .contains_key(&ComponentKey::from("apache_logs")) - ); - assert!( - builder - .sinks - .contains_key(&ComponentKey::from("es_cluster")) - ); - assert_eq!(builder.tests.len(), 2); - } - - #[test] - fn load_namespacing_ignore_invalid() { - let path = PathBuf::from(".") - .join("tests") - .join("namespacing") - .join("ignore-invalid"); - let configs = vec![ConfigPath::Dir(path)]; - load_builder_from_paths_with_opts(&configs, true).unwrap(); - } - - #[test] - fn load_directory_ignores_unknown_file_formats() { - let path = PathBuf::from(".") - .join("tests") - .join("config-dir") - .join("ignore-unknown"); - let configs = vec![ConfigPath::Dir(path)]; - load_builder_from_paths_with_opts(&configs, true).unwrap(); - } - - #[test] - fn load_directory_globals() { - let path = PathBuf::from(".") - .join("tests") - .join("config-dir") - .join("globals"); - let configs = vec![ConfigPath::Dir(path)]; - load_builder_from_paths_with_opts(&configs, true).unwrap(); - } - - #[test] - fn load_directory_globals_duplicates() { - let path = PathBuf::from(".") - .join("tests") - .join("config-dir") - .join("globals-duplicate"); - let configs = 
vec![ConfigPath::Dir(path)]; - load_builder_from_paths_with_opts(&configs, true).unwrap(); - } -} diff --git a/src/config/loading/source.rs b/src/config/loading/source.rs index 6230c65b9edb8..eee20f0dadbf3 100644 --- a/src/config/loading/source.rs +++ b/src/config/loading/source.rs @@ -15,6 +15,12 @@ impl SourceLoader { } } +impl Default for SourceLoader { + fn default() -> Self { + Self::new() + } +} + impl Process for SourceLoader { /// Prepares input by simply reading bytes to a string. Unlike other loaders, there's no /// interpolation of environment variables. This is on purpose to preserve the original config. diff --git a/src/config/mod.rs b/src/config/mod.rs index 7e566fbba6302..b38ffe9c14248 100644 --- a/src/config/mod.rs +++ b/src/config/mod.rs @@ -36,7 +36,7 @@ pub mod dot_graph; mod enrichment_table; pub mod format; mod graph; -mod loading; +pub mod loading; pub mod provider; pub mod schema; mod secret; @@ -54,9 +54,9 @@ pub use diff::ConfigDiff; pub use enrichment_table::{EnrichmentTableConfig, EnrichmentTableOuter}; pub use format::{Format, FormatHint}; pub use loading::{ - COLLECTOR, CONFIG_PATHS, load, load_builder_from_paths_with_opts, load_from_paths, - load_from_paths_with_provider_and_secrets, load_from_str, load_from_str_with_secrets, - load_source_from_paths, merge_path_lists, process_paths, + COLLECTOR, CONFIG_PATHS, load, load_from_paths, load_from_paths_with_provider_and_secrets, + load_from_str, load_from_str_with_secrets, load_source_from_paths, merge_path_lists, + process_paths, }; pub use provider::ProviderConfig; pub use secret::SecretBackend; diff --git a/src/config/unit_test/mod.rs b/src/config/unit_test/mod.rs index cef38ea6ddce0..c3bd5dc6e7cdb 100644 --- a/src/config/unit_test/mod.rs +++ b/src/config/unit_test/mod.rs @@ -38,7 +38,7 @@ use crate::{ conditions::Condition, config::{ self, ComponentKey, Config, ConfigBuilder, ConfigPath, SinkOuter, SourceOuter, - TestDefinition, TestInput, TestOutput, loading, + TestDefinition, 
TestInput, TestOutput, loading, loading::ConfigBuilderLoader, }, event::{Event, EventMetadata, LogEvent}, signal, @@ -93,7 +93,9 @@ fn init_log_schema_from_paths( config_paths: &[ConfigPath], deny_if_set: bool, ) -> Result<(), Vec> { - let builder = config::loading::load_builder_from_paths_with_opts(config_paths, true)?; + let builder = ConfigBuilderLoader::default() + .interpolate_env(true) + .load_from_paths(config_paths)?; vector_lib::config::init_log_schema(builder.global.log_schema, deny_if_set); Ok(()) } @@ -110,13 +112,14 @@ pub async fn build_unit_tests_main( .retrieve(&mut signal_handler.subscribe()) .await .map_err(|e| vec![e])?; - loading::load_builder_from_paths_with_opts_with_secrets_and_opts( - paths, - resolved_secrets, - true, - )? + ConfigBuilderLoader::default() + .interpolate_env(true) + .secrets(resolved_secrets) + .load_from_paths(paths)? } else { - loading::load_builder_from_paths_with_opts(paths, true)? + ConfigBuilderLoader::default() + .interpolate_env(true) + .load_from_paths(paths)? 
}; build_unit_tests(config_builder).await diff --git a/src/validate.rs b/src/validate.rs index 01da4e6689ab0..a988e48478e78 100644 --- a/src/validate.rs +++ b/src/validate.rs @@ -7,7 +7,7 @@ use colored::*; use exitcode::ExitCode; use crate::{ - config::{self, Config, ConfigDiff}, + config::{self, Config, ConfigDiff, loading::ConfigBuilderLoader}, topology::{ self, builder::{TopologyPieces, TopologyPiecesBuilder}, @@ -151,10 +151,11 @@ pub fn validate_config(opts: &Opts, fmt: &mut Formatter) -> Option { fmt.title(format!("Failed to load {:?}", &paths_list)); fmt.sub_error(errors); }; - let builder = - config::load_builder_from_paths_with_opts(&paths, !opts.disable_env_var_interpolation) - .map_err(&mut report_error) - .ok()?; + let builder = ConfigBuilderLoader::default() + .interpolate_env(!opts.disable_env_var_interpolation) + .load_from_paths(&paths) + .map_err(&mut report_error) + .ok()?; config::init_log_schema(builder.global.log_schema.clone(), true); // Build From 2fbe9494c530f1451590ea6932c963957b0c1fb6 Mon Sep 17 00:00:00 2001 From: Pavlos Rontidis Date: Wed, 5 Nov 2025 13:50:23 -0500 Subject: [PATCH 047/227] chore(ci): simplify/improve scripts/ci-free-disk-space.sh (#24159) --- scripts/ci-free-disk-space.sh | 79 +++++++++++++---------------------- 1 file changed, 30 insertions(+), 49 deletions(-) diff --git a/scripts/ci-free-disk-space.sh b/scripts/ci-free-disk-space.sh index 00af4d1b1da25..e955d93813e19 100755 --- a/scripts/ci-free-disk-space.sh +++ b/scripts/ci-free-disk-space.sh @@ -1,57 +1,38 @@ #!/usr/bin/env bash -# -# From: https://github.com/apache/flink -# -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. +# Based on cleanup script from: https://github.com/apache/flink +# Licensed under Apache License 2.0 - -# -# The Azure provided machines typically have the following disk allocation: -# Total space: 85GB -# Allocated: 67 GB -# Free: 17 GB -# This script frees up 28 GB of disk space by deleting unneeded packages and -# large directories. -# The Flink end to end tests download and generate more than 17 GB of files, -# causing unpredictable behavior and build failures. -# echo "==============================================================================" -echo "Freeing up disk space on CI system" +echo "Freeing up disk space on GitHub Actions runner" echo "==============================================================================" -echo "Listing 100 largest packages" -dpkg-query -Wf '${Installed-Size}\t${Package}\n' | sort -n | tail -n 100 -df -h -echo "Removing large packages" -sudo apt-get remove -y '^dotnet-.*' -sudo apt-get remove -y '^llvm-.*' -sudo apt-get remove -y 'php.*' -sudo apt-get remove -y '^mongodb-.*' -sudo apt-get remove -y '^mysql-.*' -sudo apt-get remove -y azure-cli google-cloud-sdk hhvm google-chrome-stable firefox powershell mono-devel libgl1-mesa-dri +echo "Disk space before cleanup:" +df -h / + +echo "Removing large packages..." 
+sudo apt-get remove -y '^dotnet-.*' '^llvm-.*' 'php.*' '^mongodb-.*' '^mysql-.*' \ + azure-cli google-cloud-sdk hhvm google-chrome-stable firefox powershell mono-devel libgl1-mesa-dri 2>/dev/null || true sudo apt-get autoremove -y sudo apt-get clean -df -h -echo "Removing large directories" -sudo rm -rf /usr/share/dotnet/ -sudo rm -rf /usr/local/graalvm/ -sudo rm -rf /usr/local/.ghcup/ -sudo rm -rf /usr/local/share/powershell -sudo rm -rf /usr/local/share/chromium -sudo rm -rf /usr/local/lib/android -# sudo rm -rf /usr/local/lib/node_modules # we use node -df -h +echo "Removing large directories..." +sudo rm -rf /usr/share/dotnet/ \ + /usr/local/graalvm/ \ + /usr/local/.ghcup/ \ + /usr/local/share/powershell \ + /usr/local/share/chromium \ + /usr/local/lib/android \ + /opt/hostedtoolcache/CodeQL \ + /usr/local/lib/android/sdk \ + /usr/share/swift \ + /opt/az + +echo "Cleaning Docker artifacts..." +docker system prune -af --volumes || true + +echo "Cleaning swap..." +sudo swapoff -a || true +sudo rm -f /mnt/swapfile || true + +echo "Disk space after cleanup:" +df -h / From c9537a0de423884b0581341a9a09b15a68448094 Mon Sep 17 00:00:00 2001 From: Topper <106534165+toppercodes@users.noreply.github.com> Date: Wed, 5 Nov 2025 20:10:43 +0000 Subject: [PATCH 048/227] feat(axiom): add support for regional edge endpoints in AxiomConfig (#24037) * feat(axiom sink): Add regional edge support with smart URL handling Introduces optional `region` field for regional edge domains and enhances `url` field with intelligent path detection. URLs with custom paths are used as-is, while URLs without paths maintain backwards compatibility by appending the legacy path format. Priority: url > region > default cloud endpoint Fully backwards compatible with existing configurations. * cargo fmt * refactor(axiom): Axiom sink configuration and integration tests - Moved the Axiom sink configuration and integration tests to a new module `axiom` under the `sinks` directory. 
- Implemented `UrlOrRegion` struct to manage Axiom endpoint configurations, ensuring validation between URL and region settings. - Moved the integration tests to a new module `integration_tests` under the `axiom` module. - Updated documentation for the Axiom sink configuration in the generated CUE files to reflect new fields and usage examples. * cargo fmt * fix(axiom): Update integration tests to include SinkConfig and UrlOrRegion * Update website/cue/reference/components/sinks/generated/axiom.cue Co-authored-by: domalessi <111786334+domalessi@users.noreply.github.com> * fix: Fix wording * fix(axiom): Make AxiomConfig fields public --------- Co-authored-by: Pavlos Rontidis Co-authored-by: domalessi <111786334+domalessi@users.noreply.github.com> --- changelog.d/axiom_regional_edges.feature.md | 3 + src/sinks/axiom.rs | 306 ------------- src/sinks/axiom/config.rs | 408 ++++++++++++++++++ src/sinks/axiom/integration_tests.rs | 119 +++++ src/sinks/axiom/mod.rs | 6 + .../components/sinks/generated/axiom.cue | 17 +- 6 files changed, 551 insertions(+), 308 deletions(-) create mode 100644 changelog.d/axiom_regional_edges.feature.md delete mode 100644 src/sinks/axiom.rs create mode 100644 src/sinks/axiom/config.rs create mode 100644 src/sinks/axiom/integration_tests.rs create mode 100644 src/sinks/axiom/mod.rs diff --git a/changelog.d/axiom_regional_edges.feature.md b/changelog.d/axiom_regional_edges.feature.md new file mode 100644 index 0000000000000..8cda67ffdf8d8 --- /dev/null +++ b/changelog.d/axiom_regional_edges.feature.md @@ -0,0 +1,3 @@ +The `axiom` sink now supports regional edges for data locality. A new optional `region` configuration field allows you to specify the regional edge domain (e.g., `eu-central-1.aws.edge.axiom.co`). When configured, data is sent to `https://{region}/v1/ingest/{dataset}`. 
The `url` field now intelligently handles paths: URLs with custom paths are used as-is, while URLs without paths maintain backwards compatibility by appending `/v1/datasets/{dataset}/ingest`. + +authors: toppercodes diff --git a/src/sinks/axiom.rs b/src/sinks/axiom.rs deleted file mode 100644 index 5bc03ec00b65b..0000000000000 --- a/src/sinks/axiom.rs +++ /dev/null @@ -1,306 +0,0 @@ -use vector_lib::{ - codecs::{ - MetricTagValues, - encoding::{FramingConfig, JsonSerializerConfig, JsonSerializerOptions, SerializerConfig}, - }, - configurable::configurable_component, - sensitive_string::SensitiveString, -}; - -use crate::{ - codecs::{EncodingConfigWithFraming, Transformer}, - config::{AcknowledgementsConfig, DataType, GenerateConfig, Input, SinkConfig, SinkContext}, - http::Auth as HttpAuthConfig, - sinks::{ - Healthcheck, VectorSink, - http::config::{HttpMethod, HttpSinkConfig}, - util::{ - BatchConfig, Compression, RealtimeSizeBasedDefaultBatchSettings, http::RequestConfig, - }, - }, - tls::TlsConfig, -}; - -static CLOUD_URL: &str = "https://api.axiom.co"; - -/// Configuration for the `axiom` sink. -#[configurable_component(sink("axiom", "Deliver log events to Axiom."))] -#[derive(Clone, Debug, Default)] -pub struct AxiomConfig { - /// URI of the Axiom endpoint to send data to. - /// - /// Only required if not using Axiom Cloud. - #[configurable(validation(format = "uri"))] - #[configurable(metadata(docs::examples = "https://axiom.my-domain.com"))] - #[configurable(metadata(docs::examples = "${AXIOM_URL}"))] - url: Option, - - /// The Axiom organization ID. - /// - /// Only required when using personal tokens. - #[configurable(metadata(docs::examples = "${AXIOM_ORG_ID}"))] - #[configurable(metadata(docs::examples = "123abc"))] - org_id: Option, - - /// The Axiom API token. - #[configurable(metadata(docs::examples = "${AXIOM_TOKEN}"))] - #[configurable(metadata(docs::examples = "123abc"))] - token: SensitiveString, - - /// The Axiom dataset to write to. 
- #[configurable(metadata(docs::examples = "${AXIOM_DATASET}"))] - #[configurable(metadata(docs::examples = "vector_rocks"))] - dataset: String, - - #[configurable(derived)] - #[serde(default)] - request: RequestConfig, - - /// The compression algorithm to use. - #[configurable(derived)] - #[serde(default = "Compression::zstd_default")] - compression: Compression, - - /// The TLS settings for the connection. - /// - /// Optional, constrains TLS settings for this sink. - #[configurable(derived)] - tls: Option, - - /// The batch settings for the sink. - #[configurable(derived)] - #[serde(default)] - pub batch: BatchConfig, - - /// Controls how acknowledgements are handled for this sink. - #[configurable(derived)] - #[serde( - default, - deserialize_with = "crate::serde::bool_or_struct", - skip_serializing_if = "crate::serde::is_default" - )] - acknowledgements: AcknowledgementsConfig, -} - -impl GenerateConfig for AxiomConfig { - fn generate_config() -> toml::Value { - toml::from_str( - r#"token = "${AXIOM_TOKEN}" - dataset = "${AXIOM_DATASET}" - url = "${AXIOM_URL}" - org_id = "${AXIOM_ORG_ID}""#, - ) - .unwrap() - } -} - -#[async_trait::async_trait] -#[typetag::serde(name = "axiom")] -impl SinkConfig for AxiomConfig { - async fn build(&self, cx: SinkContext) -> crate::Result<(VectorSink, Healthcheck)> { - let mut request = self.request.clone(); - if let Some(org_id) = &self.org_id { - // NOTE: Only add the org id header if an org id is provided - request - .headers - .insert("X-Axiom-Org-Id".to_string(), org_id.clone()); - } - - // Axiom has a custom high-performance database that can be ingested - // into using the native HTTP ingest endpoint. This configuration wraps - // the vector HTTP sink with the necessary adjustments to send data - // to Axiom, whilst keeping the configuration simple and easy to use - // and maintenance of the vector axiom sink to a minimum. 
- // - let http_sink_config = HttpSinkConfig { - uri: self.build_endpoint().try_into()?, - compression: self.compression, - auth: Some(HttpAuthConfig::Bearer { - token: self.token.clone(), - }), - method: HttpMethod::Post, - tls: self.tls.clone(), - request, - acknowledgements: self.acknowledgements, - batch: self.batch, - headers: None, - encoding: EncodingConfigWithFraming::new( - Some(FramingConfig::NewlineDelimited), - SerializerConfig::Json(JsonSerializerConfig { - metric_tag_values: MetricTagValues::Single, - options: JsonSerializerOptions { pretty: false }, // Minified JSON - }), - Transformer::default(), - ), - payload_prefix: "".into(), // Always newline delimited JSON - payload_suffix: "".into(), // Always newline delimited JSON - }; - - http_sink_config.build(cx).await - } - - fn input(&self) -> Input { - Input::new(DataType::Metric | DataType::Log | DataType::Trace) - } - - fn acknowledgements(&self) -> &AcknowledgementsConfig { - &self.acknowledgements - } -} - -impl AxiomConfig { - fn build_endpoint(&self) -> String { - let url = if let Some(url) = self.url.as_ref() { - url.clone() - } else { - CLOUD_URL.to_string() - }; - - // NOTE trim any trailing slashes to avoid redundant rewriting or 301 redirects from intermediate proxies - // NOTE Most axiom users will not need to configure a url, this is for the other 1% - let url = url.trim_end_matches('/'); - - format!("{}/v1/datasets/{}/ingest", url, self.dataset) - } -} - -#[cfg(test)] -mod test { - #[test] - fn generate_config() { - crate::test_util::test_generate_config::(); - - let config = super::AxiomConfig { - url: Some("https://axiom.my-domain.com///".to_string()), - org_id: None, - dataset: "vector_rocks".to_string(), - ..Default::default() - }; - let endpoint = config.build_endpoint(); - assert_eq!( - endpoint, - "https://axiom.my-domain.com/v1/datasets/vector_rocks/ingest" - ); - } -} - -#[cfg(feature = "axiom-integration-tests")] -#[cfg(test)] -mod integration_tests { - use std::env; - - use 
chrono::{DateTime, Duration, Utc}; - use futures::stream; - use serde::{Deserialize, Serialize}; - use vector_lib::event::{BatchNotifier, BatchStatus, Event, LogEvent}; - - use super::*; - use crate::{ - config::SinkContext, - sinks::axiom::AxiomConfig, - test_util::components::{HTTP_SINK_TAGS, run_and_assert_sink_compliance}, - }; - - #[tokio::test] - async fn axiom_logs_put_data() { - let client = reqwest::Client::new(); - let url = env::var("AXIOM_URL").unwrap(); - let token = env::var("AXIOM_TOKEN").expect("AXIOM_TOKEN environment variable to be set"); - assert!(!token.is_empty(), "$AXIOM_TOKEN required"); - let dataset = env::var("AXIOM_DATASET").unwrap(); - let org_id = env::var("AXIOM_ORG_ID").unwrap(); - - let cx = SinkContext::default(); - - let config = AxiomConfig { - url: Some(url.clone()), - token: token.clone().into(), - dataset: dataset.clone(), - org_id: Some(org_id.clone()), - ..Default::default() - }; - - // create unique test id so tests can run in parallel - let test_id = uuid::Uuid::new_v4().to_string(); - - let (sink, _) = config.build(cx).await.unwrap(); - - let (batch, mut receiver) = BatchNotifier::new_with_receiver(); - - let mut event1 = LogEvent::from("message_1").with_batch_notifier(&batch); - event1.insert("host", "aws.cloud.eur"); - event1.insert("source_type", "file"); - event1.insert("test_id", test_id.clone()); - - let mut event2 = LogEvent::from("message_2").with_batch_notifier(&batch); - event2.insert("host", "aws.cloud.eur"); - event2.insert("source_type", "file"); - event2.insert("test_id", test_id.clone()); - - drop(batch); - - let events = vec![Event::Log(event1), Event::Log(event2)]; - - run_and_assert_sink_compliance(sink, stream::iter(events), &HTTP_SINK_TAGS).await; - - assert_eq!(receiver.try_recv(), Ok(BatchStatus::Delivered)); - - #[derive(Serialize)] - struct QueryRequest { - apl: String, - #[serde(rename = "endTime")] - end_time: DateTime, - #[serde(rename = "startTime")] - start_time: DateTime, - // ... 
- } - - #[derive(Deserialize, Debug)] - struct QueryResponseMatch { - data: serde_json::Value, - // ... - } - - #[derive(Deserialize, Debug)] - struct QueryResponse { - matches: Vec, - // ... - } - - let query_req = QueryRequest { - apl: format!( - "['{dataset}'] | where test_id == '{test_id}' | order by _time desc | limit 2" - ), - start_time: Utc::now() - Duration::minutes(10), - end_time: Utc::now() + Duration::minutes(10), - }; - let query_res: QueryResponse = client - .post(format!("{url}/v1/datasets/_apl?format=legacy")) - .header("X-Axiom-Org-Id", org_id) - .header("Authorization", format!("Bearer {token}")) - .json(&query_req) - .send() - .await - .unwrap() - .error_for_status() - .unwrap() - .json() - .await - .unwrap(); - - assert_eq!(2, query_res.matches.len()); - - let fst = match query_res.matches[0].data { - serde_json::Value::Object(ref obj) => obj, - _ => panic!("Unexpected value, expected object"), - }; - // Note that we order descending, so message_2 comes first - assert_eq!("message_2", fst.get("message").unwrap().as_str().unwrap()); - - let snd = match query_res.matches[1].data { - serde_json::Value::Object(ref obj) => obj, - _ => panic!("Unexpected value, expected object"), - }; - assert_eq!("message_1", snd.get("message").unwrap().as_str().unwrap()); - } -} diff --git a/src/sinks/axiom/config.rs b/src/sinks/axiom/config.rs new file mode 100644 index 0000000000000..0349fc02831b8 --- /dev/null +++ b/src/sinks/axiom/config.rs @@ -0,0 +1,408 @@ +use vector_lib::{ + codecs::{ + MetricTagValues, + encoding::{FramingConfig, JsonSerializerConfig, JsonSerializerOptions, SerializerConfig}, + }, + configurable::configurable_component, + sensitive_string::SensitiveString, +}; + +use crate::{ + codecs::{EncodingConfigWithFraming, Transformer}, + config::{AcknowledgementsConfig, DataType, GenerateConfig, Input, SinkConfig, SinkContext}, + http::Auth as HttpAuthConfig, + sinks::{ + Healthcheck, VectorSink, + http::config::{HttpMethod, HttpSinkConfig}, + 
util::{ + BatchConfig, Compression, RealtimeSizeBasedDefaultBatchSettings, http::RequestConfig, + }, + }, + tls::TlsConfig, +}; + +static CLOUD_URL: &str = "https://api.axiom.co"; + +/// Configuration of the URL/region to use when interacting with Axiom. +#[configurable_component] +#[derive(Clone, Debug, Default)] +#[serde(default)] +pub struct UrlOrRegion { + /// URI of the Axiom endpoint to send data to. + /// + /// If a path is provided, the URL is used as-is. + /// If no path (or only `/`) is provided, `/v1/datasets/{dataset}/ingest` is appended for backwards compatibility. + /// This takes precedence over `region` if both are set (but both should not be set). + #[configurable(validation(format = "uri"))] + #[configurable(metadata(docs::examples = "https://api.eu.axiom.co"))] + #[configurable(metadata(docs::examples = "http://localhost:3400/ingest"))] + #[configurable(metadata(docs::examples = "${AXIOM_URL}"))] + pub url: Option, + + /// The Axiom regional edge domain to use for ingestion. + /// + /// Specify the domain name only (no scheme, no path). + /// When set, data is sent to `https://{region}/v1/ingest/{dataset}`. + /// Cannot be used together with `url`. + #[configurable(metadata(docs::examples = "${AXIOM_REGION}"))] + #[configurable(metadata(docs::examples = "mumbai.axiom.co"))] + #[configurable(metadata(docs::examples = "eu-central-1.aws.edge.axiom.co"))] + pub region: Option, +} + +impl UrlOrRegion { + /// Validates that url and region are not both set. + fn validate(&self) -> crate::Result<()> { + if self.url.is_some() && self.region.is_some() { + return Err("Cannot set both `url` and `region`. Please use only one.".into()); + } + Ok(()) + } + + /// Returns the url if set. + pub fn url(&self) -> Option<&str> { + self.url.as_deref() + } + + /// Returns the region if set. + pub fn region(&self) -> Option<&str> { + self.region.as_deref() + } +} + +/// Configuration for the `axiom` sink. 
+#[configurable_component(sink("axiom", "Deliver log events to Axiom."))] +#[derive(Clone, Debug, Default)] +pub struct AxiomConfig { + /// The Axiom organization ID. + /// + /// Only required when using personal tokens. + #[configurable(metadata(docs::examples = "${AXIOM_ORG_ID}"))] + #[configurable(metadata(docs::examples = "123abc"))] + pub org_id: Option, + + /// The Axiom API token. + #[configurable(metadata(docs::examples = "${AXIOM_TOKEN}"))] + #[configurable(metadata(docs::examples = "123abc"))] + pub token: SensitiveString, + + /// The Axiom dataset to write to. + #[configurable(metadata(docs::examples = "${AXIOM_DATASET}"))] + #[configurable(metadata(docs::examples = "vector_rocks"))] + pub dataset: String, + + /// Configuration for the URL or regional edge endpoint. + #[serde(flatten)] + #[configurable(derived)] + pub endpoint: UrlOrRegion, + + #[configurable(derived)] + #[serde(default)] + pub request: RequestConfig, + + /// The compression algorithm to use. + #[configurable(derived)] + #[serde(default = "Compression::zstd_default")] + pub compression: Compression, + + /// The TLS settings for the connection. + /// + /// Optional, constrains TLS settings for this sink. + #[configurable(derived)] + pub tls: Option, + + /// The batch settings for the sink. + #[configurable(derived)] + #[serde(default)] + pub batch: BatchConfig, + + /// Controls how acknowledgements are handled for this sink. 
+ #[configurable(derived)] + #[serde( + default, + deserialize_with = "crate::serde::bool_or_struct", + skip_serializing_if = "crate::serde::is_default" + )] + pub acknowledgements: AcknowledgementsConfig, +} + +impl GenerateConfig for AxiomConfig { + fn generate_config() -> toml::Value { + toml::from_str( + r#"token = "${AXIOM_TOKEN}" + dataset = "${AXIOM_DATASET}" + url = "${AXIOM_URL}" + org_id = "${AXIOM_ORG_ID}""#, + ) + .unwrap() + } +} + +#[async_trait::async_trait] +#[typetag::serde(name = "axiom")] +impl SinkConfig for AxiomConfig { + async fn build(&self, cx: SinkContext) -> crate::Result<(VectorSink, Healthcheck)> { + // Validate that url and region are not both set + self.endpoint.validate()?; + + let mut request = self.request.clone(); + if let Some(org_id) = &self.org_id { + // NOTE: Only add the org id header if an org id is provided + request + .headers + .insert("X-Axiom-Org-Id".to_string(), org_id.clone()); + } + + // Axiom has a custom high-performance database that can be ingested + // into using the native HTTP ingest endpoint. This configuration wraps + // the vector HTTP sink with the necessary adjustments to send data + // to Axiom, whilst keeping the configuration simple and easy to use + // and maintenance of the vector axiom sink to a minimum. 
+ // + let http_sink_config = HttpSinkConfig { + uri: self.build_endpoint().try_into()?, + compression: self.compression, + auth: Some(HttpAuthConfig::Bearer { + token: self.token.clone(), + }), + method: HttpMethod::Post, + tls: self.tls.clone(), + request, + acknowledgements: self.acknowledgements, + batch: self.batch, + headers: None, + encoding: EncodingConfigWithFraming::new( + Some(FramingConfig::NewlineDelimited), + SerializerConfig::Json(JsonSerializerConfig { + metric_tag_values: MetricTagValues::Single, + options: JsonSerializerOptions { pretty: false }, // Minified JSON + }), + Transformer::default(), + ), + payload_prefix: "".into(), // Always newline delimited JSON + payload_suffix: "".into(), // Always newline delimited JSON + }; + + http_sink_config.build(cx).await + } + + fn input(&self) -> Input { + Input::new(DataType::Metric | DataType::Log | DataType::Trace) + } + + fn acknowledgements(&self) -> &AcknowledgementsConfig { + &self.acknowledgements + } +} + +impl AxiomConfig { + fn build_endpoint(&self) -> String { + // Priority: url > region > default cloud endpoint + + // If url is set, check if it has a path + if let Some(url) = self.endpoint.url() { + let url = url.trim_end_matches('/'); + + // Parse URL to check if path is provided + // If path is empty or just "/", append the legacy format for backwards compatibility + // Otherwise, use the URL as-is + if let Ok(parsed) = url::Url::parse(url) { + let path = parsed.path(); + if path.is_empty() || path == "/" { + // Backwards compatibility: append legacy path format + return format!("{url}/v1/datasets/{}/ingest", self.dataset); + } + } + + // URL has a custom path, use as-is + return url.to_string(); + } + + // If region is set, build the regional edge endpoint + if let Some(region) = self.endpoint.region() { + let region = region.trim_end_matches('/'); + return format!("https://{region}/v1/ingest/{}", self.dataset); + } + + // Default: use cloud endpoint with legacy path format + 
format!("{CLOUD_URL}/v1/datasets/{}/ingest", self.dataset) + } +} + +#[cfg(test)] +mod test { + #[test] + fn generate_config() { + crate::test_util::test_generate_config::(); + } + + #[test] + fn test_region_domain_only() { + // region: mumbai.axiomdomain.co → https://mumbai.axiomdomain.co/v1/ingest/test-3 + let config = super::AxiomConfig { + endpoint: super::UrlOrRegion { + region: Some("mumbai.axiomdomain.co".to_string()), + url: None, + }, + dataset: "test-3".to_string(), + ..Default::default() + }; + let endpoint = config.build_endpoint(); + assert_eq!(endpoint, "https://mumbai.axiomdomain.co/v1/ingest/test-3"); + } + + #[test] + fn test_default_no_config() { + // No url, no region → https://api.axiom.co/v1/datasets/foo/ingest + let config = super::AxiomConfig { + dataset: "foo".to_string(), + ..Default::default() + }; + let endpoint = config.build_endpoint(); + assert_eq!(endpoint, "https://api.axiom.co/v1/datasets/foo/ingest"); + } + + #[test] + fn test_url_with_custom_path() { + // url: http://localhost:3400/ingest → http://localhost:3400/ingest (as-is) + let config = super::AxiomConfig { + endpoint: super::UrlOrRegion { + url: Some("http://localhost:3400/ingest".to_string()), + region: None, + }, + dataset: "meh".to_string(), + ..Default::default() + }; + let endpoint = config.build_endpoint(); + assert_eq!(endpoint, "http://localhost:3400/ingest"); + } + + #[test] + fn test_url_without_path_backwards_compat() { + // url: https://api.eu.axiom.co/ → https://api.eu.axiom.co/v1/datasets/qoo/ingest + let config = super::AxiomConfig { + endpoint: super::UrlOrRegion { + url: Some("https://api.eu.axiom.co".to_string()), + region: None, + }, + dataset: "qoo".to_string(), + ..Default::default() + }; + let endpoint = config.build_endpoint(); + assert_eq!(endpoint, "https://api.eu.axiom.co/v1/datasets/qoo/ingest"); + + // Also test with trailing slash + let config = super::AxiomConfig { + endpoint: super::UrlOrRegion { + url: 
Some("https://api.eu.axiom.co/".to_string()), + region: None, + }, + dataset: "qoo".to_string(), + ..Default::default() + }; + let endpoint = config.build_endpoint(); + assert_eq!(endpoint, "https://api.eu.axiom.co/v1/datasets/qoo/ingest"); + } + + #[test] + fn test_both_url_and_region_fails_validation() { + // When both url and region are set, validation should fail + let endpoint = super::UrlOrRegion { + url: Some("http://localhost:3400/ingest".to_string()), + region: Some("mumbai.axiomdomain.co".to_string()), + }; + + let result = endpoint.validate(); + assert!(result.is_err()); + assert_eq!( + result.unwrap_err().to_string(), + "Cannot set both `url` and `region`. Please use only one." + ); + } + + #[test] + fn test_url_or_region_deserialization_with_url() { + // Test that url can be deserialized at the top level (flattened) + let config: super::AxiomConfig = toml::from_str( + r#" + token = "test-token" + dataset = "test-dataset" + url = "https://api.eu.axiom.co" + "#, + ) + .unwrap(); + + assert_eq!(config.endpoint.url(), Some("https://api.eu.axiom.co")); + assert_eq!(config.endpoint.region(), None); + } + + #[test] + fn test_url_or_region_deserialization_with_region() { + // Test that region can be deserialized at the top level (flattened) + let config: super::AxiomConfig = toml::from_str( + r#" + token = "test-token" + dataset = "test-dataset" + region = "mumbai.axiom.co" + "#, + ) + .unwrap(); + + assert_eq!(config.endpoint.url(), None); + assert_eq!(config.endpoint.region(), Some("mumbai.axiom.co")); + } + + #[test] + fn test_production_regional_edges() { + // Production AWS edge + let config = super::AxiomConfig { + endpoint: super::UrlOrRegion { + region: Some("eu-central-1.aws.edge.axiom.co".to_string()), + url: None, + }, + dataset: "my-dataset".to_string(), + ..Default::default() + }; + let endpoint = config.build_endpoint(); + assert_eq!( + endpoint, + "https://eu-central-1.aws.edge.axiom.co/v1/ingest/my-dataset" + ); + } + + #[test] + fn 
test_staging_environment_edges() { + // Staging environment edge + let config = super::AxiomConfig { + endpoint: super::UrlOrRegion { + region: Some("us-east-1.edge.staging.axiomdomain.co".to_string()), + url: None, + }, + dataset: "test-dataset".to_string(), + ..Default::default() + }; + let endpoint = config.build_endpoint(); + assert_eq!( + endpoint, + "https://us-east-1.edge.staging.axiomdomain.co/v1/ingest/test-dataset" + ); + } + + #[test] + fn test_dev_environment_edges() { + // Dev environment edge + let config = super::AxiomConfig { + endpoint: super::UrlOrRegion { + region: Some("eu-west-1.edge.dev.axiomdomain.co".to_string()), + url: None, + }, + dataset: "dev-dataset".to_string(), + ..Default::default() + }; + let endpoint = config.build_endpoint(); + assert_eq!( + endpoint, + "https://eu-west-1.edge.dev.axiomdomain.co/v1/ingest/dev-dataset" + ); + } +} diff --git a/src/sinks/axiom/integration_tests.rs b/src/sinks/axiom/integration_tests.rs new file mode 100644 index 0000000000000..f6071bf112ced --- /dev/null +++ b/src/sinks/axiom/integration_tests.rs @@ -0,0 +1,119 @@ +use std::env; + +use chrono::{DateTime, Duration, Utc}; +use futures::stream; +use serde::{Deserialize, Serialize}; +use vector_lib::event::{BatchNotifier, BatchStatus, Event, LogEvent}; + +use super::*; +use crate::{ + config::{SinkConfig, SinkContext}, + sinks::axiom::config::UrlOrRegion, + test_util::components::{HTTP_SINK_TAGS, run_and_assert_sink_compliance}, +}; + +#[tokio::test] +async fn axiom_logs_put_data() { + let client = reqwest::Client::new(); + let url = env::var("AXIOM_URL").unwrap(); + let token = env::var("AXIOM_TOKEN").expect("AXIOM_TOKEN environment variable to be set"); + assert!(!token.is_empty(), "$AXIOM_TOKEN required"); + let dataset = env::var("AXIOM_DATASET").unwrap(); + let org_id = env::var("AXIOM_ORG_ID").unwrap(); + + let cx = SinkContext::default(); + + let config = AxiomConfig { + endpoint: UrlOrRegion { + url: Some(url.clone()), + region: None, + }, + 
token: token.clone().into(), + dataset: dataset.clone(), + org_id: Some(org_id.clone()), + ..Default::default() + }; + + // create unique test id so tests can run in parallel + let test_id = uuid::Uuid::new_v4().to_string(); + + let (sink, _) = config.build(cx).await.unwrap(); + + let (batch, mut receiver) = BatchNotifier::new_with_receiver(); + + let mut event1 = LogEvent::from("message_1").with_batch_notifier(&batch); + event1.insert("host", "aws.cloud.eur"); + event1.insert("source_type", "file"); + event1.insert("test_id", test_id.clone()); + + let mut event2 = LogEvent::from("message_2").with_batch_notifier(&batch); + event2.insert("host", "aws.cloud.eur"); + event2.insert("source_type", "file"); + event2.insert("test_id", test_id.clone()); + + drop(batch); + + let events = vec![Event::Log(event1), Event::Log(event2)]; + + run_and_assert_sink_compliance(sink, stream::iter(events), &HTTP_SINK_TAGS).await; + + assert_eq!(receiver.try_recv(), Ok(BatchStatus::Delivered)); + + #[derive(Serialize)] + struct QueryRequest { + apl: String, + #[serde(rename = "endTime")] + end_time: DateTime, + #[serde(rename = "startTime")] + start_time: DateTime, + // ... + } + + #[derive(Deserialize, Debug)] + struct QueryResponseMatch { + data: serde_json::Value, + // ... + } + + #[derive(Deserialize, Debug)] + struct QueryResponse { + matches: Vec, + // ... 
+ } + + let query_req = QueryRequest { + apl: format!( + "['{dataset}'] | where test_id == '{test_id}' | order by _time desc | limit 2" + ), + start_time: Utc::now() - Duration::minutes(10), + end_time: Utc::now() + Duration::minutes(10), + }; + let query_res: QueryResponse = client + .post(format!("{url}/v1/datasets/_apl?format=legacy")) + .header("X-Axiom-Org-Id", org_id) + .header("Authorization", format!("Bearer {token}")) + .json(&query_req) + .send() + .await + .unwrap() + .error_for_status() + .unwrap() + .json() + .await + .unwrap(); + + assert_eq!(2, query_res.matches.len()); + + let fst = match query_res.matches[0].data { + serde_json::Value::Object(ref obj) => obj, + _ => panic!("Unexpected value, expected object"), + }; + // Note that we order descending, so message_2 comes first + assert_eq!("message_2", fst.get("message").unwrap().as_str().unwrap()); + + let snd = match query_res.matches[1].data { + serde_json::Value::Object(ref obj) => obj, + _ => panic!("Unexpected value, expected object"), + }; + assert_eq!("message_1", snd.get("message").unwrap().as_str().unwrap()); +} diff --git a/src/sinks/axiom/mod.rs b/src/sinks/axiom/mod.rs new file mode 100644 index 0000000000000..beba0332ea123 --- /dev/null +++ b/src/sinks/axiom/mod.rs @@ -0,0 +1,6 @@ +mod config; +#[cfg(feature = "axiom-integration-tests")] +#[cfg(test)] +mod integration_tests; + +pub use self::config::AxiomConfig; diff --git a/website/cue/reference/components/sinks/generated/axiom.cue b/website/cue/reference/components/sinks/generated/axiom.cue index cf1bd02842570..d4dc62e6294fc 100644 --- a/website/cue/reference/components/sinks/generated/axiom.cue +++ b/website/cue/reference/components/sinks/generated/axiom.cue @@ -97,6 +97,17 @@ generated: components: sinks: axiom: configuration: { required: false type: string: examples: ["${AXIOM_ORG_ID}", "123abc"] } + region: { + description: """ + The Axiom regional edge domain to use for ingestion. 
+ + Specify the domain name only (no scheme, no path). + When set, data is sent to `https://{region}/v1/ingest/{dataset}`. + Cannot be used together with `url`. + """ + required: false + type: string: examples: ["${AXIOM_REGION}", "mumbai.axiom.co", "eu-central-1.aws.edge.axiom.co"] + } request: { description: "Outbound HTTP request settings." required: false @@ -401,9 +412,11 @@ generated: components: sinks: axiom: configuration: { description: """ URI of the Axiom endpoint to send data to. - Only required if not using Axiom Cloud. + If a path is provided, the URL is used as-is. + If no path (or only `/`) is provided, `/v1/datasets/{dataset}/ingest` is appended for backwards compatibility. + This takes precedence over `region` if both are set (but both should not be set). """ required: false - type: string: examples: ["https://axiom.my-domain.com", "${AXIOM_URL}"] + type: string: examples: ["https://api.eu.axiom.co", "http://localhost:3400/ingest", "${AXIOM_URL}"] } } From d43ab9ec84836f484a155c8b2d155189dba1789c Mon Sep 17 00:00:00 2001 From: Thomas Date: Wed, 5 Nov 2025 15:43:25 -0500 Subject: [PATCH 049/227] chore(deps): update toml to 0.9.8 (#24161) --- Cargo.lock | 109 +++++++++++++++++++++++++++-------------------------- Cargo.toml | 2 +- 2 files changed, 56 insertions(+), 55 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9d07efa9c837d..5f31832d545a9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -521,7 +521,7 @@ dependencies = [ "futures-timer", "futures-util", "http 1.3.1", - "indexmap 2.11.0", + "indexmap 2.12.0", "mime", "multer", "num-traits", @@ -570,7 +570,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34ecdaff7c9cffa3614a9f9999bf9ee4c3078fe3ce4d6a6e161736b56febf2de" dependencies = [ "bytes 1.10.1", - "indexmap 2.11.0", + "indexmap 2.12.0", "serde", "serde_json", ] @@ -3900,7 +3900,7 @@ dependencies = [ "futures 0.3.31", "futures-util", "glob", - "indexmap 2.11.0", + "indexmap 2.12.0", "libc", "quickcheck", 
"tempfile", @@ -4488,7 +4488,7 @@ dependencies = [ "futures-sink", "futures-util", "http 0.2.9", - "indexmap 2.11.0", + "indexmap 2.12.0", "slab", "tokio", "tokio-util", @@ -4507,7 +4507,7 @@ dependencies = [ "futures-core", "futures-sink", "http 1.3.1", - "indexmap 2.11.0", + "indexmap 2.12.0", "slab", "tokio", "tokio-util", @@ -5395,13 +5395,14 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.11.0" +version = "2.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2481980430f9f78649238835720ddccc57e52df14ffce1c6f37391d61b563e9" +checksum = "6717a8d2a5a929a1a2eb43a12812498ed141a0bcfb7e8f7844fbdbe4303bba9f" dependencies = [ "equivalent", - "hashbrown 0.15.2", + "hashbrown 0.16.0", "serde", + "serde_core", ] [[package]] @@ -6398,7 +6399,7 @@ version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f1ada651cd6bdffe01e5f35067df53491f1fe853d2b154008ca2bd30b3d3fcf6" dependencies = [ - "indexmap 2.11.0", + "indexmap 2.12.0", "itoa", "lockfree-object-pool", "metrics", @@ -6419,7 +6420,7 @@ dependencies = [ "crossbeam-epoch", "crossbeam-utils", "hashbrown 0.15.2", - "indexmap 2.11.0", + "indexmap 2.12.0", "metrics", "ordered-float 4.6.0", "quanta", @@ -6654,7 +6655,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "17ebbe97acce52d06aebed4cd4a87c0941f4b2519b59b82b4feb5bd0ce003dfd" dependencies = [ - "indexmap 2.11.0", + "indexmap 2.12.0", "itertools 0.13.0", "ndarray", "noisy_float", @@ -7605,7 +7606,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e1d3afd2628e69da2be385eb6f2fd57c8ac7977ceeff6dc166ff1657b0e386a9" dependencies = [ "fixedbitset", - "indexmap 2.11.0", + "indexmap 2.12.0", ] [[package]] @@ -8048,7 +8049,7 @@ dependencies = [ name = "prometheus-parser" version = "0.1.0" dependencies = [ - "indexmap 2.11.0", + "indexmap 2.12.0", "nom 8.0.0", "prost 0.12.6", "prost-build 0.12.6", @@ -8377,7 +8378,7 
@@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3ed1a693391a16317257103ad06a88c6529ac640846021da7c435a06fffdacd7" dependencies = [ "chrono", - "indexmap 2.11.0", + "indexmap 2.12.0", "newtype-uuid", "quick-xml 0.37.4", "strip-ansi-escapes", @@ -8893,7 +8894,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4c11639076bf147be211b90e47790db89f4c22b6c8a9ca6e960833869da67166" dependencies = [ "aho-corasick", - "indexmap 2.11.0", + "indexmap 2.12.0", "itertools 0.13.0", "nohash", "regex", @@ -9722,7 +9723,7 @@ version = "0.3.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0fc44799282f511a5d403d72a4ff028dc2c87f7fe6830abe3c33bb2fa6dfccec" dependencies = [ - "toml 0.9.5", + "toml 0.9.8", ] [[package]] @@ -9782,7 +9783,7 @@ version = "1.0.145" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "402a6f66d8c709116cf22f558eab210f5a50187f702eb4d7e5ef38d9a7f1c79c" dependencies = [ - "indexmap 2.11.0", + "indexmap 2.12.0", "itoa", "memchr", "ryu", @@ -9851,11 +9852,11 @@ dependencies = [ [[package]] name = "serde_spanned" -version = "1.0.0" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40734c41988f7306bb04f0ecf60ec0f3f1caa34290e4e8ea471dcd3346483b83" +checksum = "e24345aa0fe688594e73770a5f6d1b216508b4f93484c0026d521acd30134392" dependencies = [ - "serde", + "serde_core", ] [[package]] @@ -9890,7 +9891,7 @@ dependencies = [ "chrono", "hex", "indexmap 1.9.3", - "indexmap 2.11.0", + "indexmap 2.12.0", "schemars 0.9.0", "schemars 1.0.3", "serde", @@ -9930,7 +9931,7 @@ version = "0.9.34+deprecated" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" dependencies = [ - "indexmap 2.11.0", + "indexmap 2.12.0", "itoa", "ryu", "serde", @@ -10330,7 +10331,7 @@ dependencies = [ "futures-util", "hashbrown 0.15.2", "hashlink", - 
"indexmap 2.11.0", + "indexmap 2.12.0", "log", "memchr", "once_cell", @@ -11260,17 +11261,17 @@ dependencies = [ [[package]] name = "toml" -version = "0.9.5" +version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75129e1dc5000bfbaa9fee9d1b21f974f9fbad9daec557a521ee6e080825f6e8" +checksum = "f0dc8b1fb61449e27716ec0e1bdf0f6b8f3e8f6b05391e8497b8b6d7804ea6d8" dependencies = [ - "indexmap 2.11.0", - "serde", - "serde_spanned 1.0.0", - "toml_datetime 0.7.0", + "indexmap 2.12.0", + "serde_core", + "serde_spanned 1.0.3", + "toml_datetime 0.7.3", "toml_parser", "toml_writer", - "winnow 0.7.10", + "winnow 0.7.13", ] [[package]] @@ -11284,11 +11285,11 @@ dependencies = [ [[package]] name = "toml_datetime" -version = "0.7.0" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bade1c3e902f58d73d3f294cd7f20391c1cb2fbcb643b73566bc773971df91e3" +checksum = "f2cdb639ebbc97961c51720f858597f7f24c4fc295327923af55b74c3c724533" dependencies = [ - "serde", + "serde_core", ] [[package]] @@ -11297,7 +11298,7 @@ version = "0.19.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" dependencies = [ - "indexmap 2.11.0", + "indexmap 2.12.0", "toml_datetime 0.6.11", "winnow 0.5.18", ] @@ -11308,21 +11309,21 @@ version = "0.22.27" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "41fe8c660ae4257887cf66394862d21dbca4a6ddd26f04a3560410406a2f819a" dependencies = [ - "indexmap 2.11.0", + "indexmap 2.12.0", "serde", "serde_spanned 0.6.9", "toml_datetime 0.6.11", "toml_write", - "winnow 0.7.10", + "winnow 0.7.13", ] [[package]] name = "toml_parser" -version = "1.0.2" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b551886f449aa90d4fe2bdaa9f4a2577ad2dde302c61ecf262d80b116db95c10" +checksum = 
"c0cbe268d35bdb4bb5a56a2de88d0ad0eb70af5384a99d648cd4b3d04039800e" dependencies = [ - "winnow 0.7.10", + "winnow 0.7.13", ] [[package]] @@ -11333,9 +11334,9 @@ checksum = "5d99f8c9a7727884afe522e9bd5edbfc91a3312b36a77b5fb8926e4c31a41801" [[package]] name = "toml_writer" -version = "1.0.2" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcc842091f2def52017664b53082ecbbeb5c7731092bad69d2c63050401dfd64" +checksum = "df8b2b54733674ad286d16267dcfc7a71ed5c776e4ac7aa3c3e2561f7c637bf2" [[package]] name = "tonic" @@ -11454,7 +11455,7 @@ checksum = "d039ad9159c98b70ecfd540b2573b97f7f52c3e8d9f8ad57a24b916a536975f9" dependencies = [ "futures-core", "futures-util", - "indexmap 2.11.0", + "indexmap 2.12.0", "pin-project-lite", "slab", "sync_wrapper 1.0.1", @@ -12165,7 +12166,7 @@ dependencies = [ "git2", "glob", "hex", - "indexmap 2.11.0", + "indexmap 2.12.0", "indicatif", "indoc", "itertools 0.14.0", @@ -12180,7 +12181,7 @@ dependencies = [ "serde_yaml", "sha2", "tempfile", - "toml 0.9.5", + "toml 0.9.8", ] [[package]] @@ -12275,7 +12276,7 @@ dependencies = [ "hyper 0.14.28", "hyper-openssl 0.9.2", "hyper-proxy", - "indexmap 2.11.0", + "indexmap 2.12.0", "indoc", "inventory", "ipnet", @@ -12363,7 +12364,7 @@ dependencies = [ "tokio-test", "tokio-tungstenite 0.20.1", "tokio-util", - "toml 0.9.5", + "toml 0.9.8", "tonic 0.11.0", "tonic-build 0.11.0", "tower 0.5.2", @@ -12464,7 +12465,7 @@ dependencies = [ "crossbeam-utils", "derivative", "futures 0.3.31", - "indexmap 2.11.0", + "indexmap 2.12.0", "metrics", "paste", "pin-project", @@ -12487,7 +12488,7 @@ dependencies = [ "chrono-tz", "encoding_rs", "http 0.2.9", - "indexmap 2.11.0", + "indexmap 2.12.0", "inventory", "no-proxy", "num-traits", @@ -12495,7 +12496,7 @@ dependencies = [ "serde_json", "serde_with 3.14.0", "snafu 0.8.9", - "toml 0.9.5", + "toml 0.9.8", "tracing 0.1.41", "url", "vector-config-common", @@ -12554,7 +12555,7 @@ dependencies = [ "headers", "http 0.2.9", 
"hyper-proxy", - "indexmap 2.11.0", + "indexmap 2.12.0", "inventory", "ipnet", "metrics", @@ -12595,7 +12596,7 @@ dependencies = [ "tokio-stream", "tokio-test", "tokio-util", - "toml 0.9.5", + "toml 0.9.8", "tonic 0.11.0", "tracing 0.1.41", "tracing-subscriber", @@ -12812,7 +12813,7 @@ dependencies = [ "hostname 0.4.0", "iana-time-zone", "idna 1.0.3", - "indexmap 2.11.0", + "indexmap 2.12.0", "indoc", "influxdb-line-protocol", "ipcrypt-rs", @@ -13671,9 +13672,9 @@ dependencies = [ [[package]] name = "winnow" -version = "0.7.10" +version = "0.7.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c06928c8748d81b05c9be96aad92e1b6ff01833332f281e8cfca3be4b35fc9ec" +checksum = "21a0236b59786fed61e2a80582dd500fe61f18b5dca67a4a067d0bc9039339cf" dependencies = [ "memchr", ] diff --git a/Cargo.toml b/Cargo.toml index 518440b77a194..42cef293793a7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -187,7 +187,7 @@ snafu = { version = "0.8.9", default-features = false, features = ["futures", "s socket2 = { version = "0.5.10", default-features = false } tempfile = "3.23.0" tokio = { version = "1.45.1", default-features = false } -toml = { version = "0.9.5", default-features = false, features = ["serde", "display", "parse"] } +toml = { version = "0.9.8", default-features = false, features = ["serde", "display", "parse"] } tonic = { version = "0.11", default-features = false, features = ["transport", "codegen", "prost", "tls", "tls-roots", "gzip"] } tonic-build = { version = "0.11", default-features = false, features = ["transport", "prost"] } tracing = { version = "0.1.34", default-features = false } From 2c27aab987e8750fdc3933fc70632a4be33e84f4 Mon Sep 17 00:00:00 2001 From: Thomas Date: Wed, 5 Nov 2025 16:17:02 -0500 Subject: [PATCH 050/227] chore(vdev): make modules visible to rustfmt (#24162) * Remove mod inside cli_subcommands macro * cargo fmt * chore(vdev): apply vdev rust check fixes * Link to rustfmt issue --- vdev/src/commands/build/mod.rs | 13 
++- vdev/src/commands/build/vrl_wasm.rs | 4 +- vdev/src/commands/check/examples.rs | 2 +- vdev/src/commands/check/markdown.rs | 8 +- vdev/src/commands/check/mod.rs | 28 +++++-- vdev/src/commands/complete.rs | 2 +- vdev/src/commands/config/mod.rs | 7 +- vdev/src/commands/config/set/mod.rs | 3 +- vdev/src/commands/config/set/repo.rs | 5 +- vdev/src/commands/e2e/mod.rs | 16 ++-- vdev/src/commands/info.rs | 5 +- vdev/src/commands/integration/mod.rs | 19 +++-- vdev/src/commands/meta/mod.rs | 7 +- vdev/src/commands/mod.rs | 64 ++++++++------ vdev/src/commands/release/github.rs | 2 +- vdev/src/commands/release/homebrew.rs | 17 ++-- vdev/src/commands/release/mod.rs | 16 ++-- vdev/src/commands/release/prepare.rs | 115 +++++++++++++++++--------- vdev/src/commands/run.rs | 2 +- 19 files changed, 218 insertions(+), 117 deletions(-) diff --git a/vdev/src/commands/build/mod.rs b/vdev/src/commands/build/mod.rs index 7ed87a92ace5d..48890c8682b83 100644 --- a/vdev/src/commands/build/mod.rs +++ b/vdev/src/commands/build/mod.rs @@ -1,12 +1,17 @@ +mod licenses; +mod publish_metadata; +mod vector; +mod vrl_wasm; + crate::cli_subcommands! { "Build, generate or regenerate components..." component_docs, - mod licenses, + licenses, manifests, - mod publish_metadata, + publish_metadata, release_cue, - mod vector, - mod vrl_wasm, + vector, + vrl_wasm, } crate::script_wrapper! 
{ diff --git a/vdev/src/commands/build/vrl_wasm.rs b/vdev/src/commands/build/vrl_wasm.rs index 92975b7acd6a9..9f95422484748 100644 --- a/vdev/src/commands/build/vrl_wasm.rs +++ b/vdev/src/commands/build/vrl_wasm.rs @@ -1,6 +1,6 @@ -use std::path::Path; use anyhow::Result; use clap::Args; +use std::path::Path; use crate::app; @@ -14,7 +14,7 @@ impl Cli { let vrl_path = Path::new(app::path()).join("lib").join("vrl"); let args = &["build", "--release", "--target", "wasm32-unknown-unknown"]; - for crate_name in ["compiler", "core", "diagnostic", "parser"] { + for crate_name in ["compiler", "core", "diagnostic", "parser"] { println!("Compiling lib/vrl/{crate_name} to wasm32-unknown-unknown"); std::env::set_current_dir(vrl_path.join(crate_name))?; app::exec("cargo", *args, false)?; diff --git a/vdev/src/commands/check/examples.rs b/vdev/src/commands/check/examples.rs index bd29eb49f4b05..0f080ba10d611 100644 --- a/vdev/src/commands/check/examples.rs +++ b/vdev/src/commands/check/examples.rs @@ -1,6 +1,6 @@ use std::fs; -use anyhow::{bail, Context, Result}; +use anyhow::{Context, Result, bail}; use crate::app; diff --git a/vdev/src/commands/check/markdown.rs b/vdev/src/commands/check/markdown.rs index c5009da8e9a81..af2f84db51ff5 100644 --- a/vdev/src/commands/check/markdown.rs +++ b/vdev/src/commands/check/markdown.rs @@ -20,11 +20,11 @@ impl Cli { "scripts/.markdownlintrc", // We should fix these as well. Previously these files were not linted. 
"--ignore", - ".github" + ".github", ] - .into_iter() - .chain(files.iter().map(String::as_str)) - .collect(); + .into_iter() + .chain(files.iter().map(String::as_str)) + .collect(); app::exec("markdownlint", &args, true) } diff --git a/vdev/src/commands/check/mod.rs b/vdev/src/commands/check/mod.rs index f31935633ebd6..4d61a76dbf3c2 100644 --- a/vdev/src/commands/check/mod.rs +++ b/vdev/src/commands/check/mod.rs @@ -1,16 +1,26 @@ +mod component_docs; +mod component_features; +mod deny; +mod examples; +mod fmt; +mod licenses; +mod markdown; +mod rust; +mod scripts; + crate::cli_subcommands! { "Check parts of the Vector code base..." - mod component_docs, - mod component_features, - mod deny, + component_docs, + component_features, + deny, docs, events, - mod examples, - mod fmt, - mod licenses, - mod markdown, - mod rust, - mod scripts, + examples, + fmt, + licenses, + markdown, + rust, + scripts, } // These should eventually be migrated to Rust code diff --git a/vdev/src/commands/complete.rs b/vdev/src/commands/complete.rs index e02834c4e6e8a..7b3b756a7bd55 100644 --- a/vdev/src/commands/complete.rs +++ b/vdev/src/commands/complete.rs @@ -1,6 +1,6 @@ use anyhow::Result; use clap::{Args, CommandFactory}; -use clap_complete::{generate, Shell}; +use clap_complete::{Shell, generate}; use std::io; use super::Cli as RootCli; diff --git a/vdev/src/commands/config/mod.rs b/vdev/src/commands/config/mod.rs index 254e33747644c..f9d53961b77fa 100644 --- a/vdev/src/commands/config/mod.rs +++ b/vdev/src/commands/config/mod.rs @@ -1,5 +1,8 @@ +mod find; +mod set; + crate::cli_subcommands! { "Manage the vdev config file..." - mod find, - mod set, + find, + set, } diff --git a/vdev/src/commands/config/set/mod.rs b/vdev/src/commands/config/set/mod.rs index eaf394dd15c05..cdbd02fddadd3 100644 --- a/vdev/src/commands/config/set/mod.rs +++ b/vdev/src/commands/config/set/mod.rs @@ -1,4 +1,5 @@ +mod repo; crate::cli_subcommands! { "Modify the config file..." 
- mod repo, + repo, } diff --git a/vdev/src/commands/config/set/repo.rs b/vdev/src/commands/config/set/repo.rs index fd6fc7ef68807..fd423cee7931e 100644 --- a/vdev/src/commands/config/set/repo.rs +++ b/vdev/src/commands/config/set/repo.rs @@ -1,7 +1,10 @@ use anyhow::Result; use clap::Args; -use crate::{app, utils::{config, platform}}; +use crate::{ + app, + utils::{config, platform}, +}; /// Set the path to the Vector repository #[derive(Args, Debug)] diff --git a/vdev/src/commands/e2e/mod.rs b/vdev/src/commands/e2e/mod.rs index 2804d9b2d750a..5ed9400348970 100644 --- a/vdev/src/commands/e2e/mod.rs +++ b/vdev/src/commands/e2e/mod.rs @@ -1,3 +1,9 @@ +mod ci_paths; +mod show; +mod start; +mod stop; +mod test; + crate::cli_subcommands! { r"Manage end-to-end test environments... @@ -5,9 +11,9 @@ These test setups are organized into a set of integrations, located in subdirect `tests/e2e`. For each integration, there is a matrix of environments, described in the `matrix` setting in the `test.yaml` file contained therein." - mod show, - mod start, - mod stop, - mod test, - mod ci_paths, + show, + start, + stop, + test, + ci_paths, } diff --git a/vdev/src/commands/info.rs b/vdev/src/commands/info.rs index 91f2aac6eff75..ae725bc224b11 100644 --- a/vdev/src/commands/info.rs +++ b/vdev/src/commands/info.rs @@ -2,7 +2,10 @@ use anyhow::Result; use clap::Args; use crate::testing::docker::CONTAINER_TOOL; -use crate::{app, utils::{config, platform}}; +use crate::{ + app, + utils::{config, platform}, +}; /// Show `vdev` command configuration #[derive(Args, Debug)] diff --git a/vdev/src/commands/integration/mod.rs b/vdev/src/commands/integration/mod.rs index 7fc2ea0ec071c..c1e3e72250fa9 100644 --- a/vdev/src/commands/integration/mod.rs +++ b/vdev/src/commands/integration/mod.rs @@ -1,3 +1,10 @@ +mod build; +mod ci_paths; +mod show; +mod start; +mod stop; +mod test; + crate::cli_subcommands! { r"Manage integration test environments... 
@@ -5,10 +12,10 @@ These test setups are organized into a set of integrations, located in subdirect `tests/integration`. For each integration, there is a matrix of environments, described in the `matrix` setting in the `test.yaml` file contained in the `config/` subdirectory." - mod show, - mod build, - mod start, - mod stop, - mod test, - mod ci_paths, + show, + build, + start, + stop, + test, + ci_paths, } diff --git a/vdev/src/commands/meta/mod.rs b/vdev/src/commands/meta/mod.rs index f941a1d4b62ee..cc67a0680bf5b 100644 --- a/vdev/src/commands/meta/mod.rs +++ b/vdev/src/commands/meta/mod.rs @@ -1,5 +1,8 @@ +mod install_git_hooks; +mod starship; + crate::cli_subcommands! { "Collection of meta-utilities..." - mod starship, - mod install_git_hooks, + starship, + install_git_hooks, } diff --git a/vdev/src/commands/mod.rs b/vdev/src/commands/mod.rs index 9c5f0fb2e87d9..547d22147901d 100644 --- a/vdev/src/commands/mod.rs +++ b/vdev/src/commands/mod.rs @@ -5,13 +5,9 @@ mod compose_tests; /// This macro simplifies the generation of CLI subcommand invocation structures by combining the /// creation of the command enum and implementation of the dispatch function into one simple list. +// Module declaration in here was removed due to https://github.com/rust-lang/rustfmt/issues/3253 #[macro_export] macro_rules! cli_commands { - // Peel off the list of module identifiers one-by-one - ( :: $( $list:ident, )* :: mod $mod:ident, $( $rest:tt )* ) => { - mod $mod; - $crate::cli_commands! { :: $( $list, )* $mod, :: $( $rest )* } - }; ( :: $( $list:ident, )* :: $mod:ident, $( $rest:tt )* ) => { $crate::cli_commands! { :: $( $list, )* $mod, :: $( $rest )* } }; @@ -71,26 +67,46 @@ pub struct Cli { command: Commands, } +mod build; +mod check; +mod complete; +mod config; +mod crate_versions; +mod e2e; +mod exec; +mod features; +mod fmt; +mod info; +mod integration; +mod meta; +mod package; +mod release; +mod run; +mod status; +mod test; +mod test_vrl; +mod version; + cli_commands! 
{ - mod build, - mod check, - mod complete, - mod config, - mod crate_versions, - mod e2e, - mod exec, - mod features, - mod fmt, - mod info, - mod integration, - mod meta, - mod package, - mod release, - mod run, - mod status, - mod test, - mod test_vrl, - mod version, + build, + check, + complete, + config, + crate_versions, + e2e, + exec, + features, + fmt, + info, + integration, + meta, + package, + release, + run, + status, + test, + test_vrl, + version, } /// This macro creates a wrapper for an existing script. diff --git a/vdev/src/commands/release/github.rs b/vdev/src/commands/release/github.rs index f03211e7cbed7..0adc93a22e9f0 100644 --- a/vdev/src/commands/release/github.rs +++ b/vdev/src/commands/release/github.rs @@ -1,6 +1,6 @@ use crate::app::CommandExt as _; use crate::utils::cargo; -use anyhow::{anyhow, Ok, Result}; +use anyhow::{Ok, Result, anyhow}; use glob::glob; use std::process::Command; diff --git a/vdev/src/commands/release/homebrew.rs b/vdev/src/commands/release/homebrew.rs index 3191be355ab52..69deeb657401e 100644 --- a/vdev/src/commands/release/homebrew.rs +++ b/vdev/src/commands/release/homebrew.rs @@ -1,7 +1,5 @@ use crate::utils::git; use anyhow::Result; -use hex; -use reqwest; use sha2::Digest; use std::path::Path; use std::{env, fs}; @@ -26,7 +24,10 @@ impl Cli { let td = TempDir::new()?; env::set_current_dir(td.path())?; - debug!("Cloning the homebrew repository for username: {}", self.username); + debug!( + "Cloning the homebrew repository for username: {}", + self.username + ); clone_and_setup_git(&self.username)?; let vector_version = env::var("VECTOR_VERSION")?; @@ -41,15 +42,11 @@ impl Cli { } } - /// Clones the repository and sets up Git configuration fn clone_and_setup_git(username: &str) -> Result<()> { - let github_token = env::var("HOMEBREW_PAT") - .or_else(|_| env::var("GITHUB_TOKEN"))?; - let homebrew_repo = format!( - "https://{username}:{github_token}@github.com/{username}/homebrew-brew.git" - ); - + let github_token = 
env::var("HOMEBREW_PAT").or_else(|_| env::var("GITHUB_TOKEN"))?; + let homebrew_repo = + format!("https://{username}:{github_token}@github.com/{username}/homebrew-brew.git"); git::clone(&homebrew_repo)?; env::set_current_dir("homebrew-brew")?; diff --git a/vdev/src/commands/release/mod.rs b/vdev/src/commands/release/mod.rs index ceb9cbc023039..e9c4f299caa0d 100644 --- a/vdev/src/commands/release/mod.rs +++ b/vdev/src/commands/release/mod.rs @@ -1,13 +1,19 @@ +mod channel; +mod github; +mod homebrew; +mod prepare; +mod push; + crate::cli_subcommands! { "Manage the release process..." generate_cue, - mod channel, + channel, commit, docker, - mod github, - mod homebrew, - mod prepare, - mod push, + github, + homebrew, + prepare, + push, s3, } diff --git a/vdev/src/commands/release/prepare.rs b/vdev/src/commands/release/prepare.rs index a60130c049967..640f19f377386 100644 --- a/vdev/src/commands/release/prepare.rs +++ b/vdev/src/commands/release/prepare.rs @@ -1,8 +1,9 @@ #![allow(clippy::print_stdout)] #![allow(clippy::print_stderr)] +use crate::utils::command::run_command; use crate::utils::{git, paths}; -use anyhow::{anyhow, Result}; +use anyhow::{Result, anyhow}; use reqwest::blocking::Client; use semver::Version; use std::fs::File; @@ -12,9 +13,8 @@ use std::io::Write; use std::path::{Path, PathBuf}; use std::process::Command; use std::{env, fs}; -use toml::map::Map; use toml::Value; -use crate::utils::command::run_command; +use toml::map::Map; const ALPINE_PREFIX: &str = "FROM docker.io/alpine:"; const ALPINE_DOCKERFILE: &str = "distribution/docker/alpine/Dockerfile"; @@ -70,7 +70,10 @@ impl Cli { release_branch: format!("v{}.{}", self.version.major, self.version.minor), // Websites containing `website` will also generate website previews. // Caveat is these branches can only contain alphanumeric chars and dashes. 
- release_preparation_branch: format!("prepare-v-{}-{}-{}-website", self.version.major, self.version.minor, self.version.patch), + release_preparation_branch: format!( + "prepare-v-{}-{}-{}-website", + self.version.major, self.version.minor, self.version.patch + ), }; prepare.run() } @@ -136,11 +139,16 @@ impl Prepare { for line in &mut lines { if line.trim().starts_with("vrl = { git = ") { if let Ok(mut vrl_toml) = line.parse::() { - let vrl_dependency: &mut Value = vrl_toml.get_mut("vrl").expect("line should start with 'vrl'"); + let vrl_dependency: &mut Value = vrl_toml + .get_mut("vrl") + .expect("line should start with 'vrl'"); let mut new_dependency_value = Map::new(); - new_dependency_value.insert("version".to_string(), Value::String(vrl_version.clone())); - let features = vrl_dependency.get("features").expect("missing 'features' key"); + new_dependency_value + .insert("version".to_string(), Value::String(vrl_version.clone())); + let features = vrl_dependency + .get("features") + .expect("missing 'features' key"); new_dependency_value.insert("features".to_string(), features.clone()); *line = format!("vrl = {}", Value::from(new_dependency_value)); @@ -152,7 +160,9 @@ impl Prepare { lines.push(String::new()); // File should end with a newline. 
fs::write(cargo_toml_path, lines.join("\n")).expect("Failed to write Cargo.toml"); run_command("cargo update -p vrl"); - git::commit(&format!("chore(releasing): Pinned VRL version to {vrl_version}"))?; + git::commit(&format!( + "chore(releasing): Pinned VRL version to {vrl_version}" + ))?; Ok(()) } @@ -164,7 +174,10 @@ impl Prepare { new_version: Option<&str>, prefix: &str, ) -> Result<()> { - debug!("update_dockerfile_base_version for {}", dockerfile_path.display()); + debug!( + "update_dockerfile_base_version for {}", + dockerfile_path.display() + ); if let Some(version) = new_version { let contents = fs::read_to_string(dockerfile_path)?; @@ -181,9 +194,9 @@ impl Prepare { // Split into prefix, version, and suffix // E.g. "FROM docker.io/alpine:", "3.21", " AS builder" - let after_prefix = first_line - .strip_prefix(prefix) - .ok_or_else(|| anyhow!("Failed to strip prefix in {}", dockerfile_path.display()))?; + let after_prefix = first_line.strip_prefix(prefix).ok_or_else(|| { + anyhow!("Failed to strip prefix in {}", dockerfile_path.display()) + })?; let parts: Vec<&str> = after_prefix.splitn(2, ' ').collect(); let suffix = parts.get(1).unwrap_or(&""); @@ -194,11 +207,13 @@ impl Prepare { fs::write(dockerfile_path, &new_contents)?; git::commit(&format!( "chore(releasing): Bump {} version to {version}", - dockerfile_path.strip_prefix(&self.repo_root).unwrap().display(), + dockerfile_path + .strip_prefix(&self.repo_root) + .unwrap() + .display(), ))?; } else { - debug!( - "No version specified for {dockerfile_path:?}; skipping update"); + debug!("No version specified for {dockerfile_path:?}; skipping update"); } Ok(()) } @@ -209,7 +224,10 @@ impl Prepare { let script = self.repo_root.join(RELEASE_CUE_SCRIPT); let new_vector_version = &self.new_vector_version; if script.is_file() { - run_command(&format!("{} --new-version {new_vector_version} --no-interactive", script.to_string_lossy().as_ref())); + run_command(&format!( + "{} --new-version {new_vector_version} 
--no-interactive", + script.to_string_lossy().as_ref() + )); } else { return Err(anyhow!("Script not found: {}", script.display())); } @@ -233,11 +251,15 @@ impl Prepare { let new_version_str = format!("{}.{}", new_version.major, new_version.minor); if !contents.contains(&old_version_str) { - return Err(anyhow!("Could not find version {} to update in {}", - latest_version, file_path.display())); + return Err(anyhow!( + "Could not find version {} to update in {}", + latest_version, + file_path.display() + )); } - let updated_contents = contents.replace(&latest_version.to_string(), &new_version.to_string()); + let updated_contents = + contents.replace(&latest_version.to_string(), &new_version.to_string()); let updated_contents = updated_contents.replace(&old_version_str, &new_version_str); fs::write(file_path, updated_contents) @@ -275,14 +297,17 @@ impl Prepare { fs::rename(&temp_file_path, &versions_cue_path)?; - git::commit(&format!("chore(releasing): Add {vector_version} to versions.cue"))?; + git::commit(&format!( + "chore(releasing): Add {vector_version} to versions.cue" + ))?; Ok(()) } /// Step 10: Create a new release md file fn create_new_release_md(&self) -> Result<()> { debug!("create_new_release_md"); - let releases_dir = self.repo_root + let releases_dir = self + .repo_root .join("website") .join("content") .join("en") @@ -292,7 +317,10 @@ impl Prepare { let new_version = &self.new_vector_version; let old_file_path = releases_dir.join(format!("{old_version}.md")); if !old_file_path.exists() { - return Err(anyhow!("Source file not found: {}", old_file_path.display())); + return Err(anyhow!( + "Source file not found: {}", + old_file_path.display() + )); } let content = fs::read_to_string(&old_file_path)?; @@ -304,8 +332,13 @@ impl Prepare { for line in lines { if line.trim().starts_with("weight: ") && !weight_updated { // Extract the current weight value - let weight_str = line.trim().strip_prefix("weight: ").ok_or_else(|| anyhow!("Invalid weight 
format"))?; - let weight: i32 = weight_str.parse().map_err(|e| anyhow!("Failed to parse weight: {}", e))?; + let weight_str = line + .trim() + .strip_prefix("weight: ") + .ok_or_else(|| anyhow!("Invalid weight format"))?; + let weight: i32 = weight_str + .parse() + .map_err(|e| anyhow!("Failed to parse weight: {}", e))?; // Increase by 1 let new_weight = weight + 1; updated_lines.push(format!("weight: {new_weight}")); @@ -319,7 +352,6 @@ impl Prepare { error!("Couldn't update 'weight' line from {old_file_path:?}"); } - let new_file_path = releases_dir.join(format!("{new_version}.md")); updated_lines.push(String::new()); // File should end with a newline. let updated_content = updated_lines.join("\n"); @@ -446,21 +478,21 @@ fn get_latest_vrl_tag_and_changelog() -> Result { let tags_url = "https://api.github.com/repos/vectordotdev/vrl/tags"; let tags_response = client .get(tags_url) - .header("User-Agent", "rust-reqwest") // GitHub API requires User-Agent + .header("User-Agent", "rust-reqwest") // GitHub API requires User-Agent .send()? .text()?; let tags: Vec = serde_json::from_str(&tags_response)?; - let latest_tag = tags.first() + let latest_tag = tags + .first() .and_then(|tag| tag.get("name")) .and_then(|name| name.as_str()) .ok_or_else(|| anyhow!("Failed to extract latest tag"))? 
.to_string(); // Step 2: Download CHANGELOG.md for the specific tag - let changelog_url = format!( - "https://raw.githubusercontent.com/vectordotdev/vrl/{latest_tag}/CHANGELOG.md", - ); + let changelog_url = + format!("https://raw.githubusercontent.com/vectordotdev/vrl/{latest_tag}/CHANGELOG.md",); let changelog = client .get(&changelog_url) .header("User-Agent", "rust-reqwest") @@ -494,7 +526,9 @@ fn get_latest_vrl_tag_and_changelog() -> Result { #[cfg(test)] mod tests { - use crate::commands::release::prepare::{format_vrl_changelog_block, insert_block_after_changelog}; + use crate::commands::release::prepare::{ + format_vrl_changelog_block, insert_block_after_changelog, + }; use indoc::indoc; #[test] @@ -503,11 +537,11 @@ mod tests { let vrl_changelog_block = format_vrl_changelog_block(vrl_changelog); let expected = concat!( - "\tvrl_changelog: \"\"\"\n", - "\t\t#### [0.2.0]\n", - "\t\t- Feature\n", - "\t\t- Fix\n", - "\t\t\"\"\"" + "\tvrl_changelog: \"\"\"\n", + "\t\t#### [0.2.0]\n", + "\t\t- Feature\n", + "\t\t- Fix\n", + "\t\t\"\"\"" ); assert_eq!(vrl_changelog_block, expected); @@ -525,7 +559,14 @@ mod tests { // Assert the last 5 lines match the VRL changelog block let expected_lines_len = 5; - let updated_tail: Vec<&str> = updated.lines().rev().take(expected_lines_len).collect::>().into_iter().rev().collect(); + let updated_tail: Vec<&str> = updated + .lines() + .rev() + .take(expected_lines_len) + .collect::>() + .into_iter() + .rev() + .collect(); let expected_lines: Vec<&str> = vrl_changelog_block.lines().collect(); assert_eq!(updated_tail, expected_lines); } diff --git a/vdev/src/commands/run.rs b/vdev/src/commands/run.rs index fe9092ca4774e..f0fdab9cdf268 100644 --- a/vdev/src/commands/run.rs +++ b/vdev/src/commands/run.rs @@ -1,6 +1,6 @@ use std::{path::PathBuf, process::Command}; -use anyhow::{bail, Result}; +use anyhow::{Result, bail}; use clap::Args; use crate::{app::CommandExt as _, utils::features}; From 99b5835af91aa0423400a18c35e6c2b3619b8ed0 
Mon Sep 17 00:00:00 2001 From: Pavlos Rontidis Date: Wed, 5 Nov 2025 16:22:35 -0500 Subject: [PATCH 051/227] fix(vdev): remove --reuse-image (#24163) --- scripts/run-integration-test.sh | 13 ++++--------- vdev/src/commands/compose_tests/start.rs | 11 +---------- vdev/src/commands/compose_tests/stop.rs | 11 +---------- vdev/src/commands/compose_tests/test.rs | 2 -- vdev/src/commands/e2e/start.rs | 5 ----- vdev/src/commands/e2e/stop.rs | 5 ----- vdev/src/commands/e2e/test.rs | 5 ----- vdev/src/commands/integration/start.rs | 5 ----- vdev/src/commands/integration/stop.rs | 5 ----- vdev/src/commands/integration/test.rs | 5 ----- vdev/src/commands/test.rs | 5 ----- vdev/src/testing/integration.rs | 11 +---------- vdev/src/testing/runner.rs | 22 ++-------------------- 13 files changed, 9 insertions(+), 96 deletions(-) diff --git a/scripts/run-integration-test.sh b/scripts/run-integration-test.sh index f7ea444478c80..488fd90226380 100755 --- a/scripts/run-integration-test.sh +++ b/scripts/run-integration-test.sh @@ -32,7 +32,6 @@ Options: -v Increase verbosity; repeat for more (e.g. -vv or -vvv) -e One or more environments to run (repeatable or comma-separated). If provided, these are used as TEST_ENVIRONMENTS instead of auto-discovery. - -b Always build images (disables --reuse-image which is enabled by default) Notes: - All existing two-argument invocations remain compatible: @@ -44,8 +43,7 @@ USAGE # Parse options # Note: options must come before positional args (standard getopts behavior) TEST_ENV="" -REUSE_IMAGE="--reuse-image" -while getopts ":hr:v:e:b" opt; do +while getopts ":hr:v:e:" opt; do case "$opt" in h) usage @@ -64,9 +62,6 @@ while getopts ":hr:v:e:b" opt; do e) TEST_ENV="$OPTARG" ;; - b) - REUSE_IMAGE="" - ;; \?) 
echo "ERROR: unknown option: -$OPTARG" >&2 usage @@ -132,12 +127,12 @@ for TEST_ENV in "${TEST_ENVIRONMENTS[@]}"; do docker run --rm -v vector_target:/output/"${TEST_NAME}" alpine:3.20 \ sh -c "rm -rf /output/${TEST_NAME}/*" - cargo vdev "${VERBOSITY}" "${TEST_TYPE}" start --build-all ${REUSE_IMAGE} "${TEST_NAME}" "${TEST_ENV}" + cargo vdev "${VERBOSITY}" "${TEST_TYPE}" start --build-all "${TEST_NAME}" "${TEST_ENV}" START_RET=$? print_compose_logs_on_failure "$START_RET" if [[ "$START_RET" -eq 0 ]]; then - cargo vdev "${VERBOSITY}" "${TEST_TYPE}" test --retries "$RETRIES" --build-all ${REUSE_IMAGE} "${TEST_NAME}" "${TEST_ENV}" + cargo vdev "${VERBOSITY}" "${TEST_TYPE}" test --retries "$RETRIES" --build-all "${TEST_NAME}" "${TEST_ENV}" RET=$? print_compose_logs_on_failure "$RET" @@ -149,7 +144,7 @@ for TEST_ENV in "${TEST_ENVIRONMENTS[@]}"; do fi # Always stop the environment (best effort cleanup) - cargo vdev "${VERBOSITY}" "${TEST_TYPE}" stop --build-all ${REUSE_IMAGE} "${TEST_NAME}" || true + cargo vdev "${VERBOSITY}" "${TEST_TYPE}" stop --build-all "${TEST_NAME}" || true # Exit early on first failure if [[ "$RET" -ne 0 ]]; then diff --git a/vdev/src/commands/compose_tests/start.rs b/vdev/src/commands/compose_tests/start.rs index d9007500e60c7..b3bb686f3325f 100644 --- a/vdev/src/commands/compose_tests/start.rs +++ b/vdev/src/commands/compose_tests/start.rs @@ -10,7 +10,6 @@ pub(crate) fn exec( integration: &str, environment: Option<&String>, all_features: bool, - reuse_image: bool, ) -> Result<()> { let environment = if let Some(environment) = environment { environment.clone() @@ -22,13 +21,5 @@ pub(crate) fn exec( env.clone() }; debug!("Selected environment: {environment:#?}"); - ComposeTest::generate( - local_config, - integration, - environment, - all_features, - reuse_image, - 0, - )? 
- .start() + ComposeTest::generate(local_config, integration, environment, all_features, 0)?.start() } diff --git a/vdev/src/commands/compose_tests/stop.rs b/vdev/src/commands/compose_tests/stop.rs index a4e45f90de4a3..7470512efb5f1 100644 --- a/vdev/src/commands/compose_tests/stop.rs +++ b/vdev/src/commands/compose_tests/stop.rs @@ -11,22 +11,13 @@ pub(crate) fn exec( local_config: ComposeTestLocalConfig, test_name: &str, all_features: bool, - reuse_image: bool, ) -> Result<()> { let (_test_dir, config) = ComposeTestConfig::load(local_config.directory, test_name)?; let active_environment = find_active_environment_for_integration(local_config.directory, test_name, &config)?; if let Some(environment) = active_environment { - ComposeTest::generate( - local_config, - test_name, - environment, - all_features, - reuse_image, - 0, - )? - .stop() + ComposeTest::generate(local_config, test_name, environment, all_features, 0)?.stop() } else { println!("No environment for {test_name} is active."); Ok(()) diff --git a/vdev/src/commands/compose_tests/test.rs b/vdev/src/commands/compose_tests/test.rs index 7cb739b2e4042..83381d066bc57 100644 --- a/vdev/src/commands/compose_tests/test.rs +++ b/vdev/src/commands/compose_tests/test.rs @@ -14,7 +14,6 @@ pub fn exec( integration: &str, environment: Option<&String>, all_features: bool, - reuse_image: bool, retries: u8, args: &[String], ) -> Result<()> { @@ -40,7 +39,6 @@ pub fn exec( integration, environment, all_features, - reuse_image, retries, )? .test(args.to_owned())?; diff --git a/vdev/src/commands/e2e/start.rs b/vdev/src/commands/e2e/start.rs index ca044c2e80889..d20aba240718a 100644 --- a/vdev/src/commands/e2e/start.rs +++ b/vdev/src/commands/e2e/start.rs @@ -14,10 +14,6 @@ pub struct Cli { #[arg(short = 'a', long)] build_all: bool, - /// Reuse existing test runner image instead of rebuilding (useful in CI) - #[arg(long)] - reuse_image: bool, - /// The desired environment name to start. 
If omitted, the first environment name is used. environment: Option, } @@ -29,7 +25,6 @@ impl Cli { &self.test, self.environment.as_ref(), self.build_all, - self.reuse_image, ) } } diff --git a/vdev/src/commands/e2e/stop.rs b/vdev/src/commands/e2e/stop.rs index 9847ccdb3a914..85f28e960a0ba 100644 --- a/vdev/src/commands/e2e/stop.rs +++ b/vdev/src/commands/e2e/stop.rs @@ -13,10 +13,6 @@ pub struct Cli { /// If true, remove the runner container compiled with all integration test features #[arg(short = 'a', long)] build_all: bool, - - /// Reuse existing test runner image instead of rebuilding (useful in CI) - #[arg(long)] - reuse_image: bool, } impl Cli { @@ -25,7 +21,6 @@ impl Cli { ComposeTestLocalConfig::e2e(), &self.test, self.build_all, - self.reuse_image, ) } } diff --git a/vdev/src/commands/e2e/test.rs b/vdev/src/commands/e2e/test.rs index 6c050fd87b436..092d880f557e9 100644 --- a/vdev/src/commands/e2e/test.rs +++ b/vdev/src/commands/e2e/test.rs @@ -24,10 +24,6 @@ pub struct Cli { #[arg(short = 'a', long)] build_all: bool, - /// Reuse existing test runner image instead of rebuilding (useful in CI) - #[arg(long)] - reuse_image: bool, - /// Number of retries to allow on each integration test case. #[arg(short = 'r', long)] retries: Option, @@ -43,7 +39,6 @@ impl Cli { &self.e2e_test, self.environment.as_ref(), self.build_all, - self.reuse_image, self.retries.unwrap_or_default(), &self.args, ) diff --git a/vdev/src/commands/integration/start.rs b/vdev/src/commands/integration/start.rs index 219fa0944da95..dca8ace5e4de2 100644 --- a/vdev/src/commands/integration/start.rs +++ b/vdev/src/commands/integration/start.rs @@ -14,10 +14,6 @@ pub struct Cli { #[arg(short = 'a', long)] build_all: bool, - /// Reuse existing test runner image instead of rebuilding (useful in CI) - #[arg(long)] - reuse_image: bool, - /// The desired environment name to start. If omitted, the first environment name is used. 
environment: Option, } @@ -29,7 +25,6 @@ impl Cli { &self.integration, self.environment.as_ref(), self.build_all, - self.reuse_image, ) } } diff --git a/vdev/src/commands/integration/stop.rs b/vdev/src/commands/integration/stop.rs index 3f5bf68620cec..da9e403cfb82f 100644 --- a/vdev/src/commands/integration/stop.rs +++ b/vdev/src/commands/integration/stop.rs @@ -13,10 +13,6 @@ pub struct Cli { /// If true, remove the runner container compiled with all integration test features #[arg(short = 'a', long)] build_all: bool, - - /// Reuse existing test runner image instead of rebuilding (useful in CI) - #[arg(long)] - reuse_image: bool, } impl Cli { @@ -25,7 +21,6 @@ impl Cli { ComposeTestLocalConfig::integration(), &self.integration, self.build_all, - self.reuse_image, ) } } diff --git a/vdev/src/commands/integration/test.rs b/vdev/src/commands/integration/test.rs index ae30da545fffb..956b775a8c527 100644 --- a/vdev/src/commands/integration/test.rs +++ b/vdev/src/commands/integration/test.rs @@ -24,10 +24,6 @@ pub struct Cli { #[arg(short = 'a', long)] build_all: bool, - /// Reuse existing test runner image instead of rebuilding (useful in CI) - #[arg(long)] - reuse_image: bool, - /// Number of retries to allow on each integration test case. 
#[arg(short = 'r', long)] retries: Option, @@ -43,7 +39,6 @@ impl Cli { &self.integration, self.environment.as_ref(), self.build_all, - self.reuse_image, self.retries.unwrap_or_default(), &self.args, ) diff --git a/vdev/src/commands/test.rs b/vdev/src/commands/test.rs index 8edaec3a84bf3..e76d6463f903d 100644 --- a/vdev/src/commands/test.rs +++ b/vdev/src/commands/test.rs @@ -16,10 +16,6 @@ pub struct Cli { #[arg(short = 'C', long)] container: bool, - /// Reuse existing test runner image instead of rebuilding (useful in CI) - #[arg(long)] - reuse_image: bool, - /// Environment variables in the form KEY[=VALUE] #[arg(short, long)] env: Option>, @@ -57,7 +53,6 @@ impl Cli { &BTreeMap::default(), None, &args, - self.reuse_image, false, // Don't pre-build Vector for direct test runs ) } diff --git a/vdev/src/testing/integration.rs b/vdev/src/testing/integration.rs index 71294e7f81b90..b7ddab7fd27c4 100644 --- a/vdev/src/testing/integration.rs +++ b/vdev/src/testing/integration.rs @@ -69,8 +69,6 @@ pub(crate) struct ComposeTest { env_config: Environment, /// When true, uses 'all-integration-tests' or 'all-e2e-tests' feature. When false, uses features from test.yaml. all_features: bool, - /// When true, reuse existing image instead of rebuilding (useful in CI). - reuse_image: bool, retries: u8, } @@ -80,7 +78,6 @@ impl ComposeTest { test_name: impl Into, environment: impl Into, all_features: bool, - reuse_image: bool, retries: u8, ) -> Result { let test_name: String = test_name.into(); @@ -114,7 +111,6 @@ impl ComposeTest { compose, env_config: rename_environment_keys(&env_config), all_features, - reuse_image, retries, }; trace!("Generated {compose_test:#?}"); @@ -218,7 +214,6 @@ impl ComposeTest { &self.config.runner.env, Some(&self.config.features), &args, - self.reuse_image, self.local_config.kind == ComposeTestKind::E2E, )?; @@ -229,15 +224,11 @@ impl ComposeTest { // For end-to-end tests, we want to run vector as a service, leveraging the // image for the runner. 
So we must build that image before starting the // compose so that it is available. - // - // TODO: Enable image reuse for E2E tests by building a unified image in CI - // that includes the vector binary compiled with all-e2e-tests feature. if self.local_config.kind == ComposeTestKind::E2E { self.runner.build( Some(&self.config.features), &self.env_config, - false, // Always rebuild for E2E tests - true, // E2E tests build Vector in the image + true, // E2E tests build Vector in the image )?; } diff --git a/vdev/src/testing/runner.rs b/vdev/src/testing/runner.rs index cbe2cfcdfa50b..2299269bba985 100644 --- a/vdev/src/testing/runner.rs +++ b/vdev/src/testing/runner.rs @@ -58,7 +58,6 @@ pub trait TestRunner { inner_env: &Environment, features: Option<&[String]>, args: &[String], - reuse_image: bool, build: bool, ) -> Result<()>; } @@ -107,7 +106,6 @@ pub trait ContainerTestRunner: TestRunner { &self, features: Option<&[String]>, config_environment_variables: &Environment, - reuse_image: bool, build: bool, ) -> Result<()> { match self.state()? { @@ -120,7 +118,7 @@ pub trait ContainerTestRunner: TestRunner { self.start()?; } RunnerState::Missing => { - self.build(features, config_environment_variables, reuse_image, build)?; + self.build(features, config_environment_variables, build)?; self.create()?; self.start()?; } @@ -152,24 +150,10 @@ pub trait ContainerTestRunner: TestRunner { &self, features: Option<&[String]>, config_env_vars: &Environment, - reuse_image: bool, build: bool, ) -> Result<()> { let image_name = self.image_name(); - // When reuse_image is true, skip build if image already exists (useful in CI). - // Otherwise, always rebuild to pick up local code changes. 
- if reuse_image { - let mut check_command = docker_command(["image", "inspect", &image_name]); - if check_command - .output() - .is_ok_and(|output| output.status.success()) - { - info!("Image {image_name} already exists, skipping build"); - return Ok(()); - } - } - let dockerfile = test_runner_dockerfile(); let mut command = prepare_build_command(&image_name, &dockerfile, features, config_env_vars, build); @@ -252,10 +236,9 @@ where config_environment_variables: &Environment, features: Option<&[String]>, args: &[String], - reuse_image: bool, build: bool, ) -> Result<()> { - self.ensure_running(features, config_environment_variables, reuse_image, build)?; + self.ensure_running(features, config_environment_variables, build)?; let mut command = docker_command(["exec"]); if *IS_A_TTY { @@ -413,7 +396,6 @@ impl TestRunner for LocalTestRunner { inner_env: &Environment, _features: Option<&[String]>, args: &[String], - _reuse_image: bool, _build: bool, ) -> Result<()> { let mut command = Command::new(TEST_COMMAND[0]); From 6913528d50b66cc890b8b34f333c2520e2d24a06 Mon Sep 17 00:00:00 2001 From: Pavlos Rontidis Date: Wed, 5 Nov 2025 16:58:47 -0500 Subject: [PATCH 052/227] chore(dev): refactor SecretBackendLoader (tech debt) (#24160) * chore(dev): refactor SecretBackendLoader (tech debt) * refactor more * simplify ConfigBuilderLoader secrets parts * fmt * fix typo --- src/config/loading/config_builder.rs | 23 +++--- src/config/loading/mod.rs | 112 +++++++++------------------ src/config/loading/secret.rs | 52 +++++++++---- src/config/unit_test/mod.rs | 29 ++++--- 4 files changed, 98 insertions(+), 118 deletions(-) diff --git a/src/config/loading/config_builder.rs b/src/config/loading/config_builder.rs index 3e1db5d694032..7df64600dd749 100644 --- a/src/config/loading/config_builder.rs +++ b/src/config/loading/config_builder.rs @@ -12,7 +12,7 @@ use crate::config::{ #[derive(Debug)] pub struct ConfigBuilderLoader { builder: ConfigBuilder, - secrets: Option>, + secrets: 
HashMap, interpolate_env: bool, } @@ -25,7 +25,13 @@ impl ConfigBuilderLoader { /// Sets the secrets map for secret interpolation. pub fn secrets(mut self, secrets: HashMap) -> Self { - self.secrets = Some(secrets); + self.secrets = secrets; + self + } + + /// Sets whether to allow empty configuration. + pub const fn allow_empty(mut self, allow_empty: bool) -> Self { + self.builder.allow_empty = allow_empty; self } @@ -53,7 +59,7 @@ impl Default for ConfigBuilderLoader { fn default() -> Self { Self { builder: ConfigBuilder::default(), - secrets: None, + secrets: HashMap::new(), interpolate_env: true, } } @@ -63,12 +69,11 @@ impl Process for ConfigBuilderLoader { /// Prepares input for a `ConfigBuilder` by interpolating environment variables. fn prepare(&mut self, input: R) -> Result> { let prepared_input = prepare_input(input, self.interpolate_env)?; - let prepared_input = self - .secrets - .as_ref() - .map(|s| secret::interpolate(&prepared_input, s)) - .unwrap_or(Ok(prepared_input))?; - Ok(prepared_input) + Ok(if self.secrets.is_empty() { + prepared_input + } else { + secret::interpolate(&prepared_input, &self.secrets)? + }) } /// Merge a TOML `Table` with a `ConfigBuilder`. Component types extend specific keys. 
diff --git a/src/config/loading/mod.rs b/src/config/loading/mod.rs index 595e7a8de1ad0..7b60bc88ab884 100644 --- a/src/config/loading/mod.rs +++ b/src/config/loading/mod.rs @@ -20,9 +20,10 @@ pub use source::*; use vector_lib::configurable::NamedComponent; use super::{ - Config, ConfigPath, Format, FormatHint, builder::ConfigBuilder, format, validation, vars, + Config, ConfigPath, Format, FormatHint, ProviderConfig, builder::ConfigBuilder, format, + validation, vars, }; -use crate::{config::ProviderConfig, signal}; +use crate::signal; pub static CONFIG_PATHS: Mutex> = Mutex::new(Vec::new()); @@ -148,28 +149,20 @@ pub async fn load_from_paths_with_provider_and_secrets( allow_empty: bool, interpolate_env: bool, ) -> Result> { - // Load secret backends first - let mut secrets_backends_loader = - load_secret_backends_from_paths_with_opts(config_paths, interpolate_env)?; - // And then, if needed, retrieve secrets from configured backends - let mut builder = if secrets_backends_loader.has_secrets_to_retrieve() { - debug!(message = "Secret placeholders found, retrieving secrets from configured backends."); - let resolved_secrets = secrets_backends_loader - .retrieve(&mut signal_handler.subscribe()) - .await - .map_err(|e| vec![e])?; - ConfigBuilderLoader::default() - .interpolate_env(interpolate_env) - .secrets(resolved_secrets) - .load_from_paths(config_paths)? - } else { - debug!(message = "No secret placeholder found, skipping secret resolution."); - ConfigBuilderLoader::default() - .interpolate_env(interpolate_env) - .load_from_paths(config_paths)? 
- }; + let secrets_backends_loader = loader_from_paths( + SecretBackendLoader::default().interpolate_env(interpolate_env), + config_paths, + )?; + let secrets = secrets_backends_loader + .retrieve_secrets(signal_handler) + .await + .map_err(|e| vec![e])?; - builder.allow_empty = allow_empty; + let mut builder = ConfigBuilderLoader::default() + .interpolate_env(interpolate_env) + .allow_empty(allow_empty) + .secrets(secrets) + .load_from_paths(config_paths)?; validation::check_provider(&builder)?; signal_handler.clear(); @@ -180,15 +173,7 @@ pub async fn load_from_paths_with_provider_and_secrets( debug!(message = "Provider configured.", provider = ?provider.get_component_name()); } - let (new_config, build_warnings) = builder.build_with_warnings()?; - - validation::check_buffer_preconditions(&new_config).await?; - - for warning in build_warnings { - warn!("{}", warning); - } - - Ok(new_config) + finalize_config(builder).await } pub async fn load_from_str_with_secrets( @@ -198,29 +183,27 @@ pub async fn load_from_str_with_secrets( allow_empty: bool, interpolate_env: bool, ) -> Result> { - // Load secret backends first - let mut secrets_backends_loader = - load_secret_backends_from_input_with_opts(input.as_bytes(), format, interpolate_env)?; - // And then, if needed, retrieve secrets from configured backends - let mut builder = if secrets_backends_loader.has_secrets_to_retrieve() { - debug!(message = "Secret placeholders found, retrieving secrets from configured backends."); - let resolved_secrets = secrets_backends_loader - .retrieve(&mut signal_handler.subscribe()) - .await - .map_err(|e| vec![e])?; - ConfigBuilderLoader::default() - .interpolate_env(interpolate_env) - .secrets(resolved_secrets) - .load_from_input(input.as_bytes(), format)? - } else { - debug!(message = "No secret placeholder found, skipping secret resolution."); - ConfigBuilderLoader::default() - .interpolate_env(interpolate_env) - .load_from_input(input.as_bytes(), format)? 
- }; + let secrets_backends_loader = loader_from_input( + SecretBackendLoader::default().interpolate_env(interpolate_env), + input.as_bytes(), + format, + )?; + let secrets = secrets_backends_loader + .retrieve_secrets(signal_handler) + .await + .map_err(|e| vec![e])?; - builder.allow_empty = allow_empty; + let builder = ConfigBuilderLoader::default() + .interpolate_env(interpolate_env) + .allow_empty(allow_empty) + .secrets(secrets) + .load_from_input(input.as_bytes(), format)?; signal_handler.clear(); + + finalize_config(builder).await +} + +async fn finalize_config(builder: ConfigBuilder) -> Result> { let (new_config, build_warnings) = builder.build_with_warnings()?; validation::check_buffer_preconditions(&new_config).await?; @@ -292,29 +275,6 @@ pub fn load_source_from_paths( loader_from_paths(SourceLoader::new(), config_paths) } -/// Uses `SecretBackendLoader` to process `ConfigPaths`, deserializing to a `SecretBackends`. -pub fn load_secret_backends_from_paths_with_opts( - config_paths: &[ConfigPath], - interpolate_env: bool, -) -> Result> { - loader_from_paths( - SecretBackendLoader::new_with_opts(interpolate_env), - config_paths, - ) -} - -fn load_secret_backends_from_input_with_opts( - input: R, - format: Format, - interpolate_env: bool, -) -> Result> { - loader_from_input( - SecretBackendLoader::new_with_opts(interpolate_env), - input, - format, - ) -} - pub fn load_from_str(input: &str, format: Format) -> Result> { let builder = load_from_inputs(std::iter::once((input.as_bytes(), format)))?; let (config, build_warnings) = builder.build_with_warnings()?; diff --git a/src/config/loading/secret.rs b/src/config/loading/secret.rs index efd0e92173e2b..a7e27901b3811 100644 --- a/src/config/loading/secret.rs +++ b/src/config/loading/secret.rs @@ -38,40 +38,50 @@ pub(crate) struct SecretBackendOuter { } /// Loader for secrets backends. 
-#[derive(Debug, Default, Deserialize, Serialize)] +#[derive(Debug, Deserialize, Serialize)] pub struct SecretBackendLoader { backends: IndexMap, - pub(crate) secret_keys: HashMap>, + secret_keys: HashMap>, interpolate_env: bool, } impl SecretBackendLoader { - pub(crate) fn new_with_opts(interpolate_env: bool) -> Self { - Self { - backends: IndexMap::new(), - secret_keys: HashMap::new(), - interpolate_env, - } + /// Sets whether to interpolate environment variables in the config. + pub const fn interpolate_env(mut self, interpolate: bool) -> Self { + self.interpolate_env = interpolate; + self } - pub(crate) async fn retrieve( - &mut self, - signal_rx: &mut signal::SignalRx, + /// Retrieve secrets from backends. + /// Returns an empty HashMap if there are no secrets to retrieve. + pub(crate) async fn retrieve_secrets( + mut self, + signal_handler: &mut signal::SignalHandler, ) -> Result, String> { + if self.secret_keys.is_empty() { + debug!(message = "No secret placeholder found, skipping secret resolution."); + return Ok(HashMap::new()); + } + + debug!(message = "Secret placeholders found, retrieving secrets from configured backends."); let mut secrets: HashMap = HashMap::new(); + let mut signal_rx = signal_handler.subscribe(); for (backend_name, keys) in &self.secret_keys { - let backend = self.backends + let backend = self + .backends .get_mut(&ComponentKey::from(backend_name.clone())) .ok_or_else(|| { - format!("Backend \"{backend_name}\" is required for secret retrieval but was not found in config.") + format!( + "Backend \"{backend_name}\" is required for secret retrieval but was not found in config." 
+ ) })?; debug!(message = "Retrieving secrets from a backend.", backend = ?backend_name, keys = ?keys); let backend_secrets = backend - .retrieve(keys.clone(), signal_rx) + .retrieve(keys.clone(), &mut signal_rx) .map_err(|e| { - format!("Error while retrieving secret from backend \"{backend_name}\": {e}.",) + format!("Error while retrieving secret from backend \"{backend_name}\": {e}.") }) .await?; @@ -83,9 +93,17 @@ impl SecretBackendLoader { Ok(secrets) } +} - pub(crate) fn has_secrets_to_retrieve(&self) -> bool { - !self.secret_keys.is_empty() +impl Default for SecretBackendLoader { + /// Creates a new SecretBackendLoader with default settings. + /// By default, environment variable interpolation is enabled. + fn default() -> Self { + Self { + backends: IndexMap::new(), + secret_keys: HashMap::new(), + interpolate_env: true, + } } } diff --git a/src/config/unit_test/mod.rs b/src/config/unit_test/mod.rs index c3bd5dc6e7cdb..2be4b76484cc8 100644 --- a/src/config/unit_test/mod.rs +++ b/src/config/unit_test/mod.rs @@ -105,22 +105,19 @@ pub async fn build_unit_tests_main( signal_handler: &mut signal::SignalHandler, ) -> Result, Vec> { init_log_schema_from_paths(paths, false)?; - let mut secrets_backends_loader = - loading::load_secret_backends_from_paths_with_opts(paths, true)?; - let config_builder = if secrets_backends_loader.has_secrets_to_retrieve() { - let resolved_secrets = secrets_backends_loader - .retrieve(&mut signal_handler.subscribe()) - .await - .map_err(|e| vec![e])?; - ConfigBuilderLoader::default() - .interpolate_env(true) - .secrets(resolved_secrets) - .load_from_paths(paths)? - } else { - ConfigBuilderLoader::default() - .interpolate_env(true) - .load_from_paths(paths)? 
- }; + let secrets_backends_loader = loading::loader_from_paths( + loading::SecretBackendLoader::default().interpolate_env(true), + paths, + )?; + let secrets = secrets_backends_loader + .retrieve_secrets(signal_handler) + .await + .map_err(|e| vec![e])?; + + let config_builder = ConfigBuilderLoader::default() + .interpolate_env(true) + .secrets(secrets) + .load_from_paths(paths)?; build_unit_tests(config_builder).await } From 7594a0b268c478c39316e66778bda2ef5b445c4a Mon Sep 17 00:00:00 2001 From: Thomas Date: Wed, 5 Nov 2025 17:28:00 -0500 Subject: [PATCH 053/227] fix(vdev): release prepare vrl version pinning (#24158) * Add --dry-run to release prepare * Add error handling and checks to pin_vrl_version * Add wrapper to toml * Remove wrapper, parse as Table instead * Fix vrl pinning logic * Enable preserve_order feature in toml crate * Use dependency instead of whole toml * Fix dry run docs * Fix dry run wording * refactor to use toml_edit * Add update_vrl_to_version to add unit test * Use indoc in prepare.rs * Remove preserve_order feature --- Cargo.lock | 1 + vdev/Cargo.toml | 1 + vdev/src/commands/release/prepare.rs | 97 ++++++++++++++++++---------- 3 files changed, 66 insertions(+), 33 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5f31832d545a9..bdc14fe074433 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -12182,6 +12182,7 @@ dependencies = [ "sha2", "tempfile", "toml 0.9.8", + "toml_edit 0.22.27", ] [[package]] diff --git a/vdev/Cargo.toml b/vdev/Cargo.toml index 5a671f3c03b66..1683ace876a67 100644 --- a/vdev/Cargo.toml +++ b/vdev/Cargo.toml @@ -43,6 +43,7 @@ serde_yaml.workspace = true sha2 = "0.10.9" tempfile.workspace = true toml.workspace = true +toml_edit = { version = "0.22", default-features = false } semver.workspace = true indoc.workspace = true git2 = { version = "0.20.2" } diff --git a/vdev/src/commands/release/prepare.rs b/vdev/src/commands/release/prepare.rs index 640f19f377386..f5b38de9fb073 100644 --- 
a/vdev/src/commands/release/prepare.rs +++ b/vdev/src/commands/release/prepare.rs @@ -3,7 +3,7 @@ use crate::utils::command::run_command; use crate::utils::{git, paths}; -use anyhow::{Result, anyhow}; +use anyhow::{Context, Result, anyhow}; use reqwest::blocking::Client; use semver::Version; use std::fs::File; @@ -14,7 +14,7 @@ use std::path::{Path, PathBuf}; use std::process::Command; use std::{env, fs}; use toml::Value; -use toml::map::Map; +use toml_edit::DocumentMut; const ALPINE_PREFIX: &str = "FROM docker.io/alpine:"; const ALPINE_DOCKERFILE: &str = "distribution/docker/alpine/Dockerfile"; @@ -42,6 +42,10 @@ pub struct Cli { /// You can find the latest version here: . #[arg(long)] debian_version: Option, + + /// Dry run. Enabling this will make it so no PRs will be created and no branches will be pushed upstream. + #[arg(long, default_value_t = false)] + dry_run: bool, } struct Prepare { @@ -53,6 +57,7 @@ struct Prepare { latest_vector_version: Version, release_branch: String, release_preparation_branch: String, + dry_run: bool, } impl Cli { @@ -74,6 +79,7 @@ impl Cli { "prepare-v-{}-{}-{}-website", self.version.major, self.version.minor, self.version.patch ), + dry_run: self.dry_run, }; prepare.run() } @@ -106,7 +112,11 @@ impl Prepare { self.create_new_release_md()?; - self.open_release_pr() + if !self.dry_run { + self.open_release_pr()?; + } + + Ok(()) } /// Steps 1 & 2 @@ -117,12 +127,16 @@ impl Prepare { git::checkout_main_branch()?; git::checkout_or_create_branch(self.release_branch.as_str())?; - git::push_and_set_upstream(self.release_branch.as_str())?; + if !self.dry_run { + git::push_and_set_upstream(self.release_branch.as_str())?; + } // Step 2: Create a new release preparation branch // The branch website contains 'website' to generate vector.dev preview. 
git::checkout_or_create_branch(self.release_preparation_branch.as_str())?; - git::push_and_set_upstream(self.release_preparation_branch.as_str())?; + if !self.dry_run { + git::push_and_set_upstream(self.release_preparation_branch.as_str())?; + } Ok(()) } @@ -130,35 +144,11 @@ impl Prepare { fn pin_vrl_version(&self) -> Result<()> { debug!("pin_vrl_version"); let cargo_toml_path = &self.repo_root.join("Cargo.toml"); - let contents = fs::read_to_string(cargo_toml_path).expect("Failed to read Cargo.toml"); - - // Needs this hybrid approach to preserve ordering. - let mut lines: Vec = contents.lines().map(String::from).collect(); - + let contents = fs::read_to_string(cargo_toml_path).context("Failed to read Cargo.toml")?; let vrl_version = self.vrl_version.to_string(); - for line in &mut lines { - if line.trim().starts_with("vrl = { git = ") { - if let Ok(mut vrl_toml) = line.parse::() { - let vrl_dependency: &mut Value = vrl_toml - .get_mut("vrl") - .expect("line should start with 'vrl'"); - - let mut new_dependency_value = Map::new(); - new_dependency_value - .insert("version".to_string(), Value::String(vrl_version.clone())); - let features = vrl_dependency - .get("features") - .expect("missing 'features' key"); - new_dependency_value.insert("features".to_string(), features.clone()); - - *line = format!("vrl = {}", Value::from(new_dependency_value)); - } - break; - } - } + let updated_contents = update_vrl_to_version(&contents, &vrl_version)?; - lines.push(String::new()); // File should end with a newline. - fs::write(cargo_toml_path, lines.join("\n")).expect("Failed to write Cargo.toml"); + fs::write(cargo_toml_path, updated_contents).context("Failed to write Cargo.toml")?; run_command("cargo update -p vrl"); git::commit(&format!( "chore(releasing): Pinned VRL version to {vrl_version}" @@ -420,6 +410,26 @@ impl Prepare { // FREE FUNCTIONS AFTER THIS LINE +/// Transforms a Cargo.toml string by replacing vrl's git dependency with a version dependency. 
+/// Updates the vrl entry in [workspace.dependencies] from git + branch to a version. +fn update_vrl_to_version(cargo_toml_contents: &str, vrl_version: &str) -> Result { + let mut doc = cargo_toml_contents + .parse::() + .context("Failed to parse Cargo.toml")?; + + // Navigate to workspace.dependencies.vrl + let vrl_table = doc["workspace"]["dependencies"]["vrl"] + .as_inline_table_mut() + .context("vrl in workspace.dependencies should be an inline table")?; + + // Remove git and branch, add version + vrl_table.remove("git"); + vrl_table.remove("branch"); + vrl_table.insert("version", vrl_version.into()); + + Ok(doc.to_string()) +} + fn get_latest_version_from_vector_tags() -> Result { let tags = run_command("git tag --list --sort=-v:refname"); let latest_tag = tags @@ -527,10 +537,31 @@ fn get_latest_vrl_tag_and_changelog() -> Result { #[cfg(test)] mod tests { use crate::commands::release::prepare::{ - format_vrl_changelog_block, insert_block_after_changelog, + format_vrl_changelog_block, insert_block_after_changelog, update_vrl_to_version, }; use indoc::indoc; + #[test] + fn test_update_vrl_to_version() { + let input = indoc! {r#" + [workspace.dependencies] + some-other-dep = "1.0.0" + vrl = { git = "https://github.com/vectordotdev/vrl.git", branch = "main", features = ["arbitrary", "cli", "test", "test_framework"] } + another-dep = "2.0.0" + "#}; + + let result = update_vrl_to_version(input, "0.28.0").expect("should succeed"); + + let expected = indoc! 
{r#" + [workspace.dependencies] + some-other-dep = "1.0.0" + vrl = { features = ["arbitrary", "cli", "test", "test_framework"] , version = "0.28.0" } + another-dep = "2.0.0" + "#}; + + assert_eq!(result, expected); + } + #[test] fn test_insert_block_after_changelog() { let vrl_changelog = "### [0.2.0]\n- Feature\n- Fix"; From e9c81d25045f29c3b6e83030725857f1d25ebdf0 Mon Sep 17 00:00:00 2001 From: Pavlos Rontidis Date: Thu, 6 Nov 2025 10:34:35 -0500 Subject: [PATCH 054/227] chore(ci): fix failing dependabot dockerfile updates (#24172) --- .github/dependabot.yml | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 11b5b6a980770..3e88b0ed77069 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -86,7 +86,7 @@ updates: patterns: - "zstd*" - package-ecosystem: "docker" - directory: "/distribution/docker/" + directory: "/distribution/docker/alpine" schedule: interval: "monthly" time: "04:00" # UTC @@ -96,6 +96,25 @@ updates: commit-message: prefix: "chore(deps)" open-pull-requests-limit: 100 + groups: + docker-images: + patterns: + - "*" + - package-ecosystem: "docker" + directory: "/distribution/docker/debian" + schedule: + interval: "monthly" + time: "04:00" # UTC + labels: + - "domain: releasing" + - "no-changelog" + commit-message: + prefix: "chore(deps)" + open-pull-requests-limit: 100 + groups: + docker-images: + patterns: + - "*" - package-ecosystem: "github-actions" directory: "/" schedule: From 325c5c296bad7656e947c853449d5f7bb92a2f2f Mon Sep 17 00:00:00 2001 From: Pavlos Rontidis Date: Thu, 6 Nov 2025 11:01:01 -0500 Subject: [PATCH 055/227] fix(ci): download toolchain only once (#24176) --- tests/e2e/Dockerfile | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/e2e/Dockerfile b/tests/e2e/Dockerfile index a77c6ce409a5e..dd564544e480b 100644 --- a/tests/e2e/Dockerfile +++ b/tests/e2e/Dockerfile @@ -27,10 +27,12 @@ COPY 
scripts/environment/prepare.sh / COPY scripts/environment/binstall.sh / COPY scripts/environment/release-flags.sh / +WORKDIR /vector +COPY rust-toolchain.toml . + RUN bash /prepare.sh --modules=cargo-nextest RUN bash /install-protoc.sh -WORKDIR /vector COPY . . ARG FEATURES ARG BUILD From 81ca9f26c487c3eebdfca6ca8e5f334024bd406c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 6 Nov 2025 16:02:18 +0000 Subject: [PATCH 056/227] chore(ci): bump the artifact group with 2 updates (#24173) Bumps the artifact group with 2 updates: [actions/upload-artifact](https://github.com/actions/upload-artifact) and [actions/download-artifact](https://github.com/actions/download-artifact). Updates `actions/upload-artifact` from 4.6.2 to 5.0.0 - [Release notes](https://github.com/actions/upload-artifact/releases) - [Commits](https://github.com/actions/upload-artifact/compare/ea165f8d65b6e75b540449e92b4886f43607fa02...330a01c490aca151604b8cf639adc76d48f6c5d4) Updates `actions/download-artifact` from 5.0.0 to 6.0.0 - [Release notes](https://github.com/actions/download-artifact/releases) - [Commits](https://github.com/actions/download-artifact/compare/634f93cb2916e3fdff6788551b99b062d0335ce0...018cc2cf5baa6db3ef3c5f8a56943fffe632ef53) --- updated-dependencies: - dependency-name: actions/upload-artifact dependency-version: 5.0.0 dependency-type: direct:production update-type: version-update:semver-major dependency-group: artifact - dependency-name: actions/download-artifact dependency-version: 6.0.0 dependency-type: direct:production update-type: version-update:semver-major dependency-group: artifact ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/changes.yml | 4 +- .github/workflows/cross.yml | 2 +- .github/workflows/integration.yml | 4 +- .github/workflows/k8s_e2e.yml | 4 +- .github/workflows/preview_site_trigger.yml | 2 +- .github/workflows/publish.yml | 106 ++++++++++----------- .github/workflows/regression.yml | 18 ++-- .github/workflows/scorecard.yml | 2 +- 8 files changed, 71 insertions(+), 71 deletions(-) diff --git a/.github/workflows/changes.yml b/.github/workflows/changes.yml index 0bd82a9881cb9..76054ab8a9d64 100644 --- a/.github/workflows/changes.yml +++ b/.github/workflows/changes.yml @@ -388,7 +388,7 @@ jobs: echo "any=$any_changed" >> $GITHUB_OUTPUT - name: Upload JSON artifact - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 with: name: int_tests_changes path: int_tests_changes.json @@ -446,7 +446,7 @@ jobs: echo "any=$any_changed" >> $GITHUB_OUTPUT - name: Upload JSON artifact - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 with: name: e2e_tests_changes path: e2e_tests_changes.json diff --git a/.github/workflows/cross.yml b/.github/workflows/cross.yml index ce5797ca367f1..bc50fe8df8bbc 100644 --- a/.github/workflows/cross.yml +++ b/.github/workflows/cross.yml @@ -64,7 +64,7 @@ jobs: # aarch64 and musl in particular are notoriously hard to link. # While it may be tempting to slot a `check` in here for quickness, please don't. 
- run: make cross-build-${{ matrix.target }} - - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 with: name: "vector-debug-${{ matrix.target }}" path: "./target/${{ matrix.target }}/debug/vector" diff --git a/.github/workflows/integration.yml b/.github/workflows/integration.yml index 805ee63d39b43..6590a73aaf5cf 100644 --- a/.github/workflows/integration.yml +++ b/.github/workflows/integration.yml @@ -84,7 +84,7 @@ jobs: submodules: "recursive" - name: Download JSON artifact from changes.yml - uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0 + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 if: github.event_name == 'merge_group' with: name: int_tests_changes @@ -145,7 +145,7 @@ jobs: submodules: "recursive" - name: Download JSON artifact from changes.yml - uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0 + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 if: github.event_name == 'merge_group' with: name: e2e_tests_changes diff --git a/.github/workflows/k8s_e2e.yml b/.github/workflows/k8s_e2e.yml index f3b0959513980..b24a93b42b9be 100644 --- a/.github/workflows/k8s_e2e.yml +++ b/.github/workflows/k8s_e2e.yml @@ -104,7 +104,7 @@ jobs: - run: VECTOR_VERSION="$(vdev version)" make package-deb-x86_64-unknown-linux-gnu - - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 with: name: e2e-test-deb-package path: target/artifacts/* @@ -206,7 +206,7 @@ jobs: if: ${{ github.event_name != 'pull_request_review' }} uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - - uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0 + - uses: 
actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 with: name: e2e-test-deb-package path: target/artifacts diff --git a/.github/workflows/preview_site_trigger.yml b/.github/workflows/preview_site_trigger.yml index 0716a769a685b..2b2c3e4068ae0 100644 --- a/.github/workflows/preview_site_trigger.yml +++ b/.github/workflows/preview_site_trigger.yml @@ -45,7 +45,7 @@ jobs: # Upload the artifact using latest version (only if branch is valid) - name: Upload PR information artifact if: steps.validate.outputs.valid == 'true' - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 with: name: pr path: pr/ diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index 280b081cec1f8..fde0f10265a69 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -65,7 +65,7 @@ jobs: - name: Build Vector run: make package-x86_64-unknown-linux-musl-all - name: Stage package artifacts for publish - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 with: name: vector-${{ env.VECTOR_VERSION }}-x86_64-unknown-linux-musl path: target/artifacts/vector* @@ -91,7 +91,7 @@ jobs: - name: Build Vector run: make package-x86_64-unknown-linux-gnu-all - name: Stage package artifacts for publish - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 with: name: vector-${{ env.VECTOR_VERSION }}-x86_64-unknown-linux-gnu path: target/artifacts/vector* @@ -119,7 +119,7 @@ jobs: DOCKER_PRIVILEGED: "true" run: make package-aarch64-unknown-linux-musl-all - name: Stage package artifacts for publish - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + uses: 
actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 with: name: vector-${{ env.VECTOR_VERSION }}-aarch64-unknown-linux-musl path: target/artifacts/vector* @@ -147,7 +147,7 @@ jobs: DOCKER_PRIVILEGED: "true" run: make package-aarch64-unknown-linux-gnu-all - name: Stage package artifacts for publish - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 with: name: vector-${{ env.VECTOR_VERSION }}-aarch64-unknown-linux-gnu path: target/artifacts/vector* @@ -175,7 +175,7 @@ jobs: DOCKER_PRIVILEGED: "true" run: make package-armv7-unknown-linux-gnueabihf-all - name: Stage package artifacts for publish - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 with: name: vector-${{ env.VECTOR_VERSION }}-armv7-unknown-linux-gnueabihf path: target/artifacts/vector* @@ -203,7 +203,7 @@ jobs: DOCKER_PRIVILEGED: "true" run: make package-armv7-unknown-linux-musleabihf - name: Stage package artifacts for publish - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 with: name: vector-${{ env.VECTOR_VERSION }}-armv7-unknown-linux-musleabihf path: target/artifacts/vector* @@ -231,7 +231,7 @@ jobs: DOCKER_PRIVILEGED: "true" run: make package-arm-unknown-linux-gnueabi-all - name: Stage package artifacts for publish - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 with: name: vector-${{ env.VECTOR_VERSION }}-arm-unknown-linux-gnueabi path: target/artifacts/vector* @@ -259,7 +259,7 @@ jobs: DOCKER_PRIVILEGED: "true" run: make package-arm-unknown-linux-musleabi - name: Stage package artifacts for publish - uses: 
actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 with: name: vector-${{ env.VECTOR_VERSION }}-arm-unknown-linux-musleabi path: target/artifacts/vector* @@ -305,7 +305,7 @@ jobs: export PATH="$HOME/.cargo/bin:$PATH" make package - name: Stage package artifacts for publish - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 with: name: vector-${{ env.VECTOR_VERSION }}-${{ matrix.architecture }}-apple-darwin path: target/artifacts/vector* @@ -353,7 +353,7 @@ jobs: export PATH="/c/wix:$PATH" ./scripts/package-msi.sh - name: Stage package artifacts for publish - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 with: name: vector-${{ env.VECTOR_VERSION }}-x86_64-pc-windows-msvc path: target/artifacts/vector* @@ -398,7 +398,7 @@ jobs: with: ref: ${{ inputs.git_ref }} - name: Download staged package artifacts (x86_64-unknown-linux-gnu) - uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0 + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 with: name: vector-${{ env.VECTOR_VERSION }}-x86_64-unknown-linux-gnu path: target/artifacts @@ -449,7 +449,7 @@ jobs: with: ref: ${{ inputs.git_ref }} - name: Download staged package artifacts (x86_64-unknown-linux-gnu) - uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0 + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 with: name: vector-${{ env.VECTOR_VERSION }}-x86_64-unknown-linux-gnu path: target/artifacts @@ -478,7 +478,7 @@ jobs: with: ref: ${{ inputs.git_ref }} - name: Download staged package artifacts (${{ matrix.target }}) - uses: 
actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0 + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 with: name: vector-${{ env.VECTOR_VERSION }}-${{ matrix.target }} path: target/artifacts @@ -533,42 +533,42 @@ jobs: version: latest install: true - name: Download staged package artifacts (aarch64-unknown-linux-gnu) - uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0 + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 with: name: vector-${{ env.VECTOR_VERSION }}-aarch64-unknown-linux-gnu path: target/artifacts - name: Download staged package artifacts (aarch64-unknown-linux-musl) - uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0 + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 with: name: vector-${{ env.VECTOR_VERSION }}-aarch64-unknown-linux-musl path: target/artifacts - name: Download staged package artifacts (x86_64-unknown-linux-gnu) - uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0 + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 with: name: vector-${{ env.VECTOR_VERSION }}-x86_64-unknown-linux-gnu path: target/artifacts - name: Download staged package artifacts (x86_64-unknown-linux-musl) - uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0 + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 with: name: vector-${{ env.VECTOR_VERSION }}-x86_64-unknown-linux-musl path: target/artifacts - name: Download staged package artifacts (armv7-unknown-linux-gnueabihf) - uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0 + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 with: name: vector-${{ env.VECTOR_VERSION }}-armv7-unknown-linux-gnueabihf path: target/artifacts - name: Download staged 
package artifacts (armv7-unknown-linux-musleabihf) - uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0 + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 with: name: vector-${{ env.VECTOR_VERSION }}-armv7-unknown-linux-musleabihf path: target/artifacts - name: Download staged package artifacts (arm-unknown-linux-gnueabi) - uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0 + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 with: name: vector-${{ env.VECTOR_VERSION }}-arm-unknown-linux-gnueabi path: target/artifacts - name: Download staged package artifacts (arm-unknown-linux-musleabi) - uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0 + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 with: name: vector-${{ env.VECTOR_VERSION }}-arm-unknown-linux-musleabi path: target/artifacts @@ -611,52 +611,52 @@ jobs: with: ref: ${{ inputs.git_ref }} - name: Download staged package artifacts (aarch64-unknown-linux-gnu) - uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0 + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 with: name: vector-${{ env.VECTOR_VERSION }}-aarch64-unknown-linux-gnu path: target/artifacts - name: Download staged package artifacts (aarch64-unknown-linux-musl) - uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0 + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 with: name: vector-${{ env.VECTOR_VERSION }}-aarch64-unknown-linux-musl path: target/artifacts - name: Download staged package artifacts (x86_64-unknown-linux-gnu) - uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0 + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 with: name: vector-${{ env.VECTOR_VERSION 
}}-x86_64-unknown-linux-gnu path: target/artifacts - name: Download staged package artifacts (x86_64-unknown-linux-musl) - uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0 + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 with: name: vector-${{ env.VECTOR_VERSION }}-x86_64-unknown-linux-musl path: target/artifacts - name: Download staged package artifacts (arm64-apple-darwin) - uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0 + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 with: name: vector-${{ env.VECTOR_VERSION }}-arm64-apple-darwin path: target/artifacts - name: Download staged package artifacts (x86_64-pc-windows-msvc) - uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0 + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 with: name: vector-${{ env.VECTOR_VERSION }}-x86_64-pc-windows-msvc path: target/artifacts - name: Download staged package artifacts (armv7-unknown-linux-gnueabihf) - uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0 + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 with: name: vector-${{ env.VECTOR_VERSION }}-armv7-unknown-linux-gnueabihf path: target/artifacts - name: Download staged package artifacts (armv7-unknown-linux-musleabihf) - uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0 + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 with: name: vector-${{ env.VECTOR_VERSION }}-armv7-unknown-linux-musleabihf path: target/artifacts - name: Download staged package artifacts (arm-unknown-linux-gnueabi) - uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0 + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 with: name: vector-${{ env.VECTOR_VERSION 
}}-arm-unknown-linux-gnueabi path: target/artifacts - name: Download staged package artifacts (arm-unknown-linux-musleabi) - uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0 + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 with: name: vector-${{ env.VECTOR_VERSION }}-arm-unknown-linux-musleabi path: target/artifacts @@ -696,57 +696,57 @@ jobs: with: ref: ${{ inputs.git_ref }} - name: Download staged package artifacts (aarch64-unknown-linux-gnu) - uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0 + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 with: name: vector-${{ env.VECTOR_VERSION }}-aarch64-unknown-linux-gnu path: target/artifacts - name: Download staged package artifacts (aarch64-unknown-linux-musl) - uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0 + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 with: name: vector-${{ env.VECTOR_VERSION }}-aarch64-unknown-linux-musl path: target/artifacts - name: Download staged package artifacts (x86_64-unknown-linux-gnu) - uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0 + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 with: name: vector-${{ env.VECTOR_VERSION }}-x86_64-unknown-linux-gnu path: target/artifacts - name: Download staged package artifacts (x86_64-unknown-linux-musl) - uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0 + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 with: name: vector-${{ env.VECTOR_VERSION }}-x86_64-unknown-linux-musl path: target/artifacts - name: Download staged package artifacts (arm64-apple-darwin) - uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0 + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # 
v6.0.0 with: name: vector-${{ env.VECTOR_VERSION }}-arm64-apple-darwin path: target/artifacts - name: Download staged package artifacts (x86_64-pc-windows-msvc) - uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0 + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 with: name: vector-${{ env.VECTOR_VERSION }}-x86_64-pc-windows-msvc path: target/artifacts - name: Download staged package artifacts (armv7-unknown-linux-gnueabihf) - uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0 + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 with: name: vector-${{ env.VECTOR_VERSION }}-armv7-unknown-linux-gnueabihf path: target/artifacts - name: Download staged package artifacts (armv7-unknown-linux-musleabihf) - uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0 + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 with: name: vector-${{ env.VECTOR_VERSION }}-armv7-unknown-linux-musleabihf path: target/artifacts - name: Download artifact checksums - uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0 + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 with: name: vector-${{ env.VECTOR_VERSION }}-SHA256SUMS path: target/artifacts - name: Download staged package artifacts (arm-unknown-linux-gnueabi) - uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0 + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 with: name: vector-${{ env.VECTOR_VERSION }}-arm-unknown-linux-gnueabi path: target/artifacts - name: Download staged package artifacts (arm-unknown-linux-musleabi) - uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0 + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 with: name: vector-${{ env.VECTOR_VERSION 
}}-arm-unknown-linux-musleabi path: target/artifacts @@ -779,59 +779,59 @@ jobs: with: ref: ${{ inputs.git_ref }} - name: Download staged package artifacts (aarch64-unknown-linux-gnu) - uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0 + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 with: name: vector-${{ env.VECTOR_VERSION }}-aarch64-unknown-linux-gnu path: target/artifacts - name: Download staged package artifacts (aarch64-unknown-linux-musl) - uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0 + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 with: name: vector-${{ env.VECTOR_VERSION }}-aarch64-unknown-linux-musl path: target/artifacts - name: Download staged package artifacts (x86_64-unknown-linux-gnu) - uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0 + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 with: name: vector-${{ env.VECTOR_VERSION }}-x86_64-unknown-linux-gnu path: target/artifacts - name: Download staged package artifacts (x86_64-unknown-linux-musl) - uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0 + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 with: name: vector-${{ env.VECTOR_VERSION }}-x86_64-unknown-linux-musl path: target/artifacts - name: Download staged package artifacts (arm64-apple-darwin) - uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0 + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 with: name: vector-${{ env.VECTOR_VERSION }}-arm64-apple-darwin path: target/artifacts - name: Download staged package artifacts (x86_64-pc-windows-msvc) - uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0 + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 
with: name: vector-${{ env.VECTOR_VERSION }}-x86_64-pc-windows-msvc path: target/artifacts - name: Download staged package artifacts (armv7-unknown-linux-gnueabihf) - uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0 + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 with: name: vector-${{ env.VECTOR_VERSION }}-armv7-unknown-linux-gnueabihf path: target/artifacts - name: Download staged package artifacts (armv7-unknown-linux-musleabihf) - uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0 + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 with: name: vector-${{ env.VECTOR_VERSION }}-armv7-unknown-linux-musleabihf path: target/artifacts - name: Download staged package artifacts (arm-unknown-linux-gnueabi) - uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0 + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 with: name: vector-${{ env.VECTOR_VERSION }}-arm-unknown-linux-gnueabi path: target/artifacts - name: Download staged package artifacts (arm-unknown-linux-musleabi) - uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0 + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 with: name: vector-${{ env.VECTOR_VERSION }}-arm-unknown-linux-musleabi path: target/artifacts - name: Generate SHA256 checksums for artifacts run: make sha256sum - name: Stage checksum for publish - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 with: name: vector-${{ env.VECTOR_VERSION }}-SHA256SUMS path: target/artifacts/vector-${{ env.VECTOR_VERSION }}-SHA256SUMS diff --git a/.github/workflows/regression.yml b/.github/workflows/regression.yml index 29e8fe8177398..4e284620d6a2e 100644 --- a/.github/workflows/regression.yml +++ 
b/.github/workflows/regression.yml @@ -216,7 +216,7 @@ jobs: vector:${{ needs.resolve-inputs.outputs.baseline-tag }} - name: Upload image as artifact - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 with: name: baseline-image path: "${{ runner.temp }}/baseline-image.tar" @@ -255,7 +255,7 @@ jobs: vector:${{ needs.resolve-inputs.outputs.comparison-tag }} - name: Upload image as artifact - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 with: name: comparison-image path: "${{ runner.temp }}/comparison-image.tar" @@ -294,7 +294,7 @@ jobs: - build-baseline steps: - name: 'Download baseline image' - uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0 + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 with: name: baseline-image @@ -334,7 +334,7 @@ jobs: - build-comparison steps: - name: 'Download comparison image' - uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0 + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 with: name: comparison-image @@ -408,7 +408,7 @@ jobs: --submission-metadata ${{ runner.temp }}/submission-metadata \ --replicas ${{ env.SMP_REPLICAS }} - - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 with: name: vector-submission-metadata path: ${{ runner.temp }}/submission-metadata @@ -462,7 +462,7 @@ jobs: aws s3 cp s3://smp-cli-releases/v${{ needs.resolve-inputs.outputs.smp-version }}/x86_64-unknown-linux-musl/smp ${{ runner.temp }}/bin/smp - name: Download submission metadata - uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0 + uses: 
actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 with: name: vector-submission-metadata path: ${{ runner.temp }}/ @@ -501,7 +501,7 @@ jobs: aws s3 cp s3://smp-cli-releases/v${{ needs.resolve-inputs.outputs.smp-version }}/x86_64-unknown-linux-musl/smp ${{ runner.temp }}/bin/smp - name: Download submission metadata - uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0 + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 with: name: vector-submission-metadata path: ${{ runner.temp }}/ @@ -523,7 +523,7 @@ jobs: path: ${{ runner.temp }}/outputs/report.md - name: Upload regression report to artifacts - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 with: name: capture-artifacts path: ${{ runner.temp }}/outputs/* @@ -547,7 +547,7 @@ jobs: steps: - name: Download capture-artifacts continue-on-error: true - uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0 + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 with: name: capture-artifacts diff --git a/.github/workflows/scorecard.yml b/.github/workflows/scorecard.yml index d1c4dda39357b..f4c44e40831ca 100644 --- a/.github/workflows/scorecard.yml +++ b/.github/workflows/scorecard.yml @@ -59,7 +59,7 @@ jobs: # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF # format to the repository Actions tab. 
- name: "Upload artifact" - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 with: name: SARIF file path: results.sarif From d2b4f6422a6a1af1fdfc565fad2ff733d8eadf3e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 6 Nov 2025 11:02:37 -0500 Subject: [PATCH 057/227] chore(ci): bump docker/setup-qemu-action from 3.6.0 to 3.7.0 (#24174) Bumps [docker/setup-qemu-action](https://github.com/docker/setup-qemu-action) from 3.6.0 to 3.7.0. - [Release notes](https://github.com/docker/setup-qemu-action/releases) - [Commits](https://github.com/docker/setup-qemu-action/compare/29109295f81e9208d7d86ff1c6c12d2833863392...c7c53464625b32c7a7e944ae62b3e17d2b600130) --- updated-dependencies: - dependency-name: docker/setup-qemu-action dependency-version: 3.7.0 dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/environment.yml | 2 +- .github/workflows/publish.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/environment.yml b/.github/workflows/environment.yml index 3f1ac09b8e78e..3558b70e5d493 100644 --- a/.github/workflows/environment.yml +++ b/.github/workflows/environment.yml @@ -44,7 +44,7 @@ jobs: uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - name: Set up QEMU - uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v3.6.0 + uses: docker/setup-qemu-action@c7c53464625b32c7a7e944ae62b3e17d2b600130 # v3.7.0 - name: Set up Docker Buildx uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1 - name: Login to DockerHub diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index fde0f10265a69..a9a73373c9905 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -523,7 +523,7 @@ jobs: username: ${{ github.actor }} password: ${{ secrets.GITHUB_TOKEN }} - name: Set up QEMU - uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v3.6.0 + uses: docker/setup-qemu-action@c7c53464625b32c7a7e944ae62b3e17d2b600130 # v3.7.0 with: platforms: all - name: Set up Docker Buildx From f07e8833e548137ee2c3e8df9585db74b9e8d487 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 6 Nov 2025 16:02:55 +0000 Subject: [PATCH 058/227] chore(ci): bump docker/metadata-action from 5.8.0 to 5.9.0 (#24175) Bumps [docker/metadata-action](https://github.com/docker/metadata-action) from 5.8.0 to 5.9.0. 
- [Release notes](https://github.com/docker/metadata-action/releases) - [Commits](https://github.com/docker/metadata-action/compare/c1e51972afc2121e065aed6d45c65596fe445f3f...318604b99e75e41977312d83839a89be02ca4893) --- updated-dependencies: - dependency-name: docker/metadata-action dependency-version: 5.9.0 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/environment.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/environment.yml b/.github/workflows/environment.yml index 3558b70e5d493..62db91799fb3d 100644 --- a/.github/workflows/environment.yml +++ b/.github/workflows/environment.yml @@ -55,7 +55,7 @@ jobs: password: ${{ secrets.CI_DOCKER_PASSWORD }} - name: Extract metadata (tags, labels) for Docker id: meta - uses: docker/metadata-action@c1e51972afc2121e065aed6d45c65596fe445f3f # v5.8.0 + uses: docker/metadata-action@318604b99e75e41977312d83839a89be02ca4893 # v5.9.0 with: images: timberio/vector-dev flavor: | From 33a957e0d1130d85bf9dd0b62742cb46dd5559cc Mon Sep 17 00:00:00 2001 From: Thomas Date: Thu, 6 Nov 2025 11:29:00 -0500 Subject: [PATCH 059/227] chore(deps): update VRL to add missing stdlib fns from 0.28 (#24178) --- Cargo.lock | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.lock b/Cargo.lock index bdc14fe074433..068597bcbdeee 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -12773,7 +12773,7 @@ checksum = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d" [[package]] name = "vrl" version = "0.28.0" -source = "git+https://github.com/vectordotdev/vrl.git?branch=main#d6cae023d596fdc73bf19501498e2db962fe1d54" +source = "git+https://github.com/vectordotdev/vrl.git?branch=main#1f0d8a50df5c5cc8a45454bf992261a332013f59" dependencies = [ "aes", "aes-siv", From df6d39b7c6a196db300f5e14e34069a34c9d5447 Mon Sep 17 00:00:00 
2001 From: Thomas Date: Thu, 6 Nov 2025 16:15:12 -0500 Subject: [PATCH 060/227] chore(vdev): delete config subcommand (#24181) * chore(vdev): delete config subcommand * Delete unused function * Remove unused deps * Use all features in toml_edit --- Cargo.lock | 20 -------------------- vdev/Cargo.toml | 5 +---- vdev/src/app.rs | 11 +---------- vdev/src/commands/config/find.rs | 17 ----------------- vdev/src/commands/config/mod.rs | 8 -------- vdev/src/commands/config/set/mod.rs | 5 ----- vdev/src/commands/config/set/org.rs | 21 --------------------- vdev/src/commands/config/set/repo.rs | 26 -------------------------- vdev/src/commands/info.rs | 19 +------------------ vdev/src/commands/mod.rs | 2 -- vdev/src/main.rs | 7 +------ vdev/src/utils/config.rs | 25 ------------------------- vdev/src/utils/mod.rs | 1 - vdev/src/utils/paths.rs | 2 +- vdev/src/utils/platform.rs | 8 -------- 15 files changed, 5 insertions(+), 172 deletions(-) delete mode 100644 vdev/src/commands/config/find.rs delete mode 100644 vdev/src/commands/config/mod.rs delete mode 100644 vdev/src/commands/config/set/mod.rs delete mode 100644 vdev/src/commands/config/set/org.rs delete mode 100644 vdev/src/commands/config/set/repo.rs delete mode 100644 vdev/src/utils/config.rs diff --git a/Cargo.lock b/Cargo.lock index 068597bcbdeee..14844d2706f27 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2508,18 +2508,6 @@ dependencies = [ "crossbeam-utils", ] -[[package]] -name = "confy" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f29222b549d4e3ded127989d523da9e928918d0d0d7f7c1690b439d0d538bae9" -dependencies = [ - "directories", - "serde", - "thiserror 2.0.17", - "toml 0.8.23", -] - [[package]] name = "console" version = "0.15.7" @@ -3451,12 +3439,6 @@ dependencies = [ "shared_child", ] -[[package]] -name = "dunce" -version = "1.0.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"92773504d58c093f6de2459af4af33faa518c13451eb8f2b5698ed3d36e7c813" - [[package]] name = "dyn-clone" version = "1.0.20" @@ -12160,9 +12142,7 @@ dependencies = [ "clap", "clap-verbosity-flag", "clap_complete", - "confy", "directories", - "dunce", "git2", "glob", "hex", diff --git a/vdev/Cargo.toml b/vdev/Cargo.toml index 1683ace876a67..fd9ca3c770b87 100644 --- a/vdev/Cargo.toml +++ b/vdev/Cargo.toml @@ -22,10 +22,7 @@ chrono.workspace = true clap.workspace = true clap-verbosity-flag = "3.0.4" clap_complete = "4.5.58" -confy = "1.0.0" directories = "6.0.0" -# remove this when stabilized https://doc.rust-lang.org/stable/std/path/fn.absolute.html -dunce = "1.0.5" glob.workspace = true hex = "0.4.3" indexmap.workspace = true @@ -43,7 +40,7 @@ serde_yaml.workspace = true sha2 = "0.10.9" tempfile.workspace = true toml.workspace = true -toml_edit = { version = "0.22", default-features = false } +toml_edit = "0.22" semver.workspace = true indoc.workspace = true git2 = { version = "0.20.2" } diff --git a/vdev/src/app.rs b/vdev/src/app.rs index 1a67832593b52..e5360a639844f 100644 --- a/vdev/src/app.rs +++ b/vdev/src/app.rs @@ -13,7 +13,7 @@ use anyhow::{Context as _, Result, bail}; use indicatif::{ProgressBar, ProgressStyle}; use log::LevelFilter; -use crate::utils::{self, config::Config, platform}; +use crate::utils::{self, platform}; // Use the `bash` interpreter included as part of the standard `git` install for our default shell // if nothing is specified in the environment. 
@@ -30,17 +30,12 @@ pub static SHELL: LazyLock = LazyLock::new(|| env::var_os("SHELL").unwrap_or_else(|| DEFAULT_SHELL.into())); static VERBOSITY: OnceLock = OnceLock::new(); -static CONFIG: OnceLock = OnceLock::new(); static PATH: OnceLock = OnceLock::new(); pub fn verbosity() -> &'static LevelFilter { VERBOSITY.get().expect("verbosity is not initialized") } -pub fn config() -> &'static Config { - CONFIG.get().expect("config is not initialized") -} - pub fn path() -> &'static String { PATH.get().expect("path is not initialized") } @@ -258,10 +253,6 @@ pub fn set_global_verbosity(verbosity: LevelFilter) { VERBOSITY.set(verbosity).expect("could not set verbosity"); } -pub fn set_global_config(config: Config) { - CONFIG.set(config).expect("could not set config"); -} - pub fn set_global_path(path: String) { PATH.set(path).expect("could not set path"); } diff --git a/vdev/src/commands/config/find.rs b/vdev/src/commands/config/find.rs deleted file mode 100644 index 25fc3756f5e78..0000000000000 --- a/vdev/src/commands/config/find.rs +++ /dev/null @@ -1,17 +0,0 @@ -use anyhow::Result; -use clap::Args; - -use crate::utils::config; - -/// Locate the config file -#[derive(Args, Debug)] -#[command()] -pub struct Cli {} - -impl Cli { - pub fn exec(self) -> Result<()> { - println!("{}", config::path()?.display()); - - Ok(()) - } -} diff --git a/vdev/src/commands/config/mod.rs b/vdev/src/commands/config/mod.rs deleted file mode 100644 index f9d53961b77fa..0000000000000 --- a/vdev/src/commands/config/mod.rs +++ /dev/null @@ -1,8 +0,0 @@ -mod find; -mod set; - -crate::cli_subcommands! { - "Manage the vdev config file..." - find, - set, -} diff --git a/vdev/src/commands/config/set/mod.rs b/vdev/src/commands/config/set/mod.rs deleted file mode 100644 index cdbd02fddadd3..0000000000000 --- a/vdev/src/commands/config/set/mod.rs +++ /dev/null @@ -1,5 +0,0 @@ -mod repo; -crate::cli_subcommands! { - "Modify the config file..." 
- repo, -} diff --git a/vdev/src/commands/config/set/org.rs b/vdev/src/commands/config/set/org.rs deleted file mode 100644 index e65dab551b5a7..0000000000000 --- a/vdev/src/commands/config/set/org.rs +++ /dev/null @@ -1,21 +0,0 @@ -use anyhow::Result; -use clap::Args; - -use crate::{app, utils::config}; - -/// Set the target Datadog org -#[derive(Args, Debug)] -#[command()] -pub struct Cli { - name: String, -} - -impl Cli { - pub fn exec(self) -> Result<()> { - let mut config = app::config().clone(); - config.org = self.name.to_string(); - config::save(config)?; - - Ok(()) - } -} diff --git a/vdev/src/commands/config/set/repo.rs b/vdev/src/commands/config/set/repo.rs deleted file mode 100644 index fd423cee7931e..0000000000000 --- a/vdev/src/commands/config/set/repo.rs +++ /dev/null @@ -1,26 +0,0 @@ -use anyhow::Result; -use clap::Args; - -use crate::{ - app, - utils::{config, platform}, -}; - -/// Set the path to the Vector repository -#[derive(Args, Debug)] -#[command()] -pub struct Cli { - path: String, -} - -impl Cli { - pub fn exec(self) -> Result<()> { - let path = platform::canonicalize_path(self.path); - - let mut config = app::config().clone(); - config.repo = path; - config::save(config)?; - - Ok(()) - } -} diff --git a/vdev/src/commands/info.rs b/vdev/src/commands/info.rs index ae725bc224b11..de8091c779c51 100644 --- a/vdev/src/commands/info.rs +++ b/vdev/src/commands/info.rs @@ -2,10 +2,7 @@ use anyhow::Result; use clap::Args; use crate::testing::docker::CONTAINER_TOOL; -use crate::{ - app, - utils::{config, platform}, -}; +use crate::{app, utils::platform}; /// Show `vdev` command configuration #[derive(Args, Debug)] @@ -19,20 +16,6 @@ impl Cli { println!("Repository: {:?}", app::path()); println!("Shell: {}", app::SHELL.display()); - println!("\nConfig:"); - match config::path() { - Ok(path) => { - println!(" Path: {}", path.display()); - match config::load() { - Ok(config) => { - println!(" Repository: {:?}", config.repo); - } - Err(error) => 
println!(" Could not load: {error}"), - } - } - Err(error) => println!(" Path: Not found: {error}"), - } - println!("\nPlatform:"); println!(" Default target: {}", platform::default_target()); Ok(()) diff --git a/vdev/src/commands/mod.rs b/vdev/src/commands/mod.rs index 547d22147901d..d1ce23ef94231 100644 --- a/vdev/src/commands/mod.rs +++ b/vdev/src/commands/mod.rs @@ -70,7 +70,6 @@ pub struct Cli { mod build; mod check; mod complete; -mod config; mod crate_versions; mod e2e; mod exec; @@ -91,7 +90,6 @@ cli_commands! { build, check, complete, - config, crate_versions, e2e, exec, diff --git a/vdev/src/main.rs b/vdev/src/main.rs index 9bfec21b6f7be..d9213670f4eae 100644 --- a/vdev/src/main.rs +++ b/vdev/src/main.rs @@ -21,13 +21,8 @@ fn main() -> Result<()> { let cli = Cli::parse(); app::set_global_verbosity(cli.verbose.log_level_filter()); - app::set_global_config(utils::config::load()?); - let path = if app::config().repo.is_empty() { - utils::paths::find_repo_root()?.display().to_string() - } else { - app::config().repo.clone() - }; + let path = utils::paths::find_repo_root()?.display().to_string(); app::set_global_path(path); cli.exec() diff --git a/vdev/src/utils/config.rs b/vdev/src/utils/config.rs deleted file mode 100644 index 74cb7a2f15c11..0000000000000 --- a/vdev/src/utils/config.rs +++ /dev/null @@ -1,25 +0,0 @@ -use std::path::PathBuf; - -use anyhow::{Context, Result}; -use serde::{Deserialize, Serialize}; - -const APP_NAME: &str = "vdev"; -const FILE_STEM: &str = "config"; - -#[derive(Serialize, Deserialize, Clone, Debug, Default)] -pub struct Config { - pub repo: String, -} - -pub fn path() -> Result { - confy::get_configuration_file_path(APP_NAME, FILE_STEM) - .with_context(|| "unable to find the config file") -} - -pub fn load() -> Result { - confy::load(APP_NAME, FILE_STEM).with_context(|| "unable to load config") -} - -pub fn save(config: Config) -> Result<()> { - confy::store(APP_NAME, FILE_STEM, config).with_context(|| "unable to save config") 
-} diff --git a/vdev/src/utils/mod.rs b/vdev/src/utils/mod.rs index 33ea82e4491e4..f676f105e7c2d 100644 --- a/vdev/src/utils/mod.rs +++ b/vdev/src/utils/mod.rs @@ -21,7 +21,6 @@ pub mod macros; pub mod cargo; pub mod command; -pub mod config; pub mod environment; pub mod features; pub mod git; diff --git a/vdev/src/utils/paths.rs b/vdev/src/utils/paths.rs index 412cf1781b487..7a1dad6874555 100644 --- a/vdev/src/utils/paths.rs +++ b/vdev/src/utils/paths.rs @@ -11,7 +11,7 @@ use std::{ use anyhow::{Context, Result}; /// Find the Vector repository root by searching upward for markers like .git or Cargo.toml -/// with a [workspace] section. +/// with a `[workspace]` section. pub fn find_repo_root() -> Result { let mut current = env::current_dir().context("Could not determine current directory")?; diff --git a/vdev/src/utils/platform.rs b/vdev/src/utils/platform.rs index 1b278394417ca..c27b88dcee12f 100644 --- a/vdev/src/utils/platform.rs +++ b/vdev/src/utils/platform.rs @@ -6,14 +6,6 @@ use std::{ use directories::ProjectDirs; -pub fn canonicalize_path(path: impl AsRef) -> String { - let path = path.as_ref(); - dunce::canonicalize(path) - .unwrap_or_else(|err| panic!("Could not canonicalize path {}: {err}", path.display())) - .display() - .to_string() -} - pub fn data_dir() -> &'static Path { static DATA_DIR: OnceLock = OnceLock::new(); DATA_DIR.get_or_init(|| { From 02671f454061bdb41f9600cafcff3b4f26bd3773 Mon Sep 17 00:00:00 2001 From: Bruce Guenter Date: Fri, 7 Nov 2025 08:20:48 -0600 Subject: [PATCH 061/227] chore(transforms): Allow `datadog_search` to use `&LogEvent` directly (#24182) The current `datadog_search` matcher interface takes a reference to an `&Event` and then conditionally searches only logs, returning `false` if the event is not a log. If we want to search on type `LogEvent`, we need to first wrap it in an `Event` and then search. 
By using an `EventRef`, though, we can accomodate searching on both `&Event` and `&LogEvent` directly, avoiding the extraneous conversion. --- src/conditions/datadog_search.rs | 23 +++++++++-------------- 1 file changed, 9 insertions(+), 14 deletions(-) diff --git a/src/conditions/datadog_search.rs b/src/conditions/datadog_search.rs index 385320026ffa1..886c286096a5f 100644 --- a/src/conditions/datadog_search.rs +++ b/src/conditions/datadog_search.rs @@ -3,7 +3,7 @@ use std::{borrow::Cow, str::FromStr}; use bytes::Bytes; use vector_lib::{ configurable::configurable_component, - event::{Event, LogEvent, Value}, + event::{Event, EventRef, LogEvent, Value}, }; use vrl::{ datadog_filter::{ @@ -33,8 +33,8 @@ impl Default for DatadogSearchConfig { } impl DatadogSearchConfig { - pub fn build_matcher(&self) -> crate::Result>> { - Ok(as_log(build_matcher(&self.source, &EventFilter)?)) + pub fn build_matcher(&self) -> crate::Result>> { + Ok(build_matcher(&self.source, &EventFilter)?) } } @@ -57,7 +57,7 @@ impl_generate_config_from_default!(DatadogSearchConfig); /// a [Datadog Search Syntax query](https://docs.datadoghq.com/logs/explorer/search_syntax/). #[derive(Debug, Clone)] pub struct DatadogSearchRunner { - matcher: Box>, + matcher: Box>, } impl TryFrom<&DatadogSearchConfig> for DatadogSearchRunner { @@ -68,8 +68,11 @@ impl TryFrom<&DatadogSearchConfig> for DatadogSearchRunner { } impl DatadogSearchRunner { - pub fn matches(&self, event: &Event) -> bool { - self.matcher.run(event) + pub fn matches<'a>(&self, event: impl Into>) -> bool { + match event.into() { + EventRef::Log(log) => self.matcher.run(log), + _ => false, + } } } @@ -88,14 +91,6 @@ impl ConditionalConfig for DatadogSearchConfig { } } -/// Run the provided `Matcher` when we're dealing with `LogEvent`s. Otherwise, return false. 
-fn as_log(matcher: Box>) -> Box> { - Run::boxed(move |ev| match ev { - Event::Log(log) => matcher.run(log), - _ => false, - }) -} - #[derive(Default, Clone)] struct EventFilter; From c1e83f9525037e6e8eecced6804f0fac180ebc0f Mon Sep 17 00:00:00 2001 From: Bruce Guenter Date: Fri, 7 Nov 2025 08:44:19 -0600 Subject: [PATCH 062/227] chore(sources): Refactor `source_sender` into modules (#24183) --- lib/vector-core/src/task.rs | 26 ++ src/source_sender/builder.rs | 79 ++++ src/source_sender/errors.rs | 3 +- src/source_sender/mod.rs | 702 +---------------------------------- src/source_sender/output.rs | 230 ++++++++++++ src/source_sender/sender.rs | 242 ++++++++++++ src/source_sender/tests.rs | 171 +++++++++ 7 files changed, 757 insertions(+), 696 deletions(-) create mode 100644 lib/vector-core/src/task.rs create mode 100644 src/source_sender/builder.rs create mode 100644 src/source_sender/output.rs create mode 100644 src/source_sender/sender.rs create mode 100644 src/source_sender/tests.rs diff --git a/lib/vector-core/src/task.rs b/lib/vector-core/src/task.rs new file mode 100644 index 0000000000000..f79615a514c19 --- /dev/null +++ b/lib/vector-core/src/task.rs @@ -0,0 +1,26 @@ +use std::{collections::BTreeMap, fmt}; + +#[derive(Clone, Debug)] +pub struct TaskCompletedError { + pub message: String, + pub fields: BTreeMap<&'static str, String>, +} + +impl TaskCompletedError { + pub fn new(message: String, fields: impl IntoIterator) -> Self { + let fields = fields.into_iter().collect(); + Self { message, fields } + } +} + +impl fmt::Display for TaskCompletedError { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(fmt, "{:?}", self.message)?; + let mut sep = " "; + for field in &self.fields { + write!(fmt, "{sep}{} = {:?}", field.0, field.1)?; + sep = ", "; + } + Ok(()) + } +} diff --git a/src/source_sender/builder.rs b/src/source_sender/builder.rs new file mode 100644 index 0000000000000..2a865cbf0e159 --- /dev/null +++ 
b/src/source_sender/builder.rs @@ -0,0 +1,79 @@ +use std::collections::HashMap; + +use metrics::{Histogram, histogram}; +use vector_lib::{ + buffers::topology::channel::LimitedReceiver, + config::{ComponentKey, OutputId, SourceOutput}, + internal_event::DEFAULT_OUTPUT, +}; + +use super::{CHUNK_SIZE, LAG_TIME_NAME, Output, SourceSender, SourceSenderItem}; + +pub struct Builder { + buf_size: usize, + default_output: Option, + named_outputs: HashMap, + lag_time: Option, +} + +impl Default for Builder { + fn default() -> Self { + Self { + buf_size: CHUNK_SIZE, + default_output: None, + named_outputs: Default::default(), + lag_time: Some(histogram!(LAG_TIME_NAME)), + } + } +} + +impl Builder { + pub const fn with_buffer(mut self, n: usize) -> Self { + self.buf_size = n; + self + } + + pub fn add_source_output( + &mut self, + output: SourceOutput, + component_key: ComponentKey, + ) -> LimitedReceiver { + let lag_time = self.lag_time.clone(); + let log_definition = output.schema_definition.clone(); + let output_id = OutputId { + component: component_key, + port: output.port.clone(), + }; + match output.port { + None => { + let (output, rx) = Output::new_with_buffer( + self.buf_size, + DEFAULT_OUTPUT.to_owned(), + lag_time, + log_definition, + output_id, + ); + self.default_output = Some(output); + rx + } + Some(name) => { + let (output, rx) = Output::new_with_buffer( + self.buf_size, + name.clone(), + lag_time, + log_definition, + output_id, + ); + self.named_outputs.insert(name, output); + rx + } + } + } + + pub fn build(self) -> SourceSender { + SourceSender { + default_output: self.default_output, + named_outputs: self.named_outputs, + } + } +} diff --git a/src/source_sender/errors.rs b/src/source_sender/errors.rs index 4e588c763516c..54d6cd106189d 100644 --- a/src/source_sender/errors.rs +++ b/src/source_sender/errors.rs @@ -2,8 +2,7 @@ use std::fmt; use tokio::sync::mpsc; use vector_lib::buffers::topology::channel::SendError; - -use crate::event::{Event, EventArray}; 
+use vector_lib::event::{Event, EventArray}; #[derive(Clone, Debug)] pub struct ClosedError; diff --git a/src/source_sender/mod.rs b/src/source_sender/mod.rs index d6c3b13fb0974..16d2ca4792103 100644 --- a/src/source_sender/mod.rs +++ b/src/source_sender/mod.rs @@ -1,38 +1,16 @@ #![allow(missing_docs)] -use std::{collections::HashMap, fmt, num::NonZeroUsize, sync::Arc, time::Instant}; - -use chrono::Utc; -use futures::{Stream, StreamExt}; -use metrics::{Histogram, histogram}; -use tracing::Span; -#[cfg(any(test, feature = "test-utils"))] -use vector_lib::event::{EventStatus, into_event_stream}; -use vector_lib::{ - ByteSizeOf, EstimatedJsonEncodedSizeOf, - buffers::{ - EventCount, - config::MemoryBufferSize, - topology::channel::{self, LimitedReceiver, LimitedSender}, - }, - config::{SourceOutput, log_schema}, - event::{Event, EventArray, EventContainer, EventRef, array, array::EventArrayIntoIter}, - finalization::{AddBatchNotifier, BatchNotifier}, - internal_event::{ - self, ComponentEventsDropped, CountByteSize, DEFAULT_OUTPUT, EventsSent, - InternalEventHandle as _, Registered, UNINTENTIONAL, - }, - json_size::JsonSize, -}; -use vrl::value::Value; +mod builder; mod errors; +mod output; +mod sender; +#[cfg(test)] +mod tests; +pub use builder::Builder; pub use errors::{ClosedError, StreamSendError}; - -use crate::{ - config::{ComponentKey, OutputId}, - schema::Definition, -}; +use output::Output; +pub use sender::{SourceSender, SourceSenderItem}; pub(crate) const CHUNK_SIZE: usize = 1000; @@ -40,667 +18,3 @@ pub(crate) const CHUNK_SIZE: usize = 1000; const TEST_BUFFER_SIZE: usize = 100; const LAG_TIME_NAME: &str = "source_lag_time_seconds"; - -/// SourceSenderItem is a thin wrapper around [EventArray] used to track the send duration of a batch. -/// -/// This is needed because the send duration is calculated as the difference between when the batch -/// is sent from the origin component to when the batch is enqueued on the receiving component's input buffer. 
-/// For sources in particular, this requires the batch to be enqueued on two channels: the origin component's pump -/// channel and then the receiving component's input buffer. -#[derive(Debug)] -pub struct SourceSenderItem { - /// The batch of events to send. - pub events: EventArray, - /// Reference instant used to calculate send duration. - pub send_reference: Instant, -} - -impl AddBatchNotifier for SourceSenderItem { - fn add_batch_notifier(&mut self, notifier: BatchNotifier) { - self.events.add_batch_notifier(notifier) - } -} - -impl ByteSizeOf for SourceSenderItem { - fn allocated_bytes(&self) -> usize { - self.events.allocated_bytes() - } -} - -impl EventCount for SourceSenderItem { - fn event_count(&self) -> usize { - self.events.event_count() - } -} - -impl EstimatedJsonEncodedSizeOf for SourceSenderItem { - fn estimated_json_encoded_size_of(&self) -> JsonSize { - self.events.estimated_json_encoded_size_of() - } -} - -impl EventContainer for SourceSenderItem { - type IntoIter = EventArrayIntoIter; - - fn len(&self) -> usize { - self.events.len() - } - - fn into_events(self) -> Self::IntoIter { - self.events.into_events() - } -} - -impl From for EventArray { - fn from(val: SourceSenderItem) -> Self { - val.events - } -} - -pub struct Builder { - buf_size: usize, - default_output: Option, - named_outputs: HashMap, - lag_time: Option, -} - -impl Default for Builder { - fn default() -> Self { - Self { - buf_size: CHUNK_SIZE, - default_output: None, - named_outputs: Default::default(), - lag_time: Some(histogram!(LAG_TIME_NAME)), - } - } -} - -impl Builder { - pub const fn with_buffer(mut self, n: usize) -> Self { - self.buf_size = n; - self - } - - pub fn add_source_output( - &mut self, - output: SourceOutput, - component_key: ComponentKey, - ) -> LimitedReceiver { - let lag_time = self.lag_time.clone(); - let log_definition = output.schema_definition.clone(); - let output_id = OutputId { - component: component_key, - port: output.port.clone(), - }; - match 
output.port { - None => { - let (output, rx) = Output::new_with_buffer( - self.buf_size, - DEFAULT_OUTPUT.to_owned(), - lag_time, - log_definition, - output_id, - ); - self.default_output = Some(output); - rx - } - Some(name) => { - let (output, rx) = Output::new_with_buffer( - self.buf_size, - name.clone(), - lag_time, - log_definition, - output_id, - ); - self.named_outputs.insert(name, output); - rx - } - } - } - - pub fn build(self) -> SourceSender { - SourceSender { - default_output: self.default_output, - named_outputs: self.named_outputs, - } - } -} - -#[derive(Debug, Clone)] -pub struct SourceSender { - // The default output is optional because some sources, e.g. `datadog_agent` - // and `opentelemetry`, can be configured to only output to named outputs. - default_output: Option, - named_outputs: HashMap, -} - -impl SourceSender { - pub fn builder() -> Builder { - Builder::default() - } - - #[cfg(any(test, feature = "test-utils"))] - pub fn new_test_sender_with_buffer(n: usize) -> (Self, LimitedReceiver) { - let lag_time = Some(histogram!(LAG_TIME_NAME)); - let output_id = OutputId { - component: "test".to_string().into(), - port: None, - }; - let (default_output, rx) = - Output::new_with_buffer(n, DEFAULT_OUTPUT.to_owned(), lag_time, None, output_id); - ( - Self { - default_output: Some(default_output), - named_outputs: Default::default(), - }, - rx, - ) - } - - #[cfg(any(test, feature = "test-utils"))] - pub fn new_test() -> (Self, impl Stream + Unpin) { - let (pipe, recv) = Self::new_test_sender_with_buffer(TEST_BUFFER_SIZE); - let recv = recv.into_stream().flat_map(into_event_stream); - (pipe, recv) - } - - #[cfg(any(test, feature = "test-utils"))] - pub fn new_test_finalize(status: EventStatus) -> (Self, impl Stream + Unpin) { - let (pipe, recv) = Self::new_test_sender_with_buffer(TEST_BUFFER_SIZE); - // In a source test pipeline, there is no sink to acknowledge - // events, so we have to add a map to the receiver to handle the - // finalization. 
- let recv = recv.into_stream().flat_map(move |mut item| { - item.events.iter_events_mut().for_each(|mut event| { - let metadata = event.metadata_mut(); - metadata.update_status(status); - metadata.update_sources(); - }); - into_event_stream(item) - }); - (pipe, recv) - } - - #[cfg(any(test, feature = "test-utils"))] - pub fn new_test_errors( - error_at: impl Fn(usize) -> bool, - ) -> (Self, impl Stream + Unpin) { - let (pipe, recv) = Self::new_test_sender_with_buffer(TEST_BUFFER_SIZE); - // In a source test pipeline, there is no sink to acknowledge - // events, so we have to add a map to the receiver to handle the - // finalization. - let mut count: usize = 0; - let recv = recv.into_stream().flat_map(move |mut item| { - let status = if error_at(count) { - EventStatus::Errored - } else { - EventStatus::Delivered - }; - count += 1; - item.events.iter_events_mut().for_each(|mut event| { - let metadata = event.metadata_mut(); - metadata.update_status(status); - metadata.update_sources(); - }); - into_event_stream(item) - }); - (pipe, recv) - } - - #[cfg(any(test, feature = "test-utils"))] - pub fn add_outputs( - &mut self, - status: EventStatus, - name: String, - ) -> impl Stream + Unpin + use<> { - // The lag_time parameter here will need to be filled in if this function is ever used for - // non-test situations. - let output_id = OutputId { - component: "test".to_string().into(), - port: Some(name.clone()), - }; - let (output, recv) = Output::new_with_buffer(100, name.clone(), None, None, output_id); - let recv = recv.into_stream().map(move |mut item| { - item.events.iter_events_mut().for_each(|mut event| { - let metadata = event.metadata_mut(); - metadata.update_status(status); - metadata.update_sources(); - }); - item - }); - self.named_outputs.insert(name, output); - recv - } - - /// Get a mutable reference to the default output, panicking if none exists. 
- const fn default_output_mut(&mut self) -> &mut Output { - self.default_output.as_mut().expect("no default output") - } - - /// Send an event to the default output. - /// - /// This internally handles emitting [EventsSent] and [ComponentEventsDropped] events. - pub async fn send_event(&mut self, event: impl Into) -> Result<(), ClosedError> { - self.default_output_mut().send_event(event).await - } - - /// Send a stream of events to the default output. - /// - /// This internally handles emitting [EventsSent] and [ComponentEventsDropped] events. - pub async fn send_event_stream(&mut self, events: S) -> Result<(), ClosedError> - where - S: Stream + Unpin, - E: Into + ByteSizeOf, - { - self.default_output_mut().send_event_stream(events).await - } - - /// Send a batch of events to the default output. - /// - /// This internally handles emitting [EventsSent] and [ComponentEventsDropped] events. - pub async fn send_batch(&mut self, events: I) -> Result<(), ClosedError> - where - E: Into + ByteSizeOf, - I: IntoIterator, - ::IntoIter: ExactSizeIterator, - { - self.default_output_mut().send_batch(events).await - } - - /// Send a batch of events event to a named output. - /// - /// This internally handles emitting [EventsSent] and [ComponentEventsDropped] events. - pub async fn send_batch_named(&mut self, name: &str, events: I) -> Result<(), ClosedError> - where - E: Into + ByteSizeOf, - I: IntoIterator, - ::IntoIter: ExactSizeIterator, - { - self.named_outputs - .get_mut(name) - .expect("unknown output") - .send_batch(events) - .await - } -} - -/// UnsentEvents tracks the number of events yet to be sent in the buffer. This is used to -/// increment the appropriate counters when a future is not polled to completion. Particularly, -/// this is known to happen in a Warp server when a client sends a new HTTP request on a TCP -/// connection that already has a pending request. 
-/// -/// If its internal count is greater than 0 when dropped, the appropriate [ComponentEventsDropped] -/// event is emitted. -struct UnsentEventCount { - count: usize, - span: Span, -} - -impl UnsentEventCount { - fn new(count: usize) -> Self { - Self { - count, - span: Span::current(), - } - } - - const fn decr(&mut self, count: usize) { - self.count = self.count.saturating_sub(count); - } - - const fn discard(&mut self) { - self.count = 0; - } -} - -impl Drop for UnsentEventCount { - fn drop(&mut self) { - if self.count > 0 { - let _enter = self.span.enter(); - emit!(ComponentEventsDropped:: { - count: self.count, - reason: "Source send cancelled." - }); - } - } -} - -#[derive(Clone)] -struct Output { - sender: LimitedSender, - lag_time: Option, - events_sent: Registered, - /// The schema definition that will be attached to Log events sent through here - log_definition: Option>, - /// The OutputId related to this source sender. This is set as the `upstream_id` in - /// `EventMetadata` for all event sent through here. 
- output_id: Arc, -} - -impl fmt::Debug for Output { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.debug_struct("Output") - .field("sender", &self.sender) - .field("output_id", &self.output_id) - // `metrics::Histogram` is missing `impl Debug` - .finish() - } -} - -impl Output { - fn new_with_buffer( - n: usize, - output: String, - lag_time: Option, - log_definition: Option>, - output_id: OutputId, - ) -> (Self, LimitedReceiver) { - let (tx, rx) = channel::limited(MemoryBufferSize::MaxEvents(NonZeroUsize::new(n).unwrap())); - ( - Self { - sender: tx, - lag_time, - events_sent: register!(EventsSent::from(internal_event::Output(Some( - output.into() - )))), - log_definition, - output_id: Arc::new(output_id), - }, - rx, - ) - } - - async fn send( - &mut self, - mut events: EventArray, - unsent_event_count: &mut UnsentEventCount, - ) -> Result<(), ClosedError> { - let send_reference = Instant::now(); - let reference = Utc::now().timestamp_millis(); - events - .iter_events() - .for_each(|event| self.emit_lag_time(event, reference)); - - events.iter_events_mut().for_each(|mut event| { - // attach runtime schema definitions from the source - if let Some(log_definition) = &self.log_definition { - event.metadata_mut().set_schema_definition(log_definition); - } - event - .metadata_mut() - .set_upstream_id(Arc::clone(&self.output_id)); - }); - - let byte_size = events.estimated_json_encoded_size_of(); - let count = events.len(); - self.sender - .send(SourceSenderItem { - events, - send_reference, - }) - .await - .map_err(|_| ClosedError)?; - self.events_sent.emit(CountByteSize(count, byte_size)); - unsent_event_count.decr(count); - Ok(()) - } - - async fn send_event(&mut self, event: impl Into) -> Result<(), ClosedError> { - let event: EventArray = event.into(); - // It's possible that the caller stops polling this future while it is blocked waiting - // on `self.send()`. 
When that happens, we use `UnsentEventCount` to correctly emit - // `ComponentEventsDropped` events. - let mut unsent_event_count = UnsentEventCount::new(event.len()); - self.send(event, &mut unsent_event_count).await - } - - async fn send_event_stream(&mut self, events: S) -> Result<(), ClosedError> - where - S: Stream + Unpin, - E: Into + ByteSizeOf, - { - let mut stream = events.ready_chunks(CHUNK_SIZE); - while let Some(events) = stream.next().await { - self.send_batch(events.into_iter()).await?; - } - Ok(()) - } - - async fn send_batch(&mut self, events: I) -> Result<(), ClosedError> - where - E: Into + ByteSizeOf, - I: IntoIterator, - ::IntoIter: ExactSizeIterator, - { - // It's possible that the caller stops polling this future while it is blocked waiting - // on `self.send()`. When that happens, we use `UnsentEventCount` to correctly emit - // `ComponentEventsDropped` events. - let events = events.into_iter().map(Into::into); - let mut unsent_event_count = UnsentEventCount::new(events.len()); - for events in array::events_into_arrays(events, Some(CHUNK_SIZE)) { - self.send(events, &mut unsent_event_count) - .await - .inspect_err(|_| { - // The unsent event count is discarded here because the callee emits the - // `StreamClosedError`. - unsent_event_count.discard(); - })?; - } - Ok(()) - } - - /// Calculate the difference between the reference time and the - /// timestamp stored in the given event reference, and emit the - /// different, as expressed in milliseconds, as a histogram. 
- fn emit_lag_time(&self, event: EventRef<'_>, reference: i64) { - if let Some(lag_time_metric) = &self.lag_time { - let timestamp = match event { - EventRef::Log(log) => { - log_schema() - .timestamp_key_target_path() - .and_then(|timestamp_key| { - log.get(timestamp_key).and_then(get_timestamp_millis) - }) - } - EventRef::Metric(metric) => metric - .timestamp() - .map(|timestamp| timestamp.timestamp_millis()), - EventRef::Trace(trace) => { - log_schema() - .timestamp_key_target_path() - .and_then(|timestamp_key| { - trace.get(timestamp_key).and_then(get_timestamp_millis) - }) - } - }; - if let Some(timestamp) = timestamp { - // This will truncate precision for values larger than 2**52, but at that point the user - // probably has much larger problems than precision. - let lag_time = (reference - timestamp) as f64 / 1000.0; - lag_time_metric.record(lag_time); - } - } - } -} - -const fn get_timestamp_millis(value: &Value) -> Option { - match value { - Value::Timestamp(timestamp) => Some(timestamp.timestamp_millis()), - _ => None, - } -} - -#[cfg(test)] -mod tests { - use chrono::{DateTime, Duration}; - use rand::{Rng, rng}; - use tokio::time::timeout; - use vector_lib::event::{LogEvent, Metric, MetricKind, MetricValue, TraceEvent}; - use vrl::event_path; - - use super::*; - use crate::metrics::{self, Controller}; - - #[tokio::test] - async fn emits_lag_time_for_log() { - emit_and_test(|timestamp| { - let mut log = LogEvent::from("Log message"); - log.insert("timestamp", timestamp); - Event::Log(log) - }) - .await; - } - - #[tokio::test] - async fn emits_lag_time_for_metric() { - emit_and_test(|timestamp| { - Event::Metric( - Metric::new( - "name", - MetricKind::Absolute, - MetricValue::Gauge { value: 123.4 }, - ) - .with_timestamp(Some(timestamp)), - ) - }) - .await; - } - - #[tokio::test] - async fn emits_lag_time_for_trace() { - emit_and_test(|timestamp| { - let mut trace = TraceEvent::default(); - trace.insert(event_path!("timestamp"), timestamp); - 
Event::Trace(trace) - }) - .await; - } - - async fn emit_and_test(make_event: impl FnOnce(DateTime) -> Event) { - metrics::init_test(); - let (mut sender, _stream) = SourceSender::new_test(); - let millis = rng().random_range(10..10000); - let timestamp = Utc::now() - Duration::milliseconds(millis); - let expected = millis as f64 / 1000.0; - - let event = make_event(timestamp); - sender - .send_event(event) - .await - .expect("Send should not fail"); - - let lag_times = Controller::get() - .expect("There must be a controller") - .capture_metrics() - .into_iter() - .filter(|metric| metric.name() == "source_lag_time_seconds") - .collect::>(); - assert_eq!(lag_times.len(), 1); - - let lag_time = &lag_times[0]; - match lag_time.value() { - MetricValue::AggregatedHistogram { - buckets, - count, - sum, - } => { - let mut done = false; - for bucket in buckets { - if !done && bucket.upper_limit >= expected { - assert_eq!(bucket.count, 1); - done = true; - } else { - assert_eq!(bucket.count, 0); - } - } - assert_eq!(*count, 1); - assert!( - (*sum - expected).abs() <= 0.002, - "Histogram sum does not match expected sum: {} vs {}", - *sum, - expected, - ); - } - _ => panic!("source_lag_time_seconds has invalid type"), - } - } - - #[tokio::test] - async fn emits_component_discarded_events_total_for_send_event() { - metrics::init_test(); - let (mut sender, _recv) = SourceSender::new_test_sender_with_buffer(1); - - let event = Event::Metric(Metric::new( - "name", - MetricKind::Absolute, - MetricValue::Gauge { value: 123.4 }, - )); - - // First send will succeed. - sender - .send_event(event.clone()) - .await - .expect("First send should not fail"); - - // Second send will timeout, so the future will not be polled to completion. 
- let res = timeout( - std::time::Duration::from_millis(100), - sender.send_event(event.clone()), - ) - .await; - assert!(res.is_err(), "Send should have timed out."); - - let component_discarded_events_total = Controller::get() - .expect("There must be a controller") - .capture_metrics() - .into_iter() - .filter(|metric| metric.name() == "component_discarded_events_total") - .collect::>(); - assert_eq!(component_discarded_events_total.len(), 1); - - let component_discarded_events_total = &component_discarded_events_total[0]; - let MetricValue::Counter { value } = component_discarded_events_total.value() else { - panic!("component_discarded_events_total has invalid type") - }; - assert_eq!(*value, 1.0); - } - - #[tokio::test] - async fn emits_component_discarded_events_total_for_send_batch() { - metrics::init_test(); - let (mut sender, _recv) = SourceSender::new_test_sender_with_buffer(1); - - let expected_drop = 100; - let events: Vec = (0..(CHUNK_SIZE + expected_drop)) - .map(|_| { - Event::Metric(Metric::new( - "name", - MetricKind::Absolute, - MetricValue::Gauge { value: 123.4 }, - )) - }) - .collect(); - - // `CHUNK_SIZE` events will be sent into buffer but then the future will not be polled to completion. 
- let res = timeout( - std::time::Duration::from_millis(100), - sender.send_batch(events), - ) - .await; - assert!(res.is_err(), "Send should have timed out."); - - let component_discarded_events_total = Controller::get() - .expect("There must be a controller") - .capture_metrics() - .into_iter() - .filter(|metric| metric.name() == "component_discarded_events_total") - .collect::>(); - assert_eq!(component_discarded_events_total.len(), 1); - - let component_discarded_events_total = &component_discarded_events_total[0]; - let MetricValue::Counter { value } = component_discarded_events_total.value() else { - panic!("component_discarded_events_total has invalid type") - }; - assert_eq!(*value, expected_drop as f64); - } -} diff --git a/src/source_sender/output.rs b/src/source_sender/output.rs new file mode 100644 index 0000000000000..02854e5d1ea6a --- /dev/null +++ b/src/source_sender/output.rs @@ -0,0 +1,230 @@ +use std::{fmt, num::NonZeroUsize, sync::Arc, time::Instant}; + +use chrono::Utc; +use futures::{Stream, StreamExt as _}; +use metrics::Histogram; +use tracing::Span; +use vector_lib::{ + ByteSizeOf, EstimatedJsonEncodedSizeOf as _, + buffers::config::MemoryBufferSize, + buffers::topology::channel::{self, LimitedReceiver, LimitedSender}, + config::{OutputId, log_schema}, + event::{Event, EventArray, EventContainer as _, EventRef, array}, + internal_event::{ + self, ComponentEventsDropped, CountByteSize, EventsSent, InternalEventHandle as _, + Registered, UNINTENTIONAL, + }, + schema::Definition, +}; +use vrl::value::Value; + +use super::{CHUNK_SIZE, ClosedError, SourceSenderItem}; + +/// UnsentEvents tracks the number of events yet to be sent in the buffer. This is used to +/// increment the appropriate counters when a future is not polled to completion. Particularly, +/// this is known to happen in a Warp server when a client sends a new HTTP request on a TCP +/// connection that already has a pending request. 
+/// +/// If its internal count is greater than 0 when dropped, the appropriate [ComponentEventsDropped] +/// event is emitted. +pub(super) struct UnsentEventCount { + count: usize, + span: Span, +} + +impl UnsentEventCount { + fn new(count: usize) -> Self { + Self { + count, + span: Span::current(), + } + } + + const fn decr(&mut self, count: usize) { + self.count = self.count.saturating_sub(count); + } + + const fn discard(&mut self) { + self.count = 0; + } +} + +impl Drop for UnsentEventCount { + fn drop(&mut self) { + if self.count > 0 { + let _enter = self.span.enter(); + emit!(ComponentEventsDropped:: { + count: self.count, + reason: "Source send cancelled." + }); + } + } +} + +#[derive(Clone)] +pub(super) struct Output { + sender: LimitedSender, + lag_time: Option, + events_sent: Registered, + /// The schema definition that will be attached to Log events sent through here + log_definition: Option>, + /// The OutputId related to this source sender. This is set as the `upstream_id` in + /// `EventMetadata` for all event sent through here. 
+ output_id: Arc, +} + +impl fmt::Debug for Output { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt.debug_struct("Output") + .field("sender", &self.sender) + .field("output_id", &self.output_id) + // `metrics::Histogram` is missing `impl Debug` + .finish() + } +} + +impl Output { + pub(super) fn new_with_buffer( + n: usize, + output: String, + lag_time: Option, + log_definition: Option>, + output_id: OutputId, + ) -> (Self, LimitedReceiver) { + let (tx, rx) = channel::limited(MemoryBufferSize::MaxEvents(NonZeroUsize::new(n).unwrap())); + ( + Self { + sender: tx, + lag_time, + events_sent: register!(EventsSent::from(internal_event::Output(Some( + output.into() + )))), + log_definition, + output_id: Arc::new(output_id), + }, + rx, + ) + } + + pub(super) async fn send( + &mut self, + mut events: EventArray, + unsent_event_count: &mut UnsentEventCount, + ) -> Result<(), ClosedError> { + let send_reference = Instant::now(); + let reference = Utc::now().timestamp_millis(); + events + .iter_events() + .for_each(|event| self.emit_lag_time(event, reference)); + + events.iter_events_mut().for_each(|mut event| { + // attach runtime schema definitions from the source + if let Some(log_definition) = &self.log_definition { + event.metadata_mut().set_schema_definition(log_definition); + } + event + .metadata_mut() + .set_upstream_id(Arc::clone(&self.output_id)); + }); + + let byte_size = events.estimated_json_encoded_size_of(); + let count = events.len(); + self.sender + .send(SourceSenderItem { + events, + send_reference, + }) + .await + .map_err(|_| ClosedError)?; + self.events_sent.emit(CountByteSize(count, byte_size)); + unsent_event_count.decr(count); + Ok(()) + } + + pub(super) async fn send_event( + &mut self, + event: impl Into, + ) -> Result<(), ClosedError> { + let event: EventArray = event.into(); + // It's possible that the caller stops polling this future while it is blocked waiting + // on `self.send()`. 
When that happens, we use `UnsentEventCount` to correctly emit
+    // `ComponentEventsDropped` events.
+    let mut unsent_event_count = UnsentEventCount::new(event.len());
+    self.send(event, &mut unsent_event_count).await
+    }
+
+    pub(super) async fn send_event_stream(&mut self, events: S) -> Result<(), ClosedError>
+    where
+    S: Stream + Unpin,
+    E: Into + ByteSizeOf,
+    {
+    let mut stream = events.ready_chunks(CHUNK_SIZE);
+    while let Some(events) = stream.next().await {
+    self.send_batch(events.into_iter()).await?;
+    }
+    Ok(())
+    }
+
+    pub(super) async fn send_batch(&mut self, events: I) -> Result<(), ClosedError>
+    where
+    E: Into + ByteSizeOf,
+    I: IntoIterator,
+    ::IntoIter: ExactSizeIterator,
+    {
+    // It's possible that the caller stops polling this future while it is blocked waiting
+    // on `self.send()`. When that happens, we use `UnsentEventCount` to correctly emit
+    // `ComponentEventsDropped` events.
+    let events = events.into_iter().map(Into::into);
+    let mut unsent_event_count = UnsentEventCount::new(events.len());
+    for events in array::events_into_arrays(events, Some(CHUNK_SIZE)) {
+    self.send(events, &mut unsent_event_count)
+    .await
+    .inspect_err(|_| {
+    // The unsent event count is discarded here because the callee emits the
+    // `StreamClosedError`.
+    unsent_event_count.discard();
+    })?;
+    }
+    Ok(())
+    }
+
+    /// Calculate the difference between the reference time and the
+    /// timestamp stored in the given event reference, and emit the
+    /// difference, as expressed in fractional seconds, as a histogram. 
+ pub(super) fn emit_lag_time(&self, event: EventRef<'_>, reference: i64) { + if let Some(lag_time_metric) = &self.lag_time { + let timestamp = match event { + EventRef::Log(log) => { + log_schema() + .timestamp_key_target_path() + .and_then(|timestamp_key| { + log.get(timestamp_key).and_then(get_timestamp_millis) + }) + } + EventRef::Metric(metric) => metric + .timestamp() + .map(|timestamp| timestamp.timestamp_millis()), + EventRef::Trace(trace) => { + log_schema() + .timestamp_key_target_path() + .and_then(|timestamp_key| { + trace.get(timestamp_key).and_then(get_timestamp_millis) + }) + } + }; + if let Some(timestamp) = timestamp { + // This will truncate precision for values larger than 2**52, but at that point the user + // probably has much larger problems than precision. + let lag_time = (reference - timestamp) as f64 / 1000.0; + lag_time_metric.record(lag_time); + } + } + } +} + +const fn get_timestamp_millis(value: &Value) -> Option { + match value { + Value::Timestamp(timestamp) => Some(timestamp.timestamp_millis()), + _ => None, + } +} diff --git a/src/source_sender/sender.rs b/src/source_sender/sender.rs new file mode 100644 index 0000000000000..8a5a27f9dbbf0 --- /dev/null +++ b/src/source_sender/sender.rs @@ -0,0 +1,242 @@ +use std::collections::HashMap; +use std::time::Instant; + +use futures::Stream; +#[cfg(any(test, feature = "test-utils"))] +use futures::StreamExt as _; +#[cfg(any(test, feature = "test-utils"))] +use metrics::histogram; +#[cfg(doc)] +use vector_lib::internal_event::{ComponentEventsDropped, EventsSent}; +use vector_lib::{ + ByteSizeOf, EstimatedJsonEncodedSizeOf, + buffers::EventCount, + event::{Event, EventArray, EventContainer, array::EventArrayIntoIter}, + finalization::{AddBatchNotifier, BatchNotifier}, + json_size::JsonSize, +}; +#[cfg(any(test, feature = "test-utils"))] +use vector_lib::{buffers::topology::channel::LimitedReceiver, internal_event::DEFAULT_OUTPUT}; +#[cfg(any(test, feature = "test-utils"))] +use vector_lib::{ 
+ config::OutputId, + event::{EventStatus, into_event_stream}, +}; + +use super::{Builder, ClosedError, Output}; +#[cfg(any(test, feature = "test-utils"))] +use super::{LAG_TIME_NAME, TEST_BUFFER_SIZE}; + +/// SourceSenderItem is a thin wrapper around [EventArray] used to track the send duration of a batch. +/// +/// This is needed because the send duration is calculated as the difference between when the batch +/// is sent from the origin component to when the batch is enqueued on the receiving component's input buffer. +/// For sources in particular, this requires the batch to be enqueued on two channels: the origin component's pump +/// channel and then the receiving component's input buffer. +#[derive(Debug)] +pub struct SourceSenderItem { + /// The batch of events to send. + pub events: EventArray, + /// Reference instant used to calculate send duration. + pub send_reference: Instant, +} + +impl AddBatchNotifier for SourceSenderItem { + fn add_batch_notifier(&mut self, notifier: BatchNotifier) { + self.events.add_batch_notifier(notifier) + } +} + +impl ByteSizeOf for SourceSenderItem { + fn allocated_bytes(&self) -> usize { + self.events.allocated_bytes() + } +} + +impl EventCount for SourceSenderItem { + fn event_count(&self) -> usize { + self.events.event_count() + } +} + +impl EstimatedJsonEncodedSizeOf for SourceSenderItem { + fn estimated_json_encoded_size_of(&self) -> JsonSize { + self.events.estimated_json_encoded_size_of() + } +} + +impl EventContainer for SourceSenderItem { + type IntoIter = EventArrayIntoIter; + + fn len(&self) -> usize { + self.events.len() + } + + fn into_events(self) -> Self::IntoIter { + self.events.into_events() + } +} + +impl From for EventArray { + fn from(val: SourceSenderItem) -> Self { + val.events + } +} + +#[derive(Debug, Clone)] +pub struct SourceSender { + // The default output is optional because some sources, e.g. `datadog_agent` + // and `opentelemetry`, can be configured to only output to named outputs. 
+ pub(super) default_output: Option, + pub(super) named_outputs: HashMap, +} + +impl SourceSender { + pub fn builder() -> Builder { + Builder::default() + } + + #[cfg(any(test, feature = "test-utils"))] + pub fn new_test_sender_with_buffer(n: usize) -> (Self, LimitedReceiver) { + let lag_time = Some(histogram!(LAG_TIME_NAME)); + let output_id = OutputId { + component: "test".to_string().into(), + port: None, + }; + let (default_output, rx) = + Output::new_with_buffer(n, DEFAULT_OUTPUT.to_owned(), lag_time, None, output_id); + ( + Self { + default_output: Some(default_output), + named_outputs: Default::default(), + }, + rx, + ) + } + + #[cfg(any(test, feature = "test-utils"))] + pub fn new_test() -> (Self, impl Stream + Unpin) { + let (pipe, recv) = Self::new_test_sender_with_buffer(TEST_BUFFER_SIZE); + let recv = recv.into_stream().flat_map(into_event_stream); + (pipe, recv) + } + + #[cfg(any(test, feature = "test-utils"))] + pub fn new_test_finalize(status: EventStatus) -> (Self, impl Stream + Unpin) { + let (pipe, recv) = Self::new_test_sender_with_buffer(TEST_BUFFER_SIZE); + // In a source test pipeline, there is no sink to acknowledge + // events, so we have to add a map to the receiver to handle the + // finalization. + let recv = recv.into_stream().flat_map(move |mut item| { + item.events.iter_events_mut().for_each(|mut event| { + let metadata = event.metadata_mut(); + metadata.update_status(status); + metadata.update_sources(); + }); + into_event_stream(item) + }); + (pipe, recv) + } + + #[cfg(any(test, feature = "test-utils"))] + pub fn new_test_errors( + error_at: impl Fn(usize) -> bool, + ) -> (Self, impl Stream + Unpin) { + let (pipe, recv) = Self::new_test_sender_with_buffer(TEST_BUFFER_SIZE); + // In a source test pipeline, there is no sink to acknowledge + // events, so we have to add a map to the receiver to handle the + // finalization. 
+ let mut count: usize = 0; + let recv = recv.into_stream().flat_map(move |mut item| { + let status = if error_at(count) { + EventStatus::Errored + } else { + EventStatus::Delivered + }; + count += 1; + item.events.iter_events_mut().for_each(|mut event| { + let metadata = event.metadata_mut(); + metadata.update_status(status); + metadata.update_sources(); + }); + into_event_stream(item) + }); + (pipe, recv) + } + + #[cfg(any(test, feature = "test-utils"))] + pub fn add_outputs( + &mut self, + status: EventStatus, + name: String, + ) -> impl Stream + Unpin + use<> { + // The lag_time parameter here will need to be filled in if this function is ever used for + // non-test situations. + let output_id = OutputId { + component: "test".to_string().into(), + port: Some(name.clone()), + }; + let (output, recv) = Output::new_with_buffer(100, name.clone(), None, None, output_id); + let recv = recv.into_stream().map(move |mut item| { + item.events.iter_events_mut().for_each(|mut event| { + let metadata = event.metadata_mut(); + metadata.update_status(status); + metadata.update_sources(); + }); + item + }); + self.named_outputs.insert(name, output); + recv + } + + /// Get a mutable reference to the default output, panicking if none exists. + const fn default_output_mut(&mut self) -> &mut Output { + self.default_output.as_mut().expect("no default output") + } + + /// Send an event to the default output. + /// + /// This internally handles emitting [EventsSent] and [ComponentEventsDropped] events. + pub async fn send_event(&mut self, event: impl Into) -> Result<(), ClosedError> { + self.default_output_mut().send_event(event).await + } + + /// Send a stream of events to the default output. + /// + /// This internally handles emitting [EventsSent] and [ComponentEventsDropped] events. 
+ pub async fn send_event_stream(&mut self, events: S) -> Result<(), ClosedError> + where + S: Stream + Unpin, + E: Into + ByteSizeOf, + { + self.default_output_mut().send_event_stream(events).await + } + + /// Send a batch of events to the default output. + /// + /// This internally handles emitting [EventsSent] and [ComponentEventsDropped] events. + pub async fn send_batch(&mut self, events: I) -> Result<(), ClosedError> + where + E: Into + ByteSizeOf, + I: IntoIterator, + ::IntoIter: ExactSizeIterator, + { + self.default_output_mut().send_batch(events).await + } + + /// Send a batch of events event to a named output. + /// + /// This internally handles emitting [EventsSent] and [ComponentEventsDropped] events. + pub async fn send_batch_named(&mut self, name: &str, events: I) -> Result<(), ClosedError> + where + E: Into + ByteSizeOf, + I: IntoIterator, + ::IntoIter: ExactSizeIterator, + { + self.named_outputs + .get_mut(name) + .expect("unknown output") + .send_batch(events) + .await + } +} diff --git a/src/source_sender/tests.rs b/src/source_sender/tests.rs new file mode 100644 index 0000000000000..42e085647de13 --- /dev/null +++ b/src/source_sender/tests.rs @@ -0,0 +1,171 @@ +use chrono::{DateTime, Duration, Utc}; +use rand::{Rng, rng}; +use tokio::time::timeout; +use vector_lib::event::{Event, LogEvent, Metric, MetricKind, MetricValue, TraceEvent}; +use vector_lib::metrics::{self, Controller}; +use vrl::event_path; + +use super::*; + +#[tokio::test] +async fn emits_lag_time_for_log() { + emit_and_test(|timestamp| { + let mut log = LogEvent::from("Log message"); + log.insert("timestamp", timestamp); + Event::Log(log) + }) + .await; +} + +#[tokio::test] +async fn emits_lag_time_for_metric() { + emit_and_test(|timestamp| { + Event::Metric( + Metric::new( + "name", + MetricKind::Absolute, + MetricValue::Gauge { value: 123.4 }, + ) + .with_timestamp(Some(timestamp)), + ) + }) + .await; +} + +#[tokio::test] +async fn emits_lag_time_for_trace() { + 
emit_and_test(|timestamp| { + let mut trace = TraceEvent::default(); + trace.insert(event_path!("timestamp"), timestamp); + Event::Trace(trace) + }) + .await; +} + +async fn emit_and_test(make_event: impl FnOnce(DateTime) -> Event) { + metrics::init_test(); + let (mut sender, _stream) = SourceSender::new_test(); + let millis = rng().random_range(10..10000); + let timestamp = Utc::now() - Duration::milliseconds(millis); + let expected = millis as f64 / 1000.0; + + let event = make_event(timestamp); + sender + .send_event(event) + .await + .expect("Send should not fail"); + + let lag_times = Controller::get() + .expect("There must be a controller") + .capture_metrics() + .into_iter() + .filter(|metric| metric.name() == "source_lag_time_seconds") + .collect::>(); + assert_eq!(lag_times.len(), 1); + + let lag_time = &lag_times[0]; + match lag_time.value() { + MetricValue::AggregatedHistogram { + buckets, + count, + sum, + } => { + let mut done = false; + for bucket in buckets { + if !done && bucket.upper_limit >= expected { + assert_eq!(bucket.count, 1); + done = true; + } else { + assert_eq!(bucket.count, 0); + } + } + assert_eq!(*count, 1); + assert!( + (*sum - expected).abs() <= 0.002, + "Histogram sum does not match expected sum: {} vs {}", + *sum, + expected, + ); + } + _ => panic!("source_lag_time_seconds has invalid type"), + } +} + +#[tokio::test] +async fn emits_component_discarded_events_total_for_send_event() { + metrics::init_test(); + let (mut sender, _recv) = SourceSender::new_test_sender_with_buffer(1); + + let event = Event::Metric(Metric::new( + "name", + MetricKind::Absolute, + MetricValue::Gauge { value: 123.4 }, + )); + + // First send will succeed. + sender + .send_event(event.clone()) + .await + .expect("First send should not fail"); + + // Second send will timeout, so the future will not be polled to completion. 
+ let res = timeout( + std::time::Duration::from_millis(100), + sender.send_event(event.clone()), + ) + .await; + assert!(res.is_err(), "Send should have timed out."); + + let component_discarded_events_total = Controller::get() + .expect("There must be a controller") + .capture_metrics() + .into_iter() + .filter(|metric| metric.name() == "component_discarded_events_total") + .collect::>(); + assert_eq!(component_discarded_events_total.len(), 1); + + let component_discarded_events_total = &component_discarded_events_total[0]; + let MetricValue::Counter { value } = component_discarded_events_total.value() else { + panic!("component_discarded_events_total has invalid type") + }; + assert_eq!(*value, 1.0); +} + +#[tokio::test] +async fn emits_component_discarded_events_total_for_send_batch() { + metrics::init_test(); + let (mut sender, _recv) = SourceSender::new_test_sender_with_buffer(1); + + let expected_drop = 100; + let events: Vec = (0..(CHUNK_SIZE + expected_drop)) + .map(|_| { + Event::Metric(Metric::new( + "name", + MetricKind::Absolute, + MetricValue::Gauge { value: 123.4 }, + )) + }) + .collect(); + + // `CHUNK_SIZE` events will be sent into buffer but then the future will not be polled to completion. 
+ let res = timeout( + std::time::Duration::from_millis(100), + sender.send_batch(events), + ) + .await; + assert!(res.is_err(), "Send should have timed out."); + + let component_discarded_events_total = Controller::get() + .expect("There must be a controller") + .capture_metrics() + .into_iter() + .filter(|metric| metric.name() == "component_discarded_events_total") + .collect::>(); + assert_eq!(component_discarded_events_total.len(), 1); + + let component_discarded_events_total = &component_discarded_events_total[0]; + let MetricValue::Counter { value } = component_discarded_events_total.value() else { + panic!("component_discarded_events_total has invalid type") + }; + assert_eq!(*value, expected_drop as f64); +} From a70ae2ee9efff90b0e058a0bbe128da046655c90 Mon Sep 17 00:00:00 2001 From: Pavlos Rontidis Date: Fri, 7 Nov 2025 11:03:54 -0500 Subject: [PATCH 063/227] chore(ci): temporarily remove homebrew publish step from publish workflow (#24185) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This temporarily removes the publish-homebrew job from the publish workflow to address issue #24139. This is step 1 of the plan to fix the homebrew publishing process. 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-authored-by: Claude --- .github/workflows/publish.yml | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index a9a73373c9905..c1c50cc964902 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -835,15 +835,3 @@ jobs: with: name: vector-${{ env.VECTOR_VERSION }}-SHA256SUMS path: target/artifacts/vector-${{ env.VECTOR_VERSION }}-SHA256SUMS - - publish-homebrew: - name: Publish to Homebrew - # We only publish versioned releases to Homebrew. 
- if: ${{ inputs.channel == 'release' }} - uses: ./.github/workflows/publish-homebrew.yml - needs: - - generate-publish-metadata - - publish-s3 - with: - git_ref: ${{ inputs.git_ref }} - vector_version: ${{ needs.generate-publish-metadata.outputs.vector_version }} From f453b8b1179c3ce36c211d21cc246945365db36a Mon Sep 17 00:00:00 2001 From: Bruce Guenter Date: Fri, 7 Nov 2025 12:03:24 -0600 Subject: [PATCH 064/227] chore(sources): Move `source_sender` into `vector-core` (#24186) --- Cargo.toml | 2 +- lib/vector-core/src/lib.rs | 1 + .../vector-core/src}/source_sender/builder.rs | 11 ++-- .../vector-core/src}/source_sender/errors.rs | 5 +- .../vector-core/src}/source_sender/mod.rs | 11 ++-- .../vector-core/src}/source_sender/output.rs | 41 ++++++++------- .../vector-core/src}/source_sender/sender.rs | 50 ++++++++++--------- .../vector-core/src}/source_sender/tests.rs | 11 ++-- lib/vector-lib/Cargo.toml | 6 +-- lib/vector-lib/src/lib.rs | 2 +- src/lib.rs | 6 +-- src/sources/aws_kinesis_firehose/errors.rs | 2 +- src/sources/aws_s3/sqs.rs | 4 +- src/sources/splunk_hec/mod.rs | 2 +- src/test_util/mock/mod.rs | 6 ++- src/test_util/mock/sources/basic.rs | 6 +-- src/topology/builder.rs | 2 +- src/topology/test/mod.rs | 2 +- 18 files changed, 95 insertions(+), 75 deletions(-) rename {src => lib/vector-core/src}/source_sender/builder.rs (89%) rename {src => lib/vector-core/src}/source_sender/errors.rs (92%) rename {src => lib/vector-core/src}/source_sender/mod.rs (62%) rename {src => lib/vector-core/src}/source_sender/output.rs (90%) rename {src => lib/vector-core/src}/source_sender/sender.rs (90%) rename {src => lib/vector-core/src}/source_sender/tests.rs (95%) diff --git a/Cargo.toml b/Cargo.toml index 42cef293793a7..cbeca874998f5 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -999,7 +999,7 @@ webhdfs-integration-tests = ["sinks-webhdfs"] disable-resolv-conf = [] shutdown-tests = ["api", "sinks-blackhole", "sinks-console", "sinks-prometheus", "sources", "transforms-lua", 
"transforms-remap", "unix"] cli-tests = ["sinks-blackhole", "sinks-socket", "sources-demo_logs", "sources-file"] -test-utils = [] +test-utils = ["vector-lib/test"] # End-to-End testing-related features all-e2e-tests = [ diff --git a/lib/vector-core/src/lib.rs b/lib/vector-core/src/lib.rs index ef63141d102ff..177b78f138a99 100644 --- a/lib/vector-core/src/lib.rs +++ b/lib/vector-core/src/lib.rs @@ -37,6 +37,7 @@ pub mod schema; pub mod serde; pub mod sink; pub mod source; +pub mod source_sender; pub mod tcp; #[cfg(test)] mod test_util; diff --git a/src/source_sender/builder.rs b/lib/vector-core/src/source_sender/builder.rs similarity index 89% rename from src/source_sender/builder.rs rename to lib/vector-core/src/source_sender/builder.rs index 2a865cbf0e159..e06d1fa87012b 100644 --- a/src/source_sender/builder.rs +++ b/lib/vector-core/src/source_sender/builder.rs @@ -1,13 +1,11 @@ use std::collections::HashMap; use metrics::{Histogram, histogram}; -use vector_lib::{ - buffers::topology::channel::LimitedReceiver, - config::{ComponentKey, OutputId, SourceOutput}, - internal_event::DEFAULT_OUTPUT, -}; +use vector_buffers::topology::channel::LimitedReceiver; +use vector_common::internal_event::DEFAULT_OUTPUT; use super::{CHUNK_SIZE, LAG_TIME_NAME, Output, SourceSender, SourceSenderItem}; +use crate::config::{ComponentKey, OutputId, SourceOutput}; pub struct Builder { buf_size: usize, @@ -28,7 +26,8 @@ impl Default for Builder { } impl Builder { - pub const fn with_buffer(mut self, n: usize) -> Self { + #[must_use] + pub fn with_buffer(mut self, n: usize) -> Self { self.buf_size = n; self } diff --git a/src/source_sender/errors.rs b/lib/vector-core/src/source_sender/errors.rs similarity index 92% rename from src/source_sender/errors.rs rename to lib/vector-core/src/source_sender/errors.rs index 54d6cd106189d..b0d9052c8747a 100644 --- a/src/source_sender/errors.rs +++ b/lib/vector-core/src/source_sender/errors.rs @@ -1,8 +1,9 @@ use std::fmt; use tokio::sync::mpsc; -use 
vector_lib::buffers::topology::channel::SendError; -use vector_lib::event::{Event, EventArray}; +use vector_buffers::topology::channel::SendError; + +use crate::event::{Event, EventArray}; #[derive(Clone, Debug)] pub struct ClosedError; diff --git a/src/source_sender/mod.rs b/lib/vector-core/src/source_sender/mod.rs similarity index 62% rename from src/source_sender/mod.rs rename to lib/vector-core/src/source_sender/mod.rs index 16d2ca4792103..b6615c106b608 100644 --- a/src/source_sender/mod.rs +++ b/lib/vector-core/src/source_sender/mod.rs @@ -1,4 +1,9 @@ -#![allow(missing_docs)] +#![allow( + missing_docs, + clippy::missing_errors_doc, + clippy::doc_markdown, + clippy::missing_panics_doc +)] mod builder; mod errors; @@ -12,9 +17,9 @@ pub use errors::{ClosedError, StreamSendError}; use output::Output; pub use sender::{SourceSender, SourceSenderItem}; -pub(crate) const CHUNK_SIZE: usize = 1000; +pub const CHUNK_SIZE: usize = 1000; -#[cfg(any(test, feature = "test-utils"))] +#[cfg(any(test, feature = "test"))] const TEST_BUFFER_SIZE: usize = 100; const LAG_TIME_NAME: &str = "source_lag_time_seconds"; diff --git a/src/source_sender/output.rs b/lib/vector-core/src/source_sender/output.rs similarity index 90% rename from src/source_sender/output.rs rename to lib/vector-core/src/source_sender/output.rs index 02854e5d1ea6a..20a122d7f485f 100644 --- a/src/source_sender/output.rs +++ b/lib/vector-core/src/source_sender/output.rs @@ -4,21 +4,26 @@ use chrono::Utc; use futures::{Stream, StreamExt as _}; use metrics::Histogram; use tracing::Span; -use vector_lib::{ - ByteSizeOf, EstimatedJsonEncodedSizeOf as _, - buffers::config::MemoryBufferSize, - buffers::topology::channel::{self, LimitedReceiver, LimitedSender}, - config::{OutputId, log_schema}, - event::{Event, EventArray, EventContainer as _, EventRef, array}, +use vector_buffers::{ + config::MemoryBufferSize, + topology::channel::{self, LimitedReceiver, LimitedSender}, +}; +use vector_common::{ + 
byte_size_of::ByteSizeOf, internal_event::{ self, ComponentEventsDropped, CountByteSize, EventsSent, InternalEventHandle as _, Registered, UNINTENTIONAL, }, - schema::Definition, }; use vrl::value::Value; use super::{CHUNK_SIZE, ClosedError, SourceSenderItem}; +use crate::{ + EstimatedJsonEncodedSizeOf, + config::{OutputId, log_schema}, + event::{Event, EventArray, EventContainer as _, EventRef, array}, + schema::Definition, +}; /// UnsentEvents tracks the number of events yet to be sent in the buffer. This is used to /// increment the appropriate counters when a future is not polled to completion. Particularly, @@ -53,9 +58,9 @@ impl Drop for UnsentEventCount { fn drop(&mut self) { if self.count > 0 { let _enter = self.span.enter(); - emit!(ComponentEventsDropped:: { + internal_event::emit(ComponentEventsDropped:: { count: self.count, - reason: "Source send cancelled." + reason: "Source send cancelled.", }); } } @@ -70,14 +75,15 @@ pub(super) struct Output { log_definition: Option>, /// The OutputId related to this source sender. This is set as the `upstream_id` in /// `EventMetadata` for all event sent through here. 
- output_id: Arc, + id: Arc, } +#[expect(clippy::missing_fields_in_debug)] impl fmt::Debug for Output { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { fmt.debug_struct("Output") .field("sender", &self.sender) - .field("output_id", &self.output_id) + .field("output_id", &self.id) // `metrics::Histogram` is missing `impl Debug` .finish() } @@ -96,11 +102,11 @@ impl Output { Self { sender: tx, lag_time, - events_sent: register!(EventsSent::from(internal_event::Output(Some( - output.into() - )))), + events_sent: internal_event::register(EventsSent::from(internal_event::Output( + Some(output.into()), + ))), log_definition, - output_id: Arc::new(output_id), + id: Arc::new(output_id), }, rx, ) @@ -122,9 +128,7 @@ impl Output { if let Some(log_definition) = &self.log_definition { event.metadata_mut().set_schema_definition(log_definition); } - event - .metadata_mut() - .set_upstream_id(Arc::clone(&self.output_id)); + event.metadata_mut().set_upstream_id(Arc::clone(&self.id)); }); let byte_size = events.estimated_json_encoded_size_of(); @@ -215,6 +219,7 @@ impl Output { if let Some(timestamp) = timestamp { // This will truncate precision for values larger than 2**52, but at that point the user // probably has much larger problems than precision. 
+ #[expect(clippy::cast_precision_loss)] let lag_time = (reference - timestamp) as f64 / 1000.0; lag_time_metric.record(lag_time); } diff --git a/src/source_sender/sender.rs b/lib/vector-core/src/source_sender/sender.rs similarity index 90% rename from src/source_sender/sender.rs rename to lib/vector-core/src/source_sender/sender.rs index 8a5a27f9dbbf0..88d6f13f97b45 100644 --- a/src/source_sender/sender.rs +++ b/lib/vector-core/src/source_sender/sender.rs @@ -1,31 +1,35 @@ -use std::collections::HashMap; -use std::time::Instant; +use std::{collections::HashMap, time::Instant}; use futures::Stream; -#[cfg(any(test, feature = "test-utils"))] +#[cfg(any(test, feature = "test"))] use futures::StreamExt as _; -#[cfg(any(test, feature = "test-utils"))] +#[cfg(any(test, feature = "test"))] use metrics::histogram; +use vector_buffers::EventCount; +#[cfg(any(test, feature = "test"))] +use vector_buffers::topology::channel::LimitedReceiver; +#[cfg(any(test, feature = "test"))] +use vector_common::internal_event::DEFAULT_OUTPUT; #[cfg(doc)] -use vector_lib::internal_event::{ComponentEventsDropped, EventsSent}; -use vector_lib::{ - ByteSizeOf, EstimatedJsonEncodedSizeOf, - buffers::EventCount, - event::{Event, EventArray, EventContainer, array::EventArrayIntoIter}, +use vector_common::internal_event::{ComponentEventsDropped, EventsSent}; +use vector_common::{ + byte_size_of::ByteSizeOf, finalization::{AddBatchNotifier, BatchNotifier}, json_size::JsonSize, }; -#[cfg(any(test, feature = "test-utils"))] -use vector_lib::{buffers::topology::channel::LimitedReceiver, internal_event::DEFAULT_OUTPUT}; -#[cfg(any(test, feature = "test-utils"))] -use vector_lib::{ - config::OutputId, - event::{EventStatus, into_event_stream}, -}; use super::{Builder, ClosedError, Output}; -#[cfg(any(test, feature = "test-utils"))] +#[cfg(any(test, feature = "test"))] use super::{LAG_TIME_NAME, TEST_BUFFER_SIZE}; +use crate::{ + EstimatedJsonEncodedSizeOf, + event::{Event, EventArray, EventContainer, 
array::EventArrayIntoIter}, +}; +#[cfg(any(test, feature = "test"))] +use crate::{ + config::OutputId, + event::{EventStatus, into_event_stream}, +}; /// SourceSenderItem is a thin wrapper around [EventArray] used to track the send duration of a batch. /// @@ -43,7 +47,7 @@ pub struct SourceSenderItem { impl AddBatchNotifier for SourceSenderItem { fn add_batch_notifier(&mut self, notifier: BatchNotifier) { - self.events.add_batch_notifier(notifier) + self.events.add_batch_notifier(notifier); } } @@ -96,7 +100,7 @@ impl SourceSender { Builder::default() } - #[cfg(any(test, feature = "test-utils"))] + #[cfg(any(test, feature = "test"))] pub fn new_test_sender_with_buffer(n: usize) -> (Self, LimitedReceiver) { let lag_time = Some(histogram!(LAG_TIME_NAME)); let output_id = OutputId { @@ -114,14 +118,14 @@ impl SourceSender { ) } - #[cfg(any(test, feature = "test-utils"))] + #[cfg(any(test, feature = "test"))] pub fn new_test() -> (Self, impl Stream + Unpin) { let (pipe, recv) = Self::new_test_sender_with_buffer(TEST_BUFFER_SIZE); let recv = recv.into_stream().flat_map(into_event_stream); (pipe, recv) } - #[cfg(any(test, feature = "test-utils"))] + #[cfg(any(test, feature = "test"))] pub fn new_test_finalize(status: EventStatus) -> (Self, impl Stream + Unpin) { let (pipe, recv) = Self::new_test_sender_with_buffer(TEST_BUFFER_SIZE); // In a source test pipeline, there is no sink to acknowledge @@ -138,7 +142,7 @@ impl SourceSender { (pipe, recv) } - #[cfg(any(test, feature = "test-utils"))] + #[cfg(any(test, feature = "test"))] pub fn new_test_errors( error_at: impl Fn(usize) -> bool, ) -> (Self, impl Stream + Unpin) { @@ -164,7 +168,7 @@ impl SourceSender { (pipe, recv) } - #[cfg(any(test, feature = "test-utils"))] + #[cfg(any(test, feature = "test"))] pub fn add_outputs( &mut self, status: EventStatus, diff --git a/src/source_sender/tests.rs b/lib/vector-core/src/source_sender/tests.rs similarity index 95% rename from src/source_sender/tests.rs rename to 
lib/vector-core/src/source_sender/tests.rs index 42e085647de13..a659c196a3a3e 100644 --- a/src/source_sender/tests.rs +++ b/lib/vector-core/src/source_sender/tests.rs @@ -1,11 +1,13 @@ use chrono::{DateTime, Duration, Utc}; use rand::{Rng, rng}; use tokio::time::timeout; -use vector_lib::event::{Event, LogEvent, Metric, MetricKind, MetricValue, TraceEvent}; -use vector_lib::metrics::{self, Controller}; use vrl::event_path; use super::*; +use crate::{ + event::{Event, LogEvent, Metric, MetricKind, MetricValue, TraceEvent}, + metrics::{self, Controller}, +}; #[tokio::test] async fn emits_lag_time_for_log() { @@ -47,6 +49,7 @@ async fn emit_and_test(make_event: impl FnOnce(DateTime) -> Event) { let (mut sender, _stream) = SourceSender::new_test(); let millis = rng().random_range(10..10000); let timestamp = Utc::now() - Duration::milliseconds(millis); + #[expect(clippy::cast_precision_loss)] let expected = millis as f64 / 1000.0; let event = make_event(timestamp); @@ -132,6 +135,7 @@ async fn emits_component_discarded_events_total_for_send_event() { } #[tokio::test] +#[expect(clippy::cast_precision_loss)] async fn emits_component_discarded_events_total_for_send_batch() { metrics::init_test(); let (mut sender, _recv) = SourceSender::new_test_sender_with_buffer(1); @@ -167,5 +171,6 @@ async fn emits_component_discarded_events_total_for_send_batch() { let MetricValue::Counter { value } = component_discarded_events_total.value() else { panic!("component_discarded_events_total has invalid type") }; - assert_eq!(*value, expected_drop as f64); + + assert_eq!(*value, expected_drop as f64,); } diff --git a/lib/vector-lib/Cargo.toml b/lib/vector-lib/Cargo.toml index 3ba208e62c5ad..7088a79978d60 100644 --- a/lib/vector-lib/Cargo.toml +++ b/lib/vector-lib/Cargo.toml @@ -24,14 +24,14 @@ vector-top = { path = "../vector-top", optional = true } vrl = { workspace = true, optional = true } [features] -api = ["vector-tap/api"] +allocation-tracing = ["vector-top?/allocation-tracing"] 
api-client = ["dep:vector-api-client"] -lua = ["vector-core/lua"] +api = ["vector-tap/api"] file-source = ["dep:file-source", "dep:file-source-common"] +lua = ["vector-core/lua"] opentelemetry = ["dep:opentelemetry-proto", "codecs/opentelemetry"] prometheus = ["dep:prometheus-parser"] proptest = ["vector-lookup/proptest", "vrl/proptest"] syslog = ["codecs/syslog"] test = ["vector-core/test"] vrl = ["vector-core/vrl", "dep:vrl"] -allocation-tracing = ["vector-top?/allocation-tracing"] diff --git a/lib/vector-lib/src/lib.rs b/lib/vector-lib/src/lib.rs index 894fde48c1fb7..5d209ba88253c 100644 --- a/lib/vector-lib/src/lib.rs +++ b/lib/vector-lib/src/lib.rs @@ -22,7 +22,7 @@ pub use vector_core::compile_vrl; pub use vector_core::{ EstimatedJsonEncodedSizeOf, buckets, default_data_dir, emit, event, fanout, ipallowlist, metric_tags, metrics, partition, quantiles, register, samples, schema, serde, sink, source, - tcp, tls, transform, + source_sender, tcp, tls, transform, }; pub use vector_lookup as lookup; pub use vector_stream as stream; diff --git a/src/lib.rs b/src/lib.rs index 6ed250658b65f..0f2dbfb5fa728 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -105,7 +105,6 @@ pub mod signal; pub(crate) mod sink_ext; #[allow(unreachable_pub)] pub mod sinks; -pub mod source_sender; #[allow(unreachable_pub)] pub mod sources; pub mod stats; @@ -128,8 +127,9 @@ pub mod validate; #[cfg(windows)] pub mod vector_windows; -pub use source_sender::SourceSender; -pub use vector_lib::{Error, Result, event, metrics, schema, shutdown, tcp, tls}; +pub use vector_lib::{ + Error, Result, event, metrics, schema, shutdown, source_sender::SourceSender, tcp, tls, +}; static APP_NAME_SLUG: std::sync::OnceLock = std::sync::OnceLock::new(); static USE_COLOR: std::sync::OnceLock = std::sync::OnceLock::new(); diff --git a/src/sources/aws_kinesis_firehose/errors.rs b/src/sources/aws_kinesis_firehose/errors.rs index 0aaa518244414..baf8463e5528b 100644 --- a/src/sources/aws_kinesis_firehose/errors.rs +++ 
b/src/sources/aws_kinesis_firehose/errors.rs @@ -41,7 +41,7 @@ pub enum RequestError { source ))] ShuttingDown { - source: crate::source_sender::ClosedError, + source: vector_lib::source_sender::ClosedError, request_id: String, }, #[snafu(display("Unsupported encoding: {}", encoding))] diff --git a/src/sources/aws_s3/sqs.rs b/src/sources/aws_s3/sqs.rs index fc697fd726b37..a7d972bef167a 100644 --- a/src/sources/aws_s3/sqs.rs +++ b/src/sources/aws_s3/sqs.rs @@ -232,7 +232,7 @@ pub enum ProcessingError { }, #[snafu(display("Failed to flush all of s3://{}/{}: {}", bucket, key, source))] PipelineSend { - source: crate::source_sender::ClosedError, + source: vector_lib::source_sender::ClosedError, bucket: String, key: String, }, @@ -786,7 +786,7 @@ impl IngestorProcess { Err(_) => { let (count, _) = stream.size_hint(); emit!(StreamClosedError { count }); - Some(crate::source_sender::ClosedError) + Some(vector_lib::source_sender::ClosedError) } }; diff --git a/src/sources/splunk_hec/mod.rs b/src/sources/splunk_hec/mod.rs index d6cb758063536..6a1a6646d0d8a 100644 --- a/src/sources/splunk_hec/mod.rs +++ b/src/sources/splunk_hec/mod.rs @@ -31,6 +31,7 @@ use vector_lib::{ lookup::{self, event_path, lookup_v2::OptionalValuePath, owned_value_path}, schema::meaning, sensitive_string::SensitiveString, + source_sender::ClosedError, tls::MaybeTlsIncomingStream, }; use vrl::{ @@ -62,7 +63,6 @@ use crate::{ EventsReceived, HttpBytesReceived, SplunkHecRequestBodyInvalidError, SplunkHecRequestError, }, serde::bool_or_struct, - source_sender::ClosedError, tls::{MaybeTlsSettings, TlsEnableableConfig}, }; diff --git a/src/test_util/mock/mod.rs b/src/test_util/mock/mod.rs index 8b60e213a965b..77a3c861c547e 100644 --- a/src/test_util/mock/mod.rs +++ b/src/test_util/mock/mod.rs @@ -3,7 +3,10 @@ use std::sync::{Arc, atomic::AtomicUsize}; use futures_util::Stream; use stream_cancel::Trigger; use tokio::sync::oneshot::Sender; -use vector_lib::event::EventArray; +use vector_lib::{ + 
event::EventArray, + source_sender::{SourceSender, SourceSenderItem}, +}; use self::{ sinks::{ @@ -16,7 +19,6 @@ use self::{ }, transforms::{BasicTransformConfig, ErrorDefinitionTransformConfig}, }; -use crate::{SourceSender, source_sender::SourceSenderItem}; pub mod sinks; pub mod sources; diff --git a/src/test_util/mock/sources/basic.rs b/src/test_util/mock/sources/basic.rs index d083cad3ac3a8..5ecc3db219446 100644 --- a/src/test_util/mock/sources/basic.rs +++ b/src/test_util/mock/sources/basic.rs @@ -17,13 +17,11 @@ use vector_lib::{ event::EventContainer, schema::Definition, source::Source, -}; - -use crate::{ - config::{SourceConfig, SourceContext}, source_sender::SourceSenderItem, }; +use crate::config::{SourceConfig, SourceContext}; + /// Configuration for the `test_basic` source. #[configurable_component(source("test_basic", "Test (basic)."))] #[derive(Clone, Debug)] diff --git a/src/topology/builder.rs b/src/topology/builder.rs index 7f3f0097aeb4d..797b0bf3554f7 100644 --- a/src/topology/builder.rs +++ b/src/topology/builder.rs @@ -28,6 +28,7 @@ use vector_lib::{ config::LogNamespace, internal_event::{self, CountByteSize, EventsSent, InternalEventHandle as _, Registered}, schema::Definition, + source_sender::{CHUNK_SIZE, SourceSenderItem}, transform::update_runtime_schema_definition, }; @@ -47,7 +48,6 @@ use crate::{ extra_context::ExtraContext, internal_events::EventsReceived, shutdown::SourceShutdownCoordinator, - source_sender::{CHUNK_SIZE, SourceSenderItem}, spawn_named, topology::task::TaskError, transforms::{SyncTransform, TaskTransform, Transform, TransformOutputs, TransformOutputsBuf}, diff --git a/src/topology/test/mod.rs b/src/topology/test/mod.rs index bb418bfddb381..cc9932279f950 100644 --- a/src/topology/test/mod.rs +++ b/src/topology/test/mod.rs @@ -14,13 +14,13 @@ use tokio::{ use vector_lib::{ buffers::{BufferConfig, BufferType, WhenFull}, config::{ComponentKey, OutputId}, + source_sender::SourceSenderItem, }; use crate::{ config::{Config, 
ConfigDiff, SinkOuter}, event::{Event, EventArray, EventContainer, LogEvent, into_event_stream}, schema::Definition, - source_sender::SourceSenderItem, test_util::{ mock::{ basic_sink, basic_sink_failing_healthcheck, basic_sink_with_data, basic_source, From 1e3f38736ee4b3ef592fc0efd4adbb02bcad138b Mon Sep 17 00:00:00 2001 From: Pavlos Rontidis Date: Fri, 7 Nov 2025 13:27:56 -0500 Subject: [PATCH 065/227] chore(website): add log verbosity section to the debugging guide (#24187) --- website/content/en/guides/developer/debugging.md | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/website/content/en/guides/developer/debugging.md b/website/content/en/guides/developer/debugging.md index 1282d2677af47..4e84514264e02 100644 --- a/website/content/en/guides/developer/debugging.md +++ b/website/content/en/guides/developer/debugging.md @@ -21,6 +21,22 @@ In the following sections we will examine the tools we have at our disposal. debugging-meme +### Controlling Log Verbosity + +Use the `VECTOR_LOG` environment variable to control log verbosity: + +```shell +VECTOR_LOG=debug vector --config path/to/config.yaml +``` + +You can set different verbosity levels for specific components: + +```shell +VECTOR_LOG=info,vector::sources::aws_s3=warn vector --config path/to/config.yaml +``` + +You can find more information on the syntax [here](https://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html#usage-notes). 
+ ### Vector Tools #### Vector top From a488105710032b593051496bad9dc8df5e8cce6c Mon Sep 17 00:00:00 2001 From: Pavlos Rontidis Date: Fri, 7 Nov 2025 13:52:13 -0500 Subject: [PATCH 066/227] fix(blackhole sink): disable rate limiting for periodic stats messages (#24190) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * fix(blackhole sink): disable rate limiting for periodic stats messages The blackhole sink's informational "Collected events" messages were being rate-limited since rate limiting was enabled by default in #24024. This is undesirable because: 1. These are deliberately scheduled periodic messages (controlled by the `print_interval_secs` config option), not error conditions that could flood the logs 2. Users explicitly configure the frequency - rate limiting defeats that explicit configuration and breaks user expectations 3. The interval timer already prevents log flooding, making additional rate limiting redundant 4. The blackhole sink is used for debugging/testing, where predictable output is essential This fix adds `internal_log_rate_limit = false` to both info! calls, similar to how the console sink disables rate limiting for its critical operational messages. 
Fixes #24188 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude * chore: add changelog fragment for blackhole sink rate limiting fix --------- Co-authored-by: Claude --- changelog.d/24188_blackhole_sink_rate_limiting.fix.md | 3 +++ src/sinks/blackhole/sink.rs | 2 ++ 2 files changed, 5 insertions(+) create mode 100644 changelog.d/24188_blackhole_sink_rate_limiting.fix.md diff --git a/changelog.d/24188_blackhole_sink_rate_limiting.fix.md b/changelog.d/24188_blackhole_sink_rate_limiting.fix.md new file mode 100644 index 0000000000000..9761dc54bd7a8 --- /dev/null +++ b/changelog.d/24188_blackhole_sink_rate_limiting.fix.md @@ -0,0 +1,3 @@ +The `blackhole` sink's periodic statistics messages (controlled by `print_interval_secs`) are no longer incorrectly suppressed by rate limiting. These informational messages now appear at the user-configured interval as expected. + +authors: pront diff --git a/src/sinks/blackhole/sink.rs b/src/sinks/blackhole/sink.rs index f400518332e42..8c934101545eb 100644 --- a/src/sinks/blackhole/sink.rs +++ b/src/sinks/blackhole/sink.rs @@ -65,6 +65,7 @@ impl StreamSink for BlackholeSink { info!( events = total_events.load(Ordering::Relaxed), raw_bytes_collected = total_raw_bytes.load(Ordering::Relaxed), + internal_log_rate_limit = false, "Collected events." ); }, @@ -75,6 +76,7 @@ impl StreamSink for BlackholeSink { info!( events = total_events.load(Ordering::Relaxed), raw_bytes_collected = total_raw_bytes.load(Ordering::Relaxed), + internal_log_rate_limit = false, "Collected events." 
); }); From affe70362c0b38bcd529040ddfe01b7d2f80c836 Mon Sep 17 00:00:00 2001 From: Thomas Date: Mon, 10 Nov 2025 10:17:52 -0500 Subject: [PATCH 067/227] chore(internal logs): Disable rate limiting for critical internal error logs (#24192) * Improve debug log in aws_ecs_metrics * Add internal_log_rate_limit = false to config reload errors * Disable rate limiting for lua build failure * Disable rate limiting for amqp build failure * Add internal_log_rate_limit = false to important failures during reload * Disable log rate limit for repeated events in process.rs * Update src/config/watcher.rs * Fix message format --- src/config/watcher.rs | 39 +++++++++++++++++++------- src/internal_events/aws_ecs_metrics.rs | 3 +- src/internal_events/config.rs | 2 ++ src/internal_events/lua.rs | 1 + src/internal_events/process.rs | 10 +++++-- src/sinks/amqp/config.rs | 1 + 6 files changed, 42 insertions(+), 14 deletions(-) diff --git a/src/config/watcher.rs b/src/config/watcher.rs index 5f02b9d4128b3..1a4bc2f0cc734 100644 --- a/src/config/watcher.rs +++ b/src/config/watcher.rs @@ -147,19 +147,38 @@ pub fn spawn_thread<'a>( .all(|(_, t)| *t == ComponentType::EnrichmentTable) { info!("Only enrichment tables have changed."); - _ = signal_tx.send(crate::signal::SignalTo::ReloadEnrichmentTables).map_err(|error| { - error!(message = "Unable to reload enrichment tables.", cause = %error) - }); + _ = signal_tx + .send(crate::signal::SignalTo::ReloadEnrichmentTables) + .map_err(|error| { + error!( + message = "Unable to reload enrichment tables.", + cause = %error, + internal_log_rate_limit = false, + ) + }); } else { - _ = signal_tx.send(crate::signal::SignalTo::ReloadComponents(changed_components.into_keys().collect())).map_err(|error| { - error!(message = "Unable to reload component configuration. 
Restart Vector to reload it.", cause = %error) - }); + _ = signal_tx + .send(crate::signal::SignalTo::ReloadComponents( + changed_components.into_keys().collect(), + )) + .map_err(|error| { + error!( + message = "Unable to reload component configuration. Restart Vector to reload it.", + cause = %error, + internal_log_rate_limit = false, + ) + }); } } else { - _ = signal_tx.send(crate::signal::SignalTo::ReloadFromDisk) - .map_err(|error| { - error!(message = "Unable to reload configuration file. Restart Vector to reload it.", cause = %error) - }); + _ = signal_tx + .send(crate::signal::SignalTo::ReloadFromDisk) + .map_err(|error| { + error!( + message = "Unable to reload configuration file. Restart Vector to reload it.", + cause = %error, + internal_log_rate_limit = false, + ) + }); } } else { debug!(message = "Ignoring event.", event = ?event) diff --git a/src/internal_events/aws_ecs_metrics.rs b/src/internal_events/aws_ecs_metrics.rs index eba1fa145e0a8..4589a3eda70d2 100644 --- a/src/internal_events/aws_ecs_metrics.rs +++ b/src/internal_events/aws_ecs_metrics.rs @@ -52,8 +52,9 @@ impl InternalEvent for AwsEcsMetricsParseError<'_> { error_type = error_type::PARSER_FAILED, ); debug!( - message = %format!("Failed to parse response:\\n\\n{}\\n\\n", self.body.escape_debug()), + response = %self.body.escape_debug(), endpoint = %self.endpoint, + "Failed to parse response.", ); counter!("parse_errors_total").increment(1); counter!( diff --git a/src/internal_events/config.rs b/src/internal_events/config.rs index c8fb994c85c94..7c9e80dc87788 100644 --- a/src/internal_events/config.rs +++ b/src/internal_events/config.rs @@ -14,6 +14,7 @@ impl InternalEvent for ConfigReloadRejected { message = "Config reload rejected due to non-reloadable global options.", reason = %self.reason.as_str(), changed_fields = %fields.join(", "), + internal_log_rate_limit = false, ); counter!( @@ -27,6 +28,7 @@ impl InternalEvent for ConfigReloadRejected { message = "Config reload rejected due to 
failed to compute global diff.", reason = %self.reason.as_str(), error = %err, + internal_log_rate_limit = false, ); counter!( diff --git a/src/internal_events/lua.rs b/src/internal_events/lua.rs index 9d202112fea5b..1770761fda489 100644 --- a/src/internal_events/lua.rs +++ b/src/internal_events/lua.rs @@ -58,6 +58,7 @@ impl InternalEvent for LuaBuildError { error_type = error_type::SCRIPT_FAILED, error_code = lua_build_error_code(&self.error), stage = error_stage::PROCESSING, + internal_log_rate_limit = false, ); counter!( "component_errors_total", diff --git a/src/internal_events/process.rs b/src/internal_events/process.rs index 3d5f3100e6f9d..ce8fbd063f438 100644 --- a/src/internal_events/process.rs +++ b/src/internal_events/process.rs @@ -30,7 +30,8 @@ impl InternalEvent for VectorReloaded<'_> { info!( target: "vector", message = "Vector has reloaded.", - path = ?self.config_paths + path = ?self.config_paths, + internal_log_rate_limit = false, ); counter!("reloaded_total").increment(1); } @@ -43,7 +44,7 @@ impl InternalEvent for VectorStopped { fn emit(self) { info!( target: "vector", - message = "Vector has stopped." + message = "Vector has stopped.", ); counter!("stopped_total").increment(1); } @@ -56,7 +57,7 @@ impl InternalEvent for VectorQuit { fn emit(self) { info!( target: "vector", - message = "Vector has quit." 
+ message = "Vector has quit.", ); counter!("quit_total").increment(1); } @@ -72,6 +73,7 @@ impl InternalEvent for VectorReloadError { error_code = "reload", error_type = error_type::CONFIGURATION_FAILED, stage = error_stage::PROCESSING, + internal_log_rate_limit = false, ); counter!( "component_errors_total", @@ -93,6 +95,7 @@ impl InternalEvent for VectorConfigLoadError { error_code = "config_load", error_type = error_type::CONFIGURATION_FAILED, stage = error_stage::PROCESSING, + internal_log_rate_limit = false, ); counter!( "component_errors_total", @@ -114,6 +117,7 @@ impl InternalEvent for VectorRecoveryError { error_code = "recovery", error_type = error_type::CONFIGURATION_FAILED, stage = error_stage::PROCESSING, + internal_log_rate_limit = false, ); counter!( "component_errors_total", diff --git a/src/sinks/amqp/config.rs b/src/sinks/amqp/config.rs index 9e0a2fb273321..8d7442cb37e70 100644 --- a/src/sinks/amqp/config.rs +++ b/src/sinks/amqp/config.rs @@ -45,6 +45,7 @@ impl AmqpPropertiesConfig { error = %error, error_type = error_type::TEMPLATE_FAILED, stage = error_stage::PROCESSING, + internal_log_rate_limit = false, ); Default::default() }); From 35ad95ad00ec2df31df832f1a68c6a924be113c8 Mon Sep 17 00:00:00 2001 From: Pavlos Rontidis Date: Mon, 10 Nov 2025 13:10:54 -0500 Subject: [PATCH 068/227] fix(tracing): prevent panic for traces without standard fields (#24191) * fix(tracing): prevent panic for traces without std fields * forbid unwrap in this lib * clippy fix * improve events_with_custom_fields_no_message_dont_panic, advance to next window --- changelog.d/config_reload_panic_fix.fix.md | 4 ++ clippy.toml | 1 + lib/tracing-limit/Cargo.toml | 3 ++ lib/tracing-limit/benches/limit.rs | 6 +-- lib/tracing-limit/src/lib.rs | 50 +++++++++++++++++++--- 5 files changed, 56 insertions(+), 8 deletions(-) create mode 100644 changelog.d/config_reload_panic_fix.fix.md diff --git a/changelog.d/config_reload_panic_fix.fix.md 
b/changelog.d/config_reload_panic_fix.fix.md new file mode 100644 index 0000000000000..1a39a8af479f1 --- /dev/null +++ b/changelog.d/config_reload_panic_fix.fix.md @@ -0,0 +1,4 @@ +Fixed a panic in the tracing rate limiter when config reload failed. While the panic didn't kill Vector (it was caught by tokio's task +runtime), it could cause unexpected behavior. The rate limiter now gracefully handles events without standard message fields. + +authors: pront diff --git a/clippy.toml b/clippy.toml index 20274f646b1d7..0aeae804db3c7 100644 --- a/clippy.toml +++ b/clippy.toml @@ -1,4 +1,5 @@ large-error-threshold = 256 # in bytes +allow-unwrap-in-tests = true # for `disallowed_method`: # https://rust-lang.github.io/rust-clippy/master/index.html#disallowed_method diff --git a/lib/tracing-limit/Cargo.toml b/lib/tracing-limit/Cargo.toml index 69311622ab120..77d3d54ef4371 100644 --- a/lib/tracing-limit/Cargo.toml +++ b/lib/tracing-limit/Cargo.toml @@ -6,6 +6,9 @@ edition = "2024" publish = false license = "MPL-2.0" +[lints.clippy] +unwrap-used = "forbid" + [dependencies] tracing-core = { version = "0.1", default-features = false } tracing-subscriber = { workspace = true, features = ["registry", "std"] } diff --git a/lib/tracing-limit/benches/limit.rs b/lib/tracing-limit/benches/limit.rs index 4f959a540d80d..25d905bc2ae85 100644 --- a/lib/tracing-limit/benches/limit.rs +++ b/lib/tracing-limit/benches/limit.rs @@ -114,17 +114,17 @@ where } fn on_new_span(&self, span: &span::Attributes<'_>, _id: &span::Id, _ctx: Context<'_, S>) { - let mut visitor = Visitor(self.mutex.lock().unwrap()); + let mut visitor = Visitor(self.mutex.lock().expect("mutex should not be poisoned")); span.record(&mut visitor); } fn on_record(&self, _id: &span::Id, values: &span::Record<'_>, _ctx: Context<'_, S>) { - let mut visitor = Visitor(self.mutex.lock().unwrap()); + let mut visitor = Visitor(self.mutex.lock().expect("mutex should not be poisoned")); values.record(&mut visitor); } fn on_event(&self, 
event: &Event<'_>, _ctx: Context<'_, S>) { - let mut visitor = Visitor(self.mutex.lock().unwrap()); + let mut visitor = Visitor(self.mutex.lock().expect("mutex should not be poisoned")); event.record(&mut visitor); } diff --git a/lib/tracing-limit/src/lib.rs b/lib/tracing-limit/src/lib.rs index 62113f36ce7cc..ebb7398dfc7ef 100644 --- a/lib/tracing-limit/src/lib.rs +++ b/lib/tracing-limit/src/lib.rs @@ -377,15 +377,17 @@ where let valueset = fields.value_set(&values); let event = Event::new(metadata, &valueset); self.inner.on_event(&event, ctx.clone()); - } else { - let values = [( - &fields.field(RATE_LIMIT_FIELD).unwrap(), - Some(&rate_limit as &dyn Value), - )]; + } else if let Some(rate_limit_field) = fields.field(RATE_LIMIT_FIELD) { + let values = [(&rate_limit_field, Some(&rate_limit as &dyn Value))]; let valueset = fields.value_set(&values); let event = Event::new(metadata, &valueset); self.inner.on_event(&event, ctx.clone()); + } else { + // If the event metadata has neither a "message" nor "internal_log_rate_limit" field, + // we cannot create a proper synthetic event. This can happen with custom debug events + // that have their own field structure. In this case, we simply skip emitting the + // rate limit notification rather than panicking. } } } @@ -1012,4 +1014,42 @@ mod test { ] ); } + + #[test] + #[serial] + fn events_with_custom_fields_no_message_dont_panic() { + // Verify events without "message" or "internal_log_rate_limit" fields don't panic + // when rate limiting skips suppression notifications. 
+ let (events, sub) = setup_test(1); + tracing::subscriber::with_default(sub, || { + // Use closure to ensure all events share the same callsite + let emit_event = || { + debug!(component_id = "test_component", utilization = 0.85); + }; + + // First window: emit 5 events, only the first one should be logged + for _ in 0..5 { + emit_event(); + MockClock::advance(Duration::from_millis(100)); + } + + // Advance to the next window + MockClock::advance(Duration::from_millis(1000)); + + // Second window: this event should be logged + emit_event(); + }); + + let events = events.lock().unwrap(); + + // First event from window 1, first event from window 2 + // Suppression notifications are skipped (no message field) + assert_eq!( + *events, + vec![ + event!("", component_id: "test_component", utilization: "0.85"), + event!("", component_id: "test_component", utilization: "0.85"), + ] + ); + } } From 0e0861a4c2a5b8498aabe7d834ba4fd65b40e63e Mon Sep 17 00:00:00 2001 From: Pavlos Rontidis Date: Mon, 10 Nov 2025 14:18:14 -0500 Subject: [PATCH 069/227] chore(tracing): do not rate limit utlization report (#24202) --- src/utilization.rs | 16 +--------------- 1 file changed, 1 insertion(+), 15 deletions(-) diff --git a/src/utilization.rs b/src/utilization.rs index e0212dc5b4d4e..8874f0537f583 100644 --- a/src/utilization.rs +++ b/src/utilization.rs @@ -76,8 +76,6 @@ pub(crate) struct Timer { ewma: stats::Ewma, gauge: Gauge, #[cfg(debug_assertions)] - report_count: u32, - #[cfg(debug_assertions)] component_id: Arc, } @@ -99,8 +97,6 @@ impl Timer { ewma: stats::Ewma::new(0.9), gauge, #[cfg(debug_assertions)] - report_count: 0, - #[cfg(debug_assertions)] component_id, } } @@ -147,17 +143,7 @@ impl Timer { self.total_wait = Duration::new(0, 0); #[cfg(debug_assertions)] - self.report(avg_rounded); - } - - #[cfg(debug_assertions)] - fn report(&mut self, utilization: f64) { - // Note that changing the reporting interval would also affect the actual metric reporting frequency. 
- // This check reduces debug log spamming. - if self.report_count.is_multiple_of(5) { - debug!(component_id = %self.component_id, %utilization); - } - self.report_count = self.report_count.wrapping_add(1); + debug!(component_id = %self.component_id, utilization = %avg_rounded, internal_log_rate_limit = false); } fn end_span(&mut self, at: Instant) { From f90e7b51b378c2a65bc7d1f91c3c3f4eead75477 Mon Sep 17 00:00:00 2001 From: Pavlos Rontidis Date: Tue, 11 Nov 2025 10:01:07 -0500 Subject: [PATCH 070/227] chore(internal metrics): move config_reload_* metrics to VectorReload* (#24203) * chore(internal metrics): move config_reload_* metrics to VectorReloaded and VectorReloadedError * update topology_doesnt_reload_new_data_dir test * chore(dev): cargo fmt * strengthen tests, since now we have a better interface --- .../unify_reload_metrics.enhancement.md | 16 +++ src/internal_events/config.rs | 90 ------------- src/internal_events/mod.rs | 1 - src/internal_events/process.rs | 6 +- src/topology/controller.rs | 39 +++++- src/topology/mod.rs | 2 +- src/topology/running.rs | 49 +++---- src/topology/test/doesnt_reload.rs | 9 +- src/topology/test/mod.rs | 124 +++++++----------- src/topology/test/reload.rs | 73 +++++------ src/topology/test/transient_state.rs | 40 +++--- 11 files changed, 184 insertions(+), 265 deletions(-) create mode 100644 changelog.d/unify_reload_metrics.enhancement.md delete mode 100644 src/internal_events/config.rs diff --git a/changelog.d/unify_reload_metrics.enhancement.md b/changelog.d/unify_reload_metrics.enhancement.md new file mode 100644 index 0000000000000..7910424e59be8 --- /dev/null +++ b/changelog.d/unify_reload_metrics.enhancement.md @@ -0,0 +1,16 @@ +The `component_errors_total` metric now includes a `reason` tag when `error_code="reload"` to provide more granular information about reload +failures. 
Possible reasons include: + +- `global_options_changed`: Reload rejected because global options (like `data_dir`) changed +- `global_diff_failed`: Reload rejected because computing global config diff failed +- `topology_build_failed`: Reload rejected because new topology failed to build/healthcheck +- `restore_failed`: Reload failed and could not restore previous config + +Replaced metrics: + +- `config_reload_rejected` was replaced by `component_errors_total` with `error_code="reload"` and a `reason` tag specifying the rejection type +- `config_reloaded` was replaced by the existing `reloaded_total` metric + +Note: The replaced metrics were introduced in v0.50.0 but were never emitted due to a bug. These changes provide consistency across Vector's internal telemetry. + +authors: pront diff --git a/src/internal_events/config.rs b/src/internal_events/config.rs deleted file mode 100644 index 7c9e80dc87788..0000000000000 --- a/src/internal_events/config.rs +++ /dev/null @@ -1,90 +0,0 @@ -use metrics::counter; -use vector_lib::internal_event::InternalEvent; - -#[derive(Debug)] -pub struct ConfigReloadRejected { - reason: ReloadRejectReason, -} - -impl InternalEvent for ConfigReloadRejected { - fn emit(self) { - match &self.reason { - ReloadRejectReason::GlobalOptionsChanged { fields } => { - error!( - message = "Config reload rejected due to non-reloadable global options.", - reason = %self.reason.as_str(), - changed_fields = %fields.join(", "), - internal_log_rate_limit = false, - ); - - counter!( - "config_reload_rejected", - "reason" => self.reason.as_str(), - ) - .increment(1); - } - ReloadRejectReason::FailedToComputeGlobalDiff(err) => { - error!( - message = "Config reload rejected due to failed to compute global diff.", - reason = %self.reason.as_str(), - error = %err, - internal_log_rate_limit = false, - ); - - counter!( - "config_reload_rejected", - "reason" => self.reason.as_str(), - ) - .increment(1); - } - } - } - - fn name(&self) -> Option<&'static str> { 
- Some("ConfigReloadRejected") - } -} - -impl ConfigReloadRejected { - pub const fn global_options_changed(fields: Vec) -> Self { - Self { - reason: ReloadRejectReason::GlobalOptionsChanged { fields }, - } - } - - pub const fn failed_to_compute_global_diff(error: serde_json::Error) -> Self { - Self { - reason: ReloadRejectReason::FailedToComputeGlobalDiff(error), - } - } -} - -#[derive(Debug)] -enum ReloadRejectReason { - GlobalOptionsChanged { fields: Vec }, - FailedToComputeGlobalDiff(serde_json::Error), -} - -impl ReloadRejectReason { - const fn as_str(&self) -> &'static str { - match self { - Self::GlobalOptionsChanged { fields: _ } => "global_options changed", - Self::FailedToComputeGlobalDiff(_) => "failed to compute global diff", - } - } -} - -#[derive(Debug)] -pub struct ConfigReloaded; - -impl InternalEvent for ConfigReloaded { - fn emit(self) { - info!("New configuration loaded successfully."); - - counter!("config_reloaded",).increment(1); - } - - fn name(&self) -> Option<&'static str> { - Some("ConfigReloaded") - } -} diff --git a/src/internal_events/mod.rs b/src/internal_events/mod.rs index 1647e2b8ff0ca..7a45737149708 100644 --- a/src/internal_events/mod.rs +++ b/src/internal_events/mod.rs @@ -152,7 +152,6 @@ mod file; #[cfg(windows)] mod windows; -pub mod config; #[cfg(any(feature = "transforms-log_to_metric", feature = "sinks-loki"))] mod expansion; #[cfg(feature = "sources-mongodb_metrics")] diff --git a/src/internal_events/process.rs b/src/internal_events/process.rs index ce8fbd063f438..2d1e5249a3613 100644 --- a/src/internal_events/process.rs +++ b/src/internal_events/process.rs @@ -64,12 +64,15 @@ impl InternalEvent for VectorQuit { } #[derive(Debug)] -pub struct VectorReloadError; +pub struct VectorReloadError { + pub reason: &'static str, +} impl InternalEvent for VectorReloadError { fn emit(self) { error!( message = "Reload was not successful.", + reason = self.reason, error_code = "reload", error_type = error_type::CONFIGURATION_FAILED, 
stage = error_stage::PROCESSING, @@ -80,6 +83,7 @@ impl InternalEvent for VectorReloadError { "error_code" => "reload", "error_type" => error_type::CONFIGURATION_FAILED, "stage" => error_stage::PROCESSING, + "reason" => self.reason, ) .increment(1); } diff --git a/src/topology/controller.rs b/src/topology/controller.rs index b2959b8eda8e6..53acc243a71f8 100644 --- a/src/topology/controller.rs +++ b/src/topology/controller.rs @@ -10,7 +10,7 @@ use crate::{ extra_context::ExtraContext, internal_events::{VectorRecoveryError, VectorReloadError, VectorReloaded}, signal::ShutdownError, - topology::RunningTopology, + topology::{ReloadError, RunningTopology}, }; #[derive(Clone, Debug)] @@ -110,7 +110,7 @@ impl TopologyController { .reload_config_and_respawn(new_config, self.extra_context.clone()) .await { - Ok(true) => { + Ok(()) => { #[cfg(feature = "api")] // Pass the new config to the API server. if let Some(ref api_server) = self.api_server { @@ -122,13 +122,38 @@ impl TopologyController { }); ReloadOutcome::Success } - Ok(false) => { - emit!(VectorReloadError); + Err(ReloadError::GlobalOptionsChanged { changed_fields }) => { + error!( + message = "Config reload rejected due to non-reloadable global options.", + changed_fields = %changed_fields.join(", "), + internal_log_rate_limit = false, + ); + emit!(VectorReloadError { + reason: "global_options_changed", + }); + ReloadOutcome::RolledBack + } + Err(ReloadError::GlobalDiffFailed { source }) => { + error!( + message = "Config reload rejected because computing global diff failed.", + error = %source, + internal_log_rate_limit = false, + ); + emit!(VectorReloadError { + reason: "global_diff_failed", + }); ReloadOutcome::RolledBack } - // Trigger graceful shutdown for what remains of the topology - Err(()) => { - emit!(VectorReloadError); + Err(ReloadError::TopologyBuildFailed) => { + emit!(VectorReloadError { + reason: "topology_build_failed", + }); + ReloadOutcome::RolledBack + } + Err(ReloadError::FailedToRestore) => 
{ + emit!(VectorReloadError { + reason: "restore_failed", + }); emit!(VectorRecoveryError); ReloadOutcome::FatalError(ShutdownError::ReloadFailedToRestore) } diff --git a/src/topology/mod.rs b/src/topology/mod.rs index bf0afb76214ba..15e8c4adf8a86 100644 --- a/src/topology/mod.rs +++ b/src/topology/mod.rs @@ -32,7 +32,7 @@ use self::task::{Task, TaskError, TaskResult}; pub use self::{ builder::TopologyPieces, controller::{ReloadOutcome, SharedTopologyController, TopologyController}, - running::{RunningTopology, ShutdownErrorReceiver}, + running::{ReloadError, RunningTopology, ShutdownErrorReceiver}, }; use crate::{ config::{ComponentKey, Config, ConfigDiff}, diff --git a/src/topology/running.rs b/src/topology/running.rs index 1eeb7bd2f8ef5..7a4f8f58ea3b0 100644 --- a/src/topology/running.rs +++ b/src/topology/running.rs @@ -7,6 +7,7 @@ use std::{ }; use futures::{Future, FutureExt, future}; +use snafu::Snafu; use stream_cancel::Trigger; use tokio::{ sync::{mpsc, watch}, @@ -31,7 +32,6 @@ use crate::{ config::{ComponentKey, Config, ConfigDiff, HealthcheckOptions, Inputs, OutputId, Resource}, event::EventArray, extra_context::ExtraContext, - internal_events::config::{ConfigReloadRejected, ConfigReloaded}, shutdown::SourceShutdownCoordinator, signal::ShutdownError, spawn_named, @@ -40,6 +40,18 @@ use crate::{ pub type ShutdownErrorReceiver = mpsc::UnboundedReceiver; +#[derive(Debug, Snafu)] +pub enum ReloadError { + #[snafu(display("global options changed: {}", changed_fields.join(", ")))] + GlobalOptionsChanged { changed_fields: Vec }, + #[snafu(display("failed to compute global diff: {}", source))] + GlobalDiffFailed { source: serde_json::Error }, + #[snafu(display("topology build failed"))] + TopologyBuildFailed, + #[snafu(display("failed to restore previous config"))] + FailedToRestore, +} + #[allow(dead_code)] pub struct RunningTopology { inputs: HashMap>, @@ -252,35 +264,26 @@ impl RunningTopology { /// Attempts to load a new configuration and update this 
running topology. /// /// If the new configuration was valid, and all changes were able to be made -- removing of - /// old components, changing of existing components, adding of new components -- then `Ok(true)` - /// is returned. + /// old components, changing of existing components, adding of new components -- then + /// `Ok(())` is returned. /// /// If the new configuration is not valid, or not all of the changes in the new configuration /// were able to be made, then this method will attempt to undo the changes made and bring the - /// topology back to its previous state. If either of these scenarios occur, then `Ok(false)` - /// is returned. + /// topology back to its previous state, returning the appropriate error. /// - /// # Errors - /// - /// If all changes from the new configuration cannot be made, and the current configuration - /// cannot be fully restored, then `Err(())` is returned. + /// If the restore also fails, `ReloadError::FailedToRestore` is returned. pub async fn reload_config_and_respawn( &mut self, new_config: Config, extra_context: ExtraContext, - ) -> Result { + ) -> Result<(), ReloadError> { info!("Reloading running topology with new configuration."); if self.config.global != new_config.global { - match self.config.global.diff(&new_config.global) { - Ok(changed) => { - emit!(ConfigReloadRejected::global_options_changed(changed)); - } - Err(err) => { - emit!(ConfigReloadRejected::failed_to_compute_global_diff(err)); - } - } - return Ok(false); + return match self.config.global.diff(&new_config.global) { + Ok(changed_fields) => Err(ReloadError::GlobalOptionsChanged { changed_fields }), + Err(source) => Err(ReloadError::GlobalDiffFailed { source }), + }; } // Calculate the change between the current configuration and the new configuration, and @@ -324,9 +327,9 @@ impl RunningTopology { self.spawn_diff(&diff, new_pieces); self.config = new_config; - emit!(ConfigReloaded); + info!("New configuration loaded successfully."); - return Ok(true); 
+ return Ok(()); } } @@ -351,7 +354,7 @@ impl RunningTopology { info!("Old configuration restored successfully."); - return Ok(false); + return Err(ReloadError::TopologyBuildFailed); } error!( @@ -359,7 +362,7 @@ impl RunningTopology { internal_log_rate_limit = false ); - Err(()) + Err(ReloadError::FailedToRestore) } /// Attempts to reload enrichment tables. diff --git a/src/topology/test/doesnt_reload.rs b/src/topology/test/doesnt_reload.rs index 788ee4c676075..0d02d167e8d51 100644 --- a/src/topology/test/doesnt_reload.rs +++ b/src/topology/test/doesnt_reload.rs @@ -6,6 +6,7 @@ use crate::{ mock::{basic_sink, basic_source}, start_topology, trace_init, }, + topology::ReloadError::*, }; #[tokio::test] @@ -22,10 +23,12 @@ async fn topology_doesnt_reload_new_data_dir() { new_config.global.data_dir = Some(Path::new("/qwerty").to_path_buf()); - topology + let result = topology .reload_config_and_respawn(new_config.build().unwrap(), Default::default()) - .await - .unwrap(); + .await; + + // Should fail with GlobalOptionsChanged error + assert!(matches!(result, Err(GlobalOptionsChanged { .. 
}))); assert_eq!( topology.config.global.data_dir, diff --git a/src/topology/test/mod.rs b/src/topology/test/mod.rs index cc9932279f950..738a73125aa8a 100644 --- a/src/topology/test/mod.rs +++ b/src/topology/test/mod.rs @@ -29,7 +29,7 @@ use crate::{ }, start_topology, trace_init, }, - topology::{RunningTopology, builder::TopologyPiecesBuilder}, + topology::{ReloadError::*, RunningTopology, builder::TopologyPiecesBuilder}, }; mod backpressure; @@ -302,12 +302,10 @@ async fn topology_remove_one_source() { config.add_source("in1", basic_source().1); config.add_sink("out1", &["in1"], sink1); - assert!( - topology - .reload_config_and_respawn(config.build().unwrap(), Default::default()) - .await - .unwrap() - ); + topology + .reload_config_and_respawn(config.build().unwrap(), Default::default()) + .await + .unwrap(); // Send an event into both source #1 and source #2: let mut event1 = Event::Log(LogEvent::from("this")); @@ -353,12 +351,10 @@ async fn topology_remove_one_sink() { config.add_source("in1", basic_source().1); config.add_sink("out1", &["in1"], basic_sink(10).1); - assert!( - topology - .reload_config_and_respawn(config.build().unwrap(), Default::default()) - .await - .unwrap() - ); + topology + .reload_config_and_respawn(config.build().unwrap(), Default::default()) + .await + .unwrap(); let mut event = Event::Log(LogEvent::from("this")); @@ -408,12 +404,10 @@ async fn topology_remove_one_transform() { config.add_transform("t2", &["in1"], transform2); config.add_sink("out1", &["t2"], sink2); - assert!( - topology - .reload_config_and_respawn(config.build().unwrap(), Default::default()) - .await - .unwrap() - ); + topology + .reload_config_and_respawn(config.build().unwrap(), Default::default()) + .await + .unwrap(); // Send the same event to both sources: let event = Event::Log(LogEvent::from("this")); @@ -459,12 +453,10 @@ async fn topology_swap_source() { config.add_source("in2", source2); config.add_sink("out1", &["in2"], sink2); - assert!( - topology - 
.reload_config_and_respawn(config.build().unwrap(), Default::default()) - .await - .unwrap() - ); + topology + .reload_config_and_respawn(config.build().unwrap(), Default::default()) + .await + .unwrap(); // Send an event into both source #1 and source #2: let event1 = Event::Log(LogEvent::from("this")); @@ -526,12 +518,10 @@ async fn topology_swap_transform() { config.add_transform("t1", &["in1"], transform2); config.add_sink("out1", &["t1"], sink2); - assert!( - topology - .reload_config_and_respawn(config.build().unwrap(), Default::default()) - .await - .unwrap() - ); + topology + .reload_config_and_respawn(config.build().unwrap(), Default::default()) + .await + .unwrap(); // Send an event into both source #1 and source #2: let event1 = Event::Log(LogEvent::from("this")); @@ -580,12 +570,10 @@ async fn topology_swap_sink() { config.add_source("in1", source2); config.add_sink("out1", &["in1"], sink2); - assert!( - topology - .reload_config_and_respawn(config.build().unwrap(), Default::default()) - .await - .unwrap() - ); + topology + .reload_config_and_respawn(config.build().unwrap(), Default::default()) + .await + .unwrap(); // Send an event into both source #1 and source #2: let mut event1 = Event::Log(LogEvent::from("this")); @@ -670,12 +658,10 @@ async fn topology_swap_transform_is_atomic() { config.add_transform("t1", &["in1"], transform1v2); config.add_sink("out1", &["t1"], basic_sink(10).1); - assert!( - topology - .reload_config_and_respawn(config.build().unwrap(), Default::default()) - .await - .unwrap() - ); + topology + .reload_config_and_respawn(config.build().unwrap(), Default::default()) + .await + .unwrap(); run_control.store(false, Ordering::Release); h_in.await.unwrap(); @@ -708,12 +694,10 @@ async fn topology_rebuild_connected() { config.add_source("in1", source1); config.add_sink("out1", &["in1"], sink1); - assert!( - topology - .reload_config_and_respawn(config.build().unwrap(), Default::default()) - .await - .unwrap() - ); + topology + 
.reload_config_and_respawn(config.build().unwrap(), Default::default()) + .await + .unwrap(); let mut event1 = Event::Log(LogEvent::from("this")); let mut event2 = Event::Log(LogEvent::from("that")); @@ -769,12 +753,10 @@ async fn topology_rebuild_connected_transform() { config.add_transform("t2", &["t1"], transform2); config.add_sink("out1", &["t2"], sink2); - assert!( - topology - .reload_config_and_respawn(config.build().unwrap(), Default::default()) - .await - .unwrap() - ); + topology + .reload_config_and_respawn(config.build().unwrap(), Default::default()) + .await + .unwrap(); let mut event = Event::Log(LogEvent::from("this")); let h_out1 = tokio::spawn(out1.flat_map(into_event_stream).collect::>()); @@ -826,12 +808,10 @@ async fn topology_optional_healthcheck_does_not_fail_reload() { let config = basic_config(); let (mut topology, _) = start_topology(config, false).await; let config = basic_config_with_sink_failing_healthcheck(); - assert!( - topology - .reload_config_and_respawn(config, Default::default()) - .await - .unwrap() - ); + topology + .reload_config_and_respawn(config, Default::default()) + .await + .unwrap(); } #[tokio::test] @@ -841,12 +821,10 @@ async fn topology_healthcheck_not_run_on_unchanged_reload() { let (mut topology, _) = start_topology(config, false).await; let mut config = basic_config_with_sink_failing_healthcheck(); config.healthchecks.require_healthy = true; - assert!( - topology - .reload_config_and_respawn(config, Default::default()) - .await - .unwrap() - ); + topology + .reload_config_and_respawn(config, Default::default()) + .await + .unwrap(); } #[tokio::test] @@ -869,12 +847,12 @@ async fn topology_healthcheck_run_for_changes_on_reload() { let mut config = config.build().unwrap(); config.healthchecks.require_healthy = true; - assert!( - !topology - .reload_config_and_respawn(config, Default::default()) - .await - .unwrap() - ); + let result = topology + .reload_config_and_respawn(config, Default::default()) + .await; + + // 
Should fail with TopologyBuildFailed error due to healthcheck failure + assert!(matches!(result, Err(TopologyBuildFailed))); } #[tokio::test] diff --git a/src/topology/test/reload.rs b/src/topology/test/reload.rs index 1691801ad8bdc..9cb10cc87370c 100644 --- a/src/topology/test/reload.rs +++ b/src/topology/test/reload.rs @@ -21,6 +21,7 @@ use crate::{ splunk_hec::SplunkConfig, }, test_util::{self, mock::basic_sink, next_addr, start_topology, temp_dir, wait_for_tcp}, + topology::ReloadError::*, }; fn internal_metrics_source() -> InternalMetricsConfig { @@ -67,12 +68,10 @@ async fn topology_reuse_old_port() { new_config.add_sink("out", &["in2"], basic_sink(1).1); let (mut topology, _) = start_topology(old_config.build().unwrap(), false).await; - assert!( - topology - .reload_config_and_respawn(new_config.build().unwrap(), Default::default()) - .await - .unwrap() - ); + topology + .reload_config_and_respawn(new_config.build().unwrap(), Default::default()) + .await + .unwrap(); } #[tokio::test] @@ -94,12 +93,12 @@ async fn topology_rebuild_old() { let _bind = TcpListener::bind(address_1).unwrap(); let (mut topology, _) = start_topology(old_config.build().unwrap(), false).await; - assert!( - !topology - .reload_config_and_respawn(new_config.build().unwrap(), Default::default()) - .await - .unwrap() - ); + let result = topology + .reload_config_and_respawn(new_config.build().unwrap(), Default::default()) + .await; + + // Should fail with TopologyBuildFailed error due to port conflict + assert!(matches!(result, Err(TopologyBuildFailed))); } #[tokio::test] @@ -113,12 +112,10 @@ async fn topology_old() { old_config.add_sink("out", &["in"], basic_sink(1).1); let (mut topology, _) = start_topology(old_config.clone().build().unwrap(), false).await; - assert!( - topology - .reload_config_and_respawn(old_config.build().unwrap(), Default::default()) - .await - .unwrap() - ); + topology + .reload_config_and_respawn(old_config.build().unwrap(), Default::default()) + .await + 
.unwrap(); } #[tokio::test] @@ -266,24 +263,20 @@ async fn topology_readd_input() { new_config.add_source("in1", internal_metrics_source()); new_config.add_source("in2", internal_metrics_source()); new_config.add_sink("out", &["in1"], prom_exporter_sink(address_0, 1)); - assert!( - topology - .reload_config_and_respawn(new_config.build().unwrap(), Default::default()) - .await - .unwrap() - ); + topology + .reload_config_and_respawn(new_config.build().unwrap(), Default::default()) + .await + .unwrap(); // re-add in2 let mut new_config = Config::builder(); new_config.add_source("in1", internal_metrics_source()); new_config.add_source("in2", internal_metrics_source()); new_config.add_sink("out", &["in1", "in2"], prom_exporter_sink(address_0, 1)); - assert!( - topology - .reload_config_and_respawn(new_config.build().unwrap(), Default::default()) - .await - .unwrap() - ); + topology + .reload_config_and_respawn(new_config.build().unwrap(), Default::default()) + .await + .unwrap(); sleep(Duration::from_secs(1)).await; topology.stop().await; @@ -312,12 +305,10 @@ async fn topology_reload_component() { topology.extend_reload_set(HashSet::from_iter(vec![ComponentKey::from("out")])); - assert!( - topology - .reload_config_and_respawn(old_config.build().unwrap(), Default::default()) - .await - .unwrap() - ); + topology + .reload_config_and_respawn(old_config.build().unwrap(), Default::default()) + .await + .unwrap(); // TODO: Implement notification to avoid the sleep() // Give the old topology configuration a chance to shutdown cleanly, etc. @@ -345,12 +336,10 @@ async fn reload_sink_test( sleep(Duration::from_secs(1)).await; // Now reload the topology with the "new" configuration, and make sure that a component is now listening on `new_address`. 
- assert!( - topology - .reload_config_and_respawn(new_config, Default::default()) - .await - .unwrap() - ); + topology + .reload_config_and_respawn(new_config, Default::default()) + .await + .unwrap(); // Give the old topology configuration a chance to shutdown cleanly, etc. sleep(Duration::from_secs(2)).await; diff --git a/src/topology/test/transient_state.rs b/src/topology/test/transient_state.rs index ed5daca4c0f62..a5b18fef7d0d8 100644 --- a/src/topology/test/transient_state.rs +++ b/src/topology/test/transient_state.rs @@ -29,12 +29,10 @@ async fn closed_source() { topology.sources_finished().await; - assert!( - topology - .reload_config_and_respawn(new_config.build().unwrap(), Default::default()) - .await - .unwrap() - ); + topology + .reload_config_and_respawn(new_config.build().unwrap(), Default::default()) + .await + .unwrap(); } #[tokio::test] @@ -53,12 +51,10 @@ async fn remove_sink() { new_config.add_sink("out1", &["trans"], basic_sink(1).1); let (mut topology, _) = start_topology(old_config.build().unwrap(), false).await; - assert!( - topology - .reload_config_and_respawn(new_config.build().unwrap(), Default::default()) - .await - .unwrap() - ); + topology + .reload_config_and_respawn(new_config.build().unwrap(), Default::default()) + .await + .unwrap(); } #[tokio::test] @@ -78,12 +74,10 @@ async fn remove_transform() { new_config.add_sink("out1", &["trans1"], basic_sink(1).1); let (mut topology, _) = start_topology(old_config.build().unwrap(), false).await; - assert!( - topology - .reload_config_and_respawn(new_config.build().unwrap(), Default::default()) - .await - .unwrap() - ); + topology + .reload_config_and_respawn(new_config.build().unwrap(), Default::default()) + .await + .unwrap(); } #[tokio::test] @@ -104,10 +98,8 @@ async fn replace_transform() { new_config.add_sink("out1", &["trans1"], basic_sink(1).1); let (mut topology, _) = start_topology(old_config.build().unwrap(), false).await; - assert!( - topology - 
.reload_config_and_respawn(new_config.build().unwrap(), Default::default()) - .await - .unwrap() - ); + topology + .reload_config_and_respawn(new_config.build().unwrap(), Default::default()) + .await + .unwrap(); } From 61bf5ad14b76ef7f2835eb207e3ebfdc76d538d2 Mon Sep 17 00:00:00 2001 From: Pavlos Rontidis Date: Tue, 11 Nov 2025 10:39:47 -0500 Subject: [PATCH 071/227] chore(ci): remove build-all flag, inspect state instead (#24206) * chore(ci): remove build-all flag, inspect state instead * fmt --- scripts/run-integration-test.sh | 6 ++-- vdev/src/commands/compose_tests/start.rs | 3 +- vdev/src/commands/compose_tests/stop.rs | 8 ++---- vdev/src/commands/compose_tests/test.rs | 11 ++------ vdev/src/commands/e2e/start.rs | 5 ---- vdev/src/commands/e2e/stop.rs | 10 +------ vdev/src/commands/e2e/test.rs | 5 ---- vdev/src/commands/integration/start.rs | 5 ---- vdev/src/commands/integration/stop.rs | 5 ---- vdev/src/commands/integration/test.rs | 5 ---- vdev/src/testing/integration.rs | 35 +++++++++++++++++------- vdev/src/testing/runner.rs | 5 ++++ 12 files changed, 39 insertions(+), 64 deletions(-) diff --git a/scripts/run-integration-test.sh b/scripts/run-integration-test.sh index 488fd90226380..5a5782ec433c2 100755 --- a/scripts/run-integration-test.sh +++ b/scripts/run-integration-test.sh @@ -127,12 +127,12 @@ for TEST_ENV in "${TEST_ENVIRONMENTS[@]}"; do docker run --rm -v vector_target:/output/"${TEST_NAME}" alpine:3.20 \ sh -c "rm -rf /output/${TEST_NAME}/*" - cargo vdev "${VERBOSITY}" "${TEST_TYPE}" start --build-all "${TEST_NAME}" "${TEST_ENV}" + cargo vdev "${VERBOSITY}" "${TEST_TYPE}" start "${TEST_NAME}" "${TEST_ENV}" START_RET=$? print_compose_logs_on_failure "$START_RET" if [[ "$START_RET" -eq 0 ]]; then - cargo vdev "${VERBOSITY}" "${TEST_TYPE}" test --retries "$RETRIES" --build-all "${TEST_NAME}" "${TEST_ENV}" + cargo vdev "${VERBOSITY}" "${TEST_TYPE}" test --retries "$RETRIES" "${TEST_NAME}" "${TEST_ENV}" RET=$?
print_compose_logs_on_failure "$RET" @@ -144,7 +144,7 @@ for TEST_ENV in "${TEST_ENVIRONMENTS[@]}"; do fi # Always stop the environment (best effort cleanup) - cargo vdev "${VERBOSITY}" "${TEST_TYPE}" stop --build-all "${TEST_NAME}" || true + cargo vdev "${VERBOSITY}" "${TEST_TYPE}" stop "${TEST_NAME}" || true # Exit early on first failure if [[ "$RET" -ne 0 ]]; then diff --git a/vdev/src/commands/compose_tests/start.rs b/vdev/src/commands/compose_tests/start.rs index b3bb686f3325f..51a4a3c4b2237 100644 --- a/vdev/src/commands/compose_tests/start.rs +++ b/vdev/src/commands/compose_tests/start.rs @@ -9,7 +9,6 @@ pub(crate) fn exec( local_config: ComposeTestLocalConfig, integration: &str, environment: Option<&String>, - all_features: bool, ) -> Result<()> { let environment = if let Some(environment) = environment { environment.clone() @@ -21,5 +20,5 @@ pub(crate) fn exec( env.clone() }; debug!("Selected environment: {environment:#?}"); - ComposeTest::generate(local_config, integration, environment, all_features, 0)?.start() + ComposeTest::generate(local_config, integration, environment, 0)?.start() } diff --git a/vdev/src/commands/compose_tests/stop.rs b/vdev/src/commands/compose_tests/stop.rs index 7470512efb5f1..188b428a71019 100644 --- a/vdev/src/commands/compose_tests/stop.rs +++ b/vdev/src/commands/compose_tests/stop.rs @@ -7,17 +7,13 @@ use crate::testing::{ use super::active_projects::find_active_environment_for_integration; -pub(crate) fn exec( - local_config: ComposeTestLocalConfig, - test_name: &str, - all_features: bool, -) -> Result<()> { +pub(crate) fn exec(local_config: ComposeTestLocalConfig, test_name: &str) -> Result<()> { let (_test_dir, config) = ComposeTestConfig::load(local_config.directory, test_name)?; let active_environment = find_active_environment_for_integration(local_config.directory, test_name, &config)?; if let Some(environment) = active_environment { - ComposeTest::generate(local_config, test_name, environment, all_features, 0)?.stop() 
+ ComposeTest::generate(local_config, test_name, environment, 0)?.stop() } else { println!("No environment for {test_name} is active."); Ok(()) diff --git a/vdev/src/commands/compose_tests/test.rs b/vdev/src/commands/compose_tests/test.rs index 83381d066bc57..e2fb51d006c23 100644 --- a/vdev/src/commands/compose_tests/test.rs +++ b/vdev/src/commands/compose_tests/test.rs @@ -13,7 +13,6 @@ pub fn exec( local_config: ComposeTestLocalConfig, integration: &str, environment: Option<&String>, - all_features: bool, retries: u8, args: &[String], ) -> Result<()> { @@ -34,14 +33,8 @@ pub fn exec( }; for environment in environments { - ComposeTest::generate( - local_config, - integration, - environment, - all_features, - retries, - )? - .test(args.to_owned())?; + ComposeTest::generate(local_config, integration, environment, retries)? + .test(args.to_owned())?; } Ok(()) } diff --git a/vdev/src/commands/e2e/start.rs b/vdev/src/commands/e2e/start.rs index d20aba240718a..b717712a03153 100644 --- a/vdev/src/commands/e2e/start.rs +++ b/vdev/src/commands/e2e/start.rs @@ -10,10 +10,6 @@ pub struct Cli { /// The e2e test name test: String, - /// Whether to compile the test runner with all integration test features - #[arg(short = 'a', long)] - build_all: bool, - /// The desired environment name to start. If omitted, the first environment name is used. 
environment: Option, } @@ -24,7 +20,6 @@ impl Cli { ComposeTestLocalConfig::e2e(), &self.test, self.environment.as_ref(), - self.build_all, ) } } diff --git a/vdev/src/commands/e2e/stop.rs b/vdev/src/commands/e2e/stop.rs index 85f28e960a0ba..a21c2d9542277 100644 --- a/vdev/src/commands/e2e/stop.rs +++ b/vdev/src/commands/e2e/stop.rs @@ -9,18 +9,10 @@ use crate::testing::integration::ComposeTestLocalConfig; pub struct Cli { /// The e2e test name to stop test: String, - - /// If true, remove the runner container compiled with all integration test features - #[arg(short = 'a', long)] - build_all: bool, } impl Cli { pub fn exec(self) -> Result<()> { - crate::commands::compose_tests::stop::exec( - ComposeTestLocalConfig::e2e(), - &self.test, - self.build_all, - ) + crate::commands::compose_tests::stop::exec(ComposeTestLocalConfig::e2e(), &self.test) } } diff --git a/vdev/src/commands/e2e/test.rs b/vdev/src/commands/e2e/test.rs index 092d880f557e9..b9d0149afbe65 100644 --- a/vdev/src/commands/e2e/test.rs +++ b/vdev/src/commands/e2e/test.rs @@ -20,10 +20,6 @@ pub struct Cli { /// The desired environment (optional) environment: Option, - /// Whether to compile the test runner with all integration test features - #[arg(short = 'a', long)] - build_all: bool, - /// Number of retries to allow on each integration test case. 
#[arg(short = 'r', long)] retries: Option, @@ -38,7 +34,6 @@ impl Cli { ComposeTestLocalConfig::e2e(), &self.e2e_test, self.environment.as_ref(), - self.build_all, self.retries.unwrap_or_default(), &self.args, ) diff --git a/vdev/src/commands/integration/start.rs b/vdev/src/commands/integration/start.rs index dca8ace5e4de2..15d954aecd622 100644 --- a/vdev/src/commands/integration/start.rs +++ b/vdev/src/commands/integration/start.rs @@ -10,10 +10,6 @@ pub struct Cli { /// The integration name integration: String, - /// Whether to compile the test runner with all integration test features - #[arg(short = 'a', long)] - build_all: bool, - /// The desired environment name to start. If omitted, the first environment name is used. environment: Option, } @@ -24,7 +20,6 @@ impl Cli { ComposeTestLocalConfig::integration(), &self.integration, self.environment.as_ref(), - self.build_all, ) } } diff --git a/vdev/src/commands/integration/stop.rs b/vdev/src/commands/integration/stop.rs index da9e403cfb82f..f197f798a57d8 100644 --- a/vdev/src/commands/integration/stop.rs +++ b/vdev/src/commands/integration/stop.rs @@ -9,10 +9,6 @@ use crate::testing::integration::ComposeTestLocalConfig; pub struct Cli { /// The integration name to stop integration: String, - - /// If true, remove the runner container compiled with all integration test features - #[arg(short = 'a', long)] - build_all: bool, } impl Cli { @@ -20,7 +16,6 @@ impl Cli { crate::commands::compose_tests::stop::exec( ComposeTestLocalConfig::integration(), &self.integration, - self.build_all, ) } } diff --git a/vdev/src/commands/integration/test.rs b/vdev/src/commands/integration/test.rs index 956b775a8c527..05ac2d4df1347 100644 --- a/vdev/src/commands/integration/test.rs +++ b/vdev/src/commands/integration/test.rs @@ -20,10 +20,6 @@ pub struct Cli { /// The desired environment (optional) environment: Option, - /// Whether to compile the test runner with all integration test features - #[arg(short = 'a', long)] - build_all: 
bool, - /// Number of retries to allow on each integration test case. #[arg(short = 'r', long)] retries: Option, @@ -38,7 +34,6 @@ impl Cli { ComposeTestLocalConfig::integration(), &self.integration, self.environment.as_ref(), - self.build_all, self.retries.unwrap_or_default(), &self.args, ) diff --git a/vdev/src/testing/integration.rs b/vdev/src/testing/integration.rs index b7ddab7fd27c4..2db5f1a7f8887 100644 --- a/vdev/src/testing/integration.rs +++ b/vdev/src/testing/integration.rs @@ -23,6 +23,14 @@ use crate::{ const NETWORK_ENV_VAR: &str = "VECTOR_NETWORK"; const E2E_FEATURE_FLAG: &str = "all-e2e-tests"; +/// Check if a Docker image exists locally +fn docker_image_exists(image_name: &str) -> Result { + use crate::testing::docker::docker_command; + let output = + docker_command(["images", "--format", "{{.Repository}}:{{.Tag}}"]).check_output()?; + Ok(output.lines().any(|line| line == image_name)) +} + #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub(crate) enum ComposeTestKind { E2E, @@ -67,8 +75,6 @@ pub(crate) struct ComposeTest { runner: IntegrationTestRunner, compose: Option, env_config: Environment, - /// When true, uses 'all-integration-tests' or 'all-e2e-tests' feature. When false, uses features from test.yaml. - all_features: bool, retries: u8, } @@ -77,7 +83,6 @@ impl ComposeTest { local_config: ComposeTestLocalConfig, test_name: impl Into, environment: impl Into, - all_features: bool, retries: u8, ) -> Result { let test_name: String = test_name.into(); @@ -90,9 +95,20 @@ impl ComposeTest { let network_name = format!("vector-integration-tests-{test_name}"); let compose = Compose::new(test_dir, env_config.clone(), network_name.clone())?; - // When using 'all-*-tests' feature, creates shared image (vector-test-runner-1.90:latest). - // When using test.yaml features, creates per-test image (vector-test-runner-clickhouse-1.90:latest). - let runner_name = (!all_features).then(|| test_name.clone()); + // Auto-detect: If shared image exists, use it. 
Otherwise use per-test image. + // Shared image: vector-test-runner-1.90:latest (compiled with all-integration-tests) + // Per-test image: vector-test-runner-clickhouse-1.90:latest (compiled with specific features) + let shared_image_name = format!( + "vector-test-runner-{}:latest", + RustToolchainConfig::rust_version() + ); + let runner_name = if docker_image_exists(&shared_image_name).unwrap_or(false) { + info!("Using shared runner image: {shared_image_name}"); + None + } else { + info!("Shared runner image not found, will build image for: {test_name}"); + Some(test_name.clone()) + }; let runner = IntegrationTestRunner::new( runner_name, @@ -110,7 +126,6 @@ impl ComposeTest { runner, compose, env_config: rename_environment_keys(&env_config), - all_features, retries, }; trace!("Generated {compose_test:#?}"); @@ -176,9 +191,9 @@ impl ComposeTest { args.push("--features".to_string()); - // When all_features=true: use 'all-integration-tests' or 'all-e2e-tests' - // When all_features=false: use test-specific features from test.yaml - args.push(if self.all_features { + // If using shared runner: use 'all-integration-tests' or 'all-e2e-tests' + // If using per-test runner: use test-specific features from test.yaml + args.push(if self.runner.is_shared_runner() { self.local_config.feature_flag.to_string() } else { self.config.features.join(",") diff --git a/vdev/src/testing/runner.rs b/vdev/src/testing/runner.rs index 2299269bba985..92f57bfef50bb 100644 --- a/vdev/src/testing/runner.rs +++ b/vdev/src/testing/runner.rs @@ -294,6 +294,11 @@ impl IntegrationTestRunner { }) } + /// Returns true if this runner uses the shared image (with all features) + pub(super) fn is_shared_runner(&self) -> bool { + self.integration.is_none() + } + pub(super) fn ensure_network(&self) -> Result<()> { if let Some(network_name) = &self.network { let mut command = docker_command(["network", "ls", "--format", "{{.Name}}"]); From 98b77a1645f8457c01128a0904ce5fb1f5a8e871 Mon Sep 17 00:00:00 2001 
From: Thomas Date: Tue, 11 Nov 2025 12:33:41 -0500 Subject: [PATCH 072/227] enhancement(vdev): run fmt before committing clippy fixes (#24210) --- vdev/src/commands/check/rust.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/vdev/src/commands/check/rust.rs b/vdev/src/commands/check/rust.rs index aaa24365e849e..4178c4ad0cccc 100644 --- a/vdev/src/commands/check/rust.rs +++ b/vdev/src/commands/check/rust.rs @@ -57,6 +57,7 @@ impl Cli { if self.fix { let has_changes = !git::get_modified_files()?.is_empty(); if has_changes { + app::exec("cargo", ["fmt", "--all"], true)?; git::commit("chore(vdev): apply vdev rust check fixes")?; } } From 4eea77c36cf376e7c411df65a30c4fbdc8596b43 Mon Sep 17 00:00:00 2001 From: Thomas Date: Tue, 11 Nov 2025 12:41:31 -0500 Subject: [PATCH 073/227] chore(dev): upgrade Rust to 1.91.1 (#24209) * chore(dev): upgrade Rust to 1.91.1 * chore(vdev): apply vdev rust check fixes * cargo fmt * Fix rust docs --- lib/vector-common/src/finalization.rs | 16 ++++------------ lib/vector-config-macros/src/lib.rs | 2 +- lib/vector-config/src/named.rs | 2 +- lib/vector-core/src/config/mod.rs | 9 ++------- lib/vector-core/src/event/lua/metric.rs | 2 +- lib/vector-core/src/event/metric/tags.rs | 9 ++------- rust-toolchain.toml | 2 +- src/sinks/databend/compression.rs | 8 ++------ src/sinks/databend/encoding.rs | 8 ++------ src/sinks/elasticsearch/config.rs | 8 ++------ src/sinks/elasticsearch/mod.rs | 16 ++++------------ src/sinks/prometheus/exporter.rs | 6 +++--- src/sinks/util/service/concurrency.rs | 9 ++------- src/sources/fluent/mod.rs | 8 ++++---- src/sources/util/grpc/decompression.rs | 16 ++++++++-------- src/transforms/reduce/transform.rs | 6 +++--- vdev/src/commands/check/rust.rs | 2 +- .../commands/compose_tests/active_projects.rs | 2 +- vdev/src/testing/integration.rs | 4 ++-- 19 files changed, 46 insertions(+), 89 deletions(-) diff --git a/lib/vector-common/src/finalization.rs b/lib/vector-common/src/finalization.rs index 
56310a37d31b4..9fc403c99f399 100644 --- a/lib/vector-common/src/finalization.rs +++ b/lib/vector-common/src/finalization.rs @@ -280,10 +280,12 @@ impl Drop for OwnedBatchNotifier { /// The status of an individual batch. #[derive(Copy, Clone, Debug, Eq, PartialEq)] #[repr(u8)] +#[derive(Default)] pub enum BatchStatus { /// All events in the batch were accepted. /// /// This is the default. + #[default] Delivered, /// At least one event in the batch had a transient error in delivery. Errored, @@ -291,12 +293,6 @@ pub enum BatchStatus { Rejected, } -impl Default for BatchStatus { - fn default() -> Self { - Self::Delivered - } -} - impl BatchStatus { /// Updates the delivery status based on another batch's delivery status, returning the result. /// @@ -320,10 +316,12 @@ impl BatchStatus { /// The status of an individual event. #[derive(Copy, Clone, Debug, Eq, PartialEq)] #[repr(u8)] +#[derive(Default)] pub enum EventStatus { /// All copies of this event were dropped without being finalized. /// /// This is the default. + #[default] Dropped, /// All copies of this event were delivered successfully. Delivered, @@ -335,12 +333,6 @@ pub enum EventStatus { Recorded, } -impl Default for EventStatus { - fn default() -> Self { - Self::Dropped - } -} - impl EventStatus { /// Updates the status based on another event's status, returning the result. /// diff --git a/lib/vector-config-macros/src/lib.rs b/lib/vector-config-macros/src/lib.rs index cfb10e09c258b..65757abe2f70a 100644 --- a/lib/vector-config-macros/src/lib.rs +++ b/lib/vector-config-macros/src/lib.rs @@ -10,7 +10,7 @@ mod configurable_component; /// Designates a type as being part of a Vector configuration. /// -/// This will automatically derive the [`Configurable`][vector-config::Configurable] trait for the given struct/enum, as +/// This will automatically derive the `Configurable` trait for the given struct/enum, as /// well as ensuring that serialization/deserialization (via `serde`) is derived. 
/// /// ## Basics diff --git a/lib/vector-config/src/named.rs b/lib/vector-config/src/named.rs index 9d791d3c1f9ba..48da2bb4fd6cb 100644 --- a/lib/vector-config/src/named.rs +++ b/lib/vector-config/src/named.rs @@ -1,7 +1,7 @@ /// A component with a well-known name. /// /// Users can derive this trait automatically by using the -/// [`component_name`][vector-config::component_name] macro on their structs/enums. +/// [`NamedComponent`][derive@crate::NamedComponent] derive macro on their structs/enums. pub trait NamedComponent { /// Gets the name of the component. fn get_component_name(&self) -> &'static str; diff --git a/lib/vector-core/src/config/mod.rs b/lib/vector-core/src/config/mod.rs index be04b8c58186f..0c6af5e09c258 100644 --- a/lib/vector-core/src/config/mod.rs +++ b/lib/vector-core/src/config/mod.rs @@ -393,7 +393,7 @@ impl From for AcknowledgementsConfig { } } -#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize, PartialOrd, Ord, Eq)] +#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize, PartialOrd, Ord, Eq, Default)] pub enum LogNamespace { /// Vector native namespacing /// @@ -405,6 +405,7 @@ pub enum LogNamespace { /// /// All data is set in the root of the event. 
Since this can lead /// to collisions, deserialized data has priority over metadata + #[default] Legacy, } @@ -420,12 +421,6 @@ impl From for LogNamespace { } } -impl Default for LogNamespace { - fn default() -> Self { - Self::Legacy - } -} - /// A shortcut to specify no `LegacyKey` should be used (since otherwise a turbofish would be required) pub const NO_LEGACY_KEY: Option> = None; diff --git a/lib/vector-core/src/event/lua/metric.rs b/lib/vector-core/src/event/lua/metric.rs index 1bad80f1df191..a968fbed2fbd0 100644 --- a/lib/vector-core/src/event/lua/metric.rs +++ b/lib/vector-core/src/event/lua/metric.rs @@ -88,7 +88,7 @@ impl FromLua for TagValueSet { } Ok(Self::from(string_values)) } - LuaValue::String(x) => Ok(Self::from([x.to_string_lossy().to_string()])), + LuaValue::String(x) => Ok(Self::from([x.to_string_lossy().clone()])), _ => Err(mlua::Error::FromLuaConversionError { from: value.type_name(), to: String::from("metric tag value"), diff --git a/lib/vector-core/src/event/metric/tags.rs b/lib/vector-core/src/event/metric/tags.rs index a88531cf812cd..416f239af5ab5 100644 --- a/lib/vector-core/src/event/metric/tags.rs +++ b/lib/vector-core/src/event/metric/tags.rs @@ -94,9 +94,10 @@ type TagValueRef<'a> = Option<&'a str>; /// Tag values for a metric series. This may be empty, a single value, or a set of values. This is /// used to provide the storage for `TagValueSet`. -#[derive(Clone, Configurable, Debug, Eq, PartialEq)] +#[derive(Clone, Configurable, Debug, Eq, PartialEq, Default)] pub enum TagValueSet { /// This represents a set containing no value. + #[default] Empty, /// This represents a set containing a single value. 
This is stored separately to avoid the @@ -111,12 +112,6 @@ pub enum TagValueSet { Set(IndexSet), } -impl Default for TagValueSet { - fn default() -> Self { - Self::Empty - } -} - impl Display for TagValueSet { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { for (i, value) in self.iter().enumerate() { diff --git a/rust-toolchain.toml b/rust-toolchain.toml index 29b051764c651..28130b1dcb7cc 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,3 +1,3 @@ [toolchain] -channel = "1.90" +channel = "1.91" profile = "default" diff --git a/src/sinks/databend/compression.rs b/src/sinks/databend/compression.rs index da47748f44d88..7f086b09c2ad6 100644 --- a/src/sinks/databend/compression.rs +++ b/src/sinks/databend/compression.rs @@ -7,8 +7,10 @@ use vector_lib::configurable::configurable_component; #[configurable(metadata( docs::enum_tag_description = "The compression algorithm to use for sending." ))] +#[derive(Default)] pub enum DatabendCompression { /// No compression. + #[default] None, /// [Gzip][gzip] compression. @@ -16,9 +18,3 @@ pub enum DatabendCompression { /// [gzip]: https://www.gzip.org/ Gzip, } - -impl Default for DatabendCompression { - fn default() -> Self { - Self::None - } -} diff --git a/src/sinks/databend/encoding.rs b/src/sinks/databend/encoding.rs index c634f6ca73e01..07e9df19b5bc2 100644 --- a/src/sinks/databend/encoding.rs +++ b/src/sinks/databend/encoding.rs @@ -75,11 +75,13 @@ impl DatabendEncodingConfig { #[derive(Clone, Debug)] #[serde(rename_all = "SCREAMING_SNAKE_CASE")] #[configurable(metadata(docs::enum_tag_description = "How to handle missing fields for NDJson."))] +#[derive(Default)] pub enum DatabendMissingFieldAS { /// Generates an error if a missing field is encountered. Error, /// Interprets missing fields as NULL values. An error will be generated for non-nullable fields. + #[default] Null, /// Uses the default value of the field for missing fields. 
@@ -89,12 +91,6 @@ pub enum DatabendMissingFieldAS { TypeDefault, } -impl Default for DatabendMissingFieldAS { - fn default() -> Self { - Self::Null - } -} - impl DatabendMissingFieldAS { pub(super) const fn as_str(&self) -> &'static str { match self { diff --git a/src/sinks/elasticsearch/config.rs b/src/sinks/elasticsearch/config.rs index 930d1eed26a1f..f149ea57e1db0 100644 --- a/src/sinks/elasticsearch/config.rs +++ b/src/sinks/elasticsearch/config.rs @@ -46,8 +46,10 @@ pub const DATA_STREAM_TIMESTAMP_KEY: &str = "@timestamp"; #[configurable_component] #[derive(Clone, Debug, Eq, PartialEq)] #[serde(deny_unknown_fields, rename_all = "lowercase")] +#[derive(Default)] pub enum OpenSearchServiceType { /// Elasticsearch or OpenSearch Managed domain + #[default] Managed, /// OpenSearch Serverless collection Serverless, @@ -62,12 +64,6 @@ impl OpenSearchServiceType { } } -impl Default for OpenSearchServiceType { - fn default() -> Self { - Self::Managed - } -} - /// Configuration for the `elasticsearch` sink. #[configurable_component(sink("elasticsearch", "Index observability events in Elasticsearch."))] #[derive(Clone, Debug)] diff --git a/src/sinks/elasticsearch/mod.rs b/src/sinks/elasticsearch/mod.rs index 6748c9fcc0f2e..af5225e75b2a2 100644 --- a/src/sinks/elasticsearch/mod.rs +++ b/src/sinks/elasticsearch/mod.rs @@ -61,9 +61,11 @@ pub enum ElasticsearchAuthConfig { #[configurable_component] #[derive(Clone, Debug, Eq, PartialEq)] #[serde(deny_unknown_fields, rename_all = "snake_case")] +#[derive(Default)] pub enum ElasticsearchMode { /// Ingests documents in bulk, using the bulk API `index` action. #[serde(alias = "normal")] + #[default] Bulk, /// Ingests documents in bulk, using the bulk API `create` action. @@ -75,12 +77,6 @@ pub enum ElasticsearchMode { DataStream, } -impl Default for ElasticsearchMode { - fn default() -> Self { - Self::Bulk - } -} - /// Bulk API actions. 
#[configurable_component] #[derive(Clone, Copy, Debug, Derivative, Eq, Hash, PartialEq)] @@ -295,6 +291,7 @@ impl ElasticsearchCommonMode { #[derive(Clone, Debug, Eq, PartialEq)] #[cfg_attr(feature = "proptest", derive(proptest_derive::Arbitrary))] #[serde(deny_unknown_fields, rename_all = "snake_case")] +#[derive(Default)] pub enum ElasticsearchApiVersion { /// Auto-detect the API version. /// @@ -305,6 +302,7 @@ pub enum ElasticsearchApiVersion { /// incorrect API calls. /// /// [es_version]: https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-state.html#cluster-state-api-path-params + #[default] Auto, /// Use the Elasticsearch 6.x API. V6, @@ -314,12 +312,6 @@ pub enum ElasticsearchApiVersion { V8, } -impl Default for ElasticsearchApiVersion { - fn default() -> Self { - Self::Auto - } -} - #[derive(Debug, Snafu)] #[snafu(visibility(pub))] pub enum ParseError { diff --git a/src/sinks/prometheus/exporter.rs b/src/sinks/prometheus/exporter.rs index f9d64e9b63dc3..6e140a8181de8 100644 --- a/src/sinks/prometheus/exporter.rs +++ b/src/sinks/prometheus/exporter.rs @@ -1186,7 +1186,7 @@ mod tests { }, ); - let metrics = vec![ + let metrics = [ base_summary_metric.clone(), base_summary_metric .clone() @@ -1305,7 +1305,7 @@ mod tests { }, ); - let metrics = vec![ + let metrics = [ base_summary_metric.clone(), base_summary_metric .clone() @@ -1406,7 +1406,7 @@ mod tests { MetricValue::Gauge { value: -10.0 }, ); - let metrics = vec![ + let metrics = [ base_absolute_gauge_metric.clone(), base_absolute_gauge_metric .clone() diff --git a/src/sinks/util/service/concurrency.rs b/src/sinks/util/service/concurrency.rs index 4e414554c5c43..44c982caff0f9 100644 --- a/src/sinks/util/service/concurrency.rs +++ b/src/sinks/util/service/concurrency.rs @@ -18,7 +18,7 @@ use vector_lib::configurable::{ /// /// This can be set either to one of the below enum values or to a positive integer, which denotes /// a fixed concurrency limit. 
-#[derive(Clone, Copy, Debug, Derivative, Eq, PartialEq)] +#[derive(Clone, Copy, Debug, Derivative, Eq, PartialEq, Default)] pub enum Concurrency { /// A fixed concurrency of 1. /// @@ -28,6 +28,7 @@ pub enum Concurrency { /// Concurrency is managed by the [Adaptive Request Concurrency][arc] feature. /// /// [arc]: https://vector.dev/docs/architecture/arc/ + #[default] Adaptive, /// A fixed amount of concurrency is allowed. @@ -47,12 +48,6 @@ impl Serialize for Concurrency { } } -impl Default for Concurrency { - fn default() -> Self { - Self::Adaptive - } -} - impl Concurrency { pub const fn parse_concurrency(&self) -> Option { match self { diff --git a/src/sources/fluent/mod.rs b/src/sources/fluent/mod.rs index 2fe63fa50e6fd..3a979b0957ef8 100644 --- a/src/sources/fluent/mod.rs +++ b/src/sources/fluent/mod.rs @@ -926,7 +926,7 @@ mod tests { 250, 129, 167, 109, 101, 115, 115, 97, 103, 101, 163, 98, 97, 122, ]; - let expected = vec![ + let expected = [ mock_event("foo", "2015-09-07T01:23:04Z"), mock_event("bar", "2015-09-07T01:23:05Z"), mock_event("baz", "2015-09-07T01:23:06Z"), @@ -958,7 +958,7 @@ mod tests { 122, 101, 3, ]; - let expected = vec![ + let expected = [ mock_event("foo", "2015-09-07T01:23:04Z"), mock_event("bar", "2015-09-07T01:23:05Z"), mock_event("baz", "2015-09-07T01:23:06Z"), @@ -992,7 +992,7 @@ mod tests { 101, 115, 115, 97, 103, 101, 163, 102, 111, 111, ]; - let expected = vec![ + let expected = [ mock_event("foo", "2015-09-07T01:23:04Z"), mock_event("bar", "2015-09-07T01:23:05Z"), mock_event("baz", "2015-09-07T01:23:06Z"), @@ -1027,7 +1027,7 @@ mod tests { 164, 103, 122, 105, 112, ]; - let expected = vec![ + let expected = [ mock_event("foo", "2015-09-07T01:23:04Z"), mock_event("bar", "2015-09-07T01:23:05Z"), mock_event("baz", "2015-09-07T01:23:06Z"), diff --git a/src/sources/util/grpc/decompression.rs b/src/sources/util/grpc/decompression.rs index 004f27bec502d..535c938f3cbbe 100644 --- a/src/sources/util/grpc/decompression.rs +++ 
b/src/sources/util/grpc/decompression.rs @@ -66,16 +66,16 @@ impl CompressionScheme { } } +#[derive(Default)] enum State { + #[default] WaitingForHeader, - Forward { overall_len: usize }, - Decompress { remaining: usize }, -} - -impl Default for State { - fn default() -> Self { - Self::WaitingForHeader - } + Forward { + overall_len: usize, + }, + Decompress { + remaining: usize, + }, } fn new_decompressor() -> GzDecoder> { diff --git a/src/transforms/reduce/transform.rs b/src/transforms/reduce/transform.rs index 9f096dc851c71..75bd1c83bd1d3 100644 --- a/src/transforms/reduce/transform.rs +++ b/src/transforms/reduce/transform.rs @@ -451,7 +451,7 @@ group_by = [ "request_id" ] e_5.insert("extra_field", "value1"); e_5.insert("test_end", "yep"); - for event in vec![e_1.into(), e_2.into(), e_3.into(), e_4.into(), e_5.into()] { + for event in [e_1.into(), e_2.into(), e_3.into(), e_4.into(), e_5.into()] { tx.send(event).await.unwrap(); } @@ -676,7 +676,7 @@ max_events = 1 let mut e_3 = LogEvent::from("test 3"); e_3.insert("id", "1"); - for event in vec![e_1.into(), e_2.into(), e_3.into()] { + for event in [e_1.into(), e_2.into(), e_3.into()] { tx.send(event).await.unwrap(); } @@ -729,7 +729,7 @@ max_events = 3 let mut e_6 = LogEvent::from("test 6"); e_6.insert("id", "1"); - for event in vec![ + for event in [ e_1.into(), e_2.into(), e_3.into(), diff --git a/vdev/src/commands/check/rust.rs b/vdev/src/commands/check/rust.rs index 4178c4ad0cccc..d5af700c9bbfb 100644 --- a/vdev/src/commands/check/rust.rs +++ b/vdev/src/commands/check/rust.rs @@ -40,7 +40,7 @@ impl Cli { vec![ "--no-default-features".to_string(), "--features".to_string(), - self.features.join(",").to_string(), + self.features.join(",").clone(), ] }; diff --git a/vdev/src/commands/compose_tests/active_projects.rs b/vdev/src/commands/compose_tests/active_projects.rs index b2698a6cb9266..d0b57c60d0bfd 100644 --- a/vdev/src/commands/compose_tests/active_projects.rs +++ 
b/vdev/src/commands/compose_tests/active_projects.rs @@ -40,7 +40,7 @@ pub(super) fn find_active_environment( // all environments to find a match after applying the same sanitization for env_name in config.environments().keys() { if env_name.replace('.', "-") == sanitized_env_name { - return Some(env_name.to_string()); + return Some(env_name.clone()); } } } diff --git a/vdev/src/testing/integration.rs b/vdev/src/testing/integration.rs index 2db5f1a7f8887..213d7245abf8c 100644 --- a/vdev/src/testing/integration.rs +++ b/vdev/src/testing/integration.rs @@ -203,14 +203,14 @@ impl ComposeTest { match self.config.test { Some(ref test_arg) => { args.push("--test".to_string()); - args.push(test_arg.to_string()); + args.push(test_arg.clone()); } None => args.push("--lib".to_string()), } // Ensure the test_filter args are passed as well if let Some(ref filter) = self.config.test_filter { - args.push(filter.to_string()); + args.push(filter.clone()); } args.extend(extra_args); From 826e8e8c330d07767d32348ba2e0a99b605d6103 Mon Sep 17 00:00:00 2001 From: Thomas Date: Tue, 11 Nov 2025 13:42:09 -0500 Subject: [PATCH 074/227] chore(releasing): Add 0.51.0 known issues (#24211) * Format fixes * Add 0.51.0 known issues * Add dots * Reword log panic known issue * Reword VRL missing fns * Backtick versions * Update website/cue/reference/releases/0.51.0.cue Co-authored-by: Pavlos Rontidis * Fix cue docs fmt --------- Co-authored-by: Pavlos Rontidis --- website/cue/reference/releases/0.49.0.cue | 10 +++++----- website/cue/reference/releases/0.51.0.cue | 22 ++++++++++++++++++++++ 2 files changed, 27 insertions(+), 5 deletions(-) diff --git a/website/cue/reference/releases/0.49.0.cue b/website/cue/reference/releases/0.49.0.cue index bd203d8dbffa1..4916baf18230b 100644 --- a/website/cue/reference/releases/0.49.0.cue +++ b/website/cue/reference/releases/0.49.0.cue @@ -170,7 +170,7 @@ releases: "0.49.0": { type: "enhancement" description: """ The `nats` sink now supports message headers when 
publishing to JetStream. - + It introduces a configurable, templated Nats-Msg-Id header that ensures a unique ID for each message. This enables broker-level deduplication, resulting in stronger delivery guarantees and exactly-once semantics when combined with idempotent consumers. """ contributors: ["benjamin-awd"] @@ -286,9 +286,9 @@ releases: "0.49.0": { vrl_changelog: """ ### 0.26.0 - + #### Breaking Changes & Upgrade Guide - + - The `parse_cef` now trims unnecessary whitespace around escaped values in both headers and extension fields, improving accuracy and reliability when dealing with messy input strings. authors: yjagdale (https://github.com/vectordotdev/vrl/pull/1430) @@ -298,9 +298,9 @@ releases: "0.49.0": { - `encode_lz4` no longer prepends the uncompressed size by default, improving compatibility with standard LZ4 tools. A new `prepend_size` flag restores the old behavior if needed. Also, `decode_lz4` now also accepts `prepend_size` and a `buf_size` option (default: 1MB). authors: jlambatl (https://github.com/vectordotdev/vrl/pull/1447) - + #### New Features - + - Added `haversine` function for calculating [haversine](https://en.wikipedia.org/wiki/Haversine_formula) distance and bearing. authors: esensar Quad9DNS (https://github.com/vectordotdev/vrl/pull/1442) diff --git a/website/cue/reference/releases/0.51.0.cue b/website/cue/reference/releases/0.51.0.cue index b936c466dcbb8..a3ce9f190bab5 100644 --- a/website/cue/reference/releases/0.51.0.cue +++ b/website/cue/reference/releases/0.51.0.cue @@ -6,6 +6,28 @@ releases: "0.51.0": { whats_next: [] + known_issues: [ + """ + The newly added `basename`, `dirname` and `split_path` VRL functions are not accessible + because they weren't properly exposed in the latest VRL release (`0.28.0`). + """, + """ + The newly added `config_reload_rejected` and `config_reloaded` counters are not + emitted. 
These counters will be replaced in the next patch release (`0.51.1`) in favor of + `component_errors_total` with `error_code="reload"` and `reloaded_total` metrics, + respectively. + """, + """ + Blackhole sink periodic statistics messages are incorrectly rate limited. + """, + """ + When running Vector with debug logs enabled (`VECTOR_LOG=debug`), threads panic when log + messages are missing both a message and a rate limit tag. This is known to happen when + the utilization debug log is emitted and in the file server (affecting the `file` and + `kubernetes_logs` sources). + """, + ] + description: """ The Vector team is excited to announce version `0.51.0`! From 927482bb33e3c5c3210c83ccddff43cbc06a2cb6 Mon Sep 17 00:00:00 2001 From: ArunPiduguDD Date: Tue, 11 Nov 2025 13:56:30 -0500 Subject: [PATCH 075/227] enhancement(http_client): Add custom authorization header strategy for http client source (#24201) * Add custom authorization header strategy for http client source * update spell-check + changelog * rename customauth to just custom * remake docs --- ...d_custom_auth_strategy_http.enhancement.md | 3 +++ src/http.rs | 20 +++++++++++++++++++ .../components/sinks/generated/clickhouse.cue | 7 +++++++ .../components/sinks/generated/databend.cue | 7 +++++++ .../components/sinks/generated/http.cue | 7 +++++++ .../components/sinks/generated/loki.cue | 7 +++++++ .../sinks/generated/opentelemetry.cue | 7 +++++++ .../sinks/generated/prometheus_exporter.cue | 7 +++++++ .../components/sinks/generated/websocket.cue | 7 +++++++ .../sources/generated/http_client.cue | 7 +++++++ .../sources/generated/nginx_metrics.cue | 7 +++++++ .../sources/generated/prometheus_scrape.cue | 7 +++++++ .../sources/generated/websocket.cue | 7 +++++++ 13 files changed, 100 insertions(+) create mode 100644 changelog.d/add_custom_auth_strategy_http.enhancement.md diff --git a/changelog.d/add_custom_auth_strategy_http.enhancement.md b/changelog.d/add_custom_auth_strategy_http.enhancement.md new file 
mode 100644 index 0000000000000..f5cc36fcadd78 --- /dev/null +++ b/changelog.d/add_custom_auth_strategy_http.enhancement.md @@ -0,0 +1,3 @@ +Added a new "Custom Authorization" HTTP auth strategy, allowing users to configure a custom HTTP Authorization Header + +authors: arunpidugu diff --git a/src/http.rs b/src/http.rs index 8aaec6d320b2b..e10af20faaa30 100644 --- a/src/http.rs +++ b/src/http.rs @@ -308,6 +308,14 @@ pub enum Auth { /// The AWS service name to use for signing. service: String, }, + + /// Custom Authorization Header Value, will be inserted into the headers as `Authorization: < value >` + Custom { + /// Custom string value of the Authorization header + #[configurable(metadata(docs::examples = "${AUTH_HEADER_VALUE}"))] + #[configurable(metadata(docs::examples = "CUSTOM_PREFIX ${TOKEN}"))] + value: String, + }, } pub trait MaybeAuth: Sized { @@ -346,6 +354,18 @@ impl Auth { Ok(auth) => map.typed_insert(auth), Err(error) => error!(message = "Invalid bearer token.", token = %token, %error), }, + Auth::Custom { value } => { + // The value contains just the value for the Authorization header + // Expected format: "SSWS token123" or "Bearer token123", etc. + match HeaderValue::from_str(value) { + Ok(header_val) => { + map.insert(http::header::AUTHORIZATION, header_val); + } + Err(error) => { + error!(message = "Invalid custom auth header value.", value = %value, %error) + } + } + } #[cfg(feature = "aws-core")] _ => {} } diff --git a/website/cue/reference/components/sinks/generated/clickhouse.cue b/website/cue/reference/components/sinks/generated/clickhouse.cue index ea6373bd3191f..f807b58be475c 100644 --- a/website/cue/reference/components/sinks/generated/clickhouse.cue +++ b/website/cue/reference/components/sinks/generated/clickhouse.cue @@ -188,6 +188,7 @@ generated: components: sinks: clickhouse: configuration: { The bearer token value (OAuth2, JWT, etc.) is passed as-is. 
""" + custom: "Custom Authorization Header Value, will be inserted into the headers as `Authorization: < value >`" } } token: { @@ -202,6 +203,12 @@ generated: components: sinks: clickhouse: configuration: { required: true type: string: examples: ["${USERNAME}", "username"] } + value: { + description: "Custom string value of the Authorization header" + relevant_when: "strategy = \"custom\"" + required: true + type: string: examples: ["${AUTH_HEADER_VALUE}", "CUSTOM_PREFIX ${TOKEN}"] + } } } batch: { diff --git a/website/cue/reference/components/sinks/generated/databend.cue b/website/cue/reference/components/sinks/generated/databend.cue index 12514b0b35711..7c4442fc87ccd 100644 --- a/website/cue/reference/components/sinks/generated/databend.cue +++ b/website/cue/reference/components/sinks/generated/databend.cue @@ -183,6 +183,7 @@ generated: components: sinks: databend: configuration: { The bearer token value (OAuth2, JWT, etc.) is passed as-is. """ + custom: "Custom Authorization Header Value, will be inserted into the headers as `Authorization: < value >`" } } token: { @@ -197,6 +198,12 @@ generated: components: sinks: databend: configuration: { required: true type: string: examples: ["${USERNAME}", "username"] } + value: { + description: "Custom string value of the Authorization header" + relevant_when: "strategy = \"custom\"" + required: true + type: string: examples: ["${AUTH_HEADER_VALUE}", "CUSTOM_PREFIX ${TOKEN}"] + } } } batch: { diff --git a/website/cue/reference/components/sinks/generated/http.cue b/website/cue/reference/components/sinks/generated/http.cue index 68406cb2c5037..cc4da5bf49425 100644 --- a/website/cue/reference/components/sinks/generated/http.cue +++ b/website/cue/reference/components/sinks/generated/http.cue @@ -188,6 +188,7 @@ generated: components: sinks: http: configuration: { The bearer token value (OAuth2, JWT, etc.) is passed as-is. 
""" + custom: "Custom Authorization Header Value, will be inserted into the headers as `Authorization: < value >`" } } token: { @@ -202,6 +203,12 @@ generated: components: sinks: http: configuration: { required: true type: string: examples: ["${USERNAME}", "username"] } + value: { + description: "Custom string value of the Authorization header" + relevant_when: "strategy = \"custom\"" + required: true + type: string: examples: ["${AUTH_HEADER_VALUE}", "CUSTOM_PREFIX ${TOKEN}"] + } } } batch: { diff --git a/website/cue/reference/components/sinks/generated/loki.cue b/website/cue/reference/components/sinks/generated/loki.cue index 3a8754491faa8..933394342d1bd 100644 --- a/website/cue/reference/components/sinks/generated/loki.cue +++ b/website/cue/reference/components/sinks/generated/loki.cue @@ -188,6 +188,7 @@ generated: components: sinks: loki: configuration: { The bearer token value (OAuth2, JWT, etc.) is passed as-is. """ + custom: "Custom Authorization Header Value, will be inserted into the headers as `Authorization: < value >`" } } token: { @@ -202,6 +203,12 @@ generated: components: sinks: loki: configuration: { required: true type: string: examples: ["${USERNAME}", "username"] } + value: { + description: "Custom string value of the Authorization header" + relevant_when: "strategy = \"custom\"" + required: true + type: string: examples: ["${AUTH_HEADER_VALUE}", "CUSTOM_PREFIX ${TOKEN}"] + } } } batch: { diff --git a/website/cue/reference/components/sinks/generated/opentelemetry.cue b/website/cue/reference/components/sinks/generated/opentelemetry.cue index 308f48f0cee6d..897288dbdfe72 100644 --- a/website/cue/reference/components/sinks/generated/opentelemetry.cue +++ b/website/cue/reference/components/sinks/generated/opentelemetry.cue @@ -191,6 +191,7 @@ generated: components: sinks: opentelemetry: configuration: protocol: { The bearer token value (OAuth2, JWT, etc.) is passed as-is. 
""" + custom: "Custom Authorization Header Value, will be inserted into the headers as `Authorization: < value >`" } } token: { @@ -205,6 +206,12 @@ generated: components: sinks: opentelemetry: configuration: protocol: { required: true type: string: examples: ["${USERNAME}", "username"] } + value: { + description: "Custom string value of the Authorization header" + relevant_when: "strategy = \"custom\"" + required: true + type: string: examples: ["${AUTH_HEADER_VALUE}", "CUSTOM_PREFIX ${TOKEN}"] + } } } batch: { diff --git a/website/cue/reference/components/sinks/generated/prometheus_exporter.cue b/website/cue/reference/components/sinks/generated/prometheus_exporter.cue index 237c41151f65f..7549b728cc4e2 100644 --- a/website/cue/reference/components/sinks/generated/prometheus_exporter.cue +++ b/website/cue/reference/components/sinks/generated/prometheus_exporter.cue @@ -200,6 +200,7 @@ generated: components: sinks: prometheus_exporter: configuration: { The bearer token value (OAuth2, JWT, etc.) is passed as-is. """ + custom: "Custom Authorization Header Value, will be inserted into the headers as `Authorization: < value >`" } } token: { @@ -214,6 +215,12 @@ generated: components: sinks: prometheus_exporter: configuration: { required: true type: string: examples: ["${USERNAME}", "username"] } + value: { + description: "Custom string value of the Authorization header" + relevant_when: "strategy = \"custom\"" + required: true + type: string: examples: ["${AUTH_HEADER_VALUE}", "CUSTOM_PREFIX ${TOKEN}"] + } } } buckets: { diff --git a/website/cue/reference/components/sinks/generated/websocket.cue b/website/cue/reference/components/sinks/generated/websocket.cue index 3e901d5ba0276..146bba2dada78 100644 --- a/website/cue/reference/components/sinks/generated/websocket.cue +++ b/website/cue/reference/components/sinks/generated/websocket.cue @@ -183,6 +183,7 @@ generated: components: sinks: websocket: configuration: { The bearer token value (OAuth2, JWT, etc.) 
is passed as-is. """ + custom: "Custom Authorization Header Value, will be inserted into the headers as `Authorization: < value >`" } } token: { @@ -197,6 +198,12 @@ generated: components: sinks: websocket: configuration: { required: true type: string: examples: ["${USERNAME}", "username"] } + value: { + description: "Custom string value of the Authorization header" + relevant_when: "strategy = \"custom\"" + required: true + type: string: examples: ["${AUTH_HEADER_VALUE}", "CUSTOM_PREFIX ${TOKEN}"] + } } } encoding: { diff --git a/website/cue/reference/components/sources/generated/http_client.cue b/website/cue/reference/components/sources/generated/http_client.cue index 22065f2eb5b0d..198914d9f60b4 100644 --- a/website/cue/reference/components/sources/generated/http_client.cue +++ b/website/cue/reference/components/sources/generated/http_client.cue @@ -157,6 +157,7 @@ generated: components: sources: http_client: configuration: { The bearer token value (OAuth2, JWT, etc.) is passed as-is. """ + custom: "Custom Authorization Header Value, will be inserted into the headers as `Authorization: < value >`" } } token: { @@ -171,6 +172,12 @@ generated: components: sources: http_client: configuration: { required: true type: string: examples: ["${USERNAME}", "username"] } + value: { + description: "Custom string value of the Authorization header" + relevant_when: "strategy = \"custom\"" + required: true + type: string: examples: ["${AUTH_HEADER_VALUE}", "CUSTOM_PREFIX ${TOKEN}"] + } } } decoding: { diff --git a/website/cue/reference/components/sources/generated/nginx_metrics.cue b/website/cue/reference/components/sources/generated/nginx_metrics.cue index 0bd5cde5535a0..900b5219784f1 100644 --- a/website/cue/reference/components/sources/generated/nginx_metrics.cue +++ b/website/cue/reference/components/sources/generated/nginx_metrics.cue @@ -162,6 +162,7 @@ generated: components: sources: nginx_metrics: configuration: { The bearer token value (OAuth2, JWT, etc.) 
is passed as-is. """ + custom: "Custom Authorization Header Value, will be inserted into the headers as `Authorization: < value >`" } } token: { @@ -176,6 +177,12 @@ generated: components: sources: nginx_metrics: configuration: { required: true type: string: examples: ["${USERNAME}", "username"] } + value: { + description: "Custom string value of the Authorization header" + relevant_when: "strategy = \"custom\"" + required: true + type: string: examples: ["${AUTH_HEADER_VALUE}", "CUSTOM_PREFIX ${TOKEN}"] + } } } endpoints: { diff --git a/website/cue/reference/components/sources/generated/prometheus_scrape.cue b/website/cue/reference/components/sources/generated/prometheus_scrape.cue index 7ccfafd6676fa..897949bab1ef4 100644 --- a/website/cue/reference/components/sources/generated/prometheus_scrape.cue +++ b/website/cue/reference/components/sources/generated/prometheus_scrape.cue @@ -162,6 +162,7 @@ generated: components: sources: prometheus_scrape: configuration: { The bearer token value (OAuth2, JWT, etc.) is passed as-is. 
""" + custom: "Custom Authorization Header Value, will be inserted into the headers as `Authorization: < value >`" } } token: { @@ -176,6 +177,12 @@ generated: components: sources: prometheus_scrape: configuration: { required: true type: string: examples: ["${USERNAME}", "username"] } + value: { + description: "Custom string value of the Authorization header" + relevant_when: "strategy = \"custom\"" + required: true + type: string: examples: ["${AUTH_HEADER_VALUE}", "CUSTOM_PREFIX ${TOKEN}"] + } } } endpoint_tag: { diff --git a/website/cue/reference/components/sources/generated/websocket.cue b/website/cue/reference/components/sources/generated/websocket.cue index 46a94be6e532a..b81e23e4113b5 100644 --- a/website/cue/reference/components/sources/generated/websocket.cue +++ b/website/cue/reference/components/sources/generated/websocket.cue @@ -157,6 +157,7 @@ generated: components: sources: websocket: configuration: { The bearer token value (OAuth2, JWT, etc.) is passed as-is. """ + custom: "Custom Authorization Header Value, will be inserted into the headers as `Authorization: < value >`" } } token: { @@ -171,6 +172,12 @@ generated: components: sources: websocket: configuration: { required: true type: string: examples: ["${USERNAME}", "username"] } + value: { + description: "Custom string value of the Authorization header" + relevant_when: "strategy = \"custom\"" + required: true + type: string: examples: ["${AUTH_HEADER_VALUE}", "CUSTOM_PREFIX ${TOKEN}"] + } } } connect_timeout_secs: { From 6ee7839a2bafced6bed53b4344c05e4f787032e9 Mon Sep 17 00:00:00 2001 From: Pavlos Rontidis Date: Tue, 11 Nov 2025 18:45:02 -0500 Subject: [PATCH 076/227] fix(website): add missing md file for the incremental_to_absolute transform (#24217) fix(website): add missing md file for the incremental_to_absolute trasnform --- .../transforms/incremental_to_absolute.md | 14 ++++++++++++++ 1 file changed, 14 insertions(+) create mode 100644 
website/content/en/docs/reference/configuration/transforms/incremental_to_absolute.md diff --git a/website/content/en/docs/reference/configuration/transforms/incremental_to_absolute.md b/website/content/en/docs/reference/configuration/transforms/incremental_to_absolute.md new file mode 100644 index 0000000000000..bcbbabc1594aa --- /dev/null +++ b/website/content/en/docs/reference/configuration/transforms/incremental_to_absolute.md @@ -0,0 +1,14 @@ +--- +title: Incremental to Absolute +description: Convert incremental metrics to absolute +component_kind: transform +layout: component +tags: ["metrics", "component", "transform"] +--- + +{{/* +This doc is generated using: + +1. The template in layouts/docs/component.html +2. The relevant CUE data in cue/reference/components/... +*/}} From 673a19cfcf5ecdea70ba1bd332645bab61b81ea5 Mon Sep 17 00:00:00 2001 From: Pavlos Rontidis Date: Wed, 12 Nov 2025 12:17:06 -0500 Subject: [PATCH 077/227] feat(website): new blog post - First year of COSE (#24179) * feat(website): first year of COSE highlights * Native OpenTelemetry Support: more natural language * add 0.51.0 version * remove Automatic TLS certificate rotation section, this feature was reverted * Apply suggestions from code review Co-authored-by: May Lee * Apply suggestions from code review Co-authored-by: May Lee * Update website/content/en/blog/cose-first-year.md Co-authored-by: May Lee --------- Co-authored-by: May Lee --- website/content/en/blog/cose-first-year.md | 172 +++++++++++++++++++++ 1 file changed, 172 insertions(+) create mode 100644 website/content/en/blog/cose-first-year.md diff --git a/website/content/en/blog/cose-first-year.md b/website/content/en/blog/cose-first-year.md new file mode 100644 index 0000000000000..880997ce7fba2 --- /dev/null +++ b/website/content/en/blog/cose-first-year.md @@ -0,0 +1,172 @@ +--- +title: Celebrating COSE's First Year +short: COSE Team - First Year Retrospective +description: Celebrating the first year of the COSE team and 
highlighting our contributions to the Vector open source community +authors: [ "pront" ] +date: "2025-11-04" +badges: + type: retrospective + domains: [ "dev", "community" ] +tags: [ "community", "open source", "cose", "contributions" ] +--- + +## Our journey + +In October 2024, the +[COSE (Community Open Source Engineering)](https://opensource.datadoghq.com/about/#the-community-open-source-engineering-team) +team was formed with the mission to strengthen Vector's open source foundation and improve the developer experience. +Today, we're celebrating our first year by highlighting the contributions we've made to the Vector community. The +COSE team has committed over **550 changes** to Vector, across **8 major releases** (0.43.0 through 0.51.0). + +## By the numbers + +Over the past year, there were: + +- **550+** commits made to Vector +- **8** major releases (0.43.0 through 0.51.0) +- **100+** pull requests merged +- **159** unique community contributors + +## What's new for you + +### Native OpenTelemetry support + +If you're using OpenTelemetry, you can send OTLP data directly to Vector +and get OTLP data out, without needing custom transforms or format conversions. Whether you're collecting logs, +metrics, or traces, Vector now integrates seamlessly into your OpenTelemetry stack as a collector, aggregator, or +transformation layer. + +**What this means for you:** Simpler configurations, faster setup, and native compatibility with the OpenTelemetry +ecosystem you're already using. + +Read more in our [OTLP Support highlight]({{< ref "/highlights/2025-09-23-otlp-support.md" >}}). + +### More reliable operations + +We've made Vector more robust in production: + +- **Smarter configuration reloads**: The `--watch-config` flag now watches external VRL files and enrichment tables + in addition to configuration files, automatically reloading when the file or table has been updated. 
We've also fixed crashes during enrichment + table reloads and improved file event handling to properly track file changes, even during save operations in the editor. +- **More accurate metrics**: Fixed issues with CPU utilization reporting and metric reliability during reloads. + +**What this means for you:** Easier to manage and have more confidence in your observability +pipeline. + +### Better documentation + +We know good documentation is crucial when you're setting up or troubleshooting Vector, so the following documentation has been added: + +- **[Debugging Guide]({{< ref "/guides/developer/debugging" >}})**: Comprehensive troubleshooting guide with + step-by-step instructions, common issues, and debugging techniques +- **[Config Autocompletion Guide]({{< ref "/guides/developer/config-autocompletion" >}})**: Set up your IDE to get + autocomplete help while writing Vector configurations +- **Improved component output documentation**: Clearer explanations of what data types each component produces +- **Reorganized AWS Guides**: Better organized and more comprehensive guides for AWS integrations +- **Enhanced VRL Documentation**: More examples and clearer explanations for VRL functions, including new functions + like IPCrypt, xxhash, and path manipulation + +**What this means for you:** Faster onboarding, easier troubleshooting, and less time hunting for answers. 
+ +### More powerful VRL functions + +We've expanded VRL (Vector Remap Language) with new capabilities for your data transformations: + +- **CBOR parsing**: Work with CBOR-encoded data in your pipelines +- **LZ4 compression**: Compress and decompress data on the fly +- **Character set encoding**: Handle different text encodings seamlessly +- **Better duration parsing**: Parse complex durations like `1h2m3s` easily +- **Shannon entropy calculations**: Analyze randomness in your data for security use cases + +**What this means for you:** More data transformation options without having to write custom code. + +### We're proud of these community contributions + +We helped bring these community contributions to production (listed alphabetically): + +- **Incremental to absolute transform** ([@DerekZhang](https://github.com/DerekZhang)): Convert incremental metrics + to absolute values with intelligent caching +- **Keep sink** ([@sainad2222](https://github.com/sainad2222)): Send alerts and events to the Keep sink for incident + management +- **Memory enrichment table** ([@esensar](https://github.com/esensar), [@Quad9DNS](https://github.com/Quad9DNS)): Use + Vector for caching and key-value storage, with per-event TTL +- **MQTT source** ([@StormStake](https://github.com/StormStake)): Ingest data from MQTT brokers, helpful for IoT + and edge computing use cases +- **NATS JetStream support** ([@benjamindornel](https://github.com/benjamindornel)): Full JetStream support for + reliable NATS messaging +- **Okta source** ([@JohnSonnenschein](https://github.com/JohnSonnenschein)): Collect security and audit logs + directly from Okta +- **Postgres sink** ([@jorgehermo9](https://github.com/jorgehermo9)): Write logs, metrics, and traces directly to + PostgreSQL databases +- **Redis Sentinel support** ([@JakeHalaska](https://github.com/JakeHalaska)): High-availability Redis configurations + now supported +- **Template URI for HTTP sink** 
([@jorgehermo9](https://github.com/jorgehermo9)): Dynamically route HTTP requests + based on event data +- **WebSocket Server sink** ([@esensar](https://github.com/esensar)): Send data to WebSocket clients with ACK + support and buffering +- **WebSocket source** ([@benjamindornel](https://github.com/benjamindornel)): Send data from WebSocket + connections in real time +- **Window transform** ([@zvirblis](https://github.com/zvirblis)): Aggregate events over time windows for temporal + analysis + +**What this means for you:** More integration options, new data sources, and flexible routing capabilities to fit +Vector into your existing infrastructure. + +## Improving Vector development + +While our primary focus is making Vector better for you, we've also invested in making it easier for you to +contribute to Vector. Despite being a small team, we've worked hard to review pull requests quickly and provide +thoughtful feedback to encourage community development. We believe a healthy contributor community means better +software for everyone. + +This benefits you through: + +- **Faster releases**: Contributors get feedback 60% faster, meaning features and fixes reach you sooner +- **Higher quality**: Better testing infrastructure and thorough code review means fewer bugs in releases +- **More contributors**: When contributing is easier and more welcoming, more people can help improve Vector + +_Want to contribute to Vector? 
Check out our:_ + +- _[Contribution Guide](https://github.com/vectordotdev/vector/blob/master/CONTRIBUTING.md)_ +- _[Debugging Guide]({{< ref "/guides/developer/debugging" >}})_ +- _[Support page]({{< ref "/community" >}})_ + +## Looking ahead + +As we enter our second year, our focus remains on the themes that have guided us so far: + +- **Building a welcoming community**: Making it easier and more rewarding for everyone to contribute to Vector +- **Stability and reliability**: Continuously improving Vector's production readiness and operational experience +- **Performance improvements**: Finding opportunities to make Vector faster and more efficient +- **Better learning resources**: Expanding documentation and guides to help both new and experienced users +- **Supporting contributors**: Helping community ideas and contributions make their way into Vector + +## Thank you + +Thank you to the Vector community for your support, feedback, and contributions. The open source community is what +makes Vector great, and we're honored to be part of it. + +Here's to another year of building great open source software together! 
🚀 + +--- + +## Appendix + +For those interested in the technical details: + +### Release Notes + +- [v0.43.0 Release Notes]({{< ref "/releases/0.43.0" >}}) - November 2024 +- [v0.44.0 Release Notes]({{< ref "/releases/0.44.0" >}}) - January 2025 +- [v0.45.0 Release Notes]({{< ref "/releases/0.45.0" >}}) - February 2025 +- [v0.46.0 Release Notes]({{< ref "/releases/0.46.0" >}}) - April 2025 +- [v0.47.0 Release Notes]({{< ref "/releases/0.47.0" >}}) - May 2025 +- [v0.48.0 Release Notes]({{< ref "/releases/0.48.0" >}}) - June 2025 +- [v0.49.0 Release Notes]({{< ref "/releases/0.49.0" >}}) - August 2025 +- [v0.50.0 Release Notes]({{< ref "/releases/0.50.0" >}}) - September 2025 +- [v0.51.0 Release Notes]({{< ref "/releases/0.51.0" >}}) - November 2025 + +### VRL functions added + +For a complete list of VRL functions added during this period, see +the [VRL Changelog](https://github.com/vectordotdev/vrl/blob/main/CHANGELOG.md). From 068475e18e016c1fe72ea4042d1e58bbd4726c5f Mon Sep 17 00:00:00 2001 From: spencerho777 <42759103+spencerho777@users.noreply.github.com> Date: Wed, 12 Nov 2025 11:24:30 -0800 Subject: [PATCH 078/227] feat(trace_to_log transform): introduces transform that converts traces to logs (#24168) * adds ability to transform traces to logs * added changelog * Address comments in original pr * Update Cargo.toml Co-authored-by: Pavlos Rontidis * add concurrency * simplify outputs to naively copy over fields while we finalize internal tracing model * Update changelog.d/add-trace-to-log-transform.feature.md * fix lint * generate component * Add cue file for trace to log transform * newline * Add to description to address commetns * more docs * md file * ./scripts/cue.sh fmt && CI=true make check-docs --------- Co-authored-by: Ramon Iglesias Co-authored-by: Pavlos Rontidis --- Cargo.toml | 2 + .../add-trace-to-log-transform.feature.md | 3 + lib/vector-core/src/event/trace.rs | 6 + src/transforms/mod.rs | 2 + src/transforms/trace_to_log.rs | 135 
++++++++++++++++++ .../configuration/transforms/trace_to_log.md | 14 ++ website/cue/reference.cue | 7 +- .../transforms/generated/trace_to_log.cue | 3 + .../components/transforms/trace_to_log.cue | 85 +++++++++++ 9 files changed, 256 insertions(+), 1 deletion(-) create mode 100644 changelog.d/add-trace-to-log-transform.feature.md create mode 100644 src/transforms/trace_to_log.rs create mode 100644 website/content/en/docs/reference/configuration/transforms/trace_to_log.md create mode 100644 website/cue/reference/components/transforms/generated/trace_to_log.cue create mode 100644 website/cue/reference/components/transforms/trace_to_log.cue diff --git a/Cargo.toml b/Cargo.toml index cbeca874998f5..8e0d690724f7a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -728,6 +728,7 @@ transforms-logs = [ "transforms-exclusive-route", "transforms-sample", "transforms-throttle", + "transforms-trace_to_log" ] transforms-metrics = [ "transforms-aggregate", @@ -757,6 +758,7 @@ transforms-exclusive-route = [] transforms-sample = ["transforms-impl-sample"] transforms-tag_cardinality_limit = ["dep:bloomy", "dep:hashbrown"] transforms-throttle = ["dep:governor"] +transforms-trace_to_log = [] # Implementations of transforms transforms-impl-sample = [] diff --git a/changelog.d/add-trace-to-log-transform.feature.md b/changelog.d/add-trace-to-log-transform.feature.md new file mode 100644 index 0000000000000..161fea308624a --- /dev/null +++ b/changelog.d/add-trace-to-log-transform.feature.md @@ -0,0 +1,3 @@ +Introduced `trace_to_log` transform that allows converting traces to logs. 
+ +authors: huevosabio diff --git a/lib/vector-core/src/event/trace.rs b/lib/vector-core/src/event/trace.rs index 276bac75668b5..026751cb98816 100644 --- a/lib/vector-core/src/event/trace.rs +++ b/lib/vector-core/src/event/trace.rs @@ -131,6 +131,12 @@ impl From<LogEvent> for TraceEvent { } } +impl From<TraceEvent> for LogEvent { + fn from(trace: TraceEvent) -> Self { + trace.0 + } +} + impl From<ObjectMap> for TraceEvent { fn from(map: ObjectMap) -> Self { Self(map.into()) diff --git a/src/transforms/mod.rs b/src/transforms/mod.rs index 3d5bd83a7a75b..e4f6671828807 100644 --- a/src/transforms/mod.rs +++ b/src/transforms/mod.rs @@ -31,6 +31,8 @@ pub mod route; pub mod tag_cardinality_limit; #[cfg(feature = "transforms-throttle")] pub mod throttle; +#[cfg(feature = "transforms-trace_to_log")] +pub mod trace_to_log; #[cfg(feature = "transforms-window")] pub mod window; diff --git a/src/transforms/trace_to_log.rs b/src/transforms/trace_to_log.rs new file mode 100644 index 0000000000000..7b385bb4cc222 --- /dev/null +++ b/src/transforms/trace_to_log.rs @@ -0,0 +1,135 @@ +use vector_lib::config::{LogNamespace, clone_input_definitions}; +use vector_lib::configurable::configurable_component; + +use crate::config::OutputId; +use crate::{ + config::{DataType, GenerateConfig, Input, TransformConfig, TransformContext, TransformOutput}, + event::{Event, LogEvent}, + schema::Definition, + transforms::{FunctionTransform, OutputBuffer, Transform}, +}; + +/// Configuration for the `trace_to_log` transform. +/// +/// This is a naive implementation that simply converts a `TraceEvent` to a `LogEvent`. +/// The conversion preserves all trace attributes (span IDs, trace IDs, etc.) as log fields without modification. +/// This will need to be updated when Vector's trace data model is finalized to properly handle trace-specific semantics and field mappings.
+#[configurable_component(transform("trace_to_log", "Convert trace events to log events."))] +#[derive(Clone, Debug, Default)] +#[serde(deny_unknown_fields)] +pub struct TraceToLogConfig { + /// The namespace to use for logs. This overrides the global setting. + #[serde(default)] + #[configurable(metadata(docs::hidden))] + pub log_namespace: Option<bool>, +} + +impl GenerateConfig for TraceToLogConfig { + fn generate_config() -> toml::Value { + toml::Value::try_from(Self { + log_namespace: None, + }) + .unwrap() + } +} + +#[async_trait::async_trait] +#[typetag::serde(name = "trace_to_log")] +impl TransformConfig for TraceToLogConfig { + async fn build(&self, _context: &TransformContext) -> crate::Result<Transform> { + Ok(Transform::function(TraceToLog)) + } + + fn enable_concurrency(&self) -> bool { + true + } + + fn input(&self) -> Input { + Input::trace() + } + + fn outputs( + &self, + _: vector_lib::enrichment::TableRegistry, + input_definitions: &[(OutputId, Definition)], + _: LogNamespace, + ) -> Vec<TransformOutput> { + vec![TransformOutput::new( + DataType::Log, + clone_input_definitions(input_definitions), + )] + } +} + +#[derive(Clone, Debug)] +pub struct TraceToLog; + +impl FunctionTransform for TraceToLog { + fn transform(&mut self, output: &mut OutputBuffer, event: Event) { + if let Event::Trace(trace) = event { + output.push(Event::Log(LogEvent::from(trace))); + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::test_util::components::assert_transform_compliance; + use crate::transforms::test::create_topology; + use tokio::sync::mpsc; + use tokio_stream::wrappers::ReceiverStream; + use vector_lib::event::TraceEvent; + + #[test] + fn generate_config() { + crate::test_util::test_generate_config::<TraceToLogConfig>(); + } + + async fn do_transform(trace: TraceEvent) -> Option<LogEvent> { + assert_transform_compliance(async move { + let config = TraceToLogConfig { + log_namespace: Some(false), + }; + let (tx, rx) = mpsc::channel(1); + let (topology, mut out) = create_topology(ReceiverStream::new(rx),
config).await; + + tx.send(trace.into()).await.unwrap(); + + let result = out.recv().await; + + drop(tx); + topology.stop().await; + assert_eq!(out.recv().await, None); + + result + }) + .await + .map(|e| e.into_log()) + } + + #[tokio::test] + async fn transform_trace() { + use vrl::btreemap; + + let trace = TraceEvent::from(btreemap! { + "span_id" => "abc123", + "trace_id" => "xyz789", + "span_name" => "test-span", + "service" => "my-service", + }); + + let (expected_map, _) = trace.clone().into_parts(); + + let log = do_transform(trace).await.unwrap(); + let (actual_value, _) = log.into_parts(); + let actual_map = actual_value + .into_object() + .expect("log value should be an object"); + + assert_eq!( + actual_map, expected_map, + "Trace data fields should be preserved" + ); + } +} diff --git a/website/content/en/docs/reference/configuration/transforms/trace_to_log.md b/website/content/en/docs/reference/configuration/transforms/trace_to_log.md new file mode 100644 index 0000000000000..9f8e701304fb0 --- /dev/null +++ b/website/content/en/docs/reference/configuration/transforms/trace_to_log.md @@ -0,0 +1,14 @@ +--- +title: Trace to Log +description: Convert trace events to log events +component_kind: transform +layout: component +tags: ["trace to log", "convert", "component", "transform"] +--- + +{{/* +This doc is generated using: + +1. The template in layouts/docs/component.html +2. The relevant CUE data in cue/reference/components/... +*/}} diff --git a/website/cue/reference.cue b/website/cue/reference.cue index db5563b61cd37..05d22f27e1f1a 100644 --- a/website/cue/reference.cue +++ b/website/cue/reference.cue @@ -87,7 +87,8 @@ _values: { #Event: { {log?: #LogEvent} | - {metric?: #MetricEvent} + {metric?: #MetricEvent} | + {trace?: #TraceEvent} } // `#EventType` represents one of Vector's supported event types. @@ -154,6 +155,10 @@ _values: { ... } +#TraceEvent: { + ... 
+} + #Map: [string]: string #MetricEvent: { diff --git a/website/cue/reference/components/transforms/generated/trace_to_log.cue b/website/cue/reference/components/transforms/generated/trace_to_log.cue new file mode 100644 index 0000000000000..eaf087edfadd6 --- /dev/null +++ b/website/cue/reference/components/transforms/generated/trace_to_log.cue @@ -0,0 +1,3 @@ +package metadata + +generated: components: transforms: trace_to_log: configuration: {} diff --git a/website/cue/reference/components/transforms/trace_to_log.cue b/website/cue/reference/components/transforms/trace_to_log.cue new file mode 100644 index 0000000000000..a6780673dcc8f --- /dev/null +++ b/website/cue/reference/components/transforms/trace_to_log.cue @@ -0,0 +1,85 @@ +package metadata + +components: transforms: trace_to_log: { + title: "Trace to Log" + + description: """ + Converts a trace event into a log event. This preserves all trace + attributes (span IDs, trace IDs, etc.) as log fields without modification. + This transformation does not add any new fields, nor does it validate the + output events are valid traces. + """ + + classes: { + commonly_used: false + development: "beta" + egress_method: "stream" + stateful: false + } + + features: { + convert: {} + } + + support: { + requirements: [] + warnings: [] + notices: [] + } + + configuration: generated.components.transforms.trace_to_log.configuration + + input: { + logs: false + metrics: null + traces: true + } + + output: { + logs: "": { + description: "The converted `log` event." 
+ } + } + + examples: [ + { + title: "Trace to Log" + + configuration: {} + + input: [ + { + trace: { + span_id: "abc123" + trace_id: "xyz789" + span_name: "test-span" + service: "my-service" + } + }, + ] + + output: [ + { + log: { + span_id: "abc123" + trace_id: "xyz789" + span_name: "test-span" + service: "my-service" + } + }, + ] + }, + ] + + how_it_works: { + conversion_behavior: { + title: "Conversion Behavior" + body: """ + The trace to log conversion is a straightforward transformation that takes all fields + from the trace event and preserves them as fields in the resulting log event. This includes + span IDs, trace IDs, span names, and any other trace attributes. The conversion does not modify + or restructure the data, making it a simple pass-through with a type change from trace to log. + """ + } + } +} From 44f34e823699db88dc382f0da5c23e0734181438 Mon Sep 17 00:00:00 2001 From: Pavlos Rontidis Date: Wed, 12 Nov 2025 14:29:13 -0500 Subject: [PATCH 079/227] chore(dev): group imports (#24219) --- lib/codecs/src/encoding/serializer.rs | 26 ++++++++++--------- .../splunk_hec/common/acknowledgements.rs | 4 +-- tests/e2e/opentelemetry/logs/mod.rs | 7 ++--- tests/e2e/opentelemetry/metrics/mod.rs | 12 ++++----- vdev/src/commands/build/publish_metadata.rs | 8 +++--- vdev/src/commands/check/markdown.rs | 3 +-- vdev/src/commands/check/rust.rs | 5 +++- vdev/src/commands/crate_versions.rs | 5 +++- vdev/src/commands/e2e/show.rs | 6 +++-- vdev/src/commands/info.rs | 3 +-- vdev/src/commands/integration/show.rs | 6 +++-- vdev/src/commands/meta/install_git_hooks.rs | 3 +-- vdev/src/commands/release/github.rs | 3 +-- vdev/src/commands/release/homebrew.rs | 3 +-- vdev/src/commands/release/prepare.rs | 17 ++++++------ vdev/src/commands/release/push.rs | 3 +-- vdev/src/commands/test.rs | 3 +-- vdev/src/testing/build.rs | 3 +-- vdev/src/testing/runner.rs | 4 +-- 19 files changed, 65 insertions(+), 59 deletions(-) diff --git a/lib/codecs/src/encoding/serializer.rs 
b/lib/codecs/src/encoding/serializer.rs index eef088fca4b72..fdc8397deca5d 100644 --- a/lib/codecs/src/encoding/serializer.rs +++ b/lib/codecs/src/encoding/serializer.rs @@ -4,20 +4,22 @@ use bytes::BytesMut; use vector_config::configurable_component; use vector_core::{config::DataType, event::Event, schema}; -use super::chunking::Chunker; -use super::format::{ - AvroSerializer, AvroSerializerConfig, AvroSerializerOptions, CefSerializer, - CefSerializerConfig, CsvSerializer, CsvSerializerConfig, GelfSerializer, GelfSerializerConfig, - JsonSerializer, JsonSerializerConfig, LogfmtSerializer, LogfmtSerializerConfig, - NativeJsonSerializer, NativeJsonSerializerConfig, NativeSerializer, NativeSerializerConfig, - ProtobufSerializer, ProtobufSerializerConfig, RawMessageSerializer, RawMessageSerializerConfig, - TextSerializer, TextSerializerConfig, -}; #[cfg(feature = "opentelemetry")] use super::format::{OtlpSerializer, OtlpSerializerConfig}; -use super::framing::{ - CharacterDelimitedEncoderConfig, FramingConfig, LengthDelimitedEncoderConfig, - VarintLengthDelimitedEncoderConfig, +use super::{ + chunking::Chunker, + format::{ + AvroSerializer, AvroSerializerConfig, AvroSerializerOptions, CefSerializer, + CefSerializerConfig, CsvSerializer, CsvSerializerConfig, GelfSerializer, + GelfSerializerConfig, JsonSerializer, JsonSerializerConfig, LogfmtSerializer, + LogfmtSerializerConfig, NativeJsonSerializer, NativeJsonSerializerConfig, NativeSerializer, + NativeSerializerConfig, ProtobufSerializer, ProtobufSerializerConfig, RawMessageSerializer, + RawMessageSerializerConfig, TextSerializer, TextSerializerConfig, + }, + framing::{ + CharacterDelimitedEncoderConfig, FramingConfig, LengthDelimitedEncoderConfig, + VarintLengthDelimitedEncoderConfig, + }, }; /// Serializer configuration. 
diff --git a/src/sinks/splunk_hec/common/acknowledgements.rs b/src/sinks/splunk_hec/common/acknowledgements.rs index 6975653a8b2fc..e0df41e9660e9 100644 --- a/src/sinks/splunk_hec/common/acknowledgements.rs +++ b/src/sinks/splunk_hec/common/acknowledgements.rs @@ -1,8 +1,8 @@ use hyper::Body; use serde::{Deserialize, Serialize}; -use std::io::Write; use std::{ collections::HashMap, + io::Write, num::{NonZeroU8, NonZeroU64}, sync::Arc, time::Duration, @@ -11,7 +11,6 @@ use tokio::sync::{mpsc::Receiver, oneshot::Sender}; use vector_lib::{configurable::configurable_component, event::EventStatus}; use super::service::{HttpRequestBuilder, MetadataFields}; -use crate::sinks::util::Compressor; use crate::{ config::AcknowledgementsConfig, http::HttpClient, @@ -19,6 +18,7 @@ use crate::{ SplunkIndexerAcknowledgementAPIError, SplunkIndexerAcknowledgementAckAdded, SplunkIndexerAcknowledgementAcksRemoved, }, + sinks::util::Compressor, }; /// Splunk HEC acknowledgement configuration. diff --git a/tests/e2e/opentelemetry/logs/mod.rs b/tests/e2e/opentelemetry/logs/mod.rs index 60c6337f7a5b0..c4cdd90c5798a 100644 --- a/tests/e2e/opentelemetry/logs/mod.rs +++ b/tests/e2e/opentelemetry/logs/mod.rs @@ -1,6 +1,7 @@ -use vector_lib::opentelemetry::proto::LOGS_REQUEST_MESSAGE_TYPE; -use vector_lib::opentelemetry::proto::collector::logs::v1::ExportLogsServiceRequest; -use vector_lib::opentelemetry::proto::common::v1::any_value::Value as AnyValueEnum; +use vector_lib::opentelemetry::proto::{ + LOGS_REQUEST_MESSAGE_TYPE, collector::logs::v1::ExportLogsServiceRequest, + common::v1::any_value::Value as AnyValueEnum, +}; use crate::opentelemetry::{ assert_service_name_with, parse_line_to_export_type_request, read_file_helper, diff --git a/tests/e2e/opentelemetry/metrics/mod.rs b/tests/e2e/opentelemetry/metrics/mod.rs index 6bc97a62dedf7..e7f5a5aae5320 100644 --- a/tests/e2e/opentelemetry/metrics/mod.rs +++ b/tests/e2e/opentelemetry/metrics/mod.rs @@ -2,12 +2,12 @@ use crate::opentelemetry::{ 
assert_service_name_with, parse_line_to_export_type_request, read_file_helper, }; -use vector_lib::opentelemetry::proto::METRICS_REQUEST_MESSAGE_TYPE; -use vector_lib::opentelemetry::proto::collector::metrics::v1::ExportMetricsServiceRequest; -use vector_lib::opentelemetry::proto::common::v1::KeyValue; -use vector_lib::opentelemetry::proto::common::v1::any_value::Value as AnyValueEnum; -use vector_lib::opentelemetry::proto::metrics::v1::metric::Data as MetricData; -use vector_lib::opentelemetry::proto::metrics::v1::{Gauge, Sum}; +use vector_lib::opentelemetry::proto::{ + METRICS_REQUEST_MESSAGE_TYPE, + collector::metrics::v1::ExportMetricsServiceRequest, + common::v1::{KeyValue, any_value::Value as AnyValueEnum}, + metrics::v1::{Gauge, Sum, metric::Data as MetricData}, +}; const EXPECTED_METRIC_COUNT: usize = 400; // 200 via gRPC + 200 via HTTP (50 of each type: Gauge, Sum, Histogram, ExponentialHistogram) diff --git a/vdev/src/commands/build/publish_metadata.rs b/vdev/src/commands/build/publish_metadata.rs index c9fd8e1222998..4f6de426228dc 100644 --- a/vdev/src/commands/build/publish_metadata.rs +++ b/vdev/src/commands/build/publish_metadata.rs @@ -2,9 +2,11 @@ use anyhow::Result; use chrono::prelude::*; use crate::utils::{cargo, git}; -use std::env; -use std::fs::OpenOptions; -use std::io::{self, Write}; +use std::{ + env, + fs::OpenOptions, + io::{self, Write}, +}; /// Setting necessary metadata for our publish workflow in CI. 
/// diff --git a/vdev/src/commands/check/markdown.rs b/vdev/src/commands/check/markdown.rs index af2f84db51ff5..002ed62908444 100644 --- a/vdev/src/commands/check/markdown.rs +++ b/vdev/src/commands/check/markdown.rs @@ -1,7 +1,6 @@ use anyhow::Result; -use crate::app; -use crate::utils::git::git_ls_files; +use crate::{app, utils::git::git_ls_files}; /// Check that markdown is styled properly #[derive(clap::Args, Debug)] diff --git a/vdev/src/commands/check/rust.rs b/vdev/src/commands/check/rust.rs index d5af700c9bbfb..cdd6e45381683 100644 --- a/vdev/src/commands/check/rust.rs +++ b/vdev/src/commands/check/rust.rs @@ -1,7 +1,10 @@ use anyhow::Result; use std::ffi::OsString; -use crate::{app, utils::command::ChainArgs as _, utils::git}; +use crate::{ + app, + utils::{command::ChainArgs as _, git}, +}; /// Check the Rust code for errors #[derive(clap::Args, Debug)] diff --git a/vdev/src/commands/crate_versions.rs b/vdev/src/commands/crate_versions.rs index fd7cbcc4edd92..9b9fa3f6fea04 100644 --- a/vdev/src/commands/crate_versions.rs +++ b/vdev/src/commands/crate_versions.rs @@ -1,4 +1,7 @@ -use std::{collections::HashMap, collections::HashSet, process::Command}; +use std::{ + collections::{HashMap, HashSet}, + process::Command, +}; use anyhow::Result; use clap::Args; diff --git a/vdev/src/commands/e2e/show.rs b/vdev/src/commands/e2e/show.rs index 3094dbcdef9e6..74a865688fbc2 100644 --- a/vdev/src/commands/e2e/show.rs +++ b/vdev/src/commands/e2e/show.rs @@ -1,5 +1,7 @@ -use crate::commands::compose_tests::show::{exec, exec_environments_only}; -use crate::testing::config::E2E_TESTS_DIR; +use crate::{ + commands::compose_tests::show::{exec, exec_environments_only}, + testing::config::E2E_TESTS_DIR, +}; use anyhow::Result; use clap::Args; diff --git a/vdev/src/commands/info.rs b/vdev/src/commands/info.rs index de8091c779c51..213d4901ce390 100644 --- a/vdev/src/commands/info.rs +++ b/vdev/src/commands/info.rs @@ -1,8 +1,7 @@ use anyhow::Result; use clap::Args; -use 
crate::testing::docker::CONTAINER_TOOL; -use crate::{app, utils::platform}; +use crate::{app, testing::docker::CONTAINER_TOOL, utils::platform}; /// Show `vdev` command configuration #[derive(Args, Debug)] diff --git a/vdev/src/commands/integration/show.rs b/vdev/src/commands/integration/show.rs index 933eb43b385dd..833a2f3bbf54b 100644 --- a/vdev/src/commands/integration/show.rs +++ b/vdev/src/commands/integration/show.rs @@ -1,5 +1,7 @@ -use crate::commands::compose_tests::show::{exec, exec_environments_only}; -use crate::testing::config::INTEGRATION_TESTS_DIR; +use crate::{ + commands::compose_tests::show::{exec, exec_environments_only}, + testing::config::INTEGRATION_TESTS_DIR, +}; use anyhow::Result; use clap::Args; diff --git a/vdev/src/commands/meta/install_git_hooks.rs b/vdev/src/commands/meta/install_git_hooks.rs index 2182cf87376d1..c12a9885e3156 100644 --- a/vdev/src/commands/meta/install_git_hooks.rs +++ b/vdev/src/commands/meta/install_git_hooks.rs @@ -1,6 +1,5 @@ use anyhow::{Ok, Result}; -use std::fs::File; -use std::io::Write; +use std::{fs::File, io::Write}; #[cfg(unix)] use std::os::unix::fs::PermissionsExt; diff --git a/vdev/src/commands/release/github.rs b/vdev/src/commands/release/github.rs index 0adc93a22e9f0..58bff7fde8f70 100644 --- a/vdev/src/commands/release/github.rs +++ b/vdev/src/commands/release/github.rs @@ -1,5 +1,4 @@ -use crate::app::CommandExt as _; -use crate::utils::cargo; +use crate::{app::CommandExt as _, utils::cargo}; use anyhow::{Ok, Result, anyhow}; use glob::glob; use std::process::Command; diff --git a/vdev/src/commands/release/homebrew.rs b/vdev/src/commands/release/homebrew.rs index 69deeb657401e..cf36b8f75f834 100644 --- a/vdev/src/commands/release/homebrew.rs +++ b/vdev/src/commands/release/homebrew.rs @@ -1,8 +1,7 @@ use crate::utils::git; use anyhow::Result; use sha2::Digest; -use std::path::Path; -use std::{env, fs}; +use std::{env, fs, path::Path}; use tempfile::TempDir; /// Releases latest version to the 
vectordotdev homebrew tap diff --git a/vdev/src/commands/release/prepare.rs b/vdev/src/commands/release/prepare.rs index f5b38de9fb073..12d9f2bb910ab 100644 --- a/vdev/src/commands/release/prepare.rs +++ b/vdev/src/commands/release/prepare.rs @@ -1,18 +1,17 @@ #![allow(clippy::print_stdout)] #![allow(clippy::print_stderr)] -use crate::utils::command::run_command; -use crate::utils::{git, paths}; +use crate::utils::{command::run_command, git, paths}; use anyhow::{Context, Result, anyhow}; use reqwest::blocking::Client; use semver::Version; -use std::fs::File; -use std::io::BufRead; -use std::io::BufReader; -use std::io::Write; -use std::path::{Path, PathBuf}; -use std::process::Command; -use std::{env, fs}; +use std::{ + env, fs, + fs::File, + io::{BufRead, BufReader, Write}, + path::{Path, PathBuf}, + process::Command, +}; use toml::Value; use toml_edit::DocumentMut; diff --git a/vdev/src/commands/release/push.rs b/vdev/src/commands/release/push.rs index 12c41f8e345a5..32dd588efb24d 100644 --- a/vdev/src/commands/release/push.rs +++ b/vdev/src/commands/release/push.rs @@ -1,8 +1,7 @@ use anyhow::Result; use clap::Args; -use crate::app; -use crate::utils::git; +use crate::{app, utils::git}; use itertools::Itertools; /// Pushes new versions produced by `make release` to the repository diff --git a/vdev/src/commands/test.rs b/vdev/src/commands/test.rs index e76d6463f903d..c00274cfcfccf 100644 --- a/vdev/src/commands/test.rs +++ b/vdev/src/commands/test.rs @@ -2,8 +2,7 @@ use anyhow::Result; use clap::Args; use std::collections::BTreeMap; -use crate::testing::runner::get_agent_test_runner; -use crate::utils::platform; +use crate::{testing::runner::get_agent_test_runner, utils::platform}; /// Execute tests #[derive(Args, Debug)] diff --git a/vdev/src/testing/build.rs b/vdev/src/testing/build.rs index 384accce47da7..7b15f0c513d5f 100644 --- a/vdev/src/testing/build.rs +++ b/vdev/src/testing/build.rs @@ -2,11 +2,10 @@ use std::{path::Path, process::Command}; use 
anyhow::Result; -use crate::testing::test_runner_dockerfile; use crate::{ app, app::CommandExt, - testing::{config::RustToolchainConfig, docker::docker_command}, + testing::{config::RustToolchainConfig, docker::docker_command, test_runner_dockerfile}, utils::{ self, environment::{Environment, extract_present}, diff --git a/vdev/src/testing/runner.rs b/vdev/src/testing/runner.rs index 92f57bfef50bb..cce1c28d2d251 100644 --- a/vdev/src/testing/runner.rs +++ b/vdev/src/testing/runner.rs @@ -2,15 +2,15 @@ use anyhow::Result; use std::{collections::HashSet, env, process::Command}; use super::config::{IntegrationRunnerConfig, RustToolchainConfig}; -use crate::testing::test_runner_dockerfile; -use crate::utils::IS_A_TTY; use crate::{ app::{self, CommandExt as _}, testing::{ build::prepare_build_command, docker::{DOCKER_SOCKET, docker_command}, + test_runner_dockerfile, }, utils::{ + IS_A_TTY, command::ChainArgs as _, environment::{Environment, append_environment_variables}, }, From 78a8a8b948418a0edaa559cfefc06e7ea8f557cd Mon Sep 17 00:00:00 2001 From: Pavlos Rontidis Date: Wed, 12 Nov 2025 16:07:06 -0500 Subject: [PATCH 080/227] fix(internal_logs source): remove rate limit (#24218) * fix(internal_logs source): remove rate limit * add changelog * Debug commit * Fix validated - Revert "Debug commit" This reverts commit c7b9ec9c74ef31d235df08edc4681d7336c9cc1e. * add unit test * fix check-events --- .../internal_logs_rate_limiting.fix.md | 4 ++ src/sources/internal_logs.rs | 40 +++++++++++++++++++ src/trace.rs | 7 ++-- 3 files changed, 48 insertions(+), 3 deletions(-) create mode 100644 changelog.d/internal_logs_rate_limiting.fix.md diff --git a/changelog.d/internal_logs_rate_limiting.fix.md b/changelog.d/internal_logs_rate_limiting.fix.md new file mode 100644 index 0000000000000..d6276794bdaf1 --- /dev/null +++ b/changelog.d/internal_logs_rate_limiting.fix.md @@ -0,0 +1,4 @@ +The `internal_logs` source now captures all internal Vector logs without rate limiting. 
Previously, repeated log messages were silently +dropped. + +authors: pront diff --git a/src/sources/internal_logs.rs b/src/sources/internal_logs.rs index a5a3d7ff2932b..e7873a7e8a560 100644 --- a/src/sources/internal_logs.rs +++ b/src/sources/internal_logs.rs @@ -213,6 +213,8 @@ mod tests { use vector_lib::{event::Value, lookup::OwnedTargetPath}; use vrl::value::kind::Collection; + use serial_test::serial; + use super::*; use crate::{ event::Event, @@ -234,6 +236,7 @@ mod tests { // cases because `consume_early_buffer` (called within the // `start_source` helper) panics when called more than once. #[tokio::test] + #[serial] async fn receives_logs() { trace::init(false, false, "debug", 10); trace::reset_early_buffer(); @@ -341,6 +344,43 @@ mod tests { rx } + // NOTE: This test requires #[serial] because it directly interacts with global tracing state. + // This is a pre-existing limitation around tracing initialization in tests. + #[tokio::test] + #[serial] + async fn repeated_logs_are_not_rate_limited() { + trace::init(false, false, "info", 10); + trace::reset_early_buffer(); + + let rx = start_source().await; + + // Generate 20 identical log messages with the same component_id + for _ in 0..20 { + info!(component_id = "test", "Repeated test message."); + } + + sleep(Duration::from_millis(50)).await; + let events = collect_ready(rx).await; + + // Filter to only our test messages + let test_events: Vec<_> = events + .iter() + .filter(|e| { + e.as_log() + .get("message") + .map(|m| m.to_string_lossy() == "Repeated test message.") + .unwrap_or(false) + }) + .collect(); + + // We should receive all 20 messages, no rate limiting. 
+ assert_eq!( + test_events.len(), + 20, + "internal_logs source should capture all repeated messages without rate limiting" + ); + } + #[test] fn output_schema_definition_vector_namespace() { let config = InternalLogsConfig::default(); diff --git a/src/trace.rs b/src/trace.rs index 4cfd445d1fff1..6ecec804569ed 100644 --- a/src/trace.rs +++ b/src/trace.rs @@ -64,9 +64,10 @@ pub fn init(color: bool, json: bool, levels: &str, internal_log_rate_limit: u64) let metrics_layer = metrics_layer_enabled().then(|| MetricsLayer::new().with_filter(LevelFilter::INFO)); - let broadcast_layer = RateLimitedLayer::new(BroadcastLayer::new()) - .with_default_limit(internal_log_rate_limit) - .with_filter(fmt_filter.clone()); + // BroadcastLayer should NOT be rate limited because it feeds the internal_logs source, + // which users rely on to capture ALL internal Vector logs for debugging/monitoring. + // Console output (stdout/stderr) has its own separate rate limiting below. + let broadcast_layer = BroadcastLayer::new().with_filter(fmt_filter.clone()); let subscriber = tracing_subscriber::registry() .with(metrics_layer) From 24099ebe04d83324352612237e3982b1ad4578d1 Mon Sep 17 00:00:00 2001 From: Thomas Date: Wed, 12 Nov 2025 16:04:05 -0500 Subject: [PATCH 081/227] fix(ci): build-test-runner if condition (#24224) * fix(ci): build-test-runner if condition * Fix indentation * Debug with pull_request * Require build-test-runner success before running tests * Revert "Debug with pull_request" This reverts commit 659d7e5ef762027d1c8d57b288891fb356eea5d7. 
--- .github/workflows/integration.yml | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/.github/workflows/integration.yml b/.github/workflows/integration.yml index 6590a73aaf5cf..2a1330335fe9b 100644 --- a/.github/workflows/integration.yml +++ b/.github/workflows/integration.yml @@ -46,15 +46,15 @@ jobs: build-test-runner: needs: changes - if: | - ${{ - github.event_name == 'workflow_dispatch' || - (github.event_name == 'merge_group' && - (needs.changes.outputs.dependencies == 'true' || - needs.changes.outputs.integration-yml == 'true' || - needs.changes.outputs.int-tests-any == 'true' || - needs.changes.outputs.e2e-tests-any == 'true')) - }} + if: ${{ + github.event_name == 'workflow_dispatch' || + (github.event_name == 'merge_group' && + (needs.changes.outputs.dependencies == 'true' || + needs.changes.outputs.integration-yml == 'true' || + needs.changes.outputs.int-tests-any == 'true' || + needs.changes.outputs.e2e-tests-any == 'true')) + }} + uses: ./.github/workflows/build-test-runner.yml with: commit_sha: ${{ github.sha }} @@ -65,7 +65,7 @@ jobs: - changes - build-test-runner - if: ${{ !failure() && !cancelled() && (github.event_name == 'merge_group' || github.event_name == 'workflow_dispatch') }} + if: ${{ !failure() && !cancelled() && needs.build-test-runner.result == 'success' && (github.event_name == 'merge_group' || github.event_name == 'workflow_dispatch') }} strategy: matrix: # TODO: Add "splunk" back once https://github.com/vectordotdev/vector/issues/23474 is fixed. 
@@ -133,7 +133,7 @@ jobs: needs: - changes - build-test-runner - if: ${{ !failure() && !cancelled() && (github.event_name == 'merge_group' || github.event_name == 'workflow_dispatch') }} + if: ${{ !failure() && !cancelled() && needs.build-test-runner.result == 'success' && (github.event_name == 'merge_group' || github.event_name == 'workflow_dispatch') }} strategy: matrix: service: [ "datadog-logs", "datadog-metrics", "opentelemetry-logs", "opentelemetry-metrics" ] From 4d22ce1b28f33c69872e57667a5bedd6a50a4b89 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ensar=20Saraj=C4=8Di=C4=87?= Date: Wed, 12 Nov 2025 22:58:19 +0100 Subject: [PATCH 082/227] fix(unit tests): prevent missing components errors for memory tables in tests (#24081) * fix(unit tests): prevent missing components errors for memory tables in tests This ensures that memory enrichment tables are always considered in tests, because we can't be sure if they are used. Related: https://github.com/vectordotdev/vector/discussions/23972 * Add changelog entry * improve comment --------- Co-authored-by: Pavlos Rontidis --- changelog.d/24081_memory_tables_in_tests.fix.md | 3 +++ src/config/unit_test/mod.rs | 9 +++++++++ 2 files changed, 12 insertions(+) create mode 100644 changelog.d/24081_memory_tables_in_tests.fix.md diff --git a/changelog.d/24081_memory_tables_in_tests.fix.md b/changelog.d/24081_memory_tables_in_tests.fix.md new file mode 100644 index 0000000000000..0a1d5195c047a --- /dev/null +++ b/changelog.d/24081_memory_tables_in_tests.fix.md @@ -0,0 +1,3 @@ +Fixed an issue in vector tests where memory enrichment tables would report missing components errors. 
+ +authors: esensar Quad9DNS diff --git a/src/config/unit_test/mod.rs b/src/config/unit_test/mod.rs index 2be4b76484cc8..485f8b59e75eb 100644 --- a/src/config/unit_test/mod.rs +++ b/src/config/unit_test/mod.rs @@ -432,6 +432,15 @@ async fn build_unit_test( .collect::>(); valid_components.extend(unexpanded_transforms); + // Enrichment tables consume inputs but are referenced dynamically in VRL transforms + // (via get_enrichment_table_record). Since we can't statically analyze VRL usage, + // we conservatively include all enrichment table inputs as valid components. + config_builder + .enrichment_tables + .iter() + .filter_map(|(key, c)| c.as_sink(key).map(|(_, sink)| sink.inputs)) + .for_each(|i| valid_components.extend(i.into_iter())); + // Remove all transforms that are not relevant to the current test config_builder.transforms = config_builder .transforms From 8d3d623098c4caaed8295a1753235cbde1aa8dc3 Mon Sep 17 00:00:00 2001 From: Thomas Date: Thu, 13 Nov 2025 12:21:28 -0500 Subject: [PATCH 083/227] chore(releasing): update manifests 0.51.1 (#24233) --- distribution/kubernetes/vector-agent/README.md | 2 +- distribution/kubernetes/vector-agent/configmap.yaml | 2 +- distribution/kubernetes/vector-agent/daemonset.yaml | 4 ++-- distribution/kubernetes/vector-agent/rbac.yaml | 4 ++-- distribution/kubernetes/vector-agent/service-headless.yaml | 2 +- distribution/kubernetes/vector-agent/serviceaccount.yaml | 2 +- distribution/kubernetes/vector-aggregator/README.md | 2 +- distribution/kubernetes/vector-aggregator/configmap.yaml | 2 +- .../kubernetes/vector-aggregator/service-headless.yaml | 2 +- distribution/kubernetes/vector-aggregator/service.yaml | 2 +- distribution/kubernetes/vector-aggregator/serviceaccount.yaml | 2 +- distribution/kubernetes/vector-aggregator/statefulset.yaml | 4 ++-- distribution/kubernetes/vector-stateless-aggregator/README.md | 2 +- .../kubernetes/vector-stateless-aggregator/configmap.yaml | 2 +- 
.../kubernetes/vector-stateless-aggregator/deployment.yaml | 4 ++-- .../vector-stateless-aggregator/service-headless.yaml | 2 +- .../kubernetes/vector-stateless-aggregator/service.yaml | 2 +- .../vector-stateless-aggregator/serviceaccount.yaml | 2 +- 18 files changed, 22 insertions(+), 22 deletions(-) diff --git a/distribution/kubernetes/vector-agent/README.md b/distribution/kubernetes/vector-agent/README.md index 4df9696bd4d9d..fcb899a9d6e76 100644 --- a/distribution/kubernetes/vector-agent/README.md +++ b/distribution/kubernetes/vector-agent/README.md @@ -1,6 +1,6 @@ The kubernetes manifests found in this directory have been automatically generated from the [helm chart `vector/vector`](https://github.com/vectordotdev/helm-charts/tree/master/charts/vector) -version 0.47.0 with the following `values.yaml`: +version 0.48.0 with the following `values.yaml`: ```yaml role: Agent diff --git a/distribution/kubernetes/vector-agent/configmap.yaml b/distribution/kubernetes/vector-agent/configmap.yaml index 9fed2898f23c9..366b0bab698f4 100644 --- a/distribution/kubernetes/vector-agent/configmap.yaml +++ b/distribution/kubernetes/vector-agent/configmap.yaml @@ -9,7 +9,7 @@ metadata: app.kubernetes.io/name: vector app.kubernetes.io/instance: vector app.kubernetes.io/component: Agent - app.kubernetes.io/version: "0.51.0-distroless-libc" + app.kubernetes.io/version: "0.51.1-distroless-libc" data: agent.yaml: | data_dir: /vector-data-dir diff --git a/distribution/kubernetes/vector-agent/daemonset.yaml b/distribution/kubernetes/vector-agent/daemonset.yaml index d1b88bb142f57..fee139123abd7 100644 --- a/distribution/kubernetes/vector-agent/daemonset.yaml +++ b/distribution/kubernetes/vector-agent/daemonset.yaml @@ -9,7 +9,7 @@ metadata: app.kubernetes.io/name: vector app.kubernetes.io/instance: vector app.kubernetes.io/component: Agent - app.kubernetes.io/version: "0.51.0-distroless-libc" + app.kubernetes.io/version: "0.51.1-distroless-libc" spec: selector: matchLabels: @@ -30,7 
+30,7 @@ spec: dnsPolicy: ClusterFirst containers: - name: vector - image: "timberio/vector:0.51.0-distroless-libc" + image: "timberio/vector:0.51.1-distroless-libc" imagePullPolicy: IfNotPresent args: - --config-dir diff --git a/distribution/kubernetes/vector-agent/rbac.yaml b/distribution/kubernetes/vector-agent/rbac.yaml index 6d205e7f63c56..7da5ea476d22b 100644 --- a/distribution/kubernetes/vector-agent/rbac.yaml +++ b/distribution/kubernetes/vector-agent/rbac.yaml @@ -10,7 +10,7 @@ metadata: app.kubernetes.io/name: vector app.kubernetes.io/instance: vector app.kubernetes.io/component: Agent - app.kubernetes.io/version: "0.51.0-distroless-libc" + app.kubernetes.io/version: "0.51.1-distroless-libc" rules: - apiGroups: - "" @@ -31,7 +31,7 @@ metadata: app.kubernetes.io/name: vector app.kubernetes.io/instance: vector app.kubernetes.io/component: Agent - app.kubernetes.io/version: "0.51.0-distroless-libc" + app.kubernetes.io/version: "0.51.1-distroless-libc" roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole diff --git a/distribution/kubernetes/vector-agent/service-headless.yaml b/distribution/kubernetes/vector-agent/service-headless.yaml index b0baeabe79689..92b00baf8e3a2 100644 --- a/distribution/kubernetes/vector-agent/service-headless.yaml +++ b/distribution/kubernetes/vector-agent/service-headless.yaml @@ -9,7 +9,7 @@ metadata: app.kubernetes.io/name: vector app.kubernetes.io/instance: vector app.kubernetes.io/component: Agent - app.kubernetes.io/version: "0.51.0-distroless-libc" + app.kubernetes.io/version: "0.51.1-distroless-libc" annotations: spec: clusterIP: None diff --git a/distribution/kubernetes/vector-agent/serviceaccount.yaml b/distribution/kubernetes/vector-agent/serviceaccount.yaml index 479763fa8002d..9aab203b7da64 100644 --- a/distribution/kubernetes/vector-agent/serviceaccount.yaml +++ b/distribution/kubernetes/vector-agent/serviceaccount.yaml @@ -9,5 +9,5 @@ metadata: app.kubernetes.io/name: vector app.kubernetes.io/instance: vector 
app.kubernetes.io/component: Agent - app.kubernetes.io/version: "0.51.0-distroless-libc" + app.kubernetes.io/version: "0.51.1-distroless-libc" automountServiceAccountToken: true diff --git a/distribution/kubernetes/vector-aggregator/README.md b/distribution/kubernetes/vector-aggregator/README.md index 1e07093f77327..0b5ec7a5469b4 100644 --- a/distribution/kubernetes/vector-aggregator/README.md +++ b/distribution/kubernetes/vector-aggregator/README.md @@ -1,6 +1,6 @@ The kubernetes manifests found in this directory have been automatically generated from the [helm chart `vector/vector`](https://github.com/vectordotdev/helm-charts/tree/master/charts/vector) -version 0.47.0 with the following `values.yaml`: +version 0.48.0 with the following `values.yaml`: ```yaml diff --git a/distribution/kubernetes/vector-aggregator/configmap.yaml b/distribution/kubernetes/vector-aggregator/configmap.yaml index 5da2774211df2..f098ca2bc2959 100644 --- a/distribution/kubernetes/vector-aggregator/configmap.yaml +++ b/distribution/kubernetes/vector-aggregator/configmap.yaml @@ -9,7 +9,7 @@ metadata: app.kubernetes.io/name: vector app.kubernetes.io/instance: vector app.kubernetes.io/component: Aggregator - app.kubernetes.io/version: "0.51.0-distroless-libc" + app.kubernetes.io/version: "0.51.1-distroless-libc" data: aggregator.yaml: | data_dir: /vector-data-dir diff --git a/distribution/kubernetes/vector-aggregator/service-headless.yaml b/distribution/kubernetes/vector-aggregator/service-headless.yaml index 50f3708832c30..748794086e3fe 100644 --- a/distribution/kubernetes/vector-aggregator/service-headless.yaml +++ b/distribution/kubernetes/vector-aggregator/service-headless.yaml @@ -9,7 +9,7 @@ metadata: app.kubernetes.io/name: vector app.kubernetes.io/instance: vector app.kubernetes.io/component: Aggregator - app.kubernetes.io/version: "0.51.0-distroless-libc" + app.kubernetes.io/version: "0.51.1-distroless-libc" annotations: spec: clusterIP: None diff --git 
a/distribution/kubernetes/vector-aggregator/service.yaml b/distribution/kubernetes/vector-aggregator/service.yaml index 8866f1c2dfdf3..fa7d5c3ac015e 100644 --- a/distribution/kubernetes/vector-aggregator/service.yaml +++ b/distribution/kubernetes/vector-aggregator/service.yaml @@ -9,7 +9,7 @@ metadata: app.kubernetes.io/name: vector app.kubernetes.io/instance: vector app.kubernetes.io/component: Aggregator - app.kubernetes.io/version: "0.51.0-distroless-libc" + app.kubernetes.io/version: "0.51.1-distroless-libc" annotations: spec: ports: diff --git a/distribution/kubernetes/vector-aggregator/serviceaccount.yaml b/distribution/kubernetes/vector-aggregator/serviceaccount.yaml index 596c835ea3118..278df3aa34777 100644 --- a/distribution/kubernetes/vector-aggregator/serviceaccount.yaml +++ b/distribution/kubernetes/vector-aggregator/serviceaccount.yaml @@ -9,5 +9,5 @@ metadata: app.kubernetes.io/name: vector app.kubernetes.io/instance: vector app.kubernetes.io/component: Aggregator - app.kubernetes.io/version: "0.51.0-distroless-libc" + app.kubernetes.io/version: "0.51.1-distroless-libc" automountServiceAccountToken: true diff --git a/distribution/kubernetes/vector-aggregator/statefulset.yaml b/distribution/kubernetes/vector-aggregator/statefulset.yaml index e0a59ecb2792f..4d5bb24456c25 100644 --- a/distribution/kubernetes/vector-aggregator/statefulset.yaml +++ b/distribution/kubernetes/vector-aggregator/statefulset.yaml @@ -9,7 +9,7 @@ metadata: app.kubernetes.io/name: vector app.kubernetes.io/instance: vector app.kubernetes.io/component: Aggregator - app.kubernetes.io/version: "0.51.0-distroless-libc" + app.kubernetes.io/version: "0.51.1-distroless-libc" annotations: {} spec: replicas: 1 @@ -34,7 +34,7 @@ spec: dnsPolicy: ClusterFirst containers: - name: vector - image: "timberio/vector:0.51.0-distroless-libc" + image: "timberio/vector:0.51.1-distroless-libc" imagePullPolicy: IfNotPresent args: - --config-dir diff --git 
a/distribution/kubernetes/vector-stateless-aggregator/README.md b/distribution/kubernetes/vector-stateless-aggregator/README.md index 6e0faa79156e0..c792cdd1635bd 100644 --- a/distribution/kubernetes/vector-stateless-aggregator/README.md +++ b/distribution/kubernetes/vector-stateless-aggregator/README.md @@ -1,6 +1,6 @@ The kubernetes manifests found in this directory have been automatically generated from the [helm chart `vector/vector`](https://github.com/vectordotdev/helm-charts/tree/master/charts/vector) -version 0.47.0 with the following `values.yaml`: +version 0.48.0 with the following `values.yaml`: ```yaml role: Stateless-Aggregator diff --git a/distribution/kubernetes/vector-stateless-aggregator/configmap.yaml b/distribution/kubernetes/vector-stateless-aggregator/configmap.yaml index b5ea6e39e2dcf..43e8934cf9ae7 100644 --- a/distribution/kubernetes/vector-stateless-aggregator/configmap.yaml +++ b/distribution/kubernetes/vector-stateless-aggregator/configmap.yaml @@ -9,7 +9,7 @@ metadata: app.kubernetes.io/name: vector app.kubernetes.io/instance: vector app.kubernetes.io/component: Stateless-Aggregator - app.kubernetes.io/version: "0.51.0-distroless-libc" + app.kubernetes.io/version: "0.51.1-distroless-libc" data: aggregator.yaml: | data_dir: /vector-data-dir diff --git a/distribution/kubernetes/vector-stateless-aggregator/deployment.yaml b/distribution/kubernetes/vector-stateless-aggregator/deployment.yaml index aeccf39e7c839..28f5e985de428 100644 --- a/distribution/kubernetes/vector-stateless-aggregator/deployment.yaml +++ b/distribution/kubernetes/vector-stateless-aggregator/deployment.yaml @@ -9,7 +9,7 @@ metadata: app.kubernetes.io/name: vector app.kubernetes.io/instance: vector app.kubernetes.io/component: Stateless-Aggregator - app.kubernetes.io/version: "0.51.0-distroless-libc" + app.kubernetes.io/version: "0.51.1-distroless-libc" annotations: {} spec: replicas: 1 @@ -32,7 +32,7 @@ spec: dnsPolicy: ClusterFirst containers: - name: vector - image: 
"timberio/vector:0.51.0-distroless-libc" + image: "timberio/vector:0.51.1-distroless-libc" imagePullPolicy: IfNotPresent args: - --config-dir diff --git a/distribution/kubernetes/vector-stateless-aggregator/service-headless.yaml b/distribution/kubernetes/vector-stateless-aggregator/service-headless.yaml index 0218afcacd0c8..2ce0cb4ed13a7 100644 --- a/distribution/kubernetes/vector-stateless-aggregator/service-headless.yaml +++ b/distribution/kubernetes/vector-stateless-aggregator/service-headless.yaml @@ -9,7 +9,7 @@ metadata: app.kubernetes.io/name: vector app.kubernetes.io/instance: vector app.kubernetes.io/component: Stateless-Aggregator - app.kubernetes.io/version: "0.51.0-distroless-libc" + app.kubernetes.io/version: "0.51.1-distroless-libc" annotations: spec: clusterIP: None diff --git a/distribution/kubernetes/vector-stateless-aggregator/service.yaml b/distribution/kubernetes/vector-stateless-aggregator/service.yaml index cb4e3b611a5d2..c8cd858bdad46 100644 --- a/distribution/kubernetes/vector-stateless-aggregator/service.yaml +++ b/distribution/kubernetes/vector-stateless-aggregator/service.yaml @@ -9,7 +9,7 @@ metadata: app.kubernetes.io/name: vector app.kubernetes.io/instance: vector app.kubernetes.io/component: Stateless-Aggregator - app.kubernetes.io/version: "0.51.0-distroless-libc" + app.kubernetes.io/version: "0.51.1-distroless-libc" annotations: spec: ports: diff --git a/distribution/kubernetes/vector-stateless-aggregator/serviceaccount.yaml b/distribution/kubernetes/vector-stateless-aggregator/serviceaccount.yaml index 0630b5396a163..fe7032a56dc76 100644 --- a/distribution/kubernetes/vector-stateless-aggregator/serviceaccount.yaml +++ b/distribution/kubernetes/vector-stateless-aggregator/serviceaccount.yaml @@ -9,5 +9,5 @@ metadata: app.kubernetes.io/name: vector app.kubernetes.io/instance: vector app.kubernetes.io/component: Stateless-Aggregator - app.kubernetes.io/version: "0.51.0-distroless-libc" + app.kubernetes.io/version: 
"0.51.1-distroless-libc" automountServiceAccountToken: true From 889e6a1915ca2277ca44800fe308d9eea5fe961f Mon Sep 17 00:00:00 2001 From: Pavlos Rontidis Date: Thu, 13 Nov 2025 13:52:18 -0500 Subject: [PATCH 084/227] chore(website): bump blog post date (#24235) --- website/content/en/blog/cose-first-year.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/content/en/blog/cose-first-year.md b/website/content/en/blog/cose-first-year.md index 880997ce7fba2..22fe8e97a7b3a 100644 --- a/website/content/en/blog/cose-first-year.md +++ b/website/content/en/blog/cose-first-year.md @@ -3,7 +3,7 @@ title: Celebrating COSE's First Year short: COSE Team - First Year Retrospective description: Celebrating the first year of the COSE team and highlighting our contributions to the Vector open source community authors: [ "pront" ] -date: "2025-11-04" +date: "2025-11-13" badges: type: retrospective domains: [ "dev", "community" ] From 70b26187826a1ac6f047020740ee8bed65641960 Mon Sep 17 00:00:00 2001 From: Thomas Date: Thu, 13 Nov 2025 15:09:29 -0500 Subject: [PATCH 085/227] chore(releasing): v0.51.1 (#24234) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * chore(releasing): prepare v0.51.1 release (#24214) * chore(ci): reorganize integration test files (#24108) * chore(ci): reorganize integration test files * revert nats data move * fix paths to data * fix more paths * amqp fix * Fix logstash int tests shared files paths * Rename scripts/integration -> tests/integration * scripts/integration -> tests/integration in source files * Fix scripts->tests * Fix gcp paths * Fix http-client pem path * move nats data --------- Co-authored-by: Thomas * chore(vdev): move all utils in a new utils folder (#24143) * chore(vdev): move all utils in a new utils folder * move remaining files to utils dir * move remaining files to utils dir * fmt * chore(vdev): make modules visible to rustfmt (#24162) * Remove mod inside cli_subcommands 
macro * cargo fmt * chore(vdev): apply vdev rust check fixes * Link to rustfmt issue * fix(vdev): release prepare vrl version pinning (#24158) * Add --dry-run to release prepare * Add error handling and checks to pin_vrl_version * Add wrapper to toml * Remove wrapper, parse as Table instead * Fix vrl pinning logic * Enable preserve_order feature in toml crate * Use dependency instead of whole toml * Fix dry run docs * Fix dry run wording * refactor to use toml_edit * Add update_vrl_to_version to add unit test * Use indoc in prepare.rs * Remove preserve_order feature * chore(deps): update VRL to add missing stdlib fns from 0.28 (#24178) * chore(ci): temporarily remove homebrew publish step from publish workflow (#24185) This temporarily removes the publish-homebrew job from the publish workflow to address issue #24139. This is step 1 of the plan to fix the homebrew publishing process. 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-authored-by: Claude * fix(blackhole sink): disable rate limiting for periodic stats messages (#24190) * fix(blackhole sink): disable rate limiting for periodic stats messages The blackhole sink's informational "Collected events" messages were being rate-limited since rate limiting was enabled by default in #24024. This is undesirable because: 1. These are deliberately scheduled periodic messages (controlled by the `print_interval_secs` config option), not error conditions that could flood the logs 2. Users explicitly configure the frequency - rate limiting defeats that explicit configuration and breaks user expectations 3. The interval timer already prevents log flooding, making additional rate limiting redundant 4. The blackhole sink is used for debugging/testing, where predictable output is essential This fix adds `internal_log_rate_limit = false` to both info! calls, similar to how the console sink disables rate limiting for its critical operational messages. 
Fixes #24188 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude * chore: add changelog fragment for blackhole sink rate limiting fix --------- Co-authored-by: Claude * chore(internal logs): Disable rate limiting for critical internal error logs (#24192) * Improve debug log in aws_ecs_metrics * Add internal_log_rate_limit = false to config reload errors * Disable rate limiting for lua build failure * Disable rate limiting for amqp build failure * Add internal_log_rate_limit = false to important failures during reload * Disable log rate limit for repeated events in process.rs * Update src/config/watcher.rs * Fix message format * fix(tracing): prevent panic for traces without standard fields (#24191) * fix(tracing): prevent panic for traces without std fields * forbid unwrap in this lib * clippy fix * improve events_with_custom_fields_no_message_dont_panic, advance to next window * chore(tracing): do not rate limit utlization report (#24202) * chore(internal metrics): move config_reload_* metrics to VectorReload* (#24203) * chore(internal metrics): move config_reload_* metrics to VectorReloaded and VectorReloadedError * update topology_doesnt_reload_new_data_dir test * chore(dev): cargo fmt * strengthen tests, since now we have a better interface * chore(releasing): Add 0.51.0 known issues (#24211) * Format fixes * Add 0.51.0 known issues * Add dots * Reword log panic known issue * Reword VRL missing fns * Backtick versions * Update website/cue/reference/releases/0.51.0.cue Co-authored-by: Pavlos Rontidis * Fix cue docs fmt --------- Co-authored-by: Pavlos Rontidis * Bump vector version to 0.51.1 * Bump VRL to 0.28.1 * Generate 0.51.1 cue * Add vrl_changelog * Remove whitespace * chore(releasing): Updated distribution/install.sh vector version to 0.51.1 * chore(releasing): Add 0.51.1 to versions.cue * chore(releasing): Created release md file * Add description * Reorder items and fix format * Finally fix styling * Bump date --------- 
Co-authored-by: Pavlos Rontidis Co-authored-by: Claude chore(releasing): pull in `internal_logs` fix into 0.51.1 (#24225) * fix(internal_logs source): remove rate limit (#24218) * fix(internal_logs source): remove rate limit * add changelog * Debug commit * Fix validated - Revert "Debug commit" This reverts commit c7b9ec9c74ef31d235df08edc4681d7336c9cc1e. * add unit test * fix check-events * Pull in internal_logs source fix * Add internal_logs known issue * Bump date * Update website/cue/reference/releases/0.51.0.cue Co-authored-by: Pavlos Rontidis --------- Co-authored-by: Pavlos Rontidis * Delete changelog entries --------- Co-authored-by: Pavlos Rontidis --- .../24188_blackhole_sink_rate_limiting.fix.md | 3 - changelog.d/config_reload_panic_fix.fix.md | 4 - .../internal_logs_rate_limiting.fix.md | 4 - .../unify_reload_metrics.enhancement.md | 16 --- distribution/install.sh | 2 +- website/content/en/releases/0.51.1.md | 4 + website/cue/reference/releases/0.51.1.cue | 104 ++++++++++++++++++ website/cue/reference/versions.cue | 1 + 8 files changed, 110 insertions(+), 28 deletions(-) delete mode 100644 changelog.d/24188_blackhole_sink_rate_limiting.fix.md delete mode 100644 changelog.d/config_reload_panic_fix.fix.md delete mode 100644 changelog.d/internal_logs_rate_limiting.fix.md delete mode 100644 changelog.d/unify_reload_metrics.enhancement.md create mode 100644 website/content/en/releases/0.51.1.md create mode 100644 website/cue/reference/releases/0.51.1.cue diff --git a/changelog.d/24188_blackhole_sink_rate_limiting.fix.md b/changelog.d/24188_blackhole_sink_rate_limiting.fix.md deleted file mode 100644 index 9761dc54bd7a8..0000000000000 --- a/changelog.d/24188_blackhole_sink_rate_limiting.fix.md +++ /dev/null @@ -1,3 +0,0 @@ -The `blackhole` sink's periodic statistics messages (controlled by `print_interval_secs`) are no longer incorrectly suppressed by rate limiting. These informational messages now appear at the user-configured interval as expected. 
- -authors: pront diff --git a/changelog.d/config_reload_panic_fix.fix.md b/changelog.d/config_reload_panic_fix.fix.md deleted file mode 100644 index 1a39a8af479f1..0000000000000 --- a/changelog.d/config_reload_panic_fix.fix.md +++ /dev/null @@ -1,4 +0,0 @@ -Fixed a panic in the tracing rate limiter when config reload failed. While the panic didn't kill Vector (it was caught by tokio's task -runtime), it could cause unexpected behavior. The rate limiter now gracefully handles events without standard message fields. - -authors: pront diff --git a/changelog.d/internal_logs_rate_limiting.fix.md b/changelog.d/internal_logs_rate_limiting.fix.md deleted file mode 100644 index d6276794bdaf1..0000000000000 --- a/changelog.d/internal_logs_rate_limiting.fix.md +++ /dev/null @@ -1,4 +0,0 @@ -The `internal_logs` source now captures all internal Vector logs without rate limiting. Previously, repeated log messages were silently -dropped. - -authors: pront diff --git a/changelog.d/unify_reload_metrics.enhancement.md b/changelog.d/unify_reload_metrics.enhancement.md deleted file mode 100644 index 7910424e59be8..0000000000000 --- a/changelog.d/unify_reload_metrics.enhancement.md +++ /dev/null @@ -1,16 +0,0 @@ -The `component_errors_total` metric now includes a `reason` tag when `error_code="reload"` to provide more granular information about reload -failures. 
Possible reasons include: - -- `global_options_changed`: Reload rejected because global options (like `data_dir`) changed -- `global_diff_failed`: Reload rejected because computing global config diff failed -- `topology_build_failed`: Reload rejected because new topology failed to build/healthcheck -- `restore_failed`: Reload failed and could not restore previous config - -Replaced metrics: - -- `config_reload_rejected` was replaced by `component_errors_total` with `error_code="reload"` and a `reason` tag specifying the rejection type -- `config_reloaded` was replaced by the existing `reloaded_total` metric - -Note: The replaced metrics were introduced in v0.50.0 but were never emitted due to a bug. These changes provide consistency across Vector's internal telemetry. - -authors: pront diff --git a/distribution/install.sh b/distribution/install.sh index c3d80833ea432..38d765d203645 100755 --- a/distribution/install.sh +++ b/distribution/install.sh @@ -13,7 +13,7 @@ set -u # If PACKAGE_ROOT is unset or empty, default it. PACKAGE_ROOT="${PACKAGE_ROOT:-"https://packages.timber.io/vector"}" # If VECTOR_VERSION is unset or empty, default it. 
-VECTOR_VERSION="${VECTOR_VERSION:-"0.51.0"}" +VECTOR_VERSION="${VECTOR_VERSION:-"0.51.1"}" _divider="--------------------------------------------------------------------------------" _prompt=">>>" _indent=" " diff --git a/website/content/en/releases/0.51.1.md b/website/content/en/releases/0.51.1.md new file mode 100644 index 0000000000000..b3098b05a39f1 --- /dev/null +++ b/website/content/en/releases/0.51.1.md @@ -0,0 +1,4 @@ +--- +title: Vector v0.51.1 release notes +weight: 30 +--- diff --git a/website/cue/reference/releases/0.51.1.cue b/website/cue/reference/releases/0.51.1.cue new file mode 100644 index 0000000000000..f93558bc64ea4 --- /dev/null +++ b/website/cue/reference/releases/0.51.1.cue @@ -0,0 +1,104 @@ +package metadata + +releases: "0.51.1": { + date: "2025-11-13" + codename: "" + + whats_next: [] + + description: """ + * When Vector is running with debug logs enabled (`VECTOR_LOG=debug`), threads no + longer panic when logging utilization or other debug messages. + + * The `config_reload_rejected` and `config_reloaded` counters added in `0.51.0` were + not being emitted and have been replaced. `component_errors_total` with + `error_code="reload"` now replaces `config_reload_rejected` and `reloaded_total` + replaces `config_reloaded`. + + * The `basename`, `dirname` and `split_path` VRL functions added in `0.51.0` are now + properly exposed. + + * `blackhole` sink's periodic statistics messages are no longer rate limited. + + * The `internal_logs` source now captures all internal Vector logs without rate limiting. + Previously, repeated log messages were silently dropped. + """ + + changelog: [ + { + type: "fix" + description: """ + The `blackhole` sink's periodic statistics messages (controlled by `print_interval_secs`) are no longer incorrectly suppressed by rate limiting. These informational messages now appear at the user-configured interval as expected. 
+ """ + contributors: ["pront"] + }, + { + type: "fix" + description: """ + Fixed a panic in the tracing rate limiter when config reload failed. While the panic didn't kill Vector (it was caught by tokio's task + runtime), it could cause unexpected behavior. The rate limiter now gracefully handles events without standard message fields. + """ + contributors: ["pront"] + }, + { + type: "enhancement" + description: """ + The `component_errors_total` metric now includes a `reason` tag when `error_code="reload"` to provide more granular information about reload + failures. Possible reasons include: + + - `global_options_changed`: Reload rejected because global options (like `data_dir`) changed + - `global_diff_failed`: Reload rejected because computing global config diff failed + - `topology_build_failed`: Reload rejected because new topology failed to build/healthcheck + - `restore_failed`: Reload failed and could not restore previous config + + Replaced metrics: + + - `config_reload_rejected` was replaced by `component_errors_total` with `error_code="reload"` and a `reason` tag specifying the rejection type + - `config_reloaded` was replaced by the existing `reloaded_total` metric + + Note: The replaced metrics were introduced in v0.50.0 but were never emitted due to a bug. These changes provide consistency across Vector's internal telemetry. + """ + contributors: ["pront"] + }, + { + type: "fix" + description: """ + The `internal_logs` source now captures all internal Vector logs without rate limiting. Previously, repeated log messages were silently + dropped. + """ + contributors: ["pront"] + }, + ] + + vrl_changelog: """ + ### [0.28.1 (2025-11-07)] + + #### Fixes + + - Fixed an issue where `split_path`, `basename`, `dirname` had not been added to VRL's standard + library and, therefore, appeared to be missing and were inaccessible in the `0.28.0` release. 
+ + authors: thomasqueirozb (https://github.com/vectordotdev/vrl/pull/1553) + + + ### [0.28.0 (2025-11-03)] + """ + + commits: [ + {sha: "0aedea9561a4834f6abebaa2a0bc5580b9143a9e", date: "2025-11-04 02:03:46 UTC", description: "reorganize integration test files", pr_number: 24108, scopes: ["ci"], type: "chore", breaking_change: false, author: "Pavlos Rontidis", files_count: 125, insertions_count: 149, deletions_count: 122}, + {sha: "2d3793e96d7047408d6ce24d378d2396ca6830f4", date: "2025-11-05 01:13:53 UTC", description: "move all utils in a new utils folder", pr_number: 24143, scopes: ["vdev"], type: "chore", breaking_change: false, author: "Pavlos Rontidis", files_count: 40, insertions_count: 242, deletions_count: 174}, + {sha: "35a408804d9c4453852ff357c15d7ab3aaad5cbd", date: "2025-11-05 20:54:56 UTC", description: "improve/fix minor release template", pr_number: 24156, scopes: ["releasing"], type: "chore", breaking_change: false, author: "Thomas", files_count: 1, insertions_count: 23, deletions_count: 27}, + {sha: "0cce521b4a2eb2a92cb024e46e7c6ffcb1c64754", date: "2025-11-06 02:17:02 UTC", description: "make modules visible to rustfmt", pr_number: 24162, scopes: ["vdev"], type: "chore", breaking_change: false, author: "Thomas", files_count: 19, insertions_count: 218, deletions_count: 117}, + {sha: "4add1c3aa9ebe05d2e16a56afc3ee8accf7cfeb1", date: "2025-11-06 03:28:00 UTC", description: "release prepare vrl version pinning", pr_number: 24158, scopes: ["vdev"], type: "fix", breaking_change: false, author: "Thomas", files_count: 3, insertions_count: 66, deletions_count: 33}, + {sha: "1e8e99a4898958ae9b56ee33af162c98092ed9b9", date: "2025-11-06 21:29:00 UTC", description: "update VRL to add missing stdlib fns from 0.28", pr_number: 24178, scopes: ["deps"], type: "chore", breaking_change: false, author: "Thomas", files_count: 1, insertions_count: 1, deletions_count: 1}, + {sha: "fbbcffc1414041be01bff8963727ceedb2f7fe70", date: "2025-11-07 21:03:54 UTC", description: 
"temporarily remove homebrew publish step from publish workflow", pr_number: 24185, scopes: ["ci"], type: "chore", breaking_change: false, author: "Pavlos Rontidis", files_count: 1, insertions_count: 0, deletions_count: 12}, + {sha: "c855585c3386324b26ad2b8516c16177bc860d20", date: "2025-11-07 23:52:13 UTC", description: "disable rate limiting for periodic stats messages", pr_number: 24190, scopes: ["blackhole sink"], type: "fix", breaking_change: false, author: "Pavlos Rontidis", files_count: 2, insertions_count: 5, deletions_count: 0}, + {sha: "a22b790bcf2e111e7c1b6ffc2de1394fe37b7ae2", date: "2025-11-10 20:17:52 UTC", description: "Disable rate limiting for critical internal error logs", pr_number: 24192, scopes: ["internal logs"], type: "chore", breaking_change: false, author: "Thomas", files_count: 6, insertions_count: 42, deletions_count: 14}, + {sha: "87e7cb8733a6e5b0afe075e54be3cc397023c128", date: "2025-11-10 23:10:54 UTC", description: "prevent panic for traces without standard fields", pr_number: 24191, scopes: ["tracing"], type: "fix", breaking_change: false, author: "Pavlos Rontidis", files_count: 5, insertions_count: 56, deletions_count: 8}, + {sha: "a7e68b17010c58d0ac2a1656fe13468063c6ddf3", date: "2025-11-11 00:18:14 UTC", description: "do not rate limit utlization report", pr_number: 24202, scopes: ["tracing"], type: "chore", breaking_change: false, author: "Pavlos Rontidis", files_count: 1, insertions_count: 1, deletions_count: 15}, + {sha: "7d657023131d15334255a29a390f2a3604ff67cc", date: "2025-11-11 20:01:07 UTC", description: "move config_reload_* metrics to VectorReload*", pr_number: 24203, scopes: ["internal metrics"], type: "chore", breaking_change: false, author: "Pavlos Rontidis", files_count: 11, insertions_count: 184, deletions_count: 265}, + {sha: "5432632b414472e004cc68a0cae56f0b4451e8af", date: "2025-11-11 23:42:09 UTC", description: "Add 0.51.0 known issues", pr_number: 24211, scopes: ["releasing"], type: "chore", breaking_change: 
false, author: "Thomas", files_count: 2, insertions_count: 27, deletions_count: 5}, + {sha: "3e09dc86b8aaf6606abc9c5acc9ddf52f39f6e17", date: "2025-11-12 04:31:25 UTC", description: "prepare v0.51.1 release", pr_number: 24214, scopes: ["releasing"], type: "chore", breaking_change: false, author: "Thomas", files_count: 204, insertions_count: 1055, deletions_count: 756}, + {sha: "3ca3ec6f55522c406680a183416bf7a8b35372ae", date: "2025-11-13 02:07:06 UTC", description: "remove rate limit", pr_number: 24218, scopes: ["internal_logs source"], type: "fix", breaking_change: false, author: "Pavlos Rontidis", files_count: 3, insertions_count: 48, deletions_count: 3}, + ] +} diff --git a/website/cue/reference/versions.cue b/website/cue/reference/versions.cue index 35a03f4948505..e4bffd0b453f2 100644 --- a/website/cue/reference/versions.cue +++ b/website/cue/reference/versions.cue @@ -2,6 +2,7 @@ package metadata // This has to be maintained manually because there's currently no way to sort versions programmatically versions: [string, ...string] & [ + "0.51.1", "0.51.0", "0.50.0", "0.49.0", From b367f7dddd58fea63045ba8bdda02eaa3c9e679a Mon Sep 17 00:00:00 2001 From: Thomas Date: Thu, 13 Nov 2025 15:18:15 -0500 Subject: [PATCH 086/227] fix(http_client): fail on VRL compilation errors in query parameters (#24223) * fix(http_client): fail on VRL compilation errors in query parameters * cargo fmt * Use sources::BuildError for Err type instead of String * Update changelog --- .../http_client_vrl_compilation_errors.fix.md | 5 ++ src/sources/http_client/client.rs | 46 +++++++++++-------- src/sources/mod.rs | 4 +- 3 files changed, 35 insertions(+), 20 deletions(-) create mode 100644 changelog.d/http_client_vrl_compilation_errors.fix.md diff --git a/changelog.d/http_client_vrl_compilation_errors.fix.md b/changelog.d/http_client_vrl_compilation_errors.fix.md new file mode 100644 index 0000000000000..45a9aaf2063bc --- /dev/null +++ b/changelog.d/http_client_vrl_compilation_errors.fix.md 
@@ -0,0 +1,5 @@ +The `http_client` source now fails to start if VRL compilation errors occur in `query` parameters when +type is set to `vrl`, instead of silently logging a warning and continuing with invalid expressions. +This prevents unexpected behavior where malformed VRL would be sent as literal strings in HTTP requests. + +authors: thomasqueirozb diff --git a/src/sources/http_client/client.rs b/src/sources/http_client/client.rs index 548cbc795e3ab..34c2ee09dbe2a 100644 --- a/src/sources/http_client/client.rs +++ b/src/sources/http_client/client.rs @@ -214,17 +214,19 @@ pub struct Query { } impl Query { - pub fn new(params: &HashMap) -> Self { + pub fn new(params: &HashMap) -> Result { let functions = vrl::stdlib::all() .into_iter() .chain(vector_lib::enrichment::vrl_functions()) .chain(vector_vrl_functions::all()) .collect::>(); - let compiled: HashMap = params - .iter() - .map(|(k, v)| (k.clone(), Self::compile_param(v, &functions))) - .collect(); + let mut compiled: HashMap = HashMap::new(); + + for (k, v) in params.iter() { + let compiled_param = Self::compile_param(v, &functions)?; + compiled.insert(k.clone(), compiled_param); + } let has_vrl = compiled.values().any(|compiled| match compiled { CompiledQueryParameterValue::SingleParam(param) => param.program.is_some(), @@ -233,14 +235,17 @@ impl Query { } }); - Query { + Ok(Query { original: params.clone(), compiled, has_vrl, - } + }) } - fn compile_value(param: &ParameterValue, functions: &[Box]) -> CompiledParam { + fn compile_value( + param: &ParameterValue, + functions: &[Box], + ) -> Result { let program = if param.is_vrl() { let state = TypeState::default(); let config = CompileConfig::default(); @@ -256,34 +261,37 @@ impl Query { } Err(diagnostics) => { let error = format_vrl_diagnostics(param.value(), diagnostics); - warn!(message = "VRL compilation failed.", %error); - None + return Err(sources::BuildError::VrlCompilationError { + message: format!("VRL compilation failed: {}", error), + }); } } } 
else { None }; - CompiledParam { + Ok(CompiledParam { value: param.value().to_string(), program, - } + }) } fn compile_param( value: &QueryParameterValue, functions: &[Box], - ) -> CompiledQueryParameterValue { + ) -> Result { match value { - QueryParameterValue::SingleParam(param) => CompiledQueryParameterValue::SingleParam( - Box::new(Self::compile_value(param, functions)), - ), + QueryParameterValue::SingleParam(param) => { + Ok(CompiledQueryParameterValue::SingleParam(Box::new( + Self::compile_value(param, functions)?, + ))) + } QueryParameterValue::MultiParams(params) => { let compiled = params .iter() .map(|p| Self::compile_value(p, functions)) - .collect(); - CompiledQueryParameterValue::MultiParams(compiled) + .collect::, _>>()?; + Ok(CompiledQueryParameterValue::MultiParams(compiled)) } } } @@ -293,7 +301,7 @@ impl Query { #[typetag::serde(name = "http_client")] impl SourceConfig for HttpClientConfig { async fn build(&self, cx: SourceContext) -> crate::Result { - let query = Query::new(&self.query.clone()); + let query = Query::new(&self.query.clone())?; // Build the base URLs let endpoints = [self.endpoint.clone()]; diff --git a/src/sources/mod.rs b/src/sources/mod.rs index f7b2b6bb534e8..77258bdb77a46 100644 --- a/src/sources/mod.rs +++ b/src/sources/mod.rs @@ -102,7 +102,9 @@ pub use vector_lib::source::Source; #[allow(dead_code)] // Easier than listing out all the features that use this /// Common build errors #[derive(Debug, Snafu)] -enum BuildError { +pub enum BuildError { #[snafu(display("URI parse error: {}", source))] UriParseError { source: ::http::uri::InvalidUri }, + #[snafu(display("VRL compilation error: {}", message))] + VrlCompilationError { message: String }, } From 3ef42ae4c457495a11955fc86d9fdf94cbda1398 Mon Sep 17 00:00:00 2001 From: Pavlos Rontidis Date: Thu, 13 Nov 2025 15:20:02 -0500 Subject: [PATCH 087/227] chore(dev): skip removed files when formatting (#24232) --- scripts/check-style.sh | 5 +++++ 1 file changed, 5 insertions(+) 
diff --git a/scripts/check-style.sh b/scripts/check-style.sh index fd31084379bfb..25a45595af96d 100755 --- a/scripts/check-style.sh +++ b/scripts/check-style.sh @@ -83,6 +83,11 @@ for FILE in $FILES; do continue fi + # Skip files that don't exist (e.g., deleted in this branch). + if [[ ! -f "$FILE" ]]; then + continue + fi + # check that the file contains trailing newline if [ -n "$(tail -c1 "$FILE" | tr -d $'\n')" ]; then case "$MODE" in From 5553521edc2415325dd28179423cdf02b45f56f6 Mon Sep 17 00:00:00 2001 From: Pavlos Rontidis Date: Thu, 13 Nov 2025 16:19:00 -0500 Subject: [PATCH 088/227] fix(dev): eliminate race condition when acquiring socket addresses (#24212) * fix(dev): eliminate port allocation race in concurrent tests Replaced portpicker with RAII-based port registry to prevent premature port reuse. Ports are now reserved via guards that must be held for the entire test duration. * fix issues with guards being dropped too early * chore(dev): cargo fmt * only the port should be the key * more guard fixes * fmt on ubuntu, make fmt on macos doesn't change the fmt * rename a few guard variables * add expect --- Cargo.lock | 8 -- Cargo.toml | 2 - benches/http.rs | 6 +- benches/languages.rs | 8 +- lib/portpicker/Cargo.toml | 10 -- lib/portpicker/LICENSE | 24 ---- lib/portpicker/src/lib.rs | 134 ------------------ src/common/http/server_auth.rs | 32 +++-- src/components/validation/runner/config.rs | 8 +- src/components/validation/runner/telemetry.rs | 5 +- src/http.rs | 4 +- src/sinks/appsignal/integration_tests.rs | 5 +- src/sinks/datadog/events/tests.rs | 7 +- src/sinks/datadog/logs/tests.rs | 17 +-- .../datadog/metrics/integration_tests.rs | 5 +- src/sinks/datadog/metrics/tests.rs | 7 +- src/sinks/datadog/traces/tests.rs | 9 +- src/sinks/gcp/stackdriver/metrics/tests.rs | 8 +- src/sinks/http/tests.rs | 17 +-- src/sinks/humio/metrics.rs | 4 +- src/sinks/influxdb/logs.rs | 4 +- src/sinks/loki/tests.rs | 2 +- src/sinks/mezmo.rs | 5 +- 
src/sinks/postgres/integration_tests.rs | 5 +- src/sinks/prometheus/exporter.rs | 19 ++- src/sinks/prometheus/remote_write/tests.rs | 2 +- src/sinks/sematext/logs.rs | 5 +- src/sinks/sematext/metrics.rs | 5 +- src/sinks/socket.rs | 15 +- src/sinks/splunk_hec/logs/tests.rs | 4 +- src/sinks/splunk_hec/metrics/tests.rs | 4 +- src/sinks/statsd/tests.rs | 5 +- src/sinks/util/http.rs | 4 +- src/sinks/util/tcp.rs | 6 +- src/sinks/vector/mod.rs | 7 +- src/sinks/websocket/sink.rs | 11 +- src/sinks/websocket_server/sink.rs | 21 +-- src/sources/apache_metrics/mod.rs | 9 +- src/sources/aws_ecs_metrics/mod.rs | 5 +- src/sources/aws_kinesis_firehose/mod.rs | 32 +++-- src/sources/datadog_agent/tests.rs | 54 ++++--- src/sources/fluent/mod.rs | 24 ++-- src/sources/heroku_logs.rs | 22 +-- src/sources/host_metrics/tcp.rs | 6 +- src/sources/http_client/tests.rs | 19 +-- src/sources/http_server.rs | 5 +- src/sources/logstash.rs | 5 +- src/sources/okta/tests.rs | 9 +- src/sources/opentelemetry/tests.rs | 10 +- src/sources/prometheus/pushgateway.rs | 2 +- src/sources/prometheus/remote_write.rs | 20 +-- src/sources/prometheus/scrape.rs | 17 +-- src/sources/socket/mod.rs | 73 ++++------ src/sources/splunk_hec/mod.rs | 76 +++++----- src/sources/statsd/mod.rs | 14 +- src/sources/syslog.rs | 7 +- src/sources/util/framestream.rs | 10 +- src/sources/vector/mod.rs | 4 +- src/sources/websocket/source.rs | 16 +-- src/test_util/addr.rs | 107 ++++++++++++++ src/test_util/http.rs | 4 +- src/test_util/mod.rs | 36 ++--- src/topology/test/crash.rs | 23 +-- src/topology/test/end_to_end.rs | 4 +- src/topology/test/reload.rs | 28 ++-- src/transforms/aws_ec2_metadata.rs | 6 +- tests/integration/shutdown.rs | 12 +- 67 files changed, 537 insertions(+), 566 deletions(-) delete mode 100644 lib/portpicker/Cargo.toml delete mode 100644 lib/portpicker/LICENSE delete mode 100644 lib/portpicker/src/lib.rs create mode 100644 src/test_util/addr.rs diff --git a/Cargo.lock b/Cargo.lock index 14844d2706f27..413f09e0fda9b 
100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7809,13 +7809,6 @@ dependencies = [ "portable-atomic", ] -[[package]] -name = "portpicker" -version = "1.0.0" -dependencies = [ - "rand 0.9.2", -] - [[package]] name = "postgres-openssl" version = "0.5.1" @@ -12292,7 +12285,6 @@ dependencies = [ "paste", "percent-encoding", "pin-project", - "portpicker", "postgres-openssl", "proptest", "proptest-derive", diff --git a/Cargo.toml b/Cargo.toml index 8e0d690724f7a..0706c76a7f306 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -112,7 +112,6 @@ members = [ "lib/k8s-e2e-tests", "lib/k8s-test-framework", "lib/loki-logproto", - "lib/portpicker", "lib/prometheus-parser", "lib/opentelemetry-proto", "lib/tracing-limit", @@ -219,7 +218,6 @@ vrl.workspace = true dnsmsg-parser = { path = "lib/dnsmsg-parser", optional = true } dnstap-parser = { path = "lib/dnstap-parser", optional = true } fakedata = { path = "lib/fakedata", optional = true } -portpicker = { path = "lib/portpicker" } tracing-limit = { path = "lib/tracing-limit" } vector-common = { path = "lib/vector-common", default-features = false } vector-lib.workspace = true diff --git a/benches/http.rs b/benches/http.rs index e8ed333af5be5..0bc99dec551cf 100644 --- a/benches/http.rs +++ b/benches/http.rs @@ -15,7 +15,7 @@ use vector::{ }, sources, template::Template, - test_util::{next_addr, random_lines, runtime, send_lines, start_topology, wait_for_tcp}, + test_util::{addr::next_addr, random_lines, runtime, send_lines, start_topology, wait_for_tcp}, }; use vector_lib::codecs::{TextSerializerConfig, encoding::FramingConfig}; @@ -23,8 +23,8 @@ fn benchmark_http(c: &mut Criterion) { let num_lines: usize = 1_000; let line_size: usize = 100; - let in_addr = next_addr(); - let out_addr = next_addr(); + let (_guard_0, in_addr) = next_addr(); + let (_guard_1, out_addr) = next_addr(); let _srv = serve(out_addr); diff --git a/benches/languages.rs b/benches/languages.rs index ae99eb81cd45d..5294742e1b114 100644 --- a/benches/languages.rs +++ 
b/benches/languages.rs @@ -2,7 +2,9 @@ use criterion::{BatchSize, Criterion, SamplingMode, Throughput, criterion_group, use indoc::indoc; use vector::{ config, - test_util::{CountReceiver, next_addr, runtime, send_lines, start_topology, wait_for_tcp}, + test_util::{ + CountReceiver, addr::next_addr, runtime, send_lines, start_topology, wait_for_tcp, + }, }; criterion_group!( @@ -253,8 +255,8 @@ fn benchmark_configs( let _ = output; let num_lines = 10_000; - let in_addr = next_addr(); - let out_addr = next_addr(); + let (_guard_0, in_addr) = next_addr(); + let (_guard_1, out_addr) = next_addr(); let lines: Vec<_> = std::iter::repeat_n(input.to_string(), num_lines).collect(); diff --git a/lib/portpicker/Cargo.toml b/lib/portpicker/Cargo.toml deleted file mode 100644 index 0b296fdce8301..0000000000000 --- a/lib/portpicker/Cargo.toml +++ /dev/null @@ -1,10 +0,0 @@ -[package] -name = "portpicker" -version = "1.0.0" -authors = ["Vector Contributors ", "Hannes Karppila "] -edition = "2024" -publish = false -license = "Unlicense" - -[dependencies] -rand.workspace = true diff --git a/lib/portpicker/LICENSE b/lib/portpicker/LICENSE deleted file mode 100644 index cf1ab25da0349..0000000000000 --- a/lib/portpicker/LICENSE +++ /dev/null @@ -1,24 +0,0 @@ -This is free and unencumbered software released into the public domain. - -Anyone is free to copy, modify, publish, use, compile, sell, or -distribute this software, either in source code form or as a compiled -binary, for any purpose, commercial or non-commercial, and by any -means. - -In jurisdictions that recognize copyright laws, the author or authors -of this software dedicate any and all copyright interest in the -software to the public domain. We make this dedication for the benefit -of the public at large and to the detriment of our heirs and -successors. We intend this dedication to be an overt act of -relinquishment in perpetuity of all present and future rights to this -software under copyright law. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR -OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, -ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -OTHER DEALINGS IN THE SOFTWARE. - -For more information, please refer to diff --git a/lib/portpicker/src/lib.rs b/lib/portpicker/src/lib.rs deleted file mode 100644 index 8c83f6e4d137d..0000000000000 --- a/lib/portpicker/src/lib.rs +++ /dev/null @@ -1,134 +0,0 @@ -// Modified by `Vector Contributors `. -// Based on `https://github.com/Dentosal/portpicker-rs` by `Hannes Karppila `. -// `portpicker-rs` LICENSE: -// This is free and unencumbered software released into the public domain. - -// Anyone is free to copy, modify, publish, use, compile, sell, or -// distribute this software, either in source code form or as a compiled -// binary, for any purpose, commercial or non-commercial, and by any -// means. - -// In jurisdictions that recognize copyright laws, the author or authors -// of this software dedicate any and all copyright interest in the -// software to the public domain. We make this dedication for the benefit -// of the public at large and to the detriment of our heirs and -// successors. We intend this dedication to be an overt act of -// relinquishment in perpetuity of all present and future rights to this -// software under copyright law. - -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
-// IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR -// OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, -// ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -// OTHER DEALINGS IN THE SOFTWARE. - -// For more information, please refer to - -#![deny(warnings)] - -use std::net::{IpAddr, SocketAddr, TcpListener, ToSocketAddrs, UdpSocket}; - -use rand::{Rng, rng}; - -pub type Port = u16; - -// Try to bind to a socket using UDP -fn test_bind_udp(addr: A) -> Option { - Some(UdpSocket::bind(addr).ok()?.local_addr().ok()?.port()) -} - -// Try to bind to a socket using TCP -fn test_bind_tcp(addr: A) -> Option { - Some(TcpListener::bind(addr).ok()?.local_addr().ok()?.port()) -} - -/// Check if a port is free on UDP -pub fn is_free_udp(ip: IpAddr, port: Port) -> bool { - test_bind_udp(SocketAddr::new(ip, port)).is_some() -} - -/// Check if a port is free on TCP -pub fn is_free_tcp(ip: IpAddr, port: Port) -> bool { - test_bind_tcp(SocketAddr::new(ip, port)).is_some() -} - -/// Check if a port is free on both TCP and UDP -pub fn is_free(ip: IpAddr, port: Port) -> bool { - is_free_tcp(ip, port) && is_free_udp(ip, port) -} - -/// Asks the OS for a free port -fn ask_free_tcp_port(ip: IpAddr) -> Option { - test_bind_tcp(SocketAddr::new(ip, 0)) -} - -/// Picks an available port that is available on both TCP and UDP -/// ```rust -/// use portpicker::pick_unused_port; -/// use std::net::{IpAddr, Ipv4Addr}; -/// let port: u16 = pick_unused_port(IpAddr::V4(Ipv4Addr::LOCALHOST)); -/// ``` -pub fn pick_unused_port(ip: IpAddr) -> Port { - let mut rng = rng(); - - loop { - // Try random port first - for _ in 0..10 { - let port = rng.random_range(15000..25000); - if is_free(ip, port) { - return port; - } - } - - // Ask the OS for a port - for _ in 0..10 { - if let Some(port) = ask_free_tcp_port(ip) { - // Test that the udp port is free as well - if is_free_udp(ip, port) { - return port; - } - } - } - } -} - -pub fn 
bind_unused_udp(ip: IpAddr) -> UdpSocket { - let mut rng = rng(); - - loop { - // Try random port first - for _ in 0..10 { - let port = rng.random_range(15000..25000); - - if let Ok(socket) = UdpSocket::bind(SocketAddr::new(ip, port)) { - return socket; - } - } - - // Ask the OS for a port - for _ in 0..10 { - if let Ok(socket) = UdpSocket::bind(SocketAddr::new(ip, 0)) { - return socket; - } - } - } -} - -#[cfg(test)] -mod tests { - use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; - - use super::pick_unused_port; - - #[test] - fn ipv4_localhost() { - pick_unused_port(IpAddr::V4(Ipv4Addr::LOCALHOST)); - } - - #[test] - fn ipv6_localhost() { - pick_unused_port(IpAddr::V6(Ipv6Addr::LOCALHOST)); - } -} diff --git a/src/common/http/server_auth.rs b/src/common/http/server_auth.rs index 8bb66d993d5f3..6b79fb65d9b59 100644 --- a/src/common/http/server_auth.rs +++ b/src/common/http/server_auth.rs @@ -289,7 +289,7 @@ mod tests { use indoc::indoc; use super::*; - use crate::test_util::{next_addr, random_string}; + use crate::test_util::{addr::next_addr, random_string}; impl HttpServerAuthMatcher { fn auth_header(self) -> (HeaderValue, &'static str) { @@ -442,7 +442,8 @@ mod tests { let matcher = basic_auth.build(&Default::default()).unwrap(); - let result = matcher.handle_auth(Some(&next_addr()), &HeaderMap::new(), "/"); + let (_guard, addr) = next_addr(); + let result = matcher.handle_auth(Some(&addr), &HeaderMap::new(), "/"); assert!(result.is_err()); let error = result.unwrap_err(); @@ -461,7 +462,8 @@ mod tests { let mut headers = HeaderMap::new(); headers.insert(AUTHORIZATION, HeaderValue::from_static("Basic wrong")); - let result = matcher.handle_auth(Some(&next_addr()), &headers, "/"); + let (_guard, addr) = next_addr(); + let result = matcher.handle_auth(Some(&addr), &headers, "/"); assert!(result.is_err()); let error = result.unwrap_err(); @@ -485,7 +487,8 @@ mod tests { AUTHORIZATION, Authorization::basic(&username, &password).0.encode(), ); - let result = 
matcher.handle_auth(Some(&next_addr()), &headers, "/"); + let (_guard, addr) = next_addr(); + let result = matcher.handle_auth(Some(&addr), &headers, "/"); assert!(result.is_ok()); } @@ -500,14 +503,15 @@ mod tests { let mut headers = HeaderMap::new(); headers.insert(AUTHORIZATION, HeaderValue::from_static("test")); - let result = matcher.handle_auth(Some(&next_addr()), &headers, "/"); + let (_guard, addr) = next_addr(); + let result = matcher.handle_auth(Some(&addr), &headers, "/"); assert!(result.is_ok()); } #[test] fn custom_auth_matcher_should_be_able_to_check_address() { - let addr = next_addr(); + let (_guard, addr) = next_addr(); let addr_string = addr.ip().to_string(); let custom_auth = HttpServerAuthConfig::Custom { source: format!(".address == \"{addr_string}\""), @@ -516,14 +520,14 @@ mod tests { let matcher = custom_auth.build(&Default::default()).unwrap(); let headers = HeaderMap::new(); - let result = matcher.handle_auth(Some(&next_addr()), &headers, "/"); + let result = matcher.handle_auth(Some(&addr), &headers, "/"); assert!(result.is_ok()); } #[test] fn custom_auth_matcher_should_work_with_missing_address_too() { - let addr = next_addr(); + let (_guard, addr) = next_addr(); let addr_string = addr.ip().to_string(); let custom_auth = HttpServerAuthConfig::Custom { source: format!(".address == \"{addr_string}\""), @@ -546,7 +550,8 @@ mod tests { let matcher = custom_auth.build(&Default::default()).unwrap(); let headers = HeaderMap::new(); - let result = matcher.handle_auth(Some(&next_addr()), &headers, "/ok"); + let (_guard, addr) = next_addr(); + let result = matcher.handle_auth(Some(&addr), &headers, "/ok"); assert!(result.is_ok()); } @@ -560,7 +565,8 @@ mod tests { let matcher = custom_auth.build(&Default::default()).unwrap(); let headers = HeaderMap::new(); - let result = matcher.handle_auth(Some(&next_addr()), &headers, "/bad"); + let (_guard, addr) = next_addr(); + let result = matcher.handle_auth(Some(&addr), &headers, "/bad"); 
assert!(result.is_err()); } @@ -575,7 +581,8 @@ mod tests { let mut headers = HeaderMap::new(); headers.insert(AUTHORIZATION, HeaderValue::from_static("wrong value")); - let result = matcher.handle_auth(Some(&next_addr()), &headers, "/"); + let (_guard, addr) = next_addr(); + let result = matcher.handle_auth(Some(&addr), &headers, "/"); assert!(result.is_err()); let error = result.unwrap_err(); @@ -593,7 +600,8 @@ mod tests { let mut headers = HeaderMap::new(); headers.insert(AUTHORIZATION, HeaderValue::from_static("test")); - let result = matcher.handle_auth(Some(&next_addr()), &headers, "/"); + let (_guard, addr) = next_addr(); + let result = matcher.handle_auth(Some(&addr), &headers, "/"); assert!(result.is_err()); let error = result.unwrap_err(); diff --git a/src/components/validation/runner/config.rs b/src/components/validation/runner/config.rs index 42ca199c77883..f2906cb039ca9 100644 --- a/src/components/validation/runner/config.rs +++ b/src/components/validation/runner/config.rs @@ -14,7 +14,7 @@ use crate::{ config::{BoxedSink, BoxedSource, BoxedTransform, ConfigBuilder}, sinks::vector::VectorConfig as VectorSinkConfig, sources::vector::VectorConfig as VectorSourceConfig, - test_util::next_addr, + test_util::addr::next_addr, }; pub struct TopologyBuilder { @@ -125,7 +125,8 @@ impl TopologyBuilder { } fn build_input_edge(log_namespace: LogNamespace) -> (InputEdge, impl Into) { - let input_listen_addr = GrpcAddress::from(next_addr()); + // TODO: This needs refactoring to properly hold the PortGuard for the lifetime of the topology. 
+ let input_listen_addr = GrpcAddress::from(next_addr().1); debug!(listen_addr = %input_listen_addr, "Creating controlled input edge."); let mut input_source = VectorSourceConfig::from_address(input_listen_addr.as_socket_addr()); @@ -138,7 +139,8 @@ fn build_input_edge(log_namespace: LogNamespace) -> (InputEdge, impl Into (OutputEdge, impl Into) { - let output_listen_addr = GrpcAddress::from(next_addr()); + // TODO: This needs refactoring to properly hold the PortGuard for the lifetime of the topology. + let output_listen_addr = GrpcAddress::from(next_addr().1); debug!(endpoint = %output_listen_addr, "Creating controlled output edge."); let mut output_sink = VectorSinkConfig::from_address(output_listen_addr.as_uri()); diff --git a/src/components/validation/runner/telemetry.rs b/src/components/validation/runner/telemetry.rs index 127654b8bc625..5cd07db27a615 100644 --- a/src/components/validation/runner/telemetry.rs +++ b/src/components/validation/runner/telemetry.rs @@ -13,7 +13,7 @@ use crate::{ proto::vector::Server as VectorServer, sinks::vector::VectorConfig as VectorSinkConfig, sources::{internal_logs::InternalLogsConfig, internal_metrics::InternalMetricsConfig}, - test_util::next_addr, + test_util::addr::next_addr, }; const INTERNAL_LOGS_KEY: &str = "_telemetry_logs"; @@ -32,7 +32,8 @@ pub struct Telemetry { impl Telemetry { /// Creates a telemetry collector by attaching the relevant components to an existing `ConfigBuilder`. 
pub fn attach_to_config(config_builder: &mut ConfigBuilder) -> Self { - let listen_addr = GrpcAddress::from(next_addr()); + let (_guard, addr) = next_addr(); + let listen_addr = GrpcAddress::from(addr); info!(%listen_addr, "Attaching telemetry components."); // Attach an internal logs and internal metrics source, and send them on to a dedicated Vector diff --git a/src/http.rs b/src/http.rs index e10af20faaa30..1a24e38a37fcb 100644 --- a/src/http.rs +++ b/src/http.rs @@ -706,7 +706,7 @@ mod tests { use tower::ServiceBuilder; use super::*; - use crate::test_util::next_addr; + use crate::test_util::addr::next_addr; #[test] fn test_default_request_headers_defaults() { @@ -894,7 +894,7 @@ mod tests { async fn test_max_connection_age_service_with_hyper_server() { // Create a hyper server with the max connection age layer. let max_connection_age = Duration::from_secs(1); - let addr = next_addr(); + let (_guard, addr) = next_addr(); let make_svc = make_service_fn(move |conn: &AddrStream| { let svc = ServiceBuilder::new() .layer(MaxConnectionAgeLayer::new( diff --git a/src/sinks/appsignal/integration_tests.rs b/src/sinks/appsignal/integration_tests.rs index e14ba583c66e9..b6a651084266c 100644 --- a/src/sinks/appsignal/integration_tests.rs +++ b/src/sinks/appsignal/integration_tests.rs @@ -14,11 +14,12 @@ use crate::{ util::test::{build_test_server_status, load_sink}, }, test_util::{ + addr::next_addr, components::{ COMPONENT_ERROR_TAGS, HTTP_SINK_TAGS, assert_sink_compliance, assert_sink_error, run_and_assert_sink_compliance, }, - generate_lines_with_stream, map_event_batch_stream, next_addr, + generate_lines_with_stream, map_event_batch_stream, }, }; @@ -29,7 +30,7 @@ async fn start_test(events: Vec) -> (Vec, Receiver<(http::request: "#}; let config = config.replace("${TEST_APPSIGNAL_PUSH_API_KEY}", &push_api_key()); let (mut config, cx) = load_sink::(config.as_str()).unwrap(); - let addr = next_addr(); + let (_guard, addr) = next_addr(); // Set the endpoint to a local 
server so we can fetch the sent events later config.endpoint = format!("http://{addr}"); diff --git a/src/sinks/datadog/events/tests.rs b/src/sinks/datadog/events/tests.rs index c3588396c39df..a363307f6c4e7 100644 --- a/src/sinks/datadog/events/tests.rs +++ b/src/sinks/datadog/events/tests.rs @@ -17,8 +17,9 @@ use crate::{ event::EventArray, sinks::util::test::{build_test_server_status, load_sink}, test_util::{ + addr::next_addr, components::{self, COMPONENT_ERROR_TAGS, HTTP_SINK_TAGS}, - next_addr, random_lines_with_stream, + random_lines_with_stream, }, }; @@ -49,7 +50,7 @@ async fn start_test( "#}; let (mut config, cx) = load_sink::(config).unwrap(); - let addr = next_addr(); + let (_guard, addr) = next_addr(); // Swap out the endpoint so we can force send it // to our local server let endpoint = format!("http://{addr}"); @@ -114,7 +115,7 @@ async fn api_key_in_metadata() { "#}) .unwrap(); - let addr = next_addr(); + let (_guard, addr) = next_addr(); // Swap out the endpoint so we can force send it // to our local server let endpoint = format!("http://{addr}"); diff --git a/src/sinks/datadog/logs/tests.rs b/src/sinks/datadog/logs/tests.rs index bba15e2164a41..3867ababb48c5 100644 --- a/src/sinks/datadog/logs/tests.rs +++ b/src/sinks/datadog/logs/tests.rs @@ -29,12 +29,13 @@ use crate::{ }, }, test_util::{ + addr::next_addr, components::{ COMPONENT_ERROR_TAGS, DATA_VOLUME_SINK_TAGS, SINK_TAGS, run_and_assert_data_volume_sink_compliance, run_and_assert_sink_compliance, run_and_assert_sink_error, }, - next_addr, random_lines_with_stream, + random_lines_with_stream, }, tls::TlsError, }; @@ -86,7 +87,7 @@ async fn start_test_detail( "#}; let (mut config, cx) = load_sink::(config).unwrap(); - let addr = next_addr(); + let (_guard, addr) = next_addr(); // Swap out the endpoint so we can force send it // to our local server let endpoint = format!("http://{addr}"); @@ -242,7 +243,7 @@ async fn api_key_in_metadata_inner(api_status: ApiStatus) { "#}) .unwrap(); - let addr 
= next_addr(); + let (_guard, addr) = next_addr(); // Swap out the endpoint so we can force send it to our local server let endpoint = format!("http://{addr}"); config.local_dd_common.endpoint = Some(endpoint.clone()); @@ -321,7 +322,7 @@ async fn multiple_api_keys_inner(api_status: ApiStatus) { "#}) .unwrap(); - let addr = next_addr(); + let (_guard, addr) = next_addr(); // Swap out the endpoint so we can force send it // to our local server let endpoint = format!("http://{addr}"); @@ -375,7 +376,7 @@ async fn headers_inner(api_status: ApiStatus) { "#}) .unwrap(); - let addr = next_addr(); + let (_guard, addr) = next_addr(); // Swap out the endpoint so we can force send it to our local server let endpoint = format!("http://{addr}"); config.local_dd_common.endpoint = Some(endpoint.clone()); @@ -446,7 +447,7 @@ async fn does_not_send_too_big_payloads() { "#}) .unwrap(); - let addr = next_addr(); + let (_guard, addr) = next_addr(); let endpoint = format!("http://{addr}"); config.local_dd_common.endpoint = Some(endpoint.clone()); @@ -498,7 +499,7 @@ async fn global_options() { }; let (mut config, cx) = load_sink_with_context::(config, cx).unwrap(); - let addr = next_addr(); + let (_guard, addr) = next_addr(); // Swap out the endpoint so we can force send it // to our local server let endpoint = format!("http://{addr}"); @@ -545,7 +546,7 @@ async fn override_global_options() { }; let (mut config, cx) = load_sink_with_context::(config, cx).unwrap(); - let addr = next_addr(); + let (_guard, addr) = next_addr(); // Swap out the endpoint so we can force send it // to our local server let endpoint = format!("http://{addr}"); diff --git a/src/sinks/datadog/metrics/integration_tests.rs b/src/sinks/datadog/metrics/integration_tests.rs index 2d40c8085ac16..e20ef106f7ee2 100644 --- a/src/sinks/datadog/metrics/integration_tests.rs +++ b/src/sinks/datadog/metrics/integration_tests.rs @@ -24,11 +24,12 @@ use crate::{ config::SinkConfig, sinks::util::test::{build_test_server_status, 
load_sink}, test_util::{ + addr::next_addr, components::{ DATA_VOLUME_SINK_TAGS, SINK_TAGS, assert_data_volume_sink_compliance, assert_sink_compliance, }, - map_event_batch_stream, next_addr, + map_event_batch_stream, }, }; @@ -118,7 +119,7 @@ async fn start_test(events: Vec) -> (Vec, Receiver<(http::request: "#}; let (mut config, cx) = load_sink::(config).unwrap(); - let addr = next_addr(); + let (_guard, addr) = next_addr(); // Swap out the endpoint so we can force send it // to our local server let endpoint = format!("http://{addr}"); diff --git a/src/sinks/datadog/metrics/tests.rs b/src/sinks/datadog/metrics/tests.rs index a7ebb70913a9a..716c71cc14381 100644 --- a/src/sinks/datadog/metrics/tests.rs +++ b/src/sinks/datadog/metrics/tests.rs @@ -12,8 +12,9 @@ use crate::{ util::test::load_sink_with_context, }, test_util::{ + addr::next_addr, components::{SINK_TAGS, run_and_assert_sink_compliance}, - next_addr, random_metrics_with_stream, + random_metrics_with_stream, }, }; @@ -29,7 +30,7 @@ async fn global_options() { }; let (mut config, cx) = load_sink_with_context::(config, cx).unwrap(); - let addr = next_addr(); + let (_guard, addr) = next_addr(); // Swap out the endpoint so we can force send it // to our local server let endpoint = format!("http://{addr}"); @@ -75,7 +76,7 @@ async fn override_global_options() { }; let (mut config, cx) = load_sink_with_context::(config, cx).unwrap(); - let addr = next_addr(); + let (_guard, addr) = next_addr(); // Swap out the endpoint so we can force send it // to our local server let endpoint = format!("http://{addr}"); diff --git a/src/sinks/datadog/traces/tests.rs b/src/sinks/datadog/traces/tests.rs index 8bdd181c59d34..8195c4ff72efc 100644 --- a/src/sinks/datadog/traces/tests.rs +++ b/src/sinks/datadog/traces/tests.rs @@ -19,8 +19,9 @@ use crate::{ extra_context::ExtraContext, sinks::util::test::{build_test_server_status, load_sink, load_sink_with_context}, test_util::{ + addr::next_addr, components::{SINK_TAGS, 
assert_sink_compliance, run_and_assert_sink_compliance}, - map_event_batch_stream, next_addr, + map_event_batch_stream, }, }; @@ -31,7 +32,7 @@ async fn start_test( events: Vec, ) -> Receiver<(http::request::Parts, Bytes)> { assert_sink_compliance(&SINK_TAGS, async { - let addr = next_addr(); + let (_guard, addr) = next_addr(); let config = format!( indoc! {r#" default_api_key = "atoken" @@ -324,7 +325,7 @@ async fn global_options() { }; let (mut config, cx) = load_sink_with_context::(config, cx).unwrap(); - let addr = next_addr(); + let (_guard, addr) = next_addr(); // Swap out the endpoint so we can force send it // to our local server let endpoint = format!("http://{addr}"); @@ -369,7 +370,7 @@ async fn override_global_options() { }; let (mut config, cx) = load_sink_with_context::(config, cx).unwrap(); - let addr = next_addr(); + let (_guard, addr) = next_addr(); // Swap out the endpoint so we can force send it // to our local server let endpoint = format!("http://{addr}"); diff --git a/src/sinks/gcp/stackdriver/metrics/tests.rs b/src/sinks/gcp/stackdriver/metrics/tests.rs index a5b3a40649d1f..52bf5ddb607ae 100644 --- a/src/sinks/gcp/stackdriver/metrics/tests.rs +++ b/src/sinks/gcp/stackdriver/metrics/tests.rs @@ -9,9 +9,9 @@ use crate::{ gcp::GcpAuthConfig, sinks::{prelude::*, util::test::build_test_server}, test_util::{ + addr::next_addr, components::{SINK_TAGS, run_and_assert_sink_compliance}, http::{always_200_response, spawn_blackhole_http_server}, - next_addr, }, }; @@ -49,7 +49,7 @@ async fn component_spec_compliance() { #[tokio::test] async fn sends_metric() { - let in_addr = next_addr(); + let (_guard, in_addr) = next_addr(); let config = StackdriverConfig { endpoint: format!("http://{in_addr}"), auth: GcpAuthConfig { @@ -106,7 +106,7 @@ async fn sends_metric() { #[tokio::test] async fn sends_multiple_metrics() { - let in_addr = next_addr(); + let (_guard, in_addr) = next_addr(); let mut batch = BatchConfig::default(); batch.max_events = Some(5); @@ 
-193,7 +193,7 @@ async fn sends_multiple_metrics() { #[tokio::test] async fn does_not_aggregate_metrics() { - let in_addr = next_addr(); + let (_guard, in_addr) = next_addr(); let mut batch = BatchConfig::default(); batch.max_events = Some(5); diff --git a/src/sinks/http/tests.rs b/src/sinks/http/tests.rs index 1b545c4c01257..c7e6862fd79f9 100644 --- a/src/sinks/http/tests.rs +++ b/src/sinks/http/tests.rs @@ -40,11 +40,12 @@ use crate::{ }, }, test_util::{ + addr::next_addr, components::{ self, COMPONENT_ERROR_TAGS, HTTP_SINK_TAGS, init_test, run_and_assert_sink_compliance, run_and_assert_sink_error_with_events, }, - create_events_batch_with_fn, next_addr, random_lines_with_stream, + create_events_batch_with_fn, random_lines_with_stream, }, }; @@ -506,7 +507,7 @@ async fn json_compression(compression: &str) { components::assert_sink_compliance(&HTTP_SINK_TAGS, async { let num_lines = 1000; - let in_addr = next_addr(); + let (_guard, in_addr) = next_addr(); let config = r#" uri = "http://$IN_ADDR/frames" @@ -565,7 +566,7 @@ async fn json_compression_with_payload_wrapper(compression: &str) { components::assert_sink_compliance(&HTTP_SINK_TAGS, async { let num_lines = 1000; - let in_addr = next_addr(); + let (_guard, in_addr) = next_addr(); let config = r#" uri = "http://$IN_ADDR/frames" @@ -635,7 +636,7 @@ async fn templateable_uri_path() { let num_events_per_id = 100; let an_id = 1; let another_id = 2; - let in_addr = next_addr(); + let (_guard, in_addr) = next_addr(); let config = format!( r#" @@ -706,7 +707,7 @@ async fn templateable_uri_auth() { let a_pass = "a_pass"; let another_user = "another_user"; let another_pass = "another_pass"; - let in_addr = next_addr(); + let (_guard, in_addr) = next_addr(); let config = format!( r#" uri = "http://{{{{user}}}}:{{{{pass}}}}@{in_addr}/" @@ -774,7 +775,7 @@ async fn templateable_uri_auth() { async fn missing_field_in_uri_template() { init_test(); - let in_addr = next_addr(); + let (_guard, in_addr) = next_addr(); let 
config = format!( r#" uri = "http://{in_addr}/{{{{missing_field}}}}" @@ -819,7 +820,7 @@ async fn missing_field_in_uri_template() { async fn http_uri_auth_conflict() { init_test(); - let in_addr = next_addr(); + let (_guard, in_addr) = next_addr(); let config = format!( r#" uri = "http://user:pass@{in_addr}/" @@ -914,7 +915,7 @@ async fn run_sink_with_events( } async fn build_sink(extra_config: &str) -> (std::net::SocketAddr, crate::sinks::VectorSink) { - let in_addr = next_addr(); + let (_guard, in_addr) = next_addr(); let config = format!( r#" diff --git a/src/sinks/humio/metrics.rs b/src/sinks/humio/metrics.rs index 6d2968537222d..c88f19aea33b4 100644 --- a/src/sinks/humio/metrics.rs +++ b/src/sinks/humio/metrics.rs @@ -283,7 +283,7 @@ mod tests { "#}) .unwrap(); - let addr = test_util::next_addr(); + let (_guard, addr) = test_util::addr::next_addr(); // Swap out the endpoint so we can force send it // to our local server config.endpoint = format!("http://{addr}"); @@ -349,7 +349,7 @@ mod tests { "#}) .unwrap(); - let addr = test_util::next_addr(); + let (_guard, addr) = test_util::addr::next_addr(); // Swap out the endpoint so we can force send it // to our local server config.endpoint = format!("http://{addr}"); diff --git a/src/sinks/influxdb/logs.rs b/src/sinks/influxdb/logs.rs index 2ee854282c482..0c7f9a115a96b 100644 --- a/src/sinks/influxdb/logs.rs +++ b/src/sinks/influxdb/logs.rs @@ -405,11 +405,11 @@ mod tests { util::test::{build_test_server_status, load_sink}, }, test_util::{ + addr::next_addr, components::{ COMPONENT_ERROR_TAGS, HTTP_SINK_TAGS, run_and_assert_sink_compliance, run_and_assert_sink_error, }, - next_addr, }, }; @@ -760,7 +760,7 @@ mod tests { // Make sure we can build the config _ = config.build(cx.clone()).await.unwrap(); - let addr = next_addr(); + let (_guard, addr) = next_addr(); // Swap out the host so we can force send it // to our local server let host = format!("http://{addr}"); diff --git a/src/sinks/loki/tests.rs 
b/src/sinks/loki/tests.rs index af4d3638b9c9d..a66581bf651f4 100644 --- a/src/sinks/loki/tests.rs +++ b/src/sinks/loki/tests.rs @@ -100,7 +100,7 @@ async fn healthcheck_includes_auth() { ) .unwrap(); - let addr = test_util::next_addr(); + let (_guard, addr) = test_util::addr::next_addr(); let endpoint = format!("http://{addr}"); config.endpoint = endpoint .clone() diff --git a/src/sinks/mezmo.rs b/src/sinks/mezmo.rs index 3661621a9dde9..d24142993623b 100644 --- a/src/sinks/mezmo.rs +++ b/src/sinks/mezmo.rs @@ -406,8 +406,9 @@ mod tests { config::SinkConfig, sinks::util::test::{build_test_server_status, load_sink}, test_util::{ + addr::next_addr, components::{HTTP_SINK_TAGS, assert_sink_compliance}, - next_addr, random_lines, + random_lines, }, }; @@ -479,7 +480,7 @@ mod tests { // Make sure we can build the config _ = config.build(cx.clone()).await.unwrap(); - let addr = next_addr(); + let (_guard, addr) = next_addr(); // Swap out the host so we can force send it // to our local server let endpoint = UriSerde { diff --git a/src/sinks/postgres/integration_tests.rs b/src/sinks/postgres/integration_tests.rs index 56c6ed42eda38..dd1b43dfe6a96 100644 --- a/src/sinks/postgres/integration_tests.rs +++ b/src/sinks/postgres/integration_tests.rs @@ -16,11 +16,12 @@ use crate::{ event::{ObjectMap, TraceEvent, Value}, sinks::{postgres::PostgresConfig, util::test::load_sink}, test_util::{ + addr::next_addr, components::{ COMPONENT_ERROR_TAGS, run_and_assert_sink_compliance, run_and_assert_sink_error, }, integration::postgres::pg_url, - next_addr, random_table_name, trace_init, + random_table_name, trace_init, }, }; @@ -202,7 +203,7 @@ async fn healthcheck_fails_unknown_host() { async fn healthcheck_fails_timed_out() { trace_init(); - let free_addr = next_addr(); + let (_guard, free_addr) = next_addr(); let endpoint = format!("postgres://{free_addr}"); let table = random_table_name(); let config_str = format!( diff --git a/src/sinks/prometheus/exporter.rs 
b/src/sinks/prometheus/exporter.rs index 6e140a8181de8..823914f00746e 100644 --- a/src/sinks/prometheus/exporter.rs +++ b/src/sinks/prometheus/exporter.rs @@ -622,8 +622,9 @@ mod tests { http::HttpClient, sinks::prometheus::{distribution_to_agg_histogram, distribution_to_ddsketch}, test_util::{ + addr::next_addr, components::{SINK_TAGS, run_and_assert_sink_compliance}, - next_addr, random_string, trace_init, + random_string, trace_init, }, tls::MaybeTlsSettings, }; @@ -902,7 +903,7 @@ mod tests { let client_settings = MaybeTlsSettings::from_config(tls_config.as_ref(), false).unwrap(); let proto = client_settings.http_protocol_name(); - let address = next_addr(); + let (_guard, address) = next_addr(); let config = PrometheusExporterConfig { address, tls: tls_config, @@ -989,7 +990,7 @@ mod tests { let client_settings = MaybeTlsSettings::from_config(None, false).unwrap(); let proto = client_settings.http_protocol_name(); - let address = next_addr(); + let (_guard, address) = next_addr(); let config = PrometheusExporterConfig { address, auth: server_auth_config, @@ -1105,8 +1106,9 @@ mod tests { #[tokio::test] async fn sink_absolute() { + let (_guard, address) = next_addr(); let config = PrometheusExporterConfig { - address: next_addr(), // Not actually bound, just needed to fill config + address, tls: None, ..Default::default() }; @@ -1158,8 +1160,9 @@ mod tests { // are the same -- without loss of accuracy. // This expects that the default for the sink is to render distributions as aggregated histograms. + let (_guard, address) = next_addr(); let config = PrometheusExporterConfig { - address: next_addr(), // Not actually bound, just needed to fill config + address, tls: None, ..Default::default() }; @@ -1277,8 +1280,9 @@ mod tests { // // The render code is actually what will end up rendering those sketches as aggregated // summaries in the scrape output. 
+ let (_guard, address) = next_addr(); let config = PrometheusExporterConfig { - address: next_addr(), // Not actually bound, just needed to fill config + address, tls: None, distributions_as_summaries: true, ..Default::default() @@ -1386,8 +1390,9 @@ mod tests { // This test ensures that this normalization works correctly when applied to a mix of both // Incremental and Absolute inputs. + let (_guard, address) = next_addr(); let config = PrometheusExporterConfig { - address: next_addr(), // Not actually bound, just needed to fill config + address, tls: None, ..Default::default() }; diff --git a/src/sinks/prometheus/remote_write/tests.rs b/src/sinks/prometheus/remote_write/tests.rs index 2e627dae5acb8..3249c3c8c1f10 100644 --- a/src/sinks/prometheus/remote_write/tests.rs +++ b/src/sinks/prometheus/remote_write/tests.rs @@ -230,7 +230,7 @@ async fn doesnt_aggregate_batches() { async fn send_request(config: &str, events: Vec) -> Vec<(HeaderMap, proto::WriteRequest)> { assert_sink_compliance(&HTTP_SINK_TAGS, async { - let addr = test_util::next_addr(); + let (_guard, addr) = test_util::addr::next_addr(); let (rx, trigger, server) = build_test_server(addr); tokio::spawn(server); diff --git a/src/sinks/sematext/logs.rs b/src/sinks/sematext/logs.rs index 74ac3f8d139e5..0f12de87d3931 100644 --- a/src/sinks/sematext/logs.rs +++ b/src/sinks/sematext/logs.rs @@ -165,8 +165,9 @@ mod tests { config::SinkConfig, sinks::util::test::{build_test_server, load_sink}, test_util::{ + addr::next_addr, components::{self, HTTP_SINK_TAGS}, - next_addr, random_lines_with_stream, + random_lines_with_stream, }, }; @@ -185,7 +186,7 @@ mod tests { // Make sure we can build the config _ = config.build(cx.clone()).await.unwrap(); - let addr = next_addr(); + let (_guard, addr) = next_addr(); // Swap out the host so we can force send it // to our local server config.endpoint = Some(format!("http://{addr}")); diff --git a/src/sinks/sematext/metrics.rs b/src/sinks/sematext/metrics.rs index 
735713382669a..974372c3be00b 100644 --- a/src/sinks/sematext/metrics.rs +++ b/src/sinks/sematext/metrics.rs @@ -316,8 +316,9 @@ mod tests { event::{Event, metric::MetricKind}, sinks::util::test::{build_test_server, load_sink}, test_util::{ + addr::next_addr, components::{HTTP_SINK_TAGS, assert_sink_compliance}, - next_addr, test_generate_config, + test_generate_config, }, }; @@ -415,7 +416,7 @@ mod tests { "#}) .unwrap(); - let addr = next_addr(); + let (_guard, addr) = next_addr(); // Swap out the endpoint so we can force send it // to our local server let endpoint = format!("http://{addr}"); diff --git a/src/sinks/socket.rs b/src/sinks/socket.rs index b5004b67a88c4..6a478c8091aa2 100644 --- a/src/sinks/socket.rs +++ b/src/sinks/socket.rs @@ -238,8 +238,9 @@ mod test { event::{Event, LogEvent}, test_util::{ CountReceiver, + addr::{next_addr, next_addr_v6}, components::{SINK_TAGS, assert_sink_compliance, run_and_assert_sink_compliance}, - next_addr, next_addr_v6, random_lines_with_stream, trace_init, + random_lines_with_stream, trace_init, }, }; @@ -315,14 +316,16 @@ mod test { async fn udp_ipv4() { trace_init(); - test_datagram(DatagramSocketAddr::Udp(next_addr())).await; + let (_guard, addr) = next_addr(); + test_datagram(DatagramSocketAddr::Udp(addr)).await; } #[tokio::test] async fn udp_ipv6() { trace_init(); - test_datagram(DatagramSocketAddr::Udp(next_addr_v6())).await; + let (_guard, addr) = next_addr_v6(); + test_datagram(DatagramSocketAddr::Udp(addr)).await; } #[cfg(all(unix, not(target_os = "macos")))] @@ -340,7 +343,7 @@ mod test { async fn tcp_stream() { trace_init(); - let addr = next_addr(); + let (_guard, addr) = next_addr(); let config = SocketSinkConfig { mode: Mode::Tcp(TcpMode { config: TcpSinkConfig::from_address(addr.to_string()), @@ -450,7 +453,7 @@ mod test { trace_init(); - let addr = next_addr(); + let (_guard, addr) = next_addr(); let config = SocketSinkConfig { mode: Mode::Tcp(TcpMode { config: TcpSinkConfig::new( @@ -584,7 +587,7 @@ mod 
test { async fn reconnect() { trace_init(); - let addr = next_addr(); + let (_guard, addr) = next_addr(); let config = SocketSinkConfig { mode: Mode::Tcp(TcpMode { config: TcpSinkConfig::from_address(addr.to_string()), diff --git a/src/sinks/splunk_hec/logs/tests.rs b/src/sinks/splunk_hec/logs/tests.rs index 7766c7d7e1ccd..c89fe43b66e7c 100644 --- a/src/sinks/splunk_hec/logs/tests.rs +++ b/src/sinks/splunk_hec/logs/tests.rs @@ -27,7 +27,7 @@ use crate::{ }, }, template::Template, - test_util::next_addr, + test_util::addr::next_addr, }; #[derive(Deserialize, Debug)] @@ -216,7 +216,7 @@ fn splunk_encode_log_event_text() { #[tokio::test] async fn splunk_passthrough_token() { - let addr = next_addr(); + let (_guard, addr) = next_addr(); let config = HecLogsSinkConfig { default_token: "token".to_string().into(), endpoint: format!("http://{addr}"), diff --git a/src/sinks/splunk_hec/metrics/tests.rs b/src/sinks/splunk_hec/metrics/tests.rs index 8198d2eb5fa1d..16d08be5d683b 100644 --- a/src/sinks/splunk_hec/metrics/tests.rs +++ b/src/sinks/splunk_hec/metrics/tests.rs @@ -21,7 +21,7 @@ use crate::{ util::{Compression, test::build_test_server}, }, template::Template, - test_util::next_addr, + test_util::addr::next_addr, }; fn get_counter() -> Metric { @@ -322,7 +322,7 @@ fn test_encode_event_gauge_overridden_namespace_returns_expected_json() { #[tokio::test] async fn splunk_passthrough_token() { - let addr = next_addr(); + let (_guard, addr) = next_addr(); let config = HecMetricsSinkConfig { default_token: "token".to_owned().into(), endpoint: format!("http://{addr}"), diff --git a/src/sinks/statsd/tests.rs b/src/sinks/statsd/tests.rs index 27ce5ceefff32..8fe211188c5e3 100644 --- a/src/sinks/statsd/tests.rs +++ b/src/sinks/statsd/tests.rs @@ -14,9 +14,10 @@ use crate::{ config::{SinkConfig, SinkContext}, sinks::{statsd::config::Mode, util::service::net::UdpConnectorConfig}, test_util::{ + addr::next_addr, collect_n, components::{SINK_TAGS, assert_sink_compliance}, - 
next_addr, trace_init, + trace_init, }, }; @@ -34,7 +35,7 @@ fn tags() -> MetricTags { async fn test_send_to_statsd() { trace_init(); - let addr = next_addr(); + let (_guard, addr) = next_addr(); let config = StatsdSinkConfig { default_namespace: Some("ns".into()), diff --git a/src/sinks/util/http.rs b/src/sinks/util/http.rs index 9766d2fdcb225..12a0d30caec6a 100644 --- a/src/sinks/util/http.rs +++ b/src/sinks/util/http.rs @@ -949,7 +949,7 @@ mod test { }; use super::*; - use crate::{config::ProxyConfig, test_util::next_addr}; + use crate::{config::ProxyConfig, test_util::addr::next_addr}; #[test] fn util_http_retry_logic() { @@ -977,7 +977,7 @@ mod test { #[tokio::test] async fn util_http_it_makes_http_requests() { - let addr = next_addr(); + let (_guard, addr) = next_addr(); let uri = format!("http://{}:{}/", addr.ip(), addr.port()) .parse::() diff --git a/src/sinks/util/tcp.rs b/src/sinks/util/tcp.rs index aa8aeff486d3e..1afa8a28479a8 100644 --- a/src/sinks/util/tcp.rs +++ b/src/sinks/util/tcp.rs @@ -339,18 +339,18 @@ mod test { use tokio::net::TcpListener; use super::*; - use crate::test_util::{next_addr, trace_init}; + use crate::test_util::{addr::next_addr, trace_init}; #[tokio::test] async fn healthcheck() { trace_init(); - let addr = next_addr(); + let (_guard, addr) = next_addr(); let _listener = TcpListener::bind(&addr).await.unwrap(); let good = TcpConnector::from_host_port(addr.ip().to_string(), addr.port()); assert!(good.healthcheck().await.is_ok()); - let addr = next_addr(); + let (_guard, addr) = next_addr(); let bad = TcpConnector::from_host_port(addr.ip().to_string(), addr.port()); assert!(bad.healthcheck().await.is_err()); } diff --git a/src/sinks/vector/mod.rs b/src/sinks/vector/mod.rs index e6d2e101007ea..4cf882b1d2ee6 100644 --- a/src/sinks/vector/mod.rs +++ b/src/sinks/vector/mod.rs @@ -56,11 +56,12 @@ mod tests { proto::vector as proto, sinks::util::test::build_test_server_generic, test_util::{ + addr::next_addr, components::{ 
DATA_VOLUME_SINK_TAGS, HTTP_SINK_TAGS, run_and_assert_data_volume_sink_compliance, run_and_assert_sink_compliance, }, - next_addr, random_lines_with_stream, + random_lines_with_stream, }, }; @@ -80,7 +81,7 @@ mod tests { async fn run_sink_test(test_type: TestType) { let num_lines = 10; - let in_addr = next_addr(); + let (_guard, in_addr) = next_addr(); let config = format!(r#"address = "http://{in_addr}/""#); let config: VectorConfig = toml::from_str(&config).unwrap(); @@ -152,7 +153,7 @@ mod tests { async fn acknowledges_error() { let num_lines = 10; - let in_addr = next_addr(); + let (_guard, in_addr) = next_addr(); let config = format!(r#"address = "http://{in_addr}/""#); let config: VectorConfig = toml::from_str(&config).unwrap(); diff --git a/src/sinks/websocket/sink.rs b/src/sinks/websocket/sink.rs index f127b137ff164..198480ca3204b 100644 --- a/src/sinks/websocket/sink.rs +++ b/src/sinks/websocket/sink.rs @@ -226,8 +226,9 @@ mod tests { http::Auth, test_util::{ CountReceiver, + addr::next_addr, components::{SINK_TAGS, run_and_assert_sink_compliance}, - next_addr, random_lines_with_stream, trace_init, + random_lines_with_stream, trace_init, }, tls::{self, MaybeTlsSettings, TlsConfig, TlsEnableableConfig}, }; @@ -236,7 +237,7 @@ mod tests { async fn test_websocket() { trace_init(); - let addr = next_addr(); + let (_guard, addr) = next_addr(); let config = WebSocketSinkConfig { common: WebSocketCommonConfig { uri: format!("ws://{addr}"), @@ -261,7 +262,7 @@ mod tests { token: "OiJIUzI1NiIsInR5cCI6IkpXVCJ".to_string().into(), }); let auth_clone = auth.clone(); - let addr = next_addr(); + let (_guard, addr) = next_addr(); let config = WebSocketSinkConfig { common: WebSocketCommonConfig { uri: format!("ws://{addr}"), @@ -282,7 +283,7 @@ mod tests { async fn test_tls_websocket() { trace_init(); - let addr = next_addr(); + let (_guard, addr) = next_addr(); let tls_config = Some(TlsEnableableConfig::test_config()); let tls = 
MaybeTlsSettings::from_config(tls_config.as_ref(), true).unwrap(); @@ -313,7 +314,7 @@ mod tests { async fn test_websocket_reconnect() { trace_init(); - let addr = next_addr(); + let (_guard, addr) = next_addr(); let config = WebSocketSinkConfig { common: WebSocketCommonConfig { uri: format!("ws://{addr}"), diff --git a/src/sinks/websocket_server/sink.rs b/src/sinks/websocket_server/sink.rs index 9821785280835..e00a39127f36d 100644 --- a/src/sinks/websocket_server/sink.rs +++ b/src/sinks/websocket_server/sink.rs @@ -456,8 +456,8 @@ mod tests { config::InternalMetricsConfig, }, test_util::{ + addr::next_addr, components::{SINK_TAGS, run_and_assert_sink_compliance}, - next_addr, }, }; @@ -475,7 +475,7 @@ mod tests { let event = Event::Log(LogEvent::from("foo")); let (mut sender, input_events) = build_test_event_channel(); - let address = next_addr(); + let (_guard, address) = next_addr(); let port = address.port(); let websocket_sink = start_websocket_server_sink( @@ -502,7 +502,7 @@ mod tests { let event2 = Event::Log(LogEvent::from("foo2")); let (mut sender, input_events) = build_test_event_channel(); - let address = next_addr(); + let (_guard, address) = next_addr(); let port = address.port(); let websocket_sink = start_websocket_server_sink( @@ -534,7 +534,7 @@ mod tests { let event = Event::Log(LogEvent::from("foo")); let (mut sender, input_events) = build_test_event_channel(); - let address = next_addr(); + let (_guard, address) = next_addr(); let port = address.port(); let websocket_sink = start_websocket_server_sink( @@ -563,7 +563,7 @@ mod tests { let event = Event::Log(LogEvent::from("foo")); let (mut sender, input_events) = build_test_event_channel(); - let address = next_addr(); + let (_guard, address) = next_addr(); let port = address.port(); let websocket_sink = start_websocket_server_sink( @@ -601,7 +601,7 @@ mod tests { let event = Event::Log(LogEvent::from("foo")); let (mut sender, input_events) = build_test_event_channel(); - let address = 
next_addr(); + let (_guard, address) = next_addr(); let port = address.port(); let websocket_sink = start_websocket_server_sink( @@ -660,9 +660,10 @@ mod tests { async fn sink_spec_compliance() { let event = Event::Log(LogEvent::from("foo")); + let (_guard, address) = next_addr(); let sink = WebSocketListenerSink::new( WebSocketListenerSinkConfig { - address: next_addr(), + address, ..Default::default() }, SinkContext::default(), @@ -683,7 +684,7 @@ mod tests { let event2 = Event::Log(LogEvent::from("foo2")); let (mut sender, input_events) = build_test_event_channel(); - let address = next_addr(); + let (_guard, address) = next_addr(); let port = address.port(); let websocket_sink = start_websocket_server_sink( @@ -728,7 +729,7 @@ mod tests { let event2 = Event::Log(LogEvent::from("foo2")); let (mut sender, input_events) = build_test_event_channel(); - let address = next_addr(); + let (_guard, address) = next_addr(); let port = address.port(); let websocket_sink = start_websocket_server_sink( @@ -772,7 +773,7 @@ mod tests { let event3 = Event::Log(LogEvent::from("foo3")); let (mut sender, input_events) = build_test_event_channel(); - let address = next_addr(); + let (_guard, address) = next_addr(); let port = address.port(); let websocket_sink = start_websocket_server_sink( diff --git a/src/sources/apache_metrics/mod.rs b/src/sources/apache_metrics/mod.rs index cea02bdf34f9b..dc8256c535132 100644 --- a/src/sources/apache_metrics/mod.rs +++ b/src/sources/apache_metrics/mod.rs @@ -290,9 +290,10 @@ mod test { Error, config::SourceConfig, test_util::{ + addr::next_addr, collect_ready, components::{HTTP_PULL_SOURCE_TAGS, run_and_assert_source_compliance}, - next_addr, wait_for_tcp, + wait_for_tcp, }, }; @@ -303,7 +304,7 @@ mod test { #[tokio::test] async fn test_apache_up() { - let in_addr = next_addr(); + let (_guard, in_addr) = next_addr(); let make_svc = make_service_fn(|_| async { Ok::<_, Error>(service_fn(|_| async { @@ -396,7 +397,7 @@ Scoreboard: 
____S_____I______R____I_______KK___D__C__G_L____________W___________ #[tokio::test] async fn test_apache_error() { - let in_addr = next_addr(); + let (_guard, in_addr) = next_addr(); let make_svc = make_service_fn(|_| async { Ok::<_, Error>(service_fn(|_| async { @@ -448,7 +449,7 @@ Scoreboard: ____S_____I______R____I_______KK___D__C__G_L____________W___________ #[tokio::test] async fn test_apache_down() { // will have nothing bound - let in_addr = next_addr(); + let (_guard, in_addr) = next_addr(); let (tx, rx) = SourceSender::new_test(); diff --git a/src/sources/aws_ecs_metrics/mod.rs b/src/sources/aws_ecs_metrics/mod.rs index 7c091a831e969..c64dd853f5aa4 100644 --- a/src/sources/aws_ecs_metrics/mod.rs +++ b/src/sources/aws_ecs_metrics/mod.rs @@ -259,14 +259,15 @@ mod test { Error, event::MetricValue, test_util::{ + addr::next_addr, components::{SOURCE_TAGS, run_and_assert_source_compliance}, - next_addr, wait_for_tcp, + wait_for_tcp, }, }; #[tokio::test] async fn test_aws_ecs_metrics_source() { - let in_addr = next_addr(); + let (_guard, in_addr) = next_addr(); let make_svc = make_service_fn(|_| async { Ok::<_, Error>(service_fn(|_| async { diff --git a/src/sources/aws_kinesis_firehose/mod.rs b/src/sources/aws_kinesis_firehose/mod.rs index 158ba14cd8b08..4c86b74165e9b 100644 --- a/src/sources/aws_kinesis_firehose/mod.rs +++ b/src/sources/aws_kinesis_firehose/mod.rs @@ -291,9 +291,10 @@ mod tests { event::{Event, EventStatus}, log_event, test_util::{ + addr::{PortGuard, next_addr}, collect_ready, components::{SOURCE_TAGS, assert_source_compliance}, - next_addr, wait_for_tcp, + wait_for_tcp, }, }; @@ -334,11 +335,11 @@ mod tests { record_compression: Compression, delivered: bool, log_namespace: bool, - ) -> (impl Stream + Unpin, SocketAddr) { + ) -> (impl Stream + Unpin, SocketAddr, PortGuard) { use EventStatus::*; let status = if delivered { Delivered } else { Rejected }; let (sender, recv) = SourceSender::new_test_finalize(status); - let address = next_addr(); + 
let (_guard, address) = next_addr(); let cx = SourceContext::new_test(sender, None); tokio::spawn(async move { AwsKinesisFirehoseConfig { @@ -360,8 +361,9 @@ mod tests { .await .unwrap() }); + // Wait for the component to bind to the port wait_for_tcp(address).await; - (recv, address) + (recv, address, _guard) } /// Sends the body to the address with the appropriate Firehose headers @@ -513,7 +515,7 @@ mod tests { Vec::new(), ), ] { - let (rx, addr) = + let (rx, addr, _guard) = source(None, None, false, source_record_compression, true, false).await; let timestamp: DateTime = Utc::now(); @@ -614,7 +616,8 @@ mod tests { Vec::new(), ), ] { - let (rx, addr) = source(None, None, false, source_record_compression, true, true).await; + let (rx, addr, _guard) = + source(None, None, false, source_record_compression, true, true).await; let timestamp: DateTime = Utc::now(); @@ -686,7 +689,8 @@ mod tests { #[tokio::test] async fn aws_kinesis_firehose_forwards_events_gzip_request() { assert_source_compliance(&SOURCE_TAGS, async move { - let (rx, addr) = source(None, None, false, Default::default(), true, false).await; + let (rx, addr, _guard) = + source(None, None, false, Default::default(), true, false).await; let timestamp: DateTime = Utc::now(); @@ -723,7 +727,7 @@ mod tests { #[tokio::test] async fn aws_kinesis_firehose_rejects_bad_access_key() { - let (_rx, addr) = source( + let (_rx, addr, _guard) = source( Some("an access key".to_string().into()), Some(vec!["an access key in list".to_string().into()]), Default::default(), @@ -751,7 +755,7 @@ mod tests { #[tokio::test] async fn aws_kinesis_firehose_rejects_bad_access_key_from_list() { - let (_rx, addr) = source( + let (_rx, addr, _guard) = source( None, Some(vec!["an access key in list".to_string().into()]), Default::default(), @@ -781,7 +785,7 @@ mod tests { async fn aws_kinesis_firehose_accepts_merged_access_keys() { let valid_access_key = SensitiveString::from(String::from("an access key in list")); - let (_rx, addr) = 
source( + let (_rx, addr, _guard) = source( Some(valid_access_key.clone()), Some(vec!["valid access key 2".to_string().into()]), Default::default(), @@ -812,7 +816,7 @@ mod tests { async fn aws_kinesis_firehose_accepts_access_keys_from_list() { let valid_access_key = "an access key in list".to_string(); - let (_rx, addr) = source( + let (_rx, addr, _guard) = source( None, Some(vec![ valid_access_key.clone().into(), @@ -846,7 +850,7 @@ mod tests { async fn handles_acknowledgement_failure() { let expected = RECORD.as_bytes().to_owned(); - let (rx, addr) = source(None, None, false, Compression::None, false, false).await; + let (rx, addr, _guard) = source(None, None, false, Compression::None, false, false).await; let timestamp: DateTime = Utc::now(); @@ -882,7 +886,7 @@ mod tests { #[tokio::test] async fn event_access_key_passthrough_enabled() { - let (rx, address) = source( + let (rx, address, _guard) = source( None, Some(vec!["an access key".to_string().into()]), true, @@ -915,7 +919,7 @@ mod tests { #[tokio::test] async fn no_authorization_access_key_passthrough_enabled() { - let (rx, address) = source(None, None, true, Default::default(), true, true).await; + let (rx, address, _guard) = source(None, None, true, Default::default(), true, true).await; let timestamp: DateTime = Utc::now(); diff --git a/src/sources/datadog_agent/tests.rs b/src/sources/datadog_agent/tests.rs index ffa3934d5bcf9..168eb727c1671 100644 --- a/src/sources/datadog_agent/tests.rs +++ b/src/sources/datadog_agent/tests.rs @@ -50,8 +50,9 @@ use crate::{ ddtrace_proto, logs::decode_log_body, metrics::DatadogSeriesRequest, }, test_util::{ + addr::{PortGuard, next_addr}, components::{HTTP_PUSH_SOURCE_TAGS, assert_source_compliance}, - next_addr, spawn_collect_n, trace_init, wait_for_tcp, + spawn_collect_n, trace_init, wait_for_tcp, }, }; @@ -225,6 +226,7 @@ async fn source( Option>, Option>, SocketAddr, + PortGuard, ) { let (mut sender, recv) = SourceSender::new_test_finalize(status); let mut 
logs_output = None; @@ -241,7 +243,7 @@ async fn source( .flat_map(into_event_stream), ); } - let address = next_addr(); + let (_guard, address) = next_addr(); let config = toml::from_str::(&format!( indoc! { r#" address = "{}" @@ -262,7 +264,7 @@ async fn source( config.build(context).await.unwrap().await.unwrap(); }); wait_for_tcp(address).await; - (recv, logs_output, metrics_output, address) + (recv, logs_output, metrics_output, address, _guard) } async fn send_with_path(address: SocketAddr, body: &str, headers: HeaderMap, path: &str) -> u16 { @@ -304,7 +306,8 @@ fn dd_api_key_headers() -> HeaderMap { #[tokio::test] async fn full_payload_v1() { assert_source_compliance(&HTTP_PUSH_SOURCE_TAGS, async { - let (rx, _, _, addr) = source(EventStatus::Delivered, true, true, false, true).await; + let (rx, _, _, addr, _guard) = + source(EventStatus::Delivered, true, true, false, true).await; let mut events = send_and_collect( addr, @@ -358,7 +361,8 @@ async fn full_payload_v1() { #[tokio::test] async fn full_payload_v2() { assert_source_compliance(&HTTP_PUSH_SOURCE_TAGS, async { - let (rx, _, _, addr) = source(EventStatus::Delivered, true, true, false, true).await; + let (rx, _, _, addr, _guard) = + source(EventStatus::Delivered, true, true, false, true).await; let mut events = send_and_collect( addr, @@ -412,7 +416,8 @@ async fn full_payload_v2() { #[tokio::test] async fn no_api_key() { assert_source_compliance(&HTTP_PUSH_SOURCE_TAGS, async { - let (rx, _, _, addr) = source(EventStatus::Delivered, true, true, false, true).await; + let (rx, _, _, addr, _guard) = + source(EventStatus::Delivered, true, true, false, true).await; let mut events = send_and_collect( addr, @@ -466,7 +471,8 @@ async fn no_api_key() { #[tokio::test] async fn api_key_in_url() { assert_source_compliance(&HTTP_PUSH_SOURCE_TAGS, async { - let (rx, _, _, addr) = source(EventStatus::Delivered, true, true, false, true).await; + let (rx, _, _, addr, _guard) = + source(EventStatus::Delivered, true, true, 
false, true).await; let mut events = send_and_collect( addr, @@ -523,7 +529,8 @@ async fn api_key_in_url() { #[tokio::test] async fn api_key_in_query_params() { assert_source_compliance(&HTTP_PUSH_SOURCE_TAGS, async { - let (rx, _, _, addr) = source(EventStatus::Delivered, true, true, false, true).await; + let (rx, _, _, addr, _guard) = + source(EventStatus::Delivered, true, true, false, true).await; let mut events = send_and_collect( addr, @@ -580,7 +587,8 @@ async fn api_key_in_query_params() { #[tokio::test] async fn api_key_in_header() { assert_source_compliance(&HTTP_PUSH_SOURCE_TAGS, async { - let (rx, _, _, addr) = source(EventStatus::Delivered, true, true, false, true).await; + let (rx, _, _, addr, _guard) = + source(EventStatus::Delivered, true, true, false, true).await; let mut events = send_and_collect( addr, @@ -637,7 +645,7 @@ async fn api_key_in_header() { #[tokio::test] async fn delivery_failure() { trace_init(); - let (rx, _, _, addr) = source(EventStatus::Rejected, true, true, false, true).await; + let (rx, _, _, addr, _guard) = source(EventStatus::Rejected, true, true, false, true).await; spawn_collect_n( async move { @@ -673,7 +681,8 @@ async fn delivery_failure() { #[tokio::test] async fn ignores_disabled_acknowledgements() { assert_source_compliance(&HTTP_PUSH_SOURCE_TAGS, async { - let (rx, _, _, addr) = source(EventStatus::Rejected, false, true, false, true).await; + let (rx, _, _, addr, _guard) = + source(EventStatus::Rejected, false, true, false, true).await; let events = send_and_collect( addr, @@ -705,7 +714,8 @@ async fn ignores_disabled_acknowledgements() { #[tokio::test] async fn ignores_api_key() { assert_source_compliance(&HTTP_PUSH_SOURCE_TAGS, async { - let (rx, _, _, addr) = source(EventStatus::Delivered, true, false, false, true).await; + let (rx, _, _, addr, _guard) = + source(EventStatus::Delivered, true, false, false, true).await; let mut events = send_and_collect( addr, @@ -759,7 +769,8 @@ async fn ignores_api_key() { 
#[tokio::test] async fn decode_series_endpoint_v1() { assert_source_compliance(&HTTP_PUSH_SOURCE_TAGS, async { - let (rx, _, _, addr) = source(EventStatus::Delivered, true, true, false, true).await; + let (rx, _, _, addr, _guard) = + source(EventStatus::Delivered, true, true, false, true).await; let dd_metric_request = DatadogSeriesRequest { series: vec![ @@ -962,7 +973,8 @@ async fn decode_series_endpoint_v1() { #[tokio::test] async fn decode_sketches() { assert_source_compliance(&HTTP_PUSH_SOURCE_TAGS, async { - let (rx, _, _, addr) = source(EventStatus::Delivered, true, true, false, true).await; + let (rx, _, _, addr, _guard) = + source(EventStatus::Delivered, true, true, false, true).await; let mut buf = Vec::new(); let sketch = ddmetric_proto::sketch_payload::Sketch { @@ -1062,7 +1074,8 @@ async fn decode_sketches() { #[tokio::test] async fn decode_traces() { assert_source_compliance(&HTTP_PUSH_SOURCE_TAGS, async { - let (rx, _, _, addr) = source(EventStatus::Delivered, true, true, false, true).await; + let (rx, _, _, addr, _guard) = + source(EventStatus::Delivered, true, true, false, true).await; let mut headers = dd_api_key_headers(); headers.insert("X-Datadog-Reported-Languages", "ada".parse().unwrap()); @@ -1299,7 +1312,7 @@ async fn decode_traces() { #[tokio::test] async fn split_outputs() { assert_source_compliance(&HTTP_PUSH_SOURCE_TAGS, async { - let (_, rx_logs, rx_metrics, addr) = + let (_, rx_logs, rx_metrics, addr, _guard) = source(EventStatus::Delivered, true, true, true, true).await; let mut log_event = send_and_collect( @@ -1949,7 +1962,8 @@ fn test_config_outputs() { #[tokio::test] async fn decode_series_endpoint_v2() { assert_source_compliance(&HTTP_PUSH_SOURCE_TAGS, async { - let (rx, _, _, addr) = source(EventStatus::Delivered, true, true, false, true).await; + let (rx, _, _, addr, _guard) = + source(EventStatus::Delivered, true, true, false, true).await; let series = vec![ ddmetric_proto::metric_payload::MetricSeries { @@ -2402,7 +2416,7 @@ 
async fn test_series_v1_split_metric_namespace_impl( expected_name: &str, expected_namespace: Option<&str>, ) { - let (rx, _, _, addr) = source(EventStatus::Delivered, true, true, false, split).await; + let (rx, _, _, addr, _guard) = source(EventStatus::Delivered, true, true, false, split).await; let dd_metric_request = DatadogSeriesRequest { series: vec![DatadogSeriesMetric { @@ -2454,7 +2468,7 @@ async fn test_series_v2_split_metric_namespace_impl( expected_name: &str, expected_namespace: Option<&str>, ) { - let (rx, _, _, addr) = source(EventStatus::Delivered, true, true, false, split).await; + let (rx, _, _, addr, _guard) = source(EventStatus::Delivered, true, true, false, split).await; let series = vec![ddmetric_proto::metric_payload::MetricSeries { resources: vec![ddmetric_proto::metric_payload::Resource { @@ -2515,7 +2529,7 @@ async fn test_sketches_split_metric_namespace_impl( expected_name: &str, expected_namespace: Option<&str>, ) { - let (rx, _, _, addr) = source(EventStatus::Delivered, true, true, false, split).await; + let (rx, _, _, addr, _guard) = source(EventStatus::Delivered, true, true, false, split).await; let mut buf = Vec::new(); let sketch = ddmetric_proto::sketch_payload::Sketch { diff --git a/src/sources/fluent/mod.rs b/src/sources/fluent/mod.rs index 3a979b0957ef8..4f1f3112e6f43 100644 --- a/src/sources/fluent/mod.rs +++ b/src/sources/fluent/mod.rs @@ -844,7 +844,7 @@ mod tests { SourceSender, config::{SourceConfig, SourceContext}, event::EventStatus, - test_util::{self, next_addr, trace_init, wait_for_tcp}, + test_util::{self, addr::next_addr, trace_init, wait_for_tcp}, }; #[test] @@ -1087,7 +1087,7 @@ mod tests { trace_init(); let (sender, recv) = SourceSender::new_test_finalize(status); - let address = next_addr(); + let (_guard, address) = next_addr(); let source = FluentConfig { mode: FluentMode::Tcp(FluentTcpConfig { address: address.into(), @@ -1264,9 +1264,10 @@ mod integration_tests { docker::Container, 
sources::fluent::{FluentConfig, FluentMode, FluentTcpConfig}, test_util::{ + addr::{PortGuard, next_addr, next_addr_for_ip}, collect_ready, components::{SOCKET_PUSH_SOURCE_TAGS, assert_source_compliance}, - next_addr, next_addr_for_ip, random_string, wait_for_tcp, + random_string, wait_for_tcp, }, }; @@ -1294,8 +1295,8 @@ mod integration_tests { async fn test_fluentbit(status: EventStatus) { assert_source_compliance(&SOCKET_PUSH_SOURCE_TAGS, async move { - let test_address = next_addr(); - let (out, source_address) = source(status).await; + let (_guard, test_address) = next_addr(); + let (out, source_address, _guard) = source(status).await; let dir = make_file( "fluent-bit.conf", @@ -1371,8 +1372,8 @@ mod integration_tests { async fn test_fluentd(status: EventStatus, options: &str) { assert_source_compliance(&SOCKET_PUSH_SOURCE_TAGS, async move { - let test_address = next_addr(); - let (out, source_address) = source(status).await; + let (_guard, test_address) = next_addr(); + let (out, source_address, _guard) = source(status).await; let config = format!( r#" @@ -1433,9 +1434,12 @@ mod integration_tests { .await; } - async fn source(status: EventStatus) -> (impl Stream + Unpin, SocketAddr) { + async fn source( + status: EventStatus, + ) -> (impl Stream + Unpin, SocketAddr, PortGuard) { let (sender, recv) = SourceSender::new_test_finalize(status); - let address = next_addr_for_ip(std::net::IpAddr::V4(std::net::Ipv4Addr::UNSPECIFIED)); + let (_guard, address) = + next_addr_for_ip(std::net::IpAddr::V4(std::net::Ipv4Addr::UNSPECIFIED)); tokio::spawn(async move { FluentConfig { mode: FluentMode::Tcp(FluentTcpConfig { @@ -1456,6 +1460,6 @@ mod integration_tests { .unwrap() }); wait_for_tcp(address).await; - (recv, address) + (recv, address, _guard) } } diff --git a/src/sources/heroku_logs.rs b/src/sources/heroku_logs.rs index 542f5f6930a3f..02d682cccf967 100644 --- a/src/sources/heroku_logs.rs +++ b/src/sources/heroku_logs.rs @@ -444,8 +444,9 @@ mod tests { 
config::{SourceConfig, SourceContext, log_schema}, serde::{default_decoding, default_framing_message_based}, test_util::{ + addr::{PortGuard, next_addr}, components::{HTTP_PUSH_SOURCE_TAGS, assert_source_compliance}, - next_addr, random_string, spawn_collect_n, wait_for_tcp, + random_string, spawn_collect_n, wait_for_tcp, }, }; @@ -459,9 +460,9 @@ mod tests { query_parameters: Vec, status: EventStatus, acknowledgements: bool, - ) -> (impl Stream + Unpin, SocketAddr) { + ) -> (impl Stream + Unpin, SocketAddr, PortGuard) { let (sender, recv) = SourceSender::new_test_finalize(status); - let address = next_addr(); + let (_guard, address) = next_addr(); let context = SourceContext::new_test(sender, None); tokio::spawn(async move { LogplexConfig { @@ -482,7 +483,7 @@ mod tests { .unwrap() }); wait_for_tcp(address).await; - (recv, address) + (recv, address, _guard) } async fn send( @@ -521,7 +522,7 @@ mod tests { assert_source_compliance(&HTTP_PUSH_SOURCE_TAGS, async { let auth = make_auth(); - let (rx, addr) = source( + let (rx, addr, _guard) = source( Some(auth.clone()), vec!["appname".to_string(), "absent".to_string()], EventStatus::Delivered, @@ -567,7 +568,7 @@ mod tests { assert_source_compliance(&HTTP_PUSH_SOURCE_TAGS, async { let auth = make_auth(); - let (rx, addr) = source( + let (rx, addr, _guard) = source( Some(auth.clone()), vec!["*".to_string()], EventStatus::Delivered, @@ -612,7 +613,8 @@ mod tests { assert_source_compliance(&HTTP_PUSH_SOURCE_TAGS, async { let auth = make_auth(); - let (rx, addr) = source(Some(auth.clone()), vec![], EventStatus::Rejected, true).await; + let (rx, addr, _guard) = + source(Some(auth.clone()), vec![], EventStatus::Rejected, true).await; let events = spawn_collect_n( async move { @@ -635,7 +637,8 @@ mod tests { async fn logplex_ignores_disabled_acknowledgements() { let auth = make_auth(); - let (rx, addr) = source(Some(auth.clone()), vec![], EventStatus::Rejected, false).await; + let (rx, addr, _guard) = + 
source(Some(auth.clone()), vec![], EventStatus::Rejected, false).await; let events = spawn_collect_n( async move { @@ -654,7 +657,8 @@ mod tests { #[tokio::test] async fn logplex_auth_failure() { - let (_rx, addr) = source(Some(make_auth()), vec![], EventStatus::Delivered, true).await; + let (_rx, addr, _guard) = + source(Some(make_auth()), vec![], EventStatus::Delivered, true).await; assert_eq!( 401, diff --git a/src/sources/host_metrics/tcp.rs b/src/sources/host_metrics/tcp.rs index 26a499934d50d..c800b5975d081 100644 --- a/src/sources/host_metrics/tcp.rs +++ b/src/sources/host_metrics/tcp.rs @@ -300,7 +300,7 @@ mod tests { }; use crate::{ sources::host_metrics::{HostMetrics, HostMetricsConfig, MetricsBuffer}, - test_util::next_addr, + test_util::addr::next_addr, }; #[test] @@ -344,7 +344,7 @@ mod tests { async fn fetches_nl_net_hdrs() { // start a TCP server - let next_addr = next_addr(); + let (_guard, next_addr) = next_addr(); let listener = TcpListener::bind(next_addr).await.unwrap(); let addr = listener.local_addr().unwrap(); tokio::spawn(async move { @@ -375,7 +375,7 @@ mod tests { } async fn generates_tcp_metrics() { - let next_addr = next_addr(); + let (_guard, next_addr) = next_addr(); let _listener = TcpListener::bind(next_addr).await.unwrap(); let mut buffer = MetricsBuffer::new(None); diff --git a/src/sources/http_client/tests.rs b/src/sources/http_client/tests.rs index bb2112620f65c..1c917bcac6357 100644 --- a/src/sources/http_client/tests.rs +++ b/src/sources/http_client/tests.rs @@ -19,8 +19,9 @@ use crate::{ serde::{default_decoding, default_framing_message_based}, sources::util::http::HttpMethod, test_util::{ + addr::next_addr, components::{HTTP_PULL_SOURCE_TAGS, run_and_assert_source_compliance}, - next_addr, test_generate_config, wait_for_tcp, + test_generate_config, wait_for_tcp, }, }; @@ -80,7 +81,7 @@ register_validatable_component!(HttpClientConfig); /// Bytes should be decoded and HTTP header set to text/plain. 
#[tokio::test] async fn bytes_decoding() { - let in_addr = next_addr(); + let (_guard, in_addr) = next_addr(); // validates the Accept header is set correctly for the Bytes codec let dummy_endpoint = warp::path!("endpoint") @@ -108,7 +109,7 @@ async fn bytes_decoding() { /// JSON with newline delimiter should be decoded and HTTP header set to application/x-ndjson. #[tokio::test] async fn json_decoding_newline_delimited() { - let in_addr = next_addr(); + let (_guard, in_addr) = next_addr(); // validates the Content-Type is set correctly for the Json codec let dummy_endpoint = warp::path!("endpoint") @@ -137,7 +138,7 @@ async fn json_decoding_newline_delimited() { /// JSON with character delimiter should be decoded and HTTP header set to application/json. #[tokio::test] async fn json_decoding_character_delimited() { - let in_addr = next_addr(); + let (_guard, in_addr) = next_addr(); // validates the Content-Type is set correctly for the Json codec let dummy_endpoint = warp::path!("endpoint") @@ -171,7 +172,7 @@ async fn json_decoding_character_delimited() { /// HTTP request queries configured by the user should be applied correctly. 
#[tokio::test] async fn request_query_applied() { - let in_addr = next_addr(); + let (_guard, in_addr) = next_addr(); let dummy_endpoint = warp::path!("endpoint") .and(warp::query::raw()) @@ -238,7 +239,7 @@ async fn request_query_applied() { /// VRL query parameters should be parsed correctly #[tokio::test] async fn request_query_vrl_applied() { - let in_addr = next_addr(); + let (_guard, in_addr) = next_addr(); let dummy_endpoint = warp::path!("endpoint") .and(warp::query::raw()) @@ -367,7 +368,7 @@ async fn request_query_vrl_applied() { /// VRL query parameters should dynamically update on each request #[tokio::test] async fn request_query_vrl_dynamic_updates() { - let in_addr = next_addr(); + let (_guard, in_addr) = next_addr(); // A handler that returns the query parameters as part of the response let dummy_endpoint = warp::path!("endpoint") @@ -433,7 +434,7 @@ async fn request_query_vrl_dynamic_updates() { /// HTTP request headers configured by the user should be applied correctly. 
#[tokio::test] async fn headers_applied() { - let in_addr = next_addr(); + let (_guard, in_addr) = next_addr(); let dummy_endpoint = warp::path!("endpoint") .and(warp::header::exact("Accept", "text/plain")) @@ -470,7 +471,7 @@ async fn headers_applied() { /// ACCEPT HTTP request headers configured by the user should take precedence #[tokio::test] async fn accept_header_override() { - let in_addr = next_addr(); + let (_guard, in_addr) = next_addr(); // (The Bytes decoder will default to text/plain encoding) let dummy_endpoint = warp::path!("endpoint") diff --git a/src/sources/http_server.rs b/src/sources/http_server.rs index 2aa76a6f455c2..3d77821af5ffa 100644 --- a/src/sources/http_server.rs +++ b/src/sources/http_server.rs @@ -558,8 +558,9 @@ mod tests { event::{Event, EventStatus, Value}, sources::http_server::HttpMethod, test_util::{ + addr::next_addr, components::{self, HTTP_PUSH_SOURCE_TAGS, assert_source_compliance}, - next_addr, spawn_collect_n, wait_for_tcp, + spawn_collect_n, wait_for_tcp, }, }; @@ -585,7 +586,7 @@ mod tests { decoding: Option, ) -> (impl Stream + 'a, SocketAddr) { let (sender, recv) = SourceSender::new_test_finalize(status); - let address = next_addr(); + let (_guard, address) = next_addr(); let path = path.to_owned(); let host_key = OptionalValuePath::from(owned_value_path!(host_key)); let path_key = OptionalValuePath::from(owned_value_path!(path_key)); diff --git a/src/sources/logstash.rs b/src/sources/logstash.rs index 057822a6dfa4f..a643ce0af73ba 100644 --- a/src/sources/logstash.rs +++ b/src/sources/logstash.rs @@ -721,8 +721,9 @@ mod test { SourceSender, event::EventStatus, test_util::{ + addr::next_addr, components::{SOCKET_PUSH_SOURCE_TAGS, assert_source_compliance}, - next_addr, spawn_collect_n, wait_for_tcp, + spawn_collect_n, wait_for_tcp, }, }; @@ -745,7 +746,7 @@ mod test { status: EventStatus, ) -> (SocketAddr, impl Stream + Unpin) { let (sender, recv) = SourceSender::new_test_finalize(status); - let address = next_addr(); + 
let (_guard, address) = next_addr(); let source = LogstashConfig { address: address.into(), tls: None, diff --git a/src/sources/okta/tests.rs b/src/sources/okta/tests.rs index f4c0f7ee4d7dd..7ff2b626d06d0 100644 --- a/src/sources/okta/tests.rs +++ b/src/sources/okta/tests.rs @@ -12,8 +12,9 @@ use crate::{ config::log_schema, sources::okta::OktaConfig, test_util::{ + addr::next_addr, components::{HTTP_PULL_SOURCE_TAGS, run_and_assert_source_compliance}, - next_addr, test_generate_config, wait_for_tcp, + test_generate_config, wait_for_tcp, }, }; @@ -60,7 +61,7 @@ register_validatable_component!(OktaConfig); #[tokio::test] async fn okta_compliance() { - let in_addr = next_addr(); + let (_guard, in_addr) = next_addr(); let dummy_endpoint = warp::path!("api" / "v1" / "logs") .and(warp::query::>()) @@ -119,7 +120,7 @@ async fn okta_compliance() { #[tokio::test] async fn okta_follows_rel() { - let addr = next_addr(); + let (_guard, addr) = next_addr(); let dummy_endpoint = warp::path!("api" / "v1" / "logs") .and(warp::query::>()) @@ -182,7 +183,7 @@ async fn okta_follows_rel() { async fn okta_persists_rel() { // the client follows `next` links; on the next interval it should pick up where it left off // and not start over from the beginning - let addr = next_addr(); + let (_guard, addr) = next_addr(); let init_guard: Arc = Arc::new(AtomicBool::new(false)); diff --git a/src/sources/opentelemetry/tests.rs b/src/sources/opentelemetry/tests.rs index 81ee8b4e16263..76324210be13a 100644 --- a/src/sources/opentelemetry/tests.rs +++ b/src/sources/opentelemetry/tests.rs @@ -46,8 +46,8 @@ use crate::{ sources::opentelemetry::config::{GrpcConfig, HttpConfig, LOGS, METRICS, OpentelemetryConfig}, test_util::{ self, + addr::next_addr, components::{SOURCE_TAGS, assert_source_compliance}, - next_addr, }, }; @@ -1070,8 +1070,8 @@ async fn receive_summary_metric() { #[tokio::test] async fn http_headers() { assert_source_compliance(&SOURCE_TAGS, async { - let grpc_addr = next_addr(); - let 
http_addr = next_addr(); + let (_guard_0, grpc_addr) = next_addr(); + let (_guard_1, http_addr) = next_addr(); let mut headers = HeaderMap::new(); headers.insert("User-Agent", "test_client".parse().unwrap()); @@ -1185,8 +1185,8 @@ pub async fn build_otlp_test_env( event_name: &'static str, log_namespace: Option, ) -> OTelTestEnv { - let grpc_addr = next_addr(); - let http_addr = next_addr(); + let (_guard_0, grpc_addr) = next_addr(); + let (_guard_1, http_addr) = next_addr(); let config = OpentelemetryConfig { grpc: GrpcConfig { diff --git a/src/sources/prometheus/pushgateway.rs b/src/sources/prometheus/pushgateway.rs index ad9564a5b11c3..352b7ac8d3386 100644 --- a/src/sources/prometheus/pushgateway.rs +++ b/src/sources/prometheus/pushgateway.rs @@ -364,7 +364,7 @@ mod test { } async fn whole_request_happy_path(tls: Option) { assert_source_compliance(&HTTP_PUSH_SOURCE_TAGS, async { - let address = test_util::next_addr(); + let (_guard, address) = test_util::addr::next_addr(); let (tx, rx) = SourceSender::new_test_finalize(EventStatus::Delivered); let source = PrometheusPushgatewayConfig { diff --git a/src/sources/prometheus/remote_write.rs b/src/sources/prometheus/remote_write.rs index 9a9313fa2974a..e7a23a094422e 100644 --- a/src/sources/prometheus/remote_write.rs +++ b/src/sources/prometheus/remote_write.rs @@ -234,7 +234,7 @@ mod test { } async fn receives_metrics(tls: Option) { - let address = test_util::next_addr(); + let (_guard, address) = test_util::addr::next_addr(); let (tx, rx) = SourceSender::new_test_finalize(EventStatus::Delivered); let proto = MaybeTlsSettings::from_config(tls.as_ref(), true) @@ -433,7 +433,7 @@ mod test { /// we accept the metric, but take the last label in the list. 
#[tokio::test] async fn receives_metrics_duplicate_labels() { - let address = test_util::next_addr(); + let (_guard, address) = test_util::addr::next_addr(); let (tx, rx) = SourceSender::new_test_finalize(EventStatus::Delivered); let source = PrometheusRemoteWriteConfig { @@ -505,7 +505,7 @@ mod test { #[tokio::test] async fn test_skip_nan_values_enabled() { - let address = test_util::next_addr(); + let (_guard, address) = test_util::addr::next_addr(); let (tx, rx) = SourceSender::new_test_finalize(EventStatus::Delivered); let source = PrometheusRemoteWriteConfig { @@ -576,7 +576,7 @@ mod test { #[tokio::test] async fn test_skip_nan_values_disabled() { - let address = test_util::next_addr(); + let (_guard, address) = test_util::addr::next_addr(); let (tx, rx) = SourceSender::new_test_finalize(EventStatus::Delivered); let source = PrometheusRemoteWriteConfig { @@ -661,7 +661,7 @@ mod test { #[tokio::test] async fn receives_metrics_on_custom_path() { - let address = test_util::next_addr(); + let (_guard, address) = test_util::addr::next_addr(); let (tx, rx) = SourceSender::new_test_finalize(EventStatus::Delivered); let source = PrometheusRemoteWriteConfig { @@ -710,7 +710,7 @@ mod test { #[tokio::test] async fn rejects_metrics_on_wrong_path() { - let address = test_util::next_addr(); + let (_guard, address) = test_util::addr::next_addr(); let (tx, _rx) = SourceSender::new_test_finalize(EventStatus::Delivered); let source = PrometheusRemoteWriteConfig { @@ -750,7 +750,7 @@ mod test { #[tokio::test] async fn receives_metrics_on_default_path() { - let address = test_util::next_addr(); + let (_guard, address) = test_util::addr::next_addr(); let (tx, rx) = SourceSender::new_test_finalize(EventStatus::Delivered); let source = PrometheusRemoteWriteConfig { @@ -784,7 +784,7 @@ mod test { #[tokio::test] async fn rejects_metrics_on_wrong_path_with_skip_nan_enabled() { - let address = test_util::next_addr(); + let (_guard, address) = test_util::addr::next_addr(); let (tx, _rx) 
= SourceSender::new_test_finalize(EventStatus::Delivered); let source = PrometheusRemoteWriteConfig { @@ -824,7 +824,7 @@ mod test { #[tokio::test] async fn accepts_conflicting_metadata() { - let address = test_util::next_addr(); + let (_guard, address) = test_util::addr::next_addr(); let (tx, rx) = SourceSender::new_test_finalize(EventStatus::Delivered); let source = PrometheusRemoteWriteConfig { @@ -865,7 +865,7 @@ mod test { #[tokio::test] async fn rejects_conflicting_metadata() { - let address = test_util::next_addr(); + let (_guard, address) = test_util::addr::next_addr(); let (tx, _rx) = SourceSender::new_test_finalize(EventStatus::Delivered); let source = PrometheusRemoteWriteConfig { diff --git a/src/sources/prometheus/scrape.rs b/src/sources/prometheus/scrape.rs index a48256ac6214d..85ace26146721 100644 --- a/src/sources/prometheus/scrape.rs +++ b/src/sources/prometheus/scrape.rs @@ -332,8 +332,9 @@ mod test { http::{ParameterValue, QueryParameterValue}, sinks::prometheus::exporter::PrometheusExporterConfig, test_util::{ + addr::next_addr, components::{HTTP_PULL_SOURCE_TAGS, run_and_assert_source_compliance}, - next_addr, start_topology, trace_init, wait_for_tcp, + start_topology, trace_init, wait_for_tcp, }, }; @@ -344,7 +345,7 @@ mod test { #[tokio::test] async fn test_prometheus_sets_headers() { - let in_addr = next_addr(); + let (_guard, in_addr) = next_addr(); let dummy_endpoint = warp::path!("metrics").and(warp::header::exact("Accept", "text/plain")).map(|| { r#" @@ -378,7 +379,7 @@ mod test { #[tokio::test] async fn test_prometheus_honor_labels() { - let in_addr = next_addr(); + let (_guard, in_addr) = next_addr(); let dummy_endpoint = warp::path!("metrics").map(|| { r#" @@ -430,7 +431,7 @@ mod test { #[tokio::test] async fn test_prometheus_do_not_honor_labels() { - let in_addr = next_addr(); + let (_guard, in_addr) = next_addr(); let dummy_endpoint = warp::path!("metrics").map(|| { r#" @@ -496,7 +497,7 @@ mod test { /// we accept the metric, but 
take the last label in the list. #[tokio::test] async fn test_prometheus_duplicate_tags() { - let in_addr = next_addr(); + let (_guard, in_addr) = next_addr(); let dummy_endpoint = warp::path!("metrics").map(|| { r#" @@ -549,7 +550,7 @@ mod test { #[tokio::test] async fn test_prometheus_request_query() { - let in_addr = next_addr(); + let (_guard, in_addr) = next_addr(); let dummy_endpoint = warp::path!("metrics").and(warp::query::raw()).map(|query| { format!( @@ -630,8 +631,8 @@ mod test { #[tokio::test] async fn test_prometheus_routing() { trace_init(); - let in_addr = next_addr(); - let out_addr = next_addr(); + let (_in_guard, in_addr) = next_addr(); + let (_out_guard, out_addr) = next_addr(); let make_svc = make_service_fn(|_| async { Ok::<_, Error>(service_fn(|_| async { diff --git a/src/sources/socket/mod.rs b/src/sources/socket/mod.rs index c06cee8ce1fd5..b821cf4e0f257 100644 --- a/src/sources/socket/mod.rs +++ b/src/sources/socket/mod.rs @@ -322,7 +322,7 @@ mod test { collections::HashMap, net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket}, sync::{ - Arc, LazyLock, + Arc, atomic::{AtomicBool, Ordering}, }, thread, @@ -331,13 +331,11 @@ mod test { use approx::assert_relative_eq; use bytes::{BufMut, Bytes, BytesMut}; use futures::{StreamExt, stream}; - use portpicker::pick_unused_port; use rand::{SeedableRng, rngs::SmallRng, seq::SliceRandom}; use serde_json::json; use tokio::{ io::AsyncReadExt, net::TcpStream, - sync::{Mutex, MutexGuard}, task::JoinHandle, time::{Duration, Instant, timeout}, }; @@ -377,6 +375,7 @@ mod test { sinks::util::tcp::TcpSinkConfig, sources::util::net::SocketListenAddr, test_util::{ + addr::{PortGuard, next_addr, next_addr_any}, collect_n, collect_n_limited, components::{ COMPONENT_ERROR_TAGS, SOCKET_PUSH_SOURCE_TAGS, assert_source_compliance, @@ -387,34 +386,21 @@ mod test { tls::{self, TlsConfig, TlsEnableableConfig, TlsSourceConfig}, }; - type Guard<'a> = MutexGuard<'a, ()>; - - async fn wait_for_tcp_and_release<'a>(guard: Guard<'a>, 
addr: SocketAddr) { + async fn wait_for_tcp_and_release(guard: PortGuard, addr: SocketAddr) { wait_for_tcp(addr).await; - drop(guard) // Now we're sure the socket was bound by the server and we can release the lock - } - - static ADDR_LOCK: LazyLock> = LazyLock::new(|| Mutex::new(())); - pub async fn next_addr_for_ip<'a>(ip: IpAddr) -> (Guard<'a>, SocketAddr) { - let guard = ADDR_LOCK.lock().await; - let port = pick_unused_port(ip); - (guard, SocketAddr::new(ip, port)) - } - - pub async fn next_addr_any<'a>() -> (Guard<'a>, SocketAddr) { - next_addr_for_ip(IpAddr::V4(Ipv4Addr::UNSPECIFIED)).await - } - - pub async fn next_addr<'a>() -> (Guard<'a>, SocketAddr) { - next_addr_for_ip(IpAddr::V4(Ipv4Addr::LOCALHOST)).await + drop(guard) // Now we're sure the socket was bound by the server and we can release the guard } pub fn bind_unused_udp() -> UdpSocket { - portpicker::bind_unused_udp(IpAddr::V4(Ipv4Addr::LOCALHOST)) + // Bind to port 0 to let the OS assign an available port + UdpSocket::bind((IpAddr::V4(Ipv4Addr::LOCALHOST), 0)) + .expect("Failed to bind UDP socket to OS-assigned port") } pub fn bind_unused_udp_any() -> UdpSocket { - portpicker::bind_unused_udp(IpAddr::V4(Ipv4Addr::UNSPECIFIED)) + // Bind to port 0 to let the OS assign an available port + UdpSocket::bind((IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0)) + .expect("Failed to bind UDP socket to OS-assigned port") } fn get_gelf_payload(message: &str) -> String { @@ -474,7 +460,7 @@ mod test { async fn tcp_it_includes_host() { assert_source_compliance(&SOCKET_PUSH_SOURCE_TAGS, async { let (tx, mut rx) = SourceSender::new_test(); - let (guard, addr) = next_addr().await; + let (guard, addr) = next_addr(); let server = SocketConfig::from(TcpConfig::from_address(addr.into())) .build(SourceContext::new_test(tx, None)) @@ -500,7 +486,7 @@ mod test { async fn tcp_it_includes_vector_namespaced_fields() { assert_source_compliance(&SOCKET_PUSH_SOURCE_TAGS, async { let (tx, mut rx) = SourceSender::new_test(); - let (guard, 
addr) = next_addr().await; + let (guard, addr) = next_addr(); let mut conf = TcpConfig::from_address(addr.into()); conf.set_log_namespace(Some(true)); @@ -541,7 +527,7 @@ mod test { async fn tcp_splits_on_newline() { assert_source_compliance(&SOCKET_PUSH_SOURCE_TAGS, async { let (tx, rx) = SourceSender::new_test(); - let (guard, addr) = next_addr().await; + let (guard, addr) = next_addr(); let server = SocketConfig::from(TcpConfig::from_address(addr.into())) .build(SourceContext::new_test(tx, None)) @@ -574,7 +560,7 @@ mod test { async fn tcp_it_includes_source_type() { assert_source_compliance(&SOCKET_PUSH_SOURCE_TAGS, async { let (tx, mut rx) = SourceSender::new_test(); - let (guard, addr) = next_addr().await; + let (guard, addr) = next_addr(); let server = SocketConfig::from(TcpConfig::from_address(addr.into())) .build(SourceContext::new_test(tx, None)) @@ -600,7 +586,7 @@ mod test { async fn tcp_continue_after_long_line() { assert_source_compliance(&SOCKET_PUSH_SOURCE_TAGS, async { let (tx, mut rx) = SourceSender::new_test(); - let (guard, addr) = next_addr().await; + let (guard, addr) = next_addr(); let mut config = TcpConfig::from_address(addr.into()); config.set_framing(Some( @@ -641,7 +627,7 @@ mod test { async fn tcp_with_tls() { assert_source_compliance(&SOCKET_PUSH_SOURCE_TAGS, async { let (tx, mut rx) = SourceSender::new_test(); - let (guard, addr) = next_addr().await; + let (guard, addr) = next_addr(); let mut config = TcpConfig::from_address(addr.into()); config.set_tls(Some(TlsSourceConfig { @@ -705,7 +691,7 @@ mod test { async fn tcp_with_tls_vector_namespace() { assert_source_compliance(&SOCKET_PUSH_SOURCE_TAGS, async { let (tx, mut rx) = SourceSender::new_test(); - let (guard, addr) = next_addr().await; + let (guard, addr) = next_addr(); let mut config = TcpConfig::from_address(addr.into()); config.set_tls(Some(TlsSourceConfig { @@ -781,7 +767,7 @@ mod test { assert_source_compliance(&SOCKET_PUSH_SOURCE_TAGS, async { let source_id = 
ComponentKey::from("tcp_shutdown_simple"); let (tx, mut rx) = SourceSender::new_test(); - let (guard, addr) = next_addr().await; + let (guard, addr) = next_addr(); let (cx, mut shutdown) = SourceContext::new_shutdown(&source_id, tx); // Start TCP Source @@ -823,7 +809,7 @@ mod test { // the source doesn't block on sending the events downstream, otherwise if it was blocked on // doing so, it wouldn't be able to wake up and loop to see that it had been signalled to // shutdown. - let (guard, addr) = next_addr().await; + let (guard, addr) = next_addr(); let (source_tx, source_rx) = SourceSender::new_test_sender_with_buffer(10_000); let source_key = ComponentKey::from("tcp_shutdown_infinite_stream"); @@ -901,7 +887,7 @@ mod test { #[tokio::test] async fn tcp_connection_close_after_max_duration() { let (tx, _) = SourceSender::new_test(); - let (guard, addr) = next_addr().await; + let (guard, addr) = next_addr(); let mut source_config = TcpConfig::from_address(addr.into()); source_config.set_max_connection_duration_secs(Some(1)); @@ -1027,7 +1013,7 @@ mod test { _ => panic!("listen address should not be systemd FD offset in tests"), }, None => { - let (guard, address) = next_addr().await; + let (guard, address) = next_addr(); ( Some(guard), address, @@ -1129,7 +1115,7 @@ mod test { async fn udp_max_length() { assert_source_compliance(&SOCKET_PUSH_SOURCE_TAGS, async { let (tx, rx) = SourceSender::new_test(); - let (_, address) = next_addr().await; + let (_, address) = next_addr(); let mut config = UdpConfig::from_address(address.into()); config.max_length = 11; let address = init_udp_with_config(tx, config).await; @@ -1166,7 +1152,7 @@ mod test { async fn udp_max_length_delimited() { assert_source_compliance(&SOCKET_PUSH_SOURCE_TAGS, async { let (tx, rx) = SourceSender::new_test(); - let (_, address) = next_addr().await; + let (_, address) = next_addr(); let mut config = UdpConfig::from_address(address.into()); config.max_length = 10; config.framing = Some( @@ -1200,7 
+1186,7 @@ mod test { async fn udp_decodes_chunked_gelf_messages() { assert_source_compliance(&SOCKET_PUSH_SOURCE_TAGS, async { let (tx, rx) = SourceSender::new_test(); - let (_, address) = next_addr().await; + let (_, address) = next_addr(); let mut config = UdpConfig::from_address(address.into()); config.decoding = GelfDeserializerConfig::default().into(); let address = init_udp_with_config(tx, config).await; @@ -1378,14 +1364,13 @@ mod test { assert_source_compliance(&SOCKET_PUSH_SOURCE_TAGS, async { let (tx, mut rx) = SourceSender::new_test(); // The socket address must be `IPADDR_ANY` (0.0.0.0) in order to receive multicast packets - let (guard, socket_address) = next_addr_any().await; + let (_guard, socket_address) = next_addr_any(); let multicast_ip_address: Ipv4Addr = "224.0.0.2".parse().unwrap(); let multicast_socket_address = SocketAddr::new(IpAddr::V4(multicast_ip_address), socket_address.port()); let mut config = UdpConfig::from_address(socket_address.into()); config.multicast_groups = vec![multicast_ip_address]; init_udp_with_config(tx, config).await; - drop(guard); // We must send packets to the same interface the `socket_address` is bound to // in order to receive the multicast packets the `from` socket sends. 
@@ -1409,7 +1394,7 @@ mod test { async fn multiple_multicast_addresses_udp_message() { assert_source_compliance(&SOCKET_PUSH_SOURCE_TAGS, async { let (tx, mut rx) = SourceSender::new_test(); - let (guard, socket_address) = next_addr_any().await; + let (_guard, socket_address) = next_addr_any(); let multicast_ip_addresses = (2..12) .map(|i| format!("224.0.0.{i}").parse().unwrap()) .collect::>(); @@ -1420,7 +1405,6 @@ mod test { let mut config = UdpConfig::from_address(socket_address.into()); config.multicast_groups = multicast_ip_addresses; init_udp_with_config(tx, config).await; - drop(guard); let mut from = bind_unused_udp_any(); for multicast_ip_socket_address in multicast_ip_socket_addresses { @@ -1444,14 +1428,13 @@ mod test { async fn multicast_and_unicast_udp_message() { assert_source_compliance(&SOCKET_PUSH_SOURCE_TAGS, async { let (tx, mut rx) = SourceSender::new_test(); - let (guard, socket_address) = next_addr_any().await; + let (_guard, socket_address) = next_addr_any(); let multicast_ip_address: Ipv4Addr = "224.0.0.2".parse().unwrap(); let multicast_socket_address = SocketAddr::new(IpAddr::V4(multicast_ip_address), socket_address.port()); let mut config = UdpConfig::from_address(socket_address.into()); config.multicast_groups = vec![multicast_ip_address]; init_udp_with_config(tx, config).await; - drop(guard); // Send packet to multicast address let _ = send_lines_udp_from( @@ -1485,7 +1468,7 @@ mod test { async fn udp_invalid_multicast_group() { assert_source_error(&COMPONENT_ERROR_TAGS, async { let (tx, _rx) = SourceSender::new_test(); - let (_, socket_address) = next_addr_any().await; + let (_, socket_address) = next_addr_any(); let invalid_multicast_ip_address: Ipv4Addr = "192.168.0.3".parse().unwrap(); let mut config = UdpConfig::from_address(socket_address.into()); config.multicast_groups = vec![invalid_multicast_ip_address]; diff --git a/src/sources/splunk_hec/mod.rs b/src/sources/splunk_hec/mod.rs index 6a1a6646d0d8a..f3158a46cf6dc 100644 --- 
a/src/sources/splunk_hec/mod.rs +++ b/src/sources/splunk_hec/mod.rs @@ -1327,12 +1327,13 @@ mod tests { }, sources::splunk_hec::acknowledgements::{HecAckStatusRequest, HecAckStatusResponse}, test_util::{ + addr::{PortGuard, next_addr}, collect_n, components::{ COMPONENT_ERROR_TAGS, HTTP_PUSH_SOURCE_TAGS, assert_source_compliance, assert_source_error, }, - next_addr, wait_for_tcp, + wait_for_tcp, }, }; @@ -1347,7 +1348,7 @@ mod tests { async fn source( acknowledgements: Option, - ) -> (impl Stream + Unpin, SocketAddr) { + ) -> (impl Stream + Unpin, SocketAddr, PortGuard) { source_with(Some(TOKEN.to_owned().into()), None, acknowledgements, false).await } @@ -1356,9 +1357,13 @@ mod tests { valid_tokens: Option<&[&str]>, acknowledgements: Option, store_hec_token: bool, - ) -> (impl Stream + Unpin + use<>, SocketAddr) { + ) -> ( + impl Stream + Unpin + use<>, + SocketAddr, + PortGuard, + ) { let (sender, recv) = SourceSender::new_test_finalize(EventStatus::Delivered); - let address = next_addr(); + let (_guard, address) = next_addr(); let valid_tokens = valid_tokens.map(|tokens| tokens.iter().map(|v| v.to_string().into()).collect()); let cx = SourceContext::new_test(sender, None); @@ -1380,7 +1385,7 @@ mod tests { .unwrap() }); wait_for_tcp(address).await; - (recv, address) + (recv, address, _guard) } async fn sink( @@ -1417,7 +1422,7 @@ mod tests { compression: Compression, acknowledgements: Option, ) -> (VectorSink, impl Stream + Unpin) { - let (source, address) = source(acknowledgements).await; + let (source, address, _guard) = source(acknowledgements).await; let (sink, health) = sink(address, encoding, compression).await; assert!(health.await.is_ok()); (sink, source) @@ -1718,7 +1723,7 @@ mod tests { async fn raw() { assert_source_compliance(&HTTP_PUSH_SOURCE_TAGS, async { let message = "raw"; - let (source, address) = source(None).await; + let (source, address, _guard) = source(None).await; assert_eq!(200, post(address, "services/collector/raw", message).await); @@ 
-1742,7 +1747,7 @@ mod tests { async fn root() { assert_source_compliance(&HTTP_PUSH_SOURCE_TAGS, async { let message = r#"{ "event": { "message": "root"} }"#; - let (source, address) = source(None).await; + let (source, address, _guard) = source(None).await; assert_eq!(200, post(address, "services/collector", message).await); @@ -1766,7 +1771,7 @@ mod tests { async fn channel_header() { assert_source_compliance(&HTTP_PUSH_SOURCE_TAGS, async { let message = "raw"; - let (source, address) = source(None).await; + let (source, address, _guard) = source(None).await; let opts = SendWithOpts { channel: Some(Channel::Header("guid")), @@ -1788,7 +1793,7 @@ mod tests { async fn xff_header_raw() { assert_source_compliance(&HTTP_PUSH_SOURCE_TAGS, async { let message = "raw"; - let (source, address) = source(None).await; + let (source, address, _guard) = source(None).await; let opts = SendWithOpts { channel: Some(Channel::Header("guid")), @@ -1814,7 +1819,7 @@ mod tests { async fn xff_header_event_with_host_field() { assert_source_compliance(&HTTP_PUSH_SOURCE_TAGS, async { let message = r#"{"event":"first", "host": "10.1.0.2"}"#; - let (source, address) = source(None).await; + let (source, address, _guard) = source(None).await; let opts = SendWithOpts { channel: Some(Channel::Header("guid")), @@ -1840,7 +1845,7 @@ mod tests { async fn xff_header_event_without_host_field() { assert_source_compliance(&HTTP_PUSH_SOURCE_TAGS, async { let message = r#"{"event":"first", "color": "blue"}"#; - let (source, address) = source(None).await; + let (source, address, _guard) = source(None).await; let opts = SendWithOpts { channel: Some(Channel::Header("guid")), @@ -1865,7 +1870,7 @@ mod tests { async fn channel_query_param() { assert_source_compliance(&HTTP_PUSH_SOURCE_TAGS, async { let message = "raw"; - let (source, address) = source(None).await; + let (source, address, _guard) = source(None).await; let opts = SendWithOpts { channel: Some(Channel::QueryParam("guid")), @@ -1885,7 +1890,7 @@ 
mod tests { #[tokio::test] async fn no_data() { - let (_source, address) = source(None).await; + let (_source, address, _guard) = source(None).await; assert_eq!(400, post(address, "services/collector/event", "").await); } @@ -1893,7 +1898,7 @@ mod tests { #[tokio::test] async fn invalid_token() { assert_source_error(&COMPONENT_ERROR_TAGS, async { - let (_source, address) = source(None).await; + let (_source, address, _guard) = source(None).await; let opts = SendWithOpts { channel: Some(Channel::Header("channel")), forwarded_for: None, @@ -1909,7 +1914,7 @@ mod tests { #[tokio::test] async fn health_ignores_token() { - let (_source, address) = source(None).await; + let (_source, address, _guard) = source(None).await; let res = reqwest::Client::new() .get(format!("http://{address}/services/collector/health")) @@ -1923,7 +1928,7 @@ mod tests { #[tokio::test] async fn health() { - let (_source, address) = source(None).await; + let (_source, address, _guard) = source(None).await; let res = reqwest::Client::new() .get(format!("http://{address}/services/collector/health")) @@ -1938,7 +1943,8 @@ mod tests { async fn secondary_token() { assert_source_compliance(&HTTP_PUSH_SOURCE_TAGS, async { let message = r#"{"event":"first", "color": "blue"}"#; - let (_source, address) = source_with(None, Some(VALID_TOKENS), None, false).await; + let (_source, address, _guard) = + source_with(None, Some(VALID_TOKENS), None, false).await; let options = SendWithOpts { channel: None, forwarded_for: None, @@ -1963,7 +1969,7 @@ mod tests { async fn event_service_token_passthrough_enabled() { assert_source_compliance(&HTTP_PUSH_SOURCE_TAGS, async { let message = "passthrough_token_enabled"; - let (source, address) = source_with(None, Some(VALID_TOKENS), None, true).await; + let (source, address, _guard) = source_with(None, Some(VALID_TOKENS), None, true).await; let (sink, health) = sink( address, TextSerializerConfig::default().into(), @@ -1990,7 +1996,7 @@ mod tests { async fn 
raw_service_token_passthrough_enabled() { assert_source_compliance(&HTTP_PUSH_SOURCE_TAGS, async { let message = "raw"; - let (source, address) = source_with(None, Some(VALID_TOKENS), None, true).await; + let (source, address, _guard) = source_with(None, Some(VALID_TOKENS), None, true).await; assert_eq!(200, post(address, "services/collector/raw", message).await); @@ -2017,7 +2023,7 @@ mod tests { async fn no_authorization() { assert_source_compliance(&HTTP_PUSH_SOURCE_TAGS, async { let message = "no_authorization"; - let (source, address) = source_with(None, None, None, false).await; + let (source, address, _guard) = source_with(None, None, None, false).await; let (sink, health) = sink( address, TextSerializerConfig::default().into(), @@ -2041,7 +2047,7 @@ mod tests { async fn no_authorization_token_passthrough_enabled() { assert_source_compliance(&HTTP_PUSH_SOURCE_TAGS, async { let message = "no_authorization"; - let (source, address) = source_with(None, None, None, true).await; + let (source, address, _guard) = source_with(None, None, None, true).await; let (sink, health) = sink( address, TextSerializerConfig::default().into(), @@ -2068,7 +2074,7 @@ mod tests { async fn partial() { assert_source_compliance(&HTTP_PUSH_SOURCE_TAGS, async { let message = r#"{"event":"first"}{"event":"second""#; - let (source, address) = source(None).await; + let (source, address, _guard) = source(None).await; assert_eq!( 400, @@ -2095,7 +2101,7 @@ mod tests { let message = r#" {"event":"first"} "#; - let (source, address) = source(None).await; + let (source, address, _guard) = source(None).await; assert_eq!( 200, @@ -2120,7 +2126,7 @@ mod tests { async fn handles_spaces() { assert_source_compliance(&HTTP_PUSH_SOURCE_TAGS, async { let message = r#" {"event":"first"} "#; - let (source, address) = source(None).await; + let (source, address, _guard) = source(None).await; assert_eq!( 200, @@ -2145,7 +2151,7 @@ mod tests { async fn handles_non_utf8() { 
assert_source_compliance(&HTTP_PUSH_SOURCE_TAGS, async { let message = b" {\"event\": { \"non\": \"A non UTF8 character \xE4\", \"number\": 2, \"bool\": true } } "; - let (source, address) = source(None).await; + let (source, address, _guard) = source(None).await; let b = reqwest::Client::new() .post(format!( @@ -2173,7 +2179,7 @@ mod tests { async fn default() { assert_source_compliance(&HTTP_PUSH_SOURCE_TAGS, async { let message = r#"{"event":"first","source":"main"}{"event":"second"}{"event":"third","source":"secondary"}"#; - let (source, address) = source(None).await; + let (source, address, _guard) = source(None).await; assert_eq!( 200, @@ -2286,7 +2292,7 @@ mod tests { enabled: Some(true), ..Default::default() }; - let (source, address) = source(Some(ack_config)).await; + let (source, address, _guard) = source(Some(ack_config)).await; let event_message = r#"{"event":"first", "color": "blue"}{"event":"second"}"#; let opts = SendWithOpts { channel: Some(Channel::Header("guid")), @@ -2331,7 +2337,7 @@ mod tests { enabled: Some(true), ..Default::default() }; - let (source, address) = source(Some(ack_config)).await; + let (source, address, _guard) = source(Some(ack_config)).await; let event_message = "raw event message"; let opts = SendWithOpts { channel: Some(Channel::Header("guid")), @@ -2376,7 +2382,7 @@ mod tests { enabled: Some(true), ..Default::default() }; - let (source, address) = source(Some(ack_config)).await; + let (source, address, _guard) = source(Some(ack_config)).await; let event_message = "raw event message"; let opts = SendWithOpts { channel: Some(Channel::Header("guid")), @@ -2434,7 +2440,7 @@ mod tests { ..Default::default() }; - let (_source, address) = source(Some(ack_config)).await; + let (_source, address, _guard) = source(Some(ack_config)).await; let mut opts = SendWithOpts { channel: Some(Channel::Header("guid")), forwarded_for: None, @@ -2470,7 +2476,7 @@ mod tests { ..Default::default() }; - let (source, address) = 
source(Some(ack_config)).await; + let (source, address, _guard) = source(Some(ack_config)).await; let opts = SendWithOpts { channel: Some(Channel::Header("guid")), forwarded_for: None, @@ -2542,7 +2548,7 @@ mod tests { enabled: Some(true), ..Default::default() }; - let (source, address) = source(Some(ack_config)).await; + let (source, address, _guard) = source(Some(ack_config)).await; let opts = SendWithOpts { channel: Some(Channel::Header("guid")), forwarded_for: None, @@ -2588,7 +2594,7 @@ mod tests { enabled: Some(true), ..Default::default() }; - let (_, address) = source(Some(ack_config)).await; + let (_, address, _guard) = source(Some(ack_config)).await; let opts = SendWithOpts { channel: None, @@ -2604,7 +2610,7 @@ mod tests { #[tokio::test] async fn ack_service_acknowledgements_disabled() { let message = r#" {"acks":[0]} "#; - let (_, address) = source(None).await; + let (_, address, _guard) = source(None).await; let opts = SendWithOpts { channel: Some(Channel::Header("guid")), diff --git a/src/sources/statsd/mod.rs b/src/sources/statsd/mod.rs index c6c987d92eb64..7f7cf027d397d 100644 --- a/src/sources/statsd/mod.rs +++ b/src/sources/statsd/mod.rs @@ -421,6 +421,7 @@ mod test { use crate::{ series, test_util::{ + addr::next_addr, collect_limited, components::{ COMPONENT_ERROR_TAGS, SOCKET_PUSH_SOURCE_TAGS, assert_source_compliance, @@ -429,7 +430,6 @@ mod test { metrics::{ AbsoluteMetricState, assert_counter, assert_distribution, assert_gauge, assert_set, }, - next_addr, }, }; @@ -441,11 +441,11 @@ mod test { #[tokio::test] async fn test_statsd_udp() { assert_source_compliance(&SOCKET_PUSH_SOURCE_TAGS, async move { - let in_addr = next_addr(); + let (_guard, in_addr) = next_addr(); let config = StatsdConfig::Udp(UdpConfig::from_address(in_addr.into())); let (sender, mut receiver) = mpsc::channel(200); tokio::spawn(async move { - let bind_addr = next_addr(); + let (_guard, bind_addr) = next_addr(); let socket = UdpSocket::bind(bind_addr).await.unwrap(); 
socket.connect(in_addr).await.unwrap(); while let Some(bytes) = receiver.next().await { @@ -460,7 +460,7 @@ mod test { #[tokio::test] async fn test_statsd_tcp() { assert_source_compliance(&SOCKET_PUSH_SOURCE_TAGS, async move { - let in_addr = next_addr(); + let (_guard, in_addr) = next_addr(); let config = StatsdConfig::Tcp(TcpConfig::from_address(in_addr.into())); let (sender, mut receiver) = mpsc::channel(200); tokio::spawn(async move { @@ -481,7 +481,7 @@ mod test { #[tokio::test] async fn test_statsd_error() { assert_source_error(&COMPONENT_ERROR_TAGS, async move { - let in_addr = next_addr(); + let (_guard, in_addr) = next_addr(); let config = StatsdConfig::Tcp(TcpConfig::from_address(in_addr.into())); let (sender, mut receiver) = mpsc::channel(200); tokio::spawn(async move { @@ -527,14 +527,14 @@ mod test { #[tokio::test] async fn test_statsd_udp_conversion_disabled() { - let in_addr = next_addr(); + let (_guard, in_addr) = next_addr(); let mut config = UdpConfig::from_address(in_addr.into()); config.convert_to = ConversionUnit::Milliseconds; let statsd_config = StatsdConfig::Udp(config); let (mut sender, mut receiver) = mpsc::channel(200); tokio::spawn(async move { - let bind_addr = next_addr(); + let (_guard, bind_addr) = next_addr(); let socket = UdpSocket::bind(bind_addr).await.unwrap(); socket.connect(in_addr).await.unwrap(); while let Some(bytes) = receiver.next().await { diff --git a/src/sources/syslog.rs b/src/sources/syslog.rs index ecf2fcc767d4f..57ed8d90f3a28 100644 --- a/src/sources/syslog.rs +++ b/src/sources/syslog.rs @@ -471,8 +471,9 @@ mod test { event::{Event, LogEvent}, test_util::{ CountReceiver, + addr::next_addr, components::{SOCKET_PUSH_SOURCE_TAGS, assert_source_compliance}, - next_addr, random_maps, random_string, send_encodable, send_lines, wait_for_tcp, + random_maps, random_string, send_encodable, send_lines, wait_for_tcp, }, }; @@ -1121,7 +1122,7 @@ mod test { async fn test_tcp_syslog() { 
assert_source_compliance(&SOCKET_PUSH_SOURCE_TAGS, async { let num_messages: usize = 10000; - let in_addr = next_addr(); + let (_guard, in_addr) = next_addr(); // Create and spawn the source. let config = SyslogConfig::from_mode(Mode::Tcp { @@ -1266,7 +1267,7 @@ mod test { async fn test_octet_counting_syslog() { assert_source_compliance(&SOCKET_PUSH_SOURCE_TAGS, async { let num_messages: usize = 10000; - let in_addr = next_addr(); + let (_guard, in_addr) = next_addr(); // Create and spawn the source. let config = SyslogConfig::from_mode(Mode::Tcp { diff --git a/src/sources/util/framestream.rs b/src/sources/util/framestream.rs index 735a924fff37e..5296d4617d815 100644 --- a/src/sources/util/framestream.rs +++ b/src/sources/util/framestream.rs @@ -973,7 +973,7 @@ mod test { event::{Event, LogEvent}, shutdown::SourceShutdownCoordinator, sources::util::net::SocketListenAddr, - test_util::{collect_n, collect_n_stream, next_addr}, + test_util::{addr::next_addr, collect_n, collect_n_stream}, }; #[derive(Clone)] @@ -1475,7 +1475,7 @@ mod test { async fn blocked_framestream_tcp() { let source_name = "test_source"; let (tx, rx) = SourceSender::new_test(); - let addr = next_addr(); + let (_guard, addr) = next_addr(); let (source_handle, shutdown) = init_framestream_tcp( source_name, &addr, @@ -1499,7 +1499,7 @@ mod test { async fn normal_framestream_singlethreaded_tcp() { let source_name = "test_source"; let (tx, rx) = SourceSender::new_test(); - let addr = next_addr(); + let (_guard, addr) = next_addr(); let (source_handle, shutdown) = init_framestream_tcp( source_name, &addr, @@ -1542,7 +1542,7 @@ mod test { async fn normal_framestream_multithreaded_tcp() { let source_name = "test_source"; let (tx, rx) = SourceSender::new_test(); - let addr = next_addr(); + let (_guard, addr) = next_addr(); let (source_handle, shutdown) = init_framestream_tcp( source_name, &addr, @@ -1585,7 +1585,7 @@ mod test { async fn multiple_content_types_tcp() { let source_name = "test_source"; let 
(tx, _) = SourceSender::new_test(); - let addr = next_addr(); + let (_guard, addr) = next_addr(); let (source_handle, shutdown) = init_framestream_tcp( source_name, &addr, diff --git a/src/sources/vector/mod.rs b/src/sources/vector/mod.rs index ed6cc7255c02a..519365086a491 100644 --- a/src/sources/vector/mod.rs +++ b/src/sources/vector/mod.rs @@ -321,7 +321,7 @@ mod tests { #[tokio::test] async fn receive_message() { - let addr = test_util::next_addr(); + let (_guard, addr) = test_util::addr::next_addr(); let config = format!(r#"address = "{addr}""#); run_test(&config, addr).await; @@ -329,7 +329,7 @@ mod tests { #[tokio::test] async fn receive_compressed_message() { - let addr = test_util::next_addr(); + let (_guard, addr) = test_util::addr::next_addr(); let config = format!( r#"address = "{addr}" diff --git a/src/sources/websocket/source.rs b/src/sources/websocket/source.rs index 31115fe798097..da515f21f3e37 100644 --- a/src/sources/websocket/source.rs +++ b/src/sources/websocket/source.rs @@ -462,10 +462,10 @@ mod tests { common::websocket::WebSocketCommonConfig, sources::websocket::config::{PongMessage, WebSocketConfig}, test_util::{ + addr::next_addr, components::{ SOURCE_TAGS, run_and_assert_source_compliance, run_and_assert_source_error, }, - next_addr, }, }; @@ -481,7 +481,7 @@ mod tests { /// Starts a WebSocket server that pushes a binary message to the first client. async fn start_binary_push_server() -> String { - let addr = next_addr(); + let (_guard, addr) = next_addr(); let listener = TcpListener::bind(&addr).await.expect("Failed to bind"); let server_addr = format!("ws://{}", listener.local_addr().unwrap()); @@ -501,7 +501,7 @@ mod tests { /// Starts a WebSocket server that pushes a message to the first client that connects. 
async fn start_push_server() -> String { - let addr = next_addr(); + let (_guard, addr) = next_addr(); let listener = TcpListener::bind(&addr).await.expect("Failed to bind"); let server_addr = format!("ws://{}", listener.local_addr().unwrap()); @@ -523,7 +523,7 @@ mod tests { /// Starts a WebSocket server that waits for an initial message from the client, /// and upon receiving it, sends a confirmation message back. async fn start_subscribe_server(initial_message: String, response_message: String) -> String { - let addr = next_addr(); + let (_guard, addr) = next_addr(); let listener = TcpListener::bind(&addr).await.expect("Failed to bind"); let server_addr = format!("ws://{}", listener.local_addr().unwrap()); @@ -547,7 +547,7 @@ mod tests { } async fn start_reconnect_server() -> String { - let addr = next_addr(); + let (_guard, addr) = next_addr(); let listener = TcpListener::bind(&addr).await.expect("Failed to bind"); let server_addr = format!("ws://{}", listener.local_addr().unwrap()); @@ -647,7 +647,7 @@ mod tests { } async fn start_reject_initial_message_server() -> String { - let addr = next_addr(); + let (_guard, addr) = next_addr(); let listener = TcpListener::bind(&addr).await.expect("Failed to bind"); let server_addr = format!("ws://{}", listener.local_addr().unwrap()); @@ -679,7 +679,7 @@ mod tests { } async fn start_unresponsive_server() -> String { - let addr = next_addr(); + let (_guard, addr) = next_addr(); let listener = TcpListener::bind(&addr).await.expect("Failed to bind"); let server_addr = format!("ws://{}", listener.local_addr().unwrap()); @@ -712,7 +712,7 @@ mod tests { } async fn start_blackhole_server() -> String { - let addr = next_addr(); + let (_guard, addr) = next_addr(); let listener = TcpListener::bind(&addr).await.expect("Failed to bind"); let server_addr = format!("ws://{}", listener.local_addr().unwrap()); diff --git a/src/test_util/addr.rs b/src/test_util/addr.rs new file mode 100644 index 0000000000000..4654858cd99fc --- /dev/null 
+++ b/src/test_util/addr.rs @@ -0,0 +1,107 @@ +//! Test utilities for allocating unique network addresses. +//! +//! This module provides thread-safe port allocation for tests using a guard pattern +//! to prevent port reuse race conditions. The design eliminates intra-process races by: +//! 1. Binding to get a port while holding a TCP listener +//! 2. Registering the port atomically (while still holding the listener and registry lock) +//! 3. Only then releasing the listener (port now protected by registry entry) +//! +//! This ensures no race window between port allocation and registration. + +use std::{ + collections::HashSet, + net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, TcpListener as StdTcpListener}, + sync::{LazyLock, Mutex}, +}; + +/// Maximum number of attempts to allocate a unique port before panicking. +/// This should be far more than needed since port collisions are rare, +/// but provides a safety net against infinite loops. +const MAX_PORT_ALLOCATION_ATTEMPTS: usize = 100; + +/// A guard that reserves a port in the registry, preventing port reuse until dropped. +/// The guard does NOT hold the actual listener - it just marks the port as reserved +/// so that concurrent calls to next_addr() won't return the same port. +pub struct PortGuard { + addr: SocketAddr, +} + +impl PortGuard { + /// Get the socket address that this guard is holding. + pub const fn addr(&self) -> SocketAddr { + self.addr + } +} + +impl Drop for PortGuard { + fn drop(&mut self) { + // Remove from the reserved ports set when dropped + RESERVED_PORTS + .lock() + .expect("poisoned lock potentially due to test panicking") + .remove(&self.addr.port()); + } +} + +/// Global set of reserved ports for collision detection. When a test allocates a port, we check this set to ensure the +/// OS didn't recycle a port that's still in use by another test. +/// Ports are tracked by number only (u16). This means IPv4 and IPv6 may block each other from using the same port. 
+/// This simplification is acceptable for our tests. +static RESERVED_PORTS: LazyLock<Mutex<HashSet<u16>>> = LazyLock::new(|| Mutex::new(HashSet::new())); + +/// Allocates a unique port and returns a guard that keeps it reserved. +/// +/// The returned `PortGuard` must be kept alive for as long as you need the port reserved. +/// When the guard is dropped, the port is automatically released. +/// +/// If the OS assigns a port that's already reserved by another test, this function will +/// automatically retry with a new port, ensuring each test gets a unique port. +/// +/// # Example +/// ```ignore +/// let (_guard, addr) = next_addr_for_ip(IpAddr::V4(Ipv4Addr::LOCALHOST)); +/// // Use addr for your test +/// // Port is automatically released when _guard goes out of scope +/// ``` +pub fn next_addr_for_ip(ip: IpAddr) -> (PortGuard, SocketAddr) { + for _ in 0..MAX_PORT_ALLOCATION_ATTEMPTS { + let listener = StdTcpListener::bind((ip, 0)).expect("Failed to bind to OS-assigned port"); + let addr = listener.local_addr().expect("Failed to get local address"); + let port = addr.port(); + + // Check if this port is already reserved by another test WHILE still holding the listener + let mut reserved = RESERVED_PORTS + .lock() + .expect("poisoned lock potentially due to test panicking"); + if reserved.contains(&port) { + // OS recycled a port that's still reserved by another test. 
+ // Lock and listener will be dropped implicitly after continuing + continue; + } + + // Port is unique, mark it as reserved BEFORE dropping the listener + // This ensures no race window between dropping listener and registering the port + reserved.insert(port); + drop(reserved); + + // Now it's safe to drop the listener - the registry protects the port + drop(listener); + + let guard = PortGuard { addr }; + return (guard, addr); + } + + panic!("Failed to allocate a unique port after {MAX_PORT_ALLOCATION_ATTEMPTS} attempts"); +} + +pub fn next_addr() -> (PortGuard, SocketAddr) { + next_addr_for_ip(IpAddr::V4(Ipv4Addr::LOCALHOST)) +} + +pub fn next_addr_any() -> (PortGuard, SocketAddr) { + next_addr_for_ip(IpAddr::V4(Ipv4Addr::UNSPECIFIED)) +} + +pub fn next_addr_v6() -> (PortGuard, SocketAddr) { + next_addr_for_ip(IpAddr::V6(Ipv6Addr::LOCALHOST)) +} diff --git a/src/test_util/http.rs b/src/test_util/http.rs index 592557168d41b..d88f9021f9aa9 100644 --- a/src/test_util/http.rs +++ b/src/test_util/http.rs @@ -6,7 +6,7 @@ use hyper::{ service::{make_service_fn, service_fn}, }; -use super::{next_addr, wait_for_tcp}; +use super::{addr::next_addr, wait_for_tcp}; /// Spawns an HTTP server that uses the given `handler` to respond to requests. 
/// @@ -17,7 +17,7 @@ where H: Fn(Request) -> F + Clone + Send + 'static, F: Future, Infallible>> + Send + 'static, { - let address = next_addr(); + let (_guard, address) = next_addr(); let uri = Uri::builder() .scheme(Scheme::HTTP) diff --git a/src/test_util/mod.rs b/src/test_util/mod.rs index 120afb00ced93..e9d0f82a561bc 100644 --- a/src/test_util/mod.rs +++ b/src/test_util/mod.rs @@ -1,4 +1,5 @@ #![allow(missing_docs)] + use std::{ collections::HashMap, convert::Infallible, @@ -6,7 +7,7 @@ use std::{ future::{Future, ready}, io::Read, iter, - net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}, + net::SocketAddr, path::{Path, PathBuf}, pin::Pin, sync::{ @@ -20,7 +21,6 @@ use chrono::{DateTime, SubsecRound, Utc}; use flate2::read::MultiGzDecoder; use futures::{FutureExt, SinkExt, Stream, StreamExt, TryStreamExt, stream, task::noop_waker_ref}; use openssl::ssl::{SslConnector, SslFiletype, SslMethod, SslVerifyMode}; -use portpicker::pick_unused_port; use rand::{Rng, rng}; use rand_distr::Alphanumeric; use tokio::{ @@ -55,24 +55,21 @@ const WAIT_FOR_SECS: u64 = 5; // The default time to wait in `wait_for` const WAIT_FOR_MIN_MILLIS: u64 = 5; // The minimum time to pause before retrying const WAIT_FOR_MAX_MILLIS: u64 = 500; // The maximum time to pause before retrying +pub mod addr; +pub mod compression; +pub mod stats; + #[cfg(any(test, feature = "test-utils"))] pub mod components; - #[cfg(test)] pub mod http; - +#[cfg(test)] +pub mod integration; #[cfg(test)] pub mod metrics; - #[cfg(test)] pub mod mock; -pub mod compression; -pub mod stats; - -#[cfg(test)] -pub mod integration; - #[macro_export] macro_rules! 
assert_downcast_matches { ($e:expr_2021, $t:ty, $v:pat) => {{ @@ -117,23 +114,6 @@ pub fn open_fixture(path: impl AsRef) -> crate::Result Ok(value) } -pub fn next_addr_for_ip(ip: IpAddr) -> SocketAddr { - let port = pick_unused_port(ip); - SocketAddr::new(ip, port) -} - -pub fn next_addr() -> SocketAddr { - next_addr_for_ip(IpAddr::V4(Ipv4Addr::LOCALHOST)) -} - -pub fn next_addr_any() -> SocketAddr { - next_addr_for_ip(IpAddr::V4(Ipv4Addr::UNSPECIFIED)) -} - -pub fn next_addr_v6() -> SocketAddr { - next_addr_for_ip(IpAddr::V6(Ipv6Addr::LOCALHOST)) -} - pub fn trace_init() { #[cfg(unix)] let color = { diff --git a/src/topology/test/crash.rs b/src/topology/test/crash.rs index f4b54f5236bff..499ad256f10f8 100644 --- a/src/topology/test/crash.rs +++ b/src/topology/test/crash.rs @@ -8,8 +8,9 @@ use crate::{ sources::socket::SocketConfig, test_util::{ CountReceiver, + addr::next_addr, mock::{error_sink, error_source, panic_sink, panic_source}, - next_addr, random_lines, send_lines, start_topology, trace_init, wait_for_tcp, + random_lines, send_lines, start_topology, trace_init, wait_for_tcp, }, }; @@ -20,8 +21,8 @@ async fn test_source_error() { let num_lines: usize = 10; - let in_addr = next_addr(); - let out_addr = next_addr(); + let (_guard_0, in_addr) = next_addr(); + let (_guard_1, out_addr) = next_addr(); let mut config = Config::builder(); config.add_source("in", SocketConfig::make_basic_tcp_config(in_addr)); @@ -64,8 +65,8 @@ async fn test_source_panic() { let num_lines: usize = 10; - let in_addr = next_addr(); - let out_addr = next_addr(); + let (_guard_0, in_addr) = next_addr(); + let (_guard_1, out_addr) = next_addr(); let mut config = Config::builder(); config.add_source("in", SocketConfig::make_basic_tcp_config(in_addr)); @@ -110,9 +111,9 @@ async fn test_sink_error() { let num_lines: usize = 10; - let in1_addr = next_addr(); - let in2_addr = next_addr(); - let out_addr = next_addr(); + let (_guard_in1, in1_addr) = next_addr(); + let (_guard_in2, in2_addr) = 
next_addr(); + let (_guard_out, out_addr) = next_addr(); let mut config = Config::builder(); config.add_source("in1", SocketConfig::make_basic_tcp_config(in1_addr)); @@ -158,9 +159,9 @@ async fn test_sink_panic() { let num_lines: usize = 10; - let in1_addr = next_addr(); - let in2_addr = next_addr(); - let out_addr = next_addr(); + let (_guard_in1, in1_addr) = next_addr(); + let (_guard_in2, in2_addr) = next_addr(); + let (_guard_out, out_addr) = next_addr(); let mut config = Config::builder(); config.add_source("in1", SocketConfig::make_basic_tcp_config(in1_addr)); diff --git a/src/topology/test/end_to_end.rs b/src/topology/test/end_to_end.rs index 50a6f5bda1481..f504206056a00 100644 --- a/src/topology/test/end_to_end.rs +++ b/src/topology/test/end_to_end.rs @@ -82,8 +82,8 @@ pub fn http_client( async fn http_to_http(status: StatusCode, response: StatusCode) { test_util::trace_init(); - let address1 = test_util::next_addr(); - let address2 = test_util::next_addr(); + let (_guard_1, address1) = test_util::addr::next_addr(); + let (_guard_2, address2) = test_util::addr::next_addr(); let config = config::load_from_str( &format!( r#" diff --git a/src/topology/test/reload.rs b/src/topology/test/reload.rs index 9cb10cc87370c..907be19f6ec0f 100644 --- a/src/topology/test/reload.rs +++ b/src/topology/test/reload.rs @@ -20,7 +20,7 @@ use crate::{ internal_metrics::InternalMetricsConfig, prometheus::PrometheusRemoteWriteConfig, splunk_hec::SplunkConfig, }, - test_util::{self, mock::basic_sink, next_addr, start_topology, temp_dir, wait_for_tcp}, + test_util::{self, addr::next_addr, mock::basic_sink, start_topology, temp_dir, wait_for_tcp}, topology::ReloadError::*, }; @@ -57,7 +57,7 @@ fn splunk_source_config(addr: SocketAddr) -> SplunkConfig { async fn topology_reuse_old_port() { test_util::trace_init(); - let address = next_addr(); + let (_guard, address) = next_addr(); let mut old_config = Config::builder(); old_config.add_source("in1", prom_remote_write_source(address)); 
@@ -78,8 +78,8 @@ async fn topology_reuse_old_port() { async fn topology_rebuild_old() { test_util::trace_init(); - let address_0 = next_addr(); - let address_1 = next_addr(); + let (_guard_0, address_0) = next_addr(); + let (_guard_1, address_1) = next_addr(); let mut old_config = Config::builder(); old_config.add_source("in", splunk_source_config(address_0)); @@ -105,7 +105,7 @@ async fn topology_rebuild_old() { async fn topology_old() { test_util::trace_init(); - let address = next_addr(); + let (_guard, address) = next_addr(); let mut old_config = Config::builder(); old_config.add_source("in", prom_remote_write_source(address)); @@ -125,7 +125,7 @@ async fn topology_reuse_old_port_sink() { // is enabled to use `internal_metrics`, otherwise it throws an error when trying to build the component. test_util::trace_init(); - let address = next_addr(); + let (_guard, address) = next_addr(); let mut old_config = Config::builder(); old_config.add_source("in", internal_metrics_source()); @@ -152,8 +152,8 @@ async fn topology_reuse_old_port_cross_dependency() { test_util::trace_init(); // Reload with source that uses address of changed sink. - let address_0 = next_addr(); - let address_1 = next_addr(); + let (_guard_0, address_0) = next_addr(); + let (_guard_1, address_1) = next_addr(); let mut old_config = Config::builder(); old_config.add_source("in", internal_metrics_source()); @@ -179,8 +179,8 @@ async fn topology_disk_buffer_conflict() { // is enabled to use `internal_metrics`, otherwise it throws an error when trying to build the component. 
test_util::trace_init(); - let address_0 = next_addr(); - let address_1 = next_addr(); + let (_guard_0, address_0) = next_addr(); + let (_guard_1, address_1) = next_addr(); let data_dir = temp_dir(); std::fs::create_dir(&data_dir).unwrap(); @@ -221,8 +221,8 @@ async fn topology_reload_with_new_components() { // This specifically exercises that we can add new components -- no changed or removed // components -- via the reload mechanism and without any issues. - let address_0 = next_addr(); - let address_1 = next_addr(); + let (_guard_0, address_0) = next_addr(); + let (_guard_1, address_1) = next_addr(); let mut old_config = Config::builder(); old_config.add_source("in1", internal_metrics_source()); @@ -250,7 +250,7 @@ async fn topology_readd_input() { // is enabled to use `internal_metrics`, otherwise it throws an error when trying to build the component. test_util::trace_init(); - let address_0 = next_addr(); + let (_guard, address_0) = next_addr(); let mut old_config = Config::builder(); old_config.add_source("in1", internal_metrics_source()); @@ -294,7 +294,7 @@ async fn topology_readd_input() { async fn topology_reload_component() { test_util::trace_init(); - let address_0 = next_addr(); + let (_guard, address_0) = next_addr(); let mut old_config = Config::builder(); old_config.add_source("in1", internal_metrics_source()); diff --git a/src/transforms/aws_ec2_metadata.rs b/src/transforms/aws_ec2_metadata.rs index eaec415db4bf0..5890625332f92 100644 --- a/src/transforms/aws_ec2_metadata.rs +++ b/src/transforms/aws_ec2_metadata.rs @@ -762,7 +762,7 @@ mod integration_tests { use super::*; use crate::{ event::{LogEvent, Metric, metric}, - test_util::{components::assert_transform_compliance, next_addr}, + test_util::{addr::next_addr, components::assert_transform_compliance}, transforms::test::create_topology, }; @@ -902,7 +902,7 @@ mod integration_tests { #[tokio::test(flavor = "multi_thread")] async fn timeout() { - let addr = next_addr(); + let (_guard, addr) = 
next_addr(); async fn sleepy() -> Result { tokio::time::sleep(Duration::from_secs(3)).await; @@ -933,7 +933,7 @@ mod integration_tests { // validates the configuration setting 'required'=false allows vector to run #[tokio::test(flavor = "multi_thread")] async fn not_required() { - let addr = next_addr(); + let (_guard, addr) = next_addr(); async fn sleepy() -> Result { tokio::time::sleep(Duration::from_secs(3)).await; diff --git a/tests/integration/shutdown.rs b/tests/integration/shutdown.rs index ddc930fc9b8f5..fcd456c4dc9c1 100644 --- a/tests/integration/shutdown.rs +++ b/tests/integration/shutdown.rs @@ -15,7 +15,7 @@ use nix::{ }; use serde_json::{Value, json}; use similar_asserts::assert_eq; -use vector::test_util::{next_addr, temp_file}; +use vector::test_util::{addr::next_addr, temp_file}; use crate::{create_directory, create_file, overwrite_file}; @@ -76,7 +76,7 @@ fn source_vector(source: &str) -> Command { } fn vector(config: &str) -> Command { - vector_with(create_file(config), next_addr(), false) + vector_with(create_file(config), next_addr().1, false) } fn vector_with(config_path: PathBuf, address: SocketAddr, quiet: bool) -> Command { @@ -289,7 +289,8 @@ fn configuration_path_recomputed() { ); // Vector command - let mut cmd = vector_with(dir.join("*"), next_addr(), true); + let (_guard, address) = next_addr(); + let mut cmd = vector_with(dir.join("*"), address, true); // Run vector let mut vector = cmd @@ -427,7 +428,7 @@ fn timely_shutdown_journald() { #[test] fn timely_shutdown_prometheus() { - let address = next_addr(); + let (_guard, address) = next_addr(); test_timely_shutdown_with_sub( vector_with(create_file(PROMETHEUS_SINK_CONFIG), address, false), |_| { @@ -612,7 +613,8 @@ fn timely_reload_shutdown() { .as_str(), ); - let mut cmd = vector_with(path.clone(), next_addr(), false); + let (_guard, address) = next_addr(); + let mut cmd = vector_with(path.clone(), address, false); cmd.arg("-w"); test_timely_shutdown_with_sub(cmd, |vector| { From 
41e384944f0bce1335b1aadb91f5f091c48d9f9b Mon Sep 17 00:00:00 2001 From: Benjamin Dornel Date: Fri, 14 Nov 2025 15:27:53 +0800 Subject: [PATCH 089/227] feat(codecs): add arrow IPC stream batch encoder (#24124) * enhancement(clickhouse sink): add support for ArrowStream format * refactor: do not allow dynamic database/table for arrow * chore: add type assertions * refactor: use clearer function names in request builder * chore: add snafu for request builder * chore: move imports to top package * chore: handle templates for table and database * build(deps): move arrow under external libs * build(deps): bump arrow/arrow-schema to latest * chore: add support for all timeunits e.g. nanoseconds * chore: add handling for LowCardinality * chore: add support for Clickhouse decimal * chore: add capacity and string zero copy optimization * chore: use logging macros from extern crate * chore: add debug logs for schema * chore: use simpler message for NoSchemaProvided snafu * chore: DRY target scale within decimal builders * docs: add note about upcasting i128 -> i256 * chore: move i256 import statement to top * chore: remove redundant schema log * chore: only log schema a single time at startup * chore: use arrow payload size instead of redundant json size * chore: add support for uint * chore: add support for rfc3339 timestamp * refactor: move arrow to clickhouse sink subfolder * refactor: move Arrow encoder to shared util * chore: remove redundant feature gates * refactor: rename schema.rs -> arrow_schema.rs * chore: add changelog fragment * chore: add helper function to handle clickhouse type parsing * refactor: remove default coercions in arrow_schema * refactor: add constants for decimal precision values * refactor: extract type modifier unwrapping into helper * refactor: remove unnecessary vector allocation * refactor: organize type mapping with comments * refactor: use constants and functional style in decimal parsing * chore: update LICENSE-3rdparty * chore: remove 
duplicated tests * chore: update generated docs * chore: remove redundant docstring * refactor: create generic arrow codec * chore: remove duplicated changelog * chore: cargo fmt * chore: fix lock file * chore: clippy * chore: add missing int/float types * make fmt * update licenses * chore: remove schema provider logic * chore: add with_capacity for helper functions * chore: add support for null values * chore: clean up encode_events_to_arrow_ipc_stream * chore: use macros for primitives and null constraint checks * add links to changelog * chore: clippy * chore: fix duplicate events * chore: remove duplicated import * chore: clippy --------- Co-authored-by: Pavlos Rontidis --- Cargo.lock | 304 +++- Cargo.toml | 6 +- LICENSE-3rdparty.csv | 24 + .../24074_arrow_batch_codec.feature.md | 6 + lib/codecs/Cargo.toml | 5 +- lib/codecs/src/encoding/format/arrow.rs | 1445 +++++++++++++++++ lib/codecs/src/encoding/format/mod.rs | 4 + lib/codecs/src/encoding/mod.rs | 11 +- lib/codecs/src/encoding/serializer.rs | 49 + lib/vector-lib/Cargo.toml | 1 + src/codecs/encoding/config.rs | 9 +- src/codecs/encoding/encoder.rs | 70 + src/codecs/encoding/mod.rs | 2 +- src/codecs/mod.rs | 3 +- src/internal_events/codecs.rs | 40 +- src/sinks/util/encoding.rs | 61 + 16 files changed, 2029 insertions(+), 11 deletions(-) create mode 100644 changelog.d/24074_arrow_batch_codec.feature.md create mode 100644 lib/codecs/src/encoding/format/arrow.rs diff --git a/Cargo.lock b/Cargo.lock index 413f09e0fda9b..e8085bfd45a0d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -94,6 +94,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" dependencies = [ "cfg-if", + "const-random", "getrandom 0.2.15", "once_cell", "serde", @@ -362,6 +363,175 @@ version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711" +[[package]] 
+name = "arrow" +version = "56.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e833808ff2d94ed40d9379848a950d995043c7fb3e81a30b383f4c6033821cc" +dependencies = [ + "arrow-arith", + "arrow-array", + "arrow-buffer", + "arrow-cast", + "arrow-data", + "arrow-ipc", + "arrow-ord", + "arrow-row", + "arrow-schema", + "arrow-select", + "arrow-string", +] + +[[package]] +name = "arrow-arith" +version = "56.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ad08897b81588f60ba983e3ca39bda2b179bdd84dced378e7df81a5313802ef8" +dependencies = [ + "arrow-array", + "arrow-buffer", + "arrow-data", + "arrow-schema", + "chrono", + "num", +] + +[[package]] +name = "arrow-array" +version = "56.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8548ca7c070d8db9ce7aa43f37393e4bfcf3f2d3681df278490772fd1673d08d" +dependencies = [ + "ahash 0.8.11", + "arrow-buffer", + "arrow-data", + "arrow-schema", + "chrono", + "half", + "hashbrown 0.16.0", + "num", +] + +[[package]] +name = "arrow-buffer" +version = "56.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e003216336f70446457e280807a73899dd822feaf02087d31febca1363e2fccc" +dependencies = [ + "bytes 1.10.1", + "half", + "num", +] + +[[package]] +name = "arrow-cast" +version = "56.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "919418a0681298d3a77d1a315f625916cb5678ad0d74b9c60108eb15fd083023" +dependencies = [ + "arrow-array", + "arrow-buffer", + "arrow-data", + "arrow-schema", + "arrow-select", + "atoi", + "base64 0.22.1", + "chrono", + "half", + "lexical-core", + "num", + "ryu", +] + +[[package]] +name = "arrow-data" +version = "56.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a5c64fff1d142f833d78897a772f2e5b55b36cb3e6320376f0961ab0db7bd6d0" +dependencies = [ + "arrow-buffer", + "arrow-schema", + "half", + "num", +] + +[[package]] +name 
= "arrow-ipc" +version = "56.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d3594dcddccc7f20fd069bc8e9828ce37220372680ff638c5e00dea427d88f5" +dependencies = [ + "arrow-array", + "arrow-buffer", + "arrow-data", + "arrow-schema", + "arrow-select", + "flatbuffers", +] + +[[package]] +name = "arrow-ord" +version = "56.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c8f82583eb4f8d84d4ee55fd1cb306720cddead7596edce95b50ee418edf66f" +dependencies = [ + "arrow-array", + "arrow-buffer", + "arrow-data", + "arrow-schema", + "arrow-select", +] + +[[package]] +name = "arrow-row" +version = "56.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d07ba24522229d9085031df6b94605e0f4b26e099fb7cdeec37abd941a73753" +dependencies = [ + "arrow-array", + "arrow-buffer", + "arrow-data", + "arrow-schema", + "half", +] + +[[package]] +name = "arrow-schema" +version = "56.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b3aa9e59c611ebc291c28582077ef25c97f1975383f1479b12f3b9ffee2ffabe" + +[[package]] +name = "arrow-select" +version = "56.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8c41dbbd1e97bfcaee4fcb30e29105fb2c75e4d82ae4de70b792a5d3f66b2e7a" +dependencies = [ + "ahash 0.8.11", + "arrow-array", + "arrow-buffer", + "arrow-data", + "arrow-schema", + "num", +] + +[[package]] +name = "arrow-string" +version = "56.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "53f5183c150fbc619eede22b861ea7c0eebed8eaac0333eaa7f6da5205fd504d" +dependencies = [ + "arrow-array", + "arrow-buffer", + "arrow-data", + "arrow-schema", + "arrow-select", + "memchr", + "num", + "regex", + "regex-syntax", +] + [[package]] name = "ascii" version = "0.9.3" @@ -1938,6 +2108,29 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"3eeab4423108c5d7c744f4d234de88d18d636100093ae04caf4825134b9c3a32" +[[package]] +name = "borsh" +version = "1.5.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ad8646f98db542e39fc66e68a20b2144f6a732636df7c2354e74645faaa433ce" +dependencies = [ + "borsh-derive", + "cfg_aliases", +] + +[[package]] +name = "borsh-derive" +version = "1.5.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fdd1d3c0c2f5833f22386f252fe8ed005c7f59fdcddeef025c01b4c3b9fd9ac3" +dependencies = [ + "once_cell", + "proc-macro-crate 3.2.0", + "proc-macro2 1.0.101", + "quote 1.0.40", + "syn 2.0.106", +] + [[package]] name = "brotli" version = "8.0.0" @@ -2361,6 +2554,7 @@ name = "codecs" version = "0.1.0" dependencies = [ "apache-avro 0.20.0", + "arrow", "bytes 1.10.1", "chrono", "csv-core", @@ -2378,6 +2572,7 @@ dependencies = [ "rand 0.9.2", "regex", "rstest", + "rust_decimal", "serde", "serde_json", "serde_with 3.14.0", @@ -2578,6 +2773,26 @@ version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "28c122c3980598d243d63d9a704629a2d748d101f278052ff068be5a4423ab6f" +[[package]] +name = "const-random" +version = "0.1.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87e00182fe74b066627d63b85fd550ac2998d4b0bd86bfed477a0ae4c7c71359" +dependencies = [ + "const-random-macro", +] + +[[package]] +name = "const-random-macro" +version = "0.1.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f9d839f2a20b0aee515dc581a6172f2321f96cab76c1a38a4c584a194955390e" +dependencies = [ + "getrandom 0.2.15", + "once_cell", + "tiny-keccak", +] + [[package]] name = "convert_case" version = "0.4.0" @@ -3928,6 +4143,16 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" +[[package]] +name = "flatbuffers" +version = "25.9.23" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "09b6620799e7340ebd9968d2e0708eb82cf1971e9a16821e2091b6d6e475eed5" +dependencies = [ + "bitflags 2.9.0", + "rustc_version 0.4.1", +] + [[package]] name = "flate2" version = "1.1.2" @@ -4504,6 +4729,7 @@ checksum = "6dd08c532ae367adf81c312a4580bc67f1d0fe8bc9c460520283f4c0ff277888" dependencies = [ "cfg-if", "crunchy", + "num-traits", ] [[package]] @@ -5974,6 +6200,63 @@ dependencies = [ "spin 0.5.2", ] +[[package]] +name = "lexical-core" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7d8d125a277f807e55a77304455eb7b1cb52f2b18c143b60e766c120bd64a594" +dependencies = [ + "lexical-parse-float", + "lexical-parse-integer", + "lexical-util", + "lexical-write-float", + "lexical-write-integer", +] + +[[package]] +name = "lexical-parse-float" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "52a9f232fbd6f550bc0137dcb5f99ab674071ac2d690ac69704593cb4abbea56" +dependencies = [ + "lexical-parse-integer", + "lexical-util", +] + +[[package]] +name = "lexical-parse-integer" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a7a039f8fb9c19c996cd7b2fcce303c1b2874fe1aca544edc85c4a5f8489b34" +dependencies = [ + "lexical-util", +] + +[[package]] +name = "lexical-util" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2604dd126bb14f13fb5d1bd6a66155079cb9fa655b37f875b3a742c705dbed17" + +[[package]] +name = "lexical-write-float" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50c438c87c013188d415fbabbb1dceb44249ab81664efbd31b14ae55dabb6361" +dependencies = [ + "lexical-util", + "lexical-write-integer", +] + +[[package]] +name = "lexical-write-integer" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"409851a618475d2d5796377cad353802345cba92c867d9fbcde9cf4eac4e14df" +dependencies = [ + "lexical-util", +] + [[package]] name = "libc" version = "0.2.175" @@ -9234,12 +9517,18 @@ dependencies = [ [[package]] name = "rust_decimal" -version = "1.33.1" +version = "1.39.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06676aec5ccb8fc1da723cc8c0f9a46549f21ebb8753d3915c6c41db1e7f1dc4" +checksum = "35affe401787a9bd846712274d97654355d21b2a2c092a3139aabe31e9022282" dependencies = [ "arrayvec", + "borsh", + "bytes 1.10.1", "num-traits", + "rand 0.8.5", + "rkyv", + "serde", + "serde_json", ] [[package]] @@ -10960,6 +11249,15 @@ dependencies = [ "time-core", ] +[[package]] +name = "tiny-keccak" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c9d3793400a45f954c52e73d068316d76b6f4e36977e3fcebb13a2721e80237" +dependencies = [ + "crunchy", +] + [[package]] name = "tinystr" version = "0.7.6" @@ -12166,6 +12464,7 @@ dependencies = [ "approx", "arc-swap", "arr_macro", + "arrow", "assert_cmd", "async-compression", "async-graphql", @@ -12306,6 +12605,7 @@ dependencies = [ "roaring", "rstest", "rumqttc", + "rust_decimal", "seahash", "semver 1.0.26", "serde", diff --git a/Cargo.toml b/Cargo.toml index 0706c76a7f306..7a781145b881e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -178,6 +178,7 @@ rand = { version = "0.9.2", default-features = false, features = ["small_rng", " rand_distr = { version = "0.5.1", default-features = false } regex = { version = "1.11.2", default-features = false, features = ["std", "perf"] } reqwest = { version = "0.11.26", features = ["json"] } +rust_decimal = { version = "1.33", default-features = false, features = ["std"] } semver = { version = "1.0.26", default-features = false, features = ["serde", "std"] } serde = { version = "1.0.219", default-features = false, features = ["alloc", "derive", "rc"] } serde_json = { version = "1.0.143", default-features = false, features = 
["raw_value", "std"] } @@ -337,6 +338,7 @@ greptimedb-ingester = { git = "https://github.com/GreptimeTeam/greptimedb-ingest arc-swap = { version = "1.7", default-features = false, optional = true } async-compression = { version = "0.4.27", default-features = false, features = ["tokio", "gzip", "zstd"], optional = true } apache-avro = { version = "0.16.0", default-features = false, optional = true } +arrow = { version = "56.2.0", default-features = false, features = ["ipc"], optional = true } axum = { version = "0.6.20", default-features = false } base64 = { workspace = true, optional = true } bloomy = { version = "1.2.0", default-features = false, optional = true } @@ -402,6 +404,7 @@ redis = { version = "0.32.4", default-features = false, features = ["connection- regex.workspace = true roaring = { version = "0.11.2", default-features = false, features = ["std"], optional = true } rumqttc = { version = "0.24.0", default-features = false, features = ["use-rustls"], optional = true } +rust_decimal = { workspace = true, optional = true } seahash = { version = "4.1.0", default-features = false } smallvec = { version = "1", default-features = false, features = ["union", "serde"] } snap = { version = "1.1.1", default-features = false } @@ -578,8 +581,9 @@ enrichment-tables-mmdb = ["dep:maxminddb"] enrichment-tables-memory = ["dep:evmap", "dep:evmap-derive", "dep:thread_local"] # Codecs -codecs-syslog = ["vector-lib/syslog"] +codecs-arrow = ["vector-lib/arrow"] codecs-opentelemetry = ["vector-lib/opentelemetry"] +codecs-syslog = ["vector-lib/syslog"] # Secrets secrets = ["secrets-aws-secrets-manager"] diff --git a/LICENSE-3rdparty.csv b/LICENSE-3rdparty.csv index fbf3097a61c5d..d8d6edc54a79d 100644 --- a/LICENSE-3rdparty.csv +++ b/LICENSE-3rdparty.csv @@ -32,6 +32,18 @@ arc-swap,https://github.com/vorner/arc-swap,MIT OR Apache-2.0,Michal 'vorner' Va arr_macro,https://github.com/JoshMcguigan/arr_macro,MIT OR Apache-2.0,Josh Mcguigan 
arr_macro_impl,https://github.com/JoshMcguigan/arr_macro,MIT OR Apache-2.0,Josh Mcguigan arrayvec,https://github.com/bluss/arrayvec,MIT OR Apache-2.0,bluss +arrow,https://github.com/apache/arrow-rs,Apache-2.0,Apache Arrow +arrow-arith,https://github.com/apache/arrow-rs,Apache-2.0,Apache Arrow +arrow-array,https://github.com/apache/arrow-rs,Apache-2.0,Apache Arrow +arrow-buffer,https://github.com/apache/arrow-rs,Apache-2.0,Apache Arrow +arrow-cast,https://github.com/apache/arrow-rs,Apache-2.0,Apache Arrow +arrow-data,https://github.com/apache/arrow-rs,Apache-2.0,Apache Arrow +arrow-ipc,https://github.com/apache/arrow-rs,Apache-2.0,Apache Arrow +arrow-ord,https://github.com/apache/arrow-rs,Apache-2.0,Apache Arrow +arrow-row,https://github.com/apache/arrow-rs,Apache-2.0,Apache Arrow +arrow-schema,https://github.com/apache/arrow-rs,Apache-2.0,Apache Arrow +arrow-select,https://github.com/apache/arrow-rs,Apache-2.0,Apache Arrow +arrow-string,https://github.com/apache/arrow-rs,Apache-2.0,Apache Arrow ascii,https://github.com/tomprogrammer/rust-ascii,Apache-2.0 OR MIT,"Thomas Bahn , Torbjørn Birch Moltu , Simon Sapin " async-broadcast,https://github.com/smol-rs/async-broadcast,MIT OR Apache-2.0,"Stjepan Glavina , Yoshua Wuyts , Zeeshan Ali Khan " async-channel,https://github.com/smol-rs/async-channel,Apache-2.0 OR MIT,Stjepan Glavina @@ -122,6 +134,8 @@ bollard-stubs,https://github.com/fussybeaver/bollard,Apache-2.0,Bollard contribu bon,https://github.com/elastio/bon,MIT OR Apache-2.0,The bon Authors bon-macros,https://github.com/elastio/bon,MIT OR Apache-2.0,The bon-macros Authors borrow-or-share,https://github.com/yescallop/borrow-or-share,MIT-0,Scallop Ye +borsh,https://github.com/near/borsh-rs,MIT OR Apache-2.0,Near Inc +borsh-derive,https://github.com/near/borsh-rs,Apache-2.0,Near Inc brotli,https://github.com/dropbox/rust-brotli,BSD-3-Clause AND MIT,"Daniel Reiter Horn , The Brotli Authors" 
brotli-decompressor,https://github.com/dropbox/rust-brotli-decompressor,BSD-3-Clause OR MIT,"Daniel Reiter Horn , The Brotli Authors" bson,https://github.com/mongodb/bson-rust,MIT,"Y. T. Chung , Kevin Yeh , Saghm Rossi , Patrick Freed , Isabel Atkinson , Abraham Egnor " @@ -168,6 +182,8 @@ compression-codecs,https://github.com/Nullus157/async-compression,MIT OR Apache- compression-core,https://github.com/Nullus157/async-compression,MIT OR Apache-2.0,"Wim Looman , Allen Bui " concurrent-queue,https://github.com/smol-rs/concurrent-queue,Apache-2.0 OR MIT,"Stjepan Glavina , Taiki Endo , John Nunley " const-oid,https://github.com/RustCrypto/formats/tree/master/const-oid,Apache-2.0 OR MIT,RustCrypto Developers +const-random,https://github.com/tkaitchuck/constrandom,MIT OR Apache-2.0,Tom Kaitchuck +const-random-macro,https://github.com/tkaitchuck/constrandom,MIT OR Apache-2.0,Tom Kaitchuck convert_case,https://github.com/rutrum/convert-case,MIT,David Purdum convert_case,https://github.com/rutrum/convert-case,MIT,rutrum cookie,https://github.com/SergioBenitez/cookie-rs,MIT OR Apache-2.0,"Sergio Benitez , Alex Crichton " @@ -265,6 +281,7 @@ fastrand,https://github.com/smol-rs/fastrand,Apache-2.0 OR MIT,Stjepan Glavina < ff,https://github.com/zkcrypto/ff,MIT OR Apache-2.0,"Sean Bowe , Jack Grigg " fiat-crypto,https://github.com/mit-plv/fiat-crypto,MIT OR Apache-2.0 OR BSD-1-Clause,Fiat Crypto library authors finl_unicode,https://github.com/dahosek/finl_unicode,MIT OR Apache-2.0,The finl_unicode Authors +flatbuffers,https://github.com/google/flatbuffers,Apache-2.0,"Robert Winslow , FlatBuffers Maintainers" flate2,https://github.com/rust-lang/flate2-rs,MIT OR Apache-2.0,"Alex Crichton , Josh Triplett " float_eq,https://github.com/jtempest/float_eq-rs,MIT OR Apache-2.0,jtempest fluent-uri,https://github.com/yescallop/fluent-uri-rs,MIT,Scallop Ye @@ -406,6 +423,12 @@ kube-runtime,https://github.com/kube-rs/kube,Apache-2.0,"clux 
lapin,https://github.com/amqp-rs/lapin,MIT,"Geoffroy Couprie , Marc-Antoine Perennou " lazy_static,https://github.com/rust-lang-nursery/lazy-static.rs,MIT OR Apache-2.0,Marvin Löbel +lexical-core,https://github.com/Alexhuszagh/rust-lexical,MIT OR Apache-2.0,Alex Huszagh +lexical-parse-float,https://github.com/Alexhuszagh/rust-lexical,MIT OR Apache-2.0,Alex Huszagh +lexical-parse-integer,https://github.com/Alexhuszagh/rust-lexical,MIT OR Apache-2.0,Alex Huszagh +lexical-util,https://github.com/Alexhuszagh/rust-lexical,MIT OR Apache-2.0,Alex Huszagh +lexical-write-float,https://github.com/Alexhuszagh/rust-lexical,MIT OR Apache-2.0,Alex Huszagh +lexical-write-integer,https://github.com/Alexhuszagh/rust-lexical,MIT OR Apache-2.0,Alex Huszagh libc,https://github.com/rust-lang/libc,MIT OR Apache-2.0,The Rust Project Developers libflate,https://github.com/sile/libflate,MIT,Takeru Ohta libflate_lz77,https://github.com/sile/libflate,MIT,Takeru Ohta @@ -745,6 +768,7 @@ tikv-jemallocator,https://github.com/tikv/jemallocator,MIT OR Apache-2.0,"Alex C time,https://github.com/time-rs/time,MIT OR Apache-2.0,"Jacob Pratt , Time contributors" time-core,https://github.com/time-rs/time,MIT OR Apache-2.0,"Jacob Pratt , Time contributors" time-macros,https://github.com/time-rs/time,MIT OR Apache-2.0,"Jacob Pratt , Time contributors" +tiny-keccak,https://github.com/debris/tiny-keccak,CC0-1.0,debris tinystr,https://github.com/unicode-org/icu4x,Unicode-3.0,The ICU4X Project Developers tinyvec,https://github.com/Lokathor/tinyvec,Zlib OR Apache-2.0 OR MIT,Lokathor tinyvec_macros,https://github.com/Soveu/tinyvec_macros,MIT OR Apache-2.0 OR Zlib,Soveu diff --git a/changelog.d/24074_arrow_batch_codec.feature.md b/changelog.d/24074_arrow_batch_codec.feature.md new file mode 100644 index 0000000000000..f9624481b2947 --- /dev/null +++ b/changelog.d/24074_arrow_batch_codec.feature.md @@ -0,0 +1,6 @@ +A generic [Apache Arrow](https://arrow.apache.org/) codec has been added to +support [Arrow 
IPC](https://arrow.apache.org/docs/format/Columnar.html#ipc-streaming-format) serialization across Vector. This enables sinks +like the `clickhouse` sink to use the ArrowStream format endpoint with significantly better performance and smaller payload sizes compared +to JSON-based formats. + +authors: benjamin-awd diff --git a/lib/codecs/Cargo.toml b/lib/codecs/Cargo.toml index 741c95b84a895..57ce81892c774 100644 --- a/lib/codecs/Cargo.toml +++ b/lib/codecs/Cargo.toml @@ -11,8 +11,10 @@ path = "tests/bin/generate-avro-fixtures.rs" [dependencies] apache-avro = { version = "0.20.0", default-features = false } +arrow = { version = "56.2.0", default-features = false, features = ["ipc"] } bytes.workspace = true chrono.workspace = true +rust_decimal = { version = "1.37", default-features = false, features = ["std"] } csv-core = { version = "0.1.12", default-features = false } derivative.workspace = true dyn-clone = { version = "1", default-features = false } @@ -53,5 +55,6 @@ uuid.workspace = true vrl.workspace = true [features] -syslog = ["dep:syslog_loose"] +arrow = [] opentelemetry = ["dep:opentelemetry-proto"] +syslog = ["dep:syslog_loose"] diff --git a/lib/codecs/src/encoding/format/arrow.rs b/lib/codecs/src/encoding/format/arrow.rs new file mode 100644 index 0000000000000..7588b32b94452 --- /dev/null +++ b/lib/codecs/src/encoding/format/arrow.rs @@ -0,0 +1,1445 @@ +//! Arrow IPC streaming format codec for batched event encoding +//! +//! Provides Apache Arrow IPC stream format encoding with static schema support. +//! This implements the streaming variant of the Arrow IPC protocol, which writes +//! a continuous stream of record batches without a file footer. 
+ +use arrow::{ + array::{ + ArrayRef, BinaryBuilder, BooleanBuilder, Decimal128Builder, Decimal256Builder, + Float32Builder, Float64Builder, Int8Builder, Int16Builder, Int32Builder, Int64Builder, + StringBuilder, TimestampMicrosecondBuilder, TimestampMillisecondBuilder, + TimestampNanosecondBuilder, TimestampSecondBuilder, UInt8Builder, UInt16Builder, + UInt32Builder, UInt64Builder, + }, + datatypes::{DataType, Schema, TimeUnit, i256}, + ipc::writer::StreamWriter, + record_batch::RecordBatch, +}; +use bytes::{BufMut, Bytes, BytesMut}; +use chrono::{DateTime, Utc}; +use rust_decimal::Decimal; +use snafu::Snafu; +use std::sync::Arc; +use vector_config::configurable_component; + +use vector_core::event::{Event, Value}; + +/// Configuration for Arrow IPC stream serialization +#[configurable_component] +#[derive(Clone, Default)] +pub struct ArrowStreamSerializerConfig { + /// The Arrow schema to use for encoding + #[serde(skip)] + #[configurable(derived)] + pub schema: Option>, +} + +impl std::fmt::Debug for ArrowStreamSerializerConfig { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("ArrowStreamSerializerConfig") + .field( + "schema", + &self + .schema + .as_ref() + .map(|s| format!("{} fields", s.fields().len())), + ) + .finish() + } +} + +impl ArrowStreamSerializerConfig { + /// Create a new ArrowStreamSerializerConfig with a schema + pub fn new(schema: Arc) -> Self { + Self { + schema: Some(schema), + } + } + + /// The data type of events that are accepted by `ArrowStreamEncoder`. + pub fn input_type(&self) -> vector_core::config::DataType { + vector_core::config::DataType::Log + } + + /// The schema required by the serializer. 
+ pub fn schema_requirement(&self) -> vector_core::schema::Requirement { + vector_core::schema::Requirement::empty() + } +} + +/// Arrow IPC stream batch serializer that holds the schema +#[derive(Clone, Debug)] +pub struct ArrowStreamSerializer { + schema: Arc, +} + +impl ArrowStreamSerializer { + /// Create a new ArrowStreamSerializer with the given configuration + pub fn new(config: ArrowStreamSerializerConfig) -> Result { + let schema = config.schema.ok_or_else(|| { + vector_common::Error::from( + "Arrow serializer requires a schema. Pass a schema or fetch from provider before creating serializer." + ) + })?; + + Ok(Self { schema }) + } +} + +impl tokio_util::codec::Encoder> for ArrowStreamSerializer { + type Error = ArrowEncodingError; + + fn encode(&mut self, events: Vec, buffer: &mut BytesMut) -> Result<(), Self::Error> { + if events.is_empty() { + return Err(ArrowEncodingError::NoEvents); + } + + let bytes = encode_events_to_arrow_ipc_stream(&events, Some(Arc::clone(&self.schema)))?; + + buffer.extend_from_slice(&bytes); + Ok(()) + } +} + +/// Errors that can occur during Arrow encoding +#[derive(Debug, Snafu)] +pub enum ArrowEncodingError { + /// Failed to create Arrow record batch + #[snafu(display("Failed to create Arrow record batch: {}", source))] + RecordBatchCreation { + /// The underlying Arrow error + source: arrow::error::ArrowError, + }, + + /// Failed to write Arrow IPC data + #[snafu(display("Failed to write Arrow IPC data: {}", source))] + IpcWrite { + /// The underlying Arrow error + source: arrow::error::ArrowError, + }, + + /// No events provided for encoding + #[snafu(display("No events provided for encoding"))] + NoEvents, + + /// Schema must be provided before encoding + #[snafu(display("Schema must be provided before encoding"))] + NoSchemaProvided, + + /// Unsupported Arrow data type for field + #[snafu(display( + "Unsupported Arrow data type for field '{}': {:?}", + field_name, + data_type + ))] + UnsupportedType { + /// The field 
name + field_name: String, + /// The unsupported data type + data_type: DataType, + }, + + /// Null value encountered for non-nullable field + #[snafu(display("Null value for non-nullable field '{}'", field_name))] + NullConstraint { + /// The field name + field_name: String, + }, + + /// IO error during encoding + #[snafu(display("IO error: {}", source))] + Io { + /// The underlying IO error + source: std::io::Error, + }, +} + +impl From for ArrowEncodingError { + fn from(error: std::io::Error) -> Self { + Self::Io { source: error } + } +} + +/// Encodes a batch of events into Arrow IPC streaming format +pub fn encode_events_to_arrow_ipc_stream( + events: &[Event], + schema: Option>, +) -> Result { + if events.is_empty() { + return Err(ArrowEncodingError::NoEvents); + } + + let schema_ref = schema.ok_or(ArrowEncodingError::NoSchemaProvided)?; + + let record_batch = build_record_batch(schema_ref.clone(), events)?; + + let ipc_err = |source| ArrowEncodingError::IpcWrite { source }; + + let mut buffer = BytesMut::new().writer(); + let mut writer = StreamWriter::try_new(&mut buffer, &schema_ref).map_err(ipc_err)?; + writer.write(&record_batch).map_err(ipc_err)?; + writer.finish().map_err(ipc_err)?; + + Ok(buffer.into_inner().freeze()) +} + +/// Builds an Arrow RecordBatch from events +fn build_record_batch( + schema: Arc, + events: &[Event], +) -> Result { + let num_fields = schema.fields().len(); + let mut columns: Vec = Vec::with_capacity(num_fields); + + for field in schema.fields() { + let field_name = field.name(); + let nullable = field.is_nullable(); + let array: ArrayRef = match field.data_type() { + DataType::Timestamp(time_unit, _) => { + build_timestamp_array(events, field_name, *time_unit, nullable)? 
+ } + DataType::Utf8 => build_string_array(events, field_name, nullable)?, + DataType::Int8 => build_int8_array(events, field_name, nullable)?, + DataType::Int16 => build_int16_array(events, field_name, nullable)?, + DataType::Int32 => build_int32_array(events, field_name, nullable)?, + DataType::Int64 => build_int64_array(events, field_name, nullable)?, + DataType::UInt8 => build_uint8_array(events, field_name, nullable)?, + DataType::UInt16 => build_uint16_array(events, field_name, nullable)?, + DataType::UInt32 => build_uint32_array(events, field_name, nullable)?, + DataType::UInt64 => build_uint64_array(events, field_name, nullable)?, + DataType::Float32 => build_float32_array(events, field_name, nullable)?, + DataType::Float64 => build_float64_array(events, field_name, nullable)?, + DataType::Boolean => build_boolean_array(events, field_name, nullable)?, + DataType::Binary => build_binary_array(events, field_name, nullable)?, + DataType::Decimal128(precision, scale) => { + build_decimal128_array(events, field_name, *precision, *scale, nullable)? + } + DataType::Decimal256(precision, scale) => { + build_decimal256_array(events, field_name, *precision, *scale, nullable)? + } + other_type => { + return Err(ArrowEncodingError::UnsupportedType { + field_name: field_name.into(), + data_type: other_type.clone(), + }); + } + }; + + columns.push(array); + } + + RecordBatch::try_new(schema, columns) + .map_err(|source| ArrowEncodingError::RecordBatchCreation { source }) +} + +/// Macro to handle appending null or returning an error for non-nullable fields. +macro_rules! handle_null_constraints { + ($builder:expr, $nullable:expr, $field_name:expr) => {{ + if !$nullable { + return Err(ArrowEncodingError::NullConstraint { + field_name: $field_name.into(), + }); + } + $builder.append_null(); + }}; +} + +/// Macro to generate a `build_*_array` function for primitive types. +macro_rules! 
define_build_primitive_array_fn { + ( + $fn_name:ident, // The function name (e.g., build_int8_array) + $builder_ty:ty, // The builder type (e.g., Int8Builder) + // One or more match arms for valid Value types + $( $value_pat:pat $(if $guard:expr)? => $append_expr:expr ),+ + ) => { + fn $fn_name( + events: &[Event], + field_name: &str, + nullable: bool, + ) -> Result { + let mut builder = <$builder_ty>::with_capacity(events.len()); + + for event in events { + if let Event::Log(log) = event { + match log.get(field_name) { + $( + $value_pat $(if $guard)? => builder.append_value($append_expr), + )+ + // All other patterns are treated as null/invalid + _ => handle_null_constraints!(builder, nullable, field_name), + } + } + } + Ok(Arc::new(builder.finish())) + } + }; +} + +fn extract_timestamp(value: &Value) -> Option> { + match value { + Value::Timestamp(ts) => Some(*ts), + Value::Bytes(bytes) => std::str::from_utf8(bytes) + .ok() + .and_then(|s| chrono::DateTime::parse_from_rfc3339(s).ok()) + .map(|dt| dt.with_timezone(&Utc)), + _ => None, + } +} + +fn build_timestamp_array( + events: &[Event], + field_name: &str, + time_unit: TimeUnit, + nullable: bool, +) -> Result { + macro_rules! 
build_array { + ($builder:ty, $converter:expr) => {{ + let mut builder = <$builder>::with_capacity(events.len()); + for event in events { + if let Event::Log(log) = event { + let value_to_append = log.get(field_name).and_then(|value| { + // First, try to extract it as a native or string timestamp + if let Some(ts) = extract_timestamp(value) { + $converter(&ts) + } + // Else, fall back to a raw integer + else if let Value::Integer(i) = value { + Some(*i) + } + // Else, it's an unsupported type (e.g., Bool, Float) + else { + None + } + }); + + if value_to_append.is_none() && !nullable { + return Err(ArrowEncodingError::NullConstraint { + field_name: field_name.into(), + }); + } + + builder.append_option(value_to_append); + } + } + Ok(Arc::new(builder.finish())) + }}; + } + + match time_unit { + TimeUnit::Second => { + build_array!(TimestampSecondBuilder, |ts: &DateTime| Some( + ts.timestamp() + )) + } + TimeUnit::Millisecond => { + build_array!(TimestampMillisecondBuilder, |ts: &DateTime| Some( + ts.timestamp_millis() + )) + } + TimeUnit::Microsecond => { + build_array!(TimestampMicrosecondBuilder, |ts: &DateTime| Some( + ts.timestamp_micros() + )) + } + TimeUnit::Nanosecond => { + build_array!(TimestampNanosecondBuilder, |ts: &DateTime| ts + .timestamp_nanos_opt()) + } + } +} + +fn build_string_array( + events: &[Event], + field_name: &str, + nullable: bool, +) -> Result { + let mut builder = StringBuilder::with_capacity(events.len(), 0); + + for event in events { + if let Event::Log(log) = event { + let mut appended = false; + if let Some(value) = log.get(field_name) { + match value { + Value::Bytes(bytes) => { + // Attempt direct UTF-8 conversion first, fallback to lossy + match std::str::from_utf8(bytes) { + Ok(s) => builder.append_value(s), + Err(_) => builder.append_value(&String::from_utf8_lossy(bytes)), + } + appended = true; + } + Value::Object(obj) => { + if let Ok(s) = serde_json::to_string(&obj) { + builder.append_value(s); + appended = true; + } + } + 
Value::Array(arr) => { + if let Ok(s) = serde_json::to_string(&arr) { + builder.append_value(s); + appended = true; + } + } + _ => { + builder.append_value(&value.to_string_lossy()); + appended = true; + } + } + } + + if !appended { + handle_null_constraints!(builder, nullable, field_name); + } + } + } + + Ok(Arc::new(builder.finish())) +} + +define_build_primitive_array_fn!( + build_int8_array, + Int8Builder, + Some(Value::Integer(i)) if *i >= i8::MIN as i64 && *i <= i8::MAX as i64 => *i as i8 +); + +define_build_primitive_array_fn!( + build_int16_array, + Int16Builder, + Some(Value::Integer(i)) if *i >= i16::MIN as i64 && *i <= i16::MAX as i64 => *i as i16 +); + +define_build_primitive_array_fn!( + build_int32_array, + Int32Builder, + Some(Value::Integer(i)) if *i >= i32::MIN as i64 && *i <= i32::MAX as i64 => *i as i32 +); + +define_build_primitive_array_fn!( + build_int64_array, + Int64Builder, + Some(Value::Integer(i)) => *i +); + +define_build_primitive_array_fn!( + build_uint8_array, + UInt8Builder, + Some(Value::Integer(i)) if *i >= 0 && *i <= u8::MAX as i64 => *i as u8 +); + +define_build_primitive_array_fn!( + build_uint16_array, + UInt16Builder, + Some(Value::Integer(i)) if *i >= 0 && *i <= u16::MAX as i64 => *i as u16 +); + +define_build_primitive_array_fn!( + build_uint32_array, + UInt32Builder, + Some(Value::Integer(i)) if *i >= 0 && *i <= u32::MAX as i64 => *i as u32 +); + +define_build_primitive_array_fn!( + build_uint64_array, + UInt64Builder, + Some(Value::Integer(i)) if *i >= 0 => *i as u64 +); + +define_build_primitive_array_fn!( + build_float32_array, + Float32Builder, + Some(Value::Float(f)) => f.into_inner() as f32, + Some(Value::Integer(i)) => *i as f32 +); + +define_build_primitive_array_fn!( + build_float64_array, + Float64Builder, + Some(Value::Float(f)) => f.into_inner(), + Some(Value::Integer(i)) => *i as f64 +); + +define_build_primitive_array_fn!( + build_boolean_array, + BooleanBuilder, + Some(Value::Boolean(b)) => *b +); + +fn 
build_binary_array( + events: &[Event], + field_name: &str, + nullable: bool, +) -> Result { + let mut builder = BinaryBuilder::with_capacity(events.len(), 0); + + for event in events { + if let Event::Log(log) = event { + match log.get(field_name) { + Some(Value::Bytes(bytes)) => builder.append_value(bytes), + _ => handle_null_constraints!(builder, nullable, field_name), + } + } + } + + Ok(Arc::new(builder.finish())) +} + +fn build_decimal128_array( + events: &[Event], + field_name: &str, + precision: u8, + scale: i8, + nullable: bool, +) -> Result { + let mut builder = Decimal128Builder::with_capacity(events.len()) + .with_precision_and_scale(precision, scale) + .map_err(|_| ArrowEncodingError::UnsupportedType { + field_name: field_name.into(), + data_type: DataType::Decimal128(precision, scale), + })?; + + let target_scale = scale.unsigned_abs() as u32; + + for event in events { + if let Event::Log(log) = event { + let mut appended = false; + match log.get(field_name) { + Some(Value::Float(f)) => { + if let Ok(mut decimal) = Decimal::try_from(f.into_inner()) { + decimal.rescale(target_scale); + let mantissa = decimal.mantissa(); + builder.append_value(mantissa); + appended = true; + } + } + Some(Value::Integer(i)) => { + let mut decimal = Decimal::from(*i); + decimal.rescale(target_scale); + let mantissa = decimal.mantissa(); + builder.append_value(mantissa); + appended = true; + } + _ => {} + } + + if !appended { + handle_null_constraints!(builder, nullable, field_name); + } + } + } + + Ok(Arc::new(builder.finish())) +} + +fn build_decimal256_array( + events: &[Event], + field_name: &str, + precision: u8, + scale: i8, + nullable: bool, +) -> Result { + let mut builder = Decimal256Builder::with_capacity(events.len()) + .with_precision_and_scale(precision, scale) + .map_err(|_| ArrowEncodingError::UnsupportedType { + field_name: field_name.into(), + data_type: DataType::Decimal256(precision, scale), + })?; + + let target_scale = scale.unsigned_abs() as u32; + + 
for event in events { + if let Event::Log(log) = event { + let mut appended = false; + match log.get(field_name) { + Some(Value::Float(f)) => { + if let Ok(mut decimal) = Decimal::try_from(f.into_inner()) { + decimal.rescale(target_scale); + let mantissa = decimal.mantissa(); + // rust_decimal does not support i256 natively so we upcast here + builder.append_value(i256::from_i128(mantissa)); + appended = true; + } + } + Some(Value::Integer(i)) => { + let mut decimal = Decimal::from(*i); + decimal.rescale(target_scale); + let mantissa = decimal.mantissa(); + builder.append_value(i256::from_i128(mantissa)); + appended = true; + } + _ => {} + } + + if !appended { + handle_null_constraints!(builder, nullable, field_name); + } + } + } + + Ok(Arc::new(builder.finish())) +} + +#[cfg(test)] +mod tests { + use super::*; + use arrow::{ + array::{ + Array, BinaryArray, BooleanArray, Float64Array, Int64Array, StringArray, + TimestampMicrosecondArray, TimestampMillisecondArray, TimestampNanosecondArray, + TimestampSecondArray, + }, + datatypes::Field, + ipc::reader::StreamReader, + }; + use chrono::Utc; + use std::io::Cursor; + use vector_core::event::LogEvent; + + #[test] + fn test_encode_all_types() { + let mut log = LogEvent::default(); + log.insert("string_field", "test"); + log.insert("int8_field", 127); + log.insert("int16_field", 32000); + log.insert("int32_field", 1000000); + log.insert("int64_field", 42); + log.insert("float32_field", 3.15); + log.insert("float64_field", 3.15); + log.insert("bool_field", true); + log.insert("bytes_field", bytes::Bytes::from("binary")); + log.insert("timestamp_field", Utc::now()); + + let events = vec![Event::Log(log)]; + + let schema = Arc::new(Schema::new(vec![ + Field::new("string_field", DataType::Utf8, true), + Field::new("int8_field", DataType::Int8, true), + Field::new("int16_field", DataType::Int16, true), + Field::new("int32_field", DataType::Int32, true), + Field::new("int64_field", DataType::Int64, true), + 
Field::new("float32_field", DataType::Float32, true), + Field::new("float64_field", DataType::Float64, true), + Field::new("bool_field", DataType::Boolean, true), + Field::new("bytes_field", DataType::Binary, true), + Field::new( + "timestamp_field", + DataType::Timestamp(TimeUnit::Millisecond, None), + true, + ), + ])); + + let result = encode_events_to_arrow_ipc_stream(&events, Some(Arc::clone(&schema))); + assert!(result.is_ok()); + + let bytes = result.unwrap(); + let cursor = Cursor::new(bytes); + let mut reader = StreamReader::try_new(cursor, None).unwrap(); + let batch = reader.next().unwrap().unwrap(); + + assert_eq!(batch.num_rows(), 1); + assert_eq!(batch.num_columns(), 10); + + // Verify string field + assert_eq!( + batch + .column(0) + .as_any() + .downcast_ref::() + .unwrap() + .value(0), + "test" + ); + + // Verify int8 field + assert_eq!( + batch + .column(1) + .as_any() + .downcast_ref::() + .unwrap() + .value(0), + 127 + ); + + // Verify int16 field + assert_eq!( + batch + .column(2) + .as_any() + .downcast_ref::() + .unwrap() + .value(0), + 32000 + ); + + // Verify int32 field + assert_eq!( + batch + .column(3) + .as_any() + .downcast_ref::() + .unwrap() + .value(0), + 1000000 + ); + + // Verify int64 field + assert_eq!( + batch + .column(4) + .as_any() + .downcast_ref::() + .unwrap() + .value(0), + 42 + ); + + // Verify float32 field + assert!( + (batch + .column(5) + .as_any() + .downcast_ref::() + .unwrap() + .value(0) + - 3.15) + .abs() + < 0.001 + ); + + // Verify float64 field + assert!( + (batch + .column(6) + .as_any() + .downcast_ref::() + .unwrap() + .value(0) + - 3.15) + .abs() + < 0.001 + ); + + // Verify boolean field + assert!( + batch + .column(7) + .as_any() + .downcast_ref::() + .unwrap() + .value(0), + "{}", + true + ); + + // Verify binary field + assert_eq!( + batch + .column(8) + .as_any() + .downcast_ref::() + .unwrap() + .value(0), + b"binary" + ); + + // Verify timestamp field + assert!( + !batch + .column(9) + .as_any() + 
.downcast_ref::() + .unwrap() + .is_null(0) + ); + } + + #[test] + fn test_encode_null_values() { + let mut log1 = LogEvent::default(); + log1.insert("field_a", 1); + // field_b is missing + + let mut log2 = LogEvent::default(); + log2.insert("field_b", 2); + // field_a is missing + + let events = vec![Event::Log(log1), Event::Log(log2)]; + + let schema = Arc::new(Schema::new(vec![ + Field::new("field_a", DataType::Int64, true), + Field::new("field_b", DataType::Int64, true), + ])); + + let result = encode_events_to_arrow_ipc_stream(&events, Some(Arc::clone(&schema))); + assert!(result.is_ok()); + + let bytes = result.unwrap(); + let cursor = Cursor::new(bytes); + let mut reader = StreamReader::try_new(cursor, None).unwrap(); + let batch = reader.next().unwrap().unwrap(); + + assert_eq!(batch.num_rows(), 2); + + let field_a = batch + .column(0) + .as_any() + .downcast_ref::() + .unwrap(); + assert_eq!(field_a.value(0), 1); + assert!(field_a.is_null(1)); + + let field_b = batch + .column(1) + .as_any() + .downcast_ref::() + .unwrap(); + assert!(field_b.is_null(0)); + assert_eq!(field_b.value(1), 2); + } + + #[test] + fn test_encode_type_mismatches() { + let mut log1 = LogEvent::default(); + log1.insert("field", 42); // Integer + + let mut log2 = LogEvent::default(); + log2.insert("field", 3.15); // Float - type mismatch! 
+ + let events = vec![Event::Log(log1), Event::Log(log2)]; + + // Schema expects Int64 + let schema = Arc::new(Schema::new(vec![Field::new( + "field", + DataType::Int64, + true, + )])); + + let result = encode_events_to_arrow_ipc_stream(&events, Some(Arc::clone(&schema))); + assert!(result.is_ok()); + + let bytes = result.unwrap(); + let cursor = Cursor::new(bytes); + let mut reader = StreamReader::try_new(cursor, None).unwrap(); + let batch = reader.next().unwrap().unwrap(); + + assert_eq!(batch.num_rows(), 2); + + let field_array = batch + .column(0) + .as_any() + .downcast_ref::() + .unwrap(); + assert_eq!(field_array.value(0), 42); + assert!(field_array.is_null(1)); // Type mismatch becomes null + } + + #[test] + fn test_encode_complex_json_values() { + use serde_json::json; + + let mut log = LogEvent::default(); + log.insert( + "object_field", + json!({"key": "value", "nested": {"count": 42}}), + ); + log.insert("array_field", json!([1, 2, 3])); + + let events = vec![Event::Log(log)]; + + let schema = Arc::new(Schema::new(vec![ + Field::new("object_field", DataType::Utf8, true), + Field::new("array_field", DataType::Utf8, true), + ])); + + let result = encode_events_to_arrow_ipc_stream(&events, Some(Arc::clone(&schema))); + assert!(result.is_ok()); + + let bytes = result.unwrap(); + let cursor = Cursor::new(bytes); + let mut reader = StreamReader::try_new(cursor, None).unwrap(); + let batch = reader.next().unwrap().unwrap(); + + assert_eq!(batch.num_rows(), 1); + + let object_array = batch + .column(0) + .as_any() + .downcast_ref::() + .unwrap(); + let object_str = object_array.value(0); + assert!(object_str.contains("key")); + assert!(object_str.contains("value")); + + let array_array = batch + .column(1) + .as_any() + .downcast_ref::() + .unwrap(); + let array_str = array_array.value(0); + assert_eq!(array_str, "[1,2,3]"); + } + + #[test] + fn test_encode_unsupported_type() { + let mut log = LogEvent::default(); + log.insert("field", "value"); + + let events 
= vec![Event::Log(log)]; + + // Use an unsupported type + let schema = Arc::new(Schema::new(vec![Field::new( + "field", + DataType::Duration(TimeUnit::Millisecond), + true, + )])); + + let result = encode_events_to_arrow_ipc_stream(&events, Some(schema)); + assert!(result.is_err()); + assert!(matches!( + result.unwrap_err(), + ArrowEncodingError::UnsupportedType { .. } + )); + } + + #[test] + fn test_encode_without_schema_fails() { + let mut log1 = LogEvent::default(); + log1.insert("message", "hello"); + + let events = vec![Event::Log(log1)]; + + let result = encode_events_to_arrow_ipc_stream(&events, None); + assert!(result.is_err()); + assert!(matches!( + result.unwrap_err(), + ArrowEncodingError::NoSchemaProvided + )); + } + + #[test] + fn test_encode_empty_events() { + let events: Vec = vec![]; + let result = encode_events_to_arrow_ipc_stream(&events, None); + assert!(result.is_err()); + assert!(matches!(result.unwrap_err(), ArrowEncodingError::NoEvents)); + } + + #[test] + fn test_encode_timestamp_precisions() { + let now = Utc::now(); + let mut log = LogEvent::default(); + log.insert("ts_second", now); + log.insert("ts_milli", now); + log.insert("ts_micro", now); + log.insert("ts_nano", now); + + let events = vec![Event::Log(log)]; + + let schema = Arc::new(Schema::new(vec![ + Field::new( + "ts_second", + DataType::Timestamp(TimeUnit::Second, None), + true, + ), + Field::new( + "ts_milli", + DataType::Timestamp(TimeUnit::Millisecond, None), + true, + ), + Field::new( + "ts_micro", + DataType::Timestamp(TimeUnit::Microsecond, None), + true, + ), + Field::new( + "ts_nano", + DataType::Timestamp(TimeUnit::Nanosecond, None), + true, + ), + ])); + + let result = encode_events_to_arrow_ipc_stream(&events, Some(Arc::clone(&schema))); + assert!(result.is_ok()); + + let bytes = result.unwrap(); + let cursor = Cursor::new(bytes); + let mut reader = StreamReader::try_new(cursor, None).unwrap(); + let batch = reader.next().unwrap().unwrap(); + + 
assert_eq!(batch.num_rows(), 1); + assert_eq!(batch.num_columns(), 4); + + let ts_second = batch + .column(0) + .as_any() + .downcast_ref::() + .unwrap(); + assert!(!ts_second.is_null(0)); + assert_eq!(ts_second.value(0), now.timestamp()); + + let ts_milli = batch + .column(1) + .as_any() + .downcast_ref::() + .unwrap(); + assert!(!ts_milli.is_null(0)); + assert_eq!(ts_milli.value(0), now.timestamp_millis()); + + let ts_micro = batch + .column(2) + .as_any() + .downcast_ref::() + .unwrap(); + assert!(!ts_micro.is_null(0)); + assert_eq!(ts_micro.value(0), now.timestamp_micros()); + + let ts_nano = batch + .column(3) + .as_any() + .downcast_ref::() + .unwrap(); + assert!(!ts_nano.is_null(0)); + assert_eq!(ts_nano.value(0), now.timestamp_nanos_opt().unwrap()); + } + + #[test] + fn test_encode_mixed_timestamp_string_and_native() { + // Test mixing string timestamps with native Timestamp values + let mut log1 = LogEvent::default(); + log1.insert("ts", "2025-10-22T10:18:44.256Z"); // String + + let mut log2 = LogEvent::default(); + log2.insert("ts", Utc::now()); // Native Timestamp + + let mut log3 = LogEvent::default(); + log3.insert("ts", 1729594724256000000_i64); // Integer (nanoseconds) + + let events = vec![Event::Log(log1), Event::Log(log2), Event::Log(log3)]; + + let schema = Arc::new(Schema::new(vec![Field::new( + "ts", + DataType::Timestamp(TimeUnit::Nanosecond, None), + true, + )])); + + let result = encode_events_to_arrow_ipc_stream(&events, Some(Arc::clone(&schema))); + assert!(result.is_ok()); + + let bytes = result.unwrap(); + let cursor = Cursor::new(bytes); + let mut reader = StreamReader::try_new(cursor, None).unwrap(); + let batch = reader.next().unwrap().unwrap(); + + assert_eq!(batch.num_rows(), 3); + + let ts_array = batch + .column(0) + .as_any() + .downcast_ref::() + .unwrap(); + + // All three should be non-null + assert!(!ts_array.is_null(0)); + assert!(!ts_array.is_null(1)); + assert!(!ts_array.is_null(2)); + + // First one should match the 
parsed string + let expected = chrono::DateTime::parse_from_rfc3339("2025-10-22T10:18:44.256Z") + .unwrap() + .timestamp_nanos_opt() + .unwrap(); + assert_eq!(ts_array.value(0), expected); + + // Third one should match the integer + assert_eq!(ts_array.value(2), 1729594724256000000_i64); + } + + #[test] + fn test_encode_invalid_string_timestamp() { + // Test that invalid timestamp strings become null + let mut log1 = LogEvent::default(); + log1.insert("timestamp", "not-a-timestamp"); + + let mut log2 = LogEvent::default(); + log2.insert("timestamp", "2025-10-22T10:18:44.256Z"); // Valid + + let mut log3 = LogEvent::default(); + log3.insert("timestamp", "2025-99-99T99:99:99Z"); // Invalid + + let events = vec![Event::Log(log1), Event::Log(log2), Event::Log(log3)]; + + let schema = Arc::new(Schema::new(vec![Field::new( + "timestamp", + DataType::Timestamp(TimeUnit::Nanosecond, None), + true, + )])); + + let result = encode_events_to_arrow_ipc_stream(&events, Some(Arc::clone(&schema))); + assert!(result.is_ok()); + + let bytes = result.unwrap(); + let cursor = Cursor::new(bytes); + let mut reader = StreamReader::try_new(cursor, None).unwrap(); + let batch = reader.next().unwrap().unwrap(); + + assert_eq!(batch.num_rows(), 3); + + let ts_array = batch + .column(0) + .as_any() + .downcast_ref::() + .unwrap(); + + // Invalid timestamps should be null + assert!(ts_array.is_null(0)); + assert!(!ts_array.is_null(1)); // Valid one + assert!(ts_array.is_null(2)); + } + + #[test] + fn test_encode_decimal128_from_integer() { + use arrow::array::Decimal128Array; + + let mut log = LogEvent::default(); + // Store quantity as integer: 1000 + log.insert("quantity", 1000_i64); + + let events = vec![Event::Log(log)]; + + // Decimal(10, 3) - will represent 1000 as 1000.000 + let schema = Arc::new(Schema::new(vec![Field::new( + "quantity", + DataType::Decimal128(10, 3), + true, + )])); + + let result = encode_events_to_arrow_ipc_stream(&events, Some(Arc::clone(&schema))); + 
assert!(result.is_ok()); + + let bytes = result.unwrap(); + let cursor = Cursor::new(bytes); + let mut reader = StreamReader::try_new(cursor, None).unwrap(); + let batch = reader.next().unwrap().unwrap(); + + assert_eq!(batch.num_rows(), 1); + + let decimal_array = batch + .column(0) + .as_any() + .downcast_ref::() + .unwrap(); + + assert!(!decimal_array.is_null(0)); + // 1000 with scale 3 = 1000 * 10^3 = 1000000 + assert_eq!(decimal_array.value(0), 1000000_i128); + } + + #[test] + fn test_encode_decimal256() { + use arrow::array::Decimal256Array; + + let mut log = LogEvent::default(); + // Very large precision number + log.insert("big_value", 123456789.123456_f64); + + let events = vec![Event::Log(log)]; + + // Decimal256(50, 6) - high precision decimal + let schema = Arc::new(Schema::new(vec![Field::new( + "big_value", + DataType::Decimal256(50, 6), + true, + )])); + + let result = encode_events_to_arrow_ipc_stream(&events, Some(Arc::clone(&schema))); + assert!(result.is_ok()); + + let bytes = result.unwrap(); + let cursor = Cursor::new(bytes); + let mut reader = StreamReader::try_new(cursor, None).unwrap(); + let batch = reader.next().unwrap().unwrap(); + + assert_eq!(batch.num_rows(), 1); + + let decimal_array = batch + .column(0) + .as_any() + .downcast_ref::() + .unwrap(); + + assert!(!decimal_array.is_null(0)); + // Value should be non-null and encoded + let value = decimal_array.value(0); + assert!(value.to_i128().is_some()); + } + + #[test] + fn test_encode_decimal_null_values() { + use arrow::array::Decimal128Array; + + let mut log1 = LogEvent::default(); + log1.insert("price", 99.99_f64); + + let log2 = LogEvent::default(); + // No price field - should be null + + let mut log3 = LogEvent::default(); + log3.insert("price", 50.00_f64); + + let events = vec![Event::Log(log1), Event::Log(log2), Event::Log(log3)]; + + let schema = Arc::new(Schema::new(vec![Field::new( + "price", + DataType::Decimal128(10, 2), + true, + )])); + + let result = 
encode_events_to_arrow_ipc_stream(&events, Some(Arc::clone(&schema))); + assert!(result.is_ok()); + + let bytes = result.unwrap(); + let cursor = Cursor::new(bytes); + let mut reader = StreamReader::try_new(cursor, None).unwrap(); + let batch = reader.next().unwrap().unwrap(); + + assert_eq!(batch.num_rows(), 3); + + let decimal_array = batch + .column(0) + .as_any() + .downcast_ref::() + .unwrap(); + + // First row: 99.99 + assert!(!decimal_array.is_null(0)); + assert_eq!(decimal_array.value(0), 9999_i128); + + // Second row: null + assert!(decimal_array.is_null(1)); + + // Third row: 50.00 + assert!(!decimal_array.is_null(2)); + assert_eq!(decimal_array.value(2), 5000_i128); + } + + #[test] + fn test_encode_unsigned_integer_types() { + use arrow::array::{UInt8Array, UInt16Array, UInt32Array, UInt64Array}; + + let mut log = LogEvent::default(); + log.insert("uint8_field", 255_i64); + log.insert("uint16_field", 65535_i64); + log.insert("uint32_field", 4294967295_i64); + log.insert("uint64_field", 9223372036854775807_i64); + + let events = vec![Event::Log(log)]; + + let schema = Arc::new(Schema::new(vec![ + Field::new("uint8_field", DataType::UInt8, true), + Field::new("uint16_field", DataType::UInt16, true), + Field::new("uint32_field", DataType::UInt32, true), + Field::new("uint64_field", DataType::UInt64, true), + ])); + + let result = encode_events_to_arrow_ipc_stream(&events, Some(Arc::clone(&schema))); + assert!(result.is_ok()); + + let bytes = result.unwrap(); + let cursor = Cursor::new(bytes); + let mut reader = StreamReader::try_new(cursor, None).unwrap(); + let batch = reader.next().unwrap().unwrap(); + + assert_eq!(batch.num_rows(), 1); + assert_eq!(batch.num_columns(), 4); + + // Verify uint8 + let uint8_array = batch + .column(0) + .as_any() + .downcast_ref::() + .unwrap(); + assert_eq!(uint8_array.value(0), 255_u8); + + // Verify uint16 + let uint16_array = batch + .column(1) + .as_any() + .downcast_ref::() + .unwrap(); + 
assert_eq!(uint16_array.value(0), 65535_u16); + + // Verify uint32 + let uint32_array = batch + .column(2) + .as_any() + .downcast_ref::() + .unwrap(); + assert_eq!(uint32_array.value(0), 4294967295_u32); + + // Verify uint64 + let uint64_array = batch + .column(3) + .as_any() + .downcast_ref::() + .unwrap(); + assert_eq!(uint64_array.value(0), 9223372036854775807_u64); + } + + #[test] + fn test_encode_unsigned_integers_with_null_and_overflow() { + use arrow::array::{UInt8Array, UInt32Array}; + + let mut log1 = LogEvent::default(); + log1.insert("uint8_field", 100_i64); + log1.insert("uint32_field", 1000_i64); + + let mut log2 = LogEvent::default(); + log2.insert("uint8_field", 300_i64); // Overflow - should be null + log2.insert("uint32_field", -1_i64); // Negative - should be null + + let log3 = LogEvent::default(); + // Missing fields - should be null + + let events = vec![Event::Log(log1), Event::Log(log2), Event::Log(log3)]; + + let schema = Arc::new(Schema::new(vec![ + Field::new("uint8_field", DataType::UInt8, true), + Field::new("uint32_field", DataType::UInt32, true), + ])); + + let result = encode_events_to_arrow_ipc_stream(&events, Some(Arc::clone(&schema))); + assert!(result.is_ok()); + + let bytes = result.unwrap(); + let cursor = Cursor::new(bytes); + let mut reader = StreamReader::try_new(cursor, None).unwrap(); + let batch = reader.next().unwrap().unwrap(); + + assert_eq!(batch.num_rows(), 3); + + // Check uint8 column + let uint8_array = batch + .column(0) + .as_any() + .downcast_ref::() + .unwrap(); + assert_eq!(uint8_array.value(0), 100_u8); // Valid + assert!(uint8_array.is_null(1)); // Overflow + assert!(uint8_array.is_null(2)); // Missing + + // Check uint32 column + let uint32_array = batch + .column(1) + .as_any() + .downcast_ref::() + .unwrap(); + assert_eq!(uint32_array.value(0), 1000_u32); // Valid + assert!(uint32_array.is_null(1)); // Negative + assert!(uint32_array.is_null(2)); // Missing + } + + #[test] + fn 
test_encode_non_nullable_field_with_null_value() { + // Test that encoding fails when a non-nullable field encounters a null value + let mut log1 = LogEvent::default(); + log1.insert("required_field", 42); + + let log2 = LogEvent::default(); + // log2 is missing required_field - should cause an error + + let events = vec![Event::Log(log1), Event::Log(log2)]; + + // Create schema with non-nullable field + let schema = Arc::new(Schema::new(vec![Field::new( + "required_field", + DataType::Int64, + false, // Not nullable + )])); + + let result = encode_events_to_arrow_ipc_stream(&events, Some(schema)); + assert!(result.is_err()); + + match result.unwrap_err() { + ArrowEncodingError::NullConstraint { field_name } => { + assert_eq!(field_name, "required_field"); + } + other => panic!("Expected NullConstraint error, got: {:?}", other), + } + } + + #[test] + fn test_encode_non_nullable_string_field_with_missing_value() { + // Test that encoding fails for non-nullable string field + let mut log1 = LogEvent::default(); + log1.insert("name", "Alice"); + + let mut log2 = LogEvent::default(); + log2.insert("name", "Bob"); + + let log3 = LogEvent::default(); + // log3 is missing name field + + let events = vec![Event::Log(log1), Event::Log(log2), Event::Log(log3)]; + + let schema = Arc::new(Schema::new(vec![Field::new( + "name", + DataType::Utf8, + false, // Not nullable + )])); + + let result = encode_events_to_arrow_ipc_stream(&events, Some(schema)); + assert!(result.is_err()); + + match result.unwrap_err() { + ArrowEncodingError::NullConstraint { field_name } => { + assert_eq!(field_name, "name"); + } + other => panic!("Expected NullConstraint error, got: {:?}", other), + } + } + + #[test] + fn test_encode_non_nullable_field_all_values_present() { + // Test that encoding succeeds when all values are present for non-nullable field + let mut log1 = LogEvent::default(); + log1.insert("id", 1); + + let mut log2 = LogEvent::default(); + log2.insert("id", 2); + + let mut log3 = 
LogEvent::default(); + log3.insert("id", 3); + + let events = vec![Event::Log(log1), Event::Log(log2), Event::Log(log3)]; + + let schema = Arc::new(Schema::new(vec![Field::new( + "id", + DataType::Int64, + false, // Not nullable + )])); + + let result = encode_events_to_arrow_ipc_stream(&events, Some(Arc::clone(&schema))); + assert!(result.is_ok()); + + let bytes = result.unwrap(); + let cursor = Cursor::new(bytes); + let mut reader = StreamReader::try_new(cursor, None).unwrap(); + let batch = reader.next().unwrap().unwrap(); + + assert_eq!(batch.num_rows(), 3); + + let id_array = batch + .column(0) + .as_any() + .downcast_ref::() + .unwrap(); + + assert_eq!(id_array.value(0), 1); + assert_eq!(id_array.value(1), 2); + assert_eq!(id_array.value(2), 3); + assert!(!id_array.is_null(0)); + assert!(!id_array.is_null(1)); + assert!(!id_array.is_null(2)); + } +} diff --git a/lib/codecs/src/encoding/format/mod.rs b/lib/codecs/src/encoding/format/mod.rs index 9377cdca5d906..0d21e8b94e25c 100644 --- a/lib/codecs/src/encoding/format/mod.rs +++ b/lib/codecs/src/encoding/format/mod.rs @@ -3,6 +3,8 @@ #![deny(missing_docs)] +#[cfg(feature = "arrow")] +mod arrow; mod avro; mod cef; mod common; @@ -20,6 +22,8 @@ mod text; use std::fmt::Debug; +#[cfg(feature = "arrow")] +pub use arrow::{ArrowEncodingError, ArrowStreamSerializer, ArrowStreamSerializerConfig}; pub use avro::{AvroSerializer, AvroSerializerConfig, AvroSerializerOptions}; pub use cef::{CefSerializer, CefSerializerConfig}; use dyn_clone::DynClone; diff --git a/lib/codecs/src/encoding/mod.rs b/lib/codecs/src/encoding/mod.rs index 8dd2c4ddc79a5..3fe0baafa8b91 100644 --- a/lib/codecs/src/encoding/mod.rs +++ b/lib/codecs/src/encoding/mod.rs @@ -6,6 +6,8 @@ pub mod format; pub mod framing; pub mod serializer; pub use chunking::{Chunker, Chunking, GelfChunker}; +#[cfg(feature = "arrow")] +pub use format::{ArrowEncodingError, ArrowStreamSerializer, ArrowStreamSerializerConfig}; pub use format::{ AvroSerializer, 
AvroSerializerConfig, AvroSerializerOptions, CefSerializer, CefSerializerConfig, CsvSerializer, CsvSerializerConfig, GelfSerializer, GelfSerializerConfig, @@ -24,18 +26,22 @@ pub use framing::{ NewlineDelimitedEncoderConfig, VarintLengthDelimitedEncoder, VarintLengthDelimitedEncoderConfig, }; +#[cfg(feature = "arrow")] +pub use serializer::BatchSerializerConfig; pub use serializer::{Serializer, SerializerConfig}; /// An error that occurred while building an encoder. pub type BuildError = Box; -/// An error that occurred while encoding structured events into byte frames. +/// An error that occurred while encoding structured events. #[derive(Debug)] pub enum Error { /// The error occurred while encoding the byte frame boundaries. FramingError(BoxedFramingError), /// The error occurred while serializing a structured event into bytes. SerializingError(vector_common::Error), + /// A schema constraint was violated during encoding (e.g., null value for non-nullable field). + SchemaConstraintViolation(vector_common::Error), } impl std::fmt::Display for Error { @@ -43,6 +49,9 @@ impl std::fmt::Display for Error { match self { Self::FramingError(error) => write!(formatter, "FramingError({error})"), Self::SerializingError(error) => write!(formatter, "SerializingError({error})"), + Self::SchemaConstraintViolation(error) => { + write!(formatter, "SchemaConstraintViolation({error})") + } } } } diff --git a/lib/codecs/src/encoding/serializer.rs b/lib/codecs/src/encoding/serializer.rs index fdc8397deca5d..899e03d60e4ec 100644 --- a/lib/codecs/src/encoding/serializer.rs +++ b/lib/codecs/src/encoding/serializer.rs @@ -4,6 +4,8 @@ use bytes::BytesMut; use vector_config::configurable_component; use vector_core::{config::DataType, event::Event, schema}; +#[cfg(feature = "arrow")] +use super::format::{ArrowStreamSerializer, ArrowStreamSerializerConfig}; #[cfg(feature = "opentelemetry")] use super::format::{OtlpSerializer, OtlpSerializerConfig}; use super::{ @@ -134,6 +136,53 @@ impl 
Default for SerializerConfig { } } +/// Batch serializer configuration. +#[configurable_component] +#[derive(Clone, Debug)] +#[serde(tag = "codec", rename_all = "snake_case")] +#[configurable(metadata( + docs::enum_tag_description = "The codec to use for batch encoding events." +))] +pub enum BatchSerializerConfig { + /// Encodes events in [Apache Arrow][apache_arrow] IPC streaming format. + /// + /// This is the streaming variant of the Arrow IPC format, which writes + /// a continuous stream of record batches. + /// + /// [apache_arrow]: https://arrow.apache.org/ + #[cfg(feature = "arrow")] + #[serde(rename = "arrow_stream")] + ArrowStream(ArrowStreamSerializerConfig), +} + +#[cfg(feature = "arrow")] +impl BatchSerializerConfig { + /// Build the `ArrowStreamSerializer` from this configuration. + pub fn build( + &self, + ) -> Result> { + match self { + BatchSerializerConfig::ArrowStream(arrow_config) => { + ArrowStreamSerializer::new(arrow_config.clone()) + } + } + } + + /// The data type of events that are accepted by this batch serializer. + pub fn input_type(&self) -> DataType { + match self { + BatchSerializerConfig::ArrowStream(arrow_config) => arrow_config.input_type(), + } + } + + /// The schema required by the batch serializer. 
+ pub fn schema_requirement(&self) -> schema::Requirement { + match self { + BatchSerializerConfig::ArrowStream(arrow_config) => arrow_config.schema_requirement(), + } + } +} + impl From for SerializerConfig { fn from(config: AvroSerializerConfig) -> Self { Self::Avro { avro: config.avro } diff --git a/lib/vector-lib/Cargo.toml b/lib/vector-lib/Cargo.toml index 7088a79978d60..c72af97fdaa62 100644 --- a/lib/vector-lib/Cargo.toml +++ b/lib/vector-lib/Cargo.toml @@ -26,6 +26,7 @@ vrl = { workspace = true, optional = true } [features] allocation-tracing = ["vector-top?/allocation-tracing"] api-client = ["dep:vector-api-client"] +arrow = ["codecs/arrow"] api = ["vector-tap/api"] file-source = ["dep:file-source", "dep:file-source-common"] lua = ["vector-core/lua"] diff --git a/src/codecs/encoding/config.rs b/src/codecs/encoding/config.rs index a04f44315047a..255db45b538ed 100644 --- a/src/codecs/encoding/config.rs +++ b/src/codecs/encoding/config.rs @@ -1,4 +1,4 @@ -use crate::codecs::Transformer; +use crate::codecs::{Encoder, EncoderKind, Transformer}; use vector_lib::{ codecs::{ CharacterDelimitedEncoder, LengthDelimitedEncoder, NewlineDelimitedEncoder, @@ -138,6 +138,13 @@ impl EncodingConfigWithFraming { Ok((framer, serializer)) } + + /// Build the `Transformer` and `EncoderKind` for this config. + pub fn build_encoder(&self, sink_type: SinkType) -> crate::Result<(Transformer, EncoderKind)> { + let (framer, serializer) = self.build(sink_type)?; + let encoder = EncoderKind::Framed(Box::new(Encoder::::new(framer, serializer))); + Ok((self.transformer(), encoder)) + } } /// The way a sink processes outgoing events. 
diff --git a/src/codecs/encoding/encoder.rs b/src/codecs/encoding/encoder.rs index f1d0741bb669c..333c29b4840cf 100644 --- a/src/codecs/encoding/encoder.rs +++ b/src/codecs/encoding/encoder.rs @@ -1,5 +1,7 @@ use bytes::BytesMut; use tokio_util::codec::Encoder as _; +#[cfg(feature = "codecs-arrow")] +use vector_lib::codecs::encoding::ArrowStreamSerializer; use vector_lib::codecs::{ CharacterDelimitedEncoder, NewlineDelimitedEncoder, TextSerializerConfig, encoding::{Error, Framer, Serializer}, @@ -10,6 +12,74 @@ use crate::{ internal_events::{EncoderFramingError, EncoderSerializeError}, }; +/// Serializers that support batch encoding (encoding all events at once). +#[derive(Debug, Clone)] +pub enum BatchSerializer { + /// Arrow IPC stream format serializer. + #[cfg(feature = "codecs-arrow")] + Arrow(ArrowStreamSerializer), +} + +/// An encoder that encodes batches of events. +#[derive(Debug, Clone)] +pub struct BatchEncoder { + serializer: BatchSerializer, +} + +impl BatchEncoder { + /// Creates a new `BatchEncoder` with the specified batch serializer. + pub const fn new(serializer: BatchSerializer) -> Self { + Self { serializer } + } + + /// Get the batch serializer. + pub const fn serializer(&self) -> &BatchSerializer { + &self.serializer + } + + /// Get the HTTP content type. + #[cfg(feature = "codecs-arrow")] + pub const fn content_type(&self) -> &'static str { + match &self.serializer { + BatchSerializer::Arrow(_) => "application/vnd.apache.arrow.stream", + } + } +} + +impl tokio_util::codec::Encoder> for BatchEncoder { + type Error = Error; + + #[allow(unused_variables)] + fn encode(&mut self, events: Vec, buffer: &mut BytesMut) -> Result<(), Self::Error> { + #[allow(unreachable_patterns)] + match &mut self.serializer { + #[cfg(feature = "codecs-arrow")] + BatchSerializer::Arrow(serializer) => { + serializer.encode(events, buffer).map_err(|err| { + use vector_lib::codecs::encoding::ArrowEncodingError; + match err { + ArrowEncodingError::NullConstraint { .. 
} => { + Error::SchemaConstraintViolation(Box::new(err)) + } + _ => Error::SerializingError(Box::new(err)), + } + }) + } + _ => unreachable!("BatchSerializer cannot be constructed without encode()"), + } + } +} + +/// An wrapper that supports both framed and batch encoding modes. +#[derive(Debug, Clone)] +pub enum EncoderKind { + /// Uses framing to encode individual events + Framed(Box>), + /// Encodes events in batches without framing + #[cfg(feature = "codecs-arrow")] + Batch(BatchEncoder), +} + #[derive(Debug, Clone)] /// An encoder that can encode structured events into byte frames. pub struct Encoder diff --git a/src/codecs/encoding/mod.rs b/src/codecs/encoding/mod.rs index 69ede063e896b..36d637bd75090 100644 --- a/src/codecs/encoding/mod.rs +++ b/src/codecs/encoding/mod.rs @@ -3,5 +3,5 @@ mod encoder; mod transformer; pub use config::{EncodingConfig, EncodingConfigWithFraming, SinkType}; -pub use encoder::Encoder; +pub use encoder::{BatchEncoder, BatchSerializer, Encoder, EncoderKind}; pub use transformer::{TimestampFormat, Transformer}; diff --git a/src/codecs/mod.rs b/src/codecs/mod.rs index 4247846cca3a8..32b0e9efb7f8b 100644 --- a/src/codecs/mod.rs +++ b/src/codecs/mod.rs @@ -9,6 +9,7 @@ mod ready_frames; pub use decoding::{Decoder, DecodingConfig}; pub use encoding::{ - Encoder, EncodingConfig, EncodingConfigWithFraming, SinkType, TimestampFormat, Transformer, + BatchEncoder, BatchSerializer, Encoder, EncoderKind, EncodingConfig, EncodingConfigWithFraming, + SinkType, TimestampFormat, Transformer, }; pub use ready_frames::ReadyFrames; diff --git a/src/internal_events/codecs.rs b/src/internal_events/codecs.rs index a2c41b7806be1..0d36a0a0b1fa9 100644 --- a/src/internal_events/codecs.rs +++ b/src/internal_events/codecs.rs @@ -84,9 +84,9 @@ pub struct EncoderSerializeError<'a> { impl InternalEvent for EncoderSerializeError<'_> { fn emit(self) { - let reason = "Failed serializing frame."; + const SERIALIZE_REASON: &str = "Failed serializing frame."; error!( 
- message = reason, + message = SERIALIZE_REASON, error = %self.error, error_code = "encoder_serialize", error_type = error_type::ENCODER_FAILED, @@ -99,7 +99,10 @@ impl InternalEvent for EncoderSerializeError<'_> { "stage" => error_stage::SENDING, ) .increment(1); - emit!(ComponentEventsDropped:: { count: 1, reason }); + emit!(ComponentEventsDropped:: { + count: 1, + reason: SERIALIZE_REASON + }); } } @@ -132,3 +135,34 @@ impl InternalEvent for EncoderWriteError<'_, E> { } } } + +#[cfg(feature = "codecs-arrow")] +#[derive(Debug)] +pub struct EncoderNullConstraintError<'a> { + pub error: &'a crate::Error, +} + +#[cfg(feature = "codecs-arrow")] +impl InternalEvent for EncoderNullConstraintError<'_> { + fn emit(self) { + const CONSTRAINT_REASON: &str = "Schema constraint violation."; + error!( + message = CONSTRAINT_REASON, + error = %self.error, + error_code = "encoding_null_constraint", + error_type = error_type::ENCODER_FAILED, + stage = error_stage::SENDING, + ); + counter!( + "component_errors_total", + "error_code" => "encoding_null_constraint", + "error_type" => error_type::ENCODER_FAILED, + "stage" => error_stage::SENDING, + ) + .increment(1); + emit!(ComponentEventsDropped:: { + count: 1, + reason: CONSTRAINT_REASON + }); + } +} diff --git a/src/sinks/util/encoding.rs b/src/sinks/util/encoding.rs index bb5a938ec017f..6265021ef6f1a 100644 --- a/src/sinks/util/encoding.rs +++ b/src/sinks/util/encoding.rs @@ -8,6 +8,8 @@ use vector_lib::{ request_metadata::GroupedCountByteSize, }; +#[cfg(feature = "codecs-arrow")] +use crate::internal_events::EncoderNullConstraintError; use crate::{codecs::Transformer, event::Event, internal_events::EncoderWriteError}; pub trait Encoder { @@ -97,6 +99,65 @@ impl Encoder for (Transformer, crate::codecs::Encoder<()>) { } } +#[cfg(feature = "codecs-arrow")] +impl Encoder> for (Transformer, crate::codecs::BatchEncoder) { + fn encode_input( + &self, + events: Vec, + writer: &mut dyn io::Write, + ) -> io::Result<(usize, 
GroupedCountByteSize)> { + use tokio_util::codec::Encoder as _; + + let mut encoder = self.1.clone(); + let mut byte_size = telemetry().create_request_count_byte_size(); + let n_events = events.len(); + let mut transformed_events = Vec::with_capacity(n_events); + + for mut event in events { + self.0.transform(&mut event); + byte_size.add_event(&event, event.estimated_json_encoded_size_of()); + transformed_events.push(event); + } + + let mut bytes = BytesMut::new(); + encoder + .encode(transformed_events, &mut bytes) + .map_err(|error| { + if let vector_lib::codecs::encoding::Error::SchemaConstraintViolation( + ref constraint_error, + ) = error + { + emit!(EncoderNullConstraintError { + error: constraint_error + }); + } + io::Error::new(io::ErrorKind::InvalidData, error) + })?; + + write_all(writer, n_events, &bytes)?; + Ok((bytes.len(), byte_size)) + } +} + +impl Encoder> for (Transformer, crate::codecs::EncoderKind) { + fn encode_input( + &self, + events: Vec, + writer: &mut dyn io::Write, + ) -> io::Result<(usize, GroupedCountByteSize)> { + // Delegate to the specific encoder implementation + match &self.1 { + crate::codecs::EncoderKind::Framed(encoder) => { + (self.0.clone(), *encoder.clone()).encode_input(events, writer) + } + #[cfg(feature = "codecs-arrow")] + crate::codecs::EncoderKind::Batch(encoder) => { + (self.0.clone(), encoder.clone()).encode_input(events, writer) + } + } + } +} + /// Write the buffer to the writer. If the operation fails, emit an internal event which complies with the /// instrumentation spec- as this necessitates both an Error and EventsDropped event. /// From fcd135adadf3c3ff17c6194cc09df0f2597ae99b Mon Sep 17 00:00:00 2001 From: Bruce Guenter Date: Fri, 14 Nov 2025 09:08:43 -0600 Subject: [PATCH 090/227] chore(datadog_agent source): Refactor handle_request into struct (#24238) Consolidates the common parameters to `handle_request` into a new struct that can be passed around cleanly to the various warp filter builders. 
No functionality changes, just code moves. Co-authored-by: Pavlos Rontidis --- src/sources/datadog_agent/logs.rs | 15 +--- src/sources/datadog_agent/metrics.rs | 62 ++++++--------- src/sources/datadog_agent/mod.rs | 112 ++++++++++++++------------- src/sources/datadog_agent/traces.rs | 26 +++---- 4 files changed, 94 insertions(+), 121 deletions(-) diff --git a/src/sources/datadog_agent/logs.rs b/src/sources/datadog_agent/logs.rs index d3b653bd0b04a..7802def1dff2b 100644 --- a/src/sources/datadog_agent/logs.rs +++ b/src/sources/datadog_agent/logs.rs @@ -15,20 +15,15 @@ use vector_lib::{ use vrl::core::Value; use warp::{Filter, filters::BoxedFilter, path as warp_path, path::FullPath, reply::Response}; +use super::{ApiKeyQueryParams, DatadogAgentConfig, DatadogAgentSource, LogMsg, RequestHandler}; use crate::{ - SourceSender, common::{datadog::DDTAGS, http::ErrorMessage}, event::Event, internal_events::DatadogAgentJsonParseError, - sources::datadog_agent::{ - ApiKeyQueryParams, DatadogAgentConfig, DatadogAgentSource, LogMsg, handle_request, - }, }; -pub(crate) fn build_warp_filter( - acknowledgements: bool, - multiple_outputs: bool, - out: SourceSender, +pub(super) fn build_warp_filter( + handler: RequestHandler, source: DatadogAgentSource, ) -> BoxedFilter<(Response,)> { warp::post() @@ -58,9 +53,7 @@ pub(crate) fn build_warp_filter( &source, ) }); - - let output = multiple_outputs.then_some(super::LOGS); - handle_request(events, acknowledgements, out.clone(), output) + handler.clone().handle_request(events, super::LOGS) }, ) .boxed() diff --git a/src/sources/datadog_agent/metrics.rs b/src/sources/datadog_agent/metrics.rs index 252c66e60d01b..d931be05fcab7 100644 --- a/src/sources/datadog_agent/metrics.rs +++ b/src/sources/datadog_agent/metrics.rs @@ -13,8 +13,9 @@ use vector_lib::{ }; use warp::{Filter, filters::BoxedFilter, path, path::FullPath, reply::Response}; +use super::ddmetric_proto::{Metadata, MetricPayload, SketchPayload, metric_payload}; +use 
super::{ApiKeyQueryParams, DatadogAgentSource, RequestHandler}; use crate::{ - SourceSender, common::{ datadog::{DatadogMetricType, DatadogSeriesMetric}, http::ErrorMessage, @@ -26,14 +27,7 @@ use crate::{ }, internal_events::EventsReceived, schema, - sources::{ - datadog_agent::{ - ApiKeyQueryParams, DatadogAgentSource, - ddmetric_proto::{Metadata, MetricPayload, SketchPayload, metric_payload}, - handle_request, - }, - util::extract_tag_key_and_value, - }, + sources::util::extract_tag_key_and_value, }; #[derive(Deserialize, Serialize)] @@ -41,17 +35,13 @@ pub(crate) struct DatadogSeriesRequest { pub(crate) series: Vec, } -pub(crate) fn build_warp_filter( - acknowledgements: bool, - multiple_outputs: bool, - out: SourceSender, +pub(super) fn build_warp_filter( + handler: RequestHandler, source: DatadogAgentSource, ) -> BoxedFilter<(Response,)> { - let output = multiple_outputs.then_some(super::METRICS); - let sketches_service = sketches_service(acknowledgements, output, out.clone(), source.clone()); - let series_v1_service = - series_v1_service(acknowledgements, output, out.clone(), source.clone()); - let series_v2_service = series_v2_service(acknowledgements, output, out, source); + let sketches_service = sketches_service(handler.clone(), source.clone()); + let series_v1_service = series_v1_service(handler.clone(), source.clone()); + let series_v2_service = series_v2_service(handler, source); sketches_service .or(series_v1_service) .unify() @@ -61,9 +51,7 @@ pub(crate) fn build_warp_filter( } fn sketches_service( - acknowledgements: bool, - output: Option<&'static str>, - out: SourceSender, + handler: RequestHandler, source: DatadogAgentSource, ) -> BoxedFilter<(Response,)> { warp::post() @@ -73,7 +61,7 @@ fn sketches_service( .and(warp::header::optional::("dd-api-key")) .and(warp::query::()) .and(warp::body::bytes()) - .and_then( + .and_then({ move |path: FullPath, encoding_header: Option, api_token: Option, @@ -93,16 +81,14 @@ fn sketches_service( 
&source.events_received, ) }); - handle_request(events, acknowledgements, out.clone(), output) - }, - ) + handler.clone().handle_request(events, super::METRICS) + } + }) .boxed() } fn series_v1_service( - acknowledgements: bool, - output: Option<&'static str>, - out: SourceSender, + handler: RequestHandler, source: DatadogAgentSource, ) -> BoxedFilter<(Response,)> { warp::post() @@ -112,7 +98,7 @@ fn series_v1_service( .and(warp::header::optional::("dd-api-key")) .and(warp::query::()) .and(warp::body::bytes()) - .and_then( + .and_then({ move |path: FullPath, encoding_header: Option, api_token: Option, @@ -135,16 +121,14 @@ fn series_v1_service( &source.events_received, ) }); - handle_request(events, acknowledgements, out.clone(), output) - }, - ) + handler.clone().handle_request(events, super::METRICS) + } + }) .boxed() } fn series_v2_service( - acknowledgements: bool, - output: Option<&'static str>, - out: SourceSender, + handler: RequestHandler, source: DatadogAgentSource, ) -> BoxedFilter<(Response,)> { warp::post() @@ -154,7 +138,7 @@ fn series_v2_service( .and(warp::header::optional::("dd-api-key")) .and(warp::query::()) .and(warp::body::bytes()) - .and_then( + .and_then({ move |path: FullPath, encoding_header: Option, api_token: Option, @@ -174,9 +158,9 @@ fn series_v2_service( &source.events_received, ) }); - handle_request(events, acknowledgements, out.clone(), output) - }, - ) + handler.clone().handle_request(events, super::METRICS) + } + }) .boxed() } diff --git a/src/sources/datadog_agent/mod.rs b/src/sources/datadog_agent/mod.rs index 98fbc40e630de..0ee1ee8bd5ac5 100644 --- a/src/sources/datadog_agent/mod.rs +++ b/src/sources/datadog_agent/mod.rs @@ -425,34 +425,23 @@ impl DatadogAgentSource { acknowledgements: bool, config: &DatadogAgentConfig, ) -> crate::Result> { - let mut filters = (!config.disable_logs).then(|| { - logs::build_warp_filter( - acknowledgements, - config.multiple_outputs, - out.clone(), - self.clone(), - ) - }); + let handler = 
RequestHandler { + acknowledgements, + multiple_outputs: config.multiple_outputs, + out, + }; + let mut filters = + (!config.disable_logs).then(|| logs::build_warp_filter(handler.clone(), self.clone())); if !config.disable_traces { - let trace_filter = traces::build_warp_filter( - acknowledgements, - config.multiple_outputs, - out.clone(), - self.clone(), - ); + let trace_filter = traces::build_warp_filter(handler.clone(), self.clone()); filters = filters .map(|f| f.or(trace_filter.clone()).unify().boxed()) .or(Some(trace_filter)); } if !config.disable_metrics { - let metrics_filter = metrics::build_warp_filter( - acknowledgements, - config.multiple_outputs, - out, - self.clone(), - ); + let metrics_filter = metrics::build_warp_filter(handler, self.clone()); filters = filters .map(|f| f.or(metrics_filter.clone()).unify().boxed()) .or(Some(metrics_filter)); @@ -509,42 +498,57 @@ impl DatadogAgentSource { } } -pub(crate) async fn handle_request( - events: Result, ErrorMessage>, +#[derive(Clone)] +struct RequestHandler { acknowledgements: bool, - mut out: SourceSender, - output: Option<&str>, -) -> Result { - match events { - Ok(mut events) => { - let receiver = BatchNotifier::maybe_apply_to(acknowledgements, &mut events); - let count = events.len(); - - if let Some(name) = output { - out.send_batch_named(name, events).await - } else { - out.send_batch(events).await - } - .map_err(|_| { - emit!(StreamClosedError { count }); - warp::reject::custom(ApiError::ServerShutdown) - })?; - match receiver { - None => Ok(warp::reply().into_response()), - Some(receiver) => match receiver.await { - BatchStatus::Delivered => Ok(warp::reply().into_response()), - BatchStatus::Errored => Err(warp::reject::custom(ErrorMessage::new( - StatusCode::INTERNAL_SERVER_ERROR, - "Error delivering contents to sink".into(), - ))), - BatchStatus::Rejected => Err(warp::reject::custom(ErrorMessage::new( - StatusCode::BAD_REQUEST, - "Contents failed to deliver to sink".into(), - ))), - }, - } + 
multiple_outputs: bool, + out: SourceSender, +} + +impl RequestHandler { + async fn handle_request( + mut self, + events: Result, ErrorMessage>, + output: &'static str, + ) -> Result { + match events { + Ok(events) => self.handle_events(events, output).await, + Err(err) => Err(warp::reject::custom(err)), + } + } + + async fn handle_events( + &mut self, + mut events: Vec, + output: &'static str, + ) -> Result { + let receiver = BatchNotifier::maybe_apply_to(self.acknowledgements, &mut events); + let count = events.len(); + let output = self.multiple_outputs.then_some(output); + + if let Some(name) = output { + self.out.send_batch_named(name, events).await + } else { + self.out.send_batch(events).await + } + .map_err(|_| { + emit!(StreamClosedError { count }); + warp::reject::custom(ApiError::ServerShutdown) + })?; + match receiver { + None => Ok(warp::reply().into_response()), + Some(receiver) => match receiver.await { + BatchStatus::Delivered => Ok(warp::reply().into_response()), + BatchStatus::Errored => Err(warp::reject::custom(ErrorMessage::new( + StatusCode::INTERNAL_SERVER_ERROR, + "Error delivering contents to sink".into(), + ))), + BatchStatus::Rejected => Err(warp::reject::custom(ErrorMessage::new( + StatusCode::BAD_REQUEST, + "Contents failed to deliver to sink".into(), + ))), + }, } - Err(err) => Err(warp::reject::custom(err)), } } diff --git a/src/sources/datadog_agent/traces.rs b/src/sources/datadog_agent/traces.rs index b60fd543cc873..5f0bea7ab0824 100644 --- a/src/sources/datadog_agent/traces.rs +++ b/src/sources/datadog_agent/traces.rs @@ -13,31 +13,24 @@ use vector_lib::{ use vrl::event_path; use warp::{Filter, Rejection, Reply, filters::BoxedFilter, path, path::FullPath, reply::Response}; +use super::{ApiKeyQueryParams, DatadogAgentSource, RequestHandler, ddtrace_proto}; use crate::{ - SourceSender, common::http::ErrorMessage, event::{Event, ObjectMap, TraceEvent, Value}, - sources::datadog_agent::{ - ApiKeyQueryParams, DatadogAgentSource, 
ddtrace_proto, handle_request, - }, }; -pub(crate) fn build_warp_filter( - acknowledgements: bool, - multiple_outputs: bool, - out: SourceSender, +pub(super) fn build_warp_filter( + handler: RequestHandler, source: DatadogAgentSource, ) -> BoxedFilter<(Response,)> { - build_trace_filter(acknowledgements, multiple_outputs, out, source) + build_trace_filter(handler, source) .or(build_stats_filter()) .unify() .boxed() } fn build_trace_filter( - acknowledgements: bool, - multiple_outputs: bool, - out: SourceSender, + handler: RequestHandler, source: DatadogAgentSource, ) -> BoxedFilter<(Response,)> { warp::post() @@ -50,7 +43,7 @@ fn build_trace_filter( )) .and(warp::query::()) .and(warp::body::bytes()) - .and_then( + .and_then({ move |path: FullPath, encoding_header: Option, api_token: Option, @@ -77,10 +70,9 @@ fn build_trace_filter( ) }) }); - let output = multiple_outputs.then_some(super::TRACES); - handle_request(events, acknowledgements, out.clone(), output) - }, - ) + handler.clone().handle_request(events, super::TRACES) + } + }) .boxed() } From 62e34462c4219a5a6e2497b08f7d91e2cb0b082b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ensar=20Saraj=C4=8Di=C4=87?= Date: Fri, 14 Nov 2025 16:13:13 +0100 Subject: [PATCH 091/227] fix(http_client): handle custom auth strategy in all sinks (#24240) Related: #24201 --- src/sinks/databend/config.rs | 3 +++ src/sinks/prometheus/exporter.rs | 1 + src/sinks/websocket/sink.rs | 1 + 3 files changed, 5 insertions(+) diff --git a/src/sinks/databend/config.rs b/src/sinks/databend/config.rs index 598c8f1c88fc2..c00e4dd514f72 100644 --- a/src/sinks/databend/config.rs +++ b/src/sinks/databend/config.rs @@ -131,6 +131,9 @@ impl SinkConfig for DatabendConfig { Some(Auth::Bearer { .. }) => { return Err("Bearer authentication is not supported currently".into()); } + Some(Auth::Custom { .. 
}) => { + return Err("Custom authentication is not supported currently".into()); + } None => {} #[cfg(feature = "aws-core")] _ => {} diff --git a/src/sinks/prometheus/exporter.rs b/src/sinks/prometheus/exporter.rs index 823914f00746e..63d2ed3a1a463 100644 --- a/src/sinks/prometheus/exporter.rs +++ b/src/sinks/prometheus/exporter.rs @@ -323,6 +323,7 @@ fn authorized(req: &Request, auth: &Option) -> bool { Auth::Bearer { token } => Some(HeaderValue::from_str( format!("Bearer {}", token.inner()).as_str(), )), + Auth::Custom { value } => Some(HeaderValue::from_str(value)), #[cfg(feature = "aws-core")] _ => None, }; diff --git a/src/sinks/websocket/sink.rs b/src/sinks/websocket/sink.rs index 198480ca3204b..1371d1a3157d6 100644 --- a/src/sinks/websocket/sink.rs +++ b/src/sinks/websocket/sink.rs @@ -420,6 +420,7 @@ mod tests { user: _user, password: _password, } => { /* Not needed for tests at the moment */ } + Auth::Custom { .. } => { /* Not needed for tests at the moment */ } #[cfg(feature = "aws-core")] _ => {} } From 8a8f223012fab640035e65533edf3ad94c3cd3d1 Mon Sep 17 00:00:00 2001 From: Thomas Date: Fri, 14 Nov 2025 15:49:57 -0500 Subject: [PATCH 092/227] chore(website): Apply review suggestions from PR #24234 (#24244) --- website/cue/reference/releases/0.51.1.cue | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/cue/reference/releases/0.51.1.cue b/website/cue/reference/releases/0.51.1.cue index f93558bc64ea4..d4fb3e354c4a4 100644 --- a/website/cue/reference/releases/0.51.1.cue +++ b/website/cue/reference/releases/0.51.1.cue @@ -11,8 +11,8 @@ releases: "0.51.1": { longer panic when logging utilization or other debug messages. * The `config_reload_rejected` and `config_reloaded` counters added in `0.51.0` were - not being emitted and have been replaced. `component_errors_total` with - `error_code="reload"` now replaces `config_reload_rejected` and `reloaded_total` + not being emitted and have been replaced. 
`component_errors_total` and + `error_code="reload"` replaces `config_reload_rejected` and `reloaded_total` replaces `config_reloaded`. * The `basename`, `dirname` and `split_path` VRL functions added in `0.51.0` are now @@ -35,7 +35,7 @@ releases: "0.51.1": { { type: "fix" description: """ - Fixed a panic in the tracing rate limiter when config reload failed. While the panic didn't kill Vector (it was caught by tokio's task + Fixed a panic in the tracing rate limiter when a config reload failed. While the panic didn't kill Vector (it was caught by tokio's task runtime), it could cause unexpected behavior. The rate limiter now gracefully handles events without standard message fields. """ contributors: ["pront"] From b9726201642b7e3219f279f9ed7ea3320ed6bdd4 Mon Sep 17 00:00:00 2001 From: Thomas Date: Fri, 14 Nov 2025 16:39:56 -0500 Subject: [PATCH 093/227] fix(dev): flush and sync files in file source tests (#24243) * fix(dev): flush and sync files in file source tests * Remove unecessary sleep * Remove a bunch of sleeps * Remove sleep in file_rotate * Run file_start_position_server_restart_acknowledged and file_start_position_server_restart_no_acknowledge on non-linux * Add missing sleep to file_start_position_server_restart_unfinalized * Try to fix windows test --- src/sources/file.rs | 174 +++++++++++++++++++++++--------------------- 1 file changed, 91 insertions(+), 83 deletions(-) diff --git a/src/sources/file.rs b/src/sources/file.rs index 67bf04531790f..d8a4f6c5bd5c0 100644 --- a/src/sources/file.rs +++ b/src/sources/file.rs @@ -1157,13 +1157,14 @@ mod tests { let mut file1 = File::create(&path1).unwrap(); let mut file2 = File::create(&path2).unwrap(); - sleep_500_millis().await; // The files must be observed at their original lengths before writing to them - for i in 0..n { writeln!(&mut file1, "hello {i}").unwrap(); writeln!(&mut file2, "goodbye {i}").unwrap(); } + file1.flush().unwrap(); + file2.flush().unwrap(); + sleep_500_millis().await; }) .await; @@ 
-1210,12 +1211,11 @@ mod tests { let received = run_file_source(&config, false, NoAcks, LogNamespace::Legacy, async { let mut file = File::create(&path).unwrap(); - sleep_500_millis().await; // The files must be observed at their original lengths before writing to them - writeln!(&mut file, "line for checkpointing").unwrap(); for _i in 0..n { writeln!(&mut file).unwrap(); } + file.flush().unwrap(); sleep_500_millis().await; }) @@ -1237,23 +1237,24 @@ mod tests { let received = run_file_source(&config, false, NoAcks, LogNamespace::Legacy, async { let mut file = File::create(&path).unwrap(); - sleep_500_millis().await; // The files must be observed at its original length before writing to it - for i in 0..n { writeln!(&mut file, "pretrunc {i}").unwrap(); } + file.flush().unwrap(); sleep_500_millis().await; // The writes must be observed before truncating file.set_len(0).unwrap(); file.seek(std::io::SeekFrom::Start(0)).unwrap(); + file.sync_all().unwrap(); sleep_500_millis().await; // The truncate must be observed before writing again for i in 0..n { writeln!(&mut file, "posttrunc {i}").unwrap(); } + file.flush().unwrap(); sleep_500_millis().await; }) .await; @@ -1299,23 +1300,26 @@ mod tests { let received = run_file_source(&config, false, NoAcks, LogNamespace::Legacy, async { let mut file = File::create(&path).unwrap(); - sleep_500_millis().await; // The files must be observed at its original length before writing to it - for i in 0..n { writeln!(&mut file, "prerot {i}").unwrap(); } + file.flush().unwrap(); sleep_500_millis().await; // The writes must be observed before rotating fs::rename(&path, archive_path).expect("could not rename"); + file.sync_all().unwrap(); + let mut file = File::create(&path).unwrap(); + file.sync_all().unwrap(); sleep_500_millis().await; // The rotation must be observed before writing again for i in 0..n { writeln!(&mut file, "postrot {i}").unwrap(); } + file.flush().unwrap(); sleep_500_millis().await; }) .await; @@ -1367,14 +1371,16 @@ 
mod tests { let mut file3 = File::create(&path3).unwrap(); let mut file4 = File::create(&path4).unwrap(); - sleep_500_millis().await; // The files must be observed at their original lengths before writing to them - for i in 0..n { writeln!(&mut file1, "1 {i}").unwrap(); writeln!(&mut file2, "2 {i}").unwrap(); writeln!(&mut file3, "3 {i}").unwrap(); writeln!(&mut file4, "4 {i}").unwrap(); } + file1.flush().unwrap(); + file2.flush().unwrap(); + file3.flush().unwrap(); + file4.flush().unwrap(); sleep_500_millis().await; }) @@ -1415,13 +1421,13 @@ mod tests { let mut file1 = File::create(&path1).unwrap(); let mut file2 = File::create(&path2).unwrap(); - sleep_500_millis().await; // The files must be observed at their original lengths before writing to them - for i in 0..n { writeln!(&mut file1, "1 {i}").unwrap(); writeln!(&mut file2, "2 {i}").unwrap(); } + file1.flush().unwrap(); + file2.flush().unwrap(); sleep_500_millis().await; }) .await; @@ -1466,9 +1472,8 @@ mod tests { let received = run_file_source(&config, true, acks, LogNamespace::Legacy, async { let mut file = File::create(&path).unwrap(); - sleep_500_millis().await; - writeln!(&mut file, "hello there").unwrap(); + file.flush().unwrap(); sleep_500_millis().await; }) @@ -1494,9 +1499,8 @@ mod tests { let received = run_file_source(&config, true, acks, LogNamespace::Legacy, async { let mut file = File::create(&path).unwrap(); - sleep_500_millis().await; - writeln!(&mut file, "hello there").unwrap(); + file.flush().unwrap(); sleep_500_millis().await; }) @@ -1521,10 +1525,9 @@ mod tests { let received = run_file_source(&config, true, acks, LogNamespace::Legacy, async { let mut file = File::create(&path).unwrap(); - sleep_500_millis().await; - writeln!(&mut file, "hello there").unwrap(); + file.flush().unwrap(); sleep_500_millis().await; }) .await; @@ -1549,19 +1552,16 @@ mod tests { } } - #[cfg(target_os = "linux")] // see #7988 #[tokio::test] async fn file_start_position_server_restart_acknowledged() { 
file_start_position_server_restart(Acks).await } - #[cfg(target_os = "linux")] // see #7988 #[tokio::test] async fn file_start_position_server_restart_no_acknowledge() { file_start_position_server_restart(NoAcks).await } - #[cfg(target_os = "linux")] // see #7988 async fn file_start_position_server_restart(acking: AckingMode) { let dir = tempdir().unwrap(); let config = file::FileConfig { @@ -1572,13 +1572,14 @@ mod tests { let path = dir.path().join("file"); let mut file = File::create(&path).unwrap(); writeln!(&mut file, "zeroth line").unwrap(); - sleep_500_millis().await; + file.flush().unwrap(); // First time server runs it picks up existing lines. { let received = run_file_source(&config, true, acking, LogNamespace::Legacy, async { sleep_500_millis().await; writeln!(&mut file, "first line").unwrap(); + file.flush().unwrap(); sleep_500_millis().await; }) .await; @@ -1591,6 +1592,7 @@ mod tests { let received = run_file_source(&config, true, acking, LogNamespace::Legacy, async { sleep_500_millis().await; writeln!(&mut file, "second line").unwrap(); + file.flush().unwrap(); sleep_500_millis().await; }) .await; @@ -1609,6 +1611,7 @@ mod tests { let received = run_file_source(&config, false, acking, LogNamespace::Legacy, async { sleep_500_millis().await; writeln!(&mut file, "third line").unwrap(); + file.flush().unwrap(); sleep_500_millis().await; }) .await; @@ -1675,7 +1678,6 @@ mod tests { writeln!(&mut file, "Here's a line for you: {i}").unwrap(); } file.flush().unwrap(); - sleep_500_millis().await; // First time server runs it should pick up a bunch of lines let received = run_file_source( @@ -1731,8 +1733,8 @@ mod tests { { let received = run_file_source(&config, true, acking, LogNamespace::Legacy, async { let mut file = File::create(&path).unwrap(); - sleep_500_millis().await; writeln!(&mut file, "first line").unwrap(); + file.flush().unwrap(); sleep_500_millis().await; }) .await; @@ -1747,8 +1749,8 @@ mod tests { { let received = run_file_source(&config, 
false, acking, LogNamespace::Legacy, async { let mut file = File::create(&path).unwrap(); - sleep_500_millis().await; writeln!(&mut file, "second line").unwrap(); + file.flush().unwrap(); sleep_500_millis().await; }) .await; @@ -1773,48 +1775,53 @@ mod tests { ..test_default_file_config(&dir) }; - let received = run_file_source(&config, false, NoAcks, LogNamespace::Legacy, async { - let before_path = dir.path().join("before"); - let mut before_file = File::create(&before_path).unwrap(); - let after_path = dir.path().join("after"); - let mut after_file = File::create(&after_path).unwrap(); + let before_path = dir.path().join("before"); + let mut before_file = File::create(&before_path).unwrap(); + let after_path = dir.path().join("after"); + let mut after_file = File::create(&after_path).unwrap(); - writeln!(&mut before_file, "first line").unwrap(); // first few bytes make up unique file fingerprint - writeln!(&mut after_file, "_first line").unwrap(); // and therefore need to be non-identical + writeln!(&mut before_file, "first line").unwrap(); // first few bytes make up unique file fingerprint + writeln!(&mut after_file, "_first line").unwrap(); // and therefore need to be non-identical - { - // Set the modified times - let before = SystemTime::now() - Duration::from_secs(8); - let after = SystemTime::now() - Duration::from_secs(2); - - let before_time = libc::timeval { - tv_sec: before - .duration_since(SystemTime::UNIX_EPOCH) - .unwrap() - .as_secs() as _, - tv_usec: 0, - }; - let before_times = [before_time, before_time]; - - let after_time = libc::timeval { - tv_sec: after - .duration_since(SystemTime::UNIX_EPOCH) - .unwrap() - .as_secs() as _, - tv_usec: 0, - }; - let after_times = [after_time, after_time]; + { + // Set the modified times + let before = SystemTime::now() - Duration::from_secs(8); + let after = SystemTime::now() - Duration::from_secs(2); + + let before_time = libc::timeval { + tv_sec: before + .duration_since(SystemTime::UNIX_EPOCH) + .unwrap() 
+ .as_secs() as _, + tv_usec: 0, + }; + let before_times = [before_time, before_time]; + + let after_time = libc::timeval { + tv_sec: after + .duration_since(SystemTime::UNIX_EPOCH) + .unwrap() + .as_secs() as _, + tv_usec: 0, + }; + let after_times = [after_time, after_time]; - unsafe { - libc::futimes(before_file.as_raw_fd(), before_times.as_ptr()); - libc::futimes(after_file.as_raw_fd(), after_times.as_ptr()); - } + unsafe { + libc::futimes(before_file.as_raw_fd(), before_times.as_ptr()); + libc::futimes(after_file.as_raw_fd(), after_times.as_ptr()); } + } + before_file.sync_all().unwrap(); + after_file.sync_all().unwrap(); + + let received = run_file_source(&config, false, NoAcks, LogNamespace::Legacy, async { sleep_500_millis().await; writeln!(&mut before_file, "second line").unwrap(); writeln!(&mut after_file, "_second line").unwrap(); + before_file.flush().unwrap(); + after_file.flush().unwrap(); sleep_500_millis().await; }) .await; @@ -1850,8 +1857,6 @@ mod tests { let received = run_file_source(&config, false, NoAcks, LogNamespace::Legacy, async { let mut file = File::create(&path).unwrap(); - sleep_500_millis().await; // The files must be observed at their original lengths before writing to them - writeln!(&mut file, "short").unwrap(); writeln!(&mut file, "this is too long").unwrap(); writeln!(&mut file, "11 eleven11").unwrap(); @@ -1860,11 +1865,13 @@ mod tests { writeln!(&mut file, "exactly 10").unwrap(); writeln!(&mut file, "it can end on a line that's too long").unwrap(); + file.flush().unwrap(); sleep_500_millis().await; sleep_500_millis().await; writeln!(&mut file, "and then continue").unwrap(); writeln!(&mut file, "last short").unwrap(); + file.flush().unwrap(); sleep_500_millis().await; sleep_500_millis().await; @@ -1892,19 +1899,19 @@ mod tests { let received = run_file_source(&config, false, NoAcks, LogNamespace::Legacy, async { let mut file = File::create(&path).unwrap(); - sleep_500_millis().await; // The files must be observed at their 
original lengths before writing to them - writeln!(&mut file, "leftover foo").unwrap(); writeln!(&mut file, "INFO hello").unwrap(); writeln!(&mut file, "INFO goodbye").unwrap(); writeln!(&mut file, "part of goodbye").unwrap(); + file.flush().unwrap(); sleep_500_millis().await; writeln!(&mut file, "INFO hi again").unwrap(); writeln!(&mut file, "and some more").unwrap(); writeln!(&mut file, "INFO hello").unwrap(); + file.flush().unwrap(); sleep_500_millis().await; writeln!(&mut file, "too slow").unwrap(); @@ -1912,6 +1919,7 @@ mod tests { writeln!(&mut file, "to be INFO in").unwrap(); writeln!(&mut file, "the middle").unwrap(); + file.flush().unwrap(); sleep_500_millis().await; }) .await; @@ -1951,19 +1959,19 @@ mod tests { let received = run_file_source(&config, false, NoAcks, LogNamespace::Legacy, async { let mut file = File::create(&path).unwrap(); - sleep_500_millis().await; // The files must be observed at their original lengths before writing to them - writeln!(&mut file, "leftover foo").unwrap(); writeln!(&mut file, "INFO hello").unwrap(); writeln!(&mut file, "INFO goodbye").unwrap(); writeln!(&mut file, "part of goodbye").unwrap(); + file.flush().unwrap(); sleep_500_millis().await; writeln!(&mut file, "INFO hi again").unwrap(); writeln!(&mut file, "and some more").unwrap(); writeln!(&mut file, "INFO hello").unwrap(); + file.flush().unwrap(); sleep_500_millis().await; writeln!(&mut file, "too slow").unwrap(); @@ -1971,6 +1979,7 @@ mod tests { writeln!(&mut file, "to be INFO in").unwrap(); writeln!(&mut file, "the middle").unwrap(); + file.flush().unwrap(); sleep_500_millis().await; }) .await; @@ -2013,6 +2022,8 @@ mod tests { writeln!(&mut file, "INFO hello").unwrap(); writeln!(&mut file, "part of hello").unwrap(); + file.sync_all().unwrap(); + // Read and aggregate existing lines let received = run_file_source( &config, @@ -2032,6 +2043,8 @@ mod tests { let received_after_restart = run_file_source(&config, false, Acks, LogNamespace::Legacy, async { 
writeln!(&mut file, "INFO goodbye").unwrap(); + file.flush().unwrap(); + sleep_500_millis().await; }) .await; assert_eq!( @@ -2055,20 +2068,18 @@ mod tests { let older_path = dir.path().join("z_older_file"); let mut older = File::create(&older_path).unwrap(); - sleep_500_millis().await; - - let newer_path = dir.path().join("a_newer_file"); - let mut newer = File::create(&newer_path).unwrap(); - writeln!(&mut older, "hello i am the old file").unwrap(); writeln!(&mut older, "i have been around a while").unwrap(); writeln!(&mut older, "you can read newer files at the same time").unwrap(); + older.sync_all().unwrap(); // sync_all is needed due to windows + + let newer_path = dir.path().join("a_newer_file"); + let mut newer = File::create(&newer_path).unwrap(); writeln!(&mut newer, "and i am the new file").unwrap(); writeln!(&mut newer, "this should be interleaved with the old one").unwrap(); writeln!(&mut newer, "which is fine because we want fairness").unwrap(); - - sleep_500_millis().await; + newer.sync_all().unwrap(); // sync_all is needed due to windows let received = run_file_source( &config, @@ -2106,21 +2117,21 @@ mod tests { let older_path = dir.path().join("z_older_file"); let mut older = File::create(&older_path).unwrap(); - - sleep_500_millis().await; + older.sync_all().unwrap(); let newer_path = dir.path().join("a_newer_file"); let mut newer = File::create(&newer_path).unwrap(); + newer.sync_all().unwrap(); writeln!(&mut older, "hello i am the old file").unwrap(); writeln!(&mut older, "i have been around a while").unwrap(); writeln!(&mut older, "you should definitely read all of me first").unwrap(); + older.flush().unwrap(); writeln!(&mut newer, "i'm new").unwrap(); writeln!(&mut newer, "hopefully you read all the old stuff first").unwrap(); writeln!(&mut newer, "because otherwise i'm not going to make sense").unwrap(); - - sleep_500_millis().await; + newer.flush().unwrap(); let received = run_file_source( &config, @@ -2146,8 +2157,6 @@ mod tests { ); } - 
// Ignoring on mac: https://github.com/vectordotdev/vector/issues/8373 - #[cfg(not(target_os = "macos"))] #[tokio::test] async fn test_split_reads() { let dir = tempdir().unwrap(); @@ -2161,19 +2170,20 @@ mod tests { let mut file = File::create(&path).unwrap(); writeln!(&mut file, "hello i am a normal line").unwrap(); - - sleep_500_millis().await; + file.sync_all().unwrap(); let received = run_file_source(&config, false, NoAcks, LogNamespace::Legacy, async { sleep_500_millis().await; write!(&mut file, "i am not a full line").unwrap(); + file.flush().unwrap(); // Longer than the EOF timeout sleep_500_millis().await; writeln!(&mut file, " until now").unwrap(); + file.flush().unwrap(); sleep_500_millis().await; }) .await; @@ -2272,13 +2282,12 @@ mod tests { let received = run_file_source(&config, false, NoAcks, LogNamespace::Legacy, async { let mut file = File::create(&path).unwrap(); - sleep_500_millis().await; // The files must be observed at their original lengths before writing to them - write!(&mut file, "hello i am a line\r\n").unwrap(); write!(&mut file, "and i am too\r\n").unwrap(); write!(&mut file, "CRLF is how we end\r\n").unwrap(); write!(&mut file, "please treat us well\r\n").unwrap(); + file.flush().unwrap(); sleep_500_millis().await; }) .await; @@ -2312,11 +2321,10 @@ mod tests { let received = run_file_source(&config, false, Acks, LogNamespace::Legacy, async { let mut file = File::create(&path).unwrap(); - sleep_500_millis().await; // The files must be observed at their original lengths before writing to them - for i in 0..n { writeln!(&mut file, "{i}").unwrap(); } + file.flush().unwrap(); drop(file); for _ in 0..10 { From c8cbfbfe624b5d6df38367727d6e67db181e73c5 Mon Sep 17 00:00:00 2001 From: Thomas Date: Mon, 17 Nov 2025 10:41:30 -0500 Subject: [PATCH 094/227] refactor(sources, sinks): add default ExponentialBackoff (#24246) * refactor(sources, sinks): use recommended ExponentialBackoff * Use ExponentialBackoff::default instead of recommended * Make 
default const * Remove const * Fix unused imports * Revert max_delay change in aws_s3 sqs backoff * Add import --- src/common/backoff.rs | 23 +++++++++++++++-------- src/common/websocket.rs | 9 ++------- src/sinks/util/service/health.rs | 2 +- src/sinks/util/service/net/mod.rs | 5 +---- src/sinks/util/tcp.rs | 7 ++----- src/sinks/util/udp.rs | 7 ++----- src/sinks/util/unix.rs | 7 ++----- src/sources/aws_s3/sqs.rs | 4 +--- 8 files changed, 26 insertions(+), 38 deletions(-) diff --git a/src/common/backoff.rs b/src/common/backoff.rs index 94ed2ff7e4936..ae983b778b30a 100644 --- a/src/common/backoff.rs +++ b/src/common/backoff.rs @@ -15,6 +15,15 @@ pub(crate) struct ExponentialBackoff { max_delay: Option, } +impl Default for ExponentialBackoff { + /// `ExponentialBackoff` instance with sensible default values + fn default() -> Self { + Self::from_millis(2) + .factor(250) + .max_delay(Duration::from_secs(60)) + } +} + impl ExponentialBackoff { /// Constructs a new exponential back-off strategy, /// given a base duration in milliseconds. 
@@ -86,9 +95,7 @@ mod tests { #[test] fn test_exponential_backoff_sequence() { - let mut backoff = ExponentialBackoff::from_millis(2) - .factor(250) - .max_delay(Duration::from_secs(30)); + let mut backoff = ExponentialBackoff::default(); let expected_delays = [ Duration::from_millis(500), // 2 * 250 @@ -97,8 +104,9 @@ mod tests { Duration::from_secs(4), // 16 * 250 Duration::from_secs(8), // 32 * 250 Duration::from_secs(16), // 64 * 250 - Duration::from_secs(30), // 128 * 250 = 32s, capped at 30s - Duration::from_secs(30), // Should stay capped + Duration::from_secs(32), // 128 * 250 + Duration::from_secs(60), // 256 * 250 = 64s, capped at 60 + Duration::from_secs(60), // Should stay capped ]; for expected in expected_delays.iter() { @@ -109,9 +117,8 @@ mod tests { #[test] fn test_backoff_reset() { - let mut backoff = ExponentialBackoff::from_millis(2) - .factor(250) - .max_delay(Duration::from_secs(30)); + let mut backoff = ExponentialBackoff::default(); + for _ in 0..2 { backoff.next(); } diff --git a/src/common/websocket.rs b/src/common/websocket.rs index 897f4fbe845e2..7abb8c589cec6 100644 --- a/src/common/websocket.rs +++ b/src/common/websocket.rs @@ -84,12 +84,6 @@ impl WebSocketConnector { Ok((host, port)) } - const fn fresh_backoff() -> ExponentialBackoff { - ExponentialBackoff::from_millis(2) - .factor(250) - .max_delay(Duration::from_secs(60)) - } - async fn tls_connect(&self) -> Result, WebSocketError> { let ip = dns::Resolver .lookup_ip(self.host.clone()) @@ -126,7 +120,8 @@ impl WebSocketConnector { } pub(crate) async fn connect_backoff(&self) -> WebSocketStream> { - let mut backoff = Self::fresh_backoff(); + let mut backoff = ExponentialBackoff::default(); + loop { match self.connect().await { Ok(ws_stream) => { diff --git a/src/sinks/util/service/health.rs b/src/sinks/util/service/health.rs index 724370b3d8990..b764ebcad64b5 100644 --- a/src/sinks/util/service/health.rs +++ b/src/sinks/util/service/health.rs @@ -76,7 +76,7 @@ impl HealthConfig { 
open, // An exponential backoff starting from retry_initial_backoff_sec and doubling every time // up to retry_max_duration_secs. - backoff: ExponentialBackoff::from_millis(2) + backoff: ExponentialBackoff::default() .factor((self.retry_initial_backoff_secs.saturating_mul(1000) / 2).max(1)) .max_delay(self.retry_max_duration_secs), } diff --git a/src/sinks/util/service/net/mod.rs b/src/sinks/util/service/net/mod.rs index 5e94aa3196dd9..8af2e3aa29a58 100644 --- a/src/sinks/util/service/net/mod.rs +++ b/src/sinks/util/service/net/mod.rs @@ -8,7 +8,6 @@ use std::{ io, net::SocketAddr, task::{Context, Poll, ready}, - time::Duration, }; use futures_util::{FutureExt, future::BoxFuture}; @@ -242,9 +241,7 @@ impl NetworkConnector { async fn connect_backoff(&self) -> NetworkConnection { // TODO: Make this configurable. - let mut backoff = ExponentialBackoff::from_millis(2) - .factor(250) - .max_delay(Duration::from_secs(60)); + let mut backoff = ExponentialBackoff::default(); loop { match self.connect().await { diff --git a/src/sinks/util/tcp.rs b/src/sinks/util/tcp.rs index 1afa8a28479a8..3f4051a18c5f1 100644 --- a/src/sinks/util/tcp.rs +++ b/src/sinks/util/tcp.rs @@ -3,7 +3,6 @@ use std::{ net::SocketAddr, pin::Pin, task::{Context, Poll}, - time::Duration, }; use async_trait::async_trait; @@ -157,11 +156,9 @@ impl TcpConnector { Self::new(host, port, None, None.into(), None) } - const fn fresh_backoff() -> ExponentialBackoff { + fn fresh_backoff() -> ExponentialBackoff { // TODO: make configurable - ExponentialBackoff::from_millis(2) - .factor(250) - .max_delay(Duration::from_secs(60)) + ExponentialBackoff::default() } async fn connect(&self) -> Result, TcpError> { diff --git a/src/sinks/util/udp.rs b/src/sinks/util/udp.rs index 253b78e13c505..0b9441e682cde 100644 --- a/src/sinks/util/udp.rs +++ b/src/sinks/util/udp.rs @@ -1,7 +1,6 @@ use std::{ net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}, pin::Pin, - time::Duration, }; use async_trait::async_trait; @@ -112,11 +111,9 @@ 
impl UdpConnector { } } - const fn fresh_backoff() -> ExponentialBackoff { + fn fresh_backoff() -> ExponentialBackoff { // TODO: make configurable - ExponentialBackoff::from_millis(2) - .factor(250) - .max_delay(Duration::from_secs(60)) + ExponentialBackoff::default() } async fn connect(&self) -> Result { diff --git a/src/sinks/util/unix.rs b/src/sinks/util/unix.rs index 887db46b76b5e..cb9dadb934b3d 100644 --- a/src/sinks/util/unix.rs +++ b/src/sinks/util/unix.rs @@ -3,7 +3,6 @@ use std::{ os::fd::{AsFd, BorrowedFd}, path::PathBuf, pin::Pin, - time::Duration, }; use async_trait::async_trait; @@ -124,11 +123,9 @@ impl UnixConnector { Self { path, mode } } - const fn fresh_backoff() -> ExponentialBackoff { + fn fresh_backoff() -> ExponentialBackoff { // TODO: make configurable - ExponentialBackoff::from_millis(2) - .factor(250) - .max_delay(Duration::from_secs(60)) + ExponentialBackoff::default() } async fn connect(&self) -> Result { diff --git a/src/sources/aws_s3/sqs.rs b/src/sources/aws_s3/sqs.rs index a7d972bef167a..ce7dceafbc5e1 100644 --- a/src/sources/aws_s3/sqs.rs +++ b/src/sources/aws_s3/sqs.rs @@ -402,9 +402,7 @@ impl IngestorProcess { log_namespace, bytes_received: register!(BytesReceived::from(Protocol::HTTP)), events_received: register!(EventsReceived), - backoff: ExponentialBackoff::from_millis(2) - .factor(250) - .max_delay(Duration::from_secs(30)), + backoff: ExponentialBackoff::default().max_delay(Duration::from_secs(30)), } } From 9c3e7ee88805609492238e4994fb621df90244e1 Mon Sep 17 00:00:00 2001 From: Bruce Guenter Date: Mon, 17 Nov 2025 10:44:02 -0600 Subject: [PATCH 095/227] enhancement(datadog_agent source): Add request timeout support (#24245) * Add error type for source sender timeout error * Add common internal event for timed out events * Add timeout support to `SourceSender` * Add timeout support to `trait SourceConfig` * Add timeout support to `datadog_agent` source * Rewrite ComponentEventsTimedOut with `registered_event!` * Expanded docs 
* Fix `SourceSender` not handling timeout on batch sends * Fix type of timeout config parameter * Update docs * Rename `timedout` to `timed_out` * Update changelog * Fix changelog --- .../datadog-agent-timeout.enhancement.md | 7 + .../component_events_timed_out.rs | 23 +++ lib/vector-common/src/internal_event/mod.rs | 2 + lib/vector-core/src/source_sender/builder.rs | 12 +- lib/vector-core/src/source_sender/errors.rs | 59 ++----- lib/vector-core/src/source_sender/mod.rs | 2 +- lib/vector-core/src/source_sender/output.rs | 83 +++++++--- lib/vector-core/src/source_sender/sender.rs | 36 +++-- lib/vector-core/src/source_sender/tests.rs | 86 ++++++++-- src/config/source.rs | 8 +- src/sources/aws_kinesis_firehose/errors.rs | 11 +- src/sources/aws_kinesis_firehose/handlers.rs | 18 ++- src/sources/aws_s3/sqs.rs | 8 +- src/sources/datadog_agent/mod.rs | 56 +++++-- src/sources/datadog_agent/tests.rs | 149 ++++++++++++++++-- src/sources/kafka.rs | 2 +- src/sources/socket/mod.rs | 2 +- src/sources/splunk_hec/mod.rs | 16 +- src/sources/statsd/mod.rs | 6 +- src/test_util/mock/mod.rs | 12 +- src/topology/builder.rs | 11 +- .../components/sources/datadog_agent.cue | 24 +++ .../sources/generated/datadog_agent.cue | 15 ++ .../components/sources/internal_metrics.cue | 12 ++ 24 files changed, 500 insertions(+), 160 deletions(-) create mode 100644 changelog.d/datadog-agent-timeout.enhancement.md create mode 100644 lib/vector-common/src/internal_event/component_events_timed_out.rs diff --git a/changelog.d/datadog-agent-timeout.enhancement.md b/changelog.d/datadog-agent-timeout.enhancement.md new file mode 100644 index 0000000000000..a93475c1840f9 --- /dev/null +++ b/changelog.d/datadog-agent-timeout.enhancement.md @@ -0,0 +1,7 @@ +Added support for configurable request timeouts to the `datadog_agent` source. 
+ + This change also introduces two new internal metrics: + - `component_timed_out_events_total` - Counter tracking the number of events that timed out + - `component_timed_out_requests_total` - Counter tracking the number of requests that timed out + +authors: bruceg diff --git a/lib/vector-common/src/internal_event/component_events_timed_out.rs b/lib/vector-common/src/internal_event/component_events_timed_out.rs new file mode 100644 index 0000000000000..bf138dd1481c7 --- /dev/null +++ b/lib/vector-common/src/internal_event/component_events_timed_out.rs @@ -0,0 +1,23 @@ +use metrics::{Counter, counter}; + +use super::Count; + +crate::registered_event! { + ComponentEventsTimedOut { + reason: &'static str, + } => { + timed_out_events: Counter = counter!("component_timed_out_events_total"), + timed_out_requests: Counter = counter!("component_timed_out_requests_total"), + reason: &'static str = self.reason, + } + + fn emit(&self, data: Count) { + warn!( + message = "Events timed out", + events = data.0, + reason = self.reason, + ); + self.timed_out_events.increment(data.0 as u64); + self.timed_out_requests.increment(1); + } +} diff --git a/lib/vector-common/src/internal_event/mod.rs b/lib/vector-common/src/internal_event/mod.rs index 83c06260c0d5e..d1c7e47913caf 100644 --- a/lib/vector-common/src/internal_event/mod.rs +++ b/lib/vector-common/src/internal_event/mod.rs @@ -2,6 +2,7 @@ mod bytes_received; mod bytes_sent; pub mod cached_event; pub mod component_events_dropped; +pub mod component_events_timed_out; mod events_received; mod events_sent; mod optional_tag; @@ -15,6 +16,7 @@ pub use bytes_sent::BytesSent; #[allow(clippy::module_name_repetitions)] pub use cached_event::{RegisterTaggedInternalEvent, RegisteredEventCache}; pub use component_events_dropped::{ComponentEventsDropped, INTENTIONAL, UNINTENTIONAL}; +pub use component_events_timed_out::ComponentEventsTimedOut; pub use events_received::{EventsReceived, EventsReceivedHandle}; pub use 
events_sent::{DEFAULT_OUTPUT, EventsSent, TaggedEventsSent}; pub use metrics::SharedString; diff --git a/lib/vector-core/src/source_sender/builder.rs b/lib/vector-core/src/source_sender/builder.rs index e06d1fa87012b..452d890f0d863 100644 --- a/lib/vector-core/src/source_sender/builder.rs +++ b/lib/vector-core/src/source_sender/builder.rs @@ -1,4 +1,4 @@ -use std::collections::HashMap; +use std::{collections::HashMap, time::Duration}; use metrics::{Histogram, histogram}; use vector_buffers::topology::channel::LimitedReceiver; @@ -12,6 +12,7 @@ pub struct Builder { default_output: Option, named_outputs: HashMap, lag_time: Option, + timeout: Option, } impl Default for Builder { @@ -21,6 +22,7 @@ impl Default for Builder { default_output: None, named_outputs: Default::default(), lag_time: Some(histogram!(LAG_TIME_NAME)), + timeout: None, } } } @@ -32,6 +34,12 @@ impl Builder { self } + #[must_use] + pub fn with_timeout(mut self, timeout: Option) -> Self { + self.timeout = timeout; + self + } + pub fn add_source_output( &mut self, output: SourceOutput, @@ -51,6 +59,7 @@ impl Builder { lag_time, log_definition, output_id, + self.timeout, ); self.default_output = Some(output); rx @@ -62,6 +71,7 @@ impl Builder { lag_time, log_definition, output_id, + self.timeout, ); self.named_outputs.insert(name, output); rx diff --git a/lib/vector-core/src/source_sender/errors.rs b/lib/vector-core/src/source_sender/errors.rs index b0d9052c8747a..57f7be8752d64 100644 --- a/lib/vector-core/src/source_sender/errors.rs +++ b/lib/vector-core/src/source_sender/errors.rs @@ -1,61 +1,26 @@ use std::fmt; -use tokio::sync::mpsc; -use vector_buffers::topology::channel::SendError; +use vector_buffers::topology::channel; -use crate::event::{Event, EventArray}; - -#[derive(Clone, Debug)] -pub struct ClosedError; - -impl fmt::Display for ClosedError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.write_str("Sender is closed.") - } -} - -impl std::error::Error for ClosedError {} - 
-impl From> for ClosedError { - fn from(_: mpsc::error::SendError) -> Self { - Self - } +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum SendError { + Timeout, + Closed, } -impl From> for ClosedError { - fn from(_: mpsc::error::SendError) -> Self { - Self +impl From> for SendError { + fn from(_: channel::SendError) -> Self { + Self::Closed } } -impl From> for ClosedError { - fn from(_: SendError) -> Self { - Self - } -} - -#[derive(Debug)] -pub enum StreamSendError { - Closed(ClosedError), - Stream(E), -} - -impl fmt::Display for StreamSendError -where - E: fmt::Display, -{ +impl fmt::Display for SendError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { - StreamSendError::Closed(e) => e.fmt(f), - StreamSendError::Stream(e) => e.fmt(f), + Self::Timeout => f.write_str("Send timed out."), + Self::Closed => f.write_str("Sender is closed."), } } } -impl std::error::Error for StreamSendError where E: std::error::Error {} - -impl From for StreamSendError { - fn from(e: ClosedError) -> Self { - StreamSendError::Closed(e) - } -} +impl std::error::Error for SendError {} diff --git a/lib/vector-core/src/source_sender/mod.rs b/lib/vector-core/src/source_sender/mod.rs index b6615c106b608..c8af2db8bbf87 100644 --- a/lib/vector-core/src/source_sender/mod.rs +++ b/lib/vector-core/src/source_sender/mod.rs @@ -13,7 +13,7 @@ mod sender; mod tests; pub use builder::Builder; -pub use errors::{ClosedError, StreamSendError}; +pub use errors::SendError; use output::Output; pub use sender::{SourceSender, SourceSenderItem}; diff --git a/lib/vector-core/src/source_sender/output.rs b/lib/vector-core/src/source_sender/output.rs index 20a122d7f485f..88556eae08527 100644 --- a/lib/vector-core/src/source_sender/output.rs +++ b/lib/vector-core/src/source_sender/output.rs @@ -1,4 +1,9 @@ -use std::{fmt, num::NonZeroUsize, sync::Arc, time::Instant}; +use std::{ + fmt, + num::NonZeroUsize, + sync::Arc, + time::{Duration, Instant}, +}; use chrono::Utc; use 
futures::{Stream, StreamExt as _}; @@ -11,13 +16,13 @@ use vector_buffers::{ use vector_common::{ byte_size_of::ByteSizeOf, internal_event::{ - self, ComponentEventsDropped, CountByteSize, EventsSent, InternalEventHandle as _, - Registered, UNINTENTIONAL, + self, ComponentEventsDropped, ComponentEventsTimedOut, Count, CountByteSize, EventsSent, + InternalEventHandle as _, RegisterInternalEvent as _, Registered, UNINTENTIONAL, }, }; use vrl::value::Value; -use super::{CHUNK_SIZE, ClosedError, SourceSenderItem}; +use super::{CHUNK_SIZE, SendError, SourceSenderItem}; use crate::{ EstimatedJsonEncodedSizeOf, config::{OutputId, log_schema}, @@ -52,6 +57,15 @@ impl UnsentEventCount { const fn discard(&mut self) { self.count = 0; } + + fn timed_out(&mut self) { + ComponentEventsTimedOut { + reason: "Source send timed out.", + } + .register() + .emit(Count(self.count)); + self.count = 0; + } } impl Drop for UnsentEventCount { @@ -76,6 +90,7 @@ pub(super) struct Output { /// The OutputId related to this source sender. This is set as the `upstream_id` in /// `EventMetadata` for all event sent through here. 
id: Arc, + timeout: Option, } #[expect(clippy::missing_fields_in_debug)] @@ -84,6 +99,7 @@ impl fmt::Debug for Output { fmt.debug_struct("Output") .field("sender", &self.sender) .field("output_id", &self.id) + .field("timeout", &self.timeout) // `metrics::Histogram` is missing `impl Debug` .finish() } @@ -96,6 +112,7 @@ impl Output { lag_time: Option, log_definition: Option>, output_id: OutputId, + timeout: Option, ) -> (Self, LimitedReceiver) { let (tx, rx) = channel::limited(MemoryBufferSize::MaxEvents(NonZeroUsize::new(n).unwrap())); ( @@ -107,6 +124,7 @@ impl Output { ))), log_definition, id: Arc::new(output_id), + timeout, }, rx, ) @@ -116,7 +134,7 @@ impl Output { &mut self, mut events: EventArray, unsent_event_count: &mut UnsentEventCount, - ) -> Result<(), ClosedError> { + ) -> Result<(), SendError> { let send_reference = Instant::now(); let reference = Utc::now().timestamp_millis(); events @@ -133,31 +151,51 @@ impl Output { let byte_size = events.estimated_json_encoded_size_of(); let count = events.len(); - self.sender - .send(SourceSenderItem { - events, - send_reference, - }) - .await - .map_err(|_| ClosedError)?; + self.send_with_timeout(events, send_reference).await?; self.events_sent.emit(CountByteSize(count, byte_size)); unsent_event_count.decr(count); Ok(()) } + async fn send_with_timeout( + &mut self, + events: EventArray, + send_reference: Instant, + ) -> Result<(), SendError> { + let item = SourceSenderItem { + events, + send_reference, + }; + if let Some(timeout) = self.timeout { + match tokio::time::timeout(timeout, self.sender.send(item)).await { + Ok(Ok(())) => Ok(()), + Ok(Err(error)) => Err(error.into()), + Err(_elapsed) => Err(SendError::Timeout), + } + } else { + self.sender.send(item).await.map_err(Into::into) + } + } + pub(super) async fn send_event( &mut self, event: impl Into, - ) -> Result<(), ClosedError> { + ) -> Result<(), SendError> { let event: EventArray = event.into(); // It's possible that the caller stops polling this 
future while it is blocked waiting // on `self.send()`. When that happens, we use `UnsentEventCount` to correctly emit // `ComponentEventsDropped` events. let mut unsent_event_count = UnsentEventCount::new(event.len()); - self.send(event, &mut unsent_event_count).await + self.send(event, &mut unsent_event_count) + .await + .inspect_err(|error| { + if let SendError::Timeout = error { + unsent_event_count.timed_out(); + } + }) } - pub(super) async fn send_event_stream(&mut self, events: S) -> Result<(), ClosedError> + pub(super) async fn send_event_stream(&mut self, events: S) -> Result<(), SendError> where S: Stream + Unpin, E: Into + ByteSizeOf, @@ -169,7 +207,7 @@ impl Output { Ok(()) } - pub(super) async fn send_batch(&mut self, events: I) -> Result<(), ClosedError> + pub(super) async fn send_batch(&mut self, events: I) -> Result<(), SendError> where E: Into + ByteSizeOf, I: IntoIterator, @@ -183,10 +221,15 @@ impl Output { for events in array::events_into_arrays(events, Some(CHUNK_SIZE)) { self.send(events, &mut unsent_event_count) .await - .inspect_err(|_| { - // The unsent event count is discarded here because the callee emits the - // `StreamClosedError`. - unsent_event_count.discard(); + .inspect_err(|error| match error { + SendError::Timeout => { + unsent_event_count.timed_out(); + } + SendError::Closed => { + // The unsent event count is discarded here because the callee emits the + // `StreamClosedError`. 
+ unsent_event_count.discard(); + } })?; } Ok(()) diff --git a/lib/vector-core/src/source_sender/sender.rs b/lib/vector-core/src/source_sender/sender.rs index 88d6f13f97b45..8bbf09404e755 100644 --- a/lib/vector-core/src/source_sender/sender.rs +++ b/lib/vector-core/src/source_sender/sender.rs @@ -1,3 +1,5 @@ +#[cfg(any(test, feature = "test"))] +use std::time::Duration; use std::{collections::HashMap, time::Instant}; use futures::Stream; @@ -18,7 +20,7 @@ use vector_common::{ json_size::JsonSize, }; -use super::{Builder, ClosedError, Output}; +use super::{Builder, Output, SendError}; #[cfg(any(test, feature = "test"))] use super::{LAG_TIME_NAME, TEST_BUFFER_SIZE}; use crate::{ @@ -101,14 +103,23 @@ impl SourceSender { } #[cfg(any(test, feature = "test"))] - pub fn new_test_sender_with_buffer(n: usize) -> (Self, LimitedReceiver) { + pub fn new_test_sender_with_options( + n: usize, + timeout: Option, + ) -> (Self, LimitedReceiver) { let lag_time = Some(histogram!(LAG_TIME_NAME)); let output_id = OutputId { component: "test".to_string().into(), port: None, }; - let (default_output, rx) = - Output::new_with_buffer(n, DEFAULT_OUTPUT.to_owned(), lag_time, None, output_id); + let (default_output, rx) = Output::new_with_buffer( + n, + DEFAULT_OUTPUT.to_owned(), + lag_time, + None, + output_id, + timeout, + ); ( Self { default_output: Some(default_output), @@ -120,14 +131,14 @@ impl SourceSender { #[cfg(any(test, feature = "test"))] pub fn new_test() -> (Self, impl Stream + Unpin) { - let (pipe, recv) = Self::new_test_sender_with_buffer(TEST_BUFFER_SIZE); + let (pipe, recv) = Self::new_test_sender_with_options(TEST_BUFFER_SIZE, None); let recv = recv.into_stream().flat_map(into_event_stream); (pipe, recv) } #[cfg(any(test, feature = "test"))] pub fn new_test_finalize(status: EventStatus) -> (Self, impl Stream + Unpin) { - let (pipe, recv) = Self::new_test_sender_with_buffer(TEST_BUFFER_SIZE); + let (pipe, recv) = Self::new_test_sender_with_options(TEST_BUFFER_SIZE, None); 
// In a source test pipeline, there is no sink to acknowledge // events, so we have to add a map to the receiver to handle the // finalization. @@ -146,7 +157,7 @@ impl SourceSender { pub fn new_test_errors( error_at: impl Fn(usize) -> bool, ) -> (Self, impl Stream + Unpin) { - let (pipe, recv) = Self::new_test_sender_with_buffer(TEST_BUFFER_SIZE); + let (pipe, recv) = Self::new_test_sender_with_options(TEST_BUFFER_SIZE, None); // In a source test pipeline, there is no sink to acknowledge // events, so we have to add a map to the receiver to handle the // finalization. @@ -180,7 +191,8 @@ impl SourceSender { component: "test".to_string().into(), port: Some(name.clone()), }; - let (output, recv) = Output::new_with_buffer(100, name.clone(), None, None, output_id); + let (output, recv) = + Output::new_with_buffer(100, name.clone(), None, None, output_id, None); let recv = recv.into_stream().map(move |mut item| { item.events.iter_events_mut().for_each(|mut event| { let metadata = event.metadata_mut(); @@ -201,14 +213,14 @@ impl SourceSender { /// Send an event to the default output. /// /// This internally handles emitting [EventsSent] and [ComponentEventsDropped] events. - pub async fn send_event(&mut self, event: impl Into) -> Result<(), ClosedError> { + pub async fn send_event(&mut self, event: impl Into) -> Result<(), SendError> { self.default_output_mut().send_event(event).await } /// Send a stream of events to the default output. /// /// This internally handles emitting [EventsSent] and [ComponentEventsDropped] events. - pub async fn send_event_stream(&mut self, events: S) -> Result<(), ClosedError> + pub async fn send_event_stream(&mut self, events: S) -> Result<(), SendError> where S: Stream + Unpin, E: Into + ByteSizeOf, @@ -219,7 +231,7 @@ impl SourceSender { /// Send a batch of events to the default output. /// /// This internally handles emitting [EventsSent] and [ComponentEventsDropped] events. 
- pub async fn send_batch(&mut self, events: I) -> Result<(), ClosedError> + pub async fn send_batch(&mut self, events: I) -> Result<(), SendError> where E: Into + ByteSizeOf, I: IntoIterator, @@ -231,7 +243,7 @@ impl SourceSender { /// Send a batch of events event to a named output. /// /// This internally handles emitting [EventsSent] and [ComponentEventsDropped] events. - pub async fn send_batch_named(&mut self, name: &str, events: I) -> Result<(), ClosedError> + pub async fn send_batch_named(&mut self, name: &str, events: I) -> Result<(), SendError> where E: Into + ByteSizeOf, I: IntoIterator, diff --git a/lib/vector-core/src/source_sender/tests.rs b/lib/vector-core/src/source_sender/tests.rs index a659c196a3a3e..113fe4f7e7f70 100644 --- a/lib/vector-core/src/source_sender/tests.rs +++ b/lib/vector-core/src/source_sender/tests.rs @@ -1,5 +1,6 @@ use chrono::{DateTime, Duration, Utc}; use rand::{Rng, rng}; +use std::time::{Duration as StdDuration, Instant}; use tokio::time::timeout; use vrl::event_path; @@ -97,7 +98,7 @@ async fn emit_and_test(make_event: impl FnOnce(DateTime) -> Event) { #[tokio::test] async fn emits_component_discarded_events_total_for_send_event() { metrics::init_test(); - let (mut sender, _recv) = SourceSender::new_test_sender_with_buffer(1); + let (mut sender, _recv) = SourceSender::new_test_sender_with_options(1, None); let event = Event::Metric(Metric::new( "name", @@ -138,7 +139,7 @@ async fn emits_component_discarded_events_total_for_send_event() { #[expect(clippy::cast_precision_loss)] async fn emits_component_discarded_events_total_for_send_batch() { metrics::init_test(); - let (mut sender, _recv) = SourceSender::new_test_sender_with_buffer(1); + let (mut sender, _recv) = SourceSender::new_test_sender_with_options(1, None); let expected_drop = 100; let events: Vec = (0..(CHUNK_SIZE + expected_drop)) @@ -159,18 +160,81 @@ async fn emits_component_discarded_events_total_for_send_batch() { .await; assert!(res.is_err(), "Send should have 
timed out."); - let component_discarded_events_total = Controller::get() + let metrics = get_component_metrics(); + assert_no_metric(&metrics, "component_timed_out_events_total"); + assert_no_metric(&metrics, "component_timed_out_requests_total"); + assert_counter_metric( + &metrics, + "component_discarded_events_total", + expected_drop as f64, + ); +} + +#[tokio::test] +async fn times_out_send_event_with_timeout() { + metrics::init_test(); + + let timeout_duration = StdDuration::from_millis(10); + let (mut sender, _recv) = SourceSender::new_test_sender_with_options(1, Some(timeout_duration)); + + let event = Event::Metric(Metric::new( + "name", + MetricKind::Absolute, + MetricValue::Gauge { value: 123.4 }, + )); + + sender + .send_event(event.clone()) + .await + .expect("First send should succeed"); + + let start = Instant::now(); + let result = sender.send_event(event).await; + let elapsed = start.elapsed(); + + assert!( + matches!(result, Err(SendError::Timeout)), + "Send should return a timeout error." 
+ ); + assert!( + elapsed >= timeout_duration, + "Send did not wait for the configured timeout" + ); + assert!(elapsed <= timeout_duration * 2, "Send waited too long"); + + let metrics = get_component_metrics(); + assert_no_metric(&metrics, "component_discarded_events_total"); + assert_counter_metric(&metrics, "component_timed_out_events_total", 1.0); + assert_counter_metric(&metrics, "component_timed_out_requests_total", 1.0); +} + +fn get_component_metrics() -> Vec { + Controller::get() .expect("There must be a controller") .capture_metrics() .into_iter() - .filter(|metric| metric.name() == "component_discarded_events_total") - .collect::>(); - assert_eq!(component_discarded_events_total.len(), 1); + .filter(|metric| metric.name().starts_with("component_")) + .collect() +} - let component_discarded_events_total = &component_discarded_events_total[0]; - let MetricValue::Counter { value } = component_discarded_events_total.value() else { - panic!("component_discarded_events_total has invalid type") - }; +fn assert_no_metric(metrics: &[Metric], name: &str) { + assert!( + !metrics.iter().any(|metric| metric.name() == name), + "Metric {name} should not be present" + ); +} - assert_eq!(*value, expected_drop as f64,); +fn assert_counter_metric(metrics: &[Metric], name: &str, expected: f64) { + let mut filter = metrics.iter().filter(|metric| metric.name() == name); + let Some(metric) = filter.next() else { + panic!("Metric {name} should be present"); + }; + let MetricValue::Counter { value } = metric.value() else { + panic!("Metric {name} should be a counter"); + }; + assert_eq!(*value, expected); + assert!( + filter.next().is_none(), + "Only one {name} metric should be present" + ); } diff --git a/src/config/source.rs b/src/config/source.rs index 25fd6ab72915b..54e3c409f1e1f 100644 --- a/src/config/source.rs +++ b/src/config/source.rs @@ -1,4 +1,4 @@ -use std::{cell::RefCell, collections::HashMap}; +use std::{cell::RefCell, collections::HashMap, time::Duration}; use 
async_trait::async_trait; use dyn_clone::DynClone; @@ -120,6 +120,12 @@ pub trait SourceConfig: DynClone + NamedComponent + core::fmt::Debug + Send + Sy /// well as emit contextual warnings when end-to-end acknowledgements are enabled, but the /// topology as configured does not actually support the use of end-to-end acknowledgements. fn can_acknowledge(&self) -> bool; + + /// If this source supports timeout returns from the `SourceSender` and the configuration + /// provides a timeout value, return it here and the `out` channel will be configured with it. + fn send_timeout(&self) -> Option { + None + } } dyn_clone::clone_trait_object!(SourceConfig); diff --git a/src/sources/aws_kinesis_firehose/errors.rs b/src/sources/aws_kinesis_firehose/errors.rs index baf8463e5528b..24c56d5a797de 100644 --- a/src/sources/aws_kinesis_firehose/errors.rs +++ b/src/sources/aws_kinesis_firehose/errors.rs @@ -35,15 +35,8 @@ pub enum RequestError { source: std::io::Error, request_id: String, }, - #[snafu(display( - "Could not forward events for request {}, downstream is closed: {}", - request_id, - source - ))] - ShuttingDown { - source: vector_lib::source_sender::ClosedError, - request_id: String, - }, + #[snafu(display("Could not forward events for request {request_id}, downstream is closed"))] + ShuttingDown { request_id: String }, #[snafu(display("Unsupported encoding: {}", encoding))] UnsupportedEncoding { encoding: String, diff --git a/src/sources/aws_kinesis_firehose/handlers.rs b/src/sources/aws_kinesis_firehose/handlers.rs index 551f96f3cd4c7..b9eefc857f3c3 100644 --- a/src/sources/aws_kinesis_firehose/handlers.rs +++ b/src/sources/aws_kinesis_firehose/handlers.rs @@ -18,6 +18,7 @@ use vector_lib::{ ByteSize, BytesReceived, CountByteSize, InternalEventHandle as _, Registered, }, lookup::{PathPrefix, metadata_path, path}, + source_sender::SendError, }; use vrl::compiler::SecretTarget; use warp::reject; @@ -143,13 +144,16 @@ pub(super) async fn firehose( } let count = 
events.len(); - if let Err(error) = context.out.send_batch(events).await { - emit!(StreamClosedError { count }); - let error = RequestError::ShuttingDown { - request_id: request_id.clone(), - source: error, - }; - warp::reject::custom(error); + match context.out.send_batch(events).await { + Ok(()) => (), + Err(SendError::Closed) => { + emit!(StreamClosedError { count }); + let error = RequestError::ShuttingDown { + request_id: request_id.clone(), + }; + warp::reject::custom(error); + } + Err(SendError::Timeout) => unreachable!("No timeout is configured here"), } drop(batch); diff --git a/src/sources/aws_s3/sqs.rs b/src/sources/aws_s3/sqs.rs index ce7dceafbc5e1..ed7559226d6a1 100644 --- a/src/sources/aws_s3/sqs.rs +++ b/src/sources/aws_s3/sqs.rs @@ -38,6 +38,7 @@ use vector_lib::{ ByteSize, BytesReceived, CountByteSize, InternalEventHandle as _, Protocol, Registered, }, lookup::{PathPrefix, metadata_path, path}, + source_sender::SendError, }; use crate::{ @@ -232,7 +233,7 @@ pub enum ProcessingError { }, #[snafu(display("Failed to flush all of s3://{}/{}: {}", bucket, key, source))] PipelineSend { - source: vector_lib::source_sender::ClosedError, + source: vector_lib::source_sender::SendError, bucket: String, key: String, }, @@ -781,11 +782,12 @@ impl IngestorProcess { let send_error = match self.out.send_event_stream(&mut stream).await { Ok(_) => None, - Err(_) => { + Err(SendError::Closed) => { let (count, _) = stream.size_hint(); emit!(StreamClosedError { count }); - Some(vector_lib::source_sender::ClosedError) + Some(SendError::Closed) } + Err(SendError::Timeout) => unreachable!("No timeout is configured here"), }; // Up above, `lines` captures `read_error`, and eventually is captured by `stream`, diff --git a/src/sources/datadog_agent/mod.rs b/src/sources/datadog_agent/mod.rs index 0ee1ee8bd5ac5..ac84188df7c78 100644 --- a/src/sources/datadog_agent/mod.rs +++ b/src/sources/datadog_agent/mod.rs @@ -27,6 +27,7 @@ use http::StatusCode; use hyper::{Server, 
service::make_service_fn}; use regex::Regex; use serde::{Deserialize, Serialize}; +use serde_with::serde_as; use snafu::Snafu; use tokio::net::TcpStream; use tower::ServiceBuilder; @@ -39,6 +40,7 @@ use vector_lib::{ internal_event::{EventsReceived, Registered}, lookup::owned_value_path, schema::meaning, + source_sender::SendError, tls::MaybeTlsIncomingStream, }; use vrl::{ @@ -73,6 +75,7 @@ pub const TRACES: &str = "traces"; "datadog_agent", "Receive logs, metrics, and traces collected by a Datadog Agent." ))] +#[serde_as] #[derive(Clone, Debug)] pub struct DatadogAgentConfig { /// The socket address to accept connections on. @@ -150,6 +153,18 @@ pub struct DatadogAgentConfig { #[configurable(derived)] #[serde(default)] keepalive: KeepaliveConfig, + + /// The timeout before responding to requests with a HTTP 503 Service Unavailable error. + /// + /// If not set, responses to completed requests will block indefinitely until connected + /// transforms or sinks are ready to receive the events. When this happens, the sending Datadog + /// Agent will eventually time out the request and drop the connection, resulting in Vector + /// generating an "Events dropped." error and incrementing the `component_discarded_events_total` + /// internal metric. By setting this option to a value less than the Agent's timeout, Vector + /// will instead respond to the Agent with a HTTP 503 Service Unavailable error, emit a warning, + /// and increment the `component_timed_out_events_total` internal metric instead. 
+ #[serde_as(as = "Option>")] + send_timeout_secs: Option, } impl GenerateConfig for DatadogAgentConfig { @@ -169,6 +184,7 @@ impl GenerateConfig for DatadogAgentConfig { split_metric_namespace: true, log_namespace: Some(false), keepalive: KeepaliveConfig::default(), + send_timeout_secs: None, }) .unwrap() } @@ -201,8 +217,12 @@ impl SourceConfig for DatadogAgentConfig { self.split_metric_namespace, ); let listener = tls.bind(&self.address).await?; - let acknowledgements = cx.do_acknowledgements(self.acknowledgements); - let filters = source.build_warp_filters(cx.out, acknowledgements, self)?; + let handler = RequestHandler { + acknowledgements: cx.do_acknowledgements(self.acknowledgements), + multiple_outputs: self.multiple_outputs, + out: cx.out, + }; + let filters = source.build_warp_filters(handler, self)?; let shutdown = cx.shutdown; let keepalive_settings = self.keepalive.clone(); @@ -329,6 +349,10 @@ impl SourceConfig for DatadogAgentConfig { fn can_acknowledge(&self) -> bool { true } + + fn send_timeout(&self) -> Option { + self.send_timeout_secs.map(Duration::from_secs_f64) + } } #[derive(Clone, Copy, Debug, Snafu)] @@ -421,15 +445,9 @@ impl DatadogAgentSource { fn build_warp_filters( &self, - out: SourceSender, - acknowledgements: bool, + handler: RequestHandler, config: &DatadogAgentConfig, ) -> crate::Result> { - let handler = RequestHandler { - acknowledgements, - multiple_outputs: config.multiple_outputs, - out, - }; let mut filters = (!config.disable_logs).then(|| logs::build_warp_filter(handler.clone(), self.clone())); @@ -526,15 +544,25 @@ impl RequestHandler { let count = events.len(); let output = self.multiple_outputs.then_some(output); - if let Some(name) = output { + let result = if let Some(name) = output { self.out.send_batch_named(name, events).await } else { self.out.send_batch(events).await + }; + match result { + Ok(()) => {} + Err(SendError::Closed) => { + emit!(StreamClosedError { count }); + return 
Err(warp::reject::custom(ApiError::ServerShutdown)); + } + Err(SendError::Timeout) => { + return Ok(warp::reply::with_status( + "Service unavailable", + StatusCode::SERVICE_UNAVAILABLE, + ) + .into_response()); + } } - .map_err(|_| { - emit!(StreamClosedError { count }); - warp::reject::custom(ApiError::ServerShutdown) - })?; match receiver { None => Ok(warp::reply().into_response()), Some(receiver) => match receiver.await { diff --git a/src/sources/datadog_agent/tests.rs b/src/sources/datadog_agent/tests.rs index 168eb727c1671..955f5c636d8c4 100644 --- a/src/sources/datadog_agent/tests.rs +++ b/src/sources/datadog_agent/tests.rs @@ -3,6 +3,7 @@ use std::{ iter::FromIterator, net::SocketAddr, str, + time::Duration, }; use bytes::Bytes; @@ -14,6 +15,7 @@ use ordered_float::NotNan; use prost::Message; use quickcheck::{Arbitrary, Gen, QuickCheck, TestResult}; use similar_asserts::assert_eq; +use tokio::time::timeout; use vector_lib::{ codecs::{ BytesDecoder, BytesDeserializer, CharacterDelimitedDecoderConfig, @@ -63,6 +65,7 @@ const DD_API_SERIES_V1_PATH: &str = "/api/v1/series"; const DD_API_SERIES_V2_PATH: &str = "/api/v2/series"; const DD_API_SKETCHES_PATH: &str = "/api/beta/sketches"; const DD_API_TRACES_PATH: &str = "/api/v0.2/traces"; +const HTTP_REQUEST_TIMEOUT: Duration = Duration::from_secs(5); fn test_logs_schema_definition() -> schema::Definition { schema::Definition::empty_legacy_namespace().with_event_field( @@ -228,7 +231,60 @@ async fn source( SocketAddr, PortGuard, ) { - let (mut sender, recv) = SourceSender::new_test_finalize(status); + let (sender, recv) = SourceSender::new_test_finalize(status); + let (logs_output, metrics_output, address, guard) = source_with_sender( + sender, + status, + acknowledgements, + store_api_key, + multiple_outputs, + split_metric_namespace, + ) + .await; + (recv, logs_output, metrics_output, address, guard) +} + +async fn source_with_timeout( + status: EventStatus, + acknowledgements: bool, + store_api_key: bool, + 
multiple_outputs: bool, + split_metric_namespace: bool, + send_timeout: Duration, +) -> ( + impl Stream + Unpin, + Option>, + Option>, + SocketAddr, + PortGuard, +) { + let (sender, recv) = SourceSender::new_test_sender_with_options(1, Some(send_timeout)); + let (logs_output, metrics_output, address, guard) = source_with_sender( + sender, + status, + acknowledgements, + store_api_key, + multiple_outputs, + split_metric_namespace, + ) + .await; + let recv = recv.into_stream().flat_map(into_event_stream); + (recv, logs_output, metrics_output, address, guard) +} + +async fn source_with_sender( + mut sender: SourceSender, + status: EventStatus, + acknowledgements: bool, + store_api_key: bool, + multiple_outputs: bool, + split_metric_namespace: bool, +) -> ( + Option>, + Option>, + SocketAddr, + PortGuard, +) { let mut logs_output = None; let mut metrics_output = None; if multiple_outputs { @@ -243,7 +299,7 @@ async fn source( .flat_map(into_event_stream), ); } - let (_guard, address) = next_addr(); + let (guard, address) = next_addr(); let config = toml::from_str::(&format!( indoc! 
{ r#" address = "{}" @@ -264,19 +320,23 @@ async fn source( config.build(context).await.unwrap().await.unwrap(); }); wait_for_tcp(address).await; - (recv, logs_output, metrics_output, address, _guard) + (logs_output, metrics_output, address, guard) } async fn send_with_path(address: SocketAddr, body: &str, headers: HeaderMap, path: &str) -> u16 { - reqwest::Client::new() - .post(format!("http://{address}{path}")) - .headers(headers) - .body(body.to_owned()) - .send() - .await - .unwrap() - .status() - .as_u16() + timeout( + HTTP_REQUEST_TIMEOUT, + reqwest::Client::new() + .post(format!("http://{address}{path}")) + .headers(headers) + .body(body.to_owned()) + .send(), + ) + .await + .expect("send_with_path request timed out") + .unwrap() + .status() + .as_u16() } async fn send_and_collect( @@ -678,6 +738,68 @@ async fn delivery_failure() { .await; } +#[tokio::test] +async fn send_timeout_returns_service_unavailable() { + trace_init(); + let (rx, _, _, addr, _guard) = source_with_timeout( + EventStatus::Delivered, + false, + true, + false, + true, + Duration::from_millis(50), + ) + .await; + + let body = serde_json::to_string(&[LogMsg { + message: Bytes::from("foo"), + timestamp: Utc + .timestamp_opt(123, 0) + .single() + .expect("invalid timestamp"), + hostname: Bytes::from("festeburg"), + status: Bytes::from("notice"), + service: Bytes::from("vector"), + ddsource: Bytes::from("curl"), + ddtags: Bytes::from("one,two,three"), + }]) + .unwrap(); + + assert_eq!( + 200, + send_with_path(addr, &body, HeaderMap::new(), DD_API_LOGS_V1_PATH).await + ); + + assert_eq!( + 503, + send_with_path(addr, &body, HeaderMap::new(), DD_API_LOGS_V1_PATH).await + ); + drop(rx); +} + +#[test] +fn parse_config_with_send_timeout_secs() { + let config = toml::from_str::(indoc! 
{ r#" + address = "0.0.0.0:8012" + send_timeout_secs = 1.5 + "#}) + .unwrap(); + + assert_eq!(config.send_timeout_secs, Some(1.5)); + assert_eq!(config.send_timeout(), Some(Duration::from_secs_f64(1.5))); +} + +#[test] +fn parse_config_without_send_timeout_secs() { + let config = toml::from_str::(indoc! { r#" + address = "0.0.0.0:8012" + "#}) + .unwrap(); + + assert_eq!(config.send_timeout_secs, None); + assert_eq!(config.send_timeout(), None); +} + #[tokio::test] async fn ignores_disabled_acknowledgements() { assert_source_compliance(&HTTP_PUSH_SOURCE_TAGS, async { @@ -1498,6 +1620,7 @@ fn test_config_outputs_with_disabled_data_types() { split_metric_namespace: true, log_namespace: Some(false), keepalive: Default::default(), + send_timeout_secs: None, }; let outputs: Vec = config @@ -1941,6 +2064,7 @@ fn test_config_outputs() { split_metric_namespace: true, log_namespace: Some(false), keepalive: Default::default(), + send_timeout_secs: None, }; let mut outputs = config @@ -2613,6 +2737,7 @@ impl ValidatableComponent for DatadogAgentConfig { split_metric_namespace: true, log_namespace: Some(false), keepalive: Default::default(), + send_timeout_secs: None, }; let log_namespace: LogNamespace = config.log_namespace.unwrap_or_default().into(); diff --git a/src/sources/kafka.rs b/src/sources/kafka.rs index f92e85eb2e424..7f4895c8ed698 100644 --- a/src/sources/kafka.rs +++ b/src/sources/kafka.rs @@ -1778,7 +1778,7 @@ mod integration_test { delay: Duration, status: EventStatus, ) -> (SourceSender, impl Stream + Unpin) { - let (pipe, recv) = SourceSender::new_test_sender_with_buffer(100); + let (pipe, recv) = SourceSender::new_test_sender_with_options(100, None); let recv = recv.into_stream(); let recv = recv.then(move |item| async move { let mut events = item.events; diff --git a/src/sources/socket/mod.rs b/src/sources/socket/mod.rs index b821cf4e0f257..166232a89a8ea 100644 --- a/src/sources/socket/mod.rs +++ b/src/sources/socket/mod.rs @@ -811,7 +811,7 @@ mod test { // 
shutdown. let (guard, addr) = next_addr(); - let (source_tx, source_rx) = SourceSender::new_test_sender_with_buffer(10_000); + let (source_tx, source_rx) = SourceSender::new_test_sender_with_options(10_000, None); let source_key = ComponentKey::from("tcp_shutdown_infinite_stream"); let (source_cx, mut shutdown) = SourceContext::new_shutdown(&source_key, source_tx); diff --git a/src/sources/splunk_hec/mod.rs b/src/sources/splunk_hec/mod.rs index f3158a46cf6dc..b020383b9fbda 100644 --- a/src/sources/splunk_hec/mod.rs +++ b/src/sources/splunk_hec/mod.rs @@ -31,7 +31,7 @@ use vector_lib::{ lookup::{self, event_path, lookup_v2::OptionalValuePath, owned_value_path}, schema::meaning, sensitive_string::SensitiveString, - source_sender::ClosedError, + source_sender::SendError, tls::MaybeTlsIncomingStream, }; use vrl::{ @@ -433,10 +433,16 @@ impl SplunkSource { } } - if !events.is_empty() - && let Err(ClosedError) = out.send_batch(events).await - { - return Err(Rejection::from(ApiError::ServerShutdown)); + if !events.is_empty() { + match out.send_batch(events).await { + Ok(()) => (), + Err(SendError::Closed) => { + return Err(Rejection::from(ApiError::ServerShutdown)); + } + Err(SendError::Timeout) => { + unreachable!("No timeout is configured for this source.") + } + } } if let Some(error) = error { diff --git a/src/sources/statsd/mod.rs b/src/sources/statsd/mod.rs index 7f7cf027d397d..2b55b861012a0 100644 --- a/src/sources/statsd/mod.rs +++ b/src/sources/statsd/mod.rs @@ -543,7 +543,7 @@ mod test { }); let component_key = ComponentKey::from("statsd_conversion_disabled"); - let (tx, rx) = SourceSender::new_test_sender_with_buffer(4096); + let (tx, rx) = SourceSender::new_test_sender_with_options(4096, None); let (source_ctx, shutdown) = SourceContext::new_shutdown(&component_key, tx); let sink = statsd_config .build(source_ctx) @@ -580,7 +580,7 @@ mod test { // packet we send has a lot of metrics per packet. 
We could technically count them all up // and have a more accurate number here, but honestly, who cares? This is big enough. let component_key = ComponentKey::from("statsd"); - let (tx, rx) = SourceSender::new_test_sender_with_buffer(4096); + let (tx, rx) = SourceSender::new_test_sender_with_options(4096, None); let (source_ctx, shutdown) = SourceContext::new_shutdown(&component_key, tx); let sink = statsd_config .build(source_ctx) @@ -674,7 +674,7 @@ mod test { // packet we send has a lot of metrics per packet. We could technically count them all up // and have a more accurate number here, but honestly, who cares? This is big enough. let component_key = ComponentKey::from("statsd"); - let (tx, _rx) = SourceSender::new_test_sender_with_buffer(4096); + let (tx, _rx) = SourceSender::new_test_sender_with_options(4096, None); let (source_ctx, shutdown) = SourceContext::new_shutdown(&component_key, tx); let sink = statsd_config .build(source_ctx) diff --git a/src/test_util/mock/mod.rs b/src/test_util/mock/mod.rs index 77a3c861c547e..daf0fee5927da 100644 --- a/src/test_util/mock/mod.rs +++ b/src/test_util/mock/mod.rs @@ -31,12 +31,12 @@ pub fn backpressure_source(counter: &Arc) -> BackpressureSourceConf } pub fn basic_source() -> (SourceSender, BasicSourceConfig) { - let (tx, rx) = SourceSender::new_test_sender_with_buffer(1); + let (tx, rx) = SourceSender::new_test_sender_with_options(1, None); (tx, BasicSourceConfig::new(rx)) } pub fn basic_source_with_data(data: &str) -> (SourceSender, BasicSourceConfig) { - let (tx, rx) = SourceSender::new_test_sender_with_buffer(1); + let (tx, rx) = SourceSender::new_test_sender_with_options(1, None); (tx, BasicSourceConfig::new_with_data(rx, data)) } @@ -44,7 +44,7 @@ pub fn basic_source_with_event_counter( force_shutdown: bool, ) -> (SourceSender, BasicSourceConfig, Arc) { let event_counter = Arc::new(AtomicUsize::new(0)); - let (tx, rx) = SourceSender::new_test_sender_with_buffer(1); + let (tx, rx) = 
SourceSender::new_test_sender_with_options(1, None); let mut source = BasicSourceConfig::new_with_event_counter(rx, Arc::clone(&event_counter)); source.set_force_shutdown(force_shutdown); @@ -76,7 +76,7 @@ pub const fn backpressure_sink(num_to_consume: usize) -> BackpressureSinkConfig } pub fn basic_sink(channel_size: usize) -> (impl Stream, BasicSinkConfig) { - let (tx, rx) = SourceSender::new_test_sender_with_buffer(channel_size); + let (tx, rx) = SourceSender::new_test_sender_with_options(channel_size, None); let sink = BasicSinkConfig::new(tx, true); (rx.into_stream(), sink) } @@ -88,7 +88,7 @@ pub fn basic_sink_with_data( impl Stream + use<>, BasicSinkConfig, ) { - let (tx, rx) = SourceSender::new_test_sender_with_buffer(channel_size); + let (tx, rx) = SourceSender::new_test_sender_with_options(channel_size, None); let sink = BasicSinkConfig::new_with_data(tx, true, data); (rx.into_stream(), sink) } @@ -96,7 +96,7 @@ pub fn basic_sink_with_data( pub fn basic_sink_failing_healthcheck( channel_size: usize, ) -> (impl Stream, BasicSinkConfig) { - let (tx, rx) = SourceSender::new_test_sender_with_buffer(channel_size); + let (tx, rx) = SourceSender::new_test_sender_with_options(channel_size, None); let sink = BasicSinkConfig::new(tx, false); (rx.into_stream(), sink) } diff --git a/src/topology/builder.rs b/src/topology/builder.rs index 797b0bf3554f7..ffc8041a70724 100644 --- a/src/topology/builder.rs +++ b/src/topology/builder.rs @@ -260,7 +260,9 @@ impl<'a> Builder<'a> { key.id() ); - let mut builder = SourceSender::builder().with_buffer(*SOURCE_SENDER_BUFFER_SIZE); + let mut builder = SourceSender::builder() + .with_buffer(*SOURCE_SENDER_BUFFER_SIZE) + .with_timeout(source.inner.send_timeout()); let mut pumps = Vec::new(); let mut controls = HashMap::new(); let mut schema_definitions = HashMap::with_capacity(source_outputs.len()); @@ -343,8 +345,6 @@ impl<'a> Builder<'a> { }; let pump = Task::new(key.clone(), typetag, pump); - let pipeline = builder.build(); - 
let (shutdown_signal, force_shutdown_tripwire) = self .shutdown_coordinator .register_source(key, INTERNAL_SOURCES.contains(&typetag)); @@ -354,15 +354,14 @@ impl<'a> Builder<'a> { globals: self.config.global.clone(), enrichment_tables: enrichment_tables.clone(), shutdown: shutdown_signal, - out: pipeline, + out: builder.build(), proxy: ProxyConfig::merge_with_env(&self.config.global.proxy, &source.proxy), acknowledgements: source.sink_acknowledgements, schema_definitions, schema: self.config.schema, extra_context: self.extra_context.clone(), }; - let source = source.inner.build(context).await; - let server = match source { + let server = match source.inner.build(context).await { Err(error) => { self.errors.push(format!("Source \"{key}\": {error}")); continue; diff --git a/website/cue/reference/components/sources/datadog_agent.cue b/website/cue/reference/components/sources/datadog_agent.cue index 96e841889ab2a..52ac91a375bce 100644 --- a/website/cue/reference/components/sources/datadog_agent.cue +++ b/website/cue/reference/components/sources/datadog_agent.cue @@ -220,9 +220,33 @@ components: sources: datadog_agent: { duration distribution). """ } + request_timeouts: { + title: "Request timeout handling" + body: """ + When the Datadog Agent sends a request to this Vector source, and the source + blocks on sending the events in that request to the connected transforms or sinks, + the Agent will eventually time out the request and drop the connection. When that + happens, by default, Vector will emit an "Events dropped." error and increment + the `component_discarded_events_total` internal metric. + + However, while it is technically true that Vector has dropped the events, the + Agent will retry resending that request indefinitely, which means the events will + eventually be received unless the blockage above is permanent or the Agent is + killed before the request is accepted. 
+ + To prevent this potentially misleading telemetry, you can configure + the `send_timeout_secs` option to a + value _less than_ the Agent's timeout, which defaults to 10 seconds. + This will cause Vector to respond to the Agent when such blockages occur with a HTTP 503 + Service Unavailable response, emit a warning instead of an error, + and increment the `component_timed_out_requests_total` internal metric. + """ + } } telemetry: metrics: { + component_timed_out_events_total: components.sources.internal_metrics.output.metrics.component_timed_out_events_total + component_timed_out_requests_total: components.sources.internal_metrics.output.metrics.component_timed_out_requests_total http_server_handler_duration_seconds: components.sources.internal_metrics.output.metrics.http_server_handler_duration_seconds http_server_requests_received_total: components.sources.internal_metrics.output.metrics.http_server_requests_received_total http_server_responses_sent_total: components.sources.internal_metrics.output.metrics.http_server_responses_sent_total diff --git a/website/cue/reference/components/sources/generated/datadog_agent.cue b/website/cue/reference/components/sources/generated/datadog_agent.cue index 32393130abd24..d18e4410ad609 100644 --- a/website/cue/reference/components/sources/generated/datadog_agent.cue +++ b/website/cue/reference/components/sources/generated/datadog_agent.cue @@ -584,6 +584,21 @@ generated: components: sources: datadog_agent: configuration: { required: false type: bool: default: false } + send_timeout_secs: { + description: """ + The timeout before responding to requests with a HTTP 503 Service Unavailable error. + + If not set, responses to completed requests will block indefinitely until connected + transforms or sinks are ready to receive the events. When this happens, the sending Datadog + Agent will eventually time out the request and drop the connection, resulting Vector + generating an "Events dropped." 
error and incrementing the `component_discarded_events_total` + internal metric. By setting this option to a value less than the Agent's timeout, Vector + will instead respond to the Agent with a HTTP 503 Service Unavailable error, emit a warning, + and increment the `component_timed_out_events_total` internal metric instead. + """ + required: false + type: float: {} + } split_metric_namespace: { description: """ If this is set to `true`, metric names are split at the first '.' into a namespace and name. diff --git a/website/cue/reference/components/sources/internal_metrics.cue b/website/cue/reference/components/sources/internal_metrics.cue index 7498601b6aa9c..2acd6c2bfbc28 100644 --- a/website/cue/reference/components/sources/internal_metrics.cue +++ b/website/cue/reference/components/sources/internal_metrics.cue @@ -84,6 +84,18 @@ components: sources: internal_metrics: { default_namespace: "vector" tags: _internal_metrics_tags } + component_timed_out_events_total: { + description: "The total number of events for which this source responded with a timeout error." + type: "counter" + default_namespace: "vector" + tags: _component_tags + } + component_timed_out_requests_total: { + description: "The total number of requests for which this source responded with a timeout error." + type: "counter" + default_namespace: "vector" + tags: _component_tags + } connection_established_total: { description: "The total number of times a connection has been established." type: "counter" From bdb96ce5f6d0ab2558da7e2aba898f51860899db Mon Sep 17 00:00:00 2001 From: Dmitry Sergeenkov <92161795+ds-hystax@users.noreply.github.com> Date: Mon, 17 Nov 2025 22:00:36 +0400 Subject: [PATCH 096/227] enhancement(codecs): introduce an option to relax GELF validation (#24241) * enhancement(codecs): introduce an option to relax GELF validation This change adds a `validation` option to the GELF decoder. 
It allows disabling validation checks that are not performed by other GELF parsers (e.g., go-gelf). This change addresses issue #23458 * Update changelog.d/23458_add_validation_option_to_gelf_codec.enhancement.md Co-authored-by: Pavlos Rontidis * Update lib/codecs/src/decoding/format/gelf.rs Co-authored-by: Pavlos Rontidis * Fix CI checks --------- Co-authored-by: Pavlos Rontidis --- ...dation_option_to_gelf_codec.enhancement.md | 10 ++ lib/codecs/src/decoding/format/gelf.rs | 99 ++++++++++++++++--- .../sinks/generated/websocket_server.cue | 26 ++++- .../components/sources/generated/amqp.cue | 34 +++++-- .../generated/aws_kinesis_firehose.cue | 34 +++++-- .../components/sources/generated/aws_s3.cue | 34 +++++-- .../components/sources/generated/aws_sqs.cue | 34 +++++-- .../sources/generated/datadog_agent.cue | 34 +++++-- .../sources/generated/demo_logs.cue | 34 +++++-- .../components/sources/generated/exec.cue | 34 +++++-- .../sources/generated/file_descriptor.cue | 34 +++++-- .../sources/generated/gcp_pubsub.cue | 34 +++++-- .../sources/generated/heroku_logs.cue | 34 +++++-- .../components/sources/generated/http.cue | 34 +++++-- .../sources/generated/http_client.cue | 34 +++++-- .../sources/generated/http_server.cue | 34 +++++-- .../components/sources/generated/kafka.cue | 34 +++++-- .../components/sources/generated/mqtt.cue | 34 +++++-- .../components/sources/generated/nats.cue | 34 +++++-- .../components/sources/generated/pulsar.cue | 34 +++++-- .../components/sources/generated/redis.cue | 34 +++++-- .../components/sources/generated/socket.cue | 34 +++++-- .../components/sources/generated/stdin.cue | 34 +++++-- .../sources/generated/websocket.cue | 34 +++++-- 24 files changed, 666 insertions(+), 183 deletions(-) create mode 100644 changelog.d/23458_add_validation_option_to_gelf_codec.enhancement.md diff --git a/changelog.d/23458_add_validation_option_to_gelf_codec.enhancement.md b/changelog.d/23458_add_validation_option_to_gelf_codec.enhancement.md new file mode 
100644 index 0000000000000..169a703c32d15 --- /dev/null +++ b/changelog.d/23458_add_validation_option_to_gelf_codec.enhancement.md @@ -0,0 +1,10 @@ +The GELF decoder now supports a `validation` option with two modes: `strict` (default) and `relaxed`. When set to `relaxed`, the decoder will accept: + +- GELF versions other than 1.1 +- Additional fields without underscore prefixes +- Additional field names with special characters +- Additional field values of any type (not just strings/numbers) + +This allows Vector to parse GELF messages from sources that don't strictly follow the GELF specification. + +authors: ds-hystax diff --git a/lib/codecs/src/decoding/format/gelf.rs b/lib/codecs/src/decoding/format/gelf.rs index 544dd4d169937..377d437104cc4 100644 --- a/lib/codecs/src/decoding/format/gelf.rs +++ b/lib/codecs/src/decoding/format/gelf.rs @@ -36,6 +36,22 @@ pub struct GelfDeserializerConfig { pub gelf: GelfDeserializerOptions, } +/// Configures the decoding validation mode. +#[configurable_component] +#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)] +#[serde(rename_all = "snake_case")] +pub enum ValidationMode { + /// Uses strict validation that closely follows the GELF spec. + #[default] + Strict, + + /// Uses more relaxed validation that skips strict GELF specification checks. + /// + /// This mode will not treat specification violations as errors, allowing the decoder + /// to accept messages from sources that don't strictly follow the GELF spec. + Relaxed, +} + impl GelfDeserializerConfig { /// Creates a new `GelfDeserializerConfig`. pub fn new(options: GelfDeserializerOptions) -> Self { @@ -46,6 +62,7 @@ impl GelfDeserializerConfig { pub fn build(&self) -> GelfDeserializer { GelfDeserializer { lossy: self.gelf.lossy, + validation: self.gelf.validation, } } @@ -92,6 +109,10 @@ pub struct GelfDeserializerOptions { )] #[derivative(Default(value = "default_lossy()"))] pub lossy: bool, + + /// Configures the decoding validation mode. 
+ #[serde(default, skip_serializing_if = "vector_core::serde::is_default")] + pub validation: ValidationMode, } /// Deserializer that builds an `Event` from a byte frame containing a GELF log message. @@ -100,12 +121,14 @@ pub struct GelfDeserializerOptions { pub struct GelfDeserializer { #[derivative(Default(value = "default_lossy()"))] lossy: bool, + + validation: ValidationMode, } impl GelfDeserializer { /// Create a new `GelfDeserializer`. - pub fn new(lossy: bool) -> GelfDeserializer { - GelfDeserializer { lossy } + pub fn new(lossy: bool, validation: ValidationMode) -> GelfDeserializer { + GelfDeserializer { lossy, validation } } /// Builds a LogEvent from the parsed GelfMessage. @@ -114,7 +137,7 @@ impl GelfDeserializer { let mut log = LogEvent::from_str_legacy(parsed.short_message.to_string()); // GELF spec defines the version as 1.1 which has not changed since 2013 - if parsed.version != GELF_VERSION { + if self.validation == ValidationMode::Strict && parsed.version != GELF_VERSION { return Err( format!("{VERSION} does not match GELF spec version ({GELF_VERSION})").into(), ); @@ -159,7 +182,7 @@ impl GelfDeserializer { continue; } // per GELF spec, Additional field names must be prefixed with an underscore - if !key.starts_with('_') { + if self.validation == ValidationMode::Strict && !key.starts_with('_') { return Err(format!( "'{key}' field is invalid. \ Additional field names must be prefixed with an underscore." @@ -167,7 +190,7 @@ impl GelfDeserializer { .into()); } // per GELF spec, Additional field names must be characters dashes or dots - if !VALID_FIELD_REGEX.is_match(key) { + if self.validation == ValidationMode::Strict && !VALID_FIELD_REGEX.is_match(key) { return Err(format!( "'{key}' field contains invalid characters. Field names may \ contain only letters, numbers, underscores, dashes and dots." 
@@ -176,7 +199,7 @@ impl GelfDeserializer { } // per GELF spec, Additional field values must be either strings or numbers - if val.is_string() || val.is_number() { + if self.validation != ValidationMode::Strict || val.is_string() || val.is_number() { let vector_val: Value = val.into(); log.insert(event_path!(key.as_str()), vector_val); } else { @@ -244,8 +267,9 @@ mod tests { fn deserialize_gelf_input( input: &serde_json::Value, + options: GelfDeserializerOptions, ) -> vector_common::Result> { - let config = GelfDeserializerConfig::default(); + let config = GelfDeserializerConfig::new(options); let deserializer = config.build(); let buffer = Bytes::from(serde_json::to_vec(&input).unwrap()); deserializer.parse(buffer, LogNamespace::Legacy) @@ -272,7 +296,7 @@ mod tests { }); // Ensure that we can parse the gelf json successfully - let events = deserialize_gelf_input(&input).unwrap(); + let events = deserialize_gelf_input(&input, GelfDeserializerOptions::default()).unwrap(); assert_eq!(events.len(), 1); let log = events[0].as_log(); @@ -334,7 +358,8 @@ mod tests { SHORT_MESSAGE: "foobar", VERSION: "1.1", }); - let events = deserialize_gelf_input(&input).unwrap(); + let events = + deserialize_gelf_input(&input, GelfDeserializerOptions::default()).unwrap(); assert_eq!(events.len(), 1); let log = events[0].as_log(); assert!(log.contains(log_schema().message_key_target_path().unwrap())); @@ -348,7 +373,8 @@ mod tests { VERSION: "1.1", "_id": "S3creTz", }); - let events = deserialize_gelf_input(&input).unwrap(); + let events = + deserialize_gelf_input(&input, GelfDeserializerOptions::default()).unwrap(); assert_eq!(events.len(), 1); let log = events[0].as_log(); assert!(!log.contains(event_path!("_id"))); @@ -359,7 +385,7 @@ mod tests { #[test] fn gelf_deserializing_err() { fn validate_err(input: &serde_json::Value) { - assert!(deserialize_gelf_input(input).is_err()); + assert!(deserialize_gelf_input(input, GelfDeserializerOptions::default()).is_err()); } // invalid 
character in field name validate_err(&json!({ @@ -404,4 +430,55 @@ mod tests { LEVEL: "baz", })); } + + /// Validates the relaxed validation mode + #[test] + fn gelf_deserialize_relaxed() { + let incorrect_extra_field = "incorrect^_extra_field"; + let input = json!({ + VERSION: "1.0", + HOST: "example.org", + SHORT_MESSAGE: "A short message that helps you identify what is going on", + FULL_MESSAGE: "Backtrace here\n\nmore stuff", + TIMESTAMP: 1385053862.3072, + LEVEL: 1, + FACILITY: "foo", + LINE: 42, + FILE: "/tmp/bar", + incorrect_extra_field: null, + }); + + assert!( + deserialize_gelf_input( + &input, + GelfDeserializerOptions { + validation: ValidationMode::Strict, + ..Default::default() + } + ) + .is_err() + ); + + let events = deserialize_gelf_input( + &input, + GelfDeserializerOptions { + validation: ValidationMode::Relaxed, + ..Default::default() + }, + ) + .unwrap(); + assert_eq!(events.len(), 1); + + let log = events[0].as_log(); + + assert_eq!( + log.get(VERSION), + Some(&Value::Bytes(Bytes::from_static(b"1.0"))) + ); + + assert_eq!( + log.get(event_path!(incorrect_extra_field)), + Some(&Value::Null) + ); + } } diff --git a/website/cue/reference/components/sinks/generated/websocket_server.cue b/website/cue/reference/components/sinks/generated/websocket_server.cue index 0827d1b9850b8..f822ab29b8626 100644 --- a/website/cue/reference/components/sinks/generated/websocket_server.cue +++ b/website/cue/reference/components/sinks/generated/websocket_server.cue @@ -655,16 +655,34 @@ generated: components: sinks: websocket_server: configuration: { description: "GELF-specific decoding options." relevant_when: "codec = \"gelf\"" required: false - type: object: options: lossy: { - description: """ + type: object: options: { + lossy: { + description: """ Determines whether to replace invalid UTF-8 sequences instead of failing. When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. 
[U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character """ - required: false - type: bool: default: true + required: false + type: bool: default: true + } + validation: { + description: "Configures the decoding validation mode." + required: false + type: string: { + default: "strict" + enum: { + relaxed: """ + Uses more relaxed validation that skips strict GELF specification checks. + + This mode will not treat specification violations as errors, allowing the decoder + to accept messages from sources that don't strictly follow the GELF spec. + """ + strict: "Uses strict validation that closely follows the GELF spec." + } + } + } } } influxdb: { diff --git a/website/cue/reference/components/sources/generated/amqp.cue b/website/cue/reference/components/sources/generated/amqp.cue index aae32d74b7fa9..366c1c0462b6d 100644 --- a/website/cue/reference/components/sources/generated/amqp.cue +++ b/website/cue/reference/components/sources/generated/amqp.cue @@ -175,16 +175,34 @@ generated: components: sources: amqp: configuration: { description: "GELF-specific decoding options." relevant_when: "codec = \"gelf\"" required: false - type: object: options: lossy: { - description: """ - Determines whether to replace invalid UTF-8 sequences instead of failing. + type: object: options: { + lossy: { + description: """ + Determines whether to replace invalid UTF-8 sequences instead of failing. - When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. + When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. - [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character - """ - required: false - type: bool: default: true + [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character + """ + required: false + type: bool: default: true + } + validation: { + description: "Configures the decoding validation mode." 
+ required: false + type: string: { + default: "strict" + enum: { + relaxed: """ + Uses more relaxed validation that skips strict GELF specification checks. + + This mode will not treat specification violations as errors, allowing the decoder + to accept messages from sources that don't strictly follow the GELF spec. + """ + strict: "Uses strict validation that closely follows the GELF spec." + } + } + } } } influxdb: { diff --git a/website/cue/reference/components/sources/generated/aws_kinesis_firehose.cue b/website/cue/reference/components/sources/generated/aws_kinesis_firehose.cue index 817403769d616..4464a2305412e 100644 --- a/website/cue/reference/components/sources/generated/aws_kinesis_firehose.cue +++ b/website/cue/reference/components/sources/generated/aws_kinesis_firehose.cue @@ -178,16 +178,34 @@ generated: components: sources: aws_kinesis_firehose: configuration: { description: "GELF-specific decoding options." relevant_when: "codec = \"gelf\"" required: false - type: object: options: lossy: { - description: """ - Determines whether to replace invalid UTF-8 sequences instead of failing. + type: object: options: { + lossy: { + description: """ + Determines whether to replace invalid UTF-8 sequences instead of failing. - When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. + When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. - [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character - """ - required: false - type: bool: default: true + [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character + """ + required: false + type: bool: default: true + } + validation: { + description: "Configures the decoding validation mode." + required: false + type: string: { + default: "strict" + enum: { + relaxed: """ + Uses more relaxed validation that skips strict GELF specification checks. 
+ + This mode will not treat specification violations as errors, allowing the decoder + to accept messages from sources that don't strictly follow the GELF spec. + """ + strict: "Uses strict validation that closely follows the GELF spec." + } + } + } } } influxdb: { diff --git a/website/cue/reference/components/sources/generated/aws_s3.cue b/website/cue/reference/components/sources/generated/aws_s3.cue index a3d8d5b783a5b..95730d6c68ca6 100644 --- a/website/cue/reference/components/sources/generated/aws_s3.cue +++ b/website/cue/reference/components/sources/generated/aws_s3.cue @@ -293,16 +293,34 @@ generated: components: sources: aws_s3: configuration: { description: "GELF-specific decoding options." relevant_when: "codec = \"gelf\"" required: false - type: object: options: lossy: { - description: """ - Determines whether to replace invalid UTF-8 sequences instead of failing. + type: object: options: { + lossy: { + description: """ + Determines whether to replace invalid UTF-8 sequences instead of failing. - When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. + When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. - [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character - """ - required: false - type: bool: default: true + [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character + """ + required: false + type: bool: default: true + } + validation: { + description: "Configures the decoding validation mode." + required: false + type: string: { + default: "strict" + enum: { + relaxed: """ + Uses more relaxed validation that skips strict GELF specification checks. + + This mode will not treat specification violations as errors, allowing the decoder + to accept messages from sources that don't strictly follow the GELF spec. + """ + strict: "Uses strict validation that closely follows the GELF spec." 
+ } + } + } } } influxdb: { diff --git a/website/cue/reference/components/sources/generated/aws_sqs.cue b/website/cue/reference/components/sources/generated/aws_sqs.cue index 137f5ffbf756a..b52cfcea28a31 100644 --- a/website/cue/reference/components/sources/generated/aws_sqs.cue +++ b/website/cue/reference/components/sources/generated/aws_sqs.cue @@ -288,16 +288,34 @@ generated: components: sources: aws_sqs: configuration: { description: "GELF-specific decoding options." relevant_when: "codec = \"gelf\"" required: false - type: object: options: lossy: { - description: """ - Determines whether to replace invalid UTF-8 sequences instead of failing. + type: object: options: { + lossy: { + description: """ + Determines whether to replace invalid UTF-8 sequences instead of failing. - When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. + When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. - [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character - """ - required: false - type: bool: default: true + [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character + """ + required: false + type: bool: default: true + } + validation: { + description: "Configures the decoding validation mode." + required: false + type: string: { + default: "strict" + enum: { + relaxed: """ + Uses more relaxed validation that skips strict GELF specification checks. + + This mode will not treat specification violations as errors, allowing the decoder + to accept messages from sources that don't strictly follow the GELF spec. + """ + strict: "Uses strict validation that closely follows the GELF spec." 
+ } + } + } } } influxdb: { diff --git a/website/cue/reference/components/sources/generated/datadog_agent.cue b/website/cue/reference/components/sources/generated/datadog_agent.cue index d18e4410ad609..70bd89821cb1f 100644 --- a/website/cue/reference/components/sources/generated/datadog_agent.cue +++ b/website/cue/reference/components/sources/generated/datadog_agent.cue @@ -160,16 +160,34 @@ generated: components: sources: datadog_agent: configuration: { description: "GELF-specific decoding options." relevant_when: "codec = \"gelf\"" required: false - type: object: options: lossy: { - description: """ - Determines whether to replace invalid UTF-8 sequences instead of failing. + type: object: options: { + lossy: { + description: """ + Determines whether to replace invalid UTF-8 sequences instead of failing. - When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. + When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. - [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character - """ - required: false - type: bool: default: true + [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character + """ + required: false + type: bool: default: true + } + validation: { + description: "Configures the decoding validation mode." + required: false + type: string: { + default: "strict" + enum: { + relaxed: """ + Uses more relaxed validation that skips strict GELF specification checks. + + This mode will not treat specification violations as errors, allowing the decoder + to accept messages from sources that don't strictly follow the GELF spec. + """ + strict: "Uses strict validation that closely follows the GELF spec." 
+ } + } + } } } influxdb: { diff --git a/website/cue/reference/components/sources/generated/demo_logs.cue b/website/cue/reference/components/sources/generated/demo_logs.cue index 7d4f22e618684..e1227f9fc26ca 100644 --- a/website/cue/reference/components/sources/generated/demo_logs.cue +++ b/website/cue/reference/components/sources/generated/demo_logs.cue @@ -139,16 +139,34 @@ generated: components: sources: demo_logs: configuration: { description: "GELF-specific decoding options." relevant_when: "codec = \"gelf\"" required: false - type: object: options: lossy: { - description: """ - Determines whether to replace invalid UTF-8 sequences instead of failing. + type: object: options: { + lossy: { + description: """ + Determines whether to replace invalid UTF-8 sequences instead of failing. - When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. + When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. - [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character - """ - required: false - type: bool: default: true + [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character + """ + required: false + type: bool: default: true + } + validation: { + description: "Configures the decoding validation mode." + required: false + type: string: { + default: "strict" + enum: { + relaxed: """ + Uses more relaxed validation that skips strict GELF specification checks. + + This mode will not treat specification violations as errors, allowing the decoder + to accept messages from sources that don't strictly follow the GELF spec. + """ + strict: "Uses strict validation that closely follows the GELF spec." 
+ } + } + } } } influxdb: { diff --git a/website/cue/reference/components/sources/generated/exec.cue b/website/cue/reference/components/sources/generated/exec.cue index 3afd00c017658..a5380b6c01f1d 100644 --- a/website/cue/reference/components/sources/generated/exec.cue +++ b/website/cue/reference/components/sources/generated/exec.cue @@ -140,16 +140,34 @@ generated: components: sources: exec: configuration: { description: "GELF-specific decoding options." relevant_when: "codec = \"gelf\"" required: false - type: object: options: lossy: { - description: """ - Determines whether to replace invalid UTF-8 sequences instead of failing. + type: object: options: { + lossy: { + description: """ + Determines whether to replace invalid UTF-8 sequences instead of failing. - When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. + When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. - [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character - """ - required: false - type: bool: default: true + [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character + """ + required: false + type: bool: default: true + } + validation: { + description: "Configures the decoding validation mode." + required: false + type: string: { + default: "strict" + enum: { + relaxed: """ + Uses more relaxed validation that skips strict GELF specification checks. + + This mode will not treat specification violations as errors, allowing the decoder + to accept messages from sources that don't strictly follow the GELF spec. + """ + strict: "Uses strict validation that closely follows the GELF spec." 
+ } + } + } } } influxdb: { diff --git a/website/cue/reference/components/sources/generated/file_descriptor.cue b/website/cue/reference/components/sources/generated/file_descriptor.cue index 386ef17388f26..84ecfd0d85e94 100644 --- a/website/cue/reference/components/sources/generated/file_descriptor.cue +++ b/website/cue/reference/components/sources/generated/file_descriptor.cue @@ -130,16 +130,34 @@ generated: components: sources: file_descriptor: configuration: { description: "GELF-specific decoding options." relevant_when: "codec = \"gelf\"" required: false - type: object: options: lossy: { - description: """ - Determines whether to replace invalid UTF-8 sequences instead of failing. + type: object: options: { + lossy: { + description: """ + Determines whether to replace invalid UTF-8 sequences instead of failing. - When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. + When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. - [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character - """ - required: false - type: bool: default: true + [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character + """ + required: false + type: bool: default: true + } + validation: { + description: "Configures the decoding validation mode." + required: false + type: string: { + default: "strict" + enum: { + relaxed: """ + Uses more relaxed validation that skips strict GELF specification checks. + + This mode will not treat specification violations as errors, allowing the decoder + to accept messages from sources that don't strictly follow the GELF spec. + """ + strict: "Uses strict validation that closely follows the GELF spec." 
+ } + } + } } } influxdb: { diff --git a/website/cue/reference/components/sources/generated/gcp_pubsub.cue b/website/cue/reference/components/sources/generated/gcp_pubsub.cue index 104d68c750e33..74ceb80df85ff 100644 --- a/website/cue/reference/components/sources/generated/gcp_pubsub.cue +++ b/website/cue/reference/components/sources/generated/gcp_pubsub.cue @@ -206,16 +206,34 @@ generated: components: sources: gcp_pubsub: configuration: { description: "GELF-specific decoding options." relevant_when: "codec = \"gelf\"" required: false - type: object: options: lossy: { - description: """ - Determines whether to replace invalid UTF-8 sequences instead of failing. + type: object: options: { + lossy: { + description: """ + Determines whether to replace invalid UTF-8 sequences instead of failing. - When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. + When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. - [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character - """ - required: false - type: bool: default: true + [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character + """ + required: false + type: bool: default: true + } + validation: { + description: "Configures the decoding validation mode." + required: false + type: string: { + default: "strict" + enum: { + relaxed: """ + Uses more relaxed validation that skips strict GELF specification checks. + + This mode will not treat specification violations as errors, allowing the decoder + to accept messages from sources that don't strictly follow the GELF spec. + """ + strict: "Uses strict validation that closely follows the GELF spec." 
+ } + } + } } } influxdb: { diff --git a/website/cue/reference/components/sources/generated/heroku_logs.cue b/website/cue/reference/components/sources/generated/heroku_logs.cue index e75a931396f28..cb44587b15e4b 100644 --- a/website/cue/reference/components/sources/generated/heroku_logs.cue +++ b/website/cue/reference/components/sources/generated/heroku_logs.cue @@ -203,16 +203,34 @@ generated: components: sources: heroku_logs: configuration: { description: "GELF-specific decoding options." relevant_when: "codec = \"gelf\"" required: false - type: object: options: lossy: { - description: """ - Determines whether to replace invalid UTF-8 sequences instead of failing. + type: object: options: { + lossy: { + description: """ + Determines whether to replace invalid UTF-8 sequences instead of failing. - When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. + When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. - [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character - """ - required: false - type: bool: default: true + [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character + """ + required: false + type: bool: default: true + } + validation: { + description: "Configures the decoding validation mode." + required: false + type: string: { + default: "strict" + enum: { + relaxed: """ + Uses more relaxed validation that skips strict GELF specification checks. + + This mode will not treat specification violations as errors, allowing the decoder + to accept messages from sources that don't strictly follow the GELF spec. + """ + strict: "Uses strict validation that closely follows the GELF spec." 
+ } + } + } } } influxdb: { diff --git a/website/cue/reference/components/sources/generated/http.cue b/website/cue/reference/components/sources/generated/http.cue index f2d5c03f9d42f..259c800829438 100644 --- a/website/cue/reference/components/sources/generated/http.cue +++ b/website/cue/reference/components/sources/generated/http.cue @@ -204,16 +204,34 @@ generated: components: sources: http: configuration: { description: "GELF-specific decoding options." relevant_when: "codec = \"gelf\"" required: false - type: object: options: lossy: { - description: """ - Determines whether to replace invalid UTF-8 sequences instead of failing. + type: object: options: { + lossy: { + description: """ + Determines whether to replace invalid UTF-8 sequences instead of failing. - When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. + When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. - [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character - """ - required: false - type: bool: default: true + [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character + """ + required: false + type: bool: default: true + } + validation: { + description: "Configures the decoding validation mode." + required: false + type: string: { + default: "strict" + enum: { + relaxed: """ + Uses more relaxed validation that skips strict GELF specification checks. + + This mode will not treat specification violations as errors, allowing the decoder + to accept messages from sources that don't strictly follow the GELF spec. + """ + strict: "Uses strict validation that closely follows the GELF spec." 
+ } + } + } } } influxdb: { diff --git a/website/cue/reference/components/sources/generated/http_client.cue b/website/cue/reference/components/sources/generated/http_client.cue index 198914d9f60b4..4034101bf6958 100644 --- a/website/cue/reference/components/sources/generated/http_client.cue +++ b/website/cue/reference/components/sources/generated/http_client.cue @@ -309,16 +309,34 @@ generated: components: sources: http_client: configuration: { description: "GELF-specific decoding options." relevant_when: "codec = \"gelf\"" required: false - type: object: options: lossy: { - description: """ - Determines whether to replace invalid UTF-8 sequences instead of failing. + type: object: options: { + lossy: { + description: """ + Determines whether to replace invalid UTF-8 sequences instead of failing. - When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. + When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. - [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character - """ - required: false - type: bool: default: true + [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character + """ + required: false + type: bool: default: true + } + validation: { + description: "Configures the decoding validation mode." + required: false + type: string: { + default: "strict" + enum: { + relaxed: """ + Uses more relaxed validation that skips strict GELF specification checks. + + This mode will not treat specification violations as errors, allowing the decoder + to accept messages from sources that don't strictly follow the GELF spec. + """ + strict: "Uses strict validation that closely follows the GELF spec." 
+ } + } + } } } influxdb: { diff --git a/website/cue/reference/components/sources/generated/http_server.cue b/website/cue/reference/components/sources/generated/http_server.cue index 34b1f9764852e..c84425dd8db2c 100644 --- a/website/cue/reference/components/sources/generated/http_server.cue +++ b/website/cue/reference/components/sources/generated/http_server.cue @@ -204,16 +204,34 @@ generated: components: sources: http_server: configuration: { description: "GELF-specific decoding options." relevant_when: "codec = \"gelf\"" required: false - type: object: options: lossy: { - description: """ - Determines whether to replace invalid UTF-8 sequences instead of failing. + type: object: options: { + lossy: { + description: """ + Determines whether to replace invalid UTF-8 sequences instead of failing. - When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. + When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. - [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character - """ - required: false - type: bool: default: true + [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character + """ + required: false + type: bool: default: true + } + validation: { + description: "Configures the decoding validation mode." + required: false + type: string: { + default: "strict" + enum: { + relaxed: """ + Uses more relaxed validation that skips strict GELF specification checks. + + This mode will not treat specification violations as errors, allowing the decoder + to accept messages from sources that don't strictly follow the GELF spec. + """ + strict: "Uses strict validation that closely follows the GELF spec." 
+ } + } + } } } influxdb: { diff --git a/website/cue/reference/components/sources/generated/kafka.cue b/website/cue/reference/components/sources/generated/kafka.cue index ad26dd6f1d26e..7a0ebfdcf1006 100644 --- a/website/cue/reference/components/sources/generated/kafka.cue +++ b/website/cue/reference/components/sources/generated/kafka.cue @@ -184,16 +184,34 @@ generated: components: sources: kafka: configuration: { description: "GELF-specific decoding options." relevant_when: "codec = \"gelf\"" required: false - type: object: options: lossy: { - description: """ - Determines whether to replace invalid UTF-8 sequences instead of failing. + type: object: options: { + lossy: { + description: """ + Determines whether to replace invalid UTF-8 sequences instead of failing. - When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. + When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. - [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character - """ - required: false - type: bool: default: true + [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character + """ + required: false + type: bool: default: true + } + validation: { + description: "Configures the decoding validation mode." + required: false + type: string: { + default: "strict" + enum: { + relaxed: """ + Uses more relaxed validation that skips strict GELF specification checks. + + This mode will not treat specification violations as errors, allowing the decoder + to accept messages from sources that don't strictly follow the GELF spec. + """ + strict: "Uses strict validation that closely follows the GELF spec." 
+ } + } + } } } influxdb: { diff --git a/website/cue/reference/components/sources/generated/mqtt.cue b/website/cue/reference/components/sources/generated/mqtt.cue index c9b13c6a4a674..654fe714c8de2 100644 --- a/website/cue/reference/components/sources/generated/mqtt.cue +++ b/website/cue/reference/components/sources/generated/mqtt.cue @@ -135,16 +135,34 @@ generated: components: sources: mqtt: configuration: { description: "GELF-specific decoding options." relevant_when: "codec = \"gelf\"" required: false - type: object: options: lossy: { - description: """ - Determines whether to replace invalid UTF-8 sequences instead of failing. + type: object: options: { + lossy: { + description: """ + Determines whether to replace invalid UTF-8 sequences instead of failing. - When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. + When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. - [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character - """ - required: false - type: bool: default: true + [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character + """ + required: false + type: bool: default: true + } + validation: { + description: "Configures the decoding validation mode." + required: false + type: string: { + default: "strict" + enum: { + relaxed: """ + Uses more relaxed validation that skips strict GELF specification checks. + + This mode will not treat specification violations as errors, allowing the decoder + to accept messages from sources that don't strictly follow the GELF spec. + """ + strict: "Uses strict validation that closely follows the GELF spec." 
+ } + } + } } } influxdb: { diff --git a/website/cue/reference/components/sources/generated/nats.cue b/website/cue/reference/components/sources/generated/nats.cue index aa9c4a3065203..c57371bafaa94 100644 --- a/website/cue/reference/components/sources/generated/nats.cue +++ b/website/cue/reference/components/sources/generated/nats.cue @@ -227,16 +227,34 @@ generated: components: sources: nats: configuration: { description: "GELF-specific decoding options." relevant_when: "codec = \"gelf\"" required: false - type: object: options: lossy: { - description: """ - Determines whether to replace invalid UTF-8 sequences instead of failing. + type: object: options: { + lossy: { + description: """ + Determines whether to replace invalid UTF-8 sequences instead of failing. - When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. + When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. - [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character - """ - required: false - type: bool: default: true + [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character + """ + required: false + type: bool: default: true + } + validation: { + description: "Configures the decoding validation mode." + required: false + type: string: { + default: "strict" + enum: { + relaxed: """ + Uses more relaxed validation that skips strict GELF specification checks. + + This mode will not treat specification violations as errors, allowing the decoder + to accept messages from sources that don't strictly follow the GELF spec. + """ + strict: "Uses strict validation that closely follows the GELF spec." 
+ } + } + } } } influxdb: { diff --git a/website/cue/reference/components/sources/generated/pulsar.cue b/website/cue/reference/components/sources/generated/pulsar.cue index 25556dcb5494a..f51e1a449dbcb 100644 --- a/website/cue/reference/components/sources/generated/pulsar.cue +++ b/website/cue/reference/components/sources/generated/pulsar.cue @@ -233,16 +233,34 @@ generated: components: sources: pulsar: configuration: { description: "GELF-specific decoding options." relevant_when: "codec = \"gelf\"" required: false - type: object: options: lossy: { - description: """ - Determines whether to replace invalid UTF-8 sequences instead of failing. + type: object: options: { + lossy: { + description: """ + Determines whether to replace invalid UTF-8 sequences instead of failing. - When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. + When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. - [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character - """ - required: false - type: bool: default: true + [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character + """ + required: false + type: bool: default: true + } + validation: { + description: "Configures the decoding validation mode." + required: false + type: string: { + default: "strict" + enum: { + relaxed: """ + Uses more relaxed validation that skips strict GELF specification checks. + + This mode will not treat specification violations as errors, allowing the decoder + to accept messages from sources that don't strictly follow the GELF spec. + """ + strict: "Uses strict validation that closely follows the GELF spec." 
+ } + } + } } } influxdb: { diff --git a/website/cue/reference/components/sources/generated/redis.cue b/website/cue/reference/components/sources/generated/redis.cue index 8572e59e176d3..4ddff3bf52d73 100644 --- a/website/cue/reference/components/sources/generated/redis.cue +++ b/website/cue/reference/components/sources/generated/redis.cue @@ -145,16 +145,34 @@ generated: components: sources: redis: configuration: { description: "GELF-specific decoding options." relevant_when: "codec = \"gelf\"" required: false - type: object: options: lossy: { - description: """ - Determines whether to replace invalid UTF-8 sequences instead of failing. + type: object: options: { + lossy: { + description: """ + Determines whether to replace invalid UTF-8 sequences instead of failing. - When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. + When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. - [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character - """ - required: false - type: bool: default: true + [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character + """ + required: false + type: bool: default: true + } + validation: { + description: "Configures the decoding validation mode." + required: false + type: string: { + default: "strict" + enum: { + relaxed: """ + Uses more relaxed validation that skips strict GELF specification checks. + + This mode will not treat specification violations as errors, allowing the decoder + to accept messages from sources that don't strictly follow the GELF spec. + """ + strict: "Uses strict validation that closely follows the GELF spec." 
+ } + } + } } } influxdb: { diff --git a/website/cue/reference/components/sources/generated/socket.cue b/website/cue/reference/components/sources/generated/socket.cue index f67e9306327f0..63495e4a1fbaf 100644 --- a/website/cue/reference/components/sources/generated/socket.cue +++ b/website/cue/reference/components/sources/generated/socket.cue @@ -147,16 +147,34 @@ generated: components: sources: socket: configuration: { description: "GELF-specific decoding options." relevant_when: "codec = \"gelf\"" required: false - type: object: options: lossy: { - description: """ - Determines whether to replace invalid UTF-8 sequences instead of failing. + type: object: options: { + lossy: { + description: """ + Determines whether to replace invalid UTF-8 sequences instead of failing. - When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. + When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. - [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character - """ - required: false - type: bool: default: true + [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character + """ + required: false + type: bool: default: true + } + validation: { + description: "Configures the decoding validation mode." + required: false + type: string: { + default: "strict" + enum: { + relaxed: """ + Uses more relaxed validation that skips strict GELF specification checks. + + This mode will not treat specification violations as errors, allowing the decoder + to accept messages from sources that don't strictly follow the GELF spec. + """ + strict: "Uses strict validation that closely follows the GELF spec." 
+ } + } + } } } influxdb: { diff --git a/website/cue/reference/components/sources/generated/stdin.cue b/website/cue/reference/components/sources/generated/stdin.cue index b73a0a1dfac35..72de586122b44 100644 --- a/website/cue/reference/components/sources/generated/stdin.cue +++ b/website/cue/reference/components/sources/generated/stdin.cue @@ -130,16 +130,34 @@ generated: components: sources: stdin: configuration: { description: "GELF-specific decoding options." relevant_when: "codec = \"gelf\"" required: false - type: object: options: lossy: { - description: """ - Determines whether to replace invalid UTF-8 sequences instead of failing. + type: object: options: { + lossy: { + description: """ + Determines whether to replace invalid UTF-8 sequences instead of failing. - When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. + When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. - [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character - """ - required: false - type: bool: default: true + [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character + """ + required: false + type: bool: default: true + } + validation: { + description: "Configures the decoding validation mode." + required: false + type: string: { + default: "strict" + enum: { + relaxed: """ + Uses more relaxed validation that skips strict GELF specification checks. + + This mode will not treat specification violations as errors, allowing the decoder + to accept messages from sources that don't strictly follow the GELF spec. + """ + strict: "Uses strict validation that closely follows the GELF spec." 
+ } + } + } } } influxdb: { diff --git a/website/cue/reference/components/sources/generated/websocket.cue b/website/cue/reference/components/sources/generated/websocket.cue index b81e23e4113b5..c770d5558e71a 100644 --- a/website/cue/reference/components/sources/generated/websocket.cue +++ b/website/cue/reference/components/sources/generated/websocket.cue @@ -317,16 +317,34 @@ generated: components: sources: websocket: configuration: { description: "GELF-specific decoding options." relevant_when: "codec = \"gelf\"" required: false - type: object: options: lossy: { - description: """ - Determines whether to replace invalid UTF-8 sequences instead of failing. + type: object: options: { + lossy: { + description: """ + Determines whether to replace invalid UTF-8 sequences instead of failing. - When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. + When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. - [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character - """ - required: false - type: bool: default: true + [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character + """ + required: false + type: bool: default: true + } + validation: { + description: "Configures the decoding validation mode." + required: false + type: string: { + default: "strict" + enum: { + relaxed: """ + Uses more relaxed validation that skips strict GELF specification checks. + + This mode will not treat specification violations as errors, allowing the decoder + to accept messages from sources that don't strictly follow the GELF spec. + """ + strict: "Uses strict validation that closely follows the GELF spec." 
+ } + } + } } } influxdb: { From d6c21e50eeb0ea390fc9ba64e19e4f53ecadbc0b Mon Sep 17 00:00:00 2001 From: Thomas Date: Mon, 17 Nov 2025 13:08:08 -0500 Subject: [PATCH 097/227] chore(dev): delete cue.mod (#24254) --- cue.mod/module.cue | 10 ---------- 1 file changed, 10 deletions(-) delete mode 100644 cue.mod/module.cue diff --git a/cue.mod/module.cue b/cue.mod/module.cue deleted file mode 100644 index caf275bea6bcf..0000000000000 --- a/cue.mod/module.cue +++ /dev/null @@ -1,10 +0,0 @@ -// This file establishes the Vector repo as a CUE module that can be imported by -// other CUE libraries. This is here largely so that the CUE team can use the -// the Vector docs as an integration test case. See -// https://github.com/vectordotdev/vector/pull/6593. This currently has no effect -// on the Vector docs build. - -module: "vector.dev" -language: { - version: "v0.9.0" -} From ac207396efc9b24b16024d3507cdf6a48c5872a3 Mon Sep 17 00:00:00 2001 From: Eric Huang Date: Tue, 18 Nov 2025 02:17:38 +0800 Subject: [PATCH 098/227] feat(docker_logs source): add exponential retry to docker client (#24063) * add exponential backoff to event processing * cohere the exponential backoff handling in event * add changelog Signed-off-by: titaneric * rename changelog Signed-off-by: titaneric * add newline to changelog Signed-off-by: titaneric * update backoff Signed-off-by: titaneric * catch shutdown signal in backoff retry Signed-off-by: titaneric * add indefinite description in changelog Signed-off-by: titaneric * fix make checks error Signed-off-by: titaneric * log retry warning before sleep Signed-off-by: titaneric * change exponential backoff to default one Signed-off-by: titaneric * Remove `message =` from info log --------- Signed-off-by: titaneric Co-authored-by: Thomas --- .../24063_retry_docker_logs_client.feature.md | 3 ++ src/sources/docker_logs/mod.rs | 47 +++++++++++++++++-- 2 files changed, 45 insertions(+), 5 deletions(-) create mode 100644 
changelog.d/24063_retry_docker_logs_client.feature.md diff --git a/changelog.d/24063_retry_docker_logs_client.feature.md b/changelog.d/24063_retry_docker_logs_client.feature.md new file mode 100644 index 0000000000000..6b61f1e9e1fb0 --- /dev/null +++ b/changelog.d/24063_retry_docker_logs_client.feature.md @@ -0,0 +1,3 @@ +The `docker_logs` source now includes exponential backoff retry logic for Docker daemon communication failures, with indefinite retry capability. This improves reliability when working with slow or temporarily unresponsive Docker daemons by retrying with increasing delays instead of immediately stopping. + +authors: titaneric diff --git a/src/sources/docker_logs/mod.rs b/src/sources/docker_logs/mod.rs index de361f40e29e9..45268788d45cc 100644 --- a/src/sources/docker_logs/mod.rs +++ b/src/sources/docker_logs/mod.rs @@ -41,6 +41,7 @@ use vrl::{ use super::util::MultilineConfig; use crate::{ SourceSender, + common::backoff::ExponentialBackoff, config::{DataType, SourceConfig, SourceContext, SourceOutput, log_schema}, docker::{DockerTlsConfig, docker}, event::{self, EstimatedJsonEncodedSizeOf, LogEvent, Value, merge_state::LogEventMergeState}, @@ -468,6 +469,8 @@ struct DockerLogsSource { /// It may contain shortened container id. 
hostname: Option, backoff_duration: Duration, + /// Backoff strategy for events stream retries + events_backoff: ExponentialBackoff, } impl DockerLogsSource { @@ -521,6 +524,7 @@ impl DockerLogsSource { main_recv, hostname, backoff_duration: backoff_secs, + events_backoff: ExponentialBackoff::default(), }) } @@ -620,6 +624,9 @@ impl DockerLogsSource { value = self.events.next() => { match value { Some(Ok(mut event)) => { + // Reset backoff on successful event + self.events_backoff.reset(); + let action = event.action.unwrap(); let actor = event.actor.take().unwrap(); let id = actor.id.unwrap(); @@ -662,13 +669,18 @@ impl DockerLogsSource { error, container_id: None, }); - return; + // Retry events stream with exponential backoff + if !self.retry_events_stream_with_backoff("Docker events stream failed").await { + error!("Docker events stream failed and retry exhausted, shutting down."); + return; + } }, None => { - // TODO: this could be fixed, but should be tried with some timeoff and exponential backoff - error!(message = "Docker log event stream has ended unexpectedly.", internal_log_rate_limit = false); - info!(message = "Shutting down docker_logs source."); - return; + // Retry events stream with exponential backoff + if !self.retry_events_stream_with_backoff("Docker events stream ended").await { + error!("Docker events stream ended and retry exhausted, shutting down."); + return; + } } }; } @@ -676,6 +688,31 @@ impl DockerLogsSource { } } + /// Retry events stream with exponential backoff + /// Returns true if retry was attempted, false if exhausted or shutdown + async fn retry_events_stream_with_backoff(&mut self, reason: &str) -> bool { + if let Some(delay) = self.events_backoff.next() { + warn!( + message = reason, + action = "retrying with backoff", + delay_ms = delay.as_millis() + ); + tokio::select! 
{ + _ = tokio::time::sleep(delay) => { + self.events = Box::pin(self.esb.core.docker_logs_event_stream()); + true + } + _ = self.esb.shutdown.clone() => { + info!("Shutdown signal received during retry backoff."); + false + } + } + } else { + error!(message = "Events stream retry exhausted.", reason = reason); + false + } + } + fn exclude_self(&self, id: &str) -> bool { self.hostname .as_ref() From 67509b09756a5f7d112184dd0b3d70457d8ffba7 Mon Sep 17 00:00:00 2001 From: Thomas Date: Mon, 17 Nov 2025 13:36:30 -0500 Subject: [PATCH 099/227] fix(website): document the global healthcheck option (#24253) --- src/config/mod.rs | 4 ++- .../cue/reference/generated/configuration.cue | 25 +++++++++++++++++++ 2 files changed, 28 insertions(+), 1 deletion(-) diff --git a/src/config/mod.rs b/src/config/mod.rs index b38ffe9c14248..a676be24fdb5a 100644 --- a/src/config/mod.rs +++ b/src/config/mod.rs @@ -270,7 +270,7 @@ impl Config { } /// Healthcheck options. -#[configurable_component] +#[configurable_component(global_option("healthcheck"))] #[derive(Clone, Copy, Debug)] #[serde(default)] pub struct HealthcheckOptions { @@ -309,6 +309,8 @@ impl Default for HealthcheckOptions { } } +impl_generate_config_from_default!(HealthcheckOptions); + /// Unique thing, like port, of which only one owner can be. #[derive(Clone, Debug, Eq, PartialEq, Hash, Ord, PartialOrd)] pub enum Resource { diff --git a/website/cue/reference/generated/configuration.cue b/website/cue/reference/generated/configuration.cue index 0dba2b5d42d54..f6c7bb6ce9a96 100644 --- a/website/cue/reference/generated/configuration.cue +++ b/website/cue/reference/generated/configuration.cue @@ -1,6 +1,31 @@ package metadata generated: configuration: configuration: { + healthcheck: { + type: object: options: { + enabled: { + type: bool: default: true + description: """ + Whether or not healthchecks are enabled for all sinks. + + Can be overridden on a per-sink basis. 
+ """ + required: false + } + require_healthy: { + type: bool: default: false + description: """ + Whether or not to require a sink to report as being healthy during startup. + + When enabled and a sink reports not being healthy, Vector will exit during start-up. + + Can be alternatively set, and overridden by, the `--require-healthy` command-line flag. + """ + required: false + } + } + description: "Healthcheck options." + } enrichment_tables: { type: object: options: { file: { From fff7f5a34366cca87a8a71cb18570d8a2f8927c8 Mon Sep 17 00:00:00 2001 From: Thomas Date: Mon, 17 Nov 2025 13:50:48 -0500 Subject: [PATCH 100/227] chore(codecs): forbid unwrap and refactor error handling (#24247) * chore(codecs): forbid unwrap and refactor error handling * Allow unwrap in tests * Fix AvroDeserializerConfig::build return type --- lib/codecs/Cargo.toml | 3 + lib/codecs/src/decoding/format/avro.rs | 10 +- lib/codecs/src/decoding/mod.rs | 2 +- lib/codecs/src/encoding/format/gelf.rs | 6 +- lib/codecs/src/gelf.rs | 2 +- lib/codecs/tests/avro.rs | 6 +- .../tests/bin/generate-avro-fixtures.rs | 153 +++++++++--------- lib/codecs/tests/native.rs | 2 + lib/codecs/tests/native_json.rs | 2 + lib/codecs/tests/protobuf.rs | 2 + lib/codecs/tests/varint_framing.rs | 2 + 11 files changed, 105 insertions(+), 85 deletions(-) diff --git a/lib/codecs/Cargo.toml b/lib/codecs/Cargo.toml index 57ce81892c774..2cb4ae3bbdb35 100644 --- a/lib/codecs/Cargo.toml +++ b/lib/codecs/Cargo.toml @@ -5,6 +5,9 @@ authors = ["Vector Contributors "] edition = "2024" publish = false +[lints.clippy] +unwrap-used = "deny" + [[bin]] name = "generate-avro-fixtures" path = "tests/bin/generate-avro-fixtures.rs" diff --git a/lib/codecs/src/decoding/format/avro.rs b/lib/codecs/src/decoding/format/avro.rs index dc41693ed3fab..c21c09de21cfb 100644 --- a/lib/codecs/src/decoding/format/avro.rs +++ b/lib/codecs/src/decoding/format/avro.rs @@ -39,14 +39,14 @@ impl AvroDeserializerConfig { } /// Build the `AvroDeserializer` from 
this configuration. - pub fn build(&self) -> AvroDeserializer { + pub fn build(&self) -> vector_common::Result { let schema = apache_avro::Schema::parse_str(&self.avro_options.schema) - .map_err(|error| format!("Failed building Avro serializer: {error}")) - .unwrap(); - AvroDeserializer { + .map_err(|error| format!("Failed building Avro serializer: {error}"))?; + + Ok(AvroDeserializer { schema, strip_schema_id_prefix: self.avro_options.strip_schema_id_prefix, - } + }) } /// The data type of events that are accepted by `AvroDeserializer`. diff --git a/lib/codecs/src/decoding/mod.rs b/lib/codecs/src/decoding/mod.rs index 1cf799360ffc2..f27c30d93d501 100644 --- a/lib/codecs/src/decoding/mod.rs +++ b/lib/codecs/src/decoding/mod.rs @@ -372,7 +372,7 @@ impl DeserializerConfig { AvroDeserializerConfig { avro_options: avro.clone(), } - .build(), + .build()?, )), DeserializerConfig::Bytes => Ok(Deserializer::Bytes(BytesDeserializerConfig.build())), DeserializerConfig::Json(config) => Ok(Deserializer::Json(config.build())), diff --git a/lib/codecs/src/encoding/format/gelf.rs b/lib/codecs/src/encoding/format/gelf.rs index aedb6d20678b7..966126df4f895 100644 --- a/lib/codecs/src/encoding/format/gelf.rs +++ b/lib/codecs/src/encoding/format/gelf.rs @@ -206,7 +206,11 @@ fn coerce_field_names_and_values( if let Value::Timestamp(ts) = value { let ts_millis = ts.timestamp_millis(); if ts_millis % 1000 != 0 { - *value = Value::Float(NotNan::new(ts_millis as f64 / 1000.0).unwrap()); + // i64 to f64 / 1000.0 will never be NaN + *value = Value::Float( + NotNan::new(ts_millis as f64 / 1000.0) + .expect("i64 -> f64 produced NaN"), + ); } else { // keep full range of representable time if no milliseconds are set // but still convert to numeric according to GELF protocol diff --git a/lib/codecs/src/gelf.rs b/lib/codecs/src/gelf.rs index d4ddc79267359..f97d1c0863d2e 100644 --- a/lib/codecs/src/gelf.rs +++ b/lib/codecs/src/gelf.rs @@ -73,4 +73,4 @@ pub(crate) static GELF_TARGET_PATHS: 
LazyLock = /// As Graylog itself will produce GELF with any existing field names on the Graylog GELF Output, /// vector is more lenient, too, at least allowing the additional `@` character. pub static VALID_FIELD_REGEX: LazyLock = - LazyLock::new(|| Regex::new(r"^[\w\.\-@]*$").unwrap()); + LazyLock::new(|| Regex::new(r"^[\w\.\-@]*$").expect("valid regex pattern")); diff --git a/lib/codecs/tests/avro.rs b/lib/codecs/tests/avro.rs index 4afa403c616de..fff274706d7d9 100644 --- a/lib/codecs/tests/avro.rs +++ b/lib/codecs/tests/avro.rs @@ -1,3 +1,5 @@ +#![allow(clippy::unwrap_used)] + use std::{ fs::File, io::Read, @@ -33,7 +35,9 @@ fn roundtrip_avro_fixtures( fn roundtrip_avro(data_path: PathBuf, schema_path: PathBuf, reserialize: bool) { let schema = load_file(&schema_path); let schema = from_utf8(&schema).unwrap().to_string(); - let deserializer = AvroDeserializerConfig::new(schema.clone(), false).build(); + let deserializer = AvroDeserializerConfig::new(schema.clone(), false) + .build() + .unwrap(); let mut serializer = AvroSerializerConfig::new(schema.clone()).build().unwrap(); let (buf, event) = load_deserialize(&data_path, &deserializer); diff --git a/lib/codecs/tests/bin/generate-avro-fixtures.rs b/lib/codecs/tests/bin/generate-avro-fixtures.rs index d8797cac43dcc..29f22ef0991c0 100644 --- a/lib/codecs/tests/bin/generate-avro-fixtures.rs +++ b/lib/codecs/tests/bin/generate-avro-fixtures.rs @@ -1,11 +1,12 @@ use std::{fs::File, io::Write, path::PathBuf}; +use vector_common::Result; use apache_avro::{Decimal, Schema, types::Value}; use serde::{Deserialize, Serialize}; const FIXTURES_PATH: &str = "lib/codecs/tests/data/avro/generated"; -fn generate_avro_test_case_boolean() { +fn generate_avro_test_case_boolean() -> Result<()> { let schema = r#" { "type": "record", @@ -20,10 +21,10 @@ fn generate_avro_test_case_boolean() { bool_field: bool, } let value = Test { bool_field: true }; - generate_test_case(schema, value, "boolean"); + generate_test_case(schema, value, 
"boolean") } -fn generate_avro_test_case_int() { +fn generate_avro_test_case_int() -> Result<()> { let schema = r#" { "type": "record", @@ -38,10 +39,10 @@ fn generate_avro_test_case_int() { int_field: i32, } let value = Test { int_field: 1234 }; - generate_test_case(schema, value, "int"); + generate_test_case(schema, value, "int") } -fn generate_avro_test_case_long() { +fn generate_avro_test_case_long() -> Result<()> { let schema = r#" { "type": "record", @@ -58,10 +59,10 @@ fn generate_avro_test_case_long() { let value = Test { long_field: 42949672960i64, }; - generate_test_case(schema, value, "long"); + generate_test_case(schema, value, "long") } -fn generate_avro_test_case_float() { +fn generate_avro_test_case_float() -> Result<()> { let schema = r#" { "type": "record", @@ -78,10 +79,10 @@ fn generate_avro_test_case_float() { let value = Test { float_field: 123.456, }; - generate_test_case(schema, value, "float"); + generate_test_case(schema, value, "float") } -fn generate_avro_test_case_double() { +fn generate_avro_test_case_double() -> Result<()> { let schema = r#" { "type": "record", @@ -98,10 +99,10 @@ fn generate_avro_test_case_double() { let value = Test { double_field: 123.456f64, }; - generate_test_case(schema, value, "double"); + generate_test_case(schema, value, "double") } -fn generate_avro_test_case_bytes() { +fn generate_avro_test_case_bytes() -> Result<()> { let schema = r#" { "type": "record", @@ -118,10 +119,10 @@ fn generate_avro_test_case_bytes() { let value = Test { bytes_field: vec![1, 2, 3, 4, 5, 6, 6, 7], }; - generate_test_case(schema, value, "bytes"); + generate_test_case(schema, value, "bytes") } -fn generate_avro_test_case_string() { +fn generate_avro_test_case_string() -> Result<()> { let schema = r#" { "type": "record", @@ -138,11 +139,11 @@ fn generate_avro_test_case_string() { let value = Test { string_field: "hello world!".to_string(), }; - generate_test_case(schema, value, "string"); + generate_test_case(schema, value, "string") 
} #[allow(unused)] -fn generate_avro_test_case_fixed() { +fn generate_avro_test_case_fixed() -> Result<()> { let schema = r#" { "type": "record", @@ -156,10 +157,10 @@ fn generate_avro_test_case_fixed() { "fixed_field".into(), Value::Fixed(16, b"1019181716151413".to_vec()), )]); - generate_test_case_from_value(schema, record, "fixed"); + generate_test_case_from_value(schema, record, "fixed") } -fn generate_avro_test_case_enum() { +fn generate_avro_test_case_enum() -> Result<()> { let schema = r#" { "type": "record", @@ -183,10 +184,10 @@ fn generate_avro_test_case_enum() { let value = Test { enum_field: Value::Hearts, }; - generate_test_case(schema, value, "enum"); + generate_test_case(schema, value, "enum") } -fn generate_avro_test_case_union() { +fn generate_avro_test_case_union() -> Result<()> { let schema = r#" { "type": "record", @@ -207,10 +208,10 @@ fn generate_avro_test_case_union() { let value = Test { union_field: 123456, }; - generate_test_case(schema, value, "union"); + generate_test_case(schema, value, "union") } -fn generate_avro_test_case_array() { +fn generate_avro_test_case_array() -> Result<()> { let schema = r#" { "type": "record", @@ -232,10 +233,10 @@ fn generate_avro_test_case_array() { "codec".to_string(), ], }; - generate_test_case(schema, value, "array"); + generate_test_case(schema, value, "array") } -fn generate_avro_test_case_map() { +fn generate_avro_test_case_map() -> Result<()> { let schema = r#" { "type": "record", @@ -253,10 +254,10 @@ fn generate_avro_test_case_map() { let mut scores = HashMap::new(); scores.insert(String::from("Blue"), 10i64); let value = Test { map_field: scores }; - generate_test_case(schema, value, "map"); + generate_test_case(schema, value, "map") } -fn generate_avro_test_case_record() { +fn generate_avro_test_case_record() -> Result<()> { let schema = r#" { "type": "record", @@ -276,11 +277,11 @@ fn generate_avro_test_case_record() { name: "John".to_string(), age: 23, }; - generate_test_case(schema, value, 
"record"); + generate_test_case(schema, value, "record") } #[allow(unused)] -fn generate_avro_test_case_date() { +fn generate_avro_test_case_date() -> Result<()> { let schema = r#" { "type": "record", @@ -295,11 +296,11 @@ fn generate_avro_test_case_date() { date_field: i32, } let value = Test { date_field: 19646 }; - generate_test_case(schema, value, "date"); + generate_test_case(schema, value, "date") } #[allow(unused)] -fn generate_avro_test_case_decimal_var() { +fn generate_avro_test_case_decimal_var() -> Result<()> { let schema = r#" { "type": "record", @@ -316,11 +317,11 @@ fn generate_avro_test_case_decimal_var() { 249, 33, 74, 206, 142, 64, 190, 170, 17, 153, ])), )]); - generate_test_case_from_value(schema, record, "decimal_var"); + generate_test_case_from_value(schema, record, "decimal_var") } #[allow(unused)] -fn generate_avro_test_case_time_millis() { +fn generate_avro_test_case_time_millis() -> Result<()> { let schema = r#" { "type": "record", @@ -337,10 +338,10 @@ fn generate_avro_test_case_time_millis() { let value = Test { time_millis_field: 59820123, }; - generate_test_case(schema, value, "time_millis"); + generate_test_case(schema, value, "time_millis") } -fn generate_avro_test_case_time_micros() { +fn generate_avro_test_case_time_micros() -> Result<()> { let schema = r#" { "type": "record", @@ -357,10 +358,10 @@ fn generate_avro_test_case_time_micros() { let value: Test = Test { time_micros_field: 59820123456i64, }; - generate_test_case(schema, value, "time_micros"); + generate_test_case(schema, value, "time_micros") } -fn generate_avro_test_case_timestamp_millis() { +fn generate_avro_test_case_timestamp_millis() -> Result<()> { let schema = r#" { "type": "record", @@ -377,10 +378,10 @@ fn generate_avro_test_case_timestamp_millis() { let value = Test { timestamp_millis_field: 1697445291056i64, }; - generate_test_case(schema, value, "timestamp_millis"); + generate_test_case(schema, value, "timestamp_millis") } -fn 
generate_avro_test_case_timestamp_micros() { +fn generate_avro_test_case_timestamp_micros() -> Result<()> { let schema = r#" { "type": "record", @@ -397,10 +398,10 @@ fn generate_avro_test_case_timestamp_micros() { let value = Test { timestamp_micros_field: 1697445291056567i64, }; - generate_test_case(schema, value, "timestamp_micros"); + generate_test_case(schema, value, "timestamp_micros") } -fn generate_avro_test_case_local_timestamp_millis() { +fn generate_avro_test_case_local_timestamp_millis() -> Result<()> { let schema = r#" { "type": "record", @@ -417,10 +418,10 @@ fn generate_avro_test_case_local_timestamp_millis() { let value = Test { local_timestamp_millis_field: 1697445291056i64, }; - generate_test_case(schema, value, "local-timestamp_millis"); + generate_test_case(schema, value, "local-timestamp_millis") } -fn generate_avro_test_case_local_timestamp_micros() { +fn generate_avro_test_case_local_timestamp_micros() -> Result<()> { let schema = r#" { "type": "record", @@ -437,10 +438,10 @@ fn generate_avro_test_case_local_timestamp_micros() { let value = Test { local_timestamp_micros_field: 1697445291056567i64, }; - generate_test_case(schema, value, "local-timestamp_micros"); + generate_test_case(schema, value, "local-timestamp_micros") } -fn generate_avro_test_case_uuid() { +fn generate_avro_test_case_uuid() -> Result<()> { let schema = r#" { "type": "record", @@ -459,48 +460,48 @@ fn generate_avro_test_case_uuid() { let value = Test { uuid_field: "550e8400-e29b-41d4-a716-446655440000".into(), }; - generate_test_case(schema, value, "uuid"); + generate_test_case(schema, value, "uuid") } -fn generate_test_case(schema: &str, value: S, filename: &str) { - let value = apache_avro::to_value(value).unwrap(); - generate_test_case_from_value(schema, value, filename); +fn generate_test_case(schema: &str, value: S, filename: &str) -> Result<()> { + let value = apache_avro::to_value(value)?; + generate_test_case_from_value(schema, value, filename) } -fn 
generate_test_case_from_value(schema: &str, value: Value, filename: &str) { - let schema = Schema::parse_str(schema).unwrap(); +fn generate_test_case_from_value(schema: &str, value: Value, filename: &str) -> Result<()> { + let schema = Schema::parse_str(schema)?; - let value = value.resolve(&schema).unwrap(); - let bytes = apache_avro::to_avro_datum(&schema, value).unwrap(); + let value = value.resolve(&schema)?; + let bytes = apache_avro::to_avro_datum(&schema, value)?; - let mut schema_file = File::create(format!("{FIXTURES_PATH}/{filename}.avsc")).unwrap(); - let mut avro_file = File::create(format!("{FIXTURES_PATH}/{filename}.avro")).unwrap(); - schema_file - .write_all(schema.canonical_form().as_bytes()) - .unwrap(); - avro_file.write_all(&bytes).unwrap(); + let mut schema_file = File::create(format!("{FIXTURES_PATH}/{filename}.avsc"))?; + let mut avro_file = File::create(format!("{FIXTURES_PATH}/{filename}.avro"))?; + schema_file.write_all(schema.canonical_form().as_bytes())?; + avro_file.write_all(&bytes)?; + Ok(()) } -fn main() { +fn main() -> Result<()> { if !PathBuf::from(FIXTURES_PATH).is_dir() { panic!("dir {FIXTURES_PATH} not exist\n"); } - generate_avro_test_case_array(); - generate_avro_test_case_boolean(); - generate_avro_test_case_bytes(); - generate_avro_test_case_double(); - generate_avro_test_case_enum(); - generate_avro_test_case_float(); - generate_avro_test_case_int(); - generate_avro_test_case_long(); - generate_avro_test_case_map(); - generate_avro_test_case_record(); - generate_avro_test_case_string(); - generate_avro_test_case_time_micros(); - generate_avro_test_case_timestamp_micros(); - generate_avro_test_case_timestamp_millis(); - generate_avro_test_case_local_timestamp_micros(); - generate_avro_test_case_local_timestamp_millis(); - generate_avro_test_case_union(); - generate_avro_test_case_uuid(); + generate_avro_test_case_array()?; + generate_avro_test_case_boolean()?; + generate_avro_test_case_bytes()?; + 
generate_avro_test_case_double()?; + generate_avro_test_case_enum()?; + generate_avro_test_case_float()?; + generate_avro_test_case_int()?; + generate_avro_test_case_long()?; + generate_avro_test_case_map()?; + generate_avro_test_case_record()?; + generate_avro_test_case_string()?; + generate_avro_test_case_time_micros()?; + generate_avro_test_case_timestamp_micros()?; + generate_avro_test_case_timestamp_millis()?; + generate_avro_test_case_local_timestamp_micros()?; + generate_avro_test_case_local_timestamp_millis()?; + generate_avro_test_case_union()?; + generate_avro_test_case_uuid()?; + Ok(()) } diff --git a/lib/codecs/tests/native.rs b/lib/codecs/tests/native.rs index 4f6d61049a38c..d0c6329c35090 100644 --- a/lib/codecs/tests/native.rs +++ b/lib/codecs/tests/native.rs @@ -1,3 +1,5 @@ +#![allow(clippy::unwrap_used)] + use std::{ fs::{self, File}, io::{Read, Write}, diff --git a/lib/codecs/tests/native_json.rs b/lib/codecs/tests/native_json.rs index 4bcf31982aee8..48db364f0a987 100644 --- a/lib/codecs/tests/native_json.rs +++ b/lib/codecs/tests/native_json.rs @@ -1,3 +1,5 @@ +#![allow(clippy::unwrap_used)] + use bytes::BytesMut; use codecs::{ NativeJsonDeserializerConfig, NativeJsonSerializerConfig, decoding::format::Deserializer, diff --git a/lib/codecs/tests/protobuf.rs b/lib/codecs/tests/protobuf.rs index 1f56e7695c6b4..87bc8ee394880 100644 --- a/lib/codecs/tests/protobuf.rs +++ b/lib/codecs/tests/protobuf.rs @@ -1,5 +1,7 @@ //! Tests for the behaviour of Protobuf serializer and deserializer (together). 
+#![allow(clippy::unwrap_used)] + use std::path::{Path, PathBuf}; use bytes::{Bytes, BytesMut}; diff --git a/lib/codecs/tests/varint_framing.rs b/lib/codecs/tests/varint_framing.rs index 5fd9cc333f2ca..3fd5fba70756a 100644 --- a/lib/codecs/tests/varint_framing.rs +++ b/lib/codecs/tests/varint_framing.rs @@ -1,3 +1,5 @@ +#![allow(clippy::unwrap_used)] + use bytes::BytesMut; use codecs::{ VarintLengthDelimitedDecoder, VarintLengthDelimitedDecoderConfig, From 6996ec55d1424be0a68929169c7119dc6baae637 Mon Sep 17 00:00:00 2001 From: Pascal Bachor Date: Mon, 17 Nov 2025 20:05:41 +0100 Subject: [PATCH 101/227] fix(journald source): journalctl args in case of current_boot_only (#23438) * fix(journald source): journalctl args in case of current_boot_only * fix: clippy * refactor: dedicated helper function * Update changelog.d/18068_journalctl_all_boots.fix.md Co-authored-by: Pavlos Rontidis * fix: warn condition (fixup bf4f814) * journald: raise error in case of unsupported 'all boots' request * fix: markdown formatting * Update src/sources/journald.rs Co-authored-by: Thomas --------- Co-authored-by: Pascal Bachor Co-authored-by: Pavlos Rontidis Co-authored-by: Thomas --- changelog.d/18068_journalctl_all_boots.fix.md | 9 ++ src/sources/journald.rs | 87 ++++++++++++++++++- tests/data/journalctl | 12 ++- 3 files changed, 102 insertions(+), 6 deletions(-) create mode 100644 changelog.d/18068_journalctl_all_boots.fix.md diff --git a/changelog.d/18068_journalctl_all_boots.fix.md b/changelog.d/18068_journalctl_all_boots.fix.md new file mode 100644 index 0000000000000..a4a0fb1f0b3c8 --- /dev/null +++ b/changelog.d/18068_journalctl_all_boots.fix.md @@ -0,0 +1,9 @@ +The `journald` source now correctly respects the `current_boot_only: false` setting on systemd versions >= 258. + +Compatibility notes: + +- **systemd < 250**: Both `current_boot_only: true` and `false` work correctly +- **systemd 250-257**: Due to systemd limitations, `current_boot_only: false` will not work. 
An error will be raised on startup. +- **systemd >= 258**: Both settings work correctly + +authors: bachorp diff --git a/src/sources/journald.rs b/src/sources/journald.rs index f5e7169e93cbb..e83c2b0dc29e5 100644 --- a/src/sources/journald.rs +++ b/src/sources/journald.rs @@ -76,6 +76,8 @@ static JOURNALCTL: LazyLock = LazyLock::new(|| "journalctl".into()); enum BuildError { #[snafu(display("journalctl failed to execute: {}", source))] JournalctlSpawn { source: io::Error }, + #[snafu(display("failed to parse output of `journalctl --version`: {:?}", output))] + JournalctlParseVersion { output: String }, #[snafu(display( "The unit {:?} is duplicated in both include_units and exclude_units", unit @@ -87,6 +89,11 @@ enum BuildError { value, ))] DuplicatedMatches { field: String, value: String }, + #[snafu(display( + "`current_boot_only: false` not supported for systemd versions 250 through 257 (got {}).", + systemd_version + ))] + AllBootsNotSupported { systemd_version: u32 }, } type Matches = HashMap>; @@ -364,8 +371,16 @@ impl SourceConfig for JournaldConfig { .clone() .unwrap_or_else(|| JOURNALCTL.clone()); + let systemd_version = get_systemd_version_from_journalctl(&journalctl_path).await?; + + if !self.current_boot_only && (250..=257).contains(&systemd_version) { + // https://github.com/vectordotdev/vector/issues/18068 + return Err(BuildError::AllBootsNotSupported { systemd_version }.into()); + } + let starter = StartJournalctl::new( journalctl_path, + systemd_version, self.journal_directory.clone(), self.journal_namespace.clone(), self.current_boot_only, @@ -679,6 +694,7 @@ type JournalStream = BoxStream<'static, Result>; struct StartJournalctl { path: PathBuf, + systemd_version: u32, journal_dir: Option, journal_namespace: Option, current_boot_only: bool, @@ -689,6 +705,7 @@ struct StartJournalctl { impl StartJournalctl { const fn new( path: PathBuf, + systemd_version: u32, journal_dir: Option, journal_namespace: Option, current_boot_only: bool, @@ -697,6 +714,7 
@@ impl StartJournalctl { ) -> Self { Self { path, + systemd_version, journal_dir, journal_namespace, current_boot_only, @@ -722,8 +740,16 @@ impl StartJournalctl { command.arg(format!("--namespace={namespace}")); } + // By default entries from all boots are included + // systemd 242 introduces support for --boot=all + // systemd 250 lets --follow imply --boot (with no facility to override) + // systemd 258 allows to override --boot as implied by --follow if self.current_boot_only { - command.arg("--boot"); + if self.systemd_version < 250 { + command.arg("--boot"); + } + } else if self.systemd_version >= 258 { + command.arg("--boot=all"); } if let Some(cursor) = checkpoint { @@ -776,6 +802,37 @@ impl Drop for RunningJournalctl { } } +async fn get_systemd_version_from_journalctl(journalctl_path: &PathBuf) -> crate::Result { + let stdout = Command::new(journalctl_path) + .arg("--version") + .output() + .await + .context(JournalctlSpawnSnafu)? + .stdout; + + // output format: `systemd {version_number} ({full_version}){newline}{config ...}` + let stdout = String::from_utf8_lossy(&stdout); + Ok(stdout + .split_whitespace() + .nth(1) + .and_then(|s| s.parse::().ok()) + .ok_or_else(|| BuildError::JournalctlParseVersion { + output: { + let cutoff = 40; + let length = stdout.chars().count(); + format!( + "{}{}", + stdout.chars().take(cutoff).collect::(), + if length > cutoff { + format!(" ..{} more char(s)", length - cutoff) + } else { + "".to_string() + } + ) + }, + })?) 
+} + fn enrich_log_event(log: &mut LogEvent, log_namespace: LogNamespace) { match log_namespace { LogNamespace::Vector => { @@ -1517,6 +1574,7 @@ mod tests { fn command_options() { let path = PathBuf::from("journalctl"); + let systemd_version = 239; let journal_dir = None; let journal_namespace = None; let current_boot_only = false; @@ -1526,6 +1584,7 @@ mod tests { let command = create_command( &path, + systemd_version, journal_dir, journal_namespace, current_boot_only, @@ -1536,7 +1595,7 @@ mod tests { let cmd_line = format!("{command:?}"); assert!(!cmd_line.contains("--directory=")); assert!(!cmd_line.contains("--namespace=")); - assert!(!cmd_line.contains("--boot")); + assert!(!cmd_line.contains("--boot=all")); assert!(cmd_line.contains("--since=2000-01-01")); let journal_dir = None; @@ -1546,6 +1605,7 @@ mod tests { let command = create_command( &path, + systemd_version, journal_dir, journal_namespace, current_boot_only, @@ -1564,6 +1624,7 @@ mod tests { let command = create_command( &path, + systemd_version, journal_dir, journal_namespace, current_boot_only, @@ -1577,10 +1638,31 @@ mod tests { assert!(cmd_line.contains("--boot")); assert!(cmd_line.contains("--after-cursor=")); assert!(cmd_line.contains("--merge")); + + let systemd_version = 258; + let journal_dir = None; + let journal_namespace = None; + let current_boot_only = false; + let extra_args = vec![]; + + let command = create_command( + &path, + systemd_version, + journal_dir, + journal_namespace, + current_boot_only, + since_now, + cursor, + extra_args, + ); + let cmd_line = format!("{command:?}"); + assert!(cmd_line.contains("--boot=all")); } + #[allow(clippy::too_many_arguments)] fn create_command( path: &Path, + systemd_version: u32, journal_dir: Option, journal_namespace: Option, current_boot_only: bool, @@ -1590,6 +1672,7 @@ mod tests { ) -> Command { StartJournalctl::new( path.into(), + systemd_version, journal_dir, journal_namespace, current_boot_only, diff --git a/tests/data/journalctl 
b/tests/data/journalctl index a70f6ab87ca64..e8bab74f95aa6 100755 --- a/tests/data/journalctl +++ b/tests/data/journalctl @@ -1,10 +1,14 @@ #!/bin/bash for arg in "$@" do - if [[ $arg = --after-cursor=* ]] - then - after="${arg#--after-cursor=}" - fi + case "$arg" in + --after-cursor=* ) + after="${arg#--after-cursor=}" + ;; + --version ) + printf "systemd 300 (300.3-vector99)\n+XYZ -ABC\n" + exit + esac done lines=( From 61bb16f53d09d009ea4a7a363b83acbb4a753b85 Mon Sep 17 00:00:00 2001 From: Pavlos Rontidis Date: Tue, 18 Nov 2025 11:10:37 -0500 Subject: [PATCH 102/227] chore(journald source): add note to 'include_units' option (#24260) * chore(journald source): add note to 'include_units' option * make generate-component-docs --- src/sources/journald.rs | 4 ++++ .../cue/reference/components/sources/generated/journald.cue | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/src/sources/journald.rs b/src/sources/journald.rs index e83c2b0dc29e5..c6177fba1eb6c 100644 --- a/src/sources/journald.rs +++ b/src/sources/journald.rs @@ -116,6 +116,10 @@ pub struct JournaldConfig { /// If empty or not present, all units are accepted. /// /// Unit names lacking a `.` have `.service` appended to make them a valid service unit name. + /// + /// **Note:** This option matches only the `_SYSTEMD_UNIT` field, which is narrower than `journalctl --unit`. + /// Messages from systemd about unit lifecycle (start/stop) have `_SYSTEMD_UNIT=init.scope` and will not match. + /// To capture these, explicitly include `init.scope` or use `include_matches` for finer control. 
#[serde(default)] #[configurable(metadata(docs::examples = "ntpd", docs::examples = "sysinit.target"))] pub include_units: Vec, diff --git a/website/cue/reference/components/sources/generated/journald.cue b/website/cue/reference/components/sources/generated/journald.cue index d33c8505f3fd2..c2eeba7a99806 100644 --- a/website/cue/reference/components/sources/generated/journald.cue +++ b/website/cue/reference/components/sources/generated/journald.cue @@ -136,6 +136,10 @@ generated: components: sources: journald: configuration: { If empty or not present, all units are accepted. Unit names lacking a `.` have `.service` appended to make them a valid service unit name. + + **Note:** This option matches only the `_SYSTEMD_UNIT` field, which is narrower than `journalctl --unit`. + Messages from systemd about unit lifecycle (start/stop) have `_SYSTEMD_UNIT=init.scope` and will not match. + To capture these, explicitly include `init.scope` or use `include_matches` for finer control. """ required: false type: array: { From 8a8b981cc15cb4739caa05869721728d46d4fa32 Mon Sep 17 00:00:00 2001 From: Pavlos Rontidis Date: Tue, 18 Nov 2025 11:10:49 -0500 Subject: [PATCH 103/227] chore(exclusive_route transform): improve routes docs (#24259) --- src/transforms/exclusive_route/config.rs | 2 ++ .../reference/components/transforms/exclusive_route.cue | 4 +++- .../components/transforms/generated/exclusive_route.cue | 8 ++++++-- 3 files changed, 11 insertions(+), 3 deletions(-) diff --git a/src/transforms/exclusive_route/config.rs b/src/transforms/exclusive_route/config.rs index a62879f0d9526..22bcda0665ce5 100644 --- a/src/transforms/exclusive_route/config.rs +++ b/src/transforms/exclusive_route/config.rs @@ -55,6 +55,8 @@ impl Eq for Route {} #[serde(deny_unknown_fields)] pub struct ExclusiveRouteConfig { /// An array of named routes. The route names are expected to be unique. 
+ /// Routes are evaluated in order from first to last, and only the first matching route receives each event + /// (first-match-wins). #[configurable(metadata(docs::examples = "routes_example()"))] pub routes: Vec, } diff --git a/website/cue/reference/components/transforms/exclusive_route.cue b/website/cue/reference/components/transforms/exclusive_route.cue index 1a30f35e441f0..b02c9db834b11 100644 --- a/website/cue/reference/components/transforms/exclusive_route.cue +++ b/website/cue/reference/components/transforms/exclusive_route.cue @@ -4,7 +4,9 @@ components: transforms: exclusive_route: { title: "Exclusive Route" description: """ - Routes events from one or more streams to unique sub-streams based on a set of user-defined conditions. + Routes events from one or more streams to unique sub-streams based on a set of user-defined conditions. Routes are + evaluated in order and once an event matches a route, it is sent exclusively to that route and no further routes + are evaluated. Also, see the [Route](\(urls.vector_route_transform)) transform for routing an event to multiple streams. """ diff --git a/website/cue/reference/components/transforms/generated/exclusive_route.cue b/website/cue/reference/components/transforms/generated/exclusive_route.cue index 4900c4d8ac12f..70d7b45f14335 100644 --- a/website/cue/reference/components/transforms/generated/exclusive_route.cue +++ b/website/cue/reference/components/transforms/generated/exclusive_route.cue @@ -1,8 +1,12 @@ package metadata generated: components: transforms: exclusive_route: configuration: routes: { - description: "An array of named routes. The route names are expected to be unique." - required: true + description: """ + An array of named routes. The route names are expected to be unique. + Routes are evaluated in order from first to last, and only the first matching route receives each event + (first-match-wins). 
+ """ + required: true type: array: items: type: object: { examples: [{ condition: { From f1efa9dc7badd4358c82838e139bef6739b07692 Mon Sep 17 00:00:00 2001 From: Thomas Date: Tue, 18 Nov 2025 11:35:43 -0500 Subject: [PATCH 104/227] fix(website): fix healthcheck -> healthchecks (#24267) --- src/config/mod.rs | 2 +- website/cue/reference/generated/configuration.cue | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/config/mod.rs b/src/config/mod.rs index a676be24fdb5a..fc36a9aa2364b 100644 --- a/src/config/mod.rs +++ b/src/config/mod.rs @@ -270,7 +270,7 @@ impl Config { } /// Healthcheck options. -#[configurable_component(global_option("healthcheck"))] +#[configurable_component(global_option("healthchecks"))] #[derive(Clone, Copy, Debug)] #[serde(default)] pub struct HealthcheckOptions { diff --git a/website/cue/reference/generated/configuration.cue b/website/cue/reference/generated/configuration.cue index f6c7bb6ce9a96..1e1f7e059bf6c 100644 --- a/website/cue/reference/generated/configuration.cue +++ b/website/cue/reference/generated/configuration.cue @@ -1,7 +1,7 @@ package metadata generated: configuration: configuration: { - healthcheck: { + healthchecks: { type: object: options: { enabled: { type: bool: default: true From e38c093e8857ebbdbbab1ff398639b6181a8cea7 Mon Sep 17 00:00:00 2001 From: Pavlos Rontidis Date: Tue, 18 Nov 2025 11:42:17 -0500 Subject: [PATCH 105/227] chore(dev): add aqua deps (#24269) --- aqua/aqua.yaml | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/aqua/aqua.yaml b/aqua/aqua.yaml index 7d3dc9f8b956f..e55f3daaddcad 100644 --- a/aqua/aqua.yaml +++ b/aqua/aqua.yaml @@ -5,10 +5,13 @@ registries: - type: standard ref: v4.268.0 # renovate: depName=aquaproj/aqua-registry packages: - - name: rustwasm/wasm-pack@v0.13.1 - - name: crates.io/cargo-deb@2.9.3 - - name: cross-rs/cross@v0.2.5 - - name: nextest-rs/nextest/cargo-nextest@cargo-nextest-0.9.47 - - name: EmbarkStudios/cargo-deny@0.16.2 - 
- name: foresterre/cargo-msrv@v0.15.1 - - name: crates.io/dd-rust-license-tool@1.0.4 + - name: rustwasm/wasm-pack@v0.13.1 # Build and package WebAssembly + - name: crates.io/cargo-deb@2.9.3 # Create Debian packages + - name: cross-rs/cross@v0.2.5 # Cross-compile Rust projects + - name: nextest-rs/nextest/cargo-nextest@cargo-nextest-0.9.47 # Rust test runner + - name: EmbarkStudios/cargo-deny@0.16.2 # Lint dependencies for security and licenses + - name: foresterre/cargo-msrv@v0.15.1 # Find minimum supported Rust version + - name: crates.io/dd-rust-license-tool@1.0.4 # Generate license information + - name: bufbuild/buf@v1.50.0 # Protobuf linting and breaking change detection + - name: cue-lang/cue@v0.11.2 # Validate and generate CUE configuration schemas + - name: nodejs/node@v22.12.0 # JavaScript runtime for website builds From 677f21e4c3d9d9b63a1c73b4bef5272b736b58ec Mon Sep 17 00:00:00 2001 From: Pavlos Rontidis Date: Tue, 18 Nov 2025 12:29:30 -0500 Subject: [PATCH 106/227] chore(website): improve build from source guide (#24268) * chore(website): improve build from source guide * improve docker section * cleanup * Apply suggestions from code review Co-authored-by: Eva Parish --------- Co-authored-by: Eva Parish --- .../setup/installation/manual/from-source.md | 125 ++++++++++++------ 1 file changed, 84 insertions(+), 41 deletions(-) diff --git a/website/content/en/docs/setup/installation/manual/from-source.md b/website/content/en/docs/setup/installation/manual/from-source.md index 19f1c7c7659ad..5ad19d46bce5c 100644 --- a/website/content/en/docs/setup/installation/manual/from-source.md +++ b/website/content/en/docs/setup/installation/manual/from-source.md @@ -18,50 +18,48 @@ We recommend installing Vector through a supported platform, package manager, or ### Linux -Install Rust: +Install compilation dependencies for your distribution, if they aren't pre-installed on your system: ```shell +# Install Rust curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs 
| sh -s -- -y --default-toolchain stable -``` - -Install compilation dependencies, specifically C and C++ compilers (GCC or Clang) and GNU `make` if they aren't pre-installed on your system. -Download Vector's source: +# Install protoc +./scripts/environment/install-protoc.sh -```shell -# Latest ({{< version >}}) -mkdir -p vector && \ - curl -sSfL --proto '=https' --tlsv1.2 https://api.github.com/repos/vectordotdev/vector/tarball/v{{< version >}} | \ - tar xzf - -C vector --strip-components=1 - -# Master -mkdir -p vector && \ - curl -sSfL --proto '=https' --tlsv1.2 https://github.com/vectordotdev/vector/archive/master.tar.gz | \ - tar xzf - -C vector --strip-components=1 +# Install other dependencies, example for Ubuntu/Debian: +sudo apt-get update +sudo apt-get install -y build-essential cmake curl git ``` -Change into your Vector directory: +Clone Vector's source: ```shell +git clone https://github.com/vectordotdev/vector cd vector -``` -Compile Vector: +# (Optional) Check out a specific version +# git checkout v0.51.1 -```shell -[FEATURES=",,..."] make build +# Or use the latest release tag +# git checkout $(git describe --tags --abbrev=0) ``` -The `FEATURES` environment variable is optional. You can override the default features using this variable. See [feature flags](#feature-flags) for more info. +Compile and run Vector: -When finished, the Vector binary is placed in `target//release/vector`. If you're building Vector on your Mac, for example, the target triple is `arm64-apple-darwin` and the Vector binary will be located at `target/arm64-apple-darwin/release/vector`. +```shell +make build -Finally, you can start Vector: +# Or specify with custom features +# FEATURES=",,..." make build -```shell -target//release/vector --config config/vector.yaml +# Run your custom build +target/release/vector --config config/vector.yaml ``` +The `FEATURES` environment variable is optional. You can override the default features using this variable. 
+See [feature flags](#feature-flags) for more info. + ### Windows Install Rust using [`rustup`][rustup]. If you don't have VC++ build tools, the install will prompt you to install them. @@ -106,38 +104,76 @@ Start Vector. After these steps, a binary `vector.exe` in `target\release` would ### Docker -You can build statically linked binaries of Vector for Linux using [cross][] in Docker. If you do so, the dependencies listed in the previous section aren't needed, as all of them would be automatically pulled by Docker. +You can build statically linked binaries of Vector for Linux using [cross][] in Docker. If you do so, the dependencies listed in the +previous section aren't needed, as all of them would be automatically pulled by Docker. -First, download Vector's source: +First, clone Vector's source: ```shell -# Latest ({{< version >}}) -mkdir -p vector && \ - curl -sSfL --proto '=https' --tlsv1.2 https://api.github.com/repos/vectordotdev/vector/tarball/v{{< version >}} | \ - tar xzf - -C vector --strip-components=1 +git clone https://github.com/vectordotdev/vector +cd vector -# Master -mkdir -p vector && \ - curl -sSfL --proto '=https' --tlsv1.2 https://github.com/vectordotdev/vector/archive/master.tar.gz | \ - tar xzf - -C vector --strip-components=1 +# (Optional) Check out a specific version +# git checkout v{{< version >}} + +# Alternative: Download tarball +# mkdir -p vector && \ +# curl -sSfL --proto '=https' --tlsv1.2 https://api.github.com/repos/vectordotdev/vector/tarball/v{{< version >}} | \ +# tar xzf - -C vector --strip-components=1 && cd vector ``` Second, [install cross][cross]. 
-And then build Vector using [cross]: +Then build Vector using cross for your target architecture: ```shell -# Linux (x86_64) +# Linux x86_64 (musl - fully static) make package-x86_64-unknown-linux-musl-all -# Linux (ARM64) +# Linux x86_64 (glibc - standard) +make package-x86_64-unknown-linux-gnu-all + +# Linux ARM64 (musl) make package-aarch64-unknown-linux-musl-all -# Linux (ARMv7) -make package-armv7-unknown-linux-muslueabihf-all +# Linux ARM64 (glibc) +make package-aarch64-unknown-linux-gnu-all + +# Linux ARMv7 +make package-armv7-unknown-linux-musleabihf-all ``` -The command above builds a Docker image with a Rust toolchain for a Linux target for the corresponding architecture using `musl` as the C library, then starts a container from this image, and then builds inside the container. The target binary is located at `target//release/vector` as in the previous case. +These commands build a Docker image with a Rust toolchain for the target architecture, start a container from this image, and build Vector +inside the container. The musl targets create fully static binaries, while gnu targets link against glibc. + +The compiled packages will be located in `target/artifacts/`. + +#### Building Custom Docker Images + +You can build custom Docker images with Vector. The repository includes Dockerfiles for different base images in the `distribution/docker/` +directory. + +**Using the Alpine Dockerfile (smallest image, musl-based):** + +```shell +# First build the musl binary +make package-x86_64-unknown-linux-musl-all + +# Then build the Docker image +cd distribution/docker/alpine +docker build -t my-vector:alpine . +``` + +**Using the Debian Dockerfile (glibc-based):** + +```shell +# First build the deb package +make package-x86_64-unknown-linux-gnu-all + +# Then build the Docker image +cd distribution/docker/debian +docker build -t my-vector:debian . 
+``` ## Next steps @@ -201,6 +237,13 @@ Vector supports many feature flags to customize which features are included in a all sources, transforms, and sinks are enabled. To view a complete list of features, they are listed under "[features]" [here](https://github.com/vectordotdev/vector/blob/master/Cargo.toml). +Example of building with only specific components: + +```shell +# Build with only file source, remap transform, and console sink +FEATURES="api,sources-file,transforms-remap,sinks-console" make build +``` + [buffer]: /docs/reference/glossary/#buffer [cmake]: https://cmake.org/ [configuration]: /docs/reference/configuration From 563251a03e2ef88a0adf871f323669a5982bbd04 Mon Sep 17 00:00:00 2001 From: rf-ben Date: Tue, 18 Nov 2025 09:32:44 -0800 Subject: [PATCH 107/227] enhancement(security): rework TlsSettings to carry PEM based objects (#23146) * internal(tlssettings tlsconfig): rework TlsSettings and TlsConfig to use PEM files instead of PKCS12 for FIPS compatibility TlsSettings used a PKCS12 archive to store the TLS identity information. PKCS12 is not Federal Information Processing Standard (FIPS) compliant, and there is significant interest to use vector in such environments. This change makes PEM based certs & keys the format stored in TlsSettings. For compatibility when a PKCS12 (DER) archive is presented in the configuration, the code will extract the components into X509 at load time. This conversion is not FIPS compliant and should be avoided there. In pratice, most use cases do not use FIPS, so this is perfectly fine to use. This change was tested on a regular machine, and on a FIPS enabled kernel where we compiled vector to dynamically link to the system provided OpenSSL. Only the 'fips' and 'base' providers were configured to be available. 
RUSTFLAGS="-C prefer-dynamic" OPENSSL_NO_VENDOR=1 OPENSSL_STATIC=0 \ cargo build -j8 --release --target x86_64-unknown-linux-gnu \ --no-default-features --features target-x86_64-unknown-linux-gnu Key changes: * IdentityStore now keeps a name, cert, private key, and an optional list of ca certs * PKCS12 configuration will convert to IdentityStore on load * TlsSettings::identity() returns the IdentityStore instead of a PKCS12 archive. This avoids the need to convert to PCKS12 and thus avoids the FIPS incompatibility for future uses * cargo fmt * revive PR * cargo fmt * add changelog --------- Co-authored-by: Pavlos Rontidis Co-authored-by: Thomas --- Cargo.lock | 37 ++++-- .../23146_tls_fips_compliance.enhancement.md | 5 + lib/vector-core/src/tls/settings.rs | 125 ++++++++---------- 3 files changed, 83 insertions(+), 84 deletions(-) create mode 100644 changelog.d/23146_tls_fips_compliance.enhancement.md diff --git a/Cargo.lock b/Cargo.lock index e8085bfd45a0d..6e3b400cbb5ae 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2402,7 +2402,7 @@ dependencies = [ "num-traits", "serde", "wasm-bindgen", - "windows-link", + "windows-link 0.1.0", ] [[package]] @@ -2717,15 +2717,15 @@ dependencies = [ [[package]] name = "console" -version = "0.16.0" +version = "0.16.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e09ced7ebbccb63b4c65413d821f2e00ce54c5ca4514ddc6b3c892fdbcbc69d" +checksum = "b430743a6eb14e9764d4260d4c0d8123087d504eeb9c48f2b2a5e810dd369df4" dependencies = [ "encode_unicode 1.0.0", "libc", "once_cell", "unicode-width 0.2.0", - "windows-sys 0.60.2", + "windows-sys 0.61.0", ] [[package]] @@ -5619,7 +5619,7 @@ version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "70a646d946d06bedbbc4cac4c218acf4bbf2d87757a784857025f4d447e4e1cd" dependencies = [ - "console 0.16.0", + "console 0.16.1", "portable-atomic", "unicode-segmentation", "unicode-width 0.2.0", @@ -13505,7 +13505,7 @@ dependencies = [ 
"windows-collections", "windows-core 0.60.1", "windows-future", - "windows-link", + "windows-link 0.1.0", "windows-numerics", ] @@ -13535,7 +13535,7 @@ checksum = "ca21a92a9cae9bf4ccae5cf8368dce0837100ddf6e6d57936749e85f152f6247" dependencies = [ "windows-implement", "windows-interface", - "windows-link", + "windows-link 0.1.0", "windows-result 0.3.1", "windows-strings 0.3.1", ] @@ -13547,7 +13547,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a787db4595e7eb80239b74ce8babfb1363d8e343ab072f2ffe901400c03349f0" dependencies = [ "windows-core 0.60.1", - "windows-link", + "windows-link 0.1.0", ] [[package]] @@ -13578,6 +13578,12 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6dccfd733ce2b1753b03b6d3c65edf020262ea35e20ccdf3e288043e6dd620e3" +[[package]] +name = "windows-link" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "45e46c0661abb7180e7b9c281db115305d49ca1709ab8242adf09666d2173c65" + [[package]] name = "windows-numerics" version = "0.1.1" @@ -13585,7 +13591,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "005dea54e2f6499f2cee279b8f703b3cf3b5734a2d8d21867c8f44003182eeed" dependencies = [ "windows-core 0.60.1", - "windows-link", + "windows-link 0.1.0", ] [[package]] @@ -13614,7 +13620,7 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "06374efe858fab7e4f881500e6e86ec8bc28f9462c47e5a9941a0142ad86b189" dependencies = [ - "windows-link", + "windows-link 0.1.0", ] [[package]] @@ -13644,7 +13650,7 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "87fa48cc5d406560701792be122a10132491cff9d0aeb23583cc2dcafc847319" dependencies = [ - "windows-link", + "windows-link 0.1.0", ] [[package]] @@ -13692,6 +13698,15 @@ dependencies = [ "windows-targets 0.53.2", ] +[[package]] +name = "windows-sys" +version = "0.61.0" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e201184e40b2ede64bc2ea34968b28e33622acdbbf37104f0e4a33f7abe657aa" +dependencies = [ + "windows-link 0.2.0", +] + [[package]] name = "windows-targets" version = "0.42.2" diff --git a/changelog.d/23146_tls_fips_compliance.enhancement.md b/changelog.d/23146_tls_fips_compliance.enhancement.md new file mode 100644 index 0000000000000..da26a5cd67e09 --- /dev/null +++ b/changelog.d/23146_tls_fips_compliance.enhancement.md @@ -0,0 +1,5 @@ +Vector's TLS implementation now stores credentials in PEM format internally instead of PKCS12, enabling FIPS-compliant operation in +environments with strict cryptographic requirements. This change is transparent to users - both PEM and PKCS12 certificate files continue to +be supported as configuration inputs, with PKCS12 files automatically converted at load time. + +authors: rf-ben diff --git a/lib/vector-core/src/tls/settings.rs b/lib/vector-core/src/tls/settings.rs index 999972b05ad9c..c301537082c6f 100644 --- a/lib/vector-core/src/tls/settings.rs +++ b/lib/vector-core/src/tls/settings.rs @@ -5,10 +5,16 @@ use std::{ path::{Path, PathBuf}, }; +use super::{ + AddCertToStoreSnafu, AddExtraChainCertSnafu, CaStackPushSnafu, EncodeAlpnProtocolsSnafu, + FileOpenFailedSnafu, FileReadFailedSnafu, MaybeTls, NewCaStackSnafu, NewStoreBuilderSnafu, + ParsePkcs12Snafu, PrivateKeyParseSnafu, Result, SetAlpnProtocolsSnafu, SetCertificateSnafu, + SetPrivateKeySnafu, SetVerifyCertSnafu, TlsError, X509ParseSnafu, +}; use cfg_if::cfg_if; use lookup::lookup_v2::OptionalValuePath; use openssl::{ - pkcs12::{ParsedPkcs12_2, Pkcs12}, + pkcs12::Pkcs12, pkey::{PKey, Private}, ssl::{AlpnError, ConnectConfiguration, SslContextBuilder, SslVerifyMode, select_next_proto}, stack::Stack, @@ -17,14 +23,6 @@ use openssl::{ use snafu::ResultExt; use vector_config::configurable_component; -use super::{ - AddCertToStoreSnafu, AddExtraChainCertSnafu, CaStackPushSnafu, DerExportSnafu, - 
EncodeAlpnProtocolsSnafu, FileOpenFailedSnafu, FileReadFailedSnafu, MaybeTls, NewCaStackSnafu, - NewStoreBuilderSnafu, ParsePkcs12Snafu, Pkcs12Snafu, PrivateKeyParseSnafu, Result, - SetAlpnProtocolsSnafu, SetCertificateSnafu, SetPrivateKeySnafu, SetVerifyCertSnafu, TlsError, - TlsIdentitySnafu, X509ParseSnafu, -}; - pub const PEM_START_MARKER: &str = "-----BEGIN "; pub const TEST_PEM_CA_PATH: &str = "tests/data/ca/certs/ca.cert.pem"; @@ -177,13 +175,18 @@ pub struct TlsSettings { verify_certificate: bool, pub(super) verify_hostname: bool, authorities: Vec, - pub(super) identity: Option, // openssl::pkcs12::ParsedPkcs12 doesn't impl Clone yet + pub(super) identity: Option, alpn_protocols: Option>, server_name: Option, } +/// Identity store in PEM format #[derive(Clone)] -pub(super) struct IdentityStore(Vec, String); +pub(super) struct IdentityStore { + cert: X509, + key: PKey, + ca: Option>, +} impl TlsSettings { /// Generate a filled out settings struct from the given optional @@ -220,37 +223,20 @@ impl TlsSettings { }) } - /// Returns the identity as PKCS12 - /// - /// # Panics - /// - /// Panics if the identity is invalid. - fn identity(&self) -> Option { - // This data was test-built previously, so we can just use it - // here and expect the results will not fail. This can all be - // reworked when `openssl::pkcs12::ParsedPkcs12` gains the Clone - // impl. - self.identity.as_ref().map(|identity| { - Pkcs12::from_der(&identity.0) - .expect("Could not build PKCS#12 archive from parsed data") - .parse2(&identity.1) - .expect("Could not parse stored PKCS#12 archive") - }) - } - - /// Returns the identity as PEM data + /// Returns the identity as PEM encoded byte arrays /// /// # Panics /// /// Panics if the identity is missing, invalid, or the authorities to chain are invalid. 
pub fn identity_pem(&self) -> Option<(Vec, Vec)> { - self.identity().map(|identity| { - let mut cert = identity - .cert - .expect("Identity required") - .to_pem() + self.identity.as_ref().map(|identity| { + // we have verified correct formatting at ingest time + let mut cert = identity.cert.to_pem().expect("Invalid stored identity"); + let key = identity + .key + .private_key_to_pem_pkcs8() .expect("Invalid stored identity"); - if let Some(chain) = identity.ca { + if let Some(chain) = identity.ca.as_ref() { for authority in chain { cert.extend( authority @@ -259,11 +245,6 @@ impl TlsSettings { ); } } - let key = identity - .pkey - .expect("Private key required") - .private_key_to_pem_pkcs8() - .expect("Invalid stored private key"); (cert, key) }) } @@ -295,18 +276,18 @@ impl TlsSettings { } else { SslVerifyMode::NONE }); - if let Some(identity) = self.identity() { - if let Some(cert) = &identity.cert { - context.set_certificate(cert).context(SetCertificateSnafu)?; - } - if let Some(pkey) = &identity.pkey { - context.set_private_key(pkey).context(SetPrivateKeySnafu)?; - } + if let Some(identity) = &self.identity { + context + .set_certificate(&identity.cert) + .context(SetCertificateSnafu)?; + context + .set_private_key(&identity.key) + .context(SetPrivateKeySnafu)?; - if let Some(chain) = identity.ca { + if let Some(chain) = &identity.ca { for cert in chain { context - .add_extra_chain_cert(cert) + .add_extra_chain_cert(cert.clone()) .context(AddExtraChainCertSnafu)?; } } @@ -401,7 +382,7 @@ impl TlsConfig { let (data, filename) = open_read(filename, "certificate")?; der_or_pem( data, - |der| self.parse_pkcs12_identity(der), + |der| self.parse_pkcs12_identity(&der), |pem| self.parse_pem_identity(&pem, &filename), ) } @@ -430,45 +411,43 @@ impl TlsConfig { match &self.key_file { None => Err(TlsError::MissingKey), Some(key_file) => { - let name = crt_file.to_string_lossy().to_string(); let mut crt_stack = X509::stack_from_pem(pem.as_bytes()) .with_context(|_| 
X509ParseSnafu { filename: crt_file })? .into_iter(); - let crt = crt_stack.next().ok_or(TlsError::MissingCertificate)?; + let cert = crt_stack.next().ok_or(TlsError::MissingCertificate)?; let key = load_key(key_file.as_path(), self.key_pass.as_ref())?; let mut ca_stack = Stack::new().context(NewCaStackSnafu)?; for intermediate in crt_stack { ca_stack.push(intermediate).context(CaStackPushSnafu)?; } - - let pkcs12 = Pkcs12::builder() - .ca(ca_stack) - .name(&name) - .pkey(&key) - .cert(&crt) - .build2("") - .context(Pkcs12Snafu)?; - let identity = pkcs12.to_der().context(DerExportSnafu)?; - - // Build the resulting parsed PKCS#12 archive, - // but don't store it, as it cannot be cloned. - // This is just for error checking. - pkcs12.parse2("").context(TlsIdentitySnafu)?; - - Ok(Some(IdentityStore(identity, String::new()))) + let ca: Vec = ca_stack + .iter() + .map(std::borrow::ToOwned::to_owned) + .collect(); + Ok(Some(IdentityStore { + cert, + key, + ca: Some(ca), + })) } } } /// Parse identity from a DER encoded PKCS#12 archive - fn parse_pkcs12_identity(&self, der: Vec) -> Result> { - let pkcs12 = Pkcs12::from_der(&der).context(ParsePkcs12Snafu)?; + fn parse_pkcs12_identity(&self, der: &[u8]) -> Result> { + let pkcs12 = Pkcs12::from_der(der).context(ParsePkcs12Snafu)?; // Verify password let key_pass = self.key_pass.as_deref().unwrap_or(""); - pkcs12.parse2(key_pass).context(ParsePkcs12Snafu)?; - Ok(Some(IdentityStore(der, key_pass.to_string()))) + let parsed = pkcs12.parse2(key_pass).context(ParsePkcs12Snafu)?; + // extract cert, key and ca and store as PEM sow e can return an IdentityStore + let cert = parsed.cert.ok_or(TlsError::MissingCertificate)?; + let key = parsed.pkey.ok_or(TlsError::MissingKey)?; + let ca: Option> = parsed + .ca + .map(|stack| stack.iter().map(std::borrow::ToOwned::to_owned).collect()); + Ok(Some(IdentityStore { cert, key, ca })) } } From 8c9bc00b712b57519ed09540dc9967ed3a453c4e Mon Sep 17 00:00:00 2001 From: Paul Taylor 
<178183+trxcllnt@users.noreply.github.com> Date: Tue, 18 Nov 2025 10:06:19 -0800 Subject: [PATCH 108/227] feat(aws_cloudwatch_metrics sink): Support AWS CloudWatch high-resolution metrics (#23822) * add AWS CloudWatch Metrics sink storage_resolution config * add changelog entry * run make generate-component-docs * validate storage resolutions * make fmt * fix lint * run make generate-component-docs --------- Co-authored-by: Pavlos Rontidis --- ...ws_cloudwatch_metric_resolution.feature.md | 3 ++ src/sinks/aws_cloudwatch_metrics/mod.rs | 35 ++++++++++++++++++- src/sinks/aws_cloudwatch_metrics/tests.rs | 7 +++- .../generated/aws_cloudwatch_metrics.cue | 15 ++++++++ 4 files changed, 58 insertions(+), 2 deletions(-) create mode 100644 changelog.d/23821_aws_cloudwatch_metric_resolution.feature.md diff --git a/changelog.d/23821_aws_cloudwatch_metric_resolution.feature.md b/changelog.d/23821_aws_cloudwatch_metric_resolution.feature.md new file mode 100644 index 0000000000000..a53f8095053b4 --- /dev/null +++ b/changelog.d/23821_aws_cloudwatch_metric_resolution.feature.md @@ -0,0 +1,3 @@ +Add AWS CloudWatch Metrics sink `storage_resolution` config. + +authors: trxcllnt diff --git a/src/sinks/aws_cloudwatch_metrics/mod.rs b/src/sinks/aws_cloudwatch_metrics/mod.rs index 9526603830f05..39b3ac17bd217 100644 --- a/src/sinks/aws_cloudwatch_metrics/mod.rs +++ b/src/sinks/aws_cloudwatch_metrics/mod.rs @@ -15,6 +15,7 @@ use aws_sdk_cloudwatch::{ use aws_smithy_types::DateTime as AwsDateTime; use futures::{FutureExt, SinkExt, stream}; use futures_util::{future, future::BoxFuture}; +use indexmap::IndexMap; use tower::Service; use vector_lib::{ ByteSizeOf, EstimatedJsonEncodedSizeOf, configurable::configurable_component, sink::VectorSink, @@ -113,6 +114,15 @@ pub struct CloudWatchMetricsSinkConfig { skip_serializing_if = "crate::serde::is_default" )] acknowledgements: AcknowledgementsConfig, + + /// A map from metric name to AWS storage resolution. 
+ /// Valid values are 1 (high resolution) and 60 (standard resolution). + /// If unset, the AWS SDK default of 60 (standard resolution) is used. + /// See [AWS Metrics Resolution](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_concepts.html#Resolution_definition) + /// See [MetricDatum::storage_resolution](https://docs.rs/aws-sdk-cloudwatch/1.91.0/aws_sdk_cloudwatch/types/struct.MetricDatum.html#structfield.storage_resolution) + #[configurable(metadata(docs::additional_props_description = "An AWS storage resolution."))] + #[serde(default)] + pub storage_resolution: IndexMap, } impl_generate_config_from_default!(CloudWatchMetricsSinkConfig); @@ -223,6 +233,7 @@ fn tags_to_dimensions(tags: &MetricTags) -> Vec { #[derive(Clone)] pub struct CloudWatchMetricsSvc { client: CloudwatchClient, + storage_resolution: IndexMap, } impl CloudWatchMetricsSvc { @@ -234,7 +245,10 @@ impl CloudWatchMetricsSvc { let batch = config.batch.into_batch_settings()?; let request_settings = config.request.into_settings(); - let service = CloudWatchMetricsSvc { client }; + let service = CloudWatchMetricsSvc { + client, + storage_resolution: validate_storage_resolutions(config.storage_resolution)?, + }; let buffer = PartitionBuffer::new(MetricsBuffer::new(batch.size)); let mut normalizer = MetricNormalizer::::default(); @@ -263,6 +277,7 @@ impl CloudWatchMetricsSvc { } fn encode_events(&mut self, events: Vec) -> Vec { + let resolutions = &self.storage_resolution; events .into_iter() .filter_map(|event| { @@ -271,6 +286,7 @@ impl CloudWatchMetricsSvc { .timestamp() .map(|x| AwsDateTime::from_millis(x.timestamp_millis())); let dimensions = event.tags().map(tags_to_dimensions); + let resolution = resolutions.get(&metric_name).copied(); // AwsCloudwatchMetricNormalize converts these to the right MetricKind match event.value() { MetricValue::Counter { value } => Some( @@ -279,6 +295,7 @@ impl CloudWatchMetricsSvc { .value(*value) .set_timestamp(timestamp) 
.set_dimensions(dimensions) + .set_storage_resolution(resolution) .build(), ), MetricValue::Distribution { @@ -291,6 +308,7 @@ impl CloudWatchMetricsSvc { .set_counts(Some(samples.iter().map(|s| s.rate as f64).collect())) .set_timestamp(timestamp) .set_dimensions(dimensions) + .set_storage_resolution(resolution) .build(), ), MetricValue::Set { values } => Some( @@ -299,6 +317,7 @@ impl CloudWatchMetricsSvc { .value(values.len() as f64) .set_timestamp(timestamp) .set_dimensions(dimensions) + .set_storage_resolution(resolution) .build(), ), MetricValue::Gauge { value } => Some( @@ -307,6 +326,7 @@ impl CloudWatchMetricsSvc { .value(*value) .set_timestamp(timestamp) .set_dimensions(dimensions) + .set_storage_resolution(resolution) .build(), ), _ => None, @@ -347,3 +367,16 @@ impl Service, String>> for CloudWatchMetricsSvc }) } } + +fn validate_storage_resolutions( + storage_resolutions: IndexMap, +) -> crate::Result> { + for (metric_name, storage_resolution) in storage_resolutions.iter() { + if !matches!(storage_resolution, 1 | 60) { + return Err( + format!("Storage resolution for {metric_name} should be '1' or '60'").into(), + ); + } + } + Ok(storage_resolutions) +} diff --git a/src/sinks/aws_cloudwatch_metrics/tests.rs b/src/sinks/aws_cloudwatch_metrics/tests.rs index cbb6bae2c5d25..bae6ebcfd175b 100644 --- a/src/sinks/aws_cloudwatch_metrics/tests.rs +++ b/src/sinks/aws_cloudwatch_metrics/tests.rs @@ -23,6 +23,7 @@ fn config() -> CloudWatchMetricsSinkConfig { CloudWatchMetricsSinkConfig { default_namespace: "vector".into(), region: RegionOrEndpoint::with_region("us-east-1".to_owned()), + storage_resolution: IndexMap::from([("bytes_out".to_owned(), 1)]), ..Default::default() } } @@ -33,7 +34,10 @@ async fn svc() -> CloudWatchMetricsSvc { .create_client(&ProxyConfig::from_env()) .await .unwrap(); - CloudWatchMetricsSvc { client } + CloudWatchMetricsSvc { + client, + storage_resolution: config.storage_resolution, + } } #[tokio::test] @@ -80,6 +84,7 @@ async fn 
encode_events_basic_counter() { .metric_name("bytes_out") .value(2.5) .timestamp(timestamp("2018-11-14T08:09:10.123Z")) + .storage_resolution(1) .build(), MetricDatum::builder() .metric_name("healthcheck") diff --git a/website/cue/reference/components/sinks/generated/aws_cloudwatch_metrics.cue b/website/cue/reference/components/sinks/generated/aws_cloudwatch_metrics.cue index 37c1da889c9e9..61c27b8ceb7da 100644 --- a/website/cue/reference/components/sinks/generated/aws_cloudwatch_metrics.cue +++ b/website/cue/reference/components/sinks/generated/aws_cloudwatch_metrics.cue @@ -427,6 +427,21 @@ generated: components: sinks: aws_cloudwatch_metrics: configuration: { } } } + storage_resolution: { + description: """ + A map from metric name to AWS storage resolution. + Valid values are 1 (high resolution) and 60 (standard resolution). + If unset, the AWS SDK default of 60 (standard resolution) is used. + See [AWS Metrics Resolution](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_concepts.html#Resolution_definition) + See [MetricDatum::storage_resolution](https://docs.rs/aws-sdk-cloudwatch/1.91.0/aws_sdk_cloudwatch/types/struct.MetricDatum.html#structfield.storage_resolution) + """ + required: false + type: object: options: "*": { + description: "An AWS storage resolution." + required: true + type: int: {} + } + } tls: { description: "TLS configuration." required: false From 5edc39344b6b3f5aad0d12decc9f33c930514b76 Mon Sep 17 00:00:00 2001 From: Geoffrey Oxberry Date: Tue, 18 Nov 2025 10:17:18 -0800 Subject: [PATCH 109/227] chore(ci): smp cli: v0.24.1 -> v0.25.1 (#24262) To keep Vector's CI configuration up-to-date with the latest stable version of SMP's CLI binaries, this commit updates the SMP CLI version in Vector's CI configuration from v0.24.1 to v0.25.1. Signed-off-by: Geoffrey M. 
Oxberry --- .github/workflows/regression.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/regression.yml b/.github/workflows/regression.yml index 4e284620d6a2e..738183d153646 100644 --- a/.github/workflows/regression.yml +++ b/.github/workflows/regression.yml @@ -105,7 +105,7 @@ jobs: - name: Set SMP version id: experimental-meta run: | - export SMP_CRATE_VERSION="0.24.1" + export SMP_CRATE_VERSION="0.25.1" echo "smp crate version: ${SMP_CRATE_VERSION}" echo "SMP_CRATE_VERSION=${SMP_CRATE_VERSION}" >> $GITHUB_OUTPUT From 16429fa70de7b240e3c2034fcdc9ec05eba150a7 Mon Sep 17 00:00:00 2001 From: Thomas Date: Tue, 18 Nov 2025 16:46:23 -0500 Subject: [PATCH 110/227] fix(dev): handle out of order reads in test_fair_reads (#24270) * fix(dev): handle out of order reads in test_fair_reads * chore(vdev): apply vdev rust check fixes --- src/sources/file.rs | 33 ++++++++++++++++++++------------- 1 file changed, 20 insertions(+), 13 deletions(-) diff --git a/src/sources/file.rs b/src/sources/file.rs index d8a4f6c5bd5c0..42b1e03b1c331 100644 --- a/src/sources/file.rs +++ b/src/sources/file.rs @@ -2071,7 +2071,7 @@ mod tests { writeln!(&mut older, "hello i am the old file").unwrap(); writeln!(&mut older, "i have been around a while").unwrap(); writeln!(&mut older, "you can read newer files at the same time").unwrap(); - older.sync_all().unwrap(); // sync_all is needed due to windows + older.sync_all().unwrap(); let newer_path = dir.path().join("a_newer_file"); let mut newer = File::create(&newer_path).unwrap(); @@ -2079,7 +2079,7 @@ mod tests { writeln!(&mut newer, "and i am the new file").unwrap(); writeln!(&mut newer, "this should be interleaved with the old one").unwrap(); writeln!(&mut newer, "which is fine because we want fairness").unwrap(); - newer.sync_all().unwrap(); // sync_all is needed due to windows + newer.sync_all().unwrap(); let received = run_file_source( &config, @@ -2092,17 +2092,24 @@ mod tests { let received = 
extract_messages_value(received); - assert_eq!( - received, - vec![ - "hello i am the old file".into(), - "and i am the new file".into(), - "i have been around a while".into(), - "this should be interleaved with the old one".into(), - "you can read newer files at the same time".into(), - "which is fine because we want fairness".into(), - ] - ); + let old_first = vec![ + "hello i am the old file".into(), + "and i am the new file".into(), + "i have been around a while".into(), + "this should be interleaved with the old one".into(), + "you can read newer files at the same time".into(), + "which is fine because we want fairness".into(), + ]; + let new_first: Vec<_> = old_first + .chunks(2) + .flat_map(|chunk| chunk.iter().rev().cloned().collect::>()) + .collect(); + + if received[0] == old_first[0] { + assert_eq!(received, old_first); + } else { + assert_eq!(received, new_first); + } } #[tokio::test] From 821c1f798b5f1814a7a0b26882dfd391a1f61a91 Mon Sep 17 00:00:00 2001 From: Thomas Date: Wed, 19 Nov 2025 10:52:47 -0500 Subject: [PATCH 111/227] chore(deps): update mongodb crate to 3.3.0 (#24271) * chore(deps): update mongodb crate to 3.3.0 * Add mongodb licenses * Add changelog --- Cargo.lock | 469 +++++++++--------- Cargo.toml | 2 +- LICENSE-3rdparty.csv | 21 +- changelog.d/24271_mongodb_upgrade.breaking.md | 3 + license-tool.toml | 1 + src/sources/mongodb_metrics/mod.rs | 9 +- 6 files changed, 252 insertions(+), 253 deletions(-) create mode 100644 changelog.d/24271_mongodb_upgrade.breaking.md diff --git a/Cargo.lock b/Cargo.lock index 6e3b400cbb5ae..611e2675ab80e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1603,7 +1603,7 @@ dependencies = [ "aws-smithy-async", "aws-smithy-runtime-api", "aws-smithy-types", - "rustc_version 0.4.1", + "rustc_version", "tracing 0.1.41", ] @@ -1720,7 +1720,7 @@ dependencies = [ "quick-xml 0.31.0", "rand 0.8.5", "reqwest 0.12.9", - "rustc_version 0.4.1", + "rustc_version", "serde", "serde_json", "time", @@ -1741,7 +1741,7 @@ dependencies 
= [ "futures 0.3.31", "openssl", "pin-project", - "rustc_version 0.4.1", + "rustc_version", "serde", "serde_json", "tracing 0.1.41", @@ -2074,7 +2074,7 @@ dependencies = [ "serde", "serde_json", "serde_repr", - "serde_with 3.14.0", + "serde_with", ] [[package]] @@ -2154,18 +2154,20 @@ dependencies = [ [[package]] name = "bson" -version = "2.8.0" +version = "2.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61570f4de0cc9c03b481c96057b3ae7c6ff7b5b35da8b0832c44f0131987a718" +checksum = "7969a9ba84b0ff843813e7249eed1678d9b6607ce5a3b8f0a47af3fcf7978e6e" dependencies = [ "ahash 0.8.11", - "base64 0.13.1", + "base64 0.22.1", "bitvec", + "getrandom 0.2.15", + "getrandom 0.3.1", "hex", - "indexmap 1.9.3", + "indexmap 2.12.0", "js-sys", "once_cell", - "rand 0.8.5", + "rand 0.9.2", "serde", "serde_bytes", "serde_json", @@ -2282,7 +2284,7 @@ version = "10.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c06acb4f71407ba205a07cb453211e0e6a67b21904e47f6ba1f9589e38f2e454" dependencies = [ - "semver 1.0.26", + "semver", "serde", "toml 0.8.23", "url", @@ -2489,7 +2491,7 @@ dependencies = [ "anstream", "anstyle", "clap_lex", - "strsim 0.11.1", + "strsim", "terminal_size", ] @@ -2575,7 +2577,7 @@ dependencies = [ "rust_decimal", "serde", "serde_json", - "serde_with 3.14.0", + "serde_with", "similar-asserts", "smallvec", "snafu 0.8.9", @@ -2842,7 +2844,7 @@ checksum = "2eac901828f88a5241ee0600950ab981148a18f2f756900ffba1b125ca6a3ef9" dependencies = [ "cookie", "document-features", - "idna 1.0.3", + "idna", "log", "publicsuffix", "serde", @@ -2917,7 +2919,7 @@ version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3a47af21622d091a8f0fb295b88bc886ac74efcc613efc19f5d0b21de5c89e47" dependencies = [ - "rustc_version 0.4.1", + "rustc_version", ] [[package]] @@ -3170,7 +3172,7 @@ dependencies = [ "curve25519-dalek-derive", "digest", "fiat-crypto", - "rustc_version 0.4.1", + 
"rustc_version", "subtle", "zeroize", ] @@ -3186,16 +3188,6 @@ dependencies = [ "syn 2.0.106", ] -[[package]] -name = "darling" -version = "0.13.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a01d95850c592940db9b8194bc39f4bc0e89dee5c4265e4b1807c34a9aba453c" -dependencies = [ - "darling_core 0.13.4", - "darling_macro 0.13.4", -] - [[package]] name = "darling" version = "0.20.11" @@ -3216,20 +3208,6 @@ dependencies = [ "darling_macro 0.21.3", ] -[[package]] -name = "darling_core" -version = "0.13.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "859d65a907b6852c9361e3185c862aae7fafd2887876799fa55f5f99dc40d610" -dependencies = [ - "fnv", - "ident_case", - "proc-macro2 1.0.101", - "quote 1.0.40", - "strsim 0.10.0", - "syn 1.0.109", -] - [[package]] name = "darling_core" version = "0.20.11" @@ -3240,7 +3218,7 @@ dependencies = [ "ident_case", "proc-macro2 1.0.101", "quote 1.0.40", - "strsim 0.11.1", + "strsim", "syn 2.0.106", ] @@ -3254,21 +3232,10 @@ dependencies = [ "ident_case", "proc-macro2 1.0.101", "quote 1.0.40", - "strsim 0.11.1", + "strsim", "syn 2.0.106", ] -[[package]] -name = "darling_macro" -version = "0.13.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c972679f83bdf9c42bd905396b6c3588a843a17f0f16dfcfa3e2c5d57441835" -dependencies = [ - "darling_core 0.13.4", - "quote 1.0.40", - "syn 1.0.109", -] - [[package]] name = "darling_macro" version = "0.20.11" @@ -3335,7 +3302,7 @@ dependencies = [ "parking_lot 0.12.4", "percent-encoding", "reqwest 0.12.9", - "semver 1.0.26", + "semver", "serde", "serde_json", "tokio", @@ -3407,6 +3374,28 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "derive-syn-parse" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d65d7ce8132b7c0e54497a4d9a55a1c2a0912a0d786cf894472ba818fba45762" +dependencies = [ + "proc-macro2 1.0.101", + "quote 1.0.40", + "syn 2.0.106", +] + 
+[[package]] +name = "derive-where" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef941ded77d15ca19b40374869ac6000af1c9f2a4c0f3d4c70926287e6364a8f" +dependencies = [ + "proc-macro2 1.0.101", + "quote 1.0.40", + "syn 2.0.106", +] + [[package]] name = "derive_arbitrary" version = "1.3.2" @@ -3458,7 +3447,7 @@ dependencies = [ "convert_case 0.4.0", "proc-macro2 1.0.101", "quote 1.0.40", - "rustc_version 0.4.1", + "rustc_version", "syn 1.0.109", ] @@ -3551,7 +3540,7 @@ version = "0.1.0" dependencies = [ "criterion", "data-encoding", - "hickory-proto", + "hickory-proto 0.25.2", "snafu 0.8.9", ] @@ -3565,7 +3554,7 @@ dependencies = [ "chrono", "chrono-tz", "dnsmsg-parser", - "hickory-proto", + "hickory-proto 0.25.2", "paste", "prost 0.12.6", "prost-build 0.12.6", @@ -3784,18 +3773,6 @@ dependencies = [ "vrl", ] -[[package]] -name = "enum-as-inner" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21cdad81446a7f7dc43f6a77409efeb9733d2fa65553efef6018ef257c959b73" -dependencies = [ - "heck 0.4.1", - "proc-macro2 1.0.101", - "quote 1.0.40", - "syn 1.0.109", -] - [[package]] name = "enum-as-inner" version = "0.6.0" @@ -4150,7 +4127,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09b6620799e7340ebd9968d2e0708eb82cf1971e9a16821e2091b6d6e475eed5" dependencies = [ "bitflags 2.9.0", - "rustc_version 0.4.1", + "rustc_version", ] [[package]] @@ -4986,6 +4963,30 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" +[[package]] +name = "hickory-proto" +version = "0.24.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92652067c9ce6f66ce53cc38d1169daa36e6e7eb7dd3b63b5103bd9d97117248" +dependencies = [ + "async-trait", + "cfg-if", + "data-encoding", + "enum-as-inner", + "futures-channel", + "futures-io", + 
"futures-util", + "idna", + "ipnet", + "once_cell", + "rand 0.8.5", + "thiserror 1.0.68", + "tinyvec", + "tokio", + "tracing 0.1.41", + "url", +] + [[package]] name = "hickory-proto" version = "0.25.2" @@ -4996,11 +4997,11 @@ dependencies = [ "bitflags 2.9.0", "cfg-if", "data-encoding", - "enum-as-inner 0.6.0", + "enum-as-inner", "futures-channel", "futures-io", "futures-util", - "idna 1.0.3", + "idna", "ipnet", "once_cell", "rand 0.9.2", @@ -5013,6 +5014,27 @@ dependencies = [ "url", ] +[[package]] +name = "hickory-resolver" +version = "0.24.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cbb117a1ca520e111743ab2f6688eddee69db4e0ea242545a604dce8a66fd22e" +dependencies = [ + "cfg-if", + "futures-util", + "hickory-proto 0.24.4", + "ipconfig", + "lru-cache", + "once_cell", + "parking_lot 0.12.4", + "rand 0.8.5", + "resolv-conf", + "smallvec", + "thiserror 1.0.68", + "tokio", + "tracing 0.1.41", +] + [[package]] name = "hkdf" version = "0.12.3" @@ -5189,7 +5211,7 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite", - "socket2 0.4.10", + "socket2 0.5.10", "tokio", "tower-service", "tracing 0.1.41", @@ -5558,17 +5580,6 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" -[[package]] -name = "idna" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "418a0a6fab821475f634efe3ccc45c013f742efe03d853e8d3355d5cb850ecf8" -dependencies = [ - "matches", - "unicode-bidi", - "unicode-normalization", -] - [[package]] name = "idna" version = "1.0.3" @@ -5939,7 +5950,7 @@ dependencies = [ "email_address", "fancy-regex 0.16.1", "fraction", - "idna 1.0.3", + "idna", "itoa", "num-cmp", "num-traits", @@ -6536,6 +6547,54 @@ dependencies = [ "libc", ] +[[package]] +name = "macro_magic" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"cc33f9f0351468d26fbc53d9ce00a096c8522ecb42f19b50f34f2c422f76d21d" +dependencies = [ + "macro_magic_core", + "macro_magic_macros", + "quote 1.0.40", + "syn 2.0.106", +] + +[[package]] +name = "macro_magic_core" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1687dc887e42f352865a393acae7cf79d98fab6351cde1f58e9e057da89bf150" +dependencies = [ + "const-random", + "derive-syn-parse", + "macro_magic_core_macros", + "proc-macro2 1.0.101", + "quote 1.0.40", + "syn 2.0.106", +] + +[[package]] +name = "macro_magic_core_macros" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b02abfe41815b5bd98dbd4260173db2c116dda171dc0fe7838cb206333b83308" +dependencies = [ + "proc-macro2 1.0.101", + "quote 1.0.40", + "syn 2.0.106", +] + +[[package]] +name = "macro_magic_macros" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73ea28ee64b88876bf45277ed9a5817c1817df061a74f2b988971a12570e5869" +dependencies = [ + "macro_magic_core", + "quote 1.0.40", + "syn 2.0.106", +] + [[package]] name = "malloc_buf" version = "0.0.6" @@ -6560,12 +6619,6 @@ dependencies = [ "regex-automata 0.4.8", ] -[[package]] -name = "matches" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2532096657941c2fea9c289d370a250971c689d4f143798ff67113ec042024a5" - [[package]] name = "matchit" version = "0.7.3" @@ -6803,7 +6856,7 @@ dependencies = [ "once_cell", "parking_lot 0.12.4", "quanta", - "rustc_version 0.4.1", + "rustc_version", "smallvec", "tagptr", "thiserror 1.0.68", @@ -6811,51 +6864,84 @@ dependencies = [ "uuid", ] +[[package]] +name = "mongocrypt" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22426d6318d19c5c0773f783f85375265d6a8f0fa76a733da8dc4355516ec63d" +dependencies = [ + "bson", + "mongocrypt-sys", + "once_cell", + "serde", +] + +[[package]] +name = 
"mongocrypt-sys" +version = "0.1.4+1.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dda42df21d035f88030aad8e877492fac814680e1d7336a57b2a091b989ae388" + [[package]] name = "mongodb" -version = "2.8.2" +version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef206acb1b72389b49bc9985efe7eb1f8a9bb18e5680d262fac26c07f44025f1" +checksum = "622f272c59e54a3c85f5902c6b8e7b1653a6b6681f45e4c42d6581301119a4b8" dependencies = [ "async-trait", "base64 0.13.1", "bitflags 1.3.2", "bson", "chrono", - "derivative", + "derive-where", "derive_more", "futures-core", "futures-executor", "futures-io", "futures-util", "hex", + "hickory-proto 0.24.4", + "hickory-resolver", "hmac", - "lazy_static", + "macro_magic", "md-5", + "mongocrypt", + "mongodb-internal-macros", + "once_cell", "pbkdf2", "percent-encoding", "rand 0.8.5", "rustc_version_runtime", - "rustls 0.21.12", - "rustls-pemfile 1.0.4", + "rustls 0.23.23", + "rustversion", "serde", "serde_bytes", - "serde_with 1.14.0", - "sha-1", + "serde_with", + "sha1", "sha2", - "socket2 0.4.10", + "socket2 0.5.10", "stringprep", - "strsim 0.10.0", + "strsim", "take_mut", "thiserror 1.0.68", "tokio", - "tokio-rustls 0.24.1", + "tokio-rustls 0.26.2", "tokio-util", - "trust-dns-proto", - "trust-dns-resolver", - "typed-builder 0.10.0", + "typed-builder 0.20.1", "uuid", - "webpki-roots 0.25.2", + "webpki-roots 0.26.1", +] + +[[package]] +name = "mongodb-internal-macros" +version = "3.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "63981427a0f26b89632fd2574280e069d09fb2912a3138da15de0174d11dd077" +dependencies = [ + "macro_magic", + "proc-macro2 1.0.101", + "quote 1.0.40", + "syn 2.0.106", ] [[package]] @@ -7545,7 +7631,7 @@ dependencies = [ "serde_json", "serde_path_to_error", "serde_plain", - "serde_with 3.14.0", + "serde_with", "sha2", "subtle", "thiserror 1.0.68", @@ -8427,7 +8513,7 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "be769465445e8c1474e9c5dac2018218498557af32d9ed057325ec9a41ae81bf" dependencies = [ "heck 0.5.0", - "itertools 0.10.5", + "itertools 0.14.0", "log", "multimap", "once_cell", @@ -8473,7 +8559,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a56d757972c98b346a9b766e3f02746cde6dd1cd1d1d563472929fdd74bec4d" dependencies = [ "anyhow", - "itertools 0.10.5", + "itertools 0.14.0", "proc-macro2 1.0.101", "quote 1.0.40", "syn 2.0.106", @@ -8561,7 +8647,7 @@ version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6f42ea446cab60335f76979ec15e12619a2165b5ae2c12166bef27d283a9fadf" dependencies = [ - "idna 1.0.3", + "idna", "psl-types", ] @@ -9322,13 +9408,9 @@ dependencies = [ [[package]] name = "resolv-conf" -version = "0.7.0" +version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52e44394d2086d010551b14b53b1f24e31647570cd1deb0379e2c21b329aba00" -dependencies = [ - "hostname 0.3.1", - "quick-error", -] +checksum = "6b3789b30bd25ba102de4beabd95d21ac45b69b1be7d14522bab988c526d6799" [[package]] name = "retry-policies" @@ -9492,7 +9574,7 @@ dependencies = [ "quote 1.0.40", "regex", "relative-path", - "rustc_version 0.4.1", + "rustc_version", "syn 2.0.106", "unicode-ident", ] @@ -9543,32 +9625,23 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "583034fd73374156e66797ed8e5b0d5690409c9226b22d87cb7f19821c05d152" -[[package]] -name = "rustc_version" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a" -dependencies = [ - "semver 0.9.0", -] - [[package]] name = "rustc_version" version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92" dependencies = [ 
- "semver 1.0.26", + "semver", ] [[package]] name = "rustc_version_runtime" -version = "0.2.1" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d31b7153270ebf48bf91c65ae5b0c00e749c4cfad505f66530ac74950249582f" +checksum = "2dd18cd2bae1820af0b6ad5e54f4a51d0f3fcc53b05f845675074efcc7af071d" dependencies = [ - "rustc_version 0.2.3", - "semver 0.9.0", + "rustc_version", + "semver", ] [[package]] @@ -9643,6 +9716,7 @@ version = "0.23.23" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "47796c98c480fce5406ef69d1c76378375492c3b0a0de587be0c1d9feb12f395" dependencies = [ + "log", "once_cell", "ring", "rustls-pki-types", @@ -9947,15 +10021,6 @@ dependencies = [ "libc", ] -[[package]] -name = "semver" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403" -dependencies = [ - "semver-parser", -] - [[package]] name = "semver" version = "1.0.26" @@ -9965,12 +10030,6 @@ dependencies = [ "serde", ] -[[package]] -name = "semver-parser" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" - [[package]] name = "serde" version = "1.0.228" @@ -10135,16 +10194,6 @@ dependencies = [ "serde", ] -[[package]] -name = "serde_with" -version = "1.14.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "678b5a069e50bf00ecd22d0cd8ddf7c236f68581b03db652061ed5eb13a312ff" -dependencies = [ - "serde", - "serde_with_macros 1.5.2", -] - [[package]] name = "serde_with" version = "3.14.0" @@ -10161,22 +10210,10 @@ dependencies = [ "serde", "serde_derive", "serde_json", - "serde_with_macros 3.14.0", + "serde_with_macros", "time", ] -[[package]] -name = "serde_with_macros" -version = "1.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"e182d6ec6f05393cc0e5ed1bf81ad6db3a8feedf8ee515ecdd369809bcce8082" -dependencies = [ - "darling 0.13.4", - "proc-macro2 1.0.101", - "quote 1.0.40", - "syn 1.0.109", -] - [[package]] name = "serde_with_macros" version = "3.14.0" @@ -10817,12 +10854,6 @@ dependencies = [ "vte", ] -[[package]] -name = "strsim" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" - [[package]] name = "strsim" version = "0.11.1" @@ -11982,51 +12013,6 @@ version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b2cb4fbb9995eeb36ac86fadf24031ccd58f99d6b4b2d7b911db70bddb80d90" -[[package]] -name = "trust-dns-proto" -version = "0.21.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c31f240f59877c3d4bb3b3ea0ec5a6a0cff07323580ff8c7a605cd7d08b255d" -dependencies = [ - "async-trait", - "cfg-if", - "data-encoding", - "enum-as-inner 0.4.0", - "futures-channel", - "futures-io", - "futures-util", - "idna 0.2.3", - "ipnet", - "lazy_static", - "log", - "rand 0.8.5", - "smallvec", - "thiserror 1.0.68", - "tinyvec", - "tokio", - "url", -] - -[[package]] -name = "trust-dns-resolver" -version = "0.21.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4ba72c2ea84515690c9fcef4c6c660bb9df3036ed1051686de84605b74fd558" -dependencies = [ - "cfg-if", - "futures-util", - "ipconfig", - "lazy_static", - "log", - "lru-cache", - "parking_lot 0.12.4", - "resolv-conf", - "smallvec", - "thiserror 1.0.68", - "tokio", - "trust-dns-proto", -] - [[package]] name = "try-lock" version = "0.2.4" @@ -12089,22 +12075,20 @@ checksum = "9ea3136b675547379c4bd395ca6b938e5ad3c3d20fad76e7fe85f9e0d011419c" [[package]] name = "typed-builder" -version = "0.10.0" +version = "0.16.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89851716b67b937e393b3daa8423e67ddfc4bbbf1654bcf05488e95e0828db0c" 
+checksum = "34085c17941e36627a879208083e25d357243812c30e7d7387c3b954f30ade16" dependencies = [ - "proc-macro2 1.0.101", - "quote 1.0.40", - "syn 1.0.109", + "typed-builder-macro 0.16.2", ] [[package]] name = "typed-builder" -version = "0.16.2" +version = "0.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34085c17941e36627a879208083e25d357243812c30e7d7387c3b954f30ade16" +checksum = "cd9d30e3a08026c78f246b173243cf07b3696d274debd26680773b6773c2afc7" dependencies = [ - "typed-builder-macro", + "typed-builder-macro 0.20.1", ] [[package]] @@ -12118,6 +12102,17 @@ dependencies = [ "syn 2.0.106", ] +[[package]] +name = "typed-builder-macro" +version = "0.20.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c36781cc0e46a83726d9879608e4cf6c2505237e263a8eb8c24502989cfdb28" +dependencies = [ + "proc-macro2 1.0.101", + "quote 1.0.40", + "syn 2.0.106", +] + [[package]] name = "typenum" version = "1.17.0" @@ -12170,7 +12165,7 @@ checksum = "6cbccdbe531c8d553812a609bdb70c0d1002ad91333498e18df42c98744b15cc" dependencies = [ "proc-macro2 1.0.101", "quote 1.0.40", - "rustc_version 0.4.1", + "rustc_version", "syn 2.0.106", ] @@ -12346,7 +12341,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32f8b686cadd1473f4bd0117a5d28d36b1ade384ea9b5069a1c40aefed7fda60" dependencies = [ "form_urlencoded", - "idna 1.0.3", + "idna", "percent-encoding", "serde", ] @@ -12446,7 +12441,7 @@ dependencies = [ "paste", "regex", "reqwest 0.11.26", - "semver 1.0.26", + "semver", "serde", "serde_json", "serde_yaml", @@ -12539,7 +12534,7 @@ dependencies = [ "headers", "heim", "hex", - "hickory-proto", + "hickory-proto 0.25.2", "hostname 0.4.0", "http 0.2.9", "http 1.3.1", @@ -12607,12 +12602,12 @@ dependencies = [ "rumqttc", "rust_decimal", "seahash", - "semver 1.0.26", + "semver", "serde", "serde-toml-merge", "serde_bytes", "serde_json", - "serde_with 3.14.0", + "serde_with", "serde_yaml", "serial_test", 
"similar-asserts", @@ -12767,7 +12762,7 @@ dependencies = [ "num-traits", "serde", "serde_json", - "serde_with 3.14.0", + "serde_with", "snafu 0.8.9", "toml 0.9.8", "tracing 0.1.41", @@ -12858,7 +12853,7 @@ dependencies = [ "security-framework 3.5.1", "serde", "serde_json", - "serde_with 3.14.0", + "serde_with", "serde_yaml", "similar-asserts", "smallvec", @@ -13085,7 +13080,7 @@ dependencies = [ "hmac", "hostname 0.4.0", "iana-time-zone", - "idna 1.0.3", + "idna", "indexmap 2.12.0", "indoc", "influxdb-line-protocol", diff --git a/Cargo.toml b/Cargo.toml index 7a781145b881e..5a6bc7037e5f5 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -385,7 +385,7 @@ listenfd = { version = "1.0.2", default-features = false, optional = true } lru = { version = "0.16.0", default-features = false } maxminddb = { version = "0.26.0", default-features = false, optional = true, features = ["simdutf8"] } md-5 = { version = "0.10", default-features = false, optional = true } -mongodb = { version = "2.8.2", default-features = false, features = ["tokio-runtime"], optional = true } +mongodb = { version = "3.3.0", default-features = false, optional = true, features = ["compat-3-0-0", "dns-resolver", "rustls-tls"] } async-nats = { version = "0.42.0", default-features = false, optional = true, features = ["ring"] } nkeys = { version = "0.4.5", default-features = false, optional = true } nom = { workspace = true, optional = true } diff --git a/LICENSE-3rdparty.csv b/LICENSE-3rdparty.csv index d8d6edc54a79d..578c814f83225 100644 --- a/LICENSE-3rdparty.csv +++ b/LICENSE-3rdparty.csv @@ -230,6 +230,8 @@ deadpool-runtime,https://github.com/bikeshedder/deadpool,MIT OR Apache-2.0,Micha der,https://github.com/RustCrypto/formats/tree/master/der,Apache-2.0 OR MIT,RustCrypto Developers deranged,https://github.com/jhpratt/deranged,MIT OR Apache-2.0,Jacob Pratt derivative,https://github.com/mcarton/rust-derivative,MIT OR Apache-2.0,mcarton +derive-syn-parse,https://github.com/sharnoff/derive-syn-parse,MIT OR 
Apache-2.0,sharnoff +derive-where,https://github.com/ModProg/derive-where,MIT OR Apache-2.0,The derive-where Authors derive_arbitrary,https://github.com/rust-fuzz/arbitrary,MIT OR Apache-2.0,"The Rust-Fuzz Project Developers, Nick Fitzgerald , Manish Goregaokar , Andre Bogus , Corey Farwell " derive_builder,https://github.com/colin-kiegel/rust-derive-builder,MIT OR Apache-2.0,"Colin Kiegel , Pascal Hertleif , Jan-Erik Rediger , Ted Driggs " derive_builder_core,https://github.com/colin-kiegel/rust-derive-builder,MIT OR Apache-2.0,"Colin Kiegel , Pascal Hertleif , Jan-Erik Rediger , Ted Driggs " @@ -345,6 +347,7 @@ heim-runtime,https://github.com/heim-rs/heim,Apache-2.0 OR MIT,svartalf hickory-proto,https://github.com/hickory-dns/hickory-dns,MIT OR Apache-2.0,The contributors to Hickory DNS +hickory-resolver,https://github.com/hickory-dns/hickory-dns,MIT OR Apache-2.0,The contributors to Hickory DNS hkdf,https://github.com/RustCrypto/KDFs,MIT OR Apache-2.0,RustCrypto Developers hmac,https://github.com/RustCrypto/MACs,MIT OR Apache-2.0,RustCrypto Developers home,https://github.com/rust-lang/cargo,MIT OR Apache-2.0,Brian Anderson @@ -452,10 +455,13 @@ lz4-sys,https://github.com/10xGenomics/lz4-rs,MIT,"Jens Heyens , Arthur Silva , ticki " macaddr,https://github.com/svartalf/rust-macaddr,Apache-2.0 OR MIT,svartalf mach,https://github.com/fitzgen/mach,BSD-2-Clause,"Nick Fitzgerald , David Cuddeback , Gonzalo Brito Gadeschi " +macro_magic,https://github.com/sam0x17/macro_magic,MIT,sam0x17 +macro_magic_core,https://github.com/sam0x17/macro_magic,MIT,The macro_magic_core Authors +macro_magic_core_macros,https://github.com/sam0x17/macro_magic,MIT,The macro_magic_core_macros Authors +macro_magic_macros,https://github.com/sam0x17/macro_magic,MIT,The macro_magic_macros Authors malloc_buf,https://github.com/SSheldon/malloc_buf,MIT,Steven Sheldon match_cfg,https://github.com/gnzlbg/match_cfg,MIT OR Apache-2.0,gnzlbg matchers,https://github.com/hawkw/matchers,MIT,Eliza Weisman 
-matches,https://github.com/SimonSapin/rust-std-candidates,MIT,The matches Authors matchit,https://github.com/ibraheemdev/matchit,MIT AND BSD-3-Clause,Ibraheem Ahmed maxminddb,https://github.com/oschwald/maxminddb-rust,ISC,Gregory J. Oschwald md-5,https://github.com/RustCrypto/hashes,MIT OR Apache-2.0,RustCrypto Developers @@ -474,7 +480,10 @@ mlua,https://github.com/mlua-rs/mlua,MIT,"Aleksandr Orlenko , kyre mlua-sys,https://github.com/mlua-rs/mlua,MIT,Aleksandr Orlenko mlua_derive,https://github.com/khvzak/mlua,MIT,Aleksandr Orlenko moka,https://github.com/moka-rs/moka,MIT OR Apache-2.0,The moka Authors -mongodb,https://github.com/mongodb/mongo-rust-driver,Apache-2.0,"Saghm Rossi , Patrick Freed , Isabel Atkinson , Abraham Egnor , Kaitlin Mahar " +mongocrypt,https://github.com/mongodb/libmongocrypt-rust,Apache-2.0,"Abraham Egnor , Isabel Atkinson " +mongocrypt-sys,https://github.com/mongodb/libmongocrypt-rust,Apache-2.0,"Abraham Egnor , Isabel Atkinson " +mongodb,https://github.com/mongodb/mongo-rust-driver,Apache-2.0,"Saghm Rossi , Patrick Freed , Isabel Atkinson , Abraham Egnor , Kaitlin Mahar , Patrick Meredith " +mongodb-internal-macros,https://github.com/mongodb/mongo-rust-driver,Apache-2.0,The mongodb-internal-macros Authors multer,https://github.com/rousan/multer-rs,MIT,Rousan Ali native-tls,https://github.com/sfackler/rust-native-tls,MIT OR Apache-2.0,Steven Fackler ndk-context,https://github.com/rust-windowing/android-ndk-rs,MIT OR Apache-2.0,The Rust Windowing contributors @@ -634,7 +643,7 @@ rend,https://github.com/djkoloski/rend,MIT,David Koloski reqwest,https://github.com/seanmonstar/reqwest,MIT OR Apache-2.0,Sean McArthur reqwest-middleware,https://github.com/TrueLayer/reqwest-middleware,MIT OR Apache-2.0,Rodrigo Gryzinski reqwest-retry,https://github.com/TrueLayer/reqwest-middleware,MIT OR Apache-2.0,Rodrigo Gryzinski -resolv-conf,http://github.com/tailhook/resolv-conf,MIT OR Apache-2.0,paul@colomiets.name 
+resolv-conf,https://github.com/hickory-dns/resolv-conf,MIT OR Apache-2.0,The resolv-conf Authors retry-policies,https://github.com/TrueLayer/retry-policies,MIT OR Apache-2.0,Luca Palmieri rfc6979,https://github.com/RustCrypto/signatures/tree/master/rfc6979,Apache-2.0 OR MIT,RustCrypto Developers ring,https://github.com/briansmith/ring,Apache-2.0 AND ISC,The ring Authors @@ -651,7 +660,6 @@ rumqttc,https://github.com/bytebeamio/rumqtt,Apache-2.0,tekjar rustc-demangle,https://github.com/rust-lang/rustc-demangle,MIT OR Apache-2.0,Alex Crichton rustc-hash,https://github.com/rust-lang/rustc-hash,Apache-2.0 OR MIT,The Rust Project Developers -rustc_version,https://github.com/Kimundi/rustc-version-rs,MIT OR Apache-2.0,Marvin Löbel rustc_version,https://github.com/djc/rustc-version-rs,MIT OR Apache-2.0,The rustc_version Authors rustc_version_runtime,https://github.com/seppo0010/rustc-version-runtime-rs,MIT,Sebastian Waisbrot rustix,https://github.com/bytecodealliance/rustix,Apache-2.0 WITH LLVM-exception OR Apache-2.0 OR MIT,"Dan Gohman , Jakub Konka " @@ -679,8 +687,6 @@ secrecy,https://github.com/iqlusioninc/crates/tree/main/secrecy,Apache-2.0 OR MI security-framework,https://github.com/kornelski/rust-security-framework,MIT OR Apache-2.0,"Steven Fackler , Kornel " security-framework-sys,https://github.com/kornelski/rust-security-framework,MIT OR Apache-2.0,"Steven Fackler , Kornel " semver,https://github.com/dtolnay/semver,MIT OR Apache-2.0,David Tolnay -semver,https://github.com/steveklabnik/semver,MIT OR Apache-2.0,"Steve Klabnik , The Rust Project Developers" -semver-parser,https://github.com/steveklabnik/semver-parser,MIT OR Apache-2.0,Steve Klabnik serde,https://github.com/serde-rs/serde,MIT OR Apache-2.0,"Erick Tryzelaar , David Tolnay " serde-toml-merge,https://github.com/jdrouet/serde-toml-merge,MIT,Jeremie Drouet serde-value,https://github.com/arcnmx/serde-value,MIT,arcnmx @@ -738,7 +744,6 @@ 
static_assertions_next,https://github.com/scuffletv/static-assertions,MIT OR Apa stream-cancel,https://github.com/jonhoo/stream-cancel,MIT OR Apache-2.0,Jon Gjengset stringprep,https://github.com/sfackler/rust-stringprep,MIT OR Apache-2.0,Steven Fackler strip-ansi-escapes,https://github.com/luser/strip-ansi-escapes,Apache-2.0 OR MIT,Ted Mielczarek -strsim,https://github.com/dguo/strsim-rs,MIT,Danny Guo strsim,https://github.com/rapidfuzz/strsim-rs,MIT,"Danny Guo , maxbachmann " strum,https://github.com/Peternator7/strum,MIT,Peter Glotfelty strum_macros,https://github.com/Peternator7/strum,MIT,Peter Glotfelty @@ -806,8 +811,6 @@ tracing-serde,https://github.com/tokio-rs/tracing,MIT,Tokio Contributors , David Barsky , Tokio Contributors " tracing-tower,https://github.com/tokio-rs/tracing,MIT,Eliza Weisman triomphe,https://github.com/Manishearth/triomphe,MIT OR Apache-2.0,"Manish Goregaokar , The Servo Project Developers" -trust-dns-proto,https://github.com/bluejekyll/trust-dns,MIT OR Apache-2.0,Benjamin Fry -trust-dns-resolver,https://github.com/bluejekyll/trust-dns,MIT OR Apache-2.0,Benjamin Fry try-lock,https://github.com/seanmonstar/try-lock,MIT,Sean McArthur tryhard,https://github.com/EmbarkStudios/tryhard,MIT OR Apache-2.0,Embark tungstenite,https://github.com/snapview/tungstenite-rs,MIT OR Apache-2.0,"Alexey Galakhov, Daniel Abramov" diff --git a/changelog.d/24271_mongodb_upgrade.breaking.md b/changelog.d/24271_mongodb_upgrade.breaking.md new file mode 100644 index 0000000000000..626df09e468b3 --- /dev/null +++ b/changelog.d/24271_mongodb_upgrade.breaking.md @@ -0,0 +1,3 @@ +The `mongodb_metrics` source now requires MongoDB Server 4.2 or later. MongoDB Server 4.0, the previously supported minimum version, reached end-of-life on April 30, 2022. 
+ +authors: thomasqueirozb diff --git a/license-tool.toml b/license-tool.toml index 70e0ed9a741e5..53f390edeba15 100644 --- a/license-tool.toml +++ b/license-tool.toml @@ -4,6 +4,7 @@ "crunchy" = { origin = "https://github.com/eira-fransham/crunchy" } "openssl-macros" = { origin = "https://github.com/sfackler/rust-openssl" } "serde_nanos" = { origin = "https://github.com/caspervonb/serde_nanos" } +"mongodb-internal-macros" = { origin = "https://github.com/mongodb/mongo-rust-driver" } # rust-license-tool can't find the license for jsonpath-rust 0.5.1 "jsonpath-rust" = { license = "MIT", origin = "https://github.com/besok/jsonpath-rust" } diff --git a/src/sources/mongodb_metrics/mod.rs b/src/sources/mongodb_metrics/mod.rs index e2a53fb293d65..7fd3047b57016 100644 --- a/src/sources/mongodb_metrics/mod.rs +++ b/src/sources/mongodb_metrics/mod.rs @@ -195,7 +195,7 @@ impl MongoDbMetrics { let doc = self .client .database("admin") - .run_command(doc! { "isMaster": 1 }, None) + .run_command(doc! { "isMaster": 1 }) .await .map_err(CollectError::Mongo)?; let msg: CommandIsMaster = from_document(doc).map_err(CollectError::Bson)?; @@ -216,7 +216,7 @@ impl MongoDbMetrics { let doc = self .client .database("admin") - .run_command(doc! { "buildInfo": 1 }, None) + .run_command(doc! { "buildInfo": 1 }) .await .map_err(CollectError::Mongo)?; from_document(doc).map_err(CollectError::Bson) @@ -281,10 +281,7 @@ impl MongoDbMetrics { let command = doc! 
{ "serverStatus": 1, "opLatencies": { "histograms": true }}; let db = self.client.database("admin"); - let doc = db - .run_command(command, None) - .await - .map_err(CollectError::Mongo)?; + let doc = db.run_command(command).await.map_err(CollectError::Mongo)?; let byte_size = document_size(&doc); emit!(EndpointBytesReceived { byte_size, From 12c880f33c5aaa45216d1a97a7977e2a8d1f1855 Mon Sep 17 00:00:00 2001 From: "@Ara Pulido" Date: Wed, 19 Nov 2025 17:07:57 +0100 Subject: [PATCH 112/227] chore(ci): Add CLA signature workflow (#24276) * Add CLA signature workflow * format * Add pull_request for debugging * Revert "Add pull_request for debugging" This reverts commit 482231669c1c453036048dec4f8968232b2e054c. --------- Co-authored-by: Thomas --- .github/workflows/cla.yml | 44 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 44 insertions(+) create mode 100644 .github/workflows/cla.yml diff --git a/.github/workflows/cla.yml b/.github/workflows/cla.yml new file mode 100644 index 0000000000000..04364b8046aac --- /dev/null +++ b/.github/workflows/cla.yml @@ -0,0 +1,44 @@ +name: "CLA Assistant" +on: + issue_comment: + types: [created] + pull_request_target: + types: [opened, closed, synchronize] + +permissions: + contents: read + pull-requests: write + id-token: write # Needed to federate tokens. 
+ actions: write + +jobs: + CLAAssistant: + runs-on: ubuntu-latest + steps: + - uses: DataDog/dd-octo-sts-action@08f2144903ced3254a3dafec2592563409ba2aa0 # v1.0.1 + id: octo-sts + with: + scope: DataDog/cla-signatures + policy: self.write-signatures-vector + + - name: "CLA Assistant" + if: (github.event.comment.body == 'recheck' || github.event.comment.body == 'I have read the CLA Document and I hereby sign the CLA') || github.event_name == 'pull_request_target' + uses: contributor-assistant/github-action@ca4a40a7d1004f18d9960b404b97e5f30a505a08 # v2.6.1 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + PERSONAL_ACCESS_TOKEN: ${{ steps.octo-sts.outputs.token }} + with: + path-to-signatures: 'cla.json' + path-to-document: 'https://gist.github.com/bits-bot/55bdc97a4fdad52d97feb4d6c3d1d618' # e.g. a CLA or a DCO document + branch: 'vector' + remote-repository-name: cla-signatures + remote-organization-name: DataDog + + # the followings are the optional inputs - If the optional inputs are not given, then default values will be taken + #allowlist: user1,bot* + #create-file-commit-message: 'For example: Creating file for storing CLA Signatures' + #signed-commit-message: 'For example: $contributorName has signed the CLA in $owner/$repo#$pullRequestNo' + #custom-notsigned-prcomment: 'pull request comment with Introductory message to ask new contributors to sign' + #custom-pr-sign-comment: 'The signature to be committed in order to sign the CLA' + #custom-allsigned-prcomment: 'pull request comment when all contributors has signed, defaults to **CLA Assistant Lite bot** All Contributors have signed the CLA.' 
+ #lock-pullrequest-aftermerge: false - if you don't want this bot to automatically lock the pull request after merging (default - true) From 870b86ffe1c1c8c609a2b7c4532a9836166392cd Mon Sep 17 00:00:00 2001 From: Thomas Date: Wed, 19 Nov 2025 15:47:53 -0500 Subject: [PATCH 113/227] fix(ci): Allow CLA check to pass on merge queue events (#24277) * fix(ci): Allow CLA check to pass on merge queue events * Fix merge_group missing types --- .github/workflows/cla.yml | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/.github/workflows/cla.yml b/.github/workflows/cla.yml index 04364b8046aac..aa11ac356341f 100644 --- a/.github/workflows/cla.yml +++ b/.github/workflows/cla.yml @@ -4,6 +4,8 @@ on: types: [created] pull_request_target: types: [opened, closed, synchronize] + merge_group: + types: [checks_requested] permissions: contents: read @@ -15,14 +17,19 @@ jobs: CLAAssistant: runs-on: ubuntu-latest steps: + - name: CLA already verified on PR + if: github.event_name == 'merge_group' + run: echo "CLA verification not needed for merge queue - already checked on PR" + - uses: DataDog/dd-octo-sts-action@08f2144903ced3254a3dafec2592563409ba2aa0 # v1.0.1 + if: github.event_name != 'merge_group' id: octo-sts with: scope: DataDog/cla-signatures policy: self.write-signatures-vector - name: "CLA Assistant" - if: (github.event.comment.body == 'recheck' || github.event.comment.body == 'I have read the CLA Document and I hereby sign the CLA') || github.event_name == 'pull_request_target' + if: github.event_name != 'merge_group' && ((github.event.comment.body == 'recheck' || github.event.comment.body == 'I have read the CLA Document and I hereby sign the CLA') || github.event_name == 'pull_request_target') uses: contributor-assistant/github-action@ca4a40a7d1004f18d9960b404b97e5f30a505a08 # v2.6.1 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} From bc654a796aafd331ebae99c77b31ab83e38eb62d Mon Sep 17 00:00:00 2001 From: Thomas Date: Wed, 19 Nov 2025 16:49:53 -0500 
Subject: [PATCH 114/227] chore(dnsmsg-parser): forbid unwrap and refactor error handling (#24275) --- lib/dnsmsg-parser/Cargo.toml | 3 +++ lib/dnsmsg-parser/benches/benches.rs | 28 ++++++++++++++++----- lib/dnsmsg-parser/src/dns_message_parser.rs | 7 +++++- 3 files changed, 31 insertions(+), 7 deletions(-) diff --git a/lib/dnsmsg-parser/Cargo.toml b/lib/dnsmsg-parser/Cargo.toml index 5dd9051955b05..b34623c4352a0 100644 --- a/lib/dnsmsg-parser/Cargo.toml +++ b/lib/dnsmsg-parser/Cargo.toml @@ -6,6 +6,9 @@ edition = "2024" publish = false license = "MIT" +[lints.clippy] +unwrap-used = "forbid" + [dependencies] data-encoding = "2.9" hickory-proto.workspace = true diff --git a/lib/dnsmsg-parser/benches/benches.rs b/lib/dnsmsg-parser/benches/benches.rs index 29247accdea53..f307aef7e1463 100644 --- a/lib/dnsmsg-parser/benches/benches.rs +++ b/lib/dnsmsg-parser/benches/benches.rs @@ -5,14 +5,20 @@ use hickory_proto::rr::rdata::NULL; fn benchmark_parse_as_query_message(c: &mut Criterion) { let raw_dns_message = "szgAAAABAAAAAAAAAmg1B2V4YW1wbGUDY29tAAAGAAE="; - let raw_query_message = BASE64.decode(raw_dns_message.as_bytes()).unwrap(); + let raw_query_message = BASE64 + .decode(raw_dns_message.as_bytes()) + .expect("invalid base64"); let mut group = c.benchmark_group("dnstap"); group.throughput(Throughput::Bytes(raw_query_message.len() as u64)); group.bench_function("parse_as_query_message", |b| { b.iter_batched( || DnsMessageParser::new(raw_query_message.clone()), - |mut parser| parser.parse_as_query_message().unwrap(), + |mut parser| { + parser + .parse_as_query_message() + .expect("failed to parse as query") + }, BatchSize::SmallInput, ) }); @@ -22,14 +28,20 @@ fn benchmark_parse_as_query_message(c: &mut Criterion) { fn benchmark_parse_as_update_message(c: &mut Criterion) { let raw_dns_message = "xjUoAAABAAAAAQAAB2V4YW1wbGUDY29tAAAGAAECaDXADAD/AP8AAAAAAAA="; - let raw_update_message = BASE64.decode(raw_dns_message.as_bytes()).unwrap(); + let raw_update_message = BASE64 + 
.decode(raw_dns_message.as_bytes()) + .expect("invalid base64"); let mut group = c.benchmark_group("dnstap"); group.throughput(Throughput::Bytes(raw_update_message.len() as u64)); group.bench_function("parse_as_update_message", |b| { b.iter_batched( || DnsMessageParser::new(raw_update_message.clone()), - |mut parser| parser.parse_as_update_message().unwrap(), + |mut parser| { + parser + .parse_as_update_message() + .expect("failed to parse as update") + }, BatchSize::SmallInput, ) }); @@ -59,7 +71,7 @@ fn benchmark_parse_apl_rdata(c: &mut Criterion) { } fn benchmark_parse_rdata(c: &mut Criterion, data: &str, code: u16, id: &str) { - let raw_rdata = BASE64.decode(data.as_bytes()).unwrap(); + let raw_rdata = BASE64.decode(data.as_bytes()).expect("invalid base64"); let record_rdata = NULL::with(raw_rdata.clone()); @@ -73,7 +85,11 @@ fn benchmark_parse_rdata(c: &mut Criterion, data: &str, code: u16, id: &str) { DnsMessageParser::new(Vec::::new()), ) }, - |(record_rdata, mut parser)| parser.format_unknown_rdata(code, &record_rdata).unwrap(), + |(record_rdata, mut parser)| { + parser + .format_unknown_rdata(code, &record_rdata) + .expect("failed to parse rdata") + }, BatchSize::SmallInput, ) }); diff --git a/lib/dnsmsg-parser/src/dns_message_parser.rs b/lib/dnsmsg-parser/src/dns_message_parser.rs index cf57171b27b67..d0b271b2e7930 100644 --- a/lib/dnsmsg-parser/src/dns_message_parser.rs +++ b/lib/dnsmsg-parser/src/dns_message_parser.rs @@ -219,7 +219,12 @@ impl DnsMessageParser { }; self.raw_message_for_rdata_parsing = Some(raw_message_for_rdata_parsing_data); - BinDecoder::new(self.raw_message_for_rdata_parsing.as_ref().unwrap()).clone(index as u16) + BinDecoder::new( + self.raw_message_for_rdata_parsing + .as_ref() + .expect("None raw_message_for_rdata_parsing"), + ) + .clone(index as u16) } fn parse_wks_rdata( From 2f719e900dda9f7d08540984972668c8be310820 Mon Sep 17 00:00:00 2001 From: Derek Zhang Date: Thu, 20 Nov 2025 11:46:05 -0500 Subject: [PATCH 115/227] use loop 
pattern --- src/transforms/incremental_to_absolute.rs | 33 ++++++++++++++--------- 1 file changed, 21 insertions(+), 12 deletions(-) diff --git a/src/transforms/incremental_to_absolute.rs b/src/transforms/incremental_to_absolute.rs index 5040292a98031..c727000db8769 100644 --- a/src/transforms/incremental_to_absolute.rs +++ b/src/transforms/incremental_to_absolute.rs @@ -109,22 +109,31 @@ impl TaskTransform for IncrementalToAbsolute { Self: 'static, { let mut inner = self; + let mut task = task; - // Emit initial metrics - inner.emit_metrics(); + Box::pin(async_stream::stream! { + inner.emit_metrics(); - // Set up periodic metrics emission every 2 seconds - let mut interval = tokio::time::interval(Duration::from_secs(2)); + let mut interval = tokio::time::interval(Duration::from_secs(2)); - Box::pin(task.filter_map(move |v| { - let mut cx = std::task::Context::from_waker(futures::task::noop_waker_ref()); - // Poll the interval and emit metrics if ready - while interval.poll_tick(&mut cx).is_ready() { - inner.emit_metrics(); + loop { + tokio::select! 
{ + _ = interval.tick() => { + inner.emit_metrics(); + }, + maybe_event = task.next() => { + match maybe_event { + Some(event) => { + if let Some(transformed) = inner.transform_one(event) { + yield transformed; + } + }, + None => break, + } + } + } } - // Process the event as before - futures::future::ready(inner.transform_one(v)) - })) + }) } } From 3cfbdf7f87d22b2363fd22e5d005d7b8ba8bc8f6 Mon Sep 17 00:00:00 2001 From: Derek Zhang Date: Thu, 20 Nov 2025 12:38:26 -0500 Subject: [PATCH 116/227] use size_of --- lib/vector-core/src/event/metric/value.rs | 22 +++++++++------------- src/sinks/util/buffer/metrics/normalize.rs | 2 +- 2 files changed, 10 insertions(+), 14 deletions(-) diff --git a/lib/vector-core/src/event/metric/value.rs b/lib/vector-core/src/event/metric/value.rs index cadd30a28f0a8..ce21e80bbec24 100644 --- a/lib/vector-core/src/event/metric/value.rs +++ b/lib/vector-core/src/event/metric/value.rs @@ -360,16 +360,12 @@ impl MetricValue { impl ByteSizeOf for MetricValue { fn allocated_bytes(&self) -> usize { match self { - Self::Counter { .. } | Self::Gauge { .. } => size_of::(), - Self::Set { values } => values.allocated_bytes() + size_of_val(values), - Self::Distribution { samples, .. } => samples.allocated_bytes() + size_of_val(samples), - Self::AggregatedHistogram { buckets, .. } => { - buckets.allocated_bytes() + size_of_val(buckets) - } - Self::AggregatedSummary { quantiles, .. } => { - quantiles.allocated_bytes() + size_of_val(quantiles) - } - Self::Sketch { sketch } => sketch.allocated_bytes() + size_of_val(sketch), + Self::Counter { .. } | Self::Gauge { .. } => 0, + Self::Set { values } => values.allocated_bytes(), + Self::Distribution { samples, .. } => samples.allocated_bytes(), + Self::AggregatedHistogram { buckets, .. } => buckets.allocated_bytes(), + Self::AggregatedSummary { quantiles, .. 
} => quantiles.allocated_bytes(), + Self::Sketch { sketch } => sketch.allocated_bytes(), } } } @@ -600,7 +596,7 @@ impl PartialEq for Sample { impl ByteSizeOf for Sample { fn allocated_bytes(&self) -> usize { - size_of::() + 0 } } @@ -674,7 +670,7 @@ impl PartialEq for Bucket { impl ByteSizeOf for Bucket { fn allocated_bytes(&self) -> usize { - size_of::() + 0 } } @@ -743,6 +739,6 @@ impl Quantile { impl ByteSizeOf for Quantile { fn allocated_bytes(&self) -> usize { - size_of::() + 0 } } diff --git a/src/sinks/util/buffer/metrics/normalize.rs b/src/sinks/util/buffer/metrics/normalize.rs index a368bc48fbc16..0148fab12c520 100644 --- a/src/sinks/util/buffer/metrics/normalize.rs +++ b/src/sinks/util/buffer/metrics/normalize.rs @@ -330,7 +330,7 @@ impl CapacityPolicy { /// Gets the total memory size of entry/series, excluding LRU cache overhead. pub fn item_size(&self, series: &MetricSeries, entry: &MetricEntry) -> usize { - entry.allocated_bytes() + series.allocated_bytes() + entry.size_of() + series.size_of() } } From b9ad9b3ec1ade765583e4d06fc7142f8e6b745a2 Mon Sep 17 00:00:00 2001 From: Thomas Date: Fri, 21 Nov 2025 14:12:37 -0500 Subject: [PATCH 117/227] chore(deps): remove number-prefix in favor of unit_prefix (#24293) --- Cargo.lock | 12 +++--------- LICENSE-3rdparty.csv | 2 +- lib/vector-top/Cargo.toml | 2 +- lib/vector-top/src/dashboard.rs | 2 +- 4 files changed, 6 insertions(+), 12 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 611e2675ab80e..2c51ad55c9739 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7453,12 +7453,6 @@ dependencies = [ "libc", ] -[[package]] -name = "number_prefix" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "830b246a0e5f20af87141b25c173cd1b609bd7779a4617d6ec582abaf90870f3" - [[package]] name = "oauth2" version = "4.4.2" @@ -12288,9 +12282,9 @@ checksum = "f962df74c8c05a667b5ee8bcf162993134c104e96440b663c8daa176dc772d8c" [[package]] name = "unit-prefix" -version = "0.5.1" +version 
= "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "323402cff2dd658f39ca17c789b502021b3f18707c91cdf22e3838e1b4023817" +checksum = "81e544489bf3d8ef66c953931f56617f423cd4b5494be343d9b9d3dda037b9a3" [[package]] name = "universal-hash" @@ -12968,10 +12962,10 @@ dependencies = [ "humantime", "indoc", "num-format", - "number_prefix", "ratatui", "tokio", "tokio-stream", + "unit-prefix", "url", "vector-api-client", "vector-common", diff --git a/LICENSE-3rdparty.csv b/LICENSE-3rdparty.csv index 578c814f83225..98e8d456e0b11 100644 --- a/LICENSE-3rdparty.csv +++ b/LICENSE-3rdparty.csv @@ -521,7 +521,6 @@ num_cpus,https://github.com/seanmonstar/num_cpus,MIT OR Apache-2.0,Sean McArthur num_enum,https://github.com/illicitonion/num_enum,BSD-3-Clause OR MIT OR Apache-2.0,"Daniel Wagner-Hall , Daniel Henry-Mantilla , Vincent Esche " num_enum_derive,https://github.com/illicitonion/num_enum,BSD-3-Clause OR MIT OR Apache-2.0,"Daniel Wagner-Hall , Daniel Henry-Mantilla , Vincent Esche " num_threads,https://github.com/jhpratt/num_threads,MIT OR Apache-2.0,Jacob Pratt -number_prefix,https://github.com/ogham/rust-number-prefix,MIT,Benjamin Sago oauth2,https://github.com/ramosbugs/oauth2-rs,MIT OR Apache-2.0,"Alex Crichton , Florin Lipan , David A. 
Ramos " objc,http://github.com/SSheldon/rust-objc,MIT,Steven Sheldon objc2-core-foundation,https://github.com/madsmtm/objc2,Zlib OR Apache-2.0 OR MIT,The objc2-core-foundation Authors @@ -833,6 +832,7 @@ unicode-normalization,https://github.com/unicode-rs/unicode-normalization,MIT OR unicode-segmentation,https://github.com/unicode-rs/unicode-segmentation,MIT OR Apache-2.0,"kwantam , Manish Goregaokar " unicode-truncate,https://github.com/Aetf/unicode-truncate,MIT OR Apache-2.0,Aetf unicode-width,https://github.com/unicode-rs/unicode-width,MIT OR Apache-2.0,"kwantam , Manish Goregaokar " +unit-prefix,https://codeberg.org/commons-rs/unit-prefix,MIT,"Fabio Valentini , Benjamin Sago " universal-hash,https://github.com/RustCrypto/traits,MIT OR Apache-2.0,RustCrypto Developers unreachable,https://github.com/reem/rust-unreachable,MIT OR Apache-2.0,Jonathan Reem unsafe-libyaml,https://github.com/dtolnay/unsafe-libyaml,MIT,David Tolnay diff --git a/lib/vector-top/Cargo.toml b/lib/vector-top/Cargo.toml index b0c0debf16e76..815c0a4f858a1 100644 --- a/lib/vector-top/Cargo.toml +++ b/lib/vector-top/Cargo.toml @@ -18,7 +18,7 @@ tokio-stream = { version = "0.1.17", default-features = false, features = ["net" url.workspace = true humantime = { version = "2.2.0", default-features = false } crossterm = { version = "0.29.0", default-features = false, features = ["event-stream", "windows"] } -number_prefix = { version = "0.4.0", default-features = false, features = ["std"] } +unit-prefix = { version = "0.5.2", default-features = false, features = ["std"] } num-format = { version = "0.4.4", default-features = false, features = ["with-num-bigint"] } ratatui = { version = "0.29.0", default-features = false, features = ["crossterm"] } vector-common = { path = "../vector-common" } diff --git a/lib/vector-top/src/dashboard.rs b/lib/vector-top/src/dashboard.rs index 7615c53df4d83..418ee0831d7a8 100644 --- a/lib/vector-top/src/dashboard.rs +++ b/lib/vector-top/src/dashboard.rs @@ -9,7 +9,6 @@ 
use crossterm::{ tty::IsTty, }; use num_format::{Locale, ToFormattedString}; -use number_prefix::NumberPrefix; use ratatui::{ Frame, Terminal, backend::CrosstermBackend, @@ -19,6 +18,7 @@ use ratatui::{ widgets::{Block, Borders, Cell, Paragraph, Row, Table, Wrap}, }; use tokio::sync::oneshot; +use unit_prefix::NumberPrefix; use super::{ events::capture_key_press, From df4f3621e7941c4eb3ba6ad76c739552b427951f Mon Sep 17 00:00:00 2001 From: James <10730172+sanjams2@users.noreply.github.com> Date: Wed, 26 Nov 2025 11:51:00 -0500 Subject: [PATCH 118/227] fix(blackhole sink): implement end-to-end acknowledgements (#24283) Addresses #24281 The blackhole sink was consuming events without updating finalizer status, causing sources that depend on acknowledgements (like aws_s3 with SQS) to never receive delivery confirmation. This resulted in SQS messages never being deleted and queue depth growing indefinitely. Added proper finalizer handling by taking finalizers from events and marking them as delivered after processing, matching the pattern used by other sinks like console. Co-authored-by: sanjams2 --- changelog.d/24281_blackhole_sink_acknowledgements.fix.md | 3 +++ src/sinks/blackhole/mod.rs | 5 +++-- src/sinks/blackhole/sink.rs | 7 +++++-- 3 files changed, 11 insertions(+), 4 deletions(-) create mode 100644 changelog.d/24281_blackhole_sink_acknowledgements.fix.md diff --git a/changelog.d/24281_blackhole_sink_acknowledgements.fix.md b/changelog.d/24281_blackhole_sink_acknowledgements.fix.md new file mode 100644 index 0000000000000..d6b5c58b9cf1c --- /dev/null +++ b/changelog.d/24281_blackhole_sink_acknowledgements.fix.md @@ -0,0 +1,3 @@ +Fixed the blackhole sink to properly implement end-to-end acknowledgements. Previously, the sink consumed events without updating finalizer status, causing sources that depend on acknowledgements (like `aws_s3` with SQS) to never delete processed messages from the queue. 
+ +authors: sanjams2 diff --git a/src/sinks/blackhole/mod.rs b/src/sinks/blackhole/mod.rs index 9d9046e208fde..d82a1c14bd89b 100644 --- a/src/sinks/blackhole/mod.rs +++ b/src/sinks/blackhole/mod.rs @@ -14,7 +14,8 @@ mod tests { blackhole::{config::BlackholeConfig, sink::BlackholeSink}, }, test_util::{ - components::run_and_assert_nonsending_sink_compliance, random_events_with_stream, + components::{SINK_TAGS, run_and_assert_sink_compliance}, + random_events_with_stream, }, }; @@ -29,6 +30,6 @@ mod tests { let sink = VectorSink::Stream(Box::new(sink)); let (_input_lines, events) = random_events_with_stream(100, 10, None); - run_and_assert_nonsending_sink_compliance(sink, events, &[]).await; + run_and_assert_sink_compliance(sink, events, &SINK_TAGS).await; } } diff --git a/src/sinks/blackhole/sink.rs b/src/sinks/blackhole/sink.rs index 8c934101545eb..ff0744913cc8b 100644 --- a/src/sinks/blackhole/sink.rs +++ b/src/sinks/blackhole/sink.rs @@ -21,7 +21,7 @@ use vector_lib::{ }; use crate::{ - event::{EventArray, EventContainer}, + event::{EventArray, EventContainer, EventStatus, Finalizable}, sinks::{blackhole::config::BlackholeConfig, util::StreamSink}, }; @@ -82,7 +82,7 @@ impl StreamSink for BlackholeSink { }); } - while let Some(events) = input.next().await { + while let Some(mut events) = input.next().await { if let Some(rate) = self.config.rate { let factor: f32 = 1.0 / rate as f32; let secs: f32 = factor * (events.len() as f32); @@ -98,6 +98,9 @@ impl StreamSink for BlackholeSink { .total_raw_bytes .fetch_add(message_len.get(), Ordering::AcqRel); + let finalizers = events.take_finalizers(); + finalizers.update_status(EventStatus::Delivered); + events_sent.emit(CountByteSize(events.len(), message_len)); bytes_sent.emit(ByteSize(message_len.get())); } From 84c94441223a4e2f83be5e5ae0e56180c2c45931 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ensar=20Saraj=C4=8Di=C4=87?= Date: Sun, 30 Nov 2025 00:24:37 +0100 Subject: [PATCH 119/227] docs(vrl): fix return type for `mod` 
function in VRL function reference (#24312) --- website/cue/reference/remap/functions/mod.cue | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/cue/reference/remap/functions/mod.cue b/website/cue/reference/remap/functions/mod.cue index 8fdb3fc85e7c3..8241f50a9b52e 100644 --- a/website/cue/reference/remap/functions/mod.cue +++ b/website/cue/reference/remap/functions/mod.cue @@ -25,7 +25,7 @@ remap: functions: mod: { "`modulus` is not an integer or float.", "`modulus` is equal to 0.", ] - return: types: ["string"] + return: types: ["integer", "float"] examples: [ { From 5c16191caea16363da0baa70b5a7be67a945826a Mon Sep 17 00:00:00 2001 From: Thomas Date: Mon, 1 Dec 2025 13:29:37 -0500 Subject: [PATCH 120/227] chore(ci): use ci-docs-build flow instead of local docs flow (#24319) --- .github/workflows/test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index a18c3a913160f..77bd725af8308 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -163,7 +163,7 @@ jobs: - uses: ./.github/actions/setup with: rust: true - - run: cd rust-doc && make docs + - run: cd rust-doc && make ci-docs-build test-vrl: name: VRL - Linux From f4f66206df5dfd235ac6ccd9f47e1a2d20201195 Mon Sep 17 00:00:00 2001 From: Lars Francke Date: Mon, 1 Dec 2025 20:20:32 +0100 Subject: [PATCH 121/227] fix(file source) Fix a data corruption bug with multi-char delimiters (#24028) * Fix problem where multi-chars delimiters fail to be parsed if they happen right at a buffer boundary. 
* Add changelog entry and remove debug println statements * Update changelog.d/24027_file_source_multi_char_delimiter.fix.md Co-authored-by: Thomas * Add #[cfg(test)] to test method * Expand comments * Add a regression/integration test to file source * Remove cfg test --------- Co-authored-by: Thomas --- ...27_file_source_multi_char_delimiter.fix.md | 3 + lib/file-source-common/src/buffer.rs | 229 +++++++++++++++++- src/sources/file.rs | 89 +++++++ 3 files changed, 314 insertions(+), 7 deletions(-) create mode 100644 changelog.d/24027_file_source_multi_char_delimiter.fix.md diff --git a/changelog.d/24027_file_source_multi_char_delimiter.fix.md b/changelog.d/24027_file_source_multi_char_delimiter.fix.md new file mode 100644 index 0000000000000..bd94e17d6355e --- /dev/null +++ b/changelog.d/24027_file_source_multi_char_delimiter.fix.md @@ -0,0 +1,3 @@ +Fixed a bug in the `file` source, which could silently corrupt data when using multi-char delimiters. + +authors: lfrancke diff --git a/lib/file-source-common/src/buffer.rs b/lib/file-source-common/src/buffer.rs index e2a4e5596c920..097331fbf2be3 100644 --- a/lib/file-source-common/src/buffer.rs +++ b/lib/file-source-common/src/buffer.rs @@ -46,13 +46,56 @@ pub async fn read_until_with_max_size<'a, R: AsyncBufRead + ?Sized + Unpin>( let delim_len = delim.len(); let mut discarded_for_size_and_truncated = Vec::new(); let mut reader = Box::new(reader); + + // Used to track partial delimiter matches across buffer boundaries. + // Data is read in chunks from the reader (see `fill_buf` below). + // A multi-byte delimiter may be split across the "old" and "new" buffers. + // Any potential partial delimiter that was found in the "old" buffer is stored in this variable. 
+ let mut partial_delim: BytesMut = BytesMut::with_capacity(delim_len); + loop { + // Read the next chunk of data let available: &[u8] = match reader.fill_buf().await { Ok(n) => n, Err(ref e) if e.kind() == io::ErrorKind::Interrupted => continue, Err(e) => return Err(e), }; + // First, check if we have a partial delimiter from the previous iteration/buffer + if !partial_delim.is_empty() { + let expected_suffix = &delim[partial_delim.len()..]; + let expected_suffix_len = expected_suffix.len(); + + // We already know that we have a partial delimiter match from the previous buffer. + // Here we check what part of the delimiter is missing and whether the new buffer + // contains the remaining part. + if available.len() >= expected_suffix_len + && &available[..expected_suffix_len] == expected_suffix + { + // Complete delimiter found! Consume the remainder of the delimiter so we can start + // processing data after the delimiter. + reader.consume(expected_suffix_len); + *position += expected_suffix_len as u64; + total_read += expected_suffix_len; + partial_delim.clear(); + + // Found a complete delimiter, return the current buffer so we can proceed with the + // next record after this delimiter in the next call. + return Ok(ReadResult { + successfully_read: Some(total_read), + discarded_for_size_and_truncated, + }); + } else { + // Not a complete delimiter after all. + // Add partial_delim to output buffer as it is actual data. + if !discarding { + buf.extend_from_slice(&partial_delim); + } + partial_delim.clear(); + // Continue processing current available buffer + } + } + let (done, used) = { match delim_finder.find(available) { Some(i) => { @@ -62,13 +105,75 @@ pub async fn read_until_with_max_size<'a, R: AsyncBufRead + ?Sized + Unpin>( (true, i + delim_len) } None => { - if !discarding { - buf.extend_from_slice(available); + // No delimiter found in current buffer. But there could be a partial delimiter + // at the end of this buffer. 
For multi-byte delimiters like \r\n, we need + // to handle the case where the delimiter is split across buffer boundaries + // (e.g. \r in the "old" buffer, then we read new data and find \n in the new + // buffer). + let mut partial_match_len = 0; + + // We only need to check if we're not already at the end of the buffer and if we + // have a delimiter that has more than one byte. + if !available.is_empty() && delim_len > 1 { + // Check if the end of the current buffer matches a prefix of the delimiter + // by testing from longest to shortest possible prefix. + // + // This loop runs at most (delim_len - 1) iterations: + // - 2-byte delimiter (\r\n): 1 iteration max + // - 5-byte delimiter: 4 iterations max + // + // This part of the code is only called if all of these are true: + // + // - We have a new buffer (e.g. every 8kB, i.e. only called once per buffer) + // - We have a multi-byte delimiter + // - This delimiter could not be found in the current buffer + // + // Even for longer delimiters the performance impact is negligible. + // + // Example 1: + // Delimiter: \r\n + // Iteration 1: It checks if the current buffer ends with "\r", + // if it does we have a potential partial delimiter. + // The next chunk will confirm whether this is truly part of a delimiter. 
+ + // Example 2: + // Delimiter: ABCDE + // Iteration 1: It checks if the current buffer ends with "ABCD" (we don't + // need to check "ABCDE" because that would have been caught by + // `delim_finder.find` earlier) + // Iteration 2: It checks if the current buffer ends with "ABC" + // Iterations 3-4: Same for "AB" and "A" + for prefix_len in (1..delim_len).rev() { + if available.len() >= prefix_len + && available.ends_with(&delim[..prefix_len]) + { + partial_match_len = prefix_len; + break; + } + } + } + + let bytes_to_copy = available.len() - partial_match_len; + + if !discarding && bytes_to_copy > 0 { + buf.extend_from_slice(&available[..bytes_to_copy]); } + + // If we found a potential partial delimiter, save it for the next iteration + if partial_match_len > 0 { + partial_delim.clear(); + partial_delim.extend_from_slice(&available[bytes_to_copy..]); + } + (false, available.len()) } } }; + + // Check if we're at EOF before we start processing + // (for borrow checker, has to come before `consume`) + let at_eof = available.is_empty(); + reader.consume(used); *position += used as u64; // do this at exactly same time total_read += used; @@ -92,11 +197,12 @@ pub async fn read_until_with_max_size<'a, R: AsyncBufRead + ?Sized + Unpin>( discarding = false; buf.clear(); } - } else if used == 0 { - // We've hit EOF but not yet seen a newline. This can happen when unlucky timing causes - // us to observe an incomplete write. We return None here and let the loop continue - // next time the method is called. This is safe because the buffer is specific to this - // FileWatcher. + } else if used == 0 && at_eof { + // We've hit EOF but haven't seen a delimiter. This can happen when: + // 1. The file ends without a trailing delimiter + // 2. We're observing an incomplete write + // + // Return None to signal the caller to retry later. 
return Ok(ReadResult { successfully_read: None, discarded_for_size_and_truncated, @@ -262,4 +368,113 @@ mod test { .await .unwrap() } + + /// Generic test helper that tests delimiter splits across buffer boundaries + /// for any delimiter length. This function: + /// 1. Creates test data with delimiters positioned to split at buffer boundaries + /// 2. Tests multiple iterations to ensure state tracking works correctly + /// 3. Verifies all lines are correctly separated without merging + async fn test_delimiter_boundary_split_helper(delimiter: &[u8], num_lines: usize) { + let delimiter_len = delimiter.len(); + + // Use a buffer capacity that will force splits + // We'll position delimiters to split at this boundary + let buffer_capacity = 10; + + // Build test data where each delimiter is positioned to split across buffer boundary + // Strategy: For each line, calculate position so delimiter starts at boundary - (delimiter_len - 1) + let mut data = Vec::new(); + let mut expected_lines = Vec::new(); + + for i in 0..num_lines { + // Create line content that positions the delimiter to split at buffer boundary + // We want the delimiter to straddle a buffer_capacity boundary + + // Calculate how many bytes until the next buffer boundary + let current_pos = data.len(); + let bytes_until_boundary = buffer_capacity - (current_pos % buffer_capacity); + + // Create line content that will position delimiter to split + // We want (delimiter_len - 1) bytes before boundary, then 1 byte after + let line_content = if bytes_until_boundary > delimiter_len { + let content_len = bytes_until_boundary - (delimiter_len - 1); + format!("line{:0width$}", i, width = content_len.saturating_sub(4)).into_bytes() + } else { + // Not enough room in this buffer, pad to next boundary + let padding = bytes_until_boundary; + let extra_content = buffer_capacity - (delimiter_len - 1); + let mut content = vec![b'X'; padding]; + content.extend_from_slice( + format!("L{:0width$}", i, width = 
extra_content.saturating_sub(1)).as_bytes(), + ); + content + }; + + expected_lines.push(line_content.clone()); + data.extend_from_slice(&line_content); + data.extend_from_slice(delimiter); + } + + // Now test reading this data + let cursor = Cursor::new(data); + let mut reader = BufReader::with_capacity(buffer_capacity, cursor); + let mut position = 0; + let max_size = 1024; + + // Read each line and verify it matches expected + for (i, expected_line) in expected_lines.iter().enumerate() { + let mut buffer = BytesMut::new(); + let result = read_until_with_max_size( + Box::pin(&mut reader), + &mut position, + delimiter, + &mut buffer, + max_size, + ) + .await + .unwrap(); + + assert_eq!( + buffer.as_ref(), + expected_line.as_slice(), + "Line {} should match expected content. Got: {:?}, Expected: {:?}", + i, + String::from_utf8_lossy(&buffer), + String::from_utf8_lossy(expected_line) + ); + + assert!( + result.successfully_read.is_some(), + "Should find delimiter for line {}", + i + ); + } + } + + #[tokio::test] + async fn test_single_byte_delimiter_boundary() { + // Test single-byte delimiter (should work without any special handling) + test_delimiter_boundary_split_helper(b"\n", 5).await; + } + + #[tokio::test] + async fn test_two_byte_delimiter_boundary() { + // Test two-byte delimiter (CRLF case) + test_delimiter_boundary_split_helper(b"\r\n", 5).await; + } + + #[tokio::test] + async fn test_three_byte_delimiter_boundary() { + test_delimiter_boundary_split_helper(b"|||", 5).await; + } + + #[tokio::test] + async fn test_four_byte_delimiter_boundary() { + test_delimiter_boundary_split_helper(b"<|>|", 5).await; + } + + #[tokio::test] + async fn test_five_byte_delimiter_boundary() { + test_delimiter_boundary_split_helper(b"<<>>>", 5).await; + } } diff --git a/src/sources/file.rs b/src/sources/file.rs index 42b1e03b1c331..31e4666bb659f 100644 --- a/src/sources/file.rs +++ b/src/sources/file.rs @@ -2312,6 +2312,95 @@ mod tests { ); } + // Regression test for 
https://github.com/vectordotdev/vector/issues/24027 + // Tests that multi-character delimiters (like \r\n) are correctly handled when + // split across buffer boundaries. Without the fix, events would be merged together. + #[tokio::test] + async fn test_multi_char_delimiter_split_across_buffer_boundary() { + let dir = tempdir().unwrap(); + let config = file::FileConfig { + include: vec![dir.path().join("*")], + line_delimiter: "\r\n".to_string(), + ..test_default_file_config(&dir) + }; + + let path = dir.path().join("file"); + let received = run_file_source(&config, false, NoAcks, LogNamespace::Legacy, async { + let mut file = File::create(&path).unwrap(); + + sleep_500_millis().await; + + // Create data where \r\n is split at 8KB buffer boundary + // This reproduces the exact scenario that caused data corruption: + // - Event 1 ends with \r at byte 8191 + // - The \n appears at byte 8192 (right at the buffer boundary) + // - Without the fix, Event 1 and Event 2 would be merged + + let buffer_size = 8192; + + // Event 1: Position \r\n to split at first boundary + let event1_prefix = "Event 1: "; + let padding1_len = buffer_size - event1_prefix.len() - 1; // -1 for the \r + write!(&mut file, "{}", event1_prefix).unwrap(); + file.write_all(&vec![b'X'; padding1_len]).unwrap(); + write!(&mut file, "\r\n").unwrap(); // \r at byte 8191, \n at byte 8192 + + // Event 2: Position \r\n to split at second boundary + let event2_prefix = "Event 2: "; + let padding2_len = buffer_size - event2_prefix.len() - 1; + write!(&mut file, "{}", event2_prefix).unwrap(); + file.write_all(&vec![b'Y'; padding2_len]).unwrap(); + write!(&mut file, "\r\n").unwrap(); // \r at byte 16383, \n at byte 16384 + + // Event 3: Normal line without boundary split + write!(&mut file, "Event 3: Final\r\n").unwrap(); + + sleep_500_millis().await; + }) + .await; + + let messages = extract_messages_value(received); + + // The bug would cause Events 1 and 2 to be merged into a single message + assert_eq!( + 
messages.len(), + 3, + "Should receive exactly 3 separate events (bug would merge them)" + ); + + // Verify each event is correctly separated and starts with expected prefix + let msg0 = messages[0].to_string_lossy(); + let msg1 = messages[1].to_string_lossy(); + let msg2 = messages[2].to_string_lossy(); + + assert!( + msg0.starts_with("Event 1: "), + "First event should start with 'Event 1: ', got: {}", + msg0 + ); + assert!( + msg1.starts_with("Event 2: "), + "Second event should start with 'Event 2: ', got: {}", + msg1 + ); + assert_eq!(msg2, "Event 3: Final"); + + // Ensure no event contains embedded CR/LF (sign of incorrect merging) + for (i, msg) in messages.iter().enumerate() { + let msg_str = msg.to_string_lossy(); + assert!( + !msg_str.contains('\r'), + "Event {} should not contain embedded \\r", + i + ); + assert!( + !msg_str.contains('\n'), + "Event {} should not contain embedded \\n", + i + ); + } + } + #[tokio::test] async fn remove_file() { let n = 5; From 08dceb5e786df3f889e86ef68dccf5ae20d67fff Mon Sep 17 00:00:00 2001 From: Thomas Date: Mon, 1 Dec 2025 14:31:20 -0500 Subject: [PATCH 122/227] chore(dev): add missing --workspace argument to make docs (#24318) --- rust-doc/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rust-doc/Makefile b/rust-doc/Makefile index eb02019571462..5cc08cf4d4fca 100644 --- a/rust-doc/Makefile +++ b/rust-doc/Makefile @@ -1,7 +1,7 @@ # Full docs build with all dependencies for local development docs: ../scripts/environment/install-protoc.sh ${HOME}/protoc - PATH=${PATH}:${HOME}/protoc/ cargo doc --no-default-features --features="docs" + PATH=${PATH}:${HOME}/protoc/ cargo doc --no-default-features --features="docs" --workspace # rust-doc.vector.dev specific build without the extra dependencies ci-docs-build: ../scripts/environment/install-protoc.sh ${HOME}/protoc From e6277228958fe51d3f37cea69551e57ac51c45f4 Mon Sep 17 00:00:00 2001 From: Bruce Guenter Date: Mon, 1 Dec 2025 13:57:00 -0600 Subject: 
[PATCH 123/227] enhancement(sources): Add internal metric to record source buffer utilization (#24272) * enhancement(sources): Add internal metric to record source buffer utilization * Tweak wording * Rework and expand the metrics output by the channel * Fix and expand tests * Address review feedback and simplify the metric names * Fix missing metric rename --- ...e-sender-utilization-metric.enhancement.md | 9 + .../src/topology/channel/limited_queue.rs | 169 ++++++++++++------ lib/vector-buffers/src/variants/in_memory.rs | 2 +- lib/vector-core/src/source_sender/output.rs | 5 +- lib/vector-core/src/source_sender/tests.rs | 53 ++++++ src/test_util/components.rs | 72 +++++++- src/test_util/mock/sources/basic.rs | 5 +- website/cue/reference/components/sources.cue | 4 + .../components/sources/internal_metrics.cue | 32 ++++ 9 files changed, 290 insertions(+), 61 deletions(-) create mode 100644 changelog.d/source-sender-utilization-metric.enhancement.md diff --git a/changelog.d/source-sender-utilization-metric.enhancement.md b/changelog.d/source-sender-utilization-metric.enhancement.md new file mode 100644 index 0000000000000..8ad76d9c1af8a --- /dev/null +++ b/changelog.d/source-sender-utilization-metric.enhancement.md @@ -0,0 +1,9 @@ +Added the following metrics to record the utilization level of the buffer that +all sources send into: + +- `source_buffer_max_byte_size` +- `source_buffer_max_event_size` +- `source_buffer_utilization` +- `source_buffer_utilization_level` + +authors: bruceg diff --git a/lib/vector-buffers/src/topology/channel/limited_queue.rs b/lib/vector-buffers/src/topology/channel/limited_queue.rs index 4320936cec750..f6fb80f8ecac9 100644 --- a/lib/vector-buffers/src/topology/channel/limited_queue.rs +++ b/lib/vector-buffers/src/topology/channel/limited_queue.rs @@ -11,6 +11,7 @@ use std::{ use async_stream::stream; use crossbeam_queue::{ArrayQueue, SegQueue}; use futures::Stream; +use metrics::{Gauge, Histogram, gauge, histogram}; use 
tokio::sync::{Notify, OwnedSemaphorePermit, Semaphore, TryAcquireError}; use crate::{InMemoryBufferable, config::MemoryBufferSize}; @@ -88,12 +89,47 @@ where } } +#[derive(Clone, Debug)] +struct Metrics { + histogram: Histogram, + gauge: Gauge, + // We hold a handle to the max gauge to avoid it being dropped by the metrics collector, but + // since the value is static, we never need to update it. The compiler detects this as an unused + // field, so we need to suppress the warning here. + #[expect(dead_code)] + max_gauge: Gauge, +} + +impl Metrics { + #[expect(clippy::cast_precision_loss)] // We have to convert buffer sizes for a gauge, it's okay to lose precision here. + fn new(limit: MemoryBufferSize, prefix: &'static str, output: &str) -> Self { + let (gauge_suffix, max_value) = match limit { + MemoryBufferSize::MaxEvents(max_events) => ("_max_event_size", max_events.get() as f64), + MemoryBufferSize::MaxSize(max_bytes) => ("_max_byte_size", max_bytes.get() as f64), + }; + let max_gauge = gauge!(format!("{prefix}{gauge_suffix}"), "output" => output.to_string()); + max_gauge.set(max_value); + Self { + histogram: histogram!(format!("{prefix}_utilization"), "output" => output.to_string()), + gauge: gauge!(format!("{prefix}_utilization_level"), "output" => output.to_string()), + max_gauge, + } + } + + #[expect(clippy::cast_precision_loss)] + fn record(&self, value: usize) { + self.histogram.record(value as f64); + self.gauge.set(value as f64); + } +} + #[derive(Debug)] struct Inner { data: Arc>, limit: MemoryBufferSize, limiter: Arc, read_waker: Arc, + metrics: Option, } impl Clone for Inner { @@ -103,6 +139,42 @@ impl Clone for Inner { limit: self.limit, limiter: self.limiter.clone(), read_waker: self.read_waker.clone(), + metrics: self.metrics.clone(), + } + } +} + +impl Inner { + fn new(limit: MemoryBufferSize, metric_name_output: Option<(&'static str, &str)>) -> Self { + let read_waker = Arc::new(Notify::new()); + let metrics = + metric_name_output.map(|(prefix, 
output)| Metrics::new(limit, prefix, output)); + match limit { + MemoryBufferSize::MaxEvents(max_events) => Inner { + data: Arc::new(ArrayQueue::new(max_events.get())), + limit, + limiter: Arc::new(Semaphore::new(max_events.get())), + read_waker, + metrics, + }, + MemoryBufferSize::MaxSize(max_bytes) => Inner { + data: Arc::new(SegQueue::new()), + limit, + limiter: Arc::new(Semaphore::new(max_bytes.get())), + read_waker, + metrics, + }, + } + } + + fn send_with_permit(&mut self, total: usize, permits: OwnedSemaphorePermit, item: T) { + self.data.push((permits, item)); + self.read_waker.notify_one(); + // Due to the race between getting the available capacity, acquiring the permits, and the + // above push, the total may be inaccurate. Record it anyways as the histogram totals will + // _eventually_ converge on a true picture of the buffer utilization. + if let Some(metrics) = self.metrics.as_ref() { + metrics.record(total); } } } @@ -115,7 +187,7 @@ pub struct LimitedSender { impl LimitedSender { #[allow(clippy::cast_possible_truncation)] - fn get_required_permits_for_item(&self, item: &T) -> u32 { + fn calc_required_permits(&self, item: &T) -> (usize, usize, u32) { // We have to limit the number of permits we ask for to the overall limit since we're always // willing to store more items than the limit if the queue is entirely empty, because // otherwise we might deadlock ourselves by not being able to send a single item. @@ -123,7 +195,8 @@ impl LimitedSender { MemoryBufferSize::MaxSize(max_size) => (max_size, item.allocated_bytes()), MemoryBufferSize::MaxEvents(max_events) => (max_events, item.event_count()), }; - cmp::min(limit.get(), value) as u32 + let limit = limit.get(); + (limit, value, cmp::min(limit, value) as u32) } /// Gets the number of items that this channel could accept. @@ -139,23 +212,22 @@ impl LimitedSender { /// with the given `item`. 
pub async fn send(&mut self, item: T) -> Result<(), SendError> { // Calculate how many permits we need, and wait until we can acquire all of them. - let permits_required = self.get_required_permits_for_item(&item); - let Ok(permits) = self + let (limit, count, permits_required) = self.calc_required_permits(&item); + let in_use = limit.saturating_sub(self.available_capacity()); + match self .inner .limiter .clone() .acquire_many_owned(permits_required) .await - else { - return Err(SendError(item)); - }; - - self.inner.data.push((permits, item)); - self.inner.read_waker.notify_one(); - - trace!("Sent item."); - - Ok(()) + { + Ok(permits) => { + self.inner.send_with_permit(in_use + count, permits, item); + trace!("Sent item."); + Ok(()) + } + Err(_) => Err(SendError(item)), + } } /// Attempts to send an item into the channel. @@ -172,28 +244,22 @@ impl LimitedSender { /// Will panic if adding ack amount overflows. pub fn try_send(&mut self, item: T) -> Result<(), TrySendError> { // Calculate how many permits we need, and try to acquire them all without waiting. 
- let permits_required = self.get_required_permits_for_item(&item); - let permits = match self + let (limit, count, permits_required) = self.calc_required_permits(&item); + let in_use = limit.saturating_sub(self.available_capacity()); + match self .inner .limiter .clone() .try_acquire_many_owned(permits_required) { - Ok(permits) => permits, - Err(ae) => { - return match ae { - TryAcquireError::NoPermits => Err(TrySendError::InsufficientCapacity(item)), - TryAcquireError::Closed => Err(TrySendError::Disconnected(item)), - }; + Ok(permits) => { + self.inner.send_with_permit(in_use + count, permits, item); + trace!("Attempt to send item succeeded."); + Ok(()) } - }; - - self.inner.data.push((permits, item)); - self.inner.read_waker.notify_one(); - - trace!("Attempt to send item succeeded."); - - Ok(()) + Err(TryAcquireError::NoPermits) => Err(TrySendError::InsufficientCapacity(item)), + Err(TryAcquireError::Closed) => Err(TrySendError::Disconnected(item)), + } } } @@ -269,21 +335,9 @@ impl Drop for LimitedReceiver { pub fn limited( limit: MemoryBufferSize, + metric_name_output: Option<(&'static str, &str)>, ) -> (LimitedSender, LimitedReceiver) { - let inner = match limit { - MemoryBufferSize::MaxEvents(max_events) => Inner { - data: Arc::new(ArrayQueue::new(max_events.get())), - limit, - limiter: Arc::new(Semaphore::new(max_events.get())), - read_waker: Arc::new(Notify::new()), - }, - MemoryBufferSize::MaxSize(max_size) => Inner { - data: Arc::new(SegQueue::new()), - limit, - limiter: Arc::new(Semaphore::new(max_size.get())), - read_waker: Arc::new(Notify::new()), - }, - }; + let inner = Inner::new(limit, metric_name_output); let sender = LimitedSender { inner: inner.clone(), @@ -310,7 +364,8 @@ mod tests { #[tokio::test] async fn send_receive() { - let (mut tx, mut rx) = limited(MemoryBufferSize::MaxEvents(NonZeroUsize::new(2).unwrap())); + let limit = MemoryBufferSize::MaxEvents(NonZeroUsize::new(2).unwrap()); + let (mut tx, mut rx) = limited(limit, None); 
assert_eq!(2, tx.available_capacity()); @@ -344,9 +399,8 @@ mod tests { let max_allowed_bytes = msg_size * max_elements; // With this configuration a maximum of exactly 10 messages can fit in the channel - let (mut tx, mut rx) = limited(MemoryBufferSize::MaxSize( - NonZeroUsize::new(max_allowed_bytes).unwrap(), - )); + let limit = MemoryBufferSize::MaxSize(NonZeroUsize::new(max_allowed_bytes).unwrap()); + let (mut tx, mut rx) = limited(limit, None); assert_eq!(max_allowed_bytes, tx.available_capacity()); @@ -379,7 +433,8 @@ mod tests { #[test] fn sender_waits_for_more_capacity_when_none_available() { - let (mut tx, mut rx) = limited(MemoryBufferSize::MaxEvents(NonZeroUsize::new(1).unwrap())); + let limit = MemoryBufferSize::MaxEvents(NonZeroUsize::new(1).unwrap()); + let (mut tx, mut rx) = limited(limit, None); assert_eq!(1, tx.available_capacity()); @@ -440,7 +495,8 @@ mod tests { #[test] fn sender_waits_for_more_capacity_when_partial_available() { - let (mut tx, mut rx) = limited(MemoryBufferSize::MaxEvents(NonZeroUsize::new(7).unwrap())); + let limit = MemoryBufferSize::MaxEvents(NonZeroUsize::new(7).unwrap()); + let (mut tx, mut rx) = limited(limit, None); assert_eq!(7, tx.available_capacity()); @@ -528,7 +584,8 @@ mod tests { #[test] fn empty_receiver_returns_none_when_last_sender_drops() { - let (mut tx, mut rx) = limited(MemoryBufferSize::MaxEvents(NonZeroUsize::new(1).unwrap())); + let limit = MemoryBufferSize::MaxEvents(NonZeroUsize::new(1).unwrap()); + let (mut tx, mut rx) = limited(limit, None); assert_eq!(1, tx.available_capacity()); @@ -570,8 +627,8 @@ mod tests { #[test] fn receiver_returns_none_once_empty_when_last_sender_drops() { - let (tx, mut rx) = - limited::(MemoryBufferSize::MaxEvents(NonZeroUsize::new(1).unwrap())); + let limit = MemoryBufferSize::MaxEvents(NonZeroUsize::new(1).unwrap()); + let (tx, mut rx) = limited::(limit, None); assert_eq!(1, tx.available_capacity()); @@ -600,7 +657,8 @@ mod tests { #[test] fn 
oversized_send_allowed_when_empty() { - let (mut tx, mut rx) = limited(MemoryBufferSize::MaxEvents(NonZeroUsize::new(1).unwrap())); + let limit = MemoryBufferSize::MaxEvents(NonZeroUsize::new(1).unwrap()); + let (mut tx, mut rx) = limited(limit, None); assert_eq!(1, tx.available_capacity()); @@ -632,7 +690,8 @@ mod tests { #[test] fn oversized_send_allowed_when_partial_capacity() { - let (mut tx, mut rx) = limited(MemoryBufferSize::MaxEvents(NonZeroUsize::new(2).unwrap())); + let limit = MemoryBufferSize::MaxEvents(NonZeroUsize::new(2).unwrap()); + let (mut tx, mut rx) = limited(limit, None); assert_eq!(2, tx.available_capacity()); diff --git a/lib/vector-buffers/src/variants/in_memory.rs b/lib/vector-buffers/src/variants/in_memory.rs index 1c9fd541f263e..30e986196daef 100644 --- a/lib/vector-buffers/src/variants/in_memory.rs +++ b/lib/vector-buffers/src/variants/in_memory.rs @@ -44,7 +44,7 @@ where usage_handle.set_buffer_limits(max_bytes, max_size); - let (tx, rx) = limited(self.capacity); + let (tx, rx) = limited(self.capacity, None); Ok((tx.into(), rx.into())) } } diff --git a/lib/vector-core/src/source_sender/output.rs b/lib/vector-core/src/source_sender/output.rs index 88556eae08527..a6d15e0b19f62 100644 --- a/lib/vector-core/src/source_sender/output.rs +++ b/lib/vector-core/src/source_sender/output.rs @@ -30,6 +30,8 @@ use crate::{ schema::Definition, }; +const UTILIZATION_METRIC_PREFIX: &str = "source_buffer"; + /// UnsentEvents tracks the number of events yet to be sent in the buffer. This is used to /// increment the appropriate counters when a future is not polled to completion. 
Particularly, /// this is known to happen in a Warp server when a client sends a new HTTP request on a TCP @@ -114,7 +116,8 @@ impl Output { output_id: OutputId, timeout: Option, ) -> (Self, LimitedReceiver) { - let (tx, rx) = channel::limited(MemoryBufferSize::MaxEvents(NonZeroUsize::new(n).unwrap())); + let limit = MemoryBufferSize::MaxEvents(NonZeroUsize::new(n).unwrap()); + let (tx, rx) = channel::limited(limit, Some((UTILIZATION_METRIC_PREFIX, &output))); ( Self { sender: tx, diff --git a/lib/vector-core/src/source_sender/tests.rs b/lib/vector-core/src/source_sender/tests.rs index 113fe4f7e7f70..66e4169af7077 100644 --- a/lib/vector-core/src/source_sender/tests.rs +++ b/lib/vector-core/src/source_sender/tests.rs @@ -238,3 +238,56 @@ fn assert_counter_metric(metrics: &[Metric], name: &str, expected: f64) { "Only one {name} metric should be present" ); } + +#[tokio::test] +#[expect(clippy::cast_precision_loss)] +async fn emits_buffer_utilization_histogram_on_send_and_receive() { + metrics::init_test(); + let buffer_size = 2; + let (mut sender, mut recv) = SourceSender::new_test_sender_with_options(buffer_size, None); + + let event = Event::Log(LogEvent::from("test event")); + sender + .send_event(event.clone()) + .await + .expect("first send succeeds"); + sender + .send_event(event) + .await + .expect("second send succeeds"); + + // Drain the channel so both the send and receive paths are exercised. 
+ assert!(recv.next().await.is_some()); + assert!(recv.next().await.is_some()); + + let metrics: Vec<_> = Controller::get() + .expect("metrics controller available") + .capture_metrics() + .into_iter() + .filter(|metric| metric.name().starts_with("source_buffer_")) + .collect(); + assert_eq!(metrics.len(), 3, "expected 3 utilization metrics"); + + let find_metric = |name: &str| { + metrics + .iter() + .find(|m| m.name() == name) + .unwrap_or_else(|| panic!("missing metric: {name}")) + }; + + let metric = find_metric("source_buffer_utilization"); + let tags = metric.tags().expect("utilization histogram has tags"); + assert_eq!(tags.get("output"), Some("_default")); + + let metric = find_metric("source_buffer_utilization_level"); + let MetricValue::Gauge { value } = metric.value() else { + panic!("source_buffer_utilization_level should be a gauge"); + }; + assert_eq!(*value, 2.0); + + let metric = find_metric("source_buffer_max_event_size"); + let MetricValue::Gauge { value } = metric.value() else { + panic!("source_buffer_max_event_size should be a gauge"); + }; + assert_eq!(*value, buffer_size as f64); +} diff --git a/src/test_util/components.rs b/src/test_util/components.rs index 822de8d007110..86a7b0defee25 100644 --- a/src/test_util/components.rs +++ b/src/test_util/components.rs @@ -7,10 +7,11 @@ //! internal events and metrics, and testing that they fit the required //! patterns. -use std::{env, sync::LazyLock, time::Duration}; +use std::{collections::HashSet, env, sync::LazyLock, time::Duration}; use futures::{SinkExt, Stream, StreamExt, stream}; use futures_util::Future; +use itertools::Itertools as _; use tokio::{pin, select, time::sleep}; use vector_lib::event_test_util; @@ -64,6 +65,12 @@ pub const HTTP_SINK_TAGS: [&str; 2] = ["endpoint", "protocol"]; /// The standard set of tags for all `AWS`-based sinks. pub const AWS_SINK_TAGS: [&str; 2] = ["protocol", "region"]; +/// The list of source sender buffer metrics that must be emitted. 
+const SOURCE_SENDER_BUFFER_METRICS: [&str; 2] = [ + "source_buffer_utilization", + "source_buffer_utilization_level", +]; + /// This struct is used to describe a set of component tests. pub struct ComponentTests<'a, 'b, 'c> { /// The list of event (suffixes) that must be emitted by the component @@ -72,6 +79,8 @@ pub struct ComponentTests<'a, 'b, 'c> { tagged_counters: &'b [&'b str], /// The list of counter metrics (with no particular tags) that must be incremented untagged_counters: &'c [&'c str], + /// Whether the source sender metrics must be emitted + require_source_sender_metrics: bool, } /// The component test specification for all sources. @@ -84,6 +93,7 @@ pub static SOURCE_TESTS: LazyLock = LazyLock::new(|| ComponentTe "component_sent_events_total", "component_sent_event_bytes_total", ], + require_source_sender_metrics: true, }); /// The component error test specification (sources and sinks). @@ -91,6 +101,7 @@ pub static COMPONENT_TESTS_ERROR: LazyLock = LazyLock::new(|| Co events: &["Error"], tagged_counters: &["component_errors_total"], untagged_counters: &[], + require_source_sender_metrics: false, }); /// The component test specification for all transforms. @@ -103,6 +114,7 @@ pub static TRANSFORM_TESTS: LazyLock = LazyLock::new(|| Componen "component_sent_events_total", "component_sent_event_bytes_total", ], + require_source_sender_metrics: false, }); /// The component test specification for sinks that are push-based. 
@@ -114,6 +126,7 @@ pub static SINK_TESTS: LazyLock = LazyLock::new(|| { "component_sent_events_total", "component_sent_event_bytes_total", ], + require_source_sender_metrics: false, } }); @@ -126,6 +139,7 @@ pub static DATA_VOLUME_SINK_TESTS: LazyLock = LazyLock::new(|| { "component_sent_event_bytes_total", ], untagged_counters: &[], + require_source_sender_metrics: false, } }); @@ -137,6 +151,7 @@ pub static NONSENDING_SINK_TESTS: LazyLock = LazyLock::new(|| Co "component_sent_event_bytes_total", ], untagged_counters: &[], + require_source_sender_metrics: false, }); /// The component test specification for components with multiple outputs. @@ -148,6 +163,7 @@ pub static COMPONENT_MULTIPLE_OUTPUTS_TESTS: LazyLock = "component_sent_event_bytes_total", ], untagged_counters: &[], + require_source_sender_metrics: false, }); impl ComponentTests<'_, '_, '_> { @@ -158,6 +174,9 @@ impl ComponentTests<'_, '_, '_> { test.emitted_all_events(self.events); test.emitted_all_counters(self.tagged_counters, tags); test.emitted_all_counters(self.untagged_counters, &[]); + if self.require_source_sender_metrics { + test.emitted_source_sender_metrics(); + } if !test.errors.is_empty() { panic!( "Failed to assert compliance, errors:\n{}\n", @@ -249,6 +268,56 @@ impl ComponentTester { } } } + + fn emitted_source_sender_metrics(&mut self) { + let mut partial_matches = Vec::new(); + let mut missing: HashSet<&str> = SOURCE_SENDER_BUFFER_METRICS.iter().copied().collect(); + + for metric in self + .metrics + .iter() + .filter(|m| SOURCE_SENDER_BUFFER_METRICS.contains(&m.name())) + { + let tags = metric.tags(); + let has_output_tag = tags.is_some_and(|t| t.contains_key("output")); + let is_histogram = matches!(metric.value(), MetricValue::AggregatedHistogram { .. }); + let is_gauge = matches!(metric.value(), MetricValue::Gauge { .. 
}); + + if (is_histogram || is_gauge) && has_output_tag { + missing.remove(metric.name()); + continue; + } + + let tags_desc = tags + .map(|t| format!("{{{}}}", itertools::join(t.keys(), ","))) + .unwrap_or_default(); + + let mut reasons = Vec::new(); + if !is_histogram && !is_gauge { + reasons.push(format!("unexpected type `{}`", metric.value().as_name())); + } + if !has_output_tag { + reasons.push("missing `output` tag".to_string()); + } + let detail = if reasons.is_empty() { + String::new() + } else { + format!(" ({})", reasons.join(", ")) + }; + partial_matches.push(format!( + "\n -> Found metric `{}{tags_desc}`{detail}", + metric.name(), + )); + } + + if !missing.is_empty() { + let partial = partial_matches.join(""); + self.errors.push(format!( + " - Missing metric `{}*` with tag `output`{partial}", + missing.iter().join(", ") + )); + } + } } /// Runs and returns a future and asserts that the provided test specification passes. @@ -529,6 +598,7 @@ pub async fn assert_sink_error_with_events( events, tagged_counters: &["component_errors_total"], untagged_counters: &[], + require_source_sender_metrics: false, }; assert_sink_error_with_component_tests(&component_tests, tags, f).await } diff --git a/src/test_util/mock/sources/basic.rs b/src/test_util/mock/sources/basic.rs index 5ecc3db219446..614f5f7387d43 100644 --- a/src/test_util/mock/sources/basic.rs +++ b/src/test_util/mock/sources/basic.rs @@ -45,9 +45,8 @@ pub struct BasicSourceConfig { impl Default for BasicSourceConfig { fn default() -> Self { - let (_, receiver) = limited(MemoryBufferSize::MaxEvents( - NonZeroUsize::new(1000).unwrap(), - )); + let limit = MemoryBufferSize::MaxEvents(NonZeroUsize::new(1000).unwrap()); + let (_, receiver) = limited(limit, None); Self { receiver: Arc::new(Mutex::new(Some(receiver))), event_counter: None, diff --git a/website/cue/reference/components/sources.cue b/website/cue/reference/components/sources.cue index 69fcafd19ccf0..67585490993e5 100644 --- 
a/website/cue/reference/components/sources.cue +++ b/website/cue/reference/components/sources.cue @@ -417,5 +417,9 @@ components: sources: [Name=string]: { component_sent_events_total: components.sources.internal_metrics.output.metrics.component_sent_events_total component_sent_event_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_event_bytes_total source_lag_time_seconds: components.sources.internal_metrics.output.metrics.source_lag_time_seconds + source_buffer_max_byte_size: components.sources.internal_metrics.output.metrics.source_buffer_max_byte_size + source_buffer_max_event_size: components.sources.internal_metrics.output.metrics.source_buffer_max_event_size + source_buffer_utilization: components.sources.internal_metrics.output.metrics.source_buffer_utilization + source_buffer_utilization_level: components.sources.internal_metrics.output.metrics.source_buffer_utilization_level } } diff --git a/website/cue/reference/components/sources/internal_metrics.cue b/website/cue/reference/components/sources/internal_metrics.cue index 2acd6c2bfbc28..d03036431acf2 100644 --- a/website/cue/reference/components/sources/internal_metrics.cue +++ b/website/cue/reference/components/sources/internal_metrics.cue @@ -718,6 +718,38 @@ components: sources: internal_metrics: { default_namespace: "vector" tags: _component_tags } + source_buffer_max_byte_size: { + description: "The maximum number of bytes the buffer that the source's outputs send into can hold." + type: "gauge" + default_namespace: "vector" + tags: _component_tags & { + output: _output + } + } + source_buffer_max_event_size: { + description: "The maximum number of events the buffer that the source's outputs send into can hold." + type: "gauge" + default_namespace: "vector" + tags: _component_tags & { + output: _output + } + } + source_buffer_utilization: { + description: "The utilization level of the buffer that the source's outputs send into." 
+ type: "histogram" + default_namespace: "vector" + tags: _component_tags & { + output: _output + } + } + source_buffer_utilization_level: { + description: "The current utilization level of the buffer that the source's outputs send into." + type: "gauge" + default_namespace: "vector" + tags: _component_tags & { + output: _output + } + } splunk_pending_acks: { description: "The number of outstanding Splunk HEC indexer acknowledgement acks." type: "gauge" From 3eda9d2ec27fe7615f9cf1779d1e9b89ae3ab0a7 Mon Sep 17 00:00:00 2001 From: Thomas Date: Mon, 1 Dec 2025 15:28:13 -0500 Subject: [PATCH 124/227] chore(deps): bump VRL version to include example location (#24317) * chore(deps): bump VRL version to include example location * Update example in dnstap * Add bogus source_file/source_line to vector-vrl docs tests * Use VRL commit before datadog search changes --- Cargo.lock | 4 ++-- lib/dnstap-parser/src/vrl_functions/parse_dnstap.rs | 4 ++-- lib/enrichment/src/find_enrichment_table_records.rs | 4 ++-- lib/enrichment/src/get_enrichment_table_record.rs | 4 ++-- lib/vector-vrl/functions/src/get_secret.rs | 4 ++-- lib/vector-vrl/functions/src/remove_secret.rs | 4 ++-- lib/vector-vrl/functions/src/set_secret.rs | 4 ++-- lib/vector-vrl/functions/src/set_semantic_meaning.rs | 4 ++-- lib/vector-vrl/tests/src/docs.rs | 2 ++ 9 files changed, 18 insertions(+), 16 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2c51ad55c9739..87674aedd115c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -13033,8 +13033,8 @@ checksum = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d" [[package]] name = "vrl" -version = "0.28.0" -source = "git+https://github.com/vectordotdev/vrl.git?branch=main#1f0d8a50df5c5cc8a45454bf992261a332013f59" +version = "0.28.1" +source = "git+https://github.com/vectordotdev/vrl.git?branch=main#d19c4d06c3cbf93d5c44d70dac96c288d2937de4" dependencies = [ "aes", "aes-siv", diff --git a/lib/dnstap-parser/src/vrl_functions/parse_dnstap.rs 
b/lib/dnstap-parser/src/vrl_functions/parse_dnstap.rs index e9eecd61339b2..aec6934f23f5b 100644 --- a/lib/dnstap-parser/src/vrl_functions/parse_dnstap.rs +++ b/lib/dnstap-parser/src/vrl_functions/parse_dnstap.rs @@ -29,7 +29,7 @@ impl Function for ParseDnstap { } fn examples(&self) -> &'static [Example] { - &[Example { + &[example!( title: "Parse dnstap query message", source: r#"parse_dnstap!("ChVqYW1lcy1WaXJ0dWFsLU1hY2hpbmUSC0JJTkQgOS4xNi4zGgBy5wEIAxACGAEiEAAAAAAAAAAAAAAAAAAAAAAqECABBQJwlAAAAAAAAAAAADAw8+0CODVA7+zq9wVNMU3WNlI2kwIAAAABAAAAAAABCWZhY2Vib29rMQNjb20AAAEAAQAAKQIAAACAAAAMAAoACOxjCAG9zVgzWgUDY29tAGAAbQAAAAByZLM4AAAAAQAAAAAAAQJoNQdleGFtcGxlA2NvbQAABgABAAApBNABAUAAADkADwA1AAlubyBTRVAgbWF0Y2hpbmcgdGhlIERTIGZvdW5kIGZvciBkbnNzZWMtZmFpbGVkLm9yZy54AQ==")"#, result: Ok(indoc!( @@ -135,7 +135,7 @@ impl Function for ParseDnstap { "timestamp": "2020-06-30T03:50:07.920014129Z" }"# )), - }] + )] } fn compile( diff --git a/lib/enrichment/src/find_enrichment_table_records.rs b/lib/enrichment/src/find_enrichment_table_records.rs index 7420b18b26fbb..369eb5d21140b 100644 --- a/lib/enrichment/src/find_enrichment_table_records.rs +++ b/lib/enrichment/src/find_enrichment_table_records.rs @@ -82,7 +82,7 @@ impl Function for FindEnrichmentTableRecords { } fn examples(&self) -> &'static [Example] { - &[Example { + &[example!( title: "find records", source: r#"find_enrichment_table_records!("test", {"surname": "Smith"})"#, result: Ok( @@ -90,7 +90,7 @@ impl Function for FindEnrichmentTableRecords { {"id": 2, "firstname": "Fred", "surname": "Smith"}]"#, }, ), - }] + )] } fn compile( diff --git a/lib/enrichment/src/get_enrichment_table_record.rs b/lib/enrichment/src/get_enrichment_table_record.rs index 49ada44993192..efdbc92542a19 100644 --- a/lib/enrichment/src/get_enrichment_table_record.rs +++ b/lib/enrichment/src/get_enrichment_table_record.rs @@ -79,11 +79,11 @@ impl Function for GetEnrichmentTableRecord { } fn examples(&self) -> &'static [Example] { - &[Example { + 
&[example!( title: "find records", source: r#"get_enrichment_table_record!("test", {"id": 1})"#, result: Ok(r#"{"id": 1, "firstname": "Bob", "surname": "Smith"}"#), - }] + )] } fn compile( diff --git a/lib/vector-vrl/functions/src/get_secret.rs b/lib/vector-vrl/functions/src/get_secret.rs index 28381e64ce16b..a1fbfc593ba70 100644 --- a/lib/vector-vrl/functions/src/get_secret.rs +++ b/lib/vector-vrl/functions/src/get_secret.rs @@ -26,11 +26,11 @@ impl Function for GetSecret { } fn examples(&self) -> &'static [Example] { - &[Example { + &[example!( title: "Get the datadog api key", source: r#"get_secret("datadog_api_key")"#, result: Ok("secret value"), - }] + )] } fn compile( diff --git a/lib/vector-vrl/functions/src/remove_secret.rs b/lib/vector-vrl/functions/src/remove_secret.rs index bd3a9319d48ba..4788c7cedfb48 100644 --- a/lib/vector-vrl/functions/src/remove_secret.rs +++ b/lib/vector-vrl/functions/src/remove_secret.rs @@ -23,11 +23,11 @@ impl Function for RemoveSecret { } fn examples(&self) -> &'static [Example] { - &[Example { + &[example!( title: "Remove the datadog api key", source: r#"remove_secret("datadog_api_key")"#, result: Ok("null"), - }] + )] } fn compile( diff --git a/lib/vector-vrl/functions/src/set_secret.rs b/lib/vector-vrl/functions/src/set_secret.rs index 5b0c56f705627..e6ba1e310c31a 100644 --- a/lib/vector-vrl/functions/src/set_secret.rs +++ b/lib/vector-vrl/functions/src/set_secret.rs @@ -37,11 +37,11 @@ impl Function for SetSecret { } fn examples(&self) -> &'static [Example] { - &[Example { + &[example!( title: "Set the datadog api key", source: r#"set_secret("datadog_api_key", "secret-value")"#, result: Ok("null"), - }] + )] } fn compile( diff --git a/lib/vector-vrl/functions/src/set_semantic_meaning.rs b/lib/vector-vrl/functions/src/set_semantic_meaning.rs index f56a4a57f4484..14d45acbdd519 100644 --- a/lib/vector-vrl/functions/src/set_semantic_meaning.rs +++ b/lib/vector-vrl/functions/src/set_semantic_meaning.rs @@ -50,11 +50,11 @@ impl 
Function for SetSemanticMeaning { } fn examples(&self) -> &'static [Example] { - &[Example { + &[example!( title: "Sets custom field semantic meaning", source: r#"set_semantic_meaning(.foo, "bar")"#, result: Ok("null"), - }] + )] } fn compile( diff --git a/lib/vector-vrl/tests/src/docs.rs b/lib/vector-vrl/tests/src/docs.rs index 0dacafb10ed5a..e55385c2ffd06 100644 --- a/lib/vector-vrl/tests/src/docs.rs +++ b/lib/vector-vrl/tests/src/docs.rs @@ -192,5 +192,7 @@ fn test_from_cue_example(category: &'static str, name: String, example: Example) skip, read_only_paths: vec![], check_diagnostics: false, + source_file: format!("website/cue/reference/remap/functions/{name}.cue"), + source_line: 1, } } From 3eae9314d1ed3f80d1c1db6ba8ede1e4ccb7183d Mon Sep 17 00:00:00 2001 From: Thomas Date: Mon, 1 Dec 2025 16:17:45 -0500 Subject: [PATCH 125/227] fix(file source): Fix flaky test_oldest_first by ensuring distinct creation timestamps (#24327) --- src/sources/file.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/sources/file.rs b/src/sources/file.rs index 31e4666bb659f..62a95db362f10 100644 --- a/src/sources/file.rs +++ b/src/sources/file.rs @@ -2126,6 +2126,9 @@ mod tests { let mut older = File::create(&older_path).unwrap(); older.sync_all().unwrap(); + // Sleep to ensure the creation timestamps are different + sleep_500_millis().await; + let newer_path = dir.path().join("a_newer_file"); let mut newer = File::create(&newer_path).unwrap(); newer.sync_all().unwrap(); From cea65d0b1688d40be2a819cb935b736c4a75d818 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Delafargue?= Date: Tue, 2 Dec 2025 17:39:49 +0100 Subject: [PATCH 126/227] chore(deps): bump maxminddb to 0.27 after RUSTSEC-2025-0132 (#24332) chore: bump maxminddb to 0.27 after RUSTSEC-2025-0132 --- Cargo.lock | 4 +- Cargo.toml | 2 +- src/enrichment_tables/geoip.rs | 114 +++++++++++++++++---------------- src/enrichment_tables/mmdb.rs | 4 +- 4 files changed, 64 insertions(+), 60 deletions(-) diff --git 
a/Cargo.lock b/Cargo.lock index 87674aedd115c..bfe9ab8910361 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6637,9 +6637,9 @@ dependencies = [ [[package]] name = "maxminddb" -version = "0.26.0" +version = "0.27.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a197e44322788858682406c74b0b59bf8d9b4954fe1f224d9a25147f1880bba" +checksum = "7ef0551fc3e7345a6c854c1026b0ddada1e443e51f4fb4cdcf86cc1a71d4b337" dependencies = [ "ipnetwork", "log", diff --git a/Cargo.toml b/Cargo.toml index 5a6bc7037e5f5..967b291118888 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -383,7 +383,7 @@ k8s-openapi = { version = "0.22.0", default-features = false, features = ["v1_26 kube = { version = "0.93.0", default-features = false, features = ["client", "openssl-tls", "runtime"], optional = true } listenfd = { version = "1.0.2", default-features = false, optional = true } lru = { version = "0.16.0", default-features = false } -maxminddb = { version = "0.26.0", default-features = false, optional = true, features = ["simdutf8"] } +maxminddb = { version = "0.27.0", default-features = false, optional = true, features = ["simdutf8"] } md-5 = { version = "0.10", default-features = false, optional = true } mongodb = { version = "3.3.0", default-features = false, optional = true, features = ["compat-3-0-0", "dns-resolver", "rustls-tls"] } async-nats = { version = "0.42.0", default-features = false, optional = true, features = ["ring"] } diff --git a/src/enrichment_tables/geoip.rs b/src/enrichment_tables/geoip.rs index 96f24fda1ffda..85fa3aaf3548a 100644 --- a/src/enrichment_tables/geoip.rs +++ b/src/enrichment_tables/geoip.rs @@ -4,13 +4,14 @@ //! //! [maxmind]: https://dev.maxmind.com/geoip/geoip2/downloadable //! 
[geolite]: https://dev.maxmind.com/geoip/geoip2/geolite2/#Download_Access -use std::{collections::BTreeMap, fs, net::IpAddr, path::PathBuf, sync::Arc, time::SystemTime}; +use std::{fs, net::IpAddr, path::PathBuf, sync::Arc, time::SystemTime}; use maxminddb::{ Reader, - geoip2::{AnonymousIp, City, ConnectionType, Isp}, + geoip2::{AnonymousIp, City, ConnectionType, Isp, Names}, }; use ordered_float::NotNan; +use serde::Deserialize; use vector_lib::{ configurable::configurable_component, enrichment::{Case, Condition, IndexHandle, Table}, @@ -114,6 +115,14 @@ pub struct Geoip { last_modified: SystemTime, } +fn lookup_value<'de, A: Deserialize<'de>>( + dbreader: &'de Reader>, + address: IpAddr, +) -> crate::Result> { + let result = dbreader.lookup(address)?; + Ok(result.decode::()?) +} + impl Geoip { /// Creates a new GeoIP struct from the provided config. pub fn new(config: GeoipConfig) -> crate::Result { @@ -128,22 +137,22 @@ impl Geoip { // Check if we can read database with dummy Ip. let ip = IpAddr::V4(std::net::Ipv4Addr::UNSPECIFIED); - let result = match dbkind { - DatabaseKind::Asn | DatabaseKind::Isp => dbreader.lookup::(ip).map(|_| ()), - DatabaseKind::ConnectionType => dbreader.lookup::(ip).map(|_| ()), - DatabaseKind::City => dbreader.lookup::(ip).map(|_| ()), - DatabaseKind::AnonymousIp => dbreader.lookup::(ip).map(|_| ()), - }; - - match result { - Ok(_) => Ok(Geoip { - last_modified: fs::metadata(&config.path)?.modified()?, - dbreader, - dbkind, - config, - }), - Err(error) => Err(error.into()), - } + match dbkind { + // Isp + DatabaseKind::Asn | DatabaseKind::Isp => lookup_value::(&dbreader, ip).map(|_| ()), + DatabaseKind::ConnectionType => { + lookup_value::(&dbreader, ip).map(|_| ()) + } + DatabaseKind::City => lookup_value::(&dbreader, ip).map(|_| ()), + DatabaseKind::AnonymousIp => lookup_value::(&dbreader, ip).map(|_| ()), + }?; + + Ok(Geoip { + last_modified: fs::metadata(&config.path)?.modified()?, + dbreader, + dbkind, + config, + }) } fn 
lookup(&self, ip: IpAddr, select: Option<&[String]>) -> Option { @@ -165,7 +174,7 @@ impl Geoip { match self.dbkind { DatabaseKind::Asn | DatabaseKind::Isp => { - let data = self.dbreader.lookup::(ip).ok()??; + let data = lookup_value::(&self.dbreader, ip).ok()??; add_field!("autonomous_system_number", data.autonomous_system_number); add_field!( @@ -176,62 +185,52 @@ impl Geoip { add_field!("organization", data.organization); } DatabaseKind::City => { - let data = self.dbreader.lookup::(ip).ok()??; + let data: City = lookup_value::(&self.dbreader, ip).ok()??; - add_field!( - "city_name", - self.take_translation(data.city.as_ref().and_then(|c| c.names.as_ref())) - ); + add_field!("city_name", self.take_translation(&data.city.names)); - add_field!("continent_code", data.continent.and_then(|c| c.code)); + add_field!("continent_code", data.continent.code); - let country = data.country.as_ref(); - add_field!("country_code", country.and_then(|country| country.iso_code)); - add_field!( - "country_name", - self.take_translation(country.and_then(|c| c.names.as_ref())) - ); + let country = data.country; + add_field!("country_code", country.iso_code); + add_field!("country_name", self.take_translation(&country.names)); - let location = data.location.as_ref(); - add_field!("timezone", location.and_then(|location| location.time_zone)); + let location = data.location; + add_field!("timezone", location.time_zone); add_field!( "latitude", - location - .and_then(|location| location.latitude) - .map(|latitude| Value::Float( - NotNan::new(latitude).expect("latitude cannot be Nan") - )) + location.latitude.map(|latitude| Value::Float( + NotNan::new(latitude).expect("latitude cannot be Nan") + )) ); add_field!( "longitude", location - .and_then(|location| location.longitude) + .longitude .map(|longitude| NotNan::new(longitude).expect("longitude cannot be Nan")) ); - add_field!( - "metro_code", - location.and_then(|location| location.metro_code) - ); + add_field!("metro_code", 
location.metro_code); // last subdivision is most specific per https://github.com/maxmind/GeoIP2-java/blob/39385c6ce645374039450f57208b886cf87ade47/src/main/java/com/maxmind/geoip2/model/AbstractCityResponse.java#L96-L107 - let subdivision = data.subdivisions.as_ref().and_then(|s| s.last()); + let subdivision = data.subdivisions.last(); add_field!( "region_name", - self.take_translation(subdivision.and_then(|s| s.names.as_ref())) + subdivision.map(|s| self.take_translation(&s.names)) ); + add_field!( "region_code", subdivision.and_then(|subdivision| subdivision.iso_code) ); - add_field!("postal_code", data.postal.and_then(|p| p.code)); + add_field!("postal_code", data.postal.code); } DatabaseKind::ConnectionType => { - let data = self.dbreader.lookup::(ip).ok()??; + let data = lookup_value::(&self.dbreader, ip).ok()??; add_field!("connection_type", data.connection_type); } DatabaseKind::AnonymousIp => { - let data = self.dbreader.lookup::(ip).ok()??; + let data = lookup_value::(&self.dbreader, ip).ok()??; add_field!("is_anonymous", data.is_anonymous); add_field!("is_anonymous_vpn", data.is_anonymous_vpn); @@ -245,13 +244,18 @@ impl Geoip { Some(map) } - fn take_translation<'a>( - &self, - translations: Option<&BTreeMap<&str, &'a str>>, - ) -> Option<&'a str> { - translations - .and_then(|translations| translations.get(&*self.config.locale)) - .copied() + fn take_translation<'a>(&self, translations: &'a Names<'a>) -> Option<&'a str> { + match self.config.locale.as_ref() { + "en" => translations.english, + "de" => translations.german, + "es" => translations.spanish, + "fr" => translations.french, + "ja" => translations.japanese, + "pt-BR" => translations.brazilian_portuguese, + "ru" => translations.russian, + "zh-CN" => translations.simplified_chinese, + _ => None, + } } } diff --git a/src/enrichment_tables/mmdb.rs b/src/enrichment_tables/mmdb.rs index b8103cd608001..d5141d53b46e6 100644 --- a/src/enrichment_tables/mmdb.rs +++ b/src/enrichment_tables/mmdb.rs @@ -56,7 
+56,7 @@ impl Mmdb { // Check if we can read database with dummy Ip. let ip = IpAddr::V4(std::net::Ipv4Addr::UNSPECIFIED); - let result = dbreader.lookup::(ip).map(|_| ()); + let result = dbreader.lookup(ip)?.decode::().map(|_| ()); match result { Ok(_) => Ok(Mmdb { @@ -69,7 +69,7 @@ impl Mmdb { } fn lookup(&self, ip: IpAddr, select: Option<&[String]>) -> Option { - let data = self.dbreader.lookup::(ip).ok()??; + let data = self.dbreader.lookup(ip).ok()?.decode().ok()??; if let Some(fields) = select { let mut filtered = Value::from(ObjectMap::new()); From 80fc73b2ccb2be507b485189de1677900cf24246 Mon Sep 17 00:00:00 2001 From: Yoenn Burban <62966490+gwenaskell@users.noreply.github.com> Date: Tue, 2 Dec 2025 17:51:24 +0100 Subject: [PATCH 127/227] chore(vrl)!: Bump vrl hash and fix datadog search tests (#24334) bump vrl and fix datadog search tests --- Cargo.lock | 2 +- src/conditions/datadog_search.rs | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index bfe9ab8910361..727cc212de818 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -13034,7 +13034,7 @@ checksum = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d" [[package]] name = "vrl" version = "0.28.1" -source = "git+https://github.com/vectordotdev/vrl.git?branch=main#d19c4d06c3cbf93d5c44d70dac96c288d2937de4" +source = "git+https://github.com/vectordotdev/vrl.git?branch=main#5eafcc03be7fd864e51fe86dfc719e941de8e432" dependencies = [ "aes", "aes-siv", diff --git a/src/conditions/datadog_search.rs b/src/conditions/datadog_search.rs index 886c286096a5f..06e0cda4d6a48 100644 --- a/src/conditions/datadog_search.rs +++ b/src/conditions/datadog_search.rs @@ -1268,9 +1268,9 @@ mod test { ), // negate OR of two values ( - "-@field:value1 OR -@field:value2", - log_event!["field" => "value"], - log_event!["field" => "value2"], + "-@field1:value1 OR -@field2:value2", + log_event!["field1" => "value1"], + log_event!["field1" => "value1", "field2" => "value2"], ), // 
default AND of two values ( From 4902750cf18eae8e201e5849fea1a404bb57afe8 Mon Sep 17 00:00:00 2001 From: Steve Hall <4615775+sghall@users.noreply.github.com> Date: Tue, 2 Dec 2025 13:21:18 -0700 Subject: [PATCH 128/227] fix(syslog source): emit received events/bytes metrics for UDP mode (#24296) * fix for syslog udp metrics * add changelog * add authors * Address review comments - use byte_size directly in SocketEventsReceived metrics and remove SocketBytesReceived * use byte_size directly... * clippy * fmt * add back BytesReceived metrics caught by unit test * Use events.estimated_json_encoded_size_of() * clippy --------- Co-authored-by: Emily Chen Co-authored-by: Thomas --- .../15687_syslog_udp_received_metrics.fix.md | 9 ++ src/sources/syslog.rs | 84 ++++++++++++++++++- 2 files changed, 91 insertions(+), 2 deletions(-) create mode 100644 changelog.d/15687_syslog_udp_received_metrics.fix.md diff --git a/changelog.d/15687_syslog_udp_received_metrics.fix.md b/changelog.d/15687_syslog_udp_received_metrics.fix.md new file mode 100644 index 0000000000000..cd4805bc0675e --- /dev/null +++ b/changelog.d/15687_syslog_udp_received_metrics.fix.md @@ -0,0 +1,9 @@ +The `syslog` source in UDP mode now emits the standard "received" metrics, aligning behavior with TCP and the Component Specification: + +- `component_received_events_total` +- `component_received_event_bytes_total` +- `component_received_bytes_total` + +This makes internal telemetry consistent and restores compliance checks for UDP syslog. 
+ +authors: sghall diff --git a/src/sources/syslog.rs b/src/sources/syslog.rs index 57ed8d90f3a28..9a3d78b8e1bad 100644 --- a/src/sources/syslog.rs +++ b/src/sources/syslog.rs @@ -9,12 +9,14 @@ use listenfd::ListenFd; use smallvec::SmallVec; use tokio_util::udp::UdpFramed; use vector_lib::{ + EstimatedJsonEncodedSizeOf, codecs::{ BytesDecoder, OctetCountingDecoder, SyslogDeserializerConfig, decoding::{Deserializer, Framer}, }, config::{LegacyKey, LogNamespace}, configurable::configurable_component, + internal_event::{ByteSize, BytesReceived, InternalEventHandle as _, Protocol}, ipallowlist::IpAllowlistConfig, lookup::{OwnedValuePath, lookup_v2::OptionalValuePath, path}, }; @@ -29,7 +31,9 @@ use crate::{ DataType, GenerateConfig, Resource, SourceConfig, SourceContext, SourceOutput, log_schema, }, event::Event, - internal_events::{SocketBindError, SocketMode, SocketReceiveError, StreamClosedError}, + internal_events::{ + SocketBindError, SocketEventsReceived, SocketMode, SocketReceiveError, StreamClosedError, + }, net, shutdown::ShutdownSignal, sources::util::net::{SocketListenAddr, TcpNullAcker, TcpSource, try_bind_udp_socket}, @@ -341,6 +345,8 @@ pub fn udp( r#type = "udp" ); + let bytes_received = register!(BytesReceived::from(Protocol::UDP)); + let mut stream = UdpFramed::new( socket, Decoder::new( @@ -353,9 +359,17 @@ pub fn udp( .take_until(shutdown) .filter_map(|frame| { let host_key = host_key.clone(); + let bytes_received = bytes_received.clone(); async move { match frame { - Ok(((mut events, _byte_size), received_from)) => { + Ok(((mut events, byte_size), received_from)) => { + let count = events.len(); + bytes_received.emit(ByteSize(byte_size)); + emit!(SocketEventsReceived { + mode: SocketMode::Udp, + byte_size: events.estimated_json_encoded_size_of(), + count, + }); let received_from = received_from.ip().to_string().into(); handle_events(&mut events, &host_key, Some(received_from), log_namespace); Some(events.remove(0)) @@ -1185,6 +1199,72 @@ mod test { 
.await; } + #[tokio::test] + async fn test_udp_syslog() { + assert_source_compliance(&SOCKET_PUSH_SOURCE_TAGS, async { + let num_messages: usize = 1000; + let (_guard, in_addr) = next_addr(); + + // Create and spawn the source. + let config = SyslogConfig::from_mode(Mode::Udp { + address: in_addr.into(), + receive_buffer_bytes: None, + }); + + let key = ComponentKey::from("in"); + let (tx, rx) = SourceSender::new_test(); + let (context, shutdown) = SourceContext::new_shutdown(&key, tx); + let shutdown_complete = shutdown.shutdown_tripwire(); + + let source = config + .build(context) + .await + .expect("source should not fail to build"); + tokio::spawn(source); + + // Give UDP a brief moment to start listening. + sleep(Duration::from_millis(150)).await; + + let output_events = CountReceiver::receive_events(rx); + + // Craft and send syslog messages as individual UDP datagrams. + let input_messages: Vec = (0..num_messages) + .map(|i| SyslogMessageRfc5424::random(i, 30, 4, 3, 3)) + .collect(); + + let input_lines: Vec = + input_messages.iter().map(|msg| msg.to_string()).collect(); + + let socket = tokio::net::UdpSocket::bind("127.0.0.1:0").await.unwrap(); + for line in input_lines { + socket.send_to(line.as_bytes(), in_addr).await.unwrap(); + } + + // Wait a short period of time to ensure the messages get sent. + sleep(Duration::from_secs(2)).await; + + // Shutdown the source, and make sure we've got all the messages we sent in. + shutdown + .shutdown_all(Some(Instant::now() + Duration::from_millis(100))) + .await; + shutdown_complete.await; + + let output_events = output_events.await; + assert_eq!(output_events.len(), num_messages); + + let output_messages: Vec = output_events + .into_iter() + .map(|mut e| { + e.as_mut_log().remove("hostname"); // Vector adds this field which will cause a parse error. + e.as_mut_log().remove("source_ip"); // Vector adds this field which will cause a parse error. 
+ e.into() + }) + .collect(); + assert_eq!(output_messages, input_messages); + }) + .await; + } + #[cfg(unix)] #[tokio::test] async fn test_unix_stream_syslog() { From ea556a288e2e58b1f09bbb1181add57bd1ff5742 Mon Sep 17 00:00:00 2001 From: Bruce Guenter Date: Wed, 3 Dec 2025 10:43:41 -0600 Subject: [PATCH 129/227] chore(observability): Introduce `trait NamedInternalEvent` and derive (#24313) * chore(observability): Introduce `trait NamedInternalEvent` and derive This change moves the two optional `fn name` methods from the `InternalEvent` and `RegisterInternalEvent` traits into a new `trait NamedInternalEvent` and adds a derive procedural macro to make implementation of that trait trivial. This borrows heavily from #23978 which implemented a `#[internal_event]` procedural macro, but takes it in a different direction and completes the implementation. * Miscellaneous fixes * Drop `DefaultName` wrapper as it is no longer needed * Fix Windows service events --- Cargo.lock | 11 ++ Cargo.toml | 2 + lib/dnstap-parser/Cargo.toml | 2 +- lib/dnstap-parser/src/internal_events.rs | 7 +- lib/vector-buffers/src/internal_events.rs | 6 + lib/vector-common-macros/Cargo.toml | 13 ++ .../src/internal_event.rs | 45 +++++++ lib/vector-common-macros/src/lib.rs | 37 ++++++ lib/vector-common/Cargo.toml | 4 +- lib/vector-common/src/event_test_util.rs | 121 +++++++++++++++--- .../component_events_dropped.rs | 7 +- lib/vector-common/src/internal_event/mod.rs | 61 ++------- .../src/internal_event/service.rs | 13 +- lib/vector-common/src/lib.rs | 2 + lib/vector-core/src/lib.rs | 25 ---- lib/vector-lib/src/lib.rs | 2 +- scripts/check-events | 6 - .../memory/internal_events.rs | 40 ++---- src/internal_events/aggregate.rs | 7 +- src/internal_events/amqp.rs | 9 +- src/internal_events/apache_metrics.rs | 5 +- src/internal_events/api.rs | 3 +- src/internal_events/aws.rs | 2 + src/internal_events/aws_cloudwatch_logs.rs | 3 +- src/internal_events/aws_ec2_metadata.rs | 5 +- 
src/internal_events/aws_ecs_metrics.rs | 5 +- src/internal_events/aws_kinesis.rs | 3 +- src/internal_events/aws_kinesis_firehose.rs | 7 +- src/internal_events/aws_sqs.rs | 26 ++-- src/internal_events/batch.rs | 3 +- src/internal_events/codecs.rs | 13 +- src/internal_events/common.rs | 13 +- src/internal_events/conditions.rs | 3 +- src/internal_events/datadog_agent.rs | 3 +- src/internal_events/datadog_metrics.rs | 3 +- src/internal_events/datadog_traces.rs | 5 +- src/internal_events/dedupe.rs | 3 +- src/internal_events/demo_logs.rs | 3 +- src/internal_events/dnstap.rs | 3 +- src/internal_events/docker_logs.rs | 17 +-- src/internal_events/encoding_transcode.rs | 7 +- src/internal_events/eventstoredb_metrics.rs | 5 +- src/internal_events/exec.rs | 11 +- src/internal_events/expansion.rs | 2 + src/internal_events/file.rs | 37 +++--- src/internal_events/file_descriptor.rs | 3 +- src/internal_events/fluent.rs | 5 +- src/internal_events/gcp_pubsub.rs | 4 + src/internal_events/grpc.rs | 9 +- src/internal_events/heartbeat.rs | 3 +- src/internal_events/host_metrics.rs | 7 +- src/internal_events/http.rs | 14 +- src/internal_events/http_client.rs | 7 +- src/internal_events/http_client_source.rs | 7 +- src/internal_events/influxdb.rs | 3 +- src/internal_events/internal_logs.rs | 6 +- src/internal_events/journald.rs | 11 +- src/internal_events/kafka.rs | 12 +- src/internal_events/kubernetes_logs.rs | 17 +-- src/internal_events/log_to_metric.rs | 6 + src/internal_events/logplex.rs | 5 +- src/internal_events/loki.rs | 9 +- src/internal_events/lua.rs | 7 +- src/internal_events/metric_to_log.rs | 3 +- src/internal_events/mongodb_metrics.rs | 5 +- src/internal_events/mqtt.rs | 7 +- src/internal_events/nginx_metrics.rs | 5 +- src/internal_events/open.rs | 5 +- src/internal_events/parser.rs | 7 +- src/internal_events/postgresql_metrics.rs | 3 +- src/internal_events/process.rs | 15 ++- src/internal_events/prometheus.rs | 7 +- src/internal_events/pulsar.rs | 4 +- 
src/internal_events/redis.rs | 3 +- src/internal_events/reduce.rs | 5 +- src/internal_events/remap.rs | 5 +- src/internal_events/sample.rs | 3 +- src/internal_events/sematext_metrics.rs | 5 +- src/internal_events/socket.rs | 18 +-- src/internal_events/splunk_hec.rs | 20 ++- src/internal_events/statsd_sink.rs | 3 +- src/internal_events/tag_cardinality_limit.rs | 4 + src/internal_events/tcp.rs | 15 ++- src/internal_events/template.rs | 2 + src/internal_events/throttle.rs | 3 +- src/internal_events/udp.rs | 8 +- src/internal_events/unix.rs | 13 +- src/internal_events/websocket.rs | 45 ++----- src/internal_events/websocket_server.rs | 31 +---- src/internal_events/windows.rs | 13 +- src/sinks/elasticsearch/mod.rs | 6 +- src/sinks/http/tests.rs | 2 +- 92 files changed, 579 insertions(+), 431 deletions(-) create mode 100644 lib/vector-common-macros/Cargo.toml create mode 100644 lib/vector-common-macros/src/internal_event.rs create mode 100644 lib/vector-common-macros/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index 727cc212de818..ff9e8907b9c2f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -12728,6 +12728,7 @@ dependencies = [ "derivative", "futures 0.3.31", "indexmap 2.12.0", + "itertools 0.14.0", "metrics", "paste", "pin-project", @@ -12737,10 +12738,20 @@ dependencies = [ "stream-cancel", "tokio", "tracing 0.1.41", + "vector-common-macros", "vector-config", "vrl", ] +[[package]] +name = "vector-common-macros" +version = "0.1.0" +dependencies = [ + "proc-macro2 1.0.101", + "quote 1.0.40", + "syn 2.0.106", +] + [[package]] name = "vector-config" version = "0.1.0" diff --git a/Cargo.toml b/Cargo.toml index 967b291118888..ab9e6f0d3b2d2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -118,6 +118,7 @@ members = [ "lib/vector-api-client", "lib/vector-buffers", "lib/vector-common", + "lib/vector-common-macros", "lib/vector-config", "lib/vector-config-common", "lib/vector-config-macros", @@ -197,6 +198,7 @@ uuid = { version = "1.18.1", features = ["v4", "v7", "serde"] } 
vector-config = { path = "lib/vector-config" } vector-config-common = { path = "lib/vector-config-common" } vector-config-macros = { path = "lib/vector-config-macros" } +vector-common-macros = { path = "lib/vector-common-macros" } vector-lib = { path = "lib/vector-lib", default-features = false, features = ["vrl"] } vrl = { git = "https://github.com/vectordotdev/vrl.git", branch = "main", features = ["arbitrary", "cli", "test", "test_framework"] } mock_instant = { version = "0.6" } diff --git a/lib/dnstap-parser/Cargo.toml b/lib/dnstap-parser/Cargo.toml index cc5527f2e5814..64220976d2398 100644 --- a/lib/dnstap-parser/Cargo.toml +++ b/lib/dnstap-parser/Cargo.toml @@ -15,7 +15,7 @@ hickory-proto.workspace = true prost.workspace = true snafu.workspace = true tracing.workspace = true -vector-lib = { path = "../vector-lib" } +vector-lib.workspace = true vrl.workspace = true paste.workspace = true diff --git a/lib/dnstap-parser/src/internal_events.rs b/lib/dnstap-parser/src/internal_events.rs index 8510d1f54db1e..8ff8763de9f71 100644 --- a/lib/dnstap-parser/src/internal_events.rs +++ b/lib/dnstap-parser/src/internal_events.rs @@ -1,7 +1,10 @@ use tracing::warn; -use vector_lib::internal_event::{InternalEvent, error_stage, error_type}; +use vector_lib::{ + NamedInternalEvent, + internal_event::{InternalEvent, error_stage, error_type}, +}; -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub(crate) struct DnstapParseWarning { pub error: E, } diff --git a/lib/vector-buffers/src/internal_events.rs b/lib/vector-buffers/src/internal_events.rs index cf852d7d04674..e207b8e0fab74 100644 --- a/lib/vector-buffers/src/internal_events.rs +++ b/lib/vector-buffers/src/internal_events.rs @@ -1,11 +1,13 @@ use std::time::Duration; use metrics::{Histogram, counter, gauge, histogram}; +use vector_common::NamedInternalEvent; use vector_common::{ internal_event::{InternalEvent, error_type}, registered_event, }; +#[derive(NamedInternalEvent)] pub struct BufferCreated { pub buffer_id: 
String, pub idx: usize, @@ -35,6 +37,7 @@ impl InternalEvent for BufferCreated { } } +#[derive(NamedInternalEvent)] pub struct BufferEventsReceived { pub buffer_id: String, pub idx: usize, @@ -75,6 +78,7 @@ impl InternalEvent for BufferEventsReceived { } } +#[derive(NamedInternalEvent)] pub struct BufferEventsSent { pub buffer_id: String, pub idx: usize, @@ -114,6 +118,7 @@ impl InternalEvent for BufferEventsSent { } } +#[derive(NamedInternalEvent)] pub struct BufferEventsDropped { pub buffer_id: String, pub idx: usize, @@ -180,6 +185,7 @@ impl InternalEvent for BufferEventsDropped { } } +#[derive(NamedInternalEvent)] pub struct BufferReadError { pub error_code: &'static str, pub error: String, diff --git a/lib/vector-common-macros/Cargo.toml b/lib/vector-common-macros/Cargo.toml new file mode 100644 index 0000000000000..64ea946ecf3c1 --- /dev/null +++ b/lib/vector-common-macros/Cargo.toml @@ -0,0 +1,13 @@ +[package] +name = "vector-common-macros" +version = "0.1.0" +edition = "2024" +license = "MPL-2.0" + +[lib] +proc-macro = true + +[dependencies] +proc-macro2 = { version = "1.0", default-features = false } +quote = { version = "1.0", default-features = false } +syn = { version = "2.0", default-features = false, features = ["full", "extra-traits", "visit-mut", "visit"] } diff --git a/lib/vector-common-macros/src/internal_event.rs b/lib/vector-common-macros/src/internal_event.rs new file mode 100644 index 0000000000000..c34b8d01a7aee --- /dev/null +++ b/lib/vector-common-macros/src/internal_event.rs @@ -0,0 +1,45 @@ +use proc_macro::TokenStream; +use quote::quote; +use syn::{Data, DeriveInput, parse_macro_input, spanned::Spanned}; + +/// Implements `NamedInternalEvent` for structs via `#[derive(NamedInternalEvent)]`. 
+pub fn derive_impl_named_internal_event(item: TokenStream) -> TokenStream { + let input = parse_macro_input!(item as DeriveInput); + + if !matches!(input.data, Data::Struct(_)) { + return syn::Error::new( + input.span(), + "#[derive(NamedInternalEvent)] can only be used with structs", + ) + .to_compile_error() + .into(); + } + + let DeriveInput { + ident, generics, .. + } = input; + let (impl_generics, ty_generics, where_clause) = generics.split_for_impl(); + + // Use a path that works from both vector-common (crate::internal_event) + // and from other crates using vector-lib (vector_lib::internal_event). + // For crates that don't depend on vector-lib but do depend on vector-common, + // we use vector_common::internal_event. + let pkg_name = std::env::var("CARGO_PKG_NAME").unwrap_or_default(); + let internal_event_path = if pkg_name == "vector-common" { + quote! { crate::internal_event } + } else if pkg_name.starts_with("vector-") { + // Most vector-* crates depend on vector-common but not vector-lib + quote! { ::vector_common::internal_event } + } else { + // Main vector crate and its internal modules use vector_lib + quote! { ::vector_lib::internal_event } + }; + + let expanded = quote! { + impl #impl_generics #internal_event_path::NamedInternalEvent for #ident #ty_generics #where_clause { + fn name(&self) -> &'static str { stringify!(#ident) } + } + }; + + TokenStream::from(expanded) +} diff --git a/lib/vector-common-macros/src/lib.rs b/lib/vector-common-macros/src/lib.rs new file mode 100644 index 0000000000000..a34b86d9c94fc --- /dev/null +++ b/lib/vector-common-macros/src/lib.rs @@ -0,0 +1,37 @@ +#![deny(warnings)] + +use proc_macro::TokenStream; + +mod internal_event; + +/// Derives `NamedInternalEvent` so `InternalEvent::name()` returns a stable +/// compile-time identifier for the event type. 
+/// +/// Apply this derive to any struct that also implements `InternalEvent` or `RegisterInternalEvent`: +/// +/// ```ignore +/// use vector_lib::internal_event::{InternalEvent, NamedInternalEvent}; +/// +/// #[derive(Debug, NamedInternalEvent)] +/// pub struct UdpSendIncompleteError { +/// pub data_size: usize, +/// pub sent: usize, +/// } +/// +/// impl InternalEvent for UdpSendIncompleteError { +/// fn emit(self) { +/// // ... emit metrics/logging ... +/// } +/// } +/// +/// // Later, `UdpSendIncompleteError::name()` returns the string "UdpSendIncompleteError". +/// ``` +/// +/// Notes: +/// - Works with lifetimes and generics. +/// - The generated implementation returns `stringify!(TypeName)` which avoids +/// compiler-version-dependent module paths. +#[proc_macro_derive(NamedInternalEvent)] +pub fn derive_internal_event_name(input: TokenStream) -> TokenStream { + internal_event::derive_impl_named_internal_event(input) +} diff --git a/lib/vector-common/Cargo.toml b/lib/vector-common/Cargo.toml index 98f18180000e9..3604dfa94cc1e 100644 --- a/lib/vector-common/Cargo.toml +++ b/lib/vector-common/Cargo.toml @@ -41,6 +41,7 @@ crossbeam-utils.workspace = true derivative.workspace = true futures.workspace = true indexmap.workspace = true +itertools.workspace = true metrics.workspace = true paste.workspace = true pin-project.workspace = true @@ -51,7 +52,8 @@ stream-cancel = { version = "0.8.2", default-features = false } tokio = { workspace = true, features = ["macros", "time"] } tracing.workspace = true vrl.workspace = true -vector-config = { path = "../vector-config" } +vector-config.workspace = true +vector-common-macros.workspace = true [dev-dependencies] futures = { version = "0.3.31", default-features = false, features = ["async-await"] } diff --git a/lib/vector-common/src/event_test_util.rs b/lib/vector-common/src/event_test_util.rs index fa759fcdf8a8e..4e2e9e728b440 100644 --- a/lib/vector-common/src/event_test_util.rs +++ 
b/lib/vector-common/src/event_test_util.rs @@ -1,4 +1,6 @@ -use std::{cell::RefCell, collections::HashSet, fmt::Write as _}; +use std::{cell::RefCell, collections::HashSet}; + +use itertools::Itertools as _; thread_local! { /// A buffer for recording internal events emitted by a single test. @@ -12,27 +14,25 @@ thread_local! { /// Will return `Err` if `pattern` is not found in the event record, or is found multiple times. pub fn contains_name_once(pattern: &str) -> Result<(), String> { EVENTS_RECORDED.with(|events| { - let mut n_events = 0; - let mut names = String::new(); - for event in &*events.borrow() { - if event.ends_with(pattern) { - if n_events > 0 { - names.push_str(", "); - } - n_events += 1; - _ = write!(names, "`{event}`"); + let events = events.borrow(); + let matches: Vec<_> = events + .iter() + .filter(|event| event_name_matches(event, pattern)) + .collect(); + match matches.len() { + 0 => Err(format!("Missing event {pattern:?}")), + 1 => Ok(()), + n => { + let names = matches + .into_iter() + .map(|event| format!("{event:?}")) + .join(", "); + Err(format!( + "Multiple ({n}) events matching {pattern:?}: ({names}). Hint! Don't use the `assert_x_` test \ + helpers on round-trip tests (tests that run more than a single component)." + )) } } - if n_events == 0 { - Err(format!("Missing event `{pattern}`")) - } else if n_events > 1 { - Err(format!( - "Multiple ({n_events}) events matching `{pattern}`: ({names}). Hint! Don't use the `assert_x_` \ - test helpers on round-trip tests (tests that run more than a single component)." 
- )) - } else { - Ok(()) - } }) } @@ -49,6 +49,19 @@ pub fn debug_print_events() { }); } +fn event_name_matches(event: &str, pattern: &str) -> bool { + let segment = event.rsplit_once("::").map_or(event, |(_, suffix)| suffix); + segment == pattern || (segment.ends_with(pattern) && !ignore_prefixed_match(segment, pattern)) +} + +fn ignore_prefixed_match(segment: &str, pattern: &str) -> bool { + // Buffer telemetry emits its own `BufferEvents{{Received|Sent}}` events for destinations in the + // topology. Component compliance only cares about the component-scoped + // `Events{{Received|Sent}}` signals, so we explicitly filter out the buffer-prefixed + // forms when matching these shared names. Other prefixes remain eligible. + matches!(pattern, "EventsReceived" | "EventsSent") && segment.starts_with("Buffer") +} + /// Record an emitted internal event. This is somewhat dumb at this /// point, just recording the pure string value of the `emit!` call /// parameter. At some point, making all internal events implement @@ -66,3 +79,71 @@ pub fn record_internal_event(event: &str) { EVENTS_RECORDED.with(|er| er.borrow_mut().insert(event.trim().into())); } + +#[cfg(test)] +mod tests { + use super::*; + + fn reset_events() { + clear_recorded_events(); + } + + fn insert_raw_event(event: &str) { + super::EVENTS_RECORDED.with(|events| { + events.borrow_mut().insert(event.into()); + }); + } + + #[test] + fn contains_name_once_accepts_exact_match() { + reset_events(); + record_internal_event("EventsReceived"); + assert!(contains_name_once("EventsReceived").is_ok()); + } + + #[test] + fn contains_name_once_ignores_prefix_matches() { + reset_events(); + record_internal_event("EventsReceived"); + record_internal_event("BufferEventsReceived"); + + assert!(contains_name_once("EventsReceived").is_ok()); + } + + #[test] + fn contains_name_once_matches_module_qualified_names() { + reset_events(); + insert_raw_event("vector::internal_events::EventsSent"); + + 
assert!(contains_name_once("EventsSent").is_ok()); + } + + #[test] + fn contains_name_once_still_flags_multiple_exact_matches() { + reset_events(); + record_internal_event("EventsSent"); + insert_raw_event("vector::internal_events::EventsSent"); + + let err = contains_name_once("EventsSent").unwrap_err(); + assert!( + err.contains("Multiple (2) events matching \"EventsSent\""), + "{err}" + ); + } + + #[test] + fn contains_name_once_matches_prefixed_component_events() { + reset_events(); + record_internal_event("SocketEventsReceived"); + + assert!(contains_name_once("EventsReceived").is_ok()); + } + + #[test] + fn contains_name_once_ignores_buffer_prefixed_events() { + reset_events(); + record_internal_event("BufferEventsReceived"); + + assert!(contains_name_once("EventsReceived").is_err()); + } +} diff --git a/lib/vector-common/src/internal_event/component_events_dropped.rs b/lib/vector-common/src/internal_event/component_events_dropped.rs index fde3434fc4b3e..bf45c054e45e1 100644 --- a/lib/vector-common/src/internal_event/component_events_dropped.rs +++ b/lib/vector-common/src/internal_event/component_events_dropped.rs @@ -1,11 +1,12 @@ use metrics::{Counter, counter}; use super::{Count, InternalEvent, InternalEventHandle, RegisterInternalEvent}; +use crate::NamedInternalEvent; pub const INTENTIONAL: bool = true; pub const UNINTENTIONAL: bool = false; -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct ComponentEventsDropped<'a, const INTENTIONAL: bool> { pub count: usize, pub reason: &'a str, @@ -16,10 +17,6 @@ impl InternalEvent for ComponentEventsDropped<'_, INTEN let count = self.count; self.register().emit(Count(count)); } - - fn name(&self) -> Option<&'static str> { - Some("ComponentEventsDropped") - } } impl<'a, const INTENTIONAL: bool> From<&'a str> for ComponentEventsDropped<'a, INTENTIONAL> { diff --git a/lib/vector-common/src/internal_event/mod.rs b/lib/vector-common/src/internal_event/mod.rs index d1c7e47913caf..369c4b9063c88 100644 --- 
a/lib/vector-common/src/internal_event/mod.rs +++ b/lib/vector-common/src/internal_event/mod.rs @@ -26,24 +26,19 @@ pub use service::{CallError, PollReadyError}; use crate::json_size::JsonSize; -pub trait InternalEvent: Sized { - fn emit(self); +pub trait NamedInternalEvent { + fn name(&self) -> &'static str; +} - // Optional for backwards compat until all events implement this - fn name(&self) -> Option<&'static str> { - None - } +pub trait InternalEvent: NamedInternalEvent + Sized { + fn emit(self); } #[allow(clippy::module_name_repetitions)] -pub trait RegisterInternalEvent: Sized { +pub trait RegisterInternalEvent: NamedInternalEvent + Sized { type Handle: InternalEventHandle; fn register(self) -> Self::Handle; - - fn name(&self) -> Option<&'static str> { - None - } } #[allow(clippy::module_name_repetitions)] @@ -52,39 +47,9 @@ pub trait InternalEventHandle: Sized { fn emit(&self, data: Self::Data); } -// Sets the name of an event if it doesn't have one -pub struct DefaultName { - pub name: &'static str, - pub event: E, -} - -impl InternalEvent for DefaultName { - fn emit(self) { - self.event.emit(); - } - - fn name(&self) -> Option<&'static str> { - Some(self.event.name().unwrap_or(self.name)) - } -} - -impl RegisterInternalEvent for DefaultName { - type Handle = E::Handle; - - fn register(self) -> Self::Handle { - self.event.register() - } - - fn name(&self) -> Option<&'static str> { - Some(self.event.name().unwrap_or(self.name)) - } -} - #[cfg(any(test, feature = "test"))] pub fn emit(event: impl InternalEvent) { - if let Some(name) = event.name() { - super::event_test_util::record_internal_event(name); - } + super::event_test_util::record_internal_event(event.name()); event.emit(); } @@ -95,9 +60,7 @@ pub fn emit(event: impl InternalEvent) { #[cfg(any(test, feature = "test"))] pub fn register(event: E) -> E::Handle { - if let Some(name) = event.name() { - super::event_test_util::record_internal_event(name); - } + 
super::event_test_util::record_internal_event(event.name()); event.register() } @@ -197,7 +160,7 @@ impl From for SharedString { macro_rules! registered_event { // A registered event struct with no fields (zero-sized type). ($event:ident => $($tail:tt)*) => { - #[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] + #[derive(Debug, Clone, Eq, Hash, $crate::NamedInternalEvent, Ord, PartialEq, PartialOrd)] pub struct $event; $crate::registered_event!(=> $event $($tail)*); @@ -205,7 +168,7 @@ macro_rules! registered_event { // A normal registered event struct. ($event:ident { $( $field:ident: $type:ty, )* } => $($tail:tt)*) => { - #[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] + #[derive(Debug, Clone, Eq, Hash, $crate::NamedInternalEvent, Ord, PartialEq, PartialOrd)] pub struct $event { $( pub $field: $type, )* } @@ -234,10 +197,6 @@ macro_rules! registered_event { impl $crate::internal_event::RegisterInternalEvent for $event { type Handle = [<$event Handle>]; - fn name(&self) -> Option<&'static str> { - Some(stringify!($event)) - } - fn register($slf) -> Self::Handle { Self::Handle { $( $field: $value, )* diff --git a/lib/vector-common/src/internal_event/service.rs b/lib/vector-common/src/internal_event/service.rs index bf1fdd5234d84..93a3c9b745722 100644 --- a/lib/vector-common/src/internal_event/service.rs +++ b/lib/vector-common/src/internal_event/service.rs @@ -1,8 +1,9 @@ use metrics::counter; use super::{ComponentEventsDropped, InternalEvent, UNINTENTIONAL, emit, error_stage, error_type}; +use crate::NamedInternalEvent; -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct PollReadyError { pub error: E, } @@ -22,13 +23,9 @@ impl InternalEvent for PollReadyError { ) .increment(1); } - - fn name(&self) -> Option<&'static str> { - Some("ServicePollReadyError") - } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct CallError { pub error: E, pub request_id: usize, @@ -57,8 +54,4 @@ impl InternalEvent for 
CallError { count: self.count, }); } - - fn name(&self) -> Option<&'static str> { - Some("ServiceCallError") - } } diff --git a/lib/vector-common/src/lib.rs b/lib/vector-common/src/lib.rs index 3e6d179f13b95..8216f67f7a2a9 100644 --- a/lib/vector-common/src/lib.rs +++ b/lib/vector-common/src/lib.rs @@ -12,6 +12,8 @@ #![deny(unused_assignments)] #![deny(unused_comparisons)] +pub use vector_common_macros::NamedInternalEvent; + #[cfg(feature = "btreemap")] pub use vrl::btreemap; diff --git a/lib/vector-core/src/lib.rs b/lib/vector-core/src/lib.rs index 177b78f138a99..e12b4dc047519 100644 --- a/lib/vector-core/src/lib.rs +++ b/lib/vector-core/src/lib.rs @@ -69,19 +69,6 @@ pub(crate) fn float_eq(l_value: f64, r_value: f64) -> bool { } // These macros aren't actually usable in lib crates without some `vector_lib` shenanigans. -// This test version won't be needed once all `InternalEvent`s implement `name()`. -#[cfg(feature = "test")] -#[macro_export] -macro_rules! emit { - ($event:expr) => { - vector_lib::internal_event::emit(vector_lib::internal_event::DefaultName { - event: $event, - name: stringify!($event), - }) - }; -} - -#[cfg(not(feature = "test"))] #[macro_export] macro_rules! emit { ($event:expr) => { @@ -89,18 +76,6 @@ macro_rules! emit { }; } -#[cfg(feature = "test")] -#[macro_export] -macro_rules! register { - ($event:expr) => { - vector_lib::internal_event::register(vector_lib::internal_event::DefaultName { - event: $event, - name: stringify!($event), - }) - }; -} - -#[cfg(not(feature = "test"))] #[macro_export] macro_rules! 
register { ($event:expr) => { diff --git a/lib/vector-lib/src/lib.rs b/lib/vector-lib/src/lib.rs index 5d209ba88253c..4672148bacdf1 100644 --- a/lib/vector-lib/src/lib.rs +++ b/lib/vector-lib/src/lib.rs @@ -10,7 +10,7 @@ pub use vector_buffers as buffers; #[cfg(feature = "test")] pub use vector_common::event_test_util; pub use vector_common::{ - Error, Result, TimeZone, assert_event_data_eq, btreemap, byte_size_of, + Error, NamedInternalEvent, Result, TimeZone, assert_event_data_eq, btreemap, byte_size_of, byte_size_of::ByteSizeOf, conversion, encode_logfmt, finalization, finalizer, id, impl_event_data_eq, internal_event, json_size, registered_event, request_metadata, sensitive_string, shutdown, trigger, diff --git a/scripts/check-events b/scripts/check-events index b0335cd7440a4..5d4411b601802 100755 --- a/scripts/check-events +++ b/scripts/check-events @@ -25,8 +25,6 @@ EVENT_CLASSES = { METRIC_NAME_EVENTS_DROPPED = 'component_discarded_events_total' METRIC_NAME_ERROR = 'component_errors_total' -SKIP_CHECK = [ 'DefaultName' ] - def hash_array_add(hash, key, item) arr = hash.fetch(key, Array::new) arr.append(item) @@ -461,10 +459,6 @@ end $duplicates = Hash::new { |hash, key| hash[key] = [] } $all_events.each do |name, event| - if SKIP_CHECK.include? 
name - next - end - # Check for duplicated signatures if !event.skip_duplicate_check and (event.impl_internal_event or event.impl_event_handle) signature = event.signature diff --git a/src/enrichment_tables/memory/internal_events.rs b/src/enrichment_tables/memory/internal_events.rs index 455a9069a16c0..0799c6662a6af 100644 --- a/src/enrichment_tables/memory/internal_events.rs +++ b/src/enrichment_tables/memory/internal_events.rs @@ -1,5 +1,7 @@ use metrics::{counter, gauge}; -use vector_lib::{configurable::configurable_component, internal_event::InternalEvent}; +use vector_lib::{ + NamedInternalEvent, configurable::configurable_component, internal_event::InternalEvent, +}; /// Configuration of internal metrics for enrichment memory table. #[configurable_component] @@ -14,7 +16,7 @@ pub struct InternalMetricsConfig { pub include_key_tag: bool, } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub(crate) struct MemoryEnrichmentTableRead<'a> { pub key: &'a str, pub include_key_metric_tag: bool, @@ -32,13 +34,9 @@ impl InternalEvent for MemoryEnrichmentTableRead<'_> { counter!("memory_enrichment_table_reads_total",).increment(1); } } - - fn name(&self) -> Option<&'static str> { - Some("MemoryEnrichmentTableRead") - } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub(crate) struct MemoryEnrichmentTableInserted<'a> { pub key: &'a str, pub include_key_metric_tag: bool, @@ -56,13 +54,9 @@ impl InternalEvent for MemoryEnrichmentTableInserted<'_> { counter!("memory_enrichment_table_insertions_total",).increment(1); } } - - fn name(&self) -> Option<&'static str> { - Some("MemoryEnrichmentTableInserted") - } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub(crate) struct MemoryEnrichmentTableFlushed { pub new_objects_count: usize, pub new_byte_size: usize, @@ -74,13 +68,9 @@ impl InternalEvent for MemoryEnrichmentTableFlushed { gauge!("memory_enrichment_table_objects_count",).set(self.new_objects_count as f64); 
gauge!("memory_enrichment_table_byte_size",).set(self.new_byte_size as f64); } - - fn name(&self) -> Option<&'static str> { - Some("MemoryEnrichmentTableFlushed") - } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub(crate) struct MemoryEnrichmentTableTtlExpired<'a> { pub key: &'a str, pub include_key_metric_tag: bool, @@ -98,13 +88,9 @@ impl InternalEvent for MemoryEnrichmentTableTtlExpired<'_> { counter!("memory_enrichment_table_ttl_expirations",).increment(1); } } - - fn name(&self) -> Option<&'static str> { - Some("MemoryEnrichmentTableTtlExpired") - } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub(crate) struct MemoryEnrichmentTableReadFailed<'a> { pub key: &'a str, pub include_key_metric_tag: bool, @@ -122,13 +108,9 @@ impl InternalEvent for MemoryEnrichmentTableReadFailed<'_> { counter!("memory_enrichment_table_failed_reads",).increment(1); } } - - fn name(&self) -> Option<&'static str> { - Some("MemoryEnrichmentTableReadFailed") - } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub(crate) struct MemoryEnrichmentTableInsertFailed<'a> { pub key: &'a str, pub include_key_metric_tag: bool, @@ -146,8 +128,4 @@ impl InternalEvent for MemoryEnrichmentTableInsertFailed<'_> { counter!("memory_enrichment_table_failed_insertions",).increment(1); } } - - fn name(&self) -> Option<&'static str> { - Some("MemoryEnrichmentTableInsertFailed") - } } diff --git a/src/internal_events/aggregate.rs b/src/internal_events/aggregate.rs index 7fc84bda29abf..1b70c4350f614 100644 --- a/src/internal_events/aggregate.rs +++ b/src/internal_events/aggregate.rs @@ -1,7 +1,8 @@ use metrics::counter; +use vector_lib::NamedInternalEvent; use vector_lib::internal_event::InternalEvent; -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct AggregateEventRecorded; impl InternalEvent for AggregateEventRecorded { @@ -10,7 +11,7 @@ impl InternalEvent for AggregateEventRecorded { } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct 
AggregateFlushed; impl InternalEvent for AggregateFlushed { @@ -19,7 +20,7 @@ impl InternalEvent for AggregateFlushed { } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct AggregateUpdateFailed; impl InternalEvent for AggregateUpdateFailed { diff --git a/src/internal_events/amqp.rs b/src/internal_events/amqp.rs index 612476bc65217..45acdf201305a 100644 --- a/src/internal_events/amqp.rs +++ b/src/internal_events/amqp.rs @@ -1,9 +1,10 @@ #[cfg(feature = "sources-amqp")] pub mod source { use metrics::counter; + use vector_lib::NamedInternalEvent; use vector_lib::internal_event::{InternalEvent, error_stage, error_type}; - #[derive(Debug)] + #[derive(Debug, NamedInternalEvent)] pub struct AmqpBytesReceived { pub byte_size: usize, pub protocol: &'static str, @@ -24,7 +25,7 @@ pub mod source { } } - #[derive(Debug)] + #[derive(Debug, NamedInternalEvent)] pub struct AmqpEventError { pub error: lapin::Error, } @@ -45,7 +46,7 @@ pub mod source { } } - #[derive(Debug)] + #[derive(Debug, NamedInternalEvent)] pub struct AmqpAckError { pub error: lapin::Error, } @@ -66,7 +67,7 @@ pub mod source { } } - #[derive(Debug)] + #[derive(Debug, NamedInternalEvent)] pub struct AmqpRejectError { pub error: lapin::Error, } diff --git a/src/internal_events/apache_metrics.rs b/src/internal_events/apache_metrics.rs index a99c948673c0a..dd5e9c34e4cf5 100644 --- a/src/internal_events/apache_metrics.rs +++ b/src/internal_events/apache_metrics.rs @@ -1,12 +1,13 @@ use metrics::counter; use vector_lib::{ + NamedInternalEvent, internal_event::{InternalEvent, error_stage, error_type}, json_size::JsonSize, }; use crate::sources::apache_metrics; -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct ApacheMetricsEventsReceived<'a> { pub byte_size: JsonSize, pub count: usize, @@ -30,7 +31,7 @@ impl InternalEvent for ApacheMetricsEventsReceived<'_> { } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct ApacheMetricsParseError<'a> { pub error: 
apache_metrics::ParseError, pub endpoint: &'a str, diff --git a/src/internal_events/api.rs b/src/internal_events/api.rs index c6163b67925c9..4894fc1de33e6 100644 --- a/src/internal_events/api.rs +++ b/src/internal_events/api.rs @@ -1,9 +1,10 @@ use std::net::SocketAddr; use metrics::counter; +use vector_lib::NamedInternalEvent; use vector_lib::internal_event::InternalEvent; -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct ApiStarted { pub addr: SocketAddr, pub playground: bool, diff --git a/src/internal_events/aws.rs b/src/internal_events/aws.rs index 341205f0846ab..ff4da6c01e56b 100644 --- a/src/internal_events/aws.rs +++ b/src/internal_events/aws.rs @@ -1,6 +1,8 @@ use metrics::counter; +use vector_lib::NamedInternalEvent; use vector_lib::internal_event::InternalEvent; +#[derive(NamedInternalEvent)] pub struct AwsBytesSent { pub byte_size: usize, pub region: Option, diff --git a/src/internal_events/aws_cloudwatch_logs.rs b/src/internal_events/aws_cloudwatch_logs.rs index 2c8ae9c4e8e08..6325d6c71b25d 100644 --- a/src/internal_events/aws_cloudwatch_logs.rs +++ b/src/internal_events/aws_cloudwatch_logs.rs @@ -1,9 +1,10 @@ use metrics::counter; +use vector_lib::NamedInternalEvent; use vector_lib::internal_event::{ ComponentEventsDropped, InternalEvent, UNINTENTIONAL, error_stage, error_type, }; -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct AwsCloudwatchLogsMessageSizeError { pub size: usize, pub max_size: usize, diff --git a/src/internal_events/aws_ec2_metadata.rs b/src/internal_events/aws_ec2_metadata.rs index 99c1369e0ea02..43c28aee61272 100644 --- a/src/internal_events/aws_ec2_metadata.rs +++ b/src/internal_events/aws_ec2_metadata.rs @@ -1,7 +1,8 @@ use metrics::counter; +use vector_lib::NamedInternalEvent; use vector_lib::internal_event::{InternalEvent, error_stage, error_type}; -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct AwsEc2MetadataRefreshSuccessful; impl InternalEvent for 
AwsEc2MetadataRefreshSuccessful { @@ -11,7 +12,7 @@ impl InternalEvent for AwsEc2MetadataRefreshSuccessful { } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct AwsEc2MetadataRefreshError { pub error: crate::Error, } diff --git a/src/internal_events/aws_ecs_metrics.rs b/src/internal_events/aws_ecs_metrics.rs index 4589a3eda70d2..86b0d32012713 100644 --- a/src/internal_events/aws_ecs_metrics.rs +++ b/src/internal_events/aws_ecs_metrics.rs @@ -2,11 +2,12 @@ use std::borrow::Cow; use metrics::counter; use vector_lib::{ + NamedInternalEvent, internal_event::{InternalEvent, error_stage, error_type}, json_size::JsonSize, }; -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct AwsEcsMetricsEventsReceived<'a> { pub byte_size: JsonSize, pub count: usize, @@ -35,7 +36,7 @@ impl InternalEvent for AwsEcsMetricsEventsReceived<'_> { } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct AwsEcsMetricsParseError<'a> { pub error: serde_json::Error, pub endpoint: &'a str, diff --git a/src/internal_events/aws_kinesis.rs b/src/internal_events/aws_kinesis.rs index 5a89648051f39..9fa2fb4315118 100644 --- a/src/internal_events/aws_kinesis.rs +++ b/src/internal_events/aws_kinesis.rs @@ -1,10 +1,11 @@ /// Used in both `aws_kinesis_streams` and `aws_kinesis_firehose` sinks use metrics::counter; +use vector_lib::NamedInternalEvent; use vector_lib::internal_event::{ ComponentEventsDropped, InternalEvent, UNINTENTIONAL, error_stage, error_type, }; -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct AwsKinesisStreamNoPartitionKeyError<'a> { pub partition_key_field: &'a str, } diff --git a/src/internal_events/aws_kinesis_firehose.rs b/src/internal_events/aws_kinesis_firehose.rs index e493b3f11194b..6cb4ae43ea51f 100644 --- a/src/internal_events/aws_kinesis_firehose.rs +++ b/src/internal_events/aws_kinesis_firehose.rs @@ -1,10 +1,11 @@ use metrics::counter; +use vector_lib::NamedInternalEvent; use 
vector_lib::internal_event::{InternalEvent, error_stage, error_type}; use super::prelude::{http_error_code, io_error_code}; use crate::sources::aws_kinesis_firehose::Compression; -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct AwsKinesisFirehoseRequestReceived<'a> { pub request_id: Option<&'a str>, pub source_arn: Option<&'a str>, @@ -20,7 +21,7 @@ impl InternalEvent for AwsKinesisFirehoseRequestReceived<'_> { } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct AwsKinesisFirehoseRequestError<'a> { request_id: Option<&'a str>, error_code: String, @@ -57,7 +58,7 @@ impl InternalEvent for AwsKinesisFirehoseRequestError<'_> { } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct AwsKinesisFirehoseAutomaticRecordDecodeError { pub compression: Compression, pub error: std::io::Error, diff --git a/src/internal_events/aws_sqs.rs b/src/internal_events/aws_sqs.rs index dc4fa7c42e48e..e45ce6e68ef77 100644 --- a/src/internal_events/aws_sqs.rs +++ b/src/internal_events/aws_sqs.rs @@ -3,9 +3,9 @@ use metrics::counter; #[cfg(feature = "sources-aws_s3")] pub use s3::*; -use vector_lib::internal_event::InternalEvent; #[cfg(any(feature = "sources-aws_s3", feature = "sources-aws_sqs"))] use vector_lib::internal_event::{error_stage, error_type}; +use vector_lib::{NamedInternalEvent, internal_event::InternalEvent}; #[cfg(feature = "sources-aws_s3")] mod s3 { @@ -17,7 +17,7 @@ mod s3 { use super::*; use crate::sources::aws_s3::sqs::ProcessingError; - #[derive(Debug)] + #[derive(Debug, NamedInternalEvent)] pub struct SqsMessageProcessingError<'a> { pub message_id: &'a str, pub error: &'a ProcessingError, @@ -43,7 +43,7 @@ mod s3 { } } - #[derive(Debug)] + #[derive(Debug, NamedInternalEvent)] pub struct SqsMessageDeleteSucceeded { pub message_ids: Vec, } @@ -59,7 +59,7 @@ mod s3 { } } - #[derive(Debug)] + #[derive(Debug, NamedInternalEvent)] pub struct SqsMessageDeletePartialError { pub entries: Vec, } @@ -86,7 +86,7 @@ mod s3 { } } - 
#[derive(Debug)] + #[derive(Debug, NamedInternalEvent)] pub struct SqsMessageDeleteBatchError { pub entries: Vec, pub error: E, @@ -115,7 +115,7 @@ mod s3 { } } - #[derive(Debug)] + #[derive(Debug, NamedInternalEvent)] pub struct SqsMessageSentSucceeded { pub message_ids: Vec, } @@ -131,7 +131,7 @@ mod s3 { } } - #[derive(Debug)] + #[derive(Debug, NamedInternalEvent)] pub struct SqsMessageSentPartialError { pub entries: Vec, } @@ -158,7 +158,7 @@ mod s3 { } } - #[derive(Debug)] + #[derive(Debug, NamedInternalEvent)] pub struct SqsMessageSendBatchError { pub entries: Vec, pub error: E, @@ -188,7 +188,7 @@ mod s3 { } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct SqsMessageReceiveError<'a, E> { pub error: &'a E, } @@ -212,7 +212,7 @@ impl InternalEvent for SqsMessageReceiveError<'_, E> { } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct SqsMessageReceiveSucceeded { pub count: usize, } @@ -225,7 +225,7 @@ impl InternalEvent for SqsMessageReceiveSucceeded { } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct SqsMessageProcessingSucceeded<'a> { pub message_id: &'a str, } @@ -240,7 +240,7 @@ impl InternalEvent for SqsMessageProcessingSucceeded<'_> { // AWS SQS source #[cfg(feature = "sources-aws_sqs")] -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct SqsMessageDeleteError<'a, E> { pub error: &'a E, } @@ -265,7 +265,7 @@ impl InternalEvent for SqsMessageDeleteError<'_, E> { // AWS s3 source -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct SqsS3EventRecordInvalidEventIgnored<'a> { pub bucket: &'a str, pub key: &'a str, diff --git a/src/internal_events/batch.rs b/src/internal_events/batch.rs index ac35414649f34..8b7a4decfa770 100644 --- a/src/internal_events/batch.rs +++ b/src/internal_events/batch.rs @@ -1,9 +1,10 @@ use metrics::counter; +use vector_lib::NamedInternalEvent; use vector_lib::internal_event::{ ComponentEventsDropped, InternalEvent, UNINTENTIONAL, error_stage, 
error_type, }; -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct LargeEventDroppedError { pub(crate) length: usize, pub max_length: usize, diff --git a/src/internal_events/codecs.rs b/src/internal_events/codecs.rs index 0d36a0a0b1fa9..27980af51b799 100644 --- a/src/internal_events/codecs.rs +++ b/src/internal_events/codecs.rs @@ -1,9 +1,10 @@ use metrics::counter; +use vector_lib::NamedInternalEvent; use vector_lib::internal_event::{ ComponentEventsDropped, InternalEvent, UNINTENTIONAL, error_stage, error_type, }; -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct DecoderFramingError { pub error: E, } @@ -27,7 +28,7 @@ impl InternalEvent for DecoderFramingError { } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct DecoderDeserializeError<'a> { pub error: &'a crate::Error, } @@ -51,7 +52,7 @@ impl InternalEvent for DecoderDeserializeError<'_> { } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct EncoderFramingError<'a> { pub error: &'a vector_lib::codecs::encoding::BoxedFramingError, } @@ -77,7 +78,7 @@ impl InternalEvent for EncoderFramingError<'_> { } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct EncoderSerializeError<'a> { pub error: &'a crate::Error, } @@ -106,7 +107,7 @@ impl InternalEvent for EncoderSerializeError<'_> { } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct EncoderWriteError<'a, E> { pub error: &'a E, pub count: usize, @@ -137,7 +138,7 @@ impl InternalEvent for EncoderWriteError<'_, E> { } #[cfg(feature = "codecs-arrow")] -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct EncoderNullConstraintError<'a> { pub error: &'a crate::Error, } diff --git a/src/internal_events/common.rs b/src/internal_events/common.rs index d0b7084557d72..69877e3db9ef5 100644 --- a/src/internal_events/common.rs +++ b/src/internal_events/common.rs @@ -1,12 +1,13 @@ use std::time::Instant; use metrics::{counter, histogram}; +use 
vector_lib::NamedInternalEvent; pub use vector_lib::internal_event::EventsReceived; use vector_lib::internal_event::{ ComponentEventsDropped, InternalEvent, UNINTENTIONAL, error_stage, error_type, }; -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct EndpointBytesReceived<'a> { pub byte_size: usize, pub protocol: &'a str, @@ -30,7 +31,7 @@ impl InternalEvent for EndpointBytesReceived<'_> { } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct EndpointBytesSent<'a> { pub byte_size: usize, pub protocol: &'a str, @@ -54,7 +55,7 @@ impl InternalEvent for EndpointBytesSent<'_> { } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct SocketOutgoingConnectionError { pub error: E, } @@ -80,7 +81,7 @@ impl InternalEvent for SocketOutgoingConnectionError { const STREAM_CLOSED: &str = "stream_closed"; -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct StreamClosedError { pub count: usize, } @@ -107,7 +108,7 @@ impl InternalEvent for StreamClosedError { } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct CollectionCompleted { pub start: Instant, pub end: Instant, @@ -121,7 +122,7 @@ impl InternalEvent for CollectionCompleted { } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct SinkRequestBuildError { pub error: E, } diff --git a/src/internal_events/conditions.rs b/src/internal_events/conditions.rs index c7e04158539cf..96f7ae0ae0d50 100644 --- a/src/internal_events/conditions.rs +++ b/src/internal_events/conditions.rs @@ -1,7 +1,8 @@ use metrics::counter; +use vector_lib::NamedInternalEvent; use vector_lib::internal_event::{InternalEvent, error_stage, error_type}; -#[derive(Debug, Copy, Clone)] +#[derive(Debug, Copy, Clone, NamedInternalEvent)] pub struct VrlConditionExecutionError<'a> { pub error: &'a str, } diff --git a/src/internal_events/datadog_agent.rs b/src/internal_events/datadog_agent.rs index bc132ee644bef..9ee5296407b24 100644 --- 
a/src/internal_events/datadog_agent.rs +++ b/src/internal_events/datadog_agent.rs @@ -1,7 +1,8 @@ use metrics::counter; +use vector_lib::NamedInternalEvent; use vector_lib::internal_event::{InternalEvent, error_stage, error_type}; -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct DatadogAgentJsonParseError<'a> { pub error: &'a serde_json::Error, } diff --git a/src/internal_events/datadog_metrics.rs b/src/internal_events/datadog_metrics.rs index 95e43409e211d..5daa3ab87fa79 100644 --- a/src/internal_events/datadog_metrics.rs +++ b/src/internal_events/datadog_metrics.rs @@ -1,9 +1,10 @@ use metrics::counter; +use vector_lib::NamedInternalEvent; use vector_lib::internal_event::{ ComponentEventsDropped, InternalEvent, UNINTENTIONAL, error_stage, error_type, }; -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct DatadogMetricsEncodingError<'a> { pub reason: &'a str, pub error_code: &'static str, diff --git a/src/internal_events/datadog_traces.rs b/src/internal_events/datadog_traces.rs index 47c1b694c2516..06b48552fb840 100644 --- a/src/internal_events/datadog_traces.rs +++ b/src/internal_events/datadog_traces.rs @@ -1,9 +1,10 @@ use metrics::counter; +use vector_lib::NamedInternalEvent; use vector_lib::internal_event::{ ComponentEventsDropped, InternalEvent, UNINTENTIONAL, error_stage, error_type, }; -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct DatadogTracesEncodingError { pub error_message: &'static str, pub error_reason: String, @@ -36,7 +37,7 @@ impl InternalEvent for DatadogTracesEncodingError { } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct DatadogTracesAPMStatsError { pub error: E, } diff --git a/src/internal_events/dedupe.rs b/src/internal_events/dedupe.rs index c4a781de2c10b..eb7511578cdd7 100644 --- a/src/internal_events/dedupe.rs +++ b/src/internal_events/dedupe.rs @@ -1,6 +1,7 @@ +use vector_lib::NamedInternalEvent; use vector_lib::internal_event::{ComponentEventsDropped, 
INTENTIONAL, InternalEvent}; -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct DedupeEventsDropped { pub count: usize, } diff --git a/src/internal_events/demo_logs.rs b/src/internal_events/demo_logs.rs index 796c9e9fca7bd..406a9856fe6e9 100644 --- a/src/internal_events/demo_logs.rs +++ b/src/internal_events/demo_logs.rs @@ -1,6 +1,7 @@ +use vector_lib::NamedInternalEvent; use vector_lib::internal_event::InternalEvent; -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct DemoLogsEventProcessed; impl InternalEvent for DemoLogsEventProcessed { diff --git a/src/internal_events/dnstap.rs b/src/internal_events/dnstap.rs index bab6c4154b79f..2fc20b95848d1 100644 --- a/src/internal_events/dnstap.rs +++ b/src/internal_events/dnstap.rs @@ -1,7 +1,8 @@ use metrics::counter; +use vector_lib::NamedInternalEvent; use vector_lib::internal_event::{InternalEvent, error_stage, error_type}; -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub(crate) struct DnstapParseError { pub error: E, } diff --git a/src/internal_events/docker_logs.rs b/src/internal_events/docker_logs.rs index 83ed9c00a67f8..bb44e112e05b8 100644 --- a/src/internal_events/docker_logs.rs +++ b/src/internal_events/docker_logs.rs @@ -2,11 +2,12 @@ use bollard::errors::Error; use chrono::ParseError; use metrics::counter; use vector_lib::{ + NamedInternalEvent, internal_event::{InternalEvent, error_stage, error_type}, json_size::JsonSize, }; -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct DockerLogsEventsReceived<'a> { pub byte_size: JsonSize, pub container_id: &'a str, @@ -31,7 +32,7 @@ impl InternalEvent for DockerLogsEventsReceived<'_> { } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct DockerLogsContainerEventReceived<'a> { pub container_id: &'a str, pub action: &'a str, @@ -48,7 +49,7 @@ impl InternalEvent for DockerLogsContainerEventReceived<'_> { } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct 
DockerLogsContainerWatch<'a> { pub container_id: &'a str, } @@ -63,7 +64,7 @@ impl InternalEvent for DockerLogsContainerWatch<'_> { } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct DockerLogsContainerUnwatch<'a> { pub container_id: &'a str, } @@ -78,7 +79,7 @@ impl InternalEvent for DockerLogsContainerUnwatch<'_> { } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct DockerLogsCommunicationError<'a> { pub error: Error, pub container_id: Option<&'a str>, @@ -102,7 +103,7 @@ impl InternalEvent for DockerLogsCommunicationError<'_> { } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct DockerLogsContainerMetadataFetchError<'a> { pub error: Error, pub container_id: &'a str, @@ -127,7 +128,7 @@ impl InternalEvent for DockerLogsContainerMetadataFetchError<'_> { } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct DockerLogsTimestampParseError<'a> { pub error: ParseError, pub container_id: &'a str, @@ -152,7 +153,7 @@ impl InternalEvent for DockerLogsTimestampParseError<'_> { } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct DockerLogsLoggingDriverUnsupportedError<'a> { pub container_id: &'a str, pub error: Error, diff --git a/src/internal_events/encoding_transcode.rs b/src/internal_events/encoding_transcode.rs index 0818579926a13..76ad81792a63a 100644 --- a/src/internal_events/encoding_transcode.rs +++ b/src/internal_events/encoding_transcode.rs @@ -1,7 +1,8 @@ use metrics::counter; +use vector_lib::NamedInternalEvent; use vector_lib::internal_event::InternalEvent; -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct DecoderBomRemoval { pub from_encoding: &'static str, } @@ -16,7 +17,7 @@ impl InternalEvent for DecoderBomRemoval { } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct DecoderMalformedReplacement { pub from_encoding: &'static str, } @@ -33,7 +34,7 @@ impl InternalEvent for DecoderMalformedReplacement { } } -#[derive(Debug)] 
+#[derive(Debug, NamedInternalEvent)] pub struct EncoderUnmappableReplacement { pub to_encoding: &'static str, } diff --git a/src/internal_events/eventstoredb_metrics.rs b/src/internal_events/eventstoredb_metrics.rs index b3eac65e3d6c6..c72b9c7dbeb7c 100644 --- a/src/internal_events/eventstoredb_metrics.rs +++ b/src/internal_events/eventstoredb_metrics.rs @@ -1,7 +1,8 @@ use metrics::counter; +use vector_lib::NamedInternalEvent; use vector_lib::internal_event::{InternalEvent, error_stage, error_type}; -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct EventStoreDbMetricsHttpError { pub error: crate::Error, } @@ -23,7 +24,7 @@ impl InternalEvent for EventStoreDbMetricsHttpError { } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct EventStoreDbStatsParsingError { pub error: serde_json::Error, } diff --git a/src/internal_events/exec.rs b/src/internal_events/exec.rs index 0d9631a8ed315..abe226897d5d3 100644 --- a/src/internal_events/exec.rs +++ b/src/internal_events/exec.rs @@ -3,6 +3,7 @@ use std::time::Duration; use metrics::{counter, histogram}; use tokio::time::error::Elapsed; use vector_lib::{ + NamedInternalEvent, internal_event::{ ComponentEventsDropped, InternalEvent, UNINTENTIONAL, error_stage, error_type, }, @@ -11,7 +12,7 @@ use vector_lib::{ use super::prelude::io_error_code; -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct ExecEventsReceived<'a> { pub count: usize, pub command: &'a str, @@ -39,7 +40,7 @@ impl InternalEvent for ExecEventsReceived<'_> { } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct ExecFailedError<'a> { pub command: &'a str, pub error: std::io::Error, @@ -66,7 +67,7 @@ impl InternalEvent for ExecFailedError<'_> { } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct ExecTimeoutError<'a> { pub command: &'a str, pub elapsed_seconds: u64, @@ -93,7 +94,7 @@ impl InternalEvent for ExecTimeoutError<'_> { } } -#[derive(Debug)] +#[derive(Debug, 
NamedInternalEvent)] pub struct ExecCommandExecuted<'a> { pub command: &'a str, pub exit_status: Option, @@ -179,6 +180,7 @@ impl std::fmt::Display for ExecFailedToSignalChild { } } +#[derive(NamedInternalEvent)] pub struct ExecFailedToSignalChildError<'a> { pub command: &'a tokio::process::Command, pub error: ExecFailedToSignalChild, @@ -204,6 +206,7 @@ impl InternalEvent for ExecFailedToSignalChildError<'_> { } } +#[derive(NamedInternalEvent)] pub struct ExecChannelClosedError; impl InternalEvent for ExecChannelClosedError { diff --git a/src/internal_events/expansion.rs b/src/internal_events/expansion.rs index bfc28ae219f57..7b2633dd64f97 100644 --- a/src/internal_events/expansion.rs +++ b/src/internal_events/expansion.rs @@ -1,8 +1,10 @@ use metrics::counter; +use vector_lib::NamedInternalEvent; use vector_lib::internal_event::{ ComponentEventsDropped, InternalEvent, UNINTENTIONAL, error_stage, error_type, }; +#[derive(NamedInternalEvent)] pub struct PairExpansionError<'a> { pub key: &'a str, pub value: &'a str, diff --git a/src/internal_events/file.rs b/src/internal_events/file.rs index 5152e2618b56d..0e85c67150621 100644 --- a/src/internal_events/file.rs +++ b/src/internal_events/file.rs @@ -4,6 +4,7 @@ use std::borrow::Cow; use metrics::{counter, gauge}; use vector_lib::{ + NamedInternalEvent, configurable::configurable_component, internal_event::{ ComponentEventsDropped, InternalEvent, UNINTENTIONAL, error_stage, error_type, @@ -26,7 +27,7 @@ pub struct FileInternalMetricsConfig { pub include_file_tag: bool, } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct FileOpen { pub count: usize, } @@ -37,7 +38,7 @@ impl InternalEvent for FileOpen { } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct FileBytesSent<'a> { pub byte_size: usize, pub file: Cow<'a, str>, @@ -68,7 +69,7 @@ impl InternalEvent for FileBytesSent<'_> { } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct FileIoError<'a, P> { pub error: 
std::io::Error, pub code: &'static str, @@ -111,7 +112,7 @@ mod source { use bytes::BytesMut; use metrics::counter; use vector_lib::{ - emit, + NamedInternalEvent, emit, file_source_common::internal_events::FileSourceInternalEvents, internal_event::{ComponentEventsDropped, INTENTIONAL, error_stage, error_type}, json_size::JsonSize, @@ -119,7 +120,7 @@ mod source { use super::{FileOpen, InternalEvent}; - #[derive(Debug)] + #[derive(Debug, NamedInternalEvent)] pub struct FileBytesReceived<'a> { pub byte_size: usize, pub file: &'a str, @@ -150,7 +151,7 @@ mod source { } } - #[derive(Debug)] + #[derive(Debug, NamedInternalEvent)] pub struct FileEventsReceived<'a> { pub count: usize, pub file: &'a str, @@ -185,7 +186,7 @@ mod source { } } - #[derive(Debug)] + #[derive(Debug, NamedInternalEvent)] pub struct FileChecksumFailed<'a> { pub file: &'a Path, pub include_file_metric_tag: bool, @@ -209,7 +210,7 @@ mod source { } } - #[derive(Debug)] + #[derive(Debug, NamedInternalEvent)] pub struct FileFingerprintReadError<'a> { pub file: &'a Path, pub error: Error, @@ -248,7 +249,7 @@ mod source { const DELETION_FAILED: &str = "deletion_failed"; - #[derive(Debug)] + #[derive(Debug, NamedInternalEvent)] pub struct FileDeleteError<'a> { pub file: &'a Path, pub error: Error, @@ -285,7 +286,7 @@ mod source { } } - #[derive(Debug)] + #[derive(Debug, NamedInternalEvent)] pub struct FileDeleted<'a> { pub file: &'a Path, pub include_file_metric_tag: bool, @@ -309,7 +310,7 @@ mod source { } } - #[derive(Debug)] + #[derive(Debug, NamedInternalEvent)] pub struct FileUnwatched<'a> { pub file: &'a Path, pub include_file_metric_tag: bool, @@ -340,7 +341,7 @@ mod source { } } - #[derive(Debug)] + #[derive(Debug, NamedInternalEvent)] struct FileWatchError<'a> { pub file: &'a Path, pub error: Error, @@ -377,7 +378,7 @@ mod source { } } - #[derive(Debug)] + #[derive(Debug, NamedInternalEvent)] pub struct FileResumed<'a> { pub file: &'a Path, pub file_position: u64, @@ -403,7 +404,7 @@ mod source 
{ } } - #[derive(Debug)] + #[derive(Debug, NamedInternalEvent)] pub struct FileAdded<'a> { pub file: &'a Path, pub include_file_metric_tag: bool, @@ -427,7 +428,7 @@ mod source { } } - #[derive(Debug)] + #[derive(Debug, NamedInternalEvent)] pub struct FileCheckpointed { pub count: usize, pub duration: Duration, @@ -444,7 +445,7 @@ mod source { } } - #[derive(Debug)] + #[derive(Debug, NamedInternalEvent)] pub struct FileCheckpointWriteError { pub error: Error, } @@ -468,7 +469,7 @@ mod source { } } - #[derive(Debug)] + #[derive(Debug, NamedInternalEvent)] pub struct PathGlobbingError<'a> { pub path: &'a Path, pub error: &'a Error, @@ -494,7 +495,7 @@ mod source { } } - #[derive(Debug)] + #[derive(Debug, NamedInternalEvent)] pub struct FileLineTooBigError<'a> { pub truncated_bytes: &'a BytesMut, pub configured_limit: usize, diff --git a/src/internal_events/file_descriptor.rs b/src/internal_events/file_descriptor.rs index 6ccb0cbee45f1..7882ed4d95b12 100644 --- a/src/internal_events/file_descriptor.rs +++ b/src/internal_events/file_descriptor.rs @@ -1,7 +1,8 @@ use metrics::counter; +use vector_lib::NamedInternalEvent; use vector_lib::internal_event::{InternalEvent, error_stage, error_type}; -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct FileDescriptorReadError { pub error: E, } diff --git a/src/internal_events/fluent.rs b/src/internal_events/fluent.rs index 9864993051529..6d6bb408edc5f 100644 --- a/src/internal_events/fluent.rs +++ b/src/internal_events/fluent.rs @@ -1,9 +1,10 @@ use metrics::counter; +use vector_lib::NamedInternalEvent; use vector_lib::internal_event::{InternalEvent, error_stage, error_type}; use crate::sources::fluent::DecodeError; -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct FluentMessageReceived { pub byte_size: u64, } @@ -14,7 +15,7 @@ impl InternalEvent for FluentMessageReceived { } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct FluentMessageDecodeError<'a> { pub error: &'a 
DecodeError, pub base64_encoded_message: String, diff --git a/src/internal_events/gcp_pubsub.rs b/src/internal_events/gcp_pubsub.rs index e696e03b17649..4066d71936529 100644 --- a/src/internal_events/gcp_pubsub.rs +++ b/src/internal_events/gcp_pubsub.rs @@ -1,6 +1,8 @@ use metrics::counter; +use vector_lib::NamedInternalEvent; use vector_lib::internal_event::{InternalEvent, error_stage, error_type}; +#[derive(NamedInternalEvent)] pub struct GcpPubsubConnectError { pub error: tonic::transport::Error, } @@ -25,6 +27,7 @@ impl InternalEvent for GcpPubsubConnectError { } } +#[derive(NamedInternalEvent)] pub struct GcpPubsubStreamingPullError { pub error: tonic::Status, } @@ -49,6 +52,7 @@ impl InternalEvent for GcpPubsubStreamingPullError { } } +#[derive(NamedInternalEvent)] pub struct GcpPubsubReceiveError { pub error: tonic::Status, } diff --git a/src/internal_events/grpc.rs b/src/internal_events/grpc.rs index 57fe2d8e5345a..882b1dbfaf33b 100644 --- a/src/internal_events/grpc.rs +++ b/src/internal_events/grpc.rs @@ -3,11 +3,12 @@ use std::time::Duration; use http::response::Response; use metrics::{counter, histogram}; use tonic::Code; +use vector_lib::NamedInternalEvent; use vector_lib::internal_event::{InternalEvent, error_stage, error_type}; const GRPC_STATUS_LABEL: &str = "grpc_status"; -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct GrpcServerRequestReceived; impl InternalEvent for GrpcServerRequestReceived { @@ -16,7 +17,7 @@ impl InternalEvent for GrpcServerRequestReceived { } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct GrpcServerResponseSent<'a, B> { pub response: &'a Response, pub latency: Duration, @@ -38,7 +39,7 @@ impl InternalEvent for GrpcServerResponseSent<'_, B> { } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct GrpcInvalidCompressionSchemeError<'a> { pub status: &'a tonic::Status, } @@ -60,7 +61,7 @@ impl InternalEvent for GrpcInvalidCompressionSchemeError<'_> { } } -#[derive(Debug)] 
+#[derive(Debug, NamedInternalEvent)] pub struct GrpcError { pub error: E, } diff --git a/src/internal_events/heartbeat.rs b/src/internal_events/heartbeat.rs index 8197237bbceff..658a24fcb8762 100644 --- a/src/internal_events/heartbeat.rs +++ b/src/internal_events/heartbeat.rs @@ -1,11 +1,12 @@ use std::time::Instant; use metrics::gauge; +use vector_lib::NamedInternalEvent; use vector_lib::internal_event::InternalEvent; use crate::built_info; -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct Heartbeat { pub since: Instant, } diff --git a/src/internal_events/host_metrics.rs b/src/internal_events/host_metrics.rs index 883a56cc46622..d60771644571e 100644 --- a/src/internal_events/host_metrics.rs +++ b/src/internal_events/host_metrics.rs @@ -1,7 +1,8 @@ use metrics::counter; +use vector_lib::NamedInternalEvent; use vector_lib::internal_event::{InternalEvent, error_stage, error_type}; -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct HostMetricsScrapeError { pub message: &'static str, } @@ -23,7 +24,7 @@ impl InternalEvent for HostMetricsScrapeError { } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct HostMetricsScrapeDetailError { pub message: &'static str, pub error: E, @@ -47,7 +48,7 @@ impl InternalEvent for HostMetricsScrapeDetailError { } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct HostMetricsScrapeFilesystemError { pub message: &'static str, pub error: heim::Error, diff --git a/src/internal_events/http.rs b/src/internal_events/http.rs index bfb79822d3d8f..a0d8f5d2a92d3 100644 --- a/src/internal_events/http.rs +++ b/src/internal_events/http.rs @@ -3,13 +3,14 @@ use std::{error::Error, time::Duration}; use http::Response; use metrics::{counter, histogram}; use vector_lib::{ + NamedInternalEvent, internal_event::{InternalEvent, error_stage, error_type}, json_size::JsonSize, }; const HTTP_STATUS_LABEL: &str = "status"; -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct 
HttpServerRequestReceived; impl InternalEvent for HttpServerRequestReceived { @@ -19,7 +20,7 @@ impl InternalEvent for HttpServerRequestReceived { } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct HttpServerResponseSent<'a, B> { pub response: &'a Response, pub latency: Duration, @@ -36,7 +37,7 @@ impl InternalEvent for HttpServerResponseSent<'_, B> { } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct HttpBytesReceived<'a> { pub byte_size: usize, pub http_path: &'a str, @@ -60,7 +61,7 @@ impl InternalEvent for HttpBytesReceived<'_> { } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct HttpEventsReceived<'a> { pub count: usize, pub byte_size: JsonSize, @@ -95,7 +96,7 @@ impl InternalEvent for HttpEventsReceived<'_> { } #[cfg(feature = "sources-utils-http")] -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct HttpBadRequest<'a> { code: u16, error_code: String, @@ -134,7 +135,7 @@ impl InternalEvent for HttpBadRequest<'_> { } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct HttpDecompressError<'a> { pub error: &'a dyn Error, pub encoding: &'a str, @@ -160,6 +161,7 @@ impl InternalEvent for HttpDecompressError<'_> { } } +#[derive(NamedInternalEvent)] pub struct HttpInternalError<'a> { pub message: &'a str, } diff --git a/src/internal_events/http_client.rs b/src/internal_events/http_client.rs index 30db85ac2603a..d1e4c7549d15a 100644 --- a/src/internal_events/http_client.rs +++ b/src/internal_events/http_client.rs @@ -6,9 +6,10 @@ use http::{ }; use hyper::{Error, body::HttpBody}; use metrics::{counter, histogram}; +use vector_lib::NamedInternalEvent; use vector_lib::internal_event::{InternalEvent, error_stage, error_type}; -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct AboutToSendHttpRequest<'a, T> { pub request: &'a Request, } @@ -43,7 +44,7 @@ impl InternalEvent for AboutToSendHttpRequest<'_, T> { } } -#[derive(Debug)] +#[derive(Debug, 
NamedInternalEvent)] pub struct GotHttpResponse<'a, T> { pub response: &'a Response, pub roundtrip: Duration, @@ -72,7 +73,7 @@ impl InternalEvent for GotHttpResponse<'_, T> { } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct GotHttpWarning<'a> { pub error: &'a Error, pub roundtrip: Duration, diff --git a/src/internal_events/http_client_source.rs b/src/internal_events/http_client_source.rs index 8d28e9612e1ce..ff67535c0c14b 100644 --- a/src/internal_events/http_client_source.rs +++ b/src/internal_events/http_client_source.rs @@ -2,13 +2,14 @@ use metrics::counter; use vector_lib::{ + NamedInternalEvent, internal_event::{InternalEvent, error_stage, error_type}, json_size::JsonSize, }; use super::prelude::http_error_code; -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct HttpClientEventsReceived { pub byte_size: JsonSize, pub count: usize, @@ -36,7 +37,7 @@ impl InternalEvent for HttpClientEventsReceived { } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct HttpClientHttpResponseError { pub code: hyper::StatusCode, pub url: String, @@ -62,7 +63,7 @@ impl InternalEvent for HttpClientHttpResponseError { } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct HttpClientHttpError { pub error: crate::Error, pub url: String, diff --git a/src/internal_events/influxdb.rs b/src/internal_events/influxdb.rs index 66dc5e2033050..22b34c3dc532c 100644 --- a/src/internal_events/influxdb.rs +++ b/src/internal_events/influxdb.rs @@ -1,9 +1,10 @@ use metrics::counter; +use vector_lib::NamedInternalEvent; use vector_lib::internal_event::{ ComponentEventsDropped, InternalEvent, UNINTENTIONAL, error_stage, error_type, }; -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct InfluxdbEncodingError { pub error_message: &'static str, pub count: usize, diff --git a/src/internal_events/internal_logs.rs b/src/internal_events/internal_logs.rs index afc0d32f8ad2d..63c520723c9c7 100644 --- 
a/src/internal_events/internal_logs.rs +++ b/src/internal_events/internal_logs.rs @@ -1,7 +1,7 @@ use metrics::counter; -use vector_lib::{internal_event::InternalEvent, json_size::JsonSize}; +use vector_lib::{NamedInternalEvent, internal_event::InternalEvent, json_size::JsonSize}; -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct InternalLogsBytesReceived { pub byte_size: usize, } @@ -17,7 +17,7 @@ impl InternalEvent for InternalLogsBytesReceived { } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct InternalLogsEventsReceived { pub byte_size: JsonSize, pub count: usize, diff --git a/src/internal_events/journald.rs b/src/internal_events/journald.rs index bf89d9b03ae1a..0debba0de5d29 100644 --- a/src/internal_events/journald.rs +++ b/src/internal_events/journald.rs @@ -1,10 +1,11 @@ use metrics::counter; use vector_lib::{ + NamedInternalEvent, codecs::decoding::BoxedFramingError, internal_event::{InternalEvent, error_stage, error_type}, }; -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct JournaldInvalidRecordError { pub error: serde_json::Error, pub text: String, @@ -28,7 +29,7 @@ impl InternalEvent for JournaldInvalidRecordError { } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct JournaldStartJournalctlError { pub error: crate::Error, } @@ -50,7 +51,7 @@ impl InternalEvent for JournaldStartJournalctlError { } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct JournaldReadError { pub error: BoxedFramingError, } @@ -72,7 +73,7 @@ impl InternalEvent for JournaldReadError { } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct JournaldCheckpointSetError { pub error: std::io::Error, pub filename: String, @@ -96,7 +97,7 @@ impl InternalEvent for JournaldCheckpointSetError { } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct JournaldCheckpointFileOpenError { pub error: std::io::Error, pub path: String, diff --git a/src/internal_events/kafka.rs 
b/src/internal_events/kafka.rs index 3cfc1b266675b..87ad581e1c453 100644 --- a/src/internal_events/kafka.rs +++ b/src/internal_events/kafka.rs @@ -2,12 +2,13 @@ use metrics::{counter, gauge}; use vector_lib::{ + NamedInternalEvent, internal_event::{InternalEvent, error_stage, error_type}, json_size::JsonSize, }; use vrl::path::OwnedTargetPath; -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct KafkaBytesReceived<'a> { pub byte_size: usize, pub protocol: &'static str, @@ -34,7 +35,7 @@ impl InternalEvent for KafkaBytesReceived<'_> { } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct KafkaEventsReceived<'a> { pub byte_size: JsonSize, pub count: usize, @@ -66,7 +67,7 @@ impl InternalEvent for KafkaEventsReceived<'_> { } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct KafkaOffsetUpdateError { pub error: rdkafka::error::KafkaError, } @@ -90,7 +91,7 @@ impl InternalEvent for KafkaOffsetUpdateError { } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct KafkaReadError { pub error: rdkafka::error::KafkaError, } @@ -114,7 +115,7 @@ impl InternalEvent for KafkaReadError { } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct KafkaStatisticsReceived<'a> { pub statistics: &'a rdkafka::Statistics, pub expose_lag_metrics: bool, @@ -150,6 +151,7 @@ impl InternalEvent for KafkaStatisticsReceived<'_> { } } +#[derive(NamedInternalEvent)] pub struct KafkaHeaderExtractionError<'a> { pub header_field: &'a OwnedTargetPath, } diff --git a/src/internal_events/kubernetes_logs.rs b/src/internal_events/kubernetes_logs.rs index 5771ecb05999d..c6edb03724169 100644 --- a/src/internal_events/kubernetes_logs.rs +++ b/src/internal_events/kubernetes_logs.rs @@ -1,5 +1,6 @@ use metrics::counter; use vector_lib::{ + NamedInternalEvent, internal_event::{ ComponentEventsDropped, INTENTIONAL, InternalEvent, UNINTENTIONAL, error_stage, error_type, }, @@ -9,7 +10,7 @@ use vrl::core::Value; use crate::event::Event; 
-#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct KubernetesLogsEventsReceived<'a> { pub file: &'a str, pub byte_size: JsonSize, @@ -59,7 +60,7 @@ impl InternalEvent for KubernetesLogsEventsReceived<'_> { const ANNOTATION_FAILED: &str = "annotation_failed"; -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct KubernetesLogsEventAnnotationError<'a> { pub event: &'a Event, } @@ -83,7 +84,7 @@ impl InternalEvent for KubernetesLogsEventAnnotationError<'_> { } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub(crate) struct KubernetesLogsEventNamespaceAnnotationError<'a> { pub event: &'a Event, } @@ -108,7 +109,7 @@ impl InternalEvent for KubernetesLogsEventNamespaceAnnotationError<'_> { } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub(crate) struct KubernetesLogsEventNodeAnnotationError<'a> { pub event: &'a Event, } @@ -133,7 +134,7 @@ impl InternalEvent for KubernetesLogsEventNodeAnnotationError<'_> { } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct KubernetesLogsFormatPickerEdgeCase { pub what: &'static str, } @@ -148,7 +149,7 @@ impl InternalEvent for KubernetesLogsFormatPickerEdgeCase { } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct KubernetesLogsDockerFormatParseError<'a> { pub error: &'a dyn std::error::Error, } @@ -173,7 +174,7 @@ impl InternalEvent for KubernetesLogsDockerFormatParseError<'_> { const KUBERNETES_LIFECYCLE: &str = "kubernetes_lifecycle"; -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct KubernetesLifecycleError { pub message: &'static str, pub error: E, @@ -203,7 +204,7 @@ impl InternalEvent for KubernetesLifecycleError { } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct KubernetesMergedLineTooBigError<'a> { pub event: &'a Value, pub configured_limit: usize, diff --git a/src/internal_events/log_to_metric.rs b/src/internal_events/log_to_metric.rs index fbe577e8f1bce..f5db970b46ab3 100644 --- 
a/src/internal_events/log_to_metric.rs +++ b/src/internal_events/log_to_metric.rs @@ -1,10 +1,12 @@ use std::num::ParseFloatError; use metrics::counter; +use vector_lib::NamedInternalEvent; use vector_lib::internal_event::{ ComponentEventsDropped, InternalEvent, UNINTENTIONAL, error_stage, error_type, }; +#[derive(NamedInternalEvent)] pub struct LogToMetricFieldNullError<'a> { pub field: &'a str, } @@ -32,6 +34,7 @@ impl InternalEvent for LogToMetricFieldNullError<'_> { } } +#[derive(NamedInternalEvent)] pub struct LogToMetricParseFloatError<'a> { pub field: &'a str, pub error: ParseFloatError, @@ -62,6 +65,7 @@ impl InternalEvent for LogToMetricParseFloatError<'_> { } // Metric Metadata Events and Errors +#[derive(NamedInternalEvent)] pub struct MetricMetadataInvalidFieldValueError<'a> { pub field: &'a str, pub field_value: &'a str, @@ -91,6 +95,7 @@ impl InternalEvent for MetricMetadataInvalidFieldValueError<'_> { } } +#[derive(NamedInternalEvent)] pub struct MetricMetadataParseError<'a> { pub field: &'a str, pub kind: &'a str, @@ -119,6 +124,7 @@ impl InternalEvent for MetricMetadataParseError<'_> { } } +#[derive(NamedInternalEvent)] pub struct MetricMetadataMetricDetailsNotFoundError {} impl InternalEvent for MetricMetadataMetricDetailsNotFoundError { diff --git a/src/internal_events/logplex.rs b/src/internal_events/logplex.rs index 34a41c54790d7..b28588c5c2d1e 100644 --- a/src/internal_events/logplex.rs +++ b/src/internal_events/logplex.rs @@ -1,9 +1,10 @@ use metrics::counter; +use vector_lib::NamedInternalEvent; use vector_lib::internal_event::{InternalEvent, error_stage, error_type}; use super::prelude::io_error_code; -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct HerokuLogplexRequestReceived<'a> { pub msg_count: usize, pub frame_id: &'a str, @@ -21,7 +22,7 @@ impl InternalEvent for HerokuLogplexRequestReceived<'_> { } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct HerokuLogplexRequestReadError { pub error: 
std::io::Error, } diff --git a/src/internal_events/loki.rs b/src/internal_events/loki.rs index c269946985dd9..523efd7e4f59b 100644 --- a/src/internal_events/loki.rs +++ b/src/internal_events/loki.rs @@ -1,9 +1,10 @@ use metrics::counter; +use vector_lib::NamedInternalEvent; use vector_lib::internal_event::{ ComponentEventsDropped, INTENTIONAL, InternalEvent, error_stage, error_type, }; -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct LokiEventUnlabeledError; impl InternalEvent for LokiEventUnlabeledError { @@ -25,7 +26,7 @@ impl InternalEvent for LokiEventUnlabeledError { } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct LokiOutOfOrderEventDroppedError { pub count: usize, } @@ -56,7 +57,7 @@ impl InternalEvent for LokiOutOfOrderEventDroppedError { } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct LokiOutOfOrderEventRewritten { pub count: usize, } @@ -72,7 +73,7 @@ impl InternalEvent for LokiOutOfOrderEventRewritten { } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct LokiTimestampNonParsableEventsDropped; impl InternalEvent for LokiTimestampNonParsableEventsDropped { diff --git a/src/internal_events/lua.rs b/src/internal_events/lua.rs index 1770761fda489..feb9d04212b88 100644 --- a/src/internal_events/lua.rs +++ b/src/internal_events/lua.rs @@ -1,11 +1,12 @@ use metrics::{counter, gauge}; +use vector_lib::NamedInternalEvent; use vector_lib::internal_event::{ ComponentEventsDropped, InternalEvent, UNINTENTIONAL, error_stage, error_type, }; use crate::transforms::lua::v2::BuildError; -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct LuaGcTriggered { pub used_memory: usize, } @@ -16,7 +17,7 @@ impl InternalEvent for LuaGcTriggered { } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct LuaScriptError { pub error: mlua::Error, } @@ -44,7 +45,7 @@ impl InternalEvent for LuaScriptError { } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct 
LuaBuildError { pub error: BuildError, } diff --git a/src/internal_events/metric_to_log.rs b/src/internal_events/metric_to_log.rs index a1e6890c4a4c6..a536a1b24df24 100644 --- a/src/internal_events/metric_to_log.rs +++ b/src/internal_events/metric_to_log.rs @@ -1,10 +1,11 @@ use metrics::counter; use serde_json::Error; +use vector_lib::NamedInternalEvent; use vector_lib::internal_event::{ ComponentEventsDropped, InternalEvent, UNINTENTIONAL, error_stage, error_type, }; -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct MetricToLogSerializeError { pub error: Error, } diff --git a/src/internal_events/mongodb_metrics.rs b/src/internal_events/mongodb_metrics.rs index 649745c35bcf5..3cad1e4ef97f9 100644 --- a/src/internal_events/mongodb_metrics.rs +++ b/src/internal_events/mongodb_metrics.rs @@ -1,11 +1,12 @@ use metrics::counter; use mongodb::{bson, error::Error as MongoError}; use vector_lib::{ + NamedInternalEvent, internal_event::{InternalEvent, error_stage, error_type}, json_size::JsonSize, }; -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct MongoDbMetricsEventsReceived<'a> { pub count: usize, pub byte_size: JsonSize, @@ -34,6 +35,7 @@ impl InternalEvent for MongoDbMetricsEventsReceived<'_> { } } +#[derive(NamedInternalEvent)] pub struct MongoDbMetricsRequestError<'a> { pub error: MongoError, pub endpoint: &'a str, @@ -57,6 +59,7 @@ impl InternalEvent for MongoDbMetricsRequestError<'_> { } } +#[derive(NamedInternalEvent)] pub struct MongoDbMetricsBsonParseError<'a> { pub error: bson::de::Error, pub endpoint: &'a str, diff --git a/src/internal_events/mqtt.rs b/src/internal_events/mqtt.rs index 3943ad8aa8cf6..1e406a9494f1f 100644 --- a/src/internal_events/mqtt.rs +++ b/src/internal_events/mqtt.rs @@ -2,9 +2,10 @@ use std::fmt::Debug; use metrics::counter; use rumqttc::ConnectionError; +use vector_lib::NamedInternalEvent; use vector_lib::internal_event::{InternalEvent, error_stage, error_type}; -#[derive(Debug)] +#[derive(Debug, 
NamedInternalEvent)] pub struct MqttConnectionError { pub error: ConnectionError, } @@ -26,8 +27,4 @@ impl InternalEvent for MqttConnectionError { ) .increment(1); } - - fn name(&self) -> Option<&'static str> { - Some("MqttConnectionError") - } } diff --git a/src/internal_events/nginx_metrics.rs b/src/internal_events/nginx_metrics.rs index f127ca23971b0..a3384d4ef0faf 100644 --- a/src/internal_events/nginx_metrics.rs +++ b/src/internal_events/nginx_metrics.rs @@ -1,12 +1,13 @@ use metrics::counter; use vector_lib::{ + NamedInternalEvent, internal_event::{InternalEvent, error_stage, error_type}, json_size::JsonSize, }; use crate::sources::nginx_metrics::parser::ParseError; -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct NginxMetricsEventsReceived<'a> { pub byte_size: JsonSize, pub count: usize, @@ -34,6 +35,7 @@ impl InternalEvent for NginxMetricsEventsReceived<'_> { } } +#[derive(NamedInternalEvent)] pub struct NginxMetricsRequestError<'a> { pub error: crate::Error, pub endpoint: &'a str, @@ -58,6 +60,7 @@ impl InternalEvent for NginxMetricsRequestError<'_> { } } +#[derive(NamedInternalEvent)] pub(crate) struct NginxMetricsStubStatusParseError<'a> { pub error: ParseError, pub endpoint: &'a str, diff --git a/src/internal_events/open.rs b/src/internal_events/open.rs index ccd9d8cf5411a..e48e0ac337820 100644 --- a/src/internal_events/open.rs +++ b/src/internal_events/open.rs @@ -7,9 +7,10 @@ use std::{ }; use metrics::gauge; +use vector_lib::NamedInternalEvent; use vector_lib::internal_event::InternalEvent; -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct ConnectionOpen { pub count: usize, } @@ -20,7 +21,7 @@ impl InternalEvent for ConnectionOpen { } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct EndpointsActive { pub count: usize, } diff --git a/src/internal_events/parser.rs b/src/internal_events/parser.rs index 0d0bcfd43e8a6..85a3e1383afa3 100644 --- a/src/internal_events/parser.rs +++ 
b/src/internal_events/parser.rs @@ -3,6 +3,7 @@ use std::borrow::Cow; use metrics::counter; +use vector_lib::NamedInternalEvent; use vector_lib::internal_event::{ ComponentEventsDropped, InternalEvent, UNINTENTIONAL, error_stage, error_type, }; @@ -20,7 +21,7 @@ fn truncate_string_at(s: &str, maxlen: usize) -> Cow<'_, str> { } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct ParserMatchError<'a> { pub value: &'a [u8], } @@ -49,7 +50,7 @@ pub const DROP_EVENT: bool = true; #[allow(dead_code)] pub const RETAIN_EVENT: bool = false; -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct ParserMissingFieldError<'a, const DROP_EVENT: bool> { pub field: &'a str, } @@ -79,7 +80,7 @@ impl InternalEvent for ParserMissingFieldError<'_, DROP_ } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct ParserConversionError<'a> { pub name: &'a str, pub error: crate::types::Error, diff --git a/src/internal_events/postgresql_metrics.rs b/src/internal_events/postgresql_metrics.rs index 22212bf5bb63f..d6a58f968ab7f 100644 --- a/src/internal_events/postgresql_metrics.rs +++ b/src/internal_events/postgresql_metrics.rs @@ -1,7 +1,8 @@ use metrics::counter; +use vector_lib::NamedInternalEvent; use vector_lib::internal_event::{InternalEvent, error_stage, error_type}; -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct PostgresqlMetricsCollectError<'a> { pub error: String, pub endpoint: &'a str, diff --git a/src/internal_events/process.rs b/src/internal_events/process.rs index 2d1e5249a3613..cc3609737aea9 100644 --- a/src/internal_events/process.rs +++ b/src/internal_events/process.rs @@ -1,9 +1,10 @@ use metrics::counter; +use vector_lib::NamedInternalEvent; use vector_lib::internal_event::{InternalEvent, error_stage, error_type}; use crate::{built_info, config}; -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct VectorStarted; impl InternalEvent for VectorStarted { @@ -20,7 +21,7 @@ impl InternalEvent for 
VectorStarted { } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct VectorReloaded<'a> { pub config_paths: &'a [config::ConfigPath], } @@ -37,7 +38,7 @@ impl InternalEvent for VectorReloaded<'_> { } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct VectorStopped; impl InternalEvent for VectorStopped { @@ -50,7 +51,7 @@ impl InternalEvent for VectorStopped { } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct VectorQuit; impl InternalEvent for VectorQuit { @@ -63,7 +64,7 @@ impl InternalEvent for VectorQuit { } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct VectorReloadError { pub reason: &'static str, } @@ -89,7 +90,7 @@ impl InternalEvent for VectorReloadError { } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct VectorConfigLoadError; impl InternalEvent for VectorConfigLoadError { @@ -111,7 +112,7 @@ impl InternalEvent for VectorConfigLoadError { } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct VectorRecoveryError; impl InternalEvent for VectorRecoveryError { diff --git a/src/internal_events/prometheus.rs b/src/internal_events/prometheus.rs index 86d7cbe4136eb..27b28fb5ffafa 100644 --- a/src/internal_events/prometheus.rs +++ b/src/internal_events/prometheus.rs @@ -4,6 +4,7 @@ use std::borrow::Cow; use metrics::counter; +use vector_lib::NamedInternalEvent; use vector_lib::internal_event::{ ComponentEventsDropped, InternalEvent, UNINTENTIONAL, error_stage, error_type, }; @@ -11,7 +12,7 @@ use vector_lib::internal_event::{ use vector_lib::prometheus::parser::ParserError; #[cfg(feature = "sources-prometheus-scrape")] -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct PrometheusParseError<'a> { pub error: ParserError, pub url: http::Uri, @@ -42,7 +43,7 @@ impl InternalEvent for PrometheusParseError<'_> { } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct PrometheusRemoteWriteParseError { pub error: 
prost::DecodeError, } @@ -64,7 +65,7 @@ impl InternalEvent for PrometheusRemoteWriteParseError { } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct PrometheusNormalizationError; impl InternalEvent for PrometheusNormalizationError { diff --git a/src/internal_events/pulsar.rs b/src/internal_events/pulsar.rs index a34a28809428f..a2212ef0d5971 100644 --- a/src/internal_events/pulsar.rs +++ b/src/internal_events/pulsar.rs @@ -3,11 +3,12 @@ #[cfg(feature = "sources-pulsar")] use metrics::Counter; use metrics::counter; +use vector_lib::NamedInternalEvent; use vector_lib::internal_event::{ ComponentEventsDropped, InternalEvent, UNINTENTIONAL, error_stage, error_type, }; -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct PulsarSendingError { pub count: usize, pub error: vector_lib::Error, @@ -35,6 +36,7 @@ impl InternalEvent for PulsarSendingError { } } +#[derive(NamedInternalEvent)] pub struct PulsarPropertyExtractionError { pub property_field: F, } diff --git a/src/internal_events/redis.rs b/src/internal_events/redis.rs index 914567e7045d4..45d76f3670dcd 100644 --- a/src/internal_events/redis.rs +++ b/src/internal_events/redis.rs @@ -1,7 +1,8 @@ use metrics::counter; +use vector_lib::NamedInternalEvent; use vector_lib::internal_event::{InternalEvent, error_stage, error_type}; -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct RedisReceiveEventError { error: redis::RedisError, error_code: String, diff --git a/src/internal_events/reduce.rs b/src/internal_events/reduce.rs index 31f3f84a03177..8917c5aad0a5a 100644 --- a/src/internal_events/reduce.rs +++ b/src/internal_events/reduce.rs @@ -1,8 +1,9 @@ use metrics::counter; +use vector_lib::NamedInternalEvent; use vector_lib::internal_event::{InternalEvent, error_stage, error_type}; use vrl::{path::PathParseError, value::KeyString}; -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct ReduceStaleEventFlushed; impl InternalEvent for ReduceStaleEventFlushed { @@ 
-11,7 +12,7 @@ impl InternalEvent for ReduceStaleEventFlushed { } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct ReduceAddEventError { pub error: PathParseError, pub path: KeyString, diff --git a/src/internal_events/remap.rs b/src/internal_events/remap.rs index 3b48b8a0b85fc..989a6aa064065 100644 --- a/src/internal_events/remap.rs +++ b/src/internal_events/remap.rs @@ -1,9 +1,10 @@ use metrics::counter; +use vector_lib::NamedInternalEvent; use vector_lib::internal_event::{ ComponentEventsDropped, INTENTIONAL, InternalEvent, UNINTENTIONAL, error_stage, error_type, }; -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct RemapMappingError { /// If set to true, the remap transform has dropped the event after a failed /// mapping. This internal event reflects that in its messaging. @@ -34,7 +35,7 @@ impl InternalEvent for RemapMappingError { } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct RemapMappingAbort { /// If set to true, the remap transform has dropped the event after an abort /// during mapping. This internal event reflects that in its messaging. 
diff --git a/src/internal_events/sample.rs b/src/internal_events/sample.rs index c502272908115..3722f9530b668 100644 --- a/src/internal_events/sample.rs +++ b/src/internal_events/sample.rs @@ -1,6 +1,7 @@ +use vector_lib::NamedInternalEvent; use vector_lib::internal_event::{ComponentEventsDropped, INTENTIONAL, InternalEvent}; -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct SampleEventDiscarded; impl InternalEvent for SampleEventDiscarded { diff --git a/src/internal_events/sematext_metrics.rs b/src/internal_events/sematext_metrics.rs index 2469adc4d8dd4..65d23c53e00ef 100644 --- a/src/internal_events/sematext_metrics.rs +++ b/src/internal_events/sematext_metrics.rs @@ -1,11 +1,12 @@ use metrics::counter; +use vector_lib::NamedInternalEvent; use vector_lib::internal_event::{ ComponentEventsDropped, InternalEvent, UNINTENTIONAL, error_stage, error_type, }; use crate::event::metric::Metric; -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct SematextMetricsInvalidMetricError<'a> { pub metric: &'a Metric, } @@ -33,7 +34,7 @@ impl InternalEvent for SematextMetricsInvalidMetricError<'_> { } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct SematextMetricsEncodeEventError { pub error: E, } diff --git a/src/internal_events/socket.rs b/src/internal_events/socket.rs index dd1564e885843..f0b58ce33bbd7 100644 --- a/src/internal_events/socket.rs +++ b/src/internal_events/socket.rs @@ -2,6 +2,7 @@ use std::net::Ipv4Addr; use metrics::{counter, histogram}; use vector_lib::{ + NamedInternalEvent, internal_event::{ ComponentEventsDropped, InternalEvent, UNINTENTIONAL, error_stage, error_type, }, @@ -25,7 +26,8 @@ impl SocketMode { } } } -#[derive(Debug)] + +#[derive(Debug, NamedInternalEvent)] pub struct SocketBytesReceived { pub mode: SocketMode, pub byte_size: usize, @@ -48,7 +50,7 @@ impl InternalEvent for SocketBytesReceived { } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct SocketEventsReceived { pub 
mode: SocketMode, pub byte_size: JsonSize, @@ -71,7 +73,7 @@ impl InternalEvent for SocketEventsReceived { } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct SocketBytesSent { pub mode: SocketMode, pub byte_size: usize, @@ -93,7 +95,7 @@ impl InternalEvent for SocketBytesSent { } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct SocketEventsSent { pub mode: SocketMode, pub count: u64, @@ -109,7 +111,7 @@ impl InternalEvent for SocketEventsSent { } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct SocketBindError { pub mode: SocketMode, pub error: E, @@ -137,7 +139,7 @@ impl InternalEvent for SocketBindError { } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct SocketMulticastGroupJoinError { pub error: E, pub group_addr: Ipv4Addr, @@ -174,7 +176,7 @@ impl InternalEvent for SocketMulticastGroupJoinError { } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct SocketReceiveError { pub mode: SocketMode, pub error: E, @@ -202,7 +204,7 @@ impl InternalEvent for SocketReceiveError { } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct SocketSendError { pub mode: SocketMode, pub error: E, diff --git a/src/internal_events/splunk_hec.rs b/src/internal_events/splunk_hec.rs index 893e06b797261..ea090684ed85d 100644 --- a/src/internal_events/splunk_hec.rs +++ b/src/internal_events/splunk_hec.rs @@ -9,6 +9,7 @@ pub use self::source::*; mod sink { use metrics::{counter, gauge}; use serde_json::Error; + use vector_lib::NamedInternalEvent; use vector_lib::internal_event::{ ComponentEventsDropped, InternalEvent, UNINTENTIONAL, error_stage, error_type, }; @@ -18,7 +19,7 @@ mod sink { sinks::splunk_hec::common::acknowledgements::HecAckApiError, }; - #[derive(Debug)] + #[derive(Debug, NamedInternalEvent)] pub struct SplunkEventEncodeError { pub error: vector_lib::Error, } @@ -44,7 +45,7 @@ mod sink { } } - #[derive(Debug)] + #[derive(Debug, NamedInternalEvent)] pub(crate) 
struct SplunkInvalidMetricReceivedError<'a> { pub value: &'a MetricValue, pub kind: &'a MetricKind, @@ -76,7 +77,7 @@ mod sink { } } - #[derive(Debug)] + #[derive(Debug, NamedInternalEvent)] pub struct SplunkResponseParseError { pub error: Error, } @@ -100,7 +101,7 @@ mod sink { } } - #[derive(Debug)] + #[derive(Debug, NamedInternalEvent)] pub struct SplunkIndexerAcknowledgementAPIError { pub message: &'static str, pub error: HecAckApiError, @@ -125,7 +126,7 @@ mod sink { } } - #[derive(Debug)] + #[derive(Debug, NamedInternalEvent)] pub struct SplunkIndexerAcknowledgementUnavailableError { pub error: E, } @@ -149,6 +150,7 @@ mod sink { } } + #[derive(NamedInternalEvent)] pub struct SplunkIndexerAcknowledgementAckAdded; impl InternalEvent for SplunkIndexerAcknowledgementAckAdded { @@ -157,6 +159,7 @@ mod sink { } } + #[derive(NamedInternalEvent)] pub struct SplunkIndexerAcknowledgementAcksRemoved { pub count: f64, } @@ -167,6 +170,7 @@ mod sink { } } + #[derive(NamedInternalEvent)] pub struct SplunkEventTimestampInvalidType<'a> { pub r#type: &'a str, } @@ -181,6 +185,7 @@ mod sink { } } + #[derive(NamedInternalEvent)] pub struct SplunkEventTimestampMissing; impl InternalEvent for SplunkEventTimestampMissing { @@ -193,11 +198,12 @@ mod sink { #[cfg(feature = "sources-splunk_hec")] mod source { use metrics::counter; + use vector_lib::NamedInternalEvent; use vector_lib::internal_event::{InternalEvent, error_stage, error_type}; use crate::sources::splunk_hec::ApiError; - #[derive(Debug)] + #[derive(Debug, NamedInternalEvent)] pub struct SplunkHecRequestBodyInvalidError { pub error: std::io::Error, } @@ -221,7 +227,7 @@ mod source { } } - #[derive(Debug)] + #[derive(Debug, NamedInternalEvent)] pub struct SplunkHecRequestError { pub(crate) error: ApiError, } diff --git a/src/internal_events/statsd_sink.rs b/src/internal_events/statsd_sink.rs index 44aaaf09e229a..7f0ee81f48ddc 100644 --- a/src/internal_events/statsd_sink.rs +++ b/src/internal_events/statsd_sink.rs @@ -1,11 
+1,12 @@ use metrics::counter; +use vector_lib::NamedInternalEvent; use vector_lib::internal_event::{ ComponentEventsDropped, InternalEvent, UNINTENTIONAL, error_stage, error_type, }; use crate::event::metric::{MetricKind, MetricValue}; -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct StatsdInvalidMetricError<'a> { pub value: &'a MetricValue, pub kind: MetricKind, diff --git a/src/internal_events/tag_cardinality_limit.rs b/src/internal_events/tag_cardinality_limit.rs index 011c3d6991833..efd00b80fd7a6 100644 --- a/src/internal_events/tag_cardinality_limit.rs +++ b/src/internal_events/tag_cardinality_limit.rs @@ -1,6 +1,8 @@ use metrics::counter; +use vector_lib::NamedInternalEvent; use vector_lib::internal_event::{ComponentEventsDropped, INTENTIONAL, InternalEvent}; +#[derive(NamedInternalEvent)] pub struct TagCardinalityLimitRejectingEvent<'a> { pub metric_name: &'a str, pub tag_key: &'a str, @@ -24,6 +26,7 @@ impl InternalEvent for TagCardinalityLimitRejectingEvent<'_> { } } +#[derive(NamedInternalEvent)] pub struct TagCardinalityLimitRejectingTag<'a> { pub metric_name: &'a str, pub tag_key: &'a str, @@ -42,6 +45,7 @@ impl InternalEvent for TagCardinalityLimitRejectingTag<'_> { } } +#[derive(NamedInternalEvent)] pub struct TagCardinalityValueLimitReached<'a> { pub key: &'a str, } diff --git a/src/internal_events/tcp.rs b/src/internal_events/tcp.rs index 7c5cc449a8741..a121eef1e0327 100644 --- a/src/internal_events/tcp.rs +++ b/src/internal_events/tcp.rs @@ -1,11 +1,12 @@ use std::net::SocketAddr; use metrics::counter; +use vector_lib::NamedInternalEvent; use vector_lib::internal_event::{InternalEvent, error_stage, error_type}; use crate::{internal_events::SocketOutgoingConnectionError, tls::TlsError}; -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct TcpSocketConnectionEstablished { pub peer_addr: Option, } @@ -21,7 +22,7 @@ impl InternalEvent for TcpSocketConnectionEstablished { } } -#[derive(Debug)] +#[derive(Debug, 
NamedInternalEvent)] pub struct TcpSocketOutgoingConnectionError { pub error: E, } @@ -34,7 +35,7 @@ impl InternalEvent for TcpSocketOutgoingConnectionError } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct TcpSocketConnectionShutdown; impl InternalEvent for TcpSocketConnectionShutdown { @@ -45,7 +46,7 @@ impl InternalEvent for TcpSocketConnectionShutdown { } #[cfg(all(unix, feature = "sources-dnstap"))] -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct TcpSocketError<'a, E> { pub(crate) error: &'a E, pub peer_addr: SocketAddr, @@ -70,7 +71,7 @@ impl InternalEvent for TcpSocketError<'_, E> { } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct TcpSocketTlsConnectionError { pub error: TlsError, } @@ -111,7 +112,7 @@ impl InternalEvent for TcpSocketTlsConnectionError { } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct TcpSendAckError { pub error: std::io::Error, } @@ -136,7 +137,7 @@ impl InternalEvent for TcpSendAckError { } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct TcpBytesReceived { pub byte_size: usize, pub peer_addr: SocketAddr, diff --git a/src/internal_events/template.rs b/src/internal_events/template.rs index bb1419acdd477..147418042a0c6 100644 --- a/src/internal_events/template.rs +++ b/src/internal_events/template.rs @@ -1,8 +1,10 @@ use metrics::counter; +use vector_lib::NamedInternalEvent; use vector_lib::internal_event::{ ComponentEventsDropped, InternalEvent, UNINTENTIONAL, error_stage, error_type, }; +#[derive(NamedInternalEvent)] pub struct TemplateRenderingError<'a> { pub field: Option<&'a str>, pub drop_event: bool, diff --git a/src/internal_events/throttle.rs b/src/internal_events/throttle.rs index 1b0dd420cfe12..274fedd7741b9 100644 --- a/src/internal_events/throttle.rs +++ b/src/internal_events/throttle.rs @@ -1,7 +1,8 @@ use metrics::counter; +use vector_lib::NamedInternalEvent; use vector_lib::internal_event::{ComponentEventsDropped, 
INTENTIONAL, InternalEvent}; -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub(crate) struct ThrottleEventDiscarded { pub key: String, pub emit_events_discarded_per_key: bool, diff --git a/src/internal_events/udp.rs b/src/internal_events/udp.rs index f77eef6066946..0602d4fdb3329 100644 --- a/src/internal_events/udp.rs +++ b/src/internal_events/udp.rs @@ -1,4 +1,5 @@ use metrics::counter; +use vector_lib::NamedInternalEvent; use vector_lib::internal_event::{ ComponentEventsDropped, InternalEvent, UNINTENTIONAL, error_stage, error_type, }; @@ -7,7 +8,7 @@ use crate::internal_events::SocketOutgoingConnectionError; // TODO: Get rid of this. UDP is connectionless, so there's no "successful" connect event, only // successfully binding a socket that can be used for receiving. -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct UdpSocketConnectionEstablished; impl InternalEvent for UdpSocketConnectionEstablished { @@ -19,6 +20,7 @@ impl InternalEvent for UdpSocketConnectionEstablished { // TODO: Get rid of this. UDP is connectionless, so there's no "unsuccessful" connect event, only // unsuccessfully binding a socket that can be used for receiving. 
+#[derive(NamedInternalEvent)] pub struct UdpSocketOutgoingConnectionError { pub error: E, } @@ -31,7 +33,7 @@ impl InternalEvent for UdpSocketOutgoingConnectionError } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct UdpSendIncompleteError { pub data_size: usize, pub sent: usize, @@ -61,7 +63,7 @@ impl InternalEvent for UdpSendIncompleteError { } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct UdpChunkingError { pub error: vector_common::Error, pub data_size: usize, diff --git a/src/internal_events/unix.rs b/src/internal_events/unix.rs index bb1876abc9f17..6fab3348dc4d7 100644 --- a/src/internal_events/unix.rs +++ b/src/internal_events/unix.rs @@ -3,13 +3,14 @@ use std::{io::Error, path::Path}; use metrics::counter; +use vector_lib::NamedInternalEvent; use vector_lib::internal_event::{ ComponentEventsDropped, InternalEvent, UNINTENTIONAL, error_stage, error_type, }; use crate::internal_events::SocketOutgoingConnectionError; -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct UnixSocketConnectionEstablished<'a> { pub path: &'a std::path::Path, } @@ -21,7 +22,7 @@ impl InternalEvent for UnixSocketConnectionEstablished<'_> { } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct UnixSocketOutgoingConnectionError { pub error: E, } @@ -38,7 +39,7 @@ impl InternalEvent for UnixSocketOutgoingConnectionError { pub(crate) error: &'a E, pub path: &'a std::path::Path, @@ -66,7 +67,7 @@ impl InternalEvent for UnixSocketError<'_, E> { } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct UnixSocketSendError<'a, E> { pub(crate) error: &'a E, pub path: &'a std::path::Path, @@ -93,7 +94,7 @@ impl InternalEvent for UnixSocketSendError<'_, E> { } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct UnixSendIncompleteError { pub data_size: usize, pub sent: usize, @@ -121,7 +122,7 @@ impl InternalEvent for UnixSendIncompleteError { } } -#[derive(Debug)] +#[derive(Debug, 
NamedInternalEvent)] pub struct UnixSocketFileDeleteError<'a> { pub path: &'a Path, pub error: Error, diff --git a/src/internal_events/websocket.rs b/src/internal_events/websocket.rs index f08453f0f3585..264e1587eedd7 100644 --- a/src/internal_events/websocket.rs +++ b/src/internal_events/websocket.rs @@ -11,11 +11,12 @@ use vector_common::{ internal_event::{error_stage, error_type}, json_size::JsonSize, }; +use vector_lib::NamedInternalEvent; use vector_lib::internal_event::InternalEvent; pub const PROTOCOL: &str = "websocket"; -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct WebSocketConnectionEstablished; impl InternalEvent for WebSocketConnectionEstablished { @@ -23,13 +24,9 @@ impl InternalEvent for WebSocketConnectionEstablished { debug!(message = "Connected."); counter!("connection_established_total").increment(1); } - - fn name(&self) -> Option<&'static str> { - Some("WebSocketConnectionEstablished") - } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct WebSocketConnectionFailedError { pub error: Box, } @@ -51,13 +48,9 @@ impl InternalEvent for WebSocketConnectionFailedError { ) .increment(1); } - - fn name(&self) -> Option<&'static str> { - Some("WebSocketConnectionFailedError") - } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct WebSocketConnectionShutdown; impl InternalEvent for WebSocketConnectionShutdown { @@ -65,13 +58,9 @@ impl InternalEvent for WebSocketConnectionShutdown { warn!(message = "Closed by the server."); counter!("connection_shutdown_total").increment(1); } - - fn name(&self) -> Option<&'static str> { - Some("WebSocketConnectionShutdown") - } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct WebSocketConnectionError { pub error: tokio_tungstenite::tungstenite::Error, } @@ -94,10 +83,6 @@ impl InternalEvent for WebSocketConnectionError { ) .increment(1); } - - fn name(&self) -> Option<&'static str> { - Some("WebSocketConnectionError") - } } #[allow(dead_code)] @@ 
-117,7 +102,7 @@ impl Display for WebSocketKind { } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct WebSocketBytesReceived<'a> { pub byte_size: usize, pub url: &'a str, @@ -144,7 +129,7 @@ impl InternalEvent for WebSocketBytesReceived<'_> { } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct WebSocketMessageReceived<'a> { pub count: usize, pub byte_size: JsonSize, @@ -181,13 +166,9 @@ impl InternalEvent for WebSocketMessageReceived<'_> { ); counter.increment(self.byte_size.get() as u64); } - - fn name(&self) -> Option<&'static str> { - Some("WebSocketMessageReceived") - } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct WebSocketReceiveError<'a> { pub error: &'a TungsteniteError, } @@ -210,13 +191,9 @@ impl InternalEvent for WebSocketReceiveError<'_> { ) .increment(1); } - - fn name(&self) -> Option<&'static str> { - Some("WebSocketReceiveError") - } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct WebSocketSendError<'a> { pub error: &'a TungsteniteError, } @@ -238,8 +215,4 @@ impl InternalEvent for WebSocketSendError<'_> { ) .increment(1); } - - fn name(&self) -> Option<&'static str> { - Some("WebSocketSendError") - } } diff --git a/src/internal_events/websocket_server.rs b/src/internal_events/websocket_server.rs index cffd8d9bf43ae..fd064c41f2f8a 100644 --- a/src/internal_events/websocket_server.rs +++ b/src/internal_events/websocket_server.rs @@ -1,9 +1,10 @@ use std::{error::Error, fmt::Debug}; use metrics::{counter, gauge}; +use vector_lib::NamedInternalEvent; use vector_lib::internal_event::{InternalEvent, error_stage, error_type}; -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct WebSocketListenerConnectionEstablished { pub client_count: usize, pub extra_tags: Vec<(String, String)>, @@ -20,13 +21,9 @@ impl InternalEvent for WebSocketListenerConnectionEstablished { counter!("connection_established_total", &self.extra_tags).increment(1); gauge!("active_clients", 
&self.extra_tags).set(self.client_count as f64); } - - fn name(&self) -> Option<&'static str> { - Some("WebSocketListenerConnectionEstablished") - } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct WebSocketListenerConnectionFailedError { pub error: Box, pub extra_tags: Vec<(String, String)>, @@ -54,13 +51,9 @@ impl InternalEvent for WebSocketListenerConnectionFailedError { // ## skip check-validity-events ## counter!("component_errors_total", &all_tags).increment(1); } - - fn name(&self) -> Option<&'static str> { - Some("WsListenerConnectionFailed") - } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct WebSocketListenerConnectionShutdown { pub client_count: usize, pub extra_tags: Vec<(String, String)>, @@ -77,13 +70,9 @@ impl InternalEvent for WebSocketListenerConnectionShutdown { counter!("connection_shutdown_total", &self.extra_tags).increment(1); gauge!("active_clients", &self.extra_tags).set(self.client_count as f64); } - - fn name(&self) -> Option<&'static str> { - Some("WebSocketListenerConnectionShutdown") - } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct WebSocketListenerSendError { pub error: Box, } @@ -105,13 +94,9 @@ impl InternalEvent for WebSocketListenerSendError { ) .increment(1); } - - fn name(&self) -> Option<&'static str> { - Some("WsListenerConnectionError") - } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct WebSocketListenerMessageSent { pub message_size: usize, pub extra_tags: Vec<(String, String)>, @@ -123,8 +108,4 @@ impl InternalEvent for WebSocketListenerMessageSent { counter!("websocket_bytes_sent_total", &self.extra_tags) .increment(self.message_size as u64); } - - fn name(&self) -> Option<&'static str> { - Some("WebSocketListenerMessageSent") - } } diff --git a/src/internal_events/windows.rs b/src/internal_events/windows.rs index a83b9b5d59025..7b36e2fa62466 100644 --- a/src/internal_events/windows.rs +++ b/src/internal_events/windows.rs @@ -1,7 +1,8 @@ 
use metrics::counter; +use vector_lib::NamedInternalEvent; use vector_lib::internal_event::{InternalEvent, error_stage, error_type}; -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct WindowsServiceStart<'a> { pub already_started: bool, pub name: &'a str, @@ -22,7 +23,7 @@ impl InternalEvent for WindowsServiceStart<'_> { } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct WindowsServiceStop<'a> { pub already_stopped: bool, pub name: &'a str, @@ -43,7 +44,7 @@ impl InternalEvent for WindowsServiceStop<'_> { } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct WindowsServiceRestart<'a> { pub name: &'a str, } @@ -58,7 +59,7 @@ impl InternalEvent for WindowsServiceRestart<'_> { } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct WindowsServiceInstall<'a> { pub name: &'a str, } @@ -73,7 +74,7 @@ impl InternalEvent for WindowsServiceInstall<'_> { } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct WindowsServiceUninstall<'a> { pub name: &'a str, } @@ -88,7 +89,7 @@ impl InternalEvent for WindowsServiceUninstall<'_> { } } -#[derive(Debug)] +#[derive(Debug, NamedInternalEvent)] pub struct WindowsServiceDoesNotExistError<'a> { pub name: &'a str, } diff --git a/src/sinks/elasticsearch/mod.rs b/src/sinks/elasticsearch/mod.rs index af5225e75b2a2..fddae4eb3caf9 100644 --- a/src/sinks/elasticsearch/mod.rs +++ b/src/sinks/elasticsearch/mod.rs @@ -22,7 +22,8 @@ pub use encoder::ElasticsearchEncoder; use http::{Request, uri::InvalidUri}; use snafu::Snafu; use vector_lib::{ - configurable::configurable_component, internal_event, sensitive_string::SensitiveString, + NamedInternalEvent, configurable::configurable_component, internal_event::InternalEvent, + sensitive_string::SensitiveString, }; use crate::{ @@ -177,11 +178,12 @@ pub enum ElasticsearchCommonMode { DataStream(DataStreamConfig), } +#[derive(NamedInternalEvent)] struct VersionValueParseError<'a> { value: &'a str, } -impl 
internal_event::InternalEvent for VersionValueParseError<'_> { +impl InternalEvent for VersionValueParseError<'_> { fn emit(self) { warn!("{self}") } diff --git a/src/sinks/http/tests.rs b/src/sinks/http/tests.rs index c7e6862fd79f9..7182d608a865a 100644 --- a/src/sinks/http/tests.rs +++ b/src/sinks/http/tests.rs @@ -843,7 +843,7 @@ async fn http_uri_auth_conflict() { tokio::spawn(server); - let expected_emitted_error_events = ["ServiceCallError", "SinkRequestBuildError"]; + let expected_emitted_error_events = ["CallError", "SinkRequestBuildError"]; run_and_assert_sink_error_with_events( sink, stream::once(ready(event)), From 7d1773093745995f8193117855a1436ad71bdbf1 Mon Sep 17 00:00:00 2001 From: Bruce Guenter Date: Thu, 4 Dec 2025 09:01:59 -0600 Subject: [PATCH 130/227] chore(ci): Add missing `deny.toml` entry for the new macro crate (#24339) --- deny.toml | 15 +-------------- 1 file changed, 1 insertion(+), 14 deletions(-) diff --git a/deny.toml b/deny.toml index 7e200969f6e01..ef1adf5d5da00 100644 --- a/deny.toml +++ b/deny.toml @@ -22,6 +22,7 @@ exceptions = [ # compliance we cannot be modifying the source files. { allow = ["MPL-2.0"], name = "colored", version = "*" }, { allow = ["MPL-2.0"], name = "webpki-roots", version = "*" }, + { allow = ["MPL-2.0"], name = "vector-common-macros", version = "*" }, { allow = ["MPL-2.0"], name = "vector-config-common", version = "*" }, { allow = ["MPL-2.0"], name = "vector-config-macros", version = "*" }, { allow = ["MPL-2.0"], name = "vrl", version = "*" }, @@ -41,24 +42,10 @@ ignore = [ # There is not fix available yet. # https://github.com/vectordotdev/vector/issues/19262 "RUSTSEC-2023-0071", - # Vulnerability in `tonic` crate: https://rustsec.org/advisories/RUSTSEC-2024-0376 - # There is a fixed version (v0.12.3) but we are blocked from upgrading to `http` v1, which - # `tonic` v0.12 depends on. 
See https://github.com/vectordotdev/vector/issues/19179 - "RUSTSEC-2024-0376", - # Advisory in rustls crate: https://rustsec.org/advisories/RUSTSEC-2024-0336 If a `close_notify` - # alert is received during a handshake, `complete_io` does not terminate. - # Vulnerable version only used in dev-dependencies - "RUSTSEC-2024-0336", - # idna accepts Punycode labels that do not produce any non-ASCII when decoded - # Need to update some direct dependencies before we can upgrade idna to fix - "RUSTSEC-2024-0421", { id = "RUSTSEC-2021-0139", reason = " ansi_term is unmaintained" }, { id = "RUSTSEC-2024-0388", reason = "derivative is unmaintained" }, { id = "RUSTSEC-2024-0384", reason = "instant is unmaintained" }, { id = "RUSTSEC-2020-0168", reason = "mach is unmaintained" }, - { id = "RUSTSEC-2024-0370", reason = "proc-macro-error is unmaintained" }, - { id = "RUSTSEC-2024-0320", reason = "yaml-rust is unmaintained" }, { id = "RUSTSEC-2024-0436", reason = "paste is unmaintained" }, { id = "RUSTSEC-2025-0012", reason = "backoff is unmaintained" }, - { id = "RUSTSEC-2025-0014", reason = "humantime is unmaintained" }, ] From 72e09673fda9d6fbf933adacea1220bdfae162a8 Mon Sep 17 00:00:00 2001 From: Thomas Date: Thu, 4 Dec 2025 14:58:22 -0500 Subject: [PATCH 131/227] chore(ci): Improve deny and make it run on PRs when necessary (#24340) --- .github/workflows/changes.yml | 7 +++++ .github/workflows/deny.yml | 56 ++++++++++------------------------- 2 files changed, 23 insertions(+), 40 deletions(-) diff --git a/.github/workflows/changes.yml b/.github/workflows/changes.yml index 76054ab8a9d64..65aa38f9c4e9a 100644 --- a/.github/workflows/changes.yml +++ b/.github/workflows/changes.yml @@ -32,6 +32,8 @@ on: value: ${{ jobs.source.outputs.source }} dependencies: value: ${{ jobs.source.outputs.dependencies }} + deny: + value: ${{ jobs.source.outputs.deny }} internal_events: value: ${{ jobs.source.outputs.internal_events }} cue: @@ -160,6 +162,7 @@ jobs: outputs: source: ${{ 
steps.filter.outputs.source }} dependencies: ${{ steps.filter.outputs.dependencies }} + deny: ${{ steps.filter.outputs.deny }} internal_events: ${{ steps.filter.outputs.internal_events }} cue: ${{ steps.filter.outputs.cue }} component_docs: ${{ steps.filter.outputs.component_docs }} @@ -209,6 +212,10 @@ jobs: - 'scripts/cross/**' - "vdev/**" - ".github/workflows/changes.yml" + deny: + - '**/Cargo.toml' + - 'Cargo.lock' + - ".github/workflows/deny.yml" cue: - 'website/cue/**' - "vdev/**" diff --git a/.github/workflows/deny.yml b/.github/workflows/deny.yml index 35a66c2583444..00f0da93714d3 100644 --- a/.github/workflows/deny.yml +++ b/.github/workflows/deny.yml @@ -6,68 +6,44 @@ # - scheduled UTC midnight # - on PR review (see comment-trigger.yml) # - on demand from github actions UI +# - on pull requests when Cargo.toml or Cargo.lock files change name: Deny - Linux on: workflow_call: workflow_dispatch: + pull_request: schedule: # Same schedule as nightly.yml - cron: "0 5 * * 2-6" # Runs at 5:00 AM UTC, Tuesday through Saturday +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + permissions: statuses: write jobs: + changes: + if: ${{ github.event_name == 'pull_request' }} + uses: ./.github/workflows/changes.yml + secrets: inherit + test-deny: runs-on: ubuntu-24.04 timeout-minutes: 30 + if: ${{ always() && (github.event_name != 'pull_request' || needs.changes.outputs.deny == 'true') }} + needs: [changes] env: CARGO_INCREMENTAL: 0 steps: - - name: (PR review) Set latest commit status as pending - if: ${{ github.event_name == 'pull_request_review' }} - uses: myrotvorets/set-commit-status-action@3730c0a348a2ace3c110851bed53331bc6406e9f # v2.0.1 - with: - sha: ${{ github.event.review.commit_id }} - token: ${{ secrets.GITHUB_TOKEN }} - context: Deny - Linux - status: pending - - - name: (PR review) Checkout PR branch - if: ${{ github.event_name == 'pull_request_review' }} - uses: 
actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - with: - ref: ${{ github.event.review.commit_id }} - - - name: Checkout branch - if: ${{ github.event_name != 'pull_request_review' }} - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - - uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0 - name: Cache Cargo registry + index + - uses: ./.github/actions/setup with: - path: | - ~/.cargo/bin/ - ~/.cargo/registry/index/ - ~/.cargo/registry/cache/ - ~/.cargo/git/db/ - key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }} - restore-keys: | - ${{ runner.os }}-cargo- + cargo-deny: true - - run: sudo -E bash scripts/environment/bootstrap-ubuntu-24.04.sh - - run: bash scripts/environment/prepare.sh --modules=cargo-deny - - run: echo "::add-matcher::.github/matchers/rust.json" - name: Check cargo deny advisories/licenses run: make check-deny - - - name: (PR review) Set latest commit status as ${{ job.status }} - uses: myrotvorets/set-commit-status-action@3730c0a348a2ace3c110851bed53331bc6406e9f # v2.0.1 - if: always() && github.event_name == 'pull_request_review' - with: - sha: ${{ github.event.review.commit_id }} - token: ${{ secrets.GITHUB_TOKEN }} - context: Deny - Linux - status: ${{ job.status }} From dbc805a77b51a6b426e067a772ee0eae04f958d1 Mon Sep 17 00:00:00 2001 From: Bruce Guenter Date: Fri, 5 Dec 2025 14:58:01 -0600 Subject: [PATCH 132/227] enhancement(transforms): Add internal metric to record buffer utilization (#24329) * enhancement(transforms): Add internal metric to record buffer utilization This builds on the work in #24272 and adds support for transform buffer utilization. Since these are input buffers they omit the `output` label used by the buffers created by the source senders. 
* Deduplicate metric creation a bit --- ...m-buffer-utilization-metric.enhancement.md | 8 ++ lib/vector-buffers/src/topology/builder.rs | 37 +++--- .../src/topology/channel/limited_queue.rs | 87 +++++++++++--- .../src/topology/channel/mod.rs | 4 +- .../src/topology/channel/tests.rs | 12 +- lib/vector-buffers/src/topology/test_util.rs | 24 ++-- lib/vector-buffers/src/variants/in_memory.rs | 5 +- lib/vector-core/src/fanout.rs | 44 ++++---- lib/vector-core/src/source_sender/output.rs | 5 +- lib/vector-tap/src/controller.rs | 7 +- src/config/diff.rs | 2 +- src/test_util/components.rs | 106 +++++++++++++----- src/topology/builder.rs | 20 ++-- .../components/sources/internal_metrics.cue | 32 ++++++ .../cue/reference/components/transforms.cue | 4 + 15 files changed, 280 insertions(+), 117 deletions(-) create mode 100644 changelog.d/transform-buffer-utilization-metric.enhancement.md diff --git a/changelog.d/transform-buffer-utilization-metric.enhancement.md b/changelog.d/transform-buffer-utilization-metric.enhancement.md new file mode 100644 index 0000000000000..10242092c8956 --- /dev/null +++ b/changelog.d/transform-buffer-utilization-metric.enhancement.md @@ -0,0 +1,8 @@ +Added metrics to record the utilization level of the buffers that each transform receives from: + +- `transform_buffer_max_byte_size` +- `transform_buffer_max_event_size` +- `transform_buffer_utilization` +- `transform_buffer_utilization_level` + +authors: bruceg diff --git a/lib/vector-buffers/src/topology/builder.rs b/lib/vector-buffers/src/topology/builder.rs index 0d909a7eb1941..c4a42c30fab1f 100644 --- a/lib/vector-buffers/src/topology/builder.rs +++ b/lib/vector-buffers/src/topology/builder.rs @@ -4,12 +4,12 @@ use async_trait::async_trait; use snafu::{ResultExt, Snafu}; use tracing::Span; -use super::channel::{ReceiverAdapter, SenderAdapter}; +use super::channel::{ChannelMetricMetadata, ReceiverAdapter, SenderAdapter}; use crate::{ Bufferable, WhenFull, buffer_usage_data::{BufferUsage, 
BufferUsageHandle}, - topology::channel::{BufferReceiver, BufferSender}, - variants::MemoryBuffer, + config::MemoryBufferSize, + topology::channel::{BufferReceiver, BufferSender, limited}, }; /// Value that can be used as a stage in a buffer topology. @@ -186,26 +186,25 @@ impl TopologyBuilder { /// create the stage, installing buffer usage metrics that aren't required, and so on. /// #[allow(clippy::print_stderr)] - pub async fn standalone_memory( + pub fn standalone_memory( max_events: NonZeroUsize, when_full: WhenFull, receiver_span: &Span, + metadata: Option, ) -> (BufferSender, BufferReceiver) { let usage_handle = BufferUsageHandle::noop(); + usage_handle.set_buffer_limits(None, Some(max_events.get())); - let memory_buffer = Box::new(MemoryBuffer::with_max_events(max_events)); - let (sender, receiver) = memory_buffer - .into_buffer_parts(usage_handle.clone()) - .await - .unwrap_or_else(|_| unreachable!("should not fail to directly create a memory buffer")); + let limit = MemoryBufferSize::MaxEvents(max_events); + let (sender, receiver) = limited(limit, metadata); let mode = match when_full { WhenFull::Overflow => WhenFull::Block, m => m, }; - let mut sender = BufferSender::new(sender, mode); + let mut sender = BufferSender::new(sender.into(), mode); sender.with_send_duration_instrumentation(0, receiver_span); - let receiver = BufferReceiver::new(receiver); + let receiver = BufferReceiver::new(receiver.into()); (sender, receiver) } @@ -224,23 +223,23 @@ impl TopologyBuilder { /// can simplifying needing to require callers to do all the boilerplate to create the builder, /// create the stage, installing buffer usage metrics that aren't required, and so on. 
#[cfg(test)] - pub async fn standalone_memory_test( + pub fn standalone_memory_test( max_events: NonZeroUsize, when_full: WhenFull, usage_handle: BufferUsageHandle, + metadata: Option, ) -> (BufferSender, BufferReceiver) { - let memory_buffer = Box::new(MemoryBuffer::with_max_events(max_events)); - let (sender, receiver) = memory_buffer - .into_buffer_parts(usage_handle.clone()) - .await - .unwrap_or_else(|_| unreachable!("should not fail to directly create a memory buffer")); + usage_handle.set_buffer_limits(None, Some(max_events.get())); + + let limit = MemoryBufferSize::MaxEvents(max_events); + let (sender, receiver) = limited(limit, metadata); let mode = match when_full { WhenFull::Overflow => WhenFull::Block, m => m, }; - let mut sender = BufferSender::new(sender, mode); - let mut receiver = BufferReceiver::new(receiver); + let mut sender = BufferSender::new(sender.into(), mode); + let mut receiver = BufferReceiver::new(receiver.into()); sender.with_usage_instrumentation(usage_handle.clone()); receiver.with_usage_instrumentation(usage_handle); diff --git a/lib/vector-buffers/src/topology/channel/limited_queue.rs b/lib/vector-buffers/src/topology/channel/limited_queue.rs index f6fb80f8ecac9..54264e06afe60 100644 --- a/lib/vector-buffers/src/topology/channel/limited_queue.rs +++ b/lib/vector-buffers/src/topology/channel/limited_queue.rs @@ -8,6 +8,9 @@ use std::{ }, }; +#[cfg(test)] +use std::sync::Mutex; + use async_stream::stream; use crossbeam_queue::{ArrayQueue, SegQueue}; use futures::Stream; @@ -89,6 +92,18 @@ where } } +#[derive(Clone, Debug)] +pub struct ChannelMetricMetadata { + prefix: &'static str, + output: Option, +} + +impl ChannelMetricMetadata { + pub fn new(prefix: &'static str, output: Option) -> Self { + Self { prefix, output } + } +} + #[derive(Clone, Debug)] struct Metrics { histogram: Histogram, @@ -98,21 +113,43 @@ struct Metrics { // field, so we need to suppress the warning here. 
#[expect(dead_code)] max_gauge: Gauge, + #[cfg(test)] + recorded_values: Arc>>, } impl Metrics { #[expect(clippy::cast_precision_loss)] // We have to convert buffer sizes for a gauge, it's okay to lose precision here. - fn new(limit: MemoryBufferSize, prefix: &'static str, output: &str) -> Self { + fn new(limit: MemoryBufferSize, metadata: ChannelMetricMetadata) -> Self { + let ChannelMetricMetadata { prefix, output } = metadata; let (gauge_suffix, max_value) = match limit { MemoryBufferSize::MaxEvents(max_events) => ("_max_event_size", max_events.get() as f64), MemoryBufferSize::MaxSize(max_bytes) => ("_max_byte_size", max_bytes.get() as f64), }; - let max_gauge = gauge!(format!("{prefix}{gauge_suffix}"), "output" => output.to_string()); - max_gauge.set(max_value); - Self { - histogram: histogram!(format!("{prefix}_utilization"), "output" => output.to_string()), - gauge: gauge!(format!("{prefix}_utilization_level"), "output" => output.to_string()), - max_gauge, + let max_gauge_name = format!("{prefix}{gauge_suffix}"); + let histogram_name = format!("{prefix}_utilization"); + let gauge_name = format!("{prefix}_utilization_level"); + #[cfg(test)] + let recorded_values = Arc::new(Mutex::new(Vec::new())); + if let Some(label_value) = output { + let max_gauge = gauge!(max_gauge_name, "output" => label_value.clone()); + max_gauge.set(max_value); + Self { + histogram: histogram!(histogram_name, "output" => label_value.clone()), + gauge: gauge!(gauge_name, "output" => label_value.clone()), + max_gauge, + #[cfg(test)] + recorded_values, + } + } else { + let max_gauge = gauge!(max_gauge_name); + max_gauge.set(max_value); + Self { + histogram: histogram!(histogram_name), + gauge: gauge!(gauge_name), + max_gauge, + #[cfg(test)] + recorded_values, + } } } @@ -120,6 +157,10 @@ impl Metrics { fn record(&self, value: usize) { self.histogram.record(value as f64); self.gauge.set(value as f64); + #[cfg(test)] + if let Ok(mut recorded) = self.recorded_values.lock() { + 
recorded.push(value); + } } } @@ -145,10 +186,9 @@ impl Clone for Inner { } impl Inner { - fn new(limit: MemoryBufferSize, metric_name_output: Option<(&'static str, &str)>) -> Self { + fn new(limit: MemoryBufferSize, metric_metadata: Option) -> Self { let read_waker = Arc::new(Notify::new()); - let metrics = - metric_name_output.map(|(prefix, output)| Metrics::new(limit, prefix, output)); + let metrics = metric_metadata.map(|metadata| Metrics::new(limit, metadata)); match limit { MemoryBufferSize::MaxEvents(max_events) => Inner { data: Arc::new(ArrayQueue::new(max_events.get())), @@ -167,6 +207,11 @@ impl Inner { } } + /// Records a send after acquiring all required permits. + /// + /// The `total` value represents the channel utilization after this send completes. It may be + /// greater than the configured limit because the channel intentionally allows a single + /// oversized payload to flow through rather than forcing the sender to split it. fn send_with_permit(&mut self, total: usize, permits: OwnedSemaphorePermit, item: T) { self.data.push((permits, item)); self.read_waker.notify_one(); @@ -335,9 +380,9 @@ impl Drop for LimitedReceiver { pub fn limited( limit: MemoryBufferSize, - metric_name_output: Option<(&'static str, &str)>, + metric_metadata: Option, ) -> (LimitedSender, LimitedReceiver) { - let inner = Inner::new(limit, metric_name_output); + let inner = Inner::new(limit, metric_metadata); let sender = LimitedSender { inner: inner.clone(), @@ -355,7 +400,7 @@ mod tests { use tokio_test::{assert_pending, assert_ready, task::spawn}; use vector_common::byte_size_of::ByteSizeOf; - use super::limited; + use super::{ChannelMetricMetadata, limited}; use crate::{ MemoryBufferSize, test::MultiEventRecord, @@ -391,6 +436,22 @@ mod tests { assert_eq!(Some(Sample::new(42)), assert_ready!(recv.poll())); } + #[tokio::test] + async fn records_utilization_on_send() { + let limit = MemoryBufferSize::MaxEvents(NonZeroUsize::new(2).unwrap()); + let (mut tx, mut rx) = 
limited( + limit, + Some(ChannelMetricMetadata::new("test_channel", None)), + ); + + let metrics = tx.inner.metrics.as_ref().unwrap().recorded_values.clone(); + + tx.send(Sample::new(1)).await.expect("send should succeed"); + assert_eq!(metrics.lock().unwrap().last().copied(), Some(1)); + + let _ = rx.next().await; + } + #[test] fn test_limiting_by_byte_size() { let max_elements = 10; diff --git a/lib/vector-buffers/src/topology/channel/mod.rs b/lib/vector-buffers/src/topology/channel/mod.rs index b6bb0bb9661a2..303abee288cc4 100644 --- a/lib/vector-buffers/src/topology/channel/mod.rs +++ b/lib/vector-buffers/src/topology/channel/mod.rs @@ -2,7 +2,9 @@ mod limited_queue; mod receiver; mod sender; -pub use limited_queue::{LimitedReceiver, LimitedSender, SendError, limited}; +pub use limited_queue::{ + ChannelMetricMetadata, LimitedReceiver, LimitedSender, SendError, limited, +}; pub use receiver::*; pub use sender::*; diff --git a/lib/vector-buffers/src/topology/channel/tests.rs b/lib/vector-buffers/src/topology/channel/tests.rs index 3ebfdda32add4..d36fd036f33ac 100644 --- a/lib/vector-buffers/src/topology/channel/tests.rs +++ b/lib/vector-buffers/src/topology/channel/tests.rs @@ -90,7 +90,7 @@ where #[tokio::test] async fn test_sender_block() { // Get a non-overflow buffer in blocking mode with a capacity of 3. - let (mut tx, rx, _) = build_buffer(3, WhenFull::Block, None).await; + let (mut tx, rx, _) = build_buffer(3, WhenFull::Block, None); // We should be able to send three messages through unimpeded. assert_current_send_capacity(&mut tx, Some(3), None); @@ -113,7 +113,7 @@ async fn test_sender_block() { #[tokio::test] async fn test_sender_drop_newest() { // Get a non-overflow buffer in "drop newest" mode with a capacity of 3. - let (mut tx, rx, _) = build_buffer(3, WhenFull::DropNewest, None).await; + let (mut tx, rx, _) = build_buffer(3, WhenFull::DropNewest, None); // We should be able to send three messages through unimpeded. 
assert_current_send_capacity(&mut tx, Some(3), None); @@ -138,7 +138,7 @@ async fn test_sender_drop_newest() { async fn test_sender_overflow_block() { // Get an overflow buffer, where the overflow buffer is in blocking mode, and both the base // and overflow buffers have a capacity of 2. - let (mut tx, rx, _) = build_buffer(2, WhenFull::Overflow, Some(WhenFull::Block)).await; + let (mut tx, rx, _) = build_buffer(2, WhenFull::Overflow, Some(WhenFull::Block)); // We should be able to send four message through unimpeded -- two for the base sender, and // two for the overflow sender. @@ -164,7 +164,7 @@ async fn test_sender_overflow_block() { async fn test_sender_overflow_drop_newest() { // Get an overflow buffer, where the overflow buffer is in "drop newest" mode, and both the // base and overflow buffers have a capacity of 2. - let (mut tx, rx, _) = build_buffer(2, WhenFull::Overflow, Some(WhenFull::DropNewest)).await; + let (mut tx, rx, _) = build_buffer(2, WhenFull::Overflow, Some(WhenFull::DropNewest)); // We should be able to send four message through unimpeded -- two for the base sender, and // two for the overflow sender. @@ -190,7 +190,7 @@ async fn test_sender_overflow_drop_newest() { #[tokio::test] async fn test_buffer_metrics_normal() { // Get a regular blocking buffer. - let (mut tx, rx, handle) = build_buffer(5, WhenFull::Block, None).await; + let (mut tx, rx, handle) = build_buffer(5, WhenFull::Block, None); // Send three items through, and make sure the buffer usage stats reflect that. assert_current_send_capacity(&mut tx, Some(5), None); @@ -217,7 +217,7 @@ async fn test_buffer_metrics_normal() { #[tokio::test] async fn test_buffer_metrics_drop_newest() { // Get a buffer that drops the newest items when full. - let (mut tx, rx, handle) = build_buffer(2, WhenFull::DropNewest, None).await; + let (mut tx, rx, handle) = build_buffer(2, WhenFull::DropNewest, None); // Send three items through, and make sure the buffer usage stats reflect that. 
assert_current_send_capacity(&mut tx, Some(2), None); diff --git a/lib/vector-buffers/src/topology/test_util.rs b/lib/vector-buffers/src/topology/test_util.rs index 89ae4917af3aa..684aecc977764 100644 --- a/lib/vector-buffers/src/topology/test_util.rs +++ b/lib/vector-buffers/src/topology/test_util.rs @@ -137,7 +137,7 @@ impl error::Error for BasicError {} /// If `mode` is set to `WhenFull::Overflow`, then the buffer will be set to overflow mode, with /// another in-memory channel buffer being used as the overflow buffer. The overflow buffer will /// also use the same capacity as the outer buffer. -pub(crate) async fn build_buffer( +pub(crate) fn build_buffer( capacity: usize, mode: WhenFull, overflow_mode: Option, @@ -154,27 +154,25 @@ pub(crate) async fn build_buffer( NonZeroUsize::new(capacity).expect("capacity must be nonzero"), overflow_mode, handle.clone(), - ) - .await; + None, + ); let (mut base_sender, mut base_receiver) = TopologyBuilder::standalone_memory_test( NonZeroUsize::new(capacity).expect("capacity must be nonzero"), WhenFull::Overflow, handle.clone(), - ) - .await; + None, + ); base_sender.switch_to_overflow(overflow_sender); base_receiver.switch_to_overflow(overflow_receiver); (base_sender, base_receiver) } - m => { - TopologyBuilder::standalone_memory_test( - NonZeroUsize::new(capacity).expect("capacity must be nonzero"), - m, - handle.clone(), - ) - .await - } + m => TopologyBuilder::standalone_memory_test( + NonZeroUsize::new(capacity).expect("capacity must be nonzero"), + m, + handle.clone(), + None, + ), }; (tx, rx, handle) diff --git a/lib/vector-buffers/src/variants/in_memory.rs b/lib/vector-buffers/src/variants/in_memory.rs index 30e986196daef..f6bbe87c15b29 100644 --- a/lib/vector-buffers/src/variants/in_memory.rs +++ b/lib/vector-buffers/src/variants/in_memory.rs @@ -1,4 +1,4 @@ -use std::{error::Error, num::NonZeroUsize}; +use std::error::Error; use async_trait::async_trait; @@ -21,7 +21,8 @@ impl MemoryBuffer { MemoryBuffer { 
capacity } } - pub fn with_max_events(n: NonZeroUsize) -> Self { + #[cfg(test)] + pub fn with_max_events(n: std::num::NonZeroUsize) -> Self { Self { capacity: MemoryBufferSize::MaxEvents(n), } diff --git a/lib/vector-core/src/fanout.rs b/lib/vector-core/src/fanout.rs index b6061774c2564..d8ec7e57a11ad 100644 --- a/lib/vector-core/src/fanout.rs +++ b/lib/vector-core/src/fanout.rs @@ -482,28 +482,28 @@ mod tests { test_util::{collect_ready, collect_ready_events}, }; - async fn build_sender_pair( + fn build_sender_pair( capacity: usize, ) -> (BufferSender, BufferReceiver) { TopologyBuilder::standalone_memory( NonZeroUsize::new(capacity).expect("capacity must be nonzero"), WhenFull::Block, &Span::current(), + None, ) - .await } - async fn build_sender_pairs( + fn build_sender_pairs( capacities: &[usize], ) -> Vec<(BufferSender, BufferReceiver)> { let mut pairs = Vec::new(); for capacity in capacities { - pairs.push(build_sender_pair(*capacity).await); + pairs.push(build_sender_pair(*capacity)); } pairs } - async fn fanout_from_senders( + fn fanout_from_senders( capacities: &[usize], ) -> ( Fanout, @@ -511,7 +511,7 @@ mod tests { Vec>, ) { let (mut fanout, control) = Fanout::new(); - let pairs = build_sender_pairs(capacities).await; + let pairs = build_sender_pairs(capacities); let mut receivers = Vec::new(); for (i, (sender, receiver)) in pairs.into_iter().enumerate() { @@ -522,13 +522,13 @@ mod tests { (fanout, control, receivers) } - async fn add_sender_to_fanout( + fn add_sender_to_fanout( fanout: &mut Fanout, receivers: &mut Vec>, sender_id: usize, capacity: usize, ) { - let (sender, receiver) = build_sender_pair(capacity).await; + let (sender, receiver) = build_sender_pair(capacity); receivers.push(receiver); fanout.add(ComponentKey::from(sender_id.to_string()), sender); @@ -542,13 +542,13 @@ mod tests { .expect("sending control message should not fail"); } - async fn replace_sender_in_fanout( + fn replace_sender_in_fanout( control: &UnboundedSender, receivers: 
&mut [BufferReceiver], sender_id: usize, capacity: usize, ) -> BufferReceiver { - let (sender, receiver) = build_sender_pair(capacity).await; + let (sender, receiver) = build_sender_pair(capacity); let old_receiver = mem::replace(&mut receivers[sender_id], receiver); control @@ -567,13 +567,13 @@ mod tests { old_receiver } - async fn start_sender_replace( + fn start_sender_replace( control: &UnboundedSender, receivers: &mut [BufferReceiver], sender_id: usize, capacity: usize, ) -> (BufferReceiver, BufferSender) { - let (sender, receiver) = build_sender_pair(capacity).await; + let (sender, receiver) = build_sender_pair(capacity); let old_receiver = mem::replace(&mut receivers[sender_id], receiver); control @@ -616,7 +616,7 @@ mod tests { #[tokio::test] async fn fanout_writes_to_all() { - let (mut fanout, _, receivers) = fanout_from_senders(&[2, 2]).await; + let (mut fanout, _, receivers) = fanout_from_senders(&[2, 2]); let events = make_event_array(2); let clones = events.clone(); @@ -632,7 +632,7 @@ mod tests { #[tokio::test] async fn fanout_notready() { - let (mut fanout, _, mut receivers) = fanout_from_senders(&[2, 1, 2]).await; + let (mut fanout, _, mut receivers) = fanout_from_senders(&[2, 1, 2]); let events = make_events(2); // First send should immediately complete because all senders have capacity: @@ -661,7 +661,7 @@ mod tests { #[tokio::test] async fn fanout_grow() { - let (mut fanout, _, mut receivers) = fanout_from_senders(&[4, 4]).await; + let (mut fanout, _, mut receivers) = fanout_from_senders(&[4, 4]); let events = make_events(3); // Send in the first two events to our initial two senders: @@ -675,7 +675,7 @@ mod tests { .expect("should not fail"); // Now add a third sender: - add_sender_to_fanout(&mut fanout, &mut receivers, 2, 4).await; + add_sender_to_fanout(&mut fanout, &mut receivers, 2, 4); // Send in the last event which all three senders will now get: fanout @@ -696,7 +696,7 @@ mod tests { #[tokio::test] async fn fanout_shrink() { - let (mut 
fanout, control, receivers) = fanout_from_senders(&[4, 4]).await; + let (mut fanout, control, receivers) = fanout_from_senders(&[4, 4]); let events = make_events(3); // Send in the first two events to our initial two senders: @@ -772,7 +772,7 @@ mod tests { ]; for (sender_id, should_complete, expected_last_seen) in cases { - let (mut fanout, control, mut receivers) = fanout_from_senders(&[2, 1, 2]).await; + let (mut fanout, control, mut receivers) = fanout_from_senders(&[2, 1, 2]); // First send should immediately complete because all senders have capacity: let mut first_send = spawn(fanout.send(events[0].clone().into(), None)); @@ -827,7 +827,7 @@ mod tests { #[tokio::test] async fn fanout_replace() { - let (mut fanout, control, mut receivers) = fanout_from_senders(&[4, 4, 4]).await; + let (mut fanout, control, mut receivers) = fanout_from_senders(&[4, 4, 4]); let events = make_events(3); // First two sends should immediately complete because all senders have capacity: @@ -841,7 +841,7 @@ mod tests { .expect("should not fail"); // Replace the first sender with a brand new one before polling again: - let old_first_receiver = replace_sender_in_fanout(&control, &mut receivers, 0, 4).await; + let old_first_receiver = replace_sender_in_fanout(&control, &mut receivers, 0, 4); // And do the third send which should also complete since all senders still have capacity: fanout @@ -868,7 +868,7 @@ mod tests { #[tokio::test] async fn fanout_wait() { - let (mut fanout, control, mut receivers) = fanout_from_senders(&[4, 4]).await; + let (mut fanout, control, mut receivers) = fanout_from_senders(&[4, 4]); let events = make_events(3); // First two sends should immediately complete because all senders have capacity: @@ -881,7 +881,7 @@ mod tests { // doesn't let any writes through until we replace it properly. 
We get back the receiver // we've replaced, but also the sender that we want to eventually install: let (old_first_receiver, new_first_sender) = - start_sender_replace(&control, &mut receivers, 0, 4).await; + start_sender_replace(&control, &mut receivers, 0, 4); // Third send should return pending because now we have an in-flight replacement: let mut third_send = spawn(fanout.send(events[2].clone().into(), None)); diff --git a/lib/vector-core/src/source_sender/output.rs b/lib/vector-core/src/source_sender/output.rs index a6d15e0b19f62..5e470c0231d94 100644 --- a/lib/vector-core/src/source_sender/output.rs +++ b/lib/vector-core/src/source_sender/output.rs @@ -11,7 +11,7 @@ use metrics::Histogram; use tracing::Span; use vector_buffers::{ config::MemoryBufferSize, - topology::channel::{self, LimitedReceiver, LimitedSender}, + topology::channel::{self, ChannelMetricMetadata, LimitedReceiver, LimitedSender}, }; use vector_common::{ byte_size_of::ByteSizeOf, @@ -117,7 +117,8 @@ impl Output { timeout: Option, ) -> (Self, LimitedReceiver) { let limit = MemoryBufferSize::MaxEvents(NonZeroUsize::new(n).unwrap()); - let (tx, rx) = channel::limited(limit, Some((UTILIZATION_METRIC_PREFIX, &output))); + let metrics = ChannelMetricMetadata::new(UTILIZATION_METRIC_PREFIX, Some(output.clone())); + let (tx, rx) = channel::limited(limit, Some(metrics)); ( Self { sender: tx, diff --git a/lib/vector-tap/src/controller.rs b/lib/vector-tap/src/controller.rs index 763f14a98caf1..f6e53b07691b3 100644 --- a/lib/vector-tap/src/controller.rs +++ b/lib/vector-tap/src/controller.rs @@ -356,7 +356,12 @@ async fn tap_handler( // target for the component, and spawn our transformer task which will // wrap each event payload with the necessary metadata before forwarding // it to our global tap receiver. 
- let (tap_buffer_tx, mut tap_buffer_rx) = TopologyBuilder::standalone_memory(TAP_BUFFER_SIZE, WhenFull::DropNewest, &Span::current()).await; + let (tap_buffer_tx, mut tap_buffer_rx) = TopologyBuilder::standalone_memory( + TAP_BUFFER_SIZE, + WhenFull::DropNewest, + &Span::current(), + None, + ); let mut tap_transformer = TapTransformer::new(tx.clone(), output.clone()); tokio::spawn(async move { diff --git a/src/config/diff.rs b/src/config/diff.rs index 3ecef90ca0174..ebedf3c73d5c3 100644 --- a/src/config/diff.rs +++ b/src/config/diff.rs @@ -225,7 +225,7 @@ fn extract_table_component_keys( .collect() } -#[cfg(test)] +#[cfg(all(test, feature = "enrichment-tables-memory"))] mod tests { use crate::config::ConfigBuilder; use indoc::indoc; diff --git a/src/test_util/components.rs b/src/test_util/components.rs index 86a7b0defee25..8ef396b6c60cc 100644 --- a/src/test_util/components.rs +++ b/src/test_util/components.rs @@ -65,12 +65,36 @@ pub const HTTP_SINK_TAGS: [&str; 2] = ["endpoint", "protocol"]; /// The standard set of tags for all `AWS`-based sinks. pub const AWS_SINK_TAGS: [&str; 2] = ["protocol", "region"]; -/// The list of source sender buffer metrics that must be emitted. -const SOURCE_SENDER_BUFFER_METRICS: [&str; 2] = [ - "source_buffer_utilization", - "source_buffer_utilization_level", +/// The set of suffixes that define the source/transform buffer metric family. +const BUFFER_METRIC_SUFFIXES: [&str; 3] = [ + // While hypothetically possible, the `max_byte_size` metric is never actually emitted, because + // both sources and transforms limit their buffers by event count. If we ever allow + // configuration by byte size, we will need to account for this in these tests. + "max_event_size", + "utilization", + "utilization_level", ]; +/// Buffer metric requirements shared between sources and transforms. 
+#[derive(Clone, Copy)] +struct BufferMetricRequirement { + prefix: &'static str, + suffixes: &'static [&'static str], + required_tags: &'static [&'static str], +} + +const SOURCE_BUFFER_METRIC_REQUIREMENT: BufferMetricRequirement = BufferMetricRequirement { + prefix: "source_buffer_", + suffixes: &BUFFER_METRIC_SUFFIXES, + required_tags: &["output"], +}; + +const TRANSFORM_BUFFER_METRIC_REQUIREMENT: BufferMetricRequirement = BufferMetricRequirement { + prefix: "transform_buffer_", + suffixes: &BUFFER_METRIC_SUFFIXES, + required_tags: &[], +}; + /// This struct is used to describe a set of component tests. pub struct ComponentTests<'a, 'b, 'c> { /// The list of event (suffixes) that must be emitted by the component @@ -79,8 +103,8 @@ pub struct ComponentTests<'a, 'b, 'c> { tagged_counters: &'b [&'b str], /// The list of counter metrics (with no particular tags) that must be incremented untagged_counters: &'c [&'c str], - /// Whether the source sender metrics must be emitted - require_source_sender_metrics: bool, + /// Optional buffer metric validation requirements. + buffer_metrics: Option, } /// The component test specification for all sources. @@ -93,7 +117,7 @@ pub static SOURCE_TESTS: LazyLock = LazyLock::new(|| ComponentTe "component_sent_events_total", "component_sent_event_bytes_total", ], - require_source_sender_metrics: true, + buffer_metrics: Some(SOURCE_BUFFER_METRIC_REQUIREMENT), }); /// The component error test specification (sources and sinks). @@ -101,7 +125,7 @@ pub static COMPONENT_TESTS_ERROR: LazyLock = LazyLock::new(|| Co events: &["Error"], tagged_counters: &["component_errors_total"], untagged_counters: &[], - require_source_sender_metrics: false, + buffer_metrics: None, }); /// The component test specification for all transforms. 
@@ -114,7 +138,7 @@ pub static TRANSFORM_TESTS: LazyLock = LazyLock::new(|| Componen "component_sent_events_total", "component_sent_event_bytes_total", ], - require_source_sender_metrics: false, + buffer_metrics: Some(TRANSFORM_BUFFER_METRIC_REQUIREMENT), }); /// The component test specification for sinks that are push-based. @@ -126,7 +150,7 @@ pub static SINK_TESTS: LazyLock = LazyLock::new(|| { "component_sent_events_total", "component_sent_event_bytes_total", ], - require_source_sender_metrics: false, + buffer_metrics: None, } }); @@ -139,7 +163,7 @@ pub static DATA_VOLUME_SINK_TESTS: LazyLock = LazyLock::new(|| { "component_sent_event_bytes_total", ], untagged_counters: &[], - require_source_sender_metrics: false, + buffer_metrics: None, } }); @@ -151,7 +175,7 @@ pub static NONSENDING_SINK_TESTS: LazyLock = LazyLock::new(|| Co "component_sent_event_bytes_total", ], untagged_counters: &[], - require_source_sender_metrics: false, + buffer_metrics: None, }); /// The component test specification for components with multiple outputs. 
@@ -163,7 +187,7 @@ pub static COMPONENT_MULTIPLE_OUTPUTS_TESTS: LazyLock = "component_sent_event_bytes_total", ], untagged_counters: &[], - require_source_sender_metrics: false, + buffer_metrics: None, }); impl ComponentTests<'_, '_, '_> { @@ -174,8 +198,8 @@ impl ComponentTests<'_, '_, '_> { test.emitted_all_events(self.events); test.emitted_all_counters(self.tagged_counters, tags); test.emitted_all_counters(self.untagged_counters, &[]); - if self.require_source_sender_metrics { - test.emitted_source_sender_metrics(); + if let Some(requirement) = self.buffer_metrics { + test.emitted_buffer_metrics(requirement); } if !test.errors.is_empty() { panic!( @@ -269,21 +293,29 @@ impl ComponentTester { } } - fn emitted_source_sender_metrics(&mut self) { + fn emitted_buffer_metrics(&mut self, requirement: BufferMetricRequirement) { + let expected: HashSet = requirement + .suffixes + .iter() + .map(|suffix| format!("{}{}", requirement.prefix, suffix)) + .collect(); + + let mut missing = expected.clone(); let mut partial_matches = Vec::new(); - let mut missing: HashSet<&str> = SOURCE_SENDER_BUFFER_METRICS.iter().copied().collect(); - for metric in self - .metrics - .iter() - .filter(|m| SOURCE_SENDER_BUFFER_METRICS.contains(&m.name())) - { + for metric in self.metrics.iter().filter(|m| expected.contains(m.name())) { let tags = metric.tags(); - let has_output_tag = tags.is_some_and(|t| t.contains_key("output")); let is_histogram = matches!(metric.value(), MetricValue::AggregatedHistogram { .. }); let is_gauge = matches!(metric.value(), MetricValue::Gauge { .. 
}); - if (is_histogram || is_gauge) && has_output_tag { + let missing_tags: Vec<_> = requirement + .required_tags + .iter() + .copied() + .filter(|tag| tags.is_none_or(|t| !t.contains_key(tag))) + .collect(); + + if (is_histogram || is_gauge) && missing_tags.is_empty() { missing.remove(metric.name()); continue; } @@ -296,8 +328,13 @@ impl ComponentTester { if !is_histogram && !is_gauge { reasons.push(format!("unexpected type `{}`", metric.value().as_name())); } - if !has_output_tag { - reasons.push("missing `output` tag".to_string()); + if !missing_tags.is_empty() { + reasons.push(format!( + "missing {}", + missing_tags + .iter() + .format_with(", ", |tag, fmt| fmt(&format!("`{tag}`"))) + )); } let detail = if reasons.is_empty() { String::new() @@ -312,9 +349,20 @@ impl ComponentTester { if !missing.is_empty() { let partial = partial_matches.join(""); + let tag_clause = if requirement.required_tags.is_empty() { + String::new() + } else { + format!( + " with tag {}", + requirement + .required_tags + .iter() + .format_with(", ", |tag, fmt| fmt(&format!("`{tag}`"))) + ) + }; self.errors.push(format!( - " - Missing metric `{}*` with tag `output`{partial}", - missing.iter().join(", ") + " - Missing metric `{}`{tag_clause}{partial}", + missing.iter().sorted().join(", ") )); } } @@ -598,7 +646,7 @@ pub async fn assert_sink_error_with_events( events, tagged_counters: &["component_errors_total"], untagged_counters: &[], - require_source_sender_metrics: false, + buffer_metrics: None, }; assert_sink_error_with_component_tests(&component_tests, tags, f).await } diff --git a/src/topology/builder.rs b/src/topology/builder.rs index ffc8041a70724..0620ec7206623 100644 --- a/src/topology/builder.rs +++ b/src/topology/builder.rs @@ -22,7 +22,7 @@ use vector_lib::{ BufferType, WhenFull, topology::{ builder::TopologyBuilder, - channel::{BufferReceiver, BufferSender}, + channel::{BufferReceiver, BufferSender, ChannelMetricMetadata}, }, }, config::LogNamespace, @@ -62,6 +62,7 @@ 
pub(crate) static SOURCE_SENDER_BUFFER_SIZE: LazyLock = const READY_ARRAY_CAPACITY: NonZeroUsize = NonZeroUsize::new(CHUNK_SIZE * 4).unwrap(); pub(crate) const TOPOLOGY_BUFFER_SIZE: NonZeroUsize = NonZeroUsize::new(100).unwrap(); +const TRANSFORM_CHANNEL_METRIC_PREFIX: &str = "transform_buffer"; static TRANSFORM_CONCURRENCY_LIMIT: LazyLock = LazyLock::new(|| { crate::app::worker_threads() @@ -470,6 +471,7 @@ impl<'a> Builder<'a> { component_id = %key.id(), component_type = %transform.inner.get_component_name(), ); + let _span = span.enter(); // Create a map of the outputs to the list of possible definitions from those outputs. let schema_definitions = transform @@ -517,17 +519,19 @@ impl<'a> Builder<'a> { Ok(transform) => transform, }; - let (input_tx, input_rx) = - TopologyBuilder::standalone_memory(TOPOLOGY_BUFFER_SIZE, WhenFull::Block, &span) - .await; + let metrics = ChannelMetricMetadata::new(TRANSFORM_CHANNEL_METRIC_PREFIX, None); + let (input_tx, input_rx) = TopologyBuilder::standalone_memory( + TOPOLOGY_BUFFER_SIZE, + WhenFull::Block, + &span, + Some(metrics), + ); self.inputs .insert(key.clone(), (input_tx, node.inputs.clone())); - let (transform_task, transform_outputs) = { - let _span = span.enter(); - build_transform(transform, node, input_rx, &self.utilization_registry) - }; + let (transform_task, transform_outputs) = + build_transform(transform, node, input_rx, &self.utilization_registry); self.outputs.extend(transform_outputs); self.tasks.insert(key.clone(), transform_task); diff --git a/website/cue/reference/components/sources/internal_metrics.cue b/website/cue/reference/components/sources/internal_metrics.cue index d03036431acf2..b9335f1918932 100644 --- a/website/cue/reference/components/sources/internal_metrics.cue +++ b/website/cue/reference/components/sources/internal_metrics.cue @@ -828,6 +828,38 @@ components: sources: internal_metrics: { default_namespace: "vector" tags: _component_tags } + transform_buffer_max_event_size: { + description: 
"The maximum number of events the buffer that feeds into a transform can hold." + type: "gauge" + default_namespace: "vector" + tags: _component_tags & { + output: _output + } + } + transform_buffer_max_byte_size: { + description: "The maximum number of bytes the buffer that feeds into a transform can hold." + type: "gauge" + default_namespace: "vector" + tags: _component_tags & { + output: _output + } + } + transform_buffer_utilization: { + description: "The utilization level of the buffer that feeds into a transform." + type: "histogram" + default_namespace: "vector" + tags: _component_tags & { + output: _output + } + } + transform_buffer_utilization_level: { + description: "The current utilization level of the buffer that feeds into a transform." + type: "gauge" + default_namespace: "vector" + tags: _component_tags & { + output: _output + } + } uptime_seconds: { description: "The total number of seconds the Vector instance has been up." type: "gauge" diff --git a/website/cue/reference/components/transforms.cue b/website/cue/reference/components/transforms.cue index c227be551e223..5f19812435555 100644 --- a/website/cue/reference/components/transforms.cue +++ b/website/cue/reference/components/transforms.cue @@ -20,6 +20,10 @@ components: transforms: [Name=string]: { component_received_event_bytes_total: components.sources.internal_metrics.output.metrics.component_received_event_bytes_total component_sent_events_total: components.sources.internal_metrics.output.metrics.component_sent_events_total component_sent_event_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_event_bytes_total + transform_buffer_max_event_size: components.sources.internal_metrics.output.metrics.transform_buffer_max_event_size + transform_buffer_max_byte_size: components.sources.internal_metrics.output.metrics.transform_buffer_max_byte_size + transform_buffer_utilization: components.sources.internal_metrics.output.metrics.transform_buffer_utilization + 
transform_buffer_utilization_level: components.sources.internal_metrics.output.metrics.transform_buffer_utilization_level utilization: components.sources.internal_metrics.output.metrics.utilization } } From 922d970672a79bf3e88ece9bfd020a73bcd7e8e4 Mon Sep 17 00:00:00 2001 From: Thomas Date: Mon, 8 Dec 2025 17:13:53 -0500 Subject: [PATCH 133/227] chore(ci): Ignore RUSTSEC-2025-0134 for rustls-pemfile (#24352) --- deny.toml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/deny.toml b/deny.toml index ef1adf5d5da00..e571de12a7f71 100644 --- a/deny.toml +++ b/deny.toml @@ -48,4 +48,6 @@ ignore = [ { id = "RUSTSEC-2020-0168", reason = "mach is unmaintained" }, { id = "RUSTSEC-2024-0436", reason = "paste is unmaintained" }, { id = "RUSTSEC-2025-0012", reason = "backoff is unmaintained" }, + # rustls-pemfile is unmaintained. Blocked by both async-nats and http 1.0.0 upgrade. + { id = "RUSTSEC-2025-0134", reason = "rustls-pemfile is unmaintained" }, ] From a7a4106a4c1065fc3e85a933fedda7c2511b7ba1 Mon Sep 17 00:00:00 2001 From: Thomas Date: Tue, 9 Dec 2025 12:05:19 -0500 Subject: [PATCH 134/227] chore(deps): bump hyper, http-body and apply deprecation suggestions (#24351) * bump hyper to 0.14.32 * Bump http-body to 0.4.6 and apply suggestions * Fix clippy * Update LICENSE-3rdparty.csv * cargo fmt --------- Co-authored-by: Benson Fung --- Cargo.lock | 66 +++++++++---------- Cargo.toml | 4 +- LICENSE-3rdparty.csv | 2 + src/components/validation/resources/http.rs | 3 +- src/gcp.rs | 5 +- src/providers/http.rs | 6 +- src/sinks/azure_monitor_logs/tests.rs | 3 +- .../traces/apm_stats/integration_tests.rs | 8 ++- src/sinks/datadog/traces/service.rs | 5 +- src/sinks/elasticsearch/common.rs | 5 +- src/sinks/honeycomb/config.rs | 2 +- src/sinks/keep/config.rs | 2 +- src/sinks/prometheus/exporter.rs | 20 +++--- src/sinks/prometheus/remote_write/service.rs | 2 +- .../splunk_hec/common/acknowledgements.rs | 6 +- src/sinks/util/http.rs | 12 ++-- src/sinks/util/test.rs | 2 +- 
src/sources/apache_metrics/mod.rs | 2 +- src/sources/aws_ecs_metrics/mod.rs | 6 +- src/sources/eventstoredb_metrics/mod.rs | 6 +- src/sources/gcp_pubsub.rs | 5 +- src/sources/nginx_metrics/mod.rs | 8 ++- src/sources/okta/client.rs | 2 +- src/sources/prometheus/scrape.rs | 3 +- src/sources/util/http_client.rs | 2 +- src/transforms/aws_ec2_metadata.rs | 6 +- 26 files changed, 118 insertions(+), 75 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ff9e8907b9c2f..d35cde9c8e42d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1041,7 +1041,7 @@ dependencies = [ "bytes 1.10.1", "fastrand 2.3.0", "http 0.2.9", - "http-body 0.4.5", + "http-body 0.4.6", "percent-encoding", "pin-project-lite", "tracing 0.1.41", @@ -1069,7 +1069,7 @@ dependencies = [ "fastrand 2.3.0", "flate2", "http 0.2.9", - "http-body 0.4.5", + "http-body 0.4.6", "once_cell", "regex-lite", "tracing 0.1.41", @@ -1216,7 +1216,7 @@ dependencies = [ "hmac", "http 0.2.9", "http 1.3.1", - "http-body 0.4.5", + "http-body 0.4.6", "lru 0.12.5", "once_cell", "percent-encoding", @@ -1411,7 +1411,7 @@ dependencies = [ "crc64fast-nvme", "hex", "http 0.2.9", - "http-body 0.4.5", + "http-body 0.4.6", "md-5", "pin-project-lite", "sha1", @@ -1431,7 +1431,7 @@ dependencies = [ "flate2", "futures-util", "http 0.2.9", - "http-body 0.4.5", + "http-body 0.4.6", "pin-project-lite", "tracing 0.1.41", ] @@ -1461,7 +1461,7 @@ dependencies = [ "futures-core", "http 0.2.9", "http 1.3.1", - "http-body 0.4.5", + "http-body 0.4.6", "percent-encoding", "pin-project-lite", "pin-utils", @@ -1480,8 +1480,8 @@ dependencies = [ "h2 0.3.26", "h2 0.4.12", "http 0.2.9", - "http-body 0.4.5", - "hyper 0.14.28", + "http-body 0.4.6", + "hyper 0.14.32", "hyper-rustls 0.24.2", "pin-project-lite", "rustls 0.21.12", @@ -1533,7 +1533,7 @@ dependencies = [ "fastrand 2.3.0", "http 0.2.9", "http 1.3.1", - "http-body 0.4.5", + "http-body 0.4.6", "http-body 1.0.0", "pin-project-lite", "pin-utils", @@ -1570,7 +1570,7 @@ dependencies = [ "futures-core", "http 
0.2.9", "http 1.3.1", - "http-body 0.4.5", + "http-body 0.4.6", "http-body 1.0.0", "http-body-util", "itoa", @@ -1619,8 +1619,8 @@ dependencies = [ "bytes 1.10.1", "futures-util", "http 0.2.9", - "http-body 0.4.5", - "hyper 0.14.28", + "http-body 0.4.6", + "hyper 0.14.32", "itoa", "matchit", "memchr", @@ -1673,7 +1673,7 @@ dependencies = [ "bytes 1.10.1", "futures-util", "http 0.2.9", - "http-body 0.4.5", + "http-body 0.4.6", "mime", "rustversion", "tower-layer", @@ -3497,7 +3497,7 @@ dependencies = [ "libc", "option-ext", "redox_users 0.5.0", - "windows-sys 0.60.2", + "windows-sys 0.61.0", ] [[package]] @@ -5108,9 +5108,9 @@ dependencies = [ [[package]] name = "http-body" -version = "0.4.5" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1" +checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2" dependencies = [ "bytes 1.10.1", "http 0.2.9", @@ -5196,9 +5196,9 @@ checksum = "135b12329e5e3ce057a9f972339ea52bc954fe1e9358ef27f95e89716fbc5424" [[package]] name = "hyper" -version = "0.14.28" +version = "0.14.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf96e135eb83a2a8ddf766e426a841d8ddd7449d5f00d34ea02b41d2f19eef80" +checksum = "41dfc780fdec9373c01bae43289ea34c972e40ee3c9f6b3c8801a35f35586ce7" dependencies = [ "bytes 1.10.1", "futures-channel", @@ -5206,7 +5206,7 @@ dependencies = [ "futures-util", "h2 0.3.26", "http 0.2.9", - "http-body 0.4.5", + "http-body 0.4.6", "httparse", "httpdate", "itoa", @@ -5263,7 +5263,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d6ee5d7a8f718585d1c3c61dfde28ef5b0bb14734b4db13f5ada856cdc6c612b" dependencies = [ "http 0.2.9", - "hyper 0.14.28", + "hyper 0.14.32", "linked_hash_set", "once_cell", "openssl", @@ -5303,7 +5303,7 @@ dependencies = [ "futures 0.3.31", "headers", "http 0.2.9", - "hyper 0.14.28", + "hyper 0.14.32", "openssl", 
"tokio", "tokio-openssl", @@ -5318,7 +5318,7 @@ checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590" dependencies = [ "futures-util", "http 0.2.9", - "hyper 0.14.28", + "hyper 0.14.32", "log", "rustls 0.21.12", "rustls-native-certs 0.6.3", @@ -5351,7 +5351,7 @@ version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bbb958482e8c7be4bc3cf272a766a2b0bf1a6755e7a6ae777f017a31d11b13b1" dependencies = [ - "hyper 0.14.28", + "hyper 0.14.32", "pin-project-lite", "tokio", "tokio-io-timeout", @@ -5377,7 +5377,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" dependencies = [ "bytes 1.10.1", - "hyper 0.14.28", + "hyper 0.14.32", "native-tls", "tokio", "tokio-native-tls", @@ -9279,8 +9279,8 @@ dependencies = [ "futures-util", "h2 0.3.26", "http 0.2.9", - "http-body 0.4.5", - "hyper 0.14.28", + "http-body 0.4.6", + "hyper 0.14.32", "hyper-rustls 0.24.2", "hyper-tls 0.5.0", "ipnet", @@ -11110,7 +11110,7 @@ dependencies = [ "getrandom 0.3.1", "once_cell", "rustix 1.0.1", - "windows-sys 0.60.2", + "windows-sys 0.61.0", ] [[package]] @@ -11650,8 +11650,8 @@ dependencies = [ "flate2", "h2 0.3.26", "http 0.2.9", - "http-body 0.4.5", - "hyper 0.14.28", + "http-body 0.4.6", + "hyper 0.14.32", "hyper-timeout 0.4.1", "percent-encoding", "pin-project", @@ -11776,7 +11776,7 @@ dependencies = [ "futures-core", "futures-util", "http 0.2.9", - "http-body 0.4.5", + "http-body 0.4.6", "http-range-header", "pin-project-lite", "tokio", @@ -12532,10 +12532,10 @@ dependencies = [ "hostname 0.4.0", "http 0.2.9", "http 1.3.1", - "http-body 0.4.5", + "http-body 0.4.6", "http-serde", "humantime", - "hyper 0.14.28", + "hyper 0.14.32", "hyper-openssl 0.9.2", "hyper-proxy", "indexmap 2.12.0", @@ -13213,7 +13213,7 @@ dependencies = [ "futures-util", "headers", "http 0.2.9", - "hyper 0.14.28", + "hyper 0.14.32", "log", "mime", "mime_guess", diff 
--git a/Cargo.toml b/Cargo.toml index ab9e6f0d3b2d2..dcf035c37f089 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -372,9 +372,9 @@ hostname = { version = "0.4.0", default-features = false } http = { version = "0.2.9", default-features = false } http-1 = { package = "http", version = "1.0", default-features = false, features = ["std"] } http-serde = "1.1.3" -http-body = { version = "0.4.5", default-features = false } +http-body = { version = "0.4.6", default-features = false } humantime.workspace = true -hyper = { version = "0.14.28", default-features = false, features = ["client", "runtime", "http1", "http2", "server", "stream"] } +hyper = { version = "0.14.32", default-features = false, features = ["client", "runtime", "http1", "http2", "server", "stream", "backports", "deprecated"] } hyper-openssl = { version = "0.9.2", default-features = false } hyper-proxy = { version = "0.9.1", default-features = false, features = ["openssl-tls"] } indexmap.workspace = true diff --git a/LICENSE-3rdparty.csv b/LICENSE-3rdparty.csv index 98e8d456e0b11..a946220a4da1b 100644 --- a/LICENSE-3rdparty.csv +++ b/LICENSE-3rdparty.csv @@ -886,12 +886,14 @@ windows-future,https://github.com/microsoft/windows-rs,MIT OR Apache-2.0,The win windows-implement,https://github.com/microsoft/windows-rs,MIT OR Apache-2.0,Microsoft windows-interface,https://github.com/microsoft/windows-rs,MIT OR Apache-2.0,Microsoft windows-link,https://github.com/microsoft/windows-rs,MIT OR Apache-2.0,Microsoft +windows-link,https://github.com/microsoft/windows-rs,MIT OR Apache-2.0,The windows-link Authors windows-numerics,https://github.com/microsoft/windows-rs,MIT OR Apache-2.0,The windows-numerics Authors windows-registry,https://github.com/microsoft/windows-rs,MIT OR Apache-2.0,Microsoft windows-result,https://github.com/microsoft/windows-rs,MIT OR Apache-2.0,Microsoft windows-service,https://github.com/mullvad/windows-service-rs,MIT OR Apache-2.0,Mullvad VPN 
windows-strings,https://github.com/microsoft/windows-rs,MIT OR Apache-2.0,Microsoft windows-sys,https://github.com/microsoft/windows-rs,MIT OR Apache-2.0,Microsoft +windows-sys,https://github.com/microsoft/windows-rs,MIT OR Apache-2.0,The windows-sys Authors windows-targets,https://github.com/microsoft/windows-rs,MIT OR Apache-2.0,Microsoft windows_aarch64_gnullvm,https://github.com/microsoft/windows-rs,MIT OR Apache-2.0,Microsoft windows_aarch64_msvc,https://github.com/microsoft/windows-rs,MIT OR Apache-2.0,Microsoft diff --git a/src/components/validation/resources/http.rs b/src/components/validation/resources/http.rs index a559760b9f91b..7a12f91a1ae35 100644 --- a/src/components/validation/resources/http.rs +++ b/src/components/validation/resources/http.rs @@ -13,6 +13,7 @@ use axum::{ }; use bytes::{BufMut as _, BytesMut}; use http::{Method, Request, StatusCode, Uri}; +use http_body::{Body as _, Collected}; use hyper::{Body, Client, Server}; use tokio::{ select, @@ -328,7 +329,7 @@ impl HttpResourceOutputContext<'_> { let mut decoder = decoder.clone(); async move { - match hyper::body::to_bytes(request.into_body()).await { + match request.into_body().collect().await.map(Collected::to_bytes) { Err(_) => StatusCode::INTERNAL_SERVER_ERROR.into_response(), Ok(body) => { let byte_size = body.len(); diff --git a/src/gcp.rs b/src/gcp.rs index 770283e17033b..9a0ff365a69ee 100644 --- a/src/gcp.rs +++ b/src/gcp.rs @@ -12,6 +12,7 @@ use goauth::{ credentials::Credentials, }; use http::{Uri, uri::PathAndQuery}; +use http_body::{Body as _, Collected}; use hyper::header::AUTHORIZATION; use smpl_jwt::Jwt; use snafu::{ResultExt, Snafu}; @@ -296,8 +297,10 @@ async fn get_token_implicit() -> Result { .context(GetImplicitTokenSnafu)?; let body = res.into_body(); - let bytes = hyper::body::to_bytes(body) + let bytes = body + .collect() .await + .map(Collected::to_bytes) .context(GetTokenBytesSnafu)?; // Token::from_str is irresponsible and may panic! 
diff --git a/src/providers/http.rs b/src/providers/http.rs index e916829ab1cef..c7dfa043eef06 100644 --- a/src/providers/http.rs +++ b/src/providers/http.rs @@ -1,6 +1,7 @@ use async_stream::stream; use bytes::Buf; use futures::Stream; +use http_body::{Body as _, Collected}; use hyper::Body; use indexmap::IndexMap; use tokio::time; @@ -111,8 +112,11 @@ async fn http_request( info!(message = "Response received.", url = ?url.as_str()); - hyper::body::to_bytes(response.into_body()) + response + .into_body() + .collect() .await + .map(Collected::to_bytes) .map_err(|err| { let message = "Error interpreting response."; let cause = err.into_cause(); diff --git a/src/sinks/azure_monitor_logs/tests.rs b/src/sinks/azure_monitor_logs/tests.rs index 31bfede218356..06f099d8db3df 100644 --- a/src/sinks/azure_monitor_logs/tests.rs +++ b/src/sinks/azure_monitor_logs/tests.rs @@ -2,7 +2,6 @@ use std::time::Duration; use futures::{future::ready, stream}; use http::Response; -use hyper::body; use openssl::{base64, hash, pkey, sign}; use tokio::time::timeout; use vector_lib::config::log_schema; @@ -205,7 +204,7 @@ async fn correct_request() { let (parts, body) = request.into_parts(); assert_eq!(&parts.method.to_string(), "POST"); - let body = body::to_bytes(body).await.unwrap(); + let body = http_body::Body::collect(body).await.unwrap().to_bytes(); let json: serde_json::Value = serde_json::from_slice(&body[..]).unwrap(); let expected_json = serde_json::json!([ { diff --git a/src/sinks/datadog/traces/apm_stats/integration_tests.rs b/src/sinks/datadog/traces/apm_stats/integration_tests.rs index bcf1c6937826d..78800c1c048e3 100644 --- a/src/sinks/datadog/traces/apm_stats/integration_tests.rs +++ b/src/sinks/datadog/traces/apm_stats/integration_tests.rs @@ -1,5 +1,7 @@ use std::{collections::HashMap, io::Read, net::SocketAddr, sync::Arc}; +use http_body::Body as _; + use axum::{ Router, body::Body, @@ -122,9 +124,11 @@ async fn process_stats(Extension(state): Extension>, mut request: 
debug!("`{}` server got stats payload.", state.name); let body = request.body_mut(); - let compressed_body_bytes = hyper::body::to_bytes(body) + let compressed_body_bytes = body + .collect() .await - .expect("could not decode body into bytes"); + .expect("could not decode body into bytes") + .to_bytes(); let mut gz = GzDecoder::new(compressed_body_bytes.as_ref()); let mut decompressed_body_bytes = vec![]; diff --git a/src/sinks/datadog/traces/service.rs b/src/sinks/datadog/traces/service.rs index 1fd61865b747b..9d94ed18a8cfd 100644 --- a/src/sinks/datadog/traces/service.rs +++ b/src/sinks/datadog/traces/service.rs @@ -6,6 +6,7 @@ use std::{ use bytes::{Buf, Bytes}; use futures::future::BoxFuture; use http::{Request, StatusCode, Uri}; +use http_body::{Body as _, Collected}; use hyper::Body; use snafu::ResultExt; use tower::Service; @@ -156,8 +157,10 @@ impl Service for TraceApiService { let response = client.send(http_request).await?; let (parts, body) = response.into_parts(); - let mut body = hyper::body::aggregate(body) + let mut body = body + .collect() .await + .map(Collected::aggregate) .context(CallRequestSnafu)?; let body = body.copy_to_bytes(body.remaining()); diff --git a/src/sinks/elasticsearch/common.rs b/src/sinks/elasticsearch/common.rs index 5657f04394c90..c279a202cf841 100644 --- a/src/sinks/elasticsearch/common.rs +++ b/src/sinks/elasticsearch/common.rs @@ -1,6 +1,7 @@ use bytes::{Buf, Bytes}; use http::{Response, StatusCode, Uri}; -use hyper::{Body, body}; +use http_body::Body as _; +use hyper::Body; use serde::Deserialize; use snafu::ResultExt; use vector_lib::config::{LogNamespace, proxy::ProxyConfig}; @@ -401,7 +402,7 @@ async fn get_version( .map_err(|error| format!("Failed to get Elasticsearch API version: {error}"))?; let (_, body) = response.into_parts(); - let mut body = body::aggregate(body).await?; + let mut body = body.collect().await?.aggregate(); let body = body.copy_to_bytes(body.remaining()); let ResponsePayload { version } = 
serde_json::from_slice(&body)?; if let Some(version) = version diff --git a/src/sinks/honeycomb/config.rs b/src/sinks/honeycomb/config.rs index e98b2f8cb3e49..05a56cd9df368 100644 --- a/src/sinks/honeycomb/config.rs +++ b/src/sinks/honeycomb/config.rs @@ -167,7 +167,7 @@ async fn healthcheck(uri: Uri, api_key: SensitiveString, client: HttpClient) -> let res = client.send(req).await?; let status = res.status(); - let body = hyper::body::to_bytes(res.into_body()).await?; + let body = http_body::Body::collect(res.into_body()).await?.to_bytes(); if status == StatusCode::BAD_REQUEST { Ok(()) diff --git a/src/sinks/keep/config.rs b/src/sinks/keep/config.rs index d07d27f0b4d60..7b81281253672 100644 --- a/src/sinks/keep/config.rs +++ b/src/sinks/keep/config.rs @@ -143,7 +143,7 @@ async fn healthcheck(uri: Uri, api_key: SensitiveString, client: HttpClient) -> let res = client.send(req).await?; let status = res.status(); - let body = hyper::body::to_bytes(res.into_body()).await?; + let body = http_body::Body::collect(res.into_body()).await?.to_bytes(); match status { StatusCode::OK => Ok(()), // Healthcheck passed diff --git a/src/sinks/prometheus/exporter.rs b/src/sinks/prometheus/exporter.rs index 63d2ed3a1a463..6072bc8274ea0 100644 --- a/src/sinks/prometheus/exporter.rs +++ b/src/sinks/prometheus/exporter.rs @@ -962,9 +962,10 @@ mod tests { } let body = result.into_body(); - let bytes = hyper::body::to_bytes(body) + let bytes = http_body::Body::collect(body) .await - .expect("Reading body failed"); + .expect("Reading body failed") + .to_bytes(); sink_handle.await.unwrap(); @@ -1041,9 +1042,10 @@ mod tests { } let body = result.into_body(); - let bytes = hyper::body::to_bytes(body) + let bytes = http_body::Body::collect(body) .await - .expect("Reading body failed"); + .expect("Reading body failed") + .to_bytes(); let result = String::from_utf8(bytes.to_vec()).unwrap(); sink_handle.await.unwrap(); @@ -1495,9 +1497,10 @@ mod integration_tests { .send(request) .await 
.expect("Could not send request"); - let result = hyper::body::to_bytes(result.into_body()) + let result = http_body::Body::collect(result.into_body()) .await - .expect("Error fetching body"); + .expect("Error fetching body") + .to_bytes(); String::from_utf8_lossy(&result).to_string() } @@ -1516,9 +1519,10 @@ mod integration_tests { .send(request) .await .expect("Could not fetch query"); - let result = hyper::body::to_bytes(result.into_body()) + let result = http_body::Body::collect(result.into_body()) .await - .expect("Error fetching body"); + .expect("Error fetching body") + .to_bytes(); let result = String::from_utf8_lossy(&result); serde_json::from_str(result.as_ref()).expect("Invalid JSON from prometheus") } diff --git a/src/sinks/prometheus/remote_write/service.rs b/src/sinks/prometheus/remote_write/service.rs index 8d9ee0c39fc66..6a0a75106f7b7 100644 --- a/src/sinks/prometheus/remote_write/service.rs +++ b/src/sinks/prometheus/remote_write/service.rs @@ -69,7 +69,7 @@ impl Service for RemoteWriteService { let response = client.send(http_request).await?; let (parts, body) = response.into_parts(); - let body = hyper::body::to_bytes(body).await?; + let body = http_body::Body::collect(body).await?.to_bytes(); let http_response = hyper::Response::from_parts(parts, body); if http_response.status().is_success() { diff --git a/src/sinks/splunk_hec/common/acknowledgements.rs b/src/sinks/splunk_hec/common/acknowledgements.rs index e0df41e9660e9..a61fd38c0e10e 100644 --- a/src/sinks/splunk_hec/common/acknowledgements.rs +++ b/src/sinks/splunk_hec/common/acknowledgements.rs @@ -1,3 +1,4 @@ +use http_body::{Body as _, Collected}; use hyper::Body; use serde::{Deserialize, Serialize}; use std::{ @@ -243,8 +244,11 @@ impl HecAckClient { let status = response.status(); if status.is_success() { - let response_body = hyper::body::to_bytes(response.into_body()) + let response_body = response + .into_body() + .collect() .await + .map(Collected::to_bytes) .map_err(|_| 
HecAckApiError::ClientParseResponse)?; serde_json::from_slice::(&response_body) .map_err(|_| HecAckApiError::ClientParseResponse) diff --git a/src/sinks/util/http.rs b/src/sinks/util/http.rs index 12a0d30caec6a..e21ef48e4ed66 100644 --- a/src/sinks/util/http.rs +++ b/src/sinks/util/http.rs @@ -6,6 +6,7 @@ use bytes::{Buf, Bytes}; use futures::{Sink, future::BoxFuture}; use headers::HeaderName; use http::{HeaderValue, Request, Response, StatusCode, header}; +use http_body::Body as _; #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub struct OrderedHeaderName(HeaderName); @@ -49,7 +50,7 @@ use std::{ time::Duration, }; -use hyper::{Body, body}; +use hyper::Body; use pin_project::pin_project; use snafu::{ResultExt, Snafu}; use tower::{Service, ServiceBuilder}; @@ -519,7 +520,7 @@ where } let (parts, body) = response.into_parts(); - let mut body = body::aggregate(body).await?; + let mut body = body.collect().await?.aggregate(); Ok(hyper::Response::from_parts( parts, body.copy_to_bytes(body.remaining()), @@ -997,13 +998,14 @@ mod test { let new_service = make_service_fn(move |_| { let tx = tx.clone(); - let svc = service_fn(move |req| { + let svc = service_fn(move |req: http::Request| { let mut tx = tx.clone(); async move { - let mut body = hyper::body::aggregate(req.into_body()) + let mut body = http_body::Body::collect(req.into_body()) .await - .map_err(|error| format!("error: {error}"))?; + .map_err(|error| format!("error: {error}"))? 
+ .aggregate(); let string = String::from_utf8(body.copy_to_bytes(body.remaining()).to_vec()) .map_err(|_| "Wasn't UTF-8".to_string())?; tx.try_send(string).map_err(|_| "Send error".to_string())?; diff --git a/src/sinks/util/test.rs b/src/sinks/util/test.rs index 356a9c71b2f23..477a2cbbd4a00 100644 --- a/src/sinks/util/test.rs +++ b/src/sinks/util/test.rs @@ -93,7 +93,7 @@ where let response = responder(); if response.status().is_success() { tokio::spawn(async move { - let bytes = hyper::body::to_bytes(body).await.unwrap(); + let bytes = http_body::Body::collect(body).await.unwrap().to_bytes(); tx.send((parts, bytes)).await.unwrap(); }); } diff --git a/src/sources/apache_metrics/mod.rs b/src/sources/apache_metrics/mod.rs index dc8256c535132..d9cfb1b1ec387 100644 --- a/src/sources/apache_metrics/mod.rs +++ b/src/sources/apache_metrics/mod.rs @@ -173,7 +173,7 @@ fn apache_metrics( .map_err(crate::Error::from) .and_then(|response| async { let (header, body) = response.into_parts(); - let body = hyper::body::to_bytes(body).await?; + let body = http_body::Body::collect(body).await?.to_bytes(); Ok((header, body)) }) .into_stream() diff --git a/src/sources/aws_ecs_metrics/mod.rs b/src/sources/aws_ecs_metrics/mod.rs index c64dd853f5aa4..86d5ce01f2fd6 100644 --- a/src/sources/aws_ecs_metrics/mod.rs +++ b/src/sources/aws_ecs_metrics/mod.rs @@ -1,6 +1,7 @@ use std::{env, time::Duration}; use futures::StreamExt; +use http_body::Collected; use hyper::{Body, Request}; use serde_with::serde_as; use tokio::time; @@ -193,7 +194,10 @@ async fn aws_ecs_metrics( match http_client.send(request).await { Ok(response) if response.status() == hyper::StatusCode::OK => { - match hyper::body::to_bytes(response).await { + match http_body::Body::collect(response.into_body()) + .await + .map(Collected::to_bytes) + { Ok(body) => { bytes_received.emit(ByteSize(body.len())); diff --git a/src/sources/eventstoredb_metrics/mod.rs b/src/sources/eventstoredb_metrics/mod.rs index 
7172c07c8cffe..4f6d093082e1a 100644 --- a/src/sources/eventstoredb_metrics/mod.rs +++ b/src/sources/eventstoredb_metrics/mod.rs @@ -2,6 +2,7 @@ use std::time::Duration; use futures::{FutureExt, StreamExt}; use http::Uri; +use http_body::Collected; use hyper::{Body, Request}; use serde_with::serde_as; use tokio_stream::wrappers::IntervalStream; @@ -113,7 +114,10 @@ fn eventstoredb( } Ok(resp) => { - let bytes = match hyper::body::to_bytes(resp.into_body()).await { + let bytes = match http_body::Body::collect(resp.into_body()) + .await + .map(Collected::to_bytes) + { Ok(b) => b, Err(error) => { emit!(EventStoreDbMetricsHttpError { diff --git a/src/sources/gcp_pubsub.rs b/src/sources/gcp_pubsub.rs index 445d8d8f9caab..b01664b7ade63 100644 --- a/src/sources/gcp_pubsub.rs +++ b/src/sources/gcp_pubsub.rs @@ -1140,7 +1140,10 @@ mod integration_tests { .unwrap(); let response = self.client.send(request).await.unwrap(); assert_eq!(response.status(), StatusCode::OK); - let body = hyper::body::to_bytes(response.into_body()).await.unwrap(); + let body = http_body::Body::collect(response.into_body()) + .await + .unwrap() + .to_bytes(); serde_json::from_str(core::str::from_utf8(&body).unwrap()).unwrap() } diff --git a/src/sources/nginx_metrics/mod.rs b/src/sources/nginx_metrics/mod.rs index f009ae988d6e4..6deecfcb71b64 100644 --- a/src/sources/nginx_metrics/mod.rs +++ b/src/sources/nginx_metrics/mod.rs @@ -7,7 +7,8 @@ use bytes::Bytes; use chrono::Utc; use futures::{StreamExt, TryFutureExt, future::join_all}; use http::{Request, StatusCode}; -use hyper::{Body, Uri, body::to_bytes as body_to_bytes}; +use http_body::Collected; +use hyper::{Body, Uri}; use serde_with::serde_as; use snafu::{ResultExt, Snafu}; use tokio::time; @@ -251,7 +252,10 @@ impl NginxMetrics { let response = self.http_client.send(request).await?; let (parts, body) = response.into_parts(); match parts.status { - StatusCode::OK => body_to_bytes(body).err_into().await, + StatusCode::OK => 
http_body::Body::collect(body) + .err_into() + .await + .map(Collected::to_bytes), status => Err(Box::new(NginxError::InvalidResponseStatus { status })), } } diff --git a/src/sources/okta/client.rs b/src/sources/okta/client.rs index c13e7a5cefe27..4b06b6a6d71b9 100644 --- a/src/sources/okta/client.rs +++ b/src/sources/okta/client.rs @@ -218,7 +218,7 @@ async fn run_once(url: String, result: OktaTimeoutResult, timeout: Duration) -> next = Some(next_url); }; - let body = hyper::body::to_bytes(body).await?; + let body = http_body::Body::collect(body).await?.to_bytes(); emit!(EndpointBytesReceived { byte_size: body.len(), diff --git a/src/sources/prometheus/scrape.rs b/src/sources/prometheus/scrape.rs index 85ace26146721..414bee899c51e 100644 --- a/src/sources/prometheus/scrape.rs +++ b/src/sources/prometheus/scrape.rs @@ -318,6 +318,7 @@ impl HttpClientContext for PrometheusScrapeContext { #[cfg(all(test, feature = "sinks-prometheus"))] mod test { + use http_body::Body as _; use hyper::{ Body, Client, Response, Server, service::{make_service_fn, service_fn}, @@ -717,7 +718,7 @@ mod test { .unwrap(); assert!(response.status().is_success()); - let body = hyper::body::to_bytes(response.into_body()).await.unwrap(); + let body = response.into_body().collect().await.unwrap().to_bytes(); let lines = std::str::from_utf8(&body) .unwrap() .lines() diff --git a/src/sources/util/http_client.rs b/src/sources/util/http_client.rs index 4ae3cccf24f81..4ff9eb911e433 100644 --- a/src/sources/util/http_client.rs +++ b/src/sources/util/http_client.rs @@ -211,7 +211,7 @@ pub(crate) async fn call< }) .and_then(|response| async move { let (header, body) = response.into_parts(); - let body = hyper::body::to_bytes(body).await?; + let body = http_body::Body::collect(body).await?.to_bytes(); emit!(EndpointBytesReceived { byte_size: body.len(), protocol: "http", diff --git a/src/transforms/aws_ec2_metadata.rs b/src/transforms/aws_ec2_metadata.rs index 5890625332f92..0bbb6bf61376b 100644 --- 
a/src/transforms/aws_ec2_metadata.rs +++ b/src/transforms/aws_ec2_metadata.rs @@ -10,7 +10,7 @@ use arc_swap::ArcSwap; use bytes::Bytes; use futures::{Stream, StreamExt}; use http::{Request, StatusCode, Uri, uri::PathAndQuery}; -use hyper::{Body, body::to_bytes as body_to_bytes}; +use hyper::Body; use serde::Deserialize; use serde_with::serde_as; use snafu::ResultExt as _; @@ -433,7 +433,7 @@ impl MetadataClient { .into()), })?; - let token = body_to_bytes(res.into_body()).await?; + let token = http_body::Body::collect(res.into_body()).await?.to_bytes(); let next_refresh = Instant::now() + Duration::from_secs(21600); self.token = Some((token.clone(), next_refresh)); @@ -619,7 +619,7 @@ impl MetadataClient { .into()), })? { Some(res) => { - let body = body_to_bytes(res.into_body()).await?; + let body = http_body::Body::collect(res.into_body()).await?.to_bytes(); Ok(Some(body)) } None => Ok(None), From b5d718a2da8897e9631f96402889b496620e13c0 Mon Sep 17 00:00:00 2001 From: Thomas Date: Tue, 9 Dec 2025 12:17:22 -0500 Subject: [PATCH 135/227] feat(ci): use compiled vdev with `make` commands (#24347) * Use VDEV env var in Makefile * Use mold and cargo-cache if vdev needs to be compiled * Setup vdev in test.yml * Fix incorrect return code from inlined if * Add mold: false to deny.yml --- .github/actions/install-vdev/action.yml | 19 +++++++ .github/actions/setup/action.yml | 36 +++++++++--- .github/workflows/deny.yml | 1 + .github/workflows/test.yml | 10 +++- Makefile | 75 ++++++++++++++----------- 5 files changed, 101 insertions(+), 40 deletions(-) diff --git a/.github/actions/install-vdev/action.yml b/.github/actions/install-vdev/action.yml index 0c6875a153b7c..8c5947c07d291 100644 --- a/.github/actions/install-vdev/action.yml +++ b/.github/actions/install-vdev/action.yml @@ -5,11 +5,18 @@ branding: icon: tool color: purple +inputs: + skip-cache: + description: "Skip cache lookup and force compilation" + required: false + default: 'false' + runs: using: "composite" 
steps: - name: Cache vdev binary id: cache-vdev + if: ${{ inputs.skip-cache != 'true' }} uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 with: path: ~/.cargo/bin/vdev @@ -23,3 +30,15 @@ runs: run: | echo "Building vdev from source (cache miss)" cargo install --path vdev --locked --force + + - name: Save vdev to cache + if: ${{ inputs.skip-cache == 'true' }} + uses: actions/cache/save@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + with: + path: ~/.cargo/bin/vdev + key: ${{ runner.os }}-vdev-${{ hashFiles('vdev/**', 'Cargo.toml', 'Cargo.lock') }} + + - name: Set VDEV environment variable + shell: bash + run: | + echo "VDEV=$(which vdev)" >> "$GITHUB_ENV" diff --git a/.github/actions/setup/action.yml b/.github/actions/setup/action.yml index 1405c6461da0a..5fac2767f5287 100644 --- a/.github/actions/setup/action.yml +++ b/.github/actions/setup/action.yml @@ -60,7 +60,7 @@ inputs: description: "Install wasm-pack for Rust to WASM bridge." vdev: required: false - default: false + default: true description: "Install vdev CLI tool (cached by vdev/ directory changes)." 
# prepare.sh - npm @@ -76,13 +76,24 @@ inputs: runs: using: "composite" steps: + - name: Check vdev cache status + id: check-vdev-cache + if: ${{ inputs.vdev == 'true' }} + uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + with: + path: ~/.cargo/bin/vdev + key: ${{ runner.os }}-vdev-${{ hashFiles('vdev/**', 'Cargo.toml', 'Cargo.lock') }} + restore-keys: | + ${{ runner.os }}-vdev- + lookup-only: true + - id: needs name: Compute if rust is needed shell: bash run: | rust="false" - # If any rust-related inputs are true, enable cache regardless + # If any rust-related inputs are true (excluding vdev), enable rust for val in "${{ inputs.mold }}" \ "${{ inputs.rust }}" \ "${{ inputs.cargo-deb }}" \ @@ -91,15 +102,22 @@ runs: "${{ inputs.cargo-deny }}" \ "${{ inputs.cargo-msrv }}" \ "${{ inputs.dd-rust-license-tool }}" \ - "${{ inputs.wasm-pack }}" \ - "${{ inputs.vdev }}"; do + "${{ inputs.wasm-pack }}"; do if [[ "$val" == "true" ]]; then rust="true" break fi done + # If vdev needs compilation (cache miss), enable rust + vdev_needs_compile="false" + if [[ "${{ inputs.vdev }}" == "true" && "${{ steps.check-vdev-cache.outputs.cache-hit }}" != "true" ]]; then + rust="true" + vdev_needs_compile="true" + fi + echo "NEEDS_RUST=$rust" >> "$GITHUB_ENV" + echo "VDEV_NEEDS_COMPILE=$vdev_needs_compile" >> "$GITHUB_ENV" if [[ "$rust" == "true" ]]; then echo "RUST_BACKTRACE=full" >> "$GITHUB_ENV" @@ -125,7 +143,7 @@ runs: shell: bash - name: Cache Cargo registry, index, and git DB - if: ${{ inputs.cargo-cache == 'true' || env.NEEDS_RUST == 'true' }} + if: ${{ inputs.cargo-cache == 'true' || env.NEEDS_RUST == 'true' || env.VDEV_NEEDS_COMPILE == 'true' }} uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 with: path: | @@ -137,7 +155,7 @@ runs: ${{ runner.os }}-cargo- - name: Install mold - if: ${{ inputs.mold == 'true' }} + if: ${{ inputs.mold == 'true' || env.VDEV_NEEDS_COMPILE == 'true' }} shell: bash run: | echo "Installing mold" @@ -208,6 
+226,8 @@ runs: - name: Install vdev if: ${{ inputs.vdev == 'true' }} uses: ./.github/actions/install-vdev + with: + skip-cache: ${{ env.VDEV_NEEDS_COMPILE == 'true' }} - name: Cache prepare.sh binaries id: cache-prepare-binaries @@ -243,4 +263,6 @@ runs: [[ "${{ inputs.datadog-ci }}" == "true" ]] && mods+=("datadog-ci") csm=$(IFS=,; echo "${mods[*]}") - [[ "$csm" != "" ]] && ./scripts/environment/prepare.sh --modules="${csm}" + if [[ "$csm" != "" ]]; then + ./scripts/environment/prepare.sh --modules="${csm}" + fi diff --git a/.github/workflows/deny.yml b/.github/workflows/deny.yml index 00f0da93714d3..5439d62f40a0b 100644 --- a/.github/workflows/deny.yml +++ b/.github/workflows/deny.yml @@ -43,6 +43,7 @@ jobs: - uses: ./.github/actions/setup with: + mold: false cargo-deny: true - name: Check cargo deny advisories/licenses diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 77bd725af8308..c15b85de2ec4a 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -84,6 +84,11 @@ jobs: needs: changes steps: - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: ./.github/actions/setup + with: + cargo-cache: false + mold: false + vdev: true - run: make check-scripts check-events: @@ -95,7 +100,9 @@ jobs: - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - uses: ./.github/actions/setup with: - rust: true + cargo-cache: false + mold: false + vdev: true - run: make check-events check-licenses: @@ -109,6 +116,7 @@ jobs: with: cargo-cache: false mold: false + vdev: true dd-rust-license-tool: true - run: make check-licenses diff --git a/Makefile b/Makefile index e5fc317a5249b..f259f6c200cca 100644 --- a/Makefile +++ b/Makefile @@ -62,12 +62,23 @@ export ENVIRONMENT_NETWORK ?= host # Multiple port publishing can be provided using spaces, for example: 8686:8686 8080:8080/udp export ENVIRONMENT_PUBLISH ?= +# If ENVIRONMENT is true, always use cargo vdev since it may be running inside 
the container +ifeq ($(origin VDEV), environment) +ifeq ($(ENVIRONMENT), true) +VDEV := cargo vdev +else +# VDEV is already set from environment, keep it +endif +else +VDEV := cargo vdev +endif + # Set dummy AWS credentials if not present - used for AWS and ES integration tests export AWS_ACCESS_KEY_ID ?= "dummy" export AWS_SECRET_ACCESS_KEY ?= "dummy" # Set version -export VERSION ?= $(shell command -v cargo >/dev/null && cargo vdev version || echo unknown) +export VERSION ?= $(shell command -v cargo >/dev/null && $(VDEV) version || echo unknown) # Set if you are on the CI and actually want the things to happen. (Non-CI users should never set this.) export CI ?= false @@ -379,10 +390,10 @@ test-integration: test-integration-redis test-integration-splunk test-integratio test-integration: test-integration-datadog-traces test-integration-shutdown test-integration-%-cleanup: - cargo vdev --verbose integration stop $* + $(VDEV) --verbose integration stop $* test-integration-%: - cargo vdev --verbose integration test $* + $(VDEV) --verbose integration test $* ifeq ($(AUTODESPAWN), true) make test-integration-$*-cleanup endif @@ -451,7 +462,7 @@ bench-all: bench-remap-functions .PHONY: check check: ## Run prerequisite code checks - ${MAYBE_ENVIRONMENT_EXEC} cargo vdev check rust + ${MAYBE_ENVIRONMENT_EXEC} $(VDEV) check rust .PHONY: check-all check-all: ## Check everything @@ -461,47 +472,47 @@ check-all: check-scripts check-deny check-component-docs check-licenses .PHONY: check-component-features check-component-features: ## Check that all component features are setup properly - ${MAYBE_ENVIRONMENT_EXEC} cargo vdev check component-features + ${MAYBE_ENVIRONMENT_EXEC} $(VDEV) check component-features .PHONY: check-clippy check-clippy: ## Check code with Clippy - ${MAYBE_ENVIRONMENT_EXEC} cargo vdev check rust + ${MAYBE_ENVIRONMENT_EXEC} $(VDEV) check rust .PHONY: check-docs check-docs: ## Check that all /docs file are valid - ${MAYBE_ENVIRONMENT_EXEC} cargo vdev check 
docs + ${MAYBE_ENVIRONMENT_EXEC} $(VDEV) check docs .PHONY: check-fmt check-fmt: ## Check that all files are formatted properly - ${MAYBE_ENVIRONMENT_EXEC} cargo vdev check fmt + ${MAYBE_ENVIRONMENT_EXEC} $(VDEV) check fmt .PHONY: check-licenses check-licenses: ## Check that the 3rd-party license file is up to date - ${MAYBE_ENVIRONMENT_EXEC} cargo vdev check licenses + ${MAYBE_ENVIRONMENT_EXEC} $(VDEV) check licenses .PHONY: check-markdown check-markdown: ## Check that markdown is styled properly - ${MAYBE_ENVIRONMENT_EXEC} cargo vdev check markdown + ${MAYBE_ENVIRONMENT_EXEC} $(VDEV) check markdown .PHONY: check-examples check-examples: ## Check that the config/examples files are valid - ${MAYBE_ENVIRONMENT_EXEC} cargo vdev check examples + ${MAYBE_ENVIRONMENT_EXEC} $(VDEV) check examples .PHONY: check-scripts check-scripts: ## Check that scripts do not have common mistakes - ${MAYBE_ENVIRONMENT_EXEC} cargo vdev check scripts + ${MAYBE_ENVIRONMENT_EXEC} $(VDEV) check scripts .PHONY: check-deny check-deny: ## Check advisories licenses and sources for crate dependencies - ${MAYBE_ENVIRONMENT_EXEC} cargo vdev check deny + ${MAYBE_ENVIRONMENT_EXEC} $(VDEV) check deny .PHONY: check-events check-events: ## Check that events satisfy patterns set in https://github.com/vectordotdev/vector/blob/master/rfcs/2020-03-17-2064-event-driven-observability.md - ${MAYBE_ENVIRONMENT_EXEC} cargo vdev check events + ${MAYBE_ENVIRONMENT_EXEC} $(VDEV) check events .PHONY: check-component-docs check-component-docs: generate-component-docs ## Checks that the machine-generated component Cue docs are up-to-date. 
- ${MAYBE_ENVIRONMENT_EXEC} cargo vdev check component-docs + ${MAYBE_ENVIRONMENT_EXEC} $(VDEV) check component-docs ##@ Rustdoc build-rustdoc: ## Build Vector's Rustdocs @@ -522,7 +533,7 @@ target/artifacts/vector-${VERSION}-%.tar.gz: target/%/release/vector.tar.gz .PHONY: package package: build ## Build the Vector archive - ${MAYBE_ENVIRONMENT_EXEC} cargo vdev package archive + ${MAYBE_ENVIRONMENT_EXEC} $(VDEV) package archive .PHONY: package-x86_64-unknown-linux-gnu-all package-x86_64-unknown-linux-gnu-all: package-x86_64-unknown-linux-gnu package-deb-x86_64-unknown-linux-gnu package-rpm-x86_64-unknown-linux-gnu # Build all x86_64 GNU packages @@ -621,31 +632,31 @@ release: release-prepare generate release-commit ## Release a new Vector version .PHONY: release-commit release-commit: ## Commits release changes - @cargo vdev release commit + @$(VDEV) release commit .PHONY: release-docker release-docker: ## Release to Docker Hub - @cargo vdev release docker + @$(VDEV) release docker .PHONY: release-github release-github: ## Release to GitHub - @cargo vdev release github + @$(VDEV) release github .PHONY: release-homebrew release-homebrew: ## Release to vectordotdev Homebrew tap - @cargo vdev release homebrew --vector-version $(VECTOR_VERSION) + @$(VDEV) release homebrew --vector-version $(VECTOR_VERSION) .PHONY: release-prepare release-prepare: ## Prepares the release with metadata and highlights - @cargo vdev release prepare + @$(VDEV) release prepare .PHONY: release-push release-push: ## Push new Vector version - @cargo vdev release push + @$(VDEV) release push .PHONY: release-s3 release-s3: ## Release artifacts to S3 - @cargo vdev release s3 + @$(VDEV) release s3 .PHONY: sha256sum sha256sum: ## Generate SHA256 checksums of CI artifacts @@ -655,11 +666,11 @@ sha256sum: ## Generate SHA256 checksums of CI artifacts .PHONY: test-vrl test-vrl: ## Run the VRL test suite - @cargo vdev test-vrl + @$(VDEV) test-vrl .PHONY: compile-vrl-wasm compile-vrl-wasm: ## Compile VRL 
crates to WASM target - cargo vdev build vrl-wasm + $(VDEV) build vrl-wasm ##@ Utility @@ -669,13 +680,13 @@ clean: environment-clean ## Clean everything .PHONY: generate-kubernetes-manifests generate-kubernetes-manifests: ## Generate Kubernetes manifests from latest Helm chart - cargo vdev build manifests + $(VDEV) build manifests .PHONY: generate-component-docs generate-component-docs: ## Generate per-component Cue docs from the configuration schema. ${MAYBE_ENVIRONMENT_EXEC} cargo build $(if $(findstring true,$(CI)),--quiet,) target/debug/vector generate-schema > /tmp/vector-config-schema.json 2>/dev/null - ${MAYBE_ENVIRONMENT_EXEC} cargo vdev build component-docs /tmp/vector-config-schema.json \ + ${MAYBE_ENVIRONMENT_EXEC} $(VDEV) build component-docs /tmp/vector-config-schema.json \ $(if $(findstring true,$(CI)),>/dev/null,) .PHONY: signoff @@ -684,7 +695,7 @@ signoff: ## Signsoff all previous commits since branch creation .PHONY: version version: ## Get the current Vector version - @cargo vdev version + @$(VDEV) version .PHONY: git-hooks git-hooks: ## Add Vector-local git hooks for commit sign-off @@ -697,16 +708,16 @@ cargo-install-%: .PHONY: ci-generate-publish-metadata ci-generate-publish-metadata: ## Generates the necessary metadata required for building/publishing Vector. 
- cargo vdev build publish-metadata + $(VDEV) build publish-metadata .PHONY: clippy-fix clippy-fix: - ${MAYBE_ENVIRONMENT_EXEC} cargo vdev check rust --fix + ${MAYBE_ENVIRONMENT_EXEC} $(VDEV) check rust --fix .PHONY: fmt fmt: - ${MAYBE_ENVIRONMENT_EXEC} cargo vdev fmt + ${MAYBE_ENVIRONMENT_EXEC} $(VDEV) fmt .PHONY: build-licenses build-licenses: - ${MAYBE_ENVIRONMENT_EXEC} cargo vdev build licenses + ${MAYBE_ENVIRONMENT_EXEC} $(VDEV) build licenses From 250de61049b4862586ddd1885057324c16bccfa4 Mon Sep 17 00:00:00 2001 From: elkh510 <60512579+elkh510@users.noreply.github.com> Date: Tue, 9 Dec 2025 20:02:13 +0200 Subject: [PATCH 136/227] feat(amqp source): Configure prefetch count (#24138) * fix * fix * fix * fix * fix * fix * fix * fix * fix --- changelog.d/21037_amqp_prefetch.feature.md | 3 +++ src/sources/amqp.rs | 24 ++++++++++++++++++- .../components/sources/generated/amqp.cue | 15 ++++++++++++ 3 files changed, 41 insertions(+), 1 deletion(-) create mode 100644 changelog.d/21037_amqp_prefetch.feature.md diff --git a/changelog.d/21037_amqp_prefetch.feature.md b/changelog.d/21037_amqp_prefetch.feature.md new file mode 100644 index 0000000000000..359bc56eab7e5 --- /dev/null +++ b/changelog.d/21037_amqp_prefetch.feature.md @@ -0,0 +1,3 @@ +Added a new `prefetch_count` option to the AMQP source configuration. This allows limiting the number of in-flight (unacknowledged) messages per consumer using RabbitMQ's prefetch mechanism (`basic.qos`). Setting this value helps control memory usage and load when processing messages slowly. 
+ +authors: elkh510 diff --git a/src/sources/amqp.rs b/src/sources/amqp.rs index b9928606a7341..a0be621b0f7b8 100644 --- a/src/sources/amqp.rs +++ b/src/sources/amqp.rs @@ -7,7 +7,7 @@ use bytes::Bytes; use chrono::{TimeZone, Utc}; use futures::{FutureExt, StreamExt}; use futures_util::Stream; -use lapin::{Channel, acker::Acker, message::Delivery}; +use lapin::{Channel, acker::Acker, message::Delivery, options::BasicQosOptions}; use snafu::Snafu; use tokio_util::codec::FramedRead; use vector_lib::{ @@ -100,6 +100,17 @@ pub struct AmqpSourceConfig { #[configurable(derived)] #[serde(default, deserialize_with = "bool_or_struct")] pub(crate) acknowledgements: SourceAcknowledgementsConfig, + + /// Maximum number of unacknowledged messages the broker will deliver to this consumer. + /// + /// This controls flow control via AMQP QoS prefetch. Lower values limit memory usage and + /// prevent overwhelming slow consumers, but may reduce throughput. Higher values increase + /// throughput but consume more memory. + /// + /// If not set, the broker/client default applies (often unlimited). + #[serde(default)] + #[configurable(metadata(docs::examples = 100))] + pub(crate) prefetch_count: Option, } fn default_queue() -> String { @@ -422,6 +433,17 @@ async fn run_amqp_source( let (finalizer, mut ack_stream) = UnorderedFinalizer::::maybe_new(acknowledgements, Some(shutdown.clone())); + // Apply AMQP QoS (prefetch) before starting consumption. 
+ if let Some(count) = config.prefetch_count { + // per-consumer prefetch (global = false) + channel + .basic_qos(count, BasicQosOptions { global: false }) + .await + .map_err(|error| { + error!(message = "Failed to apply basic_qos.", ?error); + })?; + } + debug!("Starting amqp source, listening to queue {}.", config.queue); let mut consumer = channel .basic_consume( diff --git a/website/cue/reference/components/sources/generated/amqp.cue b/website/cue/reference/components/sources/generated/amqp.cue index 366c1c0462b6d..ef71f410f0e29 100644 --- a/website/cue/reference/components/sources/generated/amqp.cue +++ b/website/cue/reference/components/sources/generated/amqp.cue @@ -559,6 +559,21 @@ generated: components: sources: amqp: configuration: { required: false type: string: default: "offset" } + prefetch_count: { + description: """ + Maximum number of unacknowledged messages the broker will deliver to this consumer. + + This controls flow control via AMQP QoS prefetch. Lower values limit memory usage and + prevent overwhelming slow consumers, but may reduce throughput. Higher values increase + throughput but consume more memory. + + If not set, the broker/client default applies (often unlimited). + """ + required: false + type: uint: examples: [ + 100, + ] + } queue: { description: "The name of the queue to consume." required: false From cf6e3293a859c04c50e63d34c857ed183fa5bea5 Mon Sep 17 00:00:00 2001 From: Bruce Guenter Date: Tue, 9 Dec 2025 12:14:58 -0600 Subject: [PATCH 137/227] chore(performance): Refactor `EventMetadata` deserialization from protobuf (#24336) The existing code would create an `EventMetadata::default()` and then fill in the fields coming from the protobuf metadata. Unfortunately, this default creates several non-empty values, notably the `source_event_id`, which makes that construct take extra time, particularly since the incoming data will (almost?) always already have a `source_event_id` if the origin is another Vector instance. 
While refactoring, I also changed the method to use destructuring to access the fields inside the source value to force the compiler to error if any fields are added later. --- lib/vector-core/src/event/metadata.rs | 8 ++-- lib/vector-core/src/event/proto.rs | 66 ++++++++++++++------------- 2 files changed, 38 insertions(+), 36 deletions(-) diff --git a/lib/vector-core/src/event/metadata.rs b/lib/vector-core/src/event/metadata.rs index cd9cecbe5501c..a4096eea5a302 100644 --- a/lib/vector-core/src/event/metadata.rs +++ b/lib/vector-core/src/event/metadata.rs @@ -40,7 +40,7 @@ pub(super) struct Inner { pub(crate) secrets: Secrets, #[serde(default, skip)] - finalizers: EventFinalizers, + pub(crate) finalizers: EventFinalizers, /// The id of the source pub(crate) source_id: Option>, @@ -60,7 +60,7 @@ pub(super) struct Inner { /// /// TODO(Jean): must not skip serialization to track schemas across restarts. #[serde(default = "default_schema_definition", skip)] - schema_definition: Arc, + pub(crate) schema_definition: Arc, /// A store of values that may be dropped during the encoding process but may be needed /// later on. The map is indexed by meaning. @@ -68,7 +68,7 @@ pub(super) struct Inner { /// we need to ensure it is still available later on for emitting metrics tagged by the service. /// This field could almost be keyed by `&'static str`, but because it needs to be deserializable /// we have to use `String`. - dropped_fields: ObjectMap, + pub(crate) dropped_fields: ObjectMap, /// Metadata to track the origin of metrics. This is always `None` for log and trace events. /// Only a small set of Vector sources and transforms explicitly set this field. 
@@ -264,7 +264,7 @@ impl Default for EventMetadata { } } -fn default_schema_definition() -> Arc { +pub(super) fn default_schema_definition() -> Arc { Arc::new(schema::Definition::new_with_default_metadata( Kind::any(), [LogNamespace::Legacy, LogNamespace::Vector], diff --git a/lib/vector-core/src/event/proto.rs b/lib/vector-core/src/event/proto.rs index 548065447c291..6a4796906d287 100644 --- a/lib/vector-core/src/event/proto.rs +++ b/lib/vector-core/src/event/proto.rs @@ -16,6 +16,8 @@ pub use metric::Value as MetricValue; pub use proto_event::*; use vrl::value::{ObjectMap, Value as VrlValue}; +use super::EventFinalizers; +use super::metadata::{Inner, default_schema_definition}; use super::{EventMetadata, array, metric::MetricSketch}; impl event_array::Events { @@ -644,49 +646,49 @@ impl From for Metadata { impl From for EventMetadata { fn from(value: Metadata) -> Self { - let mut metadata = EventMetadata::default(); - - if let Some(value) = value.value.and_then(decode_value) { - *metadata.value_mut() = value; - } - - if let Some(source_id) = value.source_id { - metadata.set_source_id(Arc::new(source_id.into())); - } - - if let Some(source_type) = value.source_type { - metadata.set_source_type(source_type); - } - - if let Some(upstream_id) = value.upstream_id { - metadata.set_upstream_id(Arc::new(upstream_id.into())); - } - - if let Some(secrets) = value.secrets { - metadata.secrets_mut().merge(secrets.into()); - } - - if let Some(origin_metadata) = value.datadog_origin_metadata { - metadata = metadata.with_origin_metadata(origin_metadata.into()); - } - - let maybe_source_event_id = if value.source_event_id.is_empty() { + let Metadata { + value: metadata_value, + source_id, + source_type, + upstream_id, + secrets, + datadog_origin_metadata, + source_event_id, + } = value; + + let metadata_value = metadata_value.and_then(decode_value); + let source_id = source_id.map(|s| Arc::new(s.into())); + let upstream_id = upstream_id.map(|id| Arc::new(id.into())); + let 
secrets = secrets.map(Into::into); + let datadog_origin_metadata = datadog_origin_metadata.map(Into::into); + let source_event_id = if source_event_id.is_empty() { None } else { - match Uuid::from_slice(&value.source_event_id) { + match Uuid::from_slice(&source_event_id) { Ok(id) => Some(id), Err(error) => { error!( - message = "Failed to parse source_event_id: {}", - %error + %error, + source_event_id = %String::from_utf8_lossy(&source_event_id), + "Failed to parse source_event_id.", ); None } } }; - metadata = metadata.with_source_event_id(maybe_source_event_id); - metadata + EventMetadata(Arc::new(Inner { + value: metadata_value.unwrap_or_else(|| vrl::value::Value::Object(ObjectMap::new())), + secrets: secrets.unwrap_or_default(), + finalizers: EventFinalizers::default(), + source_id, + source_type: source_type.map(Into::into), + upstream_id, + schema_definition: default_schema_definition(), + dropped_fields: ObjectMap::new(), + datadog_origin_metadata, + source_event_id, + })) } } From f1eecd0e778784bab08fd72c6e33b0b5631e2d79 Mon Sep 17 00:00:00 2001 From: skipper Date: Tue, 9 Dec 2025 11:42:57 -0800 Subject: [PATCH 138/227] fix(kafka sink): upgrade rdkafka to 0.38.0 to resolve idempotent-producer fatal "Inconsistent state" stalls (#24197) * chore(deps): update rdkafka to 0.38.0 Signed-off-by: hansedong * cargo vdev build licenses * Use clang in cross/bootstrap-ubuntu.sh --------- Signed-off-by: hansedong Co-authored-by: Thomas --- Cargo.lock | 79 +++++++++++++++++++++++++------ Cargo.toml | 2 +- scripts/cross/bootstrap-ubuntu.sh | 4 +- 3 files changed, 68 insertions(+), 17 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d35cde9c8e42d..012c49206b7bc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1929,6 +1929,24 @@ dependencies = [ "serde", ] +[[package]] +name = "bindgen" +version = "0.72.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "993776b509cfb49c750f11b8f07a46fa23e0a1386ffc01fb1e7d343efc387895" +dependencies = [ + 
"bitflags 2.9.0", + "cexpr", + "clang-sys", + "itertools 0.13.0", + "proc-macro2 1.0.101", + "quote 1.0.40", + "regex", + "rustc-hash", + "shlex", + "syn 2.0.106", +] + [[package]] name = "bit-set" version = "0.8.0" @@ -2337,6 +2355,15 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6d43a04d8753f35258c91f8ec639f792891f748a1edbd759cf1dcea3382ad83c" +[[package]] +name = "cexpr" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6fac387a98bb7c37292057cffc56d62ecb629900026402633ae9160df93a8766" +dependencies = [ + "nom 7.1.3", +] + [[package]] name = "cfb-mode" version = "0.8.2" @@ -2462,6 +2489,17 @@ dependencies = [ "zeroize", ] +[[package]] +name = "clang-sys" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b023947811758c97c59bf9d1c188fd619ad4718dcaa767947df1cadb14f39f4" +dependencies = [ + "glob", + "libc", + "libloading", +] + [[package]] name = "clap" version = "4.5.48" @@ -2544,9 +2582,9 @@ dependencies = [ [[package]] name = "cmake" -version = "0.1.50" +version = "0.1.54" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a31c789563b815f77f4250caee12365734369f942439b7defd71e18a48197130" +checksum = "e7caa3f9de89ddbe2c607f4101924c5abec803763ae9534e4f4d7d8f84aa81f0" dependencies = [ "cc", ] @@ -3148,9 +3186,9 @@ dependencies = [ [[package]] name = "curl-sys" -version = "0.4.74+curl-8.9.0" +version = "0.4.84+curl-8.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8af10b986114528fcdc4b63b6f5f021b7057618411046a4de2ba0f0149a097bf" +checksum = "abc4294dc41b882eaff37973c2ec3ae203d0091341ee68fbadd1d06e0c18a73b" dependencies = [ "cc", "libc", @@ -3158,7 +3196,7 @@ dependencies = [ "openssl-sys", "pkg-config", "vcpkg", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -6312,6 +6350,16 @@ dependencies = [ "pkg-config", ] +[[package]] +name = 
"libloading" +version = "0.8.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d7c4b02199fee7c5d21a5ae7d8cfa79a6ef5bb2fc834d6e9058e89c825efdc55" +dependencies = [ + "cfg-if", + "windows-link 0.2.0", +] + [[package]] name = "libm" version = "0.2.8" @@ -8077,9 +8125,9 @@ dependencies = [ [[package]] name = "pkg-config" -version = "0.3.27" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26072860ba924cbfa98ea39c8c19b4dd6a4a25423dbdf219c1eca91aa0cf6964" +checksum = "7edddbd0b52d732b21ad9a5fab5c704c14cd949e5e9a1ec5929a24fded1b904c" [[package]] name = "platforms" @@ -9042,9 +9090,9 @@ dependencies = [ [[package]] name = "rdkafka" -version = "0.37.0" +version = "0.38.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14b52c81ac3cac39c9639b95c20452076e74b8d9a71bc6fc4d83407af2ea6fff" +checksum = "5f1856d72dbbbea0d2a5b2eaf6af7fb3847ef2746e883b11781446a51dbc85c0" dependencies = [ "futures-channel", "futures-util", @@ -9060,9 +9108,9 @@ dependencies = [ [[package]] name = "rdkafka-sys" -version = "4.8.0+2.3.0" +version = "4.9.0+2.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ced38182dc436b3d9df0c77976f37a67134df26b050df1f0006688e46fc4c8be" +checksum = "5230dca48bc354d718269f3e4353280e188b610f7af7e2fcf54b7a79d5802872" dependencies = [ "cmake", "curl-sys", @@ -9615,9 +9663,9 @@ checksum = "56f7d92ca342cea22a06f2121d944b4fd82af56988c270852495420f961d4ace" [[package]] name = "rustc-hash" -version = "2.0.0" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "583034fd73374156e66797ed8e5b0d5690409c9226b22d87cb7f19821c05d152" +checksum = "357703d41365b4b27c590e3ed91eabb1b663f07c4c084095e60cbed4362dff0d" [[package]] name = "rustc_version" @@ -14210,10 +14258,11 @@ dependencies = [ [[package]] name = "zstd-sys" -version = "2.0.13+zstd.1.5.6" +version = "2.0.16+zstd.1.5.7" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "38ff0f21cfee8f97d94cef41359e0c89aa6113028ab0291aa8ca0038995a95aa" +checksum = "91e19ebc2adc8f83e43039e79776e3fda8ca919132d68a1fed6a5faca2683748" dependencies = [ + "bindgen", "cc", "pkg-config", ] diff --git a/Cargo.toml b/Cargo.toml index dcf035c37f089..3685d7c9b2c9a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -401,7 +401,7 @@ pulsar = { version = "6.3.1", default-features = false, features = ["tokio-runti quick-junit = { version = "0.5.1" } rand.workspace = true rand_distr.workspace = true -rdkafka = { version = "0.37.0", default-features = false, features = ["curl-static", "tokio", "libz", "ssl", "zstd"], optional = true } +rdkafka = { version = "0.38.0", default-features = false, features = ["curl-static", "tokio", "libz", "ssl", "zstd"], optional = true } redis = { version = "0.32.4", default-features = false, features = ["connection-manager", "sentinel", "tokio-comp", "tokio-native-tls-comp"], optional = true } regex.workspace = true roaring = { version = "0.11.2", default-features = false, features = ["std"], optional = true } diff --git a/scripts/cross/bootstrap-ubuntu.sh b/scripts/cross/bootstrap-ubuntu.sh index cf053aa365b20..88e208ae2ea56 100755 --- a/scripts/cross/bootstrap-ubuntu.sh +++ b/scripts/cross/bootstrap-ubuntu.sh @@ -20,8 +20,10 @@ wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key| apt-key add - apt-get update -# needed by onig_sys +# unzip is needed for protoc +# clang/llvm is needed due to bindgen (zstd-sys/onig_sys) apt-get install -y \ libclang1-9 \ llvm-9 \ + clang \ unzip From a7996cec4d7268dae610e1f9fca8804cd129955e Mon Sep 17 00:00:00 2001 From: Jansen Date: Tue, 9 Dec 2025 13:06:21 -0700 Subject: [PATCH 139/227] chore(performance): EventMetadata UUID generation optimizations (#24358) * Enable fast-rng feature for UUID generation * Use UUID v4 generation instead of V7 v7 are created using a timestamp so they can be ordered, however this comes at a performance cost. 
We currently don't need to order these UUID's, so for now we can use v4. * Add regression test for disk buffer v2 implementation * Rename metadata test for UUID v4 * Remove unused uuid v7 feature * Add uuid v7 feature back [Websocket server sink uses it](https://github.com/vectordotdev/vector/blob/72e09673fda9d6fbf933adacea1220bdfae162a8/src/sinks/websocket_server/buffering.rs#L235) for time ordered replays in case the connection drops * Replace source event ID if none when merging event metadata * Update test to describe behavior with UUID v4 --- Cargo.toml | 2 +- lib/vector-core/src/event/log_event.rs | 6 +-- lib/vector-core/src/event/metadata.rs | 15 +++----- .../http_to_http_disk_buffer/experiment.yaml | 11 ++++++ .../lading/lading.yaml | 21 +++++++++++ .../vector/vector.yaml | 37 +++++++++++++++++++ 6 files changed, 78 insertions(+), 14 deletions(-) create mode 100644 regression/cases/http_to_http_disk_buffer/experiment.yaml create mode 100644 regression/cases/http_to_http_disk_buffer/lading/lading.yaml create mode 100644 regression/cases/http_to_http_disk_buffer/vector/vector.yaml diff --git a/Cargo.toml b/Cargo.toml index 3685d7c9b2c9a..fa99780782eb1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -194,7 +194,7 @@ tonic-build = { version = "0.11", default-features = false, features = ["transpo tracing = { version = "0.1.34", default-features = false } tracing-subscriber = { version = "0.3.20", default-features = false, features = ["fmt"] } url = { version = "2.5.4", default-features = false, features = ["serde"] } -uuid = { version = "1.18.1", features = ["v4", "v7", "serde"] } +uuid = { version = "1.18.1", features = ["v4", "v7", "serde", "fast-rng"] } vector-config = { path = "lib/vector-config" } vector-config-common = { path = "lib/vector-config-common" } vector-config-macros = { path = "lib/vector-config-macros" } diff --git a/lib/vector-core/src/event/log_event.rs b/lib/vector-core/src/event/log_event.rs index 1f507cf9ec881..336e15a43ab57 100644 --- 
a/lib/vector-core/src/event/log_event.rs +++ b/lib/vector-core/src/event/log_event.rs @@ -1200,15 +1200,15 @@ mod test { } #[test] - fn metadata_set_unique_uuid_v7_source_event_id() { - // Check if event id is UUID v7 + fn metadata_set_unique_uuid_v4_source_event_id() { + // Check if event id is UUID v4 let log1 = LogEvent::default(); assert_eq!( log1.metadata() .source_event_id() .expect("source_event_id should be auto-generated for new events") .get_version(), - Some(Version::SortRand) + Some(Version::Random) ); // Check if event id is unique on creation diff --git a/lib/vector-core/src/event/metadata.rs b/lib/vector-core/src/event/metadata.rs index a4096eea5a302..f860b03bb207b 100644 --- a/lib/vector-core/src/event/metadata.rs +++ b/lib/vector-core/src/event/metadata.rs @@ -253,7 +253,7 @@ impl Default for Inner { upstream_id: None, dropped_fields: ObjectMap::new(), datadog_origin_metadata: None, - source_event_id: Some(Uuid::now_v7()), + source_event_id: Some(Uuid::new_v4()), } } } @@ -348,14 +348,8 @@ impl EventMetadata { inner.secrets.merge(other.secrets); // Update `source_event_id` if necessary. - match (inner.source_event_id, other.source_event_id) { - (None, Some(id)) => { - inner.source_event_id = Some(id); - } - (Some(uuid1), Some(uuid2)) if uuid2 < uuid1 => { - inner.source_event_id = Some(uuid2); - } - _ => {} // Keep the existing value. + if inner.source_event_id.is_none() { + inner.source_event_id = other.source_event_id; } } @@ -561,6 +555,7 @@ mod test { let m1 = EventMetadata::default(); let m2 = EventMetadata::default(); + // Always maintain the original source event id when merging, similar to how we handle other metadata. 
{ let mut merged = m1.clone(); merged.merge(m2.clone()); @@ -570,7 +565,7 @@ mod test { { let mut merged = m2.clone(); merged.merge(m1.clone()); - assert_eq!(merged.source_event_id(), m1.source_event_id()); + assert_eq!(merged.source_event_id(), m2.source_event_id()); } } } diff --git a/regression/cases/http_to_http_disk_buffer/experiment.yaml b/regression/cases/http_to_http_disk_buffer/experiment.yaml new file mode 100644 index 0000000000000..e7faba7f54b2a --- /dev/null +++ b/regression/cases/http_to_http_disk_buffer/experiment.yaml @@ -0,0 +1,11 @@ +optimization_goal: ingress_throughput + +target: + name: vector + command: /usr/bin/vector + cpu_allotment: 6 + memory_allotment: 8GiB + + environment: + VECTOR_THREADS: 4 + diff --git a/regression/cases/http_to_http_disk_buffer/lading/lading.yaml b/regression/cases/http_to_http_disk_buffer/lading/lading.yaml new file mode 100644 index 0000000000000..a5b1c01a5c222 --- /dev/null +++ b/regression/cases/http_to_http_disk_buffer/lading/lading.yaml @@ -0,0 +1,21 @@ +generator: + - http: + seed: [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, + 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131] + target_uri: "http://localhost:8282/" + bytes_per_second: "100 Mb" + parallel_connections: 10 + method: + post: + variant: "ascii" + maximum_prebuild_cache_size_bytes: "256 Mb" + headers: {} + +blackhole: + - http: + binding_addr: "0.0.0.0:8080" + +target_metrics: + - prometheus: # internal telemetry + uri: "http://127.0.0.1:9090/metrics" + diff --git a/regression/cases/http_to_http_disk_buffer/vector/vector.yaml b/regression/cases/http_to_http_disk_buffer/vector/vector.yaml new file mode 100644 index 0000000000000..cd675e05c5ce7 --- /dev/null +++ b/regression/cases/http_to_http_disk_buffer/vector/vector.yaml @@ -0,0 +1,37 @@ +data_dir: "/var/lib/vector" + +## +## Sources +## + +sources: + internal_metrics: + type: "internal_metrics" + + http_source: + type: "http" + acknowledgements: true + address: 
"0.0.0.0:8282" + +## +## Sinks +## + +sinks: + prometheus: + type: "prometheus_exporter" + inputs: [ "internal_metrics" ] + address: "0.0.0.0:9090" + + http_sink: + type: "http" + inputs: [ "http_source" ] + uri: "http://localhost:8080" + encoding: + codec: "text" + healthcheck: + enabled: false + buffer: + type: "disk" + max_size: 4294967296 # 4GB + From 538c833d2f5c6529ba1df7b02f4bb73e60b2d778 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 10 Dec 2025 11:38:11 -0500 Subject: [PATCH 140/227] chore(ci): bump actions/checkout from 5.0.0 to 6.0.0 (#24322) Bumps [actions/checkout](https://github.com/actions/checkout) from 5.0.0 to 6.0.0. - [Release notes](https://github.com/actions/checkout/releases) - [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/checkout/compare/08c6903cd8c0fde910a37f88322edcfb5dd907a8...1af3b93b6815bc44a9784bd300feb67ff0d1eeb3) --- updated-dependencies: - dependency-name: actions/checkout dependency-version: 6.0.0 dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/build-test-runner.yml | 2 +- .github/workflows/changelog.yaml | 4 +-- .github/workflows/changes.yml | 6 ++-- .github/workflows/ci-integration-review.yml | 4 +-- .github/workflows/cli.yml | 4 +-- .github/workflows/compilation-timings.yml | 10 +++--- .github/workflows/component_features.yml | 4 +-- .github/workflows/cross.yml | 4 +-- .github/workflows/deny.yml | 2 +- .github/workflows/environment.yml | 4 +-- .../gardener_remove_waiting_author.yml | 2 +- .github/workflows/install-sh.yml | 4 +-- .github/workflows/integration-test.yml | 4 +-- .github/workflows/integration.yml | 4 +-- .github/workflows/k8s_e2e.yml | 8 ++--- .github/workflows/msrv.yml | 2 +- .github/workflows/protobuf.yml | 2 +- .github/workflows/publish-homebrew.yml | 2 +- .github/workflows/publish.yml | 36 +++++++++---------- .github/workflows/regression.yml | 18 +++++----- .github/workflows/scorecard.yml | 2 +- .github/workflows/test-make-command.yml | 4 +-- .github/workflows/test.yml | 24 ++++++------- .github/workflows/unit_mac.yml | 4 +-- .github/workflows/unit_windows.yml | 4 +-- .github/workflows/vdev_publish.yml | 2 +- 26 files changed, 83 insertions(+), 83 deletions(-) diff --git a/.github/workflows/build-test-runner.yml b/.github/workflows/build-test-runner.yml index d7dce608294f6..f8fc383e4c76b 100644 --- a/.github/workflows/build-test-runner.yml +++ b/.github/workflows/build-test-runner.yml @@ -24,7 +24,7 @@ jobs: build: runs-on: ubuntu-24.04 steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: ref: ${{ inputs.checkout_ref || inputs.commit_sha }} diff --git a/.github/workflows/changelog.yaml b/.github/workflows/changelog.yaml index a3534947a1c2e..390dfcfc09bb5 100644 --- a/.github/workflows/changelog.yaml +++ b/.github/workflows/changelog.yaml @@ 
-42,7 +42,7 @@ jobs: exit 0 # Checkout changelog script and changelog.d/ from master - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 if: env.SHOULD_RUN == 'true' with: ref: master @@ -52,7 +52,7 @@ jobs: sparse-checkout-cone-mode: false # Checkout PR's changelog.d/ into tmp/ - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 if: env.SHOULD_RUN == 'true' with: repository: ${{ github.event.pull_request.head.repo.full_name }} diff --git a/.github/workflows/changes.yml b/.github/workflows/changes.yml index 65aa38f9c4e9a..e3315c657198b 100644 --- a/.github/workflows/changes.yml +++ b/.github/workflows/changes.yml @@ -180,7 +180,7 @@ jobs: unit_mac-yml: ${{ steps.filter.outputs.unit_mac-yml }} unit_windows-yml: ${{ steps.filter.outputs.unit_windows-yml }} steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36 # v3.0.2 id: filter @@ -323,7 +323,7 @@ jobs: webhdfs: ${{ steps.filter.outputs.webhdfs }} any: ${{ steps.detect-changes.outputs.any }} steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - uses: ./.github/actions/setup with: @@ -412,7 +412,7 @@ jobs: opentelemetry-metrics: ${{ steps.filter.outputs.opentelemetry-metrics }} any: ${{ steps.detect-changes.outputs.any }} steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - uses: ./.github/actions/setup with: diff --git a/.github/workflows/ci-integration-review.yml b/.github/workflows/ci-integration-review.yml index 
0bc2425927877..79958b5a9d89d 100644 --- a/.github/workflows/ci-integration-review.yml +++ b/.github/workflows/ci-integration-review.yml @@ -105,7 +105,7 @@ jobs: "redis", "splunk", "webhdfs" ] steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: "recursive" ref: ${{ github.event.review.commit_id }} @@ -141,7 +141,7 @@ jobs: "datadog-logs", "datadog-metrics", "opentelemetry-logs", "opentelemetry-metrics" ] steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: "recursive" ref: ${{ github.event.review.commit_id }} diff --git a/.github/workflows/cli.yml b/.github/workflows/cli.yml index 80bb2d3002e0d..38b968f6dda31 100644 --- a/.github/workflows/cli.yml +++ b/.github/workflows/cli.yml @@ -24,13 +24,13 @@ jobs: - name: (PR review) Checkout review SHA if: ${{ github.event_name == 'pull_request_review' }} - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: ref: ${{ github.event.review.commit_id }} - name: Checkout branch if: ${{ github.event_name != 'pull_request_review' }} - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - name: Cache Cargo registry + index uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0 diff --git a/.github/workflows/compilation-timings.yml b/.github/workflows/compilation-timings.yml index 2b1c95d3a685e..4f147fdc1e4d9 100644 --- a/.github/workflows/compilation-timings.yml +++ b/.github/workflows/compilation-timings.yml @@ -16,7 +16,7 @@ jobs: runs-on: ubuntu-24.04-8core steps: - uses: colpal/actions-clean@36e6ca1abd35efe61cb60f912bd7837f67887c8a # v1.1.1 - - uses: 
actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - run: sudo -E bash scripts/environment/bootstrap-ubuntu-24.04.sh - run: bash scripts/environment/prepare.sh --modules=rustup - run: cargo clean @@ -32,7 +32,7 @@ jobs: PROFILE: debug steps: - uses: colpal/actions-clean@36e6ca1abd35efe61cb60f912bd7837f67887c8a # v1.1.1 - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - run: sudo -E bash scripts/environment/bootstrap-ubuntu-24.04.sh - run: bash scripts/environment/prepare.sh --modules=rustup - run: cargo clean @@ -43,7 +43,7 @@ jobs: runs-on: ubuntu-24.04-8core steps: - uses: colpal/actions-clean@36e6ca1abd35efe61cb60f912bd7837f67887c8a # v1.1.1 - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - run: sudo -E bash scripts/environment/bootstrap-ubuntu-24.04.sh - run: bash scripts/environment/prepare.sh --modules=rustup - run: cargo clean @@ -54,7 +54,7 @@ jobs: runs-on: ubuntu-24.04-8core steps: - uses: colpal/actions-clean@36e6ca1abd35efe61cb60f912bd7837f67887c8a # v1.1.1 - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - run: sudo -E bash scripts/environment/bootstrap-ubuntu-24.04.sh - run: bash scripts/environment/prepare.sh --modules=rustup - run: cargo clean @@ -67,7 +67,7 @@ jobs: runs-on: ubuntu-24.04-8core steps: - uses: colpal/actions-clean@36e6ca1abd35efe61cb60f912bd7837f67887c8a # v1.1.1 - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - run: sudo -E bash scripts/environment/bootstrap-ubuntu-24.04.sh - run: bash scripts/environment/prepare.sh 
--modules=rustup - run: cargo clean diff --git a/.github/workflows/component_features.yml b/.github/workflows/component_features.yml index 62d101d9bac4e..146239086a8b2 100644 --- a/.github/workflows/component_features.yml +++ b/.github/workflows/component_features.yml @@ -36,13 +36,13 @@ jobs: - name: (PR review) Checkout PR branch if: github.event_name == 'pull_request_review' - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: ref: ${{ github.event.review.commit_id }} - name: Checkout branch if: github.event_name != 'pull_request_review' - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - run: sudo -E bash scripts/environment/bootstrap-ubuntu-24.04.sh - run: bash scripts/environment/prepare.sh --modules=rustup diff --git a/.github/workflows/cross.yml b/.github/workflows/cross.yml index bc50fe8df8bbc..7468acf98fe36 100644 --- a/.github/workflows/cross.yml +++ b/.github/workflows/cross.yml @@ -36,13 +36,13 @@ jobs: - name: (PR review) Checkout PR branch if: ${{ github.event_name == 'pull_request_review' }} - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: ref: ${{ github.event.review.commit_id }} - name: Checkout branch if: ${{ github.event_name != 'pull_request_review' }} - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0 name: Cache Cargo registry + index diff --git a/.github/workflows/deny.yml b/.github/workflows/deny.yml index 5439d62f40a0b..4127dbb73dc56 100644 --- a/.github/workflows/deny.yml +++ b/.github/workflows/deny.yml @@ -39,7 +39,7 @@ jobs: env: CARGO_INCREMENTAL: 0 steps: - - 
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - uses: ./.github/actions/setup with: diff --git a/.github/workflows/environment.yml b/.github/workflows/environment.yml index 62db91799fb3d..69db4836a78c2 100644 --- a/.github/workflows/environment.yml +++ b/.github/workflows/environment.yml @@ -35,13 +35,13 @@ jobs: - name: (PR review) Checkout PR branch if: ${{ github.event_name == 'pull_request_review' }} - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: ref: ${{ github.event.review.commit_id }} - name: Checkout branch if: ${{ github.event_name != 'pull_request_review' }} - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - name: Set up QEMU uses: docker/setup-qemu-action@c7c53464625b32c7a7e944ae62b3e17d2b600130 # v3.7.0 diff --git a/.github/workflows/gardener_remove_waiting_author.yml b/.github/workflows/gardener_remove_waiting_author.yml index ce80b9c0d07fd..6366cc7b03e73 100644 --- a/.github/workflows/gardener_remove_waiting_author.yml +++ b/.github/workflows/gardener_remove_waiting_author.yml @@ -16,7 +16,7 @@ jobs: runs-on: ubuntu-24.04 timeout-minutes: 5 steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - uses: actions-ecosystem/action-remove-labels@2ce5d41b4b6aa8503e285553f75ed56e0a40bae0 # v1.3.0 with: labels: "meta: awaiting author" diff --git a/.github/workflows/install-sh.yml b/.github/workflows/install-sh.yml index 172204439191b..69b30d750dc00 100644 --- a/.github/workflows/install-sh.yml +++ b/.github/workflows/install-sh.yml @@ -28,13 +28,13 @@ jobs: - name: (PR comment) Checkout PR branch if: ${{ github.event_name == 'issue_comment' }} - 
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: ref: ${{ steps.comment-branch.outputs.head_ref }} - name: Checkout branch if: ${{ github.event_name != 'issue_comment' }} - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - run: sudo apt-get install --yes bc - run: bash distribution/install.sh -- -y diff --git a/.github/workflows/integration-test.yml b/.github/workflows/integration-test.yml index 257a5ec386831..8b62da0a08ec5 100644 --- a/.github/workflows/integration-test.yml +++ b/.github/workflows/integration-test.yml @@ -39,13 +39,13 @@ jobs: - name: (PR comment) Checkout PR branch if: ${{ github.event_name == 'issue_comment' }} - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: ref: ${{ steps.comment-branch.outputs.head_ref }} - name: Checkout branch if: ${{ github.event_name != 'issue_comment' }} - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - run: bash scripts/environment/prepare.sh --modules=rustup,datadog-ci diff --git a/.github/workflows/integration.yml b/.github/workflows/integration.yml index 2a1330335fe9b..44bb30601df66 100644 --- a/.github/workflows/integration.yml +++ b/.github/workflows/integration.yml @@ -79,7 +79,7 @@ jobs: ] timeout-minutes: 90 steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: "recursive" @@ -140,7 +140,7 @@ jobs: timeout-minutes: 90 steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: 
submodules: "recursive" diff --git a/.github/workflows/k8s_e2e.yml b/.github/workflows/k8s_e2e.yml index b24a93b42b9be..891edc7c23315 100644 --- a/.github/workflows/k8s_e2e.yml +++ b/.github/workflows/k8s_e2e.yml @@ -77,13 +77,13 @@ jobs: - name: (PR review) Checkout PR branch if: ${{ github.event_name == 'pull_request_review' }} - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: ref: ${{ github.event.review.commit_id }} - name: Checkout branch if: ${{ github.event_name != 'pull_request_review' }} - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0 with: @@ -198,13 +198,13 @@ jobs: - name: (PR review) Checkout PR branch if: ${{ github.event_name == 'pull_request_review' }} - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: ref: ${{ steps.comment-branch.outputs.head_ref }} - name: Checkout branch if: ${{ github.event_name != 'pull_request_review' }} - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 with: diff --git a/.github/workflows/msrv.yml b/.github/workflows/msrv.yml index 16def3ba54bd9..a44bc1018627a 100644 --- a/.github/workflows/msrv.yml +++ b/.github/workflows/msrv.yml @@ -19,7 +19,7 @@ jobs: runs-on: ubuntu-24.04 timeout-minutes: 45 steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: ref: ${{ inputs.checkout_ref }} - run: sudo -E bash scripts/environment/bootstrap-ubuntu-24.04.sh diff --git 
a/.github/workflows/protobuf.yml b/.github/workflows/protobuf.yml index b261f18b43631..6e56ea14c0d07 100644 --- a/.github/workflows/protobuf.yml +++ b/.github/workflows/protobuf.yml @@ -22,7 +22,7 @@ jobs: timeout-minutes: 5 steps: # Run `git checkout` - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 # Install the `buf` CLI - uses: bufbuild/buf-setup-action@a47c93e0b1648d5651a065437926377d060baa99 # v1.50.0 with: diff --git a/.github/workflows/publish-homebrew.yml b/.github/workflows/publish-homebrew.yml index 49a8269a7090f..47a1a7089d443 100644 --- a/.github/workflows/publish-homebrew.yml +++ b/.github/workflows/publish-homebrew.yml @@ -25,7 +25,7 @@ jobs: GITHUB_TOKEN: ${{ secrets.HOMEBREW_PAT }} steps: - name: Checkout Vector - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: ref: ${{ inputs.git_ref || github.ref_name }} diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index c1c50cc964902..f32d309e6f774 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -37,7 +37,7 @@ jobs: vector_release_channel: ${{ steps.generate-publish-metadata.outputs.vector_release_channel }} steps: - name: Checkout Vector - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: ref: ${{ inputs.git_ref }} - name: Generate publish metadata @@ -55,7 +55,7 @@ jobs: CHANNEL: ${{ needs.generate-publish-metadata.outputs.vector_release_channel }} steps: - name: Checkout Vector - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: ref: ${{ inputs.git_ref }} - name: Bootstrap runner environment (Ubuntu-specific) @@ -81,7 +81,7 @@ jobs: 
CHANNEL: ${{ needs.generate-publish-metadata.outputs.vector_release_channel }} steps: - name: Checkout Vector - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: ref: ${{ inputs.git_ref }} - name: Bootstrap runner environment (Ubuntu-specific) @@ -107,7 +107,7 @@ jobs: CHANNEL: ${{ needs.generate-publish-metadata.outputs.vector_release_channel }} steps: - name: Checkout Vector - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: ref: ${{ inputs.git_ref }} - name: Bootstrap runner environment (Ubuntu-specific) @@ -135,7 +135,7 @@ jobs: CHANNEL: ${{ needs.generate-publish-metadata.outputs.vector_release_channel }} steps: - name: Checkout Vector - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: ref: ${{ inputs.git_ref }} - name: Bootstrap runner environment (Ubuntu-specific) @@ -163,7 +163,7 @@ jobs: CHANNEL: ${{ needs.generate-publish-metadata.outputs.vector_release_channel }} steps: - name: Checkout Vector - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: ref: ${{ inputs.git_ref }} - name: Bootstrap runner environment (Ubuntu-specific) @@ -191,7 +191,7 @@ jobs: CHANNEL: ${{ needs.generate-publish-metadata.outputs.vector_release_channel }} steps: - name: Checkout Vector - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: ref: ${{ inputs.git_ref }} - name: Bootstrap runner environment (Ubuntu-specific) @@ -219,7 +219,7 @@ jobs: CHANNEL: ${{ needs.generate-publish-metadata.outputs.vector_release_channel }} steps: - name: Checkout Vector - uses: 
actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: ref: ${{ inputs.git_ref }} - name: Bootstrap runner environment (Ubuntu-specific) @@ -247,7 +247,7 @@ jobs: CHANNEL: ${{ needs.generate-publish-metadata.outputs.vector_release_channel }} steps: - name: Checkout Vector - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: ref: ${{ inputs.git_ref }} - name: Bootstrap runner environment (Ubuntu-specific) @@ -290,7 +290,7 @@ jobs: exit 1 fi - name: Checkout Vector - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: ref: ${{ inputs.git_ref }} - name: Bootstrap runner environment (macOS-specific) @@ -324,7 +324,7 @@ jobs: RELEASE_BUILDER: "true" steps: - name: Checkout Vector - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: ref: ${{ inputs.git_ref }} - name: Bootstrap runner environment (Windows-specific) @@ -394,7 +394,7 @@ jobs: - name: Fix Git safe directories issue when in containers (actions/checkout#760) run: git config --global --add safe.directory /__w/vector/vector - name: Checkout Vector - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: ref: ${{ inputs.git_ref }} - name: Download staged package artifacts (x86_64-unknown-linux-gnu) @@ -445,7 +445,7 @@ jobs: - name: Fix Git safe directories issue when in containers (actions/checkout#760) run: git config --global --add safe.directory /__w/vector/vector - name: Checkout Vector - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + uses: 
actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: ref: ${{ inputs.git_ref }} - name: Download staged package artifacts (x86_64-unknown-linux-gnu) @@ -474,7 +474,7 @@ jobs: runner: macos-14 steps: - name: Checkout Vector - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: ref: ${{ inputs.git_ref }} - name: Download staged package artifacts (${{ matrix.target }}) @@ -508,7 +508,7 @@ jobs: CHANNEL: ${{ needs.generate-publish-metadata.outputs.vector_release_channel }} steps: - name: Checkout Vector - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: ref: ${{ inputs.git_ref }} - name: Login to DockerHub @@ -607,7 +607,7 @@ jobs: CHANNEL: ${{ needs.generate-publish-metadata.outputs.vector_release_channel }} steps: - name: Checkout Vector - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: ref: ${{ inputs.git_ref }} - name: Download staged package artifacts (aarch64-unknown-linux-gnu) @@ -692,7 +692,7 @@ jobs: VECTOR_VERSION: ${{ needs.generate-publish-metadata.outputs.vector_version }} steps: - name: Checkout Vector - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: ref: ${{ inputs.git_ref }} - name: Download staged package artifacts (aarch64-unknown-linux-gnu) @@ -775,7 +775,7 @@ jobs: VECTOR_VERSION: ${{ needs.generate-publish-metadata.outputs.vector_version }} steps: - name: Checkout Vector - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: ref: ${{ inputs.git_ref }} - name: Download staged package artifacts 
(aarch64-unknown-linux-gnu) diff --git a/.github/workflows/regression.yml b/.github/workflows/regression.yml index 738183d153646..5e2d03427160b 100644 --- a/.github/workflows/regression.yml +++ b/.github/workflows/regression.yml @@ -51,7 +51,7 @@ jobs: smp-version: ${{ steps.experimental-meta.outputs.SMP_CRATE_VERSION }} steps: - name: Checkout repository - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: fetch-depth: 0 # need to pull repository history to find merge bases @@ -117,7 +117,7 @@ jobs: outputs: source_changed: ${{ steps.filter.outputs.SOURCE_CHANGED }} steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - name: Collect file changes id: changes @@ -192,9 +192,9 @@ jobs: steps: - uses: colpal/actions-clean@36e6ca1abd35efe61cb60f912bd7837f67887c8a # v1.1.1 - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: ref: ${{ needs.resolve-inputs.outputs.baseline-sha }} path: baseline-vector @@ -231,9 +231,9 @@ jobs: steps: - uses: colpal/actions-clean@36e6ca1abd35efe61cb60f912bd7837f67887c8a # v1.1.1 - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: ref: ${{ needs.resolve-inputs.outputs.comparison-sha }} path: comparison-vector @@ -373,7 +373,7 @@ jobs: - upload-baseline-image-to-ecr - upload-comparison-image-to-ecr steps: - - uses: 
actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: ref: ${{ needs.resolve-inputs.outputs.comparison-sha }} @@ -448,7 +448,7 @@ jobs: - should-run-gate - resolve-inputs steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - name: Configure AWS Credentials uses: aws-actions/configure-aws-credentials@a03048d87541d1d9fcf2ecf528a4a65ba9bd7838 # v5.0.0 @@ -485,7 +485,7 @@ jobs: - submit-job - resolve-inputs steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: ref: ${{ needs.resolve-inputs.outputs.comparison-sha }} diff --git a/.github/workflows/scorecard.yml b/.github/workflows/scorecard.yml index f4c44e40831ca..c351bb4c67072 100644 --- a/.github/workflows/scorecard.yml +++ b/.github/workflows/scorecard.yml @@ -32,7 +32,7 @@ jobs: steps: - name: "Checkout code" - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: persist-credentials: false diff --git a/.github/workflows/test-make-command.yml b/.github/workflows/test-make-command.yml index 555bda0fcba19..1cfde8a2dfad5 100644 --- a/.github/workflows/test-make-command.yml +++ b/.github/workflows/test-make-command.yml @@ -33,13 +33,13 @@ jobs: - name: (PR review) Checkout review SHA if: ${{ github.event_name == 'pull_request_review' }} - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: ref: ${{ github.event.review.commit_id }} - name: Checkout branch if: ${{ github.event_name != 'pull_request_review' }} - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + uses: 
actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0 name: Cache Cargo registry + index diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index c15b85de2ec4a..40eb50d0267d5 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -30,7 +30,7 @@ jobs: if: ${{ needs.changes.outputs.source == 'true' || needs.changes.outputs.test-yml == 'true' }} needs: changes steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - uses: ./.github/actions/setup with: rust: true @@ -42,7 +42,7 @@ jobs: if: ${{ needs.changes.outputs.source == 'true' || needs.changes.outputs.test-yml == 'true' }} needs: changes steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - uses: ./.github/actions/setup with: rust: true @@ -56,7 +56,7 @@ jobs: if: ${{ needs.changes.outputs.source == 'true' || needs.changes.outputs.test-yml == 'true' }} needs: changes steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - uses: ./.github/actions/setup with: rust: true @@ -83,7 +83,7 @@ jobs: if: ${{ needs.changes.outputs.scripts == 'true' || needs.changes.outputs.test-yml == 'true' }} needs: changes steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - uses: ./.github/actions/setup with: cargo-cache: false @@ -97,7 +97,7 @@ jobs: if: ${{ needs.changes.outputs.source == 'true' || needs.changes.outputs.test-yml == 'true' }} needs: changes steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: 
actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - uses: ./.github/actions/setup with: cargo-cache: false @@ -111,7 +111,7 @@ jobs: if: ${{ needs.changes.outputs.dependencies == 'true' || needs.changes.outputs.test-yml == 'true' }} needs: changes steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - uses: ./.github/actions/setup with: cargo-cache: false @@ -126,7 +126,7 @@ jobs: if: ${{ needs.changes.outputs.cue == 'true' || needs.changes.outputs.test-yml == 'true' }} needs: changes steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - uses: ./.github/actions/setup with: rust: true @@ -139,7 +139,7 @@ jobs: if: ${{ needs.changes.outputs.markdown == 'true' || needs.changes.outputs.test-yml == 'true' }} needs: changes steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - uses: ./.github/actions/setup with: rust: true @@ -152,7 +152,7 @@ jobs: if: ${{ needs.changes.outputs.source == 'true' || needs.changes.outputs.component_docs == 'true' || needs.changes.outputs.test-yml == 'true' }} needs: changes steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - run: sudo apt-get update && sudo apt-get install -y libsasl2-dev - uses: ./.github/actions/setup with: @@ -167,7 +167,7 @@ jobs: if: ${{ needs.changes.outputs.source == 'true' || needs.changes.outputs.test-yml == 'true' }} needs: changes steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - uses: ./.github/actions/setup with: rust: true @@ -179,7 +179,7 @@ jobs: if: 
${{ needs.changes.outputs.source == 'true' || needs.changes.outputs.cue == 'true' || needs.changes.outputs.test-yml == 'true' }} needs: changes steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - uses: ./.github/actions/setup with: rust: true @@ -193,7 +193,7 @@ jobs: if: ${{ needs.changes.outputs.source == 'true' || needs.changes.outputs.dependencies == 'true' || needs.changes.outputs.test-yml == 'true' }} needs: changes steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - uses: ./.github/actions/setup with: rust: true diff --git a/.github/workflows/unit_mac.yml b/.github/workflows/unit_mac.yml index 8a59c864a5c85..7d162e2eb3506 100644 --- a/.github/workflows/unit_mac.yml +++ b/.github/workflows/unit_mac.yml @@ -24,13 +24,13 @@ jobs: - name: (PR review) Checkout PR branch if: ${{ github.event_name == 'pull_request_review' }} - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: ref: ${{ github.event.review.commit_id }} - name: Checkout branch if: ${{ github.event_name != 'pull_request_review' }} - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0 name: Cache Cargo registry + index diff --git a/.github/workflows/unit_windows.yml b/.github/workflows/unit_windows.yml index 41a2f24b776f8..d0d64040dcb51 100644 --- a/.github/workflows/unit_windows.yml +++ b/.github/workflows/unit_windows.yml @@ -22,13 +22,13 @@ jobs: - name: (PR review) Checkout PR branch if: ${{ github.event_name == 'pull_request_review' }} - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + 
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: ref: ${{ github.event.review.commit_id }} - name: Checkout branch if: ${{ github.event_name != 'pull_request_review' }} - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - uses: actions/setup-python@v6 with: diff --git a/.github/workflows/vdev_publish.yml b/.github/workflows/vdev_publish.yml index 36590315228e3..15bb43ec81b64 100644 --- a/.github/workflows/vdev_publish.yml +++ b/.github/workflows/vdev_publish.yml @@ -19,7 +19,7 @@ jobs: steps: - name: Checkout Vector - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - name: Bootstrap runner environment (Ubuntu) if: startsWith(matrix.os, 'ubuntu') From 0b35fe8791f347a553d4835fcd07cef7a1fb5d61 Mon Sep 17 00:00:00 2001 From: James <10730172+sanjams2@users.noreply.github.com> Date: Wed, 10 Dec 2025 09:14:59 -0800 Subject: [PATCH 141/227] feat(aws_s3 source): add S3 download processing duration metric (#24289) * feat(aws_s3 source): add S3 download processing duration metric Add histogram metric `s3_object_processing_duration_seconds` to track how long S3 object downloads and processing takes. The metric includes `bucket` and `status` labels to help identify slow buckets and distinguish successful vs failed downloads. The timing captures the full download lifecycle from the GetObject API call through complete ByteStream body consumption. 
* add author to changelog * refactor: make success/failure different metrics * Move bucket/duration into own variables * derive NamedInternalEvent --------- Co-authored-by: sanjams2 Co-authored-by: Thomas --- ...s_s3_processing_duration_metric.feature.md | 3 ++ src/internal_events/aws_sqs.rs | 45 +++++++++++++++++++ src/sources/aws_s3/sqs.rs | 23 +++++++--- .../components/sources/internal_metrics.cue | 22 +++++++++ 4 files changed, 87 insertions(+), 6 deletions(-) create mode 100644 changelog.d/aws_s3_processing_duration_metric.feature.md diff --git a/changelog.d/aws_s3_processing_duration_metric.feature.md b/changelog.d/aws_s3_processing_duration_metric.feature.md new file mode 100644 index 0000000000000..43a4c9bbe57ba --- /dev/null +++ b/changelog.d/aws_s3_processing_duration_metric.feature.md @@ -0,0 +1,3 @@ +The `aws_s3` source now emits histogram metrics to track S3 object processing times: `s3_object_processing_succeeded_duration_seconds` for successful processing and `s3_object_processing_failed_duration_seconds` for failed processing. These measure the full processing pipeline including download, decompression, and parsing. Both metrics include a `bucket` label to help identify slow buckets. 
+ +authors: sanjams2 diff --git a/src/internal_events/aws_sqs.rs b/src/internal_events/aws_sqs.rs index e45ce6e68ef77..4bdcc8ef48e6c 100644 --- a/src/internal_events/aws_sqs.rs +++ b/src/internal_events/aws_sqs.rs @@ -9,14 +9,59 @@ use vector_lib::{NamedInternalEvent, internal_event::InternalEvent}; #[cfg(feature = "sources-aws_s3")] mod s3 { + use std::time::Duration; + use aws_sdk_sqs::types::{ BatchResultErrorEntry, DeleteMessageBatchRequestEntry, DeleteMessageBatchResultEntry, SendMessageBatchRequestEntry, SendMessageBatchResultEntry, }; + use metrics::histogram; use super::*; use crate::sources::aws_s3::sqs::ProcessingError; + #[derive(Debug, NamedInternalEvent)] + pub struct S3ObjectProcessingSucceeded<'a> { + pub bucket: &'a str, + pub duration: Duration, + } + + impl InternalEvent for S3ObjectProcessingSucceeded<'_> { + fn emit(self) { + debug!( + message = "S3 object processing succeeded.", + bucket = %self.bucket, + duration_ms = %self.duration.as_millis(), + ); + histogram!( + "s3_object_processing_succeeded_duration_seconds", + "bucket" => self.bucket.to_owned(), + ) + .record(self.duration); + } + } + + #[derive(Debug, NamedInternalEvent)] + pub struct S3ObjectProcessingFailed<'a> { + pub bucket: &'a str, + pub duration: Duration, + } + + impl InternalEvent for S3ObjectProcessingFailed<'_> { + fn emit(self) { + debug!( + message = "S3 object processing failed.", + bucket = %self.bucket, + duration_ms = %self.duration.as_millis(), + ); + histogram!( + "s3_object_processing_failed_duration_seconds", + "bucket" => self.bucket.to_owned(), + ) + .record(self.duration); + } + } + #[derive(Debug, NamedInternalEvent)] pub struct SqsMessageProcessingError<'a> { pub message_id: &'a str, diff --git a/src/sources/aws_s3/sqs.rs b/src/sources/aws_s3/sqs.rs index ed7559226d6a1..9ad5b47be61cc 100644 --- a/src/sources/aws_s3/sqs.rs +++ b/src/sources/aws_s3/sqs.rs @@ -4,7 +4,7 @@ use std::{ num::NonZeroUsize, panic, sync::{Arc, LazyLock}, - time::Duration, + 
time::{Duration, Instant}, }; use aws_sdk_s3::{Client as S3Client, operation::get_object::GetObjectError}; @@ -49,11 +49,11 @@ use crate::{ config::{SourceAcknowledgementsConfig, SourceContext}, event::{BatchNotifier, BatchStatus, EstimatedJsonEncodedSizeOf, Event, LogEvent}, internal_events::{ - EventsReceived, SqsMessageDeleteBatchError, SqsMessageDeletePartialError, - SqsMessageDeleteSucceeded, SqsMessageProcessingError, SqsMessageProcessingSucceeded, - SqsMessageReceiveError, SqsMessageReceiveSucceeded, SqsMessageSendBatchError, - SqsMessageSentPartialError, SqsMessageSentSucceeded, SqsS3EventRecordInvalidEventIgnored, - StreamClosedError, + EventsReceived, S3ObjectProcessingFailed, S3ObjectProcessingSucceeded, + SqsMessageDeleteBatchError, SqsMessageDeletePartialError, SqsMessageDeleteSucceeded, + SqsMessageProcessingError, SqsMessageProcessingSucceeded, SqsMessageReceiveError, + SqsMessageReceiveSucceeded, SqsMessageSendBatchError, SqsMessageSentPartialError, + SqsMessageSentSucceeded, SqsS3EventRecordInvalidEventIgnored, StreamClosedError, }, line_agg::{self, LineAgg}, shutdown::ShutdownSignal, @@ -670,6 +670,8 @@ impl IngestorProcess { } } + let download_start = Instant::now(); + let object_result = self .state .s3_client @@ -794,6 +796,15 @@ impl IngestorProcess { // so we explicitly drop it so that we can again utilize `read_error` below. drop(stream); + let bucket = &s3_event.s3.bucket.name; + let duration = download_start.elapsed(); + + if read_error.is_some() { + emit!(S3ObjectProcessingFailed { bucket, duration }); + } else { + emit!(S3ObjectProcessingSucceeded { bucket, duration }); + } + // The BatchNotifier is cloned for each LogEvent in the batch stream, but the last // reference must be dropped before the status of the batch is sent to the channel. 
drop(batch); diff --git a/website/cue/reference/components/sources/internal_metrics.cue b/website/cue/reference/components/sources/internal_metrics.cue index b9335f1918932..da94e39ecf645 100644 --- a/website/cue/reference/components/sources/internal_metrics.cue +++ b/website/cue/reference/components/sources/internal_metrics.cue @@ -762,6 +762,28 @@ components: sources: internal_metrics: { default_namespace: "vector" tags: _component_tags } + s3_object_processing_failed_duration_seconds: { + description: "The time taken to process an S3 object that failed, in seconds." + type: "histogram" + default_namespace: "vector" + tags: _component_tags & { + bucket: { + description: "The name of the S3 bucket." + required: true + } + } + } + s3_object_processing_succeeded_duration_seconds: { + description: "The time taken to process an S3 object that succeeded, in seconds." + type: "histogram" + default_namespace: "vector" + tags: _component_tags & { + bucket: { + description: "The name of the S3 bucket." + required: true + } + } + } sqs_message_delete_succeeded_total: { description: "The total number of successful deletions of SQS messages." type: "counter" From 0f998497b88393ba33ee90d6775f0848237e32a3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 10 Dec 2025 13:04:15 -0500 Subject: [PATCH 142/227] chore(ci): bump aws-actions/configure-aws-credentials from 5.0.0 to 5.1.1 (#24323) chore(ci): bump aws-actions/configure-aws-credentials Bumps [aws-actions/configure-aws-credentials](https://github.com/aws-actions/configure-aws-credentials) from 5.0.0 to 5.1.1. 
- [Release notes](https://github.com/aws-actions/configure-aws-credentials/releases) - [Changelog](https://github.com/aws-actions/configure-aws-credentials/blob/main/CHANGELOG.md) - [Commits](https://github.com/aws-actions/configure-aws-credentials/compare/a03048d87541d1d9fcf2ecf528a4a65ba9bd7838...61815dcd50bd041e203e49132bacad1fd04d2708) --- updated-dependencies: - dependency-name: aws-actions/configure-aws-credentials dependency-version: 5.1.1 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/regression.yml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/regression.yml b/.github/workflows/regression.yml index 5e2d03427160b..758e94e078fb5 100644 --- a/.github/workflows/regression.yml +++ b/.github/workflows/regression.yml @@ -269,7 +269,7 @@ jobs: - resolve-inputs steps: - name: Configure AWS Credentials - uses: aws-actions/configure-aws-credentials@a03048d87541d1d9fcf2ecf528a4a65ba9bd7838 # v5.0.0 + uses: aws-actions/configure-aws-credentials@61815dcd50bd041e203e49132bacad1fd04d2708 # v5.1.1 with: aws-access-key-id: ${{ secrets.SINGLE_MACHINE_PERFORMANCE_BOT_ACCESS_KEY_ID }} aws-secret-access-key: ${{ secrets.SINGLE_MACHINE_PERFORMANCE_BOT_SECRET_ACCESS_KEY }} @@ -303,7 +303,7 @@ jobs: docker load --input baseline-image.tar - name: Configure AWS Credentials - uses: aws-actions/configure-aws-credentials@a03048d87541d1d9fcf2ecf528a4a65ba9bd7838 # v5.0.0 + uses: aws-actions/configure-aws-credentials@61815dcd50bd041e203e49132bacad1fd04d2708 # v5.1.1 with: aws-access-key-id: ${{ secrets.SINGLE_MACHINE_PERFORMANCE_BOT_ACCESS_KEY_ID }} aws-secret-access-key: ${{ secrets.SINGLE_MACHINE_PERFORMANCE_BOT_SECRET_ACCESS_KEY }} @@ -343,7 +343,7 @@ jobs: docker load --input comparison-image.tar - name: Configure AWS Credentials - uses: 
aws-actions/configure-aws-credentials@a03048d87541d1d9fcf2ecf528a4a65ba9bd7838 # v5.0.0 + uses: aws-actions/configure-aws-credentials@61815dcd50bd041e203e49132bacad1fd04d2708 # v5.1.1 with: aws-access-key-id: ${{ secrets.SINGLE_MACHINE_PERFORMANCE_BOT_ACCESS_KEY_ID }} aws-secret-access-key: ${{ secrets.SINGLE_MACHINE_PERFORMANCE_BOT_SECRET_ACCESS_KEY }} @@ -378,7 +378,7 @@ jobs: ref: ${{ needs.resolve-inputs.outputs.comparison-sha }} - name: Configure AWS Credentials - uses: aws-actions/configure-aws-credentials@a03048d87541d1d9fcf2ecf528a4a65ba9bd7838 # v5.0.0 + uses: aws-actions/configure-aws-credentials@61815dcd50bd041e203e49132bacad1fd04d2708 # v5.1.1 with: aws-access-key-id: ${{ secrets.SINGLE_MACHINE_PERFORMANCE_BOT_ACCESS_KEY_ID }} aws-secret-access-key: ${{ secrets.SINGLE_MACHINE_PERFORMANCE_BOT_SECRET_ACCESS_KEY }} @@ -451,7 +451,7 @@ jobs: - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - name: Configure AWS Credentials - uses: aws-actions/configure-aws-credentials@a03048d87541d1d9fcf2ecf528a4a65ba9bd7838 # v5.0.0 + uses: aws-actions/configure-aws-credentials@61815dcd50bd041e203e49132bacad1fd04d2708 # v5.1.1 with: aws-access-key-id: ${{ secrets.SINGLE_MACHINE_PERFORMANCE_BOT_ACCESS_KEY_ID }} aws-secret-access-key: ${{ secrets.SINGLE_MACHINE_PERFORMANCE_BOT_SECRET_ACCESS_KEY }} @@ -490,7 +490,7 @@ jobs: ref: ${{ needs.resolve-inputs.outputs.comparison-sha }} - name: Configure AWS Credentials - uses: aws-actions/configure-aws-credentials@a03048d87541d1d9fcf2ecf528a4a65ba9bd7838 # v5.0.0 + uses: aws-actions/configure-aws-credentials@61815dcd50bd041e203e49132bacad1fd04d2708 # v5.1.1 with: aws-access-key-id: ${{ secrets.SINGLE_MACHINE_PERFORMANCE_BOT_ACCESS_KEY_ID }} aws-secret-access-key: ${{ secrets.SINGLE_MACHINE_PERFORMANCE_BOT_SECRET_ACCESS_KEY }} From a053a2e62dc6c1490af2f9eacb7efafdcad0ab26 Mon Sep 17 00:00:00 2001 From: Benjamin Dornel Date: Thu, 11 Dec 2025 02:31:40 +0800 Subject: [PATCH 143/227] fix(websocket source): 
reconnect indefinitely when connection fails (#24069) * fix(websocket source): reconnect indefinitely when connection fails * chore: add changelog fragment * Update src/common/websocket.rs * Remove references to now deleted fresh_backoff * Fix expect()() * fix: add missing protocol --------- Co-authored-by: Pavlos Rontidis Co-authored-by: Thomas --- .../23922_websocket_source_reconnect.fix.md | 3 ++ src/common/websocket.rs | 37 +++++++++++++++++++ src/internal_events/websocket.rs | 2 + src/sources/websocket/source.rs | 28 ++++---------- 4 files changed, 50 insertions(+), 20 deletions(-) create mode 100644 changelog.d/23922_websocket_source_reconnect.fix.md diff --git a/changelog.d/23922_websocket_source_reconnect.fix.md b/changelog.d/23922_websocket_source_reconnect.fix.md new file mode 100644 index 0000000000000..a70b859079d02 --- /dev/null +++ b/changelog.d/23922_websocket_source_reconnect.fix.md @@ -0,0 +1,3 @@ +Fixed the `websocket` source entering a "zombie" state when the `connect_timeout_secs` threshold was reached with multiple sources running. The connection timeout is now applied per connect attempt with indefinite retries, rather than as a total timeout limit. + +authors: benjamin-awd diff --git a/src/common/websocket.rs b/src/common/websocket.rs index 7abb8c589cec6..dab85c0a922be 100644 --- a/src/common/websocket.rs +++ b/src/common/websocket.rs @@ -40,6 +40,8 @@ pub enum WebSocketError { DnsError { source: dns::DnsError }, #[snafu(display("No addresses returned."))] NoAddresses, + #[snafu(display("Connection attempt timed out"))] + ConnectionTimedOut, } #[derive(Clone)] @@ -138,6 +140,41 @@ impl WebSocketConnector { } } + /// Connects with exponential backoff, applying a timeout to each individual connection attempt. + /// This will retry forever until a connection is established. 
+ pub(crate) async fn connect_backoff_with_timeout( + &self, + timeout_duration: Duration, + ) -> WebSocketStream> { + let mut backoff = ExponentialBackoff::default(); + + loop { + match time::timeout(timeout_duration, self.connect()).await { + Ok(Ok(ws_stream)) => { + emit!(WebSocketConnectionEstablished {}); + return ws_stream; + } + Ok(Err(error)) => { + emit!(WebSocketConnectionFailedError { + error: Box::new(error) + }); + } + Err(_) => { + emit!(WebSocketConnectionFailedError { + error: Box::new(WebSocketError::ConnectionTimedOut), + }); + } + } + + time::sleep( + backoff + .next() + .expect("backoff iterator always returns some value"), + ) + .await; + } + } + #[cfg(feature = "sinks-websocket")] pub(crate) async fn healthcheck(&self) -> crate::Result<()> { self.connect().await.map(|_| ()).map_err(Into::into) diff --git a/src/internal_events/websocket.rs b/src/internal_events/websocket.rs index 264e1587eedd7..960aec1916133 100644 --- a/src/internal_events/websocket.rs +++ b/src/internal_events/websocket.rs @@ -42,6 +42,7 @@ impl InternalEvent for WebSocketConnectionFailedError { ); counter!( "component_errors_total", + "protocol" => PROTOCOL, "error_code" => "websocket_connection_failed", "error_type" => error_type::CONNECTION_FAILED, "stage" => error_stage::SENDING, @@ -209,6 +210,7 @@ impl InternalEvent for WebSocketSendError<'_> { ); counter!( "component_errors_total", + "protocol" => PROTOCOL, "error_code" => "websocket_send_error", "error_type" => error_type::CONNECTION_FAILED, "stage" => error_stage::PROCESSING, diff --git a/src/sources/websocket/source.rs b/src/sources/websocket/source.rs index da515f21f3e37..cf3c75d40f534 100644 --- a/src/sources/websocket/source.rs +++ b/src/sources/websocket/source.rs @@ -21,10 +21,9 @@ use crate::{ common::websocket::{PingInterval, WebSocketConnector, is_closed}, config::SourceContext, internal_events::{ - ConnectionOpen, OpenGauge, PROTOCOL, WebSocketBytesReceived, WebSocketConnectionError, - 
WebSocketConnectionEstablished, WebSocketConnectionFailedError, - WebSocketConnectionShutdown, WebSocketKind, WebSocketMessageReceived, - WebSocketReceiveError, WebSocketSendError, + ConnectionOpen, OpenGauge, PROTOCOL, WebSocketBytesReceived, + WebSocketConnectionFailedError, WebSocketConnectionShutdown, WebSocketKind, + WebSocketMessageReceived, WebSocketReceiveError, WebSocketSendError, }, sources::websocket::config::WebSocketConfig, vector_lib::codecs::StreamDecodingError, @@ -297,23 +296,12 @@ impl WebSocketSource { async fn try_create_sink_and_stream( &self, ) -> Result<(WebSocketSink, WebSocketStream), WebSocketSourceError> { - let connect_future = self.params.connector.connect_backoff(); - let timeout = self.config.connect_timeout_secs; - - let ws_stream = match time::timeout(timeout, connect_future).await { - Ok(ws) => ws, - Err(_) => { - emit!(WebSocketConnectionError { - error: TungsteniteError::Io(std::io::Error::new( - std::io::ErrorKind::TimedOut, - "Connection attempt timed out", - )) - }); - return Err(WebSocketSourceError::ConnectTimeout); - } - }; + let ws_stream = self + .params + .connector + .connect_backoff_with_timeout(self.config.connect_timeout_secs) + .await; - emit!(WebSocketConnectionEstablished {}); let (sink, stream) = ws_stream.split(); Ok((Box::pin(sink), Box::pin(stream))) From 3f48cae746dfaa7d75b110e94cbe3cfedb6ebf82 Mon Sep 17 00:00:00 2001 From: Benjamin Dornel Date: Thu, 11 Dec 2025 02:53:32 +0800 Subject: [PATCH 144/227] enhancement(codecs): allow configurable null handling in Arrow encoder (#24288) * enhancement(codecs): allow configurable null handling in Arrow encoder * chore: update changelog * Remove whitespace from changelog * make fmt * Remove Arc::clone when value can be fully moved --------- Co-authored-by: Thomas Co-authored-by: Thomas --- .../24074_arrow_null_handling.enhancement.md | 6 + lib/codecs/src/encoding/format/arrow.rs | 214 +++++++++++++++++- 2 files changed, 217 insertions(+), 3 deletions(-) create mode 
100644 changelog.d/24074_arrow_null_handling.enhancement.md diff --git a/changelog.d/24074_arrow_null_handling.enhancement.md b/changelog.d/24074_arrow_null_handling.enhancement.md new file mode 100644 index 0000000000000..3245272e042e1 --- /dev/null +++ b/changelog.d/24074_arrow_null_handling.enhancement.md @@ -0,0 +1,6 @@ +The Arrow encoder now supports configurable null handling through the `allow_nullable_fields` +option. This controls whether nullable fields should be explicitly marked +as nullable in the Arrow schema, enabling better compatibility with +downstream systems that have specific requirements for null handling. + +authors: benjamin-awd diff --git a/lib/codecs/src/encoding/format/arrow.rs b/lib/codecs/src/encoding/format/arrow.rs index 7588b32b94452..db4dc491f4cc3 100644 --- a/lib/codecs/src/encoding/format/arrow.rs +++ b/lib/codecs/src/encoding/format/arrow.rs @@ -33,6 +33,18 @@ pub struct ArrowStreamSerializerConfig { #[serde(skip)] #[configurable(derived)] pub schema: Option>, + + /// Allow null values for non-nullable fields in the schema. + /// + /// When enabled, missing or incompatible values will be encoded as null even for fields + /// marked as non-nullable in the Arrow schema. This is useful when working with downstream + /// systems that can handle null values through defaults, computed columns, or other mechanisms. + /// + /// When disabled (default), missing values for non-nullable fields will cause encoding errors, + /// ensuring all required data is present before sending to the sink. 
+ #[serde(default)] + #[configurable(metadata(docs::examples = true))] + pub allow_nullable_fields: bool, } impl std::fmt::Debug for ArrowStreamSerializerConfig { @@ -45,6 +57,7 @@ impl std::fmt::Debug for ArrowStreamSerializerConfig { .as_ref() .map(|s| format!("{} fields", s.fields().len())), ) + .field("allow_nullable_fields", &self.allow_nullable_fields) .finish() } } @@ -54,6 +67,7 @@ impl ArrowStreamSerializerConfig { pub fn new(schema: Arc) -> Self { Self { schema: Some(schema), + allow_nullable_fields: false, } } @@ -77,12 +91,25 @@ pub struct ArrowStreamSerializer { impl ArrowStreamSerializer { /// Create a new ArrowStreamSerializer with the given configuration pub fn new(config: ArrowStreamSerializerConfig) -> Result { - let schema = config.schema.ok_or_else(|| { + let mut schema = config.schema.ok_or_else(|| { vector_common::Error::from( "Arrow serializer requires a schema. Pass a schema or fetch from provider before creating serializer." ) })?; + // If allow_nullable_fields is enabled, transform the schema once here + // instead of on every batch encoding + if config.allow_nullable_fields { + schema = Arc::new(Schema::new_with_metadata( + schema + .fields() + .iter() + .map(|f| Arc::new(make_field_nullable(f))) + .collect::>(), + schema.metadata().clone(), + )); + } + Ok(Self { schema }) } } @@ -172,18 +199,38 @@ pub fn encode_events_to_arrow_ipc_stream( let schema_ref = schema.ok_or(ArrowEncodingError::NoSchemaProvided)?; - let record_batch = build_record_batch(schema_ref.clone(), events)?; + let record_batch = build_record_batch(schema_ref, events)?; let ipc_err = |source| ArrowEncodingError::IpcWrite { source }; let mut buffer = BytesMut::new().writer(); - let mut writer = StreamWriter::try_new(&mut buffer, &schema_ref).map_err(ipc_err)?; + let mut writer = + StreamWriter::try_new(&mut buffer, record_batch.schema_ref()).map_err(ipc_err)?; writer.write(&record_batch).map_err(ipc_err)?; writer.finish().map_err(ipc_err)?; 
Ok(buffer.into_inner().freeze()) } +/// Recursively makes a Field and all its nested fields nullable +fn make_field_nullable(field: &arrow::datatypes::Field) -> arrow::datatypes::Field { + let new_data_type = match field.data_type() { + DataType::List(inner_field) => DataType::List(Arc::new(make_field_nullable(inner_field))), + DataType::Struct(fields) => { + DataType::Struct(fields.iter().map(|f| make_field_nullable(f)).collect()) + } + DataType::Map(inner_field, sorted) => { + DataType::Map(Arc::new(make_field_nullable(inner_field)), *sorted) + } + other => other.clone(), + }; + + field + .clone() + .with_data_type(new_data_type) + .with_nullable(true) +} + /// Builds an Arrow RecordBatch from events fn build_record_batch( schema: Arc, @@ -1442,4 +1489,165 @@ mod tests { assert!(!id_array.is_null(1)); assert!(!id_array.is_null(2)); } + + #[test] + fn test_config_allow_nullable_fields_overrides_schema() { + use tokio_util::codec::Encoder; + + // Create events: One valid, one missing the "required" field + let mut log1 = LogEvent::default(); + log1.insert("strict_field", 42); + let log2 = LogEvent::default(); + let events = vec![Event::Log(log1), Event::Log(log2)]; + + let schema = Arc::new(Schema::new(vec![Field::new( + "strict_field", + DataType::Int64, + false, + )])); + + let mut config = ArrowStreamSerializerConfig::new(Arc::clone(&schema)); + config.allow_nullable_fields = true; + + let mut serializer = + ArrowStreamSerializer::new(config).expect("Failed to create serializer"); + + let mut buffer = BytesMut::new(); + serializer + .encode(events, &mut buffer) + .expect("Encoding should succeed when allow_nullable_fields is true"); + + let cursor = Cursor::new(buffer); + let mut reader = StreamReader::try_new(cursor, None).expect("Failed to create reader"); + let batch = reader.next().unwrap().expect("Failed to read batch"); + + assert_eq!(batch.num_rows(), 2); + + let binding = batch.schema(); + let output_field = binding.field(0); + assert!( + 
output_field.is_nullable(), + "The output schema field should have been transformed to nullable=true" + ); + + let array = batch + .column(0) + .as_any() + .downcast_ref::() + .unwrap(); + + assert_eq!(array.value(0), 42); + assert!(!array.is_null(0)); + assert!( + array.is_null(1), + "The missing value should be encoded as null" + ); + } + + #[test] + fn test_make_field_nullable_with_nested_types() { + // Test that make_field_nullable recursively handles List and Struct types + + // Create a nested structure: Struct containing a List of Structs + // struct { inner_list: [{ nested_field: Int64 }] } + let inner_struct_field = Field::new("nested_field", DataType::Int64, false); + let inner_struct = + DataType::Struct(arrow::datatypes::Fields::from(vec![inner_struct_field])); + let list_field = Field::new("item", inner_struct, false); + let list_type = DataType::List(Arc::new(list_field)); + let outer_field = Field::new("inner_list", list_type, false); + let outer_struct = DataType::Struct(arrow::datatypes::Fields::from(vec![outer_field])); + + let original_field = Field::new("root", outer_struct, false); + + // Apply make_field_nullable + let nullable_field = make_field_nullable(&original_field); + + // Verify root field is nullable + assert!( + nullable_field.is_nullable(), + "Root field should be nullable" + ); + + // Verify nested struct is nullable + if let DataType::Struct(root_fields) = nullable_field.data_type() { + let inner_list_field = &root_fields[0]; + assert!( + inner_list_field.is_nullable(), + "inner_list field should be nullable" + ); + + // Verify list element is nullable + if let DataType::List(list_item_field) = inner_list_field.data_type() { + assert!( + list_item_field.is_nullable(), + "List item field should be nullable" + ); + + // Verify inner struct fields are nullable + if let DataType::Struct(inner_struct_fields) = list_item_field.data_type() { + let nested_field = &inner_struct_fields[0]; + assert!( + nested_field.is_nullable(), + 
"nested_field should be nullable" + ); + } else { + panic!("Expected Struct type for list items"); + } + } else { + panic!("Expected List type for inner_list"); + } + } else { + panic!("Expected Struct type for root field"); + } + } + + #[test] + fn test_make_field_nullable_with_map_type() { + // Test that make_field_nullable handles Map types + // Map is internally represented as List> + + // Create a map: Map + // Internally: List> + let key_field = Field::new("key", DataType::Utf8, false); + let value_field = Field::new("value", DataType::Int64, false); + let entries_struct = + DataType::Struct(arrow::datatypes::Fields::from(vec![key_field, value_field])); + let entries_field = Field::new("entries", entries_struct, false); + let map_type = DataType::Map(Arc::new(entries_field), false); + + let original_field = Field::new("my_map", map_type, false); + + // Apply make_field_nullable + let nullable_field = make_field_nullable(&original_field); + + // Verify root field is nullable + assert!( + nullable_field.is_nullable(), + "Root map field should be nullable" + ); + + // Verify map entries are nullable + if let DataType::Map(entries_field, _sorted) = nullable_field.data_type() { + assert!( + entries_field.is_nullable(), + "Map entries field should be nullable" + ); + + // Verify the struct inside the map is nullable + if let DataType::Struct(struct_fields) = entries_field.data_type() { + let key_field = &struct_fields[0]; + let value_field = &struct_fields[1]; + assert!(key_field.is_nullable(), "Map key field should be nullable"); + assert!( + value_field.is_nullable(), + "Map value field should be nullable" + ); + } else { + panic!("Expected Struct type for map entries"); + } + } else { + panic!("Expected Map type for my_map field"); + } + } } From a4b2b8ead2b39ae0f2c07b9de7776e6671eb68d2 Mon Sep 17 00:00:00 2001 From: zapdos26 Date: Wed, 10 Dec 2025 13:54:34 -0500 Subject: [PATCH 145/227] docs(external docs): Revise Azure instance type recommendations (#24147) * 
Revise Azure instance type recommendations Updated Azure recommendations for instance types. * Add new entry to spelling expect.txt Added D8plds_v6 to spelling expect.txt * Try to fix spelling --------- Co-authored-by: Thomas --- .github/actions/spelling/expect.txt | 1 + website/content/en/docs/setup/going-to-prod/sizing.md | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/.github/actions/spelling/expect.txt b/.github/actions/spelling/expect.txt index 3f07a7f991a9a..1d68a6978570b 100644 --- a/.github/actions/spelling/expect.txt +++ b/.github/actions/spelling/expect.txt @@ -430,6 +430,7 @@ pgo PIDs PII plainify +plds ple podspec Ponge diff --git a/website/content/en/docs/setup/going-to-prod/sizing.md b/website/content/en/docs/setup/going-to-prod/sizing.md index abe6f6a199bef..18d3924a184d9 100644 --- a/website/content/en/docs/setup/going-to-prod/sizing.md +++ b/website/content/en/docs/setup/going-to-prod/sizing.md @@ -31,7 +31,7 @@ Instances with at least 8 vCPUs and 16 GiB of memory are good units for scaling. 
| Cloud | Recommendations | | --- | --- | | AWS | c6i.2xlarge (recommended) or c6g.2xlarge | -| Azure | f8 | +| Azure | f8 or D8plds_v6 | | GCP | c2 (8 vCPUs, 16 GiB memory) | #### CPUs @@ -46,8 +46,8 @@ The `ARM64` architecture typically offers better performance for the investment | Cloud | Recommendations | | --- | --- | | AWS | Latest generation Graviton (recommended) or Intel Xeon, ≥8 vCPUs | -| Azure | Latest generation Intel Xeon, ≥8 vCPUs | -| GCP | Latests generation Intel Xeon, ≥8 vCPUs | +| Azure | Latest generation Cobalt (recommended) or Intel Xeon, ≥8 vCPUs | +| GCP | Latest generation Intel Xeon, ≥8 vCPUs | #### Memory From d2771c3f5639e9d87ba103a0492d0db05451df86 Mon Sep 17 00:00:00 2001 From: WaterWhisperer Date: Fri, 12 Dec 2025 04:42:57 +0800 Subject: [PATCH 146/227] chore(dev): clean up some `allow` statements (#24366) * chore: clean up some `allow` statements * fix fails when running `cargo check --tests --bin vector --no-default-features` --- lib/vector-buffers/src/variants/disk_v2/common.rs | 1 - src/conditions/mod.rs | 1 - 2 files changed, 2 deletions(-) diff --git a/lib/vector-buffers/src/variants/disk_v2/common.rs b/lib/vector-buffers/src/variants/disk_v2/common.rs index 1c50416afccfe..c7b4c2d4818bc 100644 --- a/lib/vector-buffers/src/variants/disk_v2/common.rs +++ b/lib/vector-buffers/src/variants/disk_v2/common.rs @@ -208,7 +208,6 @@ where /// /// Defaults to `usize::MAX`, or effectively no limit. Due to the internal design of the /// buffer, the effective maximum limit is around `max_data_file_size` * 2^16. - #[allow(dead_code)] pub fn max_buffer_size(mut self, amount: u64) -> Self { self.max_buffer_size = Some(amount); self diff --git a/src/conditions/mod.rs b/src/conditions/mod.rs index 766b5cf9c0345..471662b40adfb 100644 --- a/src/conditions/mod.rs +++ b/src/conditions/mod.rs @@ -53,7 +53,6 @@ impl Condition { /// Checks if a condition is true. 
/// /// The event should not be modified, it is only mutable so it can be passed into VRL, but VRL type checking prevents mutation. - #[allow(dead_code)] pub fn check(&self, e: Event) -> (bool, Event) { match self { Condition::IsLog => check_is_log(e), From b9cbce345499d42a691a8d485025068dd1cab3b0 Mon Sep 17 00:00:00 2001 From: Thomas Date: Fri, 12 Dec 2025 12:43:29 -0500 Subject: [PATCH 147/227] docs(internal docs): README e2e badge (#24375) fix(docs): README e2e badge --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index bf4db29273e00..1e5b13adf3ce2 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,5 @@ [![Nightly](https://github.com/vectordotdev/vector/actions/workflows/nightly.yml/badge.svg)](https://github.com/vectordotdev/vector/actions/workflows/nightly.yml) -[![E2E Test Suite](https://github.com/vectordotdev/vector/actions/workflows/e2e.yml/badge.svg)](https://github.com/vectordotdev/vector/actions/workflows/e2e.yml) +[![Integration/E2E Test Suite](https://github.com/vectordotdev/vector/actions/workflows/integration.yml/badge.svg)](https://github.com/vectordotdev/vector/actions/workflows/integration.yml/badge.svg?event=merge_group) [![Component Features](https://github.com/vectordotdev/vector/actions/workflows/component_features.yml/badge.svg)](https://github.com/vectordotdev/vector/actions/workflows/component_features.yml)

From feb33ce7f08ec6799963d94ac8f627a2e131cbbe Mon Sep 17 00:00:00 2001 From: Thomas Date: Fri, 12 Dec 2025 17:05:00 -0500 Subject: [PATCH 148/227] chore(deps): bump VRL to use 0.29.0 sha (#24378) --- Cargo.lock | 78 +++++++++++++++++++++++++------------------- LICENSE-3rdparty.csv | 6 ++-- 2 files changed, 48 insertions(+), 36 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 012c49206b7bc..dce114e7f9db1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2122,9 +2122,9 @@ dependencies = [ [[package]] name = "borrow-or-share" -version = "0.2.2" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3eeab4423108c5d7c744f4d234de88d18d636100093ae04caf4825134b9c3a32" +checksum = "dc0b364ead1874514c8c2855ab558056ebfeb775653e7ae45ff72f28f8f3166c" [[package]] name = "borsh" @@ -2180,7 +2180,7 @@ dependencies = [ "base64 0.22.1", "bitvec", "getrandom 0.2.15", - "getrandom 0.3.1", + "getrandom 0.3.4", "hex", "indexmap 2.12.0", "js-sys", @@ -4196,9 +4196,9 @@ dependencies = [ [[package]] name = "fluent-uri" -version = "0.3.2" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1918b65d96df47d3591bed19c5cca17e3fa5d0707318e4b5ef2eae01764df7e5" +checksum = "bc74ac4d8359ae70623506d512209619e5cf8f347124910440dbc221714b328e" dependencies = [ "borrow-or-share", "ref-cast", @@ -4478,16 +4478,16 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.3.1" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43a49c392881ce6d5c3b8cb70f98717b7c07aabbdff06687b9030dbfbe2725f8" +checksum = "899def5c37c4fd7b2664648c28120ecec138e4d395b459e5ca34f9cce2dd77fd" dependencies = [ "cfg-if", "js-sys", "libc", - "wasi 0.13.3+wasi-0.2.2", + "r-efi", + "wasip2", "wasm-bindgen", - "windows-targets 0.52.6", ] [[package]] @@ -4572,7 +4572,7 @@ dependencies = [ "futures-sink", "futures-timer", "futures-util", - "getrandom 0.3.1", + "getrandom 0.3.4", "hashbrown 0.15.2", 
"nonzero_ext", "parking_lot 0.12.4", @@ -5978,27 +5978,28 @@ dependencies = [ [[package]] name = "jsonschema" -version = "0.32.1" +version = "0.37.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24690c68dfcdde5980d676b0f1820981841016b1f29eecb4c42ad48ab4118681" +checksum = "73c9ffb2b5c56d58030e1b532d8e8389da94590515f118cf35b5cb68e4764a7e" dependencies = [ "ahash 0.8.11", - "base64 0.22.1", "bytecount", + "data-encoding", "email_address", "fancy-regex 0.16.1", "fraction", + "getrandom 0.3.4", "idna", "itoa", "num-cmp", "num-traits", - "once_cell", "percent-encoding", "referencing", "regex", "regex-syntax", "serde", "serde_json", + "unicode-general-category", "uuid-simd", ] @@ -8890,6 +8891,12 @@ version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "79ec282e887b434b68c18fe5c121d38e72a5cf35119b59e54ec5b992ea9c8eb0" +[[package]] +name = "r-efi" +version = "5.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f" + [[package]] name = "radium" version = "0.7.0" @@ -8994,7 +9001,7 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b08f3c9802962f7e1b25113931d94f43ed9725bebc59db9d0c3e9a23b67e15ff" dependencies = [ - "getrandom 0.3.1", + "getrandom 0.3.4", "zerocopy 0.8.16", ] @@ -9232,13 +9239,14 @@ dependencies = [ [[package]] name = "referencing" -version = "0.32.1" +version = "0.37.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a3d769362109497b240e66462606bc28af68116436c8669bac17069533b908e" +checksum = "4283168a506f0dcbdce31c9f9cce3129c924da4c6bca46e46707fcb746d2d70c" dependencies = [ "ahash 0.8.11", - "fluent-uri 0.3.2", - "once_cell", + "fluent-uri 0.4.1", + "getrandom 0.3.4", + "hashbrown 0.16.0", "parking_lot 0.12.4", "percent-encoding", "serde_json", @@ -11155,7 +11163,7 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "2d31c77bdf42a745371d260a26ca7163f1e0924b64afa0b688e61b5a9fa02f16" dependencies = [ "fastrand 2.3.0", - "getrandom 0.3.1", + "getrandom 0.3.4", "once_cell", "rustix 1.0.1", "windows-sys 0.61.0", @@ -12273,6 +12281,12 @@ version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "92888ba5573ff080736b3648696b70cafad7d250551175acbaa4e0385b3e1460" +[[package]] +name = "unicode-general-category" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b993bddc193ae5bd0d623b49ec06ac3e9312875fdae725a975c51db1cc1677f" + [[package]] name = "unicode-ident" version = "1.0.13" @@ -12430,7 +12444,7 @@ version = "1.18.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2f87b8aa10b915a06587d0dec516c282ff295b475d94abf425d62b57710070a2" dependencies = [ - "getrandom 0.3.1", + "getrandom 0.3.4", "js-sys", "rand 0.9.2", "serde", @@ -12444,7 +12458,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "23b082222b4f6619906941c17eb2297fff4c2fb96cb60164170522942a200bd8" dependencies = [ "outref", - "uuid", "vsimd", ] @@ -13092,8 +13105,8 @@ checksum = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d" [[package]] name = "vrl" -version = "0.28.1" -source = "git+https://github.com/vectordotdev/vrl.git?branch=main#5eafcc03be7fd864e51fe86dfc719e941de8e432" +version = "0.29.0" +source = "git+https://github.com/vectordotdev/vrl.git?branch=main#195f1ed6a334978a8b5d318030b606a902973351" dependencies = [ "aes", "aes-siv", @@ -13291,12 +13304,12 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] -name = "wasi" -version = "0.13.3+wasi-0.2.2" +name = "wasip2" +version = "1.0.1+wasi-0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"26816d2e1a4a36a2940b96c5296ce403917633dff8f3440e9b236ed6f6bacad2" +checksum = "0562428422c63773dad2c345a1882263bbf4d65cf3f42e90921f787ef5ad58e7" dependencies = [ - "wit-bindgen-rt", + "wit-bindgen", ] [[package]] @@ -14049,13 +14062,10 @@ dependencies = [ ] [[package]] -name = "wit-bindgen-rt" -version = "0.33.0" +name = "wit-bindgen" +version = "0.46.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3268f3d866458b787f390cf61f4bbb563b922d091359f9608842999eaee3943c" -dependencies = [ - "bitflags 2.9.0", -] +checksum = "f17a85883d4e6d00e8a97c586de764dabcc06133f7f1d55dce5cdc070ad7fe59" [[package]] name = "woothee" diff --git a/LICENSE-3rdparty.csv b/LICENSE-3rdparty.csv index a946220a4da1b..3df3154423daa 100644 --- a/LICENSE-3rdparty.csv +++ b/LICENSE-3rdparty.csv @@ -613,6 +613,7 @@ quinn-proto,https://github.com/quinn-rs/quinn,MIT OR Apache-2.0,The quinn-proto quinn-udp,https://github.com/quinn-rs/quinn,MIT OR Apache-2.0,The quinn-udp Authors quote,https://github.com/dtolnay/quote,MIT OR Apache-2.0,David Tolnay quoted_printable,https://github.com/staktrace/quoted-printable,0BSD,Kartikaya Gupta +r-efi,https://github.com/r-efi/r-efi,MIT OR Apache-2.0 OR LGPL-2.1-or-later,The r-efi Authors radium,https://github.com/bitvecto-rs/radium,MIT,"Nika Layzell , myrrlyn " radix_trie,https://github.com/michaelsproul/rust_radix_trie,MIT,Michael Sproul rand,https://github.com/rust-random/rand,MIT OR Apache-2.0,"The Rand Project Developers, The Rust Project Developers" @@ -827,6 +828,7 @@ ucd-trie,https://github.com/BurntSushi/ucd-generate,MIT OR Apache-2.0,Andrew Gal unarray,https://github.com/cameron1024/unarray,MIT OR Apache-2.0,The unarray Authors unicase,https://github.com/seanmonstar/unicase,MIT OR Apache-2.0,Sean McArthur unicode-bidi,https://github.com/servo/unicode-bidi,MIT OR Apache-2.0,The Servo Project Developers +unicode-general-category,https://github.com/yeslogic/unicode-general-category,Apache-2.0,YesLogic Pty. Ltd. 
unicode-ident,https://github.com/dtolnay/unicode-ident,(MIT OR Apache-2.0) AND Unicode-DFS-2016,David Tolnay unicode-normalization,https://github.com/unicode-rs/unicode-normalization,MIT OR Apache-2.0,"kwantam , Manish Goregaokar " unicode-segmentation,https://github.com/unicode-rs/unicode-segmentation,MIT OR Apache-2.0,"kwantam , Manish Goregaokar " @@ -858,7 +860,7 @@ walkdir,https://github.com/BurntSushi/walkdir,Unlicense OR MIT,Andrew Gallant warp,https://github.com/seanmonstar/warp,MIT,Sean McArthur wasi,https://github.com/bytecodealliance/wasi,Apache-2.0 WITH LLVM-exception OR Apache-2.0 OR MIT,The Cranelift Project Developers -wasi,https://github.com/bytecodealliance/wasi-rs,Apache-2.0 WITH LLVM-exception OR Apache-2.0 OR MIT,The Cranelift Project Developers +wasip2,https://github.com/bytecodealliance/wasi-rs,Apache-2.0 WITH LLVM-exception OR Apache-2.0 OR MIT,The wasip2 Authors wasite,https://github.com/ardaku/wasite,Apache-2.0 OR BSL-1.0 OR MIT,The wasite Authors wasm-bindgen,https://github.com/rustwasm/wasm-bindgen,MIT OR Apache-2.0,The wasm-bindgen Developers wasm-bindgen-backend,https://github.com/rustwasm/wasm-bindgen/tree/master/crates/backend,MIT OR Apache-2.0,The wasm-bindgen Developers @@ -905,7 +907,7 @@ windows_x86_64_gnullvm,https://github.com/microsoft/windows-rs,MIT OR Apache-2.0 windows_x86_64_msvc,https://github.com/microsoft/windows-rs,MIT OR Apache-2.0,Microsoft winnow,https://github.com/winnow-rs/winnow,MIT,The winnow Authors winreg,https://github.com/gentoo90/winreg-rs,MIT,Igor Shaula -wit-bindgen-rt,https://github.com/bytecodealliance/wasi-rs,Apache-2.0 WITH LLVM-exception OR Apache-2.0 OR MIT,The wit-bindgen-rt Authors +wit-bindgen,https://github.com/bytecodealliance/wit-bindgen,Apache-2.0 WITH LLVM-exception OR Apache-2.0 OR MIT,Alex Crichton woothee,https://github.com/woothee/woothee-rust,Apache-2.0,hhatto write16,https://github.com/hsivonen/write16,Apache-2.0 OR MIT,The write16 Authors 
writeable,https://github.com/unicode-org/icu4x,Unicode-3.0,The ICU4X Project Developers From 3921ecb5c14a6b48f89747907af08c7ddb08b207 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 15 Dec 2025 09:45:23 -0500 Subject: [PATCH 149/227] chore(ci): bump github/codeql-action from 3.30.6 to 4.31.6 (#24324) Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.30.6 to 4.31.6. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/64d10c13136e1c5bce3e5fbde8d4906eeaafc885...fe4161a26a8629af62121b670040955b330f9af2) --- updated-dependencies: - dependency-name: github/codeql-action dependency-version: 4.31.6 dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/scorecard.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/scorecard.yml b/.github/workflows/scorecard.yml index c351bb4c67072..44965ea9f9d28 100644 --- a/.github/workflows/scorecard.yml +++ b/.github/workflows/scorecard.yml @@ -68,6 +68,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard (optional). 
# Commenting out will disable upload of results to your repo's Code Scanning dashboard - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@64d10c13136e1c5bce3e5fbde8d4906eeaafc885 # v3.30.6 + uses: github/codeql-action/upload-sarif@fe4161a26a8629af62121b670040955b330f9af2 # v4.31.6 with: sarif_file: results.sarif From faa2c21fcdbac813e8433afec24fe7849556b197 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 15 Dec 2025 09:54:46 -0500 Subject: [PATCH 150/227] chore(ci): bump docker/metadata-action from 5.9.0 to 5.10.0 (#24326) Bumps [docker/metadata-action](https://github.com/docker/metadata-action) from 5.9.0 to 5.10.0. - [Release notes](https://github.com/docker/metadata-action/releases) - [Commits](https://github.com/docker/metadata-action/compare/318604b99e75e41977312d83839a89be02ca4893...c299e40c65443455700f0fdfc63efafe5b349051) --- updated-dependencies: - dependency-name: docker/metadata-action dependency-version: 5.10.0 dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/environment.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/environment.yml b/.github/workflows/environment.yml index 69db4836a78c2..5db6aed25a65d 100644 --- a/.github/workflows/environment.yml +++ b/.github/workflows/environment.yml @@ -55,7 +55,7 @@ jobs: password: ${{ secrets.CI_DOCKER_PASSWORD }} - name: Extract metadata (tags, labels) for Docker id: meta - uses: docker/metadata-action@318604b99e75e41977312d83839a89be02ca4893 # v5.9.0 + uses: docker/metadata-action@c299e40c65443455700f0fdfc63efafe5b349051 # v5.10.0 with: images: timberio/vector-dev flavor: | From eae0be26759331ca19fe5d77ecee78cd329e3133 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 15 Dec 2025 09:55:09 -0500 Subject: [PATCH 151/227] chore(ci): bump DataDog/dd-octo-sts-action from 1.0.1 to 1.0.3 (#24325) Bumps [DataDog/dd-octo-sts-action](https://github.com/datadog/dd-octo-sts-action) from 1.0.1 to 1.0.3. - [Release notes](https://github.com/datadog/dd-octo-sts-action/releases) - [Commits](https://github.com/datadog/dd-octo-sts-action/compare/08f2144903ced3254a3dafec2592563409ba2aa0...acaa02eee7e3bb0839e4272dacb37b8f3b58ba80) --- updated-dependencies: - dependency-name: DataDog/dd-octo-sts-action dependency-version: 1.0.3 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/cla.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/cla.yml b/.github/workflows/cla.yml index aa11ac356341f..179a849e92a0e 100644 --- a/.github/workflows/cla.yml +++ b/.github/workflows/cla.yml @@ -21,7 +21,7 @@ jobs: if: github.event_name == 'merge_group' run: echo "CLA verification not needed for merge queue - already checked on PR" - - uses: DataDog/dd-octo-sts-action@08f2144903ced3254a3dafec2592563409ba2aa0 # v1.0.1 + - uses: DataDog/dd-octo-sts-action@acaa02eee7e3bb0839e4272dacb37b8f3b58ba80 # v1.0.3 if: github.event_name != 'merge_group' id: octo-sts with: From e6397f3fdba0306fb0def6d602c9f3d4053fa109 Mon Sep 17 00:00:00 2001 From: Benjamin Dornel Date: Tue, 16 Dec 2025 02:39:24 +0800 Subject: [PATCH 152/227] enhancement(http_client source): add support for request body (#24170) * enhancement(http_client source): add body to config * chore: use resolved_compiled_params helper * chore: use has_vrl helper * chore: simplify map calls * chore: simplify creation of request body * chore: add changelog fragment * docs: add VRL example * chore: clippy * chore: update http_client docs * chore: update integration tests * chore: update docs * chore: rollback breaking change to base uri construction * chore: add note about content-type default * chore: update docs * chore: fix merge conflict * test: vrl compilation errors are raised before runtime * chore: fix failing tests * chore: update component docs * chore: update docs --------- Co-authored-by: Pavlos Rontidis Co-authored-by: Thomas --- .../14758_http_client_body.enhancement.md | 3 + src/http.rs | 4 +- src/sources/http_client/client.rs | 207 ++++++++------ src/sources/http_client/integration_tests.rs | 11 + src/sources/http_client/tests.rs | 254 ++++++++++++++++++ src/sources/util/http_client.rs | 25 +- .../sinks/generated/elasticsearch.cue 
| 4 +- .../sources/generated/http_client.cue | 36 ++- .../sources/generated/prometheus_scrape.cue | 4 +- .../components/sources/http_client.cue | 33 +++ 10 files changed, 489 insertions(+), 92 deletions(-) create mode 100644 changelog.d/14758_http_client_body.enhancement.md diff --git a/changelog.d/14758_http_client_body.enhancement.md b/changelog.d/14758_http_client_body.enhancement.md new file mode 100644 index 0000000000000..247842fec66c4 --- /dev/null +++ b/changelog.d/14758_http_client_body.enhancement.md @@ -0,0 +1,3 @@ +The `http_client` source now supports the `body` parameter. VRL is also supported in the body which allows a dynamic request body to be generated. + +authors: benjamin-awd diff --git a/src/http.rs b/src/http.rs index 1a24e38a37fcb..7c4553764bbab 100644 --- a/src/http.rs +++ b/src/http.rs @@ -596,7 +596,7 @@ pub enum ParamType { /// The parameter value is a plain string. #[default] String, - /// The parameter value is a VRL expression that will be evaluated before each request. + /// The parameter value is a VRL expression that is evaluated before each request. Vrl, } @@ -618,7 +618,7 @@ pub enum ParameterValue { Typed { /// The raw value of the parameter. value: String, - /// The type of the parameter, indicating how the `value` should be treated. + /// The parameter type, indicating how the `value` should be treated. #[serde( default, skip_serializing_if = "ParamType::is_default", diff --git a/src/sources/http_client/client.rs b/src/sources/http_client/client.rs index 34c2ee09dbe2a..c26f7302e16aa 100644 --- a/src/sources/http_client/client.rs +++ b/src/sources/http_client/client.rs @@ -114,6 +114,15 @@ pub struct HttpClientConfig { #[serde(default = "default_http_method")] pub method: HttpMethod, + /// Raw data to send as the HTTP request body. + /// + /// Can be a static string or a VRL expression. 
+ /// + /// When a body is provided, the `Content-Type` header is automatically set to + /// `application/json` unless explicitly overridden in the `headers` configuration. + #[serde(default)] + pub body: Option, + /// TLS configuration. #[configurable(derived)] pub tls: Option, @@ -174,6 +183,44 @@ fn headers_examples() -> HashMap> { ]) } +/// Helper function to get all VRL functions for compilation +fn get_vrl_functions() -> Vec> { + vrl::stdlib::all() + .into_iter() + .chain(vector_lib::enrichment::vrl_functions()) + .chain(vector_vrl_functions::all()) + .collect() +} + +/// Helper function to compile a VRL parameter value into a Program +fn compile_parameter_vrl( + param: &ParameterValue, + functions: &[Box], +) -> Result, sources::BuildError> { + if !param.is_vrl() { + return Ok(None); + } + + let state = TypeState::default(); + let config = CompileConfig::default(); + + match compile_vrl(param.value(), functions, &state, config) { + Ok(compilation_result) => { + if !compilation_result.warnings.is_empty() { + let warnings = format_vrl_diagnostics(param.value(), compilation_result.warnings); + warn!(message = "VRL compilation warnings.", %warnings); + } + Ok(Some(compilation_result.program)) + } + Err(diagnostics) => { + let error = format_vrl_diagnostics(param.value(), diagnostics); + Err(sources::BuildError::VrlCompilationError { + message: format!("VRL compilation failed: {}", error), + }) + } + } +} + impl Default for HttpClientConfig { fn default() -> Self { Self { @@ -185,6 +232,7 @@ impl Default for HttpClientConfig { framing: default_framing_message_based(), headers: HashMap::new(), method: default_http_method(), + body: None, tls: None, auth: None, log_namespace: None, @@ -206,6 +254,17 @@ pub enum CompiledQueryParameterValue { MultiParams(Vec), } +impl CompiledQueryParameterValue { + fn has_vrl(&self) -> bool { + match self { + CompiledQueryParameterValue::SingleParam(param) => param.program.is_some(), + 
CompiledQueryParameterValue::MultiParams(params) => { + params.iter().any(|p| p.program.is_some()) + } + } + } +} + #[derive(Clone)] pub struct Query { original: HashMap, @@ -215,11 +274,7 @@ pub struct Query { impl Query { pub fn new(params: &HashMap) -> Result { - let functions = vrl::stdlib::all() - .into_iter() - .chain(vector_lib::enrichment::vrl_functions()) - .chain(vector_vrl_functions::all()) - .collect::>(); + let functions = get_vrl_functions(); let mut compiled: HashMap = HashMap::new(); @@ -228,12 +283,7 @@ impl Query { compiled.insert(k.clone(), compiled_param); } - let has_vrl = compiled.values().any(|compiled| match compiled { - CompiledQueryParameterValue::SingleParam(param) => param.program.is_some(), - CompiledQueryParameterValue::MultiParams(params) => { - params.iter().any(|p| p.program.is_some()) - } - }); + let has_vrl = compiled.values().any(|v| v.has_vrl()); Ok(Query { original: params.clone(), @@ -246,29 +296,7 @@ impl Query { param: &ParameterValue, functions: &[Box], ) -> Result { - let program = if param.is_vrl() { - let state = TypeState::default(); - let config = CompileConfig::default(); - - match compile_vrl(param.value(), functions, &state, config) { - Ok(compilation_result) => { - if !compilation_result.warnings.is_empty() { - let warnings = - format_vrl_diagnostics(param.value(), compilation_result.warnings); - warn!(message = "VRL compilation warnings.", %warnings); - } - Some(compilation_result.program) - } - Err(diagnostics) => { - let error = format_vrl_diagnostics(param.value(), diagnostics); - return Err(sources::BuildError::VrlCompilationError { - message: format!("VRL compilation failed: {}", error), - }); - } - } - } else { - None - }; + let program = compile_parameter_vrl(param, functions)?; Ok(CompiledParam { value: param.value().to_string(), @@ -301,22 +329,35 @@ impl Query { #[typetag::serde(name = "http_client")] impl SourceConfig for HttpClientConfig { async fn build(&self, cx: SourceContext) -> crate::Result { - 
let query = Query::new(&self.query.clone())?; + let query = Query::new(&self.query)?; + let functions = get_vrl_functions(); + + // Compile body if present + let body = self + .body + .as_ref() + .map(|body_param| -> Result { + let program = compile_parameter_vrl(body_param, &functions)?; + Ok(CompiledParam { + value: body_param.value().to_string(), + program, + }) + }) + .transpose()?; // Build the base URLs let endpoints = [self.endpoint.clone()]; let urls: Vec = endpoints .iter() - .map(|s| s.parse::().context(sources::UriParseSnafu)) - .map(|r| { - if query.has_vrl { - // For URLs with VRL expressions, don't add query parameters here - // They'll be added dynamically during the HTTP request - r + .map(|s| { + let uri = s.parse::().context(sources::UriParseSnafu)?; + // For URLs with VRL expressions, add query parameters dynamically during request + // For URLs without VRL expressions, add query parameters now + Ok(if query.has_vrl { + uri } else { - // For URLs without VRL expressions, add query parameters now - r.map(|uri| build_url(&uri, &query.original)) - } + build_url(&uri, &query.original) + }) }) .collect::, sources::BuildError>>()?; @@ -329,11 +370,12 @@ impl SourceConfig for HttpClientConfig { let content_type = self.decoding.content_type(&self.framing).to_string(); - // Create context with the config for dynamic query parameter evaluation + // Create context with the config for dynamic query parameter and body evaluation let context = HttpClientContext { decoder, log_namespace, query, + body, }; warn_if_interval_too_low(self.timeout, self.interval); @@ -391,6 +433,7 @@ pub struct HttpClientContext { pub decoder: Decoder, pub log_namespace: LogNamespace, query: Query, + body: Option, } impl HttpClientContext { @@ -451,6 +494,14 @@ fn resolve_vrl(value: &str, program: &Program) -> Option { }) } +/// Resolve a compiled parameter, handling VRL evaluation if present +fn resolve_compiled_param(compiled: &CompiledParam) -> Option { + match &compiled.program 
{ + Some(program) => resolve_vrl(&compiled.value, program), + None => Some(compiled.value.clone()), + } +} + impl http_client::HttpClientContext for HttpClientContext { /// Decodes the HTTP response body into events per the decoder configured. fn on_response(&mut self, _url: &Uri, _header: &Parts, body: &Bytes) -> Option> { @@ -463,49 +514,41 @@ impl http_client::HttpClientContext for HttpClientContext { Some(events) } + /// Get the request body to send with the HTTP request + fn get_request_body(&self) -> Option { + self.body.as_ref().and_then(resolve_compiled_param) + } + /// Process the URL dynamically before each request fn process_url(&self, url: &Uri) -> Option { - // Early exit if there is no VRL to process - let query: &Query = &self.query; - if !query.has_vrl { + if !self.query.has_vrl { return None; } - let mut processed_query = HashMap::new(); - - for (param_name, compiled_value) in &query.compiled { - match compiled_value { - CompiledQueryParameterValue::SingleParam(compiled_param) => { - let result = match &compiled_param.program { - Some(prog) => resolve_vrl(&compiled_param.value, prog)?, - None => compiled_param.value.clone(), - }; - - processed_query.insert( - param_name.clone(), - QueryParameterValue::SingleParam(ParameterValue::String(result)), - ); - } - CompiledQueryParameterValue::MultiParams(compiled_params) => { - let mut results = Vec::new(); - - for param in compiled_params { - let result = match ¶m.program { - Some(p) => resolve_vrl(¶m.value, p)?, - None => param.value.clone(), - }; - results.push(ParameterValue::String(result)); + // Resolve all query parameters with VRL expressions + let processed_query: Option> = self + .query + .compiled + .iter() + .map(|(name, value)| { + let resolved = match value { + CompiledQueryParameterValue::SingleParam(param) => { + let result = resolve_compiled_param(param)?; + QueryParameterValue::SingleParam(ParameterValue::String(result)) } + CompiledQueryParameterValue::MultiParams(params) => { + let 
results: Option> = params + .iter() + .map(|p| resolve_compiled_param(p).map(ParameterValue::String)) + .collect(); + QueryParameterValue::MultiParams(results?) + } + }; + Some((name.clone(), resolved)) + }) + .collect(); - processed_query.insert( - param_name.clone(), - QueryParameterValue::MultiParams(results), - ); - } - }; - } - - // Extract the base URI without query parameters to avoid parameter duplication + // Build base URI and add query parameters let base_uri = Uri::builder() .scheme( url.scheme() @@ -521,7 +564,7 @@ impl http_client::HttpClientContext for HttpClientContext { .build() .ok()?; - Some(build_url(&base_uri, &processed_query)) + Some(build_url(&base_uri, &processed_query?)) } /// Enriches events with source_type, timestamp diff --git a/src/sources/http_client/integration_tests.rs b/src/sources/http_client/integration_tests.rs index f493323708163..bd2cc06595cfb 100644 --- a/src/sources/http_client/integration_tests.rs +++ b/src/sources/http_client/integration_tests.rs @@ -56,6 +56,7 @@ async fn invalid_endpoint() { framing: default_framing_message_based(), headers: HashMap::new(), method: HttpMethod::Get, + body: None, auth: None, tls: None, log_namespace: None, @@ -75,6 +76,7 @@ async fn collected_logs_bytes() { framing: default_framing_message_based(), headers: HashMap::new(), method: HttpMethod::Get, + body: None, auth: None, tls: None, log_namespace: None, @@ -100,6 +102,7 @@ async fn collected_logs_json() { framing: default_framing_message_based(), headers: HashMap::new(), method: HttpMethod::Get, + body: None, auth: None, tls: None, log_namespace: None, @@ -125,6 +128,7 @@ async fn collected_metrics_native_json() { framing: default_framing_message_based(), headers: HashMap::new(), method: HttpMethod::Get, + body: None, auth: None, tls: None, log_namespace: None, @@ -155,6 +159,7 @@ async fn collected_trace_native_json() { framing: default_framing_message_based(), headers: HashMap::new(), method: HttpMethod::Get, + body: None, auth: None, 
tls: None, log_namespace: None, @@ -180,6 +185,7 @@ async fn unauthorized_no_auth() { framing: default_framing_message_based(), headers: HashMap::new(), method: HttpMethod::Get, + body: None, auth: None, tls: None, log_namespace: None, @@ -199,6 +205,7 @@ async fn unauthorized_wrong_auth() { framing: default_framing_message_based(), headers: HashMap::new(), method: HttpMethod::Get, + body: None, tls: None, auth: Some(Auth::Basic { user: "white_rabbit".to_string(), @@ -221,6 +228,7 @@ async fn authorized() { framing: default_framing_message_based(), headers: HashMap::new(), method: HttpMethod::Get, + body: None, tls: None, auth: Some(Auth::Basic { user: "user".to_string(), @@ -243,6 +251,7 @@ async fn tls_invalid_ca() { framing: default_framing_message_based(), headers: HashMap::new(), method: HttpMethod::Get, + body: None, tls: Some(TlsConfig { ca_file: Some("tests/integration/http-client/data/certs/invalid-ca-cert.pem".into()), ..Default::default() @@ -265,6 +274,7 @@ async fn tls_valid() { framing: default_framing_message_based(), headers: HashMap::new(), method: HttpMethod::Get, + body: None, tls: Some(TlsConfig { ca_file: Some(tls::TEST_PEM_CA_PATH.into()), ..Default::default() @@ -288,6 +298,7 @@ async fn shutdown() { framing: default_framing_message_based(), headers: HashMap::new(), method: HttpMethod::Get, + body: None, tls: None, auth: None, log_namespace: None, diff --git a/src/sources/http_client/tests.rs b/src/sources/http_client/tests.rs index 1c917bcac6357..c5067c14959cc 100644 --- a/src/sources/http_client/tests.rs +++ b/src/sources/http_client/tests.rs @@ -99,6 +99,7 @@ async fn bytes_decoding() { framing: default_framing_message_based(), headers: HashMap::new(), method: HttpMethod::Get, + body: None, tls: None, auth: None, log_namespace: None, @@ -128,6 +129,7 @@ async fn json_decoding_newline_delimited() { framing: FramingConfig::NewlineDelimited(Default::default()), headers: HashMap::new(), method: HttpMethod::Get, + body: None, tls: None, auth: 
None, log_namespace: None, @@ -162,6 +164,7 @@ async fn json_decoding_character_delimited() { }), headers: HashMap::new(), method: HttpMethod::Get, + body: None, tls: None, auth: None, log_namespace: None, @@ -202,6 +205,7 @@ async fn request_query_applied() { framing: default_framing_message_based(), headers: HashMap::new(), method: HttpMethod::Get, + body: None, tls: None, auth: None, log_namespace: None, @@ -313,6 +317,7 @@ async fn request_query_vrl_applied() { framing: default_framing_message_based(), headers: HashMap::new(), method: HttpMethod::Get, + body: None, tls: None, auth: None, log_namespace: None, @@ -394,6 +399,7 @@ async fn request_query_vrl_dynamic_updates() { framing: default_framing_message_based(), headers: HashMap::new(), method: HttpMethod::Get, + body: None, tls: None, auth: None, log_namespace: None, @@ -461,6 +467,7 @@ async fn headers_applied() { vec!["bazz".to_string(), "bizz".to_string()], )]), method: HttpMethod::Get, + body: None, auth: None, tls: None, log_namespace: None, @@ -490,9 +497,256 @@ async fn accept_header_override() { framing: default_framing_message_based(), headers: HashMap::from([("ACCEPT".to_string(), vec!["application/json".to_string()])]), method: HttpMethod::Get, + body: None, auth: None, tls: None, log_namespace: None, }) .await; } + +/// POST request with JSON body data should send the body correctly +#[tokio::test] +async fn post_with_body() { + let (_guard, in_addr) = next_addr(); + + // Endpoint that echoes back the request body + let dummy_endpoint = warp::path!("endpoint") + .and(warp::post()) + .and(warp::header::exact("Content-Type", "application/json")) + .and(warp::body::bytes()) + .map(|body: bytes::Bytes| { + // Echo the body back as a string + String::from_utf8_lossy(&body).to_string() + }); + + tokio::spawn(warp::serve(dummy_endpoint).run(in_addr)); + wait_for_tcp(in_addr).await; + + let test_json = r#"{"key":"value","number":42}"#; + + let events = run_compliance(HttpClientConfig { + endpoint: 
format!("http://{in_addr}/endpoint"), + interval: INTERVAL, + timeout: TIMEOUT, + query: HashMap::new(), + decoding: DeserializerConfig::Json(Default::default()), + framing: default_framing_message_based(), + headers: HashMap::new(), + method: HttpMethod::Post, + body: Some(ParameterValue::String(test_json.to_string())), + tls: None, + auth: None, + log_namespace: None, + }) + .await; + + let logs: Vec<_> = events.into_iter().map(|event| event.into_log()).collect(); + + // Verify the body was echoed back correctly + for log in logs { + assert_eq!(log.get("key").unwrap().as_str().unwrap(), "value"); + let number = log.get("number").unwrap(); + match number { + vector_lib::event::Value::Integer(n) => assert_eq!(*n, 42), + _ => panic!("Expected integer value"), + } + } +} + +/// POST request without body should work as before +#[tokio::test] +async fn post_without_body() { + let (_guard, in_addr) = next_addr(); + + let dummy_endpoint = warp::path!("endpoint") + .and(warp::post()) + .map(|| r#"{"data": "success"}"#); + + tokio::spawn(warp::serve(dummy_endpoint).run(in_addr)); + wait_for_tcp(in_addr).await; + + run_compliance(HttpClientConfig { + endpoint: format!("http://{in_addr}/endpoint"), + interval: INTERVAL, + timeout: TIMEOUT, + query: HashMap::new(), + decoding: DeserializerConfig::Json(Default::default()), + framing: default_framing_message_based(), + headers: HashMap::new(), + method: HttpMethod::Post, + body: None, + tls: None, + auth: None, + log_namespace: None, + }) + .await; +} + +/// Custom Content-Type header should override the default +#[tokio::test] +async fn post_with_custom_content_type() { + let (_guard, in_addr) = next_addr(); + + let dummy_endpoint = warp::path!("endpoint") + .and(warp::post()) + .and(warp::header::exact("Content-Type", "text/plain")) + .map(|| r#"{"data": "success"}"#); + + tokio::spawn(warp::serve(dummy_endpoint).run(in_addr)); + wait_for_tcp(in_addr).await; + + run_compliance(HttpClientConfig { + endpoint: 
format!("http://{in_addr}/endpoint"), + interval: INTERVAL, + timeout: TIMEOUT, + query: HashMap::new(), + decoding: DeserializerConfig::Json(Default::default()), + framing: default_framing_message_based(), + headers: HashMap::from([("Content-Type".to_string(), vec!["text/plain".to_string()])]), + method: HttpMethod::Post, + body: Some(ParameterValue::String("plain text body".to_string())), + tls: None, + auth: None, + log_namespace: None, + }) + .await; +} + +/// POST request with VRL body should resolve correctly +#[tokio::test] +async fn post_with_vrl_body() { + let (_guard, in_addr) = next_addr(); + + let dummy_endpoint = warp::path!("endpoint") + .and(warp::post()) + .and(warp::header::exact("Content-Type", "application/json")) + .and(warp::body::bytes()) + .map(|body: bytes::Bytes| { + // Echo back the body as a string + String::from_utf8_lossy(&body).to_string() + }); + + tokio::spawn(warp::serve(dummy_endpoint).run(in_addr)); + wait_for_tcp(in_addr).await; + + let events = run_compliance(HttpClientConfig { + endpoint: format!("http://{in_addr}/endpoint"), + interval: INTERVAL, + timeout: TIMEOUT, + query: HashMap::new(), + decoding: DeserializerConfig::Json(Default::default()), + framing: default_framing_message_based(), + headers: HashMap::new(), + method: HttpMethod::Post, + body: Some(ParameterValue::Typed { + value: r#"encode_json({"message": upcase("hello"), "value": 42})"#.to_string(), + r#type: ParamType::Vrl, + }), + tls: None, + auth: None, + log_namespace: None, + }) + .await; + + let logs: Vec<_> = events.into_iter().map(|event| event.into_log()).collect(); + + // Verify VRL was evaluated correctly + for log in logs { + assert_eq!(log.get("message").unwrap().as_str().unwrap(), "HELLO"); + let value = log.get("value").unwrap(); + match value { + vector_lib::event::Value::Integer(n) => assert_eq!(*n, 42), + _ => panic!("Expected integer value"), + } + } +} + +/// VRL compilation errors in query parameters should fail the build +#[tokio::test] 
+async fn query_vrl_compilation_error() { + use crate::config::SourceConfig; + use vector_lib::source_sender::SourceSender; + + let config = HttpClientConfig { + endpoint: "http://localhost:9999/endpoint".to_string(), + interval: INTERVAL, + timeout: TIMEOUT, + query: HashMap::from([( + "bad_vrl".to_string(), + QueryParameterValue::SingleParam(ParameterValue::Typed { + value: "this_function_does_not_exist()".to_string(), + r#type: ParamType::Vrl, + }), + )]), + decoding: DeserializerConfig::Json(Default::default()), + framing: default_framing_message_based(), + headers: HashMap::new(), + method: HttpMethod::Get, + body: None, + tls: None, + auth: None, + log_namespace: None, + }; + + // Attempt to build the source - should fail + let (tx, _rx) = SourceSender::new_test(); + let cx = crate::config::SourceContext::new_test(tx, None); + let result = config.build(cx).await; + + // Verify it fails with a VRL compilation error + match result { + Err(err) => { + let err_msg = err.to_string(); + assert!( + err_msg.contains("VRL compilation failed"), + "Expected VRL compilation error, got: {}", + err_msg + ); + } + Ok(_) => panic!("Expected build to fail with VRL compilation error, but it succeeded"), + } +} + +/// VRL compilation errors in request body should fail the build +#[tokio::test] +async fn body_vrl_compilation_error() { + use crate::config::SourceConfig; + use vector_lib::source_sender::SourceSender; + + let config = HttpClientConfig { + endpoint: "http://localhost:9999/endpoint".to_string(), + interval: INTERVAL, + timeout: TIMEOUT, + query: HashMap::new(), + decoding: DeserializerConfig::Json(Default::default()), + framing: default_framing_message_based(), + headers: HashMap::new(), + method: HttpMethod::Post, + body: Some(ParameterValue::Typed { + value: "invalid_vrl_syntax((".to_string(), + r#type: ParamType::Vrl, + }), + tls: None, + auth: None, + log_namespace: None, + }; + + // Attempt to build the source - should fail + let (tx, _rx) = 
SourceSender::new_test(); + let cx = crate::config::SourceContext::new_test(tx, None); + let result = config.build(cx).await; + + // Verify it fails with a VRL compilation error + match result { + Err(err) => { + let err_msg = err.to_string(); + assert!( + err_msg.contains("VRL compilation failed"), + "Expected VRL compilation error, got: {}", + err_msg + ); + } + Ok(_) => panic!("Expected build to fail with VRL compilation error, but it succeeded"), + } +} diff --git a/src/sources/util/http_client.rs b/src/sources/util/http_client.rs index 4ff9eb911e433..4c302ef049c4d 100644 --- a/src/sources/util/http_client.rs +++ b/src/sources/util/http_client.rs @@ -86,6 +86,12 @@ pub(crate) trait HttpClientContext { None } + /// (Optional) Get the request body to send with the HTTP request. + /// Returns the body as a String if one should be sent, or None for an empty body. + fn get_request_body(&self) -> Option { + None + } + // This function can be defined to enrich events with additional HTTP // metadata. This function should be used rather than internal enrichment so // that accurate byte count metrics can be emitted. 
@@ -190,8 +196,23 @@ pub(crate) async fn call< builder = builder.header(http::header::ACCEPT, &inputs.content_type); } - // building an empty request should be infallible - let mut request = builder.body(Body::empty()).expect("error creating request"); + // Get the request body from the context (if any) + let body = match context.get_request_body() { + Some(body_str) => { + // Set Content-Type header if not already set + if !inputs + .headers + .contains_key(http::header::CONTENT_TYPE.as_str()) + { + builder = builder.header(http::header::CONTENT_TYPE, "application/json"); + } + Body::from(body_str) + } + None => Body::empty(), + }; + + // building the request should be infallible + let mut request = builder.body(body).expect("error creating request"); if let Some(auth) = &inputs.auth { auth.apply(&mut request); diff --git a/website/cue/reference/components/sinks/generated/elasticsearch.cue b/website/cue/reference/components/sinks/generated/elasticsearch.cue index 7535f22981637..b2659900df149 100644 --- a/website/cue/reference/components/sinks/generated/elasticsearch.cue +++ b/website/cue/reference/components/sinks/generated/elasticsearch.cue @@ -623,13 +623,13 @@ generated: components: sinks: elasticsearch: configuration: { type: { object: options: { type: { - description: "The type of the parameter, indicating how the `value` should be treated." + description: "The parameter type, indicating how the `value` should be treated." required: false type: string: { default: "string" enum: { string: "The parameter value is a plain string." - vrl: "The parameter value is a VRL expression that will be evaluated before each request." + vrl: "The parameter value is a VRL expression that is evaluated before each request." 
} } } diff --git a/website/cue/reference/components/sources/generated/http_client.cue b/website/cue/reference/components/sources/generated/http_client.cue index 4034101bf6958..6631650e47fb5 100644 --- a/website/cue/reference/components/sources/generated/http_client.cue +++ b/website/cue/reference/components/sources/generated/http_client.cue @@ -180,6 +180,38 @@ generated: components: sources: http_client: configuration: { } } } + body: { + description: """ + Raw data to send as the HTTP request body. + + Can be a static string or a VRL expression. + + When a body is provided, the `Content-Type` header is automatically set to + `application/json` unless explicitly overridden in the `headers` configuration. + """ + required: false + type: { + object: options: { + type: { + description: "The parameter type, indicating how the `value` should be treated." + required: false + type: string: { + default: "string" + enum: { + string: "The parameter value is a plain string." + vrl: "The parameter value is a VRL expression that is evaluated before each request." + } + } + } + value: { + description: "The raw value of the parameter." + required: true + type: string: {} + } + } + string: {} + } + } decoding: { description: """ Configures how events are decoded from raw bytes. Note some decoders can also determine the event output @@ -750,13 +782,13 @@ generated: components: sources: http_client: configuration: { type: { object: options: { type: { - description: "The type of the parameter, indicating how the `value` should be treated." + description: "The parameter type, indicating how the `value` should be treated." required: false type: string: { default: "string" enum: { string: "The parameter value is a plain string." - vrl: "The parameter value is a VRL expression that will be evaluated before each request." + vrl: "The parameter value is a VRL expression that is evaluated before each request." 
} } } diff --git a/website/cue/reference/components/sources/generated/prometheus_scrape.cue b/website/cue/reference/components/sources/generated/prometheus_scrape.cue index 897949bab1ef4..e6685e12af0e5 100644 --- a/website/cue/reference/components/sources/generated/prometheus_scrape.cue +++ b/website/cue/reference/components/sources/generated/prometheus_scrape.cue @@ -239,13 +239,13 @@ generated: components: sources: prometheus_scrape: configuration: { type: { object: options: { type: { - description: "The type of the parameter, indicating how the `value` should be treated." + description: "The parameter type, indicating how the `value` should be treated." required: false type: string: { default: "string" enum: { string: "The parameter value is a plain string." - vrl: "The parameter value is a VRL expression that will be evaluated before each request." + vrl: "The parameter value is a VRL expression that is evaluated before each request." } } } diff --git a/website/cue/reference/components/sources/http_client.cue b/website/cue/reference/components/sources/http_client.cue index f92569e485555..ed3b1937fe629 100644 --- a/website/cue/reference/components/sources/http_client.cue +++ b/website/cue/reference/components/sources/http_client.cue @@ -158,6 +158,39 @@ components: sources: http_client: { - mango - papaya - kiwi + start_time: + type: vrl + value: "now()" + ``` + """ + } + request_body_generation: { + title: "Request Body Generation" + body: """ + The request body can be a static string or a dynamic value generated via VRL. + Using VRL allows you to construct JSON payloads or other formats dynamically at request time. + + When a body is provided, the `Content-Type` header is automatically set to + `application/json` unless explicitly overridden in the `headers` configuration. + + **Static Body** + + ```yaml + body: "{"foo": "bar"}" + ``` + + **Dynamic VRL Body** + + When `type` is set to `vrl`, the `value` is evaluated as a VRL expression. 
The result is used as the request body. + + ```yaml + body: + type: vrl + value: | + encode_json({ + "searchStatements": [{"column": "auditAction", "operator": "=", "value": "DELETE"}], + "timestamp": now() + }) ``` """ } From 3e5d0acfd71e096e156a72ca1f6977904cf28cf9 Mon Sep 17 00:00:00 2001 From: Thomas Date: Mon, 15 Dec 2025 16:45:58 -0500 Subject: [PATCH 153/227] fix(dev): fix unused function warning with websocket source/sink (#24383) --- src/common/websocket.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/common/websocket.rs b/src/common/websocket.rs index dab85c0a922be..9e9e2091190d7 100644 --- a/src/common/websocket.rs +++ b/src/common/websocket.rs @@ -121,6 +121,7 @@ impl WebSocketConnector { Ok(ws_stream) } + #[cfg(feature = "sinks-websocket")] pub(crate) async fn connect_backoff(&self) -> WebSocketStream> { let mut backoff = ExponentialBackoff::default(); @@ -142,6 +143,7 @@ impl WebSocketConnector { /// Connects with exponential backoff, applying a timeout to each individual connection attempt. /// This will retry forever until a connection is established. 
+ #[cfg(feature = "sources-websocket")] pub(crate) async fn connect_backoff_with_timeout( &self, timeout_duration: Duration, From 75dda72c84144d377cdf11610856fd1b0c4285f2 Mon Sep 17 00:00:00 2001 From: Thomas Date: Mon, 15 Dec 2025 17:40:17 -0500 Subject: [PATCH 154/227] chore(ci): skip mold/changes when downloading only vdev (#24385) perf(ci): skip mold/changes installation when downloading only vdev --- .github/workflows/changes.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/workflows/changes.yml b/.github/workflows/changes.yml index e3315c657198b..dd765e975dfc3 100644 --- a/.github/workflows/changes.yml +++ b/.github/workflows/changes.yml @@ -328,6 +328,8 @@ jobs: - uses: ./.github/actions/setup with: vdev: true + mold: false + cargo-cache: false # creates a yaml file that contains the filters for each integration, # extracted from the output of the `vdev int ci-paths` command, which @@ -417,6 +419,8 @@ jobs: - uses: ./.github/actions/setup with: vdev: true + mold: false + cargo-cache: false # creates a yaml file that contains the filters for each test, # extracted from the output of the `vdev int ci-paths` command, which From d5dbab97ba2ba4279b361aa8ec63cd698729ec0b Mon Sep 17 00:00:00 2001 From: Thomas Date: Tue, 16 Dec 2025 13:24:18 -0500 Subject: [PATCH 155/227] chore(releasing): v0.52.0 (#24388) * chore(releasing): prepare v0.52.0 release (#24367) * chore(releasing): Pinned VRL version to 0.29.0 * chore(releasing): Generated release CUE file * chore(releasing): Updated website/cue/reference/administration/interfaces/kubectl.cue vector version to 0.52.0 * chore(releasing): Updated distribution/install.sh vector version to 0.52.0 * chore(releasing): Add 0.52.0 to versions.cue * chore(releasing): Created release md file * Fix date * Fix whitespace * Add release highlights * Add http_client changes + missing shas to the changelog * fix(website): Fix indentation in 0.52.0 changelog * cargo vdev build manifests * Bump version in Cargo.toml * 
Use VRL main branch --- Cargo.lock | 4 +- Cargo.toml | 2 +- .../14758_http_client_body.enhancement.md | 3 - .../15687_syslog_udp_received_metrics.fix.md | 9 - changelog.d/18068_journalctl_all_boots.fix.md | 9 - changelog.d/21037_amqp_prefetch.feature.md | 3 - .../23146_tls_fips_compliance.enhancement.md | 5 - ...dation_option_to_gelf_codec.enhancement.md | 10 - ...ws_cloudwatch_metric_resolution.feature.md | 3 - .../23922_websocket_source_reconnect.fix.md | 3 - ...27_file_source_multi_char_delimiter.fix.md | 3 - .../24063_retry_docker_logs_client.feature.md | 3 - .../24074_arrow_batch_codec.feature.md | 6 - .../24074_arrow_null_handling.enhancement.md | 6 - .../24081_memory_tables_in_tests.fix.md | 3 - changelog.d/24271_mongodb_upgrade.breaking.md | 3 - ...281_blackhole_sink_acknowledgements.fix.md | 3 - .../add-trace-to-log-transform.feature.md | 3 - ...d_custom_auth_strategy_http.enhancement.md | 3 - ...able_interpolate_env_var_switch.feature.md | 3 - ...s_s3_processing_duration_metric.feature.md | 3 - changelog.d/axiom_regional_edges.feature.md | 3 - .../datadog-agent-timeout.enhancement.md | 7 - .../http_client_vrl_compilation_errors.fix.md | 5 - ...e-sender-utilization-metric.enhancement.md | 9 - ...m-buffer-utilization-metric.enhancement.md | 8 - distribution/install.sh | 2 +- .../kubernetes/vector-agent/README.md | 2 +- .../kubernetes/vector-agent/configmap.yaml | 2 +- .../kubernetes/vector-agent/daemonset.yaml | 4 +- .../kubernetes/vector-agent/rbac.yaml | 4 +- .../vector-agent/service-headless.yaml | 2 +- .../vector-agent/serviceaccount.yaml | 2 +- .../kubernetes/vector-aggregator/README.md | 2 +- .../vector-aggregator/configmap.yaml | 2 +- .../vector-aggregator/service-headless.yaml | 2 +- .../kubernetes/vector-aggregator/service.yaml | 2 +- .../vector-aggregator/serviceaccount.yaml | 2 +- .../vector-aggregator/statefulset.yaml | 4 +- .../vector-stateless-aggregator/README.md | 2 +- .../configmap.yaml | 2 +- .../deployment.yaml | 4 +- 
.../service-headless.yaml | 2 +- .../vector-stateless-aggregator/service.yaml | 2 +- .../serviceaccount.yaml | 2 +- website/content/en/releases/0.52.0.md | 4 + .../administration/interfaces/kubectl.cue | 2 +- website/cue/reference/releases/0.52.0.cue | 372 ++++++++++++++++++ website/cue/reference/urls.cue | 1 + website/cue/reference/versions.cue | 1 + 50 files changed, 405 insertions(+), 143 deletions(-) delete mode 100644 changelog.d/14758_http_client_body.enhancement.md delete mode 100644 changelog.d/15687_syslog_udp_received_metrics.fix.md delete mode 100644 changelog.d/18068_journalctl_all_boots.fix.md delete mode 100644 changelog.d/21037_amqp_prefetch.feature.md delete mode 100644 changelog.d/23146_tls_fips_compliance.enhancement.md delete mode 100644 changelog.d/23458_add_validation_option_to_gelf_codec.enhancement.md delete mode 100644 changelog.d/23821_aws_cloudwatch_metric_resolution.feature.md delete mode 100644 changelog.d/23922_websocket_source_reconnect.fix.md delete mode 100644 changelog.d/24027_file_source_multi_char_delimiter.fix.md delete mode 100644 changelog.d/24063_retry_docker_logs_client.feature.md delete mode 100644 changelog.d/24074_arrow_batch_codec.feature.md delete mode 100644 changelog.d/24074_arrow_null_handling.enhancement.md delete mode 100644 changelog.d/24081_memory_tables_in_tests.fix.md delete mode 100644 changelog.d/24271_mongodb_upgrade.breaking.md delete mode 100644 changelog.d/24281_blackhole_sink_acknowledgements.fix.md delete mode 100644 changelog.d/add-trace-to-log-transform.feature.md delete mode 100644 changelog.d/add_custom_auth_strategy_http.enhancement.md delete mode 100644 changelog.d/add_disable_interpolate_env_var_switch.feature.md delete mode 100644 changelog.d/aws_s3_processing_duration_metric.feature.md delete mode 100644 changelog.d/axiom_regional_edges.feature.md delete mode 100644 changelog.d/datadog-agent-timeout.enhancement.md delete mode 100644 changelog.d/http_client_vrl_compilation_errors.fix.md delete 
mode 100644 changelog.d/source-sender-utilization-metric.enhancement.md delete mode 100644 changelog.d/transform-buffer-utilization-metric.enhancement.md create mode 100644 website/content/en/releases/0.52.0.md create mode 100644 website/cue/reference/releases/0.52.0.cue diff --git a/Cargo.lock b/Cargo.lock index dce114e7f9db1..307a4bc90fa1a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -12508,7 +12508,7 @@ dependencies = [ [[package]] name = "vector" -version = "0.52.0" +version = "0.53.0" dependencies = [ "apache-avro 0.16.0", "approx", @@ -13106,7 +13106,7 @@ checksum = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d" [[package]] name = "vrl" version = "0.29.0" -source = "git+https://github.com/vectordotdev/vrl.git?branch=main#195f1ed6a334978a8b5d318030b606a902973351" +source = "git+https://github.com/vectordotdev/vrl.git?branch=main#53f01dfa6226fb3f4093d1bf838319c522f2dcad" dependencies = [ "aes", "aes-siv", diff --git a/Cargo.toml b/Cargo.toml index fa99780782eb1..c49d49d589636 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "vector" -version = "0.52.0" +version = "0.53.0" authors = ["Vector Contributors "] edition = "2024" description = "A lightweight and ultra-fast tool for building observability pipelines" diff --git a/changelog.d/14758_http_client_body.enhancement.md b/changelog.d/14758_http_client_body.enhancement.md deleted file mode 100644 index 247842fec66c4..0000000000000 --- a/changelog.d/14758_http_client_body.enhancement.md +++ /dev/null @@ -1,3 +0,0 @@ -The `http_client` source now supports the `body` parameter. VRL is also supported in the body which allows a dynamic request body to be generated. 
- -authors: benjamin-awd diff --git a/changelog.d/15687_syslog_udp_received_metrics.fix.md b/changelog.d/15687_syslog_udp_received_metrics.fix.md deleted file mode 100644 index cd4805bc0675e..0000000000000 --- a/changelog.d/15687_syslog_udp_received_metrics.fix.md +++ /dev/null @@ -1,9 +0,0 @@ -The `syslog` source in UDP mode now emits the standard "received" metrics, aligning behavior with TCP and the Component Specification: - -- `component_received_events_total` -- `component_received_event_bytes_total` -- `component_received_bytes_total` - -This makes internal telemetry consistent and restores compliance checks for UDP syslog. - -authors: sghall diff --git a/changelog.d/18068_journalctl_all_boots.fix.md b/changelog.d/18068_journalctl_all_boots.fix.md deleted file mode 100644 index a4a0fb1f0b3c8..0000000000000 --- a/changelog.d/18068_journalctl_all_boots.fix.md +++ /dev/null @@ -1,9 +0,0 @@ -The `journald` source now correctly respects the `current_boot_only: false` setting on systemd versions >= 258. - -Compatibility notes: - -- **systemd < 250**: Both `current_boot_only: true` and `false` work correctly -- **systemd 250-257**: Due to systemd limitations, `current_boot_only: false` will not work. An error will be raised on startup. -- **systemd >= 258**: Both settings work correctly - -authors: bachorp diff --git a/changelog.d/21037_amqp_prefetch.feature.md b/changelog.d/21037_amqp_prefetch.feature.md deleted file mode 100644 index 359bc56eab7e5..0000000000000 --- a/changelog.d/21037_amqp_prefetch.feature.md +++ /dev/null @@ -1,3 +0,0 @@ -Added a new `prefetch_count` option to the AMQP source configuration. This allows limiting the number of in-flight (unacknowledged) messages per consumer using RabbitMQ's prefetch mechanism (`basic.qos`). Setting this value helps control memory usage and load when processing messages slowly. 
- -authors: elkh510 diff --git a/changelog.d/23146_tls_fips_compliance.enhancement.md b/changelog.d/23146_tls_fips_compliance.enhancement.md deleted file mode 100644 index da26a5cd67e09..0000000000000 --- a/changelog.d/23146_tls_fips_compliance.enhancement.md +++ /dev/null @@ -1,5 +0,0 @@ -Vector's TLS implementation now stores credentials in PEM format internally instead of PKCS12, enabling FIPS-compliant operation in -environments with strict cryptographic requirements. This change is transparent to users - both PEM and PKCS12 certificate files continue to -be supported as configuration inputs, with PKCS12 files automatically converted at load time. - -authors: rf-ben diff --git a/changelog.d/23458_add_validation_option_to_gelf_codec.enhancement.md b/changelog.d/23458_add_validation_option_to_gelf_codec.enhancement.md deleted file mode 100644 index 169a703c32d15..0000000000000 --- a/changelog.d/23458_add_validation_option_to_gelf_codec.enhancement.md +++ /dev/null @@ -1,10 +0,0 @@ -The GELF decoder now supports a `validation` option with two modes: `strict` (default) and `relaxed`. When set to `relaxed`, the decoder will accept: - -- GELF versions other than 1.1 -- Additional fields without underscore prefixes -- Additional field names with special characters -- Additional field values of any type (not just strings/numbers) - -This allows Vector to parse GELF messages from sources that don't strictly follow the GELF specification. - -authors: ds-hystax diff --git a/changelog.d/23821_aws_cloudwatch_metric_resolution.feature.md b/changelog.d/23821_aws_cloudwatch_metric_resolution.feature.md deleted file mode 100644 index a53f8095053b4..0000000000000 --- a/changelog.d/23821_aws_cloudwatch_metric_resolution.feature.md +++ /dev/null @@ -1,3 +0,0 @@ -Add AWS CloudWatch Metrics sink `storage_resolution` config. 
- -authors: trxcllnt diff --git a/changelog.d/23922_websocket_source_reconnect.fix.md b/changelog.d/23922_websocket_source_reconnect.fix.md deleted file mode 100644 index a70b859079d02..0000000000000 --- a/changelog.d/23922_websocket_source_reconnect.fix.md +++ /dev/null @@ -1,3 +0,0 @@ -Fixed the `websocket` source entering a "zombie" state when the `connect_timeout_secs` threshold was reached with multiple sources running. The connection timeout is now applied per connect attempt with indefinite retries, rather than as a total timeout limit. - -authors: benjamin-awd diff --git a/changelog.d/24027_file_source_multi_char_delimiter.fix.md b/changelog.d/24027_file_source_multi_char_delimiter.fix.md deleted file mode 100644 index bd94e17d6355e..0000000000000 --- a/changelog.d/24027_file_source_multi_char_delimiter.fix.md +++ /dev/null @@ -1,3 +0,0 @@ -Fixed a bug in the `file` source, which could silently corrupt data when using multi-char delimiters. - -authors: lfrancke diff --git a/changelog.d/24063_retry_docker_logs_client.feature.md b/changelog.d/24063_retry_docker_logs_client.feature.md deleted file mode 100644 index 6b61f1e9e1fb0..0000000000000 --- a/changelog.d/24063_retry_docker_logs_client.feature.md +++ /dev/null @@ -1,3 +0,0 @@ -The `docker_logs` source now includes exponential backoff retry logic for Docker daemon communication failures, with indefinite retry capability. This improves reliability when working with slow or temporarily unresponsive Docker daemons by retrying with increasing delays instead of immediately stopping. 
- -authors: titaneric diff --git a/changelog.d/24074_arrow_batch_codec.feature.md b/changelog.d/24074_arrow_batch_codec.feature.md deleted file mode 100644 index f9624481b2947..0000000000000 --- a/changelog.d/24074_arrow_batch_codec.feature.md +++ /dev/null @@ -1,6 +0,0 @@ -A generic [Apache Arrow](https://arrow.apache.org/) codec has been added to -support [Arrow IPC](https://arrow.apache.org/docs/format/Columnar.html#ipc-streaming-format) serialization across Vector. This enables sinks -like the `clickhouse` sink to use the ArrowStream format endpoint with significantly better performance and smaller payload sizes compared -to JSON-based formats. - -authors: benjamin-awd diff --git a/changelog.d/24074_arrow_null_handling.enhancement.md b/changelog.d/24074_arrow_null_handling.enhancement.md deleted file mode 100644 index 3245272e042e1..0000000000000 --- a/changelog.d/24074_arrow_null_handling.enhancement.md +++ /dev/null @@ -1,6 +0,0 @@ -The Arrow encoder now supports configurable null handling through the `allow_nullable_fields` -option. This controls whether nullable fields should be explicitly marked -as nullable in the Arrow schema, enabling better compatibility with -downstream systems that have specific requirements for null handling. - -authors: benjamin-awd diff --git a/changelog.d/24081_memory_tables_in_tests.fix.md b/changelog.d/24081_memory_tables_in_tests.fix.md deleted file mode 100644 index 0a1d5195c047a..0000000000000 --- a/changelog.d/24081_memory_tables_in_tests.fix.md +++ /dev/null @@ -1,3 +0,0 @@ -Fixed an issue in vector tests where memory enrichment tables would report missing components errors. - -authors: esensar Quad9DNS diff --git a/changelog.d/24271_mongodb_upgrade.breaking.md b/changelog.d/24271_mongodb_upgrade.breaking.md deleted file mode 100644 index 626df09e468b3..0000000000000 --- a/changelog.d/24271_mongodb_upgrade.breaking.md +++ /dev/null @@ -1,3 +0,0 @@ -The `mongodb_metrics` source now requires MongoDB Server 4.2 or later. 
MongoDB Server 4.0, the previously supported minimum version, reached end-of-life on April 30, 2022. - -authors: thomasqueirozb diff --git a/changelog.d/24281_blackhole_sink_acknowledgements.fix.md b/changelog.d/24281_blackhole_sink_acknowledgements.fix.md deleted file mode 100644 index d6b5c58b9cf1c..0000000000000 --- a/changelog.d/24281_blackhole_sink_acknowledgements.fix.md +++ /dev/null @@ -1,3 +0,0 @@ -Fixed the blackhole sink to properly implement end-to-end acknowledgements. Previously, the sink consumed events without updating finalizer status, causing sources that depend on acknowledgements (like `aws_s3` with SQS) to never delete processed messages from the queue. - -authors: sanjams2 diff --git a/changelog.d/add-trace-to-log-transform.feature.md b/changelog.d/add-trace-to-log-transform.feature.md deleted file mode 100644 index 161fea308624a..0000000000000 --- a/changelog.d/add-trace-to-log-transform.feature.md +++ /dev/null @@ -1,3 +0,0 @@ -Introduced `trace_to_log` transform that allows converting traces to logs. - -authors: huevosabio diff --git a/changelog.d/add_custom_auth_strategy_http.enhancement.md b/changelog.d/add_custom_auth_strategy_http.enhancement.md deleted file mode 100644 index f5cc36fcadd78..0000000000000 --- a/changelog.d/add_custom_auth_strategy_http.enhancement.md +++ /dev/null @@ -1,3 +0,0 @@ -Added a new "Custom Authorization" HTTP auth strategy, allowing users to configure a custom HTTP Authorization Header - -authors: arunpidugu diff --git a/changelog.d/add_disable_interpolate_env_var_switch.feature.md b/changelog.d/add_disable_interpolate_env_var_switch.feature.md deleted file mode 100644 index ad7bb3eb9d86b..0000000000000 --- a/changelog.d/add_disable_interpolate_env_var_switch.feature.md +++ /dev/null @@ -1,3 +0,0 @@ -Added `--disable-env-var-interpolation` CLI option to prevent environment variable interpolation. The `VECTOR_DISABLE_ENV_VAR_INTERPOLATION` environment variable can also be used to disable interpolation. 
- -authors: graphcareful diff --git a/changelog.d/aws_s3_processing_duration_metric.feature.md b/changelog.d/aws_s3_processing_duration_metric.feature.md deleted file mode 100644 index 43a4c9bbe57ba..0000000000000 --- a/changelog.d/aws_s3_processing_duration_metric.feature.md +++ /dev/null @@ -1,3 +0,0 @@ -The `aws_s3` source now emits histogram metrics to track S3 object processing times: `s3_object_processing_succeeded_duration_seconds` for successful processing and `s3_object_processing_failed_duration_seconds` for failed processing. These measure the full processing pipeline including download, decompression, and parsing. Both metrics include a `bucket` label to help identify slow buckets. - -authors: sanjams2 diff --git a/changelog.d/axiom_regional_edges.feature.md b/changelog.d/axiom_regional_edges.feature.md deleted file mode 100644 index 8cda67ffdf8d8..0000000000000 --- a/changelog.d/axiom_regional_edges.feature.md +++ /dev/null @@ -1,3 +0,0 @@ -The `axiom` sink now supports regional edges for data locality. A new optional `region` configuration field allows you to specify the regional edge domain (e.g., `eu-central-1.aws.edge.axiom.co`). When configured, data is sent to `https://{region}/v1/ingest/{dataset}`. The `url` field now intelligently handles paths: URLs with custom paths are used as-is, while URLs without paths maintain backwards compatibility by appending `/v1/datasets/{dataset}/ingest`. - -authors: toppercodes diff --git a/changelog.d/datadog-agent-timeout.enhancement.md b/changelog.d/datadog-agent-timeout.enhancement.md deleted file mode 100644 index a93475c1840f9..0000000000000 --- a/changelog.d/datadog-agent-timeout.enhancement.md +++ /dev/null @@ -1,7 +0,0 @@ -Added support for configurable request timeouts to the `datadog_agent` source. 
- - This change also introduces two new internal metrics: - - `component_timed_out_events_total` - Counter tracking the number of events that timed out - - `component_timed_out_requests_total` - Counter tracking the number of requests that timed out - -authors: bruceg diff --git a/changelog.d/http_client_vrl_compilation_errors.fix.md b/changelog.d/http_client_vrl_compilation_errors.fix.md deleted file mode 100644 index 45a9aaf2063bc..0000000000000 --- a/changelog.d/http_client_vrl_compilation_errors.fix.md +++ /dev/null @@ -1,5 +0,0 @@ -The `http_client` source now fails to start if VRL compilation errors occur in `query` parameters when -type is set to `vrl`, instead of silently logging a warning and continuing with invalid expressions. -This prevents unexpected behavior where malformed VRL would be sent as literal strings in HTTP requests. - -authors: thomasqueirozb diff --git a/changelog.d/source-sender-utilization-metric.enhancement.md b/changelog.d/source-sender-utilization-metric.enhancement.md deleted file mode 100644 index 8ad76d9c1af8a..0000000000000 --- a/changelog.d/source-sender-utilization-metric.enhancement.md +++ /dev/null @@ -1,9 +0,0 @@ -Added the following metrics to record the utilization level of the buffer that -all sources send into: - -- `source_buffer_max_byte_size` -- `source_buffer_max_event_size` -- `source_buffer_utilization` -- `source_buffer_utilization_level` - -authors: bruceg diff --git a/changelog.d/transform-buffer-utilization-metric.enhancement.md b/changelog.d/transform-buffer-utilization-metric.enhancement.md deleted file mode 100644 index 10242092c8956..0000000000000 --- a/changelog.d/transform-buffer-utilization-metric.enhancement.md +++ /dev/null @@ -1,8 +0,0 @@ -Added metrics to record the utilization level of the buffers that each transform receives from: - -- `transform_buffer_max_byte_size` -- `transform_buffer_max_event_size` -- `transform_buffer_utilization` -- `transform_buffer_utilization_level` - -authors: bruceg 
diff --git a/distribution/install.sh b/distribution/install.sh index 38d765d203645..306d840567a07 100755 --- a/distribution/install.sh +++ b/distribution/install.sh @@ -13,7 +13,7 @@ set -u # If PACKAGE_ROOT is unset or empty, default it. PACKAGE_ROOT="${PACKAGE_ROOT:-"https://packages.timber.io/vector"}" # If VECTOR_VERSION is unset or empty, default it. -VECTOR_VERSION="${VECTOR_VERSION:-"0.51.1"}" +VECTOR_VERSION="${VECTOR_VERSION:-"0.52.0"}" _divider="--------------------------------------------------------------------------------" _prompt=">>>" _indent=" " diff --git a/distribution/kubernetes/vector-agent/README.md b/distribution/kubernetes/vector-agent/README.md index fcb899a9d6e76..b2759c3e38b1a 100644 --- a/distribution/kubernetes/vector-agent/README.md +++ b/distribution/kubernetes/vector-agent/README.md @@ -1,6 +1,6 @@ The kubernetes manifests found in this directory have been automatically generated from the [helm chart `vector/vector`](https://github.com/vectordotdev/helm-charts/tree/master/charts/vector) -version 0.48.0 with the following `values.yaml`: +version 0.49.0 with the following `values.yaml`: ```yaml role: Agent diff --git a/distribution/kubernetes/vector-agent/configmap.yaml b/distribution/kubernetes/vector-agent/configmap.yaml index 366b0bab698f4..c10ce04727f3d 100644 --- a/distribution/kubernetes/vector-agent/configmap.yaml +++ b/distribution/kubernetes/vector-agent/configmap.yaml @@ -9,7 +9,7 @@ metadata: app.kubernetes.io/name: vector app.kubernetes.io/instance: vector app.kubernetes.io/component: Agent - app.kubernetes.io/version: "0.51.1-distroless-libc" + app.kubernetes.io/version: "0.52.0-distroless-libc" data: agent.yaml: | data_dir: /vector-data-dir diff --git a/distribution/kubernetes/vector-agent/daemonset.yaml b/distribution/kubernetes/vector-agent/daemonset.yaml index fee139123abd7..1f45a204e0458 100644 --- a/distribution/kubernetes/vector-agent/daemonset.yaml +++ b/distribution/kubernetes/vector-agent/daemonset.yaml @@ -9,7 
+9,7 @@ metadata: app.kubernetes.io/name: vector app.kubernetes.io/instance: vector app.kubernetes.io/component: Agent - app.kubernetes.io/version: "0.51.1-distroless-libc" + app.kubernetes.io/version: "0.52.0-distroless-libc" spec: selector: matchLabels: @@ -30,7 +30,7 @@ spec: dnsPolicy: ClusterFirst containers: - name: vector - image: "timberio/vector:0.51.1-distroless-libc" + image: "timberio/vector:0.52.0-distroless-libc" imagePullPolicy: IfNotPresent args: - --config-dir diff --git a/distribution/kubernetes/vector-agent/rbac.yaml b/distribution/kubernetes/vector-agent/rbac.yaml index 7da5ea476d22b..8b0c43b3e5a8c 100644 --- a/distribution/kubernetes/vector-agent/rbac.yaml +++ b/distribution/kubernetes/vector-agent/rbac.yaml @@ -10,7 +10,7 @@ metadata: app.kubernetes.io/name: vector app.kubernetes.io/instance: vector app.kubernetes.io/component: Agent - app.kubernetes.io/version: "0.51.1-distroless-libc" + app.kubernetes.io/version: "0.52.0-distroless-libc" rules: - apiGroups: - "" @@ -31,7 +31,7 @@ metadata: app.kubernetes.io/name: vector app.kubernetes.io/instance: vector app.kubernetes.io/component: Agent - app.kubernetes.io/version: "0.51.1-distroless-libc" + app.kubernetes.io/version: "0.52.0-distroless-libc" roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole diff --git a/distribution/kubernetes/vector-agent/service-headless.yaml b/distribution/kubernetes/vector-agent/service-headless.yaml index 92b00baf8e3a2..88ffaa3c22b5a 100644 --- a/distribution/kubernetes/vector-agent/service-headless.yaml +++ b/distribution/kubernetes/vector-agent/service-headless.yaml @@ -9,7 +9,7 @@ metadata: app.kubernetes.io/name: vector app.kubernetes.io/instance: vector app.kubernetes.io/component: Agent - app.kubernetes.io/version: "0.51.1-distroless-libc" + app.kubernetes.io/version: "0.52.0-distroless-libc" annotations: spec: clusterIP: None diff --git a/distribution/kubernetes/vector-agent/serviceaccount.yaml 
b/distribution/kubernetes/vector-agent/serviceaccount.yaml index 9aab203b7da64..003a3240da320 100644 --- a/distribution/kubernetes/vector-agent/serviceaccount.yaml +++ b/distribution/kubernetes/vector-agent/serviceaccount.yaml @@ -9,5 +9,5 @@ metadata: app.kubernetes.io/name: vector app.kubernetes.io/instance: vector app.kubernetes.io/component: Agent - app.kubernetes.io/version: "0.51.1-distroless-libc" + app.kubernetes.io/version: "0.52.0-distroless-libc" automountServiceAccountToken: true diff --git a/distribution/kubernetes/vector-aggregator/README.md b/distribution/kubernetes/vector-aggregator/README.md index 0b5ec7a5469b4..6075bd3950758 100644 --- a/distribution/kubernetes/vector-aggregator/README.md +++ b/distribution/kubernetes/vector-aggregator/README.md @@ -1,6 +1,6 @@ The kubernetes manifests found in this directory have been automatically generated from the [helm chart `vector/vector`](https://github.com/vectordotdev/helm-charts/tree/master/charts/vector) -version 0.48.0 with the following `values.yaml`: +version 0.49.0 with the following `values.yaml`: ```yaml diff --git a/distribution/kubernetes/vector-aggregator/configmap.yaml b/distribution/kubernetes/vector-aggregator/configmap.yaml index f098ca2bc2959..43e9c317b80a5 100644 --- a/distribution/kubernetes/vector-aggregator/configmap.yaml +++ b/distribution/kubernetes/vector-aggregator/configmap.yaml @@ -9,7 +9,7 @@ metadata: app.kubernetes.io/name: vector app.kubernetes.io/instance: vector app.kubernetes.io/component: Aggregator - app.kubernetes.io/version: "0.51.1-distroless-libc" + app.kubernetes.io/version: "0.52.0-distroless-libc" data: aggregator.yaml: | data_dir: /vector-data-dir diff --git a/distribution/kubernetes/vector-aggregator/service-headless.yaml b/distribution/kubernetes/vector-aggregator/service-headless.yaml index 748794086e3fe..52f2814796e1c 100644 --- a/distribution/kubernetes/vector-aggregator/service-headless.yaml +++ 
b/distribution/kubernetes/vector-aggregator/service-headless.yaml @@ -9,7 +9,7 @@ metadata: app.kubernetes.io/name: vector app.kubernetes.io/instance: vector app.kubernetes.io/component: Aggregator - app.kubernetes.io/version: "0.51.1-distroless-libc" + app.kubernetes.io/version: "0.52.0-distroless-libc" annotations: spec: clusterIP: None diff --git a/distribution/kubernetes/vector-aggregator/service.yaml b/distribution/kubernetes/vector-aggregator/service.yaml index fa7d5c3ac015e..2e82a0bb8b39f 100644 --- a/distribution/kubernetes/vector-aggregator/service.yaml +++ b/distribution/kubernetes/vector-aggregator/service.yaml @@ -9,7 +9,7 @@ metadata: app.kubernetes.io/name: vector app.kubernetes.io/instance: vector app.kubernetes.io/component: Aggregator - app.kubernetes.io/version: "0.51.1-distroless-libc" + app.kubernetes.io/version: "0.52.0-distroless-libc" annotations: spec: ports: diff --git a/distribution/kubernetes/vector-aggregator/serviceaccount.yaml b/distribution/kubernetes/vector-aggregator/serviceaccount.yaml index 278df3aa34777..301067ec848c9 100644 --- a/distribution/kubernetes/vector-aggregator/serviceaccount.yaml +++ b/distribution/kubernetes/vector-aggregator/serviceaccount.yaml @@ -9,5 +9,5 @@ metadata: app.kubernetes.io/name: vector app.kubernetes.io/instance: vector app.kubernetes.io/component: Aggregator - app.kubernetes.io/version: "0.51.1-distroless-libc" + app.kubernetes.io/version: "0.52.0-distroless-libc" automountServiceAccountToken: true diff --git a/distribution/kubernetes/vector-aggregator/statefulset.yaml b/distribution/kubernetes/vector-aggregator/statefulset.yaml index 4d5bb24456c25..ba2b699e16560 100644 --- a/distribution/kubernetes/vector-aggregator/statefulset.yaml +++ b/distribution/kubernetes/vector-aggregator/statefulset.yaml @@ -9,7 +9,7 @@ metadata: app.kubernetes.io/name: vector app.kubernetes.io/instance: vector app.kubernetes.io/component: Aggregator - app.kubernetes.io/version: "0.51.1-distroless-libc" + 
app.kubernetes.io/version: "0.52.0-distroless-libc" annotations: {} spec: replicas: 1 @@ -34,7 +34,7 @@ spec: dnsPolicy: ClusterFirst containers: - name: vector - image: "timberio/vector:0.51.1-distroless-libc" + image: "timberio/vector:0.52.0-distroless-libc" imagePullPolicy: IfNotPresent args: - --config-dir diff --git a/distribution/kubernetes/vector-stateless-aggregator/README.md b/distribution/kubernetes/vector-stateless-aggregator/README.md index c792cdd1635bd..cf3c7e257b344 100644 --- a/distribution/kubernetes/vector-stateless-aggregator/README.md +++ b/distribution/kubernetes/vector-stateless-aggregator/README.md @@ -1,6 +1,6 @@ The kubernetes manifests found in this directory have been automatically generated from the [helm chart `vector/vector`](https://github.com/vectordotdev/helm-charts/tree/master/charts/vector) -version 0.48.0 with the following `values.yaml`: +version 0.49.0 with the following `values.yaml`: ```yaml role: Stateless-Aggregator diff --git a/distribution/kubernetes/vector-stateless-aggregator/configmap.yaml b/distribution/kubernetes/vector-stateless-aggregator/configmap.yaml index 43e8934cf9ae7..acd3c2b1e004a 100644 --- a/distribution/kubernetes/vector-stateless-aggregator/configmap.yaml +++ b/distribution/kubernetes/vector-stateless-aggregator/configmap.yaml @@ -9,7 +9,7 @@ metadata: app.kubernetes.io/name: vector app.kubernetes.io/instance: vector app.kubernetes.io/component: Stateless-Aggregator - app.kubernetes.io/version: "0.51.1-distroless-libc" + app.kubernetes.io/version: "0.52.0-distroless-libc" data: aggregator.yaml: | data_dir: /vector-data-dir diff --git a/distribution/kubernetes/vector-stateless-aggregator/deployment.yaml b/distribution/kubernetes/vector-stateless-aggregator/deployment.yaml index 28f5e985de428..d84ffb3a86b96 100644 --- a/distribution/kubernetes/vector-stateless-aggregator/deployment.yaml +++ b/distribution/kubernetes/vector-stateless-aggregator/deployment.yaml @@ -9,7 +9,7 @@ metadata: 
app.kubernetes.io/name: vector app.kubernetes.io/instance: vector app.kubernetes.io/component: Stateless-Aggregator - app.kubernetes.io/version: "0.51.1-distroless-libc" + app.kubernetes.io/version: "0.52.0-distroless-libc" annotations: {} spec: replicas: 1 @@ -32,7 +32,7 @@ spec: dnsPolicy: ClusterFirst containers: - name: vector - image: "timberio/vector:0.51.1-distroless-libc" + image: "timberio/vector:0.52.0-distroless-libc" imagePullPolicy: IfNotPresent args: - --config-dir diff --git a/distribution/kubernetes/vector-stateless-aggregator/service-headless.yaml b/distribution/kubernetes/vector-stateless-aggregator/service-headless.yaml index 2ce0cb4ed13a7..394e19148bd72 100644 --- a/distribution/kubernetes/vector-stateless-aggregator/service-headless.yaml +++ b/distribution/kubernetes/vector-stateless-aggregator/service-headless.yaml @@ -9,7 +9,7 @@ metadata: app.kubernetes.io/name: vector app.kubernetes.io/instance: vector app.kubernetes.io/component: Stateless-Aggregator - app.kubernetes.io/version: "0.51.1-distroless-libc" + app.kubernetes.io/version: "0.52.0-distroless-libc" annotations: spec: clusterIP: None diff --git a/distribution/kubernetes/vector-stateless-aggregator/service.yaml b/distribution/kubernetes/vector-stateless-aggregator/service.yaml index c8cd858bdad46..145ea2c110df4 100644 --- a/distribution/kubernetes/vector-stateless-aggregator/service.yaml +++ b/distribution/kubernetes/vector-stateless-aggregator/service.yaml @@ -9,7 +9,7 @@ metadata: app.kubernetes.io/name: vector app.kubernetes.io/instance: vector app.kubernetes.io/component: Stateless-Aggregator - app.kubernetes.io/version: "0.51.1-distroless-libc" + app.kubernetes.io/version: "0.52.0-distroless-libc" annotations: spec: ports: diff --git a/distribution/kubernetes/vector-stateless-aggregator/serviceaccount.yaml b/distribution/kubernetes/vector-stateless-aggregator/serviceaccount.yaml index fe7032a56dc76..0467ef9f9fda9 100644 --- 
a/distribution/kubernetes/vector-stateless-aggregator/serviceaccount.yaml +++ b/distribution/kubernetes/vector-stateless-aggregator/serviceaccount.yaml @@ -9,5 +9,5 @@ metadata: app.kubernetes.io/name: vector app.kubernetes.io/instance: vector app.kubernetes.io/component: Stateless-Aggregator - app.kubernetes.io/version: "0.51.1-distroless-libc" + app.kubernetes.io/version: "0.52.0-distroless-libc" automountServiceAccountToken: true diff --git a/website/content/en/releases/0.52.0.md b/website/content/en/releases/0.52.0.md new file mode 100644 index 0000000000000..2a6cdef348514 --- /dev/null +++ b/website/content/en/releases/0.52.0.md @@ -0,0 +1,4 @@ +--- +title: Vector v0.52.0 release notes +weight: 31 +--- diff --git a/website/cue/reference/administration/interfaces/kubectl.cue b/website/cue/reference/administration/interfaces/kubectl.cue index e10737bd72906..67bcbf6592591 100644 --- a/website/cue/reference/administration/interfaces/kubectl.cue +++ b/website/cue/reference/administration/interfaces/kubectl.cue @@ -19,7 +19,7 @@ administration: interfaces: kubectl: { role_implementations: [Name=string]: { commands: { _deployment_variant: string - _vector_version: "0.51" + _vector_version: "0.52" _namespace: string | *"vector" _controller_resource_type: string _controller_resource_name: string | *_deployment_variant diff --git a/website/cue/reference/releases/0.52.0.cue b/website/cue/reference/releases/0.52.0.cue new file mode 100644 index 0000000000000..9c9d0449a68fe --- /dev/null +++ b/website/cue/reference/releases/0.52.0.cue @@ -0,0 +1,372 @@ +package metadata + +releases: "0.52.0": { + date: "2025-12-16" + codename: "" + + whats_next: [] + + description: """ + The Vector team is excited to announce version `0.52.0`! 
+ + ## Release highlights + + - Enhanced Vector's observability with new buffer utilization metrics for sources and + transforms + ([source_buffer_*](\(urls.vector_internal_metrics)/#source_buffer_max_byte_size) + and [transform_buffer_*](\(urls.vector_internal_metrics)/#transform_buffer_max_byte_size) metrics), providing visibility into + buffer capacity, usage and historical usage levels. + - Introduced `trace_to_log` transform that allows converting traces to logs. + - The blackhole sink now implements end-to-end acknowledgements. + - The GELF decoder now supports a `validation` option with two modes: `strict` (default) + and `relaxed`. When set to `relaxed`, the decoder will parse GELF messages from sources + that don't strictly follow the GELF specification. + - The `docker_logs` source now retries Docker daemon communication failures with exponential backoff. + + + ## Breaking Changes + + - The `mongodb_metrics` source now requires MongoDB Server 4.2 or later. MongoDB Server 4.0, the previously supported minimum version, reached end-of-life on April 30, 2022. + """ + + changelog: [ + { + type: "fix" + description: """ + The `syslog` source in UDP mode now emits the standard "received" metrics, aligning behavior with TCP and the Component Specification: + + - `component_received_events_total` + - `component_received_event_bytes_total` + - `component_received_bytes_total` + + This makes internal telemetry consistent and restores compliance checks for UDP syslog. + """ + contributors: ["sghall"] + }, + { + type: "fix" + description: """ + The `journald` source now correctly respects the `current_boot_only: false` setting on systemd versions >= 258. + + Compatibility notes: + + - **systemd < 250**: Both `current_boot_only: true` and `false` work correctly + - **systemd 250-257**: Due to systemd limitations, `current_boot_only: false` will not work. An error will be raised on startup. 
+ - **systemd >= 258**: Both settings work correctly + """ + contributors: ["bachorp"] + }, + { + type: "feat" + description: """ + Added a new `prefetch_count` option to the AMQP source configuration. This allows limiting the number of in-flight (unacknowledged) messages per consumer using RabbitMQ's prefetch mechanism (`basic.qos`). Setting this value helps control memory usage and load when processing messages slowly. + """ + contributors: ["elkh510"] + }, + { + type: "enhancement" + description: """ + Vector's TLS implementation now stores credentials in PEM format internally instead of PKCS12, enabling FIPS-compliant operation in + environments with strict cryptographic requirements. This change is transparent to users - both PEM and PKCS12 certificate files continue to + be supported as configuration inputs, with PKCS12 files automatically converted at load time. + """ + contributors: ["rf-ben"] + }, + { + type: "enhancement" + description: """ + The `http_client` source now supports the `body` parameter. VRL is also supported in the body which allows a dynamic request body to be generated. + """ + contributors: ["benjamin-awd"] + }, + { + type: "enhancement" + description: """ + The GELF decoder now supports a `validation` option with two modes: `strict` (default) and `relaxed`. When set to `relaxed`, the decoder will accept: + + - GELF versions other than 1.1 + - Additional fields without underscore prefixes + - Additional field names with special characters + - Additional field values of any type (not just strings/numbers) + + This allows Vector to parse GELF messages from sources that don't strictly follow the GELF specification. + """ + contributors: ["ds-hystax"] + }, + { + type: "feat" + description: """ + Add AWS CloudWatch Metrics sink `storage_resolution` config. 
+ """ + contributors: ["trxcllnt"] + }, + { + type: "fix" + description: """ + Fixed the `websocket` source entering a "zombie" state when the `connect_timeout_secs` threshold was reached with multiple sources running. The connection timeout is now applied per connect attempt with indefinite retries, rather than as a total timeout limit. + """ + contributors: ["benjamin-awd"] + }, + { + type: "fix" + description: """ + Fixed a bug in the `file` source, which could silently corrupt data when using multi-char delimiters. + """ + contributors: ["lfrancke"] + }, + { + type: "feat" + description: """ + The `docker_logs` source now includes exponential backoff retry logic for Docker daemon communication failures, with indefinite retry capability. This improves reliability when working with slow or temporarily unresponsive Docker daemons by retrying with increasing delays instead of immediately stopping. + """ + contributors: ["titaneric"] + }, + { + type: "feat" + description: """ + A generic [Apache Arrow](https://arrow.apache.org/) codec has been added to + support [Arrow IPC](https://arrow.apache.org/docs/format/Columnar.html#ipc-streaming-format) serialization across Vector. This enables sinks + like the `clickhouse` sink to use the ArrowStream format endpoint with significantly better performance and smaller payload sizes compared + to JSON-based formats. + """ + contributors: ["benjamin-awd"] + }, + { + type: "enhancement" + description: """ + The Arrow encoder now supports configurable null handling through the `allow_nullable_fields` + option. This controls whether nullable fields should be explicitly marked + as nullable in the Arrow schema, enabling better compatibility with + downstream systems that have specific requirements for null handling. + """ + contributors: ["benjamin-awd"] + }, + { + type: "fix" + description: """ + Fixed an issue in vector tests where memory enrichment tables would report missing components errors. 
+ """ + contributors: ["esensar", "Quad9DNS"] + }, + { + type: "chore" + description: """ + The `mongodb_metrics` source now requires MongoDB Server 4.2 or later. MongoDB Server 4.0, the previously supported minimum version, reached end-of-life on April 30, 2022. + """ + contributors: ["thomasqueirozb"] + }, + { + type: "fix" + description: """ + Fixed the blackhole sink to properly implement end-to-end acknowledgements. Previously, the sink consumed events without updating finalizer status, causing sources that depend on acknowledgements (like `aws_s3` with SQS) to never delete processed messages from the queue. + """ + contributors: ["sanjams2"] + }, + { + type: "feat" + description: """ + Introduced `trace_to_log` transform that allows converting traces to logs. + """ + contributors: ["huevosabio"] + }, + { + type: "enhancement" + description: """ + Added a new "Custom Authorization" HTTP auth strategy, allowing users to configure a custom HTTP Authorization Header + """ + contributors: ["arunpidugu"] + }, + { + type: "feat" + description: """ + Added `--disable-env-var-interpolation` CLI option to prevent environment variable interpolation. The `VECTOR_DISABLE_ENV_VAR_INTERPOLATION` environment variable can also be used to disable interpolation. + """ + contributors: ["graphcareful"] + }, + { + type: "feat" + description: """ + The `aws_s3` source now emits histogram metrics to track S3 object processing times: `s3_object_processing_succeeded_duration_seconds` for successful processing and `s3_object_processing_failed_duration_seconds` for failed processing. These measure the full processing pipeline including download, decompression, and parsing. Both metrics include a `bucket` label to help identify slow buckets. + """ + contributors: ["sanjams2"] + }, + { + type: "feat" + description: """ + The `axiom` sink now supports regional edges for data locality. 
A new optional `region` configuration field allows you to specify the regional edge domain (e.g., `eu-central-1.aws.edge.axiom.co`). When configured, data is sent to `https://{region}/v1/ingest/{dataset}`. The `url` field now intelligently handles paths: URLs with custom paths are used as-is, while URLs without paths maintain backwards compatibility by appending `/v1/datasets/{dataset}/ingest`. + """ + contributors: ["toppercodes"] + }, + { + type: "enhancement" + description: """ + Added support for configurable request timeouts to the `datadog_agent` source. + + This change also introduces two new internal metrics: + - `component_timed_out_events_total` - Counter tracking the number of events that timed out + - `component_timed_out_requests_total` - Counter tracking the number of requests that timed out + """ + contributors: ["bruceg"] + }, + { + type: "fix" + description: """ + The `http_client` source now fails to start if VRL compilation errors occur in `query` parameters when + type is set to `vrl`, instead of silently logging a warning and continuing with invalid expressions. + This prevents unexpected behavior where malformed VRL would be sent as literal strings in HTTP requests. 
+ """ + contributors: ["thomasqueirozb"] + }, + { + type: "enhancement" + description: """ + Added the following metrics to record the utilization level of the buffer that + all sources send into: + + - `source_buffer_max_byte_size` + - `source_buffer_max_event_size` + - `source_buffer_utilization` + - `source_buffer_utilization_level` + """ + contributors: ["bruceg"] + }, + { + type: "enhancement" + description: """ + Added metrics to record the utilization level of the buffers that each transform receives from: + + - `transform_buffer_max_byte_size` + - `transform_buffer_max_event_size` + - `transform_buffer_utilization` + - `transform_buffer_utilization_level` + """ + contributors: ["bruceg"] + }, + ] + + vrl_changelog: """ + ### [0.29.0 (2025-12-11)] + + #### Breaking Changes & Upgrade Guide + + - Added required `line` and `file` fields to `vrl::compiler::function::Example`. Also added the + `example!` macro to automatically populate those fields. + + authors: thomasqueirozb (https://github.com/vectordotdev/vrl/pull/1557) + + #### Fixes + + - Fixed handling of OR conjunctions in the datadog search query parser (https://github.com/vectordotdev/vrl/pull/1542) + - Fixed a bug where VRL would crash if `merge` were called without a `to` argument. + + authors: thomasqueirozb (https://github.com/vectordotdev/vrl/pull/1563) + - Fixed a bug where a stack overflow would happen in validate_json_schema if the schema had an empty $ref. 
+ + authors: jlambatl (https://github.com/vectordotdev/vrl/pull/1577) + + + ### [0.28.1 (2025-11-07)] + """ + + commits: [ + {sha: "6bf28dd5dbcfbd50a7cd5564eff592df860cfc80", date: "2025-11-04 01:33:39 UTC", description: "add serde, tokio, tracing patterns", pr_number: 24132, scopes: ["ci"], type: "chore", breaking_change: false, author: "Pavlos Rontidis", files_count: 1, insertions_count: 15, deletions_count: 3}, + {sha: "2ed1eb47e3eb40b9fd4a2cd7832c562b09c40bef", date: "2025-11-04 06:39:45 UTC", description: "fix gcp test filter and ignore failing tests", pr_number: 24134, scopes: ["ci"], type: "chore", breaking_change: false, author: "Pavlos Rontidis", files_count: 3, insertions_count: 9, deletions_count: 1}, + {sha: "9d50f2d4bfd5fdadf72cf5b06af12b96e2958fac", date: "2025-11-04 23:44:44 UTC", description: "rebuild manifests for 0.51.0", pr_number: 24142, scopes: ["releasing"], type: "chore", breaking_change: false, author: "Thomas", files_count: 18, insertions_count: 22, deletions_count: 22}, + {sha: "817be3846e8b932253d3f15ec915e84566675831", date: "2025-11-05 00:00:32 UTC", description: "reorg e2e tests", pr_number: 24136, scopes: ["ci"], type: "chore", breaking_change: false, author: "Pavlos Rontidis", files_count: 39, insertions_count: 80, deletions_count: 65}, + {sha: "7e2b3223565396db8be2dd130a579e3364cf4a7c", date: "2025-11-05 00:53:53 UTC", description: "typo fix", pr_number: 24146, scopes: ["ci"], type: "chore", breaking_change: false, author: "Pavlos Rontidis", files_count: 1, insertions_count: 1, deletions_count: 1}, + {sha: "90b395120c694878e2c262cad7ade1c142ef6b7b", date: "2025-11-05 01:09:46 UTC", description: "v0.51.0", pr_number: 24145, scopes: ["releasing"], type: "chore", breaking_change: false, author: "Thomas", files_count: 40, insertions_count: 643, deletions_count: 144}, + {sha: "f40ea0942430d160f6bc9e8bafd7080067ded76e", date: "2025-11-05 19:42:11 UTC", description: "Add an option to prevent interpolation of env vars within config loading 
process", pr_number: 23910, scopes: ["config"], type: "feat", breaking_change: false, author: "Rob Blafford", files_count: 12, insertions_count: 186, deletions_count: 69}, + {sha: "749fbb078b5fe2fd0083dc731f747ffed9d34c4d", date: "2025-11-05 19:52:22 UTC", description: ".dockerignore should exlcude target dirs", pr_number: 24154, scopes: ["ci"], type: "chore", breaking_change: false, author: "Pavlos Rontidis", files_count: 1, insertions_count: 3, deletions_count: 0}, + {sha: "6d332b4c48f8fd375cdf417d13a73655f8fa5fee", date: "2025-11-05 23:16:07 UTC", description: "refactor ConfigBuilderLoader (tech debt)", pr_number: 24157, scopes: ["dev"], type: "chore", breaking_change: false, author: "Pavlos Rontidis", files_count: 7, insertions_count: 191, deletions_count: 169}, + {sha: "2fbe9494c530f1451590ea6932c963957b0c1fb6", date: "2025-11-05 23:50:23 UTC", description: "simplify/improve scripts/ci-free-disk-space.sh", pr_number: 24159, scopes: ["ci"], type: "chore", breaking_change: false, author: "Pavlos Rontidis", files_count: 1, insertions_count: 30, deletions_count: 49}, + {sha: "c9537a0de423884b0581341a9a09b15a68448094", date: "2025-11-06 06:10:43 UTC", description: "add support for regional edge endpoints in AxiomConfig", pr_number: 24037, scopes: ["axiom"], type: "feat", breaking_change: false, author: "Topper", files_count: 6, insertions_count: 551, deletions_count: 308}, + {sha: "d43ab9ec84836f484a155c8b2d155189dba1789c", date: "2025-11-06 01:43:25 UTC", description: "update toml to 0.9.8", pr_number: 24161, scopes: ["deps"], type: "chore", breaking_change: false, author: "Thomas", files_count: 2, insertions_count: 56, deletions_count: 55}, + {sha: "99b5835af91aa0423400a18c35e6c2b3619b8ed0", date: "2025-11-06 02:22:35 UTC", description: "remove --reuse-image", pr_number: 24163, scopes: ["vdev"], type: "fix", breaking_change: false, author: "Pavlos Rontidis", files_count: 13, insertions_count: 9, deletions_count: 96}, + {sha: 
"6913528d50b66cc890b8b34f333c2520e2d24a06", date: "2025-11-06 02:58:47 UTC", description: "refactor SecretBackendLoader (tech debt)", pr_number: 24160, scopes: ["dev"], type: "chore", breaking_change: false, author: "Pavlos Rontidis", files_count: 4, insertions_count: 98, deletions_count: 118}, + {sha: "e9c81d25045f29c3b6e83030725857f1d25ebdf0", date: "2025-11-06 20:34:35 UTC", description: "fix failing dependabot dockerfile updates", pr_number: 24172, scopes: ["ci"], type: "chore", breaking_change: false, author: "Pavlos Rontidis", files_count: 1, insertions_count: 20, deletions_count: 1}, + {sha: "325c5c296bad7656e947c853449d5f7bb92a2f2f", date: "2025-11-06 21:01:01 UTC", description: "download toolchain only once", pr_number: 24176, scopes: ["ci"], type: "fix", breaking_change: false, author: "Pavlos Rontidis", files_count: 1, insertions_count: 3, deletions_count: 1}, + {sha: "81ca9f26c487c3eebdfca6ca8e5f334024bd406c", date: "2025-11-07 02:02:18 UTC", description: "bump the artifact group with 2 updates", pr_number: 24173, scopes: ["ci"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 8, insertions_count: 71, deletions_count: 71}, + {sha: "d2b4f6422a6a1af1fdfc565fad2ff733d8eadf3e", date: "2025-11-06 21:02:37 UTC", description: "bump docker/setup-qemu-action from 3.6.0 to 3.7.0", pr_number: 24174, scopes: ["ci"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 2, insertions_count: 2, deletions_count: 2}, + {sha: "f07e8833e548137ee2c3e8df9585db74b9e8d487", date: "2025-11-07 02:02:55 UTC", description: "bump docker/metadata-action from 5.8.0 to 5.9.0", pr_number: 24175, scopes: ["ci"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 1, insertions_count: 1, deletions_count: 1}, + {sha: "df6d39b7c6a196db300f5e14e34069a34c9d5447", date: "2025-11-07 02:15:12 UTC", description: "delete config subcommand", pr_number: 24181, scopes: ["vdev"], type: "chore", breaking_change: 
false, author: "Thomas", files_count: 15, insertions_count: 5, deletions_count: 172}, + {sha: "02671f454061bdb41f9600cafcff3b4f26bd3773", date: "2025-11-07 18:20:48 UTC", description: "Allow `datadog_search` to use `&LogEvent` directly", pr_number: 24182, scopes: ["transforms"], type: "chore", breaking_change: false, author: "Bruce Guenter", files_count: 1, insertions_count: 9, deletions_count: 14}, + {sha: "c1e83f9525037e6e8eecced6804f0fac180ebc0f", date: "2025-11-07 18:44:19 UTC", description: "Refactor `source_sender` into modules", pr_number: 24183, scopes: ["sources"], type: "chore", breaking_change: false, author: "Bruce Guenter", files_count: 7, insertions_count: 757, deletions_count: 696}, + {sha: "f453b8b1179c3ce36c211d21cc246945365db36a", date: "2025-11-07 22:03:24 UTC", description: "Move `source_sender` into `vector-core`", pr_number: 24186, scopes: ["sources"], type: "chore", breaking_change: false, author: "Bruce Guenter", files_count: 18, insertions_count: 95, deletions_count: 75}, + {sha: "1e3f38736ee4b3ef592fc0efd4adbb02bcad138b", date: "2025-11-07 23:27:56 UTC", description: "add log verbosity section to the debugging guide", pr_number: 24187, scopes: ["website"], type: "chore", breaking_change: false, author: "Pavlos Rontidis", files_count: 1, insertions_count: 16, deletions_count: 0}, + {sha: "61bf5ad14b76ef7f2835eb207e3ebfdc76d538d2", date: "2025-11-11 20:39:47 UTC", description: "remove build-all flag, inspect state instead", pr_number: 24206, scopes: ["ci"], type: "chore", breaking_change: false, author: "Pavlos Rontidis", files_count: 12, insertions_count: 39, deletions_count: 64}, + {sha: "98b77a1645f8457c01128a0904ce5fb1f5a8e871", date: "2025-11-11 22:33:41 UTC", description: "run fmt before commiting clippy fixes", pr_number: 24210, scopes: ["vdev"], type: "enhancement", breaking_change: false, author: "Thomas", files_count: 1, insertions_count: 1, deletions_count: 0}, + {sha: "4eea77c36cf376e7c411df65a30c4fbdc8596b43", date: "2025-11-11 
22:41:31 UTC", description: "upgrade Rust to 1.91.1", pr_number: 24209, scopes: ["dev"], type: "chore", breaking_change: false, author: "Thomas", files_count: 19, insertions_count: 46, deletions_count: 89}, + {sha: "927482bb33e3c5c3210c83ccddff43cbc06a2cb6", date: "2025-11-11 23:56:30 UTC", description: "Add custom authorization header strategy for http client source", pr_number: 24201, scopes: ["http_client"], type: "enhancement", breaking_change: false, author: "ArunPiduguDD", files_count: 13, insertions_count: 100, deletions_count: 0}, + {sha: "6ee7839a2bafced6bed53b4344c05e4f787032e9", date: "2025-11-12 04:45:02 UTC", description: "add missing md file for the incremental_to_absolute transform", pr_number: 24217, scopes: ["website"], type: "fix", breaking_change: false, author: "Pavlos Rontidis", files_count: 1, insertions_count: 14, deletions_count: 0}, + {sha: "673a19cfcf5ecdea70ba1bd332645bab61b81ea5", date: "2025-11-12 22:17:06 UTC", description: " new blog post - First year of COSE", pr_number: 24179, scopes: ["website"], type: "feat", breaking_change: false, author: "Pavlos Rontidis", files_count: 1, insertions_count: 172, deletions_count: 0}, + {sha: "068475e18e016c1fe72ea4042d1e58bbd4726c5f", date: "2025-11-12 21:24:30 UTC", description: "introduces transform that converts traces to logs", pr_number: 24168, scopes: ["trace_to_log transform"], type: "feat", breaking_change: false, author: "spencerho777", files_count: 9, insertions_count: 256, deletions_count: 1}, + {sha: "44f34e823699db88dc382f0da5c23e0734181438", date: "2025-11-13 00:29:13 UTC", description: "group imports", pr_number: 24219, scopes: ["dev"], type: "chore", breaking_change: false, author: "Pavlos Rontidis", files_count: 19, insertions_count: 65, deletions_count: 59}, + {sha: "24099ebe04d83324352612237e3982b1ad4578d1", date: "2025-11-13 02:04:05 UTC", description: "build-test-runner if condition", pr_number: 24224, scopes: ["ci"], type: "fix", breaking_change: false, author: "Thomas", 
files_count: 1, insertions_count: 11, deletions_count: 11}, + {sha: "4d22ce1b28f33c69872e57667a5bedd6a50a4b89", date: "2025-11-13 08:58:19 UTC", description: "prevent missing components errors for memory tables in tests", pr_number: 24081, scopes: ["unit tests"], type: "fix", breaking_change: false, author: "Ensar Sarajčić", files_count: 2, insertions_count: 12, deletions_count: 0}, + {sha: "8d3d623098c4caaed8295a1753235cbde1aa8dc3", date: "2025-11-13 22:21:28 UTC", description: "update manifests 0.51.1", pr_number: 24233, scopes: ["releasing"], type: "chore", breaking_change: false, author: "Thomas", files_count: 18, insertions_count: 22, deletions_count: 22}, + {sha: "889e6a1915ca2277ca44800fe308d9eea5fe961f", date: "2025-11-13 23:52:18 UTC", description: "bump blog post date", pr_number: 24235, scopes: ["website"], type: "chore", breaking_change: false, author: "Pavlos Rontidis", files_count: 1, insertions_count: 1, deletions_count: 1}, + {sha: "70b26187826a1ac6f047020740ee8bed65641960", date: "2025-11-14 01:09:29 UTC", description: "v0.51.1", pr_number: 24234, scopes: ["releasing"], type: "chore", breaking_change: false, author: "Thomas", files_count: 8, insertions_count: 110, deletions_count: 28}, + {sha: "b367f7dddd58fea63045ba8bdda02eaa3c9e679a", date: "2025-11-14 01:18:15 UTC", description: "fail on VRL compilation errors in query parameters", pr_number: 24223, scopes: ["http_client"], type: "fix", breaking_change: false, author: "Thomas", files_count: 3, insertions_count: 35, deletions_count: 20}, + {sha: "3ef42ae4c457495a11955fc86d9fdf94cbda1398", date: "2025-11-14 01:20:02 UTC", description: "skip removed files when formatting", pr_number: 24232, scopes: ["dev"], type: "chore", breaking_change: false, author: "Pavlos Rontidis", files_count: 1, insertions_count: 5, deletions_count: 0}, + {sha: "5553521edc2415325dd28179423cdf02b45f56f6", date: "2025-11-14 02:19:00 UTC", description: "eliminate race condition when aqcuiring socket addresses", pr_number: 
24212, scopes: ["dev"], type: "fix", breaking_change: false, author: "Pavlos Rontidis", files_count: 67, insertions_count: 537, deletions_count: 566}, + {sha: "41e384944f0bce1335b1aadb91f5f091c48d9f9b", date: "2025-11-15 01:27:53 UTC", description: "add arrow IPC stream batch encoder", pr_number: 24124, scopes: ["codecs"], type: "feat", breaking_change: false, author: "Benjamin Dornel", files_count: 16, insertions_count: 2029, deletions_count: 11}, + {sha: "fcd135adadf3c3ff17c6194cc09df0f2597ae99b", date: "2025-11-14 19:08:43 UTC", description: "Refactor handle_request into struct", pr_number: 24238, scopes: ["datadog_agent source"], type: "chore", breaking_change: false, author: "Bruce Guenter", files_count: 4, insertions_count: 94, deletions_count: 121}, + {sha: "62e34462c4219a5a6e2497b08f7d91e2cb0b082b", date: "2025-11-15 02:13:13 UTC", description: "handle custom auth strategy in all sinks", pr_number: 24240, scopes: ["http_client"], type: "fix", breaking_change: false, author: "Ensar Sarajčić", files_count: 3, insertions_count: 5, deletions_count: 0}, + {sha: "8a8f223012fab640035e65533edf3ad94c3cd3d1", date: "2025-11-15 01:49:57 UTC", description: "Apply review suggestions from PR #24234", pr_number: 24244, scopes: ["website"], type: "chore", breaking_change: false, author: "Thomas", files_count: 1, insertions_count: 3, deletions_count: 3}, + {sha: "b9726201642b7e3219f279f9ed7ea3320ed6bdd4", date: "2025-11-15 02:39:56 UTC", description: "flush and sync files in file source tests", pr_number: 24243, scopes: ["dev"], type: "fix", breaking_change: false, author: "Thomas", files_count: 1, insertions_count: 91, deletions_count: 83}, + {sha: "c8cbfbfe624b5d6df38367727d6e67db181e73c5", date: "2025-11-17 20:41:30 UTC", description: "add default ExponentialBackoff", pr_number: 24246, scopes: ["sources", "sinks"], type: "chore", breaking_change: false, author: "Thomas", files_count: 8, insertions_count: 26, deletions_count: 38}, + {sha: 
"9c3e7ee88805609492238e4994fb621df90244e1", date: "2025-11-17 20:44:02 UTC", description: "Add request timeout support", pr_number: 24245, scopes: ["datadog_agent source"], type: "enhancement", breaking_change: false, author: "Bruce Guenter", files_count: 24, insertions_count: 500, deletions_count: 160}, + {sha: "bdb96ce5f6d0ab2558da7e2aba898f51860899db", date: "2025-11-18 08:00:36 UTC", description: "introduce an option to relax GELF validation", pr_number: 24241, scopes: ["codecs"], type: "enhancement", breaking_change: false, author: "Dmitry Sergeenkov", files_count: 24, insertions_count: 666, deletions_count: 183}, + {sha: "d6c21e50eeb0ea390fc9ba64e19e4f53ecadbc0b", date: "2025-11-17 23:08:08 UTC", description: "delete cue.mod", pr_number: 24254, scopes: ["dev"], type: "chore", breaking_change: false, author: "Thomas", files_count: 1, insertions_count: 0, deletions_count: 10}, + {sha: "ac207396efc9b24b16024d3507cdf6a48c5872a3", date: "2025-11-18 12:17:38 UTC", description: "add exponential retry to docker client", pr_number: 24063, scopes: ["docker_logs source"], type: "feat", breaking_change: false, author: "Eric Huang", files_count: 2, insertions_count: 45, deletions_count: 5}, + {sha: "67509b09756a5f7d112184dd0b3d70457d8ffba7", date: "2025-11-17 23:36:30 UTC", description: "document the global healthcheck option", pr_number: 24253, scopes: ["website"], type: "fix", breaking_change: false, author: "Thomas", files_count: 2, insertions_count: 28, deletions_count: 1}, + {sha: "fff7f5a34366cca87a8a71cb18570d8a2f8927c8", date: "2025-11-17 23:50:48 UTC", description: "forbid unwrap and refactor error handling", pr_number: 24247, scopes: ["codecs"], type: "chore", breaking_change: false, author: "Thomas", files_count: 11, insertions_count: 105, deletions_count: 85}, + {sha: "6996ec55d1424be0a68929169c7119dc6baae637", date: "2025-11-18 06:05:41 UTC", description: "journalctl args in case of current_boot_only", pr_number: 23438, scopes: ["journald source"], type: 
"fix", breaking_change: false, author: "Pascal Bachor", files_count: 3, insertions_count: 102, deletions_count: 6}, + {sha: "61bb16f53d09d009ea4a7a363b83acbb4a753b85", date: "2025-11-18 21:10:37 UTC", description: "add note to 'include_units' option", pr_number: 24260, scopes: ["journald source"], type: "chore", breaking_change: false, author: "Pavlos Rontidis", files_count: 2, insertions_count: 8, deletions_count: 0}, + {sha: "8a8b981cc15cb4739caa05869721728d46d4fa32", date: "2025-11-18 21:10:49 UTC", description: "improve routes docs", pr_number: 24259, scopes: ["exclusive_route transform"], type: "chore", breaking_change: false, author: "Pavlos Rontidis", files_count: 3, insertions_count: 11, deletions_count: 3}, + {sha: "f1efa9dc7badd4358c82838e139bef6739b07692", date: "2025-11-18 21:35:43 UTC", description: "fix healthcheck -> healthchecks", pr_number: 24267, scopes: ["website"], type: "fix", breaking_change: false, author: "Thomas", files_count: 2, insertions_count: 2, deletions_count: 2}, + {sha: "e38c093e8857ebbdbbab1ff398639b6181a8cea7", date: "2025-11-18 21:42:17 UTC", description: "add aqua deps", pr_number: 24269, scopes: ["dev"], type: "chore", breaking_change: false, author: "Pavlos Rontidis", files_count: 1, insertions_count: 10, deletions_count: 7}, + {sha: "677f21e4c3d9d9b63a1c73b4bef5272b736b58ec", date: "2025-11-18 22:29:30 UTC", description: "improve build from source guide", pr_number: 24268, scopes: ["website"], type: "chore", breaking_change: false, author: "Pavlos Rontidis", files_count: 1, insertions_count: 84, deletions_count: 41}, + {sha: "563251a03e2ef88a0adf871f323669a5982bbd04", date: "2025-11-18 19:32:44 UTC", description: "rework TlsSettings to carry PEM based objects", pr_number: 23146, scopes: ["security"], type: "enhancement", breaking_change: false, author: "rf-ben", files_count: 3, insertions_count: 83, deletions_count: 84}, + {sha: "8c9bc00b712b57519ed09540dc9967ed3a453c4e", date: "2025-11-18 20:06:19 UTC", description: 
"Support AWS CloudWatch high-resolution metrics", pr_number: 23822, scopes: ["aws_cloudwatch_metrics sink"], type: "feat", breaking_change: false, author: "Paul Taylor", files_count: 4, insertions_count: 58, deletions_count: 2}, + {sha: "5edc39344b6b3f5aad0d12decc9f33c930514b76", date: "2025-11-18 20:17:18 UTC", description: "smp cli: v0.24.1 -> v0.25.1", pr_number: 24262, scopes: ["ci"], type: "chore", breaking_change: false, author: "Geoffrey Oxberry", files_count: 1, insertions_count: 1, deletions_count: 1}, + {sha: "16429fa70de7b240e3c2034fcdc9ec05eba150a7", date: "2025-11-19 02:46:23 UTC", description: "handle out of order reads in test_fair_reads", pr_number: 24270, scopes: ["dev"], type: "fix", breaking_change: false, author: "Thomas", files_count: 1, insertions_count: 20, deletions_count: 13}, + {sha: "821c1f798b5f1814a7a0b26882dfd391a1f61a91", date: "2025-11-19 20:52:47 UTC", description: "update mongodb crate to 3.3.0", pr_number: 24271, scopes: ["deps"], type: "chore", breaking_change: false, author: "Thomas", files_count: 6, insertions_count: 252, deletions_count: 253}, + {sha: "12c880f33c5aaa45216d1a97a7977e2a8d1f1855", date: "2025-11-20 03:07:57 UTC", description: "Add CLA signature workflow", pr_number: 24276, scopes: ["ci"], type: "chore", breaking_change: false, author: "@Ara Pulido", files_count: 1, insertions_count: 44, deletions_count: 0}, + {sha: "870b86ffe1c1c8c609a2b7c4532a9836166392cd", date: "2025-11-20 01:47:53 UTC", description: "Allow CLA check to pass on merge queue events", pr_number: 24277, scopes: ["ci"], type: "fix", breaking_change: false, author: "Thomas", files_count: 1, insertions_count: 8, deletions_count: 1}, + {sha: "b9ad9b3ec1ade765583e4d06fc7142f8e6b745a2", date: "2025-11-22 00:12:37 UTC", description: "remove number-prefix in favor of unit_prefix", pr_number: 24293, scopes: ["deps"], type: "chore", breaking_change: false, author: "Thomas", files_count: 4, insertions_count: 6, deletions_count: 12}, + {sha: 
"df4f3621e7941c4eb3ba6ad76c739552b427951f", date: "2025-11-26 21:51:00 UTC", description: "implement end-to-end acknowledgements", pr_number: 24283, scopes: ["blackhole sink"], type: "fix", breaking_change: false, author: "James", files_count: 3, insertions_count: 11, deletions_count: 4}, + {sha: "84c94441223a4e2f83be5e5ae0e56180c2c45931", date: "2025-11-30 10:24:37 UTC", description: "fix return type for `mod` function in VRL function reference", pr_number: 24312, scopes: ["vrl"], type: "docs", breaking_change: false, author: "Ensar Sarajčić", files_count: 1, insertions_count: 1, deletions_count: 1}, + {sha: "5c16191caea16363da0baa70b5a7be67a945826a", date: "2025-12-01 23:29:37 UTC", description: "use ci-docs-build flow instead of local docs flow", pr_number: 24319, scopes: ["ci"], type: "chore", breaking_change: false, author: "Thomas", files_count: 1, insertions_count: 1, deletions_count: 1}, + {sha: "08dceb5e786df3f889e86ef68dccf5ae20d67fff", date: "2025-12-02 00:31:20 UTC", description: "add missing --workspace argument to make docs", pr_number: 24318, scopes: ["dev"], type: "chore", breaking_change: false, author: "Thomas", files_count: 1, insertions_count: 1, deletions_count: 1}, + {sha: "e6277228958fe51d3f37cea69551e57ac51c45f4", date: "2025-12-01 23:57:00 UTC", description: "Add internal metric to record source buffer utilization", pr_number: 24272, scopes: ["sources"], type: "enhancement", breaking_change: false, author: "Bruce Guenter", files_count: 9, insertions_count: 290, deletions_count: 61}, + {sha: "3eda9d2ec27fe7615f9cf1779d1e9b89ae3ab0a7", date: "2025-12-02 01:28:13 UTC", description: "bump VRL version to include example location", pr_number: 24317, scopes: ["deps"], type: "chore", breaking_change: false, author: "Thomas", files_count: 9, insertions_count: 18, deletions_count: 16}, + {sha: "3eae9314d1ed3f80d1c1db6ba8ede1e4ccb7183d", date: "2025-12-02 02:17:45 UTC", description: "Fix flaky test_oldest_first by ensuring distinct creation 
timestamps", pr_number: 24327, scopes: ["file source"], type: "fix", breaking_change: false, author: "Thomas", files_count: 1, insertions_count: 3, deletions_count: 0}, + {sha: "cea65d0b1688d40be2a819cb935b736c4a75d818", date: "2025-12-03 03:39:49 UTC", description: "bump maxminddb to 0.27 after RUSTSEC-2025-0132", pr_number: 24332, scopes: ["deps"], type: "chore", breaking_change: false, author: "Clément Delafargue", files_count: 4, insertions_count: 64, deletions_count: 60}, + {sha: "80fc73b2ccb2be507b485189de1677900cf24246", date: "2025-12-03 03:51:24 UTC", description: "Bump vrl hash and fix datadog search tests", pr_number: 24334, scopes: ["vrl"], type: "chore", breaking_change: true, author: "Yoenn Burban", files_count: 2, insertions_count: 4, deletions_count: 4}, + {sha: "4902750cf18eae8e201e5849fea1a404bb57afe8", date: "2025-12-02 23:21:18 UTC", description: "emit received events/bytes metrics for UDP mode", pr_number: 24296, scopes: ["syslog source"], type: "fix", breaking_change: false, author: "Steve Hall", files_count: 2, insertions_count: 91, deletions_count: 2}, + {sha: "ea556a288e2e58b1f09bbb1181add57bd1ff5742", date: "2025-12-03 20:43:41 UTC", description: "Introduce `trait NamedInternalEvent` and derive", pr_number: 24313, scopes: ["observability"], type: "chore", breaking_change: false, author: "Bruce Guenter", files_count: 92, insertions_count: 579, deletions_count: 431}, + {sha: "7d1773093745995f8193117855a1436ad71bdbf1", date: "2025-12-04 19:01:59 UTC", description: "Add missing `deny.toml` entry for the new macro crate", pr_number: 24339, scopes: ["ci"], type: "chore", breaking_change: false, author: "Bruce Guenter", files_count: 1, insertions_count: 1, deletions_count: 14}, + {sha: "72e09673fda9d6fbf933adacea1220bdfae162a8", date: "2025-12-05 00:58:22 UTC", description: "Improve deny and make it run on PRs when necessary", pr_number: 24340, scopes: ["ci"], type: "chore", breaking_change: false, author: "Thomas", files_count: 2, 
insertions_count: 23, deletions_count: 40}, + {sha: "dbc805a77b51a6b426e067a772ee0eae04f958d1", date: "2025-12-06 00:58:01 UTC", description: "Add internal metric to record buffer utilization", pr_number: 24329, scopes: ["transforms"], type: "enhancement", breaking_change: false, author: "Bruce Guenter", files_count: 15, insertions_count: 280, deletions_count: 117}, + {sha: "922d970672a79bf3e88ece9bfd020a73bcd7e8e4", date: "2025-12-09 03:13:53 UTC", description: "Ignore RUSTSEC-2025-0134 for rustls-pemfile", pr_number: 24352, scopes: ["ci"], type: "chore", breaking_change: false, author: "Thomas", files_count: 1, insertions_count: 2, deletions_count: 0}, + {sha: "a7a4106a4c1065fc3e85a933fedda7c2511b7ba1", date: "2025-12-09 22:05:19 UTC", description: "bump hyper, http-body and apply deprecation suggestions", pr_number: 24351, scopes: ["deps"], type: "chore", breaking_change: false, author: "Thomas", files_count: 26, insertions_count: 118, deletions_count: 75}, + {sha: "b5d718a2da8897e9631f96402889b496620e13c0", date: "2025-12-09 22:17:22 UTC", description: "use compiled vdev with `make` commands", pr_number: 24347, scopes: ["ci"], type: "feat", breaking_change: false, author: "Thomas", files_count: 5, insertions_count: 101, deletions_count: 40}, + {sha: "250de61049b4862586ddd1885057324c16bccfa4", date: "2025-12-10 06:02:13 UTC", description: "Configure prefetch count", pr_number: 24138, scopes: ["amqp source"], type: "feat", breaking_change: false, author: "elkh510", files_count: 3, insertions_count: 41, deletions_count: 1}, + {sha: "cf6e3293a859c04c50e63d34c857ed183fa5bea5", date: "2025-12-09 22:14:58 UTC", description: "Refactor `EventMetadata` deserialization from protobuf", pr_number: 24336, scopes: ["performance"], type: "chore", breaking_change: false, author: "Bruce Guenter", files_count: 2, insertions_count: 38, deletions_count: 36}, + {sha: "f1eecd0e778784bab08fd72c6e33b0b5631e2d79", date: "2025-12-09 21:42:57 UTC", description: "upgrade rdkafka to 0.38.0 
to resolve idempotent-producer fatal \"Inconsistent state\" stalls", pr_number: 24197, scopes: ["kafka sink"], type: "fix", breaking_change: false, author: "skipper", files_count: 3, insertions_count: 68, deletions_count: 17}, + {sha: "a7996cec4d7268dae610e1f9fca8804cd129955e", date: "2025-12-09 23:06:21 UTC", description: "EventMetadata UUID generation optimizations", pr_number: 24358, scopes: ["performance"], type: "chore", breaking_change: false, author: "Jansen", files_count: 6, insertions_count: 78, deletions_count: 14}, + {sha: "538c833d2f5c6529ba1df7b02f4bb73e60b2d778", date: "2025-12-10 21:38:11 UTC", description: "bump actions/checkout from 5.0.0 to 6.0.0", pr_number: 24322, scopes: ["ci"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 26, insertions_count: 83, deletions_count: 83}, + {sha: "0b35fe8791f347a553d4835fcd07cef7a1fb5d61", date: "2025-12-10 19:14:59 UTC", description: "add S3 download processing duration metric", pr_number: 24289, scopes: ["aws_s3 source"], type: "feat", breaking_change: false, author: "James", files_count: 4, insertions_count: 87, deletions_count: 6}, + {sha: "0f998497b88393ba33ee90d6775f0848237e32a3", date: "2025-12-10 23:04:15 UTC", description: "bump aws-actions/configure-aws-credentials from 5.0.0 to 5.1.1", pr_number: 24323, scopes: ["ci"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 1, insertions_count: 6, deletions_count: 6}, + {sha: "a053a2e62dc6c1490af2f9eacb7efafdcad0ab26", date: "2025-12-11 12:31:40 UTC", description: "reconnect indefinitely when connection fails", pr_number: 24069, scopes: ["websocket source"], type: "fix", breaking_change: false, author: "Benjamin Dornel", files_count: 4, insertions_count: 50, deletions_count: 20}, + {sha: "3f48cae746dfaa7d75b110e94cbe3cfedb6ebf82", date: "2025-12-11 12:53:32 UTC", description: "allow configurable null handling in Arrow encoder", pr_number: 24288, scopes: ["codecs"], type: "enhancement", 
breaking_change: false, author: "Benjamin Dornel", files_count: 2, insertions_count: 217, deletions_count: 3}, + {sha: "d2771c3f5639e9d87ba103a0492d0db05451df86", date: "2025-12-12 14:42:57 UTC", description: "clean up some `allow` statements", pr_number: 24366, scopes: ["dev"], type: "chore", breaking_change: false, author: "WaterWhisperer", files_count: 2, insertions_count: 0, deletions_count: 2}, + {sha: "b9cbce345499d42a691a8d485025068dd1cab3b0", date: "2025-12-12 22:43:29 UTC", description: "README e2e badge", pr_number: 24375, scopes: ["internal docs"], type: "docs", breaking_change: false, author: "Thomas", files_count: 1, insertions_count: 1, deletions_count: 1}, + {sha: "feb33ce7f08ec6799963d94ac8f627a2e131cbbe", date: "2025-12-13 03:05:00 UTC", description: "bump VRL to use 0.29.0 sha", pr_number: 24378, scopes: ["deps"], type: "chore", breaking_change: false, author: "Thomas", files_count: 2, insertions_count: 48, deletions_count: 36}, + {sha: "3921ecb5c14a6b48f89747907af08c7ddb08b207", date: "2025-12-15 19:45:23 UTC", description: "bump github/codeql-action from 3.30.6 to 4.31.6", pr_number: 24324, scopes: ["ci"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 1, insertions_count: 1, deletions_count: 1}, + {sha: "faa2c21fcdbac813e8433afec24fe7849556b197", date: "2025-12-15 19:54:46 UTC", description: "bump docker/metadata-action from 5.9.0 to 5.10.0", pr_number: 24326, scopes: ["ci"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 1, insertions_count: 1, deletions_count: 1}, + {sha: "eae0be26759331ca19fe5d77ecee78cd329e3133", date: "2025-12-15 19:55:09 UTC", description: "bump DataDog/dd-octo-sts-action from 1.0.1 to 1.0.3", pr_number: 24325, scopes: ["ci"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 1, insertions_count: 1, deletions_count: 1}, + {sha: "e6397f3fdba0306fb0def6d602c9f3d4053fa109", date: "2025-12-16 12:39:24 UTC", description: "add support 
for request body", pr_number: 24170, scopes: ["http_client source"], type: "enhancement", breaking_change: false, author: "Benjamin Dornel", files_count: 10, insertions_count: 489, deletions_count: 92}, + ] +} diff --git a/website/cue/reference/urls.cue b/website/cue/reference/urls.cue index 2578c0b38c60a..cf06991ddf94f 100644 --- a/website/cue/reference/urls.cue +++ b/website/cue/reference/urls.cue @@ -637,6 +637,7 @@ urls: { vector_socket_source: "/docs/reference/configuration/sources/socket" vector_source: "/docs/reference/configuration/sources/vector" vector_sources: "/docs/reference/configuration/sources" + vector_internal_metrics: "\(vector_sources)/internal_metrics" vector_stars: "\(vector_repo)/stargazers" vector_stdin_source: "/docs/reference/configuration/sources/stdin" vector_systemd_file: "\(vector_repo)/blob/master/distribution/systemd/vector.service" diff --git a/website/cue/reference/versions.cue b/website/cue/reference/versions.cue index e4bffd0b453f2..48ba8da9b5dc6 100644 --- a/website/cue/reference/versions.cue +++ b/website/cue/reference/versions.cue @@ -2,6 +2,7 @@ package metadata // This has to be maintained manually because there's currently no way to sort versions programmatically versions: [string, ...string] & [ + "0.52.0", "0.51.1", "0.51.0", "0.50.0", From b6b334615b1e13e457f663444ea7f402fdcb5ab3 Mon Sep 17 00:00:00 2001 From: Thomas Date: Fri, 19 Dec 2025 13:02:51 -0500 Subject: [PATCH 156/227] chore(dev): Upgrade to rust 1.92.0 (#24376) * Upgrade to rust 1.92.0 * chore(vdev): apply vdev rust check fixes * Fix remaining clippy warnings --- lib/vector-stream/src/partitioned_batcher.rs | 2 +- rust-toolchain.toml | 2 +- src/sinks/elasticsearch/config.rs | 3 +-- src/test_util/mod.rs | 5 +---- src/topology/schema.rs | 1 - src/transforms/remap.rs | 3 +-- 6 files changed, 5 insertions(+), 11 deletions(-) diff --git a/lib/vector-stream/src/partitioned_batcher.rs b/lib/vector-stream/src/partitioned_batcher.rs index 42541b381c9bc..30cf039ec3840 
100644 --- a/lib/vector-stream/src/partitioned_batcher.rs +++ b/lib/vector-stream/src/partitioned_batcher.rs @@ -359,7 +359,7 @@ mod test { use futures::{Stream, stream}; use pin_project::pin_project; use proptest::prelude::*; - use tokio::{pin, time::advance}; + use tokio::time::advance; use vector_core::{partition::Partitioner, time::KeyedTimer}; use crate::{ diff --git a/rust-toolchain.toml b/rust-toolchain.toml index 28130b1dcb7cc..532fcab5b284c 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,3 +1,3 @@ [toolchain] -channel = "1.91" +channel = "1.92" profile = "default" diff --git a/src/sinks/elasticsearch/config.rs b/src/sinks/elasticsearch/config.rs index f149ea57e1db0..8c2c617e82a08 100644 --- a/src/sinks/elasticsearch/config.rs +++ b/src/sinks/elasticsearch/config.rs @@ -552,11 +552,10 @@ impl SinkConfig for ElasticsearchConfig { let services = commons .iter() - .cloned() .map(|common| { let endpoint = common.base_url.clone(); - let http_request_builder = HttpRequestBuilder::new(&common, self); + let http_request_builder = HttpRequestBuilder::new(common, self); let service = ElasticsearchService::new(client.clone(), http_request_builder); (endpoint, service) diff --git a/src/test_util/mod.rs b/src/test_util/mod.rs index e9d0f82a561bc..58b0ada3e8c6f 100644 --- a/src/test_util/mod.rs +++ b/src/test_util/mod.rs @@ -106,10 +106,7 @@ where } pub fn open_fixture(path: impl AsRef) -> crate::Result { - let test_file = match File::open(path) { - Ok(file) => file, - Err(e) => return Err(e.into()), - }; + let test_file = File::open(path)?; let value: serde_json::Value = serde_json::from_reader(test_file)?; Ok(value) } diff --git a/src/topology/schema.rs b/src/topology/schema.rs index 6a3acc099edd1..e19c0ad540eb9 100644 --- a/src/topology/schema.rs +++ b/src/topology/schema.rs @@ -327,7 +327,6 @@ pub(super) fn validate_sink_expectations( &mut err .errors() .iter() - .cloned() .map(|err| format!("schema error in component {key}: {err}")) .collect(), ); 
diff --git a/src/transforms/remap.rs b/src/transforms/remap.rs index a23c5754b5264..219e2e3792fb9 100644 --- a/src/transforms/remap.rs +++ b/src/transforms/remap.rs @@ -497,8 +497,7 @@ where let message = error .notes() .iter() - .filter(|note| matches!(note, Note::UserErrorMessage(_))) - .next_back() + .rfind(|note| matches!(note, Note::UserErrorMessage(_))) .map(|note| note.to_string()) .unwrap_or_else(|| error.to_string()); serde_json::json!({ From 01cf516897a00b9bc7f149ab1435415c183dc876 Mon Sep 17 00:00:00 2001 From: Thomas Date: Fri, 19 Dec 2025 14:42:27 -0500 Subject: [PATCH 157/227] chore(website): Update to React 19 (#24392) * Update website to React 19 * Fix toc * Remove unused import --- website/assets/js/below.js | 2 +- website/assets/js/home.tsx | 9 +- website/assets/js/search.tsx | 14 +- website/package.json | 6 +- website/yarn.lock | 3674 ++++++++++++++++------------------ 5 files changed, 1717 insertions(+), 1988 deletions(-) diff --git a/website/assets/js/below.js b/website/assets/js/below.js index a6cae3674d76d..f61004ef2a746 100644 --- a/website/assets/js/below.js +++ b/website/assets/js/below.js @@ -1,4 +1,4 @@ -import 'tocbot/dist/tocbot'; +import tocbot from 'tocbot'; // Table of contents for documentation pages const tableOfContents = () => { diff --git a/website/assets/js/home.tsx b/website/assets/js/home.tsx index 25c71cae64e79..fb92927ac4fe8 100644 --- a/website/assets/js/home.tsx +++ b/website/assets/js/home.tsx @@ -4,7 +4,7 @@ // Imports import React, { useEffect, useState } from "react"; -import ReactDOM from "react-dom"; +import { createRoot } from "react-dom/client"; import classnames from "classnames"; import { useInterval } from "react-use"; import { useSpring, animated } from "react-spring"; @@ -316,5 +316,8 @@ function Diagram({className, height, width}) { }; // Place the components in the DOM -ReactDOM.render(, document.getElementById("globe")); -ReactDOM.render(, document.getElementById("diagram")); +const globeRoot = 
createRoot(document.getElementById("globe")); +globeRoot.render(); + +const diagramRoot = createRoot(document.getElementById("diagram")); +diagramRoot.render(); diff --git a/website/assets/js/search.tsx b/website/assets/js/search.tsx index 7aa10b54be51c..f322f463ed774 100644 --- a/website/assets/js/search.tsx +++ b/website/assets/js/search.tsx @@ -1,7 +1,7 @@ import { autocomplete } from '@algolia/autocomplete-js' import Typesense from 'typesense' import React, { createElement, Fragment, useEffect, useRef } from 'react' -import ReactDOM, { render } from 'react-dom' +import { createRoot } from 'react-dom/client' // // Algolia search // const appId = process.env.ALGOLIA_APP_ID @@ -114,6 +114,7 @@ const Result = ({ hit, components, category }) => { const Autocomplete = (props) => { const containerRef = useRef(null) + const panelRootRef = useRef({}) useEffect(() => { if (!containerRef.current) { @@ -125,7 +126,10 @@ const Autocomplete = (props) => { renderer: { createElement, Fragment }, render({ children, state, components }, root) { const { preview } = state.context as any - render( + if (!panelRootRef.current[root]) { + panelRootRef.current[root] = createRoot(root) + } + panelRootRef.current[root].render(

{children}
@@ -163,8 +167,7 @@ const Autocomplete = (props) => {
- , - root, + ) }, ...props, @@ -246,4 +249,5 @@ const Search = () => { } -ReactDOM.render(, document.getElementById('site-search')) +const searchRoot = createRoot(document.getElementById('site-search')) +searchRoot.render() diff --git a/website/package.json b/website/package.json index 2d706e07302f1..d0c18a147fae3 100644 --- a/website/package.json +++ b/website/package.json @@ -45,9 +45,9 @@ "path": "^0.12.7", "postcss-cli": "^8.3.1", "postcss-import": "^14.0.1", - "react": "^17.0.2", - "react-dom": "^17.0.2", - "react-spring": "^9.2.3", + "react": "^19.2.3", + "react-dom": "^19.2.3", + "react-spring": "^10.0.3", "react-use": "^17.2.4", "regenerator-runtime": "^0.13.7", "tailwindcss": "^2.2.4", diff --git a/website/yarn.lock b/website/yarn.lock index 8fa65756ea55e..be69bb103eaba 100644 --- a/website/yarn.lock +++ b/website/yarn.lock @@ -2,151 +2,139 @@ # yarn lockfile v1 -"@algolia/autocomplete-core@1.5.2": - version "1.5.2" - resolved "https://registry.yarnpkg.com/@algolia/autocomplete-core/-/autocomplete-core-1.5.2.tgz#ec0178e07b44fd74a057728ac157291b26cecf37" - integrity sha512-DY0bhyczFSS1b/CqJlTE/nQRtnTAHl6IemIkBy0nEWnhDzRDdtdx4p5Uuk3vwAFxwEEgi1WqKwgSSMx6DpNL4A== +"@algolia/autocomplete-core@1.19.4": + version "1.19.4" + resolved "https://registry.yarnpkg.com/@algolia/autocomplete-core/-/autocomplete-core-1.19.4.tgz#db9e4ef88cd8f2ce5b25e376373a8898dcbe2945" + integrity sha512-yVwXLrfwQ3dAndY12j1pfa0oyC5hTDv+/dgwvVHj57dY3zN6PbAmcHdV5DOOdGJrCMXff+fsPr8G2Ik8zWOPTw== dependencies: - "@algolia/autocomplete-shared" "1.5.2" + "@algolia/autocomplete-plugin-algolia-insights" "1.19.4" + "@algolia/autocomplete-shared" "1.19.4" "@algolia/autocomplete-js@^1.2.1": - version "1.5.2" - resolved "https://registry.yarnpkg.com/@algolia/autocomplete-js/-/autocomplete-js-1.5.2.tgz#126fbfd800c534eba1404dc05128aeb91355e1c1" - integrity sha512-6bVTym+H+KSxuEbFmpCQoLXGdv3kfzxJhTE32YXpGYHWmaH7/R4KbxnnvSPe1Df7bV0Zo+DbVu72Zq2gZRRc0w== + version "1.19.4" + resolved 
"https://registry.yarnpkg.com/@algolia/autocomplete-js/-/autocomplete-js-1.19.4.tgz#235e554d4e46567d7305d8c216b75dd2a0091655" + integrity sha512-ZkwsuTTIEuw+hbsIooMrNLvTVulUSSKqJT3ZeYYd//kA5fHaFf2/T0BDmd9qSGxZRhT5WS8AJYjFARLmj5x08g== dependencies: - "@algolia/autocomplete-core" "1.5.2" - "@algolia/autocomplete-preset-algolia" "1.5.2" - "@algolia/autocomplete-shared" "1.5.2" - preact "^10.0.0" + "@algolia/autocomplete-core" "1.19.4" + "@algolia/autocomplete-preset-algolia" "1.19.4" + "@algolia/autocomplete-shared" "1.19.4" + htm "^3.1.1" + preact "^10.13.2" -"@algolia/autocomplete-preset-algolia@1.5.2": - version "1.5.2" - resolved "https://registry.yarnpkg.com/@algolia/autocomplete-preset-algolia/-/autocomplete-preset-algolia-1.5.2.tgz#36c5638cc6dba6ea46a86e5a0314637ca40a77ca" - integrity sha512-3MRYnYQFJyovANzSX2CToS6/5cfVjbLLqFsZTKcvF3abhQzxbqwwaMBlJtt620uBUOeMzhdfasKhCc40+RHiZw== +"@algolia/autocomplete-plugin-algolia-insights@1.19.4": + version "1.19.4" + resolved "https://registry.yarnpkg.com/@algolia/autocomplete-plugin-algolia-insights/-/autocomplete-plugin-algolia-insights-1.19.4.tgz#be14ba50677ea308d43e4f9e96f4542c3da51432" + integrity sha512-K6TQhTKxx0Es1ZbjlAQjgm/QLDOtKvw23MX0xmpvO7AwkmlmaEXo2PwHdVSs3Bquv28CkO2BYKks7jVSIdcXUg== dependencies: - "@algolia/autocomplete-shared" "1.5.2" + "@algolia/autocomplete-shared" "1.19.4" -"@algolia/autocomplete-shared@1.5.2": - version "1.5.2" - resolved "https://registry.yarnpkg.com/@algolia/autocomplete-shared/-/autocomplete-shared-1.5.2.tgz#e157f9ad624ab8fd940ff28bd2094cdf199cdd79" - integrity sha512-ylQAYv5H0YKMfHgVWX0j0NmL8XBcAeeeVQUmppnnMtzDbDnca6CzhKj3Q8eF9cHCgcdTDdb5K+3aKyGWA0obug== +"@algolia/autocomplete-preset-algolia@1.19.4": + version "1.19.4" + resolved "https://registry.yarnpkg.com/@algolia/autocomplete-preset-algolia/-/autocomplete-preset-algolia-1.19.4.tgz#258c65112d73376c5c395d1ce67cd668deb06572" + integrity 
sha512-WhX4mYosy7yBDjkB6c/ag+WKICjvV2fqQv/+NWJlpvnk2JtMaZByi73F6svpQX945J+/PxpQe8YIRBZHuYsLAQ== + dependencies: + "@algolia/autocomplete-shared" "1.19.4" -"@algolia/autocomplete-theme-classic@^1.2.1": - version "1.5.2" - resolved "https://registry.yarnpkg.com/@algolia/autocomplete-theme-classic/-/autocomplete-theme-classic-1.5.2.tgz#42ad77306804e38a1ec3547202cd08ff6ce85c95" - integrity sha512-WZyqRJwotGKOn2Qz/owEIuKk/YrMUMnVIiwJpzrqKk0cD9TnbP92Nt3ioYyBvw/eVxJJAYQSLCOUFobYB4IV6w== +"@algolia/autocomplete-shared@1.19.4": + version "1.19.4" + resolved "https://registry.yarnpkg.com/@algolia/autocomplete-shared/-/autocomplete-shared-1.19.4.tgz#fd0b92e2723e70c97df4fa7ba0a170c500289918" + integrity sha512-V7tYDgRXP0AqL4alwZBWNm1HPWjJvEU94Nr7Qa2cuPcIAbsTAj7M/F/+Pv/iwOWXl3N7tzVzNkOWm7sX6JT1SQ== -"@ampproject/remapping@^2.2.0": - version "2.3.0" - resolved "https://registry.yarnpkg.com/@ampproject/remapping/-/remapping-2.3.0.tgz#ed441b6fa600072520ce18b43d2c8cc8caecc7f4" - integrity sha512-30iZtAPgz+LTIYoeivqYo853f02jBYSd5uGnGpkFV0M3xOt9aN73erkgYAmZU43x4VfqcnLxW9Kpg3R5LC4YYw== - dependencies: - "@jridgewell/gen-mapping" "^0.3.5" - "@jridgewell/trace-mapping" "^0.3.24" +"@algolia/autocomplete-theme-classic@^1.2.1": + version "1.19.4" + resolved "https://registry.yarnpkg.com/@algolia/autocomplete-theme-classic/-/autocomplete-theme-classic-1.19.4.tgz#7a0802e7c64dcc3584d5085e23a290a64ade4319" + integrity sha512-/qE8BETNFbul4WrrUyBYgaaKcgFPk0Px9FDKADnr3HlIkXquRpcFHTxXK16jdwXb33yrcXaAVSQZRfUUSSnxVA== "@babel/cli@^7.14.5": - version "7.17.0" - resolved "https://registry.yarnpkg.com/@babel/cli/-/cli-7.17.0.tgz#9b932d8f08a2e218fcdd9bba456044eb0a2e0b2c" - integrity sha512-es10YH/ejXbg551vtnmEzIPe3MQRNOS644o3pf8vUr1tIeNzVNlP8BBvs1Eh7roh5A+k2fEHUas+ZptOWHA1fQ== + version "7.28.3" + resolved "https://registry.yarnpkg.com/@babel/cli/-/cli-7.28.3.tgz#f33693753bc103ab0084a5776ccf8ab8a140038b" + integrity 
sha512-n1RU5vuCX0CsaqaXm9I0KUCNKNQMy5epmzl/xdSSm70bSqhg9GWhgeosypyQLc0bK24+Xpk1WGzZlI9pJtkZdg== dependencies: - commander "^4.0.1" - convert-source-map "^1.1.0" + "@jridgewell/trace-mapping" "^0.3.28" + commander "^6.2.0" + convert-source-map "^2.0.0" fs-readdir-recursive "^1.1.0" - glob "^7.0.0" + glob "^7.2.0" make-dir "^2.1.0" slash "^2.0.0" - source-map "^0.5.0" optionalDependencies: "@nicolo-ribaudo/chokidar-2" "2.1.8-no-fsevents.3" - chokidar "^3.4.0" + chokidar "^3.6.0" -"@babel/code-frame@^7.0.0": - version "7.16.7" - resolved "https://registry.yarnpkg.com/@babel/code-frame/-/code-frame-7.16.7.tgz#44416b6bd7624b998f5b1af5d470856c40138789" - integrity sha512-iAXqUn8IIeBTNd72xsFlgaXHkMBMt6y4HJp1tIaK465CWLT/fG1aqB7ykr95gHHmlBdGbFeWWfyB4NJJ0nmeIg== +"@babel/code-frame@^7.0.0", "@babel/code-frame@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/code-frame/-/code-frame-7.27.1.tgz#200f715e66d52a23b221a9435534a91cc13ad5be" + integrity sha512-cjQ7ZlQ0Mv3b47hABuTevyTuYN4i+loJKGeV9flcCgIK37cCXRh+L1bd3iBHlynerhQ7BhCkn2BPbQUL+rGqFg== dependencies: - "@babel/highlight" "^7.16.7" - -"@babel/code-frame@^7.25.9", "@babel/code-frame@^7.26.2": - version "7.26.2" - resolved "https://registry.yarnpkg.com/@babel/code-frame/-/code-frame-7.26.2.tgz#4b5fab97d33338eff916235055f0ebc21e573a85" - integrity sha512-RJlIHRueQgwWitWgF8OdFYGZX328Ax5BCemNGlqHfplnRT9ESi8JkFlvaVYbS+UubVY6dpv87Fs2u5M29iNFVQ== - dependencies: - "@babel/helper-validator-identifier" "^7.25.9" + "@babel/helper-validator-identifier" "^7.27.1" js-tokens "^4.0.0" - picocolors "^1.0.0" + picocolors "^1.1.1" -"@babel/compat-data@^7.16.8", "@babel/compat-data@^7.17.7", "@babel/compat-data@^7.20.5", "@babel/compat-data@^7.22.6", "@babel/compat-data@^7.26.5": - version "7.26.5" - resolved "https://registry.yarnpkg.com/@babel/compat-data/-/compat-data-7.26.5.tgz#df93ac37f4417854130e21d72c66ff3d4b897fc7" - integrity 
sha512-XvcZi1KWf88RVbF9wn8MN6tYFloU5qX8KjuF3E1PVBmJ9eypXfs4GRiJwLuTZL0iSnJUKn1BFPa5BPZZJyFzPg== +"@babel/compat-data@^7.17.7", "@babel/compat-data@^7.27.2", "@babel/compat-data@^7.27.7", "@babel/compat-data@^7.28.5": + version "7.28.5" + resolved "https://registry.yarnpkg.com/@babel/compat-data/-/compat-data-7.28.5.tgz#a8a4962e1567121ac0b3b487f52107443b455c7f" + integrity sha512-6uFXyCayocRbqhZOB+6XcuZbkMNimwfVGFji8CTZnCzOHVGvDqzvitu1re2AU5LROliz7eQPhB8CpAMvnx9EjA== "@babel/core@^7.14.6": - version "7.26.7" - resolved "https://registry.yarnpkg.com/@babel/core/-/core-7.26.7.tgz#0439347a183b97534d52811144d763a17f9d2b24" - integrity sha512-SRijHmF0PSPgLIBYlWnG0hyeJLwXE2CgpsXaMOrtt2yp9/86ALw6oUlj9KYuZ0JN07T4eBMVIW4li/9S1j2BGA== - dependencies: - "@ampproject/remapping" "^2.2.0" - "@babel/code-frame" "^7.26.2" - "@babel/generator" "^7.26.5" - "@babel/helper-compilation-targets" "^7.26.5" - "@babel/helper-module-transforms" "^7.26.0" - "@babel/helpers" "^7.26.7" - "@babel/parser" "^7.26.7" - "@babel/template" "^7.25.9" - "@babel/traverse" "^7.26.7" - "@babel/types" "^7.26.7" + version "7.28.5" + resolved "https://registry.yarnpkg.com/@babel/core/-/core-7.28.5.tgz#4c81b35e51e1b734f510c99b07dfbc7bbbb48f7e" + integrity sha512-e7jT4DxYvIDLk1ZHmU/m/mB19rex9sv0c2ftBtjSBv+kVM/902eh0fINUzD7UwLLNR+jU585GxUJ8/EBfAM5fw== + dependencies: + "@babel/code-frame" "^7.27.1" + "@babel/generator" "^7.28.5" + "@babel/helper-compilation-targets" "^7.27.2" + "@babel/helper-module-transforms" "^7.28.3" + "@babel/helpers" "^7.28.4" + "@babel/parser" "^7.28.5" + "@babel/template" "^7.27.2" + "@babel/traverse" "^7.28.5" + "@babel/types" "^7.28.5" + "@jridgewell/remapping" "^2.3.5" convert-source-map "^2.0.0" debug "^4.1.0" gensync "^1.0.0-beta.2" json5 "^2.2.3" semver "^6.3.1" -"@babel/generator@^7.26.5": - version "7.26.5" - resolved "https://registry.yarnpkg.com/@babel/generator/-/generator-7.26.5.tgz#e44d4ab3176bbcaf78a5725da5f1dc28802a9458" - integrity 
sha512-2caSP6fN9I7HOe6nqhtft7V4g7/V/gfDsC3Ag4W7kEzzvRGKqiv0pu0HogPiZ3KaVSoNDhUws6IJjDjpfmYIXw== +"@babel/generator@^7.28.5": + version "7.28.5" + resolved "https://registry.yarnpkg.com/@babel/generator/-/generator-7.28.5.tgz#712722d5e50f44d07bc7ac9fe84438742dd61298" + integrity sha512-3EwLFhZ38J4VyIP6WNtt2kUdW9dokXA9Cr4IVIFHuCpZ3H8/YFOl5JjZHisrn1fATPBmKKqXzDFvh9fUwHz6CQ== dependencies: - "@babel/parser" "^7.26.5" - "@babel/types" "^7.26.5" - "@jridgewell/gen-mapping" "^0.3.5" - "@jridgewell/trace-mapping" "^0.3.25" + "@babel/parser" "^7.28.5" + "@babel/types" "^7.28.5" + "@jridgewell/gen-mapping" "^0.3.12" + "@jridgewell/trace-mapping" "^0.3.28" jsesc "^3.0.2" -"@babel/helper-annotate-as-pure@^7.16.7", "@babel/helper-annotate-as-pure@^7.18.6", "@babel/helper-annotate-as-pure@^7.25.9": +"@babel/helper-annotate-as-pure@^7.16.7", "@babel/helper-annotate-as-pure@^7.25.9": version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/helper-annotate-as-pure/-/helper-annotate-as-pure-7.25.9.tgz#d8eac4d2dc0d7b6e11fa6e535332e0d3184f06b4" + resolved "https://registry.npmjs.org/@babel/helper-annotate-as-pure/-/helper-annotate-as-pure-7.25.9.tgz" integrity sha512-gv7320KBUFJz1RnylIg5WWYPRXKZ884AGkYpgpWW02TH66Dl+HaC1t1CKd0z3R4b6hdYEcmrNZHUmfCP+1u3/g== dependencies: "@babel/types" "^7.25.9" -"@babel/helper-compilation-targets@^7.16.7", "@babel/helper-compilation-targets@^7.17.7", "@babel/helper-compilation-targets@^7.20.7", "@babel/helper-compilation-targets@^7.22.6", "@babel/helper-compilation-targets@^7.25.9", "@babel/helper-compilation-targets@^7.26.5": - version "7.26.5" - resolved "https://registry.yarnpkg.com/@babel/helper-compilation-targets/-/helper-compilation-targets-7.26.5.tgz#75d92bb8d8d51301c0d49e52a65c9a7fe94514d8" - integrity sha512-IXuyn5EkouFJscIDuFF5EsiSolseme1s0CZB+QxVugqJLYmKdxI1VfIBOst0SUu4rnk2Z7kqTwmoO1lp3HIfnA== +"@babel/helper-annotate-as-pure@^7.27.1", "@babel/helper-annotate-as-pure@^7.27.3": + version "7.27.3" + resolved 
"https://registry.yarnpkg.com/@babel/helper-annotate-as-pure/-/helper-annotate-as-pure-7.27.3.tgz#f31fd86b915fc4daf1f3ac6976c59be7084ed9c5" + integrity sha512-fXSwMQqitTGeHLBC08Eq5yXz2m37E4pJX1qAU1+2cNedz/ifv/bVXft90VeSav5nFO61EcNgwr0aJxbyPaWBPg== dependencies: - "@babel/compat-data" "^7.26.5" - "@babel/helper-validator-option" "^7.25.9" - browserslist "^4.24.0" - lru-cache "^5.1.1" - semver "^6.3.1" + "@babel/types" "^7.27.3" -"@babel/helper-create-class-features-plugin@^7.16.7", "@babel/helper-create-class-features-plugin@^7.18.6", "@babel/helper-create-class-features-plugin@^7.21.0", "@babel/helper-create-class-features-plugin@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/helper-create-class-features-plugin/-/helper-create-class-features-plugin-7.25.9.tgz#7644147706bb90ff613297d49ed5266bde729f83" - integrity sha512-UTZQMvt0d/rSz6KI+qdu7GQze5TIajwTS++GUozlw8VBJDEOAqSXwm1WvmYEZwqdqSGQshRocPDqrt4HBZB3fQ== +"@babel/helper-compilation-targets@^7.17.7", "@babel/helper-compilation-targets@^7.27.1", "@babel/helper-compilation-targets@^7.27.2": + version "7.27.2" + resolved "https://registry.yarnpkg.com/@babel/helper-compilation-targets/-/helper-compilation-targets-7.27.2.tgz#46a0f6efab808d51d29ce96858dd10ce8732733d" + integrity sha512-2+1thGUUWWjLTYTHZWK1n8Yga0ijBz1XAhUXcKy81rd5g6yh7hGqMp45v7cadSbEHc9G3OTv45SyneRN3ps4DQ== dependencies: - "@babel/helper-annotate-as-pure" "^7.25.9" - "@babel/helper-member-expression-to-functions" "^7.25.9" - "@babel/helper-optimise-call-expression" "^7.25.9" - "@babel/helper-replace-supers" "^7.25.9" - "@babel/helper-skip-transparent-expression-wrappers" "^7.25.9" - "@babel/traverse" "^7.25.9" + "@babel/compat-data" "^7.27.2" + "@babel/helper-validator-option" "^7.27.1" + browserslist "^4.24.0" + lru-cache "^5.1.1" semver "^6.3.1" "@babel/helper-create-class-features-plugin@^7.17.1": version "7.17.1" - resolved 
"https://registry.yarnpkg.com/@babel/helper-create-class-features-plugin/-/helper-create-class-features-plugin-7.17.1.tgz#9699f14a88833a7e055ce57dcd3ffdcd25186b21" + resolved "https://registry.npmjs.org/@babel/helper-create-class-features-plugin/-/helper-create-class-features-plugin-7.17.1.tgz" integrity sha512-JBdSr/LtyYIno/pNnJ75lBcqc3Z1XXujzPanHqjvvrhOA+DTceTFuJi8XjmWTZh4r3fsdfqaCMN0iZemdkxZHQ== dependencies: "@babel/helper-annotate-as-pure" "^7.16.7" @@ -157,18 +145,40 @@ "@babel/helper-replace-supers" "^7.16.7" "@babel/helper-split-export-declaration" "^7.16.7" -"@babel/helper-create-regexp-features-plugin@^7.18.6", "@babel/helper-create-regexp-features-plugin@^7.25.9": +"@babel/helper-create-class-features-plugin@^7.27.1", "@babel/helper-create-class-features-plugin@^7.28.3", "@babel/helper-create-class-features-plugin@^7.28.5": + version "7.28.5" + resolved "https://registry.yarnpkg.com/@babel/helper-create-class-features-plugin/-/helper-create-class-features-plugin-7.28.5.tgz#472d0c28028850968979ad89f173594a6995da46" + integrity sha512-q3WC4JfdODypvxArsJQROfupPBq9+lMwjKq7C33GhbFYJsufD0yd/ziwD+hJucLeWsnFPWZjsU2DNFqBPE7jwQ== + dependencies: + "@babel/helper-annotate-as-pure" "^7.27.3" + "@babel/helper-member-expression-to-functions" "^7.28.5" + "@babel/helper-optimise-call-expression" "^7.27.1" + "@babel/helper-replace-supers" "^7.27.1" + "@babel/helper-skip-transparent-expression-wrappers" "^7.27.1" + "@babel/traverse" "^7.28.5" + semver "^6.3.1" + +"@babel/helper-create-regexp-features-plugin@^7.18.6": version "7.26.3" - resolved "https://registry.yarnpkg.com/@babel/helper-create-regexp-features-plugin/-/helper-create-regexp-features-plugin-7.26.3.tgz#5169756ecbe1d95f7866b90bb555b022595302a0" + resolved "https://registry.npmjs.org/@babel/helper-create-regexp-features-plugin/-/helper-create-regexp-features-plugin-7.26.3.tgz" integrity sha512-G7ZRb40uUgdKOQqPLjfD12ZmGA54PzqDFUv2BKImnC9QIfGhIHKvVML0oN8IUiDq4iRqpq74ABpvOaerfWdong== dependencies: 
"@babel/helper-annotate-as-pure" "^7.25.9" regexpu-core "^6.2.0" semver "^6.3.1" +"@babel/helper-create-regexp-features-plugin@^7.27.1": + version "7.28.5" + resolved "https://registry.yarnpkg.com/@babel/helper-create-regexp-features-plugin/-/helper-create-regexp-features-plugin-7.28.5.tgz#7c1ddd64b2065c7f78034b25b43346a7e19ed997" + integrity sha512-N1EhvLtHzOvj7QQOUCCS3NrPJP8c5W6ZXCHDn7Yialuy1iu4r5EmIYkXlKNqT99Ciw+W0mDqWoR6HWMZlFP3hw== + dependencies: + "@babel/helper-annotate-as-pure" "^7.27.3" + regexpu-core "^6.3.1" + semver "^6.3.1" + "@babel/helper-define-polyfill-provider@^0.3.1", "@babel/helper-define-polyfill-provider@^0.3.2", "@babel/helper-define-polyfill-provider@^0.3.3": version "0.3.3" - resolved "https://registry.yarnpkg.com/@babel/helper-define-polyfill-provider/-/helper-define-polyfill-provider-0.3.3.tgz#8612e55be5d51f0cd1f36b4a5a83924e89884b7a" + resolved "https://registry.npmjs.org/@babel/helper-define-polyfill-provider/-/helper-define-polyfill-provider-0.3.3.tgz" integrity sha512-z5aQKU4IzbqCC1XH0nAqfsFLMVSo22SBKUc0BxGrLkolTdPTructy0ToNnlO2zA4j9Q/7pjMZf0DSY+DSTYzww== dependencies: "@babel/helper-compilation-targets" "^7.17.7" @@ -178,227 +188,220 @@ resolve "^1.14.2" semver "^6.1.2" -"@babel/helper-define-polyfill-provider@^0.6.2", "@babel/helper-define-polyfill-provider@^0.6.3": - version "0.6.3" - resolved "https://registry.yarnpkg.com/@babel/helper-define-polyfill-provider/-/helper-define-polyfill-provider-0.6.3.tgz#f4f2792fae2ef382074bc2d713522cf24e6ddb21" - integrity sha512-HK7Bi+Hj6H+VTHA3ZvBis7V/6hu9QuTrnMXNybfUf2iiuU/N97I8VjB+KbhFF8Rld/Lx5MzoCwPCpPjfK+n8Cg== +"@babel/helper-define-polyfill-provider@^0.6.5": + version "0.6.5" + resolved "https://registry.yarnpkg.com/@babel/helper-define-polyfill-provider/-/helper-define-polyfill-provider-0.6.5.tgz#742ccf1cb003c07b48859fc9fa2c1bbe40e5f753" + integrity sha512-uJnGFcPsWQK8fvjgGP5LZUZZsYGIoPeRjSF5PGwrelYgq7Q15/Ft9NGFp1zglwgIv//W0uG4BevRuSJRyylZPg== dependencies: - 
"@babel/helper-compilation-targets" "^7.22.6" - "@babel/helper-plugin-utils" "^7.22.5" - debug "^4.1.1" + "@babel/helper-compilation-targets" "^7.27.2" + "@babel/helper-plugin-utils" "^7.27.1" + debug "^4.4.1" lodash.debounce "^4.0.8" - resolve "^1.14.2" + resolve "^1.22.10" -"@babel/helper-environment-visitor@^7.16.7", "@babel/helper-environment-visitor@^7.18.9": +"@babel/helper-environment-visitor@^7.16.7": version "7.24.7" - resolved "https://registry.yarnpkg.com/@babel/helper-environment-visitor/-/helper-environment-visitor-7.24.7.tgz#4b31ba9551d1f90781ba83491dd59cf9b269f7d9" + resolved "https://registry.npmjs.org/@babel/helper-environment-visitor/-/helper-environment-visitor-7.24.7.tgz" integrity sha512-DoiN84+4Gnd0ncbBOM9AZENV4a5ZiL39HYMyZJGZ/AZEykHYdJw0wW3kdcsh9/Kn+BRXHLkkklZ51ecPKmI1CQ== dependencies: "@babel/types" "^7.24.7" "@babel/helper-function-name@^7.16.7": version "7.24.7" - resolved "https://registry.yarnpkg.com/@babel/helper-function-name/-/helper-function-name-7.24.7.tgz#75f1e1725742f39ac6584ee0b16d94513da38dd2" + resolved "https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.24.7.tgz" integrity sha512-FyoJTsj/PEUWu1/TYRiXTIHc8lbw+TDYkZuoE43opPS5TrI7MyONBE1oNvfguEXAD9yhQRrVBnXdXzSLQl9XnA== dependencies: "@babel/template" "^7.24.7" "@babel/types" "^7.24.7" +"@babel/helper-globals@^7.28.0": + version "7.28.0" + resolved "https://registry.yarnpkg.com/@babel/helper-globals/-/helper-globals-7.28.0.tgz#b9430df2aa4e17bc28665eadeae8aa1d985e6674" + integrity sha512-+W6cISkXFa1jXsDEdYA8HeevQT/FULhxzR99pxphltZcVaugps53THCeiWA8SguxxpSp3gKPiuYfSWopkLQ4hw== + "@babel/helper-member-expression-to-functions@^7.16.7", "@babel/helper-member-expression-to-functions@^7.25.9": version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/helper-member-expression-to-functions/-/helper-member-expression-to-functions-7.25.9.tgz#9dfffe46f727005a5ea29051ac835fb735e4c1a3" + resolved 
"https://registry.npmjs.org/@babel/helper-member-expression-to-functions/-/helper-member-expression-to-functions-7.25.9.tgz" integrity sha512-wbfdZ9w5vk0C0oyHqAJbc62+vet5prjj01jjJ8sKn3j9h3MQQlflEdXYvuqRWjHnM12coDEqiC1IRCi0U/EKwQ== dependencies: "@babel/traverse" "^7.25.9" "@babel/types" "^7.25.9" -"@babel/helper-module-imports@^7.12.13", "@babel/helper-module-imports@^7.16.7", "@babel/helper-module-imports@^7.25.9": +"@babel/helper-member-expression-to-functions@^7.27.1", "@babel/helper-member-expression-to-functions@^7.28.5": + version "7.28.5" + resolved "https://registry.yarnpkg.com/@babel/helper-member-expression-to-functions/-/helper-member-expression-to-functions-7.28.5.tgz#f3e07a10be37ed7a63461c63e6929575945a6150" + integrity sha512-cwM7SBRZcPCLgl8a7cY0soT1SptSzAlMH39vwiRpOQkJlh53r5hdHwLSCZpQdVLT39sZt+CRpNwYG4Y2v77atg== + dependencies: + "@babel/traverse" "^7.28.5" + "@babel/types" "^7.28.5" + +"@babel/helper-module-imports@^7.16.7": version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/helper-module-imports/-/helper-module-imports-7.25.9.tgz#e7f8d20602ebdbf9ebbea0a0751fb0f2a4141715" + resolved "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.25.9.tgz" integrity sha512-tnUA4RsrmflIM6W6RFTLFSXITtl0wKjgpnLgXyowocVPrbYrLUXSBXDgTs8BlbmIzIdlBySRQjINYs2BAkiLtw== dependencies: "@babel/traverse" "^7.25.9" "@babel/types" "^7.25.9" -"@babel/helper-module-transforms@^7.25.9", "@babel/helper-module-transforms@^7.26.0": - version "7.26.0" - resolved "https://registry.yarnpkg.com/@babel/helper-module-transforms/-/helper-module-transforms-7.26.0.tgz#8ce54ec9d592695e58d84cd884b7b5c6a2fdeeae" - integrity sha512-xO+xu6B5K2czEnQye6BHA7DolFFmS3LB7stHZFaOLb1pAwO1HWLS8fXA+eh0A2yIvltPVmx3eNNDBJA2SLHXFw== +"@babel/helper-module-imports@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/helper-module-imports/-/helper-module-imports-7.27.1.tgz#7ef769a323e2655e126673bb6d2d6913bbead204" + integrity 
sha512-0gSFWUPNXNopqtIPQvlD5WgXYI5GY2kP2cCvoT8kczjbfcfuIljTbcWrulD1CIPIX2gt1wghbDy08yE1p+/r3w== dependencies: - "@babel/helper-module-imports" "^7.25.9" - "@babel/helper-validator-identifier" "^7.25.9" - "@babel/traverse" "^7.25.9" + "@babel/traverse" "^7.27.1" + "@babel/types" "^7.27.1" + +"@babel/helper-module-transforms@^7.27.1", "@babel/helper-module-transforms@^7.28.3": + version "7.28.3" + resolved "https://registry.yarnpkg.com/@babel/helper-module-transforms/-/helper-module-transforms-7.28.3.tgz#a2b37d3da3b2344fe085dab234426f2b9a2fa5f6" + integrity sha512-gytXUbs8k2sXS9PnQptz5o0QnpLL51SwASIORY6XaBKF88nsOT0Zw9szLqlSGQDP/4TljBAD5y98p2U1fqkdsw== + dependencies: + "@babel/helper-module-imports" "^7.27.1" + "@babel/helper-validator-identifier" "^7.27.1" + "@babel/traverse" "^7.28.3" "@babel/helper-optimise-call-expression@^7.16.7", "@babel/helper-optimise-call-expression@^7.25.9": version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/helper-optimise-call-expression/-/helper-optimise-call-expression-7.25.9.tgz#3324ae50bae7e2ab3c33f60c9a877b6a0146b54e" + resolved "https://registry.npmjs.org/@babel/helper-optimise-call-expression/-/helper-optimise-call-expression-7.25.9.tgz" integrity sha512-FIpuNaz5ow8VyrYcnXQTDRGvV6tTjkNtCK/RYNDXGSLlUD6cBuQTSw43CShGxjvfBTfcUA/r6UhUCbtYqkhcuQ== dependencies: "@babel/types" "^7.25.9" -"@babel/helper-plugin-utils@^7.0.0", "@babel/helper-plugin-utils@^7.10.4", "@babel/helper-plugin-utils@^7.12.13", "@babel/helper-plugin-utils@^7.14.5", "@babel/helper-plugin-utils@^7.16.7", "@babel/helper-plugin-utils@^7.18.6", "@babel/helper-plugin-utils@^7.18.9", "@babel/helper-plugin-utils@^7.20.2", "@babel/helper-plugin-utils@^7.22.5", "@babel/helper-plugin-utils@^7.25.9", "@babel/helper-plugin-utils@^7.26.5", "@babel/helper-plugin-utils@^7.8.0", "@babel/helper-plugin-utils@^7.8.3": - version "7.26.5" - resolved 
"https://registry.yarnpkg.com/@babel/helper-plugin-utils/-/helper-plugin-utils-7.26.5.tgz#18580d00c9934117ad719392c4f6585c9333cc35" - integrity sha512-RS+jZcRdZdRFzMyr+wcsaqOmld1/EqTghfaBGQQd/WnRdzdlvSZ//kF7U8VQTxf1ynZ4cjUcYgjVGx13ewNPMg== +"@babel/helper-optimise-call-expression@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/helper-optimise-call-expression/-/helper-optimise-call-expression-7.27.1.tgz#c65221b61a643f3e62705e5dd2b5f115e35f9200" + integrity sha512-URMGH08NzYFhubNSGJrpUEphGKQwMQYBySzat5cAByY1/YgIRkULnIy3tAMeszlL/so2HbeilYloUmSpd7GdVw== + dependencies: + "@babel/types" "^7.27.1" -"@babel/helper-remap-async-to-generator@^7.18.9", "@babel/helper-remap-async-to-generator@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/helper-remap-async-to-generator/-/helper-remap-async-to-generator-7.25.9.tgz#e53956ab3d5b9fb88be04b3e2f31b523afd34b92" - integrity sha512-IZtukuUeBbhgOcaW2s06OXTzVNJR0ybm4W5xC1opWFFJMZbwRj5LCk+ByYH7WdZPZTt8KnFwA8pvjN2yqcPlgw== +"@babel/helper-plugin-utils@^7.0.0", "@babel/helper-plugin-utils@^7.16.7", "@babel/helper-plugin-utils@^7.18.6", "@babel/helper-plugin-utils@^7.27.1", "@babel/helper-plugin-utils@^7.8.0": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/helper-plugin-utils/-/helper-plugin-utils-7.27.1.tgz#ddb2f876534ff8013e6c2b299bf4d39b3c51d44c" + integrity sha512-1gn1Up5YXka3YYAHGKpbideQ5Yjf1tDa9qYcgysz+cNCXukyLl6DjPXhD3VRwSb8c0J9tA4b2+rHEZtc6R0tlw== + +"@babel/helper-remap-async-to-generator@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/helper-remap-async-to-generator/-/helper-remap-async-to-generator-7.27.1.tgz#4601d5c7ce2eb2aea58328d43725523fcd362ce6" + integrity sha512-7fiA521aVw8lSPeI4ZOD3vRFkoqkJcS+z4hFo82bFSH/2tNd6eJ5qCVMS5OzDmZh/kaHQeBaeyxK6wljcPtveA== dependencies: - "@babel/helper-annotate-as-pure" "^7.25.9" - "@babel/helper-wrap-function" "^7.25.9" - "@babel/traverse" "^7.25.9" + "@babel/helper-annotate-as-pure" 
"^7.27.1" + "@babel/helper-wrap-function" "^7.27.1" + "@babel/traverse" "^7.27.1" -"@babel/helper-replace-supers@^7.16.7", "@babel/helper-replace-supers@^7.25.9": +"@babel/helper-replace-supers@^7.16.7": version "7.26.5" - resolved "https://registry.yarnpkg.com/@babel/helper-replace-supers/-/helper-replace-supers-7.26.5.tgz#6cb04e82ae291dae8e72335dfe438b0725f14c8d" + resolved "https://registry.npmjs.org/@babel/helper-replace-supers/-/helper-replace-supers-7.26.5.tgz" integrity sha512-bJ6iIVdYX1YooY2X7w1q6VITt+LnUILtNk7zT78ykuwStx8BauCzxvFqFaHjOpW1bVnSUM1PN1f0p5P21wHxvg== dependencies: "@babel/helper-member-expression-to-functions" "^7.25.9" "@babel/helper-optimise-call-expression" "^7.25.9" "@babel/traverse" "^7.26.5" -"@babel/helper-skip-transparent-expression-wrappers@^7.20.0", "@babel/helper-skip-transparent-expression-wrappers@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/helper-skip-transparent-expression-wrappers/-/helper-skip-transparent-expression-wrappers-7.25.9.tgz#0b2e1b62d560d6b1954893fd2b705dc17c91f0c9" - integrity sha512-K4Du3BFa3gvyhzgPcntrkDgZzQaq6uozzcpGbOO1OEJaI+EJdqWIMTLgFgQf6lrfiDFo5FU+BxKepI9RmZqahA== +"@babel/helper-replace-supers@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/helper-replace-supers/-/helper-replace-supers-7.27.1.tgz#b1ed2d634ce3bdb730e4b52de30f8cccfd692bc0" + integrity sha512-7EHz6qDZc8RYS5ElPoShMheWvEgERonFCs7IAonWLLUTXW59DP14bCZt89/GKyreYn8g3S83m21FelHKbeDCKA== dependencies: - "@babel/traverse" "^7.25.9" - "@babel/types" "^7.25.9" + "@babel/helper-member-expression-to-functions" "^7.27.1" + "@babel/helper-optimise-call-expression" "^7.27.1" + "@babel/traverse" "^7.27.1" + +"@babel/helper-skip-transparent-expression-wrappers@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/helper-skip-transparent-expression-wrappers/-/helper-skip-transparent-expression-wrappers-7.27.1.tgz#62bb91b3abba8c7f1fec0252d9dbea11b3ee7a56" + integrity 
sha512-Tub4ZKEXqbPjXgWLl2+3JpQAYBJ8+ikpQ2Ocj/q/r0LwE3UhENh7EUabyHjz2kCEsrRY83ew2DQdHluuiDQFzg== + dependencies: + "@babel/traverse" "^7.27.1" + "@babel/types" "^7.27.1" "@babel/helper-split-export-declaration@^7.16.7": version "7.24.7" - resolved "https://registry.yarnpkg.com/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.24.7.tgz#83949436890e07fa3d6873c61a96e3bbf692d856" + resolved "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.24.7.tgz" integrity sha512-oy5V7pD+UvfkEATUKvIjvIAH/xCzfsFVw7ygW2SI6NClZzquT+mwdTfgfdbUiceh6iQO0CHtCPsyze/MZ2YbAA== dependencies: "@babel/types" "^7.24.7" -"@babel/helper-string-parser@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/helper-string-parser/-/helper-string-parser-7.25.9.tgz#1aabb72ee72ed35789b4bbcad3ca2862ce614e8c" - integrity sha512-4A/SCr/2KLd5jrtOMFzaKjVtAei3+2r/NChoBNoZ3EyP/+GlhoaEGoWOZUmFmoITP7zOJyHIMm+DYRd8o3PvHA== - -"@babel/helper-validator-identifier@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/helper-validator-identifier/-/helper-validator-identifier-7.25.9.tgz#24b64e2c3ec7cd3b3c547729b8d16871f22cbdc7" - integrity sha512-Ed61U6XJc3CVRfkERJWDz4dJwKe7iLmmJsbOGu9wSloNSFttHV0I8g6UAgb7qnK5ly5bGLPd4oXZlxCdANBOWQ== - -"@babel/helper-validator-option@^7.16.7", "@babel/helper-validator-option@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/helper-validator-option/-/helper-validator-option-7.25.9.tgz#86e45bd8a49ab7e03f276577f96179653d41da72" - integrity sha512-e/zv1co8pp55dNdEcCynfj9X7nyUKUXoUEwfXqaZt0omVOmDe9oOTdKStH4GmAw6zxMFs50ZayuMfHDKlO7Tfw== - -"@babel/helper-wrap-function@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/helper-wrap-function/-/helper-wrap-function-7.25.9.tgz#d99dfd595312e6c894bd7d237470025c85eea9d0" - integrity sha512-ETzz9UTjQSTmw39GboatdymDq4XIQbR8ySgVrylRhPOFpsd+JrKHIuF0de7GCWmem+T4uC5z7EZguod7Wj4A4g== - 
dependencies: - "@babel/template" "^7.25.9" - "@babel/traverse" "^7.25.9" - "@babel/types" "^7.25.9" - -"@babel/helpers@^7.26.7": - version "7.26.10" - resolved "https://registry.yarnpkg.com/@babel/helpers/-/helpers-7.26.10.tgz#6baea3cd62ec2d0c1068778d63cb1314f6637384" - integrity sha512-UPYc3SauzZ3JGgj87GgZ89JVdC5dj0AoetR5Bw6wj4niittNyFh6+eOGonYvJ1ao6B8lEa3Q3klS7ADZ53bc5g== - dependencies: - "@babel/template" "^7.26.9" - "@babel/types" "^7.26.10" - -"@babel/highlight@^7.16.7": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/highlight/-/highlight-7.25.9.tgz#8141ce68fc73757946f983b343f1231f4691acc6" - integrity sha512-llL88JShoCsth8fF8R4SJnIn+WLvR6ccFxu1H3FlMhDontdcmZWf2HgIZ7AIqV3Xcck1idlohrN4EUBQz6klbw== - dependencies: - "@babel/helper-validator-identifier" "^7.25.9" - chalk "^2.4.2" - js-tokens "^4.0.0" - picocolors "^1.0.0" +"@babel/helper-string-parser@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/helper-string-parser/-/helper-string-parser-7.27.1.tgz#54da796097ab19ce67ed9f88b47bb2ec49367687" + integrity sha512-qMlSxKbpRlAridDExk92nSobyDdpPijUq2DW6oDnUqd0iOGxmQjyqhMIihI9+zv4LPyZdRje2cavWPbCbWm3eA== -"@babel/parser@^7.25.9", "@babel/parser@^7.26.5", "@babel/parser@^7.26.7": - version "7.26.7" - resolved "https://registry.yarnpkg.com/@babel/parser/-/parser-7.26.7.tgz#e114cd099e5f7d17b05368678da0fb9f69b3385c" - integrity sha512-kEvgGGgEjRUutvdVvZhbn/BxVt+5VSpwXz1j3WYXQbXDo8KzFOPNG2GQbdAiNq8g6wn1yKk7C/qrke03a84V+w== - dependencies: - "@babel/types" "^7.26.7" +"@babel/helper-validator-identifier@^7.27.1", "@babel/helper-validator-identifier@^7.28.5": + version "7.28.5" + resolved "https://registry.yarnpkg.com/@babel/helper-validator-identifier/-/helper-validator-identifier-7.28.5.tgz#010b6938fab7cb7df74aa2bbc06aa503b8fe5fb4" + integrity sha512-qSs4ifwzKJSV39ucNjsvc6WVHs6b7S03sOh2OcHF9UHfVPqWWALUsNUVzhSBiItjRZoLHx7nIarVjqKVusUZ1Q== -"@babel/parser@^7.26.9": - version "7.26.10" - resolved 
"https://registry.yarnpkg.com/@babel/parser/-/parser-7.26.10.tgz#e9bdb82f14b97df6569b0b038edd436839c57749" - integrity sha512-6aQR2zGE/QFi8JpDLjUZEPYOs7+mhKXm86VaKFiLP35JQwQb6bwUE+XbvkH0EptsYhbNBSUGaUBLKqxH1xSgsA== - dependencies: - "@babel/types" "^7.26.10" +"@babel/helper-validator-option@^7.16.7", "@babel/helper-validator-option@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/helper-validator-option/-/helper-validator-option-7.27.1.tgz#fa52f5b1e7db1ab049445b421c4471303897702f" + integrity sha512-YvjJow9FxbhFFKDSuFnVCe2WxXk1zWc22fFePVNEaWJEu8IrZVlda6N0uHwzZrUM1il7NC9Mlp4MaJYbYd9JSg== -"@babel/plugin-bugfix-firefox-class-in-computed-class-key@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-bugfix-firefox-class-in-computed-class-key/-/plugin-bugfix-firefox-class-in-computed-class-key-7.25.9.tgz#cc2e53ebf0a0340777fff5ed521943e253b4d8fe" - integrity sha512-ZkRyVkThtxQ/J6nv3JFYv1RYY+JT5BvU0y3k5bWrmuG4woXypRa4PXmm9RhOwodRkYFWqC0C0cqcJ4OqR7kW+g== +"@babel/helper-wrap-function@^7.27.1": + version "7.28.3" + resolved "https://registry.yarnpkg.com/@babel/helper-wrap-function/-/helper-wrap-function-7.28.3.tgz#fe4872092bc1438ffd0ce579e6f699609f9d0a7a" + integrity sha512-zdf983tNfLZFletc0RRXYrHrucBEg95NIFMkn6K9dbeMYnsgHaSBGcQqdsCSStG2PYwRre0Qc2NNSCXbG+xc6g== dependencies: - "@babel/helper-plugin-utils" "^7.25.9" - "@babel/traverse" "^7.25.9" + "@babel/template" "^7.27.2" + "@babel/traverse" "^7.28.3" + "@babel/types" "^7.28.2" -"@babel/plugin-bugfix-safari-class-field-initializer-scope@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-bugfix-safari-class-field-initializer-scope/-/plugin-bugfix-safari-class-field-initializer-scope-7.25.9.tgz#af9e4fb63ccb8abcb92375b2fcfe36b60c774d30" - integrity sha512-MrGRLZxLD/Zjj0gdU15dfs+HH/OXvnw/U4jJD8vpcP2CJQapPEv1IWwjc/qMg7ItBlPwSv1hRBbb7LeuANdcnw== +"@babel/helpers@^7.28.4": + version "7.28.4" + resolved 
"https://registry.yarnpkg.com/@babel/helpers/-/helpers-7.28.4.tgz#fe07274742e95bdf7cf1443593eeb8926ab63827" + integrity sha512-HFN59MmQXGHVyYadKLVumYsA9dBFun/ldYxipEjzA4196jpLZd8UjEEBLkbEkvfYreDqJhZxYAWFPtrfhNpj4w== dependencies: - "@babel/helper-plugin-utils" "^7.25.9" + "@babel/template" "^7.27.2" + "@babel/types" "^7.28.4" -"@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression@^7.16.7", "@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression/-/plugin-bugfix-safari-id-destructuring-collision-in-function-expression-7.25.9.tgz#e8dc26fcd616e6c5bf2bd0d5a2c151d4f92a9137" - integrity sha512-2qUwwfAFpJLZqxd02YW9btUCZHl+RFvdDkNfZwaIJrvB8Tesjsk8pEQkTvGwZXLqXUx/2oyY3ySRhm6HOXuCug== +"@babel/parser@^7.27.2", "@babel/parser@^7.28.5": + version "7.28.5" + resolved "https://registry.yarnpkg.com/@babel/parser/-/parser-7.28.5.tgz#0b0225ee90362f030efd644e8034c99468893b08" + integrity sha512-KKBU1VGYR7ORr3At5HAtUQ+TV3SzRCXmA/8OdDZiLDBIZxVyzXuztPjfLd3BV1PRAQGCMWWSHYhL0F8d5uHBDQ== dependencies: - "@babel/helper-plugin-utils" "^7.25.9" + "@babel/types" "^7.28.5" -"@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining@^7.16.7", "@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining/-/plugin-bugfix-v8-spread-parameters-in-optional-chaining-7.25.9.tgz#807a667f9158acac6f6164b4beb85ad9ebc9e1d1" - integrity sha512-6xWgLZTJXwilVjlnV7ospI3xi+sl8lN8rXXbBD6vYn3UYDlGsag8wrZkKcSI8G6KgqKP7vNFaDgeDnfAABq61g== +"@babel/plugin-bugfix-firefox-class-in-computed-class-key@^7.28.5": + version "7.28.5" + resolved 
"https://registry.yarnpkg.com/@babel/plugin-bugfix-firefox-class-in-computed-class-key/-/plugin-bugfix-firefox-class-in-computed-class-key-7.28.5.tgz#fbde57974707bbfa0376d34d425ff4fa6c732421" + integrity sha512-87GDMS3tsmMSi/3bWOte1UblL+YUTFMV8SZPZ2eSEL17s74Cw/l63rR6NmGVKMYW2GYi85nE+/d6Hw5N0bEk2Q== dependencies: - "@babel/helper-plugin-utils" "^7.25.9" - "@babel/helper-skip-transparent-expression-wrappers" "^7.25.9" - "@babel/plugin-transform-optional-chaining" "^7.25.9" + "@babel/helper-plugin-utils" "^7.27.1" + "@babel/traverse" "^7.28.5" -"@babel/plugin-bugfix-v8-static-class-fields-redefine-readonly@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-bugfix-v8-static-class-fields-redefine-readonly/-/plugin-bugfix-v8-static-class-fields-redefine-readonly-7.25.9.tgz#de7093f1e7deaf68eadd7cc6b07f2ab82543269e" - integrity sha512-aLnMXYPnzwwqhYSCyXfKkIkYgJ8zv9RK+roo9DkTXz38ynIhd9XCbN08s3MGvqL2MYGVUGdRQLL/JqBIeJhJBg== +"@babel/plugin-bugfix-safari-class-field-initializer-scope@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-bugfix-safari-class-field-initializer-scope/-/plugin-bugfix-safari-class-field-initializer-scope-7.27.1.tgz#43f70a6d7efd52370eefbdf55ae03d91b293856d" + integrity sha512-qNeq3bCKnGgLkEXUuFry6dPlGfCdQNZbn7yUAPCInwAJHMU7THJfrBSozkcWq5sNM6RcF3S8XyQL2A52KNR9IA== dependencies: - "@babel/helper-plugin-utils" "^7.25.9" - "@babel/traverse" "^7.25.9" + "@babel/helper-plugin-utils" "^7.27.1" -"@babel/plugin-proposal-async-generator-functions@^7.16.8": - version "7.20.7" - resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-async-generator-functions/-/plugin-proposal-async-generator-functions-7.20.7.tgz#bfb7276d2d573cb67ba379984a2334e262ba5326" - integrity sha512-xMbiLsn/8RK7Wq7VeVytytS2L6qE69bXPB10YCmMdDZbKF4okCqY74pI/jJQ/8U0b/F6NrT2+14b8/P9/3AMGA== +"@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression@^7.27.1": + version "7.27.1" + resolved 
"https://registry.yarnpkg.com/@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression/-/plugin-bugfix-safari-id-destructuring-collision-in-function-expression-7.27.1.tgz#beb623bd573b8b6f3047bd04c32506adc3e58a72" + integrity sha512-g4L7OYun04N1WyqMNjldFwlfPCLVkgB54A/YCXICZYBsvJJE3kByKv9c9+R/nAfmIfjl2rKYLNyMHboYbZaWaA== dependencies: - "@babel/helper-environment-visitor" "^7.18.9" - "@babel/helper-plugin-utils" "^7.20.2" - "@babel/helper-remap-async-to-generator" "^7.18.9" - "@babel/plugin-syntax-async-generators" "^7.8.4" + "@babel/helper-plugin-utils" "^7.27.1" -"@babel/plugin-proposal-class-properties@^7.16.7": - version "7.18.6" - resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-class-properties/-/plugin-proposal-class-properties-7.18.6.tgz#b110f59741895f7ec21a6fff696ec46265c446a3" - integrity sha512-cumfXOF0+nzZrrN8Rf0t7M+tF6sZc7vhQwYQck9q1/5w2OExlD+b4v4RpMJFaV1Z7WcDRgO6FqvxqxGlwo+RHQ== +"@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining/-/plugin-bugfix-v8-spread-parameters-in-optional-chaining-7.27.1.tgz#e134a5479eb2ba9c02714e8c1ebf1ec9076124fd" + integrity sha512-oO02gcONcD5O1iTLi/6frMJBIwWEHceWGSGqrpCmEL8nogiS6J9PBlE48CaK20/Jx1LuRml9aDftLgdjXT8+Cw== dependencies: - "@babel/helper-create-class-features-plugin" "^7.18.6" - "@babel/helper-plugin-utils" "^7.18.6" + "@babel/helper-plugin-utils" "^7.27.1" + "@babel/helper-skip-transparent-expression-wrappers" "^7.27.1" + "@babel/plugin-transform-optional-chaining" "^7.27.1" -"@babel/plugin-proposal-class-static-block@^7.16.7": - version "7.21.0" - resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-class-static-block/-/plugin-proposal-class-static-block-7.21.0.tgz#77bdd66fb7b605f3a61302d224bdfacf5547977d" - integrity sha512-XP5G9MWNUskFuP30IfFSEFB0Z6HzLIUcjYM4bYOPHXl7eiJ9HFv8tWj6TXTN5QODiEhDZAeI4hLok2iHFFV4hw== 
+"@babel/plugin-bugfix-v8-static-class-fields-redefine-readonly@^7.28.3": + version "7.28.3" + resolved "https://registry.yarnpkg.com/@babel/plugin-bugfix-v8-static-class-fields-redefine-readonly/-/plugin-bugfix-v8-static-class-fields-redefine-readonly-7.28.3.tgz#373f6e2de0016f73caf8f27004f61d167743742a" + integrity sha512-b6YTX108evsvE4YgWyQ921ZAFFQm3Bn+CA3+ZXlNVnPhx+UfsVURoPjfGAPCjBgrqo30yX/C2nZGX96DxvR9Iw== dependencies: - "@babel/helper-create-class-features-plugin" "^7.21.0" - "@babel/helper-plugin-utils" "^7.20.2" - "@babel/plugin-syntax-class-static-block" "^7.14.5" + "@babel/helper-plugin-utils" "^7.27.1" + "@babel/traverse" "^7.28.3" "@babel/plugin-proposal-decorators@^7.10.1": version "7.17.2" - resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-decorators/-/plugin-proposal-decorators-7.17.2.tgz#c36372ddfe0360cac1ee331a238310bddca11493" + resolved "https://registry.npmjs.org/@babel/plugin-proposal-decorators/-/plugin-proposal-decorators-7.17.2.tgz" integrity sha512-WH8Z95CwTq/W8rFbMqb9p3hicpt4RX4f0K659ax2VHxgOyT6qQmUaEVEjIh4WR9Eh9NymkVn5vwsrE68fAQNUw== dependencies: "@babel/helper-create-class-features-plugin" "^7.17.1" @@ -407,610 +410,427 @@ "@babel/plugin-syntax-decorators" "^7.17.0" charcodes "^0.2.0" -"@babel/plugin-proposal-dynamic-import@^7.16.7": - version "7.18.6" - resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-dynamic-import/-/plugin-proposal-dynamic-import-7.18.6.tgz#72bcf8d408799f547d759298c3c27c7e7faa4d94" - integrity sha512-1auuwmK+Rz13SJj36R+jqFPMJWyKEDd7lLSdOj4oJK0UTgGueSAtkrCvz9ewmgyU/P941Rv2fQwZJN8s6QruXw== - dependencies: - "@babel/helper-plugin-utils" "^7.18.6" - "@babel/plugin-syntax-dynamic-import" "^7.8.3" - -"@babel/plugin-proposal-export-namespace-from@^7.16.7": - version "7.18.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-export-namespace-from/-/plugin-proposal-export-namespace-from-7.18.9.tgz#5f7313ab348cdb19d590145f9247540e94761203" - integrity 
sha512-k1NtHyOMvlDDFeb9G5PhUXuGj8m/wiwojgQVEhJ/fsVsMCpLyOP4h0uGEjYJKrRI+EVPlb5Jk+Gt9P97lOGwtA== - dependencies: - "@babel/helper-plugin-utils" "^7.18.9" - "@babel/plugin-syntax-export-namespace-from" "^7.8.3" - -"@babel/plugin-proposal-json-strings@^7.16.7": - version "7.18.6" - resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-json-strings/-/plugin-proposal-json-strings-7.18.6.tgz#7e8788c1811c393aff762817e7dbf1ebd0c05f0b" - integrity sha512-lr1peyn9kOdbYc0xr0OdHTZ5FMqS6Di+H0Fz2I/JwMzGmzJETNeOFq2pBySw6X/KFL5EWDjlJuMsUGRFb8fQgQ== - dependencies: - "@babel/helper-plugin-utils" "^7.18.6" - "@babel/plugin-syntax-json-strings" "^7.8.3" - -"@babel/plugin-proposal-logical-assignment-operators@^7.16.7": - version "7.20.7" - resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-logical-assignment-operators/-/plugin-proposal-logical-assignment-operators-7.20.7.tgz#dfbcaa8f7b4d37b51e8bfb46d94a5aea2bb89d83" - integrity sha512-y7C7cZgpMIjWlKE5T7eJwp+tnRYM89HmRvWM5EQuB5BoHEONjmQ8lSNmBUwOyy/GFRsohJED51YBF79hE1djug== - dependencies: - "@babel/helper-plugin-utils" "^7.20.2" - "@babel/plugin-syntax-logical-assignment-operators" "^7.10.4" - -"@babel/plugin-proposal-nullish-coalescing-operator@^7.16.7": - version "7.18.6" - resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-nullish-coalescing-operator/-/plugin-proposal-nullish-coalescing-operator-7.18.6.tgz#fdd940a99a740e577d6c753ab6fbb43fdb9467e1" - integrity sha512-wQxQzxYeJqHcfppzBDnm1yAY0jSRkUXR2z8RePZYrKwMKgMlE8+Z6LUno+bd6LvbGh8Gltvy74+9pIYkr+XkKA== - dependencies: - "@babel/helper-plugin-utils" "^7.18.6" - "@babel/plugin-syntax-nullish-coalescing-operator" "^7.8.3" - -"@babel/plugin-proposal-numeric-separator@^7.16.7": - version "7.18.6" - resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-numeric-separator/-/plugin-proposal-numeric-separator-7.18.6.tgz#899b14fbafe87f053d2c5ff05b36029c62e13c75" - integrity 
sha512-ozlZFogPqoLm8WBr5Z8UckIoE4YQ5KESVcNudyXOR8uqIkliTEgJ3RoketfG6pmzLdeZF0H/wjE9/cCEitBl7Q== - dependencies: - "@babel/helper-plugin-utils" "^7.18.6" - "@babel/plugin-syntax-numeric-separator" "^7.10.4" - -"@babel/plugin-proposal-object-rest-spread@^7.16.7": - version "7.20.7" - resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-object-rest-spread/-/plugin-proposal-object-rest-spread-7.20.7.tgz#aa662940ef425779c75534a5c41e9d936edc390a" - integrity sha512-d2S98yCiLxDVmBmE8UjGcfPvNEUbA1U5q5WxaWFUGRzJSVAZqm5W6MbPct0jxnegUZ0niLeNX+IOzEs7wYg9Dg== - dependencies: - "@babel/compat-data" "^7.20.5" - "@babel/helper-compilation-targets" "^7.20.7" - "@babel/helper-plugin-utils" "^7.20.2" - "@babel/plugin-syntax-object-rest-spread" "^7.8.3" - "@babel/plugin-transform-parameters" "^7.20.7" - -"@babel/plugin-proposal-optional-catch-binding@^7.16.7": - version "7.18.6" - resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-optional-catch-binding/-/plugin-proposal-optional-catch-binding-7.18.6.tgz#f9400d0e6a3ea93ba9ef70b09e72dd6da638a2cb" - integrity sha512-Q40HEhs9DJQyaZfUjjn6vE8Cv4GmMHCYuMGIWUnlxH6400VGxOuwWsPt4FxXxJkC/5eOzgn0z21M9gMT4MOhbw== - dependencies: - "@babel/helper-plugin-utils" "^7.18.6" - "@babel/plugin-syntax-optional-catch-binding" "^7.8.3" - -"@babel/plugin-proposal-optional-chaining@^7.16.7": - version "7.21.0" - resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-optional-chaining/-/plugin-proposal-optional-chaining-7.21.0.tgz#886f5c8978deb7d30f678b2e24346b287234d3ea" - integrity sha512-p4zeefM72gpmEe2fkUr/OnOXpWEf8nAgk7ZYVqqfFiyIG7oFfVZcCrU64hWn5xp4tQ9LkV4bTIa5rD0KANpKNA== - dependencies: - "@babel/helper-plugin-utils" "^7.20.2" - "@babel/helper-skip-transparent-expression-wrappers" "^7.20.0" - "@babel/plugin-syntax-optional-chaining" "^7.8.3" - -"@babel/plugin-proposal-private-methods@^7.16.11": - version "7.18.6" - resolved 
"https://registry.yarnpkg.com/@babel/plugin-proposal-private-methods/-/plugin-proposal-private-methods-7.18.6.tgz#5209de7d213457548a98436fa2882f52f4be6bea" - integrity sha512-nutsvktDItsNn4rpGItSNV2sz1XwS+nfU0Rg8aCx3W3NOKVzdMjJRu0O5OkgDp3ZGICSTbgRpxZoWsxoKRvbeA== - dependencies: - "@babel/helper-create-class-features-plugin" "^7.18.6" - "@babel/helper-plugin-utils" "^7.18.6" - "@babel/plugin-proposal-private-property-in-object@7.21.0-placeholder-for-preset-env.2": version "7.21.0-placeholder-for-preset-env.2" - resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-private-property-in-object/-/plugin-proposal-private-property-in-object-7.21.0-placeholder-for-preset-env.2.tgz#7844f9289546efa9febac2de4cfe358a050bd703" + resolved "https://registry.npmjs.org/@babel/plugin-proposal-private-property-in-object/-/plugin-proposal-private-property-in-object-7.21.0-placeholder-for-preset-env.2.tgz" integrity sha512-SOSkfJDddaM7mak6cPEpswyTRnuRltl429hMraQEglW+OkovnCzsiszTmsrlY//qLFjCpQDFRvjdm2wA5pPm9w== -"@babel/plugin-proposal-private-property-in-object@^7.16.7": - version "7.21.11" - resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-private-property-in-object/-/plugin-proposal-private-property-in-object-7.21.11.tgz#69d597086b6760c4126525cfa154f34631ff272c" - integrity sha512-0QZ8qP/3RLDVBwBFoWAwCtgcDZJVwA5LUJRZU8x2YFfKNuFq161wK3cuGrALu5yiPu+vzwTAg/sMWVNeWeNyaw== - dependencies: - "@babel/helper-annotate-as-pure" "^7.18.6" - "@babel/helper-create-class-features-plugin" "^7.21.0" - "@babel/helper-plugin-utils" "^7.20.2" - "@babel/plugin-syntax-private-property-in-object" "^7.14.5" - -"@babel/plugin-proposal-unicode-property-regex@^7.16.7", "@babel/plugin-proposal-unicode-property-regex@^7.4.4": - version "7.18.6" - resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-unicode-property-regex/-/plugin-proposal-unicode-property-regex-7.18.6.tgz#af613d2cd5e643643b65cded64207b15c85cb78e" - integrity 
sha512-2BShG/d5yoZyXZfVePH91urL5wTG6ASZU9M4o03lKK8u8UW1y08OMttBSOADTcJrnPMpvDXRG3G8fyLh4ovs8w== - dependencies: - "@babel/helper-create-regexp-features-plugin" "^7.18.6" - "@babel/helper-plugin-utils" "^7.18.6" - -"@babel/plugin-syntax-async-generators@^7.8.4": - version "7.8.4" - resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-async-generators/-/plugin-syntax-async-generators-7.8.4.tgz#a983fb1aeb2ec3f6ed042a210f640e90e786fe0d" - integrity sha512-tycmZxkGfZaxhMRbXlPXuVFpdWlXpir2W4AMhSJgRKzk/eDlIXOhb2LHWoLpDF7TEHylV5zNhykX6KAgHJmTNw== - dependencies: - "@babel/helper-plugin-utils" "^7.8.0" - -"@babel/plugin-syntax-class-properties@^7.12.13": - version "7.12.13" - resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-class-properties/-/plugin-syntax-class-properties-7.12.13.tgz#b5c987274c4a3a82b89714796931a6b53544ae10" - integrity sha512-fm4idjKla0YahUNgFNLCB0qySdsoPiZP3iQE3rky0mBUtMZ23yDJ9SJdg6dXTSDnulOVqiF3Hgr9nbXvXTQZYA== - dependencies: - "@babel/helper-plugin-utils" "^7.12.13" - -"@babel/plugin-syntax-class-static-block@^7.14.5": - version "7.14.5" - resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-class-static-block/-/plugin-syntax-class-static-block-7.14.5.tgz#195df89b146b4b78b3bf897fd7a257c84659d406" - integrity sha512-b+YyPmr6ldyNnM6sqYeMWE+bgJcJpO6yS4QD7ymxgH34GBPNDM/THBh8iunyvKIZztiwLH4CJZ0RxTk9emgpjw== - dependencies: - "@babel/helper-plugin-utils" "^7.14.5" - "@babel/plugin-syntax-decorators@^7.17.0": version "7.17.0" - resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-decorators/-/plugin-syntax-decorators-7.17.0.tgz#a2be3b2c9fe7d78bd4994e790896bc411e2f166d" + resolved "https://registry.npmjs.org/@babel/plugin-syntax-decorators/-/plugin-syntax-decorators-7.17.0.tgz" integrity sha512-qWe85yCXsvDEluNP0OyeQjH63DlhAR3W7K9BxxU1MvbDb48tgBG+Ao6IJJ6smPDrrVzSQZrbF6donpkFBMcs3A== dependencies: "@babel/helper-plugin-utils" "^7.16.7" "@babel/plugin-syntax-dynamic-import@^7.8.3": version "7.8.3" - resolved 
"https://registry.yarnpkg.com/@babel/plugin-syntax-dynamic-import/-/plugin-syntax-dynamic-import-7.8.3.tgz#62bf98b2da3cd21d626154fc96ee5b3cb68eacb3" + resolved "https://registry.npmjs.org/@babel/plugin-syntax-dynamic-import/-/plugin-syntax-dynamic-import-7.8.3.tgz" integrity sha512-5gdGbFon+PszYzqs83S3E5mpi7/y/8M9eC90MRTZfduQOYW76ig6SOSPNe41IG5LoP3FGBn2N0RjVDSQiS94kQ== dependencies: "@babel/helper-plugin-utils" "^7.8.0" -"@babel/plugin-syntax-export-namespace-from@^7.8.3": - version "7.8.3" - resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-export-namespace-from/-/plugin-syntax-export-namespace-from-7.8.3.tgz#028964a9ba80dbc094c915c487ad7c4e7a66465a" - integrity sha512-MXf5laXo6c1IbEbegDmzGPwGNTsHZmEy6QGznu5Sh2UCWvueywb2ee+CCE4zQiZstxU9BMoQO9i6zUFSY0Kj0Q== - dependencies: - "@babel/helper-plugin-utils" "^7.8.3" - -"@babel/plugin-syntax-import-assertions@^7.26.0": - version "7.26.0" - resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-import-assertions/-/plugin-syntax-import-assertions-7.26.0.tgz#620412405058efa56e4a564903b79355020f445f" - integrity sha512-QCWT5Hh830hK5EQa7XzuqIkQU9tT/whqbDz7kuaZMHFl1inRRg7JnuAEOQ0Ur0QUl0NufCk1msK2BeY79Aj/eg== - dependencies: - "@babel/helper-plugin-utils" "^7.25.9" - -"@babel/plugin-syntax-import-attributes@^7.26.0": - version "7.26.0" - resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-import-attributes/-/plugin-syntax-import-attributes-7.26.0.tgz#3b1412847699eea739b4f2602c74ce36f6b0b0f7" - integrity sha512-e2dttdsJ1ZTpi3B9UYGLw41hifAubg19AtCu/2I/F1QNVclOBr1dYpTdmdyZ84Xiz43BS/tCUkMAZNLv12Pi+A== - dependencies: - "@babel/helper-plugin-utils" "^7.25.9" - -"@babel/plugin-syntax-json-strings@^7.8.3": - version "7.8.3" - resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-json-strings/-/plugin-syntax-json-strings-7.8.3.tgz#01ca21b668cd8218c9e640cb6dd88c5412b2c96a" - integrity sha512-lY6kdGpWHvjoe2vk4WrAapEuBR69EMxZl+RoGRhrFGNYVK8mOPAW8VfbT/ZgrFbXlDNiiaxQnAtgVCZ6jv30EA== - dependencies: - 
"@babel/helper-plugin-utils" "^7.8.0" - -"@babel/plugin-syntax-jsx@^7.12.13", "@babel/plugin-syntax-jsx@^7.16.7", "@babel/plugin-syntax-jsx@^7.2.0": - version "7.16.7" - resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.16.7.tgz#50b6571d13f764266a113d77c82b4a6508bbe665" - integrity sha512-Esxmk7YjA8QysKeT3VhTXvF6y77f/a91SIs4pWb4H2eWGQkCKFgQaG6hdoEVZtGsrAcb2K5BW66XsOErD4WU3Q== - dependencies: - "@babel/helper-plugin-utils" "^7.16.7" - -"@babel/plugin-syntax-logical-assignment-operators@^7.10.4": - version "7.10.4" - resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-logical-assignment-operators/-/plugin-syntax-logical-assignment-operators-7.10.4.tgz#ca91ef46303530448b906652bac2e9fe9941f699" - integrity sha512-d8waShlpFDinQ5MtvGU9xDAOzKH47+FFoney2baFIoMr952hKOLp1HR7VszoZvOsV/4+RRszNY7D17ba0te0ig== - dependencies: - "@babel/helper-plugin-utils" "^7.10.4" - -"@babel/plugin-syntax-nullish-coalescing-operator@^7.8.3": - version "7.8.3" - resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-nullish-coalescing-operator/-/plugin-syntax-nullish-coalescing-operator-7.8.3.tgz#167ed70368886081f74b5c36c65a88c03b66d1a9" - integrity sha512-aSff4zPII1u2QD7y+F8oDsz19ew4IGEJg9SVW+bqwpwtfFleiQDMdzA/R+UlWDzfnHFCxxleFT0PMIrR36XLNQ== - dependencies: - "@babel/helper-plugin-utils" "^7.8.0" - -"@babel/plugin-syntax-numeric-separator@^7.10.4": - version "7.10.4" - resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-numeric-separator/-/plugin-syntax-numeric-separator-7.10.4.tgz#b9b070b3e33570cd9fd07ba7fa91c0dd37b9af97" - integrity sha512-9H6YdfkcK/uOnY/K7/aA2xpzaAgkQn37yzWUMRK7OaPOqOpGS1+n0H5hxT9AUw9EsSjPW8SVyMJwYRtWs3X3ug== - dependencies: - "@babel/helper-plugin-utils" "^7.10.4" - -"@babel/plugin-syntax-object-rest-spread@^7.8.3": - version "7.8.3" - resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-object-rest-spread/-/plugin-syntax-object-rest-spread-7.8.3.tgz#60e225edcbd98a640332a2e72dd3e66f1af55871" - integrity 
sha512-XoqMijGZb9y3y2XskN+P1wUGiVwWZ5JmoDRwx5+3GmEplNyVM2s2Dg8ILFQm8rWM48orGy5YpI5Bl8U1y7ydlA== - dependencies: - "@babel/helper-plugin-utils" "^7.8.0" - -"@babel/plugin-syntax-optional-catch-binding@^7.8.3": - version "7.8.3" - resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-optional-catch-binding/-/plugin-syntax-optional-catch-binding-7.8.3.tgz#6111a265bcfb020eb9efd0fdfd7d26402b9ed6c1" - integrity sha512-6VPD0Pc1lpTqw0aKoeRTMiB+kWhAoT24PA+ksWSBrFtl5SIRVpZlwN3NNPQjehA2E/91FV3RjLWoVTglWcSV3Q== +"@babel/plugin-syntax-import-assertions@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-import-assertions/-/plugin-syntax-import-assertions-7.27.1.tgz#88894aefd2b03b5ee6ad1562a7c8e1587496aecd" + integrity sha512-UT/Jrhw57xg4ILHLFnzFpPDlMbcdEicaAtjPQpbj9wa8T4r5KVWCimHcL/460g8Ht0DMxDyjsLgiWSkVjnwPFg== dependencies: - "@babel/helper-plugin-utils" "^7.8.0" - -"@babel/plugin-syntax-optional-chaining@^7.8.3": - version "7.8.3" - resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-optional-chaining/-/plugin-syntax-optional-chaining-7.8.3.tgz#4f69c2ab95167e0180cd5336613f8c5788f7d48a" - integrity sha512-KoK9ErH1MBlCPxV0VANkXW2/dw4vlbGDrFgz8bmUsBGYkFRcbRwMh6cIJubdPrkxRwuGdtCk0v/wPTKbQgBjkg== - dependencies: - "@babel/helper-plugin-utils" "^7.8.0" + "@babel/helper-plugin-utils" "^7.27.1" -"@babel/plugin-syntax-private-property-in-object@^7.14.5": - version "7.14.5" - resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-private-property-in-object/-/plugin-syntax-private-property-in-object-7.14.5.tgz#0dc6671ec0ea22b6e94a1114f857970cd39de1ad" - integrity sha512-0wVnp9dxJ72ZUJDV27ZfbSj6iHLoytYZmh3rFcxNnvsJF3ktkzLDZPy/mA17HGsaQT3/DQsWYX1f1QGWkCoVUg== +"@babel/plugin-syntax-import-attributes@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-import-attributes/-/plugin-syntax-import-attributes-7.27.1.tgz#34c017d54496f9b11b61474e7ea3dfd5563ffe07" + integrity 
sha512-oFT0FrKHgF53f4vOsZGi2Hh3I35PfSmVs4IBFLFj4dnafP+hIWDLg3VyKmUHfLoLHlyxY4C7DGtmHuJgn+IGww== dependencies: - "@babel/helper-plugin-utils" "^7.14.5" + "@babel/helper-plugin-utils" "^7.27.1" -"@babel/plugin-syntax-top-level-await@^7.14.5": - version "7.14.5" - resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-top-level-await/-/plugin-syntax-top-level-await-7.14.5.tgz#c1cfdadc35a646240001f06138247b741c34d94c" - integrity sha512-hx++upLv5U1rgYfwe1xBQUhRmU41NEvpUvrp8jkrSCdvGSnM5/qdRMtylJ6PG5OFkBaHkbTAKTnd3/YyESRHFw== +"@babel/plugin-syntax-jsx@^7.17.12", "@babel/plugin-syntax-jsx@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.27.1.tgz#2f9beb5eff30fa507c5532d107daac7b888fa34c" + integrity sha512-y8YTNIeKoyhGd9O0Jiyzyyqk8gdjnumGTQPsz0xOZOQ2RmkVJeZ1vmmfIvFEKqucBG6axJGBZDE/7iI5suUI/w== dependencies: - "@babel/helper-plugin-utils" "^7.14.5" + "@babel/helper-plugin-utils" "^7.27.1" -"@babel/plugin-syntax-typescript@^7.16.7": - version "7.16.7" - resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-typescript/-/plugin-syntax-typescript-7.16.7.tgz#39c9b55ee153151990fb038651d58d3fd03f98f8" - integrity sha512-YhUIJHHGkqPgEcMYkPCKTyGUdoGKWtopIycQyjJH8OjvRgOYsXsaKehLVPScKJWAULPxMa4N1vCe6szREFlZ7A== +"@babel/plugin-syntax-typescript@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-typescript/-/plugin-syntax-typescript-7.27.1.tgz#5147d29066a793450f220c63fa3a9431b7e6dd18" + integrity sha512-xfYCBMxveHrRMnAWl1ZlPXOZjzkN82THFvLhQhFXFt81Z5HnN+EtUkZhv/zcKpmT3fzmWZB0ywiBrbC3vogbwQ== dependencies: - "@babel/helper-plugin-utils" "^7.16.7" + "@babel/helper-plugin-utils" "^7.27.1" "@babel/plugin-syntax-unicode-sets-regex@^7.18.6": version "7.18.6" - resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-unicode-sets-regex/-/plugin-syntax-unicode-sets-regex-7.18.6.tgz#d49a3b3e6b52e5be6740022317580234a6a47357" + resolved 
"https://registry.npmjs.org/@babel/plugin-syntax-unicode-sets-regex/-/plugin-syntax-unicode-sets-regex-7.18.6.tgz" integrity sha512-727YkEAPwSIQTv5im8QHz3upqp92JTWhidIC81Tdx4VJYIte/VndKf1qKrfnnhPLiPghStWfvC/iFaMCQu7Nqg== dependencies: "@babel/helper-create-regexp-features-plugin" "^7.18.6" "@babel/helper-plugin-utils" "^7.18.6" -"@babel/plugin-transform-arrow-functions@^7.16.7", "@babel/plugin-transform-arrow-functions@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-arrow-functions/-/plugin-transform-arrow-functions-7.25.9.tgz#7821d4410bee5daaadbb4cdd9a6649704e176845" - integrity sha512-6jmooXYIwn9ca5/RylZADJ+EnSxVUS5sjeJ9UPk6RWRzXCmOJCy6dqItPJFpw2cuCangPK4OYr5uhGKcmrm5Qg== +"@babel/plugin-transform-arrow-functions@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-arrow-functions/-/plugin-transform-arrow-functions-7.27.1.tgz#6e2061067ba3ab0266d834a9f94811196f2aba9a" + integrity sha512-8Z4TGic6xW70FKThA5HYEKKyBpOOsucTOD1DjU3fZxDg+K3zBJcXMFnt/4yQiZnf5+MiOMSXQ9PaEK/Ilh1DeA== dependencies: - "@babel/helper-plugin-utils" "^7.25.9" + "@babel/helper-plugin-utils" "^7.27.1" -"@babel/plugin-transform-async-generator-functions@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-async-generator-functions/-/plugin-transform-async-generator-functions-7.25.9.tgz#1b18530b077d18a407c494eb3d1d72da505283a2" - integrity sha512-RXV6QAzTBbhDMO9fWwOmwwTuYaiPbggWQ9INdZqAYeSHyG7FzQ+nOZaUUjNwKv9pV3aE4WFqFm1Hnbci5tBCAw== +"@babel/plugin-transform-async-generator-functions@^7.28.0": + version "7.28.0" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-async-generator-functions/-/plugin-transform-async-generator-functions-7.28.0.tgz#1276e6c7285ab2cd1eccb0bc7356b7a69ff842c2" + integrity sha512-BEOdvX4+M765icNPZeidyADIvQ1m1gmunXufXxvRESy/jNNyfovIqUyE7MVgGBjWktCoJlzvFA1To2O4ymIO3Q== dependencies: - "@babel/helper-plugin-utils" "^7.25.9" - 
"@babel/helper-remap-async-to-generator" "^7.25.9" - "@babel/traverse" "^7.25.9" + "@babel/helper-plugin-utils" "^7.27.1" + "@babel/helper-remap-async-to-generator" "^7.27.1" + "@babel/traverse" "^7.28.0" -"@babel/plugin-transform-async-to-generator@^7.16.8", "@babel/plugin-transform-async-to-generator@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-async-to-generator/-/plugin-transform-async-to-generator-7.25.9.tgz#c80008dacae51482793e5a9c08b39a5be7e12d71" - integrity sha512-NT7Ejn7Z/LjUH0Gv5KsBCxh7BH3fbLTV0ptHvpeMvrt3cPThHfJfst9Wrb7S8EvJ7vRTFI7z+VAvFVEQn/m5zQ== +"@babel/plugin-transform-async-to-generator@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-async-to-generator/-/plugin-transform-async-to-generator-7.27.1.tgz#9a93893b9379b39466c74474f55af03de78c66e7" + integrity sha512-NREkZsZVJS4xmTr8qzE5y8AfIPqsdQfRuUiLRTEzb7Qii8iFWCyDKaUV2c0rCuh4ljDZ98ALHP/PetiBV2nddA== dependencies: - "@babel/helper-module-imports" "^7.25.9" - "@babel/helper-plugin-utils" "^7.25.9" - "@babel/helper-remap-async-to-generator" "^7.25.9" + "@babel/helper-module-imports" "^7.27.1" + "@babel/helper-plugin-utils" "^7.27.1" + "@babel/helper-remap-async-to-generator" "^7.27.1" -"@babel/plugin-transform-block-scoped-functions@^7.16.7", "@babel/plugin-transform-block-scoped-functions@^7.26.5": - version "7.26.5" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-block-scoped-functions/-/plugin-transform-block-scoped-functions-7.26.5.tgz#3dc4405d31ad1cbe45293aa57205a6e3b009d53e" - integrity sha512-chuTSY+hq09+/f5lMj8ZSYgCFpppV2CbYrhNFJ1BFoXpiWPnnAb7R0MqrafCpN8E1+YRrtM1MXZHJdIx8B6rMQ== +"@babel/plugin-transform-block-scoped-functions@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-block-scoped-functions/-/plugin-transform-block-scoped-functions-7.27.1.tgz#558a9d6e24cf72802dd3b62a4b51e0d62c0f57f9" + integrity 
sha512-cnqkuOtZLapWYZUYM5rVIdv1nXYuFVIltZ6ZJ7nIj585QsjKM5dhL2Fu/lICXZ1OyIAFc7Qy+bvDAtTXqGrlhg== dependencies: - "@babel/helper-plugin-utils" "^7.26.5" + "@babel/helper-plugin-utils" "^7.27.1" -"@babel/plugin-transform-block-scoping@^7.16.7", "@babel/plugin-transform-block-scoping@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-block-scoping/-/plugin-transform-block-scoping-7.25.9.tgz#c33665e46b06759c93687ca0f84395b80c0473a1" - integrity sha512-1F05O7AYjymAtqbsFETboN1NvBdcnzMerO+zlMyJBEz6WkMdejvGWw9p05iTSjC85RLlBseHHQpYaM4gzJkBGg== +"@babel/plugin-transform-block-scoping@^7.28.5": + version "7.28.5" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-block-scoping/-/plugin-transform-block-scoping-7.28.5.tgz#e0d3af63bd8c80de2e567e690a54e84d85eb16f6" + integrity sha512-45DmULpySVvmq9Pj3X9B+62Xe+DJGov27QravQJU1LLcapR6/10i+gYVAucGGJpHBp5mYxIMK4nDAT/QDLr47g== dependencies: - "@babel/helper-plugin-utils" "^7.25.9" + "@babel/helper-plugin-utils" "^7.27.1" -"@babel/plugin-transform-class-properties@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-class-properties/-/plugin-transform-class-properties-7.25.9.tgz#a8ce84fedb9ad512549984101fa84080a9f5f51f" - integrity sha512-bbMAII8GRSkcd0h0b4X+36GksxuheLFjP65ul9w6C3KgAamI3JqErNgSrosX6ZPj+Mpim5VvEbawXxJCyEUV3Q== +"@babel/plugin-transform-class-properties@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-class-properties/-/plugin-transform-class-properties-7.27.1.tgz#dd40a6a370dfd49d32362ae206ddaf2bb082a925" + integrity sha512-D0VcalChDMtuRvJIu3U/fwWjf8ZMykz5iZsg77Nuj821vCKI3zCyRLwRdWbsuJ/uRwZhZ002QtCqIkwC/ZkvbA== dependencies: - "@babel/helper-create-class-features-plugin" "^7.25.9" - "@babel/helper-plugin-utils" "^7.25.9" + "@babel/helper-create-class-features-plugin" "^7.27.1" + "@babel/helper-plugin-utils" "^7.27.1" -"@babel/plugin-transform-class-static-block@^7.26.0": - version 
"7.26.0" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-class-static-block/-/plugin-transform-class-static-block-7.26.0.tgz#6c8da219f4eb15cae9834ec4348ff8e9e09664a0" - integrity sha512-6J2APTs7BDDm+UMqP1useWqhcRAXo0WIoVj26N7kPFB6S73Lgvyka4KTZYIxtgYXiN5HTyRObA72N2iu628iTQ== +"@babel/plugin-transform-class-static-block@^7.28.3": + version "7.28.3" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-class-static-block/-/plugin-transform-class-static-block-7.28.3.tgz#d1b8e69b54c9993bc558203e1f49bfc979bfd852" + integrity sha512-LtPXlBbRoc4Njl/oh1CeD/3jC+atytbnf/UqLoqTDcEYGUPj022+rvfkbDYieUrSj3CaV4yHDByPE+T2HwfsJg== dependencies: - "@babel/helper-create-class-features-plugin" "^7.25.9" - "@babel/helper-plugin-utils" "^7.25.9" + "@babel/helper-create-class-features-plugin" "^7.28.3" + "@babel/helper-plugin-utils" "^7.27.1" -"@babel/plugin-transform-classes@^7.16.7", "@babel/plugin-transform-classes@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-classes/-/plugin-transform-classes-7.25.9.tgz#7152457f7880b593a63ade8a861e6e26a4469f52" - integrity sha512-mD8APIXmseE7oZvZgGABDyM34GUmK45Um2TXiBUt7PnuAxrgoSVf123qUzPxEr/+/BHrRn5NMZCdE2m/1F8DGg== +"@babel/plugin-transform-classes@^7.28.4": + version "7.28.4" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-classes/-/plugin-transform-classes-7.28.4.tgz#75d66175486788c56728a73424d67cbc7473495c" + integrity sha512-cFOlhIYPBv/iBoc+KS3M6et2XPtbT2HiCRfBXWtfpc9OAyostldxIf9YAYB6ypURBBbx+Qv6nyrLzASfJe+hBA== dependencies: - "@babel/helper-annotate-as-pure" "^7.25.9" - "@babel/helper-compilation-targets" "^7.25.9" - "@babel/helper-plugin-utils" "^7.25.9" - "@babel/helper-replace-supers" "^7.25.9" - "@babel/traverse" "^7.25.9" - globals "^11.1.0" + "@babel/helper-annotate-as-pure" "^7.27.3" + "@babel/helper-compilation-targets" "^7.27.2" + "@babel/helper-globals" "^7.28.0" + "@babel/helper-plugin-utils" "^7.27.1" + "@babel/helper-replace-supers" 
"^7.27.1" + "@babel/traverse" "^7.28.4" -"@babel/plugin-transform-computed-properties@^7.16.7", "@babel/plugin-transform-computed-properties@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-computed-properties/-/plugin-transform-computed-properties-7.25.9.tgz#db36492c78460e534b8852b1d5befe3c923ef10b" - integrity sha512-HnBegGqXZR12xbcTHlJ9HGxw1OniltT26J5YpfruGqtUHlz/xKf/G2ak9e+t0rVqrjXa9WOhvYPz1ERfMj23AA== +"@babel/plugin-transform-computed-properties@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-computed-properties/-/plugin-transform-computed-properties-7.27.1.tgz#81662e78bf5e734a97982c2b7f0a793288ef3caa" + integrity sha512-lj9PGWvMTVksbWiDT2tW68zGS/cyo4AkZ/QTp0sQT0mjPopCmrSkzxeXkznjqBxzDI6TclZhOJbBmbBLjuOZUw== dependencies: - "@babel/helper-plugin-utils" "^7.25.9" - "@babel/template" "^7.25.9" + "@babel/helper-plugin-utils" "^7.27.1" + "@babel/template" "^7.27.1" -"@babel/plugin-transform-destructuring@^7.16.7", "@babel/plugin-transform-destructuring@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-destructuring/-/plugin-transform-destructuring-7.25.9.tgz#966ea2595c498224340883602d3cfd7a0c79cea1" - integrity sha512-WkCGb/3ZxXepmMiX101nnGiU+1CAdut8oHyEOHxkKuS1qKpU2SMXE2uSvfz8PBuLd49V6LEsbtyPhWC7fnkgvQ== +"@babel/plugin-transform-destructuring@^7.28.0", "@babel/plugin-transform-destructuring@^7.28.5": + version "7.28.5" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-destructuring/-/plugin-transform-destructuring-7.28.5.tgz#b8402764df96179a2070bb7b501a1586cf8ad7a7" + integrity sha512-Kl9Bc6D0zTUcFUvkNuQh4eGXPKKNDOJQXVyyM4ZAQPMveniJdxi8XMJwLo+xSoW3MIq81bD33lcUe9kZpl0MCw== dependencies: - "@babel/helper-plugin-utils" "^7.25.9" + "@babel/helper-plugin-utils" "^7.27.1" + "@babel/traverse" "^7.28.5" -"@babel/plugin-transform-dotall-regex@^7.16.7", "@babel/plugin-transform-dotall-regex@^7.25.9", 
"@babel/plugin-transform-dotall-regex@^7.4.4": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-dotall-regex/-/plugin-transform-dotall-regex-7.25.9.tgz#bad7945dd07734ca52fe3ad4e872b40ed09bb09a" - integrity sha512-t7ZQ7g5trIgSRYhI9pIJtRl64KHotutUJsh4Eze5l7olJv+mRSg4/MmbZ0tv1eeqRbdvo/+trvJD/Oc5DmW2cA== +"@babel/plugin-transform-dotall-regex@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-dotall-regex/-/plugin-transform-dotall-regex-7.27.1.tgz#aa6821de864c528b1fecf286f0a174e38e826f4d" + integrity sha512-gEbkDVGRvjj7+T1ivxrfgygpT7GUd4vmODtYpbs0gZATdkX8/iSnOtZSxiZnsgm1YjTgjI6VKBGSJJevkrclzw== dependencies: - "@babel/helper-create-regexp-features-plugin" "^7.25.9" - "@babel/helper-plugin-utils" "^7.25.9" + "@babel/helper-create-regexp-features-plugin" "^7.27.1" + "@babel/helper-plugin-utils" "^7.27.1" -"@babel/plugin-transform-duplicate-keys@^7.16.7", "@babel/plugin-transform-duplicate-keys@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-duplicate-keys/-/plugin-transform-duplicate-keys-7.25.9.tgz#8850ddf57dce2aebb4394bb434a7598031059e6d" - integrity sha512-LZxhJ6dvBb/f3x8xwWIuyiAHy56nrRG3PeYTpBkkzkYRRQ6tJLu68lEF5VIqMUZiAV7a8+Tb78nEoMCMcqjXBw== +"@babel/plugin-transform-duplicate-keys@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-duplicate-keys/-/plugin-transform-duplicate-keys-7.27.1.tgz#f1fbf628ece18e12e7b32b175940e68358f546d1" + integrity sha512-MTyJk98sHvSs+cvZ4nOauwTTG1JeonDjSGvGGUNHreGQns+Mpt6WX/dVzWBHgg+dYZhkC4X+zTDfkTU+Vy9y7Q== dependencies: - "@babel/helper-plugin-utils" "^7.25.9" + "@babel/helper-plugin-utils" "^7.27.1" -"@babel/plugin-transform-duplicate-named-capturing-groups-regex@^7.25.9": - version "7.25.9" - resolved 
"https://registry.yarnpkg.com/@babel/plugin-transform-duplicate-named-capturing-groups-regex/-/plugin-transform-duplicate-named-capturing-groups-regex-7.25.9.tgz#6f7259b4de127721a08f1e5165b852fcaa696d31" - integrity sha512-0UfuJS0EsXbRvKnwcLjFtJy/Sxc5J5jhLHnFhy7u4zih97Hz6tJkLU+O+FMMrNZrosUPxDi6sYxJ/EA8jDiAog== +"@babel/plugin-transform-duplicate-named-capturing-groups-regex@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-duplicate-named-capturing-groups-regex/-/plugin-transform-duplicate-named-capturing-groups-regex-7.27.1.tgz#5043854ca620a94149372e69030ff8cb6a9eb0ec" + integrity sha512-hkGcueTEzuhB30B3eJCbCYeCaaEQOmQR0AdvzpD4LoN0GXMWzzGSuRrxR2xTnCrvNbVwK9N6/jQ92GSLfiZWoQ== dependencies: - "@babel/helper-create-regexp-features-plugin" "^7.25.9" - "@babel/helper-plugin-utils" "^7.25.9" + "@babel/helper-create-regexp-features-plugin" "^7.27.1" + "@babel/helper-plugin-utils" "^7.27.1" -"@babel/plugin-transform-dynamic-import@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-dynamic-import/-/plugin-transform-dynamic-import-7.25.9.tgz#23e917de63ed23c6600c5dd06d94669dce79f7b8" - integrity sha512-GCggjexbmSLaFhqsojeugBpeaRIgWNTcgKVq/0qIteFEqY2A+b9QidYadrWlnbWQUrW5fn+mCvf3tr7OeBFTyg== +"@babel/plugin-transform-dynamic-import@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-dynamic-import/-/plugin-transform-dynamic-import-7.27.1.tgz#4c78f35552ac0e06aa1f6e3c573d67695e8af5a4" + integrity sha512-MHzkWQcEmjzzVW9j2q8LGjwGWpG2mjwaaB0BNQwst3FIjqsg8Ct/mIZlvSPJvfi9y2AC8mi/ktxbFVL9pZ1I4A== dependencies: - "@babel/helper-plugin-utils" "^7.25.9" + "@babel/helper-plugin-utils" "^7.27.1" -"@babel/plugin-transform-exponentiation-operator@^7.16.7", "@babel/plugin-transform-exponentiation-operator@^7.26.3": - version "7.26.3" - resolved 
"https://registry.yarnpkg.com/@babel/plugin-transform-exponentiation-operator/-/plugin-transform-exponentiation-operator-7.26.3.tgz#e29f01b6de302c7c2c794277a48f04a9ca7f03bc" - integrity sha512-7CAHcQ58z2chuXPWblnn1K6rLDnDWieghSOEmqQsrBenH0P9InCUtOJYD89pvngljmZlJcz3fcmgYsXFNGa1ZQ== +"@babel/plugin-transform-explicit-resource-management@^7.28.0": + version "7.28.0" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-explicit-resource-management/-/plugin-transform-explicit-resource-management-7.28.0.tgz#45be6211b778dbf4b9d54c4e8a2b42fa72e09a1a" + integrity sha512-K8nhUcn3f6iB+P3gwCv/no7OdzOZQcKchW6N389V6PD8NUWKZHzndOd9sPDVbMoBsbmjMqlB4L9fm+fEFNVlwQ== dependencies: - "@babel/helper-plugin-utils" "^7.25.9" + "@babel/helper-plugin-utils" "^7.27.1" + "@babel/plugin-transform-destructuring" "^7.28.0" -"@babel/plugin-transform-export-namespace-from@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-export-namespace-from/-/plugin-transform-export-namespace-from-7.25.9.tgz#90745fe55053394f554e40584cda81f2c8a402a2" - integrity sha512-2NsEz+CxzJIVOPx2o9UsW1rXLqtChtLoVnwYHHiB04wS5sgn7mrV45fWMBX0Kk+ub9uXytVYfNP2HjbVbCB3Ww== +"@babel/plugin-transform-exponentiation-operator@^7.28.5": + version "7.28.5" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-exponentiation-operator/-/plugin-transform-exponentiation-operator-7.28.5.tgz#7cc90a8170e83532676cfa505278e147056e94fe" + integrity sha512-D4WIMaFtwa2NizOp+dnoFjRez/ClKiC2BqqImwKd1X28nqBtZEyCYJ2ozQrrzlxAFrcrjxo39S6khe9RNDlGzw== dependencies: - "@babel/helper-plugin-utils" "^7.25.9" + "@babel/helper-plugin-utils" "^7.27.1" -"@babel/plugin-transform-for-of@^7.16.7", "@babel/plugin-transform-for-of@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-for-of/-/plugin-transform-for-of-7.25.9.tgz#4bdc7d42a213397905d89f02350c5267866d5755" - integrity 
sha512-LqHxduHoaGELJl2uhImHwRQudhCM50pT46rIBNvtT/Oql3nqiS3wOwP+5ten7NpYSXrrVLgtZU3DZmPtWZo16A== +"@babel/plugin-transform-export-namespace-from@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-export-namespace-from/-/plugin-transform-export-namespace-from-7.27.1.tgz#71ca69d3471edd6daa711cf4dfc3400415df9c23" + integrity sha512-tQvHWSZ3/jH2xuq/vZDy0jNn+ZdXJeM8gHvX4lnJmsc3+50yPlWdZXIc5ay+umX+2/tJIqHqiEqcJvxlmIvRvQ== dependencies: - "@babel/helper-plugin-utils" "^7.25.9" - "@babel/helper-skip-transparent-expression-wrappers" "^7.25.9" + "@babel/helper-plugin-utils" "^7.27.1" -"@babel/plugin-transform-function-name@^7.16.7", "@babel/plugin-transform-function-name@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-function-name/-/plugin-transform-function-name-7.25.9.tgz#939d956e68a606661005bfd550c4fc2ef95f7b97" - integrity sha512-8lP+Yxjv14Vc5MuWBpJsoUCd3hD6V9DgBon2FVYL4jJgbnVQ9fTgYmonchzZJOVNgzEgbxp4OwAf6xz6M/14XA== +"@babel/plugin-transform-for-of@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-for-of/-/plugin-transform-for-of-7.27.1.tgz#bc24f7080e9ff721b63a70ac7b2564ca15b6c40a" + integrity sha512-BfbWFFEJFQzLCQ5N8VocnCtA8J1CLkNTe2Ms2wocj75dd6VpiqS5Z5quTYcUoo4Yq+DN0rtikODccuv7RU81sw== dependencies: - "@babel/helper-compilation-targets" "^7.25.9" - "@babel/helper-plugin-utils" "^7.25.9" - "@babel/traverse" "^7.25.9" + "@babel/helper-plugin-utils" "^7.27.1" + "@babel/helper-skip-transparent-expression-wrappers" "^7.27.1" -"@babel/plugin-transform-json-strings@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-json-strings/-/plugin-transform-json-strings-7.25.9.tgz#c86db407cb827cded902a90c707d2781aaa89660" - integrity sha512-xoTMk0WXceiiIvsaquQQUaLLXSW1KJ159KP87VilruQm0LNNGxWzahxSS6T6i4Zg3ezp4vA4zuwiNUR53qmQAw== +"@babel/plugin-transform-function-name@^7.27.1": + version "7.27.1" + resolved 
"https://registry.yarnpkg.com/@babel/plugin-transform-function-name/-/plugin-transform-function-name-7.27.1.tgz#4d0bf307720e4dce6d7c30fcb1fd6ca77bdeb3a7" + integrity sha512-1bQeydJF9Nr1eBCMMbC+hdwmRlsv5XYOMu03YSWFwNs0HsAmtSxxF1fyuYPqemVldVyFmlCU7w8UE14LupUSZQ== dependencies: - "@babel/helper-plugin-utils" "^7.25.9" + "@babel/helper-compilation-targets" "^7.27.1" + "@babel/helper-plugin-utils" "^7.27.1" + "@babel/traverse" "^7.27.1" -"@babel/plugin-transform-literals@^7.16.7", "@babel/plugin-transform-literals@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-literals/-/plugin-transform-literals-7.25.9.tgz#1a1c6b4d4aa59bc4cad5b6b3a223a0abd685c9de" - integrity sha512-9N7+2lFziW8W9pBl2TzaNht3+pgMIRP74zizeCSrtnSKVdUl8mAjjOP2OOVQAfZ881P2cNjDj1uAMEdeD50nuQ== +"@babel/plugin-transform-json-strings@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-json-strings/-/plugin-transform-json-strings-7.27.1.tgz#a2e0ce6ef256376bd527f290da023983527a4f4c" + integrity sha512-6WVLVJiTjqcQauBhn1LkICsR2H+zm62I3h9faTDKt1qP4jn2o72tSvqMwtGFKGTpojce0gJs+76eZ2uCHRZh0Q== dependencies: - "@babel/helper-plugin-utils" "^7.25.9" + "@babel/helper-plugin-utils" "^7.27.1" -"@babel/plugin-transform-logical-assignment-operators@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-logical-assignment-operators/-/plugin-transform-logical-assignment-operators-7.25.9.tgz#b19441a8c39a2fda0902900b306ea05ae1055db7" - integrity sha512-wI4wRAzGko551Y8eVf6iOY9EouIDTtPb0ByZx+ktDGHwv6bHFimrgJM/2T021txPZ2s4c7bqvHbd+vXG6K948Q== +"@babel/plugin-transform-literals@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-literals/-/plugin-transform-literals-7.27.1.tgz#baaefa4d10a1d4206f9dcdda50d7d5827bb70b24" + integrity sha512-0HCFSepIpLTkLcsi86GG3mTUzxV5jpmbv97hTETW3yzrAij8aqlD36toB1D0daVFJM8NK6GvKO0gslVQmm+zZA== dependencies: - 
"@babel/helper-plugin-utils" "^7.25.9" + "@babel/helper-plugin-utils" "^7.27.1" -"@babel/plugin-transform-member-expression-literals@^7.16.7", "@babel/plugin-transform-member-expression-literals@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-member-expression-literals/-/plugin-transform-member-expression-literals-7.25.9.tgz#63dff19763ea64a31f5e6c20957e6a25e41ed5de" - integrity sha512-PYazBVfofCQkkMzh2P6IdIUaCEWni3iYEerAsRWuVd8+jlM1S9S9cz1dF9hIzyoZ8IA3+OwVYIp9v9e+GbgZhA== +"@babel/plugin-transform-logical-assignment-operators@^7.28.5": + version "7.28.5" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-logical-assignment-operators/-/plugin-transform-logical-assignment-operators-7.28.5.tgz#d028fd6db8c081dee4abebc812c2325e24a85b0e" + integrity sha512-axUuqnUTBuXyHGcJEVVh9pORaN6wC5bYfE7FGzPiaWa3syib9m7g+/IT/4VgCOe2Upef43PHzeAvcrVek6QuuA== dependencies: - "@babel/helper-plugin-utils" "^7.25.9" + "@babel/helper-plugin-utils" "^7.27.1" -"@babel/plugin-transform-modules-amd@^7.16.7", "@babel/plugin-transform-modules-amd@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-modules-amd/-/plugin-transform-modules-amd-7.25.9.tgz#49ba478f2295101544abd794486cd3088dddb6c5" - integrity sha512-g5T11tnI36jVClQlMlt4qKDLlWnG5pP9CSM4GhdRciTNMRgkfpo5cR6b4rGIOYPgRRuFAvwjPQ/Yk+ql4dyhbw== +"@babel/plugin-transform-member-expression-literals@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-member-expression-literals/-/plugin-transform-member-expression-literals-7.27.1.tgz#37b88ba594d852418e99536f5612f795f23aeaf9" + integrity sha512-hqoBX4dcZ1I33jCSWcXrP+1Ku7kdqXf1oeah7ooKOIiAdKQ+uqftgCFNOSzA5AMS2XIHEYeGFg4cKRCdpxzVOQ== dependencies: - "@babel/helper-module-transforms" "^7.25.9" - "@babel/helper-plugin-utils" "^7.25.9" + "@babel/helper-plugin-utils" "^7.27.1" -"@babel/plugin-transform-modules-commonjs@^7.16.8", 
"@babel/plugin-transform-modules-commonjs@^7.26.3": - version "7.26.3" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-modules-commonjs/-/plugin-transform-modules-commonjs-7.26.3.tgz#8f011d44b20d02c3de44d8850d971d8497f981fb" - integrity sha512-MgR55l4q9KddUDITEzEFYn5ZsGDXMSsU9E+kh7fjRXTIC3RHqfCo8RPRbyReYJh44HQ/yomFkqbOFohXvDCiIQ== +"@babel/plugin-transform-modules-amd@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-modules-amd/-/plugin-transform-modules-amd-7.27.1.tgz#a4145f9d87c2291fe2d05f994b65dba4e3e7196f" + integrity sha512-iCsytMg/N9/oFq6n+gFTvUYDZQOMK5kEdeYxmxt91fcJGycfxVP9CnrxoliM0oumFERba2i8ZtwRUCMhvP1LnA== dependencies: - "@babel/helper-module-transforms" "^7.26.0" - "@babel/helper-plugin-utils" "^7.25.9" + "@babel/helper-module-transforms" "^7.27.1" + "@babel/helper-plugin-utils" "^7.27.1" -"@babel/plugin-transform-modules-systemjs@^7.16.7", "@babel/plugin-transform-modules-systemjs@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-modules-systemjs/-/plugin-transform-modules-systemjs-7.25.9.tgz#8bd1b43836269e3d33307151a114bcf3ba6793f8" - integrity sha512-hyss7iIlH/zLHaehT+xwiymtPOpsiwIIRlCAOwBB04ta5Tt+lNItADdlXw3jAWZ96VJ2jlhl/c+PNIQPKNfvcA== +"@babel/plugin-transform-modules-commonjs@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-modules-commonjs/-/plugin-transform-modules-commonjs-7.27.1.tgz#8e44ed37c2787ecc23bdc367f49977476614e832" + integrity sha512-OJguuwlTYlN0gBZFRPqwOGNWssZjfIUdS7HMYtN8c1KmwpwHFBwTeFZrg9XZa+DFTitWOW5iTAG7tyCUPsCCyw== dependencies: - "@babel/helper-module-transforms" "^7.25.9" - "@babel/helper-plugin-utils" "^7.25.9" - "@babel/helper-validator-identifier" "^7.25.9" - "@babel/traverse" "^7.25.9" + "@babel/helper-module-transforms" "^7.27.1" + "@babel/helper-plugin-utils" "^7.27.1" -"@babel/plugin-transform-modules-umd@^7.16.7", "@babel/plugin-transform-modules-umd@^7.25.9": - version 
"7.25.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-modules-umd/-/plugin-transform-modules-umd-7.25.9.tgz#6710079cdd7c694db36529a1e8411e49fcbf14c9" - integrity sha512-bS9MVObUgE7ww36HEfwe6g9WakQ0KF07mQF74uuXdkoziUPfKyu/nIm663kz//e5O1nPInPFx36z7WJmJ4yNEw== +"@babel/plugin-transform-modules-systemjs@^7.28.5": + version "7.28.5" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-modules-systemjs/-/plugin-transform-modules-systemjs-7.28.5.tgz#7439e592a92d7670dfcb95d0cbc04bd3e64801d2" + integrity sha512-vn5Jma98LCOeBy/KpeQhXcV2WZgaRUtjwQmjoBuLNlOmkg0fB5pdvYVeWRYI69wWKwK2cD1QbMiUQnoujWvrew== dependencies: - "@babel/helper-module-transforms" "^7.25.9" - "@babel/helper-plugin-utils" "^7.25.9" + "@babel/helper-module-transforms" "^7.28.3" + "@babel/helper-plugin-utils" "^7.27.1" + "@babel/helper-validator-identifier" "^7.28.5" + "@babel/traverse" "^7.28.5" -"@babel/plugin-transform-named-capturing-groups-regex@^7.16.8", "@babel/plugin-transform-named-capturing-groups-regex@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-named-capturing-groups-regex/-/plugin-transform-named-capturing-groups-regex-7.25.9.tgz#454990ae6cc22fd2a0fa60b3a2c6f63a38064e6a" - integrity sha512-oqB6WHdKTGl3q/ItQhpLSnWWOpjUJLsOCLVyeFgeTktkBSCiurvPOsyt93gibI9CmuKvTUEtWmG5VhZD+5T/KA== +"@babel/plugin-transform-modules-umd@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-modules-umd/-/plugin-transform-modules-umd-7.27.1.tgz#63f2cf4f6dc15debc12f694e44714863d34cd334" + integrity sha512-iQBE/xC5BV1OxJbp6WG7jq9IWiD+xxlZhLrdwpPkTX3ydmXdvoCpyfJN7acaIBZaOqTfr76pgzqBJflNbeRK+w== dependencies: - "@babel/helper-create-regexp-features-plugin" "^7.25.9" - "@babel/helper-plugin-utils" "^7.25.9" + "@babel/helper-module-transforms" "^7.27.1" + "@babel/helper-plugin-utils" "^7.27.1" -"@babel/plugin-transform-new-target@^7.16.7", "@babel/plugin-transform-new-target@^7.25.9": - version "7.25.9" - 
resolved "https://registry.yarnpkg.com/@babel/plugin-transform-new-target/-/plugin-transform-new-target-7.25.9.tgz#42e61711294b105c248336dcb04b77054ea8becd" - integrity sha512-U/3p8X1yCSoKyUj2eOBIx3FOn6pElFOKvAAGf8HTtItuPyB+ZeOqfn+mvTtg9ZlOAjsPdK3ayQEjqHjU/yLeVQ== +"@babel/plugin-transform-named-capturing-groups-regex@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-named-capturing-groups-regex/-/plugin-transform-named-capturing-groups-regex-7.27.1.tgz#f32b8f7818d8fc0cc46ee20a8ef75f071af976e1" + integrity sha512-SstR5JYy8ddZvD6MhV0tM/j16Qds4mIpJTOd1Yu9J9pJjH93bxHECF7pgtc28XvkzTD6Pxcm/0Z73Hvk7kb3Ng== dependencies: - "@babel/helper-plugin-utils" "^7.25.9" + "@babel/helper-create-regexp-features-plugin" "^7.27.1" + "@babel/helper-plugin-utils" "^7.27.1" -"@babel/plugin-transform-nullish-coalescing-operator@^7.26.6": - version "7.26.6" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-nullish-coalescing-operator/-/plugin-transform-nullish-coalescing-operator-7.26.6.tgz#fbf6b3c92cb509e7b319ee46e3da89c5bedd31fe" - integrity sha512-CKW8Vu+uUZneQCPtXmSBUC6NCAUdya26hWCElAWh5mVSlSRsmiCPUUDKb3Z0szng1hiAJa098Hkhg9o4SE35Qw== +"@babel/plugin-transform-new-target@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-new-target/-/plugin-transform-new-target-7.27.1.tgz#259c43939728cad1706ac17351b7e6a7bea1abeb" + integrity sha512-f6PiYeqXQ05lYq3TIfIDu/MtliKUbNwkGApPUvyo6+tc7uaR4cPjPe7DFPr15Uyycg2lZU6btZ575CuQoYh7MQ== dependencies: - "@babel/helper-plugin-utils" "^7.26.5" + "@babel/helper-plugin-utils" "^7.27.1" -"@babel/plugin-transform-numeric-separator@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-numeric-separator/-/plugin-transform-numeric-separator-7.25.9.tgz#bfed75866261a8b643468b0ccfd275f2033214a1" - integrity sha512-TlprrJ1GBZ3r6s96Yq8gEQv82s8/5HnCVHtEJScUj90thHQbwe+E5MLhi2bbNHBEJuzrvltXSru+BUxHDoog7Q== 
+"@babel/plugin-transform-nullish-coalescing-operator@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-nullish-coalescing-operator/-/plugin-transform-nullish-coalescing-operator-7.27.1.tgz#4f9d3153bf6782d73dd42785a9d22d03197bc91d" + integrity sha512-aGZh6xMo6q9vq1JGcw58lZ1Z0+i0xB2x0XaauNIUXd6O1xXc3RwoWEBlsTQrY4KQ9Jf0s5rgD6SiNkaUdJegTA== dependencies: - "@babel/helper-plugin-utils" "^7.25.9" + "@babel/helper-plugin-utils" "^7.27.1" -"@babel/plugin-transform-object-rest-spread@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-object-rest-spread/-/plugin-transform-object-rest-spread-7.25.9.tgz#0203725025074164808bcf1a2cfa90c652c99f18" - integrity sha512-fSaXafEE9CVHPweLYw4J0emp1t8zYTXyzN3UuG+lylqkvYd7RMrsOQ8TYx5RF231be0vqtFC6jnx3UmpJmKBYg== +"@babel/plugin-transform-numeric-separator@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-numeric-separator/-/plugin-transform-numeric-separator-7.27.1.tgz#614e0b15cc800e5997dadd9bd6ea524ed6c819c6" + integrity sha512-fdPKAcujuvEChxDBJ5c+0BTaS6revLV7CJL08e4m3de8qJfNIuCc2nc7XJYOjBoTMJeqSmwXJ0ypE14RCjLwaw== dependencies: - "@babel/helper-compilation-targets" "^7.25.9" - "@babel/helper-plugin-utils" "^7.25.9" - "@babel/plugin-transform-parameters" "^7.25.9" + "@babel/helper-plugin-utils" "^7.27.1" -"@babel/plugin-transform-object-super@^7.16.7", "@babel/plugin-transform-object-super@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-object-super/-/plugin-transform-object-super-7.25.9.tgz#385d5de135162933beb4a3d227a2b7e52bb4cf03" - integrity sha512-Kj/Gh+Rw2RNLbCK1VAWj2U48yxxqL2x0k10nPtSdRa0O2xnHXalD0s+o1A6a0W43gJ00ANo38jxkQreckOzv5A== +"@babel/plugin-transform-object-rest-spread@^7.28.4": + version "7.28.4" + resolved 
"https://registry.yarnpkg.com/@babel/plugin-transform-object-rest-spread/-/plugin-transform-object-rest-spread-7.28.4.tgz#9ee1ceca80b3e6c4bac9247b2149e36958f7f98d" + integrity sha512-373KA2HQzKhQCYiRVIRr+3MjpCObqzDlyrM6u4I201wL8Mp2wHf7uB8GhDwis03k2ti8Zr65Zyyqs1xOxUF/Ew== dependencies: - "@babel/helper-plugin-utils" "^7.25.9" - "@babel/helper-replace-supers" "^7.25.9" + "@babel/helper-compilation-targets" "^7.27.2" + "@babel/helper-plugin-utils" "^7.27.1" + "@babel/plugin-transform-destructuring" "^7.28.0" + "@babel/plugin-transform-parameters" "^7.27.7" + "@babel/traverse" "^7.28.4" -"@babel/plugin-transform-optional-catch-binding@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-optional-catch-binding/-/plugin-transform-optional-catch-binding-7.25.9.tgz#10e70d96d52bb1f10c5caaac59ac545ea2ba7ff3" - integrity sha512-qM/6m6hQZzDcZF3onzIhZeDHDO43bkNNlOX0i8n3lR6zLbu0GN2d8qfM/IERJZYauhAHSLHy39NF0Ctdvcid7g== +"@babel/plugin-transform-object-super@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-object-super/-/plugin-transform-object-super-7.27.1.tgz#1c932cd27bf3874c43a5cac4f43ebf970c9871b5" + integrity sha512-SFy8S9plRPbIcxlJ8A6mT/CxFdJx/c04JEctz4jf8YZaVS2px34j7NXRrlGlHkN/M2gnpL37ZpGRGVFLd3l8Ng== dependencies: - "@babel/helper-plugin-utils" "^7.25.9" + "@babel/helper-plugin-utils" "^7.27.1" + "@babel/helper-replace-supers" "^7.27.1" -"@babel/plugin-transform-optional-chaining@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-optional-chaining/-/plugin-transform-optional-chaining-7.25.9.tgz#e142eb899d26ef715435f201ab6e139541eee7dd" - integrity sha512-6AvV0FsLULbpnXeBjrY4dmWF8F7gf8QnvTEoO/wX/5xm/xE1Xo8oPuD3MPS+KS9f9XBEAWN7X1aWr4z9HdOr7A== +"@babel/plugin-transform-optional-catch-binding@^7.27.1": + version "7.27.1" + resolved 
"https://registry.yarnpkg.com/@babel/plugin-transform-optional-catch-binding/-/plugin-transform-optional-catch-binding-7.27.1.tgz#84c7341ebde35ccd36b137e9e45866825072a30c" + integrity sha512-txEAEKzYrHEX4xSZN4kJ+OfKXFVSWKB2ZxM9dpcE3wT7smwkNmXo5ORRlVzMVdJbD+Q8ILTgSD7959uj+3Dm3Q== dependencies: - "@babel/helper-plugin-utils" "^7.25.9" - "@babel/helper-skip-transparent-expression-wrappers" "^7.25.9" + "@babel/helper-plugin-utils" "^7.27.1" -"@babel/plugin-transform-parameters@^7.16.7", "@babel/plugin-transform-parameters@^7.20.7", "@babel/plugin-transform-parameters@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-parameters/-/plugin-transform-parameters-7.25.9.tgz#b856842205b3e77e18b7a7a1b94958069c7ba257" - integrity sha512-wzz6MKwpnshBAiRmn4jR8LYz/g8Ksg0o80XmwZDlordjwEk9SxBzTWC7F5ef1jhbrbOW2DJ5J6ayRukrJmnr0g== +"@babel/plugin-transform-optional-chaining@^7.27.1", "@babel/plugin-transform-optional-chaining@^7.28.5": + version "7.28.5" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-optional-chaining/-/plugin-transform-optional-chaining-7.28.5.tgz#8238c785f9d5c1c515a90bf196efb50d075a4b26" + integrity sha512-N6fut9IZlPnjPwgiQkXNhb+cT8wQKFlJNqcZkWlcTqkcqx6/kU4ynGmLFoa4LViBSirn05YAwk+sQBbPfxtYzQ== dependencies: - "@babel/helper-plugin-utils" "^7.25.9" + "@babel/helper-plugin-utils" "^7.27.1" + "@babel/helper-skip-transparent-expression-wrappers" "^7.27.1" -"@babel/plugin-transform-private-methods@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-private-methods/-/plugin-transform-private-methods-7.25.9.tgz#847f4139263577526455d7d3223cd8bda51e3b57" - integrity sha512-D/JUozNpQLAPUVusvqMxyvjzllRaF8/nSrP1s2YGQT/W4LHK4xxsMcHjhOGTS01mp9Hda8nswb+FblLdJornQw== +"@babel/plugin-transform-parameters@^7.27.7": + version "7.27.7" + resolved 
"https://registry.yarnpkg.com/@babel/plugin-transform-parameters/-/plugin-transform-parameters-7.27.7.tgz#1fd2febb7c74e7d21cf3b05f7aebc907940af53a" + integrity sha512-qBkYTYCb76RRxUM6CcZA5KRu8K4SM8ajzVeUgVdMVO9NN9uI/GaVmBg/WKJJGnNokV9SY8FxNOVWGXzqzUidBg== dependencies: - "@babel/helper-create-class-features-plugin" "^7.25.9" - "@babel/helper-plugin-utils" "^7.25.9" + "@babel/helper-plugin-utils" "^7.27.1" -"@babel/plugin-transform-private-property-in-object@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-private-property-in-object/-/plugin-transform-private-property-in-object-7.25.9.tgz#9c8b73e64e6cc3cbb2743633885a7dd2c385fe33" - integrity sha512-Evf3kcMqzXA3xfYJmZ9Pg1OvKdtqsDMSWBDzZOPLvHiTt36E75jLDQo5w1gtRU95Q4E5PDttrTf25Fw8d/uWLw== +"@babel/plugin-transform-private-methods@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-private-methods/-/plugin-transform-private-methods-7.27.1.tgz#fdacbab1c5ed81ec70dfdbb8b213d65da148b6af" + integrity sha512-10FVt+X55AjRAYI9BrdISN9/AQWHqldOeZDUoLyif1Kn05a56xVBXb8ZouL8pZ9jem8QpXaOt8TS7RHUIS+GPA== dependencies: - "@babel/helper-annotate-as-pure" "^7.25.9" - "@babel/helper-create-class-features-plugin" "^7.25.9" - "@babel/helper-plugin-utils" "^7.25.9" + "@babel/helper-create-class-features-plugin" "^7.27.1" + "@babel/helper-plugin-utils" "^7.27.1" -"@babel/plugin-transform-property-literals@^7.16.7", "@babel/plugin-transform-property-literals@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-property-literals/-/plugin-transform-property-literals-7.25.9.tgz#d72d588bd88b0dec8b62e36f6fda91cedfe28e3f" - integrity sha512-IvIUeV5KrS/VPavfSM/Iu+RE6llrHrYIKY1yfCzyO/lMXHQ+p7uGhonmGVisv6tSBSVgWzMBohTcvkC9vQcQFA== +"@babel/plugin-transform-private-property-in-object@^7.27.1": + version "7.27.1" + resolved 
"https://registry.yarnpkg.com/@babel/plugin-transform-private-property-in-object/-/plugin-transform-private-property-in-object-7.27.1.tgz#4dbbef283b5b2f01a21e81e299f76e35f900fb11" + integrity sha512-5J+IhqTi1XPa0DXF83jYOaARrX+41gOewWbkPyjMNRDqgOCqdffGh8L3f/Ek5utaEBZExjSAzcyjmV9SSAWObQ== + dependencies: + "@babel/helper-annotate-as-pure" "^7.27.1" + "@babel/helper-create-class-features-plugin" "^7.27.1" + "@babel/helper-plugin-utils" "^7.27.1" + +"@babel/plugin-transform-property-literals@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-property-literals/-/plugin-transform-property-literals-7.27.1.tgz#07eafd618800591e88073a0af1b940d9a42c6424" + integrity sha512-oThy3BCuCha8kDZ8ZkgOg2exvPYUlprMukKQXI1r1pJ47NCvxfkEy8vK+r/hT9nF0Aa4H1WUPZZjHTFtAhGfmQ== dependencies: - "@babel/helper-plugin-utils" "^7.25.9" + "@babel/helper-plugin-utils" "^7.27.1" "@babel/plugin-transform-react-display-name@^7.16.7": version "7.16.7" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-react-display-name/-/plugin-transform-react-display-name-7.16.7.tgz#7b6d40d232f4c0f550ea348593db3b21e2404340" + resolved "https://registry.npmjs.org/@babel/plugin-transform-react-display-name/-/plugin-transform-react-display-name-7.16.7.tgz" integrity sha512-qgIg8BcZgd0G/Cz916D5+9kqX0c7nPZyXaP8R2tLNN5tkyIZdG5fEwBrxwplzSnjC1jvQmyMNVwUCZPcbGY7Pg== dependencies: "@babel/helper-plugin-utils" "^7.16.7" "@babel/plugin-transform-react-jsx-development@^7.16.7": version "7.16.7" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-react-jsx-development/-/plugin-transform-react-jsx-development-7.16.7.tgz#43a00724a3ed2557ed3f276a01a929e6686ac7b8" + resolved "https://registry.npmjs.org/@babel/plugin-transform-react-jsx-development/-/plugin-transform-react-jsx-development-7.16.7.tgz" integrity sha512-RMvQWvpla+xy6MlBpPlrKZCMRs2AGiHOGHY3xRwl0pEeim348dDyxeH4xBsMPbIMhujeq7ihE702eM2Ew0Wo+A== dependencies: "@babel/plugin-transform-react-jsx" "^7.16.7" 
-"@babel/plugin-transform-react-jsx@^7.12.1", "@babel/plugin-transform-react-jsx@^7.16.7": - version "7.16.7" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-react-jsx/-/plugin-transform-react-jsx-7.16.7.tgz#86a6a220552afd0e4e1f0388a68a372be7add0d4" - integrity sha512-8D16ye66fxiE8m890w0BpPpngG9o9OVBBy0gH2E+2AR7qMR2ZpTYJEqLxAsoroenMId0p/wMW+Blc0meDgu0Ag== +"@babel/plugin-transform-react-jsx@^7.16.7", "@babel/plugin-transform-react-jsx@^7.17.12": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-react-jsx/-/plugin-transform-react-jsx-7.27.1.tgz#1023bc94b78b0a2d68c82b5e96aed573bcfb9db0" + integrity sha512-2KH4LWGSrJIkVf5tSiBFYuXDAoWRq2MMwgivCf+93dd0GQi8RXLjKA/0EvRnVV5G0hrHczsquXuD01L8s6dmBw== dependencies: - "@babel/helper-annotate-as-pure" "^7.16.7" - "@babel/helper-module-imports" "^7.16.7" - "@babel/helper-plugin-utils" "^7.16.7" - "@babel/plugin-syntax-jsx" "^7.16.7" - "@babel/types" "^7.16.7" + "@babel/helper-annotate-as-pure" "^7.27.1" + "@babel/helper-module-imports" "^7.27.1" + "@babel/helper-plugin-utils" "^7.27.1" + "@babel/plugin-syntax-jsx" "^7.27.1" + "@babel/types" "^7.27.1" "@babel/plugin-transform-react-pure-annotations@^7.16.7": version "7.16.7" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-react-pure-annotations/-/plugin-transform-react-pure-annotations-7.16.7.tgz#232bfd2f12eb551d6d7d01d13fe3f86b45eb9c67" + resolved "https://registry.npmjs.org/@babel/plugin-transform-react-pure-annotations/-/plugin-transform-react-pure-annotations-7.16.7.tgz" integrity sha512-hs71ToC97k3QWxswh2ElzMFABXHvGiJ01IB1TbYQDGeWRKWz/MPUTh5jGExdHvosYKpnJW5Pm3S4+TA3FyX+GA== dependencies: "@babel/helper-annotate-as-pure" "^7.16.7" "@babel/helper-plugin-utils" "^7.16.7" -"@babel/plugin-transform-regenerator@^7.16.7", "@babel/plugin-transform-regenerator@^7.25.9": - version "7.25.9" - resolved 
"https://registry.yarnpkg.com/@babel/plugin-transform-regenerator/-/plugin-transform-regenerator-7.25.9.tgz#03a8a4670d6cebae95305ac6defac81ece77740b" - integrity sha512-vwDcDNsgMPDGP0nMqzahDWE5/MLcX8sv96+wfX7as7LoF/kr97Bo/7fI00lXY4wUXYfVmwIIyG80fGZ1uvt2qg== +"@babel/plugin-transform-regenerator@^7.28.4": + version "7.28.4" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-regenerator/-/plugin-transform-regenerator-7.28.4.tgz#9d3fa3bebb48ddd0091ce5729139cd99c67cea51" + integrity sha512-+ZEdQlBoRg9m2NnzvEeLgtvBMO4tkFBw5SQIUgLICgTrumLoU7lr+Oghi6km2PFj+dbUt2u1oby2w3BDO9YQnA== dependencies: - "@babel/helper-plugin-utils" "^7.25.9" - regenerator-transform "^0.15.2" + "@babel/helper-plugin-utils" "^7.27.1" -"@babel/plugin-transform-regexp-modifiers@^7.26.0": - version "7.26.0" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-regexp-modifiers/-/plugin-transform-regexp-modifiers-7.26.0.tgz#2f5837a5b5cd3842a919d8147e9903cc7455b850" - integrity sha512-vN6saax7lrA2yA/Pak3sCxuD6F5InBjn9IcrIKQPjpsLvuHYLVroTxjdlVRHjjBWxKOqIwpTXDkOssYT4BFdRw== +"@babel/plugin-transform-regexp-modifiers@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-regexp-modifiers/-/plugin-transform-regexp-modifiers-7.27.1.tgz#df9ba5577c974e3f1449888b70b76169998a6d09" + integrity sha512-TtEciroaiODtXvLZv4rmfMhkCv8jx3wgKpL68PuiPh2M4fvz5jhsA7697N1gMvkvr/JTF13DrFYyEbY9U7cVPA== dependencies: - "@babel/helper-create-regexp-features-plugin" "^7.25.9" - "@babel/helper-plugin-utils" "^7.25.9" + "@babel/helper-create-regexp-features-plugin" "^7.27.1" + "@babel/helper-plugin-utils" "^7.27.1" -"@babel/plugin-transform-reserved-words@^7.16.7", "@babel/plugin-transform-reserved-words@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-reserved-words/-/plugin-transform-reserved-words-7.25.9.tgz#0398aed2f1f10ba3f78a93db219b27ef417fb9ce" - integrity 
sha512-7DL7DKYjn5Su++4RXu8puKZm2XBPHyjWLUidaPEkCUBbE7IPcsrkRHggAOOKydH1dASWdcUBxrkOGNxUv5P3Jg== +"@babel/plugin-transform-reserved-words@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-reserved-words/-/plugin-transform-reserved-words-7.27.1.tgz#40fba4878ccbd1c56605a4479a3a891ac0274bb4" + integrity sha512-V2ABPHIJX4kC7HegLkYoDpfg9PVmuWy/i6vUM5eGK22bx4YVFD3M5F0QQnWQoDs6AGsUWTVOopBiMFQgHaSkVw== dependencies: - "@babel/helper-plugin-utils" "^7.25.9" + "@babel/helper-plugin-utils" "^7.27.1" "@babel/plugin-transform-runtime@^7.10.1": version "7.17.0" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-runtime/-/plugin-transform-runtime-7.17.0.tgz#0a2e08b5e2b2d95c4b1d3b3371a2180617455b70" + resolved "https://registry.npmjs.org/@babel/plugin-transform-runtime/-/plugin-transform-runtime-7.17.0.tgz" integrity sha512-fr7zPWnKXNc1xoHfrIU9mN/4XKX4VLZ45Q+oMhfsYIaHvg7mHgmhfOy/ckRWqDK7XF3QDigRpkh5DKq6+clE8A== dependencies: "@babel/helper-module-imports" "^7.16.7" @@ -1020,260 +840,172 @@ babel-plugin-polyfill-regenerator "^0.3.0" semver "^6.3.0" -"@babel/plugin-transform-shorthand-properties@^7.16.7", "@babel/plugin-transform-shorthand-properties@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-shorthand-properties/-/plugin-transform-shorthand-properties-7.25.9.tgz#bb785e6091f99f826a95f9894fc16fde61c163f2" - integrity sha512-MUv6t0FhO5qHnS/W8XCbHmiRWOphNufpE1IVxhK5kuN3Td9FT1x4rx4K42s3RYdMXCXpfWkGSbCSd0Z64xA7Ng== - dependencies: - "@babel/helper-plugin-utils" "^7.25.9" - -"@babel/plugin-transform-spread@^7.16.7", "@babel/plugin-transform-spread@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-spread/-/plugin-transform-spread-7.25.9.tgz#24a35153931b4ba3d13cec4a7748c21ab5514ef9" - integrity sha512-oNknIB0TbURU5pqJFVbOOFspVlrpVwo2H1+HUIsVDvp5VauGGDP1ZEvO8Nn5xyMEs3dakajOxlmkNW7kNgSm6A== - dependencies: - "@babel/helper-plugin-utils" 
"^7.25.9" - "@babel/helper-skip-transparent-expression-wrappers" "^7.25.9" - -"@babel/plugin-transform-sticky-regex@^7.16.7", "@babel/plugin-transform-sticky-regex@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-sticky-regex/-/plugin-transform-sticky-regex-7.25.9.tgz#c7f02b944e986a417817b20ba2c504dfc1453d32" - integrity sha512-WqBUSgeVwucYDP9U/xNRQam7xV8W5Zf+6Eo7T2SRVUFlhRiMNFdFz58u0KZmCVVqs2i7SHgpRnAhzRNmKfi2uA== - dependencies: - "@babel/helper-plugin-utils" "^7.25.9" - -"@babel/plugin-transform-template-literals@^7.16.7", "@babel/plugin-transform-template-literals@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-template-literals/-/plugin-transform-template-literals-7.25.9.tgz#6dbd4a24e8fad024df76d1fac6a03cf413f60fe1" - integrity sha512-o97AE4syN71M/lxrCtQByzphAdlYluKPDBzDVzMmfCobUjjhAryZV0AIpRPrxN0eAkxXO6ZLEScmt+PNhj2OTw== - dependencies: - "@babel/helper-plugin-utils" "^7.25.9" - -"@babel/plugin-transform-typeof-symbol@^7.16.7", "@babel/plugin-transform-typeof-symbol@^7.26.7": - version "7.26.7" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-typeof-symbol/-/plugin-transform-typeof-symbol-7.26.7.tgz#d0e33acd9223744c1e857dbd6fa17bd0a3786937" - integrity sha512-jfoTXXZTgGg36BmhqT3cAYK5qkmqvJpvNrPhaK/52Vgjhw4Rq29s9UqpWWV0D6yuRmgiFH/BUVlkl96zJWqnaw== - dependencies: - "@babel/helper-plugin-utils" "^7.26.5" - -"@babel/plugin-transform-typescript@^7.16.7": - version "7.16.8" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-typescript/-/plugin-transform-typescript-7.16.8.tgz#591ce9b6b83504903fa9dd3652c357c2ba7a1ee0" - integrity sha512-bHdQ9k7YpBDO2d0NVfkj51DpQcvwIzIusJ7mEUaMlbZq3Kt/U47j24inXZHQ5MDiYpCs+oZiwnXyKedE8+q7AQ== - dependencies: - "@babel/helper-create-class-features-plugin" "^7.16.7" - "@babel/helper-plugin-utils" "^7.16.7" - "@babel/plugin-syntax-typescript" "^7.16.7" - -"@babel/plugin-transform-unicode-escapes@^7.16.7", 
"@babel/plugin-transform-unicode-escapes@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-unicode-escapes/-/plugin-transform-unicode-escapes-7.25.9.tgz#a75ef3947ce15363fccaa38e2dd9bc70b2788b82" - integrity sha512-s5EDrE6bW97LtxOcGj1Khcx5AaXwiMmi4toFWRDP9/y0Woo6pXC+iyPu/KuhKtfSrNFd7jJB+/fkOtZy6aIC6Q== - dependencies: - "@babel/helper-plugin-utils" "^7.25.9" - -"@babel/plugin-transform-unicode-property-regex@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-unicode-property-regex/-/plugin-transform-unicode-property-regex-7.25.9.tgz#a901e96f2c1d071b0d1bb5dc0d3c880ce8f53dd3" - integrity sha512-Jt2d8Ga+QwRluxRQ307Vlxa6dMrYEMZCgGxoPR8V52rxPyldHu3hdlHspxaqYmE7oID5+kB+UKUB/eWS+DkkWg== - dependencies: - "@babel/helper-create-regexp-features-plugin" "^7.25.9" - "@babel/helper-plugin-utils" "^7.25.9" - -"@babel/plugin-transform-unicode-regex@^7.16.7", "@babel/plugin-transform-unicode-regex@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-unicode-regex/-/plugin-transform-unicode-regex-7.25.9.tgz#5eae747fe39eacf13a8bd006a4fb0b5d1fa5e9b1" - integrity sha512-yoxstj7Rg9dlNn9UQxzk4fcNivwv4nUYz7fYXBaKxvw/lnmPuOm/ikoELygbYq68Bls3D/D+NBPHiLwZdZZ4HA== - dependencies: - "@babel/helper-create-regexp-features-plugin" "^7.25.9" - "@babel/helper-plugin-utils" "^7.25.9" - -"@babel/plugin-transform-unicode-sets-regex@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-unicode-sets-regex/-/plugin-transform-unicode-sets-regex-7.25.9.tgz#65114c17b4ffc20fa5b163c63c70c0d25621fabe" - integrity sha512-8BYqO3GeVNHtx69fdPshN3fnzUNLrWdHhk/icSwigksJGczKSizZ+Z6SBCxTs723Fr5VSNorTIK7a+R2tISvwQ== - dependencies: - "@babel/helper-create-regexp-features-plugin" "^7.25.9" - "@babel/helper-plugin-utils" "^7.25.9" - -"@babel/preset-env@^7.10.2": - version "7.16.11" - resolved 
"https://registry.yarnpkg.com/@babel/preset-env/-/preset-env-7.16.11.tgz#5dd88fd885fae36f88fd7c8342475c9f0abe2982" - integrity sha512-qcmWG8R7ZW6WBRPZK//y+E3Cli151B20W1Rv7ln27vuPaXU/8TKms6jFdiJtF7UDTxcrb7mZd88tAeK9LjdT8g== - dependencies: - "@babel/compat-data" "^7.16.8" - "@babel/helper-compilation-targets" "^7.16.7" - "@babel/helper-plugin-utils" "^7.16.7" - "@babel/helper-validator-option" "^7.16.7" - "@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression" "^7.16.7" - "@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining" "^7.16.7" - "@babel/plugin-proposal-async-generator-functions" "^7.16.8" - "@babel/plugin-proposal-class-properties" "^7.16.7" - "@babel/plugin-proposal-class-static-block" "^7.16.7" - "@babel/plugin-proposal-dynamic-import" "^7.16.7" - "@babel/plugin-proposal-export-namespace-from" "^7.16.7" - "@babel/plugin-proposal-json-strings" "^7.16.7" - "@babel/plugin-proposal-logical-assignment-operators" "^7.16.7" - "@babel/plugin-proposal-nullish-coalescing-operator" "^7.16.7" - "@babel/plugin-proposal-numeric-separator" "^7.16.7" - "@babel/plugin-proposal-object-rest-spread" "^7.16.7" - "@babel/plugin-proposal-optional-catch-binding" "^7.16.7" - "@babel/plugin-proposal-optional-chaining" "^7.16.7" - "@babel/plugin-proposal-private-methods" "^7.16.11" - "@babel/plugin-proposal-private-property-in-object" "^7.16.7" - "@babel/plugin-proposal-unicode-property-regex" "^7.16.7" - "@babel/plugin-syntax-async-generators" "^7.8.4" - "@babel/plugin-syntax-class-properties" "^7.12.13" - "@babel/plugin-syntax-class-static-block" "^7.14.5" - "@babel/plugin-syntax-dynamic-import" "^7.8.3" - "@babel/plugin-syntax-export-namespace-from" "^7.8.3" - "@babel/plugin-syntax-json-strings" "^7.8.3" - "@babel/plugin-syntax-logical-assignment-operators" "^7.10.4" - "@babel/plugin-syntax-nullish-coalescing-operator" "^7.8.3" - "@babel/plugin-syntax-numeric-separator" "^7.10.4" - "@babel/plugin-syntax-object-rest-spread" "^7.8.3" - 
"@babel/plugin-syntax-optional-catch-binding" "^7.8.3" - "@babel/plugin-syntax-optional-chaining" "^7.8.3" - "@babel/plugin-syntax-private-property-in-object" "^7.14.5" - "@babel/plugin-syntax-top-level-await" "^7.14.5" - "@babel/plugin-transform-arrow-functions" "^7.16.7" - "@babel/plugin-transform-async-to-generator" "^7.16.8" - "@babel/plugin-transform-block-scoped-functions" "^7.16.7" - "@babel/plugin-transform-block-scoping" "^7.16.7" - "@babel/plugin-transform-classes" "^7.16.7" - "@babel/plugin-transform-computed-properties" "^7.16.7" - "@babel/plugin-transform-destructuring" "^7.16.7" - "@babel/plugin-transform-dotall-regex" "^7.16.7" - "@babel/plugin-transform-duplicate-keys" "^7.16.7" - "@babel/plugin-transform-exponentiation-operator" "^7.16.7" - "@babel/plugin-transform-for-of" "^7.16.7" - "@babel/plugin-transform-function-name" "^7.16.7" - "@babel/plugin-transform-literals" "^7.16.7" - "@babel/plugin-transform-member-expression-literals" "^7.16.7" - "@babel/plugin-transform-modules-amd" "^7.16.7" - "@babel/plugin-transform-modules-commonjs" "^7.16.8" - "@babel/plugin-transform-modules-systemjs" "^7.16.7" - "@babel/plugin-transform-modules-umd" "^7.16.7" - "@babel/plugin-transform-named-capturing-groups-regex" "^7.16.8" - "@babel/plugin-transform-new-target" "^7.16.7" - "@babel/plugin-transform-object-super" "^7.16.7" - "@babel/plugin-transform-parameters" "^7.16.7" - "@babel/plugin-transform-property-literals" "^7.16.7" - "@babel/plugin-transform-regenerator" "^7.16.7" - "@babel/plugin-transform-reserved-words" "^7.16.7" - "@babel/plugin-transform-shorthand-properties" "^7.16.7" - "@babel/plugin-transform-spread" "^7.16.7" - "@babel/plugin-transform-sticky-regex" "^7.16.7" - "@babel/plugin-transform-template-literals" "^7.16.7" - "@babel/plugin-transform-typeof-symbol" "^7.16.7" - "@babel/plugin-transform-unicode-escapes" "^7.16.7" - "@babel/plugin-transform-unicode-regex" "^7.16.7" - "@babel/preset-modules" "^0.1.5" - "@babel/types" "^7.16.8" - 
babel-plugin-polyfill-corejs2 "^0.3.0" - babel-plugin-polyfill-corejs3 "^0.5.0" - babel-plugin-polyfill-regenerator "^0.3.0" - core-js-compat "^3.20.2" - semver "^6.3.0" - -"@babel/preset-env@^7.14.5": - version "7.26.7" - resolved "https://registry.yarnpkg.com/@babel/preset-env/-/preset-env-7.26.7.tgz#24d38e211f4570b8d806337035cc3ae798e0c36d" - integrity sha512-Ycg2tnXwixaXOVb29rana8HNPgLVBof8qqtNQ9LE22IoyZboQbGSxI6ZySMdW3K5nAe6gu35IaJefUJflhUFTQ== - dependencies: - "@babel/compat-data" "^7.26.5" - "@babel/helper-compilation-targets" "^7.26.5" - "@babel/helper-plugin-utils" "^7.26.5" - "@babel/helper-validator-option" "^7.25.9" - "@babel/plugin-bugfix-firefox-class-in-computed-class-key" "^7.25.9" - "@babel/plugin-bugfix-safari-class-field-initializer-scope" "^7.25.9" - "@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression" "^7.25.9" - "@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining" "^7.25.9" - "@babel/plugin-bugfix-v8-static-class-fields-redefine-readonly" "^7.25.9" +"@babel/plugin-transform-shorthand-properties@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-shorthand-properties/-/plugin-transform-shorthand-properties-7.27.1.tgz#532abdacdec87bfee1e0ef8e2fcdee543fe32b90" + integrity sha512-N/wH1vcn4oYawbJ13Y/FxcQrWk63jhfNa7jef0ih7PHSIHX2LB7GWE1rkPrOnka9kwMxb6hMl19p7lidA+EHmQ== + dependencies: + "@babel/helper-plugin-utils" "^7.27.1" + +"@babel/plugin-transform-spread@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-spread/-/plugin-transform-spread-7.27.1.tgz#1a264d5fc12750918f50e3fe3e24e437178abb08" + integrity sha512-kpb3HUqaILBJcRFVhFUs6Trdd4mkrzcGXss+6/mxUd273PfbWqSDHRzMT2234gIg2QYfAjvXLSquP1xECSg09Q== + dependencies: + "@babel/helper-plugin-utils" "^7.27.1" + "@babel/helper-skip-transparent-expression-wrappers" "^7.27.1" + +"@babel/plugin-transform-sticky-regex@^7.27.1": + version "7.27.1" + resolved 
"https://registry.yarnpkg.com/@babel/plugin-transform-sticky-regex/-/plugin-transform-sticky-regex-7.27.1.tgz#18984935d9d2296843a491d78a014939f7dcd280" + integrity sha512-lhInBO5bi/Kowe2/aLdBAawijx+q1pQzicSgnkB6dUPc1+RC8QmJHKf2OjvU+NZWitguJHEaEmbV6VWEouT58g== + dependencies: + "@babel/helper-plugin-utils" "^7.27.1" + +"@babel/plugin-transform-template-literals@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-template-literals/-/plugin-transform-template-literals-7.27.1.tgz#1a0eb35d8bb3e6efc06c9fd40eb0bcef548328b8" + integrity sha512-fBJKiV7F2DxZUkg5EtHKXQdbsbURW3DZKQUWphDum0uRP6eHGGa/He9mc0mypL680pb+e/lDIthRohlv8NCHkg== + dependencies: + "@babel/helper-plugin-utils" "^7.27.1" + +"@babel/plugin-transform-typeof-symbol@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-typeof-symbol/-/plugin-transform-typeof-symbol-7.27.1.tgz#70e966bb492e03509cf37eafa6dcc3051f844369" + integrity sha512-RiSILC+nRJM7FY5srIyc4/fGIwUhyDuuBSdWn4y6yT6gm652DpCHZjIipgn6B7MQ1ITOUnAKWixEUjQRIBIcLw== + dependencies: + "@babel/helper-plugin-utils" "^7.27.1" + +"@babel/plugin-transform-typescript@^7.28.5": + version "7.28.5" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-typescript/-/plugin-transform-typescript-7.28.5.tgz#441c5f9a4a1315039516c6c612fc66d5f4594e72" + integrity sha512-x2Qa+v/CuEoX7Dr31iAfr0IhInrVOWZU/2vJMJ00FOR/2nM0BcBEclpaf9sWCDc+v5e9dMrhSH8/atq/kX7+bA== + dependencies: + "@babel/helper-annotate-as-pure" "^7.27.3" + "@babel/helper-create-class-features-plugin" "^7.28.5" + "@babel/helper-plugin-utils" "^7.27.1" + "@babel/helper-skip-transparent-expression-wrappers" "^7.27.1" + "@babel/plugin-syntax-typescript" "^7.27.1" + +"@babel/plugin-transform-unicode-escapes@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-unicode-escapes/-/plugin-transform-unicode-escapes-7.27.1.tgz#3e3143f8438aef842de28816ece58780190cf806" + integrity 
sha512-Ysg4v6AmF26k9vpfFuTZg8HRfVWzsh1kVfowA23y9j/Gu6dOuahdUVhkLqpObp3JIv27MLSii6noRnuKN8H0Mg== + dependencies: + "@babel/helper-plugin-utils" "^7.27.1" + +"@babel/plugin-transform-unicode-property-regex@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-unicode-property-regex/-/plugin-transform-unicode-property-regex-7.27.1.tgz#bdfe2d3170c78c5691a3c3be934c8c0087525956" + integrity sha512-uW20S39PnaTImxp39O5qFlHLS9LJEmANjMG7SxIhap8rCHqu0Ik+tLEPX5DKmHn6CsWQ7j3lix2tFOa5YtL12Q== + dependencies: + "@babel/helper-create-regexp-features-plugin" "^7.27.1" + "@babel/helper-plugin-utils" "^7.27.1" + +"@babel/plugin-transform-unicode-regex@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-unicode-regex/-/plugin-transform-unicode-regex-7.27.1.tgz#25948f5c395db15f609028e370667ed8bae9af97" + integrity sha512-xvINq24TRojDuyt6JGtHmkVkrfVV3FPT16uytxImLeBZqW3/H52yN+kM1MGuyPkIQxrzKwPHs5U/MP3qKyzkGw== + dependencies: + "@babel/helper-create-regexp-features-plugin" "^7.27.1" + "@babel/helper-plugin-utils" "^7.27.1" + +"@babel/plugin-transform-unicode-sets-regex@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-unicode-sets-regex/-/plugin-transform-unicode-sets-regex-7.27.1.tgz#6ab706d10f801b5c72da8bb2548561fa04193cd1" + integrity sha512-EtkOujbc4cgvb0mlpQefi4NTPBzhSIevblFevACNLUspmrALgmEBdL/XfnyyITfd8fKBZrZys92zOWcik7j9Tw== + dependencies: + "@babel/helper-create-regexp-features-plugin" "^7.27.1" + "@babel/helper-plugin-utils" "^7.27.1" + +"@babel/preset-env@^7.10.2", "@babel/preset-env@^7.14.5": + version "7.28.5" + resolved "https://registry.yarnpkg.com/@babel/preset-env/-/preset-env-7.28.5.tgz#82dd159d1563f219a1ce94324b3071eb89e280b0" + integrity sha512-S36mOoi1Sb6Fz98fBfE+UZSpYw5mJm0NUHtIKrOuNcqeFauy1J6dIvXm2KRVKobOSaGq4t/hBXdN4HGU3wL9Wg== + dependencies: + "@babel/compat-data" "^7.28.5" + "@babel/helper-compilation-targets" "^7.27.2" + 
"@babel/helper-plugin-utils" "^7.27.1" + "@babel/helper-validator-option" "^7.27.1" + "@babel/plugin-bugfix-firefox-class-in-computed-class-key" "^7.28.5" + "@babel/plugin-bugfix-safari-class-field-initializer-scope" "^7.27.1" + "@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression" "^7.27.1" + "@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining" "^7.27.1" + "@babel/plugin-bugfix-v8-static-class-fields-redefine-readonly" "^7.28.3" "@babel/plugin-proposal-private-property-in-object" "7.21.0-placeholder-for-preset-env.2" - "@babel/plugin-syntax-import-assertions" "^7.26.0" - "@babel/plugin-syntax-import-attributes" "^7.26.0" + "@babel/plugin-syntax-import-assertions" "^7.27.1" + "@babel/plugin-syntax-import-attributes" "^7.27.1" "@babel/plugin-syntax-unicode-sets-regex" "^7.18.6" - "@babel/plugin-transform-arrow-functions" "^7.25.9" - "@babel/plugin-transform-async-generator-functions" "^7.25.9" - "@babel/plugin-transform-async-to-generator" "^7.25.9" - "@babel/plugin-transform-block-scoped-functions" "^7.26.5" - "@babel/plugin-transform-block-scoping" "^7.25.9" - "@babel/plugin-transform-class-properties" "^7.25.9" - "@babel/plugin-transform-class-static-block" "^7.26.0" - "@babel/plugin-transform-classes" "^7.25.9" - "@babel/plugin-transform-computed-properties" "^7.25.9" - "@babel/plugin-transform-destructuring" "^7.25.9" - "@babel/plugin-transform-dotall-regex" "^7.25.9" - "@babel/plugin-transform-duplicate-keys" "^7.25.9" - "@babel/plugin-transform-duplicate-named-capturing-groups-regex" "^7.25.9" - "@babel/plugin-transform-dynamic-import" "^7.25.9" - "@babel/plugin-transform-exponentiation-operator" "^7.26.3" - "@babel/plugin-transform-export-namespace-from" "^7.25.9" - "@babel/plugin-transform-for-of" "^7.25.9" - "@babel/plugin-transform-function-name" "^7.25.9" - "@babel/plugin-transform-json-strings" "^7.25.9" - "@babel/plugin-transform-literals" "^7.25.9" - "@babel/plugin-transform-logical-assignment-operators" "^7.25.9" 
- "@babel/plugin-transform-member-expression-literals" "^7.25.9" - "@babel/plugin-transform-modules-amd" "^7.25.9" - "@babel/plugin-transform-modules-commonjs" "^7.26.3" - "@babel/plugin-transform-modules-systemjs" "^7.25.9" - "@babel/plugin-transform-modules-umd" "^7.25.9" - "@babel/plugin-transform-named-capturing-groups-regex" "^7.25.9" - "@babel/plugin-transform-new-target" "^7.25.9" - "@babel/plugin-transform-nullish-coalescing-operator" "^7.26.6" - "@babel/plugin-transform-numeric-separator" "^7.25.9" - "@babel/plugin-transform-object-rest-spread" "^7.25.9" - "@babel/plugin-transform-object-super" "^7.25.9" - "@babel/plugin-transform-optional-catch-binding" "^7.25.9" - "@babel/plugin-transform-optional-chaining" "^7.25.9" - "@babel/plugin-transform-parameters" "^7.25.9" - "@babel/plugin-transform-private-methods" "^7.25.9" - "@babel/plugin-transform-private-property-in-object" "^7.25.9" - "@babel/plugin-transform-property-literals" "^7.25.9" - "@babel/plugin-transform-regenerator" "^7.25.9" - "@babel/plugin-transform-regexp-modifiers" "^7.26.0" - "@babel/plugin-transform-reserved-words" "^7.25.9" - "@babel/plugin-transform-shorthand-properties" "^7.25.9" - "@babel/plugin-transform-spread" "^7.25.9" - "@babel/plugin-transform-sticky-regex" "^7.25.9" - "@babel/plugin-transform-template-literals" "^7.25.9" - "@babel/plugin-transform-typeof-symbol" "^7.26.7" - "@babel/plugin-transform-unicode-escapes" "^7.25.9" - "@babel/plugin-transform-unicode-property-regex" "^7.25.9" - "@babel/plugin-transform-unicode-regex" "^7.25.9" - "@babel/plugin-transform-unicode-sets-regex" "^7.25.9" + "@babel/plugin-transform-arrow-functions" "^7.27.1" + "@babel/plugin-transform-async-generator-functions" "^7.28.0" + "@babel/plugin-transform-async-to-generator" "^7.27.1" + "@babel/plugin-transform-block-scoped-functions" "^7.27.1" + "@babel/plugin-transform-block-scoping" "^7.28.5" + "@babel/plugin-transform-class-properties" "^7.27.1" + "@babel/plugin-transform-class-static-block" 
"^7.28.3" + "@babel/plugin-transform-classes" "^7.28.4" + "@babel/plugin-transform-computed-properties" "^7.27.1" + "@babel/plugin-transform-destructuring" "^7.28.5" + "@babel/plugin-transform-dotall-regex" "^7.27.1" + "@babel/plugin-transform-duplicate-keys" "^7.27.1" + "@babel/plugin-transform-duplicate-named-capturing-groups-regex" "^7.27.1" + "@babel/plugin-transform-dynamic-import" "^7.27.1" + "@babel/plugin-transform-explicit-resource-management" "^7.28.0" + "@babel/plugin-transform-exponentiation-operator" "^7.28.5" + "@babel/plugin-transform-export-namespace-from" "^7.27.1" + "@babel/plugin-transform-for-of" "^7.27.1" + "@babel/plugin-transform-function-name" "^7.27.1" + "@babel/plugin-transform-json-strings" "^7.27.1" + "@babel/plugin-transform-literals" "^7.27.1" + "@babel/plugin-transform-logical-assignment-operators" "^7.28.5" + "@babel/plugin-transform-member-expression-literals" "^7.27.1" + "@babel/plugin-transform-modules-amd" "^7.27.1" + "@babel/plugin-transform-modules-commonjs" "^7.27.1" + "@babel/plugin-transform-modules-systemjs" "^7.28.5" + "@babel/plugin-transform-modules-umd" "^7.27.1" + "@babel/plugin-transform-named-capturing-groups-regex" "^7.27.1" + "@babel/plugin-transform-new-target" "^7.27.1" + "@babel/plugin-transform-nullish-coalescing-operator" "^7.27.1" + "@babel/plugin-transform-numeric-separator" "^7.27.1" + "@babel/plugin-transform-object-rest-spread" "^7.28.4" + "@babel/plugin-transform-object-super" "^7.27.1" + "@babel/plugin-transform-optional-catch-binding" "^7.27.1" + "@babel/plugin-transform-optional-chaining" "^7.28.5" + "@babel/plugin-transform-parameters" "^7.27.7" + "@babel/plugin-transform-private-methods" "^7.27.1" + "@babel/plugin-transform-private-property-in-object" "^7.27.1" + "@babel/plugin-transform-property-literals" "^7.27.1" + "@babel/plugin-transform-regenerator" "^7.28.4" + "@babel/plugin-transform-regexp-modifiers" "^7.27.1" + "@babel/plugin-transform-reserved-words" "^7.27.1" + 
"@babel/plugin-transform-shorthand-properties" "^7.27.1" + "@babel/plugin-transform-spread" "^7.27.1" + "@babel/plugin-transform-sticky-regex" "^7.27.1" + "@babel/plugin-transform-template-literals" "^7.27.1" + "@babel/plugin-transform-typeof-symbol" "^7.27.1" + "@babel/plugin-transform-unicode-escapes" "^7.27.1" + "@babel/plugin-transform-unicode-property-regex" "^7.27.1" + "@babel/plugin-transform-unicode-regex" "^7.27.1" + "@babel/plugin-transform-unicode-sets-regex" "^7.27.1" "@babel/preset-modules" "0.1.6-no-external-plugins" - babel-plugin-polyfill-corejs2 "^0.4.10" - babel-plugin-polyfill-corejs3 "^0.10.6" - babel-plugin-polyfill-regenerator "^0.6.1" - core-js-compat "^3.38.1" + babel-plugin-polyfill-corejs2 "^0.4.14" + babel-plugin-polyfill-corejs3 "^0.13.0" + babel-plugin-polyfill-regenerator "^0.6.5" + core-js-compat "^3.43.0" semver "^6.3.1" "@babel/preset-modules@0.1.6-no-external-plugins": version "0.1.6-no-external-plugins" - resolved "https://registry.yarnpkg.com/@babel/preset-modules/-/preset-modules-0.1.6-no-external-plugins.tgz#ccb88a2c49c817236861fee7826080573b8a923a" + resolved "https://registry.npmjs.org/@babel/preset-modules/-/preset-modules-0.1.6-no-external-plugins.tgz" integrity sha512-HrcgcIESLm9aIR842yhJ5RWan/gebQUJ6E/E5+rf0y9o6oj7w0Br+sWuL6kEQ/o/AdfvR1Je9jG18/gnpwjEyA== dependencies: "@babel/helper-plugin-utils" "^7.0.0" "@babel/types" "^7.4.4" esutils "^2.0.2" -"@babel/preset-modules@^0.1.5": - version "0.1.6" - resolved "https://registry.yarnpkg.com/@babel/preset-modules/-/preset-modules-0.1.6.tgz#31bcdd8f19538437339d17af00d177d854d9d458" - integrity sha512-ID2yj6K/4lKfhuU3+EX4UvNbIt7eACFbHmNUjzA+ep+B5971CknnA/9DEWKbRokfbbtblxxxXFJJrH47UEAMVg== - dependencies: - "@babel/helper-plugin-utils" "^7.0.0" - "@babel/plugin-proposal-unicode-property-regex" "^7.4.4" - "@babel/plugin-transform-dotall-regex" "^7.4.4" - "@babel/types" "^7.4.4" - esutils "^2.0.2" - "@babel/preset-react@^7.10.1": version "7.16.7" - resolved 
"https://registry.yarnpkg.com/@babel/preset-react/-/preset-react-7.16.7.tgz#4c18150491edc69c183ff818f9f2aecbe5d93852" + resolved "https://registry.npmjs.org/@babel/preset-react/-/preset-react-7.16.7.tgz" integrity sha512-fWpyI8UM/HE6DfPBzD8LnhQ/OcH8AgTaqcqP2nGOXEUV+VKBR5JRN9hCk9ai+zQQ57vtm9oWeXguBCPNUjytgA== dependencies: "@babel/helper-plugin-utils" "^7.16.7" @@ -1284,227 +1016,216 @@ "@babel/plugin-transform-react-pure-annotations" "^7.16.7" "@babel/preset-typescript@^7.14.5": - version "7.16.7" - resolved "https://registry.yarnpkg.com/@babel/preset-typescript/-/preset-typescript-7.16.7.tgz#ab114d68bb2020afc069cd51b37ff98a046a70b9" - integrity sha512-WbVEmgXdIyvzB77AQjGBEyYPZx+8tTsO50XtfozQrkW8QB2rLJpH2lgx0TRw5EJrBxOZQ+wCcyPVQvS8tjEHpQ== - dependencies: - "@babel/helper-plugin-utils" "^7.16.7" - "@babel/helper-validator-option" "^7.16.7" - "@babel/plugin-transform-typescript" "^7.16.7" - -"@babel/runtime@^7.1.2", "@babel/runtime@^7.13.10", "@babel/runtime@^7.14.8", "@babel/runtime@^7.25.0", "@babel/runtime@^7.26.10", "@babel/runtime@^7.7.2", "@babel/runtime@^7.8.4": - version "7.26.10" - resolved "https://registry.yarnpkg.com/@babel/runtime/-/runtime-7.26.10.tgz#a07b4d8fa27af131a633d7b3524db803eb4764c2" - integrity sha512-2WJMeRQPHKSPemqk/awGrAiuFfzBmOIPXKizAsVhWH9YJqLZ0H+HS4c8loHGgW6utJ3E/ejXQUsiGaQy2NZ9Fw== - dependencies: - regenerator-runtime "^0.14.0" - -"@babel/template@^7.24.7", "@babel/template@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/template/-/template-7.25.9.tgz#ecb62d81a8a6f5dc5fe8abfc3901fc52ddf15016" - integrity sha512-9DGttpmPvIxBb/2uwpVo3dqJ+O6RooAFOS+lB+xDqoE2PVCE8nfoHMdZLpfCQRLwvohzXISPZcgxt80xLfsuwg== - dependencies: - "@babel/code-frame" "^7.25.9" - "@babel/parser" "^7.25.9" - "@babel/types" "^7.25.9" - -"@babel/template@^7.26.9": - version "7.26.9" - resolved "https://registry.yarnpkg.com/@babel/template/-/template-7.26.9.tgz#4577ad3ddf43d194528cff4e1fa6b232fa609bb2" - integrity 
sha512-qyRplbeIpNZhmzOysF/wFMuP9sctmh2cFzRAZOn1YapxBsE1i9bJIY586R/WBLfLcmcBlM8ROBiQURnnNy+zfA== - dependencies: - "@babel/code-frame" "^7.26.2" - "@babel/parser" "^7.26.9" - "@babel/types" "^7.26.9" - -"@babel/traverse@^7.25.9", "@babel/traverse@^7.26.5", "@babel/traverse@^7.26.7": - version "7.26.7" - resolved "https://registry.yarnpkg.com/@babel/traverse/-/traverse-7.26.7.tgz#99a0a136f6a75e7fb8b0a1ace421e0b25994b8bb" - integrity sha512-1x1sgeyRLC3r5fQOM0/xtQKsYjyxmFjaOrLJNtZ81inNjyJHGIolTULPiSc/2qe1/qfpFLisLQYFnnZl7QoedA== - dependencies: - "@babel/code-frame" "^7.26.2" - "@babel/generator" "^7.26.5" - "@babel/parser" "^7.26.7" - "@babel/template" "^7.25.9" - "@babel/types" "^7.26.7" + version "7.28.5" + resolved "https://registry.yarnpkg.com/@babel/preset-typescript/-/preset-typescript-7.28.5.tgz#540359efa3028236958466342967522fd8f2a60c" + integrity sha512-+bQy5WOI2V6LJZpPVxY+yp66XdZ2yifu0Mc1aP5CQKgjn4QM5IN2i5fAZ4xKop47pr8rpVhiAeu+nDQa12C8+g== + dependencies: + "@babel/helper-plugin-utils" "^7.27.1" + "@babel/helper-validator-option" "^7.27.1" + "@babel/plugin-syntax-jsx" "^7.27.1" + "@babel/plugin-transform-modules-commonjs" "^7.27.1" + "@babel/plugin-transform-typescript" "^7.28.5" + +"@babel/runtime@^7.1.2", "@babel/runtime@^7.12.5", "@babel/runtime@^7.14.8", "@babel/runtime@^7.18.3", "@babel/runtime@^7.25.0", "@babel/runtime@^7.26.10": + version "7.28.4" + resolved "https://registry.yarnpkg.com/@babel/runtime/-/runtime-7.28.4.tgz#a70226016fabe25c5783b2f22d3e1c9bc5ca3326" + integrity sha512-Q/N6JNWvIvPnLDvjlE1OUBLPQHH6l3CltCEsHIujp45zQUSSh8K+gHnaEX45yAT1nyngnINhvWtzN+Nb9D8RAQ== + +"@babel/template@^7.24.7", "@babel/template@^7.27.1", "@babel/template@^7.27.2": + version "7.27.2" + resolved "https://registry.yarnpkg.com/@babel/template/-/template-7.27.2.tgz#fa78ceed3c4e7b63ebf6cb39e5852fca45f6809d" + integrity sha512-LPDZ85aEJyYSd18/DkjNh4/y1ntkE5KwUHWTiqgRxruuZL2F1yuHligVHLvcHY2vMHXttKFpJn6LwfI7cw7ODw== + dependencies: + "@babel/code-frame" "^7.27.1" + 
"@babel/parser" "^7.27.2" + "@babel/types" "^7.27.1" + +"@babel/traverse@^7.25.9", "@babel/traverse@^7.26.5", "@babel/traverse@^7.27.1", "@babel/traverse@^7.28.0", "@babel/traverse@^7.28.3", "@babel/traverse@^7.28.4", "@babel/traverse@^7.28.5": + version "7.28.5" + resolved "https://registry.yarnpkg.com/@babel/traverse/-/traverse-7.28.5.tgz#450cab9135d21a7a2ca9d2d35aa05c20e68c360b" + integrity sha512-TCCj4t55U90khlYkVV/0TfkJkAkUg3jZFA3Neb7unZT8CPok7iiRfaX0F+WnqWqt7OxhOn0uBKXCw4lbL8W0aQ== + dependencies: + "@babel/code-frame" "^7.27.1" + "@babel/generator" "^7.28.5" + "@babel/helper-globals" "^7.28.0" + "@babel/parser" "^7.28.5" + "@babel/template" "^7.27.2" + "@babel/types" "^7.28.5" debug "^4.3.1" - globals "^11.1.0" - -"@babel/types@^7.16.7", "@babel/types@^7.16.8", "@babel/types@^7.24.7", "@babel/types@^7.25.9", "@babel/types@^7.26.5", "@babel/types@^7.26.7", "@babel/types@^7.4.4": - version "7.26.7" - resolved "https://registry.yarnpkg.com/@babel/types/-/types-7.26.7.tgz#5e2b89c0768e874d4d061961f3a5a153d71dc17a" - integrity sha512-t8kDRGrKXyp6+tjUh7hw2RLyclsW4TRoRvRHtSyAX9Bb5ldlFh+90YAYY6awRXrlB4G5G2izNeGySpATlFzmOg== - dependencies: - "@babel/helper-string-parser" "^7.25.9" - "@babel/helper-validator-identifier" "^7.25.9" -"@babel/types@^7.26.10", "@babel/types@^7.26.9": - version "7.26.10" - resolved "https://registry.yarnpkg.com/@babel/types/-/types-7.26.10.tgz#396382f6335bd4feb65741eacfc808218f859259" - integrity sha512-emqcG3vHrpxUKTrxcblR36dcrcoRDvKmnL/dCL6ZsHaShW80qxCAcNhzQZrpeM765VzEos+xOi4s+r4IXzTwdQ== +"@babel/types@^7.24.7", "@babel/types@^7.25.9", "@babel/types@^7.27.1", "@babel/types@^7.27.3", "@babel/types@^7.28.2", "@babel/types@^7.28.4", "@babel/types@^7.28.5", "@babel/types@^7.4.4": + version "7.28.5" + resolved "https://registry.yarnpkg.com/@babel/types/-/types-7.28.5.tgz#10fc405f60897c35f07e85493c932c7b5ca0592b" + integrity sha512-qQ5m48eI/MFLQ5PxQj4PFaprjyCTLI37ElWMmNs0K8Lk3dVeOdNpB3ks8jc7yM5CDmVC73eMVk/trk3fgmrUpA== dependencies: - 
"@babel/helper-string-parser" "^7.25.9" - "@babel/helper-validator-identifier" "^7.25.9" + "@babel/helper-string-parser" "^7.27.1" + "@babel/helper-validator-identifier" "^7.28.5" "@colors/colors@1.6.0", "@colors/colors@^1.6.0": version "1.6.0" - resolved "https://registry.yarnpkg.com/@colors/colors/-/colors-1.6.0.tgz#ec6cd237440700bc23ca23087f513c75508958b0" + resolved "https://registry.npmjs.org/@colors/colors/-/colors-1.6.0.tgz" integrity sha512-Ir+AOibqzrIsL6ajt3Rz3LskB7OiMVHqltZmspbW/TJuTVuyOMirVqAkjfY6JISiLHgyNqicAC8AyHHGzNd/dA== "@dabh/diagnostics@^2.0.2": version "2.0.3" - resolved "https://registry.yarnpkg.com/@dabh/diagnostics/-/diagnostics-2.0.3.tgz#7f7e97ee9a725dffc7808d93668cc984e1dc477a" + resolved "https://registry.npmjs.org/@dabh/diagnostics/-/diagnostics-2.0.3.tgz" integrity sha512-hrlQOIi7hAfzsMqlGSFyVucrx38O+j6wiGOf//H2ecvIEqYN4ADBSS2iLMh5UFyDunCNniUIPk/q3riFv45xRA== dependencies: colorspace "1.1.x" enabled "2.0.x" kuler "^2.0.0" -"@datadog/browser-core@4.30.0": - version "4.30.0" - resolved "https://registry.yarnpkg.com/@datadog/browser-core/-/browser-core-4.30.0.tgz#a7e1f0f3c83d69145862c23c67d98731cdec72dc" - integrity sha512-zdPClS9ErZWvMGw3aMIQoyi0iJtJ6C/ulUGGEOtQyvAKBsyTGiziGDTwyN92cAjfD4u8kY1iFV8z1BzOI56+jA== +"@datadog/browser-core@4.50.1": + version "4.50.1" + resolved "https://registry.npmjs.org/@datadog/browser-core/-/browser-core-4.50.1.tgz" + integrity sha512-2ypS19XngsMu6W4qUBtDwvImFz886Im+PziOnEycO1w41TVS5LH8/vWBMvjSf8Suer+CeRjRN9IOu0ocRx9BVw== "@datadog/browser-logs@^4.29.1": - version "4.30.0" - resolved "https://registry.yarnpkg.com/@datadog/browser-logs/-/browser-logs-4.30.0.tgz#f98cac055956745e7b16011795011e10c56cf41a" - integrity sha512-Puir5KdLqSbOGs37b9dz2jF/FOntqpW4Runym9qmfrGZfBcMYk6d83V2IMnjTpFjBLXk2zSCjqVD2/lUp2vdhQ== + version "4.50.1" + resolved "https://registry.npmjs.org/@datadog/browser-logs/-/browser-logs-4.50.1.tgz" + integrity 
sha512-uSM+T+hAQDH4lUjWNNyj8MfhQKp+sFo88UBaHUzpg0Kio/gZ+tIL7/HmTLCN5eLDWNJfH8eJuCb5Gtcvo14Pkg== dependencies: - "@datadog/browser-core" "4.30.0" + "@datadog/browser-core" "4.50.1" -"@datadog/browser-rum-core@4.30.0": - version "4.30.0" - resolved "https://registry.yarnpkg.com/@datadog/browser-rum-core/-/browser-rum-core-4.30.0.tgz#a7c53cef250f572dcb6976fdacacac082fac1b6b" - integrity sha512-upK4OZiBAJAbN3y7+IlwKEULDdE0P5Gm+XVzl1mVdc+Ypz0K1E9IR94HdqpVJenned9Cr7vRDFkbplSmnSXtuQ== +"@datadog/browser-rum-core@4.50.1": + version "4.50.1" + resolved "https://registry.npmjs.org/@datadog/browser-rum-core/-/browser-rum-core-4.50.1.tgz" + integrity sha512-ABCzEjNiBq3izapvSq6uujdx9h7L4RM44n22qDhwSIBlQY6Psf9VlzlQ5fueCALoj2LgVy4rYDj5RHfGaF/lAQ== dependencies: - "@datadog/browser-core" "4.30.0" + "@datadog/browser-core" "4.50.1" "@datadog/browser-rum@^4.29.1": - version "4.30.0" - resolved "https://registry.yarnpkg.com/@datadog/browser-rum/-/browser-rum-4.30.0.tgz#4835f35f94bb12fe9bf4c1ad3b9977deb6032380" - integrity sha512-doNNOhfTRQJKNeszW8qB/7WtzBwLhTft8aCEQtyzJTkbsm0LijOLdlGNC6dUoOJ+HWGjYWZ5Rtsv9TrmIfA56w== - dependencies: - "@datadog/browser-core" "4.30.0" - "@datadog/browser-rum-core" "4.30.0" - -"@emotion/babel-plugin-jsx-pragmatic@^0.1.5": - version "0.1.5" - resolved "https://registry.yarnpkg.com/@emotion/babel-plugin-jsx-pragmatic/-/babel-plugin-jsx-pragmatic-0.1.5.tgz#27debfe9c27c4d83574d509787ae553bf8a34d7e" - integrity sha512-y+3AJ0SItMDaAgGPVkQBC/S/BaqaPACkQ6MyCI2CUlrjTxKttTVfD3TMtcs7vLEcLxqzZ1xiG0vzwCXjhopawQ== - dependencies: - "@babel/plugin-syntax-jsx" "^7.2.0" - -"@emotion/babel-plugin@^11.2.0": - version "11.7.2" - resolved "https://registry.yarnpkg.com/@emotion/babel-plugin/-/babel-plugin-11.7.2.tgz#fec75f38a6ab5b304b0601c74e2a5e77c95e5fa0" - integrity sha512-6mGSCWi9UzXut/ZAN6lGFu33wGR3SJisNl3c0tvlmb8XChH1b2SUvxvnOh7hvLpqyRdHHU9AiazV3Cwbk5SXKQ== - dependencies: - "@babel/helper-module-imports" "^7.12.13" - "@babel/plugin-syntax-jsx" "^7.12.13" - 
"@babel/runtime" "^7.13.10" - "@emotion/hash" "^0.8.0" - "@emotion/memoize" "^0.7.5" - "@emotion/serialize" "^1.0.2" - babel-plugin-macros "^2.6.1" + version "4.50.1" + resolved "https://registry.npmjs.org/@datadog/browser-rum/-/browser-rum-4.50.1.tgz" + integrity sha512-Gcs8fMoufOr4Xlhwx0g6CHvgcXVNeCM+IyAtRAwoCBeezRhL18Jo1LALE+nsLegNKgNLO1f4vjm5TBx1WsnXWQ== + dependencies: + "@datadog/browser-core" "4.50.1" + "@datadog/browser-rum-core" "4.50.1" + +"@emotion/babel-plugin-jsx-pragmatic@^0.3.0": + version "0.3.0" + resolved "https://registry.yarnpkg.com/@emotion/babel-plugin-jsx-pragmatic/-/babel-plugin-jsx-pragmatic-0.3.0.tgz#12bde56c351f5981e5de66c99e62c371df6c42ca" + integrity sha512-XkRI5RdNl+f7HqpJADfTWlzZkd4tNaz2Gjzt97ZqN72jFSOqpL0grGGLdzKJ9dMQHXJBT/KZV+kphTycOblIsQ== + dependencies: + "@babel/plugin-syntax-jsx" "^7.17.12" + +"@emotion/babel-plugin@^11.12.0": + version "11.13.5" + resolved "https://registry.yarnpkg.com/@emotion/babel-plugin/-/babel-plugin-11.13.5.tgz#eab8d65dbded74e0ecfd28dc218e75607c4e7bc0" + integrity sha512-pxHCpT2ex+0q+HH91/zsdHkw/lXd468DIN2zvfvLtPKLLMo6gQj7oLObq8PhkrxOZb/gGCq03S3Z7PDhS8pduQ== + dependencies: + "@babel/helper-module-imports" "^7.16.7" + "@babel/runtime" "^7.18.3" + "@emotion/hash" "^0.9.2" + "@emotion/memoize" "^0.9.0" + "@emotion/serialize" "^1.3.3" + babel-plugin-macros "^3.1.0" convert-source-map "^1.5.0" escape-string-regexp "^4.0.0" find-root "^1.1.0" source-map "^0.5.7" - stylis "4.0.13" + stylis "4.2.0" "@emotion/babel-preset-css-prop@^11.2.0": - version "11.2.0" - resolved "https://registry.yarnpkg.com/@emotion/babel-preset-css-prop/-/babel-preset-css-prop-11.2.0.tgz#c7e945f56b2610b438f0dc8ae5253fc55488de0e" - integrity sha512-9XLQm2eLPYTho+Cx1LQTDA1rATjoAaB4O+ds55XDvoAa+Z16Hhg8y5Vihj3C8E6+ilDM8SV5A9Z6z+yj0YIRBg== + version "11.12.0" + resolved "https://registry.yarnpkg.com/@emotion/babel-preset-css-prop/-/babel-preset-css-prop-11.12.0.tgz#a3d51df9a8edb214fa23e2adeddf396a17d98c64" + integrity 
sha512-wJYhkqVvH4nbxqwmw6XEkF/IWFFRQhYXiv69p7gibbT/e4S/5bMatoukDxRVxZla7aNvpZbXnfPeeNDlFehkKA== dependencies: - "@babel/plugin-transform-react-jsx" "^7.12.1" - "@babel/runtime" "^7.7.2" - "@emotion/babel-plugin" "^11.2.0" - "@emotion/babel-plugin-jsx-pragmatic" "^0.1.5" + "@babel/plugin-transform-react-jsx" "^7.17.12" + "@babel/runtime" "^7.18.3" + "@emotion/babel-plugin" "^11.12.0" + "@emotion/babel-plugin-jsx-pragmatic" "^0.3.0" -"@emotion/hash@^0.8.0": - version "0.8.0" - resolved "https://registry.yarnpkg.com/@emotion/hash/-/hash-0.8.0.tgz#bbbff68978fefdbe68ccb533bc8cbe1d1afb5413" - integrity sha512-kBJtf7PH6aWwZ6fka3zQ0p6SBYzx4fl1LoZXE2RrnYST9Xljm7WfKJrU4g/Xr3Beg72MLrp1AWNUmuYJTL7Cow== +"@emotion/hash@^0.9.2": + version "0.9.2" + resolved "https://registry.yarnpkg.com/@emotion/hash/-/hash-0.9.2.tgz#ff9221b9f58b4dfe61e619a7788734bd63f6898b" + integrity sha512-MyqliTZGuOm3+5ZRSaaBGP3USLw6+EGykkwZns2EPC5g8jJ4z9OrdZY9apkl3+UP9+sdz76YYkwCKP5gh8iY3g== -"@emotion/memoize@^0.7.4", "@emotion/memoize@^0.7.5": - version "0.7.5" - resolved "https://registry.yarnpkg.com/@emotion/memoize/-/memoize-0.7.5.tgz#2c40f81449a4e554e9fc6396910ed4843ec2be50" - integrity sha512-igX9a37DR2ZPGYtV6suZ6whr8pTFtyHL3K/oLUotxpSVO2ASaprmAe2Dkq7tBo7CRY7MMDrAa9nuQP9/YG8FxQ== - -"@emotion/serialize@^1.0.2": - version "1.0.2" - resolved "https://registry.yarnpkg.com/@emotion/serialize/-/serialize-1.0.2.tgz#77cb21a0571c9f68eb66087754a65fa97bfcd965" - integrity sha512-95MgNJ9+/ajxU7QIAruiOAdYNjxZX7G2mhgrtDWswA21VviYIRP1R5QilZ/bDY42xiKsaktP4egJb3QdYQZi1A== - dependencies: - "@emotion/hash" "^0.8.0" - "@emotion/memoize" "^0.7.4" - "@emotion/unitless" "^0.7.5" - "@emotion/utils" "^1.0.0" +"@emotion/memoize@^0.9.0": + version "0.9.0" + resolved "https://registry.yarnpkg.com/@emotion/memoize/-/memoize-0.9.0.tgz#745969d649977776b43fc7648c556aaa462b4102" + integrity sha512-30FAj7/EoJ5mwVPOWhAyCX+FPfMDrVecJAM+Iw9NRoSl4BBAQeqj4cApHHUXOVvIPgLVDsCFoz/hGD+5QQD1GQ== + +"@emotion/serialize@^1.3.3": + version 
"1.3.3" + resolved "https://registry.yarnpkg.com/@emotion/serialize/-/serialize-1.3.3.tgz#d291531005f17d704d0463a032fe679f376509e8" + integrity sha512-EISGqt7sSNWHGI76hC7x1CksiXPahbxEOrC5RjmFRJTqLyEK9/9hZvBbiYn70dw4wuwMKiEMCUlR6ZXTSWQqxA== + dependencies: + "@emotion/hash" "^0.9.2" + "@emotion/memoize" "^0.9.0" + "@emotion/unitless" "^0.10.0" + "@emotion/utils" "^1.4.2" csstype "^3.0.2" -"@emotion/unitless@^0.7.5": - version "0.7.5" - resolved "https://registry.yarnpkg.com/@emotion/unitless/-/unitless-0.7.5.tgz#77211291c1900a700b8a78cfafda3160d76949ed" - integrity sha512-OWORNpfjMsSSUBVrRBVGECkhWcULOAJz9ZW8uK9qgxD+87M7jHRcvh/A96XXNhXTLmKcoYSQtBEX7lHMO7YRwg== +"@emotion/unitless@^0.10.0": + version "0.10.0" + resolved "https://registry.yarnpkg.com/@emotion/unitless/-/unitless-0.10.0.tgz#2af2f7c7e5150f497bdabd848ce7b218a27cf745" + integrity sha512-dFoMUuQA20zvtVTuxZww6OHoJYgrzfKM1t52mVySDJnMSEa08ruEvdYQbhvyu6soU+NeLVd3yKfTfT0NeV6qGg== -"@emotion/utils@^1.0.0": - version "1.0.0" - resolved "https://registry.yarnpkg.com/@emotion/utils/-/utils-1.0.0.tgz#abe06a83160b10570816c913990245813a2fd6af" - integrity sha512-mQC2b3XLDs6QCW+pDQDiyO/EdGZYOygE8s5N5rrzjSI4M3IejPE/JPndCBwRT9z982aqQNi6beWs1UeayrQxxA== +"@emotion/utils@^1.4.2": + version "1.4.2" + resolved "https://registry.yarnpkg.com/@emotion/utils/-/utils-1.4.2.tgz#6df6c45881fcb1c412d6688a311a98b7f59c1b52" + integrity sha512-3vLclRofFziIa3J2wDh9jjbkUz9qk5Vi3IZ/FSTKViB0k+ef0fPV7dYrUIugbgupYDx7v9ud/SjrtEP8Y4xLoA== "@fullhuman/postcss-purgecss@^4.0.3": version "4.1.3" - resolved "https://registry.yarnpkg.com/@fullhuman/postcss-purgecss/-/postcss-purgecss-4.1.3.tgz#e4eb21fc7a49257e4081c6a3a86b338618e61fce" + resolved "https://registry.npmjs.org/@fullhuman/postcss-purgecss/-/postcss-purgecss-4.1.3.tgz" integrity sha512-jqcsyfvq09VOsMXxJMPLRF6Fhg/NNltzWKnC9qtzva+QKTxerCO4esG6je7hbnmkpZtaDyPTwMBj9bzfWorsrw== dependencies: purgecss "^4.1.3" "@iarna/toml@^2.2.5": version "2.2.5" - resolved 
"https://registry.yarnpkg.com/@iarna/toml/-/toml-2.2.5.tgz#b32366c89b43c6f8cefbdefac778b9c828e3ba8c" + resolved "https://registry.npmjs.org/@iarna/toml/-/toml-2.2.5.tgz" integrity sha512-trnsAYxU3xnS1gPHPyU961coFyLkh4gAD/0zQ5mymY4yOZ+CYvsPqUbOFSw0aDM4y0tV7tiFxL/1XfXPNC6IPg== -"@jridgewell/gen-mapping@^0.3.5": - version "0.3.8" - resolved "https://registry.yarnpkg.com/@jridgewell/gen-mapping/-/gen-mapping-0.3.8.tgz#4f0e06362e01362f823d348f1872b08f666d8142" - integrity sha512-imAbBGkb+ebQyxKgzv5Hu2nmROxoDOXHh80evxdoXNOrvAnVx7zimzc1Oo5h9RlfV4vPXaE2iM5pOFbvOCClWA== +"@jridgewell/gen-mapping@^0.3.12", "@jridgewell/gen-mapping@^0.3.5": + version "0.3.13" + resolved "https://registry.yarnpkg.com/@jridgewell/gen-mapping/-/gen-mapping-0.3.13.tgz#6342a19f44347518c93e43b1ac69deb3c4656a1f" + integrity sha512-2kkt/7niJ6MgEPxF0bYdQ6etZaA+fQvDcLKckhy1yIQOzaoKjBBjSj63/aLVjYE3qhRt5dvM+uUyfCg6UKCBbA== dependencies: - "@jridgewell/set-array" "^1.2.1" - "@jridgewell/sourcemap-codec" "^1.4.10" + "@jridgewell/sourcemap-codec" "^1.5.0" + "@jridgewell/trace-mapping" "^0.3.24" + +"@jridgewell/remapping@^2.3.5": + version "2.3.5" + resolved "https://registry.yarnpkg.com/@jridgewell/remapping/-/remapping-2.3.5.tgz#375c476d1972947851ba1e15ae8f123047445aa1" + integrity sha512-LI9u/+laYG4Ds1TDKSJW2YPrIlcVYOwi2fUC6xB43lueCjgxV4lffOCZCtYFiH6TNOX+tQKXx97T4IKHbhyHEQ== + dependencies: + "@jridgewell/gen-mapping" "^0.3.5" "@jridgewell/trace-mapping" "^0.3.24" "@jridgewell/resolve-uri@^3.1.0": version "3.1.2" - resolved "https://registry.yarnpkg.com/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz#7a0ee601f60f99a20c7c7c5ff0c80388c1189bd6" + resolved "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz" integrity sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw== -"@jridgewell/set-array@^1.2.1": - version "1.2.1" - resolved 
"https://registry.yarnpkg.com/@jridgewell/set-array/-/set-array-1.2.1.tgz#558fb6472ed16a4c850b889530e6b36438c49280" - integrity sha512-R8gLRTZeyp03ymzP/6Lil/28tGeGEzhx1q2k703KGWRAI1VdvPIXdG70VJc2pAMw3NA6JKL5hhFu1sJX0Mnn/A== - -"@jridgewell/sourcemap-codec@^1.4.10", "@jridgewell/sourcemap-codec@^1.4.14": +"@jridgewell/sourcemap-codec@^1.4.14", "@jridgewell/sourcemap-codec@^1.4.15": version "1.5.0" - resolved "https://registry.yarnpkg.com/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.0.tgz#3188bcb273a414b0d215fd22a58540b989b9409a" + resolved "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.0.tgz" integrity sha512-gv3ZRaISU3fjPAgNsriBRqGWQL6quFx04YMPW/zD8XMLsU32mhCCbfbO6KZFLjvYpCZ8zyDEgqsgf+PwPaM7GQ== -"@jridgewell/trace-mapping@^0.3.24", "@jridgewell/trace-mapping@^0.3.25": - version "0.3.25" - resolved "https://registry.yarnpkg.com/@jridgewell/trace-mapping/-/trace-mapping-0.3.25.tgz#15f190e98895f3fc23276ee14bc76b675c2e50f0" - integrity sha512-vNk6aEwybGtawWmy/PzwnGDOjCkLWSD2wqvjGGAgOAwCGWySYXfYoxt00IJkTF+8Lb57DwOb3Aa0o9CApepiYQ== +"@jridgewell/sourcemap-codec@^1.5.0": + version "1.5.5" + resolved "https://registry.yarnpkg.com/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.5.tgz#6912b00d2c631c0d15ce1a7ab57cd657f2a8f8ba" + integrity sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og== + +"@jridgewell/trace-mapping@^0.3.24", "@jridgewell/trace-mapping@^0.3.28": + version "0.3.31" + resolved "https://registry.yarnpkg.com/@jridgewell/trace-mapping/-/trace-mapping-0.3.31.tgz#db15d6781c931f3a251a3dac39501c98a6082fd0" + integrity sha512-zzNR+SdQSDJzc8joaeP8QQoCQr8NuYx2dIIytl1QeBEZHJ9uW6hebsrYgbz8hJwUQao3TWCMtmfV8Nu1twOLAw== dependencies: "@jridgewell/resolve-uri" "^3.1.0" "@jridgewell/sourcemap-codec" "^1.4.14" "@nicolo-ribaudo/chokidar-2@2.1.8-no-fsevents.3": version "2.1.8-no-fsevents.3" - resolved 
"https://registry.yarnpkg.com/@nicolo-ribaudo/chokidar-2/-/chokidar-2-2.1.8-no-fsevents.3.tgz#323d72dd25103d0c4fbdce89dadf574a787b1f9b" + resolved "https://registry.npmjs.org/@nicolo-ribaudo/chokidar-2/-/chokidar-2-2.1.8-no-fsevents.3.tgz" integrity sha512-s88O1aVtXftvp5bCPB7WnmXc5IwOZZ7YPuwNPt+GtOOXpPvad1LfbmjYv+qII7zP6RU2QGnqve27dnLycEnyEQ== "@nodelib/fs.scandir@2.1.5": version "2.1.5" - resolved "https://registry.yarnpkg.com/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz#7619c2eb21b25483f6d167548b4cfd5a7488c3d5" + resolved "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz" integrity sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g== dependencies: "@nodelib/fs.stat" "2.0.5" @@ -1512,120 +1233,119 @@ "@nodelib/fs.stat@2.0.5", "@nodelib/fs.stat@^2.0.2": version "2.0.5" - resolved "https://registry.yarnpkg.com/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz#5bd262af94e9d25bd1e71b05deed44876a222e8b" + resolved "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz" integrity sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A== "@nodelib/fs.walk@^1.2.3": version "1.2.8" - resolved "https://registry.yarnpkg.com/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz#e95737e8bb6746ddedf69c556953494f196fe69a" + resolved "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz" integrity sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg== dependencies: "@nodelib/fs.scandir" "2.1.5" fastq "^1.6.0" -"@react-spring/animated@~9.4.3-beta.0": - version "9.4.3" - resolved "https://registry.yarnpkg.com/@react-spring/animated/-/animated-9.4.3.tgz#2f8d2b50dfc1975fa490ed3bc03f5ad865180866" - integrity sha512-hKKmeXPoGpJ/zrG/RC8stwW8PmMH0BbewHD8aUPLbyzD9fNvZEJ0mjKmOI0CcSwMpb43kuwY2nX3ZJVImPQCoQ== - dependencies: - "@react-spring/shared" "~9.4.3-beta.0" - "@react-spring/types" "~9.4.3-beta.0" - -"@react-spring/core@~9.4.3-beta.0": - version "9.4.3" - 
resolved "https://registry.yarnpkg.com/@react-spring/core/-/core-9.4.3.tgz#95c883fa53ff534ff882ba42f863a26a26a6a1c8" - integrity sha512-Jr6/GjHwXYxAtttcYDXOtH36krO0XGjYaSsGR6g+vOUO4y0zAPPXoAwpK6vS7Haip5fRwk7rMdNG+OzU7bB4Bg== - dependencies: - "@react-spring/animated" "~9.4.3-beta.0" - "@react-spring/rafz" "~9.4.3-beta.0" - "@react-spring/shared" "~9.4.3-beta.0" - "@react-spring/types" "~9.4.3-beta.0" - -"@react-spring/konva@~9.4.3-beta.0": - version "9.4.3" - resolved "https://registry.yarnpkg.com/@react-spring/konva/-/konva-9.4.3.tgz#ef5332fc0960fa4313ac0ab6a122fd9247b3b111" - integrity sha512-JWxx0YIwipjJTDs7q9XtArlBCTjejyAJZrbhvxmizOM6ZukUj8hcEFYU03Vt5HUTSC4WfG0rkg2O9V1EAXuzCQ== - dependencies: - "@react-spring/animated" "~9.4.3-beta.0" - "@react-spring/core" "~9.4.3-beta.0" - "@react-spring/shared" "~9.4.3-beta.0" - "@react-spring/types" "~9.4.3-beta.0" - -"@react-spring/native@~9.4.3-beta.0": - version "9.4.3" - resolved "https://registry.yarnpkg.com/@react-spring/native/-/native-9.4.3.tgz#748ee1f588c1515a76766e319aa48151308bd5ad" - integrity sha512-dfOwzSxJcbHKTNJ26pceZ7xCrqf2+L6W/U17/7aogQwGec4yf1zocWXV3QS+h0HDuY0Bk/yYa7PEy+D+HWc7Og== - dependencies: - "@react-spring/animated" "~9.4.3-beta.0" - "@react-spring/core" "~9.4.3-beta.0" - "@react-spring/shared" "~9.4.3-beta.0" - "@react-spring/types" "~9.4.3-beta.0" - -"@react-spring/rafz@~9.4.3-beta.0": - version "9.4.3" - resolved "https://registry.yarnpkg.com/@react-spring/rafz/-/rafz-9.4.3.tgz#0d578072c9692ef5ab74a3b1d49c1432dce32ab6" - integrity sha512-KnujiZNIHzXsRq1D4tVbCajl8Lx+e6vtvUk7o69KbuneSpEgil9P/x3b+hMDk8U0NHGhJjzhU7723/CNsQansA== - -"@react-spring/shared@~9.4.3-beta.0": - version "9.4.3" - resolved "https://registry.yarnpkg.com/@react-spring/shared/-/shared-9.4.3.tgz#86e03ddd47911ba89be1d0f5a6d11966e305ee04" - integrity sha512-mB1UUD/pl1LzaY0XeNWZtvJzxMa8gLQf02nY12HAz4Rukm9dFRj0jeYwQYLdfYLsGFo1ldvHNurun6hZMG7kiQ== - dependencies: - "@react-spring/rafz" "~9.4.3-beta.0" - "@react-spring/types" 
"~9.4.3-beta.0" - -"@react-spring/three@~9.4.3-beta.0": - version "9.4.3" - resolved "https://registry.yarnpkg.com/@react-spring/three/-/three-9.4.3.tgz#1836ea12f7cb7ccb4c4a1f39101f4fb17955c386" - integrity sha512-AhCPqoZZXUnzVcKal01sdYBRqkVd2iNxDMk7BGXZsQNWeqaOMaaBT/a6d3oG3wwPX6xIa9ogBtzmzEasN6HYzA== - dependencies: - "@react-spring/animated" "~9.4.3-beta.0" - "@react-spring/core" "~9.4.3-beta.0" - "@react-spring/shared" "~9.4.3-beta.0" - "@react-spring/types" "~9.4.3-beta.0" - -"@react-spring/types@~9.4.3-beta.0": - version "9.4.3" - resolved "https://registry.yarnpkg.com/@react-spring/types/-/types-9.4.3.tgz#8926d7a09812374127b1f8a904a755c7579124e6" - integrity sha512-dzJrPvUc42K2un9y6D1IsrPQO5tKsbWwUo+wsATnXjG3ePWyuDBIOMJuPe605NhIXUmPH+Vik2wMoZz06hD1uA== - -"@react-spring/web@~9.4.3-beta.0": - version "9.4.3" - resolved "https://registry.yarnpkg.com/@react-spring/web/-/web-9.4.3.tgz#b59c1491de344545590598b7fde52b607c4e5d10" - integrity sha512-llKve/uJ73JVagBAVvA74S/LfZP4oSB3XP1qmggSUNXzPZZo5ylIMrs55PxpLyxgzzihuhDU5N17ct3ATViOHw== - dependencies: - "@react-spring/animated" "~9.4.3-beta.0" - "@react-spring/core" "~9.4.3-beta.0" - "@react-spring/shared" "~9.4.3-beta.0" - "@react-spring/types" "~9.4.3-beta.0" - -"@react-spring/zdog@~9.4.3-beta.0": - version "9.4.3" - resolved "https://registry.yarnpkg.com/@react-spring/zdog/-/zdog-9.4.3.tgz#0a76564ea635ab00a1720a3843faf4f46ca3c82a" - integrity sha512-ujRJBKEWC6miwPhCwHkn13h9OfqK+Kkq49crebo5neY4kCK2efNoagQo54DwXFgbVNFJV+6GwcAZVI2ybS5L1Q== - dependencies: - "@react-spring/animated" "~9.4.3-beta.0" - "@react-spring/core" "~9.4.3-beta.0" - "@react-spring/shared" "~9.4.3-beta.0" - "@react-spring/types" "~9.4.3-beta.0" +"@react-spring/animated@~10.0.3": + version "10.0.3" + resolved "https://registry.yarnpkg.com/@react-spring/animated/-/animated-10.0.3.tgz#b42f7041a51d38f395e9ba5fb53ca68c34cd324f" + integrity sha512-7MrxADV3vaUADn2V9iYhaIL6iOWRx9nCJjYrsk2AHD2kwPr6fg7Pt0v+deX5RnCDmCKNnD6W5fasiyM8D+wzJQ== + dependencies: + 
"@react-spring/shared" "~10.0.3" + "@react-spring/types" "~10.0.3" + +"@react-spring/core@~10.0.3": + version "10.0.3" + resolved "https://registry.yarnpkg.com/@react-spring/core/-/core-10.0.3.tgz#3b4f3991f5902ce46770c2c1ef05c8e53c3a0f73" + integrity sha512-D4DwNO68oohDf/0HG2G0Uragzb9IA1oXblxrd6MZAcBcUQG2EHUWXewjdECMPLNmQvlYVyyBRH6gPxXM5DX7DQ== + dependencies: + "@react-spring/animated" "~10.0.3" + "@react-spring/shared" "~10.0.3" + "@react-spring/types" "~10.0.3" + +"@react-spring/konva@~10.0.3": + version "10.0.3" + resolved "https://registry.yarnpkg.com/@react-spring/konva/-/konva-10.0.3.tgz#6cbbaf1ee4414f51a1a3104d3637f1eef1166ffa" + integrity sha512-nA1VoC94RnGY4jhhuOln+ZSXOjfBdvwnyBcVt4ojq2JRcqNTmYv+ftfo1V3qAJlDccucdjAWlJbkQEQ9bVVcQg== + dependencies: + "@react-spring/animated" "~10.0.3" + "@react-spring/core" "~10.0.3" + "@react-spring/shared" "~10.0.3" + "@react-spring/types" "~10.0.3" + +"@react-spring/native@~10.0.3": + version "10.0.3" + resolved "https://registry.yarnpkg.com/@react-spring/native/-/native-10.0.3.tgz#19bae242cdd18e7edf51f872cca071db417de82f" + integrity sha512-ypfKsfqn+Ll3LeZCp+noFBJdJOVomIfnGjpQzpXibrfqWlPgl0Ckj9sy+U3fLGPyrbbCSw9KLvsgSwZwDCScKA== + dependencies: + "@react-spring/animated" "~10.0.3" + "@react-spring/core" "~10.0.3" + "@react-spring/shared" "~10.0.3" + "@react-spring/types" "~10.0.3" + +"@react-spring/rafz@~10.0.3": + version "10.0.3" + resolved "https://registry.yarnpkg.com/@react-spring/rafz/-/rafz-10.0.3.tgz#9b328c3992b23d6317452998670636d6b783f2c4" + integrity sha512-Ri2/xqt8OnQ2iFKkxKMSF4Nqv0LSWnxXT4jXFzBDsHgeeH/cHxTLupAWUwmV9hAGgmEhBmh5aONtj3J6R/18wg== + +"@react-spring/shared@~10.0.3": + version "10.0.3" + resolved "https://registry.yarnpkg.com/@react-spring/shared/-/shared-10.0.3.tgz#654d03c74d3277bae1a565aff981979536be6002" + integrity sha512-geCal66nrkaQzUVhPkGomylo+Jpd5VPK8tPMEDevQEfNSWAQP15swHm+MCRG4wVQrQlTi9lOzKzpRoTL3CA84Q== + dependencies: + "@react-spring/rafz" "~10.0.3" + "@react-spring/types" "~10.0.3" + 
+"@react-spring/three@~10.0.3": + version "10.0.3" + resolved "https://registry.yarnpkg.com/@react-spring/three/-/three-10.0.3.tgz#5eea7fd8cbf04cacb6565dc9ccc354c2704acef2" + integrity sha512-hZP7ChF/EwnWn+H2xuzAsRRfQdhquoBTI1HKgO6X9V8tcVCuR69qJmsA9N00CA4Nzx0bo/zwBtqONmi55Ffm5w== + dependencies: + "@react-spring/animated" "~10.0.3" + "@react-spring/core" "~10.0.3" + "@react-spring/shared" "~10.0.3" + "@react-spring/types" "~10.0.3" + +"@react-spring/types@~10.0.3": + version "10.0.3" + resolved "https://registry.yarnpkg.com/@react-spring/types/-/types-10.0.3.tgz#0c2d7a7e783a6f652bcd24cac80ed569bc2ad8d9" + integrity sha512-H5Ixkd2OuSIgHtxuHLTt7aJYfhMXKXT/rK32HPD/kSrOB6q6ooeiWAXkBy7L8F3ZxdkBb9ini9zP9UwnEFzWgQ== + +"@react-spring/web@~10.0.3": + version "10.0.3" + resolved "https://registry.npmjs.org/@react-spring/web/-/web-10.0.3.tgz" + integrity sha512-ndU+kWY81rHsT7gTFtCJ6mrVhaJ6grFmgTnENipzmKqot4HGf5smPNK+cZZJqoGeDsj9ZsiWPW4geT/NyD484A== + dependencies: + "@react-spring/animated" "~10.0.3" + "@react-spring/core" "~10.0.3" + "@react-spring/shared" "~10.0.3" + "@react-spring/types" "~10.0.3" + +"@react-spring/zdog@~10.0.3": + version "10.0.3" + resolved "https://registry.npmjs.org/@react-spring/zdog/-/zdog-10.0.3.tgz" + integrity sha512-YCJPhPGdLLiUnM++u/1qd/7b5p70zZWdPhDOBC8TAr/zlQABZR4ivYlv5JAiS/oPLSFAwlTGFkTxD4M1sDVd0g== + dependencies: + "@react-spring/animated" "~10.0.3" + "@react-spring/core" "~10.0.3" + "@react-spring/shared" "~10.0.3" + "@react-spring/types" "~10.0.3" "@ryangjchandler/spruce@^2.6.3": version "2.7.1" - resolved "https://registry.yarnpkg.com/@ryangjchandler/spruce/-/spruce-2.7.1.tgz#d61819750817567b1d12f9ce98e79910147c295a" + resolved "https://registry.npmjs.org/@ryangjchandler/spruce/-/spruce-2.7.1.tgz" integrity sha512-UfSt3iYDCnErtK6p8OntDIbXYQMmzkozaLONckIqCkSgMSlqPd2VjHB/0IaBcFrbRHhhxHMOvieuiUoYg//s/g== dependencies: alpinejs "^2.5" "@tailwindcss/forms@^0.3.2": version "0.3.4" - resolved 
"https://registry.yarnpkg.com/@tailwindcss/forms/-/forms-0.3.4.tgz#e4939dc16450eccf4fd2029770096f38cbb556d4" + resolved "https://registry.npmjs.org/@tailwindcss/forms/-/forms-0.3.4.tgz" integrity sha512-vlAoBifNJUkagB+PAdW4aHMe4pKmSLroH398UPgIogBFc91D2VlHUxe4pjxQhiJl0Nfw53sHSJSQBSTQBZP3vA== dependencies: mini-svg-data-uri "^1.2.3" "@tailwindcss/typography@^0.4.1": version "0.4.1" - resolved "https://registry.yarnpkg.com/@tailwindcss/typography/-/typography-0.4.1.tgz#51ddbceea6a0ee9902c649dbe58871c81a831212" + resolved "https://registry.npmjs.org/@tailwindcss/typography/-/typography-0.4.1.tgz" integrity sha512-ovPPLUhs7zAIJfr0y1dbGlyCuPhpuv/jpBoFgqAc658DWGGrOBWBMpAWLw2KlzbNeVk4YBJMzue1ekvIbdw6XA== dependencies: lodash.castarray "^4.4.0" @@ -1634,20 +1354,21 @@ lodash.uniq "^4.5.0" "@types/dotenv-defaults@^2.0.1": - version "2.0.1" - resolved "https://registry.yarnpkg.com/@types/dotenv-defaults/-/dotenv-defaults-2.0.1.tgz#773615b6bb1f281365f6b3b650390eea64eb8b69" - integrity sha512-pH/45waKTwuwGszC893XdP9Dnj4LFtjranprQ7v3j6j4HdDyrhSF1ybHHWxZ2qp6KIrabN7zG3HeV4Y7OUVnww== + version "2.0.4" + resolved "https://registry.npmjs.org/@types/dotenv-defaults/-/dotenv-defaults-2.0.4.tgz" + integrity sha512-+KwZaAMQkt0uk5IH3F2zqjUsZqEi8ro0qEpi4dnYwpNfG3Mejf3PlCQooqMrICWkSg3gq9jgFCeAwFrbhDQmbQ== dependencies: + "@types/node" "*" dotenv "^8.2.0" "@types/geojson@*": version "7946.0.8" - resolved "https://registry.yarnpkg.com/@types/geojson/-/geojson-7946.0.8.tgz#30744afdb385e2945e22f3b033f897f76b1f12ca" + resolved "https://registry.npmjs.org/@types/geojson/-/geojson-7946.0.8.tgz" integrity sha512-1rkryxURpr6aWP7R786/UQOkJ3PcpQiWkAXBmdWc7ryFWqN6a4xfK7BtjXvFBKO9LjQ+MWQSWxYeZX1OApnArA== "@types/glob@^7.1.3": version "7.2.0" - resolved "https://registry.yarnpkg.com/@types/glob/-/glob-7.2.0.tgz#bc1b5bf3aa92f25bd5dd39f35c57361bdce5b2eb" + resolved "https://registry.npmjs.org/@types/glob/-/glob-7.2.0.tgz" integrity 
sha512-ZUxbzKl0IfJILTS6t7ip5fQQM/J3TJYubDm3nMbgubNNYS62eXeUpoLUC8/7fJNiFYHTrGPQn7hspDUzIHX3UA== dependencies: "@types/minimatch" "*" @@ -1655,61 +1376,56 @@ "@types/js-cookie@^2.2.6": version "2.2.7" - resolved "https://registry.yarnpkg.com/@types/js-cookie/-/js-cookie-2.2.7.tgz#226a9e31680835a6188e887f3988e60c04d3f6a3" + resolved "https://registry.npmjs.org/@types/js-cookie/-/js-cookie-2.2.7.tgz" integrity sha512-aLkWa0C0vO5b4Sr798E26QgOkss68Un0bLjs7u9qxzPT5CG+8DuNTffWES58YzJs3hrVAOs1wonycqEBqNJubA== "@types/lodash.chunk@^4.2.6": - version "4.2.6" - resolved "https://registry.yarnpkg.com/@types/lodash.chunk/-/lodash.chunk-4.2.6.tgz#9d35f05360b0298715d7f3d9efb34dd4f77e5d2a" - integrity sha512-SPlusB7jxXyGcTXYcUdWr7WmhArO/rmTq54VN88iKMxGUhyg79I4Q8n4riGn3kjaTjOJrVlHhxgX/d7woak5BQ== + version "4.2.9" + resolved "https://registry.yarnpkg.com/@types/lodash.chunk/-/lodash.chunk-4.2.9.tgz#60da44c404dfa8b01b426034c1183e5eb9b09727" + integrity sha512-Z9VtFUSnmT0No/QymqfG9AGbfOA4O5qB/uyP89xeZBqDAsKsB4gQFTqt7d0pHjbsTwtQ4yZObQVHuKlSOhIJ5Q== dependencies: "@types/lodash" "*" "@types/lodash@*": version "4.14.178" - resolved "https://registry.yarnpkg.com/@types/lodash/-/lodash-4.14.178.tgz#341f6d2247db528d4a13ddbb374bcdc80406f4f8" + resolved "https://registry.npmjs.org/@types/lodash/-/lodash-4.14.178.tgz" integrity sha512-0d5Wd09ItQWH1qFbEyQ7oTQ3GZrMfth5JkbN3EvTKLXcHLRDSXeLnlvlOn0wvxVIwK5o2M8JzP/OWz7T3NRsbw== "@types/minimatch@*": - version "3.0.5" - resolved "https://registry.yarnpkg.com/@types/minimatch/-/minimatch-3.0.5.tgz#1001cc5e6a3704b83c236027e77f2f58ea010f40" - integrity sha512-Klz949h02Gz2uZCMGwDUSDS1YBlTdDDgbWHi+81l29tQALUtvz4rAYi5uoVhE5Lagoq6DeqAUlbrHvW/mXDgdQ== - -"@types/node@*": - version "17.0.18" - resolved "https://registry.yarnpkg.com/@types/node/-/node-17.0.18.tgz#3b4fed5cfb58010e3a2be4b6e74615e4847f1074" - integrity sha512-eKj4f/BsN/qcculZiRSujogjvp5O/k4lOW5m35NopjZM/QwLOR075a8pJW5hD+Rtdm2DaCVPENS6KtSQnUD6BA== + version "5.1.2" + resolved 
"https://registry.npmjs.org/@types/minimatch/-/minimatch-5.1.2.tgz" + integrity sha512-K0VQKziLUWkVKiRVrx4a40iPaxTUefQmjtkQofBkYRcoaaL/8rhwDWww9qWbrgicNOgnpIsMxyNIUM4+n6dUIA== -"@types/node@^16.0.1": - version "16.11.25" - resolved "https://registry.yarnpkg.com/@types/node/-/node-16.11.25.tgz#bb812b58bacbd060ce85921250d8b4ca553cd4a2" - integrity sha512-NrTwfD7L1RTc2qrHQD4RTTy4p0CO2LatKBEKEds3CaVuhoM/+DJzmWZl5f+ikR8cm8F5mfJxK+9rQq07gRiSjQ== +"@types/node@*", "@types/node@^16.0.1": + version "16.18.126" + resolved "https://registry.npmjs.org/@types/node/-/node-16.18.126.tgz" + integrity sha512-OTcgaiwfGFBKacvfwuHzzn1KLxH/er8mluiy8/uM3sGXHaRe73RrSIj01jow9t4kJEW633Ov+cOexXeiApTyAw== "@types/parse-json@^4.0.0": version "4.0.0" - resolved "https://registry.yarnpkg.com/@types/parse-json/-/parse-json-4.0.0.tgz#2f8bb441434d163b35fb8ffdccd7138927ffb8c0" + resolved "https://registry.npmjs.org/@types/parse-json/-/parse-json-4.0.0.tgz" integrity sha512-//oorEZjL6sbPcKUaCdIGlIUeH26mgzimjBB77G6XRgnDl/L5wOnpyBGRe/Mmf5CVW3PwEBE1NjiMZ/ssFh4wA== "@types/topojson-specification@^1.0.1": - version "1.0.2" - resolved "https://registry.yarnpkg.com/@types/topojson-specification/-/topojson-specification-1.0.2.tgz#e20df2c5a611e31f51d606473dbaccc1d90461c8" - integrity sha512-SGc1NdX9g3UGDp6S+p+uyG+Z8CehS51sUJ9bejA25Xgn2kkAguILk6J9nxXK+0M/mbTBN7ypMA7+4HVLNMJ8ag== + version "1.0.5" + resolved "https://registry.yarnpkg.com/@types/topojson-specification/-/topojson-specification-1.0.5.tgz#bf0009b2e0debb2d97237b124c00b9ea92570375" + integrity sha512-C7KvcQh+C2nr6Y2Ub4YfgvWvWCgP2nOQMtfhlnwsRL4pYmmwzBS7HclGiS87eQfDOU/DLQpX6GEscviaz4yLIQ== dependencies: "@types/geojson" "*" "@types/triple-beam@^1.3.2": version "1.3.5" - resolved "https://registry.yarnpkg.com/@types/triple-beam/-/triple-beam-1.3.5.tgz#74fef9ffbaa198eb8b588be029f38b00299caa2c" + resolved "https://registry.npmjs.org/@types/triple-beam/-/triple-beam-1.3.5.tgz" integrity 
sha512-6WaYesThRMCl19iryMYP7/x2OVgCtbIVflDGFpWnb9irXI3UjYE4AzmYuiUKY1AJstGijoY+MgUszMgRxIYTYw== "@xobotyi/scrollbar-width@^1.9.5": version "1.9.5" - resolved "https://registry.yarnpkg.com/@xobotyi/scrollbar-width/-/scrollbar-width-1.9.5.tgz#80224a6919272f405b87913ca13b92929bdf3c4d" + resolved "https://registry.npmjs.org/@xobotyi/scrollbar-width/-/scrollbar-width-1.9.5.tgz" integrity sha512-N8tkAACJx2ww8vFMneJmaAgmjAG1tnVBZJRLRcx061tmsLRZHSEZSLuGWnwPtunsSLvSqXQ2wfp7Mgqg1I+2dQ== -acorn-node@^1.6.1: +acorn-node@^1.8.2: version "1.8.2" - resolved "https://registry.yarnpkg.com/acorn-node/-/acorn-node-1.8.2.tgz#114c95d64539e53dede23de8b9d96df7c7ae2af8" + resolved "https://registry.npmjs.org/acorn-node/-/acorn-node-1.8.2.tgz" integrity sha512-8mt+fslDufLYntIoPAaIMUe/lrbrehIiwmR3t2k9LljIzoigEPF27eLk2hy8zSGzmR/ogr7zbRKINMo1u0yh5A== dependencies: acorn "^7.0.0" @@ -1718,41 +1434,34 @@ acorn-node@^1.6.1: acorn-walk@^7.0.0: version "7.2.0" - resolved "https://registry.yarnpkg.com/acorn-walk/-/acorn-walk-7.2.0.tgz#0de889a601203909b0fbe07b8938dc21d2e967bc" + resolved "https://registry.npmjs.org/acorn-walk/-/acorn-walk-7.2.0.tgz" integrity sha512-OPdCF6GsMIP+Az+aWfAAOEt2/+iVDKE7oy6lJ098aoe59oAmK76qV6Gw60SbZ8jHuG2wH058GF4pLFbYamYrVA== acorn@^7.0.0: version "7.4.1" - resolved "https://registry.yarnpkg.com/acorn/-/acorn-7.4.1.tgz#feaed255973d2e77555b83dbc08851a6c63520fa" + resolved "https://registry.npmjs.org/acorn/-/acorn-7.4.1.tgz" integrity sha512-nQyp0o1/mNdbTO1PO6kHkwSrmgZ0MT/jCCpNiwbUjGoRN4dlBhqJtoQuCnEOKzgTVwg0ZWiCoQy6SxMebQVh8A== alpinejs@^2.5, alpinejs@^2.8.2: version "2.8.2" - resolved "https://registry.yarnpkg.com/alpinejs/-/alpinejs-2.8.2.tgz#b14ec21ae3cd78dcee4aed0a78ed0f01b676dac4" + resolved "https://registry.npmjs.org/alpinejs/-/alpinejs-2.8.2.tgz" integrity sha512-5yOUtckn4CBp0qsHpo2qgjZyZit84uXvHbB7NJ27sn4FA6UlFl2i9PGUAdTXkcbFvvxDJBM+zpOD8RuNYFvQAw== ansi-regex@^5.0.1: version "5.0.1" - resolved 
"https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-5.0.1.tgz#082cb2c89c9fe8659a311a53bd6a4dc5301db304" + resolved "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz" integrity sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ== -ansi-styles@^3.2.1: - version "3.2.1" - resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-3.2.1.tgz#41fbb20243e50b12be0f04b8dedbf07520ce841d" - integrity sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA== - dependencies: - color-convert "^1.9.0" - ansi-styles@^4.0.0, ansi-styles@^4.1.0: version "4.3.0" - resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-4.3.0.tgz#edd803628ae71c04c85ae7a0906edad34b648937" + resolved "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz" integrity sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg== dependencies: color-convert "^2.0.1" anymatch@~3.1.2: version "3.1.2" - resolved "https://registry.yarnpkg.com/anymatch/-/anymatch-3.1.2.tgz#c0557c096af32f106198f4f4e2a383537e378716" + resolved "https://registry.npmjs.org/anymatch/-/anymatch-3.1.2.tgz" integrity sha512-P43ePfOAIupkguHUycrc4qJ9kz8ZiuOUijaETwX7THt0Y/GNK7v0aa8rY816xWjZ7rJdA5XdMcpVFTKMq+RvWg== dependencies: normalize-path "^3.0.0" @@ -1760,93 +1469,92 @@ anymatch@~3.1.2: arg@^4.1.0: version "4.1.3" - resolved "https://registry.yarnpkg.com/arg/-/arg-4.1.3.tgz#269fc7ad5b8e42cb63c896d5666017261c144089" + resolved "https://registry.npmjs.org/arg/-/arg-4.1.3.tgz" integrity sha512-58S9QDqG0Xx27YwPSt9fJxivjYl432YCwfDMfZ+71RAqUrZef7LrKQZ3LHLOwCS4FLNBplP533Zx895SeOCHvA== arg@^5.0.1: - version "5.0.1" - resolved "https://registry.yarnpkg.com/arg/-/arg-5.0.1.tgz#eb0c9a8f77786cad2af8ff2b862899842d7b6adb" - integrity sha512-e0hDa9H2Z9AwFkk2qDlwhoMYE4eToKarchkQHovNdLTCYMHZHeRjI71crOh+dio4K6u1IcwubQqo79Ga4CyAQA== + version "5.0.2" + resolved 
"https://registry.yarnpkg.com/arg/-/arg-5.0.2.tgz#c81433cc427c92c4dcf4865142dbca6f15acd59c" + integrity sha512-PYjyFOLKQ9y57JvQ6QLo8dAgNqswh8M1RMJYdQduT6xbWSgK36P/Z/v+p888pM69jMMfS8Xd8F6I1kQ/I9HUGg== array-union@^2.1.0: version "2.1.0" - resolved "https://registry.yarnpkg.com/array-union/-/array-union-2.1.0.tgz#b798420adbeb1de828d84acd8a2e23d3efe85e8d" + resolved "https://registry.npmjs.org/array-union/-/array-union-2.1.0.tgz" integrity sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw== async@^3.2.3: version "3.2.6" - resolved "https://registry.yarnpkg.com/async/-/async-3.2.6.tgz#1b0728e14929d51b85b449b7f06e27c1145e38ce" + resolved "https://registry.npmjs.org/async/-/async-3.2.6.tgz" integrity sha512-htCUDlxyyCLMgaM3xXg0C0LW2xqfuQ6p05pCEIsXuyQ+a1koYKTuBMzRNwmybfLgvJDMd0r1LTn4+E0Ti6C2AA== asynckit@^0.4.0: version "0.4.0" - resolved "https://registry.yarnpkg.com/asynckit/-/asynckit-0.4.0.tgz#c79ed97f7f34cb8f2ba1bc9790bcc366474b4b79" + resolved "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz" integrity sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q== at-least-node@^1.0.0: version "1.0.0" - resolved "https://registry.yarnpkg.com/at-least-node/-/at-least-node-1.0.0.tgz#602cd4b46e844ad4effc92a8011a3c46e0238dc2" + resolved "https://registry.npmjs.org/at-least-node/-/at-least-node-1.0.0.tgz" integrity sha512-+q/t7Ekv1EDY2l6Gda6LLiX14rU9TV20Wa3ofeQmwPFZbOMo9DXrLbOjFaaclkXKWidIaopwAObQDqwWtGUjqg== autoprefixer@^10.2.5: - version "10.4.2" - resolved "https://registry.yarnpkg.com/autoprefixer/-/autoprefixer-10.4.2.tgz#25e1df09a31a9fba5c40b578936b90d35c9d4d3b" - integrity sha512-9fOPpHKuDW1w/0EKfRmVnxTDt8166MAnLI3mgZ1JCnhNtYWxcJ6Ud5CO/AVOZi/AvFa8DY9RTy3h3+tFBlrrdQ== - dependencies: - browserslist "^4.19.1" - caniuse-lite "^1.0.30001297" - fraction.js "^4.1.2" - normalize-range "^0.1.2" - picocolors "^1.0.0" + version "10.4.23" + resolved 
"https://registry.yarnpkg.com/autoprefixer/-/autoprefixer-10.4.23.tgz#c6aa6db8e7376fcd900f9fd79d143ceebad8c4e6" + integrity sha512-YYTXSFulfwytnjAPlw8QHncHJmlvFKtczb8InXaAx9Q0LbfDnfEYDE55omerIJKihhmU61Ft+cAOSzQVaBUmeA== + dependencies: + browserslist "^4.28.1" + caniuse-lite "^1.0.30001760" + fraction.js "^5.3.4" + picocolors "^1.1.1" postcss-value-parser "^4.2.0" -axios@^1.6.0, axios@^1.7.2: - version "1.12.0" - resolved "https://registry.yarnpkg.com/axios/-/axios-1.12.0.tgz#11248459be05a5ee493485628fa0e4323d0abfc3" - integrity sha512-oXTDccv8PcfjZmPGlWsPSwtOJCZ/b6W5jAMCNcfwJbCzDckwG0jrYJFaWH1yvivfCXjVzV/SPDEhMB3Q+DSurg== +axios@^1.6.0, axios@^1.8.4: + version "1.13.2" + resolved "https://registry.yarnpkg.com/axios/-/axios-1.13.2.tgz#9ada120b7b5ab24509553ec3e40123521117f687" + integrity sha512-VPk9ebNqPcy5lRGuSlKx752IlDatOjT9paPlm8A7yOuW2Fbvp4X3JznJtT4f0GzGLLiWE9W8onz51SqLYwzGaA== dependencies: follow-redirects "^1.15.6" form-data "^4.0.4" proxy-from-env "^1.1.0" -babel-plugin-macros@^2.6.1: - version "2.8.0" - resolved "https://registry.yarnpkg.com/babel-plugin-macros/-/babel-plugin-macros-2.8.0.tgz#0f958a7cc6556b1e65344465d99111a1e5e10138" - integrity sha512-SEP5kJpfGYqYKpBrj5XU3ahw5p5GOHJ0U5ssOSQ/WBVdwkD2Dzlce95exQTs3jOVWPPKLBN2rlEWkCK7dSmLvg== +babel-plugin-macros@^3.1.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/babel-plugin-macros/-/babel-plugin-macros-3.1.0.tgz#9ef6dc74deb934b4db344dc973ee851d148c50c1" + integrity sha512-Cg7TFGpIr01vOQNODXOOaGz2NpCU5gl8x1qJFbb6hbZxR7XrcE2vtbAsTAbJ7/xwJtUuJEw8K8Zr/AE0LHlesg== dependencies: - "@babel/runtime" "^7.7.2" - cosmiconfig "^6.0.0" - resolve "^1.12.0" + "@babel/runtime" "^7.12.5" + cosmiconfig "^7.0.0" + resolve "^1.19.0" babel-plugin-polyfill-corejs2@^0.3.0: version "0.3.3" - resolved "https://registry.yarnpkg.com/babel-plugin-polyfill-corejs2/-/babel-plugin-polyfill-corejs2-0.3.3.tgz#5d1bd3836d0a19e1b84bbf2d9640ccb6f951c122" + resolved 
"https://registry.npmjs.org/babel-plugin-polyfill-corejs2/-/babel-plugin-polyfill-corejs2-0.3.3.tgz" integrity sha512-8hOdmFYFSZhqg2C/JgLUQ+t52o5nirNwaWM2B9LWteozwIvM14VSwdsCAUET10qT+kmySAlseadmfeeSWFCy+Q== dependencies: "@babel/compat-data" "^7.17.7" "@babel/helper-define-polyfill-provider" "^0.3.3" semver "^6.1.1" -babel-plugin-polyfill-corejs2@^0.4.10: - version "0.4.12" - resolved "https://registry.yarnpkg.com/babel-plugin-polyfill-corejs2/-/babel-plugin-polyfill-corejs2-0.4.12.tgz#ca55bbec8ab0edeeef3d7b8ffd75322e210879a9" - integrity sha512-CPWT6BwvhrTO2d8QVorhTCQw9Y43zOu7G9HigcfxvepOU6b8o3tcWad6oVgZIsZCTt42FFv97aA7ZJsbM4+8og== +babel-plugin-polyfill-corejs2@^0.4.14: + version "0.4.14" + resolved "https://registry.yarnpkg.com/babel-plugin-polyfill-corejs2/-/babel-plugin-polyfill-corejs2-0.4.14.tgz#8101b82b769c568835611542488d463395c2ef8f" + integrity sha512-Co2Y9wX854ts6U8gAAPXfn0GmAyctHuK8n0Yhfjd6t30g7yvKjspvvOo9yG+z52PZRgFErt7Ka2pYnXCjLKEpg== dependencies: - "@babel/compat-data" "^7.22.6" - "@babel/helper-define-polyfill-provider" "^0.6.3" + "@babel/compat-data" "^7.27.7" + "@babel/helper-define-polyfill-provider" "^0.6.5" semver "^6.3.1" -babel-plugin-polyfill-corejs3@^0.10.6: - version "0.10.6" - resolved "https://registry.yarnpkg.com/babel-plugin-polyfill-corejs3/-/babel-plugin-polyfill-corejs3-0.10.6.tgz#2deda57caef50f59c525aeb4964d3b2f867710c7" - integrity sha512-b37+KR2i/khY5sKmWNVQAnitvquQbNdWy6lJdsr0kmquCKEEUgMKK4SboVM3HtfnZilfjr4MMQ7vY58FVWDtIA== +babel-plugin-polyfill-corejs3@^0.13.0: + version "0.13.0" + resolved "https://registry.yarnpkg.com/babel-plugin-polyfill-corejs3/-/babel-plugin-polyfill-corejs3-0.13.0.tgz#bb7f6aeef7addff17f7602a08a6d19a128c30164" + integrity sha512-U+GNwMdSFgzVmfhNm8GJUX88AadB3uo9KpJqS3FaqNIPKgySuvMb+bHPsOmmuWyIcuqZj/pzt1RUIUZns4y2+A== dependencies: - "@babel/helper-define-polyfill-provider" "^0.6.2" - core-js-compat "^3.38.0" + "@babel/helper-define-polyfill-provider" "^0.6.5" + core-js-compat "^3.43.0" 
babel-plugin-polyfill-corejs3@^0.5.0: version "0.5.3" - resolved "https://registry.yarnpkg.com/babel-plugin-polyfill-corejs3/-/babel-plugin-polyfill-corejs3-0.5.3.tgz#d7e09c9a899079d71a8b670c6181af56ec19c5c7" + resolved "https://registry.npmjs.org/babel-plugin-polyfill-corejs3/-/babel-plugin-polyfill-corejs3-0.5.3.tgz" integrity sha512-zKsXDh0XjnrUEW0mxIHLfjBfnXSMr5Q/goMe/fxpQnLm07mcOZiIZHBNWCMx60HmdvjxfXcalac0tfFg0wqxyw== dependencies: "@babel/helper-define-polyfill-provider" "^0.3.2" @@ -1854,26 +1562,26 @@ babel-plugin-polyfill-corejs3@^0.5.0: babel-plugin-polyfill-regenerator@^0.3.0: version "0.3.1" - resolved "https://registry.yarnpkg.com/babel-plugin-polyfill-regenerator/-/babel-plugin-polyfill-regenerator-0.3.1.tgz#2c0678ea47c75c8cc2fbb1852278d8fb68233990" + resolved "https://registry.npmjs.org/babel-plugin-polyfill-regenerator/-/babel-plugin-polyfill-regenerator-0.3.1.tgz" integrity sha512-Y2B06tvgHYt1x0yz17jGkGeeMr5FeKUu+ASJ+N6nB5lQ8Dapfg42i0OVrf8PNGJ3zKL4A23snMi1IRwrqqND7A== dependencies: "@babel/helper-define-polyfill-provider" "^0.3.1" -babel-plugin-polyfill-regenerator@^0.6.1: - version "0.6.3" - resolved "https://registry.yarnpkg.com/babel-plugin-polyfill-regenerator/-/babel-plugin-polyfill-regenerator-0.6.3.tgz#abeb1f3f1c762eace37587f42548b08b57789bc8" - integrity sha512-LiWSbl4CRSIa5x/JAU6jZiG9eit9w6mz+yVMFwDE83LAWvt0AfGBoZ7HS/mkhrKuh2ZlzfVZYKoLjXdqw6Yt7Q== +babel-plugin-polyfill-regenerator@^0.6.5: + version "0.6.5" + resolved "https://registry.yarnpkg.com/babel-plugin-polyfill-regenerator/-/babel-plugin-polyfill-regenerator-0.6.5.tgz#32752e38ab6f6767b92650347bf26a31b16ae8c5" + integrity sha512-ISqQ2frbiNU9vIJkzg7dlPpznPZ4jOiUQ1uSmB0fEHeowtN3COYRsXr/xexn64NpU13P06jc/L5TgiJXOgrbEg== dependencies: - "@babel/helper-define-polyfill-provider" "^0.6.3" + "@babel/helper-define-polyfill-provider" "^0.6.5" babel-plugin-react-require@^3.1.3: version "3.1.3" - resolved 
"https://registry.yarnpkg.com/babel-plugin-react-require/-/babel-plugin-react-require-3.1.3.tgz#ba3d7305b044a90c35c32c5a9ab943fd68e1638d" + resolved "https://registry.npmjs.org/babel-plugin-react-require/-/babel-plugin-react-require-3.1.3.tgz" integrity sha512-kDXhW2iPTL81x4Ye2aUMdEXQ56JP0sBJmRQRXJPH5FsNB7fOc/YCsHTqHv8IovPyw9Rk07gdd7MVUz8tUmRBCA== babel-preset-next@^1.4.0: version "1.4.0" - resolved "https://registry.yarnpkg.com/babel-preset-next/-/babel-preset-next-1.4.0.tgz#6f20007befb6d888be315a64d772e0d7101b71ca" + resolved "https://registry.npmjs.org/babel-preset-next/-/babel-preset-next-1.4.0.tgz" integrity sha512-+86BkjiV3eGyXg61QLE33KCtXna/ZGoYxGDFhtr9Nqd2tdt+gLIqQrz0NXtZW2vU5RMVISqZEKhrxPK/tFOWEg== dependencies: "@babel/plugin-proposal-decorators" "^7.10.1" @@ -1885,22 +1593,27 @@ babel-preset-next@^1.4.0: balanced-match@^1.0.0: version "1.0.2" - resolved "https://registry.yarnpkg.com/balanced-match/-/balanced-match-1.0.2.tgz#e83e3a7e3f300b34cb9d87f615fa0cbf357690ee" + resolved "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz" integrity sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw== +baseline-browser-mapping@^2.9.0: + version "2.9.9" + resolved "https://registry.yarnpkg.com/baseline-browser-mapping/-/baseline-browser-mapping-2.9.9.tgz#68c17013c33ba9e8264c5f2ae107d506228428ee" + integrity sha512-V8fbOCSeOFvlDj7LLChUcqbZrdKD9RU/VR260piF1790vT0mfLSwGc/Qzxv3IqiTukOpNtItePa0HBpMAj7MDg== + binary-extensions@^2.0.0: version "2.2.0" - resolved "https://registry.yarnpkg.com/binary-extensions/-/binary-extensions-2.2.0.tgz#75f502eeaf9ffde42fc98829645be4ea76bd9e2d" + resolved "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.2.0.tgz" integrity sha512-jDctJ/IVQbZoJykoeHbhXpOlNBqGNcwXJKJog42E5HDPUwQTSdjCHdihjj0DlnheQ7blbT6dHOafNAiS8ooQKA== boolbase@^1.0.0: version "1.0.0" - resolved "https://registry.yarnpkg.com/boolbase/-/boolbase-1.0.0.tgz#68dff5fbe60c51eb37725ea9e3ed310dcc1e776e" 
+ resolved "https://registry.npmjs.org/boolbase/-/boolbase-1.0.0.tgz" integrity sha1-aN/1++YMUes3cl6p4+0xDcwed24= brace-expansion@^1.1.7: version "1.1.12" - resolved "https://registry.yarnpkg.com/brace-expansion/-/brace-expansion-1.1.12.tgz#ab9b454466e5a8cc3a187beaad580412a9c5b843" + resolved "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz" integrity sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg== dependencies: balanced-match "^1.0.0" @@ -1908,34 +1621,35 @@ brace-expansion@^1.1.7: braces@^3.0.3, braces@~3.0.2: version "3.0.3" - resolved "https://registry.yarnpkg.com/braces/-/braces-3.0.3.tgz#490332f40919452272d55a8480adc0c441358789" + resolved "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz" integrity sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA== dependencies: fill-range "^7.1.1" -browserslist@^4.19.1, browserslist@^4.24.0, browserslist@^4.24.3: - version "4.24.4" - resolved "https://registry.yarnpkg.com/browserslist/-/browserslist-4.24.4.tgz#c6b2865a3f08bcb860a0e827389003b9fe686e4b" - integrity sha512-KDi1Ny1gSePi1vm0q4oxSF8b4DR44GF4BbmS2YdhPLOEqd8pDviZOGH/GsmRwoWJ2+5Lr085X7naowMwKHDG1A== +browserslist@^4.24.0, browserslist@^4.28.0, browserslist@^4.28.1: + version "4.28.1" + resolved "https://registry.yarnpkg.com/browserslist/-/browserslist-4.28.1.tgz#7f534594628c53c63101079e27e40de490456a95" + integrity sha512-ZC5Bd0LgJXgwGqUknZY/vkUQ04r8NXnJZ3yYi4vDmSiZmC/pdSN0NbNRPxZpbtO4uAfDUAFffO8IZoM3Gj8IkA== dependencies: - caniuse-lite "^1.0.30001688" - electron-to-chromium "^1.5.73" - node-releases "^2.0.19" - update-browserslist-db "^1.1.1" + baseline-browser-mapping "^2.9.0" + caniuse-lite "^1.0.30001759" + electron-to-chromium "^1.5.263" + node-releases "^2.0.27" + update-browserslist-db "^1.2.0" buffer-from@^1.0.0: version "1.1.2" - resolved 
"https://registry.yarnpkg.com/buffer-from/-/buffer-from-1.1.2.tgz#2b146a6fd72e80b4f55d255f35ed59a3a9a41bd5" + resolved "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.2.tgz" integrity sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ== bytes@^3.0.0: version "3.1.2" - resolved "https://registry.yarnpkg.com/bytes/-/bytes-3.1.2.tgz#8b0beeb98605adf1b128fa4386403c009e0221a5" + resolved "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz" integrity sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg== call-bind-apply-helpers@^1.0.1, call-bind-apply-helpers@^1.0.2: version "1.0.2" - resolved "https://registry.yarnpkg.com/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz#4b5428c222be985d79c3d82657479dbe0b59b2d6" + resolved "https://registry.npmjs.org/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz" integrity sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ== dependencies: es-errors "^1.3.0" @@ -1943,36 +1657,22 @@ call-bind-apply-helpers@^1.0.1, call-bind-apply-helpers@^1.0.2: callsites@^3.0.0: version "3.1.0" - resolved "https://registry.yarnpkg.com/callsites/-/callsites-3.1.0.tgz#b3630abd8943432f54b3f0519238e33cd7df2f73" + resolved "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz" integrity sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ== camelcase-css@^2.0.1: version "2.0.1" - resolved "https://registry.yarnpkg.com/camelcase-css/-/camelcase-css-2.0.1.tgz#ee978f6947914cc30c6b44741b6ed1df7f043fd5" + resolved "https://registry.npmjs.org/camelcase-css/-/camelcase-css-2.0.1.tgz" integrity sha512-QOSvevhslijgYwRx6Rv7zKdMF8lbRmx+uQGx2+vDc+KI/eBnsy9kit5aj23AgGu3pa4t9AgwbnXWqS+iOY+2aA== -caniuse-lite@^1.0.30001297: - version "1.0.30001312" - resolved 
"https://registry.yarnpkg.com/caniuse-lite/-/caniuse-lite-1.0.30001312.tgz#e11eba4b87e24d22697dae05455d5aea28550d5f" - integrity sha512-Wiz1Psk2MEK0pX3rUzWaunLTZzqS2JYZFzNKqAiJGiuxIjRPLgV6+VDPOg6lQOUxmDwhTlh198JsTTi8Hzw6aQ== - -caniuse-lite@^1.0.30001688: - version "1.0.30001697" - resolved "https://registry.yarnpkg.com/caniuse-lite/-/caniuse-lite-1.0.30001697.tgz#040bbbb54463c4b4b3377c716b34a322d16e6fc7" - integrity sha512-GwNPlWJin8E+d7Gxq96jxM6w0w+VFeyyXRsjU58emtkYqnbwHqXm5uT2uCmO0RQE9htWknOP4xtBlLmM/gWxvQ== - -chalk@^2.4.2: - version "2.4.2" - resolved "https://registry.yarnpkg.com/chalk/-/chalk-2.4.2.tgz#cd42541677a54333cf541a49108c1432b44c9424" - integrity sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ== - dependencies: - ansi-styles "^3.2.1" - escape-string-regexp "^1.0.5" - supports-color "^5.3.0" +caniuse-lite@^1.0.30001759, caniuse-lite@^1.0.30001760: + version "1.0.30001760" + resolved "https://registry.yarnpkg.com/caniuse-lite/-/caniuse-lite-1.0.30001760.tgz#bdd1960fafedf8d5f04ff16e81460506ff9b798f" + integrity sha512-7AAMPcueWELt1p3mi13HR/LHH0TJLT11cnwDJEs3xA4+CK/PLKeO9Kl1oru24htkyUKtkGCvAx4ohB0Ttry8Dw== chalk@^4.0.0, chalk@^4.1.1, chalk@^4.1.2: version "4.1.2" - resolved "https://registry.yarnpkg.com/chalk/-/chalk-4.1.2.tgz#aac4e2b7734a740867aeb16bf02aad556a1e7a01" + resolved "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz" integrity sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA== dependencies: ansi-styles "^4.1.0" @@ -1980,37 +1680,42 @@ chalk@^4.0.0, chalk@^4.1.1, chalk@^4.1.2: charcodes@^0.2.0: version "0.2.0" - resolved "https://registry.yarnpkg.com/charcodes/-/charcodes-0.2.0.tgz#5208d327e6cc05f99eb80ffc814707572d1f14e4" + resolved "https://registry.npmjs.org/charcodes/-/charcodes-0.2.0.tgz" integrity sha512-Y4kiDb+AM4Ecy58YkuZrrSRJBDQdQ2L+NyS1vHHFtNtUjgutcZfx3yp1dAONI/oPaPmyGfCLx5CxL+zauIMyKQ== -cheerio-select@^1.5.0: - version "1.5.0" - resolved 
"https://registry.yarnpkg.com/cheerio-select/-/cheerio-select-1.5.0.tgz#faf3daeb31b17c5e1a9dabcee288aaf8aafa5823" - integrity sha512-qocaHPv5ypefh6YNxvnbABM07KMxExbtbfuJoIie3iZXX1ERwYmJcIiRrr9H05ucQP1k28dav8rpdDgjQd8drg== +cheerio-select@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/cheerio-select/-/cheerio-select-2.1.0.tgz#4d8673286b8126ca2a8e42740d5e3c4884ae21b4" + integrity sha512-9v9kG0LvzrlcungtnJtpGNxY+fzECQKhK4EGJX2vByejiMX84MFNQw4UxPJl3bFbTMw+Dfs37XaIkCwTZfLh4g== dependencies: - css-select "^4.1.3" - css-what "^5.0.1" - domelementtype "^2.2.0" - domhandler "^4.2.0" - domutils "^2.7.0" + boolbase "^1.0.0" + css-select "^5.1.0" + css-what "^6.1.0" + domelementtype "^2.3.0" + domhandler "^5.0.3" + domutils "^3.0.1" cheerio@^1.0.0-rc.5: - version "1.0.0-rc.10" - resolved "https://registry.yarnpkg.com/cheerio/-/cheerio-1.0.0-rc.10.tgz#2ba3dcdfcc26e7956fc1f440e61d51c643379f3e" - integrity sha512-g0J0q/O6mW8z5zxQ3A8E8J1hUgp4SMOvEoW/x84OwyHKe/Zccz83PVT4y5Crcr530FV6NgmKI1qvGTKVl9XXVw== - dependencies: - cheerio-select "^1.5.0" - dom-serializer "^1.3.2" - domhandler "^4.2.0" - htmlparser2 "^6.1.0" - parse5 "^6.0.1" - parse5-htmlparser2-tree-adapter "^6.0.1" - tslib "^2.2.0" - -chokidar@^3.3.0, chokidar@^3.4.0, chokidar@^3.5.2: - version "3.5.3" - resolved "https://registry.yarnpkg.com/chokidar/-/chokidar-3.5.3.tgz#1cf37c8707b932bd1af1ae22c0432e2acd1903bd" - integrity sha512-Dr3sfKRP6oTcjf2JmUmFJfeVMvXBdegxB0iVQ5eb2V10uFJUCAS8OByZdVAyVb8xXNz3GjjTgj9kLWsZTqE6kw== + version "1.1.2" + resolved "https://registry.yarnpkg.com/cheerio/-/cheerio-1.1.2.tgz#26af77e89336c81c63ea83197f868b4cbd351369" + integrity sha512-IkxPpb5rS/d1IiLbHMgfPuS0FgiWTtFIm/Nj+2woXDLTZ7fOT2eqzgYbdMlLweqlHbsZjxEChoVK+7iph7jyQg== + dependencies: + cheerio-select "^2.1.0" + dom-serializer "^2.0.0" + domhandler "^5.0.3" + domutils "^3.2.2" + encoding-sniffer "^0.2.1" + htmlparser2 "^10.0.0" + parse5 "^7.3.0" + parse5-htmlparser2-tree-adapter "^7.1.0" + parse5-parser-stream "^7.1.2" + 
undici "^7.12.0" + whatwg-mimetype "^4.0.0" + +chokidar@^3.3.0, chokidar@^3.5.2, chokidar@^3.6.0: + version "3.6.0" + resolved "https://registry.yarnpkg.com/chokidar/-/chokidar-3.6.0.tgz#197c6cc669ef2a8dc5e7b4d97ee4e092c3eb0d5b" + integrity sha512-7VT13fmjotKpGipCW9JEQAusEPE+Ei8nl6/g4FBAmIm0GOOLMua9NDDo/DWp0ZAxCr3cPq5ZpBqmPAQgDda2Pw== dependencies: anymatch "~3.1.2" braces "~3.0.2" @@ -2023,13 +1728,13 @@ chokidar@^3.3.0, chokidar@^3.4.0, chokidar@^3.5.2: fsevents "~2.3.2" classnames@^2.3.1: - version "2.3.1" - resolved "https://registry.yarnpkg.com/classnames/-/classnames-2.3.1.tgz#dfcfa3891e306ec1dad105d0e88f4417b8535e8e" - integrity sha512-OlQdbZ7gLfGarSqxesMesDa5uz7KFbID8Kpq/SxIoNGDqY8lSYs0D+hhtBXhcdB3rcbXArFr7vlHheLk1voeNA== + version "2.5.1" + resolved "https://registry.yarnpkg.com/classnames/-/classnames-2.5.1.tgz#ba774c614be0f016da105c858e7159eae8e7687b" + integrity sha512-saHYOzhIQs6wy2sVxTM6bUDsQO4F50V9RQ22qBpEdCW+I+/Wmke2HOl6lS6dTpdxVhb88/I6+Hs+438c3lfUow== cliui@^7.0.2: version "7.0.4" - resolved "https://registry.yarnpkg.com/cliui/-/cliui-7.0.4.tgz#a0265ee655476fc807aea9df3df8df7783808b4f" + resolved "https://registry.npmjs.org/cliui/-/cliui-7.0.4.tgz" integrity sha512-OcRE68cOsVMXp1Yvonl/fzkQOyjLSu/8bhPDfQt0e0/Eb283TKP20Fs2MqoPsr9SwA595rRCA+QMzYc9nBP+JQ== dependencies: string-width "^4.2.0" @@ -2038,72 +1743,64 @@ cliui@^7.0.2: cliui@^8.0.1: version "8.0.1" - resolved "https://registry.yarnpkg.com/cliui/-/cliui-8.0.1.tgz#0c04b075db02cbfe60dc8e6cf2f5486b1a3608aa" + resolved "https://registry.npmjs.org/cliui/-/cliui-8.0.1.tgz" integrity sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ== dependencies: string-width "^4.2.0" strip-ansi "^6.0.1" wrap-ansi "^7.0.0" -color-convert@^1.9.0, color-convert@^1.9.3: +color-convert@^1.9.3: version "1.9.3" - resolved "https://registry.yarnpkg.com/color-convert/-/color-convert-1.9.3.tgz#bb71850690e1f136567de629d2d5471deda4c1e8" + resolved 
"https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz" integrity sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg== dependencies: color-name "1.1.3" color-convert@^2.0.1: version "2.0.1" - resolved "https://registry.yarnpkg.com/color-convert/-/color-convert-2.0.1.tgz#72d3a68d598c9bdb3af2ad1e84f21d896abd4de3" + resolved "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz" integrity sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ== dependencies: color-name "~1.1.4" color-name@1.1.3: version "1.1.3" - resolved "https://registry.yarnpkg.com/color-name/-/color-name-1.1.3.tgz#a7d0558bd89c42f795dd42328f740831ca53bc25" + resolved "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz" integrity sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw== color-name@^1.0.0, color-name@~1.1.4: version "1.1.4" - resolved "https://registry.yarnpkg.com/color-name/-/color-name-1.1.4.tgz#c2a09a87acbde69543de6f63fa3995c826c536a2" + resolved "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz" integrity sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA== -color-string@^1.6.0: +color-string@^1.6.0, color-string@^1.9.0: version "1.9.1" - resolved "https://registry.yarnpkg.com/color-string/-/color-string-1.9.1.tgz#4467f9146f036f855b764dfb5bf8582bf342c7a4" + resolved "https://registry.npmjs.org/color-string/-/color-string-1.9.1.tgz" integrity sha512-shrVawQFojnZv6xM40anx4CkoDP+fZsw/ZerEMsW/pyzsRbElpsL/DBVW7q3ExxwusdNXI3lXpuhEZkzs8p5Eg== dependencies: color-name "^1.0.0" simple-swizzle "^0.2.2" -color-string@^1.9.0: - version "1.9.0" - resolved "https://registry.yarnpkg.com/color-string/-/color-string-1.9.0.tgz#63b6ebd1bec11999d1df3a79a7569451ac2be8aa" - integrity sha512-9Mrz2AQLefkH1UvASKj6v6hj/7eWgjnT/cVsR8CumieLoT+g900exWeNogqtweI8dxloXN9BDQTYro1oWu/5CQ== - dependencies: 
- color-name "^1.0.0" - simple-swizzle "^0.2.2" - color@^3.1.3: version "3.2.1" - resolved "https://registry.yarnpkg.com/color/-/color-3.2.1.tgz#3544dc198caf4490c3ecc9a790b54fe9ff45e164" + resolved "https://registry.npmjs.org/color/-/color-3.2.1.tgz" integrity sha512-aBl7dZI9ENN6fUGC7mWpMTPNHmWUSNan9tuWN6ahh5ZLNk9baLJOnSMlrQkHcrfFgz2/RigjUVAjdx36VcemKA== dependencies: color-convert "^1.9.3" color-string "^1.6.0" color@^4.0.1: - version "4.2.1" - resolved "https://registry.yarnpkg.com/color/-/color-4.2.1.tgz#498aee5fce7fc982606c8875cab080ac0547c884" - integrity sha512-MFJr0uY4RvTQUKvPq7dh9grVOTYSFeXja2mBXioCGjnjJoXrAp9jJ1NQTDR73c9nwBSAQiNKloKl5zq9WB9UPw== + version "4.2.3" + resolved "https://registry.npmjs.org/color/-/color-4.2.3.tgz" + integrity sha512-1rXeuUUiGGrykh+CeBdu5Ie7OJwinCgQY0bc7GCRxy5xVHy+moaqkpL/jqQq0MtQOeYcrqEz4abc5f0KtU7W4A== dependencies: color-convert "^2.0.1" color-string "^1.9.0" colorspace@1.1.x: version "1.1.4" - resolved "https://registry.yarnpkg.com/colorspace/-/colorspace-1.1.4.tgz#8d442d1186152f60453bf8070cd66eb364e59243" + resolved "https://registry.npmjs.org/colorspace/-/colorspace-1.1.4.tgz" integrity sha512-BgvKJiuVu1igBUF2kEjRCZXol6wiiGbY5ipL/oVPwm0BL9sIpMIzM8IK7vwuxIIzOXMV3Ey5w+vxhm0rR/TN8w== dependencies: color "^3.1.3" @@ -2111,82 +1808,71 @@ colorspace@1.1.x: combined-stream@^1.0.8: version "1.0.8" - resolved "https://registry.yarnpkg.com/combined-stream/-/combined-stream-1.0.8.tgz#c3d45a8b34fd730631a110a8a2520682b31d5a7f" + resolved "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz" integrity sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg== dependencies: delayed-stream "~1.0.0" commander@2: version "2.20.3" - resolved "https://registry.yarnpkg.com/commander/-/commander-2.20.3.tgz#fd485e84c03eb4881c20722ba48035e8531aeb33" + resolved "https://registry.npmjs.org/commander/-/commander-2.20.3.tgz" integrity 
sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ== -commander@^4.0.1: - version "4.1.1" - resolved "https://registry.yarnpkg.com/commander/-/commander-4.1.1.tgz#9fd602bd936294e9e9ef46a3f4d6964044b18068" - integrity sha512-NOKm8xhkzAjzFx8B2v5OAHT+u5pRQc2UCa2Vq9jYL/31o2wi9mxBA7LIFs3sV5VSC49z6pEhfbMULvShKj26WA== +commander@^6.2.0: + version "6.2.1" + resolved "https://registry.yarnpkg.com/commander/-/commander-6.2.1.tgz#0792eb682dfbc325999bb2b84fddddba110ac73c" + integrity sha512-U7VdrJFnJgo4xjrHpTzu0yrHPGImdsmD95ZlgYSEajAn2JKzDhDTPG9kBTefmObL2w/ngeZnilk+OV9CG3d7UA== commander@^8.0.0: version "8.3.0" - resolved "https://registry.yarnpkg.com/commander/-/commander-8.3.0.tgz#4837ea1b2da67b9c616a67afbb0fafee567bca66" + resolved "https://registry.npmjs.org/commander/-/commander-8.3.0.tgz" integrity sha512-OkTL9umf+He2DZkUq8f8J9of7yL6RJKI24dVITBmNfZBmri9zYZQrKkuXiKhyfPSu8tUhnVBB1iKXevvnlR4Ww== compute-scroll-into-view@^1.0.17: - version "1.0.17" - resolved "https://registry.yarnpkg.com/compute-scroll-into-view/-/compute-scroll-into-view-1.0.17.tgz#6a88f18acd9d42e9cf4baa6bec7e0522607ab7ab" - integrity sha512-j4dx+Fb0URmzbwwMUrhqWM2BEWHdFGx+qZ9qqASHRPqvTYdqvWnHg0H1hIbcyLnvgnoNAVMlwkepyqM3DaIFUg== + version "1.0.20" + resolved "https://registry.npmjs.org/compute-scroll-into-view/-/compute-scroll-into-view-1.0.20.tgz" + integrity sha512-UCB0ioiyj8CRjtrvaceBLqqhZCVP+1B8+NWQhmdsm0VXOJtobBCf1dBQmebCCo34qZmUwZfIH2MZLqNHazrfjg== concat-map@0.0.1: version "0.0.1" - resolved "https://registry.yarnpkg.com/concat-map/-/concat-map-0.0.1.tgz#d8a96bd77fd68df7793a73036a3ba0d5405d477b" + resolved "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz" integrity sha1-2Klr13/Wjfd5OnMDajug1UBdR3s= -convert-source-map@^1.1.0, convert-source-map@^1.5.0: +convert-source-map@^1.5.0: version "1.8.0" - resolved "https://registry.yarnpkg.com/convert-source-map/-/convert-source-map-1.8.0.tgz#f3373c32d21b4d780dd8004514684fb791ca4369" + resolved 
"https://registry.npmjs.org/convert-source-map/-/convert-source-map-1.8.0.tgz" integrity sha512-+OQdjP49zViI/6i7nIJpA8rAl4sV/JdPfU9nZs3VqOwGIgizICvuN2ru6fMd+4llL0tar18UYJXfZ/TWtmhUjA== dependencies: safe-buffer "~5.1.1" convert-source-map@^2.0.0: version "2.0.0" - resolved "https://registry.yarnpkg.com/convert-source-map/-/convert-source-map-2.0.0.tgz#4b560f649fc4e918dd0ab75cf4961e8bc882d82a" + resolved "https://registry.npmjs.org/convert-source-map/-/convert-source-map-2.0.0.tgz" integrity sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg== copy-to-clipboard@^3.3.1: version "3.3.1" - resolved "https://registry.yarnpkg.com/copy-to-clipboard/-/copy-to-clipboard-3.3.1.tgz#115aa1a9998ffab6196f93076ad6da3b913662ae" + resolved "https://registry.npmjs.org/copy-to-clipboard/-/copy-to-clipboard-3.3.1.tgz" integrity sha512-i13qo6kIHTTpCm8/Wup+0b1mVWETvu2kIMzKoK8FpkLkFxlt0znUAHcMzox+T8sPlqtZXq3CulEjQHsYiGFJUw== dependencies: toggle-selection "^1.0.6" -core-js-compat@^3.20.2, core-js-compat@^3.21.0, core-js-compat@^3.38.0, core-js-compat@^3.38.1: - version "3.40.0" - resolved "https://registry.yarnpkg.com/core-js-compat/-/core-js-compat-3.40.0.tgz#7485912a5a4a4315c2fdb2cbdc623e6881c88b38" - integrity sha512-0XEDpr5y5mijvw8Lbc6E5AkjrHfp7eEoPlu36SWeAbcL8fn1G1ANe8DBlo2XoNN89oVpxWwOjYIPVzR4ZvsKCQ== +core-js-compat@^3.21.0, core-js-compat@^3.43.0: + version "3.47.0" + resolved "https://registry.yarnpkg.com/core-js-compat/-/core-js-compat-3.47.0.tgz#698224bbdbb6f2e3f39decdda4147b161e3772a3" + integrity sha512-IGfuznZ/n7Kp9+nypamBhvwdwLsW6KC8IOaURw2doAK5e98AG3acVLdh0woOnEqCfUtS+Vu882JE4k/DAm3ItQ== dependencies: - browserslist "^4.24.3" + browserslist "^4.28.0" core-js@^3.14.0: - version "3.21.0" - resolved "https://registry.yarnpkg.com/core-js/-/core-js-3.21.0.tgz#f479dbfc3dffb035a0827602dd056839a774aa71" - integrity sha512-YUdI3fFu4TF/2WykQ2xzSiTQdldLB4KVuL9WeAy5XONZYt5Cun/fpQvctoKbCgvPhmzADeesTk/j2Rdx77AcKQ== - -cosmiconfig@^6.0.0: - 
version "6.0.0" - resolved "https://registry.yarnpkg.com/cosmiconfig/-/cosmiconfig-6.0.0.tgz#da4fee853c52f6b1e6935f41c1a2fc50bd4a9982" - integrity sha512-xb3ZL6+L8b9JLLCx3ZdoZy4+2ECphCMo2PwqgP1tlfVq6M6YReyzBJtvWWtbDSpNr9hn96pkCiZqUcFEc+54Qg== - dependencies: - "@types/parse-json" "^4.0.0" - import-fresh "^3.1.0" - parse-json "^5.0.0" - path-type "^4.0.0" - yaml "^1.7.2" + version "3.47.0" + resolved "https://registry.yarnpkg.com/core-js/-/core-js-3.47.0.tgz#436ef07650e191afeb84c24481b298bd60eb4a17" + integrity sha512-c3Q2VVkGAUyupsjRnaNX6u8Dq2vAdzm9iuPj5FW0fRxzlxgq9Q39MDq10IvmQSpLgHQNyQzQmOo6bgGHmH3NNg== -cosmiconfig@^7.0.1: - version "7.0.1" - resolved "https://registry.yarnpkg.com/cosmiconfig/-/cosmiconfig-7.0.1.tgz#714d756522cace867867ccb4474c5d01bbae5d6d" - integrity sha512-a1YWNUV2HwGimB7dU2s1wUMurNKjpx60HxBB6xUM8Re+2s1g1IIfJvFR0/iCF+XHdE0GMTKTuLR32UQff4TEyQ== +cosmiconfig@^7.0.0, cosmiconfig@^7.0.1: + version "7.1.0" + resolved "https://registry.yarnpkg.com/cosmiconfig/-/cosmiconfig-7.1.0.tgz#1443b9afa596b670082ea46cbd8f6a62b84635f6" + integrity sha512-AdmX6xUzdNASswsFtmwSt7Vj8po9IuqXm0UXz7QKPuEUmPB4XyjGfaAr2PSuELMwkRMVH1EpIkX5bTZGRB3eCA== dependencies: "@types/parse-json" "^4.0.0" import-fresh "^3.2.1" @@ -2196,41 +1882,40 @@ cosmiconfig@^7.0.1: create-require@^1.1.0: version "1.1.1" - resolved "https://registry.yarnpkg.com/create-require/-/create-require-1.1.1.tgz#c1d7e8f1e5f6cfc9ff65f9cd352d37348756c333" + resolved "https://registry.npmjs.org/create-require/-/create-require-1.1.1.tgz" integrity sha512-dcKFX3jn0MpIaXjisoRvexIJVEKzaq7z2rZKxf+MSr9TkdmHmsU4m2lcLojrj/FHl8mk5VxMmYA+ftRkP/3oKQ== crypto@^1.0.1: version "1.0.1" - resolved "https://registry.yarnpkg.com/crypto/-/crypto-1.0.1.tgz#2af1b7cad8175d24c8a1b0778255794a21803037" + resolved "https://registry.npmjs.org/crypto/-/crypto-1.0.1.tgz" integrity sha512-VxBKmeNcqQdiUQUW2Tzq0t377b54N2bMtXO/qiLa+6eRRmmC4qT3D4OnTGoT/U6O9aklQ/jTwbOtRMTTY8G0Ig== css-color-names@^0.0.4: version "0.0.4" - resolved 
"https://registry.yarnpkg.com/css-color-names/-/css-color-names-0.0.4.tgz#808adc2e79cf84738069b646cb20ec27beb629e0" - integrity sha1-gIrcLnnPhHOAabZGyyDsJ762KeA= + resolved "https://registry.npmjs.org/css-color-names/-/css-color-names-0.0.4.tgz" + integrity sha512-zj5D7X1U2h2zsXOAM8EyUREBnnts6H+Jm+d1M2DbiQQcUtnqgQsMrdo8JW9R80YFUmIdBZeMu5wvYM7hcgWP/Q== -css-in-js-utils@^2.0.0: - version "2.0.1" - resolved "https://registry.yarnpkg.com/css-in-js-utils/-/css-in-js-utils-2.0.1.tgz#3b472b398787291b47cfe3e44fecfdd9e914ba99" - integrity sha512-PJF0SpJT+WdbVVt0AOYp9C8GnuruRlL/UFW7932nLWmFLQTaWEzTBQEx7/hn4BuV+WON75iAViSUJLiU3PKbpA== +css-in-js-utils@^3.1.0: + version "3.1.0" + resolved "https://registry.npmjs.org/css-in-js-utils/-/css-in-js-utils-3.1.0.tgz" + integrity sha512-fJAcud6B3rRu+KHYk+Bwf+WFL2MDCJJ1XG9x137tJQ0xYxor7XziQtuGFbWNdqrvF4Tk26O3H73nfVqXt/fW1A== dependencies: - hyphenate-style-name "^1.0.2" - isobject "^3.0.1" + hyphenate-style-name "^1.0.3" -css-select@^4.1.3: - version "4.2.1" - resolved "https://registry.yarnpkg.com/css-select/-/css-select-4.2.1.tgz#9e665d6ae4c7f9d65dbe69d0316e3221fb274cdd" - integrity sha512-/aUslKhzkTNCQUB2qTX84lVmfia9NyjP3WpDGtj/WxhwBzWBYUV3DgUpurHTme8UTPcPlAD1DJ+b0nN/t50zDQ== +css-select@^5.1.0: + version "5.2.2" + resolved "https://registry.yarnpkg.com/css-select/-/css-select-5.2.2.tgz#01b6e8d163637bb2dd6c982ca4ed65863682786e" + integrity sha512-TizTzUddG/xYLA3NXodFM0fSbNizXjOKhqiQQwvhlspadZokn1KDy0NZFS0wuEubIYAV5/c1/lAr0TaaFXEXzw== dependencies: boolbase "^1.0.0" - css-what "^5.1.0" - domhandler "^4.3.0" - domutils "^2.8.0" + css-what "^6.1.0" + domhandler "^5.0.2" + domutils "^3.0.1" nth-check "^2.0.1" css-tree@^1.1.2: version "1.1.3" - resolved "https://registry.yarnpkg.com/css-tree/-/css-tree-1.1.3.tgz#eb4870fb6fd7707327ec95c2ff2ab09b5e8db91d" + resolved "https://registry.npmjs.org/css-tree/-/css-tree-1.1.3.tgz" integrity sha512-tRpdppF7TRazZrjJ6v3stzv93qxRcSsFmW6cX0Zm2NVKpxE1WV1HblnghVv9TreireHkqI/VDEsfolRF1p6y7Q== 
dependencies: mdn-data "2.0.14" @@ -2238,142 +1923,156 @@ css-tree@^1.1.2: css-unit-converter@^1.1.1: version "1.1.2" - resolved "https://registry.yarnpkg.com/css-unit-converter/-/css-unit-converter-1.1.2.tgz#4c77f5a1954e6dbff60695ecb214e3270436ab21" + resolved "https://registry.npmjs.org/css-unit-converter/-/css-unit-converter-1.1.2.tgz" integrity sha512-IiJwMC8rdZE0+xiEZHeru6YoONC4rfPMqGm2W85jMIbkFvv5nFTwJVFHam2eFrN6txmoUYFAFXiv8ICVeTO0MA== -css-what@^5.0.1, css-what@^5.1.0: - version "5.1.0" - resolved "https://registry.yarnpkg.com/css-what/-/css-what-5.1.0.tgz#3f7b707aadf633baf62c2ceb8579b545bb40f7fe" - integrity sha512-arSMRWIIFY0hV8pIxZMEfmMI47Wj3R/aWpZDDxWYCPEiOMv6tfOrnpDtgxBYPEQD4V0Y/958+1TdC3iWTFcUPw== +css-what@^6.1.0: + version "6.2.2" + resolved "https://registry.yarnpkg.com/css-what/-/css-what-6.2.2.tgz#cdcc8f9b6977719fdfbd1de7aec24abf756b9dea" + integrity sha512-u/O3vwbptzhMs3L1fQE82ZSLHQQfto5gyZzwteVIEyeaY5Fc7R4dapF/BvRoSYFeqfBk4m0V1Vafq5Pjv25wvA== cssesc@^3.0.0: version "3.0.0" - resolved "https://registry.yarnpkg.com/cssesc/-/cssesc-3.0.0.tgz#37741919903b868565e1c09ea747445cd18983ee" + resolved "https://registry.npmjs.org/cssesc/-/cssesc-3.0.0.tgz" integrity sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg== -csstype@^3.0.2, csstype@^3.0.6: - version "3.0.10" - resolved "https://registry.yarnpkg.com/csstype/-/csstype-3.0.10.tgz#2ad3a7bed70f35b965707c092e5f30b327c290e5" - integrity sha512-2u44ZG2OcNUO9HDp/Jl8C07x6pU/eTR3ncV91SiK3dhG9TWvRVsCoJw14Ckx5DgWkzGA3waZWO3d7pgqpUI/XA== +csstype@^3.0.2, csstype@^3.1.2: + version "3.2.3" + resolved "https://registry.npmjs.org/csstype/-/csstype-3.2.3.tgz" + integrity sha512-z1HGKcYy2xA8AGQfwrn0PAy+PB7X/GSj3UVJW9qKyn43xWa+gl5nXmU4qqLMRzWVLFC8KusUX8T/0kCiOYpAIQ== "d3-array@2.5.0 - 3": version "3.1.1" - resolved "https://registry.yarnpkg.com/d3-array/-/d3-array-3.1.1.tgz#7797eb53ead6b9083c75a45a681e93fc41bc468c" + resolved 
"https://registry.npmjs.org/d3-array/-/d3-array-3.1.1.tgz" integrity sha512-33qQ+ZoZlli19IFiQx4QEpf2CBEayMRzhlisJHSCsSUbDXv6ZishqS1x7uFVClKG4Wr7rZVHvaAttoLow6GqdQ== dependencies: internmap "1 - 2" d3-geo@^3.0.1: - version "3.0.1" - resolved "https://registry.yarnpkg.com/d3-geo/-/d3-geo-3.0.1.tgz#4f92362fd8685d93e3b1fae0fd97dc8980b1ed7e" - integrity sha512-Wt23xBych5tSy9IYAM1FR2rWIBFWa52B/oF/GYe5zbdHrg08FU8+BuI6X4PvTwPDdqdAdq04fuWJpELtsaEjeA== + version "3.1.1" + resolved "https://registry.yarnpkg.com/d3-geo/-/d3-geo-3.1.1.tgz#6027cf51246f9b2ebd64f99e01dc7c3364033a4d" + integrity sha512-637ln3gXKXOwhalDzinUgY83KzNWZRKbYubaG+fGVuc/dxO64RRljtCTnf5ecMyE1RIdtqpkVcq0IbtU2S8j2Q== dependencies: d3-array "2.5.0 - 3" debug@^4.1.0, debug@^4.1.1, debug@^4.3.1: version "4.4.0" - resolved "https://registry.yarnpkg.com/debug/-/debug-4.4.0.tgz#2b3f2aea2ffeb776477460267377dc8710faba8a" + resolved "https://registry.npmjs.org/debug/-/debug-4.4.0.tgz" integrity sha512-6WTZ/IxCY/T6BALoZHaE4ctp9xm+Z5kY/pzYaCHRFeyVhojxlrm+46y68HA6hr0TcwEssoxNiDEUJQjfPZ/RYA== dependencies: ms "^2.1.3" +debug@^4.4.1: + version "4.4.3" + resolved "https://registry.yarnpkg.com/debug/-/debug-4.4.3.tgz#c6ae432d9bd9662582fce08709b038c58e9e3d6a" + integrity sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA== + dependencies: + ms "^2.1.3" + defined@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/defined/-/defined-1.0.0.tgz#c98d9bcef75674188e110969151199e39b1fa693" - integrity sha1-yY2bzvdWdBiOEQlpFRGZ45sfppM= + version "1.0.1" + resolved "https://registry.npmjs.org/defined/-/defined-1.0.1.tgz" + integrity sha512-hsBd2qSVCRE+5PmNdHt1uzyrFu5d3RwmFDKzyNZMFq/EwDNJF7Ee5+D5oEKF0hU6LhtoUF1macFvOe4AskQC1Q== delayed-stream@~1.0.0: version "1.0.0" - resolved "https://registry.yarnpkg.com/delayed-stream/-/delayed-stream-1.0.0.tgz#df3ae199acadfb7d440aaae0b29e2272b24ec619" + resolved "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz" integrity 
sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ== dependency-graph@^0.9.0: version "0.9.0" - resolved "https://registry.yarnpkg.com/dependency-graph/-/dependency-graph-0.9.0.tgz#11aed7e203bc8b00f48356d92db27b265c445318" + resolved "https://registry.npmjs.org/dependency-graph/-/dependency-graph-0.9.0.tgz" integrity sha512-9YLIBURXj4DJMFALxXw9K3Y3rwb5Fk0X5/8ipCzaN84+gKxoHK43tVKRNakCQbiEx07E8Uwhuq21BpUagFhZ8w== detective@^5.2.0: - version "5.2.0" - resolved "https://registry.yarnpkg.com/detective/-/detective-5.2.0.tgz#feb2a77e85b904ecdea459ad897cc90a99bd2a7b" - integrity sha512-6SsIx+nUUbuK0EthKjv0zrdnajCCXVYGmbYYiYjFVpzcjwEs/JMDZ8tPRG29J/HhN56t3GJp2cGSWDRjjot8Pg== + version "5.2.1" + resolved "https://registry.npmjs.org/detective/-/detective-5.2.1.tgz" + integrity sha512-v9XE1zRnz1wRtgurGu0Bs8uHKFSTdteYZNbIPFVhUZ39L/S79ppMpdmVOZAnoz1jfEFodc48n6MX483Xo3t1yw== dependencies: - acorn-node "^1.6.1" + acorn-node "^1.8.2" defined "^1.0.0" - minimist "^1.1.1" + minimist "^1.2.6" didyoumean@^1.2.2: version "1.2.2" - resolved "https://registry.yarnpkg.com/didyoumean/-/didyoumean-1.2.2.tgz#989346ffe9e839b4555ecf5666edea0d3e8ad037" + resolved "https://registry.npmjs.org/didyoumean/-/didyoumean-1.2.2.tgz" integrity sha512-gxtyfqMg7GKyhQmb056K7M3xszy/myH8w+B4RT+QXBQsvAOdc3XymqDDPHx1BgPgsdAA5SIifona89YtRATDzw== diff@^4.0.1: version "4.0.2" - resolved "https://registry.yarnpkg.com/diff/-/diff-4.0.2.tgz#60f3aecb89d5fae520c11aa19efc2bb982aade7d" + resolved "https://registry.npmjs.org/diff/-/diff-4.0.2.tgz" integrity sha512-58lmxKSA4BNyLz+HHMUzlOEpg09FV+ev6ZMe3vJihgdxzgcwZ8VoEEPmALCZG9LmqfVoNMMKpttIYTVG6uDY7A== dir-glob@^3.0.1: version "3.0.1" - resolved "https://registry.yarnpkg.com/dir-glob/-/dir-glob-3.0.1.tgz#56dbf73d992a4a93ba1584f4534063fd2e41717f" + resolved "https://registry.npmjs.org/dir-glob/-/dir-glob-3.0.1.tgz" integrity sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA== dependencies: 
path-type "^4.0.0" dlv@^1.1.3: version "1.1.3" - resolved "https://registry.yarnpkg.com/dlv/-/dlv-1.1.3.tgz#5c198a8a11453596e751494d49874bc7732f2e79" + resolved "https://registry.npmjs.org/dlv/-/dlv-1.1.3.tgz" integrity sha512-+HlytyjlPKnIG8XuRG8WvmBP8xs8P71y+SKKS6ZXWoEgLuePxtDoUEiH7WkdePWrQ5JBpE6aoVqfZfJUQkjXwA== -dom-serializer@^1.0.1, dom-serializer@^1.3.2: - version "1.3.2" - resolved "https://registry.yarnpkg.com/dom-serializer/-/dom-serializer-1.3.2.tgz#6206437d32ceefaec7161803230c7a20bc1b4d91" - integrity sha512-5c54Bk5Dw4qAxNOI1pFEizPSjVsx5+bpJKmL2kPn8JhBUq2q09tTCa3mjijun2NfK78NMouDYNMBkOrPZiS+ig== +dom-serializer@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/dom-serializer/-/dom-serializer-2.0.0.tgz#e41b802e1eedf9f6cae183ce5e622d789d7d8e53" + integrity sha512-wIkAryiqt/nV5EQKqQpo3SToSOV9J0DnbJqwK7Wv/Trc92zIAYZ4FlMu+JPFW1DfGFt81ZTCGgDEabffXeLyJg== dependencies: - domelementtype "^2.0.1" - domhandler "^4.2.0" - entities "^2.0.0" + domelementtype "^2.3.0" + domhandler "^5.0.2" + entities "^4.2.0" -domelementtype@^2.0.1, domelementtype@^2.2.0: - version "2.2.0" - resolved "https://registry.yarnpkg.com/domelementtype/-/domelementtype-2.2.0.tgz#9a0b6c2782ed6a1c7323d42267183df9bd8b1d57" - integrity sha512-DtBMo82pv1dFtUmHyr48beiuq792Sxohr+8Hm9zoxklYPfa6n0Z3Byjj2IV7bmr2IyqClnqEQhfgHJJ5QF0R5A== +domelementtype@^2.2.0, domelementtype@^2.3.0: + version "2.3.0" + resolved "https://registry.yarnpkg.com/domelementtype/-/domelementtype-2.3.0.tgz#5c45e8e869952626331d7aab326d01daf65d589d" + integrity sha512-OLETBj6w0OsagBwdXnPdN0cnMfF9opN69co+7ZrbfPGrdpPVNBUj02spi6B1N7wChLQiPn4CSH/zJvXw56gmHw== -domhandler@^4.0.0, domhandler@^4.2.0, domhandler@^4.3.0: - version "4.3.0" - resolved "https://registry.yarnpkg.com/domhandler/-/domhandler-4.3.0.tgz#16c658c626cf966967e306f966b431f77d4a5626" - integrity sha512-fC0aXNQXqKSFTr2wDNZDhsEYjCiYsDWl3D01kwt25hm1YIPyDGHvvi3rw+PLqHAl/m71MaiF7d5zvBr0p5UB2g== +domhandler@^4.2.0: + version "4.3.1" + resolved 
"https://registry.npmjs.org/domhandler/-/domhandler-4.3.1.tgz" + integrity sha512-GrwoxYN+uWlzO8uhUXRl0P+kHE4GtVPfYzVLcUxPL7KNdHKj66vvlhiweIHqYYXWlw+T8iLMp42Lm67ghw4WMQ== dependencies: domelementtype "^2.2.0" -domutils@^2.5.2, domutils@^2.7.0, domutils@^2.8.0: - version "2.8.0" - resolved "https://registry.yarnpkg.com/domutils/-/domutils-2.8.0.tgz#4437def5db6e2d1f5d6ee859bd95ca7d02048135" - integrity sha512-w96Cjofp72M5IIhpjgobBimYEfoPjx1Vx0BSX9P30WBdZW2WIKU0T1Bd0kz2eNZ9ikjKgHbEyKx8BB6H1L3h3A== +domhandler@^5.0.2, domhandler@^5.0.3: + version "5.0.3" + resolved "https://registry.npmjs.org/domhandler/-/domhandler-5.0.3.tgz" + integrity sha512-cgwlv/1iFQiFnU96XXgROh8xTeetsnJiDsTc7TYCLFd9+/WNkIqPTxiM/8pSd8VIrhXGTf1Ny1q1hquVqDJB5w== dependencies: - dom-serializer "^1.0.1" - domelementtype "^2.2.0" - domhandler "^4.2.0" + domelementtype "^2.3.0" + +domutils@^3.0.1, domutils@^3.2.1, domutils@^3.2.2: + version "3.2.2" + resolved "https://registry.yarnpkg.com/domutils/-/domutils-3.2.2.tgz#edbfe2b668b0c1d97c24baf0f1062b132221bc78" + integrity sha512-6kZKyUajlDuqlHKVX1w7gyslj9MPIXzIFiz/rGu35uC1wMi+kMhQwGhl4lt9unC9Vb9INnY9Z3/ZA3+FhASLaw== + dependencies: + dom-serializer "^2.0.0" + domelementtype "^2.3.0" + domhandler "^5.0.3" dotenv-defaults@^2.0.2: version "2.0.2" - resolved "https://registry.yarnpkg.com/dotenv-defaults/-/dotenv-defaults-2.0.2.tgz#6b3ec2e4319aafb70940abda72d3856770ee77ac" + resolved "https://registry.npmjs.org/dotenv-defaults/-/dotenv-defaults-2.0.2.tgz" integrity sha512-iOIzovWfsUHU91L5i8bJce3NYK5JXeAwH50Jh6+ARUdLiiGlYWfGw6UkzsYqaXZH/hjE/eCd/PlfM/qqyK0AMg== dependencies: dotenv "^8.2.0" dotenv@^16.4.5: version "16.4.5" - resolved "https://registry.yarnpkg.com/dotenv/-/dotenv-16.4.5.tgz#cdd3b3b604cb327e286b4762e13502f717cb099f" + resolved "https://registry.npmjs.org/dotenv/-/dotenv-16.4.5.tgz" integrity sha512-ZmdL2rui+eB2YwhsWzjInR8LldtZHGDoQ1ugH85ppHKwpUHL7j7rN0Ti9NCnGiQbhaZ11FpR+7ao1dNsmduNUg== dotenv@^8.2.0: version "8.6.0" - resolved 
"https://registry.yarnpkg.com/dotenv/-/dotenv-8.6.0.tgz#061af664d19f7f4d8fc6e4ff9b584ce237adcb8b" + resolved "https://registry.npmjs.org/dotenv/-/dotenv-8.6.0.tgz" integrity sha512-IrPdXQsk2BbzvCBGBOTmmSH5SodmqZNt4ERAZDmW4CT+tL8VtvinqywuANaFu4bOMWki16nqf0e4oC0QIaDr/g== downshift@^6.1.3: - version "6.1.7" - resolved "https://registry.yarnpkg.com/downshift/-/downshift-6.1.7.tgz#fdb4c4e4f1d11587985cd76e21e8b4b3fa72e44c" - integrity sha512-cVprZg/9Lvj/uhYRxELzlu1aezRcgPWBjTvspiGTVEU64gF5pRdSRKFVLcxqsZC637cLAGMbL40JavEfWnqgNg== + version "6.1.12" + resolved "https://registry.npmjs.org/downshift/-/downshift-6.1.12.tgz" + integrity sha512-7XB/iaSJVS4T8wGFT3WRXmSF1UlBHAA40DshZtkrIscIN+VC+Lh363skLxFTvJwtNgHxAMDGEHT4xsyQFWL+UA== dependencies: "@babel/runtime" "^7.14.8" compute-scroll-into-view "^1.0.17" @@ -2383,67 +2082,80 @@ downshift@^6.1.3: dunder-proto@^1.0.1: version "1.0.1" - resolved "https://registry.yarnpkg.com/dunder-proto/-/dunder-proto-1.0.1.tgz#d7ae667e1dc83482f8b70fd0f6eefc50da30f58a" + resolved "https://registry.npmjs.org/dunder-proto/-/dunder-proto-1.0.1.tgz" integrity sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A== dependencies: call-bind-apply-helpers "^1.0.1" es-errors "^1.3.0" gopd "^1.2.0" -electron-to-chromium@^1.5.73: - version "1.5.94" - resolved "https://registry.yarnpkg.com/electron-to-chromium/-/electron-to-chromium-1.5.94.tgz#808f294794af24f9edc63a6e58fc625f718b60a6" - integrity sha512-v+oaMuy6AgwZ6Hi2u5UgcM3wxzeFscBTsZBQL2FoDTx/T6k1XEQKz++8fe1VlQ3zjXB6hcvy5JPb5ZSkmVtdIQ== +electron-to-chromium@^1.5.263: + version "1.5.267" + resolved "https://registry.yarnpkg.com/electron-to-chromium/-/electron-to-chromium-1.5.267.tgz#5d84f2df8cdb6bfe7e873706bb21bd4bfb574dc7" + integrity sha512-0Drusm6MVRXSOJpGbaSVgcQsuB4hEkMpHXaVstcPmhu5LIedxs1xNK/nIxmQIU/RPC0+1/o0AVZfBTkTNJOdUw== emoji-regex@^8.0.0: version "8.0.0" - resolved 
"https://registry.yarnpkg.com/emoji-regex/-/emoji-regex-8.0.0.tgz#e818fd69ce5ccfcb404594f842963bf53164cc37" + resolved "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz" integrity sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A== enabled@2.0.x: version "2.0.0" - resolved "https://registry.yarnpkg.com/enabled/-/enabled-2.0.0.tgz#f9dd92ec2d6f4bbc0d5d1e64e21d61cd4665e7c2" + resolved "https://registry.npmjs.org/enabled/-/enabled-2.0.0.tgz" integrity sha512-AKrN98kuwOzMIdAizXGI86UFBoo26CL21UM763y1h/GMSJ4/OHU9k2YlsmBpyScFo/wbLzWQJBMCW4+IO3/+OQ== -entities@^2.0.0: - version "2.2.0" - resolved "https://registry.yarnpkg.com/entities/-/entities-2.2.0.tgz#098dc90ebb83d8dffa089d55256b351d34c4da55" - integrity sha512-p92if5Nz619I0w+akJrLZH0MX0Pb5DX39XOwQTtXSdQQOaYH03S1uIQp4mhOZtAXrxq4ViO67YTiLBo2638o9A== +encoding-sniffer@^0.2.1: + version "0.2.1" + resolved "https://registry.yarnpkg.com/encoding-sniffer/-/encoding-sniffer-0.2.1.tgz#396ec97ac22ce5a037ba44af1992ac9d46a7b819" + integrity sha512-5gvq20T6vfpekVtqrYQsSCFZ1wEg5+wW0/QaZMWkFr6BqD3NfKs0rLCx4rrVlSWJeZb5NBJgVLswK/w2MWU+Gw== + dependencies: + iconv-lite "^0.6.3" + whatwg-encoding "^3.1.1" + +entities@^4.2.0: + version "4.5.0" + resolved "https://registry.yarnpkg.com/entities/-/entities-4.5.0.tgz#5d268ea5e7113ec74c4d033b79ea5a35a488fb48" + integrity sha512-V0hjH4dGPh9Ao5p0MoRY6BVqtwCjhz6vI5LT8AJ55H+4g9/4vbHx1I54fS0XuclLhDHArPQCiMjDxjaL8fPxhw== + +entities@^6.0.0: + version "6.0.1" + resolved "https://registry.yarnpkg.com/entities/-/entities-6.0.1.tgz#c28c34a43379ca7f61d074130b2f5f7020a30694" + integrity sha512-aN97NXWF6AWBTahfVOIrB/NShkzi5H7F9r1s9mD3cDj4Ko5f2qhhVoYMibXF7GlLveb/D2ioWay8lxI97Ven3g== error-ex@^1.3.1: version "1.3.2" - resolved "https://registry.yarnpkg.com/error-ex/-/error-ex-1.3.2.tgz#b4ac40648107fdcdcfae242f428bea8a14d4f1bf" + resolved "https://registry.npmjs.org/error-ex/-/error-ex-1.3.2.tgz" integrity 
sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g== dependencies: is-arrayish "^0.2.1" error-stack-parser@^2.0.6: - version "2.0.7" - resolved "https://registry.yarnpkg.com/error-stack-parser/-/error-stack-parser-2.0.7.tgz#b0c6e2ce27d0495cf78ad98715e0cad1219abb57" - integrity sha512-chLOW0ZGRf4s8raLrDxa5sdkvPec5YdvwbFnqJme4rk0rFajP8mPtrDL1+I+CwrQDCjswDA5sREX7jYQDQs9vA== + version "2.1.4" + resolved "https://registry.npmjs.org/error-stack-parser/-/error-stack-parser-2.1.4.tgz" + integrity sha512-Sk5V6wVazPhq5MhpO+AUxJn5x7XSXGl1R93Vn7i+zS15KDVxQijejNCrz8340/2bgLBjR9GtEG8ZVKONDjcqGQ== dependencies: - stackframe "^1.1.1" + stackframe "^1.3.4" es-define-property@^1.0.1: version "1.0.1" - resolved "https://registry.yarnpkg.com/es-define-property/-/es-define-property-1.0.1.tgz#983eb2f9a6724e9303f61addf011c72e09e0b0fa" + resolved "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.1.tgz" integrity sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g== es-errors@^1.3.0: version "1.3.0" - resolved "https://registry.yarnpkg.com/es-errors/-/es-errors-1.3.0.tgz#05f75a25dab98e4fb1dcd5e1472c0546d5057c8f" + resolved "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz" integrity sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw== es-object-atoms@^1.0.0, es-object-atoms@^1.1.1: version "1.1.1" - resolved "https://registry.yarnpkg.com/es-object-atoms/-/es-object-atoms-1.1.1.tgz#1c4f2c4837327597ce69d2ca190a7fdd172338c1" + resolved "https://registry.npmjs.org/es-object-atoms/-/es-object-atoms-1.1.1.tgz" integrity sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA== dependencies: es-errors "^1.3.0" es-set-tostringtag@^2.1.0: version "2.1.0" - resolved "https://registry.yarnpkg.com/es-set-tostringtag/-/es-set-tostringtag-2.1.0.tgz#f31dbbe0c183b00a6d26eb6325c810c0fd18bd4d" + resolved 
"https://registry.npmjs.org/es-set-tostringtag/-/es-set-tostringtag-2.1.0.tgz" integrity sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA== dependencies: es-errors "^1.3.0" @@ -2453,87 +2165,82 @@ es-set-tostringtag@^2.1.0: escalade@^3.1.1, escalade@^3.2.0: version "3.2.0" - resolved "https://registry.yarnpkg.com/escalade/-/escalade-3.2.0.tgz#011a3f69856ba189dffa7dc8fcce99d2a87903e5" + resolved "https://registry.npmjs.org/escalade/-/escalade-3.2.0.tgz" integrity sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA== -escape-string-regexp@^1.0.5: - version "1.0.5" - resolved "https://registry.yarnpkg.com/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz#1b61c0562190a8dff6ae3bb2cf0200ca130b86d4" - integrity sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg== - escape-string-regexp@^4.0.0: version "4.0.0" - resolved "https://registry.yarnpkg.com/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz#14ba83a5d373e3d311e5afca29cf5bfad965bf34" + resolved "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz" integrity sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA== esutils@^2.0.2: version "2.0.3" - resolved "https://registry.yarnpkg.com/esutils/-/esutils-2.0.3.tgz#74d2eb4de0b8da1293711910d50775b9b710ef64" + resolved "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz" integrity sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g== fast-deep-equal@^3.1.3: version "3.1.3" - resolved "https://registry.yarnpkg.com/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz#3a7d56b559d6cbc3eb512325244e619a65c6c525" + resolved "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz" integrity sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q== fast-glob@^3.2.7, fast-glob@^3.2.9: - 
version "3.2.11" - resolved "https://registry.yarnpkg.com/fast-glob/-/fast-glob-3.2.11.tgz#a1172ad95ceb8a16e20caa5c5e56480e5129c1d9" - integrity sha512-xrO3+1bxSo3ZVHAnqzyuewYT6aMFHRAd4Kcs92MAonjwQZLsK9d0SF1IyQ3k5PoirxTW0Oe/RqFgMQ6TcNE5Ew== + version "3.3.3" + resolved "https://registry.yarnpkg.com/fast-glob/-/fast-glob-3.3.3.tgz#d06d585ce8dba90a16b0505c543c3ccfb3aeb818" + integrity sha512-7MptL8U0cqcFdzIzwOTHoilX9x5BrNqye7Z/LuC7kCMRio1EMSyqRK3BEAUD7sXRq4iT4AzTVuZdhgQ2TCvYLg== dependencies: "@nodelib/fs.stat" "^2.0.2" "@nodelib/fs.walk" "^1.2.3" glob-parent "^5.1.2" merge2 "^1.3.0" - micromatch "^4.0.4" + micromatch "^4.0.8" fast-shallow-equal@^1.0.0: version "1.0.0" - resolved "https://registry.yarnpkg.com/fast-shallow-equal/-/fast-shallow-equal-1.0.0.tgz#d4dcaf6472440dcefa6f88b98e3251e27f25628b" + resolved "https://registry.npmjs.org/fast-shallow-equal/-/fast-shallow-equal-1.0.0.tgz" integrity sha512-HPtaa38cPgWvaCFmRNhlc6NG7pv6NUHqjPgVAkWGoB9mQMwYB27/K0CvOM5Czy+qpT3e8XJ6Q4aPAnzpNpzNaw== fastest-stable-stringify@^2.0.2: version "2.0.2" - resolved "https://registry.yarnpkg.com/fastest-stable-stringify/-/fastest-stable-stringify-2.0.2.tgz#3757a6774f6ec8de40c4e86ec28ea02417214c76" + resolved "https://registry.npmjs.org/fastest-stable-stringify/-/fastest-stable-stringify-2.0.2.tgz" integrity sha512-bijHueCGd0LqqNK9b5oCMHc0MluJAx0cwqASgbWMvkO01lCYgIhacVRLcaDz3QnyYIRNJRDwMb41VuT6pHJ91Q== fastq@^1.6.0: - version "1.13.0" - resolved "https://registry.yarnpkg.com/fastq/-/fastq-1.13.0.tgz#616760f88a7526bdfc596b7cab8c18938c36b98c" - integrity sha512-YpkpUnK8od0o1hmeSc7UUs/eB/vIPWJYjKck2QKIzAf71Vm1AAQ3EbuZB3g2JIy+pg+ERD0vqI79KyZiB2e2Nw== + version "1.19.1" + resolved "https://registry.yarnpkg.com/fastq/-/fastq-1.19.1.tgz#d50eaba803c8846a883c16492821ebcd2cda55f5" + integrity sha512-GwLTyxkCXjXbxqIhTsMI2Nui8huMPtnxg7krajPJAjnEG/iiOS7i+zCtWGZR9G0NBKbXKh6X9m9UIsYX/N6vvQ== dependencies: reusify "^1.0.4" fecha@^4.2.0: version "4.2.3" - resolved 
"https://registry.yarnpkg.com/fecha/-/fecha-4.2.3.tgz#4d9ccdbc61e8629b259fdca67e65891448d569fd" + resolved "https://registry.npmjs.org/fecha/-/fecha-4.2.3.tgz" integrity sha512-OP2IUU6HeYKJi3i0z4A19kHMQoLVs4Hc+DPqqxI2h/DPZHTm/vjsfC6P0b4jCMy14XizLBqvndQ+UilD7707Jw== fill-range@^7.1.1: version "7.1.1" - resolved "https://registry.yarnpkg.com/fill-range/-/fill-range-7.1.1.tgz#44265d3cac07e3ea7dc247516380643754a05292" + resolved "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz" integrity sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg== dependencies: to-regex-range "^5.0.1" find-root@^1.1.0: version "1.1.0" - resolved "https://registry.yarnpkg.com/find-root/-/find-root-1.1.0.tgz#abcfc8ba76f708c42a97b3d685b7e9450bfb9ce4" + resolved "https://registry.npmjs.org/find-root/-/find-root-1.1.0.tgz" integrity sha512-NKfW6bec6GfKc0SGx1e07QZY9PE99u0Bft/0rzSD5k3sO/vwkVUpDUKVm5Gpp5Ue3YfShPFTX2070tDs5kB9Ng== fn.name@1.x.x: version "1.1.0" - resolved "https://registry.yarnpkg.com/fn.name/-/fn.name-1.1.0.tgz#26cad8017967aea8731bc42961d04a3d5988accc" + resolved "https://registry.npmjs.org/fn.name/-/fn.name-1.1.0.tgz" integrity sha512-GRnmB5gPyJpAhTQdSZTSp9uaPSvl09KoYcMQtsB9rQoOmzs9dH6ffeccH+Z+cv6P68Hu5bC6JjRh4Ah/mHSNRw== follow-redirects@^1.15.6: version "1.15.6" - resolved "https://registry.yarnpkg.com/follow-redirects/-/follow-redirects-1.15.6.tgz#7f815c0cda4249c74ff09e95ef97c23b5fd0399b" + resolved "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.6.tgz" integrity sha512-wWN62YITEaOpSK584EZXJafH1AGpO8RVgElfkuXbTOrPX4fIfOyEpW/CsiNd8JdYrAoOvafRTOEnvsO++qCqFA== form-data@^4.0.4: version "4.0.4" - resolved "https://registry.yarnpkg.com/form-data/-/form-data-4.0.4.tgz#784cdcce0669a9d68e94d11ac4eea98088edd2c4" + resolved "https://registry.npmjs.org/form-data/-/form-data-4.0.4.tgz" integrity sha512-KrGhL9Q4zjj0kiUt5OO4Mr/A/jlI2jDYs5eHBpYHPcBEVSiipAvn2Ko2HnPe20rmcuuvMHNdZFp+4IlGTMF0Ow== dependencies: asynckit 
"^0.4.0" @@ -2542,15 +2249,15 @@ form-data@^4.0.4: hasown "^2.0.2" mime-types "^2.1.12" -fraction.js@^4.1.2: - version "4.1.3" - resolved "https://registry.yarnpkg.com/fraction.js/-/fraction.js-4.1.3.tgz#be65b0f20762ef27e1e793860bc2dfb716e99e65" - integrity sha512-pUHWWt6vHzZZiQJcM6S/0PXfS+g6FM4BF5rj9wZyreivhQPdsh5PpE25VtSNxq80wHS5RfY51Ii+8Z0Zl/pmzg== +fraction.js@^5.3.4: + version "5.3.4" + resolved "https://registry.yarnpkg.com/fraction.js/-/fraction.js-5.3.4.tgz#8c0fcc6a9908262df4ed197427bdeef563e0699a" + integrity sha512-1X1NTtiJphryn/uLQz3whtY6jK3fTqoE3ohKs0tT+Ujr1W59oopxmoEh7Lu5p6vBaPbgoM0bzveAW4Qi5RyWDQ== fs-extra@^10.0.0: - version "10.0.0" - resolved "https://registry.yarnpkg.com/fs-extra/-/fs-extra-10.0.0.tgz#9ff61b655dde53fb34a82df84bb214ce802e17c1" - integrity sha512-C5owb14u9eJwizKGdchcDUQeFtlSHHthBk8pbX9Vc1PFZrLombudjDnNns88aYslCyF6IY5SUw3Roz6xShcEIQ== + version "10.1.0" + resolved "https://registry.npmjs.org/fs-extra/-/fs-extra-10.1.0.tgz" + integrity sha512-oRXApq54ETRj4eMiFzGnHWGy+zo5raudjuxN0b8H7s/RU2oW0Wvsx9O0ACRN/kRq9E8Vu/ReskGB5o3ji+FzHQ== dependencies: graceful-fs "^4.2.0" jsonfile "^6.0.1" @@ -2558,7 +2265,7 @@ fs-extra@^10.0.0: fs-extra@^9.0.0: version "9.1.0" - resolved "https://registry.yarnpkg.com/fs-extra/-/fs-extra-9.1.0.tgz#5954460c764a8da2094ba3554bf839e6b9a7c86d" + resolved "https://registry.npmjs.org/fs-extra/-/fs-extra-9.1.0.tgz" integrity sha512-hcg3ZmepS30/7BSFqRvoo3DOMQu7IjqxO5nCDt+zM9XWjb33Wg7ziNT+Qvqbuc3+gWpzO02JubVyk2G4Zvo1OQ== dependencies: at-least-node "^1.0.0" @@ -2568,42 +2275,42 @@ fs-extra@^9.0.0: fs-readdir-recursive@^1.1.0: version "1.1.0" - resolved "https://registry.yarnpkg.com/fs-readdir-recursive/-/fs-readdir-recursive-1.1.0.tgz#e32fc030a2ccee44a6b5371308da54be0b397d27" + resolved "https://registry.npmjs.org/fs-readdir-recursive/-/fs-readdir-recursive-1.1.0.tgz" integrity sha512-GNanXlVr2pf02+sPN40XN8HG+ePaNcvM0q5mZBd668Obwb0yD5GiUbZOFgwn8kGMY6I3mdyDJzieUy3PTYyTRA== fs.realpath@^1.0.0: version "1.0.0" - resolved 
"https://registry.yarnpkg.com/fs.realpath/-/fs.realpath-1.0.0.tgz#1504ad2523158caa40db4a2787cb01411994ea4f" + resolved "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz" integrity sha1-FQStJSMVjKpA20onh8sBQRmU6k8= fs@^0.0.1-security: version "0.0.1-security" - resolved "https://registry.yarnpkg.com/fs/-/fs-0.0.1-security.tgz#8a7bd37186b6dddf3813f23858b57ecaaf5e41d4" + resolved "https://registry.npmjs.org/fs/-/fs-0.0.1-security.tgz" integrity sha1-invTcYa23d84E/I4WLV+yq9eQdQ= fsevents@~2.3.2: version "2.3.2" - resolved "https://registry.yarnpkg.com/fsevents/-/fsevents-2.3.2.tgz#8a526f78b8fdf4623b709e0b975c52c24c02fd1a" + resolved "https://registry.npmjs.org/fsevents/-/fsevents-2.3.2.tgz" integrity sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA== function-bind@^1.1.2: version "1.1.2" - resolved "https://registry.yarnpkg.com/function-bind/-/function-bind-1.1.2.tgz#2c02d864d97f3ea6c8830c464cbd11ab6eab7a1c" + resolved "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz" integrity sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA== gensync@^1.0.0-beta.2: version "1.0.0-beta.2" - resolved "https://registry.yarnpkg.com/gensync/-/gensync-1.0.0-beta.2.tgz#32a6ee76c3d7f52d46b2b1ae5d93fea8580a25e0" + resolved "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz" integrity sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg== get-caller-file@^2.0.5: version "2.0.5" - resolved "https://registry.yarnpkg.com/get-caller-file/-/get-caller-file-2.0.5.tgz#4f94412a82db32f36e3b0b9741f8a97feb031f7e" + resolved "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz" integrity sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg== get-intrinsic@^1.2.6: version "1.3.0" - resolved 
"https://registry.yarnpkg.com/get-intrinsic/-/get-intrinsic-1.3.0.tgz#743f0e3b6964a93a5491ed1bffaae054d7f98d01" + resolved "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.3.0.tgz" integrity sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ== dependencies: call-bind-apply-helpers "^1.0.2" @@ -2619,7 +2326,7 @@ get-intrinsic@^1.2.6: get-proto@^1.0.1: version "1.0.1" - resolved "https://registry.yarnpkg.com/get-proto/-/get-proto-1.0.1.tgz#150b3f2743869ef3e851ec0c49d15b1d14d00ee1" + resolved "https://registry.npmjs.org/get-proto/-/get-proto-1.0.1.tgz" integrity sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g== dependencies: dunder-proto "^1.0.1" @@ -2627,12 +2334,12 @@ get-proto@^1.0.1: get-stdin@^8.0.0: version "8.0.0" - resolved "https://registry.yarnpkg.com/get-stdin/-/get-stdin-8.0.0.tgz#cbad6a73feb75f6eeb22ba9e01f89aa28aa97a53" + resolved "https://registry.npmjs.org/get-stdin/-/get-stdin-8.0.0.tgz" integrity sha512-sY22aA6xchAzprjyqmSEQv4UbAAzRN0L2dQB0NlN5acTTK9Don6nhoc3eAbUnpZiCANAMfd/+40kVdKfFygohg== glob-parent@^5.1.2, glob-parent@~5.1.2: version "5.1.2" - resolved "https://registry.yarnpkg.com/glob-parent/-/glob-parent-5.1.2.tgz#869832c58034fe68a4093c17dc15e8340d8401c4" + resolved "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz" integrity sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow== dependencies: is-glob "^4.0.1" @@ -2646,31 +2353,26 @@ glob-parent@^6.0.1: glob-promise@^4.2.0: version "4.2.2" - resolved "https://registry.yarnpkg.com/glob-promise/-/glob-promise-4.2.2.tgz#15f44bcba0e14219cd93af36da6bb905ff007877" + resolved "https://registry.npmjs.org/glob-promise/-/glob-promise-4.2.2.tgz" integrity sha512-xcUzJ8NWN5bktoTIX7eOclO1Npxd/dyVqUJxlLIDasT4C7KZyqlPIwkdJ0Ypiy3p2ZKahTjK4M9uC3sNSfNMzw== dependencies: "@types/glob" "^7.1.3" -glob@^7.0.0, glob@^7.1.7: - version "7.2.0" - resolved 
"https://registry.yarnpkg.com/glob/-/glob-7.2.0.tgz#d15535af7732e02e948f4c41628bd910293f6023" - integrity sha512-lmLf6gtyrPq8tTjSmrO94wBeQbFR3HbLHbuyD69wuyQkImp2hWqMGB47OX65FBkPffO641IP9jWa1z4ivqG26Q== +glob@^7.1.7, glob@^7.2.0: + version "7.2.3" + resolved "https://registry.yarnpkg.com/glob/-/glob-7.2.3.tgz#b8df0fb802bbfa8e89bd1d938b4e16578ed44f2b" + integrity sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q== dependencies: fs.realpath "^1.0.0" inflight "^1.0.4" inherits "2" - minimatch "^3.0.4" + minimatch "^3.1.1" once "^1.3.0" path-is-absolute "^1.0.0" -globals@^11.1.0: - version "11.12.0" - resolved "https://registry.yarnpkg.com/globals/-/globals-11.12.0.tgz#ab8795338868a0babd8525758018c2a7eb95c42e" - integrity sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA== - globby@^11.0.0: version "11.1.0" - resolved "https://registry.yarnpkg.com/globby/-/globby-11.1.0.tgz#bd4be98bb042f83d796f7e3811991fbe82a0d34b" + resolved "https://registry.npmjs.org/globby/-/globby-11.1.0.tgz" integrity sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g== dependencies: array-union "^2.1.0" @@ -2682,86 +2384,93 @@ globby@^11.0.0: gopd@^1.2.0: version "1.2.0" - resolved "https://registry.yarnpkg.com/gopd/-/gopd-1.2.0.tgz#89f56b8217bdbc8802bd299df6d7f1081d7e51a1" + resolved "https://registry.npmjs.org/gopd/-/gopd-1.2.0.tgz" integrity sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg== graceful-fs@^4.1.6, graceful-fs@^4.2.0: - version "4.2.9" - resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-4.2.9.tgz#041b05df45755e587a24942279b9d113146e1c96" - integrity sha512-NtNxqUcXgpW2iMrfqSfR73Glt39K+BLwWsPs94yR63v45T0Wbej7eRmL5cWfwEgqXnmjQp3zaJTshdRW/qC2ZQ== - -has-flag@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/has-flag/-/has-flag-3.0.0.tgz#b5d454dc2199ae225699f3467e5a07f3b955bafd" - 
integrity sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw== + version "4.2.11" + resolved "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz" + integrity sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ== has-flag@^4.0.0: version "4.0.0" - resolved "https://registry.yarnpkg.com/has-flag/-/has-flag-4.0.0.tgz#944771fd9c81c81265c4d6941860da06bb59479b" + resolved "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz" integrity sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ== has-symbols@^1.0.3, has-symbols@^1.1.0: version "1.1.0" - resolved "https://registry.yarnpkg.com/has-symbols/-/has-symbols-1.1.0.tgz#fc9c6a783a084951d0b971fe1018de813707a338" + resolved "https://registry.npmjs.org/has-symbols/-/has-symbols-1.1.0.tgz" integrity sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ== has-tostringtag@^1.0.2: version "1.0.2" - resolved "https://registry.yarnpkg.com/has-tostringtag/-/has-tostringtag-1.0.2.tgz#2cdc42d40bef2e5b4eeab7c01a73c54ce7ab5abc" + resolved "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.2.tgz" integrity sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw== dependencies: has-symbols "^1.0.3" hasown@^2.0.2: version "2.0.2" - resolved "https://registry.yarnpkg.com/hasown/-/hasown-2.0.2.tgz#003eaf91be7adc372e84ec59dc37252cedb80003" + resolved "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz" integrity sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ== dependencies: function-bind "^1.1.2" hex-color-regex@^1.1.0: version "1.1.0" - resolved "https://registry.yarnpkg.com/hex-color-regex/-/hex-color-regex-1.1.0.tgz#4c06fccb4602fe2602b3c93df82d7e7dbf1a8a8e" + resolved "https://registry.npmjs.org/hex-color-regex/-/hex-color-regex-1.1.0.tgz" integrity 
sha512-l9sfDFsuqtOqKDsQdqrMRk0U85RZc0RtOR9yPI7mRVOa4FsR/BVnZ0shmQRM96Ji99kYZP/7hn1cedc1+ApsTQ== hsl-regex@^1.0.0: version "1.0.0" - resolved "https://registry.yarnpkg.com/hsl-regex/-/hsl-regex-1.0.0.tgz#d49330c789ed819e276a4c0d272dffa30b18fe6e" - integrity sha1-1JMwx4ntgZ4nakwNJy3/owsY/m4= + resolved "https://registry.npmjs.org/hsl-regex/-/hsl-regex-1.0.0.tgz" + integrity sha512-M5ezZw4LzXbBKMruP+BNANf0k+19hDQMgpzBIYnya//Al+fjNct9Wf3b1WedLqdEs2hKBvxq/jh+DsHJLj0F9A== hsla-regex@^1.0.0: version "1.0.0" - resolved "https://registry.yarnpkg.com/hsla-regex/-/hsla-regex-1.0.0.tgz#c1ce7a3168c8c6614033a4b5f7877f3b225f9c38" - integrity sha1-wc56MWjIxmFAM6S194d/OyJfnDg= + resolved "https://registry.npmjs.org/hsla-regex/-/hsla-regex-1.0.0.tgz" + integrity sha512-7Wn5GMLuHBjZCb2bTmnDOycho0p/7UVaAeqXZGbHrBCl6Yd/xDhQJAXe6Ga9AXJH2I5zY1dEdYw2u1UptnSBJA== + +htm@^3.1.1: + version "3.1.1" + resolved "https://registry.yarnpkg.com/htm/-/htm-3.1.1.tgz#49266582be0dc66ed2235d5ea892307cc0c24b78" + integrity sha512-983Vyg8NwUE7JkZ6NmOqpCZ+sh1bKv2iYTlUkzlWmA5JD2acKoxd4KVxbMmxX/85mtfdnDmTFoNKcg5DGAvxNQ== html-tags@^3.1.0: - version "3.1.0" - resolved "https://registry.yarnpkg.com/html-tags/-/html-tags-3.1.0.tgz#7b5e6f7e665e9fb41f30007ed9e0d41e97fb2140" - integrity sha512-1qYz89hW3lFDEazhjW0yVAV87lw8lVkrJocr72XmBkMKsoSVJCQx3W8BXsC7hO2qAt8BoVjYjtAcZ9perqGnNg== + version "3.3.1" + resolved "https://registry.npmjs.org/html-tags/-/html-tags-3.3.1.tgz" + integrity sha512-ztqyC3kLto0e9WbNp0aeP+M3kTt+nbaIveGmUxAtZa+8iFgKLUOD4YKM5j+f3QD89bra7UeumolZHKuOXnTmeQ== -htmlparser2@^6.1.0: - version "6.1.0" - resolved "https://registry.yarnpkg.com/htmlparser2/-/htmlparser2-6.1.0.tgz#c4d762b6c3371a05dbe65e94ae43a9f845fb8fb7" - integrity sha512-gyyPk6rgonLFEDGoeRgQNaEUvdJ4ktTmmUh/h2t7s+M8oPpIPxgNACWa+6ESR57kXstwqPiCut0V8NRpcwgU7A== +htmlparser2@^10.0.0: + version "10.0.0" + resolved "https://registry.yarnpkg.com/htmlparser2/-/htmlparser2-10.0.0.tgz#77ad249037b66bf8cc99c6e286ef73b83aeb621d" + integrity 
sha512-TwAZM+zE5Tq3lrEHvOlvwgj1XLWQCtaaibSN11Q+gGBAS7Y1uZSWwXXRe4iF6OXnaq1riyQAPFOBtYc77Mxq0g== dependencies: - domelementtype "^2.0.1" - domhandler "^4.0.0" - domutils "^2.5.2" - entities "^2.0.0" + domelementtype "^2.3.0" + domhandler "^5.0.3" + domutils "^3.2.1" + entities "^6.0.0" -hyphenate-style-name@^1.0.2: - version "1.0.4" - resolved "https://registry.yarnpkg.com/hyphenate-style-name/-/hyphenate-style-name-1.0.4.tgz#691879af8e220aea5750e8827db4ef62a54e361d" - integrity sha512-ygGZLjmXfPHj+ZWh6LwbC37l43MhfztxetbFCoYTM2VjkIUpeHgSNn7QIyVFj7YQ1Wl9Cbw5sholVJPzWvC2MQ== +hyphenate-style-name@^1.0.3: + version "1.1.0" + resolved "https://registry.npmjs.org/hyphenate-style-name/-/hyphenate-style-name-1.1.0.tgz" + integrity sha512-WDC/ui2VVRrz3jOVi+XtjqkDjiVjTtFaAGiW37k6b+ohyQ5wYDOGkvCZa8+H0nx3gyvv0+BST9xuOgIyGQ00gw== + +iconv-lite@0.6.3, iconv-lite@^0.6.3: + version "0.6.3" + resolved "https://registry.yarnpkg.com/iconv-lite/-/iconv-lite-0.6.3.tgz#a52f80bf38da1952eb5c681790719871a1a72501" + integrity sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw== + dependencies: + safer-buffer ">= 2.1.2 < 3.0.0" ignore@^5.2.0: - version "5.2.0" - resolved "https://registry.yarnpkg.com/ignore/-/ignore-5.2.0.tgz#6d3bac8fa7fe0d45d9f9be7bac2fc279577e345a" - integrity sha512-CmxgYGiEPCLhfLnpPp1MoRmifwEIOgjcHXxOBjv7mY96c+eWScsOP9c112ZyLdWHi0FxHjI+4uVhKYp/gcdRmQ== + version "5.3.2" + resolved "https://registry.npmjs.org/ignore/-/ignore-5.3.2.tgz" + integrity sha512-hsBTNUqQTDwkWtcdYI2i06Y/nUBEsNEDJKjWdigLvegy8kDuJAS8uRlpkkcQpyEXL0Z/pjDy5HBmMjRCJ2gq+g== -import-fresh@^3.1.0, import-fresh@^3.2.1: +import-fresh@^3.2.1: version "3.3.0" - resolved "https://registry.yarnpkg.com/import-fresh/-/import-fresh-3.3.0.tgz#37162c25fcb9ebaa2e6e53d5b4d88ce17d9e0c2b" + resolved "https://registry.npmjs.org/import-fresh/-/import-fresh-3.3.0.tgz" integrity sha512-veYYhQa+D1QBKznvhUHxb8faxlrwUnxseDAbAp457E0wLNio2bOSKnjYDhMj+YiAq61xrMGhQk9iXVk5FzgQMw== 
dependencies: parent-module "^1.0.0" @@ -2769,7 +2478,7 @@ import-fresh@^3.1.0, import-fresh@^3.2.1: inflight@^1.0.4: version "1.0.6" - resolved "https://registry.yarnpkg.com/inflight/-/inflight-1.0.6.tgz#49bd6331d7d02d0c09bc910a1075ba8165b56df9" + resolved "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz" integrity sha1-Sb1jMdfQLQwJvJEKEHW6gWW1bfk= dependencies: once "^1.3.0" @@ -2777,47 +2486,47 @@ inflight@^1.0.4: inherits@2, inherits@^2.0.3: version "2.0.4" - resolved "https://registry.yarnpkg.com/inherits/-/inherits-2.0.4.tgz#0fa2c64f932917c3433a0ded55363aae37416b7c" + resolved "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz" integrity sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ== inherits@2.0.3: version "2.0.3" - resolved "https://registry.yarnpkg.com/inherits/-/inherits-2.0.3.tgz#633c2c83e3da42a502f52466022480f4208261de" + resolved "https://registry.npmjs.org/inherits/-/inherits-2.0.3.tgz" integrity sha1-Yzwsg+PaQqUC9SRmAiSA9CCCYd4= -inline-style-prefixer@^6.0.0: - version "6.0.1" - resolved "https://registry.yarnpkg.com/inline-style-prefixer/-/inline-style-prefixer-6.0.1.tgz#c5c0e43ba8831707afc5f5bbfd97edf45c1fa7ae" - integrity sha512-AsqazZ8KcRzJ9YPN1wMH2aNM7lkWQ8tSPrW5uDk1ziYwiAPWSZnUsC7lfZq+BDqLqz0B4Pho5wscWcJzVvRzDQ== +inline-style-prefixer@^7.0.1: + version "7.0.1" + resolved "https://registry.npmjs.org/inline-style-prefixer/-/inline-style-prefixer-7.0.1.tgz" + integrity sha512-lhYo5qNTQp3EvSSp3sRvXMbVQTLrvGV6DycRMJ5dm2BLMiJ30wpXKdDdgX+GmJZ5uQMucwRKHamXSst3Sj/Giw== dependencies: - css-in-js-utils "^2.0.0" + css-in-js-utils "^3.1.0" "internmap@1 - 2": version "2.0.3" - resolved "https://registry.yarnpkg.com/internmap/-/internmap-2.0.3.tgz#6685f23755e43c524e251d29cbc97248e3061009" + resolved "https://registry.npmjs.org/internmap/-/internmap-2.0.3.tgz" integrity sha512-5Hh7Y1wQbvY5ooGgPbDaL5iYLAPzMTUrjMulskHLH6wnv/A+1q5rgEaiuqEjB+oxGXIVZs1FF+R/KPN3ZSQYYg== is-arrayish@^0.2.1: version 
"0.2.1" - resolved "https://registry.yarnpkg.com/is-arrayish/-/is-arrayish-0.2.1.tgz#77c99840527aa8ecb1a8ba697b80645a7a926a9d" + resolved "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.2.1.tgz" integrity sha1-d8mYQFJ6qOyxqLppe4BkWnqSap0= is-arrayish@^0.3.1: version "0.3.2" - resolved "https://registry.yarnpkg.com/is-arrayish/-/is-arrayish-0.3.2.tgz#4574a2ae56f7ab206896fb431eaeed066fdf8f03" + resolved "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.3.2.tgz" integrity sha512-eVRqCvVlZbuw3GrM63ovNSNAeA1K16kaR/LRY/92w0zxQ5/1YzwblUX652i4Xs9RwAGjW9d9y6X88t8OaAJfWQ== is-binary-path@~2.1.0: version "2.1.0" - resolved "https://registry.yarnpkg.com/is-binary-path/-/is-binary-path-2.1.0.tgz#ea1f7f3b80f064236e83470f86c09c254fb45b09" + resolved "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz" integrity sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw== dependencies: binary-extensions "^2.0.0" is-color-stop@^1.1.0: version "1.1.0" - resolved "https://registry.yarnpkg.com/is-color-stop/-/is-color-stop-1.1.0.tgz#cfff471aee4dd5c9e158598fbe12967b5cdad345" - integrity sha1-z/9HGu5N1cnhWFmPvhKWe1za00U= + resolved "https://registry.npmjs.org/is-color-stop/-/is-color-stop-1.1.0.tgz" + integrity sha512-H1U8Vz0cfXNujrJzEcvvwMDW9Ra+biSYA3ThdQvAnMLJkEHQXn6bWzLkxHtVYJ+Sdbx0b6finn3jZiaVe7MAHA== dependencies: css-color-names "^0.0.4" hex-color-regex "^1.1.0" @@ -2826,7 +2535,7 @@ is-color-stop@^1.1.0: rgb-regex "^1.0.1" rgba-regex "^1.0.0" -is-core-module@^2.16.0, is-core-module@^2.8.1: +is-core-module@^2.16.1: version "2.16.1" resolved "https://registry.yarnpkg.com/is-core-module/-/is-core-module-2.16.1.tgz#2a98801a849f43e2add644fbb6bc6229b19a4ef4" integrity sha512-UfoeMA6fIJ8wTYFEUjelnaGI67v6+N7qXJEvQuIGa99l4xsCruSYOVSQ0uPANn4dAzm8lkYPaKLrrijLq7x23w== @@ -2835,70 +2544,65 @@ is-core-module@^2.16.0, is-core-module@^2.8.1: is-extglob@^2.1.1: version "2.1.1" - resolved 
"https://registry.yarnpkg.com/is-extglob/-/is-extglob-2.1.1.tgz#a88c02535791f02ed37c76a1b9ea9773c833f8c2" + resolved "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz" integrity sha1-qIwCU1eR8C7TfHahueqXc8gz+MI= is-fullwidth-code-point@^3.0.0: version "3.0.0" - resolved "https://registry.yarnpkg.com/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz#f116f8064fe90b3f7844a38997c0b75051269f1d" + resolved "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz" integrity sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg== is-glob@^4.0.1, is-glob@^4.0.3, is-glob@~4.0.1: version "4.0.3" - resolved "https://registry.yarnpkg.com/is-glob/-/is-glob-4.0.3.tgz#64f61e42cbbb2eec2071a9dac0b28ba1e65d5084" + resolved "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz" integrity sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg== dependencies: is-extglob "^2.1.1" is-number@^7.0.0: version "7.0.0" - resolved "https://registry.yarnpkg.com/is-number/-/is-number-7.0.0.tgz#7535345b896734d5f80c4d06c50955527a14f12b" + resolved "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz" integrity sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng== is-stream@^2.0.0: version "2.0.1" - resolved "https://registry.yarnpkg.com/is-stream/-/is-stream-2.0.1.tgz#fac1e3d53b97ad5a9d0ae9cef2389f5810a5c077" + resolved "https://registry.npmjs.org/is-stream/-/is-stream-2.0.1.tgz" integrity sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg== -isobject@^3.0.1: - version "3.0.1" - resolved "https://registry.yarnpkg.com/isobject/-/isobject-3.0.1.tgz#4e431e92b11a9731636aa1f9c8d1ccbcfdab78df" - integrity sha1-TkMekrEalzFjaqH5yNHMvP2reN8= - js-cookie@^2.2.1: version "2.2.1" - resolved 
"https://registry.yarnpkg.com/js-cookie/-/js-cookie-2.2.1.tgz#69e106dc5d5806894562902aa5baec3744e9b2b8" + resolved "https://registry.npmjs.org/js-cookie/-/js-cookie-2.2.1.tgz" integrity sha512-HvdH2LzI/EAZcUwA8+0nKNtWHqS+ZmijLA30RwZA0bo7ToCckjK5MkGhjED9KoRcXO6BaGI3I9UIzSA1FKFPOQ== "js-tokens@^3.0.0 || ^4.0.0", js-tokens@^4.0.0: version "4.0.0" - resolved "https://registry.yarnpkg.com/js-tokens/-/js-tokens-4.0.0.tgz#19203fb59991df98e3a287050d4647cdeaf32499" + resolved "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz" integrity sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ== -jsesc@^3.0.2: +jsesc@^3.0.2, jsesc@~3.1.0: version "3.1.0" - resolved "https://registry.yarnpkg.com/jsesc/-/jsesc-3.1.0.tgz#74d335a234f67ed19907fdadfac7ccf9d409825d" + resolved "https://registry.npmjs.org/jsesc/-/jsesc-3.1.0.tgz" integrity sha512-/sM3dO2FOzXjKQhJuo0Q173wf2KOo8t4I8vHy6lF9poUp7bKT0/NHE8fPX23PwfhnykfqnC2xRxOnVw5XuGIaA== jsesc@~3.0.2: version "3.0.2" - resolved "https://registry.yarnpkg.com/jsesc/-/jsesc-3.0.2.tgz#bb8b09a6597ba426425f2e4a07245c3d00b9343e" + resolved "https://registry.npmjs.org/jsesc/-/jsesc-3.0.2.tgz" integrity sha512-xKqzzWXDttJuOcawBt4KnKHHIf5oQ/Cxax+0PWFG+DFDgHNAdi+TXECADI+RYiFUMmx8792xsMbbgXj4CwnP4g== json-parse-even-better-errors@^2.3.0: version "2.3.1" - resolved "https://registry.yarnpkg.com/json-parse-even-better-errors/-/json-parse-even-better-errors-2.3.1.tgz#7c47805a94319928e05777405dc12e1f7a4ee02d" + resolved "https://registry.npmjs.org/json-parse-even-better-errors/-/json-parse-even-better-errors-2.3.1.tgz" integrity sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w== json5@^2.2.3: version "2.2.3" - resolved "https://registry.yarnpkg.com/json5/-/json5-2.2.3.tgz#78cd6f1a19bdc12b73db5ad0c61efd66c1e29283" + resolved "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz" integrity 
sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg== jsonfile@^6.0.1: - version "6.1.0" - resolved "https://registry.yarnpkg.com/jsonfile/-/jsonfile-6.1.0.tgz#bc55b2634793c679ec6403094eb13698a6ec0aae" - integrity sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ== + version "6.2.0" + resolved "https://registry.npmjs.org/jsonfile/-/jsonfile-6.2.0.tgz" + integrity sha512-FGuPw30AdOIUTRMC2OMRtQV+jkVj2cfPqSeWXv1NEAJ1qZ5zb1X6z1mFhbfOB/iy3ssJCD+3KuZ8r8C3uVFlAg== dependencies: universalify "^2.0.0" optionalDependencies: @@ -2906,62 +2610,62 @@ jsonfile@^6.0.1: kuler@^2.0.0: version "2.0.0" - resolved "https://registry.yarnpkg.com/kuler/-/kuler-2.0.0.tgz#e2c570a3800388fb44407e851531c1d670b061b3" + resolved "https://registry.npmjs.org/kuler/-/kuler-2.0.0.tgz" integrity sha512-Xq9nH7KlWZmXAtodXDDRE7vs6DU1gTU8zYDHDiWLSip45Egwq3plLHzPn27NgvzL2r1LMPC1vdqh98sQxtqj4A== -lilconfig@^2.0.4: - version "2.0.4" - resolved "https://registry.yarnpkg.com/lilconfig/-/lilconfig-2.0.4.tgz#f4507d043d7058b380b6a8f5cb7bcd4b34cee082" - integrity sha512-bfTIN7lEsiooCocSISTWXkiWJkRqtL9wYtYy+8EK3Y41qh3mpwPU0ycTOgjdY9ErwXCc8QyrQp82bdL0Xkm9yA== +lilconfig@^2.0.5: + version "2.1.0" + resolved "https://registry.npmjs.org/lilconfig/-/lilconfig-2.1.0.tgz" + integrity sha512-utWOt/GHzuUxnLKxB6dk81RoOeoNeHgbrXiuGk4yyF5qlRz+iIVWu56E2fqGHFrXz0QNUhLB/8nKqvRH66JKGQ== lines-and-columns@^1.1.6: version "1.2.4" - resolved "https://registry.yarnpkg.com/lines-and-columns/-/lines-and-columns-1.2.4.tgz#eca284f75d2965079309dc0ad9255abb2ebc1632" + resolved "https://registry.npmjs.org/lines-and-columns/-/lines-and-columns-1.2.4.tgz" integrity sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg== lodash.castarray@^4.4.0: version "4.4.0" - resolved "https://registry.yarnpkg.com/lodash.castarray/-/lodash.castarray-4.4.0.tgz#c02513515e309daddd4c24c60cfddcf5976d9115" - integrity 
sha1-wCUTUV4wna3dTCTGDP3c9ZdtkRU= + resolved "https://registry.npmjs.org/lodash.castarray/-/lodash.castarray-4.4.0.tgz" + integrity sha512-aVx8ztPv7/2ULbArGJ2Y42bG1mEQ5mGjpdvrbJcJFU3TbYybe+QlLS4pst9zV52ymy2in1KpFPiZnAOATxD4+Q== lodash.chunk@^4.2.0: version "4.2.0" - resolved "https://registry.yarnpkg.com/lodash.chunk/-/lodash.chunk-4.2.0.tgz#66e5ce1f76ed27b4303d8c6512e8d1216e8106bc" + resolved "https://registry.npmjs.org/lodash.chunk/-/lodash.chunk-4.2.0.tgz" integrity sha1-ZuXOH3btJ7QwPYxlEujRIW6BBrw= lodash.debounce@^4.0.8: version "4.0.8" - resolved "https://registry.yarnpkg.com/lodash.debounce/-/lodash.debounce-4.0.8.tgz#82d79bff30a67c4005ffd5e2515300ad9ca4d7af" + resolved "https://registry.npmjs.org/lodash.debounce/-/lodash.debounce-4.0.8.tgz" integrity sha512-FT1yDzDYEoYWhnSGnpE/4Kj1fLZkDFyqRb7fNt6FdYOSxlUWAtp42Eh6Wb0rGIv/m9Bgo7x4GhQbm5Ys4SG5ow== lodash.isplainobject@^4.0.6: version "4.0.6" - resolved "https://registry.yarnpkg.com/lodash.isplainobject/-/lodash.isplainobject-4.0.6.tgz#7c526a52d89b45c45cc690b88163be0497f550cb" - integrity sha1-fFJqUtibRcRcxpC4gWO+BJf1UMs= + resolved "https://registry.npmjs.org/lodash.isplainobject/-/lodash.isplainobject-4.0.6.tgz" + integrity sha512-oSXzaWypCMHkPC3NvBEaPHf0KsA5mvPrOPgQWDsbg8n7orZ290M0BmC/jgRZ4vcJ6DTAhjrsSYgdsW/F+MFOBA== lodash.merge@^4.6.2: version "4.6.2" - resolved "https://registry.yarnpkg.com/lodash.merge/-/lodash.merge-4.6.2.tgz#558aa53b43b661e1925a0afdfa36a9a1085fe57a" + resolved "https://registry.npmjs.org/lodash.merge/-/lodash.merge-4.6.2.tgz" integrity sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ== lodash.topath@^4.5.2: version "4.5.2" - resolved "https://registry.yarnpkg.com/lodash.topath/-/lodash.topath-4.5.2.tgz#3616351f3bba61994a0931989660bd03254fd009" - integrity sha1-NhY1Hzu6YZlKCTGYlmC9AyVP0Ak= + resolved "https://registry.npmjs.org/lodash.topath/-/lodash.topath-4.5.2.tgz" + integrity 
sha512-1/W4dM+35DwvE/iEd1M9ekewOSTlpFekhw9mhAtrwjVqUr83/ilQiyAvmg4tVX7Unkcfl1KC+i9WdaT4B6aQcg== lodash.uniq@^4.5.0: version "4.5.0" - resolved "https://registry.yarnpkg.com/lodash.uniq/-/lodash.uniq-4.5.0.tgz#d0225373aeb652adc1bc82e4945339a842754773" - integrity sha1-0CJTc662Uq3BvILklFM5qEJ1R3M= + resolved "https://registry.npmjs.org/lodash.uniq/-/lodash.uniq-4.5.0.tgz" + integrity sha512-xfBaXQd9ryd9dlSDvnvI0lvxfLJlYAZzXomUYzLKtUeOQvOP5piqAWuGtrhWeqaXK9hhoM/iyJc5AV+XfsX3HQ== lodash@^4.17.21: version "4.17.21" - resolved "https://registry.yarnpkg.com/lodash/-/lodash-4.17.21.tgz#679591c564c3bffaae8454cf0b3df370c3d6911c" + resolved "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz" integrity sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg== logform@^2.6.0, logform@^2.6.1: version "2.6.1" - resolved "https://registry.yarnpkg.com/logform/-/logform-2.6.1.tgz#71403a7d8cae04b2b734147963236205db9b3df0" + resolved "https://registry.npmjs.org/logform/-/logform-2.6.1.tgz" integrity sha512-CdaO738xRapbKIMVn2m4F6KTj4j7ooJ8POVnebSgKo3KBz5axNXRAL7ZdRjIV6NOr2Uf4vjtRkxrFETOioCqSA== dependencies: "@colors/colors" "1.6.0" @@ -2973,26 +2677,26 @@ logform@^2.6.0, logform@^2.6.1: loglevel@^1.8.1: version "1.9.1" - resolved "https://registry.yarnpkg.com/loglevel/-/loglevel-1.9.1.tgz#d63976ac9bcd03c7c873116d41c2a85bafff1be7" + resolved "https://registry.npmjs.org/loglevel/-/loglevel-1.9.1.tgz" integrity sha512-hP3I3kCrDIMuRwAwHltphhDM1r8i55H33GgqjXbrisuJhF4kRhW1dNuxsRklp4bXl8DSdLaNLuiL4A/LWRfxvg== -loose-envify@^1.1.0, loose-envify@^1.4.0: +loose-envify@^1.4.0: version "1.4.0" - resolved "https://registry.yarnpkg.com/loose-envify/-/loose-envify-1.4.0.tgz#71ee51fa7be4caec1a63839f7e682d8132d30caf" + resolved "https://registry.npmjs.org/loose-envify/-/loose-envify-1.4.0.tgz" integrity sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q== dependencies: js-tokens "^3.0.0 || ^4.0.0" lru-cache@^5.1.1: 
version "5.1.1" - resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-5.1.1.tgz#1da27e6710271947695daf6848e847f01d84b920" + resolved "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz" integrity sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w== dependencies: yallist "^3.0.2" make-dir@^2.1.0: version "2.1.0" - resolved "https://registry.yarnpkg.com/make-dir/-/make-dir-2.1.0.tgz#5f0310e18b8be898cc07009295a30ae41e91e6f5" + resolved "https://registry.npmjs.org/make-dir/-/make-dir-2.1.0.tgz" integrity sha512-LS9X+dc8KLxXCb8dni79fLIIUA5VyZoyjSMCwTluaXA0o27cCK0bhXkpgw+sTXVpPy/lSO57ilRixqk0vDmtRA== dependencies: pify "^4.0.1" @@ -3000,27 +2704,27 @@ make-dir@^2.1.0: make-error@^1.1.1: version "1.3.6" - resolved "https://registry.yarnpkg.com/make-error/-/make-error-1.3.6.tgz#2eb2e37ea9b67c4891f684a1394799af484cf7a2" + resolved "https://registry.npmjs.org/make-error/-/make-error-1.3.6.tgz" integrity sha512-s8UhlNe7vPKomQhC1qFelMokr/Sc3AgNbso3n74mVPA5LTZwkB9NlXf4XPamLxJE8h0gh73rM94xvwRT2CVInw== math-intrinsics@^1.1.0: version "1.1.0" - resolved "https://registry.yarnpkg.com/math-intrinsics/-/math-intrinsics-1.1.0.tgz#a0dd74be81e2aa5c2f27e65ce283605ee4e2b7f9" + resolved "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz" integrity sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g== mdn-data@2.0.14: version "2.0.14" - resolved "https://registry.yarnpkg.com/mdn-data/-/mdn-data-2.0.14.tgz#7113fc4281917d63ce29b43446f701e68c25ba50" + resolved "https://registry.npmjs.org/mdn-data/-/mdn-data-2.0.14.tgz" integrity sha512-dn6wd0uw5GsdswPFfsgMp5NSB0/aDe6fK94YJV/AJDYXL6HVLWBsxeq7js7Ad+mU2K9LAlwpk6kN2D5mwCPVow== merge2@^1.3.0, merge2@^1.4.1: version "1.4.1" - resolved "https://registry.yarnpkg.com/merge2/-/merge2-1.4.1.tgz#4368892f885e907455a6fd7dc55c0c9d404990ae" + resolved "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz" integrity 
sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg== -micromatch@^4.0.4: +micromatch@^4.0.8: version "4.0.8" - resolved "https://registry.yarnpkg.com/micromatch/-/micromatch-4.0.8.tgz#d66fa18f3a47076789320b9b1af32bd86d9fa202" + resolved "https://registry.npmjs.org/micromatch/-/micromatch-4.0.8.tgz" integrity sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA== dependencies: braces "^3.0.3" @@ -3028,125 +2732,120 @@ micromatch@^4.0.4: mime-db@1.52.0: version "1.52.0" - resolved "https://registry.yarnpkg.com/mime-db/-/mime-db-1.52.0.tgz#bbabcdc02859f4987301c856e3387ce5ec43bf70" + resolved "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz" integrity sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg== mime-types@^2.1.12: version "2.1.35" - resolved "https://registry.yarnpkg.com/mime-types/-/mime-types-2.1.35.tgz#381a871b62a734450660ae3deee44813f70d959a" + resolved "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz" integrity sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw== dependencies: mime-db "1.52.0" mini-svg-data-uri@^1.2.3: version "1.4.3" - resolved "https://registry.yarnpkg.com/mini-svg-data-uri/-/mini-svg-data-uri-1.4.3.tgz#43177b2e93766ba338931a3e2a84a3dfd3a222b8" + resolved "https://registry.npmjs.org/mini-svg-data-uri/-/mini-svg-data-uri-1.4.3.tgz" integrity sha512-gSfqpMRC8IxghvMcxzzmMnWpXAChSA+vy4cia33RgerMS8Fex95akUyQZPbxJJmeBGiGmK7n/1OpUX8ksRjIdA== -minimatch@^3.0.4: - version "3.1.1" - resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-3.1.1.tgz#879ad447200773912898b46cd516a7abbb5e50b0" - integrity sha512-reLxBcKUPNBnc/sVtAbxgRVFSegoGeLaSjmphNhcwcolhYLRgtJscn5mRl6YRZNQv40Y7P6JM2YhSIsbL9OB5A== +minimatch@^3.1.1: + version "3.1.2" + resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-3.1.2.tgz#19cd194bfd3e428f049a70817c038d89ab4be35b" + 
integrity sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw== dependencies: brace-expansion "^1.1.7" -minimist@^1.1.1: - version "1.2.6" - resolved "https://registry.yarnpkg.com/minimist/-/minimist-1.2.6.tgz#8637a5b759ea0d6e98702cfb3a9283323c93af44" - integrity sha512-Jsjnk4bw3YJqYzbdyBiNsPWHPfO++UGG749Cxs6peCu5Xg4nrena6OVxOYxrQTqww0Jmwt+Ref8rggumkTLz9Q== +minimist@^1.2.6: + version "1.2.8" + resolved "https://registry.npmjs.org/minimist/-/minimist-1.2.8.tgz" + integrity sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA== modern-normalize@^1.1.0: version "1.1.0" - resolved "https://registry.yarnpkg.com/modern-normalize/-/modern-normalize-1.1.0.tgz#da8e80140d9221426bd4f725c6e11283d34f90b7" + resolved "https://registry.npmjs.org/modern-normalize/-/modern-normalize-1.1.0.tgz" integrity sha512-2lMlY1Yc1+CUy0gw4H95uNN7vjbpoED7NNRSBHE25nWfLBdmMzFCsPshlzbxHz+gYMcBEUN8V4pU16prcdPSgA== ms@^2.1.1, ms@^2.1.3: version "2.1.3" - resolved "https://registry.yarnpkg.com/ms/-/ms-2.1.3.tgz#574c8138ce1d2b5861f0b44579dbadd60c6615b2" + resolved "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz" integrity sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA== -nano-css@^5.3.1: - version "5.3.4" - resolved "https://registry.yarnpkg.com/nano-css/-/nano-css-5.3.4.tgz#40af6a83a76f84204f346e8ccaa9169cdae9167b" - integrity sha512-wfcviJB6NOxDIDfr7RFn/GlaN7I/Bhe4d39ZRCJ3xvZX60LVe2qZ+rDqM49nm4YT81gAjzS+ZklhKP/Gnfnubg== +nano-css@^5.6.2: + version "5.6.2" + resolved "https://registry.npmjs.org/nano-css/-/nano-css-5.6.2.tgz" + integrity sha512-+6bHaC8dSDGALM1HJjOHVXpuastdu2xFoZlC77Jh4cg+33Zcgm+Gxd+1xsnpZK14eyHObSp82+ll5y3SX75liw== dependencies: + "@jridgewell/sourcemap-codec" "^1.4.15" css-tree "^1.1.2" - csstype "^3.0.6" + csstype "^3.1.2" fastest-stable-stringify "^2.0.2" - inline-style-prefixer "^6.0.0" - rtl-css-js "^1.14.0" - sourcemap-codec "^1.4.8" + 
inline-style-prefixer "^7.0.1" + rtl-css-js "^1.16.1" stacktrace-js "^2.0.2" - stylis "^4.0.6" + stylis "^4.3.0" -nanoid@^3.3.6: - version "3.3.8" - resolved "https://registry.yarnpkg.com/nanoid/-/nanoid-3.3.8.tgz#b1be3030bee36aaff18bacb375e5cce521684baf" - integrity sha512-WNLf5Sd8oZxOm+TzppcYk8gVOgP+l58xNy58D0nbUnOxOWRWvlcCV4kUF7ltmI6PsrLl/BgKEyS4mqsGChFN0w== +nanoid@^3.3.11: + version "3.3.11" + resolved "https://registry.yarnpkg.com/nanoid/-/nanoid-3.3.11.tgz#4f4f112cefbe303202f2199838128936266d185b" + integrity sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w== node-emoji@^1.11.0: version "1.11.0" - resolved "https://registry.yarnpkg.com/node-emoji/-/node-emoji-1.11.0.tgz#69a0150e6946e2f115e9d7ea4df7971e2628301c" + resolved "https://registry.npmjs.org/node-emoji/-/node-emoji-1.11.0.tgz" integrity sha512-wo2DpQkQp7Sjm2A0cq+sN7EHKO6Sl0ctXeBdFZrL9T9+UywORbufTcTZxom8YqpLQt/FqNMUkOpkZrJVYSKD3A== dependencies: lodash "^4.17.21" -node-releases@^2.0.19: - version "2.0.19" - resolved "https://registry.yarnpkg.com/node-releases/-/node-releases-2.0.19.tgz#9e445a52950951ec4d177d843af370b411caf314" - integrity sha512-xxOWJsBKtzAq7DY0J+DTzuz58K8e7sJbdgwkbMWQe8UYB6ekmsQ45q0M/tJDsGaZmbC+l7n57UV8Hl5tHxO9uw== +node-releases@^2.0.27: + version "2.0.27" + resolved "https://registry.yarnpkg.com/node-releases/-/node-releases-2.0.27.tgz#eedca519205cf20f650f61d56b070db111231e4e" + integrity sha512-nmh3lCkYZ3grZvqcCH+fjmQ7X+H0OeZgP40OierEaAptX4XofMh5kwNbWh7lBduUzCcV/8kZ+NDLCwm2iorIlA== normalize-path@^3.0.0, normalize-path@~3.0.0: version "3.0.0" - resolved "https://registry.yarnpkg.com/normalize-path/-/normalize-path-3.0.0.tgz#0dcd69ff23a1c9b11fd0978316644a0388216a65" + resolved "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz" integrity sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA== -normalize-range@^0.1.2: - version "0.1.2" - resolved 
"https://registry.yarnpkg.com/normalize-range/-/normalize-range-0.1.2.tgz#2d10c06bdfd312ea9777695a4d28439456b75942" - integrity sha1-LRDAa9/TEuqXd2laTShDlFa3WUI= - nth-check@^2.0.1: version "2.0.1" - resolved "https://registry.yarnpkg.com/nth-check/-/nth-check-2.0.1.tgz#2efe162f5c3da06a28959fbd3db75dbeea9f0fc2" + resolved "https://registry.npmjs.org/nth-check/-/nth-check-2.0.1.tgz" integrity sha512-it1vE95zF6dTT9lBsYbxvqh0Soy4SPowchj0UBGj/V6cTPnXXtQOPUbhZ6CmGzAD/rW22LQK6E96pcdJXk4A4w== dependencies: boolbase "^1.0.0" object-assign@^4.1.1: version "4.1.1" - resolved "https://registry.yarnpkg.com/object-assign/-/object-assign-4.1.1.tgz#2109adc7965887cfc05cbbd442cac8bfbb360863" + resolved "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz" integrity sha1-IQmtx5ZYh8/AXLvUQsrIv7s2CGM= object-hash@^2.2.0: version "2.2.0" - resolved "https://registry.yarnpkg.com/object-hash/-/object-hash-2.2.0.tgz#5ad518581eefc443bd763472b8ff2e9c2c0d54a5" + resolved "https://registry.npmjs.org/object-hash/-/object-hash-2.2.0.tgz" integrity sha512-gScRMn0bS5fH+IuwyIFgnh9zBdo4DV+6GhygmWM9HyNJSgS0hScp1f5vjtm7oIIOiT9trXrShAkLFSc2IqKNgw== once@^1.3.0: version "1.4.0" - resolved "https://registry.yarnpkg.com/once/-/once-1.4.0.tgz#583b1aa775961d4b113ac17d9c50baef9dd76bd1" + resolved "https://registry.npmjs.org/once/-/once-1.4.0.tgz" integrity sha1-WDsap3WWHUsROsF9nFC6753Xa9E= dependencies: wrappy "1" one-time@^1.0.0: version "1.0.0" - resolved "https://registry.yarnpkg.com/one-time/-/one-time-1.0.0.tgz#e06bc174aed214ed58edede573b433bbf827cb45" + resolved "https://registry.npmjs.org/one-time/-/one-time-1.0.0.tgz" integrity sha512-5DXOiRKwuSEcQ/l0kGCF6Q3jcADFv5tSmRaJck/OqkVFcOzutB134KRSfF0xDrL39MNnqxbHBbUUcjZIhTgb2g== dependencies: fn.name "1.x.x" parent-module@^1.0.0: version "1.0.1" - resolved "https://registry.yarnpkg.com/parent-module/-/parent-module-1.0.1.tgz#691d2709e78c79fae3a156622452d00762caaaa2" + resolved 
"https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz" integrity sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g== dependencies: callsites "^3.0.0" parse-json@^5.0.0: version "5.2.0" - resolved "https://registry.yarnpkg.com/parse-json/-/parse-json-5.2.0.tgz#c76fc66dee54231c962b22bcc8a72cf2f99753cd" + resolved "https://registry.npmjs.org/parse-json/-/parse-json-5.2.0.tgz" integrity sha512-ayCKvm/phCGxOkYRSCM82iDwct8/EonSEgCSxWxD7ve6jHggsFl4fZVQBPRNgQoKiuV/odhFrGzQXZwbifC8Rg== dependencies: "@babel/code-frame" "^7.0.0" @@ -3154,36 +2853,46 @@ parse-json@^5.0.0: json-parse-even-better-errors "^2.3.0" lines-and-columns "^1.1.6" -parse5-htmlparser2-tree-adapter@^6.0.1: - version "6.0.1" - resolved "https://registry.yarnpkg.com/parse5-htmlparser2-tree-adapter/-/parse5-htmlparser2-tree-adapter-6.0.1.tgz#2cdf9ad823321140370d4dbf5d3e92c7c8ddc6e6" - integrity sha512-qPuWvbLgvDGilKc5BoicRovlT4MtYT6JfJyBOMDsKoiT+GiuP5qyrPCnR9HcPECIJJmZh5jRndyNThnhhb/vlA== +parse5-htmlparser2-tree-adapter@^7.1.0: + version "7.1.0" + resolved "https://registry.yarnpkg.com/parse5-htmlparser2-tree-adapter/-/parse5-htmlparser2-tree-adapter-7.1.0.tgz#b5a806548ed893a43e24ccb42fbb78069311e81b" + integrity sha512-ruw5xyKs6lrpo9x9rCZqZZnIUntICjQAd0Wsmp396Ul9lN/h+ifgVV1x1gZHi8euej6wTfpqX8j+BFQxF0NS/g== dependencies: - parse5 "^6.0.1" + domhandler "^5.0.3" + parse5 "^7.0.0" -parse5@^6.0.1: - version "6.0.1" - resolved "https://registry.yarnpkg.com/parse5/-/parse5-6.0.1.tgz#e1a1c085c569b3dc08321184f19a39cc27f7c30b" - integrity sha512-Ofn/CTFzRGTTxwpNEs9PP93gXShHcTq255nzRYSKe8AkVpZY7e1fpmTfOyoIvjP5HG7Z2ZM7VS9PPhQGW2pOpw== +parse5-parser-stream@^7.1.2: + version "7.1.2" + resolved "https://registry.yarnpkg.com/parse5-parser-stream/-/parse5-parser-stream-7.1.2.tgz#d7c20eadc37968d272e2c02660fff92dd27e60e1" + integrity sha512-JyeQc9iwFLn5TbvvqACIF/VXG6abODeB3Fwmv/TGdLk2LfbWkaySGY72at4+Ty7EkPZj854u4CrICqNk2qIbow== + dependencies: + parse5 "^7.0.0" + 
+parse5@^7.0.0, parse5@^7.3.0: + version "7.3.0" + resolved "https://registry.yarnpkg.com/parse5/-/parse5-7.3.0.tgz#d7e224fa72399c7a175099f45fc2ad024b05ec05" + integrity sha512-IInvU7fabl34qmi9gY8XOVxhYyMyuH2xUNpb2q8/Y+7552KlejkRvqvD19nMoUW/uQGGbqNpA6Tufu5FL5BZgw== + dependencies: + entities "^6.0.0" path-is-absolute@^1.0.0: version "1.0.1" - resolved "https://registry.yarnpkg.com/path-is-absolute/-/path-is-absolute-1.0.1.tgz#174b9268735534ffbc7ace6bf53a5a9e1b5c5f5f" + resolved "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz" integrity sha1-F0uSaHNVNP+8es5r9TpanhtcX18= path-parse@^1.0.7: version "1.0.7" - resolved "https://registry.yarnpkg.com/path-parse/-/path-parse-1.0.7.tgz#fbc114b60ca42b30d9daf5858e4bd68bbedb6735" + resolved "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz" integrity sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw== path-type@^4.0.0: version "4.0.0" - resolved "https://registry.yarnpkg.com/path-type/-/path-type-4.0.0.tgz#84ed01c0a7ba380afe09d90a8c180dcd9d03043b" + resolved "https://registry.npmjs.org/path-type/-/path-type-4.0.0.tgz" integrity sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw== path@^0.12.7: version "0.12.7" - resolved "https://registry.yarnpkg.com/path/-/path-0.12.7.tgz#d4dc2a506c4ce2197eb481ebfcd5b36c0140b10f" + resolved "https://registry.npmjs.org/path/-/path-0.12.7.tgz" integrity sha1-1NwqUGxM4hl+tIHr/NWzbAFAsQ8= dependencies: process "^0.11.1" @@ -3191,27 +2900,27 @@ path@^0.12.7: picocolors@^1.0.0, picocolors@^1.1.1: version "1.1.1" - resolved "https://registry.yarnpkg.com/picocolors/-/picocolors-1.1.1.tgz#3d321af3eab939b083c8f929a1d12cda81c26b6b" + resolved "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz" integrity sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA== picomatch@^2.0.4, picomatch@^2.2.1, picomatch@^2.3.1: version "2.3.1" - 
resolved "https://registry.yarnpkg.com/picomatch/-/picomatch-2.3.1.tgz#3ba3833733646d9d3e4995946c1365a67fb07a42" + resolved "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz" integrity sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA== pify@^2.3.0: version "2.3.0" - resolved "https://registry.yarnpkg.com/pify/-/pify-2.3.0.tgz#ed141a6ac043a849ea588498e7dca8b15330e90c" + resolved "https://registry.npmjs.org/pify/-/pify-2.3.0.tgz" integrity sha1-7RQaasBDqEnqWISY59yosVMw6Qw= pify@^4.0.1: version "4.0.1" - resolved "https://registry.yarnpkg.com/pify/-/pify-4.0.1.tgz#4b2cd25c50d598735c50292224fd8c6df41e3231" + resolved "https://registry.npmjs.org/pify/-/pify-4.0.1.tgz" integrity sha512-uB80kBFb/tfd68bVleG9T5GGsGPjJrLAUpR5PZIrhBnIaRTQRjqdJSsIKkOP6OAIFbj7GOrcudc5pNjZ+geV2g== postcss-cli@^8.3.1: version "8.3.1" - resolved "https://registry.yarnpkg.com/postcss-cli/-/postcss-cli-8.3.1.tgz#865dad08300ac59ae9cecb7066780aa81c767a77" + resolved "https://registry.npmjs.org/postcss-cli/-/postcss-cli-8.3.1.tgz" integrity sha512-leHXsQRq89S3JC9zw/tKyiVV2jAhnfQe0J8VI4eQQbUjwIe0XxVqLrR+7UsahF1s9wi4GlqP6SJ8ydf44cgF2Q== dependencies: chalk "^4.0.0" @@ -3228,9 +2937,9 @@ postcss-cli@^8.3.1: yargs "^16.0.0" postcss-import@^14.0.1: - version "14.0.2" - resolved "https://registry.yarnpkg.com/postcss-import/-/postcss-import-14.0.2.tgz#60eff77e6be92e7b67fe469ec797d9424cae1aa1" - integrity sha512-BJ2pVK4KhUyMcqjuKs9RijV5tatNzNa73e/32aBVE/ejYPe37iH+6vAu9WvqUkB5OAYgLHzbSvzHnorybJCm9g== + version "14.1.0" + resolved "https://registry.npmjs.org/postcss-import/-/postcss-import-14.1.0.tgz" + integrity sha512-flwI+Vgm4SElObFVPpTIT7SU7R3qk2L7PyduMcokiaVKuWv9d/U+Gm/QAd8NDLuykTWTkcrjOeD2Pp1rMeBTGw== dependencies: postcss-value-parser "^4.0.0" read-cache "^1.0.0" @@ -3238,92 +2947,92 @@ postcss-import@^14.0.1: postcss-js@^3.0.3: version "3.0.3" - resolved 
"https://registry.yarnpkg.com/postcss-js/-/postcss-js-3.0.3.tgz#2f0bd370a2e8599d45439f6970403b5873abda33" + resolved "https://registry.npmjs.org/postcss-js/-/postcss-js-3.0.3.tgz" integrity sha512-gWnoWQXKFw65Hk/mi2+WTQTHdPD5UJdDXZmX073EY/B3BWnYjO4F4t0VneTCnCGQ5E5GsCdMkzPaTXwl3r5dJw== dependencies: camelcase-css "^2.0.1" postcss "^8.1.6" postcss-load-config@^3.0.0, postcss-load-config@^3.1.0: - version "3.1.3" - resolved "https://registry.yarnpkg.com/postcss-load-config/-/postcss-load-config-3.1.3.tgz#21935b2c43b9a86e6581a576ca7ee1bde2bd1d23" - integrity sha512-5EYgaM9auHGtO//ljHH+v/aC/TQ5LHXtL7bQajNAUBKUVKiYE8rYpFms7+V26D9FncaGe2zwCoPQsFKb5zF/Hw== + version "3.1.4" + resolved "https://registry.npmjs.org/postcss-load-config/-/postcss-load-config-3.1.4.tgz" + integrity sha512-6DiM4E7v4coTE4uzA8U//WhtPwyhiim3eyjEMFCnUpzbrkK9wJHgKDT2mR+HbtSrd/NubVaYTOpSpjUl8NQeRg== dependencies: - lilconfig "^2.0.4" + lilconfig "^2.0.5" yaml "^1.10.2" postcss-nested@5.0.6: version "5.0.6" - resolved "https://registry.yarnpkg.com/postcss-nested/-/postcss-nested-5.0.6.tgz#466343f7fc8d3d46af3e7dba3fcd47d052a945bc" + resolved "https://registry.npmjs.org/postcss-nested/-/postcss-nested-5.0.6.tgz" integrity sha512-rKqm2Fk0KbA8Vt3AdGN0FB9OBOMDVajMG6ZCf/GoHgdxUJ4sBFp0A/uMIRm+MJUdo33YXEtjqIz8u7DAp8B7DA== dependencies: postcss-selector-parser "^6.0.6" postcss-reporter@^7.0.0: version "7.0.5" - resolved "https://registry.yarnpkg.com/postcss-reporter/-/postcss-reporter-7.0.5.tgz#e55bd0fdf8d17e4f25fb55e9143fcd79349a2ceb" + resolved "https://registry.npmjs.org/postcss-reporter/-/postcss-reporter-7.0.5.tgz" integrity sha512-glWg7VZBilooZGOFPhN9msJ3FQs19Hie7l5a/eE6WglzYqVeH3ong3ShFcp9kDWJT1g2Y/wd59cocf9XxBtkWA== dependencies: picocolors "^1.0.0" thenby "^1.3.4" postcss-selector-parser@^6.0.6: - version "6.0.9" - resolved "https://registry.yarnpkg.com/postcss-selector-parser/-/postcss-selector-parser-6.0.9.tgz#ee71c3b9ff63d9cd130838876c13a2ec1a992b2f" - integrity 
sha512-UO3SgnZOVTwu4kyLR22UQ1xZh086RyNZppb7lLAKBFK8a32ttG5i87Y/P3+2bRSjZNyJ1B7hfFNo273tKe9YxQ== + version "6.1.2" + resolved "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-6.1.2.tgz" + integrity sha512-Q8qQfPiZ+THO/3ZrOrO0cJJKfpYCagtMUkXbnEfmgUjwXg6z/WBeOyS9APBBPCTSiDV+s4SwQGu8yFsiMRIudg== dependencies: cssesc "^3.0.0" util-deprecate "^1.0.2" postcss-value-parser@^3.3.0: version "3.3.1" - resolved "https://registry.yarnpkg.com/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz#9ff822547e2893213cf1c30efa51ac5fd1ba8281" + resolved "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz" integrity sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ== postcss-value-parser@^4.0.0, postcss-value-parser@^4.1.0, postcss-value-parser@^4.2.0: version "4.2.0" - resolved "https://registry.yarnpkg.com/postcss-value-parser/-/postcss-value-parser-4.2.0.tgz#723c09920836ba6d3e5af019f92bc0971c02e514" + resolved "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-4.2.0.tgz" integrity sha512-1NNCs6uurfkVbeXG4S8JFT9t19m45ICnif8zWLd5oPSZ50QnwMfK+H3jv408d4jw/7Bttv5axS5IiHoLaVNHeQ== postcss@^8.1.6, postcss@^8.3.5: - version "8.4.31" - resolved "https://registry.yarnpkg.com/postcss/-/postcss-8.4.31.tgz#92b451050a9f914da6755af352bdc0192508656d" - integrity sha512-PS08Iboia9mts/2ygV3eLpY5ghnUcfLV/EXTOW1E2qYxJKGGBUtNjN76FYHnMs36RmARn41bC0AZmn+rR0OVpQ== + version "8.5.6" + resolved "https://registry.yarnpkg.com/postcss/-/postcss-8.5.6.tgz#2825006615a619b4f62a9e7426cc120b349a8f3c" + integrity sha512-3Ybi1tAuwAP9s0r1UQ2J4n5Y0G05bJkpUIO0/bI9MhwmD70S5aTWbXGBwxHrelT+XM1k6dM0pk+SwNkpTRN7Pg== dependencies: - nanoid "^3.3.6" - picocolors "^1.0.0" - source-map-js "^1.0.2" + nanoid "^3.3.11" + picocolors "^1.1.1" + source-map-js "^1.2.1" -preact@^10.0.0: - version "10.6.6" - resolved "https://registry.yarnpkg.com/preact/-/preact-10.6.6.tgz#f1899bc8dab7c0788b858481532cb3b5d764a520" - 
integrity sha512-dgxpTFV2vs4vizwKohYKkk7g7rmp1wOOcfd4Tz3IB3Wi+ivZzsn/SpeKJhRENSE+n8sUfsAl4S3HiCVT923ABw== +preact@^10.13.2: + version "10.28.0" + resolved "https://registry.yarnpkg.com/preact/-/preact-10.28.0.tgz#a851300df42842797046545e4172a4128d158755" + integrity sha512-rytDAoiXr3+t6OIP3WGlDd0ouCUG1iCWzkcY3++Nreuoi17y6T5i/zRhe6uYfoVcxq6YU+sBtJouuRDsq8vvqA== prettier-plugin-go-template@^0.0.15: version "0.0.15" - resolved "https://registry.yarnpkg.com/prettier-plugin-go-template/-/prettier-plugin-go-template-0.0.15.tgz#474952ed72405e528f70bf9cf3f50938c97d8f86" + resolved "https://registry.npmjs.org/prettier-plugin-go-template/-/prettier-plugin-go-template-0.0.15.tgz" integrity sha512-WqU92E1NokWYNZ9mLE6ijoRg6LtIGdLMePt2C7UBDjXeDH9okcRI3zRqtnWR4s5AloiqyvZ66jNBAa9tmRY5EQ== dependencies: ulid "^2.3.0" prettier@^3.5.3: - version "3.5.3" - resolved "https://registry.yarnpkg.com/prettier/-/prettier-3.5.3.tgz#4fc2ce0d657e7a02e602549f053b239cb7dfe1b5" - integrity sha512-QQtaxnoDJeAkDvDKWCLiwIXkTgRhwYDEQCghU9Z6q03iyek/rxRh/2lC3HB7P8sWT2xC/y5JDctPLBIGzHKbhw== + version "3.7.4" + resolved "https://registry.yarnpkg.com/prettier/-/prettier-3.7.4.tgz#d2f8335d4b1cec47e1c8098645411b0c9dff9c0f" + integrity sha512-v6UNi1+3hSlVvv8fSaoUbggEM5VErKmmpGA7Pl3HF8V6uKY7rvClBOJlH6yNwQtfTueNkGVpOv/mtWL9L4bgRA== pretty-hrtime@^1.0.3: version "1.0.3" - resolved "https://registry.yarnpkg.com/pretty-hrtime/-/pretty-hrtime-1.0.3.tgz#b7e3ea42435a4c9b2759d99e0f201eb195802ee1" + resolved "https://registry.npmjs.org/pretty-hrtime/-/pretty-hrtime-1.0.3.tgz" integrity sha1-t+PqQkNaTJsnWdmeDyAesZWALuE= process@^0.11.1: version "0.11.10" - resolved "https://registry.yarnpkg.com/process/-/process-0.11.10.tgz#7332300e840161bda3e69a1d1d91a7d4bc16f182" + resolved "https://registry.npmjs.org/process/-/process-0.11.10.tgz" integrity sha1-czIwDoQBYb2j5podHZGn1LwW8YI= prop-types@^15.7.2: version "15.8.1" - resolved 
"https://registry.yarnpkg.com/prop-types/-/prop-types-15.8.1.tgz#67d87bf1a694f48435cf332c24af10214a3140b5" + resolved "https://registry.npmjs.org/prop-types/-/prop-types-15.8.1.tgz" integrity sha512-oj87CgZICdulUohogVAR7AjlC0327U4el4L6eAvOqCeudMDVU0NThNaV+b9Df4dXgSP1gXMTnPdhfe/2qDH5cg== dependencies: loose-envify "^1.4.0" @@ -3332,12 +3041,12 @@ prop-types@^15.7.2: proxy-from-env@^1.1.0: version "1.1.0" - resolved "https://registry.yarnpkg.com/proxy-from-env/-/proxy-from-env-1.1.0.tgz#e102f16ca355424865755d2c9e8ea4f24d58c3e2" + resolved "https://registry.npmjs.org/proxy-from-env/-/proxy-from-env-1.1.0.tgz" integrity sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg== purgecss@^4.0.3, purgecss@^4.1.3: version "4.1.3" - resolved "https://registry.yarnpkg.com/purgecss/-/purgecss-4.1.3.tgz#683f6a133c8c4de7aa82fe2746d1393b214918f7" + resolved "https://registry.npmjs.org/purgecss/-/purgecss-4.1.3.tgz" integrity sha512-99cKy4s+VZoXnPxaoM23e5ABcP851nC2y2GROkkjS8eJaJtlciGavd7iYAw2V84WeBqggZ12l8ef44G99HmTaw== dependencies: commander "^8.0.0" @@ -3347,54 +3056,52 @@ purgecss@^4.0.3, purgecss@^4.1.3: queue-microtask@^1.2.2: version "1.2.3" - resolved "https://registry.yarnpkg.com/queue-microtask/-/queue-microtask-1.2.3.tgz#4929228bbc724dfac43e0efb058caf7b6cfb6243" + resolved "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz" integrity sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A== quick-lru@^5.1.1: version "5.1.1" - resolved "https://registry.yarnpkg.com/quick-lru/-/quick-lru-5.1.1.tgz#366493e6b3e42a3a6885e2e99d18f80fb7a8c932" + resolved "https://registry.npmjs.org/quick-lru/-/quick-lru-5.1.1.tgz" integrity sha512-WuyALRjWPDGtt/wzJiadO5AXY+8hZ80hVpe6MyivgraREW751X3SbhRvG3eLKOYN+8VEvqLcf3wdnt44Z4S4SA== -react-dom@^17.0.2: - version "17.0.2" - resolved "https://registry.yarnpkg.com/react-dom/-/react-dom-17.0.2.tgz#ecffb6845e3ad8dbfcdc498f0d0a939736502c23" - 
integrity sha512-s4h96KtLDUQlsENhMn1ar8t2bEa+q/YAtj8pPPdIjPDGBDIVNsrD9aXNWqspUe6AzKCIG0C1HZZLqLV7qpOBGA== +react-dom@^19.2.3: + version "19.2.3" + resolved "https://registry.npmjs.org/react-dom/-/react-dom-19.2.3.tgz" + integrity sha512-yELu4WmLPw5Mr/lmeEpox5rw3RETacE++JgHqQzd2dg+YbJuat3jH4ingc+WPZhxaoFzdv9y33G+F7Nl5O0GBg== dependencies: - loose-envify "^1.1.0" - object-assign "^4.1.1" - scheduler "^0.20.2" + scheduler "^0.27.0" react-is@^16.13.1: version "16.13.1" - resolved "https://registry.yarnpkg.com/react-is/-/react-is-16.13.1.tgz#789729a4dc36de2999dc156dd6c1d9c18cea56a4" + resolved "https://registry.npmjs.org/react-is/-/react-is-16.13.1.tgz" integrity sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ== react-is@^17.0.2: version "17.0.2" - resolved "https://registry.yarnpkg.com/react-is/-/react-is-17.0.2.tgz#e691d4a8e9c789365655539ab372762b0efb54f0" + resolved "https://registry.npmjs.org/react-is/-/react-is-17.0.2.tgz" integrity sha512-w2GsyukL62IJnlaff/nRegPQR94C/XXamvMWmSHRJ4y7Ts/4ocGRmTHvOs8PSE6pB3dWOrD/nueuU5sduBsQ4w== -react-spring@^9.2.3: - version "9.4.3" - resolved "https://registry.yarnpkg.com/react-spring/-/react-spring-9.4.3.tgz#3f697d3d6e990dbf7d182619dc75a72a63a302c1" - integrity sha512-GGKAqQQ790JLoA2SAUgdJErFRG8oFR6pzX8jnJoqORVWX5Wo9bJUWs4563f2oN19+yQkVhc77neAkqQ7GCN8Lw== +react-spring@^10.0.3: + version "10.0.3" + resolved "https://registry.npmjs.org/react-spring/-/react-spring-10.0.3.tgz" + integrity sha512-opangIUqCLmkf7+AJZAlM4fLlvzdzWOG/yqAzylKjUoe97Tsjgouz1PsDLu6C9uckvcaMfb4wS/VXiU6dULz5A== dependencies: - "@react-spring/core" "~9.4.3-beta.0" - "@react-spring/konva" "~9.4.3-beta.0" - "@react-spring/native" "~9.4.3-beta.0" - "@react-spring/three" "~9.4.3-beta.0" - "@react-spring/web" "~9.4.3-beta.0" - "@react-spring/zdog" "~9.4.3-beta.0" + "@react-spring/core" "~10.0.3" + "@react-spring/konva" "~10.0.3" + "@react-spring/native" "~10.0.3" + "@react-spring/three" "~10.0.3" + "@react-spring/web" 
"~10.0.3" + "@react-spring/zdog" "~10.0.3" react-universal-interface@^0.6.2: version "0.6.2" - resolved "https://registry.yarnpkg.com/react-universal-interface/-/react-universal-interface-0.6.2.tgz#5e8d438a01729a4dbbcbeeceb0b86be146fe2b3b" + resolved "https://registry.npmjs.org/react-universal-interface/-/react-universal-interface-0.6.2.tgz" integrity sha512-dg8yXdcQmvgR13RIlZbTRQOoUrDciFVoSBZILwjE2LFISxZZ8loVJKAkuzswl5js8BHda79bIb2b84ehU8IjXw== react-use@^17.2.4: - version "17.3.2" - resolved "https://registry.yarnpkg.com/react-use/-/react-use-17.3.2.tgz#448abf515f47c41c32455024db28167cb6e53be8" - integrity sha512-bj7OD0/1wL03KyWmzFXAFe425zziuTf7q8olwCYBfOeFHY1qfO1FAMjROQLsLZYwG4Rx63xAfb7XAbBrJsZmEw== + version "17.6.0" + resolved "https://registry.yarnpkg.com/react-use/-/react-use-17.6.0.tgz#2101a3a79dc965a25866b21f5d6de4b128488a14" + integrity sha512-OmedEScUMKFfzn1Ir8dBxiLLSOzhKe/dPZwVxcujweSj45aNM7BEGPb9BEVIgVEqEXx6f3/TsXzwIktNgUR02g== dependencies: "@types/js-cookie" "^2.2.6" "@xobotyi/scrollbar-width" "^1.9.5" @@ -3402,7 +3109,7 @@ react-use@^17.2.4: fast-deep-equal "^3.1.3" fast-shallow-equal "^1.0.0" js-cookie "^2.2.1" - nano-css "^5.3.1" + nano-css "^5.6.2" react-universal-interface "^0.6.2" resize-observer-polyfill "^1.5.1" screenfull "^5.1.0" @@ -3411,24 +3118,21 @@ react-use@^17.2.4: ts-easing "^0.2.0" tslib "^2.1.0" -react@^17.0.2: - version "17.0.2" - resolved "https://registry.yarnpkg.com/react/-/react-17.0.2.tgz#d0b5cc516d29eb3eee383f75b62864cfb6800037" - integrity sha512-gnhPt75i/dq/z3/6q/0asP78D0u592D5L1pd7M8P+dck6Fu/jJeL6iVVK23fptSUZj8Vjf++7wXA8UNclGQcbA== - dependencies: - loose-envify "^1.1.0" - object-assign "^4.1.1" +react@^19.2.3: + version "19.2.3" + resolved "https://registry.npmjs.org/react/-/react-19.2.3.tgz" + integrity sha512-Ku/hhYbVjOQnXDZFv2+RibmLFGwFdeeKHFcOTlrt7xplBnya5OGn/hIRDsqDiSUcfORsDC7MPxwork8jBwsIWA== read-cache@^1.0.0: version "1.0.0" - resolved 
"https://registry.yarnpkg.com/read-cache/-/read-cache-1.0.0.tgz#e664ef31161166c9751cdbe8dbcf86b5fb58f774" + resolved "https://registry.npmjs.org/read-cache/-/read-cache-1.0.0.tgz" integrity sha1-5mTvMRYRZsl1HNvo28+GtftY93Q= dependencies: pify "^2.3.0" readable-stream@^3.4.0: version "3.6.0" - resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-3.6.0.tgz#337bbda3adc0706bd3e024426a286d4b4b2c9198" + resolved "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.0.tgz" integrity sha512-BViHy7LKeTz4oNnkcLJ+lVSL6vpiFeX6/d3oSH8zCW7UxP2onchk+vTGB143xuFjHS3deTgkKoXXymXqymiIdA== dependencies: inherits "^2.0.3" @@ -3437,7 +3141,7 @@ readable-stream@^3.4.0: readable-stream@^3.6.2: version "3.6.2" - resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-3.6.2.tgz#56a9b36ea965c00c5a93ef31eb111a0f11056967" + resolved "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.2.tgz" integrity sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA== dependencies: inherits "^2.0.3" @@ -3446,14 +3150,14 @@ readable-stream@^3.6.2: readdirp@~3.6.0: version "3.6.0" - resolved "https://registry.yarnpkg.com/readdirp/-/readdirp-3.6.0.tgz#74a370bd857116e245b29cc97340cd431a02a6c7" + resolved "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz" integrity sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA== dependencies: picomatch "^2.2.1" reduce-css-calc@^2.1.8: version "2.1.8" - resolved "https://registry.yarnpkg.com/reduce-css-calc/-/reduce-css-calc-2.1.8.tgz#7ef8761a28d614980dc0c982f772c93f7a99de03" + resolved "https://registry.npmjs.org/reduce-css-calc/-/reduce-css-calc-2.1.8.tgz" integrity sha512-8liAVezDmUcH+tdzoEGrhfbGcP7nOV4NkGE3a74+qqvE7nt9i4sKLGBuZNOnpI4WiGksiNPklZxva80061QiPg== dependencies: css-unit-converter "^1.1.1" @@ -3461,36 +3165,31 @@ reduce-css-calc@^2.1.8: regenerate-unicode-properties@^10.2.0: version "10.2.0" - resolved 
"https://registry.yarnpkg.com/regenerate-unicode-properties/-/regenerate-unicode-properties-10.2.0.tgz#626e39df8c372338ea9b8028d1f99dc3fd9c3db0" + resolved "https://registry.npmjs.org/regenerate-unicode-properties/-/regenerate-unicode-properties-10.2.0.tgz" integrity sha512-DqHn3DwbmmPVzeKj9woBadqmXxLvQoQIwu7nopMc72ztvxVmVk2SBhSnx67zuye5TP+lJsb/TBQsjLKhnDf3MA== dependencies: regenerate "^1.4.2" +regenerate-unicode-properties@^10.2.2: + version "10.2.2" + resolved "https://registry.yarnpkg.com/regenerate-unicode-properties/-/regenerate-unicode-properties-10.2.2.tgz#aa113812ba899b630658c7623466be71e1f86f66" + integrity sha512-m03P+zhBeQd1RGnYxrGyDAPpWX/epKirLrp8e3qevZdVkKtnCrjjWczIbYc8+xd6vcTStVlqfycTx1KR4LOr0g== + dependencies: + regenerate "^1.4.2" + regenerate@^1.4.2: version "1.4.2" - resolved "https://registry.yarnpkg.com/regenerate/-/regenerate-1.4.2.tgz#b9346d8827e8f5a32f7ba29637d398b69014848a" + resolved "https://registry.npmjs.org/regenerate/-/regenerate-1.4.2.tgz" integrity sha512-zrceR/XhGYU/d/opr2EKO7aRHUeiBI8qjtfHqADTwZd6Szfy16la6kqD0MIUs5z5hx6AaKa+PixpPrR289+I0A== regenerator-runtime@^0.13.7: version "0.13.11" - resolved "https://registry.yarnpkg.com/regenerator-runtime/-/regenerator-runtime-0.13.11.tgz#f6dca3e7ceec20590d07ada785636a90cdca17f9" + resolved "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.13.11.tgz" integrity sha512-kY1AZVr2Ra+t+piVaJ4gxaFaReZVH40AKNo7UCX6W+dEwBo/2oZJzqfuN1qLq1oL45o56cPaTXELwrTh8Fpggg== -regenerator-runtime@^0.14.0: - version "0.14.1" - resolved "https://registry.yarnpkg.com/regenerator-runtime/-/regenerator-runtime-0.14.1.tgz#356ade10263f685dda125100cd862c1db895327f" - integrity sha512-dYnhHh0nJoMfnkZs6GmmhFknAGRrLznOu5nc9ML+EJxGvrx6H7teuevqVqCuPcPK//3eDrrjQhehXVx9cnkGdw== - -regenerator-transform@^0.15.2: - version "0.15.2" - resolved "https://registry.yarnpkg.com/regenerator-transform/-/regenerator-transform-0.15.2.tgz#5bbae58b522098ebdf09bca2f83838929001c7a4" - integrity 
sha512-hfMp2BoF0qOk3uc5V20ALGDS2ddjQaLrdl7xrGXvAIow7qeWRM2VA2HuCHkUKk9slq3VwEwLNK3DFBqDfPGYtg== - dependencies: - "@babel/runtime" "^7.8.4" - regexpu-core@^6.2.0: version "6.2.0" - resolved "https://registry.yarnpkg.com/regexpu-core/-/regexpu-core-6.2.0.tgz#0e5190d79e542bf294955dccabae04d3c7d53826" + resolved "https://registry.npmjs.org/regexpu-core/-/regexpu-core-6.2.0.tgz" integrity sha512-H66BPQMrv+V16t8xtmq+UC0CBpiTBA60V8ibS1QVReIp8T1z8hwFxqcGzm9K6lgsN7sB5edVH8a+ze6Fqm4weA== dependencies: regenerate "^1.4.2" @@ -3500,148 +3199,160 @@ regexpu-core@^6.2.0: unicode-match-property-ecmascript "^2.0.0" unicode-match-property-value-ecmascript "^2.1.0" +regexpu-core@^6.3.1: + version "6.4.0" + resolved "https://registry.yarnpkg.com/regexpu-core/-/regexpu-core-6.4.0.tgz#3580ce0c4faedef599eccb146612436b62a176e5" + integrity sha512-0ghuzq67LI9bLXpOX/ISfve/Mq33a4aFRzoQYhnnok1JOFpmE/A2TBGkNVenOGEeSBCjIiWcc6MVOG5HEQv0sA== + dependencies: + regenerate "^1.4.2" + regenerate-unicode-properties "^10.2.2" + regjsgen "^0.8.0" + regjsparser "^0.13.0" + unicode-match-property-ecmascript "^2.0.0" + unicode-match-property-value-ecmascript "^2.2.1" + regjsgen@^0.8.0: version "0.8.0" - resolved "https://registry.yarnpkg.com/regjsgen/-/regjsgen-0.8.0.tgz#df23ff26e0c5b300a6470cad160a9d090c3a37ab" + resolved "https://registry.npmjs.org/regjsgen/-/regjsgen-0.8.0.tgz" integrity sha512-RvwtGe3d7LvWiDQXeQw8p5asZUmfU1G/l6WbUXeHta7Y2PEIvBTwH6E2EfmYUK8pxcxEdEmaomqyp0vZZ7C+3Q== regjsparser@^0.12.0: version "0.12.0" - resolved "https://registry.yarnpkg.com/regjsparser/-/regjsparser-0.12.0.tgz#0e846df6c6530586429377de56e0475583b088dc" + resolved "https://registry.npmjs.org/regjsparser/-/regjsparser-0.12.0.tgz" integrity sha512-cnE+y8bz4NhMjISKbgeVJtqNbtf5QpjZP+Bslo+UqkIt9QPnX9q095eiRRASJG1/tz6dlNr6Z5NsBiWYokp6EQ== dependencies: jsesc "~3.0.2" +regjsparser@^0.13.0: + version "0.13.0" + resolved 
"https://registry.yarnpkg.com/regjsparser/-/regjsparser-0.13.0.tgz#01f8351335cf7898d43686bc74d2dd71c847ecc0" + integrity sha512-NZQZdC5wOE/H3UT28fVGL+ikOZcEzfMGk/c3iN9UGxzWHMa1op7274oyiUVrAG4B2EuFhus8SvkaYnhvW92p9Q== + dependencies: + jsesc "~3.1.0" + require-directory@^2.1.1: version "2.1.1" - resolved "https://registry.yarnpkg.com/require-directory/-/require-directory-2.1.1.tgz#8c64ad5fd30dab1c976e2344ffe7f792a6a6df42" + resolved "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz" integrity sha1-jGStX9MNqxyXbiNE/+f3kqam30I= resize-observer-polyfill@^1.5.1: version "1.5.1" - resolved "https://registry.yarnpkg.com/resize-observer-polyfill/-/resize-observer-polyfill-1.5.1.tgz#0e9020dd3d21024458d4ebd27e23e40269810464" + resolved "https://registry.npmjs.org/resize-observer-polyfill/-/resize-observer-polyfill-1.5.1.tgz" integrity sha512-LwZrotdHOo12nQuZlHEmtuXdqGoOD0OhaxopaNFxWzInpEgaLWoVuAMbTzixuosCx2nEG58ngzW3vxdWoxIgdg== resolve-from@^4.0.0: version "4.0.0" - resolved "https://registry.yarnpkg.com/resolve-from/-/resolve-from-4.0.0.tgz#4abcd852ad32dd7baabfe9b40e00a36db5f392e6" + resolved "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz" integrity sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g== -resolve@^1.1.7, resolve@^1.12.0, resolve@^1.20.0: - version "1.22.0" - resolved "https://registry.yarnpkg.com/resolve/-/resolve-1.22.0.tgz#5e0b8c67c15df57a89bdbabe603a002f21731198" - integrity sha512-Hhtrw0nLeSrFQ7phPp4OOcVjLPIeMnRlr5mcnVuMe7M/7eBn98A3hmFRLoFo3DLZkivSYwhRUJTyPyWAk56WLw== +resolve@^1.1.7, resolve@^1.14.2, resolve@^1.19.0, resolve@^1.20.0, resolve@^1.22.10: + version "1.22.11" + resolved "https://registry.yarnpkg.com/resolve/-/resolve-1.22.11.tgz#aad857ce1ffb8bfa9b0b1ac29f1156383f68c262" + integrity sha512-RfqAvLnMl313r7c9oclB1HhUEAezcpLjz95wFH4LVuhk9JF/r22qmVP9AMmOU4vMX7Q8pN8jwNg/CSpdFnMjTQ== dependencies: - is-core-module "^2.8.1" - path-parse "^1.0.7" - 
supports-preserve-symlinks-flag "^1.0.0" - -resolve@^1.14.2: - version "1.22.10" - resolved "https://registry.yarnpkg.com/resolve/-/resolve-1.22.10.tgz#b663e83ffb09bbf2386944736baae803029b8b39" - integrity sha512-NPRy+/ncIMeDlTAsuqwKIiferiawhefFJtkNSW0qZJEqMEb+qBt/77B/jGeeek+F0uOeN05CDa6HXbbIgtVX4w== - dependencies: - is-core-module "^2.16.0" + is-core-module "^2.16.1" path-parse "^1.0.7" supports-preserve-symlinks-flag "^1.0.0" reusify@^1.0.4: - version "1.0.4" - resolved "https://registry.yarnpkg.com/reusify/-/reusify-1.0.4.tgz#90da382b1e126efc02146e90845a88db12925d76" - integrity sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw== + version "1.1.0" + resolved "https://registry.yarnpkg.com/reusify/-/reusify-1.1.0.tgz#0fe13b9522e1473f51b558ee796e08f11f9b489f" + integrity sha512-g6QUff04oZpHs0eG5p83rFLhHeV00ug/Yf9nZM6fLeUrPguBTkTQOdpAWWspMh55TZfVQDPaN3NQJfbVRAxdIw== rgb-regex@^1.0.1: version "1.0.1" - resolved "https://registry.yarnpkg.com/rgb-regex/-/rgb-regex-1.0.1.tgz#c0e0d6882df0e23be254a475e8edd41915feaeb1" - integrity sha1-wODWiC3w4jviVKR16O3UGRX+rrE= + resolved "https://registry.npmjs.org/rgb-regex/-/rgb-regex-1.0.1.tgz" + integrity sha512-gDK5mkALDFER2YLqH6imYvK6g02gpNGM4ILDZ472EwWfXZnC2ZEpoB2ECXTyOVUKuk/bPJZMzwQPBYICzP+D3w== rgba-regex@^1.0.0: version "1.0.0" - resolved "https://registry.yarnpkg.com/rgba-regex/-/rgba-regex-1.0.0.tgz#43374e2e2ca0968b0ef1523460b7d730ff22eeb3" - integrity sha1-QzdOLiyglosO8VI0YLfXMP8i7rM= + resolved "https://registry.npmjs.org/rgba-regex/-/rgba-regex-1.0.0.tgz" + integrity sha512-zgn5OjNQXLUTdq8m17KdaicF6w89TZs8ZU8y0AYENIU6wG8GG6LLm0yLSiPY8DmaYmHdgRW8rnApjoT0fQRfMg== -rtl-css-js@^1.14.0: - version "1.15.0" - resolved "https://registry.yarnpkg.com/rtl-css-js/-/rtl-css-js-1.15.0.tgz#680ed816e570a9ebccba9e1cd0f202c6a8bb2dc0" - integrity sha512-99Cu4wNNIhrI10xxUaABHsdDqzalrSRTie4GeCmbGVuehm4oj+fIy8fTzB+16pmKe8Bv9rl+hxIBez6KxExTew== +rtl-css-js@^1.16.1: + version "1.16.1" + resolved 
"https://registry.npmjs.org/rtl-css-js/-/rtl-css-js-1.16.1.tgz" + integrity sha512-lRQgou1mu19e+Ya0LsTvKrVJ5TYUbqCVPAiImX3UfLTenarvPUl1QFdvu5Z3PYmHT9RCcwIfbjRQBntExyj3Zg== dependencies: "@babel/runtime" "^7.1.2" run-parallel@^1.1.9: version "1.2.0" - resolved "https://registry.yarnpkg.com/run-parallel/-/run-parallel-1.2.0.tgz#66d1368da7bdf921eb9d95bd1a9229e7f21a43ee" + resolved "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz" integrity sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA== dependencies: queue-microtask "^1.2.2" safe-buffer@~5.1.1: version "5.1.2" - resolved "https://registry.yarnpkg.com/safe-buffer/-/safe-buffer-5.1.2.tgz#991ec69d296e0313747d59bdfd2b745c35f8828d" + resolved "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz" integrity sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g== safe-buffer@~5.2.0: version "5.2.1" - resolved "https://registry.yarnpkg.com/safe-buffer/-/safe-buffer-5.2.1.tgz#1eaf9fa9bdb1fdd4ec75f58f9cdb4e6b7827eec6" + resolved "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz" integrity sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ== safe-stable-stringify@^2.3.1: version "2.5.0" - resolved "https://registry.yarnpkg.com/safe-stable-stringify/-/safe-stable-stringify-2.5.0.tgz#4ca2f8e385f2831c432a719b108a3bf7af42a1dd" + resolved "https://registry.npmjs.org/safe-stable-stringify/-/safe-stable-stringify-2.5.0.tgz" integrity sha512-b3rppTKm9T+PsVCBEOUR46GWI7fdOs00VKZ1+9c1EWDaDMvjQc6tUwuFyIprgGgTcWoVHSKrU8H31ZHA2e0RHA== -scheduler@^0.20.2: - version "0.20.2" - resolved "https://registry.yarnpkg.com/scheduler/-/scheduler-0.20.2.tgz#4baee39436e34aa93b4874bddcbf0fe8b8b50e91" - integrity sha512-2eWfGgAqqWFGqtdMmcL5zCMK1U8KlXv8SQFGglL3CEtd0aDVDWgeF/YoCmvln55m5zSk3J/20hTaSBeSObsQDQ== - dependencies: - loose-envify "^1.1.0" - object-assign "^4.1.1" +"safer-buffer@>= 
2.1.2 < 3.0.0": + version "2.1.2" + resolved "https://registry.yarnpkg.com/safer-buffer/-/safer-buffer-2.1.2.tgz#44fa161b0187b9549dd84bb91802f9bd8385cd6a" + integrity sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg== + +scheduler@^0.27.0: + version "0.27.0" + resolved "https://registry.npmjs.org/scheduler/-/scheduler-0.27.0.tgz" + integrity sha512-eNv+WrVbKu1f3vbYJT/xtiF5syA5HPIMtf9IgY/nKg0sWqzAUEvqY/xm7OcZc/qafLx/iO9FgOmeSAp4v5ti/Q== screenfull@^5.1.0: version "5.2.0" - resolved "https://registry.yarnpkg.com/screenfull/-/screenfull-5.2.0.tgz#6533d524d30621fc1283b9692146f3f13a93d1ba" + resolved "https://registry.npmjs.org/screenfull/-/screenfull-5.2.0.tgz" integrity sha512-9BakfsO2aUQN2K9Fdbj87RJIEZ82Q9IGim7FqM5OsebfoFC6ZHXgDq/KvniuLTPdeM8wY2o6Dj3WQ7KeQCj3cA== semver@^5.6.0: version "5.7.2" - resolved "https://registry.yarnpkg.com/semver/-/semver-5.7.2.tgz#48d55db737c3287cd4835e17fa13feace1c41ef8" + resolved "https://registry.npmjs.org/semver/-/semver-5.7.2.tgz" integrity sha512-cBznnQ9KjJqU67B52RMC65CMarK2600WFnbkcaiwWq3xy/5haFJlshgnpjovMVJ+Hff49d8GEn0b87C5pDQ10g== semver@^6.1.1, semver@^6.1.2, semver@^6.3.0, semver@^6.3.1: version "6.3.1" - resolved "https://registry.yarnpkg.com/semver/-/semver-6.3.1.tgz#556d2ef8689146e46dcea4bfdd095f3434dffcb4" + resolved "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz" integrity sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA== set-harmonic-interval@^1.0.1: version "1.0.1" - resolved "https://registry.yarnpkg.com/set-harmonic-interval/-/set-harmonic-interval-1.0.1.tgz#e1773705539cdfb80ce1c3d99e7f298bb3995249" + resolved "https://registry.npmjs.org/set-harmonic-interval/-/set-harmonic-interval-1.0.1.tgz" integrity sha512-AhICkFV84tBP1aWqPwLZqFvAwqEoVA9kxNMniGEUvzOlm4vLmOFLiTT3UZ6bziJTy4bOVpzWGTfSCbmaayGx8g== simple-swizzle@^0.2.2: version "0.2.2" - resolved 
"https://registry.yarnpkg.com/simple-swizzle/-/simple-swizzle-0.2.2.tgz#a4da6b635ffcccca33f70d17cb92592de95e557a" + resolved "https://registry.npmjs.org/simple-swizzle/-/simple-swizzle-0.2.2.tgz" integrity sha1-pNprY1/8zMoz9w0Xy5JZLeleVXo= dependencies: is-arrayish "^0.3.1" slash@^2.0.0: version "2.0.0" - resolved "https://registry.yarnpkg.com/slash/-/slash-2.0.0.tgz#de552851a1759df3a8f206535442f5ec4ddeab44" + resolved "https://registry.npmjs.org/slash/-/slash-2.0.0.tgz" integrity sha512-ZYKh3Wh2z1PpEXWr0MpSBZ0V6mZHAQfYevttO11c51CaWjGTaadiKZ+wVt1PbMlDV5qhMFslpZCemhwOK7C89A== slash@^3.0.0: version "3.0.0" - resolved "https://registry.yarnpkg.com/slash/-/slash-3.0.0.tgz#6539be870c165adbd5240220dbe361f1bc4d4634" + resolved "https://registry.npmjs.org/slash/-/slash-3.0.0.tgz" integrity sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q== -source-map-js@^1.0.2: - version "1.0.2" - resolved "https://registry.yarnpkg.com/source-map-js/-/source-map-js-1.0.2.tgz#adbc361d9c62df380125e7f161f71c826f1e490c" - integrity sha512-R0XvVJ9WusLiqTCEiGCmICCMplcCkIwwR11mOSD9CR5u+IXYdiseeEuXCVAjS54zqwkLcPNnmU4OeJ6tUrWhDw== +source-map-js@^1.2.1: + version "1.2.1" + resolved "https://registry.yarnpkg.com/source-map-js/-/source-map-js-1.2.1.tgz#1ce5650fddd87abc099eda37dcff024c2667ae46" + integrity sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA== source-map-support@^0.5.17: version "0.5.21" - resolved "https://registry.yarnpkg.com/source-map-support/-/source-map-support-0.5.21.tgz#04fe7c7f9e1ed2d662233c28cb2b35b9f63f6e4f" + resolved "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.21.tgz" integrity sha512-uBHU3L3czsIyYXKX88fdrGovxdSCoTGDRZ6SYXtSRxLZUzHg5P/66Ht6uoUlHu9EZod+inXhKo3qQgwXUT/y1w== dependencies: buffer-from "^1.0.0" @@ -3649,52 +3360,47 @@ source-map-support@^0.5.17: source-map@0.5.6: version "0.5.6" - resolved 
"https://registry.yarnpkg.com/source-map/-/source-map-0.5.6.tgz#75ce38f52bf0733c5a7f0c118d81334a2bb5f412" - integrity sha1-dc449SvwczxafwwRjYEzSiu19BI= + resolved "https://registry.npmjs.org/source-map/-/source-map-0.5.6.tgz" + integrity sha512-MjZkVp0NHr5+TPihLcadqnlVoGIoWo4IBHptutGh9wI3ttUYvCG26HkSuDi+K6lsZ25syXJXcctwgyVCt//xqA== -source-map@^0.5.0, source-map@^0.5.7: +source-map@^0.5.7: version "0.5.7" - resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.5.7.tgz#8a039d2d1021d22d1ea14c80d8ea468ba2ef3fcc" + resolved "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz" integrity sha512-LbrmJOMUSdEVxIKvdcJzQC+nQhe8FUZQTXQy6+I75skNgn3OoQ0DZA8YnFa7gp8tqtL3KPf1kmo0R5DoApeSGQ== source-map@^0.6.0, source-map@^0.6.1: version "0.6.1" - resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.6.1.tgz#74722af32e9614e9c287a8d0bbde48b5e2f1a263" + resolved "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz" integrity sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g== -sourcemap-codec@^1.4.8: - version "1.4.8" - resolved "https://registry.yarnpkg.com/sourcemap-codec/-/sourcemap-codec-1.4.8.tgz#ea804bd94857402e6992d05a38ef1ae35a9ab4c4" - integrity sha512-9NykojV5Uih4lgo5So5dtw+f0JgJX30KCNI8gwhz2J9A15wD0Ml6tjHKwf6fTSa6fAdVBdZeNOs9eJ71qCk8vA== - stack-generator@^2.0.5: - version "2.0.5" - resolved "https://registry.yarnpkg.com/stack-generator/-/stack-generator-2.0.5.tgz#fb00e5b4ee97de603e0773ea78ce944d81596c36" - integrity sha512-/t1ebrbHkrLrDuNMdeAcsvynWgoH/i4o8EGGfX7dEYDoTXOYVAkEpFdtshlvabzc6JlJ8Kf9YdFEoz7JkzGN9Q== + version "2.0.10" + resolved "https://registry.npmjs.org/stack-generator/-/stack-generator-2.0.10.tgz" + integrity sha512-mwnua/hkqM6pF4k8SnmZ2zfETsRUpWXREfA/goT8SLCV4iOFa4bzOX2nDipWAZFPTjLvQB82f5yaodMVhK0yJQ== dependencies: - stackframe "^1.1.1" + stackframe "^1.3.4" stack-trace@0.0.x: version "0.0.10" - resolved 
"https://registry.yarnpkg.com/stack-trace/-/stack-trace-0.0.10.tgz#547c70b347e8d32b4e108ea1a2a159e5fdde19c0" + resolved "https://registry.npmjs.org/stack-trace/-/stack-trace-0.0.10.tgz" integrity sha512-KGzahc7puUKkzyMt+IqAep+TVNbKP+k2Lmwhub39m1AsTSkaDutx56aDCo+HLDzf/D26BIHTJWNiTG1KAJiQCg== -stackframe@^1.1.1: - version "1.2.1" - resolved "https://registry.yarnpkg.com/stackframe/-/stackframe-1.2.1.tgz#1033a3473ee67f08e2f2fc8eba6aef4f845124e1" - integrity sha512-h88QkzREN/hy8eRdyNhhsO7RSJ5oyTqxxmmn0dzBIMUclZsjpfmrsg81vp8mjjAs2vAZ72nyWxRUwSwmh0e4xg== +stackframe@^1.3.4: + version "1.3.4" + resolved "https://registry.npmjs.org/stackframe/-/stackframe-1.3.4.tgz" + integrity sha512-oeVtt7eWQS+Na6F//S4kJ2K2VbRlS9D43mAlMyVpVWovy9o+jfgH8O9agzANzaiLjclA0oYzUXEM4PurhSUChw== stacktrace-gps@^3.0.4: - version "3.0.4" - resolved "https://registry.yarnpkg.com/stacktrace-gps/-/stacktrace-gps-3.0.4.tgz#7688dc2fc09ffb3a13165ebe0dbcaf41bcf0c69a" - integrity sha512-qIr8x41yZVSldqdqe6jciXEaSCKw1U8XTXpjDuy0ki/apyTn/r3w9hDAAQOhZdxvsC93H+WwwEu5cq5VemzYeg== + version "3.1.2" + resolved "https://registry.npmjs.org/stacktrace-gps/-/stacktrace-gps-3.1.2.tgz" + integrity sha512-GcUgbO4Jsqqg6RxfyTHFiPxdPqF+3LFmQhm7MgCuYQOYuWyqxo5pwRPz5d/u6/WYJdEnWfK4r+jGbyD8TSggXQ== dependencies: source-map "0.5.6" - stackframe "^1.1.1" + stackframe "^1.3.4" stacktrace-js@^2.0.2: version "2.0.2" - resolved "https://registry.yarnpkg.com/stacktrace-js/-/stacktrace-js-2.0.2.tgz#4ca93ea9f494752d55709a081d400fdaebee897b" + resolved "https://registry.npmjs.org/stacktrace-js/-/stacktrace-js-2.0.2.tgz" integrity sha512-Je5vBeY4S1r/RnLydLl0TBTi3F2qdfWmYsGvtfZgEI+SCprPppaIhQf5nGcal4gI4cGpCV/duLcAzT1np6sQqg== dependencies: error-stack-parser "^2.0.6" @@ -3703,7 +3409,7 @@ stacktrace-js@^2.0.2: string-width@^4.1.0, string-width@^4.2.0, string-width@^4.2.3: version "4.2.3" - resolved "https://registry.yarnpkg.com/string-width/-/string-width-4.2.3.tgz#269c7117d27b05ad2e536830a8ec895ef9c6d010" + resolved 
"https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz" integrity sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g== dependencies: emoji-regex "^8.0.0" @@ -3712,45 +3418,43 @@ string-width@^4.1.0, string-width@^4.2.0, string-width@^4.2.3: string_decoder@^1.1.1: version "1.3.0" - resolved "https://registry.yarnpkg.com/string_decoder/-/string_decoder-1.3.0.tgz#42f114594a46cf1a8e30b0a84f56c78c3edac21e" + resolved "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz" integrity sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA== dependencies: safe-buffer "~5.2.0" strip-ansi@^6.0.0, strip-ansi@^6.0.1: version "6.0.1" - resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-6.0.1.tgz#9e26c63d30f53443e9489495b2105d37b67a85d9" + resolved "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz" integrity sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A== dependencies: ansi-regex "^5.0.1" -stylis@4.0.13, stylis@^4.0.6: - version "4.0.13" - resolved "https://registry.yarnpkg.com/stylis/-/stylis-4.0.13.tgz#f5db332e376d13cc84ecfe5dace9a2a51d954c91" - integrity sha512-xGPXiFVl4YED9Jh7Euv2V220mriG9u4B2TA6Ybjc1catrstKD2PpIdU3U0RKpkVBC2EhmL/F0sPCr9vrFTNRag== +stylis@4.2.0: + version "4.2.0" + resolved "https://registry.yarnpkg.com/stylis/-/stylis-4.2.0.tgz#79daee0208964c8fe695a42fcffcac633a211a51" + integrity sha512-Orov6g6BB1sDfYgzWfTHDOxamtX1bE/zo104Dh9e6fqJ3PooipYyfJ0pUmrZO2wAvO8YbEyeFrkV91XTsGMSrw== -supports-color@^5.3.0: - version "5.5.0" - resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-5.5.0.tgz#e2e69a44ac8772f78a1ec0b35b689df6530efc8f" - integrity sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow== - dependencies: - has-flag "^3.0.0" +stylis@^4.3.0: + version "4.3.6" + resolved "https://registry.npmjs.org/stylis/-/stylis-4.3.6.tgz" + 
integrity sha512-yQ3rwFWRfwNUY7H5vpU0wfdkNSnvnJinhF9830Swlaxl03zsOjCfmX0ugac+3LtK0lYSgwL/KXc8oYL3mG4YFQ== supports-color@^7.1.0: version "7.2.0" - resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-7.2.0.tgz#1b7dcdcb32b8138801b3e478ba6a51caa89648da" + resolved "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz" integrity sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw== dependencies: has-flag "^4.0.0" supports-preserve-symlinks-flag@^1.0.0: version "1.0.0" - resolved "https://registry.yarnpkg.com/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz#6eda4bd344a3c94aea376d4cc31bc77311039e09" + resolved "https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz" integrity sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w== tailwindcss@^2.2.4: version "2.2.19" - resolved "https://registry.yarnpkg.com/tailwindcss/-/tailwindcss-2.2.19.tgz#540e464832cd462bb9649c1484b0a38315c2653c" + resolved "https://registry.npmjs.org/tailwindcss/-/tailwindcss-2.2.19.tgz" integrity sha512-6Ui7JSVtXadtTUo2NtkBBacobzWiQYVjYW0ZnKaP9S1ZCKQ0w7KVNz+YSDI/j7O7KCMHbOkz94ZMQhbT9pOqjw== dependencies: arg "^5.0.1" @@ -3788,61 +3492,61 @@ tailwindcss@^2.2.4: text-hex@1.0.x: version "1.0.0" - resolved "https://registry.yarnpkg.com/text-hex/-/text-hex-1.0.0.tgz#69dc9c1b17446ee79a92bf5b884bb4b9127506f5" + resolved "https://registry.npmjs.org/text-hex/-/text-hex-1.0.0.tgz" integrity sha512-uuVGNWzgJ4yhRaNSiubPY7OjISw4sw4E5Uv0wbjp+OzcbmVU/rsT8ujgcXJhn9ypzsgr5vlzpPqP+MBBKcGvbg== thenby@^1.3.4: version "1.3.4" - resolved "https://registry.yarnpkg.com/thenby/-/thenby-1.3.4.tgz#81581f6e1bb324c6dedeae9bfc28e59b1a2201cc" + resolved "https://registry.npmjs.org/thenby/-/thenby-1.3.4.tgz" integrity sha512-89Gi5raiWA3QZ4b2ePcEwswC3me9JIg+ToSgtE0JWeCynLnLxNr/f9G+xfo9K+Oj4AFdom8YNJjibIARTJmapQ== throttle-debounce@^3.0.1: 
version "3.0.1" - resolved "https://registry.yarnpkg.com/throttle-debounce/-/throttle-debounce-3.0.1.tgz#32f94d84dfa894f786c9a1f290e7a645b6a19abb" + resolved "https://registry.npmjs.org/throttle-debounce/-/throttle-debounce-3.0.1.tgz" integrity sha512-dTEWWNu6JmeVXY0ZYoPuH5cRIwc0MeGbJwah9KUNYSJwommQpCzTySTpEe8Gs1J23aeWEuAobe4Ag7EHVt/LOg== tmp@^0.2.1: - version "0.2.4" - resolved "https://registry.yarnpkg.com/tmp/-/tmp-0.2.4.tgz#c6db987a2ccc97f812f17137b36af2b6521b0d13" - integrity sha512-UdiSoX6ypifLmrfQ/XfiawN6hkjSBpCjhKxxZcWlUUmoXLaCKQU0bx4HF/tdDK2uzRuchf1txGvrWBzYREssoQ== + version "0.2.5" + resolved "https://registry.npmjs.org/tmp/-/tmp-0.2.5.tgz" + integrity sha512-voyz6MApa1rQGUxT3E+BK7/ROe8itEx7vD8/HEvt4xwXucvQ5G5oeEiHkmHZJuBO21RpOf+YYm9MOivj709jow== to-regex-range@^5.0.1: version "5.0.1" - resolved "https://registry.yarnpkg.com/to-regex-range/-/to-regex-range-5.0.1.tgz#1648c44aae7c8d988a326018ed72f5b4dd0392e4" + resolved "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz" integrity sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ== dependencies: is-number "^7.0.0" tocbot@^4.12.2: - version "4.18.0" - resolved "https://registry.yarnpkg.com/tocbot/-/tocbot-4.18.0.tgz#d389a8d24a1131e36120c780e30923a8e4da3195" - integrity sha512-QVuj2MV73j2F5brWnIaxdOXez8lVxQqDsK/VTzCQsV79GAessKEJXEYmXL21LcGWr54uZTHBVV8gIygXTbsrbg== + version "4.36.4" + resolved "https://registry.yarnpkg.com/tocbot/-/tocbot-4.36.4.tgz#5f82976a2455e5abff44bdff84c89443dc23180f" + integrity sha512-ffznkKnZ1NdghwR1y8hN6W7kjn4FwcXq32Z1mn35gA7jd8dt2cTVAwL3d0BXXZGPu0Hd0evverUvcYAb/7vn0g== toggle-selection@^1.0.6: version "1.0.6" - resolved "https://registry.yarnpkg.com/toggle-selection/-/toggle-selection-1.0.6.tgz#6e45b1263f2017fa0acc7d89d78b15b8bf77da32" + resolved "https://registry.npmjs.org/toggle-selection/-/toggle-selection-1.0.6.tgz" integrity sha1-bkWxJj8gF/oKzH2J14sVuL932jI= topojson-client@^3.1.0: version "3.1.0" - resolved 
"https://registry.yarnpkg.com/topojson-client/-/topojson-client-3.1.0.tgz#22e8b1ed08a2b922feeb4af6f53b6ef09a467b99" + resolved "https://registry.npmjs.org/topojson-client/-/topojson-client-3.1.0.tgz" integrity sha512-605uxS6bcYxGXw9qi62XyrV6Q3xwbndjachmNxu8HWTtVPxZfEJN9fd/SZS1Q54Sn2y0TMyMxFj/cJINqGHrKw== dependencies: commander "2" triple-beam@^1.3.0: version "1.4.1" - resolved "https://registry.yarnpkg.com/triple-beam/-/triple-beam-1.4.1.tgz#6fde70271dc6e5d73ca0c3b24e2d92afb7441984" + resolved "https://registry.npmjs.org/triple-beam/-/triple-beam-1.4.1.tgz" integrity sha512-aZbgViZrg1QNcG+LULa7nhZpJTZSLm/mXnHXnbAbjmN5aSa0y7V+wvv6+4WaBtpISJzThKy+PIPxc1Nq1EJ9mg== ts-easing@^0.2.0: version "0.2.0" - resolved "https://registry.yarnpkg.com/ts-easing/-/ts-easing-0.2.0.tgz#c8a8a35025105566588d87dbda05dd7fbfa5a4ec" + resolved "https://registry.npmjs.org/ts-easing/-/ts-easing-0.2.0.tgz" integrity sha512-Z86EW+fFFh/IFB1fqQ3/+7Zpf9t2ebOAxNI/V6Wo7r5gqiqtxmgTlQ1qbqQcjLKYeSHPTsEmvlJUDg/EuL0uHQ== ts-node@^9.1.1: version "9.1.1" - resolved "https://registry.yarnpkg.com/ts-node/-/ts-node-9.1.1.tgz#51a9a450a3e959401bda5f004a72d54b936d376d" + resolved "https://registry.npmjs.org/ts-node/-/ts-node-9.1.1.tgz" integrity sha512-hPlt7ZACERQGf03M253ytLY3dHbGNGrAq9qIHWUY9XHYl1z7wYngSr3OQ5xmui8o2AaxsONxIzjafLUiWBo1Fg== dependencies: arg "^4.1.0" @@ -3852,24 +3556,20 @@ ts-node@^9.1.1: source-map-support "^0.5.17" yn "3.1.1" -tslib@^2.1.0, tslib@^2.2.0, tslib@^2.3.0: - version "2.3.1" - resolved "https://registry.yarnpkg.com/tslib/-/tslib-2.3.1.tgz#e8a335add5ceae51aa261d32a490158ef042ef01" - integrity sha512-77EbyPPpMz+FRFRuAFlWMtmgUWGe9UOG2Z25NqCwiIjRhOf5iKGuzSe5P2w1laq+FkRy4p+PCuVkJSGkzTEKVw== - -tslib@^2.6.2: +tslib@^2.1.0, tslib@^2.3.0, tslib@^2.6.2: version "2.8.1" - resolved "https://registry.yarnpkg.com/tslib/-/tslib-2.8.1.tgz#612efe4ed235d567e8aba5f2a5fab70280ade83f" + resolved "https://registry.npmjs.org/tslib/-/tslib-2.8.1.tgz" integrity 
sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w== typescript@^4.1.3: - version "4.5.5" - resolved "https://registry.yarnpkg.com/typescript/-/typescript-4.5.5.tgz#d8c953832d28924a9e3d37c73d729c846c5896f3" - integrity sha512-TCTIul70LyWe6IJWT8QSYeA54WQe8EjQFU4wY52Fasj5UKx88LNYKCgBEHcOMOrFF1rKGbD8v/xcNWVUq9SymA== + version "4.9.5" + resolved "https://registry.npmjs.org/typescript/-/typescript-4.9.5.tgz" + integrity sha512-1FXk9E2Hm+QzZQ7z+McJiHL4NW1F2EzMu9Nq9i3zAaGqibafqYwCVU6WyWAuyQRRzOlxou8xZSyXLEN8oKj24g== "typesense-sync@https://s3.amazonaws.com/origin-static-assets/corp-node-packages/master/typesense-sync-v1.1.0.tgz": version "1.1.0" - resolved "https://s3.amazonaws.com/origin-static-assets/corp-node-packages/master/typesense-sync-v1.1.0.tgz#9e142972e2d6b255b3f90b3a8545c39e2e4b8cb7" + resolved "https://s3.amazonaws.com/origin-static-assets/corp-node-packages/master/typesense-sync-v1.1.0.tgz" + integrity sha512-w1b1aWpiXHOF/VK9SpzsH1B4yVRcyn0EJGQDEoDL5a6R/iD3y+HvG93UGBKlwYCnjv3E5woxXOgYNIE0MqP2Kg== dependencies: "@babel/runtime" "^7.25.0" crypto "^1.0.1" @@ -3880,34 +3580,39 @@ typescript@^4.1.3: typesense@^1.8.2: version "1.8.2" - resolved "https://registry.yarnpkg.com/typesense/-/typesense-1.8.2.tgz#16341fdd4edab02b33facc14e1d27a6d58dbe0e5" + resolved "https://registry.npmjs.org/typesense/-/typesense-1.8.2.tgz" integrity sha512-aBpePjA99Qvo+OP2pJwMpvga4Jrm1Y2oV5NsrWXBxlqUDNEUCPZBIksPv2Hq0jxQxHhLLyJVbjXjByXsvpCDVA== dependencies: axios "^1.6.0" loglevel "^1.8.1" typesense@^2.0.3: - version "2.0.3" - resolved "https://registry.yarnpkg.com/typesense/-/typesense-2.0.3.tgz#cbce737544186fb338cf90bd2a76b33cb1e3a281" - integrity sha512-fRJjFdDNZn6qF9XzIk+bB8n8cm0fiAx1SGcpLDfNcsGtp8znITfG+SO+l/qk63GCRXZwJGq7wrMDLFUvblJSHA== + version "2.1.0" + resolved "https://registry.npmjs.org/typesense/-/typesense-2.1.0.tgz" + integrity sha512-a/IRTL+dRXlpRDU4UodyGj8hl5xBz3nKihVRd/KfSFAfFPGcpdX6lxIgwdXy3O6VLNNiEsN8YwIsPHQPVT0vNw== 
dependencies: - axios "^1.7.2" + axios "^1.8.4" loglevel "^1.8.1" tslib "^2.6.2" ulid@^2.3.0: version "2.4.0" - resolved "https://registry.yarnpkg.com/ulid/-/ulid-2.4.0.tgz#9d9ee22e63f4390ee1bcd9ad09fca39d8ae0afed" + resolved "https://registry.npmjs.org/ulid/-/ulid-2.4.0.tgz" integrity sha512-fIRiVTJNcSRmXKPZtGzFQv9WRrZ3M9eoptl/teFJvjOzmpU+/K/JH6HZ8deBfb5vMEpicJcLn7JmvdknlMq7Zg== +undici@^7.12.0: + version "7.16.0" + resolved "https://registry.yarnpkg.com/undici/-/undici-7.16.0.tgz#cb2a1e957726d458b536e3f076bf51f066901c1a" + integrity sha512-QEg3HPMll0o3t2ourKwOeUAZ159Kn9mx5pnzHRQO8+Wixmh88YdZRiIwat0iNzNNXn0yoEtXJqFpyW7eM8BV7g== + unicode-canonical-property-names-ecmascript@^2.0.0: version "2.0.1" - resolved "https://registry.yarnpkg.com/unicode-canonical-property-names-ecmascript/-/unicode-canonical-property-names-ecmascript-2.0.1.tgz#cb3173fe47ca743e228216e4a3ddc4c84d628cc2" + resolved "https://registry.npmjs.org/unicode-canonical-property-names-ecmascript/-/unicode-canonical-property-names-ecmascript-2.0.1.tgz" integrity sha512-dA8WbNeb2a6oQzAQ55YlT5vQAWGV9WXOsi3SskE3bcCdM0P4SDd+24zS/OCacdRq5BkdsRj9q3Pg6YyQoxIGqg== unicode-match-property-ecmascript@^2.0.0: version "2.0.0" - resolved "https://registry.yarnpkg.com/unicode-match-property-ecmascript/-/unicode-match-property-ecmascript-2.0.0.tgz#54fd16e0ecb167cf04cf1f756bdcc92eba7976c3" + resolved "https://registry.npmjs.org/unicode-match-property-ecmascript/-/unicode-match-property-ecmascript-2.0.0.tgz" integrity sha512-5kaZCrbp5mmbz5ulBkDkbY0SsPOjKqVS35VpL9ulMPfSl0J0Xsm+9Evphv9CoIZFwre7aJoa94AY6seMKGVN5Q== dependencies: unicode-canonical-property-names-ecmascript "^2.0.0" @@ -3915,42 +3620,59 @@ unicode-match-property-ecmascript@^2.0.0: unicode-match-property-value-ecmascript@^2.1.0: version "2.2.0" - resolved "https://registry.yarnpkg.com/unicode-match-property-value-ecmascript/-/unicode-match-property-value-ecmascript-2.2.0.tgz#a0401aee72714598f739b68b104e4fe3a0cb3c71" + resolved 
"https://registry.npmjs.org/unicode-match-property-value-ecmascript/-/unicode-match-property-value-ecmascript-2.2.0.tgz" integrity sha512-4IehN3V/+kkr5YeSSDDQG8QLqO26XpL2XP3GQtqwlT/QYSECAwFztxVHjlbh0+gjJ3XmNLS0zDsbgs9jWKExLg== +unicode-match-property-value-ecmascript@^2.2.1: + version "2.2.1" + resolved "https://registry.yarnpkg.com/unicode-match-property-value-ecmascript/-/unicode-match-property-value-ecmascript-2.2.1.tgz#65a7adfad8574c219890e219285ce4c64ed67eaa" + integrity sha512-JQ84qTuMg4nVkx8ga4A16a1epI9H6uTXAknqxkGF/aFfRLw1xC/Bp24HNLaZhHSkWd3+84t8iXnp1J0kYcZHhg== + unicode-property-aliases-ecmascript@^2.0.0: version "2.1.0" - resolved "https://registry.yarnpkg.com/unicode-property-aliases-ecmascript/-/unicode-property-aliases-ecmascript-2.1.0.tgz#43d41e3be698bd493ef911077c9b131f827e8ccd" + resolved "https://registry.npmjs.org/unicode-property-aliases-ecmascript/-/unicode-property-aliases-ecmascript-2.1.0.tgz" integrity sha512-6t3foTQI9qne+OZoVQB/8x8rk2k1eVy1gRXhV3oFQ5T6R1dqQ1xtin3XqSlx3+ATBkliTaR/hHyJBm+LVPNM8w== universalify@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/universalify/-/universalify-2.0.0.tgz#75a4984efedc4b08975c5aeb73f530d02df25717" - integrity sha512-hAZsKq7Yy11Zu1DE0OzWjw7nnLZmJZYTDZZyEFHZdUhV8FkH5MCfoU1XMaxXovpyW5nq5scPqq0ZDP9Zyl04oQ== + version "2.0.1" + resolved "https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz" + integrity sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw== -update-browserslist-db@^1.1.1: - version "1.1.2" - resolved "https://registry.yarnpkg.com/update-browserslist-db/-/update-browserslist-db-1.1.2.tgz#97e9c96ab0ae7bcac08e9ae5151d26e6bc6b5580" - integrity sha512-PPypAm5qvlD7XMZC3BujecnaOxwhrtoFR+Dqkk5Aa/6DssiH0ibKoketaj9w8LP7Bont1rYeoV5plxD7RTEPRg== +update-browserslist-db@^1.2.0: + version "1.2.3" + resolved "https://registry.yarnpkg.com/update-browserslist-db/-/update-browserslist-db-1.2.3.tgz#64d76db58713136acbeb4c49114366cc6cc2e80d" + 
integrity sha512-Js0m9cx+qOgDxo0eMiFGEueWztz+d4+M3rGlmKPT+T4IS/jP4ylw3Nwpu6cpTTP8R1MAC1kF4VbdLt3ARf209w== dependencies: escalade "^3.2.0" picocolors "^1.1.1" util-deprecate@^1.0.1, util-deprecate@^1.0.2: version "1.0.2" - resolved "https://registry.yarnpkg.com/util-deprecate/-/util-deprecate-1.0.2.tgz#450d4dc9fa70de732762fbd2d4a28981419a0ccf" + resolved "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz" integrity sha1-RQ1Nyfpw3nMnYvvS1KKJgUGaDM8= util@^0.10.3: version "0.10.4" - resolved "https://registry.yarnpkg.com/util/-/util-0.10.4.tgz#3aa0125bfe668a4672de58857d3ace27ecb76901" + resolved "https://registry.npmjs.org/util/-/util-0.10.4.tgz" integrity sha512-0Pm9hTQ3se5ll1XihRic3FDIku70C+iHUdT/W926rSgHV5QgXsYbKZN8MSC3tJtSkhuROzvsQjAaFENRXr+19A== dependencies: inherits "2.0.3" +whatwg-encoding@^3.1.1: + version "3.1.1" + resolved "https://registry.yarnpkg.com/whatwg-encoding/-/whatwg-encoding-3.1.1.tgz#d0f4ef769905d426e1688f3e34381a99b60b76e5" + integrity sha512-6qN4hJdMwfYBtE3YBTTHhoeuUrDBPZmbQaxWAqSALV/MeEnR5z1xd8UKud2RAkFoPkmB+hli1TZSnyi84xz1vQ== + dependencies: + iconv-lite "0.6.3" + +whatwg-mimetype@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/whatwg-mimetype/-/whatwg-mimetype-4.0.0.tgz#bc1bf94a985dc50388d54a9258ac405c3ca2fc0a" + integrity sha512-QaKxh0eNIi2mE9p2vEdzfagOKHCcj1pJ56EEHGQOVxp8r9/iszLUUV7v89x9O1p/T+NlTM5W7jW6+cz4Fq1YVg== + winston-transport@^4.7.0: version "4.7.1" - resolved "https://registry.yarnpkg.com/winston-transport/-/winston-transport-4.7.1.tgz#52ff1bcfe452ad89991a0aaff9c3b18e7f392569" + resolved "https://registry.npmjs.org/winston-transport/-/winston-transport-4.7.1.tgz" integrity sha512-wQCXXVgfv/wUPOfb2x0ruxzwkcZfxcktz6JIMUaPLmcNhO4bZTwA/WtDWK74xV3F2dKu8YadrFv0qhwYjVEwhA== dependencies: logform "^2.6.1" @@ -3959,7 +3681,7 @@ winston-transport@^4.7.0: winston@^3.14.2: version "3.14.2" - resolved "https://registry.yarnpkg.com/winston/-/winston-3.14.2.tgz#94ce5fd26d374f563c969d12f0cd9c641065adab" + 
resolved "https://registry.npmjs.org/winston/-/winston-3.14.2.tgz" integrity sha512-CO8cdpBB2yqzEf8v895L+GNKYJiEq8eKlHU38af3snQBQ+sdAIUepjMSguOIJC7ICbzm0ZI+Af2If4vIJrtmOg== dependencies: "@colors/colors" "^1.6.0" @@ -3976,7 +3698,7 @@ winston@^3.14.2: wrap-ansi@^7.0.0: version "7.0.0" - resolved "https://registry.yarnpkg.com/wrap-ansi/-/wrap-ansi-7.0.0.tgz#67e145cff510a6a6984bdf1152911d69d2eb9e43" + resolved "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz" integrity sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q== dependencies: ansi-styles "^4.0.0" @@ -3985,42 +3707,42 @@ wrap-ansi@^7.0.0: wrappy@1: version "1.0.2" - resolved "https://registry.yarnpkg.com/wrappy/-/wrappy-1.0.2.tgz#b5243d8f3ec1aa35f1364605bc0d1036e30ab69f" + resolved "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz" integrity sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8= xtend@^4.0.2: version "4.0.2" - resolved "https://registry.yarnpkg.com/xtend/-/xtend-4.0.2.tgz#bb72779f5fa465186b1f438f674fa347fdb5db54" + resolved "https://registry.npmjs.org/xtend/-/xtend-4.0.2.tgz" integrity sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ== y18n@^5.0.5: version "5.0.8" - resolved "https://registry.yarnpkg.com/y18n/-/y18n-5.0.8.tgz#7f4934d0f7ca8c56f95314939ddcd2dd91ce1d55" + resolved "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz" integrity sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA== yallist@^3.0.2: version "3.1.1" - resolved "https://registry.yarnpkg.com/yallist/-/yallist-3.1.1.tgz#dbb7daf9bfd8bac9ab45ebf602b8cbad0d5d08fd" + resolved "https://registry.npmjs.org/yallist/-/yallist-3.1.1.tgz" integrity sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g== -yaml@^1.10.0, yaml@^1.10.2, yaml@^1.7.2: +yaml@^1.10.0, yaml@^1.10.2: version "1.10.2" - resolved 
"https://registry.yarnpkg.com/yaml/-/yaml-1.10.2.tgz#2301c5ffbf12b467de8da2333a459e29e7920e4b" + resolved "https://registry.npmjs.org/yaml/-/yaml-1.10.2.tgz" integrity sha512-r3vXyErRCYJ7wg28yvBY5VSoAF8ZvlcW9/BwUzEtUsjvX/DKs24dIkuwjtuprwJJHsbyUbLApepYTR1BN4uHrg== yargs-parser@^20.2.2: version "20.2.9" - resolved "https://registry.yarnpkg.com/yargs-parser/-/yargs-parser-20.2.9.tgz#2eb7dc3b0289718fc295f362753845c41a0c94ee" + resolved "https://registry.npmjs.org/yargs-parser/-/yargs-parser-20.2.9.tgz" integrity sha512-y11nGElTIV+CT3Zv9t7VKl+Q3hTQoT9a1Qzezhhl6Rp21gJ/IVTW7Z3y9EWXhuUBC2Shnf+DX0antecpAwSP8w== yargs-parser@^21.1.1: version "21.1.1" - resolved "https://registry.yarnpkg.com/yargs-parser/-/yargs-parser-21.1.1.tgz#9096bceebf990d21bb31fa9516e0ede294a77d35" + resolved "https://registry.npmjs.org/yargs-parser/-/yargs-parser-21.1.1.tgz" integrity sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw== yargs@^16.0.0: version "16.2.0" - resolved "https://registry.yarnpkg.com/yargs/-/yargs-16.2.0.tgz#1c82bf0f6b6a66eafce7ef30e376f49a12477f66" + resolved "https://registry.npmjs.org/yargs/-/yargs-16.2.0.tgz" integrity sha512-D1mvvtDG0L5ft/jGWkLpG1+m0eQxOfaBvTNELraWj22wSVUMWxZUvYgJYcKh6jGGIkJFhH4IZPQhR4TKpc8mBw== dependencies: cliui "^7.0.2" @@ -4033,7 +3755,7 @@ yargs@^16.0.0: yargs@^17.7.2: version "17.7.2" - resolved "https://registry.yarnpkg.com/yargs/-/yargs-17.7.2.tgz#991df39aca675a192b816e1e0363f9d75d2aa269" + resolved "https://registry.npmjs.org/yargs/-/yargs-17.7.2.tgz" integrity sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w== dependencies: cliui "^8.0.1" @@ -4046,5 +3768,5 @@ yargs@^17.7.2: yn@3.1.1: version "3.1.1" - resolved "https://registry.yarnpkg.com/yn/-/yn-3.1.1.tgz#1e87401a09d767c1d5eab26a6e4c185182d2eb50" + resolved "https://registry.npmjs.org/yn/-/yn-3.1.1.tgz" integrity 
sha512-Ux4ygGWsu2c7isFWe8Yu1YluJmqVhxqK2cLXNQA5AcC3QfbGNpM7fu0Y8b/z16pXLnFxZYvWhd3fhBY9DLmC6Q== From 60fa98017d861bf88141926ad97705f13aa65f1f Mon Sep 17 00:00:00 2001 From: Benjamin Dornel Date: Sat, 20 Dec 2025 03:49:57 +0800 Subject: [PATCH 158/227] enhancement(clickhouse sink): Add `ArrowStream` format (#24373) * enhancement(clickhouse sink): Add `ArrowStream` format * chore: add docs * build(deps): use rust_decimal from parent Cargo.toml * refactor: simplify validation for batch_encoding format * chore: remove unnecessary :: prefix * chore: add more robust handling for schema lookup query * refactor: add framework for storing clickhouse types * refactor: separate schema handling and type parsing * chore: simplify error message for bad decimal parsing * refactor: use extract_identifier for clickhouse type to arrow conversion * refactor: set schema to none if provider is used * chore: add beta flag for arrowstream format * chore: remove unnecessary Arc for schema provider * refactor: move schema provider logic to build phase * Use workspace rust_decimal * docs: add how_it_works to Clickhouse sink * chore: fix punctuation for logs * chore: remove redundant example from docs * chore: spelling * chore: update docs * build(deps): remove unnecessary dependencies from sinks-clickhouse * refactor(codecs): store owned Schema in config, wrap in Arc only at serializer * chore: fix spacing issue in docs * Fix cue formatting --------- Co-authored-by: Thomas --- Cargo.lock | 2 + Cargo.toml | 7 +- ...074_clickhouse_arrow_format.enhancement.md | 3 + lib/codecs/Cargo.toml | 3 +- lib/codecs/src/encoding/format/arrow.rs | 56 +- lib/codecs/src/encoding/format/mod.rs | 4 +- lib/codecs/src/encoding/mod.rs | 4 +- src/sinks/clickhouse/arrow/mod.rs | 6 + src/sinks/clickhouse/arrow/parser.rs | 647 ++++++++++++++++++ src/sinks/clickhouse/arrow/schema.rs | 227 ++++++ src/sinks/clickhouse/config.rs | 223 +++++- src/sinks/clickhouse/integration_tests.rs | 137 ++++ src/sinks/clickhouse/mod.rs 
| 1 + src/sinks/clickhouse/request_builder.rs | 11 +- src/sinks/clickhouse/service.rs | 12 +- .../reference/components/sinks/clickhouse.cue | 86 +++ .../components/sinks/generated/clickhouse.cue | 45 ++ website/cue/reference/urls.cue | 1 + 18 files changed, 1434 insertions(+), 41 deletions(-) create mode 100644 changelog.d/24074_clickhouse_arrow_format.enhancement.md create mode 100644 src/sinks/clickhouse/arrow/mod.rs create mode 100644 src/sinks/clickhouse/arrow/parser.rs create mode 100644 src/sinks/clickhouse/arrow/schema.rs diff --git a/Cargo.lock b/Cargo.lock index 307a4bc90fa1a..6c175377a63c1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2595,6 +2595,7 @@ version = "0.1.0" dependencies = [ "apache-avro 0.20.0", "arrow", + "async-trait", "bytes 1.10.1", "chrono", "csv-core", @@ -12515,6 +12516,7 @@ dependencies = [ "arc-swap", "arr_macro", "arrow", + "arrow-schema", "assert_cmd", "async-compression", "async-graphql", diff --git a/Cargo.toml b/Cargo.toml index c49d49d589636..9cc3b90078953 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -179,7 +179,7 @@ rand = { version = "0.9.2", default-features = false, features = ["small_rng", " rand_distr = { version = "0.5.1", default-features = false } regex = { version = "1.11.2", default-features = false, features = ["std", "perf"] } reqwest = { version = "0.11.26", features = ["json"] } -rust_decimal = { version = "1.33", default-features = false, features = ["std"] } +rust_decimal = { version = "1.37.0", default-features = false, features = ["std"] } semver = { version = "1.0.26", default-features = false, features = ["serde", "std"] } serde = { version = "1.0.219", default-features = false, features = ["alloc", "derive", "rc"] } serde_json = { version = "1.0.143", default-features = false, features = ["raw_value", "std"] } @@ -341,6 +341,7 @@ arc-swap = { version = "1.7", default-features = false, optional = true } async-compression = { version = "0.4.27", default-features = false, features = ["tokio", "gzip", "zstd"], 
optional = true } apache-avro = { version = "0.16.0", default-features = false, optional = true } arrow = { version = "56.2.0", default-features = false, features = ["ipc"], optional = true } +arrow-schema = { version = "56.2.0", default-features = false, optional = true } axum = { version = "0.6.20", default-features = false } base64 = { workspace = true, optional = true } bloomy = { version = "1.2.0", default-features = false, optional = true } @@ -583,7 +584,7 @@ enrichment-tables-mmdb = ["dep:maxminddb"] enrichment-tables-memory = ["dep:evmap", "dep:evmap-derive", "dep:thread_local"] # Codecs -codecs-arrow = ["vector-lib/arrow"] +codecs-arrow = ["dep:arrow", "dep:arrow-schema", "vector-lib/arrow"] codecs-opentelemetry = ["vector-lib/opentelemetry"] codecs-syslog = ["vector-lib/syslog"] @@ -851,7 +852,7 @@ sinks-azure_blob = ["dep:azure_core", "dep:azure_identity", "dep:azure_storage", sinks-azure_monitor_logs = [] sinks-blackhole = [] sinks-chronicle = [] -sinks-clickhouse = [] +sinks-clickhouse = ["dep:rust_decimal", "codecs-arrow"] sinks-console = [] sinks-databend = ["dep:databend-client"] sinks-datadog_events = [] diff --git a/changelog.d/24074_clickhouse_arrow_format.enhancement.md b/changelog.d/24074_clickhouse_arrow_format.enhancement.md new file mode 100644 index 0000000000000..5612cbfe21e76 --- /dev/null +++ b/changelog.d/24074_clickhouse_arrow_format.enhancement.md @@ -0,0 +1,3 @@ +The `clickhouse` sink now supports the `arrow_stream` format option, enabling high-performance binary data transfer using Apache Arrow IPC. This provides significantly better performance and smaller payload sizes compared to JSON-based formats. 
+ +authors: benjamin-awd diff --git a/lib/codecs/Cargo.toml b/lib/codecs/Cargo.toml index 2cb4ae3bbdb35..7a622d52edc2b 100644 --- a/lib/codecs/Cargo.toml +++ b/lib/codecs/Cargo.toml @@ -15,9 +15,10 @@ path = "tests/bin/generate-avro-fixtures.rs" [dependencies] apache-avro = { version = "0.20.0", default-features = false } arrow = { version = "56.2.0", default-features = false, features = ["ipc"] } +async-trait.workspace = true bytes.workspace = true chrono.workspace = true -rust_decimal = { version = "1.37", default-features = false, features = ["std"] } +rust_decimal.workspace = true csv-core = { version = "0.1.12", default-features = false } derivative.workspace = true dyn-clone = { version = "1", default-features = false } diff --git a/lib/codecs/src/encoding/format/arrow.rs b/lib/codecs/src/encoding/format/arrow.rs index db4dc491f4cc3..3c2d3863f1fb2 100644 --- a/lib/codecs/src/encoding/format/arrow.rs +++ b/lib/codecs/src/encoding/format/arrow.rs @@ -16,6 +16,7 @@ use arrow::{ ipc::writer::StreamWriter, record_batch::RecordBatch, }; +use async_trait::async_trait; use bytes::{BufMut, Bytes, BytesMut}; use chrono::{DateTime, Utc}; use rust_decimal::Decimal; @@ -25,6 +26,18 @@ use vector_config::configurable_component; use vector_core::event::{Event, Value}; +/// Provides Arrow schema for encoding. +/// +/// Sinks can implement this trait to provide custom schema fetching logic. +#[async_trait] +pub trait SchemaProvider: Send + Sync + std::fmt::Debug { + /// Fetch the Arrow schema from the data store. + /// + /// This is called during sink configuration build phase to fetch + /// the schema once at startup, rather than at runtime. 
+ async fn get_schema(&self) -> Result; +} + /// Configuration for Arrow IPC stream serialization #[configurable_component] #[derive(Clone, Default)] @@ -32,7 +45,7 @@ pub struct ArrowStreamSerializerConfig { /// The Arrow schema to use for encoding #[serde(skip)] #[configurable(derived)] - pub schema: Option>, + pub schema: Option, /// Allow null values for non-nullable fields in the schema. /// @@ -43,7 +56,7 @@ pub struct ArrowStreamSerializerConfig { /// When disabled (default), missing values for non-nullable fields will cause encoding errors, /// ensuring all required data is present before sending to the sink. #[serde(default)] - #[configurable(metadata(docs::examples = true))] + #[configurable(derived)] pub allow_nullable_fields: bool, } @@ -64,7 +77,7 @@ impl std::fmt::Debug for ArrowStreamSerializerConfig { impl ArrowStreamSerializerConfig { /// Create a new ArrowStreamSerializerConfig with a schema - pub fn new(schema: Arc) -> Self { + pub fn new(schema: arrow::datatypes::Schema) -> Self { Self { schema: Some(schema), allow_nullable_fields: false, @@ -91,26 +104,28 @@ pub struct ArrowStreamSerializer { impl ArrowStreamSerializer { /// Create a new ArrowStreamSerializer with the given configuration pub fn new(config: ArrowStreamSerializerConfig) -> Result { - let mut schema = config.schema.ok_or_else(|| { - vector_common::Error::from( - "Arrow serializer requires a schema. Pass a schema or fetch from provider before creating serializer." 
- ) - })?; + let schema = config + .schema + .ok_or_else(|| vector_common::Error::from("Arrow serializer requires a schema."))?; // If allow_nullable_fields is enabled, transform the schema once here // instead of on every batch encoding - if config.allow_nullable_fields { - schema = Arc::new(Schema::new_with_metadata( + let schema = if config.allow_nullable_fields { + Schema::new_with_metadata( schema .fields() .iter() .map(|f| Arc::new(make_field_nullable(f))) .collect::>(), schema.metadata().clone(), - )); - } + ) + } else { + schema + }; - Ok(Self { schema }) + Ok(Self { + schema: Arc::new(schema), + }) } } @@ -154,6 +169,13 @@ pub enum ArrowEncodingError { #[snafu(display("Schema must be provided before encoding"))] NoSchemaProvided, + /// Failed to fetch schema from provider + #[snafu(display("Failed to fetch schema from provider: {}", message))] + SchemaFetchError { + /// Error message from the provider + message: String, + }, + /// Unsupported Arrow data type for field #[snafu(display( "Unsupported Arrow data type for field '{}': {:?}", @@ -1500,13 +1522,9 @@ mod tests { let log2 = LogEvent::default(); let events = vec![Event::Log(log1), Event::Log(log2)]; - let schema = Arc::new(Schema::new(vec![Field::new( - "strict_field", - DataType::Int64, - false, - )])); + let schema = Schema::new(vec![Field::new("strict_field", DataType::Int64, false)]); - let mut config = ArrowStreamSerializerConfig::new(Arc::clone(&schema)); + let mut config = ArrowStreamSerializerConfig::new(schema); config.allow_nullable_fields = true; let mut serializer = diff --git a/lib/codecs/src/encoding/format/mod.rs b/lib/codecs/src/encoding/format/mod.rs index 0d21e8b94e25c..ccafb2b969cd7 100644 --- a/lib/codecs/src/encoding/format/mod.rs +++ b/lib/codecs/src/encoding/format/mod.rs @@ -23,7 +23,9 @@ mod text; use std::fmt::Debug; #[cfg(feature = "arrow")] -pub use arrow::{ArrowEncodingError, ArrowStreamSerializer, ArrowStreamSerializerConfig}; +pub use arrow::{ + ArrowEncodingError, 
ArrowStreamSerializer, ArrowStreamSerializerConfig, SchemaProvider, +}; pub use avro::{AvroSerializer, AvroSerializerConfig, AvroSerializerOptions}; pub use cef::{CefSerializer, CefSerializerConfig}; use dyn_clone::DynClone; diff --git a/lib/codecs/src/encoding/mod.rs b/lib/codecs/src/encoding/mod.rs index 3fe0baafa8b91..c365bc45da4fc 100644 --- a/lib/codecs/src/encoding/mod.rs +++ b/lib/codecs/src/encoding/mod.rs @@ -7,7 +7,9 @@ pub mod framing; pub mod serializer; pub use chunking::{Chunker, Chunking, GelfChunker}; #[cfg(feature = "arrow")] -pub use format::{ArrowEncodingError, ArrowStreamSerializer, ArrowStreamSerializerConfig}; +pub use format::{ + ArrowEncodingError, ArrowStreamSerializer, ArrowStreamSerializerConfig, SchemaProvider, +}; pub use format::{ AvroSerializer, AvroSerializerConfig, AvroSerializerOptions, CefSerializer, CefSerializerConfig, CsvSerializer, CsvSerializerConfig, GelfSerializer, GelfSerializerConfig, diff --git a/src/sinks/clickhouse/arrow/mod.rs b/src/sinks/clickhouse/arrow/mod.rs new file mode 100644 index 0000000000000..9ca393833c0d0 --- /dev/null +++ b/src/sinks/clickhouse/arrow/mod.rs @@ -0,0 +1,6 @@ +//! Schema fetching and Arrow type mapping for ClickHouse tables. + +pub mod parser; +pub mod schema; + +pub use schema::ClickHouseSchemaProvider; diff --git a/src/sinks/clickhouse/arrow/parser.rs b/src/sinks/clickhouse/arrow/parser.rs new file mode 100644 index 0000000000000..a13bd823487b5 --- /dev/null +++ b/src/sinks/clickhouse/arrow/parser.rs @@ -0,0 +1,647 @@ +//! ClickHouse type parsing and conversion to Arrow types. + +use arrow::datatypes::{DataType, TimeUnit}; + +const DECIMAL32_PRECISION: u8 = 9; +const DECIMAL64_PRECISION: u8 = 18; +const DECIMAL128_PRECISION: u8 = 38; +const DECIMAL256_PRECISION: u8 = 76; + +/// Represents a ClickHouse type with its modifiers and nested structure. +#[derive(Debug, PartialEq, Clone)] +pub enum ClickHouseType<'a> { + /// A primitive type like String, Int64, DateTime, etc. 
+ Primitive(&'a str), + /// Nullable(T) + Nullable(Box>), + /// LowCardinality(T) + LowCardinality(Box>), +} + +impl<'a> ClickHouseType<'a> { + /// Returns true if this type or any of its nested types is Nullable. + pub fn is_nullable(&self) -> bool { + match self { + ClickHouseType::Nullable(_) => true, + ClickHouseType::LowCardinality(inner) => inner.is_nullable(), + _ => false, + } + } + + /// Returns the innermost base type, unwrapping all modifiers. + /// For example: LowCardinality(Nullable(String)) -> Primitive("String") + pub fn base_type(&self) -> &ClickHouseType<'a> { + match self { + ClickHouseType::Nullable(inner) | ClickHouseType::LowCardinality(inner) => { + inner.base_type() + } + _ => self, + } + } +} + +/// Parses a ClickHouse type string into a structured representation. +pub fn parse_ch_type(ty: &str) -> ClickHouseType<'_> { + let ty = ty.trim(); + + // Recursively strip and parse type modifiers + if let Some(inner) = strip_wrapper(ty, "Nullable") { + return ClickHouseType::Nullable(Box::new(parse_ch_type(inner))); + } + if let Some(inner) = strip_wrapper(ty, "LowCardinality") { + return ClickHouseType::LowCardinality(Box::new(parse_ch_type(inner))); + } + + // Base case: return primitive type for anything without modifiers + ClickHouseType::Primitive(ty) +} + +/// Helper function to strip a wrapper from a type string. +/// Returns the inner content if the type matches the wrapper pattern. +fn strip_wrapper<'a>(ty: &'a str, wrapper_name: &str) -> Option<&'a str> { + ty.strip_prefix(wrapper_name)? + .trim_start() + .strip_prefix('(')? + .strip_suffix(')') +} + +/// Unwraps ClickHouse type modifiers like Nullable() and LowCardinality(). +/// Returns a tuple of (base_type, is_nullable). 
+/// For example: "LowCardinality(Nullable(String))" -> ("String", true) +pub fn unwrap_type_modifiers(ch_type: &str) -> (&str, bool) { + let parsed = parse_ch_type(ch_type); + let is_nullable = parsed.is_nullable(); + + match parsed.base_type() { + ClickHouseType::Primitive(base) => (base, is_nullable), + _ => (ch_type, is_nullable), + } +} + +fn unsupported(ch_type: &str, kind: &str) -> String { + format!( + "{kind} type '{ch_type}' is not supported. \ + ClickHouse {kind} types cannot be automatically converted to Arrow format." + ) +} + +/// Converts a ClickHouse type string to an Arrow DataType. +/// Returns a tuple of (DataType, is_nullable). +pub fn clickhouse_type_to_arrow(ch_type: &str) -> Result<(DataType, bool), String> { + let (base_type, is_nullable) = unwrap_type_modifiers(ch_type); + let (type_name, _) = extract_identifier(base_type); + + let data_type = match type_name { + // Numeric + "Int8" => DataType::Int8, + "Int16" => DataType::Int16, + "Int32" => DataType::Int32, + "Int64" => DataType::Int64, + "UInt8" => DataType::UInt8, + "UInt16" => DataType::UInt16, + "UInt32" => DataType::UInt32, + "UInt64" => DataType::UInt64, + "Float32" => DataType::Float32, + "Float64" => DataType::Float64, + "Bool" => DataType::Boolean, + "Decimal" | "Decimal32" | "Decimal64" | "Decimal128" | "Decimal256" => { + parse_decimal_type(base_type)? + } + + // Strings + "String" | "FixedString" => DataType::Utf8, + + // Date and time types (timezones not currently handled, defaults to UTC) + "Date" | "Date32" => DataType::Date32, + "DateTime" => DataType::Timestamp(TimeUnit::Second, None), + "DateTime64" => parse_datetime64_precision(base_type)?, + + // Unsupported + "Array" => return Err(unsupported(ch_type, "Array")), + "Tuple" => return Err(unsupported(ch_type, "Tuple")), + "Map" => return Err(unsupported(ch_type, "Map")), + + // Unknown + _ => { + return Err(format!( + "Unknown ClickHouse type '{}'. 
This type cannot be automatically converted.", + type_name + )); + } + }; + + Ok((data_type, is_nullable)) +} + +/// Extracts an identifier from the start of a string. +/// Returns (identifier, remaining_string). +fn extract_identifier(input: &str) -> (&str, &str) { + for (i, c) in input.char_indices() { + if c.is_alphabetic() || c == '_' || (i > 0 && c.is_numeric()) { + continue; + } + return (&input[..i], &input[i..]); + } + (input, "") +} + +/// Parses comma-separated arguments from a parenthesized string. +/// Input: "(arg1, arg2, arg3)" -> Output: Ok(vec!["arg1".to_string(), "arg2".to_string(), "arg3".to_string()]) +/// Returns an error if parentheses are malformed. +fn parse_args(input: &str) -> Result, String> { + let trimmed = input.trim(); + if !trimmed.starts_with('(') || !trimmed.ends_with(')') { + return Err(format!( + "Expected parentheses around arguments in '{}'", + input + )); + } + + let inner = trimmed[1..trimmed.len() - 1].trim(); + if inner.is_empty() { + return Ok(vec![]); + } + + // Split by comma, handling nested parentheses and quotes + let mut args = Vec::new(); + let mut current_arg = String::new(); + let mut depth = 0; + let mut in_quotes = false; + + for c in inner.chars() { + match c { + '\'' if !in_quotes => in_quotes = true, + '\'' if in_quotes => in_quotes = false, + '(' if !in_quotes => depth += 1, + ')' if !in_quotes => depth -= 1, + ',' if depth == 0 && !in_quotes => { + args.push(current_arg.trim().to_string()); + current_arg = String::new(); + continue; + } + _ => {} + } + current_arg.push(c); + } + + if !current_arg.trim().is_empty() { + args.push(current_arg.trim().to_string()); + } + + Ok(args) +} + +/// Parses ClickHouse Decimal types and returns the appropriate Arrow decimal type. 
+/// ClickHouse formats: +/// - Decimal(P, S) -> generic decimal with precision P and scale S +/// - Decimal32(S) -> precision up to 9, scale S +/// - Decimal64(S) -> precision up to 18, scale S +/// - Decimal128(S) -> precision up to 38, scale S +/// - Decimal256(S) -> precision up to 76, scale S +/// +/// Uses metadata from ClickHouse's system.columns when available, otherwise falls back to parsing the type string. +fn parse_decimal_type(ch_type: &str) -> Result { + // Parse from type string + let (type_name, args_str) = extract_identifier(ch_type); + + let result = parse_args(args_str).ok().and_then(|args| match type_name { + "Decimal" if args.len() == 2 => args[0].parse::().ok().zip(args[1].parse::().ok()), + "Decimal32" | "Decimal64" | "Decimal128" | "Decimal256" if args.len() == 1 => { + args[0].parse::().ok().map(|scale| { + let precision = match type_name { + "Decimal32" => DECIMAL32_PRECISION, + "Decimal64" => DECIMAL64_PRECISION, + "Decimal128" => DECIMAL128_PRECISION, + "Decimal256" => DECIMAL256_PRECISION, + _ => unreachable!(), + }; + (precision, scale) + }) + } + _ => None, + }); + + result + .map(|(precision, scale)| { + if precision <= DECIMAL128_PRECISION { + DataType::Decimal128(precision, scale) + } else { + DataType::Decimal256(precision, scale) + } + }) + .ok_or_else(|| format!("Could not parse Decimal type '{}'.", ch_type)) +} + +/// Parses DateTime64 precision and returns the appropriate Arrow timestamp type. +/// DateTime64(0) -> Second +/// DateTime64(3) -> Millisecond +/// DateTime64(6) -> Microsecond +/// DateTime64(9) -> Nanosecond +/// +fn parse_datetime64_precision(ch_type: &str) -> Result { + // Parse from type string + let (_type_name, args_str) = extract_identifier(ch_type); + + let args = parse_args(args_str).map_err(|e| { + format!( + "Could not parse DateTime64 arguments from '{}': {}. 
Expected format: DateTime64(0-9) or DateTime64(0-9, 'timezone')", + ch_type, e + ) + })?; + + // DateTime64(precision) or DateTime64(precision, 'timezone') + if args.is_empty() { + return Err(format!( + "DateTime64 type '{}' has no precision argument. Expected format: DateTime64(0-9) or DateTime64(0-9, 'timezone')", + ch_type + )); + } + + // Parse the precision (first argument) + match args[0].parse::() { + Ok(0) => Ok(DataType::Timestamp(TimeUnit::Second, None)), + Ok(1..=3) => Ok(DataType::Timestamp(TimeUnit::Millisecond, None)), + Ok(4..=6) => Ok(DataType::Timestamp(TimeUnit::Microsecond, None)), + Ok(7..=9) => Ok(DataType::Timestamp(TimeUnit::Nanosecond, None)), + _ => Err(format!( + "Unsupported DateTime64 precision in '{}'. Precision must be 0-9", + ch_type + )), + } +} + +#[cfg(test)] +mod tests { + use super::*; + + // Helper function for tests that don't need metadata + fn convert_type_no_metadata(ch_type: &str) -> Result<(DataType, bool), String> { + clickhouse_type_to_arrow(ch_type) + } + + #[test] + fn test_clickhouse_type_mapping() { + assert_eq!( + convert_type_no_metadata("String").expect("Failed to convert ClickHouse type to Arrow"), + (DataType::Utf8, false) + ); + assert_eq!( + convert_type_no_metadata("Int64").expect("Failed to convert ClickHouse type to Arrow"), + (DataType::Int64, false) + ); + assert_eq!( + convert_type_no_metadata("Float64") + .expect("Failed to convert ClickHouse type to Arrow"), + (DataType::Float64, false) + ); + assert_eq!( + convert_type_no_metadata("Bool").expect("Failed to convert ClickHouse type to Arrow"), + (DataType::Boolean, false) + ); + assert_eq!( + convert_type_no_metadata("DateTime") + .expect("Failed to convert ClickHouse type to Arrow"), + (DataType::Timestamp(TimeUnit::Second, None), false) + ); + } + + #[test] + fn test_datetime64_precision_mapping() { + assert_eq!( + convert_type_no_metadata("DateTime64(0)") + .expect("Failed to convert ClickHouse type to Arrow"), + 
(DataType::Timestamp(TimeUnit::Second, None), false) + ); + assert_eq!( + convert_type_no_metadata("DateTime64(3)") + .expect("Failed to convert ClickHouse type to Arrow"), + (DataType::Timestamp(TimeUnit::Millisecond, None), false) + ); + assert_eq!( + convert_type_no_metadata("DateTime64(6)") + .expect("Failed to convert ClickHouse type to Arrow"), + (DataType::Timestamp(TimeUnit::Microsecond, None), false) + ); + assert_eq!( + convert_type_no_metadata("DateTime64(9)") + .expect("Failed to convert ClickHouse type to Arrow"), + (DataType::Timestamp(TimeUnit::Nanosecond, None), false) + ); + // Test with timezones + assert_eq!( + convert_type_no_metadata("DateTime64(9, 'UTC')") + .expect("Failed to convert ClickHouse type to Arrow"), + (DataType::Timestamp(TimeUnit::Nanosecond, None), false) + ); + assert_eq!( + convert_type_no_metadata("DateTime64(6, 'UTC')") + .expect("Failed to convert ClickHouse type to Arrow"), + (DataType::Timestamp(TimeUnit::Microsecond, None), false) + ); + assert_eq!( + convert_type_no_metadata("DateTime64(9, 'America/New_York')") + .expect("Failed to convert ClickHouse type to Arrow"), + (DataType::Timestamp(TimeUnit::Nanosecond, None), false) + ); + // Test edge cases for precision ranges + assert_eq!( + convert_type_no_metadata("DateTime64(1)") + .expect("Failed to convert ClickHouse type to Arrow"), + (DataType::Timestamp(TimeUnit::Millisecond, None), false) + ); + assert_eq!( + convert_type_no_metadata("DateTime64(4)") + .expect("Failed to convert ClickHouse type to Arrow"), + (DataType::Timestamp(TimeUnit::Microsecond, None), false) + ); + assert_eq!( + convert_type_no_metadata("DateTime64(7)") + .expect("Failed to convert ClickHouse type to Arrow"), + (DataType::Timestamp(TimeUnit::Nanosecond, None), false) + ); + } + + #[test] + fn test_nullable_type_mapping() { + // Non-nullable types + assert_eq!( + convert_type_no_metadata("String").expect("Failed to convert ClickHouse type to Arrow"), + (DataType::Utf8, false) + ); + 
assert_eq!( + convert_type_no_metadata("Int64").expect("Failed to convert ClickHouse type to Arrow"), + (DataType::Int64, false) + ); + + // Nullable types + assert_eq!( + convert_type_no_metadata("Nullable(String)") + .expect("Failed to convert ClickHouse type to Arrow"), + (DataType::Utf8, true) + ); + assert_eq!( + convert_type_no_metadata("Nullable(Int64)") + .expect("Failed to convert ClickHouse type to Arrow"), + (DataType::Int64, true) + ); + assert_eq!( + convert_type_no_metadata("Nullable(Float64)") + .expect("Failed to convert ClickHouse type to Arrow"), + (DataType::Float64, true) + ); + } + + #[test] + fn test_lowcardinality_type_mapping() { + assert_eq!( + convert_type_no_metadata("LowCardinality(String)") + .expect("Failed to convert ClickHouse type to Arrow"), + (DataType::Utf8, false) + ); + assert_eq!( + convert_type_no_metadata("LowCardinality(FixedString(10))") + .expect("Failed to convert ClickHouse type to Arrow"), + (DataType::Utf8, false) + ); + // Nullable + LowCardinality + assert_eq!( + convert_type_no_metadata("LowCardinality(Nullable(String))") + .expect("Failed to convert ClickHouse type to Arrow"), + (DataType::Utf8, true) + ); + } + + #[test] + fn test_decimal_type_mapping() { + // Generic Decimal(P, S) + assert_eq!( + convert_type_no_metadata("Decimal(10, 2)") + .expect("Failed to convert ClickHouse type to Arrow"), + (DataType::Decimal128(10, 2), false) + ); + assert_eq!( + convert_type_no_metadata("Decimal(38, 6)") + .expect("Failed to convert ClickHouse type to Arrow"), + (DataType::Decimal128(38, 6), false) + ); + assert_eq!( + convert_type_no_metadata("Decimal(50, 10)") + .expect("Failed to convert ClickHouse type to Arrow"), + (DataType::Decimal256(50, 10), false) + ); + + // Generic Decimal without spaces and with spaces + assert_eq!( + convert_type_no_metadata("Decimal(10,2)") + .expect("Failed to convert ClickHouse type to Arrow"), + (DataType::Decimal128(10, 2), false) + ); + assert_eq!( + convert_type_no_metadata("Decimal( 
18 , 6 )") + .expect("Failed to convert ClickHouse type to Arrow"), + (DataType::Decimal128(18, 6), false) + ); + + // Decimal32(S) - precision up to 9 + assert_eq!( + convert_type_no_metadata("Decimal32(2)") + .expect("Failed to convert ClickHouse type to Arrow"), + (DataType::Decimal128(9, 2), false) + ); + assert_eq!( + convert_type_no_metadata("Decimal32(4)") + .expect("Failed to convert ClickHouse type to Arrow"), + (DataType::Decimal128(9, 4), false) + ); + + // Decimal64(S) - precision up to 18 + assert_eq!( + convert_type_no_metadata("Decimal64(4)") + .expect("Failed to convert ClickHouse type to Arrow"), + (DataType::Decimal128(18, 4), false) + ); + assert_eq!( + convert_type_no_metadata("Decimal64(8)") + .expect("Failed to convert ClickHouse type to Arrow"), + (DataType::Decimal128(18, 8), false) + ); + + // Decimal128(S) - precision up to 38 + assert_eq!( + convert_type_no_metadata("Decimal128(10)") + .expect("Failed to convert ClickHouse type to Arrow"), + (DataType::Decimal128(38, 10), false) + ); + + // Decimal256(S) - precision up to 76 + assert_eq!( + convert_type_no_metadata("Decimal256(20)") + .expect("Failed to convert ClickHouse type to Arrow"), + (DataType::Decimal256(76, 20), false) + ); + + // With Nullable wrapper + assert_eq!( + convert_type_no_metadata("Nullable(Decimal(18, 6))") + .expect("Failed to convert ClickHouse type to Arrow"), + (DataType::Decimal128(18, 6), true) + ); + } + + #[test] + fn test_extract_identifier() { + assert_eq!(extract_identifier("Decimal(10, 2)"), ("Decimal", "(10, 2)")); + assert_eq!(extract_identifier("DateTime64(3)"), ("DateTime64", "(3)")); + assert_eq!(extract_identifier("Int32"), ("Int32", "")); + assert_eq!( + extract_identifier("LowCardinality(String)"), + ("LowCardinality", "(String)") + ); + assert_eq!(extract_identifier("Decimal128(10)"), ("Decimal128", "(10)")); + } + + #[test] + fn test_parse_args() { + // Simple cases + assert_eq!( + parse_args("(10, 2)").unwrap(), + vec!["10".to_string(), 
"2".to_string()] + ); + assert_eq!(parse_args("(3)").unwrap(), vec!["3".to_string()]); + assert_eq!(parse_args("()").unwrap(), Vec::::new()); + + // With spaces + assert_eq!( + parse_args("( 10 , 2 )").unwrap(), + vec!["10".to_string(), "2".to_string()] + ); + + // With nested parentheses + assert_eq!( + parse_args("(Nullable(String))").unwrap(), + vec!["Nullable(String)".to_string()] + ); + assert_eq!( + parse_args("(Array(Int32), String)").unwrap(), + vec!["Array(Int32)".to_string(), "String".to_string()] + ); + + // With quotes + assert_eq!( + parse_args("(3, 'UTC')").unwrap(), + vec!["3".to_string(), "'UTC'".to_string()] + ); + assert_eq!( + parse_args("(9, 'America/New_York')").unwrap(), + vec!["9".to_string(), "'America/New_York'".to_string()] + ); + + // Complex nested case + assert_eq!( + parse_args("(Tuple(Int32, String), Array(Float64))").unwrap(), + vec![ + "Tuple(Int32, String)".to_string(), + "Array(Float64)".to_string() + ] + ); + + // Error cases + assert!(parse_args("10, 2").is_err()); // Missing parentheses + assert!(parse_args("(10, 2").is_err()); // Missing closing paren + } + + #[test] + fn test_array_type_not_supported() { + // Array types should return an error + let result = convert_type_no_metadata("Array(Int32)"); + assert!(result.is_err()); + let err = result.unwrap_err(); + assert!(err.contains("Array type")); + assert!(err.contains("not supported")); + } + + #[test] + fn test_tuple_type_not_supported() { + // Tuple types should return an error + let result = convert_type_no_metadata("Tuple(String, Int64)"); + assert!(result.is_err()); + let err = result.unwrap_err(); + assert!(err.contains("Tuple type")); + assert!(err.contains("not supported")); + } + + #[test] + fn test_map_type_not_supported() { + // Map types should return an error + let result = convert_type_no_metadata("Map(String, Int64)"); + assert!(result.is_err()); + let err = result.unwrap_err(); + assert!(err.contains("Map type")); + assert!(err.contains("not supported")); + 
} + + #[test] + fn test_unknown_type_fails() { + // Unknown types should return an error + let result = convert_type_no_metadata("UnknownType"); + assert!(result.is_err()); + let err = result.unwrap_err(); + assert!(err.contains("Unknown ClickHouse type")); + } + + #[test] + fn test_parse_ch_type_primitives() { + assert_eq!(parse_ch_type("String"), ClickHouseType::Primitive("String")); + assert_eq!(parse_ch_type("Int64"), ClickHouseType::Primitive("Int64")); + assert_eq!( + parse_ch_type("DateTime64(3)"), + ClickHouseType::Primitive("DateTime64(3)") + ); + } + + #[test] + fn test_parse_ch_type_nullable() { + assert_eq!( + parse_ch_type("Nullable(String)"), + ClickHouseType::Nullable(Box::new(ClickHouseType::Primitive("String"))) + ); + assert_eq!( + parse_ch_type("Nullable(Int64)"), + ClickHouseType::Nullable(Box::new(ClickHouseType::Primitive("Int64"))) + ); + } + + #[test] + fn test_parse_ch_type_lowcardinality() { + assert_eq!( + parse_ch_type("LowCardinality(String)"), + ClickHouseType::LowCardinality(Box::new(ClickHouseType::Primitive("String"))) + ); + assert_eq!( + parse_ch_type("LowCardinality(Nullable(String))"), + ClickHouseType::LowCardinality(Box::new(ClickHouseType::Nullable(Box::new( + ClickHouseType::Primitive("String") + )))) + ); + } + + #[test] + fn test_parse_ch_type_is_nullable() { + assert!(!parse_ch_type("String").is_nullable()); + assert!(parse_ch_type("Nullable(String)").is_nullable()); + assert!(parse_ch_type("LowCardinality(Nullable(String))").is_nullable()); + assert!(!parse_ch_type("LowCardinality(String)").is_nullable()); + } + + #[test] + fn test_parse_ch_type_base_type() { + let parsed = parse_ch_type("LowCardinality(Nullable(String))"); + assert_eq!(parsed.base_type(), &ClickHouseType::Primitive("String")); + + let parsed = parse_ch_type("Nullable(Int64)"); + assert_eq!(parsed.base_type(), &ClickHouseType::Primitive("Int64")); + + let parsed = parse_ch_type("String"); + assert_eq!(parsed.base_type(), 
&ClickHouseType::Primitive("String")); + } +} diff --git a/src/sinks/clickhouse/arrow/schema.rs b/src/sinks/clickhouse/arrow/schema.rs new file mode 100644 index 0000000000000..f2359ca5f3519 --- /dev/null +++ b/src/sinks/clickhouse/arrow/schema.rs @@ -0,0 +1,227 @@ +//! Schema fetching and Arrow schema construction for ClickHouse tables. + +use arrow::datatypes::{Field, Schema}; +use async_trait::async_trait; +use http::{Request, StatusCode}; +use hyper::Body; +use serde::Deserialize; +use vector_lib::codecs::encoding::format::{ArrowEncodingError, SchemaProvider}; + +use crate::http::{Auth, HttpClient}; + +use super::parser::clickhouse_type_to_arrow; + +#[derive(Debug, Deserialize)] +struct ColumnInfo { + name: String, + #[serde(rename = "type")] + column_type: String, +} + +/// URL-encodes a string for use in HTTP query parameters. +fn url_encode(s: &str) -> String { + percent_encoding::utf8_percent_encode(s, percent_encoding::NON_ALPHANUMERIC).to_string() +} + +/// Fetches the schema for a ClickHouse table and converts it to an Arrow schema. +pub async fn fetch_table_schema( + client: &HttpClient, + endpoint: &str, + database: &str, + table: &str, + auth: Option<&Auth>, +) -> crate::Result { + let query = "SELECT name, type \ + FROM system.columns \ + WHERE database = {db:String} AND table = {tbl:String} \ + ORDER BY position \ + FORMAT JSONEachRow"; + + // Build URI with query and parameters + let uri = format!( + "{}?query={}¶m_db={}¶m_tbl={}", + endpoint, + url_encode(query), + url_encode(database), + url_encode(table) + ); + let mut request = Request::get(&uri).body(Body::empty()).unwrap(); + + if let Some(auth) = auth { + auth.apply(&mut request); + } + + let response = client.send(request).await?; + + match response.status() { + StatusCode::OK => { + let body_bytes = http_body::Body::collect(response.into_body()) + .await? 
+ .to_bytes(); + let body_str = String::from_utf8(body_bytes.into()) + .map_err(|e| format!("Failed to parse response as UTF-8: {}", e))?; + + parse_schema_from_response(&body_str) + } + status => Err(format!("Failed to fetch schema from ClickHouse: HTTP {}", status).into()), + } +} + +/// Parses the JSON response from ClickHouse and builds an Arrow schema. +fn parse_schema_from_response(response: &str) -> crate::Result { + let mut columns: Vec = Vec::new(); + + for line in response.lines() { + if line.trim().is_empty() { + continue; + } + + let column: ColumnInfo = serde_json::from_str(line) + .map_err(|e| format!("Failed to parse column info: {}", e))?; + columns.push(column); + } + + if columns.is_empty() { + return Err("No columns found in table schema".into()); + } + + let mut fields = Vec::new(); + for column in columns { + let (arrow_type, nullable) = clickhouse_type_to_arrow(&column.column_type) + .map_err(|e| format!("Failed to convert column '{}': {}", column.name, e))?; + fields.push(Field::new(&column.name, arrow_type, nullable)); + } + + Ok(Schema::new(fields)) +} + +/// Schema provider implementation for ClickHouse tables. +#[derive(Clone, Debug)] +pub struct ClickHouseSchemaProvider { + client: HttpClient, + endpoint: String, + database: String, + table: String, + auth: Option, +} + +impl ClickHouseSchemaProvider { + /// Create a new ClickHouse schema provider. 
+ pub const fn new( + client: HttpClient, + endpoint: String, + database: String, + table: String, + auth: Option, + ) -> Self { + Self { + client, + endpoint, + database, + table, + auth, + } + } +} + +#[async_trait] +impl SchemaProvider for ClickHouseSchemaProvider { + async fn get_schema(&self) -> Result { + fetch_table_schema( + &self.client, + &self.endpoint, + &self.database, + &self.table, + self.auth.as_ref(), + ) + .await + .map_err(|e| ArrowEncodingError::SchemaFetchError { + message: e.to_string(), + }) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use arrow::datatypes::{DataType, TimeUnit}; + + #[test] + fn test_parse_schema() { + let response = r#"{"name":"id","type":"Int64"} +{"name":"message","type":"String"} +{"name":"timestamp","type":"DateTime"} +"#; + + let schema = parse_schema_from_response(response).unwrap(); + assert_eq!(schema.fields().len(), 3); + assert_eq!(schema.field(0).name(), "id"); + assert_eq!(schema.field(0).data_type(), &DataType::Int64); + assert_eq!(schema.field(1).name(), "message"); + assert_eq!(schema.field(1).data_type(), &DataType::Utf8); + assert_eq!(schema.field(2).name(), "timestamp"); + assert_eq!( + schema.field(2).data_type(), + &DataType::Timestamp(TimeUnit::Second, None) + ); + } + + #[test] + fn test_parse_schema_with_type_parameters() { + // Test that type string parsing works for types with parameters + let response = r#"{"name":"bytes_sent","type":"Decimal(18, 2)"} +{"name":"timestamp","type":"DateTime64(6)"} +{"name":"duration_ms","type":"Decimal32(4)"} +"#; + + let schema = parse_schema_from_response(response).unwrap(); + assert_eq!(schema.fields().len(), 3); + + // Check Decimal parsed from type string + assert_eq!(schema.field(0).name(), "bytes_sent"); + assert_eq!(schema.field(0).data_type(), &DataType::Decimal128(18, 2)); + + // Check DateTime64 parsed from type string + assert_eq!(schema.field(1).name(), "timestamp"); + assert_eq!( + schema.field(1).data_type(), + 
&DataType::Timestamp(TimeUnit::Microsecond, None) + ); + + // Check Decimal32 parsed from type string + assert_eq!(schema.field(2).name(), "duration_ms"); + assert_eq!(schema.field(2).data_type(), &DataType::Decimal128(9, 4)); + } + + #[test] + fn test_schema_field_ordering() { + let response = r#"{"name":"timestamp","type":"DateTime64(3)"} +{"name":"host","type":"String"} +{"name":"message","type":"String"} +{"name":"id","type":"Int64"} +{"name":"score","type":"Float64"} +{"name":"active","type":"Bool"} +{"name":"name","type":"String"} +"#; + + let schema = parse_schema_from_response(response).unwrap(); + assert_eq!(schema.fields().len(), 7); + + assert_eq!(schema.field(0).name(), "timestamp"); + assert_eq!(schema.field(1).name(), "host"); + assert_eq!(schema.field(2).name(), "message"); + assert_eq!(schema.field(3).name(), "id"); + assert_eq!(schema.field(4).name(), "score"); + assert_eq!(schema.field(5).name(), "active"); + assert_eq!(schema.field(6).name(), "name"); + + assert_eq!( + schema.field(0).data_type(), + &DataType::Timestamp(TimeUnit::Millisecond, None) + ); + assert_eq!(schema.field(1).data_type(), &DataType::Utf8); + assert_eq!(schema.field(3).data_type(), &DataType::Int64); + assert_eq!(schema.field(4).data_type(), &DataType::Float64); + assert_eq!(schema.field(5).data_type(), &DataType::Boolean); + } +} diff --git a/src/sinks/clickhouse/config.rs b/src/sinks/clickhouse/config.rs index c33bfdbac5eda..6ff081e9aafa0 100644 --- a/src/sinks/clickhouse/config.rs +++ b/src/sinks/clickhouse/config.rs @@ -4,7 +4,8 @@ use std::fmt; use http::{Request, StatusCode, Uri}; use hyper::Body; -use vector_lib::codecs::{JsonSerializerConfig, NewlineDelimitedEncoderConfig, encoding::Framer}; +use vector_lib::codecs::encoding::format::SchemaProvider; +use vector_lib::codecs::encoding::{ArrowStreamSerializerConfig, BatchSerializerConfig}; use super::{ request_builder::ClickhouseRequestBuilder, @@ -39,6 +40,10 @@ pub enum Format { /// JSONAsString. 
JsonAsString, + + /// ArrowStream (beta). + #[configurable(metadata(status = "beta"))] + ArrowStream, } impl fmt::Display for Format { @@ -47,6 +52,7 @@ impl fmt::Display for Format { Format::JsonEachRow => write!(f, "JSONEachRow"), Format::JsonAsObject => write!(f, "JSONAsObject"), Format::JsonAsString => write!(f, "JSONAsString"), + Format::ArrowStream => write!(f, "ArrowStream"), } } } @@ -95,6 +101,14 @@ pub struct ClickhouseConfig { #[serde(default, skip_serializing_if = "crate::serde::is_default")] pub encoding: Transformer, + /// The batch encoding configuration for encoding events in batches. + /// + /// When specified, events are encoded together as a single batch. + /// This is mutually exclusive with per-event encoding based on the `format` field. + #[configurable(derived)] + #[serde(default)] + pub batch_encoding: Option, + #[configurable(derived)] #[serde(default)] pub batch: BatchConfig, @@ -215,15 +229,14 @@ impl SinkConfig for ClickhouseConfig { .expect("'default' should be a valid template") }); + // Resolve the encoding strategy (format + encoder) based on configuration + let (format, encoder_kind) = self + .resolve_strategy(&client, &endpoint, &database, auth.as_ref()) + .await?; + let request_builder = ClickhouseRequestBuilder { compression: self.compression, - encoding: ( - self.encoding.clone(), - Encoder::::new( - NewlineDelimitedEncoderConfig.build().into(), - JsonSerializerConfig::default().build().into(), - ), - ), + encoder: (self.encoding.clone(), encoder_kind), }; let sink = ClickhouseSink::new( @@ -231,7 +244,7 @@ impl SinkConfig for ClickhouseConfig { service, database, self.table.clone(), - self.format, + format, request_builder, ); @@ -249,6 +262,119 @@ impl SinkConfig for ClickhouseConfig { } } +impl ClickhouseConfig { + /// Resolves the encoding strategy (format + encoder) based on configuration. 
+ /// + /// This method determines the appropriate ClickHouse format and Vector encoder + /// based on the user's configuration, ensuring they are consistent. + async fn resolve_strategy( + &self, + client: &HttpClient, + endpoint: &Uri, + database: &Template, + auth: Option<&Auth>, + ) -> crate::Result<(Format, crate::codecs::EncoderKind)> { + use crate::codecs::EncoderKind; + use vector_lib::codecs::{ + JsonSerializerConfig, NewlineDelimitedEncoderConfig, encoding::Framer, + }; + + if let Some(batch_encoding) = &self.batch_encoding { + use crate::codecs::{BatchEncoder, BatchSerializer}; + + // Validate that batch_encoding is only compatible with ArrowStream format + if self.format != Format::ArrowStream { + return Err(format!( + "'batch_encoding' is only compatible with 'format: arrow_stream'. Found 'format: {}'.", + self.format + ) + .into()); + } + + let mut arrow_config = match batch_encoding { + BatchSerializerConfig::ArrowStream(config) => config.clone(), + }; + + self.resolve_arrow_schema( + client, + endpoint.to_string(), + database, + auth, + &mut arrow_config, + ) + .await?; + + let resolved_batch_config = BatchSerializerConfig::ArrowStream(arrow_config); + let arrow_serializer = resolved_batch_config.build()?; + let batch_serializer = BatchSerializer::Arrow(arrow_serializer); + let encoder = EncoderKind::Batch(BatchEncoder::new(batch_serializer)); + + return Ok((Format::ArrowStream, encoder)); + } + + let encoder = EncoderKind::Framed(Box::new(Encoder::::new( + NewlineDelimitedEncoderConfig.build().into(), + JsonSerializerConfig::default().build().into(), + ))); + + Ok((self.format, encoder)) + } + + async fn resolve_arrow_schema( + &self, + client: &HttpClient, + endpoint: String, + database: &Template, + auth: Option<&Auth>, + config: &mut ArrowStreamSerializerConfig, + ) -> crate::Result<()> { + use super::arrow; + + if self.table.is_dynamic() || database.is_dynamic() { + return Err( + "Arrow codec requires a static table and database. 
Dynamic schema inference is not supported." + .into(), + ); + } + + let table_str = self.table.get_ref(); + let database_str = database.get_ref(); + + debug!( + "Fetching schema for table {}.{} at startup.", + database_str, table_str + ); + + let provider = arrow::ClickHouseSchemaProvider::new( + client.clone(), + endpoint, + database_str.to_string(), + table_str.to_string(), + auth.cloned(), + ); + + let schema = provider.get_schema().await.map_err(|e| { + format!( + "Failed to fetch schema for {}.{}: {}.", + database_str, table_str, e + ) + })?; + + config.schema = Some(schema); + + debug!( + "Successfully fetched Arrow schema with {} fields.", + config + .schema + .as_ref() + .map(|s| s.fields().len()) + .unwrap_or(0) + ); + + Ok(()) + } +} + fn get_healthcheck_uri(endpoint: &Uri) -> String { let mut uri = endpoint.to_string(); if !uri.ends_with('/') { @@ -277,6 +403,7 @@ async fn healthcheck(client: HttpClient, endpoint: Uri, auth: Option) -> c #[cfg(test)] mod tests { use super::*; + use vector_lib::codecs::encoding::ArrowStreamSerializerConfig; #[test] fn generate_config() { @@ -298,4 +425,82 @@ mod tests { "http://localhost:8123/path/?query=SELECT%201" ); } + + /// Helper to create a minimal ClickhouseConfig for testing + fn create_test_config( + format: Format, + batch_encoding: Option, + ) -> ClickhouseConfig { + ClickhouseConfig { + endpoint: "http://localhost:8123".parse::().unwrap().into(), + table: "test_table".try_into().unwrap(), + database: Some("test_db".try_into().unwrap()), + format, + batch_encoding, + ..Default::default() + } + } + + #[tokio::test] + async fn test_format_selection_with_batch_encoding() { + use crate::http::HttpClient; + use crate::tls::TlsSettings; + + // Create minimal dependencies for resolve_strategy + let tls = TlsSettings::default(); + let client = HttpClient::new(tls, &Default::default()).unwrap(); + let endpoint: http::Uri = "http://localhost:8123".parse().unwrap(); + let database: Template = 
"test_db".try_into().unwrap(); + + // Test incompatible formats - should all return errors + let incompatible_formats = vec![ + (Format::JsonEachRow, "json_each_row"), + (Format::JsonAsObject, "json_as_object"), + (Format::JsonAsString, "json_as_string"), + ]; + + for (format, format_name) in incompatible_formats { + let config = create_test_config( + format, + Some(BatchSerializerConfig::ArrowStream( + ArrowStreamSerializerConfig::default(), + )), + ); + + let result = config + .resolve_strategy(&client, &endpoint, &database, None) + .await; + + assert!( + result.is_err(), + "Expected error for format {} with batch_encoding, but got success", + format_name + ); + } + } + + #[test] + fn test_format_selection_without_batch_encoding() { + // When batch_encoding is None, the configured format should be used + let configs = vec![ + Format::JsonEachRow, + Format::JsonAsObject, + Format::JsonAsString, + Format::ArrowStream, + ]; + + for format in configs { + let config = create_test_config(format, None); + + assert!( + config.batch_encoding.is_none(), + "batch_encoding should be None for format {:?}", + format + ); + assert_eq!( + config.format, format, + "format should match configured value" + ); + } + } } diff --git a/src/sinks/clickhouse/integration_tests.rs b/src/sinks/clickhouse/integration_tests.rs index fe7a10226ac60..3798595708b41 100644 --- a/src/sinks/clickhouse/integration_tests.rs +++ b/src/sinks/clickhouse/integration_tests.rs @@ -16,6 +16,7 @@ use serde::Deserialize; use serde_json::Value; use tokio::time::{Duration, timeout}; use vector_lib::{ + codecs::encoding::BatchSerializerConfig, event::{BatchNotifier, BatchStatus, BatchStatusReceiver, Event, LogEvent}, lookup::PathPrefix, }; @@ -468,3 +469,139 @@ struct Stats { elapsed: f64, rows_read: usize, } + +#[tokio::test] +async fn insert_events_arrow_format() { + trace_init(); + + let table = random_table_name(); + let host = clickhouse_address(); + + let mut batch = BatchConfig::default(); + 
batch.max_events = Some(5); + + let config = ClickhouseConfig { + endpoint: host.parse().unwrap(), + table: table.clone().try_into().unwrap(), + compression: Compression::None, + format: crate::sinks::clickhouse::config::Format::ArrowStream, + batch_encoding: Some(BatchSerializerConfig::ArrowStream(Default::default())), + batch, + request: TowerRequestConfig { + retry_attempts: 1, + ..Default::default() + }, + ..Default::default() + }; + + let client = ClickhouseClient::new(host.clone()); + + client + .create_table( + &table, + "host String, timestamp DateTime64(3), message String, count Int64", + ) + .await; + + let (sink, _hc) = config.build(SinkContext::default()).await.unwrap(); + + let mut events: Vec = Vec::new(); + for i in 0..5 { + let mut event = LogEvent::from(format!("log message {}", i)); + event.insert("host", format!("host{}.example.com", i)); + event.insert("count", i as i64); + events.push(event.into()); + } + + run_and_assert_sink_compliance(sink, stream::iter(events), &SINK_TAGS).await; + + let output = client.select_all(&table).await; + assert_eq!(5, output.rows); + + // Verify fields exist and are correctly typed + for row in output.data.iter() { + assert!(row.get("host").and_then(|v| v.as_str()).is_some()); + assert!(row.get("message").and_then(|v| v.as_str()).is_some()); + assert!( + row.get("count") + .and_then(|v| v.as_str()) + .and_then(|s| s.parse::().ok()) + .is_some() + ); + } +} + +#[tokio::test] +async fn insert_events_arrow_with_schema_fetching() { + trace_init(); + + let table = random_table_name(); + let host = clickhouse_address(); + + let mut batch = BatchConfig::default(); + batch.max_events = Some(3); + + let client = ClickhouseClient::new(host.clone()); + + // Create table with specific typed columns including various data types + // Include standard Vector log fields: host, timestamp, message + client + .create_table( + &table, + "host String, timestamp DateTime64(3), message String, id Int64, name String, score Float64, 
active Bool", + ) + .await; + + let config = ClickhouseConfig { + endpoint: host.parse().unwrap(), + table: table.clone().try_into().unwrap(), + compression: Compression::None, + format: crate::sinks::clickhouse::config::Format::ArrowStream, + batch_encoding: Some(BatchSerializerConfig::ArrowStream(Default::default())), + batch, + request: TowerRequestConfig { + retry_attempts: 1, + ..Default::default() + }, + ..Default::default() + }; + + // Building the sink should fetch the schema from ClickHouse + let (sink, _hc) = config.build(SinkContext::default()).await.unwrap(); + + // Create events with various types that should match the schema + let mut events: Vec = Vec::new(); + for i in 0..3 { + let mut event = LogEvent::from(format!("Test message {}", i)); + event.insert("host", format!("host{}.example.com", i)); + event.insert("id", i as i64); + event.insert("name", format!("user_{}", i)); + event.insert("score", 95.5 + i as f64); + event.insert("active", i % 2 == 0); + events.push(event.into()); + } + + run_and_assert_sink_compliance(sink, stream::iter(events), &SINK_TAGS).await; + + let output = client.select_all(&table).await; + assert_eq!(3, output.rows); + + // Verify all fields exist and have the correct types + for row in output.data.iter() { + // Check standard Vector fields exist + assert!(row.get("host").and_then(|v| v.as_str()).is_some()); + assert!(row.get("message").and_then(|v| v.as_str()).is_some()); + assert!(row.get("timestamp").is_some()); + + // Check custom fields have correct types + assert!( + row.get("id") + .and_then(|v| v.as_str()) + .and_then(|s| s.parse::().ok()) + .is_some() + ); + assert!(row.get("name").and_then(|v| v.as_str()).is_some()); + assert!(row.get("score").and_then(|v| v.as_f64()).is_some()); + assert!(row.get("active").and_then(|v| v.as_bool()).is_some()); + } +} diff --git a/src/sinks/clickhouse/mod.rs b/src/sinks/clickhouse/mod.rs index 3a578041bf533..4b38834915379 100644 --- a/src/sinks/clickhouse/mod.rs +++ 
b/src/sinks/clickhouse/mod.rs @@ -9,6 +9,7 @@ //! //! This sink only supports logs for now but could support metrics and traces as well in the future. +mod arrow; pub mod config; #[cfg(all(test, feature = "clickhouse-integration-tests"))] mod integration_tests; diff --git a/src/sinks/clickhouse/request_builder.rs b/src/sinks/clickhouse/request_builder.rs index 7f8edc0d2d02e..688f1436c37df 100644 --- a/src/sinks/clickhouse/request_builder.rs +++ b/src/sinks/clickhouse/request_builder.rs @@ -1,20 +1,21 @@ //! `RequestBuilder` implementation for the `Clickhouse` sink. use bytes::Bytes; -use vector_lib::codecs::encoding::Framer; use super::sink::PartitionKey; -use crate::sinks::{prelude::*, util::http::HttpRequest}; +use crate::codecs::EncoderKind; +use crate::sinks::prelude::*; +use crate::sinks::util::http::HttpRequest; pub(super) struct ClickhouseRequestBuilder { pub(super) compression: Compression, - pub(super) encoding: (Transformer, Encoder), + pub(super) encoder: (Transformer, EncoderKind), } impl RequestBuilder<(PartitionKey, Vec)> for ClickhouseRequestBuilder { type Metadata = (PartitionKey, EventFinalizers); type Events = Vec; - type Encoder = (Transformer, Encoder); + type Encoder = (Transformer, EncoderKind); type Payload = Bytes; type Request = HttpRequest; type Error = std::io::Error; @@ -24,7 +25,7 @@ impl RequestBuilder<(PartitionKey, Vec)> for ClickhouseRequestBuilder { } fn encoder(&self) -> &Self::Encoder { - &self.encoding + &self.encoder } fn split_input( diff --git a/src/sinks/clickhouse/service.rs b/src/sinks/clickhouse/service.rs index e9974b32a8dd9..53b09270fbdfd 100644 --- a/src/sinks/clickhouse/service.rs +++ b/src/sinks/clickhouse/service.rs @@ -92,10 +92,18 @@ impl HttpServiceRequestBuilder for ClickhouseServiceRequestBuilder let auth: Option = self.auth.clone(); + // Extract format before taking payload to avoid borrow checker issues + let format = metadata.format; let payload = request.take_payload(); + // Set content type based on format 
+ let content_type = match format { + Format::ArrowStream => "application/vnd.apache.arrow.stream", + _ => "application/x-ndjson", + }; + let mut builder = Request::post(&uri) - .header(CONTENT_TYPE, "application/x-ndjson") + .header(CONTENT_TYPE, content_type) .header(CONTENT_LENGTH, payload.len()); if let Some(ce) = self.compression.content_encoding() { builder = builder.header(CONTENT_ENCODING, ce); @@ -200,8 +208,8 @@ fn set_uri_query( #[cfg(test)] mod tests { + use super::super::config::AsyncInsertSettingsConfig; use super::*; - use crate::sinks::clickhouse::config::*; #[test] fn encode_valid() { diff --git a/website/cue/reference/components/sinks/clickhouse.cue b/website/cue/reference/components/sinks/clickhouse.cue index 893ff01c63d32..1049cf5217976 100644 --- a/website/cue/reference/components/sinks/clickhouse.cue +++ b/website/cue/reference/components/sinks/clickhouse.cue @@ -80,4 +80,90 @@ components: sinks: clickhouse: { metrics: null traces: false } + + how_it_works: { + data_formats: { + title: "Data Formats" + body: """ + The ClickHouse sink supports multiple data formats for inserting events: + + #### JSONEachRow (default) + + The default format is `JSONEachRow`, which sends events as newline-delimited JSON. Each event is + encoded as a single JSON object on its own line. This format is simple and flexible, allowing + ClickHouse to handle type conversions automatically. + + ```yaml + sinks: + clickhouse: + type: clickhouse + endpoint: http://localhost:8123 + database: my_database + table: my_table + format: json_each_row # default + ``` + + #### JSONAsObject and JSONAsString + + These formats provide alternative JSON encoding strategies: + - `json_as_object`: Wraps the entire event as a JSON object + - `json_as_string`: Encodes the event as a JSON string + + #### ArrowStream (beta) + + The `arrow_stream` format uses Apache Arrow's streaming format to send data to ClickHouse. 
This + format offers better performance and type safety by fetching the table schema from ClickHouse at + startup and encoding events directly into Arrow format. + + ```yaml + sinks: + clickhouse: + type: clickhouse + endpoint: http://localhost:8123 + database: my_database + table: my_table + format: arrow_stream + batch_encoding: + codec: arrow_stream + ``` + + **Note**: The ArrowStream format requires a static (non-templated) table and database name, as the + schema is fetched once at startup. Dynamic table routing is not supported with this format. + """ + } + + arrow_type_mappings: { + title: "Arrow Type Mappings" + body: """ + When using the `arrow_stream` format, Vector automatically converts ClickHouse types to Arrow types. + The sink fetches the table schema from ClickHouse and maps each column type accordingly. + + #### Unsupported ClickHouse Types + + The following ClickHouse column types are **not yet supported** by Vector's + ArrowStream implementation: + - `Array` + - `Tuple` + - `Map` + - `IPv4` + - `IPv6` + + If your table contains these types, you should use one of the JSON formats instead. + + #### Unsupported Arrow Types + + Based on [ClickHouse's Arrow format documentation](\(urls.clickhouse_arrow)), the following + types are unsupported: + - `FIXED_SIZE_BINARY` + - `JSON` + - `UUID` + - `ENUM` + + #### Timezone Handling + + DateTime and DateTime64 columns with timezone information will be converted to Arrow timestamps + without timezone metadata. All timestamps are treated as UTC by default. 
+ """ + } + } } diff --git a/website/cue/reference/components/sinks/generated/clickhouse.cue b/website/cue/reference/components/sinks/generated/clickhouse.cue index f807b58be475c..a0f4399bdc112 100644 --- a/website/cue/reference/components/sinks/generated/clickhouse.cue +++ b/website/cue/reference/components/sinks/generated/clickhouse.cue @@ -243,6 +243,50 @@ generated: components: sinks: clickhouse: configuration: { } } } + batch_encoding: { + description: """ + The batch encoding configuration for encoding events in batches. + + When specified, events are encoded together as a single batch. + This is mutually exclusive with per-event encoding based on the `format` field. + """ + required: false + type: object: options: { + allow_nullable_fields: { + description: """ + Allow null values for non-nullable fields in the schema. + + When enabled, missing or incompatible values will be encoded as null even for fields + marked as non-nullable in the Arrow schema. This is useful when working with downstream + systems that can handle null values through defaults, computed columns, or other mechanisms. + + When disabled (default), missing values for non-nullable fields will cause encoding errors, + ensuring all required data is present before sending to the sink. + """ + required: false + type: bool: default: false + } + codec: { + description: """ + Encodes events in [Apache Arrow][apache_arrow] IPC streaming format. + + This is the streaming variant of the Arrow IPC format, which writes + a continuous stream of record batches. + + [apache_arrow]: https://arrow.apache.org/ + """ + required: true + type: string: enum: arrow_stream: """ + Encodes events in [Apache Arrow][apache_arrow] IPC streaming format. + + This is the streaming variant of the Arrow IPC format, which writes + a continuous stream of record batches. + + [apache_arrow]: https://arrow.apache.org/ + """ + } + } + } compression: { description: """ Compression configuration. 
@@ -333,6 +377,7 @@ generated: components: sinks: clickhouse: configuration: { type: string: { default: "json_each_row" enum: { + arrow_stream: "ArrowStream (beta)." json_as_object: "JSONAsObject." json_as_string: "JSONAsString." json_each_row: "JSONEachRow." diff --git a/website/cue/reference/urls.cue b/website/cue/reference/urls.cue index cf06991ddf94f..66dee24e23ea3 100644 --- a/website/cue/reference/urls.cue +++ b/website/cue/reference/urls.cue @@ -111,6 +111,7 @@ urls: { chrono_time_formats: "https://docs.rs/chrono/latest/chrono/format/strftime/index.html#specifiers" cgroups_limit_resources: "https://the.binbashtheory.com/control-resources-cgroups/" clickhouse: "https://clickhouse.com/" + clickhouse_arrow: "https://clickhouse.com/docs/en/interfaces/formats#arrow" clickhouse_http: "https://clickhouse.com/docs/en/interfaces/http/" community_id_spec: "https://github.com/corelight/community-id-spec" console: "\(wikipedia)/wiki/System_console" From fa499b94c26145e127e7d8e955fe07c4732fa0ac Mon Sep 17 00:00:00 2001 From: Thomas Date: Fri, 19 Dec 2025 15:07:06 -0500 Subject: [PATCH 159/227] fix(website): VRL functions return types (#24400) --- website/cue/reference/remap/functions/assert.cue | 2 +- website/cue/reference/remap/functions/del.cue | 2 +- website/cue/reference/remap/functions/parse_int.cue | 2 +- website/cue/reference/remap/functions/slice.cue | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/website/cue/reference/remap/functions/assert.cue b/website/cue/reference/remap/functions/assert.cue index cc3596563feb1..6e7d905ac425b 100644 --- a/website/cue/reference/remap/functions/assert.cue +++ b/website/cue/reference/remap/functions/assert.cue @@ -37,7 +37,7 @@ remap: functions: assert: { internal_failure_reasons: [ "`condition` evaluates to `false`.", ] - return: types: ["null"] + return: types: ["boolean"] examples: [ { diff --git a/website/cue/reference/remap/functions/del.cue b/website/cue/reference/remap/functions/del.cue index 
f7e25df261cc6..5a825aa6e40f7 100644 --- a/website/cue/reference/remap/functions/del.cue +++ b/website/cue/reference/remap/functions/del.cue @@ -36,7 +36,7 @@ remap: functions: del: { """, ] return: { - types: ["any"] + types: ["any", "null"] rules: [ "Returns the value of the field being deleted. Returns `null` if the field doesn't exist.", ] diff --git a/website/cue/reference/remap/functions/parse_int.cue b/website/cue/reference/remap/functions/parse_int.cue index 080a20598e871..6c03784ba0117 100644 --- a/website/cue/reference/remap/functions/parse_int.cue +++ b/website/cue/reference/remap/functions/parse_int.cue @@ -30,7 +30,7 @@ remap: functions: parse_int: { "The base is not between 2 and 36.", "The number cannot be parsed in the base.", ] - return: types: ["string"] + return: types: ["integer"] examples: [ { diff --git a/website/cue/reference/remap/functions/slice.cue b/website/cue/reference/remap/functions/slice.cue index eb0192d5931bd..8e4bf4afc2196 100644 --- a/website/cue/reference/remap/functions/slice.cue +++ b/website/cue/reference/remap/functions/slice.cue @@ -32,7 +32,7 @@ remap: functions: slice: { }, ] internal_failure_reasons: [] - return: types: ["string"] + return: types: ["array", "string"] examples: [ { From 89bf79564137976a21a7f4ff138b0b1f54c36a02 Mon Sep 17 00:00:00 2001 From: Thomas Date: Fri, 19 Dec 2025 16:16:23 -0500 Subject: [PATCH 160/227] feat(external docs): Add VRL crate documentation (#24384) --- rust-doc/Makefile | 1 + 1 file changed, 1 insertion(+) diff --git a/rust-doc/Makefile b/rust-doc/Makefile index 5cc08cf4d4fca..4657251c0ada8 100644 --- a/rust-doc/Makefile +++ b/rust-doc/Makefile @@ -6,3 +6,4 @@ docs: ci-docs-build: ../scripts/environment/install-protoc.sh ${HOME}/protoc PATH=${PATH}:${HOME}/protoc/ cargo doc --no-default-features --features="docs" --no-deps --workspace + cargo doc --no-deps -p vrl From fad6e623a97865d0668879e37c03b50df9ec41f2 Mon Sep 17 00:00:00 2001 From: Thomas Date: Mon, 22 Dec 2025 13:04:21 -0500 
Subject: [PATCH 161/227] chore(ci): bump cargo deny to 0.18.9 (#24404) --- scripts/environment/prepare.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/environment/prepare.sh b/scripts/environment/prepare.sh index 368e4287f4bce..fd66005c79b6a 100755 --- a/scripts/environment/prepare.sh +++ b/scripts/environment/prepare.sh @@ -163,8 +163,8 @@ if contains_module cargo-nextest; then fi if contains_module cargo-deny; then - if ! cargo-deny --version 2>/dev/null | grep -q '^cargo-deny 0.16.2'; then - cargo "${install[@]}" cargo-deny --version 0.16.2 --force --locked + if ! cargo-deny --version 2>/dev/null | grep -q '^cargo-deny 0.18.9'; then + cargo "${install[@]}" cargo-deny --version 0.18.9 --force --locked fi fi From 2af657dfbb7e476d2966d05d5f2edb5c66112b40 Mon Sep 17 00:00:00 2001 From: Thomas Date: Mon, 22 Dec 2025 15:49:46 -0500 Subject: [PATCH 162/227] enhancement(vrl): consolidate all VRL functions into vector-vrl-functions crate (#24402) * Create all_vrl_functions * Create and use vector-vrl-all * Move dnstap-parser off of vector-lib * Use vector-vrl-all in lib/codecs * refactor: use vector-vrl-all as workspace dependency * docs: add crate-level documentation for vector-vrl-all * Use LazyLock to cache VRL function list * Revert "Use LazyLock to cache VRL function list" This reverts commit 04616c2844a07863d75f4aabed5f87a98e13b9d7. 
* Add back changelog * cargo fmt * Skip testing vrl stdlib functions' examples * Fix broken examples * Move VRL function consolidation logic into vector_vrl_functions::all() * Cargo fmt * Add ignore for disallowed_methods --- Cargo.lock | 9 ++++-- Cargo.toml | 5 +-- ...vrl_functions_consolidation.enhancement.md | 4 +++ clippy.toml | 1 + lib/codecs/Cargo.toml | 1 + lib/codecs/src/decoding/format/vrl.rs | 2 +- lib/dnstap-parser/Cargo.toml | 5 ++- lib/dnstap-parser/src/internal_events.rs | 2 +- lib/dnstap-parser/src/parser.rs | 18 +++++------ lib/dnstap-parser/src/schema.rs | 14 ++++----- .../src/vrl_functions/parse_dnstap.rs | 2 +- .../src/internal_event.rs | 2 +- lib/vector-vrl/cli/Cargo.toml | 2 +- lib/vector-vrl/cli/src/main.rs | 4 +-- lib/vector-vrl/functions/Cargo.toml | 6 ++++ lib/vector-vrl/functions/src/lib.rs | 26 +++++++++++++++- lib/vector-vrl/tests/Cargo.toml | 3 +- lib/vector-vrl/tests/src/main.rs | 31 ++++++++++--------- lib/vector-vrl/web-playground/Cargo.toml | 2 +- lib/vector-vrl/web-playground/src/lib.rs | 4 +-- src/cli.rs | 6 +--- src/common/http/server_auth.rs | 8 +---- src/conditions/vrl.rs | 10 +----- src/config/unit_test/mod.rs | 3 +- src/sources/http_client/client.rs | 13 ++------ src/transforms/remap.rs | 8 +---- .../reference/remap/functions/decode_lz4.cue | 2 +- .../reference/remap/functions/decrypt_ip.cue | 8 ++--- .../reference/remap/functions/encrypt_ip.cue | 4 +-- .../cue/reference/remap/functions/find.cue | 2 +- .../remap/functions/parse_aws_alb_log.cue | 2 +- .../reference/remap/functions/split_path.cue | 8 ++--- .../remap/functions/validate_json_schema.cue | 8 +++-- .../cue/reference/remap/functions/xxhash.cue | 6 ++-- 34 files changed, 119 insertions(+), 112 deletions(-) create mode 100644 changelog.d/vrl_functions_consolidation.enhancement.md diff --git a/Cargo.lock b/Cargo.lock index 6c175377a63c1..bb5478d575f5c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2631,6 +2631,7 @@ dependencies = [ "vector-config-macros", "vector-core", 
"vector-lookup", + "vector-vrl-functions", "vrl", ] @@ -3599,7 +3600,10 @@ dependencies = [ "prost-build 0.12.6", "snafu 0.8.9", "tracing 0.1.41", - "vector-lib", + "vector-common", + "vector-config", + "vector-core", + "vector-lookup", "vrl", ] @@ -13058,6 +13062,8 @@ dependencies = [ name = "vector-vrl-functions" version = "0.1.0" dependencies = [ + "dnstap-parser", + "enrichment", "vrl", ] @@ -13067,7 +13073,6 @@ version = "0.1.0" dependencies = [ "chrono-tz", "clap", - "dnstap-parser", "enrichment", "glob", "serde", diff --git a/Cargo.toml b/Cargo.toml index 9cc3b90078953..26e0f5b355f4c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -200,6 +200,7 @@ vector-config-common = { path = "lib/vector-config-common" } vector-config-macros = { path = "lib/vector-config-macros" } vector-common-macros = { path = "lib/vector-common-macros" } vector-lib = { path = "lib/vector-lib", default-features = false, features = ["vrl"] } +vector-vrl-functions = { path = "lib/vector-vrl/functions" } vrl = { git = "https://github.com/vectordotdev/vrl.git", branch = "main", features = ["arbitrary", "cli", "test", "test_framework"] } mock_instant = { version = "0.6" } serial_test = { version = "3.2" } @@ -227,7 +228,7 @@ vector-lib.workspace = true vector-config.workspace = true vector-config-common.workspace = true vector-config-macros.workspace = true -vector-vrl-functions = { path = "lib/vector-vrl/functions" } +vector-vrl-functions.workspace = true loki-logproto = { path = "lib/loki-logproto", optional = true } # Tokio / Futures @@ -654,7 +655,7 @@ sources-aws_s3 = ["aws-core", "dep:aws-sdk-sqs", "dep:aws-sdk-s3", "dep:async-co sources-aws_sqs = ["aws-core", "dep:aws-sdk-sqs"] sources-datadog_agent = ["sources-utils-http-encoding", "protobuf-build", "dep:prost"] sources-demo_logs = ["dep:fakedata"] -sources-dnstap = ["sources-utils-net-tcp", "dep:base64", "dep:hickory-proto", "dep:dnsmsg-parser", "dep:dnstap-parser", "protobuf-build", "dep:prost"] +sources-dnstap = 
["sources-utils-net-tcp", "dep:base64", "dep:hickory-proto", "dep:dnsmsg-parser", "dep:dnstap-parser", "protobuf-build", "dep:prost", "vector-vrl-functions/dnstap"] sources-docker_logs = ["docker"] sources-eventstoredb_metrics = [] sources-exec = [] diff --git a/changelog.d/vrl_functions_consolidation.enhancement.md b/changelog.d/vrl_functions_consolidation.enhancement.md new file mode 100644 index 0000000000000..85d09cdd97ed5 --- /dev/null +++ b/changelog.d/vrl_functions_consolidation.enhancement.md @@ -0,0 +1,4 @@ +Vector-specific VRL functions are now available everywhere. Previously some functions were not +available inside codec VRL transforms and in the VRL cli (via `vector vrl`). + +authors: thomasqueirozb diff --git a/clippy.toml b/clippy.toml index 0aeae804db3c7..cbd8c640fdb71 100644 --- a/clippy.toml +++ b/clippy.toml @@ -5,6 +5,7 @@ allow-unwrap-in-tests = true # https://rust-lang.github.io/rust-clippy/master/index.html#disallowed_method disallowed-methods = [ { path = "std::io::Write::write", reason = "This doesn't handle short writes, use `write_all` instead." }, + { path = "vrl::stdlib::all", reason = "Use `vector_vrl_functions::all()` instead for consistency across all Vector VRL functions." 
}, ] disallowed-types = [ diff --git a/lib/codecs/Cargo.toml b/lib/codecs/Cargo.toml index 7a622d52edc2b..d73b1177c0f34 100644 --- a/lib/codecs/Cargo.toml +++ b/lib/codecs/Cargo.toml @@ -46,6 +46,7 @@ vector-common = { path = "../vector-common", default-features = false } vector-config = { path = "../vector-config", default-features = false } vector-config-macros = { path = "../vector-config-macros", default-features = false } vector-core = { path = "../vector-core", default-features = false, features = ["vrl"] } +vector-vrl-functions.workspace = true [dev-dependencies] futures.workspace = true diff --git a/lib/codecs/src/decoding/format/vrl.rs b/lib/codecs/src/decoding/format/vrl.rs index 7ed5244768b5b..c67f60fdd8952 100644 --- a/lib/codecs/src/decoding/format/vrl.rs +++ b/lib/codecs/src/decoding/format/vrl.rs @@ -59,7 +59,7 @@ impl VrlDeserializerConfig { match compile_vrl( &self.vrl.source, - &vrl::stdlib::all(), + &vector_vrl_functions::all(), &state, CompileConfig::default(), ) { diff --git a/lib/dnstap-parser/Cargo.toml b/lib/dnstap-parser/Cargo.toml index 64220976d2398..8768274b3d3e0 100644 --- a/lib/dnstap-parser/Cargo.toml +++ b/lib/dnstap-parser/Cargo.toml @@ -15,7 +15,10 @@ hickory-proto.workspace = true prost.workspace = true snafu.workspace = true tracing.workspace = true -vector-lib.workspace = true +vector-config.workspace = true +vector-common = { path = "../vector-common" } +vector-lookup = { path = "../vector-lookup", features = ["test"] } +vector-core = { path = "../vector-core" } vrl.workspace = true paste.workspace = true diff --git a/lib/dnstap-parser/src/internal_events.rs b/lib/dnstap-parser/src/internal_events.rs index 8ff8763de9f71..fc8f52e2b6952 100644 --- a/lib/dnstap-parser/src/internal_events.rs +++ b/lib/dnstap-parser/src/internal_events.rs @@ -1,5 +1,5 @@ use tracing::warn; -use vector_lib::{ +use vector_common::{ NamedInternalEvent, internal_event::{InternalEvent, error_stage, error_type}, }; diff --git 
a/lib/dnstap-parser/src/parser.rs b/lib/dnstap-parser/src/parser.rs index b90589b8e890b..3876ff363bec6 100644 --- a/lib/dnstap-parser/src/parser.rs +++ b/lib/dnstap-parser/src/parser.rs @@ -16,10 +16,8 @@ use hickory_proto::{ }; use prost::Message; use snafu::Snafu; -use vector_lib::{ - Error, Result, emit, - event::{LogEvent, Value}, -}; +use vector_common::{Error, Result, internal_event::emit}; +use vector_core::event::{LogEvent, Value}; use vrl::{owned_value_path, path}; #[allow(warnings, clippy::all, clippy::pedantic, clippy::nursery)] @@ -38,10 +36,8 @@ use dnstap_proto::{ Dnstap, Message as DnstapMessage, SocketFamily, SocketProtocol, message::Type as DnstapMessageType, }; -use vector_lib::{ - config::log_schema, - lookup::{PathPrefix, lookup_v2::ValuePath}, -}; +use vector_core::config::log_schema; +use vector_lookup::{PathPrefix, lookup_v2::ValuePath}; use crate::{internal_events::DnstapParseWarning, schema::DNSTAP_VALUE_PATHS}; @@ -151,13 +147,13 @@ impl DnstapParser { && let Err(err) = DnstapParser::parse_dnstap_message(event, &root, message, parsing_options) { - emit!(DnstapParseWarning { error: &err }); + emit(DnstapParseWarning { error: &err }); need_raw_data = true; DnstapParser::insert(event, &root, &DNSTAP_VALUE_PATHS.error, err.to_string()); } } else { - emit!(DnstapParseWarning { - error: format!("Unknown dnstap data type: {dnstap_data_type_id}") + emit(DnstapParseWarning { + error: format!("Unknown dnstap data type: {dnstap_data_type_id}"), }); need_raw_data = true; } diff --git a/lib/dnstap-parser/src/schema.rs b/lib/dnstap-parser/src/schema.rs index e795cce9d9789..29a1b861f6e69 100644 --- a/lib/dnstap-parser/src/schema.rs +++ b/lib/dnstap-parser/src/schema.rs @@ -1,6 +1,6 @@ use std::{collections::BTreeMap, sync::LazyLock}; -use vector_lib::lookup::{OwnedValuePath, owned_value_path}; +use vector_lookup::{OwnedValuePath, owned_value_path}; use vrl::{ btreemap, value::{ @@ -135,8 +135,8 @@ impl DnstapEventSchema { /// Schema definition for fields 
stored in the root. fn root_schema_definition( &self, - schema: vector_lib::schema::Definition, - ) -> vector_lib::schema::Definition { + schema: vector_core::schema::Definition, + ) -> vector_core::schema::Definition { schema .optional_field(&DNSTAP_VALUE_PATHS.server_identity, Kind::bytes(), None) .optional_field(&DNSTAP_VALUE_PATHS.server_version, Kind::bytes(), None) @@ -152,8 +152,8 @@ impl DnstapEventSchema { /// Schema definition from the message. pub fn message_schema_definition( &self, - schema: vector_lib::schema::Definition, - ) -> vector_lib::schema::Definition { + schema: vector_core::schema::Definition, + ) -> vector_core::schema::Definition { schema .optional_field(&DNSTAP_VALUE_PATHS.socket_family, Kind::bytes(), None) .optional_field(&DNSTAP_VALUE_PATHS.socket_protocol, Kind::bytes(), None) @@ -179,8 +179,8 @@ impl DnstapEventSchema { /// The schema definition for a dns tap message. pub fn schema_definition( &self, - schema: vector_lib::schema::Definition, - ) -> vector_lib::schema::Definition { + schema: vector_core::schema::Definition, + ) -> vector_core::schema::Definition { self.root_schema_definition(self.message_schema_definition(schema)) } } diff --git a/lib/dnstap-parser/src/vrl_functions/parse_dnstap.rs b/lib/dnstap-parser/src/vrl_functions/parse_dnstap.rs index aec6934f23f5b..b32fdcfeb3106 100644 --- a/lib/dnstap-parser/src/vrl_functions/parse_dnstap.rs +++ b/lib/dnstap-parser/src/vrl_functions/parse_dnstap.rs @@ -1,6 +1,6 @@ use base64::prelude::{BASE64_STANDARD, Engine as _}; use dnsmsg_parser::dns_message_parser::DnsParserOptions; -use vector_lib::event::LogEvent; +use vector_core::event::LogEvent; use vrl::prelude::*; use crate::{parser::DnstapParser, schema::DnstapEventSchema}; diff --git a/lib/vector-common-macros/src/internal_event.rs b/lib/vector-common-macros/src/internal_event.rs index c34b8d01a7aee..aed9e71bfa9a0 100644 --- a/lib/vector-common-macros/src/internal_event.rs +++ b/lib/vector-common-macros/src/internal_event.rs @@ 
-27,7 +27,7 @@ pub fn derive_impl_named_internal_event(item: TokenStream) -> TokenStream { let pkg_name = std::env::var("CARGO_PKG_NAME").unwrap_or_default(); let internal_event_path = if pkg_name == "vector-common" { quote! { crate::internal_event } - } else if pkg_name.starts_with("vector-") { + } else if pkg_name.starts_with("vector-") || pkg_name == "dnstap-parser" { // Most vector-* crates depend on vector-common but not vector-lib quote! { ::vector_common::internal_event } } else { diff --git a/lib/vector-vrl/cli/Cargo.toml b/lib/vector-vrl/cli/Cargo.toml index d97acae8c3339..32f0b8ccce166 100644 --- a/lib/vector-vrl/cli/Cargo.toml +++ b/lib/vector-vrl/cli/Cargo.toml @@ -8,5 +8,5 @@ license = "MPL-2.0" [dependencies] clap.workspace = true -vector-vrl-functions = { path = "../functions" } +vector-vrl-functions.workspace = true vrl.workspace = true diff --git a/lib/vector-vrl/cli/src/main.rs b/lib/vector-vrl/cli/src/main.rs index 9aa4b09fdb0ea..6ecc982deab1f 100644 --- a/lib/vector-vrl/cli/src/main.rs +++ b/lib/vector-vrl/cli/src/main.rs @@ -2,8 +2,6 @@ use clap::Parser; use vrl::cli::{Opts, cmd::cmd}; fn main() { - let mut functions = vrl::stdlib::all(); - functions.extend(vector_vrl_functions::all()); - + let functions = vector_vrl_functions::all(); std::process::exit(cmd(&Opts::parse(), functions)); } diff --git a/lib/vector-vrl/functions/Cargo.toml b/lib/vector-vrl/functions/Cargo.toml index d6319b079cea5..8cc02c042b6f5 100644 --- a/lib/vector-vrl/functions/Cargo.toml +++ b/lib/vector-vrl/functions/Cargo.toml @@ -8,3 +8,9 @@ license = "MPL-2.0" [dependencies] vrl.workspace = true +enrichment = { path = "../../enrichment" } +dnstap-parser = { path = "../../dnstap-parser", optional = true } + +[features] +default = [] +dnstap = ["dep:dnstap-parser"] diff --git a/lib/vector-vrl/functions/src/lib.rs b/lib/vector-vrl/functions/src/lib.rs index 8216cf43092ab..a154597b50966 100644 --- a/lib/vector-vrl/functions/src/lib.rs +++ b/lib/vector-vrl/functions/src/lib.rs 
@@ -1,3 +1,12 @@ +//! Central location for all VRL functions used in Vector. +//! +//! This crate provides a single source of truth for the complete set of VRL functions +//! available throughout Vector, combining: +//! - Standard VRL library functions (`vrl::stdlib::all`) +//! - Vector-specific functions (`vector_vrl::secret_functions`) +//! - Enrichment table functions (`enrichment::vrl_functions`) +//! - DNS tap parsing functions (optional, with `dnstap` feature) + #![deny(warnings)] use vrl::{compiler::Function, path::OwnedTargetPath}; @@ -16,7 +25,8 @@ pub enum MetadataKey { pub const LEGACY_METADATA_KEYS: [&str; 2] = ["datadog_api_key", "splunk_hec_token"]; -pub fn all() -> Vec> { +/// Returns Vector-specific secret functions. +pub fn secret_functions() -> Vec> { vec![ Box::new(set_semantic_meaning::SetSemanticMeaning) as _, Box::new(get_secret::GetSecret) as _, @@ -24,3 +34,17 @@ pub fn all() -> Vec> { Box::new(set_secret::SetSecret) as _, ] } + +/// Returns all VRL functions available in Vector. 
+#[allow(clippy::disallowed_methods)] +pub fn all() -> Vec> { + let functions = vrl::stdlib::all() + .into_iter() + .chain(secret_functions()) + .chain(enrichment::vrl_functions()); + + #[cfg(feature = "dnstap")] + let functions = functions.chain(dnstap_parser::vrl_functions()); + + functions.collect() +} diff --git a/lib/vector-vrl/tests/Cargo.toml b/lib/vector-vrl/tests/Cargo.toml index ddcb78084ab92..671db99c9d989 100644 --- a/lib/vector-vrl/tests/Cargo.toml +++ b/lib/vector-vrl/tests/Cargo.toml @@ -7,10 +7,9 @@ publish = false [dependencies] chrono-tz.workspace = true -dnstap-parser = { path = "../../dnstap-parser" } +vector-vrl-functions = { workspace = true, features = ["dnstap"] } enrichment = { path = "../../enrichment" } vrl.workspace = true -vector-vrl-functions = { path = "../../vector-vrl/functions" } clap.workspace = true glob.workspace = true diff --git a/lib/vector-vrl/tests/src/main.rs b/lib/vector-vrl/tests/src/main.rs index a0eb07cb5c432..8210855ee9129 100644 --- a/lib/vector-vrl/tests/src/main.rs +++ b/lib/vector-vrl/tests/src/main.rs @@ -4,7 +4,7 @@ mod docs; mod test_enrichment; -use std::{env, path::PathBuf}; +use std::{collections::HashSet, env, path::PathBuf}; use chrono_tz::Tz; use clap::Parser; @@ -96,15 +96,10 @@ fn main() { timezone: cmd.timezone(), }; - let mut functions = vrl::stdlib::all(); - functions.extend(vector_vrl_functions::all()); - functions.extend(dnstap_parser::vrl_functions()); - functions.extend(enrichment::vrl_functions()); - run_tests( tests, &cfg, - &functions, + &vector_vrl_functions::all(), || { let mut config = CompileConfig::default(); let enrichment_table = test_enrichment::test_enrichment_table(); @@ -122,7 +117,21 @@ pub fn test_dir() -> PathBuf { fn test_glob_pattern() -> String { test_dir().join("**/*.vrl").to_str().unwrap().to_string() } + +#[allow(clippy::disallowed_methods)] fn get_tests(cmd: &Cmd) -> Vec { + // Don't test vrl stdlib functions examples since they are already tested in VRL and some will + // 
fail to compile since they are missing required source files such as proto definitions. + let ignore_examples_from_functions: HashSet = vrl::stdlib::all() + .into_iter() + .map(|f| format!("functions/{}", f.identifier())) + .collect(); + + let tests_from_functions = get_tests_from_functions(vector_vrl_functions::all()); + let tests_from_functions = tests_from_functions + .into_iter() + .filter(|test| !ignore_examples_from_functions.contains(&test.category)); + glob(test_glob_pattern().as_str()) .expect("valid pattern") .filter_map(|entry| { @@ -130,13 +139,7 @@ fn get_tests(cmd: &Cmd) -> Vec { Some(Test::from_path(&path)) }) .chain(docs::tests(cmd.ignore_cue)) - .chain(get_tests_from_functions( - vector_vrl_functions::all() - .into_iter() - .chain(enrichment::vrl_functions()) - .chain(dnstap_parser::vrl_functions()) - .collect(), - )) + .chain(tests_from_functions) .filter(|test| { should_run( &format!("{}/{}", test.category, test.name), diff --git a/lib/vector-vrl/web-playground/Cargo.toml b/lib/vector-vrl/web-playground/Cargo.toml index 3aaa7fda74929..a54dba46bd84b 100644 --- a/lib/vector-vrl/web-playground/Cargo.toml +++ b/lib/vector-vrl/web-playground/Cargo.toml @@ -17,7 +17,7 @@ vrl.workspace = true serde.workspace = true web-sys = { version = "0.3", features = ["Window", "Performance"] } gloo-utils = { version = "0.2", features = ["serde"] } -vector-vrl-functions = { path = "../functions" } +vector-vrl-functions.workspace = true enrichment = { path = "../../enrichment" } # Required per https://docs.rs/getrandom/latest/getrandom/#webassembly-support getrandom = { version = "0.2.15", features = ["js"] } diff --git a/lib/vector-vrl/web-playground/src/lib.rs b/lib/vector-vrl/web-playground/src/lib.rs index 4a74e32d9e0b1..07fcb4bb6b096 100644 --- a/lib/vector-vrl/web-playground/src/lib.rs +++ b/lib/vector-vrl/web-playground/src/lib.rs @@ -77,9 +77,7 @@ fn compile( mut input: Input, tz_str: Option, ) -> Result { - let mut functions = vrl::stdlib::all(); - 
functions.extend(vector_vrl_functions::all()); - functions.extend(enrichment::vrl_functions()); + let functions = vector_vrl_functions::all(); let event = &mut input.event; let state = TypeState::default(); diff --git a/src/cli.rs b/src/cli.rs index fd7d8d0ac9ce4..7df8ca67215be 100644 --- a/src/cli.rs +++ b/src/cli.rs @@ -369,11 +369,7 @@ impl SubCommand { #[cfg(feature = "top")] Self::Top(t) => top::cmd(t).await, Self::Validate(v) => validate::validate(v, color).await, - Self::Vrl(s) => { - let mut functions = vrl::stdlib::all(); - functions.extend(vector_vrl_functions::all()); - vrl::cli::cmd::cmd(s, functions) - } + Self::Vrl(s) => vrl::cli::cmd::cmd(s, vector_vrl_functions::all()), } } } diff --git a/src/common/http/server_auth.rs b/src/common/http/server_auth.rs index 6b79fb65d9b59..4001f1148555c 100644 --- a/src/common/http/server_auth.rs +++ b/src/common/http/server_auth.rs @@ -144,12 +144,6 @@ impl HttpServerAuthConfig { )) } HttpServerAuthConfig::Custom { source } => { - let functions = vrl::stdlib::all() - .into_iter() - .chain(vector_lib::enrichment::vrl_functions()) - .chain(vector_vrl_functions::all()) - .collect::>(); - let state = TypeState::default(); let mut config = CompileConfig::default(); @@ -160,7 +154,7 @@ impl HttpServerAuthConfig { program, warnings, config: _, - } = compile_vrl(source, &functions, &state, config) + } = compile_vrl(source, &vector_vrl_functions::all(), &state, config) .map_err(|diagnostics| format_vrl_diagnostics(source, diagnostics))?; if !program.final_type_info().result.is_boolean() { diff --git a/src/conditions/vrl.rs b/src/conditions/vrl.rs index 07ec8d213ee29..178c0ac3d0988 100644 --- a/src/conditions/vrl.rs +++ b/src/conditions/vrl.rs @@ -44,15 +44,7 @@ impl ConditionalConfig for VrlConfig { // }, // }; - let functions = vrl::stdlib::all() - .into_iter() - .chain(vector_lib::enrichment::vrl_functions()); - #[cfg(feature = "sources-dnstap")] - let functions = functions.chain(dnstap_parser::vrl_functions()); - - let 
functions = functions - .chain(vector_vrl_functions::all()) - .collect::>(); + let functions = vector_vrl_functions::all(); let state = TypeState::default(); diff --git a/src/config/unit_test/mod.rs b/src/config/unit_test/mod.rs index 485f8b59e75eb..a289e06cc1efc 100644 --- a/src/config/unit_test/mod.rs +++ b/src/config/unit_test/mod.rs @@ -611,8 +611,7 @@ fn build_input_event(input: &TestInput) -> Result { }, "vrl" => { if let Some(source) = &input.source { - let fns = vrl::stdlib::all(); - let result = vrl::compiler::compile(source, &fns) + let result = vrl::compiler::compile(source, &vector_vrl_functions::all()) .map_err(|e| Formatter::new(source, e.clone()).to_string())?; let mut target = TargetValue { diff --git a/src/sources/http_client/client.rs b/src/sources/http_client/client.rs index c26f7302e16aa..27d4643a73afc 100644 --- a/src/sources/http_client/client.rs +++ b/src/sources/http_client/client.rs @@ -183,15 +183,6 @@ fn headers_examples() -> HashMap> { ]) } -/// Helper function to get all VRL functions for compilation -fn get_vrl_functions() -> Vec> { - vrl::stdlib::all() - .into_iter() - .chain(vector_lib::enrichment::vrl_functions()) - .chain(vector_vrl_functions::all()) - .collect() -} - /// Helper function to compile a VRL parameter value into a Program fn compile_parameter_vrl( param: &ParameterValue, @@ -274,7 +265,7 @@ pub struct Query { impl Query { pub fn new(params: &HashMap) -> Result { - let functions = get_vrl_functions(); + let functions = vector_vrl_functions::all(); let mut compiled: HashMap = HashMap::new(); @@ -330,7 +321,7 @@ impl Query { impl SourceConfig for HttpClientConfig { async fn build(&self, cx: SourceContext) -> crate::Result { let query = Query::new(&self.query)?; - let functions = get_vrl_functions(); + let functions = vector_vrl_functions::all(); // Compile body if present let body = self diff --git a/src/transforms/remap.rs b/src/transforms/remap.rs index 219e2e3792fb9..49cb0dd683bdb 100644 --- a/src/transforms/remap.rs 
+++ b/src/transforms/remap.rs @@ -210,12 +210,6 @@ impl RemapConfig { _ => return Err(Box::new(BuildError::SourceAndOrFileOrFiles)), }; - let mut functions = vrl::stdlib::all(); - functions.append(&mut vector_lib::enrichment::vrl_functions()); - #[cfg(feature = "sources-dnstap")] - functions.append(&mut dnstap_parser::vrl_functions()); - functions.append(&mut vector_vrl_functions::all()); - let state = TypeState { local: Default::default(), external: ExternalEnv::new_with_kind( @@ -228,7 +222,7 @@ impl RemapConfig { config.set_custom(enrichment_tables.clone()); config.set_custom(MeaningList::default()); - let res = compile_vrl(&source, &functions, &state, config) + let res = compile_vrl(&source, &vector_vrl_functions::all(), &state, config) .map_err(|diagnostics| format_vrl_diagnostics(&source, diagnostics)) .map(|result| { ( diff --git a/website/cue/reference/remap/functions/decode_lz4.cue b/website/cue/reference/remap/functions/decode_lz4.cue index 25baf7d14b600..a77cd38f0ffb6 100644 --- a/website/cue/reference/remap/functions/decode_lz4.cue +++ b/website/cue/reference/remap/functions/decode_lz4.cue @@ -43,7 +43,7 @@ remap: functions: decode_lz4: { title: "Decode Lz4 data with prepended size." source: #""" encoded_text = decode_base64!("LAAAAPAdVGhlIHF1aWNrIGJyb3duIGZveCBqdW1wcyBvdmVyIDEzIGxhenkgZG9ncy4=") - decode_lz4!(encoded_text, use_prepended_size: true) + decode_lz4!(encoded_text, prepended_size: true) """# return: "The quick brown fox jumps over 13 lazy dogs." 
}, diff --git a/website/cue/reference/remap/functions/decrypt_ip.cue b/website/cue/reference/remap/functions/decrypt_ip.cue index 5f6ac26faa8ee..8bc782c68c7ef 100644 --- a/website/cue/reference/remap/functions/decrypt_ip.cue +++ b/website/cue/reference/remap/functions/decrypt_ip.cue @@ -55,7 +55,7 @@ remap: functions: decrypt_ip: { { title: "Decrypt IPv6 address with AES128" source: #""" - decrypted_ip = decrypt_ip!("d09e:a5ea:585a:2547:dc6d:65ea:d9f1:d09d", "sixteen byte key", "aes128") + decrypted_ip = decrypt_ip!("c0e6:eb35:6887:f554:4c65:8ace:17ca:6c6a", "sixteen byte key", "aes128") decrypted_ip """# return: "2001:db8::1" @@ -63,7 +63,7 @@ remap: functions: decrypt_ip: { { title: "Decrypt IPv4 address with prefix-preserving mode" source: #""" - decrypted_ip = decrypt_ip!("b51c:3c43:4e89:819e:64ce:225f:d6d1:bf01", "thirty-two bytes key for pfx use", "pfx") + decrypted_ip = decrypt_ip!("33.245.248.61", "thirty-two bytes key for pfx use", "pfx") decrypted_ip """# return: "192.168.1.1" @@ -81,10 +81,10 @@ remap: functions: decrypt_ip: { source: #""" original_ip = "192.168.1.100" key = "sixteen byte key" - + encrypted = encrypt_ip!(original_ip, key, "aes128") decrypted = decrypt_ip!(encrypted, key, "aes128") - + decrypted == original_ip """# return: true diff --git a/website/cue/reference/remap/functions/encrypt_ip.cue b/website/cue/reference/remap/functions/encrypt_ip.cue index 83ceadd9926c2..5888f2c22055a 100644 --- a/website/cue/reference/remap/functions/encrypt_ip.cue +++ b/website/cue/reference/remap/functions/encrypt_ip.cue @@ -58,7 +58,7 @@ remap: functions: encrypt_ip: { encrypted_ip = encrypt_ip!("2001:db8::1", "sixteen byte key", "aes128") encrypted_ip """# - return: "d09e:a5ea:585a:2547:dc6d:65ea:d9f1:d09d" + return: "c0e6:eb35:6887:f554:4c65:8ace:17ca:6c6a" }, { title: "Encrypt IPv4 address with prefix-preserving mode" @@ -66,7 +66,7 @@ remap: functions: encrypt_ip: { encrypted_ip = encrypt_ip!("192.168.1.1", "thirty-two bytes key for pfx use", "pfx") 
encrypted_ip """# - return: "b51c:3c43:4e89:819e:64ce:225f:d6d1:bf01" + return: "33.245.248.61" }, { title: "Encrypt IPv6 address with prefix-preserving mode" diff --git a/website/cue/reference/remap/functions/find.cue b/website/cue/reference/remap/functions/find.cue index 6ed15f0de03b5..5cf6bd3ab1283 100644 --- a/website/cue/reference/remap/functions/find.cue +++ b/website/cue/reference/remap/functions/find.cue @@ -51,7 +51,7 @@ remap: functions: find: { source: #""" find("foobar", "baz") """# - return: -1 + return: null }, { title: "With an offset" diff --git a/website/cue/reference/remap/functions/parse_aws_alb_log.cue b/website/cue/reference/remap/functions/parse_aws_alb_log.cue index 0f357f5baa82f..21f87d19aa227 100644 --- a/website/cue/reference/remap/functions/parse_aws_alb_log.cue +++ b/website/cue/reference/remap/functions/parse_aws_alb_log.cue @@ -98,7 +98,7 @@ remap: functions: parse_aws_alb_log: { ssl_protocol: null target_group_arn: "arn:aws:elasticloadbalancing:us-east-2:123456789012:targetgroup/my-targets/73e2d6bc24d8a067" trace_id: "Root=1-58337364-23a8c76965a2ef7629b185e3" - traceability_id: null + traceability_id: "TID_12345" domain_name: null chosen_cert_arn: null matched_rule_priority: "0" diff --git a/website/cue/reference/remap/functions/split_path.cue b/website/cue/reference/remap/functions/split_path.cue index f1bc93034f547..981f64bfb6522 100644 --- a/website/cue/reference/remap/functions/split_path.cue +++ b/website/cue/reference/remap/functions/split_path.cue @@ -24,28 +24,28 @@ remap: functions: split_path: { { title: "Split path with trailing slash" source: """ - split_path!("/home/user/") + split_path("/home/user/") """ return: ["/", "home", "user"] }, { title: "Split path from file path" source: """ - split_path!("/home/user") + split_path("/home/user") """ return: ["/", "home", "user"] }, { title: "Split path from root" source: """ - split_path!("/") + split_path("/") """ return: ["/"] }, { title: "Empty path returns empty array" 
source: """ - split_path!("") + split_path("") """ return: [] }, diff --git a/website/cue/reference/remap/functions/validate_json_schema.cue b/website/cue/reference/remap/functions/validate_json_schema.cue index 4930e35ec904a..314d6fdee8bca 100644 --- a/website/cue/reference/remap/functions/validate_json_schema.cue +++ b/website/cue/reference/remap/functions/validate_json_schema.cue @@ -57,21 +57,23 @@ remap: functions: validate_json_schema: { { title: "Payload contains an invalid email." source: """ - validate_json_schema!(s'{ "productUser": "invalidEmail" }', "resources/json-schema_definition.json", false) + ok, _err = validate_json_schema(s'{ "productUser": "invalidEmail" }', "resources/json-schema_definition.json", false) + ok """ return: false }, { title: "Payload contains a custom format declaration." source: """ - validate_json_schema!(s'{ "productUser": "a-custom-formatted-string" }', "resources/json-schema_definition.json", false) + ok, _err = validate_json_schema(s'{ "productUser": "a-custom-formatted-string" }', "resources/json-schema_definition.json", false) + ok """ return: false }, { title: "Payload contains a custom format declaration, with ignore_unknown_formats set to true." 
source: """ - validate_json_schema!(s'{ "productUser": "a-custom-formatted-string" }', "resources/json-schema_definition.json", true) + validate_json_schema!(s'{ "productUser": "valid@email.com" }', "resources/json-schema_definition.json", true) """ return: true }, diff --git a/website/cue/reference/remap/functions/xxhash.cue b/website/cue/reference/remap/functions/xxhash.cue index 5e6a4ba712d93..f420bf9f80410 100644 --- a/website/cue/reference/remap/functions/xxhash.cue +++ b/website/cue/reference/remap/functions/xxhash.cue @@ -45,19 +45,19 @@ remap: functions: xxhash: { source: #""" xxhash("foo", "XXH64") """# - return: -3728699739546630719 + return: 3728699739546630719 }, { title: "Calculate a hash using the XXH3-64 algorithm" source: #""" - xxhash("foo", "XXH3-64) + xxhash("foo", "XXH3-64") """# return: -6093828362558603894 }, { title: "Calculate a hash using the XXH3-128 algorithm" source: #""" - xxhash("foo", "XXH3-128) + xxhash("foo", "XXH3-128") """# return: "161745101148472925293886522910304009610" }, From 90cf7d044c60e651ff6cec8c3673686cc36f3765 Mon Sep 17 00:00:00 2001 From: Stephen Brown Date: Tue, 23 Dec 2025 00:15:28 +0000 Subject: [PATCH 163/227] feat(mqtt source): support multiple mqtt source topics (#23670) * enhancement(file source): support multiple mqtt source topics * chore: fix changelog.d file format * Remove mut + mem::take * Add mqtt source to semantic.yml * Improve changelog and rename file * Fix clippy * Fix type in int test * make generate-component-docs * add integration test for many mqtt source topics * Fix imports from merge --------- Co-authored-by: Thomas --- .github/workflows/semantic.yml | 1 + ...ort_many_mqtt_source_topics.enhancement.md | 3 + src/sources/mqtt/config.rs | 10 +-- src/sources/mqtt/integration_tests.rs | 75 ++++++++++++++++++- src/sources/mqtt/source.rs | 26 +++++-- .../components/sources/generated/mqtt.cue | 2 +- 6 files changed, 102 insertions(+), 15 deletions(-) create mode 100644 
changelog.d/23670_support_many_mqtt_source_topics.enhancement.md diff --git a/.github/workflows/semantic.yml b/.github/workflows/semantic.yml index 1b8ae3c0021c8..aa67c798646bc 100644 --- a/.github/workflows/semantic.yml +++ b/.github/workflows/semantic.yml @@ -159,6 +159,7 @@ jobs: kubernetes_logs source logstash source mongodb_metrics source + mqtt source nats source new source nginx_metrics source diff --git a/changelog.d/23670_support_many_mqtt_source_topics.enhancement.md b/changelog.d/23670_support_many_mqtt_source_topics.enhancement.md new file mode 100644 index 0000000000000..9cc08b4ad6072 --- /dev/null +++ b/changelog.d/23670_support_many_mqtt_source_topics.enhancement.md @@ -0,0 +1,3 @@ +The `mqtt` source config field `topic` can now be a list of mqtt topic strings instead of just a string. If a list is provided, the `mqtt` source client will subscribe to all the topics. + +authors: december1981 diff --git a/src/sources/mqtt/config.rs b/src/sources/mqtt/config.rs index 2e8dddba28639..635751a3a1b0a 100644 --- a/src/sources/mqtt/config.rs +++ b/src/sources/mqtt/config.rs @@ -20,7 +20,7 @@ use crate::{ TlsSnafu, }, config::{SourceConfig, SourceContext, SourceOutput}, - serde::{default_decoding, default_framing_message_based}, + serde::{OneOrMany, default_decoding, default_framing_message_based}, }; /// Configuration for the `mqtt` source. @@ -32,11 +32,11 @@ pub struct MqttSourceConfig { #[serde(flatten)] pub common: MqttCommonConfig, - /// MQTT topic from which messages are to be read. + /// MQTT topic or topics from which messages are to be read. 
#[configurable(derived)] #[serde(default = "default_topic")] #[derivative(Default(value = "default_topic()"))] - pub topic: String, + pub topic: OneOrMany, #[configurable(derived)] #[serde(default = "default_framing_message_based")] @@ -63,8 +63,8 @@ pub struct MqttSourceConfig { pub topic_key: OptionalValuePath, } -fn default_topic() -> String { - "vector".to_owned() +fn default_topic() -> OneOrMany { + OneOrMany::One("vector".into()) } fn default_topic_key() -> OptionalValuePath { diff --git a/src/sources/mqtt/integration_tests.rs b/src/sources/mqtt/integration_tests.rs index 0bb77b6de1d9c..ed1013f4bef0f 100644 --- a/src/sources/mqtt/integration_tests.rs +++ b/src/sources/mqtt/integration_tests.rs @@ -7,12 +7,13 @@ use futures::StreamExt; use rumqttc::{AsyncClient, MqttOptions, QoS}; use tokio::time::timeout; -use super::MqttSourceConfig; use crate::{ SourceSender, common::mqtt::MqttCommonConfig, config::{SourceConfig, SourceContext, log_schema}, event::Event, + serde::OneOrMany, + sources::mqtt::MqttSourceConfig, test_util::{ components::{SOURCE_TAGS, assert_source_compliance}, random_lines_with_stream, random_string, trace_init, @@ -59,13 +60,13 @@ async fn get_mqtt_client() -> AsyncClient { } #[tokio::test] -async fn mqtt_happy() { +async fn mqtt_one_topic_happy() { trace_init(); let topic = "source-test"; // We always want new client ID. If it were stable, subsequent tests could receive data sent in previous runs. let client_id = format!("sourceTest{}", random_string(6)); let num_events = 10; - let (input, _events) = random_lines_with_stream(100, num_events, None); + let (input, ..) 
= random_lines_with_stream(100, num_events, None); assert_source_compliance(&SOURCE_TAGS, async { let common = MqttCommonConfig { @@ -77,7 +78,7 @@ async fn mqtt_happy() { let config = MqttSourceConfig { common, - topic: topic.to_owned(), + topic: OneOrMany::One(topic.to_owned()), ..MqttSourceConfig::default() }; @@ -116,3 +117,69 @@ async fn mqtt_happy() { }) .await; } + +#[tokio::test] +async fn mqtt_many_topics_happy() { + trace_init(); + let topic_prefix_1 = "source-prefix-1"; + let topic_prefix_2 = "source-prefix-2"; + // We always want new client ID. If it were stable, subsequent tests could receive data sent in previous runs. + let client_id = format!("sourceTest{}", random_string(6)); + let num_events = 10; + let (input_1, ..) = random_lines_with_stream(100, num_events, None); + let (input_2, ..) = random_lines_with_stream(100, num_events, None); + + assert_source_compliance(&SOURCE_TAGS, async { + let common = MqttCommonConfig { + host: mqtt_broker_address(), + port: mqtt_broker_port(), + client_id: Some(client_id), + ..Default::default() + }; + + let config = MqttSourceConfig { + common, + topic: OneOrMany::Many(vec![ + format!("{topic_prefix_1}/#"), + format!("{topic_prefix_2}/#"), + ]), + ..MqttSourceConfig::default() + }; + + let (tx, rx) = SourceSender::new_test(); + tokio::spawn(async move { + config + .build(SourceContext::new_test(tx, None)) + .await + .unwrap() + .await + .unwrap() + }); + + tokio::time::sleep(Duration::from_millis(100)).await; + + let client = get_mqtt_client().await; + send_test_events(&client, &format!("{topic_prefix_1}/test"), &input_1).await; + send_test_events(&client, &format!("{topic_prefix_2}/test"), &input_2).await; + + let mut expected_messages: HashSet<_> = + input_1.into_iter().chain(input_2.into_iter()).collect(); + + let events: Vec = timeout(Duration::from_secs(2), rx.take(num_events * 2).collect()) + .await + .unwrap(); + + for event in events { + let message = event + .as_log() + 
.get(log_schema().message_key_target_path().unwrap()) + .unwrap() + .to_string_lossy(); + if !expected_messages.remove(message.as_ref()) { + panic!("Received unexpected message: {message:?}"); + } + } + assert!(expected_messages.is_empty()); + }) + .await; +} diff --git a/src/sources/mqtt/source.rs b/src/sources/mqtt/source.rs index 40026be1b1e8b..703929261882f 100644 --- a/src/sources/mqtt/source.rs +++ b/src/sources/mqtt/source.rs @@ -1,5 +1,5 @@ use itertools::Itertools; -use rumqttc::{Event as MqttEvent, Incoming, Publish, QoS}; +use rumqttc::{Event as MqttEvent, Incoming, Publish, QoS, SubscribeFilter}; use vector_lib::{ config::{LegacyKey, LogNamespace}, internal_event::EventsReceived, @@ -12,6 +12,7 @@ use crate::{ common::mqtt::MqttConnector, event::{BatchNotifier, Event}, internal_events::{EndpointBytesReceived, StreamClosedError}, + serde::OneOrMany, shutdown::ShutdownSignal, sources::{mqtt::MqttSourceConfig, util}, }; @@ -41,10 +42,25 @@ impl MqttSource { pub async fn run(self, mut out: SourceSender, shutdown: ShutdownSignal) -> Result<(), ()> { let (client, mut connection) = self.connector.connect(); - client - .subscribe(&self.config.topic, QoS::AtLeastOnce) - .await - .map_err(|_| ())?; + match &self.config.topic { + OneOrMany::One(topic) => { + client + .subscribe(topic, QoS::AtLeastOnce) + .await + .map_err(|_| ())?; + } + OneOrMany::Many(topics) => { + client + .subscribe_many( + topics + .iter() + .cloned() + .map(|topic| SubscribeFilter::new(topic, QoS::AtLeastOnce)), + ) + .await + .map_err(|_| ())?; + } + } loop { tokio::select! 
{ diff --git a/website/cue/reference/components/sources/generated/mqtt.cue b/website/cue/reference/components/sources/generated/mqtt.cue index 654fe714c8de2..0bcd847fd4717 100644 --- a/website/cue/reference/components/sources/generated/mqtt.cue +++ b/website/cue/reference/components/sources/generated/mqtt.cue @@ -639,7 +639,7 @@ generated: components: sources: mqtt: configuration: { } } topic: { - description: "MQTT topic from which messages are to be read." + description: "MQTT topic or topics from which messages are to be read." required: false type: string: default: "vector" } From 36a935f62bcde10ffb6646f1d99e12e6d9ea7fe1 Mon Sep 17 00:00:00 2001 From: Thomas Date: Tue, 23 Dec 2025 13:53:07 -0500 Subject: [PATCH 164/227] chore(website): update TypeScript and Node.js dependencies, enable ES modules (#24406) * chore(website): update TypeScript to 5.9.3, ts-node to 10.9.2, and @types/node to 25.0.3 * Update to use modules * Use es2022 instead of commonjs --- website/babel.config.js | 12 +- website/package.json | 7 +- website/postcss.config.js | 15 ++- website/scripts/create-config-examples.js | 11 +- ...tailwind.config.js => tailwind.config.cjs} | 2 +- website/tsconfig.json | 2 +- website/yarn.lock | 118 +++++++++++++----- 7 files changed, 115 insertions(+), 52 deletions(-) rename website/{tailwind.config.js => tailwind.config.cjs} (99%) diff --git a/website/babel.config.js b/website/babel.config.js index c683354b91987..cfe4e614d29c6 100644 --- a/website/babel.config.js +++ b/website/babel.config.js @@ -1,23 +1,27 @@ -module.exports = function (api) { +import presetEnv from '@babel/preset-env'; +import presetReact from '@babel/preset-react'; +import presetTypeScript from '@babel/preset-typescript'; + +export default function (api) { api.cache(true); const presets = [ [ - require('@babel/preset-env'), + presetEnv, { "useBuiltIns": 'entry', "corejs": 3 } ], [ - require("@babel/preset-react"), + presetReact, { "flow": false, "typescript": true } ], [ - 
require("@babel/preset-typescript"), + presetTypeScript, { "isTSX": true, "allExtensions": true diff --git a/website/package.json b/website/package.json index d0c18a147fae3..81b4c64bd624f 100644 --- a/website/package.json +++ b/website/package.json @@ -1,4 +1,5 @@ { + "type": "module", "private": true, "scripts": { "typesense-index": "ts-node scripts/typesense-index.ts", @@ -25,7 +26,7 @@ "@tailwindcss/typography": "^0.4.1", "@types/dotenv-defaults": "^2.0.1", "@types/lodash.chunk": "^4.2.6", - "@types/node": "^16.0.1", + "@types/node": "^25.0.3", "@types/topojson-specification": "^1.0.1", "alpinejs": "^2.8.2", "autoprefixer": "^10.2.5", @@ -53,8 +54,8 @@ "tailwindcss": "^2.2.4", "tocbot": "^4.12.2", "topojson-client": "^3.1.0", - "ts-node": "^9.1.1", - "typescript": "^4.1.3", + "ts-node": "^10.9.2", + "typescript": "^5.9.3", "typesense": "^1.8.2", "typesense-sync": "https://s3.amazonaws.com/origin-static-assets/corp-node-packages/master/typesense-sync-v1.1.0.tgz" }, diff --git a/website/postcss.config.js b/website/postcss.config.js index bfa428f918b93..96e73589f6611 100644 --- a/website/postcss.config.js +++ b/website/postcss.config.js @@ -1,8 +1,7 @@ -const postcssImport = require('postcss-import'); -const tailwindCss = require('tailwindcss'); -const autoprefixer = require('autoprefixer')({ - browsers: ['last 2 versions'] -}); +import postcssImport from 'postcss-import'; +import tailwindCss from 'tailwindcss'; +import autoprefixer from 'autoprefixer'; +import purgecssPlugin from '@fullhuman/postcss-purgecss'; // These are classes for things that are applied by JS, and thus missed by Hugo. // See assets/js/*.js for places where this happens. 
@@ -45,7 +44,7 @@ const safeClasses = { ] }; -const purgecss = require('@fullhuman/postcss-purgecss')({ +const purgecss = purgecssPlugin({ content: ['./hugo_stats.json'], safelist: safeClasses, defaultExtractor: (content) => { @@ -53,9 +52,9 @@ const purgecss = require('@fullhuman/postcss-purgecss')({ const innerMatches = content.match(/[^<>"'`\s.()]*[^<>"'`\s.():]/g) || []; return broadMatches.concat(innerMatches); } -}) +}); -module.exports = { +export default { plugins: [ postcssImport, tailwindCss, diff --git a/website/scripts/create-config-examples.js b/website/scripts/create-config-examples.js index 64a47363464fd..b9096fd4d3b1d 100755 --- a/website/scripts/create-config-examples.js +++ b/website/scripts/create-config-examples.js @@ -1,8 +1,9 @@ -const fs = require('fs'); +import fs from 'fs'; +import chalk from 'chalk'; +import * as TOML from '@iarna/toml'; +import YAML from 'yaml'; + const cueJsonOutput = "data/docs.json"; -const chalk = require('chalk'); -const TOML = require('@iarna/toml'); -const YAML = require('yaml'); // Helper functions const getExampleValue = (param, deepFilter) => { @@ -241,7 +242,7 @@ const makeUseCaseExamples = (component) => { output = example.output; } - useCase = { + const useCase = { title: example.title, description: example.description, configuration: { diff --git a/website/tailwind.config.js b/website/tailwind.config.cjs similarity index 99% rename from website/tailwind.config.js rename to website/tailwind.config.cjs index 8db256c4c47fe..4a81e622178c0 100644 --- a/website/tailwind.config.js +++ b/website/tailwind.config.cjs @@ -1,4 +1,4 @@ -const colors = require('tailwindcss/colors') +const colors = require('tailwindcss/colors'); module.exports = { purge: [], diff --git a/website/tsconfig.json b/website/tsconfig.json index 284dcc5fc6b8b..8f1278afde326 100644 --- a/website/tsconfig.json +++ b/website/tsconfig.json @@ -1,7 +1,7 @@ { "compilerOptions": { "target": "esnext", - "module": "commonjs", + "module": "es2022", "jsx": 
"react", "lib": ["dom", "es2015", "es2017"], "moduleResolution": "node", diff --git a/website/yarn.lock b/website/yarn.lock index be69bb103eaba..7c9bad4513585 100644 --- a/website/yarn.lock +++ b/website/yarn.lock @@ -1066,6 +1066,13 @@ resolved "https://registry.npmjs.org/@colors/colors/-/colors-1.6.0.tgz" integrity sha512-Ir+AOibqzrIsL6ajt3Rz3LskB7OiMVHqltZmspbW/TJuTVuyOMirVqAkjfY6JISiLHgyNqicAC8AyHHGzNd/dA== +"@cspotcode/source-map-support@^0.8.0": + version "0.8.1" + resolved "https://registry.yarnpkg.com/@cspotcode/source-map-support/-/source-map-support-0.8.1.tgz#00629c35a688e05a88b1cda684fb9d5e73f000a1" + integrity sha512-IchNf6dN4tHoMFIn/7OE8LWZ19Y6q/67Bmf6vnGREv8RSbBVb9LPJxEcnwrcwX6ixSvaiGoomAUvu4YSxXrVgw== + dependencies: + "@jridgewell/trace-mapping" "0.3.9" + "@dabh/diagnostics@^2.0.2": version "2.0.3" resolved "https://registry.npmjs.org/@dabh/diagnostics/-/diagnostics-2.0.3.tgz" @@ -1195,20 +1202,28 @@ "@jridgewell/gen-mapping" "^0.3.5" "@jridgewell/trace-mapping" "^0.3.24" -"@jridgewell/resolve-uri@^3.1.0": +"@jridgewell/resolve-uri@^3.0.3", "@jridgewell/resolve-uri@^3.1.0": version "3.1.2" resolved "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz" integrity sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw== +"@jridgewell/sourcemap-codec@^1.4.10", "@jridgewell/sourcemap-codec@^1.5.0": + version "1.5.5" + resolved "https://registry.yarnpkg.com/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.5.tgz#6912b00d2c631c0d15ce1a7ab57cd657f2a8f8ba" + integrity sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og== + "@jridgewell/sourcemap-codec@^1.4.14", "@jridgewell/sourcemap-codec@^1.4.15": version "1.5.0" resolved "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.0.tgz" integrity sha512-gv3ZRaISU3fjPAgNsriBRqGWQL6quFx04YMPW/zD8XMLsU32mhCCbfbO6KZFLjvYpCZ8zyDEgqsgf+PwPaM7GQ== -"@jridgewell/sourcemap-codec@^1.5.0": - 
version "1.5.5" - resolved "https://registry.yarnpkg.com/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.5.tgz#6912b00d2c631c0d15ce1a7ab57cd657f2a8f8ba" - integrity sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og== +"@jridgewell/trace-mapping@0.3.9": + version "0.3.9" + resolved "https://registry.yarnpkg.com/@jridgewell/trace-mapping/-/trace-mapping-0.3.9.tgz#6534fd5933a53ba7cbf3a17615e273a0d1273ff9" + integrity sha512-3Belt6tdc8bPgAtbcmdtNJlirVoTmEb5e2gC94PnkwEW9jI6CAHUeoG85tjWP5WquqfavoMtMwiG4P926ZKKuQ== + dependencies: + "@jridgewell/resolve-uri" "^3.0.3" + "@jridgewell/sourcemap-codec" "^1.4.10" "@jridgewell/trace-mapping@^0.3.24", "@jridgewell/trace-mapping@^0.3.28": version "0.3.31" @@ -1353,6 +1368,26 @@ lodash.merge "^4.6.2" lodash.uniq "^4.5.0" +"@tsconfig/node10@^1.0.7": + version "1.0.12" + resolved "https://registry.yarnpkg.com/@tsconfig/node10/-/node10-1.0.12.tgz#be57ceac1e4692b41be9de6be8c32a106636dba4" + integrity sha512-UCYBaeFvM11aU2y3YPZ//O5Rhj+xKyzy7mvcIoAjASbigy8mHMryP5cK7dgjlz2hWxh1g5pLw084E0a/wlUSFQ== + +"@tsconfig/node12@^1.0.7": + version "1.0.11" + resolved "https://registry.yarnpkg.com/@tsconfig/node12/-/node12-1.0.11.tgz#ee3def1f27d9ed66dac6e46a295cffb0152e058d" + integrity sha512-cqefuRsh12pWyGsIoBKJA9luFu3mRxCA+ORZvA4ktLSzIuCUtWVxGIuXigEwO5/ywWFMZ2QEGKWvkZG1zDMTag== + +"@tsconfig/node14@^1.0.0": + version "1.0.3" + resolved "https://registry.yarnpkg.com/@tsconfig/node14/-/node14-1.0.3.tgz#e4386316284f00b98435bf40f72f75a09dabf6c1" + integrity sha512-ysT8mhdixWK6Hw3i1V2AeRqZ5WfXg1G43mqoYlM2nc6388Fq5jcXyr5mRsqViLx/GJYdoL0bfXD8nmF+Zn/Iow== + +"@tsconfig/node16@^1.0.2": + version "1.0.4" + resolved "https://registry.yarnpkg.com/@tsconfig/node16/-/node16-1.0.4.tgz#0b92dcc0cc1c81f6f306a381f28e31b1a56536e9" + integrity sha512-vxhUy4J8lyeyinH7Azl1pdd43GJhZH/tP2weN8TntQblOY+A0XbT8DJk1/oCPuOOyg/Ja757rG0CgHcWC8OfMA== + "@types/dotenv-defaults@^2.0.1": version "2.0.4" resolved 
"https://registry.npmjs.org/@types/dotenv-defaults/-/dotenv-defaults-2.0.4.tgz" @@ -1396,11 +1431,18 @@ resolved "https://registry.npmjs.org/@types/minimatch/-/minimatch-5.1.2.tgz" integrity sha512-K0VQKziLUWkVKiRVrx4a40iPaxTUefQmjtkQofBkYRcoaaL/8rhwDWww9qWbrgicNOgnpIsMxyNIUM4+n6dUIA== -"@types/node@*", "@types/node@^16.0.1": +"@types/node@*": version "16.18.126" resolved "https://registry.npmjs.org/@types/node/-/node-16.18.126.tgz" integrity sha512-OTcgaiwfGFBKacvfwuHzzn1KLxH/er8mluiy8/uM3sGXHaRe73RrSIj01jow9t4kJEW633Ov+cOexXeiApTyAw== +"@types/node@^25.0.3": + version "25.0.3" + resolved "https://registry.yarnpkg.com/@types/node/-/node-25.0.3.tgz#79b9ac8318f373fbfaaf6e2784893efa9701f269" + integrity sha512-W609buLVRVmeW693xKfzHeIV6nJGGz98uCPfeXI1ELMLXVeKYZ9m15fAMSaUPBHYLGFsVRcMmSCksQOrZV9BYA== + dependencies: + undici-types "~7.16.0" + "@types/parse-json@^4.0.0": version "4.0.0" resolved "https://registry.npmjs.org/@types/parse-json/-/parse-json-4.0.0.tgz" @@ -1437,11 +1479,23 @@ acorn-walk@^7.0.0: resolved "https://registry.npmjs.org/acorn-walk/-/acorn-walk-7.2.0.tgz" integrity sha512-OPdCF6GsMIP+Az+aWfAAOEt2/+iVDKE7oy6lJ098aoe59oAmK76qV6Gw60SbZ8jHuG2wH058GF4pLFbYamYrVA== +acorn-walk@^8.1.1: + version "8.3.4" + resolved "https://registry.yarnpkg.com/acorn-walk/-/acorn-walk-8.3.4.tgz#794dd169c3977edf4ba4ea47583587c5866236b7" + integrity sha512-ueEepnujpqee2o5aIYnvHU6C0A42MNdsIDeqy5BydrkuC5R1ZuUFnm27EeFJGoEHJQgn3uleRvmTXaJgfXbt4g== + dependencies: + acorn "^8.11.0" + acorn@^7.0.0: version "7.4.1" resolved "https://registry.npmjs.org/acorn/-/acorn-7.4.1.tgz" integrity sha512-nQyp0o1/mNdbTO1PO6kHkwSrmgZ0MT/jCCpNiwbUjGoRN4dlBhqJtoQuCnEOKzgTVwg0ZWiCoQy6SxMebQVh8A== +acorn@^8.11.0, acorn@^8.4.1: + version "8.15.0" + resolved "https://registry.yarnpkg.com/acorn/-/acorn-8.15.0.tgz#a360898bc415edaac46c8241f6383975b930b816" + integrity sha512-NZyJarBfL7nWwIq+FDL6Zp/yHEhePMNnnJ0y3qfieCrmNvYct8uvtiV41UvlSe6apAfk0fY1FbWx+NwfmpvtTg== + alpinejs@^2.5, alpinejs@^2.8.2: version 
"2.8.2" resolved "https://registry.npmjs.org/alpinejs/-/alpinejs-2.8.2.tgz" @@ -1637,11 +1691,6 @@ browserslist@^4.24.0, browserslist@^4.28.0, browserslist@^4.28.1: node-releases "^2.0.27" update-browserslist-db "^1.2.0" -buffer-from@^1.0.0: - version "1.1.2" - resolved "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.2.tgz" - integrity sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ== - bytes@^3.0.0: version "3.1.2" resolved "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz" @@ -3350,14 +3399,6 @@ source-map-js@^1.2.1: resolved "https://registry.yarnpkg.com/source-map-js/-/source-map-js-1.2.1.tgz#1ce5650fddd87abc099eda37dcff024c2667ae46" integrity sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA== -source-map-support@^0.5.17: - version "0.5.21" - resolved "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.21.tgz" - integrity sha512-uBHU3L3czsIyYXKX88fdrGovxdSCoTGDRZ6SYXtSRxLZUzHg5P/66Ht6uoUlHu9EZod+inXhKo3qQgwXUT/y1w== - dependencies: - buffer-from "^1.0.0" - source-map "^0.6.0" - source-map@0.5.6: version "0.5.6" resolved "https://registry.npmjs.org/source-map/-/source-map-0.5.6.tgz" @@ -3368,7 +3409,7 @@ source-map@^0.5.7: resolved "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz" integrity sha512-LbrmJOMUSdEVxIKvdcJzQC+nQhe8FUZQTXQy6+I75skNgn3OoQ0DZA8YnFa7gp8tqtL3KPf1kmo0R5DoApeSGQ== -source-map@^0.6.0, source-map@^0.6.1: +source-map@^0.6.1: version "0.6.1" resolved "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz" integrity sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g== @@ -3544,16 +3585,23 @@ ts-easing@^0.2.0: resolved "https://registry.npmjs.org/ts-easing/-/ts-easing-0.2.0.tgz" integrity sha512-Z86EW+fFFh/IFB1fqQ3/+7Zpf9t2ebOAxNI/V6Wo7r5gqiqtxmgTlQ1qbqQcjLKYeSHPTsEmvlJUDg/EuL0uHQ== -ts-node@^9.1.1: - version "9.1.1" - resolved 
"https://registry.npmjs.org/ts-node/-/ts-node-9.1.1.tgz" - integrity sha512-hPlt7ZACERQGf03M253ytLY3dHbGNGrAq9qIHWUY9XHYl1z7wYngSr3OQ5xmui8o2AaxsONxIzjafLUiWBo1Fg== - dependencies: +ts-node@^10.9.2: + version "10.9.2" + resolved "https://registry.yarnpkg.com/ts-node/-/ts-node-10.9.2.tgz#70f021c9e185bccdca820e26dc413805c101c71f" + integrity sha512-f0FFpIdcHgn8zcPSbf1dRevwt047YMnaiJM3u2w2RewrB+fob/zePZcrOyQoLMMO7aBIddLcQIEK5dYjkLnGrQ== + dependencies: + "@cspotcode/source-map-support" "^0.8.0" + "@tsconfig/node10" "^1.0.7" + "@tsconfig/node12" "^1.0.7" + "@tsconfig/node14" "^1.0.0" + "@tsconfig/node16" "^1.0.2" + acorn "^8.4.1" + acorn-walk "^8.1.1" arg "^4.1.0" create-require "^1.1.0" diff "^4.0.1" make-error "^1.1.1" - source-map-support "^0.5.17" + v8-compile-cache-lib "^3.0.1" yn "3.1.1" tslib@^2.1.0, tslib@^2.3.0, tslib@^2.6.2: @@ -3561,10 +3609,10 @@ tslib@^2.1.0, tslib@^2.3.0, tslib@^2.6.2: resolved "https://registry.npmjs.org/tslib/-/tslib-2.8.1.tgz" integrity sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w== -typescript@^4.1.3: - version "4.9.5" - resolved "https://registry.npmjs.org/typescript/-/typescript-4.9.5.tgz" - integrity sha512-1FXk9E2Hm+QzZQ7z+McJiHL4NW1F2EzMu9Nq9i3zAaGqibafqYwCVU6WyWAuyQRRzOlxou8xZSyXLEN8oKj24g== +typescript@^5.9.3: + version "5.9.3" + resolved "https://registry.yarnpkg.com/typescript/-/typescript-5.9.3.tgz#5b4f59e15310ab17a216f5d6cf53ee476ede670f" + integrity sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw== "typesense-sync@https://s3.amazonaws.com/origin-static-assets/corp-node-packages/master/typesense-sync-v1.1.0.tgz": version "1.1.0" @@ -3600,6 +3648,11 @@ ulid@^2.3.0: resolved "https://registry.npmjs.org/ulid/-/ulid-2.4.0.tgz" integrity sha512-fIRiVTJNcSRmXKPZtGzFQv9WRrZ3M9eoptl/teFJvjOzmpU+/K/JH6HZ8deBfb5vMEpicJcLn7JmvdknlMq7Zg== +undici-types@~7.16.0: + version "7.16.0" + resolved 
"https://registry.yarnpkg.com/undici-types/-/undici-types-7.16.0.tgz#ffccdff36aea4884cbfce9a750a0580224f58a46" + integrity sha512-Zz+aZWSj8LE6zoxD+xrjh4VfkIG8Ya6LvYkZqtUQGJPZjYl53ypCaUwWqo7eI0x66KBGeRo+mlBEkMSeSZ38Nw== + undici@^7.12.0: version "7.16.0" resolved "https://registry.yarnpkg.com/undici/-/undici-7.16.0.tgz#cb2a1e957726d458b536e3f076bf51f066901c1a" @@ -3658,6 +3711,11 @@ util@^0.10.3: dependencies: inherits "2.0.3" +v8-compile-cache-lib@^3.0.1: + version "3.0.1" + resolved "https://registry.yarnpkg.com/v8-compile-cache-lib/-/v8-compile-cache-lib-3.0.1.tgz#6336e8d71965cb3d35a1bbb7868445a7c05264bf" + integrity sha512-wa7YjyUGfNZngI/vtK0UHAN+lgDCxBPCylVXGp0zu59Fz5aiGtNXaq3DhIov063MorB+VfufLh3JlF2KdTK3xg== + whatwg-encoding@^3.1.1: version "3.1.1" resolved "https://registry.yarnpkg.com/whatwg-encoding/-/whatwg-encoding-3.1.1.tgz#d0f4ef769905d426e1688f3e34381a99b60b76e5" From 3749b70194de509fd0e12534f8affa35c490587b Mon Sep 17 00:00:00 2001 From: Oleksandr Zanichkovskyi Date: Tue, 23 Dec 2025 22:36:03 +0100 Subject: [PATCH 165/227] fix(sources): collect headers for logs in opentelemetry source with use_otlp_decoding set to true (#24307) * fix(opetelemetry source): collect headers for logs, metrics, traces with/without use_otlp_decoding * chore: add changelog * chore: renamed changelog * chore: removed adding headers to metrics and traces * chore: Test coverage * cargo fmt * Add newline to changelog * chore: Updated changelog to say that it is for logs only * chore: fixed clippy warning --------- Co-authored-by: Thomas --- ...telemetry_source_headers_collection.fix.md | 3 + src/sources/opentelemetry/http.rs | 11 +- src/sources/opentelemetry/tests.rs | 122 +++++++++++++----- 3 files changed, 100 insertions(+), 36 deletions(-) create mode 100644 changelog.d/24307_opentelemetry_source_headers_collection.fix.md diff --git a/changelog.d/24307_opentelemetry_source_headers_collection.fix.md b/changelog.d/24307_opentelemetry_source_headers_collection.fix.md new file 
mode 100644 index 0000000000000..c84d58f0348df --- /dev/null +++ b/changelog.d/24307_opentelemetry_source_headers_collection.fix.md @@ -0,0 +1,3 @@ +Fixed the opentelemetry source to collect HTTP headers for logs with or without `use_otlp_decoding` configuration option. + +authors: ozanichkovsky diff --git a/src/sources/opentelemetry/http.rs b/src/sources/opentelemetry/http.rs index 81f09f8470579..72ca6cef1de43 100644 --- a/src/sources/opentelemetry/http.rs +++ b/src/sources/opentelemetry/http.rs @@ -229,13 +229,12 @@ fn build_warp_log_filter( if let Some(d) = deserializer.as_ref() { parse_with_deserializer(d, decoded_body, log_namespace) } else { - decode_log_body(decoded_body, log_namespace, &events_received).map( - |mut events| { - enrich_events(&mut events, &headers_cfg, &headers, log_namespace); - events - }, - ) + decode_log_body(decoded_body, log_namespace, &events_received) } + .map(|mut events| { + enrich_events(&mut events, &headers_cfg, &headers, log_namespace); + events + }) }) }; diff --git a/src/sources/opentelemetry/tests.rs b/src/sources/opentelemetry/tests.rs index 76324210be13a..f6acd07533f5b 100644 --- a/src/sources/opentelemetry/tests.rs +++ b/src/sources/opentelemetry/tests.rs @@ -1,4 +1,5 @@ use std::{ + net, sync::Arc, time::{SystemTime, UNIX_EPOCH}, }; @@ -19,9 +20,7 @@ use vector_lib::{ ExportMetricsServiceRequest, metrics_service_client::MetricsServiceClient, }, }, - common::v1::{ - AnyValue, InstrumentationScope, KeyValue, any_value, any_value::Value::StringValue, - }, + common::v1::{AnyValue, InstrumentationScope, KeyValue, any_value::Value::StringValue}, logs::v1::{LogRecord, ResourceLogs, ScopeLogs}, metrics::v1::{ AggregationTemporality, ExponentialHistogram, ExponentialHistogramDataPoint, Gauge, @@ -33,7 +32,6 @@ use vector_lib::{ }, }; use vrl::value; -use warp::http::HeaderMap; use crate::{ SourceSender, @@ -1067,36 +1065,39 @@ async fn receive_summary_metric() { .await; } +fn get_source_config_with_headers( + grpc_addr: 
net::SocketAddr, + http_addr: net::SocketAddr, + use_otlp_decoding: bool, +) -> OpentelemetryConfig { + OpentelemetryConfig { + grpc: GrpcConfig { + address: grpc_addr, + tls: Default::default(), + }, + http: HttpConfig { + address: http_addr, + tls: Default::default(), + keepalive: Default::default(), + headers: vec![ + "User-Agent".to_string(), + "X-*".to_string(), + "AbsentHeader".to_string(), + ], + }, + acknowledgements: Default::default(), + log_namespace: Default::default(), + use_otlp_decoding, + } +} + #[tokio::test] -async fn http_headers() { +async fn http_headers_logs_use_otlp_decoding_false() { assert_source_compliance(&SOURCE_TAGS, async { let (_guard_0, grpc_addr) = next_addr(); let (_guard_1, http_addr) = next_addr(); - let mut headers = HeaderMap::new(); - headers.insert("User-Agent", "test_client".parse().unwrap()); - headers.insert("Upgrade-Insecure-Requests", "false".parse().unwrap()); - headers.insert("X-Test-Header", "true".parse().unwrap()); - - let source = OpentelemetryConfig { - grpc: GrpcConfig { - address: grpc_addr, - tls: Default::default(), - }, - http: HttpConfig { - address: http_addr, - tls: Default::default(), - keepalive: Default::default(), - headers: vec![ - "User-Agent".to_string(), - "X-*".to_string(), - "AbsentHeader".to_string(), - ], - }, - acknowledgements: Default::default(), - log_namespace: Default::default(), - use_otlp_decoding: false, - }; + let source = get_source_config_with_headers(grpc_addr, http_addr, false); let schema_definitions = source .outputs(LogNamespace::Legacy) .remove(0) @@ -1122,7 +1123,7 @@ async fn http_headers() { severity_number: 9, severity_text: "info".into(), body: Some(AnyValue { - value: Some(any_value::Value::StringValue("log body".into())), + value: Some(StringValue("log body".into())), }), attributes: vec![], dropped_attributes_count: 0, @@ -1175,6 +1176,67 @@ async fn http_headers() { .await; } +#[tokio::test] +async fn http_headers_logs_use_otlp_decoding_true() { + 
assert_source_compliance(&SOURCE_TAGS, async { + let (_guard_0, grpc_addr) = next_addr(); + let (_guard_1, http_addr) = next_addr(); + + let source = get_source_config_with_headers(grpc_addr, http_addr, true); + + let (sender, logs_output, _) = new_source(EventStatus::Delivered, LOGS.to_string()); + let server = source + .build(SourceContext::new_test(sender, None)) + .await + .unwrap(); + tokio::spawn(server); + test_util::wait_for_tcp(http_addr).await; + + let client = reqwest::Client::new(); + let req = ExportLogsServiceRequest { + resource_logs: vec![ResourceLogs { + resource: None, + scope_logs: vec![ScopeLogs { + scope: None, + log_records: vec![LogRecord { + time_unix_nano: 1, + observed_time_unix_nano: 2, + severity_number: 9, + severity_text: "info".into(), + body: Some(AnyValue { + value: Some(StringValue("log body".into())), + }), + attributes: vec![], + dropped_attributes_count: 0, + flags: 4, + // opentelemetry sdk will hex::decode the given trace_id and span_id + trace_id: str_into_hex_bytes("4ac52aadf321c2e531db005df08792f5"), + span_id: str_into_hex_bytes("0b9e4bda2a55530d"), + }], + schema_url: "v1".into(), + }], + schema_url: "v1".into(), + }], + }; + let _res = client + .post(format!("http://{http_addr}/v1/logs")) + .header("Content-Type", "application/x-protobuf") + .header("User-Agent", "Test") + .body(req.encode_to_vec()) + .send() + .await + .expect("Failed to send log to Opentelemetry Collector."); + + let mut output = test_util::collect_ready(logs_output).await; + assert_eq!(output.len(), 1); + let actual_event = output.pop().unwrap(); + let log = actual_event.as_log(); + assert_eq!(log["AbsentHeader"], Value::Null); + assert_eq!(log["User-Agent"], "Test".into()); + }) + .await; +} + pub struct OTelTestEnv { pub grpc_addr: String, pub config: OpentelemetryConfig, From 11aa135eeaeec0b80967725731f483372683ed64 Mon Sep 17 00:00:00 2001 From: Karol Chrapek Date: Tue, 30 Dec 2025 19:10:17 +0100 Subject: [PATCH 166/227] chore(ci): Add aggregate 
transform to semantic PR scope list (#24422) Added aggregate transform to the allowed scopes list in .github/workflows/semantic.yml --- .github/workflows/semantic.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/semantic.yml b/.github/workflows/semantic.yml index aa67c798646bc..40a237787c53d 100644 --- a/.github/workflows/semantic.yml +++ b/.github/workflows/semantic.yml @@ -178,6 +178,7 @@ jobs: vector source websocket source + aggregate transform aws_ec2_metadata transform dedupe transform exclusive_route transform From 5f8ab319b847c10912ee01a204b40a1105616d6f Mon Sep 17 00:00:00 2001 From: Vitalii Parfonov Date: Mon, 5 Jan 2026 18:24:58 +0200 Subject: [PATCH 167/227] feat(codecs): Add syslog encoder (#23777) * feat: Add syslog codec Original commit from syedriko * chore: Split syslog encoder into separate files This is only a temporary change to make the diffs for future commits easier to follow. * refactor: Syslog facility and severity - Introduce a `Pri` struct with fields for severity and facility as enum values. - `Pri` uses `strum` crate to parse string values into their appropriate enum variant. - Handles the responsibility of encoding the two enum values ordinal values into the `PRIVAL` value for the encoder. - As `Facility` and `Severity` enums better represent their ordinal mapping directly - The `Fixed` + `Field` subtyping with custom deserializer isn't necessary. Parsing a string that represents the enum by name or its ordinal representation is much simpler. - Likewise this removes the need for the get methods as the enum can provide both the `String` or `u8` representation as needed. * refactor: `SyslogSerializer` `SyslogSerializer::encode()` has been simplified. - Only matching `Event::Log` is relevant, an `if let` bind instead of `match` helps remove a redundant level of nesting. 
- This method only focuses on boilerplate now, delegating the rest to `ConfigDecanter` (_adapt `LogEvent` + encoder config_) and `SyslogMessage` (_encode into syslog message string_). - This removes some complexity during actual encoding logic, which should only be concerned about directly encoding from one representation to another, not complimentary features related to Vector config or it's type system. The new `ConfigDecanter` is where many of the original helper methods that were used by `SyslogSerializer::encode()` now reside. This change better communicates the scope of their usage. - Any interaction with `LogEvent` is now contained within the methods of this new struct. Likewise for the consumption of the encoder configuration (instead of queries to config throughout encoding). - The `decant_config()` method better illustrates an overview of the data we're encoding and where that's being sourced from via the new `SyslogMessage` struct, which splits off the actual encoding responsibility (see next commit). * refactor: `SyslogSerializerConfig` `SyslogSerializerConfig` has been simplified. - Facility / Severity deserializer methods aren't needed, as per their prior refactor with `strum`. - The `app_name` default is set via `decant_config()` when not configured explicitly. - The other two fields calling a `default_nil_value()` method instead use an option value which encodes `None` into the expected `-` value. - Everything else does not need a serde attribute to apply a default, the `Default` trait on the struct is sufficient. - `trim_prefix` was removed as it didn't seem relevant. `tag` was also removed as it's represented by several subfields in RFC 5424 which RFC 3164 can also use. `SyslogMessage::encode()` refactors the original PR encoding logic: - Syslog Header fields focused, the PRI and final message value have already been prepared prior. They are only referenced at the end of `encode()` to combine into the final string output. 
- While less efficient than `push_str()`, each match variant has a clear structure returned via the array `join(" ")` which minimizes the noise of `SP` from the original PR. Value preparation prior to this is clear and better documented. - `Tag` is a child struct to keep the main logic easy to grok. `StructuredData` is a similar case. * chore: Merge back into `syslog.rs` No changes beyond relocating the code into a single file. * feat: Add StructuredData support to Syslog encoder * chore: Housekeeping - Drop notes referring to original PR differences + StructuredData adaption references. None of it should be relevant going forward. - Revise some other notes. - Drop `add_log_source` method (introduced from the original PR author) in favor of using `StructuredData` support instead. * chore: DRY `into_variant()` via `akin` crate This should be simple and lightweight enough to justify for the DRY benefit? This way the method doesn't need to be duplicated redundantly. That was required because there is no trait for `FromRepr` provided via `strum`. That would require a similar amount of lines for the small duplication here. The `akin` macro duplicates the `impl` block for each value in the `&enums` array. * chore: Minor revisions - `ConfigDecanter::get_message()` replaces the fallback method in favor of `to_string_lossy()` (a dedicated equivalent for converting `Value` type to a String type (_technically it is a CoW str, hence the follow-up with `to_string()`_)). - This also encodes the value better, especially for the default `log_namespace: false` as the message value (when `String`) is not quote wrapped, which matches the behaviour of the `text` encoder output. - Additionally uses the `LogEvent` method `get_message()` directly from `lib/vector-core/src/event /log_event.rs`. This can better retrieve the log message regardless of the `log_namespace` setting. 
- Encoding of RFC 5424 fields has changed to inline the `version` constant directly, instead of via a redundant variable. If there's ever multiple versions that need to be supported, it could be addressed then. - The RFC 5424 timestamp has a max precision of microseconds, thus this should be rounded and `AutoSi` can be used (_or `Micros` if it should have fixed padding instead of truncating trailing `000`_). * chore: Switch from `DateTime` to `DateTime` - The original PR author appears to have relied on a hard-coded timestamp key here. - `DateTime` would render the timestamp field with the local timezone offset, but other than that `DateTime` would seem more consistent with usage in Vector, especially since any original TZ context is lost by this point? - Notes adjusted accordingly, with added TODO query for each encoding mode to potentially support configurable timezone. * chore: Adopt a separate options config struct + minor revisions - Move encoder config settings under a single `syslog` config field. This better mirrors configuration options for existing encoders like Avro and CSV. - `ConfigDecanter::value_by_key()` appears to accomplish roughly the same as the existing helper method `to_string_lossy()`. Prefer that instead. This also makes the `StructuredData` helper `value_to_string()` redundant too at a glance? - Added some reference for the priority value `PRIVAL`. - `Pri::from_str_variants()` uses the existing defaults for fallback, communicate that more clearly. Contextual note is no longer useful, removed. * chore: Switch from `String` to deserialize `Facility` + `Severity` enums To better communicate the allowed values, these two config fields can change from the `String` type to their appropriate enum type. - This relies on serde to deserialize the config value to the enum which adds a bit more noise to grok. 
- It does make `Pri::from_str_variants()` redundant, while the `into_variant()` methods are refactored to `deserialize()` with a proper error message emitted to match the what serde would normally emit for failed enum variant deserialization. - A drawback of this change is that these two config fields lost the ability to reference a different value path in the `LogEvent`. That'll be addressed in a future commit. * fix: Support deserializing config value that is a number type In a YAML config a string can optionally be wrapped with quotes, while a number that isn't quote wrapped will be treated as a number type. The current support was only for string numbers, this change now supports flexibility for config using ordinal values in YAML regardless of quote usage. The previous `Self::into_variant(&s)` logic could have been used instead of bringing in `serde-aux`, but the external helper attribute approach seems easier to grok/follow as the intermediary container still seems required for a terse implementation. The match statement uses a reference (_which requires a deref for `from_repr`_) to appease the borrow checker for the later borrow needed by `value` in the error message. * chore: Add doc comments for enum variants to appease Vector requirement This seems redundant given the context? Mostly adds unnecessary noise. Could probably `impl Configurable` or similar to try workaround the requirement. The metadata description could generate the variant list similar to how it's been handled for error message handling? * chore: Use `snafu` for error message Not sure if this is worthwhile, but it adopts error message convention elsewhere I've seen by managing them via Snafu. 
* merge with master Signed-off-by: Vitalii Parfonov * Continue working with existed code: add tests, make facility and severity dynamic, payload_key optional Signed-off-by: Vitalii Parfonov * add except_fields option Signed-off-by: Vitalii Parfonov * code cleanup, remove duplication, use generic, add tests Signed-off-by: Vitalii Parfonov * add except_fields option Signed-off-by: Vitalii Parfonov * fix clippy warning Signed-off-by: Vitalii Parfonov * Merge with master * Fix compilation error. Apply formater * Fix dependencies downgraded by mistake * make code simple, expected config only via field reference * fix clippy warn * fix clippy warn and formatting * fix build with minimal set of features * add changelog file * fix changelog format * Removing `payload_key' field in favor of the standard `message`: - Removed the obsolete `payload_key` field from `SyslogSerializerOptions` and simplified the payload retrieval logic. - Applied `#[serde(deny_unknown_fields)]` to the `SyslogSerializerOptions` struct, to enforces failing if configuration errors. 
* Add edge case tests, fix tag truncation logic, fix formatting * add fallback for get_by_meaning() for application field * bump derive_more crate to latest: 2.0.1 * make 'toml' crate optional * Generate component docs * update licenses * fix formatting in greptimedb_logs docs --------- Signed-off-by: Vitalii Parfonov Co-authored-by: Sergey Yedrikov Co-authored-by: polarathene <5098581+polarathene@users.noreply.github.com> --- Cargo.lock | 39 +- LICENSE-3rdparty.csv | 3 + changelog.d/syslog_encoding.feature.md | 5 + lib/codecs/Cargo.toml | 7 +- lib/codecs/src/encoding/format/mod.rs | 4 + lib/codecs/src/encoding/format/syslog.rs | 850 ++++++++++++++++++ lib/codecs/src/encoding/mod.rs | 2 + lib/codecs/src/encoding/serializer.rs | 34 + src/codecs/encoding/config.rs | 2 + src/codecs/encoding/encoder.rs | 2 + src/components/validation/resources/mod.rs | 2 + .../components/sinks/generated/amqp.cue | 52 ++ .../sinks/generated/aws_cloudwatch_logs.cue | 52 ++ .../sinks/generated/aws_kinesis_firehose.cue | 52 ++ .../sinks/generated/aws_kinesis_streams.cue | 52 ++ .../components/sinks/generated/aws_s3.cue | 52 ++ .../components/sinks/generated/aws_sns.cue | 52 ++ .../components/sinks/generated/aws_sqs.cue | 52 ++ .../components/sinks/generated/azure_blob.cue | 52 ++ .../components/sinks/generated/console.cue | 52 ++ .../components/sinks/generated/file.cue | 52 ++ .../generated/gcp_chronicle_unstructured.cue | 52 ++ .../sinks/generated/gcp_cloud_storage.cue | 52 ++ .../components/sinks/generated/gcp_pubsub.cue | 52 ++ .../components/sinks/generated/http.cue | 52 ++ .../components/sinks/generated/humio_logs.cue | 52 ++ .../components/sinks/generated/kafka.cue | 52 ++ .../components/sinks/generated/loki.cue | 52 ++ .../components/sinks/generated/mqtt.cue | 52 ++ .../components/sinks/generated/nats.cue | 52 ++ .../sinks/generated/opentelemetry.cue | 52 ++ .../components/sinks/generated/papertrail.cue | 52 ++ .../components/sinks/generated/pulsar.cue | 52 ++ 
.../components/sinks/generated/redis.cue | 52 ++ .../components/sinks/generated/socket.cue | 52 ++ .../sinks/generated/splunk_hec_logs.cue | 52 ++ .../components/sinks/generated/webhdfs.cue | 52 ++ .../components/sinks/generated/websocket.cue | 52 ++ .../sinks/generated/websocket_server.cue | 52 ++ 39 files changed, 2403 insertions(+), 3 deletions(-) create mode 100644 changelog.d/syslog_encoding.feature.md create mode 100644 lib/codecs/src/encoding/format/syslog.rs diff --git a/Cargo.lock b/Cargo.lock index bb5478d575f5c..a30618189ed90 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2600,6 +2600,7 @@ dependencies = [ "chrono", "csv-core", "derivative", + "derive_more 2.0.1", "dyn-clone", "flate2", "futures 0.3.31", @@ -2615,14 +2616,17 @@ dependencies = [ "rstest", "rust_decimal", "serde", + "serde-aux", "serde_json", "serde_with", "similar-asserts", "smallvec", "snafu 0.8.9", + "strum 0.26.3", "syslog_loose 0.23.0", "tokio", "tokio-util", + "toml 0.9.8", "tracing 0.1.41", "tracing-test", "uuid", @@ -3491,6 +3495,27 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "derive_more" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "093242cf7570c207c83073cf82f79706fe7b8317e98620a47d5be7c3d8497678" +dependencies = [ + "derive_more-impl", +] + +[[package]] +name = "derive_more-impl" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bda628edc44c4bb645fbe0f758797143e4e07926f7ebf4e9bdfbd3d2ce621df3" +dependencies = [ + "proc-macro2 1.0.101", + "quote 1.0.40", + "syn 2.0.106", + "unicode-xid 0.2.4", +] + [[package]] name = "difflib" version = "0.4.0" @@ -6948,7 +6973,7 @@ dependencies = [ "bson", "chrono", "derive-where", - "derive_more", + "derive_more 0.99.17", "futures-core", "futures-executor", "futures-io", @@ -10095,6 +10120,18 @@ dependencies = [ "serde_derive", ] +[[package]] +name = "serde-aux" +version = "4.7.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "207f67b28fe90fb596503a9bf0bf1ea5e831e21307658e177c5dfcdfc3ab8a0a" +dependencies = [ + "chrono", + "serde", + "serde-value", + "serde_json", +] + [[package]] name = "serde-toml-merge" version = "0.3.11" diff --git a/LICENSE-3rdparty.csv b/LICENSE-3rdparty.csv index 3df3154423daa..b507953d3272b 100644 --- a/LICENSE-3rdparty.csv +++ b/LICENSE-3rdparty.csv @@ -237,6 +237,7 @@ derive_builder,https://github.com/colin-kiegel/rust-derive-builder,MIT OR Apache derive_builder_core,https://github.com/colin-kiegel/rust-derive-builder,MIT OR Apache-2.0,"Colin Kiegel , Pascal Hertleif , Jan-Erik Rediger , Ted Driggs " derive_builder_macro,https://github.com/colin-kiegel/rust-derive-builder,MIT OR Apache-2.0,"Colin Kiegel , Pascal Hertleif , Jan-Erik Rediger , Ted Driggs " derive_more,https://github.com/JelteF/derive_more,MIT,Jelte Fennema +derive_more-impl,https://github.com/JelteF/derive_more,MIT,Jelte Fennema digest,https://github.com/RustCrypto/traits,MIT OR Apache-2.0,RustCrypto Developers dirs-next,https://github.com/xdg-rs/dirs,MIT OR Apache-2.0,The @xdg-rs members dirs-sys-next,https://github.com/xdg-rs/dirs/tree/master/dirs-sys,MIT OR Apache-2.0,The @xdg-rs members @@ -688,6 +689,7 @@ security-framework,https://github.com/kornelski/rust-security-framework,MIT OR A security-framework-sys,https://github.com/kornelski/rust-security-framework,MIT OR Apache-2.0,"Steven Fackler , Kornel " semver,https://github.com/dtolnay/semver,MIT OR Apache-2.0,David Tolnay serde,https://github.com/serde-rs/serde,MIT OR Apache-2.0,"Erick Tryzelaar , David Tolnay " +serde-aux,https://github.com/iddm/serde-aux,MIT,Victor Polevoy serde-toml-merge,https://github.com/jdrouet/serde-toml-merge,MIT,Jeremie Drouet serde-value,https://github.com/arcnmx/serde-value,MIT,arcnmx serde_bytes,https://github.com/serde-rs/bytes,MIT OR Apache-2.0,David Tolnay @@ -834,6 +836,7 @@ 
unicode-normalization,https://github.com/unicode-rs/unicode-normalization,MIT OR unicode-segmentation,https://github.com/unicode-rs/unicode-segmentation,MIT OR Apache-2.0,"kwantam , Manish Goregaokar " unicode-truncate,https://github.com/Aetf/unicode-truncate,MIT OR Apache-2.0,Aetf unicode-width,https://github.com/unicode-rs/unicode-width,MIT OR Apache-2.0,"kwantam , Manish Goregaokar " +unicode-xid,https://github.com/unicode-rs/unicode-xid,MIT OR Apache-2.0,"erick.tryzelaar , kwantam , Manish Goregaokar " unit-prefix,https://codeberg.org/commons-rs/unit-prefix,MIT,"Fabio Valentini , Benjamin Sago " universal-hash,https://github.com/RustCrypto/traits,MIT OR Apache-2.0,RustCrypto Developers unreachable,https://github.com/reem/rust-unreachable,MIT OR Apache-2.0,Jonathan Reem diff --git a/changelog.d/syslog_encoding.feature.md b/changelog.d/syslog_encoding.feature.md new file mode 100644 index 0000000000000..425884210d7e1 --- /dev/null +++ b/changelog.d/syslog_encoding.feature.md @@ -0,0 +1,5 @@ +Added `syslog` codec for encoding Vector events to Syslog format. +It handles RFC5424 and RFC3164 format, including specific field length limitations, character sanitization +and security escaping. 
+ +authors: syedriko polarathene vparfonov diff --git a/lib/codecs/Cargo.toml b/lib/codecs/Cargo.toml index d73b1177c0f34..01d9836f666b3 100644 --- a/lib/codecs/Cargo.toml +++ b/lib/codecs/Cargo.toml @@ -21,6 +21,7 @@ chrono.workspace = true rust_decimal.workspace = true csv-core = { version = "0.1.12", default-features = false } derivative.workspace = true +derive_more = { version = "2.0.1", optional = true, features = ["from", "display"] } dyn-clone = { version = "1", default-features = false } flate2.workspace = true influxdb-line-protocol = { version = "2", default-features = false } @@ -35,8 +36,10 @@ regex.workspace = true serde.workspace = true serde_with = { version = "3.14.0", default-features = false, features = ["std", "macros", "chrono_0_4"] } serde_json.workspace = true +serde-aux = { version = "4.5", optional = true } smallvec = { version = "1", default-features = false, features = ["union"] } snafu.workspace = true +strum = { version = "0.26.3", features = ["derive"], optional = true } syslog_loose = { version = "0.23", default-features = false, optional = true } tokio-util = { version = "0.7", default-features = false, features = ["codec"] } tokio = { workspace = true, features = ["full"] } @@ -47,7 +50,7 @@ vector-config = { path = "../vector-config", default-features = false } vector-config-macros = { path = "../vector-config-macros", default-features = false } vector-core = { path = "../vector-core", default-features = false, features = ["vrl"] } vector-vrl-functions.workspace = true - +toml = { version = "0.9.8", optional = true } [dev-dependencies] futures.workspace = true indoc.workspace = true @@ -62,4 +65,4 @@ vrl.workspace = true [features] arrow = [] opentelemetry = ["dep:opentelemetry-proto"] -syslog = ["dep:syslog_loose"] +syslog = ["dep:syslog_loose", "dep:strum", "dep:derive_more", "dep:serde-aux", "dep:toml"] diff --git a/lib/codecs/src/encoding/format/mod.rs b/lib/codecs/src/encoding/format/mod.rs index ccafb2b969cd7..5026dda422d9e 
100644 --- a/lib/codecs/src/encoding/format/mod.rs +++ b/lib/codecs/src/encoding/format/mod.rs @@ -18,6 +18,8 @@ mod native_json; mod otlp; mod protobuf; mod raw_message; +#[cfg(feature = "syslog")] +mod syslog; mod text; use std::fmt::Debug; @@ -38,6 +40,8 @@ pub use native_json::{NativeJsonSerializer, NativeJsonSerializerConfig}; pub use otlp::{OtlpSerializer, OtlpSerializerConfig}; pub use protobuf::{ProtobufSerializer, ProtobufSerializerConfig, ProtobufSerializerOptions}; pub use raw_message::{RawMessageSerializer, RawMessageSerializerConfig}; +#[cfg(feature = "syslog")] +pub use syslog::{SyslogSerializer, SyslogSerializerConfig}; pub use text::{TextSerializer, TextSerializerConfig}; use vector_core::event::Event; diff --git a/lib/codecs/src/encoding/format/syslog.rs b/lib/codecs/src/encoding/format/syslog.rs new file mode 100644 index 0000000000000..601d4d3c04617 --- /dev/null +++ b/lib/codecs/src/encoding/format/syslog.rs @@ -0,0 +1,850 @@ +use bytes::{BufMut, BytesMut}; +use chrono::{DateTime, SecondsFormat, SubsecRound, Utc}; +use lookup::lookup_v2::ConfigTargetPath; +use std::collections::HashMap; +use std::fmt::Write; +use std::str::FromStr; +use strum::{EnumString, FromRepr, VariantNames}; +use tokio_util::codec::Encoder; +use vector_config::configurable_component; +use vector_core::{ + config::DataType, + event::{Event, LogEvent, Value}, + schema, +}; +use vrl::value::ObjectMap; + +/// Config used to build a `SyslogSerializer`. +#[configurable_component] +#[derive(Clone, Debug, Default)] +#[serde(default)] +pub struct SyslogSerializerConfig { + /// Options for the Syslog serializer. + pub syslog: SyslogSerializerOptions, +} + +impl SyslogSerializerConfig { + /// Build the `SyslogSerializer` from this configuration. + pub fn build(&self) -> SyslogSerializer { + SyslogSerializer::new(self) + } + + /// The data type of events that are accepted by `SyslogSerializer`. 
+ pub fn input_type(&self) -> DataType { + DataType::Log + } + + /// The schema required by the serializer. + pub fn schema_requirement(&self) -> schema::Requirement { + schema::Requirement::empty() + } +} + +/// Syslog serializer options. +#[configurable_component] +#[derive(Clone, Debug, Default)] +#[serde(default, deny_unknown_fields)] +pub struct SyslogSerializerOptions { + /// RFC to use for formatting. + rfc: SyslogRFC, + /// Path to a field in the event to use for the facility. Defaults to "user". + facility: Option, + /// Path to a field in the event to use for the severity. Defaults to "informational". + severity: Option, + /// Path to a field in the event to use for the app name. + /// + /// If not provided, the encoder checks for a semantic "service" field. + /// If that is also missing, it defaults to "vector". + app_name: Option, + /// Path to a field in the event to use for the proc ID. + proc_id: Option, + /// Path to a field in the event to use for the msg ID. + msg_id: Option, +} + +/// Serializer that converts an `Event` to bytes using the Syslog format. +#[derive(Debug, Clone)] +pub struct SyslogSerializer { + config: SyslogSerializerConfig, +} + +impl SyslogSerializer { + /// Creates a new `SyslogSerializer`. 
+ pub fn new(conf: &SyslogSerializerConfig) -> Self { + Self { + config: conf.clone(), + } + } +} + +impl Encoder for SyslogSerializer { + type Error = vector_common::Error; + + fn encode(&mut self, event: Event, buffer: &mut BytesMut) -> Result<(), Self::Error> { + if let Event::Log(log_event) = event { + let syslog_message = ConfigDecanter::new(&log_event).decant_config(&self.config.syslog); + let vec = syslog_message + .encode(&self.config.syslog.rfc) + .as_bytes() + .to_vec(); + buffer.put_slice(&vec); + } + + Ok(()) + } +} + +struct ConfigDecanter<'a> { + log: &'a LogEvent, +} + +impl<'a> ConfigDecanter<'a> { + fn new(log: &'a LogEvent) -> Self { + Self { log } + } + + fn decant_config(&self, config: &SyslogSerializerOptions) -> SyslogMessage { + let mut app_name = self + .get_value(&config.app_name) // P1: Configured path + .unwrap_or_else(|| { + // P2: Semantic Fallback: Check for the field designated as "service" in the schema + self.log + .get_by_meaning("service") + .map(|v| v.to_string_lossy().to_string()) + // P3: Hardcoded default + .unwrap_or_else(|| "vector".to_owned()) + }); + let mut proc_id = self.get_value(&config.proc_id); + let mut msg_id = self.get_value(&config.msg_id); + if config.rfc == SyslogRFC::Rfc5424 { + if app_name.len() > 48 { + app_name.truncate(48); + } + if let Some(pid) = &mut proc_id + && pid.len() > 128 + { + pid.truncate(128); + } + if let Some(mid) = &mut msg_id + && mid.len() > 32 + { + mid.truncate(32); + } + } + + SyslogMessage { + pri: Pri { + facility: self.get_facility(config), + severity: self.get_severity(config), + }, + timestamp: self.get_timestamp(), + hostname: self.log.get_host().map(|v| v.to_string_lossy().to_string()), + tag: Tag { + app_name, + proc_id, + msg_id, + }, + structured_data: self.get_structured_data(), + message: self.get_payload(), + } + } + + fn get_value(&self, path: &Option) -> Option { + path.as_ref() + .and_then(|p| self.log.get(p).cloned()) + .map(|v| v.to_string_lossy().to_string()) + } + + 
fn get_structured_data(&self) -> Option { + self.log + .get("structured_data") + .and_then(|v| v.clone().into_object()) + .map(StructuredData::from) + } + + fn get_timestamp(&self) -> DateTime { + if let Some(Value::Timestamp(timestamp)) = self.log.get_timestamp() { + return *timestamp; + } + Utc::now() + } + + fn get_payload(&self) -> String { + self.log + .get_message() + .map(|v| v.to_string_lossy().to_string()) + .unwrap_or_default() + } + + fn get_facility(&self, config: &SyslogSerializerOptions) -> Facility { + config.facility.as_ref().map_or(Facility::User, |path| { + self.get_syslog_code(path, Facility::from_repr, Facility::User) + }) + } + + fn get_severity(&self, config: &SyslogSerializerOptions) -> Severity { + config + .severity + .as_ref() + .map_or(Severity::Informational, |path| { + self.get_syslog_code(path, Severity::from_repr, Severity::Informational) + }) + } + + fn get_syslog_code( + &self, + path: &ConfigTargetPath, + from_repr_fn: fn(usize) -> Option, + default_value: T, + ) -> T + where + T: Copy + FromStr, + { + if let Some(value) = self.log.get(path).cloned() { + let s = value.to_string_lossy(); + if let Ok(val_from_name) = s.to_ascii_lowercase().parse::() { + return val_from_name; + } + if let Value::Integer(n) = value + && let Some(val_from_num) = from_repr_fn(n as usize) + { + return val_from_num; + } + } + default_value + } +} + +const NIL_VALUE: &str = "-"; +const SYSLOG_V1: &str = "1"; +const RFC3164_TAG_MAX_LENGTH: usize = 32; + +/// The syslog RFC standard to use for formatting. +#[configurable_component] +#[derive(PartialEq, Clone, Debug, Default)] +#[serde(rename_all = "snake_case")] +pub enum SyslogRFC { + /// The legacy RFC3164 syslog format. + Rfc3164, + /// The modern RFC5424 syslog format. 
+ #[default] + Rfc5424, +} + +#[derive(Default, Debug)] +struct SyslogMessage { + pri: Pri, + timestamp: DateTime, + hostname: Option, + tag: Tag, + structured_data: Option, + message: String, +} + +impl SyslogMessage { + fn encode(&self, rfc: &SyslogRFC) -> String { + let pri_header = self.pri.encode(); + + let mut parts = Vec::new(); + + let timestamp_str = match rfc { + SyslogRFC::Rfc3164 => self.timestamp.format("%b %e %H:%M:%S").to_string(), + SyslogRFC::Rfc5424 => self + .timestamp + .round_subsecs(6) + .to_rfc3339_opts(SecondsFormat::Micros, true), + }; + parts.push(timestamp_str); + parts.push(self.hostname.as_deref().unwrap_or(NIL_VALUE).to_string()); + + let tag_str = match rfc { + SyslogRFC::Rfc3164 => self.tag.encode_rfc_3164(), + SyslogRFC::Rfc5424 => self.tag.encode_rfc_5424(), + }; + parts.push(tag_str); + + let mut message_part = self.message.clone(); + if *rfc == SyslogRFC::Rfc3164 { + message_part = Self::sanitize_rfc3164_message(&message_part); + } + + if let Some(sd) = &self.structured_data { + let sd_string = sd.encode(); + if *rfc == SyslogRFC::Rfc3164 { + if !sd.elements.is_empty() { + if !message_part.is_empty() { + message_part = format!("{sd_string} {message_part}"); + } else { + message_part = sd_string; + } + } + } else { + parts.push(sd_string); + } + } else if *rfc == SyslogRFC::Rfc5424 { + parts.push(NIL_VALUE.to_string()); + } + + if !message_part.is_empty() { + parts.push(message_part); + } + + let main_message = parts.join(" "); + + if *rfc == SyslogRFC::Rfc5424 { + format!("{pri_header}{SYSLOG_V1} {main_message}") + } else { + format!("{pri_header}{main_message}") + } + } + + fn sanitize_rfc3164_message(message: &str) -> String { + message + .chars() + .map(|ch| if (' '..='~').contains(&ch) { ch } else { ' ' }) + .collect() + } +} + +#[derive(Default, Debug)] +struct Tag { + app_name: String, + proc_id: Option, + msg_id: Option, +} + +impl Tag { + fn encode_rfc_3164(&self) -> String { + let mut tag = if let Some(proc_id) = 
self.proc_id.as_deref() { + format!("{}[{}]:", self.app_name, proc_id) + } else { + format!("{}:", self.app_name) + }; + if tag.len() > RFC3164_TAG_MAX_LENGTH { + tag.truncate(RFC3164_TAG_MAX_LENGTH); + if !tag.ends_with(':') { + tag.pop(); + tag.push(':'); + } + } + tag + } + + fn encode_rfc_5424(&self) -> String { + let proc_id_str = self.proc_id.as_deref().unwrap_or(NIL_VALUE); + let msg_id_str = self.msg_id.as_deref().unwrap_or(NIL_VALUE); + format!("{} {} {}", self.app_name, proc_id_str, msg_id_str) + } +} + +type StructuredDataMap = HashMap>; +#[derive(Debug, Default)] +struct StructuredData { + elements: StructuredDataMap, +} + +impl StructuredData { + fn encode(&self) -> String { + if self.elements.is_empty() { + NIL_VALUE.to_string() + } else { + self.elements + .iter() + .fold(String::new(), |mut acc, (sd_id, sd_params)| { + let _ = write!(acc, "[{sd_id}"); + for (key, value) in sd_params { + let esc_val = Self::escape_sd(value); + let _ = write!(acc, " {key}=\"{esc_val}\""); + } + let _ = write!(acc, "]"); + acc + }) + } + } + + fn escape_sd(s: &str) -> String { + s.replace('\\', "\\\\") + .replace('"', "\\\"") + .replace(']', "\\]") + } +} + +impl From for StructuredData { + fn from(fields: ObjectMap) -> Self { + let elements = fields + .into_iter() + .flat_map(|(sd_id, value)| { + let sd_params = value + .into_object()? 
+ .into_iter() + .map(|(k, v)| (k.into(), v.to_string_lossy().to_string())) + .collect(); + Some((sd_id.into(), sd_params)) + }) + .collect(); + Self { elements } + } +} + +#[derive(Default, Debug)] +struct Pri { + facility: Facility, + severity: Severity, +} + +impl Pri { + // The last paragraph describes how to compose the enums into `PRIVAL`: + // https://datatracker.ietf.org/doc/html/rfc5424#section-6.2.1 + fn encode(&self) -> String { + let pri_val = (self.facility as u8 * 8) + self.severity as u8; + format!("<{pri_val}>") + } +} + +/// Syslog facility +#[derive(Default, Debug, EnumString, FromRepr, VariantNames, Copy, Clone, PartialEq, Eq)] +#[strum(serialize_all = "kebab-case")] +#[configurable_component] +pub enum Facility { + /// Kern + Kern = 0, + /// User + #[default] + User = 1, + /// Mail + Mail = 2, + /// Daemon + Daemon = 3, + /// Auth + Auth = 4, + /// Syslog + Syslog = 5, + /// Lpr + Lpr = 6, + /// News + News = 7, + /// Uucp + Uucp = 8, + /// Cron + Cron = 9, + /// Authpriv + Authpriv = 10, + /// Ftp + Ftp = 11, + /// Ntp + Ntp = 12, + /// Security + Security = 13, + /// Console + Console = 14, + /// SolarisCron + SolarisCron = 15, + /// Local0 + Local0 = 16, + /// Local1 + Local1 = 17, + /// Local2 + Local2 = 18, + /// Local3 + Local3 = 19, + /// Local4 + Local4 = 20, + /// Local5 + Local5 = 21, + /// Local6 + Local6 = 22, + /// Local7 + Local7 = 23, +} + +/// Syslog severity +#[derive(Default, Debug, EnumString, FromRepr, VariantNames, Copy, Clone, PartialEq, Eq)] +#[strum(serialize_all = "kebab-case")] +#[configurable_component] +pub enum Severity { + /// Emergency + Emergency = 0, + /// Alert + Alert = 1, + /// Critical + Critical = 2, + /// Error + Error = 3, + /// Warning + Warning = 4, + /// Notice + Notice = 5, + /// Informational + #[default] + Informational = 6, + /// Debug + Debug = 7, +} + +#[cfg(test)] +mod tests { + use super::*; + use bytes::BytesMut; + use chrono::NaiveDate; + use std::sync::Arc; + use 
vector_core::config::LogNamespace; + use vector_core::event::Event::Metric; + use vector_core::event::{Event, MetricKind, MetricValue, StatisticKind}; + use vrl::path::parse_target_path; + use vrl::prelude::Kind; + use vrl::{btreemap, event_path, value}; + + fn run_encode(config: SyslogSerializerConfig, event: Event) -> String { + let mut serializer = SyslogSerializer::new(&config); + let mut buffer = BytesMut::new(); + serializer.encode(event, &mut buffer).unwrap(); + String::from_utf8(buffer.to_vec()).unwrap() + } + + fn create_simple_log() -> LogEvent { + let mut log = LogEvent::from("original message"); + log.insert( + event_path!("timestamp"), + NaiveDate::from_ymd_opt(2025, 8, 28) + .unwrap() + .and_hms_micro_opt(18, 30, 00, 123456) + .unwrap() + .and_local_timezone(Utc) + .unwrap(), + ); + log.insert(event_path!("host"), "test-host.com"); + log + } + + fn create_test_log() -> LogEvent { + let mut log = create_simple_log(); + log.insert(event_path!("app"), "my-app"); + log.insert(event_path!("pid"), "12345"); + log.insert(event_path!("mid"), "req-abc-789"); + log.insert(event_path!("fac"), "daemon"); //3 + log.insert(event_path!("sev"), Value::from(2u8)); // Critical + log.insert( + event_path!("structured_data"), + value!({"metrics": {"retries": 3}}), + ); + log + } + + #[test] + fn test_rfc5424_defaults() { + let config = toml::from_str::( + r#" + [syslog] + rfc = "rfc5424" + "#, + ) + .unwrap(); + let log = create_simple_log(); + let output = run_encode(config, Event::Log(log)); + let expected = + "<14>1 2025-08-28T18:30:00.123456Z test-host.com vector - - - original message"; + assert_eq!(output, expected); + } + + #[test] + fn test_rfc5424_all_fields() { + let config = toml::from_str::( + r#" + [syslog] + app_name = ".app" + proc_id = ".pid" + msg_id = ".mid" + facility = ".fac" + severity = ".sev" + "#, + ) + .unwrap(); + let log = create_test_log(); + let output = run_encode(config, Event::Log(log)); + let expected = "<26>1 2025-08-28T18:30:00.123456Z 
test-host.com my-app 12345 req-abc-789 [metrics retries=\"3\"] original message"; + assert_eq!(output, expected); + } + + #[test] + fn test_rfc3164_all_fields() { + let config = toml::from_str::( + r#" + [syslog] + rfc = "rfc3164" + facility = ".fac" + severity = ".sev" + app_name = ".app" + proc_id = ".pid" + "#, + ) + .unwrap(); + let log = create_test_log(); + let output = run_encode(config, Event::Log(log)); + let expected = "<26>Aug 28 18:30:00 test-host.com my-app[12345]: [metrics retries=\"3\"] original message"; + assert_eq!(output, expected); + } + + #[test] + fn test_parsing_logic() { + let mut log = LogEvent::from("test message"); + let config_fac = + toml::from_str::(r#"facility = ".syslog_facility""#).unwrap(); + let config_sev = + toml::from_str::(r#"severity = ".syslog_severity""#).unwrap(); + //check lowercase and digit + log.insert(event_path!("syslog_facility"), "daemon"); + log.insert(event_path!("syslog_severity"), "critical"); + let decanter = ConfigDecanter::new(&log); + let facility = decanter.get_facility(&config_fac); + let severity = decanter.get_severity(&config_sev); + assert_eq!(facility, Facility::Daemon); + assert_eq!(severity, Severity::Critical); + + //check uppercase + log.insert(event_path!("syslog_facility"), "DAEMON"); + log.insert(event_path!("syslog_severity"), "CRITICAL"); + let decanter = ConfigDecanter::new(&log); + let facility = decanter.get_facility(&config_fac); + let severity = decanter.get_severity(&config_sev); + assert_eq!(facility, Facility::Daemon); + assert_eq!(severity, Severity::Critical); + + //check digit + log.insert(event_path!("syslog_facility"), Value::from(3u8)); + log.insert(event_path!("syslog_severity"), Value::from(2u8)); + let decanter = ConfigDecanter::new(&log); + let facility = decanter.get_facility(&config_fac); + let severity = decanter.get_severity(&config_sev); + assert_eq!(facility, Facility::Daemon); + assert_eq!(severity, Severity::Critical); + + //check defaults with empty config + let 
empty_config = + toml::from_str::(r#"facility = ".missing_field""#).unwrap(); + let default_facility = decanter.get_facility(&empty_config); + let default_severity = decanter.get_severity(&empty_config); + assert_eq!(default_facility, Facility::User); + assert_eq!(default_severity, Severity::Informational); + } + + #[test] + fn test_rfc3164_sanitization() { + let config = toml::from_str::( + r#" + [syslog] + rfc = "rfc3164" + "#, + ) + .unwrap(); + + let mut log = create_simple_log(); + log.insert( + event_path!("message"), + "A\nB\tC, Привіт D, E\u{0007}F", //newline, tab, unicode + ); + + let output = run_encode(config, Event::Log(log)); + let expected_message = "A B C, D, E F"; + assert!(output.ends_with(expected_message)); + } + + #[test] + fn test_rfc5424_field_truncation() { + let long_string = "vector".repeat(50); + + let mut log = create_simple_log(); + log.insert(event_path!("long_app_name"), long_string.clone()); + log.insert(event_path!("long_proc_id"), long_string.clone()); + log.insert(event_path!("long_msg_id"), long_string.clone()); + + let config = toml::from_str::( + r#" + [syslog] + rfc = "rfc5424" + app_name = ".long_app_name" + proc_id = ".long_proc_id" + msg_id = ".long_msg_id" + "#, + ) + .unwrap(); + + let decanter = ConfigDecanter::new(&log); + let message = decanter.decant_config(&config.syslog); + + assert_eq!(message.tag.app_name.len(), 48); + assert_eq!(message.tag.proc_id.unwrap().len(), 128); + assert_eq!(message.tag.msg_id.unwrap().len(), 32); + } + + #[test] + fn test_rfc3164_tag_truncation() { + let config = toml::from_str::( + r#" + [syslog] + rfc = "rfc3164" + facility = "user" + severity = "notice" + app_name = ".app_name" + proc_id = ".proc_id" + "#, + ) + .unwrap(); + + let mut log = create_simple_log(); + log.insert( + event_path!("app_name"), + "this-is-a-very-very-long-application-name", + ); + log.insert(event_path!("proc_id"), "1234567890"); + + let output = run_encode(config, Event::Log(log)); + let expected_tag = 
"this-is-a-very-very-long-applic:"; + assert!(output.contains(expected_tag)); + } + + #[test] + fn test_rfc5424_missing_fields() { + let config = toml::from_str::( + r#" + [syslog] + rfc = "rfc5424" + app_name = ".app" # configured path, but not in log + proc_id = ".pid" # configured path, but not in log + msg_id = ".mid" # configured path, but not in log + "#, + ) + .unwrap(); + + let log = create_simple_log(); + let output = run_encode(config, Event::Log(log)); + + let expected = + "<14>1 2025-08-28T18:30:00.123456Z test-host.com vector - - - original message"; + assert_eq!(output, expected); + } + + #[test] + fn test_invalid_parsing_fallback() { + let config = toml::from_str::( + r#" + [syslog] + rfc = "rfc5424" + facility = ".fac" + severity = ".sev" + "#, + ) + .unwrap(); + + let mut log = create_simple_log(); + + log.insert(event_path!("fac"), ""); + log.insert(event_path!("sev"), "invalid_severity_name"); + + let output = run_encode(config, Event::Log(log)); + + let expected_pri = "<14>"; + assert!(output.starts_with(expected_pri)); + + let expected_suffix = "vector - - - original message"; + assert!(output.ends_with(expected_suffix)); + } + + #[test] + fn test_rfc5424_empty_message_and_sd() { + let config = toml::from_str::( + r#" + [syslog] + rfc = "rfc5424" + app_name = ".app" + proc_id = ".pid" + msg_id = ".mid" + "#, + ) + .unwrap(); + + let mut log = create_simple_log(); + log.insert(event_path!("message"), ""); + log.insert(event_path!("structured_data"), value!({})); + + let output = run_encode(config, Event::Log(log)); + let expected = "<14>1 2025-08-28T18:30:00.123456Z test-host.com vector - - -"; + assert_eq!(output, expected); + } + + #[test] + fn test_non_log_event_filtering() { + let config = toml::from_str::( + r#" + [syslog] + rfc = "rfc5424" + "#, + ) + .unwrap(); + + let metric_event = Metric(vector_core::event::Metric::new( + "metric1", + MetricKind::Incremental, + MetricValue::Distribution { + samples: vector_core::samples![10.0 => 1], + 
statistic: StatisticKind::Histogram, + }, + )); + + let mut serializer = SyslogSerializer::new(&config); + let mut buffer = BytesMut::new(); + + let result = serializer.encode(metric_event, &mut buffer); + + assert!(result.is_ok()); + assert!(buffer.is_empty()); + } + + #[test] + fn test_minimal_event() { + let config = toml::from_str::( + r#" + [syslog] + "#, + ) + .unwrap(); + let log = LogEvent::from(""); + + let output = run_encode(config, Event::Log(log)); + let expected_suffix = "vector - - -"; + assert!(output.starts_with("<14>1")); + assert!(output.ends_with(expected_suffix)); + } + + #[test] + fn test_app_name_meaning_fallback() { + let config = toml::from_str::( + r#" + [syslog] + rfc = "rfc5424" + severity = ".sev" + app_name = ".nonexistent" + "#, + ) + .unwrap(); + + let mut log = LogEvent::default(); + log.insert("syslog.service", "meaning-app"); + + let schema = schema::Definition::new_with_default_metadata( + Kind::object(btreemap! { + "syslog" => Kind::object(btreemap! { + "service" => Kind::bytes(), + }) + }), + [LogNamespace::Vector], + ); + let schema = schema.with_meaning(parse_target_path("syslog.service").unwrap(), "service"); + let mut event = Event::from(log); + event + .metadata_mut() + .set_schema_definition(&Arc::new(schema)); + + let output = run_encode(config, event); + assert!(output.contains("meaning-app - -")); + } +} diff --git a/lib/codecs/src/encoding/mod.rs b/lib/codecs/src/encoding/mod.rs index c365bc45da4fc..7d611790cb613 100644 --- a/lib/codecs/src/encoding/mod.rs +++ b/lib/codecs/src/encoding/mod.rs @@ -21,6 +21,8 @@ pub use format::{ }; #[cfg(feature = "opentelemetry")] pub use format::{OtlpSerializer, OtlpSerializerConfig}; +#[cfg(feature = "syslog")] +pub use format::{SyslogSerializer, SyslogSerializerConfig}; pub use framing::{ BoxedFramer, BoxedFramingError, BytesEncoder, BytesEncoderConfig, CharacterDelimitedEncoder, CharacterDelimitedEncoderConfig, CharacterDelimitedEncoderOptions, Framer, FramingConfig, diff --git 
a/lib/codecs/src/encoding/serializer.rs b/lib/codecs/src/encoding/serializer.rs index 899e03d60e4ec..8422587bddf28 100644 --- a/lib/codecs/src/encoding/serializer.rs +++ b/lib/codecs/src/encoding/serializer.rs @@ -8,6 +8,8 @@ use vector_core::{config::DataType, event::Event, schema}; use super::format::{ArrowStreamSerializer, ArrowStreamSerializerConfig}; #[cfg(feature = "opentelemetry")] use super::format::{OtlpSerializer, OtlpSerializerConfig}; +#[cfg(feature = "syslog")] +use super::format::{SyslogSerializer, SyslogSerializerConfig}; use super::{ chunking::Chunker, format::{ @@ -128,6 +130,11 @@ pub enum SerializerConfig { /// transform) and removing the message field while doing additional parsing on it, as this /// could lead to the encoding emitting empty strings for the given event. Text(TextSerializerConfig), + + /// Syslog encoding + /// RFC 3164 and 5424 are supported + #[cfg(feature = "syslog")] + Syslog(SyslogSerializerConfig), } impl Default for SerializerConfig { @@ -281,6 +288,8 @@ impl SerializerConfig { Ok(Serializer::RawMessage(RawMessageSerializerConfig.build())) } SerializerConfig::Text(config) => Ok(Serializer::Text(config.build())), + #[cfg(feature = "syslog")] + SerializerConfig::Syslog(config) => Ok(Serializer::Syslog(config.build())), } } @@ -313,6 +322,8 @@ impl SerializerConfig { | SerializerConfig::NativeJson | SerializerConfig::RawMessage | SerializerConfig::Text(_) => FramingConfig::NewlineDelimited, + #[cfg(feature = "syslog")] + SerializerConfig::Syslog(_) => FramingConfig::NewlineDelimited, SerializerConfig::Gelf(_) => { FramingConfig::CharacterDelimited(CharacterDelimitedEncoderConfig::new(0)) } @@ -337,6 +348,8 @@ impl SerializerConfig { SerializerConfig::Protobuf(config) => config.input_type(), SerializerConfig::RawMessage => RawMessageSerializerConfig.input_type(), SerializerConfig::Text(config) => config.input_type(), + #[cfg(feature = "syslog")] + SerializerConfig::Syslog(config) => config.input_type(), } } @@ -358,6 +371,8 @@ 
impl SerializerConfig { SerializerConfig::Protobuf(config) => config.schema_requirement(), SerializerConfig::RawMessage => RawMessageSerializerConfig.schema_requirement(), SerializerConfig::Text(config) => config.schema_requirement(), + #[cfg(feature = "syslog")] + SerializerConfig::Syslog(config) => config.schema_requirement(), } } } @@ -390,6 +405,9 @@ pub enum Serializer { RawMessage(RawMessageSerializer), /// Uses a `TextSerializer` for serialization. Text(TextSerializer), + /// Uses a `SyslogSerializer` for serialization. + #[cfg(feature = "syslog")] + Syslog(SyslogSerializer), } impl Serializer { @@ -405,6 +423,8 @@ impl Serializer { | Serializer::Native(_) | Serializer::Protobuf(_) | Serializer::RawMessage(_) => false, + #[cfg(feature = "syslog")] + Serializer::Syslog(_) => false, #[cfg(feature = "opentelemetry")] Serializer::Otlp(_) => false, } @@ -431,6 +451,10 @@ impl Serializer { | Serializer::RawMessage(_) => { panic!("Serializer does not support JSON") } + #[cfg(feature = "syslog")] + Serializer::Syslog(_) => { + panic!("Serializer does not support JSON") + } #[cfg(feature = "opentelemetry")] Serializer::Otlp(_) => { panic!("Serializer does not support JSON") @@ -458,6 +482,8 @@ impl Serializer { | Serializer::Protobuf(_) => true, #[cfg(feature = "opentelemetry")] Serializer::Otlp(_) => true, + #[cfg(feature = "syslog")] + Serializer::Syslog(_) => false, Serializer::Cef(_) | Serializer::Csv(_) | Serializer::Logfmt(_) @@ -541,6 +567,12 @@ impl From for Serializer { Self::Text(serializer) } } +#[cfg(feature = "syslog")] +impl From for Serializer { + fn from(serializer: SyslogSerializer) -> Self { + Self::Syslog(serializer) + } +} impl tokio_util::codec::Encoder for Serializer { type Error = vector_common::Error; @@ -560,6 +592,8 @@ impl tokio_util::codec::Encoder for Serializer { Serializer::Protobuf(serializer) => serializer.encode(event, buffer), Serializer::RawMessage(serializer) => serializer.encode(event, buffer), Serializer::Text(serializer) => 
serializer.encode(event, buffer), + #[cfg(feature = "syslog")] + Serializer::Syslog(serializer) => serializer.encode(event, buffer), } } } diff --git a/src/codecs/encoding/config.rs b/src/codecs/encoding/config.rs index 255db45b538ed..47bec858ffb08 100644 --- a/src/codecs/encoding/config.rs +++ b/src/codecs/encoding/config.rs @@ -132,6 +132,8 @@ impl EncodingConfigWithFraming { | Serializer::RawMessage(_) | Serializer::Text(_), ) => NewlineDelimitedEncoder::default().into(), + #[cfg(feature = "codecs-syslog")] + (None, Serializer::Syslog(_)) => NewlineDelimitedEncoder::default().into(), #[cfg(feature = "codecs-opentelemetry")] (None, Serializer::Otlp(_)) => BytesEncoder.into(), }; diff --git a/src/codecs/encoding/encoder.rs b/src/codecs/encoding/encoder.rs index 333c29b4840cf..a36b98a6496cb 100644 --- a/src/codecs/encoding/encoder.rs +++ b/src/codecs/encoding/encoder.rs @@ -198,6 +198,8 @@ impl Encoder { | Serializer::Text(_), _, ) => "text/plain", + #[cfg(feature = "codecs-syslog")] + (Serializer::Syslog(_), _) => "text/plain", #[cfg(feature = "codecs-opentelemetry")] (Serializer::Otlp(_), _) => "application/x-protobuf", } diff --git a/src/components/validation/resources/mod.rs b/src/components/validation/resources/mod.rs index 67c22910e07b0..85f72f2aaf35f 100644 --- a/src/components/validation/resources/mod.rs +++ b/src/components/validation/resources/mod.rs @@ -239,6 +239,8 @@ fn serializer_config_to_deserializer( SerializerConfig::RawMessage | SerializerConfig::Text(_) => DeserializerConfig::Bytes, #[cfg(feature = "codecs-opentelemetry")] SerializerConfig::Otlp => todo!(), + #[cfg(feature = "codecs-syslog")] + SerializerConfig::Syslog(_) => todo!(), }; deserializer_config.build() diff --git a/website/cue/reference/components/sinks/generated/amqp.cue b/website/cue/reference/components/sinks/generated/amqp.cue index 66fb1312c5695..944cd39fe9d4a 100644 --- a/website/cue/reference/components/sinks/generated/amqp.cue +++ 
b/website/cue/reference/components/sinks/generated/amqp.cue @@ -227,6 +227,10 @@ generated: components: sinks: amqp: configuration: { transform) and removing the message field while doing additional parsing on it, as this could lead to the encoding emitting empty strings for the given event. """ + syslog: """ + Syslog encoding + RFC 3164 and 5424 are supported + """ text: """ Plain text encoding. @@ -412,6 +416,54 @@ generated: components: sinks: amqp: configuration: { } } } + syslog: { + description: "Options for the Syslog serializer." + relevant_when: "codec = \"syslog\"" + required: false + type: object: options: { + app_name: { + description: """ + Path to a field in the event to use for the app name. + + If not provided, the encoder checks for a semantic "service" field. + If that is also missing, it defaults to "vector". + """ + required: false + type: string: {} + } + facility: { + description: "Path to a field in the event to use for the facility. Defaults to \"user\"." + required: false + type: string: {} + } + msg_id: { + description: "Path to a field in the event to use for the msg ID." + required: false + type: string: {} + } + proc_id: { + description: "Path to a field in the event to use for the proc ID." + required: false + type: string: {} + } + rfc: { + description: "RFC to use for formatting." + required: false + type: string: { + default: "rfc5424" + enum: { + rfc3164: "The legacy RFC3164 syslog format." + rfc5424: "The modern RFC5424 syslog format." + } + } + } + severity: { + description: "Path to a field in the event to use for the severity. Defaults to \"informational\"." + required: false + type: string: {} + } + } + } timestamp_format: { description: "Format used for timestamp fields." 
required: false diff --git a/website/cue/reference/components/sinks/generated/aws_cloudwatch_logs.cue b/website/cue/reference/components/sinks/generated/aws_cloudwatch_logs.cue index 12686c9b27b65..9a76aa1a730aa 100644 --- a/website/cue/reference/components/sinks/generated/aws_cloudwatch_logs.cue +++ b/website/cue/reference/components/sinks/generated/aws_cloudwatch_logs.cue @@ -423,6 +423,10 @@ generated: components: sinks: aws_cloudwatch_logs: configuration: { transform) and removing the message field while doing additional parsing on it, as this could lead to the encoding emitting empty strings for the given event. """ + syslog: """ + Syslog encoding + RFC 3164 and 5424 are supported + """ text: """ Plain text encoding. @@ -608,6 +612,54 @@ generated: components: sinks: aws_cloudwatch_logs: configuration: { } } } + syslog: { + description: "Options for the Syslog serializer." + relevant_when: "codec = \"syslog\"" + required: false + type: object: options: { + app_name: { + description: """ + Path to a field in the event to use for the app name. + + If not provided, the encoder checks for a semantic "service" field. + If that is also missing, it defaults to "vector". + """ + required: false + type: string: {} + } + facility: { + description: "Path to a field in the event to use for the facility. Defaults to \"user\"." + required: false + type: string: {} + } + msg_id: { + description: "Path to a field in the event to use for the msg ID." + required: false + type: string: {} + } + proc_id: { + description: "Path to a field in the event to use for the proc ID." + required: false + type: string: {} + } + rfc: { + description: "RFC to use for formatting." + required: false + type: string: { + default: "rfc5424" + enum: { + rfc3164: "The legacy RFC3164 syslog format." + rfc5424: "The modern RFC5424 syslog format." + } + } + } + severity: { + description: "Path to a field in the event to use for the severity. Defaults to \"informational\"." 
+ required: false + type: string: {} + } + } + } timestamp_format: { description: "Format used for timestamp fields." required: false diff --git a/website/cue/reference/components/sinks/generated/aws_kinesis_firehose.cue b/website/cue/reference/components/sinks/generated/aws_kinesis_firehose.cue index da311f458462e..f3f0886c0098d 100644 --- a/website/cue/reference/components/sinks/generated/aws_kinesis_firehose.cue +++ b/website/cue/reference/components/sinks/generated/aws_kinesis_firehose.cue @@ -402,6 +402,10 @@ generated: components: sinks: aws_kinesis_firehose: configuration: { transform) and removing the message field while doing additional parsing on it, as this could lead to the encoding emitting empty strings for the given event. """ + syslog: """ + Syslog encoding + RFC 3164 and 5424 are supported + """ text: """ Plain text encoding. @@ -587,6 +591,54 @@ generated: components: sinks: aws_kinesis_firehose: configuration: { } } } + syslog: { + description: "Options for the Syslog serializer." + relevant_when: "codec = \"syslog\"" + required: false + type: object: options: { + app_name: { + description: """ + Path to a field in the event to use for the app name. + + If not provided, the encoder checks for a semantic "service" field. + If that is also missing, it defaults to "vector". + """ + required: false + type: string: {} + } + facility: { + description: "Path to a field in the event to use for the facility. Defaults to \"user\"." + required: false + type: string: {} + } + msg_id: { + description: "Path to a field in the event to use for the msg ID." + required: false + type: string: {} + } + proc_id: { + description: "Path to a field in the event to use for the proc ID." + required: false + type: string: {} + } + rfc: { + description: "RFC to use for formatting." + required: false + type: string: { + default: "rfc5424" + enum: { + rfc3164: "The legacy RFC3164 syslog format." + rfc5424: "The modern RFC5424 syslog format." 
+ } + } + } + severity: { + description: "Path to a field in the event to use for the severity. Defaults to \"informational\"." + required: false + type: string: {} + } + } + } timestamp_format: { description: "Format used for timestamp fields." required: false diff --git a/website/cue/reference/components/sinks/generated/aws_kinesis_streams.cue b/website/cue/reference/components/sinks/generated/aws_kinesis_streams.cue index 4a800fa6e35da..df58e6ab7fa17 100644 --- a/website/cue/reference/components/sinks/generated/aws_kinesis_streams.cue +++ b/website/cue/reference/components/sinks/generated/aws_kinesis_streams.cue @@ -402,6 +402,10 @@ generated: components: sinks: aws_kinesis_streams: configuration: { transform) and removing the message field while doing additional parsing on it, as this could lead to the encoding emitting empty strings for the given event. """ + syslog: """ + Syslog encoding + RFC 3164 and 5424 are supported + """ text: """ Plain text encoding. @@ -587,6 +591,54 @@ generated: components: sinks: aws_kinesis_streams: configuration: { } } } + syslog: { + description: "Options for the Syslog serializer." + relevant_when: "codec = \"syslog\"" + required: false + type: object: options: { + app_name: { + description: """ + Path to a field in the event to use for the app name. + + If not provided, the encoder checks for a semantic "service" field. + If that is also missing, it defaults to "vector". + """ + required: false + type: string: {} + } + facility: { + description: "Path to a field in the event to use for the facility. Defaults to \"user\"." + required: false + type: string: {} + } + msg_id: { + description: "Path to a field in the event to use for the msg ID." + required: false + type: string: {} + } + proc_id: { + description: "Path to a field in the event to use for the proc ID." + required: false + type: string: {} + } + rfc: { + description: "RFC to use for formatting." 
+ required: false + type: string: { + default: "rfc5424" + enum: { + rfc3164: "The legacy RFC3164 syslog format." + rfc5424: "The modern RFC5424 syslog format." + } + } + } + severity: { + description: "Path to a field in the event to use for the severity. Defaults to \"informational\"." + required: false + type: string: {} + } + } + } timestamp_format: { description: "Format used for timestamp fields." required: false diff --git a/website/cue/reference/components/sinks/generated/aws_s3.cue b/website/cue/reference/components/sinks/generated/aws_s3.cue index f17b2abf1e74f..6c6dffb4fb3cb 100644 --- a/website/cue/reference/components/sinks/generated/aws_s3.cue +++ b/website/cue/reference/components/sinks/generated/aws_s3.cue @@ -511,6 +511,10 @@ generated: components: sinks: aws_s3: configuration: { transform) and removing the message field while doing additional parsing on it, as this could lead to the encoding emitting empty strings for the given event. """ + syslog: """ + Syslog encoding + RFC 3164 and 5424 are supported + """ text: """ Plain text encoding. @@ -696,6 +700,54 @@ generated: components: sinks: aws_s3: configuration: { } } } + syslog: { + description: "Options for the Syslog serializer." + relevant_when: "codec = \"syslog\"" + required: false + type: object: options: { + app_name: { + description: """ + Path to a field in the event to use for the app name. + + If not provided, the encoder checks for a semantic "service" field. + If that is also missing, it defaults to "vector". + """ + required: false + type: string: {} + } + facility: { + description: "Path to a field in the event to use for the facility. Defaults to \"user\"." + required: false + type: string: {} + } + msg_id: { + description: "Path to a field in the event to use for the msg ID." + required: false + type: string: {} + } + proc_id: { + description: "Path to a field in the event to use for the proc ID." 
+ required: false + type: string: {} + } + rfc: { + description: "RFC to use for formatting." + required: false + type: string: { + default: "rfc5424" + enum: { + rfc3164: "The legacy RFC3164 syslog format." + rfc5424: "The modern RFC5424 syslog format." + } + } + } + severity: { + description: "Path to a field in the event to use for the severity. Defaults to \"informational\"." + required: false + type: string: {} + } + } + } timestamp_format: { description: "Format used for timestamp fields." required: false diff --git a/website/cue/reference/components/sinks/generated/aws_sns.cue b/website/cue/reference/components/sinks/generated/aws_sns.cue index 1d2413066b071..6fdf00c84f64a 100644 --- a/website/cue/reference/components/sinks/generated/aws_sns.cue +++ b/website/cue/reference/components/sinks/generated/aws_sns.cue @@ -333,6 +333,10 @@ generated: components: sinks: aws_sns: configuration: { transform) and removing the message field while doing additional parsing on it, as this could lead to the encoding emitting empty strings for the given event. """ + syslog: """ + Syslog encoding + RFC 3164 and 5424 are supported + """ text: """ Plain text encoding. @@ -518,6 +522,54 @@ generated: components: sinks: aws_sns: configuration: { } } } + syslog: { + description: "Options for the Syslog serializer." + relevant_when: "codec = \"syslog\"" + required: false + type: object: options: { + app_name: { + description: """ + Path to a field in the event to use for the app name. + + If not provided, the encoder checks for a semantic "service" field. + If that is also missing, it defaults to "vector". + """ + required: false + type: string: {} + } + facility: { + description: "Path to a field in the event to use for the facility. Defaults to \"user\"." + required: false + type: string: {} + } + msg_id: { + description: "Path to a field in the event to use for the msg ID." 
+ required: false + type: string: {} + } + proc_id: { + description: "Path to a field in the event to use for the proc ID." + required: false + type: string: {} + } + rfc: { + description: "RFC to use for formatting." + required: false + type: string: { + default: "rfc5424" + enum: { + rfc3164: "The legacy RFC3164 syslog format." + rfc5424: "The modern RFC5424 syslog format." + } + } + } + severity: { + description: "Path to a field in the event to use for the severity. Defaults to \"informational\"." + required: false + type: string: {} + } + } + } timestamp_format: { description: "Format used for timestamp fields." required: false diff --git a/website/cue/reference/components/sinks/generated/aws_sqs.cue b/website/cue/reference/components/sinks/generated/aws_sqs.cue index 912e1f2c43ea6..3e346f99a5fb2 100644 --- a/website/cue/reference/components/sinks/generated/aws_sqs.cue +++ b/website/cue/reference/components/sinks/generated/aws_sqs.cue @@ -333,6 +333,10 @@ generated: components: sinks: aws_sqs: configuration: { transform) and removing the message field while doing additional parsing on it, as this could lead to the encoding emitting empty strings for the given event. """ + syslog: """ + Syslog encoding + RFC 3164 and 5424 are supported + """ text: """ Plain text encoding. @@ -518,6 +522,54 @@ generated: components: sinks: aws_sqs: configuration: { } } } + syslog: { + description: "Options for the Syslog serializer." + relevant_when: "codec = \"syslog\"" + required: false + type: object: options: { + app_name: { + description: """ + Path to a field in the event to use for the app name. + + If not provided, the encoder checks for a semantic "service" field. + If that is also missing, it defaults to "vector". + """ + required: false + type: string: {} + } + facility: { + description: "Path to a field in the event to use for the facility. Defaults to \"user\"." 
+ required: false + type: string: {} + } + msg_id: { + description: "Path to a field in the event to use for the msg ID." + required: false + type: string: {} + } + proc_id: { + description: "Path to a field in the event to use for the proc ID." + required: false + type: string: {} + } + rfc: { + description: "RFC to use for formatting." + required: false + type: string: { + default: "rfc5424" + enum: { + rfc3164: "The legacy RFC3164 syslog format." + rfc5424: "The modern RFC5424 syslog format." + } + } + } + severity: { + description: "Path to a field in the event to use for the severity. Defaults to \"informational\"." + required: false + type: string: {} + } + } + } timestamp_format: { description: "Format used for timestamp fields." required: false diff --git a/website/cue/reference/components/sinks/generated/azure_blob.cue b/website/cue/reference/components/sinks/generated/azure_blob.cue index 69bdd368f1338..5ea0c1dd20221 100644 --- a/website/cue/reference/components/sinks/generated/azure_blob.cue +++ b/website/cue/reference/components/sinks/generated/azure_blob.cue @@ -357,6 +357,10 @@ generated: components: sinks: azure_blob: configuration: { transform) and removing the message field while doing additional parsing on it, as this could lead to the encoding emitting empty strings for the given event. """ + syslog: """ + Syslog encoding + RFC 3164 and 5424 are supported + """ text: """ Plain text encoding. @@ -542,6 +546,54 @@ generated: components: sinks: azure_blob: configuration: { } } } + syslog: { + description: "Options for the Syslog serializer." + relevant_when: "codec = \"syslog\"" + required: false + type: object: options: { + app_name: { + description: """ + Path to a field in the event to use for the app name. + + If not provided, the encoder checks for a semantic "service" field. + If that is also missing, it defaults to "vector". 
+ """ + required: false + type: string: {} + } + facility: { + description: "Path to a field in the event to use for the facility. Defaults to \"user\"." + required: false + type: string: {} + } + msg_id: { + description: "Path to a field in the event to use for the msg ID." + required: false + type: string: {} + } + proc_id: { + description: "Path to a field in the event to use for the proc ID." + required: false + type: string: {} + } + rfc: { + description: "RFC to use for formatting." + required: false + type: string: { + default: "rfc5424" + enum: { + rfc3164: "The legacy RFC3164 syslog format." + rfc5424: "The modern RFC5424 syslog format." + } + } + } + severity: { + description: "Path to a field in the event to use for the severity. Defaults to \"informational\"." + required: false + type: string: {} + } + } + } timestamp_format: { description: "Format used for timestamp fields." required: false diff --git a/website/cue/reference/components/sinks/generated/console.cue b/website/cue/reference/components/sinks/generated/console.cue index 16545964f6950..7b525bd20734f 100644 --- a/website/cue/reference/components/sinks/generated/console.cue +++ b/website/cue/reference/components/sinks/generated/console.cue @@ -211,6 +211,10 @@ generated: components: sinks: console: configuration: { transform) and removing the message field while doing additional parsing on it, as this could lead to the encoding emitting empty strings for the given event. """ + syslog: """ + Syslog encoding + RFC 3164 and 5424 are supported + """ text: """ Plain text encoding. @@ -396,6 +400,54 @@ generated: components: sinks: console: configuration: { } } } + syslog: { + description: "Options for the Syslog serializer." + relevant_when: "codec = \"syslog\"" + required: false + type: object: options: { + app_name: { + description: """ + Path to a field in the event to use for the app name. + + If not provided, the encoder checks for a semantic "service" field. 
+ If that is also missing, it defaults to "vector". + """ + required: false + type: string: {} + } + facility: { + description: "Path to a field in the event to use for the facility. Defaults to \"user\"." + required: false + type: string: {} + } + msg_id: { + description: "Path to a field in the event to use for the msg ID." + required: false + type: string: {} + } + proc_id: { + description: "Path to a field in the event to use for the proc ID." + required: false + type: string: {} + } + rfc: { + description: "RFC to use for formatting." + required: false + type: string: { + default: "rfc5424" + enum: { + rfc3164: "The legacy RFC3164 syslog format." + rfc5424: "The modern RFC5424 syslog format." + } + } + } + severity: { + description: "Path to a field in the event to use for the severity. Defaults to \"informational\"." + required: false + type: string: {} + } + } + } timestamp_format: { description: "Format used for timestamp fields." required: false diff --git a/website/cue/reference/components/sinks/generated/file.cue b/website/cue/reference/components/sinks/generated/file.cue index 3135f4b70db5f..112bb8b4d90aa 100644 --- a/website/cue/reference/components/sinks/generated/file.cue +++ b/website/cue/reference/components/sinks/generated/file.cue @@ -231,6 +231,10 @@ generated: components: sinks: file: configuration: { transform) and removing the message field while doing additional parsing on it, as this could lead to the encoding emitting empty strings for the given event. """ + syslog: """ + Syslog encoding + RFC 3164 and 5424 are supported + """ text: """ Plain text encoding. @@ -416,6 +420,54 @@ generated: components: sinks: file: configuration: { } } } + syslog: { + description: "Options for the Syslog serializer." + relevant_when: "codec = \"syslog\"" + required: false + type: object: options: { + app_name: { + description: """ + Path to a field in the event to use for the app name. + + If not provided, the encoder checks for a semantic "service" field. 
+ If that is also missing, it defaults to "vector". + """ + required: false + type: string: {} + } + facility: { + description: "Path to a field in the event to use for the facility. Defaults to \"user\"." + required: false + type: string: {} + } + msg_id: { + description: "Path to a field in the event to use for the msg ID." + required: false + type: string: {} + } + proc_id: { + description: "Path to a field in the event to use for the proc ID." + required: false + type: string: {} + } + rfc: { + description: "RFC to use for formatting." + required: false + type: string: { + default: "rfc5424" + enum: { + rfc3164: "The legacy RFC3164 syslog format." + rfc5424: "The modern RFC5424 syslog format." + } + } + } + severity: { + description: "Path to a field in the event to use for the severity. Defaults to \"informational\"." + required: false + type: string: {} + } + } + } timestamp_format: { description: "Format used for timestamp fields." required: false diff --git a/website/cue/reference/components/sinks/generated/gcp_chronicle_unstructured.cue b/website/cue/reference/components/sinks/generated/gcp_chronicle_unstructured.cue index 0a027dfa45a74..a7d18ff237b98 100644 --- a/website/cue/reference/components/sinks/generated/gcp_chronicle_unstructured.cue +++ b/website/cue/reference/components/sinks/generated/gcp_chronicle_unstructured.cue @@ -299,6 +299,10 @@ generated: components: sinks: gcp_chronicle_unstructured: configuration: { transform) and removing the message field while doing additional parsing on it, as this could lead to the encoding emitting empty strings for the given event. """ + syslog: """ + Syslog encoding + RFC 3164 and 5424 are supported + """ text: """ Plain text encoding. @@ -484,6 +488,54 @@ generated: components: sinks: gcp_chronicle_unstructured: configuration: { } } } + syslog: { + description: "Options for the Syslog serializer." 
+ relevant_when: "codec = \"syslog\"" + required: false + type: object: options: { + app_name: { + description: """ + Path to a field in the event to use for the app name. + + If not provided, the encoder checks for a semantic "service" field. + If that is also missing, it defaults to "vector". + """ + required: false + type: string: {} + } + facility: { + description: "Path to a field in the event to use for the facility. Defaults to \"user\"." + required: false + type: string: {} + } + msg_id: { + description: "Path to a field in the event to use for the msg ID." + required: false + type: string: {} + } + proc_id: { + description: "Path to a field in the event to use for the proc ID." + required: false + type: string: {} + } + rfc: { + description: "RFC to use for formatting." + required: false + type: string: { + default: "rfc5424" + enum: { + rfc3164: "The legacy RFC3164 syslog format." + rfc5424: "The modern RFC5424 syslog format." + } + } + } + severity: { + description: "Path to a field in the event to use for the severity. Defaults to \"informational\"." + required: false + type: string: {} + } + } + } timestamp_format: { description: "Format used for timestamp fields." required: false diff --git a/website/cue/reference/components/sinks/generated/gcp_cloud_storage.cue b/website/cue/reference/components/sinks/generated/gcp_cloud_storage.cue index 22f565c34665d..17681f2adb96c 100644 --- a/website/cue/reference/components/sinks/generated/gcp_cloud_storage.cue +++ b/website/cue/reference/components/sinks/generated/gcp_cloud_storage.cue @@ -372,6 +372,10 @@ generated: components: sinks: gcp_cloud_storage: configuration: { transform) and removing the message field while doing additional parsing on it, as this could lead to the encoding emitting empty strings for the given event. """ + syslog: """ + Syslog encoding + RFC 3164 and 5424 are supported + """ text: """ Plain text encoding. 
@@ -557,6 +561,54 @@ generated: components: sinks: gcp_cloud_storage: configuration: { } } } + syslog: { + description: "Options for the Syslog serializer." + relevant_when: "codec = \"syslog\"" + required: false + type: object: options: { + app_name: { + description: """ + Path to a field in the event to use for the app name. + + If not provided, the encoder checks for a semantic "service" field. + If that is also missing, it defaults to "vector". + """ + required: false + type: string: {} + } + facility: { + description: "Path to a field in the event to use for the facility. Defaults to \"user\"." + required: false + type: string: {} + } + msg_id: { + description: "Path to a field in the event to use for the msg ID." + required: false + type: string: {} + } + proc_id: { + description: "Path to a field in the event to use for the proc ID." + required: false + type: string: {} + } + rfc: { + description: "RFC to use for formatting." + required: false + type: string: { + default: "rfc5424" + enum: { + rfc3164: "The legacy RFC3164 syslog format." + rfc5424: "The modern RFC5424 syslog format." + } + } + } + severity: { + description: "Path to a field in the event to use for the severity. Defaults to \"informational\"." + required: false + type: string: {} + } + } + } timestamp_format: { description: "Format used for timestamp fields." required: false diff --git a/website/cue/reference/components/sinks/generated/gcp_pubsub.cue b/website/cue/reference/components/sinks/generated/gcp_pubsub.cue index 385bf9dd13793..f13b418f70382 100644 --- a/website/cue/reference/components/sinks/generated/gcp_pubsub.cue +++ b/website/cue/reference/components/sinks/generated/gcp_pubsub.cue @@ -278,6 +278,10 @@ generated: components: sinks: gcp_pubsub: configuration: { transform) and removing the message field while doing additional parsing on it, as this could lead to the encoding emitting empty strings for the given event. 
""" + syslog: """ + Syslog encoding + RFC 3164 and 5424 are supported + """ text: """ Plain text encoding. @@ -463,6 +467,54 @@ generated: components: sinks: gcp_pubsub: configuration: { } } } + syslog: { + description: "Options for the Syslog serializer." + relevant_when: "codec = \"syslog\"" + required: false + type: object: options: { + app_name: { + description: """ + Path to a field in the event to use for the app name. + + If not provided, the encoder checks for a semantic "service" field. + If that is also missing, it defaults to "vector". + """ + required: false + type: string: {} + } + facility: { + description: "Path to a field in the event to use for the facility. Defaults to \"user\"." + required: false + type: string: {} + } + msg_id: { + description: "Path to a field in the event to use for the msg ID." + required: false + type: string: {} + } + proc_id: { + description: "Path to a field in the event to use for the proc ID." + required: false + type: string: {} + } + rfc: { + description: "RFC to use for formatting." + required: false + type: string: { + default: "rfc5424" + enum: { + rfc3164: "The legacy RFC3164 syslog format." + rfc5424: "The modern RFC5424 syslog format." + } + } + } + severity: { + description: "Path to a field in the event to use for the severity. Defaults to \"informational\"." + required: false + type: string: {} + } + } + } timestamp_format: { description: "Format used for timestamp fields." 
required: false diff --git a/website/cue/reference/components/sinks/generated/http.cue b/website/cue/reference/components/sinks/generated/http.cue index cc4da5bf49425..61fc5eab98ff2 100644 --- a/website/cue/reference/components/sinks/generated/http.cue +++ b/website/cue/reference/components/sinks/generated/http.cue @@ -461,6 +461,10 @@ generated: components: sinks: http: configuration: { transform) and removing the message field while doing additional parsing on it, as this could lead to the encoding emitting empty strings for the given event. """ + syslog: """ + Syslog encoding + RFC 3164 and 5424 are supported + """ text: """ Plain text encoding. @@ -646,6 +650,54 @@ generated: components: sinks: http: configuration: { } } } + syslog: { + description: "Options for the Syslog serializer." + relevant_when: "codec = \"syslog\"" + required: false + type: object: options: { + app_name: { + description: """ + Path to a field in the event to use for the app name. + + If not provided, the encoder checks for a semantic "service" field. + If that is also missing, it defaults to "vector". + """ + required: false + type: string: {} + } + facility: { + description: "Path to a field in the event to use for the facility. Defaults to \"user\"." + required: false + type: string: {} + } + msg_id: { + description: "Path to a field in the event to use for the msg ID." + required: false + type: string: {} + } + proc_id: { + description: "Path to a field in the event to use for the proc ID." + required: false + type: string: {} + } + rfc: { + description: "RFC to use for formatting." + required: false + type: string: { + default: "rfc5424" + enum: { + rfc3164: "The legacy RFC3164 syslog format." + rfc5424: "The modern RFC5424 syslog format." + } + } + } + severity: { + description: "Path to a field in the event to use for the severity. Defaults to \"informational\"." + required: false + type: string: {} + } + } + } timestamp_format: { description: "Format used for timestamp fields." 
required: false diff --git a/website/cue/reference/components/sinks/generated/humio_logs.cue b/website/cue/reference/components/sinks/generated/humio_logs.cue index b9e46513d8b9c..b1380c560a221 100644 --- a/website/cue/reference/components/sinks/generated/humio_logs.cue +++ b/website/cue/reference/components/sinks/generated/humio_logs.cue @@ -277,6 +277,10 @@ generated: components: sinks: humio_logs: configuration: { transform) and removing the message field while doing additional parsing on it, as this could lead to the encoding emitting empty strings for the given event. """ + syslog: """ + Syslog encoding + RFC 3164 and 5424 are supported + """ text: """ Plain text encoding. @@ -462,6 +466,54 @@ generated: components: sinks: humio_logs: configuration: { } } } + syslog: { + description: "Options for the Syslog serializer." + relevant_when: "codec = \"syslog\"" + required: false + type: object: options: { + app_name: { + description: """ + Path to a field in the event to use for the app name. + + If not provided, the encoder checks for a semantic "service" field. + If that is also missing, it defaults to "vector". + """ + required: false + type: string: {} + } + facility: { + description: "Path to a field in the event to use for the facility. Defaults to \"user\"." + required: false + type: string: {} + } + msg_id: { + description: "Path to a field in the event to use for the msg ID." + required: false + type: string: {} + } + proc_id: { + description: "Path to a field in the event to use for the proc ID." + required: false + type: string: {} + } + rfc: { + description: "RFC to use for formatting." + required: false + type: string: { + default: "rfc5424" + enum: { + rfc3164: "The legacy RFC3164 syslog format." + rfc5424: "The modern RFC5424 syslog format." + } + } + } + severity: { + description: "Path to a field in the event to use for the severity. Defaults to \"informational\"." 
+ required: false + type: string: {} + } + } + } timestamp_format: { description: "Format used for timestamp fields." required: false diff --git a/website/cue/reference/components/sinks/generated/kafka.cue b/website/cue/reference/components/sinks/generated/kafka.cue index 9b3ef74b7dd76..e8b75370974bd 100644 --- a/website/cue/reference/components/sinks/generated/kafka.cue +++ b/website/cue/reference/components/sinks/generated/kafka.cue @@ -266,6 +266,10 @@ generated: components: sinks: kafka: configuration: { transform) and removing the message field while doing additional parsing on it, as this could lead to the encoding emitting empty strings for the given event. """ + syslog: """ + Syslog encoding + RFC 3164 and 5424 are supported + """ text: """ Plain text encoding. @@ -451,6 +455,54 @@ generated: components: sinks: kafka: configuration: { } } } + syslog: { + description: "Options for the Syslog serializer." + relevant_when: "codec = \"syslog\"" + required: false + type: object: options: { + app_name: { + description: """ + Path to a field in the event to use for the app name. + + If not provided, the encoder checks for a semantic "service" field. + If that is also missing, it defaults to "vector". + """ + required: false + type: string: {} + } + facility: { + description: "Path to a field in the event to use for the facility. Defaults to \"user\"." + required: false + type: string: {} + } + msg_id: { + description: "Path to a field in the event to use for the msg ID." + required: false + type: string: {} + } + proc_id: { + description: "Path to a field in the event to use for the proc ID." + required: false + type: string: {} + } + rfc: { + description: "RFC to use for formatting." + required: false + type: string: { + default: "rfc5424" + enum: { + rfc3164: "The legacy RFC3164 syslog format." + rfc5424: "The modern RFC5424 syslog format." + } + } + } + severity: { + description: "Path to a field in the event to use for the severity. 
Defaults to \"informational\"." + required: false + type: string: {} + } + } + } timestamp_format: { description: "Format used for timestamp fields." required: false diff --git a/website/cue/reference/components/sinks/generated/loki.cue b/website/cue/reference/components/sinks/generated/loki.cue index 933394342d1bd..5d52a07c08355 100644 --- a/website/cue/reference/components/sinks/generated/loki.cue +++ b/website/cue/reference/components/sinks/generated/loki.cue @@ -463,6 +463,10 @@ generated: components: sinks: loki: configuration: { transform) and removing the message field while doing additional parsing on it, as this could lead to the encoding emitting empty strings for the given event. """ + syslog: """ + Syslog encoding + RFC 3164 and 5424 are supported + """ text: """ Plain text encoding. @@ -648,6 +652,54 @@ generated: components: sinks: loki: configuration: { } } } + syslog: { + description: "Options for the Syslog serializer." + relevant_when: "codec = \"syslog\"" + required: false + type: object: options: { + app_name: { + description: """ + Path to a field in the event to use for the app name. + + If not provided, the encoder checks for a semantic "service" field. + If that is also missing, it defaults to "vector". + """ + required: false + type: string: {} + } + facility: { + description: "Path to a field in the event to use for the facility. Defaults to \"user\"." + required: false + type: string: {} + } + msg_id: { + description: "Path to a field in the event to use for the msg ID." + required: false + type: string: {} + } + proc_id: { + description: "Path to a field in the event to use for the proc ID." + required: false + type: string: {} + } + rfc: { + description: "RFC to use for formatting." + required: false + type: string: { + default: "rfc5424" + enum: { + rfc3164: "The legacy RFC3164 syslog format." + rfc5424: "The modern RFC5424 syslog format." + } + } + } + severity: { + description: "Path to a field in the event to use for the severity. 
Defaults to \"informational\"." + required: false + type: string: {} + } + } + } timestamp_format: { description: "Format used for timestamp fields." required: false diff --git a/website/cue/reference/components/sinks/generated/mqtt.cue b/website/cue/reference/components/sinks/generated/mqtt.cue index 980996cff164d..20f9ad4fa00f5 100644 --- a/website/cue/reference/components/sinks/generated/mqtt.cue +++ b/website/cue/reference/components/sinks/generated/mqtt.cue @@ -221,6 +221,10 @@ generated: components: sinks: mqtt: configuration: { transform) and removing the message field while doing additional parsing on it, as this could lead to the encoding emitting empty strings for the given event. """ + syslog: """ + Syslog encoding + RFC 3164 and 5424 are supported + """ text: """ Plain text encoding. @@ -406,6 +410,54 @@ generated: components: sinks: mqtt: configuration: { } } } + syslog: { + description: "Options for the Syslog serializer." + relevant_when: "codec = \"syslog\"" + required: false + type: object: options: { + app_name: { + description: """ + Path to a field in the event to use for the app name. + + If not provided, the encoder checks for a semantic "service" field. + If that is also missing, it defaults to "vector". + """ + required: false + type: string: {} + } + facility: { + description: "Path to a field in the event to use for the facility. Defaults to \"user\"." + required: false + type: string: {} + } + msg_id: { + description: "Path to a field in the event to use for the msg ID." + required: false + type: string: {} + } + proc_id: { + description: "Path to a field in the event to use for the proc ID." + required: false + type: string: {} + } + rfc: { + description: "RFC to use for formatting." + required: false + type: string: { + default: "rfc5424" + enum: { + rfc3164: "The legacy RFC3164 syslog format." + rfc5424: "The modern RFC5424 syslog format." + } + } + } + severity: { + description: "Path to a field in the event to use for the severity. 
Defaults to \"informational\"." + required: false + type: string: {} + } + } + } timestamp_format: { description: "Format used for timestamp fields." required: false diff --git a/website/cue/reference/components/sinks/generated/nats.cue b/website/cue/reference/components/sinks/generated/nats.cue index 0979492903e9c..0a53e8a2dfdf8 100644 --- a/website/cue/reference/components/sinks/generated/nats.cue +++ b/website/cue/reference/components/sinks/generated/nats.cue @@ -311,6 +311,10 @@ generated: components: sinks: nats: configuration: { transform) and removing the message field while doing additional parsing on it, as this could lead to the encoding emitting empty strings for the given event. """ + syslog: """ + Syslog encoding + RFC 3164 and 5424 are supported + """ text: """ Plain text encoding. @@ -496,6 +500,54 @@ generated: components: sinks: nats: configuration: { } } } + syslog: { + description: "Options for the Syslog serializer." + relevant_when: "codec = \"syslog\"" + required: false + type: object: options: { + app_name: { + description: """ + Path to a field in the event to use for the app name. + + If not provided, the encoder checks for a semantic "service" field. + If that is also missing, it defaults to "vector". + """ + required: false + type: string: {} + } + facility: { + description: "Path to a field in the event to use for the facility. Defaults to \"user\"." + required: false + type: string: {} + } + msg_id: { + description: "Path to a field in the event to use for the msg ID." + required: false + type: string: {} + } + proc_id: { + description: "Path to a field in the event to use for the proc ID." + required: false + type: string: {} + } + rfc: { + description: "RFC to use for formatting." + required: false + type: string: { + default: "rfc5424" + enum: { + rfc3164: "The legacy RFC3164 syslog format." + rfc5424: "The modern RFC5424 syslog format." + } + } + } + severity: { + description: "Path to a field in the event to use for the severity. 
Defaults to \"informational\"." + required: false + type: string: {} + } + } + } timestamp_format: { description: "Format used for timestamp fields." required: false diff --git a/website/cue/reference/components/sinks/generated/opentelemetry.cue b/website/cue/reference/components/sinks/generated/opentelemetry.cue index 897288dbdfe72..f48dfd18a61a4 100644 --- a/website/cue/reference/components/sinks/generated/opentelemetry.cue +++ b/website/cue/reference/components/sinks/generated/opentelemetry.cue @@ -464,6 +464,10 @@ generated: components: sinks: opentelemetry: configuration: protocol: { transform) and removing the message field while doing additional parsing on it, as this could lead to the encoding emitting empty strings for the given event. """ + syslog: """ + Syslog encoding + RFC 3164 and 5424 are supported + """ text: """ Plain text encoding. @@ -649,6 +653,54 @@ generated: components: sinks: opentelemetry: configuration: protocol: { } } } + syslog: { + description: "Options for the Syslog serializer." + relevant_when: "codec = \"syslog\"" + required: false + type: object: options: { + app_name: { + description: """ + Path to a field in the event to use for the app name. + + If not provided, the encoder checks for a semantic "service" field. + If that is also missing, it defaults to "vector". + """ + required: false + type: string: {} + } + facility: { + description: "Path to a field in the event to use for the facility. Defaults to \"user\"." + required: false + type: string: {} + } + msg_id: { + description: "Path to a field in the event to use for the msg ID." + required: false + type: string: {} + } + proc_id: { + description: "Path to a field in the event to use for the proc ID." + required: false + type: string: {} + } + rfc: { + description: "RFC to use for formatting." + required: false + type: string: { + default: "rfc5424" + enum: { + rfc3164: "The legacy RFC3164 syslog format." + rfc5424: "The modern RFC5424 syslog format." 
+ } + } + } + severity: { + description: "Path to a field in the event to use for the severity. Defaults to \"informational\"." + required: false + type: string: {} + } + } + } timestamp_format: { description: "Format used for timestamp fields." required: false diff --git a/website/cue/reference/components/sinks/generated/papertrail.cue b/website/cue/reference/components/sinks/generated/papertrail.cue index b69042f48b2ff..42bbce7c9fb99 100644 --- a/website/cue/reference/components/sinks/generated/papertrail.cue +++ b/website/cue/reference/components/sinks/generated/papertrail.cue @@ -211,6 +211,10 @@ generated: components: sinks: papertrail: configuration: { transform) and removing the message field while doing additional parsing on it, as this could lead to the encoding emitting empty strings for the given event. """ + syslog: """ + Syslog encoding + RFC 3164 and 5424 are supported + """ text: """ Plain text encoding. @@ -396,6 +400,54 @@ generated: components: sinks: papertrail: configuration: { } } } + syslog: { + description: "Options for the Syslog serializer." + relevant_when: "codec = \"syslog\"" + required: false + type: object: options: { + app_name: { + description: """ + Path to a field in the event to use for the app name. + + If not provided, the encoder checks for a semantic "service" field. + If that is also missing, it defaults to "vector". + """ + required: false + type: string: {} + } + facility: { + description: "Path to a field in the event to use for the facility. Defaults to \"user\"." + required: false + type: string: {} + } + msg_id: { + description: "Path to a field in the event to use for the msg ID." + required: false + type: string: {} + } + proc_id: { + description: "Path to a field in the event to use for the proc ID." + required: false + type: string: {} + } + rfc: { + description: "RFC to use for formatting." + required: false + type: string: { + default: "rfc5424" + enum: { + rfc3164: "The legacy RFC3164 syslog format." 
+ rfc5424: "The modern RFC5424 syslog format." + } + } + } + severity: { + description: "Path to a field in the event to use for the severity. Defaults to \"informational\"." + required: false + type: string: {} + } + } + } timestamp_format: { description: "Format used for timestamp fields." required: false diff --git a/website/cue/reference/components/sinks/generated/pulsar.cue b/website/cue/reference/components/sinks/generated/pulsar.cue index cc2cb6c90cb3a..7bae97a83c26b 100644 --- a/website/cue/reference/components/sinks/generated/pulsar.cue +++ b/website/cue/reference/components/sinks/generated/pulsar.cue @@ -345,6 +345,10 @@ generated: components: sinks: pulsar: configuration: { transform) and removing the message field while doing additional parsing on it, as this could lead to the encoding emitting empty strings for the given event. """ + syslog: """ + Syslog encoding + RFC 3164 and 5424 are supported + """ text: """ Plain text encoding. @@ -530,6 +534,54 @@ generated: components: sinks: pulsar: configuration: { } } } + syslog: { + description: "Options for the Syslog serializer." + relevant_when: "codec = \"syslog\"" + required: false + type: object: options: { + app_name: { + description: """ + Path to a field in the event to use for the app name. + + If not provided, the encoder checks for a semantic "service" field. + If that is also missing, it defaults to "vector". + """ + required: false + type: string: {} + } + facility: { + description: "Path to a field in the event to use for the facility. Defaults to \"user\"." + required: false + type: string: {} + } + msg_id: { + description: "Path to a field in the event to use for the msg ID." + required: false + type: string: {} + } + proc_id: { + description: "Path to a field in the event to use for the proc ID." + required: false + type: string: {} + } + rfc: { + description: "RFC to use for formatting." 
+ required: false + type: string: { + default: "rfc5424" + enum: { + rfc3164: "The legacy RFC3164 syslog format." + rfc5424: "The modern RFC5424 syslog format." + } + } + } + severity: { + description: "Path to a field in the event to use for the severity. Defaults to \"informational\"." + required: false + type: string: {} + } + } + } timestamp_format: { description: "Format used for timestamp fields." required: false diff --git a/website/cue/reference/components/sinks/generated/redis.cue b/website/cue/reference/components/sinks/generated/redis.cue index fdd5686420da8..1dccc4340db69 100644 --- a/website/cue/reference/components/sinks/generated/redis.cue +++ b/website/cue/reference/components/sinks/generated/redis.cue @@ -270,6 +270,10 @@ generated: components: sinks: redis: configuration: { transform) and removing the message field while doing additional parsing on it, as this could lead to the encoding emitting empty strings for the given event. """ + syslog: """ + Syslog encoding + RFC 3164 and 5424 are supported + """ text: """ Plain text encoding. @@ -455,6 +459,54 @@ generated: components: sinks: redis: configuration: { } } } + syslog: { + description: "Options for the Syslog serializer." + relevant_when: "codec = \"syslog\"" + required: false + type: object: options: { + app_name: { + description: """ + Path to a field in the event to use for the app name. + + If not provided, the encoder checks for a semantic "service" field. + If that is also missing, it defaults to "vector". + """ + required: false + type: string: {} + } + facility: { + description: "Path to a field in the event to use for the facility. Defaults to \"user\"." + required: false + type: string: {} + } + msg_id: { + description: "Path to a field in the event to use for the msg ID." + required: false + type: string: {} + } + proc_id: { + description: "Path to a field in the event to use for the proc ID." 
+ required: false + type: string: {} + } + rfc: { + description: "RFC to use for formatting." + required: false + type: string: { + default: "rfc5424" + enum: { + rfc3164: "The legacy RFC3164 syslog format." + rfc5424: "The modern RFC5424 syslog format." + } + } + } + severity: { + description: "Path to a field in the event to use for the severity. Defaults to \"informational\"." + required: false + type: string: {} + } + } + } timestamp_format: { description: "Format used for timestamp fields." required: false diff --git a/website/cue/reference/components/sinks/generated/socket.cue b/website/cue/reference/components/sinks/generated/socket.cue index 17d6e8cf7d4d0..4c88763192ae2 100644 --- a/website/cue/reference/components/sinks/generated/socket.cue +++ b/website/cue/reference/components/sinks/generated/socket.cue @@ -223,6 +223,10 @@ generated: components: sinks: socket: configuration: { transform) and removing the message field while doing additional parsing on it, as this could lead to the encoding emitting empty strings for the given event. """ + syslog: """ + Syslog encoding + RFC 3164 and 5424 are supported + """ text: """ Plain text encoding. @@ -408,6 +412,54 @@ generated: components: sinks: socket: configuration: { } } } + syslog: { + description: "Options for the Syslog serializer." + relevant_when: "codec = \"syslog\"" + required: false + type: object: options: { + app_name: { + description: """ + Path to a field in the event to use for the app name. + + If not provided, the encoder checks for a semantic "service" field. + If that is also missing, it defaults to "vector". + """ + required: false + type: string: {} + } + facility: { + description: "Path to a field in the event to use for the facility. Defaults to \"user\"." + required: false + type: string: {} + } + msg_id: { + description: "Path to a field in the event to use for the msg ID." 
+ required: false + type: string: {} + } + proc_id: { + description: "Path to a field in the event to use for the proc ID." + required: false + type: string: {} + } + rfc: { + description: "RFC to use for formatting." + required: false + type: string: { + default: "rfc5424" + enum: { + rfc3164: "The legacy RFC3164 syslog format." + rfc5424: "The modern RFC5424 syslog format." + } + } + } + severity: { + description: "Path to a field in the event to use for the severity. Defaults to \"informational\"." + required: false + type: string: {} + } + } + } timestamp_format: { description: "Format used for timestamp fields." required: false diff --git a/website/cue/reference/components/sinks/generated/splunk_hec_logs.cue b/website/cue/reference/components/sinks/generated/splunk_hec_logs.cue index ce87f34e9ff83..86902622c8a40 100644 --- a/website/cue/reference/components/sinks/generated/splunk_hec_logs.cue +++ b/website/cue/reference/components/sinks/generated/splunk_hec_logs.cue @@ -327,6 +327,10 @@ generated: components: sinks: splunk_hec_logs: configuration: { transform) and removing the message field while doing additional parsing on it, as this could lead to the encoding emitting empty strings for the given event. """ + syslog: """ + Syslog encoding + RFC 3164 and 5424 are supported + """ text: """ Plain text encoding. @@ -512,6 +516,54 @@ generated: components: sinks: splunk_hec_logs: configuration: { } } } + syslog: { + description: "Options for the Syslog serializer." + relevant_when: "codec = \"syslog\"" + required: false + type: object: options: { + app_name: { + description: """ + Path to a field in the event to use for the app name. + + If not provided, the encoder checks for a semantic "service" field. + If that is also missing, it defaults to "vector". + """ + required: false + type: string: {} + } + facility: { + description: "Path to a field in the event to use for the facility. Defaults to \"user\"." 
+ required: false + type: string: {} + } + msg_id: { + description: "Path to a field in the event to use for the msg ID." + required: false + type: string: {} + } + proc_id: { + description: "Path to a field in the event to use for the proc ID." + required: false + type: string: {} + } + rfc: { + description: "RFC to use for formatting." + required: false + type: string: { + default: "rfc5424" + enum: { + rfc3164: "The legacy RFC3164 syslog format." + rfc5424: "The modern RFC5424 syslog format." + } + } + } + severity: { + description: "Path to a field in the event to use for the severity. Defaults to \"informational\"." + required: false + type: string: {} + } + } + } timestamp_format: { description: "Format used for timestamp fields." required: false diff --git a/website/cue/reference/components/sinks/generated/webhdfs.cue b/website/cue/reference/components/sinks/generated/webhdfs.cue index 236bd9f491f33..12779ca5ff6f6 100644 --- a/website/cue/reference/components/sinks/generated/webhdfs.cue +++ b/website/cue/reference/components/sinks/generated/webhdfs.cue @@ -277,6 +277,10 @@ generated: components: sinks: webhdfs: configuration: { transform) and removing the message field while doing additional parsing on it, as this could lead to the encoding emitting empty strings for the given event. """ + syslog: """ + Syslog encoding + RFC 3164 and 5424 are supported + """ text: """ Plain text encoding. @@ -462,6 +466,54 @@ generated: components: sinks: webhdfs: configuration: { } } } + syslog: { + description: "Options for the Syslog serializer." + relevant_when: "codec = \"syslog\"" + required: false + type: object: options: { + app_name: { + description: """ + Path to a field in the event to use for the app name. + + If not provided, the encoder checks for a semantic "service" field. + If that is also missing, it defaults to "vector". + """ + required: false + type: string: {} + } + facility: { + description: "Path to a field in the event to use for the facility. 
Defaults to \"user\"." + required: false + type: string: {} + } + msg_id: { + description: "Path to a field in the event to use for the msg ID." + required: false + type: string: {} + } + proc_id: { + description: "Path to a field in the event to use for the proc ID." + required: false + type: string: {} + } + rfc: { + description: "RFC to use for formatting." + required: false + type: string: { + default: "rfc5424" + enum: { + rfc3164: "The legacy RFC3164 syslog format." + rfc5424: "The modern RFC5424 syslog format." + } + } + } + severity: { + description: "Path to a field in the event to use for the severity. Defaults to \"informational\"." + required: false + type: string: {} + } + } + } timestamp_format: { description: "Format used for timestamp fields." required: false diff --git a/website/cue/reference/components/sinks/generated/websocket.cue b/website/cue/reference/components/sinks/generated/websocket.cue index 146bba2dada78..7a550071e25ed 100644 --- a/website/cue/reference/components/sinks/generated/websocket.cue +++ b/website/cue/reference/components/sinks/generated/websocket.cue @@ -390,6 +390,10 @@ generated: components: sinks: websocket: configuration: { transform) and removing the message field while doing additional parsing on it, as this could lead to the encoding emitting empty strings for the given event. """ + syslog: """ + Syslog encoding + RFC 3164 and 5424 are supported + """ text: """ Plain text encoding. @@ -575,6 +579,54 @@ generated: components: sinks: websocket: configuration: { } } } + syslog: { + description: "Options for the Syslog serializer." + relevant_when: "codec = \"syslog\"" + required: false + type: object: options: { + app_name: { + description: """ + Path to a field in the event to use for the app name. + + If not provided, the encoder checks for a semantic "service" field. + If that is also missing, it defaults to "vector". 
+ """ + required: false + type: string: {} + } + facility: { + description: "Path to a field in the event to use for the facility. Defaults to \"user\"." + required: false + type: string: {} + } + msg_id: { + description: "Path to a field in the event to use for the msg ID." + required: false + type: string: {} + } + proc_id: { + description: "Path to a field in the event to use for the proc ID." + required: false + type: string: {} + } + rfc: { + description: "RFC to use for formatting." + required: false + type: string: { + default: "rfc5424" + enum: { + rfc3164: "The legacy RFC3164 syslog format." + rfc5424: "The modern RFC5424 syslog format." + } + } + } + severity: { + description: "Path to a field in the event to use for the severity. Defaults to \"informational\"." + required: false + type: string: {} + } + } + } timestamp_format: { description: "Format used for timestamp fields." required: false diff --git a/website/cue/reference/components/sinks/generated/websocket_server.cue b/website/cue/reference/components/sinks/generated/websocket_server.cue index f822ab29b8626..28e5823c8c7e5 100644 --- a/website/cue/reference/components/sinks/generated/websocket_server.cue +++ b/website/cue/reference/components/sinks/generated/websocket_server.cue @@ -267,6 +267,10 @@ generated: components: sinks: websocket_server: configuration: { transform) and removing the message field while doing additional parsing on it, as this could lead to the encoding emitting empty strings for the given event. """ + syslog: """ + Syslog encoding + RFC 3164 and 5424 are supported + """ text: """ Plain text encoding. @@ -452,6 +456,54 @@ generated: components: sinks: websocket_server: configuration: { } } } + syslog: { + description: "Options for the Syslog serializer." + relevant_when: "codec = \"syslog\"" + required: false + type: object: options: { + app_name: { + description: """ + Path to a field in the event to use for the app name. 
+ + If not provided, the encoder checks for a semantic "service" field. + If that is also missing, it defaults to "vector". + """ + required: false + type: string: {} + } + facility: { + description: "Path to a field in the event to use for the facility. Defaults to \"user\"." + required: false + type: string: {} + } + msg_id: { + description: "Path to a field in the event to use for the msg ID." + required: false + type: string: {} + } + proc_id: { + description: "Path to a field in the event to use for the proc ID." + required: false + type: string: {} + } + rfc: { + description: "RFC to use for formatting." + required: false + type: string: { + default: "rfc5424" + enum: { + rfc3164: "The legacy RFC3164 syslog format." + rfc5424: "The modern RFC5424 syslog format." + } + } + } + severity: { + description: "Path to a field in the event to use for the severity. Defaults to \"informational\"." + required: false + type: string: {} + } + } + } timestamp_format: { description: "Format used for timestamp fields." required: false From c0fda7e06efdfed3b60fcc288a531304dafe13c0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 5 Jan 2026 11:31:59 -0500 Subject: [PATCH 168/227] chore(deps): bump the clap group with 2 updates (#24430) Bumps the clap group with 2 updates: [clap](https://github.com/clap-rs/clap) and [clap_complete](https://github.com/clap-rs/clap). 
Updates `clap` from 4.5.48 to 4.5.53 - [Release notes](https://github.com/clap-rs/clap/releases) - [Changelog](https://github.com/clap-rs/clap/blob/master/CHANGELOG.md) - [Commits](https://github.com/clap-rs/clap/compare/clap_complete-v4.5.48...clap_complete-v4.5.53) Updates `clap_complete` from 4.5.58 to 4.5.64 - [Release notes](https://github.com/clap-rs/clap/releases) - [Changelog](https://github.com/clap-rs/clap/blob/master/CHANGELOG.md) - [Commits](https://github.com/clap-rs/clap/compare/clap_complete-v4.5.58...clap_complete-v4.5.64) --- updated-dependencies: - dependency-name: clap dependency-version: 4.5.53 dependency-type: direct:production update-type: version-update:semver-patch dependency-group: clap - dependency-name: clap_complete dependency-version: 4.5.64 dependency-type: direct:production update-type: version-update:semver-patch dependency-group: clap ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 16 ++++++++-------- Cargo.toml | 2 +- vdev/Cargo.toml | 2 +- 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a30618189ed90..96c7a3330b7f6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2502,9 +2502,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.48" +version = "4.5.53" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2134bb3ea021b78629caa971416385309e0131b351b25e01dc16fb54e1b5fae" +checksum = "c9e340e012a1bf4935f5282ed1436d1489548e8f72308207ea5df0e23d2d03f8" dependencies = [ "clap_builder", "clap_derive", @@ -2522,9 +2522,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.48" +version = "4.5.53" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2ba64afa3c0a6df7fa517765e31314e983f51dda798ffba27b988194fb65dc9" +checksum = "d76b5d13eaa18c901fd2f7fca939fefe3a0727a953561fefdf3b2922b8569d00" dependencies = [ "anstream", "anstyle", @@ 
-2535,18 +2535,18 @@ dependencies = [ [[package]] name = "clap_complete" -version = "4.5.58" +version = "4.5.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75bf0b32ad2e152de789bb635ea4d3078f6b838ad7974143e99b99f45a04af4a" +checksum = "4c0da80818b2d95eca9aa614a30783e42f62bf5fdfee24e68cfb960b071ba8d1" dependencies = [ "clap", ] [[package]] name = "clap_derive" -version = "4.5.47" +version = "4.5.49" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbfd7eae0b0f1a6e63d4b13c9c478de77c2eb546fba158ad50b4203dc24b9f9c" +checksum = "2a0b5487afeab2deb2ff4e03a807ad1a03ac532ff5a2cee5d86884440c7f7671" dependencies = [ "heck 0.5.0", "proc-macro2 1.0.101", diff --git a/Cargo.toml b/Cargo.toml index 26e0f5b355f4c..e4b501795ccfd 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -144,7 +144,7 @@ bytes = { version = "1.10.1", default-features = false, features = ["serde"] } cfg-if = { version = "1.0.3", default-features = false } chrono = { version = "0.4.41", default-features = false, features = ["clock", "serde"] } chrono-tz = { version = "0.10.4", default-features = false, features = ["serde"] } -clap = { version = "4.5.48", default-features = false, features = ["derive", "error-context", "env", "help", "std", "string", "usage", "wrap_help"] } +clap = { version = "4.5.53", default-features = false, features = ["derive", "error-context", "env", "help", "std", "string", "usage", "wrap_help"] } colored = { version = "3.0.0", default-features = false } crossbeam-utils = { version = "0.8.21", default-features = false } darling = { version = "0.20.11", default-features = false, features = ["suggestions"] } diff --git a/vdev/Cargo.toml b/vdev/Cargo.toml index fd9ca3c770b87..32d8a08958d01 100644 --- a/vdev/Cargo.toml +++ b/vdev/Cargo.toml @@ -21,7 +21,7 @@ anyhow.workspace = true chrono.workspace = true clap.workspace = true clap-verbosity-flag = "3.0.4" -clap_complete = "4.5.58" +clap_complete = "4.5.64" directories = "6.0.0" 
glob.workspace = true hex = "0.4.3" From 3656b659a2e7eced25949c10cd4162514289f1b2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 5 Jan 2026 12:04:25 -0500 Subject: [PATCH 169/227] chore(ci): bump actions/cache from 4.3.0 to 5.0.1 (#24439) Bumps [actions/cache](https://github.com/actions/cache) from 4.3.0 to 5.0.1. - [Release notes](https://github.com/actions/cache/releases) - [Changelog](https://github.com/actions/cache/blob/main/RELEASES.md) - [Commits](https://github.com/actions/cache/compare/0057852bfaa89a56745cba8c7296529d2fc39830...9255dc7a253b0ccc959486e2bca901246202afeb) --- updated-dependencies: - dependency-name: actions/cache dependency-version: 5.0.1 dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/cli.yml | 2 +- .github/workflows/cross.yml | 2 +- .github/workflows/k8s_e2e.yml | 2 +- .github/workflows/test-make-command.yml | 2 +- .github/workflows/unit_mac.yml | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/cli.yml b/.github/workflows/cli.yml index 38b968f6dda31..1c08d63d29c4d 100644 --- a/.github/workflows/cli.yml +++ b/.github/workflows/cli.yml @@ -33,7 +33,7 @@ jobs: uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - name: Cache Cargo registry + index - uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0 + uses: actions/cache@9255dc7a253b0ccc959486e2bca901246202afeb # v5.0.1 with: path: | ~/.cargo/bin/ diff --git a/.github/workflows/cross.yml b/.github/workflows/cross.yml index 7468acf98fe36..66fb77322a8d5 100644 --- a/.github/workflows/cross.yml +++ b/.github/workflows/cross.yml @@ -44,7 +44,7 @@ jobs: if: ${{ github.event_name != 'pull_request_review' }} uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - - uses: 
actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0 + - uses: actions/cache@9255dc7a253b0ccc959486e2bca901246202afeb # v5.0.1 name: Cache Cargo registry + index with: path: | diff --git a/.github/workflows/k8s_e2e.yml b/.github/workflows/k8s_e2e.yml index 891edc7c23315..dc8747e979f82 100644 --- a/.github/workflows/k8s_e2e.yml +++ b/.github/workflows/k8s_e2e.yml @@ -85,7 +85,7 @@ jobs: if: ${{ github.event_name != 'pull_request_review' }} uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - - uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0 + - uses: actions/cache@9255dc7a253b0ccc959486e2bca901246202afeb # v5.0.1 with: path: | ~/.cargo/registry diff --git a/.github/workflows/test-make-command.yml b/.github/workflows/test-make-command.yml index 1cfde8a2dfad5..f36a3ca7b9f26 100644 --- a/.github/workflows/test-make-command.yml +++ b/.github/workflows/test-make-command.yml @@ -41,7 +41,7 @@ jobs: if: ${{ github.event_name != 'pull_request_review' }} uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - - uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0 + - uses: actions/cache@9255dc7a253b0ccc959486e2bca901246202afeb # v5.0.1 name: Cache Cargo registry + index with: path: | diff --git a/.github/workflows/unit_mac.yml b/.github/workflows/unit_mac.yml index 7d162e2eb3506..ba96994b72110 100644 --- a/.github/workflows/unit_mac.yml +++ b/.github/workflows/unit_mac.yml @@ -32,7 +32,7 @@ jobs: if: ${{ github.event_name != 'pull_request_review' }} uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - - uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0 + - uses: actions/cache@9255dc7a253b0ccc959486e2bca901246202afeb # v5.0.1 name: Cache Cargo registry + index with: path: | From 9e12569ba9739f4d9c4e05ffbf306e0d9ed48aa3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 5 Jan 2026 
17:05:11 +0000 Subject: [PATCH 170/227] chore(ci): bump github/codeql-action from 4.31.6 to 4.31.9 (#24438) Bumps [github/codeql-action](https://github.com/github/codeql-action) from 4.31.6 to 4.31.9. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/fe4161a26a8629af62121b670040955b330f9af2...5d4e8d1aca955e8d8589aabd499c5cae939e33c7) --- updated-dependencies: - dependency-name: github/codeql-action dependency-version: 4.31.9 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/scorecard.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/scorecard.yml b/.github/workflows/scorecard.yml index 44965ea9f9d28..3ed84a57f41c6 100644 --- a/.github/workflows/scorecard.yml +++ b/.github/workflows/scorecard.yml @@ -68,6 +68,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard (optional). # Commenting out will disable upload of results to your repo's Code Scanning dashboard - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@fe4161a26a8629af62121b670040955b330f9af2 # v4.31.6 + uses: github/codeql-action/upload-sarif@5d4e8d1aca955e8d8589aabd499c5cae939e33c7 # v4.31.9 with: sarif_file: results.sarif From 9c1b1f0e5cf3e69da9ffe94d2fbb9415d109e16a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 5 Jan 2026 12:37:08 -0500 Subject: [PATCH 171/227] chore(ci): bump docker/setup-buildx-action from 3.11.1 to 3.12.0 (#24437) Bumps [docker/setup-buildx-action](https://github.com/docker/setup-buildx-action) from 3.11.1 to 3.12.0. 
- [Release notes](https://github.com/docker/setup-buildx-action/releases) - [Commits](https://github.com/docker/setup-buildx-action/compare/e468171a9de216ec08956ac3ada2f0791b6bd435...8d2750c68a42422c14e847fe6c8ac0403b4cbd6f) --- updated-dependencies: - dependency-name: docker/setup-buildx-action dependency-version: 3.12.0 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/environment.yml | 2 +- .github/workflows/publish.yml | 2 +- .github/workflows/regression.yml | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/environment.yml b/.github/workflows/environment.yml index 5db6aed25a65d..d1cd30f8a4b75 100644 --- a/.github/workflows/environment.yml +++ b/.github/workflows/environment.yml @@ -46,7 +46,7 @@ jobs: - name: Set up QEMU uses: docker/setup-qemu-action@c7c53464625b32c7a7e944ae62b3e17d2b600130 # v3.7.0 - name: Set up Docker Buildx - uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1 + uses: docker/setup-buildx-action@8d2750c68a42422c14e847fe6c8ac0403b4cbd6f # v3.12.0 - name: Login to DockerHub uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0 if: env.SHOULD_PUBLISH == 'true' diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index f32d309e6f774..6fb462c6c99fe 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -528,7 +528,7 @@ jobs: platforms: all - name: Set up Docker Buildx id: buildx - uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1 + uses: docker/setup-buildx-action@8d2750c68a42422c14e847fe6c8ac0403b4cbd6f # v3.12.0 with: version: latest install: true diff --git a/.github/workflows/regression.yml b/.github/workflows/regression.yml index 758e94e078fb5..11562ca6859b8 100644 --- a/.github/workflows/regression.yml 
+++ b/.github/workflows/regression.yml @@ -201,7 +201,7 @@ jobs: - name: Set up Docker Buildx id: buildx - uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1 + uses: docker/setup-buildx-action@8d2750c68a42422c14e847fe6c8ac0403b4cbd6f # v3.12.0 - name: Build 'vector' target image uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6.18.0 @@ -240,7 +240,7 @@ jobs: - name: Set up Docker Buildx id: buildx - uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1 + uses: docker/setup-buildx-action@8d2750c68a42422c14e847fe6c8ac0403b4cbd6f # v3.12.0 - name: Build 'vector' target image uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6.18.0 From 5dcb8262810e8158e8d2b17c9dfc0de9e9c4b846 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 5 Jan 2026 16:16:11 -0500 Subject: [PATCH 172/227] chore(ci): bump the artifact group with 2 updates (#24436) Bumps the artifact group with 2 updates: [actions/upload-artifact](https://github.com/actions/upload-artifact) and [actions/download-artifact](https://github.com/actions/download-artifact). 
Updates `actions/upload-artifact` from 5.0.0 to 6.0.0 - [Release notes](https://github.com/actions/upload-artifact/releases) - [Commits](https://github.com/actions/upload-artifact/compare/330a01c490aca151604b8cf639adc76d48f6c5d4...b7c566a772e6b6bfb58ed0dc250532a479d7789f) Updates `actions/download-artifact` from 6.0.0 to 7.0.0 - [Release notes](https://github.com/actions/download-artifact/releases) - [Commits](https://github.com/actions/download-artifact/compare/018cc2cf5baa6db3ef3c5f8a56943fffe632ef53...37930b1c2abaa49bbe596cd826c3c89aef350131) --- updated-dependencies: - dependency-name: actions/upload-artifact dependency-version: 6.0.0 dependency-type: direct:production update-type: version-update:semver-major dependency-group: artifact - dependency-name: actions/download-artifact dependency-version: 7.0.0 dependency-type: direct:production update-type: version-update:semver-major dependency-group: artifact ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/changes.yml | 4 +- .github/workflows/cross.yml | 2 +- .github/workflows/integration.yml | 4 +- .github/workflows/k8s_e2e.yml | 4 +- .github/workflows/preview_site_trigger.yml | 2 +- .github/workflows/publish.yml | 106 ++++++++++----------- .github/workflows/regression.yml | 18 ++-- .github/workflows/scorecard.yml | 2 +- 8 files changed, 71 insertions(+), 71 deletions(-) diff --git a/.github/workflows/changes.yml b/.github/workflows/changes.yml index dd765e975dfc3..ada9463bb9133 100644 --- a/.github/workflows/changes.yml +++ b/.github/workflows/changes.yml @@ -397,7 +397,7 @@ jobs: echo "any=$any_changed" >> $GITHUB_OUTPUT - name: Upload JSON artifact - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 + uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0 with: name: int_tests_changes path: int_tests_changes.json @@ -457,7 +457,7 @@ jobs: echo "any=$any_changed" >> 
$GITHUB_OUTPUT - name: Upload JSON artifact - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 + uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0 with: name: e2e_tests_changes path: e2e_tests_changes.json diff --git a/.github/workflows/cross.yml b/.github/workflows/cross.yml index 66fb77322a8d5..fd781154fbde0 100644 --- a/.github/workflows/cross.yml +++ b/.github/workflows/cross.yml @@ -64,7 +64,7 @@ jobs: # aarch64 and musl in particular are notoriously hard to link. # While it may be tempting to slot a `check` in here for quickness, please don't. - run: make cross-build-${{ matrix.target }} - - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 + - uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0 with: name: "vector-debug-${{ matrix.target }}" path: "./target/${{ matrix.target }}/debug/vector" diff --git a/.github/workflows/integration.yml b/.github/workflows/integration.yml index 44bb30601df66..a92c7f3d27593 100644 --- a/.github/workflows/integration.yml +++ b/.github/workflows/integration.yml @@ -84,7 +84,7 @@ jobs: submodules: "recursive" - name: Download JSON artifact from changes.yml - uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 + uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0 if: github.event_name == 'merge_group' with: name: int_tests_changes @@ -145,7 +145,7 @@ jobs: submodules: "recursive" - name: Download JSON artifact from changes.yml - uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 + uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0 if: github.event_name == 'merge_group' with: name: e2e_tests_changes diff --git a/.github/workflows/k8s_e2e.yml b/.github/workflows/k8s_e2e.yml index dc8747e979f82..b60953ca61e2c 100644 --- a/.github/workflows/k8s_e2e.yml +++ b/.github/workflows/k8s_e2e.yml @@ 
-104,7 +104,7 @@ jobs: - run: VECTOR_VERSION="$(vdev version)" make package-deb-x86_64-unknown-linux-gnu - - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 + - uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0 with: name: e2e-test-deb-package path: target/artifacts/* @@ -206,7 +206,7 @@ jobs: if: ${{ github.event_name != 'pull_request_review' }} uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - - uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 + - uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0 with: name: e2e-test-deb-package path: target/artifacts diff --git a/.github/workflows/preview_site_trigger.yml b/.github/workflows/preview_site_trigger.yml index 2b2c3e4068ae0..761c010524185 100644 --- a/.github/workflows/preview_site_trigger.yml +++ b/.github/workflows/preview_site_trigger.yml @@ -45,7 +45,7 @@ jobs: # Upload the artifact using latest version (only if branch is valid) - name: Upload PR information artifact if: steps.validate.outputs.valid == 'true' - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 + uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0 with: name: pr path: pr/ diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index 6fb462c6c99fe..fc9dae0478486 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -65,7 +65,7 @@ jobs: - name: Build Vector run: make package-x86_64-unknown-linux-musl-all - name: Stage package artifacts for publish - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 + uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0 with: name: vector-${{ env.VECTOR_VERSION }}-x86_64-unknown-linux-musl path: target/artifacts/vector* @@ -91,7 +91,7 @@ jobs: - name: Build Vector run: make 
package-x86_64-unknown-linux-gnu-all - name: Stage package artifacts for publish - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 + uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0 with: name: vector-${{ env.VECTOR_VERSION }}-x86_64-unknown-linux-gnu path: target/artifacts/vector* @@ -119,7 +119,7 @@ jobs: DOCKER_PRIVILEGED: "true" run: make package-aarch64-unknown-linux-musl-all - name: Stage package artifacts for publish - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 + uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0 with: name: vector-${{ env.VECTOR_VERSION }}-aarch64-unknown-linux-musl path: target/artifacts/vector* @@ -147,7 +147,7 @@ jobs: DOCKER_PRIVILEGED: "true" run: make package-aarch64-unknown-linux-gnu-all - name: Stage package artifacts for publish - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 + uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0 with: name: vector-${{ env.VECTOR_VERSION }}-aarch64-unknown-linux-gnu path: target/artifacts/vector* @@ -175,7 +175,7 @@ jobs: DOCKER_PRIVILEGED: "true" run: make package-armv7-unknown-linux-gnueabihf-all - name: Stage package artifacts for publish - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 + uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0 with: name: vector-${{ env.VECTOR_VERSION }}-armv7-unknown-linux-gnueabihf path: target/artifacts/vector* @@ -203,7 +203,7 @@ jobs: DOCKER_PRIVILEGED: "true" run: make package-armv7-unknown-linux-musleabihf - name: Stage package artifacts for publish - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 + uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0 with: name: vector-${{ env.VECTOR_VERSION }}-armv7-unknown-linux-musleabihf path: 
target/artifacts/vector* @@ -231,7 +231,7 @@ jobs: DOCKER_PRIVILEGED: "true" run: make package-arm-unknown-linux-gnueabi-all - name: Stage package artifacts for publish - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 + uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0 with: name: vector-${{ env.VECTOR_VERSION }}-arm-unknown-linux-gnueabi path: target/artifacts/vector* @@ -259,7 +259,7 @@ jobs: DOCKER_PRIVILEGED: "true" run: make package-arm-unknown-linux-musleabi - name: Stage package artifacts for publish - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 + uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0 with: name: vector-${{ env.VECTOR_VERSION }}-arm-unknown-linux-musleabi path: target/artifacts/vector* @@ -305,7 +305,7 @@ jobs: export PATH="$HOME/.cargo/bin:$PATH" make package - name: Stage package artifacts for publish - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 + uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0 with: name: vector-${{ env.VECTOR_VERSION }}-${{ matrix.architecture }}-apple-darwin path: target/artifacts/vector* @@ -353,7 +353,7 @@ jobs: export PATH="/c/wix:$PATH" ./scripts/package-msi.sh - name: Stage package artifacts for publish - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 + uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0 with: name: vector-${{ env.VECTOR_VERSION }}-x86_64-pc-windows-msvc path: target/artifacts/vector* @@ -398,7 +398,7 @@ jobs: with: ref: ${{ inputs.git_ref }} - name: Download staged package artifacts (x86_64-unknown-linux-gnu) - uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 + uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0 with: name: vector-${{ env.VECTOR_VERSION }}-x86_64-unknown-linux-gnu path: 
target/artifacts @@ -449,7 +449,7 @@ jobs: with: ref: ${{ inputs.git_ref }} - name: Download staged package artifacts (x86_64-unknown-linux-gnu) - uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 + uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0 with: name: vector-${{ env.VECTOR_VERSION }}-x86_64-unknown-linux-gnu path: target/artifacts @@ -478,7 +478,7 @@ jobs: with: ref: ${{ inputs.git_ref }} - name: Download staged package artifacts (${{ matrix.target }}) - uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 + uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0 with: name: vector-${{ env.VECTOR_VERSION }}-${{ matrix.target }} path: target/artifacts @@ -533,42 +533,42 @@ jobs: version: latest install: true - name: Download staged package artifacts (aarch64-unknown-linux-gnu) - uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 + uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0 with: name: vector-${{ env.VECTOR_VERSION }}-aarch64-unknown-linux-gnu path: target/artifacts - name: Download staged package artifacts (aarch64-unknown-linux-musl) - uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 + uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0 with: name: vector-${{ env.VECTOR_VERSION }}-aarch64-unknown-linux-musl path: target/artifacts - name: Download staged package artifacts (x86_64-unknown-linux-gnu) - uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 + uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0 with: name: vector-${{ env.VECTOR_VERSION }}-x86_64-unknown-linux-gnu path: target/artifacts - name: Download staged package artifacts (x86_64-unknown-linux-musl) - uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 + 
uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0 with: name: vector-${{ env.VECTOR_VERSION }}-x86_64-unknown-linux-musl path: target/artifacts - name: Download staged package artifacts (armv7-unknown-linux-gnueabihf) - uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 + uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0 with: name: vector-${{ env.VECTOR_VERSION }}-armv7-unknown-linux-gnueabihf path: target/artifacts - name: Download staged package artifacts (armv7-unknown-linux-musleabihf) - uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 + uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0 with: name: vector-${{ env.VECTOR_VERSION }}-armv7-unknown-linux-musleabihf path: target/artifacts - name: Download staged package artifacts (arm-unknown-linux-gnueabi) - uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 + uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0 with: name: vector-${{ env.VECTOR_VERSION }}-arm-unknown-linux-gnueabi path: target/artifacts - name: Download staged package artifacts (arm-unknown-linux-musleabi) - uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 + uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0 with: name: vector-${{ env.VECTOR_VERSION }}-arm-unknown-linux-musleabi path: target/artifacts @@ -611,52 +611,52 @@ jobs: with: ref: ${{ inputs.git_ref }} - name: Download staged package artifacts (aarch64-unknown-linux-gnu) - uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 + uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0 with: name: vector-${{ env.VECTOR_VERSION }}-aarch64-unknown-linux-gnu path: target/artifacts - name: Download staged package artifacts (aarch64-unknown-linux-musl) - uses: 
actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 + uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0 with: name: vector-${{ env.VECTOR_VERSION }}-aarch64-unknown-linux-musl path: target/artifacts - name: Download staged package artifacts (x86_64-unknown-linux-gnu) - uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 + uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0 with: name: vector-${{ env.VECTOR_VERSION }}-x86_64-unknown-linux-gnu path: target/artifacts - name: Download staged package artifacts (x86_64-unknown-linux-musl) - uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 + uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0 with: name: vector-${{ env.VECTOR_VERSION }}-x86_64-unknown-linux-musl path: target/artifacts - name: Download staged package artifacts (arm64-apple-darwin) - uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 + uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0 with: name: vector-${{ env.VECTOR_VERSION }}-arm64-apple-darwin path: target/artifacts - name: Download staged package artifacts (x86_64-pc-windows-msvc) - uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 + uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0 with: name: vector-${{ env.VECTOR_VERSION }}-x86_64-pc-windows-msvc path: target/artifacts - name: Download staged package artifacts (armv7-unknown-linux-gnueabihf) - uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 + uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0 with: name: vector-${{ env.VECTOR_VERSION }}-armv7-unknown-linux-gnueabihf path: target/artifacts - name: Download staged package artifacts (armv7-unknown-linux-musleabihf) - uses: 
actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 + uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0 with: name: vector-${{ env.VECTOR_VERSION }}-armv7-unknown-linux-musleabihf path: target/artifacts - name: Download staged package artifacts (arm-unknown-linux-gnueabi) - uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 + uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0 with: name: vector-${{ env.VECTOR_VERSION }}-arm-unknown-linux-gnueabi path: target/artifacts - name: Download staged package artifacts (arm-unknown-linux-musleabi) - uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 + uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0 with: name: vector-${{ env.VECTOR_VERSION }}-arm-unknown-linux-musleabi path: target/artifacts @@ -696,57 +696,57 @@ jobs: with: ref: ${{ inputs.git_ref }} - name: Download staged package artifacts (aarch64-unknown-linux-gnu) - uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 + uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0 with: name: vector-${{ env.VECTOR_VERSION }}-aarch64-unknown-linux-gnu path: target/artifacts - name: Download staged package artifacts (aarch64-unknown-linux-musl) - uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 + uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0 with: name: vector-${{ env.VECTOR_VERSION }}-aarch64-unknown-linux-musl path: target/artifacts - name: Download staged package artifacts (x86_64-unknown-linux-gnu) - uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 + uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0 with: name: vector-${{ env.VECTOR_VERSION }}-x86_64-unknown-linux-gnu path: target/artifacts - name: Download 
staged package artifacts (x86_64-unknown-linux-musl) - uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 + uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0 with: name: vector-${{ env.VECTOR_VERSION }}-x86_64-unknown-linux-musl path: target/artifacts - name: Download staged package artifacts (arm64-apple-darwin) - uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 + uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0 with: name: vector-${{ env.VECTOR_VERSION }}-arm64-apple-darwin path: target/artifacts - name: Download staged package artifacts (x86_64-pc-windows-msvc) - uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 + uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0 with: name: vector-${{ env.VECTOR_VERSION }}-x86_64-pc-windows-msvc path: target/artifacts - name: Download staged package artifacts (armv7-unknown-linux-gnueabihf) - uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 + uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0 with: name: vector-${{ env.VECTOR_VERSION }}-armv7-unknown-linux-gnueabihf path: target/artifacts - name: Download staged package artifacts (armv7-unknown-linux-musleabihf) - uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 + uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0 with: name: vector-${{ env.VECTOR_VERSION }}-armv7-unknown-linux-musleabihf path: target/artifacts - name: Download artifact checksums - uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 + uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0 with: name: vector-${{ env.VECTOR_VERSION }}-SHA256SUMS path: target/artifacts - name: Download staged package artifacts (arm-unknown-linux-gnueabi) - 
uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 + uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0 with: name: vector-${{ env.VECTOR_VERSION }}-arm-unknown-linux-gnueabi path: target/artifacts - name: Download staged package artifacts (arm-unknown-linux-musleabi) - uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 + uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0 with: name: vector-${{ env.VECTOR_VERSION }}-arm-unknown-linux-musleabi path: target/artifacts @@ -779,59 +779,59 @@ jobs: with: ref: ${{ inputs.git_ref }} - name: Download staged package artifacts (aarch64-unknown-linux-gnu) - uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 + uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0 with: name: vector-${{ env.VECTOR_VERSION }}-aarch64-unknown-linux-gnu path: target/artifacts - name: Download staged package artifacts (aarch64-unknown-linux-musl) - uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 + uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0 with: name: vector-${{ env.VECTOR_VERSION }}-aarch64-unknown-linux-musl path: target/artifacts - name: Download staged package artifacts (x86_64-unknown-linux-gnu) - uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 + uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0 with: name: vector-${{ env.VECTOR_VERSION }}-x86_64-unknown-linux-gnu path: target/artifacts - name: Download staged package artifacts (x86_64-unknown-linux-musl) - uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 + uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0 with: name: vector-${{ env.VECTOR_VERSION }}-x86_64-unknown-linux-musl path: target/artifacts - name: Download 
staged package artifacts (arm64-apple-darwin) - uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 + uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0 with: name: vector-${{ env.VECTOR_VERSION }}-arm64-apple-darwin path: target/artifacts - name: Download staged package artifacts (x86_64-pc-windows-msvc) - uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 + uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0 with: name: vector-${{ env.VECTOR_VERSION }}-x86_64-pc-windows-msvc path: target/artifacts - name: Download staged package artifacts (armv7-unknown-linux-gnueabihf) - uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 + uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0 with: name: vector-${{ env.VECTOR_VERSION }}-armv7-unknown-linux-gnueabihf path: target/artifacts - name: Download staged package artifacts (armv7-unknown-linux-musleabihf) - uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 + uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0 with: name: vector-${{ env.VECTOR_VERSION }}-armv7-unknown-linux-musleabihf path: target/artifacts - name: Download staged package artifacts (arm-unknown-linux-gnueabi) - uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 + uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0 with: name: vector-${{ env.VECTOR_VERSION }}-arm-unknown-linux-gnueabi path: target/artifacts - name: Download staged package artifacts (arm-unknown-linux-musleabi) - uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 + uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0 with: name: vector-${{ env.VECTOR_VERSION }}-arm-unknown-linux-musleabi path: target/artifacts - name: Generate SHA256 
checksums for artifacts run: make sha256sum - name: Stage checksum for publish - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 + uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0 with: name: vector-${{ env.VECTOR_VERSION }}-SHA256SUMS path: target/artifacts/vector-${{ env.VECTOR_VERSION }}-SHA256SUMS diff --git a/.github/workflows/regression.yml b/.github/workflows/regression.yml index 11562ca6859b8..14c45a827e227 100644 --- a/.github/workflows/regression.yml +++ b/.github/workflows/regression.yml @@ -216,7 +216,7 @@ jobs: vector:${{ needs.resolve-inputs.outputs.baseline-tag }} - name: Upload image as artifact - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 + uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0 with: name: baseline-image path: "${{ runner.temp }}/baseline-image.tar" @@ -255,7 +255,7 @@ jobs: vector:${{ needs.resolve-inputs.outputs.comparison-tag }} - name: Upload image as artifact - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 + uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0 with: name: comparison-image path: "${{ runner.temp }}/comparison-image.tar" @@ -294,7 +294,7 @@ jobs: - build-baseline steps: - name: 'Download baseline image' - uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 + uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0 with: name: baseline-image @@ -334,7 +334,7 @@ jobs: - build-comparison steps: - name: 'Download comparison image' - uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 + uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0 with: name: comparison-image @@ -408,7 +408,7 @@ jobs: --submission-metadata ${{ runner.temp }}/submission-metadata \ --replicas ${{ env.SMP_REPLICAS }} - - uses: 
actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 + - uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0 with: name: vector-submission-metadata path: ${{ runner.temp }}/submission-metadata @@ -462,7 +462,7 @@ jobs: aws s3 cp s3://smp-cli-releases/v${{ needs.resolve-inputs.outputs.smp-version }}/x86_64-unknown-linux-musl/smp ${{ runner.temp }}/bin/smp - name: Download submission metadata - uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 + uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0 with: name: vector-submission-metadata path: ${{ runner.temp }}/ @@ -501,7 +501,7 @@ jobs: aws s3 cp s3://smp-cli-releases/v${{ needs.resolve-inputs.outputs.smp-version }}/x86_64-unknown-linux-musl/smp ${{ runner.temp }}/bin/smp - name: Download submission metadata - uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 + uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0 with: name: vector-submission-metadata path: ${{ runner.temp }}/ @@ -523,7 +523,7 @@ jobs: path: ${{ runner.temp }}/outputs/report.md - name: Upload regression report to artifacts - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 + uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0 with: name: capture-artifacts path: ${{ runner.temp }}/outputs/* @@ -547,7 +547,7 @@ jobs: steps: - name: Download capture-artifacts continue-on-error: true - uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 + uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0 with: name: capture-artifacts diff --git a/.github/workflows/scorecard.yml b/.github/workflows/scorecard.yml index 3ed84a57f41c6..1ad21054377b4 100644 --- a/.github/workflows/scorecard.yml +++ b/.github/workflows/scorecard.yml @@ -59,7 +59,7 @@ jobs: # Upload the results as artifacts 
(optional). Commenting out will disable uploads of run results in SARIF # format to the repository Actions tab. - name: "Upload artifact" - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 + uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0 with: name: SARIF file path: results.sarif From 236928a042a71db77b050d5afbb392ad5d4e6cdc Mon Sep 17 00:00:00 2001 From: Thomas Date: Tue, 6 Jan 2026 16:14:17 -0500 Subject: [PATCH 173/227] chore(deps): bump rkyv to 0.7.46 (#24451) --- Cargo.lock | 8 ++++---- lib/vector-buffers/Cargo.toml | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 96c7a3330b7f6..aedf0cc75cb92 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9527,9 +9527,9 @@ dependencies = [ [[package]] name = "rkyv" -version = "0.7.45" +version = "0.7.46" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9008cd6385b9e161d8229e1f6549dd23c3d022f132a2ea37ac3a10ac4935779b" +checksum = "2297bf9c81a3f0dc96bc9521370b88f054168c29826a75e89c55ff196e7ed6a1" dependencies = [ "bitvec", "bytecheck", @@ -9545,9 +9545,9 @@ dependencies = [ [[package]] name = "rkyv_derive" -version = "0.7.45" +version = "0.7.46" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "503d1d27590a2b0a3a4ca4c94755aa2875657196ecbf401a42eff41d7de532c0" +checksum = "84d7b42d4b8d06048d3ac8db0eb31bcb942cbeb709f0b5f2b2ebde398d3038f5" dependencies = [ "proc-macro2 1.0.101", "quote 1.0.40", diff --git a/lib/vector-buffers/Cargo.toml b/lib/vector-buffers/Cargo.toml index 3eb7a674ca1c4..bb99e14b8e6aa 100644 --- a/lib/vector-buffers/Cargo.toml +++ b/lib/vector-buffers/Cargo.toml @@ -24,7 +24,7 @@ memmap2 = { version = "0.9.8", default-features = false } metrics.workspace = true num-traits = { version = "0.2.19", default-features = false } paste.workspace = true -rkyv = { version = "0.7.45", default-features = false, features = ["size_32", "std", "strict", 
"validation"] } +rkyv = { version = "0.7.46", default-features = false, features = ["size_32", "std", "strict", "validation"] } serde.workspace = true snafu.workspace = true tokio-util = { version = "0.7.0", default-features = false } From acd4a737d7f45473c6125791743173027927d4ac Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ensar=20Saraj=C4=8Di=C4=87?= Date: Wed, 7 Jan 2026 08:47:54 +0100 Subject: [PATCH 174/227] feat(vrl): add functions for internal vector metrics access in VRL (#23430) * feat(vrl): add functions for internal vector metrics access in VRL Adds 2 new functions: `get_vector_metric` and `find_vector_metrics`, enabling access to current Vector metrics (with some delay, to reduce locking). Closes: #23284 * Add changelog entry * Remove old TODO * Add filtering by tags * Add basic get/find vector metrics tests * Add tests note * Add wildcard support * Update example for get/find vector metrics * Add aggregate_vector_metrics function * Update changelog * Update `metrics_storage_refresh_period` docs for aggregation * Update configuration docs * Add docs for added VRL functions * Fix typo in aggregation tests * Apply suggestions from code review Co-authored-by: domalessi <111786334+domalessi@users.noreply.github.com> * Update `metrics_storage_refresh_period` docs * Fix remaining issues in tests * Fix outputs fn def for trace_to_log * Simplify `metric_into_vrl` using `value!` macro * Increment VRL metrics error code to prevent conflict with tables code * Prevent panic on non string tags * Prevent panic on non string tags for `aggregate_vector_metrics` * Use `example!` macro for examples for `vector-vrl-metrics` * Move tags type-checking to compile time * Fix error type for non string tag values * Reuse tag validation code for vector-vrl-metrics * Add metrics to vector-vrl-functions * Remove needless full qualification for Result * Fix disallowed_methods error * Fix function names in examples * Fix example tests for vector-vrl-metrics * Remove `;` from 
`metric_into_vrl` macro to fix all failing tests * Add conversion for LogNamespace into bool * Make `metrics_storage_refresh_period` required to use metrics in VRL * Disable vector-vrl-metrics in vrl web-playground --------- Co-authored-by: domalessi <111786334+domalessi@users.noreply.github.com> Co-authored-by: Pavlos Rontidis Co-authored-by: Thomas --- Cargo.lock | 16 + Cargo.toml | 7 +- benches/transform/reduce.rs | 7 +- ..._internal_metrics_vrl_functions.feature.md | 3 + lib/enrichment/Cargo.toml | 2 +- lib/vector-core/src/config/global_options.rs | 11 + lib/vector-core/src/config/mod.rs | 6 + lib/vector-vrl-metrics/Cargo.toml | 15 + .../src/aggregate_vector_metrics.rs | 150 ++++ lib/vector-vrl-metrics/src/common.rs | 818 ++++++++++++++++++ .../src/find_vector_metrics.rs | 104 +++ .../src/get_vector_metric.rs | 100 +++ lib/vector-vrl-metrics/src/lib.rs | 17 + lib/vector-vrl/functions/Cargo.toml | 2 + lib/vector-vrl/functions/src/lib.rs | 3 + lib/vector-vrl/tests/Cargo.toml | 2 + lib/vector-vrl/tests/src/main.rs | 2 + lib/vector-vrl/tests/src/test_vrl_metrics.rs | 21 + lib/vector-vrl/web-playground/Cargo.toml | 2 +- src/common/http/server_auth.rs | 73 +- src/conditions/datadog_search.rs | 4 +- src/conditions/mod.rs | 12 +- src/conditions/vrl.rs | 7 +- src/config/graph.rs | 10 +- src/config/sink.rs | 3 + src/config/source.rs | 4 + src/config/transform.rs | 19 +- src/config/unit_test/mod.rs | 2 +- src/sinks/websocket_server/sink.rs | 2 +- src/sources/socket/mod.rs | 1 + src/sources/util/http/prelude.rs | 4 +- src/test_util/mock/transforms/basic.rs | 5 +- .../mock/transforms/error_definitions.rs | 5 +- src/test_util/mock/transforms/noop.rs | 5 +- src/topology/builder.rs | 34 +- src/topology/running.rs | 33 +- src/topology/schema.rs | 9 +- src/transforms/aggregate.rs | 6 +- src/transforms/aws_ec2_metadata.rs | 9 +- src/transforms/dedupe/config.rs | 8 +- src/transforms/exclusive_route/config.rs | 5 +- src/transforms/exclusive_route/transform.rs | 4 +- 
src/transforms/filter.rs | 12 +- src/transforms/incremental_to_absolute.rs | 5 +- src/transforms/log_to_metric.rs | 4 +- src/transforms/lua/mod.rs | 8 +- src/transforms/metric_to_log.rs | 5 +- src/transforms/reduce/config.rs | 9 +- src/transforms/reduce/transform.rs | 33 +- src/transforms/remap.rs | 63 +- src/transforms/route.rs | 12 +- src/transforms/sample/config.rs | 9 +- src/transforms/sample/tests.rs | 2 +- .../tag_cardinality_limit/config.rs | 5 +- src/transforms/throttle/config.rs | 8 +- src/transforms/throttle/transform.rs | 2 +- src/transforms/trace_to_log.rs | 5 +- src/transforms/window/config.rs | 15 +- .../cue/reference/generated/configuration.cue | 11 + website/cue/reference/remap/functions.cue | 9 + .../functions/aggregate_vector_metrics.cue | 82 ++ .../remap/functions/find_vector_metrics.cue | 48 + .../remap/functions/get_vector_metric.cue | 48 + 63 files changed, 1737 insertions(+), 210 deletions(-) create mode 100644 changelog.d/23430_internal_metrics_vrl_functions.feature.md create mode 100644 lib/vector-vrl-metrics/Cargo.toml create mode 100644 lib/vector-vrl-metrics/src/aggregate_vector_metrics.rs create mode 100644 lib/vector-vrl-metrics/src/common.rs create mode 100644 lib/vector-vrl-metrics/src/find_vector_metrics.rs create mode 100644 lib/vector-vrl-metrics/src/get_vector_metric.rs create mode 100644 lib/vector-vrl-metrics/src/lib.rs create mode 100644 lib/vector-vrl/tests/src/test_vrl_metrics.rs create mode 100644 website/cue/reference/remap/functions/aggregate_vector_metrics.cue create mode 100644 website/cue/reference/remap/functions/find_vector_metrics.cue create mode 100644 website/cue/reference/remap/functions/get_vector_metric.cue diff --git a/Cargo.lock b/Cargo.lock index aedf0cc75cb92..e2f86f33a558e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -12751,6 +12751,7 @@ dependencies = [ "vector-config-macros", "vector-lib", "vector-vrl-functions", + "vector-vrl-metrics", "vrl", "warp", "windows-service", @@ -13101,6 +13102,19 @@ version = 
"0.1.0" dependencies = [ "dnstap-parser", "enrichment", + "vector-vrl-metrics", + "vrl", +] + +[[package]] +name = "vector-vrl-metrics" +version = "0.1.0" +dependencies = [ + "arc-swap", + "tokio", + "tokio-stream", + "vector-common", + "vector-core", "vrl", ] @@ -13116,7 +13130,9 @@ dependencies = [ "serde_json", "tikv-jemallocator", "tracing-subscriber", + "vector-core", "vector-vrl-functions", + "vector-vrl-metrics", "vrl", ] diff --git a/Cargo.toml b/Cargo.toml index e4b501795ccfd..c85230b1cbad6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -132,11 +132,13 @@ members = [ "lib/vector-vrl/functions", "lib/vector-vrl/tests", "lib/vector-vrl/web-playground", + "lib/vector-vrl-metrics", "vdev", ] [workspace.dependencies] anyhow = { version = "1.0.99", default-features = false, features = ["std"] } +arc-swap = { version = "1.7.1", default-features = false } async-stream = { version = "0.3.6", default-features = false } async-trait = { version = "0.1.89", default-features = false } base64 = { version = "0.22.1", default-features = false } @@ -228,7 +230,8 @@ vector-lib.workspace = true vector-config.workspace = true vector-config-common.workspace = true vector-config-macros.workspace = true -vector-vrl-functions.workspace = true +vector-vrl-functions = { workspace = true, features = ["vrl-metrics"] } +vector-vrl-metrics = { path = "lib/vector-vrl-metrics" } loki-logproto = { path = "lib/loki-logproto", optional = true } # Tokio / Futures @@ -338,7 +341,7 @@ hex = { version = "0.4.3", default-features = false, optional = true } greptimedb-ingester = { git = "https://github.com/GreptimeTeam/greptimedb-ingester-rust", rev = "f7243393808640f5123b0d5b7b798da591a4df6e", optional = true } # External libs -arc-swap = { version = "1.7", default-features = false, optional = true } +arc-swap = { workspace = true, default-features = false, optional = true } async-compression = { version = "0.4.27", default-features = false, features = ["tokio", "gzip", "zstd"], optional = true } 
apache-avro = { version = "0.16.0", default-features = false, optional = true } arrow = { version = "56.2.0", default-features = false, features = ["ipc"], optional = true } diff --git a/benches/transform/reduce.rs b/benches/transform/reduce.rs index ac71f113249db..898ef4f88be40 100644 --- a/benches/transform/reduce.rs +++ b/benches/transform/reduce.rs @@ -54,7 +54,12 @@ fn reduce(c: &mut Criterion) { .iter_batched( || { let reduce = Transform::event_task( - Reduce::new(¶m.reduce_config, &Default::default()).unwrap(), + Reduce::new( + ¶m.reduce_config, + &Default::default(), + &Default::default(), + ) + .unwrap(), ) .into_task(); (Box::new(reduce), Box::pin(param.input.clone())) diff --git a/changelog.d/23430_internal_metrics_vrl_functions.feature.md b/changelog.d/23430_internal_metrics_vrl_functions.feature.md new file mode 100644 index 0000000000000..5242b1391cd0b --- /dev/null +++ b/changelog.d/23430_internal_metrics_vrl_functions.feature.md @@ -0,0 +1,3 @@ +Functions to access internal Vector metrics are now available for VRL: `get_vector_metric`, `find_vector_metrics` and `aggregate_vector_metrics`. They work with a snapshot of the metrics and the interval the snapshot is taken in can be controlled with `metrics_storage_refresh_period` global option. Aggregation supports `max`, `avg`, `min` and `max` functions. 
+ +authors: esensar Quad9DNS diff --git a/lib/enrichment/Cargo.toml b/lib/enrichment/Cargo.toml index 9e82fd16e0f95..087ab7f60bfc0 100644 --- a/lib/enrichment/Cargo.toml +++ b/lib/enrichment/Cargo.toml @@ -6,7 +6,7 @@ edition = "2024" publish = false [dependencies] -arc-swap = { version = "1.7.1", default-features = false } +arc-swap.workspace = true chrono.workspace = true dyn-clone = { version = "1.0.20", default-features = false } vrl.workspace = true diff --git a/lib/vector-core/src/config/global_options.rs b/lib/vector-core/src/config/global_options.rs index cc64b3a35bd35..e789e8ae973ab 100644 --- a/lib/vector-core/src/config/global_options.rs +++ b/lib/vector-core/src/config/global_options.rs @@ -139,6 +139,14 @@ pub struct GlobalOptions { /// the global default value, defined using `expire_metrics_secs`. #[serde(skip_serializing_if = "crate::serde::is_default")] pub expire_metrics_per_metric_set: Option>, + + /// The interval, in seconds, at which the internal metrics cache for VRL is refreshed. + /// This must be set to be able to access metrics in VRL functions. + /// + /// Higher values lead to stale metric values from `get_vector_metric`, + /// `find_vector_metrics`, and `aggregate_vector_metrics` functions. 
+ #[serde(default, skip_serializing_if = "crate::serde::is_default")] + pub metrics_storage_refresh_period: Option, } impl_generate_config_from_default!(GlobalOptions); @@ -287,6 +295,9 @@ impl GlobalOptions { expire_metrics: self.expire_metrics.or(with.expire_metrics), expire_metrics_secs: self.expire_metrics_secs.or(with.expire_metrics_secs), expire_metrics_per_metric_set: merged_expire_metrics_per_metric_set, + metrics_storage_refresh_period: self + .metrics_storage_refresh_period + .or(with.metrics_storage_refresh_period), }) } else { Err(errors) diff --git a/lib/vector-core/src/config/mod.rs b/lib/vector-core/src/config/mod.rs index 0c6af5e09c258..c86848d7b0be5 100644 --- a/lib/vector-core/src/config/mod.rs +++ b/lib/vector-core/src/config/mod.rs @@ -421,6 +421,12 @@ impl From for LogNamespace { } } +impl From for bool { + fn from(x: LogNamespace) -> Self { + x == LogNamespace::Vector + } +} + /// A shortcut to specify no `LegacyKey` should be used (since otherwise a turbofish would be required) pub const NO_LEGACY_KEY: Option> = None; diff --git a/lib/vector-vrl-metrics/Cargo.toml b/lib/vector-vrl-metrics/Cargo.toml new file mode 100644 index 0000000000000..f0a17f8b76e32 --- /dev/null +++ b/lib/vector-vrl-metrics/Cargo.toml @@ -0,0 +1,15 @@ +[package] +name = "vector-vrl-metrics" +version = "0.1.0" +authors = ["Vector Contributors "] +edition = "2021" +publish = false +license = "MPL-2.0" + +[dependencies] +arc-swap.workspace = true +vrl.workspace = true +vector-core = { path = "../vector-core", default-features = false, features = ["vrl"] } +vector-common = { path = "../vector-common", default-features = false } +tokio = { version = "1.45.1", default-features = false } +tokio-stream = { version = "0.1.17", default-features = false } diff --git a/lib/vector-vrl-metrics/src/aggregate_vector_metrics.rs b/lib/vector-vrl-metrics/src/aggregate_vector_metrics.rs new file mode 100644 index 0000000000000..529e1df2b7824 --- /dev/null +++ 
b/lib/vector-vrl-metrics/src/aggregate_vector_metrics.rs @@ -0,0 +1,150 @@ +use std::collections::BTreeMap; +use vrl::prelude::expression::Expr; +use vrl::value; + +use vrl::prelude::*; + +use crate::common::resolve_tags; +use crate::common::validate_tags; +use crate::common::{Error, MetricsStorage}; + +fn aggregate_metrics( + metrics_storage: &MetricsStorage, + function: &Bytes, + key: Value, + tags: BTreeMap, +) -> Result { + let key_str = key.as_str().expect("argument must be a string"); + let metrics = metrics_storage.find_metrics(&key_str, tags); + + let metric_values = metrics.into_iter().filter_map(|m| match m.value() { + vector_core::event::MetricValue::Counter { value } + | vector_core::event::MetricValue::Gauge { value } => NotNan::new(*value).ok(), + _ => None, + }); + + Ok(match function.as_ref() { + b"sum" => metric_values.sum::>().into(), + b"avg" => { + let len = metric_values.clone().collect::>().len(); + (metric_values.sum::>() / len as f64).into() + } + b"max" => metric_values.max().map(Into::into).unwrap_or(Value::Null), + b"min" => metric_values.min().map(Into::into).unwrap_or(Value::Null), + _ => unreachable!(), + }) +} + +#[derive(Clone, Copy, Debug)] +pub struct AggregateVectorMetrics; + +fn aggregation_functions() -> Vec { + vec![value!("sum"), value!("avg"), value!("min"), value!("max")] +} + +impl Function for AggregateVectorMetrics { + fn identifier(&self) -> &'static str { + "aggregate_vector_metrics" + } + + fn parameters(&self) -> &'static [Parameter] { + &[ + Parameter { + keyword: "function", + kind: kind::BYTES, + required: true, + }, + Parameter { + keyword: "key", + kind: kind::BYTES, + required: true, + }, + Parameter { + keyword: "tags", + kind: kind::OBJECT, + required: false, + }, + ] + } + + fn examples(&self) -> &'static [Example] { + &[ + example! { + title: "Sum vector internal metrics matching the name", + source: r#"aggregate_vector_metrics("sum", "utilization")"#, + result: Ok("0.5"), + }, + example! 
{ + title: "Sum vector internal metrics matching the name and tags", + source: r#"aggregate_vector_metrics("sum", "utilization", tags: {"component_id": "test"})"#, + result: Ok("0.5"), + }, + example! { + title: "Average of vector internal metrics matching the name", + source: r#"aggregate_vector_metrics("avg", "utilization")"#, + result: Ok("0.5"), + }, + example! { + title: "Max of vector internal metrics matching the name", + source: r#"aggregate_vector_metrics("max", "utilization")"#, + result: Ok("0.5"), + }, + example! { + title: "Min of vector internal metrics matching the name", + source: r#"aggregate_vector_metrics("max", "utilization")"#, + result: Ok("0.5"), + }, + ] + } + + fn compile( + &self, + state: &TypeState, + ctx: &mut FunctionCompileContext, + arguments: ArgumentList, + ) -> Compiled { + let metrics = ctx + .get_external_context::() + .ok_or(Box::new(Error::MetricsStorageNotLoaded) as Box)? + .clone(); + let function = arguments + .required_enum("function", &aggregation_functions(), state)? 
+ .try_bytes() + .expect("aggregation function not bytes"); + let key = arguments.required("key"); + let tags = arguments.optional_object("tags")?.unwrap_or_default(); + validate_tags(state, &tags)?; + + Ok(AggregateVectorMetricsFn { + metrics, + function, + key, + tags, + } + .as_expr()) + } +} + +#[derive(Debug, Clone)] +struct AggregateVectorMetricsFn { + metrics: MetricsStorage, + function: Bytes, + key: Box, + tags: BTreeMap, +} + +impl FunctionExpression for AggregateVectorMetricsFn { + fn resolve(&self, ctx: &mut Context) -> Resolved { + let key = self.key.resolve(ctx)?; + aggregate_metrics( + &self.metrics, + &self.function, + key, + resolve_tags(ctx, &self.tags)?, + ) + } + + fn type_def(&self, _: &state::TypeState) -> TypeDef { + TypeDef::float().or_null().infallible() + } +} diff --git a/lib/vector-vrl-metrics/src/common.rs b/lib/vector-vrl-metrics/src/common.rs new file mode 100644 index 0000000000000..8f765c486d193 --- /dev/null +++ b/lib/vector-vrl-metrics/src/common.rs @@ -0,0 +1,818 @@ +use std::{collections::BTreeMap, sync::Arc, time::Duration}; +use tokio::time::interval; +use tokio_stream::{wrappers::IntervalStream, StreamExt}; +use vector_common::shutdown::ShutdownSignal; +use vrl::{ + diagnostic::Label, + prelude::{expression::Expr, *}, + value, +}; + +use arc_swap::ArcSwap; +use vector_core::{event::Metric, metrics::Controller}; + +#[derive(Debug)] +pub(crate) enum Error { + MetricsStorageNotLoaded, +} + +impl fmt::Display for Error { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Error::MetricsStorageNotLoaded => write!(f, "metrics storage not loaded"), + } + } +} + +impl std::error::Error for Error {} + +impl DiagnosticMessage for Error { + fn code(&self) -> usize { + 112 + } + + fn labels(&self) -> Vec
-
- {{ range $k, $v := . }} -
- - - {{ $k }} - + {{ if $v.deprecated }} +
+ + {{ partial "heading.html" (dict "text" "Deprecated" "level" 5 "toc_hide" true) }} + +
+
+ + + +
+
+ {{ if $v.deprecated_message }} + {{ $v.deprecated_message | markdownify }} + {{ else }} + This metric is deprecated. + {{ end }} +
+
+
+ {{ end }} - - {{ if not $v.required }} - {{ partial "badge.html" (dict "word" "optional" "color" "blue") }} - {{ end }} - -
+ {{ with $v.tags }} +
+
+ {{ range $k, $v := . }} +
+ + + {{ $k }} + - {{ with $v.description }} -
- {{ . | markdownify }} -
- {{ end }} -
+ + {{ if not $v.required }} + {{ partial "badge.html" (dict "word" "optional" "color" "blue") }} {{ end }} + + + + {{ with $v.description }} +
+ {{ . | markdownify }}
+ {{ end }} +
+ {{ end }}
- {{ end }} +
+ {{ end }}
{{ end }} -{{ end }} +{{ end }} \ No newline at end of file From 473e31cbbfd7c39fc2d5b0672db6da26e82e28b5 Mon Sep 17 00:00:00 2001 From: rowan Date: Fri, 16 Jan 2026 19:02:32 +0000 Subject: [PATCH 192/227] fix(host_metrics source): fix tcp netlink bug (#24441) * fix(host_metrics source): use recv_from_full in fetch_netlink_inet_headers add netlink to allowed words list * Update changelog * Use while let instead of loop --------- Co-authored-by: Thomas --- .github/actions/spelling/allow.txt | 1 + changelog.d/22487_tcp_netlink_parsing.fix.md | 3 +++ src/sources/host_metrics/tcp.rs | 6 ++++-- 3 files changed, 8 insertions(+), 2 deletions(-) create mode 100644 changelog.d/22487_tcp_netlink_parsing.fix.md diff --git a/.github/actions/spelling/allow.txt b/.github/actions/spelling/allow.txt index 85ca0f9f2981f..9b14c6dbd7086 100644 --- a/.github/actions/spelling/allow.txt +++ b/.github/actions/spelling/allow.txt @@ -355,6 +355,7 @@ netcat netdata Netflix netlify +netlink Neue neuronull Nextbook diff --git a/changelog.d/22487_tcp_netlink_parsing.fix.md b/changelog.d/22487_tcp_netlink_parsing.fix.md new file mode 100644 index 0000000000000..f98c5861f95f8 --- /dev/null +++ b/changelog.d/22487_tcp_netlink_parsing.fix.md @@ -0,0 +1,3 @@ +Fixed a `host_metrics` source issue that caused tcp metrics collection to fail with "Could not parse netlink response: invalid netlink buffer" errors on Linux systems. 
+ +authors: mushrowan diff --git a/src/sources/host_metrics/tcp.rs b/src/sources/host_metrics/tcp.rs index c800b5975d081..a544a3451fc7a 100644 --- a/src/sources/host_metrics/tcp.rs +++ b/src/sources/host_metrics/tcp.rs @@ -241,10 +241,12 @@ async fn fetch_netlink_inet_headers(addr_family: u8) -> Result Date: Tue, 20 Jan 2026 14:13:06 -0500 Subject: [PATCH 193/227] chore(website): update hugo templates to work with 0.152.2 (#24140) * Fix duplicated domains * chore(website): update hugo templates to work with 0.152.2 --- .../highlights/2020-09-18-adaptive-concurrency.md | 3 +-- .../en/highlights/2020-12-23-vector-top.md | 3 +-- website/layouts/partials/author-with-avatar.html | 15 ++++++--------- website/layouts/partials/content.html | 15 ++++++--------- 4 files changed, 14 insertions(+), 22 deletions(-) diff --git a/website/content/en/highlights/2020-09-18-adaptive-concurrency.md b/website/content/en/highlights/2020-09-18-adaptive-concurrency.md index 75aaf87dbd701..a7236e736224d 100644 --- a/website/content/en/highlights/2020-09-18-adaptive-concurrency.md +++ b/website/content/en/highlights/2020-09-18-adaptive-concurrency.md @@ -8,8 +8,7 @@ release: "0.11.0" hide_on_release_notes: false badges: type: "new feature" - domains: ["networking"] - domains: ["performance", "reliability"] + domains: ["networking", "performance", "reliability"] --- Vector 0.11 includes a new Adaptive Request Concurrency (ARC) feature that diff --git a/website/content/en/highlights/2020-12-23-vector-top.md b/website/content/en/highlights/2020-12-23-vector-top.md index 4c630f5c43914..1b99b30cc62e5 100644 --- a/website/content/en/highlights/2020-12-23-vector-top.md +++ b/website/content/en/highlights/2020-12-23-vector-top.md @@ -9,8 +9,7 @@ release: "0.12.0" hide_on_release_notes: false badges: type: "new feature" - domains: ["cli"] - domains: ["observability"] + domains: ["cli", "observability"] --- [`vector top`][top] is a command for the Vector [CLI] that displays both metrics emitted by 
your Vector instance as well diff --git a/website/layouts/partials/author-with-avatar.html b/website/layouts/partials/author-with-avatar.html index a19d5a1c8ecdf..3d9cb22054281 100644 --- a/website/layouts/partials/author-with-avatar.html +++ b/website/layouts/partials/author-with-avatar.html @@ -6,15 +6,12 @@ {{ $apiUrl := printf "https://api.github.com/users/%s" $handle }} {{ $name := $handle }} {{ $displayName := $handle }} - {{ with resources.GetRemote $apiUrl }} - {{ with .Err }} - {{/* Silently fall back to GitHub handle on error */}} - {{ else }} - {{ $data := . | transform.Unmarshal }} - {{ if $data.name }} - {{ $name = $data.name }} - {{ $displayName = printf "%s (%s)" $data.name $handle }} - {{ end }} + {{ $result := try (resources.GetRemote $apiUrl) }} + {{ with $result.Value }} + {{ $data := . | transform.Unmarshal }} + {{ if $data.name }} + {{ $name = $data.name }} + {{ $displayName = printf "%s (%s)" $data.name $handle }} {{ end }} {{ end }}
diff --git a/website/layouts/partials/content.html b/website/layouts/partials/content.html index c5f9520f7d284..0deb1bcfb5a12 100644 --- a/website/layouts/partials/content.html +++ b/website/layouts/partials/content.html @@ -39,15 +39,12 @@

,{{ end }} From a3bb693380ea7f7217737661b082521c5bc31a8e Mon Sep 17 00:00:00 2001 From: Thomas Date: Tue, 20 Jan 2026 16:18:37 -0500 Subject: [PATCH 194/227] chore(deps, internal docs): Add usage method to VRL functions (#24504) * Add usage method to VRL functions with descriptions from cue files * vdev build licenses --- Cargo.lock | 53 ++++++++--------- Cargo.toml | 1 + LICENSE-3rdparty.csv | 2 +- .../src/vrl_functions/parse_dnstap.rs | 4 ++ lib/enrichment/Cargo.toml | 2 + .../src/find_enrichment_table_records.rs | 7 +++ .../src/get_enrichment_table_record.rs | 8 +++ lib/enrichment/src/lib.rs | 57 +++++++++++++++++++ lib/vector-vrl-metrics/Cargo.toml | 1 + .../src/aggregate_vector_metrics.rs | 7 +++ .../src/find_vector_metrics.rs | 7 +++ .../src/get_vector_metric.rs | 7 +++ lib/vector-vrl-metrics/src/lib.rs | 7 +++ lib/vector-vrl/functions/Cargo.toml | 1 + lib/vector-vrl/functions/src/get_secret.rs | 4 ++ lib/vector-vrl/functions/src/remove_secret.rs | 4 ++ lib/vector-vrl/functions/src/set_secret.rs | 4 ++ .../functions/src/set_semantic_meaning.rs | 12 ++++ 18 files changed, 161 insertions(+), 27 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b97b30d26cec3..3d2d9ef2e2617 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2833,6 +2833,12 @@ dependencies = [ "tiny-keccak", ] +[[package]] +name = "const-str" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93e19f68b180ebff43d6d42005c4b5f046c65fcac28369ba8b3beaad633f9ec0" + [[package]] name = "convert_case" version = "0.4.0" @@ -3817,7 +3823,9 @@ version = "0.1.0" dependencies = [ "arc-swap", "chrono", + "const-str", "dyn-clone", + "indoc", "vrl", ] @@ -4059,21 +4067,9 @@ checksum = "4443176a9f2c162692bd3d352d745ef9413eec5782a80d8fd6f8a1ac692a07f7" [[package]] name = "fancy-regex" -version = "0.15.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6215aee357f8c7c989ebb4b8466ca4d7dc93b3957039f2fc3ea2ade8ea5f279" -dependencies = [ - 
"bit-set", - "derivative", - "regex-automata 0.4.8", - "regex-syntax", -] - -[[package]] -name = "fancy-regex" -version = "0.16.1" +version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf04c5ec15464ace8355a7b440a33aece288993475556d461154d7a62ad9947c" +checksum = "72cf461f865c862bb7dc573f643dd6a2b6842f7c30b07882b56bd148cc2761b8" dependencies = [ "bit-set", "regex-automata 0.4.8", @@ -5988,15 +5984,15 @@ dependencies = [ [[package]] name = "jsonschema" -version = "0.37.4" +version = "0.38.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73c9ffb2b5c56d58030e1b532d8e8389da94590515f118cf35b5cb68e4764a7e" +checksum = "89f50532ce4a0ba3ae930212908d8ec50e7806065c059fe9c75da2ece6132294" dependencies = [ "ahash 0.8.11", "bytecount", "data-encoding", "email_address", - "fancy-regex 0.16.1", + "fancy-regex", "fraction", "getrandom 0.3.4", "idna", @@ -9314,9 +9310,9 @@ dependencies = [ [[package]] name = "referencing" -version = "0.37.4" +version = "0.38.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4283168a506f0dcbdce31c9f9cce3129c924da4c6bca46e46707fcb746d2d70c" +checksum = "15a8af0c6bb8eaf8b07cb06fc31ff30ca6fe19fb99afa476c276d8b24f365b0b" dependencies = [ "ahash 0.8.11", "fluent-uri 0.4.1", @@ -9651,9 +9647,12 @@ dependencies = [ [[package]] name = "roxmltree" -version = "0.20.0" +version = "0.21.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c20b6793b5c2fa6553b250154b78d6d0db37e72700ae35fad9387a46f487c97" +checksum = "f1964b10c76125c36f8afe190065a4bf9a87bf324842c05701330bba9f1cacbb" +dependencies = [ + "memchr", +] [[package]] name = "rsa" @@ -9956,9 +9955,9 @@ dependencies = [ [[package]] name = "rustyline" -version = "16.0.0" +version = "17.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62fd9ca5ebc709e8535e8ef7c658eb51457987e48c98ead2be482172accc408d" +checksum = 
"e902948a25149d50edc1a8e0141aad50f54e22ba83ff988cf8f7c9ef07f50564" dependencies = [ "bitflags 2.10.0", "cfg-if", @@ -9970,7 +9969,7 @@ dependencies = [ "unicode-segmentation", "unicode-width 0.2.0", "utf8parse", - "windows-sys 0.59.0", + "windows-sys 0.60.2", ] [[package]] @@ -13154,6 +13153,7 @@ version = "0.1.0" dependencies = [ "dnstap-parser", "enrichment", + "indoc", "vector-vrl-metrics", "vrl", ] @@ -13163,6 +13163,7 @@ name = "vector-vrl-metrics" version = "0.1.0" dependencies = [ "arc-swap", + "const-str", "tokio", "tokio-stream", "vector-common", @@ -13218,7 +13219,7 @@ checksum = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d" [[package]] name = "vrl" version = "0.29.0" -source = "git+https://github.com/vectordotdev/vrl.git?branch=main#53f01dfa6226fb3f4093d1bf838319c522f2dcad" +source = "git+https://github.com/vectordotdev/vrl.git?branch=main#7ab2c5516a0c46e25d6f6efbd16977af4a3441a0" dependencies = [ "aes", "aes-siv", @@ -13251,7 +13252,7 @@ dependencies = [ "dyn-clone", "encoding_rs", "exitcode", - "fancy-regex 0.15.0", + "fancy-regex", "flate2", "grok", "hex", diff --git a/Cargo.toml b/Cargo.toml index be3e047bdf894..03be659ba9864 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -149,6 +149,7 @@ chrono-tz = { version = "0.10.4", default-features = false, features = ["serde"] clap = { version = "4.5.53", default-features = false, features = ["derive", "error-context", "env", "help", "std", "string", "usage", "wrap_help"] } clap_complete = "4.5.65" colored = { version = "3.0.0", default-features = false } +const-str = { version = "1.0.0", default-features = false } crossbeam-utils = { version = "0.8.21", default-features = false } darling = { version = "0.20.11", default-features = false, features = ["suggestions"] } dashmap = { version = "6.1.0", default-features = false } diff --git a/LICENSE-3rdparty.csv b/LICENSE-3rdparty.csv index 6ce8b1ef3d827..603be009102ba 100644 --- a/LICENSE-3rdparty.csv +++ b/LICENSE-3rdparty.csv @@ -184,6 +184,7 
@@ concurrent-queue,https://github.com/smol-rs/concurrent-queue,Apache-2.0 OR MIT," const-oid,https://github.com/RustCrypto/formats/tree/master/const-oid,Apache-2.0 OR MIT,RustCrypto Developers const-random,https://github.com/tkaitchuck/constrandom,MIT OR Apache-2.0,Tom Kaitchuck const-random-macro,https://github.com/tkaitchuck/constrandom,MIT OR Apache-2.0,Tom Kaitchuck +const-str,https://github.com/Nugine/const-str,MIT,Nugine convert_case,https://github.com/rutrum/convert-case,MIT,David Purdum convert_case,https://github.com/rutrum/convert-case,MIT,rutrum cookie,https://github.com/SergioBenitez/cookie-rs,MIT OR Apache-2.0,"Sergio Benitez , Alex Crichton " @@ -278,7 +279,6 @@ executor-trait,https://github.com/amqp-rs/executor-trait,Apache-2.0 OR MIT,Marc- exitcode,https://github.com/benwilber/exitcode,Apache-2.0,Ben Wilber fakedata_generator,https://github.com/kevingimbel/fakedata_generator,MIT,Kevin Gimbel fallible-iterator,https://github.com/sfackler/rust-fallible-iterator,MIT OR Apache-2.0,Steven Fackler -fancy-regex,https://github.com/fancy-regex/fancy-regex,MIT,"Raph Levien , Robin Stocker " fancy-regex,https://github.com/fancy-regex/fancy-regex,MIT,"Raph Levien , Robin Stocker , Keith Hall " fastrand,https://github.com/smol-rs/fastrand,Apache-2.0 OR MIT,Stjepan Glavina ff,https://github.com/zkcrypto/ff,MIT OR Apache-2.0,"Sean Bowe , Jack Grigg " diff --git a/lib/dnstap-parser/src/vrl_functions/parse_dnstap.rs b/lib/dnstap-parser/src/vrl_functions/parse_dnstap.rs index b32fdcfeb3106..f61ef30c8f28a 100644 --- a/lib/dnstap-parser/src/vrl_functions/parse_dnstap.rs +++ b/lib/dnstap-parser/src/vrl_functions/parse_dnstap.rs @@ -13,6 +13,10 @@ impl Function for ParseDnstap { "parse_dnstap" } + fn usage(&self) -> &'static str { + "Parses the `value` as base64 encoded DNSTAP data." 
+ } + fn parameters(&self) -> &'static [Parameter] { &[ Parameter { diff --git a/lib/enrichment/Cargo.toml b/lib/enrichment/Cargo.toml index 087ab7f60bfc0..1d2a67a2c26b6 100644 --- a/lib/enrichment/Cargo.toml +++ b/lib/enrichment/Cargo.toml @@ -8,5 +8,7 @@ publish = false [dependencies] arc-swap.workspace = true chrono.workspace = true +const-str.workspace = true dyn-clone = { version = "1.0.20", default-features = false } +indoc.workspace = true vrl.workspace = true diff --git a/lib/enrichment/src/find_enrichment_table_records.rs b/lib/enrichment/src/find_enrichment_table_records.rs index 369eb5d21140b..1b016aa901a14 100644 --- a/lib/enrichment/src/find_enrichment_table_records.rs +++ b/lib/enrichment/src/find_enrichment_table_records.rs @@ -51,6 +51,13 @@ impl Function for FindEnrichmentTableRecords { "find_enrichment_table_records" } + fn usage(&self) -> &'static str { + const_str::concat!( + "Searches an [enrichment table](/docs/reference/glossary/#enrichment-tables) for rows that match the provided condition.\n\n", + super::ENRICHMENT_TABLE_EXPLAINER + ) + } + fn parameters(&self) -> &'static [Parameter] { &[ Parameter { diff --git a/lib/enrichment/src/get_enrichment_table_record.rs b/lib/enrichment/src/get_enrichment_table_record.rs index efdbc92542a19..ef2103702f8ca 100644 --- a/lib/enrichment/src/get_enrichment_table_record.rs +++ b/lib/enrichment/src/get_enrichment_table_record.rs @@ -48,6 +48,14 @@ impl Function for GetEnrichmentTableRecord { "get_enrichment_table_record" } + fn usage(&self) -> &'static str { + const USAGE: &str = const_str::concat!( + "Searches an [enrichment table](/docs/reference/glossary/#enrichment-tables) for a row that matches the provided condition. A single row must be matched. 
If no rows are found or more than one row is found, an error is returned.\n\n", + super::ENRICHMENT_TABLE_EXPLAINER + ); + USAGE + } + fn parameters(&self) -> &'static [Parameter] { &[ Parameter { diff --git a/lib/enrichment/src/lib.rs b/lib/enrichment/src/lib.rs index e69cf979a1dc0..a73f18f163181 100644 --- a/lib/enrichment/src/lib.rs +++ b/lib/enrichment/src/lib.rs @@ -9,6 +9,7 @@ mod test_util; mod vrl_util; use dyn_clone::DynClone; +use indoc::indoc; pub use tables::{TableRegistry, TableSearch}; use vrl::{ compiler::Function, @@ -97,3 +98,59 @@ pub fn vrl_functions() -> Vec> { Box::new(find_enrichment_table_records::FindEnrichmentTableRecords) as _, ] } + +pub(crate) const ENRICHMENT_TABLE_EXPLAINER: &str = indoc! {r#" + For `file` enrichment tables, this condition needs to be a VRL object in which + the key-value pairs indicate a field to search mapped to a value to search in that field. + This function returns the rows that match the provided condition(s). _All_ fields need to + match for rows to be returned; if any fields do not match, then no rows are returned. + + There are currently three forms of search criteria: + + 1. **Exact match search**. The given field must match the value exactly. Case sensitivity + can be specified using the `case_sensitive` argument. An exact match search can use an + index directly into the dataset, which should make this search fairly "cheap" from a + performance perspective. + + 2. **Wildcard match search**. The given fields specified by the exact match search may also + be matched exactly to the value provided to the `wildcard` parameter. + A wildcard match search can also use an index directly into the dataset. + + 3. **Date range search**. The given field must be greater than or equal to the `from` date + and/or less than or equal to the `to` date. A date range search involves + sequentially scanning through the rows that have been located using any exact match + criteria. 
This can be an expensive operation if there are many rows returned by any exact + match criteria. Therefore, use date ranges as the _only_ criteria when the enrichment + data set is very small. + + For `geoip` and `mmdb` enrichment tables, this condition needs to be a VRL object with a single key-value pair + whose value needs to be a valid IP address. Example: `{"ip": .ip }`. If a return field is expected + and without a value, `null` is used. This table can return the following fields: + + * ISP databases: + * `autonomous_system_number` + * `autonomous_system_organization` + * `isp` + * `organization` + + * City databases: + * `city_name` + * `continent_code` + * `country_code` + * `country_name` + * `region_code` + * `region_name` + * `metro_code` + * `latitude` + * `longitude` + * `postal_code` + * `timezone` + + * Connection-Type databases: + * `connection_type` + + To use this function, you need to update your configuration to + include an + [`enrichment_tables`](/docs/reference/configuration/global-options/#enrichment_tables) + parameter. 
+"#}; diff --git a/lib/vector-vrl-metrics/Cargo.toml b/lib/vector-vrl-metrics/Cargo.toml index f0a17f8b76e32..6f3bbb7eaa84b 100644 --- a/lib/vector-vrl-metrics/Cargo.toml +++ b/lib/vector-vrl-metrics/Cargo.toml @@ -8,6 +8,7 @@ license = "MPL-2.0" [dependencies] arc-swap.workspace = true +const-str.workspace = true vrl.workspace = true vector-core = { path = "../vector-core", default-features = false, features = ["vrl"] } vector-common = { path = "../vector-common", default-features = false } diff --git a/lib/vector-vrl-metrics/src/aggregate_vector_metrics.rs b/lib/vector-vrl-metrics/src/aggregate_vector_metrics.rs index 529e1df2b7824..d51c132a9f22a 100644 --- a/lib/vector-vrl-metrics/src/aggregate_vector_metrics.rs +++ b/lib/vector-vrl-metrics/src/aggregate_vector_metrics.rs @@ -47,6 +47,13 @@ impl Function for AggregateVectorMetrics { "aggregate_vector_metrics" } + fn usage(&self) -> &'static str { + const_str::concat!( + "Aggregates internal Vector metrics, using one of 4 aggregation functions, filtering by name and optionally by tags. Returns the aggregated value. Only includes counter and gauge metrics.\n\n", + crate::VECTOR_METRICS_EXPLAINER + ) + } + fn parameters(&self) -> &'static [Parameter] { &[ Parameter { diff --git a/lib/vector-vrl-metrics/src/find_vector_metrics.rs b/lib/vector-vrl-metrics/src/find_vector_metrics.rs index 98017a0fcc905..5ca5535a0179a 100644 --- a/lib/vector-vrl-metrics/src/find_vector_metrics.rs +++ b/lib/vector-vrl-metrics/src/find_vector_metrics.rs @@ -30,6 +30,13 @@ impl Function for FindVectorMetrics { "find_vector_metrics" } + fn usage(&self) -> &'static str { + const_str::concat!( + "Searches internal Vector metrics by name and optionally by tags. 
Returns all matching metrics.\n\n", + crate::VECTOR_METRICS_EXPLAINER + ) + } + fn parameters(&self) -> &'static [Parameter] { &[ Parameter { diff --git a/lib/vector-vrl-metrics/src/get_vector_metric.rs b/lib/vector-vrl-metrics/src/get_vector_metric.rs index eb669a4ac537c..5f24705a9c65c 100644 --- a/lib/vector-vrl-metrics/src/get_vector_metric.rs +++ b/lib/vector-vrl-metrics/src/get_vector_metric.rs @@ -27,6 +27,13 @@ impl Function for GetVectorMetric { "get_vector_metric" } + fn usage(&self) -> &'static str { + const_str::concat!( + "Searches internal Vector metrics by name and optionally by tags. Returns the first matching metric.\n\n", + crate::VECTOR_METRICS_EXPLAINER + ) + } + fn parameters(&self) -> &'static [Parameter] { &[ Parameter { diff --git a/lib/vector-vrl-metrics/src/lib.rs b/lib/vector-vrl-metrics/src/lib.rs index f3246159f5fbb..6df6e762cf110 100644 --- a/lib/vector-vrl-metrics/src/lib.rs +++ b/lib/vector-vrl-metrics/src/lib.rs @@ -8,6 +8,13 @@ mod find_vector_metrics; mod get_vector_metric; pub use common::MetricsStorage; +pub(crate) const VECTOR_METRICS_EXPLAINER: &str = "\ +Internal Vector metrics functions work with a snapshot of the metrics. The interval at which \ +the snapshot is updated is controlled through the \ +`metrics_storage_refresh_period` (/docs/reference/configuration/global-options/#metrics_storage_refresh_period) \ +global option. 
Higher values can reduce performance impact of that process, but may cause \ +stale metrics data in the snapshot."; + pub fn all() -> Vec> { vec![ Box::new(get_vector_metric::GetVectorMetric) as _, diff --git a/lib/vector-vrl/functions/Cargo.toml b/lib/vector-vrl/functions/Cargo.toml index 5b08d1bf5f40e..4e630b9515e92 100644 --- a/lib/vector-vrl/functions/Cargo.toml +++ b/lib/vector-vrl/functions/Cargo.toml @@ -7,6 +7,7 @@ publish = false license = "MPL-2.0" [dependencies] +indoc.workspace = true vrl.workspace = true enrichment = { path = "../../enrichment" } dnstap-parser = { path = "../../dnstap-parser", optional = true } diff --git a/lib/vector-vrl/functions/src/get_secret.rs b/lib/vector-vrl/functions/src/get_secret.rs index a1fbfc593ba70..b643d34152e3c 100644 --- a/lib/vector-vrl/functions/src/get_secret.rs +++ b/lib/vector-vrl/functions/src/get_secret.rs @@ -17,6 +17,10 @@ impl Function for GetSecret { "get_secret" } + fn usage(&self) -> &'static str { + "Returns the value of the given secret from an event." + } + fn parameters(&self) -> &'static [Parameter] { &[Parameter { keyword: "key", diff --git a/lib/vector-vrl/functions/src/remove_secret.rs b/lib/vector-vrl/functions/src/remove_secret.rs index 4788c7cedfb48..5bdc75153b551 100644 --- a/lib/vector-vrl/functions/src/remove_secret.rs +++ b/lib/vector-vrl/functions/src/remove_secret.rs @@ -14,6 +14,10 @@ impl Function for RemoveSecret { "remove_secret" } + fn usage(&self) -> &'static str { + "Removes a secret from an event." + } + fn parameters(&self) -> &'static [Parameter] { &[Parameter { keyword: "key", diff --git a/lib/vector-vrl/functions/src/set_secret.rs b/lib/vector-vrl/functions/src/set_secret.rs index e6ba1e310c31a..5a128b448c825 100644 --- a/lib/vector-vrl/functions/src/set_secret.rs +++ b/lib/vector-vrl/functions/src/set_secret.rs @@ -21,6 +21,10 @@ impl Function for SetSecret { "set_secret" } + fn usage(&self) -> &'static str { + "Sets the given secret in the event." 
+ } + fn parameters(&self) -> &'static [Parameter] { &[ Parameter { diff --git a/lib/vector-vrl/functions/src/set_semantic_meaning.rs b/lib/vector-vrl/functions/src/set_semantic_meaning.rs index 14d45acbdd519..19dc0be342b06 100644 --- a/lib/vector-vrl/functions/src/set_semantic_meaning.rs +++ b/lib/vector-vrl/functions/src/set_semantic_meaning.rs @@ -9,6 +9,8 @@ use vrl::{ prelude::*, }; +use indoc::indoc; + #[derive(Debug, Default, Clone)] pub struct MeaningList(pub BTreeMap); @@ -34,6 +36,16 @@ impl Function for SetSemanticMeaning { "set_semantic_meaning" } + fn usage(&self) -> &'static str { + indoc! {" + Sets a semantic meaning for an event. **Note**: This function assigns + meaning at startup, and has _no_ runtime behavior. It is suggested + to put all calls to this function at the beginning of a VRL function. The function + cannot be conditionally called. For example, using an if statement cannot stop the meaning + from being assigned. + "} + } + fn parameters(&self) -> &'static [Parameter] { &[ Parameter { From 249657ba198470dc619f4c0e676ab52392b5469c Mon Sep 17 00:00:00 2001 From: Bruce Guenter Date: Tue, 20 Jan 2026 17:00:54 -0600 Subject: [PATCH 195/227] chore(codecs): Merge `src/codecs` into `lib/codecs` (#24516) Vector has long had two `codecs` libraries: a separate `codecs` crate in `lib` and a `codecs` module in `src`. This change merges both into the crate to reduce confusion over where codec functionality is found. 
--- Cargo.lock | 2 + lib/codecs/Cargo.toml | 5 ++ .../codecs/src}/decoding/config.rs | 9 +-- .../codecs/src}/decoding/decoder.rs | 34 ++++----- lib/codecs/src/decoding/mod.rs | 4 + .../codecs/src}/encoding/config.rs | 34 +++++---- .../codecs/src}/encoding/encoder.rs | 73 ++++++++++++------- lib/codecs/src/encoding/mod.rs | 6 ++ .../codecs/src}/encoding/transformer.rs | 41 +++++------ .../codecs/src/internal_events.rs | 46 ++++++++---- lib/codecs/src/lib.rs | 30 +++++--- .../codecs => lib/codecs/src}/ready_frames.rs | 0 .../src/internal_event.rs | 3 +- lib/vector-lib/Cargo.toml | 2 +- src/codecs/decoding/mod.rs | 5 -- src/codecs/encoding/mod.rs | 7 -- src/codecs/mod.rs | 15 ---- src/components/validation/resources/event.rs | 2 +- src/components/validation/resources/mod.rs | 3 +- src/internal_events/mod.rs | 2 - src/lib.rs | 4 +- src/sinks/amqp/encoder.rs | 4 +- src/sinks/amqp/sink.rs | 4 +- src/sinks/clickhouse/config.rs | 6 +- src/sinks/clickhouse/request_builder.rs | 2 +- src/sinks/databend/encoding.rs | 7 +- src/sinks/doris/common.rs | 7 +- .../gcp_chronicle/chronicle_unstructured.rs | 2 +- src/sinks/http/batch.rs | 2 +- src/sinks/mqtt/request_builder.rs | 4 +- src/sinks/pulsar/encoder.rs | 4 +- src/sinks/redis/sink.rs | 4 +- src/sinks/splunk_hec/logs/encoder.rs | 6 +- src/sinks/util/encoding.rs | 40 +++++----- src/sources/aws_kinesis_firehose/handlers.rs | 2 +- src/sources/aws_sqs/source.rs | 2 +- src/sources/datadog_agent/logs.rs | 2 +- src/sources/datadog_agent/tests.rs | 8 +- src/sources/demo_logs.rs | 2 +- src/sources/exec/mod.rs | 5 +- src/sources/exec/tests.rs | 2 +- src/sources/file_descriptors/mod.rs | 2 +- src/sources/http_client/client.rs | 2 +- src/sources/http_server.rs | 2 +- src/sources/mqtt/source.rs | 4 +- src/sources/nats/source.rs | 2 +- src/sources/okta/client.rs | 10 +-- src/sources/pulsar.rs | 5 +- src/sources/redis/mod.rs | 5 +- src/sources/socket/udp.rs | 2 +- src/sources/util/message_decoding.rs | 4 +- src/sources/util/net/tcp/mod.rs | 17 
++--- 52 files changed, 260 insertions(+), 237 deletions(-) rename {src/codecs => lib/codecs/src}/decoding/config.rs (87%) rename {src/codecs => lib/codecs/src}/decoding/decoder.rs (89%) rename {src/codecs => lib/codecs/src}/encoding/config.rs (90%) rename {src/codecs => lib/codecs/src}/encoding/encoder.rs (86%) rename {src/codecs => lib/codecs/src}/encoding/transformer.rs (95%) rename src/internal_events/codecs.rs => lib/codecs/src/internal_events.rs (75%) rename {src/codecs => lib/codecs/src}/ready_frames.rs (100%) delete mode 100644 src/codecs/decoding/mod.rs delete mode 100644 src/codecs/encoding/mod.rs delete mode 100644 src/codecs/mod.rs diff --git a/Cargo.lock b/Cargo.lock index 3d2d9ef2e2617..90324b9babd6e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2601,6 +2601,7 @@ dependencies = [ "indoc", "influxdb-line-protocol", "memchr", + "metrics", "opentelemetry-proto", "ordered-float 4.6.0", "prost 0.12.6", @@ -2625,6 +2626,7 @@ dependencies = [ "tracing-test", "uuid", "vector-common", + "vector-common-macros", "vector-config", "vector-config-macros", "vector-core", diff --git a/lib/codecs/Cargo.toml b/lib/codecs/Cargo.toml index 01d9836f666b3..d8ef9e1057361 100644 --- a/lib/codecs/Cargo.toml +++ b/lib/codecs/Cargo.toml @@ -24,9 +24,11 @@ derivative.workspace = true derive_more = { version = "2.0.1", optional = true, features = ["from", "display"] } dyn-clone = { version = "1", default-features = false } flate2.workspace = true +futures.workspace = true influxdb-line-protocol = { version = "2", default-features = false } lookup = { package = "vector-lookup", path = "../vector-lookup", default-features = false, features = ["test"] } memchr = { version = "2", default-features = false } +metrics.workspace = true opentelemetry-proto = { path = "../opentelemetry-proto", optional = true } ordered-float.workspace = true prost.workspace = true @@ -46,11 +48,13 @@ tokio = { workspace = true, features = ["full"] } tracing.workspace = true vrl.workspace = true 
vector-common = { path = "../vector-common", default-features = false } +vector-common-macros.workspace = true vector-config = { path = "../vector-config", default-features = false } vector-config-macros = { path = "../vector-config-macros", default-features = false } vector-core = { path = "../vector-core", default-features = false, features = ["vrl"] } vector-vrl-functions.workspace = true toml = { version = "0.9.8", optional = true } + [dev-dependencies] futures.workspace = true indoc.workspace = true @@ -66,3 +70,4 @@ vrl.workspace = true arrow = [] opentelemetry = ["dep:opentelemetry-proto"] syslog = ["dep:syslog_loose", "dep:strum", "dep:derive_more", "dep:serde-aux", "dep:toml"] +test = [] diff --git a/src/codecs/decoding/config.rs b/lib/codecs/src/decoding/config.rs similarity index 87% rename from src/codecs/decoding/config.rs rename to lib/codecs/src/decoding/config.rs index 2670b76c977dc..6ed15f8243867 100644 --- a/src/codecs/decoding/config.rs +++ b/lib/codecs/src/decoding/config.rs @@ -1,10 +1,7 @@ use serde::{Deserialize, Serialize}; -use vector_lib::{ - codecs::decoding::{DeserializerConfig, FramingConfig}, - config::LogNamespace, -}; +use vector_core::config::LogNamespace; -use crate::codecs::Decoder; +use crate::decoding::{Decoder, DeserializerConfig, FramingConfig}; /// Config used to build a `Decoder`. #[derive(Debug, Clone, Deserialize, Serialize)] @@ -43,7 +40,7 @@ impl DecodingConfig { } /// Builds a `Decoder` from the provided configuration. - pub fn build(&self) -> vector_lib::Result { + pub fn build(&self) -> vector_common::Result { // Build the framer. 
let framer = self.framing.build(); diff --git a/src/codecs/decoding/decoder.rs b/lib/codecs/src/decoding/decoder.rs similarity index 89% rename from src/codecs/decoding/decoder.rs rename to lib/codecs/src/decoding/decoder.rs index 499dedb10e9e6..0796e61ff82f3 100644 --- a/src/codecs/decoding/decoder.rs +++ b/lib/codecs/src/decoding/decoder.rs @@ -1,18 +1,18 @@ use bytes::{Bytes, BytesMut}; use smallvec::SmallVec; -use vector_lib::{ - codecs::decoding::{ - BoxedFramingError, BytesDeserializer, Deserializer, Error, Framer, NewlineDelimitedDecoder, - format::Deserializer as _, - }, - config::LogNamespace, -}; +use vector_common::internal_event::emit; +use vector_core::{config::LogNamespace, event::Event}; use crate::{ - event::Event, + decoding::format::Deserializer as _, + decoding::{ + BoxedFramingError, BytesDeserializer, Deserializer, Error, Framer, NewlineDelimitedDecoder, + }, internal_events::{DecoderDeserializeError, DecoderFramingError}, }; +type DecodedFrame = (SmallVec<[Event; 1]>, usize); + /// A decoder that can decode structured events from a byte stream / byte /// messages. #[derive(Clone)] @@ -60,9 +60,9 @@ impl Decoder { fn handle_framing_result( &mut self, frame: Result, BoxedFramingError>, - ) -> Result, usize)>, Error> { + ) -> Result, Error> { let frame = frame.map_err(|error| { - emit!(DecoderFramingError { error: &error }); + emit(DecoderFramingError { error: &error }); Error::FramingError(error) })?; @@ -72,7 +72,7 @@ impl Decoder { } /// Parses a frame using the included deserializer, and handles any errors by logging. - pub fn deserializer_parse(&self, frame: Bytes) -> Result<(SmallVec<[Event; 1]>, usize), Error> { + pub fn deserializer_parse(&self, frame: Bytes) -> Result { let byte_size = frame.len(); // Parse structured events from the byte frame. 
@@ -80,14 +80,14 @@ impl Decoder { .parse(frame, self.log_namespace) .map(|events| (events, byte_size)) .map_err(|error| { - emit!(DecoderDeserializeError { error: &error }); + emit(DecoderDeserializeError { error: &error }); Error::ParsingError(error) }) } } impl tokio_util::codec::Decoder for Decoder { - type Item = (SmallVec<[Event; 1]>, usize); + type Item = DecodedFrame; type Error = Error; fn decode(&mut self, buf: &mut BytesMut) -> Result, Self::Error> { @@ -106,13 +106,13 @@ mod tests { use bytes::Bytes; use futures::{StreamExt, stream}; use tokio_util::{codec::FramedRead, io::StreamReader}; - use vector_lib::codecs::{ - JsonDeserializer, NewlineDelimitedDecoder, StreamDecodingError, - decoding::{Deserializer, Framer}, - }; use vrl::value::Value; use super::Decoder; + use crate::{ + JsonDeserializer, NewlineDelimitedDecoder, StreamDecodingError, + decoding::{Deserializer, Framer}, + }; #[tokio::test] async fn framed_read_recover_from_error() { diff --git a/lib/codecs/src/decoding/mod.rs b/lib/codecs/src/decoding/mod.rs index f27c30d93d501..c87337856454a 100644 --- a/lib/codecs/src/decoding/mod.rs +++ b/lib/codecs/src/decoding/mod.rs @@ -1,6 +1,8 @@ //! A collection of support structures that are used in the process of decoding //! bytes into events. 
+mod config; +mod decoder; mod error; pub mod format; pub mod framing; @@ -8,6 +10,8 @@ pub mod framing; use std::fmt::Debug; use bytes::{Bytes, BytesMut}; +pub use config::DecodingConfig; +pub use decoder::Decoder; pub use error::StreamDecodingError; pub use format::{ BoxedDeserializer, BytesDeserializer, BytesDeserializerConfig, GelfDeserializer, diff --git a/src/codecs/encoding/config.rs b/lib/codecs/src/encoding/config.rs similarity index 90% rename from src/codecs/encoding/config.rs rename to lib/codecs/src/encoding/config.rs index 47bec858ffb08..32d18c9e83d00 100644 --- a/src/codecs/encoding/config.rs +++ b/lib/codecs/src/encoding/config.rs @@ -1,14 +1,13 @@ -use crate::codecs::{Encoder, EncoderKind, Transformer}; -use vector_lib::{ - codecs::{ - CharacterDelimitedEncoder, LengthDelimitedEncoder, NewlineDelimitedEncoder, - encoding::{Framer, FramingConfig, Serializer, SerializerConfig}, - }, - configurable::configurable_component, +use vector_config::configurable_component; + +use super::{Encoder, EncoderKind, Transformer}; +use crate::encoding::{ + CharacterDelimitedEncoder, Framer, FramingConfig, LengthDelimitedEncoder, + NewlineDelimitedEncoder, Serializer, SerializerConfig, }; -#[cfg(feature = "codecs-opentelemetry")] -use vector_lib::codecs::BytesEncoder; +#[cfg(feature = "opentelemetry")] +use crate::encoding::BytesEncoder; /// Encoding configuration. #[configurable_component] @@ -43,7 +42,7 @@ impl EncodingConfig { } /// Build the `Serializer` for this config. - pub fn build(&self) -> crate::Result { + pub fn build(&self) -> vector_common::Result { self.encoding.build() } } @@ -100,7 +99,7 @@ impl EncodingConfigWithFraming { } /// Build the `Framer` and `Serializer` for this config. 
- pub fn build(&self, sink_type: SinkType) -> crate::Result<(Framer, Serializer)> { + pub fn build(&self, sink_type: SinkType) -> vector_common::Result<(Framer, Serializer)> { let framer = self.framing.as_ref().map(|framing| framing.build()); let serializer = self.encoding.build()?; @@ -132,9 +131,9 @@ impl EncodingConfigWithFraming { | Serializer::RawMessage(_) | Serializer::Text(_), ) => NewlineDelimitedEncoder::default().into(), - #[cfg(feature = "codecs-syslog")] + #[cfg(feature = "syslog")] (None, Serializer::Syslog(_)) => NewlineDelimitedEncoder::default().into(), - #[cfg(feature = "codecs-opentelemetry")] + #[cfg(feature = "opentelemetry")] (None, Serializer::Otlp(_)) => BytesEncoder.into(), }; @@ -142,7 +141,10 @@ impl EncodingConfigWithFraming { } /// Build the `Transformer` and `EncoderKind` for this config. - pub fn build_encoder(&self, sink_type: SinkType) -> crate::Result<(Transformer, EncoderKind)> { + pub fn build_encoder( + &self, + sink_type: SinkType, + ) -> vector_common::Result<(Transformer, EncoderKind)> { let (framer, serializer) = self.build(sink_type)?; let encoder = EncoderKind::Framed(Box::new(Encoder::::new(framer, serializer))); Ok((self.transformer(), encoder)) @@ -172,10 +174,10 @@ where #[cfg(test)] mod test { - use vector_lib::lookup::lookup_v2::{ConfigValuePath, parse_value_path}; + use lookup::lookup_v2::{ConfigValuePath, parse_value_path}; use super::*; - use crate::codecs::encoding::TimestampFormat; + use crate::encoding::TimestampFormat; #[test] fn deserialize_encoding_config() { diff --git a/src/codecs/encoding/encoder.rs b/lib/codecs/src/encoding/encoder.rs similarity index 86% rename from src/codecs/encoding/encoder.rs rename to lib/codecs/src/encoding/encoder.rs index a36b98a6496cb..4924dd05447b1 100644 --- a/src/codecs/encoding/encoder.rs +++ b/lib/codecs/src/encoding/encoder.rs @@ -1,14 +1,12 @@ use bytes::BytesMut; use tokio_util::codec::Encoder as _; -#[cfg(feature = "codecs-arrow")] -use 
vector_lib::codecs::encoding::ArrowStreamSerializer; -use vector_lib::codecs::{ - CharacterDelimitedEncoder, NewlineDelimitedEncoder, TextSerializerConfig, - encoding::{Error, Framer, Serializer}, -}; +use vector_common::internal_event::emit; +use vector_core::event::Event; +#[cfg(feature = "arrow")] +use crate::encoding::ArrowStreamSerializer; use crate::{ - event::Event, + encoding::{Error, Framer, Serializer}, internal_events::{EncoderFramingError, EncoderSerializeError}, }; @@ -16,7 +14,7 @@ use crate::{ #[derive(Debug, Clone)] pub enum BatchSerializer { /// Arrow IPC stream format serializer. - #[cfg(feature = "codecs-arrow")] + #[cfg(feature = "arrow")] Arrow(ArrowStreamSerializer), } @@ -38,7 +36,7 @@ impl BatchEncoder { } /// Get the HTTP content type. - #[cfg(feature = "codecs-arrow")] + #[cfg(feature = "arrow")] pub const fn content_type(&self) -> &'static str { match &self.serializer { BatchSerializer::Arrow(_) => "application/vnd.apache.arrow.stream", @@ -53,10 +51,10 @@ impl tokio_util::codec::Encoder> for BatchEncoder { fn encode(&mut self, events: Vec, buffer: &mut BytesMut) -> Result<(), Self::Error> { #[allow(unreachable_patterns)] match &mut self.serializer { - #[cfg(feature = "codecs-arrow")] + #[cfg(feature = "arrow")] BatchSerializer::Arrow(serializer) => { serializer.encode(events, buffer).map_err(|err| { - use vector_lib::codecs::encoding::ArrowEncodingError; + use crate::encoding::ArrowEncodingError; match err { ArrowEncodingError::NullConstraint { .. 
} => { Error::SchemaConstraintViolation(Box::new(err)) @@ -76,7 +74,7 @@ pub enum EncoderKind { /// Uses framing to encode individual events Framed(Box>), /// Encodes events in batches without framing - #[cfg(feature = "codecs-arrow")] + #[cfg(feature = "arrow")] Batch(BatchEncoder), } @@ -92,6 +90,8 @@ where impl Default for Encoder { fn default() -> Self { + use crate::encoding::{NewlineDelimitedEncoder, TextSerializerConfig}; + Self { framer: NewlineDelimitedEncoder::default().into(), serializer: TextSerializerConfig::default().build().into(), @@ -101,6 +101,8 @@ impl Default for Encoder { impl Default for Encoder<()> { fn default() -> Self { + use crate::encoding::TextSerializerConfig; + Self { framer: (), serializer: TextSerializerConfig::default().build().into(), @@ -127,7 +129,7 @@ where /// Serialize the event without applying framing, at the start of the provided buffer. fn serialize_at_start(&mut self, event: Event, buffer: &mut BytesMut) -> Result<(), Error> { self.serializer.encode(event, buffer).map_err(|error| { - emit!(EncoderSerializeError { error: &error }); + emit(EncoderSerializeError { error: &error }); Error::SerializingError(error) }) } @@ -155,7 +157,9 @@ impl Encoder { pub const fn batch_prefix(&self) -> &[u8] { match (&self.framer, &self.serializer) { ( - Framer::CharacterDelimited(CharacterDelimitedEncoder { delimiter: b',' }), + Framer::CharacterDelimited(crate::encoding::CharacterDelimitedEncoder { + delimiter: b',', + }), Serializer::Json(_) | Serializer::NativeJson(_), ) => b"[", _ => &[], @@ -166,7 +170,9 @@ impl Encoder { pub const fn batch_suffix(&self, empty: bool) -> &[u8] { match (&self.framer, &self.serializer, empty) { ( - Framer::CharacterDelimited(CharacterDelimitedEncoder { delimiter: b',' }), + Framer::CharacterDelimited(crate::encoding::CharacterDelimitedEncoder { + delimiter: b',', + }), Serializer::Json(_) | Serializer::NativeJson(_), _, ) => b"]", @@ -183,7 +189,9 @@ impl Encoder { } ( Serializer::Gelf(_) | 
Serializer::Json(_) | Serializer::NativeJson(_), - Framer::CharacterDelimited(CharacterDelimitedEncoder { delimiter: b',' }), + Framer::CharacterDelimited(crate::encoding::CharacterDelimitedEncoder { + delimiter: b',', + }), ) => "application/json", (Serializer::Native(_), _) | (Serializer::Protobuf(_), _) => "application/octet-stream", ( @@ -198,9 +206,9 @@ impl Encoder { | Serializer::Text(_), _, ) => "text/plain", - #[cfg(feature = "codecs-syslog")] + #[cfg(feature = "syslog")] (Serializer::Syslog(_), _) => "text/plain", - #[cfg(feature = "codecs-opentelemetry")] + #[cfg(feature = "opentelemetry")] (Serializer::Otlp(_), _) => "application/x-protobuf", } } @@ -233,7 +241,7 @@ impl tokio_util::codec::Encoder for Encoder { // Frame the serialized event. self.framer.encode((), &mut payload).map_err(|error| { - emit!(EncoderFramingError { error: &error }); + emit(EncoderFramingError { error: &error }); Error::FramingError(error) })?; @@ -261,11 +269,12 @@ impl tokio_util::codec::Encoder for Encoder<()> { #[cfg(test)] mod tests { use bytes::BufMut; - use futures_util::{SinkExt, StreamExt}; + use futures::{SinkExt, StreamExt}; use tokio_util::codec::FramedWrite; - use vector_lib::{codecs::encoding::BoxedFramingError, event::LogEvent}; + use vector_core::event::LogEvent; use super::*; + use crate::encoding::BoxedFramingError; #[derive(Debug, Clone)] struct ParenEncoder; @@ -325,7 +334,9 @@ mod tests { async fn test_encode_events_sink_empty() { let encoder = Encoder::::new( Framer::Boxed(Box::new(ParenEncoder::new())), - TextSerializerConfig::default().build().into(), + crate::encoding::TextSerializerConfig::default() + .build() + .into(), ); let source = futures::stream::iter(vec![ Event::Log(LogEvent::from("foo")), @@ -344,7 +355,9 @@ mod tests { async fn test_encode_events_sink_non_empty() { let encoder = Encoder::::new( Framer::Boxed(Box::new(ParenEncoder::new())), - TextSerializerConfig::default().build().into(), + crate::encoding::TextSerializerConfig::default() + 
.build() + .into(), ); let source = futures::stream::iter(vec![ Event::Log(LogEvent::from("bar")), @@ -363,7 +376,9 @@ mod tests { async fn test_encode_events_sink_empty_handle_framing_error() { let encoder = Encoder::::new( Framer::Boxed(Box::new(ErrorNthEncoder::new(ParenEncoder::new(), 1))), - TextSerializerConfig::default().build().into(), + crate::encoding::TextSerializerConfig::default() + .build() + .into(), ); let source = futures::stream::iter(vec![ Event::Log(LogEvent::from("foo")), @@ -383,7 +398,9 @@ mod tests { async fn test_encode_events_sink_non_empty_handle_framing_error() { let encoder = Encoder::::new( Framer::Boxed(Box::new(ErrorNthEncoder::new(ParenEncoder::new(), 1))), - TextSerializerConfig::default().build().into(), + crate::encoding::TextSerializerConfig::default() + .build() + .into(), ); let source = futures::stream::iter(vec![ Event::Log(LogEvent::from("bar")), @@ -402,8 +419,10 @@ mod tests { #[tokio::test] async fn test_encode_batch_newline() { let encoder = Encoder::::new( - Framer::NewlineDelimited(NewlineDelimitedEncoder::default()), - TextSerializerConfig::default().build().into(), + Framer::NewlineDelimited(crate::encoding::NewlineDelimitedEncoder::default()), + crate::encoding::TextSerializerConfig::default() + .build() + .into(), ); let source = futures::stream::iter(vec![ Event::Log(LogEvent::from("bar")), diff --git a/lib/codecs/src/encoding/mod.rs b/lib/codecs/src/encoding/mod.rs index 7d611790cb613..5e3885d2752e4 100644 --- a/lib/codecs/src/encoding/mod.rs +++ b/lib/codecs/src/encoding/mod.rs @@ -2,10 +2,15 @@ //! events into bytes. 
pub mod chunking; +mod config; +mod encoder; pub mod format; pub mod framing; pub mod serializer; +mod transformer; pub use chunking::{Chunker, Chunking, GelfChunker}; +pub use config::{EncodingConfig, EncodingConfigWithFraming, SinkType}; +pub use encoder::{BatchEncoder, BatchSerializer, Encoder, EncoderKind}; #[cfg(feature = "arrow")] pub use format::{ ArrowEncodingError, ArrowStreamSerializer, ArrowStreamSerializerConfig, SchemaProvider, @@ -33,6 +38,7 @@ pub use framing::{ #[cfg(feature = "arrow")] pub use serializer::BatchSerializerConfig; pub use serializer::{Serializer, SerializerConfig}; +pub use transformer::{TimestampFormat, Transformer}; /// An error that occurred while building an encoder. pub type BuildError = Box; diff --git a/src/codecs/encoding/transformer.rs b/lib/codecs/src/encoding/transformer.rs similarity index 95% rename from src/codecs/encoding/transformer.rs rename to lib/codecs/src/encoding/transformer.rs index 65304989af291..2fb84f00e6fd1 100644 --- a/src/codecs/encoding/transformer.rs +++ b/lib/codecs/src/encoding/transformer.rs @@ -1,21 +1,19 @@ #![deny(missing_docs)] -use core::fmt::Debug; use std::collections::BTreeMap; use chrono::{DateTime, Utc}; +use lookup::{PathPrefix, event_path, lookup_v2::ConfigValuePath}; use ordered_float::NotNan; use serde::{Deserialize, Deserializer}; -use vector_lib::{ - configurable::configurable_component, - event::{LogEvent, MaybeAsLogMut}, - lookup::{PathPrefix, event_path, lookup_v2::ConfigValuePath}, +use vector_config::configurable_component; +use vector_core::{ + event::{Event, LogEvent, MaybeAsLogMut}, schema::meaning, + serde::is_default, }; use vrl::{path::OwnedValuePath, value::Value}; -use crate::{event::Event, serde::is_default}; - /// Transformations to prepare an event for serialization. 
#[configurable_component(no_deser)] #[derive(Clone, Debug, Default, PartialEq, Eq)] @@ -72,7 +70,7 @@ impl Transformer { only_fields: Option>, except_fields: Option>, timestamp_format: Option, - ) -> Result { + ) -> vector_common::Result { Self::validate_fields(only_fields.as_ref(), except_fields.as_ref())?; Ok(Self { @@ -83,7 +81,7 @@ impl Transformer { } /// Get the `Transformer`'s `only_fields`. - #[cfg(test)] + #[cfg(any(test, feature = "test"))] pub const fn only_fields(&self) -> &Option> { &self.only_fields } @@ -104,7 +102,7 @@ impl Transformer { fn validate_fields( only_fields: Option<&Vec>, except_fields: Option<&Vec>, - ) -> crate::Result<()> { + ) -> vector_common::Result<()> { if let (Some(only_fields), Some(except_fields)) = (only_fields, except_fields) && except_fields .iter() @@ -188,7 +186,8 @@ impl Transformer { } } for (k, v) in unix_timestamps { - log.parse_path_and_insert(k, v).unwrap(); + log.parse_path_and_insert(k, v) + .expect("timestamp fields must allow insertion"); } } else { // root is not an object @@ -213,7 +212,8 @@ impl Transformer { ts.timestamp_nanos_opt().expect("Timestamp out of range") }), TimestampFormat::UnixFloat => self.format_timestamps(log, |ts| { - NotNan::new(ts.timestamp_micros() as f64 / 1e6).unwrap() + NotNan::new(ts.timestamp_micros() as f64 / 1e6) + .expect("this division will never produce a NaN") }), // RFC3339 is the default serialization of a timestamp. TimestampFormat::Rfc3339 => (), @@ -225,11 +225,11 @@ impl Transformer { /// /// Returns `Err` if the new `except_fields` fail validation, i.e. are not mutually exclusive /// with `only_fields`. 
- #[cfg(test)] + #[cfg(any(test, feature = "test"))] pub fn set_except_fields( &mut self, except_fields: Option>, - ) -> crate::Result<()> { + ) -> vector_common::Result<()> { Self::validate_fields(self.only_fields.as_ref(), except_fields.as_ref())?; self.except_fields = except_fields; Ok(()) @@ -265,15 +265,14 @@ mod tests { use std::{collections::BTreeMap, sync::Arc}; use indoc::indoc; - use vector_lib::{ - btreemap, + use lookup::path::parse_target_path; + use vector_core::{ config::{LogNamespace, log_schema}, - lookup::path::parse_target_path, + schema, }; - use vrl::value::Kind; + use vrl::{btreemap, value::Kind}; use super::*; - use crate::config::schema; #[test] fn serialize() { @@ -450,7 +449,7 @@ mod tests { Kind::object(btreemap! { "thing" => Kind::object(btreemap! { "service" => Kind::bytes(), - }) + }), }), [LogNamespace::Vector], ); @@ -490,7 +489,7 @@ mod tests { Kind::object(btreemap! { "thing" => Kind::object(btreemap! { "service" => Kind::bytes(), - }) + }), }), [LogNamespace::Vector], ); diff --git a/src/internal_events/codecs.rs b/lib/codecs/src/internal_events.rs similarity index 75% rename from src/internal_events/codecs.rs rename to lib/codecs/src/internal_events.rs index 27980af51b799..134fee16ecf8b 100644 --- a/src/internal_events/codecs.rs +++ b/lib/codecs/src/internal_events.rs @@ -1,11 +1,16 @@ +//! Internal events for codecs. + use metrics::counter; -use vector_lib::NamedInternalEvent; -use vector_lib::internal_event::{ - ComponentEventsDropped, InternalEvent, UNINTENTIONAL, error_stage, error_type, +use tracing::error; +use vector_common::internal_event::{ + ComponentEventsDropped, InternalEvent, UNINTENTIONAL, emit, error_stage, error_type, }; +use vector_common_macros::NamedInternalEvent; #[derive(Debug, NamedInternalEvent)] +/// Emitted when a decoder framing error occurs. pub struct DecoderFramingError { + /// The framing error that occurred. 
pub error: E, } @@ -29,8 +34,10 @@ impl InternalEvent for DecoderFramingError { } #[derive(Debug, NamedInternalEvent)] +/// Emitted when a decoder fails to deserialize a frame. pub struct DecoderDeserializeError<'a> { - pub error: &'a crate::Error, + /// The deserialize error that occurred. + pub error: &'a vector_common::Error, } impl InternalEvent for DecoderDeserializeError<'_> { @@ -53,8 +60,10 @@ impl InternalEvent for DecoderDeserializeError<'_> { } #[derive(Debug, NamedInternalEvent)] +/// Emitted when an encoder framing error occurs. pub struct EncoderFramingError<'a> { - pub error: &'a vector_lib::codecs::encoding::BoxedFramingError, + /// The framing error that occurred. + pub error: &'a crate::encoding::BoxedFramingError, } impl InternalEvent for EncoderFramingError<'_> { @@ -74,13 +83,15 @@ impl InternalEvent for EncoderFramingError<'_> { "stage" => error_stage::SENDING, ) .increment(1); - emit!(ComponentEventsDropped:: { count: 1, reason }); + emit(ComponentEventsDropped:: { count: 1, reason }); } } #[derive(Debug, NamedInternalEvent)] +/// Emitted when an encoder fails to serialize a frame. pub struct EncoderSerializeError<'a> { - pub error: &'a crate::Error, + /// The serialization error that occurred. + pub error: &'a vector_common::Error, } impl InternalEvent for EncoderSerializeError<'_> { @@ -100,16 +111,19 @@ impl InternalEvent for EncoderSerializeError<'_> { "stage" => error_stage::SENDING, ) .increment(1); - emit!(ComponentEventsDropped:: { + emit(ComponentEventsDropped:: { count: 1, - reason: SERIALIZE_REASON + reason: SERIALIZE_REASON, }); } } #[derive(Debug, NamedInternalEvent)] +/// Emitted when writing encoded bytes fails. pub struct EncoderWriteError<'a, E> { + /// The write error that occurred. pub error: &'a E, + /// The number of events dropped by the failed write. 
pub count: usize, } @@ -129,7 +143,7 @@ impl InternalEvent for EncoderWriteError<'_, E> { ) .increment(1); if self.count > 0 { - emit!(ComponentEventsDropped:: { + emit(ComponentEventsDropped:: { count: self.count, reason, }); @@ -137,13 +151,15 @@ impl InternalEvent for EncoderWriteError<'_, E> { } } -#[cfg(feature = "codecs-arrow")] +#[cfg(feature = "arrow")] #[derive(Debug, NamedInternalEvent)] +/// Emitted when encoding violates a schema constraint. pub struct EncoderNullConstraintError<'a> { - pub error: &'a crate::Error, + /// The schema constraint error that occurred. + pub error: &'a vector_common::Error, } -#[cfg(feature = "codecs-arrow")] +#[cfg(feature = "arrow")] impl InternalEvent for EncoderNullConstraintError<'_> { fn emit(self) { const CONSTRAINT_REASON: &str = "Schema constraint violation."; @@ -161,9 +177,9 @@ impl InternalEvent for EncoderNullConstraintError<'_> { "stage" => error_stage::SENDING, ) .increment(1); - emit!(ComponentEventsDropped:: { + emit(ComponentEventsDropped:: { count: 1, - reason: CONSTRAINT_REASON + reason: CONSTRAINT_REASON, }); } } diff --git a/lib/codecs/src/lib.rs b/lib/codecs/src/lib.rs index e386bb30f378a..c88eb531dc1a7 100644 --- a/lib/codecs/src/lib.rs +++ b/lib/codecs/src/lib.rs @@ -8,27 +8,33 @@ mod common; pub mod decoding; pub mod encoding; pub mod gelf; +pub mod internal_events; +mod ready_frames; pub use decoding::{ BytesDecoder, BytesDecoderConfig, BytesDeserializer, BytesDeserializerConfig, - CharacterDelimitedDecoder, CharacterDelimitedDecoderConfig, GelfDeserializer, - GelfDeserializerConfig, JsonDeserializer, JsonDeserializerConfig, LengthDelimitedDecoder, - LengthDelimitedDecoderConfig, NativeDeserializer, NativeDeserializerConfig, - NativeJsonDeserializer, NativeJsonDeserializerConfig, NewlineDelimitedDecoder, - NewlineDelimitedDecoderConfig, OctetCountingDecoder, OctetCountingDecoderConfig, - StreamDecodingError, VarintLengthDelimitedDecoder, VarintLengthDelimitedDecoderConfig, + 
CharacterDelimitedDecoder, CharacterDelimitedDecoderConfig, Decoder, DecodingConfig, + GelfDeserializer, GelfDeserializerConfig, JsonDeserializer, JsonDeserializerConfig, + LengthDelimitedDecoder, LengthDelimitedDecoderConfig, NativeDeserializer, + NativeDeserializerConfig, NativeJsonDeserializer, NativeJsonDeserializerConfig, + NewlineDelimitedDecoder, NewlineDelimitedDecoderConfig, OctetCountingDecoder, + OctetCountingDecoderConfig, StreamDecodingError, VarintLengthDelimitedDecoder, + VarintLengthDelimitedDecoderConfig, }; #[cfg(feature = "syslog")] pub use decoding::{SyslogDeserializer, SyslogDeserializerConfig}; pub use encoding::{ - BytesEncoder, BytesEncoderConfig, CharacterDelimitedEncoder, CharacterDelimitedEncoderConfig, - CsvSerializer, CsvSerializerConfig, GelfSerializer, GelfSerializerConfig, JsonSerializer, - JsonSerializerConfig, LengthDelimitedEncoder, LengthDelimitedEncoderConfig, LogfmtSerializer, - LogfmtSerializerConfig, NativeJsonSerializer, NativeJsonSerializerConfig, NativeSerializer, - NativeSerializerConfig, NewlineDelimitedEncoder, NewlineDelimitedEncoderConfig, - RawMessageSerializer, RawMessageSerializerConfig, TextSerializer, TextSerializerConfig, + BatchEncoder, BatchSerializer, BytesEncoder, BytesEncoderConfig, CharacterDelimitedEncoder, + CharacterDelimitedEncoderConfig, CsvSerializer, CsvSerializerConfig, Encoder, EncoderKind, + EncodingConfig, EncodingConfigWithFraming, GelfSerializer, GelfSerializerConfig, + JsonSerializer, JsonSerializerConfig, LengthDelimitedEncoder, LengthDelimitedEncoderConfig, + LogfmtSerializer, LogfmtSerializerConfig, NativeJsonSerializer, NativeJsonSerializerConfig, + NativeSerializer, NativeSerializerConfig, NewlineDelimitedEncoder, + NewlineDelimitedEncoderConfig, RawMessageSerializer, RawMessageSerializerConfig, SinkType, + TextSerializer, TextSerializerConfig, TimestampFormat, Transformer, }; pub use gelf::{VALID_FIELD_REGEX, gelf_fields}; +pub use ready_frames::ReadyFrames; use 
vector_config_macros::configurable_component; /// The user configuration to choose the metric tag strategy. diff --git a/src/codecs/ready_frames.rs b/lib/codecs/src/ready_frames.rs similarity index 100% rename from src/codecs/ready_frames.rs rename to lib/codecs/src/ready_frames.rs diff --git a/lib/vector-common-macros/src/internal_event.rs b/lib/vector-common-macros/src/internal_event.rs index aed9e71bfa9a0..58972625d64f2 100644 --- a/lib/vector-common-macros/src/internal_event.rs +++ b/lib/vector-common-macros/src/internal_event.rs @@ -27,7 +27,8 @@ pub fn derive_impl_named_internal_event(item: TokenStream) -> TokenStream { let pkg_name = std::env::var("CARGO_PKG_NAME").unwrap_or_default(); let internal_event_path = if pkg_name == "vector-common" { quote! { crate::internal_event } - } else if pkg_name.starts_with("vector-") || pkg_name == "dnstap-parser" { + } else if pkg_name.starts_with("vector-") || pkg_name == "codecs" || pkg_name == "dnstap-parser" + { // Most vector-* crates depend on vector-common but not vector-lib quote! 
{ ::vector_common::internal_event } } else { diff --git a/lib/vector-lib/Cargo.toml b/lib/vector-lib/Cargo.toml index c72af97fdaa62..6c46f70a019bd 100644 --- a/lib/vector-lib/Cargo.toml +++ b/lib/vector-lib/Cargo.toml @@ -34,5 +34,5 @@ opentelemetry = ["dep:opentelemetry-proto", "codecs/opentelemetry"] prometheus = ["dep:prometheus-parser"] proptest = ["vector-lookup/proptest", "vrl/proptest"] syslog = ["codecs/syslog"] -test = ["vector-core/test"] +test = ["codecs/test", "vector-core/test"] vrl = ["vector-core/vrl", "dep:vrl"] diff --git a/src/codecs/decoding/mod.rs b/src/codecs/decoding/mod.rs deleted file mode 100644 index ad4cfdb444fd6..0000000000000 --- a/src/codecs/decoding/mod.rs +++ /dev/null @@ -1,5 +0,0 @@ -mod config; -mod decoder; - -pub use config::DecodingConfig; -pub use decoder::Decoder; diff --git a/src/codecs/encoding/mod.rs b/src/codecs/encoding/mod.rs deleted file mode 100644 index 36d637bd75090..0000000000000 --- a/src/codecs/encoding/mod.rs +++ /dev/null @@ -1,7 +0,0 @@ -mod config; -mod encoder; -mod transformer; - -pub use config::{EncodingConfig, EncodingConfigWithFraming, SinkType}; -pub use encoder::{BatchEncoder, BatchSerializer, Encoder, EncoderKind}; -pub use transformer::{TimestampFormat, Transformer}; diff --git a/src/codecs/mod.rs b/src/codecs/mod.rs deleted file mode 100644 index 32b0e9efb7f8b..0000000000000 --- a/src/codecs/mod.rs +++ /dev/null @@ -1,15 +0,0 @@ -//! A collection of codecs that can be used to transform between bytes streams / -//! byte messages, byte frames and structured events. 
- -#![deny(missing_docs)] - -mod decoding; -mod encoding; -mod ready_frames; - -pub use decoding::{Decoder, DecodingConfig}; -pub use encoding::{ - BatchEncoder, BatchSerializer, Encoder, EncoderKind, EncodingConfig, EncodingConfigWithFraming, - SinkType, TimestampFormat, Transformer, -}; -pub use ready_frames::ReadyFrames; diff --git a/src/components/validation/resources/event.rs b/src/components/validation/resources/event.rs index f97ff4b5c4adf..b34493837248a 100644 --- a/src/components/validation/resources/event.rs +++ b/src/components/validation/resources/event.rs @@ -12,7 +12,7 @@ use vector_lib::{ event::{Event, LogEvent}, }; -use crate::codecs::Encoder; +use vector_lib::codecs::Encoder; /// A test case event for deserialization from yaml file. /// This is an intermediary step to TestEvent. diff --git a/src/components/validation/resources/mod.rs b/src/components/validation/resources/mod.rs index 85f72f2aaf35f..6b3fa43a1b6ba 100644 --- a/src/components/validation/resources/mod.rs +++ b/src/components/validation/resources/mod.rs @@ -6,7 +6,7 @@ use std::sync::Arc; use tokio::sync::{Mutex, mpsc}; use vector_lib::{ codecs::{ - BytesEncoder, + BytesEncoder, Decoder, DecodingConfig, Encoder, EncodingConfig, EncodingConfigWithFraming, decoding::{self, DeserializerConfig}, encoding::{ self, Framer, FramingConfig, JsonSerializerConfig, SerializerConfig, @@ -26,7 +26,6 @@ use super::{ RunnerMetrics, sync::{Configuring, TaskCoordinator}, }; -use crate::codecs::{Decoder, DecodingConfig, Encoder, EncodingConfig, EncodingConfigWithFraming}; /// The codec used by the external resource. 
/// diff --git a/src/internal_events/mod.rs b/src/internal_events/mod.rs index f22f896336070..30f282686357a 100644 --- a/src/internal_events/mod.rs +++ b/src/internal_events/mod.rs @@ -28,7 +28,6 @@ mod aws_kinesis_firehose; #[cfg(any(feature = "sources-aws_s3", feature = "sources-aws_sqs",))] mod aws_sqs; mod batch; -mod codecs; mod common; mod conditions; #[cfg(feature = "sources-datadog_agent")] @@ -184,7 +183,6 @@ pub(crate) use self::aws_kinesis::*; pub(crate) use self::aws_kinesis_firehose::*; #[cfg(any(feature = "sources-aws_s3", feature = "sources-aws_sqs",))] pub(crate) use self::aws_sqs::*; -pub(crate) use self::codecs::*; #[cfg(feature = "sources-datadog_agent")] pub(crate) use self::datadog_agent::*; #[cfg(feature = "sinks-datadog_metrics")] diff --git a/src/lib.rs b/src/lib.rs index 7e66be254e94a..d4d4ab1eeb38a 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -32,6 +32,8 @@ extern crate tracing; extern crate vector_lib; pub use indoc::indoc; +// re-export codecs for convenience +pub use vector_lib::codecs; #[cfg(all(feature = "tikv-jemallocator", not(feature = "allocation-tracing")))] #[global_allocator] @@ -72,8 +74,6 @@ pub mod app; pub mod async_read; #[cfg(feature = "aws-config")] pub mod aws; -#[allow(unreachable_pub)] -pub mod codecs; pub mod common; pub mod completion; mod convert_config; diff --git a/src/sinks/amqp/encoder.rs b/src/sinks/amqp/encoder.rs index 07ab035c5efec..ec403b39cfd54 100644 --- a/src/sinks/amqp/encoder.rs +++ b/src/sinks/amqp/encoder.rs @@ -9,8 +9,8 @@ use crate::sinks::prelude::*; #[derive(Clone, Debug)] pub(super) struct AmqpEncoder { - pub(super) encoder: crate::codecs::Encoder<()>, - pub(super) transformer: crate::codecs::Transformer, + pub(super) encoder: vector_lib::codecs::Encoder<()>, + pub(super) transformer: vector_lib::codecs::Transformer, } impl encoding::Encoder for AmqpEncoder { diff --git a/src/sinks/amqp/sink.rs b/src/sinks/amqp/sink.rs index 058eaf22bc10f..ba00cbcee98cc 100644 --- a/src/sinks/amqp/sink.rs +++ 
b/src/sinks/amqp/sink.rs @@ -32,7 +32,7 @@ pub(super) struct AmqpSink { routing_key: Option