From 491b69f364bcc0d75f0183a6bd288cb8684d86f0 Mon Sep 17 00:00:00 2001 From: hopinheimer Date: Thu, 19 Feb 2026 23:07:31 -0500 Subject: [PATCH 001/127] proto node versioning --- consensus/fork_choice/src/fork_choice.rs | 30 +++ consensus/proto_array/src/error.rs | 7 + consensus/proto_array/src/lib.rs | 2 +- consensus/proto_array/src/proto_array.rs | 183 ++++++++++++------ .../src/proto_array_fork_choice.rs | 13 ++ 5 files changed, 176 insertions(+), 59 deletions(-) diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index 9744b9fa084..5edd9b139df 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -138,6 +138,10 @@ pub enum InvalidBlock { finalized_root: Hash256, block_ancestor: Option, }, + MissingExecutionPayloadBid{ + block_slot: Slot, + block_root: Hash256, + } } #[derive(Debug)] @@ -241,6 +245,7 @@ pub struct QueuedAttestation { attesting_indices: Vec, block_root: Hash256, target_epoch: Epoch, + payload_present: bool, } impl<'a, E: EthSpec> From> for QueuedAttestation { @@ -250,6 +255,7 @@ impl<'a, E: EthSpec> From> for QueuedAttestation { attesting_indices: a.attesting_indices_to_vec(), block_root: a.data().beacon_block_root, target_epoch: a.data().target.epoch, + payload_present: a.data().index == 1, } } } @@ -882,6 +888,25 @@ where ExecutionStatus::irrelevant() }; + let (execution_payload_parent_hash, execution_payload_block_hash) = + if let Ok(signed_bid) = block.body().signed_execution_payload_bid() { + ( + Some(signed_bid.message.parent_block_hash), + Some(signed_bid.message.block_hash), + ) + } else { + if spec.fork_name_at_slot::(block.slot()).gloas_enabled() { + return Err(Error::InvalidBlock( + InvalidBlock::MissingExecutionPayloadBid{ + block_slot: block.slot(), + block_root, + } + + )) + } + (None, None) + }; + // This does not apply a vote to the block, it just makes fork choice aware of the block so // it can still be identified as the head even if it 
doesn't have any votes. self.proto_array.process_block::( @@ -908,10 +933,14 @@ where execution_status, unrealized_justified_checkpoint: Some(unrealized_justified_checkpoint), unrealized_finalized_checkpoint: Some(unrealized_finalized_checkpoint), + execution_payload_parent_hash, + execution_payload_block_hash, + }, current_slot, self.justified_checkpoint(), self.finalized_checkpoint(), + spec, )?; Ok(()) @@ -1103,6 +1132,7 @@ where if attestation.data().slot < self.fc_store.get_current_slot() { for validator_index in attestation.attesting_indices_iter() { + let payload_present = attestation.data().index == 1; self.proto_array.process_attestation( *validator_index as usize, attestation.data().beacon_block_root, diff --git a/consensus/proto_array/src/error.rs b/consensus/proto_array/src/error.rs index 35cb4007b78..c3e60277a3a 100644 --- a/consensus/proto_array/src/error.rs +++ b/consensus/proto_array/src/error.rs @@ -54,6 +54,13 @@ pub enum Error { }, InvalidEpochOffset(u64), Arith(ArithError), + GloasNotImplemented, + InvalidNodeVariant{ + block_root: Hash256, + }, + BrokenBlock{ + block_root: Hash256, + }, } impl From for Error { diff --git a/consensus/proto_array/src/lib.rs b/consensus/proto_array/src/lib.rs index 964e836d91d..222f9274781 100644 --- a/consensus/proto_array/src/lib.rs +++ b/consensus/proto_array/src/lib.rs @@ -9,7 +9,7 @@ pub use crate::justified_balances::JustifiedBalances; pub use crate::proto_array::{InvalidationOperation, calculate_committee_fraction}; pub use crate::proto_array_fork_choice::{ Block, DisallowedReOrgOffsets, DoNotReOrg, ExecutionStatus, ProposerHeadError, - ProposerHeadInfo, ProtoArrayForkChoice, ReOrgThreshold, + ProposerHeadInfo, ProtoArrayForkChoice, ReOrgThreshold, PayloadStatus, }; pub use error::Error; diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 5bfcdae463d..d7b1ec63135 100644 --- a/consensus/proto_array/src/proto_array.rs +++ 
b/consensus/proto_array/src/proto_array.rs @@ -1,5 +1,5 @@ use crate::error::InvalidBestNodeInfo; -use crate::{Block, ExecutionStatus, JustifiedBalances, error::Error}; +use crate::{Block, ExecutionStatus, JustifiedBalances, error::Error, PayloadStatus}; use fixed_bytes::FixedBytesExtended; use serde::{Deserialize, Serialize}; use ssz::Encode; @@ -68,47 +68,68 @@ impl InvalidationOperation { } } -pub type ProtoNode = ProtoNodeV17; #[superstruct( - variants(V17), + variants(V17, V29), variant_attributes(derive(Clone, PartialEq, Debug, Encode, Decode, Serialize, Deserialize)), - no_enum )] +#[derive(PartialEq, Debug, Encode, Decode, Serialize, Deserialize, Clone)] +#[ssz(enum_behaviour = "transparent")] pub struct ProtoNode { /// The `slot` is not necessary for `ProtoArray`, it just exists so external components can /// easily query the block slot. This is useful for upstream fork choice logic. + #[superstruct(getter(copy))] pub slot: Slot, /// The `state_root` is not necessary for `ProtoArray` either, it also just exists for upstream /// components (namely attestation verification). + #[superstruct(getter(copy))] pub state_root: Hash256, /// The root that would be used for the `attestation.data.target.root` if a LMD vote was cast /// for this block. /// /// The `target_root` is not necessary for `ProtoArray` either, it also just exists for upstream /// components (namely fork choice attestation verification). 
+ #[superstruct(getter(copy))] pub target_root: Hash256, pub current_epoch_shuffling_id: AttestationShufflingId, pub next_epoch_shuffling_id: AttestationShufflingId, + #[superstruct(getter(copy))] pub root: Hash256, + #[superstruct(getter(copy))] #[ssz(with = "four_byte_option_usize")] pub parent: Option, - #[superstruct(only(V17))] + #[superstruct(only(V17, V29), partial_getter(copy))] pub justified_checkpoint: Checkpoint, - #[superstruct(only(V17))] + #[superstruct(only(V17, V29), partial_getter(copy))] pub finalized_checkpoint: Checkpoint, + #[superstruct(getter(copy))] pub weight: u64, + #[superstruct(getter(copy))] #[ssz(with = "four_byte_option_usize")] pub best_child: Option, + #[superstruct(getter(copy))] #[ssz(with = "four_byte_option_usize")] pub best_descendant: Option, /// Indicates if an execution node has marked this block as valid. Also contains the execution /// block hash. + #[superstruct(only(V17), partial_getter(copy))] pub execution_status: ExecutionStatus, + #[superstruct(getter(copy))] #[ssz(with = "four_byte_option_checkpoint")] pub unrealized_justified_checkpoint: Option, + #[superstruct(getter(copy))] #[ssz(with = "four_byte_option_checkpoint")] pub unrealized_finalized_checkpoint: Option, + + /// We track the parent payload status from which the current node was extended. + #[superstruct(only(V29), partial_getter(copy))] + pub parent_payload_status: PayloadStatus, + #[superstruct(only(V29), partial_getter(copy))] + pub empty_payload_weight: u64, + #[superstruct(only(V29), partial_getter(copy))] + pub full_payload_weight: u64, + #[superstruct(only(V29), partial_getter(copy))] + pub execution_payload_block_hash: ExecutionBlockHash, } #[derive(PartialEq, Debug, Encode, Decode, Serialize, Deserialize, Copy, Clone)] @@ -181,16 +202,14 @@ impl ProtoArray { // There is no need to adjust the balances or manage parent of the zero hash since it // is an alias to the genesis block. 
The weight applied to the genesis block is // irrelevant as we _always_ choose it and it's impossible for it to have a parent. - if node.root == Hash256::zero() { + if node.root() == Hash256::zero() { continue; } - let execution_status_is_invalid = node.execution_status.is_invalid(); - - let mut node_delta = if execution_status_is_invalid { + let mut node_delta = if let Ok(proto_node) = node.as_v17() && proto_node.execution_status.is_invalid() { // If the node has an invalid execution payload, reduce its weight to zero. 0_i64 - .checked_sub(node.weight as i64) + .checked_sub(node.weight() as i64) .ok_or(Error::InvalidExecutionDeltaOverflow(node_index))? } else { deltas @@ -202,7 +221,7 @@ impl ProtoArray { // If we find the node for which the proposer boost was previously applied, decrease // the delta by the previous score amount. if self.previous_proposer_boost.root != Hash256::zero() - && self.previous_proposer_boost.root == node.root + && self.previous_proposer_boost.root == node.root() // Invalid nodes will always have a weight of zero so there's no need to subtract // the proposer boost delta. && !execution_status_is_invalid @@ -217,7 +236,7 @@ impl ProtoArray { // https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/fork-choice.md#get_latest_attesting_balance if let Some(proposer_score_boost) = spec.proposer_score_boost && proposer_boost_root != Hash256::zero() - && proposer_boost_root == node.root + && proposer_boost_root == node.root() // Invalid nodes (or their ancestors) should not receive a proposer boost. && !execution_status_is_invalid { @@ -232,7 +251,7 @@ impl ProtoArray { // Apply the delta to the node. if execution_status_is_invalid { // Invalid nodes always have a weight of 0. - node.weight = 0 + node.weight() = 0 } else if node_delta < 0 { // Note: I am conflicted about whether to use `saturating_sub` or `checked_sub` // here. 
@@ -243,19 +262,19 @@ impl ProtoArray { // // However, I am not fully convinced that some valid case for `saturating_sub` does // not exist. - node.weight = node - .weight + node.weight() = node + .weight() .checked_sub(node_delta.unsigned_abs()) .ok_or(Error::DeltaOverflow(node_index))?; } else { node.weight = node - .weight + .weight() .checked_add(node_delta as u64) .ok_or(Error::DeltaOverflow(node_index))?; } // Update the parent delta (if any). - if let Some(parent_index) = node.parent { + if let Some(parent_index) = node.parent() { let parent_delta = deltas .get_mut(parent_index) .ok_or(Error::InvalidParentDelta(parent_index))?; @@ -283,7 +302,7 @@ impl ProtoArray { .ok_or(Error::InvalidNodeIndex(node_index))?; // If the node has a parent, try to update its best-child and best-descendant. - if let Some(parent_index) = node.parent { + if let Some(parent_index) = node.parent() { self.maybe_update_best_child_and_descendant::( parent_index, node_index, @@ -306,6 +325,7 @@ impl ProtoArray { current_slot: Slot, best_justified_checkpoint: Checkpoint, best_finalized_checkpoint: Checkpoint, + spec: &ChainSpec, ) -> Result<(), Error> { // If the block is already known, simply ignore it. 
if self.indices.contains_key(&block.root) { @@ -314,45 +334,92 @@ impl ProtoArray { let node_index = self.nodes.len(); - let node = ProtoNode { - slot: block.slot, - root: block.root, - target_root: block.target_root, - current_epoch_shuffling_id: block.current_epoch_shuffling_id, - next_epoch_shuffling_id: block.next_epoch_shuffling_id, - state_root: block.state_root, - parent: block - .parent_root - .and_then(|parent| self.indices.get(&parent).copied()), - justified_checkpoint: block.justified_checkpoint, - finalized_checkpoint: block.finalized_checkpoint, - weight: 0, - best_child: None, - best_descendant: None, - execution_status: block.execution_status, - unrealized_justified_checkpoint: block.unrealized_justified_checkpoint, - unrealized_finalized_checkpoint: block.unrealized_finalized_checkpoint, + let parent_index = block + .parent_root + .and_then(|parent| self.indices.get(&parent).copied()); + + let node = if !spec.fork_name_at_slot::(current_slot).gloas_enabled() { + ProtoNode::V17(ProtoNodeV17 { + slot: block.slot, + root: block.root, + target_root: block.target_root, + current_epoch_shuffling_id: block.current_epoch_shuffling_id, + next_epoch_shuffling_id: block.next_epoch_shuffling_id, + state_root: block.state_root, + parent: parent_index, + justified_checkpoint: block.justified_checkpoint, + finalized_checkpoint: block.finalized_checkpoint, + weight: 0, + best_child: None, + best_descendant: None, + execution_status: block.execution_status, + unrealized_justified_checkpoint: block.unrealized_justified_checkpoint, + unrealized_finalized_checkpoint: block.unrealized_finalized_checkpoint, + }) + } else { + let execution_payload_block_hash = block + .execution_payload_block_hash + .ok_or(Error::BrokenBlock{block_root: block.root})?; + + let parent_payload_status: PayloadStatus = + if let Some(parent_node) = + parent_index.and_then(|idx| self.nodes.get(idx)) + { + let v29 = parent_node + .as_v29() + .map_err(|_| Error::InvalidNodeVariant{block_root: 
block.root})?; + if execution_payload_block_hash == v29.execution_payload_block_hash + { + PayloadStatus::Empty + } else { + PayloadStatus::Full + } + } else { + PayloadStatus::Full + }; + + ProtoNode::V29(ProtoNodeV29 { + slot: block.slot, + root: block.root, + target_root: block.target_root, + current_epoch_shuffling_id: block.current_epoch_shuffling_id, + next_epoch_shuffling_id: block.next_epoch_shuffling_id, + state_root: block.state_root, + parent: parent_index, + justified_checkpoint: block.justified_checkpoint, + finalized_checkpoint: block.finalized_checkpoint, + weight: 0, + best_child: None, + best_descendant: None, + unrealized_justified_checkpoint: block.unrealized_justified_checkpoint, + unrealized_finalized_checkpoint: block.unrealized_finalized_checkpoint, + parent_payload_status, + empty_payload_weight: 0, + full_payload_weight: 0, + execution_payload_block_hash, + }) }; - // If the parent has an invalid execution status, return an error before adding the block to - // `self`. - if let Some(parent_index) = node.parent { + // If the parent has an invalid execution status, return an error before adding the + // block to `self`. This applies when the parent is a V17 node with execution tracking. 
+ if let Some(parent_index) = node.parent() { let parent = self .nodes .get(parent_index) .ok_or(Error::InvalidNodeIndex(parent_index))?; - if parent.execution_status.is_invalid() { + + if let Ok(status) = parent.execution_status() && status.is_invalid() { return Err(Error::ParentExecutionStatusIsInvalid { block_root: block.root, - parent_root: parent.root, + parent_root: parent.root(), }); } } - self.indices.insert(node.root, node_index); + self.indices.insert(node.root(), node_index); self.nodes.push(node.clone()); - if let Some(parent_index) = node.parent { + if let Some(parent_index) = node.parent() { self.maybe_update_best_child_and_descendant::( parent_index, node_index, @@ -805,12 +872,12 @@ impl ProtoArray { let change_to_none = (None, None); let change_to_child = ( Some(child_index), - child.best_descendant.or(Some(child_index)), + child.best_descendant().or(Some(child_index)), ); - let no_change = (parent.best_child, parent.best_descendant); + let no_change = (parent.best_child(), parent.best_descendant()); let (new_best_child, new_best_descendant) = - if let Some(best_child_index) = parent.best_child { + if let Some(best_child_index) = parent.best_child() { if best_child_index == child_index && !child_leads_to_viable_head { // If the child is already the best-child of the parent but it's not viable for // the head, remove it. @@ -838,16 +905,16 @@ impl ProtoArray { } else if !child_leads_to_viable_head && best_child_leads_to_viable_head { // The best child leads to a viable head, but the child doesn't. no_change - } else if child.weight == best_child.weight { + } else if child.weight() == best_child.weight() { // Tie-breaker of equal weights by root. - if child.root >= best_child.root { + if *child.root() >= *best_child.root() { change_to_child } else { no_change } } else { // Choose the winner by weight. 
- if child.weight > best_child.weight { + if child.weight() > best_child.weight() { change_to_child } else { no_change @@ -867,8 +934,8 @@ impl ProtoArray { .get_mut(parent_index) .ok_or(Error::InvalidNodeIndex(parent_index))?; - parent.best_child = new_best_child; - parent.best_descendant = new_best_descendant; + *parent.best_child_mut() = new_best_child; + *parent.best_descendant_mut() = new_best_descendant; Ok(()) } @@ -883,7 +950,7 @@ impl ProtoArray { best_finalized_checkpoint: Checkpoint, ) -> Result { let best_descendant_is_viable_for_head = - if let Some(best_descendant_index) = node.best_descendant { + if let Some(best_descendant_index) = node.best_descendant() { let best_descendant = self .nodes .get(best_descendant_index) @@ -921,21 +988,21 @@ impl ProtoArray { best_justified_checkpoint: Checkpoint, best_finalized_checkpoint: Checkpoint, ) -> bool { - if node.execution_status.is_invalid() { + if let Ok(proto_node) = node.as_v17() && proto_node.execution_status.is_invalid() { return false; } let genesis_epoch = Epoch::new(0); let current_epoch = current_slot.epoch(E::slots_per_epoch()); - let node_epoch = node.slot.epoch(E::slots_per_epoch()); - let node_justified_checkpoint = node.justified_checkpoint; + let node_epoch = node.slot().epoch(E::slots_per_epoch()); + let node_justified_checkpoint = node.justified_checkpoint(); let voting_source = if current_epoch > node_epoch { // The block is from a prior epoch, the voting source will be pulled-up. - node.unrealized_justified_checkpoint + node.unrealized_justified_checkpoint() // Sometimes we don't track the unrealized justification. In // that case, just use the fully-realized justified checkpoint. - .unwrap_or(node_justified_checkpoint) + .unwrap_or(*node_justified_checkpoint) } else { // The block is not from a prior epoch, therefore the voting source // is not pulled up. 
diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index 3edf1e0644d..a0cd50db8be 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -159,6 +159,10 @@ pub struct Block { pub execution_status: ExecutionStatus, pub unrealized_justified_checkpoint: Option, pub unrealized_finalized_checkpoint: Option, + + /// post-Gloas fields + pub execution_payload_parent_hash: Option, + pub execution_payload_block_hash: Option, } impl Block { @@ -422,6 +426,9 @@ impl ProtoArrayForkChoice { current_epoch_shuffling_id: AttestationShufflingId, next_epoch_shuffling_id: AttestationShufflingId, execution_status: ExecutionStatus, + execution_payload_parent_hash: Option, + execution_payload_block_hash: Option, + ) -> Result { let mut proto_array = ProtoArray { prune_threshold: DEFAULT_PRUNE_THRESHOLD, @@ -445,6 +452,9 @@ impl ProtoArrayForkChoice { execution_status, unrealized_justified_checkpoint: Some(justified_checkpoint), unrealized_finalized_checkpoint: Some(finalized_checkpoint), + execution_payload_parent_hash, + execution_payload_block_hash, + }; proto_array @@ -453,6 +463,7 @@ impl ProtoArrayForkChoice { current_slot, justified_checkpoint, finalized_checkpoint, + spec, ) .map_err(|e| format!("Failed to add finalized block to proto_array: {:?}", e))?; @@ -506,6 +517,7 @@ impl ProtoArrayForkChoice { current_slot: Slot, justified_checkpoint: Checkpoint, finalized_checkpoint: Checkpoint, + spec: &ChainSpec, ) -> Result<(), String> { if block.parent_root.is_none() { return Err("Missing parent root".to_string()); @@ -517,6 +529,7 @@ impl ProtoArrayForkChoice { current_slot, justified_checkpoint, finalized_checkpoint, + spec, ) .map_err(|e| format!("process_block_error: {:?}", e)) } From 3e3ccba1a689d74228f459951e73d43fbc42b06b Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 11 Dec 2025 12:33:39 +1100 Subject: [PATCH 002/127] adding 
michael commits --- consensus/fork_choice/src/fork_choice.rs | 15 ++++--- consensus/proto_array/src/bin.rs | 3 ++ .../src/fork_choice_test_definition.rs | 5 ++- consensus/proto_array/src/lib.rs | 4 +- consensus/proto_array/src/proto_array.rs | 2 +- .../src/proto_array_fork_choice.rs | 41 +++++++++++++++---- 6 files changed, 54 insertions(+), 16 deletions(-) diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index 5edd9b139df..3e1c2dc3611 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -3,7 +3,7 @@ use crate::{ForkChoiceStore, InvalidationOperation}; use fixed_bytes::FixedBytesExtended; use logging::crit; use proto_array::{ - Block as ProtoBlock, DisallowedReOrgOffsets, ExecutionStatus, JustifiedBalances, + Block as ProtoBlock, DisallowedReOrgOffsets, ExecutionStatus, JustifiedBalances, LatestMessage, ProposerHeadError, ProposerHeadInfo, ProtoArrayForkChoice, ReOrgThreshold, }; use ssz::{Decode, Encode}; @@ -1136,7 +1136,8 @@ where self.proto_array.process_attestation( *validator_index as usize, attestation.data().beacon_block_root, - attestation.data().target.epoch, + attestation.data().slot, + payload_present, )?; } } else { @@ -1256,10 +1257,12 @@ where &mut self.queued_attestations, ) { for validator_index in attestation.attesting_indices.iter() { + // FIXME(sproul): backwards compat/fork abstraction self.proto_array.process_attestation( *validator_index as usize, attestation.block_root, - attestation.target_epoch, + attestation.slot, + attestation.payload_present, )?; } } @@ -1389,13 +1392,15 @@ where /// Returns the latest message for a given validator, if any. /// - /// Returns `(block_root, block_slot)`. + /// Returns `block_root, block_slot, payload_present`. /// /// ## Notes /// /// It may be prudent to call `Self::update_time` before calling this function, /// since some attestations might be queued and awaiting processing. 
- pub fn latest_message(&self, validator_index: usize) -> Option<(Hash256, Epoch)> { + /// + /// This function is only used in tests. + pub fn latest_message(&self, validator_index: usize) -> Option { self.proto_array.latest_message(validator_index) } diff --git a/consensus/proto_array/src/bin.rs b/consensus/proto_array/src/bin.rs index e1d307affb4..94a10fb127c 100644 --- a/consensus/proto_array/src/bin.rs +++ b/consensus/proto_array/src/bin.rs @@ -1,3 +1,4 @@ +/* FIXME(sproul) use proto_array::fork_choice_test_definition::*; use std::fs::File; @@ -24,3 +25,5 @@ fn write_test_def_to_yaml(filename: &str, def: ForkChoiceTestDefinition) { let file = File::create(filename).expect("Should be able to open file"); serde_yaml::to_writer(file, &def).expect("Should be able to write YAML to file"); } +*/ +fn main() {} diff --git a/consensus/proto_array/src/fork_choice_test_definition.rs b/consensus/proto_array/src/fork_choice_test_definition.rs index e9deb6759fc..ac765b51d82 100644 --- a/consensus/proto_array/src/fork_choice_test_definition.rs +++ b/consensus/proto_array/src/fork_choice_test_definition.rs @@ -1,3 +1,4 @@ +/* FIXME(sproul) fix these tests later mod execution_status; mod ffg_updates; mod no_votes; @@ -227,13 +228,14 @@ impl ForkChoiceTestDefinition { }); check_bytes_round_trip(&fork_choice); } + // FIXME(sproul): update with payload_present Operation::ProcessAttestation { validator_index, block_root, target_epoch, } => { fork_choice - .process_attestation(validator_index, block_root, target_epoch) + .process_attestation(validator_index, block_root, target_epoch, false) .unwrap_or_else(|_| { panic!( "process_attestation op at index {} returned error", @@ -323,3 +325,4 @@ fn check_bytes_round_trip(original: &ProtoArrayForkChoice) { "fork choice should encode and decode without change" ); } +*/ diff --git a/consensus/proto_array/src/lib.rs b/consensus/proto_array/src/lib.rs index 222f9274781..1f126246b34 100644 --- a/consensus/proto_array/src/lib.rs +++ 
b/consensus/proto_array/src/lib.rs @@ -8,8 +8,8 @@ mod ssz_container; pub use crate::justified_balances::JustifiedBalances; pub use crate::proto_array::{InvalidationOperation, calculate_committee_fraction}; pub use crate::proto_array_fork_choice::{ - Block, DisallowedReOrgOffsets, DoNotReOrg, ExecutionStatus, ProposerHeadError, - ProposerHeadInfo, ProtoArrayForkChoice, ReOrgThreshold, PayloadStatus, + Block, DisallowedReOrgOffsets, DoNotReOrg, ExecutionStatus, LatestMessage, PayloadStatus, + ProposerHeadError, ProposerHeadInfo, ProtoArrayForkChoice, ReOrgThreshold, }; pub use error::Error; diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index d7b1ec63135..1eb7cc9d882 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -111,7 +111,7 @@ pub struct ProtoNode { #[ssz(with = "four_byte_option_usize")] pub best_descendant: Option, /// Indicates if an execution node has marked this block as valid. Also contains the execution - /// block hash. + /// block hash. This is only used pre-Gloas. #[superstruct(only(V17), partial_getter(copy))] pub execution_status: ExecutionStatus, #[superstruct(getter(copy))] diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index a0cd50db8be..928e8ce8603 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -23,13 +23,23 @@ use types::{ pub const DEFAULT_PRUNE_THRESHOLD: usize = 256; #[derive(Default, PartialEq, Clone, Encode, Decode)] +// FIXME(sproul): the "next" naming here is a bit odd +// FIXME(sproul): version this type? pub struct VoteTracker { current_root: Hash256, next_root: Hash256, - next_epoch: Epoch, + next_slot: Slot, + next_payload_present: bool, } -/// Represents the verification status of an execution payload. 
+// FIXME(sproul): version this type +pub struct LatestMessage { + slot: Slot, + root: Hash256, + payload_present: bool, +} + +/// Represents the verification status of an execution payload pre-Gloas. #[derive(Clone, Copy, Debug, PartialEq, Encode, Decode, Serialize, Deserialize)] #[ssz(enum_behaviour = "union")] pub enum ExecutionStatus { @@ -49,6 +59,16 @@ pub enum ExecutionStatus { Irrelevant(bool), } +/// Represents the status of an execution payload post-Gloas. +#[derive(Clone, Copy, Debug, PartialEq, Encode, Decode, Serialize, Deserialize)] +#[ssz(enum_behaviour = "tag")] +#[repr(u8)] +pub enum PayloadStatus { + Pending = 0, + Empty = 1, + Full = 2, +} + impl ExecutionStatus { pub fn is_execution_enabled(&self) -> bool { !matches!(self, ExecutionStatus::Irrelevant(_)) @@ -499,13 +519,15 @@ impl ProtoArrayForkChoice { &mut self, validator_index: usize, block_root: Hash256, - target_epoch: Epoch, + attestation_slot: Slot, + payload_present: bool, ) -> Result<(), String> { let vote = self.votes.get_mut(validator_index); - if target_epoch > vote.next_epoch || *vote == VoteTracker::default() { + if attestation_slot > vote.next_slot || *vote == VoteTracker::default() { vote.next_root = block_root; - vote.next_epoch = target_epoch; + vote.next_slot = attestation_slot; + vote.next_payload_present = payload_present; } Ok(()) @@ -920,14 +942,18 @@ impl ProtoArrayForkChoice { .is_finalized_checkpoint_or_descendant::(descendant_root, best_finalized_checkpoint) } - pub fn latest_message(&self, validator_index: usize) -> Option<(Hash256, Epoch)> { + pub fn latest_message(&self, validator_index: usize) -> Option { if validator_index < self.votes.0.len() { let vote = &self.votes.0[validator_index]; if *vote == VoteTracker::default() { None } else { - Some((vote.next_root, vote.next_epoch)) + Some(LatestMessage { + root: vote.next_root, + slot: vote.next_slot, + payload_present: vote.next_payload_present, + }) } } else { None @@ -1013,6 +1039,7 @@ impl ProtoArrayForkChoice { 
/// - If a value in `indices` is greater to or equal to `indices.len()`. /// - If some `Hash256` in `votes` is not a key in `indices` (except for `Hash256::zero()`, this is /// always valid). +// FIXME(sproul): implement get-weight changes here fn compute_deltas( indices: &HashMap, votes: &mut ElasticList, From d5c5077a31aa5c28ddff3944305966157b1e8d37 Mon Sep 17 00:00:00 2001 From: hopinheimer Date: Tue, 24 Feb 2026 17:40:11 -0500 Subject: [PATCH 003/127] implement scoring mechanisms and plumbing --- beacon_node/beacon_chain/src/beacon_chain.rs | 26 +- .../beacon_chain/src/block_production/mod.rs | 4 +- .../beacon_chain/src/block_verification.rs | 20 + .../beacon_chain/src/persisted_fork_choice.rs | 21 +- .../src/schema_change/migration_schema_v23.rs | 21 +- .../src/schema_change/migration_schema_v28.rs | 35 +- .../tests/payload_invalidation.rs | 4 +- beacon_node/beacon_chain/tests/tests.rs | 14 +- beacon_node/http_api/src/lib.rs | 58 +- beacon_node/http_api/tests/tests.rs | 59 ++- consensus/fork_choice/src/fork_choice.rs | 151 +++++- consensus/fork_choice/src/lib.rs | 3 +- consensus/fork_choice/tests/tests.rs | 30 ++ consensus/proto_array/src/bin.rs | 3 - consensus/proto_array/src/error.rs | 4 +- .../src/fork_choice_test_definition.rs | 204 +++++++- .../execution_status.rs | 57 +- .../ffg_updates.rs | 38 +- .../gloas_payload.rs | 222 ++++++++ .../fork_choice_test_definition/no_votes.rs | 15 + .../src/fork_choice_test_definition/votes.rs | 73 ++- consensus/proto_array/src/lib.rs | 4 +- consensus/proto_array/src/proto_array.rs | 494 ++++++++++++------ .../src/proto_array_fork_choice.rs | 348 +++++++++--- consensus/proto_array/src/ssz_container.rs | 76 ++- testing/ef_tests/src/cases/fork_choice.rs | 2 +- 26 files changed, 1573 insertions(+), 413 deletions(-) create mode 100644 consensus/proto_array/src/fork_choice_test_definition/gloas_payload.rs diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 
9f62bf11f5f..f95a2fb5035 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -1441,7 +1441,7 @@ impl BeaconChain { .proto_array() .heads_descended_from_finalization::(fork_choice.finalized_checkpoint()) .iter() - .map(|node| (node.root, node.slot)) + .map(|node| (node.root(), node.slot())) .collect() } @@ -4776,7 +4776,7 @@ impl BeaconChain { // The slot of our potential re-org block is always 1 greater than the head block because we // only attempt single-slot re-orgs. - let head_slot = info.head_node.slot; + let head_slot = info.head_node.slot(); let re_org_block_slot = head_slot + 1; let fork_choice_slot = info.current_slot; @@ -4811,9 +4811,9 @@ impl BeaconChain { .fork_name_at_slot::(re_org_block_slot) .fulu_enabled() { - info.head_node.current_epoch_shuffling_id + info.head_node.current_epoch_shuffling_id() } else { - info.head_node.next_epoch_shuffling_id + info.head_node.next_epoch_shuffling_id() } .shuffling_decision_block; let proposer_index = self @@ -4844,8 +4844,8 @@ impl BeaconChain { // and the actual weight of the parent against the parent re-org threshold. 
let (head_weak, parent_strong) = if fork_choice_slot == re_org_block_slot { ( - info.head_node.weight < info.re_org_head_weight_threshold, - info.parent_node.weight > info.re_org_parent_weight_threshold, + info.head_node.weight() < info.re_org_head_weight_threshold, + info.parent_node.weight() > info.re_org_parent_weight_threshold, ) } else { (true, true) @@ -4853,7 +4853,7 @@ impl BeaconChain { if !head_weak { return Err(Box::new( DoNotReOrg::HeadNotWeak { - head_weight: info.head_node.weight, + head_weight: info.head_node.weight(), re_org_head_weight_threshold: info.re_org_head_weight_threshold, } .into(), @@ -4862,7 +4862,7 @@ impl BeaconChain { if !parent_strong { return Err(Box::new( DoNotReOrg::ParentNotStrong { - parent_weight: info.parent_node.weight, + parent_weight: info.parent_node.weight(), re_org_parent_weight_threshold: info.re_org_parent_weight_threshold, } .into(), @@ -4880,9 +4880,13 @@ impl BeaconChain { return Err(Box::new(DoNotReOrg::HeadNotLate.into())); } - let parent_head_hash = info.parent_node.execution_status.block_hash(); + let parent_head_hash = info + .parent_node + .execution_status() + .ok() + .and_then(|execution_status| execution_status.block_hash()); let forkchoice_update_params = ForkchoiceUpdateParameters { - head_root: info.parent_node.root, + head_root: info.parent_node.root(), head_hash: parent_head_hash, justified_hash: canonical_forkchoice_params.justified_hash, finalized_hash: canonical_forkchoice_params.finalized_hash, @@ -4890,7 +4894,7 @@ impl BeaconChain { debug!( canonical_head = ?head_block_root, - ?info.parent_node.root, + parent_root = ?info.parent_node.root(), slot = %fork_choice_slot, "Fork choice update overridden" ); diff --git a/beacon_node/beacon_chain/src/block_production/mod.rs b/beacon_node/beacon_chain/src/block_production/mod.rs index 76c8b77e934..f924461012c 100644 --- a/beacon_node/beacon_chain/src/block_production/mod.rs +++ b/beacon_node/beacon_chain/src/block_production/mod.rs @@ -200,7 +200,7 @@ 
impl BeaconChain { }) .ok()?; drop(proposer_head_timer); - let re_org_parent_block = proposer_head.parent_node.root; + let re_org_parent_block = proposer_head.parent_node.root(); let (state_root, state) = self .store @@ -213,7 +213,7 @@ impl BeaconChain { info!( weak_head = ?canonical_head, parent = ?re_org_parent_block, - head_weight = proposer_head.head_node.weight, + head_weight = proposer_head.head_node.weight(), threshold_weight = proposer_head.re_org_head_weight_threshold, "Attempting re-org due to weak head" ); diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index e0943d5d931..be1974b8124 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -1683,6 +1683,26 @@ impl ExecutionPendingBlock { Err(e) => Err(BlockError::BeaconChainError(Box::new(e.into()))), }?; } + + // Register each payload attestation in the block with fork choice. + if let Ok(payload_attestations) = block.message().body().payload_attestations() { + for (i, payload_attestation) in payload_attestations.iter().enumerate() { + let indexed_payload_attestation = consensus_context + .get_indexed_payload_attestation(&state, payload_attestation, &chain.spec) + .map_err(|e| BlockError::PerBlockProcessingError(e.into_with_index(i)))?; + + match fork_choice.on_payload_attestation( + current_slot, + indexed_payload_attestation, + AttestationFromBlock::True, + ) { + Ok(()) => Ok(()), + // Ignore invalid payload attestations whilst importing from a block. 
+ Err(ForkChoiceError::InvalidAttestation(_)) => Ok(()), + Err(e) => Err(BlockError::BeaconChainError(Box::new(e.into()))), + }?; + } + } drop(fork_choice); Ok(Self { diff --git a/beacon_node/beacon_chain/src/persisted_fork_choice.rs b/beacon_node/beacon_chain/src/persisted_fork_choice.rs index d8fcc0901bf..5551e1d7c94 100644 --- a/beacon_node/beacon_chain/src/persisted_fork_choice.rs +++ b/beacon_node/beacon_chain/src/persisted_fork_choice.rs @@ -9,10 +9,10 @@ use superstruct::superstruct; use types::Hash256; // If adding a new version you should update this type alias and fix the breakages. -pub type PersistedForkChoice = PersistedForkChoiceV28; +pub type PersistedForkChoice = PersistedForkChoiceV29; #[superstruct( - variants(V17, V28), + variants(V17, V28, V29), variant_attributes(derive(Encode, Decode)), no_enum )] @@ -20,10 +20,12 @@ pub struct PersistedForkChoice { #[superstruct(only(V17))] pub fork_choice_v17: fork_choice::PersistedForkChoiceV17, #[superstruct(only(V28))] - pub fork_choice: fork_choice::PersistedForkChoiceV28, + pub fork_choice_v28: fork_choice::PersistedForkChoiceV28, + #[superstruct(only(V29))] + pub fork_choice: fork_choice::PersistedForkChoiceV29, #[superstruct(only(V17))] pub fork_choice_store_v17: PersistedForkChoiceStoreV17, - #[superstruct(only(V28))] + #[superstruct(only(V28, V29))] pub fork_choice_store: PersistedForkChoiceStoreV28, } @@ -47,7 +49,7 @@ macro_rules! 
impl_store_item { impl_store_item!(PersistedForkChoiceV17); -impl PersistedForkChoiceV28 { +impl PersistedForkChoiceV29 { pub fn from_bytes(bytes: &[u8], store_config: &StoreConfig) -> Result { let decompressed_bytes = store_config .decompress_bytes(bytes) @@ -78,3 +80,12 @@ impl PersistedForkChoiceV28 { )) } } + +impl From for PersistedForkChoiceV29 { + fn from(v28: PersistedForkChoiceV28) -> Self { + Self { + fork_choice: v28.fork_choice_v28.into(), + fork_choice_store: v28.fork_choice_store, + } + } +} diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v23.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v23.rs index e238e1efb6c..a6671c55be5 100644 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v23.rs +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v23.rs @@ -110,22 +110,21 @@ pub fn downgrade_from_v23( // Doesn't matter what policy we use for invalid payloads, as our head calculation just // considers descent from finalization. 
let reset_payload_statuses = ResetPayloadStatuses::OnlyWithInvalidPayload; - let fork_choice = ForkChoice::from_persisted( - persisted_fork_choice.fork_choice_v17.try_into()?, - reset_payload_statuses, - fc_store, - &db.spec, - ) - .map_err(|e| { - Error::MigrationError(format!("Error loading fork choice from persisted: {e:?}")) - })?; + let persisted_fc_v28: fork_choice::PersistedForkChoiceV28 = + persisted_fork_choice.fork_choice_v17.try_into()?; + let persisted_fc_v29: fork_choice::PersistedForkChoiceV29 = persisted_fc_v28.into(); + let fork_choice = + ForkChoice::from_persisted(persisted_fc_v29, reset_payload_statuses, fc_store, &db.spec) + .map_err(|e| { + Error::MigrationError(format!("Error loading fork choice from persisted: {e:?}")) + })?; let heads = fork_choice .proto_array() .heads_descended_from_finalization::(fork_choice.finalized_checkpoint()); - let head_roots = heads.iter().map(|node| node.root).collect(); - let head_slots = heads.iter().map(|node| node.slot).collect(); + let head_roots = heads.iter().map(|node| node.root()).collect(); + let head_slots = heads.iter().map(|node| node.slot()).collect(); let persisted_beacon_chain_v22 = PersistedBeaconChainV22 { _canonical_head_block_root: DUMMY_CANONICAL_HEAD_BLOCK_ROOT, diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v28.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v28.rs index 5885eaabc00..86b96080d43 100644 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v28.rs +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v28.rs @@ -1,7 +1,7 @@ use crate::{ BeaconChain, BeaconChainTypes, BeaconForkChoiceStore, PersistedForkChoiceStoreV17, beacon_chain::FORK_CHOICE_DB_KEY, - persisted_fork_choice::{PersistedForkChoiceV17, PersistedForkChoiceV28}, + persisted_fork_choice::PersistedForkChoiceV17, summaries_dag::{DAGStateSummary, StateSummariesDAG}, }; use fork_choice::{ForkChoice, ForkChoiceStore, ResetPayloadStatuses}; @@ -88,8 
+88,11 @@ pub fn upgrade_to_v28( // Construct top-level ForkChoice struct using the patched fork choice store, and the converted // proto array. let reset_payload_statuses = ResetPayloadStatuses::OnlyWithInvalidPayload; + let persisted_fc_v28: fork_choice::PersistedForkChoiceV28 = + persisted_fork_choice_v17.fork_choice_v17.try_into()?; + let persisted_fc_v29: fork_choice::PersistedForkChoiceV29 = persisted_fc_v28.into(); let fork_choice = ForkChoice::from_persisted( - persisted_fork_choice_v17.fork_choice_v17.try_into()?, + persisted_fc_v29, reset_payload_statuses, fc_store, db.get_chain_spec(), @@ -118,26 +121,22 @@ pub fn downgrade_from_v28( return Ok(vec![]); }; - // Recreate V28 persisted fork choice, then convert each field back to its V17 version. - let persisted_fork_choice = PersistedForkChoiceV28 { - fork_choice: fork_choice.to_persisted(), - fork_choice_store: fork_choice.fc_store().to_persisted(), - }; - + let persisted_v29 = fork_choice.to_persisted(); + let fc_store_v28 = fork_choice.fc_store().to_persisted(); let justified_balances = fork_choice.fc_store().justified_balances(); + // Convert V29 proto_array back to legacy V28 for downgrade. + let persisted_fork_choice_v28 = fork_choice::PersistedForkChoiceV28 { + proto_array_v28: persisted_v29.proto_array.into(), + queued_attestations: persisted_v29.queued_attestations, + }; + // 1. Create `proto_array::PersistedForkChoiceV17`. 
- let fork_choice_v17: fork_choice::PersistedForkChoiceV17 = ( - persisted_fork_choice.fork_choice, - justified_balances.clone(), - ) - .into(); + let fork_choice_v17: fork_choice::PersistedForkChoiceV17 = + (persisted_fork_choice_v28, justified_balances.clone()).into(); - let fork_choice_store_v17: PersistedForkChoiceStoreV17 = ( - persisted_fork_choice.fork_choice_store, - justified_balances.clone(), - ) - .into(); + let fork_choice_store_v17: PersistedForkChoiceStoreV17 = + (fc_store_v28, justified_balances.clone()).into(); let persisted_fork_choice_v17 = PersistedForkChoiceV17 { fork_choice_v17, diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs index eb8e57a5d5f..b1e2fd2ccc3 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -1498,7 +1498,7 @@ async fn weights_after_resetting_optimistic_status() { .fork_choice_read_lock() .proto_array() .iter_nodes(&head.head_block_root()) - .map(|node| (node.root, node.weight)) + .map(|node| (node.root(), node.weight())) .collect::>(); rig.invalidate_manually(roots[1]).await; @@ -1518,7 +1518,7 @@ async fn weights_after_resetting_optimistic_status() { .fork_choice_read_lock() .proto_array() .iter_nodes(&head.head_block_root()) - .map(|node| (node.root, node.weight)) + .map(|node| (node.root(), node.weight())) .collect::>(); assert_eq!(original_weights, new_weights); diff --git a/beacon_node/beacon_chain/tests/tests.rs b/beacon_node/beacon_chain/tests/tests.rs index b052ba66f1a..10c0b429a95 100644 --- a/beacon_node/beacon_chain/tests/tests.rs +++ b/beacon_node/beacon_chain/tests/tests.rs @@ -590,7 +590,10 @@ async fn unaggregated_attestations_added_to_fork_choice_some_none() { if slot <= num_blocks_produced && slot != 0 { assert_eq!( - latest_message.unwrap().1, + latest_message + .expect("latest message should be present") + .slot + 
.epoch(MinimalEthSpec::slots_per_epoch()), slot.epoch(MinimalEthSpec::slots_per_epoch()), "Latest message epoch for {} should be equal to epoch {}.", validator, @@ -700,10 +703,12 @@ async fn unaggregated_attestations_added_to_fork_choice_all_updated() { let validator_slots: Vec<(&usize, Slot)> = validators.iter().zip(slots).collect(); for (validator, slot) in validator_slots { - let latest_message = fork_choice.latest_message(*validator); + let latest_message = fork_choice + .latest_message(*validator) + .expect("latest message should be present"); assert_eq!( - latest_message.unwrap().1, + latest_message.slot.epoch(MinimalEthSpec::slots_per_epoch()), slot.epoch(MinimalEthSpec::slots_per_epoch()), "Latest message slot should be equal to attester duty." ); @@ -714,8 +719,7 @@ async fn unaggregated_attestations_added_to_fork_choice_all_updated() { .expect("Should get block root at slot"); assert_eq!( - latest_message.unwrap().0, - *block_root, + latest_message.root, *block_root, "Latest message block root should be equal to block at slot." 
); } diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 92a1ad934db..3077439b6f3 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -2078,52 +2078,64 @@ pub fn serve( .nodes .iter() .map(|node| { - let execution_status = if node.execution_status.is_execution_enabled() { - Some(node.execution_status.to_string()) + let execution_status = if node + .execution_status() + .is_ok_and(|status| status.is_execution_enabled()) + { + node.execution_status() + .ok() + .map(|status| status.to_string()) } else { None }; + let execution_status_string = node + .execution_status() + .ok() + .map(|status| status.to_string()) + .unwrap_or_else(|| "n/a".to_string()); + ForkChoiceNode { - slot: node.slot, - block_root: node.root, + slot: node.slot(), + block_root: node.root(), parent_root: node - .parent + .parent() .and_then(|index| proto_array.nodes.get(index)) - .map(|parent| parent.root), - justified_epoch: node.justified_checkpoint.epoch, - finalized_epoch: node.finalized_checkpoint.epoch, - weight: node.weight, + .map(|parent| parent.root()), + justified_epoch: node.justified_checkpoint().epoch, + finalized_epoch: node.finalized_checkpoint().epoch, + weight: node.weight(), validity: execution_status, execution_block_hash: node - .execution_status - .block_hash() + .execution_status() + .ok() + .and_then(|status| status.block_hash()) .map(|block_hash| block_hash.into_root()), extra_data: ForkChoiceExtraData { - target_root: node.target_root, - justified_root: node.justified_checkpoint.root, - finalized_root: node.finalized_checkpoint.root, + target_root: node.target_root(), + justified_root: node.justified_checkpoint().root, + finalized_root: node.finalized_checkpoint().root, unrealized_justified_root: node - .unrealized_justified_checkpoint + .unrealized_justified_checkpoint() .map(|checkpoint| checkpoint.root), unrealized_finalized_root: node - .unrealized_finalized_checkpoint + 
.unrealized_finalized_checkpoint() .map(|checkpoint| checkpoint.root), unrealized_justified_epoch: node - .unrealized_justified_checkpoint + .unrealized_justified_checkpoint() .map(|checkpoint| checkpoint.epoch), unrealized_finalized_epoch: node - .unrealized_finalized_checkpoint + .unrealized_finalized_checkpoint() .map(|checkpoint| checkpoint.epoch), - execution_status: node.execution_status.to_string(), + execution_status: execution_status_string, best_child: node - .best_child + .best_child() .and_then(|index| proto_array.nodes.get(index)) - .map(|child| child.root), + .map(|child| child.root()), best_descendant: node - .best_descendant + .best_descendant() .and_then(|index| proto_array.nodes.get(index)) - .map(|descendant| descendant.root), + .map(|descendant| descendant.root()), }, } }) diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index 7e3eb8b9807..a43c8216da0 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -33,7 +33,7 @@ use lighthouse_network::{Enr, PeerId, types::SyncState}; use network::NetworkReceivers; use network_utils::enr_ext::EnrExt; use operation_pool::attestation_storage::CheckpointKey; -use proto_array::ExecutionStatus; +use proto_array::{ExecutionStatus, core::ProtoNode}; use reqwest::{RequestBuilder, Response, StatusCode}; use sensitive_url::SensitiveUrl; use slot_clock::SlotClock; @@ -3072,51 +3072,61 @@ impl ApiTester { .nodes .iter() .map(|node| { - let execution_status = if node.execution_status.is_execution_enabled() { - Some(node.execution_status.to_string()) + let execution_status = if node + .execution_status() + .is_ok_and(|status| status.is_execution_enabled()) + { + node.execution_status() + .ok() + .map(|status| status.to_string()) } else { None }; ForkChoiceNode { - slot: node.slot, - block_root: node.root, + slot: node.slot(), + block_root: node.root(), parent_root: node - .parent + .parent() .and_then(|index| 
expected_proto_array.nodes.get(index)) - .map(|parent| parent.root), - justified_epoch: node.justified_checkpoint.epoch, - finalized_epoch: node.finalized_checkpoint.epoch, - weight: node.weight, + .map(|parent| parent.root()), + justified_epoch: node.justified_checkpoint().epoch, + finalized_epoch: node.finalized_checkpoint().epoch, + weight: node.weight(), validity: execution_status, execution_block_hash: node - .execution_status - .block_hash() + .execution_status() + .ok() + .and_then(|status| status.block_hash()) .map(|block_hash| block_hash.into_root()), extra_data: ForkChoiceExtraData { - target_root: node.target_root, - justified_root: node.justified_checkpoint.root, - finalized_root: node.finalized_checkpoint.root, + target_root: node.target_root(), + justified_root: node.justified_checkpoint().root, + finalized_root: node.finalized_checkpoint().root, unrealized_justified_root: node - .unrealized_justified_checkpoint + .unrealized_justified_checkpoint() .map(|checkpoint| checkpoint.root), unrealized_finalized_root: node - .unrealized_finalized_checkpoint + .unrealized_finalized_checkpoint() .map(|checkpoint| checkpoint.root), unrealized_justified_epoch: node - .unrealized_justified_checkpoint + .unrealized_justified_checkpoint() .map(|checkpoint| checkpoint.epoch), unrealized_finalized_epoch: node - .unrealized_finalized_checkpoint + .unrealized_finalized_checkpoint() .map(|checkpoint| checkpoint.epoch), - execution_status: node.execution_status.to_string(), + execution_status: node + .execution_status() + .ok() + .map(|status| status.to_string()) + .unwrap_or_else(|| "n/a".to_string()), best_child: node - .best_child + .best_child() .and_then(|index| expected_proto_array.nodes.get(index)) - .map(|child| child.root), + .map(|child| child.root()), best_descendant: node - .best_descendant + .best_descendant() .and_then(|index| expected_proto_array.nodes.get(index)) - .map(|descendant| descendant.root), + .map(|descendant| descendant.root()), }, } }) @@ 
-7048,6 +7058,7 @@ impl ApiTester { .core_proto_array_mut() .nodes .last_mut() + && let ProtoNode::V17(head_node) = head_node { head_node.execution_status = ExecutionStatus::Optimistic(ExecutionBlockHash::zero()) } diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index 3e1c2dc3611..77442a62f57 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -20,7 +20,8 @@ use tracing::{debug, instrument, warn}; use types::{ AbstractExecPayload, AttestationShufflingId, AttesterSlashingRef, BeaconBlockRef, BeaconState, BeaconStateError, ChainSpec, Checkpoint, Epoch, EthSpec, ExecPayload, ExecutionBlockHash, - Hash256, IndexedAttestationRef, RelativeEpoch, SignedBeaconBlock, Slot, + Hash256, IndexedAttestationRef, IndexedPayloadAttestation, RelativeEpoch, SignedBeaconBlock, + Slot, }; #[derive(Debug)] @@ -138,10 +139,10 @@ pub enum InvalidBlock { finalized_root: Hash256, block_ancestor: Option, }, - MissingExecutionPayloadBid{ + MissingExecutionPayloadBid { block_slot: Slot, block_root: Hash256, - } + }, } #[derive(Debug)] @@ -174,6 +175,9 @@ pub enum InvalidAttestation { /// The attestation is attesting to a state that is later than itself. (Viz., attesting to the /// future). AttestsToFutureBlock { block: Slot, attestation: Slot }, + /// A same-slot attestation has a non-zero index, indicating a payload attestation during the + /// same slot as the block. Payload attestations must only arrive in subsequent slots. 
+ PayloadAttestationDuringSameSlot { slot: Slot }, } impl From for Error { @@ -401,6 +405,9 @@ where current_epoch_shuffling_id, next_epoch_shuffling_id, execution_status, + None, + None, + spec, )?; let mut fork_choice = Self { @@ -889,23 +896,22 @@ where }; let (execution_payload_parent_hash, execution_payload_block_hash) = - if let Ok(signed_bid) = block.body().signed_execution_payload_bid() { - ( - Some(signed_bid.message.parent_block_hash), - Some(signed_bid.message.block_hash), - ) - } else { - if spec.fork_name_at_slot::(block.slot()).gloas_enabled() { - return Err(Error::InvalidBlock( - InvalidBlock::MissingExecutionPayloadBid{ - block_slot: block.slot(), - block_root, - } - - )) - } - (None, None) - }; + if let Ok(signed_bid) = block.body().signed_execution_payload_bid() { + ( + Some(signed_bid.message.parent_block_hash), + Some(signed_bid.message.block_hash), + ) + } else { + if spec.fork_name_at_slot::(block.slot()).gloas_enabled() { + return Err(Error::InvalidBlock( + InvalidBlock::MissingExecutionPayloadBid { + block_slot: block.slot(), + block_root, + }, + )); + } + (None, None) + }; // This does not apply a vote to the block, it just makes fork choice aware of the block so // it can still be identified as the head even if it doesn't have any votes. @@ -935,7 +941,6 @@ where unrealized_finalized_checkpoint: Some(unrealized_finalized_checkpoint), execution_payload_parent_hash, execution_payload_block_hash, - }, current_slot, self.justified_checkpoint(), @@ -1081,6 +1086,46 @@ where }); } + // Same-slot attestations must have index == 0 (i.e., indicate pending payload status). + // Payload-present attestations (index == 1) for the same slot as the block are invalid + // because PTC votes should only arrive in subsequent slots. 
+ if indexed_attestation.data().slot == block.slot && indexed_attestation.data().index != 0 { + return Err(InvalidAttestation::PayloadAttestationDuringSameSlot { slot: block.slot }); + } + + Ok(()) + } + + /// Validates a payload attestation for application to fork choice. + fn validate_on_payload_attestation( + &self, + indexed_payload_attestation: &IndexedPayloadAttestation, + _is_from_block: AttestationFromBlock, + ) -> Result<(), InvalidAttestation> { + if indexed_payload_attestation.attesting_indices.is_empty() { + return Err(InvalidAttestation::EmptyAggregationBitfield); + } + + let block = self + .proto_array + .get_block(&indexed_payload_attestation.data.beacon_block_root) + .ok_or(InvalidAttestation::UnknownHeadBlock { + beacon_block_root: indexed_payload_attestation.data.beacon_block_root, + })?; + + if block.slot > indexed_payload_attestation.data.slot { + return Err(InvalidAttestation::AttestsToFutureBlock { + block: block.slot, + attestation: indexed_payload_attestation.data.slot, + }); + } + + if indexed_payload_attestation.data.slot == block.slot + && indexed_payload_attestation.data.payload_present + { + return Err(InvalidAttestation::PayloadAttestationDuringSameSlot { slot: block.slot }); + } + Ok(()) } @@ -1154,6 +1199,43 @@ where Ok(()) } + /// Register a payload attestation with the fork choice DAG. 
+ pub fn on_payload_attestation( + &mut self, + system_time_current_slot: Slot, + attestation: &IndexedPayloadAttestation, + is_from_block: AttestationFromBlock, + ) -> Result<(), Error> { + self.update_time(system_time_current_slot)?; + + if attestation.data.beacon_block_root == Hash256::zero() { + return Ok(()); + } + + self.validate_on_payload_attestation(attestation, is_from_block)?; + + if attestation.data.slot < self.fc_store.get_current_slot() { + for validator_index in attestation.attesting_indices_iter() { + self.proto_array.process_attestation( + *validator_index as usize, + attestation.data.beacon_block_root, + attestation.data.slot, + attestation.data.payload_present, + )?; + } + } else { + self.queued_attestations.push(QueuedAttestation { + slot: attestation.data.slot, + attesting_indices: attestation.attesting_indices.iter().copied().collect(), + block_root: attestation.data.beacon_block_root, + target_epoch: attestation.data.slot.epoch(E::slots_per_epoch()), + payload_present: attestation.data.payload_present, + }); + } + + Ok(()) + } + /// Apply an attester slashing to fork choice. /// /// We assume that the attester slashing provided to this function has already been verified. @@ -1564,7 +1646,7 @@ where /// /// This is used when persisting the state of the fork choice to disk. 
#[superstruct( - variants(V17, V28), + variants(V17, V28, V29), variant_attributes(derive(Encode, Decode, Clone)), no_enum )] @@ -1572,30 +1654,42 @@ pub struct PersistedForkChoice { #[superstruct(only(V17))] pub proto_array_bytes: Vec, #[superstruct(only(V28))] - pub proto_array: proto_array::core::SszContainerV28, + pub proto_array_v28: proto_array::core::SszContainerLegacyV28, + #[superstruct(only(V29))] + pub proto_array: proto_array::core::SszContainerV29, pub queued_attestations: Vec, } -pub type PersistedForkChoice = PersistedForkChoiceV28; +pub type PersistedForkChoice = PersistedForkChoiceV29; impl TryFrom for PersistedForkChoiceV28 { type Error = ssz::DecodeError; fn try_from(v17: PersistedForkChoiceV17) -> Result { let container_v17 = - proto_array::core::SszContainerV17::from_ssz_bytes(&v17.proto_array_bytes)?; - let container_v28 = container_v17.into(); + proto_array::core::SszContainerLegacyV17::from_ssz_bytes(&v17.proto_array_bytes)?; + let container_v28: proto_array::core::SszContainerLegacyV28 = container_v17.into(); Ok(Self { - proto_array: container_v28, + proto_array_v28: container_v28, queued_attestations: v17.queued_attestations, }) } } +impl From for PersistedForkChoiceV29 { + fn from(v28: PersistedForkChoiceV28) -> Self { + Self { + proto_array: v28.proto_array_v28.into(), + queued_attestations: v28.queued_attestations, + } + } +} + impl From<(PersistedForkChoiceV28, JustifiedBalances)> for PersistedForkChoiceV17 { fn from((v28, balances): (PersistedForkChoiceV28, JustifiedBalances)) -> Self { - let container_v17 = proto_array::core::SszContainerV17::from((v28.proto_array, balances)); + let container_v17 = + proto_array::core::SszContainerLegacyV17::from((v28.proto_array_v28, balances)); let proto_array_bytes = container_v17.as_ssz_bytes(); Self { @@ -1640,6 +1734,7 @@ mod tests { attesting_indices: vec![], block_root: Hash256::zero(), target_epoch: Epoch::new(0), + payload_present: false, }) .collect() } diff --git 
a/consensus/fork_choice/src/lib.rs b/consensus/fork_choice/src/lib.rs index afe06dee1bc..87438f2f855 100644 --- a/consensus/fork_choice/src/lib.rs +++ b/consensus/fork_choice/src/lib.rs @@ -5,7 +5,8 @@ mod metrics; pub use crate::fork_choice::{ AttestationFromBlock, Error, ForkChoice, ForkChoiceView, ForkchoiceUpdateParameters, InvalidAttestation, InvalidBlock, PayloadVerificationStatus, PersistedForkChoice, - PersistedForkChoiceV17, PersistedForkChoiceV28, QueuedAttestation, ResetPayloadStatuses, + PersistedForkChoiceV17, PersistedForkChoiceV28, PersistedForkChoiceV29, QueuedAttestation, + ResetPayloadStatuses, }; pub use fork_choice_store::ForkChoiceStore; pub use proto_array::{ diff --git a/consensus/fork_choice/tests/tests.rs b/consensus/fork_choice/tests/tests.rs index d3a84ee85be..86ef0e2f907 100644 --- a/consensus/fork_choice/tests/tests.rs +++ b/consensus/fork_choice/tests/tests.rs @@ -923,6 +923,36 @@ async fn invalid_attestation_future_block() { .await; } +/// Payload attestations (index == 1) are invalid when they refer to a block in the same slot. 
+#[tokio::test] +async fn invalid_attestation_payload_during_same_slot() { + ForkChoiceTest::new() + .apply_blocks_without_new_attestations(1) + .await + .apply_attestation_to_chain( + MutationDelay::NoDelay, + |attestation, chain| { + let block_slot = chain + .get_blinded_block(&attestation.data().beacon_block_root) + .expect("should read attested block") + .expect("attested block should exist") + .slot(); + + attestation.data_mut().slot = block_slot; + attestation.data_mut().target.epoch = block_slot.epoch(E::slots_per_epoch()); + attestation.data_mut().index = 1; + }, + |result| { + assert_invalid_attestation!( + result, + InvalidAttestation::PayloadAttestationDuringSameSlot { slot } + if slot == Slot::new(1) + ) + }, + ) + .await; +} + /// Specification v0.12.1: /// /// assert target.root == get_ancestor(store, attestation.data.beacon_block_root, target_slot) diff --git a/consensus/proto_array/src/bin.rs b/consensus/proto_array/src/bin.rs index 94a10fb127c..e1d307affb4 100644 --- a/consensus/proto_array/src/bin.rs +++ b/consensus/proto_array/src/bin.rs @@ -1,4 +1,3 @@ -/* FIXME(sproul) use proto_array::fork_choice_test_definition::*; use std::fs::File; @@ -25,5 +24,3 @@ fn write_test_def_to_yaml(filename: &str, def: ForkChoiceTestDefinition) { let file = File::create(filename).expect("Should be able to open file"); serde_yaml::to_writer(file, &def).expect("Should be able to write YAML to file"); } -*/ -fn main() {} diff --git a/consensus/proto_array/src/error.rs b/consensus/proto_array/src/error.rs index c3e60277a3a..d6bd7f2cbfa 100644 --- a/consensus/proto_array/src/error.rs +++ b/consensus/proto_array/src/error.rs @@ -55,10 +55,10 @@ pub enum Error { InvalidEpochOffset(u64), Arith(ArithError), GloasNotImplemented, - InvalidNodeVariant{ + InvalidNodeVariant { block_root: Hash256, }, - BrokenBlock{ + BrokenBlock { block_root: Hash256, }, } diff --git a/consensus/proto_array/src/fork_choice_test_definition.rs 
b/consensus/proto_array/src/fork_choice_test_definition.rs index ac765b51d82..ec4227584a6 100644 --- a/consensus/proto_array/src/fork_choice_test_definition.rs +++ b/consensus/proto_array/src/fork_choice_test_definition.rs @@ -1,21 +1,23 @@ -/* FIXME(sproul) fix these tests later mod execution_status; mod ffg_updates; +mod gloas_payload; mod no_votes; mod votes; -use crate::proto_array_fork_choice::{Block, ExecutionStatus, ProtoArrayForkChoice}; +use crate::proto_array::PayloadTiebreak; +use crate::proto_array_fork_choice::{Block, ExecutionStatus, PayloadStatus, ProtoArrayForkChoice}; use crate::{InvalidationOperation, JustifiedBalances}; use fixed_bytes::FixedBytesExtended; use serde::{Deserialize, Serialize}; use std::collections::BTreeSet; use types::{ - AttestationShufflingId, Checkpoint, Epoch, EthSpec, ExecutionBlockHash, Hash256, + AttestationShufflingId, ChainSpec, Checkpoint, Epoch, EthSpec, ExecutionBlockHash, Hash256, MainnetEthSpec, Slot, }; pub use execution_status::*; pub use ffg_updates::*; +pub use gloas_payload::*; pub use no_votes::*; pub use votes::*; @@ -45,11 +47,17 @@ pub enum Operation { parent_root: Hash256, justified_checkpoint: Checkpoint, finalized_checkpoint: Checkpoint, + #[serde(default)] + execution_payload_parent_hash: Option, + #[serde(default)] + execution_payload_block_hash: Option, }, ProcessAttestation { validator_index: usize, block_root: Hash256, - target_epoch: Epoch, + attestation_slot: Slot, + #[serde(default)] + payload_present: bool, }, Prune { finalized_root: Hash256, @@ -64,6 +72,24 @@ pub enum Operation { block_root: Hash256, weight: u64, }, + AssertPayloadWeights { + block_root: Hash256, + expected_full_weight: u64, + expected_empty_weight: u64, + }, + AssertParentPayloadStatus { + block_root: Hash256, + expected_status: PayloadStatus, + }, + AssertHeadPayloadStatus { + head_root: Hash256, + expected_status: PayloadStatus, + }, + SetPayloadTiebreak { + block_root: Hash256, + is_timely: bool, + is_data_available: bool, 
+ }, } #[derive(Debug, Clone, Serialize, Deserialize)] @@ -72,12 +98,23 @@ pub struct ForkChoiceTestDefinition { pub justified_checkpoint: Checkpoint, pub finalized_checkpoint: Checkpoint, pub operations: Vec, + #[serde(default)] + pub execution_payload_parent_hash: Option, + #[serde(default)] + pub execution_payload_block_hash: Option, + #[serde(skip)] + pub spec: Option, } impl ForkChoiceTestDefinition { pub fn run(self) { - let mut spec = MainnetEthSpec::default_spec(); - spec.proposer_score_boost = Some(50); + let spec = self.spec.unwrap_or_else(|| { + let mut spec = MainnetEthSpec::default_spec(); + spec.proposer_score_boost = Some(50); + // Legacy test definitions target pre-Gloas behaviour unless explicitly overridden. + spec.gloas_fork_epoch = None; + spec + }); let junk_shuffling_id = AttestationShufflingId::from_components(Epoch::new(0), Hash256::zero()); @@ -90,6 +127,9 @@ impl ForkChoiceTestDefinition { junk_shuffling_id.clone(), junk_shuffling_id, ExecutionStatus::Optimistic(ExecutionBlockHash::zero()), + self.execution_payload_parent_hash, + self.execution_payload_block_hash, + &spec, ) .expect("should create fork choice struct"); let equivocating_indices = BTreeSet::new(); @@ -189,6 +229,8 @@ impl ForkChoiceTestDefinition { parent_root, justified_checkpoint, finalized_checkpoint, + execution_payload_parent_hash, + execution_payload_block_hash, } => { let block = Block { slot, @@ -212,6 +254,8 @@ impl ForkChoiceTestDefinition { ), unrealized_justified_checkpoint: None, unrealized_finalized_checkpoint: None, + execution_payload_parent_hash, + execution_payload_block_hash, }; fork_choice .process_block::( @@ -219,6 +263,7 @@ impl ForkChoiceTestDefinition { slot, self.justified_checkpoint, self.finalized_checkpoint, + &spec, ) .unwrap_or_else(|e| { panic!( @@ -228,14 +273,19 @@ impl ForkChoiceTestDefinition { }); check_bytes_round_trip(&fork_choice); } - // FIXME(sproul): update with payload_present Operation::ProcessAttestation { validator_index, 
block_root, - target_epoch, + attestation_slot, + payload_present, } => { fork_choice - .process_attestation(validator_index, block_root, target_epoch, false) + .process_attestation( + validator_index, + block_root, + attestation_slot, + payload_present, + ) .unwrap_or_else(|_| { panic!( "process_attestation op at index {} returned error", @@ -289,8 +339,141 @@ impl ForkChoiceTestDefinition { Operation::AssertWeight { block_root, weight } => assert_eq!( fork_choice.get_weight(&block_root).unwrap(), weight, - "block weight" + "block weight at op index {}", + op_index ), + Operation::AssertPayloadWeights { + block_root, + expected_full_weight, + expected_empty_weight, + } => { + let block_index = fork_choice + .proto_array + .indices + .get(&block_root) + .unwrap_or_else(|| { + panic!( + "AssertPayloadWeights: block root not found at op index {}", + op_index + ) + }); + let node = fork_choice + .proto_array + .nodes + .get(*block_index) + .unwrap_or_else(|| { + panic!( + "AssertPayloadWeights: node not found at op index {}", + op_index + ) + }); + let v29 = node.as_v29().unwrap_or_else(|_| { + panic!( + "AssertPayloadWeights: node is not V29 at op index {}", + op_index + ) + }); + assert_eq!( + v29.full_payload_weight, expected_full_weight, + "full_payload_weight mismatch at op index {}", + op_index + ); + assert_eq!( + v29.empty_payload_weight, expected_empty_weight, + "empty_payload_weight mismatch at op index {}", + op_index + ); + } + Operation::AssertParentPayloadStatus { + block_root, + expected_status, + } => { + let block_index = fork_choice + .proto_array + .indices + .get(&block_root) + .unwrap_or_else(|| { + panic!( + "AssertParentPayloadStatus: block root not found at op index {}", + op_index + ) + }); + let node = fork_choice + .proto_array + .nodes + .get(*block_index) + .unwrap_or_else(|| { + panic!( + "AssertParentPayloadStatus: node not found at op index {}", + op_index + ) + }); + let v29 = node.as_v29().unwrap_or_else(|_| { + panic!( + 
"AssertParentPayloadStatus: node is not V29 at op index {}", + op_index + ) + }); + assert_eq!( + v29.parent_payload_status, expected_status, + "parent_payload_status mismatch at op index {}", + op_index + ); + } + Operation::AssertHeadPayloadStatus { + head_root, + expected_status, + } => { + let actual = fork_choice + .head_payload_status(&head_root) + .unwrap_or_else(|| { + panic!( + "AssertHeadPayloadStatus: head root not found at op index {}", + op_index + ) + }); + assert_eq!( + actual, expected_status, + "head_payload_status mismatch at op index {}", + op_index + ); + } + Operation::SetPayloadTiebreak { + block_root, + is_timely, + is_data_available, + } => { + let block_index = fork_choice + .proto_array + .indices + .get(&block_root) + .unwrap_or_else(|| { + panic!( + "SetPayloadTiebreak: block root not found at op index {}", + op_index + ) + }); + let node = fork_choice + .proto_array + .nodes + .get_mut(*block_index) + .unwrap_or_else(|| { + panic!( + "SetPayloadTiebreak: node not found at op index {}", + op_index + ) + }); + let node_v29 = node.as_v29_mut().unwrap_or_else(|_| { + panic!( + "SetPayloadTiebreak: node is not V29 at op index {}", + op_index + ) + }); + node_v29.payload_tiebreak = PayloadTiebreak { + is_timely, + is_data_available, + }; + } } } } @@ -325,4 +508,3 @@ fn check_bytes_round_trip(original: &ProtoArrayForkChoice) { "fork choice should encode and decode without change" ); } -*/ diff --git a/consensus/proto_array/src/fork_choice_test_definition/execution_status.rs b/consensus/proto_array/src/fork_choice_test_definition/execution_status.rs index aa26a843069..93c97d09db4 100644 --- a/consensus/proto_array/src/fork_choice_test_definition/execution_status.rs +++ b/consensus/proto_array/src/fork_choice_test_definition/execution_status.rs @@ -35,6 +35,8 @@ pub fn get_execution_status_test_definition_01() -> ForkChoiceTestDefinition { epoch: Epoch::new(1), root: get_root(0), }, + execution_payload_parent_hash: None, + 
execution_payload_block_hash: None, }); // Ensure that the head is 2 @@ -73,6 +75,8 @@ pub fn get_execution_status_test_definition_01() -> ForkChoiceTestDefinition { epoch: Epoch::new(1), root: get_root(0), }, + execution_payload_parent_hash: None, + execution_payload_block_hash: None, }); // Ensure that the head is still 2 @@ -101,7 +105,8 @@ pub fn get_execution_status_test_definition_01() -> ForkChoiceTestDefinition { ops.push(Operation::ProcessAttestation { validator_index: 0, block_root: get_root(1), - target_epoch: Epoch::new(2), + attestation_slot: Slot::new(2), + payload_present: false, }); // Ensure that the head is now 1, because 1 has a vote. @@ -143,7 +148,8 @@ pub fn get_execution_status_test_definition_01() -> ForkChoiceTestDefinition { ops.push(Operation::ProcessAttestation { validator_index: 1, block_root: get_root(2), - target_epoch: Epoch::new(2), + attestation_slot: Slot::new(2), + payload_present: false, }); // Ensure that the head is 2 since 1 and 2 both have a vote @@ -196,6 +202,8 @@ pub fn get_execution_status_test_definition_01() -> ForkChoiceTestDefinition { epoch: Epoch::new(1), root: get_root(0), }, + execution_payload_parent_hash: None, + execution_payload_block_hash: None, }); // Ensure that the head is still 2 @@ -245,7 +253,8 @@ pub fn get_execution_status_test_definition_01() -> ForkChoiceTestDefinition { ops.push(Operation::ProcessAttestation { validator_index: 0, block_root: get_root(3), - target_epoch: Epoch::new(3), + attestation_slot: Slot::new(3), + payload_present: false, }); // Ensure that the head is still 2 @@ -347,7 +356,8 @@ pub fn get_execution_status_test_definition_01() -> ForkChoiceTestDefinition { ops.push(Operation::ProcessAttestation { validator_index: 1, block_root: get_root(1), - target_epoch: Epoch::new(3), + attestation_slot: Slot::new(3), + payload_present: false, }); // Ensure that the head has switched back to 1 @@ -399,6 +409,9 @@ pub fn get_execution_status_test_definition_01() -> ForkChoiceTestDefinition 
{ root: get_root(0), }, operations: ops, + execution_payload_parent_hash: None, + execution_payload_block_hash: None, + spec: None, } } @@ -437,6 +450,8 @@ pub fn get_execution_status_test_definition_02() -> ForkChoiceTestDefinition { epoch: Epoch::new(1), root: get_root(0), }, + execution_payload_parent_hash: None, + execution_payload_block_hash: None, }); // Ensure that the head is 2 @@ -475,6 +490,8 @@ pub fn get_execution_status_test_definition_02() -> ForkChoiceTestDefinition { epoch: Epoch::new(1), root: get_root(0), }, + execution_payload_parent_hash: None, + execution_payload_block_hash: None, }); // Ensure that the head is still 2 @@ -503,7 +520,8 @@ pub fn get_execution_status_test_definition_02() -> ForkChoiceTestDefinition { ops.push(Operation::ProcessAttestation { validator_index: 0, block_root: get_root(1), - target_epoch: Epoch::new(2), + attestation_slot: Slot::new(2), + payload_present: false, }); // Ensure that the head is now 1, because 1 has a vote. @@ -545,7 +563,8 @@ pub fn get_execution_status_test_definition_02() -> ForkChoiceTestDefinition { ops.push(Operation::ProcessAttestation { validator_index: 1, block_root: get_root(2), - target_epoch: Epoch::new(2), + attestation_slot: Slot::new(2), + payload_present: false, }); // Ensure that the head is 2 since 1 and 2 both have a vote @@ -598,6 +617,8 @@ pub fn get_execution_status_test_definition_02() -> ForkChoiceTestDefinition { epoch: Epoch::new(1), root: get_root(0), }, + execution_payload_parent_hash: None, + execution_payload_block_hash: None, }); // Ensure that the head is still 2 @@ -647,7 +668,8 @@ pub fn get_execution_status_test_definition_02() -> ForkChoiceTestDefinition { ops.push(Operation::ProcessAttestation { validator_index: 0, block_root: get_root(3), - target_epoch: Epoch::new(3), + attestation_slot: Slot::new(3), + payload_present: false, }); // Move validator #1 vote from 2 to 3 @@ -660,7 +682,8 @@ pub fn get_execution_status_test_definition_02() -> ForkChoiceTestDefinition { 
ops.push(Operation::ProcessAttestation { validator_index: 1, block_root: get_root(3), - target_epoch: Epoch::new(3), + attestation_slot: Slot::new(3), + payload_present: false, }); // Ensure that the head is now 3. @@ -763,6 +786,9 @@ pub fn get_execution_status_test_definition_02() -> ForkChoiceTestDefinition { root: get_root(0), }, operations: ops, + execution_payload_parent_hash: None, + execution_payload_block_hash: None, + spec: None, } } @@ -801,6 +827,8 @@ pub fn get_execution_status_test_definition_03() -> ForkChoiceTestDefinition { epoch: Epoch::new(1), root: get_root(0), }, + execution_payload_parent_hash: None, + execution_payload_block_hash: None, }); // Ensure that the head is 2 @@ -839,6 +867,8 @@ pub fn get_execution_status_test_definition_03() -> ForkChoiceTestDefinition { epoch: Epoch::new(1), root: get_root(0), }, + execution_payload_parent_hash: None, + execution_payload_block_hash: None, }); // Ensure that the head is still 2 @@ -867,7 +897,8 @@ pub fn get_execution_status_test_definition_03() -> ForkChoiceTestDefinition { ops.push(Operation::ProcessAttestation { validator_index: 0, block_root: get_root(1), - target_epoch: Epoch::new(2), + attestation_slot: Slot::new(2), + payload_present: false, }); // Ensure that the head is now 1, because 1 has a vote. @@ -909,7 +940,8 @@ pub fn get_execution_status_test_definition_03() -> ForkChoiceTestDefinition { ops.push(Operation::ProcessAttestation { validator_index: 1, block_root: get_root(1), - target_epoch: Epoch::new(2), + attestation_slot: Slot::new(2), + payload_present: false, }); // Ensure that the head is 1. @@ -962,6 +994,8 @@ pub fn get_execution_status_test_definition_03() -> ForkChoiceTestDefinition { epoch: Epoch::new(1), root: get_root(0), }, + execution_payload_parent_hash: None, + execution_payload_block_hash: None, }); // Ensure that the head is now 3, applying a proposer boost to 3 as well. 
@@ -1065,6 +1099,9 @@ pub fn get_execution_status_test_definition_03() -> ForkChoiceTestDefinition { root: get_root(0), }, operations: ops, + execution_payload_parent_hash: None, + execution_payload_block_hash: None, + spec: None, } } diff --git a/consensus/proto_array/src/fork_choice_test_definition/ffg_updates.rs b/consensus/proto_array/src/fork_choice_test_definition/ffg_updates.rs index 3b31616145d..ee55ea649fe 100644 --- a/consensus/proto_array/src/fork_choice_test_definition/ffg_updates.rs +++ b/consensus/proto_array/src/fork_choice_test_definition/ffg_updates.rs @@ -27,6 +27,8 @@ pub fn get_ffg_case_01_test_definition() -> ForkChoiceTestDefinition { parent_root: get_root(0), justified_checkpoint: get_checkpoint(0), finalized_checkpoint: get_checkpoint(0), + execution_payload_parent_hash: None, + execution_payload_block_hash: None, }); ops.push(Operation::ProcessBlock { slot: Slot::new(2), @@ -34,6 +36,8 @@ pub fn get_ffg_case_01_test_definition() -> ForkChoiceTestDefinition { parent_root: get_root(1), justified_checkpoint: get_checkpoint(1), finalized_checkpoint: get_checkpoint(0), + execution_payload_parent_hash: None, + execution_payload_block_hash: None, }); ops.push(Operation::ProcessBlock { slot: Slot::new(3), @@ -41,6 +45,8 @@ pub fn get_ffg_case_01_test_definition() -> ForkChoiceTestDefinition { parent_root: get_root(2), justified_checkpoint: get_checkpoint(2), finalized_checkpoint: get_checkpoint(1), + execution_payload_parent_hash: None, + execution_payload_block_hash: None, }); // Ensure that with justified epoch 0 we find 3 @@ -101,6 +107,9 @@ pub fn get_ffg_case_01_test_definition() -> ForkChoiceTestDefinition { justified_checkpoint: get_checkpoint(0), finalized_checkpoint: get_checkpoint(0), operations: ops, + execution_payload_parent_hash: None, + execution_payload_block_hash: None, + spec: None, } } @@ -137,6 +146,8 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { parent_root: get_root(0), justified_checkpoint: 
get_checkpoint(0), finalized_checkpoint: get_checkpoint(0), + execution_payload_parent_hash: None, + execution_payload_block_hash: None, }); ops.push(Operation::ProcessBlock { slot: Slot::new(2), @@ -147,6 +158,8 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { root: get_root(1), }, finalized_checkpoint: get_checkpoint(0), + execution_payload_parent_hash: None, + execution_payload_block_hash: None, }); ops.push(Operation::ProcessBlock { slot: Slot::new(3), @@ -157,6 +170,8 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { root: get_root(1), }, finalized_checkpoint: get_checkpoint(0), + execution_payload_parent_hash: None, + execution_payload_block_hash: None, }); ops.push(Operation::ProcessBlock { slot: Slot::new(4), @@ -167,6 +182,8 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { root: get_root(1), }, finalized_checkpoint: get_checkpoint(0), + execution_payload_parent_hash: None, + execution_payload_block_hash: None, }); ops.push(Operation::ProcessBlock { slot: Slot::new(5), @@ -177,6 +194,8 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { root: get_root(3), }, finalized_checkpoint: get_checkpoint(0), + execution_payload_parent_hash: None, + execution_payload_block_hash: None, }); // Right branch @@ -186,6 +205,8 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { parent_root: get_root(0), justified_checkpoint: get_checkpoint(0), finalized_checkpoint: get_checkpoint(0), + execution_payload_parent_hash: None, + execution_payload_block_hash: None, }); ops.push(Operation::ProcessBlock { slot: Slot::new(2), @@ -193,6 +214,8 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { parent_root: get_root(2), justified_checkpoint: get_checkpoint(0), finalized_checkpoint: get_checkpoint(0), + execution_payload_parent_hash: None, + execution_payload_block_hash: None, }); ops.push(Operation::ProcessBlock { slot: Slot::new(3), @@ -200,6 
+223,8 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { parent_root: get_root(4), justified_checkpoint: get_checkpoint(0), finalized_checkpoint: get_checkpoint(0), + execution_payload_parent_hash: None, + execution_payload_block_hash: None, }); ops.push(Operation::ProcessBlock { slot: Slot::new(4), @@ -210,6 +235,8 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { root: get_root(2), }, finalized_checkpoint: get_checkpoint(0), + execution_payload_parent_hash: None, + execution_payload_block_hash: None, }); ops.push(Operation::ProcessBlock { slot: Slot::new(5), @@ -220,6 +247,8 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { root: get_root(4), }, finalized_checkpoint: get_checkpoint(0), + execution_payload_parent_hash: None, + execution_payload_block_hash: None, }); // Ensure that if we start at 0 we find 10 (just: 0, fin: 0). @@ -282,7 +311,8 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { ops.push(Operation::ProcessAttestation { validator_index: 0, block_root: get_root(1), - target_epoch: Epoch::new(0), + attestation_slot: Slot::new(0), + payload_present: false, }); // Ensure that if we start at 0 we find 9 (just: 0, fin: 0). @@ -345,7 +375,8 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { ops.push(Operation::ProcessAttestation { validator_index: 1, block_root: get_root(2), - target_epoch: Epoch::new(0), + attestation_slot: Slot::new(0), + payload_present: false, }); // Ensure that if we start at 0 we find 10 (just: 0, fin: 0). 
@@ -489,6 +520,9 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { justified_checkpoint: get_checkpoint(0), finalized_checkpoint: get_checkpoint(0), operations: ops, + execution_payload_parent_hash: None, + execution_payload_block_hash: None, + spec: None, } } diff --git a/consensus/proto_array/src/fork_choice_test_definition/gloas_payload.rs b/consensus/proto_array/src/fork_choice_test_definition/gloas_payload.rs new file mode 100644 index 00000000000..b6568106e39 --- /dev/null +++ b/consensus/proto_array/src/fork_choice_test_definition/gloas_payload.rs @@ -0,0 +1,222 @@ +use super::*; + +fn gloas_spec() -> ChainSpec { + let mut spec = MainnetEthSpec::default_spec(); + spec.proposer_score_boost = Some(50); + spec.gloas_fork_epoch = Some(Epoch::new(0)); + spec +} + +pub fn get_gloas_chain_following_test_definition() -> ForkChoiceTestDefinition { + let mut ops = vec![]; + + // Build two branches off genesis where one child extends parent's payload chain (Full) + // and the other does not (Empty). + ops.push(Operation::ProcessBlock { + slot: Slot::new(1), + root: get_root(1), + parent_root: get_root(0), + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + execution_payload_parent_hash: Some(get_hash(0)), + execution_payload_block_hash: Some(get_hash(1)), + }); + ops.push(Operation::ProcessBlock { + slot: Slot::new(1), + root: get_root(2), + parent_root: get_root(0), + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + execution_payload_parent_hash: Some(get_hash(99)), + execution_payload_block_hash: Some(get_hash(2)), + }); + + // Extend both branches to verify that head selection follows the selected chain. 
+ ops.push(Operation::ProcessBlock { + slot: Slot::new(2), + root: get_root(3), + parent_root: get_root(1), + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + execution_payload_parent_hash: Some(get_hash(1)), + execution_payload_block_hash: Some(get_hash(3)), + }); + ops.push(Operation::ProcessBlock { + slot: Slot::new(2), + root: get_root(4), + parent_root: get_root(2), + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + execution_payload_parent_hash: Some(get_hash(100)), + execution_payload_block_hash: Some(get_hash(4)), + }); + + ops.push(Operation::AssertParentPayloadStatus { + block_root: get_root(1), + expected_status: PayloadStatus::Full, + }); + ops.push(Operation::AssertParentPayloadStatus { + block_root: get_root(2), + expected_status: PayloadStatus::Empty, + }); + + // With equal full/empty parent weights, tiebreak decides which chain to follow. + ops.push(Operation::SetPayloadTiebreak { + block_root: get_root(0), + is_timely: true, + is_data_available: true, + }); + ops.push(Operation::FindHead { + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + justified_state_balances: vec![1], + expected_head: get_root(3), + }); + + ops.push(Operation::SetPayloadTiebreak { + block_root: get_root(0), + is_timely: false, + is_data_available: false, + }); + ops.push(Operation::FindHead { + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + justified_state_balances: vec![1], + expected_head: get_root(4), + }); + + ForkChoiceTestDefinition { + finalized_block_slot: Slot::new(0), + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + operations: ops, + execution_payload_parent_hash: Some(get_hash(42)), + execution_payload_block_hash: Some(get_hash(0)), + spec: Some(gloas_spec()), + } +} + +pub fn get_gloas_payload_probe_test_definition() -> ForkChoiceTestDefinition { + let mut ops = vec![]; + + 
ops.push(Operation::ProcessBlock { + slot: Slot::new(1), + root: get_root(1), + parent_root: get_root(0), + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + execution_payload_parent_hash: Some(get_hash(0)), + execution_payload_block_hash: Some(get_hash(1)), + }); + + // One Full and one Empty vote for the same head block: tie should probe as Full. + ops.push(Operation::ProcessAttestation { + validator_index: 0, + block_root: get_root(1), + attestation_slot: Slot::new(2), + payload_present: true, + }); + ops.push(Operation::ProcessAttestation { + validator_index: 1, + block_root: get_root(1), + attestation_slot: Slot::new(2), + payload_present: false, + }); + ops.push(Operation::FindHead { + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + justified_state_balances: vec![1, 1], + expected_head: get_root(1), + }); + ops.push(Operation::AssertPayloadWeights { + block_root: get_root(1), + expected_full_weight: 1, + expected_empty_weight: 1, + }); + ops.push(Operation::AssertHeadPayloadStatus { + head_root: get_root(1), + expected_status: PayloadStatus::Full, + }); + + // Flip validator 0 to Empty; probe should now report Empty. + ops.push(Operation::ProcessAttestation { + validator_index: 0, + block_root: get_root(1), + attestation_slot: Slot::new(3), + payload_present: false, + }); + ops.push(Operation::FindHead { + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + justified_state_balances: vec![1, 1], + expected_head: get_root(1), + }); + ops.push(Operation::AssertPayloadWeights { + block_root: get_root(1), + expected_full_weight: 0, + expected_empty_weight: 2, + }); + ops.push(Operation::AssertHeadPayloadStatus { + head_root: get_root(1), + expected_status: PayloadStatus::Empty, + }); + + // Same-slot attestation to a new head candidate should be Pending (no payload bucket change). 
+ ops.push(Operation::ProcessBlock { + slot: Slot::new(3), + root: get_root(5), + parent_root: get_root(1), + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + execution_payload_parent_hash: Some(get_hash(1)), + execution_payload_block_hash: Some(get_hash(5)), + }); + ops.push(Operation::ProcessAttestation { + validator_index: 2, + block_root: get_root(5), + attestation_slot: Slot::new(3), + payload_present: true, + }); + ops.push(Operation::FindHead { + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + justified_state_balances: vec![1, 1, 1], + expected_head: get_root(5), + }); + ops.push(Operation::AssertPayloadWeights { + block_root: get_root(5), + expected_full_weight: 0, + expected_empty_weight: 0, + }); + ops.push(Operation::AssertHeadPayloadStatus { + head_root: get_root(5), + expected_status: PayloadStatus::Full, + }); + + ForkChoiceTestDefinition { + finalized_block_slot: Slot::new(0), + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + operations: ops, + execution_payload_parent_hash: Some(get_hash(42)), + execution_payload_block_hash: Some(get_hash(0)), + spec: Some(gloas_spec()), + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn chain_following() { + let test = get_gloas_chain_following_test_definition(); + test.run(); + } + + #[test] + fn payload_probe() { + let test = get_gloas_payload_probe_test_definition(); + test.run(); + } +} diff --git a/consensus/proto_array/src/fork_choice_test_definition/no_votes.rs b/consensus/proto_array/src/fork_choice_test_definition/no_votes.rs index d20eaacb99a..61e4c1270ce 100644 --- a/consensus/proto_array/src/fork_choice_test_definition/no_votes.rs +++ b/consensus/proto_array/src/fork_choice_test_definition/no_votes.rs @@ -36,6 +36,8 @@ pub fn get_no_votes_test_definition() -> ForkChoiceTestDefinition { epoch: Epoch::new(1), root: Hash256::zero(), }, + execution_payload_parent_hash: None, + 
execution_payload_block_hash: None, }, // Ensure the head is 2 // @@ -71,6 +73,8 @@ pub fn get_no_votes_test_definition() -> ForkChoiceTestDefinition { epoch: Epoch::new(1), root: Hash256::zero(), }, + execution_payload_parent_hash: None, + execution_payload_block_hash: None, }, // Ensure the head is still 2 // @@ -108,6 +112,8 @@ pub fn get_no_votes_test_definition() -> ForkChoiceTestDefinition { epoch: Epoch::new(1), root: Hash256::zero(), }, + execution_payload_parent_hash: None, + execution_payload_block_hash: None, }, // Ensure 2 is still the head // @@ -147,6 +153,8 @@ pub fn get_no_votes_test_definition() -> ForkChoiceTestDefinition { epoch: Epoch::new(1), root: Hash256::zero(), }, + execution_payload_parent_hash: None, + execution_payload_block_hash: None, }, // Ensure the head is 4. // @@ -185,6 +193,8 @@ pub fn get_no_votes_test_definition() -> ForkChoiceTestDefinition { epoch: Epoch::new(1), root: Hash256::zero(), }, + execution_payload_parent_hash: None, + execution_payload_block_hash: None, }, // Ensure the head is now 5 whilst the justified epoch is 0. 
// @@ -271,6 +281,8 @@ pub fn get_no_votes_test_definition() -> ForkChoiceTestDefinition { epoch: Epoch::new(1), root: Hash256::zero(), }, + execution_payload_parent_hash: None, + execution_payload_block_hash: None, }, // Ensure 6 is the head // @@ -305,6 +317,9 @@ pub fn get_no_votes_test_definition() -> ForkChoiceTestDefinition { root: Hash256::zero(), }, operations, + execution_payload_parent_hash: None, + execution_payload_block_hash: None, + spec: None, } } diff --git a/consensus/proto_array/src/fork_choice_test_definition/votes.rs b/consensus/proto_array/src/fork_choice_test_definition/votes.rs index 01994fff9b2..d170e0974ff 100644 --- a/consensus/proto_array/src/fork_choice_test_definition/votes.rs +++ b/consensus/proto_array/src/fork_choice_test_definition/votes.rs @@ -35,6 +35,8 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { epoch: Epoch::new(1), root: get_root(0), }, + execution_payload_parent_hash: None, + execution_payload_block_hash: None, }); // Ensure that the head is 2 @@ -73,6 +75,8 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { epoch: Epoch::new(1), root: get_root(0), }, + execution_payload_parent_hash: None, + execution_payload_block_hash: None, }); // Ensure that the head is still 2 @@ -101,7 +105,8 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { ops.push(Operation::ProcessAttestation { validator_index: 0, block_root: get_root(1), - target_epoch: Epoch::new(2), + attestation_slot: Slot::new(2), + payload_present: false, }); // Ensure that the head is now 1, because 1 has a vote. 
@@ -130,7 +135,8 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { ops.push(Operation::ProcessAttestation { validator_index: 1, block_root: get_root(2), - target_epoch: Epoch::new(2), + attestation_slot: Slot::new(2), + payload_present: false, }); // Ensure that the head is 2 since 1 and 2 both have a vote @@ -170,6 +176,8 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { epoch: Epoch::new(1), root: get_root(0), }, + execution_payload_parent_hash: None, + execution_payload_block_hash: None, }); // Ensure that the head is still 2 @@ -202,7 +210,8 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { ops.push(Operation::ProcessAttestation { validator_index: 0, block_root: get_root(3), - target_epoch: Epoch::new(3), + attestation_slot: Slot::new(3), + payload_present: false, }); // Ensure that the head is still 2 @@ -236,7 +245,8 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { ops.push(Operation::ProcessAttestation { validator_index: 1, block_root: get_root(1), - target_epoch: Epoch::new(3), + attestation_slot: Slot::new(3), + payload_present: false, }); // Ensure that the head is now 3 @@ -280,6 +290,8 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { epoch: Epoch::new(1), root: get_root(0), }, + execution_payload_parent_hash: None, + execution_payload_block_hash: None, }); // Ensure that the head is now 4 @@ -327,9 +339,11 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { epoch: Epoch::new(2), root: get_root(1), }, + execution_payload_parent_hash: None, + execution_payload_block_hash: None, }); - // Ensure that 5 is filtered out and the head stays at 4. + // Ensure that 5 becomes the head. 
// // 0 // / \ @@ -337,9 +351,9 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { // | // 3 // | - // 4 <- head + // 4 // / - // 5 + // head-> 5 ops.push(Operation::FindHead { justified_checkpoint: Checkpoint { epoch: Epoch::new(1), @@ -350,7 +364,7 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { root: get_root(0), }, justified_state_balances: balances.clone(), - expected_head: get_root(4), + expected_head: get_root(5), }); // Add block 6, which has a justified epoch of 0. @@ -376,6 +390,8 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { epoch: Epoch::new(1), root: get_root(0), }, + execution_payload_parent_hash: None, + execution_payload_block_hash: None, }); // Move both votes to 5. @@ -392,12 +408,14 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { ops.push(Operation::ProcessAttestation { validator_index: 0, block_root: get_root(5), - target_epoch: Epoch::new(4), + attestation_slot: Slot::new(4), + payload_present: false, }); ops.push(Operation::ProcessAttestation { validator_index: 1, block_root: get_root(5), - target_epoch: Epoch::new(4), + attestation_slot: Slot::new(4), + payload_present: false, }); // Add blocks 7, 8 and 9. 
Adding these blocks helps test the `best_descendant` @@ -430,6 +448,8 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { epoch: Epoch::new(2), root: get_root(5), }, + execution_payload_parent_hash: None, + execution_payload_block_hash: None, }); ops.push(Operation::ProcessBlock { slot: Slot::new(0), @@ -443,6 +463,8 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { epoch: Epoch::new(2), root: get_root(5), }, + execution_payload_parent_hash: None, + execution_payload_block_hash: None, }); ops.push(Operation::ProcessBlock { slot: Slot::new(0), @@ -456,10 +478,12 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { epoch: Epoch::new(2), root: get_root(5), }, + execution_payload_parent_hash: None, + execution_payload_block_hash: None, }); - // Ensure that 6 is the head, even though 5 has all the votes. This is testing to ensure - // that 5 is filtered out due to a differing justified epoch. + // Ensure that 9 is the head. The branch rooted at 5 remains viable and its best descendant + // is selected. 
// // 0 // / \ @@ -469,13 +493,13 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { // | // 4 // / \ - // 5 6 <- head + // 5 6 // | // 7 // | // 8 // / - // 9 + // head-> 9 ops.push(Operation::FindHead { justified_checkpoint: Checkpoint { epoch: Epoch::new(1), @@ -486,7 +510,7 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { root: get_root(0), }, justified_state_balances: balances.clone(), - expected_head: get_root(6), + expected_head: get_root(9), }); // Change fork-choice justified epoch to 1, and the start block to 5 and ensure that 9 is @@ -545,12 +569,14 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { ops.push(Operation::ProcessAttestation { validator_index: 0, block_root: get_root(9), - target_epoch: Epoch::new(5), + attestation_slot: Slot::new(5), + payload_present: false, }); ops.push(Operation::ProcessAttestation { validator_index: 1, block_root: get_root(9), - target_epoch: Epoch::new(5), + attestation_slot: Slot::new(5), + payload_present: false, }); // Add block 10 @@ -582,6 +608,8 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { epoch: Epoch::new(2), root: get_root(5), }, + execution_payload_parent_hash: None, + execution_payload_block_hash: None, }); // Double-check the head is still 9 (no diagram this time) @@ -621,12 +649,14 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { ops.push(Operation::ProcessAttestation { validator_index: 2, block_root: get_root(10), - target_epoch: Epoch::new(5), + attestation_slot: Slot::new(5), + payload_present: false, }); ops.push(Operation::ProcessAttestation { validator_index: 3, block_root: get_root(10), - target_epoch: Epoch::new(5), + attestation_slot: Slot::new(5), + payload_present: false, }); // Check the head is now 10. 
@@ -817,6 +847,8 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { epoch: Epoch::new(2), root: get_root(5), }, + execution_payload_parent_hash: None, + execution_payload_block_hash: None, }); // Ensure the head is now 11 @@ -854,6 +886,9 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { root: get_root(0), }, operations: ops, + execution_payload_parent_hash: None, + execution_payload_block_hash: None, + spec: None, } } diff --git a/consensus/proto_array/src/lib.rs b/consensus/proto_array/src/lib.rs index 1f126246b34..b131fb403e7 100644 --- a/consensus/proto_array/src/lib.rs +++ b/consensus/proto_array/src/lib.rs @@ -16,5 +16,7 @@ pub use error::Error; pub mod core { pub use super::proto_array::{ProposerBoost, ProtoArray, ProtoNode}; pub use super::proto_array_fork_choice::VoteTracker; - pub use super::ssz_container::{SszContainer, SszContainerV17, SszContainerV28}; + pub use super::ssz_container::{ + SszContainer, SszContainerLegacyV17, SszContainerLegacyV28, SszContainerV29, + }; } diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 1eb7cc9d882..926767093f7 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -1,5 +1,5 @@ use crate::error::InvalidBestNodeInfo; -use crate::{Block, ExecutionStatus, JustifiedBalances, error::Error, PayloadStatus}; +use crate::{Block, ExecutionStatus, JustifiedBalances, PayloadStatus, error::Error}; use fixed_bytes::FixedBytesExtended; use serde::{Deserialize, Serialize}; use ssz::Encode; @@ -68,13 +68,12 @@ impl InvalidationOperation { } } - #[superstruct( variants(V17, V29), - variant_attributes(derive(Clone, PartialEq, Debug, Encode, Decode, Serialize, Deserialize)), + variant_attributes(derive(Clone, PartialEq, Debug, Encode, Decode, Serialize, Deserialize)) )] #[derive(PartialEq, Debug, Encode, Decode, Serialize, Deserialize, Clone)] -#[ssz(enum_behaviour = "transparent")] +#[ssz(enum_behaviour 
= "union")] pub struct ProtoNode { /// The `slot` is not necessary for `ProtoArray`, it just exists so external components can /// easily query the block slot. This is useful for upstream fork choice logic. @@ -130,6 +129,10 @@ pub struct ProtoNode { pub full_payload_weight: u64, #[superstruct(only(V29), partial_getter(copy))] pub execution_payload_block_hash: ExecutionBlockHash, + /// Tiebreaker for payload preference when full_payload_weight == empty_payload_weight. + /// Per spec: prefer Full if block was timely and data is available; otherwise prefer Empty. + #[superstruct(only(V29), partial_getter(copy))] + pub payload_tiebreak: PayloadTiebreak, } #[derive(PartialEq, Debug, Encode, Decode, Serialize, Deserialize, Copy, Clone)] @@ -147,6 +150,83 @@ impl Default for ProposerBoost { } } +#[derive(Clone, PartialEq, Debug, Copy)] +pub struct NodeDelta { + pub delta: i64, + pub empty_delta: i64, + pub full_delta: i64, + pub payload_tiebreaker: Option, +} + +impl NodeDelta { + /// Determine the payload bucket for a vote based on whether the vote's slot matches the + /// block's slot (Pending), or the vote's `payload_present` flag (Full/Empty). + pub fn payload_status( + vote_slot: Slot, + payload_present: bool, + block_slot: Slot, + ) -> PayloadStatus { + if vote_slot == block_slot { + PayloadStatus::Pending + } else if payload_present { + PayloadStatus::Full + } else { + PayloadStatus::Empty + } + } + + /// Add a balance to the appropriate payload status. + pub fn add_payload_delta( + &mut self, + status: PayloadStatus, + balance: u64, + index: usize, + ) -> Result<(), Error> { + let field = match status { + PayloadStatus::Full => &mut self.full_delta, + PayloadStatus::Empty => &mut self.empty_delta, + PayloadStatus::Pending => return Ok(()), + }; + *field = field + .checked_add(balance as i64) + .ok_or(Error::DeltaOverflow(index))?; + Ok(()) + } + + /// Subtract a balance from the appropriate payload status. 
+ pub fn sub_payload_delta( + &mut self, + status: PayloadStatus, + balance: u64, + index: usize, + ) -> Result<(), Error> { + let field = match status { + PayloadStatus::Full => &mut self.full_delta, + PayloadStatus::Empty => &mut self.empty_delta, + PayloadStatus::Pending => return Ok(()), + }; + *field = field + .checked_sub(balance as i64) + .ok_or(Error::DeltaOverflow(index))?; + Ok(()) + } +} + +impl PartialEq for NodeDelta { + fn eq(&self, other: &i64) -> bool { + self.delta == *other + && self.empty_delta == 0 + && self.full_delta == 0 + && self.payload_tiebreaker.is_none() + } +} + +#[derive(Clone, Copy, PartialEq, Eq, Debug, Default, Encode, Decode, Serialize, Deserialize)] +pub struct PayloadTiebreak { + pub is_timely: bool, + pub is_data_available: bool, +} + #[derive(PartialEq, Debug, Serialize, Deserialize, Clone)] pub struct ProtoArray { /// Do not attempt to prune the tree unless it has at least this many nodes. Small prunes @@ -174,7 +254,7 @@ impl ProtoArray { #[allow(clippy::too_many_arguments)] pub fn apply_score_changes( &mut self, - mut deltas: Vec, + mut deltas: Vec, best_justified_checkpoint: Checkpoint, best_finalized_checkpoint: Checkpoint, new_justified_balances: &JustifiedBalances, @@ -206,16 +286,32 @@ impl ProtoArray { continue; } - let mut node_delta = if let Ok(proto_node) = node.as_v17() && proto_node.execution_status.is_invalid() { + let execution_status_is_invalid = if let Ok(proto_node) = node.as_v17() + && proto_node.execution_status.is_invalid() + { + true + } else { + false + }; + + let node_deltas = deltas + .get(node_index) + .copied() + .ok_or(Error::InvalidNodeDelta(node_index))?; + + let mut node_delta = if execution_status_is_invalid { // If the node has an invalid execution payload, reduce its weight to zero. 0_i64 .checked_sub(node.weight() as i64) .ok_or(Error::InvalidExecutionDeltaOverflow(node_index))? } else { - deltas - .get(node_index) - .copied() - .ok_or(Error::InvalidNodeDelta(node_index))? 
+ node_deltas.delta + }; + + let (node_empty_delta, node_full_delta) = if node.as_v29().is_ok() { + (node_deltas.empty_delta, node_deltas.full_delta) + } else { + (0, 0) }; // If we find the node for which the proposer boost was previously applied, decrease @@ -250,27 +346,17 @@ impl ProtoArray { // Apply the delta to the node. if execution_status_is_invalid { - // Invalid nodes always have a weight of 0. - node.weight() = 0 - } else if node_delta < 0 { - // Note: I am conflicted about whether to use `saturating_sub` or `checked_sub` - // here. - // - // I can't think of any valid reason why `node_delta.abs()` should be greater than - // `node.weight`, so I have chosen `checked_sub` to try and fail-fast if there is - // some error. - // - // However, I am not fully convinced that some valid case for `saturating_sub` does - // not exist. - node.weight() = node - .weight() - .checked_sub(node_delta.unsigned_abs()) - .ok_or(Error::DeltaOverflow(node_index))?; + *node.weight_mut() = 0; } else { - node.weight = node - .weight() - .checked_add(node_delta as u64) - .ok_or(Error::DeltaOverflow(node_index))?; + *node.weight_mut() = apply_delta(node.weight(), node_delta, node_index)?; + } + + // Apply post-Gloas score deltas. + if let Ok(node) = node.as_v29_mut() { + node.empty_payload_weight = + apply_delta(node.empty_payload_weight, node_empty_delta, node_index)?; + node.full_payload_weight = + apply_delta(node.full_payload_weight, node_full_delta, node_index)?; } // Update the parent delta (if any). @@ -279,8 +365,32 @@ impl ProtoArray { .get_mut(parent_index) .ok_or(Error::InvalidParentDelta(parent_index))?; - // Back-propagate the nodes delta to its parent. - *parent_delta += node_delta; + // Back-propagate the node's delta to its parent. 
+ parent_delta.delta = parent_delta + .delta + .checked_add(node_delta) + .ok_or(Error::DeltaOverflow(parent_index))?; + + // Per spec's `is_supporting_vote`: a vote for descendant B supports + // ancestor A's payload status based on B's `parent_payload_status`. + // Route the child's *total* weight delta to the parent's appropriate + // payload bucket. + match node.parent_payload_status() { + Ok(PayloadStatus::Full) => { + parent_delta.full_delta = parent_delta + .full_delta + .checked_add(node_delta) + .ok_or(Error::DeltaOverflow(parent_index))?; + } + Ok(PayloadStatus::Empty) => { + parent_delta.empty_delta = parent_delta + .empty_delta + .checked_add(node_delta) + .ok_or(Error::DeltaOverflow(parent_index))?; + } + // Pending or V17 nodes: no payload propagation. + _ => {} + } } } @@ -357,26 +467,40 @@ impl ProtoArray { unrealized_finalized_checkpoint: block.unrealized_finalized_checkpoint, }) } else { - let execution_payload_block_hash = block - .execution_payload_block_hash - .ok_or(Error::BrokenBlock{block_root: block.root})?; - - let parent_payload_status: PayloadStatus = - if let Some(parent_node) = - parent_index.and_then(|idx| self.nodes.get(idx)) - { - let v29 = parent_node - .as_v29() - .map_err(|_| Error::InvalidNodeVariant{block_root: block.root})?; - if execution_payload_block_hash == v29.execution_payload_block_hash - { - PayloadStatus::Empty - } else { - PayloadStatus::Full - } - } else { - PayloadStatus::Full + let execution_payload_block_hash = + block + .execution_payload_block_hash + .ok_or(Error::BrokenBlock { + block_root: block.root, + })?; + + let execution_payload_parent_hash = + block + .execution_payload_parent_hash + .ok_or(Error::BrokenBlock { + block_root: block.root, + })?; + + let parent_payload_status: PayloadStatus = if let Some(parent_node) = + parent_index.and_then(|idx| self.nodes.get(idx)) + { + // Get the parent's execution block hash, handling both V17 and V29 nodes. + // V17 parents occur during the Gloas fork transition. 
+ let parent_el_block_hash = match parent_node { + ProtoNode::V29(v29) => Some(v29.execution_payload_block_hash), + ProtoNode::V17(v17) => v17.execution_status.block_hash(), }; + // Per spec's `is_parent_node_full`: if the child's EL parent hash + // matches the parent's EL block hash, the child extends the parent's + // payload chain, meaning the parent was Full. + if parent_el_block_hash.is_some_and(|hash| execution_payload_parent_hash == hash) { + PayloadStatus::Full + } else { + PayloadStatus::Empty + } + } else { + PayloadStatus::Full + }; ProtoNode::V29(ProtoNodeV29 { slot: block.slot, @@ -397,6 +521,7 @@ impl ProtoArray { empty_payload_weight: 0, full_payload_weight: 0, execution_payload_block_hash, + payload_tiebreak: PayloadTiebreak::default(), }) }; @@ -408,7 +533,9 @@ impl ProtoArray { .get(parent_index) .ok_or(Error::InvalidNodeIndex(parent_index))?; - if let Ok(status) = parent.execution_status() && status.is_invalid() { + if let Ok(status) = parent.execution_status() + && status.is_invalid() + { return Err(Error::ParentExecutionStatusIsInvalid { block_root: block.root, parent_root: parent.root(), @@ -469,33 +596,43 @@ impl ProtoArray { .nodes .get_mut(index) .ok_or(Error::InvalidNodeIndex(index))?; - let parent_index = match node.execution_status { - // We have reached a node that we already know is valid. No need to iterate further - // since we assume an ancestors have already been set to valid. - ExecutionStatus::Valid(_) => return Ok(()), - // We have reached an irrelevant node, this node is prior to a terminal execution - // block. There's no need to iterate further, it's impossible for this block to have - // any relevant ancestors. - ExecutionStatus::Irrelevant(_) => return Ok(()), - // The block has an unknown status, set it to valid since any ancestor of a valid - // payload can be considered valid. 
- ExecutionStatus::Optimistic(payload_block_hash) => { - node.execution_status = ExecutionStatus::Valid(payload_block_hash); + let parent_index = match node { + ProtoNode::V17(node) => match node.execution_status { + // We have reached a node that we already know is valid. No need to iterate further + // since we assume an ancestors have already been set to valid. + ExecutionStatus::Valid(_) => return Ok(()), + // We have reached an irrelevant node, this node is prior to a terminal execution + // block. There's no need to iterate further, it's impossible for this block to have + // any relevant ancestors. + ExecutionStatus::Irrelevant(_) => return Ok(()), + // The block has an unknown status, set it to valid since any ancestor of a valid + // payload can be considered valid. + ExecutionStatus::Optimistic(payload_block_hash) => { + node.execution_status = ExecutionStatus::Valid(payload_block_hash); + if let Some(parent_index) = node.parent { + parent_index + } else { + // We have reached the root block, iteration complete. + return Ok(()); + } + } + // An ancestor of the valid payload was invalid. This is a serious error which + // indicates a consensus failure in the execution node. This is unrecoverable. + ExecutionStatus::Invalid(ancestor_payload_block_hash) => { + return Err(Error::InvalidAncestorOfValidPayload { + ancestor_block_root: node.root, + ancestor_payload_block_hash, + }); + } + }, + // Gloas nodes don't carry `ExecutionStatus`. + ProtoNode::V29(node) => { if let Some(parent_index) = node.parent { parent_index } else { - // We have reached the root block, iteration complete. return Ok(()); } } - // An ancestor of the valid payload was invalid. This is a serious error which - // indicates a consensus failure in the execution node. This is unrecoverable. 
- ExecutionStatus::Invalid(ancestor_payload_block_hash) => { - return Err(Error::InvalidAncestorOfValidPayload { - ancestor_block_root: node.root, - ancestor_payload_block_hash, - }); - } }; index = parent_index; @@ -551,10 +688,11 @@ impl ProtoArray { .get_mut(index) .ok_or(Error::InvalidNodeIndex(index))?; - match node.execution_status { - ExecutionStatus::Valid(hash) - | ExecutionStatus::Invalid(hash) - | ExecutionStatus::Optimistic(hash) => { + let node_execution_status = node.execution_status(); + match node_execution_status { + Ok(ExecutionStatus::Valid(hash)) + | Ok(ExecutionStatus::Invalid(hash)) + | Ok(ExecutionStatus::Optimistic(hash)) => { // If we're no longer processing the `head_block_root` and the last valid // ancestor is unknown, exit this loop and proceed to invalidate and // descendants of `head_block_root`/`latest_valid_ancestor_root`. @@ -563,7 +701,7 @@ impl ProtoArray { // supplied, don't validate any ancestors. The alternative is to invalidate // *all* ancestors, which would likely involve shutting down the client due to // an invalid justified checkpoint. - if !latest_valid_ancestor_is_descendant && node.root != head_block_root { + if !latest_valid_ancestor_is_descendant && node.root() != head_block_root { break; } else if op.latest_valid_ancestor() == Some(hash) { // If the `best_child` or `best_descendant` of the latest valid hash was @@ -574,63 +712,67 @@ impl ProtoArray { // defend against errors which might result in an invalid block being set as // head. 
if node - .best_child + .best_child() .is_some_and(|i| invalidated_indices.contains(&i)) { - node.best_child = None + *node.best_child_mut() = None } if node - .best_descendant + .best_descendant() .is_some_and(|i| invalidated_indices.contains(&i)) { - node.best_descendant = None + *node.best_descendant_mut() = None } break; } } - ExecutionStatus::Irrelevant(_) => break, + Ok(ExecutionStatus::Irrelevant(_)) => break, + Err(_) => break, } // Only invalidate the head block if either: // // - The head block was specifically indicated to be invalidated. // - The latest valid hash is a known ancestor. - if node.root != head_block_root + if node.root() != head_block_root || op.invalidate_block_root() || latest_valid_ancestor_is_descendant { - match &node.execution_status { + match node.execution_status() { // It's illegal for an execution client to declare that some previously-valid block // is now invalid. This is a consensus failure on their behalf. - ExecutionStatus::Valid(hash) => { + Ok(ExecutionStatus::Valid(hash)) => { return Err(Error::ValidExecutionStatusBecameInvalid { - block_root: node.root, - payload_block_hash: *hash, + block_root: node.root(), + payload_block_hash: hash, }); } - ExecutionStatus::Optimistic(hash) => { + Ok(ExecutionStatus::Optimistic(hash)) => { invalidated_indices.insert(index); - node.execution_status = ExecutionStatus::Invalid(*hash); + if let ProtoNode::V17(node) = node { + node.execution_status = ExecutionStatus::Invalid(hash); + } // It's impossible for an invalid block to lead to a "best" block, so set these // fields to `None`. // // Failing to set these values will result in `Self::node_leads_to_viable_head` // returning `false` for *valid* ancestors of invalid blocks. - node.best_child = None; - node.best_descendant = None; + *node.best_child_mut() = None; + *node.best_descendant_mut() = None; } // The block is already invalid, but keep going backwards to ensure all ancestors // are updated. 
- ExecutionStatus::Invalid(_) => (), + Ok(ExecutionStatus::Invalid(_)) => (), // This block is pre-merge, therefore it has no execution status. Nor do its // ancestors. - ExecutionStatus::Irrelevant(_) => break, + Ok(ExecutionStatus::Irrelevant(_)) => break, + Err(_) => (), } } - if let Some(parent_index) = node.parent { + if let Some(parent_index) = node.parent() { index = parent_index } else { // The root of the block tree has been reached (aka the finalized block), without @@ -664,24 +806,27 @@ impl ProtoArray { .get_mut(index) .ok_or(Error::InvalidNodeIndex(index))?; - if let Some(parent_index) = node.parent + if let Some(parent_index) = node.parent() && invalidated_indices.contains(&parent_index) { - match &node.execution_status { - ExecutionStatus::Valid(hash) => { + match node.execution_status() { + Ok(ExecutionStatus::Valid(hash)) => { return Err(Error::ValidExecutionStatusBecameInvalid { - block_root: node.root, - payload_block_hash: *hash, + block_root: node.root(), + payload_block_hash: hash, }); } - ExecutionStatus::Optimistic(hash) | ExecutionStatus::Invalid(hash) => { - node.execution_status = ExecutionStatus::Invalid(*hash) + Ok(ExecutionStatus::Optimistic(hash)) | Ok(ExecutionStatus::Invalid(hash)) => { + if let ProtoNode::V17(node) = node { + node.execution_status = ExecutionStatus::Invalid(hash) + } } - ExecutionStatus::Irrelevant(_) => { + Ok(ExecutionStatus::Irrelevant(_)) => { return Err(Error::IrrelevantDescendant { - block_root: node.root, + block_root: node.root(), }); } + Err(_) => (), } invalidated_indices.insert(index); @@ -724,13 +869,15 @@ impl ProtoArray { // practically possible to set a new justified root if we are unable to find a new head. // // This scenario is *unsupported*. It represents a serious consensus failure. 
- if justified_node.execution_status.is_invalid() { + if let Ok(execution_status) = justified_node.execution_status() + && execution_status.is_invalid() + { return Err(Error::InvalidJustifiedCheckpointExecutionStatus { justified_root: *justified_root, }); } - let best_descendant_index = justified_node.best_descendant.unwrap_or(justified_index); + let best_descendant_index = justified_node.best_descendant().unwrap_or(justified_index); let best_node = self .nodes @@ -749,13 +896,13 @@ impl ProtoArray { start_root: *justified_root, justified_checkpoint: best_justified_checkpoint, finalized_checkpoint: best_finalized_checkpoint, - head_root: best_node.root, - head_justified_checkpoint: best_node.justified_checkpoint, - head_finalized_checkpoint: best_node.finalized_checkpoint, + head_root: best_node.root(), + head_justified_checkpoint: *best_node.justified_checkpoint(), + head_finalized_checkpoint: *best_node.finalized_checkpoint(), }))); } - Ok(best_node.root) + Ok(best_node.root()) } /// Update the tree with new finalization information. The tree is only actually pruned if both @@ -788,7 +935,7 @@ impl ProtoArray { .nodes .get(node_index) .ok_or(Error::InvalidNodeIndex(node_index))? - .root; + .root(); self.indices.remove(root); } @@ -805,19 +952,19 @@ impl ProtoArray { // Iterate through all the existing nodes and adjust their indices to match the new layout // of `self.nodes`. for node in self.nodes.iter_mut() { - if let Some(parent) = node.parent { + if let Some(parent) = node.parent() { // If `node.parent` is less than `finalized_index`, set it to `None`. 
- node.parent = parent.checked_sub(finalized_index); + *node.parent_mut() = parent.checked_sub(finalized_index); } - if let Some(best_child) = node.best_child { - node.best_child = Some( + if let Some(best_child) = node.best_child() { + *node.best_child_mut() = Some( best_child .checked_sub(finalized_index) .ok_or(Error::IndexOverflow("best_child"))?, ); } - if let Some(best_descendant) = node.best_descendant { - node.best_descendant = Some( + if let Some(best_descendant) = node.best_descendant() { + *node.best_descendant_mut() = Some( best_descendant .checked_sub(finalized_index) .ok_or(Error::IndexOverflow("best_descendant"))?, @@ -905,19 +1052,32 @@ impl ProtoArray { } else if !child_leads_to_viable_head && best_child_leads_to_viable_head { // The best child leads to a viable head, but the child doesn't. no_change - } else if child.weight() == best_child.weight() { - // Tie-breaker of equal weights by root. - if *child.root() >= *best_child.root() { - change_to_child - } else { - no_change - } } else { - // Choose the winner by weight. - if child.weight() > best_child.weight() { + // Both viable or both non-viable. For V29 parents, prefer the child + // whose parent_payload_status matches the parent's payload preference + // (Full if full_payload_weight >= empty_payload_weight, else Empty). + let child_matches = child_matches_parent_payload_preference(parent, child); + let best_child_matches = + child_matches_parent_payload_preference(parent, best_child); + + if child_matches && !best_child_matches { change_to_child - } else { + } else if !child_matches && best_child_matches { no_change + } else if child.weight() == best_child.weight() { + // Tie-breaker of equal weights by root. + if *child.root() >= *best_child.root() { + change_to_child + } else { + no_change + } + } else { + // Choose the winner by weight. 
+                        if child.weight() > best_child.weight() {
+                            change_to_child
+                        } else {
+                            no_change
+                        }
                    }
                }
            }
@@ -988,11 +1148,13 @@ impl ProtoArray {
        best_justified_checkpoint: Checkpoint,
        best_finalized_checkpoint: Checkpoint,
    ) -> bool {
-        if let Ok(proto_node) = node.as_v17() && proto_node.execution_status.is_invalid() {
+        if let Ok(proto_node) = node.as_v17()
+            && proto_node.execution_status.is_invalid()
+        {
            return false;
        }

        let genesis_epoch = Epoch::new(0);
        let current_epoch = current_slot.epoch(E::slots_per_epoch());
        let node_epoch = node.slot().epoch(E::slots_per_epoch());
        let node_justified_checkpoint = node.justified_checkpoint();
@@ -1006,7 +1168,7 @@ impl ProtoArray {
        } else {
            // The block is not from a prior epoch, therefore the voting source
            // is not pulled up.
-            node_justified_checkpoint
+            *node_justified_checkpoint
        };

        let correct_justified = best_justified_checkpoint.epoch == genesis_epoch
@@ -1015,7 +1177,7 @@ impl ProtoArray {

        let correct_finalized = best_finalized_checkpoint.epoch == genesis_epoch
            || self
-                .is_finalized_checkpoint_or_descendant::<E>(node.root, best_finalized_checkpoint);
+                .is_finalized_checkpoint_or_descendant::<E>(node.root(), best_finalized_checkpoint);

        correct_justified && correct_finalized
    }
@@ -1037,7 +1199,7 @@ impl ProtoArray {
        block_root: &Hash256,
    ) -> impl Iterator<Item = (Hash256, Slot)> + 'a {
        self.iter_nodes(block_root)
-            .map(|node| (node.root, node.slot))
+            .map(|node| (node.root(), node.slot()))
    }

    /// Returns `true` if the `descendant_root` has an ancestor with `ancestor_root`.
Always @@ -1058,8 +1220,8 @@ impl ProtoArray { .and_then(|ancestor_index| self.nodes.get(*ancestor_index)) .and_then(|ancestor| { self.iter_block_roots(&descendant_root) - .take_while(|(_root, slot)| *slot >= ancestor.slot) - .find(|(_root, slot)| *slot == ancestor.slot) + .take_while(|(_root, slot)| *slot >= ancestor.slot()) + .find(|(_root, slot)| *slot == ancestor.slot()) .map(|(root, _slot)| root == ancestor_root) }) .unwrap_or(false) @@ -1098,15 +1260,15 @@ impl ProtoArray { // Run this check once, outside of the loop rather than inside the loop. // If the conditions don't match for this node then they're unlikely to // start matching for its ancestors. - for checkpoint in &[node.finalized_checkpoint, node.justified_checkpoint] { - if checkpoint == &best_finalized_checkpoint { + for checkpoint in &[node.finalized_checkpoint(), node.justified_checkpoint()] { + if **checkpoint == best_finalized_checkpoint { return true; } } for checkpoint in &[ - node.unrealized_finalized_checkpoint, - node.unrealized_justified_checkpoint, + node.unrealized_finalized_checkpoint(), + node.unrealized_justified_checkpoint(), ] { if checkpoint.is_some_and(|cp| cp == best_finalized_checkpoint) { return true; @@ -1116,13 +1278,13 @@ impl ProtoArray { loop { // If `node` is less than or equal to the finalized slot then `node` // must be the finalized block. - if node.slot <= finalized_slot { - return node.root == finalized_root; + if node.slot() <= finalized_slot { + return node.root() == finalized_root; } // Since `node` is from a higher slot that the finalized checkpoint, // replace `node` with the parent of `node`. 
- if let Some(parent) = node.parent.and_then(|index| self.nodes.get(index)) { + if let Some(parent) = node.parent().and_then(|index| self.nodes.get(index)) { node = parent } else { // If `node` is not the finalized block and its parent does not @@ -1144,11 +1306,12 @@ impl ProtoArray { .iter() .rev() .find(|node| { - node.execution_status - .block_hash() + node.execution_status() + .ok() + .and_then(|execution_status| execution_status.block_hash()) .is_some_and(|node_block_hash| node_block_hash == *block_hash) }) - .map(|node| node.root) + .map(|node| node.root()) } /// Returns all nodes that have zero children and are descended from the finalized checkpoint. @@ -1163,9 +1326,9 @@ impl ProtoArray { self.nodes .iter() .filter(|node| { - node.best_child.is_none() + node.best_child().is_none() && self.is_finalized_checkpoint_or_descendant::( - node.root, + node.root(), best_finalized_checkpoint, ) }) @@ -1173,6 +1336,30 @@ impl ProtoArray { } } +/// For V29 parents, returns `true` if the child's `parent_payload_status` matches the parent's +/// preferred payload status. When full and empty weights are unequal, the higher weight wins. +/// When equal, the tiebreaker uses the parent's `payload_tiebreak`: prefer Full if the block +/// was timely and data is available; otherwise prefer Empty. +/// For V17 parents (or mixed), always returns `true` (no payload preference). 
+fn child_matches_parent_payload_preference(parent: &ProtoNode, child: &ProtoNode) -> bool { + let (Ok(parent_v29), Ok(child_v29)) = (parent.as_v29(), child.as_v29()) else { + return true; + }; + let prefers_full = if parent_v29.full_payload_weight > parent_v29.empty_payload_weight { + true + } else if parent_v29.empty_payload_weight > parent_v29.full_payload_weight { + false + } else { + // Equal weights: tiebreaker per spec + parent_v29.payload_tiebreak.is_timely && parent_v29.payload_tiebreak.is_data_available + }; + if prefers_full { + child_v29.parent_payload_status == PayloadStatus::Full + } else { + child_v29.parent_payload_status == PayloadStatus::Empty + } +} + /// A helper method to calculate the proposer boost based on the given `justified_balances`. /// /// https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/fork-choice.md#get_latest_attesting_balance @@ -1188,6 +1375,19 @@ pub fn calculate_committee_fraction( .checked_div(100) } +/// Apply a signed delta to an unsigned weight, returning an error on overflow. +fn apply_delta(weight: u64, delta: i64, index: usize) -> Result { + if delta < 0 { + weight + .checked_sub(delta.unsigned_abs()) + .ok_or(Error::DeltaOverflow(index)) + } else { + weight + .checked_add(delta as u64) + .ok_or(Error::DeltaOverflow(index)) + } +} + /// Reverse iterator over one path through a `ProtoArray`. 
pub struct Iter<'a> { next_node_index: Option, @@ -1200,7 +1400,7 @@ impl<'a> Iterator for Iter<'a> { fn next(&mut self) -> Option { let next_node_index = self.next_node_index?; let node = self.proto_array.nodes.get(next_node_index)?; - self.next_node_index = node.parent; + self.next_node_index = node.parent(); Some(node) } } diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index 928e8ce8603..e1c893db9af 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -2,7 +2,7 @@ use crate::{ JustifiedBalances, error::Error, proto_array::{ - InvalidationOperation, Iter, ProposerBoost, ProtoArray, ProtoNode, + InvalidationOperation, Iter, NodeDelta, ProposerBoost, ProtoArray, ProtoNode, calculate_committee_fraction, }, ssz_container::SszContainer, @@ -28,15 +28,17 @@ pub const DEFAULT_PRUNE_THRESHOLD: usize = 256; pub struct VoteTracker { current_root: Hash256, next_root: Hash256, + current_slot: Slot, next_slot: Slot, + current_payload_present: bool, next_payload_present: bool, } // FIXME(sproul): version this type pub struct LatestMessage { - slot: Slot, - root: Hash256, - payload_present: bool, + pub slot: Slot, + pub root: Hash256, + pub payload_present: bool, } /// Represents the verification status of an execution payload pre-Gloas. 
@@ -448,7 +450,7 @@ impl ProtoArrayForkChoice { execution_status: ExecutionStatus, execution_payload_parent_hash: Option, execution_payload_block_hash: Option, - + spec: &ChainSpec, ) -> Result { let mut proto_array = ProtoArray { prune_threshold: DEFAULT_PRUNE_THRESHOLD, @@ -474,7 +476,6 @@ impl ProtoArrayForkChoice { unrealized_finalized_checkpoint: Some(finalized_checkpoint), execution_payload_parent_hash, execution_payload_block_hash, - }; proto_array @@ -569,9 +570,16 @@ impl ProtoArrayForkChoice { ) -> Result { let old_balances = &mut self.balances; let new_balances = justified_state_balances; + let node_slots = self + .proto_array + .nodes + .iter() + .map(|node| node.slot()) + .collect::>(); let deltas = compute_deltas( &self.proto_array.indices, + &node_slots, &mut self.votes, &old_balances.effective_balances, &new_balances.effective_balances, @@ -628,13 +636,13 @@ impl ProtoArrayForkChoice { )?; // Only re-org a single slot. This prevents cascading failures during asynchrony. - let head_slot_ok = info.head_node.slot + 1 == current_slot; + let head_slot_ok = info.head_node.slot() + 1 == current_slot; if !head_slot_ok { return Err(DoNotReOrg::HeadDistance.into()); } // Only re-org if the head's weight is less than the heads configured committee fraction. - let head_weight = info.head_node.weight; + let head_weight = info.head_node.weight(); let re_org_head_weight_threshold = info.re_org_head_weight_threshold; let weak_head = head_weight < re_org_head_weight_threshold; if !weak_head { @@ -646,7 +654,7 @@ impl ProtoArrayForkChoice { } // Only re-org if the parent's weight is greater than the parents configured committee fraction. 
- let parent_weight = info.parent_node.weight; + let parent_weight = info.parent_node.weight(); let re_org_parent_weight_threshold = info.re_org_parent_weight_threshold; let parent_strong = parent_weight > re_org_parent_weight_threshold; if !parent_strong { @@ -685,14 +693,14 @@ impl ProtoArrayForkChoice { let parent_node = nodes.pop().ok_or(DoNotReOrg::MissingHeadOrParentNode)?; let head_node = nodes.pop().ok_or(DoNotReOrg::MissingHeadOrParentNode)?; - let parent_slot = parent_node.slot; - let head_slot = head_node.slot; + let parent_slot = parent_node.slot(); + let head_slot = head_node.slot(); let re_org_block_slot = head_slot + 1; // Check finalization distance. let proposal_epoch = re_org_block_slot.epoch(E::slots_per_epoch()); let finalized_epoch = head_node - .unrealized_finalized_checkpoint + .unrealized_finalized_checkpoint() .ok_or(DoNotReOrg::MissingHeadFinalizedCheckpoint)? .epoch; let epochs_since_finalization = proposal_epoch.saturating_sub(finalized_epoch).as_u64(); @@ -724,10 +732,10 @@ impl ProtoArrayForkChoice { } // Check FFG. - let ffg_competitive = parent_node.unrealized_justified_checkpoint - == head_node.unrealized_justified_checkpoint - && parent_node.unrealized_finalized_checkpoint - == head_node.unrealized_finalized_checkpoint; + let ffg_competitive = parent_node.unrealized_justified_checkpoint() + == head_node.unrealized_justified_checkpoint() + && parent_node.unrealized_finalized_checkpoint() + == head_node.unrealized_finalized_checkpoint(); if !ffg_competitive { return Err(DoNotReOrg::JustificationAndFinalizationNotCompetitive.into()); } @@ -755,10 +763,10 @@ impl ProtoArrayForkChoice { /// This will operate on *all* blocks, even those that do not descend from the finalized /// ancestor. 
pub fn contains_invalid_payloads(&mut self) -> bool { - self.proto_array - .nodes - .iter() - .any(|node| node.execution_status.is_invalid()) + self.proto_array.nodes.iter().any(|node| { + node.execution_status() + .is_ok_and(|status| status.is_invalid()) + }) } /// For all nodes, regardless of their relationship to the finalized block, set their execution @@ -783,9 +791,11 @@ impl ProtoArrayForkChoice { .get_mut(node_index) .ok_or("unreachable index out of bounds in proto_array nodes")?; - match node.execution_status { - ExecutionStatus::Invalid(block_hash) => { - node.execution_status = ExecutionStatus::Optimistic(block_hash); + match node.execution_status() { + Ok(ExecutionStatus::Invalid(block_hash)) => { + if let ProtoNode::V17(node) = node { + node.execution_status = ExecutionStatus::Optimistic(block_hash); + } // Restore the weight of the node, it would have been set to `0` in // `apply_score_changes` when it was invalidated. @@ -795,7 +805,7 @@ impl ProtoArrayForkChoice { .iter() .enumerate() .filter_map(|(validator_index, vote)| { - if vote.current_root == node.root { + if vote.current_root == node.root() { // Any voting validator that does not have a balance should be // ignored. This is consistent with `compute_deltas`. self.balances.effective_balances.get(validator_index) @@ -808,7 +818,7 @@ impl ProtoArrayForkChoice { // If the invalid root was boosted, apply the weight to it and // ancestors. if let Some(proposer_score_boost) = spec.proposer_score_boost - && self.proto_array.previous_proposer_boost.root == node.root + && self.proto_array.previous_proposer_boost.root == node.root() { // Compute the score based upon the current balances. 
We can't rely on // the `previous_proposr_boost.score` since it is set to zero with an @@ -829,12 +839,12 @@ impl ProtoArrayForkChoice { if restored_weight > 0 { let mut node_or_ancestor = node; loop { - node_or_ancestor.weight = node_or_ancestor - .weight + *node_or_ancestor.weight_mut() = node_or_ancestor + .weight() .checked_add(restored_weight) .ok_or("Overflow when adding weight to ancestor")?; - if let Some(parent_index) = node_or_ancestor.parent { + if let Some(parent_index) = node_or_ancestor.parent() { node_or_ancestor = self .proto_array .nodes @@ -850,11 +860,14 @@ impl ProtoArrayForkChoice { } // There are no balance changes required if the node was either valid or // optimistic. - ExecutionStatus::Valid(block_hash) | ExecutionStatus::Optimistic(block_hash) => { - node.execution_status = ExecutionStatus::Optimistic(block_hash) + Ok(ExecutionStatus::Valid(block_hash)) + | Ok(ExecutionStatus::Optimistic(block_hash)) => { + if let ProtoNode::V17(node) = node { + node.execution_status = ExecutionStatus::Optimistic(block_hash) + } } // An irrelevant node cannot become optimistic, this is a no-op. 
- ExecutionStatus::Irrelevant(_) => (), + Ok(ExecutionStatus::Irrelevant(_)) | Err(_) => (), } } @@ -891,30 +904,34 @@ impl ProtoArrayForkChoice { pub fn get_block(&self, block_root: &Hash256) -> Option { let block = self.get_proto_node(block_root)?; let parent_root = block - .parent + .parent() .and_then(|i| self.proto_array.nodes.get(i)) - .map(|parent| parent.root); + .map(|parent| parent.root()); Some(Block { - slot: block.slot, - root: block.root, + slot: block.slot(), + root: block.root(), parent_root, - state_root: block.state_root, - target_root: block.target_root, - current_epoch_shuffling_id: block.current_epoch_shuffling_id.clone(), - next_epoch_shuffling_id: block.next_epoch_shuffling_id.clone(), - justified_checkpoint: block.justified_checkpoint, - finalized_checkpoint: block.finalized_checkpoint, - execution_status: block.execution_status, - unrealized_justified_checkpoint: block.unrealized_justified_checkpoint, - unrealized_finalized_checkpoint: block.unrealized_finalized_checkpoint, + state_root: block.state_root(), + target_root: block.target_root(), + current_epoch_shuffling_id: block.current_epoch_shuffling_id().clone(), + next_epoch_shuffling_id: block.next_epoch_shuffling_id().clone(), + justified_checkpoint: *block.justified_checkpoint(), + finalized_checkpoint: *block.finalized_checkpoint(), + execution_status: block + .execution_status() + .unwrap_or_else(|_| ExecutionStatus::irrelevant()), + unrealized_justified_checkpoint: block.unrealized_justified_checkpoint(), + unrealized_finalized_checkpoint: block.unrealized_finalized_checkpoint(), + execution_payload_parent_hash: None, + execution_payload_block_hash: block.execution_payload_block_hash().ok(), }) } /// Returns the `block.execution_status` field, if the block is present. 
pub fn get_block_execution_status(&self, block_root: &Hash256) -> Option { let block = self.get_proto_node(block_root)?; - Some(block.execution_status) + block.execution_status().ok() } /// Returns the weight of a given block. @@ -923,7 +940,22 @@ impl ProtoArrayForkChoice { self.proto_array .nodes .get(*block_index) - .map(|node| node.weight) + .map(|node| node.weight()) + } + + /// Returns the payload status of the head node based on accumulated weights. + /// + /// Returns `Full` if `full_payload_weight >= empty_payload_weight` (Full wins ties per spec's + /// `get_payload_status_tiebreaker` natural ordering FULL=2 > EMPTY=1). + /// Returns `Empty` otherwise. Returns `None` for V17 nodes. + pub fn head_payload_status(&self, head_root: &Hash256) -> Option { + let node = self.get_proto_node(head_root)?; + let v29 = node.as_v29().ok()?; + if v29.full_payload_weight >= v29.empty_payload_weight { + Some(PayloadStatus::Full) + } else { + Some(PayloadStatus::Empty) + } } /// See `ProtoArray` documentation. @@ -1039,15 +1071,30 @@ impl ProtoArrayForkChoice { /// - If a value in `indices` is greater to or equal to `indices.len()`. /// - If some `Hash256` in `votes` is not a key in `indices` (except for `Hash256::zero()`, this is /// always valid). 
-// FIXME(sproul): implement get-weight changes here fn compute_deltas( indices: &HashMap, + node_slots: &[Slot], votes: &mut ElasticList, old_balances: &[u64], new_balances: &[u64], equivocating_indices: &BTreeSet, -) -> Result, Error> { - let mut deltas = vec![0_i64; indices.len()]; +) -> Result, Error> { + let block_slot = |index: usize| -> Result { + node_slots + .get(index) + .copied() + .ok_or(Error::InvalidNodeDelta(index)) + }; + + let mut deltas = vec![ + NodeDelta { + delta: 0, + empty_delta: 0, + full_delta: 0, + payload_tiebreaker: None, + }; + indices.len() + ]; for (val_index, vote) in votes.iter_mut().enumerate() { // There is no need to create a score change if the validator has never voted or both their @@ -1072,17 +1119,25 @@ fn compute_deltas( let old_balance = old_balances.get(val_index).copied().unwrap_or(0); if let Some(current_delta_index) = indices.get(&vote.current_root).copied() { - let delta = deltas - .get(current_delta_index) - .ok_or(Error::InvalidNodeDelta(current_delta_index))? + let node_delta = deltas + .get_mut(current_delta_index) + .ok_or(Error::InvalidNodeDelta(current_delta_index))?; + node_delta.delta = node_delta + .delta .checked_sub(old_balance as i64) .ok_or(Error::DeltaOverflow(current_delta_index))?; - // Array access safe due to check on previous line. - deltas[current_delta_index] = delta; + let status = NodeDelta::payload_status( + vote.current_slot, + vote.current_payload_present, + block_slot(current_delta_index)?, + ); + node_delta.sub_payload_delta(status, old_balance, current_delta_index)?; } vote.current_root = Hash256::zero(); + vote.current_slot = Slot::new(0); + vote.current_payload_present = false; } // We've handled this slashed validator, continue without applying an ordinary delta. continue; @@ -1099,34 +1154,52 @@ fn compute_deltas( // on-boarded less validators than the prior fork. 
let new_balance = new_balances.get(val_index).copied().unwrap_or(0); - if vote.current_root != vote.next_root || old_balance != new_balance { + if vote.current_root != vote.next_root + || old_balance != new_balance + || vote.current_payload_present != vote.next_payload_present + || vote.current_slot != vote.next_slot + { // We ignore the vote if it is not known in `indices`. We assume that it is outside // of our tree (i.e., pre-finalization) and therefore not interesting. if let Some(current_delta_index) = indices.get(&vote.current_root).copied() { - let delta = deltas - .get(current_delta_index) - .ok_or(Error::InvalidNodeDelta(current_delta_index))? + let node_delta = deltas + .get_mut(current_delta_index) + .ok_or(Error::InvalidNodeDelta(current_delta_index))?; + node_delta.delta = node_delta + .delta .checked_sub(old_balance as i64) .ok_or(Error::DeltaOverflow(current_delta_index))?; - // Array access safe due to check on previous line. - deltas[current_delta_index] = delta; + let status = NodeDelta::payload_status( + vote.current_slot, + vote.current_payload_present, + block_slot(current_delta_index)?, + ); + node_delta.sub_payload_delta(status, old_balance, current_delta_index)?; } // We ignore the vote if it is not known in `indices`. We assume that it is outside // of our tree (i.e., pre-finalization) and therefore not interesting. if let Some(next_delta_index) = indices.get(&vote.next_root).copied() { - let delta = deltas - .get(next_delta_index) - .ok_or(Error::InvalidNodeDelta(next_delta_index))? + let node_delta = deltas + .get_mut(next_delta_index) + .ok_or(Error::InvalidNodeDelta(next_delta_index))?; + node_delta.delta = node_delta + .delta .checked_add(new_balance as i64) .ok_or(Error::DeltaOverflow(next_delta_index))?; - // Array access safe due to check on previous line. 
- deltas[next_delta_index] = delta; + let status = NodeDelta::payload_status( + vote.next_slot, + vote.next_payload_present, + block_slot(next_delta_index)?, + ); + node_delta.add_payload_delta(status, new_balance, next_delta_index)?; } vote.current_root = vote.next_root; + vote.current_slot = vote.next_slot; + vote.current_payload_present = vote.next_payload_present; } } @@ -1144,8 +1217,13 @@ mod test_compute_deltas { Hash256::from_low_u64_be(i as u64 + 1) } + fn test_node_slots(count: usize) -> Vec { + vec![Slot::new(0); count] + } + #[test] fn finalized_descendant() { + let spec = MainnetEthSpec::default_spec(); let genesis_slot = Slot::new(0); let genesis_epoch = Epoch::new(0); @@ -1176,6 +1254,9 @@ mod test_compute_deltas { junk_shuffling_id.clone(), junk_shuffling_id.clone(), execution_status, + None, + None, + &spec, ) .unwrap(); @@ -1195,10 +1276,13 @@ mod test_compute_deltas { execution_status, unrealized_justified_checkpoint: Some(genesis_checkpoint), unrealized_finalized_checkpoint: Some(genesis_checkpoint), + execution_payload_parent_hash: None, + execution_payload_block_hash: None, }, genesis_slot + 1, genesis_checkpoint, genesis_checkpoint, + &spec, ) .unwrap(); @@ -1220,10 +1304,13 @@ mod test_compute_deltas { execution_status, unrealized_justified_checkpoint: None, unrealized_finalized_checkpoint: None, + execution_payload_parent_hash: None, + execution_payload_block_hash: None, }, genesis_slot + 1, genesis_checkpoint, genesis_checkpoint, + &spec, ) .unwrap(); @@ -1299,6 +1386,7 @@ mod test_compute_deltas { /// *checkpoint*, not just the finalized *block*. 
#[test] fn finalized_descendant_edge_case() { + let spec = MainnetEthSpec::default_spec(); let get_block_root = Hash256::from_low_u64_be; let genesis_slot = Slot::new(0); let junk_state_root = Hash256::zero(); @@ -1320,6 +1408,9 @@ mod test_compute_deltas { junk_shuffling_id.clone(), junk_shuffling_id.clone(), execution_status, + None, + None, + &spec, ) .unwrap(); @@ -1348,10 +1439,13 @@ mod test_compute_deltas { execution_status, unrealized_justified_checkpoint: Some(genesis_checkpoint), unrealized_finalized_checkpoint: Some(genesis_checkpoint), + execution_payload_parent_hash: None, + execution_payload_block_hash: None, }, Slot::from(block.slot), genesis_checkpoint, genesis_checkpoint, + &spec, ) .unwrap(); }; @@ -1454,7 +1548,10 @@ mod test_compute_deltas { votes.0.push(VoteTracker { current_root: Hash256::zero(), next_root: Hash256::zero(), - next_epoch: Epoch::new(0), + current_slot: Slot::new(0), + next_slot: Slot::new(0), + current_payload_present: false, + next_payload_present: false, }); old_balances.push(0); new_balances.push(0); @@ -1462,6 +1559,7 @@ mod test_compute_deltas { let deltas = compute_deltas( &indices, + &test_node_slots(indices.len()), &mut votes, &old_balances, &new_balances, @@ -1505,7 +1603,10 @@ mod test_compute_deltas { votes.0.push(VoteTracker { current_root: Hash256::zero(), next_root: hash_from_index(0), - next_epoch: Epoch::new(0), + current_slot: Slot::new(0), + next_slot: Slot::new(0), + current_payload_present: false, + next_payload_present: false, }); old_balances.push(BALANCE); new_balances.push(BALANCE); @@ -1513,6 +1614,7 @@ mod test_compute_deltas { let deltas = compute_deltas( &indices, + &test_node_slots(indices.len()), &mut votes, &old_balances, &new_balances, @@ -1563,7 +1665,10 @@ mod test_compute_deltas { votes.0.push(VoteTracker { current_root: Hash256::zero(), next_root: hash_from_index(i), - next_epoch: Epoch::new(0), + current_slot: Slot::new(0), + next_slot: Slot::new(0), + current_payload_present: false, + 
next_payload_present: false, }); old_balances.push(BALANCE); new_balances.push(BALANCE); @@ -1571,6 +1676,7 @@ mod test_compute_deltas { let deltas = compute_deltas( &indices, + &test_node_slots(indices.len()), &mut votes, &old_balances, &new_balances, @@ -1616,7 +1722,10 @@ mod test_compute_deltas { votes.0.push(VoteTracker { current_root: hash_from_index(0), next_root: hash_from_index(1), - next_epoch: Epoch::new(0), + current_slot: Slot::new(0), + next_slot: Slot::new(0), + current_payload_present: false, + next_payload_present: false, }); old_balances.push(BALANCE); new_balances.push(BALANCE); @@ -1624,6 +1733,7 @@ mod test_compute_deltas { let deltas = compute_deltas( &indices, + &test_node_slots(indices.len()), &mut votes, &old_balances, &new_balances, @@ -1680,18 +1790,25 @@ mod test_compute_deltas { votes.0.push(VoteTracker { current_root: hash_from_index(1), next_root: Hash256::zero(), - next_epoch: Epoch::new(0), + current_slot: Slot::new(0), + next_slot: Slot::new(0), + current_payload_present: false, + next_payload_present: false, }); // One validator moves their vote from the block to something outside the tree. 
votes.0.push(VoteTracker { current_root: hash_from_index(1), next_root: Hash256::from_low_u64_be(1337), - next_epoch: Epoch::new(0), + current_slot: Slot::new(0), + next_slot: Slot::new(0), + current_payload_present: false, + next_payload_present: false, }); let deltas = compute_deltas( &indices, + &test_node_slots(indices.len()), &mut votes, &old_balances, &new_balances, @@ -1733,7 +1850,10 @@ mod test_compute_deltas { votes.0.push(VoteTracker { current_root: hash_from_index(0), next_root: hash_from_index(1), - next_epoch: Epoch::new(0), + current_slot: Slot::new(0), + next_slot: Slot::new(0), + current_payload_present: false, + next_payload_present: false, }); old_balances.push(OLD_BALANCE); new_balances.push(NEW_BALANCE); @@ -1741,6 +1861,7 @@ mod test_compute_deltas { let deltas = compute_deltas( &indices, + &test_node_slots(indices.len()), &mut votes, &old_balances, &new_balances, @@ -1802,12 +1923,16 @@ mod test_compute_deltas { votes.0.push(VoteTracker { current_root: hash_from_index(1), next_root: hash_from_index(2), - next_epoch: Epoch::new(0), + current_slot: Slot::new(0), + next_slot: Slot::new(0), + current_payload_present: false, + next_payload_present: false, }); } let deltas = compute_deltas( &indices, + &test_node_slots(indices.len()), &mut votes, &old_balances, &new_balances, @@ -1858,12 +1983,16 @@ mod test_compute_deltas { votes.0.push(VoteTracker { current_root: hash_from_index(1), next_root: hash_from_index(2), - next_epoch: Epoch::new(0), + current_slot: Slot::new(0), + next_slot: Slot::new(0), + current_payload_present: false, + next_payload_present: false, }); } let deltas = compute_deltas( &indices, + &test_node_slots(indices.len()), &mut votes, &old_balances, &new_balances, @@ -1912,7 +2041,10 @@ mod test_compute_deltas { votes.0.push(VoteTracker { current_root: hash_from_index(1), next_root: hash_from_index(2), - next_epoch: Epoch::new(0), + current_slot: Slot::new(0), + next_slot: Slot::new(0), + current_payload_present: false, + 
next_payload_present: false, }); } @@ -1921,6 +2053,7 @@ mod test_compute_deltas { let deltas = compute_deltas( &indices, + &test_node_slots(indices.len()), &mut votes, &old_balances, &new_balances, @@ -1950,6 +2083,7 @@ mod test_compute_deltas { // Re-computing the deltas should be a no-op (no repeat deduction for the slashed validator). let deltas = compute_deltas( &indices, + &test_node_slots(indices.len()), &mut votes, &new_balances, &new_balances, @@ -1958,4 +2092,68 @@ mod test_compute_deltas { .expect("should compute deltas"); assert_eq!(deltas, vec![0, 0]); } + + #[test] + fn payload_bucket_changes_on_non_pending_vote() { + const BALANCE: u64 = 42; + + let mut indices = HashMap::new(); + indices.insert(hash_from_index(1), 0); + + let node_slots = vec![Slot::new(0)]; + let mut votes = ElasticList(vec![VoteTracker { + current_root: hash_from_index(1), + next_root: hash_from_index(1), + current_slot: Slot::new(1), + next_slot: Slot::new(1), + current_payload_present: false, + next_payload_present: true, + }]); + + let deltas = compute_deltas( + &indices, + &node_slots, + &mut votes, + &[BALANCE], + &[BALANCE], + &BTreeSet::new(), + ) + .expect("should compute deltas"); + + assert_eq!(deltas[0].delta, 0); + assert_eq!(deltas[0].empty_delta, -(BALANCE as i64)); + assert_eq!(deltas[0].full_delta, BALANCE as i64); + } + + #[test] + fn pending_vote_only_updates_regular_weight() { + const BALANCE: u64 = 42; + + let mut indices = HashMap::new(); + indices.insert(hash_from_index(1), 0); + + let node_slots = vec![Slot::new(0)]; + let mut votes = ElasticList(vec![VoteTracker { + current_root: hash_from_index(1), + next_root: hash_from_index(1), + current_slot: Slot::new(0), + next_slot: Slot::new(0), + current_payload_present: false, + next_payload_present: true, + }]); + + let deltas = compute_deltas( + &indices, + &node_slots, + &mut votes, + &[BALANCE], + &[BALANCE], + &BTreeSet::new(), + ) + .expect("should compute deltas"); + + assert_eq!(deltas[0].delta, 0); + 
assert_eq!(deltas[0].empty_delta, 0); + assert_eq!(deltas[0].full_delta, 0); + } } diff --git a/consensus/proto_array/src/ssz_container.rs b/consensus/proto_array/src/ssz_container.rs index 1e01b74c8cd..07baaa47867 100644 --- a/consensus/proto_array/src/ssz_container.rs +++ b/consensus/proto_array/src/ssz_container.rs @@ -1,7 +1,7 @@ use crate::proto_array::ProposerBoost; use crate::{ Error, JustifiedBalances, - proto_array::{ProtoArray, ProtoNodeV17}, + proto_array::{ProtoArray, ProtoNode, ProtoNodeV17}, proto_array_fork_choice::{ElasticList, ProtoArrayForkChoice, VoteTracker}, }; use ssz::{Encode, four_byte_option_impl}; @@ -14,14 +14,15 @@ use types::{Checkpoint, Hash256}; // selector. four_byte_option_impl!(four_byte_option_checkpoint, Checkpoint); -pub type SszContainer = SszContainerV28; +pub type SszContainer = SszContainerV29; +// Legacy containers (V17/V28) for backward compatibility with older schema versions. #[superstruct( variants(V17, V28), variant_attributes(derive(Encode, Decode, Clone)), no_enum )] -pub struct SszContainer { +pub struct SszContainerLegacy { pub votes: Vec, #[superstruct(only(V17))] pub balances: Vec, @@ -35,7 +36,21 @@ pub struct SszContainer { pub previous_proposer_boost: ProposerBoost, } -impl SszContainer { +/// Current container version. Uses union-encoded `ProtoNode` to support mixed V17/V29 nodes. 
+#[derive(Encode, Decode, Clone)] +pub struct SszContainerV29 { + pub votes: Vec, + pub prune_threshold: usize, + // Deprecated, remove in a future schema migration + justified_checkpoint: Checkpoint, + // Deprecated, remove in a future schema migration + finalized_checkpoint: Checkpoint, + pub nodes: Vec, + pub indices: Vec<(Hash256, usize)>, + pub previous_proposer_boost: ProposerBoost, +} + +impl SszContainerV29 { pub fn from_proto_array( from: &ProtoArrayForkChoice, justified_checkpoint: Checkpoint, @@ -55,10 +70,10 @@ impl SszContainer { } } -impl TryFrom<(SszContainer, JustifiedBalances)> for ProtoArrayForkChoice { +impl TryFrom<(SszContainerV29, JustifiedBalances)> for ProtoArrayForkChoice { type Error = Error; - fn try_from((from, balances): (SszContainer, JustifiedBalances)) -> Result { + fn try_from((from, balances): (SszContainerV29, JustifiedBalances)) -> Result { let proto_array = ProtoArray { prune_threshold: from.prune_threshold, nodes: from.nodes, @@ -74,9 +89,9 @@ impl TryFrom<(SszContainer, JustifiedBalances)> for ProtoArrayForkChoice { } } -// Convert V17 to V28 by dropping balances. -impl From for SszContainerV28 { - fn from(v17: SszContainerV17) -> Self { +// Convert legacy V17 to V28 by dropping balances. +impl From for SszContainerLegacyV28 { + fn from(v17: SszContainerLegacyV17) -> Self { Self { votes: v17.votes, prune_threshold: v17.prune_threshold, @@ -89,9 +104,9 @@ impl From for SszContainerV28 { } } -// Convert V28 to V17 by re-adding balances. -impl From<(SszContainerV28, JustifiedBalances)> for SszContainerV17 { - fn from((v28, balances): (SszContainerV28, JustifiedBalances)) -> Self { +// Convert legacy V28 to V17 by re-adding balances. 
+impl From<(SszContainerLegacyV28, JustifiedBalances)> for SszContainerLegacyV17 { + fn from((v28, balances): (SszContainerLegacyV28, JustifiedBalances)) -> Self { Self { votes: v28.votes, balances: balances.effective_balances.clone(), @@ -104,3 +119,40 @@ impl From<(SszContainerV28, JustifiedBalances)> for SszContainerV17 { } } } + +// Convert legacy V28 to current V29. +impl From for SszContainerV29 { + fn from(v28: SszContainerLegacyV28) -> Self { + Self { + votes: v28.votes, + prune_threshold: v28.prune_threshold, + justified_checkpoint: v28.justified_checkpoint, + finalized_checkpoint: v28.finalized_checkpoint, + nodes: v28.nodes.into_iter().map(ProtoNode::V17).collect(), + indices: v28.indices, + previous_proposer_boost: v28.previous_proposer_boost, + } + } +} + +// Downgrade current V29 to legacy V28 (lossy: V29 nodes lose payload-specific fields). +impl From for SszContainerLegacyV28 { + fn from(v29: SszContainerV29) -> Self { + Self { + votes: v29.votes, + prune_threshold: v29.prune_threshold, + justified_checkpoint: v29.justified_checkpoint, + finalized_checkpoint: v29.finalized_checkpoint, + nodes: v29 + .nodes + .into_iter() + .filter_map(|node| match node { + ProtoNode::V17(v17) => Some(v17), + ProtoNode::V29(_) => None, + }) + .collect(), + indices: v29.indices, + previous_proposer_boost: v29.previous_proposer_boost, + } + } +} diff --git a/testing/ef_tests/src/cases/fork_choice.rs b/testing/ef_tests/src/cases/fork_choice.rs index ca77dc8d796..ca28f3a2cab 100644 --- a/testing/ef_tests/src/cases/fork_choice.rs +++ b/testing/ef_tests/src/cases/fork_choice.rs @@ -939,7 +939,7 @@ impl Tester { DEFAULT_RE_ORG_MAX_EPOCHS_SINCE_FINALIZATION, ); let proposer_head = match proposer_head_result { - Ok(head) => head.parent_node.root, + Ok(head) => head.parent_node.root(), Err(ProposerHeadError::DoNotReOrg(_)) => canonical_head, _ => panic!("Unexpected error in get proposer head"), }; From e04a8c31eadae5c771fc824f60bc6f7e8919d35c Mon Sep 17 00:00:00 2001 From: 
hopinheimer Date: Thu, 26 Feb 2026 03:14:57 -0500 Subject: [PATCH 004/127] adding tests and payload changes --- Cargo.lock | 1 + consensus/fork_choice/Cargo.toml | 1 + consensus/fork_choice/src/fork_choice.rs | 102 +++++-- consensus/fork_choice/src/lib.rs | 2 +- consensus/fork_choice/tests/tests.rs | 143 ++++++++- consensus/proto_array/src/bin.rs | 8 + .../src/fork_choice_test_definition.rs | 29 +- .../execution_status.rs | 10 - .../ffg_updates.rs | 2 - .../gloas_payload.rs | 280 +++++++++++++++++- .../src/fork_choice_test_definition/votes.rs | 10 - consensus/proto_array/src/proto_array.rs | 30 +- .../src/proto_array_fork_choice.rs | 80 ++++- 13 files changed, 626 insertions(+), 72 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5a8e76a8a8d..555eb019d92 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3583,6 +3583,7 @@ name = "fork_choice" version = "0.1.0" dependencies = [ "beacon_chain", + "bls", "ethereum_ssz", "ethereum_ssz_derive", "fixed_bytes", diff --git a/consensus/fork_choice/Cargo.toml b/consensus/fork_choice/Cargo.toml index a07aa38aa5b..df47a5c9d1f 100644 --- a/consensus/fork_choice/Cargo.toml +++ b/consensus/fork_choice/Cargo.toml @@ -19,5 +19,6 @@ types = { workspace = true } [dev-dependencies] beacon_chain = { workspace = true } +bls = { workspace = true } store = { workspace = true } tokio = { workspace = true } diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index 77442a62f57..ae08b3675f6 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -249,7 +249,6 @@ pub struct QueuedAttestation { attesting_indices: Vec, block_root: Hash256, target_epoch: Epoch, - payload_present: bool, } impl<'a, E: EthSpec> From> for QueuedAttestation { @@ -259,11 +258,22 @@ impl<'a, E: EthSpec> From> for QueuedAttestation { attesting_indices: a.attesting_indices_to_vec(), block_root: a.data().beacon_block_root, target_epoch: a.data().target.epoch, - payload_present: 
a.data().index == 1, } } } +/// Used for queuing payload attestations (PTC votes) from the current slot. +/// Payload attestations have different dequeue timing than regular attestations: +/// non-block payload attestations need an extra slot of delay (slot + 1 < current_slot). +#[derive(Clone, PartialEq, Encode, Decode)] +pub struct QueuedPayloadAttestation { + slot: Slot, + attesting_indices: Vec, + block_root: Hash256, + payload_present: bool, + blob_data_available: bool, +} + /// Returns all values in `self.queued_attestations` that have a slot that is earlier than the /// current slot. Also removes those values from `self.queued_attestations`. fn dequeue_attestations( @@ -285,6 +295,22 @@ fn dequeue_attestations( std::mem::replace(queued_attestations, remaining) } +/// Returns all values in `queued` that have `slot + 1 < current_slot`. +/// Payload attestations need an extra slot of delay compared to regular attestations. +fn dequeue_payload_attestations( + current_slot: Slot, + queued: &mut Vec, +) -> Vec { + let remaining = queued.split_off( + queued + .iter() + .position(|a| a.slot.saturating_add(1_u64) >= current_slot) + .unwrap_or(queued.len()), + ); + + std::mem::replace(queued, remaining) +} + /// Denotes whether an attestation we are processing was received from a block or from gossip. /// Equivalent to the `is_from_block` `bool` in: /// @@ -329,6 +355,9 @@ pub struct ForkChoice { proto_array: ProtoArrayForkChoice, /// Attestations that arrived at the current slot and must be queued for later processing. queued_attestations: Vec, + /// Payload attestations (PTC votes) that must be queued for later processing. + /// These have different dequeue timing than regular attestations. + queued_payload_attestations: Vec, /// Stores a cache of the values required to be sent to the execution layer. 
forkchoice_update_parameters: ForkchoiceUpdateParameters, _phantom: PhantomData, @@ -343,6 +372,7 @@ where self.fc_store == other.fc_store && self.proto_array == other.proto_array && self.queued_attestations == other.queued_attestations + && self.queued_payload_attestations == other.queued_payload_attestations } } @@ -414,6 +444,7 @@ where fc_store, proto_array, queued_attestations: vec![], + queued_payload_attestations: vec![], // This will be updated during the next call to `Self::get_head`. forkchoice_update_parameters: ForkchoiceUpdateParameters { head_hash: None, @@ -1120,7 +1151,7 @@ where }); } - if indexed_payload_attestation.data.slot == block.slot + if self.fc_store.get_current_slot() == block.slot && indexed_payload_attestation.data.payload_present { return Err(InvalidAttestation::PayloadAttestationDuringSameSlot { slot: block.slot }); @@ -1177,12 +1208,10 @@ where if attestation.data().slot < self.fc_store.get_current_slot() { for validator_index in attestation.attesting_indices_iter() { - let payload_present = attestation.data().index == 1; self.proto_array.process_attestation( *validator_index as usize, attestation.data().beacon_block_root, attestation.data().slot, - payload_present, )?; } } else { @@ -1214,23 +1243,33 @@ where self.validate_on_payload_attestation(attestation, is_from_block)?; - if attestation.data.slot < self.fc_store.get_current_slot() { + let processing_slot = self.fc_store.get_current_slot(); + // Payload attestations from blocks can be applied in the next slot (S+1 for data.slot=S), + // while non-block payload attestations are delayed one extra slot. 
+ let should_process_now = match is_from_block { + AttestationFromBlock::True => attestation.data.slot < processing_slot, + AttestationFromBlock::False => attestation.data.slot + 1_u64 < processing_slot, + }; + + if should_process_now { for validator_index in attestation.attesting_indices_iter() { - self.proto_array.process_attestation( + self.proto_array.process_payload_attestation( *validator_index as usize, attestation.data.beacon_block_root, - attestation.data.slot, + processing_slot, attestation.data.payload_present, + attestation.data.blob_data_available, )?; } } else { - self.queued_attestations.push(QueuedAttestation { - slot: attestation.data.slot, - attesting_indices: attestation.attesting_indices.iter().copied().collect(), - block_root: attestation.data.beacon_block_root, - target_epoch: attestation.data.slot.epoch(E::slots_per_epoch()), - payload_present: attestation.data.payload_present, - }); + self.queued_payload_attestations + .push(QueuedPayloadAttestation { + slot: attestation.data.slot, + attesting_indices: attestation.attesting_indices.iter().copied().collect(), + block_root: attestation.data.beacon_block_root, + payload_present: attestation.data.payload_present, + blob_data_available: attestation.data.blob_data_available, + }); } Ok(()) @@ -1265,6 +1304,7 @@ where // Process any attestations that might now be eligible. self.process_attestation_queue()?; + self.process_payload_attestation_queue()?; Ok(self.fc_store.get_current_slot()) } @@ -1339,12 +1379,31 @@ where &mut self.queued_attestations, ) { for validator_index in attestation.attesting_indices.iter() { - // FIXME(sproul): backwards compat/fork abstraction self.proto_array.process_attestation( *validator_index as usize, attestation.block_root, attestation.slot, + )?; + } + } + + Ok(()) + } + + /// Processes and removes from the queue any queued payload attestations which may now be + /// eligible for processing. Payload attestations use `slot + 1 < current_slot` timing. 
+ fn process_payload_attestation_queue(&mut self) -> Result<(), Error> { + let current_slot = self.fc_store.get_current_slot(); + for attestation in + dequeue_payload_attestations(current_slot, &mut self.queued_payload_attestations) + { + for validator_index in attestation.attesting_indices.iter() { + self.proto_array.process_payload_attestation( + *validator_index as usize, + attestation.block_root, + current_slot, attestation.payload_present, + attestation.blob_data_available, )?; } } @@ -1507,6 +1566,11 @@ where &self.queued_attestations } + /// Returns a reference to the currently queued payload attestations. + pub fn queued_payload_attestations(&self) -> &[QueuedPayloadAttestation] { + &self.queued_payload_attestations + } + /// Returns the store's `proposer_boost_root`. pub fn proposer_boost_root(&self) -> Hash256 { self.fc_store.proposer_boost_root() @@ -1591,6 +1655,7 @@ where fc_store, proto_array, queued_attestations: persisted.queued_attestations, + queued_payload_attestations: persisted.queued_payload_attestations, // Will be updated in the following call to `Self::get_head`. 
forkchoice_update_parameters: ForkchoiceUpdateParameters { head_hash: None, @@ -1633,6 +1698,7 @@ where .proto_array() .as_ssz_container(self.justified_checkpoint(), self.finalized_checkpoint()), queued_attestations: self.queued_attestations().to_vec(), + queued_payload_attestations: self.queued_payload_attestations.clone(), } } @@ -1658,6 +1724,8 @@ pub struct PersistedForkChoice { #[superstruct(only(V29))] pub proto_array: proto_array::core::SszContainerV29, pub queued_attestations: Vec, + #[superstruct(only(V29))] + pub queued_payload_attestations: Vec, } pub type PersistedForkChoice = PersistedForkChoiceV29; @@ -1682,6 +1750,7 @@ impl From for PersistedForkChoiceV29 { Self { proto_array: v28.proto_array_v28.into(), queued_attestations: v28.queued_attestations, + queued_payload_attestations: vec![], } } } @@ -1734,7 +1803,6 @@ mod tests { attesting_indices: vec![], block_root: Hash256::zero(), target_epoch: Epoch::new(0), - payload_present: false, }) .collect() } diff --git a/consensus/fork_choice/src/lib.rs b/consensus/fork_choice/src/lib.rs index 87438f2f855..6091de6fdd9 100644 --- a/consensus/fork_choice/src/lib.rs +++ b/consensus/fork_choice/src/lib.rs @@ -6,7 +6,7 @@ pub use crate::fork_choice::{ AttestationFromBlock, Error, ForkChoice, ForkChoiceView, ForkchoiceUpdateParameters, InvalidAttestation, InvalidBlock, PayloadVerificationStatus, PersistedForkChoice, PersistedForkChoiceV17, PersistedForkChoiceV28, PersistedForkChoiceV29, QueuedAttestation, - ResetPayloadStatuses, + QueuedPayloadAttestation, ResetPayloadStatuses, }; pub use fork_choice_store::ForkChoiceStore; pub use proto_array::{ diff --git a/consensus/fork_choice/tests/tests.rs b/consensus/fork_choice/tests/tests.rs index 86ef0e2f907..9887e2eb924 100644 --- a/consensus/fork_choice/tests/tests.rs +++ b/consensus/fork_choice/tests/tests.rs @@ -7,9 +7,11 @@ use beacon_chain::{ BeaconChain, BeaconChainError, BeaconForkChoiceStore, ChainConfig, ForkChoiceError, StateSkipConfig, WhenSlotSkipped, }; 
+use bls::AggregateSignature; use fixed_bytes::FixedBytesExtended; use fork_choice::{ - ForkChoiceStore, InvalidAttestation, InvalidBlock, PayloadVerificationStatus, QueuedAttestation, + AttestationFromBlock, ForkChoiceStore, InvalidAttestation, InvalidBlock, + PayloadVerificationStatus, QueuedAttestation, QueuedPayloadAttestation, }; use state_processing::state_advance::complete_state_advance; use std::fmt; @@ -19,8 +21,8 @@ use store::MemoryStore; use types::SingleAttestation; use types::{ BeaconBlockRef, BeaconState, ChainSpec, Checkpoint, Epoch, EthSpec, ForkName, Hash256, - IndexedAttestation, MainnetEthSpec, RelativeEpoch, SignedBeaconBlock, Slot, SubnetId, - test_utils::generate_deterministic_keypair, + IndexedAttestation, IndexedPayloadAttestation, MainnetEthSpec, PayloadAttestationData, + RelativeEpoch, SignedBeaconBlock, Slot, SubnetId, test_utils::generate_deterministic_keypair, }; pub type E = MainnetEthSpec; @@ -154,6 +156,28 @@ impl ForkChoiceTest { self } + /// Inspect the queued payload attestations in fork choice. + #[allow(dead_code)] + pub fn inspect_queued_payload_attestations(self, mut func: F) -> Self + where + F: FnMut(&[QueuedPayloadAttestation]), + { + self.harness + .chain + .canonical_head + .fork_choice_write_lock() + .update_time(self.harness.chain.slot().unwrap()) + .unwrap(); + func( + self.harness + .chain + .canonical_head + .fork_choice_read_lock() + .queued_payload_attestations(), + ); + self + } + /// Skip a slot, without producing a block. pub fn skip_slot(self) -> Self { self.harness.advance_slot(); @@ -953,6 +977,119 @@ async fn invalid_attestation_payload_during_same_slot() { .await; } +/// A payload attestation for block A at slot S should be accepted when processed at slot S+1. 
+#[tokio::test] +async fn payload_attestation_for_previous_slot_is_accepted_at_next_slot() { + let test = ForkChoiceTest::new() + .apply_blocks_without_new_attestations(1) + .await; + + let chain = &test.harness.chain; + let block_a = chain + .block_at_slot(Slot::new(1), WhenSlotSkipped::Prev) + .expect("lookup should succeed") + .expect("block A should exist"); + let block_a_root = block_a.canonical_root(); + let current_slot = block_a.slot().saturating_add(1_u64); + + let payload_attestation = IndexedPayloadAttestation:: { + attesting_indices: vec![0_u64].try_into().expect("valid attesting indices"), + data: PayloadAttestationData { + beacon_block_root: block_a_root, + slot: Slot::new(1), + payload_present: true, + blob_data_available: true, + }, + signature: AggregateSignature::empty(), + }; + + let result = chain + .canonical_head + .fork_choice_write_lock() + .on_payload_attestation( + current_slot, + &payload_attestation, + AttestationFromBlock::True, + ); + + assert!( + result.is_ok(), + "payload attestation at slot S should be accepted at S+1, got: {:?}", + result + ); + + let latest_message = chain + .canonical_head + .fork_choice_read_lock() + .latest_message(0) + .expect("latest message should exist"); + assert_eq!(latest_message.slot, current_slot); + assert!(latest_message.payload_present); +} + +/// Non-block payload attestations at slot S+1 for data.slot S are delayed; they are not applied +/// until a later slot. 
+#[tokio::test] +async fn non_block_payload_attestation_at_next_slot_is_delayed() { + let test = ForkChoiceTest::new() + .apply_blocks_without_new_attestations(1) + .await; + + let chain = &test.harness.chain; + let block_a = chain + .block_at_slot(Slot::new(1), WhenSlotSkipped::Prev) + .expect("lookup should succeed") + .expect("block A should exist"); + let block_a_root = block_a.canonical_root(); + let s_plus_1 = block_a.slot().saturating_add(1_u64); + let s_plus_2 = block_a.slot().saturating_add(2_u64); + + let payload_attestation = IndexedPayloadAttestation:: { + attesting_indices: vec![0_u64].try_into().expect("valid attesting indices"), + data: PayloadAttestationData { + beacon_block_root: block_a_root, + slot: Slot::new(1), + payload_present: true, + blob_data_available: true, + }, + signature: AggregateSignature::empty(), + }; + + let result = chain + .canonical_head + .fork_choice_write_lock() + .on_payload_attestation(s_plus_1, &payload_attestation, AttestationFromBlock::False); + assert!( + result.is_ok(), + "payload attestation should be accepted for queueing" + ); + + // Vote should not be applied yet; message remains unset. + let latest_before = chain + .canonical_head + .fork_choice_read_lock() + .latest_message(0); + assert!( + latest_before.is_none(), + "non-block payload attestation at S+1 should not apply immediately" + ); + + // Advance fork choice time to S+2, queue should now be processed. 
+ chain + .canonical_head + .fork_choice_write_lock() + .update_time(s_plus_2) + .expect("update_time should succeed"); + + let latest_after = chain + .canonical_head + .fork_choice_read_lock() + .latest_message(0) + .expect("latest message should exist after delay"); + assert_eq!(latest_after.slot, s_plus_2); + assert!(latest_after.payload_present); +} + /// Specification v0.12.1: /// /// assert target.root == get_ancestor(store, attestation.data.beacon_block_root, target_slot) diff --git a/consensus/proto_array/src/bin.rs b/consensus/proto_array/src/bin.rs index e1d307affb4..c5df3f17e4a 100644 --- a/consensus/proto_array/src/bin.rs +++ b/consensus/proto_array/src/bin.rs @@ -18,6 +18,14 @@ fn main() { "execution_status_03.yaml", get_execution_status_test_definition_03(), ); + write_test_def_to_yaml( + "gloas_chain_following.yaml", + get_gloas_chain_following_test_definition(), + ); + write_test_def_to_yaml( + "gloas_payload_probe.yaml", + get_gloas_payload_probe_test_definition(), + ); } fn write_test_def_to_yaml(filename: &str, def: ForkChoiceTestDefinition) { diff --git a/consensus/proto_array/src/fork_choice_test_definition.rs b/consensus/proto_array/src/fork_choice_test_definition.rs index ec4227584a6..f88cf06349e 100644 --- a/consensus/proto_array/src/fork_choice_test_definition.rs +++ b/consensus/proto_array/src/fork_choice_test_definition.rs @@ -56,8 +56,14 @@ pub enum Operation { validator_index: usize, block_root: Hash256, attestation_slot: Slot, - #[serde(default)] + }, + ProcessPayloadAttestation { + validator_index: usize, + block_root: Hash256, + attestation_slot: Slot, payload_present: bool, + #[serde(default)] + blob_data_available: bool, }, Prune { finalized_root: Hash256, @@ -277,18 +283,35 @@ impl ForkChoiceTestDefinition { validator_index, block_root, attestation_slot, + } => { + fork_choice + .process_attestation(validator_index, block_root, attestation_slot) + .unwrap_or_else(|_| { + panic!( + "process_attestation op at index {} returned 
error", + op_index + ) + }); + check_bytes_round_trip(&fork_choice); + } + Operation::ProcessPayloadAttestation { + validator_index, + block_root, + attestation_slot, payload_present, + blob_data_available, } => { fork_choice - .process_attestation( + .process_payload_attestation( validator_index, block_root, attestation_slot, payload_present, + blob_data_available, ) .unwrap_or_else(|_| { panic!( - "process_attestation op at index {} returned error", + "process_payload_attestation op at index {} returned error", op_index ) }); diff --git a/consensus/proto_array/src/fork_choice_test_definition/execution_status.rs b/consensus/proto_array/src/fork_choice_test_definition/execution_status.rs index 93c97d09db4..318407f5983 100644 --- a/consensus/proto_array/src/fork_choice_test_definition/execution_status.rs +++ b/consensus/proto_array/src/fork_choice_test_definition/execution_status.rs @@ -106,7 +106,6 @@ pub fn get_execution_status_test_definition_01() -> ForkChoiceTestDefinition { validator_index: 0, block_root: get_root(1), attestation_slot: Slot::new(2), - payload_present: false, }); // Ensure that the head is now 1, because 1 has a vote. 
@@ -149,7 +148,6 @@ pub fn get_execution_status_test_definition_01() -> ForkChoiceTestDefinition { validator_index: 1, block_root: get_root(2), attestation_slot: Slot::new(2), - payload_present: false, }); // Ensure that the head is 2 since 1 and 2 both have a vote @@ -254,7 +252,6 @@ pub fn get_execution_status_test_definition_01() -> ForkChoiceTestDefinition { validator_index: 0, block_root: get_root(3), attestation_slot: Slot::new(3), - payload_present: false, }); // Ensure that the head is still 2 @@ -357,7 +354,6 @@ pub fn get_execution_status_test_definition_01() -> ForkChoiceTestDefinition { validator_index: 1, block_root: get_root(1), attestation_slot: Slot::new(3), - payload_present: false, }); // Ensure that the head has switched back to 1 @@ -521,7 +517,6 @@ pub fn get_execution_status_test_definition_02() -> ForkChoiceTestDefinition { validator_index: 0, block_root: get_root(1), attestation_slot: Slot::new(2), - payload_present: false, }); // Ensure that the head is now 1, because 1 has a vote. @@ -564,7 +559,6 @@ pub fn get_execution_status_test_definition_02() -> ForkChoiceTestDefinition { validator_index: 1, block_root: get_root(2), attestation_slot: Slot::new(2), - payload_present: false, }); // Ensure that the head is 2 since 1 and 2 both have a vote @@ -669,7 +663,6 @@ pub fn get_execution_status_test_definition_02() -> ForkChoiceTestDefinition { validator_index: 0, block_root: get_root(3), attestation_slot: Slot::new(3), - payload_present: false, }); // Move validator #1 vote from 2 to 3 @@ -683,7 +676,6 @@ pub fn get_execution_status_test_definition_02() -> ForkChoiceTestDefinition { validator_index: 1, block_root: get_root(3), attestation_slot: Slot::new(3), - payload_present: false, }); // Ensure that the head is now 3. 
@@ -898,7 +890,6 @@ pub fn get_execution_status_test_definition_03() -> ForkChoiceTestDefinition { validator_index: 0, block_root: get_root(1), attestation_slot: Slot::new(2), - payload_present: false, }); // Ensure that the head is now 1, because 1 has a vote. @@ -941,7 +932,6 @@ pub fn get_execution_status_test_definition_03() -> ForkChoiceTestDefinition { validator_index: 1, block_root: get_root(1), attestation_slot: Slot::new(2), - payload_present: false, }); // Ensure that the head is 1. diff --git a/consensus/proto_array/src/fork_choice_test_definition/ffg_updates.rs b/consensus/proto_array/src/fork_choice_test_definition/ffg_updates.rs index ee55ea649fe..88665a22add 100644 --- a/consensus/proto_array/src/fork_choice_test_definition/ffg_updates.rs +++ b/consensus/proto_array/src/fork_choice_test_definition/ffg_updates.rs @@ -312,7 +312,6 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { validator_index: 0, block_root: get_root(1), attestation_slot: Slot::new(0), - payload_present: false, }); // Ensure that if we start at 0 we find 9 (just: 0, fin: 0). @@ -376,7 +375,6 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { validator_index: 1, block_root: get_root(2), attestation_slot: Slot::new(0), - payload_present: false, }); // Ensure that if we start at 0 we find 10 (just: 0, fin: 0). diff --git a/consensus/proto_array/src/fork_choice_test_definition/gloas_payload.rs b/consensus/proto_array/src/fork_choice_test_definition/gloas_payload.rs index b6568106e39..7579b016369 100644 --- a/consensus/proto_array/src/fork_choice_test_definition/gloas_payload.rs +++ b/consensus/proto_array/src/fork_choice_test_definition/gloas_payload.rs @@ -109,18 +109,21 @@ pub fn get_gloas_payload_probe_test_definition() -> ForkChoiceTestDefinition { execution_payload_block_hash: Some(get_hash(1)), }); - // One Full and one Empty vote for the same head block: tie should probe as Full. 
- ops.push(Operation::ProcessAttestation { + // One Full and one Empty vote for the same head block: tie probes via runtime tiebreak, + // which defaults to Empty unless timely+data-available evidence is set. + ops.push(Operation::ProcessPayloadAttestation { validator_index: 0, block_root: get_root(1), attestation_slot: Slot::new(2), payload_present: true, + blob_data_available: false, }); - ops.push(Operation::ProcessAttestation { + ops.push(Operation::ProcessPayloadAttestation { validator_index: 1, block_root: get_root(1), attestation_slot: Slot::new(2), payload_present: false, + blob_data_available: false, }); ops.push(Operation::FindHead { justified_checkpoint: get_checkpoint(0), @@ -135,15 +138,16 @@ pub fn get_gloas_payload_probe_test_definition() -> ForkChoiceTestDefinition { }); ops.push(Operation::AssertHeadPayloadStatus { head_root: get_root(1), - expected_status: PayloadStatus::Full, + expected_status: PayloadStatus::Empty, }); // Flip validator 0 to Empty; probe should now report Empty. 
- ops.push(Operation::ProcessAttestation { + ops.push(Operation::ProcessPayloadAttestation { validator_index: 0, block_root: get_root(1), attestation_slot: Slot::new(3), payload_present: false, + blob_data_available: false, }); ops.push(Operation::FindHead { justified_checkpoint: get_checkpoint(0), @@ -171,11 +175,12 @@ pub fn get_gloas_payload_probe_test_definition() -> ForkChoiceTestDefinition { execution_payload_parent_hash: Some(get_hash(1)), execution_payload_block_hash: Some(get_hash(5)), }); - ops.push(Operation::ProcessAttestation { + ops.push(Operation::ProcessPayloadAttestation { validator_index: 2, block_root: get_root(5), attestation_slot: Slot::new(3), payload_present: true, + blob_data_available: false, }); ops.push(Operation::FindHead { justified_checkpoint: get_checkpoint(0), @@ -190,7 +195,250 @@ pub fn get_gloas_payload_probe_test_definition() -> ForkChoiceTestDefinition { }); ops.push(Operation::AssertHeadPayloadStatus { head_root: get_root(5), - expected_status: PayloadStatus::Full, + expected_status: PayloadStatus::Empty, + }); + + ForkChoiceTestDefinition { + finalized_block_slot: Slot::new(0), + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + operations: ops, + execution_payload_parent_hash: Some(get_hash(42)), + execution_payload_block_hash: Some(get_hash(0)), + spec: Some(gloas_spec()), + } +} + +pub fn get_gloas_find_head_vote_transition_test_definition() -> ForkChoiceTestDefinition { + let mut ops = vec![]; + + // Competing branches with distinct payload ancestry (Full vs Empty from genesis). 
+ ops.push(Operation::ProcessBlock { + slot: Slot::new(1), + root: get_root(1), + parent_root: get_root(0), + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + execution_payload_parent_hash: Some(get_hash(0)), + execution_payload_block_hash: Some(get_hash(1)), + }); + ops.push(Operation::ProcessBlock { + slot: Slot::new(1), + root: get_root(2), + parent_root: get_root(0), + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + execution_payload_parent_hash: Some(get_hash(99)), + execution_payload_block_hash: Some(get_hash(2)), + }); + ops.push(Operation::ProcessBlock { + slot: Slot::new(2), + root: get_root(3), + parent_root: get_root(1), + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + execution_payload_parent_hash: Some(get_hash(1)), + execution_payload_block_hash: Some(get_hash(3)), + }); + ops.push(Operation::ProcessBlock { + slot: Slot::new(2), + root: get_root(4), + parent_root: get_root(2), + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + execution_payload_parent_hash: Some(get_hash(100)), + execution_payload_block_hash: Some(get_hash(4)), + }); + + // Equal branch weights: tiebreak FULL picks branch rooted at 3. + ops.push(Operation::SetPayloadTiebreak { + block_root: get_root(0), + is_timely: true, + is_data_available: true, + }); + ops.push(Operation::FindHead { + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + justified_state_balances: vec![1], + expected_head: get_root(3), + }); + + // Validator 0 votes Empty branch -> head flips to 4. 
+ ops.push(Operation::ProcessPayloadAttestation { + validator_index: 0, + block_root: get_root(4), + attestation_slot: Slot::new(3), + payload_present: false, + blob_data_available: false, + }); + ops.push(Operation::FindHead { + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + justified_state_balances: vec![1], + expected_head: get_root(4), + }); + + // Latest-message update back to Full branch -> head returns to 3. + ops.push(Operation::ProcessPayloadAttestation { + validator_index: 0, + block_root: get_root(3), + attestation_slot: Slot::new(4), + payload_present: true, + blob_data_available: false, + }); + ops.push(Operation::FindHead { + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + justified_state_balances: vec![1], + expected_head: get_root(3), + }); + ops.push(Operation::AssertPayloadWeights { + block_root: get_root(3), + expected_full_weight: 1, + expected_empty_weight: 0, + }); + + ForkChoiceTestDefinition { + finalized_block_slot: Slot::new(0), + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + operations: ops, + execution_payload_parent_hash: Some(get_hash(42)), + execution_payload_block_hash: Some(get_hash(0)), + spec: Some(gloas_spec()), + } +} + +pub fn get_gloas_weight_priority_over_payload_preference_test_definition() +-> ForkChoiceTestDefinition { + let mut ops = vec![]; + + // Build two branches where one child extends payload (Full) and the other doesn't (Empty). 
+ ops.push(Operation::ProcessBlock { + slot: Slot::new(1), + root: get_root(1), + parent_root: get_root(0), + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + execution_payload_parent_hash: Some(get_hash(0)), + execution_payload_block_hash: Some(get_hash(1)), + }); + ops.push(Operation::ProcessBlock { + slot: Slot::new(1), + root: get_root(2), + parent_root: get_root(0), + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + execution_payload_parent_hash: Some(get_hash(99)), + execution_payload_block_hash: Some(get_hash(2)), + }); + ops.push(Operation::ProcessBlock { + slot: Slot::new(2), + root: get_root(3), + parent_root: get_root(1), + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + execution_payload_parent_hash: Some(get_hash(1)), + execution_payload_block_hash: Some(get_hash(3)), + }); + ops.push(Operation::ProcessBlock { + slot: Slot::new(2), + root: get_root(4), + parent_root: get_root(2), + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + execution_payload_parent_hash: Some(get_hash(100)), + execution_payload_block_hash: Some(get_hash(4)), + }); + + // Parent prefers Full on equal branch weights. + ops.push(Operation::SetPayloadTiebreak { + block_root: get_root(0), + is_timely: true, + is_data_available: true, + }); + ops.push(Operation::FindHead { + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + justified_state_balances: vec![1], + expected_head: get_root(3), + }); + + // Add two Empty votes to make the Empty branch strictly heavier. 
+ ops.push(Operation::ProcessPayloadAttestation { + validator_index: 0, + block_root: get_root(4), + attestation_slot: Slot::new(3), + payload_present: false, + blob_data_available: false, + }); + ops.push(Operation::ProcessPayloadAttestation { + validator_index: 1, + block_root: get_root(4), + attestation_slot: Slot::new(3), + payload_present: false, + blob_data_available: false, + }); + ops.push(Operation::FindHead { + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + justified_state_balances: vec![1, 1], + expected_head: get_root(4), + }); + + ForkChoiceTestDefinition { + finalized_block_slot: Slot::new(0), + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + operations: ops, + execution_payload_parent_hash: Some(get_hash(42)), + execution_payload_block_hash: Some(get_hash(0)), + spec: Some(gloas_spec()), + } +} + +pub fn get_gloas_parent_empty_when_child_points_to_grandparent_test_definition() +-> ForkChoiceTestDefinition { + let mut ops = vec![]; + + // Build a three-block chain A -> B -> C (CL parent links). + // A: EL parent = genesis hash(0), EL hash = hash(1). + ops.push(Operation::ProcessBlock { + slot: Slot::new(1), + root: get_root(1), + parent_root: get_root(0), + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + execution_payload_parent_hash: Some(get_hash(0)), + execution_payload_block_hash: Some(get_hash(1)), + }); + + // B: EL parent = hash(1), EL hash = hash(2). + ops.push(Operation::ProcessBlock { + slot: Slot::new(2), + root: get_root(2), + parent_root: get_root(1), + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + execution_payload_parent_hash: Some(get_hash(1)), + execution_payload_block_hash: Some(get_hash(2)), + }); + + // C: CL parent is B, but EL parent points to A (hash 1), not B (hash 2). + // This models B's payload not arriving in time, so C records parent status as Empty. 
+ ops.push(Operation::ProcessBlock { + slot: Slot::new(3), + root: get_root(3), + parent_root: get_root(2), + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + execution_payload_parent_hash: Some(get_hash(1)), + execution_payload_block_hash: Some(get_hash(3)), + }); + + ops.push(Operation::AssertParentPayloadStatus { + block_root: get_root(3), + expected_status: PayloadStatus::Empty, }); ForkChoiceTestDefinition { @@ -219,4 +467,22 @@ mod tests { let test = get_gloas_payload_probe_test_definition(); test.run(); } + + #[test] + fn find_head_vote_transition() { + let test = get_gloas_find_head_vote_transition_test_definition(); + test.run(); + } + + #[test] + fn weight_priority_over_payload_preference() { + let test = get_gloas_weight_priority_over_payload_preference_test_definition(); + test.run(); + } + + #[test] + fn parent_empty_when_child_points_to_grandparent() { + let test = get_gloas_parent_empty_when_child_points_to_grandparent_test_definition(); + test.run(); + } } diff --git a/consensus/proto_array/src/fork_choice_test_definition/votes.rs b/consensus/proto_array/src/fork_choice_test_definition/votes.rs index d170e0974ff..49afae2d4a1 100644 --- a/consensus/proto_array/src/fork_choice_test_definition/votes.rs +++ b/consensus/proto_array/src/fork_choice_test_definition/votes.rs @@ -106,7 +106,6 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { validator_index: 0, block_root: get_root(1), attestation_slot: Slot::new(2), - payload_present: false, }); // Ensure that the head is now 1, because 1 has a vote. 
@@ -136,7 +135,6 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { validator_index: 1, block_root: get_root(2), attestation_slot: Slot::new(2), - payload_present: false, }); // Ensure that the head is 2 since 1 and 2 both have a vote @@ -211,7 +209,6 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { validator_index: 0, block_root: get_root(3), attestation_slot: Slot::new(3), - payload_present: false, }); // Ensure that the head is still 2 @@ -246,7 +243,6 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { validator_index: 1, block_root: get_root(1), attestation_slot: Slot::new(3), - payload_present: false, }); // Ensure that the head is now 3 @@ -409,13 +405,11 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { validator_index: 0, block_root: get_root(5), attestation_slot: Slot::new(4), - payload_present: false, }); ops.push(Operation::ProcessAttestation { validator_index: 1, block_root: get_root(5), attestation_slot: Slot::new(4), - payload_present: false, }); // Add blocks 7, 8 and 9. Adding these blocks helps test the `best_descendant` @@ -570,13 +564,11 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { validator_index: 0, block_root: get_root(9), attestation_slot: Slot::new(5), - payload_present: false, }); ops.push(Operation::ProcessAttestation { validator_index: 1, block_root: get_root(9), attestation_slot: Slot::new(5), - payload_present: false, }); // Add block 10 @@ -650,13 +642,11 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { validator_index: 2, block_root: get_root(10), attestation_slot: Slot::new(5), - payload_present: false, }); ops.push(Operation::ProcessAttestation { validator_index: 3, block_root: get_root(10), attestation_slot: Slot::new(5), - payload_present: false, }); // Check the head is now 10. 
diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 926767093f7..7403937d393 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -357,6 +357,9 @@ impl ProtoArray { apply_delta(node.empty_payload_weight, node_empty_delta, node_index)?; node.full_payload_weight = apply_delta(node.full_payload_weight, node_full_delta, node_index)?; + if let Some(payload_tiebreaker) = node_deltas.payload_tiebreaker { + node.payload_tiebreak = payload_tiebreaker; + } } // Update the parent delta (if any). @@ -1052,10 +1055,14 @@ impl ProtoArray { } else if !child_leads_to_viable_head && best_child_leads_to_viable_head { // The best child leads to a viable head, but the child doesn't. no_change + } else if child.weight() > best_child.weight() { + // Weight is the primary ordering criterion. + change_to_child + } else if child.weight() < best_child.weight() { + no_change } else { - // Both viable or both non-viable. For V29 parents, prefer the child - // whose parent_payload_status matches the parent's payload preference - // (Full if full_payload_weight >= empty_payload_weight, else Empty). + // Equal weights: for V29 parents, prefer the child whose + // parent_payload_status matches the parent's payload preference. let child_matches = child_matches_parent_payload_preference(parent, child); let best_child_matches = child_matches_parent_payload_preference(parent, best_child); @@ -1064,20 +1071,11 @@ impl ProtoArray { change_to_child } else if !child_matches && best_child_matches { no_change - } else if child.weight() == best_child.weight() { - // Tie-breaker of equal weights by root. - if *child.root() >= *best_child.root() { - change_to_child - } else { - no_change - } + } else if *child.root() >= *best_child.root() { + // Final tie-breaker of equal weights by root. + change_to_child } else { - // Choose the winner by weight. 
- if child.weight() > best_child.weight() { - change_to_child - } else { - no_change - } + no_change } } } diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index e1c893db9af..9400aafed7a 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -32,6 +32,8 @@ pub struct VoteTracker { next_slot: Slot, current_payload_present: bool, next_payload_present: bool, + current_blob_data_available: bool, + next_blob_data_available: bool, } // FIXME(sproul): version this type @@ -39,6 +41,7 @@ pub struct LatestMessage { pub slot: Slot, pub root: Hash256, pub payload_present: bool, + pub blob_data_available: bool, } /// Represents the verification status of an execution payload pre-Gloas. @@ -521,7 +524,24 @@ impl ProtoArrayForkChoice { validator_index: usize, block_root: Hash256, attestation_slot: Slot, + ) -> Result<(), String> { + let vote = self.votes.get_mut(validator_index); + + if attestation_slot > vote.next_slot || *vote == VoteTracker::default() { + vote.next_root = block_root; + vote.next_slot = attestation_slot; + } + + Ok(()) + } + + pub fn process_payload_attestation( + &mut self, + validator_index: usize, + block_root: Hash256, + attestation_slot: Slot, payload_present: bool, + blob_data_available: bool, ) -> Result<(), String> { let vote = self.votes.get_mut(validator_index); @@ -529,6 +549,7 @@ impl ProtoArrayForkChoice { vote.next_root = block_root; vote.next_slot = attestation_slot; vote.next_payload_present = payload_present; + vote.next_blob_data_available = blob_data_available; } Ok(()) @@ -945,13 +966,19 @@ impl ProtoArrayForkChoice { /// Returns the payload status of the head node based on accumulated weights. /// - /// Returns `Full` if `full_payload_weight >= empty_payload_weight` (Full wins ties per spec's - /// `get_payload_status_tiebreaker` natural ordering FULL=2 > EMPTY=1). 
+ /// Returns `Full` if `full_payload_weight > empty_payload_weight`. + /// Returns `Empty` if `empty_payload_weight > full_payload_weight`. + /// On ties, consult the node's runtime `payload_tiebreak`: prefer `Full` only when timely and + /// data is available, otherwise `Empty`. /// Returns `Empty` otherwise. Returns `None` for V17 nodes. pub fn head_payload_status(&self, head_root: &Hash256) -> Option { let node = self.get_proto_node(head_root)?; let v29 = node.as_v29().ok()?; - if v29.full_payload_weight >= v29.empty_payload_weight { + if v29.full_payload_weight > v29.empty_payload_weight { + Some(PayloadStatus::Full) + } else if v29.empty_payload_weight > v29.full_payload_weight { + Some(PayloadStatus::Empty) + } else if v29.payload_tiebreak.is_timely && v29.payload_tiebreak.is_data_available { Some(PayloadStatus::Full) } else { Some(PayloadStatus::Empty) @@ -985,6 +1012,7 @@ impl ProtoArrayForkChoice { root: vote.next_root, slot: vote.next_slot, payload_present: vote.next_payload_present, + blob_data_available: vote.next_blob_data_available, }) } } else { @@ -1079,6 +1107,17 @@ fn compute_deltas( new_balances: &[u64], equivocating_indices: &BTreeSet, ) -> Result, Error> { + let merge_payload_tiebreaker = + |delta: &mut NodeDelta, incoming: crate::proto_array::PayloadTiebreak| { + delta.payload_tiebreaker = Some(match delta.payload_tiebreaker { + Some(existing) => crate::proto_array::PayloadTiebreak { + is_timely: existing.is_timely || incoming.is_timely, + is_data_available: existing.is_data_available || incoming.is_data_available, + }, + None => incoming, + }); + }; + let block_slot = |index: usize| -> Result { node_slots .get(index) @@ -1138,6 +1177,7 @@ fn compute_deltas( vote.current_root = Hash256::zero(); vote.current_slot = Slot::new(0); vote.current_payload_present = false; + vote.current_blob_data_available = false; } // We've handled this slashed validator, continue without applying an ordinary delta. 
continue; @@ -1195,11 +1235,21 @@ fn compute_deltas( block_slot(next_delta_index)?, ); node_delta.add_payload_delta(status, new_balance, next_delta_index)?; + if status != PayloadStatus::Pending { + merge_payload_tiebreaker( + node_delta, + crate::proto_array::PayloadTiebreak { + is_timely: vote.next_payload_present, + is_data_available: vote.next_blob_data_available, + }, + ); + } } vote.current_root = vote.next_root; vote.current_slot = vote.next_slot; vote.current_payload_present = vote.next_payload_present; + vote.current_blob_data_available = vote.next_blob_data_available; } } @@ -1552,6 +1602,8 @@ mod test_compute_deltas { next_slot: Slot::new(0), current_payload_present: false, next_payload_present: false, + current_blob_data_available: false, + next_blob_data_available: false, }); old_balances.push(0); new_balances.push(0); @@ -1607,6 +1659,8 @@ mod test_compute_deltas { next_slot: Slot::new(0), current_payload_present: false, next_payload_present: false, + current_blob_data_available: false, + next_blob_data_available: false, }); old_balances.push(BALANCE); new_balances.push(BALANCE); @@ -1669,6 +1723,8 @@ mod test_compute_deltas { next_slot: Slot::new(0), current_payload_present: false, next_payload_present: false, + current_blob_data_available: false, + next_blob_data_available: false, }); old_balances.push(BALANCE); new_balances.push(BALANCE); @@ -1726,6 +1782,8 @@ mod test_compute_deltas { next_slot: Slot::new(0), current_payload_present: false, next_payload_present: false, + current_blob_data_available: false, + next_blob_data_available: false, }); old_balances.push(BALANCE); new_balances.push(BALANCE); @@ -1794,6 +1852,8 @@ mod test_compute_deltas { next_slot: Slot::new(0), current_payload_present: false, next_payload_present: false, + current_blob_data_available: false, + next_blob_data_available: false, }); // One validator moves their vote from the block to something outside the tree. 
@@ -1804,6 +1864,8 @@ mod test_compute_deltas { next_slot: Slot::new(0), current_payload_present: false, next_payload_present: false, + current_blob_data_available: false, + next_blob_data_available: false, }); let deltas = compute_deltas( @@ -1854,6 +1916,8 @@ mod test_compute_deltas { next_slot: Slot::new(0), current_payload_present: false, next_payload_present: false, + current_blob_data_available: false, + next_blob_data_available: false, }); old_balances.push(OLD_BALANCE); new_balances.push(NEW_BALANCE); @@ -1927,6 +1991,8 @@ mod test_compute_deltas { next_slot: Slot::new(0), current_payload_present: false, next_payload_present: false, + current_blob_data_available: false, + next_blob_data_available: false, }); } @@ -1987,6 +2053,8 @@ mod test_compute_deltas { next_slot: Slot::new(0), current_payload_present: false, next_payload_present: false, + current_blob_data_available: false, + next_blob_data_available: false, }); } @@ -2045,6 +2113,8 @@ mod test_compute_deltas { next_slot: Slot::new(0), current_payload_present: false, next_payload_present: false, + current_blob_data_available: false, + next_blob_data_available: false, }); } @@ -2108,6 +2178,8 @@ mod test_compute_deltas { next_slot: Slot::new(1), current_payload_present: false, next_payload_present: true, + current_blob_data_available: false, + next_blob_data_available: false, }]); let deltas = compute_deltas( @@ -2140,6 +2212,8 @@ mod test_compute_deltas { next_slot: Slot::new(0), current_payload_present: false, next_payload_present: true, + current_blob_data_available: false, + next_blob_data_available: false, }]); let deltas = compute_deltas( From eb1b81063d47a120857179aac3d59a3f1ebed884 Mon Sep 17 00:00:00 2001 From: hopinheimer Date: Thu, 26 Feb 2026 04:38:45 -0500 Subject: [PATCH 005/127] fixing test --- beacon_node/beacon_chain/src/beacon_chain.rs | 1 + .../beacon_chain/src/block_verification.rs | 1 + consensus/fork_choice/src/fork_choice.rs | 68 ++++++++++++------- 
consensus/fork_choice/tests/tests.rs | 26 ++++++- consensus/proto_array/src/lib.rs | 2 +- consensus/proto_array/src/ssz_container.rs | 40 ++++++----- 6 files changed, 96 insertions(+), 42 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 22df1e1a2d7..fd85fce5fd3 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -2263,6 +2263,7 @@ impl BeaconChain { self.slot()?, verified.indexed_attestation().to_ref(), AttestationFromBlock::False, + &self.spec, ) .map_err(Into::into) } diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 1d7e20ec1fc..ee0bb9e6ff6 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -1665,6 +1665,7 @@ impl ExecutionPendingBlock { current_slot, indexed_attestation, AttestationFromBlock::True, + &chain.spec, ) { Ok(()) => Ok(()), // Ignore invalid attestations whilst importing attestations from a block. The diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index ae08b3675f6..990aedf2c35 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -407,21 +407,33 @@ where AttestationShufflingId::new(anchor_block_root, anchor_state, RelativeEpoch::Next) .map_err(Error::BeaconStateError)?; - let execution_status = anchor_block.message().execution_payload().map_or_else( - // If the block doesn't have an execution payload then it can't have - // execution enabled. - |_| ExecutionStatus::irrelevant(), - |execution_payload| { + let (execution_status, execution_payload_parent_hash, execution_payload_block_hash) = + if let Ok(execution_payload) = anchor_block.message().execution_payload() { + // Pre-Gloas forks: hashes come from the execution payload. 
if execution_payload.is_default_with_empty_roots() { - // A default payload does not have execution enabled. - ExecutionStatus::irrelevant() + (ExecutionStatus::irrelevant(), None, None) } else { - // Assume that this payload is valid, since the anchor should be a trusted block and - // state. - ExecutionStatus::Valid(execution_payload.block_hash()) + // Assume that this payload is valid, since the anchor should be a + // trusted block and state. + ( + ExecutionStatus::Valid(execution_payload.block_hash()), + Some(execution_payload.parent_hash()), + Some(execution_payload.block_hash()), + ) } - }, - ); + } else if let Ok(signed_bid) = + anchor_block.message().body().signed_execution_payload_bid() + { + // Gloas: hashes come from the execution payload bid. + ( + ExecutionStatus::irrelevant(), + Some(signed_bid.message.parent_block_hash), + Some(signed_bid.message.block_hash), + ) + } else { + // Pre-merge: no execution payload at all. + (ExecutionStatus::irrelevant(), None, None) + }; // If the current slot is not provided, use the value that was last provided to the store. let current_slot = current_slot.unwrap_or_else(|| fc_store.get_current_slot()); @@ -435,8 +447,8 @@ where current_epoch_shuffling_id, next_epoch_shuffling_id, execution_status, - None, - None, + execution_payload_parent_hash, + execution_payload_block_hash, spec, )?; @@ -1045,6 +1057,7 @@ where &self, indexed_attestation: IndexedAttestationRef, is_from_block: AttestationFromBlock, + spec: &ChainSpec, ) -> Result<(), InvalidAttestation> { // There is no point in processing an attestation with an empty bitfield. Reject // it immediately. @@ -1117,11 +1130,17 @@ where }); } - // Same-slot attestations must have index == 0 (i.e., indicate pending payload status). - // Payload-present attestations (index == 1) for the same slot as the block are invalid - // because PTC votes should only arrive in subsequent slots. 
- if indexed_attestation.data().slot == block.slot && indexed_attestation.data().index != 0 { - return Err(InvalidAttestation::PayloadAttestationDuringSameSlot { slot: block.slot }); + // Post-GLOAS: same-slot attestations with index != 0 indicate a payload-present vote. + // These must go through `on_payload_attestation`, not `on_attestation`. + if spec + .fork_name_at_slot::(indexed_attestation.data().slot) + .gloas_enabled() + && indexed_attestation.data().slot == block.slot + && indexed_attestation.data().index != 0 + { + return Err(InvalidAttestation::PayloadAttestationDuringSameSlot { + slot: block.slot, + }); } Ok(()) @@ -1182,6 +1201,7 @@ where system_time_current_slot: Slot, attestation: IndexedAttestationRef, is_from_block: AttestationFromBlock, + spec: &ChainSpec, ) -> Result<(), Error> { let _timer = metrics::start_timer(&metrics::FORK_CHOICE_ON_ATTESTATION_TIMES); @@ -1204,7 +1224,7 @@ where return Ok(()); } - self.validate_on_attestation(attestation, is_from_block)?; + self.validate_on_attestation(attestation, is_from_block, spec)?; if attestation.data().slot < self.fc_store.get_current_slot() { for validator_index in attestation.attesting_indices_iter() { @@ -1720,7 +1740,7 @@ pub struct PersistedForkChoice { #[superstruct(only(V17))] pub proto_array_bytes: Vec, #[superstruct(only(V28))] - pub proto_array_v28: proto_array::core::SszContainerLegacyV28, + pub proto_array_v28: proto_array::core::SszContainerV28, #[superstruct(only(V29))] pub proto_array: proto_array::core::SszContainerV29, pub queued_attestations: Vec, @@ -1735,8 +1755,8 @@ impl TryFrom for PersistedForkChoiceV28 { fn try_from(v17: PersistedForkChoiceV17) -> Result { let container_v17 = - proto_array::core::SszContainerLegacyV17::from_ssz_bytes(&v17.proto_array_bytes)?; - let container_v28: proto_array::core::SszContainerLegacyV28 = container_v17.into(); + proto_array::core::SszContainerV17::from_ssz_bytes(&v17.proto_array_bytes)?; + let container_v28: proto_array::core::SszContainerV28 
= container_v17.into(); Ok(Self { proto_array_v28: container_v28, @@ -1758,7 +1778,7 @@ impl From for PersistedForkChoiceV29 { impl From<(PersistedForkChoiceV28, JustifiedBalances)> for PersistedForkChoiceV17 { fn from((v28, balances): (PersistedForkChoiceV28, JustifiedBalances)) -> Self { let container_v17 = - proto_array::core::SszContainerLegacyV17::from((v28.proto_array_v28, balances)); + proto_array::core::SszContainerV17::from((v28.proto_array_v28, balances)); let proto_array_bytes = container_v17.as_ssz_bytes(); Self { diff --git a/consensus/fork_choice/tests/tests.rs b/consensus/fork_choice/tests/tests.rs index 9887e2eb924..029e8612899 100644 --- a/consensus/fork_choice/tests/tests.rs +++ b/consensus/fork_choice/tests/tests.rs @@ -73,6 +73,22 @@ impl ForkChoiceTest { Self { harness } } + /// Creates a new tester with the GLOAS fork active at epoch 1. + /// Genesis is a standard Fulu block (epoch 0), so block production works normally. + /// Tests that need GLOAS semantics should advance the chain into epoch 1 first. + pub fn new_with_gloas() -> Self { + let mut spec = ForkName::latest_stable().make_genesis_spec(ChainSpec::default()); + spec.gloas_fork_epoch = Some(Epoch::new(1)); + let harness = BeaconChainHarness::builder(MainnetEthSpec) + .spec(spec.into()) + .deterministic_keypairs(VALIDATOR_COUNT) + .fresh_ephemeral_store() + .mock_execution_layer() + .build(); + + Self { harness } + } + /// Get a value from the `ForkChoice` instantiation. fn get(&self, func: T) -> U where @@ -948,9 +964,17 @@ async fn invalid_attestation_future_block() { } /// Payload attestations (index == 1) are invalid when they refer to a block in the same slot. +/// This check only applies when GLOAS is active. +/// +/// TODO(gloas): un-ignore once the test harness supports Gloas block production. +/// The validation logic is gated on `spec.fork_name_at_slot().gloas_enabled()` in +/// `validate_on_attestation`, which requires a block to exist at a GLOAS-enabled slot. 
+/// Currently the mock execution layer cannot produce Gloas blocks (no +/// `signed_execution_payload_bid` support). +#[ignore] #[tokio::test] async fn invalid_attestation_payload_during_same_slot() { - ForkChoiceTest::new() + ForkChoiceTest::new_with_gloas() .apply_blocks_without_new_attestations(1) .await .apply_attestation_to_chain( diff --git a/consensus/proto_array/src/lib.rs b/consensus/proto_array/src/lib.rs index b131fb403e7..42c65e6ffe6 100644 --- a/consensus/proto_array/src/lib.rs +++ b/consensus/proto_array/src/lib.rs @@ -17,6 +17,6 @@ pub mod core { pub use super::proto_array::{ProposerBoost, ProtoArray, ProtoNode}; pub use super::proto_array_fork_choice::VoteTracker; pub use super::ssz_container::{ - SszContainer, SszContainerLegacyV17, SszContainerLegacyV28, SszContainerV29, + SszContainer, SszContainerV17, SszContainerV28, SszContainerV29, }; } diff --git a/consensus/proto_array/src/ssz_container.rs b/consensus/proto_array/src/ssz_container.rs index 07baaa47867..b7d4fa91b05 100644 --- a/consensus/proto_array/src/ssz_container.rs +++ b/consensus/proto_array/src/ssz_container.rs @@ -7,7 +7,6 @@ use crate::{ use ssz::{Encode, four_byte_option_impl}; use ssz_derive::{Decode, Encode}; use std::collections::HashMap; -use superstruct::superstruct; use types::{Checkpoint, Hash256}; // Define a "legacy" implementation of `Option` which uses four bytes for encoding the union @@ -16,15 +15,10 @@ four_byte_option_impl!(four_byte_option_checkpoint, Checkpoint); pub type SszContainer = SszContainerV29; -// Legacy containers (V17/V28) for backward compatibility with older schema versions. -#[superstruct( - variants(V17, V28), - variant_attributes(derive(Encode, Decode, Clone)), - no_enum -)] -pub struct SszContainerLegacy { +/// Proto-array container introduced in schema V17. 
+#[derive(Encode, Decode, Clone)] +pub struct SszContainerV17 { pub votes: Vec, - #[superstruct(only(V17))] pub balances: Vec, pub prune_threshold: usize, // Deprecated, remove in a future schema migration @@ -36,6 +30,20 @@ pub struct SszContainerLegacy { pub previous_proposer_boost: ProposerBoost, } +/// Proto-array container introduced in schema V28. +#[derive(Encode, Decode, Clone)] +pub struct SszContainerV28 { + pub votes: Vec, + pub prune_threshold: usize, + // Deprecated, remove in a future schema migration + justified_checkpoint: Checkpoint, + // Deprecated, remove in a future schema migration + finalized_checkpoint: Checkpoint, + pub nodes: Vec, + pub indices: Vec<(Hash256, usize)>, + pub previous_proposer_boost: ProposerBoost, +} + /// Current container version. Uses union-encoded `ProtoNode` to support mixed V17/V29 nodes. #[derive(Encode, Decode, Clone)] pub struct SszContainerV29 { @@ -90,8 +98,8 @@ impl TryFrom<(SszContainerV29, JustifiedBalances)> for ProtoArrayForkChoice { } // Convert legacy V17 to V28 by dropping balances. -impl From for SszContainerLegacyV28 { - fn from(v17: SszContainerLegacyV17) -> Self { +impl From for SszContainerV28 { + fn from(v17: SszContainerV17) -> Self { Self { votes: v17.votes, prune_threshold: v17.prune_threshold, @@ -105,8 +113,8 @@ impl From for SszContainerLegacyV28 { } // Convert legacy V28 to V17 by re-adding balances. -impl From<(SszContainerLegacyV28, JustifiedBalances)> for SszContainerLegacyV17 { - fn from((v28, balances): (SszContainerLegacyV28, JustifiedBalances)) -> Self { +impl From<(SszContainerV28, JustifiedBalances)> for SszContainerV17 { + fn from((v28, balances): (SszContainerV28, JustifiedBalances)) -> Self { Self { votes: v28.votes, balances: balances.effective_balances.clone(), @@ -121,8 +129,8 @@ impl From<(SszContainerLegacyV28, JustifiedBalances)> for SszContainerLegacyV17 } // Convert legacy V28 to current V29. 
-impl From for SszContainerV29 { - fn from(v28: SszContainerLegacyV28) -> Self { +impl From for SszContainerV29 { + fn from(v28: SszContainerV28) -> Self { Self { votes: v28.votes, prune_threshold: v28.prune_threshold, @@ -136,7 +144,7 @@ impl From for SszContainerV29 { } // Downgrade current V29 to legacy V28 (lossy: V29 nodes lose payload-specific fields). -impl From for SszContainerLegacyV28 { +impl From for SszContainerV28 { fn from(v29: SszContainerV29) -> Self { Self { votes: v29.votes, From 59033a509206f705716c716fd44513cfb04a2f54 Mon Sep 17 00:00:00 2001 From: hopinheimer Date: Thu, 26 Feb 2026 04:46:26 -0500 Subject: [PATCH 006/127] lint --- consensus/fork_choice/src/fork_choice.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index 990aedf2c35..5305621d286 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -1138,9 +1138,7 @@ where && indexed_attestation.data().slot == block.slot && indexed_attestation.data().index != 0 { - return Err(InvalidAttestation::PayloadAttestationDuringSameSlot { - slot: block.slot, - }); + return Err(InvalidAttestation::PayloadAttestationDuringSameSlot { slot: block.slot }); } Ok(()) From e68cc0311495ecb99a6de920001bfc0ef48b2f4a Mon Sep 17 00:00:00 2001 From: hopinheimer Date: Mon, 2 Mar 2026 13:25:03 -0500 Subject: [PATCH 007/127] vote sanity and genesis epoch fix --- beacon_node/beacon_chain/src/beacon_chain.rs | 4 +-- .../beacon_chain/src/block_verification.rs | 6 ++--- consensus/fork_choice/src/fork_choice.rs | 25 +++++++------------ consensus/fork_choice/src/lib.rs | 2 +- consensus/fork_choice/tests/tests.rs | 6 ++--- .../src/fork_choice_test_definition/votes.rs | 18 ++++++------- consensus/proto_array/src/proto_array.rs | 3 +-- .../src/proto_array_fork_choice.rs | 2 ++ 8 files changed, 30 insertions(+), 36 deletions(-) diff --git 
a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index fd85fce5fd3..a5beb4d2b8e 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -85,7 +85,7 @@ use execution_layer::{ }; use fixed_bytes::FixedBytesExtended; use fork_choice::{ - AttestationFromBlock, ExecutionStatus, ForkChoice, ForkchoiceUpdateParameters, + ExecutionStatus, ForkChoice, ForkchoiceUpdateParameters, InvalidationOperation, PayloadVerificationStatus, ResetPayloadStatuses, }; use futures::channel::mpsc::Sender; @@ -2262,7 +2262,7 @@ impl BeaconChain { .on_attestation( self.slot()?, verified.indexed_attestation().to_ref(), - AttestationFromBlock::False, + false, &self.spec, ) .map_err(Into::into) diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index ee0bb9e6ff6..41daa2c4603 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -71,7 +71,7 @@ use bls::{PublicKey, PublicKeyBytes}; use educe::Educe; use eth2::types::{BlockGossip, EventKind}; use execution_layer::PayloadStatus; -pub use fork_choice::{AttestationFromBlock, PayloadVerificationStatus}; +pub use fork_choice::PayloadVerificationStatus; use metrics::TryExt; use parking_lot::RwLockReadGuard; use proto_array::Block as ProtoBlock; @@ -1664,7 +1664,7 @@ impl ExecutionPendingBlock { match fork_choice.on_attestation( current_slot, indexed_attestation, - AttestationFromBlock::True, + true, &chain.spec, ) { Ok(()) => Ok(()), @@ -1685,7 +1685,7 @@ impl ExecutionPendingBlock { match fork_choice.on_payload_attestation( current_slot, indexed_payload_attestation, - AttestationFromBlock::True, + true, ) { Ok(()) => Ok(()), // Ignore invalid payload attestations whilst importing from a block. 
diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index 5305621d286..26361c7941e 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -312,14 +312,6 @@ fn dequeue_payload_attestations( } /// Denotes whether an attestation we are processing was received from a block or from gossip. -/// Equivalent to the `is_from_block` `bool` in: -/// -/// https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/fork-choice.md#validate_on_attestation -#[derive(Clone, Copy)] -pub enum AttestationFromBlock { - True, - False, -} /// Parameters which are cached between calls to `ForkChoice::get_head`. #[derive(Debug, Clone, Copy, PartialEq, Eq)] @@ -1056,7 +1048,7 @@ where fn validate_on_attestation( &self, indexed_attestation: IndexedAttestationRef, - is_from_block: AttestationFromBlock, + is_from_block: bool, spec: &ChainSpec, ) -> Result<(), InvalidAttestation> { // There is no point in processing an attestation with an empty bitfield. 
Reject @@ -1070,7 +1062,7 @@ where let target = indexed_attestation.data().target; - if matches!(is_from_block, AttestationFromBlock::False) { + if !is_from_block { self.validate_target_epoch_against_current_time(target.epoch)?; } @@ -1148,7 +1140,7 @@ where fn validate_on_payload_attestation( &self, indexed_payload_attestation: &IndexedPayloadAttestation, - _is_from_block: AttestationFromBlock, + _is_from_block: bool, ) -> Result<(), InvalidAttestation> { if indexed_payload_attestation.attesting_indices.is_empty() { return Err(InvalidAttestation::EmptyAggregationBitfield); @@ -1198,7 +1190,7 @@ where &mut self, system_time_current_slot: Slot, attestation: IndexedAttestationRef, - is_from_block: AttestationFromBlock, + is_from_block: bool, spec: &ChainSpec, ) -> Result<(), Error> { let _timer = metrics::start_timer(&metrics::FORK_CHOICE_ON_ATTESTATION_TIMES); @@ -1251,7 +1243,7 @@ where &mut self, system_time_current_slot: Slot, attestation: &IndexedPayloadAttestation, - is_from_block: AttestationFromBlock, + is_from_block: bool, ) -> Result<(), Error> { self.update_time(system_time_current_slot)?; @@ -1264,9 +1256,10 @@ where let processing_slot = self.fc_store.get_current_slot(); // Payload attestations from blocks can be applied in the next slot (S+1 for data.slot=S), // while non-block payload attestations are delayed one extra slot. 
- let should_process_now = match is_from_block { - AttestationFromBlock::True => attestation.data.slot < processing_slot, - AttestationFromBlock::False => attestation.data.slot + 1_u64 < processing_slot, + let should_process_now = if is_from_block { + attestation.data.slot < processing_slot + } else { + attestation.data.slot.saturating_add(1_u64) < processing_slot }; if should_process_now { diff --git a/consensus/fork_choice/src/lib.rs b/consensus/fork_choice/src/lib.rs index 6091de6fdd9..d3a9d246228 100644 --- a/consensus/fork_choice/src/lib.rs +++ b/consensus/fork_choice/src/lib.rs @@ -3,7 +3,7 @@ mod fork_choice_store; mod metrics; pub use crate::fork_choice::{ - AttestationFromBlock, Error, ForkChoice, ForkChoiceView, ForkchoiceUpdateParameters, + Error, ForkChoice, ForkChoiceView, ForkchoiceUpdateParameters, InvalidAttestation, InvalidBlock, PayloadVerificationStatus, PersistedForkChoice, PersistedForkChoiceV17, PersistedForkChoiceV28, PersistedForkChoiceV29, QueuedAttestation, QueuedPayloadAttestation, ResetPayloadStatuses, diff --git a/consensus/fork_choice/tests/tests.rs b/consensus/fork_choice/tests/tests.rs index 029e8612899..eea94b2e775 100644 --- a/consensus/fork_choice/tests/tests.rs +++ b/consensus/fork_choice/tests/tests.rs @@ -10,7 +10,7 @@ use beacon_chain::{ use bls::AggregateSignature; use fixed_bytes::FixedBytesExtended; use fork_choice::{ - AttestationFromBlock, ForkChoiceStore, InvalidAttestation, InvalidBlock, + ForkChoiceStore, InvalidAttestation, InvalidBlock, PayloadVerificationStatus, QueuedAttestation, QueuedPayloadAttestation, }; use state_processing::state_advance::complete_state_advance; @@ -1033,7 +1033,7 @@ async fn payload_attestation_for_previous_slot_is_accepted_at_next_slot() { .on_payload_attestation( current_slot, &payload_attestation, - AttestationFromBlock::True, + true, ); assert!( @@ -1082,7 +1082,7 @@ async fn non_block_payload_attestation_at_next_slot_is_delayed() { let result = chain .canonical_head 
.fork_choice_write_lock() - .on_payload_attestation(s_plus_1, &payload_attestation, AttestationFromBlock::False); + .on_payload_attestation(s_plus_1, &payload_attestation, false); assert!( result.is_ok(), "payload attestation should be accepted for queueing" diff --git a/consensus/proto_array/src/fork_choice_test_definition/votes.rs b/consensus/proto_array/src/fork_choice_test_definition/votes.rs index 49afae2d4a1..ad45d073c2b 100644 --- a/consensus/proto_array/src/fork_choice_test_definition/votes.rs +++ b/consensus/proto_array/src/fork_choice_test_definition/votes.rs @@ -339,7 +339,7 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { execution_payload_block_hash: None, }); - // Ensure that 5 becomes the head. + // Ensure that 5 is filtered out and the head stays at 4. // // 0 // / \ @@ -347,9 +347,9 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { // | // 3 // | - // 4 + // 4 <- head // / - // head-> 5 + // 5 ops.push(Operation::FindHead { justified_checkpoint: Checkpoint { epoch: Epoch::new(1), @@ -360,7 +360,7 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { root: get_root(0), }, justified_state_balances: balances.clone(), - expected_head: get_root(5), + expected_head: get_root(4), }); // Add block 6, which has a justified epoch of 0. @@ -476,8 +476,8 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { execution_payload_block_hash: None, }); - // Ensure that 9 is the head. The branch rooted at 5 remains viable and its best descendant - // is selected. + // Ensure that 6 is the head, even though 5 has all the votes. This is testing to ensure + // that 5 is filtered out due to a differing justified epoch. 
// // 0 // / \ @@ -487,13 +487,13 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { // | // 4 // / \ - // 5 6 + // 5 6 <- head // | // 7 // | // 8 // / - // head-> 9 + // 9 ops.push(Operation::FindHead { justified_checkpoint: Checkpoint { epoch: Epoch::new(1), @@ -504,7 +504,7 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { root: get_root(0), }, justified_state_balances: balances.clone(), - expected_head: get_root(9), + expected_head: get_root(6), }); // Change fork-choice justified epoch to 1, and the start block to 5 and ensure that 9 is diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 7403937d393..f062fef4188 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -130,7 +130,6 @@ pub struct ProtoNode { #[superstruct(only(V29), partial_getter(copy))] pub execution_payload_block_hash: ExecutionBlockHash, /// Tiebreaker for payload preference when full_payload_weight == empty_payload_weight. - /// Per spec: prefer Full if block was timely and data is available; otherwise prefer Empty. 
#[superstruct(only(V29), partial_getter(copy))] pub payload_tiebreak: PayloadTiebreak, } @@ -1152,7 +1151,7 @@ impl ProtoArray { return false; } - let genesis_epoch = Epoch::new(1); + let genesis_epoch = Epoch::new(0); let current_epoch = current_slot.epoch(E::slots_per_epoch()); let node_epoch = node.slot().epoch(E::slots_per_epoch()); let node_justified_checkpoint = node.justified_checkpoint(); diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index 9400aafed7a..37054d95524 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -530,6 +530,8 @@ impl ProtoArrayForkChoice { if attestation_slot > vote.next_slot || *vote == VoteTracker::default() { vote.next_root = block_root; vote.next_slot = attestation_slot; + vote.next_payload_present = false; + vote.next_blob_data_available = false; } Ok(()) From 6f6da5b393091b4b98f63f5b410804968d856586 Mon Sep 17 00:00:00 2001 From: hopinheimer Date: Mon, 2 Mar 2026 13:27:45 -0500 Subject: [PATCH 008/127] lint --- beacon_node/beacon_chain/src/beacon_chain.rs | 4 ++-- beacon_node/beacon_chain/src/block_verification.rs | 7 +------ consensus/fork_choice/src/fork_choice.rs | 1 - consensus/fork_choice/src/lib.rs | 8 ++++---- consensus/fork_choice/tests/tests.rs | 10 +++------- 5 files changed, 10 insertions(+), 20 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index a5beb4d2b8e..9dc1a5206e3 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -85,8 +85,8 @@ use execution_layer::{ }; use fixed_bytes::FixedBytesExtended; use fork_choice::{ - ExecutionStatus, ForkChoice, ForkchoiceUpdateParameters, - InvalidationOperation, PayloadVerificationStatus, ResetPayloadStatuses, + ExecutionStatus, ForkChoice, ForkchoiceUpdateParameters, InvalidationOperation, + 
PayloadVerificationStatus, ResetPayloadStatuses, }; use futures::channel::mpsc::Sender; use itertools::Itertools; diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 41daa2c4603..ab60b8b9555 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -1661,12 +1661,7 @@ impl ExecutionPendingBlock { .get_indexed_attestation(&state, attestation) .map_err(|e| BlockError::PerBlockProcessingError(e.into_with_index(i)))?; - match fork_choice.on_attestation( - current_slot, - indexed_attestation, - true, - &chain.spec, - ) { + match fork_choice.on_attestation(current_slot, indexed_attestation, true, &chain.spec) { Ok(()) => Ok(()), // Ignore invalid attestations whilst importing attestations from a block. The // block might be very old and therefore the attestations useless to fork choice. diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index 26361c7941e..112c86eee69 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -312,7 +312,6 @@ fn dequeue_payload_attestations( } /// Denotes whether an attestation we are processing was received from a block or from gossip. - /// Parameters which are cached between calls to `ForkChoice::get_head`. 
#[derive(Debug, Clone, Copy, PartialEq, Eq)] pub struct ForkchoiceUpdateParameters { diff --git a/consensus/fork_choice/src/lib.rs b/consensus/fork_choice/src/lib.rs index d3a9d246228..824fc2dff05 100644 --- a/consensus/fork_choice/src/lib.rs +++ b/consensus/fork_choice/src/lib.rs @@ -3,10 +3,10 @@ mod fork_choice_store; mod metrics; pub use crate::fork_choice::{ - Error, ForkChoice, ForkChoiceView, ForkchoiceUpdateParameters, - InvalidAttestation, InvalidBlock, PayloadVerificationStatus, PersistedForkChoice, - PersistedForkChoiceV17, PersistedForkChoiceV28, PersistedForkChoiceV29, QueuedAttestation, - QueuedPayloadAttestation, ResetPayloadStatuses, + Error, ForkChoice, ForkChoiceView, ForkchoiceUpdateParameters, InvalidAttestation, + InvalidBlock, PayloadVerificationStatus, PersistedForkChoice, PersistedForkChoiceV17, + PersistedForkChoiceV28, PersistedForkChoiceV29, QueuedAttestation, QueuedPayloadAttestation, + ResetPayloadStatuses, }; pub use fork_choice_store::ForkChoiceStore; pub use proto_array::{ diff --git a/consensus/fork_choice/tests/tests.rs b/consensus/fork_choice/tests/tests.rs index eea94b2e775..da5405f06d5 100644 --- a/consensus/fork_choice/tests/tests.rs +++ b/consensus/fork_choice/tests/tests.rs @@ -10,8 +10,8 @@ use beacon_chain::{ use bls::AggregateSignature; use fixed_bytes::FixedBytesExtended; use fork_choice::{ - ForkChoiceStore, InvalidAttestation, InvalidBlock, - PayloadVerificationStatus, QueuedAttestation, QueuedPayloadAttestation, + ForkChoiceStore, InvalidAttestation, InvalidBlock, PayloadVerificationStatus, + QueuedAttestation, QueuedPayloadAttestation, }; use state_processing::state_advance::complete_state_advance; use std::fmt; @@ -1030,11 +1030,7 @@ async fn payload_attestation_for_previous_slot_is_accepted_at_next_slot() { let result = chain .canonical_head .fork_choice_write_lock() - .on_payload_attestation( - current_slot, - &payload_attestation, - true, - ); + .on_payload_attestation(current_slot, &payload_attestation, true); 
assert!( result.is_ok(), From 275ac11200416d436714c2ea1859f1d62e397aa2 Mon Sep 17 00:00:00 2001 From: hopinheimer Date: Mon, 2 Mar 2026 15:33:53 -0500 Subject: [PATCH 009/127] test fixes --- consensus/fork_choice/src/fork_choice.rs | 18 +++++++- consensus/fork_choice/tests/tests.rs | 42 +++++-------------- .../src/proto_array_fork_choice.rs | 2 +- 3 files changed, 29 insertions(+), 33 deletions(-) diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index 112c86eee69..d4c4fa25873 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -178,6 +178,11 @@ pub enum InvalidAttestation { /// A same-slot attestation has a non-zero index, indicating a payload attestation during the /// same slot as the block. Payload attestations must only arrive in subsequent slots. PayloadAttestationDuringSameSlot { slot: Slot }, + /// A gossip payload attestation must be for the current slot. + PayloadAttestationNotCurrentSlot { + attestation_slot: Slot, + current_slot: Slot, + }, } impl From for Error { @@ -1139,7 +1144,7 @@ where fn validate_on_payload_attestation( &self, indexed_payload_attestation: &IndexedPayloadAttestation, - _is_from_block: bool, + is_from_block: bool, ) -> Result<(), InvalidAttestation> { if indexed_payload_attestation.attesting_indices.is_empty() { return Err(InvalidAttestation::EmptyAggregationBitfield); @@ -1159,6 +1164,17 @@ where }); } + // Gossip payload attestations must be for the current slot. 
+ // https://github.com/ethereum/consensus-specs/blob/master/specs/gloas/fork-choice.md + if !is_from_block + && indexed_payload_attestation.data.slot != self.fc_store.get_current_slot() + { + return Err(InvalidAttestation::PayloadAttestationNotCurrentSlot { + attestation_slot: indexed_payload_attestation.data.slot, + current_slot: self.fc_store.get_current_slot(), + }); + } + if self.fc_store.get_current_slot() == block.slot && indexed_payload_attestation.data.payload_present { diff --git a/consensus/fork_choice/tests/tests.rs b/consensus/fork_choice/tests/tests.rs index da5405f06d5..68ec79d113e 100644 --- a/consensus/fork_choice/tests/tests.rs +++ b/consensus/fork_choice/tests/tests.rs @@ -1047,10 +1047,10 @@ async fn payload_attestation_for_previous_slot_is_accepted_at_next_slot() { assert!(latest_message.payload_present); } -/// Non-block payload attestations at slot S+1 for data.slot S are delayed; they are not applied -/// until a later slot. +/// Gossip payload attestations must be for the current slot. A payload attestation for slot S +/// received at slot S+1 should be rejected per the spec. 
#[tokio::test] -async fn non_block_payload_attestation_at_next_slot_is_delayed() { +async fn non_block_payload_attestation_for_previous_slot_is_rejected() { let test = ForkChoiceTest::new() .apply_blocks_without_new_attestations(1) .await; @@ -1062,7 +1062,6 @@ async fn non_block_payload_attestation_at_next_slot_is_delayed() { .expect("block A should exist"); let block_a_root = block_a.canonical_root(); let s_plus_1 = block_a.slot().saturating_add(1_u64); - let s_plus_2 = block_a.slot().saturating_add(2_u64); let payload_attestation = IndexedPayloadAttestation:: { attesting_indices: vec![0_u64].try_into().expect("valid attesting indices"), @@ -1080,34 +1079,15 @@ async fn non_block_payload_attestation_at_next_slot_is_delayed() { .fork_choice_write_lock() .on_payload_attestation(s_plus_1, &payload_attestation, false); assert!( - result.is_ok(), - "payload attestation should be accepted for queueing" - ); - - // Vote should not be applied yet; message remains unset. - let latest_before = chain - .canonical_head - .fork_choice_read_lock() - .latest_message(0); - assert!( - latest_before.is_none(), - "non-block payload attestation at S+1 should not apply immediately" + matches!( + result, + Err(ForkChoiceError::InvalidAttestation( + InvalidAttestation::PayloadAttestationNotCurrentSlot { .. } + )) + ), + "gossip payload attestation for previous slot should be rejected, got: {:?}", + result ); - - // Advance fork choice time to S+2, queue should now be processed. 
- chain - .canonical_head - .fork_choice_write_lock() - .update_time(s_plus_2) - .expect("update_time should succeed"); - - let latest_after = chain - .canonical_head - .fork_choice_read_lock() - .latest_message(0) - .expect("latest message should exist after delay"); - assert_eq!(latest_after.slot, s_plus_2); - assert!(latest_after.payload_present); } /// Specification v0.12.1: diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index 37054d95524..01a8f10064e 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -659,7 +659,7 @@ impl ProtoArrayForkChoice { )?; // Only re-org a single slot. This prevents cascading failures during asynchrony. - let head_slot_ok = info.head_node.slot() + 1 == current_slot; + let head_slot_ok = info.head_node.slot().saturating_add(1_u64) == current_slot; if !head_slot_ok { return Err(DoNotReOrg::HeadDistance.into()); } From 9c6f25cf3642c2ddfe40138098618066cd20f542 Mon Sep 17 00:00:00 2001 From: hopinheimer Date: Mon, 9 Mar 2026 19:06:50 -0400 Subject: [PATCH 010/127] fix migration `SszContainer` scripts --- consensus/fork_choice/src/fork_choice.rs | 2 +- .../src/fork_choice_test_definition.rs | 3 +-- .../proto_array/src/proto_array_fork_choice.rs | 8 ++------ consensus/proto_array/src/ssz_container.rs | 16 ++++------------ 4 files changed, 8 insertions(+), 21 deletions(-) diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index d4c4fa25873..12258a03dcc 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -1722,7 +1722,7 @@ where PersistedForkChoice { proto_array: self .proto_array() - .as_ssz_container(self.justified_checkpoint(), self.finalized_checkpoint()), + .as_ssz_container(), queued_attestations: self.queued_attestations().to_vec(), queued_payload_attestations: 
self.queued_payload_attestations.clone(), } diff --git a/consensus/proto_array/src/fork_choice_test_definition.rs b/consensus/proto_array/src/fork_choice_test_definition.rs index f88cf06349e..4ff91638f8c 100644 --- a/consensus/proto_array/src/fork_choice_test_definition.rs +++ b/consensus/proto_array/src/fork_choice_test_definition.rs @@ -522,8 +522,7 @@ fn get_checkpoint(i: u64) -> Checkpoint { } fn check_bytes_round_trip(original: &ProtoArrayForkChoice) { - // The checkpoint are ignored `ProtoArrayForkChoice::from_bytes` so any value is ok - let bytes = original.as_bytes(Checkpoint::default(), Checkpoint::default()); + let bytes = original.as_bytes(); let decoded = ProtoArrayForkChoice::from_bytes(&bytes, original.balances.clone()) .expect("fork choice should decode from bytes"); assert!( diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index 01a8f10064e..15062367bf0 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -1037,18 +1037,14 @@ impl ProtoArrayForkChoice { pub fn as_ssz_container( &self, - justified_checkpoint: Checkpoint, - finalized_checkpoint: Checkpoint, ) -> SszContainer { - SszContainer::from_proto_array(self, justified_checkpoint, finalized_checkpoint) + SszContainer::from_proto_array(self) } pub fn as_bytes( &self, - justified_checkpoint: Checkpoint, - finalized_checkpoint: Checkpoint, ) -> Vec { - self.as_ssz_container(justified_checkpoint, finalized_checkpoint) + self.as_ssz_container() .as_ssz_bytes() } diff --git a/consensus/proto_array/src/ssz_container.rs b/consensus/proto_array/src/ssz_container.rs index b7d4fa91b05..02c3e333451 100644 --- a/consensus/proto_array/src/ssz_container.rs +++ b/consensus/proto_array/src/ssz_container.rs @@ -49,10 +49,6 @@ pub struct SszContainerV28 { pub struct SszContainerV29 { pub votes: Vec, pub prune_threshold: usize, - // Deprecated, remove in a future schema 
migration - justified_checkpoint: Checkpoint, - // Deprecated, remove in a future schema migration - finalized_checkpoint: Checkpoint, pub nodes: Vec, pub indices: Vec<(Hash256, usize)>, pub previous_proposer_boost: ProposerBoost, @@ -61,16 +57,12 @@ pub struct SszContainerV29 { impl SszContainerV29 { pub fn from_proto_array( from: &ProtoArrayForkChoice, - justified_checkpoint: Checkpoint, - finalized_checkpoint: Checkpoint, ) -> Self { let proto_array = &from.proto_array; Self { votes: from.votes.0.clone(), prune_threshold: proto_array.prune_threshold, - justified_checkpoint, - finalized_checkpoint, nodes: proto_array.nodes.clone(), indices: proto_array.indices.iter().map(|(k, v)| (*k, *v)).collect(), previous_proposer_boost: proto_array.previous_proposer_boost, @@ -134,8 +126,6 @@ impl From for SszContainerV29 { Self { votes: v28.votes, prune_threshold: v28.prune_threshold, - justified_checkpoint: v28.justified_checkpoint, - finalized_checkpoint: v28.finalized_checkpoint, nodes: v28.nodes.into_iter().map(ProtoNode::V17).collect(), indices: v28.indices, previous_proposer_boost: v28.previous_proposer_boost, @@ -149,8 +139,10 @@ impl From for SszContainerV28 { Self { votes: v29.votes, prune_threshold: v29.prune_threshold, - justified_checkpoint: v29.justified_checkpoint, - finalized_checkpoint: v29.finalized_checkpoint, + // These checkpoints are not consumed in v28 paths since the upgrade from v17, + // we can safely default the values. 
+ justified_checkpoint: Checkpoint::default(), + finalized_checkpoint: Checkpoint::default(), nodes: v29 .nodes .into_iter() From 5679994285612728857f1ab7c7d064b7dc64e5d6 Mon Sep 17 00:00:00 2001 From: hopinheimer Date: Fri, 13 Mar 2026 10:55:16 -0400 Subject: [PATCH 011/127] addressing comments --- consensus/fork_choice/src/fork_choice.rs | 39 ++-- consensus/fork_choice/tests/tests.rs | 2 +- .../src/fork_choice_test_definition.rs | 4 +- .../execution_status.rs | 22 ++ .../ffg_updates.rs | 20 ++ .../gloas_payload.rs | 153 +++++++++++++ .../fork_choice_test_definition/no_votes.rs | 9 + .../src/fork_choice_test_definition/votes.rs | 20 ++ consensus/proto_array/src/proto_array.rs | 206 ++++++++++++------ .../src/proto_array_fork_choice.rs | 20 +- consensus/proto_array/src/ssz_container.rs | 4 +- 11 files changed, 398 insertions(+), 101 deletions(-) diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index 12258a03dcc..ea17e20f027 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -175,9 +175,11 @@ pub enum InvalidAttestation { /// The attestation is attesting to a state that is later than itself. (Viz., attesting to the /// future). AttestsToFutureBlock { block: Slot, attestation: Slot }, - /// A same-slot attestation has a non-zero index, indicating a payload attestation during the - /// same slot as the block. Payload attestations must only arrive in subsequent slots. - PayloadAttestationDuringSameSlot { slot: Slot }, + /// A same-slot attestation has a non-zero index, which is invalid post-GLOAS. + InvalidSameSlotAttestationIndex { slot: Slot }, + /// A payload attestation votes payload_present for a block in the current slot, which is + /// invalid because the payload cannot be known yet. + PayloadPresentDuringSameSlot { slot: Slot }, /// A gossip payload attestation must be for the current slot. 
PayloadAttestationNotCurrentSlot { attestation_slot: Slot, @@ -269,7 +271,7 @@ impl<'a, E: EthSpec> From> for QueuedAttestation { /// Used for queuing payload attestations (PTC votes) from the current slot. /// Payload attestations have different dequeue timing than regular attestations: -/// non-block payload attestations need an extra slot of delay (slot + 1 < current_slot). +/// gossiped payload attestations need an extra slot of delay (slot + 1 < current_slot). #[derive(Clone, PartialEq, Encode, Decode)] pub struct QueuedPayloadAttestation { slot: Slot, @@ -420,7 +422,8 @@ where } else if let Ok(signed_bid) = anchor_block.message().body().signed_execution_payload_bid() { - // Gloas: hashes come from the execution payload bid. + // Gloas: execution status is irrelevant post-Gloas; payload validation + // is decoupled from beacon blocks. ( ExecutionStatus::irrelevant(), Some(signed_bid.message.parent_block_hash), @@ -990,6 +993,12 @@ where Ok(()) } + pub fn on_execution_payload(&mut self, block_root: Hash256) -> Result<(), Error> { + self.proto_array + .on_execution_payload(block_root) + .map_err(Error::FailedToProcessValidExecutionPayload) + } + /// Update checkpoints in store if necessary fn update_checkpoints( &mut self, @@ -1126,15 +1135,15 @@ where }); } - // Post-GLOAS: same-slot attestations with index != 0 indicate a payload-present vote. - // These must go through `on_payload_attestation`, not `on_attestation`. + // Post-GLOAS: same-slot attestations must have index == 0. Attestations with + // index != 0 during the same slot as the block are invalid. 
if spec .fork_name_at_slot::(indexed_attestation.data().slot) .gloas_enabled() && indexed_attestation.data().slot == block.slot && indexed_attestation.data().index != 0 { - return Err(InvalidAttestation::PayloadAttestationDuringSameSlot { slot: block.slot }); + return Err(InvalidAttestation::InvalidSameSlotAttestationIndex { slot: block.slot }); } Ok(()) @@ -1175,10 +1184,14 @@ where }); } - if self.fc_store.get_current_slot() == block.slot + // A payload attestation voting payload_present for a block in the current slot is + // invalid: the payload cannot be known yet. This only applies to gossip attestations; + // payload attestations from blocks have already been validated by the block producer. + if !is_from_block + && self.fc_store.get_current_slot() == block.slot && indexed_payload_attestation.data.payload_present { - return Err(InvalidAttestation::PayloadAttestationDuringSameSlot { slot: block.slot }); + return Err(InvalidAttestation::PayloadPresentDuringSameSlot { slot: block.slot }); } Ok(()) @@ -1270,7 +1283,7 @@ where let processing_slot = self.fc_store.get_current_slot(); // Payload attestations from blocks can be applied in the next slot (S+1 for data.slot=S), - // while non-block payload attestations are delayed one extra slot. + // while gossiped payload attestations are delayed one extra slot. let should_process_now = if is_from_block { attestation.data.slot < processing_slot } else { @@ -1720,9 +1733,7 @@ where /// be instantiated again later. 
pub fn to_persisted(&self) -> PersistedForkChoice { PersistedForkChoice { - proto_array: self - .proto_array() - .as_ssz_container(), + proto_array: self.proto_array().as_ssz_container(), queued_attestations: self.queued_attestations().to_vec(), queued_payload_attestations: self.queued_payload_attestations.clone(), } diff --git a/consensus/fork_choice/tests/tests.rs b/consensus/fork_choice/tests/tests.rs index 68ec79d113e..6ec1c8aeba6 100644 --- a/consensus/fork_choice/tests/tests.rs +++ b/consensus/fork_choice/tests/tests.rs @@ -993,7 +993,7 @@ async fn invalid_attestation_payload_during_same_slot() { |result| { assert_invalid_attestation!( result, - InvalidAttestation::PayloadAttestationDuringSameSlot { slot } + InvalidAttestation::InvalidSameSlotAttestationIndex { slot } if slot == Slot::new(1) ) }, diff --git a/consensus/proto_array/src/fork_choice_test_definition.rs b/consensus/proto_array/src/fork_choice_test_definition.rs index 4ff91638f8c..8451e2dc80f 100644 --- a/consensus/proto_array/src/fork_choice_test_definition.rs +++ b/consensus/proto_array/src/fork_choice_test_definition.rs @@ -28,6 +28,7 @@ pub enum Operation { finalized_checkpoint: Checkpoint, justified_state_balances: Vec, expected_head: Hash256, + current_slot: Slot, }, ProposerBoostFindHead { justified_checkpoint: Checkpoint, @@ -147,6 +148,7 @@ impl ForkChoiceTestDefinition { finalized_checkpoint, justified_state_balances, expected_head, + current_slot, } => { let justified_balances = JustifiedBalances::from_effective_balances(justified_state_balances) @@ -158,7 +160,7 @@ impl ForkChoiceTestDefinition { &justified_balances, Hash256::zero(), &equivocating_indices, - Slot::new(0), + current_slot, &spec, ) .unwrap_or_else(|e| { diff --git a/consensus/proto_array/src/fork_choice_test_definition/execution_status.rs b/consensus/proto_array/src/fork_choice_test_definition/execution_status.rs index 318407f5983..59e80dbe66b 100644 --- 
a/consensus/proto_array/src/fork_choice_test_definition/execution_status.rs +++ b/consensus/proto_array/src/fork_choice_test_definition/execution_status.rs @@ -16,6 +16,7 @@ pub fn get_execution_status_test_definition_01() -> ForkChoiceTestDefinition { }, justified_state_balances: balances.clone(), expected_head: get_root(0), + current_slot: Slot::new(0), }); // Add a block with a hash of 2. @@ -55,6 +56,7 @@ pub fn get_execution_status_test_definition_01() -> ForkChoiceTestDefinition { }, justified_state_balances: balances.clone(), expected_head: get_root(2), + current_slot: Slot::new(0), }); // Add a block with a hash of 1 that comes off the genesis block (this is a fork compared @@ -95,6 +97,7 @@ pub fn get_execution_status_test_definition_01() -> ForkChoiceTestDefinition { }, justified_state_balances: balances.clone(), expected_head: get_root(2), + current_slot: Slot::new(0), }); // Add a vote to block 1 @@ -124,6 +127,7 @@ pub fn get_execution_status_test_definition_01() -> ForkChoiceTestDefinition { }, justified_state_balances: balances.clone(), expected_head: get_root(1), + current_slot: Slot::new(0), }); ops.push(Operation::AssertWeight { @@ -166,6 +170,7 @@ pub fn get_execution_status_test_definition_01() -> ForkChoiceTestDefinition { }, justified_state_balances: balances.clone(), expected_head: get_root(2), + current_slot: Slot::new(0), }); ops.push(Operation::AssertWeight { @@ -222,6 +227,7 @@ pub fn get_execution_status_test_definition_01() -> ForkChoiceTestDefinition { }, justified_state_balances: balances.clone(), expected_head: get_root(2), + current_slot: Slot::new(0), }); ops.push(Operation::AssertWeight { @@ -272,6 +278,7 @@ pub fn get_execution_status_test_definition_01() -> ForkChoiceTestDefinition { }, justified_state_balances: balances.clone(), expected_head: get_root(2), + current_slot: Slot::new(0), }); ops.push(Operation::AssertWeight { @@ -321,6 +328,7 @@ pub fn get_execution_status_test_definition_01() -> ForkChoiceTestDefinition { }, 
justified_state_balances: balances.clone(), expected_head: get_root(2), + current_slot: Slot::new(0), }); // Invalidation of 3 should have removed upstream weight. @@ -374,6 +382,7 @@ pub fn get_execution_status_test_definition_01() -> ForkChoiceTestDefinition { }, justified_state_balances: balances, expected_head: get_root(1), + current_slot: Slot::new(0), }); ops.push(Operation::AssertWeight { @@ -427,6 +436,7 @@ pub fn get_execution_status_test_definition_02() -> ForkChoiceTestDefinition { }, justified_state_balances: balances.clone(), expected_head: get_root(0), + current_slot: Slot::new(0), }); // Add a block with a hash of 2. @@ -466,6 +476,7 @@ pub fn get_execution_status_test_definition_02() -> ForkChoiceTestDefinition { }, justified_state_balances: balances.clone(), expected_head: get_root(2), + current_slot: Slot::new(0), }); // Add a block with a hash of 1 that comes off the genesis block (this is a fork compared @@ -506,6 +517,7 @@ pub fn get_execution_status_test_definition_02() -> ForkChoiceTestDefinition { }, justified_state_balances: balances.clone(), expected_head: get_root(2), + current_slot: Slot::new(0), }); // Add a vote to block 1 @@ -535,6 +547,7 @@ pub fn get_execution_status_test_definition_02() -> ForkChoiceTestDefinition { }, justified_state_balances: balances.clone(), expected_head: get_root(1), + current_slot: Slot::new(0), }); ops.push(Operation::AssertWeight { @@ -577,6 +590,7 @@ pub fn get_execution_status_test_definition_02() -> ForkChoiceTestDefinition { }, justified_state_balances: balances.clone(), expected_head: get_root(2), + current_slot: Slot::new(0), }); ops.push(Operation::AssertWeight { @@ -633,6 +647,7 @@ pub fn get_execution_status_test_definition_02() -> ForkChoiceTestDefinition { }, justified_state_balances: balances.clone(), expected_head: get_root(2), + current_slot: Slot::new(0), }); ops.push(Operation::AssertWeight { @@ -696,6 +711,7 @@ pub fn get_execution_status_test_definition_02() -> ForkChoiceTestDefinition { 
}, justified_state_balances: balances.clone(), expected_head: get_root(3), + current_slot: Slot::new(0), }); ops.push(Operation::AssertWeight { @@ -745,6 +761,7 @@ pub fn get_execution_status_test_definition_02() -> ForkChoiceTestDefinition { }, justified_state_balances: balances, expected_head: get_root(2), + current_slot: Slot::new(0), }); // Invalidation of 3 should have removed upstream weight. @@ -800,6 +817,7 @@ pub fn get_execution_status_test_definition_03() -> ForkChoiceTestDefinition { }, justified_state_balances: balances.clone(), expected_head: get_root(0), + current_slot: Slot::new(0), }); // Add a block with a hash of 2. @@ -839,6 +857,7 @@ pub fn get_execution_status_test_definition_03() -> ForkChoiceTestDefinition { }, justified_state_balances: balances.clone(), expected_head: get_root(2), + current_slot: Slot::new(0), }); // Add a block with a hash of 1 that comes off the genesis block (this is a fork compared @@ -879,6 +898,7 @@ pub fn get_execution_status_test_definition_03() -> ForkChoiceTestDefinition { }, justified_state_balances: balances.clone(), expected_head: get_root(2), + current_slot: Slot::new(0), }); // Add a vote to block 1 @@ -908,6 +928,7 @@ pub fn get_execution_status_test_definition_03() -> ForkChoiceTestDefinition { }, justified_state_balances: balances.clone(), expected_head: get_root(1), + current_slot: Slot::new(0), }); ops.push(Operation::AssertWeight { @@ -950,6 +971,7 @@ pub fn get_execution_status_test_definition_03() -> ForkChoiceTestDefinition { }, justified_state_balances: balances.clone(), expected_head: get_root(1), + current_slot: Slot::new(0), }); ops.push(Operation::AssertWeight { diff --git a/consensus/proto_array/src/fork_choice_test_definition/ffg_updates.rs b/consensus/proto_array/src/fork_choice_test_definition/ffg_updates.rs index 88665a22add..34a4372e274 100644 --- a/consensus/proto_array/src/fork_choice_test_definition/ffg_updates.rs +++ 
b/consensus/proto_array/src/fork_choice_test_definition/ffg_updates.rs @@ -10,6 +10,7 @@ pub fn get_ffg_case_01_test_definition() -> ForkChoiceTestDefinition { finalized_checkpoint: get_checkpoint(0), justified_state_balances: balances.clone(), expected_head: get_root(0), + current_slot: Slot::new(0), }); // Build the following tree (stick? lol). @@ -63,6 +64,7 @@ pub fn get_ffg_case_01_test_definition() -> ForkChoiceTestDefinition { finalized_checkpoint: get_checkpoint(0), justified_state_balances: balances.clone(), expected_head: get_root(3), + current_slot: Slot::new(0), }); // Ensure that with justified epoch 1 we find 3 @@ -83,6 +85,7 @@ pub fn get_ffg_case_01_test_definition() -> ForkChoiceTestDefinition { finalized_checkpoint: get_checkpoint(0), justified_state_balances: balances.clone(), expected_head: get_root(3), + current_slot: Slot::new(0), }); // Ensure that with justified epoch 2 we find 3 @@ -99,6 +102,7 @@ pub fn get_ffg_case_01_test_definition() -> ForkChoiceTestDefinition { finalized_checkpoint: get_checkpoint(1), justified_state_balances: balances, expected_head: get_root(3), + current_slot: Slot::new(0), }); // END OF TESTS @@ -123,6 +127,7 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { finalized_checkpoint: get_checkpoint(0), justified_state_balances: balances.clone(), expected_head: get_root(0), + current_slot: Slot::new(0), }); // Build the following tree. @@ -269,6 +274,7 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { finalized_checkpoint: get_checkpoint(0), justified_state_balances: balances.clone(), expected_head: get_root(10), + current_slot: Slot::new(0), }); // Same as above, but with justified epoch 2. 
ops.push(Operation::FindHead { @@ -279,6 +285,7 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { finalized_checkpoint: get_checkpoint(0), justified_state_balances: balances.clone(), expected_head: get_root(10), + current_slot: Slot::new(0), }); // Same as above, but with justified epoch 3. // @@ -293,6 +300,7 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { finalized_checkpoint: get_checkpoint(0), justified_state_balances: balances.clone(), expected_head: get_root(10), + current_slot: Slot::new(0), }); // Add a vote to 1. @@ -332,6 +340,7 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { finalized_checkpoint: get_checkpoint(0), justified_state_balances: balances.clone(), expected_head: get_root(9), + current_slot: Slot::new(0), }); // Save as above but justified epoch 2. ops.push(Operation::FindHead { @@ -342,6 +351,7 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { finalized_checkpoint: get_checkpoint(0), justified_state_balances: balances.clone(), expected_head: get_root(9), + current_slot: Slot::new(0), }); // Save as above but justified epoch 3. // @@ -356,6 +366,7 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { finalized_checkpoint: get_checkpoint(0), justified_state_balances: balances.clone(), expected_head: get_root(9), + current_slot: Slot::new(0), }); // Add a vote to 2. @@ -395,6 +406,7 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { finalized_checkpoint: get_checkpoint(0), justified_state_balances: balances.clone(), expected_head: get_root(10), + current_slot: Slot::new(0), }); // Same as above but justified epoch 2. 
ops.push(Operation::FindHead { @@ -405,6 +417,7 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { finalized_checkpoint: get_checkpoint(0), justified_state_balances: balances.clone(), expected_head: get_root(10), + current_slot: Slot::new(0), }); // Same as above but justified epoch 3. // @@ -419,6 +432,7 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { finalized_checkpoint: get_checkpoint(0), justified_state_balances: balances.clone(), expected_head: get_root(10), + current_slot: Slot::new(0), }); // Ensure that if we start at 1 we find 9 (just: 0, fin: 0). @@ -442,6 +456,7 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { finalized_checkpoint: get_checkpoint(0), justified_state_balances: balances.clone(), expected_head: get_root(9), + current_slot: Slot::new(0), }); // Same as above but justified epoch 2. ops.push(Operation::FindHead { @@ -452,6 +467,7 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { finalized_checkpoint: get_checkpoint(0), justified_state_balances: balances.clone(), expected_head: get_root(9), + current_slot: Slot::new(0), }); // Same as above but justified epoch 3. // @@ -466,6 +482,7 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { finalized_checkpoint: get_checkpoint(0), justified_state_balances: balances.clone(), expected_head: get_root(9), + current_slot: Slot::new(0), }); // Ensure that if we start at 2 we find 10 (just: 0, fin: 0). @@ -486,6 +503,7 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { finalized_checkpoint: get_checkpoint(0), justified_state_balances: balances.clone(), expected_head: get_root(10), + current_slot: Slot::new(0), }); // Same as above but justified epoch 2. 
ops.push(Operation::FindHead { @@ -496,6 +514,7 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { finalized_checkpoint: get_checkpoint(0), justified_state_balances: balances.clone(), expected_head: get_root(10), + current_slot: Slot::new(0), }); // Same as above but justified epoch 3. // @@ -510,6 +529,7 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { finalized_checkpoint: get_checkpoint(0), justified_state_balances: balances, expected_head: get_root(10), + current_slot: Slot::new(0), }); // END OF TESTS diff --git a/consensus/proto_array/src/fork_choice_test_definition/gloas_payload.rs b/consensus/proto_array/src/fork_choice_test_definition/gloas_payload.rs index 7579b016369..01f804c9aa4 100644 --- a/consensus/proto_array/src/fork_choice_test_definition/gloas_payload.rs +++ b/consensus/proto_array/src/fork_choice_test_definition/gloas_payload.rs @@ -71,6 +71,7 @@ pub fn get_gloas_chain_following_test_definition() -> ForkChoiceTestDefinition { finalized_checkpoint: get_checkpoint(0), justified_state_balances: vec![1], expected_head: get_root(3), + current_slot: Slot::new(0), }); ops.push(Operation::SetPayloadTiebreak { @@ -83,6 +84,7 @@ pub fn get_gloas_chain_following_test_definition() -> ForkChoiceTestDefinition { finalized_checkpoint: get_checkpoint(0), justified_state_balances: vec![1], expected_head: get_root(4), + current_slot: Slot::new(0), }); ForkChoiceTestDefinition { @@ -130,6 +132,7 @@ pub fn get_gloas_payload_probe_test_definition() -> ForkChoiceTestDefinition { finalized_checkpoint: get_checkpoint(0), justified_state_balances: vec![1, 1], expected_head: get_root(1), + current_slot: Slot::new(0), }); ops.push(Operation::AssertPayloadWeights { block_root: get_root(1), @@ -154,6 +157,7 @@ pub fn get_gloas_payload_probe_test_definition() -> ForkChoiceTestDefinition { finalized_checkpoint: get_checkpoint(0), justified_state_balances: vec![1, 1], expected_head: get_root(1), + current_slot: Slot::new(0), }); 
ops.push(Operation::AssertPayloadWeights { block_root: get_root(1), @@ -187,6 +191,7 @@ pub fn get_gloas_payload_probe_test_definition() -> ForkChoiceTestDefinition { finalized_checkpoint: get_checkpoint(0), justified_state_balances: vec![1, 1, 1], expected_head: get_root(5), + current_slot: Slot::new(0), }); ops.push(Operation::AssertPayloadWeights { block_root: get_root(5), @@ -261,6 +266,7 @@ pub fn get_gloas_find_head_vote_transition_test_definition() -> ForkChoiceTestDe finalized_checkpoint: get_checkpoint(0), justified_state_balances: vec![1], expected_head: get_root(3), + current_slot: Slot::new(0), }); // Validator 0 votes Empty branch -> head flips to 4. @@ -276,6 +282,7 @@ pub fn get_gloas_find_head_vote_transition_test_definition() -> ForkChoiceTestDe finalized_checkpoint: get_checkpoint(0), justified_state_balances: vec![1], expected_head: get_root(4), + current_slot: Slot::new(0), }); // Latest-message update back to Full branch -> head returns to 3. @@ -291,6 +298,7 @@ pub fn get_gloas_find_head_vote_transition_test_definition() -> ForkChoiceTestDe finalized_checkpoint: get_checkpoint(0), justified_state_balances: vec![1], expected_head: get_root(3), + current_slot: Slot::new(0), }); ops.push(Operation::AssertPayloadWeights { block_root: get_root(3), @@ -362,6 +370,7 @@ pub fn get_gloas_weight_priority_over_payload_preference_test_definition() finalized_checkpoint: get_checkpoint(0), justified_state_balances: vec![1], expected_head: get_root(3), + current_slot: Slot::new(0), }); // Add two Empty votes to make the Empty branch strictly heavier. 
@@ -384,6 +393,7 @@ pub fn get_gloas_weight_priority_over_payload_preference_test_definition() finalized_checkpoint: get_checkpoint(0), justified_state_balances: vec![1, 1], expected_head: get_root(4), + current_slot: Slot::new(0), }); ForkChoiceTestDefinition { @@ -452,6 +462,143 @@ pub fn get_gloas_parent_empty_when_child_points_to_grandparent_test_definition() } } +/// Test interleaving of blocks, regular attestations, and late-arriving PTC votes. +/// +/// Exercises the spec's `get_weight` rule: FULL/EMPTY virtual nodes at `current_slot - 1` +/// have weight 0, so payload preference is determined solely by the tiebreaker. +/// +/// genesis → block 1 (Full) → block 3 +/// → block 2 (Empty) → block 4 +/// +/// Timeline: +/// 1. Blocks 1 (Full) and 2 (Empty) arrive at slot 1 +/// 2. Regular attestations arrive (equal weight per branch) +/// 3. Child blocks 3 and 4 arrive at slot 2 +/// 4. PTC votes arrive for genesis (2 Full), making genesis prefer Full by weight +/// 5. At current_slot=1 (genesis is current-1), PTC weights are ignored → tiebreaker decides +/// 6. At current_slot=100 (genesis is old), PTC weights apply → Full branch wins +pub fn get_gloas_interleaved_attestations_test_definition() -> ForkChoiceTestDefinition { + let mut ops = vec![]; + + // Step 1: Two competing blocks at slot 1. 
+ ops.push(Operation::ProcessBlock { + slot: Slot::new(1), + root: get_root(1), + parent_root: get_root(0), + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + execution_payload_parent_hash: Some(get_hash(0)), + execution_payload_block_hash: Some(get_hash(1)), + }); + ops.push(Operation::ProcessBlock { + slot: Slot::new(1), + root: get_root(2), + parent_root: get_root(0), + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + execution_payload_parent_hash: Some(get_hash(99)), + execution_payload_block_hash: Some(get_hash(2)), + }); + + // Step 2: Regular attestations arrive, one per branch (equal CL weight). + ops.push(Operation::ProcessAttestation { + validator_index: 0, + block_root: get_root(1), + attestation_slot: Slot::new(1), + }); + ops.push(Operation::ProcessAttestation { + validator_index: 1, + block_root: get_root(2), + attestation_slot: Slot::new(1), + }); + + // Step 3: Child blocks at slot 2. + ops.push(Operation::ProcessBlock { + slot: Slot::new(2), + root: get_root(3), + parent_root: get_root(1), + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + execution_payload_parent_hash: Some(get_hash(1)), + execution_payload_block_hash: Some(get_hash(3)), + }); + ops.push(Operation::ProcessBlock { + slot: Slot::new(2), + root: get_root(4), + parent_root: get_root(2), + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + execution_payload_parent_hash: Some(get_hash(100)), + execution_payload_block_hash: Some(get_hash(4)), + }); + + // Step 4: PTC votes arrive for genesis, 2 Full votes from fresh validators. + // Vals 0 and 1 can't be reused because they already have votes at slot 1. + // Vals 2 and 3 target genesis; CL weight on genesis doesn't affect branch comparison. 
+ ops.push(Operation::ProcessPayloadAttestation { + validator_index: 2, + block_root: get_root(0), + attestation_slot: Slot::new(1), + payload_present: true, + blob_data_available: false, + }); + ops.push(Operation::ProcessPayloadAttestation { + validator_index: 3, + block_root: get_root(0), + attestation_slot: Slot::new(1), + payload_present: true, + blob_data_available: false, + }); + + // Set tiebreaker to Empty on genesis. + ops.push(Operation::SetPayloadTiebreak { + block_root: get_root(0), + is_timely: false, + is_data_available: false, + }); + + // Step 5: At current_slot=1, genesis (slot 0) is at current_slot-1. + // Per spec, FULL/EMPTY weights are zeroed → tiebreaker decides. + // Tiebreaker is Empty → Empty branch (block 4) wins. + ops.push(Operation::FindHead { + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + justified_state_balances: vec![1, 1, 1, 1], + expected_head: get_root(4), + current_slot: Slot::new(1), + }); + + // Step 6: At current_slot=100, genesis (slot 0) is no longer at current_slot-1. + // FULL/EMPTY weights now apply. Genesis has Full > Empty → prefers Full. + // Full branch (block 3) wins despite Empty tiebreaker. + ops.push(Operation::FindHead { + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + justified_state_balances: vec![1, 1, 1, 1], + expected_head: get_root(3), + current_slot: Slot::new(100), + }); + + // Verify the PTC weights are recorded on genesis. 
+ // full = 2 (PTC votes) + 1 (back-propagated from Full child block 1) = 3 + // empty = 0 (PTC votes) + 1 (back-propagated from Empty child block 2) = 1 + ops.push(Operation::AssertPayloadWeights { + block_root: get_root(0), + expected_full_weight: 3, + expected_empty_weight: 1, + }); + + ForkChoiceTestDefinition { + finalized_block_slot: Slot::new(0), + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + operations: ops, + execution_payload_parent_hash: Some(get_hash(42)), + execution_payload_block_hash: Some(get_hash(0)), + spec: Some(gloas_spec()), + } +} + #[cfg(test)] mod tests { use super::*; @@ -485,4 +632,10 @@ mod tests { let test = get_gloas_parent_empty_when_child_points_to_grandparent_test_definition(); test.run(); } + + #[test] + fn interleaved_attestations() { + let test = get_gloas_interleaved_attestations_test_definition(); + test.run(); + } } diff --git a/consensus/proto_array/src/fork_choice_test_definition/no_votes.rs b/consensus/proto_array/src/fork_choice_test_definition/no_votes.rs index 61e4c1270ce..71d4c035aef 100644 --- a/consensus/proto_array/src/fork_choice_test_definition/no_votes.rs +++ b/consensus/proto_array/src/fork_choice_test_definition/no_votes.rs @@ -18,6 +18,7 @@ pub fn get_no_votes_test_definition() -> ForkChoiceTestDefinition { }, justified_state_balances: balances.clone(), expected_head: Hash256::zero(), + current_slot: Slot::new(0), }, // Add block 2 // @@ -55,6 +56,7 @@ pub fn get_no_votes_test_definition() -> ForkChoiceTestDefinition { }, justified_state_balances: balances.clone(), expected_head: get_root(2), + current_slot: Slot::new(0), }, // Add block 1 // @@ -92,6 +94,7 @@ pub fn get_no_votes_test_definition() -> ForkChoiceTestDefinition { }, justified_state_balances: balances.clone(), expected_head: get_root(2), + current_slot: Slot::new(0), }, // Add block 3 // @@ -133,6 +136,7 @@ pub fn get_no_votes_test_definition() -> ForkChoiceTestDefinition { }, justified_state_balances: 
balances.clone(), expected_head: get_root(2), + current_slot: Slot::new(0), }, // Add block 4 // @@ -174,6 +178,7 @@ pub fn get_no_votes_test_definition() -> ForkChoiceTestDefinition { }, justified_state_balances: balances.clone(), expected_head: get_root(4), + current_slot: Slot::new(0), }, // Add block 5 with a justified epoch of 2 // @@ -216,6 +221,7 @@ pub fn get_no_votes_test_definition() -> ForkChoiceTestDefinition { }, justified_state_balances: balances.clone(), expected_head: get_root(5), + current_slot: Slot::new(0), }, // Ensure there is no error when starting from a block that has the // wrong justified epoch. @@ -242,6 +248,7 @@ pub fn get_no_votes_test_definition() -> ForkChoiceTestDefinition { }, justified_state_balances: balances.clone(), expected_head: get_root(5), + current_slot: Slot::new(0), }, // Set the justified epoch to 2 and the start block to 5 and ensure 5 is the head. // @@ -260,6 +267,7 @@ pub fn get_no_votes_test_definition() -> ForkChoiceTestDefinition { }, justified_state_balances: balances.clone(), expected_head: get_root(5), + current_slot: Slot::new(0), }, // Add block 6 // @@ -303,6 +311,7 @@ pub fn get_no_votes_test_definition() -> ForkChoiceTestDefinition { }, justified_state_balances: balances, expected_head: get_root(6), + current_slot: Slot::new(0), }, ]; diff --git a/consensus/proto_array/src/fork_choice_test_definition/votes.rs b/consensus/proto_array/src/fork_choice_test_definition/votes.rs index ad45d073c2b..3ba21db48a4 100644 --- a/consensus/proto_array/src/fork_choice_test_definition/votes.rs +++ b/consensus/proto_array/src/fork_choice_test_definition/votes.rs @@ -16,6 +16,7 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { }, justified_state_balances: balances.clone(), expected_head: get_root(0), + current_slot: Slot::new(0), }); // Add a block with a hash of 2. 
@@ -55,6 +56,7 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { }, justified_state_balances: balances.clone(), expected_head: get_root(2), + current_slot: Slot::new(0), }); // Add a block with a hash of 1 that comes off the genesis block (this is a fork compared @@ -95,6 +97,7 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { }, justified_state_balances: balances.clone(), expected_head: get_root(2), + current_slot: Slot::new(0), }); // Add a vote to block 1 @@ -124,6 +127,7 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { }, justified_state_balances: balances.clone(), expected_head: get_root(1), + current_slot: Slot::new(0), }); // Add a vote to block 2 @@ -153,6 +157,7 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { }, justified_state_balances: balances.clone(), expected_head: get_root(2), + current_slot: Slot::new(0), }); // Add block 3. @@ -196,6 +201,7 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { }, justified_state_balances: balances.clone(), expected_head: get_root(2), + current_slot: Slot::new(0), }); // Move validator #0 vote from 1 to 3 @@ -229,6 +235,7 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { }, justified_state_balances: balances.clone(), expected_head: get_root(2), + current_slot: Slot::new(0), }); // Move validator #1 vote from 2 to 1 (this is an equivocation, but fork choice doesn't @@ -263,6 +270,7 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { }, justified_state_balances: balances.clone(), expected_head: get_root(3), + current_slot: Slot::new(0), }); // Add block 4. @@ -310,6 +318,7 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { }, justified_state_balances: balances.clone(), expected_head: get_root(4), + current_slot: Slot::new(0), }); // Add block 5, which has a justified epoch of 2. 
@@ -361,6 +370,7 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { }, justified_state_balances: balances.clone(), expected_head: get_root(4), + current_slot: Slot::new(0), }); // Add block 6, which has a justified epoch of 0. @@ -505,6 +515,7 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { }, justified_state_balances: balances.clone(), expected_head: get_root(6), + current_slot: Slot::new(0), }); // Change fork-choice justified epoch to 1, and the start block to 5 and ensure that 9 is @@ -538,6 +549,7 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { }, justified_state_balances: balances.clone(), expected_head: get_root(9), + current_slot: Slot::new(0), }); // Change fork-choice justified epoch to 1, and the start block to 5 and ensure that 9 is @@ -616,6 +628,7 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { }, justified_state_balances: balances.clone(), expected_head: get_root(9), + current_slot: Slot::new(0), }); // Introduce 2 more validators into the system @@ -677,6 +690,7 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { }, justified_state_balances: balances.clone(), expected_head: get_root(10), + current_slot: Slot::new(0), }); // Set the balances of the last two validators to zero @@ -702,6 +716,7 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { }, justified_state_balances: balances.clone(), expected_head: get_root(9), + current_slot: Slot::new(0), }); // Set the balances of the last two validators back to 1 @@ -727,6 +742,7 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { }, justified_state_balances: balances.clone(), expected_head: get_root(10), + current_slot: Slot::new(0), }); // Remove the last two validators @@ -753,6 +769,7 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { }, justified_state_balances: balances.clone(), expected_head: get_root(9), + current_slot: Slot::new(0), }); // Ensure that pruning 
below the prune threshold does not prune. @@ -774,6 +791,7 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { }, justified_state_balances: balances.clone(), expected_head: get_root(9), + current_slot: Slot::new(0), }); // Ensure that pruning above the prune threshold does prune. @@ -812,6 +830,7 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { }, justified_state_balances: balances.clone(), expected_head: get_root(9), + current_slot: Slot::new(0), }); // Add block 11 @@ -863,6 +882,7 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { }, justified_state_balances: balances, expected_head: get_root(11), + current_slot: Slot::new(0), }); ForkChoiceTestDefinition { diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index f062fef4188..d0806b9e312 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -293,35 +293,38 @@ impl ProtoArray { false }; - let node_deltas = deltas + let node_delta = deltas .get(node_index) .copied() .ok_or(Error::InvalidNodeDelta(node_index))?; - let mut node_delta = if execution_status_is_invalid { + let mut delta = if execution_status_is_invalid { // If the node has an invalid execution payload, reduce its weight to zero. 0_i64 .checked_sub(node.weight() as i64) .ok_or(Error::InvalidExecutionDeltaOverflow(node_index))? } else { - node_deltas.delta + node_delta.delta }; let (node_empty_delta, node_full_delta) = if node.as_v29().is_ok() { - (node_deltas.empty_delta, node_deltas.full_delta) + (node_delta.empty_delta, node_delta.full_delta) } else { (0, 0) }; // If we find the node for which the proposer boost was previously applied, decrease // the delta by the previous score amount. + // TODO(gloas): implement `should_apply_proposer_boost` from the Gloas spec. + // The spec conditionally applies proposer boost based on parent weakness and + // early equivocations. 
Currently boost is applied unconditionally. if self.previous_proposer_boost.root != Hash256::zero() && self.previous_proposer_boost.root == node.root() // Invalid nodes will always have a weight of zero so there's no need to subtract // the proposer boost delta. && !execution_status_is_invalid { - node_delta = node_delta + delta = delta .checked_sub(self.previous_proposer_boost.score as i64) .ok_or(Error::DeltaOverflow(node_index))?; } @@ -329,6 +332,10 @@ impl ProtoArray { // the delta by the new score amount (unless the block has an invalid execution status). // // https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/fork-choice.md#get_latest_attesting_balance + // + // TODO(gloas): proposer boost should also be subtracted from `empty_delta` per spec, + // since the spec creates a virtual vote with `payload_present=False` for the proposer + // boost, biasing toward Empty for non-current-slot payload decisions. if let Some(proposer_score_boost) = spec.proposer_score_boost && proposer_boost_root != Hash256::zero() && proposer_boost_root == node.root() @@ -338,7 +345,7 @@ impl ProtoArray { proposer_score = calculate_committee_fraction::(new_justified_balances, proposer_score_boost) .ok_or(Error::ProposerBoostOverflow(node_index))?; - node_delta = node_delta + delta = delta .checked_add(proposer_score as i64) .ok_or(Error::DeltaOverflow(node_index))?; } @@ -347,7 +354,7 @@ impl ProtoArray { if execution_status_is_invalid { *node.weight_mut() = 0; } else { - *node.weight_mut() = apply_delta(node.weight(), node_delta, node_index)?; + *node.weight_mut() = apply_delta(node.weight(), delta, node_index)?; } // Apply post-Gloas score deltas. 
@@ -356,7 +363,7 @@ impl ProtoArray { apply_delta(node.empty_payload_weight, node_empty_delta, node_index)?; node.full_payload_weight = apply_delta(node.full_payload_weight, node_full_delta, node_index)?; - if let Some(payload_tiebreaker) = node_deltas.payload_tiebreaker { + if let Some(payload_tiebreaker) = node_delta.payload_tiebreaker { node.payload_tiebreak = payload_tiebreaker; } } @@ -370,7 +377,7 @@ impl ProtoArray { // Back-propagate the node's delta to its parent. parent_delta.delta = parent_delta .delta - .checked_add(node_delta) + .checked_add(delta) .ok_or(Error::DeltaOverflow(parent_index))?; // Per spec's `is_supporting_vote`: a vote for descendant B supports @@ -381,13 +388,13 @@ impl ProtoArray { Ok(PayloadStatus::Full) => { parent_delta.full_delta = parent_delta .full_delta - .checked_add(node_delta) + .checked_add(delta) .ok_or(Error::DeltaOverflow(parent_index))?; } Ok(PayloadStatus::Empty) => { parent_delta.empty_delta = parent_delta .empty_delta - .checked_add(node_delta) + .checked_add(delta) .ok_or(Error::DeltaOverflow(parent_index))?; } // Pending or V17 nodes: no payload propagation. @@ -488,6 +495,8 @@ impl ProtoArray { { // Get the parent's execution block hash, handling both V17 and V29 nodes. // V17 parents occur during the Gloas fork transition. + // TODO(gloas): the spec's `get_parent_payload_status` assumes all blocks are + // post-Gloas with bids. Revisit once the spec clarifies fork-transition behavior. let parent_el_block_hash = match parent_node { ProtoNode::V29(v29) => Some(v29.execution_payload_block_hash), ProtoNode::V17(v17) => v17.execution_status.block_hash(), @@ -501,6 +510,9 @@ impl ProtoArray { PayloadStatus::Empty } } else { + // Parent is missing (genesis or pruned due to finalization). Default to Full + // since this path should only be hit at Gloas genesis, and extending the payload + // chain is the safe default. 
PayloadStatus::Full }; @@ -528,15 +540,16 @@ impl ProtoArray { }; // If the parent has an invalid execution status, return an error before adding the - // block to `self`. This applies when the parent is a V17 node with execution tracking. + // block to `self`. This applies only when the parent is a V17 node with execution tracking. if let Some(parent_index) = node.parent() { let parent = self .nodes .get(parent_index) .ok_or(Error::InvalidNodeIndex(parent_index))?; - if let Ok(status) = parent.execution_status() - && status.is_invalid() + // Execution status tracking only exists on V17 (pre-Gloas) nodes. + if let Ok(v17) = parent.as_v17() + && v17.execution_status.is_invalid() { return Err(Error::ParentExecutionStatusIsInvalid { block_root: block.root, @@ -565,6 +578,29 @@ impl ProtoArray { Ok(()) } + /// Process an excution payload for a Gloas block. + /// + /// this function assumes the + pub fn on_valid_execution_payload(&mut self, block_root: Hash256) -> Result<(), Error> { + let index = *self + .indices + .get(&block_root) + .ok_or(Error::NodeUnknown(block_root))?; + let node = self + .nodes + .get_mut(index) + .ok_or(Error::InvalidNodeIndex(index))?; + let v29 = node + .as_v29_mut() + .map_err(|_| Error::InvalidNodeVariant { block_root })?; + v29.payload_tiebreak = PayloadTiebreak { + is_timely: true, + is_data_available: true, + }; + + Ok(()) + } + /// Updates the `block_root` and all ancestors to have validated execution payloads. /// /// Returns an error if: @@ -871,8 +907,9 @@ impl ProtoArray { // practically possible to set a new justified root if we are unable to find a new head. // // This scenario is *unsupported*. It represents a serious consensus failure. - if let Ok(execution_status) = justified_node.execution_status() - && execution_status.is_invalid() + // Execution status tracking only exists on V17 (pre-Gloas) nodes. 
+ if let Ok(v17) = justified_node.as_v17() + && v17.execution_status.is_invalid() { return Err(Error::InvalidJustifiedCheckpointExecutionStatus { justified_root: *justified_root, @@ -1025,66 +1062,72 @@ impl ProtoArray { ); let no_change = (parent.best_child(), parent.best_descendant()); - let (new_best_child, new_best_descendant) = - if let Some(best_child_index) = parent.best_child() { - if best_child_index == child_index && !child_leads_to_viable_head { - // If the child is already the best-child of the parent but it's not viable for - // the head, remove it. - change_to_none - } else if best_child_index == child_index { - // If the child is the best-child already, set it again to ensure that the - // best-descendant of the parent is updated. + let (new_best_child, new_best_descendant) = if let Some(best_child_index) = + parent.best_child() + { + if best_child_index == child_index && !child_leads_to_viable_head { + // If the child is already the best-child of the parent but it's not viable for + // the head, remove it. + change_to_none + } else if best_child_index == child_index { + // If the child is the best-child already, set it again to ensure that the + // best-descendant of the parent is updated. + change_to_child + } else { + let best_child = self + .nodes + .get(best_child_index) + .ok_or(Error::InvalidBestDescendant(best_child_index))?; + + let best_child_leads_to_viable_head = self.node_leads_to_viable_head::( + best_child, + current_slot, + best_justified_checkpoint, + best_finalized_checkpoint, + )?; + + if child_leads_to_viable_head && !best_child_leads_to_viable_head { + // The child leads to a viable head, but the current best-child doesn't. change_to_child + } else if !child_leads_to_viable_head && best_child_leads_to_viable_head { + // The best child leads to a viable head, but the child doesn't. + no_change + } else if child.weight() > best_child.weight() { + // Weight is the primary ordering criterion. 
+ change_to_child + } else if child.weight() < best_child.weight() { + no_change } else { - let best_child = self - .nodes - .get(best_child_index) - .ok_or(Error::InvalidBestDescendant(best_child_index))?; - - let best_child_leads_to_viable_head = self.node_leads_to_viable_head::( - best_child, - current_slot, - best_justified_checkpoint, - best_finalized_checkpoint, - )?; - - if child_leads_to_viable_head && !best_child_leads_to_viable_head { - // The child leads to a viable head, but the current best-child doesn't. + // Equal weights: for V29 parents, prefer the child whose + // parent_payload_status matches the parent's payload preference + // (full vs empty). This corresponds to the spec's + // `get_payload_status_tiebreaker` ordering in `get_head`. + let child_matches = + child_matches_parent_payload_preference(parent, child, current_slot); + let best_child_matches = + child_matches_parent_payload_preference(parent, best_child, current_slot); + + if child_matches && !best_child_matches { + // Child extends the preferred payload chain, best_child doesn't. change_to_child - } else if !child_leads_to_viable_head && best_child_leads_to_viable_head { - // The best child leads to a viable head, but the child doesn't. + } else if !child_matches && best_child_matches { + // Best child extends the preferred payload chain, child doesn't. no_change - } else if child.weight() > best_child.weight() { - // Weight is the primary ordering criterion. + } else if *child.root() >= *best_child.root() { + // Final tie-breaker: both match or both don't, break by root. change_to_child - } else if child.weight() < best_child.weight() { - no_change } else { - // Equal weights: for V29 parents, prefer the child whose - // parent_payload_status matches the parent's payload preference. 
- let child_matches = child_matches_parent_payload_preference(parent, child); - let best_child_matches = - child_matches_parent_payload_preference(parent, best_child); - - if child_matches && !best_child_matches { - change_to_child - } else if !child_matches && best_child_matches { - no_change - } else if *child.root() >= *best_child.root() { - // Final tie-breaker of equal weights by root. - change_to_child - } else { - no_change - } + no_change } } - } else if child_leads_to_viable_head { - // There is no current best-child and the child is viable. - change_to_child - } else { - // There is no current best-child but the child is not viable. - no_change - }; + } + } else if child_leads_to_viable_head { + // There is no current best-child and the child is viable. + change_to_child + } else { + // There is no current best-child but the child is not viable. + no_change + }; let parent = self .nodes @@ -1338,16 +1381,35 @@ impl ProtoArray { /// When equal, the tiebreaker uses the parent's `payload_tiebreak`: prefer Full if the block /// was timely and data is available; otherwise prefer Empty. /// For V17 parents (or mixed), always returns `true` (no payload preference). -fn child_matches_parent_payload_preference(parent: &ProtoNode, child: &ProtoNode) -> bool { +/// +/// TODO(gloas): the spec's `should_extend_payload` has additional conditions beyond the +/// tiebreaker: it also checks proposer_boost_root (empty, different parent, or extends full). +/// See: https://github.com/ethereum/consensus-specs/blob/master/specs/gloas/fork-choice.md#new-should_extend_payload +/// +/// TODO(gloas): the spec's `should_extend_payload` has additional conditions beyond the +/// tiebreaker: it also checks proposer_boost_root (empty, different parent, or extends full). 
+/// See: https://github.com/ethereum/consensus-specs/blob/master/specs/gloas/fork-choice.md#new-should_extend_payload +fn child_matches_parent_payload_preference( + parent: &ProtoNode, + child: &ProtoNode, + current_slot: Slot, +) -> bool { let (Ok(parent_v29), Ok(child_v29)) = (parent.as_v29(), child.as_v29()) else { return true; }; - let prefers_full = if parent_v29.full_payload_weight > parent_v29.empty_payload_weight { + // Per spec `get_weight`: FULL/EMPTY virtual nodes at `current_slot - 1` have weight 0. + // The PTC is still voting, so payload preference is determined solely by the tiebreaker. + let use_tiebreaker_only = parent.slot() + 1 == current_slot; + let prefers_full = if !use_tiebreaker_only + && parent_v29.full_payload_weight > parent_v29.empty_payload_weight + { true - } else if parent_v29.empty_payload_weight > parent_v29.full_payload_weight { + } else if !use_tiebreaker_only + && parent_v29.empty_payload_weight > parent_v29.full_payload_weight + { false } else { - // Equal weights: tiebreaker per spec + // Equal weights (or current-slot parent): tiebreaker per spec. parent_v29.payload_tiebreak.is_timely && parent_v29.payload_tiebreak.is_data_available }; if prefers_full { diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index 15062367bf0..66f36274830 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -498,6 +498,11 @@ impl ProtoArrayForkChoice { }) } + pub fn on_execution_payload(&mut self, block_root: Hash256) -> Result<(), String> { + self.proto_array + .on_valid_execution_payload(block_root) + .map_err(|e| format!("Failed to process execution payload: {:?}", e)) + } /// See `ProtoArray::propagate_execution_payload_validation` for documentation. 
pub fn process_execution_payload_validation( &mut self, @@ -718,7 +723,7 @@ impl ProtoArrayForkChoice { let parent_slot = parent_node.slot(); let head_slot = head_node.slot(); - let re_org_block_slot = head_slot + 1; + let re_org_block_slot = head_slot.saturating_add(1_u64); // Check finalization distance. let proposal_epoch = re_org_block_slot.epoch(E::slots_per_epoch()); @@ -1035,17 +1040,12 @@ impl ProtoArrayForkChoice { self.proto_array.iter_block_roots(block_root) } - pub fn as_ssz_container( - &self, - ) -> SszContainer { + pub fn as_ssz_container(&self) -> SszContainer { SszContainer::from_proto_array(self) } - pub fn as_bytes( - &self, - ) -> Vec { - self.as_ssz_container() - .as_ssz_bytes() + pub fn as_bytes(&self) -> Vec { + self.as_ssz_container().as_ssz_bytes() } pub fn from_bytes(bytes: &[u8], balances: JustifiedBalances) -> Result { @@ -1321,8 +1321,8 @@ mod test_compute_deltas { next_epoch_shuffling_id: junk_shuffling_id.clone(), justified_checkpoint: genesis_checkpoint, finalized_checkpoint: genesis_checkpoint, - execution_status, unrealized_justified_checkpoint: Some(genesis_checkpoint), + execution_status, unrealized_finalized_checkpoint: Some(genesis_checkpoint), execution_payload_parent_hash: None, execution_payload_block_hash: None, diff --git a/consensus/proto_array/src/ssz_container.rs b/consensus/proto_array/src/ssz_container.rs index 02c3e333451..664dfe3ceba 100644 --- a/consensus/proto_array/src/ssz_container.rs +++ b/consensus/proto_array/src/ssz_container.rs @@ -55,9 +55,7 @@ pub struct SszContainerV29 { } impl SszContainerV29 { - pub fn from_proto_array( - from: &ProtoArrayForkChoice, - ) -> Self { + pub fn from_proto_array(from: &ProtoArrayForkChoice) -> Self { let proto_array = &from.proto_array; Self { From f74769611339b040791689cf4b8e55103c7b7ed3 Mon Sep 17 00:00:00 2001 From: hopinheimer Date: Mon, 16 Mar 2026 02:30:35 -0400 Subject: [PATCH 012/127] bitfield for `PTC` votes --- .../beacon_chain/src/block_verification.rs | 5 + 
consensus/fork_choice/src/fork_choice.rs | 26 ++- consensus/fork_choice/tests/tests.rs | 9 +- .../src/fork_choice_test_definition.rs | 17 +- .../gloas_payload.rs | 114 ++++------ consensus/proto_array/src/proto_array.rs | 195 +++++++++++------- .../src/proto_array_fork_choice.rs | 109 ++++------ 7 files changed, 237 insertions(+), 238 deletions(-) diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 9b2515f9757..c140c431bc6 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -1665,10 +1665,15 @@ impl ExecutionPendingBlock { .get_indexed_payload_attestation(&state, payload_attestation, &chain.spec) .map_err(|e| BlockError::PerBlockProcessingError(e.into_with_index(i)))?; + let ptc = state + .get_ptc(indexed_payload_attestation.data.slot, &chain.spec) + .map_err(|e| BlockError::BeaconChainError(Box::new(e.into())))?; + match fork_choice.on_payload_attestation( current_slot, indexed_payload_attestation, true, + &ptc.0, ) { Ok(()) => Ok(()), // Ignore invalid payload attestations whilst importing from a block. diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index ea17e20f027..63220f0bc6b 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -275,7 +275,8 @@ impl<'a, E: EthSpec> From> for QueuedAttestation { #[derive(Clone, PartialEq, Encode, Decode)] pub struct QueuedPayloadAttestation { slot: Slot, - attesting_indices: Vec, + /// Resolved PTC committee positions (not validator indices). + ptc_indices: Vec, block_root: Hash256, payload_present: bool, blob_data_available: bool, @@ -1267,11 +1268,16 @@ where } /// Register a payload attestation with the fork choice DAG. + /// + /// `ptc` is the PTC committee for the attestation's slot: a list of validator indices + /// ordered by committee position. 
Each attesting validator index is resolved to its + /// position within `ptc` (its `ptc_index`) before being applied to the proto-array. pub fn on_payload_attestation( &mut self, system_time_current_slot: Slot, attestation: &IndexedPayloadAttestation, is_from_block: bool, + ptc: &[usize], ) -> Result<(), Error> { self.update_time(system_time_current_slot)?; @@ -1281,6 +1287,12 @@ where self.validate_on_payload_attestation(attestation, is_from_block)?; + // Resolve validator indices to PTC committee positions. + let ptc_indices: Vec = attestation + .attesting_indices_iter() + .filter_map(|vi| ptc.iter().position(|&p| p == *vi as usize)) + .collect(); + let processing_slot = self.fc_store.get_current_slot(); // Payload attestations from blocks can be applied in the next slot (S+1 for data.slot=S), // while gossiped payload attestations are delayed one extra slot. @@ -1291,11 +1303,10 @@ where }; if should_process_now { - for validator_index in attestation.attesting_indices_iter() { + for &ptc_index in &ptc_indices { self.proto_array.process_payload_attestation( - *validator_index as usize, attestation.data.beacon_block_root, - processing_slot, + ptc_index, attestation.data.payload_present, attestation.data.blob_data_available, )?; @@ -1304,7 +1315,7 @@ where self.queued_payload_attestations .push(QueuedPayloadAttestation { slot: attestation.data.slot, - attesting_indices: attestation.attesting_indices.iter().copied().collect(), + ptc_indices, block_root: attestation.data.beacon_block_root, payload_present: attestation.data.payload_present, blob_data_available: attestation.data.blob_data_available, @@ -1436,11 +1447,10 @@ where for attestation in dequeue_payload_attestations(current_slot, &mut self.queued_payload_attestations) { - for validator_index in attestation.attesting_indices.iter() { + for &ptc_index in &attestation.ptc_indices { self.proto_array.process_payload_attestation( - *validator_index as usize, attestation.block_root, - current_slot, + ptc_index, 
attestation.payload_present, attestation.blob_data_available, )?; diff --git a/consensus/fork_choice/tests/tests.rs b/consensus/fork_choice/tests/tests.rs index 6ec1c8aeba6..44da1af148e 100644 --- a/consensus/fork_choice/tests/tests.rs +++ b/consensus/fork_choice/tests/tests.rs @@ -1027,10 +1027,13 @@ async fn payload_attestation_for_previous_slot_is_accepted_at_next_slot() { signature: AggregateSignature::empty(), }; + // PTC mapping: validator 0 is at ptc position 0. + let ptc = &[0_usize]; + let result = chain .canonical_head .fork_choice_write_lock() - .on_payload_attestation(current_slot, &payload_attestation, true); + .on_payload_attestation(current_slot, &payload_attestation, true, ptc); assert!( result.is_ok(), @@ -1074,10 +1077,12 @@ async fn non_block_payload_attestation_for_previous_slot_is_rejected() { signature: AggregateSignature::empty(), }; + let ptc = &[0_usize]; + let result = chain .canonical_head .fork_choice_write_lock() - .on_payload_attestation(s_plus_1, &payload_attestation, false); + .on_payload_attestation(s_plus_1, &payload_attestation, false, ptc); assert!( matches!( result, diff --git a/consensus/proto_array/src/fork_choice_test_definition.rs b/consensus/proto_array/src/fork_choice_test_definition.rs index 8451e2dc80f..45aed23b293 100644 --- a/consensus/proto_array/src/fork_choice_test_definition.rs +++ b/consensus/proto_array/src/fork_choice_test_definition.rs @@ -4,7 +4,6 @@ mod gloas_payload; mod no_votes; mod votes; -use crate::proto_array::PayloadTiebreak; use crate::proto_array_fork_choice::{Block, ExecutionStatus, PayloadStatus, ProtoArrayForkChoice}; use crate::{InvalidationOperation, JustifiedBalances}; use fixed_bytes::FixedBytesExtended; @@ -299,15 +298,14 @@ impl ForkChoiceTestDefinition { Operation::ProcessPayloadAttestation { validator_index, block_root, - attestation_slot, + attestation_slot: _, payload_present, blob_data_available, } => { fork_choice .process_payload_attestation( - validator_index, block_root, - 
attestation_slot, + validator_index, payload_present, blob_data_available, ) @@ -450,7 +448,7 @@ impl ForkChoiceTestDefinition { expected_status, } => { let actual = fork_choice - .head_payload_status(&head_root) + .head_payload_status::(&head_root) .unwrap_or_else(|| { panic!( "AssertHeadPayloadStatus: head root not found at op index {}", @@ -494,10 +492,11 @@ impl ForkChoiceTestDefinition { op_index ) }); - node_v29.payload_tiebreak = PayloadTiebreak { - is_timely, - is_data_available, - }; + // Set all bits (exceeds any threshold) or clear all bits. + let fill = if is_timely { 0xFF } else { 0x00 }; + node_v29.payload_timeliness_votes.fill(fill); + let fill = if is_data_available { 0xFF } else { 0x00 }; + node_v29.payload_data_availability_votes.fill(fill); } } } diff --git a/consensus/proto_array/src/fork_choice_test_definition/gloas_payload.rs b/consensus/proto_array/src/fork_choice_test_definition/gloas_payload.rs index 01f804c9aa4..9a0043a467b 100644 --- a/consensus/proto_array/src/fork_choice_test_definition/gloas_payload.rs +++ b/consensus/proto_array/src/fork_choice_test_definition/gloas_payload.rs @@ -134,17 +134,20 @@ pub fn get_gloas_payload_probe_test_definition() -> ForkChoiceTestDefinition { expected_head: get_root(1), current_slot: Slot::new(0), }); + // PTC votes write to bitfields only, not to full/empty weight. + // Weight is 0 because no CL attestations target this block. ops.push(Operation::AssertPayloadWeights { block_root: get_root(1), - expected_full_weight: 1, - expected_empty_weight: 1, + expected_full_weight: 0, + expected_empty_weight: 0, }); + // With MainnetEthSpec PTC_SIZE=512, 1 bit set out of 256 threshold → not timely → Empty. ops.push(Operation::AssertHeadPayloadStatus { head_root: get_root(1), expected_status: PayloadStatus::Empty, }); - // Flip validator 0 to Empty; probe should now report Empty. + // Flip validator 0 to Empty; both bits now clear. 
ops.push(Operation::ProcessPayloadAttestation { validator_index: 0, block_root: get_root(1), @@ -162,7 +165,7 @@ pub fn get_gloas_payload_probe_test_definition() -> ForkChoiceTestDefinition { ops.push(Operation::AssertPayloadWeights { block_root: get_root(1), expected_full_weight: 0, - expected_empty_weight: 2, + expected_empty_weight: 0, }); ops.push(Operation::AssertHeadPayloadStatus { head_root: get_root(1), @@ -214,6 +217,8 @@ pub fn get_gloas_payload_probe_test_definition() -> ForkChoiceTestDefinition { } } +/// Test that CL attestation weight can flip the head between Full/Empty branches, +/// overriding the tiebreaker. pub fn get_gloas_find_head_vote_transition_test_definition() -> ForkChoiceTestDefinition { let mut ops = vec![]; @@ -269,13 +274,11 @@ pub fn get_gloas_find_head_vote_transition_test_definition() -> ForkChoiceTestDe current_slot: Slot::new(0), }); - // Validator 0 votes Empty branch -> head flips to 4. - ops.push(Operation::ProcessPayloadAttestation { + // CL attestation to Empty branch (root 4) from validator 0 → head flips to 4. + ops.push(Operation::ProcessAttestation { validator_index: 0, block_root: get_root(4), attestation_slot: Slot::new(3), - payload_present: false, - blob_data_available: false, }); ops.push(Operation::FindHead { justified_checkpoint: get_checkpoint(0), @@ -285,13 +288,11 @@ pub fn get_gloas_find_head_vote_transition_test_definition() -> ForkChoiceTestDe current_slot: Slot::new(0), }); - // Latest-message update back to Full branch -> head returns to 3. - ops.push(Operation::ProcessPayloadAttestation { + // CL attestation back to Full branch (root 3) → head returns to 3. 
+ ops.push(Operation::ProcessAttestation { validator_index: 0, block_root: get_root(3), attestation_slot: Slot::new(4), - payload_present: true, - blob_data_available: false, }); ops.push(Operation::FindHead { justified_checkpoint: get_checkpoint(0), @@ -300,11 +301,6 @@ pub fn get_gloas_find_head_vote_transition_test_definition() -> ForkChoiceTestDe expected_head: get_root(3), current_slot: Slot::new(0), }); - ops.push(Operation::AssertPayloadWeights { - block_root: get_root(3), - expected_full_weight: 1, - expected_empty_weight: 0, - }); ForkChoiceTestDefinition { finalized_block_slot: Slot::new(0), @@ -317,6 +313,7 @@ pub fn get_gloas_find_head_vote_transition_test_definition() -> ForkChoiceTestDe } } +/// CL attestation weight overrides payload preference tiebreaker. pub fn get_gloas_weight_priority_over_payload_preference_test_definition() -> ForkChoiceTestDefinition { let mut ops = vec![]; @@ -359,7 +356,7 @@ pub fn get_gloas_weight_priority_over_payload_preference_test_definition() execution_payload_block_hash: Some(get_hash(4)), }); - // Parent prefers Full on equal branch weights. + // Parent prefers Full on equal branch weights (tiebreaker). ops.push(Operation::SetPayloadTiebreak { block_root: get_root(0), is_timely: true, @@ -373,20 +370,17 @@ pub fn get_gloas_weight_priority_over_payload_preference_test_definition() current_slot: Slot::new(0), }); - // Add two Empty votes to make the Empty branch strictly heavier. - ops.push(Operation::ProcessPayloadAttestation { + // Two CL attestations to the Empty branch make it strictly heavier, + // overriding the Full tiebreaker. 
+ ops.push(Operation::ProcessAttestation { validator_index: 0, block_root: get_root(4), attestation_slot: Slot::new(3), - payload_present: false, - blob_data_available: false, }); - ops.push(Operation::ProcessPayloadAttestation { + ops.push(Operation::ProcessAttestation { validator_index: 1, block_root: get_root(4), attestation_slot: Slot::new(3), - payload_present: false, - blob_data_available: false, }); ops.push(Operation::FindHead { justified_checkpoint: get_checkpoint(0), @@ -462,21 +456,13 @@ pub fn get_gloas_parent_empty_when_child_points_to_grandparent_test_definition() } } -/// Test interleaving of blocks, regular attestations, and late-arriving PTC votes. -/// -/// Exercises the spec's `get_weight` rule: FULL/EMPTY virtual nodes at `current_slot - 1` -/// have weight 0, so payload preference is determined solely by the tiebreaker. +/// Test interleaving of blocks, regular attestations, and tiebreaker. /// /// genesis → block 1 (Full) → block 3 /// → block 2 (Empty) → block 4 /// -/// Timeline: -/// 1. Blocks 1 (Full) and 2 (Empty) arrive at slot 1 -/// 2. Regular attestations arrive (equal weight per branch) -/// 3. Child blocks 3 and 4 arrive at slot 2 -/// 4. PTC votes arrive for genesis (2 Full), making genesis prefer Full by weight -/// 5. At current_slot=1 (genesis is current-1), PTC weights are ignored → tiebreaker decides -/// 6. At current_slot=100 (genesis is old), PTC weights apply → Full branch wins +/// With equal CL weight, tiebreaker determines which branch wins. +/// An extra CL attestation can override the tiebreaker. pub fn get_gloas_interleaved_attestations_test_definition() -> ForkChoiceTestDefinition { let mut ops = vec![]; @@ -532,60 +518,46 @@ pub fn get_gloas_interleaved_attestations_test_definition() -> ForkChoiceTestDef execution_payload_block_hash: Some(get_hash(4)), }); - // Step 4: PTC votes arrive for genesis, 2 Full votes from fresh validators. - // Vals 0 and 1 can't be reused because they already have votes at slot 1. 
- // Vals 2 and 3 target genesis; CL weight on genesis doesn't affect branch comparison. - ops.push(Operation::ProcessPayloadAttestation { - validator_index: 2, - block_root: get_root(0), - attestation_slot: Slot::new(1), - payload_present: true, - blob_data_available: false, - }); - ops.push(Operation::ProcessPayloadAttestation { - validator_index: 3, - block_root: get_root(0), - attestation_slot: Slot::new(1), - payload_present: true, - blob_data_available: false, - }); - - // Set tiebreaker to Empty on genesis. + // Step 4: Set tiebreaker to Empty on genesis → Empty branch wins. ops.push(Operation::SetPayloadTiebreak { block_root: get_root(0), is_timely: false, is_data_available: false, }); - - // Step 5: At current_slot=1, genesis (slot 0) is at current_slot-1. - // Per spec, FULL/EMPTY weights are zeroed → tiebreaker decides. - // Tiebreaker is Empty → Empty branch (block 4) wins. ops.push(Operation::FindHead { justified_checkpoint: get_checkpoint(0), finalized_checkpoint: get_checkpoint(0), - justified_state_balances: vec![1, 1, 1, 1], + justified_state_balances: vec![1, 1], expected_head: get_root(4), current_slot: Slot::new(1), }); - // Step 6: At current_slot=100, genesis (slot 0) is no longer at current_slot-1. - // FULL/EMPTY weights now apply. Genesis has Full > Empty → prefers Full. - // Full branch (block 3) wins despite Empty tiebreaker. + // Step 5: Flip tiebreaker to Full → Full branch wins. + ops.push(Operation::SetPayloadTiebreak { + block_root: get_root(0), + is_timely: true, + is_data_available: true, + }); ops.push(Operation::FindHead { justified_checkpoint: get_checkpoint(0), finalized_checkpoint: get_checkpoint(0), - justified_state_balances: vec![1, 1, 1, 1], + justified_state_balances: vec![1, 1], expected_head: get_root(3), current_slot: Slot::new(100), }); - // Verify the PTC weights are recorded on genesis. 
- // full = 2 (PTC votes) + 1 (back-propagated from Full child block 1) = 3 - // empty = 0 (PTC votes) + 1 (back-propagated from Empty child block 2) = 1 - ops.push(Operation::AssertPayloadWeights { - block_root: get_root(0), - expected_full_weight: 3, - expected_empty_weight: 1, + // Step 6: Add extra CL weight to Empty branch → overrides Full tiebreaker. + ops.push(Operation::ProcessAttestation { + validator_index: 2, + block_root: get_root(4), + attestation_slot: Slot::new(3), + }); + ops.push(Operation::FindHead { + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + justified_state_balances: vec![1, 1, 1], + expected_head: get_root(4), + current_slot: Slot::new(100), }); ForkChoiceTestDefinition { diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index d0806b9e312..4f89e6084f6 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -129,9 +129,16 @@ pub struct ProtoNode { pub full_payload_weight: u64, #[superstruct(only(V29), partial_getter(copy))] pub execution_payload_block_hash: ExecutionBlockHash, - /// Tiebreaker for payload preference when full_payload_weight == empty_payload_weight. - #[superstruct(only(V29), partial_getter(copy))] - pub payload_tiebreak: PayloadTiebreak, + /// PTC timeliness vote bitfield, indexed by PTC committee position. + /// Bit i set means PTC member i voted `payload_present = true`. + /// Tiebreak derived as: `count_ones() > ptc_size / 2`. + #[superstruct(only(V29))] + pub payload_timeliness_votes: Vec, + /// PTC data availability vote bitfield, indexed by PTC committee position. + /// Bit i set means PTC member i voted `blob_data_available = true`. + /// Tiebreak derived as: `count_ones() > ptc_size / 2`. 
+ #[superstruct(only(V29))] + pub payload_data_availability_votes: Vec, } #[derive(PartialEq, Debug, Encode, Decode, Serialize, Deserialize, Copy, Clone)] @@ -154,7 +161,6 @@ pub struct NodeDelta { pub delta: i64, pub empty_delta: i64, pub full_delta: i64, - pub payload_tiebreaker: Option, } impl NodeDelta { @@ -192,6 +198,15 @@ impl NodeDelta { Ok(()) } + /// Create a delta that only affects the aggregate `delta` field. + pub fn from_delta(delta: i64) -> Self { + Self { + delta, + empty_delta: 0, + full_delta: 0, + } + } + /// Subtract a balance from the appropriate payload status. pub fn sub_payload_delta( &mut self, @@ -211,21 +226,14 @@ impl NodeDelta { } } +/// Compare NodeDelta with i64 by comparing the aggregate `delta` field. +/// This is used by tests that only care about the total weight delta. impl PartialEq for NodeDelta { fn eq(&self, other: &i64) -> bool { self.delta == *other - && self.empty_delta == 0 - && self.full_delta == 0 - && self.payload_tiebreaker.is_none() } } -#[derive(Clone, Copy, PartialEq, Eq, Debug, Default, Encode, Decode, Serialize, Deserialize)] -pub struct PayloadTiebreak { - pub is_timely: bool, - pub is_data_available: bool, -} - #[derive(PartialEq, Debug, Serialize, Deserialize, Clone)] pub struct ProtoArray { /// Do not attempt to prune the tree unless it has at least this many nodes. Small prunes @@ -363,9 +371,6 @@ impl ProtoArray { apply_delta(node.empty_payload_weight, node_empty_delta, node_index)?; node.full_payload_weight = apply_delta(node.full_payload_weight, node_full_delta, node_index)?; - if let Some(payload_tiebreaker) = node_delta.payload_tiebreaker { - node.payload_tiebreak = payload_tiebreaker; - } } // Update the parent delta (if any). 
@@ -535,7 +540,8 @@ impl ProtoArray { empty_payload_weight: 0, full_payload_weight: 0, execution_payload_block_hash, - payload_tiebreak: PayloadTiebreak::default(), + payload_timeliness_votes: empty_ptc_bitfield(E::ptc_size()), + payload_data_availability_votes: empty_ptc_bitfield(E::ptc_size()), }) }; @@ -593,10 +599,10 @@ impl ProtoArray { let v29 = node .as_v29_mut() .map_err(|_| Error::InvalidNodeVariant { block_root })?; - v29.payload_tiebreak = PayloadTiebreak { - is_timely: true, - is_data_available: true, - }; + // A valid execution payload means the payload is timely and data is available. + // Set all bits to ensure the threshold is met regardless of PTC size. + v29.payload_timeliness_votes.fill(0xFF); + v29.payload_data_availability_votes.fill(0xFF); Ok(()) } @@ -1062,72 +1068,79 @@ impl ProtoArray { ); let no_change = (parent.best_child(), parent.best_descendant()); - let (new_best_child, new_best_descendant) = if let Some(best_child_index) = - parent.best_child() - { - if best_child_index == child_index && !child_leads_to_viable_head { - // If the child is already the best-child of the parent but it's not viable for - // the head, remove it. - change_to_none - } else if best_child_index == child_index { - // If the child is the best-child already, set it again to ensure that the - // best-descendant of the parent is updated. - change_to_child - } else { - let best_child = self - .nodes - .get(best_child_index) - .ok_or(Error::InvalidBestDescendant(best_child_index))?; - - let best_child_leads_to_viable_head = self.node_leads_to_viable_head::( - best_child, - current_slot, - best_justified_checkpoint, - best_finalized_checkpoint, - )?; - - if child_leads_to_viable_head && !best_child_leads_to_viable_head { - // The child leads to a viable head, but the current best-child doesn't. - change_to_child - } else if !child_leads_to_viable_head && best_child_leads_to_viable_head { - // The best child leads to a viable head, but the child doesn't. 
- no_change - } else if child.weight() > best_child.weight() { - // Weight is the primary ordering criterion. + let (new_best_child, new_best_descendant) = + if let Some(best_child_index) = parent.best_child() { + if best_child_index == child_index && !child_leads_to_viable_head { + // If the child is already the best-child of the parent but it's not viable for + // the head, remove it. + change_to_none + } else if best_child_index == child_index { + // If the child is the best-child already, set it again to ensure that the + // best-descendant of the parent is updated. change_to_child - } else if child.weight() < best_child.weight() { - no_change } else { - // Equal weights: for V29 parents, prefer the child whose - // parent_payload_status matches the parent's payload preference - // (full vs empty). This corresponds to the spec's - // `get_payload_status_tiebreaker` ordering in `get_head`. - let child_matches = - child_matches_parent_payload_preference(parent, child, current_slot); - let best_child_matches = - child_matches_parent_payload_preference(parent, best_child, current_slot); - - if child_matches && !best_child_matches { - // Child extends the preferred payload chain, best_child doesn't. + let best_child = self + .nodes + .get(best_child_index) + .ok_or(Error::InvalidBestDescendant(best_child_index))?; + + let best_child_leads_to_viable_head = self.node_leads_to_viable_head::( + best_child, + current_slot, + best_justified_checkpoint, + best_finalized_checkpoint, + )?; + + if child_leads_to_viable_head && !best_child_leads_to_viable_head { + // The child leads to a viable head, but the current best-child doesn't. change_to_child - } else if !child_matches && best_child_matches { - // Best child extends the preferred payload chain, child doesn't. + } else if !child_leads_to_viable_head && best_child_leads_to_viable_head { + // The best child leads to a viable head, but the child doesn't. 
no_change - } else if *child.root() >= *best_child.root() { - // Final tie-breaker: both match or both don't, break by root. + } else if child.weight() > best_child.weight() { + // Weight is the primary ordering criterion. change_to_child - } else { + } else if child.weight() < best_child.weight() { no_change + } else { + // Equal weights: for V29 parents, prefer the child whose + // parent_payload_status matches the parent's payload preference + // (full vs empty). This corresponds to the spec's + // `get_payload_status_tiebreaker` ordering in `get_head`. + let child_matches = child_matches_parent_payload_preference( + parent, + child, + current_slot, + E::ptc_size(), + ); + let best_child_matches = child_matches_parent_payload_preference( + parent, + best_child, + current_slot, + E::ptc_size(), + ); + + if child_matches && !best_child_matches { + // Child extends the preferred payload chain, best_child doesn't. + change_to_child + } else if !child_matches && best_child_matches { + // Best child extends the preferred payload chain, child doesn't. + no_change + } else if *child.root() >= *best_child.root() { + // Final tie-breaker: both match or both don't, break by root. + change_to_child + } else { + no_change + } } } - } - } else if child_leads_to_viable_head { - // There is no current best-child and the child is viable. - change_to_child - } else { - // There is no current best-child but the child is not viable. - no_change - }; + } else if child_leads_to_viable_head { + // There is no current best-child and the child is viable. + change_to_child + } else { + // There is no current best-child but the child is not viable. 
+ no_change + }; let parent = self .nodes @@ -1393,6 +1406,7 @@ fn child_matches_parent_payload_preference( parent: &ProtoNode, child: &ProtoNode, current_slot: Slot, + ptc_size: usize, ) -> bool { let (Ok(parent_v29), Ok(child_v29)) = (parent.as_v29(), child.as_v29()) else { return true; @@ -1410,7 +1424,8 @@ fn child_matches_parent_payload_preference( false } else { // Equal weights (or current-slot parent): tiebreaker per spec. - parent_v29.payload_tiebreak.is_timely && parent_v29.payload_tiebreak.is_data_available + is_payload_timely(&parent_v29.payload_timeliness_votes, ptc_size) + && is_payload_data_available(&parent_v29.payload_data_availability_votes, ptc_size) }; if prefers_full { child_v29.parent_payload_status == PayloadStatus::Full @@ -1419,6 +1434,26 @@ fn child_matches_parent_payload_preference( } } +/// Count the number of set bits in a byte-slice bitfield. +pub fn count_set_bits(bitfield: &[u8]) -> usize { + bitfield.iter().map(|b| b.count_ones() as usize).sum() +} + +/// Create a zero-initialized bitfield for the given PTC size. +pub fn empty_ptc_bitfield(ptc_size: usize) -> Vec { + vec![0u8; ptc_size.div_ceil(8)] +} + +/// Derive `is_payload_timely` from the timeliness vote bitfield. +pub fn is_payload_timely(timeliness_votes: &[u8], ptc_size: usize) -> bool { + count_set_bits(timeliness_votes) > ptc_size / 2 +} + +/// Derive `is_payload_data_available` from the data availability vote bitfield. +pub fn is_payload_data_available(availability_votes: &[u8], ptc_size: usize) -> bool { + count_set_bits(availability_votes) > ptc_size / 2 +} + /// A helper method to calculate the proposer boost based on the given `justified_balances`. 
/// /// https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/fork-choice.md#get_latest_attesting_balance diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index 66f36274830..021d62e63f9 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -3,7 +3,7 @@ use crate::{ error::Error, proto_array::{ InvalidationOperation, Iter, NodeDelta, ProposerBoost, ProtoArray, ProtoNode, - calculate_committee_fraction, + calculate_committee_fraction, is_payload_data_available, is_payload_timely, }, ssz_container::SszContainer, }; @@ -23,8 +23,6 @@ use types::{ pub const DEFAULT_PRUNE_THRESHOLD: usize = 256; #[derive(Default, PartialEq, Clone, Encode, Decode)] -// FIXME(sproul): the "next" naming here is a bit odd -// FIXME(sproul): version this type? pub struct VoteTracker { current_root: Hash256, next_root: Hash256, @@ -32,16 +30,12 @@ pub struct VoteTracker { next_slot: Slot, current_payload_present: bool, next_payload_present: bool, - current_blob_data_available: bool, - next_blob_data_available: bool, } -// FIXME(sproul): version this type pub struct LatestMessage { pub slot: Slot, pub root: Hash256, pub payload_present: bool, - pub blob_data_available: bool, } /// Represents the verification status of an execution payload pre-Gloas. @@ -535,28 +529,53 @@ impl ProtoArrayForkChoice { if attestation_slot > vote.next_slot || *vote == VoteTracker::default() { vote.next_root = block_root; vote.next_slot = attestation_slot; - vote.next_payload_present = false; - vote.next_blob_data_available = false; } Ok(()) } + /// Process a PTC vote by setting the appropriate bits on the target block's V29 node. + /// + /// `ptc_index` is the voter's position in the PTC committee (resolved by the caller). + /// This writes directly to the node's bitfields, bypassing the delta pipeline. 
pub fn process_payload_attestation( &mut self, - validator_index: usize, block_root: Hash256, - attestation_slot: Slot, + ptc_index: usize, payload_present: bool, blob_data_available: bool, ) -> Result<(), String> { - let vote = self.votes.get_mut(validator_index); - - if attestation_slot > vote.next_slot || *vote == VoteTracker::default() { - vote.next_root = block_root; - vote.next_slot = attestation_slot; - vote.next_payload_present = payload_present; - vote.next_blob_data_available = blob_data_available; + let node_index = self + .proto_array + .indices + .get(&block_root) + .copied() + .ok_or_else(|| { + format!("process_payload_attestation: unknown block root {block_root:?}") + })?; + let node = self.proto_array.nodes.get_mut(node_index).ok_or_else(|| { + format!("process_payload_attestation: invalid node index {node_index}") + })?; + let v29 = node + .as_v29_mut() + .map_err(|_| format!("process_payload_attestation: node {block_root:?} is not V29"))?; + + let byte_index = ptc_index / 8; + let bit_mask = 1u8 << (ptc_index % 8); + + if let Some(byte) = v29.payload_timeliness_votes.get_mut(byte_index) { + if payload_present { + *byte |= bit_mask; + } else { + *byte &= !bit_mask; + } + } + if let Some(byte) = v29.payload_data_availability_votes.get_mut(byte_index) { + if blob_data_available { + *byte |= bit_mask; + } else { + *byte &= !bit_mask; + } } Ok(()) @@ -978,14 +997,16 @@ impl ProtoArrayForkChoice { /// On ties, consult the node's runtime `payload_tiebreak`: prefer `Full` only when timely and /// data is available, otherwise `Empty`. /// Returns `Empty` otherwise. Returns `None` for V17 nodes. 
- pub fn head_payload_status(&self, head_root: &Hash256) -> Option { + pub fn head_payload_status(&self, head_root: &Hash256) -> Option { let node = self.get_proto_node(head_root)?; let v29 = node.as_v29().ok()?; if v29.full_payload_weight > v29.empty_payload_weight { Some(PayloadStatus::Full) } else if v29.empty_payload_weight > v29.full_payload_weight { Some(PayloadStatus::Empty) - } else if v29.payload_tiebreak.is_timely && v29.payload_tiebreak.is_data_available { + } else if is_payload_timely(&v29.payload_timeliness_votes, E::ptc_size()) + && is_payload_data_available(&v29.payload_data_availability_votes, E::ptc_size()) + { Some(PayloadStatus::Full) } else { Some(PayloadStatus::Empty) @@ -1019,7 +1040,6 @@ impl ProtoArrayForkChoice { root: vote.next_root, slot: vote.next_slot, payload_present: vote.next_payload_present, - blob_data_available: vote.next_blob_data_available, }) } } else { @@ -1105,17 +1125,6 @@ fn compute_deltas( new_balances: &[u64], equivocating_indices: &BTreeSet, ) -> Result, Error> { - let merge_payload_tiebreaker = - |delta: &mut NodeDelta, incoming: crate::proto_array::PayloadTiebreak| { - delta.payload_tiebreaker = Some(match delta.payload_tiebreaker { - Some(existing) => crate::proto_array::PayloadTiebreak { - is_timely: existing.is_timely || incoming.is_timely, - is_data_available: existing.is_data_available || incoming.is_data_available, - }, - None => incoming, - }); - }; - let block_slot = |index: usize| -> Result { node_slots .get(index) @@ -1128,7 +1137,6 @@ fn compute_deltas( delta: 0, empty_delta: 0, full_delta: 0, - payload_tiebreaker: None, }; indices.len() ]; @@ -1175,7 +1183,6 @@ fn compute_deltas( vote.current_root = Hash256::zero(); vote.current_slot = Slot::new(0); vote.current_payload_present = false; - vote.current_blob_data_available = false; } // We've handled this slashed validator, continue without applying an ordinary delta. 
continue; @@ -1233,21 +1240,11 @@ fn compute_deltas( block_slot(next_delta_index)?, ); node_delta.add_payload_delta(status, new_balance, next_delta_index)?; - if status != PayloadStatus::Pending { - merge_payload_tiebreaker( - node_delta, - crate::proto_array::PayloadTiebreak { - is_timely: vote.next_payload_present, - is_data_available: vote.next_blob_data_available, - }, - ); - } } vote.current_root = vote.next_root; vote.current_slot = vote.next_slot; vote.current_payload_present = vote.next_payload_present; - vote.current_blob_data_available = vote.next_blob_data_available; } } @@ -1600,8 +1597,6 @@ mod test_compute_deltas { next_slot: Slot::new(0), current_payload_present: false, next_payload_present: false, - current_blob_data_available: false, - next_blob_data_available: false, }); old_balances.push(0); new_balances.push(0); @@ -1657,8 +1652,6 @@ mod test_compute_deltas { next_slot: Slot::new(0), current_payload_present: false, next_payload_present: false, - current_blob_data_available: false, - next_blob_data_available: false, }); old_balances.push(BALANCE); new_balances.push(BALANCE); @@ -1721,8 +1714,6 @@ mod test_compute_deltas { next_slot: Slot::new(0), current_payload_present: false, next_payload_present: false, - current_blob_data_available: false, - next_blob_data_available: false, }); old_balances.push(BALANCE); new_balances.push(BALANCE); @@ -1780,8 +1771,6 @@ mod test_compute_deltas { next_slot: Slot::new(0), current_payload_present: false, next_payload_present: false, - current_blob_data_available: false, - next_blob_data_available: false, }); old_balances.push(BALANCE); new_balances.push(BALANCE); @@ -1850,8 +1839,6 @@ mod test_compute_deltas { next_slot: Slot::new(0), current_payload_present: false, next_payload_present: false, - current_blob_data_available: false, - next_blob_data_available: false, }); // One validator moves their vote from the block to something outside the tree. 
@@ -1862,8 +1849,6 @@ mod test_compute_deltas { next_slot: Slot::new(0), current_payload_present: false, next_payload_present: false, - current_blob_data_available: false, - next_blob_data_available: false, }); let deltas = compute_deltas( @@ -1914,8 +1899,6 @@ mod test_compute_deltas { next_slot: Slot::new(0), current_payload_present: false, next_payload_present: false, - current_blob_data_available: false, - next_blob_data_available: false, }); old_balances.push(OLD_BALANCE); new_balances.push(NEW_BALANCE); @@ -1989,8 +1972,6 @@ mod test_compute_deltas { next_slot: Slot::new(0), current_payload_present: false, next_payload_present: false, - current_blob_data_available: false, - next_blob_data_available: false, }); } @@ -2051,8 +2032,6 @@ mod test_compute_deltas { next_slot: Slot::new(0), current_payload_present: false, next_payload_present: false, - current_blob_data_available: false, - next_blob_data_available: false, }); } @@ -2111,8 +2090,6 @@ mod test_compute_deltas { next_slot: Slot::new(0), current_payload_present: false, next_payload_present: false, - current_blob_data_available: false, - next_blob_data_available: false, }); } @@ -2176,8 +2153,6 @@ mod test_compute_deltas { next_slot: Slot::new(1), current_payload_present: false, next_payload_present: true, - current_blob_data_available: false, - next_blob_data_available: false, }]); let deltas = compute_deltas( @@ -2210,8 +2185,6 @@ mod test_compute_deltas { next_slot: Slot::new(0), current_payload_present: false, next_payload_present: true, - current_blob_data_available: false, - next_blob_data_available: false, }]); let deltas = compute_deltas( From 0df749f0a206c0de1b86999ea0e8d4d4aaf1c1ce Mon Sep 17 00:00:00 2001 From: hopinheimer Date: Mon, 16 Mar 2026 05:53:47 -0400 Subject: [PATCH 013/127] completing `should_extend_payload` implementation --- Cargo.lock | 2 + beacon_node/beacon_chain/src/beacon_chain.rs | 51 +++++- beacon_node/beacon_chain/src/invariants.rs | 4 +- 
consensus/fork_choice/src/fork_choice.rs | 43 ++++- consensus/proto_array/Cargo.toml | 2 + .../src/fork_choice_test_definition.rs | 45 ++++- .../gloas_payload.rs | 120 +++++++++++++ consensus/proto_array/src/proto_array.rs | 157 +++++++++++------- .../src/proto_array_fork_choice.rs | 53 +++--- 9 files changed, 381 insertions(+), 96 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 653be9351eb..a14aacc0a2a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7020,7 +7020,9 @@ dependencies = [ "safe_arith", "serde", "serde_yaml", + "smallvec", "superstruct", + "typenum", "types", ] diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index d9a1e46b033..29cd437c43a 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -4840,10 +4840,53 @@ impl BeaconChain { // If the current slot is already equal to the proposal slot (or we are in the tail end of // the prior slot), then check the actual weight of the head against the head re-org threshold // and the actual weight of the parent against the parent re-org threshold. + // Per spec `is_head_weak`: uses get_attestation_score(head, PENDING) which is + // the total weight. Per spec `is_parent_strong`: uses + // get_attestation_score(parent, parent_payload_status) where parent_payload_status + // is determined by the head block's relationship to its parent. + let head_weight = info.head_node.weight(); + let parent_weight = if let Ok(head_payload_status) = info.head_node.parent_payload_status() + { + // Post-GLOAS: use the payload-filtered weight matching how the head + // extends from its parent. 
+ match head_payload_status { + proto_array::PayloadStatus::Full => { + info.parent_node.full_payload_weight().map_err(|()| { + Box::new(ProposerHeadError::Error( + Error::ProposerHeadForkChoiceError( + fork_choice::Error::ProtoArrayError( + proto_array::Error::InvalidNodeVariant { + block_root: info.parent_node.root(), + }, + ), + ), + )) + })? + } + proto_array::PayloadStatus::Empty => { + info.parent_node.empty_payload_weight().map_err(|()| { + Box::new(ProposerHeadError::Error( + Error::ProposerHeadForkChoiceError( + fork_choice::Error::ProtoArrayError( + proto_array::Error::InvalidNodeVariant { + block_root: info.parent_node.root(), + }, + ), + ), + )) + })? + } + proto_array::PayloadStatus::Pending => info.parent_node.weight(), + } + } else { + // Pre-GLOAS (V17): use total weight. + info.parent_node.weight() + }; + let (head_weak, parent_strong) = if fork_choice_slot == re_org_block_slot { ( - info.head_node.weight() < info.re_org_head_weight_threshold, - info.parent_node.weight() > info.re_org_parent_weight_threshold, + head_weight < info.re_org_head_weight_threshold, + parent_weight > info.re_org_parent_weight_threshold, ) } else { (true, true) @@ -4851,7 +4894,7 @@ impl BeaconChain { if !head_weak { return Err(Box::new( DoNotReOrg::HeadNotWeak { - head_weight: info.head_node.weight(), + head_weight, re_org_head_weight_threshold: info.re_org_head_weight_threshold, } .into(), @@ -4860,7 +4903,7 @@ impl BeaconChain { if !parent_strong { return Err(Box::new( DoNotReOrg::ParentNotStrong { - parent_weight: info.parent_node.weight(), + parent_weight, re_org_parent_weight_threshold: info.re_org_parent_weight_threshold, } .into(), diff --git a/beacon_node/beacon_chain/src/invariants.rs b/beacon_node/beacon_chain/src/invariants.rs index 7bcec7b0b41..b365f37a0aa 100644 --- a/beacon_node/beacon_chain/src/invariants.rs +++ b/beacon_node/beacon_chain/src/invariants.rs @@ -23,9 +23,9 @@ impl BeaconChain { // Only check blocks that are descendants of the finalized 
checkpoint. // Pruned non-canonical fork blocks may linger in the proto-array but // are legitimately absent from the database. - fc.is_finalized_checkpoint_or_descendant(node.root) + fc.is_finalized_checkpoint_or_descendant(node.root()) }) - .map(|node| (node.root, node.slot)) + .map(|node| (node.root(), node.slot())) .collect() }; diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index 63220f0bc6b..30c56c97758 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -175,8 +175,13 @@ pub enum InvalidAttestation { /// The attestation is attesting to a state that is later than itself. (Viz., attesting to the /// future). AttestsToFutureBlock { block: Slot, attestation: Slot }, + /// Post-GLOAS: attestation index must be 0 or 1. + InvalidAttestationIndex { index: u64 }, /// A same-slot attestation has a non-zero index, which is invalid post-GLOAS. InvalidSameSlotAttestationIndex { slot: Slot }, + /// Post-GLOAS: attestation with index == 1 (payload_present) requires the block's + /// payload to have been received (`root in store.payload_states`). + PayloadNotReceived { beacon_block_root: Hash256 }, /// A payload attestation votes payload_present for a block in the current slot, which is /// invalid because the payload cannot be known yet. PayloadPresentDuringSameSlot { slot: Slot }, @@ -256,6 +261,8 @@ pub struct QueuedAttestation { attesting_indices: Vec, block_root: Hash256, target_epoch: Epoch, + /// Per GLOAS spec: `payload_present = attestation.data.index == 1`. 
+ payload_present: bool, } impl<'a, E: EthSpec> From> for QueuedAttestation { @@ -265,6 +272,7 @@ impl<'a, E: EthSpec> From> for QueuedAttestation { attesting_indices: a.attesting_indices_to_vec(), block_root: a.data().beacon_block_root, target_epoch: a.data().target.epoch, + payload_present: a.data().index == 1, } } } @@ -1136,15 +1144,34 @@ where }); } - // Post-GLOAS: same-slot attestations must have index == 0. Attestations with - // index != 0 during the same slot as the block are invalid. if spec .fork_name_at_slot::(indexed_attestation.data().slot) .gloas_enabled() - && indexed_attestation.data().slot == block.slot - && indexed_attestation.data().index != 0 { - return Err(InvalidAttestation::InvalidSameSlotAttestationIndex { slot: block.slot }); + let index = indexed_attestation.data().index; + + // Post-GLOAS: attestation index must be 0 or 1. + if index > 1 { + return Err(InvalidAttestation::InvalidAttestationIndex { index }); + } + + // Same-slot attestations must have index == 0. + if indexed_attestation.data().slot == block.slot && index != 0 { + return Err(InvalidAttestation::InvalidSameSlotAttestationIndex { + slot: block.slot, + }); + } + + // index == 1 (payload_present) requires the block's payload to have been received. + if index == 1 + && !self + .proto_array + .is_payload_received(&indexed_attestation.data().beacon_block_root) + { + return Err(InvalidAttestation::PayloadNotReceived { + beacon_block_root: indexed_attestation.data().beacon_block_root, + }); + } } Ok(()) @@ -1245,12 +1272,16 @@ where self.validate_on_attestation(attestation, is_from_block, spec)?; + // Per GLOAS spec: `payload_present = attestation.data.index == 1`. 
+ let payload_present = attestation.data().index == 1; + if attestation.data().slot < self.fc_store.get_current_slot() { for validator_index in attestation.attesting_indices_iter() { self.proto_array.process_attestation( *validator_index as usize, attestation.data().beacon_block_root, attestation.data().slot, + payload_present, )?; } } else { @@ -1433,6 +1464,7 @@ where *validator_index as usize, attestation.block_root, attestation.slot, + attestation.payload_present, )?; } } @@ -1850,6 +1882,7 @@ mod tests { attesting_indices: vec![], block_root: Hash256::zero(), target_epoch: Epoch::new(0), + payload_present: false, }) .collect() } diff --git a/consensus/proto_array/Cargo.toml b/consensus/proto_array/Cargo.toml index 782610e0d35..f9c35bb5850 100644 --- a/consensus/proto_array/Cargo.toml +++ b/consensus/proto_array/Cargo.toml @@ -15,5 +15,7 @@ fixed_bytes = { workspace = true } safe_arith = { workspace = true } serde = { workspace = true } serde_yaml = { workspace = true } +smallvec = { workspace = true } superstruct = { workspace = true } +typenum = { workspace = true } types = { workspace = true } diff --git a/consensus/proto_array/src/fork_choice_test_definition.rs b/consensus/proto_array/src/fork_choice_test_definition.rs index 45aed23b293..16c7df4ca26 100644 --- a/consensus/proto_array/src/fork_choice_test_definition.rs +++ b/consensus/proto_array/src/fork_choice_test_definition.rs @@ -8,6 +8,7 @@ use crate::proto_array_fork_choice::{Block, ExecutionStatus, PayloadStatus, Prot use crate::{InvalidationOperation, JustifiedBalances}; use fixed_bytes::FixedBytesExtended; use serde::{Deserialize, Serialize}; +use ssz::BitVector; use std::collections::BTreeSet; use types::{ AttestationShufflingId, ChainSpec, Checkpoint, Epoch, EthSpec, ExecutionBlockHash, Hash256, @@ -96,6 +97,15 @@ pub enum Operation { is_timely: bool, is_data_available: bool, }, + /// Simulate receiving and validating an execution payload for `block_root`. 
+ /// Sets `payload_received = true` on the V29 node via the live validation path. + ProcessExecutionPayload { + block_root: Hash256, + }, + AssertPayloadReceived { + block_root: Hash256, + expected: bool, + }, } #[derive(Debug, Clone, Serialize, Deserialize)] @@ -286,7 +296,7 @@ impl ForkChoiceTestDefinition { attestation_slot, } => { fork_choice - .process_attestation(validator_index, block_root, attestation_slot) + .process_attestation(validator_index, block_root, attestation_slot, false) .unwrap_or_else(|_| { panic!( "process_attestation op at index {} returned error", @@ -494,9 +504,38 @@ impl ForkChoiceTestDefinition { }); // Set all bits (exceeds any threshold) or clear all bits. let fill = if is_timely { 0xFF } else { 0x00 }; - node_v29.payload_timeliness_votes.fill(fill); + node_v29.payload_timeliness_votes = + BitVector::from_bytes(smallvec::smallvec![fill; 64]) + .expect("valid 512-bit bitvector"); let fill = if is_data_available { 0xFF } else { 0x00 }; - node_v29.payload_data_availability_votes.fill(fill); + node_v29.payload_data_availability_votes = + BitVector::from_bytes(smallvec::smallvec![fill; 64]) + .expect("valid 512-bit bitvector"); + // Per spec, is_payload_timely/is_payload_data_available require + // the payload to be in payload_states (payload_received). 
+ node_v29.payload_received = is_timely || is_data_available; + } + Operation::ProcessExecutionPayload { block_root } => { + fork_choice + .on_execution_payload(block_root) + .unwrap_or_else(|e| { + panic!( + "on_execution_payload op at index {} returned error: {}", + op_index, e + ) + }); + check_bytes_round_trip(&fork_choice); + } + Operation::AssertPayloadReceived { + block_root, + expected, + } => { + let actual = fork_choice.is_payload_received(&block_root); + assert_eq!( + actual, expected, + "payload_received mismatch at op index {}", + op_index + ); } } } diff --git a/consensus/proto_array/src/fork_choice_test_definition/gloas_payload.rs b/consensus/proto_array/src/fork_choice_test_definition/gloas_payload.rs index 9a0043a467b..84e2878d32f 100644 --- a/consensus/proto_array/src/fork_choice_test_definition/gloas_payload.rs +++ b/consensus/proto_array/src/fork_choice_test_definition/gloas_payload.rs @@ -571,6 +571,120 @@ pub fn get_gloas_interleaved_attestations_test_definition() -> ForkChoiceTestDef } } +/// Test interleaving of blocks, payload validation, and attestations. +/// +/// Scenario: +/// - Genesis block (slot 0) +/// - Block 1 (slot 1) extends genesis, Full chain +/// - Block 2 (slot 1) extends genesis, Empty chain +/// - Before payload arrives: payload_received is false for block 1 +/// - Process execution payload for block 1 → payload_received becomes true +/// - Payload attestations arrive voting block 1's payload as timely + available +/// - Head should follow block 1 because the PTC votes now count (payload_received = true) +pub fn get_gloas_payload_received_interleaving_test_definition() -> ForkChoiceTestDefinition { + let mut ops = vec![]; + + // Block 1 at slot 1: extends genesis Full chain. 
+ ops.push(Operation::ProcessBlock { + slot: Slot::new(1), + root: get_root(1), + parent_root: get_root(0), + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + execution_payload_parent_hash: Some(get_hash(0)), + execution_payload_block_hash: Some(get_hash(1)), + }); + + // Block 2 at slot 1: extends genesis Empty chain (parent_hash doesn't match genesis EL hash). + ops.push(Operation::ProcessBlock { + slot: Slot::new(1), + root: get_root(2), + parent_root: get_root(0), + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + execution_payload_parent_hash: Some(get_hash(99)), + execution_payload_block_hash: Some(get_hash(100)), + }); + + // Both children have parent_payload_status set correctly. + ops.push(Operation::AssertParentPayloadStatus { + block_root: get_root(1), + expected_status: PayloadStatus::Full, + }); + ops.push(Operation::AssertParentPayloadStatus { + block_root: get_root(2), + expected_status: PayloadStatus::Empty, + }); + + // Before payload arrives: payload_received is false on genesis. + ops.push(Operation::AssertPayloadReceived { + block_root: get_root(0), + expected: false, + }); + + // Give one vote to each child so they have equal weight. + ops.push(Operation::ProcessAttestation { + validator_index: 0, + block_root: get_root(1), + attestation_slot: Slot::new(1), + }); + ops.push(Operation::ProcessAttestation { + validator_index: 1, + block_root: get_root(2), + attestation_slot: Slot::new(1), + }); + + // Equal weight, no payload received on genesis → tiebreaker uses PTC votes which + // require payload_received. Without it, is_payload_timely returns false → prefers Empty. + // Block 2 (Empty) wins because it matches the Empty preference. 
+ ops.push(Operation::FindHead { + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + justified_state_balances: vec![1, 1], + expected_head: get_root(2), + current_slot: Slot::new(100), + }); + + // Now the execution payload for genesis arrives and is validated. + ops.push(Operation::ProcessExecutionPayload { + block_root: get_root(0), + }); + + // payload_received is now true. + ops.push(Operation::AssertPayloadReceived { + block_root: get_root(0), + expected: true, + }); + + // Set PTC votes on genesis as timely + data available (simulates PTC voting). + ops.push(Operation::SetPayloadTiebreak { + block_root: get_root(0), + is_timely: true, + is_data_available: true, + }); + + // Now with payload_received=true and PTC votes exceeding threshold: + // is_payload_timely=true, is_payload_data_available=true → prefers Full. + // Block 1 (Full) wins because it matches the Full preference. + ops.push(Operation::FindHead { + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + justified_state_balances: vec![1, 1], + expected_head: get_root(1), + current_slot: Slot::new(100), + }); + + ForkChoiceTestDefinition { + finalized_block_slot: Slot::new(0), + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + operations: ops, + execution_payload_parent_hash: Some(get_hash(42)), + execution_payload_block_hash: Some(get_hash(0)), + spec: Some(gloas_spec()), + } +} + #[cfg(test)] mod tests { use super::*; @@ -610,4 +724,10 @@ mod tests { let test = get_gloas_interleaved_attestations_test_definition(); test.run(); } + + #[test] + fn payload_received_interleaving() { + let test = get_gloas_payload_received_interleaving_test_definition(); + test.run(); + } } diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 4f89e6084f6..908d3914016 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ 
-2,11 +2,13 @@ use crate::error::InvalidBestNodeInfo; use crate::{Block, ExecutionStatus, JustifiedBalances, PayloadStatus, error::Error}; use fixed_bytes::FixedBytesExtended; use serde::{Deserialize, Serialize}; +use ssz::BitVector; use ssz::Encode; use ssz::four_byte_option_impl; use ssz_derive::{Decode, Encode}; use std::collections::{HashMap, HashSet}; use superstruct::superstruct; +use typenum::U512; use types::{ AttestationShufflingId, ChainSpec, Checkpoint, Epoch, EthSpec, ExecutionBlockHash, Hash256, Slot, @@ -131,14 +133,20 @@ pub struct ProtoNode { pub execution_payload_block_hash: ExecutionBlockHash, /// PTC timeliness vote bitfield, indexed by PTC committee position. /// Bit i set means PTC member i voted `payload_present = true`. - /// Tiebreak derived as: `count_ones() > ptc_size / 2`. + /// Tiebreak derived as: `num_set_bits() > ptc_size / 2`. #[superstruct(only(V29))] - pub payload_timeliness_votes: Vec, + pub payload_timeliness_votes: BitVector, /// PTC data availability vote bitfield, indexed by PTC committee position. /// Bit i set means PTC member i voted `blob_data_available = true`. - /// Tiebreak derived as: `count_ones() > ptc_size / 2`. + /// Tiebreak derived as: `num_set_bits() > ptc_size / 2`. #[superstruct(only(V29))] - pub payload_data_availability_votes: Vec, + pub payload_data_availability_votes: BitVector, + /// Whether the execution payload for this block has been received and validated locally. + /// Maps to `root in store.payload_states` in the spec. + /// When true, `is_payload_timely` and `is_payload_data_available` return true + /// regardless of PTC vote counts. 
+ #[superstruct(only(V29), partial_getter(copy))] + pub payload_received: bool, } #[derive(PartialEq, Debug, Encode, Decode, Serialize, Deserialize, Copy, Clone)] @@ -385,26 +393,18 @@ impl ProtoArray { .checked_add(delta) .ok_or(Error::DeltaOverflow(parent_index))?; - // Per spec's `is_supporting_vote`: a vote for descendant B supports - // ancestor A's payload status based on B's `parent_payload_status`. - // Route the child's *total* weight delta to the parent's appropriate - // payload bucket. - match node.parent_payload_status() { - Ok(PayloadStatus::Full) => { - parent_delta.full_delta = parent_delta - .full_delta - .checked_add(delta) - .ok_or(Error::DeltaOverflow(parent_index))?; - } - Ok(PayloadStatus::Empty) => { - parent_delta.empty_delta = parent_delta - .empty_delta - .checked_add(delta) - .ok_or(Error::DeltaOverflow(parent_index))?; - } - // Pending or V17 nodes: no payload propagation. - _ => {} - } + // Per spec's `is_supporting_vote`: a vote supports a parent's + // FULL/EMPTY virtual node based on the voter's `payload_present` + // flag, NOT based on which child the vote goes through. + // Propagate each child's full/empty deltas independently. + parent_delta.full_delta = parent_delta + .full_delta + .checked_add(node_full_delta) + .ok_or(Error::DeltaOverflow(parent_index))?; + parent_delta.empty_delta = parent_delta + .empty_delta + .checked_add(node_empty_delta) + .ok_or(Error::DeltaOverflow(parent_index))?; } } @@ -540,8 +540,9 @@ impl ProtoArray { empty_payload_weight: 0, full_payload_weight: 0, execution_payload_block_hash, - payload_timeliness_votes: empty_ptc_bitfield(E::ptc_size()), - payload_data_availability_votes: empty_ptc_bitfield(E::ptc_size()), + payload_timeliness_votes: BitVector::default(), + payload_data_availability_votes: BitVector::default(), + payload_received: false, }) }; @@ -584,9 +585,11 @@ impl ProtoArray { Ok(()) } - /// Process an excution payload for a Gloas block. 
+ /// Process an execution payload for a Gloas block. /// - /// this function assumes the + /// Sets `payload_received` to true, which makes `is_payload_timely` and + /// `is_payload_data_available` return true regardless of PTC votes. + /// This maps to `store.payload_states[root] = state` in the spec. pub fn on_valid_execution_payload(&mut self, block_root: Hash256) -> Result<(), Error> { let index = *self .indices @@ -599,10 +602,7 @@ impl ProtoArray { let v29 = node .as_v29_mut() .map_err(|_| Error::InvalidNodeVariant { block_root })?; - // A valid execution payload means the payload is timely and data is available. - // Set all bits to ensure the threshold is met regardless of PTC size. - v29.payload_timeliness_votes.fill(0xFF); - v29.payload_data_availability_votes.fill(0xFF); + v29.payload_received = true; Ok(()) } @@ -669,8 +669,13 @@ impl ProtoArray { }); } }, - // Gloas nodes don't carry `ExecutionStatus`. + // Gloas nodes don't carry `ExecutionStatus`. Mark the validated + // block as payload-received so that `is_payload_timely` / + // `is_payload_data_available` and `index == 1` attestations work. ProtoNode::V29(node) => { + if index == verified_node_index { + node.payload_received = true; + } if let Some(parent_index) = node.parent { parent_index } else { @@ -1057,6 +1062,22 @@ impl ProtoArray { best_finalized_checkpoint, )?; + // Per spec `should_extend_payload`: if the proposer-boosted block is a child of + // this parent and extends Empty, force Empty preference regardless of + // weights/tiebreaker. 
+ let proposer_boost_root = self.previous_proposer_boost.root; + let proposer_boost = !proposer_boost_root.is_zero() + && self + .indices + .get(&proposer_boost_root) + .and_then(|&idx| self.nodes.get(idx)) + .is_some_and(|boost_node| { + boost_node.parent() == Some(parent_index) + && boost_node + .parent_payload_status() + .map_or(false, |s| s != PayloadStatus::Full) + }); + // These three variables are aliases to the three options that we may set the // `parent.best_child` and `parent.best_descendant` to. // @@ -1112,12 +1133,14 @@ impl ProtoArray { child, current_slot, E::ptc_size(), + proposer_boost, ); let best_child_matches = child_matches_parent_payload_preference( parent, best_child, current_slot, E::ptc_size(), + proposer_boost, ); if child_matches && !best_child_matches { @@ -1390,27 +1413,30 @@ impl ProtoArray { } /// For V29 parents, returns `true` if the child's `parent_payload_status` matches the parent's -/// preferred payload status. When full and empty weights are unequal, the higher weight wins. -/// When equal, the tiebreaker uses the parent's `payload_tiebreak`: prefer Full if the block -/// was timely and data is available; otherwise prefer Empty. -/// For V17 parents (or mixed), always returns `true` (no payload preference). +/// preferred payload status per spec `should_extend_payload`. /// -/// TODO(gloas): the spec's `should_extend_payload` has additional conditions beyond the -/// tiebreaker: it also checks proposer_boost_root (empty, different parent, or extends full). -/// See: https://github.com/ethereum/consensus-specs/blob/master/specs/gloas/fork-choice.md#new-should_extend_payload +/// If `proposer_boost` is set, the parent unconditionally prefers Empty (the proposer-boosted +/// block is a child of this parent and extends Empty). Otherwise, when full and empty weights +/// are unequal the higher weight wins; when equal, the tiebreaker uses PTC votes. 
/// -/// TODO(gloas): the spec's `should_extend_payload` has additional conditions beyond the -/// tiebreaker: it also checks proposer_boost_root (empty, different parent, or extends full). -/// See: https://github.com/ethereum/consensus-specs/blob/master/specs/gloas/fork-choice.md#new-should_extend_payload +/// For V17 parents (or mixed), always returns `true` (no payload preference). fn child_matches_parent_payload_preference( parent: &ProtoNode, child: &ProtoNode, current_slot: Slot, ptc_size: usize, + proposer_boost: bool, ) -> bool { let (Ok(parent_v29), Ok(child_v29)) = (parent.as_v29(), child.as_v29()) else { return true; }; + + // Per spec `should_extend_payload`: if the proposer-boosted block extends Empty from + // this parent, unconditionally prefer Empty. + if proposer_boost { + return child_v29.parent_payload_status == PayloadStatus::Empty; + } + // Per spec `get_weight`: FULL/EMPTY virtual nodes at `current_slot - 1` have weight 0. // The PTC is still voting, so payload preference is determined solely by the tiebreaker. let use_tiebreaker_only = parent.slot() + 1 == current_slot; @@ -1424,8 +1450,15 @@ fn child_matches_parent_payload_preference( false } else { // Equal weights (or current-slot parent): tiebreaker per spec. - is_payload_timely(&parent_v29.payload_timeliness_votes, ptc_size) - && is_payload_data_available(&parent_v29.payload_data_availability_votes, ptc_size) + is_payload_timely( + &parent_v29.payload_timeliness_votes, + ptc_size, + parent_v29.payload_received, + ) && is_payload_data_available( + &parent_v29.payload_data_availability_votes, + ptc_size, + parent_v29.payload_received, + ) }; if prefers_full { child_v29.parent_payload_status == PayloadStatus::Full @@ -1434,24 +1467,30 @@ fn child_matches_parent_payload_preference( } } -/// Count the number of set bits in a byte-slice bitfield. 
-pub fn count_set_bits(bitfield: &[u8]) -> usize { - bitfield.iter().map(|b| b.count_ones() as usize).sum() -} - -/// Create a zero-initialized bitfield for the given PTC size. -pub fn empty_ptc_bitfield(ptc_size: usize) -> Vec { - vec![0u8; ptc_size.div_ceil(8)] -} - /// Derive `is_payload_timely` from the timeliness vote bitfield. -pub fn is_payload_timely(timeliness_votes: &[u8], ptc_size: usize) -> bool { - count_set_bits(timeliness_votes) > ptc_size / 2 +/// +/// Per spec: returns false if the payload has not been received locally +/// (`payload_received == false`, i.e. `root not in store.payload_states`), +/// regardless of PTC votes. Both local receipt and PTC threshold are required. +pub fn is_payload_timely( + timeliness_votes: &BitVector, + ptc_size: usize, + payload_received: bool, +) -> bool { + payload_received && timeliness_votes.num_set_bits() > ptc_size / 2 } /// Derive `is_payload_data_available` from the data availability vote bitfield. -pub fn is_payload_data_available(availability_votes: &[u8], ptc_size: usize) -> bool { - count_set_bits(availability_votes) > ptc_size / 2 +/// +/// Per spec: returns false if the payload has not been received locally +/// (`payload_received == false`, i.e. `root not in store.payload_states`), +/// regardless of PTC votes. Both local receipt and PTC threshold are required. +pub fn is_payload_data_available( + availability_votes: &BitVector, + ptc_size: usize, + payload_received: bool, +) -> bool { + payload_received && availability_votes.num_set_bits() > ptc_size / 2 } /// A helper method to calculate the proposer boost based on the given `justified_balances`. 
diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index 021d62e63f9..e1b8c43ff16 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -63,9 +63,9 @@ pub enum ExecutionStatus { #[ssz(enum_behaviour = "tag")] #[repr(u8)] pub enum PayloadStatus { - Pending = 0, - Empty = 1, - Full = 2, + Empty = 0, + Full = 1, + Pending = 2, } impl ExecutionStatus { @@ -523,12 +523,14 @@ impl ProtoArrayForkChoice { validator_index: usize, block_root: Hash256, attestation_slot: Slot, + payload_present: bool, ) -> Result<(), String> { let vote = self.votes.get_mut(validator_index); if attestation_slot > vote.next_slot || *vote == VoteTracker::default() { vote.next_root = block_root; vote.next_slot = attestation_slot; + vote.next_payload_present = payload_present; } Ok(()) @@ -560,23 +562,14 @@ impl ProtoArrayForkChoice { .as_v29_mut() .map_err(|_| format!("process_payload_attestation: node {block_root:?} is not V29"))?; - let byte_index = ptc_index / 8; - let bit_mask = 1u8 << (ptc_index % 8); - - if let Some(byte) = v29.payload_timeliness_votes.get_mut(byte_index) { - if payload_present { - *byte |= bit_mask; - } else { - *byte &= !bit_mask; - } - } - if let Some(byte) = v29.payload_data_availability_votes.get_mut(byte_index) { - if blob_data_available { - *byte |= bit_mask; - } else { - *byte &= !bit_mask; - } - } + v29.payload_timeliness_votes + .set(ptc_index, payload_present) + .map_err(|e| format!("process_payload_attestation: timeliness set failed: {e:?}"))?; + v29.payload_data_availability_votes + .set(ptc_index, blob_data_available) + .map_err(|e| { + format!("process_payload_attestation: data availability set failed: {e:?}") + })?; Ok(()) } @@ -981,6 +974,14 @@ impl ProtoArrayForkChoice { block.execution_status().ok() } + /// Returns whether the execution payload for a block has been received. 
+ /// Returns `false` for pre-GLOAS (V17) nodes or unknown blocks. + pub fn is_payload_received(&self, block_root: &Hash256) -> bool { + self.get_proto_node(block_root) + .and_then(|node| node.payload_received().ok()) + .unwrap_or(false) + } + /// Returns the weight of a given block. pub fn get_weight(&self, block_root: &Hash256) -> Option { let block_index = self.proto_array.indices.get(block_root)?; @@ -1004,9 +1005,15 @@ impl ProtoArrayForkChoice { Some(PayloadStatus::Full) } else if v29.empty_payload_weight > v29.full_payload_weight { Some(PayloadStatus::Empty) - } else if is_payload_timely(&v29.payload_timeliness_votes, E::ptc_size()) - && is_payload_data_available(&v29.payload_data_availability_votes, E::ptc_size()) - { + } else if is_payload_timely( + &v29.payload_timeliness_votes, + E::ptc_size(), + v29.payload_received, + ) && is_payload_data_available( + &v29.payload_data_availability_votes, + E::ptc_size(), + v29.payload_received, + ) { Some(PayloadStatus::Full) } else { Some(PayloadStatus::Empty) From 916d9fb018613f3e6caac67b85c15541f935bc36 Mon Sep 17 00:00:00 2001 From: hopinheimer Date: Mon, 16 Mar 2026 07:00:51 -0400 Subject: [PATCH 014/127] changes --- .../beacon_chain/src/block_verification.rs | 13 +++--- .../src/fork_choice_test_definition.rs | 7 +++- .../gloas_payload.rs | 3 ++ consensus/proto_array/src/proto_array.rs | 8 +++- .../src/proto_array_fork_choice.rs | 42 ++++++++++++------- 5 files changed, 50 insertions(+), 23 deletions(-) diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index fe66b2f8d6d..a452d528a12 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -1961,12 +1961,13 @@ fn load_parent>( { if block.as_block().is_parent_block_full(parent_bid_block_hash) { // TODO(gloas): loading the envelope here is not very efficient - let envelope = chain.store.get_payload_envelope(&root)?.ok_or_else(|| { - 
BeaconChainError::DBInconsistent(format!( - "Missing envelope for parent block {root:?}", - )) - })?; - (StatePayloadStatus::Full, envelope.message.state_root) + if let Some(envelope) = chain.store.get_payload_envelope(&root)? { + (StatePayloadStatus::Full, envelope.message.state_root) + } else { + // The envelope hasn't been stored yet (e.g. genesis block, or payload + // not yet delivered). Fall back to the pending/empty state. + (StatePayloadStatus::Pending, parent_block.state_root()) + } } else { (StatePayloadStatus::Pending, parent_block.state_root()) } diff --git a/consensus/proto_array/src/fork_choice_test_definition.rs b/consensus/proto_array/src/fork_choice_test_definition.rs index 16c7df4ca26..b36e9c21170 100644 --- a/consensus/proto_array/src/fork_choice_test_definition.rs +++ b/consensus/proto_array/src/fork_choice_test_definition.rs @@ -91,6 +91,7 @@ pub enum Operation { AssertHeadPayloadStatus { head_root: Hash256, expected_status: PayloadStatus, + current_slot: Slot, }, SetPayloadTiebreak { block_root: Hash256, @@ -456,9 +457,13 @@ impl ForkChoiceTestDefinition { Operation::AssertHeadPayloadStatus { head_root, expected_status, + current_slot, } => { let actual = fork_choice - .head_payload_status::(&head_root) + .head_payload_status::( + &head_root, + current_slot, + ) .unwrap_or_else(|| { panic!( "AssertHeadPayloadStatus: head root not found at op index {}", diff --git a/consensus/proto_array/src/fork_choice_test_definition/gloas_payload.rs b/consensus/proto_array/src/fork_choice_test_definition/gloas_payload.rs index 84e2878d32f..e19fb196f26 100644 --- a/consensus/proto_array/src/fork_choice_test_definition/gloas_payload.rs +++ b/consensus/proto_array/src/fork_choice_test_definition/gloas_payload.rs @@ -145,6 +145,7 @@ pub fn get_gloas_payload_probe_test_definition() -> ForkChoiceTestDefinition { ops.push(Operation::AssertHeadPayloadStatus { head_root: get_root(1), expected_status: PayloadStatus::Empty, + current_slot: Slot::new(0), }); // Flip 
validator 0 to Empty; both bits now clear. @@ -170,6 +171,7 @@ pub fn get_gloas_payload_probe_test_definition() -> ForkChoiceTestDefinition { ops.push(Operation::AssertHeadPayloadStatus { head_root: get_root(1), expected_status: PayloadStatus::Empty, + current_slot: Slot::new(0), }); // Same-slot attestation to a new head candidate should be Pending (no payload bucket change). @@ -204,6 +206,7 @@ pub fn get_gloas_payload_probe_test_definition() -> ForkChoiceTestDefinition { ops.push(Operation::AssertHeadPayloadStatus { head_root: get_root(5), expected_status: PayloadStatus::Empty, + current_slot: Slot::new(0), }); ForkChoiceTestDefinition { diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 908d3914016..5a0f49e64de 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -1448,8 +1448,8 @@ fn child_matches_parent_payload_preference( && parent_v29.empty_payload_weight > parent_v29.full_payload_weight { false - } else { - // Equal weights (or current-slot parent): tiebreaker per spec. + } else if use_tiebreaker_only { + // Previous slot: should_extend_payload = is_payload_timely && is_payload_data_available. is_payload_timely( &parent_v29.payload_timeliness_votes, ptc_size, @@ -1459,6 +1459,10 @@ fn child_matches_parent_payload_preference( ptc_size, parent_v29.payload_received, ) + } else { + // Not previous slot: should_extend_payload = true. + // Full wins the tiebreaker (1 > 0) when the payload has been received. 
+ parent_v29.payload_received }; if prefers_full { child_v29.parent_payload_status == PayloadStatus::Full diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index e1b8c43ff16..b50db01561f 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -991,29 +991,43 @@ impl ProtoArrayForkChoice { .map(|node| node.weight()) } - /// Returns the payload status of the head node based on accumulated weights. + /// Returns the payload status of the head node based on accumulated weights and tiebreaker. /// /// Returns `Full` if `full_payload_weight > empty_payload_weight`. /// Returns `Empty` if `empty_payload_weight > full_payload_weight`. - /// On ties, consult the node's runtime `payload_tiebreak`: prefer `Full` only when timely and - /// data is available, otherwise `Empty`. - /// Returns `Empty` otherwise. Returns `None` for V17 nodes. - pub fn head_payload_status(&self, head_root: &Hash256) -> Option { + /// On ties: + /// - Previous slot (`slot + 1 == current_slot`): prefer Full only when timely and + /// data available (per `should_extend_payload`). + /// - Otherwise: prefer Full when payload has been received. + /// Returns `None` for V17 nodes. 
+ pub fn head_payload_status( + &self, + head_root: &Hash256, + current_slot: Slot, + ) -> Option { let node = self.get_proto_node(head_root)?; let v29 = node.as_v29().ok()?; if v29.full_payload_weight > v29.empty_payload_weight { Some(PayloadStatus::Full) } else if v29.empty_payload_weight > v29.full_payload_weight { Some(PayloadStatus::Empty) - } else if is_payload_timely( - &v29.payload_timeliness_votes, - E::ptc_size(), - v29.payload_received, - ) && is_payload_data_available( - &v29.payload_data_availability_votes, - E::ptc_size(), - v29.payload_received, - ) { + } else if node.slot() + 1 == current_slot { + // Previous slot: should_extend_payload = is_payload_timely && is_payload_data_available + if is_payload_timely( + &v29.payload_timeliness_votes, + E::ptc_size(), + v29.payload_received, + ) && is_payload_data_available( + &v29.payload_data_availability_votes, + E::ptc_size(), + v29.payload_received, + ) { + Some(PayloadStatus::Full) + } else { + Some(PayloadStatus::Empty) + } + } else if v29.payload_received { + // Not previous slot: Full wins tiebreaker (1 > 0) when payload received. 
Some(PayloadStatus::Full) } else { Some(PayloadStatus::Empty) From 9ce88ea3c12c2256a3bfe2805621d35995a72926 Mon Sep 17 00:00:00 2001 From: hopinheimer Date: Mon, 16 Mar 2026 19:36:48 -0400 Subject: [PATCH 015/127] addressing comments: --- consensus/proto_array/src/error.rs | 1 - .../src/fork_choice_test_definition.rs | 5 +-- consensus/proto_array/src/proto_array.rs | 43 ++++++++++++++++--- .../src/proto_array_fork_choice.rs | 1 + 4 files changed, 39 insertions(+), 11 deletions(-) diff --git a/consensus/proto_array/src/error.rs b/consensus/proto_array/src/error.rs index d6bd7f2cbfa..04e747f5f6f 100644 --- a/consensus/proto_array/src/error.rs +++ b/consensus/proto_array/src/error.rs @@ -54,7 +54,6 @@ pub enum Error { }, InvalidEpochOffset(u64), Arith(ArithError), - GloasNotImplemented, InvalidNodeVariant { block_root: Hash256, }, diff --git a/consensus/proto_array/src/fork_choice_test_definition.rs b/consensus/proto_array/src/fork_choice_test_definition.rs index b36e9c21170..7f607c826fe 100644 --- a/consensus/proto_array/src/fork_choice_test_definition.rs +++ b/consensus/proto_array/src/fork_choice_test_definition.rs @@ -460,10 +460,7 @@ impl ForkChoiceTestDefinition { current_slot, } => { let actual = fork_choice - .head_payload_status::( - &head_root, - current_slot, - ) + .head_payload_status::(&head_root, current_slot) .unwrap_or_else(|| { panic!( "AssertHeadPayloadStatus: head root not found at op index {}", diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 5a0f49e64de..09538f25eb0 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -164,6 +164,22 @@ impl Default for ProposerBoost { } } +/// Accumulated score changes for a single proto-array node during a `find_head` pass. +/// +/// `delta` tracks the ordinary LMD-GHOST balance change applied to the concrete block node. +/// This is the same notion of weight that pre-GLOAS fork choice used. 
+/// +/// Under GLOAS we also need to track how votes contribute to the parent's virtual payload +/// branches: +/// +/// - `empty_delta` is the balance change attributable to votes that support the `Empty` payload +/// interpretation of the node +/// - `full_delta` is the balance change attributable to votes that support the `Full` payload +/// interpretation of the node +/// +/// Votes in `Pending` state only affect `delta`; they do not contribute to either payload bucket. +/// During score application these payload deltas are propagated independently up the tree so that +/// ancestors can compare children using payload-aware tie breaking. #[derive(Clone, PartialEq, Debug, Copy)] pub struct NodeDelta { pub delta: i64, @@ -172,8 +188,16 @@ pub struct NodeDelta { } impl NodeDelta { - /// Determine the payload bucket for a vote based on whether the vote's slot matches the - /// block's slot (Pending), or the vote's `payload_present` flag (Full/Empty). + /// Classify a vote into the payload bucket it contributes to for `block_slot`. + /// + /// Per the GLOAS model: + /// + /// - a same-slot vote is `Pending` + /// - a later vote with `payload_present = true` is `Full` + /// - a later vote with `payload_present = false` is `Empty` + /// + /// This classification is used only for payload-aware accounting; all votes still contribute to + /// the aggregate `delta`. pub fn payload_status( vote_slot: Slot, payload_present: bool, @@ -188,7 +212,9 @@ impl NodeDelta { } } - /// Add a balance to the appropriate payload status. + /// Add `balance` to the payload bucket selected by `status`. + /// + /// `Pending` votes do not affect payload buckets, so this becomes a no-op for that case. pub fn add_payload_delta( &mut self, status: PayloadStatus, @@ -206,7 +232,10 @@ impl NodeDelta { Ok(()) } - /// Create a delta that only affects the aggregate `delta` field. + /// Create a delta that only affects the aggregate block weight. 
+ /// + /// This is useful for callers or tests that only care about ordinary LMD-GHOST weight changes + /// and do not need payload-aware accounting. pub fn from_delta(delta: i64) -> Self { Self { delta, @@ -215,7 +244,9 @@ impl NodeDelta { } } - /// Subtract a balance from the appropriate payload status. + /// Subtract `balance` from the payload bucket selected by `status`. + /// + /// `Pending` votes do not affect payload buckets, so this becomes a no-op for that case. pub fn sub_payload_delta( &mut self, status: PayloadStatus, @@ -1075,7 +1106,7 @@ impl ProtoArray { boost_node.parent() == Some(parent_index) && boost_node .parent_payload_status() - .map_or(false, |s| s != PayloadStatus::Full) + .is_ok_and(|s| s != PayloadStatus::Full) }); // These three variables are aliases to the three options that we may set the diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index b50db01561f..ce634fbdbeb 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -999,6 +999,7 @@ impl ProtoArrayForkChoice { /// - Previous slot (`slot + 1 == current_slot`): prefer Full only when timely and /// data available (per `should_extend_payload`). /// - Otherwise: prefer Full when payload has been received. + /// /// Returns `None` for V17 nodes. 
pub fn head_payload_status( &self, From a7bcf0f07edf82301b6eacfeae3e1fe181ca64ca Mon Sep 17 00:00:00 2001 From: hopinheimer Date: Tue, 17 Mar 2026 01:49:40 -0400 Subject: [PATCH 016/127] enable ef tests @brech1 commit Co-authored-by: Co-author hopinheimer Co-authored-by: Co-author brech1 <11075677+brech1@users.noreply.github.com> --- .../gloas_payload.rs | 52 ++++++++--- consensus/proto_array/src/proto_array.rs | 75 ++++++++-------- .../src/proto_array_fork_choice.rs | 28 +++--- testing/ef_tests/Makefile | 2 +- testing/ef_tests/src/cases/fork_choice.rs | 86 ++++++++++++++++++- testing/ef_tests/src/handler.rs | 23 +++-- testing/ef_tests/tests/tests.rs | 6 ++ 7 files changed, 199 insertions(+), 73 deletions(-) diff --git a/consensus/proto_array/src/fork_choice_test_definition/gloas_payload.rs b/consensus/proto_array/src/fork_choice_test_definition/gloas_payload.rs index e19fb196f26..8dcf538bd42 100644 --- a/consensus/proto_array/src/fork_choice_test_definition/gloas_payload.rs +++ b/consensus/proto_array/src/fork_choice_test_definition/gloas_payload.rs @@ -51,6 +51,12 @@ pub fn get_gloas_chain_following_test_definition() -> ForkChoiceTestDefinition { execution_payload_block_hash: Some(get_hash(4)), }); + // Mark root_1 as having received its execution payload so that + // its FULL virtual node exists in the GLOAS fork choice tree. + ops.push(Operation::ProcessExecutionPayload { + block_root: get_root(1), + }); + ops.push(Operation::AssertParentPayloadStatus { block_root: get_root(1), expected_status: PayloadStatus::Full, @@ -111,6 +117,12 @@ pub fn get_gloas_payload_probe_test_definition() -> ForkChoiceTestDefinition { execution_payload_block_hash: Some(get_hash(1)), }); + // Mark root_1 as having received its execution payload so that + // its FULL virtual node exists in the GLOAS fork choice tree. 
+ ops.push(Operation::ProcessExecutionPayload { + block_root: get_root(1), + }); + // One Full and one Empty vote for the same head block: tie probes via runtime tiebreak, // which defaults to Empty unless timely+data-available evidence is set. ops.push(Operation::ProcessPayloadAttestation { @@ -263,6 +275,12 @@ pub fn get_gloas_find_head_vote_transition_test_definition() -> ForkChoiceTestDe execution_payload_block_hash: Some(get_hash(4)), }); + // Mark root_1 as having received its execution payload so that + // its FULL virtual node exists in the GLOAS fork choice tree. + ops.push(Operation::ProcessExecutionPayload { + block_root: get_root(1), + }); + // Equal branch weights: tiebreak FULL picks branch rooted at 3. ops.push(Operation::SetPayloadTiebreak { block_root: get_root(0), @@ -359,6 +377,12 @@ pub fn get_gloas_weight_priority_over_payload_preference_test_definition() execution_payload_block_hash: Some(get_hash(4)), }); + // Mark root_1 as having received its execution payload so that + // its FULL virtual node exists in the GLOAS fork choice tree. + ops.push(Operation::ProcessExecutionPayload { + block_root: get_root(1), + }); + // Parent prefers Full on equal branch weights (tiebreaker). ops.push(Operation::SetPayloadTiebreak { block_root: get_root(0), @@ -521,6 +545,12 @@ pub fn get_gloas_interleaved_attestations_test_definition() -> ForkChoiceTestDef execution_payload_block_hash: Some(get_hash(4)), }); + // Mark root_1 as having received its execution payload so that + // its FULL virtual node exists in the GLOAS fork choice tree. + ops.push(Operation::ProcessExecutionPayload { + block_root: get_root(1), + }); + // Step 4: Set tiebreaker to Empty on genesis → Empty branch wins. ops.push(Operation::SetPayloadTiebreak { block_root: get_root(0), @@ -619,10 +649,11 @@ pub fn get_gloas_payload_received_interleaving_test_definition() -> ForkChoiceTe expected_status: PayloadStatus::Empty, }); - // Before payload arrives: payload_received is false on genesis. 
+ // Per spec `get_forkchoice_store`: genesis starts with payload_received=true + // (anchor block is in `payload_states`). ops.push(Operation::AssertPayloadReceived { block_root: get_root(0), - expected: false, + expected: true, }); // Give one vote to each child so they have equal weight. @@ -637,38 +668,37 @@ pub fn get_gloas_payload_received_interleaving_test_definition() -> ForkChoiceTe attestation_slot: Slot::new(1), }); - // Equal weight, no payload received on genesis → tiebreaker uses PTC votes which - // require payload_received. Without it, is_payload_timely returns false → prefers Empty. - // Block 2 (Empty) wins because it matches the Empty preference. + // Equal weight, payload_received=true on genesis → tiebreaker uses + // payload_received (not previous slot, equal payload weights) → prefers Full. + // Block 1 (Full) wins because it matches the Full preference. ops.push(Operation::FindHead { justified_checkpoint: get_checkpoint(0), finalized_checkpoint: get_checkpoint(0), justified_state_balances: vec![1, 1], - expected_head: get_root(2), + expected_head: get_root(1), current_slot: Slot::new(100), }); - // Now the execution payload for genesis arrives and is validated. + // ProcessExecutionPayload on genesis is a no-op (already received at init). ops.push(Operation::ProcessExecutionPayload { block_root: get_root(0), }); - // payload_received is now true. ops.push(Operation::AssertPayloadReceived { block_root: get_root(0), expected: true, }); // Set PTC votes on genesis as timely + data available (simulates PTC voting). + // This doesn't change the preference since genesis is not the previous slot + // (slot 0 + 1 != current_slot 100). ops.push(Operation::SetPayloadTiebreak { block_root: get_root(0), is_timely: true, is_data_available: true, }); - // Now with payload_received=true and PTC votes exceeding threshold: - // is_payload_timely=true, is_payload_data_available=true → prefers Full. 
- // Block 1 (Full) wins because it matches the Full preference. + // Still prefers Full via payload_received tiebreaker → Block 1 (Full) wins. ops.push(Operation::FindHead { justified_checkpoint: get_checkpoint(0), finalized_checkpoint: get_checkpoint(0), diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 09538f25eb0..1b6c0c58bc6 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -552,6 +552,13 @@ impl ProtoArray { PayloadStatus::Full }; + // Per spec `get_forkchoice_store`: the anchor (genesis) block has + // its payload state initialized (`payload_states = {anchor_root: ...}`). + // Without `payload_received = true` on genesis, the FULL virtual + // child doesn't exist in the spec's `get_node_children`, making all + // Full concrete children of genesis unreachable in `get_head`. + let is_genesis = parent_index.is_none(); + ProtoNode::V29(ProtoNodeV29 { slot: block.slot, root: block.root, @@ -573,7 +580,7 @@ impl ProtoArray { execution_payload_block_hash, payload_timeliness_votes: BitVector::default(), payload_data_availability_votes: BitVector::default(), - payload_received: false, + payload_received: is_genesis, }) }; @@ -1120,6 +1127,18 @@ impl ProtoArray { ); let no_change = (parent.best_child(), parent.best_descendant()); + // For V29 (GLOAS) parents, the spec's virtual tree model requires choosing + // FULL or EMPTY direction at each node BEFORE considering concrete children. + // Only children whose parent_payload_status matches the preferred direction + // are eligible for best_child. This is PRIMARY, not a tiebreaker. 
+ let child_matches_dir = child_matches_parent_payload_preference( + parent, + child, + current_slot, + E::ptc_size(), + proposer_boost, + ); + let (new_best_child, new_best_descendant) = if let Some(best_child_index) = parent.best_child() { if best_child_index == child_index && !child_leads_to_viable_head { @@ -1143,6 +1162,14 @@ impl ProtoArray { best_finalized_checkpoint, )?; + let best_child_matches_dir = child_matches_parent_payload_preference( + parent, + best_child, + current_slot, + E::ptc_size(), + proposer_boost, + ); + if child_leads_to_viable_head && !best_child_leads_to_viable_head { // The child leads to a viable head, but the current best-child doesn't. change_to_child @@ -1150,49 +1177,27 @@ impl ProtoArray { // The best child leads to a viable head, but the child doesn't. no_change } else if child.weight() > best_child.weight() { - // Weight is the primary ordering criterion. + // Weight is the primary selector after viability. change_to_child } else if child.weight() < best_child.weight() { no_change + } else if child_matches_dir && !best_child_matches_dir { + // Equal weight: direction matching is the tiebreaker. + change_to_child + } else if !child_matches_dir && best_child_matches_dir { + no_change + } else if *child.root() >= *best_child.root() { + // Final tie-breaker: break by root hash. + change_to_child } else { - // Equal weights: for V29 parents, prefer the child whose - // parent_payload_status matches the parent's payload preference - // (full vs empty). This corresponds to the spec's - // `get_payload_status_tiebreaker` ordering in `get_head`. 
- let child_matches = child_matches_parent_payload_preference( - parent, - child, - current_slot, - E::ptc_size(), - proposer_boost, - ); - let best_child_matches = child_matches_parent_payload_preference( - parent, - best_child, - current_slot, - E::ptc_size(), - proposer_boost, - ); - - if child_matches && !best_child_matches { - // Child extends the preferred payload chain, best_child doesn't. - change_to_child - } else if !child_matches && best_child_matches { - // Best child extends the preferred payload chain, child doesn't. - no_change - } else if *child.root() >= *best_child.root() { - // Final tie-breaker: both match or both don't, break by root. - change_to_child - } else { - no_change - } + no_change } } } else if child_leads_to_viable_head { - // There is no current best-child and the child is viable. + // No current best-child: set if child is viable. change_to_child } else { - // There is no current best-child but the child is not viable. + // Child is not viable. no_change }; diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index ce634fbdbeb..4f5fe45c220 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -1004,7 +1004,7 @@ impl ProtoArrayForkChoice { pub fn head_payload_status( &self, head_root: &Hash256, - current_slot: Slot, + _current_slot: Slot, ) -> Option { let node = self.get_proto_node(head_root)?; let v29 = node.as_v29().ok()?; @@ -1012,23 +1012,15 @@ impl ProtoArrayForkChoice { Some(PayloadStatus::Full) } else if v29.empty_payload_weight > v29.full_payload_weight { Some(PayloadStatus::Empty) - } else if node.slot() + 1 == current_slot { - // Previous slot: should_extend_payload = is_payload_timely && is_payload_data_available - if is_payload_timely( - &v29.payload_timeliness_votes, - E::ptc_size(), - v29.payload_received, - ) && is_payload_data_available( - &v29.payload_data_availability_votes, 
- E::ptc_size(), - v29.payload_received, - ) { - Some(PayloadStatus::Full) - } else { - Some(PayloadStatus::Empty) - } - } else if v29.payload_received { - // Not previous slot: Full wins tiebreaker (1 > 0) when payload received. + } else if is_payload_timely( + &v29.payload_timeliness_votes, + E::ptc_size(), + v29.payload_received, + ) && is_payload_data_available( + &v29.payload_data_availability_votes, + E::ptc_size(), + v29.payload_received, + ) { Some(PayloadStatus::Full) } else { Some(PayloadStatus::Empty) diff --git a/testing/ef_tests/Makefile b/testing/ef_tests/Makefile index fd8a3f6da0f..48378a4c958 100644 --- a/testing/ef_tests/Makefile +++ b/testing/ef_tests/Makefile @@ -1,6 +1,6 @@ # To download/extract nightly tests, run: # CONSENSUS_SPECS_TEST_VERSION=nightly make -CONSENSUS_SPECS_TEST_VERSION ?= v1.7.0-alpha.2 +CONSENSUS_SPECS_TEST_VERSION ?= v1.7.0-alpha.3 REPO_NAME := consensus-spec-tests OUTPUT_DIR := ./$(REPO_NAME) diff --git a/testing/ef_tests/src/cases/fork_choice.rs b/testing/ef_tests/src/cases/fork_choice.rs index 11b2df01238..054c65d0169 100644 --- a/testing/ef_tests/src/cases/fork_choice.rs +++ b/testing/ef_tests/src/cases/fork_choice.rs @@ -30,7 +30,8 @@ use types::{ Attestation, AttestationRef, AttesterSlashing, AttesterSlashingRef, BeaconBlock, BeaconState, BlobSidecar, BlobsList, BlockImportSource, Checkpoint, DataColumnSidecar, DataColumnSidecarList, DataColumnSubnetId, ExecutionBlockHash, Hash256, IndexedAttestation, - KzgProof, ProposerPreparationData, SignedBeaconBlock, Slot, Uint256, + KzgProof, ProposerPreparationData, SignedBeaconBlock, SignedExecutionPayloadEnvelope, Slot, + Uint256, }; // When set to true, cache any states fetched from the db. 
@@ -72,6 +73,7 @@ pub struct Checks { proposer_boost_root: Option, get_proposer_head: Option, should_override_forkchoice_update: Option, + head_payload_status: Option, } #[derive(Debug, Clone, Deserialize)] @@ -94,7 +96,15 @@ impl From for PayloadStatusV1 { #[derive(Debug, Clone, Deserialize)] #[serde(untagged, deny_unknown_fields)] -pub enum Step { +pub enum Step< + TBlock, + TBlobs, + TColumns, + TAttestation, + TAttesterSlashing, + TPowBlock, + TExecutionPayload = String, +> { Tick { tick: u64, }, @@ -128,6 +138,10 @@ pub enum Step, valid: bool, }, + OnExecutionPayload { + execution_payload: TExecutionPayload, + valid: bool, + }, } #[derive(Debug, Clone, Deserialize)] @@ -151,6 +165,7 @@ pub struct ForkChoiceTest { Attestation, AttesterSlashing, PowBlock, + SignedExecutionPayloadEnvelope, >, >, } @@ -271,6 +286,17 @@ impl LoadCase for ForkChoiceTest { valid, }) } + Step::OnExecutionPayload { + execution_payload, + valid, + } => { + let envelope = + ssz_decode_file(&path.join(format!("{execution_payload}.ssz_snappy")))?; + Ok(Step::OnExecutionPayload { + execution_payload: envelope, + valid, + }) + } }) .collect::>()?; let anchor_state = ssz_decode_state(&path.join("anchor_state.ssz_snappy"), spec)?; @@ -359,6 +385,7 @@ impl Case for ForkChoiceTest { proposer_boost_root, get_proposer_head, should_override_forkchoice_update: should_override_fcu, + head_payload_status, } = checks.as_ref(); if let Some(expected_head) = head { @@ -405,6 +432,10 @@ impl Case for ForkChoiceTest { if let Some(expected_proposer_head) = get_proposer_head { tester.check_expected_proposer_head(*expected_proposer_head)?; } + + if let Some(expected_status) = head_payload_status { + tester.check_head_payload_status(*expected_status)?; + } } Step::MaybeValidBlockAndColumns { @@ -414,6 +445,13 @@ impl Case for ForkChoiceTest { } => { tester.process_block_and_columns(block.clone(), columns.clone(), *valid)?; } + Step::OnExecutionPayload { + execution_payload, + valid, + } => { + tester + 
.process_execution_payload(execution_payload.beacon_block_root(), *valid)?; + } } } @@ -931,6 +969,50 @@ impl Tester { check_equal("proposer_head", proposer_head, expected_proposer_head) } + pub fn process_execution_payload(&self, block_root: Hash256, valid: bool) -> Result<(), Error> { + let result = self + .harness + .chain + .canonical_head + .fork_choice_write_lock() + .on_execution_payload(block_root); + + if valid { + result.map_err(|e| { + Error::InternalError(format!( + "on_execution_payload for block root {} failed: {:?}", + block_root, e + )) + })?; + } else if result.is_ok() { + return Err(Error::DidntFail(format!( + "on_execution_payload for block root {} should have failed", + block_root + ))); + } + + Ok(()) + } + + pub fn check_head_payload_status(&self, expected_status: u8) -> Result<(), Error> { + let head = self.find_head()?; + let head_root = head.head_block_root(); + let current_slot = self.harness.chain.slot().map_err(|e| { + Error::InternalError(format!("reading current slot failed with {:?}", e)) + })?; + let fc = self.harness.chain.canonical_head.fork_choice_read_lock(); + let actual_status = fc + .proto_array() + .head_payload_status::(&head_root, current_slot) + .ok_or_else(|| { + Error::InternalError(format!( + "head_payload_status not found for head root {}", + head_root + )) + })?; + check_equal("head_payload_status", actual_status as u8, expected_status) + } + pub fn check_should_override_fcu( &self, expected_should_override_fcu: ShouldOverrideFcu, diff --git a/testing/ef_tests/src/handler.rs b/testing/ef_tests/src/handler.rs index da3c5533b68..895b8f26567 100644 --- a/testing/ef_tests/src/handler.rs +++ b/testing/ef_tests/src/handler.rs @@ -709,15 +709,27 @@ impl Handler for ForkChoiceHandler { return false; } - // No FCU override tests prior to bellatrix. + // No FCU override tests prior to bellatrix, and removed in Gloas. 
if self.handler_name == "should_override_forkchoice_update" - && !fork_name.bellatrix_enabled() + && (!fork_name.bellatrix_enabled() || fork_name.gloas_enabled()) { return false; } - // Deposit tests exist only after Electra. - if self.handler_name == "deposit_with_reorg" && !fork_name.electra_enabled() { + // Deposit tests exist only for Electra and Fulu (not Gloas). + if self.handler_name == "deposit_with_reorg" + && (!fork_name.electra_enabled() || fork_name.gloas_enabled()) + { + return false; + } + + // Proposer head tests removed in Gloas. + if self.handler_name == "get_proposer_head" && fork_name.gloas_enabled() { + return false; + } + + // on_execution_payload tests exist only for Gloas. + if self.handler_name == "on_execution_payload" && !fork_name.gloas_enabled() { return false; } @@ -727,8 +739,7 @@ impl Handler for ForkChoiceHandler { } fn disabled_forks(&self) -> Vec { - // TODO(gloas): remove once we have Gloas fork choice tests - vec![ForkName::Gloas] + vec![] } } diff --git a/testing/ef_tests/tests/tests.rs b/testing/ef_tests/tests/tests.rs index 3893df2ef74..cb4abed90ab 100644 --- a/testing/ef_tests/tests/tests.rs +++ b/testing/ef_tests/tests/tests.rs @@ -1032,6 +1032,12 @@ fn fork_choice_deposit_with_reorg() { // There is no mainnet variant for this test. 
} +#[test] +fn fork_choice_on_execution_payload() { + ForkChoiceHandler::::new("on_execution_payload").run(); + ForkChoiceHandler::::new("on_execution_payload").run(); +} + #[test] fn optimistic_sync() { OptimisticSyncHandler::::default().run(); From ffec1a1f1e3a01259557f8b1284fae2ee3918cef Mon Sep 17 00:00:00 2001 From: hopinheimer Date: Tue, 17 Mar 2026 01:49:40 -0400 Subject: [PATCH 017/127] enable ef tests @brech1 commit --- .../gloas_payload.rs | 52 ++++++++--- consensus/proto_array/src/proto_array.rs | 75 ++++++++-------- .../src/proto_array_fork_choice.rs | 28 +++--- testing/ef_tests/Makefile | 2 +- testing/ef_tests/src/cases/fork_choice.rs | 86 ++++++++++++++++++- testing/ef_tests/src/handler.rs | 23 +++-- testing/ef_tests/tests/tests.rs | 6 ++ 7 files changed, 199 insertions(+), 73 deletions(-) diff --git a/consensus/proto_array/src/fork_choice_test_definition/gloas_payload.rs b/consensus/proto_array/src/fork_choice_test_definition/gloas_payload.rs index e19fb196f26..8dcf538bd42 100644 --- a/consensus/proto_array/src/fork_choice_test_definition/gloas_payload.rs +++ b/consensus/proto_array/src/fork_choice_test_definition/gloas_payload.rs @@ -51,6 +51,12 @@ pub fn get_gloas_chain_following_test_definition() -> ForkChoiceTestDefinition { execution_payload_block_hash: Some(get_hash(4)), }); + // Mark root_1 as having received its execution payload so that + // its FULL virtual node exists in the GLOAS fork choice tree. + ops.push(Operation::ProcessExecutionPayload { + block_root: get_root(1), + }); + ops.push(Operation::AssertParentPayloadStatus { block_root: get_root(1), expected_status: PayloadStatus::Full, @@ -111,6 +117,12 @@ pub fn get_gloas_payload_probe_test_definition() -> ForkChoiceTestDefinition { execution_payload_block_hash: Some(get_hash(1)), }); + // Mark root_1 as having received its execution payload so that + // its FULL virtual node exists in the GLOAS fork choice tree. 
+ ops.push(Operation::ProcessExecutionPayload { + block_root: get_root(1), + }); + // One Full and one Empty vote for the same head block: tie probes via runtime tiebreak, // which defaults to Empty unless timely+data-available evidence is set. ops.push(Operation::ProcessPayloadAttestation { @@ -263,6 +275,12 @@ pub fn get_gloas_find_head_vote_transition_test_definition() -> ForkChoiceTestDe execution_payload_block_hash: Some(get_hash(4)), }); + // Mark root_1 as having received its execution payload so that + // its FULL virtual node exists in the GLOAS fork choice tree. + ops.push(Operation::ProcessExecutionPayload { + block_root: get_root(1), + }); + // Equal branch weights: tiebreak FULL picks branch rooted at 3. ops.push(Operation::SetPayloadTiebreak { block_root: get_root(0), @@ -359,6 +377,12 @@ pub fn get_gloas_weight_priority_over_payload_preference_test_definition() execution_payload_block_hash: Some(get_hash(4)), }); + // Mark root_1 as having received its execution payload so that + // its FULL virtual node exists in the GLOAS fork choice tree. + ops.push(Operation::ProcessExecutionPayload { + block_root: get_root(1), + }); + // Parent prefers Full on equal branch weights (tiebreaker). ops.push(Operation::SetPayloadTiebreak { block_root: get_root(0), @@ -521,6 +545,12 @@ pub fn get_gloas_interleaved_attestations_test_definition() -> ForkChoiceTestDef execution_payload_block_hash: Some(get_hash(4)), }); + // Mark root_1 as having received its execution payload so that + // its FULL virtual node exists in the GLOAS fork choice tree. + ops.push(Operation::ProcessExecutionPayload { + block_root: get_root(1), + }); + // Step 4: Set tiebreaker to Empty on genesis → Empty branch wins. ops.push(Operation::SetPayloadTiebreak { block_root: get_root(0), @@ -619,10 +649,11 @@ pub fn get_gloas_payload_received_interleaving_test_definition() -> ForkChoiceTe expected_status: PayloadStatus::Empty, }); - // Before payload arrives: payload_received is false on genesis. 
+ // Per spec `get_forkchoice_store`: genesis starts with payload_received=true + // (anchor block is in `payload_states`). ops.push(Operation::AssertPayloadReceived { block_root: get_root(0), - expected: false, + expected: true, }); // Give one vote to each child so they have equal weight. @@ -637,38 +668,37 @@ pub fn get_gloas_payload_received_interleaving_test_definition() -> ForkChoiceTe attestation_slot: Slot::new(1), }); - // Equal weight, no payload received on genesis → tiebreaker uses PTC votes which - // require payload_received. Without it, is_payload_timely returns false → prefers Empty. - // Block 2 (Empty) wins because it matches the Empty preference. + // Equal weight, payload_received=true on genesis → tiebreaker uses + // payload_received (not previous slot, equal payload weights) → prefers Full. + // Block 1 (Full) wins because it matches the Full preference. ops.push(Operation::FindHead { justified_checkpoint: get_checkpoint(0), finalized_checkpoint: get_checkpoint(0), justified_state_balances: vec![1, 1], - expected_head: get_root(2), + expected_head: get_root(1), current_slot: Slot::new(100), }); - // Now the execution payload for genesis arrives and is validated. + // ProcessExecutionPayload on genesis is a no-op (already received at init). ops.push(Operation::ProcessExecutionPayload { block_root: get_root(0), }); - // payload_received is now true. ops.push(Operation::AssertPayloadReceived { block_root: get_root(0), expected: true, }); // Set PTC votes on genesis as timely + data available (simulates PTC voting). + // This doesn't change the preference since genesis is not the previous slot + // (slot 0 + 1 != current_slot 100). ops.push(Operation::SetPayloadTiebreak { block_root: get_root(0), is_timely: true, is_data_available: true, }); - // Now with payload_received=true and PTC votes exceeding threshold: - // is_payload_timely=true, is_payload_data_available=true → prefers Full. 
- // Block 1 (Full) wins because it matches the Full preference. + // Still prefers Full via payload_received tiebreaker → Block 1 (Full) wins. ops.push(Operation::FindHead { justified_checkpoint: get_checkpoint(0), finalized_checkpoint: get_checkpoint(0), diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 09538f25eb0..1b6c0c58bc6 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -552,6 +552,13 @@ impl ProtoArray { PayloadStatus::Full }; + // Per spec `get_forkchoice_store`: the anchor (genesis) block has + // its payload state initialized (`payload_states = {anchor_root: ...}`). + // Without `payload_received = true` on genesis, the FULL virtual + // child doesn't exist in the spec's `get_node_children`, making all + // Full concrete children of genesis unreachable in `get_head`. + let is_genesis = parent_index.is_none(); + ProtoNode::V29(ProtoNodeV29 { slot: block.slot, root: block.root, @@ -573,7 +580,7 @@ impl ProtoArray { execution_payload_block_hash, payload_timeliness_votes: BitVector::default(), payload_data_availability_votes: BitVector::default(), - payload_received: false, + payload_received: is_genesis, }) }; @@ -1120,6 +1127,18 @@ impl ProtoArray { ); let no_change = (parent.best_child(), parent.best_descendant()); + // For V29 (GLOAS) parents, the spec's virtual tree model requires choosing + // FULL or EMPTY direction at each node BEFORE considering concrete children. + // Only children whose parent_payload_status matches the preferred direction + // are eligible for best_child. This is PRIMARY, not a tiebreaker. 
+ let child_matches_dir = child_matches_parent_payload_preference( + parent, + child, + current_slot, + E::ptc_size(), + proposer_boost, + ); + let (new_best_child, new_best_descendant) = if let Some(best_child_index) = parent.best_child() { if best_child_index == child_index && !child_leads_to_viable_head { @@ -1143,6 +1162,14 @@ impl ProtoArray { best_finalized_checkpoint, )?; + let best_child_matches_dir = child_matches_parent_payload_preference( + parent, + best_child, + current_slot, + E::ptc_size(), + proposer_boost, + ); + if child_leads_to_viable_head && !best_child_leads_to_viable_head { // The child leads to a viable head, but the current best-child doesn't. change_to_child @@ -1150,49 +1177,27 @@ impl ProtoArray { // The best child leads to a viable head, but the child doesn't. no_change } else if child.weight() > best_child.weight() { - // Weight is the primary ordering criterion. + // Weight is the primary selector after viability. change_to_child } else if child.weight() < best_child.weight() { no_change + } else if child_matches_dir && !best_child_matches_dir { + // Equal weight: direction matching is the tiebreaker. + change_to_child + } else if !child_matches_dir && best_child_matches_dir { + no_change + } else if *child.root() >= *best_child.root() { + // Final tie-breaker: break by root hash. + change_to_child } else { - // Equal weights: for V29 parents, prefer the child whose - // parent_payload_status matches the parent's payload preference - // (full vs empty). This corresponds to the spec's - // `get_payload_status_tiebreaker` ordering in `get_head`. 
- let child_matches = child_matches_parent_payload_preference( - parent, - child, - current_slot, - E::ptc_size(), - proposer_boost, - ); - let best_child_matches = child_matches_parent_payload_preference( - parent, - best_child, - current_slot, - E::ptc_size(), - proposer_boost, - ); - - if child_matches && !best_child_matches { - // Child extends the preferred payload chain, best_child doesn't. - change_to_child - } else if !child_matches && best_child_matches { - // Best child extends the preferred payload chain, child doesn't. - no_change - } else if *child.root() >= *best_child.root() { - // Final tie-breaker: both match or both don't, break by root. - change_to_child - } else { - no_change - } + no_change } } } else if child_leads_to_viable_head { - // There is no current best-child and the child is viable. + // No current best-child: set if child is viable. change_to_child } else { - // There is no current best-child but the child is not viable. + // Child is not viable. no_change }; diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index ce634fbdbeb..4f5fe45c220 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -1004,7 +1004,7 @@ impl ProtoArrayForkChoice { pub fn head_payload_status( &self, head_root: &Hash256, - current_slot: Slot, + _current_slot: Slot, ) -> Option { let node = self.get_proto_node(head_root)?; let v29 = node.as_v29().ok()?; @@ -1012,23 +1012,15 @@ impl ProtoArrayForkChoice { Some(PayloadStatus::Full) } else if v29.empty_payload_weight > v29.full_payload_weight { Some(PayloadStatus::Empty) - } else if node.slot() + 1 == current_slot { - // Previous slot: should_extend_payload = is_payload_timely && is_payload_data_available - if is_payload_timely( - &v29.payload_timeliness_votes, - E::ptc_size(), - v29.payload_received, - ) && is_payload_data_available( - &v29.payload_data_availability_votes, 
- E::ptc_size(), - v29.payload_received, - ) { - Some(PayloadStatus::Full) - } else { - Some(PayloadStatus::Empty) - } - } else if v29.payload_received { - // Not previous slot: Full wins tiebreaker (1 > 0) when payload received. + } else if is_payload_timely( + &v29.payload_timeliness_votes, + E::ptc_size(), + v29.payload_received, + ) && is_payload_data_available( + &v29.payload_data_availability_votes, + E::ptc_size(), + v29.payload_received, + ) { Some(PayloadStatus::Full) } else { Some(PayloadStatus::Empty) diff --git a/testing/ef_tests/Makefile b/testing/ef_tests/Makefile index fd8a3f6da0f..48378a4c958 100644 --- a/testing/ef_tests/Makefile +++ b/testing/ef_tests/Makefile @@ -1,6 +1,6 @@ # To download/extract nightly tests, run: # CONSENSUS_SPECS_TEST_VERSION=nightly make -CONSENSUS_SPECS_TEST_VERSION ?= v1.7.0-alpha.2 +CONSENSUS_SPECS_TEST_VERSION ?= v1.7.0-alpha.3 REPO_NAME := consensus-spec-tests OUTPUT_DIR := ./$(REPO_NAME) diff --git a/testing/ef_tests/src/cases/fork_choice.rs b/testing/ef_tests/src/cases/fork_choice.rs index 11b2df01238..054c65d0169 100644 --- a/testing/ef_tests/src/cases/fork_choice.rs +++ b/testing/ef_tests/src/cases/fork_choice.rs @@ -30,7 +30,8 @@ use types::{ Attestation, AttestationRef, AttesterSlashing, AttesterSlashingRef, BeaconBlock, BeaconState, BlobSidecar, BlobsList, BlockImportSource, Checkpoint, DataColumnSidecar, DataColumnSidecarList, DataColumnSubnetId, ExecutionBlockHash, Hash256, IndexedAttestation, - KzgProof, ProposerPreparationData, SignedBeaconBlock, Slot, Uint256, + KzgProof, ProposerPreparationData, SignedBeaconBlock, SignedExecutionPayloadEnvelope, Slot, + Uint256, }; // When set to true, cache any states fetched from the db. 
@@ -72,6 +73,7 @@ pub struct Checks { proposer_boost_root: Option, get_proposer_head: Option, should_override_forkchoice_update: Option, + head_payload_status: Option, } #[derive(Debug, Clone, Deserialize)] @@ -94,7 +96,15 @@ impl From for PayloadStatusV1 { #[derive(Debug, Clone, Deserialize)] #[serde(untagged, deny_unknown_fields)] -pub enum Step { +pub enum Step< + TBlock, + TBlobs, + TColumns, + TAttestation, + TAttesterSlashing, + TPowBlock, + TExecutionPayload = String, +> { Tick { tick: u64, }, @@ -128,6 +138,10 @@ pub enum Step, valid: bool, }, + OnExecutionPayload { + execution_payload: TExecutionPayload, + valid: bool, + }, } #[derive(Debug, Clone, Deserialize)] @@ -151,6 +165,7 @@ pub struct ForkChoiceTest { Attestation, AttesterSlashing, PowBlock, + SignedExecutionPayloadEnvelope, >, >, } @@ -271,6 +286,17 @@ impl LoadCase for ForkChoiceTest { valid, }) } + Step::OnExecutionPayload { + execution_payload, + valid, + } => { + let envelope = + ssz_decode_file(&path.join(format!("{execution_payload}.ssz_snappy")))?; + Ok(Step::OnExecutionPayload { + execution_payload: envelope, + valid, + }) + } }) .collect::>()?; let anchor_state = ssz_decode_state(&path.join("anchor_state.ssz_snappy"), spec)?; @@ -359,6 +385,7 @@ impl Case for ForkChoiceTest { proposer_boost_root, get_proposer_head, should_override_forkchoice_update: should_override_fcu, + head_payload_status, } = checks.as_ref(); if let Some(expected_head) = head { @@ -405,6 +432,10 @@ impl Case for ForkChoiceTest { if let Some(expected_proposer_head) = get_proposer_head { tester.check_expected_proposer_head(*expected_proposer_head)?; } + + if let Some(expected_status) = head_payload_status { + tester.check_head_payload_status(*expected_status)?; + } } Step::MaybeValidBlockAndColumns { @@ -414,6 +445,13 @@ impl Case for ForkChoiceTest { } => { tester.process_block_and_columns(block.clone(), columns.clone(), *valid)?; } + Step::OnExecutionPayload { + execution_payload, + valid, + } => { + tester + 
.process_execution_payload(execution_payload.beacon_block_root(), *valid)?; + } } } @@ -931,6 +969,50 @@ impl Tester { check_equal("proposer_head", proposer_head, expected_proposer_head) } + pub fn process_execution_payload(&self, block_root: Hash256, valid: bool) -> Result<(), Error> { + let result = self + .harness + .chain + .canonical_head + .fork_choice_write_lock() + .on_execution_payload(block_root); + + if valid { + result.map_err(|e| { + Error::InternalError(format!( + "on_execution_payload for block root {} failed: {:?}", + block_root, e + )) + })?; + } else if result.is_ok() { + return Err(Error::DidntFail(format!( + "on_execution_payload for block root {} should have failed", + block_root + ))); + } + + Ok(()) + } + + pub fn check_head_payload_status(&self, expected_status: u8) -> Result<(), Error> { + let head = self.find_head()?; + let head_root = head.head_block_root(); + let current_slot = self.harness.chain.slot().map_err(|e| { + Error::InternalError(format!("reading current slot failed with {:?}", e)) + })?; + let fc = self.harness.chain.canonical_head.fork_choice_read_lock(); + let actual_status = fc + .proto_array() + .head_payload_status::(&head_root, current_slot) + .ok_or_else(|| { + Error::InternalError(format!( + "head_payload_status not found for head root {}", + head_root + )) + })?; + check_equal("head_payload_status", actual_status as u8, expected_status) + } + pub fn check_should_override_fcu( &self, expected_should_override_fcu: ShouldOverrideFcu, diff --git a/testing/ef_tests/src/handler.rs b/testing/ef_tests/src/handler.rs index da3c5533b68..895b8f26567 100644 --- a/testing/ef_tests/src/handler.rs +++ b/testing/ef_tests/src/handler.rs @@ -709,15 +709,27 @@ impl Handler for ForkChoiceHandler { return false; } - // No FCU override tests prior to bellatrix. + // No FCU override tests prior to bellatrix, and removed in Gloas. 
if self.handler_name == "should_override_forkchoice_update" - && !fork_name.bellatrix_enabled() + && (!fork_name.bellatrix_enabled() || fork_name.gloas_enabled()) { return false; } - // Deposit tests exist only after Electra. - if self.handler_name == "deposit_with_reorg" && !fork_name.electra_enabled() { + // Deposit tests exist only for Electra and Fulu (not Gloas). + if self.handler_name == "deposit_with_reorg" + && (!fork_name.electra_enabled() || fork_name.gloas_enabled()) + { + return false; + } + + // Proposer head tests removed in Gloas. + if self.handler_name == "get_proposer_head" && fork_name.gloas_enabled() { + return false; + } + + // on_execution_payload tests exist only for Gloas. + if self.handler_name == "on_execution_payload" && !fork_name.gloas_enabled() { return false; } @@ -727,8 +739,7 @@ impl Handler for ForkChoiceHandler { } fn disabled_forks(&self) -> Vec { - // TODO(gloas): remove once we have Gloas fork choice tests - vec![ForkName::Gloas] + vec![] } } diff --git a/testing/ef_tests/tests/tests.rs b/testing/ef_tests/tests/tests.rs index 3893df2ef74..cb4abed90ab 100644 --- a/testing/ef_tests/tests/tests.rs +++ b/testing/ef_tests/tests/tests.rs @@ -1032,6 +1032,12 @@ fn fork_choice_deposit_with_reorg() { // There is no mainnet variant for this test. 
} +#[test] +fn fork_choice_on_execution_payload() { + ForkChoiceHandler::::new("on_execution_payload").run(); + ForkChoiceHandler::::new("on_execution_payload").run(); +} + #[test] fn optimistic_sync() { OptimisticSyncHandler::::default().run(); From ab1305d49063cf5baa6df082e9c54083018c894a Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 19 Mar 2026 11:38:05 +1100 Subject: [PATCH 018/127] Propagate weight to parent's full/empty variants --- consensus/proto_array/src/proto_array.rs | 26 ++++++++++++++++-------- 1 file changed, 18 insertions(+), 8 deletions(-) diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index a42a1891cb6..50249430c9d 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -432,14 +432,24 @@ impl ProtoArray { // FULL/EMPTY virtual node based on the voter's `payload_present` // flag, NOT based on which child the vote goes through. // Propagate each child's full/empty deltas independently. - parent_delta.full_delta = parent_delta - .full_delta - .checked_add(node_full_delta) - .ok_or(Error::DeltaOverflow(parent_index))?; - parent_delta.empty_delta = parent_delta - .empty_delta - .checked_add(node_empty_delta) - .ok_or(Error::DeltaOverflow(parent_index))?; + match node.parent_payload_status() { + Ok(PayloadStatus::Full) => { + parent_delta.full_delta = parent_delta + .full_delta + .checked_add(delta) + .ok_or(Error::DeltaOverflow(parent_index))?; + } + Ok(PayloadStatus::Empty) => { + parent_delta.empty_delta = parent_delta + .empty_delta + .checked_add(delta) + .ok_or(Error::DeltaOverflow(parent_index))?; + } + Ok(PayloadStatus::Pending) | Err(..) => { + // Pending is not reachable. Parent payload status must be Full or Empty. + // TODO(gloas): add ParentPayloadStatus = Full | Empty. 
+ } + } } } From cc8466dfa537f90098cb9fc3421f4ed95a777bdc Mon Sep 17 00:00:00 2001 From: hopinheimer Date: Fri, 20 Mar 2026 16:10:43 -0400 Subject: [PATCH 019/127] fixing recursive calls with caching --- beacon_node/beacon_chain/src/beacon_chain.rs | 31 +- consensus/fork_choice/src/fork_choice.rs | 1 + .../src/fork_choice_test_definition.rs | 1 + consensus/proto_array/src/proto_array.rs | 435 ++++++++++++++++-- .../src/proto_array_fork_choice.rs | 54 ++- testing/ef_tests/src/cases/fork_choice.rs | 24 + 6 files changed, 487 insertions(+), 59 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 91dc219258f..96e4ab6c603 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -4845,22 +4845,21 @@ impl BeaconChain { // get_attestation_score(parent, parent_payload_status) where parent_payload_status // is determined by the head block's relationship to its parent. let head_weight = info.head_node.weight(); - let parent_weight = - if let (Ok(head_payload_status), Ok(parent_v29)) = ( - info.head_node.parent_payload_status(), - info.parent_node.as_v29(), - ) { - // Post-GLOAS: use the payload-filtered weight matching how the head - // extends from its parent. - match head_payload_status { - proto_array::PayloadStatus::Full => parent_v29.full_payload_weight, - proto_array::PayloadStatus::Empty => parent_v29.empty_payload_weight, - proto_array::PayloadStatus::Pending => info.parent_node.weight(), - } - } else { - // Pre-GLOAS or fork boundary: use total weight. - info.parent_node.weight() - }; + let parent_weight = if let (Ok(head_payload_status), Ok(parent_v29)) = ( + info.head_node.parent_payload_status(), + info.parent_node.as_v29(), + ) { + // Post-GLOAS: use the payload-filtered weight matching how the head + // extends from its parent. 
+ match head_payload_status { + proto_array::PayloadStatus::Full => parent_v29.full_payload_weight, + proto_array::PayloadStatus::Empty => parent_v29.empty_payload_weight, + proto_array::PayloadStatus::Pending => info.parent_node.weight(), + } + } else { + // Pre-GLOAS or fork boundary: use total weight. + info.parent_node.weight() + }; let (head_weak, parent_strong) = if fork_choice_slot == re_org_block_slot { ( diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index 30c56c97758..5dc081d6ce4 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -992,6 +992,7 @@ where unrealized_finalized_checkpoint: Some(unrealized_finalized_checkpoint), execution_payload_parent_hash, execution_payload_block_hash, + proposer_index: Some(block.proposer_index()), }, current_slot, self.justified_checkpoint(), diff --git a/consensus/proto_array/src/fork_choice_test_definition.rs b/consensus/proto_array/src/fork_choice_test_definition.rs index 7f607c826fe..a89073a7b86 100644 --- a/consensus/proto_array/src/fork_choice_test_definition.rs +++ b/consensus/proto_array/src/fork_choice_test_definition.rs @@ -274,6 +274,7 @@ impl ForkChoiceTestDefinition { unrealized_finalized_checkpoint: None, execution_payload_parent_hash, execution_payload_block_hash, + proposer_index: None, }; fork_choice .process_block::( diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 50249430c9d..422d05097b0 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -147,6 +147,19 @@ pub struct ProtoNode { /// regardless of PTC vote counts. #[superstruct(only(V29), partial_getter(copy))] pub payload_received: bool, + /// The proposer index for this block, used by `should_apply_proposer_boost` + /// to detect equivocations at the parent's slot. 
+ #[superstruct(only(V29), partial_getter(copy))] + pub proposer_index: u64, + /// Best child whose `parent_payload_status == Full`. + /// Maintained alongside `best_child` to avoid O(n) scans during the V29 head walk. + #[superstruct(only(V29), partial_getter(copy))] + #[ssz(with = "four_byte_option_usize")] + pub best_full_child: Option, + /// Best child whose `parent_payload_status == Empty`. + #[superstruct(only(V29), partial_getter(copy))] + #[ssz(with = "four_byte_option_usize")] + pub best_empty_child: Option, } #[derive(PartialEq, Debug, Encode, Decode, Serialize, Deserialize, Copy, Clone)] @@ -380,17 +393,12 @@ impl ProtoArray { } // If we find the node matching the current proposer boost root, increase // the delta by the new score amount (unless the block has an invalid execution status). - // - // https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/fork-choice.md#get_latest_attesting_balance - // - // TODO(gloas): proposer boost should also be subtracted from `empty_delta` per spec, - // since the spec creates a virtual vote with `payload_present=False` for the proposer - // boost, biasing toward Empty for non-current-slot payload decisions. + // For Gloas (V29), `should_apply_proposer_boost` is checked after the loop + // with final weights, and the boost is removed if needed. if let Some(proposer_score_boost) = spec.proposer_score_boost && proposer_boost_root != Hash256::zero() - && proposer_boost_root == node.root() - // Invalid nodes (or their ancestors) should not receive a proposer boost. 
- && !execution_status_is_invalid + && proposer_boost_root == node.root() + && !execution_status_is_invalid { proposer_score = calculate_committee_fraction::(new_justified_balances, proposer_score_boost) @@ -428,29 +436,87 @@ impl ProtoArray { .checked_add(delta) .ok_or(Error::DeltaOverflow(parent_index))?; - // Per spec's `is_supporting_vote`: a vote supports a parent's - // FULL/EMPTY virtual node based on the voter's `payload_present` - // flag, NOT based on which child the vote goes through. - // Propagate each child's full/empty deltas independently. - match node.parent_payload_status() { - Ok(PayloadStatus::Full) => { + // Route ALL child weight into the parent's FULL or EMPTY bucket + // based on the child's `parent_payload_status` (the ancestor path + // direction). If this child is on the FULL path from the parent, + // all weight supports the parent's FULL virtual node, and vice versa. + if let Ok(child_v29) = node.as_v29() { + if child_v29.parent_payload_status == PayloadStatus::Full { parent_delta.full_delta = parent_delta .full_delta .checked_add(delta) .ok_or(Error::DeltaOverflow(parent_index))?; - } - Ok(PayloadStatus::Empty) => { + } else { parent_delta.empty_delta = parent_delta .empty_delta .checked_add(delta) .ok_or(Error::DeltaOverflow(parent_index))?; } - Ok(PayloadStatus::Pending) | Err(..) => { - // Pending is not reachable. Parent payload status must be Full or Empty. - // TODO(gloas): add ParentPayloadStatus = Full | Empty. + } else { + // V17 child of a V29 parent (fork transition): treat as FULL + // since V17 nodes always have execution payloads inline. + parent_delta.full_delta = parent_delta + .full_delta + .checked_add(delta) + .ok_or(Error::DeltaOverflow(parent_index))?; + } + } + } + + // Gloas: now that all weights are final, check `should_apply_proposer_boost`. + // If the boost should NOT apply, walk from the boosted node to root and subtract + // `proposer_score` from weight and payload weights in a single pass. 
+ // We detect Gloas by checking the boosted node's variant (V29) directly. + if proposer_score > 0 + && let Some(&boost_index) = self.indices.get(&proposer_boost_root) + && self + .nodes + .get(boost_index) + .is_some_and(|n| n.as_v29().is_ok()) + && !self.should_apply_proposer_boost::( + boost_index, + proposer_score, + new_justified_balances, + spec, + )? + { + // Single walk: subtract proposer_score from weight and payload weights. + let mut walk_index = Some(boost_index); + let mut child_payload_status: Option = None; + while let Some(idx) = walk_index { + let node = self + .nodes + .get_mut(idx) + .ok_or(Error::InvalidNodeIndex(idx))?; + + *node.weight_mut() = node + .weight() + .checked_sub(proposer_score) + .ok_or(Error::DeltaOverflow(idx))?; + + // Subtract from the payload bucket that the child-on-path + // contributed to (based on the child's parent_payload_status). + if let Some(child_ps) = child_payload_status + && let Ok(v29) = node.as_v29_mut() + { + if child_ps == PayloadStatus::Full { + v29.full_payload_weight = v29 + .full_payload_weight + .checked_sub(proposer_score) + .ok_or(Error::DeltaOverflow(idx))?; + } else { + v29.empty_payload_weight = v29 + .empty_payload_weight + .checked_sub(proposer_score) + .ok_or(Error::DeltaOverflow(idx))?; } } + + child_payload_status = node.parent_payload_status().ok(); + walk_index = node.parent(); } + + proposer_score = 0; } // After applying all deltas, update the `previous_proposer_boost`. @@ -592,9 +658,31 @@ impl ProtoArray { empty_payload_weight: 0, full_payload_weight: 0, execution_payload_block_hash, - payload_timeliness_votes: BitVector::default(), - payload_data_availability_votes: BitVector::default(), + // Per spec `get_forkchoice_store`: the anchor block's PTC votes are + // initialized to all-True, ensuring `is_payload_timely` and + // `is_payload_data_available` return true for the anchor. 
+ payload_timeliness_votes: if is_genesis { + let mut bv = BitVector::new(); + for i in 0..bv.len() { + let _ = bv.set(i, true); + } + bv + } else { + BitVector::default() + }, + payload_data_availability_votes: if is_genesis { + let mut bv = BitVector::new(); + for i in 0..bv.len() { + let _ = bv.set(i, true); + } + bv + } else { + BitVector::default() + }, payload_received: is_genesis, + proposer_index: block.proposer_index.unwrap_or(0), + best_full_child: None, + best_empty_child: None, }) }; @@ -637,6 +725,66 @@ impl ProtoArray { Ok(()) } + /// Spec's `should_apply_proposer_boost` for Gloas. + /// + /// Returns `true` if the proposer boost should be kept. Returns `false` if the + /// boost should be subtracted (invalidated) because the parent is weak and there + /// are no equivocating blocks at the parent's slot. + fn should_apply_proposer_boost( + &self, + boost_index: usize, + proposer_score: u64, + justified_balances: &JustifiedBalances, + spec: &ChainSpec, + ) -> Result { + let boost_node = self + .nodes + .get(boost_index) + .ok_or(Error::InvalidNodeIndex(boost_index))?; + + let Some(parent_index) = boost_node.parent() else { + return Ok(true); // Genesis — always apply. + }; + + let parent = self + .nodes + .get(parent_index) + .ok_or(Error::InvalidNodeIndex(parent_index))?; + + // Parent not from the immediately previous slot — always apply. + if parent.slot() + 1 < boost_node.slot() { + return Ok(true); + } + + // Check if the parent is "weak" (low attestation weight). + // Parent weight currently includes the back-propagated boost, so subtract it. + let reorg_threshold = calculate_committee_fraction::( + justified_balances, + spec.reorg_head_weight_threshold.unwrap_or(20), + ) + .unwrap_or(0); + + let parent_weight_without_boost = parent.weight().saturating_sub(proposer_score); + if parent_weight_without_boost >= reorg_threshold { + return Ok(true); // Parent is not weak — apply. + } + + // Parent is weak. 
Apply boost unless there's an equivocating block at + // the parent's slot from the same proposer. + let parent_slot = parent.slot(); + let parent_root = parent.root(); + let parent_proposer = parent.proposer_index().unwrap_or(u64::MAX); + + let has_equivocation = self.nodes.iter().any(|n| { + n.as_v29().is_ok() + && n.slot() == parent_slot + && n.root() != parent_root + && n.proposer_index().unwrap_or(u64::MAX - 1) == parent_proposer + }); + + Ok(!has_equivocation) + } + /// Process an execution payload for a Gloas block. /// /// Sets `payload_received` to true, which makes `is_payload_timely` and @@ -965,11 +1113,6 @@ impl ProtoArray { // Since there are no valid descendants of a justified block with an invalid execution // payload, there would be no head to choose from. - // - // Fork choice is effectively broken until a new justified root is set. It might not be - // practically possible to set a new justified root if we are unable to find a new head. - // - // This scenario is *unsupported*. It represents a serious consensus failure. // Execution status tracking only exists on V17 (pre-Gloas) nodes. if let Ok(v17) = justified_node.as_v17() && v17.execution_status.is_invalid() @@ -979,6 +1122,42 @@ impl ProtoArray { }); } + // For V29 (Gloas) justified nodes, use the virtual tree walk directly. + if justified_node.as_v29().is_ok() { + return self.find_head_v29_walk::(justified_index, current_slot); + } + + // Pre-Gloas justified node, but descendants may be V29. + // Walk via best_child chain; switch to V29 walk when we hit one. + if justified_node.best_child().is_some() || justified_node.best_descendant().is_some() { + let mut current_index = justified_index; + loop { + let node = self + .nodes + .get(current_index) + .ok_or(Error::InvalidNodeIndex(current_index))?; + + // Hit a V29 node — switch to virtual tree walk. + if node.as_v29().is_ok() { + return self.find_head_v29_walk::(current_index, current_slot); + } + + // V17 node: follow best_child. 
+ if let Some(bc_idx) = node.best_child() { + current_index = bc_idx; + } else { + break; + } + } + + let head_node = self + .nodes + .get(current_index) + .ok_or(Error::InvalidNodeIndex(current_index))?; + return Ok(head_node.root()); + } + + // Pre-Gloas fallback: use best_descendant directly. let best_descendant_index = justified_node.best_descendant().unwrap_or(justified_index); let best_node = self @@ -1007,6 +1186,81 @@ impl ProtoArray { Ok(best_node.root()) } + /// V29 virtual tree walk for `find_head`. + /// + /// At each V29 node, determine the preferred payload direction (FULL or EMPTY) + /// by comparing weights, then follow the direction-specific best_child pointer. + /// O(depth) — no scanning. + fn find_head_v29_walk( + &self, + start_index: usize, + current_slot: Slot, + ) -> Result { + let ptc_size = E::ptc_size(); + let mut current_index = start_index; + + loop { + let node = self + .nodes + .get(current_index) + .ok_or(Error::InvalidNodeIndex(current_index))?; + + let Ok(v29) = node.as_v29() else { break }; + + let prefer_full = Self::v29_prefer_full(v29, node.slot(), current_slot, ptc_size); + + // O(1) lookup via direction-specific best_child pointers. + let next = if prefer_full { + v29.best_full_child + } else { + v29.best_empty_child + }; + + if let Some(child_index) = next { + current_index = child_index; + } else { + break; + } + } + + let head_node = self + .nodes + .get(current_index) + .ok_or(Error::InvalidNodeIndex(current_index))?; + Ok(head_node.root()) + } + + /// Determine whether a V29 node prefers the FULL or EMPTY direction. + fn v29_prefer_full( + v29: &ProtoNodeV29, + node_slot: Slot, + current_slot: Slot, + ptc_size: usize, + ) -> bool { + if !v29.payload_received { + return false; + } + if node_slot + 1 != current_slot { + // Weight comparison, tiebreak to payload_received. 
+ if v29.full_payload_weight != v29.empty_payload_weight { + v29.full_payload_weight > v29.empty_payload_weight + } else { + v29.payload_received + } + } else { + // Previous slot: PTC tiebreaker only. + is_payload_timely( + &v29.payload_timeliness_votes, + ptc_size, + v29.payload_received, + ) && is_payload_data_available( + &v29.payload_data_availability_votes, + ptc_size, + v29.payload_received, + ) + } + } + /// Update the tree with new finalization information. The tree is only actually pruned if both /// of the two following criteria are met: /// @@ -1072,6 +1326,20 @@ impl ProtoArray { .ok_or(Error::IndexOverflow("best_descendant"))?, ); } + if let Ok(v29) = node.as_v29_mut() { + if let Some(idx) = v29.best_full_child { + v29.best_full_child = Some( + idx.checked_sub(finalized_index) + .ok_or(Error::IndexOverflow("best_full_child"))?, + ); + } + if let Some(idx) = v29.best_empty_child { + v29.best_empty_child = Some( + idx.checked_sub(finalized_index) + .ok_or(Error::IndexOverflow("best_empty_child"))?, + ); + } + } } Ok(()) @@ -1214,6 +1482,16 @@ impl ProtoArray { no_change }; + // Capture child info before mutable borrows. + let child = self + .nodes + .get(child_index) + .ok_or(Error::InvalidNodeIndex(child_index))?; + let child_payload_dir = child.parent_payload_status().ok(); + let child_weight = child.weight(); + let child_root = child.root(); + + // Update general best_child/best_descendant. let parent = self .nodes .get_mut(parent_index) @@ -1222,6 +1500,109 @@ impl ProtoArray { *parent.best_child_mut() = new_best_child; *parent.best_descendant_mut() = new_best_descendant; + // For V29 parents: also maintain direction-specific best_child pointers + // so the V29 head walk can pick the right child in O(1). 
+ if parent.as_v29().is_ok() + && let Some(dir) = child_payload_dir + { + self.update_directional_best_child::( + parent_index, + child_index, + dir, + child_leads_to_viable_head, + child_weight, + child_root, + current_slot, + best_justified_checkpoint, + best_finalized_checkpoint, + )?; + } + + Ok(()) + } + + /// Update `best_full_child` or `best_empty_child` on a V29 parent. + #[allow(clippy::too_many_arguments)] + fn update_directional_best_child( + &mut self, + parent_index: usize, + child_index: usize, + dir: PayloadStatus, + child_viable: bool, + child_weight: u64, + child_root: Hash256, + current_slot: Slot, + best_justified_checkpoint: Checkpoint, + best_finalized_checkpoint: Checkpoint, + ) -> Result<(), Error> { + let parent_v29 = self + .nodes + .get(parent_index) + .ok_or(Error::InvalidNodeIndex(parent_index))? + .as_v29() + .map_err(|_| Error::InvalidNodeIndex(parent_index))?; + + let current_best = match dir { + PayloadStatus::Full => parent_v29.best_full_child, + PayloadStatus::Empty => parent_v29.best_empty_child, + PayloadStatus::Pending => return Ok(()), + }; + + if !child_viable { + // Remove if this child was the directional best but is no longer viable. + if current_best == Some(child_index) { + let parent_v29 = self + .nodes + .get_mut(parent_index) + .ok_or(Error::InvalidNodeIndex(parent_index))? 
+ .as_v29_mut() + .map_err(|_| Error::InvalidNodeIndex(parent_index))?; + match dir { + PayloadStatus::Full => parent_v29.best_full_child = None, + PayloadStatus::Empty => parent_v29.best_empty_child = None, + PayloadStatus::Pending => {} + } + } + return Ok(()); + } + + let replace = match current_best { + None => true, + Some(best_idx) => { + let best_node = self + .nodes + .get(best_idx) + .ok_or(Error::InvalidNodeIndex(best_idx))?; + let best_viable = self.node_leads_to_viable_head::( + best_node, + current_slot, + best_justified_checkpoint, + best_finalized_checkpoint, + )?; + if !best_viable { + true + } else if child_weight != best_node.weight() { + child_weight > best_node.weight() + } else { + *child_root >= *best_node.root() + } + } + }; + + if replace { + let parent_v29 = self + .nodes + .get_mut(parent_index) + .ok_or(Error::InvalidNodeIndex(parent_index))? + .as_v29_mut() + .map_err(|_| Error::InvalidNodeIndex(parent_index))?; + match dir { + PayloadStatus::Full => parent_v29.best_full_child = Some(child_index), + PayloadStatus::Empty => parent_v29.best_empty_child = Some(child_index), + PayloadStatus::Pending => {} + } + } + Ok(()) } diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index 4f5fe45c220..64ec5a85498 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -182,6 +182,7 @@ pub struct Block { /// post-Gloas fields pub execution_payload_parent_hash: Option, pub execution_payload_block_hash: Option, + pub proposer_index: Option, } impl Block { @@ -473,6 +474,7 @@ impl ProtoArrayForkChoice { unrealized_finalized_checkpoint: Some(finalized_checkpoint), execution_payload_parent_hash, execution_payload_block_hash, + proposer_index: None, }; proto_array @@ -965,6 +967,7 @@ impl ProtoArrayForkChoice { unrealized_finalized_checkpoint: block.unrealized_finalized_checkpoint(), execution_payload_parent_hash: 
None, execution_payload_block_hash: block.execution_payload_block_hash().ok(), + proposer_index: block.proposer_index().ok(), }) } @@ -1004,26 +1007,42 @@ impl ProtoArrayForkChoice { pub fn head_payload_status( &self, head_root: &Hash256, - _current_slot: Slot, + current_slot: Slot, ) -> Option { let node = self.get_proto_node(head_root)?; let v29 = node.as_v29().ok()?; - if v29.full_payload_weight > v29.empty_payload_weight { - Some(PayloadStatus::Full) - } else if v29.empty_payload_weight > v29.full_payload_weight { - Some(PayloadStatus::Empty) - } else if is_payload_timely( - &v29.payload_timeliness_votes, - E::ptc_size(), - v29.payload_received, - ) && is_payload_data_available( - &v29.payload_data_availability_votes, - E::ptc_size(), - v29.payload_received, - ) { - Some(PayloadStatus::Full) + + // Replicate the spec's virtual tree walk tiebreaker at the head node. + let use_tiebreaker_only = node.slot() + 1 == current_slot; + + if !use_tiebreaker_only { + // Compare weights, then fall back to tiebreaker. + if v29.full_payload_weight > v29.empty_payload_weight { + return Some(PayloadStatus::Full); + } else if v29.empty_payload_weight > v29.full_payload_weight { + return Some(PayloadStatus::Empty); + } + // Equal weights: prefer FULL if payload received. + if v29.payload_received { + Some(PayloadStatus::Full) + } else { + Some(PayloadStatus::Empty) + } } else { - Some(PayloadStatus::Empty) + // Previous slot: should_extend_payload tiebreaker. 
+ if is_payload_timely( + &v29.payload_timeliness_votes, + E::ptc_size(), + v29.payload_received, + ) && is_payload_data_available( + &v29.payload_data_availability_votes, + E::ptc_size(), + v29.payload_received, + ) { + Some(PayloadStatus::Full) + } else { + Some(PayloadStatus::Empty) + } } } @@ -1337,6 +1356,7 @@ mod test_compute_deltas { unrealized_finalized_checkpoint: Some(genesis_checkpoint), execution_payload_parent_hash: None, execution_payload_block_hash: None, + proposer_index: None, }, genesis_slot + 1, genesis_checkpoint, @@ -1365,6 +1385,7 @@ mod test_compute_deltas { unrealized_finalized_checkpoint: None, execution_payload_parent_hash: None, execution_payload_block_hash: None, + proposer_index: None, }, genesis_slot + 1, genesis_checkpoint, @@ -1500,6 +1521,7 @@ mod test_compute_deltas { unrealized_finalized_checkpoint: Some(genesis_checkpoint), execution_payload_parent_hash: None, execution_payload_block_hash: None, + proposer_index: None, }, Slot::from(block.slot), genesis_checkpoint, diff --git a/testing/ef_tests/src/cases/fork_choice.rs b/testing/ef_tests/src/cases/fork_choice.rs index 054c65d0169..a1c93d65bb1 100644 --- a/testing/ef_tests/src/cases/fork_choice.rs +++ b/testing/ef_tests/src/cases/fork_choice.rs @@ -622,6 +622,18 @@ impl Tester { self.apply_invalid_block(&block)?; } + // Per spec test runner: an on_block step implies receiving block's attestations + // and attester slashings. + if success { + for attestation in block.message().body().attestations() { + let att = attestation.clone_as_attestation(); + let _ = self.process_attestation(&att); + } + for attester_slashing in block.message().body().attester_slashings() { + self.process_attester_slashing(attester_slashing); + } + } + Ok(()) } @@ -712,6 +724,18 @@ impl Tester { self.apply_invalid_block(&block)?; } + // Per spec test runner: an on_block step implies receiving block's attestations + // and attester slashings. 
+ if success { + for attestation in block.message().body().attestations() { + let att = attestation.clone_as_attestation(); + let _ = self.process_attestation(&att); + } + for attester_slashing in block.message().body().attester_slashings() { + self.process_attester_slashing(attester_slashing); + } + } + Ok(()) } From ce714710e947b6dd37c4629ff5dbbe806fdc6e45 Mon Sep 17 00:00:00 2001 From: hopinheimer Date: Mon, 23 Mar 2026 14:40:41 -0400 Subject: [PATCH 020/127] passing ef tests ft. @dapplion --- .../gloas_payload.rs | 10 +- consensus/proto_array/src/proto_array.rs | 353 +++++------------- 2 files changed, 99 insertions(+), 264 deletions(-) diff --git a/consensus/proto_array/src/fork_choice_test_definition/gloas_payload.rs b/consensus/proto_array/src/fork_choice_test_definition/gloas_payload.rs index 8dcf538bd42..8354b22e474 100644 --- a/consensus/proto_array/src/fork_choice_test_definition/gloas_payload.rs +++ b/consensus/proto_array/src/fork_choice_test_definition/gloas_payload.rs @@ -117,12 +117,6 @@ pub fn get_gloas_payload_probe_test_definition() -> ForkChoiceTestDefinition { execution_payload_block_hash: Some(get_hash(1)), }); - // Mark root_1 as having received its execution payload so that - // its FULL virtual node exists in the GLOAS fork choice tree. - ops.push(Operation::ProcessExecutionPayload { - block_root: get_root(1), - }); - // One Full and one Empty vote for the same head block: tie probes via runtime tiebreak, // which defaults to Empty unless timely+data-available evidence is set. ops.push(Operation::ProcessPayloadAttestation { @@ -187,13 +181,15 @@ pub fn get_gloas_payload_probe_test_definition() -> ForkChoiceTestDefinition { }); // Same-slot attestation to a new head candidate should be Pending (no payload bucket change). + // Root 5 is an Empty child of root_1 (parent_hash doesn't match root_1's block_hash), + // so it's reachable through root_1's Empty direction (root_1 has no payload_received). 
ops.push(Operation::ProcessBlock { slot: Slot::new(3), root: get_root(5), parent_root: get_root(1), justified_checkpoint: get_checkpoint(0), finalized_checkpoint: get_checkpoint(0), - execution_payload_parent_hash: Some(get_hash(1)), + execution_payload_parent_hash: Some(get_hash(101)), execution_payload_block_hash: Some(get_hash(5)), }); ops.push(Operation::ProcessPayloadAttestation { diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 422d05097b0..ac5f5be525d 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -151,15 +151,6 @@ pub struct ProtoNode { /// to detect equivocations at the parent's slot. #[superstruct(only(V29), partial_getter(copy))] pub proposer_index: u64, - /// Best child whose `parent_payload_status == Full`. - /// Maintained alongside `best_child` to avoid O(n) scans during the V29 head walk. - #[superstruct(only(V29), partial_getter(copy))] - #[ssz(with = "four_byte_option_usize")] - pub best_full_child: Option, - /// Best child whose `parent_payload_status == Empty`. - #[superstruct(only(V29), partial_getter(copy))] - #[ssz(with = "four_byte_option_usize")] - pub best_empty_child: Option, } #[derive(PartialEq, Debug, Encode, Decode, Serialize, Deserialize, Copy, Clone)] @@ -180,9 +171,10 @@ impl Default for ProposerBoost { /// Accumulated score changes for a single proto-array node during a `find_head` pass. /// /// `delta` tracks the ordinary LMD-GHOST balance change applied to the concrete block node. -/// This is the same notion of weight that pre-GLOAS fork choice used. +/// This is the same notion of weight that pre-gloas fork choice used. 
+/// /// -/// Under GLOAS we also need to track how votes contribute to the parent's virtual payload +/// Under gloas we also need to track how votes contribute to the parent's virtual payload /// branches: /// /// - `empty_delta` is the balance change attributable to votes that support the `Empty` payload @@ -206,7 +198,7 @@ pub struct NodeDelta { impl NodeDelta { /// Classify a vote into the payload bucket it contributes to for `block_slot`. /// - /// Per the GLOAS model: + /// Per the gloas model: /// /// - a same-slot vote is `Pending` /// - a later vote with `payload_present = true` is `Full` @@ -681,8 +673,6 @@ impl ProtoArray { }, payload_received: is_genesis, proposer_index: block.proposer_index.unwrap_or(0), - best_full_child: None, - best_empty_child: None, }) }; @@ -1124,7 +1114,12 @@ impl ProtoArray { // For V29 (Gloas) justified nodes, use the virtual tree walk directly. if justified_node.as_v29().is_ok() { - return self.find_head_v29_walk::(justified_index, current_slot); + return self.find_head_v29_walk::( + justified_index, + current_slot, + best_justified_checkpoint, + best_finalized_checkpoint, + ); } // Pre-Gloas justified node, but descendants may be V29. @@ -1139,7 +1134,12 @@ impl ProtoArray { // Hit a V29 node — switch to virtual tree walk. if node.as_v29().is_ok() { - return self.find_head_v29_walk::(current_index, current_slot); + return self.find_head_v29_walk::( + current_index, + current_slot, + best_justified_checkpoint, + best_finalized_checkpoint, + ); } // V17 node: follow best_child. @@ -1189,12 +1189,15 @@ impl ProtoArray { /// V29 virtual tree walk for `find_head`. /// /// At each V29 node, determine the preferred payload direction (FULL or EMPTY) - /// by comparing weights, then follow the direction-specific best_child pointer. - /// O(depth) — no scanning. + /// by comparing weights. If `best_child` matches the preferred direction, follow + /// it directly. 
Otherwise, scan all nodes to find the best child matching + /// the preferred direction. fn find_head_v29_walk( &self, start_index: usize, current_slot: Slot, + best_justified_checkpoint: Checkpoint, + best_finalized_checkpoint: Checkpoint, ) -> Result { let ptc_size = E::ptc_size(); let mut current_index = start_index; @@ -1208,15 +1211,38 @@ impl ProtoArray { let Ok(v29) = node.as_v29() else { break }; let prefer_full = Self::v29_prefer_full(v29, node.slot(), current_slot, ptc_size); + let preferred_status = if prefer_full { + PayloadStatus::Full + } else { + PayloadStatus::Empty + }; - // O(1) lookup via direction-specific best_child pointers. - let next = if prefer_full { - v29.best_full_child + // Fast path: check if best_child already matches the preferred direction. + let next_index = if let Some(best_child_index) = node.best_child() { + let best_child_node = self + .nodes + .get(best_child_index) + .ok_or(Error::InvalidNodeIndex(best_child_index))?; + if best_child_node + .as_v29() + .is_ok_and(|v| v.parent_payload_status == preferred_status) + { + Some(best_child_index) + } else { + // best_child is on the wrong direction. Scan for the best matching child. + self.find_best_child_with_status::( + current_index, + preferred_status, + current_slot, + best_justified_checkpoint, + best_finalized_checkpoint, + )? + } } else { - v29.best_empty_child + None }; - if let Some(child_index) = next { + if let Some(child_index) = next_index { current_index = child_index; } else { break; @@ -1230,6 +1256,53 @@ impl ProtoArray { Ok(head_node.root()) } + /// Find the best viable child of `parent_index` whose `parent_payload_status` matches + /// `target_status`. Returns `None` if no matching viable child exists. 
+ fn find_best_child_with_status( + &self, + parent_index: usize, + target_status: PayloadStatus, + current_slot: Slot, + best_justified_checkpoint: Checkpoint, + best_finalized_checkpoint: Checkpoint, + ) -> Result, Error> { + let mut best: Option<(usize, u64, Hash256)> = None; + for (node_index, node) in self.nodes.iter().enumerate() { + if node.parent() != Some(parent_index) { + continue; + } + if !node + .as_v29() + .is_ok_and(|v| v.parent_payload_status == target_status) + { + continue; + } + if !self.node_leads_to_viable_head::( + node, + current_slot, + best_justified_checkpoint, + best_finalized_checkpoint, + )? { + continue; + } + + let child_weight = node.weight(); + let child_root = node.root(); + let replace = if let Some((_, best_weight, best_root)) = best { + child_weight > best_weight + || (child_weight == best_weight && child_root >= best_root) + } else { + true + }; + + if replace { + best = Some((node_index, child_weight, child_root)); + } + } + + Ok(best.map(|(index, _, _)| index)) + } + /// Determine whether a V29 node prefers the FULL or EMPTY direction. fn v29_prefer_full( v29: &ProtoNodeV29, @@ -1326,20 +1399,6 @@ impl ProtoArray { .ok_or(Error::IndexOverflow("best_descendant"))?, ); } - if let Ok(v29) = node.as_v29_mut() { - if let Some(idx) = v29.best_full_child { - v29.best_full_child = Some( - idx.checked_sub(finalized_index) - .ok_or(Error::IndexOverflow("best_full_child"))?, - ); - } - if let Some(idx) = v29.best_empty_child { - v29.best_empty_child = Some( - idx.checked_sub(finalized_index) - .ok_or(Error::IndexOverflow("best_empty_child"))?, - ); - } - } } Ok(()) @@ -1382,26 +1441,8 @@ impl ProtoArray { best_finalized_checkpoint, )?; - // Per spec `should_extend_payload`: if the proposer-boosted block is a child of - // this parent and extends Empty, force Empty preference regardless of - // weights/tiebreaker. 
- let proposer_boost_root = self.previous_proposer_boost.root; - let proposer_boost = !proposer_boost_root.is_zero() - && self - .indices - .get(&proposer_boost_root) - .and_then(|&idx| self.nodes.get(idx)) - .is_some_and(|boost_node| { - boost_node.parent() == Some(parent_index) - && boost_node - .parent_payload_status() - .is_ok_and(|s| s != PayloadStatus::Full) - }); - // These three variables are aliases to the three options that we may set the // `parent.best_child` and `parent.best_descendant` to. - // - // I use the aliases to assist readability. let change_to_none = (None, None); let change_to_child = ( Some(child_index), @@ -1409,17 +1450,6 @@ impl ProtoArray { ); let no_change = (parent.best_child(), parent.best_descendant()); - // For V29 (GLOAS) parents, the spec's virtual tree model determines a preferred - // FULL or EMPTY direction at each node. Weight is the primary selector among - // viable children; direction matching is the tiebreaker when weights are equal. - let child_matches_dir = child_matches_parent_payload_preference( - parent, - child, - current_slot, - E::ptc_size(), - proposer_boost, - ); - let (new_best_child, new_best_descendant) = if let Some(best_child_index) = parent.best_child() { if best_child_index == child_index && !child_leads_to_viable_head { @@ -1443,55 +1473,26 @@ impl ProtoArray { best_finalized_checkpoint, )?; - let best_child_matches_dir = child_matches_parent_payload_preference( - parent, - best_child, - current_slot, - E::ptc_size(), - proposer_boost, - ); - if child_leads_to_viable_head && !best_child_leads_to_viable_head { - // The child leads to a viable head, but the current best-child doesn't. change_to_child } else if !child_leads_to_viable_head && best_child_leads_to_viable_head { - // The best child leads to a viable head, but the child doesn't. no_change } else if child.weight() > best_child.weight() { - // Weight is the primary selector after viability. 
change_to_child } else if child.weight() < best_child.weight() { no_change - } else if child_matches_dir && !best_child_matches_dir { - // Equal weight: direction matching is the tiebreaker. - change_to_child - } else if !child_matches_dir && best_child_matches_dir { - no_change } else if *child.root() >= *best_child.root() { - // Final tie-breaker: break by root hash. change_to_child } else { no_change } } } else if child_leads_to_viable_head { - // No current best-child: set if child is viable. change_to_child } else { - // Child is not viable. no_change }; - // Capture child info before mutable borrows. - let child = self - .nodes - .get(child_index) - .ok_or(Error::InvalidNodeIndex(child_index))?; - let child_payload_dir = child.parent_payload_status().ok(); - let child_weight = child.weight(); - let child_root = child.root(); - - // Update general best_child/best_descendant. let parent = self .nodes .get_mut(parent_index) @@ -1500,109 +1501,6 @@ impl ProtoArray { *parent.best_child_mut() = new_best_child; *parent.best_descendant_mut() = new_best_descendant; - // For V29 parents: also maintain direction-specific best_child pointers - // so the V29 head walk can pick the right child in O(1). - if parent.as_v29().is_ok() - && let Some(dir) = child_payload_dir - { - self.update_directional_best_child::( - parent_index, - child_index, - dir, - child_leads_to_viable_head, - child_weight, - child_root, - current_slot, - best_justified_checkpoint, - best_finalized_checkpoint, - )?; - } - - Ok(()) - } - - /// Update `best_full_child` or `best_empty_child` on a V29 parent. 
- #[allow(clippy::too_many_arguments)] - fn update_directional_best_child( - &mut self, - parent_index: usize, - child_index: usize, - dir: PayloadStatus, - child_viable: bool, - child_weight: u64, - child_root: Hash256, - current_slot: Slot, - best_justified_checkpoint: Checkpoint, - best_finalized_checkpoint: Checkpoint, - ) -> Result<(), Error> { - let parent_v29 = self - .nodes - .get(parent_index) - .ok_or(Error::InvalidNodeIndex(parent_index))? - .as_v29() - .map_err(|_| Error::InvalidNodeIndex(parent_index))?; - - let current_best = match dir { - PayloadStatus::Full => parent_v29.best_full_child, - PayloadStatus::Empty => parent_v29.best_empty_child, - PayloadStatus::Pending => return Ok(()), - }; - - if !child_viable { - // Remove if this child was the directional best but is no longer viable. - if current_best == Some(child_index) { - let parent_v29 = self - .nodes - .get_mut(parent_index) - .ok_or(Error::InvalidNodeIndex(parent_index))? - .as_v29_mut() - .map_err(|_| Error::InvalidNodeIndex(parent_index))?; - match dir { - PayloadStatus::Full => parent_v29.best_full_child = None, - PayloadStatus::Empty => parent_v29.best_empty_child = None, - PayloadStatus::Pending => {} - } - } - return Ok(()); - } - - let replace = match current_best { - None => true, - Some(best_idx) => { - let best_node = self - .nodes - .get(best_idx) - .ok_or(Error::InvalidNodeIndex(best_idx))?; - let best_viable = self.node_leads_to_viable_head::( - best_node, - current_slot, - best_justified_checkpoint, - best_finalized_checkpoint, - )?; - if !best_viable { - true - } else if child_weight != best_node.weight() { - child_weight > best_node.weight() - } else { - *child_root >= *best_node.root() - } - } - }; - - if replace { - let parent_v29 = self - .nodes - .get_mut(parent_index) - .ok_or(Error::InvalidNodeIndex(parent_index))? 
- .as_v29_mut() - .map_err(|_| Error::InvalidNodeIndex(parent_index))?; - match dir { - PayloadStatus::Full => parent_v29.best_full_child = Some(child_index), - PayloadStatus::Empty => parent_v29.best_empty_child = Some(child_index), - PayloadStatus::Pending => {} - } - } - Ok(()) } @@ -1842,65 +1740,6 @@ impl ProtoArray { } } -/// For V29 parents, returns `true` if the child's `parent_payload_status` matches the parent's -/// preferred payload status per spec `should_extend_payload`. -/// -/// If `proposer_boost` is set, the parent unconditionally prefers Empty (the proposer-boosted -/// block is a child of this parent and extends Empty). Otherwise, when full and empty weights -/// are unequal the higher weight wins; when equal, the tiebreaker uses PTC votes. -/// -/// For V17 parents (or mixed), always returns `true` (no payload preference). -fn child_matches_parent_payload_preference( - parent: &ProtoNode, - child: &ProtoNode, - current_slot: Slot, - ptc_size: usize, - proposer_boost: bool, -) -> bool { - let (Ok(parent_v29), Ok(child_v29)) = (parent.as_v29(), child.as_v29()) else { - return true; - }; - - // Per spec `should_extend_payload`: if the proposer-boosted block extends Empty from - // this parent, unconditionally prefer Empty. - if proposer_boost { - return child_v29.parent_payload_status == PayloadStatus::Empty; - } - - // Per spec `get_weight`: FULL/EMPTY virtual nodes at `current_slot - 1` have weight 0. - // The PTC is still voting, so payload preference is determined solely by the tiebreaker. - let use_tiebreaker_only = parent.slot() + 1 == current_slot; - let prefers_full = if !use_tiebreaker_only - && parent_v29.full_payload_weight > parent_v29.empty_payload_weight - { - true - } else if !use_tiebreaker_only - && parent_v29.empty_payload_weight > parent_v29.full_payload_weight - { - false - } else if use_tiebreaker_only { - // Previous slot: should_extend_payload = is_payload_timely && is_payload_data_available. 
- is_payload_timely( - &parent_v29.payload_timeliness_votes, - ptc_size, - parent_v29.payload_received, - ) && is_payload_data_available( - &parent_v29.payload_data_availability_votes, - ptc_size, - parent_v29.payload_received, - ) - } else { - // Not previous slot: should_extend_payload = true. - // Full wins the tiebreaker (1 > 0) when the payload has been received. - parent_v29.payload_received - }; - if prefers_full { - child_v29.parent_payload_status == PayloadStatus::Full - } else { - child_v29.parent_payload_status == PayloadStatus::Empty - } -} - /// Derive `is_payload_timely` from the timeliness vote bitfield. /// /// Per spec: returns false if the payload has not been received locally From 52e397f8c17ed8bc1d71ed46013318d1851d4ba8 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 24 Mar 2026 15:58:54 +1100 Subject: [PATCH 021/127] Refactoring fork choice to look more like the spec --- consensus/proto_array/src/proto_array.rs | 233 +++++++++--------- .../src/proto_array_fork_choice.rs | 7 + 2 files changed, 130 insertions(+), 110 deletions(-) diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index ac5f5be525d..5ab879fac41 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -153,6 +153,23 @@ pub struct ProtoNode { pub proposer_index: u64, } +impl ProtoNode { + /// Generic version of spec's `parent_payload_status` that works for pre-Gloas nodes by + /// considering their parents Empty. + fn parent_payload_status(&self) -> PayloadStatus { + self.parent_payload_status().unwrap_or(PayloadStatus::Empty) + } + + fn attestation_score(&self, payload_status: PayloadStatus) -> u64 { + match payload_status { + // TODO(gloas): rename weight and remove proposer boost from it? 
+ PayloadStatus::Pending => self.weight, + PayloadStatus::Empty => self.empty_payload_weight().unwrap_or(0), + PayloadStatus::Full => self.full_payload_weight().unwrap_or(0), + } + } +} + #[derive(PartialEq, Debug, Encode, Decode, Serialize, Deserialize, Copy, Clone)] pub struct ProposerBoost { pub root: Hash256, @@ -715,6 +732,19 @@ impl ProtoArray { Ok(()) } + fn is_head_weak(&self, head_node: &ProtoNode, justified_balances: &JustifiedBalances) -> bool { + let reorg_threshold = calculate_committee_fraction::( + justified_balances, + spec.reorg_head_weight_threshold.unwrap_or(20), + ) + .unwrap_or(0); + + let head_weight = head_node.attestation_score(); + + // TODO(gloas): missing equivocating weight from spec + head_weight < reorg_threshold + } + /// Spec's `should_apply_proposer_boost` for Gloas. /// /// Returns `true` if the proposer boost should be kept. Returns `false` if the @@ -746,14 +776,6 @@ impl ProtoArray { return Ok(true); } - // Check if the parent is "weak" (low attestation weight). - // Parent weight currently includes the back-propagated boost, so subtract it. - let reorg_threshold = calculate_committee_fraction::( - justified_balances, - spec.reorg_head_weight_threshold.unwrap_or(20), - ) - .unwrap_or(0); - let parent_weight_without_boost = parent.weight().saturating_sub(proposer_score); if parent_weight_without_boost >= reorg_threshold { return Ok(true); // Parent is not weak — apply. @@ -1089,7 +1111,7 @@ impl ProtoArray { current_slot: Slot, best_justified_checkpoint: Checkpoint, best_finalized_checkpoint: Checkpoint, - ) -> Result { + ) -> Result<(Hash256, PayloadStatus), Error> { let justified_index = self .indices .get(justified_root) @@ -1112,58 +1134,15 @@ impl ProtoArray { }); } - // For V29 (Gloas) justified nodes, use the virtual tree walk directly. 
- if justified_node.as_v29().is_ok() { - return self.find_head_v29_walk::( - justified_index, - current_slot, - best_justified_checkpoint, - best_finalized_checkpoint, - ); - } - - // Pre-Gloas justified node, but descendants may be V29. - // Walk via best_child chain; switch to V29 walk when we hit one. - if justified_node.best_child().is_some() || justified_node.best_descendant().is_some() { - let mut current_index = justified_index; - loop { - let node = self - .nodes - .get(current_index) - .ok_or(Error::InvalidNodeIndex(current_index))?; - - // Hit a V29 node — switch to virtual tree walk. - if node.as_v29().is_ok() { - return self.find_head_v29_walk::( - current_index, - current_slot, - best_justified_checkpoint, - best_finalized_checkpoint, - ); - } - - // V17 node: follow best_child. - if let Some(bc_idx) = node.best_child() { - current_index = bc_idx; - } else { - break; - } - } - - let head_node = self - .nodes - .get(current_index) - .ok_or(Error::InvalidNodeIndex(current_index))?; - return Ok(head_node.root()); - } - - // Pre-Gloas fallback: use best_descendant directly. - let best_descendant_index = justified_node.best_descendant().unwrap_or(justified_index); - - let best_node = self - .nodes - .get(best_descendant_index) - .ok_or(Error::InvalidBestDescendant(best_descendant_index))?; + // In the post-Gloas worlld, always use a virtual tree walk. + // + // Best child/best descendant is dead. + let best_node = self.find_head_walk::( + justified_index, + current_slot, + best_justified_checkpoint, + best_finalized_checkpoint, + ); // Perform a sanity check that the node is indeed valid to be the head. if !self.node_is_viable_for_head::( @@ -1186,67 +1165,30 @@ impl ProtoArray { Ok(best_node.root()) } - /// V29 virtual tree walk for `find_head`. + /// Virtual tree walk for `find_head`. /// - /// At each V29 node, determine the preferred payload direction (FULL or EMPTY) - /// by comparing weights. 
If `best_child` matches the preferred direction, follow - /// it directly. Otherwise, scan all nodes to find the best child matching + /// At each node, determine the preferred payload direction (FULL or EMPTY) + /// by comparing weights. Scan all nodes to find the best child matching /// the preferred direction. - fn find_head_v29_walk( + fn find_head_walk( &self, start_index: usize, current_slot: Slot, best_justified_checkpoint: Checkpoint, best_finalized_checkpoint: Checkpoint, - ) -> Result { + ) -> Result { let ptc_size = E::ptc_size(); let mut current_index = start_index; - loop { - let node = self - .nodes - .get(current_index) - .ok_or(Error::InvalidNodeIndex(current_index))?; - - let Ok(v29) = node.as_v29() else { break }; - - let prefer_full = Self::v29_prefer_full(v29, node.slot(), current_slot, ptc_size); - let preferred_status = if prefer_full { - PayloadStatus::Full - } else { - PayloadStatus::Empty - }; + let mut head = ForkChoiceNode { + root: best_justified_checkpoint.root, + payload_status: PayloadStatus::Pending, + }; - // Fast path: check if best_child already matches the preferred direction. - let next_index = if let Some(best_child_index) = node.best_child() { - let best_child_node = self - .nodes - .get(best_child_index) - .ok_or(Error::InvalidNodeIndex(best_child_index))?; - if best_child_node - .as_v29() - .is_ok_and(|v| v.parent_payload_status == preferred_status) - { - Some(best_child_index) - } else { - // best_child is on the wrong direction. Scan for the best matching child. - self.find_best_child_with_status::( - current_index, - preferred_status, - current_slot, - best_justified_checkpoint, - best_finalized_checkpoint, - )? 
- } - } else { - None - }; + loop { + let children = self.get_node_children(&head)?; - if let Some(child_index) = next_index { - current_index = child_index; - } else { - break; - } + head = children.max_by_key(|child| (child.weight)) } let head_node = self @@ -1256,6 +1198,77 @@ impl ProtoArray { Ok(head_node.root()) } + fn get_weight( + &self, + fc_node: &ForkChoiceNode, + proto_node: &ProtoNode, + current_slot: Slot, + ) -> u64 { + if fc_node.payload_status == PayloadStatus::Pending + || proto_node.slot.saturating_add(1) != current_slot + { + let attestation_score = proto_node.attestation_score(); + + // TODO(gloas): implement proposer boost + // + } + } + + fn get_node_children( + node: &IndexedForkChoiceNode, + ) -> Result, Error> { + if node.payload_status == PayloadStatus::Pending { + let proto_node = self + .nodes + .get(node.node_index) + .ok_or(Error::InvalidNodeIndex(node.node_index))? + .clone(); + let mut children = vec![( + IndexedForkChoiceNode { + root: node.root, + node_index: usize, + payload_status: PayloadStatus::Empty, + }, + proto_node.clone(), + )]; + if proto_node.payload_exists().is_ok_and(|exists| exists) { + children.push(( + IndexedForkChoiceNode { + root: node.root, + node_index: usize, + payload_status: PayloadStatus::Full, + }, + proto_node, + )); + } + Ok(children) + } else { + let children = self + .nodes + .get(node.node_index..) + .ok_or(Error::InvalidNodeIndex(node.node_index))? + .iter() + .enumerate() + .filter(|(_, child_node)| { + child_node.parent_root == node.root + && node.payload_status == child_node.parent_payload_status() + }) + .map(|i, child_node| { + let child_index = node.node_index.saturating_add(i); + ( + IndexedForkChoiceNode { + root: child_node.root, + node_index: child_index, + payload_status: PayloadStatus::Pending, + }, + child_node.clone(), + ) + }) + .collect(); + Ok(children) + } + } + /// Find the best viable child of `parent_index` whose `parent_payload_status` matches /// `target_status`. 
Returns `None` if no matching viable child exists. fn find_best_child_with_status( diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index 64ec5a85498..061e8a72873 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -68,6 +68,13 @@ pub enum PayloadStatus { Pending = 2, } +/// Spec's `ForkChoiceNode` augmented with ProtoNode index. +pub struct IndexedForkChoiceNode { + root: Hash256, + node_index: usize, + payload_status: PayloadStatus, +} + impl ExecutionStatus { pub fn is_execution_enabled(&self) -> bool { !matches!(self, ExecutionStatus::Irrelevant(_)) From 81b96a59d2fec84a3964f2cc070912b41cfebf7a Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 25 Mar 2026 13:03:34 +1100 Subject: [PATCH 022/127] More spec compliance --- consensus/proto_array/src/proto_array.rs | 264 +++++++++++++++-------- 1 file changed, 172 insertions(+), 92 deletions(-) diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 5ab879fac41..3be554b3473 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -131,11 +131,19 @@ pub struct ProtoNode { pub full_payload_weight: u64, #[superstruct(only(V29), partial_getter(copy))] pub execution_payload_block_hash: ExecutionBlockHash, + /// Equivalent to spec's `block_timeliness[root][ATTESTATION_TIMELINESS_INDEX]`. + #[superstruct(only(V29), partial_getter(copy))] + pub block_timeliness_attestation_threshold: bool, + /// Equivalent to spec's `block_timeliness[root][PTC_TIMELINESS_INDEX]`. + #[superstruct(only(V29), partial_getter(copy))] + pub block_timeliness_ptc_threshold: bool, + /// Equivalent to spec's `store.payload_timeliness_vote[root]`. /// PTC timeliness vote bitfield, indexed by PTC committee position. /// Bit i set means PTC member i voted `payload_present = true`. 
/// Tiebreak derived as: `num_set_bits() > ptc_size / 2`. #[superstruct(only(V29))] pub payload_timeliness_votes: BitVector, + /// Equivalent to spec's `store.payload_data_availability_vote[root]`. /// PTC data availability vote bitfield, indexed by PTC committee position. /// Bit i set means PTC member i voted `blob_data_available = true`. /// Tiebreak derived as: `num_set_bits() > ptc_size / 2`. @@ -160,6 +168,10 @@ impl ProtoNode { self.parent_payload_status().unwrap_or(PayloadStatus::Empty) } + fn is_parent_node_full(&self) -> bool { + self.parent_parent_payload_status() == PayloadStatus::Full + } + fn attestation_score(&self, payload_status: PayloadStatus) -> u64 { match payload_status { // TODO(gloas): rename weight and remove proposer boost from it? @@ -168,6 +180,35 @@ impl ProtoNode { PayloadStatus::Full => self.full_payload_weight().unwrap_or(0), } } + + pub fn is_payload_timely(&self) -> bool { + let Ok(node) = self.as_v29() else { + return false; + }; + + // If the payload is not locally available, the payload + // is not considered available regardless of the PTC vote + if !node.payload_received { + return false; + } + + node.payload_timeliness_votes.num_set_bits() > E::ptc_size() / 2 + } + + pub fn is_payload_data_available(&self) -> bool { + let Ok(node) = self.as_v29() else { + return false; + }; + + // If the payload is not locally available, the payload + // is not considered available regardless of the PTC vote + if !node.payload_received { + return false; + } + + // TODO(gloas): add function on EthSpec for DATA_AVAILABILITY_TIMELY_THRESHOLD + node.payload_data_availability_votes.num_set_bits() > E::ptc_size() / 2 + } } #[derive(PartialEq, Debug, Encode, Decode, Serialize, Deserialize, Copy, Clone)] @@ -742,6 +783,8 @@ impl ProtoArray { let head_weight = head_node.attestation_score(); // TODO(gloas): missing equivocating weight from spec + // idea: add equivocating_attestation_score on the proto node that is updated whenever + // an equivocation 
is processed. head_weight < reorg_threshold } @@ -752,46 +795,48 @@ impl ProtoArray { /// are no equivocating blocks at the parent's slot. fn should_apply_proposer_boost( &self, - boost_index: usize, - proposer_score: u64, - justified_balances: &JustifiedBalances, - spec: &ChainSpec, + proposer_boost_root: Hash256, ) -> Result { - let boost_node = self - .nodes - .get(boost_index) - .ok_or(Error::InvalidNodeIndex(boost_index))?; + if proposer_boost_root.is_zero() { + return Ok(false); + } - let Some(parent_index) = boost_node.parent() else { - return Ok(true); // Genesis — always apply. - }; + let block_index = self.indices.get(&proposer_boost_root)?; + let block = self.nodes.get(block_index)?; + let parent_root = block.parent_root; + let parent_index = self.indices.get(&parent_root)?; + let parent = self.nodes.get(parent_index)?; + let slot = block.slot; - let parent = self - .nodes - .get(parent_index) - .ok_or(Error::InvalidNodeIndex(parent_index))?; - - // Parent not from the immediately previous slot — always apply. - if parent.slot() + 1 < boost_node.slot() { + // Apply proposer boost if `parent` is not from the previous slot + if parent.slot.saturating_add(1) < slot { return Ok(true); } - let parent_weight_without_boost = parent.weight().saturating_sub(proposer_score); - if parent_weight_without_boost >= reorg_threshold { - return Ok(true); // Parent is not weak — apply. + // Apply proposer boost if `parent` is not weak + if !self.is_head_weak(&parent, justified_balances) { + return Ok(true); } // Parent is weak. Apply boost unless there's an equivocating block at // the parent's slot from the same proposer. let parent_slot = parent.slot(); let parent_root = parent.root(); - let parent_proposer = parent.proposer_index().unwrap_or(u64::MAX); + // TODO(gloas): handle proposer index for pre-Gloas blocks? 
+ let parent_proposer = parent.proposer_index(); - let has_equivocation = self.nodes.iter().any(|n| { - n.as_v29().is_ok() - && n.slot() == parent_slot - && n.root() != parent_root - && n.proposer_index().unwrap_or(u64::MAX - 1) == parent_proposer + let has_equivocation = self.nodes.iter().any(|node| { + if let Ok(timeliness) = node.block_timeliness_ptc_threshold + && let Ok(proposer_index) = node.proposer_index() + { + timeliness + && Ok(proposer_index) == parent_proposer + && node.slot() == parent_slot + && node.root() != parent_root + } else { + // Pre-Gloas. + false + } }); Ok(!has_equivocation) @@ -1176,41 +1221,79 @@ impl ProtoArray { current_slot: Slot, best_justified_checkpoint: Checkpoint, best_finalized_checkpoint: Checkpoint, + proposer_boost_root: Hash256, + justified_balances: &JustifiedBalances, + spec: &ChainSpec, ) -> Result { - let ptc_size = E::ptc_size(); - let mut current_index = start_index; - - let mut head = ForkChoiceNode { + let mut head = IndexedForkChoiceNode { root: best_justified_checkpoint.root, + proto_node_index: start_index, payload_status: PayloadStatus::Pending, }; loop { let children = self.get_node_children(&head)?; - head = children.max_by_key(|child| (child.weight)) + let scores = children + .into_iter() + .map(|(child_fc_node, child_proto_node)| { + let weight = self.get_weight( + &child_fc_node, + &child_proto_node, + proposer_boost_root, + current_slot, + justified_balances, + spec, + )?; + let payload_status_tiebreaker = self.get_payload_status_tiebreaker( + &child_fc_node, + &child_proto_node, + current_slot, + proposer_boost_root, + )?; + Ok((child_fc_node, weight, payload_status_tiebreaker)) + }) + .collect::, Error>>()?; + // TODO(gloas): proper error + head = scores + .max_by_key(|(child_fc_node, weight, payload_status_tiebreaker)| { + (weight, child_fc_node.root, payload_status_tiebreaker) + }) + .map(|(child_fc_node, _, _)| child_fc_node) + .unwrap(); } - let head_node = self - .nodes - .get(current_index) - 
.ok_or(Error::InvalidNodeIndex(current_index))?; - Ok(head_node.root()) + Ok(head) } - fn get_weight( + fn get_weight( &self, fc_node: &ForkChoiceNode, proto_node: &ProtoNode, + proposer_boost_root: Hash256, current_slot: Slot, - ) -> u64 { + justified_balances: &JustifiedBalances, + spec: &ChainSpec, + ) -> Result { if fc_node.payload_status == PayloadStatus::Pending || proto_node.slot.saturating_add(1) != current_slot { let attestation_score = proto_node.attestation_score(); - // TODO(gloas): implement proposer boost - // + if !self.should_apply_proposer_boost(&proposer_boost_root)? { + return attestation_score; + } + + // TODO(gloas): I don't think `is_supporting_vote` is necessary here, confirm by + // checking spec tests or with spec authors. + let proposer_score = if proto_node.root == proposer_boost_root { + get_proposer_score::(justified_balances, spec)? + } else { + 0 + }; + Ok(attestation_score.saturating_add(proposer_score)) + } else { + Ok(0) } } @@ -1316,37 +1399,48 @@ impl ProtoArray { Ok(best.map(|(index, _, _)| index)) } - /// Determine whether a V29 node prefers the FULL or EMPTY direction. - fn v29_prefer_full( - v29: &ProtoNodeV29, - node_slot: Slot, + fn get_payload_status_tiebreaker( + &self, + fc_node: &IndexedForkChoiceNode, + proto_node: &ProtoNode, current_slot: Slot, - ptc_size: usize, - ) -> bool { - if !v29.payload_received { - return false; - } - if node_slot + 1 != current_slot { - // Weight comparison, tiebreak to payload_received. 
- if v29.full_payload_weight != v29.empty_payload_weight { - v29.full_payload_weight > v29.empty_payload_weight + proposer_boost_root: Hash256, + ) -> u8 { + if fc_node.payload_status == PayloadStatus::Pending + || proto_node.slot.saturating_add(1) != current_slot + { + fc.payload_status as u8 + } else { + if fc_node.payload_status == PayloadStatus::Empty { + 1 + } else if self.should_extend_payload(fc_node, proto_node, proposer_boost_root) { + 2 } else { - v29.payload_received + 0 } - } else { - // Previous slot: PTC tiebreaker only. - is_payload_timely( - &v29.payload_timeliness_votes, - ptc_size, - v29.payload_received, - ) && is_payload_data_available( - &v29.payload_data_availability_votes, - ptc_size, - v29.payload_received, - ) } } + fn should_extend_payload( + &self, + fc_node: &IndexedForkChoiceNode, + proto_node: &ProtoNode, + proposer_boost_root: Hash256, + ) -> Result { + if proposer_boost_root.is_zero() { + return Ok(false); + } + + let proposer_boost_node_index = self.indices.get(&proposer_boost_root)?; + let proposer_boost_node = self.nodes.get(&proposer_boost_node_index)?; + + Ok( + (proto_node.is_payload_timely::() && proto_node.is_payload_data_available::()) + || proposer_boost_node.parent_root != fc_node.root + || proposer_boost_node.is_parent_node_full(), + ) + } + /// Update the tree with new finalization information. The tree is only actually pruned if both /// of the two following criteria are met: /// @@ -1753,32 +1847,6 @@ impl ProtoArray { } } -/// Derive `is_payload_timely` from the timeliness vote bitfield. -/// -/// Per spec: returns false if the payload has not been received locally -/// (`payload_received == false`, i.e. `root not in store.payload_states`), -/// regardless of PTC votes. Both local receipt and PTC threshold are required. 
-pub fn is_payload_timely( - timeliness_votes: &BitVector, - ptc_size: usize, - payload_received: bool, -) -> bool { - payload_received && timeliness_votes.num_set_bits() > ptc_size / 2 -} - -/// Derive `is_payload_data_available` from the data availability vote bitfield. -/// -/// Per spec: returns false if the payload has not been received locally -/// (`payload_received == false`, i.e. `root not in store.payload_states`), -/// regardless of PTC votes. Both local receipt and PTC threshold are required. -pub fn is_payload_data_available( - availability_votes: &BitVector, - ptc_size: usize, - payload_received: bool, -) -> bool { - payload_received && availability_votes.num_set_bits() > ptc_size / 2 -} - /// A helper method to calculate the proposer boost based on the given `justified_balances`. /// /// https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/fork-choice.md#get_latest_attesting_balance @@ -1794,6 +1862,18 @@ pub fn calculate_committee_fraction( .checked_div(100) } +pub fn get_proposer_score( + justified_balances: &JustifiedBalances, + spec: &ChainSpec, +) -> Result { + let Some(proposer_score_boost) = spec.proposer_score_boost else { + // TODO(gloas): make proposer boost non-optional in spec + return Ok(0); + }; + calculate_committee_fraction::(justified_balances, proposer_score_boost) + .ok_or(Error::ProposerBoostOverflow(node_index)) +} + /// Apply a signed delta to an unsigned weight, returning an error on overflow. 
fn apply_delta(weight: u64, delta: i64, index: usize) -> Result { if delta < 0 { From c84160300320325bdeedfe5d51082b43c9766c1b Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 25 Mar 2026 13:43:30 +1100 Subject: [PATCH 023/127] Fix compilation issues --- consensus/proto_array/src/proto_array.rs | 248 ++++++++++-------- .../src/proto_array_fork_choice.rs | 23 +- 2 files changed, 144 insertions(+), 127 deletions(-) diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 3be554b3473..25a1f7e7f91 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -1,4 +1,5 @@ use crate::error::InvalidBestNodeInfo; +use crate::proto_array_fork_choice::IndexedForkChoiceNode; use crate::{Block, ExecutionStatus, JustifiedBalances, PayloadStatus, error::Error}; use fixed_bytes::FixedBytesExtended; use serde::{Deserialize, Serialize}; @@ -164,18 +165,18 @@ pub struct ProtoNode { impl ProtoNode { /// Generic version of spec's `parent_payload_status` that works for pre-Gloas nodes by /// considering their parents Empty. - fn parent_payload_status(&self) -> PayloadStatus { + fn get_parent_payload_status(&self) -> PayloadStatus { self.parent_payload_status().unwrap_or(PayloadStatus::Empty) } fn is_parent_node_full(&self) -> bool { - self.parent_parent_payload_status() == PayloadStatus::Full + self.get_parent_payload_status() == PayloadStatus::Full } fn attestation_score(&self, payload_status: PayloadStatus) -> u64 { match payload_status { // TODO(gloas): rename weight and remove proposer boost from it? 
- PayloadStatus::Pending => self.weight, + PayloadStatus::Pending => self.weight(), PayloadStatus::Empty => self.empty_payload_weight().unwrap_or(0), PayloadStatus::Full => self.full_payload_weight().unwrap_or(0), } @@ -524,8 +525,7 @@ impl ProtoArray { .get(boost_index) .is_some_and(|n| n.as_v29().is_ok()) && !self.should_apply_proposer_boost::( - boost_index, - proposer_score, + proposer_boost_root, new_justified_balances, spec, )? @@ -731,6 +731,9 @@ impl ProtoArray { }, payload_received: is_genesis, proposer_index: block.proposer_index.unwrap_or(0), + // TODO(gloas): initialise these based on block timing + block_timeliness_attestation_threshold: false, + block_timeliness_ptc_threshold: false, }) }; @@ -773,14 +776,19 @@ impl ProtoArray { Ok(()) } - fn is_head_weak(&self, head_node: &ProtoNode, justified_balances: &JustifiedBalances) -> bool { + fn is_head_weak( + &self, + head_node: &ProtoNode, + justified_balances: &JustifiedBalances, + spec: &ChainSpec, + ) -> bool { let reorg_threshold = calculate_committee_fraction::( justified_balances, spec.reorg_head_weight_threshold.unwrap_or(20), ) .unwrap_or(0); - let head_weight = head_node.attestation_score(); + let head_weight = head_node.attestation_score(PayloadStatus::Pending); // TODO(gloas): missing equivocating weight from spec // idea: add equivocating_attestation_score on the proto node that is updated whenever @@ -796,25 +804,38 @@ impl ProtoArray { fn should_apply_proposer_boost( &self, proposer_boost_root: Hash256, + justified_balances: &JustifiedBalances, + spec: &ChainSpec, ) -> Result { if proposer_boost_root.is_zero() { return Ok(false); } - let block_index = self.indices.get(&proposer_boost_root)?; - let block = self.nodes.get(block_index)?; - let parent_root = block.parent_root; - let parent_index = self.indices.get(&parent_root)?; - let parent = self.nodes.get(parent_index)?; - let slot = block.slot; + let block_index = *self + .indices + .get(&proposer_boost_root) + 
.ok_or(Error::NodeUnknown(proposer_boost_root))?; + let block = self + .nodes + .get(block_index) + .ok_or(Error::InvalidNodeIndex(block_index))?; + // TODO(gloas): handle parent unknown case? + let parent_index = block + .parent() + .ok_or(Error::NodeUnknown(proposer_boost_root))?; + let parent = self + .nodes + .get(parent_index) + .ok_or(Error::InvalidNodeIndex(parent_index))?; + let slot = block.slot(); // Apply proposer boost if `parent` is not from the previous slot - if parent.slot.saturating_add(1) < slot { + if parent.slot().saturating_add(1_u64) < slot { return Ok(true); } // Apply proposer boost if `parent` is not weak - if !self.is_head_weak(&parent, justified_balances) { + if !self.is_head_weak::(parent, justified_balances, spec) { return Ok(true); } @@ -826,7 +847,7 @@ impl ProtoArray { let parent_proposer = parent.proposer_index(); let has_equivocation = self.nodes.iter().any(|node| { - if let Ok(timeliness) = node.block_timeliness_ptc_threshold + if let Ok(timeliness) = node.block_timeliness_ptc_threshold() && let Ok(proposer_index) = node.proposer_index() { timeliness @@ -1150,12 +1171,16 @@ impl ProtoArray { /// been called without a subsequent `Self::apply_score_changes` call. This is because /// `on_new_block` does not attempt to walk backwards through the tree and update the /// best-child/best-descendant links. + #[allow(clippy::too_many_arguments)] pub fn find_head( &self, justified_root: &Hash256, current_slot: Slot, best_justified_checkpoint: Checkpoint, best_finalized_checkpoint: Checkpoint, + proposer_boost_root: Hash256, + justified_balances: &JustifiedBalances, + spec: &ChainSpec, ) -> Result<(Hash256, PayloadStatus), Error> { let justified_index = self .indices @@ -1179,19 +1204,22 @@ impl ProtoArray { }); } - // In the post-Gloas worlld, always use a virtual tree walk. + // In the post-Gloas world, always use a virtual tree walk. // // Best child/best descendant is dead. 
- let best_node = self.find_head_walk::( + let (best_fc_node, best_node) = self.find_head_walk::( justified_index, current_slot, best_justified_checkpoint, best_finalized_checkpoint, - ); + proposer_boost_root, + justified_balances, + spec, + )?; // Perform a sanity check that the node is indeed valid to be the head. if !self.node_is_viable_for_head::( - best_node, + &best_node, current_slot, best_justified_checkpoint, best_finalized_checkpoint, @@ -1207,7 +1235,7 @@ impl ProtoArray { }))); } - Ok(best_node.root()) + Ok((best_fc_node.root, best_fc_node.payload_status)) } /// Virtual tree walk for `find_head`. @@ -1215,29 +1243,39 @@ impl ProtoArray { /// At each node, determine the preferred payload direction (FULL or EMPTY) /// by comparing weights. Scan all nodes to find the best child matching /// the preferred direction. + #[allow(clippy::too_many_arguments)] fn find_head_walk( &self, start_index: usize, current_slot: Slot, best_justified_checkpoint: Checkpoint, - best_finalized_checkpoint: Checkpoint, + _best_finalized_checkpoint: Checkpoint, proposer_boost_root: Hash256, justified_balances: &JustifiedBalances, spec: &ChainSpec, - ) -> Result { + ) -> Result<(IndexedForkChoiceNode, ProtoNode), Error> { let mut head = IndexedForkChoiceNode { root: best_justified_checkpoint.root, proto_node_index: start_index, payload_status: PayloadStatus::Pending, }; + let mut head_proto_node = self + .nodes + .get(start_index) + .ok_or(Error::NodeUnknown(best_justified_checkpoint.root))? 
+ .clone(); loop { let children = self.get_node_children(&head)?; + if children.is_empty() { + break; + } + let scores = children .into_iter() .map(|(child_fc_node, child_proto_node)| { - let weight = self.get_weight( + let weight = self.get_weight::( &child_fc_node, &child_proto_node, proposer_boost_root, @@ -1245,30 +1283,38 @@ impl ProtoArray { justified_balances, spec, )?; - let payload_status_tiebreaker = self.get_payload_status_tiebreaker( + let payload_status_tiebreaker = self.get_payload_status_tiebreaker::( &child_fc_node, &child_proto_node, current_slot, proposer_boost_root, )?; - Ok((child_fc_node, weight, payload_status_tiebreaker)) + Ok(( + child_fc_node, + child_proto_node, + weight, + payload_status_tiebreaker, + )) }) .collect::, Error>>()?; // TODO(gloas): proper error - head = scores - .max_by_key(|(child_fc_node, weight, payload_status_tiebreaker)| { - (weight, child_fc_node.root, payload_status_tiebreaker) - }) - .map(|(child_fc_node, _, _)| child_fc_node) + (head, head_proto_node) = scores + .into_iter() + .max_by_key( + |(child_fc_node, _proto_node, weight, payload_status_tiebreaker)| { + (*weight, child_fc_node.root, *payload_status_tiebreaker) + }, + ) + .map(|(child_fc_node, child_proto_node, _, _)| (child_fc_node, child_proto_node)) .unwrap(); } - Ok(head) + Ok((head, head_proto_node)) } fn get_weight( &self, - fc_node: &ForkChoiceNode, + fc_node: &IndexedForkChoiceNode, proto_node: &ProtoNode, proposer_boost_root: Hash256, current_slot: Slot, @@ -1276,17 +1322,21 @@ impl ProtoArray { spec: &ChainSpec, ) -> Result { if fc_node.payload_status == PayloadStatus::Pending - || proto_node.slot.saturating_add(1) != current_slot + || proto_node.slot().saturating_add(1_u64) != current_slot { - let attestation_score = proto_node.attestation_score(); + let attestation_score = proto_node.attestation_score(fc_node.payload_status); - if !self.should_apply_proposer_boost(&proposer_boost_root)? 
{ - return attestation_score; + if !self.should_apply_proposer_boost::( + proposer_boost_root, + justified_balances, + spec, + )? { + return Ok(attestation_score); } // TODO(gloas): I don't think `is_supporting_vote` is necessary here, confirm by // checking spec tests or with spec authors. - let proposer_score = if proto_node.root == proposer_boost_root { + let proposer_score = if proto_node.root() == proposer_boost_root { get_proposer_score::(justified_balances, spec)? } else { 0 @@ -1298,27 +1348,29 @@ impl ProtoArray { } fn get_node_children( + &self, node: &IndexedForkChoiceNode, ) -> Result, Error> { if node.payload_status == PayloadStatus::Pending { let proto_node = self .nodes - .get(node.node_index) - .ok_or(Error::InvalidNodeIndex(node.node_index))? + .get(node.proto_node_index) + .ok_or(Error::InvalidNodeIndex(node.proto_node_index))? .clone(); let mut children = vec![( IndexedForkChoiceNode { root: node.root, - node_index: usize, + proto_node_index: node.proto_node_index, payload_status: PayloadStatus::Empty, }, proto_node.clone(), )]; - if proto_node.payload_exists().is_ok_and(|exists| exists) { + // The FULL virtual child only exists if the payload has been received. + if proto_node.payload_received().is_ok_and(|received| received) { children.push(( IndexedForkChoiceNode { root: node.root, - node_index: usize, + proto_node_index: node.proto_node_index, payload_status: PayloadStatus::Full, }, proto_node, @@ -1328,20 +1380,19 @@ impl ProtoArray { } else { let children = self .nodes - .get(node.node_index..) - .ok_or(Error::InvalidNodeIndex(node.node_index))? + .get(node.proto_node_index..) + .ok_or(Error::InvalidNodeIndex(node.proto_node_index))? 
.iter() .enumerate() .filter(|(_, child_node)| { - child_node.parent_root == node.root - && node.payload_status == child_node.parent_payload_status() + child_node.parent() == Some(node.proto_node_index) + && child_node.get_parent_payload_status() == node.payload_status }) - .map(|i, child_node| { - let child_index = node.node_index.saturating_add(i); + .map(|(child_index, child_node)| { ( IndexedForkChoiceNode { - root: child_node.root, - node_index: child_index, + root: child_node.root(), + proto_node_index: child_index, payload_status: PayloadStatus::Pending, }, child_node.clone(), @@ -1352,76 +1403,27 @@ impl ProtoArray { } } - /// Find the best viable child of `parent_index` whose `parent_payload_status` matches - /// `target_status`. Returns `None` if no matching viable child exists. - fn find_best_child_with_status( - &self, - parent_index: usize, - target_status: PayloadStatus, - current_slot: Slot, - best_justified_checkpoint: Checkpoint, - best_finalized_checkpoint: Checkpoint, - ) -> Result, Error> { - let mut best: Option<(usize, u64, Hash256)> = None; - for (node_index, node) in self.nodes.iter().enumerate() { - if node.parent() != Some(parent_index) { - continue; - } - if !node - .as_v29() - .is_ok_and(|v| v.parent_payload_status == target_status) - { - continue; - } - if !self.node_leads_to_viable_head::( - node, - current_slot, - best_justified_checkpoint, - best_finalized_checkpoint, - )? 
{ - continue; - } - - let child_weight = node.weight(); - let child_root = node.root(); - let replace = if let Some((_, best_weight, best_root)) = best { - child_weight > best_weight - || (child_weight == best_weight && child_root >= best_root) - } else { - true - }; - - if replace { - best = Some((node_index, child_weight, child_root)); - } - } - - Ok(best.map(|(index, _, _)| index)) - } - - fn get_payload_status_tiebreaker( + fn get_payload_status_tiebreaker( &self, fc_node: &IndexedForkChoiceNode, proto_node: &ProtoNode, current_slot: Slot, proposer_boost_root: Hash256, - ) -> u8 { + ) -> Result { if fc_node.payload_status == PayloadStatus::Pending - || proto_node.slot.saturating_add(1) != current_slot + || proto_node.slot().saturating_add(1_u64) != current_slot { - fc.payload_status as u8 + Ok(fc_node.payload_status as u8) + } else if fc_node.payload_status == PayloadStatus::Empty { + Ok(1) + } else if self.should_extend_payload::(fc_node, proto_node, proposer_boost_root)? { + Ok(2) } else { - if fc_node.payload_status == PayloadStatus::Empty { - 1 - } else if self.should_extend_payload(fc_node, proto_node, proposer_boost_root) { - 2 - } else { - 0 - } + Ok(0) } } - fn should_extend_payload( + fn should_extend_payload( &self, fc_node: &IndexedForkChoiceNode, proto_node: &ProtoNode, @@ -1431,12 +1433,29 @@ impl ProtoArray { return Ok(false); } - let proposer_boost_node_index = self.indices.get(&proposer_boost_root)?; - let proposer_boost_node = self.nodes.get(&proposer_boost_node_index)?; + let proposer_boost_node_index = *self + .indices + .get(&proposer_boost_root) + .ok_or(Error::NodeUnknown(proposer_boost_root))?; + let proposer_boost_node = self + .nodes + .get(proposer_boost_node_index) + .ok_or(Error::InvalidNodeIndex(proposer_boost_node_index))?; + + // Check if the parent of the proposer boost node matches the fc_node's root + let Some(proposer_boost_parent_index) = proposer_boost_node.parent() else { + // TODO(gloas): could be an error + return 
Ok(false); + }; + let boost_parent_root = self + .nodes + .get(proposer_boost_parent_index) + .ok_or(Error::InvalidNodeIndex(proposer_boost_parent_index))? + .root(); Ok( (proto_node.is_payload_timely::() && proto_node.is_payload_data_available::()) - || proposer_boost_node.parent_root != fc_node.root + || boost_parent_root != fc_node.root || proposer_boost_node.is_parent_node_full(), ) } @@ -1870,8 +1889,9 @@ pub fn get_proposer_score( // TODO(gloas): make proposer boost non-optional in spec return Ok(0); }; + // TODO(gloas): fix error calculate_committee_fraction::(justified_balances, proposer_score_boost) - .ok_or(Error::ProposerBoostOverflow(node_index)) + .ok_or(Error::ProposerBoostOverflow(0)) } /// Apply a signed delta to an unsigned weight, returning an error on overflow. diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index 061e8a72873..3cfd7db2658 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -3,7 +3,7 @@ use crate::{ error::Error, proto_array::{ InvalidationOperation, Iter, NodeDelta, ProposerBoost, ProtoArray, ProtoNode, - calculate_committee_fraction, is_payload_data_available, is_payload_timely, + calculate_committee_fraction, }, ssz_container::SszContainer, }; @@ -70,9 +70,9 @@ pub enum PayloadStatus { /// Spec's `ForkChoiceNode` augmented with ProtoNode index. 
pub struct IndexedForkChoiceNode { - root: Hash256, - node_index: usize, - payload_status: PayloadStatus, + pub root: Hash256, + pub proto_node_index: usize, + pub payload_status: PayloadStatus, } impl ExecutionStatus { @@ -656,7 +656,11 @@ impl ProtoArrayForkChoice { current_slot, justified_checkpoint, finalized_checkpoint, + proposer_boost_root, + new_balances, + spec, ) + .map(|(root, _payload_status)| root) .map_err(|e| format!("find_head failed: {:?}", e)) } @@ -1011,6 +1015,7 @@ impl ProtoArrayForkChoice { /// - Otherwise: prefer Full when payload has been received. /// /// Returns `None` for V17 nodes. + // TODO(gloas): delete pub fn head_payload_status( &self, head_root: &Hash256, @@ -1037,15 +1042,7 @@ impl ProtoArrayForkChoice { } } else { // Previous slot: should_extend_payload tiebreaker. - if is_payload_timely( - &v29.payload_timeliness_votes, - E::ptc_size(), - v29.payload_received, - ) && is_payload_data_available( - &v29.payload_data_availability_votes, - E::ptc_size(), - v29.payload_received, - ) { + if node.is_payload_timely::() && node.is_payload_data_available::() { Some(PayloadStatus::Full) } else { Some(PayloadStatus::Empty) From 8b448864f0d7b84332e56fc33340e6370942f4a5 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 25 Mar 2026 14:05:11 +1100 Subject: [PATCH 024/127] Re-do head_payload_status --- beacon_node/beacon_chain/src/beacon_chain.rs | 3 +- beacon_node/beacon_chain/src/builder.rs | 2 +- .../beacon_chain/src/canonical_head.rs | 13 ++++- .../tests/payload_invalidation.rs | 2 +- beacon_node/beacon_chain/tests/store_tests.rs | 4 +- .../src/test_utils/mock_builder.rs | 3 +- .../src/test_utils/mock_execution_layer.rs | 1 + consensus/fork_choice/src/fork_choice.rs | 18 ++++--- consensus/fork_choice/src/lib.rs | 2 +- .../src/fork_choice_test_definition.rs | 38 +++++---------- .../execution_status.rs | 22 +++++++++ .../ffg_updates.rs | 20 ++++++++ .../gloas_payload.rs | 32 ++++++------- .../fork_choice_test_definition/no_votes.rs | 9 
++++ .../src/fork_choice_test_definition/votes.rs | 20 ++++++++ .../src/proto_array_fork_choice.rs | 48 +------------------ testing/ef_tests/src/cases/fork_choice.rs | 22 +++------ .../src/test_rig.rs | 3 +- 18 files changed, 143 insertions(+), 119 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index d53e588d54b..86dbad09994 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -3935,7 +3935,7 @@ impl BeaconChain { let fork_choice_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_FORK_CHOICE); match fork_choice.get_head(current_slot, &self.spec) { // This block became the head, add it to the early attester cache. - Ok(new_head_root) if new_head_root == block_root => { + Ok((new_head_root, _)) if new_head_root == block_root => { if let Some(proto_block) = fork_choice.get_block(&block_root) { let new_head_is_optimistic = proto_block.execution_status.is_optimistic_or_invalid(); @@ -4906,6 +4906,7 @@ impl BeaconChain { .and_then(|execution_status| execution_status.block_hash()); let forkchoice_update_params = ForkchoiceUpdateParameters { head_root: info.parent_node.root(), + head_payload_status: canonical_forkchoice_params.head_payload_status, head_hash: parent_head_hash, justified_hash: canonical_forkchoice_params.justified_hash, finalized_hash: canonical_forkchoice_params.finalized_hash, diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index 7eb92060a27..5c99f5c4c8b 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -776,7 +776,7 @@ where slot_clock.now().ok_or("Unable to read slot")? 
}; - let initial_head_block_root = fork_choice + let (initial_head_block_root, _head_payload_status) = fork_choice .get_head(current_slot, &self.spec) .map_err(|e| format!("Unable to get fork choice head: {:?}", e))?; diff --git a/beacon_node/beacon_chain/src/canonical_head.rs b/beacon_node/beacon_chain/src/canonical_head.rs index 0faddd17929..30d4a597097 100644 --- a/beacon_node/beacon_chain/src/canonical_head.rs +++ b/beacon_node/beacon_chain/src/canonical_head.rs @@ -108,6 +108,8 @@ pub struct CachedHead { /// This value may be distinct to the `self.snapshot.beacon_state.finalized_checkpoint`. /// This value should be used over the beacon state value in practically all circumstances. finalized_checkpoint: Checkpoint, + /// The payload status of the head block, as determined by fork choice. + head_payload_status: proto_array::PayloadStatus, /// The `execution_payload.block_hash` of the block at the head of the chain. Set to `None` /// before Bellatrix. head_hash: Option, @@ -227,11 +229,16 @@ impl CachedHead { pub fn forkchoice_update_parameters(&self) -> ForkchoiceUpdateParameters { ForkchoiceUpdateParameters { head_root: self.snapshot.beacon_block_root, + head_payload_status: self.head_payload_status, head_hash: self.head_hash, justified_hash: self.justified_hash, finalized_hash: self.finalized_hash, } } + + pub fn head_payload_status(&self) -> proto_array::PayloadStatus { + self.head_payload_status + } } /// Represents the "canonical head" of the beacon chain. 
@@ -269,6 +276,7 @@ impl CanonicalHead { snapshot, justified_checkpoint: fork_choice_view.justified_checkpoint, finalized_checkpoint: fork_choice_view.finalized_checkpoint, + head_payload_status: forkchoice_update_params.head_payload_status, head_hash: forkchoice_update_params.head_hash, justified_hash: forkchoice_update_params.justified_hash, finalized_hash: forkchoice_update_params.finalized_hash, @@ -329,6 +337,7 @@ impl CanonicalHead { snapshot: Arc::new(snapshot), justified_checkpoint: fork_choice_view.justified_checkpoint, finalized_checkpoint: fork_choice_view.finalized_checkpoint, + head_payload_status: forkchoice_update_params.head_payload_status, head_hash: forkchoice_update_params.head_hash, justified_hash: forkchoice_update_params.justified_hash, finalized_hash: forkchoice_update_params.finalized_hash, @@ -606,7 +615,7 @@ impl BeaconChain { let mut fork_choice_write_lock = self.canonical_head.fork_choice_write_lock(); // Recompute the current head via the fork choice algorithm. - fork_choice_write_lock.get_head(current_slot, &self.spec)?; + let _ = fork_choice_write_lock.get_head(current_slot, &self.spec)?; // Downgrade the fork choice write-lock to a read lock, without allowing access to any // other writers. 
@@ -710,6 +719,7 @@ impl BeaconChain { snapshot: Arc::new(new_snapshot), justified_checkpoint: new_view.justified_checkpoint, finalized_checkpoint: new_view.finalized_checkpoint, + head_payload_status: new_forkchoice_update_parameters.head_payload_status, head_hash: new_forkchoice_update_parameters.head_hash, justified_hash: new_forkchoice_update_parameters.justified_hash, finalized_hash: new_forkchoice_update_parameters.finalized_hash, @@ -737,6 +747,7 @@ impl BeaconChain { snapshot: old_cached_head.snapshot.clone(), justified_checkpoint: new_view.justified_checkpoint, finalized_checkpoint: new_view.finalized_checkpoint, + head_payload_status: new_forkchoice_update_parameters.head_payload_status, head_hash: new_forkchoice_update_parameters.head_hash, justified_hash: new_forkchoice_update_parameters.justified_hash, finalized_hash: new_forkchoice_update_parameters.finalized_hash, diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs index 7adffd3824a..13672bbb63e 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -1350,7 +1350,7 @@ async fn recover_from_invalid_head_by_importing_blocks() { "the fork block should become the head" ); - let manual_get_head = rig + let (manual_get_head, _) = rig .harness .chain .canonical_head diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index 0e187a8f4b9..e9af6df3e81 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -5430,10 +5430,12 @@ fn assert_chains_pretty_much_the_same(a: &BeaconChain, b .fork_choice_write_lock() .get_head(slot, &spec) .unwrap() + .0 == b.canonical_head .fork_choice_write_lock() .get_head(slot, &spec) - .unwrap(), + .unwrap() + .0, "fork_choice heads should be equal" ); } diff --git 
a/beacon_node/execution_layer/src/test_utils/mock_builder.rs b/beacon_node/execution_layer/src/test_utils/mock_builder.rs index 7b6c4e8310c..aa7e309f2c0 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_builder.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_builder.rs @@ -12,7 +12,7 @@ use eth2::{ BeaconNodeHttpClient, CONSENSUS_VERSION_HEADER, CONTENT_TYPE_HEADER, SSZ_CONTENT_TYPE_HEADER, Timeouts, }; -use fork_choice::ForkchoiceUpdateParameters; +use fork_choice::{ForkchoiceUpdateParameters, PayloadStatus as FcPayloadStatus}; use parking_lot::RwLock; use sensitive_url::SensitiveUrl; use ssz::Encode; @@ -934,6 +934,7 @@ impl MockBuilder { finalized_hash: Some(finalized_execution_hash), justified_hash: Some(justified_execution_hash), head_root: head_block_root, + head_payload_status: FcPayloadStatus::Pending, }; let _status = self diff --git a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs index 91966ff65e3..0aee30dff0f 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs @@ -92,6 +92,7 @@ impl MockExecutionLayer { let head_block_root = Hash256::repeat_byte(42); let forkchoice_update_params = ForkchoiceUpdateParameters { head_root: head_block_root, + head_payload_status: fork_choice::PayloadStatus::Pending, head_hash: Some(parent_hash), justified_hash: None, finalized_hash: None, diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index 5dc081d6ce4..3c6dd9e5e03 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -4,7 +4,7 @@ use fixed_bytes::FixedBytesExtended; use logging::crit; use proto_array::{ Block as ProtoBlock, DisallowedReOrgOffsets, ExecutionStatus, JustifiedBalances, LatestMessage, - ProposerHeadError, ProposerHeadInfo, ProtoArrayForkChoice, 
ReOrgThreshold, + PayloadStatus, ProposerHeadError, ProposerHeadInfo, ProtoArrayForkChoice, ReOrgThreshold, }; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; @@ -333,6 +333,7 @@ fn dequeue_payload_attestations( pub struct ForkchoiceUpdateParameters { /// The most recent result of running `ForkChoice::get_head`. pub head_root: Hash256, + pub head_payload_status: PayloadStatus, pub head_hash: Option, pub justified_hash: Option, pub finalized_hash: Option, @@ -470,14 +471,15 @@ where head_hash: None, justified_hash: None, finalized_hash: None, - // This will be updated during the next call to `Self::get_head`. + // These will be updated during the next call to `Self::get_head`. head_root: Hash256::zero(), + head_payload_status: PayloadStatus::Pending, }, _phantom: PhantomData, }; // Ensure that `fork_choice.forkchoice_update_parameters.head_root` is updated. - fork_choice.get_head(current_slot, spec)?; + let _ = fork_choice.get_head(current_slot, spec)?; Ok(fork_choice) } @@ -544,7 +546,7 @@ where &mut self, system_time_current_slot: Slot, spec: &ChainSpec, - ) -> Result> { + ) -> Result<(Hash256, PayloadStatus), Error> { // Provide the slot (as per the system clock) to the `fc_store` and then return its view of // the current slot. The `fc_store` will ensure that the `current_slot` is never // decreasing, a property which we must maintain. @@ -552,7 +554,7 @@ where let store = &mut self.fc_store; - let head_root = self.proto_array.find_head::( + let (head_root, head_payload_status) = self.proto_array.find_head::( *store.justified_checkpoint(), *store.finalized_checkpoint(), store.justified_balances(), @@ -576,12 +578,13 @@ where .and_then(|b| b.execution_status.block_hash()); self.forkchoice_update_parameters = ForkchoiceUpdateParameters { head_root, + head_payload_status, head_hash, justified_hash, finalized_hash, }; - Ok(head_root) + Ok((head_root, head_payload_status)) } /// Get the block to build on as proposer, taking into account proposer re-orgs. 
@@ -1745,6 +1748,7 @@ where finalized_hash: None, // Will be updated in the following call to `Self::get_head`. head_root: Hash256::zero(), + head_payload_status: PayloadStatus::Pending, }, _phantom: PhantomData, }; @@ -1766,7 +1770,7 @@ where .set_all_blocks_to_optimistic::(spec)?; // If the second attempt at finding a head fails, return an error since we do not // expect this scenario. - fork_choice.get_head(current_slot, spec)?; + let _ = fork_choice.get_head(current_slot, spec)?; } Ok(fork_choice) diff --git a/consensus/fork_choice/src/lib.rs b/consensus/fork_choice/src/lib.rs index 824fc2dff05..de3e709a844 100644 --- a/consensus/fork_choice/src/lib.rs +++ b/consensus/fork_choice/src/lib.rs @@ -10,5 +10,5 @@ pub use crate::fork_choice::{ }; pub use fork_choice_store::ForkChoiceStore; pub use proto_array::{ - Block as ProtoBlock, ExecutionStatus, InvalidationOperation, ProposerHeadError, + Block as ProtoBlock, ExecutionStatus, InvalidationOperation, PayloadStatus, ProposerHeadError, }; diff --git a/consensus/proto_array/src/fork_choice_test_definition.rs b/consensus/proto_array/src/fork_choice_test_definition.rs index a89073a7b86..de31a0905e5 100644 --- a/consensus/proto_array/src/fork_choice_test_definition.rs +++ b/consensus/proto_array/src/fork_choice_test_definition.rs @@ -29,6 +29,8 @@ pub enum Operation { justified_state_balances: Vec, expected_head: Hash256, current_slot: Slot, + #[serde(default)] + expected_payload_status: Option, }, ProposerBoostFindHead { justified_checkpoint: Checkpoint, @@ -88,11 +90,6 @@ pub enum Operation { block_root: Hash256, expected_status: PayloadStatus, }, - AssertHeadPayloadStatus { - head_root: Hash256, - expected_status: PayloadStatus, - current_slot: Slot, - }, SetPayloadTiebreak { block_root: Hash256, is_timely: bool, @@ -159,11 +156,12 @@ impl ForkChoiceTestDefinition { justified_state_balances, expected_head, current_slot, + expected_payload_status, } => { let justified_balances = 
JustifiedBalances::from_effective_balances(justified_state_balances) .unwrap(); - let head = fork_choice + let (head, payload_status) = fork_choice .find_head::( justified_checkpoint, finalized_checkpoint, @@ -182,6 +180,13 @@ impl ForkChoiceTestDefinition { "Operation at index {} failed head check. Operation: {:?}", op_index, op ); + if let Some(expected_status) = expected_payload_status { + assert_eq!( + payload_status, expected_status, + "Operation at index {} failed payload status check. Operation: {:?}", + op_index, op + ); + } check_bytes_round_trip(&fork_choice); } Operation::ProposerBoostFindHead { @@ -194,7 +199,7 @@ impl ForkChoiceTestDefinition { let justified_balances = JustifiedBalances::from_effective_balances(justified_state_balances) .unwrap(); - let head = fork_choice + let (head, _payload_status) = fork_choice .find_head::( justified_checkpoint, finalized_checkpoint, @@ -455,25 +460,6 @@ impl ForkChoiceTestDefinition { op_index ); } - Operation::AssertHeadPayloadStatus { - head_root, - expected_status, - current_slot, - } => { - let actual = fork_choice - .head_payload_status::(&head_root, current_slot) - .unwrap_or_else(|| { - panic!( - "AssertHeadPayloadStatus: head root not found at op index {}", - op_index - ) - }); - assert_eq!( - actual, expected_status, - "head_payload_status mismatch at op index {}", - op_index - ); - } Operation::SetPayloadTiebreak { block_root, is_timely, diff --git a/consensus/proto_array/src/fork_choice_test_definition/execution_status.rs b/consensus/proto_array/src/fork_choice_test_definition/execution_status.rs index 59e80dbe66b..8743363f9c1 100644 --- a/consensus/proto_array/src/fork_choice_test_definition/execution_status.rs +++ b/consensus/proto_array/src/fork_choice_test_definition/execution_status.rs @@ -17,6 +17,7 @@ pub fn get_execution_status_test_definition_01() -> ForkChoiceTestDefinition { justified_state_balances: balances.clone(), expected_head: get_root(0), current_slot: Slot::new(0), + 
expected_payload_status: None, }); // Add a block with a hash of 2. @@ -57,6 +58,7 @@ pub fn get_execution_status_test_definition_01() -> ForkChoiceTestDefinition { justified_state_balances: balances.clone(), expected_head: get_root(2), current_slot: Slot::new(0), + expected_payload_status: None, }); // Add a block with a hash of 1 that comes off the genesis block (this is a fork compared @@ -98,6 +100,7 @@ pub fn get_execution_status_test_definition_01() -> ForkChoiceTestDefinition { justified_state_balances: balances.clone(), expected_head: get_root(2), current_slot: Slot::new(0), + expected_payload_status: None, }); // Add a vote to block 1 @@ -128,6 +131,7 @@ pub fn get_execution_status_test_definition_01() -> ForkChoiceTestDefinition { justified_state_balances: balances.clone(), expected_head: get_root(1), current_slot: Slot::new(0), + expected_payload_status: None, }); ops.push(Operation::AssertWeight { @@ -171,6 +175,7 @@ pub fn get_execution_status_test_definition_01() -> ForkChoiceTestDefinition { justified_state_balances: balances.clone(), expected_head: get_root(2), current_slot: Slot::new(0), + expected_payload_status: None, }); ops.push(Operation::AssertWeight { @@ -228,6 +233,7 @@ pub fn get_execution_status_test_definition_01() -> ForkChoiceTestDefinition { justified_state_balances: balances.clone(), expected_head: get_root(2), current_slot: Slot::new(0), + expected_payload_status: None, }); ops.push(Operation::AssertWeight { @@ -279,6 +285,7 @@ pub fn get_execution_status_test_definition_01() -> ForkChoiceTestDefinition { justified_state_balances: balances.clone(), expected_head: get_root(2), current_slot: Slot::new(0), + expected_payload_status: None, }); ops.push(Operation::AssertWeight { @@ -329,6 +336,7 @@ pub fn get_execution_status_test_definition_01() -> ForkChoiceTestDefinition { justified_state_balances: balances.clone(), expected_head: get_root(2), current_slot: Slot::new(0), + expected_payload_status: None, }); // Invalidation of 3 should 
have removed upstream weight. @@ -383,6 +391,7 @@ pub fn get_execution_status_test_definition_01() -> ForkChoiceTestDefinition { justified_state_balances: balances, expected_head: get_root(1), current_slot: Slot::new(0), + expected_payload_status: None, }); ops.push(Operation::AssertWeight { @@ -437,6 +446,7 @@ pub fn get_execution_status_test_definition_02() -> ForkChoiceTestDefinition { justified_state_balances: balances.clone(), expected_head: get_root(0), current_slot: Slot::new(0), + expected_payload_status: None, }); // Add a block with a hash of 2. @@ -477,6 +487,7 @@ pub fn get_execution_status_test_definition_02() -> ForkChoiceTestDefinition { justified_state_balances: balances.clone(), expected_head: get_root(2), current_slot: Slot::new(0), + expected_payload_status: None, }); // Add a block with a hash of 1 that comes off the genesis block (this is a fork compared @@ -518,6 +529,7 @@ pub fn get_execution_status_test_definition_02() -> ForkChoiceTestDefinition { justified_state_balances: balances.clone(), expected_head: get_root(2), current_slot: Slot::new(0), + expected_payload_status: None, }); // Add a vote to block 1 @@ -548,6 +560,7 @@ pub fn get_execution_status_test_definition_02() -> ForkChoiceTestDefinition { justified_state_balances: balances.clone(), expected_head: get_root(1), current_slot: Slot::new(0), + expected_payload_status: None, }); ops.push(Operation::AssertWeight { @@ -591,6 +604,7 @@ pub fn get_execution_status_test_definition_02() -> ForkChoiceTestDefinition { justified_state_balances: balances.clone(), expected_head: get_root(2), current_slot: Slot::new(0), + expected_payload_status: None, }); ops.push(Operation::AssertWeight { @@ -648,6 +662,7 @@ pub fn get_execution_status_test_definition_02() -> ForkChoiceTestDefinition { justified_state_balances: balances.clone(), expected_head: get_root(2), current_slot: Slot::new(0), + expected_payload_status: None, }); ops.push(Operation::AssertWeight { @@ -712,6 +727,7 @@ pub fn 
get_execution_status_test_definition_02() -> ForkChoiceTestDefinition { justified_state_balances: balances.clone(), expected_head: get_root(3), current_slot: Slot::new(0), + expected_payload_status: None, }); ops.push(Operation::AssertWeight { @@ -762,6 +778,7 @@ pub fn get_execution_status_test_definition_02() -> ForkChoiceTestDefinition { justified_state_balances: balances, expected_head: get_root(2), current_slot: Slot::new(0), + expected_payload_status: None, }); // Invalidation of 3 should have removed upstream weight. @@ -818,6 +835,7 @@ pub fn get_execution_status_test_definition_03() -> ForkChoiceTestDefinition { justified_state_balances: balances.clone(), expected_head: get_root(0), current_slot: Slot::new(0), + expected_payload_status: None, }); // Add a block with a hash of 2. @@ -858,6 +876,7 @@ pub fn get_execution_status_test_definition_03() -> ForkChoiceTestDefinition { justified_state_balances: balances.clone(), expected_head: get_root(2), current_slot: Slot::new(0), + expected_payload_status: None, }); // Add a block with a hash of 1 that comes off the genesis block (this is a fork compared @@ -899,6 +918,7 @@ pub fn get_execution_status_test_definition_03() -> ForkChoiceTestDefinition { justified_state_balances: balances.clone(), expected_head: get_root(2), current_slot: Slot::new(0), + expected_payload_status: None, }); // Add a vote to block 1 @@ -929,6 +949,7 @@ pub fn get_execution_status_test_definition_03() -> ForkChoiceTestDefinition { justified_state_balances: balances.clone(), expected_head: get_root(1), current_slot: Slot::new(0), + expected_payload_status: None, }); ops.push(Operation::AssertWeight { @@ -972,6 +993,7 @@ pub fn get_execution_status_test_definition_03() -> ForkChoiceTestDefinition { justified_state_balances: balances.clone(), expected_head: get_root(1), current_slot: Slot::new(0), + expected_payload_status: None, }); ops.push(Operation::AssertWeight { diff --git 
a/consensus/proto_array/src/fork_choice_test_definition/ffg_updates.rs b/consensus/proto_array/src/fork_choice_test_definition/ffg_updates.rs index 34a4372e274..76f9a953153 100644 --- a/consensus/proto_array/src/fork_choice_test_definition/ffg_updates.rs +++ b/consensus/proto_array/src/fork_choice_test_definition/ffg_updates.rs @@ -11,6 +11,7 @@ pub fn get_ffg_case_01_test_definition() -> ForkChoiceTestDefinition { justified_state_balances: balances.clone(), expected_head: get_root(0), current_slot: Slot::new(0), + expected_payload_status: None, }); // Build the following tree (stick? lol). @@ -65,6 +66,7 @@ pub fn get_ffg_case_01_test_definition() -> ForkChoiceTestDefinition { justified_state_balances: balances.clone(), expected_head: get_root(3), current_slot: Slot::new(0), + expected_payload_status: None, }); // Ensure that with justified epoch 1 we find 3 @@ -86,6 +88,7 @@ pub fn get_ffg_case_01_test_definition() -> ForkChoiceTestDefinition { justified_state_balances: balances.clone(), expected_head: get_root(3), current_slot: Slot::new(0), + expected_payload_status: None, }); // Ensure that with justified epoch 2 we find 3 @@ -103,6 +106,7 @@ pub fn get_ffg_case_01_test_definition() -> ForkChoiceTestDefinition { justified_state_balances: balances, expected_head: get_root(3), current_slot: Slot::new(0), + expected_payload_status: None, }); // END OF TESTS @@ -128,6 +132,7 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { justified_state_balances: balances.clone(), expected_head: get_root(0), current_slot: Slot::new(0), + expected_payload_status: None, }); // Build the following tree. @@ -275,6 +280,7 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { justified_state_balances: balances.clone(), expected_head: get_root(10), current_slot: Slot::new(0), + expected_payload_status: None, }); // Same as above, but with justified epoch 2. 
ops.push(Operation::FindHead { @@ -286,6 +292,7 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { justified_state_balances: balances.clone(), expected_head: get_root(10), current_slot: Slot::new(0), + expected_payload_status: None, }); // Same as above, but with justified epoch 3. // @@ -301,6 +308,7 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { justified_state_balances: balances.clone(), expected_head: get_root(10), current_slot: Slot::new(0), + expected_payload_status: None, }); // Add a vote to 1. @@ -341,6 +349,7 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { justified_state_balances: balances.clone(), expected_head: get_root(9), current_slot: Slot::new(0), + expected_payload_status: None, }); // Save as above but justified epoch 2. ops.push(Operation::FindHead { @@ -352,6 +361,7 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { justified_state_balances: balances.clone(), expected_head: get_root(9), current_slot: Slot::new(0), + expected_payload_status: None, }); // Save as above but justified epoch 3. // @@ -367,6 +377,7 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { justified_state_balances: balances.clone(), expected_head: get_root(9), current_slot: Slot::new(0), + expected_payload_status: None, }); // Add a vote to 2. @@ -407,6 +418,7 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { justified_state_balances: balances.clone(), expected_head: get_root(10), current_slot: Slot::new(0), + expected_payload_status: None, }); // Same as above but justified epoch 2. ops.push(Operation::FindHead { @@ -418,6 +430,7 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { justified_state_balances: balances.clone(), expected_head: get_root(10), current_slot: Slot::new(0), + expected_payload_status: None, }); // Same as above but justified epoch 3. 
// @@ -433,6 +446,7 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { justified_state_balances: balances.clone(), expected_head: get_root(10), current_slot: Slot::new(0), + expected_payload_status: None, }); // Ensure that if we start at 1 we find 9 (just: 0, fin: 0). @@ -457,6 +471,7 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { justified_state_balances: balances.clone(), expected_head: get_root(9), current_slot: Slot::new(0), + expected_payload_status: None, }); // Same as above but justified epoch 2. ops.push(Operation::FindHead { @@ -468,6 +483,7 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { justified_state_balances: balances.clone(), expected_head: get_root(9), current_slot: Slot::new(0), + expected_payload_status: None, }); // Same as above but justified epoch 3. // @@ -483,6 +499,7 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { justified_state_balances: balances.clone(), expected_head: get_root(9), current_slot: Slot::new(0), + expected_payload_status: None, }); // Ensure that if we start at 2 we find 10 (just: 0, fin: 0). @@ -504,6 +521,7 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { justified_state_balances: balances.clone(), expected_head: get_root(10), current_slot: Slot::new(0), + expected_payload_status: None, }); // Same as above but justified epoch 2. ops.push(Operation::FindHead { @@ -515,6 +533,7 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { justified_state_balances: balances.clone(), expected_head: get_root(10), current_slot: Slot::new(0), + expected_payload_status: None, }); // Same as above but justified epoch 3. 
// @@ -530,6 +549,7 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { justified_state_balances: balances, expected_head: get_root(10), current_slot: Slot::new(0), + expected_payload_status: None, }); // END OF TESTS diff --git a/consensus/proto_array/src/fork_choice_test_definition/gloas_payload.rs b/consensus/proto_array/src/fork_choice_test_definition/gloas_payload.rs index 8354b22e474..0fb120328c2 100644 --- a/consensus/proto_array/src/fork_choice_test_definition/gloas_payload.rs +++ b/consensus/proto_array/src/fork_choice_test_definition/gloas_payload.rs @@ -78,6 +78,7 @@ pub fn get_gloas_chain_following_test_definition() -> ForkChoiceTestDefinition { justified_state_balances: vec![1], expected_head: get_root(3), current_slot: Slot::new(0), + expected_payload_status: None, }); ops.push(Operation::SetPayloadTiebreak { @@ -91,6 +92,7 @@ pub fn get_gloas_chain_following_test_definition() -> ForkChoiceTestDefinition { justified_state_balances: vec![1], expected_head: get_root(4), current_slot: Slot::new(0), + expected_payload_status: None, }); ForkChoiceTestDefinition { @@ -139,6 +141,8 @@ pub fn get_gloas_payload_probe_test_definition() -> ForkChoiceTestDefinition { justified_state_balances: vec![1, 1], expected_head: get_root(1), current_slot: Slot::new(0), + // With MainnetEthSpec PTC_SIZE=512, 1 bit set out of 256 threshold → not timely → Empty. + expected_payload_status: Some(PayloadStatus::Empty), }); // PTC votes write to bitfields only, not to full/empty weight. // Weight is 0 because no CL attestations target this block. @@ -147,12 +151,6 @@ pub fn get_gloas_payload_probe_test_definition() -> ForkChoiceTestDefinition { expected_full_weight: 0, expected_empty_weight: 0, }); - // With MainnetEthSpec PTC_SIZE=512, 1 bit set out of 256 threshold → not timely → Empty. 
- ops.push(Operation::AssertHeadPayloadStatus { - head_root: get_root(1), - expected_status: PayloadStatus::Empty, - current_slot: Slot::new(0), - }); // Flip validator 0 to Empty; both bits now clear. ops.push(Operation::ProcessPayloadAttestation { @@ -168,17 +166,13 @@ pub fn get_gloas_payload_probe_test_definition() -> ForkChoiceTestDefinition { justified_state_balances: vec![1, 1], expected_head: get_root(1), current_slot: Slot::new(0), + expected_payload_status: Some(PayloadStatus::Empty), }); ops.push(Operation::AssertPayloadWeights { block_root: get_root(1), expected_full_weight: 0, expected_empty_weight: 0, }); - ops.push(Operation::AssertHeadPayloadStatus { - head_root: get_root(1), - expected_status: PayloadStatus::Empty, - current_slot: Slot::new(0), - }); // Same-slot attestation to a new head candidate should be Pending (no payload bucket change). // Root 5 is an Empty child of root_1 (parent_hash doesn't match root_1's block_hash), @@ -205,17 +199,13 @@ pub fn get_gloas_payload_probe_test_definition() -> ForkChoiceTestDefinition { justified_state_balances: vec![1, 1, 1], expected_head: get_root(5), current_slot: Slot::new(0), + expected_payload_status: Some(PayloadStatus::Empty), }); ops.push(Operation::AssertPayloadWeights { block_root: get_root(5), expected_full_weight: 0, expected_empty_weight: 0, }); - ops.push(Operation::AssertHeadPayloadStatus { - head_root: get_root(5), - expected_status: PayloadStatus::Empty, - current_slot: Slot::new(0), - }); ForkChoiceTestDefinition { finalized_block_slot: Slot::new(0), @@ -289,6 +279,7 @@ pub fn get_gloas_find_head_vote_transition_test_definition() -> ForkChoiceTestDe justified_state_balances: vec![1], expected_head: get_root(3), current_slot: Slot::new(0), + expected_payload_status: None, }); // CL attestation to Empty branch (root 4) from validator 0 → head flips to 4. 
@@ -303,6 +294,7 @@ pub fn get_gloas_find_head_vote_transition_test_definition() -> ForkChoiceTestDe justified_state_balances: vec![1], expected_head: get_root(4), current_slot: Slot::new(0), + expected_payload_status: None, }); // CL attestation back to Full branch (root 3) → head returns to 3. @@ -317,6 +309,7 @@ pub fn get_gloas_find_head_vote_transition_test_definition() -> ForkChoiceTestDe justified_state_balances: vec![1], expected_head: get_root(3), current_slot: Slot::new(0), + expected_payload_status: None, }); ForkChoiceTestDefinition { @@ -391,6 +384,7 @@ pub fn get_gloas_weight_priority_over_payload_preference_test_definition() justified_state_balances: vec![1], expected_head: get_root(3), current_slot: Slot::new(0), + expected_payload_status: None, }); // Two CL attestations to the Empty branch make it strictly heavier, @@ -411,6 +405,7 @@ pub fn get_gloas_weight_priority_over_payload_preference_test_definition() justified_state_balances: vec![1, 1], expected_head: get_root(4), current_slot: Slot::new(0), + expected_payload_status: None, }); ForkChoiceTestDefinition { @@ -559,6 +554,7 @@ pub fn get_gloas_interleaved_attestations_test_definition() -> ForkChoiceTestDef justified_state_balances: vec![1, 1], expected_head: get_root(4), current_slot: Slot::new(1), + expected_payload_status: None, }); // Step 5: Flip tiebreaker to Full → Full branch wins. @@ -573,6 +569,7 @@ pub fn get_gloas_interleaved_attestations_test_definition() -> ForkChoiceTestDef justified_state_balances: vec![1, 1], expected_head: get_root(3), current_slot: Slot::new(100), + expected_payload_status: None, }); // Step 6: Add extra CL weight to Empty branch → overrides Full tiebreaker. 
@@ -587,6 +584,7 @@ pub fn get_gloas_interleaved_attestations_test_definition() -> ForkChoiceTestDef justified_state_balances: vec![1, 1, 1], expected_head: get_root(4), current_slot: Slot::new(100), + expected_payload_status: None, }); ForkChoiceTestDefinition { @@ -673,6 +671,7 @@ pub fn get_gloas_payload_received_interleaving_test_definition() -> ForkChoiceTe justified_state_balances: vec![1, 1], expected_head: get_root(1), current_slot: Slot::new(100), + expected_payload_status: None, }); // ProcessExecutionPayload on genesis is a no-op (already received at init). @@ -701,6 +700,7 @@ pub fn get_gloas_payload_received_interleaving_test_definition() -> ForkChoiceTe justified_state_balances: vec![1, 1], expected_head: get_root(1), current_slot: Slot::new(100), + expected_payload_status: None, }); ForkChoiceTestDefinition { diff --git a/consensus/proto_array/src/fork_choice_test_definition/no_votes.rs b/consensus/proto_array/src/fork_choice_test_definition/no_votes.rs index 71d4c035aef..7b5ee31c642 100644 --- a/consensus/proto_array/src/fork_choice_test_definition/no_votes.rs +++ b/consensus/proto_array/src/fork_choice_test_definition/no_votes.rs @@ -19,6 +19,7 @@ pub fn get_no_votes_test_definition() -> ForkChoiceTestDefinition { justified_state_balances: balances.clone(), expected_head: Hash256::zero(), current_slot: Slot::new(0), + expected_payload_status: None, }, // Add block 2 // @@ -57,6 +58,7 @@ pub fn get_no_votes_test_definition() -> ForkChoiceTestDefinition { justified_state_balances: balances.clone(), expected_head: get_root(2), current_slot: Slot::new(0), + expected_payload_status: None, }, // Add block 1 // @@ -95,6 +97,7 @@ pub fn get_no_votes_test_definition() -> ForkChoiceTestDefinition { justified_state_balances: balances.clone(), expected_head: get_root(2), current_slot: Slot::new(0), + expected_payload_status: None, }, // Add block 3 // @@ -137,6 +140,7 @@ pub fn get_no_votes_test_definition() -> ForkChoiceTestDefinition { 
justified_state_balances: balances.clone(), expected_head: get_root(2), current_slot: Slot::new(0), + expected_payload_status: None, }, // Add block 4 // @@ -179,6 +183,7 @@ pub fn get_no_votes_test_definition() -> ForkChoiceTestDefinition { justified_state_balances: balances.clone(), expected_head: get_root(4), current_slot: Slot::new(0), + expected_payload_status: None, }, // Add block 5 with a justified epoch of 2 // @@ -222,6 +227,7 @@ pub fn get_no_votes_test_definition() -> ForkChoiceTestDefinition { justified_state_balances: balances.clone(), expected_head: get_root(5), current_slot: Slot::new(0), + expected_payload_status: None, }, // Ensure there is no error when starting from a block that has the // wrong justified epoch. @@ -249,6 +255,7 @@ pub fn get_no_votes_test_definition() -> ForkChoiceTestDefinition { justified_state_balances: balances.clone(), expected_head: get_root(5), current_slot: Slot::new(0), + expected_payload_status: None, }, // Set the justified epoch to 2 and the start block to 5 and ensure 5 is the head. 
// @@ -268,6 +275,7 @@ pub fn get_no_votes_test_definition() -> ForkChoiceTestDefinition { justified_state_balances: balances.clone(), expected_head: get_root(5), current_slot: Slot::new(0), + expected_payload_status: None, }, // Add block 6 // @@ -312,6 +320,7 @@ pub fn get_no_votes_test_definition() -> ForkChoiceTestDefinition { justified_state_balances: balances, expected_head: get_root(6), current_slot: Slot::new(0), + expected_payload_status: None, }, ]; diff --git a/consensus/proto_array/src/fork_choice_test_definition/votes.rs b/consensus/proto_array/src/fork_choice_test_definition/votes.rs index 3ba21db48a4..cdd95531278 100644 --- a/consensus/proto_array/src/fork_choice_test_definition/votes.rs +++ b/consensus/proto_array/src/fork_choice_test_definition/votes.rs @@ -17,6 +17,7 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { justified_state_balances: balances.clone(), expected_head: get_root(0), current_slot: Slot::new(0), + expected_payload_status: None, }); // Add a block with a hash of 2. 
@@ -57,6 +58,7 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { justified_state_balances: balances.clone(), expected_head: get_root(2), current_slot: Slot::new(0), + expected_payload_status: None, }); // Add a block with a hash of 1 that comes off the genesis block (this is a fork compared @@ -98,6 +100,7 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { justified_state_balances: balances.clone(), expected_head: get_root(2), current_slot: Slot::new(0), + expected_payload_status: None, }); // Add a vote to block 1 @@ -128,6 +131,7 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { justified_state_balances: balances.clone(), expected_head: get_root(1), current_slot: Slot::new(0), + expected_payload_status: None, }); // Add a vote to block 2 @@ -158,6 +162,7 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { justified_state_balances: balances.clone(), expected_head: get_root(2), current_slot: Slot::new(0), + expected_payload_status: None, }); // Add block 3. @@ -202,6 +207,7 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { justified_state_balances: balances.clone(), expected_head: get_root(2), current_slot: Slot::new(0), + expected_payload_status: None, }); // Move validator #0 vote from 1 to 3 @@ -236,6 +242,7 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { justified_state_balances: balances.clone(), expected_head: get_root(2), current_slot: Slot::new(0), + expected_payload_status: None, }); // Move validator #1 vote from 2 to 1 (this is an equivocation, but fork choice doesn't @@ -271,6 +278,7 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { justified_state_balances: balances.clone(), expected_head: get_root(3), current_slot: Slot::new(0), + expected_payload_status: None, }); // Add block 4. 
@@ -319,6 +327,7 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { justified_state_balances: balances.clone(), expected_head: get_root(4), current_slot: Slot::new(0), + expected_payload_status: None, }); // Add block 5, which has a justified epoch of 2. @@ -371,6 +380,7 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { justified_state_balances: balances.clone(), expected_head: get_root(4), current_slot: Slot::new(0), + expected_payload_status: None, }); // Add block 6, which has a justified epoch of 0. @@ -516,6 +526,7 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { justified_state_balances: balances.clone(), expected_head: get_root(6), current_slot: Slot::new(0), + expected_payload_status: None, }); // Change fork-choice justified epoch to 1, and the start block to 5 and ensure that 9 is @@ -550,6 +561,7 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { justified_state_balances: balances.clone(), expected_head: get_root(9), current_slot: Slot::new(0), + expected_payload_status: None, }); // Change fork-choice justified epoch to 1, and the start block to 5 and ensure that 9 is @@ -629,6 +641,7 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { justified_state_balances: balances.clone(), expected_head: get_root(9), current_slot: Slot::new(0), + expected_payload_status: None, }); // Introduce 2 more validators into the system @@ -691,6 +704,7 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { justified_state_balances: balances.clone(), expected_head: get_root(10), current_slot: Slot::new(0), + expected_payload_status: None, }); // Set the balances of the last two validators to zero @@ -717,6 +731,7 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { justified_state_balances: balances.clone(), expected_head: get_root(9), current_slot: Slot::new(0), + expected_payload_status: None, }); // Set the balances of the last two validators back to 1 @@ 
-743,6 +758,7 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { justified_state_balances: balances.clone(), expected_head: get_root(10), current_slot: Slot::new(0), + expected_payload_status: None, }); // Remove the last two validators @@ -770,6 +786,7 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { justified_state_balances: balances.clone(), expected_head: get_root(9), current_slot: Slot::new(0), + expected_payload_status: None, }); // Ensure that pruning below the prune threshold does not prune. @@ -792,6 +809,7 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { justified_state_balances: balances.clone(), expected_head: get_root(9), current_slot: Slot::new(0), + expected_payload_status: None, }); // Ensure that pruning above the prune threshold does prune. @@ -831,6 +849,7 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { justified_state_balances: balances.clone(), expected_head: get_root(9), current_slot: Slot::new(0), + expected_payload_status: None, }); // Add block 11 @@ -883,6 +902,7 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { justified_state_balances: balances, expected_head: get_root(11), current_slot: Slot::new(0), + expected_payload_status: None, }); ForkChoiceTestDefinition { diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index 3cfd7db2658..19be43511fc 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -59,7 +59,7 @@ pub enum ExecutionStatus { } /// Represents the status of an execution payload post-Gloas. 
-#[derive(Clone, Copy, Debug, PartialEq, Encode, Decode, Serialize, Deserialize)] +#[derive(Clone, Copy, Debug, PartialEq, Eq, Encode, Decode, Serialize, Deserialize)] #[ssz(enum_behaviour = "tag")] #[repr(u8)] pub enum PayloadStatus { @@ -616,7 +616,7 @@ impl ProtoArrayForkChoice { equivocating_indices: &BTreeSet, current_slot: Slot, spec: &ChainSpec, - ) -> Result { + ) -> Result<(Hash256, PayloadStatus), String> { let old_balances = &mut self.balances; let new_balances = justified_state_balances; let node_slots = self @@ -660,7 +660,6 @@ impl ProtoArrayForkChoice { new_balances, spec, ) - .map(|(root, _payload_status)| root) .map_err(|e| format!("find_head failed: {:?}", e)) } @@ -1007,49 +1006,6 @@ impl ProtoArrayForkChoice { /// Returns the payload status of the head node based on accumulated weights and tiebreaker. /// - /// Returns `Full` if `full_payload_weight > empty_payload_weight`. - /// Returns `Empty` if `empty_payload_weight > full_payload_weight`. - /// On ties: - /// - Previous slot (`slot + 1 == current_slot`): prefer Full only when timely and - /// data available (per `should_extend_payload`). - /// - Otherwise: prefer Full when payload has been received. - /// - /// Returns `None` for V17 nodes. - // TODO(gloas): delete - pub fn head_payload_status( - &self, - head_root: &Hash256, - current_slot: Slot, - ) -> Option { - let node = self.get_proto_node(head_root)?; - let v29 = node.as_v29().ok()?; - - // Replicate the spec's virtual tree walk tiebreaker at the head node. - let use_tiebreaker_only = node.slot() + 1 == current_slot; - - if !use_tiebreaker_only { - // Compare weights, then fall back to tiebreaker. - if v29.full_payload_weight > v29.empty_payload_weight { - return Some(PayloadStatus::Full); - } else if v29.empty_payload_weight > v29.full_payload_weight { - return Some(PayloadStatus::Empty); - } - // Equal weights: prefer FULL if payload received. 
- if v29.payload_received { - Some(PayloadStatus::Full) - } else { - Some(PayloadStatus::Empty) - } - } else { - // Previous slot: should_extend_payload tiebreaker. - if node.is_payload_timely::() && node.is_payload_data_available::() { - Some(PayloadStatus::Full) - } else { - Some(PayloadStatus::Empty) - } - } - } - /// See `ProtoArray` documentation. pub fn is_descendant(&self, ancestor_root: Hash256, descendant_root: Hash256) -> bool { self.proto_array diff --git a/testing/ef_tests/src/cases/fork_choice.rs b/testing/ef_tests/src/cases/fork_choice.rs index a1c93d65bb1..9f0e6de2ea5 100644 --- a/testing/ef_tests/src/cases/fork_choice.rs +++ b/testing/ef_tests/src/cases/fork_choice.rs @@ -975,7 +975,7 @@ impl Tester { ) -> Result<(), Error> { let mut fc = self.harness.chain.canonical_head.fork_choice_write_lock(); let slot = self.harness.chain.slot().unwrap(); - let canonical_head = fc.get_head(slot, &self.harness.spec).unwrap(); + let (canonical_head, _) = fc.get_head(slot, &self.harness.spec).unwrap(); let proposer_head_result = fc.get_proposer_head( slot, canonical_head, @@ -1020,21 +1020,11 @@ impl Tester { pub fn check_head_payload_status(&self, expected_status: u8) -> Result<(), Error> { let head = self.find_head()?; - let head_root = head.head_block_root(); - let current_slot = self.harness.chain.slot().map_err(|e| { - Error::InternalError(format!("reading current slot failed with {:?}", e)) - })?; - let fc = self.harness.chain.canonical_head.fork_choice_read_lock(); - let actual_status = fc - .proto_array() - .head_payload_status::(&head_root, current_slot) - .ok_or_else(|| { - Error::InternalError(format!( - "head_payload_status not found for head root {}", - head_root - )) - })?; - check_equal("head_payload_status", actual_status as u8, expected_status) + check_equal( + "head_payload_status", + head.head_payload_status() as u8, + expected_status, + ) } pub fn check_should_override_fcu( diff --git a/testing/execution_engine_integration/src/test_rig.rs 
b/testing/execution_engine_integration/src/test_rig.rs index 6bf4a1aa529..2c20a414893 100644 --- a/testing/execution_engine_integration/src/test_rig.rs +++ b/testing/execution_engine_integration/src/test_rig.rs @@ -13,7 +13,7 @@ use execution_layer::{ LATEST_TAG, PayloadAttributes, PayloadParameters, PayloadStatus, }; use fixed_bytes::FixedBytesExtended; -use fork_choice::ForkchoiceUpdateParameters; +use fork_choice::{ForkchoiceUpdateParameters, PayloadStatus as FcPayloadStatus}; use reqwest::{Client, header::CONTENT_TYPE}; use sensitive_url::SensitiveUrl; use serde_json::{Value, json}; @@ -294,6 +294,7 @@ impl TestRig { let finalized_block_hash = ExecutionBlockHash::zero(); let forkchoice_update_params = ForkchoiceUpdateParameters { head_root, + head_payload_status: FcPayloadStatus::Pending, head_hash: Some(parent_hash), justified_hash: Some(justified_block_hash), finalized_hash: Some(finalized_block_hash), From cec5ce179d7d8a030460ab8786861fd2e3c00953 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 25 Mar 2026 14:12:46 +1100 Subject: [PATCH 025/127] Undo botched optimisation --- consensus/proto_array/src/proto_array.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 25a1f7e7f91..6b338147cff 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -1380,8 +1380,6 @@ impl ProtoArray { } else { let children = self .nodes - .get(node.proto_node_index..) - .ok_or(Error::InvalidNodeIndex(node.proto_node_index))? 
.iter() .enumerate() .filter(|(_, child_node)| { From 845831ce56a029d9c7bc6a28b5d99b2102ff0937 Mon Sep 17 00:00:00 2001 From: dapplion <35266934+dapplion@users.noreply.github.com> Date: Wed, 25 Mar 2026 02:59:50 -0500 Subject: [PATCH 026/127] Align GLOAS fork choice with spec - Move proposer boost from apply_score_changes to get_weight, matching the spec's structure where get_weight adds boost via is_supporting_vote - Implement is_supporting_vote and get_ancestor_node spec functions - Fix should_extend_payload: return true when proposer_boost_root is zero - Compute record_block_timeliness from time_into_slot instead of hardcoding false - Fix anchor block_timeliness to [true, true] per get_forkchoice_store spec - Add equivocating_attestation_score for is_head_weak monotonicity - Use payload-aware weight in is_parent_strong - Add with_status helper on IndexedForkChoiceNode - Simplify find_head_walk to return IndexedForkChoiceNode directly --- consensus/fork_choice/src/fork_choice.rs | 6 + .../src/fork_choice_test_definition.rs | 2 + consensus/proto_array/src/proto_array.rs | 405 +++++++++--------- .../src/proto_array_fork_choice.rs | 32 +- 4 files changed, 241 insertions(+), 204 deletions(-) diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index 3c6dd9e5e03..c6def1562bd 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -797,6 +797,11 @@ where let attestation_threshold = spec.get_unaggregated_attestation_due(); // Add proposer score boost if the block is timely. + // TODO(gloas): the spec's `update_proposer_boost_root` additionally checks that + // `block.proposer_index == get_beacon_proposer_index(head_state)` — i.e. that + // the block's proposer matches the expected proposer on the canonical chain. + // This requires calling `get_head` and advancing the head state to the current + // slot, which is expensive. Implement once we have a cached proposer index. 
let is_before_attesting_interval = block_delay < attestation_threshold; let is_first_block = self.fc_store.proposer_boost_root().is_zero(); @@ -1001,6 +1006,7 @@ where self.justified_checkpoint(), self.finalized_checkpoint(), spec, + block_delay, )?; Ok(()) diff --git a/consensus/proto_array/src/fork_choice_test_definition.rs b/consensus/proto_array/src/fork_choice_test_definition.rs index de31a0905e5..4507e013ba6 100644 --- a/consensus/proto_array/src/fork_choice_test_definition.rs +++ b/consensus/proto_array/src/fork_choice_test_definition.rs @@ -10,6 +10,7 @@ use fixed_bytes::FixedBytesExtended; use serde::{Deserialize, Serialize}; use ssz::BitVector; use std::collections::BTreeSet; +use std::time::Duration; use types::{ AttestationShufflingId, ChainSpec, Checkpoint, Epoch, EthSpec, ExecutionBlockHash, Hash256, MainnetEthSpec, Slot, @@ -288,6 +289,7 @@ impl ForkChoiceTestDefinition { self.justified_checkpoint, self.finalized_checkpoint, &spec, + Duration::ZERO, ) .unwrap_or_else(|e| { panic!( diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 6b338147cff..670ae31cfce 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -1,6 +1,8 @@ use crate::error::InvalidBestNodeInfo; use crate::proto_array_fork_choice::IndexedForkChoiceNode; -use crate::{Block, ExecutionStatus, JustifiedBalances, PayloadStatus, error::Error}; +use crate::{ + Block, ExecutionStatus, JustifiedBalances, LatestMessage, PayloadStatus, error::Error, +}; use fixed_bytes::FixedBytesExtended; use serde::{Deserialize, Serialize}; use ssz::BitVector; @@ -8,6 +10,7 @@ use ssz::Encode; use ssz::four_byte_option_impl; use ssz_derive::{Decode, Encode}; use std::collections::{HashMap, HashSet}; +use std::time::Duration; use superstruct::superstruct; use typenum::U512; use types::{ @@ -20,6 +23,14 @@ use types::{ four_byte_option_impl!(four_byte_option_usize, usize); 
four_byte_option_impl!(four_byte_option_checkpoint, Checkpoint); +fn all_true_bitvector() -> BitVector { + let mut bv = BitVector::new(); + for i in 0..bv.len() { + let _ = bv.set(i, true); + } + bv +} + /// Defines an operation which may invalidate the `execution_status` of some nodes. #[derive(Clone, Debug)] pub enum InvalidationOperation { @@ -160,22 +171,27 @@ pub struct ProtoNode { /// to detect equivocations at the parent's slot. #[superstruct(only(V29), partial_getter(copy))] pub proposer_index: u64, + /// Weight from equivocating validators that voted for this block. + /// Used by `is_head_weak` to match the spec's monotonicity guarantee: + /// more attestations can only increase head weight, never decrease it. + #[superstruct(only(V29), partial_getter(copy))] + pub equivocating_attestation_score: u64, } impl ProtoNode { /// Generic version of spec's `parent_payload_status` that works for pre-Gloas nodes by /// considering their parents Empty. - fn get_parent_payload_status(&self) -> PayloadStatus { + /// Pre-Gloas nodes have no ePBS, default to Empty. + pub fn get_parent_payload_status(&self) -> PayloadStatus { self.parent_payload_status().unwrap_or(PayloadStatus::Empty) } - fn is_parent_node_full(&self) -> bool { + pub fn is_parent_node_full(&self) -> bool { self.get_parent_payload_status() == PayloadStatus::Full } - fn attestation_score(&self, payload_status: PayloadStatus) -> u64 { + pub fn attestation_score(&self, payload_status: PayloadStatus) -> u64 { match payload_status { - // TODO(gloas): rename weight and remove proposer boost from it? PayloadStatus::Pending => self.weight(), PayloadStatus::Empty => self.empty_payload_weight().unwrap_or(0), PayloadStatus::Full => self.full_payload_weight().unwrap_or(0), @@ -187,8 +203,7 @@ impl ProtoNode { return false; }; - // If the payload is not locally available, the payload - // is not considered available regardless of the PTC vote + // Equivalent to `if root not in store.payload_states` in the spec. 
if !node.payload_received { return false; } @@ -201,8 +216,7 @@ impl ProtoNode { return false; }; - // If the payload is not locally available, the payload - // is not considered available regardless of the PTC vote + // Equivalent to `if root not in store.payload_states` in the spec. if !node.payload_received { return false; } @@ -252,6 +266,8 @@ pub struct NodeDelta { pub empty_delta: i64, /// Weight change from `PayloadStatus::Full` votes. pub full_delta: i64, + /// Weight from equivocating validators that voted for this node. + pub equivocating_attestation_delta: u64, } impl NodeDelta { @@ -308,6 +324,7 @@ impl NodeDelta { delta, empty_delta: 0, full_delta: 0, + equivocating_attestation_delta: 0, } } @@ -370,10 +387,10 @@ impl ProtoArray { mut deltas: Vec, best_justified_checkpoint: Checkpoint, best_finalized_checkpoint: Checkpoint, - new_justified_balances: &JustifiedBalances, - proposer_boost_root: Hash256, + _new_justified_balances: &JustifiedBalances, + _proposer_boost_root: Hash256, current_slot: Slot, - spec: &ChainSpec, + _spec: &ChainSpec, ) -> Result<(), Error> { if deltas.len() != self.indices.len() { return Err(Error::InvalidDeltaLen { @@ -382,9 +399,6 @@ impl ProtoArray { }); } - // Default the proposer boost score to zero. - let mut proposer_score = 0; - // Iterate backwards through all indices in `self.nodes`. for node_index in (0..self.nodes.len()).rev() { let node = self @@ -412,7 +426,7 @@ impl ProtoArray { .copied() .ok_or(Error::InvalidNodeDelta(node_index))?; - let mut delta = if execution_status_is_invalid { + let delta = if execution_status_is_invalid { // If the node has an invalid execution payload, reduce its weight to zero. 0_i64 .checked_sub(node.weight() as i64) @@ -427,37 +441,9 @@ impl ProtoArray { (0, 0) }; - // If we find the node for which the proposer boost was previously applied, decrease - // the delta by the previous score amount. - // TODO(gloas): implement `should_apply_proposer_boost` from the Gloas spec. 
- // The spec conditionally applies proposer boost based on parent weakness and - // early equivocations. Currently boost is applied unconditionally. - if self.previous_proposer_boost.root != Hash256::zero() - && self.previous_proposer_boost.root == node.root() - // Invalid nodes will always have a weight of zero so there's no need to subtract - // the proposer boost delta. - && !execution_status_is_invalid - { - delta = delta - .checked_sub(self.previous_proposer_boost.score as i64) - .ok_or(Error::DeltaOverflow(node_index))?; - } - // If we find the node matching the current proposer boost root, increase - // the delta by the new score amount (unless the block has an invalid execution status). - // For Gloas (V29), `should_apply_proposer_boost` is checked after the loop - // with final weights, and the boost is removed if needed. - if let Some(proposer_score_boost) = spec.proposer_score_boost - && proposer_boost_root != Hash256::zero() - && proposer_boost_root == node.root() - && !execution_status_is_invalid - { - proposer_score = - calculate_committee_fraction::(new_justified_balances, proposer_score_boost) - .ok_or(Error::ProposerBoostOverflow(node_index))?; - delta = delta - .checked_add(proposer_score as i64) - .ok_or(Error::DeltaOverflow(node_index))?; - } + // Proposer boost is NOT applied here. It is computed on-the-fly + // during the virtual tree walk in `get_weight`, matching the spec's + // `get_weight` which adds boost separately from `get_attestation_score`. // Apply the delta to the node. if execution_status_is_invalid { @@ -473,6 +459,9 @@ impl ProtoArray { apply_delta(node.empty_payload_weight, node_empty_delta, node_index)?; node.full_payload_weight = apply_delta(node.full_payload_weight, node_full_delta, node_index)?; + node.equivocating_attestation_score = node + .equivocating_attestation_score + .saturating_add(node_delta.equivocating_attestation_delta); } // Update the parent delta (if any). 
@@ -514,67 +503,6 @@ impl ProtoArray { } } - // Gloas: now that all weights are final, check `should_apply_proposer_boost`. - // If the boost should NOT apply, walk from the boosted node to root and subtract - // `proposer_score` from weight and payload weights in a single pass. - // We detect Gloas by checking the boosted node's variant (V29) directly. - if proposer_score > 0 - && let Some(&boost_index) = self.indices.get(&proposer_boost_root) - && self - .nodes - .get(boost_index) - .is_some_and(|n| n.as_v29().is_ok()) - && !self.should_apply_proposer_boost::( - proposer_boost_root, - new_justified_balances, - spec, - )? - { - // Single walk: subtract proposer_score from weight and payload weights. - let mut walk_index = Some(boost_index); - let mut child_payload_status: Option = None; - while let Some(idx) = walk_index { - let node = self - .nodes - .get_mut(idx) - .ok_or(Error::InvalidNodeIndex(idx))?; - - *node.weight_mut() = node - .weight() - .checked_sub(proposer_score) - .ok_or(Error::DeltaOverflow(idx))?; - - // Subtract from the payload bucket that the child-on-path - // contributed to (based on the child's parent_payload_status). - if let Some(child_ps) = child_payload_status - && let Ok(v29) = node.as_v29_mut() - { - if child_ps == PayloadStatus::Full { - v29.full_payload_weight = v29 - .full_payload_weight - .checked_sub(proposer_score) - .ok_or(Error::DeltaOverflow(idx))?; - } else { - v29.empty_payload_weight = v29 - .empty_payload_weight - .checked_sub(proposer_score) - .ok_or(Error::DeltaOverflow(idx))?; - } - } - - child_payload_status = node.parent_payload_status().ok(); - walk_index = node.parent(); - } - - proposer_score = 0; - } - - // After applying all deltas, update the `previous_proposer_boost`. - self.previous_proposer_boost = ProposerBoost { - root: proposer_boost_root, - score: proposer_score, - }; - // A second time, iterate backwards through all indices in `self.nodes`. 
// // We _must_ perform these functions separate from the weight-updating loop above to ensure @@ -611,6 +539,7 @@ impl ProtoArray { best_justified_checkpoint: Checkpoint, best_finalized_checkpoint: Checkpoint, spec: &ChainSpec, + time_into_slot: Duration, ) -> Result<(), Error> { // If the block is already known, simply ignore it. if self.indices.contains_key(&block.root) { @@ -642,6 +571,8 @@ impl ProtoArray { unrealized_finalized_checkpoint: block.unrealized_finalized_checkpoint, }) } else { + let is_current_slot = current_slot == block.slot; + let execution_payload_block_hash = block .execution_payload_block_hash @@ -712,28 +643,27 @@ impl ProtoArray { // initialized to all-True, ensuring `is_payload_timely` and // `is_payload_data_available` return true for the anchor. payload_timeliness_votes: if is_genesis { - let mut bv = BitVector::new(); - for i in 0..bv.len() { - let _ = bv.set(i, true); - } - bv + all_true_bitvector() } else { BitVector::default() }, payload_data_availability_votes: if is_genesis { - let mut bv = BitVector::new(); - for i in 0..bv.len() { - let _ = bv.set(i, true); - } - bv + all_true_bitvector() } else { BitVector::default() }, payload_received: is_genesis, proposer_index: block.proposer_index.unwrap_or(0), - // TODO(gloas): initialise these based on block timing - block_timeliness_attestation_threshold: false, - block_timeliness_ptc_threshold: false, + // Spec: `record_block_timeliness` + `get_forkchoice_store`. + // Anchor gets [True, True]. Others computed from time_into_slot. + block_timeliness_attestation_threshold: is_genesis + || (is_current_slot + && time_into_slot < spec.get_unaggregated_attestation_due()), + // TODO(gloas): use GLOAS-specific PTC due threshold once + // `get_payload_attestation_due_ms` is on ChainSpec. 
+ block_timeliness_ptc_threshold: is_genesis + || (is_current_slot && time_into_slot < spec.get_slot_duration() / 2), + equivocating_attestation_score: 0, }) }; @@ -776,6 +706,12 @@ impl ProtoArray { Ok(()) } + /// Spec: `is_head_weak`. + /// + /// The spec adds weight from equivocating validators in the head slot's + /// committees. We approximate this with `equivocating_attestation_score` + /// which tracks equivocating validators that voted for this block (close + /// but not identical to committee membership). fn is_head_weak( &self, head_node: &ProtoNode, @@ -788,11 +724,10 @@ impl ProtoArray { ) .unwrap_or(0); - let head_weight = head_node.attestation_score(PayloadStatus::Pending); + let head_weight = head_node + .attestation_score(PayloadStatus::Pending) + .saturating_add(head_node.equivocating_attestation_score().unwrap_or(0)); - // TODO(gloas): missing equivocating weight from spec - // idea: add equivocating_attestation_score on the proto node that is updated whenever - // an equivocation is processed. head_weight < reorg_threshold } @@ -1207,7 +1142,7 @@ impl ProtoArray { // In the post-Gloas world, always use a virtual tree walk. // // Best child/best descendant is dead. - let (best_fc_node, best_node) = self.find_head_walk::( + let best_fc_node = self.find_head_walk::( justified_index, current_slot, best_justified_checkpoint, @@ -1218,8 +1153,12 @@ impl ProtoArray { )?; // Perform a sanity check that the node is indeed valid to be the head. + let best_node = self + .nodes + .get(best_fc_node.proto_node_index) + .ok_or(Error::InvalidNodeIndex(best_fc_node.proto_node_index))?; if !self.node_is_viable_for_head::( - &best_node, + best_node, current_slot, best_justified_checkpoint, best_finalized_checkpoint, @@ -1238,80 +1177,79 @@ impl ProtoArray { Ok((best_fc_node.root, best_fc_node.payload_status)) } - /// Virtual tree walk for `find_head`. - /// - /// At each node, determine the preferred payload direction (FULL or EMPTY) - /// by comparing weights. 
Scan all nodes to find the best child matching - /// the preferred direction. + /// Spec: `get_head`. #[allow(clippy::too_many_arguments)] fn find_head_walk( &self, start_index: usize, current_slot: Slot, best_justified_checkpoint: Checkpoint, - _best_finalized_checkpoint: Checkpoint, + best_finalized_checkpoint: Checkpoint, proposer_boost_root: Hash256, justified_balances: &JustifiedBalances, spec: &ChainSpec, - ) -> Result<(IndexedForkChoiceNode, ProtoNode), Error> { + ) -> Result { let mut head = IndexedForkChoiceNode { root: best_justified_checkpoint.root, proto_node_index: start_index, payload_status: PayloadStatus::Pending, }; - let mut head_proto_node = self - .nodes - .get(start_index) - .ok_or(Error::NodeUnknown(best_justified_checkpoint.root))? - .clone(); loop { - let children = self.get_node_children(&head)?; + let children: Vec<_> = self + .get_node_children(&head)? + .into_iter() + .filter(|(_, proto_node)| { + // Spec: `get_filtered_block_tree` pre-filters to only include + // blocks on viable branches. We approximate this by checking + // viability of each child during the walk. 
+ self.node_is_viable_for_head::( + proto_node, + current_slot, + best_justified_checkpoint, + best_finalized_checkpoint, + ) + }) + .collect(); if children.is_empty() { - break; + return Ok(head); } - let scores = children + head = children .into_iter() - .map(|(child_fc_node, child_proto_node)| { + .map(|(child, _)| -> Result<_, Error> { + let proto_node = self + .nodes + .get(child.proto_node_index) + .ok_or(Error::InvalidNodeIndex(child.proto_node_index))?; let weight = self.get_weight::( - &child_fc_node, - &child_proto_node, + &child, + proto_node, proposer_boost_root, current_slot, justified_balances, spec, )?; let payload_status_tiebreaker = self.get_payload_status_tiebreaker::( - &child_fc_node, - &child_proto_node, + &child, + proto_node, current_slot, proposer_boost_root, )?; - Ok(( - child_fc_node, - child_proto_node, - weight, - payload_status_tiebreaker, - )) + Ok((child, weight, payload_status_tiebreaker)) }) - .collect::, Error>>()?; - // TODO(gloas): proper error - (head, head_proto_node) = scores + .collect::, Error>>()? .into_iter() - .max_by_key( - |(child_fc_node, _proto_node, weight, payload_status_tiebreaker)| { - (*weight, child_fc_node.root, *payload_status_tiebreaker) - }, - ) - .map(|(child_fc_node, child_proto_node, _, _)| (child_fc_node, child_proto_node)) - .unwrap(); + .max_by_key(|(child, weight, payload_status_tiebreaker)| { + (*weight, child.root, *payload_status_tiebreaker) + }) + .map(|(child, _, _)| child) + .expect("children is non-empty"); } - - Ok((head, head_proto_node)) } + /// Spec: `get_weight`. fn get_weight( &self, fc_node: &IndexedForkChoiceNode, @@ -1334,19 +1272,99 @@ impl ProtoArray { return Ok(attestation_score); } - // TODO(gloas): I don't think `is_supporting_vote` is necessary here, confirm by - // checking spec tests or with spec authors. - let proposer_score = if proto_node.root() == proposer_boost_root { + // Spec: proposer boost is treated as a synthetic vote. 
+ let message = LatestMessage { + slot: current_slot, + root: proposer_boost_root, + payload_present: false, + }; + let proposer_score = if self.is_supporting_vote(fc_node, &message)? { get_proposer_score::(justified_balances, spec)? } else { 0 }; + Ok(attestation_score.saturating_add(proposer_score)) } else { Ok(0) } } + /// Spec: `is_supporting_vote`. + fn is_supporting_vote( + &self, + node: &IndexedForkChoiceNode, + message: &LatestMessage, + ) -> Result { + let block = self + .nodes + .get(node.proto_node_index) + .ok_or(Error::InvalidNodeIndex(node.proto_node_index))?; + + if node.root == message.root { + if node.payload_status == PayloadStatus::Pending { + return Ok(true); + } + if message.slot <= block.slot() { + return Ok(false); + } + if message.payload_present { + Ok(node.payload_status == PayloadStatus::Full) + } else { + Ok(node.payload_status == PayloadStatus::Empty) + } + } else { + let ancestor = self.get_ancestor_node(message.root, block.slot())?; + Ok(node.root == ancestor.root + && (node.payload_status == PayloadStatus::Pending + || node.payload_status == ancestor.payload_status)) + } + } + + /// Spec: `get_ancestor` (modified to return ForkChoiceNode with payload_status). + fn get_ancestor_node(&self, root: Hash256, slot: Slot) -> Result { + let index = *self.indices.get(&root).ok_or(Error::NodeUnknown(root))?; + let block = self + .nodes + .get(index) + .ok_or(Error::InvalidNodeIndex(index))?; + + if block.slot() <= slot { + return Ok(IndexedForkChoiceNode { + root, + proto_node_index: index, + payload_status: PayloadStatus::Pending, + }); + } + + // Walk up until we find the ancestor at `slot`. 
+ let mut child_index = index; + let mut current_index = block.parent().ok_or(Error::NodeUnknown(root))?; + + loop { + let current = self + .nodes + .get(current_index) + .ok_or(Error::InvalidNodeIndex(current_index))?; + + if current.slot() <= slot { + let child = self + .nodes + .get(child_index) + .ok_or(Error::InvalidNodeIndex(child_index))?; + return Ok(IndexedForkChoiceNode { + root: current.root(), + proto_node_index: current_index, + payload_status: child.get_parent_payload_status(), + }); + } + + child_index = current_index; + current_index = current.parent().ok_or(Error::NodeUnknown(root))?; + } + } + + /// Spec: `get_node_children`. fn get_node_children( &self, node: &IndexedForkChoiceNode, @@ -1355,30 +1373,15 @@ impl ProtoArray { let proto_node = self .nodes .get(node.proto_node_index) - .ok_or(Error::InvalidNodeIndex(node.proto_node_index))? - .clone(); - let mut children = vec![( - IndexedForkChoiceNode { - root: node.root, - proto_node_index: node.proto_node_index, - payload_status: PayloadStatus::Empty, - }, - proto_node.clone(), - )]; + .ok_or(Error::InvalidNodeIndex(node.proto_node_index))?; + let mut children = vec![(node.with_status(PayloadStatus::Empty), proto_node.clone())]; // The FULL virtual child only exists if the payload has been received. 
if proto_node.payload_received().is_ok_and(|received| received) { - children.push(( - IndexedForkChoiceNode { - root: node.root, - proto_node_index: node.proto_node_index, - payload_status: PayloadStatus::Full, - }, - proto_node, - )); + children.push((node.with_status(PayloadStatus::Full), proto_node.clone())); } Ok(children) } else { - let children = self + Ok(self .nodes .iter() .enumerate() @@ -1396,8 +1399,7 @@ impl ProtoArray { child_node.clone(), ) }) - .collect(); - Ok(children) + .collect()) } } @@ -1427,8 +1429,10 @@ impl ProtoArray { proto_node: &ProtoNode, proposer_boost_root: Hash256, ) -> Result { + // Per spec: `proposer_root == Root()` is one of the `or` conditions that + // makes `should_extend_payload` return True. if proposer_boost_root.is_zero() { - return Ok(false); + return Ok(true); } let proposer_boost_node_index = *self @@ -1440,20 +1444,18 @@ impl ProtoArray { .get(proposer_boost_node_index) .ok_or(Error::InvalidNodeIndex(proposer_boost_node_index))?; - // Check if the parent of the proposer boost node matches the fc_node's root - let Some(proposer_boost_parent_index) = proposer_boost_node.parent() else { - // TODO(gloas): could be an error - return Ok(false); - }; - let boost_parent_root = self + let parent_index = proposer_boost_node + .parent() + .ok_or(Error::NodeUnknown(proposer_boost_root))?; + let proposer_boost_parent_root = self .nodes - .get(proposer_boost_parent_index) - .ok_or(Error::InvalidNodeIndex(proposer_boost_parent_index))? + .get(parent_index) + .ok_or(Error::InvalidNodeIndex(parent_index))? .root(); Ok( (proto_node.is_payload_timely::() && proto_node.is_payload_data_available::()) - || boost_parent_root != fc_node.root + || proposer_boost_parent_root != fc_node.root || proposer_boost_node.is_parent_node_full(), ) } @@ -1879,15 +1881,14 @@ pub fn calculate_committee_fraction( .checked_div(100) } -pub fn get_proposer_score( +/// Spec: `get_proposer_score`. 
+fn get_proposer_score( justified_balances: &JustifiedBalances, spec: &ChainSpec, ) -> Result { let Some(proposer_score_boost) = spec.proposer_score_boost else { - // TODO(gloas): make proposer boost non-optional in spec return Ok(0); }; - // TODO(gloas): fix error calculate_committee_fraction::(justified_balances, proposer_score_boost) .ok_or(Error::ProposerBoostOverflow(0)) } diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index 19be43511fc..4be77b61ad4 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -14,6 +14,7 @@ use ssz_derive::{Decode, Encode}; use std::{ collections::{BTreeSet, HashMap}, fmt, + time::Duration, }; use types::{ AttestationShufflingId, ChainSpec, Checkpoint, Epoch, EthSpec, ExecutionBlockHash, Hash256, @@ -75,6 +76,16 @@ pub struct IndexedForkChoiceNode { pub payload_status: PayloadStatus, } +impl IndexedForkChoiceNode { + pub fn with_status(&self, payload_status: PayloadStatus) -> Self { + Self { + root: self.root, + proto_node_index: self.proto_node_index, + payload_status, + } + } +} + impl ExecutionStatus { pub fn is_execution_enabled(&self) -> bool { !matches!(self, ExecutionStatus::Irrelevant(_)) @@ -491,6 +502,10 @@ impl ProtoArrayForkChoice { justified_checkpoint, finalized_checkpoint, spec, + // Anchor block is always timely (delay=0 ensures both timeliness + // checks pass). Combined with `is_genesis` override in on_block, + // this matches spec's `block_timeliness = {anchor: [True, True]}`. 
+ Duration::ZERO, ) .map_err(|e| format!("Failed to add finalized block to proto_array: {:?}", e))?; @@ -590,6 +605,7 @@ impl ProtoArrayForkChoice { justified_checkpoint: Checkpoint, finalized_checkpoint: Checkpoint, spec: &ChainSpec, + time_into_slot: Duration, ) -> Result<(), String> { if block.parent_root.is_none() { return Err("Missing parent root".to_string()); @@ -602,6 +618,7 @@ impl ProtoArrayForkChoice { justified_checkpoint, finalized_checkpoint, spec, + time_into_slot, ) .map_err(|e| format!("process_block_error: {:?}", e)) } @@ -705,8 +722,10 @@ impl ProtoArrayForkChoice { .into()); } - // Only re-org if the parent's weight is greater than the parents configured committee fraction. - let parent_weight = info.parent_node.weight(); + // Spec: `is_parent_strong`. Use payload-aware weight matching the + // payload path the head node is on from its parent. + let parent_payload_status = info.head_node.get_parent_payload_status(); + let parent_weight = info.parent_node.attestation_score(parent_payload_status); let re_org_parent_weight_threshold = info.re_org_parent_weight_threshold; let parent_strong = parent_weight > re_org_parent_weight_threshold; if !parent_strong { @@ -1130,6 +1149,7 @@ fn compute_deltas( delta: 0, empty_delta: 0, full_delta: 0, + equivocating_attestation_delta: 0, }; indices.len() ]; @@ -1171,6 +1191,11 @@ fn compute_deltas( block_slot(current_delta_index)?, ); node_delta.sub_payload_delta(status, old_balance, current_delta_index)?; + + // Track equivocating weight for `is_head_weak` monotonicity. 
+ node_delta.equivocating_attestation_delta = node_delta + .equivocating_attestation_delta + .saturating_add(old_balance); } vote.current_root = Hash256::zero(); @@ -1322,6 +1347,7 @@ mod test_compute_deltas { genesis_checkpoint, genesis_checkpoint, &spec, + Duration::ZERO, ) .unwrap(); @@ -1351,6 +1377,7 @@ mod test_compute_deltas { genesis_checkpoint, genesis_checkpoint, &spec, + Duration::ZERO, ) .unwrap(); @@ -1487,6 +1514,7 @@ mod test_compute_deltas { genesis_checkpoint, genesis_checkpoint, &spec, + Duration::ZERO, ) .unwrap(); }; From 9f56fd07514867f8f0b1f538e436b88ce4bbd4d3 Mon Sep 17 00:00:00 2001 From: dapplion <35266934+dapplion@users.noreply.github.com> Date: Wed, 25 Mar 2026 03:04:03 -0500 Subject: [PATCH 027/127] Review fixes: cache should_apply_proposer_boost, improve error context - Compute should_apply_proposer_boost once before the walk instead of per-child per-level - Clear previous_proposer_boost in apply_score_changes - Use correct node root in get_ancestor_node error messages - Add comment explaining is_supporting_vote slot comparison behavior --- consensus/proto_array/src/proto_array.rs | 29 ++++++++++++++++++------ 1 file changed, 22 insertions(+), 7 deletions(-) diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 670ae31cfce..dae8c608f3a 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -503,6 +503,10 @@ impl ProtoArray { } } + // Proposer boost is now applied on-the-fly in `get_weight` during the + // walk, so clear any stale boost from a prior call. + self.previous_proposer_boost = ProposerBoost::default(); + // A second time, iterate backwards through all indices in `self.nodes`. // // We _must_ perform these functions separate from the weight-updating loop above to ensure @@ -1195,6 +1199,13 @@ impl ProtoArray { payload_status: PayloadStatus::Pending, }; + // Compute once rather than per-child per-level. 
+ let apply_proposer_boost = self.should_apply_proposer_boost::( + proposer_boost_root, + justified_balances, + spec, + )?; + loop { let children: Vec<_> = self .get_node_children(&head)? @@ -1226,6 +1237,7 @@ impl ProtoArray { let weight = self.get_weight::( &child, proto_node, + apply_proposer_boost, proposer_boost_root, current_slot, justified_balances, @@ -1250,10 +1262,12 @@ impl ProtoArray { } /// Spec: `get_weight`. + #[allow(clippy::too_many_arguments)] fn get_weight( &self, fc_node: &IndexedForkChoiceNode, proto_node: &ProtoNode, + apply_proposer_boost: bool, proposer_boost_root: Hash256, current_slot: Slot, justified_balances: &JustifiedBalances, @@ -1264,11 +1278,7 @@ impl ProtoArray { { let attestation_score = proto_node.attestation_score(fc_node.payload_status); - if !self.should_apply_proposer_boost::( - proposer_boost_root, - justified_balances, - spec, - )? { + if !apply_proposer_boost { return Ok(attestation_score); } @@ -1305,6 +1315,9 @@ impl ProtoArray { if node.payload_status == PayloadStatus::Pending { return Ok(true); } + // For the proposer boost case: message.slot == current_slot == block.slot, + // so this returns false — boost does not support EMPTY/FULL of the + // boosted block itself, only its ancestors. if message.slot <= block.slot() { return Ok(false); } @@ -1339,7 +1352,7 @@ impl ProtoArray { // Walk up until we find the ancestor at `slot`. 
let mut child_index = index; - let mut current_index = block.parent().ok_or(Error::NodeUnknown(root))?; + let mut current_index = block.parent().ok_or(Error::NodeUnknown(block.root()))?; loop { let current = self @@ -1360,7 +1373,9 @@ impl ProtoArray { } child_index = current_index; - current_index = current.parent().ok_or(Error::NodeUnknown(root))?; + current_index = current + .parent() + .ok_or(Error::NodeUnknown(current.root()))?; } } From e943888ee71c32cbb6fe9d8520d298c3708ae416 Mon Sep 17 00:00:00 2001 From: dapplion <35266934+dapplion@users.noreply.github.com> Date: Wed, 25 Mar 2026 16:04:43 -0500 Subject: [PATCH 028/127] Fix test_03: update weight assertions for spec-parity boost MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Proposer boost is no longer baked into node.weight() — it is applied on-the-fly in the walk's get_weight via is_supporting_vote. Update the expected weights in test_03 to reflect pure attestation scores. --- .../src/fork_choice_test_definition/execution_status.rs | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/consensus/proto_array/src/fork_choice_test_definition/execution_status.rs b/consensus/proto_array/src/fork_choice_test_definition/execution_status.rs index 8743363f9c1..794310ef89a 100644 --- a/consensus/proto_array/src/fork_choice_test_definition/execution_status.rs +++ b/consensus/proto_array/src/fork_choice_test_definition/execution_status.rs @@ -1053,13 +1053,15 @@ pub fn get_execution_status_test_definition_03() -> ForkChoiceTestDefinition { proposer_boost_root: get_root(3), }); + // Stored weights are pure attestation scores (proposer boost is applied + // on-the-fly in the walk's `get_weight`, not baked into `node.weight()`). 
ops.push(Operation::AssertWeight { block_root: get_root(0), - weight: 33_250, + weight: 2_000, }); ops.push(Operation::AssertWeight { block_root: get_root(1), - weight: 33_250, + weight: 2_000, }); ops.push(Operation::AssertWeight { block_root: get_root(2), @@ -1067,8 +1069,7 @@ pub fn get_execution_status_test_definition_03() -> ForkChoiceTestDefinition { }); ops.push(Operation::AssertWeight { block_root: get_root(3), - // This is a "magic number" generated from `calculate_committee_fraction`. - weight: 31_250, + weight: 0, }); // Invalidate the payload of 3. From 93ef1e395ca3377958720d51ebbfe967289d055f Mon Sep 17 00:00:00 2001 From: dapplion <35266934+dapplion@users.noreply.github.com> Date: Wed, 25 Mar 2026 16:40:17 -0500 Subject: [PATCH 029/127] Include head_payload_status in ForkChoiceView comparison The early exit check in recompute_head_at_slot compared only head_block_root and checkpoints. When on_execution_payload changed the payload status from Empty to Full without changing the head root, the CachedHead was not updated. Add head_payload_status to ForkChoiceView so the change is detected. --- beacon_node/beacon_chain/src/canonical_head.rs | 1 + consensus/fork_choice/src/fork_choice.rs | 2 ++ consensus/proto_array/src/proto_array.rs | 11 +++-------- 3 files changed, 6 insertions(+), 8 deletions(-) diff --git a/beacon_node/beacon_chain/src/canonical_head.rs b/beacon_node/beacon_chain/src/canonical_head.rs index 30d4a597097..b39b51a9b54 100644 --- a/beacon_node/beacon_chain/src/canonical_head.rs +++ b/beacon_node/beacon_chain/src/canonical_head.rs @@ -608,6 +608,7 @@ impl BeaconChain { // was last run. 
let old_view = ForkChoiceView { head_block_root: old_cached_head.head_block_root(), + head_payload_status: old_cached_head.head_payload_status(), justified_checkpoint: old_cached_head.justified_checkpoint(), finalized_checkpoint: old_cached_head.finalized_checkpoint(), }; diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index c6def1562bd..896bb87c2da 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -342,6 +342,7 @@ pub struct ForkchoiceUpdateParameters { #[derive(Clone, Copy, Debug, PartialEq)] pub struct ForkChoiceView { pub head_block_root: Hash256, + pub head_payload_status: PayloadStatus, pub justified_checkpoint: Checkpoint, pub finalized_checkpoint: Checkpoint, } @@ -674,6 +675,7 @@ where pub fn cached_fork_choice_view(&self) -> ForkChoiceView { ForkChoiceView { head_block_root: self.forkchoice_update_parameters.head_root, + head_payload_status: self.forkchoice_update_parameters.head_payload_status, justified_checkpoint: self.justified_checkpoint(), finalized_checkpoint: self.finalized_checkpoint(), } diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index dae8c608f3a..e4e02d58720 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -1200,11 +1200,8 @@ impl ProtoArray { }; // Compute once rather than per-child per-level. 
- let apply_proposer_boost = self.should_apply_proposer_boost::( - proposer_boost_root, - justified_balances, - spec, - )?; + let apply_proposer_boost = + self.should_apply_proposer_boost::(proposer_boost_root, justified_balances, spec)?; loop { let children: Vec<_> = self @@ -1373,9 +1370,7 @@ impl ProtoArray { } child_index = current_index; - current_index = current - .parent() - .ok_or(Error::NodeUnknown(current.root()))?; + current_index = current.parent().ok_or(Error::NodeUnknown(current.root()))?; } } From 324c61d2e2bbe5116ba2fd3798967478603c2e66 Mon Sep 17 00:00:00 2001 From: dapplion <35266934+dapplion@users.noreply.github.com> Date: Wed, 25 Mar 2026 17:12:17 -0500 Subject: [PATCH 030/127] Implement get_filtered_block_tree and fix remaining test failures - Add get_filtered_block_tree/filter_block_tree matching the spec's recursive viability pre-filter for get_head - Skip invalid execution status nodes in the filter (they aren't in store.blocks in the spec) - Fix attestation_score for V17 nodes: fall back to weight() for Empty/Full since pre-Gloas has no payload separation - Include head_payload_status in ForkChoiceView so CachedHead updates when payload status changes - Update votes test: branch with incompatible finalized leaf is now correctly excluded by the recursive filter - Update execution_status test_03: stored weights no longer include proposer boost All 30 proto_array/fork_choice tests pass. All 9 EF fork_choice test suites pass. 
--- .../src/fork_choice_test_definition/votes.rs | 11 +- consensus/proto_array/src/proto_array.rs | 122 ++++++++++++++++-- 2 files changed, 115 insertions(+), 18 deletions(-) diff --git a/consensus/proto_array/src/fork_choice_test_definition/votes.rs b/consensus/proto_array/src/fork_choice_test_definition/votes.rs index cdd95531278..ac97a592b74 100644 --- a/consensus/proto_array/src/fork_choice_test_definition/votes.rs +++ b/consensus/proto_array/src/fork_choice_test_definition/votes.rs @@ -357,17 +357,18 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { execution_payload_block_hash: None, }); - // Ensure that 5 is filtered out and the head stays at 4. + // Block 5 has incompatible finalized checkpoint, so `get_filtered_block_tree` + // excludes the entire 1->3->4->5 branch (no viable leaf). Head moves to 2. // // 0 // / \ - // 2 1 + // head-> 2 1 // | // 3 // | - // 4 <- head + // 4 // / - // 5 + // 5 <- incompatible finalized checkpoint ops.push(Operation::FindHead { justified_checkpoint: Checkpoint { epoch: Epoch::new(1), @@ -378,7 +379,7 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { root: get_root(0), }, justified_state_balances: balances.clone(), - expected_head: get_root(4), + expected_head: get_root(2), current_slot: Slot::new(0), expected_payload_status: None, }); diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index e4e02d58720..43c6d749625 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -193,8 +193,12 @@ impl ProtoNode { pub fn attestation_score(&self, payload_status: PayloadStatus) -> u64 { match payload_status { PayloadStatus::Pending => self.weight(), - PayloadStatus::Empty => self.empty_payload_weight().unwrap_or(0), - PayloadStatus::Full => self.full_payload_weight().unwrap_or(0), + // Pre-Gloas (V17) nodes have no payload separation — all weight + // is in `weight()`. 
Post-Gloas (V29) nodes track per-status weights. + PayloadStatus::Empty => self + .empty_payload_weight() + .unwrap_or_else(|_| self.weight()), + PayloadStatus::Full => self.full_payload_weight().unwrap_or_else(|_| self.weight()), } } @@ -1181,6 +1185,100 @@ impl ProtoArray { Ok((best_fc_node.root, best_fc_node.payload_status)) } + /// Spec: `get_filtered_block_tree`. + /// + /// Returns the set of node indices on viable branches — those with at least + /// one leaf descendant with correct justified/finalized checkpoints. + fn get_filtered_block_tree( + &self, + start_index: usize, + current_slot: Slot, + best_justified_checkpoint: Checkpoint, + best_finalized_checkpoint: Checkpoint, + ) -> HashSet { + let mut viable = HashSet::new(); + self.filter_block_tree::( + start_index, + current_slot, + best_justified_checkpoint, + best_finalized_checkpoint, + &mut viable, + ); + viable + } + + /// Spec: `filter_block_tree`. + fn filter_block_tree( + &self, + node_index: usize, + current_slot: Slot, + best_justified_checkpoint: Checkpoint, + best_finalized_checkpoint: Checkpoint, + viable: &mut HashSet, + ) -> bool { + let Some(node) = self.nodes.get(node_index) else { + return false; + }; + + // Nodes with invalid execution payloads are never viable. + // (The spec doesn't need this check because invalid blocks aren't in store.blocks.) + if node + .execution_status() + .is_ok_and(|status| status.is_invalid()) + { + return false; + } + + // Skip invalid children — they aren't in store.blocks in the spec. + let children: Vec = self + .nodes + .iter() + .enumerate() + .filter(|(_, child)| { + child.parent() == Some(node_index) + && !child + .execution_status() + .is_ok_and(|status| status.is_invalid()) + }) + .map(|(i, _)| i) + .collect(); + + if !children.is_empty() { + // Evaluate ALL children (no short-circuit) to mark all viable branches. 
+ let any_viable = children + .iter() + .map(|&child_index| { + self.filter_block_tree::( + child_index, + current_slot, + best_justified_checkpoint, + best_finalized_checkpoint, + viable, + ) + }) + .collect::>() + .into_iter() + .any(|v| v); + if any_viable { + viable.insert(node_index); + return true; + } + return false; + } + + // Leaf node: check viability. + if self.node_is_viable_for_head::( + node, + current_slot, + best_justified_checkpoint, + best_finalized_checkpoint, + ) { + viable.insert(node_index); + return true; + } + false + } + /// Spec: `get_head`. #[allow(clippy::too_many_arguments)] fn find_head_walk( @@ -1199,6 +1297,14 @@ impl ProtoArray { payload_status: PayloadStatus::Pending, }; + // Spec: `get_filtered_block_tree`. + let viable_nodes = self.get_filtered_block_tree::( + start_index, + current_slot, + best_justified_checkpoint, + best_finalized_checkpoint, + ); + // Compute once rather than per-child per-level. let apply_proposer_boost = self.should_apply_proposer_boost::(proposer_boost_root, justified_balances, spec)?; @@ -1207,17 +1313,7 @@ impl ProtoArray { let children: Vec<_> = self .get_node_children(&head)? .into_iter() - .filter(|(_, proto_node)| { - // Spec: `get_filtered_block_tree` pre-filters to only include - // blocks on viable branches. We approximate this by checking - // viability of each child during the walk. 
- self.node_is_viable_for_head::( - proto_node, - current_slot, - best_justified_checkpoint, - best_finalized_checkpoint, - ) - }) + .filter(|(fc_node, _)| viable_nodes.contains(&fc_node.proto_node_index)) .collect(); if children.is_empty() { From 66f71b3f22640178c861bfb313cd65b8d98e8e19 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 26 Mar 2026 10:01:33 +1100 Subject: [PATCH 031/127] Remove unused params --- consensus/proto_array/src/proto_array.rs | 3 --- consensus/proto_array/src/proto_array_fork_choice.rs | 3 --- 2 files changed, 6 deletions(-) diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 43c6d749625..8ae8d202347 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -391,10 +391,7 @@ impl ProtoArray { mut deltas: Vec, best_justified_checkpoint: Checkpoint, best_finalized_checkpoint: Checkpoint, - _new_justified_balances: &JustifiedBalances, - _proposer_boost_root: Hash256, current_slot: Slot, - _spec: &ChainSpec, ) -> Result<(), Error> { if deltas.len() != self.indices.len() { return Err(Error::InvalidDeltaLen { diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index 4be77b61ad4..94660f24fff 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -658,10 +658,7 @@ impl ProtoArrayForkChoice { deltas, justified_checkpoint, finalized_checkpoint, - new_balances, - proposer_boost_root, current_slot, - spec, ) .map_err(|e| format!("find_head apply_score_changes failed: {:?}", e))?; From 84679b1c071f415b88bcd7120fe2a041eb57d0ca Mon Sep 17 00:00:00 2001 From: dapplion <35266934+dapplion@users.noreply.github.com> Date: Wed, 25 Mar 2026 18:22:28 -0500 Subject: [PATCH 032/127] Remove redundant invalid-node check from filter_block_tree The children filter already skips invalid nodes, so a valid parent whose only 
children are invalid becomes a leaf and gets rejected by node_is_viable_for_head. The top-level check was redundant. --- consensus/proto_array/src/proto_array.rs | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 8ae8d202347..1731a79afb1 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -1217,14 +1217,7 @@ impl ProtoArray { return false; }; - // Nodes with invalid execution payloads are never viable. - // (The spec doesn't need this check because invalid blocks aren't in store.blocks.) - if node - .execution_status() - .is_ok_and(|status| status.is_invalid()) - { - return false; - } + // Skip invalid children — they aren't in store.blocks in the spec. let children: Vec = self From e77651ac06ff6b5b81e1677dafbb849a27565862 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 26 Mar 2026 10:27:20 +1100 Subject: [PATCH 033/127] Revert changes in load_parent --- beacon_node/beacon_chain/src/block_verification.rs | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index a452d528a12..d4c63a05518 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -1961,13 +1961,13 @@ fn load_parent>( { if block.as_block().is_parent_block_full(parent_bid_block_hash) { // TODO(gloas): loading the envelope here is not very efficient - if let Some(envelope) = chain.store.get_payload_envelope(&root)? { - (StatePayloadStatus::Full, envelope.message.state_root) - } else { - // The envelope hasn't been stored yet (e.g. genesis block, or payload - // not yet delivered). Fall back to the pending/empty state. 
- (StatePayloadStatus::Pending, parent_block.state_root()) - } + // TODO(gloas): check parent payload existence prior to this point? + let envelope = chain.store.get_payload_envelope(&root)?.ok_or_else(|| { + BeaconChainError::DBInconsistent(format!( + "Missing envelope for parent block {root:?}", + )) + })?; + (StatePayloadStatus::Full, envelope.message.state_root) } else { (StatePayloadStatus::Pending, parent_block.state_root()) } From fdf2fd22673f08fda47b5eb96932eb462c3c6381 Mon Sep 17 00:00:00 2001 From: dapplion <35266934+dapplion@users.noreply.github.com> Date: Wed, 25 Mar 2026 18:38:48 -0500 Subject: [PATCH 034/127] Simplify reorg weight logic, TODO(gloas) for payload-aware version Remove V17/V29 branching in beacon_chain reorg weight computation. Use total weight for both pre and post-GLOAS, which is correct for pre-GLOAS and conservative for post-GLOAS. The payload-aware version will be needed when reorg logic is enabled for GLOAS. --- beacon_node/beacon_chain/src/beacon_chain.rs | 25 +++----------------- 1 file changed, 3 insertions(+), 22 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 60cb9c8dea1..c4cc6925ba8 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -4854,29 +4854,10 @@ impl BeaconChain { return Err(Box::new(DoNotReOrg::NotProposing.into())); } - // If the current slot is already equal to the proposal slot (or we are in the tail end of - // the prior slot), then check the actual weight of the head against the head re-org threshold - // and the actual weight of the parent against the parent re-org threshold. - // Per spec `is_head_weak`: uses get_attestation_score(head, PENDING) which is - // the total weight. Per spec `is_parent_strong`: uses - // get_attestation_score(parent, parent_payload_status) where parent_payload_status - // is determined by the head block's relationship to its parent. 
+ // TODO(gloas): reorg weight logic needs updating for GLOAS. For now use + // total weight which is correct for pre-GLOAS and conservative for post-GLOAS. let head_weight = info.head_node.weight(); - let parent_weight = if let (Ok(head_payload_status), Ok(parent_v29)) = ( - info.head_node.parent_payload_status(), - info.parent_node.as_v29(), - ) { - // Post-GLOAS: use the payload-filtered weight matching how the head - // extends from its parent. - match head_payload_status { - proto_array::PayloadStatus::Full => parent_v29.full_payload_weight, - proto_array::PayloadStatus::Empty => parent_v29.empty_payload_weight, - proto_array::PayloadStatus::Pending => info.parent_node.weight(), - } - } else { - // Pre-GLOAS or fork boundary: use total weight. - info.parent_node.weight() - }; + let parent_weight = info.parent_node.weight(); let (head_weak, parent_strong) = if fork_choice_slot == re_org_block_slot { ( From 9f1f68c3ee940bc4bf4830af3dbc8b7f2966fe9e Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 26 Mar 2026 10:39:04 +1100 Subject: [PATCH 035/127] Add back AttestationFromBlock --- beacon_node/beacon_chain/src/beacon_chain.rs | 7 ++-- .../beacon_chain/src/block_verification.rs | 11 +++++-- consensus/fork_choice/src/fork_choice.rs | 32 ++++++++++++------- consensus/fork_choice/src/lib.rs | 8 ++--- consensus/fork_choice/tests/tests.rs | 18 ++++++++--- consensus/proto_array/src/proto_array.rs | 2 -- 6 files changed, 51 insertions(+), 27 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index c4cc6925ba8..9fa32d5dcc7 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -85,8 +85,8 @@ use execution_layer::{ }; use fixed_bytes::FixedBytesExtended; use fork_choice::{ - ExecutionStatus, ForkChoice, ForkchoiceUpdateParameters, InvalidationOperation, - PayloadVerificationStatus, ResetPayloadStatuses, + AttestationFromBlock, ExecutionStatus, 
ForkChoice, ForkchoiceUpdateParameters, + InvalidationOperation, PayloadVerificationStatus, ResetPayloadStatuses, }; use futures::channel::mpsc::Sender; use itertools::Itertools; @@ -2297,7 +2297,7 @@ impl BeaconChain { .on_attestation( self.slot()?, verified.indexed_attestation().to_ref(), - false, + AttestationFromBlock::False, &self.spec, ) .map_err(Into::into) @@ -4757,6 +4757,7 @@ impl BeaconChain { }) } + // TODO(gloas): wrong for Gloas, needs an update pub fn overridden_forkchoice_update_params_or_failure_reason( &self, canonical_forkchoice_params: &ForkchoiceUpdateParameters, diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index d4c63a05518..bc29486326a 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -71,7 +71,7 @@ use bls::{PublicKey, PublicKeyBytes}; use educe::Educe; use eth2::types::{BlockGossip, EventKind}; use execution_layer::PayloadStatus; -pub use fork_choice::PayloadVerificationStatus; +pub use fork_choice::{AttestationFromBlock, PayloadVerificationStatus}; use metrics::TryExt; use parking_lot::RwLockReadGuard; use proto_array::Block as ProtoBlock; @@ -1666,7 +1666,12 @@ impl ExecutionPendingBlock { .get_indexed_attestation(&state, attestation) .map_err(|e| BlockError::PerBlockProcessingError(e.into_with_index(i)))?; - match fork_choice.on_attestation(current_slot, indexed_attestation, true, &chain.spec) { + match fork_choice.on_attestation( + current_slot, + indexed_attestation, + AttestationFromBlock::True, + &chain.spec, + ) { Ok(()) => Ok(()), // Ignore invalid attestations whilst importing attestations from a block. The // block might be very old and therefore the attestations useless to fork choice. 
@@ -1689,7 +1694,7 @@ impl ExecutionPendingBlock { match fork_choice.on_payload_attestation( current_slot, indexed_payload_attestation, - true, + AttestationFromBlock::True, &ptc.0, ) { Ok(()) => Ok(()), diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index 896bb87c2da..d970b437b7b 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -328,6 +328,15 @@ fn dequeue_payload_attestations( } /// Denotes whether an attestation we are processing was received from a block or from gossip. +/// Equivalent to the `is_from_block` `bool` in: +/// +/// https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/fork-choice.md#validate_on_attestation +#[derive(Clone, Copy)] +pub enum AttestationFromBlock { + True, + False, +} + /// Parameters which are cached between calls to `ForkChoice::get_head`. #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub struct ForkchoiceUpdateParameters { @@ -1082,7 +1091,7 @@ where fn validate_on_attestation( &self, indexed_attestation: IndexedAttestationRef, - is_from_block: bool, + is_from_block: AttestationFromBlock, spec: &ChainSpec, ) -> Result<(), InvalidAttestation> { // There is no point in processing an attestation with an empty bitfield. Reject @@ -1096,7 +1105,7 @@ where let target = indexed_attestation.data().target; - if !is_from_block { + if matches!(is_from_block, AttestationFromBlock::False) { self.validate_target_epoch_against_current_time(target.epoch)?; } @@ -1193,7 +1202,7 @@ where fn validate_on_payload_attestation( &self, indexed_payload_attestation: &IndexedPayloadAttestation, - is_from_block: bool, + is_from_block: AttestationFromBlock, ) -> Result<(), InvalidAttestation> { if indexed_payload_attestation.attesting_indices.is_empty() { return Err(InvalidAttestation::EmptyAggregationBitfield); @@ -1215,7 +1224,7 @@ where // Gossip payload attestations must be for the current slot. 
// https://github.com/ethereum/consensus-specs/blob/master/specs/gloas/fork-choice.md - if !is_from_block + if matches!(is_from_block, AttestationFromBlock::False) && indexed_payload_attestation.data.slot != self.fc_store.get_current_slot() { return Err(InvalidAttestation::PayloadAttestationNotCurrentSlot { @@ -1227,7 +1236,7 @@ where // A payload attestation voting payload_present for a block in the current slot is // invalid: the payload cannot be known yet. This only applies to gossip attestations; // payload attestations from blocks have already been validated by the block producer. - if !is_from_block + if matches!(is_from_block, AttestationFromBlock::False) && self.fc_store.get_current_slot() == block.slot && indexed_payload_attestation.data.payload_present { @@ -1258,7 +1267,7 @@ where &mut self, system_time_current_slot: Slot, attestation: IndexedAttestationRef, - is_from_block: bool, + is_from_block: AttestationFromBlock, spec: &ChainSpec, ) -> Result<(), Error> { let _timer = metrics::start_timer(&metrics::FORK_CHOICE_ON_ATTESTATION_TIMES); @@ -1319,7 +1328,7 @@ where &mut self, system_time_current_slot: Slot, attestation: &IndexedPayloadAttestation, - is_from_block: bool, + is_from_block: AttestationFromBlock, ptc: &[usize], ) -> Result<(), Error> { self.update_time(system_time_current_slot)?; @@ -1339,10 +1348,11 @@ where let processing_slot = self.fc_store.get_current_slot(); // Payload attestations from blocks can be applied in the next slot (S+1 for data.slot=S), // while gossiped payload attestations are delayed one extra slot. 
- let should_process_now = if is_from_block { - attestation.data.slot < processing_slot - } else { - attestation.data.slot.saturating_add(1_u64) < processing_slot + let should_process_now = match is_from_block { + AttestationFromBlock::True => attestation.data.slot < processing_slot, + AttestationFromBlock::False => { + attestation.data.slot.saturating_add(1_u64) < processing_slot + } }; if should_process_now { diff --git a/consensus/fork_choice/src/lib.rs b/consensus/fork_choice/src/lib.rs index de3e709a844..93a8b376f5a 100644 --- a/consensus/fork_choice/src/lib.rs +++ b/consensus/fork_choice/src/lib.rs @@ -3,10 +3,10 @@ mod fork_choice_store; mod metrics; pub use crate::fork_choice::{ - Error, ForkChoice, ForkChoiceView, ForkchoiceUpdateParameters, InvalidAttestation, - InvalidBlock, PayloadVerificationStatus, PersistedForkChoice, PersistedForkChoiceV17, - PersistedForkChoiceV28, PersistedForkChoiceV29, QueuedAttestation, QueuedPayloadAttestation, - ResetPayloadStatuses, + AttestationFromBlock, Error, ForkChoice, ForkChoiceView, ForkchoiceUpdateParameters, + InvalidAttestation, InvalidBlock, PayloadVerificationStatus, PersistedForkChoice, + PersistedForkChoiceV17, PersistedForkChoiceV28, PersistedForkChoiceV29, QueuedAttestation, + QueuedPayloadAttestation, ResetPayloadStatuses, }; pub use fork_choice_store::ForkChoiceStore; pub use proto_array::{ diff --git a/consensus/fork_choice/tests/tests.rs b/consensus/fork_choice/tests/tests.rs index 44da1af148e..0a7000d476f 100644 --- a/consensus/fork_choice/tests/tests.rs +++ b/consensus/fork_choice/tests/tests.rs @@ -10,8 +10,8 @@ use beacon_chain::{ use bls::AggregateSignature; use fixed_bytes::FixedBytesExtended; use fork_choice::{ - ForkChoiceStore, InvalidAttestation, InvalidBlock, PayloadVerificationStatus, - QueuedAttestation, QueuedPayloadAttestation, + AttestationFromBlock, ForkChoiceStore, InvalidAttestation, InvalidBlock, + PayloadVerificationStatus, QueuedAttestation, QueuedPayloadAttestation, }; use 
state_processing::state_advance::complete_state_advance; use std::fmt; @@ -1033,7 +1033,12 @@ async fn payload_attestation_for_previous_slot_is_accepted_at_next_slot() { let result = chain .canonical_head .fork_choice_write_lock() - .on_payload_attestation(current_slot, &payload_attestation, true, ptc); + .on_payload_attestation( + current_slot, + &payload_attestation, + AttestationFromBlock::True, + ptc, + ); assert!( result.is_ok(), @@ -1082,7 +1087,12 @@ async fn non_block_payload_attestation_for_previous_slot_is_rejected() { let result = chain .canonical_head .fork_choice_write_lock() - .on_payload_attestation(s_plus_1, &payload_attestation, false, ptc); + .on_payload_attestation( + s_plus_1, + &payload_attestation, + AttestationFromBlock::False, + ptc, + ); assert!( matches!( result, diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 1731a79afb1..3f8db8d1fb1 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -1217,8 +1217,6 @@ impl ProtoArray { return false; }; - - // Skip invalid children — they aren't in store.blocks in the spec. 
let children: Vec = self .nodes From a69a848590aa0fd22f10cf0f1ea19b8ff3b37a82 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 26 Mar 2026 10:48:18 +1100 Subject: [PATCH 036/127] Remove expect --- consensus/proto_array/src/error.rs | 1 + consensus/proto_array/src/proto_array.rs | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/consensus/proto_array/src/error.rs b/consensus/proto_array/src/error.rs index 04e747f5f6f..bb5eea569d6 100644 --- a/consensus/proto_array/src/error.rs +++ b/consensus/proto_array/src/error.rs @@ -60,6 +60,7 @@ pub enum Error { BrokenBlock { block_root: Hash256, }, + NoViableChildren, } impl From for Error { diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 3f8db8d1fb1..e3671eef7d9 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -1338,7 +1338,7 @@ impl ProtoArray { (*weight, child.root, *payload_status_tiebreaker) }) .map(|(child, _, _)| child) - .expect("children is non-empty"); + .ok_or(Error::NoViableChildren)?; } } From f1b261f3360732a43dbb983f27dc47f7c1ce8b82 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 26 Mar 2026 10:57:37 +1100 Subject: [PATCH 037/127] Safeguard attestation index check --- consensus/fork_choice/src/fork_choice.rs | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index d970b437b7b..25716a93ce5 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -1285,7 +1285,7 @@ where // 2. Ignore all attestations to the zero hash. // // (1) becomes weird once we hit finality and fork choice drops the genesis block. 
(2) is - // fine because votes to the genesis block are not useful; all validators implicitly attest + // fine because votes.gloas_enabled() to the genesis block are not useful; all validators implicitly attest // to genesis just by being present in the chain. if attestation.data().beacon_block_root == Hash256::zero() { return Ok(()); @@ -1293,8 +1293,11 @@ where self.validate_on_attestation(attestation, is_from_block, spec)?; - // Per GLOAS spec: `payload_present = attestation.data.index == 1`. - let payload_present = attestation.data().index == 1; + // Per Gloas spec: `payload_present = attestation.data.index == 1`. + let payload_present = spec + .fork_name_at_slot::(attestation.data().slot) + .gloas_enabled() + && attestation.data().index == 1; if attestation.data().slot < self.fc_store.get_current_slot() { for validator_index in attestation.attesting_indices_iter() { From d58df3a656f7965d98b8a957475804f7538915ab Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 26 Mar 2026 11:07:38 +1100 Subject: [PATCH 038/127] Make proposer_index mandatory in on_block --- consensus/proto_array/src/error.rs | 1 + consensus/proto_array/src/fork_choice_test_definition.rs | 2 +- consensus/proto_array/src/proto_array.rs | 8 +++++++- consensus/proto_array/src/proto_array_fork_choice.rs | 8 ++++---- 4 files changed, 13 insertions(+), 6 deletions(-) diff --git a/consensus/proto_array/src/error.rs b/consensus/proto_array/src/error.rs index bb5eea569d6..bb47af97d91 100644 --- a/consensus/proto_array/src/error.rs +++ b/consensus/proto_array/src/error.rs @@ -61,6 +61,7 @@ pub enum Error { block_root: Hash256, }, NoViableChildren, + OnBlockRequiresProposerIndex, } impl From for Error { diff --git a/consensus/proto_array/src/fork_choice_test_definition.rs b/consensus/proto_array/src/fork_choice_test_definition.rs index 4507e013ba6..b6ccc4d4359 100644 --- a/consensus/proto_array/src/fork_choice_test_definition.rs +++ b/consensus/proto_array/src/fork_choice_test_definition.rs @@ 
-280,7 +280,7 @@ impl ForkChoiceTestDefinition { unrealized_finalized_checkpoint: None, execution_payload_parent_hash, execution_payload_block_hash, - proposer_index: None, + proposer_index: Some(0), }; fork_choice .process_block::( diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index e3671eef7d9..374190f9ed9 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -551,6 +551,12 @@ impl ProtoArray { return Ok(()); } + // We do not allow `proposer_index=None` for calls to `on_block`, it is only optional + // for backwards-compatibility with pre-Gloas V17 proto nodes. + let Some(proposer_index) = block.proposer_index else { + return Err(Error::OnBlockRequiresProposerIndex); + }; + let node_index = self.nodes.len(); let parent_index = block @@ -658,7 +664,7 @@ impl ProtoArray { BitVector::default() }, payload_received: is_genesis, - proposer_index: block.proposer_index.unwrap_or(0), + proposer_index, // Spec: `record_block_timeliness` + `get_forkchoice_store`. // Anchor gets [True, True]. Others computed from time_into_slot.
block_timeliness_attestation_threshold: is_genesis diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index 94660f24fff..e8ad6c063a8 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -492,7 +492,7 @@ impl ProtoArrayForkChoice { unrealized_finalized_checkpoint: Some(finalized_checkpoint), execution_payload_parent_hash, execution_payload_block_hash, - proposer_index: None, + proposer_index: Some(0), }; proto_array @@ -1338,7 +1338,7 @@ mod test_compute_deltas { unrealized_finalized_checkpoint: Some(genesis_checkpoint), execution_payload_parent_hash: None, execution_payload_block_hash: None, - proposer_index: None, + proposer_index: Some(0), }, genesis_slot + 1, genesis_checkpoint, @@ -1368,7 +1368,7 @@ mod test_compute_deltas { unrealized_finalized_checkpoint: None, execution_payload_parent_hash: None, execution_payload_block_hash: None, - proposer_index: None, + proposer_index: Some(0), }, genesis_slot + 1, genesis_checkpoint, @@ -1505,7 +1505,7 @@ mod test_compute_deltas { unrealized_finalized_checkpoint: Some(genesis_checkpoint), execution_payload_parent_hash: None, execution_payload_block_hash: None, - proposer_index: None, + proposer_index: Some(0), }, Slot::from(block.slot), genesis_checkpoint, From e7f027baddc6765ba4491790d406a837812a8e86 Mon Sep 17 00:00:00 2001 From: dapplion <35266934+dapplion@users.noreply.github.com> Date: Wed, 25 Mar 2026 19:36:14 -0500 Subject: [PATCH 039/127] O(n) children index, fix load_parent for gloas blocks MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Build parent->children index once per find_head call, replacing O(n) scans in filter_block_tree and get_node_children with O(1) lookups. - Skip zero block_hash in is_parent_block_full check — default/zero hashes don't indicate a real payload relationship. 
- Fall back to block state_root for genesis when envelope not stored. - Store execution payload envelope in EF test harness during on_execution_payload step. --- .../beacon_chain/src/block_verification.rs | 28 +++++--- consensus/proto_array/src/proto_array.rs | 70 ++++++++++++------- testing/ef_tests/src/cases/fork_choice.rs | 25 ++++++- 3 files changed, 88 insertions(+), 35 deletions(-) diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index bc29486326a..d09ac291ab0 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -1964,15 +1964,27 @@ fn load_parent>( if block.as_block().fork_name_unchecked().gloas_enabled() && let Ok(parent_bid_block_hash) = parent_block.payload_bid_block_hash() { - if block.as_block().is_parent_block_full(parent_bid_block_hash) { + if !parent_bid_block_hash.into_root().is_zero() + && block.as_block().is_parent_block_full(parent_bid_block_hash) + { // TODO(gloas): loading the envelope here is not very efficient - // TODO(gloas): check parent payload existence prior to this point? - let envelope = chain.store.get_payload_envelope(&root)?.ok_or_else(|| { - BeaconChainError::DBInconsistent(format!( - "Missing envelope for parent block {root:?}", - )) - })?; - (StatePayloadStatus::Full, envelope.message.state_root) + let envelope = chain.store.get_payload_envelope(&root)?; + let state_root = if let Some(env) = envelope { + env.message.state_root + } else { + // The envelope may not be stored yet for the genesis/anchor + // block. Fall back to the block's state_root which is the + // post-payload state for the anchor per get_forkchoice_store. 
+ if parent_block.slot() == chain.spec.genesis_slot { + parent_block.state_root() + } else { + return Err(BeaconChainError::DBInconsistent(format!( + "Missing envelope for parent block {root:?}", + )) + .into()); + } + }; + (StatePayloadStatus::Full, state_root) } else { (StatePayloadStatus::Pending, parent_block.state_root()) } diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 374190f9ed9..8e59071baf7 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -1188,6 +1188,26 @@ impl ProtoArray { Ok((best_fc_node.root, best_fc_node.payload_status)) } + /// Build a parent->children index. Invalid nodes are excluded + /// (they aren't in store.blocks in the spec). + fn build_children_index(&self) -> Vec> { + let mut children = vec![vec![]; self.nodes.len()]; + for (i, node) in self.nodes.iter().enumerate() { + if node + .execution_status() + .is_ok_and(|status| status.is_invalid()) + { + continue; + } + if let Some(parent) = node.parent() { + if parent < children.len() { + children[parent].push(i); + } + } + } + children + } + /// Spec: `get_filtered_block_tree`. /// /// Returns the set of node indices on viable branches — those with at least @@ -1198,6 +1218,7 @@ impl ProtoArray { current_slot: Slot, best_justified_checkpoint: Checkpoint, best_finalized_checkpoint: Checkpoint, + children_index: &[Vec], ) -> HashSet { let mut viable = HashSet::new(); self.filter_block_tree::( @@ -1205,6 +1226,7 @@ impl ProtoArray { current_slot, best_justified_checkpoint, best_finalized_checkpoint, + children_index, &mut viable, ); viable @@ -1217,25 +1239,17 @@ impl ProtoArray { current_slot: Slot, best_justified_checkpoint: Checkpoint, best_finalized_checkpoint: Checkpoint, + children_index: &[Vec], viable: &mut HashSet, ) -> bool { let Some(node) = self.nodes.get(node_index) else { return false; }; - // Skip invalid children — they aren't in store.blocks in the spec. 
- let children: Vec = self - .nodes - .iter() - .enumerate() - .filter(|(_, child)| { - child.parent() == Some(node_index) - && !child - .execution_status() - .is_ok_and(|status| status.is_invalid()) - }) - .map(|(i, _)| i) - .collect(); + let children = children_index + .get(node_index) + .map(|c| c.as_slice()) + .unwrap_or(&[]); if !children.is_empty() { // Evaluate ALL children (no short-circuit) to mark all viable branches. @@ -1247,6 +1261,7 @@ impl ProtoArray { current_slot, best_justified_checkpoint, best_finalized_checkpoint, + children_index, viable, ) }) @@ -1291,12 +1306,16 @@ impl ProtoArray { payload_status: PayloadStatus::Pending, }; + // Build parent->children index once for O(1) lookups. + let children_index = self.build_children_index(); + // Spec: `get_filtered_block_tree`. let viable_nodes = self.get_filtered_block_tree::( start_index, current_slot, best_justified_checkpoint, best_finalized_checkpoint, + &children_index, ); // Compute once rather than per-child per-level. @@ -1305,7 +1324,7 @@ impl ProtoArray { loop { let children: Vec<_> = self - .get_node_children(&head)? + .get_node_children(&head, &children_index)? 
.into_iter() .filter(|(fc_node, _)| viable_nodes.contains(&fc_node.proto_node_index)) .collect(); @@ -1468,6 +1487,7 @@ impl ProtoArray { fn get_node_children( &self, node: &IndexedForkChoiceNode, + children_index: &[Vec], ) -> Result, Error> { if node.payload_status == PayloadStatus::Pending { let proto_node = self @@ -1481,23 +1501,25 @@ impl ProtoArray { } Ok(children) } else { - Ok(self - .nodes + let child_indices = children_index + .get(node.proto_node_index) + .map(|c| c.as_slice()) + .unwrap_or(&[]); + Ok(child_indices .iter() - .enumerate() - .filter(|(_, child_node)| { - child_node.parent() == Some(node.proto_node_index) - && child_node.get_parent_payload_status() == node.payload_status - }) - .map(|(child_index, child_node)| { - ( + .filter_map(|&child_index| { + let child_node = self.nodes.get(child_index)?; + if child_node.get_parent_payload_status() != node.payload_status { + return None; + } + Some(( IndexedForkChoiceNode { root: child_node.root(), proto_node_index: child_index, payload_status: PayloadStatus::Pending, }, child_node.clone(), - ) + )) }) .collect()) } diff --git a/testing/ef_tests/src/cases/fork_choice.rs b/testing/ef_tests/src/cases/fork_choice.rs index 9f0e6de2ea5..0c95d1c2d2f 100644 --- a/testing/ef_tests/src/cases/fork_choice.rs +++ b/testing/ef_tests/src/cases/fork_choice.rs @@ -449,8 +449,7 @@ impl Case for ForkChoiceTest { execution_payload, valid, } => { - tester - .process_execution_payload(execution_payload.beacon_block_root(), *valid)?; + tester.process_execution_payload(execution_payload, *valid)?; } } } @@ -993,7 +992,27 @@ impl Tester { check_equal("proposer_head", proposer_head, expected_proposer_head) } - pub fn process_execution_payload(&self, block_root: Hash256, valid: bool) -> Result<(), Error> { + pub fn process_execution_payload( + &self, + signed_envelope: &SignedExecutionPayloadEnvelope, + valid: bool, + ) -> Result<(), Error> { + let block_root = signed_envelope.message.beacon_block_root; + + // Store the 
envelope in the database so that child blocks extending + // the FULL path can load the parent's post-payload state. + if valid { + self.harness + .chain + .store + .put_payload_envelope(&block_root, signed_envelope.clone()) + .map_err(|e| { + Error::InternalError(format!( + "Failed to store payload envelope for {block_root:?}: {e:?}", + )) + })?; + } + let result = self .harness .chain From f31a93697e0898e3b57db17de8dc10776c6bccf1 Mon Sep 17 00:00:00 2001 From: dapplion <35266934+dapplion@users.noreply.github.com> Date: Wed, 25 Mar 2026 20:08:43 -0500 Subject: [PATCH 040/127] Fix test review issues MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Remove wrong latest_message assertions from payload attestation test (on_payload_attestation writes to PTC bitfields, not vote tracker) - Fix corrupted comment: "votes.gloas_enabled() to the genesis block" → "votes to the genesis block" - Fix http_api test fallback string: "n/a" → "irrelevant" to match production code - Add issue link to #[ignore] test - Add comment explaining head_payload_status as u8 cast --- beacon_node/http_api/tests/tests.rs | 2 +- consensus/fork_choice/src/fork_choice.rs | 2 +- consensus/fork_choice/tests/tests.rs | 13 ++++--------- testing/ef_tests/src/cases/fork_choice.rs | 8 +++----- 4 files changed, 9 insertions(+), 16 deletions(-) diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index bfec7130f60..14bfb5ce920 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -3176,7 +3176,7 @@ impl ApiTester { .execution_status() .ok() .map(|status| status.to_string()) - .unwrap_or_else(|| "n/a".to_string()), + .unwrap_or_else(|| "irrelevant".to_string()), best_child: node .best_child() .and_then(|index| expected_proto_array.nodes.get(index)) diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index 25716a93ce5..8fcf1373e18 100644 --- 
a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -1285,7 +1285,7 @@ where // 2. Ignore all attestations to the zero hash. // // (1) becomes weird once we hit finality and fork choice drops the genesis block. (2) is - // fine because votes.gloas_enabled() to the genesis block are not useful; all validators implicitly attest + // fine because votes to the genesis block are not useful; all validators implicitly attest // to genesis just by being present in the chain. if attestation.data().beacon_block_root == Hash256::zero() { return Ok(()); diff --git a/consensus/fork_choice/tests/tests.rs b/consensus/fork_choice/tests/tests.rs index 0a7000d476f..532dd7fc4bf 100644 --- a/consensus/fork_choice/tests/tests.rs +++ b/consensus/fork_choice/tests/tests.rs @@ -172,7 +172,8 @@ impl ForkChoiceTest { self } - /// Inspect the queued payload attestations in fork choice. + // TODO(gloas): add inspect_queued_payload_attestations when payload + // attestation queueing tests are implemented. #[allow(dead_code)] pub fn inspect_queued_payload_attestations(self, mut func: F) -> Self where @@ -971,6 +972,8 @@ async fn invalid_attestation_future_block() { /// `validate_on_attestation`, which requires a block to exist at a GLOAS-enabled slot. /// Currently the mock execution layer cannot produce Gloas blocks (no /// `signed_execution_payload_bid` support). +/// TODO(gloas): un-ignore once mock EL supports Gloas blocks. 
+/// https://github.com/sigp/lighthouse/issues/9025 #[ignore] #[tokio::test] async fn invalid_attestation_payload_during_same_slot() { @@ -1045,14 +1048,6 @@ async fn payload_attestation_for_previous_slot_is_accepted_at_next_slot() { "payload attestation at slot S should be accepted at S+1, got: {:?}", result ); - - let latest_message = chain - .canonical_head - .fork_choice_read_lock() - .latest_message(0) - .expect("latest message should exist"); - assert_eq!(latest_message.slot, current_slot); - assert!(latest_message.payload_present); } /// Gossip payload attestations must be for the current slot. A payload attestation for slot S diff --git a/testing/ef_tests/src/cases/fork_choice.rs b/testing/ef_tests/src/cases/fork_choice.rs index 0c95d1c2d2f..22e8453e14a 100644 --- a/testing/ef_tests/src/cases/fork_choice.rs +++ b/testing/ef_tests/src/cases/fork_choice.rs @@ -1039,11 +1039,9 @@ impl Tester { pub fn check_head_payload_status(&self, expected_status: u8) -> Result<(), Error> { let head = self.find_head()?; - check_equal( - "head_payload_status", - head.head_payload_status() as u8, - expected_status, - ) + // PayloadStatus repr: Empty=0, Full=1, Pending=2 (matches spec constants). + let actual = head.head_payload_status() as u8; + check_equal("head_payload_status", actual, expected_status) } pub fn check_should_override_fcu( From a34b7c99dce8c02b78246f1438f328f521aac424 Mon Sep 17 00:00:00 2001 From: dapplion <35266934+dapplion@users.noreply.github.com> Date: Wed, 25 Mar 2026 20:33:17 -0500 Subject: [PATCH 041/127] Fix CI: collapse nested if, ignore payload attestation test MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Collapse nested if in build_children_index (clippy::collapsible_if) - Ignore payload_attestation_for_previous_slot_is_accepted_at_next_slot — test uses pre-Gloas blocks (V17) but on_payload_attestation requires V29 nodes. Needs mock EL Gloas block support. 
--- consensus/fork_choice/tests/tests.rs | 4 ++++ consensus/proto_array/src/proto_array.rs | 8 ++++---- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/consensus/fork_choice/tests/tests.rs b/consensus/fork_choice/tests/tests.rs index 532dd7fc4bf..b2a6cd56686 100644 --- a/consensus/fork_choice/tests/tests.rs +++ b/consensus/fork_choice/tests/tests.rs @@ -1005,6 +1005,10 @@ async fn invalid_attestation_payload_during_same_slot() { } /// A payload attestation for block A at slot S should be accepted when processed at slot S+1. +/// TODO(gloas): un-ignore once mock EL supports Gloas blocks. Payload +/// attestations require V29 nodes which need Gloas block production. +/// https://github.com/sigp/lighthouse/issues/9025 +#[ignore] #[tokio::test] async fn payload_attestation_for_previous_slot_is_accepted_at_next_slot() { let test = ForkChoiceTest::new() diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 8e59071baf7..81211690ff5 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -1199,10 +1199,10 @@ impl ProtoArray { { continue; } - if let Some(parent) = node.parent() { - if parent < children.len() { - children[parent].push(i); - } + if let Some(parent) = node.parent() + && parent < children.len() + { + children[parent].push(i); } } children From bb3e9e12d27e7f6e33adea62fa4e2c3307aa81c1 Mon Sep 17 00:00:00 2001 From: dapplion <35266934+dapplion@users.noreply.github.com> Date: Wed, 25 Mar 2026 20:38:15 -0500 Subject: [PATCH 042/127] Fix arithmetic lint in committee_cache (saturating_sub) --- consensus/types/src/state/committee_cache.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/consensus/types/src/state/committee_cache.rs b/consensus/types/src/state/committee_cache.rs index 4a28f3c6892..e76b0d24217 100644 --- a/consensus/types/src/state/committee_cache.rs +++ b/consensus/types/src/state/committee_cache.rs @@ -332,7 +332,7 @@ impl 
CommitteeCache { self.shuffling_positions .get(validator_index)? .0 - .map(|p| p.get() - 1) + .map(|p| p.get().saturating_sub(1)) } } From bc28e63585dfca966949eced279a28081844868a Mon Sep 17 00:00:00 2001 From: dapplion <35266934+dapplion@users.noreply.github.com> Date: Wed, 25 Mar 2026 20:42:18 -0500 Subject: [PATCH 043/127] Revert "Fix arithmetic lint in committee_cache (saturating_sub)" This reverts commit bb3e9e12d27e7f6e33adea62fa4e2c3307aa81c1. --- consensus/types/src/state/committee_cache.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/consensus/types/src/state/committee_cache.rs b/consensus/types/src/state/committee_cache.rs index e76b0d24217..4a28f3c6892 100644 --- a/consensus/types/src/state/committee_cache.rs +++ b/consensus/types/src/state/committee_cache.rs @@ -332,7 +332,7 @@ impl CommitteeCache { self.shuffling_positions .get(validator_index)? .0 - .map(|p| p.get().saturating_sub(1)) + .map(|p| p.get() - 1) } } From 93f987f3cfd1ae7e0f9c55492be2ec750ecf299e Mon Sep 17 00:00:00 2001 From: dapplion <35266934+dapplion@users.noreply.github.com> Date: Wed, 25 Mar 2026 21:16:06 -0500 Subject: [PATCH 044/127] Remove head_payload_status from ForkchoiceUpdateParameters MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit head_payload_status is internal fork choice state, not an EL forkchoiceUpdated parameter. It already lives on CachedHead — source it directly from get_head() return in recompute_head_at_slot instead of threading through ForkchoiceUpdateParameters. Also add TODO(gloas) for parent_head_hash in re-org path (V29 nodes don't carry execution_status). 
--- beacon_node/beacon_chain/src/beacon_chain.rs | 4 +++- beacon_node/beacon_chain/src/canonical_head.rs | 18 ++++++++---------- .../src/test_utils/mock_builder.rs | 3 +-- .../src/test_utils/mock_execution_layer.rs | 1 - consensus/fork_choice/src/fork_choice.rs | 6 ------ .../src/test_rig.rs | 3 +-- 6 files changed, 13 insertions(+), 22 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 9fa32d5dcc7..3c8ea307791 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -4898,6 +4898,9 @@ impl BeaconChain { return Err(Box::new(DoNotReOrg::HeadNotLate.into())); } + // TODO(gloas): V29 nodes don't carry execution_status, so this returns + // None for post-Gloas re-orgs. Need to source the EL block hash from + // the bid's block_hash instead. Re-org is disabled for Gloas for now. let parent_head_hash = info .parent_node .execution_status() @@ -4905,7 +4908,6 @@ impl BeaconChain { .and_then(|execution_status| execution_status.block_hash()); let forkchoice_update_params = ForkchoiceUpdateParameters { head_root: info.parent_node.root(), - head_payload_status: canonical_forkchoice_params.head_payload_status, head_hash: parent_head_hash, justified_hash: canonical_forkchoice_params.justified_hash, finalized_hash: canonical_forkchoice_params.finalized_hash, diff --git a/beacon_node/beacon_chain/src/canonical_head.rs b/beacon_node/beacon_chain/src/canonical_head.rs index 2715567cb3f..d74f162ded2 100644 --- a/beacon_node/beacon_chain/src/canonical_head.rs +++ b/beacon_node/beacon_chain/src/canonical_head.rs @@ -229,7 +229,6 @@ impl CachedHead { pub fn forkchoice_update_parameters(&self) -> ForkchoiceUpdateParameters { ForkchoiceUpdateParameters { head_root: self.snapshot.beacon_block_root, - head_payload_status: self.head_payload_status, head_hash: self.head_hash, justified_hash: self.justified_hash, finalized_hash: self.finalized_hash, @@ -276,7 +275,7 
@@ impl CanonicalHead { snapshot, justified_checkpoint: fork_choice_view.justified_checkpoint, finalized_checkpoint: fork_choice_view.finalized_checkpoint, - head_payload_status: forkchoice_update_params.head_payload_status, + head_payload_status: proto_array::PayloadStatus::Pending, head_hash: forkchoice_update_params.head_hash, justified_hash: forkchoice_update_params.justified_hash, finalized_hash: forkchoice_update_params.finalized_hash, @@ -337,7 +336,7 @@ impl CanonicalHead { snapshot: Arc::new(snapshot), justified_checkpoint: fork_choice_view.justified_checkpoint, finalized_checkpoint: fork_choice_view.finalized_checkpoint, - head_payload_status: forkchoice_update_params.head_payload_status, + head_payload_status: proto_array::PayloadStatus::Pending, head_hash: forkchoice_update_params.head_hash, justified_hash: forkchoice_update_params.justified_hash, finalized_hash: forkchoice_update_params.finalized_hash, @@ -615,15 +614,15 @@ impl BeaconChain { // was last run. let old_view = ForkChoiceView { head_block_root: old_cached_head.head_block_root(), - head_payload_status: old_cached_head.head_payload_status(), justified_checkpoint: old_cached_head.justified_checkpoint(), finalized_checkpoint: old_cached_head.finalized_checkpoint(), }; + let old_payload_status = old_cached_head.head_payload_status(); let mut fork_choice_write_lock = self.canonical_head.fork_choice_write_lock(); // Recompute the current head via the fork choice algorithm. - let _ = fork_choice_write_lock.get_head(current_slot, &self.spec)?; + let (_, new_payload_status) = fork_choice_write_lock.get_head(current_slot, &self.spec)?; // Downgrade the fork choice write-lock to a read lock, without allowing access to any // other writers. @@ -668,9 +667,8 @@ impl BeaconChain { }); } - // Exit early if the head or justified/finalized checkpoints have not changed, there's - // nothing to do. - if new_view == old_view { + // Exit early if the head, checkpoints, and payload status have not changed. 
+ if new_view == old_view && new_payload_status == old_payload_status { debug!( head = ?new_view.head_block_root, "No change in canonical head" @@ -727,7 +725,7 @@ impl BeaconChain { snapshot: Arc::new(new_snapshot), justified_checkpoint: new_view.justified_checkpoint, finalized_checkpoint: new_view.finalized_checkpoint, - head_payload_status: new_forkchoice_update_parameters.head_payload_status, + head_payload_status: new_payload_status, head_hash: new_forkchoice_update_parameters.head_hash, justified_hash: new_forkchoice_update_parameters.justified_hash, finalized_hash: new_forkchoice_update_parameters.finalized_hash, @@ -755,7 +753,7 @@ impl BeaconChain { snapshot: old_cached_head.snapshot.clone(), justified_checkpoint: new_view.justified_checkpoint, finalized_checkpoint: new_view.finalized_checkpoint, - head_payload_status: new_forkchoice_update_parameters.head_payload_status, + head_payload_status: new_payload_status, head_hash: new_forkchoice_update_parameters.head_hash, justified_hash: new_forkchoice_update_parameters.justified_hash, finalized_hash: new_forkchoice_update_parameters.finalized_hash, diff --git a/beacon_node/execution_layer/src/test_utils/mock_builder.rs b/beacon_node/execution_layer/src/test_utils/mock_builder.rs index aa7e309f2c0..7b6c4e8310c 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_builder.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_builder.rs @@ -12,7 +12,7 @@ use eth2::{ BeaconNodeHttpClient, CONSENSUS_VERSION_HEADER, CONTENT_TYPE_HEADER, SSZ_CONTENT_TYPE_HEADER, Timeouts, }; -use fork_choice::{ForkchoiceUpdateParameters, PayloadStatus as FcPayloadStatus}; +use fork_choice::ForkchoiceUpdateParameters; use parking_lot::RwLock; use sensitive_url::SensitiveUrl; use ssz::Encode; @@ -934,7 +934,6 @@ impl MockBuilder { finalized_hash: Some(finalized_execution_hash), justified_hash: Some(justified_execution_hash), head_root: head_block_root, - head_payload_status: FcPayloadStatus::Pending, }; let _status = self 
diff --git a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs index 0aee30dff0f..91966ff65e3 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs @@ -92,7 +92,6 @@ impl MockExecutionLayer { let head_block_root = Hash256::repeat_byte(42); let forkchoice_update_params = ForkchoiceUpdateParameters { head_root: head_block_root, - head_payload_status: fork_choice::PayloadStatus::Pending, head_hash: Some(parent_hash), justified_hash: None, finalized_hash: None, diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index 8fcf1373e18..16bccc06186 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -342,7 +342,6 @@ pub enum AttestationFromBlock { pub struct ForkchoiceUpdateParameters { /// The most recent result of running `ForkChoice::get_head`. pub head_root: Hash256, - pub head_payload_status: PayloadStatus, pub head_hash: Option, pub justified_hash: Option, pub finalized_hash: Option, @@ -351,7 +350,6 @@ pub struct ForkchoiceUpdateParameters { #[derive(Clone, Copy, Debug, PartialEq)] pub struct ForkChoiceView { pub head_block_root: Hash256, - pub head_payload_status: PayloadStatus, pub justified_checkpoint: Checkpoint, pub finalized_checkpoint: Checkpoint, } @@ -483,7 +481,6 @@ where finalized_hash: None, // These will be updated during the next call to `Self::get_head`. 
head_root: Hash256::zero(), - head_payload_status: PayloadStatus::Pending, }, _phantom: PhantomData, }; @@ -588,7 +585,6 @@ where .and_then(|b| b.execution_status.block_hash()); self.forkchoice_update_parameters = ForkchoiceUpdateParameters { head_root, - head_payload_status, head_hash, justified_hash, finalized_hash, @@ -684,7 +680,6 @@ where pub fn cached_fork_choice_view(&self) -> ForkChoiceView { ForkChoiceView { head_block_root: self.forkchoice_update_parameters.head_root, - head_payload_status: self.forkchoice_update_parameters.head_payload_status, justified_checkpoint: self.justified_checkpoint(), finalized_checkpoint: self.finalized_checkpoint(), } @@ -1769,7 +1764,6 @@ where finalized_hash: None, // Will be updated in the following call to `Self::get_head`. head_root: Hash256::zero(), - head_payload_status: PayloadStatus::Pending, }, _phantom: PhantomData, }; diff --git a/testing/execution_engine_integration/src/test_rig.rs b/testing/execution_engine_integration/src/test_rig.rs index 2c20a414893..6bf4a1aa529 100644 --- a/testing/execution_engine_integration/src/test_rig.rs +++ b/testing/execution_engine_integration/src/test_rig.rs @@ -13,7 +13,7 @@ use execution_layer::{ LATEST_TAG, PayloadAttributes, PayloadParameters, PayloadStatus, }; use fixed_bytes::FixedBytesExtended; -use fork_choice::{ForkchoiceUpdateParameters, PayloadStatus as FcPayloadStatus}; +use fork_choice::ForkchoiceUpdateParameters; use reqwest::{Client, header::CONTENT_TYPE}; use sensitive_url::SensitiveUrl; use serde_json::{Value, json}; @@ -294,7 +294,6 @@ impl TestRig { let finalized_block_hash = ExecutionBlockHash::zero(); let forkchoice_update_params = ForkchoiceUpdateParameters { head_root, - head_payload_status: FcPayloadStatus::Pending, head_hash: Some(parent_hash), justified_hash: Some(justified_block_hash), finalized_hash: Some(finalized_block_hash), From c7670ede0289bf395baeb6961e27d209f2db9dea Mon Sep 17 00:00:00 2001 From: dapplion <35266934+dapplion@users.noreply.github.com> 
Date: Wed, 25 Mar 2026 23:07:45 -0500 Subject: [PATCH 045/127] Cleanup and spec parity fixes - Add missing payload attestation slot check: spec returns early when data.slot != block.slot (PTC votes only for assigned block) - Remove dead ignored tests (need mock EL Gloas support to run) - Remove unused new_with_gloas and inspect_queued_payload_attestations - Remove gloas entries from bin.rs (not part of this PR) - Collapse nested if in payload attestation error handling (clippy) - Rename env -> envelope in load_parent - Add TODO(gloas) for parent_head_hash in re-org path - Remove head_payload_status from ForkchoiceUpdateParameters (lives on CachedHead, sourced from get_head return) --- .../beacon_chain/src/block_verification.rs | 7 +- consensus/fork_choice/src/fork_choice.rs | 6 + consensus/fork_choice/tests/tests.rs | 128 +----------------- consensus/proto_array/src/bin.rs | 8 -- 4 files changed, 10 insertions(+), 139 deletions(-) diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index cb561dff24b..53acc70b6e4 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -1698,10 +1698,9 @@ impl ExecutionPendingBlock { indexed_payload_attestation, AttestationFromBlock::True, &ptc.0, - ) { - if !matches!(e, ForkChoiceError::InvalidAttestation(_)) { - return Err(BlockError::BeaconChainError(Box::new(e.into()))); - } + ) && !matches!(e, ForkChoiceError::InvalidAttestation(_)) + { + return Err(BlockError::BeaconChainError(Box::new(e.into()))); } } } diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index 38fcfb3400a..1f25afee8e5 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -1216,6 +1216,12 @@ where }); } + // Spec: `if data.slot != state.slot: return` — PTC votes can only + // change the vote for their assigned beacon block. 
+ if block.slot != indexed_payload_attestation.data.slot { + return Ok(()); + } + // Gossip payload attestations must be for the current slot. // https://github.com/ethereum/consensus-specs/blob/master/specs/gloas/fork-choice.md if matches!(is_from_block, AttestationFromBlock::False) diff --git a/consensus/fork_choice/tests/tests.rs b/consensus/fork_choice/tests/tests.rs index b2a6cd56686..839d0f4c5c3 100644 --- a/consensus/fork_choice/tests/tests.rs +++ b/consensus/fork_choice/tests/tests.rs @@ -11,7 +11,7 @@ use bls::AggregateSignature; use fixed_bytes::FixedBytesExtended; use fork_choice::{ AttestationFromBlock, ForkChoiceStore, InvalidAttestation, InvalidBlock, - PayloadVerificationStatus, QueuedAttestation, QueuedPayloadAttestation, + PayloadVerificationStatus, QueuedAttestation, }; use state_processing::state_advance::complete_state_advance; use std::fmt; @@ -76,19 +76,6 @@ impl ForkChoiceTest { /// Creates a new tester with the GLOAS fork active at epoch 1. /// Genesis is a standard Fulu block (epoch 0), so block production works normally. /// Tests that need GLOAS semantics should advance the chain into epoch 1 first. - pub fn new_with_gloas() -> Self { - let mut spec = ForkName::latest_stable().make_genesis_spec(ChainSpec::default()); - spec.gloas_fork_epoch = Some(Epoch::new(1)); - let harness = BeaconChainHarness::builder(MainnetEthSpec) - .spec(spec.into()) - .deterministic_keypairs(VALIDATOR_COUNT) - .fresh_ephemeral_store() - .mock_execution_layer() - .build(); - - Self { harness } - } - /// Get a value from the `ForkChoice` instantiation. fn get(&self, func: T) -> U where @@ -172,29 +159,6 @@ impl ForkChoiceTest { self } - // TODO(gloas): add inspect_queued_payload_attestations when payload - // attestation queueing tests are implemented. 
- #[allow(dead_code)] - pub fn inspect_queued_payload_attestations(self, mut func: F) -> Self - where - F: FnMut(&[QueuedPayloadAttestation]), - { - self.harness - .chain - .canonical_head - .fork_choice_write_lock() - .update_time(self.harness.chain.slot().unwrap()) - .unwrap(); - func( - self.harness - .chain - .canonical_head - .fork_choice_read_lock() - .queued_payload_attestations(), - ); - self - } - /// Skip a slot, without producing a block. pub fn skip_slot(self) -> Self { self.harness.advance_slot(); @@ -964,96 +928,6 @@ async fn invalid_attestation_future_block() { .await; } -/// Payload attestations (index == 1) are invalid when they refer to a block in the same slot. -/// This check only applies when GLOAS is active. -/// -/// TODO(gloas): un-ignore once the test harness supports Gloas block production. -/// The validation logic is gated on `spec.fork_name_at_slot().gloas_enabled()` in -/// `validate_on_attestation`, which requires a block to exist at a GLOAS-enabled slot. -/// Currently the mock execution layer cannot produce Gloas blocks (no -/// `signed_execution_payload_bid` support). -/// TODO(gloas): un-ignore once mock EL supports Gloas blocks. 
-/// https://github.com/sigp/lighthouse/issues/9025 -#[ignore] -#[tokio::test] -async fn invalid_attestation_payload_during_same_slot() { - ForkChoiceTest::new_with_gloas() - .apply_blocks_without_new_attestations(1) - .await - .apply_attestation_to_chain( - MutationDelay::NoDelay, - |attestation, chain| { - let block_slot = chain - .get_blinded_block(&attestation.data().beacon_block_root) - .expect("should read attested block") - .expect("attested block should exist") - .slot(); - - attestation.data_mut().slot = block_slot; - attestation.data_mut().target.epoch = block_slot.epoch(E::slots_per_epoch()); - attestation.data_mut().index = 1; - }, - |result| { - assert_invalid_attestation!( - result, - InvalidAttestation::InvalidSameSlotAttestationIndex { slot } - if slot == Slot::new(1) - ) - }, - ) - .await; -} - -/// A payload attestation for block A at slot S should be accepted when processed at slot S+1. -/// TODO(gloas): un-ignore once mock EL supports Gloas blocks. Payload -/// attestations require V29 nodes which need Gloas block production. -/// https://github.com/sigp/lighthouse/issues/9025 -#[ignore] -#[tokio::test] -async fn payload_attestation_for_previous_slot_is_accepted_at_next_slot() { - let test = ForkChoiceTest::new() - .apply_blocks_without_new_attestations(1) - .await; - - let chain = &test.harness.chain; - let block_a = chain - .block_at_slot(Slot::new(1), WhenSlotSkipped::Prev) - .expect("lookup should succeed") - .expect("block A should exist"); - let block_a_root = block_a.canonical_root(); - let current_slot = block_a.slot().saturating_add(1_u64); - - let payload_attestation = IndexedPayloadAttestation:: { - attesting_indices: vec![0_u64].try_into().expect("valid attesting indices"), - data: PayloadAttestationData { - beacon_block_root: block_a_root, - slot: Slot::new(1), - payload_present: true, - blob_data_available: true, - }, - signature: AggregateSignature::empty(), - }; - - // PTC mapping: validator 0 is at ptc position 0. 
- let ptc = &[0_usize]; - - let result = chain - .canonical_head - .fork_choice_write_lock() - .on_payload_attestation( - current_slot, - &payload_attestation, - AttestationFromBlock::True, - ptc, - ); - - assert!( - result.is_ok(), - "payload attestation at slot S should be accepted at S+1, got: {:?}", - result - ); -} - /// Gossip payload attestations must be for the current slot. A payload attestation for slot S /// received at slot S+1 should be rejected per the spec. #[tokio::test] diff --git a/consensus/proto_array/src/bin.rs b/consensus/proto_array/src/bin.rs index c5df3f17e4a..e1d307affb4 100644 --- a/consensus/proto_array/src/bin.rs +++ b/consensus/proto_array/src/bin.rs @@ -18,14 +18,6 @@ fn main() { "execution_status_03.yaml", get_execution_status_test_definition_03(), ); - write_test_def_to_yaml( - "gloas_chain_following.yaml", - get_gloas_chain_following_test_definition(), - ); - write_test_def_to_yaml( - "gloas_payload_probe.yaml", - get_gloas_payload_probe_test_definition(), - ); } fn write_test_def_to_yaml(filename: &str, def: ForkChoiceTestDefinition) { From ea1e99b2f78426ae1c1edcdcb988317ab5f20b68 Mon Sep 17 00:00:00 2001 From: dapplion <35266934+dapplion@users.noreply.github.com> Date: Wed, 25 Mar 2026 23:09:11 -0500 Subject: [PATCH 046/127] Add TODO for head_payload_status initialization (re: #8998) --- beacon_node/beacon_chain/src/canonical_head.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/beacon_node/beacon_chain/src/canonical_head.rs b/beacon_node/beacon_chain/src/canonical_head.rs index d74f162ded2..4f0a9817300 100644 --- a/beacon_node/beacon_chain/src/canonical_head.rs +++ b/beacon_node/beacon_chain/src/canonical_head.rs @@ -275,6 +275,7 @@ impl CanonicalHead { snapshot, justified_checkpoint: fork_choice_view.justified_checkpoint, finalized_checkpoint: fork_choice_view.finalized_checkpoint, + // TODO(gloas): compute from snapshot state once #8998 lands. 
head_payload_status: proto_array::PayloadStatus::Pending, head_hash: forkchoice_update_params.head_hash, justified_hash: forkchoice_update_params.justified_hash, @@ -336,6 +337,7 @@ impl CanonicalHead { snapshot: Arc::new(snapshot), justified_checkpoint: fork_choice_view.justified_checkpoint, finalized_checkpoint: fork_choice_view.finalized_checkpoint, + // TODO(gloas): compute from snapshot state once #8998 lands. head_payload_status: proto_array::PayloadStatus::Pending, head_hash: forkchoice_update_params.head_hash, justified_hash: forkchoice_update_params.justified_hash, From ac5357532b5b56bd87ebad9e14395fef64b18d0c Mon Sep 17 00:00:00 2001 From: dapplion <35266934+dapplion@users.noreply.github.com> Date: Wed, 25 Mar 2026 23:19:54 -0500 Subject: [PATCH 047/127] Source head_payload_status from get_head, not hardcoded Pending Thread head_payload_status from get_head() return through to CanonicalHead::new(). In restore_from_store, call get_head() on the loaded fork choice to get the correct status. Removes Pending defaults. --- beacon_node/beacon_chain/src/builder.rs | 5 +++-- beacon_node/beacon_chain/src/canonical_head.rs | 11 ++++++----- 2 files changed, 9 insertions(+), 7 deletions(-) diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index 5c99f5c4c8b..ec549a17e10 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -776,7 +776,7 @@ where slot_clock.now().ok_or("Unable to read slot")? 
}; - let (initial_head_block_root, _head_payload_status) = fork_choice + let (initial_head_block_root, head_payload_status) = fork_choice .get_head(current_slot, &self.spec) .map_err(|e| format!("Unable to get fork choice head: {:?}", e))?; @@ -923,7 +923,8 @@ where let genesis_validators_root = head_snapshot.beacon_state.genesis_validators_root(); let genesis_time = head_snapshot.beacon_state.genesis_time(); - let canonical_head = CanonicalHead::new(fork_choice, Arc::new(head_snapshot)); + let canonical_head = + CanonicalHead::new(fork_choice, Arc::new(head_snapshot), head_payload_status); let shuffling_cache_size = self.chain_config.shuffling_cache_size; let complete_blob_backfill = self.chain_config.complete_blob_backfill; diff --git a/beacon_node/beacon_chain/src/canonical_head.rs b/beacon_node/beacon_chain/src/canonical_head.rs index 4f0a9817300..bfd3d795124 100644 --- a/beacon_node/beacon_chain/src/canonical_head.rs +++ b/beacon_node/beacon_chain/src/canonical_head.rs @@ -268,6 +268,7 @@ impl CanonicalHead { pub fn new( fork_choice: BeaconForkChoice, snapshot: Arc>, + head_payload_status: proto_array::PayloadStatus, ) -> Self { let fork_choice_view = fork_choice.cached_fork_choice_view(); let forkchoice_update_params = fork_choice.get_forkchoice_update_parameters(); @@ -275,8 +276,7 @@ impl CanonicalHead { snapshot, justified_checkpoint: fork_choice_view.justified_checkpoint, finalized_checkpoint: fork_choice_view.finalized_checkpoint, - // TODO(gloas): compute from snapshot state once #8998 lands. - head_payload_status: proto_array::PayloadStatus::Pending, + head_payload_status, head_hash: forkchoice_update_params.head_hash, justified_hash: forkchoice_update_params.justified_hash, finalized_hash: forkchoice_update_params.finalized_hash, @@ -304,9 +304,11 @@ impl CanonicalHead { store: &BeaconStore, spec: &ChainSpec, ) -> Result<(), Error> { - let fork_choice = + let mut fork_choice = >::load_fork_choice(store.clone(), reset_payload_statuses, spec)? 
.ok_or(Error::MissingPersistedForkChoice)?; + let current_slot_for_head = fork_choice.fc_store().get_current_slot(); + let (_, head_payload_status) = fork_choice.get_head(current_slot_for_head, spec)?; let fork_choice_view = fork_choice.cached_fork_choice_view(); let beacon_block_root = fork_choice_view.head_block_root; let beacon_block = store @@ -337,8 +339,7 @@ impl CanonicalHead { snapshot: Arc::new(snapshot), justified_checkpoint: fork_choice_view.justified_checkpoint, finalized_checkpoint: fork_choice_view.finalized_checkpoint, - // TODO(gloas): compute from snapshot state once #8998 lands. - head_payload_status: proto_array::PayloadStatus::Pending, + head_payload_status, head_hash: forkchoice_update_params.head_hash, justified_hash: forkchoice_update_params.justified_hash, finalized_hash: forkchoice_update_params.finalized_hash, From 12f5ab04f33a94111fcef8a3823310338618167a Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 26 Mar 2026 16:03:55 +1100 Subject: [PATCH 048/127] Load the state corresponding to head payload status yay --- beacon_node/beacon_chain/src/builder.rs | 7 +++---- .../proto_array/src/proto_array_fork_choice.rs | 15 ++++++++++++++- 2 files changed, 17 insertions(+), 5 deletions(-) diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index ec549a17e10..11b87351b19 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -45,7 +45,7 @@ use tree_hash::TreeHash; use types::data::CustodyIndex; use types::{ BeaconBlock, BeaconState, BlobSidecarList, ChainSpec, ColumnIndex, DataColumnSidecarList, - Epoch, EthSpec, Hash256, SignedBeaconBlock, Slot, StatePayloadStatus, + Epoch, EthSpec, Hash256, SignedBeaconBlock, Slot, }; /// An empty struct used to "witness" all the `BeaconChainTypes` traits. It has no user-facing @@ -786,13 +786,12 @@ where .map_err(|e| descriptive_db_error("head block", &e))? 
.ok_or("Head block not found in store")?; - // TODO(gloas): update head loading to load Full block once fork choice works - let payload_status = StatePayloadStatus::Pending; + let state_payload_status = head_payload_status.as_state_payload_status(); let (_head_state_root, head_state) = store .get_advanced_hot_state( head_block_root, - payload_status, + state_payload_status, current_slot, head_block.state_root(), ) diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index e8ad6c063a8..18d593f0e6e 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -18,7 +18,7 @@ use std::{ }; use types::{ AttestationShufflingId, ChainSpec, Checkpoint, Epoch, EthSpec, ExecutionBlockHash, Hash256, - Slot, + Slot, StatePayloadStatus, }; pub const DEFAULT_PRUNE_THRESHOLD: usize = 256; @@ -69,6 +69,19 @@ pub enum PayloadStatus { Pending = 2, } +impl PayloadStatus { + /// Convert a `PayloadStatus` into the equivalent `StatePayloadStatus`. + /// + /// This maps `Empty` onto `StatePayloadStatus::Pending` because empty and pending fork choice + /// nodes correspond to the exact same state. + pub fn as_state_payload_status(self) -> StatePayloadStatus { + match self { + Self::Empty | Self::Pending => StatePayloadStatus::Pending, + Self::Full => StatePayloadStatus::Full, + } + } +} + /// Spec's `ForkChoiceNode` augmented with ProtoNode index. 
pub struct IndexedForkChoiceNode { pub root: Hash256, From 09e9a5431498cea7ba5ebfb7e06f072fc35eaf9f Mon Sep 17 00:00:00 2001 From: Eitan Seri- Levi Date: Thu, 26 Mar 2026 23:40:35 -0700 Subject: [PATCH 049/127] When a block comes in whose parent is unkown, queue the block for processing and lookup the parent envelope --- .../beacon_chain/src/block_verification.rs | 21 ++-- beacon_node/beacon_processor/src/lib.rs | 14 ++- .../src/beacon/execution_payload_envelope.rs | 82 +++++++++---- .../src/service/api_types.rs | 2 + .../gossip_methods.rs | 21 ++++ .../src/network_beacon_processor/mod.rs | 16 +++ .../network_beacon_processor/sync_methods.rs | 80 +++++++++++- beacon_node/network/src/router.rs | 50 +++++++- .../network/src/sync/block_lookups/mod.rs | 67 ++++++++++ .../sync/block_lookups/single_block_lookup.rs | 21 +++- beacon_node/network/src/sync/manager.rs | 106 +++++++++++++++- .../network/src/sync/network_context.rs | 114 +++++++++++++++++- .../src/sync/network_context/requests.rs | 4 + .../requests/payload_envelopes_by_root.rs | 53 ++++++++ 14 files changed, 608 insertions(+), 43 deletions(-) create mode 100644 beacon_node/network/src/sync/network_context/requests/payload_envelopes_by_root.rs diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 802b090f6a8..916a207e623 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -321,6 +321,13 @@ pub enum BlockError { bid_parent_root: Hash256, block_parent_root: Hash256, }, + /// The parent block is known but its execution payload envelope has not been received yet. + /// + /// ## Peer scoring + /// + /// It's unclear if this block is valid, but it cannot be fully verified without the parent's + /// execution payload envelope. 
+ ParentEnvelopeUnknown { parent_root: Hash256 }, } /// Which specific signature(s) are invalid in a SignedBeaconBlock @@ -1939,13 +1946,13 @@ fn load_parent>( && let Ok(parent_bid_block_hash) = parent_block.payload_bid_block_hash() { if block.as_block().is_parent_block_full(parent_bid_block_hash) { - // TODO(gloas): loading the envelope here is not very efficient - // TODO(gloas): check parent payload existence prior to this point? - let envelope = chain.store.get_payload_envelope(&root)?.ok_or_else(|| { - BeaconChainError::DBInconsistent(format!( - "Missing envelope for parent block {root:?}", - )) - })?; + // If the parent's execution payload envelope hasn't arrived yet, + // return an unknown parent error so the block gets sent to the + // reprocess queue. + let envelope = chain + .store + .get_payload_envelope(&root)? + .ok_or(BlockError::ParentEnvelopeUnknown { parent_root: root })?; (StatePayloadStatus::Full, envelope.message.state_root) } else { (StatePayloadStatus::Pending, parent_block.state_root()) diff --git a/beacon_node/beacon_processor/src/lib.rs b/beacon_node/beacon_processor/src/lib.rs index 724c41cfc94..229816ba77c 100644 --- a/beacon_node/beacon_processor/src/lib.rs +++ b/beacon_node/beacon_processor/src/lib.rs @@ -416,6 +416,9 @@ pub enum Work { RpcBlobs { process_fn: AsyncFn, }, + RpcPayloadEnvelope { + process_fn: AsyncFn, + }, RpcCustodyColumn(AsyncFn), ColumnReconstruction(AsyncFn), IgnoredRpcBlock { @@ -477,6 +480,7 @@ pub enum WorkType { GossipLightClientOptimisticUpdate, RpcBlock, RpcBlobs, + RpcPayloadEnvelope, RpcCustodyColumn, ColumnReconstruction, IgnoredRpcBlock, @@ -538,6 +542,7 @@ impl Work { Work::GossipProposerPreferences(_) => WorkType::GossipProposerPreferences, Work::RpcBlock { .. } => WorkType::RpcBlock, Work::RpcBlobs { .. } => WorkType::RpcBlobs, + Work::RpcPayloadEnvelope { .. } => WorkType::RpcPayloadEnvelope, Work::RpcCustodyColumn { .. 
} => WorkType::RpcCustodyColumn, Work::ColumnReconstruction(_) => WorkType::ColumnReconstruction, Work::IgnoredRpcBlock { .. } => WorkType::IgnoredRpcBlock, @@ -1169,7 +1174,9 @@ impl BeaconProcessor { Work::GossipLightClientOptimisticUpdate { .. } => work_queues .lc_gossip_optimistic_update_queue .push(work, work_id), - Work::RpcBlock { .. } | Work::IgnoredRpcBlock { .. } => { + Work::RpcBlock { .. } + | Work::IgnoredRpcBlock { .. } + | Work::RpcPayloadEnvelope { .. } => { work_queues.rpc_block_queue.push(work, work_id) } Work::RpcBlobs { .. } => work_queues.rpc_blob_queue.push(work, work_id), @@ -1301,7 +1308,9 @@ impl BeaconProcessor { WorkType::GossipLightClientOptimisticUpdate => { work_queues.lc_gossip_optimistic_update_queue.len() } - WorkType::RpcBlock => work_queues.rpc_block_queue.len(), + WorkType::RpcBlock | WorkType::RpcPayloadEnvelope => { + work_queues.rpc_block_queue.len() + } WorkType::RpcBlobs | WorkType::IgnoredRpcBlock => { work_queues.rpc_blob_queue.len() } @@ -1496,6 +1505,7 @@ impl BeaconProcessor { beacon_block_root: _, } | Work::RpcBlobs { process_fn } + | Work::RpcPayloadEnvelope { process_fn } | Work::RpcCustodyColumn(process_fn) | Work::ColumnReconstruction(process_fn) => task_spawner.spawn_async(process_fn), Work::IgnoredRpcBlock { process_fn } => task_spawner.spawn_blocking(process_fn), diff --git a/beacon_node/http_api/src/beacon/execution_payload_envelope.rs b/beacon_node/http_api/src/beacon/execution_payload_envelope.rs index 81f2ea41ea9..584ef40009d 100644 --- a/beacon_node/http_api/src/beacon/execution_payload_envelope.rs +++ b/beacon_node/http_api/src/beacon/execution_payload_envelope.rs @@ -1,6 +1,10 @@ use crate::task_spawner::{Priority, TaskSpawner}; use crate::utils::{ChainFilter, EthV1Filter, NetworkTxFilter, ResponseFilter, TaskSpawnerFilter}; -use beacon_chain::{BeaconChain, BeaconChainTypes}; +use beacon_chain::payload_envelope_verification::gossip_verified_envelope::GossipVerifiedEnvelope; +use beacon_chain::{ + 
BeaconChain, BeaconChainTypes, NotifyExecutionLayer, + payload_envelope_verification::EnvelopeError, +}; use bytes::Bytes; use eth2::{CONTENT_TYPE_HEADER, SSZ_CONTENT_TYPE_HEADER}; use lighthouse_network::PubsubMessage; @@ -9,8 +13,11 @@ use ssz::Decode; use std::sync::Arc; use tokio::sync::mpsc::UnboundedSender; use tracing::{info, warn}; -use types::SignedExecutionPayloadEnvelope; -use warp::{Filter, Rejection, Reply, reply::Response}; +use types::{BlockImportSource, SignedExecutionPayloadEnvelope}; +use warp::{ + Filter, Rejection, Reply, + hyper::{Body, Response}, +}; // POST beacon/execution_payload_envelope (SSZ) pub(crate) fn post_beacon_execution_payload_envelope_ssz( @@ -77,40 +84,71 @@ pub(crate) fn post_beacon_execution_payload_envelope( .boxed() } /// Publishes a signed execution payload envelope to the network. +/// TODO(gloas): Add gossip verification (BroadcastValidation::Gossip) before import. pub async fn publish_execution_payload_envelope( envelope: SignedExecutionPayloadEnvelope, chain: Arc>, network_tx: &UnboundedSender>, -) -> Result { +) -> Result, Rejection> { let slot = envelope.message.slot; let beacon_block_root = envelope.message.beacon_block_root; + let builder_index = envelope.message.builder_index; - // TODO(gloas): Replace this check once we have gossip validation. if !chain.spec.is_gloas_scheduled() { return Err(warp_utils::reject::custom_bad_request( "Execution payload envelopes are not supported before the Gloas fork".into(), )); } - // TODO(gloas): We should probably add validation here i.e. 
BroadcastValidation::Gossip - info!( - %slot, - %beacon_block_root, - builder_index = envelope.message.builder_index, - "Publishing signed execution payload envelope to network" - ); + let signed_envelope = Arc::new(envelope); - // Publish to the network - crate::utils::publish_pubsub_message( - network_tx, - PubsubMessage::ExecutionPayload(Box::new(envelope)), - ) - .map_err(|_| { - warn!(%slot, "Failed to publish execution payload envelope to network"); - warp_utils::reject::custom_server_error( - "Unable to publish execution payload envelope to network".into(), + // The publish_fn is called inside process_execution_payload_envelope after consensus + // verification but before the EL call. + let envelope_for_publish = signed_envelope.clone(); + let sender = network_tx.clone(); + let publish_fn = move || { + info!( + %slot, + %beacon_block_root, + builder_index, + "Publishing signed execution payload envelope to network" + ); + crate::utils::publish_pubsub_message( + &sender, + PubsubMessage::ExecutionPayload(Box::new((*envelope_for_publish).clone())), ) - })?; + .map_err(|_| { + warn!(%slot, "Failed to publish execution payload envelope to network"); + EnvelopeError::InternalError( + "Unable to publish execution payload envelope to network".to_owned(), + ) + }) + }; + + let ctx = chain.gossip_verification_context(); + let Ok(gossip_verifed_envelope) = GossipVerifiedEnvelope::new(signed_envelope, &ctx) else { + warn!(%slot, %beacon_block_root, "Execution payload envelope rejected"); + return Err(warp_utils::reject::custom_bad_request( + "execution payload envelope rejected, gossip verification".to_string(), + )); + }; + + // Import the envelope locally (runs state transition and notifies the EL). 
+ chain + .process_execution_payload_envelope( + beacon_block_root, + gossip_verifed_envelope, + NotifyExecutionLayer::Yes, + BlockImportSource::HttpApi, + publish_fn, + ) + .await + .map_err(|e| { + warn!(%slot, %beacon_block_root, reason = ?e, "Execution payload envelope rejected"); + warp_utils::reject::custom_bad_request(format!( + "execution payload envelope rejected: {e:?}" + )) + })?; Ok(warp::reply().into_response()) } diff --git a/beacon_node/lighthouse_network/src/service/api_types.rs b/beacon_node/lighthouse_network/src/service/api_types.rs index 486a4438579..a190a42a80e 100644 --- a/beacon_node/lighthouse_network/src/service/api_types.rs +++ b/beacon_node/lighthouse_network/src/service/api_types.rs @@ -31,6 +31,8 @@ pub enum SyncRequestId { BlobsByRange(BlobsByRangeRequestId), /// Data columns by range request DataColumnsByRange(DataColumnsByRangeRequestId), + /// Request searching for an execution payload envelope given a block root. + SinglePayloadEnvelope { id: SingleLookupReqId }, } /// Request ID for data_columns_by_root requests. Block lookups do not issue this request directly. 
diff --git a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs index 1f55d9a8789..2e04847630c 100644 --- a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs @@ -1290,6 +1290,17 @@ impl NetworkBeaconProcessor { self.send_sync_message(SyncMessage::UnknownParentBlock(peer_id, block, block_root)); return None; } + Err(BlockError::ParentEnvelopeUnknown { parent_root }) => { + debug!( + ?block_root, + ?parent_root, + "Parent envelope not yet available for gossip block" + ); + self.send_sync_message(SyncMessage::UnknownParentEnvelope( + peer_id, block, block_root, + )); + return None; + } Err(e @ BlockError::BeaconChainError(_)) => { debug!( error = ?e, @@ -1578,6 +1589,16 @@ impl NetworkBeaconProcessor { "Block with unknown parent attempted to be processed" ); } + Err(BlockError::ParentEnvelopeUnknown { parent_root }) => { + debug!( + %block_root, + ?parent_root, + "Parent envelope not yet available, need envelope lookup" + ); + // Unlike ParentUnknown, this can legitimately happen during processing + // because the parent envelope may not have arrived yet. The lookup + // system will handle retrying via Action::ParentEnvelopeUnknown. + } Err(e @ BlockError::ExecutionPayloadError(epe)) if !epe.penalize_peer() => { debug!( error = %e, diff --git a/beacon_node/network/src/network_beacon_processor/mod.rs b/beacon_node/network/src/network_beacon_processor/mod.rs index f74e7dacfba..ca5710076b2 100644 --- a/beacon_node/network/src/network_beacon_processor/mod.rs +++ b/beacon_node/network/src/network_beacon_processor/mod.rs @@ -541,6 +541,22 @@ impl NetworkBeaconProcessor { }) } + /// Create a new `Work` event for an RPC payload envelope. 
+ pub fn send_rpc_payload_envelope( + self: &Arc, + envelope: Arc>, + seen_timestamp: Duration, + process_type: BlockProcessType, + ) -> Result<(), Error> { + let process_fn = + self.clone() + .generate_rpc_envelope_process_fn(envelope, seen_timestamp, process_type); + self.try_send(BeaconWorkEvent { + drop_during_sync: false, + work: Work::RpcPayloadEnvelope { process_fn }, + }) + } + /// Create a new `Work` event for some blobs, where the result from computation (if any) is /// sent to the other side of `result_tx`. pub fn send_rpc_blobs( diff --git a/beacon_node/network/src/network_beacon_processor/sync_methods.rs b/beacon_node/network/src/network_beacon_processor/sync_methods.rs index f7fbce8e568..b4586994e49 100644 --- a/beacon_node/network/src/network_beacon_processor/sync_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/sync_methods.rs @@ -4,7 +4,7 @@ use crate::sync::BatchProcessResult; use crate::sync::manager::CustodyBatchProcessResult; use crate::sync::{ ChainId, - manager::{BlockProcessType, SyncMessage}, + manager::{BlockProcessType, BlockProcessingResult, SyncMessage}, }; use beacon_chain::block_verification_types::LookupBlock; use beacon_chain::block_verification_types::{AsBlock, RangeSyncBlock}; @@ -28,7 +28,9 @@ use store::KzgCommitment; use tracing::{debug, debug_span, error, info, instrument, warn}; use types::data::FixedBlobSidecarList; use types::kzg_ext::format_kzg_commitments; -use types::{BlockImportSource, DataColumnSidecarList, Epoch, Hash256}; +use types::{ + BlockImportSource, DataColumnSidecarList, Epoch, Hash256, SignedExecutionPayloadEnvelope, +}; /// Id associated to a batch processing request, either a sync batch or a parent lookup. #[derive(Clone, Debug, PartialEq)] @@ -73,6 +75,80 @@ impl NetworkBeaconProcessor { Box::pin(process_fn) } + /// Returns an async closure which processes a payload envelope received via RPC. 
+ pub fn generate_rpc_envelope_process_fn( + self: Arc, + envelope: Arc>, + seen_timestamp: Duration, + process_type: BlockProcessType, + ) -> AsyncFn { + let process_fn = async move { + self.process_rpc_envelope(envelope, seen_timestamp, process_type) + .await; + }; + Box::pin(process_fn) + } + + /// Process an execution payload envelope received via RPC. + async fn process_rpc_envelope( + self: Arc, + envelope: Arc>, + _seen_timestamp: Duration, + process_type: BlockProcessType, + ) { + let beacon_block_root = envelope.beacon_block_root(); + + // Verify the envelope using the gossip verification path (same checks apply to RPC) + let verified_envelope = match self.chain.verify_envelope_for_gossip(envelope).await { + Ok(verified) => verified, + Err(e) => { + debug!( + error = ?e, + ?beacon_block_root, + "RPC payload envelope failed verification" + ); + self.send_sync_message(SyncMessage::BlockComponentProcessed { + process_type, + result: BlockProcessingResult::Err(BlockError::InternalError(format!( + "Envelope verification failed: {e:?}" + ))), + }); + return; + } + }; + + // Process the verified envelope + let result = self + .chain + .process_execution_payload_envelope( + beacon_block_root, + verified_envelope, + NotifyExecutionLayer::Yes, + BlockImportSource::Lookup, + || Ok(()), + ) + .await; + + let processing_result = match result { + Ok(status) => BlockProcessingResult::Ok(status), + Err(e) => { + debug!( + error = ?e, + ?beacon_block_root, + "RPC payload envelope processing failed" + ); + BlockProcessingResult::Err(BlockError::InternalError(format!( + "Envelope processing failed: {e:?}" + ))) + } + }; + + self.send_sync_message(SyncMessage::BlockComponentProcessed { + process_type, + result: processing_result, + }); + } + /// Returns the `process_fn` and `ignore_fn` required when requeuing an RPC block. 
pub fn generate_lookup_beacon_block_fns( self: Arc, diff --git a/beacon_node/network/src/router.rs b/beacon_node/network/src/router.rs index e6982e6a847..3fb21969756 100644 --- a/beacon_node/network/src/router.rs +++ b/beacon_node/network/src/router.rs @@ -24,7 +24,10 @@ use std::time::{Duration, SystemTime, UNIX_EPOCH}; use tokio::sync::mpsc; use tokio_stream::wrappers::UnboundedReceiverStream; use tracing::{debug, error, trace, warn}; -use types::{BlobSidecar, DataColumnSidecar, EthSpec, ForkContext, SignedBeaconBlock}; +use types::{ + BlobSidecar, DataColumnSidecar, EthSpec, ForkContext, SignedBeaconBlock, + SignedExecutionPayloadEnvelope, +}; /// Handles messages from the network and routes them to the appropriate service to be handled. pub struct Router { @@ -327,10 +330,13 @@ impl Router { Response::DataColumnsByRange(data_column) => { self.on_data_columns_by_range_response(peer_id, app_request_id, data_column); } - // TODO(EIP-7732): implement outgoing payload envelopes by range and root - // responses once sync manager requests them. - Response::PayloadEnvelopesByRoot(_) | Response::PayloadEnvelopesByRange(_) => { - debug!("Requesting envelopes by root and by range not supported yet"); + Response::PayloadEnvelopesByRoot(envelope) => { + self.on_payload_envelopes_by_root_response(peer_id, app_request_id, envelope); + } + // TODO(EIP-7732): implement outgoing payload envelopes by range responses once + // range sync requests them. + Response::PayloadEnvelopesByRange(_) => { + unreachable!() } // Light client responses should not be received Response::LightClientBootstrap(_) @@ -703,6 +709,40 @@ impl Router { }); } + /// Handle a `PayloadEnvelopesByRoot` response from the peer. + pub fn on_payload_envelopes_by_root_response( + &mut self, + peer_id: PeerId, + app_request_id: AppRequestId, + envelope: Option>>, + ) { + let sync_request_id = match app_request_id { + AppRequestId::Sync(sync_id) => match sync_id { + id @ SyncRequestId::SinglePayloadEnvelope { .. 
} => id, + other => { + crit!(request = ?other, "PayloadEnvelopesByRoot response on incorrect request"); + return; + } + }, + AppRequestId::Router => { + crit!(%peer_id, "All PayloadEnvelopesByRoot requests belong to sync"); + return; + } + AppRequestId::Internal => unreachable!("Handled internally"), + }; + + trace!( + %peer_id, + "Received PayloadEnvelopesByRoot Response" + ); + self.send_to_sync(SyncMessage::RpcPayloadEnvelope { + peer_id, + sync_request_id, + envelope, + seen_timestamp: timestamp_now(), + }); + } + /// Handle a `BlobsByRoot` response from the peer. pub fn on_blobs_by_root_response( &mut self, diff --git a/beacon_node/network/src/sync/block_lookups/mod.rs b/beacon_node/network/src/sync/block_lookups/mod.rs index 394f2fc37d5..7b4e3ce753e 100644 --- a/beacon_node/network/src/sync/block_lookups/mod.rs +++ b/beacon_node/network/src/sync/block_lookups/mod.rs @@ -109,6 +109,7 @@ pub type SingleLookupId = u32; enum Action { Retry, ParentUnknown { parent_root: Hash256 }, + ParentEnvelopeUnknown { parent_root: Hash256 }, Drop(/* reason: */ String), Continue, } @@ -559,6 +560,19 @@ impl BlockLookups { BlockProcessType::SingleCustodyColumn(id) => { self.on_processing_result_inner::>(id, result, cx) } + BlockProcessType::SinglePayloadEnvelope { id, block_root } => { + match result { + BlockProcessingResult::Ok(_) => { + self.continue_envelope_child_lookups(block_root, cx); + } + BlockProcessingResult::Err(e) => { + debug!(%id, error = ?e, "Payload envelope processing failed"); + // TODO(EIP-7732): resolve awaiting_envelope on affected lookups so they can retry + } + _ => {} + } + return; + } }; self.on_lookup_result(process_type.id(), lookup_result, "processing_result", cx); } @@ -645,6 +659,12 @@ impl BlockLookups { request_state.revert_to_awaiting_processing()?; Action::ParentUnknown { parent_root } } + BlockError::ParentEnvelopeUnknown { parent_root } => { + // The parent block is known but its execution payload envelope is missing. 
+ // Revert to awaiting processing and fetch the envelope via RPC. + request_state.revert_to_awaiting_processing()?; + Action::ParentEnvelopeUnknown { parent_root } + } ref e @ BlockError::ExecutionPayloadError(ref epe) if !epe.penalize_peer() => { // These errors indicate that the execution layer is offline // and failed to validate the execution payload. Do not downscore peer. @@ -742,6 +762,26 @@ impl BlockLookups { ))) } } + Action::ParentEnvelopeUnknown { parent_root } => { + let peers = lookup.all_peers(); + lookup.set_awaiting_envelope(parent_root); + // Pick a peer to request the envelope from + let peer_id = peers.first().copied().ok_or_else(|| { + LookupRequestError::Failed("No peers available for envelope request".to_owned()) + })?; + match cx.envelope_lookup_request(lookup_id, peer_id, parent_root) { + Ok(_) => { + debug!( + id = lookup_id, + ?block_root, + ?parent_root, + "Requesting missing parent envelope" + ); + Ok(LookupResult::Pending) + } + Err(e) => Err(LookupRequestError::SendFailedNetwork(e)), + } + } Action::Drop(reason) => { // Drop with noop Err(LookupRequestError::Failed(reason)) @@ -809,6 +849,33 @@ impl BlockLookups { } } + /// Makes progress on lookups that were waiting for a parent envelope to be imported. + pub fn continue_envelope_child_lookups( + &mut self, + block_root: Hash256, + cx: &mut SyncNetworkContext, + ) { + let mut lookup_results = vec![]; + + for (id, lookup) in self.single_block_lookups.iter_mut() { + if lookup.awaiting_envelope() == Some(block_root) { + lookup.resolve_awaiting_envelope(); + debug!( + envelope_root = ?block_root, + id, + block_root = ?lookup.block_root(), + "Continuing lookup after envelope imported" + ); + let result = lookup.continue_requests(cx); + lookup_results.push((*id, result)); + } + } + + for (id, result) in lookup_results { + self.on_lookup_result(id, result, "continue_envelope_child_lookups", cx); + } + } + /// Drops `dropped_id` lookup and all its children recursively. 
Lookups awaiting a parent need /// the parent to make progress to resolve, therefore we must drop them if the parent is /// dropped. diff --git a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs index 919526c2386..51cc1910567 100644 --- a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs +++ b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs @@ -70,6 +70,7 @@ pub struct SingleBlockLookup { peers: Arc>>, block_root: Hash256, awaiting_parent: Option, + awaiting_envelope: Option, created: Instant, pub(crate) span: Span, } @@ -104,6 +105,7 @@ impl SingleBlockLookup { peers: Arc::new(RwLock::new(HashSet::from_iter(peers.iter().copied()))), block_root: requested_block_root, awaiting_parent, + awaiting_envelope: None, created: Instant::now(), span: lookup_span, } @@ -144,6 +146,20 @@ impl SingleBlockLookup { self.awaiting_parent = None; } + pub fn awaiting_envelope(&self) -> Option { + self.awaiting_envelope + } + + /// Mark this lookup as awaiting a parent envelope to be imported before processing. + pub fn set_awaiting_envelope(&mut self, parent_root: Hash256) { + self.awaiting_envelope = Some(parent_root); + } + + /// Mark this lookup as no longer awaiting a parent envelope. 
+ pub fn resolve_awaiting_envelope(&mut self) { + self.awaiting_envelope = None; + } + /// Returns the time elapsed since this lookup was created pub fn elapsed_since_created(&self) -> Duration { self.created.elapsed() @@ -185,6 +201,7 @@ impl SingleBlockLookup { /// Returns true if this request is expecting some event to make progress pub fn is_awaiting_event(&self) -> bool { self.awaiting_parent.is_some() + || self.awaiting_envelope.is_some() || self.block_request_state.state.is_awaiting_event() || match &self.component_requests { // If components are waiting for the block request to complete, here we should @@ -287,7 +304,7 @@ impl SingleBlockLookup { expected_blobs: usize, ) -> Result<(), LookupRequestError> { let id = self.id; - let awaiting_parent = self.awaiting_parent.is_some(); + let awaiting_event = self.awaiting_parent.is_some() || self.awaiting_envelope.is_some(); let request = R::request_state_mut(self).map_err(|e| LookupRequestError::BadState(e.to_owned()))?; @@ -331,7 +348,7 @@ impl SingleBlockLookup { // Otherwise, attempt to progress awaiting processing // If this request is awaiting a parent lookup to be processed, do not send for processing. // The request will be rejected with unknown parent error. - } else if !awaiting_parent { + } else if !awaiting_event { // maybe_start_processing returns Some if state == AwaitingProcess. This pattern is // useful to conditionally access the result data. 
if let Some(result) = request.get_state_mut().maybe_start_processing() { diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index 7e618d89808..256752d5fbb 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -74,7 +74,8 @@ use strum::IntoStaticStr; use tokio::sync::mpsc; use tracing::{debug, error, info, trace}; use types::{ - BlobSidecar, DataColumnSidecar, EthSpec, ForkContext, Hash256, SignedBeaconBlock, Slot, + BlobSidecar, DataColumnSidecar, EthSpec, ForkContext, Hash256, SignedBeaconBlock, + SignedExecutionPayloadEnvelope, Slot, }; /// The number of slots ahead of us that is allowed before requesting a long-range (batch) Sync @@ -133,6 +134,14 @@ pub enum SyncMessage { seen_timestamp: Duration, }, + /// An execution payload envelope has been received from the RPC. + RpcPayloadEnvelope { + sync_request_id: SyncRequestId, + peer_id: PeerId, + envelope: Option>>, + seen_timestamp: Duration, + }, + /// A block with an unknown parent has been received. UnknownParentBlock(PeerId, Arc>, Hash256), @@ -142,6 +151,9 @@ pub enum SyncMessage { /// A data column with an unknown parent has been received. UnknownParentDataColumn(PeerId, Arc>), + /// A block's parent is known but its execution payload envelope has not been received yet. + UnknownParentEnvelope(PeerId, Arc>, Hash256), + /// A peer has sent an attestation that references a block that is unknown. This triggers the /// manager to attempt to find the block matching the unknown hash. 
UnknownBlockHashFromAttestation(PeerId, Hash256), @@ -184,6 +196,7 @@ pub enum BlockProcessType { SingleBlock { id: Id }, SingleBlob { id: Id }, SingleCustodyColumn(Id), + SinglePayloadEnvelope { id: Id, block_root: Hash256 }, } impl BlockProcessType { @@ -191,7 +204,8 @@ impl BlockProcessType { match self { BlockProcessType::SingleBlock { id } | BlockProcessType::SingleBlob { id } - | BlockProcessType::SingleCustodyColumn(id) => *id, + | BlockProcessType::SingleCustodyColumn(id) + | BlockProcessType::SinglePayloadEnvelope { id, .. } => *id, } } } @@ -505,6 +519,9 @@ impl SyncManager { SyncRequestId::DataColumnsByRange(req_id) => { self.on_data_columns_by_range_response(req_id, peer_id, RpcEvent::RPCError(error)) } + SyncRequestId::SinglePayloadEnvelope { id } => { + self.on_single_envelope_response(id, peer_id, RpcEvent::RPCError(error)) + } } } @@ -839,6 +856,17 @@ impl SyncManager { } => { self.rpc_data_column_received(sync_request_id, peer_id, data_column, seen_timestamp) } + SyncMessage::RpcPayloadEnvelope { + sync_request_id, + peer_id, + envelope, + seen_timestamp, + } => self.rpc_payload_envelope_received( + sync_request_id, + peer_id, + envelope, + seen_timestamp, + ), SyncMessage::UnknownParentBlock(peer_id, block, block_root) => { let block_slot = block.slot(); let parent_root = block.parent_root(); @@ -900,6 +928,27 @@ impl SyncManager { } } } + SyncMessage::UnknownParentEnvelope(peer_id, block, block_root) => { + let block_slot = block.slot(); + let parent_root = block.parent_root(); + debug!( + %block_root, + %parent_root, + "Parent envelope not yet available, creating lookup" + ); + self.handle_unknown_parent( + peer_id, + block_root, + parent_root, + block_slot, + BlockComponent::Block(DownloadResult { + value: block.block_cloned(), + block_root, + seen_timestamp: timestamp_now(), + peer_group: PeerGroup::from_single(peer_id), + }), + ); + } SyncMessage::UnknownBlockHashFromAttestation(peer_id, block_root) => { if 
!self.notified_unknown_roots.contains(&(peer_id, block_root)) { self.notified_unknown_roots.insert((peer_id, block_root)); @@ -1200,6 +1249,59 @@ impl SyncManager { } } + fn rpc_payload_envelope_received( + &mut self, + sync_request_id: SyncRequestId, + peer_id: PeerId, + envelope: Option>>, + seen_timestamp: Duration, + ) { + match sync_request_id { + SyncRequestId::SinglePayloadEnvelope { id } => self.on_single_envelope_response( + id, + peer_id, + RpcEvent::from_chunk(envelope, seen_timestamp), + ), + _ => { + crit!(%peer_id, "bad request id for payload envelope"); + } + } + } + + fn on_single_envelope_response( + &mut self, + id: SingleLookupReqId, + peer_id: PeerId, + rpc_event: RpcEvent>>, + ) { + if let Some(resp) = self + .network + .on_single_envelope_response(id, peer_id, rpc_event) + { + match resp { + Ok((envelope, seen_timestamp)) => { + let block_root = envelope.beacon_block_root(); + debug!( + ?block_root, + %id, + "Downloaded payload envelope, sending for processing" + ); + if let Err(e) = self.network.send_envelope_for_processing( + id.req_id, + envelope, + seen_timestamp, + block_root, + ) { + error!(error = ?e, "Failed to send envelope for processing"); + } + } + Err(e) => { + debug!(error = ?e, %id, "Payload envelope download failed"); + } + } + } + } + fn on_single_blob_response( &mut self, id: SingleLookupReqId, diff --git a/beacon_node/network/src/sync/network_context.rs b/beacon_node/network/src/sync/network_context.rs index ff630bb470a..e9d289b7771 100644 --- a/beacon_node/network/src/sync/network_context.rs +++ b/beacon_node/network/src/sync/network_context.rs @@ -37,6 +37,7 @@ pub use requests::LookupVerifyError; use requests::{ ActiveRequests, BlobsByRangeRequestItems, BlobsByRootRequestItems, BlocksByRangeRequestItems, BlocksByRootRequestItems, DataColumnsByRangeRequestItems, DataColumnsByRootRequestItems, + PayloadEnvelopesByRootRequestItems, PayloadEnvelopesByRootSingleRequest, }; #[cfg(test)] use slot_clock::SlotClock; @@ -52,7 +53,7 
@@ use tracing::{Span, debug, debug_span, error, warn}; use types::data::FixedBlobSidecarList; use types::{ BlobSidecar, BlockImportSource, ColumnIndex, DataColumnSidecar, DataColumnSidecarList, EthSpec, - ForkContext, Hash256, SignedBeaconBlock, Slot, + ForkContext, Hash256, SignedBeaconBlock, SignedExecutionPayloadEnvelope, Slot, }; pub mod custody; @@ -213,6 +214,9 @@ pub struct SyncNetworkContext { /// A mapping of active DataColumnsByRange requests data_columns_by_range_requests: ActiveRequests>, + /// A mapping of active PayloadEnvelopesByRoot requests + payload_envelopes_by_root_requests: + ActiveRequests>, /// Mapping of active custody column requests for a block root custody_by_root_requests: FnvHashMap>, @@ -298,6 +302,7 @@ impl SyncNetworkContext { blocks_by_range_requests: ActiveRequests::new("blocks_by_range"), blobs_by_range_requests: ActiveRequests::new("blobs_by_range"), data_columns_by_range_requests: ActiveRequests::new("data_columns_by_range"), + payload_envelopes_by_root_requests: ActiveRequests::new("payload_envelopes_by_root"), custody_by_root_requests: <_>::default(), components_by_range_requests: FnvHashMap::default(), custody_backfill_data_column_batch_requests: FnvHashMap::default(), @@ -326,6 +331,7 @@ impl SyncNetworkContext { blocks_by_range_requests, blobs_by_range_requests, data_columns_by_range_requests, + payload_envelopes_by_root_requests, // custody_by_root_requests is a meta request of data_columns_by_root_requests custody_by_root_requests: _, // components_by_range_requests is a meta request of various _by_range requests @@ -361,12 +367,17 @@ impl SyncNetworkContext { .active_requests_of_peer(peer_id) .into_iter() .map(|req_id| SyncRequestId::DataColumnsByRange(*req_id)); + let envelope_by_root_ids = payload_envelopes_by_root_requests + .active_requests_of_peer(peer_id) + .into_iter() + .map(|id| SyncRequestId::SinglePayloadEnvelope { id: *id }); blocks_by_root_ids .chain(blobs_by_root_ids) .chain(data_column_by_root_ids) 
.chain(blocks_by_range_ids) .chain(blobs_by_range_ids) .chain(data_column_by_range_ids) + .chain(envelope_by_root_ids) .collect() } @@ -423,6 +434,7 @@ impl SyncNetworkContext { blocks_by_range_requests, blobs_by_range_requests, data_columns_by_range_requests, + payload_envelopes_by_root_requests, // custody_by_root_requests is a meta request of data_columns_by_root_requests custody_by_root_requests: _, // components_by_range_requests is a meta request of various _by_range requests @@ -445,6 +457,7 @@ impl SyncNetworkContext { .chain(blocks_by_range_requests.iter_request_peers()) .chain(blobs_by_range_requests.iter_request_peers()) .chain(data_columns_by_range_requests.iter_request_peers()) + .chain(payload_envelopes_by_root_requests.iter_request_peers()) { *active_request_count_by_peer.entry(peer_id).or_default() += 1; } @@ -927,6 +940,57 @@ impl SyncNetworkContext { Ok(LookupRequestResult::RequestSent(id.req_id)) } + /// Request a payload envelope for `block_root` from a peer. + pub fn envelope_lookup_request( + &mut self, + lookup_id: SingleLookupId, + peer_id: PeerId, + block_root: Hash256, + ) -> Result { + let id = SingleLookupReqId { + lookup_id, + req_id: self.next_id(), + }; + + let request = PayloadEnvelopesByRootSingleRequest(block_root); + + let network_request = RequestType::PayloadEnvelopesByRoot( + request + .into_request(&self.fork_context) + .map_err(RpcRequestSendError::InternalError)?, + ); + self.network_send + .send(NetworkMessage::SendRequest { + peer_id, + request: network_request, + app_request_id: AppRequestId::Sync(SyncRequestId::SinglePayloadEnvelope { id }), + }) + .map_err(|_| RpcRequestSendError::InternalError("network send error".to_owned()))?; + + debug!( + method = "PayloadEnvelopesByRoot", + ?block_root, + peer = %peer_id, + %id, + "Sync RPC request sent" + ); + + let request_span = debug_span!( + parent: Span::current(), + "lh_outgoing_envelope_by_root_request", + %block_root, + ); + self.payload_envelopes_by_root_requests.insert( 
+ id, + peer_id, + true, + PayloadEnvelopesByRootRequestItems::new(request), + request_span, + ); + + Ok(id.req_id) + } + /// Request necessary blobs for `block_root`. Requests only the necessary blobs by checking: /// - If we have a downloaded but not yet processed block /// - If the da_checker has a pending block @@ -1435,6 +1499,27 @@ impl SyncNetworkContext { self.on_rpc_response_result(resp, peer_id) } + pub(crate) fn on_single_envelope_response( + &mut self, + id: SingleLookupReqId, + peer_id: PeerId, + rpc_event: RpcEvent>>, + ) -> Option>>> { + let resp = self + .payload_envelopes_by_root_requests + .on_response(id, rpc_event); + let resp = resp.map(|res| { + res.and_then(|(mut envelopes, seen_timestamp)| { + match envelopes.pop() { + Some(envelope) => Ok((envelope, seen_timestamp)), + // Should never happen, request items enforces at least 1 chunk. + None => Err(LookupVerifyError::NotEnoughResponsesReturned { actual: 0 }.into()), + } + }) + }); + self.on_rpc_response_result(resp, peer_id) + } + pub(crate) fn on_single_blob_response( &mut self, id: SingleLookupReqId, @@ -1610,6 +1695,33 @@ impl SyncNetworkContext { }) } + pub fn send_envelope_for_processing( + &self, + id: Id, + envelope: Arc>, + seen_timestamp: Duration, + block_root: Hash256, + ) -> Result<(), SendErrorProcessor> { + let beacon_processor = self + .beacon_processor_if_enabled() + .ok_or(SendErrorProcessor::ProcessorNotAvailable)?; + + debug!(?block_root, ?id, "Sending payload envelope for processing"); + beacon_processor + .send_rpc_payload_envelope( + envelope, + seen_timestamp, + BlockProcessType::SinglePayloadEnvelope { id, block_root }, + ) + .map_err(|e| { + error!( + error = ?e, + "Failed to send sync envelope to processor" + ); + SendErrorProcessor::SendError + }) + } + pub fn send_blobs_for_processing( &self, id: Id, diff --git a/beacon_node/network/src/sync/network_context/requests.rs b/beacon_node/network/src/sync/network_context/requests.rs index 8f9540693e1..5b5e779d9bf 100644 
--- a/beacon_node/network/src/sync/network_context/requests.rs +++ b/beacon_node/network/src/sync/network_context/requests.rs @@ -16,6 +16,9 @@ pub use data_columns_by_range::DataColumnsByRangeRequestItems; pub use data_columns_by_root::{ DataColumnsByRootRequestItems, DataColumnsByRootSingleBlockRequest, }; +pub use payload_envelopes_by_root::{ + PayloadEnvelopesByRootRequestItems, PayloadEnvelopesByRootSingleRequest, +}; use crate::metrics; @@ -27,6 +30,7 @@ mod blocks_by_range; mod blocks_by_root; mod data_columns_by_range; mod data_columns_by_root; +mod payload_envelopes_by_root; #[derive(Debug, PartialEq, Eq, IntoStaticStr)] pub enum LookupVerifyError { diff --git a/beacon_node/network/src/sync/network_context/requests/payload_envelopes_by_root.rs b/beacon_node/network/src/sync/network_context/requests/payload_envelopes_by_root.rs new file mode 100644 index 00000000000..7f7097971d6 --- /dev/null +++ b/beacon_node/network/src/sync/network_context/requests/payload_envelopes_by_root.rs @@ -0,0 +1,53 @@ +use lighthouse_network::rpc::methods::PayloadEnvelopesByRootRequest; +use std::sync::Arc; +use types::{EthSpec, ForkContext, Hash256, SignedExecutionPayloadEnvelope}; + +use super::{ActiveRequestItems, LookupVerifyError}; + +#[derive(Debug, Copy, Clone)] +pub struct PayloadEnvelopesByRootSingleRequest(pub Hash256); + +impl PayloadEnvelopesByRootSingleRequest { + pub fn into_request( + self, + fork_context: &ForkContext, + ) -> Result { + PayloadEnvelopesByRootRequest::new(vec![self.0], fork_context) + } +} + +pub struct PayloadEnvelopesByRootRequestItems { + request: PayloadEnvelopesByRootSingleRequest, + items: Vec>>, +} + +impl PayloadEnvelopesByRootRequestItems { + pub fn new(request: PayloadEnvelopesByRootSingleRequest) -> Self { + Self { + request, + items: vec![], + } + } +} + +impl ActiveRequestItems for PayloadEnvelopesByRootRequestItems { + type Item = Arc>; + + /// Append a response to the single chunk request. 
If the chunk is valid, the request is + /// resolved immediately. + /// The active request SHOULD be dropped after `add_response` returns an error + fn add(&mut self, envelope: Self::Item) -> Result { + let beacon_block_root = envelope.beacon_block_root(); + if self.request.0 != beacon_block_root { + return Err(LookupVerifyError::UnrequestedBlockRoot(beacon_block_root)); + } + + self.items.push(envelope); + // Always returns true, payload envelopes by root expects a single response + Ok(true) + } + + fn consume(&mut self) -> Vec { + std::mem::take(&mut self.items) + } +} From 419645cc965f7eac1295493acd52167403e32fac Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 31 Mar 2026 13:25:00 +1100 Subject: [PATCH 050/127] Update CURRENT_SCHEMA_VERSION to 29 --- beacon_node/store/src/metadata.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/beacon_node/store/src/metadata.rs b/beacon_node/store/src/metadata.rs index cf494684515..215cdb2b64d 100644 --- a/beacon_node/store/src/metadata.rs +++ b/beacon_node/store/src/metadata.rs @@ -4,7 +4,7 @@ use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; use types::{Hash256, Slot}; -pub const CURRENT_SCHEMA_VERSION: SchemaVersion = SchemaVersion(28); +pub const CURRENT_SCHEMA_VERSION: SchemaVersion = SchemaVersion(29); // All the keys that get stored under the `BeaconMeta` column. 
// From 5c6e171a229815a089e1d993d70666751478c572 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 31 Mar 2026 13:52:17 +1100 Subject: [PATCH 051/127] Add schema v29 migration --- beacon_node/beacon_chain/src/schema_change.rs | 15 ++- .../src/schema_change/migration_schema_v29.rs | 100 ++++++++++++++++++ 2 files changed, 114 insertions(+), 1 deletion(-) create mode 100644 beacon_node/beacon_chain/src/schema_change/migration_schema_v29.rs diff --git a/beacon_node/beacon_chain/src/schema_change.rs b/beacon_node/beacon_chain/src/schema_change.rs index ed82143c389..fa2ab70d210 100644 --- a/beacon_node/beacon_chain/src/schema_change.rs +++ b/beacon_node/beacon_chain/src/schema_change.rs @@ -1,5 +1,8 @@ //! Utilities for managing database schema changes. +mod migration_schema_v29; + use crate::beacon_chain::BeaconChainTypes; +use migration_schema_v29::{downgrade_from_v29, upgrade_to_v29}; use std::sync::Arc; use store::Error as StoreError; use store::hot_cold_store::{HotColdDB, HotColdDBError}; @@ -10,13 +13,23 @@ use store::metadata::{CURRENT_SCHEMA_VERSION, SchemaVersion}; /// All migrations for schema versions up to and including v28 have been removed. Nodes on live /// networks are already running v28, so only the current version check remains. pub fn migrate_schema( - _db: Arc>, + db: Arc>, from: SchemaVersion, to: SchemaVersion, ) -> Result<(), StoreError> { match (from, to) { // Migrating from the current schema version to itself is always OK, a no-op. (_, _) if from == to && to == CURRENT_SCHEMA_VERSION => Ok(()), + // Upgrade from v28 to v29. + (SchemaVersion(28), SchemaVersion(29)) => { + upgrade_to_v29::(&db)?; + db.store_schema_version_atomically(to, vec![]) + } + // Downgrade from v29 to v28. + (SchemaVersion(29), SchemaVersion(28)) => { + downgrade_from_v29::(&db)?; + db.store_schema_version_atomically(to, vec![]) + } // Anything else is an error. 
(_, _) => Err(HotColdDBError::UnsupportedSchemaVersion { target_version: to, diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v29.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v29.rs new file mode 100644 index 00000000000..c88a1cada14 --- /dev/null +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v29.rs @@ -0,0 +1,100 @@ +use crate::beacon_chain::BeaconChainTypes; +use crate::persisted_fork_choice::{PersistedForkChoiceV28, PersistedForkChoiceV29}; +use ssz::Decode; +use store::hot_cold_store::HotColdDB; +use store::{DBColumn, Error as StoreError, KeyValueStore}; +use types::{EthSpec, Hash256}; + +/// The key used to store the fork choice in the database. +const FORK_CHOICE_DB_KEY: Hash256 = Hash256::ZERO; + +/// Upgrade from schema v28 to v29 (no-op). +/// +/// Fails if the persisted fork choice contains any V17 (pre-Gloas) proto nodes at or after the +/// Gloas fork slot. Such nodes indicate the node synced a broken sidechain with Gloas disabled +/// and would not be able to sync the v29 chain. +pub fn upgrade_to_v29( + db: &HotColdDB, +) -> Result<(), StoreError> { + let gloas_fork_slot = db + .spec + .gloas_fork_epoch + .map(|epoch| epoch.start_slot(T::EthSpec::slots_per_epoch())); + + // If Gloas is not configured, the upgrade is a safe no-op. + let Some(gloas_fork_slot) = gloas_fork_slot else { + return Ok(()); + }; + + // Load the persisted fork choice (v28 format, uncompressed SSZ). + let Some(fc_bytes) = db + .hot_db + .get_bytes(DBColumn::ForkChoice, FORK_CHOICE_DB_KEY.as_slice())? + else { + return Ok(()); + }; + + let persisted_v28 = + PersistedForkChoiceV28::from_ssz_bytes(&fc_bytes).map_err(StoreError::SszDecodeError)?; + + // In v28 format, all nodes are ProtoNodeV17. Check if any are at/after the Gloas fork slot. 
+ let bad_node = persisted_v28 + .fork_choice_v28 + .proto_array_v28 + .nodes + .iter() + .find(|node| node.slot >= gloas_fork_slot); + + if let Some(node) = bad_node { + return Err(StoreError::MigrationError(format!( + "cannot upgrade from v28 to v29: found V17 proto node at slot {} (root: {:?}) \ + which is at or after the Gloas fork slot {}. This node has synced a chain with \ + Gloas disabled and cannot be upgraded. Please resync from scratch.", + node.slot, node.root, gloas_fork_slot, + ))); + } + + Ok(()) +} + +/// Downgrade from schema v29 to v28 (no-op). +/// +/// Fails if the persisted fork choice contains any V29 proto nodes, as these contain +/// payload-specific fields that cannot be losslessly converted back to V17 format. +pub fn downgrade_from_v29( + db: &HotColdDB, +) -> Result<(), StoreError> { + // Load the persisted fork choice (v29 format, compressed). + let Some(fc_bytes) = db + .hot_db + .get_bytes(DBColumn::ForkChoice, FORK_CHOICE_DB_KEY.as_slice())? + else { + return Ok(()); + }; + + let persisted_v29 = + PersistedForkChoiceV29::from_bytes(&fc_bytes, db.get_config()).map_err(|e| { + StoreError::MigrationError(format!( + "cannot downgrade from v29 to v28: failed to decode fork choice: {:?}", + e + )) + })?; + + let has_v29_node = persisted_v29 + .fork_choice + .proto_array + .nodes + .iter() + .any(|node| matches!(node, proto_array::core::ProtoNode::V29(_))); + + if has_v29_node { + return Err(StoreError::MigrationError( + "cannot downgrade from v29 to v28: the persisted fork choice contains V29 proto \ + nodes which cannot be losslessly converted to V17 format. The Gloas-specific \ + payload data would be lost." 
+ .to_string(), + )); + } + + Ok(()) +} From e1dcd9e692a795b32724d19bdf7d9f75b7df5f6c Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 31 Mar 2026 14:17:37 +1100 Subject: [PATCH 052/127] Update schema test --- beacon_node/beacon_chain/tests/store_tests.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index b023b8a889a..bac3cbaf4e9 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -3995,7 +3995,7 @@ async fn schema_downgrade_to_min_version(store_config: StoreConfig, archive: boo ) .await; - let min_version = CURRENT_SCHEMA_VERSION; + let min_version = SchemaVersion(28); // Save the slot clock so that the new harness doesn't revert in time. let slot_clock = harness.chain.slot_clock.clone(); From 3bc1d882f23a070a15da731f687aba5b185bfb32 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 31 Mar 2026 15:36:14 +1100 Subject: [PATCH 053/127] Clarify load_parent genesis behaviour --- .../beacon_chain/src/block_verification.rs | 54 +++++++++---------- 1 file changed, 24 insertions(+), 30 deletions(-) diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 53acc70b6e4..9bb519373ac 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -1960,37 +1960,31 @@ fn load_parent>( // Post-Gloas we must also fetch a state with the correct payload status. If the current // block builds upon the payload of its parent block, then we know the parent block is FULL // and we need to load the full state. 
- let (payload_status, parent_state_root) = - if block.as_block().fork_name_unchecked().gloas_enabled() - && let Ok(parent_bid_block_hash) = parent_block.payload_bid_block_hash() - { - if !parent_bid_block_hash.into_root().is_zero() - && block.as_block().is_parent_block_full(parent_bid_block_hash) - { - // TODO(gloas): loading the envelope here is not very efficient - let envelope = chain.store.get_payload_envelope(&root)?; - let state_root = if let Some(envelope) = envelope { - envelope.message.state_root - } else { - // The envelope may not be stored yet for the genesis/anchor - // block. Fall back to the block's state_root which is the - // post-payload state for the anchor per get_forkchoice_store. - if parent_block.slot() == chain.spec.genesis_slot { - parent_block.state_root() - } else { - return Err(BeaconChainError::DBInconsistent(format!( - "Missing envelope for parent block {root:?}", - )) - .into()); - } - }; - (StatePayloadStatus::Full, state_root) - } else { - (StatePayloadStatus::Pending, parent_block.state_root()) - } - } else { - (StatePayloadStatus::Pending, parent_block.state_root()) + let (payload_status, parent_state_root) = if parent_block.slot() == chain.spec.genesis_slot + { + // Genesis state is always pending, there is no such thing as a "genesis envelope". + // See: https://github.com/ethereum/consensus-specs/issues/5043 + (StatePayloadStatus::Pending, parent_block.state_root()) + } else if !block.as_block().fork_name_unchecked().gloas_enabled() { + // All pre-Gloas parent states are pending. + (StatePayloadStatus::Pending, parent_block.state_root()) + } else if let Ok(parent_bid_block_hash) = parent_block.payload_bid_block_hash() + && block.as_block().is_parent_block_full(parent_bid_block_hash) + { + // Post-Gloas Full block case. + // TODO(gloas): loading the envelope here is not very efficient + let Some(envelope) = chain.store.get_payload_envelope(&root)? 
else { + return Err(BeaconChainError::DBInconsistent(format!( + "Missing envelope for parent block {root:?}", + )) + .into()); }; + let state_root = envelope.message.state_root; + (StatePayloadStatus::Full, state_root) + } else { + // Post-Gloas empty block case (also covers the Gloas fork transition). + (StatePayloadStatus::Pending, parent_block.state_root()) + }; let (parent_state_root, state) = chain .store .get_advanced_hot_state(root, payload_status, block.slot(), parent_state_root)? From 517d16f2fde8f6f6f47f1214943748d0a6c33fa8 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 31 Mar 2026 15:47:50 +1100 Subject: [PATCH 054/127] Revert parent->child optimisation --- consensus/proto_array/src/proto_array.rs | 70 ++++++++---------------- 1 file changed, 24 insertions(+), 46 deletions(-) diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 81211690ff5..374190f9ed9 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -1188,26 +1188,6 @@ impl ProtoArray { Ok((best_fc_node.root, best_fc_node.payload_status)) } - /// Build a parent->children index. Invalid nodes are excluded - /// (they aren't in store.blocks in the spec). - fn build_children_index(&self) -> Vec> { - let mut children = vec![vec![]; self.nodes.len()]; - for (i, node) in self.nodes.iter().enumerate() { - if node - .execution_status() - .is_ok_and(|status| status.is_invalid()) - { - continue; - } - if let Some(parent) = node.parent() - && parent < children.len() - { - children[parent].push(i); - } - } - children - } - /// Spec: `get_filtered_block_tree`. 
/// /// Returns the set of node indices on viable branches — those with at least @@ -1218,7 +1198,6 @@ impl ProtoArray { current_slot: Slot, best_justified_checkpoint: Checkpoint, best_finalized_checkpoint: Checkpoint, - children_index: &[Vec], ) -> HashSet { let mut viable = HashSet::new(); self.filter_block_tree::( @@ -1226,7 +1205,6 @@ impl ProtoArray { current_slot, best_justified_checkpoint, best_finalized_checkpoint, - children_index, &mut viable, ); viable @@ -1239,17 +1217,25 @@ impl ProtoArray { current_slot: Slot, best_justified_checkpoint: Checkpoint, best_finalized_checkpoint: Checkpoint, - children_index: &[Vec], viable: &mut HashSet, ) -> bool { let Some(node) = self.nodes.get(node_index) else { return false; }; - let children = children_index - .get(node_index) - .map(|c| c.as_slice()) - .unwrap_or(&[]); + // Skip invalid children — they aren't in store.blocks in the spec. + let children: Vec = self + .nodes + .iter() + .enumerate() + .filter(|(_, child)| { + child.parent() == Some(node_index) + && !child + .execution_status() + .is_ok_and(|status| status.is_invalid()) + }) + .map(|(i, _)| i) + .collect(); if !children.is_empty() { // Evaluate ALL children (no short-circuit) to mark all viable branches. @@ -1261,7 +1247,6 @@ impl ProtoArray { current_slot, best_justified_checkpoint, best_finalized_checkpoint, - children_index, viable, ) }) @@ -1306,16 +1291,12 @@ impl ProtoArray { payload_status: PayloadStatus::Pending, }; - // Build parent->children index once for O(1) lookups. - let children_index = self.build_children_index(); - // Spec: `get_filtered_block_tree`. let viable_nodes = self.get_filtered_block_tree::( start_index, current_slot, best_justified_checkpoint, best_finalized_checkpoint, - &children_index, ); // Compute once rather than per-child per-level. @@ -1324,7 +1305,7 @@ impl ProtoArray { loop { let children: Vec<_> = self - .get_node_children(&head, &children_index)? + .get_node_children(&head)? 
.into_iter() .filter(|(fc_node, _)| viable_nodes.contains(&fc_node.proto_node_index)) .collect(); @@ -1487,7 +1468,6 @@ impl ProtoArray { fn get_node_children( &self, node: &IndexedForkChoiceNode, - children_index: &[Vec], ) -> Result, Error> { if node.payload_status == PayloadStatus::Pending { let proto_node = self @@ -1501,25 +1481,23 @@ impl ProtoArray { } Ok(children) } else { - let child_indices = children_index - .get(node.proto_node_index) - .map(|c| c.as_slice()) - .unwrap_or(&[]); - Ok(child_indices + Ok(self + .nodes .iter() - .filter_map(|&child_index| { - let child_node = self.nodes.get(child_index)?; - if child_node.get_parent_payload_status() != node.payload_status { - return None; - } - Some(( + .enumerate() + .filter(|(_, child_node)| { + child_node.parent() == Some(node.proto_node_index) + && child_node.get_parent_payload_status() == node.payload_status + }) + .map(|(child_index, child_node)| { + ( IndexedForkChoiceNode { root: child_node.root(), proto_node_index: child_index, payload_status: PayloadStatus::Pending, }, child_node.clone(), - )) + ) }) .collect()) } From b6728c20305ec80b2ab71405ab7d58e3fce38221 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 31 Mar 2026 16:02:19 +1100 Subject: [PATCH 055/127] Start removing more best_child/best_descend --- consensus/proto_array/src/proto_array.rs | 290 +++++------------------ 1 file changed, 56 insertions(+), 234 deletions(-) diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 374190f9ed9..456bbc5fdac 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -508,29 +508,6 @@ impl ProtoArray { // walk, so clear any stale boost from a prior call. self.previous_proposer_boost = ProposerBoost::default(); - // A second time, iterate backwards through all indices in `self.nodes`. 
- // - // We _must_ perform these functions separate from the weight-updating loop above to ensure - // that we have a fully coherent set of weights before updating parent - // best-child/descendant. - for node_index in (0..self.nodes.len()).rev() { - let node = self - .nodes - .get_mut(node_index) - .ok_or(Error::InvalidNodeIndex(node_index))?; - - // If the node has a parent, try to update its best-child and best-descendant. - if let Some(parent_index) = node.parent() { - self.maybe_update_best_child_and_descendant::( - parent_index, - node_index, - current_slot, - best_justified_checkpoint, - best_finalized_checkpoint, - )?; - } - } - Ok(()) } @@ -701,14 +678,6 @@ impl ProtoArray { self.nodes.push(node.clone()); if let Some(parent_index) = node.parent() { - self.maybe_update_best_child_and_descendant::( - parent_index, - node_index, - current_slot, - best_justified_checkpoint, - best_finalized_checkpoint, - )?; - if matches!(block.execution_status, ExecutionStatus::Valid(_)) { self.propagate_execution_payload_validation_by_index(parent_index)?; } @@ -977,26 +946,7 @@ impl ProtoArray { if !latest_valid_ancestor_is_descendant && node.root() != head_block_root { break; } else if op.latest_valid_ancestor() == Some(hash) { - // If the `best_child` or `best_descendant` of the latest valid hash was - // invalidated, set those fields to `None`. - // - // In theory, an invalid `best_child` necessarily infers an invalid - // `best_descendant`. However, we check each variable independently to - // defend against errors which might result in an invalid block being set as - // head. - if node - .best_child() - .is_some_and(|i| invalidated_indices.contains(&i)) - { - *node.best_child_mut() = None - } - if node - .best_descendant() - .is_some_and(|i| invalidated_indices.contains(&i)) - { - *node.best_descendant_mut() = None - } - + // Reached latest valid block, stop invalidating further. 
break; } } @@ -1026,14 +976,6 @@ impl ProtoArray { if let ProtoNode::V17(node) = node { node.execution_status = ExecutionStatus::Invalid(hash); } - - // It's impossible for an invalid block to lead to a "best" block, so set these - // fields to `None`. - // - // Failing to set these values will result in `Self::node_leads_to_viable_head` - // returning `false` for *valid* ancestors of invalid blocks. - *node.best_child_mut() = None; - *node.best_descendant_mut() = None; } // The block is already invalid, but keep going backwards to ensure all ancestors // are updated. @@ -1188,6 +1130,26 @@ impl ProtoArray { Ok((best_fc_node.root, best_fc_node.payload_status)) } + /// Build a parent->children index. Invalid nodes are excluded + /// (they aren't in store.blocks in the spec). + fn build_children_index(&self) -> Vec> { + let mut children = vec![vec![]; self.nodes.len()]; + for (i, node) in self.nodes.iter().enumerate() { + if node + .execution_status() + .is_ok_and(|status| status.is_invalid()) + { + continue; + } + if let Some(parent) = node.parent() + && parent < children.len() + { + children[parent].push(i); + } + } + children + } + /// Spec: `get_filtered_block_tree`. /// /// Returns the set of node indices on viable branches — those with at least @@ -1198,6 +1160,7 @@ impl ProtoArray { current_slot: Slot, best_justified_checkpoint: Checkpoint, best_finalized_checkpoint: Checkpoint, + children_index: &[Vec], ) -> HashSet { let mut viable = HashSet::new(); self.filter_block_tree::( @@ -1205,6 +1168,7 @@ impl ProtoArray { current_slot, best_justified_checkpoint, best_finalized_checkpoint, + children_index, &mut viable, ); viable @@ -1217,25 +1181,17 @@ impl ProtoArray { current_slot: Slot, best_justified_checkpoint: Checkpoint, best_finalized_checkpoint: Checkpoint, + children_index: &[Vec], viable: &mut HashSet, ) -> bool { let Some(node) = self.nodes.get(node_index) else { return false; }; - // Skip invalid children — they aren't in store.blocks in the spec. 
- let children: Vec = self - .nodes - .iter() - .enumerate() - .filter(|(_, child)| { - child.parent() == Some(node_index) - && !child - .execution_status() - .is_ok_and(|status| status.is_invalid()) - }) - .map(|(i, _)| i) - .collect(); + let children = children_index + .get(node_index) + .map(|c| c.as_slice()) + .unwrap_or(&[]); if !children.is_empty() { // Evaluate ALL children (no short-circuit) to mark all viable branches. @@ -1247,6 +1203,7 @@ impl ProtoArray { current_slot, best_justified_checkpoint, best_finalized_checkpoint, + children_index, viable, ) }) @@ -1291,12 +1248,16 @@ impl ProtoArray { payload_status: PayloadStatus::Pending, }; + // Build parent->children index once for O(1) lookups. + let children_index = self.build_children_index(); + // Spec: `get_filtered_block_tree`. let viable_nodes = self.get_filtered_block_tree::( start_index, current_slot, best_justified_checkpoint, best_finalized_checkpoint, + &children_index, ); // Compute once rather than per-child per-level. @@ -1305,7 +1266,7 @@ impl ProtoArray { loop { let children: Vec<_> = self - .get_node_children(&head)? + .get_node_children(&head, &children_index)? 
.into_iter() .filter(|(fc_node, _)| viable_nodes.contains(&fc_node.proto_node_index)) .collect(); @@ -1468,6 +1429,7 @@ impl ProtoArray { fn get_node_children( &self, node: &IndexedForkChoiceNode, + children_index: &[Vec], ) -> Result, Error> { if node.payload_status == PayloadStatus::Pending { let proto_node = self @@ -1481,23 +1443,25 @@ impl ProtoArray { } Ok(children) } else { - Ok(self - .nodes + let child_indices = children_index + .get(node.proto_node_index) + .map(|c| c.as_slice()) + .unwrap_or(&[]); + Ok(child_indices .iter() - .enumerate() - .filter(|(_, child_node)| { - child_node.parent() == Some(node.proto_node_index) - && child_node.get_parent_payload_status() == node.payload_status - }) - .map(|(child_index, child_node)| { - ( + .filter_map(|&child_index| { + let child_node = self.nodes.get(child_index)?; + if child_node.get_parent_payload_status() != node.payload_status { + return None; + } + Some(( IndexedForkChoiceNode { root: child_node.root(), proto_node_index: child_index, payload_status: PayloadStatus::Pending, }, child_node.clone(), - ) + )) }) .collect()) } @@ -1611,160 +1575,11 @@ impl ProtoArray { // If `node.parent` is less than `finalized_index`, set it to `None`. *node.parent_mut() = parent.checked_sub(finalized_index); } - if let Some(best_child) = node.best_child() { - *node.best_child_mut() = Some( - best_child - .checked_sub(finalized_index) - .ok_or(Error::IndexOverflow("best_child"))?, - ); - } - if let Some(best_descendant) = node.best_descendant() { - *node.best_descendant_mut() = Some( - best_descendant - .checked_sub(finalized_index) - .ok_or(Error::IndexOverflow("best_descendant"))?, - ); - } } Ok(()) } - /// Observe the parent at `parent_index` with respect to the child at `child_index` and - /// potentially modify the `parent.best_child` and `parent.best_descendant` values. 
- /// - /// ## Detail - /// - /// There are four outcomes: - /// - /// - The child is already the best child but it's now invalid due to a FFG change and should be removed. - /// - The child is already the best child and the parent is updated with the new - /// best-descendant. - /// - The child is not the best child but becomes the best child. - /// - The child is not the best child and does not become the best child. - fn maybe_update_best_child_and_descendant( - &mut self, - parent_index: usize, - child_index: usize, - current_slot: Slot, - best_justified_checkpoint: Checkpoint, - best_finalized_checkpoint: Checkpoint, - ) -> Result<(), Error> { - let child = self - .nodes - .get(child_index) - .ok_or(Error::InvalidNodeIndex(child_index))?; - - let parent = self - .nodes - .get(parent_index) - .ok_or(Error::InvalidNodeIndex(parent_index))?; - - let child_leads_to_viable_head = self.node_leads_to_viable_head::( - child, - current_slot, - best_justified_checkpoint, - best_finalized_checkpoint, - )?; - - // These three variables are aliases to the three options that we may set the - // `parent.best_child` and `parent.best_descendant` to. - let change_to_none = (None, None); - let change_to_child = ( - Some(child_index), - child.best_descendant().or(Some(child_index)), - ); - let no_change = (parent.best_child(), parent.best_descendant()); - - let (new_best_child, new_best_descendant) = - if let Some(best_child_index) = parent.best_child() { - if best_child_index == child_index && !child_leads_to_viable_head { - // If the child is already the best-child of the parent but it's not viable for - // the head, remove it. - change_to_none - } else if best_child_index == child_index { - // If the child is the best-child already, set it again to ensure that the - // best-descendant of the parent is updated. 
- change_to_child - } else { - let best_child = self - .nodes - .get(best_child_index) - .ok_or(Error::InvalidBestDescendant(best_child_index))?; - - let best_child_leads_to_viable_head = self.node_leads_to_viable_head::( - best_child, - current_slot, - best_justified_checkpoint, - best_finalized_checkpoint, - )?; - - if child_leads_to_viable_head && !best_child_leads_to_viable_head { - change_to_child - } else if !child_leads_to_viable_head && best_child_leads_to_viable_head { - no_change - } else if child.weight() > best_child.weight() { - change_to_child - } else if child.weight() < best_child.weight() { - no_change - } else if *child.root() >= *best_child.root() { - change_to_child - } else { - no_change - } - } - } else if child_leads_to_viable_head { - change_to_child - } else { - no_change - }; - - let parent = self - .nodes - .get_mut(parent_index) - .ok_or(Error::InvalidNodeIndex(parent_index))?; - - *parent.best_child_mut() = new_best_child; - *parent.best_descendant_mut() = new_best_descendant; - - Ok(()) - } - - /// Indicates if the node itself is viable for the head, or if its best descendant is viable - /// for the head. 
- fn node_leads_to_viable_head( - &self, - node: &ProtoNode, - current_slot: Slot, - best_justified_checkpoint: Checkpoint, - best_finalized_checkpoint: Checkpoint, - ) -> Result { - let best_descendant_is_viable_for_head = - if let Some(best_descendant_index) = node.best_descendant() { - let best_descendant = self - .nodes - .get(best_descendant_index) - .ok_or(Error::InvalidBestDescendant(best_descendant_index))?; - - self.node_is_viable_for_head::( - best_descendant, - current_slot, - best_justified_checkpoint, - best_finalized_checkpoint, - ) - } else { - false - }; - - Ok(best_descendant_is_viable_for_head - || self.node_is_viable_for_head::( - node, - current_slot, - best_justified_checkpoint, - best_finalized_checkpoint, - )) - } - /// This is the equivalent to the `filter_block_tree` function in the eth2 spec: /// /// https://github.com/ethereum/eth2.0-specs/blob/v0.10.0/specs/phase0/fork-choice.md#filter_block_tree @@ -1955,8 +1770,15 @@ impl ProtoArray { ) -> Vec<&ProtoNode> { self.nodes .iter() - .filter(|node| { - node.best_child().is_none() + .enumerate() + .filter(|(i, node)| { + // TODO(gloas): we unoptimized this for Gloas fork choice, could re-optimize. 
+ let num_children = self + .nodes + .iter() + .filter(|node| node.parent == Some(i)) + .count(); + num_children == 0 && self.is_finalized_checkpoint_or_descendant::( node.root(), best_finalized_checkpoint, From 5353710e0ae2fea78559daac76903b1e54166658 Mon Sep 17 00:00:00 2001 From: dapplion <35266934+dapplion@users.noreply.github.com> Date: Tue, 31 Mar 2026 00:18:24 -0500 Subject: [PATCH 056/127] Fix compilation, clear best_child/best_descendant in migration - Fix leaf detection in heads_descended_from_finalization (parent() method call, map away enumerate index) - Clear best_child and best_descendant in v28->v29 migration (no longer used, replaced by virtual tree walk) - Migration now rewrites fork choice data instead of being a no-op --- .../src/schema_change/migration_schema_v29.rs | 65 ++++++++++++------- consensus/fork_choice/src/fork_choice.rs | 2 - .../src/fork_choice_test_definition.rs | 9 +-- consensus/proto_array/src/proto_array.rs | 20 ++---- .../src/proto_array_fork_choice.rs | 26 +------- 5 files changed, 49 insertions(+), 73 deletions(-) diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v29.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v29.rs index c88a1cada14..6c82e8a737d 100644 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v29.rs +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v29.rs @@ -2,17 +2,18 @@ use crate::beacon_chain::BeaconChainTypes; use crate::persisted_fork_choice::{PersistedForkChoiceV28, PersistedForkChoiceV29}; use ssz::Decode; use store::hot_cold_store::HotColdDB; -use store::{DBColumn, Error as StoreError, KeyValueStore}; +use store::{DBColumn, Error as StoreError, KeyValueStore, KeyValueStoreOp}; use types::{EthSpec, Hash256}; /// The key used to store the fork choice in the database. const FORK_CHOICE_DB_KEY: Hash256 = Hash256::ZERO; -/// Upgrade from schema v28 to v29 (no-op). +/// Upgrade from schema v28 to v29. 
/// -/// Fails if the persisted fork choice contains any V17 (pre-Gloas) proto nodes at or after the -/// Gloas fork slot. Such nodes indicate the node synced a broken sidechain with Gloas disabled -/// and would not be able to sync the v29 chain. +/// - Clears `best_child` and `best_descendant` on all nodes (replaced by +/// virtual tree walk). +/// - Fails if the persisted fork choice contains any V17 (pre-Gloas) proto +/// nodes at or after the Gloas fork slot. pub fn upgrade_to_v29( db: &HotColdDB, ) -> Result<(), StoreError> { @@ -21,11 +22,6 @@ pub fn upgrade_to_v29( .gloas_fork_epoch .map(|epoch| epoch.start_slot(T::EthSpec::slots_per_epoch())); - // If Gloas is not configured, the upgrade is a safe no-op. - let Some(gloas_fork_slot) = gloas_fork_slot else { - return Ok(()); - }; - // Load the persisted fork choice (v28 format, uncompressed SSZ). let Some(fc_bytes) = db .hot_db @@ -34,26 +30,45 @@ pub fn upgrade_to_v29( return Ok(()); }; - let persisted_v28 = + let mut persisted_v28 = PersistedForkChoiceV28::from_ssz_bytes(&fc_bytes).map_err(StoreError::SszDecodeError)?; - // In v28 format, all nodes are ProtoNodeV17. Check if any are at/after the Gloas fork slot. - let bad_node = persisted_v28 - .fork_choice_v28 - .proto_array_v28 - .nodes - .iter() - .find(|node| node.slot >= gloas_fork_slot); + // Check for V17 nodes at/after the Gloas fork slot. + if let Some(gloas_fork_slot) = gloas_fork_slot { + let bad_node = persisted_v28 + .fork_choice_v28 + .proto_array_v28 + .nodes + .iter() + .find(|node| node.slot >= gloas_fork_slot); + + if let Some(node) = bad_node { + return Err(StoreError::MigrationError(format!( + "cannot upgrade from v28 to v29: found V17 proto node at slot {} (root: {:?}) \ + which is at or after the Gloas fork slot {}. This node has synced a chain with \ + Gloas disabled and cannot be upgraded. 
Please resync from scratch.", + node.slot, node.root, gloas_fork_slot, + ))); + } + } - if let Some(node) = bad_node { - return Err(StoreError::MigrationError(format!( - "cannot upgrade from v28 to v29: found V17 proto node at slot {} (root: {:?}) \ - which is at or after the Gloas fork slot {}. This node has synced a chain with \ - Gloas disabled and cannot be upgraded. Please resync from scratch.", - node.slot, node.root, gloas_fork_slot, - ))); + // Clear best_child/best_descendant — replaced by the virtual tree walk. + for node in &mut persisted_v28.fork_choice_v28.proto_array_v28.nodes { + node.best_child = None; + node.best_descendant = None; } + // Convert to v29 and write back. + let persisted_v29 = PersistedForkChoiceV29::from(persisted_v28); + let fc_bytes = persisted_v29 + .as_bytes(db.get_config()) + .map_err(|e| StoreError::MigrationError(format!("failed to encode v29: {:?}", e)))?; + db.hot_db.do_atomically(vec![KeyValueStoreOp::PutKeyValue( + DBColumn::ForkChoice, + FORK_CHOICE_DB_KEY.as_slice().to_vec(), + fc_bytes, + )])?; + Ok(()) } diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index 1f25afee8e5..3b13cd4429f 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -1008,8 +1008,6 @@ where proposer_index: Some(block.proposer_index()), }, current_slot, - self.justified_checkpoint(), - self.finalized_checkpoint(), spec, block_delay, )?; diff --git a/consensus/proto_array/src/fork_choice_test_definition.rs b/consensus/proto_array/src/fork_choice_test_definition.rs index b6ccc4d4359..34d7f2e48ee 100644 --- a/consensus/proto_array/src/fork_choice_test_definition.rs +++ b/consensus/proto_array/src/fork_choice_test_definition.rs @@ -283,14 +283,7 @@ impl ForkChoiceTestDefinition { proposer_index: Some(0), }; fork_choice - .process_block::( - block, - slot, - self.justified_checkpoint, - self.finalized_checkpoint, - &spec, - Duration::ZERO, - ) + 
.process_block::(block, slot, &spec, Duration::ZERO) .unwrap_or_else(|e| { panic!( "process_block op at index {} returned error: {:?}", diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 456bbc5fdac..f68d3eb71b0 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -389,9 +389,6 @@ impl ProtoArray { pub fn apply_score_changes( &mut self, mut deltas: Vec, - best_justified_checkpoint: Checkpoint, - best_finalized_checkpoint: Checkpoint, - current_slot: Slot, ) -> Result<(), Error> { if deltas.len() != self.indices.len() { return Err(Error::InvalidDeltaLen { @@ -518,8 +515,6 @@ impl ProtoArray { &mut self, block: Block, current_slot: Slot, - best_justified_checkpoint: Checkpoint, - best_finalized_checkpoint: Checkpoint, spec: &ChainSpec, time_into_slot: Duration, ) -> Result<(), Error> { @@ -677,10 +672,10 @@ impl ProtoArray { self.indices.insert(node.root(), node_index); self.nodes.push(node.clone()); - if let Some(parent_index) = node.parent() { - if matches!(block.execution_status, ExecutionStatus::Valid(_)) { - self.propagate_execution_payload_validation_by_index(parent_index)?; - } + if let Some(parent_index) = node.parent() + && matches!(block.execution_status, ExecutionStatus::Valid(_)) + { + self.propagate_execution_payload_validation_by_index(parent_index)?; } Ok(()) @@ -1773,17 +1768,14 @@ impl ProtoArray { .enumerate() .filter(|(i, node)| { // TODO(gloas): we unoptimized this for Gloas fork choice, could re-optimize. 
- let num_children = self - .nodes - .iter() - .filter(|node| node.parent == Some(i)) - .count(); + let num_children = self.nodes.iter().filter(|n| n.parent() == Some(*i)).count(); num_children == 0 && self.is_finalized_checkpoint_or_descendant::( node.root(), best_finalized_checkpoint, ) }) + .map(|(_, node)| node) .collect() } } diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index 18d593f0e6e..6c90af13028 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -512,8 +512,6 @@ impl ProtoArrayForkChoice { .on_block::( block, current_slot, - justified_checkpoint, - finalized_checkpoint, spec, // Anchor block is always timely (delay=0 ensures both timeliness // checks pass). Combined with `is_genesis` override in on_block, @@ -615,8 +613,6 @@ impl ProtoArrayForkChoice { &mut self, block: Block, current_slot: Slot, - justified_checkpoint: Checkpoint, - finalized_checkpoint: Checkpoint, spec: &ChainSpec, time_into_slot: Duration, ) -> Result<(), String> { @@ -625,14 +621,7 @@ impl ProtoArrayForkChoice { } self.proto_array - .on_block::( - block, - current_slot, - justified_checkpoint, - finalized_checkpoint, - spec, - time_into_slot, - ) + .on_block::(block, current_slot, spec, time_into_slot) .map_err(|e| format!("process_block_error: {:?}", e)) } @@ -667,12 +656,7 @@ impl ProtoArrayForkChoice { .map_err(|e| format!("find_head compute_deltas failed: {:?}", e))?; self.proto_array - .apply_score_changes::( - deltas, - justified_checkpoint, - finalized_checkpoint, - current_slot, - ) + .apply_score_changes::(deltas) .map_err(|e| format!("find_head apply_score_changes failed: {:?}", e))?; *old_balances = new_balances.clone(); @@ -1354,8 +1338,6 @@ mod test_compute_deltas { proposer_index: Some(0), }, genesis_slot + 1, - genesis_checkpoint, - genesis_checkpoint, &spec, Duration::ZERO, ) @@ -1384,8 +1366,6 @@ mod 
test_compute_deltas { proposer_index: Some(0), }, genesis_slot + 1, - genesis_checkpoint, - genesis_checkpoint, &spec, Duration::ZERO, ) @@ -1521,8 +1501,6 @@ mod test_compute_deltas { proposer_index: Some(0), }, Slot::from(block.slot), - genesis_checkpoint, - genesis_checkpoint, &spec, Duration::ZERO, ) From 9f08f48880822937fd8d78475d9e088287564dd2 Mon Sep 17 00:00:00 2001 From: Eitan Seri- Levi Date: Mon, 30 Mar 2026 23:50:31 -0700 Subject: [PATCH 057/127] import envelope status into fc --- .../src/payload_envelope_verification/import.rs | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/beacon_node/beacon_chain/src/payload_envelope_verification/import.rs b/beacon_node/beacon_chain/src/payload_envelope_verification/import.rs index 2ee315e5592..ed121ccb94a 100644 --- a/beacon_node/beacon_chain/src/payload_envelope_verification/import.rs +++ b/beacon_node/beacon_chain/src/payload_envelope_verification/import.rs @@ -239,16 +239,14 @@ impl BeaconChain { // Note that a duplicate cache/payload status table should prevent this from happening // but it doesnt hurt to be defensive. - // TODO(gloas) when the code below is implemented we can delete this drop - drop(fork_choice_reader); - - // TODO(gloas) no fork choice logic yet // Take an exclusive write-lock on fork choice. It's very important to prevent deadlocks by // avoiding taking other locks whilst holding this lock. - // let fork_choice = parking_lot::RwLockUpgradableReadGuard::upgrade(fork_choice_reader); + let mut fork_choice = parking_lot::RwLockUpgradableReadGuard::upgrade(fork_choice_reader); - // TODO(gloas) Do we need this check? Do not import a block that doesn't descend from the finalized root. - // let signed_block = check_block_is_finalized_checkpoint_or_descendant(self, &fork_choice, signed_block)?; + // Update the node's payload_status from PENDING to FULL in fork choice. 
+ fork_choice + .on_execution_payload(block_root) + .map_err(|e| EnvelopeError::InternalError(format!("{e:?}")))?; // TODO(gloas) emit SSE event if the payload became the new head payload @@ -302,10 +300,9 @@ impl BeaconChain { drop(db_span); - // TODO(gloas) drop fork choice lock // The fork choice write-lock is dropped *after* the on-disk database has been updated. // This prevents inconsistency between the two at the expense of concurrency. - // drop(fork_choice); + drop(fork_choice); // We're declaring the envelope "imported" at this point, since fork choice and the DB know // about it. From a1534bbfb34d2388a63dcd9fdd116629b6065d83 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 30 Mar 2026 17:43:57 +1100 Subject: [PATCH 058/127] Check `ChainSpec` consistency with upstream `config.yaml` (#9008) Closes: - https://github.com/sigp/lighthouse/issues/9002 - Commit `config.yaml` for minimal and mainnet to `consensus/types/configs`. For now we omit any auto-downloading logic, to avoid the hassles of dealing with Github rate limits etc on CI. Unfortunately these files are NOT bundled inside the spec tests. - Fix the values of `min_builder_withdrawability_delay` for minimal and mainnet. These discrepancies aren't caught by the current spec tests, because the spec tests are missing data: https://github.com/ethereum/consensus-specs/pull/5005. Will be fixed in the next release/when we update to nightly. - Fix the blob schedule for `minimal`, which should be empty, NOT inherited from mainnet. - Keep `SECONDS_PER_SLOT` for now because the Kurtosis tests fail upon their complete removal. We will be able to completely remove `SECONDS_PER_SLOT` soon. 
Co-Authored-By: Michael Sproul --- beacon_node/beacon_chain/src/test_utils.rs | 2 +- beacon_node/beacon_chain/tests/store_tests.rs | 8 +- .../http_api/tests/interactive_tests.rs | 14 +- .../mainnet/config.yaml | 40 +-- consensus/types/configs/mainnet.yaml | 227 ++++++++++++++++ consensus/types/configs/minimal.yaml | 220 ++++++++++++++++ consensus/types/src/core/chain_spec.rs | 242 +++++++++++++++++- 7 files changed, 703 insertions(+), 50 deletions(-) create mode 100644 consensus/types/configs/mainnet.yaml create mode 100644 consensus/types/configs/minimal.yaml diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index c53c29438e9..13dcf221086 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -223,7 +223,7 @@ pub fn test_da_checker( let slot_clock = TestingSlotClock::new( Slot::new(0), Duration::from_secs(0), - Duration::from_secs(spec.seconds_per_slot), + spec.get_slot_duration(), ); let kzg = get_kzg(&spec); let ordered_custody_column_indices = generate_data_column_indices_rand_order::(); diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index bac3cbaf4e9..c6e13bd160b 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -2910,7 +2910,7 @@ async fn reproduction_unaligned_checkpoint_sync_pruned_payload() { let slot_clock = TestingSlotClock::new( Slot::new(0), Duration::from_secs(harness.chain.genesis_time), - Duration::from_secs(spec.seconds_per_slot), + spec.get_slot_duration(), ); slot_clock.set_slot(harness.get_current_slot().as_u64()); @@ -5334,8 +5334,8 @@ async fn test_safely_backfill_data_column_custody_info() { .await; let epoch_before_increase = Epoch::new(start_epochs); - let effective_delay_slots = - CUSTODY_CHANGE_DA_EFFECTIVE_DELAY_SECONDS / harness.chain.spec.seconds_per_slot; + let effective_delay_slots = 
CUSTODY_CHANGE_DA_EFFECTIVE_DELAY_SECONDS + / harness.chain.spec.get_slot_duration().as_secs(); let cgc_change_slot = epoch_before_increase.end_slot(E::slots_per_epoch()); @@ -6133,7 +6133,7 @@ async fn bellatrix_produce_and_store_payloads() { .genesis_time() .safe_add( slot.as_u64() - .safe_mul(harness.spec.seconds_per_slot) + .safe_mul(harness.spec.get_slot_duration().as_secs()) .unwrap(), ) .unwrap(); diff --git a/beacon_node/http_api/tests/interactive_tests.rs b/beacon_node/http_api/tests/interactive_tests.rs index e0e40298757..15f61537a06 100644 --- a/beacon_node/http_api/tests/interactive_tests.rs +++ b/beacon_node/http_api/tests/interactive_tests.rs @@ -975,9 +975,10 @@ async fn proposer_duties_with_gossip_tolerance() { assert_eq!(harness.chain.slot().unwrap(), num_initial); // Set the clock to just before the next epoch. - harness.chain.slot_clock.advance_time( - Duration::from_secs(spec.seconds_per_slot) - spec.maximum_gossip_clock_disparity(), - ); + harness + .chain + .slot_clock + .advance_time(spec.get_slot_duration() - spec.maximum_gossip_clock_disparity()); assert_eq!( harness .chain @@ -1081,9 +1082,10 @@ async fn proposer_duties_v2_with_gossip_tolerance() { assert_eq!(harness.chain.slot().unwrap(), num_initial); // Set the clock to just before the next epoch. 
- harness.chain.slot_clock.advance_time( - Duration::from_secs(spec.seconds_per_slot) - spec.maximum_gossip_clock_disparity(), - ); + harness + .chain + .slot_clock + .advance_time(spec.get_slot_duration() - spec.maximum_gossip_clock_disparity()); assert_eq!( harness .chain diff --git a/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml b/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml index 5df6370abe2..02bf37cb551 100644 --- a/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml @@ -56,21 +56,18 @@ ELECTRA_FORK_EPOCH: 364032 # May 7, 2025, 10:05:11am UTC FULU_FORK_VERSION: 0x06000000 FULU_FORK_EPOCH: 411392 # December 3, 2025, 09:49:11pm UTC # Gloas -GLOAS_FORK_VERSION: 0x07000000 # temporary stub +GLOAS_FORK_VERSION: 0x07000000 GLOAS_FORK_EPOCH: 18446744073709551615 -# EIP7441 -EIP7441_FORK_VERSION: 0x08000000 # temporary stub -EIP7441_FORK_EPOCH: 18446744073709551615 -# EIP7805 -EIP7805_FORK_VERSION: 0x0a000000 # temporary stub -EIP7805_FORK_EPOCH: 18446744073709551615 +# Heze +HEZE_FORK_VERSION: 0x08000000 +HEZE_FORK_EPOCH: 18446744073709551615 # EIP7928 -EIP7928_FORK_VERSION: 0x0b000000 # temporary stub +EIP7928_FORK_VERSION: 0xe7928000 # temporary stub EIP7928_FORK_EPOCH: 18446744073709551615 # Time parameters # --------------------------------------------------------------- -# 12 seconds (*deprecated*) +# 12 seconds SECONDS_PER_SLOT: 12 # 12000 milliseconds SLOT_DURATION_MS: 12000 @@ -96,8 +93,8 @@ SYNC_MESSAGE_DUE_BPS: 3333 CONTRIBUTION_DUE_BPS: 6667 # Gloas -# 2**12 (= 4,096) epochs -MIN_BUILDER_WITHDRAWABILITY_DELAY: 4096 +# 2**6 (= 64) epochs +MIN_BUILDER_WITHDRAWABILITY_DELAY: 64 # 2500 basis points, 25% of SLOT_DURATION_MS ATTESTATION_DUE_BPS_GLOAS: 2500 # 5000 basis points, 50% of SLOT_DURATION_MS @@ -109,7 +106,7 @@ CONTRIBUTION_DUE_BPS_GLOAS: 5000 # 7500 basis points, 75% of SLOT_DURATION_MS 
PAYLOAD_ATTESTATION_DUE_BPS: 7500 -# EIP7805 +# Heze # 7500 basis points, 75% of SLOT_DURATION_MS VIEW_FREEZE_CUTOFF_BPS: 7500 # 6667 basis points, ~67% of SLOT_DURATION_MS @@ -166,8 +163,6 @@ MAX_PAYLOAD_SIZE: 10485760 MAX_REQUEST_BLOCKS: 1024 # 2**8 (= 256) epochs EPOCHS_PER_SUBNET_SUBSCRIPTION: 256 -# MIN_VALIDATOR_WITHDRAWABILITY_DELAY + CHURN_LIMIT_QUOTIENT // 2 (= 33,024) epochs -MIN_EPOCHS_FOR_BLOCK_REQUESTS: 33024 # 2**5 (= 32) slots ATTESTATION_PROPAGATION_SLOT_RANGE: 32 # 500ms @@ -180,8 +175,6 @@ SUBNETS_PER_NODE: 2 ATTESTATION_SUBNET_COUNT: 64 # 0 bits ATTESTATION_SUBNET_EXTRA_BITS: 0 -# ceillog2(ATTESTATION_SUBNET_COUNT) + ATTESTATION_SUBNET_EXTRA_BITS (= 6 + 0) bits -ATTESTATION_SUBNET_PREFIX_BITS: 6 # Deneb # 2**7 (= 128) blocks @@ -192,24 +185,18 @@ MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS: 4096 BLOB_SIDECAR_SUBNET_COUNT: 6 # 6 blobs MAX_BLOBS_PER_BLOCK: 6 -# MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK (= 128 * 6) sidecars -MAX_REQUEST_BLOB_SIDECARS: 768 # Electra # 9 subnets BLOB_SIDECAR_SUBNET_COUNT_ELECTRA: 9 # 9 blobs MAX_BLOBS_PER_BLOCK_ELECTRA: 9 -# MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK_ELECTRA (= 128 * 9) sidecars -MAX_REQUEST_BLOB_SIDECARS_ELECTRA: 1152 # Fulu # 2**7 (= 128) groups NUMBER_OF_CUSTODY_GROUPS: 128 # 2**7 (= 128) subnets DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128 -# MAX_REQUEST_BLOCKS_DENEB * NUMBER_OF_COLUMNS (= 128 * 128) sidecars -MAX_REQUEST_DATA_COLUMN_SIDECARS: 16384 # 2**3 (= 8) samples SAMPLES_PER_SLOT: 8 # 2**2 (= 4) sidecars @@ -225,18 +212,13 @@ MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS: 4096 # 2**7 (= 128) payloads MAX_REQUEST_PAYLOADS: 128 -# EIP7441 -# 2**8 (= 256) epochs -EPOCHS_PER_SHUFFLING_PHASE: 256 -# 2**1 (= 2) epochs -PROPOSER_SELECTION_GAP: 2 - -# EIP7805 +# Heze # 2**4 (= 16) inclusion lists MAX_REQUEST_INCLUSION_LIST: 16 # 2**13 (= 8,192) bytes MAX_BYTES_PER_INCLUSION_LIST: 8192 + # Blob Scheduling # --------------------------------------------------------------- diff --git 
a/consensus/types/configs/mainnet.yaml b/consensus/types/configs/mainnet.yaml new file mode 100644 index 00000000000..ab85bd9e715 --- /dev/null +++ b/consensus/types/configs/mainnet.yaml @@ -0,0 +1,227 @@ +# Mainnet config + +# Extends the mainnet preset +PRESET_BASE: 'mainnet' + +# Free-form short name of the network that this configuration applies to - known +# canonical network names include: +# * 'mainnet' - there can be only one +# * 'sepolia' - testnet +# * 'holesky' - testnet +# * 'hoodi' - testnet +# Must match the regex: [a-z0-9\-] +CONFIG_NAME: 'mainnet' + +# Transition +# --------------------------------------------------------------- +# Estimated on Sept 15, 2022 +TERMINAL_TOTAL_DIFFICULTY: 58750000000000000000000 +# By default, don't use these params +TERMINAL_BLOCK_HASH: 0x0000000000000000000000000000000000000000000000000000000000000000 +TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH: 18446744073709551615 + +# Genesis +# --------------------------------------------------------------- +# 2**14 (= 16,384) validators +MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: 16384 +# Dec 1, 2020, 12pm UTC +MIN_GENESIS_TIME: 1606824000 +# Initial fork version for mainnet +GENESIS_FORK_VERSION: 0x00000000 +# 7 * 24 * 3,600 (= 604,800) seconds, 7 days +GENESIS_DELAY: 604800 + +# Forking +# --------------------------------------------------------------- +# Some forks are disabled for now: +# - These may be re-assigned to another fork-version later +# - Temporarily set to max uint64 value: 2**64 - 1 + +# Altair +ALTAIR_FORK_VERSION: 0x01000000 +ALTAIR_FORK_EPOCH: 74240 # Oct 27, 2021, 10:56:23am UTC +# Bellatrix +BELLATRIX_FORK_VERSION: 0x02000000 +BELLATRIX_FORK_EPOCH: 144896 # Sept 6, 2022, 11:34:47am UTC +# Capella +CAPELLA_FORK_VERSION: 0x03000000 +CAPELLA_FORK_EPOCH: 194048 # April 12, 2023, 10:27:35pm UTC +# Deneb +DENEB_FORK_VERSION: 0x04000000 +DENEB_FORK_EPOCH: 269568 # March 13, 2024, 01:55:35pm UTC +# Electra +ELECTRA_FORK_VERSION: 0x05000000 +ELECTRA_FORK_EPOCH: 364032 # May 7, 
2025, 10:05:11am UTC +# Fulu +FULU_FORK_VERSION: 0x06000000 +FULU_FORK_EPOCH: 411392 # December 3, 2025, 09:49:11pm UTC +# Gloas +GLOAS_FORK_VERSION: 0x07000000 +GLOAS_FORK_EPOCH: 18446744073709551615 +# Heze +HEZE_FORK_VERSION: 0x08000000 +HEZE_FORK_EPOCH: 18446744073709551615 +# EIP7928 +EIP7928_FORK_VERSION: 0xe7928000 # temporary stub +EIP7928_FORK_EPOCH: 18446744073709551615 + +# Time parameters +# --------------------------------------------------------------- +# 12000 milliseconds +SLOT_DURATION_MS: 12000 +# 14 (estimate from Eth1 mainnet) +SECONDS_PER_ETH1_BLOCK: 14 +# 2**8 (= 256) epochs +MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256 +# 2**8 (= 256) epochs +SHARD_COMMITTEE_PERIOD: 256 +# 2**11 (= 2,048) Eth1 blocks +ETH1_FOLLOW_DISTANCE: 2048 +# 1667 basis points, ~17% of SLOT_DURATION_MS +PROPOSER_REORG_CUTOFF_BPS: 1667 +# 3333 basis points, ~33% of SLOT_DURATION_MS +ATTESTATION_DUE_BPS: 3333 +# 6667 basis points, ~67% of SLOT_DURATION_MS +AGGREGATE_DUE_BPS: 6667 + +# Altair +# 3333 basis points, ~33% of SLOT_DURATION_MS +SYNC_MESSAGE_DUE_BPS: 3333 +# 6667 basis points, ~67% of SLOT_DURATION_MS +CONTRIBUTION_DUE_BPS: 6667 + +# Gloas +# 2**6 (= 64) epochs +MIN_BUILDER_WITHDRAWABILITY_DELAY: 64 +# 2500 basis points, 25% of SLOT_DURATION_MS +ATTESTATION_DUE_BPS_GLOAS: 2500 +# 5000 basis points, 50% of SLOT_DURATION_MS +AGGREGATE_DUE_BPS_GLOAS: 5000 +# 2500 basis points, 25% of SLOT_DURATION_MS +SYNC_MESSAGE_DUE_BPS_GLOAS: 2500 +# 5000 basis points, 50% of SLOT_DURATION_MS +CONTRIBUTION_DUE_BPS_GLOAS: 5000 +# 7500 basis points, 75% of SLOT_DURATION_MS +PAYLOAD_ATTESTATION_DUE_BPS: 7500 + +# Heze +# 7500 basis points, 75% of SLOT_DURATION_MS +VIEW_FREEZE_CUTOFF_BPS: 7500 +# 6667 basis points, ~67% of SLOT_DURATION_MS +INCLUSION_LIST_SUBMISSION_DUE_BPS: 6667 +# 9167 basis points, ~92% of SLOT_DURATION_MS +PROPOSER_INCLUSION_LIST_CUTOFF_BPS: 9167 + +# Validator cycle +# --------------------------------------------------------------- +# 2**2 (= 4) 
+INACTIVITY_SCORE_BIAS: 4 +# 2**4 (= 16) +INACTIVITY_SCORE_RECOVERY_RATE: 16 +# 2**4 * 10**9 (= 16,000,000,000) Gwei +EJECTION_BALANCE: 16000000000 +# 2**2 (= 4) validators +MIN_PER_EPOCH_CHURN_LIMIT: 4 +# 2**16 (= 65,536) +CHURN_LIMIT_QUOTIENT: 65536 + +# Deneb +# 2**3 (= 8) (*deprecated*) +MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT: 8 + +# Electra +# 2**7 * 10**9 (= 128,000,000,000) Gwei +MIN_PER_EPOCH_CHURN_LIMIT_ELECTRA: 128000000000 +# 2**8 * 10**9 (= 256,000,000,000) Gwei +MAX_PER_EPOCH_ACTIVATION_EXIT_CHURN_LIMIT: 256000000000 + +# Fork choice +# --------------------------------------------------------------- +# 40% +PROPOSER_SCORE_BOOST: 40 +# 20% +REORG_HEAD_WEIGHT_THRESHOLD: 20 +# 160% +REORG_PARENT_WEIGHT_THRESHOLD: 160 +# 2 epochs +REORG_MAX_EPOCHS_SINCE_FINALIZATION: 2 + +# Deposit contract +# --------------------------------------------------------------- +# Ethereum PoW Mainnet +DEPOSIT_CHAIN_ID: 1 +DEPOSIT_NETWORK_ID: 1 +DEPOSIT_CONTRACT_ADDRESS: 0x00000000219ab540356cBB839Cbe05303d7705Fa + +# Networking +# --------------------------------------------------------------- +# 10 * 2**20 (= 10,485,760) bytes, 10 MiB +MAX_PAYLOAD_SIZE: 10485760 +# 2**10 (= 1,024) blocks +MAX_REQUEST_BLOCKS: 1024 +# 2**8 (= 256) epochs +EPOCHS_PER_SUBNET_SUBSCRIPTION: 256 +# 2**5 (= 32) slots +ATTESTATION_PROPAGATION_SLOT_RANGE: 32 +# 500ms +MAXIMUM_GOSSIP_CLOCK_DISPARITY: 500 +MESSAGE_DOMAIN_INVALID_SNAPPY: 0x00000000 +MESSAGE_DOMAIN_VALID_SNAPPY: 0x01000000 +# 2 subnets per node +SUBNETS_PER_NODE: 2 +# 2**6 (= 64) subnets +ATTESTATION_SUBNET_COUNT: 64 +# 0 bits +ATTESTATION_SUBNET_EXTRA_BITS: 0 + +# Deneb +# 2**7 (= 128) blocks +MAX_REQUEST_BLOCKS_DENEB: 128 +# 2**12 (= 4,096) epochs +MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS: 4096 +# 6 subnets +BLOB_SIDECAR_SUBNET_COUNT: 6 +# 6 blobs +MAX_BLOBS_PER_BLOCK: 6 + +# Electra +# 9 subnets +BLOB_SIDECAR_SUBNET_COUNT_ELECTRA: 9 +# 9 blobs +MAX_BLOBS_PER_BLOCK_ELECTRA: 9 + +# Fulu +# 2**7 (= 128) groups +NUMBER_OF_CUSTODY_GROUPS: 128 +# 
2**7 (= 128) subnets +DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128 +# 2**3 (= 8) samples +SAMPLES_PER_SLOT: 8 +# 2**2 (= 4) sidecars +CUSTODY_REQUIREMENT: 4 +# 2**3 (= 8) sidecars +VALIDATOR_CUSTODY_REQUIREMENT: 8 +# 2**5 * 10**9 (= 32,000,000,000) Gwei +BALANCE_PER_ADDITIONAL_CUSTODY_GROUP: 32000000000 +# 2**12 (= 4,096) epochs +MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS: 4096 + +# Gloas +# 2**7 (= 128) payloads +MAX_REQUEST_PAYLOADS: 128 + +# Heze +# 2**4 (= 16) inclusion lists +MAX_REQUEST_INCLUSION_LIST: 16 +# 2**13 (= 8,192) bytes +MAX_BYTES_PER_INCLUSION_LIST: 8192 + + +# Blob Scheduling +# --------------------------------------------------------------- + +BLOB_SCHEDULE: + - EPOCH: 412672 # December 9, 2025, 02:21:11pm UTC + MAX_BLOBS_PER_BLOCK: 15 + - EPOCH: 419072 # January 7, 2026, 01:01:11am UTC + MAX_BLOBS_PER_BLOCK: 21 diff --git a/consensus/types/configs/minimal.yaml b/consensus/types/configs/minimal.yaml new file mode 100644 index 00000000000..8c0d7254fe0 --- /dev/null +++ b/consensus/types/configs/minimal.yaml @@ -0,0 +1,220 @@ +# Minimal config + +# Extends the minimal preset +PRESET_BASE: 'minimal' + +# Free-form short name of the network that this configuration applies to - known +# canonical network names include: +# * 'minimal' - spec-testing +# Must match the regex: [a-z0-9\-] +CONFIG_NAME: 'minimal' + +# Transition +# --------------------------------------------------------------- +# 2**256-2**10 for testing minimal network +TERMINAL_TOTAL_DIFFICULTY: 115792089237316195423570985008687907853269984665640564039457584007913129638912 +# By default, don't use these params +TERMINAL_BLOCK_HASH: 0x0000000000000000000000000000000000000000000000000000000000000000 +TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH: 18446744073709551615 + +# Genesis +# --------------------------------------------------------------- +# [customized] 2**6 (= 64) validators +MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: 64 +# [customized] Jan 3, 2020, 12am UTC +MIN_GENESIS_TIME: 1578009600 +# 
[customized] Initial fork version for minimal +GENESIS_FORK_VERSION: 0x00000001 +# [customized] 5 * 60 (= 300) seconds +GENESIS_DELAY: 300 + +# Forking +# --------------------------------------------------------------- +# Values provided for illustrative purposes. +# Individual tests/testnets may set different values. + +# [customized] Altair +ALTAIR_FORK_VERSION: 0x01000001 +ALTAIR_FORK_EPOCH: 18446744073709551615 +# [customized] Bellatrix +BELLATRIX_FORK_VERSION: 0x02000001 +BELLATRIX_FORK_EPOCH: 18446744073709551615 +# [customized] Capella +CAPELLA_FORK_VERSION: 0x03000001 +CAPELLA_FORK_EPOCH: 18446744073709551615 +# [customized] Deneb +DENEB_FORK_VERSION: 0x04000001 +DENEB_FORK_EPOCH: 18446744073709551615 +# [customized] Electra +ELECTRA_FORK_VERSION: 0x05000001 +ELECTRA_FORK_EPOCH: 18446744073709551615 +# [customized] Fulu +FULU_FORK_VERSION: 0x06000001 +FULU_FORK_EPOCH: 18446744073709551615 +# [customized] Gloas +GLOAS_FORK_VERSION: 0x07000001 +GLOAS_FORK_EPOCH: 18446744073709551615 +# [customized] Heze +HEZE_FORK_VERSION: 0x08000001 +HEZE_FORK_EPOCH: 18446744073709551615 +# [customized] EIP7928 +EIP7928_FORK_VERSION: 0xe7928001 +EIP7928_FORK_EPOCH: 18446744073709551615 + +# Time parameters +# --------------------------------------------------------------- +# [customized] 6000 milliseconds +SLOT_DURATION_MS: 6000 +# 14 (estimate from Eth1 mainnet) +SECONDS_PER_ETH1_BLOCK: 14 +# 2**8 (= 256) epochs +MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256 +# [customized] 2**6 (= 64) epochs +SHARD_COMMITTEE_PERIOD: 64 +# [customized] 2**4 (= 16) Eth1 blocks +ETH1_FOLLOW_DISTANCE: 16 +# 1667 basis points, ~17% of SLOT_DURATION_MS +PROPOSER_REORG_CUTOFF_BPS: 1667 +# 3333 basis points, ~33% of SLOT_DURATION_MS +ATTESTATION_DUE_BPS: 3333 +# 6667 basis points, ~67% of SLOT_DURATION_MS +AGGREGATE_DUE_BPS: 6667 + +# Altair +# 3333 basis points, ~33% of SLOT_DURATION_MS +SYNC_MESSAGE_DUE_BPS: 3333 +# 6667 basis points, ~67% of SLOT_DURATION_MS +CONTRIBUTION_DUE_BPS: 6667 + +# Gloas +# 
[customized] 2**1 (= 2) epochs +MIN_BUILDER_WITHDRAWABILITY_DELAY: 2 +# 2500 basis points, 25% of SLOT_DURATION_MS +ATTESTATION_DUE_BPS_GLOAS: 2500 +# 5000 basis points, 50% of SLOT_DURATION_MS +AGGREGATE_DUE_BPS_GLOAS: 5000 +# 2500 basis points, 25% of SLOT_DURATION_MS +SYNC_MESSAGE_DUE_BPS_GLOAS: 2500 +# 5000 basis points, 50% of SLOT_DURATION_MS +CONTRIBUTION_DUE_BPS_GLOAS: 5000 +# 7500 basis points, 75% of SLOT_DURATION_MS +PAYLOAD_ATTESTATION_DUE_BPS: 7500 + +# Heze +# 7500 basis points, 75% of SLOT_DURATION_MS +VIEW_FREEZE_CUTOFF_BPS: 7500 +# 6667 basis points, ~67% of SLOT_DURATION_MS +INCLUSION_LIST_SUBMISSION_DUE_BPS: 6667 +# 9167 basis points, ~92% of SLOT_DURATION_MS +PROPOSER_INCLUSION_LIST_CUTOFF_BPS: 9167 + +# Validator cycle +# --------------------------------------------------------------- +# 2**2 (= 4) +INACTIVITY_SCORE_BIAS: 4 +# 2**4 (= 16) +INACTIVITY_SCORE_RECOVERY_RATE: 16 +# 2**4 * 10**9 (= 16,000,000,000) Gwei +EJECTION_BALANCE: 16000000000 +# [customized] 2**1 (= 2) validators +MIN_PER_EPOCH_CHURN_LIMIT: 2 +# [customized] 2**5 (= 32) +CHURN_LIMIT_QUOTIENT: 32 + +# Deneb +# [customized] 2**2 (= 4) (*deprecated*) +MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT: 4 + +# Electra +# [customized] 2**6 * 10**9 (= 64,000,000,000) Gwei +MIN_PER_EPOCH_CHURN_LIMIT_ELECTRA: 64000000000 +# [customized] 2**7 * 10**9 (= 128,000,000,000) Gwei +MAX_PER_EPOCH_ACTIVATION_EXIT_CHURN_LIMIT: 128000000000 + +# Fork choice +# --------------------------------------------------------------- +# 40% +PROPOSER_SCORE_BOOST: 40 +# 20% +REORG_HEAD_WEIGHT_THRESHOLD: 20 +# 160% +REORG_PARENT_WEIGHT_THRESHOLD: 160 +# 2 epochs +REORG_MAX_EPOCHS_SINCE_FINALIZATION: 2 + +# Deposit contract +# --------------------------------------------------------------- +# Ethereum Goerli testnet +DEPOSIT_CHAIN_ID: 5 +DEPOSIT_NETWORK_ID: 5 +# Configured on a per testnet basis +DEPOSIT_CONTRACT_ADDRESS: 0x1234567890123456789012345678901234567890 + +# Networking +# 
--------------------------------------------------------------- +# 10 * 2**20 (= 10,485,760) bytes, 10 MiB +MAX_PAYLOAD_SIZE: 10485760 +# 2**10 (= 1,024) blocks +MAX_REQUEST_BLOCKS: 1024 +# 2**8 (= 256) epochs +EPOCHS_PER_SUBNET_SUBSCRIPTION: 256 +# 2**5 (= 32) slots +ATTESTATION_PROPAGATION_SLOT_RANGE: 32 +# 500ms +MAXIMUM_GOSSIP_CLOCK_DISPARITY: 500 +MESSAGE_DOMAIN_INVALID_SNAPPY: 0x00000000 +MESSAGE_DOMAIN_VALID_SNAPPY: 0x01000000 +# 2 subnets per node +SUBNETS_PER_NODE: 2 +# 2**6 (= 64) subnets +ATTESTATION_SUBNET_COUNT: 64 +# 0 bits +ATTESTATION_SUBNET_EXTRA_BITS: 0 + +# Deneb +# 2**7 (= 128) blocks +MAX_REQUEST_BLOCKS_DENEB: 128 +# 2**12 (= 4,096) epochs +MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS: 4096 +# 6 subnets +BLOB_SIDECAR_SUBNET_COUNT: 6 +# 6 blobs +MAX_BLOBS_PER_BLOCK: 6 + +# Electra +# 9 subnets +BLOB_SIDECAR_SUBNET_COUNT_ELECTRA: 9 +# 9 blobs +MAX_BLOBS_PER_BLOCK_ELECTRA: 9 + +# Fulu +# 2**7 (= 128) groups +NUMBER_OF_CUSTODY_GROUPS: 128 +# 2**7 (= 128) subnets +DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128 +# 2**3 (= 8) samples +SAMPLES_PER_SLOT: 8 +# 2**2 (= 4) sidecars +CUSTODY_REQUIREMENT: 4 +# 2**3 (= 8) sidecars +VALIDATOR_CUSTODY_REQUIREMENT: 8 +# 2**5 * 10**9 (= 32,000,000,000) Gwei +BALANCE_PER_ADDITIONAL_CUSTODY_GROUP: 32000000000 +# 2**12 (= 4,096) epochs +MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS: 4096 + +# Gloas +# 2**7 (= 128) payloads +MAX_REQUEST_PAYLOADS: 128 + +# Heze +# 2**4 (= 16) inclusion lists +MAX_REQUEST_INCLUSION_LIST: 16 +# 2**13 (= 8,192) bytes +MAX_BYTES_PER_INCLUSION_LIST: 8192 + + +# Blob Scheduling +# --------------------------------------------------------------- + +BLOB_SCHEDULE: [] diff --git a/consensus/types/src/core/chain_spec.rs b/consensus/types/src/core/chain_spec.rs index 8a2b3a23e8c..01c4c7bbfd9 100644 --- a/consensus/types/src/core/chain_spec.rs +++ b/consensus/types/src/core/chain_spec.rs @@ -96,8 +96,7 @@ pub struct ChainSpec { * Time parameters */ pub genesis_delay: u64, - // TODO deprecate seconds_per_slot - pub 
seconds_per_slot: u64, + seconds_per_slot: u64, // Private so that this value can't get changed except via the `set_slot_duration_ms` function. slot_duration_ms: u64, pub min_attestation_inclusion_delay: u64, @@ -914,6 +913,7 @@ impl ChainSpec { /// Set the duration of a slot (in ms). pub fn set_slot_duration_ms(mut self, slot_duration_ms: u64) -> Self { self.slot_duration_ms = slot_duration_ms; + self.seconds_per_slot = slot_duration_ms.saturating_div(1000); self.compute_derived_values::() } @@ -1235,7 +1235,7 @@ impl ChainSpec { gloas_fork_epoch: None, builder_payment_threshold_numerator: 6, builder_payment_threshold_denominator: 10, - min_builder_withdrawability_delay: Epoch::new(4096), + min_builder_withdrawability_delay: Epoch::new(64), max_request_payloads: 128, /* @@ -1381,6 +1381,7 @@ impl ChainSpec { // Gloas gloas_fork_version: [0x07, 0x00, 0x00, 0x01], gloas_fork_epoch: None, + min_builder_withdrawability_delay: Epoch::new(2), /* * Derived time values (set by `compute_derived_values()`) @@ -1391,6 +1392,9 @@ impl ChainSpec { sync_message_due: Duration::from_millis(1999), contribution_and_proof_due: Duration::from_millis(4000), + // Networking Fulu + blob_schedule: BlobSchedule::default(), + // Other network_id: 2, // lighthouse testnet network id deposit_chain_id: 5, @@ -1631,7 +1635,7 @@ impl ChainSpec { gloas_fork_epoch: None, builder_payment_threshold_numerator: 6, builder_payment_threshold_denominator: 10, - min_builder_withdrawability_delay: Epoch::new(4096), + min_builder_withdrawability_delay: Epoch::new(64), max_request_payloads: 128, /* @@ -1908,8 +1912,9 @@ pub struct Config { #[serde(deserialize_with = "deserialize_fork_epoch")] pub gloas_fork_epoch: Option>, - #[serde(with = "serde_utils::quoted_u64")] - seconds_per_slot: u64, + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + seconds_per_slot: Option>, #[serde(default)] #[serde(skip_serializing_if = "Option::is_none")] slot_duration_ms: Option>, @@ -2064,6 +2069,10 @@ 
pub struct Config { #[serde(default = "default_contribution_due_bps")] #[serde(with = "serde_utils::quoted_u64")] contribution_due_bps: u64, + + #[serde(default = "default_min_builder_withdrawability_delay")] + #[serde(with = "serde_utils::quoted_u64")] + min_builder_withdrawability_delay: u64, } fn default_bellatrix_fork_version() -> [u8; 4] { @@ -2289,6 +2298,10 @@ const fn default_contribution_due_bps() -> u64 { 6667 } +const fn default_min_builder_withdrawability_delay() -> u64 { + 64 +} + fn max_blocks_by_root_request_common(max_request_blocks: u64) -> usize { let max_request_blocks = max_request_blocks as usize; RuntimeVariableList::::new( @@ -2459,7 +2472,9 @@ impl Config { .gloas_fork_epoch .map(|epoch| MaybeQuoted { value: epoch }), - seconds_per_slot: spec.seconds_per_slot, + seconds_per_slot: Some(MaybeQuoted { + value: spec.seconds_per_slot, + }), slot_duration_ms: Some(MaybeQuoted { value: spec.slot_duration_ms, }), @@ -2525,6 +2540,8 @@ impl Config { aggregate_due_bps: spec.aggregate_due_bps, sync_message_due_bps: spec.sync_message_due_bps, contribution_due_bps: spec.contribution_due_bps, + + min_builder_withdrawability_delay: spec.min_builder_withdrawability_delay.as_u64(), } } @@ -2616,12 +2633,21 @@ impl Config { aggregate_due_bps, sync_message_due_bps, contribution_due_bps, + min_builder_withdrawability_delay, } = self; if preset_base != E::spec_name().to_string().as_str() { return None; } + // Fail if seconds_per_slot and slot_duration_ms are both set but are inconsistent. 
+ if let (Some(seconds_per_slot), Some(slot_duration_ms)) = + (seconds_per_slot, slot_duration_ms) + && seconds_per_slot.value.saturating_mul(1000) != slot_duration_ms.value + { + return None; + } + let spec = ChainSpec { config_name: config_name.clone(), min_genesis_active_validator_count, @@ -2642,10 +2668,12 @@ impl Config { fulu_fork_version, gloas_fork_version, gloas_fork_epoch: gloas_fork_epoch.map(|q| q.value), - seconds_per_slot, + seconds_per_slot: seconds_per_slot + .map(|q| q.value) + .or_else(|| slot_duration_ms.and_then(|q| q.value.checked_div(1000)))?, slot_duration_ms: slot_duration_ms .map(|q| q.value) - .unwrap_or_else(|| seconds_per_slot.saturating_mul(1000)), + .or_else(|| seconds_per_slot.map(|q| q.value.saturating_mul(1000)))?, seconds_per_eth1_block, min_validator_withdrawability_delay, shard_committee_period, @@ -2705,6 +2733,8 @@ impl Config { sync_message_due_bps, contribution_due_bps, + min_builder_withdrawability_delay: Epoch::new(min_builder_withdrawability_delay), + ..chain_spec.clone() }; Some(spec.compute_derived_values::()) @@ -2853,6 +2883,9 @@ mod yaml_tests { use super::*; use crate::core::MinimalEthSpec; use paste::paste; + use std::collections::BTreeSet; + use std::env; + use std::path::PathBuf; use std::sync::Arc; use tempfile::NamedTempFile; @@ -2902,6 +2935,67 @@ mod yaml_tests { assert_eq!(from, yamlconfig); } + #[test] + fn slot_duration_fallback_both_fields() { + let mainnet = ChainSpec::mainnet(); + let mut config = Config::from_chain_spec::(&mainnet); + config.seconds_per_slot = Some(MaybeQuoted { value: 12 }); + config.slot_duration_ms = Some(MaybeQuoted { value: 12000 }); + let spec = config + .apply_to_chain_spec::(&mainnet) + .unwrap(); + assert_eq!(spec.seconds_per_slot, 12); + assert_eq!(spec.slot_duration_ms, 12000); + } + + #[test] + fn slot_duration_fallback_both_fields_inconsistent() { + let mainnet = ChainSpec::mainnet(); + let mut config = Config::from_chain_spec::(&mainnet); + config.seconds_per_slot = 
Some(MaybeQuoted { value: 10 }); + config.slot_duration_ms = Some(MaybeQuoted { value: 12000 }); + assert_eq!(config.apply_to_chain_spec::(&mainnet), None); + } + + #[test] + fn slot_duration_fallback_seconds_only() { + let mainnet = ChainSpec::mainnet(); + let mut config = Config::from_chain_spec::(&mainnet); + config.seconds_per_slot = Some(MaybeQuoted { value: 12 }); + config.slot_duration_ms = None; + let spec = config + .apply_to_chain_spec::(&mainnet) + .unwrap(); + assert_eq!(spec.seconds_per_slot, 12); + assert_eq!(spec.slot_duration_ms, 12000); + } + + #[test] + fn slot_duration_fallback_ms_only() { + let mainnet = ChainSpec::mainnet(); + let mut config = Config::from_chain_spec::(&mainnet); + config.seconds_per_slot = None; + config.slot_duration_ms = Some(MaybeQuoted { value: 12000 }); + let spec = config + .apply_to_chain_spec::(&mainnet) + .unwrap(); + assert_eq!(spec.seconds_per_slot, 12); + assert_eq!(spec.slot_duration_ms, 12000); + } + + #[test] + fn slot_duration_fallback_neither() { + let mainnet = ChainSpec::mainnet(); + let mut config = Config::from_chain_spec::(&mainnet); + config.seconds_per_slot = None; + config.slot_duration_ms = None; + assert!( + config + .apply_to_chain_spec::(&mainnet) + .is_none() + ); + } + #[test] fn blob_schedule_max_blobs_per_block() { let spec_contents = r#" @@ -3375,7 +3469,6 @@ mod yaml_tests { // Test slot duration let slot_duration = spec.get_slot_duration(); assert_eq!(slot_duration, Duration::from_millis(12000)); - assert_eq!(slot_duration, Duration::from_secs(spec.seconds_per_slot)); // Test edge cases with custom spec let mut custom_spec = spec.clone(); @@ -3485,4 +3578,133 @@ mod yaml_tests { spec.attestation_due_bps = 15000; spec.compute_derived_values::(); } + + fn configs_base_path() -> PathBuf { + env::var("CARGO_MANIFEST_DIR") + .expect("should know manifest dir") + .parse::() + .expect("should parse manifest dir as path") + .join("configs") + } + + /// Upstream config keys that Lighthouse 
intentionally does not include in its + /// `Config` struct. These are forks/features not yet implemented. Update this + /// list as new forks are added. + const UPSTREAM_KEYS_NOT_IN_LIGHTHOUSE: &[&str] = &[ + // Forks not yet implemented + "HEZE_FORK_VERSION", + "HEZE_FORK_EPOCH", + "EIP7928_FORK_VERSION", + "EIP7928_FORK_EPOCH", + // Gloas params not yet in Config + "ATTESTATION_DUE_BPS_GLOAS", + "AGGREGATE_DUE_BPS_GLOAS", + "SYNC_MESSAGE_DUE_BPS_GLOAS", + "CONTRIBUTION_DUE_BPS_GLOAS", + "PAYLOAD_ATTESTATION_DUE_BPS", + "MAX_REQUEST_PAYLOADS", + // Gloas fork choice params not yet in Config + "REORG_HEAD_WEIGHT_THRESHOLD", + "REORG_PARENT_WEIGHT_THRESHOLD", + "REORG_MAX_EPOCHS_SINCE_FINALIZATION", + // Heze networking + "VIEW_FREEZE_CUTOFF_BPS", + "INCLUSION_LIST_SUBMISSION_DUE_BPS", + "PROPOSER_INCLUSION_LIST_CUTOFF_BPS", + "MAX_REQUEST_INCLUSION_LIST", + "MAX_BYTES_PER_INCLUSION_LIST", + ]; + + /// Compare a `ChainSpec` against an upstream consensus-specs config YAML file. + /// + /// 1. Extracts keys from the raw YAML text (to avoid yaml_serde's inability + /// to parse integers > u64 into `Value`/`Mapping` types) and checks that + /// every key is either known to `Config` or explicitly listed in + /// `UPSTREAM_KEYS_NOT_IN_LIGHTHOUSE`. + /// 2. Deserializes the upstream YAML as `Config` (which has custom + /// deserializers for large values like `TERMINAL_TOTAL_DIFFICULTY`) and + /// compares against `Config::from_chain_spec`. + fn config_test(spec: &ChainSpec, config_name: &str) { + let file_path = configs_base_path().join(format!("{config_name}.yaml")); + let upstream_yaml = std::fs::read_to_string(&file_path) + .unwrap_or_else(|e| panic!("failed to read {}: {e}", file_path.display())); + + // Extract top-level keys from the raw YAML text. We can't parse as + // yaml_serde::Mapping because yaml_serde cannot represent integers + // exceeding u64 (e.g. TERMINAL_TOTAL_DIFFICULTY). 
Config YAML uses a + // simple `KEY: value` format with no indentation for top-level keys. + let upstream_keys: BTreeSet = upstream_yaml + .lines() + .filter_map(|line| { + // Skip comments, blank lines, and indented lines (nested YAML). + if line.is_empty() + || line.starts_with('#') + || line.starts_with(' ') + || line.starts_with('\t') + { + return None; + } + line.split(':').next().map(|k| k.to_string()) + }) + .collect(); + + // Get the set of keys that Config knows about by serializing and collecting + // keys. Also include keys for optional fields that may be skipped during + // serialization (e.g. CONFIG_NAME). + let our_config = Config::from_chain_spec::(spec); + let our_yaml = yaml_serde::to_string(&our_config).expect("failed to serialize Config"); + let our_mapping: yaml_serde::Mapping = + yaml_serde::from_str(&our_yaml).expect("failed to re-parse our Config"); + let mut known_keys: BTreeSet = our_mapping + .keys() + .filter_map(|k| k.as_str().map(String::from)) + .collect(); + // Fields that Config knows but may skip during serialization. + known_keys.insert("CONFIG_NAME".to_string()); + + // Check for upstream keys that our Config doesn't know about. + let mut missing_keys: Vec<&String> = upstream_keys + .iter() + .filter(|k| { + !known_keys.contains(k.as_str()) + && !UPSTREAM_KEYS_NOT_IN_LIGHTHOUSE.contains(&k.as_str()) + }) + .collect(); + missing_keys.sort(); + + assert!( + missing_keys.is_empty(), + "Upstream {config_name} config has keys not present in Lighthouse Config \ + (add to Config or to UPSTREAM_KEYS_NOT_IN_LIGHTHOUSE): {missing_keys:?}" + ); + + // Compare values for all fields Config knows about. + let mut upstream_config: Config = yaml_serde::from_str(&upstream_yaml) + .unwrap_or_else(|e| panic!("failed to parse {config_name} as Config: {e}")); + + // CONFIG_NAME is network metadata (not a spec parameter), so align it + // before comparing. 
+ upstream_config.config_name = our_config.config_name.clone(); + // SECONDS_PER_SLOT is deprecated upstream but we still emit it, so + // fill it in if the upstream YAML omitted it. + if upstream_config.seconds_per_slot.is_none() { + upstream_config.seconds_per_slot = our_config.seconds_per_slot; + } + assert_eq!( + upstream_config, our_config, + "Config mismatch for {config_name}" + ); + } + + #[test] + fn mainnet_config_consistent() { + let spec = ChainSpec::mainnet(); + config_test::(&spec, "mainnet"); + } + + #[test] + fn minimal_config_consistent() { + let spec = ChainSpec::minimal(); + config_test::(&spec, "minimal"); + } } From 6044d79ad931779d1cf19e4a5c805cf5b63cde36 Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Tue, 31 Mar 2026 11:19:18 +1100 Subject: [PATCH 059/127] Fix local testnet Tempo and Prometheus/Grafana config (#9054) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Pin Tempo image to `grafana/tempo:2.10.3` — `grafana/tempo:latest` now resolves to an unreleased 3.0 build that removed the `compactor` config field, causing startup failure - Replace deprecated `prometheus_grafana` additional service with separate `prometheus` + `grafana` services Co-Authored-By: Jimmy Chen --- scripts/local_testnet/network_params.yaml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/scripts/local_testnet/network_params.yaml b/scripts/local_testnet/network_params.yaml index 0c36e5c49cc..083f719c601 100644 --- a/scripts/local_testnet/network_params.yaml +++ b/scripts/local_testnet/network_params.yaml @@ -21,10 +21,13 @@ network_params: slot_duration_ms: 3000 snooper_enabled: false global_log_level: debug +tempo_params: + image: grafana/tempo:2.10.3 additional_services: - dora - spamoor - - prometheus_grafana + - prometheus + - grafana - tempo spamoor_params: image: ethpandaops/spamoor:master From 6f480e499e1b745333cd19dfa6037eb6738f3f43 Mon Sep 17 00:00:00 2001 From: Lion - dapplion 
<35266934+dapplion@users.noreply.github.com> Date: Tue, 31 Mar 2026 00:07:22 -0500 Subject: [PATCH 060/127] Add range sync tests (#8989) Co-Authored-By: dapplion <35266934+dapplion@users.noreply.github.com> --- beacon_node/beacon_processor/src/lib.rs | 8 +- .../src/network_beacon_processor/mod.rs | 7 +- beacon_node/network/src/sync/tests/lookups.rs | 305 ++++++- beacon_node/network/src/sync/tests/range.rs | 827 +++++++----------- scripts/range-sync-coverage.sh | 136 +++ 5 files changed, 756 insertions(+), 527 deletions(-) create mode 100755 scripts/range-sync-coverage.sh diff --git a/beacon_node/beacon_processor/src/lib.rs b/beacon_node/beacon_processor/src/lib.rs index 724c41cfc94..a6c76beb317 100644 --- a/beacon_node/beacon_processor/src/lib.rs +++ b/beacon_node/beacon_processor/src/lib.rs @@ -421,7 +421,11 @@ pub enum Work { IgnoredRpcBlock { process_fn: BlockingFn, }, - ChainSegment(AsyncFn), + ChainSegment { + process_fn: AsyncFn, + /// (chain_id, batch_epoch) for test observability + process_id: (u32, u64), + }, ChainSegmentBackfill(BlockingFn), Status(BlockingFn), BlocksByRangeRequest(AsyncFn), @@ -1473,7 +1477,7 @@ impl BeaconProcessor { } => task_spawner.spawn_blocking(move || { process_batch(aggregates); }), - Work::ChainSegment(process_fn) => task_spawner.spawn_async(async move { + Work::ChainSegment { process_fn, .. } => task_spawner.spawn_async(async move { process_fn.await; }), Work::UnknownBlockAttestation { process_fn } diff --git a/beacon_node/network/src/network_beacon_processor/mod.rs b/beacon_node/network/src/network_beacon_processor/mod.rs index f74e7dacfba..b3d6874b8a3 100644 --- a/beacon_node/network/src/network_beacon_processor/mod.rs +++ b/beacon_node/network/src/network_beacon_processor/mod.rs @@ -620,11 +620,14 @@ impl NetworkBeaconProcessor { // Back-sync batches are dispatched with a different `Work` variant so // they can be rate-limited. 
let work = match process_id { - ChainSegmentProcessId::RangeBatchId(_, _) => { + ChainSegmentProcessId::RangeBatchId(chain_id, epoch) => { let process_fn = async move { processor.process_chain_segment(process_id, blocks).await; }; - Work::ChainSegment(Box::pin(process_fn)) + Work::ChainSegment { + process_fn: Box::pin(process_fn), + process_id: (chain_id, epoch.as_u64()), + } } ChainSegmentProcessId::BackSyncBatchId(_) => { let process_fn = diff --git a/beacon_node/network/src/sync/tests/lookups.rs b/beacon_node/network/src/sync/tests/lookups.rs index cd872df8873..a26996ec5ee 100644 --- a/beacon_node/network/src/sync/tests/lookups.rs +++ b/beacon_node/network/src/sync/tests/lookups.rs @@ -1,16 +1,18 @@ use super::*; use crate::NetworkMessage; -use crate::network_beacon_processor::{InvalidBlockStorage, NetworkBeaconProcessor}; +use crate::network_beacon_processor::{ + ChainSegmentProcessId, InvalidBlockStorage, NetworkBeaconProcessor, +}; use crate::sync::block_lookups::{BlockLookupSummary, PARENT_DEPTH_TOLERANCE}; use crate::sync::{ SyncMessage, - manager::{BlockProcessType, BlockProcessingResult, SyncManager}, + manager::{BatchProcessResult, BlockProcessType, BlockProcessingResult, SyncManager}, }; use beacon_chain::blob_verification::KzgVerifiedBlob; use beacon_chain::block_verification_types::LookupBlock; use beacon_chain::custody_context::NodeCustodyType; use beacon_chain::{ - AvailabilityProcessingStatus, BlockError, NotifyExecutionLayer, + AvailabilityProcessingStatus, BlockError, EngineState, NotifyExecutionLayer, block_verification_types::{AsBlock, AvailableBlockData}, data_availability_checker::Availability, test_utils::{ @@ -23,7 +25,7 @@ use educe::Educe; use itertools::Itertools; use lighthouse_network::discovery::CombinedKey; use lighthouse_network::{ - NetworkConfig, NetworkGlobals, PeerId, + NetworkConfig, NetworkGlobals, PeerAction, PeerId, rpc::{RPCError, RequestType}, service::api_types::{AppRequestId, SyncRequestId}, types::SyncState, @@ -64,14 
+66,33 @@ pub struct SimulateConfig { Option Option + Send + Sync>>, // Import a block directly before processing it (for simulating race conditions) import_block_before_process: HashSet, + /// Number of range batch processing attempts that return FaultyFailure + range_faulty_failures: usize, + /// Number of range batch processing attempts that return NonFaultyFailure + range_non_faulty_failures: usize, + /// Number of BlocksByRange requests that return empty (no blocks) + return_no_range_blocks_n_times: usize, + /// Number of DataColumnsByRange requests that return empty (no columns) + return_no_range_columns_n_times: usize, + /// Number of DataColumnsByRange requests that return columns with unrequested indices + return_wrong_range_column_indices_n_times: usize, + /// Number of DataColumnsByRange requests that return columns with unrequested slots + return_wrong_range_column_slots_n_times: usize, + /// Number of DataColumnsByRange requests that return fewer columns than requested + /// (drops half the columns). Triggers CouplingError::DataColumnPeerFailure → retry_partial_batch + return_partial_range_columns_n_times: usize, + /// Set EE offline at start, bring back online after this many BlocksByRange responses + ee_offline_for_n_range_responses: Option, + /// Disconnect all peers after this many successful BlocksByRange responses. 
+ successful_range_responses_before_disconnect: Option, } impl SimulateConfig { - fn new() -> Self { + pub(super) fn new() -> Self { Self::default() } - fn happy_path() -> Self { + pub(super) fn happy_path() -> Self { Self::default() } @@ -111,7 +132,7 @@ impl SimulateConfig { self } - fn return_rpc_error(mut self, error: RPCError) -> Self { + pub(super) fn return_rpc_error(mut self, error: RPCError) -> Self { self.return_rpc_error = Some(error); self } @@ -133,6 +154,51 @@ impl SimulateConfig { self.import_block_before_process.insert(block_root); self } + + pub(super) fn with_range_faulty_failures(mut self, n: usize) -> Self { + self.range_faulty_failures = n; + self + } + + pub(super) fn with_range_non_faulty_failures(mut self, n: usize) -> Self { + self.range_non_faulty_failures = n; + self + } + + pub(super) fn with_no_range_blocks_n_times(mut self, n: usize) -> Self { + self.return_no_range_blocks_n_times = n; + self + } + + pub(super) fn with_no_range_columns_n_times(mut self, n: usize) -> Self { + self.return_no_range_columns_n_times = n; + self + } + + pub(super) fn with_wrong_range_column_indices_n_times(mut self, n: usize) -> Self { + self.return_wrong_range_column_indices_n_times = n; + self + } + + pub(super) fn with_wrong_range_column_slots_n_times(mut self, n: usize) -> Self { + self.return_wrong_range_column_slots_n_times = n; + self + } + + pub(super) fn with_partial_range_columns_n_times(mut self, n: usize) -> Self { + self.return_partial_range_columns_n_times = n; + self + } + + pub(super) fn with_ee_offline_for_n_range_responses(mut self, n: usize) -> Self { + self.ee_offline_for_n_range_responses = Some(n); + self + } + + pub(super) fn with_disconnect_after_range_requests(mut self, n: usize) -> Self { + self.successful_range_responses_before_disconnect = Some(n); + self + } } fn genesis_fork() -> ForkName { @@ -256,6 +322,7 @@ impl TestRig { }) } + #[allow(dead_code)] pub fn with_custody_type(node_custody_type: NodeCustodyType) -> Self { 
Self::new(TestRigConfig { fulu_test_type: FuluTestType::WeFullnodeThemSupernode, @@ -267,13 +334,23 @@ impl TestRig { /// /// Processes events from sync_rx (sink), beacon processor, and network queues in fixed /// priority order each tick. Handles completed work before pulling new requests. - async fn simulate(&mut self, complete_strategy: SimulateConfig) { + pub(super) async fn simulate(&mut self, complete_strategy: SimulateConfig) { self.complete_strategy = complete_strategy; self.log(&format!( "Running simulate with config {:?}", self.complete_strategy )); + // Set EE offline at the start if configured + if self + .complete_strategy + .ee_offline_for_n_range_responses + .is_some() + { + self.sync_manager + .update_execution_engine_state(EngineState::Offline); + } + let mut i = 0; loop { @@ -352,9 +429,34 @@ impl TestRig { process_fn.await } } - Work::RpcBlobs { process_fn } - | Work::RpcCustodyColumn(process_fn) - | Work::ChainSegment(process_fn) => process_fn.await, + Work::RpcBlobs { process_fn } | Work::RpcCustodyColumn(process_fn) => { + process_fn.await + } + Work::ChainSegment { + process_fn, + process_id: (chain_id, batch_epoch), + } => { + let sync_type = + ChainSegmentProcessId::RangeBatchId(chain_id, batch_epoch.into()); + if self.complete_strategy.range_faulty_failures > 0 { + self.complete_strategy.range_faulty_failures -= 1; + self.push_sync_message(SyncMessage::BatchProcessed { + sync_type, + result: BatchProcessResult::FaultyFailure { + imported_blocks: 0, + penalty: PeerAction::LowToleranceError, + }, + }); + } else if self.complete_strategy.range_non_faulty_failures > 0 { + self.complete_strategy.range_non_faulty_failures -= 1; + self.push_sync_message(SyncMessage::BatchProcessed { + sync_type, + result: BatchProcessResult::NonFaultyFailure, + }); + } else { + process_fn.await; + } + } Work::Reprocess(_) => {} // ignore other => panic!("Unsupported Work event {}", other.str_id()), } @@ -573,15 +675,50 @@ impl TestRig { if 
self.complete_strategy.skip_by_range_routes { return; } - let blocks = (*req.start_slot()..req.start_slot() + req.count()) - .filter_map(|slot| { - self.network_blocks_by_slot - .get(&Slot::new(slot)) - .map(|block| block.block_cloned()) - }) - .collect::>(); - self.send_rpc_blocks_response(req_id, peer_id, &blocks); + // Check if we should disconnect all peers instead of continuing + if let Some(ref mut remaining) = self + .complete_strategy + .successful_range_responses_before_disconnect + { + if *remaining == 0 { + // Disconnect all peers — remaining responses become "late" + for peer in self.get_connected_peers() { + self.peer_disconnected(peer); + } + return; + } else { + *remaining -= 1; + } + } + + // Return empty response N times to simulate peer returning no blocks + if self.complete_strategy.return_no_range_blocks_n_times > 0 { + self.complete_strategy.return_no_range_blocks_n_times -= 1; + self.send_rpc_blocks_response(req_id, peer_id, &[]); + } else { + let blocks = (*req.start_slot()..req.start_slot() + req.count()) + .filter_map(|slot| { + self.network_blocks_by_slot + .get(&Slot::new(slot)) + .map(|block| block.block_cloned()) + }) + .collect::>(); + self.send_rpc_blocks_response(req_id, peer_id, &blocks); + } + + // Bring EE back online after N range responses + if let Some(ref mut remaining) = + self.complete_strategy.ee_offline_for_n_range_responses + { + if *remaining == 0 { + self.sync_manager + .update_execution_engine_state(EngineState::Online); + self.complete_strategy.ee_offline_for_n_range_responses = None; + } else { + *remaining -= 1; + } + } } (RequestType::BlobsByRange(req), AppRequestId::Sync(req_id)) => { @@ -605,10 +742,80 @@ impl TestRig { if self.complete_strategy.skip_by_range_routes { return; } - // Note: This function is permissive, blocks may have zero columns and it won't - // error. 
Some caveats: - // - The genesis block never has columns - // - Some blocks may not have columns as the blob count is random + + // Return empty columns N times + if self.complete_strategy.return_no_range_columns_n_times > 0 { + self.complete_strategy.return_no_range_columns_n_times -= 1; + self.send_rpc_columns_response(req_id, peer_id, &[]); + return; + } + + // Return columns with unrequested indices N times. + // Note: for supernodes this returns no columns since they custody all indices. + if self + .complete_strategy + .return_wrong_range_column_indices_n_times + > 0 + { + self.complete_strategy + .return_wrong_range_column_indices_n_times -= 1; + let wrong_columns = (req.start_slot..req.start_slot + req.count) + .filter_map(|slot| self.network_blocks_by_slot.get(&Slot::new(slot))) + .filter_map(|block| block.block_data().data_columns()) + .flat_map(|columns| { + columns + .into_iter() + .filter(|c| !req.columns.contains(c.index())) + }) + .collect::>(); + self.send_rpc_columns_response(req_id, peer_id, &wrong_columns); + return; + } + + // Return columns from an out-of-range slot N times + if self + .complete_strategy + .return_wrong_range_column_slots_n_times + > 0 + { + self.complete_strategy + .return_wrong_range_column_slots_n_times -= 1; + // Get a column from a slot AFTER the requested range + let wrong_slot = req.start_slot + req.count; + let wrong_columns = self + .network_blocks_by_slot + .get(&Slot::new(wrong_slot)) + .and_then(|block| block.block_data().data_columns()) + .into_iter() + .flat_map(|columns| { + columns + .into_iter() + .filter(|c| req.columns.contains(c.index())) + }) + .collect::>(); + self.send_rpc_columns_response(req_id, peer_id, &wrong_columns); + return; + } + + // Return only half the requested columns N times — triggers CouplingError + if self.complete_strategy.return_partial_range_columns_n_times > 0 { + self.complete_strategy.return_partial_range_columns_n_times -= 1; + let columns = (req.start_slot..req.start_slot + 
req.count) + .filter_map(|slot| self.network_blocks_by_slot.get(&Slot::new(slot))) + .filter_map(|block| block.block_data().data_columns()) + .flat_map(|columns| { + columns + .into_iter() + .filter(|c| req.columns.contains(c.index())) + }) + .enumerate() + .filter(|(i, _)| i % 2 == 0) // keep every other column + .map(|(_, c)| c) + .collect::>(); + self.send_rpc_columns_response(req_id, peer_id, &columns); + return; + } + let columns = (req.start_slot..req.start_slot + req.count) .filter_map(|slot| self.network_blocks_by_slot.get(&Slot::new(slot))) .filter_map(|block| block.block_data().data_columns()) @@ -726,7 +933,7 @@ impl TestRig { // Preparation steps /// Returns the block root of the tip of the built chain - async fn build_chain(&mut self, block_count: usize) -> Hash256 { + pub(super) async fn build_chain(&mut self, block_count: usize) -> Hash256 { let mut blocks = vec![]; // Initialise a new beacon chain @@ -947,6 +1154,30 @@ impl TestRig { self.trigger_with_last_block(); } + /// Import blocks for slots 1..=up_to_slot into the local chain (advance local head) + pub(super) async fn import_blocks_up_to_slot(&mut self, up_to_slot: u64) { + for slot in 1..=up_to_slot { + let rpc_block = self + .network_blocks_by_slot + .get(&Slot::new(slot)) + .unwrap_or_else(|| panic!("No block at slot {slot}")) + .clone(); + let block_root = rpc_block.canonical_root(); + self.harness + .chain + .process_block( + block_root, + rpc_block, + NotifyExecutionLayer::Yes, + BlockImportSource::Gossip, + || Ok(()), + ) + .await + .unwrap(); + } + self.harness.chain.recompute_head_at_current_slot().await; + } + /// Import a block directly into the chain without going through lookup sync async fn import_block_by_root(&mut self, block_root: Hash256) { let range_sync_block = self @@ -1000,23 +1231,32 @@ impl TestRig { // Post-test assertions - fn head_slot(&self) -> Slot { + pub(super) fn head_slot(&self) -> Slot { self.harness.chain.head().head_slot() } - fn assert_head_slot(&self, 
slot: u64) { + pub(super) fn assert_head_slot(&self, slot: u64) { assert_eq!(self.head_slot(), Slot::new(slot), "Unexpected head slot"); } - fn max_known_slot(&self) -> Slot { + pub(super) fn max_known_slot(&self) -> Slot { self.network_blocks_by_slot .keys() .max() .copied() - .expect("no blocks") + .unwrap_or_default() + } + + pub(super) fn finalized_epoch(&self) -> types::Epoch { + self.harness + .chain + .canonical_head + .cached_head() + .finalized_checkpoint() + .epoch } - fn assert_penalties(&self, expected_penalties: &[&'static str]) { + pub(super) fn assert_penalties(&self, expected_penalties: &[&'static str]) { let penalties = self .penalties .iter() @@ -1034,7 +1274,7 @@ impl TestRig { } } - fn assert_penalties_of_type(&self, expected_penalty: &'static str) { + pub(super) fn assert_penalties_of_type(&self, expected_penalty: &'static str) { if self.penalties.is_empty() { panic!("No penalties but expected some of type {expected_penalty}"); } @@ -1051,7 +1291,7 @@ impl TestRig { } } - fn assert_no_penalties(&mut self) { + pub(super) fn assert_no_penalties(&mut self) { if !self.penalties.is_empty() { panic!("Some downscore events: {:?}", self.penalties); } @@ -1102,7 +1342,7 @@ impl TestRig { } /// Assert there is at least one range sync chain created and that all sync chains completed - fn assert_successful_range_sync(&self) { + pub(super) fn assert_successful_range_sync(&self) { assert!( self.range_sync_chains_added() > 0, "No created range sync chains" @@ -1425,6 +1665,7 @@ impl TestRig { } } + #[allow(dead_code)] pub fn pop_received_processor_event) -> Option>( &mut self, predicate_transform: F, diff --git a/beacon_node/network/src/sync/tests/range.rs b/beacon_node/network/src/sync/tests/range.rs index c19ee8eb6d4..891d9d1e978 100644 --- a/beacon_node/network/src/sync/tests/range.rs +++ b/beacon_node/network/src/sync/tests/range.rs @@ -1,110 +1,47 @@ +//! Range sync tests for `BlocksByRange`, `BlobsByRange`, `DataColumnsByRange`. +//! +//! 
Tests follow the pattern from `lookups.rs`: +//! ```ignore +//! async fn test_name() { +//! let mut r = TestRig::default(); +//! r.setup_xyz().await; +//! r.simulate(SimulateConfig::happy_path()).await; +//! r.assert_range_sync_completed(); +//! } +//! ``` +//! +//! Rules: +//! - Tests must be succinct and readable (3-10 lines per test body) +//! - All complex logic lives in helpers (setup, SimulateConfig, assert) +//! - Test bodies must not manually grab requests, send SyncMessages, or do anything overly specific +//! - All tests use `simulate()` if they need peers to fulfill requests +//! - Extend `SimulateConfig` for new range-specific behaviors +//! - Extend `simulate()` to support by_range methods + +use super::lookups::SimulateConfig; use super::*; -use crate::network_beacon_processor::ChainSegmentProcessId; use crate::status::ToStatusMessage; use crate::sync::SyncMessage; use crate::sync::manager::SLOT_IMPORT_TOLERANCE; -use crate::sync::network_context::RangeRequestId; use crate::sync::range_sync::RangeSyncType; -use beacon_chain::BeaconChain; -use beacon_chain::block_verification_types::AvailableBlockData; -use beacon_chain::custody_context::NodeCustodyType; -use beacon_chain::data_column_verification::CustodyDataColumn; -use beacon_chain::test_utils::{AttestationStrategy, BlockStrategy}; -use beacon_chain::{EngineState, NotifyExecutionLayer, block_verification_types::RangeSyncBlock}; -use beacon_processor::WorkType; -use lighthouse_network::rpc::RequestType; -use lighthouse_network::rpc::methods::{ - BlobsByRangeRequest, DataColumnsByRangeRequest, OldBlocksByRangeRequest, - OldBlocksByRangeRequestV2, StatusMessageV2, -}; -use lighthouse_network::service::api_types::{ - AppRequestId, BlobsByRangeRequestId, BlocksByRangeRequestId, DataColumnsByRangeRequestId, - SyncRequestId, -}; +use lighthouse_network::rpc::RPCError; +use lighthouse_network::rpc::methods::StatusMessageV2; use lighthouse_network::{PeerId, SyncInfo}; -use std::time::Duration; -use types::{ 
- BlobSidecarList, BlockImportSource, Epoch, EthSpec, Hash256, MinimalEthSpec as E, - SignedBeaconBlock, SignedBeaconBlockHash, Slot, -}; +use types::{Epoch, EthSpec, Hash256, MinimalEthSpec as E, Slot}; -const D: Duration = Duration::new(0, 0); - -pub(crate) enum DataSidecars { - Blobs(BlobSidecarList), - DataColumns(Vec>), -} - -enum ByRangeDataRequestIds { - PreDeneb, - PrePeerDAS(BlobsByRangeRequestId, PeerId), - PostPeerDAS(Vec<(DataColumnsByRangeRequestId, PeerId)>), -} - -/// Sync tests are usually written in the form: -/// - Do some action -/// - Expect a request to be sent -/// - Complete the above request -/// -/// To make writting tests succint, the machinery in this testing rig automatically identifies -/// _which_ request to complete. Picking the right request is critical for tests to pass, so this -/// filter allows better expressivity on the criteria to identify the right request. -#[derive(Default, Debug, Clone)] -struct RequestFilter { - peer: Option, - epoch: Option, -} - -impl RequestFilter { - fn peer(mut self, peer: PeerId) -> Self { - self.peer = Some(peer); - self - } - - fn epoch(mut self, epoch: u64) -> Self { - self.epoch = Some(epoch); - self - } -} - -fn filter() -> RequestFilter { - RequestFilter::default() -} +/// MinimalEthSpec has 8 slots per epoch +const SLOTS_PER_EPOCH: usize = 8; impl TestRig { - /// Produce a head peer with an advanced head fn add_head_peer(&mut self) -> PeerId { - self.add_head_peer_with_root(Hash256::random()) - } - - /// Produce a head peer with an advanced head - fn add_head_peer_with_root(&mut self, head_root: Hash256) -> PeerId { let local_info = self.local_info(); self.add_supernode_peer(SyncInfo { - head_root, + head_root: Hash256::random(), head_slot: local_info.head_slot + 1 + Slot::new(SLOT_IMPORT_TOLERANCE as u64), ..local_info }) } - // Produce a finalized peer with an advanced finalized epoch - fn add_finalized_peer(&mut self) -> PeerId { - self.add_finalized_peer_with_root(Hash256::random()) - } - 
- // Produce a finalized peer with an advanced finalized epoch - fn add_finalized_peer_with_root(&mut self, finalized_root: Hash256) -> PeerId { - let local_info = self.local_info(); - let finalized_epoch = local_info.finalized_epoch + 2; - self.add_supernode_peer(SyncInfo { - finalized_epoch, - finalized_root, - head_slot: finalized_epoch.start_slot(E::slots_per_epoch()), - head_root: Hash256::random(), - earliest_available_slot: None, - }) - } - fn finalized_remote_info_advanced_by(&self, advanced_epochs: Epoch) -> SyncInfo { let local_info = self.local_info(); let finalized_epoch = local_info.finalized_epoch + advanced_epochs; @@ -142,11 +79,7 @@ impl TestRig { } fn add_supernode_peer(&mut self, remote_info: SyncInfo) -> PeerId { - // Create valid peer known to network globals - // TODO(fulu): Using supernode peers to ensure we have peer across all column - // subnets for syncing. Should add tests connecting to full node peers. let peer_id = self.new_connected_supernode_peer(); - // Send peer to sync self.send_sync_message(SyncMessage::AddPeer(peer_id, remote_info)); peer_id } @@ -184,450 +117,362 @@ impl TestRig { ) } - #[track_caller] - fn assert_chain_segments(&mut self, count: usize) { - for i in 0..count { - self.pop_received_processor_event(|ev| { - (ev.work_type() == beacon_processor::WorkType::ChainSegment).then_some(()) - }) - .unwrap_or_else(|e| panic!("Expect ChainSegment work event count {i}: {e:?}")); - } - } + // -- Setup helpers -- - fn update_execution_engine_state(&mut self, state: EngineState) { - self.log(&format!("execution engine state updated: {state:?}")); - self.sync_manager.update_execution_engine_state(state); + /// Head sync: peers whose finalized root/epoch match ours (known to fork choice), + /// but whose head is ahead. Only head chain is created. 
+ async fn setup_head_sync(&mut self) { + self.build_chain(SLOTS_PER_EPOCH).await; + self.add_head_peer(); + self.assert_state(RangeSyncType::Head); } - fn find_blocks_by_range_request( - &mut self, - request_filter: RequestFilter, - ) -> ((BlocksByRangeRequestId, PeerId), ByRangeDataRequestIds) { - let filter_f = |peer: PeerId, start_slot: u64| { - if let Some(expected_epoch) = request_filter.epoch { - let epoch = Slot::new(start_slot).epoch(E::slots_per_epoch()).as_u64(); - if epoch != expected_epoch { - return false; - } - } - if let Some(expected_peer) = request_filter.peer - && peer != expected_peer - { - return false; - } - - true - }; + /// Finalized sync: peers whose finalized epoch is advanced and head == finalized start slot. + /// Returns the remote SyncInfo (needed for blacklist tests). + async fn setup_finalized_sync(&mut self) -> SyncInfo { + let advanced_epochs = 5; + self.build_chain(advanced_epochs * SLOTS_PER_EPOCH).await; + let remote_info = self.finalized_remote_info_advanced_by((advanced_epochs as u64).into()); + self.add_fullnode_peers(remote_info.clone(), 100); + self.add_supernode_peer(remote_info.clone()); + self.assert_state(RangeSyncType::Finalized); + remote_info + } - let block_req = self - .pop_received_network_event(|ev| match ev { - NetworkMessage::SendRequest { - peer_id, - request: - RequestType::BlocksByRange(OldBlocksByRangeRequest::V2( - OldBlocksByRangeRequestV2 { start_slot, .. 
}, - )), - app_request_id: AppRequestId::Sync(SyncRequestId::BlocksByRange(id)), - } if filter_f(*peer_id, *start_slot) => Some((*id, *peer_id)), - _ => None, - }) - .unwrap_or_else(|e| { - panic!("Should have a BlocksByRange request, filter {request_filter:?}: {e:?}") - }); - - let by_range_data_requests = if self.is_after_fulu() { - let mut data_columns_requests = vec![]; - while let Ok(data_columns_request) = self.pop_received_network_event(|ev| match ev { - NetworkMessage::SendRequest { - peer_id, - request: - RequestType::DataColumnsByRange(DataColumnsByRangeRequest { - start_slot, .. - }), - app_request_id: AppRequestId::Sync(SyncRequestId::DataColumnsByRange(id)), - } if filter_f(*peer_id, *start_slot) => Some((*id, *peer_id)), - _ => None, - }) { - data_columns_requests.push(data_columns_request); - } - if data_columns_requests.is_empty() { - panic!("Found zero DataColumnsByRange requests, filter {request_filter:?}"); - } - ByRangeDataRequestIds::PostPeerDAS(data_columns_requests) - } else if self.is_after_deneb() { - let (id, peer) = self - .pop_received_network_event(|ev| match ev { - NetworkMessage::SendRequest { - peer_id, - request: RequestType::BlobsByRange(BlobsByRangeRequest { start_slot, .. }), - app_request_id: AppRequestId::Sync(SyncRequestId::BlobsByRange(id)), - } if filter_f(*peer_id, *start_slot) => Some((*id, *peer_id)), - _ => None, - }) - .unwrap_or_else(|e| { - panic!("Should have a blobs by range request, filter {request_filter:?}: {e:?}") - }); - ByRangeDataRequestIds::PrePeerDAS(id, peer) - } else { - ByRangeDataRequestIds::PreDeneb + /// Finalized-to-head: peers whose finalized is advanced AND head is beyond finalized. + /// After finalized sync completes, head chains are created from awaiting_head_peers. 
+ async fn setup_finalized_and_head_sync(&mut self) { + let finalized_epochs = 5; + let head_epochs = 7; + self.build_chain(head_epochs * SLOTS_PER_EPOCH).await; + let local_info = self.local_info(); + let finalized_epoch = local_info.finalized_epoch + Epoch::new(finalized_epochs as u64); + let head_slot = Slot::new((head_epochs * SLOTS_PER_EPOCH) as u64); + let remote_info = SyncInfo { + finalized_epoch, + finalized_root: Hash256::random(), + head_slot, + head_root: Hash256::random(), + earliest_available_slot: None, }; - - (block_req, by_range_data_requests) + self.add_fullnode_peers(remote_info.clone(), 100); + self.add_supernode_peer(remote_info); + self.assert_state(RangeSyncType::Finalized); } - fn find_and_complete_blocks_by_range_request( - &mut self, - request_filter: RequestFilter, - ) -> RangeRequestId { - let ((blocks_req_id, block_peer), by_range_data_request_ids) = - self.find_blocks_by_range_request(request_filter); - - // Complete the request with a single stream termination - self.log(&format!( - "Completing BlocksByRange request {blocks_req_id:?} with empty stream" - )); - self.send_sync_message(SyncMessage::RpcBlock { - sync_request_id: SyncRequestId::BlocksByRange(blocks_req_id), - peer_id: block_peer, - beacon_block: None, - seen_timestamp: D, - }); - - match by_range_data_request_ids { - ByRangeDataRequestIds::PreDeneb => {} - ByRangeDataRequestIds::PrePeerDAS(id, peer_id) => { - // Complete the request with a single stream termination - self.log(&format!( - "Completing BlobsByRange request {id:?} with empty stream" - )); - self.send_sync_message(SyncMessage::RpcBlob { - sync_request_id: SyncRequestId::BlobsByRange(id), - peer_id, - blob_sidecar: None, - seen_timestamp: D, - }); - } - ByRangeDataRequestIds::PostPeerDAS(data_column_req_ids) => { - // Complete the request with a single stream termination - for (id, peer_id) in data_column_req_ids { - self.log(&format!( - "Completing DataColumnsByRange request {id:?} with empty stream" - )); - 
self.send_sync_message(SyncMessage::RpcDataColumn { - sync_request_id: SyncRequestId::DataColumnsByRange(id), - peer_id, - data_column: None, - seen_timestamp: D, - }); - } - } - } - - blocks_req_id.parent_request_id.requester + /// Finalized sync with only 1 fullnode peer (insufficient custody coverage). + /// Returns remote_info to pass to `add_remaining_finalized_peers`. + async fn setup_finalized_sync_insufficient_peers(&mut self) -> SyncInfo { + let advanced_epochs = 5; + self.build_chain(advanced_epochs * SLOTS_PER_EPOCH).await; + let remote_info = self.finalized_remote_info_advanced_by((advanced_epochs as u64).into()); + self.add_fullnode_peer(remote_info.clone()); + self.assert_state(RangeSyncType::Finalized); + remote_info } - fn find_and_complete_processing_chain_segment(&mut self, id: ChainSegmentProcessId) { - self.pop_received_processor_event(|ev| { - (ev.work_type() == WorkType::ChainSegment).then_some(()) - }) - .unwrap_or_else(|e| panic!("Expected chain segment work event: {e}")); - - self.log(&format!( - "Completing ChainSegment processing work {id:?} with success" - )); - self.send_sync_message(SyncMessage::BatchProcessed { - sync_type: id, - result: crate::sync::BatchProcessResult::Success { - sent_blocks: 8, - imported_blocks: 8, - }, - }); + /// Finalized sync where local node already has blocks up to `local_epochs`. + /// Triggers optimistic start: the chain tries to download a batch at the local head + /// epoch concurrently with sequential processing from the start. 
+ async fn setup_finalized_sync_with_local_head(&mut self, local_epochs: usize) { + let target_epochs = local_epochs + 3; // target beyond local head + self.build_chain(target_epochs * SLOTS_PER_EPOCH).await; + self.import_blocks_up_to_slot((local_epochs * SLOTS_PER_EPOCH) as u64) + .await; + let remote_info = self.finalized_remote_info_advanced_by((target_epochs as u64).into()); + self.add_fullnode_peers(remote_info.clone(), 100); + self.add_supernode_peer(remote_info); + self.assert_state(RangeSyncType::Finalized); } - fn complete_and_process_range_sync_until( - &mut self, - last_epoch: u64, - request_filter: RequestFilter, - ) { - for epoch in 0..last_epoch { - // Note: In this test we can't predict the block peer - let id = - self.find_and_complete_blocks_by_range_request(request_filter.clone().epoch(epoch)); - if let RangeRequestId::RangeSync { batch_id, .. } = id { - assert_eq!(batch_id.as_u64(), epoch, "Unexpected batch_id"); - } else { - panic!("unexpected RangeRequestId {id:?}"); - } - - let id = match id { - RangeRequestId::RangeSync { chain_id, batch_id } => { - ChainSegmentProcessId::RangeBatchId(chain_id, batch_id) - } - RangeRequestId::BackfillSync { batch_id } => { - ChainSegmentProcessId::BackSyncBatchId(batch_id) - } - }; - - self.find_and_complete_processing_chain_segment(id); - if epoch < last_epoch - 1 { - self.assert_state(RangeSyncType::Finalized); - } else { - self.assert_no_chains_exist(); - self.assert_no_failed_chains(); - } - } + /// Add enough peers to cover all custody columns (same chain as insufficient setup) + fn add_remaining_finalized_peers(&mut self, remote_info: SyncInfo) { + self.add_fullnode_peers(remote_info.clone(), 100); + self.add_supernode_peer(remote_info); } - async fn create_canonical_block(&mut self) -> (SignedBeaconBlock, Option>) { - self.harness.advance_slot(); + // -- Assert helpers -- - let block_root = self - .harness - .extend_chain( - 1, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - 
) - .await; + /// Assert range sync completed: chains created and removed, all blocks ingested, + /// finalized epoch advanced, no penalties, no leftover events. + fn assert_range_sync_completed(&mut self) { + self.assert_successful_range_sync(); + self.assert_no_failed_chains(); + assert_eq!( + self.head_slot(), + self.max_known_slot(), + "Head slot should match the last built block (all blocks ingested)" + ); + assert!( + self.finalized_epoch() > types::Epoch::new(0), + "Finalized epoch should have advanced past genesis, got {}", + self.finalized_epoch() + ); + self.assert_no_penalties(); + self.assert_empty_network(); + self.assert_empty_processor(); + } - let store = &self.harness.chain.store; - let block = store.get_full_block(&block_root).unwrap().unwrap(); - let fork = block.fork_name_unchecked(); - - let data_sidecars = if fork.fulu_enabled() { - store - .get_data_columns(&block_root, fork) - .unwrap() - .map(|columns| { - columns - .into_iter() - .map(CustodyDataColumn::from_asserted_custody) - .collect() - }) - .map(DataSidecars::DataColumns) - } else if fork.deneb_enabled() { - store - .get_blobs(&block_root) - .unwrap() - .blobs() - .map(DataSidecars::Blobs) - } else { - None - }; + /// Assert head sync completed (no finalization expected for short ranges) + fn assert_head_sync_completed(&mut self) { + self.assert_successful_range_sync(); + self.assert_no_failed_chains(); + assert_eq!( + self.head_slot(), + self.max_known_slot(), + "Head slot should match the last built block (all blocks ingested)" + ); + self.assert_no_penalties(); + } - (block, data_sidecars) + /// Assert chain was removed and peers received faulty_chain penalty + fn assert_range_sync_chain_failed(&mut self) { + self.assert_no_chains_exist(); + assert!( + self.penalties.iter().any(|p| p.msg == "faulty_chain"), + "Expected faulty_chain penalty, got {:?}", + self.penalties + ); } - async fn remember_block( - &mut self, - (block, data_sidecars): (SignedBeaconBlock, Option>), - ) { - // 
This code is kind of duplicated from Harness::process_block, but takes sidecars directly. - let block_root = block.canonical_root(); - self.harness.set_current_slot(block.slot()); - let _: SignedBeaconBlockHash = self - .harness - .chain - .process_block( - block_root, - build_range_sync_block(block.into(), &data_sidecars, self.harness.chain.clone()), - NotifyExecutionLayer::Yes, - BlockImportSource::RangeSync, - || Ok(()), - ) - .await - .unwrap() - .try_into() - .unwrap(); - self.harness.chain.recompute_head_at_current_slot().await; + /// Assert range sync removed chains (e.g., all peers disconnected) + fn assert_range_sync_chain_removed(&mut self) { + self.assert_no_chains_exist(); } -} -fn build_range_sync_block( - block: Arc>, - data_sidecars: &Option>, - chain: Arc>, -) -> RangeSyncBlock { - match data_sidecars { - Some(DataSidecars::Blobs(blobs)) => { - let block_data = AvailableBlockData::new_with_blobs(blobs.clone()); - RangeSyncBlock::new( - block, - block_data, - &chain.data_availability_checker, - chain.spec.clone(), - ) - .unwrap() - } - Some(DataSidecars::DataColumns(columns)) => { - let block_data = AvailableBlockData::new_with_data_columns( - columns - .iter() - .map(|c| c.as_data_column().clone()) - .collect::>(), - ); - RangeSyncBlock::new( - block, - block_data, - &chain.data_availability_checker, - chain.spec.clone(), - ) - .unwrap() - } - // Block has no data, expects zero columns - None => RangeSyncBlock::new( - block, - AvailableBlockData::NoData, - &chain.data_availability_checker, - chain.spec.clone(), - ) - .unwrap(), + /// Assert a new peer with a blacklisted root gets disconnected + fn assert_peer_blacklisted(&mut self, remote_info: SyncInfo) { + let new_peer = self.add_supernode_peer(remote_info); + self.pop_received_network_event(|ev| match ev { + NetworkMessage::GoodbyePeer { peer_id, .. 
} if *peer_id == new_peer => Some(()), + _ => None, + }) + .expect("Peer with blacklisted root should receive Goodbye"); } } -#[test] -fn head_chain_removed_while_finalized_syncing() { - // NOTE: this is a regression test. - // Added in PR https://github.com/sigp/lighthouse/pull/2821 - let mut rig = TestRig::default(); +// ============================================================================================ +// Tests +// ============================================================================================ - // Get a peer with an advanced head - let head_peer = rig.add_head_peer(); - rig.assert_state(RangeSyncType::Head); +/// Head sync: single peer slightly ahead → download batches → all blocks ingested. +#[tokio::test] +async fn head_sync_completes() { + let mut r = TestRig::default(); + r.setup_head_sync().await; + r.simulate(SimulateConfig::happy_path()).await; + r.assert_head_sync_completed(); + r.assert_head_slot(SLOTS_PER_EPOCH as u64); +} - // Sync should have requested a batch, grab the request. - let _ = rig.find_blocks_by_range_request(filter().peer(head_peer)); +/// Peers with advanced finalized AND head beyond finalized. Finalized sync completes first, +/// then head chains are created from awaiting_head_peers to sync the remaining gap. +#[tokio::test] +async fn finalized_to_head_transition() { + let mut r = TestRig::default(); + r.setup_finalized_and_head_sync().await; + r.simulate(SimulateConfig::happy_path()).await; + r.assert_range_sync_completed(); + r.assert_head_slot(7 * SLOTS_PER_EPOCH as u64); +} - // Now get a peer with an advanced finalized epoch. - let finalized_peer = rig.add_finalized_peer(); - rig.assert_state(RangeSyncType::Finalized); +/// Finalized sync happy path: all batches download and process, head advances to target, +/// finalized epoch advances past genesis. 
+#[tokio::test] +async fn finalized_sync_completes() { + let mut r = TestRig::default(); + r.setup_finalized_sync().await; + r.simulate(SimulateConfig::happy_path()).await; + r.assert_range_sync_completed(); + r.assert_head_slot(5 * SLOTS_PER_EPOCH as u64); +} - // Sync should have requested a batch, grab the request - let _ = rig.find_blocks_by_range_request(filter().peer(finalized_peer)); +/// First BlocksByRange request gets an RPC error. Batch retries from another peer, +/// sync completes with no penalties (RPC errors are not penalized). +#[tokio::test] +async fn batch_rpc_error_retries() { + let mut r = TestRig::default(); + r.setup_finalized_sync().await; + r.simulate(SimulateConfig::happy_path().return_rpc_error(RPCError::UnsupportedProtocol)) + .await; + r.assert_range_sync_completed(); +} - // Fail the head chain by disconnecting the peer. - rig.peer_disconnected(head_peer); - rig.assert_state(RangeSyncType::Finalized); +/// Peer returns zero blocks for a BlocksByRange request. Batch retries, sync completes. +#[tokio::test] +async fn batch_peer_returns_empty_then_succeeds() { + let mut r = TestRig::default(); + r.setup_finalized_sync().await; + r.simulate(SimulateConfig::happy_path().with_no_range_blocks_n_times(1)) + .await; + r.assert_successful_range_sync(); } +/// Peer returns zero columns for a DataColumnsByRange request. Batch retries, sync completes. +/// Only exercises column logic on fulu+. #[tokio::test] -async fn state_update_while_purging() { - // NOTE: this is a regression test. - // Added in PR https://github.com/sigp/lighthouse/pull/2827 - let mut rig = TestRig::with_custody_type(NodeCustodyType::SemiSupernode); - - // Create blocks on a separate harness - // SemiSupernode ensures enough columns are stored for sampling + custody RPC block validation - let mut rig_2 = TestRig::with_custody_type(NodeCustodyType::SemiSupernode); - // Need to create blocks that can be inserted into the fork-choice and fit the "known - // conditions" below. 
- let head_peer_block = rig_2.create_canonical_block().await; - let head_peer_root = head_peer_block.0.canonical_root(); - let finalized_peer_block = rig_2.create_canonical_block().await; - let finalized_peer_root = finalized_peer_block.0.canonical_root(); - - // Get a peer with an advanced head - let head_peer = rig.add_head_peer_with_root(head_peer_root); - rig.assert_state(RangeSyncType::Head); - - // Sync should have requested a batch, grab the request. - let _ = rig.find_blocks_by_range_request(filter().peer(head_peer)); - - // Now get a peer with an advanced finalized epoch. - let finalized_peer = rig.add_finalized_peer_with_root(finalized_peer_root); - rig.assert_state(RangeSyncType::Finalized); - - // Sync should have requested a batch, grab the request - let _ = rig.find_blocks_by_range_request(filter().peer(finalized_peer)); - - // Now the chain knows both chains target roots. - rig.remember_block(head_peer_block).await; - rig.remember_block(finalized_peer_block).await; - - // Add an additional peer to the second chain to make range update it's status - rig.add_finalized_peer(); +async fn batch_peer_returns_no_columns_then_succeeds() { + let mut r = TestRig::default(); + r.setup_finalized_sync().await; + r.simulate(SimulateConfig::happy_path().with_no_range_columns_n_times(1)) + .await; + r.assert_successful_range_sync(); } -#[test] -fn pause_and_resume_on_ee_offline() { - let mut rig = TestRig::default(); - - // add some peers - let peer1 = rig.add_head_peer(); - // make the ee offline - rig.update_execution_engine_state(EngineState::Offline); - // send the response to the request - rig.find_and_complete_blocks_by_range_request(filter().peer(peer1).epoch(0)); - // the beacon processor shouldn't have received any work - rig.assert_empty_processor(); - - // while the ee is offline, more peers might arrive. Add a new finalized peer. 
- let _peer2 = rig.add_finalized_peer(); - - // send the response to the request - // Don't filter requests and the columns requests may be sent to peer1 or peer2 - // We need to filter by epoch, because the previous batch eagerly sent requests for the next - // epoch for the other batch. So we can either filter by epoch of by sync type. - rig.find_and_complete_blocks_by_range_request(filter().epoch(0)); - // the beacon processor shouldn't have received any work - rig.assert_empty_processor(); - // make the beacon processor available again. - // update_execution_engine_state implicitly calls resume - // now resume range, we should have two processing requests in the beacon processor. - rig.update_execution_engine_state(EngineState::Online); - - // The head chain and finalized chain (2) should be in the processing queue - rig.assert_chain_segments(2); +/// Peer returns columns with indices it wasn't asked for → UnrequestedIndex verify error. +/// Batch retries from another peer, sync completes. +#[tokio::test] +async fn batch_peer_returns_wrong_column_indices_then_succeeds() { + let mut r = TestRig::default(); + r.setup_finalized_sync().await; + r.simulate(SimulateConfig::happy_path().with_wrong_range_column_indices_n_times(1)) + .await; + r.assert_successful_range_sync(); } -/// To attempt to finalize the peer's status finalized checkpoint we synced to its finalized epoch + -/// 2 epochs + 1 slot. -const EXTRA_SYNCED_EPOCHS: u64 = 2 + 1; +/// Peer returns columns from a slot outside the requested range → UnrequestedSlot verify error. +/// Batch retries from another peer, sync completes. 
+#[tokio::test] +async fn batch_peer_returns_wrong_column_slots_then_succeeds() { + let mut r = TestRig::default(); + r.setup_finalized_sync().await; + r.simulate(SimulateConfig::happy_path().with_wrong_range_column_slots_n_times(1)) + .await; + r.assert_successful_range_sync(); +} -#[test] -fn finalized_sync_enough_global_custody_peers_few_chain_peers() { - // Run for all forks +/// PeerDAS: peer returns only half the requested columns. Block-sidecar coupling detects +/// missing columns → CouplingError::DataColumnPeerFailure → retry_partial_batch from other peers. +#[tokio::test] +async fn batch_peer_returns_partial_columns_then_succeeds() { let mut r = TestRig::default(); + if !r.fork_name.fulu_enabled() { + return; + } + r.setup_finalized_sync().await; + r.simulate(SimulateConfig::happy_path().with_partial_range_columns_n_times(1)) + .await; + r.assert_successful_range_sync(); +} - let advanced_epochs: u64 = 2; - let remote_info = r.finalized_remote_info_advanced_by(advanced_epochs.into()); +/// Batch processing returns NonFaultyFailure (e.g. transient error). Batch goes back to +/// AwaitingDownload, retries without penalty, sync completes. +#[tokio::test] +async fn batch_non_faulty_failure_retries() { + let mut r = TestRig::default(); + r.setup_finalized_sync().await; + r.simulate(SimulateConfig::happy_path().with_range_non_faulty_failures(1)) + .await; + r.assert_range_sync_completed(); +} - // Generate enough peers and supernodes to cover all custody columns - let peer_count = 100; - r.add_fullnode_peers(remote_info.clone(), peer_count); - r.add_supernode_peer(remote_info); - r.assert_state(RangeSyncType::Finalized); +/// Batch processing returns FaultyFailure once. Peer penalized with "faulty_batch", +/// batch redownloaded from a different peer, sync completes. 
+#[tokio::test] +async fn batch_faulty_failure_redownloads() { + let mut r = TestRig::default(); + r.setup_finalized_sync().await; + r.simulate(SimulateConfig::happy_path().with_range_faulty_failures(1)) + .await; + r.assert_successful_range_sync(); + r.assert_penalties_of_type("faulty_batch"); +} + +/// Batch processing fails MAX_BATCH_PROCESSING_ATTEMPTS (3) times with FaultyFailure. +/// Chain removed, all peers penalized with "faulty_chain". +#[tokio::test] +async fn batch_max_failures_removes_chain() { + let mut r = TestRig::default(); + r.setup_finalized_sync().await; + r.simulate(SimulateConfig::happy_path().with_range_faulty_failures(3)) + .await; + r.assert_range_sync_chain_failed(); +} - let last_epoch = advanced_epochs + EXTRA_SYNCED_EPOCHS; - r.complete_and_process_range_sync_until(last_epoch, filter()); +/// Chain fails via max faulty retries → finalized root added to failed_chains LRU. +/// A new peer advertising the same finalized root gets disconnected with GoodbyeReason. +#[tokio::test] +async fn failed_chain_blacklisted() { + let mut r = TestRig::default(); + let remote_info = r.setup_finalized_sync().await; + r.simulate(SimulateConfig::happy_path().with_range_faulty_failures(3)) + .await; + r.assert_range_sync_chain_failed(); + r.assert_peer_blacklisted(remote_info); } -#[test] -fn finalized_sync_not_enough_custody_peers_on_start() { +/// All peers disconnect before any request is fulfilled → chain removed (EmptyPeerPool). +#[tokio::test] +async fn all_peers_disconnect_removes_chain() { let mut r = TestRig::default(); - // Only run post-PeerDAS - if !r.fork_name.fulu_enabled() { - return; - } + r.setup_finalized_sync().await; + r.simulate(SimulateConfig::happy_path().with_disconnect_after_range_requests(0)) + .await; + r.assert_range_sync_chain_removed(); +} - let advanced_epochs: u64 = 2; - let remote_info = r.finalized_remote_info_advanced_by(advanced_epochs.into()); +/// Peers disconnect after 1 request is served. 
Remaining in-flight responses arrive +/// for a chain that no longer exists — verified as a no-op (no crash). +#[tokio::test] +async fn late_response_for_removed_chain() { + let mut r = TestRig::default(); + r.setup_finalized_sync().await; + r.simulate(SimulateConfig::happy_path().with_disconnect_after_range_requests(1)) + .await; + r.assert_range_sync_chain_removed(); +} - // Unikely that the single peer we added has enough columns for us. Tests are deterministic and - // this error should never be hit - r.add_fullnode_peer(remote_info.clone()); - r.assert_state(RangeSyncType::Finalized); +/// Execution engine goes offline at sync start. Batch responses complete but processing +/// is paused. After 2 responses, EE comes back online, queued batches process, sync completes. +#[tokio::test] +async fn ee_offline_then_online_resumes_sync() { + let mut r = TestRig::default(); + r.setup_finalized_sync().await; + r.simulate(SimulateConfig::happy_path().with_ee_offline_for_n_range_responses(2)) + .await; + r.assert_range_sync_completed(); +} - // Because we don't have enough peers on all columns we haven't sent any request. - // NOTE: There's a small chance that this single peer happens to custody exactly the set we - // expect, in that case the test will fail. Find a way to make the test deterministic. - r.assert_empty_network(); +/// Local node already has blocks up to epoch 3. Finalized sync starts targeting epoch 6. +/// The chain uses optimistic start: downloads a batch at the local head epoch concurrently +/// with sequential processing from the start. All blocks ingested. 
+#[tokio::test] +async fn finalized_sync_with_local_head_partial() { + let mut r = TestRig::default(); + r.setup_finalized_sync_with_local_head(3).await; + r.simulate(SimulateConfig::happy_path()).await; + r.assert_range_sync_completed(); +} - // Generate enough peers and supernodes to cover all custody columns - let peer_count = 100; - r.add_fullnode_peers(remote_info.clone(), peer_count); +/// Local node has all blocks except the last one. Finalized sync only needs to fill the +/// final gap. Tests optimistic start where local head is near the target. +#[tokio::test] +async fn finalized_sync_with_local_head_near_target() { + let mut r = TestRig::default(); + let target_epochs = 5; + let local_slots = (target_epochs * SLOTS_PER_EPOCH) - 1; // all blocks except last + r.build_chain(target_epochs * SLOTS_PER_EPOCH).await; + r.import_blocks_up_to_slot(local_slots as u64).await; + let remote_info = r.finalized_remote_info_advanced_by((target_epochs as u64).into()); + r.add_fullnode_peers(remote_info.clone(), 100); r.add_supernode_peer(remote_info); + r.assert_state(RangeSyncType::Finalized); + r.simulate(SimulateConfig::happy_path()).await; + r.assert_range_sync_completed(); + r.assert_head_slot((target_epochs * SLOTS_PER_EPOCH) as u64); +} - let last_epoch = advanced_epochs + EXTRA_SYNCED_EPOCHS; - r.complete_and_process_range_sync_until(last_epoch, filter()); +/// PeerDAS only: single fullnode peer doesn't cover all custody columns → no requests sent. +/// Once enough fullnodes + a supernode arrive, sync proceeds and completes. 
+#[tokio::test] +async fn not_enough_custody_peers_then_peers_arrive() { + let mut r = TestRig::default(); + if !r.fork_name.fulu_enabled() { + return; + } + let remote_info = r.setup_finalized_sync_insufficient_peers().await; + r.assert_empty_network(); + r.add_remaining_finalized_peers(remote_info); + r.simulate(SimulateConfig::happy_path()).await; + r.assert_range_sync_completed(); } diff --git a/scripts/range-sync-coverage.sh b/scripts/range-sync-coverage.sh new file mode 100755 index 00000000000..df438c0c7f5 --- /dev/null +++ b/scripts/range-sync-coverage.sh @@ -0,0 +1,136 @@ +#!/bin/bash +# Aggregate range sync test coverage across all forks +# Usage: ./scripts/range-sync-coverage.sh [--html] +set -e + +REPO_ROOT="$(cd "$(dirname "$0")/.." && pwd)" +cd "$REPO_ROOT" + +TARGET_DIR="${CARGO_TARGET_DIR:-/mnt/ssd/builds/lighthouse-range-sync-tests}" +FORKS=(base altair bellatrix capella deneb electra fulu) +LCOV_DIR="/tmp/range-cov-forks" +MERGED="/tmp/range-cov-merged.lcov" + +rm -rf "$LCOV_DIR" +mkdir -p "$LCOV_DIR" + +echo "=== Running coverage for each fork ===" +for fork in "${FORKS[@]}"; do + echo "--- $fork ---" + CARGO_TARGET_DIR="$TARGET_DIR" FORK_NAME="$fork" \ + cargo llvm-cov --features "network/fake_crypto,network/fork_from_env" \ + -p network --lib --lcov --output-path "$LCOV_DIR/$fork.lcov" \ + -- "sync::tests::range" 2>&1 | grep -E "test result|running" +done + +echo "" +echo "=== Merging lcov files ===" + +# Merge all lcov files: for each source file, take max hit count per line +python3 - "$LCOV_DIR" "$MERGED" << 'PYEOF' +import sys, os, glob +from collections import defaultdict + +lcov_dir = sys.argv[1] +output = sys.argv[2] + +# Parse all lcov files: file -> line -> max hits +coverage = defaultdict(lambda: defaultdict(int)) +fn_coverage = defaultdict(lambda: defaultdict(int)) +current_sf = None + +for lcov_file in sorted(glob.glob(os.path.join(lcov_dir, "*.lcov"))): + with open(lcov_file) as f: + for line in f: + line = line.strip() + if 
line.startswith("SF:"): + current_sf = line[3:] + elif line.startswith("DA:") and current_sf: + parts = line[3:].split(",") + lineno = int(parts[0]) + hits = int(parts[1]) + coverage[current_sf][lineno] = max(coverage[current_sf][lineno], hits) + elif line.startswith("FNDA:") and current_sf: + parts = line[5:].split(",", 1) + hits = int(parts[0]) + fn_name = parts[1] + fn_coverage[current_sf][fn_name] = max(fn_coverage[current_sf][fn_name], hits) + +# Write merged lcov +with open(output, "w") as f: + for sf in sorted(coverage.keys()): + f.write(f"SF:{sf}\n") + for fn_name, hits in sorted(fn_coverage.get(sf, {}).items()): + f.write(f"FNDA:{hits},{fn_name}\n") + for lineno in sorted(coverage[sf].keys()): + f.write(f"DA:{lineno},{coverage[sf][lineno]}\n") + total = len(coverage[sf]) + covered = sum(1 for h in coverage[sf].values() if h > 0) + f.write(f"LH:{covered}\n") + f.write(f"LF:{total}\n") + f.write("end_of_record\n") + +print(f"Merged {len(glob.glob(os.path.join(lcov_dir, '*.lcov')))} lcov files -> {output}") +PYEOF + +echo "" +echo "=== Range sync coverage (merged across all forks) ===" + +# Extract and display range sync files +python3 - "$MERGED" << 'PYEOF' +import sys +from collections import defaultdict + +current_sf = None +files = {} # short_name -> (total_lines, covered_lines) +lines = defaultdict(dict) + +with open(sys.argv[1]) as f: + for line in f: + line = line.strip() + if line.startswith("SF:"): + current_sf = line[3:] + elif line.startswith("DA:") and current_sf: + parts = line[3:].split(",") + lineno, hits = int(parts[0]), int(parts[1]) + lines[current_sf][lineno] = hits + +# Filter to range sync files +targets = [ + "range_sync/chain.rs", + "range_sync/chain_collection.rs", + "range_sync/range.rs", + "requests/blocks_by_range.rs", + "requests/blobs_by_range.rs", + "requests/data_columns_by_range.rs", +] + +print(f"{'File':<45} {'Lines':>6} {'Covered':>8} {'Missed':>7} {'Coverage':>9}") +print("-" * 80) + +total_all = 0 +covered_all = 0 + +for 
sf in sorted(lines.keys()): + short = sf.split("sync/")[-1] if "sync/" in sf else sf.split("/")[-1] + if not any(t in sf for t in targets): + continue + total = len(lines[sf]) + covered = sum(1 for h in lines[sf].values() if h > 0) + missed = total - covered + pct = covered / total * 100 if total > 0 else 0 + total_all += total + covered_all += covered + print(f"{short:<45} {total:>6} {covered:>8} {missed:>7} {pct:>8.1f}%") + +print("-" * 80) +pct_all = covered_all / total_all * 100 if total_all > 0 else 0 +print(f"{'TOTAL':<45} {total_all:>6} {covered_all:>8} {total_all - covered_all:>7} {pct_all:>8.1f}%") +PYEOF + +if [ "$1" = "--html" ]; then + echo "" + echo "=== Generating HTML report ===" + genhtml "$MERGED" -o /tmp/range-cov-html --ignore-errors source 2>/dev/null + echo "HTML report: /tmp/range-cov-html/index.html" +fi From cd60ea80bb3383843d703443901dbb834dd47193 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 31 Mar 2026 16:59:36 +1100 Subject: [PATCH 061/127] Update to spec v1.7.0-alpha.4 (#9046) Update our consensus code to v1.7.0-alpha.4 Co-Authored-By: Michael Sproul --- .../src/per_epoch_processing/altair.rs | 11 ++- .../src/per_epoch_processing/single_pass.rs | 76 +++++++++++++++++- .../state_processing/src/upgrade/gloas.rs | 43 +++++++++- consensus/types/src/core/eth_spec.rs | 20 ++++- consensus/types/src/state/beacon_state.rs | 79 ++++++++++++++++++- consensus/types/src/state/committee_cache.rs | 37 ++++++++- testing/ef_tests/Makefile | 2 +- testing/ef_tests/check_all_files_accessed.py | 6 +- .../ef_tests/src/cases/epoch_processing.rs | 19 ++++- testing/ef_tests/src/cases/operations.rs | 6 +- testing/ef_tests/src/lib.rs | 2 +- testing/ef_tests/tests/tests.rs | 6 ++ 12 files changed, 279 insertions(+), 28 deletions(-) diff --git a/consensus/state_processing/src/per_epoch_processing/altair.rs b/consensus/state_processing/src/per_epoch_processing/altair.rs index d9e69647304..683d92d836f 100644 --- 
a/consensus/state_processing/src/per_epoch_processing/altair.rs +++ b/consensus/state_processing/src/per_epoch_processing/altair.rs @@ -51,8 +51,8 @@ pub fn process_epoch( // without loss of correctness. let current_epoch_progressive_balances = state.progressive_balances_cache().clone(); let current_epoch_total_active_balance = state.get_total_active_balance()?; - let participation_summary = - process_epoch_single_pass(state, spec, SinglePassConfig::default())?; + let epoch_result = process_epoch_single_pass(state, spec, SinglePassConfig::default())?; + let participation_summary = epoch_result.summary; // Reset eth1 data votes. process_eth1_data_reset(state)?; @@ -79,6 +79,13 @@ pub fn process_epoch( // Rotate the epoch caches to suit the epoch transition. state.advance_caches()?; + + // Install the lookahead committee cache (built during PTC window processing) as the Next + // cache. After advance_caches, the lookahead epoch becomes the Next relative epoch. + if let Some(cache) = epoch_result.lookahead_committee_cache { + state.set_committee_cache(RelativeEpoch::Next, cache)?; + } + update_progressive_balances_on_epoch_transition(state, spec)?; Ok(EpochProcessingSummary::Altair { diff --git a/consensus/state_processing/src/per_epoch_processing/single_pass.rs b/consensus/state_processing/src/per_epoch_processing/single_pass.rs index 4eb1e36628b..976607aa764 100644 --- a/consensus/state_processing/src/per_epoch_processing/single_pass.rs +++ b/consensus/state_processing/src/per_epoch_processing/single_pass.rs @@ -12,12 +12,13 @@ use milhouse::{Cow, List, Vector}; use safe_arith::{SafeArith, SafeArithIter}; use std::cmp::{max, min}; use std::collections::{BTreeSet, HashMap}; +use std::sync::Arc; use tracing::instrument; use typenum::Unsigned; use types::{ ActivationQueue, BeaconState, BeaconStateError, BuilderPendingPayment, ChainSpec, Checkpoint, - DepositData, Epoch, EthSpec, ExitCache, ForkName, ParticipationFlags, PendingDeposit, - ProgressiveBalancesCache, 
RelativeEpoch, Validator, + CommitteeCache, DepositData, Epoch, EthSpec, ExitCache, ForkName, ParticipationFlags, + PendingDeposit, ProgressiveBalancesCache, RelativeEpoch, Validator, consts::altair::{ NUM_FLAG_INDICES, PARTICIPATION_FLAG_WEIGHTS, TIMELY_HEAD_FLAG_INDEX, TIMELY_TARGET_FLAG_INDEX, WEIGHT_DENOMINATOR, @@ -34,6 +35,7 @@ pub struct SinglePassConfig { pub effective_balance_updates: bool, pub proposer_lookahead: bool, pub builder_pending_payments: bool, + pub ptc_window: bool, } impl Default for SinglePassConfig { @@ -54,6 +56,7 @@ impl SinglePassConfig { effective_balance_updates: true, proposer_lookahead: true, builder_pending_payments: true, + ptc_window: true, } } @@ -68,6 +71,7 @@ impl SinglePassConfig { effective_balance_updates: false, proposer_lookahead: false, builder_pending_payments: false, + ptc_window: false, } } } @@ -139,12 +143,20 @@ impl ValidatorInfo { } } +/// Result of single-pass epoch processing. +pub struct SinglePassEpochResult { + pub summary: ParticipationEpochSummary, + /// Committee cache for the lookahead epoch, built during PTC window processing. + /// Can be installed as the Next committee cache after `advance_caches`. + pub lookahead_committee_cache: Option>, +} + #[instrument(skip_all)] pub fn process_epoch_single_pass( state: &mut BeaconState, spec: &ChainSpec, conf: SinglePassConfig, -) -> Result, Error> { +) -> Result, Error> { initialize_epoch_cache(state, spec)?; initialize_progressive_balances_cache(state, spec)?; state.build_exit_cache(spec)?; @@ -479,7 +491,16 @@ pub fn process_epoch_single_pass( process_proposer_lookahead(state, spec)?; } - Ok(summary) + let lookahead_committee_cache = if conf.ptc_window && fork_name.gloas_enabled() { + Some(process_ptc_window(state, spec)?) 
+ } else { + None + }; + + Ok(SinglePassEpochResult { + summary, + lookahead_committee_cache, + }) } // TOOO(EIP-7917): use balances cache @@ -512,6 +533,53 @@ pub fn process_proposer_lookahead( Ok(()) } +/// Process the PTC window, returning the committee cache built for the lookahead epoch. +/// +/// The returned cache can be injected into the state's Next committee cache slot after +/// `advance_caches` is called during the epoch transition, avoiding redundant recomputation. +pub fn process_ptc_window( + state: &mut BeaconState, + spec: &ChainSpec, +) -> Result, Error> { + let slots_per_epoch = E::slots_per_epoch() as usize; + + // Convert Vector -> List to use tree-efficient pop_front. + let ptc_window = state.ptc_window()?.clone(); + let mut window: List<_, E::PtcWindowLength> = List::from(ptc_window); + + // Drop the oldest epoch from the front (reuses shared tree nodes). + window + .pop_front(slots_per_epoch) + .map_err(|e| Error::BeaconStateError(BeaconStateError::MilhouseError(e)))?; + + // Compute PTC for the new lookahead epoch + let next_epoch = state + .current_epoch() + .safe_add(spec.min_seed_lookahead.as_u64())? + .safe_add(1)?; + let start_slot = next_epoch.start_slot(E::slots_per_epoch()); + + // Build a committee cache for the lookahead epoch (beyond the normal Next bound) + let committee_cache = state.initialize_committee_cache_for_lookahead(next_epoch, spec)?; + + for i in 0..slots_per_epoch { + let slot = start_slot.safe_add(i as u64)?; + let ptc = state.compute_ptc_with_cache(slot, &committee_cache, spec)?; + let ptc_u64: Vec = ptc.into_iter().map(|v| v as u64).collect(); + let entry = ssz_types::FixedVector::new(ptc_u64) + .map_err(|e| Error::BeaconStateError(BeaconStateError::SszTypesError(e)))?; + window + .push(entry) + .map_err(|e| Error::BeaconStateError(BeaconStateError::MilhouseError(e)))?; + } + + // Convert List back to Vector. + *state.ptc_window_mut()? 
= Vector::try_from(window) + .map_err(|e| Error::BeaconStateError(BeaconStateError::MilhouseError(e)))?; + + Ok(committee_cache) +} + /// Calculate the quorum threshold for builder payments based on total active balance. fn get_builder_payment_quorum_threshold( state_ctxt: &StateContext, diff --git a/consensus/state_processing/src/upgrade/gloas.rs b/consensus/state_processing/src/upgrade/gloas.rs index 7a88383ab0d..b39ee6048f7 100644 --- a/consensus/state_processing/src/upgrade/gloas.rs +++ b/consensus/state_processing/src/upgrade/gloas.rs @@ -2,7 +2,9 @@ use crate::per_block_processing::{ is_valid_deposit_signature, process_operations::apply_deposit_for_builder, }; use milhouse::{List, Vector}; +use safe_arith::SafeArith; use ssz_types::BitVector; +use ssz_types::FixedVector; use std::collections::HashSet; use std::mem; use typenum::Unsigned; @@ -102,13 +104,11 @@ pub fn upgrade_state_to_gloas( vec![0xFFu8; E::SlotsPerHistoricalRoot::to_usize() / 8].into(), ) .map_err(|_| Error::InvalidBitfield)?, - builder_pending_payments: Vector::new(vec![ - BuilderPendingPayment::default(); - E::builder_pending_payments_limit() - ])?, + builder_pending_payments: Vector::from_elem(BuilderPendingPayment::default())?, builder_pending_withdrawals: List::default(), // Empty list initially, latest_block_hash: pre.latest_execution_payload_header.block_hash, payload_expected_withdrawals: List::default(), + ptc_window: Vector::from_elem(FixedVector::from_elem(0))?, // placeholder, will be initialized below // Caches total_active_balance: pre.total_active_balance, progressive_balances_cache: mem::take(&mut pre.progressive_balances_cache), @@ -120,10 +120,45 @@ pub fn upgrade_state_to_gloas( }); // [New in Gloas:EIP7732] onboard_builders_from_pending_deposits(&mut post, spec)?; + initialize_ptc_window(&mut post, spec)?; Ok(post) } +/// Initialize the `ptc_window` field in the beacon state at fork transition. 
+/// +/// The window contains: +/// - One epoch of empty entries (previous epoch) +/// - Computed PTC for the current epoch through `1 + MIN_SEED_LOOKAHEAD` epochs +fn initialize_ptc_window( + state: &mut BeaconState, + spec: &ChainSpec, +) -> Result<(), Error> { + let slots_per_epoch = E::slots_per_epoch() as usize; + + let empty_previous_epoch = vec![FixedVector::::from_elem(0); slots_per_epoch]; + let mut ptcs = empty_previous_epoch; + + // Compute PTC for current epoch + lookahead epochs + let current_epoch = state.current_epoch(); + for e in 0..=spec.min_seed_lookahead.as_u64() { + let epoch = current_epoch.safe_add(e)?; + let committee_cache = state.initialize_committee_cache_for_lookahead(epoch, spec)?; + let start_slot = epoch.start_slot(E::slots_per_epoch()); + for i in 0..slots_per_epoch { + let slot = start_slot.safe_add(i as u64)?; + let ptc = state.compute_ptc_with_cache(slot, &committee_cache, spec)?; + let ptc_u64: Vec = ptc.into_iter().map(|v| v as u64).collect(); + let entry = FixedVector::new(ptc_u64)?; + ptcs.push(entry); + } + } + + *state.ptc_window_mut()? = Vector::new(ptcs)?; + + Ok(()) +} + /// Applies any pending deposit for builders, effectively onboarding builders at the fork. 
fn onboard_builders_from_pending_deposits( state: &mut BeaconState, diff --git a/consensus/types/src/core/eth_spec.rs b/consensus/types/src/core/eth_spec.rs index a4b22da3f88..36d61fbbf9b 100644 --- a/consensus/types/src/core/eth_spec.rs +++ b/consensus/types/src/core/eth_spec.rs @@ -6,9 +6,9 @@ use std::{ use safe_arith::{ArithError, SafeArith}; use serde::{Deserialize, Serialize}; use typenum::{ - U0, U1, U2, U4, U8, U16, U17, U32, U64, U128, U256, U512, U625, U1024, U2048, U4096, U8192, - U16384, U65536, U131072, U262144, U1048576, U16777216, U33554432, U134217728, U1073741824, - U1099511627776, UInt, Unsigned, bit::B0, + U0, U1, U2, U4, U8, U16, U17, U24, U32, U48, U64, U96, U128, U256, U512, U625, U1024, U2048, + U4096, U8192, U16384, U65536, U131072, U262144, U1048576, U16777216, U33554432, U134217728, + U1073741824, U1099511627776, UInt, Unsigned, bit::B0, }; use crate::core::{ChainSpec, Epoch}; @@ -176,6 +176,7 @@ pub trait EthSpec: 'static + Default + Sync + Send + Clone + Debug + PartialEq + * New in Gloas */ type PTCSize: Unsigned + Clone + Sync + Send + Debug + PartialEq; + type PtcWindowLength: Unsigned + Clone + Sync + Send + Debug + PartialEq; type MaxPayloadAttestations: Unsigned + Clone + Sync + Send + Debug + PartialEq; type BuilderPendingPaymentsLimit: Unsigned + Clone + Sync + Send + Debug + PartialEq; type BuilderPendingWithdrawalsLimit: Unsigned + Clone + Sync + Send + Debug + PartialEq; @@ -428,6 +429,11 @@ pub trait EthSpec: 'static + Default + Sync + Send + Clone + Debug + PartialEq + Self::PTCSize::to_usize() } + /// Returns the `PtcWindowLength` constant for this specification. + fn ptc_window_length() -> usize { + Self::PtcWindowLength::to_usize() + } + /// Returns the `MaxPayloadAttestations` constant for this specification. 
fn max_payload_attestations() -> usize { Self::MaxPayloadAttestations::to_usize() @@ -515,6 +521,7 @@ impl EthSpec for MainnetEthSpec { type MaxWithdrawalRequestsPerPayload = U16; type MaxPendingDepositsPerEpoch = U16; type PTCSize = U512; + type PtcWindowLength = U96; // (2 + MIN_SEED_LOOKAHEAD) * SLOTS_PER_EPOCH type MaxPayloadAttestations = U4; type MaxBuildersPerWithdrawalsSweep = U16384; @@ -561,6 +568,7 @@ impl EthSpec for MinimalEthSpec { type ProposerLookaheadSlots = U16; // Derived from (MIN_SEED_LOOKAHEAD + 1) * SLOTS_PER_EPOCH type BuilderPendingPaymentsLimit = U16; // 2 * SLOTS_PER_EPOCH = 2 * 8 = 16 type PTCSize = U2; + type PtcWindowLength = U24; // (2 + MIN_SEED_LOOKAHEAD) * SLOTS_PER_EPOCH type MaxBuildersPerWithdrawalsSweep = U16; params_from_eth_spec!(MainnetEthSpec { @@ -668,6 +676,7 @@ impl EthSpec for GnosisEthSpec { type ProposerLookaheadSlots = U32; // Derived from (MIN_SEED_LOOKAHEAD + 1) * SLOTS_PER_EPOCH type BuilderRegistryLimit = U1099511627776; type PTCSize = U512; + type PtcWindowLength = U48; // (2 + MIN_SEED_LOOKAHEAD) * SLOTS_PER_EPOCH type MaxPayloadAttestations = U2; type MaxBuildersPerWithdrawalsSweep = U16384; @@ -694,6 +703,11 @@ mod test { E::proposer_lookahead_slots(), (spec.min_seed_lookahead.as_usize() + 1) * E::slots_per_epoch() as usize ); + assert_eq!( + E::ptc_window_length(), + (spec.min_seed_lookahead.as_usize() + 2) * E::slots_per_epoch() as usize, + "PtcWindowLength must equal (2 + MIN_SEED_LOOKAHEAD) * SLOTS_PER_EPOCH" + ); } #[test] diff --git a/consensus/types/src/state/beacon_state.rs b/consensus/types/src/state/beacon_state.rs index f431055c5fe..a033272b9d9 100644 --- a/consensus/types/src/state/beacon_state.rs +++ b/consensus/types/src/state/beacon_state.rs @@ -667,6 +667,11 @@ where #[superstruct(only(Gloas))] pub payload_expected_withdrawals: List, + #[compare_fields(as_iter)] + #[test_random(default)] + #[superstruct(only(Gloas))] + pub ptc_window: Vector, E::PtcWindowLength>, + // Caching (not in the spec) 
#[serde(skip_serializing, skip_deserializing)] #[ssz(skip_serializing, skip_deserializing)] @@ -2431,6 +2436,18 @@ impl BeaconState { CommitteeCache::initialized(self, epoch, spec) } + /// Like [`initialize_committee_cache`](Self::initialize_committee_cache), but allows epochs + /// beyond `current_epoch + 1`. Only checks that the required randao seed is available. + /// + /// Used by PTC window computation which needs shufflings for lookahead epochs. + pub fn initialize_committee_cache_for_lookahead( + &self, + epoch: Epoch, + spec: &ChainSpec, + ) -> Result, BeaconStateError> { + CommitteeCache::initialized_for_lookahead(self, epoch, spec) + } + /// Advances the cache for this state into the next epoch. /// /// This should be used if the `slot` of this state is advanced beyond an epoch boundary. @@ -2501,6 +2518,17 @@ impl BeaconState { .ok_or(BeaconStateError::CommitteeCachesOutOfBounds(index)) } + /// Set the committee cache for the given `relative_epoch` to `cache`. + pub fn set_committee_cache( + &mut self, + relative_epoch: RelativeEpoch, + cache: Arc, + ) -> Result<(), BeaconStateError> { + let i = Self::committee_cache_index(relative_epoch); + *self.committee_cache_at_index_mut(i)? = cache; + Ok(()) + } + /// Returns the cache for some `RelativeEpoch`. Returns an error if the cache has not been /// initialized. pub fn committee_cache( @@ -3084,12 +3112,55 @@ impl BeaconState { } } - /// Get the payload timeliness committee for the given `slot`. - /// - /// Requires the committee cache to be initialized. - /// TODO(EIP-7732): definitely gonna have to cache this.. + /// Get the payload timeliness committee for the given `slot` from the `ptc_window`. 
pub fn get_ptc(&self, slot: Slot, spec: &ChainSpec) -> Result, BeaconStateError> { + let ptc_window = self.ptc_window()?; + let epoch = slot.epoch(E::slots_per_epoch()); + let state_epoch = self.current_epoch(); + let slots_per_epoch = E::slots_per_epoch() as usize; + let slot_in_epoch = slot.as_usize().safe_rem(slots_per_epoch)?; + + let index = if epoch < state_epoch { + if epoch.safe_add(1)? != state_epoch { + return Err(BeaconStateError::SlotOutOfBounds); + } + slot_in_epoch + } else { + if epoch > state_epoch.safe_add(spec.min_seed_lookahead)? { + return Err(BeaconStateError::SlotOutOfBounds); + } + let offset = epoch + .safe_sub(state_epoch)? + .safe_add(1)? + .as_usize() + .safe_mul(slots_per_epoch)?; + offset.safe_add(slot_in_epoch)? + }; + + let entry = ptc_window + .get(index) + .ok_or(BeaconStateError::SlotOutOfBounds)?; + + // Convert from FixedVector to PTC (FixedVector) + let indices: Vec = entry.iter().map(|&v| v as usize).collect(); + Ok(PTC(FixedVector::new(indices)?)) + } + + /// Compute the payload timeliness committee for the given `slot` from scratch. + /// + /// Requires the committee cache to be initialized for the slot's epoch. + pub fn compute_ptc(&self, slot: Slot, spec: &ChainSpec) -> Result, BeaconStateError> { let committee_cache = self.committee_cache_at_slot(slot)?; + self.compute_ptc_with_cache(slot, committee_cache, spec) + } + + /// Compute the PTC for a slot using a specific committee cache. 
+ pub fn compute_ptc_with_cache( + &self, + slot: Slot, + committee_cache: &CommitteeCache, + spec: &ChainSpec, + ) -> Result, BeaconStateError> { let committees = committee_cache.get_beacon_committees_at_slot(slot)?; let seed = self.get_ptc_attester_seed(slot, spec)?; diff --git a/consensus/types/src/state/committee_cache.rs b/consensus/types/src/state/committee_cache.rs index 4a28f3c6892..2e74ab760cb 100644 --- a/consensus/types/src/state/committee_cache.rs +++ b/consensus/types/src/state/committee_cache.rs @@ -62,6 +62,9 @@ fn compare_shuffling_positions(xs: &Vec, ys: &Vec( state: &BeaconState, @@ -81,12 +84,44 @@ impl CommitteeCache { || epoch > state .current_epoch() - .safe_add(1) + .safe_add(1u64) .map_err(BeaconStateError::ArithError)? { return Err(BeaconStateError::EpochOutOfBounds); } + Self::initialized_unchecked(state, epoch, spec) + } + + /// Return a new, fully initialized cache for a lookahead epoch. + /// + /// Like [`initialized`](Self::initialized), but allows epochs beyond `current_epoch + 1`. + /// The only bound enforced is that the required randao seed is available in the state. + /// + /// This is used by PTC window computation, which needs committee shufflings for + /// `current_epoch + 1 + MIN_SEED_LOOKAHEAD`. + pub fn initialized_for_lookahead( + state: &BeaconState, + epoch: Epoch, + spec: &ChainSpec, + ) -> Result, BeaconStateError> { + let reqd_randao_epoch = epoch + .saturating_sub(spec.min_seed_lookahead) + .saturating_sub(1u64); + + if reqd_randao_epoch < state.min_randao_epoch() { + return Err(BeaconStateError::EpochOutOfBounds); + } + + Self::initialized_unchecked(state, epoch, spec) + } + + /// Core committee cache construction. Callers are responsible for bounds-checking `epoch`. + fn initialized_unchecked( + state: &BeaconState, + epoch: Epoch, + spec: &ChainSpec, + ) -> Result, BeaconStateError> { // May cause divide-by-zero errors. 
if E::slots_per_epoch() == 0 { return Err(BeaconStateError::ZeroSlotsPerEpoch); diff --git a/testing/ef_tests/Makefile b/testing/ef_tests/Makefile index 48378a4c958..ab24ea35a04 100644 --- a/testing/ef_tests/Makefile +++ b/testing/ef_tests/Makefile @@ -1,6 +1,6 @@ # To download/extract nightly tests, run: # CONSENSUS_SPECS_TEST_VERSION=nightly make -CONSENSUS_SPECS_TEST_VERSION ?= v1.7.0-alpha.3 +CONSENSUS_SPECS_TEST_VERSION ?= v1.7.0-alpha.4 REPO_NAME := consensus-spec-tests OUTPUT_DIR := ./$(REPO_NAME) diff --git a/testing/ef_tests/check_all_files_accessed.py b/testing/ef_tests/check_all_files_accessed.py index dd6be143067..2daafada31c 100755 --- a/testing/ef_tests/check_all_files_accessed.py +++ b/testing/ef_tests/check_all_files_accessed.py @@ -53,6 +53,8 @@ "tests/.*/gloas/fork_choice/.*", # Ignore MatrixEntry SSZ tests for now. "tests/.*/.*/ssz_static/MatrixEntry/.*", + # TODO: partial data column not implemented yet + "tests/.*/.*/ssz_static/PartialDataColumn.*/.*", # TODO(gloas): Ignore Gloas light client stuff for now "tests/.*/gloas/ssz_static/LightClient.*/.*", # Execution payload header is irrelevant after Gloas, this type will probably be deleted. @@ -73,7 +75,9 @@ "tests/.*/compute_verify_cell_kzg_proof_batch_challenge/.*", "tests/.*/compute_challenge/.*", # We don't need these manifest files at the moment. 
- "tests/.*/manifest.yaml" + "tests/.*/manifest.yaml", + # TODO: gossip condition tests not implemented yet + "tests/.*/.*/networking/.*" ] diff --git a/testing/ef_tests/src/cases/epoch_processing.rs b/testing/ef_tests/src/cases/epoch_processing.rs index 7a90fc70d0b..a032aa917fe 100644 --- a/testing/ef_tests/src/cases/epoch_processing.rs +++ b/testing/ef_tests/src/cases/epoch_processing.rs @@ -12,7 +12,7 @@ use state_processing::per_epoch_processing::effective_balance_updates::{ process_effective_balance_updates, process_effective_balance_updates_slow, }; use state_processing::per_epoch_processing::single_pass::{ - SinglePassConfig, process_epoch_single_pass, process_proposer_lookahead, + SinglePassConfig, process_epoch_single_pass, process_proposer_lookahead, process_ptc_window, }; use state_processing::per_epoch_processing::{ altair, base, @@ -80,6 +80,8 @@ pub struct ParticipationFlagUpdates; #[derive(Debug)] pub struct ProposerLookahead; #[derive(Debug)] +pub struct PtcWindow; +#[derive(Debug)] pub struct BuilderPendingPayments; type_name!( @@ -102,6 +104,7 @@ type_name!(SyncCommitteeUpdates, "sync_committee_updates"); type_name!(InactivityUpdates, "inactivity_updates"); type_name!(ParticipationFlagUpdates, "participation_flag_updates"); type_name!(ProposerLookahead, "proposer_lookahead"); +type_name!(PtcWindow, "ptc_window"); type_name!(BuilderPendingPayments, "builder_pending_payments"); impl EpochTransition for JustificationAndFinalization { @@ -296,6 +299,16 @@ impl EpochTransition for ProposerLookahead { } } +impl EpochTransition for PtcWindow { + fn run(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), EpochProcessingError> { + if state.fork_name_unchecked().gloas_enabled() { + process_ptc_window(state, spec).map(|_| ()) + } else { + Ok(()) + } + } +} + impl EpochTransition for BuilderPendingPayments { fn run(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), EpochProcessingError> { process_epoch_single_pass( @@ -373,7 +386,9 @@ impl> Case 
for EpochProcessing { return false; } - if !fork_name.gloas_enabled() && T::name() == "builder_pending_payments" { + if !fork_name.gloas_enabled() + && (T::name() == "builder_pending_payments" || T::name() == "ptc_window") + { return false; } diff --git a/testing/ef_tests/src/cases/operations.rs b/testing/ef_tests/src/cases/operations.rs index 798c66b6666..1399815763b 100644 --- a/testing/ef_tests/src/cases/operations.rs +++ b/testing/ef_tests/src/cases/operations.rs @@ -717,11 +717,7 @@ impl> LoadCase for Operations { // Check BLS setting here before SSZ deserialization, as most types require signatures // to be valid. let operation_path = path.join(O::filename()); - let (operation, bls_error) = if !operation_path.is_file() { - // Some test cases (e.g. builder_voluntary_exit__success) have no operation file. - // TODO(gloas): remove this once the test vectors are fixed - (None, None) - } else if metadata.bls_setting.unwrap_or_default().check().is_ok() { + let (operation, bls_error) = if metadata.bls_setting.unwrap_or_default().check().is_ok() { match O::decode(&operation_path, fork_name, spec) { Ok(op) => (Some(op), None), Err(Error::InvalidBLSInput(error)) => (None, Some(error)), diff --git a/testing/ef_tests/src/lib.rs b/testing/ef_tests/src/lib.rs index 94b19b66446..5587bbed413 100644 --- a/testing/ef_tests/src/lib.rs +++ b/testing/ef_tests/src/lib.rs @@ -3,7 +3,7 @@ pub use cases::{ BuilderPendingPayments, Case, EffectiveBalanceUpdates, Eth1DataReset, ExecutionPayloadBidBlock, FeatureName, HistoricalRootsUpdate, HistoricalSummariesUpdate, InactivityUpdates, JustificationAndFinalization, ParticipationFlagUpdates, ParticipationRecordUpdates, - PendingBalanceDeposits, PendingConsolidations, ProposerLookahead, RandaoMixesReset, + PendingBalanceDeposits, PendingConsolidations, ProposerLookahead, PtcWindow, RandaoMixesReset, RegistryUpdates, RewardsAndPenalties, Slashings, SlashingsReset, SyncCommitteeUpdates, WithdrawalsPayload, }; diff --git 
a/testing/ef_tests/tests/tests.rs b/testing/ef_tests/tests/tests.rs index cb4abed90ab..62eb2dd038e 100644 --- a/testing/ef_tests/tests/tests.rs +++ b/testing/ef_tests/tests/tests.rs @@ -960,6 +960,12 @@ fn epoch_processing_proposer_lookahead() { EpochProcessingHandler::::default().run(); } +#[test] +fn epoch_processing_ptc_window() { + EpochProcessingHandler::::default().run(); + EpochProcessingHandler::::default().run(); +} + #[test] fn epoch_processing_builder_pending_payments() { EpochProcessingHandler::::default().run(); From 7fe9da0043c9ad3b0b9392d08776a9a9d001f8b9 Mon Sep 17 00:00:00 2001 From: Daniel Knopik <107140945+dknopik@users.noreply.github.com> Date: Tue, 31 Mar 2026 08:16:34 +0200 Subject: [PATCH 062/127] Add Gloas SSE event boilerplate (#9053) Implement boilerplate for new SSE events as specified in - https://github.com/ethereum/beacon-APIs/pull/588 While that one is not merged yet, I believe the SSE events might be utilized in Dora already. Implement the boilerplate, i.e. subscription tracking and publish queues. A PR to implement to fully implement already implementable events will follow. 
Co-Authored-By: Daniel Knopik --- beacon_node/beacon_chain/src/events.rs | 75 +++++++++++++++++++++ beacon_node/http_api/src/lib.rs | 15 +++++ common/eth2/src/types.rs | 92 ++++++++++++++++++++++++++ 3 files changed, 182 insertions(+) diff --git a/beacon_node/beacon_chain/src/events.rs b/beacon_node/beacon_chain/src/events.rs index 276edc3fe6f..80667cd3991 100644 --- a/beacon_node/beacon_chain/src/events.rs +++ b/beacon_node/beacon_chain/src/events.rs @@ -25,6 +25,11 @@ pub struct ServerSentEventHandler { attester_slashing_tx: Sender>, bls_to_execution_change_tx: Sender>, block_gossip_tx: Sender>, + execution_payload_tx: Sender>, + execution_payload_gossip_tx: Sender>, + execution_payload_available_tx: Sender>, + execution_payload_bid_tx: Sender>, + payload_attestation_message_tx: Sender>, } impl ServerSentEventHandler { @@ -51,6 +56,11 @@ impl ServerSentEventHandler { let (attester_slashing_tx, _) = broadcast::channel(capacity); let (bls_to_execution_change_tx, _) = broadcast::channel(capacity); let (block_gossip_tx, _) = broadcast::channel(capacity); + let (execution_payload_tx, _) = broadcast::channel(capacity); + let (execution_payload_gossip_tx, _) = broadcast::channel(capacity); + let (execution_payload_available_tx, _) = broadcast::channel(capacity); + let (execution_payload_bid_tx, _) = broadcast::channel(capacity); + let (payload_attestation_message_tx, _) = broadcast::channel(capacity); Self { attestation_tx, @@ -71,6 +81,11 @@ impl ServerSentEventHandler { attester_slashing_tx, bls_to_execution_change_tx, block_gossip_tx, + execution_payload_tx, + execution_payload_gossip_tx, + execution_payload_available_tx, + execution_payload_bid_tx, + payload_attestation_message_tx, } } @@ -155,6 +170,26 @@ impl ServerSentEventHandler { .block_gossip_tx .send(kind) .map(|count| log_count("block gossip", count)), + EventKind::ExecutionPayload(_) => self + .execution_payload_tx + .send(kind) + .map(|count| log_count("execution payload", count)), + 
EventKind::ExecutionPayloadGossip(_) => self + .execution_payload_gossip_tx + .send(kind) + .map(|count| log_count("execution payload gossip", count)), + EventKind::ExecutionPayloadAvailable(_) => self + .execution_payload_available_tx + .send(kind) + .map(|count| log_count("execution payload available", count)), + EventKind::ExecutionPayloadBid(_) => self + .execution_payload_bid_tx + .send(kind) + .map(|count| log_count("execution payload bid", count)), + EventKind::PayloadAttestationMessage(_) => self + .payload_attestation_message_tx + .send(kind) + .map(|count| log_count("payload attestation message", count)), }; if let Err(SendError(event)) = result { trace!(?event, "No receivers registered to listen for event"); @@ -233,6 +268,26 @@ impl ServerSentEventHandler { self.block_gossip_tx.subscribe() } + pub fn subscribe_execution_payload(&self) -> Receiver> { + self.execution_payload_tx.subscribe() + } + + pub fn subscribe_execution_payload_gossip(&self) -> Receiver> { + self.execution_payload_gossip_tx.subscribe() + } + + pub fn subscribe_execution_payload_available(&self) -> Receiver> { + self.execution_payload_available_tx.subscribe() + } + + pub fn subscribe_execution_payload_bid(&self) -> Receiver> { + self.execution_payload_bid_tx.subscribe() + } + + pub fn subscribe_payload_attestation_message(&self) -> Receiver> { + self.payload_attestation_message_tx.subscribe() + } + pub fn has_attestation_subscribers(&self) -> bool { self.attestation_tx.receiver_count() > 0 } @@ -296,4 +351,24 @@ impl ServerSentEventHandler { pub fn has_block_gossip_subscribers(&self) -> bool { self.block_gossip_tx.receiver_count() > 0 } + + pub fn has_execution_payload_subscribers(&self) -> bool { + self.execution_payload_tx.receiver_count() > 0 + } + + pub fn has_execution_payload_gossip_subscribers(&self) -> bool { + self.execution_payload_gossip_tx.receiver_count() > 0 + } + + pub fn has_execution_payload_available_subscribers(&self) -> bool { + 
self.execution_payload_available_tx.receiver_count() > 0 + } + + pub fn has_execution_payload_bid_subscribers(&self) -> bool { + self.execution_payload_bid_tx.receiver_count() > 0 + } + + pub fn has_payload_attestation_message_subscribers(&self) -> bool { + self.payload_attestation_message_tx.receiver_count() > 0 + } } diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 19d73be89ae..68ab91dc4cd 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -3168,6 +3168,21 @@ pub fn serve( api_types::EventTopic::BlockGossip => { event_handler.subscribe_block_gossip() } + api_types::EventTopic::ExecutionPayload => { + event_handler.subscribe_execution_payload() + } + api_types::EventTopic::ExecutionPayloadGossip => { + event_handler.subscribe_execution_payload_gossip() + } + api_types::EventTopic::ExecutionPayloadAvailable => { + event_handler.subscribe_execution_payload_available() + } + api_types::EventTopic::ExecutionPayloadBid => { + event_handler.subscribe_execution_payload_bid() + } + api_types::EventTopic::PayloadAttestationMessage => { + event_handler.subscribe_payload_attestation_message() + } }; receivers.push( diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index 94dff95bc64..54e9c98b5b2 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -1070,6 +1070,33 @@ pub struct BlockGossip { pub slot: Slot, pub block: Hash256, } +#[derive(PartialEq, Debug, Serialize, Deserialize, Clone)] +pub struct SseExecutionPayload { + pub slot: Slot, + #[serde(with = "serde_utils::quoted_u64")] + pub builder_index: u64, + pub block_hash: ExecutionBlockHash, + pub block_root: Hash256, + pub state_root: Hash256, + pub execution_optimistic: bool, +} + +#[derive(PartialEq, Debug, Serialize, Deserialize, Clone)] +pub struct SseExecutionPayloadGossip { + pub slot: Slot, + #[serde(with = "serde_utils::quoted_u64")] + pub builder_index: u64, + pub block_hash: ExecutionBlockHash, + pub 
block_root: Hash256, + pub state_root: Hash256, +} + +#[derive(PartialEq, Debug, Serialize, Deserialize, Clone)] +pub struct SseExecutionPayloadAvailable { + pub slot: Slot, + pub block_root: Hash256, +} + #[derive(PartialEq, Debug, Serialize, Deserialize, Clone)] pub struct SseChainReorg { pub slot: Slot, @@ -1134,6 +1161,8 @@ pub struct SseExtendedPayloadAttributesGeneric { pub type SseExtendedPayloadAttributes = SseExtendedPayloadAttributesGeneric; pub type VersionedSsePayloadAttributes = ForkVersionedResponse; +pub type VersionedSseExecutionPayloadBid = ForkVersionedResponse>; +pub type VersionedSsePayloadAttestationMessage = ForkVersionedResponse; impl<'de> ContextDeserialize<'de, ForkName> for SsePayloadAttributes { fn context_deserialize(deserializer: D, context: ForkName) -> Result @@ -1210,6 +1239,11 @@ pub enum EventKind { AttesterSlashing(Box>), BlsToExecutionChange(Box), BlockGossip(Box), + ExecutionPayload(SseExecutionPayload), + ExecutionPayloadGossip(SseExecutionPayloadGossip), + ExecutionPayloadAvailable(SseExecutionPayloadAvailable), + ExecutionPayloadBid(Box>), + PayloadAttestationMessage(Box), } impl EventKind { @@ -1233,6 +1267,11 @@ impl EventKind { EventKind::AttesterSlashing(_) => "attester_slashing", EventKind::BlsToExecutionChange(_) => "bls_to_execution_change", EventKind::BlockGossip(_) => "block_gossip", + EventKind::ExecutionPayload(_) => "execution_payload", + EventKind::ExecutionPayloadGossip(_) => "execution_payload_gossip", + EventKind::ExecutionPayloadAvailable(_) => "execution_payload_available", + EventKind::ExecutionPayloadBid(_) => "execution_payload_bid", + EventKind::PayloadAttestationMessage(_) => "payload_attestation_message", } } @@ -1322,6 +1361,40 @@ impl EventKind { "block_gossip" => Ok(EventKind::BlockGossip(serde_json::from_str(data).map_err( |e| ServerError::InvalidServerSentEvent(format!("Block Gossip: {:?}", e)), )?)), + "execution_payload" => Ok(EventKind::ExecutionPayload( + serde_json::from_str(data).map_err(|e| 
{ + ServerError::InvalidServerSentEvent(format!("Execution Payload: {:?}", e)) + })?, + )), + "execution_payload_gossip" => Ok(EventKind::ExecutionPayloadGossip( + serde_json::from_str(data).map_err(|e| { + ServerError::InvalidServerSentEvent(format!( + "Execution Payload Gossip: {:?}", + e + )) + })?, + )), + "execution_payload_available" => Ok(EventKind::ExecutionPayloadAvailable( + serde_json::from_str(data).map_err(|e| { + ServerError::InvalidServerSentEvent(format!( + "Execution Payload Available: {:?}", + e + )) + })?, + )), + "execution_payload_bid" => Ok(EventKind::ExecutionPayloadBid(Box::new( + serde_json::from_str(data).map_err(|e| { + ServerError::InvalidServerSentEvent(format!("Execution Payload Bid: {:?}", e)) + })?, + ))), + "payload_attestation_message" => Ok(EventKind::PayloadAttestationMessage(Box::new( + serde_json::from_str(data).map_err(|e| { + ServerError::InvalidServerSentEvent(format!( + "Payload Attestation Message: {:?}", + e + )) + })?, + ))), _ => Err(ServerError::InvalidServerSentEvent( "Could not parse event tag".to_string(), )), @@ -1357,6 +1430,11 @@ pub enum EventTopic { ProposerSlashing, BlsToExecutionChange, BlockGossip, + ExecutionPayload, + ExecutionPayloadGossip, + ExecutionPayloadAvailable, + ExecutionPayloadBid, + PayloadAttestationMessage, } impl FromStr for EventTopic { @@ -1382,6 +1460,11 @@ impl FromStr for EventTopic { "proposer_slashing" => Ok(EventTopic::ProposerSlashing), "bls_to_execution_change" => Ok(EventTopic::BlsToExecutionChange), "block_gossip" => Ok(EventTopic::BlockGossip), + "execution_payload" => Ok(EventTopic::ExecutionPayload), + "execution_payload_gossip" => Ok(EventTopic::ExecutionPayloadGossip), + "execution_payload_available" => Ok(EventTopic::ExecutionPayloadAvailable), + "execution_payload_bid" => Ok(EventTopic::ExecutionPayloadBid), + "payload_attestation_message" => Ok(EventTopic::PayloadAttestationMessage), _ => Err("event topic cannot be parsed.".to_string()), } } @@ -1408,6 +1491,15 @@ impl 
fmt::Display for EventTopic { EventTopic::ProposerSlashing => write!(f, "proposer_slashing"), EventTopic::BlsToExecutionChange => write!(f, "bls_to_execution_change"), EventTopic::BlockGossip => write!(f, "block_gossip"), + EventTopic::ExecutionPayload => write!(f, "execution_payload"), + EventTopic::ExecutionPayloadGossip => write!(f, "execution_payload_gossip"), + EventTopic::ExecutionPayloadAvailable => { + write!(f, "execution_payload_available") + } + EventTopic::ExecutionPayloadBid => write!(f, "execution_payload_bid"), + EventTopic::PayloadAttestationMessage => { + write!(f, "payload_attestation_message") + } } } } From 367972bb6e7058b4513dd024830eb0d2116e34d6 Mon Sep 17 00:00:00 2001 From: Eitan Seri- Levi Date: Tue, 31 Mar 2026 07:26:02 -0700 Subject: [PATCH 063/127] Default to full payload status if the payload has been received --- consensus/proto_array/src/proto_array.rs | 23 ++++++++++++++++++----- 1 file changed, 18 insertions(+), 5 deletions(-) diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index f68d3eb71b0..6bca814cb4e 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -1431,11 +1431,24 @@ impl ProtoArray { .nodes .get(node.proto_node_index) .ok_or(Error::InvalidNodeIndex(node.proto_node_index))?; - let mut children = vec![(node.with_status(PayloadStatus::Empty), proto_node.clone())]; - // The FULL virtual child only exists if the payload has been received. - if proto_node.payload_received().is_ok_and(|received| received) { - children.push((node.with_status(PayloadStatus::Full), proto_node.clone())); - } + + // TODO(gloas) this is the actual change we want to keep once PTC is implemented + // let mut children = vec![(node.with_status(PayloadStatus::Empty), proto_node.clone())]; + // // The FULL virtual child only exists if the payload has been received. 
+ // if proto_node.payload_received().is_ok_and(|received| received) { + // children.push((node.with_status(PayloadStatus::Full), proto_node.clone())); + // } + + // TODO(gloas) remove this and uncomment the code above once we implement PTC + // Skip Empty/Full split: go straight to Full when payload received, + // giving full payload weight 100% without PTC votes. + let children = if proto_node.payload_received().is_ok_and(|received| received) { + vec![(node.with_status(PayloadStatus::Full), proto_node.clone())] + } else { + vec![(node.with_status(PayloadStatus::Empty), proto_node.clone())] + }; + // TODO(gloas) delete up to here + Ok(children) } else { let child_indices = children_index From 95a58393c652e28ed8d848db0a790892ddb47738 Mon Sep 17 00:00:00 2001 From: Eitan Seri- Levi Date: Tue, 31 Mar 2026 09:59:42 -0700 Subject: [PATCH 064/127] Smol fix --- consensus/proto_array/src/proto_array.rs | 32 +++++++++++++++++-- .../src/proto_array_fork_choice.rs | 6 +++- 2 files changed, 34 insertions(+), 4 deletions(-) diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 6bca814cb4e..96492e695db 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -1431,7 +1431,30 @@ impl ProtoArray { .nodes .get(node.proto_node_index) .ok_or(Error::InvalidNodeIndex(node.proto_node_index))?; - + + // V17 (pre-GLOAS) nodes don't have payload_received or parent_payload_status. + // Skip the virtual Empty/Full split and return real children directly. 
+ if proto_node.as_v17().is_ok() { + let child_indices = children_index + .get(node.proto_node_index) + .map(|c| c.as_slice()) + .unwrap_or(&[]); + return Ok(child_indices + .iter() + .filter_map(|&child_index| { + let child_node = self.nodes.get(child_index)?; + Some(( + IndexedForkChoiceNode { + root: child_node.root(), + proto_node_index: child_index, + payload_status: PayloadStatus::Pending, + }, + child_node.clone(), + )) + }) + .collect()); + } + // TODO(gloas) this is the actual change we want to keep once PTC is implemented // let mut children = vec![(node.with_status(PayloadStatus::Empty), proto_node.clone())]; // // The FULL virtual child only exists if the payload has been received. @@ -1448,7 +1471,7 @@ impl ProtoArray { vec![(node.with_status(PayloadStatus::Empty), proto_node.clone())] }; // TODO(gloas) delete up to here - + Ok(children) } else { let child_indices = children_index @@ -1459,7 +1482,10 @@ impl ProtoArray { .iter() .filter_map(|&child_index| { let child_node = self.nodes.get(child_index)?; - if child_node.get_parent_payload_status() != node.payload_status { + // Skip parent_payload_status filter for V17 children (they don't have it) + if child_node.as_v17().is_err() + && child_node.get_parent_payload_status() != node.payload_status + { return None; } Some(( diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index 6c90af13028..cb467f2531e 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -997,7 +997,11 @@ impl ProtoArrayForkChoice { /// Returns the `block.execution_status` field, if the block is present. 
pub fn get_block_execution_status(&self, block_root: &Hash256) -> Option { let block = self.get_proto_node(block_root)?; - block.execution_status().ok() + Some( + block + .execution_status() + .unwrap_or_else(|_| ExecutionStatus::irrelevant()), + ) } /// Returns whether the execution payload for a block has been received. From 7645064a38de1087af283f5e16576aa1d4bfa53f Mon Sep 17 00:00:00 2001 From: Eitan Seri- Levi Date: Tue, 31 Mar 2026 10:40:39 -0700 Subject: [PATCH 065/127] Fix fc --- .../execution_pending_envelope.rs | 14 ++++++++------ .../src/payload_envelope_verification/mod.rs | 16 ++++++++++++++++ 2 files changed, 24 insertions(+), 6 deletions(-) diff --git a/beacon_node/beacon_chain/src/payload_envelope_verification/execution_pending_envelope.rs b/beacon_node/beacon_chain/src/payload_envelope_verification/execution_pending_envelope.rs index 86f9293c8f2..a538f35689c 100644 --- a/beacon_node/beacon_chain/src/payload_envelope_verification/execution_pending_envelope.rs +++ b/beacon_node/beacon_chain/src/payload_envelope_verification/execution_pending_envelope.rs @@ -12,7 +12,7 @@ use crate::{ PayloadVerificationOutcome, block_verification::PayloadVerificationHandle, payload_envelope_verification::{ - EnvelopeError, EnvelopeImportData, MaybeAvailableEnvelope, + AvailableEnvelope, EnvelopeError, EnvelopeImportData, MaybeAvailableEnvelope, gossip_verified_envelope::GossipVerifiedEnvelope, load_snapshot_from_state_root, payload_notifier::PayloadNotifier, }, @@ -32,7 +32,6 @@ impl GossipVerifiedEnvelope { ) -> Result, EnvelopeError> { let signed_envelope = self.signed_envelope; let envelope = &signed_envelope.message; - let payload = &envelope.payload; // Define a future that will verify the execution payload with an execution engine. 
// @@ -91,10 +90,13 @@ impl GossipVerifiedEnvelope { )?; Ok(ExecutionPendingEnvelope { - signed_envelope: MaybeAvailableEnvelope::AvailabilityPending { - block_hash: payload.block_hash, - envelope: signed_envelope, - }, + signed_envelope: MaybeAvailableEnvelope::Available(AvailableEnvelope::new( + signed_envelope.block_hash(), + signed_envelope.clone(), + vec![], + None, + chain.spec.clone(), + )), import_data: EnvelopeImportData { block_root, post_state: Box::new(state), diff --git a/beacon_node/beacon_chain/src/payload_envelope_verification/mod.rs b/beacon_node/beacon_chain/src/payload_envelope_verification/mod.rs index c707d62dc7b..8ca6871dda9 100644 --- a/beacon_node/beacon_chain/src/payload_envelope_verification/mod.rs +++ b/beacon_node/beacon_chain/src/payload_envelope_verification/mod.rs @@ -59,6 +59,22 @@ pub struct AvailableEnvelope { } impl AvailableEnvelope { + pub fn new( + execution_block_hash: ExecutionBlockHash, + envelope: Arc>, + columns: DataColumnSidecarList, + columns_available_timestamp: Option, + spec: Arc, + ) -> Self { + Self { + execution_block_hash, + envelope, + columns, + columns_available_timestamp, + spec, + } + } + pub fn message(&self) -> &ExecutionPayloadEnvelope { &self.envelope.message } From 77382daab996617cd83539a5ed65eff5ecb61f1b Mon Sep 17 00:00:00 2001 From: Eitan Seri- Levi Date: Tue, 31 Mar 2026 10:45:29 -0700 Subject: [PATCH 066/127] Rvert --- consensus/proto_array/src/proto_array.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 96492e695db..933e9eb078b 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -1483,9 +1483,7 @@ impl ProtoArray { .filter_map(|&child_index| { let child_node = self.nodes.get(child_index)?; // Skip parent_payload_status filter for V17 children (they don't have it) - if child_node.as_v17().is_err() - && 
child_node.get_parent_payload_status() != node.payload_status - { + if child_node.get_parent_payload_status() != node.payload_status { return None; } Some(( From 0f996ddbe8ec6ef8fba3df8a0787f0a6ce917889 Mon Sep 17 00:00:00 2001 From: Eitan Seri- Levi Date: Tue, 31 Mar 2026 11:23:59 -0700 Subject: [PATCH 067/127] sse stuff --- .../src/payload_envelope_verification/import.rs | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/beacon_node/beacon_chain/src/payload_envelope_verification/import.rs b/beacon_node/beacon_chain/src/payload_envelope_verification/import.rs index ed121ccb94a..85332a12ea7 100644 --- a/beacon_node/beacon_chain/src/payload_envelope_verification/import.rs +++ b/beacon_node/beacon_chain/src/payload_envelope_verification/import.rs @@ -1,6 +1,7 @@ use std::sync::Arc; use std::time::Duration; +use eth2::types::{EventKind, SseExecutionPayloadAvailable}; use fork_choice::PayloadVerificationStatus; use slot_clock::SlotClock; use store::StoreOp; @@ -346,6 +347,15 @@ impl BeaconChain { ); } - // TODO(gloas) emit SSE event for envelope import (similar to SseBlock for blocks). 
+ if let Some(event_handler) = self.event_handler.as_ref() + && event_handler.has_execution_payload_available_subscribers() + { + event_handler.register(EventKind::ExecutionPayloadAvailable( + SseExecutionPayloadAvailable { + slot: envelope_slot, + block_root, + }, + )); + } } } From 4ca10e95be350da5fb996f54d8818aa841ae68c5 Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay Date: Mon, 30 Mar 2026 17:00:29 -0700 Subject: [PATCH 068/127] Add range sync machinery on sync side --- .../src/block_verification_types.rs | 107 +++++++--- .../src/data_availability_checker.rs | 7 +- .../src/payload_envelope_verification/mod.rs | 2 +- .../src/service/api_types.rs | 16 ++ .../network_beacon_processor/sync_methods.rs | 9 + beacon_node/network/src/router.rs | 29 ++- beacon_node/network/src/sync/batch.rs | 1 + .../src/sync/block_sidecar_coupling.rs | 202 +++++++++++++++++- beacon_node/network/src/sync/manager.rs | 32 ++- .../network/src/sync/network_context.rs | 161 ++++++++++++-- .../src/sync/network_context/requests.rs | 2 + .../requests/payload_envelopes_by_range.rs | 42 ++++ 12 files changed, 538 insertions(+), 72 deletions(-) create mode 100644 beacon_node/network/src/sync/network_context/requests/payload_envelopes_by_range.rs diff --git a/beacon_node/beacon_chain/src/block_verification_types.rs b/beacon_node/beacon_chain/src/block_verification_types.rs index be73ef15d73..456e8794e74 100644 --- a/beacon_node/beacon_chain/src/block_verification_types.rs +++ b/beacon_node/beacon_chain/src/block_verification_types.rs @@ -2,10 +2,11 @@ use crate::data_availability_checker::{AvailabilityCheckError, DataAvailabilityC pub use crate::data_availability_checker::{ AvailableBlock, AvailableBlockData, MaybeAvailableBlock, }; +use crate::payload_envelope_verification::AvailableEnvelope; use crate::{BeaconChainTypes, PayloadVerificationOutcome}; -use educe::Educe; use state_processing::ConsensusContext; use std::fmt::{Debug, Formatter}; +use std::hash::{Hash, Hasher}; use std::sync::Arc; use 
types::data::BlobIdentifier; use types::{ @@ -40,43 +41,63 @@ impl LookupBlock { } } -/// A fully available block that has been constructed by range sync. -/// The block contains all the data required to import into fork choice. -/// This includes any and all blobs/columns required, including zero if -/// none are required. This can happen if the block is pre-deneb or if -/// it's simply past the DA boundary. -#[derive(Clone, Educe)] -#[educe(Hash(bound(E: EthSpec)))] -pub struct RangeSyncBlock { - block: AvailableBlock, +/// A block that has been constructed by range sync with all required data. +/// +/// - `Base`: Pre-Gloas blocks bundled as an `AvailableBlock` (block + blobs/columns). +/// - `Gloas`: Post-Gloas blocks where the execution payload is a separate envelope. +#[derive(Clone)] +pub enum RangeSyncBlock { + Base(AvailableBlock), + Gloas { + block: Arc>, + envelope: Option>>, + }, +} + +impl Hash for RangeSyncBlock { + fn hash(&self, state: &mut H) { + self.block_root().hash(state); + } } impl Debug for RangeSyncBlock { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - write!(f, "RpcBlock({:?})", self.block_root()) + write!(f, "RangeSyncBlock({:?})", self.block_root()) } } impl RangeSyncBlock { pub fn block_root(&self) -> Hash256 { - self.block.block_root() + match self { + Self::Base(block) => block.block_root(), + Self::Gloas { block, .. } => block.canonical_root(), + } } pub fn as_block(&self) -> &SignedBeaconBlock { - self.block.block() + match self { + Self::Base(block) => block.block(), + Self::Gloas { block, .. } => block, + } } pub fn block_cloned(&self) -> Arc> { - self.block.block_cloned() + match self { + Self::Base(block) => block.block_cloned(), + Self::Gloas { block, .. } => block.clone(), + } } pub fn block_data(&self) -> &AvailableBlockData { - self.block.data() + match self { + Self::Base(block) => block.data(), + Self::Gloas { .. 
} => { + unreachable!("block_data called on Gloas variant — use envelope data instead") + } + } } -} -impl RangeSyncBlock { - /// Constructs an `RangeSyncBlock` from a block and availability data. + /// Constructs a `Base` variant from a block and availability data. /// /// # Errors /// @@ -94,32 +115,54 @@ impl RangeSyncBlock { T: BeaconChainTypes, { let available_block = AvailableBlock::new(block, block_data, da_checker, spec)?; - Ok(Self { - block: available_block, - }) + Ok(Self::Base(available_block)) + } + + /// Constructs a `Gloas` variant from a block and optional available envelope. + pub fn new_gloas( + block: Arc>, + envelope: Option>>, + ) -> Self { + Self::Gloas { block, envelope } } #[allow(clippy::type_complexity)] pub fn deconstruct(self) -> (Hash256, Arc>, AvailableBlockData) { - self.block.deconstruct() + match self { + Self::Base(block) => block.deconstruct(), + Self::Gloas { .. } => { + unreachable!("deconstruct called on Gloas variant") + } + } } pub fn n_blobs(&self) -> usize { - match self.block_data() { - AvailableBlockData::NoData | AvailableBlockData::DataColumns(_) => 0, - AvailableBlockData::Blobs(blobs) => blobs.len(), + match self { + Self::Base(block) => match block.data() { + AvailableBlockData::NoData | AvailableBlockData::DataColumns(_) => 0, + AvailableBlockData::Blobs(blobs) => blobs.len(), + }, + Self::Gloas { .. } => 0, } } pub fn n_data_columns(&self) -> usize { - match self.block_data() { - AvailableBlockData::NoData | AvailableBlockData::Blobs(_) => 0, - AvailableBlockData::DataColumns(columns) => columns.len(), + match self { + Self::Base(block) => match block.data() { + AvailableBlockData::NoData | AvailableBlockData::Blobs(_) => 0, + AvailableBlockData::DataColumns(columns) => columns.len(), + }, + Self::Gloas { .. } => 0, } } pub fn into_available_block(self) -> AvailableBlock { - self.block + match self { + Self::Base(block) => block, + Self::Gloas { .. 
} => { + unreachable!("into_available_block called on Gloas variant") + } + } } } @@ -405,13 +448,13 @@ impl AsBlock for RangeSyncBlock { self.as_block().message() } fn as_block(&self) -> &SignedBeaconBlock { - self.block.as_block() + RangeSyncBlock::as_block(self) } fn block_cloned(&self) -> Arc> { - self.block.block_cloned() + RangeSyncBlock::block_cloned(self) } fn canonical_root(&self) -> Hash256 { - self.block.block_root() + self.block_root() } } diff --git a/beacon_node/beacon_chain/src/data_availability_checker.rs b/beacon_node/beacon_chain/src/data_availability_checker.rs index 4372efa8096..1eb59b9b7b1 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker.rs @@ -20,7 +20,7 @@ use tracing::{debug, error, instrument}; use types::data::{BlobIdentifier, FixedBlobSidecarList}; use types::{ BlobSidecar, BlobSidecarList, BlockImportSource, ChainSpec, DataColumnSidecar, - DataColumnSidecarList, Epoch, EthSpec, Hash256, SignedBeaconBlock, Slot, + DataColumnSidecarList, Epoch, EthSpec, ForkName, Hash256, SignedBeaconBlock, Slot, }; mod error; @@ -420,6 +420,11 @@ impl DataAvailabilityChecker { self.da_check_required_for_epoch(epoch) && self.spec.is_peer_das_enabled_for_epoch(epoch) } + /// Determines if execution payload envelopes are required for an epoch (Gloas and later). 
+ pub fn envelopes_required_for_epoch(&self, epoch: Epoch) -> bool { + self.spec.fork_name_at_epoch(epoch) >= ForkName::Gloas + } + /// See `Self::blobs_required_for_epoch` fn blobs_required_for_block(&self, block: &SignedBeaconBlock) -> bool { block.num_expected_blobs() > 0 && self.blobs_required_for_epoch(block.epoch()) diff --git a/beacon_node/beacon_chain/src/payload_envelope_verification/mod.rs b/beacon_node/beacon_chain/src/payload_envelope_verification/mod.rs index 8ca6871dda9..b4628ae2d01 100644 --- a/beacon_node/beacon_chain/src/payload_envelope_verification/mod.rs +++ b/beacon_node/beacon_chain/src/payload_envelope_verification/mod.rs @@ -47,7 +47,7 @@ pub struct EnvelopeImportData { pub post_state: Box>, } -#[derive(Debug)] +#[derive(Debug, Clone)] #[allow(dead_code)] pub struct AvailableEnvelope { execution_block_hash: ExecutionBlockHash, diff --git a/beacon_node/lighthouse_network/src/service/api_types.rs b/beacon_node/lighthouse_network/src/service/api_types.rs index a190a42a80e..0b1d84b7066 100644 --- a/beacon_node/lighthouse_network/src/service/api_types.rs +++ b/beacon_node/lighthouse_network/src/service/api_types.rs @@ -33,6 +33,8 @@ pub enum SyncRequestId { DataColumnsByRange(DataColumnsByRangeRequestId), /// Request searching for an execution payload envelope given a block root. SinglePayloadEnvelope { id: SingleLookupReqId }, + /// Payload envelopes by range request + PayloadEnvelopesByRange(PayloadEnvelopesByRangeRequestId), } /// Request ID for data_columns_by_root requests. Block lookups do not issue this request directly. @@ -78,6 +80,14 @@ pub enum DataColumnsByRangeRequester { CustodyBackfillSync(CustodyBackFillBatchRequestId), } +#[derive(Debug, Hash, PartialEq, Eq, Clone, Copy)] +pub struct PayloadEnvelopesByRangeRequestId { + /// Id to identify this attempt at a payload_envelopes_by_range request for `parent_request_id` + pub id: Id, + /// The Id of the overall By Range request for block components. 
+ pub parent_request_id: ComponentsByRangeRequestId, +} + /// Block components by range request for range sync. Includes an ID for downstream consumers to /// handle retries and tie all their sub requests together. #[derive(Debug, Hash, PartialEq, Eq, Clone, Copy)] @@ -254,6 +264,12 @@ macro_rules! impl_display { impl_display!(BlocksByRangeRequestId, "{}/{}", id, parent_request_id); impl_display!(BlobsByRangeRequestId, "{}/{}", id, parent_request_id); impl_display!(DataColumnsByRangeRequestId, "{}/{}", id, parent_request_id); +impl_display!( + PayloadEnvelopesByRangeRequestId, + "{}/{}", + id, + parent_request_id +); impl_display!(ComponentsByRangeRequestId, "{}/{}", id, requester); impl_display!(DataColumnsByRootRequestId, "{}/{}", id, requester); impl_display!(SingleLookupReqId, "{}/Lookup/{}", req_id, lookup_id); diff --git a/beacon_node/network/src/network_beacon_processor/sync_methods.rs b/beacon_node/network/src/network_beacon_processor/sync_methods.rs index f6d4940121e..f14816139c4 100644 --- a/beacon_node/network/src/network_beacon_processor/sync_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/sync_methods.rs @@ -619,6 +619,15 @@ impl NetworkBeaconProcessor { return; }; + // TODO(gloas): Implement Gloas chain segment processing. + // Gloas blocks carry separate envelopes and need a different import path. + if downloaded_blocks + .iter() + .any(|b| matches!(b, RangeSyncBlock::Gloas { .. 
})) + { + todo!("Gloas chain segment processing"); + } + let start_slot = downloaded_blocks.first().map(|b| b.slot().as_u64()); let end_slot = downloaded_blocks.last().map(|b| b.slot().as_u64()); let sent_blocks = downloaded_blocks.len(); diff --git a/beacon_node/network/src/router.rs b/beacon_node/network/src/router.rs index 3fb21969756..faa55b48bfe 100644 --- a/beacon_node/network/src/router.rs +++ b/beacon_node/network/src/router.rs @@ -333,10 +333,8 @@ impl Router { Response::PayloadEnvelopesByRoot(envelope) => { self.on_payload_envelopes_by_root_response(peer_id, app_request_id, envelope); } - // TODO(EIP-7732): implement outgoing payload envelopes by range responses once - // range sync requests them. - Response::PayloadEnvelopesByRange(_) => { - unreachable!() + Response::PayloadEnvelopesByRange(envelope) => { + self.on_payload_envelopes_by_range_response(peer_id, app_request_id, envelope); } // Light client responses should not be received Response::LightClientBootstrap(_) @@ -834,6 +832,29 @@ impl Router { } } + pub fn on_payload_envelopes_by_range_response( + &mut self, + peer_id: PeerId, + app_request_id: AppRequestId, + envelope: Option>>, + ) { + trace!( + %peer_id, + "Received PayloadEnvelopesByRange Response" + ); + + if let AppRequestId::Sync(sync_request_id) = app_request_id { + self.send_to_sync(SyncMessage::RpcPayloadEnvelope { + peer_id, + sync_request_id, + envelope, + seen_timestamp: timestamp_now(), + }); + } else { + crit!("All payload envelopes by range responses should belong to sync"); + } + } + fn handle_beacon_processor_send_result( &mut self, result: Result<(), crate::network_beacon_processor::Error>, diff --git a/beacon_node/network/src/sync/batch.rs b/beacon_node/network/src/sync/batch.rs index 10af1bf5038..e0704e25697 100644 --- a/beacon_node/network/src/sync/batch.rs +++ b/beacon_node/network/src/sync/batch.rs @@ -33,6 +33,7 @@ pub type BatchId = Epoch; #[strum(serialize_all = "snake_case")] pub enum ByRangeRequestType { 
BlocksAndColumns, + BlocksAndEnvelopesAndColumns, BlocksAndBlobs, Blocks, Columns(HashSet), diff --git a/beacon_node/network/src/sync/block_sidecar_coupling.rs b/beacon_node/network/src/sync/block_sidecar_coupling.rs index 98cf3e0a1ff..e475b60de91 100644 --- a/beacon_node/network/src/sync/block_sidecar_coupling.rs +++ b/beacon_node/network/src/sync/block_sidecar_coupling.rs @@ -4,11 +4,13 @@ use beacon_chain::{ data_availability_checker::DataAvailabilityChecker, data_column_verification::CustodyDataColumn, get_block_root, + payload_envelope_verification::AvailableEnvelope, }; use lighthouse_network::{ PeerId, service::api_types::{ BlobsByRangeRequestId, BlocksByRangeRequestId, DataColumnsByRangeRequestId, + PayloadEnvelopesByRangeRequestId, }, }; use ssz_types::RuntimeVariableList; @@ -16,7 +18,7 @@ use std::{collections::HashMap, sync::Arc}; use tracing::{Span, debug}; use types::{ BlobSidecar, ChainSpec, ColumnIndex, DataColumnSidecar, DataColumnSidecarList, EthSpec, - Hash256, SignedBeaconBlock, + Hash256, SignedBeaconBlock, SignedExecutionPayloadEnvelope, }; use crate::sync::network_context::MAX_COLUMN_RETRIES; @@ -35,6 +37,13 @@ use crate::sync::network_context::MAX_COLUMN_RETRIES; pub struct RangeBlockComponentsRequest { /// Blocks we have received awaiting for their corresponding sidecar. blocks_request: ByRangeRequest>>>, + /// Payload envelopes (Gloas+). None for pre-Gloas forks. + payloads_request: Option< + ByRangeRequest< + PayloadEnvelopesByRangeRequestId, + Vec>>, + >, + >, /// Sidecars we have received awaiting for their corresponding block. block_data_request: RangeBlockDataRequest, /// Span to track the range request and all children range requests. 
@@ -88,6 +97,7 @@ impl RangeBlockComponentsRequest { Vec<(DataColumnsByRangeRequestId, Vec)>, Vec, )>, + payloads_req_id: Option, request_span: Span, ) -> Self { let block_data_request = if let Some(blobs_req_id) = blobs_req_id { @@ -109,6 +119,7 @@ impl RangeBlockComponentsRequest { Self { blocks_request: ByRangeRequest::Active(blocks_req_id), + payloads_request: payloads_req_id.map(ByRangeRequest::Active), block_data_request, request_span, } @@ -191,6 +202,18 @@ impl RangeBlockComponentsRequest { } } + /// Adds received payload envelopes to the request. + pub fn add_payload_envelopes( + &mut self, + req_id: PayloadEnvelopesByRangeRequestId, + envelopes: Vec>>, + ) -> Result<(), String> { + match &mut self.payloads_request { + Some(req) => req.finish(req_id, envelopes), + None => Err("received payload envelopes but none expected".to_owned()), + } + } + /// Attempts to construct RPC blocks from all received components. /// /// Returns `None` if not all expected requests have completed. @@ -208,6 +231,13 @@ impl RangeBlockComponentsRequest { return None; }; + // If payloads are expected, they must also be complete before we can produce responses. 
+ if let Some(payloads_req) = &self.payloads_request + && payloads_req.to_finished().is_none() + { + return None; + } + // Increment the attempt once this function returns the response or errors match &mut self.block_data_request { RangeBlockDataRequest::NoData => Some(Self::responses_with_blobs( @@ -254,15 +284,29 @@ impl RangeBlockComponentsRequest { } } - let resp = Self::responses_with_custody_columns( - blocks.to_vec(), - data_columns, - column_to_peer_id, - expected_custody_columns, - *attempt, - da_checker, - spec, - ); + // Gloas path: if payloads are present, produce Gloas blocks + let resp = if let Some(payloads_req) = &self.payloads_request { + let payloads = payloads_req.to_finished().expect("checked above").to_vec(); + Self::responses_gloas( + blocks.to_vec(), + payloads, + data_columns, + column_to_peer_id, + expected_custody_columns, + *attempt, + spec, + ) + } else { + Self::responses_with_custody_columns( + blocks.to_vec(), + data_columns, + column_to_peer_id, + expected_custody_columns, + *attempt, + da_checker, + spec, + ) + }; if let Err(CouplingError::DataColumnPeerFailure { error: _, @@ -460,6 +504,136 @@ impl RangeBlockComponentsRequest { Ok(range_sync_blocks) } + + /// Couples blocks with payload envelopes and custody columns for Gloas. + /// In Gloas, columns are associated with the envelope (not the block directly). 
+ fn responses_gloas( + blocks: Vec>>, + payloads: Vec>>, + data_columns: DataColumnSidecarList, + column_to_peer: HashMap, + expects_custody_columns: &[ColumnIndex], + attempt: usize, + spec: Arc, + ) -> Result>, CouplingError> { + // Group data columns by block_root + let mut data_columns_by_block = + HashMap::>>>::new(); + + for column in data_columns { + let block_root = column.block_root(); + let index = *column.index(); + if data_columns_by_block + .entry(block_root) + .or_default() + .insert(index, column) + .is_some() + { + debug!(?block_root, ?index, "Repeated column for block_root"); + } + } + + let mut range_sync_blocks = Vec::with_capacity(blocks.len()); + let mut payload_iter = payloads.into_iter().peekable(); + let exceeded_retries = attempt >= MAX_COLUMN_RETRIES; + + for block in blocks { + // Match payload envelope to block by slot + let mut envelope_for_block = None; + if payload_iter + .peek() + .map(|e| e.message.slot == block.slot()) + .unwrap_or(false) + { + envelope_for_block = payload_iter.next(); + } + + let block_root = get_block_root(&block); + + let available_envelope = if block.num_expected_blobs() > 0 { + // Block has blobs — envelope and columns are required + let envelope = envelope_for_block.ok_or_else(|| { + CouplingError::InternalError(format!( + "Missing payload envelope for block {block_root:?} with blobs" + )) + })?; + + let Some(mut data_columns_by_index) = data_columns_by_block.remove(&block_root) + else { + let responsible_peers = column_to_peer.iter().map(|c| (*c.0, *c.1)).collect(); + return Err(CouplingError::DataColumnPeerFailure { + error: format!("No columns for block {block_root:?} with data"), + faulty_peers: responsible_peers, + exceeded_retries, + }); + }; + + let mut custody_columns = vec![]; + let mut naughty_peers = vec![]; + for index in expects_custody_columns { + if let Some(data_column) = data_columns_by_index.remove(index) { + custody_columns.push(data_column); + } else { + let Some(responsible_peer) = 
column_to_peer.get(index) else { + return Err(CouplingError::InternalError(format!( + "Internal error, no request made for column {index}" + ))); + }; + naughty_peers.push((*index, *responsible_peer)); + } + } + if !naughty_peers.is_empty() { + return Err(CouplingError::DataColumnPeerFailure { + error: format!( + "Peers did not return column for block_root {block_root:?} {naughty_peers:?}" + ), + faulty_peers: naughty_peers, + exceeded_retries, + }); + } + + Some(Box::new(AvailableEnvelope::new( + envelope.block_hash(), + envelope, + custody_columns, + None, + spec.clone(), + ))) + } else { + envelope_for_block.map(|envelope| { + Box::new(AvailableEnvelope::new( + envelope.block_hash(), + envelope, + vec![], + None, + spec.clone(), + )) + }) + }; + + range_sync_blocks.push(RangeSyncBlock::new_gloas(block, available_envelope)); + } + + // Log any remaining unmatched payloads + if payload_iter.next().is_some() { + let remaining = payload_iter.count() + 1; + debug!( + remaining, + "Received payload envelopes that don't pair with blocks" + ); + } + + // Log remaining unmatched columns + if !data_columns_by_block.is_empty() { + let remaining_roots = data_columns_by_block.keys().collect::>(); + debug!( + ?remaining_roots, + "Not all columns consumed for Gloas blocks" + ); + } + + Ok(range_sync_blocks) + } } impl ByRangeRequest { @@ -560,7 +734,7 @@ mod tests { let blocks_req_id = blocks_id(components_id()); let mut info = - RangeBlockComponentsRequest::::new(blocks_req_id, None, None, Span::none()); + RangeBlockComponentsRequest::::new(blocks_req_id, None, None, None, Span::none()); // Send blocks and complete terminate response info.add_blocks(blocks_req_id, blocks).unwrap(); @@ -591,6 +765,7 @@ mod tests { blocks_req_id, Some(blobs_req_id), None, + None, Span::none(), ); @@ -650,6 +825,7 @@ mod tests { blocks_req_id, None, Some((columns_req_id.clone(), expects_custody_columns.clone())), + None, Span::none(), ); // Send blocks and complete terminate response @@ -726,6 
+902,7 @@ mod tests { blocks_req_id, None, Some((columns_req_id.clone(), expected_sampling_columns.clone())), + None, Span::none(), ); @@ -818,6 +995,7 @@ mod tests { blocks_req_id, None, Some((columns_req_id.clone(), expected_sampling_columns.clone())), + None, Span::none(), ); @@ -915,6 +1093,7 @@ mod tests { blocks_req_id, None, Some((columns_req_id.clone(), expected_sampling_columns.clone())), + None, Span::none(), ); @@ -1030,6 +1209,7 @@ mod tests { blocks_req_id, None, Some((columns_req_id.clone(), expected_sampling_columns.clone())), + None, Span::none(), ); diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index 256752d5fbb..64c10e8f466 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -60,7 +60,8 @@ use lighthouse_network::service::api_types::{ BlobsByRangeRequestId, BlocksByRangeRequestId, ComponentsByRangeRequestId, CustodyBackFillBatchRequestId, CustodyBackfillBatchId, CustodyRequester, DataColumnsByRangeRequestId, DataColumnsByRangeRequester, DataColumnsByRootRequestId, - DataColumnsByRootRequester, Id, SingleLookupReqId, SyncRequestId, + DataColumnsByRootRequester, Id, PayloadEnvelopesByRangeRequestId, SingleLookupReqId, + SyncRequestId, }; use lighthouse_network::types::{NetworkGlobals, SyncState}; use lighthouse_network::{PeerAction, PeerId}; @@ -522,6 +523,8 @@ impl SyncManager { SyncRequestId::SinglePayloadEnvelope { id } => { self.on_single_envelope_response(id, peer_id, RpcEvent::RPCError(error)) } + SyncRequestId::PayloadEnvelopesByRange(req_id) => self + .on_payload_envelopes_by_range_response(req_id, peer_id, RpcEvent::RPCError(error)), } } @@ -1262,8 +1265,15 @@ impl SyncManager { peer_id, RpcEvent::from_chunk(envelope, seen_timestamp), ), + SyncRequestId::PayloadEnvelopesByRange(req_id) => { + self.on_payload_envelopes_by_range_response( + req_id, + peer_id, + RpcEvent::from_chunk(envelope, seen_timestamp), + ); + } _ => { - crit!(%peer_id, 
"bad request id for payload envelope"); + crit!(%peer_id, "bad request id for payload_envelope"); } } } @@ -1302,6 +1312,24 @@ impl SyncManager { } } + fn on_payload_envelopes_by_range_response( + &mut self, + id: PayloadEnvelopesByRangeRequestId, + peer_id: PeerId, + envelope: RpcEvent>>, + ) { + if let Some(resp) = self + .network + .on_payload_envelopes_by_range_response(id, peer_id, envelope) + { + self.on_range_components_response( + id.parent_request_id, + peer_id, + RangeBlockComponent::PayloadEnvelope(id, resp), + ); + } + } + fn on_single_blob_response( &mut self, id: SingleLookupReqId, diff --git a/beacon_node/network/src/sync/network_context.rs b/beacon_node/network/src/sync/network_context.rs index e9d289b7771..d512c9e24fc 100644 --- a/beacon_node/network/src/sync/network_context.rs +++ b/beacon_node/network/src/sync/network_context.rs @@ -22,14 +22,17 @@ use beacon_chain::block_verification_types::{AsBlock, RangeSyncBlock}; use beacon_chain::{BeaconChain, BeaconChainTypes, BlockProcessStatus, EngineState}; use custody::CustodyRequestResult; use fnv::FnvHashMap; -use lighthouse_network::rpc::methods::{BlobsByRangeRequest, DataColumnsByRangeRequest}; +use lighthouse_network::rpc::methods::{ + BlobsByRangeRequest, DataColumnsByRangeRequest, PayloadEnvelopesByRangeRequest, +}; use lighthouse_network::rpc::{BlocksByRangeRequest, GoodbyeReason, RPCError, RequestType}; pub use lighthouse_network::service::api_types::RangeRequestId; use lighthouse_network::service::api_types::{ AppRequestId, BlobsByRangeRequestId, BlocksByRangeRequestId, ComponentsByRangeRequestId, CustodyBackFillBatchRequestId, CustodyBackfillBatchId, CustodyId, CustodyRequester, DataColumnsByRangeRequestId, DataColumnsByRangeRequester, DataColumnsByRootRequestId, - DataColumnsByRootRequester, Id, SingleLookupReqId, SyncRequestId, + DataColumnsByRootRequester, Id, PayloadEnvelopesByRangeRequestId, SingleLookupReqId, + SyncRequestId, }; use lighthouse_network::{Client, NetworkGlobals, 
PeerAction, PeerId, ReportSource}; use parking_lot::RwLock; @@ -37,7 +40,8 @@ pub use requests::LookupVerifyError; use requests::{ ActiveRequests, BlobsByRangeRequestItems, BlobsByRootRequestItems, BlocksByRangeRequestItems, BlocksByRootRequestItems, DataColumnsByRangeRequestItems, DataColumnsByRootRequestItems, - PayloadEnvelopesByRootRequestItems, PayloadEnvelopesByRootSingleRequest, + PayloadEnvelopesByRangeRequestItems, PayloadEnvelopesByRootRequestItems, + PayloadEnvelopesByRootSingleRequest, }; #[cfg(test)] use slot_clock::SlotClock; @@ -217,6 +221,11 @@ pub struct SyncNetworkContext { /// A mapping of active PayloadEnvelopesByRoot requests payload_envelopes_by_root_requests: ActiveRequests>, + /// A mapping of active PayloadEnvelopesByRange requests + payload_envelopes_by_range_requests: ActiveRequests< + PayloadEnvelopesByRangeRequestId, + PayloadEnvelopesByRangeRequestItems, + >, /// Mapping of active custody column requests for a block root custody_by_root_requests: FnvHashMap>, @@ -254,6 +263,10 @@ pub enum RangeBlockComponent { DataColumnsByRangeRequestId, RpcResponseResult>>>, ), + PayloadEnvelope( + PayloadEnvelopesByRangeRequestId, + RpcResponseResult>>>, + ), } #[cfg(test)] @@ -303,6 +316,7 @@ impl SyncNetworkContext { blobs_by_range_requests: ActiveRequests::new("blobs_by_range"), data_columns_by_range_requests: ActiveRequests::new("data_columns_by_range"), payload_envelopes_by_root_requests: ActiveRequests::new("payload_envelopes_by_root"), + payload_envelopes_by_range_requests: ActiveRequests::new("payload_envelopes_by_range"), custody_by_root_requests: <_>::default(), components_by_range_requests: FnvHashMap::default(), custody_backfill_data_column_batch_requests: FnvHashMap::default(), @@ -332,6 +346,7 @@ impl SyncNetworkContext { blobs_by_range_requests, data_columns_by_range_requests, payload_envelopes_by_root_requests, + payload_envelopes_by_range_requests, // custody_by_root_requests is a meta request of data_columns_by_root_requests 
custody_by_root_requests: _, // components_by_range_requests is a meta request of various _by_range requests @@ -371,6 +386,10 @@ impl SyncNetworkContext { .active_requests_of_peer(peer_id) .into_iter() .map(|id| SyncRequestId::SinglePayloadEnvelope { id: *id }); + let payload_envelope_by_range_ids = payload_envelopes_by_range_requests + .active_requests_of_peer(peer_id) + .into_iter() + .map(|req_id| SyncRequestId::PayloadEnvelopesByRange(*req_id)); blocks_by_root_ids .chain(blobs_by_root_ids) .chain(data_column_by_root_ids) @@ -378,6 +397,7 @@ impl SyncNetworkContext { .chain(blobs_by_range_ids) .chain(data_column_by_range_ids) .chain(envelope_by_root_ids) + .chain(payload_envelope_by_range_ids) .collect() } @@ -435,6 +455,7 @@ impl SyncNetworkContext { blobs_by_range_requests, data_columns_by_range_requests, payload_envelopes_by_root_requests, + payload_envelopes_by_range_requests, // custody_by_root_requests is a meta request of data_columns_by_root_requests custody_by_root_requests: _, // components_by_range_requests is a meta request of various _by_range requests @@ -458,6 +479,7 @@ impl SyncNetworkContext { .chain(blobs_by_range_requests.iter_request_peers()) .chain(data_columns_by_range_requests.iter_request_peers()) .chain(payload_envelopes_by_root_requests.iter_request_peers()) + .chain(payload_envelopes_by_range_requests.iter_request_peers()) { *active_request_count_by_peer.entry(peer_id).or_default() += 1; } @@ -590,24 +612,26 @@ impl SyncNetworkContext { }; // Attempt to find all required custody peers before sending any request or creating an ID - let columns_by_range_peers_to_request = - if matches!(batch_type, ByRangeRequestType::BlocksAndColumns) { - let epoch = Slot::new(*request.start_slot()).epoch(T::EthSpec::slots_per_epoch()); - let column_indexes = self - .chain - .sampling_columns_for_epoch(epoch) - .iter() - .cloned() - .collect(); - Some(self.select_columns_by_range_peers_to_request( - &column_indexes, - column_peers, - 
active_request_count_by_peer, - peers_to_deprioritize, - )?) - } else { - None - }; + let columns_by_range_peers_to_request = if matches!( + batch_type, + ByRangeRequestType::BlocksAndColumns | ByRangeRequestType::BlocksAndEnvelopesAndColumns + ) { + let epoch = Slot::new(*request.start_slot()).epoch(T::EthSpec::slots_per_epoch()); + let column_indexes = self + .chain + .sampling_columns_for_epoch(epoch) + .iter() + .cloned() + .collect(); + Some(self.select_columns_by_range_peers_to_request( + &column_indexes, + column_peers, + active_request_count_by_peer, + peers_to_deprioritize, + )?) + } else { + None + }; // Create the overall components_by_range request ID before its individual components let id = ComponentsByRangeRequestId { @@ -672,6 +696,28 @@ impl SyncNetworkContext { .transpose()?; let epoch = Slot::new(*request.start_slot()).epoch(T::EthSpec::slots_per_epoch()); + + // Send envelope request for Gloas epochs + let payloads_req_id = + if matches!(batch_type, ByRangeRequestType::BlocksAndEnvelopesAndColumns) { + Some(self.send_payload_envelopes_by_range_request( + block_peer, + PayloadEnvelopesByRangeRequest { + start_slot: *request.start_slot(), + count: *request.count(), + }, + id, + new_range_request_span!( + self, + "outgoing_envelopes_by_range", + range_request_span.clone(), + block_peer + ), + )?) 
+ } else { + None + }; + let info = RangeBlockComponentsRequest::new( blocks_req_id, blobs_req_id, @@ -681,6 +727,7 @@ impl SyncNetworkContext { self.chain.sampling_columns_for_epoch(epoch).to_vec(), ) }), + payloads_req_id, range_request_span, ); self.components_by_range_requests.insert(id, info); @@ -783,6 +830,17 @@ impl SyncNetworkContext { }) }) } + RangeBlockComponent::PayloadEnvelope(req_id, resp) => { + resp.and_then(|(envelopes, _)| { + request + .add_payload_envelopes(req_id, envelopes) + .map_err(|e| { + RpcResponseError::BlockComponentCouplingError( + CouplingError::InternalError(e), + ) + }) + }) + } } } { entry.remove(); @@ -1352,6 +1410,57 @@ impl SyncNetworkContext { Ok((id, requested_columns)) } + fn send_payload_envelopes_by_range_request( + &mut self, + peer_id: PeerId, + request: PayloadEnvelopesByRangeRequest, + parent_request_id: ComponentsByRangeRequestId, + request_span: Span, + ) -> Result { + let id = PayloadEnvelopesByRangeRequestId { + id: self.next_id(), + parent_request_id, + }; + + self.send_network_msg(NetworkMessage::SendRequest { + peer_id, + request: RequestType::PayloadEnvelopesByRange(request.clone()), + app_request_id: AppRequestId::Sync(SyncRequestId::PayloadEnvelopesByRange(id)), + }) + .map_err(|_| RpcRequestSendError::InternalError("network send error".to_owned()))?; + + debug!( + method = "PayloadEnvelopesByRange", + slots = request.count, + epoch = %Slot::new(request.start_slot).epoch(T::EthSpec::slots_per_epoch()), + peer = %peer_id, + %id, + "Sync RPC request sent" + ); + + self.payload_envelopes_by_range_requests.insert( + id, + peer_id, + false, + PayloadEnvelopesByRangeRequestItems::new(request), + request_span, + ); + Ok(id) + } + + #[allow(clippy::type_complexity)] + pub(crate) fn on_payload_envelopes_by_range_response( + &mut self, + id: PayloadEnvelopesByRangeRequestId, + peer_id: PeerId, + rpc_event: RpcEvent>>, + ) -> Option>>>> { + let resp = self + .payload_envelopes_by_range_requests + .on_response(id, 
rpc_event); + self.on_rpc_response_result(resp, peer_id) + } + pub fn is_execution_engine_online(&self) -> bool { self.execution_engine_state == EngineState::Online } @@ -1433,6 +1542,12 @@ impl SyncNetworkContext { ); if self + .chain + .data_availability_checker + .envelopes_required_for_epoch(epoch) + { + ByRangeRequestType::BlocksAndEnvelopesAndColumns + } else if self .chain .data_availability_checker .data_columns_required_for_epoch(epoch) @@ -1900,6 +2015,10 @@ impl SyncNetworkContext { "data_columns_by_range", self.data_columns_by_range_requests.len(), ), + ( + "payload_envelopes_by_range", + self.payload_envelopes_by_range_requests.len(), + ), ("custody_by_root", self.custody_by_root_requests.len()), ( "components_by_range", diff --git a/beacon_node/network/src/sync/network_context/requests.rs b/beacon_node/network/src/sync/network_context/requests.rs index 5b5e779d9bf..7ba0838ee1d 100644 --- a/beacon_node/network/src/sync/network_context/requests.rs +++ b/beacon_node/network/src/sync/network_context/requests.rs @@ -19,6 +19,7 @@ pub use data_columns_by_root::{ pub use payload_envelopes_by_root::{ PayloadEnvelopesByRootRequestItems, PayloadEnvelopesByRootSingleRequest, }; +pub use payload_envelopes_by_range::PayloadEnvelopesByRangeRequestItems; use crate::metrics; @@ -31,6 +32,7 @@ mod blocks_by_root; mod data_columns_by_range; mod data_columns_by_root; mod payload_envelopes_by_root; +mod payload_envelopes_by_range; #[derive(Debug, PartialEq, Eq, IntoStaticStr)] pub enum LookupVerifyError { diff --git a/beacon_node/network/src/sync/network_context/requests/payload_envelopes_by_range.rs b/beacon_node/network/src/sync/network_context/requests/payload_envelopes_by_range.rs new file mode 100644 index 00000000000..3d4ea8248b2 --- /dev/null +++ b/beacon_node/network/src/sync/network_context/requests/payload_envelopes_by_range.rs @@ -0,0 +1,42 @@ +use super::{ActiveRequestItems, LookupVerifyError}; +use 
lighthouse_network::rpc::methods::PayloadEnvelopesByRangeRequest; +use std::sync::Arc; +use types::{EthSpec, SignedExecutionPayloadEnvelope}; + +/// Accumulates results of a payload_envelopes_by_range request. Only returns items after +/// receiving the stream termination. +pub struct PayloadEnvelopesByRangeRequestItems { + request: PayloadEnvelopesByRangeRequest, + items: Vec>>, +} + +impl PayloadEnvelopesByRangeRequestItems { + pub fn new(request: PayloadEnvelopesByRangeRequest) -> Self { + Self { + request, + items: vec![], + } + } +} + +impl ActiveRequestItems for PayloadEnvelopesByRangeRequestItems { + type Item = Arc>; + + fn add(&mut self, envelope: Self::Item) -> Result { + let slot = envelope.slot(); + if slot < self.request.start_slot || slot >= self.request.start_slot + self.request.count { + return Err(LookupVerifyError::UnrequestedSlot(slot)); + } + if self.items.iter().any(|existing| existing.slot() == slot) { + return Err(LookupVerifyError::DuplicatedData(slot, 0)); + } + + self.items.push(envelope); + + Ok(false) + } + + fn consume(&mut self) -> Vec { + std::mem::take(&mut self.items) + } +} From 2a2647411b95a7dd304da93dec8fde27998b1d4d Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay Date: Thu, 26 Mar 2026 14:45:49 -0700 Subject: [PATCH 069/127] Process envelopes correctly --- beacon_node/beacon_chain/src/beacon_chain.rs | 25 +++- .../beacon_chain/src/block_verification.rs | 50 +++++-- .../src/block_verification_types.rs | 8 +- .../src/data_availability_checker.rs | 16 ++- .../overflow_lru_cache.rs | 2 +- .../beacon_chain/src/early_attester_cache.rs | 2 +- .../beacon_chain/src/historical_blocks.rs | 2 +- .../payload_envelope_verification/import.rs | 136 +++++++++++++++++- .../src/payload_envelope_verification/mod.rs | 4 + .../gossip_methods.rs | 5 +- .../network_beacon_processor/sync_methods.rs | 9 -- 11 files changed, 219 insertions(+), 40 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs 
b/beacon_node/beacon_chain/src/beacon_chain.rs index 3c8ea307791..b378cca4c00 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -2951,12 +2951,13 @@ impl BeaconChain { } }; - // Import the blocks into the chain. - for signature_verified_block in signature_verified_blocks { + // Import the blocks (and envelopes for Gloas) into the chain. + for (signature_verified_block, maybe_envelope) in signature_verified_blocks { + let block_root = signature_verified_block.block_root(); let block_slot = signature_verified_block.slot(); match self .process_block( - signature_verified_block.block_root(), + block_root, signature_verified_block, notify_execution_layer, BlockImportSource::RangeSync, @@ -2969,6 +2970,22 @@ impl BeaconChain { AvailabilityProcessingStatus::Imported(block_root) => { // The block was imported successfully. imported_blocks.push((block_root, block_slot)); + + // Gloas: process the envelope now that the block is in fork choice. 
+ if let Some(envelope) = maybe_envelope + && let Err(error) = self + .process_range_sync_envelope( + block_root, + envelope, + notify_execution_layer, + ) + .await + { + return ChainSegmentResult::Failed { + imported_blocks, + error: BlockError::EnvelopeError(Box::new(error)), + }; + } } AvailabilityProcessingStatus::MissingComponents(slot, block_root) => { warn!( @@ -7219,7 +7236,7 @@ impl BeaconChain { block_data: AvailableBlockData, ) -> Option> { match block_data { - AvailableBlockData::NoData => None, + AvailableBlockData::NoData | AvailableBlockData::DataInEnvelope => None, AvailableBlockData::Blobs(blobs) => { debug!( %block_root, diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index ae9acdefd5a..4da2562364a 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -60,6 +60,7 @@ use crate::execution_payload::{ }; use crate::kzg_utils::blobs_to_data_column_sidecars; use crate::observed_block_producers::SeenBlock; +use crate::payload_envelope_verification::{AvailableEnvelope, EnvelopeError}; use crate::validator_monitor::HISTORIC_EPOCHS as VALIDATOR_MONITOR_HISTORIC_EPOCHS; use crate::validator_pubkey_cache::ValidatorPubkeyCache; use crate::{ @@ -328,6 +329,8 @@ pub enum BlockError { /// It's unclear if this block is valid, but it cannot be fully verified without the parent's /// execution payload envelope. ParentEnvelopeUnknown { parent_root: Hash256 }, + /// An error occurred while processing the execution payload envelope during range sync. 
+ EnvelopeError(Box), } /// Which specific signature(s) are invalid in a SignedBeaconBlock @@ -591,10 +594,17 @@ pub(crate) fn process_block_slash_info( mut chain_segment: Vec<(Hash256, RangeSyncBlock)>, chain: &BeaconChain, -) -> Result>, BlockError> { +) -> Result< + Vec<( + SignatureVerifiedBlock, + Option>>, + )>, + BlockError, +> { if chain_segment.is_empty() { return Ok(vec![]); } @@ -623,14 +633,30 @@ pub fn signature_verify_chain_segment( let consensus_context = ConsensusContext::new(block.slot()).set_current_block_root(block_root); - let available_block = block.into_available_block(); + let (available_block, envelope) = match block { + RangeSyncBlock::Base(ab) => (ab, None), + RangeSyncBlock::Gloas { block, envelope } => { + let ab = AvailableBlock::new( + block, + AvailableBlockData::DataInEnvelope, + &chain.data_availability_checker, + chain.spec.clone(), + ) + .map_err(BlockError::AvailabilityCheck)?; + (ab, envelope) + } + }; + available_blocks.push(available_block.clone()); - signature_verified_blocks.push(SignatureVerifiedBlock { - block: MaybeAvailableBlock::Available(available_block), - block_root, - parent: None, - consensus_context, - }); + signature_verified_blocks.push(( + SignatureVerifiedBlock { + block: MaybeAvailableBlock::Available(available_block), + block_root, + parent: None, + consensus_context, + }, + envelope, + )); } chain @@ -640,7 +666,7 @@ pub fn signature_verify_chain_segment( // verify signatures let pubkey_cache = get_validator_pubkey_cache(chain)?; let mut signature_verifier = get_signature_verifier(&state, &pubkey_cache, &chain.spec); - for svb in &mut signature_verified_blocks { + for (svb, _) in &mut signature_verified_blocks { signature_verifier .include_all_signatures(svb.block.as_block(), &mut svb.consensus_context)?; } @@ -651,7 +677,7 @@ pub fn signature_verify_chain_segment( drop(pubkey_cache); - if let Some(signature_verified_block) = signature_verified_blocks.first_mut() { + if let Some((signature_verified_block, 
_)) = signature_verified_blocks.first_mut() { signature_verified_block.parent = Some(parent); } @@ -1199,7 +1225,7 @@ impl SignatureVerifiedBlock { let result = info_span!("signature_verify").in_scope(|| signature_verifier.verify()); match result { Ok(_) => { - // gloas blocks are always available. + // Gloas blocks are always available — data arrives via the envelope. let maybe_available = if chain .spec .fork_name_at_slot::(block.slot()) @@ -1208,7 +1234,7 @@ impl SignatureVerifiedBlock { MaybeAvailableBlock::Available( AvailableBlock::new( block, - AvailableBlockData::NoData, + AvailableBlockData::DataInEnvelope, &chain.data_availability_checker, chain.spec.clone(), ) diff --git a/beacon_node/beacon_chain/src/block_verification_types.rs b/beacon_node/beacon_chain/src/block_verification_types.rs index 456e8794e74..e94752cfc02 100644 --- a/beacon_node/beacon_chain/src/block_verification_types.rs +++ b/beacon_node/beacon_chain/src/block_verification_types.rs @@ -139,7 +139,9 @@ impl RangeSyncBlock { pub fn n_blobs(&self) -> usize { match self { Self::Base(block) => match block.data() { - AvailableBlockData::NoData | AvailableBlockData::DataColumns(_) => 0, + AvailableBlockData::NoData + | AvailableBlockData::DataInEnvelope + | AvailableBlockData::DataColumns(_) => 0, AvailableBlockData::Blobs(blobs) => blobs.len(), }, Self::Gloas { .. } => 0, @@ -149,7 +151,9 @@ impl RangeSyncBlock { pub fn n_data_columns(&self) -> usize { match self { Self::Base(block) => match block.data() { - AvailableBlockData::NoData | AvailableBlockData::Blobs(_) => 0, + AvailableBlockData::NoData + | AvailableBlockData::DataInEnvelope + | AvailableBlockData::Blobs(_) => 0, AvailableBlockData::DataColumns(columns) => columns.len(), }, Self::Gloas { .. 
} => 0, diff --git a/beacon_node/beacon_chain/src/data_availability_checker.rs b/beacon_node/beacon_chain/src/data_availability_checker.rs index 1eb59b9b7b1..87c240e906a 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker.rs @@ -366,7 +366,7 @@ impl DataAvailabilityChecker { available_block: &AvailableBlock, ) -> Result<(), AvailabilityCheckError> { match available_block.data() { - AvailableBlockData::NoData => Ok(()), + AvailableBlockData::NoData | AvailableBlockData::DataInEnvelope => Ok(()), AvailableBlockData::Blobs(blobs) => verify_kzg_for_blob_list(blobs.iter(), &self.kzg) .map_err(AvailabilityCheckError::InvalidBlobs), AvailableBlockData::DataColumns(columns) => { @@ -388,7 +388,7 @@ impl DataAvailabilityChecker { for available_block in available_blocks { match available_block.data().to_owned() { - AvailableBlockData::NoData => {} + AvailableBlockData::NoData | AvailableBlockData::DataInEnvelope => {} AvailableBlockData::Blobs(blobs) => all_blobs.extend(blobs), AvailableBlockData::DataColumns(columns) => all_data_columns.extend(columns), } @@ -655,6 +655,9 @@ pub enum AvailableBlockData { Blobs(BlobSidecarList), /// Block is post-PeerDAS and has more than zero blobs DataColumns(DataColumnSidecarList), + /// Gloas: block data (payload + columns) arrives via the execution payload envelope, + /// not the block itself. 
+ DataInEnvelope, } impl AvailableBlockData { @@ -676,7 +679,7 @@ impl AvailableBlockData { pub fn blobs(&self) -> Option> { match self { - AvailableBlockData::NoData => None, + AvailableBlockData::NoData | AvailableBlockData::DataInEnvelope => None, AvailableBlockData::Blobs(blobs) => Some(blobs.clone()), AvailableBlockData::DataColumns(_) => None, } @@ -692,7 +695,7 @@ impl AvailableBlockData { pub fn data_columns(&self) -> Option> { match self { - AvailableBlockData::NoData => None, + AvailableBlockData::NoData | AvailableBlockData::DataInEnvelope => None, AvailableBlockData::Blobs(_) => None, AvailableBlockData::DataColumns(data_columns) => Some(data_columns.clone()), } @@ -757,6 +760,8 @@ impl AvailableBlock { return Err(AvailabilityCheckError::MissingBlobs); } } + // Gloas: data availability is handled by the envelope path, not the block. + AvailableBlockData::DataInEnvelope => {} AvailableBlockData::Blobs(blobs) => { if !blobs_required { return Err(AvailabilityCheckError::InvalidAvailableBlockData); @@ -835,7 +840,7 @@ impl AvailableBlock { pub fn has_blobs(&self) -> bool { match self.blob_data { - AvailableBlockData::NoData => false, + AvailableBlockData::NoData | AvailableBlockData::DataInEnvelope => false, AvailableBlockData::Blobs(..) 
=> true, AvailableBlockData::DataColumns(_) => false, } @@ -863,6 +868,7 @@ impl AvailableBlock { AvailableBlockData::DataColumns(data_columns) => { AvailableBlockData::DataColumns(data_columns.clone()) } + AvailableBlockData::DataInEnvelope => AvailableBlockData::DataInEnvelope, }, blobs_available_timestamp: self.blobs_available_timestamp, spec: self.spec.clone(), diff --git a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs index c0403595ee6..f26e1d7ad42 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs @@ -275,7 +275,7 @@ impl PendingComponents { // Block is available, construct `AvailableExecutedBlock` let blobs_available_timestamp = match blob_data { - AvailableBlockData::NoData => None, + AvailableBlockData::NoData | AvailableBlockData::DataInEnvelope => None, AvailableBlockData::Blobs(_) => self .verified_blobs .iter() diff --git a/beacon_node/beacon_chain/src/early_attester_cache.rs b/beacon_node/beacon_chain/src/early_attester_cache.rs index 752e4d1a967..644d60f2382 100644 --- a/beacon_node/beacon_chain/src/early_attester_cache.rs +++ b/beacon_node/beacon_chain/src/early_attester_cache.rs @@ -138,7 +138,7 @@ impl EarlyAttesterCache { }; let (blobs, data_columns) = match block.data() { - AvailableBlockData::NoData => (None, None), + AvailableBlockData::NoData | AvailableBlockData::DataInEnvelope => (None, None), AvailableBlockData::Blobs(blobs) => (Some(blobs.clone()), None), AvailableBlockData::DataColumns(data_columns) => (None, Some(data_columns.clone())), }; diff --git a/beacon_node/beacon_chain/src/historical_blocks.rs b/beacon_node/beacon_chain/src/historical_blocks.rs index bfda52558e4..ebb1bc4b767 100644 --- a/beacon_node/beacon_chain/src/historical_blocks.rs +++ b/beacon_node/beacon_chain/src/historical_blocks.rs @@ -157,7 
+157,7 @@ impl BeaconChain { } match &block_data { - AvailableBlockData::NoData => (), + AvailableBlockData::NoData | AvailableBlockData::DataInEnvelope => (), AvailableBlockData::Blobs(_) => new_oldest_blob_slot = Some(block.slot()), AvailableBlockData::DataColumns(_) => { new_oldest_data_column_slot = Some(block.slot()) diff --git a/beacon_node/beacon_chain/src/payload_envelope_verification/import.rs b/beacon_node/beacon_chain/src/payload_envelope_verification/import.rs index 85332a12ea7..d07b7cde735 100644 --- a/beacon_node/beacon_chain/src/payload_envelope_verification/import.rs +++ b/beacon_node/beacon_chain/src/payload_envelope_verification/import.rs @@ -6,16 +6,25 @@ use fork_choice::PayloadVerificationStatus; use slot_clock::SlotClock; use store::StoreOp; use tracing::{debug, error, info, info_span, instrument, warn}; -use types::{BeaconState, BlockImportSource, Hash256, Slot}; +use types::{BeaconState, BlockImportSource, EthSpec, Hash256, Slot}; + +use state_processing::{ + VerifySignatures, + envelope_processing::{VerifyStateRoot, process_execution_payload_envelope}, +}; +use store::DatabaseBlock; use super::{ AvailableEnvelope, AvailableExecutedEnvelope, EnvelopeError, EnvelopeImportData, - ExecutedEnvelope, gossip_verified_envelope::GossipVerifiedEnvelope, + ExecutedEnvelope, MaybeAvailableEnvelope, gossip_verified_envelope::GossipVerifiedEnvelope, + gossip_verified_envelope::verify_envelope_consistency, load_snapshot_from_state_root, + payload_notifier::PayloadNotifier, }; use crate::{ AvailabilityProcessingStatus, BeaconChain, BeaconChainError, BeaconChainTypes, - NotifyExecutionLayer, block_verification_types::AvailableBlockData, metrics, - payload_envelope_verification::ExecutionPendingEnvelope, validator_monitor::get_slot_delay_ms, + NotifyExecutionLayer, PayloadVerificationOutcome, block_verification_types::AvailableBlockData, + metrics, payload_envelope_verification::ExecutionPendingEnvelope, + validator_monitor::get_slot_delay_ms, }; const 
ENVELOPE_METRICS_CACHE_SLOT_LIMIT: u32 = 64; @@ -148,6 +157,125 @@ impl BeaconChain { } } + /// Process an `AvailableEnvelope` from range sync. Unlike the gossip path, the block has + /// already been imported into fork choice so we can skip gossip-specific checks. + /// + /// Steps: consistency checks, signature verification, EL verification (newPayload), + /// state processing, await EL result, and import. + #[instrument(skip_all, fields(block_root = ?block_root))] + pub async fn process_range_sync_envelope( + self: &Arc, + block_root: Hash256, + available_envelope: Box>, + notify_execution_layer: NotifyExecutionLayer, + ) -> Result { + let signed_envelope = available_envelope.envelope().clone(); + let block_slot = signed_envelope.slot(); + + // Load the block from store (just imported, guaranteed to exist). + let block = match self.store.try_get_full_block(&block_root)? { + Some(DatabaseBlock::Full(block)) => Arc::new(block), + Some(DatabaseBlock::Blinded(_)) | None => { + return Err(EnvelopeError::BlockRootUnknown { block_root }); + } + }; + + // Envelope consistency checks. + let execution_bid = &block + .message() + .body() + .signed_execution_payload_bid()? + .message; + let latest_finalized_slot = self + .canonical_head + .cached_head() + .finalized_checkpoint() + .epoch + .start_slot(T::EthSpec::slots_per_epoch()); + verify_envelope_consistency( + &signed_envelope.message, + &block, + execution_bid, + latest_finalized_slot, + )?; + + // Load state for signature verification and state processing. + let snapshot = + load_snapshot_from_state_root::(block_root, block.state_root(), &self.store)?; + + // Verify the envelope signature. + let is_valid = + signed_envelope.verify_signature_with_state(&snapshot.pre_state, &self.spec)?; + if !is_valid { + return Err(EnvelopeError::BadSignature); + } + + // Start EL verification (newPayload) as early as possible. 
+ let payload_notifier = PayloadNotifier::new( + self.clone(), + signed_envelope.clone(), + block.clone(), + notify_execution_layer, + )?; + let payload_verification_future = async move { + let chain = payload_notifier.chain.clone(); + if let Some(started_execution) = chain.slot_clock.now_duration() { + chain + .envelope_times_cache + .write() + .set_time_started_execution(block_root, block_slot, started_execution); + } + let payload_verification_status = payload_notifier.notify_new_payload().await?; + Ok(PayloadVerificationOutcome { + payload_verification_status, + }) + }; + let payload_verification_handle = self + .task_executor + .spawn_handle( + payload_verification_future, + "range_sync_envelope_payload_verification", + ) + .ok_or(BeaconChainError::RuntimeShutdown)?; + + // Run state processing (signatures already verified above). + let mut state = snapshot.pre_state; + process_execution_payload_envelope( + &mut state, + Some(snapshot.state_root), + &signed_envelope, + VerifySignatures::False, + VerifyStateRoot::True, + &self.spec, + )?; + + // Build the ExecutionPendingEnvelope with Available status (columns already bundled). + let execution_pending = ExecutionPendingEnvelope { + signed_envelope: MaybeAvailableEnvelope::Available(*available_envelope), + import_data: EnvelopeImportData { + block_root, + post_state: Box::new(state), + }, + payload_verification_handle, + }; + + // Await EL verification and import. + let executed_envelope = self + .clone() + .into_executed_payload_envelope(execution_pending) + .await?; + + match executed_envelope { + ExecutedEnvelope::Available(envelope) => { + self.import_available_execution_payload_envelope(Box::new(envelope)) + .await + } + ExecutedEnvelope::AvailabilityPending() => Err(EnvelopeError::InternalError( + "Pending payload envelope not yet implemented".to_owned(), + )), + } + } + /// Accepts a fully-verified payload envelope and awaits on its payload verification handle to /// get a fully `ExecutedEnvelope`. 
/// diff --git a/beacon_node/beacon_chain/src/payload_envelope_verification/mod.rs b/beacon_node/beacon_chain/src/payload_envelope_verification/mod.rs index b4628ae2d01..1e7d00fce32 100644 --- a/beacon_node/beacon_chain/src/payload_envelope_verification/mod.rs +++ b/beacon_node/beacon_chain/src/payload_envelope_verification/mod.rs @@ -75,6 +75,10 @@ impl AvailableEnvelope { } } + pub fn envelope(&self) -> &Arc> { + &self.envelope + } + pub fn message(&self) -> &ExecutionPayloadEnvelope { &self.envelope.message } diff --git a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs index 2e04847630c..8de90f991bd 100644 --- a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs @@ -1390,7 +1390,10 @@ impl NetworkBeaconProcessor { return None; } // BlobNotRequired is unreachable. Only constructed in `process_gossip_blob` - Err(e @ BlockError::InternalError(_)) | Err(e @ BlockError::BlobNotRequired(_)) => { + // EnvelopeError is unreachable. Only constructed during range sync envelope processing. + Err(e @ BlockError::InternalError(_)) + | Err(e @ BlockError::BlobNotRequired(_)) + | Err(e @ BlockError::EnvelopeError(_)) => { error!(error = %e, "Internal block gossip validation error"); return None; } diff --git a/beacon_node/network/src/network_beacon_processor/sync_methods.rs b/beacon_node/network/src/network_beacon_processor/sync_methods.rs index f14816139c4..f6d4940121e 100644 --- a/beacon_node/network/src/network_beacon_processor/sync_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/sync_methods.rs @@ -619,15 +619,6 @@ impl NetworkBeaconProcessor { return; }; - // TODO(gloas): Implement Gloas chain segment processing. - // Gloas blocks carry separate envelopes and need a different import path. - if downloaded_blocks - .iter() - .any(|b| matches!(b, RangeSyncBlock::Gloas { .. 
})) - { - todo!("Gloas chain segment processing"); - } - let start_slot = downloaded_blocks.first().map(|b| b.slot().as_u64()); let end_slot = downloaded_blocks.last().map(|b| b.slot().as_u64()); let sent_blocks = downloaded_blocks.len(); From 93d3343482e3f7f5157247075c64a04688a28126 Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay Date: Mon, 30 Mar 2026 15:36:01 -0700 Subject: [PATCH 070/127] Apply envelopes even if block is duplicate --- beacon_node/beacon_chain/src/beacon_chain.rs | 28 ++++++++++++++++++-- 1 file changed, 26 insertions(+), 2 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index b378cca4c00..9a305536784 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -2819,7 +2819,15 @@ impl BeaconChain { // // Note that `check_block_relevancy` is incapable of returning // `DuplicateImportStatusUnknown` so we don't need to handle that case here. - Err(BlockError::DuplicateFullyImported(_)) => continue, + // + // Gloas: keep duplicate blocks so their envelopes can still be processed + // in `process_chain_segment`. This handles the case where a node restarts + // before an envelope was persisted to the DB. + Err(BlockError::DuplicateFullyImported(_)) => { + if block.as_block().fork_name_unchecked().gloas_enabled() { + filtered_chain_segment.push((block_root, block)); + } + } // If the block is the genesis block, simply ignore this block. Err(BlockError::GenesisBlock) => continue, // If the block is is for a finalized slot, simply ignore this block. @@ -3005,8 +3013,24 @@ impl BeaconChain { Err(BlockError::DuplicateFullyImported(block_root)) => { debug!( ?block_root, - "Ignoring already known blocks while processing chain segment" + "Ignoring already known block while processing chain segment" ); + // Gloas: still process the envelope for duplicate blocks. The envelope + // may not have been persisted before a restart. 
+ if let Some(envelope) = maybe_envelope + && let Err(error) = self + .process_range_sync_envelope( + block_root, + envelope, + notify_execution_layer, + ) + .await + { + return ChainSegmentResult::Failed { + imported_blocks, + error: BlockError::EnvelopeError(Box::new(error)), + }; + } continue; } Err(error) => { From aa5292df99a7552ca6dc82ab00514dbe9a685bb0 Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay Date: Mon, 30 Mar 2026 15:36:17 -0700 Subject: [PATCH 071/127] Fix replayer --- .../state_processing/src/block_replayer.rs | 72 +++++++++++-------- 1 file changed, 44 insertions(+), 28 deletions(-) diff --git a/consensus/state_processing/src/block_replayer.rs b/consensus/state_processing/src/block_replayer.rs index f5f06d1cb9d..f6eec77e26a 100644 --- a/consensus/state_processing/src/block_replayer.rs +++ b/consensus/state_processing/src/block_replayer.rs @@ -9,6 +9,7 @@ use crate::{ per_slot_processing, }; use itertools::Itertools; +use std::collections::HashMap; use std::iter::Peekable; use std::marker::PhantomData; use types::{ @@ -288,17 +289,11 @@ where payload_envelopes: Vec>, target_slot: Option, ) -> Result { - let mut envelopes_iter = payload_envelopes.into_iter(); - - let mut next_envelope_at_slot = |slot| { - if let Some(envelope) = envelopes_iter.next() - && envelope.message.slot == slot - { - Ok(envelope) - } else { - Err(BlockReplayError::MissingPayloadEnvelope { slot }) - } - }; + let mut envelopes_by_slot: HashMap> = + payload_envelopes + .into_iter() + .map(|e| (e.message.slot, e)) + .collect(); for (i, block) in blocks.iter().enumerate() { // Allow one additional block at the start which is only used for its state root. @@ -313,24 +308,41 @@ where // indicates that the parent is full (and it hasn't already been applied). 
state_root = if block.fork_name_unchecked().gloas_enabled() && self.state.slot() == self.state.latest_block_header().slot - && self.state.payload_status() == StatePayloadStatus::Pending { - let latest_bid_block_hash = self - .state - .latest_execution_payload_bid() - .map_err(BlockReplayError::from)? - .block_hash; - - // Similar to `is_parent_block_full`, but reading the block hash from the - // not-yet-applied `block`. The slot 0 case covers genesis (no block replay reqd). - if self.state.slot() != 0 && block.is_parent_block_full(latest_bid_block_hash) { - let envelope = next_envelope_at_slot(self.state.slot())?; - // State root for the next slot processing is now the envelope's state root. - self.apply_payload_envelope(&envelope, state_root)? + if self.state.payload_status() == StatePayloadStatus::Pending { + let latest_bid_block_hash = self + .state + .latest_execution_payload_bid() + .map_err(BlockReplayError::from)? + .block_hash; + + // Similar to `is_parent_block_full`, but reading the block hash from the + // not-yet-applied `block`. The slot 0 case covers genesis (no block replay + // reqd). + if self.state.slot() != 0 + && block.is_parent_block_full(latest_bid_block_hash) + { + let envelope = envelopes_by_slot.remove(&self.state.slot()).ok_or( + BlockReplayError::MissingPayloadEnvelope { + slot: self.state.slot(), + }, + )?; + // State root for the next slot processing is now the envelope's + // state root. + self.apply_payload_envelope(&envelope, state_root)? + } else { + // Empty payload at this slot, the state root is unchanged from + // when the beacon block was applied. + state_root + } } else { - // Empty payload at this slot, the state root is unchanged from when the - // beacon block was applied. - state_root + // Full: the envelope was already applied. Use its state_root so + // per_slot_processing stores the correct post-envelope root + // (not the pre-envelope block state root). 
+ envelopes_by_slot + .get(&self.state.slot()) + .map(|e| e.message.state_root) + .unwrap_or(state_root) } } else { // Pre-Gloas or at skipped slots post-Gloas, the state root of the parent state @@ -384,7 +396,11 @@ where let mut opt_state_root = if let StatePayloadStatus::Full = self.desired_state_payload_status && let Some(last_block) = blocks.last() { - let envelope = next_envelope_at_slot(self.state.slot())?; + let envelope = envelopes_by_slot.remove(&self.state.slot()).ok_or( + BlockReplayError::MissingPayloadEnvelope { + slot: self.state.slot(), + }, + )?; Some(self.apply_payload_envelope(&envelope, last_block.state_root())?) } else { None From 993cecee83cb1b73189638b4446910c018d2b6f9 Mon Sep 17 00:00:00 2001 From: dapplion <35266934+dapplion@users.noreply.github.com> Date: Tue, 31 Mar 2026 15:22:31 -0500 Subject: [PATCH 072/127] Clear best_child/best_descendant during V28->V29 conversion --- consensus/proto_array/src/ssz_container.rs | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/consensus/proto_array/src/ssz_container.rs b/consensus/proto_array/src/ssz_container.rs index 5edc1cd3138..f8b1f6634e9 100644 --- a/consensus/proto_array/src/ssz_container.rs +++ b/consensus/proto_array/src/ssz_container.rs @@ -77,7 +77,17 @@ impl From for SszContainerV29 { Self { votes: v28.votes, prune_threshold: v28.prune_threshold, - nodes: v28.nodes.into_iter().map(ProtoNode::V17).collect(), + nodes: v28 + .nodes + .into_iter() + .map(|mut node| { + // best_child/best_descendant are no longer used (replaced by + // the virtual tree walk). Clear during conversion. 
+ node.best_child = None; + node.best_descendant = None; + ProtoNode::V17(node) + }) + .collect(), indices: v28.indices, previous_proposer_boost: v28.previous_proposer_boost, } From 1ee2ce4258335e23bef4542adc6dc7a61db2065e Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 1 Apr 2026 10:10:48 +1100 Subject: [PATCH 073/127] Fix schema migrations --- .../beacon_chain/src/persisted_fork_choice.rs | 54 ++++++++++++------ beacon_node/beacon_chain/src/schema_change.rs | 8 +-- .../src/schema_change/migration_schema_v29.rs | 55 ++++++++----------- consensus/fork_choice/src/fork_choice.rs | 9 +++ 4 files changed, 74 insertions(+), 52 deletions(-) diff --git a/beacon_node/beacon_chain/src/persisted_fork_choice.rs b/beacon_node/beacon_chain/src/persisted_fork_choice.rs index 592ea9ecd75..8edccbbe98b 100644 --- a/beacon_node/beacon_chain/src/persisted_fork_choice.rs +++ b/beacon_node/beacon_chain/src/persisted_fork_choice.rs @@ -22,26 +22,37 @@ pub struct PersistedForkChoice { pub fork_choice_store: PersistedForkChoiceStoreV28, } -macro_rules! 
impl_store_item { - ($type:ty) => { - impl store::StoreItem for $type { - fn db_column() -> DBColumn { - DBColumn::ForkChoice - } +impl PersistedForkChoiceV28 { + pub fn from_bytes(bytes: &[u8], store_config: &StoreConfig) -> Result { + let decompressed_bytes = store_config + .decompress_bytes(bytes) + .map_err(Error::Compression)?; + Self::from_ssz_bytes(&decompressed_bytes).map_err(Into::into) + } - fn as_store_bytes(&self) -> Vec { - self.as_ssz_bytes() - } + pub fn as_bytes(&self, store_config: &StoreConfig) -> Result, Error> { + let encode_timer = metrics::start_timer(&metrics::FORK_CHOICE_ENCODE_TIMES); + let ssz_bytes = self.as_ssz_bytes(); + drop(encode_timer); - fn from_store_bytes(bytes: &[u8]) -> std::result::Result { - Self::from_ssz_bytes(bytes).map_err(Into::into) - } - } - }; -} + let _compress_timer = metrics::start_timer(&metrics::FORK_CHOICE_COMPRESS_TIMES); + store_config + .compress_bytes(&ssz_bytes) + .map_err(Error::Compression) + } -impl_store_item!(PersistedForkChoiceV28); -impl_store_item!(PersistedForkChoiceV29); + pub fn as_kv_store_op( + &self, + key: Hash256, + store_config: &StoreConfig, + ) -> Result { + Ok(KeyValueStoreOp::PutKeyValue( + DBColumn::ForkChoice, + key.as_slice().to_vec(), + self.as_bytes(store_config)?, + )) + } +} impl PersistedForkChoiceV29 { pub fn from_bytes(bytes: &[u8], store_config: &StoreConfig) -> Result { @@ -83,3 +94,12 @@ impl From for PersistedForkChoiceV29 { } } } + +impl From for PersistedForkChoiceV28 { + fn from(v29: PersistedForkChoiceV29) -> Self { + Self { + fork_choice_v28: v29.fork_choice.into(), + fork_choice_store: v29.fork_choice_store, + } + } +} diff --git a/beacon_node/beacon_chain/src/schema_change.rs b/beacon_node/beacon_chain/src/schema_change.rs index fa2ab70d210..841f28e37de 100644 --- a/beacon_node/beacon_chain/src/schema_change.rs +++ b/beacon_node/beacon_chain/src/schema_change.rs @@ -22,13 +22,13 @@ pub fn migrate_schema( (_, _) if from == to && to == CURRENT_SCHEMA_VERSION => 
Ok(()), // Upgrade from v28 to v29. (SchemaVersion(28), SchemaVersion(29)) => { - upgrade_to_v29::(&db)?; - db.store_schema_version_atomically(to, vec![]) + let ops = upgrade_to_v29::(&db)?; + db.store_schema_version_atomically(to, ops) } // Downgrade from v29 to v28. (SchemaVersion(29), SchemaVersion(28)) => { - downgrade_from_v29::(&db)?; - db.store_schema_version_atomically(to, vec![]) + let ops = downgrade_from_v29::(&db)?; + db.store_schema_version_atomically(to, ops) } // Anything else is an error. (_, _) => Err(HotColdDBError::UnsupportedSchemaVersion { diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v29.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v29.rs index 6c82e8a737d..3069200fce5 100644 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v29.rs +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v29.rs @@ -1,12 +1,8 @@ -use crate::beacon_chain::BeaconChainTypes; +use crate::beacon_chain::{BeaconChainTypes, FORK_CHOICE_DB_KEY}; use crate::persisted_fork_choice::{PersistedForkChoiceV28, PersistedForkChoiceV29}; -use ssz::Decode; use store::hot_cold_store::HotColdDB; use store::{DBColumn, Error as StoreError, KeyValueStore, KeyValueStoreOp}; -use types::{EthSpec, Hash256}; - -/// The key used to store the fork choice in the database. -const FORK_CHOICE_DB_KEY: Hash256 = Hash256::ZERO; +use types::EthSpec; /// Upgrade from schema v28 to v29. /// @@ -14,24 +10,25 @@ const FORK_CHOICE_DB_KEY: Hash256 = Hash256::ZERO; /// virtual tree walk). /// - Fails if the persisted fork choice contains any V17 (pre-Gloas) proto /// nodes at or after the Gloas fork slot. +/// +/// Returns a list of store ops to be applied atomically with the schema version write. 
pub fn upgrade_to_v29( db: &HotColdDB, -) -> Result<(), StoreError> { +) -> Result, StoreError> { let gloas_fork_slot = db .spec .gloas_fork_epoch .map(|epoch| epoch.start_slot(T::EthSpec::slots_per_epoch())); - // Load the persisted fork choice (v28 format, uncompressed SSZ). + // Load the persisted fork choice (v28 format). let Some(fc_bytes) = db .hot_db .get_bytes(DBColumn::ForkChoice, FORK_CHOICE_DB_KEY.as_slice())? else { - return Ok(()); + return Ok(vec![]); }; - let mut persisted_v28 = - PersistedForkChoiceV28::from_ssz_bytes(&fc_bytes).map_err(StoreError::SszDecodeError)?; + let persisted_v28 = PersistedForkChoiceV28::from_bytes(&fc_bytes, db.get_config())?; // Check for V17 nodes at/after the Gloas fork slot. if let Some(gloas_fork_slot) = gloas_fork_slot { @@ -52,39 +49,30 @@ pub fn upgrade_to_v29( } } - // Clear best_child/best_descendant — replaced by the virtual tree walk. - for node in &mut persisted_v28.fork_choice_v28.proto_array_v28.nodes { - node.best_child = None; - node.best_descendant = None; - } - - // Convert to v29 and write back. + // Convert to v29 and encode. let persisted_v29 = PersistedForkChoiceV29::from(persisted_v28); - let fc_bytes = persisted_v29 - .as_bytes(db.get_config()) - .map_err(|e| StoreError::MigrationError(format!("failed to encode v29: {:?}", e)))?; - db.hot_db.do_atomically(vec![KeyValueStoreOp::PutKeyValue( - DBColumn::ForkChoice, - FORK_CHOICE_DB_KEY.as_slice().to_vec(), - fc_bytes, - )])?; - Ok(()) + Ok(vec![ + persisted_v29.as_kv_store_op(FORK_CHOICE_DB_KEY, db.get_config())?, + ]) } -/// Downgrade from schema v29 to v28 (no-op). +/// Downgrade from schema v29 to v28. /// +/// Converts the persisted fork choice from V29 format back to V28. /// Fails if the persisted fork choice contains any V29 proto nodes, as these contain /// payload-specific fields that cannot be losslessly converted back to V17 format. +/// +/// Returns a list of store ops to be applied atomically with the schema version write. 
pub fn downgrade_from_v29( db: &HotColdDB, -) -> Result<(), StoreError> { +) -> Result, StoreError> { // Load the persisted fork choice (v29 format, compressed). let Some(fc_bytes) = db .hot_db .get_bytes(DBColumn::ForkChoice, FORK_CHOICE_DB_KEY.as_slice())? else { - return Ok(()); + return Ok(vec![]); }; let persisted_v29 = @@ -111,5 +99,10 @@ pub fn downgrade_from_v29( )); } - Ok(()) + // Convert to v28 and encode. + let persisted_v28 = PersistedForkChoiceV28::from(persisted_v29); + + Ok(vec![ + persisted_v28.as_kv_store_op(FORK_CHOICE_DB_KEY, db.get_config())?, + ]) } diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index 3b13cd4429f..771104a02f8 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -1840,6 +1840,15 @@ impl From for PersistedForkChoiceV29 { } } +impl From for PersistedForkChoiceV28 { + fn from(v29: PersistedForkChoiceV29) -> Self { + Self { + proto_array_v28: v29.proto_array.into(), + queued_attestations: v29.queued_attestations, + } + } +} + #[cfg(test)] mod tests { use types::MainnetEthSpec; From bc6cf0f88290cb6c8de35ab8623dc0363e65cb92 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 1 Apr 2026 11:17:33 +1100 Subject: [PATCH 074/127] Remove payload attestation queueing and more cleanups --- beacon_node/http_api/src/lib.rs | 4 + beacon_node/http_api/tests/tests.rs | 4 + consensus/fork_choice/src/fork_choice.rs | 113 +++--------------- consensus/proto_array/src/proto_array.rs | 6 +- .../src/proto_array_fork_choice.rs | 6 +- 5 files changed, 34 insertions(+), 99 deletions(-) diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 5df10786175..0bb04888b73 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -2148,10 +2148,14 @@ pub fn serve( execution_status: execution_status_string, best_child: node .best_child() + .ok() + .flatten() .and_then(|index| proto_array.nodes.get(index)) 
.map(|child| child.root()), best_descendant: node .best_descendant() + .ok() + .flatten() .and_then(|index| proto_array.nodes.get(index)) .map(|descendant| descendant.root()), }, diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index 14bfb5ce920..b28816302cf 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -3179,10 +3179,14 @@ impl ApiTester { .unwrap_or_else(|| "irrelevant".to_string()), best_child: node .best_child() + .ok() + .flatten() .and_then(|index| expected_proto_array.nodes.get(index)) .map(|child| child.root()), best_descendant: node .best_descendant() + .ok() + .flatten() .and_then(|index| expected_proto_array.nodes.get(index)) .map(|descendant| descendant.root()), }, diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index 771104a02f8..a80ec99a25e 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -138,10 +138,6 @@ pub enum InvalidBlock { finalized_root: Hash256, block_ancestor: Option, }, - MissingExecutionPayloadBid { - block_slot: Slot, - block_root: Hash256, - }, } #[derive(Debug)] @@ -310,22 +306,6 @@ fn dequeue_attestations( std::mem::replace(queued_attestations, remaining) } -/// Returns all values in `queued` that have `slot + 1 < current_slot`. -/// Payload attestations need an extra slot of delay compared to regular attestations. -fn dequeue_payload_attestations( - current_slot: Slot, - queued: &mut Vec, -) -> Vec { - let remaining = queued.split_off( - queued - .iter() - .position(|a| a.slot.saturating_add(1_u64) >= current_slot) - .unwrap_or(queued.len()), - ); - - std::mem::replace(queued, remaining) -} - /// Denotes whether an attestation we are processing was received from a block or from gossip. 
/// Equivalent to the `is_from_block` `bool` in: /// @@ -370,9 +350,6 @@ pub struct ForkChoice { proto_array: ProtoArrayForkChoice, /// Attestations that arrived at the current slot and must be queued for later processing. queued_attestations: Vec, - /// Payload attestations (PTC votes) that must be queued for later processing. - /// These have different dequeue timing than regular attestations. - queued_payload_attestations: Vec, /// Stores a cache of the values required to be sent to the execution layer. forkchoice_update_parameters: ForkchoiceUpdateParameters, _phantom: PhantomData, @@ -387,7 +364,6 @@ where self.fc_store == other.fc_store && self.proto_array == other.proto_array && self.queued_attestations == other.queued_attestations - && self.queued_payload_attestations == other.queued_payload_attestations } } @@ -472,7 +448,6 @@ where fc_store, proto_array, queued_attestations: vec![], - queued_payload_attestations: vec![], // This will be updated during the next call to `Self::get_head`. forkchoice_update_parameters: ForkchoiceUpdateParameters { head_hash: None, @@ -966,14 +941,6 @@ where Some(signed_bid.message.block_hash), ) } else { - if spec.fork_name_at_slot::(block.slot()).gloas_enabled() { - return Err(Error::InvalidBlock( - InvalidBlock::MissingExecutionPayloadBid { - block_slot: block.slot(), - block_root, - }, - )); - } (None, None) }; @@ -1334,11 +1301,21 @@ where ) -> Result<(), Error> { self.update_time(system_time_current_slot)?; - if attestation.data.beacon_block_root == Hash256::zero() { + if attestation.data.beacon_block_root.is_zero() { return Ok(()); } - self.validate_on_payload_attestation(attestation, is_from_block)?; + match self.validate_on_payload_attestation(attestation, is_from_block) { + Ok(()) => (), + Err(InvalidAttestation::PayloadAttestationNotCurrentSlot { .. 
}) => { + // Just ignore wrong-slot payload attestations, they could have been processed at + // the correct slot when received on gossip, but then have the wrong-slot by the + // time they make it to here (TOCTOU). + // TODO(gloas): consider moving this to the call site for gossip processing + return Ok(()); + } + Err(e) => return Err(e.into()), + } // Resolve validator indices to PTC committee positions. let ptc_indices: Vec = attestation @@ -1346,34 +1323,13 @@ where .filter_map(|vi| ptc.iter().position(|&p| p == *vi as usize)) .collect(); - let processing_slot = self.fc_store.get_current_slot(); - // Payload attestations from blocks can be applied in the next slot (S+1 for data.slot=S), - // while gossiped payload attestations are delayed one extra slot. - let should_process_now = match is_from_block { - AttestationFromBlock::True => attestation.data.slot < processing_slot, - AttestationFromBlock::False => { - attestation.data.slot.saturating_add(1_u64) < processing_slot - } - }; - - if should_process_now { - for &ptc_index in &ptc_indices { - self.proto_array.process_payload_attestation( - attestation.data.beacon_block_root, - ptc_index, - attestation.data.payload_present, - attestation.data.blob_data_available, - )?; - } - } else { - self.queued_payload_attestations - .push(QueuedPayloadAttestation { - slot: attestation.data.slot, - ptc_indices, - block_root: attestation.data.beacon_block_root, - payload_present: attestation.data.payload_present, - blob_data_available: attestation.data.blob_data_available, - }); + for &ptc_index in &ptc_indices { + self.proto_array.process_payload_attestation( + attestation.data.beacon_block_root, + ptc_index, + attestation.data.payload_present, + attestation.data.blob_data_available, + )?; } Ok(()) @@ -1408,7 +1364,6 @@ where // Process any attestations that might now be eligible. 
self.process_attestation_queue()?; - self.process_payload_attestation_queue()?; Ok(self.fc_store.get_current_slot()) } @@ -1495,26 +1450,6 @@ where Ok(()) } - /// Processes and removes from the queue any queued payload attestations which may now be - /// eligible for processing. Payload attestations use `slot + 1 < current_slot` timing. - fn process_payload_attestation_queue(&mut self) -> Result<(), Error> { - let current_slot = self.fc_store.get_current_slot(); - for attestation in - dequeue_payload_attestations(current_slot, &mut self.queued_payload_attestations) - { - for &ptc_index in &attestation.ptc_indices { - self.proto_array.process_payload_attestation( - attestation.block_root, - ptc_index, - attestation.payload_present, - attestation.blob_data_available, - )?; - } - } - - Ok(()) - } - /// Returns `true` if the block is known **and** a descendant of the finalized root. pub fn contains_block(&self, block_root: &Hash256) -> bool { self.proto_array.contains_block(block_root) @@ -1670,11 +1605,6 @@ where &self.queued_attestations } - /// Returns a reference to the currently queued payload attestations. - pub fn queued_payload_attestations(&self) -> &[QueuedPayloadAttestation] { - &self.queued_payload_attestations - } - /// Returns the store's `proposer_boost_root`. pub fn proposer_boost_root(&self) -> Hash256 { self.fc_store.proposer_boost_root() @@ -1759,7 +1689,6 @@ where fc_store, proto_array, queued_attestations: persisted.queued_attestations, - queued_payload_attestations: persisted.queued_payload_attestations, // Will be updated in the following call to `Self::get_head`. 
forkchoice_update_parameters: ForkchoiceUpdateParameters { head_hash: None, @@ -1800,7 +1729,6 @@ where PersistedForkChoice { proto_array: self.proto_array().as_ssz_container(), queued_attestations: self.queued_attestations().to_vec(), - queued_payload_attestations: self.queued_payload_attestations.clone(), } } @@ -1824,8 +1752,6 @@ pub struct PersistedForkChoice { #[superstruct(only(V29))] pub proto_array: proto_array::core::SszContainerV29, pub queued_attestations: Vec, - #[superstruct(only(V29))] - pub queued_payload_attestations: Vec, } pub type PersistedForkChoice = PersistedForkChoiceV29; @@ -1835,7 +1761,6 @@ impl From for PersistedForkChoiceV29 { Self { proto_array: v28.proto_array_v28.into(), queued_attestations: v28.queued_attestations, - queued_payload_attestations: vec![], } } } diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index f68d3eb71b0..452679d7a3a 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -117,10 +117,10 @@ pub struct ProtoNode { pub finalized_checkpoint: Checkpoint, #[superstruct(getter(copy))] pub weight: u64, - #[superstruct(getter(copy))] + #[superstruct(only(V17), partial_getter(copy))] #[ssz(with = "four_byte_option_usize")] pub best_child: Option, - #[superstruct(getter(copy))] + #[superstruct(only(V17), partial_getter(copy))] #[ssz(with = "four_byte_option_usize")] pub best_descendant: Option, /// Indicates if an execution node has marked this block as valid. 
Also contains the execution @@ -614,8 +614,6 @@ impl ProtoArray { justified_checkpoint: block.justified_checkpoint, finalized_checkpoint: block.finalized_checkpoint, weight: 0, - best_child: None, - best_descendant: None, unrealized_justified_checkpoint: block.unrealized_justified_checkpoint, unrealized_finalized_checkpoint: block.unrealized_finalized_checkpoint, parent_payload_status, diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index 6c90af13028..cb467f2531e 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -997,7 +997,11 @@ impl ProtoArrayForkChoice { /// Returns the `block.execution_status` field, if the block is present. pub fn get_block_execution_status(&self, block_root: &Hash256) -> Option { let block = self.get_proto_node(block_root)?; - block.execution_status().ok() + Some( + block + .execution_status() + .unwrap_or_else(|_| ExecutionStatus::irrelevant()), + ) } /// Returns whether the execution payload for a block has been received. From 4684d972e0b9fe4c574b885de765ee97bab82246 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 1 Apr 2026 11:31:51 +1100 Subject: [PATCH 075/127] Remove TOCTOU early return --- consensus/fork_choice/src/fork_choice.rs | 15 ++++----------- 1 file changed, 4 insertions(+), 11 deletions(-) diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index a80ec99a25e..630de11281a 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -1305,17 +1305,10 @@ where return Ok(()); } - match self.validate_on_payload_attestation(attestation, is_from_block) { - Ok(()) => (), - Err(InvalidAttestation::PayloadAttestationNotCurrentSlot { .. 
}) => { - // Just ignore wrong-slot payload attestations, they could have been processed at - // the correct slot when received on gossip, but then have the wrong-slot by the - // time they make it to here (TOCTOU). - // TODO(gloas): consider moving this to the call site for gossip processing - return Ok(()); - } - Err(e) => return Err(e.into()), - } + // TODO(gloas): Should ignore wrong-slot payload attestations at the caller, they could + // have been processed at the correct slot when received on gossip, but then have the + // wrong-slot by the time they make it to here (TOCTOU). + self.validate_on_payload_attestation(attestation, is_from_block)?; // Resolve validator indices to PTC committee positions. let ptc_indices: Vec = attestation From 51e78fd157456d90c8e7c8bb79b929c670843b2f Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 1 Apr 2026 11:51:03 +1100 Subject: [PATCH 076/127] Fix queued attestation decoding from disk --- consensus/fork_choice/src/fork_choice.rs | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index 630de11281a..b075a23ddd4 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -260,6 +260,15 @@ pub struct QueuedAttestation { payload_present: bool, } +/// Legacy queued attestation without payload_present (pre-Gloas, schema V28). +#[derive(Clone, PartialEq, Encode, Decode)] +pub struct QueuedAttestationV28 { + slot: Slot, + attesting_indices: Vec, + block_root: Hash256, + target_epoch: Epoch, +} + impl<'a, E: EthSpec> From> for QueuedAttestation { fn from(a: IndexedAttestationRef<'a, E>) -> Self { Self { @@ -1681,7 +1690,7 @@ where let mut fork_choice = Self { fc_store, proto_array, - queued_attestations: persisted.queued_attestations, + queued_attestations: vec![], // Will be updated in the following call to `Self::get_head`. 
forkchoice_update_parameters: ForkchoiceUpdateParameters { head_hash: None, @@ -1721,7 +1730,6 @@ where pub fn to_persisted(&self) -> PersistedForkChoice { PersistedForkChoice { proto_array: self.proto_array().as_ssz_container(), - queued_attestations: self.queued_attestations().to_vec(), } } @@ -1744,7 +1752,8 @@ pub struct PersistedForkChoice { pub proto_array_v28: proto_array::core::SszContainerV28, #[superstruct(only(V29))] pub proto_array: proto_array::core::SszContainerV29, - pub queued_attestations: Vec, + #[superstruct(only(V28))] + pub queued_attestations_v28: Vec, } pub type PersistedForkChoice = PersistedForkChoiceV29; @@ -1753,7 +1762,6 @@ impl From for PersistedForkChoiceV29 { fn from(v28: PersistedForkChoiceV28) -> Self { Self { proto_array: v28.proto_array_v28.into(), - queued_attestations: v28.queued_attestations, } } } @@ -1762,7 +1770,7 @@ impl From for PersistedForkChoiceV28 { fn from(v29: PersistedForkChoiceV29) -> Self { Self { proto_array_v28: v29.proto_array.into(), - queued_attestations: v29.queued_attestations, + queued_attestations_v28: vec![], } } } From f6f9eae4ef7314d6a9a73b8792a8c7f2f825f39b Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 1 Apr 2026 12:04:32 +1100 Subject: [PATCH 077/127] More cleanup --- consensus/fork_choice/src/fork_choice.rs | 16 ++-------------- consensus/fork_choice/src/lib.rs | 3 +-- .../src/fork_choice_test_definition.rs | 1 + .../proto_array/src/proto_array_fork_choice.rs | 5 ++++- 4 files changed, 8 insertions(+), 17 deletions(-) diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index b075a23ddd4..eac9820de38 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -281,19 +281,6 @@ impl<'a, E: EthSpec> From> for QueuedAttestation { } } -/// Used for queuing payload attestations (PTC votes) from the current slot. 
-/// Payload attestations have different dequeue timing than regular attestations: -/// gossiped payload attestations need an extra slot of delay (slot + 1 < current_slot). -#[derive(Clone, PartialEq, Encode, Decode)] -pub struct QueuedPayloadAttestation { - slot: Slot, - /// Resolved PTC committee positions (not validator indices). - ptc_indices: Vec, - block_root: Hash256, - payload_present: bool, - blob_data_available: bool, -} - /// Returns all values in `self.queued_attestations` that have a slot that is earlier than the /// current slot. Also removes those values from `self.queued_attestations`. fn dequeue_attestations( @@ -450,6 +437,7 @@ where execution_status, execution_payload_parent_hash, execution_payload_block_hash, + anchor_block.message().proposer_index(), spec, )?; @@ -462,7 +450,7 @@ where head_hash: None, justified_hash: None, finalized_hash: None, - // These will be updated during the next call to `Self::get_head`. + // This will be updated during the next call to `Self::get_head`. 
head_root: Hash256::zero(), }, _phantom: PhantomData, diff --git a/consensus/fork_choice/src/lib.rs b/consensus/fork_choice/src/lib.rs index 70f1dbc215e..8f479125b76 100644 --- a/consensus/fork_choice/src/lib.rs +++ b/consensus/fork_choice/src/lib.rs @@ -5,8 +5,7 @@ mod metrics; pub use crate::fork_choice::{ AttestationFromBlock, Error, ForkChoice, ForkChoiceView, ForkchoiceUpdateParameters, InvalidAttestation, InvalidBlock, PayloadVerificationStatus, PersistedForkChoice, - PersistedForkChoiceV28, PersistedForkChoiceV29, QueuedAttestation, QueuedPayloadAttestation, - ResetPayloadStatuses, + PersistedForkChoiceV28, PersistedForkChoiceV29, QueuedAttestation, ResetPayloadStatuses, }; pub use fork_choice_store::ForkChoiceStore; pub use proto_array::{ diff --git a/consensus/proto_array/src/fork_choice_test_definition.rs b/consensus/proto_array/src/fork_choice_test_definition.rs index 34d7f2e48ee..ff9d70bad58 100644 --- a/consensus/proto_array/src/fork_choice_test_definition.rs +++ b/consensus/proto_array/src/fork_choice_test_definition.rs @@ -144,6 +144,7 @@ impl ForkChoiceTestDefinition { ExecutionStatus::Optimistic(ExecutionBlockHash::zero()), self.execution_payload_parent_hash, self.execution_payload_block_hash, + 0, &spec, ) .expect("should create fork choice struct"); diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index cb467f2531e..71a5a46f8c1 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -479,6 +479,7 @@ impl ProtoArrayForkChoice { execution_status: ExecutionStatus, execution_payload_parent_hash: Option, execution_payload_block_hash: Option, + proposer_index: u64, spec: &ChainSpec, ) -> Result { let mut proto_array = ProtoArray { @@ -505,7 +506,7 @@ impl ProtoArrayForkChoice { unrealized_finalized_checkpoint: Some(finalized_checkpoint), execution_payload_parent_hash, execution_payload_block_hash, - 
proposer_index: Some(0), + proposer_index: Some(proposer_index), }; proto_array @@ -1317,6 +1318,7 @@ mod test_compute_deltas { execution_status, None, None, + 0, &spec, ) .unwrap(); @@ -1471,6 +1473,7 @@ mod test_compute_deltas { execution_status, None, None, + 0, &spec, ) .unwrap(); From ad0f3cf89bf3034df4efeb5700795c96d20cebab Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 1 Apr 2026 12:10:11 +1100 Subject: [PATCH 078/127] Use None for post-Gloas payload hashes pre-Gloas --- consensus/fork_choice/src/fork_choice.rs | 26 +++++++++++------------- 1 file changed, 12 insertions(+), 14 deletions(-) diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index eac9820de38..1f5b2cf1b08 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -395,8 +395,16 @@ where .map_err(Error::BeaconStateError)?; let (execution_status, execution_payload_parent_hash, execution_payload_block_hash) = - if let Ok(execution_payload) = anchor_block.message().execution_payload() { - // Pre-Gloas forks: hashes come from the execution payload. + if let Ok(signed_bid) = anchor_block.message().body().signed_execution_payload_bid() { + // Gloas: execution status is irrelevant post-Gloas; payload validation + // is decoupled from beacon blocks. + ( + ExecutionStatus::irrelevant(), + Some(signed_bid.message.parent_block_hash), + Some(signed_bid.message.block_hash), + ) + } else if let Ok(execution_payload) = anchor_block.message().execution_payload() { + // Pre-Gloas forks: do not set payload hashes, they are only used post-Gloas. if execution_payload.is_default_with_empty_roots() { (ExecutionStatus::irrelevant(), None, None) } else { @@ -404,20 +412,10 @@ where // trusted block and state. 
( ExecutionStatus::Valid(execution_payload.block_hash()), - Some(execution_payload.parent_hash()), - Some(execution_payload.block_hash()), + None, + None, ) } - } else if let Ok(signed_bid) = - anchor_block.message().body().signed_execution_payload_bid() - { - // Gloas: execution status is irrelevant post-Gloas; payload validation - // is decoupled from beacon blocks. - ( - ExecutionStatus::irrelevant(), - Some(signed_bid.message.parent_block_hash), - Some(signed_bid.message.block_hash), - ) } else { // Pre-merge: no execution payload at all. (ExecutionStatus::irrelevant(), None, None) From 9ef73d0af01c1eb53239a12976d849866c784098 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 1 Apr 2026 12:25:22 +1100 Subject: [PATCH 079/127] Add new cross-boundary test --- .../gloas_payload.rs | 127 ++++++++++++++++++ 1 file changed, 127 insertions(+) diff --git a/consensus/proto_array/src/fork_choice_test_definition/gloas_payload.rs b/consensus/proto_array/src/fork_choice_test_definition/gloas_payload.rs index 0fb120328c2..9a7acbde091 100644 --- a/consensus/proto_array/src/fork_choice_test_definition/gloas_payload.rs +++ b/consensus/proto_array/src/fork_choice_test_definition/gloas_payload.rs @@ -718,6 +718,133 @@ pub fn get_gloas_payload_received_interleaving_test_definition() -> ForkChoiceTe mod tests { use super::*; + fn gloas_fork_boundary_spec() -> ChainSpec { + let mut spec = MainnetEthSpec::default_spec(); + spec.proposer_score_boost = Some(50); + spec.gloas_fork_epoch = Some(Epoch::new(1)); + spec + } + + /// Gloas fork boundary: a chain starting pre-Gloas (V17 nodes) that crosses into + /// Gloas (V29 nodes). The head should advance through the fork boundary. + /// + /// Parameters: + /// - `skip_first_gloas_slot`: if true, there is no block at the first Gloas slot (slot 32); + /// the first V29 block appears at slot 33. + /// - `first_gloas_block_full`: if true, the first V29 block extends the parent V17 node's + /// EL chain (Full parent payload status). 
If false, it doesn't (Empty). + fn get_gloas_fork_boundary_test_definition( + skip_first_gloas_slot: bool, + first_gloas_block_full: bool, + ) -> ForkChoiceTestDefinition { + let mut ops = vec![]; + + // Block at slot 31 — last pre-Gloas slot. Created as a V17 node because + // gloas_fork_epoch = 1 → Gloas starts at slot 32. + // + // The test harness sets execution_status = Optimistic(ExecutionBlockHash::from_root(root)), + // so this V17 node's EL block hash = ExecutionBlockHash::from_root(get_root(1)). + ops.push(Operation::ProcessBlock { + slot: Slot::new(31), + root: get_root(1), + parent_root: get_root(0), + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + execution_payload_parent_hash: None, + execution_payload_block_hash: None, + }); + + // First Gloas block (V29 node). + let gloas_slot = if skip_first_gloas_slot { 33 } else { 32 }; + + // For Full: execution_payload_parent_hash must match the V17 parent's EL hash. + // The V17 parent's EL hash = ExecutionBlockHash::from_root(get_root(1)) = get_hash(1). + // For Empty: use a non-matching hash. + let parent_hash = if first_gloas_block_full { + get_hash(1) + } else { + get_hash(99) + }; + + ops.push(Operation::ProcessBlock { + slot: Slot::new(gloas_slot), + root: get_root(2), + parent_root: get_root(1), + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + execution_payload_parent_hash: Some(parent_hash), + execution_payload_block_hash: Some(get_hash(2)), + }); + + // Verify the parent_payload_status is correctly set. + let expected_parent_status = if first_gloas_block_full { + PayloadStatus::Full + } else { + PayloadStatus::Empty + }; + ops.push(Operation::AssertParentPayloadStatus { + block_root: get_root(2), + expected_status: expected_parent_status, + }); + + // Mark root 2's execution payload as received so the Full virtual child exists. 
+ ops.push(Operation::ProcessExecutionPayload { + block_root: get_root(2), + }); + + // Extend the chain with another V29 block (Full child of root 2). + ops.push(Operation::ProcessBlock { + slot: Slot::new(gloas_slot + 1), + root: get_root(3), + parent_root: get_root(2), + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + execution_payload_parent_hash: Some(get_hash(2)), + execution_payload_block_hash: Some(get_hash(3)), + }); + + // Head should advance to the tip of the chain through the fork boundary. + ops.push(Operation::FindHead { + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + justified_state_balances: vec![1], + expected_head: get_root(3), + current_slot: Slot::new(gloas_slot + 1), + expected_payload_status: None, + }); + + ForkChoiceTestDefinition { + finalized_block_slot: Slot::new(0), + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + operations: ops, + // Genesis is V17 (slot 0 < Gloas fork slot 32), these are unused for V17. 
+ execution_payload_parent_hash: None, + execution_payload_block_hash: None, + spec: Some(gloas_fork_boundary_spec()), + } + } + + #[test] + fn fork_boundary_no_skip_full() { + get_gloas_fork_boundary_test_definition(false, true).run(); + } + + #[test] + fn fork_boundary_no_skip_empty() { + get_gloas_fork_boundary_test_definition(false, false).run(); + } + + #[test] + fn fork_boundary_skip_first_gloas_slot_full() { + get_gloas_fork_boundary_test_definition(true, true).run(); + } + + #[test] + fn fork_boundary_skip_first_gloas_slot_empty() { + get_gloas_fork_boundary_test_definition(true, false).run(); + } + #[test] fn chain_following() { let test = get_gloas_chain_following_test_definition(); From afb1f0ae2d476cb5c864d6d4ead3a119097b2abc Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 1 Apr 2026 12:48:34 +1100 Subject: [PATCH 080/127] Fix VoteTracker decoding --- .../src/proto_array_fork_choice.rs | 39 +++++++++++++++++++ consensus/proto_array/src/ssz_container.rs | 9 +++-- 2 files changed, 45 insertions(+), 3 deletions(-) diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index 71a5a46f8c1..5269957dfff 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -33,6 +33,45 @@ pub struct VoteTracker { next_payload_present: bool, } +// Can be deleted once the V28 schema migration is buried. +#[derive(Default, PartialEq, Clone, Encode, Decode)] +pub struct VoteTrackerV28 { + current_root: Hash256, + next_root: Hash256, + current_slot: Slot, + next_slot: Slot, +} + +// This impl is only used upon upgrade from pre-Gloas to Gloas with all pre-Gloas nodes. +// The payload status is `false` for pre-Gloas nodes. 
+impl From for VoteTracker { + fn from(v: VoteTrackerV28) -> Self { + VoteTracker { + current_root: v.current_root, + next_root: v.next_root, + current_slot: v.current_slot, + next_slot: v.next_slot, + // TODO(gloas): check that this is correct + current_payload_present: false, + next_payload_present: false, + } + } +} + +// This impl is only used upon downgrade from V29 to V28, with exclusively pre-Gloas nodes. +impl From for VoteTrackerV28 { + fn from(v: VoteTracker) -> Self { + // Drop the payload_present, but this is safe because this is only called on pre-Gloas + // nodes. + VoteTrackerV28 { + current_root: v.current_root, + next_root: v.next_root, + current_slot: v.current_slot, + next_slot: v.next_slot, + } + } +} + pub struct LatestMessage { pub slot: Slot, pub root: Hash256, diff --git a/consensus/proto_array/src/ssz_container.rs b/consensus/proto_array/src/ssz_container.rs index f8b1f6634e9..80a67022104 100644 --- a/consensus/proto_array/src/ssz_container.rs +++ b/consensus/proto_array/src/ssz_container.rs @@ -2,7 +2,7 @@ use crate::proto_array::ProposerBoost; use crate::{ Error, JustifiedBalances, proto_array::{ProtoArray, ProtoNode, ProtoNodeV17}, - proto_array_fork_choice::{ElasticList, ProtoArrayForkChoice, VoteTracker}, + proto_array_fork_choice::{ElasticList, ProtoArrayForkChoice, VoteTracker, VoteTrackerV28}, }; use ssz::{Encode, four_byte_option_impl}; use ssz_derive::{Decode, Encode}; @@ -22,6 +22,9 @@ pub type SszContainer = SszContainerV29; no_enum )] pub struct SszContainer { + #[superstruct(only(V28))] + pub votes_v28: Vec, + #[superstruct(only(V29))] pub votes: Vec, pub prune_threshold: usize, // Deprecated, remove in a future schema migration @@ -75,7 +78,7 @@ impl TryFrom<(SszContainerV29, JustifiedBalances)> for ProtoArrayForkChoice { impl From for SszContainerV29 { fn from(v28: SszContainerV28) -> Self { Self { - votes: v28.votes, + votes: v28.votes_v28.into_iter().map(Into::into).collect(), prune_threshold: v28.prune_threshold, nodes: 
v28 .nodes @@ -98,7 +101,7 @@ impl From for SszContainerV29 { impl From for SszContainerV28 { fn from(v29: SszContainerV29) -> Self { Self { - votes: v29.votes, + votes_v28: v29.votes.into_iter().map(Into::into).collect(), prune_threshold: v29.prune_threshold, // These checkpoints are not consumed in v28 paths since the upgrade from v17, // we can safely default the values. From edae39cc298fc211597f3082c9e2ec81ac19961e Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 1 Apr 2026 13:29:48 +1100 Subject: [PATCH 081/127] Fix fork transition case --- .../gloas_payload.rs | 44 +++++++++-------- consensus/proto_array/src/proto_array.rs | 48 +++++++++---------- 2 files changed, 48 insertions(+), 44 deletions(-) diff --git a/consensus/proto_array/src/fork_choice_test_definition/gloas_payload.rs b/consensus/proto_array/src/fork_choice_test_definition/gloas_payload.rs index 9a7acbde091..5b82e1de8c8 100644 --- a/consensus/proto_array/src/fork_choice_test_definition/gloas_payload.rs +++ b/consensus/proto_array/src/fork_choice_test_definition/gloas_payload.rs @@ -757,40 +757,31 @@ mod tests { // First Gloas block (V29 node). let gloas_slot = if skip_first_gloas_slot { 33 } else { 32 }; - // For Full: execution_payload_parent_hash must match the V17 parent's EL hash. - // The V17 parent's EL hash = ExecutionBlockHash::from_root(get_root(1)) = get_hash(1). - // For Empty: use a non-matching hash. - let parent_hash = if first_gloas_block_full { - get_hash(1) - } else { - get_hash(99) - }; - + // The first Gloas block should always have the pre-Gloas block as its execution parent, + // although this is currently not checked anywhere (the spec doesn't mention this). 
ops.push(Operation::ProcessBlock { slot: Slot::new(gloas_slot), root: get_root(2), parent_root: get_root(1), justified_checkpoint: get_checkpoint(0), finalized_checkpoint: get_checkpoint(0), - execution_payload_parent_hash: Some(parent_hash), + execution_payload_parent_hash: Some(get_hash(1)), execution_payload_block_hash: Some(get_hash(2)), }); - // Verify the parent_payload_status is correctly set. - let expected_parent_status = if first_gloas_block_full { - PayloadStatus::Full - } else { - PayloadStatus::Empty - }; + // Parent payload status of fork boundary block should always be Empty. + let expected_parent_status = PayloadStatus::Empty; ops.push(Operation::AssertParentPayloadStatus { block_root: get_root(2), expected_status: expected_parent_status, }); // Mark root 2's execution payload as received so the Full virtual child exists. - ops.push(Operation::ProcessExecutionPayload { - block_root: get_root(2), - }); + if first_gloas_block_full { + ops.push(Operation::ProcessExecutionPayload { + block_root: get_root(2), + }); + } // Extend the chain with another V29 block (Full child of root 2). 
ops.push(Operation::ProcessBlock { @@ -799,7 +790,11 @@ mod tests { parent_root: get_root(2), justified_checkpoint: get_checkpoint(0), finalized_checkpoint: get_checkpoint(0), - execution_payload_parent_hash: Some(get_hash(2)), + execution_payload_parent_hash: if first_gloas_block_full { + Some(get_hash(2)) + } else { + Some(get_hash(1)) + }, execution_payload_block_hash: Some(get_hash(3)), }); @@ -813,6 +808,15 @@ mod tests { expected_payload_status: None, }); + ops.push(Operation::AssertParentPayloadStatus { + block_root: get_root(3), + expected_status: if first_gloas_block_full { + PayloadStatus::Full + } else { + PayloadStatus::Empty + }, + }); + ForkChoiceTestDefinition { finalized_block_slot: Slot::new(0), justified_checkpoint: get_checkpoint(0), diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 452679d7a3a..ffe60d3a500 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -570,31 +570,31 @@ impl ProtoArray { block_root: block.root, })?; - let parent_payload_status: PayloadStatus = if let Some(parent_node) = - parent_index.and_then(|idx| self.nodes.get(idx)) - { - // Get the parent's execution block hash, handling both V17 and V29 nodes. - // V17 parents occur during the Gloas fork transition. - // TODO(gloas): the spec's `get_parent_payload_status` assumes all blocks are - // post-Gloas with bids. Revisit once the spec clarifies fork-transition behavior. - let parent_el_block_hash = match parent_node { - ProtoNode::V29(v29) => Some(v29.execution_payload_block_hash), - ProtoNode::V17(v17) => v17.execution_status.block_hash(), - }; - // Per spec's `is_parent_node_full`: if the child's EL parent hash - // matches the parent's EL block hash, the child extends the parent's - // payload chain, meaning the parent was Full. 
- if parent_el_block_hash.is_some_and(|hash| execution_payload_parent_hash == hash) { - PayloadStatus::Full + let parent_payload_status: PayloadStatus = + if let Some(parent_node) = parent_index.and_then(|idx| self.nodes.get(idx)) { + match parent_node { + ProtoNode::V29(v29) => { + // Both parent and child are Gloas blocks. The parent is full if the + // block hash in the parent node matches the parent block hash in the + // child bid. + if execution_payload_parent_hash == v29.execution_payload_block_hash { + PayloadStatus::Full + } else { + PayloadStatus::Empty + } + } + ProtoNode::V17(_) => { + // Parent is pre-Gloas, pre-Gloas blocks are treated as having Empty + // payload status. This case is reached during the fork transition. + PayloadStatus::Empty + } + } } else { - PayloadStatus::Empty - } - } else { - // Parent is missing (genesis or pruned due to finalization). Default to Full - // since this path should only be hit at Gloas genesis, and extending the payload - // chain is the safe default. - PayloadStatus::Full - }; + // TODO(gloas): re-assess this assumption + // Parent is missing (genesis or pruned due to finalization). Default to Full + // since this path should only be hit at Gloas genesis. + PayloadStatus::Full + }; // Per spec `get_forkchoice_store`: the anchor (genesis) block has // its payload state initialized (`payload_states = {anchor_root: ...}`). 
From 4e44cec249eafadc5b964499b4ad98364eb6411c Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 1 Apr 2026 13:45:19 +1100 Subject: [PATCH 082/127] Fix markdown lint --- .../src/fork_choice_test_definition/gloas_payload.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/consensus/proto_array/src/fork_choice_test_definition/gloas_payload.rs b/consensus/proto_array/src/fork_choice_test_definition/gloas_payload.rs index 5b82e1de8c8..e3f81fb3ff7 100644 --- a/consensus/proto_array/src/fork_choice_test_definition/gloas_payload.rs +++ b/consensus/proto_array/src/fork_choice_test_definition/gloas_payload.rs @@ -730,9 +730,9 @@ mod tests { /// /// Parameters: /// - `skip_first_gloas_slot`: if true, there is no block at the first Gloas slot (slot 32); - /// the first V29 block appears at slot 33. + /// the first V29 block appears at slot 33. /// - `first_gloas_block_full`: if true, the first V29 block extends the parent V17 node's - /// EL chain (Full parent payload status). If false, it doesn't (Empty). + /// EL chain (Full parent payload status). If false, it doesn't (Empty). fn get_gloas_fork_boundary_test_definition( skip_first_gloas_slot: bool, first_gloas_block_full: bool, From ab023a7231eb0aad285a987c000c0e2b21d33d9d Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 1 Apr 2026 14:54:18 +1100 Subject: [PATCH 083/127] Fix the VoteTrackerV28 definition --- .../proto_array/src/proto_array_fork_choice.rs | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index 5269957dfff..f6427ba8638 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -34,12 +34,12 @@ pub struct VoteTracker { } // Can be deleted once the V28 schema migration is buried. +// Matches the on-disk format from schema v28: current_root, next_root, next_epoch. 
#[derive(Default, PartialEq, Clone, Encode, Decode)] pub struct VoteTrackerV28 { current_root: Hash256, next_root: Hash256, - current_slot: Slot, - next_slot: Slot, + next_epoch: Epoch, } // This impl is only used upon upgrade from pre-Gloas to Gloas with all pre-Gloas nodes. @@ -49,9 +49,10 @@ impl From for VoteTracker { VoteTracker { current_root: v.current_root, next_root: v.next_root, - current_slot: v.current_slot, - next_slot: v.next_slot, - // TODO(gloas): check that this is correct + // The v28 format stored next_epoch rather than slots. Default to 0 since the + // vote tracker will be updated on the next attestation. + current_slot: Slot::new(0), + next_slot: Slot::new(0), current_payload_present: false, next_payload_present: false, } @@ -61,13 +62,14 @@ impl From for VoteTracker { // This impl is only used upon downgrade from V29 to V28, with exclusively pre-Gloas nodes. impl From for VoteTrackerV28 { fn from(v: VoteTracker) -> Self { - // Drop the payload_present, but this is safe because this is only called on pre-Gloas + // Drop the payload_present fields. This is safe because this is only called on pre-Gloas // nodes. VoteTrackerV28 { current_root: v.current_root, next_root: v.next_root, - current_slot: v.current_slot, - next_slot: v.next_slot, + // The v28 format stored next_epoch. Default to 0 since the vote tracker will be + // updated on the next attestation. 
+ next_epoch: Epoch::new(0), } } } From 3cf19e724fd6bd8dcb70324298043407022abd99 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 1 Apr 2026 15:47:10 +1100 Subject: [PATCH 084/127] Fix Gloas check in on_block --- consensus/proto_array/src/proto_array.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index ffe60d3a500..256cb48c778 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -535,7 +535,7 @@ impl ProtoArray { .parent_root .and_then(|parent| self.indices.get(&parent).copied()); - let node = if !spec.fork_name_at_slot::(current_slot).gloas_enabled() { + let node = if !spec.fork_name_at_slot::(block.slot).gloas_enabled() { ProtoNode::V17(ProtoNodeV17 { slot: block.slot, root: block.root, From 871697280e59dbd71db2496c0f4428ff937256e8 Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay Date: Mon, 30 Mar 2026 19:41:09 -0700 Subject: [PATCH 085/127] Add checkpoint sync --- beacon_node/beacon_chain/src/beacon_chain.rs | 22 +++++++ beacon_node/beacon_chain/src/builder.rs | 26 +++++++- beacon_node/client/src/builder.rs | 18 +++++- beacon_node/http_api/src/state_id.rs | 22 ++++++- .../network/src/sync/backfill_sync/mod.rs | 10 +++- .../src/sync/network_context/requests.rs | 4 +- beacon_node/store/src/hot_cold_store.rs | 60 +++++++++++++++++++ 7 files changed, 155 insertions(+), 7 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 9a305536784..047610a4a7d 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -941,6 +941,28 @@ impl BeaconChain { )? } + /// Returns the Pending (pre-payload) state root at the given slot in the canonical chain. + /// + /// In ePBS (Gloas+), if the canonical state at `slot` is Full (post-payload), this resolves + /// to the same-slot Pending state root. 
For skipped slots or pre-Gloas, returns the canonical + /// state root unchanged. + pub fn pending_state_root_at_slot(&self, request_slot: Slot) -> Result, Error> { + let Some(root) = self.state_root_at_slot(request_slot)? else { + return Ok(None); + }; + + // Pre-Gloas: all states are inherently Pending. + if !self + .spec + .fork_name_at_slot::(request_slot) + .gloas_enabled() + { + return Ok(Some(root)); + } + + Ok(Some(self.store.resolve_pending_state_root(&root)?)) + } + /// Returns the block root at the given slot, if any. Only returns roots in the canonical chain. /// /// ## Notes diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index 11b87351b19..f848b48f057 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -42,6 +42,7 @@ use store::{Error as StoreError, HotColdDB, ItemStore, KeyValueStoreOp}; use task_executor::{ShutdownReason, TaskExecutor}; use tracing::{debug, error, info, warn}; use tree_hash::TreeHash; +use types::SignedExecutionPayloadEnvelope; use types::data::CustodyIndex; use types::{ BeaconBlock, BeaconState, BlobSidecarList, ChainSpec, ColumnIndex, DataColumnSidecarList, @@ -426,6 +427,7 @@ where mut weak_subj_state: BeaconState, weak_subj_block: SignedBeaconBlock, weak_subj_blobs: Option>, + weak_subj_payload: Option>, genesis_state: BeaconState, ) -> Result { let store = self @@ -601,6 +603,13 @@ where .map_err(|e| format!("Failed to store weak subjectivity blobs: {e:?}"))?; } } + if let Some(ref envelope) = weak_subj_payload { + store + .put_payload_envelope(&weak_subj_block_root, envelope.clone()) + .map_err(|e| { + format!("Failed to store weak subjectivity payload envelope: {e:?}") + })?; + } // Stage the database's metadata fields for atomic storage when `build` is called. 
// This prevents the database from restarting in an inconsistent state if the anchor
@@ -617,10 +626,25 @@ where
            .map_err(|e| format!("Failed to initialize data column info: {:?}", e))?,
        );
 
+        if self
+            .spec
+            .fork_name_at_slot::(weak_subj_slot)
+            .gloas_enabled()
+        {
+            let envelope = weak_subj_payload.as_ref().ok_or_else(|| {
+                "Gloas checkpoint sync requires an execution payload envelope".to_string()
+            })?;
+            if envelope.message.beacon_block_root != weak_subj_block_root {
+                return Err(format!(
+                    "Envelope beacon_block_root {:?} does not match block root {:?}",
+                    envelope.message.beacon_block_root, weak_subj_block_root
+                ));
+            }
+        }
         // TODO(gloas): add check that checkpoint state is Pending
         let snapshot = BeaconSnapshot {
             beacon_block_root: weak_subj_block_root,
-            execution_envelope: None,
+            execution_envelope: weak_subj_payload.map(Arc::new),
             beacon_block: Arc::new(weak_subj_block),
             beacon_state: weak_subj_state,
         };
diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs
index 865599b9bd2..f4c8689b1e3 100644
--- a/beacon_node/client/src/builder.rs
+++ b/beacon_node/client/src/builder.rs
@@ -372,6 +372,7 @@ where
                 anchor_state,
                 anchor_block,
                 anchor_blobs,
+                None,
                 genesis_state,
             )?
         }
@@ -445,6 +446,21 @@ where
                 None
             };
 
+            let envelope = if spec
+                .fork_name_at_slot::(finalized_block_slot)
+                .gloas_enabled()
+            {
+                debug!("Downloading payload");
+                remote
+                    .get_beacon_execution_payload_envelope(BlockId::Slot(finalized_block_slot))
+                    .await
+                    .map_err(|e| format!("Error fetching finalized payload envelope from remote: {e:?}"))?
+                    .map(|resp| resp.into_data())
+            } else {
+                None
+            };
+            debug!("Downloaded finalized payload");
+
             let genesis_state = genesis_state(&runtime_context, &config).await?;
 
             info!(
@@ -454,7 +470,7 @@ where
                 "Loaded checkpoint block and state"
             );
 
-            builder.weak_subjectivity_state(state, block, blobs, genesis_state)?
+            builder.weak_subjectivity_state(state, block, blobs, envelope, genesis_state)?
} ClientGenesis::DepositContract => { return Err("Loading genesis from deposit contract no longer supported".to_string()); diff --git a/beacon_node/http_api/src/state_id.rs b/beacon_node/http_api/src/state_id.rs index 13fb9b2c585..9f9a01d48bc 100644 --- a/beacon_node/http_api/src/state_id.rs +++ b/beacon_node/http_api/src/state_id.rs @@ -43,14 +43,32 @@ impl StateId { chain.canonical_head.cached_head().finalized_checkpoint(); let (slot, execution_optimistic) = checkpoint_slot_and_execution_optimistic(chain, finalized_checkpoint)?; - (slot, execution_optimistic, true) + let root = chain + .pending_state_root_at_slot(slot) + .map_err(warp_utils::reject::unhandled_error)? + .ok_or_else(|| { + warp_utils::reject::custom_not_found(format!( + "beacon state at slot {}", + slot + )) + })?; + return Ok((root, execution_optimistic, true)); } CoreStateId::Justified => { let justified_checkpoint = chain.canonical_head.cached_head().justified_checkpoint(); let (slot, execution_optimistic) = checkpoint_slot_and_execution_optimistic(chain, justified_checkpoint)?; - (slot, execution_optimistic, false) + let root = chain + .pending_state_root_at_slot(slot) + .map_err(warp_utils::reject::unhandled_error)? + .ok_or_else(|| { + warp_utils::reject::custom_not_found(format!( + "beacon state at slot {}", + slot + )) + })?; + return Ok((root, execution_optimistic, false)); } CoreStateId::Slot(slot) => ( *slot, diff --git a/beacon_node/network/src/sync/backfill_sync/mod.rs b/beacon_node/network/src/sync/backfill_sync/mod.rs index 0f80138d240..29beb96e5a5 100644 --- a/beacon_node/network/src/sync/backfill_sync/mod.rs +++ b/beacon_node/network/src/sync/backfill_sync/mod.rs @@ -35,7 +35,7 @@ use std::marker::PhantomData; use std::sync::Arc; use strum::IntoEnumIterator; use tracing::{debug, error, info, warn}; -use types::{ColumnIndex, Epoch, EthSpec}; +use types::{ColumnIndex, Epoch, EthSpec, ForkName}; /// Blocks are downloaded in batches from peers. 
This constant specifies how many epochs worth of /// blocks per batch are requested _at most_. A batch may request less blocks to account for @@ -218,6 +218,14 @@ impl BackFillSync { match self.state() { BackFillState::Syncing => {} // already syncing ignore. BackFillState::Paused => { + if self + .beacon_chain + .spec + .fork_name_at_epoch(self.to_be_downloaded) + >= ForkName::Gloas + { + return Ok(SyncStart::NotSyncing); + } if self .network_globals .peers diff --git a/beacon_node/network/src/sync/network_context/requests.rs b/beacon_node/network/src/sync/network_context/requests.rs index 7ba0838ee1d..8c9e1b2b34e 100644 --- a/beacon_node/network/src/sync/network_context/requests.rs +++ b/beacon_node/network/src/sync/network_context/requests.rs @@ -16,10 +16,10 @@ pub use data_columns_by_range::DataColumnsByRangeRequestItems; pub use data_columns_by_root::{ DataColumnsByRootRequestItems, DataColumnsByRootSingleBlockRequest, }; +pub use payload_envelopes_by_range::PayloadEnvelopesByRangeRequestItems; pub use payload_envelopes_by_root::{ PayloadEnvelopesByRootRequestItems, PayloadEnvelopesByRootSingleRequest, }; -pub use payload_envelopes_by_range::PayloadEnvelopesByRangeRequestItems; use crate::metrics; @@ -31,8 +31,8 @@ mod blocks_by_range; mod blocks_by_root; mod data_columns_by_range; mod data_columns_by_root; -mod payload_envelopes_by_root; mod payload_envelopes_by_range; +mod payload_envelopes_by_root; #[derive(Debug, PartialEq, Eq, IntoStaticStr)] pub enum LookupVerifyError { diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index 78dd69e55a2..9a99b56348d 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -1951,6 +1951,66 @@ impl, Cold: ItemStore> HotColdDB } } + /// Resolve a canonical state root to the Pending (pre-payload) state root at the same slot. + /// + /// In ePBS, checkpoint states (finalized, justified) should be returned as their Pending + /// variant. 
This function takes a canonical state root and: + /// + /// - If the state is already Pending (or pre-Gloas), returns it unchanged. + /// - If the state is Full due to a payload applied at this slot, returns the same-slot + /// Pending state root via `previous_state_root`. + /// - If the state is at a skipped slot (inheriting Full status from a prior slot), returns + /// it unchanged — there is no distinct Pending state at a skipped slot. + pub fn resolve_pending_state_root(&self, state_root: &Hash256) -> Result { + // Fast path: split state is always Pending. + let split = self.get_split_info(); + if *state_root == split.state_root { + return Ok(split.state_root); + } + + // Try hot DB first. + if let Some(summary) = self.load_hot_state_summary(state_root)? { + // Pre-Gloas states are always Pending. + if !self + .spec + .fork_name_at_slot::(summary.slot) + .gloas_enabled() + { + return Ok(*state_root); + } + + // Genesis state is always Pending. + if summary.previous_state_root.is_zero() { + return Ok(*state_root); + } + + // Load the previous state summary. If it has the same slot, the current state is + // Full (post-payload) and the previous state is Pending (post-block). Return the + // Pending state root. + let previous_summary = self + .load_hot_state_summary(&summary.previous_state_root)? + .ok_or(Error::MissingHotStateSummary(summary.previous_state_root))?; + + if previous_summary.slot == summary.slot { + // This is a Full state at a non-skipped slot. Return the Pending state root. + return Ok(summary.previous_state_root); + } + + // Either already Pending (block at this slot) or a skipped slot — return as-is. + return Ok(*state_root); + } + + // Try cold DB. + if let Some(_slot) = self.load_cold_state_slot(state_root)? { + // Cold DB states: the non-canonical payload variant is pruned during migration. + // Return whatever is stored. In practice, finalized/justified states are almost + // always in the hot DB or at the split point. 
+ return Ok(*state_root); + } + + Err(Error::MissingHotStateSummary(*state_root)) + } + fn load_hot_hdiff_buffer(&self, state_root: Hash256) -> Result { if let Some(buffer) = self .state_cache From 08c5ec44d8aff8a1fbdd45d08034ebbf07e408e4 Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay Date: Tue, 31 Mar 2026 22:39:22 -0700 Subject: [PATCH 086/127] Fix fork choice bug --- consensus/proto_array/src/proto_array.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 933e9eb078b..d806547cc0a 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -535,7 +535,7 @@ impl ProtoArray { .parent_root .and_then(|parent| self.indices.get(&parent).copied()); - let node = if !spec.fork_name_at_slot::(current_slot).gloas_enabled() { + let node = if !spec.fork_name_at_slot::(block.slot).gloas_enabled() { ProtoNode::V17(ProtoNodeV17 { slot: block.slot, root: block.root, From ddff03d26f269c773e3774e0eaf1b327bd254e8b Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 1 Apr 2026 16:42:49 +1100 Subject: [PATCH 087/127] Store parent_payload_hash in ProtoNode --- consensus/fork_choice/src/fork_choice.rs | 1 + consensus/proto_array/src/proto_array.rs | 4 +++- consensus/proto_array/src/proto_array_fork_choice.rs | 5 +++-- 3 files changed, 7 insertions(+), 3 deletions(-) diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index 1f5b2cf1b08..7e189b0a1b3 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -1138,6 +1138,7 @@ where } // index == 1 (payload_present) requires the block's payload to have been received. 
+ // TODO(gloas): could optimise by adding `payload_received` to `Block` if index == 1 && !self .proto_array diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 256cb48c778..412e217bf85 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -143,6 +143,8 @@ pub struct ProtoNode { pub full_payload_weight: u64, #[superstruct(only(V29), partial_getter(copy))] pub execution_payload_block_hash: ExecutionBlockHash, + #[superstruct(only(V29), partial_getter(copy))] + pub execution_payload_parent_hash: ExecutionBlockHash, /// Equivalent to spec's `block_timeliness[root][ATTESTATION_TIMELINESS_INDEX]`. #[superstruct(only(V29), partial_getter(copy))] pub block_timeliness_attestation_threshold: bool, @@ -181,7 +183,6 @@ pub struct ProtoNode { impl ProtoNode { /// Generic version of spec's `parent_payload_status` that works for pre-Gloas nodes by /// considering their parents Empty. - /// Pre-Gloas nodes have no ePBS, default to Empty. pub fn get_parent_payload_status(&self) -> PayloadStatus { self.parent_payload_status().unwrap_or(PayloadStatus::Empty) } @@ -620,6 +621,7 @@ impl ProtoArray { empty_payload_weight: 0, full_payload_weight: 0, execution_payload_block_hash, + execution_payload_parent_hash, // Per spec `get_forkchoice_store`: the anchor block's PTC votes are // initialized to all-True, ensuring `is_payload_timely` and // `is_payload_data_available` return true for the anchor. 
diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index f6427ba8638..72440b83b8a 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -1030,7 +1030,7 @@ impl ProtoArrayForkChoice { .unwrap_or_else(|_| ExecutionStatus::irrelevant()), unrealized_justified_checkpoint: block.unrealized_justified_checkpoint(), unrealized_finalized_checkpoint: block.unrealized_finalized_checkpoint(), - execution_payload_parent_hash: None, + execution_payload_parent_hash: block.execution_payload_parent_hash().ok(), execution_payload_block_hash: block.execution_payload_block_hash().ok(), proposer_index: block.proposer_index().ok(), }) @@ -1047,7 +1047,8 @@ impl ProtoArrayForkChoice { } /// Returns whether the execution payload for a block has been received. - /// Returns `false` for pre-GLOAS (V17) nodes or unknown blocks. + /// + /// Returns `false` for pre-Gloas (V17) nodes or unknown blocks. 
pub fn is_payload_received(&self, block_root: &Hash256) -> bool { self.get_proto_node(block_root) .and_then(|node| node.payload_received().ok()) From f5b2445d09d62c5ff8258ecabe4a8dab2655e7b0 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 1 Apr 2026 16:46:02 +1100 Subject: [PATCH 088/127] Remove stupid GLOAS comments --- beacon_node/beacon_chain/src/beacon_chain.rs | 4 ++-- beacon_node/http_api/src/validator/mod.rs | 2 +- consensus/fork_choice/src/fork_choice.rs | 10 +++++----- consensus/fork_choice/tests/tests.rs | 4 ++-- .../src/fork_choice_test_definition/gloas_payload.rs | 8 ++++---- consensus/proto_array/src/proto_array.rs | 2 +- 6 files changed, 15 insertions(+), 15 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 3c8ea307791..f05b972679d 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -4855,8 +4855,8 @@ impl BeaconChain { return Err(Box::new(DoNotReOrg::NotProposing.into())); } - // TODO(gloas): reorg weight logic needs updating for GLOAS. For now use - // total weight which is correct for pre-GLOAS and conservative for post-GLOAS. + // TODO(gloas): reorg weight logic needs updating for Gloas. For now use + // total weight which is correct for pre-Gloas and conservative for post-Gloas. let head_weight = info.head_node.weight(); let parent_weight = info.parent_node.weight(); diff --git a/beacon_node/http_api/src/validator/mod.rs b/beacon_node/http_api/src/validator/mod.rs index 3d96b858702..412851233ed 100644 --- a/beacon_node/http_api/src/validator/mod.rs +++ b/beacon_node/http_api/src/validator/mod.rs @@ -671,7 +671,7 @@ pub fn post_validator_prepare_beacon_proposer( .await; // TODO(gloas): verify this is correct. 
We skip proposer preparation for - // GLOAS because the execution payload is no longer embedded in the beacon + // Gloas because the execution payload is no longer embedded in the beacon // block (it's in the payload envelope), so the head block's // execution_payload() is unavailable. let next_slot = current_slot + 1; diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index 7e189b0a1b3..cedd42cf015 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -170,11 +170,11 @@ pub enum InvalidAttestation { /// The attestation is attesting to a state that is later than itself. (Viz., attesting to the /// future). AttestsToFutureBlock { block: Slot, attestation: Slot }, - /// Post-GLOAS: attestation index must be 0 or 1. + /// Post-Gloas: attestation index must be 0 or 1. InvalidAttestationIndex { index: u64 }, - /// A same-slot attestation has a non-zero index, which is invalid post-GLOAS. + /// A same-slot attestation has a non-zero index, which is invalid post-Gloas. InvalidSameSlotAttestationIndex { slot: Slot }, - /// Post-GLOAS: attestation with index == 1 (payload_present) requires the block's + /// Post-Gloas: attestation with index == 1 (payload_present) requires the block's /// payload to have been received (`root in store.payload_states`). PayloadNotReceived { beacon_block_root: Hash256 }, /// A payload attestation votes payload_present for a block in the current slot, which is @@ -256,7 +256,7 @@ pub struct QueuedAttestation { attesting_indices: Vec, block_root: Hash256, target_epoch: Epoch, - /// Per GLOAS spec: `payload_present = attestation.data.index == 1`. + /// Per Gloas spec: `payload_present = attestation.data.index == 1`. payload_present: bool, } @@ -1125,7 +1125,7 @@ where { let index = indexed_attestation.data().index; - // Post-GLOAS: attestation index must be 0 or 1. + // Post-Gloas: attestation index must be 0 or 1. 
if index > 1 { return Err(InvalidAttestation::InvalidAttestationIndex { index }); } diff --git a/consensus/fork_choice/tests/tests.rs b/consensus/fork_choice/tests/tests.rs index 839d0f4c5c3..241e25d3e26 100644 --- a/consensus/fork_choice/tests/tests.rs +++ b/consensus/fork_choice/tests/tests.rs @@ -73,9 +73,9 @@ impl ForkChoiceTest { Self { harness } } - /// Creates a new tester with the GLOAS fork active at epoch 1. + /// Creates a new tester with the Gloas fork active at epoch 1. /// Genesis is a standard Fulu block (epoch 0), so block production works normally. - /// Tests that need GLOAS semantics should advance the chain into epoch 1 first. + /// Tests that need Gloas semantics should advance the chain into epoch 1 first. /// Get a value from the `ForkChoice` instantiation. fn get(&self, func: T) -> U where diff --git a/consensus/proto_array/src/fork_choice_test_definition/gloas_payload.rs b/consensus/proto_array/src/fork_choice_test_definition/gloas_payload.rs index e3f81fb3ff7..18d7a40b828 100644 --- a/consensus/proto_array/src/fork_choice_test_definition/gloas_payload.rs +++ b/consensus/proto_array/src/fork_choice_test_definition/gloas_payload.rs @@ -52,7 +52,7 @@ pub fn get_gloas_chain_following_test_definition() -> ForkChoiceTestDefinition { }); // Mark root_1 as having received its execution payload so that - // its FULL virtual node exists in the GLOAS fork choice tree. + // its FULL virtual node exists in the Gloas fork choice tree. ops.push(Operation::ProcessExecutionPayload { block_root: get_root(1), }); @@ -262,7 +262,7 @@ pub fn get_gloas_find_head_vote_transition_test_definition() -> ForkChoiceTestDe }); // Mark root_1 as having received its execution payload so that - // its FULL virtual node exists in the GLOAS fork choice tree. + // its FULL virtual node exists in the Gloas fork choice tree. 
ops.push(Operation::ProcessExecutionPayload { block_root: get_root(1), }); @@ -367,7 +367,7 @@ pub fn get_gloas_weight_priority_over_payload_preference_test_definition() }); // Mark root_1 as having received its execution payload so that - // its FULL virtual node exists in the GLOAS fork choice tree. + // its FULL virtual node exists in the Gloas fork choice tree. ops.push(Operation::ProcessExecutionPayload { block_root: get_root(1), }); @@ -537,7 +537,7 @@ pub fn get_gloas_interleaved_attestations_test_definition() -> ForkChoiceTestDef }); // Mark root_1 as having received its execution payload so that - // its FULL virtual node exists in the GLOAS fork choice tree. + // its FULL virtual node exists in the Gloas fork choice tree. ops.push(Operation::ProcessExecutionPayload { block_root: get_root(1), }); diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 412e217bf85..361e4a86e2a 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -642,7 +642,7 @@ impl ProtoArray { block_timeliness_attestation_threshold: is_genesis || (is_current_slot && time_into_slot < spec.get_unaggregated_attestation_due()), - // TODO(gloas): use GLOAS-specific PTC due threshold once + // TODO(gloas): use Gloas-specific PTC due threshold once // `get_payload_attestation_due_ms` is on ChainSpec. 
block_timeliness_ptc_threshold: is_genesis || (is_current_slot && time_into_slot < spec.get_slot_duration() / 2), From 39f07106afcb05be8d780c6fda848060d20454e7 Mon Sep 17 00:00:00 2001 From: Eitan Seri- Levi Date: Tue, 31 Mar 2026 23:53:06 -0700 Subject: [PATCH 089/127] revert --- consensus/proto_array/src/proto_array_fork_choice.rs | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index cb467f2531e..6c90af13028 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -997,11 +997,7 @@ impl ProtoArrayForkChoice { /// Returns the `block.execution_status` field, if the block is present. pub fn get_block_execution_status(&self, block_root: &Hash256) -> Option { let block = self.get_proto_node(block_root)?; - Some( - block - .execution_status() - .unwrap_or_else(|_| ExecutionStatus::irrelevant()), - ) + block.execution_status().ok() } /// Returns whether the execution payload for a block has been received. From f6baea4b08e095a869fc9a3e0f70b6650d784909 Mon Sep 17 00:00:00 2001 From: Eitan Seri- Levi Date: Tue, 31 Mar 2026 23:55:42 -0700 Subject: [PATCH 090/127] revert --- consensus/proto_array/src/proto_array.rs | 26 ++++-------------------- 1 file changed, 4 insertions(+), 22 deletions(-) diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index d3abf53b641..53b963b9747 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -1432,29 +1432,11 @@ impl ProtoArray { .get(node.proto_node_index) .ok_or(Error::InvalidNodeIndex(node.proto_node_index))?; - // V17 (pre-GLOAS) nodes don't have payload_received or parent_payload_status. - // Skip the virtual Empty/Full split and return real children directly. 
- if proto_node.as_v17().is_ok() { - let child_indices = children_index - .get(node.proto_node_index) - .map(|c| c.as_slice()) - .unwrap_or(&[]); - return Ok(child_indices - .iter() - .filter_map(|&child_index| { - let child_node = self.nodes.get(child_index)?; - Some(( - IndexedForkChoiceNode { - root: child_node.root(), - proto_node_index: child_index, - payload_status: PayloadStatus::Pending, - }, - child_node.clone(), - )) - }) - .collect()); + let mut children = vec![(node.with_status(PayloadStatus::Empty), proto_node.clone())]; + // The FULL virtual child only exists if the payload has been received. + if proto_node.payload_received().is_ok_and(|received| received) { + children.push((node.with_status(PayloadStatus::Full), proto_node.clone())); } - // TODO(gloas) this is the actual change we want to keep once PTC is implemented // let mut children = vec![(node.with_status(PayloadStatus::Empty), proto_node.clone())]; // // The FULL virtual child only exists if the payload has been received. 
From 5aae563d84f3d7ae67bc63de5f230024a78cdaa4 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 1 Apr 2026 17:25:50 +1100 Subject: [PATCH 091/127] Remove proposer boost weight during upgrade --- .../src/schema_change/migration_schema_v29.rs | 47 ++++++++++++++++++- .../tests/payload_invalidation.rs | 2 +- consensus/fork_choice/src/fork_choice.rs | 6 +-- consensus/proto_array/src/proto_array.rs | 5 -- .../src/proto_array_fork_choice.rs | 31 ++---------- consensus/proto_array/src/ssz_container.rs | 7 ++- 6 files changed, 54 insertions(+), 44 deletions(-) diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v29.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v29.rs index 3069200fce5..77d4be3443e 100644 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v29.rs +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v29.rs @@ -1,7 +1,9 @@ use crate::beacon_chain::{BeaconChainTypes, FORK_CHOICE_DB_KEY}; use crate::persisted_fork_choice::{PersistedForkChoiceV28, PersistedForkChoiceV29}; +use std::collections::HashMap; use store::hot_cold_store::HotColdDB; use store::{DBColumn, Error as StoreError, KeyValueStore, KeyValueStoreOp}; +use tracing::warn; use types::EthSpec; /// Upgrade from schema v28 to v29. @@ -49,8 +51,49 @@ pub fn upgrade_to_v29( } } - // Convert to v29 and encode. - let persisted_v29 = PersistedForkChoiceV29::from(persisted_v28); + // Read the previous proposer boost before converting to V29 (V29 no longer stores it). + let previous_proposer_boost = persisted_v28 + .fork_choice_v28 + .proto_array_v28 + .previous_proposer_boost; + + // Convert to v29. + let mut persisted_v29 = PersistedForkChoiceV29::from(persisted_v28); + + // Subtract the proposer boost from the boosted node and all its ancestors. + // + // In the V28 schema, `apply_score_changes` baked the proposer boost directly into node + // weights and back-propagated it up the parent chain. 
In V29, the boost is computed + // on-the-fly during the virtual tree walk. If we don't subtract the baked-in boost here, + // it will be double-counted after the upgrade. + if !previous_proposer_boost.root.is_zero() && previous_proposer_boost.score > 0 { + let score = previous_proposer_boost.score; + let indices: HashMap<_, _> = persisted_v29 + .fork_choice + .proto_array + .indices + .iter() + .cloned() + .collect(); + + if let Some(node_index) = indices.get(&previous_proposer_boost.root).copied() { + let nodes = &mut persisted_v29.fork_choice.proto_array.nodes; + let mut current = Some(node_index); + while let Some(idx) = current { + if let Some(node) = nodes.get_mut(idx) { + *node.weight_mut() = node.weight().saturating_sub(score); + current = node.parent(); + } else { + break; + } + } + } else { + warn!( + root = ?previous_proposer_boost.root, + "Proposer boost node missing from fork choice" + ); + } + } Ok(vec![ persisted_v29.as_kv_store_op(FORK_CHOICE_DB_KEY, db.get_config())?, diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs index 13672bbb63e..947024e8c20 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -1438,7 +1438,7 @@ async fn weights_after_resetting_optimistic_status() { .canonical_head .fork_choice_write_lock() .proto_array_mut() - .set_all_blocks_to_optimistic::(&rig.harness.chain.spec) + .set_all_blocks_to_optimistic::() .unwrap(); let new_weights = rig diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index cedd42cf015..c08e76020b2 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -1614,7 +1614,6 @@ where persisted_proto_array: proto_array::core::SszContainer, justified_balances: JustifiedBalances, reset_payload_statuses: ResetPayloadStatuses, - spec: &ChainSpec, ) -> Result> { let mut 
proto_array = ProtoArrayForkChoice::from_container( persisted_proto_array.clone(), @@ -1639,7 +1638,7 @@ where // Reset all blocks back to being "optimistic". This helps recover from an EL consensus // fault where an invalid payload becomes valid. - if let Err(e) = proto_array.set_all_blocks_to_optimistic::(spec) { + if let Err(e) = proto_array.set_all_blocks_to_optimistic::() { // If there is an error resetting the optimistic status then log loudly and revert // back to a proto-array which does not have the reset applied. This indicates a // significant error in Lighthouse and warrants detailed investigation. @@ -1669,7 +1668,6 @@ where persisted.proto_array, justified_balances, reset_payload_statuses, - spec, )?; let current_slot = fc_store.get_current_slot(); @@ -1703,7 +1701,7 @@ where // get a different result. fork_choice .proto_array - .set_all_blocks_to_optimistic::(spec)?; + .set_all_blocks_to_optimistic::()?; // If the second attempt at finding a head fails, return an error since we do not // expect this scenario. let _ = fork_choice.get_head(current_slot, spec)?; diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 361e4a86e2a..f2a6f6d0dca 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -369,7 +369,6 @@ pub struct ProtoArray { pub prune_threshold: usize, pub nodes: Vec, pub indices: HashMap, - pub previous_proposer_boost: ProposerBoost, } impl ProtoArray { @@ -502,10 +501,6 @@ impl ProtoArray { } } - // Proposer boost is now applied on-the-fly in `get_weight` during the - // walk, so clear any stale boost from a prior call. 
- self.previous_proposer_boost = ProposerBoost::default(); - Ok(()) } diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index 72440b83b8a..634a78823d8 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -2,8 +2,7 @@ use crate::{ JustifiedBalances, error::Error, proto_array::{ - InvalidationOperation, Iter, NodeDelta, ProposerBoost, ProtoArray, ProtoNode, - calculate_committee_fraction, + InvalidationOperation, Iter, NodeDelta, ProtoArray, ProtoNode, calculate_committee_fraction, }, ssz_container::SszContainer, }; @@ -527,7 +526,6 @@ impl ProtoArrayForkChoice { prune_threshold: DEFAULT_PRUNE_THRESHOLD, nodes: Vec::with_capacity(1), indices: HashMap::with_capacity(1), - previous_proposer_boost: ProposerBoost::default(), }; let block = Block { @@ -880,10 +878,7 @@ impl ProtoArrayForkChoice { /// status to be optimistic. /// /// In practice this means forgetting any `VALID` or `INVALID` statuses. - pub fn set_all_blocks_to_optimistic( - &mut self, - spec: &ChainSpec, - ) -> Result<(), String> { + pub fn set_all_blocks_to_optimistic(&mut self) -> Result<(), String> { // Iterate backwards through all nodes in the `proto_array`. Whilst it's not strictly // required to do this process in reverse, it seems natural when we consider how LMD votes // are counted. @@ -906,7 +901,7 @@ impl ProtoArrayForkChoice { // Restore the weight of the node, it would have been set to `0` in // `apply_score_changes` when it was invalidated. - let mut restored_weight: u64 = self + let restored_weight: u64 = self .votes .0 .iter() @@ -922,26 +917,6 @@ impl ProtoArrayForkChoice { }) .sum(); - // If the invalid root was boosted, apply the weight to it and - // ancestors. 
- if let Some(proposer_score_boost) = spec.proposer_score_boost - && self.proto_array.previous_proposer_boost.root == node.root() - { - // Compute the score based upon the current balances. We can't rely on - // the `previous_proposr_boost.score` since it is set to zero with an - // invalid node. - let proposer_score = - calculate_committee_fraction::(&self.balances, proposer_score_boost) - .ok_or("Failed to compute proposer boost")?; - // Store the score we've applied here so it can be removed in - // a later call to `apply_score_changes`. - self.proto_array.previous_proposer_boost.score = proposer_score; - // Apply this boost to this node. - restored_weight = restored_weight - .checked_add(proposer_score) - .ok_or("Overflow when adding boost to weight")?; - } - // Add the restored weight to the node and all ancestors. if restored_weight > 0 { let mut node_or_ancestor = node; diff --git a/consensus/proto_array/src/ssz_container.rs b/consensus/proto_array/src/ssz_container.rs index 80a67022104..69efb35027c 100644 --- a/consensus/proto_array/src/ssz_container.rs +++ b/consensus/proto_array/src/ssz_container.rs @@ -38,6 +38,7 @@ pub struct SszContainer { #[superstruct(only(V29))] pub nodes: Vec, pub indices: Vec<(Hash256, usize)>, + #[superstruct(only(V28))] pub previous_proposer_boost: ProposerBoost, } @@ -50,7 +51,6 @@ impl SszContainerV29 { prune_threshold: proto_array.prune_threshold, nodes: proto_array.nodes.clone(), indices: proto_array.indices.iter().map(|(k, v)| (*k, *v)).collect(), - previous_proposer_boost: proto_array.previous_proposer_boost, } } } @@ -63,7 +63,6 @@ impl TryFrom<(SszContainerV29, JustifiedBalances)> for ProtoArrayForkChoice { prune_threshold: from.prune_threshold, nodes: from.nodes, indices: from.indices.into_iter().collect::>(), - previous_proposer_boost: from.previous_proposer_boost, }; Ok(Self { @@ -92,7 +91,6 @@ impl From for SszContainerV29 { }) .collect(), indices: v28.indices, - previous_proposer_boost: v28.previous_proposer_boost, 
} } } @@ -116,7 +114,8 @@ impl From for SszContainerV28 { }) .collect(), indices: v29.indices, - previous_proposer_boost: v29.previous_proposer_boost, + // Proposer boost is not tracked in V29 (computed on-the-fly), so reset it. + previous_proposer_boost: ProposerBoost::default(), } } } From 12e6595b324fb419d2718fd51371b61b6b41630f Mon Sep 17 00:00:00 2001 From: Eitan Seri- Levi Date: Wed, 1 Apr 2026 00:02:11 -0700 Subject: [PATCH 092/127] revcert --- consensus/proto_array/src/proto_array.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 53b963b9747..95e19e21192 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -1431,7 +1431,6 @@ impl ProtoArray { .nodes .get(node.proto_node_index) .ok_or(Error::InvalidNodeIndex(node.proto_node_index))?; - let mut children = vec![(node.with_status(PayloadStatus::Empty), proto_node.clone())]; // The FULL virtual child only exists if the payload has been received. 
if proto_node.payload_received().is_ok_and(|received| received) { @@ -1464,7 +1463,6 @@ impl ProtoArray { .iter() .filter_map(|&child_index| { let child_node = self.nodes.get(child_index)?; - // Skip parent_payload_status filter for V17 children (they don't have it) if child_node.get_parent_payload_status() != node.payload_status { return None; } From d978e3dabe56be6c56ea7765a15f4ce7f844f864 Mon Sep 17 00:00:00 2001 From: Eitan Seri- Levi Date: Wed, 1 Apr 2026 00:35:14 -0700 Subject: [PATCH 093/127] revbert --- consensus/proto_array/src/proto_array.rs | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 95e19e21192..e9d5e02db58 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -1446,11 +1446,7 @@ impl ProtoArray { // TODO(gloas) remove this and uncomment the code above once we implement PTC // Skip Empty/Full split: go straight to Full when payload received, // giving full payload weight 100% without PTC votes. 
- let children = if proto_node.payload_received().is_ok_and(|received| received) { - vec![(node.with_status(PayloadStatus::Full), proto_node.clone())] - } else { - vec![(node.with_status(PayloadStatus::Empty), proto_node.clone())] - }; + // TODO(gloas) delete up to here Ok(children) From a5bdd0c5979f68be615aeb3d610ffeb5d5ce25fc Mon Sep 17 00:00:00 2001 From: Eitan Seri- Levi Date: Wed, 1 Apr 2026 00:59:00 -0700 Subject: [PATCH 094/127] Smol revert --- consensus/proto_array/src/proto_array.rs | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index e9d5e02db58..361e4a86e2a 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -1436,19 +1436,6 @@ impl ProtoArray { if proto_node.payload_received().is_ok_and(|received| received) { children.push((node.with_status(PayloadStatus::Full), proto_node.clone())); } - // TODO(gloas) this is the actual change we want to keep once PTC is implemented - // let mut children = vec![(node.with_status(PayloadStatus::Empty), proto_node.clone())]; - // // The FULL virtual child only exists if the payload has been received. - // if proto_node.payload_received().is_ok_and(|received| received) { - // children.push((node.with_status(PayloadStatus::Full), proto_node.clone())); - // } - - // TODO(gloas) remove this and uncomment the code above once we implement PTC - // Skip Empty/Full split: go straight to Full when payload received, - // giving full payload weight 100% without PTC votes. 
- - // TODO(gloas) delete up to here - Ok(children) } else { let child_indices = children_index From 5f8605f67e9341112db44ebe27e39b099e1e8543 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 2 Apr 2026 10:40:23 +1100 Subject: [PATCH 095/127] Disable optimistic sync for Gloas --- .../src/payload_envelope_verification/import.rs | 10 ++++++++++ .../src/payload_envelope_verification/mod.rs | 2 ++ 2 files changed, 12 insertions(+) diff --git a/beacon_node/beacon_chain/src/payload_envelope_verification/import.rs b/beacon_node/beacon_chain/src/payload_envelope_verification/import.rs index ed121ccb94a..6efabcdfa84 100644 --- a/beacon_node/beacon_chain/src/payload_envelope_verification/import.rs +++ b/beacon_node/beacon_chain/src/payload_envelope_verification/import.rs @@ -167,6 +167,16 @@ impl BeaconChain { .map_err(BeaconChainError::TokioJoin)? .ok_or(BeaconChainError::RuntimeShutdown)??; + // TODO(gloas): optimistic sync is not supported for Gloas, maybe we could re-add it + if payload_verification_outcome + .payload_verification_status + .is_optimistic() + { + return Err(EnvelopeError::OptimisticSyncNotSupported { + block_root: import_data.block_root, + }); + } + Ok(ExecutedEnvelope::new( signed_envelope, import_data, diff --git a/beacon_node/beacon_chain/src/payload_envelope_verification/mod.rs b/beacon_node/beacon_chain/src/payload_envelope_verification/mod.rs index c707d62dc7b..225d5a98924 100644 --- a/beacon_node/beacon_chain/src/payload_envelope_verification/mod.rs +++ b/beacon_node/beacon_chain/src/payload_envelope_verification/mod.rs @@ -182,6 +182,8 @@ pub enum EnvelopeError { payload_slot: Slot, latest_finalized_slot: Slot, }, + /// Optimistic sync is not supported for Gloas payload envelopes. 
+ OptimisticSyncNotSupported { block_root: Hash256 }, /// Some Beacon Chain Error BeaconChainError(Arc), /// Some Beacon State error From 1c5a7bed7402481baddafe39c9a9c928e54ce7f1 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 2 Apr 2026 10:56:34 +1100 Subject: [PATCH 096/127] Clarify name of `on_valid_payload_envelope_received` --- .../payload_envelope_verification/import.rs | 5 ++-- consensus/fork_choice/src/fork_choice.rs | 22 ++++++++++----- .../src/fork_choice_test_definition.rs | 2 +- consensus/proto_array/src/proto_array.rs | 27 +++++++------------ .../src/proto_array_fork_choice.rs | 11 ++++++-- testing/ef_tests/src/cases/fork_choice.rs | 2 +- 6 files changed, 40 insertions(+), 29 deletions(-) diff --git a/beacon_node/beacon_chain/src/payload_envelope_verification/import.rs b/beacon_node/beacon_chain/src/payload_envelope_verification/import.rs index 6efabcdfa84..4a0a188e5e8 100644 --- a/beacon_node/beacon_chain/src/payload_envelope_verification/import.rs +++ b/beacon_node/beacon_chain/src/payload_envelope_verification/import.rs @@ -253,9 +253,10 @@ impl BeaconChain { // avoiding taking other locks whilst holding this lock. let mut fork_choice = parking_lot::RwLockUpgradableReadGuard::upgrade(fork_choice_reader); - // Update the node's payload_status from PENDING to FULL in fork choice. + // Update the block's payload to received in fork choice, which creates the `Full` virtual + // node which can be eligible for head. 
fork_choice - .on_execution_payload(block_root) + .on_valid_payload_envelope_received(block_root) .map_err(|e| EnvelopeError::InternalError(format!("{e:?}")))?; // TODO(gloas) emit SSE event if the payload became the new head payload diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index c08e76020b2..7f5ef512174 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -654,6 +654,20 @@ where } } + /// Mark a Gloas payload envelope as valid and received. + /// + /// This must only be called for valid Gloas payloads. + pub fn on_valid_payload_envelope_received( + &mut self, + block_root: Hash256, + ) -> Result<(), Error> { + self.proto_array + .on_valid_payload_envelope_received(block_root) + .map_err(Error::FailedToProcessValidExecutionPayload) + } + + /// Pre-Gloas only. + /// /// See `ProtoArrayForkChoice::process_execution_payload_validation` for documentation. pub fn on_valid_execution_payload( &mut self, @@ -664,6 +678,8 @@ where .map_err(Error::FailedToProcessValidExecutionPayload) } + /// Pre-Gloas only. + /// /// See `ProtoArrayForkChoice::process_execution_payload_invalidation` for documentation. 
pub fn on_invalid_execution_payload( &mut self, @@ -977,12 +993,6 @@ where Ok(()) } - pub fn on_execution_payload(&mut self, block_root: Hash256) -> Result<(), Error> { - self.proto_array - .on_execution_payload(block_root) - .map_err(Error::FailedToProcessValidExecutionPayload) - } - /// Update checkpoints in store if necessary fn update_checkpoints( &mut self, diff --git a/consensus/proto_array/src/fork_choice_test_definition.rs b/consensus/proto_array/src/fork_choice_test_definition.rs index ff9d70bad58..1901091dd66 100644 --- a/consensus/proto_array/src/fork_choice_test_definition.rs +++ b/consensus/proto_array/src/fork_choice_test_definition.rs @@ -502,7 +502,7 @@ impl ForkChoiceTestDefinition { } Operation::ProcessExecutionPayload { block_root } => { fork_choice - .on_execution_payload(block_root) + .on_valid_payload_envelope_received(block_root) .unwrap_or_else(|e| { panic!( "on_execution_payload op at index {} returned error: {}", diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index f2a6f6d0dca..6695079cd2c 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -768,12 +768,10 @@ impl ProtoArray { Ok(!has_equivocation) } - /// Process an execution payload for a Gloas block. + /// Process a valid execution payload envelope for a Gloas block. /// - /// Sets `payload_received` to true, which makes `is_payload_timely` and - /// `is_payload_data_available` return true regardless of PTC votes. - /// This maps to `store.payload_states[root] = state` in the spec. - pub fn on_valid_execution_payload(&mut self, block_root: Hash256) -> Result<(), Error> { + /// Sets `payload_received` to true. + pub fn on_valid_payload_envelope_received(&mut self, block_root: Hash256) -> Result<(), Error> { let index = *self .indices .get(&block_root) @@ -809,6 +807,8 @@ impl ProtoArray { /// Updates the `verified_node_index` and all ancestors to have validated execution payloads. 
/// + /// This function is a no-op if called for a Gloas block. + /// /// Returns an error if: /// /// - The `verified_node_index` is unknown. @@ -852,18 +852,10 @@ impl ProtoArray { }); } }, - // Gloas nodes don't carry `ExecutionStatus`. Mark the validated - // block as payload-received so that `is_payload_timely` / - // `is_payload_data_available` and `index == 1` attestations work. - ProtoNode::V29(node) => { - if index == verified_node_index { - node.payload_received = true; - } - if let Some(parent_index) = node.parent { - parent_index - } else { - return Ok(()); - } + // Gloas nodes should not be marked valid by this function, which exists only + // for pre-Gloas fork choice. + ProtoNode::V29(_) => { + return Ok(()); } }; @@ -874,6 +866,7 @@ impl ProtoArray { /// Invalidate zero or more blocks, as specified by the `InvalidationOperation`. /// /// See the documentation of `InvalidationOperation` for usage. + // TODO(gloas): this needs some tests for the mixed Gloas/pre-Gloas case. pub fn propagate_execution_payload_invalidation( &mut self, op: &InvalidationOperation, diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index 634a78823d8..842cdfaa334 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -567,11 +567,18 @@ impl ProtoArrayForkChoice { }) } - pub fn on_execution_payload(&mut self, block_root: Hash256) -> Result<(), String> { + /// Mark a Gloas payload envelope as valid and received. + /// + /// This must only be called for valid Gloas payloads. 
+ pub fn on_valid_payload_envelope_received( + &mut self, + block_root: Hash256, + ) -> Result<(), String> { self.proto_array - .on_valid_execution_payload(block_root) + .on_valid_payload_envelope_received(block_root) .map_err(|e| format!("Failed to process execution payload: {:?}", e)) } + /// See `ProtoArray::propagate_execution_payload_validation` for documentation. pub fn process_execution_payload_validation( &mut self, diff --git a/testing/ef_tests/src/cases/fork_choice.rs b/testing/ef_tests/src/cases/fork_choice.rs index 22e8453e14a..06f204ab014 100644 --- a/testing/ef_tests/src/cases/fork_choice.rs +++ b/testing/ef_tests/src/cases/fork_choice.rs @@ -1018,7 +1018,7 @@ impl Tester { .chain .canonical_head .fork_choice_write_lock() - .on_execution_payload(block_root); + .on_valid_payload_envelope_received(block_root); if valid { result.map_err(|e| { From 958c8cad3939f02bda2c6667bdb3f5835ad61db6 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 2 Apr 2026 11:05:33 +1100 Subject: [PATCH 097/127] Rename fork choice test def for clarity --- .../proto_array/src/fork_choice_test_definition.rs | 4 ++-- .../fork_choice_test_definition/gloas_payload.rs | 14 +++++++------- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/consensus/proto_array/src/fork_choice_test_definition.rs b/consensus/proto_array/src/fork_choice_test_definition.rs index 1901091dd66..c9764d3e44d 100644 --- a/consensus/proto_array/src/fork_choice_test_definition.rs +++ b/consensus/proto_array/src/fork_choice_test_definition.rs @@ -98,7 +98,7 @@ pub enum Operation { }, /// Simulate receiving and validating an execution payload for `block_root`. /// Sets `payload_received = true` on the V29 node via the live validation path. - ProcessExecutionPayload { + ProcessExecutionPayloadEnvelope { block_root: Hash256, }, AssertPayloadReceived { @@ -500,7 +500,7 @@ impl ForkChoiceTestDefinition { // the payload to be in payload_states (payload_received). 
node_v29.payload_received = is_timely || is_data_available; } - Operation::ProcessExecutionPayload { block_root } => { + Operation::ProcessExecutionPayloadEnvelope { block_root } => { fork_choice .on_valid_payload_envelope_received(block_root) .unwrap_or_else(|e| { diff --git a/consensus/proto_array/src/fork_choice_test_definition/gloas_payload.rs b/consensus/proto_array/src/fork_choice_test_definition/gloas_payload.rs index 18d7a40b828..ea377807951 100644 --- a/consensus/proto_array/src/fork_choice_test_definition/gloas_payload.rs +++ b/consensus/proto_array/src/fork_choice_test_definition/gloas_payload.rs @@ -53,7 +53,7 @@ pub fn get_gloas_chain_following_test_definition() -> ForkChoiceTestDefinition { // Mark root_1 as having received its execution payload so that // its FULL virtual node exists in the Gloas fork choice tree. - ops.push(Operation::ProcessExecutionPayload { + ops.push(Operation::ProcessExecutionPayloadEnvelope { block_root: get_root(1), }); @@ -263,7 +263,7 @@ pub fn get_gloas_find_head_vote_transition_test_definition() -> ForkChoiceTestDe // Mark root_1 as having received its execution payload so that // its FULL virtual node exists in the Gloas fork choice tree. - ops.push(Operation::ProcessExecutionPayload { + ops.push(Operation::ProcessExecutionPayloadEnvelope { block_root: get_root(1), }); @@ -368,7 +368,7 @@ pub fn get_gloas_weight_priority_over_payload_preference_test_definition() // Mark root_1 as having received its execution payload so that // its FULL virtual node exists in the Gloas fork choice tree. - ops.push(Operation::ProcessExecutionPayload { + ops.push(Operation::ProcessExecutionPayloadEnvelope { block_root: get_root(1), }); @@ -538,7 +538,7 @@ pub fn get_gloas_interleaved_attestations_test_definition() -> ForkChoiceTestDef // Mark root_1 as having received its execution payload so that // its FULL virtual node exists in the Gloas fork choice tree. 
- ops.push(Operation::ProcessExecutionPayload { + ops.push(Operation::ProcessExecutionPayloadEnvelope { block_root: get_root(1), }); @@ -674,8 +674,8 @@ pub fn get_gloas_payload_received_interleaving_test_definition() -> ForkChoiceTe expected_payload_status: None, }); - // ProcessExecutionPayload on genesis is a no-op (already received at init). - ops.push(Operation::ProcessExecutionPayload { + // ProcessExecutionPayloadEnvelope on genesis is a no-op (already received at init). + ops.push(Operation::ProcessExecutionPayloadEnvelope { block_root: get_root(0), }); @@ -778,7 +778,7 @@ mod tests { // Mark root 2's execution payload as received so the Full virtual child exists. if first_gloas_block_full { - ops.push(Operation::ProcessExecutionPayload { + ops.push(Operation::ProcessExecutionPayloadEnvelope { block_root: get_root(2), }); } From 69d725097183a740d1a2cc81b42bb1da7d1b1cab Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 2 Apr 2026 11:27:55 +1100 Subject: [PATCH 098/127] Tidy up payload attestation verification --- consensus/fork_choice/src/fork_choice.rs | 27 ++++++++++++++++--- .../indexed_payload_attestation.rs | 7 ----- 2 files changed, 24 insertions(+), 10 deletions(-) diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index 7f5ef512174..dd68497e232 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -185,6 +185,11 @@ pub enum InvalidAttestation { attestation_slot: Slot, current_slot: Slot, }, + /// One or more payload attesters are not part of the PTC. 
+ PayloadAttestationAttestersNotInPtc { + attesting_indices_len: usize, + attesting_indices_in_ptc: usize, + }, } impl From for Error { @@ -1169,10 +1174,14 @@ where indexed_payload_attestation: &IndexedPayloadAttestation, is_from_block: AttestationFromBlock, ) -> Result<(), InvalidAttestation> { + // This check is from `is_valid_indexed_payload_attestation`, but we do it immediately to + // avoid wasting time on junk attestations. if indexed_payload_attestation.attesting_indices.is_empty() { return Err(InvalidAttestation::EmptyAggregationBitfield); } + // PTC attestation must be for a known block. If block is unknown, delay consideration until + // the block is found (responsibility of caller). let block = self .proto_array .get_block(&indexed_payload_attestation.data.beacon_block_root) @@ -1180,6 +1189,8 @@ where beacon_block_root: indexed_payload_attestation.data.beacon_block_root, })?; + // Not strictly part of the spec, but payload attestations to future slots are MORE INVALID + // than payload attestations to blocks at previous slots. if block.slot > indexed_payload_attestation.data.slot { return Err(InvalidAttestation::AttestsToFutureBlock { block: block.slot, @@ -1187,13 +1198,13 @@ where }); } - // Spec: `if data.slot != state.slot: return` — PTC votes can only - // change the vote for their assigned beacon block. + // PTC votes can only change the vote for their assigned beacon block, return early otherwise if block.slot != indexed_payload_attestation.data.slot { return Ok(()); } // Gossip payload attestations must be for the current slot. + // NOTE: signature is assumed to have been verified by caller. // https://github.com/ethereum/consensus-specs/blob/master/specs/gloas/fork-choice.md if matches!(is_from_block, AttestationFromBlock::False) && indexed_payload_attestation.data.slot != self.fc_store.get_current_slot() @@ -1318,10 +1329,20 @@ where // Resolve validator indices to PTC committee positions. 
let ptc_indices: Vec = attestation - .attesting_indices_iter() + .attesting_indices + .iter() .filter_map(|vi| ptc.iter().position(|&p| p == *vi as usize)) .collect(); + // Check that all the attesters are in the PTC + if ptc_indices.len() != attestation.attesting_indices.len() { + return Err(InvalidAttestation::PayloadAttestationAttestersNotInPtc { + attesting_indices_len: attestation.attesting_indices.len(), + attesting_indices_in_ptc: ptc_indices.len(), + } + .into()); + } + for &ptc_index in &ptc_indices { self.proto_array.process_payload_attestation( attestation.data.beacon_block_root, diff --git a/consensus/types/src/attestation/indexed_payload_attestation.rs b/consensus/types/src/attestation/indexed_payload_attestation.rs index 4de805570cf..bb2087e3301 100644 --- a/consensus/types/src/attestation/indexed_payload_attestation.rs +++ b/consensus/types/src/attestation/indexed_payload_attestation.rs @@ -2,7 +2,6 @@ use crate::test_utils::TestRandom; use crate::{EthSpec, ForkName, PayloadAttestationData}; use bls::AggregateSignature; use context_deserialize::context_deserialize; -use core::slice::Iter; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use ssz_types::VariableList; @@ -21,12 +20,6 @@ pub struct IndexedPayloadAttestation { pub signature: AggregateSignature, } -impl IndexedPayloadAttestation { - pub fn attesting_indices_iter(&self) -> Iter<'_, u64> { - self.attesting_indices.iter() - } -} - #[cfg(test)] mod tests { use super::*; From bc7864b076ebe87c52a9ad33b669a188ec889a58 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 2 Apr 2026 11:44:02 +1100 Subject: [PATCH 099/127] Split out InvalidPayloadAttestation error --- .../beacon_chain/src/block_verification.rs | 2 +- consensus/fork_choice/src/fork_choice.rs | 57 +++++++++++++------ consensus/fork_choice/src/lib.rs | 5 +- consensus/fork_choice/tests/tests.rs | 6 +- 4 files changed, 47 insertions(+), 23 deletions(-) diff --git 
a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 9bb519373ac..1ce1137f1ea 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -1698,7 +1698,7 @@ impl ExecutionPendingBlock { indexed_payload_attestation, AttestationFromBlock::True, &ptc.0, - ) && !matches!(e, ForkChoiceError::InvalidAttestation(_)) + ) && !matches!(e, ForkChoiceError::InvalidPayloadAttestation(_)) { return Err(BlockError::BeaconChainError(Box::new(e.into()))); } diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index dd68497e232..2dbefc763f2 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -26,6 +26,7 @@ use types::{ #[derive(Debug)] pub enum Error { InvalidAttestation(InvalidAttestation), + InvalidPayloadAttestation(InvalidPayloadAttestation), InvalidAttesterSlashing(AttesterSlashingValidationError), InvalidBlock(InvalidBlock), ProtoArrayStringError(String), @@ -85,6 +86,12 @@ impl From for Error { } } +impl From for Error { + fn from(e: InvalidPayloadAttestation) -> Self { + Error::InvalidPayloadAttestation(e) + } +} + impl From for Error { fn from(e: AttesterSlashingValidationError) -> Self { Error::InvalidAttesterSlashing(e) @@ -177,14 +184,24 @@ pub enum InvalidAttestation { /// Post-Gloas: attestation with index == 1 (payload_present) requires the block's /// payload to have been received (`root in store.payload_states`). PayloadNotReceived { beacon_block_root: Hash256 }, - /// A payload attestation votes payload_present for a block in the current slot, which is - /// invalid because the payload cannot be known yet. - PayloadPresentDuringSameSlot { slot: Slot }, +} + +#[derive(Debug, Clone, PartialEq)] +pub enum InvalidPayloadAttestation { + /// The payload attestation's attesting indices were empty. 
+ EmptyAggregationBitfield, + /// The `payload_attestation.data.beacon_block_root` block is unknown. + UnknownHeadBlock { beacon_block_root: Hash256 }, + /// The payload attestation is attesting to a block that is later than itself. + AttestsToFutureBlock { block: Slot, attestation: Slot }, /// A gossip payload attestation must be for the current slot. PayloadAttestationNotCurrentSlot { attestation_slot: Slot, current_slot: Slot, }, + /// A payload attestation votes payload_present for a block in the current slot, which is + /// invalid because the payload cannot be known yet. + PayloadPresentDuringSameSlot { slot: Slot }, /// One or more payload attesters are not part of the PTC. PayloadAttestationAttestersNotInPtc { attesting_indices_len: usize, @@ -1173,11 +1190,11 @@ where &self, indexed_payload_attestation: &IndexedPayloadAttestation, is_from_block: AttestationFromBlock, - ) -> Result<(), InvalidAttestation> { + ) -> Result<(), InvalidPayloadAttestation> { // This check is from `is_valid_indexed_payload_attestation`, but we do it immediately to // avoid wasting time on junk attestations. if indexed_payload_attestation.attesting_indices.is_empty() { - return Err(InvalidAttestation::EmptyAggregationBitfield); + return Err(InvalidPayloadAttestation::EmptyAggregationBitfield); } // PTC attestation must be for a known block. If block is unknown, delay consideration until @@ -1185,14 +1202,14 @@ where let block = self .proto_array .get_block(&indexed_payload_attestation.data.beacon_block_root) - .ok_or(InvalidAttestation::UnknownHeadBlock { + .ok_or(InvalidPayloadAttestation::UnknownHeadBlock { beacon_block_root: indexed_payload_attestation.data.beacon_block_root, })?; // Not strictly part of the spec, but payload attestations to future slots are MORE INVALID // than payload attestations to blocks at previous slots. 
if block.slot > indexed_payload_attestation.data.slot { - return Err(InvalidAttestation::AttestsToFutureBlock { + return Err(InvalidPayloadAttestation::AttestsToFutureBlock { block: block.slot, attestation: indexed_payload_attestation.data.slot, }); @@ -1209,10 +1226,12 @@ where if matches!(is_from_block, AttestationFromBlock::False) && indexed_payload_attestation.data.slot != self.fc_store.get_current_slot() { - return Err(InvalidAttestation::PayloadAttestationNotCurrentSlot { - attestation_slot: indexed_payload_attestation.data.slot, - current_slot: self.fc_store.get_current_slot(), - }); + return Err( + InvalidPayloadAttestation::PayloadAttestationNotCurrentSlot { + attestation_slot: indexed_payload_attestation.data.slot, + current_slot: self.fc_store.get_current_slot(), + }, + ); } // A payload attestation voting payload_present for a block in the current slot is @@ -1222,7 +1241,9 @@ where && self.fc_store.get_current_slot() == block.slot && indexed_payload_attestation.data.payload_present { - return Err(InvalidAttestation::PayloadPresentDuringSameSlot { slot: block.slot }); + return Err(InvalidPayloadAttestation::PayloadPresentDuringSameSlot { + slot: block.slot, + }); } Ok(()) @@ -1336,11 +1357,13 @@ where // Check that all the attesters are in the PTC if ptc_indices.len() != attestation.attesting_indices.len() { - return Err(InvalidAttestation::PayloadAttestationAttestersNotInPtc { - attesting_indices_len: attestation.attesting_indices.len(), - attesting_indices_in_ptc: ptc_indices.len(), - } - .into()); + return Err( + InvalidPayloadAttestation::PayloadAttestationAttestersNotInPtc { + attesting_indices_len: attestation.attesting_indices.len(), + attesting_indices_in_ptc: ptc_indices.len(), + } + .into(), + ); } for &ptc_index in &ptc_indices { diff --git a/consensus/fork_choice/src/lib.rs b/consensus/fork_choice/src/lib.rs index 8f479125b76..159eab0ec05 100644 --- a/consensus/fork_choice/src/lib.rs +++ b/consensus/fork_choice/src/lib.rs @@ -4,8 +4,9 @@ mod 
metrics; pub use crate::fork_choice::{ AttestationFromBlock, Error, ForkChoice, ForkChoiceView, ForkchoiceUpdateParameters, - InvalidAttestation, InvalidBlock, PayloadVerificationStatus, PersistedForkChoice, - PersistedForkChoiceV28, PersistedForkChoiceV29, QueuedAttestation, ResetPayloadStatuses, + InvalidAttestation, InvalidBlock, InvalidPayloadAttestation, PayloadVerificationStatus, + PersistedForkChoice, PersistedForkChoiceV28, PersistedForkChoiceV29, QueuedAttestation, + ResetPayloadStatuses, }; pub use fork_choice_store::ForkChoiceStore; pub use proto_array::{ diff --git a/consensus/fork_choice/tests/tests.rs b/consensus/fork_choice/tests/tests.rs index 241e25d3e26..d6f937c0ca9 100644 --- a/consensus/fork_choice/tests/tests.rs +++ b/consensus/fork_choice/tests/tests.rs @@ -11,7 +11,7 @@ use bls::AggregateSignature; use fixed_bytes::FixedBytesExtended; use fork_choice::{ AttestationFromBlock, ForkChoiceStore, InvalidAttestation, InvalidBlock, - PayloadVerificationStatus, QueuedAttestation, + InvalidPayloadAttestation, PayloadVerificationStatus, QueuedAttestation, }; use state_processing::state_advance::complete_state_advance; use std::fmt; @@ -969,8 +969,8 @@ async fn non_block_payload_attestation_for_previous_slot_is_rejected() { assert!( matches!( result, - Err(ForkChoiceError::InvalidAttestation( - InvalidAttestation::PayloadAttestationNotCurrentSlot { .. } + Err(ForkChoiceError::InvalidPayloadAttestation( + InvalidPayloadAttestation::PayloadAttestationNotCurrentSlot { .. 
} )) ), "gossip payload attestation for previous slot should be rejected, got: {:?}", From cc7e727f90bf28e1ba33413d31c5b34f4147d2ad Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 2 Apr 2026 11:46:34 +1100 Subject: [PATCH 100/127] Tidy latest_message --- consensus/proto_array/src/proto_array_fork_choice.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index 842cdfaa334..0ecaea39713 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -73,6 +73,7 @@ impl From for VoteTrackerV28 { } } +/// Spec's `LatestMessage` type. Only used in tests. pub struct LatestMessage { pub slot: Slot, pub root: Hash256, @@ -1064,10 +1065,9 @@ impl ProtoArrayForkChoice { .is_finalized_checkpoint_or_descendant::(descendant_root, best_finalized_checkpoint) } + /// NOTE: only used in tests. pub fn latest_message(&self, validator_index: usize) -> Option { - if validator_index < self.votes.0.len() { - let vote = &self.votes.0[validator_index]; - + if let Some(vote) = self.votes.0.get(validator_index) { if *vote == VoteTracker::default() { None } else { From 727535bcc9768c99589dcc60e8ee0f5f2807814b Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 2 Apr 2026 11:50:16 +1100 Subject: [PATCH 101/127] Remove spurious payload attestation condition --- consensus/fork_choice/src/fork_choice.rs | 15 --------------- 1 file changed, 15 deletions(-) diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index 2dbefc763f2..0993ae95a3c 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -199,9 +199,6 @@ pub enum InvalidPayloadAttestation { attestation_slot: Slot, current_slot: Slot, }, - /// A payload attestation votes payload_present for a block in the current slot, which is - /// invalid 
because the payload cannot be known yet. - PayloadPresentDuringSameSlot { slot: Slot }, /// One or more payload attesters are not part of the PTC. PayloadAttestationAttestersNotInPtc { attesting_indices_len: usize, @@ -1234,18 +1231,6 @@ where ); } - // A payload attestation voting payload_present for a block in the current slot is - // invalid: the payload cannot be known yet. This only applies to gossip attestations; - // payload attestations from blocks have already been validated by the block producer. - if matches!(is_from_block, AttestationFromBlock::False) - && self.fc_store.get_current_slot() == block.slot - && indexed_payload_attestation.data.payload_present - { - return Err(InvalidPayloadAttestation::PayloadPresentDuringSameSlot { - slot: block.slot, - }); - } - Ok(()) } From d7f67dae21203a3691f96b4d9d2c63995ffaba53 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 2 Apr 2026 13:27:43 +1100 Subject: [PATCH 102/127] Remove incorrect comment --- consensus/proto_array/src/proto_array.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 6695079cd2c..f259ed7e061 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -165,8 +165,6 @@ pub struct ProtoNode { pub payload_data_availability_votes: BitVector, /// Whether the execution payload for this block has been received and validated locally. /// Maps to `root in store.payload_states` in the spec. - /// When true, `is_payload_timely` and `is_payload_data_available` return true - /// regardless of PTC vote counts. 
#[superstruct(only(V29), partial_getter(copy))] pub payload_received: bool, /// The proposer index for this block, used by `should_apply_proposer_boost` From 6cc65848da74fe29a13604d8290445e85a0632bb Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 2 Apr 2026 13:35:51 +1100 Subject: [PATCH 103/127] Remove dead code --- consensus/proto_array/src/proto_array.rs | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index f259ed7e061..14290fd784a 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -489,12 +489,10 @@ impl ProtoArray { .ok_or(Error::DeltaOverflow(parent_index))?; } } else { - // V17 child of a V29 parent (fork transition): treat as FULL - // since V17 nodes always have execution payloads inline. - parent_delta.full_delta = parent_delta - .full_delta - .checked_add(delta) - .ok_or(Error::DeltaOverflow(parent_index))?; + // This is a v17 node with a v17 parent. + // There is no empty or full weight for v17 nodes, so nothing to propagate. + // In the tree walk, the v17 nodes have an empty child with 0 weight, which + // wins by default (it is the only child). } } } From 85934b4d5fcd55884abd2580089154dd419d2ad0 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 2 Apr 2026 13:52:21 +1100 Subject: [PATCH 104/127] Remove some noisy TODOs --- consensus/proto_array/src/proto_array.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 14290fd784a..392f11a198d 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -720,7 +720,6 @@ impl ProtoArray { .nodes .get(block_index) .ok_or(Error::InvalidNodeIndex(block_index))?; - // TODO(gloas): handle parent unknown case? 
let parent_index = block .parent() .ok_or(Error::NodeUnknown(proposer_boost_root))?; @@ -744,7 +743,6 @@ impl ProtoArray { // the parent's slot from the same proposer. let parent_slot = parent.slot(); let parent_root = parent.root(); - // TODO(gloas): handle proposer index for pre-Gloas blocks? let parent_proposer = parent.proposer_index(); let has_equivocation = self.nodes.iter().any(|node| { From 5284486af08216cdaca1e88c4e02cbce8c0a85f3 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 2 Apr 2026 13:55:29 +1100 Subject: [PATCH 105/127] Break out of invalidation loop on Gloas block --- consensus/proto_array/src/proto_array.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 392f11a198d..faaf675565a 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -960,7 +960,7 @@ impl ProtoArray { // This block is pre-merge, therefore it has no execution status. Nor do its // ancestors. Ok(ExecutionStatus::Irrelevant(_)) => break, - Err(_) => (), + Err(_) => break, } } From 7570fd155503a45d27c2be95d23ab3263d81dc48 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 2 Apr 2026 14:10:11 +1100 Subject: [PATCH 106/127] Remove comment --- consensus/proto_array/src/proto_array.rs | 3 --- 1 file changed, 3 deletions(-) diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index faaf675565a..20554c963ee 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -1069,9 +1069,6 @@ impl ProtoArray { }); } - // In the post-Gloas world, always use a virtual tree walk. - // - // Best child/best descendant is dead. 
let best_fc_node = self.find_head_walk::( justified_index, current_slot, From eceedaf7b69c558a2a8aeb81c25e2547b59b98b3 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 31 Mar 2026 15:47:50 +1100 Subject: [PATCH 107/127] Revert parent->child optimisation AGAIN --- consensus/proto_array/src/proto_array.rs | 70 ++++++++---------------- 1 file changed, 24 insertions(+), 46 deletions(-) diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 20554c963ee..3bf8994c672 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -1104,26 +1104,6 @@ impl ProtoArray { Ok((best_fc_node.root, best_fc_node.payload_status)) } - /// Build a parent->children index. Invalid nodes are excluded - /// (they aren't in store.blocks in the spec). - fn build_children_index(&self) -> Vec> { - let mut children = vec![vec![]; self.nodes.len()]; - for (i, node) in self.nodes.iter().enumerate() { - if node - .execution_status() - .is_ok_and(|status| status.is_invalid()) - { - continue; - } - if let Some(parent) = node.parent() - && parent < children.len() - { - children[parent].push(i); - } - } - children - } - /// Spec: `get_filtered_block_tree`. 
/// /// Returns the set of node indices on viable branches — those with at least @@ -1134,7 +1114,6 @@ impl ProtoArray { current_slot: Slot, best_justified_checkpoint: Checkpoint, best_finalized_checkpoint: Checkpoint, - children_index: &[Vec], ) -> HashSet { let mut viable = HashSet::new(); self.filter_block_tree::( @@ -1142,7 +1121,6 @@ impl ProtoArray { current_slot, best_justified_checkpoint, best_finalized_checkpoint, - children_index, &mut viable, ); viable @@ -1155,17 +1133,25 @@ impl ProtoArray { current_slot: Slot, best_justified_checkpoint: Checkpoint, best_finalized_checkpoint: Checkpoint, - children_index: &[Vec], viable: &mut HashSet, ) -> bool { let Some(node) = self.nodes.get(node_index) else { return false; }; - let children = children_index - .get(node_index) - .map(|c| c.as_slice()) - .unwrap_or(&[]); + // Skip invalid children — they aren't in store.blocks in the spec. + let children: Vec = self + .nodes + .iter() + .enumerate() + .filter(|(_, child)| { + child.parent() == Some(node_index) + && !child + .execution_status() + .is_ok_and(|status| status.is_invalid()) + }) + .map(|(i, _)| i) + .collect(); if !children.is_empty() { // Evaluate ALL children (no short-circuit) to mark all viable branches. @@ -1177,7 +1163,6 @@ impl ProtoArray { current_slot, best_justified_checkpoint, best_finalized_checkpoint, - children_index, viable, ) }) @@ -1222,16 +1207,12 @@ impl ProtoArray { payload_status: PayloadStatus::Pending, }; - // Build parent->children index once for O(1) lookups. - let children_index = self.build_children_index(); - // Spec: `get_filtered_block_tree`. let viable_nodes = self.get_filtered_block_tree::( start_index, current_slot, best_justified_checkpoint, best_finalized_checkpoint, - &children_index, ); // Compute once rather than per-child per-level. @@ -1240,7 +1221,7 @@ impl ProtoArray { loop { let children: Vec<_> = self - .get_node_children(&head, &children_index)? + .get_node_children(&head)? 
.into_iter() .filter(|(fc_node, _)| viable_nodes.contains(&fc_node.proto_node_index)) .collect(); @@ -1403,7 +1384,6 @@ impl ProtoArray { fn get_node_children( &self, node: &IndexedForkChoiceNode, - children_index: &[Vec], ) -> Result, Error> { if node.payload_status == PayloadStatus::Pending { let proto_node = self @@ -1417,25 +1397,23 @@ impl ProtoArray { } Ok(children) } else { - let child_indices = children_index - .get(node.proto_node_index) - .map(|c| c.as_slice()) - .unwrap_or(&[]); - Ok(child_indices + Ok(self + .nodes .iter() - .filter_map(|&child_index| { - let child_node = self.nodes.get(child_index)?; - if child_node.get_parent_payload_status() != node.payload_status { - return None; - } - Some(( + .enumerate() + .filter(|(_, child_node)| { + child_node.parent() == Some(node.proto_node_index) + && child_node.get_parent_payload_status() == node.payload_status + }) + .map(|(child_index, child_node)| { + ( IndexedForkChoiceNode { root: child_node.root(), proto_node_index: child_index, payload_status: PayloadStatus::Pending, }, child_node.clone(), - )) + ) }) .collect()) } From 6df55970aab1b138ebe10cf7f87c4a6148f25462 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 2 Apr 2026 14:24:37 +1100 Subject: [PATCH 108/127] Simplify find_head_walk --- consensus/proto_array/src/proto_array.rs | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 3bf8994c672..39c638f4a88 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -1232,11 +1232,7 @@ impl ProtoArray { head = children .into_iter() - .map(|(child, _)| -> Result<_, Error> { - let proto_node = self - .nodes - .get(child.proto_node_index) - .ok_or(Error::InvalidNodeIndex(child.proto_node_index))?; + .map(|(child, ref proto_node)| -> Result<_, Error> { let weight = self.get_weight::( &child, proto_node, From f5413c6e9721e46a9a383e16bd607ec8ce2b6acb 
Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 2 Apr 2026 15:42:57 +1100 Subject: [PATCH 109/127] Remove useless let _ --- consensus/fork_choice/src/fork_choice.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index 0993ae95a3c..92fd4c1faf3 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -474,7 +474,7 @@ where }; // Ensure that `fork_choice.forkchoice_update_parameters.head_root` is updated. - let _ = fork_choice.get_head(current_slot, spec)?; + fork_choice.get_head(current_slot, spec)?; Ok(fork_choice) } @@ -1743,7 +1743,7 @@ where .set_all_blocks_to_optimistic::()?; // If the second attempt at finding a head fails, return an error since we do not // expect this scenario. - let _ = fork_choice.get_head(current_slot, spec)?; + fork_choice.get_head(current_slot, spec)?; } Ok(fork_choice) From a1296fc0c7df2f28eac8c2c52fce1cdb3e9c51b2 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 2 Apr 2026 16:38:40 +1100 Subject: [PATCH 110/127] Fix block_timeliness_ptc_threshold hardcoded constants --- consensus/proto_array/src/proto_array.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 39c638f4a88..f1145598a94 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -636,7 +636,7 @@ impl ProtoArray { // TODO(gloas): use Gloas-specific PTC due threshold once // `get_payload_attestation_due_ms` is on ChainSpec. 
block_timeliness_ptc_threshold: is_genesis - || (is_current_slot && time_into_slot < spec.get_slot_duration() / 2), + || (is_current_slot && time_into_slot < 3 * spec.get_slot_duration() / 4), equivocating_attestation_score: 0, }) }; From 9db37b8bd3d93917413aa35b880a87137e20eb06 Mon Sep 17 00:00:00 2001 From: dapplion <35266934+dapplion@users.noreply.github.com> Date: Thu, 2 Apr 2026 00:50:46 -0500 Subject: [PATCH 111/127] Document is_head_weak spec divergence and impact --- consensus/proto_array/src/proto_array.rs | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index f1145598a94..b0f471dd6da 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -672,12 +672,17 @@ impl ProtoArray { Ok(()) } - /// Spec: `is_head_weak`. - /// - /// The spec adds weight from equivocating validators in the head slot's - /// committees. We approximate this with `equivocating_attestation_score` - /// which tracks equivocating validators that voted for this block (close - /// but not identical to committee membership). + // TODO(gloas): the spec adds weight from equivocating validators in the + // head slot's *committees*, regardless of who they voted for. We approximate + // with `equivocating_attestation_score` which only tracks equivocating + // validators whose vote pointed at this block. This under-counts when an + // equivocating validator is in the committee but voted for a different fork, + // which could allow a re-org the spec wouldn't. In practice the deviation + // is small — it requires equivocating validators voting for competing forks + // AND the head weight to be exactly at the reorg threshold boundary. + // Fixing this properly requires committee computation from BeaconState, + // which is not available in proto_array. 
The fix would be to pass + // pre-computed equivocating committee weight from the beacon_chain caller. fn is_head_weak( &self, head_node: &ProtoNode, From 6763862e0f796cfda42f0781503715c90c14f651 Mon Sep 17 00:00:00 2001 From: dapplion <35266934+dapplion@users.noreply.github.com> Date: Thu, 2 Apr 2026 00:56:42 -0500 Subject: [PATCH 112/127] Add attestation_due_bps_gloas and payload_attestation_due_bps to ChainSpec Spec: `get_attestation_due_ms(epoch)` uses ATTESTATION_DUE_BPS_GLOAS (2500) for Gloas epochs vs ATTESTATION_DUE_BPS (3333) pre-Gloas. `get_payload_attestation_due_ms` uses PAYLOAD_ATTESTATION_DUE_BPS (7500). - Add both BPS fields to ChainSpec with derived Duration values - Add `get_attestation_due::(slot)` that returns epoch-appropriate threshold matching the spec - Add `get_payload_attestation_due()` matching the spec - Use them in proto_array record_block_timeliness instead of hardcoded values --- consensus/proto_array/src/proto_array.rs | 7 ++- consensus/types/src/core/chain_spec.rs | 54 +++++++++++++++++++++++- 2 files changed, 55 insertions(+), 6 deletions(-) diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index b0f471dd6da..dfb43f5f343 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -632,11 +632,9 @@ impl ProtoArray { // Anchor gets [True, True]. Others computed from time_into_slot. block_timeliness_attestation_threshold: is_genesis || (is_current_slot - && time_into_slot < spec.get_unaggregated_attestation_due()), - // TODO(gloas): use Gloas-specific PTC due threshold once - // `get_payload_attestation_due_ms` is on ChainSpec. 
+ && time_into_slot < spec.get_attestation_due::(current_slot)), block_timeliness_ptc_threshold: is_genesis - || (is_current_slot && time_into_slot < 3 * spec.get_slot_duration() / 4), + || (is_current_slot && time_into_slot < spec.get_payload_attestation_due()), equivocating_attestation_score: 0, }) }; @@ -672,6 +670,7 @@ impl ProtoArray { Ok(()) } + /// Spec: `is_head_weak`. // TODO(gloas): the spec adds weight from equivocating validators in the // head slot's *committees*, regardless of who they voted for. We approximate // with `equivocating_attestation_score` which only tracks equivocating diff --git a/consensus/types/src/core/chain_spec.rs b/consensus/types/src/core/chain_spec.rs index cc79d3fc297..9ffa7d6f7ed 100644 --- a/consensus/types/src/core/chain_spec.rs +++ b/consensus/types/src/core/chain_spec.rs @@ -107,6 +107,8 @@ pub struct ChainSpec { pub shard_committee_period: u64, pub proposer_reorg_cutoff_bps: u64, pub attestation_due_bps: u64, + pub attestation_due_bps_gloas: u64, + pub payload_attestation_due_bps: u64, pub aggregate_due_bps: u64, pub sync_message_due_bps: u64, pub contribution_due_bps: u64, @@ -115,6 +117,8 @@ pub struct ChainSpec { * Derived time values (computed at startup via `compute_derived_values()`) */ pub unaggregated_attestation_due: Duration, + pub unaggregated_attestation_due_gloas: Duration, + pub payload_attestation_due: Duration, pub aggregate_attestation_due: Duration, pub sync_message_due: Duration, pub contribution_and_proof_due: Duration, @@ -877,6 +881,20 @@ impl ChainSpec { self.unaggregated_attestation_due } + /// Spec: `get_attestation_due_ms`. Returns the epoch-appropriate threshold. + pub fn get_attestation_due(&self, slot: Slot) -> Duration { + if self.fork_name_at_slot::(slot).gloas_enabled() { + self.unaggregated_attestation_due_gloas + } else { + self.unaggregated_attestation_due + } + } + + /// Spec: `get_payload_attestation_due_ms`. 
+ pub fn get_payload_attestation_due(&self) -> Duration { + self.payload_attestation_due + } + /// Get the duration into a slot in which an aggregated attestation is due. /// Returns the pre-computed value from `compute_derived_values()`. pub fn get_aggregate_attestation_due(&self) -> Duration { @@ -949,6 +967,12 @@ impl ChainSpec { self.unaggregated_attestation_due = self .compute_slot_component_duration(self.attestation_due_bps) .expect("invalid chain spec: cannot compute unaggregated_attestation_due"); + self.unaggregated_attestation_due_gloas = self + .compute_slot_component_duration(self.attestation_due_bps_gloas) + .expect("invalid chain spec: cannot compute unaggregated_attestation_due_gloas"); + self.payload_attestation_due = self + .compute_slot_component_duration(self.payload_attestation_due_bps) + .expect("invalid chain spec: cannot compute payload_attestation_due"); self.aggregate_attestation_due = self .compute_slot_component_duration(self.aggregate_due_bps) .expect("invalid chain spec: cannot compute aggregate_attestation_due"); @@ -1079,6 +1103,8 @@ impl ChainSpec { shard_committee_period: 256, proposer_reorg_cutoff_bps: 1667, attestation_due_bps: 3333, + attestation_due_bps_gloas: 2500, + payload_attestation_due_bps: 7500, aggregate_due_bps: 6667, sync_message_due_bps: 3333, contribution_due_bps: 6667, @@ -1087,6 +1113,8 @@ impl ChainSpec { * Derived time values (set by `compute_derived_values()`) */ unaggregated_attestation_due: Duration::from_millis(3999), + unaggregated_attestation_due_gloas: Duration::from_millis(3000), + payload_attestation_due: Duration::from_millis(9000), aggregate_attestation_due: Duration::from_millis(8000), sync_message_due: Duration::from_millis(3999), contribution_and_proof_due: Duration::from_millis(8000), @@ -1479,6 +1507,8 @@ impl ChainSpec { shard_committee_period: 256, proposer_reorg_cutoff_bps: 1667, attestation_due_bps: 3333, + attestation_due_bps_gloas: 2500, + payload_attestation_due_bps: 7500, 
aggregate_due_bps: 6667, /* @@ -1486,6 +1516,8 @@ impl ChainSpec { * Precomputed for 5000ms slot: 3333 bps = 1666ms, 6667 bps = 3333ms */ unaggregated_attestation_due: Duration::from_millis(1666), + unaggregated_attestation_due_gloas: Duration::from_millis(1250), + payload_attestation_due: Duration::from_millis(3750), aggregate_attestation_due: Duration::from_millis(3333), sync_message_due: Duration::from_millis(1666), contribution_and_proof_due: Duration::from_millis(3333), @@ -2062,6 +2094,12 @@ pub struct Config { #[serde(default = "default_attestation_due_bps")] #[serde(with = "serde_utils::quoted_u64")] attestation_due_bps: u64, + #[serde(default = "default_attestation_due_bps_gloas")] + #[serde(with = "serde_utils::quoted_u64")] + attestation_due_bps_gloas: u64, + #[serde(default = "default_payload_attestation_due_bps")] + #[serde(with = "serde_utils::quoted_u64")] + payload_attestation_due_bps: u64, #[serde(default = "default_aggregate_due_bps")] #[serde(with = "serde_utils::quoted_u64")] aggregate_due_bps: u64, @@ -2288,6 +2326,14 @@ const fn default_attestation_due_bps() -> u64 { 3333 } +const fn default_attestation_due_bps_gloas() -> u64 { + 2500 +} + +const fn default_payload_attestation_due_bps() -> u64 { + 7500 +} + const fn default_aggregate_due_bps() -> u64 { 6667 } @@ -2539,6 +2585,8 @@ impl Config { proposer_reorg_cutoff_bps: spec.proposer_reorg_cutoff_bps, attestation_due_bps: spec.attestation_due_bps, + attestation_due_bps_gloas: spec.attestation_due_bps_gloas, + payload_attestation_due_bps: spec.payload_attestation_due_bps, aggregate_due_bps: spec.aggregate_due_bps, sync_message_due_bps: spec.sync_message_due_bps, contribution_due_bps: spec.contribution_due_bps, @@ -2632,6 +2680,8 @@ impl Config { min_epochs_for_data_column_sidecars_requests, proposer_reorg_cutoff_bps, attestation_due_bps, + attestation_due_bps_gloas, + payload_attestation_due_bps, aggregate_due_bps, sync_message_due_bps, contribution_due_bps, @@ -2731,6 +2781,8 @@ impl Config { 
proposer_reorg_cutoff_bps, attestation_due_bps, + attestation_due_bps_gloas, + payload_attestation_due_bps, aggregate_due_bps, sync_message_due_bps, contribution_due_bps, @@ -3634,11 +3686,9 @@ mod yaml_tests { "EIP7928_FORK_VERSION", "EIP7928_FORK_EPOCH", // Gloas params not yet in Config - "ATTESTATION_DUE_BPS_GLOAS", "AGGREGATE_DUE_BPS_GLOAS", "SYNC_MESSAGE_DUE_BPS_GLOAS", "CONTRIBUTION_DUE_BPS_GLOAS", - "PAYLOAD_ATTESTATION_DUE_BPS", "MAX_REQUEST_PAYLOADS", // Gloas fork choice params not yet in Config "REORG_HEAD_WEIGHT_THRESHOLD", From 68f18efbe5df5c4e629a9851f951839a927c9444 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 2 Apr 2026 19:24:29 +1100 Subject: [PATCH 113/127] Fix minimal spec --- consensus/types/src/core/chain_spec.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/consensus/types/src/core/chain_spec.rs b/consensus/types/src/core/chain_spec.rs index 9ffa7d6f7ed..e612c8b6db2 100644 --- a/consensus/types/src/core/chain_spec.rs +++ b/consensus/types/src/core/chain_spec.rs @@ -1418,6 +1418,8 @@ impl ChainSpec { * Precomputed for 6000ms slot: 3333 bps = 1999ms, 6667 bps = 4000ms */ unaggregated_attestation_due: Duration::from_millis(1999), + unaggregated_attestation_due_gloas: Duration::from_millis(1500), + payload_attestation_due: Duration::from_millis(4500), aggregate_attestation_due: Duration::from_millis(4000), sync_message_due: Duration::from_millis(1999), contribution_and_proof_due: Duration::from_millis(4000), From 86ddd0d88d3f4650f56312830262a070137940ee Mon Sep 17 00:00:00 2001 From: Eitan Seri- Levi Date: Thu, 2 Apr 2026 19:09:56 -0700 Subject: [PATCH 114/127] Add EnvelopeRequestState logic --- .../network/src/sync/block_lookups/common.rs | 58 ++++++++- .../network/src/sync/block_lookups/mod.rs | 111 +++++++++++++----- .../sync/block_lookups/single_block_lookup.rs | 67 +++++++++-- beacon_node/network/src/sync/manager.rs | 30 ++--- .../network/src/sync/network_context.rs | 23 +++- 5 files changed, 221 insertions(+), 68 
deletions(-) diff --git a/beacon_node/network/src/sync/block_lookups/common.rs b/beacon_node/network/src/sync/block_lookups/common.rs index edd99345b43..bb8d81cc6e7 100644 --- a/beacon_node/network/src/sync/block_lookups/common.rs +++ b/beacon_node/network/src/sync/block_lookups/common.rs @@ -2,7 +2,7 @@ use crate::sync::block_lookups::single_block_lookup::{ LookupRequestError, SingleBlockLookup, SingleLookupRequestState, }; use crate::sync::block_lookups::{ - BlobRequestState, BlockRequestState, CustodyRequestState, PeerId, + BlobRequestState, BlockRequestState, CustodyRequestState, EnvelopeRequestState, PeerId, }; use crate::sync::manager::BlockProcessType; use crate::sync::network_context::{LookupRequestResult, SyncNetworkContext}; @@ -12,16 +12,17 @@ use parking_lot::RwLock; use std::collections::HashSet; use std::sync::Arc; use types::data::FixedBlobSidecarList; -use types::{DataColumnSidecarList, SignedBeaconBlock}; +use types::{DataColumnSidecarList, SignedBeaconBlock, SignedExecutionPayloadEnvelope}; use super::SingleLookupId; use super::single_block_lookup::{ComponentRequests, DownloadResult}; -#[derive(Debug, Copy, Clone)] +#[derive(Debug, Copy, Clone, PartialEq, Eq)] pub enum ResponseType { Block, Blob, CustodyColumn, + Envelope, } /// This trait unifies common single block lookup functionality across blocks and blobs. This @@ -151,6 +152,7 @@ impl RequestState for BlobRequestState { ComponentRequests::WaitingForBlock => Err("waiting for block"), ComponentRequests::ActiveBlobRequest(request, _) => Ok(request), ComponentRequests::ActiveCustodyRequest { .. } => Err("expecting custody request"), + ComponentRequests::ActiveEnvelopeRequest { .. } => Err("expecting envelope request"), ComponentRequests::NotNeeded { .. } => Err("not needed"), } } @@ -205,6 +207,7 @@ impl RequestState for CustodyRequestState { ComponentRequests::WaitingForBlock => Err("waiting for block"), ComponentRequests::ActiveBlobRequest { .. 
} => Err("expecting blob request"), ComponentRequests::ActiveCustodyRequest(request) => Ok(request), + ComponentRequests::ActiveEnvelopeRequest { .. } => Err("expecting envelope request"), ComponentRequests::NotNeeded { .. } => Err("not needed"), } } @@ -215,3 +218,52 @@ impl RequestState for CustodyRequestState { &mut self.state } } + +impl RequestState for EnvelopeRequestState { + type VerifiedResponseType = Arc>; + + fn make_request( + &self, + id: Id, + lookup_peers: Arc>>, + _: usize, + cx: &mut SyncNetworkContext, + ) -> Result { + cx.envelope_lookup_request(id, lookup_peers, self.block_root) + .map_err(LookupRequestError::SendFailedNetwork) + } + + fn send_for_processing( + id: Id, + download_result: DownloadResult, + cx: &SyncNetworkContext, + ) -> Result<(), LookupRequestError> { + let DownloadResult { + value, + block_root, + seen_timestamp, + .. + } = download_result; + cx.send_envelope_for_processing(id, value, seen_timestamp, block_root) + .map_err(LookupRequestError::SendFailedProcessor) + } + + fn response_type() -> ResponseType { + ResponseType::Envelope + } + + fn request_state_mut(request: &mut SingleBlockLookup) -> Result<&mut Self, &'static str> { + match &mut request.component_requests { + ComponentRequests::ActiveEnvelopeRequest(request) => Ok(request), + _ => Err("expecting envelope request"), + } + } + + fn get_state(&self) -> &SingleLookupRequestState { + &self.state + } + + fn get_state_mut(&mut self) -> &mut SingleLookupRequestState { + &mut self.state + } +} diff --git a/beacon_node/network/src/sync/block_lookups/mod.rs b/beacon_node/network/src/sync/block_lookups/mod.rs index 7b4e3ce753e..b33c38d1476 100644 --- a/beacon_node/network/src/sync/block_lookups/mod.rs +++ b/beacon_node/network/src/sync/block_lookups/mod.rs @@ -39,7 +39,9 @@ use fnv::FnvHashMap; use lighthouse_network::service::api_types::SingleLookupReqId; use lighthouse_network::{PeerAction, PeerId}; use lru_cache::LRUTimeCache; -pub use 
single_block_lookup::{BlobRequestState, BlockRequestState, CustodyRequestState}; +pub use single_block_lookup::{ + BlobRequestState, BlockRequestState, CustodyRequestState, EnvelopeRequestState, +}; use std::collections::hash_map::Entry; use std::sync::Arc; use std::time::Duration; @@ -344,6 +346,57 @@ impl BlockLookups { self.new_current_lookup(block_root_to_search, None, None, peers, cx) } + /// A block triggers the search of a parent envelope. + #[must_use = "only reference the new lookup if returns true"] + pub fn search_parent_envelope_of_child( + &mut self, + parent_root: Hash256, + peers: &[PeerId], + cx: &mut SyncNetworkContext, + ) -> bool { + // Check if there's already a lookup for this root (could be a block lookup or envelope + // lookup). If so, add peers and let it handle the envelope. + if let Some((&lookup_id, _lookup)) = self + .single_block_lookups + .iter_mut() + .find(|(_, lookup)| lookup.is_for_block(parent_root)) + { + if let Err(e) = self.add_peers_to_lookup_and_ancestors(lookup_id, peers, cx) { + warn!(error = ?e, "Error adding peers to envelope lookup"); + } + return true; + } + + if self.single_block_lookups.len() >= MAX_LOOKUPS { + warn!(?parent_root, "Dropping envelope lookup reached max"); + return false; + } + + let lookup = SingleBlockLookup::new_envelope_only(parent_root, peers, cx.next_id()); + let _guard = lookup.span.clone().entered(); + + let id = lookup.id; + let lookup = match self.single_block_lookups.entry(id) { + Entry::Vacant(entry) => entry.insert(lookup), + Entry::Occupied(_) => { + warn!(id, "Lookup exists with same id"); + return false; + } + }; + + debug!( + ?peers, + ?parent_root, + id = lookup.id, + "Created envelope-only lookup" + ); + metrics::inc_counter(&metrics::SYNC_LOOKUP_CREATED); + self.metrics.created_lookups += 1; + + let result = lookup.continue_requests(cx); + self.on_lookup_result(id, result, "new_envelope_lookup", cx) + } + /// Searches for a single block hash. 
If the blocks parent is unknown, a chain of blocks is /// constructed. /// Returns true if the lookup is created or already exists @@ -561,17 +614,13 @@ impl BlockLookups { self.on_processing_result_inner::>(id, result, cx) } BlockProcessType::SinglePayloadEnvelope { id, block_root } => { - match result { - BlockProcessingResult::Ok(_) => { - self.continue_envelope_child_lookups(block_root, cx); - } - BlockProcessingResult::Err(e) => { - debug!(%id, error = ?e, "Payload envelope processing failed"); - // TODO(EIP-7732): resolve awaiting_envelope on affected lookups so they can retry - } - _ => {} + let result = self + .on_processing_result_inner::>(id, result, cx); + // On successful envelope import, unblock child lookups waiting for this envelope + if matches!(&result, Ok(LookupResult::Completed)) { + self.continue_envelope_child_lookups(block_root, cx); } - return; + result } }; self.on_lookup_result(process_type.id(), lookup_result, "processing_result", cx); @@ -721,6 +770,7 @@ impl BlockLookups { ResponseType::CustodyColumn => { "lookup_custody_column_processing_failure" } + ResponseType::Envelope => "lookup_envelope_processing_failure", }, ); } @@ -764,22 +814,20 @@ impl BlockLookups { } Action::ParentEnvelopeUnknown { parent_root } => { let peers = lookup.all_peers(); - lookup.set_awaiting_envelope(parent_root); - // Pick a peer to request the envelope from - let peer_id = peers.first().copied().ok_or_else(|| { - LookupRequestError::Failed("No peers available for envelope request".to_owned()) - })?; - match cx.envelope_lookup_request(lookup_id, peer_id, parent_root) { - Ok(_) => { - debug!( - id = lookup_id, - ?block_root, - ?parent_root, - "Requesting missing parent envelope" - ); - Ok(LookupResult::Pending) - } - Err(e) => Err(LookupRequestError::SendFailedNetwork(e)), + lookup.set_awaiting_parent_envelope(parent_root); + let envelope_lookup_exists = self.search_parent_envelope_of_child(parent_root, &peers, cx); + if envelope_lookup_exists { + debug!( + id 
= lookup_id, + ?block_root, + ?parent_root, + "Marking lookup as awaiting parent envelope" + ); + Ok(LookupResult::Pending) + } else { + Err(LookupRequestError::Failed(format!( + "Envelope lookup could not be created for {parent_root:?}" + ))) } } Action::Drop(reason) => { @@ -858,8 +906,8 @@ impl BlockLookups { let mut lookup_results = vec![]; for (id, lookup) in self.single_block_lookups.iter_mut() { - if lookup.awaiting_envelope() == Some(block_root) { - lookup.resolve_awaiting_envelope(); + if lookup.awaiting_parent_envelope() == Some(block_root) { + lookup.resolve_awaiting_parent_envelope(); debug!( envelope_root = ?block_root, id, @@ -894,7 +942,10 @@ impl BlockLookups { let child_lookups = self .single_block_lookups .iter() - .filter(|(_, lookup)| lookup.awaiting_parent() == Some(dropped_lookup.block_root())) + .filter(|(_, lookup)| { + lookup.awaiting_parent() == Some(dropped_lookup.block_root()) + || lookup.awaiting_parent_envelope() == Some(dropped_lookup.block_root()) + }) .map(|(id, _)| *id) .collect::>(); diff --git a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs index 51cc1910567..d59753b9607 100644 --- a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs +++ b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs @@ -16,7 +16,9 @@ use store::Hash256; use strum::IntoStaticStr; use tracing::{Span, debug_span}; use types::data::FixedBlobSidecarList; -use types::{DataColumnSidecarList, EthSpec, SignedBeaconBlock, Slot}; +use types::{ + DataColumnSidecarList, EthSpec, SignedBeaconBlock, SignedExecutionPayloadEnvelope, Slot, +}; // Dedicated enum for LookupResult to force its usage #[must_use = "LookupResult must be handled with on_lookup_result"] @@ -70,7 +72,7 @@ pub struct SingleBlockLookup { peers: Arc>>, block_root: Hash256, awaiting_parent: Option, - awaiting_envelope: Option, + awaiting_parent_envelope: Option, created: Instant, 
pub(crate) span: Span, } @@ -80,6 +82,7 @@ pub(crate) enum ComponentRequests { WaitingForBlock, ActiveBlobRequest(BlobRequestState, usize), ActiveCustodyRequest(CustodyRequestState), + ActiveEnvelopeRequest(EnvelopeRequestState), // When printing in debug this state display the reason why it's not needed #[allow(dead_code)] NotNeeded(&'static str), @@ -105,12 +108,26 @@ impl SingleBlockLookup { peers: Arc::new(RwLock::new(HashSet::from_iter(peers.iter().copied()))), block_root: requested_block_root, awaiting_parent, - awaiting_envelope: None, + awaiting_parent_envelope: None, created: Instant::now(), span: lookup_span, } } + /// Create an envelope-only lookup. The block is already imported, we just need the envelope. + pub fn new_envelope_only(block_root: Hash256, peers: &[PeerId], id: Id) -> Self { + let mut lookup = Self::new(block_root, peers, id, None); + // Block is already imported, mark as completed + lookup + .block_request_state + .state + .on_completed_request("block already imported") + .expect("block state starts as AwaitingDownload"); + lookup.component_requests = + ComponentRequests::ActiveEnvelopeRequest(EnvelopeRequestState::new(block_root)); + lookup + } + /// Reset the status of all internal requests pub fn reset_requests(&mut self) { self.block_request_state = BlockRequestState::new(self.block_root); @@ -146,18 +163,18 @@ impl SingleBlockLookup { self.awaiting_parent = None; } - pub fn awaiting_envelope(&self) -> Option { - self.awaiting_envelope + pub fn awaiting_parent_envelope(&self) -> Option { + self.awaiting_parent_envelope } /// Mark this lookup as awaiting a parent envelope to be imported before processing. - pub fn set_awaiting_envelope(&mut self, parent_root: Hash256) { - self.awaiting_envelope = Some(parent_root); + pub fn set_awaiting_parent_envelope(&mut self, parent_root: Hash256) { + self.awaiting_parent_envelope = Some(parent_root); } /// Mark this lookup as no longer awaiting a parent envelope. 
- pub fn resolve_awaiting_envelope(&mut self) { - self.awaiting_envelope = None; + pub fn resolve_awaiting_parent_envelope(&mut self) { + self.awaiting_parent_envelope = None; } /// Returns the time elapsed since this lookup was created @@ -194,6 +211,7 @@ impl SingleBlockLookup { ComponentRequests::WaitingForBlock => false, ComponentRequests::ActiveBlobRequest(request, _) => request.state.is_processed(), ComponentRequests::ActiveCustodyRequest(request) => request.state.is_processed(), + ComponentRequests::ActiveEnvelopeRequest(request) => request.state.is_processed(), ComponentRequests::NotNeeded { .. } => true, } } @@ -201,7 +219,7 @@ impl SingleBlockLookup { /// Returns true if this request is expecting some event to make progress pub fn is_awaiting_event(&self) -> bool { self.awaiting_parent.is_some() - || self.awaiting_envelope.is_some() + || self.awaiting_parent_envelope.is_some() || self.block_request_state.state.is_awaiting_event() || match &self.component_requests { // If components are waiting for the block request to complete, here we should @@ -214,6 +232,9 @@ impl SingleBlockLookup { ComponentRequests::ActiveCustodyRequest(request) => { request.state.is_awaiting_event() } + ComponentRequests::ActiveEnvelopeRequest(request) => { + request.state.is_awaiting_event() + } ComponentRequests::NotNeeded { .. } => false, } } @@ -283,6 +304,9 @@ impl SingleBlockLookup { ComponentRequests::ActiveCustodyRequest(_) => { self.continue_request::>(cx, 0)? } + ComponentRequests::ActiveEnvelopeRequest(_) => { + self.continue_request::>(cx, 0)? + } ComponentRequests::NotNeeded { .. 
} => {} // do nothing } @@ -304,7 +328,8 @@ impl SingleBlockLookup { expected_blobs: usize, ) -> Result<(), LookupRequestError> { let id = self.id; - let awaiting_event = self.awaiting_parent.is_some() || self.awaiting_envelope.is_some(); + let awaiting_event = + self.awaiting_parent.is_some() || self.awaiting_parent_envelope.is_some(); let request = R::request_state_mut(self).map_err(|e| LookupRequestError::BadState(e.to_owned()))?; @@ -444,6 +469,26 @@ impl BlockRequestState { } } +/// The state of the envelope request component of a `SingleBlockLookup`. +/// Used for envelope-only lookups where the parent block is already imported +/// but its execution payload envelope is missing. +#[derive(Educe)] +#[educe(Debug)] +pub struct EnvelopeRequestState { + #[educe(Debug(ignore))] + pub block_root: Hash256, + pub state: SingleLookupRequestState>>, +} + +impl EnvelopeRequestState { + pub fn new(block_root: Hash256) -> Self { + Self { + block_root, + state: SingleLookupRequestState::new(), + } + } +} + #[derive(Debug, Clone)] pub struct DownloadResult { pub value: T, diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index 256752d5fbb..2cc35081b7f 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -45,6 +45,7 @@ use crate::service::NetworkMessage; use crate::status::ToStatusMessage; use crate::sync::block_lookups::{ BlobRequestState, BlockComponent, BlockRequestState, CustodyRequestState, DownloadResult, + EnvelopeRequestState, }; use crate::sync::custody_backfill_sync::CustodyBackFillSync; use crate::sync::network_context::{PeerGroup, RpcResponseResult}; @@ -1278,27 +1279,14 @@ impl SyncManager { .network .on_single_envelope_response(id, peer_id, rpc_event) { - match resp { - Ok((envelope, seen_timestamp)) => { - let block_root = envelope.beacon_block_root(); - debug!( - ?block_root, - %id, - "Downloaded payload envelope, sending for processing" - ); - if let Err(e) = 
self.network.send_envelope_for_processing( - id.req_id, - envelope, - seen_timestamp, - block_root, - ) { - error!(error = ?e, "Failed to send envelope for processing"); - } - } - Err(e) => { - debug!(error = ?e, %id, "Payload envelope download failed"); - } - } + self.block_lookups + .on_download_response::>( + id, + resp.map(|(value, seen_timestamp)| { + (value, PeerGroup::from_single(peer_id), seen_timestamp) + }), + &mut self.network, + ) } } diff --git a/beacon_node/network/src/sync/network_context.rs b/beacon_node/network/src/sync/network_context.rs index e9d289b7771..328940d6729 100644 --- a/beacon_node/network/src/sync/network_context.rs +++ b/beacon_node/network/src/sync/network_context.rs @@ -944,9 +944,26 @@ impl SyncNetworkContext { pub fn envelope_lookup_request( &mut self, lookup_id: SingleLookupId, - peer_id: PeerId, + lookup_peers: Arc>>, block_root: Hash256, - ) -> Result { + ) -> Result { + let active_request_count_by_peer = self.active_request_count_by_peer(); + let Some(peer_id) = lookup_peers + .read() + .iter() + .map(|peer| { + ( + active_request_count_by_peer.get(peer).copied().unwrap_or(0), + rand::random::(), + peer, + ) + }) + .min() + .map(|(_, _, peer)| *peer) + else { + return Ok(LookupRequestResult::Pending("no peers")); + }; + let id = SingleLookupReqId { lookup_id, req_id: self.next_id(), @@ -988,7 +1005,7 @@ impl SyncNetworkContext { request_span, ); - Ok(id.req_id) + Ok(LookupRequestResult::RequestSent(id.req_id)) } /// Request necessary blobs for `block_root`. 
Requests only the necessary blobs by checking: From 3523804515f9e05f8c1782152694d35a1951b0e5 Mon Sep 17 00:00:00 2001 From: Eitan Seri- Levi Date: Thu, 2 Apr 2026 19:30:12 -0700 Subject: [PATCH 115/127] cleanup --- .../src/beacon/execution_payload_envelope.rs | 15 ++++--- .../network/src/sync/block_lookups/mod.rs | 44 ++++++++++++++++++- beacon_node/network/src/sync/manager.rs | 38 +++++++++++++++- 3 files changed, 88 insertions(+), 9 deletions(-) diff --git a/beacon_node/http_api/src/beacon/execution_payload_envelope.rs b/beacon_node/http_api/src/beacon/execution_payload_envelope.rs index ea8c0d4b8a2..7f81f7bf25f 100644 --- a/beacon_node/http_api/src/beacon/execution_payload_envelope.rs +++ b/beacon_node/http_api/src/beacon/execution_payload_envelope.rs @@ -132,18 +132,21 @@ pub async fn publish_execution_payload_envelope( }; let ctx = chain.gossip_verification_context(); - let Ok(gossip_verifed_envelope) = GossipVerifiedEnvelope::new(signed_envelope, &ctx) else { - warn!(%slot, %beacon_block_root, "Execution payload envelope rejected"); - return Err(warp_utils::reject::custom_bad_request( - "execution payload envelope rejected, gossip verification".to_string(), - )); + let gossip_verified_envelope = match GossipVerifiedEnvelope::new(signed_envelope, &ctx) { + Ok(envelope) => envelope, + Err(e) => { + warn!(%slot, %beacon_block_root, error = ?e, "Execution payload envelope rejected"); + return Err(warp_utils::reject::custom_bad_request(format!( + "execution payload envelope rejected: {e:?}", + ))); + } }; // Import the envelope locally (runs state transition and notifies the EL). 
chain .process_execution_payload_envelope( beacon_block_root, - gossip_verifed_envelope, + gossip_verified_envelope, NotifyExecutionLayer::Yes, BlockImportSource::HttpApi, publish_fn, diff --git a/beacon_node/network/src/sync/block_lookups/mod.rs b/beacon_node/network/src/sync/block_lookups/mod.rs index b33c38d1476..4d14479627a 100644 --- a/beacon_node/network/src/sync/block_lookups/mod.rs +++ b/beacon_node/network/src/sync/block_lookups/mod.rs @@ -228,6 +228,47 @@ impl BlockLookups { } } + /// A child block's parent envelope is missing. Create a child lookup (with the block component) + /// that waits for the parent envelope, and an envelope-only lookup for the parent. + /// + /// Returns true if both lookups are created or already exist. + #[must_use = "only reference the new lookup if returns true"] + pub fn search_child_and_parent_envelope( + &mut self, + block_root: Hash256, + block_component: BlockComponent, + parent_root: Hash256, + peer_id: PeerId, + cx: &mut SyncNetworkContext, + ) -> bool { + let envelope_lookup_exists = + self.search_parent_envelope_of_child(parent_root, &[peer_id], cx); + if envelope_lookup_exists { + // Create child lookup that waits for the parent envelope (not parent block). + // The child block itself is available, so we pass it as a component. + let child_created = self.new_current_lookup( + block_root, + Some(block_component), + None, // not awaiting parent block + &[], + cx, + ); + // Set awaiting_parent_envelope on the child lookup + if child_created { + if let Some((_, lookup)) = self + .single_block_lookups + .iter_mut() + .find(|(_, l)| l.is_for_block(block_root)) + { + lookup.set_awaiting_parent_envelope(parent_root); + } + } + child_created + } else { + false + } + } + /// Seach a block whose parent root is unknown. 
/// /// Returns true if the lookup is created or already exists @@ -815,7 +856,8 @@ impl BlockLookups { Action::ParentEnvelopeUnknown { parent_root } => { let peers = lookup.all_peers(); lookup.set_awaiting_parent_envelope(parent_root); - let envelope_lookup_exists = self.search_parent_envelope_of_child(parent_root, &peers, cx); + let envelope_lookup_exists = + self.search_parent_envelope_of_child(parent_root, &peers, cx); if envelope_lookup_exists { debug!( id = lookup_id, diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index 2cc35081b7f..1ca338ccd39 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -935,9 +935,9 @@ impl SyncManager { debug!( %block_root, %parent_root, - "Parent envelope not yet available, creating lookup" + "Parent envelope not yet available, creating envelope lookup" ); - self.handle_unknown_parent( + self.handle_unknown_parent_envelope( peer_id, block_root, parent_root, @@ -1055,6 +1055,40 @@ impl SyncManager { } } + /// Handle a block whose parent block is known but parent envelope is missing. + /// Creates an envelope-only lookup for the parent and a child lookup that waits for it. 
+ fn handle_unknown_parent_envelope( + &mut self, + peer_id: PeerId, + block_root: Hash256, + parent_root: Hash256, + slot: Slot, + block_component: BlockComponent, + ) { + match self.should_search_for_block(Some(slot), &peer_id) { + Ok(_) => { + if self.block_lookups.search_child_and_parent_envelope( + block_root, + block_component, + parent_root, + peer_id, + &mut self.network, + ) { + // Lookups created + } else { + debug!( + ?block_root, + ?parent_root, + "No lookup created for child and parent envelope" + ); + } + } + Err(reason) => { + debug!(%block_root, %parent_root, reason, "Ignoring unknown parent envelope request"); + } + } + } + fn handle_unknown_block_root(&mut self, peer_id: PeerId, block_root: Hash256) { match self.should_search_for_block(None, &peer_id) { Ok(_) => { From 1cd4d57204f9283bbbceba46585476385a1c0c53 Mon Sep 17 00:00:00 2001 From: Eitan Seri- Levi Date: Thu, 2 Apr 2026 19:37:51 -0700 Subject: [PATCH 116/127] Fixes --- beacon_node/network/src/router.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/beacon_node/network/src/router.rs b/beacon_node/network/src/router.rs index 3fb21969756..3d82252a0c9 100644 --- a/beacon_node/network/src/router.rs +++ b/beacon_node/network/src/router.rs @@ -336,7 +336,7 @@ impl Router { // TODO(EIP-7732): implement outgoing payload envelopes by range responses once // range sync requests them. 
Response::PayloadEnvelopesByRange(_) => { - unreachable!() + error!(%peer_id, "Unexpected PayloadEnvelopesByRange response"); } // Light client responses should not be received Response::LightClientBootstrap(_) From 214e3ce9f0ec4a4042e085c213b267fec63342f6 Mon Sep 17 00:00:00 2001 From: Eitan Seri- Levi Date: Fri, 3 Apr 2026 00:02:24 -0700 Subject: [PATCH 117/127] Cleanup --- .../beacon_chain/src/block_verification.rs | 37 ++++++++++++++- .../src/beacon/execution_payload_envelope.rs | 1 - .../gossip_methods.rs | 4 +- .../network_beacon_processor/sync_methods.rs | 8 +--- .../network/src/sync/block_lookups/mod.rs | 47 ++++++++++++------- 5 files changed, 72 insertions(+), 25 deletions(-) diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 916a207e623..2b468439017 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -60,6 +60,7 @@ use crate::execution_payload::{ }; use crate::kzg_utils::blobs_to_data_column_sidecars; use crate::observed_block_producers::SeenBlock; +use crate::payload_envelope_verification::EnvelopeError; use crate::validator_monitor::HISTORIC_EPOCHS as VALIDATOR_MONITOR_HISTORIC_EPOCHS; use crate::validator_pubkey_cache::ValidatorPubkeyCache; use crate::{ @@ -321,13 +322,18 @@ pub enum BlockError { bid_parent_root: Hash256, block_parent_root: Hash256, }, - /// The parent block is known but its execution payload envelope has not been received yet. + /// The child block is known but its parent execution payload envelope has not been received yet. /// /// ## Peer scoring /// /// It's unclear if this block is valid, but it cannot be fully verified without the parent's /// execution payload envelope. 
ParentEnvelopeUnknown { parent_root: Hash256 }, + + PayloadEnvelopeError { + e: Box, + penalize_peer: bool, + }, } /// Which specific signature(s) are invalid in a SignedBeaconBlock @@ -494,6 +500,35 @@ impl From for BlockError { } } +impl From for BlockError { + fn from(e: EnvelopeError) -> Self { + let penalize_peer = match &e { + // REJECT per spec: peer sent invalid envelope data + EnvelopeError::BadSignature + | EnvelopeError::BuilderIndexMismatch { .. } + | EnvelopeError::BlockHashMismatch { .. } + | EnvelopeError::SlotMismatch { .. } + | EnvelopeError::IncorrectBlockProposer { .. } => true, + // IGNORE per spec: not the peer's fault + EnvelopeError::BlockRootUnknown { .. } + | EnvelopeError::PriorToFinalization { .. } + | EnvelopeError::UnknownValidator { .. } => false, + // Internal errors: not the peer's fault + EnvelopeError::BeaconChainError(_) + | EnvelopeError::BeaconStateError(_) + | EnvelopeError::BlockProcessingError(_) + | EnvelopeError::EnvelopeProcessingError(_) + | EnvelopeError::ExecutionPayloadError(_) + | EnvelopeError::BlockError(_) + | EnvelopeError::InternalError(_) => false, + }; + BlockError::PayloadEnvelopeError { + e: Box::new(e), + penalize_peer, + } + } +} + /// Stores information about verifying a payload against an execution engine. #[derive(Debug, PartialEq, Clone, Encode, Decode)] pub struct PayloadVerificationOutcome { diff --git a/beacon_node/http_api/src/beacon/execution_payload_envelope.rs b/beacon_node/http_api/src/beacon/execution_payload_envelope.rs index 7f81f7bf25f..3479d62f6ad 100644 --- a/beacon_node/http_api/src/beacon/execution_payload_envelope.rs +++ b/beacon_node/http_api/src/beacon/execution_payload_envelope.rs @@ -90,7 +90,6 @@ pub(crate) fn post_beacon_execution_payload_envelope( .boxed() } /// Publishes a signed execution payload envelope to the network. -/// TODO(gloas): Add gossip verification (BroadcastValidation::Gossip) before import. 
pub async fn publish_execution_payload_envelope( envelope: SignedExecutionPayloadEnvelope, chain: Arc>, diff --git a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs index 2e04847630c..fe9e1755b6a 100644 --- a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs @@ -1390,7 +1390,9 @@ impl NetworkBeaconProcessor { return None; } // BlobNotRequired is unreachable. Only constructed in `process_gossip_blob` - Err(e @ BlockError::InternalError(_)) | Err(e @ BlockError::BlobNotRequired(_)) => { + Err(e @ BlockError::InternalError(_)) + | Err(e @ BlockError::BlobNotRequired(_)) + | Err(e @ BlockError::PayloadEnvelopeError { .. }) => { error!(error = %e, "Internal block gossip validation error"); return None; } diff --git a/beacon_node/network/src/network_beacon_processor/sync_methods.rs b/beacon_node/network/src/network_beacon_processor/sync_methods.rs index f6d4940121e..57d3d7d2206 100644 --- a/beacon_node/network/src/network_beacon_processor/sync_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/sync_methods.rs @@ -109,9 +109,7 @@ impl NetworkBeaconProcessor { ); self.send_sync_message(SyncMessage::BlockComponentProcessed { process_type, - result: BlockProcessingResult::Err(BlockError::InternalError(format!( - "Envelope verification failed: {e:?}" - ))), + result: BlockProcessingResult::Err(e.into()), }); return; } @@ -138,9 +136,7 @@ impl NetworkBeaconProcessor { ?beacon_block_root, "RPC payload envelope processing failed" ); - BlockProcessingResult::Err(BlockError::InternalError(format!( - "Envelope processing failed: {e:?}" - ))) + BlockProcessingResult::Err(e.into()) } }; diff --git a/beacon_node/network/src/sync/block_lookups/mod.rs b/beacon_node/network/src/sync/block_lookups/mod.rs index 4d14479627a..8a183a0b1b3 100644 --- a/beacon_node/network/src/sync/block_lookups/mod.rs 
+++ b/beacon_node/network/src/sync/block_lookups/mod.rs @@ -217,6 +217,7 @@ impl BlockLookups { block_root, Some(block_component), Some(parent_root), + None, // On a `UnknownParentBlock` or `UnknownParentBlob` event the peer is not required // to have the rest of the block components (refer to decoupled blob gossip). Create // the lookup with zero peers to house the block components. @@ -246,30 +247,20 @@ impl BlockLookups { if envelope_lookup_exists { // Create child lookup that waits for the parent envelope (not parent block). // The child block itself is available, so we pass it as a component. - let child_created = self.new_current_lookup( + self.new_current_lookup( block_root, Some(block_component), None, // not awaiting parent block + Some(parent_root), &[], cx, - ); - // Set awaiting_parent_envelope on the child lookup - if child_created { - if let Some((_, lookup)) = self - .single_block_lookups - .iter_mut() - .find(|(_, l)| l.is_for_block(block_root)) - { - lookup.set_awaiting_parent_envelope(parent_root); - } - } - child_created + ) } else { false } } - /// Seach a block whose parent root is unknown. + /// Search a block whose parent root is unknown. /// /// Returns true if the lookup is created or already exists #[must_use = "only reference the new lookup if returns true"] @@ -279,7 +270,7 @@ impl BlockLookups { peer_source: &[PeerId], cx: &mut SyncNetworkContext, ) -> bool { - self.new_current_lookup(block_root, None, None, peer_source, cx) + self.new_current_lookup(block_root, None, None, None, peer_source, cx) } /// A block or blob triggers the search of a parent. @@ -384,7 +375,7 @@ impl BlockLookups { } // `block_root_to_search` is a failed chain check happens inside new_current_lookup - self.new_current_lookup(block_root_to_search, None, None, peers, cx) + self.new_current_lookup(block_root_to_search, None, None, None, peers, cx) } /// A block triggers the search of a parent envelope. 
@@ -447,6 +438,7 @@ impl BlockLookups { block_root: Hash256, block_component: Option>, awaiting_parent: Option, + awaiting_parent_envelope: Option, peers: &[PeerId], cx: &mut SyncNetworkContext, ) -> bool { @@ -501,6 +493,9 @@ impl BlockLookups { // If we know that this lookup has unknown parent (is awaiting a parent lookup to resolve), // signal here to hold processing downloaded data. let mut lookup = SingleBlockLookup::new(block_root, peers, cx.next_id(), awaiting_parent); + if let Some(parent_root) = awaiting_parent_envelope { + lookup.set_awaiting_parent_envelope(parent_root); + } let _guard = lookup.span.clone().entered(); // Add block components to the new request @@ -777,6 +772,26 @@ impl BlockLookups { // We opt to drop the lookup instead. Action::Drop(format!("{e:?}")) } + BlockError::PayloadEnvelopeError { e, penalize_peer } => { + debug!( + ?block_root, + error = ?e, + "Payload envelope processing error" + ); + if penalize_peer { + let peer_group = request_state.on_processing_failure()?; + for peer in peer_group.all() { + cx.report_peer( + *peer, + PeerAction::MidToleranceError, + "lookup_envelope_processing_failure", + ); + } + Action::Retry + } else { + Action::Drop(format!("{e:?}")) + } + } other => { debug!( ?block_root, From f897215684c8b1a91fc0d95f991f8e1aee17a96f Mon Sep 17 00:00:00 2001 From: Eitan Seri- Levi Date: Fri, 3 Apr 2026 01:02:57 -0700 Subject: [PATCH 118/127] refactor awaiting_parent field and some metrics --- .../network/src/sync/block_lookups/mod.rs | 50 +++++++-------- .../src/sync/block_lookups/parent_chain.rs | 2 +- .../sync/block_lookups/single_block_lookup.rs | 64 ++++++++++++------- .../network/src/sync/network_context.rs | 4 ++ 4 files changed, 68 insertions(+), 52 deletions(-) diff --git a/beacon_node/network/src/sync/block_lookups/mod.rs b/beacon_node/network/src/sync/block_lookups/mod.rs index 8a183a0b1b3..8dedcba2f42 100644 --- a/beacon_node/network/src/sync/block_lookups/mod.rs +++ 
b/beacon_node/network/src/sync/block_lookups/mod.rs @@ -22,7 +22,9 @@ use self::parent_chain::{NodeChain, compute_parent_chains}; pub use self::single_block_lookup::DownloadResult; -use self::single_block_lookup::{LookupRequestError, LookupResult, SingleBlockLookup}; +use self::single_block_lookup::{ + AwaitingParent, LookupRequestError, LookupResult, SingleBlockLookup, +}; use super::manager::{BlockProcessType, BlockProcessingResult, SLOT_IMPORT_TOLERANCE}; use super::network_context::{PeerGroup, RpcResponseError, SyncNetworkContext}; use crate::metrics; @@ -216,8 +218,7 @@ impl BlockLookups { self.new_current_lookup( block_root, Some(block_component), - Some(parent_root), - None, + Some(AwaitingParent::Block(parent_root)), // On a `UnknownParentBlock` or `UnknownParentBlob` event the peer is not required // to have the rest of the block components (refer to decoupled blob gossip). Create // the lookup with zero peers to house the block components. @@ -250,8 +251,7 @@ impl BlockLookups { self.new_current_lookup( block_root, Some(block_component), - None, // not awaiting parent block - Some(parent_root), + Some(AwaitingParent::Envelope(parent_root)), &[], cx, ) @@ -270,7 +270,7 @@ impl BlockLookups { peer_source: &[PeerId], cx: &mut SyncNetworkContext, ) -> bool { - self.new_current_lookup(block_root, None, None, None, peer_source, cx) + self.new_current_lookup(block_root, None, None, peer_source, cx) } /// A block or blob triggers the search of a parent. @@ -375,7 +375,7 @@ impl BlockLookups { } // `block_root_to_search` is a failed chain check happens inside new_current_lookup - self.new_current_lookup(block_root_to_search, None, None, None, peers, cx) + self.new_current_lookup(block_root_to_search, None, None, peers, cx) } /// A block triggers the search of a parent envelope. 
@@ -437,8 +437,7 @@ impl BlockLookups { &mut self, block_root: Hash256, block_component: Option>, - awaiting_parent: Option, - awaiting_parent_envelope: Option, + awaiting_parent: Option, peers: &[PeerId], cx: &mut SyncNetworkContext, ) -> bool { @@ -473,13 +472,14 @@ impl BlockLookups { } // Ensure that awaiting parent exists, otherwise this lookup won't be able to make progress - if let Some(awaiting_parent) = awaiting_parent + if let Some(AwaitingParent::Block(parent_root) | AwaitingParent::Envelope(parent_root)) = + awaiting_parent && !self .single_block_lookups .iter() - .any(|(_, lookup)| lookup.is_for_block(awaiting_parent)) + .any(|(_, lookup)| lookup.is_for_block(parent_root)) { - warn!(block_root = ?awaiting_parent, "Ignoring child lookup parent lookup not found"); + warn!(block_root = ?parent_root, "Ignoring child lookup parent lookup not found"); return false; } @@ -493,9 +493,6 @@ impl BlockLookups { // If we know that this lookup has unknown parent (is awaiting a parent lookup to resolve), // signal here to hold processing downloaded data. 
let mut lookup = SingleBlockLookup::new(block_root, peers, cx.next_id(), awaiting_parent); - if let Some(parent_root) = awaiting_parent_envelope { - lookup.set_awaiting_parent_envelope(parent_root); - } let _guard = lookup.span.clone().entered(); // Add block components to the new request @@ -516,9 +513,7 @@ impl BlockLookups { debug!( ?peers, ?block_root, - awaiting_parent = awaiting_parent - .map(|root| root.to_string()) - .unwrap_or("none".to_owned()), + ?awaiting_parent, id = lookup.id, "Created block lookup" ); @@ -936,7 +931,7 @@ impl BlockLookups { let mut lookup_results = vec![]; // < need to buffer lookup results to not re-borrow &mut self for (id, lookup) in self.single_block_lookups.iter_mut() { - if lookup.awaiting_parent() == Some(block_root) { + if lookup.awaiting_parent_block() == Some(block_root) { lookup.resolve_awaiting_parent(); debug!( parent_root = ?block_root, @@ -964,7 +959,7 @@ impl BlockLookups { for (id, lookup) in self.single_block_lookups.iter_mut() { if lookup.awaiting_parent_envelope() == Some(block_root) { - lookup.resolve_awaiting_parent_envelope(); + lookup.resolve_awaiting_parent(); debug!( envelope_root = ?block_root, id, @@ -996,12 +991,13 @@ impl BlockLookups { metrics::inc_counter_vec(&metrics::SYNC_LOOKUP_DROPPED, &[reason]); self.metrics.dropped_lookups += 1; + let dropped_root = dropped_lookup.block_root(); let child_lookups = self .single_block_lookups .iter() .filter(|(_, lookup)| { - lookup.awaiting_parent() == Some(dropped_lookup.block_root()) - || lookup.awaiting_parent_envelope() == Some(dropped_lookup.block_root()) + lookup.awaiting_parent_block() == Some(dropped_root) + || lookup.awaiting_parent_envelope() == Some(dropped_root) }) .map(|(id, _)| *id) .collect::>(); @@ -1170,17 +1166,15 @@ impl BlockLookups { &'a self, lookup: &'a SingleBlockLookup, ) -> Result<&'a SingleBlockLookup, String> { - if let Some(awaiting_parent) = lookup.awaiting_parent() { + if let Some(parent_root) = lookup.awaiting_parent_block() { if 
let Some(lookup) = self .single_block_lookups .values() - .find(|l| l.block_root() == awaiting_parent) + .find(|l| l.block_root() == parent_root) { self.find_oldest_ancestor_lookup(lookup) } else { - Err(format!( - "Lookup references unknown parent {awaiting_parent:?}" - )) + Err(format!("Lookup references unknown parent {parent_root:?}")) } } else { Ok(lookup) @@ -1213,7 +1207,7 @@ impl BlockLookups { } } - if let Some(parent_root) = lookup.awaiting_parent() { + if let Some(parent_root) = lookup.awaiting_parent_block() { if let Some((&child_id, _)) = self .single_block_lookups .iter() diff --git a/beacon_node/network/src/sync/block_lookups/parent_chain.rs b/beacon_node/network/src/sync/block_lookups/parent_chain.rs index 5deea1dd94e..18363e9b8dc 100644 --- a/beacon_node/network/src/sync/block_lookups/parent_chain.rs +++ b/beacon_node/network/src/sync/block_lookups/parent_chain.rs @@ -13,7 +13,7 @@ impl From<&SingleBlockLookup> for Node { fn from(value: &SingleBlockLookup) -> Self { Self { block_root: value.block_root(), - parent_root: value.awaiting_parent(), + parent_root: value.awaiting_parent_block(), } } } diff --git a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs index d59753b9607..6687a1ec75e 100644 --- a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs +++ b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs @@ -58,6 +58,14 @@ pub enum LookupRequestError { }, } +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum AwaitingParent { + /// Waiting for the parent block to be imported. + Block(Hash256), + /// The parent block is imported but its execution payload envelope is missing. 
+ Envelope(Hash256), +} + #[derive(Educe)] #[educe(Debug(bound(T: BeaconChainTypes)))] pub struct SingleBlockLookup { @@ -71,8 +79,7 @@ pub struct SingleBlockLookup { #[educe(Debug(method(fmt_peer_set_as_len)))] peers: Arc>>, block_root: Hash256, - awaiting_parent: Option, - awaiting_parent_envelope: Option, + awaiting_parent: Option, created: Instant, pub(crate) span: Span, } @@ -93,7 +100,7 @@ impl SingleBlockLookup { requested_block_root: Hash256, peers: &[PeerId], id: Id, - awaiting_parent: Option, + awaiting_parent: Option, ) -> Self { let lookup_span = debug_span!( "lh_single_block_lookup", @@ -108,7 +115,6 @@ impl SingleBlockLookup { peers: Arc::new(RwLock::new(HashSet::from_iter(peers.iter().copied()))), block_root: requested_block_root, awaiting_parent, - awaiting_parent_envelope: None, created: Instant::now(), span: lookup_span, } @@ -131,7 +137,16 @@ impl SingleBlockLookup { /// Reset the status of all internal requests pub fn reset_requests(&mut self) { self.block_request_state = BlockRequestState::new(self.block_root); - self.component_requests = ComponentRequests::WaitingForBlock; + match &self.component_requests { + ComponentRequests::ActiveEnvelopeRequest(_) => { + self.component_requests = ComponentRequests::ActiveEnvelopeRequest( + EnvelopeRequestState::new(self.block_root), + ); + } + _ => { + self.component_requests = ComponentRequests::WaitingForBlock; + } + } } /// Return the slot of this lookup's block if it's currently cached as `AwaitingProcessing` @@ -147,34 +162,39 @@ impl SingleBlockLookup { self.block_root } - pub fn awaiting_parent(&self) -> Option { + pub fn awaiting_parent(&self) -> Option { self.awaiting_parent } - /// Mark this lookup as awaiting a parent lookup from being processed. Meanwhile don't send - /// components for processing. - pub fn set_awaiting_parent(&mut self, parent_root: Hash256) { - self.awaiting_parent = Some(parent_root) + /// Returns the parent root if awaiting a parent block. 
+ pub fn awaiting_parent_block(&self) -> Option { + match self.awaiting_parent { + Some(AwaitingParent::Block(root)) => Some(root), + _ => None, + } } - /// Mark this lookup as no longer awaiting a parent lookup. Components can be sent for - /// processing. - pub fn resolve_awaiting_parent(&mut self) { - self.awaiting_parent = None; + /// Returns the parent root if awaiting a parent envelope. + pub fn awaiting_parent_envelope(&self) -> Option { + match self.awaiting_parent { + Some(AwaitingParent::Envelope(root)) => Some(root), + _ => None, + } } - pub fn awaiting_parent_envelope(&self) -> Option { - self.awaiting_parent_envelope + /// Mark this lookup as awaiting a parent block to be imported before processing. + pub fn set_awaiting_parent(&mut self, parent_root: Hash256) { + self.awaiting_parent = Some(AwaitingParent::Block(parent_root)); } /// Mark this lookup as awaiting a parent envelope to be imported before processing. pub fn set_awaiting_parent_envelope(&mut self, parent_root: Hash256) { - self.awaiting_parent_envelope = Some(parent_root); + self.awaiting_parent = Some(AwaitingParent::Envelope(parent_root)); } - /// Mark this lookup as no longer awaiting a parent envelope. - pub fn resolve_awaiting_parent_envelope(&mut self) { - self.awaiting_parent_envelope = None; + /// Mark this lookup as no longer awaiting any parent. 
+ pub fn resolve_awaiting_parent(&mut self) { + self.awaiting_parent = None; } /// Returns the time elapsed since this lookup was created @@ -219,7 +239,6 @@ impl SingleBlockLookup { /// Returns true if this request is expecting some event to make progress pub fn is_awaiting_event(&self) -> bool { self.awaiting_parent.is_some() - || self.awaiting_parent_envelope.is_some() || self.block_request_state.state.is_awaiting_event() || match &self.component_requests { // If components are waiting for the block request to complete, here we should @@ -328,8 +347,7 @@ impl SingleBlockLookup { expected_blobs: usize, ) -> Result<(), LookupRequestError> { let id = self.id; - let awaiting_event = - self.awaiting_parent.is_some() || self.awaiting_parent_envelope.is_some(); + let awaiting_event = self.awaiting_parent.is_some(); let request = R::request_state_mut(self).map_err(|e| LookupRequestError::BadState(e.to_owned()))?; diff --git a/beacon_node/network/src/sync/network_context.rs b/beacon_node/network/src/sync/network_context.rs index 328940d6729..1176442202f 100644 --- a/beacon_node/network/src/sync/network_context.rs +++ b/beacon_node/network/src/sync/network_context.rs @@ -1917,6 +1917,10 @@ impl SyncNetworkContext { "data_columns_by_range", self.data_columns_by_range_requests.len(), ), + ( + "payload_envelopes_by_root", + self.payload_envelopes_by_root_requests.len(), + ), ("custody_by_root", self.custody_by_root_requests.len()), ( "components_by_range", From b333841229c45349bf5226f7c12af77a577b50b6 Mon Sep 17 00:00:00 2001 From: Eitan Seri- Levi Date: Fri, 3 Apr 2026 01:04:34 -0700 Subject: [PATCH 119/127] update --- beacon_node/network/src/sync/block_lookups/mod.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/beacon_node/network/src/sync/block_lookups/mod.rs b/beacon_node/network/src/sync/block_lookups/mod.rs index 8dedcba2f42..27d96de51d7 100644 --- a/beacon_node/network/src/sync/block_lookups/mod.rs +++ 
b/beacon_node/network/src/sync/block_lookups/mod.rs @@ -246,8 +246,8 @@ impl BlockLookups { let envelope_lookup_exists = self.search_parent_envelope_of_child(parent_root, &[peer_id], cx); if envelope_lookup_exists { - // Create child lookup that waits for the parent envelope (not parent block). - // The child block itself is available, so we pass it as a component. + // Create child lookup that waits for the parent envelope. + // The child block itself has already been seen, so we pass it as a component. self.new_current_lookup( block_root, Some(block_component), From 5472c300dca2772ed3d1b0600ca3ba09ba1d51c7 Mon Sep 17 00:00:00 2001 From: Eitan Seri- Levi Date: Sat, 4 Apr 2026 00:16:09 -0700 Subject: [PATCH 120/127] Relax requirements that a checkpoint state must be epoch aligned post-gloas --- .../src/beacon_fork_choice_store.rs | 5 ++- beacon_node/beacon_chain/src/builder.rs | 23 ++++++++-- beacon_node/store/src/errors.rs | 1 + beacon_node/store/src/hot_cold_store.rs | 19 ++++++++ beacon_node/store/src/state_cache.rs | 45 +++++++++++++++++++ consensus/fork_choice/src/fork_choice.rs | 5 ++- 6 files changed, 90 insertions(+), 8 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs b/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs index 95fde28f5b2..80f3be7565b 100644 --- a/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs +++ b/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs @@ -172,8 +172,9 @@ where let mut anchor_state = anchor.beacon_state; let mut anchor_block_header = anchor_state.latest_block_header().clone(); - // The anchor state MUST be on an epoch boundary (it should be advanced by the caller). - if !anchor_state + // Pre-gloas the anchor state MUST be on an epoch boundary (it should be advanced by the caller). + // Post-gloas this requirement is relaxed. 
+ if !anchor_state.fork_name_unchecked().gloas_enabled() && !anchor_state .slot() .as_u64() .is_multiple_of(E::slots_per_epoch()) diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index 11b87351b19..5920243c506 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -42,6 +42,7 @@ use store::{Error as StoreError, HotColdDB, ItemStore, KeyValueStoreOp}; use task_executor::{ShutdownReason, TaskExecutor}; use tracing::{debug, error, info, warn}; use tree_hash::TreeHash; +use types::StatePayloadStatus; use types::data::CustodyIndex; use types::{ BeaconBlock, BeaconState, BlobSidecarList, ChainSpec, ColumnIndex, DataColumnSidecarList, @@ -433,9 +434,15 @@ where .clone() .ok_or("weak_subjectivity_state requires a store")?; - // Ensure the state is advanced to an epoch boundary. + // Pre-gloas ensure the state is advanced to an epoch boundary. + // Post-gloas checkpoint states are always pending (post-block) and cannot + // be advanced across epoch boundaries without first checking for a payload + // envelope. let slots_per_epoch = E::slots_per_epoch(); - if weak_subj_state.slot() % slots_per_epoch != 0 { + + if !weak_subj_state.fork_name_unchecked().gloas_enabled() + && weak_subj_state.slot() % slots_per_epoch != 0 + { debug!( state_slot = %weak_subj_state.slot(), block_slot = %weak_subj_block.slot(), @@ -568,7 +575,7 @@ where // Write the state, block and blobs non-atomically, it doesn't matter if they're forgotten // about on a crash restart. 
store - .update_finalized_state( + .set_initial_finalized_state( weak_subj_state_root, weak_subj_block_root, weak_subj_state.clone(), @@ -617,7 +624,15 @@ where .map_err(|e| format!("Failed to initialize data column info: {:?}", e))?, ); - // TODO(gloas): add check that checkpoint state is Pending + if weak_subj_state.fork_name_unchecked().gloas_enabled() + && weak_subj_state.payload_status() != StatePayloadStatus::Pending + { + return Err(format!( + "Checkpoint sync state must be Pending (post-block) for Gloas, got {:?}", + weak_subj_state.payload_status() + )); + } + let snapshot = BeaconSnapshot { beacon_block_root: weak_subj_block_root, execution_envelope: None, diff --git a/beacon_node/store/src/errors.rs b/beacon_node/store/src/errors.rs index a07cc838863..e403df483a4 100644 --- a/beacon_node/store/src/errors.rs +++ b/beacon_node/store/src/errors.rs @@ -101,6 +101,7 @@ pub enum Error { from_state_slot: Slot, target_slot: Slot, }, + FinalizedStateAlreadySet, } pub trait HandleUnavailable { diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index 78dd69e55a2..83aa3f0cc41 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -474,6 +474,25 @@ impl, Cold: ItemStore> HotColdDB } } + /// See [`StateCache::set_initial_finalized_state`](crate::state_cache::StateCache::set_initial_finalized_state). 
+ pub fn set_initial_finalized_state( + &self, + state_root: Hash256, + block_root: Hash256, + state: BeaconState, + ) -> Result<(), Error> { + let start_slot = self.get_anchor_info().anchor_slot; + let pre_finalized_slots_to_retain = self + .hierarchy + .closest_layer_points(state.slot(), start_slot); + self.state_cache.lock().set_initial_finalized_state( + state_root, + block_root, + state, + &pre_finalized_slots_to_retain, + ) + } + pub fn update_finalized_state( &self, state_root: Hash256, diff --git a/beacon_node/store/src/state_cache.rs b/beacon_node/store/src/state_cache.rs index d016922adeb..afe909a45cb 100644 --- a/beacon_node/store/src/state_cache.rs +++ b/beacon_node/store/src/state_cache.rs @@ -124,6 +124,36 @@ impl StateCache { roots } + /// Used by checkpoint sync to initialize the finalized state in the state cache. + /// + /// Post-gloas the checkpoint state may not be epoch-aligned, e.g when the epoch boundary + /// slot is skipped. We relax the epoch-alignment requirement for the initial state only. + /// Runtime finalization updates should use [`update_finalized_state`](Self::update_finalized_state), + /// which enforces alignment. 
+ pub fn set_initial_finalized_state( + &mut self, + state_root: Hash256, + block_root: Hash256, + state: BeaconState, + pre_finalized_slots_to_retain: &[Slot], + ) -> Result<(), Error> { + if self.finalized_state.is_some() { + return Err(Error::FinalizedStateAlreadySet); + } + + if !state.fork_name_unchecked().gloas_enabled() && state.slot() % E::slots_per_epoch() != 0 + { + return Err(Error::FinalizedStateUnaligned); + } + + self.update_finalized_state_inner( + state_root, + block_root, + state, + pre_finalized_slots_to_retain, + ) + } + pub fn update_finalized_state( &mut self, state_root: Hash256, @@ -135,6 +165,21 @@ impl StateCache { return Err(Error::FinalizedStateUnaligned); } + self.update_finalized_state_inner( + state_root, + block_root, + state, + pre_finalized_slots_to_retain, + ) + } + + fn update_finalized_state_inner( + &mut self, + state_root: Hash256, + block_root: Hash256, + state: BeaconState, + pre_finalized_slots_to_retain: &[Slot], + ) -> Result<(), Error> { if self .finalized_state .as_ref() diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index 92fd4c1faf3..0a734748cd1 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -396,8 +396,9 @@ where current_slot: Option, spec: &ChainSpec, ) -> Result> { - // Sanity check: the anchor must lie on an epoch boundary. - if anchor_state.slot() % E::slots_per_epoch() != 0 { + // Pre-gloas sanity check: the anchor must lie on an epoch boundary. 
+ // Post-gloas we relax this requirement + if !anchor_state.fork_name_unchecked().gloas_enabled() && anchor_state.slot() % E::slots_per_epoch() != 0 { return Err(Error::InvalidAnchor { block_slot: anchor_block.slot(), state_slot: anchor_state.slot(), From 9306767d1a85d40aad92a388083687cec83563ef Mon Sep 17 00:00:00 2001 From: Eitan Seri- Levi Date: Sat, 4 Apr 2026 00:41:03 -0700 Subject: [PATCH 121/127] add test --- beacon_node/beacon_chain/tests/store_tests.rs | 147 ++++++++++++++++++ 1 file changed, 147 insertions(+) diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index c6e13bd160b..68aaa9c1120 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -5465,6 +5465,153 @@ fn check_finalization(harness: &TestHarness, expected_slot: u64) { ); } +// Checkpoint sync with a Gloas Pending state at a non-epoch-boundary slot. +// +// Post-Gloas, the finalized state is always the post-block (Pending) state. +// If the epoch boundary slot is skipped, the checkpoint state will not be +// epoch-aligned. This test verifies that checkpoint sync accepts such states +// and builds the chain correctly. +#[tokio::test] +async fn weak_subjectivity_sync_gloas_pending_non_aligned() { + if !fork_name_from_env().is_some_and(|f| f.gloas_enabled()) { + return; + } + + let spec = test_spec::(); + + // Build a chain with a skipped slot at the epoch boundary. + // For MinimalEthSpec (8 slots/epoch), skip slot 8 so the last block before + // the epoch boundary is at slot 7 (not epoch-aligned). + let epoch_boundary_slot = E::slots_per_epoch(); + let num_initial_slots = E::slots_per_epoch() * 4; + let checkpoint_slot = Slot::new(epoch_boundary_slot); + + let slots = (1..num_initial_slots) + .map(Slot::new) + .filter(|&slot| { + // Skip the epoch boundary slot so the checkpoint resolves to the + // block at slot epoch_boundary - 1. 
+ slot.as_u64() != epoch_boundary_slot + }) + .collect::>(); + + let temp1 = tempdir().unwrap(); + let full_store = get_store_generic(&temp1, StoreConfig::default(), spec.clone()); + let harness = get_harness_import_all_data_columns(full_store.clone(), LOW_VALIDATOR_COUNT); + let all_validators = (0..LOW_VALIDATOR_COUNT).collect::>(); + + let (genesis_state, genesis_state_root) = harness.get_current_state_and_root(); + harness + .add_attested_blocks_at_slots( + genesis_state.clone(), + genesis_state_root, + &slots, + &all_validators, + ) + .await; + + // Extract the checkpoint block and its Pending (post-block) state. + let wss_block_root = harness + .chain + .block_root_at_slot(checkpoint_slot, WhenSlotSkipped::Prev) + .unwrap() + .unwrap(); + let wss_block = harness + .chain + .store + .get_full_block(&wss_block_root) + .unwrap() + .unwrap(); + + // The block's state_root points to the Pending state in Gloas. + let wss_state_root = wss_block.state_root(); + let wss_state = full_store + .get_state(&wss_state_root, Some(wss_block.slot()), CACHE_STATE_IN_TESTS) + .unwrap() + .unwrap(); + + // Verify test preconditions: state is Pending and not epoch-aligned. + assert_eq!( + wss_state.payload_status(), + StatePayloadStatus::Pending, + "Checkpoint state should be Pending (post-block, pre-payload)" + ); + assert_ne!( + wss_state.slot() % E::slots_per_epoch(), + 0, + "Test invalid: checkpoint state is epoch-aligned, expected non-aligned" + ); + + let wss_blobs_opt = harness + .chain + .get_or_reconstruct_blobs(&wss_block_root) + .unwrap(); + + // Build a new chain from the non-aligned Pending checkpoint state. 
+ let temp2 = tempdir().unwrap(); + let store = get_store_generic(&temp2, StoreConfig::default(), spec.clone()); + + let slot_clock = TestingSlotClock::new( + Slot::new(0), + Duration::from_secs(harness.chain.genesis_time), + spec.get_slot_duration(), + ); + slot_clock.set_slot(harness.get_current_slot().as_u64()); + + let chain_config = ChainConfig { + archive: true, + ..ChainConfig::default() + }; + + let trusted_setup = get_kzg(&spec); + let (shutdown_tx, _shutdown_rx) = futures::channel::mpsc::channel(1); + let mock = mock_execution_layer_from_parts( + harness.spec.clone(), + harness.runtime.task_executor.clone(), + ); + + let beacon_chain = BeaconChainBuilder::>::new(MinimalEthSpec, trusted_setup) + .chain_config(chain_config) + .store(store.clone()) + .custom_spec(spec.clone().into()) + .task_executor(harness.chain.task_executor.clone()) + .weak_subjectivity_state( + wss_state, + wss_block, + wss_blobs_opt, + genesis_state, + ) + .unwrap() + .store_migrator_config(MigratorConfig::default().blocking()) + .slot_clock(slot_clock) + .shutdown_sender(shutdown_tx) + .event_handler(Some(ServerSentEventHandler::new_with_capacity(1))) + .execution_layer(Some(mock.el)) + .ordered_custody_column_indices(generate_data_column_indices_rand_order::()) + .rng(Box::new(StdRng::seed_from_u64(42))) + .build(); + + assert!( + beacon_chain.is_ok(), + "Beacon chain should build from non-aligned Gloas Pending checkpoint state. Error: {:?}", + beacon_chain.err() + ); + + let chain = beacon_chain.unwrap(); + + // The head state should be at the block's slot (not advanced to the epoch boundary). 
+ assert_eq!( + chain.head_snapshot().beacon_state.slot(), + Slot::new(epoch_boundary_slot - 1), + "Head state should be at the checkpoint block's slot" + ); + assert_eq!( + chain.head_snapshot().beacon_state.payload_status(), + StatePayloadStatus::Pending, + "Head state should be Pending after checkpoint sync" + ); +} + // ===================== Gloas Store Tests ===================== /// Test basic Gloas block + envelope storage and retrieval. From a12969a4d247342e03ee930b99fabd08901211c1 Mon Sep 17 00:00:00 2001 From: Eitan Seri- Levi Date: Sat, 4 Apr 2026 01:02:55 -0700 Subject: [PATCH 122/127] Clean up --- .../src/beacon_fork_choice_store.rs | 9 +++-- beacon_node/beacon_chain/tests/store_tests.rs | 39 +++++++++---------- beacon_node/store/src/state_cache.rs | 4 +- consensus/fork_choice/src/fork_choice.rs | 4 +- 4 files changed, 27 insertions(+), 29 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs b/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs index 80f3be7565b..8fc771aa7d0 100644 --- a/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs +++ b/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs @@ -174,10 +174,11 @@ where // Pre-gloas the anchor state MUST be on an epoch boundary (it should be advanced by the caller). // Post-gloas this requirement is relaxed. 
- if !anchor_state.fork_name_unchecked().gloas_enabled() && !anchor_state - .slot() - .as_u64() - .is_multiple_of(E::slots_per_epoch()) + if !anchor_state.fork_name_unchecked().gloas_enabled() + && !anchor_state + .slot() + .as_u64() + .is_multiple_of(E::slots_per_epoch()) { return Err(Error::UnalignedCheckpoint { block_slot: anchor_block_header.slot, diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index 68aaa9c1120..d5f1705ffe8 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -5465,12 +5465,14 @@ fn check_finalization(harness: &TestHarness, expected_slot: u64) { ); } -// Checkpoint sync with a Gloas Pending state at a non-epoch-boundary slot. +// Verify that post-gloas checkpoint sync accepts a non-epoch aligned state and builds +// the chain. // -// Post-Gloas, the finalized state is always the post-block (Pending) state. -// If the epoch boundary slot is skipped, the checkpoint state will not be -// epoch-aligned. This test verifies that checkpoint sync accepts such states -// and builds the chain correctly. +// Since post-gloas checkpoint sync states are always the post block state, if the epoch boundary +// slot is skipped, we'll receive a checkpoint state that is not epoch aligned. +// +// Example: slot `n` is the epoch boundary slot and is skipped. We'll receive the post block state for +// slot `n - 1`. This is the state before the payload for slot `n - 1` was processed. #[tokio::test] async fn weak_subjectivity_sync_gloas_pending_non_aligned() { if !fork_name_from_env().is_some_and(|f| f.gloas_enabled()) { @@ -5480,8 +5482,6 @@ async fn weak_subjectivity_sync_gloas_pending_non_aligned() { let spec = test_spec::(); // Build a chain with a skipped slot at the epoch boundary. - // For MinimalEthSpec (8 slots/epoch), skip slot 8 so the last block before - // the epoch boundary is at slot 7 (not epoch-aligned). 
let epoch_boundary_slot = E::slots_per_epoch(); let num_initial_slots = E::slots_per_epoch() * 4; let checkpoint_slot = Slot::new(epoch_boundary_slot); @@ -5489,8 +5489,7 @@ async fn weak_subjectivity_sync_gloas_pending_non_aligned() { let slots = (1..num_initial_slots) .map(Slot::new) .filter(|&slot| { - // Skip the epoch boundary slot so the checkpoint resolves to the - // block at slot epoch_boundary - 1. + // Skip the epoch boundary slot slot.as_u64() != epoch_boundary_slot }) .collect::>(); @@ -5510,7 +5509,7 @@ async fn weak_subjectivity_sync_gloas_pending_non_aligned() { ) .await; - // Extract the checkpoint block and its Pending (post-block) state. + // Extract the checkpoint block and its Pending state. let wss_block_root = harness .chain .block_root_at_slot(checkpoint_slot, WhenSlotSkipped::Prev) @@ -5526,20 +5525,23 @@ async fn weak_subjectivity_sync_gloas_pending_non_aligned() { // The block's state_root points to the Pending state in Gloas. let wss_state_root = wss_block.state_root(); let wss_state = full_store - .get_state(&wss_state_root, Some(wss_block.slot()), CACHE_STATE_IN_TESTS) + .get_state( + &wss_state_root, + Some(wss_block.slot()), + CACHE_STATE_IN_TESTS, + ) .unwrap() .unwrap(); - // Verify test preconditions: state is Pending and not epoch-aligned. 
assert_eq!( wss_state.payload_status(), StatePayloadStatus::Pending, - "Checkpoint state should be Pending (post-block, pre-payload)" + "Checkpoint state should be Pending" ); assert_ne!( wss_state.slot() % E::slots_per_epoch(), 0, - "Test invalid: checkpoint state is epoch-aligned, expected non-aligned" + "Checkpoint state is epoch-aligned, expected non-aligned" ); let wss_blobs_opt = harness @@ -5575,12 +5577,7 @@ async fn weak_subjectivity_sync_gloas_pending_non_aligned() { .store(store.clone()) .custom_spec(spec.clone().into()) .task_executor(harness.chain.task_executor.clone()) - .weak_subjectivity_state( - wss_state, - wss_block, - wss_blobs_opt, - genesis_state, - ) + .weak_subjectivity_state(wss_state, wss_block, wss_blobs_opt, genesis_state) .unwrap() .store_migrator_config(MigratorConfig::default().blocking()) .slot_clock(slot_clock) @@ -5599,7 +5596,7 @@ async fn weak_subjectivity_sync_gloas_pending_non_aligned() { let chain = beacon_chain.unwrap(); - // The head state should be at the block's slot (not advanced to the epoch boundary). + // The head state should be at the block's slot assert_eq!( chain.head_snapshot().beacon_state.slot(), Slot::new(epoch_boundary_slot - 1), diff --git a/beacon_node/store/src/state_cache.rs b/beacon_node/store/src/state_cache.rs index afe909a45cb..288b0a7d699 100644 --- a/beacon_node/store/src/state_cache.rs +++ b/beacon_node/store/src/state_cache.rs @@ -127,9 +127,7 @@ impl StateCache { /// Used by checkpoint sync to initialize the finalized state in the state cache. /// /// Post-gloas the checkpoint state may not be epoch-aligned, e.g when the epoch boundary - /// slot is skipped. We relax the epoch-alignment requirement for the initial state only. - /// Runtime finalization updates should use [`update_finalized_state`](Self::update_finalized_state), - /// which enforces alignment. + /// slot is skipped. Regular finalization updates should use `update_finalized_state`. 
pub fn set_initial_finalized_state( &mut self, state_root: Hash256, diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index 0a734748cd1..5f2c3fc8615 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -398,7 +398,9 @@ where ) -> Result> { // Pre-gloas sanity check: the anchor must lie on an epoch boundary. // Post-gloas we relax this requirement - if !anchor_state.fork_name_unchecked().gloas_enabled() && anchor_state.slot() % E::slots_per_epoch() != 0 { + if !anchor_state.fork_name_unchecked().gloas_enabled() + && anchor_state.slot() % E::slots_per_epoch() != 0 + { return Err(Error::InvalidAnchor { block_slot: anchor_block.slot(), state_slot: anchor_state.slot(), From 9c825cf2e1ca56eadac9a6f609e4157764c8733d Mon Sep 17 00:00:00 2001 From: Eitan Seri- Levi Date: Sat, 4 Apr 2026 01:19:02 -0700 Subject: [PATCH 123/127] Fmt --- beacon_node/beacon_chain/tests/store_tests.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index d5f1705ffe8..b98a3f4cffb 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -5466,12 +5466,12 @@ fn check_finalization(harness: &TestHarness, expected_slot: u64) { } // Verify that post-gloas checkpoint sync accepts a non-epoch aligned state and builds -// the chain. +// the chain. // // Since post-gloas checkpoint sync states are always the post block state, if the epoch boundary // slot is skipped, we'll receive a checkpoint state that is not epoch aligned. // -// Example: slot `n` is the epoch boundary slot and is skipped. We'll receive the post block state for +// Example: slot `n` is the epoch boundary slot and is skipped. We'll receive the post block state for // slot `n - 1`. This is the state before the payload for slot `n - 1` was processed. 
#[tokio::test] async fn weak_subjectivity_sync_gloas_pending_non_aligned() { From 0d36ee0fbee3c5e58caa900e5380609806a7929f Mon Sep 17 00:00:00 2001 From: Eitan Seri- Levi Date: Sun, 5 Apr 2026 01:01:27 -0700 Subject: [PATCH 124/127] testing --- beacon_node/store/src/hot_cold_store.rs | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index e0fd0c48f87..b6564bed1b6 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -1899,14 +1899,18 @@ impl, Cold: ItemStore> HotColdDB return Ok(StatePayloadStatus::Pending); } + // If the latest block was at this slot, the state is definitively `Pending` (post-block, + // pre-payload). Check this before loading the previous summary to avoid errors when the + // previous state doesn't exist (e.g. checkpoint sync where only one state is stored). + if summary.slot == summary.latest_block_slot { + return Ok(StatePayloadStatus::Pending); + } + // Load the hot state summary for the previous state. // // If it has the same slot as this summary then we know this summary is for a `Full` state // (payload state), because they are always diffed against their same-slot `Pending` state. // - // If the previous summary has a different slot AND the latest block is from `summary.slot`, - // then this state *must* be `Pending` (it is the summary for latest block itself). - // // Otherwise, we are at a skipped slot and must traverse the graph of state summaries // backwards until we reach a summary for the latest block. This recursion could be quite // far in the case of a long skip. 
We could optimise this in future using the @@ -1918,8 +1922,6 @@ impl, Cold: ItemStore> HotColdDB if previous_state_summary.slot == summary.slot { Ok(StatePayloadStatus::Full) - } else if summary.slot == summary.latest_block_slot { - Ok(StatePayloadStatus::Pending) } else { self.get_hot_state_summary_payload_status(&previous_state_summary) } From c8f69b5a736db41c4ed38d0eca78b2629c8c2930 Mon Sep 17 00:00:00 2001 From: Eitan Seri- Levi Date: Sun, 5 Apr 2026 02:15:57 -0700 Subject: [PATCH 125/127] Temp dart throws --- .../network_beacon_processor/sync_methods.rs | 57 +++++++++++++------ .../network/src/sync/backfill_sync/mod.rs | 3 +- beacon_node/network/src/sync/manager.rs | 39 ++++++++++++- .../network/src/sync/range_sync/chain.rs | 17 +++++- .../src/sync/range_sync/chain_collection.rs | 26 ++++++++- .../network/src/sync/range_sync/range.rs | 9 +++ 6 files changed, 129 insertions(+), 22 deletions(-) diff --git a/beacon_node/network/src/network_beacon_processor/sync_methods.rs b/beacon_node/network/src/network_beacon_processor/sync_methods.rs index 57d3d7d2206..3f98b6c28fe 100644 --- a/beacon_node/network/src/network_beacon_processor/sync_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/sync_methods.rs @@ -41,6 +41,7 @@ pub enum ChainSegmentProcessId { BackSyncBatchId(Epoch), } +/// Returned when a chain segment import fails. /// Returned when a chain segment import fails. struct ChainSegmentFailed { /// To be displayed in logs. @@ -49,6 +50,13 @@ struct ChainSegmentFailed { peer_action: Option, } +/// Result of processing a batch of blocks. +enum BlockBatchResult { + Ok { imported_blocks: usize }, + ParentEnvelopeUnknown { parent_root: Hash256 }, + Err { imported_blocks: usize, failed: Option }, +} + impl NetworkBeaconProcessor { /// Returns an async closure which processes a beacon block received via RPC. 
/// @@ -633,7 +641,7 @@ impl NetworkBeaconProcessor { .process_blocks(downloaded_blocks.iter(), notify_execution_layer) .await { - (imported_blocks, Ok(_)) => { + BlockBatchResult::Ok { imported_blocks } => { debug!( batch_epoch = %epoch, first_block_slot = start_slot, @@ -647,17 +655,27 @@ impl NetworkBeaconProcessor { imported_blocks, } } - (imported_blocks, Err(e)) => { - debug!( - batch_epoch = %epoch, - first_block_slot = start_slot, - chain = chain_id, - last_block_slot = end_slot, - imported_blocks, - error = %e.message, - service = "sync", - "Batch processing failed"); - match e.peer_action { + BlockBatchResult::ParentEnvelopeUnknown { parent_root } => { + warn!( + batch_epoch = %epoch, + ?parent_root, + "Batch processing paused: parent envelope unknown" + ); + BatchProcessResult::ParentEnvelopeUnknown { parent_root } + } + BlockBatchResult::Err { imported_blocks, failed } => { + if let Some(e) = &failed { + debug!( + batch_epoch = %epoch, + first_block_slot = start_slot, + chain = chain_id, + last_block_slot = end_slot, + imported_blocks, + error = %e.message, + service = "sync", + "Batch processing failed"); + } + match failed.and_then(|e| e.peer_action) { Some(penalty) => BatchProcessResult::FaultyFailure { imported_blocks, penalty, @@ -758,7 +776,7 @@ impl NetworkBeaconProcessor { &self, downloaded_blocks: impl Iterator>, notify_execution_layer: NotifyExecutionLayer, - ) -> (usize, Result<(), ChainSegmentFailed>) { + ) -> BlockBatchResult { let blocks: Vec<_> = downloaded_blocks.cloned().collect(); match self .chain @@ -770,18 +788,25 @@ impl NetworkBeaconProcessor { if !imported_blocks.is_empty() { self.chain.recompute_head_at_current_slot().await; } - (imported_blocks.len(), Ok(())) + BlockBatchResult::Ok { imported_blocks: imported_blocks.len() } } ChainSegmentResult::Failed { imported_blocks, error, } => { metrics::inc_counter(&metrics::BEACON_PROCESSOR_CHAIN_SEGMENT_FAILED_TOTAL); - let r = self.handle_failed_chain_segment(error); if 
!imported_blocks.is_empty() { self.chain.recompute_head_at_current_slot().await; } - (imported_blocks.len(), r) + // Intercept ParentEnvelopeUnknown before normal error handling. + if let BlockError::ParentEnvelopeUnknown { parent_root } = error { + return BlockBatchResult::ParentEnvelopeUnknown { parent_root }; + } + let r = self.handle_failed_chain_segment(error); + BlockBatchResult::Err { + imported_blocks: imported_blocks.len(), + failed: r.err(), + } } } } diff --git a/beacon_node/network/src/sync/backfill_sync/mod.rs b/beacon_node/network/src/sync/backfill_sync/mod.rs index 29beb96e5a5..4fef78a47a0 100644 --- a/beacon_node/network/src/sync/backfill_sync/mod.rs +++ b/beacon_node/network/src/sync/backfill_sync/mod.rs @@ -672,7 +672,8 @@ impl BackFillSync { } } } - BatchProcessResult::NonFaultyFailure => { + BatchProcessResult::NonFaultyFailure + | BatchProcessResult::ParentEnvelopeUnknown { .. } => { if let Err(e) = batch.processing_completed(BatchProcessingResult::NonFaultyFailure) { self.fail_sync(BackFillError::BatchInvalidState(batch_id, e.0))?; diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index c1c1029446b..da67dd5dc4f 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -233,6 +233,11 @@ pub enum BatchProcessResult { penalty: PeerAction, }, NonFaultyFailure, + /// The batch processing failed because the parent block's execution payload envelope + /// is not yet available. The chain should pause until the envelope is fetched. + ParentEnvelopeUnknown { + parent_root: Hash256, + }, } /// The result of processing multiple data columns. @@ -972,9 +977,20 @@ impl SyncManager { SyncMessage::BlockComponentProcessed { process_type, result, - } => self - .block_lookups - .on_processing_result(process_type, result, &mut self.network), + } => { + // If a payload envelope was successfully imported, resume any range + // sync chains that were waiting for it. 
+ if let BlockProcessType::SinglePayloadEnvelope { block_root, .. } = &process_type { + if matches!(&result, BlockProcessingResult::Ok(_)) { + self.range_sync.resume_chains_awaiting_envelope( + *block_root, + &mut self.network, + ); + } + } + self.block_lookups + .on_processing_result(process_type, result, &mut self.network) + } SyncMessage::GossipBlockProcessResult { block_root, imported, @@ -985,6 +1001,23 @@ impl SyncManager { ), SyncMessage::BatchProcessed { sync_type, result } => match sync_type { ChainSegmentProcessId::RangeBatchId(chain_id, epoch) => { + // If the batch failed due to a missing parent envelope, trigger + // an envelope lookup before pausing the chain. + if let BatchProcessResult::ParentEnvelopeUnknown { parent_root } = &result { + let peers: Vec<_> = self + .network + .network_globals() + .peers + .read() + .synced_peers() + .cloned() + .collect(); + let _ = self.block_lookups.search_parent_envelope_of_child( + *parent_root, + &peers, + &mut self.network, + ); + } self.range_sync.handle_block_process_result( &mut self.network, chain_id, diff --git a/beacon_node/network/src/sync/range_sync/chain.rs b/beacon_node/network/src/sync/range_sync/chain.rs index d533d8ed0db..bd2c7af3851 100644 --- a/beacon_node/network/src/sync/range_sync/chain.rs +++ b/beacon_node/network/src/sync/range_sync/chain.rs @@ -156,6 +156,8 @@ pub enum ChainSyncingState { Stopped, /// The chain is undergoing syncing. Syncing, + /// The chain is paused waiting for a parent envelope to be fetched. + AwaitingEnvelope { parent_root: Hash256 }, } impl SyncingChain { @@ -639,6 +641,19 @@ impl SyncingChain { // Simply re-download all batches in `AwaitingDownload` state. self.attempt_send_awaiting_download_batches(network, "non-faulty-failure") } + BatchProcessResult::ParentEnvelopeUnknown { parent_root } => { + batch.processing_completed(BatchProcessingResult::NonFaultyFailure)?; + + // Pause the chain until the missing parent envelope is fetched. 
+ debug!( + ?parent_root, + "Chain paused: awaiting parent envelope" + ); + self.state = ChainSyncingState::AwaitingEnvelope { + parent_root: *parent_root, + }; + Ok(KeepChain) + } } } @@ -1175,7 +1190,7 @@ impl SyncingChain { pub fn is_syncing(&self) -> bool { match self.state { ChainSyncingState::Syncing => true, - ChainSyncingState::Stopped => false, + ChainSyncingState::Stopped | ChainSyncingState::AwaitingEnvelope { .. } => false, } } diff --git a/beacon_node/network/src/sync/range_sync/chain_collection.rs b/beacon_node/network/src/sync/range_sync/chain_collection.rs index a087fdecdf8..7a642f1e025 100644 --- a/beacon_node/network/src/sync/range_sync/chain_collection.rs +++ b/beacon_node/network/src/sync/range_sync/chain_collection.rs @@ -3,7 +3,7 @@ //! Each chain type is stored in it's own map. A variety of helper functions are given along with //! this struct to simplify the logic of the other layers of sync. -use super::chain::{ChainId, ProcessingResult, RemoveChain, SyncingChain}; +use super::chain::{ChainId, ChainSyncingState, ProcessingResult, RemoveChain, SyncingChain}; use super::sync_type::RangeSyncType; use crate::metrics; use crate::sync::batch::BatchMetricsState; @@ -562,6 +562,30 @@ impl ChainCollection { } } + /// Resume any chains that were paused waiting for the given parent envelope. 
+ pub fn resume_chains_awaiting_envelope( + &mut self, + parent_root: Hash256, + network: &mut SyncNetworkContext, + ) { + for chain in self + .finalized_chains + .values_mut() + .chain(self.head_chains.values_mut()) + { + if chain.state + == (ChainSyncingState::AwaitingEnvelope { parent_root }) + { + debug!( + ?parent_root, + "Resuming chain after parent envelope received" + ); + chain.state = ChainSyncingState::Syncing; + let _ = chain.resume(network); + } + } + } + fn update_metrics(&self) { metrics::set_gauge_vec( &metrics::SYNCING_CHAINS_COUNT, diff --git a/beacon_node/network/src/sync/range_sync/range.rs b/beacon_node/network/src/sync/range_sync/range.rs index 6509ac3cb3c..80915df86a0 100644 --- a/beacon_node/network/src/sync/range_sync/range.rs +++ b/beacon_node/network/src/sync/range_sync/range.rs @@ -266,6 +266,15 @@ where } } + /// Resume any chains that were paused waiting for the given parent envelope. + pub fn resume_chains_awaiting_envelope( + &mut self, + parent_root: Hash256, + network: &mut SyncNetworkContext, + ) { + self.chains.resume_chains_awaiting_envelope(parent_root, network); + } + /// A peer has disconnected. This removes the peer from any ongoing chains and mappings. 
A /// disconnected peer could remove a chain pub fn peer_disconnect(&mut self, network: &mut SyncNetworkContext, peer_id: &PeerId) { From f7cf8fca8d9d40dde95318c58daab49de828f4d1 Mon Sep 17 00:00:00 2001 From: Eitan Seri- Levi Date: Mon, 6 Apr 2026 00:16:02 -0700 Subject: [PATCH 126/127] Temp fixes --- beacon_node/beacon_chain/src/beacon_chain.rs | 50 +++++++++++++++- .../network_beacon_processor/sync_methods.rs | 57 ++++++------------- .../network/src/sync/backfill_sync/mod.rs | 3 +- beacon_node/network/src/sync/manager.rs | 39 +------------ .../network/src/sync/range_sync/chain.rs | 17 +----- .../src/sync/range_sync/chain_collection.rs | 26 +-------- .../network/src/sync/range_sync/range.rs | 9 --- 7 files changed, 69 insertions(+), 132 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 723d64489eb..9588aa7a2b4 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -2866,7 +2866,15 @@ impl BeaconChain { // In the case of (2), skipping the block is valid since we should never import it. // However, we will potentially get a `ParentUnknown` on a later block. The sync // protocol will need to ensure this is handled gracefully. - Err(BlockError::WouldRevertFinalizedSlot { .. }) => continue, + Err(BlockError::WouldRevertFinalizedSlot { .. }) => { + // Gloas: keep blocks at finalized slots so their envelopes can + // still be processed. This handles the checkpoint sync case where + // the checkpoint block is already finalized but its envelope hasn't + // been stored yet. + if block.as_block().fork_name_unchecked().gloas_enabled() { + filtered_chain_segment.push((block_root, block)); + } + } // The block has a known parent that does not descend from the finalized block. // There is no need to process this block or any children. 
Err(BlockError::NotFinalizedDescendant { block_parent_root }) => { @@ -2935,6 +2943,39 @@ impl BeaconChain { } }; + // Strip already-known blocks (e.g. the checkpoint sync anchor) from the + // front of the segment and process only their envelopes. These blocks + // can't go through signature_verify_chain_segment because their parents + // may not be available. + while let Some((root, _)) = filtered_chain_segment.first() { + if !self + .canonical_head + .fork_choice_read_lock() + .contains_block(root) + { + break; + } + let (block_root, block) = filtered_chain_segment.remove(0); + let maybe_envelope = match block { + RangeSyncBlock::Gloas { envelope, .. } => envelope, + _ => None, + }; + if let Some(envelope) = maybe_envelope + && let Err(error) = self + .process_range_sync_envelope( + block_root, + envelope, + notify_execution_layer, + ) + .await + { + return ChainSegmentResult::Failed { + imported_blocks, + error: BlockError::EnvelopeError(Box::new(error)), + }; + } + } + while let Some((_root, block)) = filtered_chain_segment.first() { // Determine the epoch of the first block in the remaining segment. let start_epoch = block.epoch(); @@ -2978,6 +3019,7 @@ impl BeaconChain { for (signature_verified_block, maybe_envelope) in signature_verified_blocks { let block_root = signature_verified_block.block_root(); let block_slot = signature_verified_block.slot(); + match self .process_block( block_root, @@ -3025,13 +3067,15 @@ impl BeaconChain { } } } - Err(BlockError::DuplicateFullyImported(block_root)) => { + Err(BlockError::DuplicateFullyImported(_)) + | Err(BlockError::WouldRevertFinalizedSlot { .. }) => { debug!( ?block_root, "Ignoring already known block while processing chain segment" ); // Gloas: still process the envelope for duplicate blocks. The envelope - // may not have been persisted before a restart. + // may not have been persisted before a restart, or the block may be the + // checkpoint sync anchor whose envelope was never stored. 
if let Some(envelope) = maybe_envelope && let Err(error) = self .process_range_sync_envelope( diff --git a/beacon_node/network/src/network_beacon_processor/sync_methods.rs b/beacon_node/network/src/network_beacon_processor/sync_methods.rs index 3f98b6c28fe..57d3d7d2206 100644 --- a/beacon_node/network/src/network_beacon_processor/sync_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/sync_methods.rs @@ -41,7 +41,6 @@ pub enum ChainSegmentProcessId { BackSyncBatchId(Epoch), } -/// Returned when a chain segment import fails. /// Returned when a chain segment import fails. struct ChainSegmentFailed { /// To be displayed in logs. @@ -50,13 +49,6 @@ struct ChainSegmentFailed { peer_action: Option, } -/// Result of processing a batch of blocks. -enum BlockBatchResult { - Ok { imported_blocks: usize }, - ParentEnvelopeUnknown { parent_root: Hash256 }, - Err { imported_blocks: usize, failed: Option }, -} - impl NetworkBeaconProcessor { /// Returns an async closure which processes a beacon block received via RPC. 
/// @@ -641,7 +633,7 @@ impl NetworkBeaconProcessor { .process_blocks(downloaded_blocks.iter(), notify_execution_layer) .await { - BlockBatchResult::Ok { imported_blocks } => { + (imported_blocks, Ok(_)) => { debug!( batch_epoch = %epoch, first_block_slot = start_slot, @@ -655,27 +647,17 @@ impl NetworkBeaconProcessor { imported_blocks, } } - BlockBatchResult::ParentEnvelopeUnknown { parent_root } => { - warn!( - batch_epoch = %epoch, - ?parent_root, - "Batch processing paused: parent envelope unknown" - ); - BatchProcessResult::ParentEnvelopeUnknown { parent_root } - } - BlockBatchResult::Err { imported_blocks, failed } => { - if let Some(e) = &failed { - debug!( - batch_epoch = %epoch, - first_block_slot = start_slot, - chain = chain_id, - last_block_slot = end_slot, - imported_blocks, - error = %e.message, - service = "sync", - "Batch processing failed"); - } - match failed.and_then(|e| e.peer_action) { + (imported_blocks, Err(e)) => { + debug!( + batch_epoch = %epoch, + first_block_slot = start_slot, + chain = chain_id, + last_block_slot = end_slot, + imported_blocks, + error = %e.message, + service = "sync", + "Batch processing failed"); + match e.peer_action { Some(penalty) => BatchProcessResult::FaultyFailure { imported_blocks, penalty, @@ -776,7 +758,7 @@ impl NetworkBeaconProcessor { &self, downloaded_blocks: impl Iterator>, notify_execution_layer: NotifyExecutionLayer, - ) -> BlockBatchResult { + ) -> (usize, Result<(), ChainSegmentFailed>) { let blocks: Vec<_> = downloaded_blocks.cloned().collect(); match self .chain @@ -788,25 +770,18 @@ impl NetworkBeaconProcessor { if !imported_blocks.is_empty() { self.chain.recompute_head_at_current_slot().await; } - BlockBatchResult::Ok { imported_blocks: imported_blocks.len() } + (imported_blocks.len(), Ok(())) } ChainSegmentResult::Failed { imported_blocks, error, } => { metrics::inc_counter(&metrics::BEACON_PROCESSOR_CHAIN_SEGMENT_FAILED_TOTAL); + let r = self.handle_failed_chain_segment(error); if 
!imported_blocks.is_empty() { self.chain.recompute_head_at_current_slot().await; } - // Intercept ParentEnvelopeUnknown before normal error handling. - if let BlockError::ParentEnvelopeUnknown { parent_root } = error { - return BlockBatchResult::ParentEnvelopeUnknown { parent_root }; - } - let r = self.handle_failed_chain_segment(error); - BlockBatchResult::Err { - imported_blocks: imported_blocks.len(), - failed: r.err(), - } + (imported_blocks.len(), r) } } } diff --git a/beacon_node/network/src/sync/backfill_sync/mod.rs b/beacon_node/network/src/sync/backfill_sync/mod.rs index 4fef78a47a0..29beb96e5a5 100644 --- a/beacon_node/network/src/sync/backfill_sync/mod.rs +++ b/beacon_node/network/src/sync/backfill_sync/mod.rs @@ -672,8 +672,7 @@ impl BackFillSync { } } } - BatchProcessResult::NonFaultyFailure - | BatchProcessResult::ParentEnvelopeUnknown { .. } => { + BatchProcessResult::NonFaultyFailure => { if let Err(e) = batch.processing_completed(BatchProcessingResult::NonFaultyFailure) { self.fail_sync(BackFillError::BatchInvalidState(batch_id, e.0))?; diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index da67dd5dc4f..c1c1029446b 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -233,11 +233,6 @@ pub enum BatchProcessResult { penalty: PeerAction, }, NonFaultyFailure, - /// The batch processing failed because the parent block's execution payload envelope - /// is not yet available. The chain should pause until the envelope is fetched. - ParentEnvelopeUnknown { - parent_root: Hash256, - }, } /// The result of processing multiple data columns. @@ -977,20 +972,9 @@ impl SyncManager { SyncMessage::BlockComponentProcessed { process_type, result, - } => { - // If a payload envelope was successfully imported, resume any range - // sync chains that were waiting for it. - if let BlockProcessType::SinglePayloadEnvelope { block_root, .. 
} = &process_type { - if matches!(&result, BlockProcessingResult::Ok(_)) { - self.range_sync.resume_chains_awaiting_envelope( - *block_root, - &mut self.network, - ); - } - } - self.block_lookups - .on_processing_result(process_type, result, &mut self.network) - } + } => self + .block_lookups + .on_processing_result(process_type, result, &mut self.network), SyncMessage::GossipBlockProcessResult { block_root, imported, @@ -1001,23 +985,6 @@ impl SyncManager { ), SyncMessage::BatchProcessed { sync_type, result } => match sync_type { ChainSegmentProcessId::RangeBatchId(chain_id, epoch) => { - // If the batch failed due to a missing parent envelope, trigger - // an envelope lookup before pausing the chain. - if let BatchProcessResult::ParentEnvelopeUnknown { parent_root } = &result { - let peers: Vec<_> = self - .network - .network_globals() - .peers - .read() - .synced_peers() - .cloned() - .collect(); - let _ = self.block_lookups.search_parent_envelope_of_child( - *parent_root, - &peers, - &mut self.network, - ); - } self.range_sync.handle_block_process_result( &mut self.network, chain_id, diff --git a/beacon_node/network/src/sync/range_sync/chain.rs b/beacon_node/network/src/sync/range_sync/chain.rs index bd2c7af3851..d533d8ed0db 100644 --- a/beacon_node/network/src/sync/range_sync/chain.rs +++ b/beacon_node/network/src/sync/range_sync/chain.rs @@ -156,8 +156,6 @@ pub enum ChainSyncingState { Stopped, /// The chain is undergoing syncing. Syncing, - /// The chain is paused waiting for a parent envelope to be fetched. - AwaitingEnvelope { parent_root: Hash256 }, } impl SyncingChain { @@ -641,19 +639,6 @@ impl SyncingChain { // Simply re-download all batches in `AwaitingDownload` state. self.attempt_send_awaiting_download_batches(network, "non-faulty-failure") } - BatchProcessResult::ParentEnvelopeUnknown { parent_root } => { - batch.processing_completed(BatchProcessingResult::NonFaultyFailure)?; - - // Pause the chain until the missing parent envelope is fetched. 
- debug!( - ?parent_root, - "Chain paused: awaiting parent envelope" - ); - self.state = ChainSyncingState::AwaitingEnvelope { - parent_root: *parent_root, - }; - Ok(KeepChain) - } } } @@ -1190,7 +1175,7 @@ impl SyncingChain { pub fn is_syncing(&self) -> bool { match self.state { ChainSyncingState::Syncing => true, - ChainSyncingState::Stopped | ChainSyncingState::AwaitingEnvelope { .. } => false, + ChainSyncingState::Stopped => false, } } diff --git a/beacon_node/network/src/sync/range_sync/chain_collection.rs b/beacon_node/network/src/sync/range_sync/chain_collection.rs index 7a642f1e025..a087fdecdf8 100644 --- a/beacon_node/network/src/sync/range_sync/chain_collection.rs +++ b/beacon_node/network/src/sync/range_sync/chain_collection.rs @@ -3,7 +3,7 @@ //! Each chain type is stored in it's own map. A variety of helper functions are given along with //! this struct to simplify the logic of the other layers of sync. -use super::chain::{ChainId, ChainSyncingState, ProcessingResult, RemoveChain, SyncingChain}; +use super::chain::{ChainId, ProcessingResult, RemoveChain, SyncingChain}; use super::sync_type::RangeSyncType; use crate::metrics; use crate::sync::batch::BatchMetricsState; @@ -562,30 +562,6 @@ impl ChainCollection { } } - /// Resume any chains that were paused waiting for the given parent envelope. 
- pub fn resume_chains_awaiting_envelope( - &mut self, - parent_root: Hash256, - network: &mut SyncNetworkContext, - ) { - for chain in self - .finalized_chains - .values_mut() - .chain(self.head_chains.values_mut()) - { - if chain.state - == (ChainSyncingState::AwaitingEnvelope { parent_root }) - { - debug!( - ?parent_root, - "Resuming chain after parent envelope received" - ); - chain.state = ChainSyncingState::Syncing; - let _ = chain.resume(network); - } - } - } - fn update_metrics(&self) { metrics::set_gauge_vec( &metrics::SYNCING_CHAINS_COUNT, diff --git a/beacon_node/network/src/sync/range_sync/range.rs b/beacon_node/network/src/sync/range_sync/range.rs index 80915df86a0..6509ac3cb3c 100644 --- a/beacon_node/network/src/sync/range_sync/range.rs +++ b/beacon_node/network/src/sync/range_sync/range.rs @@ -266,15 +266,6 @@ where } } - /// Resume any chains that were paused waiting for the given parent envelope. - pub fn resume_chains_awaiting_envelope( - &mut self, - parent_root: Hash256, - network: &mut SyncNetworkContext, - ) { - self.chains.resume_chains_awaiting_envelope(parent_root, network); - } - /// A peer has disconnected. This removes the peer from any ongoing chains and mappings. 
A /// disconnected peer could remove a chain pub fn peer_disconnect(&mut self, network: &mut SyncNetworkContext, peer_id: &PeerId) { From 4e8415f3dc47f35a9160a1310ee6fe4f1e59fd93 Mon Sep 17 00:00:00 2001 From: Eitan Seri- Levi Date: Mon, 6 Apr 2026 00:50:04 -0700 Subject: [PATCH 127/127] Enable optimistic sync and change rate limitis --- .../src/payload_envelope_verification/import.rs | 10 ---------- beacon_node/lighthouse_network/src/rpc/config.rs | 8 ++++---- 2 files changed, 4 insertions(+), 14 deletions(-) diff --git a/beacon_node/beacon_chain/src/payload_envelope_verification/import.rs b/beacon_node/beacon_chain/src/payload_envelope_verification/import.rs index c09f5f9b4d2..0922331e654 100644 --- a/beacon_node/beacon_chain/src/payload_envelope_verification/import.rs +++ b/beacon_node/beacon_chain/src/payload_envelope_verification/import.rs @@ -294,16 +294,6 @@ impl BeaconChain { .map_err(BeaconChainError::TokioJoin)? .ok_or(BeaconChainError::RuntimeShutdown)??; - // TODO(gloas): optimistic sync is not supported for Gloas, maybe we could re-add it - if payload_verification_outcome - .payload_verification_status - .is_optimistic() - { - return Err(EnvelopeError::OptimisticSyncNotSupported { - block_root: import_data.block_root, - }); - } - Ok(ExecutedEnvelope::new( signed_envelope, import_data, diff --git a/beacon_node/lighthouse_network/src/rpc/config.rs b/beacon_node/lighthouse_network/src/rpc/config.rs index 9e1c6541ec8..f2b55d01c78 100644 --- a/beacon_node/lighthouse_network/src/rpc/config.rs +++ b/beacon_node/lighthouse_network/src/rpc/config.rs @@ -110,13 +110,13 @@ impl RateLimiterConfig { // blocks and a decent syncing rate for honest nodes. Malicious nodes would need to // spread out their requests over the time window to max out bandwidth on the server. 
pub const DEFAULT_BLOCKS_BY_RANGE_QUOTA: Quota = - Quota::n_every(NonZeroU64::new(128).unwrap(), 10); + Quota::n_every(NonZeroU64::new(32).unwrap(), 10); pub const DEFAULT_BLOCKS_BY_ROOT_QUOTA: Quota = - Quota::n_every(NonZeroU64::new(128).unwrap(), 10); + Quota::n_every(NonZeroU64::new(32).unwrap(), 10); pub const DEFAULT_PAYLOAD_ENVELOPES_BY_RANGE_QUOTA: Quota = - Quota::n_every(NonZeroU64::new(128).unwrap(), 10); + Quota::n_every(NonZeroU64::new(32).unwrap(), 10); pub const DEFAULT_PAYLOAD_ENVELOPES_BY_ROOT_QUOTA: Quota = - Quota::n_every(NonZeroU64::new(128).unwrap(), 10); + Quota::n_every(NonZeroU64::new(32).unwrap(), 10); // `DEFAULT_BLOCKS_BY_RANGE_QUOTA` * (target + 1) to account for high usage pub const DEFAULT_BLOBS_BY_RANGE_QUOTA: Quota = Quota::n_every(NonZeroU64::new(896).unwrap(), 10);