From c33e351422b18585595140434a9786685e6ab1d3 Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Wed, 11 Mar 2026 14:53:11 +0100 Subject: [PATCH 01/28] feat: add claimsync package and integrate with bridgesync - Add new claimsync package with its own storage (SQLite), processor, embedded mode, and ClaimsReader interface - Remove duplicated claim event handlers from bridgesync/downloader.go; bridgesync now delegates to claimsync via ClaimsSyncProcessor interface - Add ProcessBlockWithTx(insertBlock bool) to allow shared tx reuse between bridgesync and claimsync (atomic writes) - Add ReorgWithTx to ClaimsSyncProcessor so bridgesync can call claimsync reorg within its own transaction - Add ClaimsReader interface grouping read-only claim queries Co-Authored-By: Claude Sonnet 4.6 --- aggsender/aggsender.go | 4 + aggsender/aggsender_test.go | 2 + aggsender/aggsender_validator.go | 9 +- bridgesync/bridgesync.go | 12 +- bridgesync/bridgesync_test.go | 6 + bridgesync/downloader.go | 348 +----------------- bridgesync/processor.go | 78 ++-- bridgesync/processor_test.go | 54 +-- claimsync/claimsync.go | 164 +++++++++ claimsync/downloader.go | 258 +++++++++++++ claimsync/embedded.go | 150 ++++++++ claimsync/processor.go | 114 ++++++ claimsync/reader.go | 45 +++ .../storage/migrations/claimsync0001.sql | 53 +++ claimsync/storage/migrations/migrations.go | 41 +++ claimsync/storage/storage.go | 296 +++++++++++++++ claimsync/types/claim_reader.go | 16 + claimsync/types/claim_storager.go | 37 ++ claimsync/types/processor.go | 11 + claimsync/types/syncer_id.go | 18 + cmd/run.go | 9 +- test/helpers/e2e.go | 2 +- 22 files changed, 1327 insertions(+), 400 deletions(-) create mode 100644 claimsync/claimsync.go create mode 100644 claimsync/downloader.go create mode 100644 claimsync/embedded.go create mode 100644 claimsync/processor.go create mode 100644 claimsync/reader.go create mode 100644 claimsync/storage/migrations/claimsync0001.sql 
create mode 100644 claimsync/storage/migrations/migrations.go create mode 100644 claimsync/storage/storage.go create mode 100644 claimsync/types/claim_reader.go create mode 100644 claimsync/types/claim_storager.go create mode 100644 claimsync/types/processor.go create mode 100644 claimsync/types/syncer_id.go diff --git a/aggsender/aggsender.go b/aggsender/aggsender.go index 31c0301c9..0f8f6be94 100644 --- a/aggsender/aggsender.go +++ b/aggsender/aggsender.go @@ -22,6 +22,7 @@ import ( "github.com/agglayer/aggkit/aggsender/types" "github.com/agglayer/aggkit/aggsender/validator" aggkitcommon "github.com/agglayer/aggkit/common" + "github.com/agglayer/aggkit/claimsync" "github.com/agglayer/aggkit/db/compatibility" "github.com/agglayer/aggkit/log" aggkittypes "github.com/agglayer/aggkit/types" @@ -67,6 +68,7 @@ func New( aggLayerClient agglayer.AgglayerClientInterface, l1InfoTreeSyncer types.L1InfoTreeSyncer, l2Syncer types.L2BridgeSyncer, + claimSyncer claimsync.ClaimSyncer, l1Client aggkittypes.BaseEthereumClienter, l2Client aggkittypes.BaseEthereumClienter, rollupDataQuerier types.RollupDataQuerier, @@ -102,6 +104,7 @@ func New( aggLayerClient, l1InfoTreeSyncer, l2Syncer, + claimSyncer, l1Client, l2Client, rollupDataQuerier, @@ -119,6 +122,7 @@ func newAggsender( aggLayerClient agglayer.AgglayerClientInterface, l1InfoTreeSyncer types.L1InfoTreeSyncer, l2Syncer types.L2BridgeSyncer, + _ claimsync.ClaimSyncer, l1Client aggkittypes.BaseEthereumClienter, l2Client aggkittypes.BaseEthereumClienter, rollupDataQuerier types.RollupDataQuerier, diff --git a/aggsender/aggsender_test.go b/aggsender/aggsender_test.go index b01179b25..4a884c0c1 100644 --- a/aggsender/aggsender_test.go +++ b/aggsender/aggsender_test.go @@ -118,6 +118,7 @@ func TestAggSenderStart(t *testing.T) { aggLayerMock, mockL1InfoTreeSyncer, // l1 info tree syncer bridgeL2SyncerMock, + nil, // claim syncer nil, // l1 client nil, // l2 client rollupQuerierMock, @@ -555,6 +556,7 @@ func TestNewAggSender(t 
*testing.T) { mockAgglayerClient, mockL1InfoTreeSyncer, // l1 info tree syncer mockBridgeSyncer, + nil, // claim syncer nil, // l1 client nil, // l2 client mockRollupQuerier, diff --git a/aggsender/aggsender_validator.go b/aggsender/aggsender_validator.go index 8203fb3db..4de29965f 100644 --- a/aggsender/aggsender_validator.go +++ b/aggsender/aggsender_validator.go @@ -4,15 +4,17 @@ import ( "context" "errors" + signertypes "github.com/agglayer/go_signer/signer/types" + ethcommon "github.com/ethereum/go-ethereum/common" + "github.com/agglayer/aggkit/agglayer" "github.com/agglayer/aggkit/aggsender/metrics" "github.com/agglayer/aggkit/aggsender/types" "github.com/agglayer/aggkit/aggsender/validator" v1 "github.com/agglayer/aggkit/aggsender/validator/proto/v1" + "github.com/agglayer/aggkit/claimsync" aggkitcommon "github.com/agglayer/aggkit/common" "github.com/agglayer/aggkit/grpc" - signertypes "github.com/agglayer/go_signer/signer/types" - ethcommon "github.com/ethereum/go-ethereum/common" ) var ( @@ -36,7 +38,8 @@ func NewAggsenderValidator(ctx context.Context, certQuerier types.CertificateQuerier, aggchainFEPQuerier types.AggchainFEPRollupQuerier, initialLER ethcommon.Hash, - signer signertypes.Signer) (*AggsenderValidator, error) { + signer signertypes.Signer, + _ claimsync.ClaimSyncer) (*AggsenderValidator, error) { validatorCert := validator.NewAggsenderValidator( logger, flow, l1InfoTreeDataQuerier, certQuerier, initialLER) grpcServer, err := grpc.NewServer(cfg.ServerConfig) diff --git a/bridgesync/bridgesync.go b/bridgesync/bridgesync.go index 56a6b5cea..06846187d 100644 --- a/bridgesync/bridgesync.go +++ b/bridgesync/bridgesync.go @@ -96,10 +96,12 @@ func NewL1( false, syncFromInBridges, bridgesynctypes.EmptyLER, + nil, ) } -// NewL2 creates a bridge syncer that synchronizes the local exit tree +// NewL2 creates a bridge syncer that synchronizes the local exit tree. +// Pass a non-nil claimEventsProcessor to delegate claim storage to claimsync. 
func NewL2( ctx context.Context, cfg Config, @@ -109,6 +111,7 @@ func NewL2( syncFullClaims bool, syncFromInBridges bool, initialLER common.Hash, + claimEventsProcessor ClaimsSyncProcessor, ) (*BridgeSync, error) { return newBridgeSync( ctx, @@ -121,6 +124,7 @@ func NewL2( syncFullClaims, syncFromInBridges, initialLER, + claimEventsProcessor, ) } @@ -135,6 +139,7 @@ func newBridgeSync( syncFullClaims bool, syncFromInBridges bool, initialLER common.Hash, + claimEventsProcessor ClaimsSyncProcessor, ) (*BridgeSync, error) { logger := log.WithFields("module", syncerID.String()) @@ -153,7 +158,7 @@ func newBridgeSync( return nil, err } - processor, err := newProcessor(cfg.DBPath, "bridge_sync_"+syncerID.String(), logger, cfg.DBQueryTimeout.Duration) + processor, err := newProcessor(cfg.DBPath, "bridge_sync_"+syncerID.String(), logger, cfg.DBQueryTimeout.Duration, claimEventsProcessor) if err != nil { return nil, err } @@ -189,8 +194,7 @@ func newBridgeSync( return nil, fmt.Errorf("failed to resolve bridge deployment. 
Reason: %w", err) } - appender, err := buildAppender(ctx, ethClient, processor, cfg.BridgeAddr, syncFullClaims, - syncFromInBridges, bridgeDeployment, logger) + appender, err := buildAppender(ctx, ethClient, cfg.BridgeAddr, syncFromInBridges, bridgeDeployment, logger, claimEventsProcessor) if err != nil { return nil, err } diff --git a/bridgesync/bridgesync_test.go b/bridgesync/bridgesync_test.go index bdae7a2e4..be54cbf54 100644 --- a/bridgesync/bridgesync_test.go +++ b/bridgesync/bridgesync_test.go @@ -119,6 +119,7 @@ func TestNewLx(t *testing.T) { false, testSyncFromInBridges, bridgesynctypes.EmptyLER, + nil, ) require.NoError(t, err) @@ -138,6 +139,7 @@ func TestNewLx(t *testing.T) { false, testSyncFromInBridges, bridgesynctypes.EmptyLER, + nil, ) require.Error(t, err) require.Nil(t, l2BridgeSyncer) @@ -342,6 +344,7 @@ func TestBridgeSync_GetTokenMappings(t *testing.T) { false, testSyncFromInBridges, bridgesynctypes.EmptyLER, + nil, ) require.NoError(t, err) @@ -513,6 +516,7 @@ func TestBridgeSync_GetLegacyTokenMigrations(t *testing.T) { false, testSyncFromInBridges, bridgesynctypes.EmptyLER, + nil, ) require.NoError(t, err) @@ -719,6 +723,7 @@ func TestBridgeSync_GetLastRoot(t *testing.T) { false, testSyncFromInBridges, bridgesynctypes.EmptyLER, + nil, ) require.NoError(t, err) @@ -901,6 +906,7 @@ func TestBridgeSync_SubscribeToSync(t *testing.T) { false, testSyncFromInBridges, bridgesynctypes.EmptyLER, + nil, ) require.NoError(t, err) diff --git a/bridgesync/downloader.go b/bridgesync/downloader.go index 246e0d6d1..8a1ea785c 100644 --- a/bridgesync/downloader.go +++ b/bridgesync/downloader.go @@ -3,20 +3,17 @@ package bridgesync import ( "bytes" "context" - "errors" "fmt" "math/big" "github.com/0xPolygon/cdk-contracts-tooling/contracts/aggchain-multisig/agglayerbridge" "github.com/0xPolygon/cdk-contracts-tooling/contracts/aggchain-multisig/agglayerbridgel2" - "github.com/0xPolygon/cdk-contracts-tooling/contracts/aggchain-multisig/polygonzkevmbridge" rpctypes 
"github.com/0xPolygon/cdk-rpc/types" bridgetypes "github.com/agglayer/aggkit/bridgeservice/types" bridgesynctypes "github.com/agglayer/aggkit/bridgesync/types" "github.com/agglayer/aggkit/db" logger "github.com/agglayer/aggkit/log" "github.com/agglayer/aggkit/sync" - treetypes "github.com/agglayer/aggkit/tree/types" aggkittypes "github.com/agglayer/aggkit/types" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" @@ -29,16 +26,9 @@ var ( bridgeEventSignature = crypto.Keccak256Hash([]byte( "BridgeEvent(uint8,uint32,address,uint32,address,uint256,bytes,uint32)", )) - claimEventSignature = crypto.Keccak256Hash([]byte("ClaimEvent(uint256,uint32,address,address,uint256)")) - claimEventSignaturePreEtrog = crypto.Keccak256Hash([]byte("ClaimEvent(uint32,uint32,address,address,uint256)")) - tokenMappingEventSignature = crypto.Keccak256Hash([]byte("NewWrappedToken(uint32,address,address,bytes)")) + tokenMappingEventSignature = crypto.Keccak256Hash([]byte("NewWrappedToken(uint32,address,address,bytes)")) // sovereign chain contract events - detailedClaimEventSignature = crypto.Keccak256Hash([]byte( - "DetailedClaimEvent(bytes32[32],bytes32[32]," + - "uint256,bytes32,bytes32,uint8,uint32," + - "address,uint32,address,uint256,bytes)", - )) setSovereignTokenEventSignature = crypto.Keccak256Hash([]byte( "SetSovereignTokenAddress(uint32,address,address,bool)", )) @@ -48,20 +38,9 @@ var ( removeLegacySovereignTokenEventSignature = crypto.Keccak256Hash([]byte( "RemoveLegacySovereignTokenAddress(address)", )) - unsetClaimEventSignature = crypto.Keccak256Hash([]byte( - "UpdatedUnsetGlobalIndexHashChain(bytes32,bytes32)", - )) - setClaimEventSignature = crypto.Keccak256Hash([]byte( - "SetClaim(bytes32)", - )) backwardLETEventSignature = crypto.Keccak256Hash([]byte("BackwardLET(uint256,bytes32,uint256,bytes32)")) forwardLETEventSignature = crypto.Keccak256Hash([]byte("ForwardLET(uint256,bytes32,uint256,bytes32,bytes)")) - claimAssetEtrogMethodID = 
common.Hex2Bytes("ccaa2d11") - claimMessageEtrogMethodID = common.Hex2Bytes("f5efcd79") - claimAssetPreEtrogMethodID = common.Hex2Bytes("2cffd02e") - claimMessagePreEtrogMethodID = common.Hex2Bytes("2d2c9d94") - // bridgeAsset(uint32 destinationNetwork,address destinationAddress,uint256 amount, // address token,bool forceUpdateGlobalExitRoot,bytes permitData) BridgeAssetMethodID = common.Hex2Bytes("cd586579") @@ -88,36 +67,28 @@ const ( func buildAppender( ctx context.Context, client aggkittypes.EthClienter, - querier BridgeQuerier, bridgeAddr common.Address, - syncFullClaims bool, syncFromInBridges bool, bridgeDeployment *bridgeDeployment, logger *logger.Logger, + claimSync ClaimsSyncProcessor, ) (sync.LogAppenderMap, error) { - legacyBridge, err := polygonzkevmbridge.NewPolygonzkevmbridge(bridgeAddr, client) - if err != nil { - return nil, fmt.Errorf("failed to create PolygonZkEVMBridge SC binding (bridge addr: %s): %w", bridgeAddr, err) + var appender sync.LogAppenderMap + if claimSync != nil { + appender = claimSync.BuildAppender() + } else { + appender = make(sync.LogAppenderMap) } - appender := make(sync.LogAppenderMap) - // Add event handlers for the bridge contract appender[bridgeEventSignature] = buildBridgeEventHandler( ctx, bridgeDeployment.agglayerBridge, bridgeAddr, client, syncFromInBridges, logger) - appender[claimEventSignaturePreEtrog] = buildClaimEventHandlerPreEtrog( - legacyBridge, client, bridgeAddr, syncFullClaims, logger) - appender[claimEventSignature] = buildClaimEventHandler(ctx, bridgeDeployment.agglayerBridge, client, querier, - bridgeAddr, syncFullClaims, logger) appender[tokenMappingEventSignature] = buildTokenMappingHandler(bridgeDeployment.agglayerBridge) if bridgeDeployment.kind == SovereignChain { - appender[detailedClaimEventSignature] = buildDetailedClaimEventHandler(bridgeDeployment.agglayerBridgeL2) appender[setSovereignTokenEventSignature] = buildSetSovereignTokenHandler(bridgeDeployment.agglayerBridgeL2) 
appender[migrateLegacyTokenEventSignature] = buildMigrateLegacyTokenHandler(bridgeDeployment.agglayerBridgeL2) appender[removeLegacySovereignTokenEventSignature] = buildRemoveLegacyTokenHandler(bridgeDeployment.agglayerBridgeL2) - appender[unsetClaimEventSignature] = buildUnsetClaimEventHandler(bridgeDeployment.agglayerBridgeL2) - appender[setClaimEventSignature] = buildSetClaimEventHandler(bridgeDeployment.agglayerBridgeL2) appender[backwardLETEventSignature] = buildBackwardLETEventHandler(bridgeDeployment.agglayerBridgeL2) appender[forwardLETEventSignature] = buildForwardLETEventHandler(bridgeDeployment.agglayerBridgeL2) @@ -421,166 +392,6 @@ func buildBridgeEventHandler( } } -// buildClaimEventHandler creates a handler for the Claim event log. -func buildClaimEventHandler(ctx context.Context, agglayerBridge *agglayerbridge.Agglayerbridge, - client aggkittypes.EthClienter, querier BridgeQuerier, bridgeAddr common.Address, - syncFullClaims bool, logger *logger.Logger, -) func(*sync.EVMBlock, types.Log) error { - return func(b *sync.EVMBlock, l types.Log) error { - // check if we already have passed the block which started indexing DetailedClaimEvent - boundaryBlock, err := querier.GetBoundaryBlockForClaimType(ctx, DetailedClaimEvent) - if err != nil && !errors.Is(err, db.ErrNotFound) { - return fmt.Errorf("failed checking DetailedClaimEvent boundary: %w", err) - } - - if err == nil && l.BlockNumber >= boundaryBlock { - logger.Debugf( - "Skipping ClaimEvent at block %d; DetailedClaimEvent indexing already started at block %d", - l.BlockNumber, boundaryBlock, - ) - return nil - } - - // Skip if a DetailedClaimEvent for the same transaction is already in this block's events. - // Check early to avoid the expensive extractCallData RPC call. 
- for _, raw := range b.Events { - if e, ok := raw.(Event); ok && e.Claim != nil && e.Claim.Type == DetailedClaimEvent && e.Claim.TxHash == l.TxHash { - logger.Debugf( - "Skipping ClaimEvent at block %d tx %s; DetailedClaimEvent already present in block", - l.BlockNumber, l.TxHash.Hex(), - ) - return nil - } - } - - claimEvent, err := agglayerBridge.ParseClaimEvent(l) - if err != nil { - return fmt.Errorf("error parsing Claim event log %+v: %w", l, err) - } - - claim := &Claim{ - BlockNum: b.Num, - BlockPos: uint64(l.Index), - BlockTimestamp: b.Timestamp, - TxHash: l.TxHash, - GlobalIndex: claimEvent.GlobalIndex, - OriginNetwork: claimEvent.OriginNetwork, - OriginAddress: claimEvent.OriginAddress, - DestinationAddress: claimEvent.DestinationAddress, - Amount: claimEvent.Amount, - Type: ClaimEvent, - } - - // Extract root call for txn_sender and error checking - _, rootCall, err := extractCallData(client, bridgeAddr, l.TxHash, logger, nil) - if err != nil { - return fmt.Errorf("failed to extract claim event tx sender (tx hash: %s): %w", l.TxHash, err) - } - // Check if the root call was successful - if rootCall.Err != nil { - return fmt.Errorf("execution reverted in root call (block %d, tx hash: %s): %s", b.Num, l.TxHash, *rootCall.Err) - } - - if syncFullClaims { - if err := claim.setClaimCalldataFromRoot(rootCall, bridgeAddr, logger); err != nil { - return err - } - } - - b.Events = append(b.Events, Event{Claim: claim}) - return nil - } -} - -// buildDetailedClaimEventHandler creates a handler for the DetailedClaimEvent event log. 
-func buildDetailedClaimEventHandler(contract *agglayerbridgel2.Agglayerbridgel2, -) func(*sync.EVMBlock, types.Log) error { - return func(b *sync.EVMBlock, l types.Log) error { - claimEvent, err := contract.ParseDetailedClaimEvent(l) - if err != nil { - return fmt.Errorf("error parsing DetailedClaimEvent event log %+v: %w", l, err) - } - - claim := &Claim{ - BlockNum: b.Num, - BlockPos: uint64(l.Index), - BlockTimestamp: b.Timestamp, - TxHash: l.TxHash, - GlobalIndex: claimEvent.GlobalIndex, - OriginNetwork: claimEvent.OriginNetwork, - OriginAddress: claimEvent.OriginTokenAddress, - DestinationNetwork: claimEvent.DestinationNetwork, - DestinationAddress: claimEvent.DestinationAddress, - Amount: claimEvent.Amount, - Metadata: claimEvent.Metadata, - MainnetExitRoot: claimEvent.MainnetExitRoot, - RollupExitRoot: claimEvent.RollupExitRoot, - ProofLocalExitRoot: treetypes.NewProof(claimEvent.SmtProofLocalExitRoot), - ProofRollupExitRoot: treetypes.NewProof(claimEvent.SmtProofRollupExitRoot), - GlobalExitRoot: crypto.Keccak256Hash(claimEvent.MainnetExitRoot[:], claimEvent.RollupExitRoot[:]), - IsMessage: claimEvent.LeafType == uint8(bridgesynctypes.LeafTypeMessage), - Type: DetailedClaimEvent, - } - - // Remove any ClaimEvent for the same transaction already collected in this block. - // Both ClaimEvent and DetailedClaimEvent are emitted on sovereign chains; DetailedClaimEvent takes precedence. - newEvents := make([]interface{}, 0, len(b.Events)) - for _, raw := range b.Events { - if e, ok := raw.(Event); ok && e.Claim != nil && e.Claim.Type == ClaimEvent && e.Claim.TxHash == l.TxHash { - continue - } - newEvents = append(newEvents, raw) - } - b.Events = newEvents - - b.Events = append(b.Events, Event{Claim: claim}) - return nil - } -} - -// buildClaimEventHandlerPreEtrog creates a handler for the Claim event log for pre-Etrog contracts. 
-func buildClaimEventHandlerPreEtrog(contract *polygonzkevmbridge.Polygonzkevmbridge, - client aggkittypes.EthClienter, bridgeAddr common.Address, syncFullClaims bool, logger *logger.Logger, -) func(*sync.EVMBlock, types.Log) error { - return func(b *sync.EVMBlock, l types.Log) error { - claimEvent, err := contract.ParseClaimEvent(l) - if err != nil { - return fmt.Errorf("error parsing Claim event log %+v: %w", l, err) - } - - claim := &Claim{ - BlockNum: b.Num, - BlockPos: uint64(l.Index), - BlockTimestamp: b.Timestamp, - TxHash: l.TxHash, - GlobalIndex: new(big.Int).SetUint64(uint64(claimEvent.Index)), - OriginNetwork: claimEvent.OriginNetwork, - OriginAddress: claimEvent.OriginAddress, - DestinationAddress: claimEvent.DestinationAddress, - Amount: claimEvent.Amount, - } - - // Extract root call for txn_sender and error checking - _, rootCall, err := extractCallData(client, bridgeAddr, l.TxHash, logger, nil) - if err != nil { - return fmt.Errorf("failed to extract claim event tx sender (tx hash: %s): %w", l.TxHash, err) - } - // Check if the root call was successful - if rootCall.Err != nil { - return fmt.Errorf("execution reverted in root call (block %d, tx hash: %s): %s", b.Num, l.TxHash, *rootCall.Err) - } - - if syncFullClaims { - if err := claim.setClaimCalldataFromRoot(rootCall, bridgeAddr, logger); err != nil { - return err - } - } - - b.Events = append(b.Events, Event{Claim: claim}) - return nil - } -} - // buildTokenMappingHandler creates a handler for the NewWrappedToken event log. 
func buildTokenMappingHandler(contract *agglayerbridge.Agglayerbridge, ) func(*sync.EVMBlock, types.Log) error { @@ -673,50 +484,6 @@ func buildRemoveLegacyTokenHandler(contract *agglayerbridgel2.Agglayerbridgel2) } } -// buildUnsetClaimEventHandler creates a handler for the UpdatedUnsetGlobalIndexHashChain event log -func buildUnsetClaimEventHandler(contract *agglayerbridgel2.Agglayerbridgel2) func(*sync.EVMBlock, - types.Log) error { - return func(b *sync.EVMBlock, l types.Log) error { - event, err := contract.ParseUpdatedUnsetGlobalIndexHashChain(l) - if err != nil { - return fmt.Errorf("error parsing UpdatedUnsetGlobalIndexHashChain event log %+v: %w", l, err) - } - - // Convert bytes32 to big.Int - globalIndex := new(big.Int).SetBytes(event.UnsetGlobalIndex[:]) - - b.Events = append(b.Events, Event{UnsetClaim: &UnsetClaim{ - BlockNum: b.Num, - BlockPos: uint64(l.Index), - TxHash: l.TxHash, - GlobalIndex: globalIndex, - UnsetGlobalIndexHashChain: event.NewUnsetGlobalIndexHashChain, - }}) - return nil - } -} - -// buildSetClaimEventHandler creates a handler for the SetClaim event log -func buildSetClaimEventHandler(contract *agglayerbridgel2.Agglayerbridgel2) func(*sync.EVMBlock, types.Log) error { - return func(b *sync.EVMBlock, l types.Log) error { - event, err := contract.ParseSetClaim(l) - if err != nil { - return fmt.Errorf("error parsing SetClaim event log %+v: %w", l, err) - } - - // Convert bytes32 to big.Int - globalIndex := new(big.Int).SetBytes(event.GlobalIndex[:]) - - b.Events = append(b.Events, Event{SetClaim: &SetClaim{ - BlockNum: b.Num, - BlockPos: uint64(l.Index), - TxHash: l.TxHash, - GlobalIndex: globalIndex, - }}) - return nil - } -} - // buildBackwardLETEventHandler creates a handler for the BackwardLET event log func buildBackwardLETEventHandler(contract *agglayerbridgel2.Agglayerbridgel2) func(*sync.EVMBlock, types.Log) error { return func(b *sync.EVMBlock, l types.Log) error { @@ -851,104 +618,3 @@ func extractCallData( return 
foundCalls, rootCall, nil } - -// setClaimCalldataFromRoot finds and decodes calldata for the given bridge address using an already traced root call. -// -// Parameters: -// - rootCall: Already traced root call. -// - bridge: Target contract address. -// - logger: Logger instance for debug logging. -// -// Returns an error if calldata isn't found. -func (c *Claim) setClaimCalldataFromRoot( - rootCall *Call, - bridge common.Address, - logger *logger.Logger, -) error { - _, err := findCall(*rootCall, bridge, - func(call Call) (bool, error) { - // Skip reverted calls - if call.Err != nil { - return false, nil - } - return c.tryDecodeClaimCalldata(call.Input, logger) - }, logger) - - return err -} - -// tryDecodeClaimCalldata attempts to find and decode the claim calldata from the provided input bytes. -// It checks if the method ID corresponds to either the claim asset or claim message methods. -// If a match is found, it decodes the calldata using the ABI of the bridge contract and updates the claim object. -// Returns true if the calldata is successfully decoded and matches the expected format, otherwise returns false. 
-func (c *Claim) tryDecodeClaimCalldata(input []byte, logger *logger.Logger) (bool, error) { - if len(input) < methodIDLength { - return false, fmt.Errorf("input too short: %d bytes", len(input)) - } - methodID := input[:methodIDLength] - switch { - case bytes.Equal(methodID, claimAssetEtrogMethodID): - fallthrough - case bytes.Equal(methodID, claimMessageEtrogMethodID): - bridgeV2ABI, err := agglayerbridge.AgglayerbridgeMetaData.GetAbi() - if err != nil { - return false, err - } - // Recover Method from signature and ABI - method, err := bridgeV2ABI.MethodById(methodID) - if err != nil { - return false, err - } - - data, err := method.Inputs.Unpack(input[methodIDLength:]) - if err != nil { - return false, err - } - - found, err := c.decodeEtrogCalldata(data) - if err != nil { - return false, err - } - - if found { - c.IsMessage = bytes.Equal(methodID, claimMessageEtrogMethodID) - } - - return found, nil - - case bytes.Equal(methodID, claimAssetPreEtrogMethodID): - fallthrough - case bytes.Equal(methodID, claimMessagePreEtrogMethodID): - bridgeABI, err := polygonzkevmbridge.PolygonzkevmbridgeMetaData.GetAbi() - if err != nil { - return false, err - } - - // Recover Method from signature and ABI - method, err := bridgeABI.MethodById(methodID) - if err != nil { - return false, err - } - - data, err := method.Inputs.Unpack(input[methodIDLength:]) - if err != nil { - return false, err - } - - found, err := c.decodePreEtrogCalldata(data) - if err != nil { - return false, err - } - - if found { - c.IsMessage = bytes.Equal(methodID, claimMessagePreEtrogMethodID) - } - - return found, nil - - default: - // Log unrecognized method ID for debugging but returns false to continue searching (DFS) - logger.Debugf("unrecognized method ID encountered during claim calldata extraction: %x", methodID) - return false, nil - } -} diff --git a/bridgesync/processor.go b/bridgesync/processor.go index 431daf4f7..7edf9955f 100644 --- a/bridgesync/processor.go +++ b/bridgesync/processor.go 
@@ -707,17 +707,34 @@ type BridgeQuerier interface { var _ BridgeQuerier = (*processor)(nil) +// ClaimsSyncProcessor handles storage of claim-related events within a bridgesync transaction. +// Pass an implementation (e.g. from claimsync.NewEmbedded) to bridgesync to delegate claim +// storage and event parsing to claimsync, keeping the two components in sync atomically. +type ClaimsSyncProcessor interface { + // ProcessBlockWithTx stores Claim, UnsetClaim and SetClaim events using an existing tx. + // Bridgesync calls this from ProcessBlock, reusing its own tx so no new tx is needed. + // insertBlock must be false when bridgesync already inserted the block row. + ProcessBlockWithTx(ctx context.Context, tx dbtypes.Querier, block *sync.Block, insertBlock bool) error + // ReorgWithTx deletes claim data for all blocks >= firstReorgedBlock using the provided tx. + // The caller is responsible for commit and rollback. + ReorgWithTx(tx dbtypes.Querier, firstReorgedBlock uint64) (int64, error) + // BuildAppender returns the LogAppenderMap for claim-related log events. + // Bridgesync merges this into its own appender so claimsync's handlers are used. 
+ BuildAppender() sync.LogAppenderMap +} + type processor struct { - syncerID string - db *sql.DB - exitTree types.FullTreer - log *log.Logger - mu mutex.RWMutex - halted bool - haltedReason string - dbQueryTimeout time.Duration - bridgeSubscriber aggkitcommon.PubSub[uint64] - initialLER common.Hash + syncerID string + db *sql.DB + exitTree types.FullTreer + log *log.Logger + mu mutex.RWMutex + halted bool + haltedReason string + dbQueryTimeout time.Duration + bridgeSubscriber aggkitcommon.PubSub[uint64] + initialLER common.Hash + claimEventsProcessor ClaimsSyncProcessor compatibility.CompatibilityDataStorager[BridgeSyncRuntimeData] } @@ -726,6 +743,7 @@ func newProcessor( syncerID string, logger *log.Logger, dbQueryTimeout time.Duration, + claimEventsProcessor ClaimsSyncProcessor, ) (*processor, error) { err := migrations.RunMigrations(dbPath) if err != nil { @@ -739,12 +757,13 @@ func newProcessor( exitTree := tree.NewAppendOnlyTree(database, "") return &processor{ - syncerID: syncerID, - db: database, - exitTree: exitTree, - log: logger, - dbQueryTimeout: dbQueryTimeout, - bridgeSubscriber: aggkitcommon.NewGenericSubscriber[uint64](), + syncerID: syncerID, + db: database, + exitTree: exitTree, + log: logger, + dbQueryTimeout: dbQueryTimeout, + bridgeSubscriber: aggkitcommon.NewGenericSubscriber[uint64](), + claimEventsProcessor: claimEventsProcessor, CompatibilityDataStorager: compatibility.NewKeyValueToCompatibilityStorage[BridgeSyncRuntimeData]( db.NewKeyValueStorage(database), syncerID, @@ -1649,6 +1668,7 @@ func (p *processor) ProcessBlock(ctx context.Context, block sync.Block) error { var blockPos *uint64 var hasAnyBridge bool + var claimEvents []Event for _, e := range block.Events { event, ok := e.(Event) if !ok { @@ -1681,9 +1701,11 @@ func (p *processor) ProcessBlock(ctx context.Context, block sync.Block) error { // Mark that this block has at least one bridge hasAnyBridge = true } - + // TODO: remove if event.Claim != nil { - if err = 
meddler.Insert(tx, claimTableName, event.Claim); err != nil { + if p.claimEventsProcessor != nil { + claimEvents = append(claimEvents, event) + } else if err = meddler.Insert(tx, claimTableName, event.Claim); err != nil { p.log.Errorf("failed to insert claim event at block %d: %v", block.Num, err) return err } @@ -1710,16 +1732,20 @@ func (p *processor) ProcessBlock(ctx context.Context, block sync.Block) error { return err } } - + // TODO: remove if event.UnsetClaim != nil { - if err = meddler.Insert(tx, unsetClaimTableName, event.UnsetClaim); err != nil { + if p.claimEventsProcessor != nil { + claimEvents = append(claimEvents, event) + } else if err = meddler.Insert(tx, unsetClaimTableName, event.UnsetClaim); err != nil { p.log.Errorf("failed to insert unset claim event at block %d: %v", block.Num, err) return err } } - + // TODO: remove if event.SetClaim != nil { - if err = meddler.Insert(tx, setClaimTableName, event.SetClaim); err != nil { + if p.claimEventsProcessor != nil { + claimEvents = append(claimEvents, event) + } else if err = meddler.Insert(tx, setClaimTableName, event.SetClaim); err != nil { p.log.Errorf("failed to insert set claim event at block %d: %v", block.Num, err) return err } @@ -1740,8 +1766,14 @@ func (p *processor) ProcessBlock(ctx context.Context, block sync.Block) error { blockPos = &newBlockPos } + if p.claimEventsProcessor != nil { + // ProcessBlock(ctx context.Context, block sync.Block) + if err := p.claimEventsProcessor.ProcessBlockWithTx(ctx, tx, &block, false); err != nil { + p.log.Errorf("failed to process claim events for block %d: %v", block.Num, err) + return err + } + } } - if err := tx.Commit(); err != nil { p.log.Errorf("failed to commit db transaction (block number %d): %v", block.Num, err) return err diff --git a/bridgesync/processor_test.go b/bridgesync/processor_test.go index 895e4af70..ae4e1e7bf 100644 --- a/bridgesync/processor_test.go +++ b/bridgesync/processor_test.go @@ -97,7 +97,7 @@ func TestBigIntString(t 
*testing.T) { func TestProcessor(t *testing.T) { path := path.Join(t.TempDir(), "bridgeSyncerProcessor.db") logger := log.WithFields("module", "bridge-syncer") - p, err := newProcessor(path, "bridge-syncer", logger, dbQueryTimeout) + p, err := newProcessor(path, "bridge-syncer", logger, dbQueryTimeout, nil) require.NoError(t, err) actions := []processAction{ // processed: ~ @@ -870,7 +870,7 @@ func TestInsertAndGetClaim(t *testing.T) { err := migrations.RunMigrations(path) require.NoError(t, err) logger := log.WithFields("bridge-syncer", "foo") - p, err := newProcessor(path, "foo", logger, dbQueryTimeout) + p, err := newProcessor(path, "foo", logger, dbQueryTimeout, nil) require.NoError(t, err) tx, err := p.db.BeginTx(context.Background(), nil) @@ -956,7 +956,7 @@ func TestGetBridgesPublished(t *testing.T) { path := path.Join(t.TempDir(), fmt.Sprintf("bridgesyncTestGetBridgesPublished_%s.sqlite", tc.name)) require.NoError(t, migrations.RunMigrations(path)) logger := log.WithFields("bridge-syncer", "foo") - p, err := newProcessor(path, "foo", logger, dbQueryTimeout) + p, err := newProcessor(path, "foo", logger, dbQueryTimeout, nil) require.NoError(t, err) tx, err := p.db.BeginTx(context.Background(), nil) @@ -989,7 +989,7 @@ func TestGetBridgesPublished(t *testing.T) { func TestProcessBlockInvalidIndex(t *testing.T) { path := path.Join(t.TempDir(), "aggsenderTestProcessor.sqlite") logger := log.WithFields("bridge-syncer", "foo") - p, err := newProcessor(path, "foo", logger, dbQueryTimeout) + p, err := newProcessor(path, "foo", logger, dbQueryTimeout, nil) require.NoError(t, err) err = p.ProcessBlock(context.Background(), sync.Block{ Num: 0, @@ -1041,7 +1041,7 @@ func TestGetBridgesPaged(t *testing.T) { path := path.Join(t.TempDir(), "bridgesyncGetBridgesPaged.sqlite") require.NoError(t, migrations.RunMigrations(path)) logger := log.WithFields("bridge-syncer", "foo") - p, err := newProcessor(path, "bridge-syncer", logger, dbQueryTimeout) + p, err := 
newProcessor(path, "bridge-syncer", logger, dbQueryTimeout, nil) require.NoError(t, err) tx, err := p.db.BeginTx(context.Background(), nil) @@ -1262,7 +1262,7 @@ func TestGetClaimsPaged(t *testing.T) { path := path.Join(t.TempDir(), "bridgesyncGetClaimsPaged.sqlite") require.NoError(t, migrations.RunMigrations(path)) logger := log.WithFields("module", "bridge-syncer") - p, err := newProcessor(path, "bridge-syncer", logger, dbQueryTimeout) + p, err := newProcessor(path, "bridge-syncer", logger, dbQueryTimeout, nil) require.NoError(t, err) tx, err := p.db.BeginTx(context.Background(), nil) @@ -1397,7 +1397,7 @@ func TestProcessor_GetTokenMappings(t *testing.T) { require.NoError(t, err) logger := log.WithFields("module", "bridge-syncer") - p, err := newProcessor(path, "bridge-syncer", logger, dbQueryTimeout) + p, err := newProcessor(path, "bridge-syncer", logger, dbQueryTimeout, nil) require.NoError(t, err) allTokenMappings := make([]*TokenMapping, 0, tokenMappingsCount) @@ -1496,7 +1496,7 @@ func TestProcessor_GetLegacyTokenMigrations(t *testing.T) { require.NoError(t, err) logger := log.WithFields("module", "bridge-syncer") - p, err := newProcessor(path, "bridge-syncer", logger, dbQueryTimeout) + p, err := newProcessor(path, "bridge-syncer", logger, dbQueryTimeout, nil) require.NoError(t, err) const ( @@ -2041,7 +2041,7 @@ func TestDecodeEtrogCalldata(t *testing.T) { func TestQueryBlockRangeOrdering(t *testing.T) { path := path.Join(t.TempDir(), "bridgeSyncerProcessorOrdering.db") logger := log.WithFields("module", "bridge-syncer") - p, err := newProcessor(path, "bridge-syncer", logger, dbQueryTimeout) + p, err := newProcessor(path, "bridge-syncer", logger, dbQueryTimeout, nil) require.NoError(t, err) // Create test data with events in different blocks and positions @@ -2262,7 +2262,7 @@ func TestBridgeSyncRuntimeData_IsCompatible(t *testing.T) { func TestGetClaimByGlobalIndex(t *testing.T) { path := path.Join(t.TempDir(), 
"bridgesyncTestGetClaimByGlobalIndex.sqlite") logger := log.WithFields("module", "bridge-syncer") - p, err := newProcessor(path, "bridge-syncer", logger, dbQueryTimeout) + p, err := newProcessor(path, "bridge-syncer", logger, dbQueryTimeout, nil) require.NoError(t, err) ctx := context.Background() @@ -2817,7 +2817,7 @@ func TestGetClaimsByGlobalIndex_Compact(t *testing.T) { // Create a fresh database for each test case dbPath := filepath.Join(t.TempDir(), "testcase.sqlite") require.NoError(t, migrations.RunMigrations(dbPath)) - testP, err := newProcessor(dbPath, "bridge-syncer", logger, dbQueryTimeout) + testP, err := newProcessor(dbPath, "bridge-syncer", logger, dbQueryTimeout, nil) require.NoError(t, err) // Setup blocks @@ -3065,7 +3065,7 @@ func createTestProcessor(t *testing.T, dbName string) *processor { path := path.Join(t.TempDir(), dbName+".db") logger := log.WithFields("module", "bridge-syncer") - p, err := newProcessor(path, "bridge-syncer", logger, dbQueryTimeout) + p, err := newProcessor(path, "bridge-syncer", logger, dbQueryTimeout, nil) require.NoError(t, err) return p } @@ -3113,7 +3113,7 @@ func TestGetUnsetClaimsPaged(t *testing.T) { path := path.Join(t.TempDir(), "bridgesyncGetUnsetClaimsPaged.sqlite") logger := log.WithFields("module", "bridge-syncer") - p, err := newProcessor(path, "bridge-syncer", logger, dbQueryTimeout) + p, err := newProcessor(path, "bridge-syncer", logger, dbQueryTimeout, nil) require.NoError(t, err) // Create test unset claims @@ -3232,7 +3232,7 @@ func TestGetSetClaimsPaged(t *testing.T) { path := path.Join(t.TempDir(), "bridgesyncGetSetClaimsPaged.sqlite") logger := log.WithFields("module", "bridge-syncer") - p, err := newProcessor(path, "bridge-syncer", logger, dbQueryTimeout) + p, err := newProcessor(path, "bridge-syncer", logger, dbQueryTimeout, nil) require.NoError(t, err) // Create test set claims @@ -3357,7 +3357,7 @@ func TestDatabaseQueryTimeout(t *testing.T) { logger := log.WithFields("module", 
"bridge-syncer-timeout") // Create processor with normal timeout for setup - p, err := newProcessor(path, "bridge-syncer-timeout", logger, normalTimeout) + p, err := newProcessor(path, "bridge-syncer-timeout", logger, normalTimeout, nil) require.NoError(t, err) // Insert some test data to ensure the database is working @@ -3372,7 +3372,7 @@ func TestDatabaseQueryTimeout(t *testing.T) { require.NoError(t, err) // Create a new processor with short timeout for testing timeout behavior - pShortTimeout, err := newProcessor(path, "bridge-syncer-short-timeout", logger, shortTimeout) + pShortTimeout, err := newProcessor(path, "bridge-syncer-short-timeout", logger, shortTimeout, nil) require.NoError(t, err) // Test that operations timeout with short timeout @@ -4522,7 +4522,7 @@ func TestGetClaims_Compact(t *testing.T) { t.Run(tc.name, func(t *testing.T) { ctx := context.Background() dbPath := t.TempDir() + "/test.db" - p, err := newProcessor(dbPath, "test", log.GetDefaultLogger(), time.Second*10) + p, err := newProcessor(dbPath, "test", log.GetDefaultLogger(), time.Second*10, nil) require.NoError(t, err) // Setup blocks @@ -4558,7 +4558,7 @@ func TestGetClaimsPaged_CompactionAcrossPages(t *testing.T) { path := path.Join(t.TempDir(), "claimsPaged_compaction.sqlite") require.NoError(t, migrations.RunMigrations(path)) logger := log.WithFields("module", "bridge-syncer") - p, err := newProcessor(path, "bridge-syncer", logger, dbQueryTimeout) + p, err := newProcessor(path, "bridge-syncer", logger, dbQueryTimeout, nil) require.NoError(t, err) ctx := context.Background() @@ -4878,7 +4878,7 @@ func TestGetClaimsPaged_CompactionAcrossPages(t *testing.T) { // Create a new database for this test dbPath := filepath.Join(t.TempDir(), "case1.sqlite") require.NoError(t, migrations.RunMigrations(dbPath)) - testP, err := newProcessor(dbPath, "bridge-syncer", logger, dbQueryTimeout) + testP, err := newProcessor(dbPath, "bridge-syncer", logger, dbQueryTimeout, nil) require.NoError(t, err) // 
Setup: Insert 3 claims with same global_index and 1 unset_claim @@ -4958,7 +4958,7 @@ func TestGetClaimsPaged_CompactionAcrossPages(t *testing.T) { // Create a new database for this test dbPath := filepath.Join(t.TempDir(), "case2.sqlite") require.NoError(t, migrations.RunMigrations(dbPath)) - testP, err := newProcessor(dbPath, "bridge-syncer", logger, dbQueryTimeout) + testP, err := newProcessor(dbPath, "bridge-syncer", logger, dbQueryTimeout, nil) require.NoError(t, err) // Setup: Insert 3 claims with same global_index, NO unset_claim @@ -5058,7 +5058,7 @@ func TestGetClaimsPaged_CompactionAcrossPages(t *testing.T) { // Create a new database for this test dbPath := filepath.Join(t.TempDir(), "case3.sqlite") require.NoError(t, migrations.RunMigrations(dbPath)) - testP, err := newProcessor(dbPath, "bridge-syncer", logger, dbQueryTimeout) + testP, err := newProcessor(dbPath, "bridge-syncer", logger, dbQueryTimeout, nil) require.NoError(t, err) // Setup: Insert claims with two global_indexes to create valid pagination @@ -5209,7 +5209,7 @@ func TestGetClaimsPaged_CompactionAcrossPages(t *testing.T) { // Create a new database for this test dbPath := filepath.Join(t.TempDir(), "case3_exception.sqlite") require.NoError(t, migrations.RunMigrations(dbPath)) - testP, err := newProcessor(dbPath, "bridge-syncer", logger, dbQueryTimeout) + testP, err := newProcessor(dbPath, "bridge-syncer", logger, dbQueryTimeout, nil) require.NoError(t, err) // Setup: Insert claims + unset_claim @@ -5293,7 +5293,7 @@ func TestGetClaimsPaged_CompactionAcrossPages(t *testing.T) { // Create a new database for this test dbPath := filepath.Join(t.TempDir(), "multiple_indexes.sqlite") require.NoError(t, migrations.RunMigrations(dbPath)) - testP, err := newProcessor(dbPath, "bridge-syncer", logger, dbQueryTimeout) + testP, err := newProcessor(dbPath, "bridge-syncer", logger, dbQueryTimeout, nil) require.NoError(t, err) // Setup: Multiple global indexes with different scenarios @@ -5785,7 +5785,7 @@ 
func TestProcessor_BackwardLET(t *testing.T) { t.Run(c.name, func(t *testing.T) { dbPath := filepath.Join(t.TempDir(), "backward_let_cases.sqlite") require.NoError(t, migrations.RunMigrations(dbPath)) - p, err := newProcessor(dbPath, "bridge-syncer", log.GetDefaultLogger(), dbQueryTimeout) + p, err := newProcessor(dbPath, "bridge-syncer", log.GetDefaultLogger(), dbQueryTimeout, nil) require.NoError(t, err) blocks := c.setupBlocks() @@ -5914,7 +5914,7 @@ func TestGetBoundaryBlock(t *testing.T) { t.Run(tc.name, func(t *testing.T) { dbPath := filepath.Join(t.TempDir(), "get_boundary_block.sqlite") require.NoError(t, migrations.RunMigrations(dbPath)) - p, err := newProcessor(dbPath, "bridge-syncer", log.GetDefaultLogger(), dbQueryTimeout) + p, err := newProcessor(dbPath, "bridge-syncer", log.GetDefaultLogger(), dbQueryTimeout, nil) require.NoError(t, err) // Insert claims if any @@ -6621,7 +6621,7 @@ func setupProcessorWithTransaction(t *testing.T) (*processor, dbtypes.Txer) { require.NoError(t, err) logger := log.WithFields("module", "test") - p, err := newProcessor(dbPath, "test", logger, dbQueryTimeout) + p, err := newProcessor(dbPath, "test", logger, dbQueryTimeout, nil) require.NoError(t, err) p.initialLER = bridgesynctypes.EmptyLER @@ -6652,7 +6652,7 @@ func calculateExpectedRootAfterForwardLET(t *testing.T, initialDepositCount uint require.NoError(t, err) logger := log.WithFields("module", "test-calc") - tempP, err := newProcessor(tempDBPath, "test-calc", logger, dbQueryTimeout) + tempP, err := newProcessor(tempDBPath, "test-calc", logger, dbQueryTimeout, nil) require.NoError(t, err) tempTx, err := db.NewTx(t.Context(), tempP.db) diff --git a/claimsync/claimsync.go b/claimsync/claimsync.go new file mode 100644 index 000000000..04598e952 --- /dev/null +++ b/claimsync/claimsync.go @@ -0,0 +1,164 @@ +package claimsync + +import ( + "context" + "fmt" + "time" + + "github.com/0xPolygon/cdk-contracts-tooling/contracts/aggchain-multisig/agglayerbridge" + 
"github.com/agglayer/aggkit/bridgesync" + claimsyncStorage "github.com/agglayer/aggkit/claimsync/storage" + claimsynctypes "github.com/agglayer/aggkit/claimsync/types" + aggkitcommon "github.com/agglayer/aggkit/common" + "github.com/agglayer/aggkit/db/compatibility" + "github.com/agglayer/aggkit/log" + "github.com/agglayer/aggkit/sync" + aggkittypes "github.com/agglayer/aggkit/types" + "github.com/ethereum/go-ethereum/common" +) + +const ( + downloadBufferSize = 1000 + defaultDBTimeout = 30 * time.Second +) + +// ClaimSyncer is the interface for the claim syncer component used by aggsender. +type ClaimSyncer interface { + Start(ctx context.Context) +} + +// NewFromBridgeSync creates a ClaimSyncer backed by an existing BridgeSync that +// has an embedded claim processor. It returns nil if bs is nil. +func NewFromBridgeSync(bs *bridgesync.BridgeSync) ClaimSyncer { + if bs == nil { + return nil + } + return &bridgeSyncClaimSyncer{bs: bs} +} + +type bridgeSyncClaimSyncer struct { + bs *bridgesync.BridgeSync +} + +func (b *bridgeSyncClaimSyncer) Start(_ context.Context) {} + +// ClaimSync is the standalone implementation that independently processes claim events. +type ClaimSync struct { + processor *processor + driver *sync.EVMDriver +} + +// NewStandaloneClaimSync creates a standalone ClaimSync that indexes claim events from the bridge contract directly. +func NewStandaloneClaimSync( + ctx context.Context, + cfg bridgesync.Config, + rd sync.ReorgDetector, + ethClient aggkittypes.EthClienter, + syncerID claimsynctypes.ClaimSyncerID, +) (*ClaimSync, error) { + logger := log.WithFields("module", syncerID.String()) + return NewClaimSync(ctx, cfg, rd, ethClient, syncerID, logger) +} + +// NewClaimSync creates a standalone ClaimSync that indexes claim events from the bridge contract directly. 
+func NewClaimSync( + ctx context.Context, + cfg bridgesync.Config, + rd sync.ReorgDetector, + ethClient aggkittypes.EthClienter, + syncerID claimsynctypes.ClaimSyncerID, + logger aggkitcommon.Logger, +) (*ClaimSync, error) { + + dbQueryTimeout := cfg.DBQueryTimeout.Duration + if dbQueryTimeout == 0 { + dbQueryTimeout = defaultDBTimeout + } + store, err := claimsyncStorage.NewStandalone(logger, cfg.DBPath, syncerID.String()) + if err != nil { + return nil, fmt.Errorf("claimsync: failed to create storage: %w", err) + } + + proc, err := newProcessor(logger, store, dbQueryTimeout) + if err != nil { + return nil, err + } + + agglayerBridgeContract, err := agglayerbridge.NewAgglayerbridge(cfg.BridgeAddr, ethClient) + if err != nil { + return nil, fmt.Errorf("claimsync: failed to create AgglayerBridge binding: %w", err) + } + + isSovereign, agglayerBridgeL2Contract, err := detectSovereignChain(ctx, cfg.BridgeAddr, ethClient) + if err != nil { + return nil, fmt.Errorf("claimsync: failed to detect chain type: %w", err) + } + + appender, err := buildAppender(ctx, ethClient, proc, cfg.BridgeAddr, + agglayerBridgeContract, agglayerBridgeL2Contract, isSovereign, logger) + if err != nil { + return nil, fmt.Errorf("claimsync: failed to build appender: %w", err) + } + + rh := &sync.RetryHandler{ + MaxRetryAttemptsAfterError: cfg.MaxRetryAttemptsAfterError, + RetryAfterErrorPeriod: cfg.RetryAfterErrorPeriod.Duration, + } + + downloader, err := sync.NewEVMDownloader( + syncerID.String(), + sync.NewAdapterEthClientToMultidownloader(ethClient), + cfg.SyncBlockChunkSize, + cfg.BlockFinality, + cfg.WaitForNewBlocksPeriod.Duration, + appender, + []common.Address{cfg.BridgeAddr}, + rh, + rd.GetFinalizedBlockType(), + rd, + syncerID.String(), + ) + if err != nil { + return nil, fmt.Errorf("claimsync: failed to create EVMDownloader: %w", err) + } + + lastBlock, err := proc.GetLastProcessedBlock(ctx) + if err != nil { + return nil, fmt.Errorf("claimsync: get last processed block: %w", err) 
+ } + if lastBlock < cfg.InitialBlockNum { + header, err := ethClient.CustomHeaderByNumber(ctx, aggkittypes.NewBlockNumber(cfg.InitialBlockNum)) + if err != nil { + return nil, fmt.Errorf("claimsync: get initial block %d: %w", cfg.InitialBlockNum, err) + } + if err := proc.ProcessBlock(ctx, sync.Block{Num: cfg.InitialBlockNum, Hash: header.Hash}); err != nil { + return nil, fmt.Errorf("claimsync: process initial block %d: %w", cfg.InitialBlockNum, err) + } + } + + compatibilityChecker := compatibility.NewCompatibilityCheck( + cfg.RequireStorageContentCompatibility, + downloader.RuntimeData, + proc, + ) + + driver, err := sync.NewEVMDriver(rd, proc, downloader, syncerID.String(), downloadBufferSize, rh, compatibilityChecker) + if err != nil { + return nil, fmt.Errorf("claimsync: failed to create EVMDriver: %w", err) + } + + logger.Infof( + "claimsync created: dbPath=%s initialBlock=%d blockFinality=%s bridgeAddr=%s sovereign=%t", + cfg.DBPath, cfg.InitialBlockNum, cfg.BlockFinality.String(), cfg.BridgeAddr.String(), isSovereign, + ) + + return &ClaimSync{ + processor: proc, + driver: driver, + }, nil +} + +// Start starts the synchronization process. 
+func (c *ClaimSync) Start(ctx context.Context) { + c.driver.Sync(ctx) +} diff --git a/claimsync/downloader.go b/claimsync/downloader.go new file mode 100644 index 000000000..bcbb9be07 --- /dev/null +++ b/claimsync/downloader.go @@ -0,0 +1,258 @@ +package claimsync + +import ( + "context" + "errors" + "fmt" + "math/big" + "strings" + + "github.com/0xPolygon/cdk-contracts-tooling/contracts/aggchain-multisig/agglayerbridge" + "github.com/0xPolygon/cdk-contracts-tooling/contracts/aggchain-multisig/agglayerbridgel2" + "github.com/0xPolygon/cdk-contracts-tooling/contracts/aggchain-multisig/polygonzkevmbridge" + "github.com/agglayer/aggkit/bridgesync" + bridgesynctypes "github.com/agglayer/aggkit/bridgesync/types" + aggkitcommon "github.com/agglayer/aggkit/common" + "github.com/agglayer/aggkit/db" + dbtypes "github.com/agglayer/aggkit/db/types" + "github.com/agglayer/aggkit/sync" + treetypes "github.com/agglayer/aggkit/tree/types" + aggkittypes "github.com/agglayer/aggkit/types" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + gethvm "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/crypto" +) + +var ( + claimEventSignature = crypto.Keccak256Hash([]byte("ClaimEvent(uint256,uint32,address,address,uint256)")) + claimEventSignaturePreEtrog = crypto.Keccak256Hash([]byte("ClaimEvent(uint32,uint32,address,address,uint256)")) + detailedClaimEventSignature = crypto.Keccak256Hash([]byte( + "DetailedClaimEvent(bytes32[32],bytes32[32]," + + "uint256,bytes32,bytes32,uint8,uint32," + + "address,uint32,address,uint256,bytes)", + )) + unsetClaimEventSignature = crypto.Keccak256Hash([]byte("UpdatedUnsetGlobalIndexHashChain(bytes32,bytes32)")) + setClaimEventSignature = crypto.Keccak256Hash([]byte("SetClaim(bytes32)")) +) + +// ClaimQuerier is used by event handlers to check the DetailedClaimEvent boundary. 
+type ClaimQuerier interface { + GetBoundaryBlockForClaimType(tx dbtypes.Querier, claimType bridgesync.ClaimType) (uint64, error) +} + +// buildAppender creates the LogAppenderMap for claim events from the bridge contract. +func buildAppender( + ctx context.Context, + ethClient aggkittypes.EthClienter, + querier ClaimQuerier, + bridgeAddr common.Address, + agglayerBridgeContract *agglayerbridge.Agglayerbridge, + agglayerBridgeL2Contract *agglayerbridgel2.Agglayerbridgel2, + isSovereign bool, + log aggkitcommon.Logger, +) (sync.LogAppenderMap, error) { + legacyBridge, err := polygonzkevmbridge.NewPolygonzkevmbridge(bridgeAddr, ethClient) + if err != nil { + return nil, fmt.Errorf("claimsync: failed to create PolygonZkEVMBridge binding: %w", err) + } + + appender := make(sync.LogAppenderMap) + appender[claimEventSignaturePreEtrog] = buildClaimEventHandlerPreEtrog(legacyBridge, log) + appender[claimEventSignature] = buildClaimEventHandler(ctx, agglayerBridgeContract, querier, log) + + if isSovereign { + appender[detailedClaimEventSignature] = buildDetailedClaimEventHandler(agglayerBridgeL2Contract) + appender[unsetClaimEventSignature] = buildUnsetClaimEventHandler(agglayerBridgeL2Contract) + appender[setClaimEventSignature] = buildSetClaimEventHandler(agglayerBridgeL2Contract) + } + + return appender, nil +} + +// detectSovereignChain returns true if bridgeAddr is a sovereign chain bridge (AgglayerBridgeL2). +// It also returns the AgglayerBridgeL2 binding regardless (always created). 
+func detectSovereignChain( + ctx context.Context, + bridgeAddr common.Address, + backend bind.ContractBackend, +) (bool, *agglayerbridgel2.Agglayerbridgel2, error) { + contract, err := agglayerbridgel2.NewAgglayerbridgel2(bridgeAddr, backend) + if err != nil { + return false, nil, fmt.Errorf("claimsync: failed to create AgglayerBridgeL2 binding: %w", err) + } + + callOpts := &bind.CallOpts{Pending: false, Context: ctx} + if _, err := contract.BridgeManager(callOpts); err == nil { + return true, contract, nil + } else if !strings.Contains(err.Error(), gethvm.ErrExecutionReverted.Error()) { + return false, nil, fmt.Errorf("claimsync: unexpected error querying AgglayerBridgeL2.BridgeManager: %w", err) + } + + return false, contract, nil +} + +// buildClaimEventHandler creates a handler for the ClaimEvent log. +func buildClaimEventHandler( + ctx context.Context, + contract *agglayerbridge.Agglayerbridge, + querier ClaimQuerier, + log aggkitcommon.Logger, +) func(*sync.EVMBlock, types.Log) error { + return func(b *sync.EVMBlock, l types.Log) error { + // Skip if DetailedClaimEvent indexing has already started at this block + boundaryBlock, err := querier.GetBoundaryBlockForClaimType(nil, bridgesync.DetailedClaimEvent) + if err != nil && !errors.Is(err, db.ErrNotFound) { + return fmt.Errorf("claimsync: failed checking DetailedClaimEvent boundary: %w", err) + } + if err == nil && l.BlockNumber >= boundaryBlock { + log.Debugf("claimsync: skipping ClaimEvent at block %d; DetailedClaimEvent started at %d", + l.BlockNumber, boundaryBlock) + return nil + } + + // Skip if a DetailedClaimEvent for the same tx is already in the block's events + for _, raw := range b.Events { + if e, ok := raw.(bridgesync.Event); ok && e.Claim != nil && + e.Claim.Type == bridgesync.DetailedClaimEvent && e.Claim.TxHash == l.TxHash { + log.Debugf("claimsync: skipping ClaimEvent at block %d tx %s; DetailedClaimEvent already present", + l.BlockNumber, l.TxHash.Hex()) + return nil + } + } + + 
claimEvent, err := contract.ParseClaimEvent(l) + if err != nil { + return fmt.Errorf("claimsync: error parsing ClaimEvent log: %w", err) + } + + b.Events = append(b.Events, bridgesync.Event{Claim: &bridgesync.Claim{ + BlockNum: b.Num, + BlockPos: uint64(l.Index), + BlockTimestamp: b.Timestamp, + TxHash: l.TxHash, + GlobalIndex: claimEvent.GlobalIndex, + OriginNetwork: claimEvent.OriginNetwork, + OriginAddress: claimEvent.OriginAddress, + DestinationAddress: claimEvent.DestinationAddress, + Amount: claimEvent.Amount, + Type: bridgesync.ClaimEvent, + }}) + return nil + } +} + +// buildDetailedClaimEventHandler creates a handler for the DetailedClaimEvent log (sovereign chains). +func buildDetailedClaimEventHandler( + contract *agglayerbridgel2.Agglayerbridgel2, +) func(*sync.EVMBlock, types.Log) error { + return func(b *sync.EVMBlock, l types.Log) error { + claimEvent, err := contract.ParseDetailedClaimEvent(l) + if err != nil { + return fmt.Errorf("claimsync: error parsing DetailedClaimEvent log: %w", err) + } + + claim := &bridgesync.Claim{ + BlockNum: b.Num, + BlockPos: uint64(l.Index), + BlockTimestamp: b.Timestamp, + TxHash: l.TxHash, + GlobalIndex: claimEvent.GlobalIndex, + OriginNetwork: claimEvent.OriginNetwork, + OriginAddress: claimEvent.OriginTokenAddress, + DestinationNetwork: claimEvent.DestinationNetwork, + DestinationAddress: claimEvent.DestinationAddress, + Amount: claimEvent.Amount, + Metadata: claimEvent.Metadata, + MainnetExitRoot: claimEvent.MainnetExitRoot, + RollupExitRoot: claimEvent.RollupExitRoot, + ProofLocalExitRoot: treetypes.NewProof(claimEvent.SmtProofLocalExitRoot), + ProofRollupExitRoot: treetypes.NewProof(claimEvent.SmtProofRollupExitRoot), + GlobalExitRoot: crypto.Keccak256Hash(claimEvent.MainnetExitRoot[:], claimEvent.RollupExitRoot[:]), + IsMessage: claimEvent.LeafType == uint8(bridgesynctypes.LeafTypeMessage), + Type: bridgesync.DetailedClaimEvent, + } + + // Remove any ClaimEvent for the same tx (DetailedClaimEvent takes 
precedence) + newEvents := make([]interface{}, 0, len(b.Events)) + for _, raw := range b.Events { + if e, ok := raw.(bridgesync.Event); ok && e.Claim != nil && + e.Claim.Type == bridgesync.ClaimEvent && e.Claim.TxHash == l.TxHash { + continue + } + newEvents = append(newEvents, raw) + } + b.Events = newEvents + b.Events = append(b.Events, bridgesync.Event{Claim: claim}) + return nil + } +} + +// buildClaimEventHandlerPreEtrog creates a handler for the pre-Etrog ClaimEvent log. +func buildClaimEventHandlerPreEtrog( + contract *polygonzkevmbridge.Polygonzkevmbridge, + log aggkitcommon.Logger, +) func(*sync.EVMBlock, types.Log) error { + return func(b *sync.EVMBlock, l types.Log) error { + claimEvent, err := contract.ParseClaimEvent(l) + if err != nil { + return fmt.Errorf("claimsync: error parsing pre-Etrog ClaimEvent log: %w", err) + } + + log.Debugf("claimsync: parsed pre-Etrog ClaimEvent: index %d block %d", claimEvent.Index, b.Num) + b.Events = append(b.Events, bridgesync.Event{Claim: &bridgesync.Claim{ + BlockNum: b.Num, + BlockPos: uint64(l.Index), + BlockTimestamp: b.Timestamp, + TxHash: l.TxHash, + GlobalIndex: new(big.Int).SetUint64(uint64(claimEvent.Index)), + OriginNetwork: claimEvent.OriginNetwork, + OriginAddress: claimEvent.OriginAddress, + DestinationAddress: claimEvent.DestinationAddress, + Amount: claimEvent.Amount, + }}) + return nil + } +} + +// buildUnsetClaimEventHandler creates a handler for the UpdatedUnsetGlobalIndexHashChain log. 
+func buildUnsetClaimEventHandler( + contract *agglayerbridgel2.Agglayerbridgel2, +) func(*sync.EVMBlock, types.Log) error { + return func(b *sync.EVMBlock, l types.Log) error { + event, err := contract.ParseUpdatedUnsetGlobalIndexHashChain(l) + if err != nil { + return fmt.Errorf("claimsync: error parsing UpdatedUnsetGlobalIndexHashChain log: %w", err) + } + + b.Events = append(b.Events, bridgesync.Event{UnsetClaim: &bridgesync.UnsetClaim{ + BlockNum: b.Num, + BlockPos: uint64(l.Index), + TxHash: l.TxHash, + GlobalIndex: new(big.Int).SetBytes(event.UnsetGlobalIndex[:]), + UnsetGlobalIndexHashChain: event.NewUnsetGlobalIndexHashChain, + }}) + return nil + } +} + +// buildSetClaimEventHandler creates a handler for the SetClaim log. +func buildSetClaimEventHandler( + contract *agglayerbridgel2.Agglayerbridgel2, +) func(*sync.EVMBlock, types.Log) error { + return func(b *sync.EVMBlock, l types.Log) error { + event, err := contract.ParseSetClaim(l) + if err != nil { + return fmt.Errorf("claimsync: error parsing SetClaim log: %w", err) + } + + b.Events = append(b.Events, bridgesync.Event{SetClaim: &bridgesync.SetClaim{ + BlockNum: b.Num, + BlockPos: uint64(l.Index), + TxHash: l.TxHash, + GlobalIndex: new(big.Int).SetBytes(event.GlobalIndex[:]), + }}) + return nil + } +} diff --git a/claimsync/embedded.go b/claimsync/embedded.go new file mode 100644 index 000000000..c7d26904a --- /dev/null +++ b/claimsync/embedded.go @@ -0,0 +1,150 @@ +package claimsync + +import ( + "context" + "database/sql" + "fmt" + "time" + + "github.com/0xPolygon/cdk-contracts-tooling/contracts/aggchain-multisig/agglayerbridge" + "github.com/agglayer/aggkit/bridgesync" + claimsyncStorage "github.com/agglayer/aggkit/claimsync/storage" + claimsynctypes "github.com/agglayer/aggkit/claimsync/types" + aggkitcommon "github.com/agglayer/aggkit/common" + dbtypes "github.com/agglayer/aggkit/db/types" + "github.com/agglayer/aggkit/sync" + aggkittypes "github.com/agglayer/aggkit/types" + 
"github.com/ethereum/go-ethereum/common" +) + +type claimEmbeddedProcessor struct { + log aggkitcommon.Logger + storage claimsynctypes.ClaimStorager +} + +func newEmbeddedProcessor(logger aggkitcommon.Logger, storage claimsynctypes.ClaimStorager) *claimEmbeddedProcessor { + return &claimEmbeddedProcessor{ + log: logger, + storage: storage, + } +} + +// --- Embedded mode --- + +// embeddedClaimSync is passed to bridgesync as a ClaimEventsProcessor. +// It has no own EVMDriver; bridgesync drives event download and calls ProcessClaimEvents +// from its own ProcessBlock, reusing bridgesync's transaction for atomicity. +type embeddedClaimSync struct { + Appender sync.LogAppenderMap + Processor *claimEmbeddedProcessor + Reader claimsynctypes.ClaimsReader +} + +// NewClaimStorage creates a claim storage instance for embedded mode, using the provided database connection. +func NewClaimStorage( + database *sql.DB, + logger aggkitcommon.Logger, + syncerID claimsynctypes.ClaimSyncerID, +) (claimsynctypes.ClaimStorager, error) { + store, err := claimsyncStorage.New(logger, database, syncerID.String()) + if err != nil { + return nil, fmt.Errorf("claimsync: failed to create storage: %w", err) + } + return store, nil +} + +// NewEmbedded creates a ClaimEventsProcessor for embedding inside bridgesync. +// It provides claimsync's claim event handlers (for appender merging) and processes +// claim events using bridgesync's own transaction — no separate DB or EVMDriver is created. +// The querier is typically bridgesync's processor (satisfies ClaimQuerier). 
+func NewEmbedded( + ctx context.Context, + storage claimsynctypes.ClaimStorager, + bridgeAddr common.Address, + ethClient aggkittypes.EthClienter, + querier ClaimQuerier, + syncerID claimsynctypes.ClaimSyncerID, + dbQueryTimeout time.Duration, + logger aggkitcommon.Logger, +) (*embeddedClaimSync, error) { + proc := newEmbeddedProcessor(logger, storage) + agglayerBridgeContract, err := agglayerbridge.NewAgglayerbridge(bridgeAddr, ethClient) + if err != nil { + return nil, fmt.Errorf("claimsync embedded: failed to create AgglayerBridge binding: %w", err) + } + reader := NewProcessorReader(logger, storage) + + isSovereign, agglayerBridgeL2Contract, err := detectSovereignChain(ctx, bridgeAddr, ethClient) + if err != nil { + return nil, fmt.Errorf("claimsync embedded: failed to detect chain type: %w", err) + } + + appender, err := buildAppender(ctx, ethClient, reader, bridgeAddr, + agglayerBridgeContract, agglayerBridgeL2Contract, isSovereign, logger) + if err != nil { + return nil, fmt.Errorf("claimsync embedded: failed to build appender: %w", err) + } + + logger.Infof("claimsync embedded created: bridgeAddr=%s sovereign=%t", bridgeAddr.String(), isSovereign) + + return &embeddedClaimSync{ + Processor: proc, + Reader: reader, + Appender: appender}, nil +} +func (p *claimEmbeddedProcessor) ProcessBlockWithTx(tx dbtypes.Querier, block *sync.Block, insertBlock bool) error { + if insertBlock { + if err := p.storage.InsertBlock(tx, block.Num, block.Hash.String()); err != nil { + p.log.Errorf("failed to insert block %d: %v", block.Num, err) + return err + } + } + + for _, e := range block.Events { + event, ok := e.(bridgesync.Event) + if !ok { + p.log.Errorf("failed to convert event to bridgesync.Event type in block %d", block.Num) + return fmt.Errorf("claimsync ProcessBlock: unexpected event type %T in block %d", e, block.Num) + } + + if event.Claim != nil { + if err := p.storage.InsertClaim(tx, *event.Claim); err != nil { + p.log.Errorf("failed to insert claim event at 
block %d: %v", block.Num, err) + return err + } + } + + if event.UnsetClaim != nil { + if err := p.storage.InsertUnsetClaim(tx, *event.UnsetClaim); err != nil { + p.log.Errorf("failed to insert unset_claim event at block %d: %v", block.Num, err) + return err + } + } + + if event.SetClaim != nil { + if err := p.storage.InsertSetClaim(tx, *event.SetClaim); err != nil { + p.log.Errorf("failed to insert set_claim event at block %d: %v", block.Num, err) + return err + } + } + } + return nil +} + +// ReorgWithTx deletes all blocks >= firstReorgedBlock using the provided transaction. +// The caller is responsible for commit and rollback. +// In embedded mode the block may already have been deleted within the same tx. +// It returns: +// - the number of rows affected (currently the number of blocks deleted) +// - error if the deletion failed, or nil if successful +func (p *claimEmbeddedProcessor) ReorgWithTx(tx dbtypes.Querier, firstReorgedBlock uint64) (int64, error) { + return p.deleteBlocksFrom(tx, firstReorgedBlock) +} + +func (p *claimEmbeddedProcessor) deleteBlocksFrom(tx dbtypes.Querier, firstReorgedBlock uint64) (int64, error) { + rowsAffected, err := p.storage.DeleteBlocksFrom(tx, firstReorgedBlock) + if err != nil { + return 0, fmt.Errorf("claimsync deleteBlocksFrom: %w", err) + } + return rowsAffected, nil +} diff --git a/claimsync/processor.go b/claimsync/processor.go new file mode 100644 index 000000000..4e373da40 --- /dev/null +++ b/claimsync/processor.go @@ -0,0 +1,114 @@ +package claimsync + +import ( + "context" + "database/sql" + "errors" + "fmt" + "time" + + "github.com/agglayer/aggkit/bridgesync" + claimsynctypes "github.com/agglayer/aggkit/claimsync/types" + aggkitcommon "github.com/agglayer/aggkit/common" + "github.com/agglayer/aggkit/db/compatibility" + dbtypes "github.com/agglayer/aggkit/db/types" + "github.com/agglayer/aggkit/sync" +) + +type processor struct { + storage claimsynctypes.ClaimStorager + log aggkitcommon.Logger + dbQueryTimeout 
time.Duration + compatibility.CompatibilityDataStorager[sync.RuntimeData] + embeddedProcessor claimsynctypes.EmbeddedProcessor +} + +func newProcessor(logger aggkitcommon.Logger, storage claimsynctypes.ClaimStorager, dbQueryTimeout time.Duration) (*processor, error) { + return &processor{ + storage: storage, + log: logger, + dbQueryTimeout: dbQueryTimeout, + CompatibilityDataStorager: storage, + embeddedProcessor: newEmbeddedProcessor(logger, storage), + }, nil +} + +// ProcessBlock stores the block and its claim-related events atomically. +func (p *processor) ProcessBlock(ctx context.Context, block sync.Block) error { + dbCtx, cancel := p.withDatabaseTimeout(ctx) + defer cancel() + + tx, err := p.storage.NewTx(dbCtx) + if err != nil { + p.log.Errorf("failed to start transaction for block %d: %v", block.Num, err) + return err + } + shouldRollback := true + defer func() { + if shouldRollback { + p.rollbackTx(tx) + } + }() + result := p.embeddedProcessor.ProcessBlockWithTx(tx, &block, true) + if result != nil { + return result + } + if err := tx.Commit(); err != nil { + p.log.Errorf("failed to commit block %d: %v", block.Num, err) + return err + } + shouldRollback = false + return nil +} + +// Reorg deletes all blocks >= firstReorgedBlock (cascade-deletes claims, unset_claims, set_claims via FK). 
+func (p *processor) Reorg(ctx context.Context, firstReorgedBlock uint64) error { + p.log.Infof("reorg detected at block %d", firstReorgedBlock) + + dbCtx, cancel := p.withDatabaseTimeout(ctx) + defer cancel() + + tx, err := p.storage.NewTx(dbCtx) + if err != nil { + return fmt.Errorf("claimsync Reorg: start tx: %w", err) + } + shouldRollback := true + defer func() { + if shouldRollback { + p.rollbackTx(tx) + } + }() + + rowsAffected, err := p.embeddedProcessor.ReorgWithTx(tx, firstReorgedBlock) + if err != nil { + return fmt.Errorf("claimsync Reorg: %w", err) + } + + if err := tx.Commit(); err != nil { + return fmt.Errorf("claimsync Reorg: commit: %w", err) + } + shouldRollback = false + + p.log.Infof("reorged to block %d, %d rows deleted", firstReorgedBlock, rowsAffected) + return nil +} + +// GetLastProcessedBlock returns the highest block number stored. +func (p *processor) GetLastProcessedBlock(_ context.Context) (uint64, error) { + return p.storage.GetLastProcessedBlock(nil) +} + +// GetBoundaryBlockForClaimType returns the max block_num for claims of the given type. 
+func (p *processor) GetBoundaryBlockForClaimType(tx dbtypes.Querier, claimType bridgesync.ClaimType) (uint64, error) { + return p.storage.GetBoundaryBlockForClaimType(tx, claimType) +} + +func (p *processor) withDatabaseTimeout(ctx context.Context) (context.Context, context.CancelFunc) { + return context.WithTimeout(ctx, p.dbQueryTimeout) +} + +func (p *processor) rollbackTx(tx dbtypes.SQLTxer) { + if err := tx.Rollback(); err != nil && !errors.Is(err, sql.ErrTxDone) { + p.log.Errorf("error rolling back tx: %v", err) + } +} diff --git a/claimsync/reader.go b/claimsync/reader.go new file mode 100644 index 000000000..f70eb9b68 --- /dev/null +++ b/claimsync/reader.go @@ -0,0 +1,45 @@ +package claimsync + +import ( + "math/big" + + "github.com/agglayer/aggkit/bridgesync" + claimsynctypes "github.com/agglayer/aggkit/claimsync/types" + aggkitcommon "github.com/agglayer/aggkit/common" + dbtypes "github.com/agglayer/aggkit/db/types" +) + +var _ claimsynctypes.ClaimsReader = (*processorReader)(nil) + +type processorReader struct { + storage claimsynctypes.ClaimStorager + log aggkitcommon.Logger +} + +func NewProcessorReader(logger aggkitcommon.Logger, storage claimsynctypes.ClaimStorager) *processorReader { + return &processorReader{ + storage: storage, + log: logger, + } +} + +// GetLastProcessedBlock returns the highest block number stored. +func (p *processorReader) GetLastProcessedBlock(tx dbtypes.Querier) (uint64, error) { + return p.storage.GetLastProcessedBlock(tx) +} + +// GetBoundaryBlockForClaimType returns the max block_num for claims of the given type. +// Returns db.ErrNotFound if no claims of that type exist. +func (p *processorReader) GetBoundaryBlockForClaimType(tx dbtypes.Querier, claimType bridgesync.ClaimType) (uint64, error) { + return p.storage.GetBoundaryBlockForClaimType(tx, claimType) +} + +// GetClaims returns claims in [fromBlock, toBlock] using compaction logic. 
+func (p *processorReader) GetClaims(tx dbtypes.Querier, fromBlock, toBlock uint64) ([]bridgesync.Claim, error) { + return p.storage.GetClaims(tx, fromBlock, toBlock) +} + +// GetClaimsByGlobalIndex returns claims for the given global index using compaction logic. +func (p *processorReader) GetClaimsByGlobalIndex(tx dbtypes.Querier, globalIndex *big.Int) ([]bridgesync.Claim, error) { + return p.storage.GetClaimsByGlobalIndex(tx, globalIndex) +} diff --git a/claimsync/storage/migrations/claimsync0001.sql b/claimsync/storage/migrations/claimsync0001.sql new file mode 100644 index 000000000..6086e99df --- /dev/null +++ b/claimsync/storage/migrations/claimsync0001.sql @@ -0,0 +1,53 @@ +-- +migrate Down +DROP TABLE IF EXISTS claim; +DROP TABLE IF EXISTS block; +DROP TABLE IF EXISTS set_claim; +DROP TABLE IF EXISTS unset_claim; +-- +migrate Up +CREATE TABLE block ( + num BIGINT PRIMARY KEY, + hash VARCHAR +); + +CREATE TABLE claim ( + block_num INTEGER NOT NULL REFERENCES block(num) ON DELETE CASCADE, + block_pos INTEGER NOT NULL, + global_index TEXT NOT NULL, + origin_network INTEGER NOT NULL, + origin_address VARCHAR NOT NULL, + destination_address VARCHAR NOT NULL, + amount TEXT NOT NULL, + proof_local_exit_root VARCHAR, + proof_rollup_exit_root VARCHAR, + mainnet_exit_root VARCHAR, + rollup_exit_root VARCHAR, + global_exit_root VARCHAR, + destination_network INTEGER NOT NULL, + metadata BLOB, + is_message BOOLEAN, + tx_hash VARCHAR, + block_timestamp INTEGER, + type TEXT NOT NULL DEFAULT '', + PRIMARY KEY (block_num, block_pos) +); + +CREATE INDEX IF NOT EXISTS idx_claim_type_block ON claim (type, block_num); + +CREATE TABLE unset_claim ( + block_num INTEGER NOT NULL REFERENCES block(num) ON DELETE CASCADE, + block_pos INTEGER NOT NULL, + tx_hash VARCHAR NOT NULL, + global_index TEXT NOT NULL, + unset_global_index_hash_chain VARCHAR NOT NULL, + created_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now')), + PRIMARY KEY (block_num, block_pos) +); + +CREATE TABLE 
set_claim (
+    block_num INTEGER NOT NULL REFERENCES block(num) ON DELETE CASCADE,
+    block_pos INTEGER NOT NULL,
+    tx_hash VARCHAR NOT NULL,
+    global_index TEXT NOT NULL,
+    created_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now')),
+    PRIMARY KEY (block_num, block_pos)
+);
diff --git a/claimsync/storage/migrations/migrations.go b/claimsync/storage/migrations/migrations.go
new file mode 100644
index 000000000..a84f67274
--- /dev/null
+++ b/claimsync/storage/migrations/migrations.go
@@ -0,0 +1,41 @@
+package migrations
+
+import (
+	"database/sql"
+	_ "embed"
+
+	aggkitcommon "github.com/agglayer/aggkit/common"
+	"github.com/agglayer/aggkit/db"
+	dbmigrations "github.com/agglayer/aggkit/db/migrations"
+	"github.com/agglayer/aggkit/db/types"
+)
+
+// ClaimSync0001 is public because bridgesync needs it to
+// set the migrations: this 0001 is equivalent to bridgesync0014.
+//
+//go:embed claimsync0001.sql
+var ClaimSync0001 string
+
+func GetClaimSyncMigrations() []types.Migration {
+	return []types.Migration{
+		{
+			ID:  "claimsync0001",
+			SQL: ClaimSync0001,
+		},
+	}
+}
+
+func GetFullMigrations() []types.Migration {
+	baseMigrations := dbmigrations.GetBaseMigrations()
+	claimSyncMigrations := GetClaimSyncMigrations()
+	total := len(baseMigrations) + len(claimSyncMigrations)
+	combined := make([]types.Migration, 0, total)
+	combined = append(combined, baseMigrations...)
+	combined = append(combined, claimSyncMigrations...)
+ return combined +} + +// RunMigrations applies all pending migrations to the given database +func RunMigrations(logger aggkitcommon.Logger, database *sql.DB) error { + return db.RunMigrationsDB(logger, database, GetFullMigrations()) +} diff --git a/claimsync/storage/storage.go b/claimsync/storage/storage.go new file mode 100644 index 000000000..612e2a9ba --- /dev/null +++ b/claimsync/storage/storage.go @@ -0,0 +1,296 @@ +package storage + +import ( + "context" + "database/sql" + "errors" + "fmt" + "math/big" + + "github.com/agglayer/aggkit/bridgesync" + "github.com/agglayer/aggkit/claimsync/storage/migrations" + claimsynctypes "github.com/agglayer/aggkit/claimsync/types" + aggkitcommon "github.com/agglayer/aggkit/common" + "github.com/agglayer/aggkit/db" + "github.com/agglayer/aggkit/db/compatibility" + dbtypes "github.com/agglayer/aggkit/db/types" + aggsync "github.com/agglayer/aggkit/sync" + "github.com/russross/meddler" +) + +var _ claimsynctypes.ClaimStorager = (*claimStorage)(nil) + +// blockRecord is the meddler-tagged struct for the block table. 
+type blockRecord struct { + Num uint64 `meddler:"num"` + Hash string `meddler:"hash"` +} + +const ( + claimColumnsSQL = `block_num, + block_pos, + tx_hash, + global_index, + origin_network, + origin_address, + destination_address, + amount, + proof_local_exit_root, + proof_rollup_exit_root, + mainnet_exit_root, + rollup_exit_root, + global_exit_root, + destination_network, + metadata, + is_message, + block_timestamp, + type` + + compactedClaimsSelectSQL = ` + o.block_num, + o.block_pos, + o.tx_hash, + o.global_index, + o.origin_network, + o.origin_address, + o.destination_address, + o.amount, + n.proof_local_exit_root, + n.proof_rollup_exit_root, + n.mainnet_exit_root, + n.rollup_exit_root, + n.global_exit_root, + o.destination_network, + o.metadata, + o.is_message, + o.block_timestamp, + o.type` +) + +type claimStorage struct { + database dbtypes.DBer + compatStore compatibility.CompatibilityDataStorager[aggsync.RuntimeData] +} + +// NewStandalone opens (or creates) the SQLite database at dbPath, runs all pending migrations, +// and returns a ready-to-use Storage along with the underlying *sql.DB +// (needed by the processor for transaction management). 
+func NewStandalone(logger aggkitcommon.Logger, dbPath string, ownerName string) (claimsynctypes.ClaimStorager, error) {
+	database, err := db.NewSQLiteDB(dbPath)
+	if err != nil {
+		return nil, fmt.Errorf("claimsync storage: failed to open SQLite DB at %s: %w", dbPath, err)
+	}
+
+	if err := migrations.RunMigrations(logger, database); err != nil {
+		database.Close() //nolint:errcheck
+		return nil, fmt.Errorf("claimsync storage: failed to run migrations: %w", err)
+	}
+
+	return &claimStorage{
+		database:    database,
+		compatStore: compatibility.NewKeyValueToCompatibilityStorage[aggsync.RuntimeData](db.NewKeyValueStorage(database), ownerName),
+	}, nil
+}
+
+// New creates a Storage using the provided sql.DB, so it can share an existing database connection with other components.
+func New(logger aggkitcommon.Logger, database *sql.DB, ownerName string) (claimsynctypes.ClaimStorager, error) {
+	return &claimStorage{
+		database:    database,
+		compatStore: compatibility.NewKeyValueToCompatibilityStorage[aggsync.RuntimeData](db.NewKeyValueStorage(database), ownerName),
+	}, nil
+}
+
+// NewTx implements claimsynctypes.ClaimStorager.
+func (s *claimStorage) NewTx(ctx context.Context) (dbtypes.Txer, error) {
+	return db.NewTx(ctx, s.database)
+}
+
+// GetCompatibilityData implements claimsynctypes.ClaimStorager.
+func (s *claimStorage) GetCompatibilityData(ctx context.Context, tx dbtypes.Querier) (bool, aggsync.RuntimeData, error) {
+	return s.compatStore.GetCompatibilityData(ctx, tx)
+}
+
+// SetCompatibilityData implements claimsynctypes.ClaimStorager.
+func (s *claimStorage) SetCompatibilityData(ctx context.Context, tx dbtypes.Querier, data aggsync.RuntimeData) error {
+	return s.compatStore.SetCompatibilityData(ctx, tx, data)
+}
+
+// getQuerier returns tx if non-nil, otherwise falls back to the default DB connection.
+func (s *claimStorage) getQuerier(tx dbtypes.Querier) dbtypes.Querier {
+	if tx != nil {
+		return tx
+	}
+	return s.database
+}
+
+// InsertBlock inserts a block row using meddler.
+func (s *claimStorage) InsertBlock(tx dbtypes.Querier, blockNum uint64, blockHash string) error { + if err := meddler.Insert(s.getQuerier(tx), "block", &blockRecord{Num: blockNum, Hash: blockHash}); err != nil { + return fmt.Errorf("InsertBlock %d: %w", blockNum, err) + } + return nil +} + +// InsertClaim persists a claim. The referenced block must already exist. +func (s *claimStorage) InsertClaim(tx dbtypes.Querier, claim bridgesync.Claim) error { + if err := meddler.Insert(s.getQuerier(tx), "claim", &claim); err != nil { + return fmt.Errorf("InsertClaim (block %d, pos %d): %w", claim.BlockNum, claim.BlockPos, err) + } + return nil +} + +// InsertUnsetClaim persists an unset claim. The referenced block must already exist. +func (s *claimStorage) InsertUnsetClaim(tx dbtypes.Querier, u bridgesync.UnsetClaim) error { + if err := meddler.Insert(s.getQuerier(tx), "unset_claim", &u); err != nil { + return fmt.Errorf("InsertUnsetClaim (block %d, pos %d): %w", u.BlockNum, u.BlockPos, err) + } + return nil +} + +// InsertSetClaim persists a set claim. The referenced block must already exist. +func (s *claimStorage) InsertSetClaim(tx dbtypes.Querier, sc bridgesync.SetClaim) error { + if err := meddler.Insert(s.getQuerier(tx), "set_claim", &sc); err != nil { + return fmt.Errorf("InsertSetClaim (block %d, pos %d): %w", sc.BlockNum, sc.BlockPos, err) + } + return nil +} + +// GetClaims returns claims in [fromBlock, toBlock] using compaction logic: +// claims with an unset_claim are returned uncompacted; others are compacted +// (oldest metadata + newest proofs per global_index). 
+func (s *claimStorage) GetClaims(tx dbtypes.Querier, fromBlock, toBlock uint64) ([]bridgesync.Claim, error) { + query := fmt.Sprintf(` + WITH all_claims_ranked AS ( + SELECT + *, + ROW_NUMBER() OVER (PARTITION BY global_index ORDER BY block_num ASC, block_pos ASC) AS rn_oldest_global, + ROW_NUMBER() OVER (PARTITION BY global_index ORDER BY block_num DESC, block_pos DESC) AS rn_newest_global + FROM claim + ), + claims_in_range AS ( + SELECT * FROM all_claims_ranked WHERE block_num >= $1 AND block_num <= $2 + ), + claims_with_unset AS ( + SELECT c.%s + FROM claims_in_range c + WHERE EXISTS (SELECT 1 FROM unset_claim uc WHERE uc.global_index = c.global_index) + ), + compactable_claims AS ( + SELECT %s + FROM claims_in_range o + JOIN claims_in_range n ON o.global_index = n.global_index AND n.rn_newest_global = 1 + WHERE o.rn_oldest_global = 1 + AND NOT EXISTS (SELECT 1 FROM unset_claim uc WHERE uc.global_index = o.global_index) + ) + SELECT * FROM claims_with_unset + UNION ALL + SELECT * FROM compactable_claims + ORDER BY block_num ASC, block_pos ASC; + `, claimColumnsSQL, compactedClaimsSelectSQL) + + rows, err := s.getQuerier(tx).Query(query, fromBlock, toBlock) + if err != nil { + if errors.Is(err, sql.ErrNoRows) { + return []bridgesync.Claim{}, nil + } + return nil, fmt.Errorf("GetClaims [%d, %d]: %w", fromBlock, toBlock, err) + } + defer rows.Close() + + return scanClaims(rows) +} + +// GetClaimsByGlobalIndex returns claims for the given global index using compaction logic. 
+func (s *claimStorage) GetClaimsByGlobalIndex(tx dbtypes.Querier, globalIndex *big.Int) ([]bridgesync.Claim, error) { + if globalIndex == nil { + return nil, errors.New("GetClaimsByGlobalIndex: globalIndex cannot be nil") + } + + query := fmt.Sprintf(` + WITH all_claims_for_index AS ( + SELECT + *, + ROW_NUMBER() OVER (ORDER BY block_num ASC, block_pos ASC) AS rn_oldest, + ROW_NUMBER() OVER (ORDER BY block_num DESC, block_pos DESC) AS rn_newest + FROM claim + WHERE global_index = $1 + ), + claims_with_unset AS ( + SELECT c.%s + FROM all_claims_for_index c + WHERE EXISTS (SELECT 1 FROM unset_claim uc WHERE uc.global_index = $1) + ), + compactable_claims AS ( + SELECT %s + FROM all_claims_for_index o + JOIN all_claims_for_index n ON n.rn_newest = 1 + WHERE o.rn_oldest = 1 + AND NOT EXISTS (SELECT 1 FROM unset_claim uc WHERE uc.global_index = $1) + ) + SELECT * FROM claims_with_unset + UNION ALL + SELECT * FROM compactable_claims + ORDER BY block_num ASC, block_pos ASC; + `, claimColumnsSQL, compactedClaimsSelectSQL) + + rows, err := s.getQuerier(tx).Query(query, globalIndex.String()) + if err != nil { + if errors.Is(err, sql.ErrNoRows) { + return []bridgesync.Claim{}, nil + } + return nil, fmt.Errorf("GetClaimsByGlobalIndex %s: %w", globalIndex.String(), err) + } + defer rows.Close() + + return scanClaims(rows) +} + +// GetLastProcessedBlock returns the highest block number stored. +func (s *claimStorage) GetLastProcessedBlock(tx dbtypes.Querier) (uint64, error) { + var num uint64 + err := s.getQuerier(tx).QueryRow(`SELECT num FROM block ORDER BY num DESC LIMIT 1`).Scan(&num) + if errors.Is(err, sql.ErrNoRows) { + return 0, nil + } + return num, err +} + +// GetBoundaryBlockForClaimType returns the max block_num for claims of the given type. +// Returns db.ErrNotFound if no claims of that type exist. 
+func (s *claimStorage) GetBoundaryBlockForClaimType(tx dbtypes.Querier, claimType bridgesync.ClaimType) (uint64, error) { + var blockNum *uint64 + if err := s.getQuerier(tx).QueryRow(`SELECT MAX(block_num) FROM claim WHERE type = $1`, claimType). + Scan(&blockNum); err != nil { + return 0, err + } + if blockNum == nil { + return 0, db.ErrNotFound + } + return *blockNum, nil +} + +// DeleteBlocksFrom deletes all blocks with num >= firstBlock and returns the count deleted. +// Cascade constraints automatically remove associated claims, unset_claims and set_claims. +func (s *claimStorage) DeleteBlocksFrom(tx dbtypes.Querier, firstBlock uint64) (int64, error) { + res, err := s.getQuerier(tx).Exec(`DELETE FROM block WHERE num >= $1`, firstBlock) + if err != nil { + return 0, fmt.Errorf("DeleteBlocksFrom %d: %w", firstBlock, err) + } + n, _ := res.RowsAffected() + return n, nil +} + +func scanClaims(rows *sql.Rows) ([]bridgesync.Claim, error) { + var ptrs []*bridgesync.Claim + if err := meddler.ScanAll(rows, &ptrs); err != nil { + return nil, fmt.Errorf("scanClaims: %w", err) + } + + iface := db.SlicePtrsToSlice(ptrs) + claims, ok := iface.([]bridgesync.Claim) + if !ok { + return nil, errors.New("scanClaims: type assertion from []*Claim to []Claim failed") + } + + return claims, nil +} diff --git a/claimsync/types/claim_reader.go b/claimsync/types/claim_reader.go new file mode 100644 index 000000000..e62205326 --- /dev/null +++ b/claimsync/types/claim_reader.go @@ -0,0 +1,16 @@ +package types + +import ( + "math/big" + + "github.com/agglayer/aggkit/bridgesync" + dbtypes "github.com/agglayer/aggkit/db/types" +) + +// ClaimsReader provides read-only access +type ClaimsReader interface { + GetLastProcessedBlock(tx dbtypes.Querier) (uint64, error) + GetBoundaryBlockForClaimType(tx dbtypes.Querier, claimType bridgesync.ClaimType) (uint64, error) + GetClaims(tx dbtypes.Querier, fromBlock, toBlock uint64) ([]bridgesync.Claim, error) + GetClaimsByGlobalIndex(tx dbtypes.Querier, 
globalIndex *big.Int) ([]bridgesync.Claim, error)
+}
diff --git a/claimsync/types/claim_storager.go b/claimsync/types/claim_storager.go
new file mode 100644
index 000000000..c888b4cae
--- /dev/null
+++ b/claimsync/types/claim_storager.go
@@ -0,0 +1,37 @@
+package types
+
+import (
+	"context"
+	"math/big"
+
+	"github.com/agglayer/aggkit/bridgesync"
+	"github.com/agglayer/aggkit/db/compatibility"
+	dbtypes "github.com/agglayer/aggkit/db/types"
+	aggsync "github.com/agglayer/aggkit/sync"
+)
+
+// ClaimStorager defines the interface for claim storage operations.
+// Each method accepts an optional tx dbtypes.Querier; pass nil to use the default DB connection.
+type ClaimStorager interface {
+	// InsertBlock records a block so claims can reference it via foreign key
+	InsertBlock(tx dbtypes.Querier, blockNum uint64, blockHash string) error
+	// InsertClaim persists a single claim record
+	InsertClaim(tx dbtypes.Querier, claim bridgesync.Claim) error
+	// InsertUnsetClaim persists an unset claim record
+	InsertUnsetClaim(tx dbtypes.Querier, u bridgesync.UnsetClaim) error
+	// InsertSetClaim persists a set claim record
+	InsertSetClaim(tx dbtypes.Querier, s bridgesync.SetClaim) error
+	// GetClaims returns claims in [fromBlock, toBlock] using compaction logic
+	GetClaims(tx dbtypes.Querier, fromBlock, toBlock uint64) ([]bridgesync.Claim, error)
+	// GetClaimsByGlobalIndex returns claims for the given global index using compaction logic
+	GetClaimsByGlobalIndex(tx dbtypes.Querier, globalIndex *big.Int) ([]bridgesync.Claim, error)
+	// GetLastProcessedBlock returns the highest block number stored
+	GetLastProcessedBlock(tx dbtypes.Querier) (uint64, error)
+	// GetBoundaryBlockForClaimType returns the max block_num for claims of the given type
+	GetBoundaryBlockForClaimType(tx dbtypes.Querier, claimType bridgesync.ClaimType) (uint64, error)
+	// DeleteBlocksFrom deletes all blocks with num >= firstBlock (cascade-deletes claims etc.)
+	DeleteBlocksFrom(tx dbtypes.Querier, firstBlock uint64) (int64, error)
+	// NewTx begins a new database transaction.
+	NewTx(ctx context.Context) (dbtypes.Txer, error)
+	compatibility.CompatibilityDataStorager[aggsync.RuntimeData]
+}
diff --git a/claimsync/types/processor.go b/claimsync/types/processor.go
new file mode 100644
index 000000000..13a0151c2
--- /dev/null
+++ b/claimsync/types/processor.go
@@ -0,0 +1,11 @@
+package types
+
+import (
+	dbtypes "github.com/agglayer/aggkit/db/types"
+	"github.com/agglayer/aggkit/sync"
+)
+
+type EmbeddedProcessor interface {
+	ProcessBlockWithTx(tx dbtypes.Querier, block *sync.Block, insertBlock bool) error
+	ReorgWithTx(tx dbtypes.Querier, firstReorgedBlock uint64) (int64, error)
+}
diff --git a/claimsync/types/syncer_id.go b/claimsync/types/syncer_id.go
new file mode 100644
index 000000000..e9d016a2d
--- /dev/null
+++ b/claimsync/types/syncer_id.go
@@ -0,0 +1,18 @@
+package types
+
+// ClaimSyncerID represents the type of claim syncer
+type ClaimSyncerID int
+
+const (
+	L1ClaimSyncer ClaimSyncerID = iota
+	L2ClaimSyncer
+
+	// CurrentDBVersion represents the current version of the claim syncer's database schema.
+	// It is used to ensure the database is reset if an upgrade requires a full resync.
+	// Increment this value whenever the database schema changes in a way that is not backward-compatible.
+ CurrentDBVersion = 1 +) + +func (b ClaimSyncerID) String() string { + return [...]string{"L1ClaimSyncer", "L2ClaimSyncer"}[b] +} diff --git a/cmd/run.go b/cmd/run.go index 169bbc8f2..baf1f8459 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -30,6 +30,7 @@ import ( "github.com/agglayer/aggkit/aggsender/validator" "github.com/agglayer/aggkit/bridgeservice" "github.com/agglayer/aggkit/bridgesync" + "github.com/agglayer/aggkit/claimsync" aggkitcommon "github.com/agglayer/aggkit/common" "github.com/agglayer/aggkit/config" "github.com/agglayer/aggkit/etherman" @@ -196,6 +197,7 @@ func start(cliCtx *cli.Context) error { l1Client, l1InfoTreeSync, l2BridgeSync, + claimsync.NewFromBridgeSync(l2BridgeSync), l2Client, rollupDataQuerier, committeeQuerier, @@ -227,6 +229,7 @@ func start(cliCtx *cli.Context) error { cfg.Validator, l1InfoTreeSync, l2BridgeSync, + claimsync.NewFromBridgeSync(l2BridgeSync), l1Client, l2Client, rollupDataQuerier, @@ -293,6 +296,7 @@ func createAggSenderValidator(ctx context.Context, cfg validator.Config, l1InfoTreeSync *l1infotreesync.L1InfoTreeSync, l2Syncer *bridgesync.BridgeSync, + claimSyncer claimsync.ClaimSyncer, l1Client aggkittypes.BaseEthereumClienter, l2Client aggkittypes.BaseEthereumClienter, rollupDataQuerier *ethermanquierier.RollupDataQuerier, @@ -360,6 +364,7 @@ func createAggSenderValidator(ctx context.Context, aggchainFEPQuerier, flowParams.InitialLER, flowParams.Signer, + claimSyncer, ) } @@ -369,6 +374,7 @@ func createAggSender( l1EthClient aggkittypes.BaseEthereumClienter, l1InfoTreeSync aggsendertypes.L1InfoTreeSyncer, l2Syncer aggsendertypes.L2BridgeSyncer, + claimSyncer claimsync.ClaimSyncer, l2Client aggkittypes.BaseEthereumClienter, rollupDataQuerier aggsendertypes.RollupDataQuerier, committeeQuerier aggsendertypes.MultisigQuerier, @@ -386,7 +392,7 @@ func createAggSender( } aggsender, err := aggsender.New(ctx, logger, cfg, agglayerClient, - l1InfoTreeSync, l2Syncer, l1EthClient, l2Client, rollupDataQuerier, committeeQuerier, 
initialLER) + l1InfoTreeSync, l2Syncer, claimSyncer, l1EthClient, l2Client, rollupDataQuerier, committeeQuerier, initialLER) if err != nil { return nil, fmt.Errorf("failed to create AggSender: %w", err) } @@ -819,6 +825,7 @@ func runBridgeSyncL2IfNeeded( fullClaimsNeeded, syncFromInBridges, initialLER, + nil, ) if err != nil { log.Fatalf("error creating bridgeSyncL2: %s", err) diff --git a/test/helpers/e2e.go b/test/helpers/e2e.go index 1795d93d9..46078fc1a 100644 --- a/test/helpers/e2e.go +++ b/test/helpers/e2e.go @@ -342,7 +342,7 @@ func L2Setup(t *testing.T, cfg *EnvironmentConfig, l1Setup *L1Environment) *L2En RequireStorageContentCompatibility: true, DBQueryTimeout: cfgtypes.NewDuration(defaultDBQueryTimeout), } - bridgeL2Sync, err := bridgesync.NewL2(ctx, bridgeSyncCfg, rdL2, testClient, originNetwork, false, false, bridgesynctypes.EmptyLER) + bridgeL2Sync, err := bridgesync.NewL2(ctx, bridgeSyncCfg, rdL2, testClient, originNetwork, false, false, bridgesynctypes.EmptyLER, nil) require.NoError(t, err) go bridgeL2Sync.Start(ctx) From b6a7027c131be3a2f2df4a8af19746e099915c63 Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Mon, 16 Mar 2026 14:28:33 +0100 Subject: [PATCH 02/28] feat: migrate claim types to claimsync and add RPC interface (pm285) - Move Claim/Unclaim types from bridgesync to claimsync/types package - Add ClaimSyncRPC server exposing l2claimsync_* JSON-RPC methods - Add paged storage implementation for claimsync - Add claimsync config and claim_data types - Update aggsender interfaces to use claimsync.Claim instead of bridgesync.Claim - Add GetNextBlockNumber and GeneratePreBuildParams to AggsenderBuilderFlow/FlowBaser interfaces - Extend GetLastProcessedBlock signature to return (uint64, bool, error) - Add op-pp local config and e2e bridge tests - Regenerate all affected mocks Co-Authored-By: Claude Sonnet 4.6 --- .mockery.yaml | 4 + aggsender/aggsender.go | 40 +- aggsender/aggsender_validator.go 
| 4 +- .../imported_bridge_exit_converter.go | 13 +- .../flows/builder_flow_aggchain_prover.go | 11 + aggsender/flows/builder_flow_factory.go | 9 +- aggsender/flows/builder_flow_pp.go | 11 + aggsender/flows/flow_base.go | 28 +- aggsender/flows/verifier_flow_factory.go | 6 +- aggsender/flows/verifier_flow_factory_test.go | 1 + .../mocks/mock_agglayer_bridge_l2_reader.go | 2 +- .../mocks/mock_aggsender_builder_flow.go | 112 ++ aggsender/mocks/mock_aggsender_flow_baser.go | 73 +- aggsender/mocks/mock_bridge_querier.go | 57 +- aggsender/mocks/mock_l2_bridge_syncer.go | 146 +-- aggsender/mocks/mock_optimistic_signer.go | 20 +- aggsender/optimistic/optimistic_sign.go | 4 +- .../calculate_hash_commit_imported_bridges.go | 8 +- aggsender/prover/proof_generation_tool.go | 19 +- aggsender/query/aggchain_proof_query.go | 6 +- aggsender/query/bridge_query.go | 104 +- aggsender/query/certificate_query.go | 6 +- aggsender/trigger/trigger_by_bridge.go | 6 +- aggsender/types/certificate_build_params.go | 10 +- aggsender/types/interfaces.go | 25 +- aggsender/types/optimistic_interface.go | 4 +- bridgeservice/bridge.go | 11 +- bridgeservice/bridge_interfaces.go | 11 +- bridgeservice/bridge_test.go | 6 +- bridgeservice/mocks/mock_bridger.go | 127 +- bridgeservice/utils.go | 3 +- bridgesync/agglayer_bridge_l2_reader.go | 18 +- bridgesync/agglayer_bridge_l2_reader_test.go | 22 +- bridgesync/bridgesync.go | 155 ++- bridgesync/bridgesync_test.go | 1 + bridgesync/claim.go | 51 + bridgesync/config.go | 88 +- bridgesync/config_test.go | 68 +- bridgesync/downloader.go | 6 +- bridgesync/mock_bridge_querier.go | 17 +- bridgesync/processor.go | 809 +------------ bridgesync/types/types.go | 7 - claimsync/claim_data.go | 22 + .../claimcalldata_test.go | 4 +- claimsync/claimsync.go | 141 ++- claimsync/claimsync_rpc.go | 115 ++ claimsync/claimsync_test.go | 96 ++ claimsync/config.go | 54 + {bridgesync => claimsync}/docker-compose.yml | 0 claimsync/downloader.go | 302 ++++- claimsync/embedded.go | 
97 +- {bridgesync => claimsync}/helpers_test.go | 2 +- claimsync/processor.go | 29 +- claimsync/reader.go | 45 - claimsync/storage/storage.go | 152 ++- claimsync/storage/storage_paged.go | 360 ++++++ claimsync/types/claim_data.go | 245 ++++ claimsync/types/claim_reader.go | 24 +- claimsync/types/claim_storager.go | 58 +- claimsync/types/claim_syncer.go | 21 + claimsync/types/mocks/mock_claim_storager.go | 1037 +++++++++++++++++ claimsync/types/mocks/mock_claim_syncer.go | 313 +++++ claimsync/types/mocks/mock_claims_reader.go | 550 +++++++++ .../types/mocks/mock_embedded_processor.go | 146 +++ claimsync/types/processor.go | 6 +- cmd/run.go | 158 ++- common/components.go | 3 + config/default.go | 2 + l1infotreesync/l1infotreesync.go | 7 +- l1infotreesync/processor.go | 16 +- l2gersync/l2_ger_syncer.go | 3 +- l2gersync/processor.go | 9 +- multidownloader/evm_multidownloader.go | 2 + scripts/request_aggsender_status.sh | 2 + sync/evmdriver.go | 17 +- sync/mock_processor_interface.go | 25 +- test/e2e/bridge_test.go | 19 + test/e2e/envs/loader.go | 11 +- test/e2e/envs/op-pp/config_local/README.md | 20 + .../op-pp/config_local/aggkit-parallel.toml | 122 ++ test/helpers/e2e.go | 4 +- 81 files changed, 4860 insertions(+), 1508 deletions(-) create mode 100644 bridgesync/claim.go create mode 100644 claimsync/claim_data.go rename {bridgesync => claimsync}/claimcalldata_test.go (99%) create mode 100644 claimsync/claimsync_rpc.go create mode 100644 claimsync/claimsync_test.go create mode 100644 claimsync/config.go rename {bridgesync => claimsync}/docker-compose.yml (100%) rename {bridgesync => claimsync}/helpers_test.go (99%) delete mode 100644 claimsync/reader.go create mode 100644 claimsync/storage/storage_paged.go create mode 100644 claimsync/types/claim_data.go create mode 100644 claimsync/types/claim_syncer.go create mode 100644 claimsync/types/mocks/mock_claim_storager.go create mode 100644 claimsync/types/mocks/mock_claim_syncer.go create mode 100644 
claimsync/types/mocks/mock_claims_reader.go create mode 100644 claimsync/types/mocks/mock_embedded_processor.go create mode 100644 scripts/request_aggsender_status.sh create mode 100644 test/e2e/bridge_test.go create mode 100644 test/e2e/envs/op-pp/config_local/README.md create mode 100644 test/e2e/envs/op-pp/config_local/aggkit-parallel.toml diff --git a/.mockery.yaml b/.mockery.yaml index 36fa370a5..7fa9b1dd7 100644 --- a/.mockery.yaml +++ b/.mockery.yaml @@ -166,3 +166,7 @@ packages: config: dir: "{{ .InterfaceDir }}/mocks" all: true + github.com/agglayer/aggkit/claimsync/types: + config: + dir: "{{ .InterfaceDir }}/mocks" + all: true diff --git a/aggsender/aggsender.go b/aggsender/aggsender.go index 0f8f6be94..0c55e0331 100644 --- a/aggsender/aggsender.go +++ b/aggsender/aggsender.go @@ -21,8 +21,8 @@ import ( "github.com/agglayer/aggkit/aggsender/trigger" "github.com/agglayer/aggkit/aggsender/types" "github.com/agglayer/aggkit/aggsender/validator" + claimsynctypes "github.com/agglayer/aggkit/claimsync/types" aggkitcommon "github.com/agglayer/aggkit/common" - "github.com/agglayer/aggkit/claimsync" "github.com/agglayer/aggkit/db/compatibility" "github.com/agglayer/aggkit/log" aggkittypes "github.com/agglayer/aggkit/types" @@ -49,6 +49,7 @@ type AggSender struct { l1Client aggkittypes.BaseEthereumClienter l1InfoTreeSyncer types.L1InfoTreeSyncer + l2ClaimSyncer claimsynctypes.ClaimSyncer certificateSendTrigger types.CertificateSendTrigger @@ -68,7 +69,7 @@ func New( aggLayerClient agglayer.AgglayerClientInterface, l1InfoTreeSyncer types.L1InfoTreeSyncer, l2Syncer types.L2BridgeSyncer, - claimSyncer claimsync.ClaimSyncer, + l2ClaimSyncer claimsynctypes.ClaimSyncer, l1Client aggkittypes.BaseEthereumClienter, l2Client aggkittypes.BaseEthereumClienter, rollupDataQuerier types.RollupDataQuerier, @@ -104,7 +105,7 @@ func New( aggLayerClient, l1InfoTreeSyncer, l2Syncer, - claimSyncer, + l2ClaimSyncer, l1Client, l2Client, rollupDataQuerier, @@ -122,7 +123,7 @@ func 
newAggsender( aggLayerClient agglayer.AgglayerClientInterface, l1InfoTreeSyncer types.L1InfoTreeSyncer, l2Syncer types.L2BridgeSyncer, - _ claimsync.ClaimSyncer, + l2ClaimSyncer claimsynctypes.ClaimSyncer, l1Client aggkittypes.BaseEthereumClienter, l2Client aggkittypes.BaseEthereumClienter, rollupDataQuerier types.RollupDataQuerier, @@ -148,6 +149,7 @@ func newAggsender( certQuerier := query.NewCertificateQuerier( l2Syncer, + l2ClaimSyncer, aggchainFEPCaller, aggLayerClient, initialLER, @@ -162,6 +164,7 @@ func newAggsender( l2Client, l1InfoTreeSyncer, l2Syncer, + l2ClaimSyncer, rollupDataQuerier, committeeQuerier, certQuerier, @@ -234,6 +237,7 @@ func newAggsender( logger, storage, aggLayerClient, certQuerier, l2OriginNetwork), l1Client: l1Client, l1InfoTreeSyncer: l1InfoTreeSyncer, + l2ClaimSyncer: l2ClaimSyncer, certificateSendTrigger: certificateSendTrigger, }, nil } @@ -323,6 +327,7 @@ func (a *AggSender) sendCertificates(ctx context.Context, returnAfterNIterations a.log.Debugf("AggSender: OnIdle") a.certificateSendTrigger.OnIdle() } + a.setClaimSyncerNextRequiredBlock(ctx) a.status.Status = types.StatusCertificateStage iteration := 0 @@ -391,6 +396,33 @@ func (a *AggSender) sendCertificates(ctx context.Context, returnAfterNIterations } } +func (a *AggSender) setClaimSyncerNextRequiredBlock(ctx context.Context) { + for { + select { + case <-ctx.Done(): + return + default: + } + nextBlock, err := a.flow.GetNextBlockNumber() + if err != nil { + a.log.Errorf("error getting next block number for claim syncer: %v", err) + time.Sleep(a.cfg.DelayBetweenRetries.Duration) + continue + } + a.log.Infof("Setting starting Claim L2 Syncer block to %d", nextBlock) + if a.l2ClaimSyncer == nil { + a.log.Fatalf("l2 claim syncer is nil, so we are not going to set the next required block for claim syncer") + } + if err := a.l2ClaimSyncer.SetNextRequiredBlock(ctx, nextBlock); err != nil { + a.log.Errorf("error setting next required block for claim syncer: %v", err) + 
time.Sleep(a.cfg.DelayBetweenRetries.Duration) + continue + } + a.log.Infof("Set next required block for claim syncer to %d", nextBlock) + break + } +} + func (a *AggSender) sendCertificateWithRetries(ctx context.Context) (*agglayertypes.Certificate, error) { retryHandler, err := a.cfg.RetriesToBuildAndSendCertificate.NewRetryHandler() if err != nil { diff --git a/aggsender/aggsender_validator.go b/aggsender/aggsender_validator.go index 4de29965f..c432ae241 100644 --- a/aggsender/aggsender_validator.go +++ b/aggsender/aggsender_validator.go @@ -12,7 +12,6 @@ import ( "github.com/agglayer/aggkit/aggsender/types" "github.com/agglayer/aggkit/aggsender/validator" v1 "github.com/agglayer/aggkit/aggsender/validator/proto/v1" - "github.com/agglayer/aggkit/claimsync" aggkitcommon "github.com/agglayer/aggkit/common" "github.com/agglayer/aggkit/grpc" ) @@ -38,8 +37,7 @@ func NewAggsenderValidator(ctx context.Context, certQuerier types.CertificateQuerier, aggchainFEPQuerier types.AggchainFEPRollupQuerier, initialLER ethcommon.Hash, - signer signertypes.Signer, - _ claimsync.ClaimSyncer) (*AggsenderValidator, error) { + signer signertypes.Signer) (*AggsenderValidator, error) { validatorCert := validator.NewAggsenderValidator( logger, flow, l1InfoTreeDataQuerier, certQuerier, initialLER) grpcServer, err := grpc.NewServer(cfg.ServerConfig) diff --git a/aggsender/converters/imported_bridge_exit_converter.go b/aggsender/converters/imported_bridge_exit_converter.go index 0638fab98..aaa7a8021 100644 --- a/aggsender/converters/imported_bridge_exit_converter.go +++ b/aggsender/converters/imported_bridge_exit_converter.go @@ -8,6 +8,7 @@ import ( "github.com/agglayer/aggkit/aggsender/types" "github.com/agglayer/aggkit/bridgesync" bridgetypes "github.com/agglayer/aggkit/bridgesync/types" + claimsynctypes "github.com/agglayer/aggkit/claimsync/types" "github.com/agglayer/aggkit/tree" "github.com/ethereum/go-ethereum/common" ) @@ -19,7 +20,7 @@ import ( // into its mainnet flag, rollup 
index, and leaf index components. Returns the constructed // ImportedBridgeExit or an error if the global index cannot be decoded. func ConvertToImportedBridgeExitWithoutClaimData( - claim bridgesync.Claim) (*agglayertypes.ImportedBridgeExit, error) { + claim claimsynctypes.Claim) (*agglayertypes.ImportedBridgeExit, error) { bridgeExit := ConvertBridgeExitFromClaim(claim) mainnetFlag, rollupIndex, leafIndex, err := bridgesync.DecodeGlobalIndex(claim.GlobalIndex) @@ -49,7 +50,7 @@ func ConvertToImportedBridgeExitWithoutClaimData( // // Returns: // - *agglayertypes.BridgeExit: The constructed bridge exit object with core claim data. -func ConvertBridgeExitFromClaim(claim bridgesync.Claim) *agglayertypes.BridgeExit { +func ConvertBridgeExitFromClaim(claim claimsynctypes.Claim) *agglayertypes.BridgeExit { leafType := bridgetypes.LeafTypeAsset if claim.IsMessage { leafType = bridgetypes.LeafTypeMessage @@ -85,7 +86,7 @@ func ConvertBridgeExitFromClaim(claim bridgesync.Claim) *agglayertypes.BridgeExi // - error: An error if any step in the conversion or proof retrieval fails. func ConvertToImportedBridgeExit( ctx context.Context, - claim bridgesync.Claim, + claim claimsynctypes.Claim, rootFromWhichToProve common.Hash, l1InfoTreeQuerier types.L1InfoTreeDataQuerier) (*agglayertypes.ImportedBridgeExit, error) { ibe, err := ConvertToImportedBridgeExitWithoutClaimData(claim) @@ -171,7 +172,7 @@ func ConvertToImportedBridgeExit( // - An error if any claim fails to convert. func ConvertToImportedBridgeExits( ctx context.Context, - claims []bridgesync.Claim, + claims []claimsynctypes.Claim, rootFromWhichToProve common.Hash, l1InfoTreeQuerier types.L1InfoTreeDataQuerier, ) ([]*agglayertypes.ImportedBridgeExit, error) { @@ -202,13 +203,13 @@ func ConvertToImportedBridgeExits( // use the debug endpoint to retrieve the claim data. 
// // Parameters: -// - claims: A slice of bridgesync.Claim objects to be converted +// - claims: A slice of claimsynctypes.Claim objects to be converted // // Returns: // - A slice of *agglayertypes.ImportedBridgeExit objects on success // - An error if any claim conversion fails func ConvertToImportedBridgeExitsWithoutClaimData( - claims []bridgesync.Claim, + claims []claimsynctypes.Claim, ) ([]*agglayertypes.ImportedBridgeExit, error) { if len(claims) == 0 { // no claims to convert diff --git a/aggsender/flows/builder_flow_aggchain_prover.go b/aggsender/flows/builder_flow_aggchain_prover.go index 6bf071bef..373093a45 100644 --- a/aggsender/flows/builder_flow_aggchain_prover.go +++ b/aggsender/flows/builder_flow_aggchain_prover.go @@ -386,6 +386,17 @@ func (a *AggchainProverBuilderFlow) getLastProvenBlock( return fromBlock - 1 } +// GeneratePreBuildParams generates the pre-build parameters delegating to the base flow +func (a *AggchainProverBuilderFlow) GeneratePreBuildParams(ctx context.Context, + certType types.CertificateType) (*types.CertificatePreBuildParams, error) { + return a.baseFlow.GeneratePreBuildParams(ctx, certType) +} + +// GetNextBlockNumber returns the first block number of the next certificate to generate +func (a *AggchainProverBuilderFlow) GetNextBlockNumber() (uint64, error) { + return a.baseFlow.GetNextBlockNumber() +} + // Signer returns the signer used to sign the certificate func (a *AggchainProverBuilderFlow) Signer() signertypes.Signer { return a.certificateSigner diff --git a/aggsender/flows/builder_flow_factory.go b/aggsender/flows/builder_flow_factory.go index 944f5bf2c..6ea650e1f 100644 --- a/aggsender/flows/builder_flow_factory.go +++ b/aggsender/flows/builder_flow_factory.go @@ -13,6 +13,7 @@ import ( "github.com/agglayer/aggkit/aggsender/query" "github.com/agglayer/aggkit/aggsender/types" "github.com/agglayer/aggkit/bridgesync" + claimsynctypes "github.com/agglayer/aggkit/claimsync/types" aggkitcommon 
"github.com/agglayer/aggkit/common" "github.com/agglayer/aggkit/l2gersync" "github.com/agglayer/aggkit/log" @@ -37,6 +38,7 @@ func NewBuilderFlow( l2Client aggkittypes.BaseEthereumClienter, l1InfoTreeSyncer types.L1InfoTreeSyncer, l2Syncer types.L2BridgeSyncer, + l2ClaimSyncer claimsynctypes.ClaimSyncer, rollupDataQuerier types.RollupDataQuerier, committeeQuerier types.MultisigQuerier, certQuerier types.CertificateQuerier, @@ -45,7 +47,7 @@ func NewBuilderFlow( switch cfg.Mode { case types.PessimisticProofMode: commonFlowComponents, err := CreateCommonFlowComponents( - ctx, logger, storage, l1Client, l2Client, l1InfoTreeSyncer, l2Syncer, + ctx, logger, storage, l1Client, l2Client, l1InfoTreeSyncer, l2Syncer, l2ClaimSyncer, rollupDataQuerier, committeeQuerier, 0, false, cfg.MaxCertSize, @@ -97,7 +99,7 @@ func NewBuilderFlow( } commonFlowComponents, err := CreateCommonFlowComponents( - ctx, logger, storage, l1Client, l2Client, l1InfoTreeSyncer, l2Syncer, + ctx, logger, storage, l1Client, l2Client, l1InfoTreeSyncer, l2Syncer, l2ClaimSyncer, rollupDataQuerier, committeeQuerier, aggchainFEPQuerier.StartL2Block(), cfg.RequireNoFEPBlockGap, cfg.MaxCertSize, @@ -164,6 +166,7 @@ func CreateCommonFlowComponents( l2Client aggkittypes.BaseEthereumClienter, l1InfoTreeSyncer types.L1InfoTreeSyncer, l2Syncer types.L2BridgeSyncer, + l2ClaimSyncer claimsynctypes.ClaimSyncer, rollupDataQuerier types.RollupDataQuerier, committeeQuerier types.MultisigQuerier, startL2Block uint64, @@ -200,7 +203,7 @@ func CreateCommonFlowComponents( return nil, fmt.Errorf("failed to create bridge L2 sovereign reader: %w", err) } - l2BridgeQuerier := query.NewBridgeDataQuerier(logger, l2Syncer, delayBetweenRetries, agglayerBridgeL2Reader) + l2BridgeQuerier := query.NewBridgeDataQuerier(logger, l2Syncer, l2ClaimSyncer, delayBetweenRetries, agglayerBridgeL2Reader) l1InfoTreeQuerier, err := query.NewL1InfoTreeDataQuerier(l1Client, globalExitRootL1Addr, l1InfoTreeSyncer, blockFinalityForL1InfoTree) if err 
!= nil { diff --git a/aggsender/flows/builder_flow_pp.go b/aggsender/flows/builder_flow_pp.go index cf0ce811e..94b5b5113 100644 --- a/aggsender/flows/builder_flow_pp.go +++ b/aggsender/flows/builder_flow_pp.go @@ -147,6 +147,17 @@ func (p *PPBuilderFlow) UpdateAggchainData( return nil } +// GeneratePreBuildParams generates the pre-build parameters delegating to the base flow +func (p *PPBuilderFlow) GeneratePreBuildParams(ctx context.Context, + certType types.CertificateType) (*types.CertificatePreBuildParams, error) { + return p.baseFlow.GeneratePreBuildParams(ctx, certType) +} + +// GetNextBlockNumber returns the first block number of the next certificate to generate +func (p *PPBuilderFlow) GetNextBlockNumber() (uint64, error) { + return p.baseFlow.GetNextBlockNumber() +} + // Signer returns the signer used to sign the certificate func (p *PPBuilderFlow) Signer() signertypes.Signer { return p.certificateSigner diff --git a/aggsender/flows/flow_base.go b/aggsender/flows/flow_base.go index 76463bf62..2fb47052c 100644 --- a/aggsender/flows/flow_base.go +++ b/aggsender/flows/flow_base.go @@ -11,7 +11,7 @@ import ( "github.com/agglayer/aggkit/aggsender/db" "github.com/agglayer/aggkit/aggsender/types" "github.com/agglayer/aggkit/bridgesync" - bridgesynctypes "github.com/agglayer/aggkit/bridgesync/types" + claimsynctypes "github.com/agglayer/aggkit/claimsync/types" aggkitcommon "github.com/agglayer/aggkit/common" aggkitdb "github.com/agglayer/aggkit/db" "github.com/agglayer/aggkit/l1infotreesync" @@ -110,13 +110,27 @@ func (f *baseFlow) StartL2Block() uint64 { return f.cfg.StartL2Block } +// GetNextBlockNumber returns the first block number of the next certificate to generate. +// It reads the last sent certificate from storage to determine the starting block. 
+func (f *baseFlow) GetNextBlockNumber() (uint64, error) { + lastSentCertificate, err := f.storage.GetLastSentCertificateHeader() + if err != nil { + return 0, fmt.Errorf("error getting last sent certificate: %w", err) + } + previousToBlock, _ := f.getLastSentBlockAndRetryCount(lastSentCertificate) + return previousToBlock + 1, nil +} + // NextCertificateBlockRange returns the block range and retryCount for the next certificate func (f *baseFlow) NextCertificateBlockRange(ctx context.Context, lastSentCertificate *types.CertificateHeader) (aggkitcommon.BlockRange, int, error) { - lastL2BlockSynced, err := f.l2BridgeQuerier.GetLastProcessedBlock(ctx) + lastL2BlockSynced, found, err := f.l2BridgeQuerier.GetLastProcessedBlock(ctx) if err != nil { return aggkitcommon.BlockRangeZero, 0, fmt.Errorf("error getting last processed block from l2: %w", err) } + if !found { + return aggkitcommon.BlockRangeZero, 0, fmt.Errorf("no processed block yet found from l2") + } previousToBlock, retryCount := f.getLastSentBlockAndRetryCount(lastSentCertificate) @@ -361,7 +375,7 @@ func (f *baseFlow) getNewLocalExitRoot( } // ConvertClaimToImportedBridgeExit converts a claim to an ImportedBridgeExit object -func (f *baseFlow) ConvertClaimToImportedBridgeExit(claim bridgesync.Claim) (*agglayertypes.ImportedBridgeExit, error) { +func (f *baseFlow) ConvertClaimToImportedBridgeExit(claim claimsynctypes.Claim) (*agglayertypes.ImportedBridgeExit, error) { return converters.ConvertToImportedBridgeExitWithoutClaimData(claim) } @@ -373,8 +387,8 @@ func (f *baseFlow) getBridgeExits(bridges []bridgesync.Bridge) []*agglayertypes. 
// getImportedBridgeExits converts claims to agglayertypes.ImportedBridgeExit objects and calculates necessary proofs func (f *baseFlow) getImportedBridgeExits( ctx context.Context, - claims []bridgesync.Claim, - unclaims []bridgesynctypes.Unclaim, + claims []claimsynctypes.Claim, + unclaims []claimsynctypes.Unclaim, rootFromWhichToProve common.Hash, ) ([]*agglayertypes.ImportedBridgeExit, error) { // Build unclaim counts by GlobalIndex @@ -387,7 +401,7 @@ func (f *baseFlow) getImportedBridgeExits( } } - filteredClaims := make([]bridgesync.Claim, 0) + filteredClaims := make([]claimsynctypes.Claim, 0) for _, c := range claims { if c.GlobalIndex != nil { key := c.GlobalIndex.String() @@ -455,7 +469,7 @@ func (f *baseFlow) getNextHeightAndPreviousLER( } // verifyClaimGERs verifies the correctnes GERs of the claims -func (f *baseFlow) verifyClaimGERs(claims []bridgesync.Claim) error { +func (f *baseFlow) verifyClaimGERs(claims []claimsynctypes.Claim) error { for _, claim := range claims { ger := l1infotreesync.CalculateGER(claim.MainnetExitRoot, claim.RollupExitRoot) if ger != claim.GlobalExitRoot { diff --git a/aggsender/flows/verifier_flow_factory.go b/aggsender/flows/verifier_flow_factory.go index b33e96937..bdab297bf 100644 --- a/aggsender/flows/verifier_flow_factory.go +++ b/aggsender/flows/verifier_flow_factory.go @@ -9,6 +9,7 @@ import ( "github.com/agglayer/aggkit/aggsender/query" "github.com/agglayer/aggkit/aggsender/types" "github.com/agglayer/aggkit/aggsender/validator" + claimsynctypes "github.com/agglayer/aggkit/claimsync/types" "github.com/agglayer/aggkit/log" "github.com/agglayer/aggkit/opnode" aggkittypes "github.com/agglayer/aggkit/types" @@ -24,6 +25,7 @@ func NewVerifierFlow( l2Client aggkittypes.BaseEthereumClienter, l1InfoTreeSyncer types.L1InfoTreeSyncer, l2Syncer types.L2BridgeSyncer, + l2ClaimSyncer claimsynctypes.ClaimSyncer, rollupDataQuerier types.RollupDataQuerier, committeeQuerier types.MultisigQuerier, initialLER common.Hash, @@ -33,7 +35,7
@@ func NewVerifierFlow( commonFlowComponents, err := CreateCommonFlowComponents( ctx, logger, nil, // storage is not used in validator, - l1Client, l2Client, l1InfoTreeSyncer, l2Syncer, rollupDataQuerier, committeeQuerier, 0, false, + l1Client, l2Client, l1InfoTreeSyncer, l2Syncer, l2ClaimSyncer, rollupDataQuerier, committeeQuerier, 0, false, cfg.MaxCertSize, cfg.DelayBetweenRetries.Duration, cfg.Signer, true, // full claims are (eventually) needed in validator mode cfg.RequireCommitteeMembershipCheck, @@ -64,7 +66,7 @@ func NewVerifierFlow( commonFlowComponents, err := CreateCommonFlowComponents( ctx, logger, nil, // storage is not used in validator, - l1Client, l2Client, l1InfoTreeSyncer, l2Syncer, rollupDataQuerier, committeeQuerier, + l1Client, l2Client, l1InfoTreeSyncer, l2Syncer, l2ClaimSyncer, rollupDataQuerier, committeeQuerier, 0, cfg.FEPConfig.RequireNoBlockGap, cfg.MaxCertSize, cfg.DelayBetweenRetries.Duration, cfg.Signer, diff --git a/aggsender/flows/verifier_flow_factory_test.go b/aggsender/flows/verifier_flow_factory_test.go index 28a8a3d3a..8fe728fe6 100644 --- a/aggsender/flows/verifier_flow_factory_test.go +++ b/aggsender/flows/verifier_flow_factory_test.go @@ -129,6 +129,7 @@ func TestNewVerifierFlow(t *testing.T) { nil, mockL1InfoTreeSyncer, mockL2Syncer, + nil, mockRollupDataQuerier, mockCommitteeQuerier, common.Hash{}, diff --git a/aggsender/mocks/mock_agglayer_bridge_l2_reader.go b/aggsender/mocks/mock_agglayer_bridge_l2_reader.go index a97430c9b..e25975fd9 100644 --- a/aggsender/mocks/mock_agglayer_bridge_l2_reader.go +++ b/aggsender/mocks/mock_agglayer_bridge_l2_reader.go @@ -5,7 +5,7 @@ package mocks import ( context "context" - types "github.com/agglayer/aggkit/bridgesync/types" + types "github.com/agglayer/aggkit/claimsync/types" mock "github.com/stretchr/testify/mock" ) diff --git a/aggsender/mocks/mock_aggsender_builder_flow.go b/aggsender/mocks/mock_aggsender_builder_flow.go index 2963c3653..91f7fea66 100644 ---
a/aggsender/mocks/mock_aggsender_builder_flow.go +++ b/aggsender/mocks/mock_aggsender_builder_flow.go @@ -343,6 +343,118 @@ func (_c *AggsenderBuilderFlow_UpdateAggchainData_Call) RunAndReturn(run func(*a return _c } +// GeneratePreBuildParams provides a mock function with given fields: ctx, certType +func (_m *AggsenderBuilderFlow) GeneratePreBuildParams(ctx context.Context, certType types.CertificateType) (*types.CertificatePreBuildParams, error) { + ret := _m.Called(ctx, certType) + + if len(ret) == 0 { + panic("no return value specified for GeneratePreBuildParams") + } + + var r0 *types.CertificatePreBuildParams + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, types.CertificateType) (*types.CertificatePreBuildParams, error)); ok { + return rf(ctx, certType) + } + if rf, ok := ret.Get(0).(func(context.Context, types.CertificateType) *types.CertificatePreBuildParams); ok { + r0 = rf(ctx, certType) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.CertificatePreBuildParams) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, types.CertificateType) error); ok { + r1 = rf(ctx, certType) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// AggsenderBuilderFlow_GeneratePreBuildParams_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GeneratePreBuildParams' +type AggsenderBuilderFlow_GeneratePreBuildParams_Call struct { + *mock.Call +} + +// GeneratePreBuildParams is a helper method to define mock.On call +func (_e *AggsenderBuilderFlow_Expecter) GeneratePreBuildParams(ctx interface{}, certType interface{}) *AggsenderBuilderFlow_GeneratePreBuildParams_Call { + return &AggsenderBuilderFlow_GeneratePreBuildParams_Call{Call: _e.mock.On("GeneratePreBuildParams", ctx, certType)} +} + +func (_c *AggsenderBuilderFlow_GeneratePreBuildParams_Call) Run(run func(ctx context.Context, certType types.CertificateType)) *AggsenderBuilderFlow_GeneratePreBuildParams_Call { + _c.Call.Run(func(args 
mock.Arguments) { + run(args[0].(context.Context), args[1].(types.CertificateType)) + }) + return _c +} + +func (_c *AggsenderBuilderFlow_GeneratePreBuildParams_Call) Return(_a0 *types.CertificatePreBuildParams, _a1 error) *AggsenderBuilderFlow_GeneratePreBuildParams_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *AggsenderBuilderFlow_GeneratePreBuildParams_Call) RunAndReturn(run func(context.Context, types.CertificateType) (*types.CertificatePreBuildParams, error)) *AggsenderBuilderFlow_GeneratePreBuildParams_Call { + _c.Call.Return(run) + return _c +} + +// GetNextBlockNumber provides a mock function with no fields +func (_m *AggsenderBuilderFlow) GetNextBlockNumber() (uint64, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetNextBlockNumber") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func() (uint64, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// AggsenderBuilderFlow_GetNextBlockNumber_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetNextBlockNumber' +type AggsenderBuilderFlow_GetNextBlockNumber_Call struct { + *mock.Call +} + +// GetNextBlockNumber is a helper method to define mock.On call +func (_e *AggsenderBuilderFlow_Expecter) GetNextBlockNumber() *AggsenderBuilderFlow_GetNextBlockNumber_Call { + return &AggsenderBuilderFlow_GetNextBlockNumber_Call{Call: _e.mock.On("GetNextBlockNumber")} +} + +func (_c *AggsenderBuilderFlow_GetNextBlockNumber_Call) Run(run func()) *AggsenderBuilderFlow_GetNextBlockNumber_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *AggsenderBuilderFlow_GetNextBlockNumber_Call) Return(_a0 uint64, _a1 error) *AggsenderBuilderFlow_GetNextBlockNumber_Call { + 
_c.Call.Return(_a0, _a1) + return _c +} + +func (_c *AggsenderBuilderFlow_GetNextBlockNumber_Call) RunAndReturn(run func() (uint64, error)) *AggsenderBuilderFlow_GetNextBlockNumber_Call { + _c.Call.Return(run) + return _c +} + // NewAggsenderBuilderFlow creates a new instance of AggsenderBuilderFlow. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. // The first argument is typically a *testing.T value. func NewAggsenderBuilderFlow(t interface { diff --git a/aggsender/mocks/mock_aggsender_flow_baser.go b/aggsender/mocks/mock_aggsender_flow_baser.go index ab816984c..b558cb5a2 100644 --- a/aggsender/mocks/mock_aggsender_flow_baser.go +++ b/aggsender/mocks/mock_aggsender_flow_baser.go @@ -4,7 +4,7 @@ package mocks import ( agglayertypes "github.com/agglayer/aggkit/agglayer/types" - bridgesync "github.com/agglayer/aggkit/bridgesync" + claimsynctypes "github.com/agglayer/aggkit/claimsync/types" common "github.com/ethereum/go-ethereum/common" @@ -90,7 +90,7 @@ func (_c *AggsenderFlowBaser_BuildCertificate_Call) RunAndReturn(run func(contex } // ConvertClaimToImportedBridgeExit provides a mock function with given fields: claim -func (_m *AggsenderFlowBaser) ConvertClaimToImportedBridgeExit(claim bridgesync.Claim) (*agglayertypes.ImportedBridgeExit, error) { +func (_m *AggsenderFlowBaser) ConvertClaimToImportedBridgeExit(claim claimsynctypes.Claim) (*agglayertypes.ImportedBridgeExit, error) { ret := _m.Called(claim) if len(ret) == 0 { @@ -99,10 +99,10 @@ func (_m *AggsenderFlowBaser) ConvertClaimToImportedBridgeExit(claim bridgesync. 
var r0 *agglayertypes.ImportedBridgeExit var r1 error - if rf, ok := ret.Get(0).(func(bridgesync.Claim) (*agglayertypes.ImportedBridgeExit, error)); ok { + if rf, ok := ret.Get(0).(func(claimsynctypes.Claim) (*agglayertypes.ImportedBridgeExit, error)); ok { return rf(claim) } - if rf, ok := ret.Get(0).(func(bridgesync.Claim) *agglayertypes.ImportedBridgeExit); ok { + if rf, ok := ret.Get(0).(func(claimsynctypes.Claim) *agglayertypes.ImportedBridgeExit); ok { r0 = rf(claim) } else { if ret.Get(0) != nil { @@ -110,7 +110,7 @@ func (_m *AggsenderFlowBaser) ConvertClaimToImportedBridgeExit(claim bridgesync. } } - if rf, ok := ret.Get(1).(func(bridgesync.Claim) error); ok { + if rf, ok := ret.Get(1).(func(claimsynctypes.Claim) error); ok { r1 = rf(claim) } else { r1 = ret.Error(1) @@ -125,14 +125,14 @@ type AggsenderFlowBaser_ConvertClaimToImportedBridgeExit_Call struct { } // ConvertClaimToImportedBridgeExit is a helper method to define mock.On call -// - claim bridgesync.Claim +// - claim claimsynctypes.Claim func (_e *AggsenderFlowBaser_Expecter) ConvertClaimToImportedBridgeExit(claim interface{}) *AggsenderFlowBaser_ConvertClaimToImportedBridgeExit_Call { return &AggsenderFlowBaser_ConvertClaimToImportedBridgeExit_Call{Call: _e.mock.On("ConvertClaimToImportedBridgeExit", claim)} } -func (_c *AggsenderFlowBaser_ConvertClaimToImportedBridgeExit_Call) Run(run func(claim bridgesync.Claim)) *AggsenderFlowBaser_ConvertClaimToImportedBridgeExit_Call { +func (_c *AggsenderFlowBaser_ConvertClaimToImportedBridgeExit_Call) Run(run func(claim claimsynctypes.Claim)) *AggsenderFlowBaser_ConvertClaimToImportedBridgeExit_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(bridgesync.Claim)) + run(args[0].(claimsynctypes.Claim)) }) return _c } @@ -142,7 +142,7 @@ func (_c *AggsenderFlowBaser_ConvertClaimToImportedBridgeExit_Call) Return(_a0 * return _c } -func (_c *AggsenderFlowBaser_ConvertClaimToImportedBridgeExit_Call) RunAndReturn(run func(bridgesync.Claim) 
(*agglayertypes.ImportedBridgeExit, error)) *AggsenderFlowBaser_ConvertClaimToImportedBridgeExit_Call { +func (_c *AggsenderFlowBaser_ConvertClaimToImportedBridgeExit_Call) RunAndReturn(run func(claimsynctypes.Claim) (*agglayertypes.ImportedBridgeExit, error)) *AggsenderFlowBaser_ConvertClaimToImportedBridgeExit_Call { _c.Call.Return(run) return _c } @@ -486,6 +486,61 @@ func (_c *AggsenderFlowBaser_StartL2Block_Call) RunAndReturn(run func() uint64) return _c } +// GetNextBlockNumber provides a mock function with no fields +func (_m *AggsenderFlowBaser) GetNextBlockNumber() (uint64, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetNextBlockNumber") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func() (uint64, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// AggsenderFlowBaser_GetNextBlockNumber_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetNextBlockNumber' +type AggsenderFlowBaser_GetNextBlockNumber_Call struct { + *mock.Call +} + +// GetNextBlockNumber is a helper method to define mock.On call +func (_e *AggsenderFlowBaser_Expecter) GetNextBlockNumber() *AggsenderFlowBaser_GetNextBlockNumber_Call { + return &AggsenderFlowBaser_GetNextBlockNumber_Call{Call: _e.mock.On("GetNextBlockNumber")} +} + +func (_c *AggsenderFlowBaser_GetNextBlockNumber_Call) Run(run func()) *AggsenderFlowBaser_GetNextBlockNumber_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *AggsenderFlowBaser_GetNextBlockNumber_Call) Return(_a0 uint64, _a1 error) *AggsenderFlowBaser_GetNextBlockNumber_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *AggsenderFlowBaser_GetNextBlockNumber_Call) RunAndReturn(run func() (uint64, error)) 
*AggsenderFlowBaser_GetNextBlockNumber_Call { + _c.Call.Return(run) + return _c +} + // VerifyBlockRangeGaps provides a mock function with given fields: ctx, lastSentCertificate, newFromBlock, newToBlock func (_m *AggsenderFlowBaser) VerifyBlockRangeGaps(ctx context.Context, lastSentCertificate *types.CertificateHeader, newFromBlock uint64, newToBlock uint64) error { ret := _m.Called(ctx, lastSentCertificate, newFromBlock, newToBlock) diff --git a/aggsender/mocks/mock_bridge_querier.go b/aggsender/mocks/mock_bridge_querier.go index 8916837fb..61f1c0836 100644 --- a/aggsender/mocks/mock_bridge_querier.go +++ b/aggsender/mocks/mock_bridge_querier.go @@ -4,13 +4,13 @@ package mocks import ( bridgesync "github.com/agglayer/aggkit/bridgesync" - bridgesynctypes "github.com/agglayer/aggkit/bridgesync/types" - common "github.com/ethereum/go-ethereum/common" context "context" mock "github.com/stretchr/testify/mock" + + types "github.com/agglayer/aggkit/claimsync/types" ) // BridgeQuerier is an autogenerated mock type for the BridgeQuerier type @@ -27,7 +27,7 @@ func (_m *BridgeQuerier) EXPECT() *BridgeQuerier_Expecter { } // GetBridgesAndClaims provides a mock function with given fields: ctx, fromBlock, toBlock -func (_m *BridgeQuerier) GetBridgesAndClaims(ctx context.Context, fromBlock uint64, toBlock uint64) ([]bridgesync.Bridge, []bridgesync.Claim, error) { +func (_m *BridgeQuerier) GetBridgesAndClaims(ctx context.Context, fromBlock uint64, toBlock uint64) ([]bridgesync.Bridge, []types.Claim, error) { ret := _m.Called(ctx, fromBlock, toBlock) if len(ret) == 0 { @@ -35,9 +35,9 @@ func (_m *BridgeQuerier) GetBridgesAndClaims(ctx context.Context, fromBlock uint } var r0 []bridgesync.Bridge - var r1 []bridgesync.Claim + var r1 []types.Claim var r2 error - if rf, ok := ret.Get(0).(func(context.Context, uint64, uint64) ([]bridgesync.Bridge, []bridgesync.Claim, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, uint64, uint64) ([]bridgesync.Bridge, []types.Claim, 
error)); ok { return rf(ctx, fromBlock, toBlock) } if rf, ok := ret.Get(0).(func(context.Context, uint64, uint64) []bridgesync.Bridge); ok { @@ -48,11 +48,11 @@ func (_m *BridgeQuerier) GetBridgesAndClaims(ctx context.Context, fromBlock uint } } - if rf, ok := ret.Get(1).(func(context.Context, uint64, uint64) []bridgesync.Claim); ok { + if rf, ok := ret.Get(1).(func(context.Context, uint64, uint64) []types.Claim); ok { r1 = rf(ctx, fromBlock, toBlock) } else { if ret.Get(1) != nil { - r1 = ret.Get(1).([]bridgesync.Claim) + r1 = ret.Get(1).([]types.Claim) } } @@ -85,12 +85,12 @@ func (_c *BridgeQuerier_GetBridgesAndClaims_Call) Run(run func(ctx context.Conte return _c } -func (_c *BridgeQuerier_GetBridgesAndClaims_Call) Return(_a0 []bridgesync.Bridge, _a1 []bridgesync.Claim, _a2 error) *BridgeQuerier_GetBridgesAndClaims_Call { +func (_c *BridgeQuerier_GetBridgesAndClaims_Call) Return(_a0 []bridgesync.Bridge, _a1 []types.Claim, _a2 error) *BridgeQuerier_GetBridgesAndClaims_Call { _c.Call.Return(_a0, _a1, _a2) return _c } -func (_c *BridgeQuerier_GetBridgesAndClaims_Call) RunAndReturn(run func(context.Context, uint64, uint64) ([]bridgesync.Bridge, []bridgesync.Claim, error)) *BridgeQuerier_GetBridgesAndClaims_Call { +func (_c *BridgeQuerier_GetBridgesAndClaims_Call) RunAndReturn(run func(context.Context, uint64, uint64) ([]bridgesync.Bridge, []types.Claim, error)) *BridgeQuerier_GetBridgesAndClaims_Call { _c.Call.Return(run) return _c } @@ -155,7 +155,7 @@ func (_c *BridgeQuerier_GetExitRootByIndex_Call) RunAndReturn(run func(context.C } // GetLastProcessedBlock provides a mock function with given fields: ctx -func (_m *BridgeQuerier) GetLastProcessedBlock(ctx context.Context) (uint64, error) { +func (_m *BridgeQuerier) GetLastProcessedBlock(ctx context.Context) (uint64, bool, error) { ret := _m.Called(ctx) if len(ret) == 0 { @@ -163,8 +163,9 @@ func (_m *BridgeQuerier) GetLastProcessedBlock(ctx context.Context) (uint64, err } var r0 uint64 - var r1 error - if rf, ok 
:= ret.Get(0).(func(context.Context) (uint64, error)); ok { + var r1 bool + var r2 error + if rf, ok := ret.Get(0).(func(context.Context) (uint64, bool, error)); ok { return rf(ctx) } if rf, ok := ret.Get(0).(func(context.Context) uint64); ok { @@ -173,13 +174,19 @@ func (_m *BridgeQuerier) GetLastProcessedBlock(ctx context.Context) (uint64, err r0 = ret.Get(0).(uint64) } - if rf, ok := ret.Get(1).(func(context.Context) error); ok { + if rf, ok := ret.Get(1).(func(context.Context) bool); ok { r1 = rf(ctx) } else { - r1 = ret.Error(1) + r1 = ret.Get(1).(bool) } - return r0, r1 + if rf, ok := ret.Get(2).(func(context.Context) error); ok { + r2 = rf(ctx) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 } // BridgeQuerier_GetLastProcessedBlock_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetLastProcessedBlock' @@ -200,34 +207,34 @@ func (_c *BridgeQuerier_GetLastProcessedBlock_Call) Run(run func(ctx context.Con return _c } -func (_c *BridgeQuerier_GetLastProcessedBlock_Call) Return(_a0 uint64, _a1 error) *BridgeQuerier_GetLastProcessedBlock_Call { - _c.Call.Return(_a0, _a1) +func (_c *BridgeQuerier_GetLastProcessedBlock_Call) Return(_a0 uint64, _a1 bool, _a2 error) *BridgeQuerier_GetLastProcessedBlock_Call { + _c.Call.Return(_a0, _a1, _a2) return _c } -func (_c *BridgeQuerier_GetLastProcessedBlock_Call) RunAndReturn(run func(context.Context) (uint64, error)) *BridgeQuerier_GetLastProcessedBlock_Call { +func (_c *BridgeQuerier_GetLastProcessedBlock_Call) RunAndReturn(run func(context.Context) (uint64, bool, error)) *BridgeQuerier_GetLastProcessedBlock_Call { _c.Call.Return(run) return _c } // GetUnsetClaimsForBlockRange provides a mock function with given fields: ctx, fromBlock, toBlock -func (_m *BridgeQuerier) GetUnsetClaimsForBlockRange(ctx context.Context, fromBlock uint64, toBlock uint64) ([]bridgesynctypes.Unclaim, error) { +func (_m *BridgeQuerier) GetUnsetClaimsForBlockRange(ctx context.Context, fromBlock 
uint64, toBlock uint64) ([]types.Unclaim, error) { ret := _m.Called(ctx, fromBlock, toBlock) if len(ret) == 0 { panic("no return value specified for GetUnsetClaimsForBlockRange") } - var r0 []bridgesynctypes.Unclaim + var r0 []types.Unclaim var r1 error - if rf, ok := ret.Get(0).(func(context.Context, uint64, uint64) ([]bridgesynctypes.Unclaim, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, uint64, uint64) ([]types.Unclaim, error)); ok { return rf(ctx, fromBlock, toBlock) } - if rf, ok := ret.Get(0).(func(context.Context, uint64, uint64) []bridgesynctypes.Unclaim); ok { + if rf, ok := ret.Get(0).(func(context.Context, uint64, uint64) []types.Unclaim); ok { r0 = rf(ctx, fromBlock, toBlock) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).([]bridgesynctypes.Unclaim) + r0 = ret.Get(0).([]types.Unclaim) } } @@ -260,12 +267,12 @@ func (_c *BridgeQuerier_GetUnsetClaimsForBlockRange_Call) Run(run func(ctx conte return _c } -func (_c *BridgeQuerier_GetUnsetClaimsForBlockRange_Call) Return(_a0 []bridgesynctypes.Unclaim, _a1 error) *BridgeQuerier_GetUnsetClaimsForBlockRange_Call { +func (_c *BridgeQuerier_GetUnsetClaimsForBlockRange_Call) Return(_a0 []types.Unclaim, _a1 error) *BridgeQuerier_GetUnsetClaimsForBlockRange_Call { _c.Call.Return(_a0, _a1) return _c } -func (_c *BridgeQuerier_GetUnsetClaimsForBlockRange_Call) RunAndReturn(run func(context.Context, uint64, uint64) ([]bridgesynctypes.Unclaim, error)) *BridgeQuerier_GetUnsetClaimsForBlockRange_Call { +func (_c *BridgeQuerier_GetUnsetClaimsForBlockRange_Call) RunAndReturn(run func(context.Context, uint64, uint64) ([]types.Unclaim, error)) *BridgeQuerier_GetUnsetClaimsForBlockRange_Call { _c.Call.Return(run) return _c } diff --git a/aggsender/mocks/mock_l2_bridge_syncer.go b/aggsender/mocks/mock_l2_bridge_syncer.go index 23fd452c4..41ca8d6fe 100644 --- a/aggsender/mocks/mock_l2_bridge_syncer.go +++ b/aggsender/mocks/mock_l2_bridge_syncer.go @@ -3,8 +3,6 @@ package mocks import ( - big "math/big" - 
bridgesync "github.com/agglayer/aggkit/bridgesync" common "github.com/ethereum/go-ethereum/common" @@ -147,125 +145,6 @@ func (_c *L2BridgeSyncer_GetBridges_Call) RunAndReturn(run func(context.Context, return _c } -// GetClaims provides a mock function with given fields: ctx, fromBlock, toBlock -func (_m *L2BridgeSyncer) GetClaims(ctx context.Context, fromBlock uint64, toBlock uint64) ([]bridgesync.Claim, error) { - ret := _m.Called(ctx, fromBlock, toBlock) - - if len(ret) == 0 { - panic("no return value specified for GetClaims") - } - - var r0 []bridgesync.Claim - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, uint64, uint64) ([]bridgesync.Claim, error)); ok { - return rf(ctx, fromBlock, toBlock) - } - if rf, ok := ret.Get(0).(func(context.Context, uint64, uint64) []bridgesync.Claim); ok { - r0 = rf(ctx, fromBlock, toBlock) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]bridgesync.Claim) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, uint64, uint64) error); ok { - r1 = rf(ctx, fromBlock, toBlock) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// L2BridgeSyncer_GetClaims_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetClaims' -type L2BridgeSyncer_GetClaims_Call struct { - *mock.Call -} - -// GetClaims is a helper method to define mock.On call -// - ctx context.Context -// - fromBlock uint64 -// - toBlock uint64 -func (_e *L2BridgeSyncer_Expecter) GetClaims(ctx interface{}, fromBlock interface{}, toBlock interface{}) *L2BridgeSyncer_GetClaims_Call { - return &L2BridgeSyncer_GetClaims_Call{Call: _e.mock.On("GetClaims", ctx, fromBlock, toBlock)} -} - -func (_c *L2BridgeSyncer_GetClaims_Call) Run(run func(ctx context.Context, fromBlock uint64, toBlock uint64)) *L2BridgeSyncer_GetClaims_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(uint64), args[2].(uint64)) - }) - return _c -} - -func (_c *L2BridgeSyncer_GetClaims_Call) Return(_a0 
[]bridgesync.Claim, _a1 error) *L2BridgeSyncer_GetClaims_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *L2BridgeSyncer_GetClaims_Call) RunAndReturn(run func(context.Context, uint64, uint64) ([]bridgesync.Claim, error)) *L2BridgeSyncer_GetClaims_Call { - _c.Call.Return(run) - return _c -} - -// GetClaimsByGlobalIndex provides a mock function with given fields: ctx, globalIndex -func (_m *L2BridgeSyncer) GetClaimsByGlobalIndex(ctx context.Context, globalIndex *big.Int) ([]bridgesync.Claim, error) { - ret := _m.Called(ctx, globalIndex) - - if len(ret) == 0 { - panic("no return value specified for GetClaimsByGlobalIndex") - } - - var r0 []bridgesync.Claim - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *big.Int) ([]bridgesync.Claim, error)); ok { - return rf(ctx, globalIndex) - } - if rf, ok := ret.Get(0).(func(context.Context, *big.Int) []bridgesync.Claim); ok { - r0 = rf(ctx, globalIndex) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]bridgesync.Claim) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *big.Int) error); ok { - r1 = rf(ctx, globalIndex) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// L2BridgeSyncer_GetClaimsByGlobalIndex_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetClaimsByGlobalIndex' -type L2BridgeSyncer_GetClaimsByGlobalIndex_Call struct { - *mock.Call -} - -// GetClaimsByGlobalIndex is a helper method to define mock.On call -// - ctx context.Context -// - globalIndex *big.Int -func (_e *L2BridgeSyncer_Expecter) GetClaimsByGlobalIndex(ctx interface{}, globalIndex interface{}) *L2BridgeSyncer_GetClaimsByGlobalIndex_Call { - return &L2BridgeSyncer_GetClaimsByGlobalIndex_Call{Call: _e.mock.On("GetClaimsByGlobalIndex", ctx, globalIndex)} -} - -func (_c *L2BridgeSyncer_GetClaimsByGlobalIndex_Call) Run(run func(ctx context.Context, globalIndex *big.Int)) *L2BridgeSyncer_GetClaimsByGlobalIndex_Call { - _c.Call.Run(func(args mock.Arguments) { - 
run(args[0].(context.Context), args[1].(*big.Int)) - }) - return _c -} - -func (_c *L2BridgeSyncer_GetClaimsByGlobalIndex_Call) Return(_a0 []bridgesync.Claim, _a1 error) *L2BridgeSyncer_GetClaimsByGlobalIndex_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *L2BridgeSyncer_GetClaimsByGlobalIndex_Call) RunAndReturn(run func(context.Context, *big.Int) ([]bridgesync.Claim, error)) *L2BridgeSyncer_GetClaimsByGlobalIndex_Call { - _c.Call.Return(run) - return _c -} - // GetExitRootByHash provides a mock function with given fields: ctx, root func (_m *L2BridgeSyncer) GetExitRootByHash(ctx context.Context, root common.Hash) (*treetypes.Root, error) { ret := _m.Called(ctx, root) @@ -383,7 +262,7 @@ func (_c *L2BridgeSyncer_GetExitRootByIndex_Call) RunAndReturn(run func(context. } // GetLastProcessedBlock provides a mock function with given fields: ctx -func (_m *L2BridgeSyncer) GetLastProcessedBlock(ctx context.Context) (uint64, error) { +func (_m *L2BridgeSyncer) GetLastProcessedBlock(ctx context.Context) (uint64, bool, error) { ret := _m.Called(ctx) if len(ret) == 0 { @@ -391,8 +270,9 @@ func (_m *L2BridgeSyncer) GetLastProcessedBlock(ctx context.Context) (uint64, er } var r0 uint64 - var r1 error - if rf, ok := ret.Get(0).(func(context.Context) (uint64, error)); ok { + var r1 bool + var r2 error + if rf, ok := ret.Get(0).(func(context.Context) (uint64, bool, error)); ok { return rf(ctx) } if rf, ok := ret.Get(0).(func(context.Context) uint64); ok { @@ -401,13 +281,19 @@ func (_m *L2BridgeSyncer) GetLastProcessedBlock(ctx context.Context) (uint64, er r0 = ret.Get(0).(uint64) } - if rf, ok := ret.Get(1).(func(context.Context) error); ok { + if rf, ok := ret.Get(1).(func(context.Context) bool); ok { r1 = rf(ctx) } else { - r1 = ret.Error(1) + r1 = ret.Get(1).(bool) } - return r0, r1 + if rf, ok := ret.Get(2).(func(context.Context) error); ok { + r2 = rf(ctx) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 } // L2BridgeSyncer_GetLastProcessedBlock_Call 
is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetLastProcessedBlock' @@ -428,12 +314,12 @@ func (_c *L2BridgeSyncer_GetLastProcessedBlock_Call) Run(run func(ctx context.Co return _c } -func (_c *L2BridgeSyncer_GetLastProcessedBlock_Call) Return(_a0 uint64, _a1 error) *L2BridgeSyncer_GetLastProcessedBlock_Call { - _c.Call.Return(_a0, _a1) +func (_c *L2BridgeSyncer_GetLastProcessedBlock_Call) Return(_a0 uint64, _a1 bool, _a2 error) *L2BridgeSyncer_GetLastProcessedBlock_Call { + _c.Call.Return(_a0, _a1, _a2) return _c } -func (_c *L2BridgeSyncer_GetLastProcessedBlock_Call) RunAndReturn(run func(context.Context) (uint64, error)) *L2BridgeSyncer_GetLastProcessedBlock_Call { +func (_c *L2BridgeSyncer_GetLastProcessedBlock_Call) RunAndReturn(run func(context.Context) (uint64, bool, error)) *L2BridgeSyncer_GetLastProcessedBlock_Call { _c.Call.Return(run) return _c } diff --git a/aggsender/mocks/mock_optimistic_signer.go b/aggsender/mocks/mock_optimistic_signer.go index bd259d59e..05f3d0e1e 100644 --- a/aggsender/mocks/mock_optimistic_signer.go +++ b/aggsender/mocks/mock_optimistic_signer.go @@ -3,7 +3,7 @@ package mocks import ( - bridgesync "github.com/agglayer/aggkit/bridgesync" + claimsynctypes "github.com/agglayer/aggkit/claimsync/types" common "github.com/ethereum/go-ethereum/common" context "context" @@ -27,7 +27,7 @@ func (_m *OptimisticSigner) EXPECT() *OptimisticSigner_Expecter { } // Sign provides a mock function with given fields: ctx, aggchainReq, newLocalExitRoot, claims -func (_m *OptimisticSigner) Sign(ctx context.Context, aggchainReq types.AggchainProofRequest, newLocalExitRoot common.Hash, claims []bridgesync.Claim) ([]byte, string, error) { +func (_m *OptimisticSigner) Sign(ctx context.Context, aggchainReq types.AggchainProofRequest, newLocalExitRoot common.Hash, claims []claimsynctypes.Claim) ([]byte, string, error) { ret := _m.Called(ctx, aggchainReq, newLocalExitRoot, claims) if len(ret) == 0 { @@ -37,10 +37,10 
@@ func (_m *OptimisticSigner) Sign(ctx context.Context, aggchainReq types.Aggchain var r0 []byte var r1 string var r2 error - if rf, ok := ret.Get(0).(func(context.Context, types.AggchainProofRequest, common.Hash, []bridgesync.Claim) ([]byte, string, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, types.AggchainProofRequest, common.Hash, []claimsynctypes.Claim) ([]byte, string, error)); ok { return rf(ctx, aggchainReq, newLocalExitRoot, claims) } - if rf, ok := ret.Get(0).(func(context.Context, types.AggchainProofRequest, common.Hash, []bridgesync.Claim) []byte); ok { + if rf, ok := ret.Get(0).(func(context.Context, types.AggchainProofRequest, common.Hash, []claimsynctypes.Claim) []byte); ok { r0 = rf(ctx, aggchainReq, newLocalExitRoot, claims) } else { if ret.Get(0) != nil { @@ -48,13 +48,13 @@ func (_m *OptimisticSigner) Sign(ctx context.Context, aggchainReq types.Aggchain } } - if rf, ok := ret.Get(1).(func(context.Context, types.AggchainProofRequest, common.Hash, []bridgesync.Claim) string); ok { + if rf, ok := ret.Get(1).(func(context.Context, types.AggchainProofRequest, common.Hash, []claimsynctypes.Claim) string); ok { r1 = rf(ctx, aggchainReq, newLocalExitRoot, claims) } else { r1 = ret.Get(1).(string) } - if rf, ok := ret.Get(2).(func(context.Context, types.AggchainProofRequest, common.Hash, []bridgesync.Claim) error); ok { + if rf, ok := ret.Get(2).(func(context.Context, types.AggchainProofRequest, common.Hash, []claimsynctypes.Claim) error); ok { r2 = rf(ctx, aggchainReq, newLocalExitRoot, claims) } else { r2 = ret.Error(2) @@ -72,14 +72,14 @@ type OptimisticSigner_Sign_Call struct { // - ctx context.Context // - aggchainReq types.AggchainProofRequest // - newLocalExitRoot common.Hash -// - claims []bridgesync.Claim +// - claims []claimsynctypes.Claim func (_e *OptimisticSigner_Expecter) Sign(ctx interface{}, aggchainReq interface{}, newLocalExitRoot interface{}, claims interface{}) *OptimisticSigner_Sign_Call { return 
&OptimisticSigner_Sign_Call{Call: _e.mock.On("Sign", ctx, aggchainReq, newLocalExitRoot, claims)} } -func (_c *OptimisticSigner_Sign_Call) Run(run func(ctx context.Context, aggchainReq types.AggchainProofRequest, newLocalExitRoot common.Hash, claims []bridgesync.Claim)) *OptimisticSigner_Sign_Call { +func (_c *OptimisticSigner_Sign_Call) Run(run func(ctx context.Context, aggchainReq types.AggchainProofRequest, newLocalExitRoot common.Hash, claims []claimsynctypes.Claim)) *OptimisticSigner_Sign_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(types.AggchainProofRequest), args[2].(common.Hash), args[3].([]bridgesync.Claim)) + run(args[0].(context.Context), args[1].(types.AggchainProofRequest), args[2].(common.Hash), args[3].([]claimsynctypes.Claim)) }) return _c } @@ -89,7 +89,7 @@ func (_c *OptimisticSigner_Sign_Call) Return(_a0 []byte, _a1 string, _a2 error) return _c } -func (_c *OptimisticSigner_Sign_Call) RunAndReturn(run func(context.Context, types.AggchainProofRequest, common.Hash, []bridgesync.Claim) ([]byte, string, error)) *OptimisticSigner_Sign_Call { +func (_c *OptimisticSigner_Sign_Call) RunAndReturn(run func(context.Context, types.AggchainProofRequest, common.Hash, []claimsynctypes.Claim) ([]byte, string, error)) *OptimisticSigner_Sign_Call { _c.Call.Return(run) return _c } diff --git a/aggsender/optimistic/optimistic_sign.go b/aggsender/optimistic/optimistic_sign.go index 6f05dac8e..e881b9118 100644 --- a/aggsender/optimistic/optimistic_sign.go +++ b/aggsender/optimistic/optimistic_sign.go @@ -7,7 +7,7 @@ import ( optimistichash "github.com/agglayer/aggkit/aggsender/optimistic/optimistichash" "github.com/agglayer/aggkit/aggsender/query" "github.com/agglayer/aggkit/aggsender/types" - "github.com/agglayer/aggkit/bridgesync" + claimsynctypes "github.com/agglayer/aggkit/claimsync/types" "github.com/agglayer/aggkit/log" "github.com/agglayer/aggkit/opnode" "github.com/agglayer/go_signer/signer" @@ -107,7 +107,7 @@ func 
validateSignerAgainstContract( func (o *OptimisticSignatureCalculatorImpl) Sign(ctx context.Context, aggchainReq types.AggchainProofRequest, newLocalExitRoot common.Hash, - claims []bridgesync.Claim, + claims []claimsynctypes.Claim, ) ([]byte, string, error) { o.logger.Debugf("OptimisticSignatureCalculatorImpl.Sign. L1InfoTreeLeaf.BlockNumber=%d", aggchainReq.L1InfoTreeLeaf.BlockNumber) diff --git a/aggsender/optimistic/optimistichash/calculate_hash_commit_imported_bridges.go b/aggsender/optimistic/optimistichash/calculate_hash_commit_imported_bridges.go index 08b8c8481..5943b798c 100644 --- a/aggsender/optimistic/optimistichash/calculate_hash_commit_imported_bridges.go +++ b/aggsender/optimistic/optimistichash/calculate_hash_commit_imported_bridges.go @@ -4,7 +4,7 @@ import ( "math/big" "github.com/agglayer/aggkit/aggsender/converters" - "github.com/agglayer/aggkit/bridgesync" + claimsynctypes "github.com/agglayer/aggkit/claimsync/types" aggkitcommon "github.com/agglayer/aggkit/common" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" @@ -17,7 +17,7 @@ import ( // CalculateCommitImportedBrdigeExitsHashFromClaims(...) 
// CalculateCommitImportedBrdigeExitsHashFromClaims calculate hash from certBuildParams ([]bridgesync.Claim) -func CalculateCommitImportedBrdigeExitsHashFromClaims(claims []bridgesync.Claim) common.Hash { +func CalculateCommitImportedBrdigeExitsHashFromClaims(claims []claimsynctypes.Claim) common.Hash { data := newCommitImportedBrigesData(claims) return data.hash() } @@ -31,7 +31,7 @@ type optimisticCommitImportedBrigeData struct { bridgeExitHash common.Hash } -func newCommitImportedBrigesData(claims []bridgesync.Claim) *optimisticCommitImportedBrigesData { +func newCommitImportedBrigesData(claims []claimsynctypes.Claim) *optimisticCommitImportedBrigesData { res := optimisticCommitImportedBrigesData{} res.bridges = make([]optimisticCommitImportedBrigeData, len(claims)) for i, claim := range claims { @@ -50,7 +50,7 @@ func (o *optimisticCommitImportedBrigesData) hash() common.Hash { return crypto.Keccak256Hash(combined) } -func (o *optimisticCommitImportedBrigeData) setBridgeExitHash(claim *bridgesync.Claim) { +func (o *optimisticCommitImportedBrigeData) setBridgeExitHash(claim *claimsynctypes.Claim) { be := converters.ConvertBridgeExitFromClaim(*claim) o.bridgeExitHash = be.Hash() } diff --git a/aggsender/prover/proof_generation_tool.go b/aggsender/prover/proof_generation_tool.go index 39f3b8c78..e8219e8c8 100644 --- a/aggsender/prover/proof_generation_tool.go +++ b/aggsender/prover/proof_generation_tool.go @@ -12,6 +12,7 @@ import ( "github.com/agglayer/aggkit/aggsender/types" "github.com/agglayer/aggkit/bridgesync" bridgesynctypes "github.com/agglayer/aggkit/bridgesync/types" + claimsynctypes "github.com/agglayer/aggkit/claimsync/types" aggkitgrpc "github.com/agglayer/aggkit/grpc" "github.com/agglayer/aggkit/l2gersync" "github.com/agglayer/aggkit/log" @@ -56,8 +57,9 @@ type Config struct { type AggchainProofGenerationTool struct { cfg Config - logger *log.Logger - l2Syncer types.L2BridgeSyncer + logger *log.Logger + l2Syncer types.L2BridgeSyncer + l2ClaimSyncer 
claimsynctypes.ClaimSyncer aggchainProofClient types.AggchainProofClientInterface flow AggchainProofFlow @@ -77,6 +79,7 @@ func NewAggchainProofGenerationTool( l1Client aggkittypes.BaseEthereumClienter, l2Client aggkittypes.BaseEthereumClienter, l2Syncer types.L2BridgeSyncer, + l2ClaimSyncer claimsynctypes.ClaimSyncer, l1InfoTreeSyncer types.L1InfoTreeSyncer, ) (*AggchainProofGenerationTool, error) { if err := cfg.AggkitProverClient.Validate(); err != nil { @@ -104,7 +107,7 @@ func NewAggchainProofGenerationTool( return nil, fmt.Errorf("failed to create bridge L2 sovereign reader: %w", err) } - l2BridgeQuerier := query.NewBridgeDataQuerier(logger, l2Syncer, time.Second, agglayerBridgeL2Reader) + l2BridgeQuerier := query.NewBridgeDataQuerier(logger, l2Syncer, l2ClaimSyncer, time.Second, agglayerBridgeL2Reader) baseFlow := flows.NewBaseFlow( logger, @@ -122,13 +125,14 @@ func NewAggchainProofGenerationTool( nil, // optimistic signer is not used in the tool, so we pass nil baseFlow, query.NewGERDataQuerier(l1InfoTreeQuerier, l2GERReader), - query.NewBridgeDataQuerier(logger, l2Syncer, time.Second, agglayerBridgeL2Reader), + query.NewBridgeDataQuerier(logger, l2Syncer, l2ClaimSyncer, time.Second, agglayerBridgeL2Reader), ) return &AggchainProofGenerationTool{ cfg: cfg, logger: logger, l2Syncer: l2Syncer, + l2ClaimSyncer: l2ClaimSyncer, flow: aggchainProofQuerier, aggchainProofClient: aggchainProofClient, }, nil @@ -152,10 +156,13 @@ func (a *AggchainProofGenerationTool) GenerateAggchainProof( "Max end block: %d", lastProvenBlock, maxEndBlock) // get last L2 block synced - lastL2BlockSynced, err := a.l2Syncer.GetLastProcessedBlock(ctx) + lastL2BlockSynced, found, err := a.l2Syncer.GetLastProcessedBlock(ctx) if err != nil { return nil, fmt.Errorf("error getting last processed block from l2: %w", err) } + if !found { + return nil, fmt.Errorf("no processed block yet found from l2") + } a.logger.Debugf("Last L2 block synced: %d", lastL2BlockSynced) @@ -173,7 +180,7 @@ func 
(a *AggchainProofGenerationTool) GenerateAggchainProof( // get claims for the block range a.logger.Debugf("Getting claims for block range [%d : %d]", fromBlock, maxEndBlock) - claims, err := a.l2Syncer.GetClaims(ctx, fromBlock, maxEndBlock) + claims, err := a.l2ClaimSyncer.GetClaims(ctx, fromBlock, maxEndBlock) if err != nil { return nil, fmt.Errorf("error getting claims (imported bridge exits): %w", err) } diff --git a/aggsender/query/aggchain_proof_query.go b/aggsender/query/aggchain_proof_query.go index ffc599978..a12d6fb91 100644 --- a/aggsender/query/aggchain_proof_query.go +++ b/aggsender/query/aggchain_proof_query.go @@ -10,7 +10,7 @@ import ( "github.com/agglayer/aggkit/aggsender/metrics" "github.com/agglayer/aggkit/aggsender/types" "github.com/agglayer/aggkit/bridgesync" - bridgesynctypes "github.com/agglayer/aggkit/bridgesync/types" + claimsynctypes "github.com/agglayer/aggkit/claimsync/types" "github.com/agglayer/aggkit/grpc" "google.golang.org/grpc/codes" ) @@ -184,7 +184,7 @@ func (a *aggchainProofQuery) generateOptimisticAggchainProof(ctx context.Context // getImportedBridgeExitsForProver converts the claims to imported bridge exits // so that the aggchain prover can use them to generate the aggchain proof func (a *aggchainProofQuery) getImportedBridgeExitsForProver( - claims []bridgesync.Claim) ([]*agglayertypes.ImportedBridgeExitWithBlockNumber, error) { + claims []claimsynctypes.Claim) ([]*agglayertypes.ImportedBridgeExitWithBlockNumber, error) { importedBridgeExits := make([]*agglayertypes.ImportedBridgeExitWithBlockNumber, 0, len(claims)) for _, claim := range claims { // we do not need claim data and proofs here, only imported bridge exit data like: @@ -207,7 +207,7 @@ func (a *aggchainProofQuery) getImportedBridgeExitsForProver( } func (a *aggchainProofQuery) convertUnclaimsToAgglayerUnclaims( - unclaims []bridgesynctypes.Unclaim) ([]*agglayertypes.Unclaim, error) { + unclaims []claimsynctypes.Unclaim) ([]*agglayertypes.Unclaim, error) { 
unclaimsConverted := make([]*agglayertypes.Unclaim, 0, len(unclaims)) for _, unclaim := range unclaims { diff --git a/aggsender/query/bridge_query.go b/aggsender/query/bridge_query.go index d1b63d8f7..33976e636 100644 --- a/aggsender/query/bridge_query.go +++ b/aggsender/query/bridge_query.go @@ -7,7 +7,7 @@ import ( "github.com/agglayer/aggkit/aggsender/types" "github.com/agglayer/aggkit/bridgesync" - bridgesynctypes "github.com/agglayer/aggkit/bridgesync/types" + claimsynctypes "github.com/agglayer/aggkit/claimsync/types" "github.com/ethereum/go-ethereum/common" ) @@ -17,6 +17,7 @@ var _ types.BridgeQuerier = (*bridgeDataQuerier)(nil) type bridgeDataQuerier struct { log types.Logger bridgeSyncer types.L2BridgeSyncer + claimSyncer claimsynctypes.ClaimSyncer delayBetweenRetries time.Duration agglayerBridgeL2Reader types.AgglayerBridgeL2Reader @@ -27,12 +28,14 @@ type bridgeDataQuerier struct { func NewBridgeDataQuerier( log types.Logger, bridgeSyncer types.L2BridgeSyncer, + claimSyncer claimsynctypes.ClaimSyncer, delayBetweenRetries time.Duration, agglayerBridgeL2Reader types.AgglayerBridgeL2Reader, ) *bridgeDataQuerier { return &bridgeDataQuerier{ log: log, bridgeSyncer: bridgeSyncer, + claimSyncer: claimSyncer, delayBetweenRetries: delayBetweenRetries, originNetwork: bridgeSyncer.OriginNetwork(), agglayerBridgeL2Reader: agglayerBridgeL2Reader, @@ -56,13 +59,13 @@ func NewBridgeDataQuerier( func (b *bridgeDataQuerier) GetBridgesAndClaims( ctx context.Context, fromBlock, toBlock uint64, -) ([]bridgesync.Bridge, []bridgesync.Claim, error) { +) ([]bridgesync.Bridge, []claimsynctypes.Claim, error) { bridges, err := b.bridgeSyncer.GetBridges(ctx, fromBlock, toBlock) if err != nil { return nil, nil, fmt.Errorf("error getting bridges: %w", err) } - claims, err := b.bridgeSyncer.GetClaims(ctx, fromBlock, toBlock) + claims, err := b.claimSyncer.GetClaims(ctx, fromBlock, toBlock) if err != nil { return nil, nil, fmt.Errorf("error getting claims: %w", err) } @@ -83,17 +86,30 
@@ func (b *bridgeDataQuerier) GetExitRootByIndex(ctx context.Context, index uint32 return exitRoot.Hash, nil } -// GetLastProcessedBlock retrieves the last processed block number from the bridge syncer. -// Returns: -// - uint64: The last processed block number. -// - error: An error if there is an issue retrieving the block number. -func (b *bridgeDataQuerier) GetLastProcessedBlock(ctx context.Context) (uint64, error) { - lastProcessedBlock, err := b.bridgeSyncer.GetLastProcessedBlock(ctx) +// GetLastProcessedBlock retrieves the last processed block number considering both the bridge syncer +// and the claim syncer. It returns the minimum of the two so that the reported block is one where +// both syncers have completed processing. +func (b *bridgeDataQuerier) GetLastProcessedBlock(ctx context.Context) (uint64, bool, error) { + bridgeBlock, found, err := b.bridgeSyncer.GetLastProcessedBlock(ctx) if err != nil { - return 0, fmt.Errorf("error getting last processed block: %w", err) + return 0, false, fmt.Errorf("error getting bridge syncer last processed block: %w", err) + } + if !found { + return 0, false, nil } - return lastProcessedBlock, nil + claimBlock, claimFound, err := b.claimSyncer.GetLastProcessedBlock(ctx) + if err != nil { + return 0, false, fmt.Errorf("error getting claim syncer last processed block: %w", err) + } + if !claimFound { + return 0, false, nil + } + + if claimBlock < bridgeBlock { + return claimBlock, true, nil + } + return bridgeBlock, true, nil } // OriginNetwork returns the origin network id related to given bridge syncer. @@ -101,10 +117,10 @@ func (b *bridgeDataQuerier) OriginNetwork() uint32 { return b.originNetwork } -// WaitForSyncerToCatchUp waits for the bridge syncer to catch up to a specified block. +// WaitForSyncerToCatchUp waits for both the bridge syncer and the claim syncer to catch up to a specified block. 
func (b *bridgeDataQuerier) WaitForSyncerToCatchUp(ctx context.Context, block uint64) error { - b.log.Infof("bridgeDataQuerier - waiting for L2 syncer to catch up to block: %d", block) - defer b.log.Infof("bridgeDataQuerier - finished waiting for L2 syncer to catch up to block: %d", block) + b.log.Infof("bridgeDataQuerier - waiting for L2 syncers to catch up to block: %d", block) + defer b.log.Infof("bridgeDataQuerier - finished waiting for L2 syncers to catch up to block: %d", block) if b.delayBetweenRetries <= 0 { b.log.Warnf("bridgeDataQuerier - invalid delayBetweenRetries: %v, falling back to default value of 1s", @@ -116,18 +132,19 @@ func (b *bridgeDataQuerier) WaitForSyncerToCatchUp(ctx context.Context, block ui defer ticker.Stop() for { - lastProcessedBlock, err := b.bridgeSyncer.GetLastProcessedBlock(ctx) + bridgeReady, err := b.isSyncerCaughtUp(ctx, block) if err != nil { - return fmt.Errorf("bridgeDataQuerier - error getting last processed block: %w", err) + return fmt.Errorf("bridgeDataQuerier - error checking bridge syncer: %w", err) } - if lastProcessedBlock >= block { - b.log.Infof("bridgeDataQuerier - L2 syncer caught up to block: %d", block) - return nil + claimReady, err := b.isClaimSyncerCaughtUp(ctx, block) + if err != nil { + return fmt.Errorf("bridgeDataQuerier - error checking claim syncer: %w", err) } - b.log.Infof("bridgeDataQuerier - waiting for L2 syncer to catch up to block: %d, current last processed block: %d", - block, lastProcessedBlock) + if bridgeReady && claimReady { + return nil + } select { case <-ctx.Done(): @@ -138,9 +155,52 @@ func (b *bridgeDataQuerier) WaitForSyncerToCatchUp(ctx context.Context, block ui } } +// isSyncerCaughtUp checks whether the bridge syncer has processed up to the given block. +// Returns true if caught up, false if not yet. 
+func (b *bridgeDataQuerier) isSyncerCaughtUp(ctx context.Context, block uint64) (bool, error) { + lastProcessedBlock, found, err := b.bridgeSyncer.GetLastProcessedBlock(ctx) + if err != nil { + return false, err + } + + if !found { + b.log.Infof("bridgeDataQuerier - bridge syncer: no blocks have been processed yet, waiting to reach block: %d", block) + return false, nil + } + + if lastProcessedBlock >= block { + b.log.Infof("bridgeDataQuerier - bridge syncer caught up to block: %d", block) + return true, nil + } + + b.log.Infof("bridgeDataQuerier - bridge syncer waiting to reach block: %d, current: %d", block, lastProcessedBlock) + return false, nil +} + +// isClaimSyncerCaughtUp checks whether the claim syncer has processed up to the given block. +// Returns true if caught up, false if not yet. +func (b *bridgeDataQuerier) isClaimSyncerCaughtUp(ctx context.Context, block uint64) (bool, error) { + lastProcessedBlock, found, err := b.claimSyncer.GetLastProcessedBlock(ctx) + if err != nil { + return false, err + } + if !found { + b.log.Infof("bridgeDataQuerier - claim syncer: no blocks have been processed yet, waiting to reach block: %d", block) + return false, nil + } + + if lastProcessedBlock >= block { + b.log.Infof("bridgeDataQuerier - claim syncer caught up to block: %d", block) + return true, nil + } + + b.log.Infof("bridgeDataQuerier - claim syncer waiting to reach block: %d, current: %d", block, lastProcessedBlock) + return false, nil +} + // GetUnsetClaimsForBlockRange gets unset claims from agglayer bridge L2 and converts to unclaim map func (b *bridgeDataQuerier) GetUnsetClaimsForBlockRange(ctx context.Context, - fromBlock, toBlock uint64) ([]bridgesynctypes.Unclaim, error) { + fromBlock, toBlock uint64) ([]claimsynctypes.Unclaim, error) { b.log.Debugf("getting unset claims for block range %d to %d", fromBlock, toBlock) return b.agglayerBridgeL2Reader.GetUnsetClaimsForBlockRange(ctx, fromBlock, toBlock) } diff --git a/aggsender/query/certificate_query.go 
b/aggsender/query/certificate_query.go index 8b5ab39ec..f0dac3d40 100644 --- a/aggsender/query/certificate_query.go +++ b/aggsender/query/certificate_query.go @@ -9,6 +9,7 @@ import ( agglayertypes "github.com/agglayer/aggkit/agglayer/types" "github.com/agglayer/aggkit/aggsender/converters" "github.com/agglayer/aggkit/aggsender/types" + claimsynctypes "github.com/agglayer/aggkit/claimsync/types" "github.com/ethereum/go-ethereum/common" ) @@ -18,6 +19,7 @@ var _ types.CertificateQuerier = (*certificateQuerier)(nil) // settled and pending certificates type certificateQuerier struct { l2BridgeSyncer types.L2BridgeSyncer + l2ClaimSyncer claimsynctypes.ClaimSyncer aggchainFEPQuerier types.AggchainFEPRollupQuerier agglayerClient agglayer.AgglayerClientInterface initialLER common.Hash @@ -25,12 +27,14 @@ type certificateQuerier struct { func NewCertificateQuerier( bridgeSyncer types.L2BridgeSyncer, + l2ClaimSyncer claimsynctypes.ClaimSyncer, aggchainFEPQuerier types.AggchainFEPRollupQuerier, agglayerClient agglayer.AgglayerClientInterface, initialLER common.Hash, ) types.CertificateQuerier { return &certificateQuerier{ l2BridgeSyncer: bridgeSyncer, + l2ClaimSyncer: l2ClaimSyncer, aggchainFEPQuerier: aggchainFEPQuerier, agglayerClient: agglayerClient, initialLER: initialLER, @@ -202,7 +206,7 @@ func (c *certificateQuerier) getBlockNumFromLER(ctx context.Context, localExitRo func (c *certificateQuerier) getBlockNumFromGlobalIndex( ctx context.Context, globalIndex *big.Int, bridgeExitHash common.Hash) (uint64, error) { - claims, err := c.l2BridgeSyncer.GetClaimsByGlobalIndex(ctx, globalIndex) + claims, err := c.l2ClaimSyncer.GetClaimsByGlobalIndex(ctx, globalIndex) if err != nil { return 0, fmt.Errorf("failed to get claim(s) by global index %s: %w", globalIndex.String(), err) } diff --git a/aggsender/trigger/trigger_by_bridge.go b/aggsender/trigger/trigger_by_bridge.go index 3c0b03972..7054224a5 100644 --- a/aggsender/trigger/trigger_by_bridge.go +++ 
b/aggsender/trigger/trigger_by_bridge.go @@ -75,11 +75,15 @@ func (r *preconfTrigger) TriggerCh(ctx context.Context) <-chan types.Certificate // ForceTriggerEvent forces the preconf trigger to emit a synchronization event. func (r *preconfTrigger) ForceTriggerEvent() { - blockNumber, err := r.l2BridgeSync.GetLastProcessedBlock(context.Background()) + blockNumber, found, err := r.l2BridgeSync.GetLastProcessedBlock(context.Background()) if err != nil { r.log.Errorf("ForceTriggerEvent: Failed to get last processed block: %v", err) return } + if !found { + r.log.Errorf("ForceTriggerEvent: No processed block found, cannot emit trigger event") + return + } if r.ch == nil { return } diff --git a/aggsender/types/certificate_build_params.go b/aggsender/types/certificate_build_params.go index e84d8cfbf..7d1434bdf 100644 --- a/aggsender/types/certificate_build_params.go +++ b/aggsender/types/certificate_build_params.go @@ -6,7 +6,7 @@ import ( agglayertypes "github.com/agglayer/aggkit/agglayer/types" "github.com/agglayer/aggkit/bridgesync" - bridgesynctypes "github.com/agglayer/aggkit/bridgesync/types" + claimsynctypes "github.com/agglayer/aggkit/claimsync/types" aggkitcommon "github.com/agglayer/aggkit/common" "github.com/agglayer/aggkit/l1infotreesync" "github.com/ethereum/go-ethereum/common" @@ -54,8 +54,8 @@ type CertificateBuildParams struct { FromBlock uint64 ToBlock uint64 Bridges []bridgesync.Bridge - Claims []bridgesync.Claim - Unclaims []bridgesynctypes.Unclaim + Claims []claimsynctypes.Claim + Unclaims []claimsynctypes.Unclaim CreatedAt uint32 RetryCount int LastSentCertificate *CertificateHeader @@ -96,9 +96,9 @@ func (c *CertificateBuildParams) Range(fromBlock, toBlock uint64) (*CertificateB ToBlock: toBlock, Bridges: make([]bridgesync.Bridge, 0, aggkitcommon.EstimateSliceCapacity(len(c.Bridges), span, fullSpan)), - Claims: make([]bridgesync.Claim, 0, + Claims: make([]claimsynctypes.Claim, 0, aggkitcommon.EstimateSliceCapacity(len(c.Claims), span, fullSpan)), - 
Unclaims: make([]bridgesynctypes.Unclaim, 0, + Unclaims: make([]claimsynctypes.Unclaim, 0, aggkitcommon.EstimateSliceCapacity(len(c.Unclaims), span, fullSpan)), CreatedAt: c.CreatedAt, RetryCount: c.RetryCount, diff --git a/aggsender/types/interfaces.go b/aggsender/types/interfaces.go index 87bcb801d..e145dda37 100644 --- a/aggsender/types/interfaces.go +++ b/aggsender/types/interfaces.go @@ -10,7 +10,7 @@ import ( "github.com/0xPolygon/cdk-contracts-tooling/contracts/aggchain-multisig/agglayermanager" agglayertypes "github.com/agglayer/aggkit/agglayer/types" "github.com/agglayer/aggkit/bridgesync" - bridgesynctypes "github.com/agglayer/aggkit/bridgesync/types" + claimsynctypes "github.com/agglayer/aggkit/claimsync/types" "github.com/agglayer/aggkit/l1infotreesync" "github.com/agglayer/aggkit/l2gersync" "github.com/agglayer/aggkit/sync" @@ -31,6 +31,9 @@ type AggsenderBuilderFlow interface { // BuildCertificate builds a certificate based on the buildParams BuildCertificate(ctx context.Context, buildParams *CertificateBuildParams) (*agglayertypes.Certificate, error) + // GeneratePreBuildParams generates the pre-build parameters based on the certificate type + GeneratePreBuildParams(ctx context.Context, + certType CertificateType) (*CertificatePreBuildParams, error) // GenerateBuildParams generates the build parameters based on the preParams GenerateBuildParams(ctx context.Context, preParams *CertificatePreBuildParams) (*CertificateBuildParams, error) @@ -38,6 +41,8 @@ type AggsenderBuilderFlow interface { UpdateAggchainData(cert *agglayertypes.Certificate, multisig *agglayertypes.Multisig) error // Signer is the signer used to sign the certificate Signer() signertypes.Signer + // GetNextBlockNumber returns the first block number of the next certificate to generate + GetNextBlockNumber() (uint64, error) } // AggsenderVerifierFlow is an interface that defines the methods to verify the certificate @@ -70,8 +75,10 @@ type AggsenderFlowBaser interface { ctx 
context.Context, lastSentCertificate *CertificateHeader, newFromBlock, newToBlock uint64) error - ConvertClaimToImportedBridgeExit(claim bridgesync.Claim) (*agglayertypes.ImportedBridgeExit, error) + ConvertClaimToImportedBridgeExit(claim claimsynctypes.Claim) (*agglayertypes.ImportedBridgeExit, error) StartL2Block() uint64 + // GetNextBlockNumber returns the first block number of the next certificate to generate + GetNextBlockNumber() (uint64, error) GeneratePreBuildParams(ctx context.Context, certType CertificateType) (*CertificatePreBuildParams, error) GenerateBuildParams(ctx context.Context, @@ -99,11 +106,11 @@ type L2BridgeSyncer interface { GetBlockByLER(ctx context.Context, ler common.Hash) (uint64, error) GetExitRootByIndex(ctx context.Context, index uint32) (treetypes.Root, error) GetBridges(ctx context.Context, fromBlock, toBlock uint64) ([]bridgesync.Bridge, error) - GetClaims(ctx context.Context, fromBlock, toBlock uint64) ([]bridgesync.Claim, error) + //GetClaims(ctx context.Context, fromBlock, toBlock uint64) ([]claimsynctypes.Claim, error) OriginNetwork() uint32 - GetLastProcessedBlock(ctx context.Context) (uint64, error) + GetLastProcessedBlock(ctx context.Context) (uint64, bool, error) GetExitRootByHash(ctx context.Context, root common.Hash) (*treetypes.Root, error) - GetClaimsByGlobalIndex(ctx context.Context, globalIndex *big.Int) ([]bridgesync.Claim, error) + //GetClaimsByGlobalIndex(ctx context.Context, globalIndex *big.Int) ([]claimsynctypes.Claim, error) SubscribeToSync(subscriberID string) <-chan sync.Block SubscribeToNewBridge(subscriberID string) <-chan uint64 } @@ -113,13 +120,13 @@ type BridgeQuerier interface { GetBridgesAndClaims( ctx context.Context, fromBlock, toBlock uint64, - ) ([]bridgesync.Bridge, []bridgesync.Claim, error) + ) ([]bridgesync.Bridge, []claimsynctypes.Claim, error) GetExitRootByIndex(ctx context.Context, index uint32) (common.Hash, error) - GetLastProcessedBlock(ctx context.Context) (uint64, error) + 
GetLastProcessedBlock(ctx context.Context) (uint64, bool, error) OriginNetwork() uint32 WaitForSyncerToCatchUp(ctx context.Context, block uint64) error GetUnsetClaimsForBlockRange(ctx context.Context, - fromBlock, toBlock uint64) ([]bridgesynctypes.Unclaim, error) + fromBlock, toBlock uint64) ([]claimsynctypes.Unclaim, error) } // ChainGERReader is an interface defining functions that an ChainGERReader should implement @@ -133,7 +140,7 @@ type ChainGERReader interface { // AgglayerBridgeL2Reader is an interface defining functions that an AgglayerBridgeL2Reader should implement type AgglayerBridgeL2Reader interface { GetUnsetClaimsForBlockRange(ctx context.Context, - fromBlock, toBlock uint64) ([]bridgesynctypes.Unclaim, error) + fromBlock, toBlock uint64) ([]claimsynctypes.Unclaim, error) } // L1InfoTreeDataQuerier is an interface defining functions that an L1InfoTreeDataQuerier should implement diff --git a/aggsender/types/optimistic_interface.go b/aggsender/types/optimistic_interface.go index 7a63defba..a89b618d9 100644 --- a/aggsender/types/optimistic_interface.go +++ b/aggsender/types/optimistic_interface.go @@ -3,7 +3,7 @@ package types import ( "context" - "github.com/agglayer/aggkit/bridgesync" + claimsynctypes "github.com/agglayer/aggkit/claimsync/types" "github.com/ethereum/go-ethereum/common" ) @@ -17,6 +17,6 @@ type OptimisticSigner interface { Sign(ctx context.Context, aggchainReq AggchainProofRequest, newLocalExitRoot common.Hash, - claims []bridgesync.Claim, + claims []claimsynctypes.Claim, ) ([]byte, string, error) } diff --git a/bridgeservice/bridge.go b/bridgeservice/bridge.go index cd9864b18..a23dce425 100644 --- a/bridgeservice/bridge.go +++ b/bridgeservice/bridge.go @@ -30,6 +30,7 @@ import ( "github.com/agglayer/aggkit/bridgeservice/metrics" "github.com/agglayer/aggkit/bridgeservice/types" "github.com/agglayer/aggkit/bridgesync" + claimsynctypes "github.com/agglayer/aggkit/claimsync/types" aggkitcommon "github.com/agglayer/aggkit/common" 
"github.com/agglayer/aggkit/db" "github.com/agglayer/aggkit/l1infotreesync" @@ -482,7 +483,7 @@ func (b *BridgeService) GetClaimsHandler(c *gin.Context) { networkID, pageNumber, pageSize, networkIDs, includeAllFieldsFlag, globalIndex) var ( - claims []*bridgesync.Claim + claims []*claimsynctypes.Claim count int ) @@ -579,7 +580,7 @@ func (b *BridgeService) GetUnsetClaimsHandler(c *gin.Context) { b.networkID, pageNumber, pageSize, globalIndex) var ( - unsetClaims []*bridgesync.UnsetClaim + unsetClaims []*claimsynctypes.UnsetClaim count int err error ) @@ -653,7 +654,7 @@ func (b *BridgeService) GetSetClaimsHandler(c *gin.Context) { b.networkID, pageNumber, pageSize, globalIndex) var ( - setClaims []*bridgesync.SetClaim + setClaims []*claimsynctypes.SetClaim count int err error ) @@ -1322,7 +1323,7 @@ func (b *BridgeService) populateNetworkSyncInfo( networkInfo.IsSynced = networkInfo.ContractDepositCount == networkInfo.SynchronizedDepositCount if !networkInfo.IsSynced { - lastProcessedBlock, err := bridge.GetLastProcessedBlock(ctx) + lastProcessedBlock, _, err := bridge.GetLastProcessedBlock(ctx) if err != nil { b.logger.Warnf("failed to get last processed block for %s: %s", networkName, err) } else { @@ -1645,7 +1646,7 @@ func (b *BridgeService) GetClaimsByGERHandler(c *gin.Context) { } ger := common.HexToHash(gerStr) - var claims []*bridgesync.Claim + var claims []*claimsynctypes.Claim switch networkID { case mainnetNetworkID: if b.bridgeL1 == nil { diff --git a/bridgeservice/bridge_interfaces.go b/bridgeservice/bridge_interfaces.go index d7f186556..1be67e05e 100644 --- a/bridgeservice/bridge_interfaces.go +++ b/bridgeservice/bridge_interfaces.go @@ -5,6 +5,7 @@ import ( "math/big" "github.com/agglayer/aggkit/bridgesync" + claimsynctypes "github.com/agglayer/aggkit/claimsync/types" "github.com/agglayer/aggkit/l1infotreesync" "github.com/agglayer/aggkit/l2gersync" tree "github.com/agglayer/aggkit/tree/types" @@ -22,17 +23,17 @@ type Bridger interface { 
GetLegacyTokenMigrations(ctx context.Context, pageNumber, pageSize uint32) ([]*bridgesync.LegacyTokenMigration, int, error) GetClaimsPaged(ctx context.Context, page, pageSize uint32, - networkIDs []uint32, globalIndex *big.Int) ([]*bridgesync.Claim, int, error) + networkIDs []uint32, globalIndex *big.Int) ([]*claimsynctypes.Claim, int, error) GetUnsetClaimsPaged(ctx context.Context, page, pageSize uint32, - globalIndex *big.Int) ([]*bridgesync.UnsetClaim, int, error) + globalIndex *big.Int) ([]*claimsynctypes.UnsetClaim, int, error) GetSetClaimsPaged(ctx context.Context, page, pageSize uint32, - globalIndex *big.Int) ([]*bridgesync.SetClaim, int, error) + globalIndex *big.Int) ([]*claimsynctypes.SetClaim, int, error) GetLastReorgEvent(ctx context.Context) (*bridgesync.LastReorg, error) GetContractDepositCount(ctx context.Context) (uint32, error) - GetLastProcessedBlock(ctx context.Context) (uint64, error) + GetLastProcessedBlock(ctx context.Context) (uint64, bool, error) GetLatestNetworkBlock(ctx context.Context) (uint64, error) IsActive(ctx context.Context) bool - GetClaimsByGER(ctx context.Context, globalExitRoot common.Hash) ([]*bridgesync.Claim, error) + GetClaimsByGER(ctx context.Context, globalExitRoot common.Hash) ([]*claimsynctypes.Claim, error) GetBridgeByDepositCount(ctx context.Context, depositCount uint32) (*bridgesync.Bridge, error) GetBridgesByContent(ctx context.Context, leafType uint8, originAddress common.Address, destinationNetwork uint32, destinationAddress common.Address, diff --git a/bridgeservice/bridge_test.go b/bridgeservice/bridge_test.go index 3be77e6f6..9a059b8aa 100644 --- a/bridgeservice/bridge_test.go +++ b/bridgeservice/bridge_test.go @@ -3078,7 +3078,7 @@ func TestGetSyncStatusHandler(t *testing.T) { // Add expectations for block information when not synced if !tc.l1IsSynced { b.bridgeL1.EXPECT().GetLastProcessedBlock(mock.Anything). - Return(uint64(1234), nil). + Return(uint64(1234), false, nil). 
Once() b.bridgeL1.EXPECT().GetLatestNetworkBlock(mock.Anything). Return(uint64(2555), nil). @@ -3086,7 +3086,7 @@ func TestGetSyncStatusHandler(t *testing.T) { } if !tc.l2IsSynced { b.bridgeL2.EXPECT().GetLastProcessedBlock(mock.Anything). - Return(uint64(1234), nil). + Return(uint64(1234), false, nil). Once() b.bridgeL2.EXPECT().GetLatestNetworkBlock(mock.Anything). Return(uint64(2555), nil). @@ -3457,7 +3457,7 @@ func TestPopulateNetworkSyncInfo(t *testing.T) { if !tc.expectedIsSynced { b.bridgeL1.EXPECT().GetLastProcessedBlock(mock.Anything). - Return(tc.lastProcessedBlock, nil). + Return(tc.lastProcessedBlock, false, nil). Once() b.bridgeL1.EXPECT().GetLatestNetworkBlock(mock.Anything). Return(tc.networkBlock, nil). diff --git a/bridgeservice/mocks/mock_bridger.go b/bridgeservice/mocks/mock_bridger.go index dfd26f20c..5eae58095 100644 --- a/bridgeservice/mocks/mock_bridger.go +++ b/bridgeservice/mocks/mock_bridger.go @@ -13,7 +13,9 @@ import ( mock "github.com/stretchr/testify/mock" - types "github.com/agglayer/aggkit/tree/types" + treetypes "github.com/agglayer/aggkit/tree/types" + + types "github.com/agglayer/aggkit/claimsync/types" ) // Bridger is an autogenerated mock type for the Bridger type @@ -223,23 +225,23 @@ func (_c *Bridger_GetBridgesPaged_Call) RunAndReturn(run func(context.Context, u } // GetClaimsByGER provides a mock function with given fields: ctx, globalExitRoot -func (_m *Bridger) GetClaimsByGER(ctx context.Context, globalExitRoot common.Hash) ([]*bridgesync.Claim, error) { +func (_m *Bridger) GetClaimsByGER(ctx context.Context, globalExitRoot common.Hash) ([]*types.Claim, error) { ret := _m.Called(ctx, globalExitRoot) if len(ret) == 0 { panic("no return value specified for GetClaimsByGER") } - var r0 []*bridgesync.Claim + var r0 []*types.Claim var r1 error - if rf, ok := ret.Get(0).(func(context.Context, common.Hash) ([]*bridgesync.Claim, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) ([]*types.Claim, error)); 
ok { return rf(ctx, globalExitRoot) } - if rf, ok := ret.Get(0).(func(context.Context, common.Hash) []*bridgesync.Claim); ok { + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) []*types.Claim); ok { r0 = rf(ctx, globalExitRoot) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).([]*bridgesync.Claim) + r0 = ret.Get(0).([]*types.Claim) } } @@ -271,35 +273,35 @@ func (_c *Bridger_GetClaimsByGER_Call) Run(run func(ctx context.Context, globalE return _c } -func (_c *Bridger_GetClaimsByGER_Call) Return(_a0 []*bridgesync.Claim, _a1 error) *Bridger_GetClaimsByGER_Call { +func (_c *Bridger_GetClaimsByGER_Call) Return(_a0 []*types.Claim, _a1 error) *Bridger_GetClaimsByGER_Call { _c.Call.Return(_a0, _a1) return _c } -func (_c *Bridger_GetClaimsByGER_Call) RunAndReturn(run func(context.Context, common.Hash) ([]*bridgesync.Claim, error)) *Bridger_GetClaimsByGER_Call { +func (_c *Bridger_GetClaimsByGER_Call) RunAndReturn(run func(context.Context, common.Hash) ([]*types.Claim, error)) *Bridger_GetClaimsByGER_Call { _c.Call.Return(run) return _c } // GetClaimsPaged provides a mock function with given fields: ctx, page, pageSize, networkIDs, globalIndex -func (_m *Bridger) GetClaimsPaged(ctx context.Context, page uint32, pageSize uint32, networkIDs []uint32, globalIndex *big.Int) ([]*bridgesync.Claim, int, error) { +func (_m *Bridger) GetClaimsPaged(ctx context.Context, page uint32, pageSize uint32, networkIDs []uint32, globalIndex *big.Int) ([]*types.Claim, int, error) { ret := _m.Called(ctx, page, pageSize, networkIDs, globalIndex) if len(ret) == 0 { panic("no return value specified for GetClaimsPaged") } - var r0 []*bridgesync.Claim + var r0 []*types.Claim var r1 int var r2 error - if rf, ok := ret.Get(0).(func(context.Context, uint32, uint32, []uint32, *big.Int) ([]*bridgesync.Claim, int, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, uint32, uint32, []uint32, *big.Int) ([]*types.Claim, int, error)); ok { return rf(ctx, page, pageSize, networkIDs, 
globalIndex) } - if rf, ok := ret.Get(0).(func(context.Context, uint32, uint32, []uint32, *big.Int) []*bridgesync.Claim); ok { + if rf, ok := ret.Get(0).(func(context.Context, uint32, uint32, []uint32, *big.Int) []*types.Claim); ok { r0 = rf(ctx, page, pageSize, networkIDs, globalIndex) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).([]*bridgesync.Claim) + r0 = ret.Get(0).([]*types.Claim) } } @@ -340,12 +342,12 @@ func (_c *Bridger_GetClaimsPaged_Call) Run(run func(ctx context.Context, page ui return _c } -func (_c *Bridger_GetClaimsPaged_Call) Return(_a0 []*bridgesync.Claim, _a1 int, _a2 error) *Bridger_GetClaimsPaged_Call { +func (_c *Bridger_GetClaimsPaged_Call) Return(_a0 []*types.Claim, _a1 int, _a2 error) *Bridger_GetClaimsPaged_Call { _c.Call.Return(_a0, _a1, _a2) return _c } -func (_c *Bridger_GetClaimsPaged_Call) RunAndReturn(run func(context.Context, uint32, uint32, []uint32, *big.Int) ([]*bridgesync.Claim, int, error)) *Bridger_GetClaimsPaged_Call { +func (_c *Bridger_GetClaimsPaged_Call) RunAndReturn(run func(context.Context, uint32, uint32, []uint32, *big.Int) ([]*types.Claim, int, error)) *Bridger_GetClaimsPaged_Call { _c.Call.Return(run) return _c } @@ -407,7 +409,7 @@ func (_c *Bridger_GetContractDepositCount_Call) RunAndReturn(run func(context.Co } // GetLastProcessedBlock provides a mock function with given fields: ctx -func (_m *Bridger) GetLastProcessedBlock(ctx context.Context) (uint64, error) { +func (_m *Bridger) GetLastProcessedBlock(ctx context.Context) (uint64, bool, error) { ret := _m.Called(ctx) if len(ret) == 0 { @@ -415,8 +417,9 @@ func (_m *Bridger) GetLastProcessedBlock(ctx context.Context) (uint64, error) { } var r0 uint64 - var r1 error - if rf, ok := ret.Get(0).(func(context.Context) (uint64, error)); ok { + var r1 bool + var r2 error + if rf, ok := ret.Get(0).(func(context.Context) (uint64, bool, error)); ok { return rf(ctx) } if rf, ok := ret.Get(0).(func(context.Context) uint64); ok { @@ -425,13 +428,19 @@ func (_m *Bridger) 
GetLastProcessedBlock(ctx context.Context) (uint64, error) { r0 = ret.Get(0).(uint64) } - if rf, ok := ret.Get(1).(func(context.Context) error); ok { + if rf, ok := ret.Get(1).(func(context.Context) bool); ok { r1 = rf(ctx) } else { - r1 = ret.Error(1) + r1 = ret.Get(1).(bool) } - return r0, r1 + if rf, ok := ret.Get(2).(func(context.Context) error); ok { + r2 = rf(ctx) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 } // Bridger_GetLastProcessedBlock_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetLastProcessedBlock' @@ -452,12 +461,12 @@ func (_c *Bridger_GetLastProcessedBlock_Call) Run(run func(ctx context.Context)) return _c } -func (_c *Bridger_GetLastProcessedBlock_Call) Return(_a0 uint64, _a1 error) *Bridger_GetLastProcessedBlock_Call { - _c.Call.Return(_a0, _a1) +func (_c *Bridger_GetLastProcessedBlock_Call) Return(_a0 uint64, _a1 bool, _a2 error) *Bridger_GetLastProcessedBlock_Call { + _c.Call.Return(_a0, _a1, _a2) return _c } -func (_c *Bridger_GetLastProcessedBlock_Call) RunAndReturn(run func(context.Context) (uint64, error)) *Bridger_GetLastProcessedBlock_Call { +func (_c *Bridger_GetLastProcessedBlock_Call) RunAndReturn(run func(context.Context) (uint64, bool, error)) *Bridger_GetLastProcessedBlock_Call { _c.Call.Return(run) return _c } @@ -521,23 +530,23 @@ func (_c *Bridger_GetLastReorgEvent_Call) RunAndReturn(run func(context.Context) } // GetLastRoot provides a mock function with given fields: ctx -func (_m *Bridger) GetLastRoot(ctx context.Context) (*types.Root, error) { +func (_m *Bridger) GetLastRoot(ctx context.Context) (*treetypes.Root, error) { ret := _m.Called(ctx) if len(ret) == 0 { panic("no return value specified for GetLastRoot") } - var r0 *types.Root + var r0 *treetypes.Root var r1 error - if rf, ok := ret.Get(0).(func(context.Context) (*types.Root, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context) (*treetypes.Root, error)); ok { return rf(ctx) } - if rf, ok := 
ret.Get(0).(func(context.Context) *types.Root); ok { + if rf, ok := ret.Get(0).(func(context.Context) *treetypes.Root); ok { r0 = rf(ctx) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.Root) + r0 = ret.Get(0).(*treetypes.Root) } } @@ -568,12 +577,12 @@ func (_c *Bridger_GetLastRoot_Call) Run(run func(ctx context.Context)) *Bridger_ return _c } -func (_c *Bridger_GetLastRoot_Call) Return(_a0 *types.Root, _a1 error) *Bridger_GetLastRoot_Call { +func (_c *Bridger_GetLastRoot_Call) Return(_a0 *treetypes.Root, _a1 error) *Bridger_GetLastRoot_Call { _c.Call.Return(_a0, _a1) return _c } -func (_c *Bridger_GetLastRoot_Call) RunAndReturn(run func(context.Context) (*types.Root, error)) *Bridger_GetLastRoot_Call { +func (_c *Bridger_GetLastRoot_Call) RunAndReturn(run func(context.Context) (*treetypes.Root, error)) *Bridger_GetLastRoot_Call { _c.Call.Return(run) return _c } @@ -702,23 +711,23 @@ func (_c *Bridger_GetLegacyTokenMigrations_Call) RunAndReturn(run func(context.C } // GetProof provides a mock function with given fields: ctx, depositCount, localExitRoot -func (_m *Bridger) GetProof(ctx context.Context, depositCount uint32, localExitRoot common.Hash) (types.Proof, error) { +func (_m *Bridger) GetProof(ctx context.Context, depositCount uint32, localExitRoot common.Hash) (treetypes.Proof, error) { ret := _m.Called(ctx, depositCount, localExitRoot) if len(ret) == 0 { panic("no return value specified for GetProof") } - var r0 types.Proof + var r0 treetypes.Proof var r1 error - if rf, ok := ret.Get(0).(func(context.Context, uint32, common.Hash) (types.Proof, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, uint32, common.Hash) (treetypes.Proof, error)); ok { return rf(ctx, depositCount, localExitRoot) } - if rf, ok := ret.Get(0).(func(context.Context, uint32, common.Hash) types.Proof); ok { + if rf, ok := ret.Get(0).(func(context.Context, uint32, common.Hash) treetypes.Proof); ok { r0 = rf(ctx, depositCount, localExitRoot) } else { if ret.Get(0) 
!= nil { - r0 = ret.Get(0).(types.Proof) + r0 = ret.Get(0).(treetypes.Proof) } } @@ -751,34 +760,34 @@ func (_c *Bridger_GetProof_Call) Run(run func(ctx context.Context, depositCount return _c } -func (_c *Bridger_GetProof_Call) Return(_a0 types.Proof, _a1 error) *Bridger_GetProof_Call { +func (_c *Bridger_GetProof_Call) Return(_a0 treetypes.Proof, _a1 error) *Bridger_GetProof_Call { _c.Call.Return(_a0, _a1) return _c } -func (_c *Bridger_GetProof_Call) RunAndReturn(run func(context.Context, uint32, common.Hash) (types.Proof, error)) *Bridger_GetProof_Call { +func (_c *Bridger_GetProof_Call) RunAndReturn(run func(context.Context, uint32, common.Hash) (treetypes.Proof, error)) *Bridger_GetProof_Call { _c.Call.Return(run) return _c } // GetRootByLER provides a mock function with given fields: ctx, ler -func (_m *Bridger) GetRootByLER(ctx context.Context, ler common.Hash) (*types.Root, error) { +func (_m *Bridger) GetRootByLER(ctx context.Context, ler common.Hash) (*treetypes.Root, error) { ret := _m.Called(ctx, ler) if len(ret) == 0 { panic("no return value specified for GetRootByLER") } - var r0 *types.Root + var r0 *treetypes.Root var r1 error - if rf, ok := ret.Get(0).(func(context.Context, common.Hash) (*types.Root, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) (*treetypes.Root, error)); ok { return rf(ctx, ler) } - if rf, ok := ret.Get(0).(func(context.Context, common.Hash) *types.Root); ok { + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) *treetypes.Root); ok { r0 = rf(ctx, ler) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.Root) + r0 = ret.Get(0).(*treetypes.Root) } } @@ -810,35 +819,35 @@ func (_c *Bridger_GetRootByLER_Call) Run(run func(ctx context.Context, ler commo return _c } -func (_c *Bridger_GetRootByLER_Call) Return(_a0 *types.Root, _a1 error) *Bridger_GetRootByLER_Call { +func (_c *Bridger_GetRootByLER_Call) Return(_a0 *treetypes.Root, _a1 error) *Bridger_GetRootByLER_Call { _c.Call.Return(_a0, 
_a1) return _c } -func (_c *Bridger_GetRootByLER_Call) RunAndReturn(run func(context.Context, common.Hash) (*types.Root, error)) *Bridger_GetRootByLER_Call { +func (_c *Bridger_GetRootByLER_Call) RunAndReturn(run func(context.Context, common.Hash) (*treetypes.Root, error)) *Bridger_GetRootByLER_Call { _c.Call.Return(run) return _c } // GetSetClaimsPaged provides a mock function with given fields: ctx, page, pageSize, globalIndex -func (_m *Bridger) GetSetClaimsPaged(ctx context.Context, page uint32, pageSize uint32, globalIndex *big.Int) ([]*bridgesync.SetClaim, int, error) { +func (_m *Bridger) GetSetClaimsPaged(ctx context.Context, page uint32, pageSize uint32, globalIndex *big.Int) ([]*types.SetClaim, int, error) { ret := _m.Called(ctx, page, pageSize, globalIndex) if len(ret) == 0 { panic("no return value specified for GetSetClaimsPaged") } - var r0 []*bridgesync.SetClaim + var r0 []*types.SetClaim var r1 int var r2 error - if rf, ok := ret.Get(0).(func(context.Context, uint32, uint32, *big.Int) ([]*bridgesync.SetClaim, int, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, uint32, uint32, *big.Int) ([]*types.SetClaim, int, error)); ok { return rf(ctx, page, pageSize, globalIndex) } - if rf, ok := ret.Get(0).(func(context.Context, uint32, uint32, *big.Int) []*bridgesync.SetClaim); ok { + if rf, ok := ret.Get(0).(func(context.Context, uint32, uint32, *big.Int) []*types.SetClaim); ok { r0 = rf(ctx, page, pageSize, globalIndex) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).([]*bridgesync.SetClaim) + r0 = ret.Get(0).([]*types.SetClaim) } } @@ -878,12 +887,12 @@ func (_c *Bridger_GetSetClaimsPaged_Call) Run(run func(ctx context.Context, page return _c } -func (_c *Bridger_GetSetClaimsPaged_Call) Return(_a0 []*bridgesync.SetClaim, _a1 int, _a2 error) *Bridger_GetSetClaimsPaged_Call { +func (_c *Bridger_GetSetClaimsPaged_Call) Return(_a0 []*types.SetClaim, _a1 int, _a2 error) *Bridger_GetSetClaimsPaged_Call { _c.Call.Return(_a0, _a1, _a2) return _c } 
-func (_c *Bridger_GetSetClaimsPaged_Call) RunAndReturn(run func(context.Context, uint32, uint32, *big.Int) ([]*bridgesync.SetClaim, int, error)) *Bridger_GetSetClaimsPaged_Call { +func (_c *Bridger_GetSetClaimsPaged_Call) RunAndReturn(run func(context.Context, uint32, uint32, *big.Int) ([]*types.SetClaim, int, error)) *Bridger_GetSetClaimsPaged_Call { _c.Call.Return(run) return _c } @@ -957,24 +966,24 @@ func (_c *Bridger_GetTokenMappings_Call) RunAndReturn(run func(context.Context, } // GetUnsetClaimsPaged provides a mock function with given fields: ctx, page, pageSize, globalIndex -func (_m *Bridger) GetUnsetClaimsPaged(ctx context.Context, page uint32, pageSize uint32, globalIndex *big.Int) ([]*bridgesync.UnsetClaim, int, error) { +func (_m *Bridger) GetUnsetClaimsPaged(ctx context.Context, page uint32, pageSize uint32, globalIndex *big.Int) ([]*types.UnsetClaim, int, error) { ret := _m.Called(ctx, page, pageSize, globalIndex) if len(ret) == 0 { panic("no return value specified for GetUnsetClaimsPaged") } - var r0 []*bridgesync.UnsetClaim + var r0 []*types.UnsetClaim var r1 int var r2 error - if rf, ok := ret.Get(0).(func(context.Context, uint32, uint32, *big.Int) ([]*bridgesync.UnsetClaim, int, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, uint32, uint32, *big.Int) ([]*types.UnsetClaim, int, error)); ok { return rf(ctx, page, pageSize, globalIndex) } - if rf, ok := ret.Get(0).(func(context.Context, uint32, uint32, *big.Int) []*bridgesync.UnsetClaim); ok { + if rf, ok := ret.Get(0).(func(context.Context, uint32, uint32, *big.Int) []*types.UnsetClaim); ok { r0 = rf(ctx, page, pageSize, globalIndex) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).([]*bridgesync.UnsetClaim) + r0 = ret.Get(0).([]*types.UnsetClaim) } } @@ -1014,12 +1023,12 @@ func (_c *Bridger_GetUnsetClaimsPaged_Call) Run(run func(ctx context.Context, pa return _c } -func (_c *Bridger_GetUnsetClaimsPaged_Call) Return(_a0 []*bridgesync.UnsetClaim, _a1 int, _a2 error) 
*Bridger_GetUnsetClaimsPaged_Call { +func (_c *Bridger_GetUnsetClaimsPaged_Call) Return(_a0 []*types.UnsetClaim, _a1 int, _a2 error) *Bridger_GetUnsetClaimsPaged_Call { _c.Call.Return(_a0, _a1, _a2) return _c } -func (_c *Bridger_GetUnsetClaimsPaged_Call) RunAndReturn(run func(context.Context, uint32, uint32, *big.Int) ([]*bridgesync.UnsetClaim, int, error)) *Bridger_GetUnsetClaimsPaged_Call { +func (_c *Bridger_GetUnsetClaimsPaged_Call) RunAndReturn(run func(context.Context, uint32, uint32, *big.Int) ([]*types.UnsetClaim, int, error)) *Bridger_GetUnsetClaimsPaged_Call { _c.Call.Return(run) return _c } diff --git a/bridgeservice/utils.go b/bridgeservice/utils.go index f686e6e4d..82b252c68 100644 --- a/bridgeservice/utils.go +++ b/bridgeservice/utils.go @@ -8,6 +8,7 @@ import ( bridgetypes "github.com/agglayer/aggkit/bridgeservice/types" "github.com/agglayer/aggkit/bridgesync" + claimsynctypes "github.com/agglayer/aggkit/claimsync/types" "github.com/agglayer/aggkit/l1infotreesync" "github.com/gin-gonic/gin" ) @@ -162,7 +163,7 @@ func isPreEtrogBridge(bridge *bridgesync.Bridge, l1EtrogUpdateBlock uint64) bool } // NewClaimResponse creates ClaimResponse instance out of the provided Claim -func NewClaimResponse(claim *bridgesync.Claim, populateProofs bool) *bridgetypes.ClaimResponse { +func NewClaimResponse(claim *claimsynctypes.Claim, populateProofs bool) *bridgetypes.ClaimResponse { response := &bridgetypes.ClaimResponse{ GlobalIndex: bridgetypes.BigIntString(claim.GlobalIndex.String()), DestinationNetwork: claim.DestinationNetwork, diff --git a/bridgesync/agglayer_bridge_l2_reader.go b/bridgesync/agglayer_bridge_l2_reader.go index 99e71adfd..03b21fc27 100644 --- a/bridgesync/agglayer_bridge_l2_reader.go +++ b/bridgesync/agglayer_bridge_l2_reader.go @@ -6,7 +6,7 @@ import ( "math/big" "github.com/0xPolygon/cdk-contracts-tooling/contracts/aggchain-multisig/agglayerbridgel2" - "github.com/agglayer/aggkit/bridgesync/types" + claimsynctypes 
"github.com/agglayer/aggkit/claimsync/types" aggkitcommon "github.com/agglayer/aggkit/common" "github.com/agglayer/aggkit/log" aggkittypes "github.com/agglayer/aggkit/types" @@ -70,7 +70,7 @@ func NewAgglayerBridgeL2ReaderWithMaxLogBlockRange( // - []types.Unclaim: A slice of Unclaim objects containing global index, block number, and block index // - error: Any error that occurred during the event filtering or iteration func (r *AgglayerBridgeL2Reader) GetUnsetClaimsForBlockRange(ctx context.Context, - fromBlock, toBlock uint64) ([]types.Unclaim, error) { + fromBlock, toBlock uint64) ([]claimsynctypes.Unclaim, error) { if fromBlock > toBlock { return nil, fmt.Errorf("invalid block range: fromBlock(%d) > toBlock(%d)", fromBlock, toBlock) } @@ -83,7 +83,7 @@ func (r *AgglayerBridgeL2Reader) GetUnsetClaimsForBlockRange(ctx context.Context } func (r *AgglayerBridgeL2Reader) fetchUnsetClaimsWithFallbackChunking(ctx context.Context, - fromBlock, toBlock uint64) ([]types.Unclaim, error) { + fromBlock, toBlock uint64) ([]claimsynctypes.Unclaim, error) { unclaims, err := r.fetchUnsetClaims(ctx, fromBlock, toBlock) if err != nil { // Check if error is due to block range being too large @@ -99,21 +99,21 @@ func (r *AgglayerBridgeL2Reader) fetchUnsetClaimsWithFallbackChunking(ctx contex } func (r *AgglayerBridgeL2Reader) getUnsetClaimsInChunks(ctx context.Context, - fromBlock, toBlock, maxRange uint64) ([]types.Unclaim, error) { + fromBlock, toBlock, maxRange uint64) ([]claimsynctypes.Unclaim, error) { log.Debugf("block range too large, splitting into chunks of max %d blocks", maxRange) return aggkitcommon.ChunkedRangeQuery( ctx, fromBlock, toBlock, maxRange, r.fetchUnsetClaimsWithFallbackChunking, - func(all, chunk []types.Unclaim) []types.Unclaim { + func(all, chunk []claimsynctypes.Unclaim) []claimsynctypes.Unclaim { return append(all, chunk...) 
}, - make([]types.Unclaim, 0), + make([]claimsynctypes.Unclaim, 0), ) } // fetchUnsetClaims performs the actual event filtering for a given block range func (r *AgglayerBridgeL2Reader) fetchUnsetClaims(ctx context.Context, - fromBlock, toBlock uint64) ([]types.Unclaim, error) { + fromBlock, toBlock uint64) ([]claimsynctypes.Unclaim, error) { unclaimIterator, err := r.agglayerBridgeL2.FilterUpdatedUnsetGlobalIndexHashChain( &bind.FilterOpts{Context: ctx, Start: fromBlock, End: &toBlock}) if err != nil { @@ -126,12 +126,12 @@ func (r *AgglayerBridgeL2Reader) fetchUnsetClaims(ctx context.Context, } }() - unclaims := make([]types.Unclaim, 0) + unclaims := make([]claimsynctypes.Unclaim, 0) for unclaimIterator.Next() { globalIndex := unclaimIterator.Event.UnsetGlobalIndex log.Infof("unset claim: %s at block %d, index %d", new(big.Int).SetBytes(globalIndex[:]), unclaimIterator.Event.Raw.BlockNumber, unclaimIterator.Event.Raw.Index) - unclaims = append(unclaims, types.Unclaim{ + unclaims = append(unclaims, claimsynctypes.Unclaim{ GlobalIndex: new(big.Int).SetBytes(globalIndex[:]), BlockNumber: unclaimIterator.Event.Raw.BlockNumber, LogIndex: uint64(unclaimIterator.Event.Raw.Index), diff --git a/bridgesync/agglayer_bridge_l2_reader_test.go b/bridgesync/agglayer_bridge_l2_reader_test.go index 7b2ea46b7..e3217c1f1 100644 --- a/bridgesync/agglayer_bridge_l2_reader_test.go +++ b/bridgesync/agglayer_bridge_l2_reader_test.go @@ -5,7 +5,7 @@ import ( "errors" "testing" - "github.com/agglayer/aggkit/bridgesync/types" + claimsynctypes "github.com/agglayer/aggkit/claimsync/types" aggkitcommon "github.com/agglayer/aggkit/common" "github.com/agglayer/aggkit/etherman" aggkittypes "github.com/agglayer/aggkit/types" @@ -432,10 +432,10 @@ func TestGetUnsetClaimsInChunks(t *testing.T) { unclaims, err := aggkitcommon.ChunkedRangeQuery( ctx, 0, 2999, 1000, reader.fetchUnsetClaims, - func(all, chunk []types.Unclaim) []types.Unclaim { + func(all, chunk []claimsynctypes.Unclaim) 
[]claimsynctypes.Unclaim { return append(all, chunk...) }, - []types.Unclaim{}, + []claimsynctypes.Unclaim{}, ) require.NoError(t, err) require.NotNil(t, unclaims) @@ -455,10 +455,10 @@ func TestGetUnsetClaimsInChunks(t *testing.T) { unclaims, err := aggkitcommon.ChunkedRangeQuery( ctx, 0, 2500, 1000, reader.fetchUnsetClaims, - func(all, chunk []types.Unclaim) []types.Unclaim { + func(all, chunk []claimsynctypes.Unclaim) []claimsynctypes.Unclaim { return append(all, chunk...) }, - []types.Unclaim{}, + []claimsynctypes.Unclaim{}, ) require.NoError(t, err) require.NotNil(t, unclaims) @@ -475,10 +475,10 @@ func TestGetUnsetClaimsInChunks(t *testing.T) { unclaims, err := aggkitcommon.ChunkedRangeQuery(ctx, 0, 500, 1000, reader.fetchUnsetClaims, - func(all, chunk []types.Unclaim) []types.Unclaim { + func(all, chunk []claimsynctypes.Unclaim) []claimsynctypes.Unclaim { return append(all, chunk...) }, - []types.Unclaim{}, + []claimsynctypes.Unclaim{}, ) require.NoError(t, err) require.NotNil(t, unclaims) @@ -498,10 +498,10 @@ func TestGetUnsetClaimsInChunks(t *testing.T) { unclaims, err := aggkitcommon.ChunkedRangeQuery(ctx, 0, 2000, 1000, reader.fetchUnsetClaims, - func(all, chunk []types.Unclaim) []types.Unclaim { + func(all, chunk []claimsynctypes.Unclaim) []claimsynctypes.Unclaim { return append(all, chunk...) }, - []types.Unclaim{}, + []claimsynctypes.Unclaim{}, ) require.ErrorContains(t, err, "rpc error") require.Empty(t, unclaims) @@ -517,10 +517,10 @@ func TestGetUnsetClaimsInChunks(t *testing.T) { // Should return error immediately without making any calls unclaims, err := aggkitcommon.ChunkedRangeQuery(ctx, 0, 1000, 0, reader.fetchUnsetClaims, - func(all, chunk []types.Unclaim) []types.Unclaim { + func(all, chunk []claimsynctypes.Unclaim) []claimsynctypes.Unclaim { return append(all, chunk...) 
}, - []types.Unclaim{}, + []claimsynctypes.Unclaim{}, ) require.ErrorContains(t, err, "maxRange must be greater than 0") require.Empty(t, unclaims) diff --git a/bridgesync/bridgesync.go b/bridgesync/bridgesync.go index 06846187d..923380588 100644 --- a/bridgesync/bridgesync.go +++ b/bridgesync/bridgesync.go @@ -10,6 +10,8 @@ import ( "github.com/0xPolygon/cdk-contracts-tooling/contracts/aggchain-multisig/agglayerbridge" "github.com/0xPolygon/cdk-contracts-tooling/contracts/aggchain-multisig/agglayerbridgel2" bridgesynctypes "github.com/agglayer/aggkit/bridgesync/types" + "github.com/agglayer/aggkit/claimsync" + claimsynctypes "github.com/agglayer/aggkit/claimsync/types" "github.com/agglayer/aggkit/db/compatibility" "github.com/agglayer/aggkit/log" "github.com/agglayer/aggkit/reorgdetector" @@ -66,9 +68,10 @@ type ReorgDetector interface { // BridgeSync manages the state of the exit tree for the bridge contract by processing Ethereum blockchain events. type BridgeSync struct { - processor *processor - driver *sync.EVMDriver - downloader *sync.EVMDownloader + processor *processor + driver *sync.EVMDriver + downloader *sync.EVMDownloader + claimReader claimsynctypes.ClaimsReader originNetwork uint32 reorgDetector ReorgDetector @@ -83,7 +86,6 @@ func NewL1( rd ReorgDetector, ethClient aggkittypes.EthClienter, originNetwork uint32, - syncFromInBridges bool, ) (*BridgeSync, error) { return newBridgeSync( ctx, @@ -94,9 +96,9 @@ func NewL1( L1BridgeSyncer, originNetwork, false, - syncFromInBridges, + *cfg.SyncFromInBridgesResolved, bridgesynctypes.EmptyLER, - nil, + *cfg.EmbeddedClaimSyncResolved, ) } @@ -109,9 +111,7 @@ func NewL2( ethClient aggkittypes.EthClienter, originNetwork uint32, syncFullClaims bool, - syncFromInBridges bool, initialLER common.Hash, - claimEventsProcessor ClaimsSyncProcessor, ) (*BridgeSync, error) { return newBridgeSync( ctx, @@ -122,9 +122,9 @@ func NewL2( L2BridgeSyncer, originNetwork, syncFullClaims, - syncFromInBridges, + 
*cfg.SyncFromInBridgesResolved, initialLER, - claimEventsProcessor, + *cfg.EmbeddedClaimSyncResolved, ) } @@ -139,7 +139,7 @@ func newBridgeSync( syncFullClaims bool, syncFromInBridges bool, initialLER common.Hash, - claimEventsProcessor ClaimsSyncProcessor, + embeddedClaimSyncFlag bool, ) (*BridgeSync, error) { logger := log.WithFields("module", syncerID.String()) @@ -157,19 +157,48 @@ func newBridgeSync( cfg.BridgeAddr.String(), err) return nil, err } + database, err := newSqliteDB(cfg.DBPath) + if err != nil { + return nil, fmt.Errorf("failed to create sqlite database %s: %w", cfg.DBPath, err) + } + + var embeddedClaimSync claimsync.EmbeddedClaimSync + if embeddedClaimSyncFlag { + claimID := claimsynctypes.ClaimSyncerID(syncerID) + logger.Info("initializing embedded claim sync for bridge sync %s", claimID) + claimStorage, err := claimsync.NewClaimStorage(database, logger, claimID, cfg.DBQueryTimeout.Duration) + if err != nil { + return nil, fmt.Errorf("failed to create claim storage: %w", err) + } + + embeddedClaimSyncObject, err := claimsync.NewEmbedded( + ctx, claimStorage, + cfg.BridgeAddr, + ethClient, + nil, + claimID, + cfg.DBQueryTimeout.Duration, + logger, + ) + if err != nil { + return nil, fmt.Errorf("failed to initialize embedded claim sync: %w", err) + } + embeddedClaimSync = *embeddedClaimSyncObject - processor, err := newProcessor(cfg.DBPath, "bridge_sync_"+syncerID.String(), logger, cfg.DBQueryTimeout.Duration, claimEventsProcessor) + } + + processor, err := newProcessor(database, "bridge_sync_"+syncerID.String(), logger, cfg.DBQueryTimeout.Duration, embeddedClaimSync.Processor) if err != nil { return nil, err } processor.initialLER = initialLER - lastProcessedBlock, err := processor.GetLastProcessedBlock(ctx) + lastProcessedBlock, found, err := processor.GetLastProcessedBlock(ctx) if err != nil { return nil, err } - if lastProcessedBlock < cfg.InitialBlockNum { + if !found || (lastProcessedBlock < cfg.InitialBlockNum) { header, err := 
ethClient.CustomHeaderByNumber(ctx, aggkittypes.NewBlockNumber(cfg.InitialBlockNum)) if err != nil { return nil, fmt.Errorf("failed to get initial block %d: %w", cfg.InitialBlockNum, err) @@ -194,7 +223,7 @@ func newBridgeSync( return nil, fmt.Errorf("failed to resolve bridge deployment. Reason: %w", err) } - appender, err := buildAppender(ctx, ethClient, cfg.BridgeAddr, syncFromInBridges, bridgeDeployment, logger, claimEventsProcessor) + appender, err := buildAppender(ctx, ethClient, cfg.BridgeAddr, syncFromInBridges, bridgeDeployment, logger, embeddedClaimSync.Appender) if err != nil { return nil, err } @@ -248,7 +277,10 @@ func newBridgeSync( " retryAfterErrorPeriod: %s\n"+ " syncBlockChunkSize: %d\n"+ " ReorgDetector: %s\n"+ - " waitForNewBlocksPeriod: %s", + " waitForNewBlocksPeriod: %s\n"+ + " syncFullClaims: %t\n"+ + " syncFromInBridges: %t\n"+ + " embeddedClaimSyncFlag: %t", syncerID, cfg.DBPath, cfg.InitialBlockNum, @@ -260,6 +292,9 @@ func newBridgeSync( cfg.SyncBlockChunkSize, rd.String(), cfg.WaitForNewBlocksPeriod.String(), + syncFullClaims, + syncFromInBridges, + embeddedClaimSyncFlag, ) return &BridgeSync{ @@ -339,38 +374,46 @@ func (s *BridgeSync) GetBridgesPaged( func (s *BridgeSync) GetClaimsPaged( ctx context.Context, - page, pageSize uint32, networkIDs []uint32, globalIndex *big.Int) ([]*Claim, int, error) { - if s.processor.isHalted() { - s.processor.log.Error("processor is halted, cannot get claims") - return nil, 0, sync.ErrInconsistentState - } - return s.processor.GetClaimsPaged(ctx, page, pageSize, networkIDs, globalIndex) + page, pageSize uint32, networkIDs []uint32, globalIndex *big.Int) ([]*claimsynctypes.Claim, int, error) { + return claimReaderDelegatePaged(s, func() ([]*claimsynctypes.Claim, int, error) { + return s.claimReader.GetClaimsPaged(ctx, page, pageSize, networkIDs, globalIndex) + }) } func (s *BridgeSync) GetUnsetClaimsPaged( ctx context.Context, - page, pageSize uint32, globalIndex *big.Int) ([]*UnsetClaim, int, error) { - 
if s.processor.isHalted() { - s.processor.log.Error("processor is halted, cannot get unset claims") - return nil, 0, sync.ErrInconsistentState - } - return s.processor.GetUnsetClaimsPaged(ctx, page, pageSize, globalIndex) + page, pageSize uint32, globalIndex *big.Int) ([]*claimsynctypes.UnsetClaim, int, error) { + return claimReaderDelegatePaged(s, func() ([]*claimsynctypes.UnsetClaim, int, error) { + return s.claimReader.GetUnsetClaimsPaged(ctx, page, pageSize, globalIndex) + }) } func (s *BridgeSync) GetSetClaimsPaged( ctx context.Context, - page, pageSize uint32, globalIndex *big.Int) ([]*SetClaim, int, error) { - if s.processor.isHalted() { - s.processor.log.Error("processor is halted, cannot get set claims") - return nil, 0, sync.ErrInconsistentState + page, pageSize uint32, globalIndex *big.Int) ([]*claimsynctypes.SetClaim, int, error) { + return claimReaderDelegatePaged(s, func() ([]*claimsynctypes.SetClaim, int, error) { + return s.claimReader.GetSetClaimsPaged(ctx, page, pageSize, globalIndex) + }) +} + +func (c *BridgeSync) SetNextRequiredBlock(ctx context.Context, nextBlockNum uint64) error { + num, found, err := c.GetLastProcessedBlock(ctx) + if err != nil { + return fmt.Errorf("failed to get last processed block: %w", err) + } + if !found { + return fmt.Errorf("last processed block not found") } - return s.processor.GetSetClaimsPaged(ctx, page, pageSize, globalIndex) + if nextBlockNum > num { + return fmt.Errorf("cannot set next required block to %d, last processed block is %d", nextBlockNum, num) + } + return nil } -func (s *BridgeSync) GetLastProcessedBlock(ctx context.Context) (uint64, error) { +func (s *BridgeSync) GetLastProcessedBlock(ctx context.Context) (uint64, bool, error) { if s.processor.isHalted() { s.processor.log.Error("processor is halted, cannot get last processed block") - return 0, sync.ErrInconsistentState + return 0, false, sync.ErrInconsistentState } return s.processor.GetLastProcessedBlock(ctx) } @@ -382,18 +425,16 @@ func (s 
*BridgeSync) GetExitRootByHash(ctx context.Context, root common.Hash) (* return s.processor.exitTree.GetRootByHash(ctx, root) } -func (s *BridgeSync) GetClaimsByGlobalIndex(ctx context.Context, globalIndex *big.Int) ([]Claim, error) { - if s.processor.isHalted() { - return nil, sync.ErrInconsistentState - } - return s.processor.GetClaimsByGlobalIndex(ctx, globalIndex) +func (s *BridgeSync) GetClaimsByGlobalIndex(ctx context.Context, globalIndex *big.Int) ([]claimsynctypes.Claim, error) { + return claimReaderDelegate(s, func() ([]claimsynctypes.Claim, error) { + return s.claimReader.GetClaimsByGlobalIndex(ctx, nil, globalIndex) + }) } -func (s *BridgeSync) GetClaims(ctx context.Context, fromBlock, toBlock uint64) ([]Claim, error) { - if s.processor.isHalted() { - return nil, sync.ErrInconsistentState - } - return s.processor.GetClaims(ctx, fromBlock, toBlock) +func (s *BridgeSync) GetClaims(ctx context.Context, fromBlock, toBlock uint64) ([]claimsynctypes.Claim, error) { + return claimReaderDelegate(s, func() ([]claimsynctypes.Claim, error) { + return s.claimReader.GetClaims(ctx, nil, fromBlock, toBlock) + }) } func (s *BridgeSync) GetBridges(ctx context.Context, fromBlock, toBlock uint64) ([]Bridge, error) { @@ -567,8 +608,28 @@ func (s *BridgeSync) IsActive(ctx context.Context) bool { } // GetClaimsByGER returns all DetailedClaimEvent claims for the given global exit root. -func (s *BridgeSync) GetClaimsByGER(ctx context.Context, globalExitRoot common.Hash) ([]*Claim, error) { - return s.processor.GetClaimsByGER(ctx, globalExitRoot) +func (s *BridgeSync) GetClaimsByGER(ctx context.Context, globalExitRoot common.Hash) ([]*claimsynctypes.Claim, error) { + return claimReaderDelegate(s, func() ([]*claimsynctypes.Claim, error) { + return s.claimReader.GetClaimsByGER(ctx, globalExitRoot) + }) +} + +// claimReaderDelegate checks the halted state and, if healthy, calls fn. 
+func claimReaderDelegate[T any](s *BridgeSync, fn func() (T, error)) (T, error) { + if s.processor.isHalted() { + var zero T + return zero, sync.ErrInconsistentState + } + return fn() +} + +// claimReaderDelegatePaged is like claimReaderDelegate for functions returning (T, int, error). +func claimReaderDelegatePaged[T any](s *BridgeSync, fn func() (T, int, error)) (T, int, error) { + if s.processor.isHalted() { + var zero T + return zero, 0, sync.ErrInconsistentState + } + return fn() } // GetBridgeByDepositCount returns the bridge with the given deposit count (bridge or bridge_archive). diff --git a/bridgesync/bridgesync_test.go b/bridgesync/bridgesync_test.go index be54cbf54..2d208d4ed 100644 --- a/bridgesync/bridgesync_test.go +++ b/bridgesync/bridgesync_test.go @@ -85,6 +85,7 @@ func TestNewLx(t *testing.T) { RequireStorageContentCompatibility: true, DBQueryTimeout: cfgtypes.NewDuration(dbQueryTimeout), } + l1BridgeSync, err := NewL1( ctx, bridgeSyncL1Cfg, diff --git a/bridgesync/claim.go b/bridgesync/claim.go new file mode 100644 index 000000000..d36fb3a36 --- /dev/null +++ b/bridgesync/claim.go @@ -0,0 +1,51 @@ +package bridgesync + +import ( + claimsynctypes "github.com/agglayer/aggkit/claimsync/types" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" +) + +// Type aliases to maintain backward compatibility after types were moved to claimsync/types. + +// Claim is an alias for claimsynctypes.Claim. +type Claim = claimsynctypes.Claim + +// ClaimType is an alias for claimsynctypes.ClaimType. +type ClaimType = claimsynctypes.ClaimType + +// UnsetClaim is an alias for claimsynctypes.UnsetClaim. +type UnsetClaim = claimsynctypes.UnsetClaim + +// SetClaim is an alias for claimsynctypes.SetClaim. +type SetClaim = claimsynctypes.SetClaim + +const ( + // ClaimEvent is an alias for claimsynctypes.ClaimEvent. + ClaimEvent ClaimType = claimsynctypes.ClaimEvent + // DetailedClaimEvent is an alias for claimsynctypes.DetailedClaimEvent. 
+ DetailedClaimEvent ClaimType = claimsynctypes.DetailedClaimEvent +) + +var ( + // claim event signatures (moved to claimsync package, re-exported here for test compatibility) + claimEventSignature = crypto.Keccak256Hash([]byte("ClaimEvent(uint256,uint32,address,address,uint256)")) + claimEventSignaturePreEtrog = crypto.Keccak256Hash([]byte("ClaimEvent(uint32,uint32,address,address,uint256)")) + detailedClaimEventSignature = crypto.Keccak256Hash([]byte( + "DetailedClaimEvent(bytes32[32],bytes32[32]," + + "uint256,bytes32,bytes32,uint8,uint32," + + "address,uint32,address,uint256,bytes)", + )) + unsetClaimEventSignature = crypto.Keccak256Hash([]byte( + "UpdatedUnsetGlobalIndexHashChain(bytes32,bytes32)", + )) + setClaimEventSignature = crypto.Keccak256Hash([]byte( + "SetClaim(bytes32)", + )) + + // claim method IDs (moved to claimsync package, re-exported here for test compatibility) + claimAssetEtrogMethodID = common.Hex2Bytes("ccaa2d11") + claimMessageEtrogMethodID = common.Hex2Bytes("f5efcd79") + claimAssetPreEtrogMethodID = common.Hex2Bytes("2cffd02e") + claimMessagePreEtrogMethodID = common.Hex2Bytes("2d2c9d94") +) diff --git a/bridgesync/config.go b/bridgesync/config.go index 78f668f48..175b9f536 100644 --- a/bridgesync/config.go +++ b/bridgesync/config.go @@ -9,49 +9,57 @@ import ( "github.com/ethereum/go-ethereum/common" ) -// SyncFromInBridgesMode represents the mode for FromAddress extraction -type SyncFromInBridgesMode string +// TrueFalseAutoMode represents the mode for FromAddress extraction +type TrueFalseAutoMode string const ( - // SyncFromInBridgesTrue always extracts FromAddress using debug_traceTransaction - SyncFromInBridgesTrue SyncFromInBridgesMode = "true" - // SyncFromInBridgesFalse never extracts FromAddress - SyncFromInBridgesFalse SyncFromInBridgesMode = "false" - // SyncFromInBridgesAuto decides automatically based on whether BRIDGE component is active - SyncFromInBridgesAuto SyncFromInBridgesMode = "auto" + // TrueMode always extracts 
FromAddress using debug_traceTransaction + TrueMode TrueFalseAutoMode = "true" + // FalseMode never extracts FromAddress + FalseMode TrueFalseAutoMode = "false" + // AutoMode decides automatically based on whether BRIDGE component is active + AutoMode TrueFalseAutoMode = "auto" ) // UnmarshalText implements encoding.TextUnmarshaler -func (m *SyncFromInBridgesMode) UnmarshalText(text []byte) error { +func (m *TrueFalseAutoMode) UnmarshalText(text []byte) error { str := strings.ToLower(strings.TrimSpace(string(text))) switch str { case "true": - *m = SyncFromInBridgesTrue + *m = TrueMode case "false": - *m = SyncFromInBridgesFalse + *m = FalseMode case "auto": - *m = SyncFromInBridgesAuto + *m = AutoMode default: - return fmt.Errorf("invalid SyncFromInBridgesMode: %s (valid values: true, false, auto)", str) + return fmt.Errorf("invalid TrueFalseAutoMode: value %s (valid values: true, false, auto)", str) } return nil } // String returns the string representation -func (m SyncFromInBridgesMode) String() string { +func (m TrueFalseAutoMode) String() string { return string(m) } +func (m TrueFalseAutoMode) Validate(fieldName string) error { + cpy := m + if err := cpy.UnmarshalText([]byte(m.String())); err != nil { + return fmt.Errorf("invalid %s configuration: %w", fieldName, err) + } + return nil +} + // Resolve converts the mode to a boolean, using the provided components list to resolve "auto" -func (m SyncFromInBridgesMode) Resolve(hasBridgeComponent bool) bool { +func (m TrueFalseAutoMode) Resolve(autoModeResult bool) bool { switch m { - case SyncFromInBridgesTrue: + case TrueMode: return true - case SyncFromInBridgesFalse: + case FalseMode: return false - case SyncFromInBridgesAuto: - // If BRIDGE component is active, we need FromAddress extraction - return hasBridgeComponent + case AutoMode: + // Resolve to auto mode + return autoModeResult default: // Default to false return false @@ -90,7 +98,16 @@ type Config struct { // - "auto": automatically decides based on 
whether BRIDGE component is active
	// Note: TxnSender and ToAddress are always extracted via standard eth_getTransactionByHash.
	// Default: "auto"
-	SyncFromInBridges SyncFromInBridgesMode `mapstructure:"SyncFromInBridges"`
+	SyncFromInBridges TrueFalseAutoMode `jsonschema:"enum=true, enum=false, enum=auto" mapstructure:"SyncFromInBridges"`
+	// EmbeddedClaimSync controls whether to use embedded claim synchronization mode.
+	// If the bridge-service is running, we must use embedded claim sync; if not, it runs in standalone mode.
+	EmbeddedClaimSync TrueFalseAutoMode `jsonschema:"enum=true, enum=false, enum=auto" mapstructure:"EmbeddedClaimSync"`
+	// SyncFromInBridgesResolved is the resolved boolean value of SyncFromInBridges after "auto" is evaluated.
+	// Not read from config file; set programmatically after resolution.
+	SyncFromInBridgesResolved *bool `mapstructure:"-"`
+	// EmbeddedClaimSyncResolved is the resolved boolean value of EmbeddedClaimSync after "auto" is evaluated.
+	// Not read from config file; set programmatically after resolution.
+ EmbeddedClaimSyncResolved *bool `mapstructure:"-"` } // Validate checks if the configuration is valid @@ -99,11 +116,30 @@ func (c Config) Validate() error { return fmt.Errorf("invalid BlockFinality configuration: %w", err) } // Validate SyncFromInBridges - switch c.SyncFromInBridges { - case SyncFromInBridgesTrue, SyncFromInBridgesFalse, SyncFromInBridgesAuto, "": - // Valid values, including empty (will use default) - default: - return fmt.Errorf("invalid SyncFromInBridges value: %s (valid values: true, false, auto)", c.SyncFromInBridges) + if err := c.SyncFromInBridges.Validate("SyncFromInBridges"); err != nil { + return err + } + // Validate EmbeddedClaimSync + if err := c.EmbeddedClaimSync.Validate("EmbeddedClaimSync"); err != nil { + return err } return nil } + +// ResolvedString returns a string representation of the resolved configuration +// to log it +func (c *Config) ResolvedString() []string { + var result []string + if c.SyncFromInBridgesResolved != nil { + result = append(result, fmt.Sprintf("SyncFromInBridges:%s -> %t", c.SyncFromInBridges, *c.SyncFromInBridgesResolved)) + } else { + result = append(result, fmt.Sprintf("SyncFromInBridges: %s -> ???", c.SyncFromInBridges)) + } + if c.EmbeddedClaimSyncResolved != nil { + result = append(result, fmt.Sprintf("EmbeddedClaimSync:%s -> %t", c.EmbeddedClaimSync, *c.EmbeddedClaimSyncResolved)) + } else { + result = append(result, fmt.Sprintf("EmbeddedClaimSync: %s -> ???", c.EmbeddedClaimSync)) + } + return result + +} diff --git a/bridgesync/config_test.go b/bridgesync/config_test.go index 4d1c0eaca..3c42513bf 100644 --- a/bridgesync/config_test.go +++ b/bridgesync/config_test.go @@ -11,79 +11,79 @@ func TestSyncFromInBridgesMode_UnmarshalText(t *testing.T) { tests := []struct { name string input string - expected SyncFromInBridgesMode + expected TrueFalseAutoMode expectedError string }{ { name: "true lowercase", input: "true", - expected: SyncFromInBridgesTrue, + expected: TrueMode, expectedError: "", }, 
{ name: "true uppercase", input: "TRUE", - expected: SyncFromInBridgesTrue, + expected: TrueMode, expectedError: "", }, { name: "true mixed case", input: "TrUe", - expected: SyncFromInBridgesTrue, + expected: TrueMode, expectedError: "", }, { name: "true with whitespace", input: " true ", - expected: SyncFromInBridgesTrue, + expected: TrueMode, expectedError: "", }, { name: "false lowercase", input: "false", - expected: SyncFromInBridgesFalse, + expected: FalseMode, expectedError: "", }, { name: "false uppercase", input: "FALSE", - expected: SyncFromInBridgesFalse, + expected: FalseMode, expectedError: "", }, { name: "false mixed case", input: "FaLsE", - expected: SyncFromInBridgesFalse, + expected: FalseMode, expectedError: "", }, { name: "false with whitespace", input: " false ", - expected: SyncFromInBridgesFalse, + expected: FalseMode, expectedError: "", }, { name: "auto lowercase", input: "auto", - expected: SyncFromInBridgesAuto, + expected: AutoValue, expectedError: "", }, { name: "auto uppercase", input: "AUTO", - expected: SyncFromInBridgesAuto, + expected: AutoValue, expectedError: "", }, { name: "auto mixed case", input: "AuTo", - expected: SyncFromInBridgesAuto, + expected: AutoValue, expectedError: "", }, { name: "auto with whitespace", input: " auto ", - expected: SyncFromInBridgesAuto, + expected: AutoValue, expectedError: "", }, { @@ -114,7 +114,7 @@ func TestSyncFromInBridgesMode_UnmarshalText(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - var mode SyncFromInBridgesMode + var mode TrueFalseAutoMode err := mode.UnmarshalText([]byte(tt.input)) if tt.expectedError == "" { @@ -131,32 +131,32 @@ func TestSyncFromInBridgesMode_UnmarshalText(t *testing.T) { func TestSyncFromInBridgesMode_String(t *testing.T) { tests := []struct { name string - mode SyncFromInBridgesMode + mode TrueFalseAutoMode expected string }{ { name: "true mode", - mode: SyncFromInBridgesTrue, + mode: TrueMode, expected: "true", }, { name: "false 
mode", - mode: SyncFromInBridgesFalse, + mode: FalseMode, expected: "false", }, { name: "auto mode", - mode: SyncFromInBridgesAuto, + mode: AutoValue, expected: "auto", }, { name: "empty mode", - mode: SyncFromInBridgesMode(""), + mode: TrueFalseAutoMode(""), expected: "", }, { name: "invalid mode", - mode: SyncFromInBridgesMode("invalid"), + mode: TrueFalseAutoMode("invalid"), expected: "invalid", }, } @@ -172,67 +172,67 @@ func TestSyncFromInBridgesMode_String(t *testing.T) { func TestSyncFromInBridgesMode_Resolve(t *testing.T) { tests := []struct { name string - mode SyncFromInBridgesMode + mode TrueFalseAutoMode hasBridgeComponent bool expected bool }{ { name: "true mode with bridge component", - mode: SyncFromInBridgesTrue, + mode: TrueMode, hasBridgeComponent: true, expected: true, }, { name: "true mode without bridge component", - mode: SyncFromInBridgesTrue, + mode: TrueMode, hasBridgeComponent: false, expected: true, }, { name: "false mode with bridge component", - mode: SyncFromInBridgesFalse, + mode: FalseMode, hasBridgeComponent: true, expected: false, }, { name: "false mode without bridge component", - mode: SyncFromInBridgesFalse, + mode: FalseMode, hasBridgeComponent: false, expected: false, }, { name: "auto mode with bridge component", - mode: SyncFromInBridgesAuto, + mode: AutoValue, hasBridgeComponent: true, expected: true, }, { name: "auto mode without bridge component", - mode: SyncFromInBridgesAuto, + mode: AutoValue, hasBridgeComponent: false, expected: false, }, { name: "invalid mode with bridge component", - mode: SyncFromInBridgesMode("invalid"), + mode: TrueFalseAutoMode("invalid"), hasBridgeComponent: true, expected: false, }, { name: "invalid mode without bridge component", - mode: SyncFromInBridgesMode("invalid"), + mode: TrueFalseAutoMode("invalid"), hasBridgeComponent: false, expected: false, }, { name: "empty mode with bridge component", - mode: SyncFromInBridgesMode(""), + mode: TrueFalseAutoMode(""), hasBridgeComponent: true, 
expected: false, }, { name: "empty mode without bridge component", - mode: SyncFromInBridgesMode(""), + mode: TrueFalseAutoMode(""), hasBridgeComponent: false, expected: false, }, @@ -263,7 +263,7 @@ func TestConfig_Validate(t *testing.T) { name: "valid config with SyncFromInBridges true", config: Config{ BlockFinality: aggkittypes.SafeBlock, - SyncFromInBridges: SyncFromInBridgesTrue, + SyncFromInBridges: TrueMode, }, expectedError: "", }, @@ -271,7 +271,7 @@ func TestConfig_Validate(t *testing.T) { name: "valid config with SyncFromInBridges false", config: Config{ BlockFinality: aggkittypes.SafeBlock, - SyncFromInBridges: SyncFromInBridgesFalse, + SyncFromInBridges: FalseMode, }, expectedError: "", }, @@ -279,7 +279,7 @@ func TestConfig_Validate(t *testing.T) { name: "valid config with SyncFromInBridges auto", config: Config{ BlockFinality: aggkittypes.SafeBlock, - SyncFromInBridges: SyncFromInBridgesAuto, + SyncFromInBridges: AutoValue, }, expectedError: "", }, diff --git a/bridgesync/downloader.go b/bridgesync/downloader.go index 8a1ea785c..8b27ff050 100644 --- a/bridgesync/downloader.go +++ b/bridgesync/downloader.go @@ -71,11 +71,11 @@ func buildAppender( syncFromInBridges bool, bridgeDeployment *bridgeDeployment, logger *logger.Logger, - claimSync ClaimsSyncProcessor, + claimAppender sync.LogAppenderMap, ) (sync.LogAppenderMap, error) { var appender sync.LogAppenderMap - if claimSync != nil { - appender = claimSync.BuildAppender() + if claimAppender != nil { + appender = claimAppender } else { appender = make(sync.LogAppenderMap) } diff --git a/bridgesync/mock_bridge_querier.go b/bridgesync/mock_bridge_querier.go index 9f0b01f1e..56dc03ab6 100644 --- a/bridgesync/mock_bridge_querier.go +++ b/bridgesync/mock_bridge_querier.go @@ -5,6 +5,7 @@ package bridgesync import ( context "context" + claimsynctypes "github.com/agglayer/aggkit/claimsync/types" mock "github.com/stretchr/testify/mock" ) @@ -22,7 +23,7 @@ func (_m *BridgeQuerierMock) EXPECT() 
*BridgeQuerierMock_Expecter { } // GetBoundaryBlockForClaimType provides a mock function with given fields: ctx, claimType -func (_m *BridgeQuerierMock) GetBoundaryBlockForClaimType(ctx context.Context, claimType ClaimType) (uint64, error) { +func (_m *BridgeQuerierMock) GetBoundaryBlockForClaimType(ctx context.Context, claimType claimsynctypes.ClaimType) (uint64, error) { ret := _m.Called(ctx, claimType) if len(ret) == 0 { @@ -31,16 +32,16 @@ func (_m *BridgeQuerierMock) GetBoundaryBlockForClaimType(ctx context.Context, c var r0 uint64 var r1 error - if rf, ok := ret.Get(0).(func(context.Context, ClaimType) (uint64, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, claimsynctypes.ClaimType) (uint64, error)); ok { return rf(ctx, claimType) } - if rf, ok := ret.Get(0).(func(context.Context, ClaimType) uint64); ok { + if rf, ok := ret.Get(0).(func(context.Context, claimsynctypes.ClaimType) uint64); ok { r0 = rf(ctx, claimType) } else { r0 = ret.Get(0).(uint64) } - if rf, ok := ret.Get(1).(func(context.Context, ClaimType) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, claimsynctypes.ClaimType) error); ok { r1 = rf(ctx, claimType) } else { r1 = ret.Error(1) @@ -56,14 +57,14 @@ type BridgeQuerierMock_GetBoundaryBlockForClaimType_Call struct { // GetBoundaryBlockForClaimType is a helper method to define mock.On call // - ctx context.Context -// - claimType ClaimType +// - claimType claimsynctypes.ClaimType func (_e *BridgeQuerierMock_Expecter) GetBoundaryBlockForClaimType(ctx interface{}, claimType interface{}) *BridgeQuerierMock_GetBoundaryBlockForClaimType_Call { return &BridgeQuerierMock_GetBoundaryBlockForClaimType_Call{Call: _e.mock.On("GetBoundaryBlockForClaimType", ctx, claimType)} } -func (_c *BridgeQuerierMock_GetBoundaryBlockForClaimType_Call) Run(run func(ctx context.Context, claimType ClaimType)) *BridgeQuerierMock_GetBoundaryBlockForClaimType_Call { +func (_c *BridgeQuerierMock_GetBoundaryBlockForClaimType_Call) Run(run func(ctx 
context.Context, claimType claimsynctypes.ClaimType)) *BridgeQuerierMock_GetBoundaryBlockForClaimType_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(ClaimType)) + run(args[0].(context.Context), args[1].(claimsynctypes.ClaimType)) }) return _c } @@ -73,7 +74,7 @@ func (_c *BridgeQuerierMock_GetBoundaryBlockForClaimType_Call) Return(_a0 uint64 return _c } -func (_c *BridgeQuerierMock_GetBoundaryBlockForClaimType_Call) RunAndReturn(run func(context.Context, ClaimType) (uint64, error)) *BridgeQuerierMock_GetBoundaryBlockForClaimType_Call { +func (_c *BridgeQuerierMock_GetBoundaryBlockForClaimType_Call) RunAndReturn(run func(context.Context, claimsynctypes.ClaimType) (uint64, error)) *BridgeQuerierMock_GetBoundaryBlockForClaimType_Call { _c.Call.Return(run) return _c } diff --git a/bridgesync/processor.go b/bridgesync/processor.go index 7edf9955f..a0258b9e9 100644 --- a/bridgesync/processor.go +++ b/bridgesync/processor.go @@ -14,6 +14,7 @@ import ( bridgetypes "github.com/agglayer/aggkit/bridgeservice/types" "github.com/agglayer/aggkit/bridgesync/migrations" + claimsynctypes "github.com/agglayer/aggkit/claimsync/types" aggkitcommon "github.com/agglayer/aggkit/common" "github.com/agglayer/aggkit/db" "github.com/agglayer/aggkit/db/compatibility" @@ -107,11 +108,6 @@ const ( o.block_timestamp, o.type` - // claimsByGERSQL is the query used by GetClaimsByGER. - claimsByGERSQL = "SELECT " + claimColumnsSQL + - " FROM claim WHERE global_exit_root = $1 AND type = $2" + - " ORDER BY block_num ASC, block_pos ASC" - // bridgeByDepositCountSQL is the query used by GetBridgeByDepositCount for the main bridge table. // deposit_count is a unique monotonic counter per bridge event in the contract, so no // additional origin_network filter is needed (it would incorrectly exclude L2-native tokens). 
@@ -233,179 +229,6 @@ func (b *Bridge) Hash() common.Hash { ) } -type ClaimType string - -const ( - ClaimEvent ClaimType = "ClaimEvent" - DetailedClaimEvent ClaimType = "DetailedClaimEvent" -) - -// Claim representation of a claim event -type Claim struct { - BlockNum uint64 `meddler:"block_num"` - BlockPos uint64 `meddler:"block_pos"` - TxHash common.Hash `meddler:"tx_hash,hash"` - GlobalIndex *big.Int `meddler:"global_index,bigint"` - OriginNetwork uint32 `meddler:"origin_network"` - OriginAddress common.Address `meddler:"origin_address"` - DestinationAddress common.Address `meddler:"destination_address"` - Amount *big.Int `meddler:"amount,bigint"` - ProofLocalExitRoot types.Proof `meddler:"proof_local_exit_root,merkleproof"` - ProofRollupExitRoot types.Proof `meddler:"proof_rollup_exit_root,merkleproof"` - MainnetExitRoot common.Hash `meddler:"mainnet_exit_root,hash"` - RollupExitRoot common.Hash `meddler:"rollup_exit_root,hash"` - GlobalExitRoot common.Hash `meddler:"global_exit_root,hash"` - DestinationNetwork uint32 `meddler:"destination_network"` - Metadata []byte `meddler:"metadata"` - IsMessage bool `meddler:"is_message"` - BlockTimestamp uint64 `meddler:"block_timestamp"` - Type ClaimType `meddler:"type"` -} - -func (c *Claim) String() string { - globalIndexStr := nilStr - if c.GlobalIndex != nil { - globalIndexStr = c.GlobalIndex.String() - } - - amountStr := nilStr - if c.Amount != nil { - amountStr = c.Amount.String() - } - - return fmt.Sprintf("Claim{BlockNum: %d, BlockPos: %d, TxHash: %s, GlobalIndex: %s, "+ - "OriginNetwork: %d, OriginAddress: %s, DestinationAddress: %s, Amount: %s, "+ - "ProofLocalExitRoot: %v, ProofRollupExitRoot: %v, MainnetExitRoot: %s, "+ - "RollupExitRoot: %s, GlobalExitRoot: %s, DestinationNetwork: %d, Metadata: %x, "+ - "IsMessage: %t, BlockTimestamp: %d, Type: %s}", - c.BlockNum, c.BlockPos, c.TxHash.String(), globalIndexStr, - c.OriginNetwork, c.OriginAddress.String(), c.DestinationAddress.String(), amountStr, - 
c.ProofLocalExitRoot.String(), c.ProofRollupExitRoot.String(), c.MainnetExitRoot.String(), - c.RollupExitRoot.String(), c.GlobalExitRoot.String(), c.DestinationNetwork, c.Metadata, - c.IsMessage, c.BlockTimestamp, c.Type) -} - -// decodeEtrogCalldata decodes claim calldata for Etrog fork -func (c *Claim) decodeEtrogCalldata(data []any) (bool, error) { - // Unpack method inputs. Note that both claimAsset and claimMessage have the same interface - // for the relevant parts - // claimAsset/claimMessage( - // 0: smtProofLocalExitRoot, - // 1: smtProofRollupExitRoot, - // 2: globalIndex, - // 3: mainnetExitRoot, - // 4: rollupExitRoot, - // 5: originNetwork, - // 6: originTokenAddress/originAddress, - // 7: destinationNetwork, - // 8: destinationAddress, - // 9: amount, - // 10: metadata, - // ) - - actualGlobalIndex, ok := data[2].(*big.Int) - if !ok { - return false, fmt.Errorf("unexpected type for actualGlobalIndex, expected *big.Int got '%T'", data[2]) - } - if actualGlobalIndex.Cmp(c.GlobalIndex) != 0 { - // not the claim we're looking for - return false, nil - } - - rawLERProof, ok := data[0].([types.DefaultHeight][common.HashLength]byte) - if !ok { - return false, fmt.Errorf("unexpected type for rawLERProof, expected [32][32]byte got '%T'", data[0]) - } - - rawRERProof, ok := data[1].([types.DefaultHeight][common.HashLength]byte) - if !ok { - return false, fmt.Errorf("unexpected type for rawRERProof, expected [32][32]byte got '%T'", data[1]) - } - - c.ProofLocalExitRoot = types.NewProof(rawLERProof) - c.ProofRollupExitRoot = types.NewProof(rawRERProof) - - c.MainnetExitRoot, ok = data[3].([common.HashLength]byte) - if !ok { - return false, fmt.Errorf("unexpected type for 'MainnetExitRoot'. Expected '[32]byte', got '%T'", data[3]) - } - - c.RollupExitRoot, ok = data[4].([common.HashLength]byte) - if !ok { - return false, fmt.Errorf("unexpected type for 'RollupExitRoot'. 
Expected '[32]byte', got '%T'", data[4]) - } - - c.DestinationNetwork, ok = data[7].(uint32) - if !ok { - return false, fmt.Errorf("unexpected type for 'DestinationNetwork'. Expected 'uint32', got '%T'", data[7]) - } - - c.Metadata, ok = data[10].([]byte) - if !ok { - return false, fmt.Errorf("unexpected type for 'claim Metadata'. Expected '[]byte', got '%T'", data[10]) - } - - c.GlobalExitRoot = crypto.Keccak256Hash(c.MainnetExitRoot.Bytes(), c.RollupExitRoot.Bytes()) - - return true, nil -} - -// decodePreEtrogCalldata decodes the claim calldata for pre-Etrog forks -func (c *Claim) decodePreEtrogCalldata(data []any) (bool, error) { - // claimMessage/claimAsset( - // 0: bytes32[32] smtProof, - // 1: uint32 index, - // 2: bytes32 mainnetExitRoot, - // 3: bytes32 rollupExitRoot, - // 4: uint32 originNetwork, - // 5: address originTokenAddress, - // 6: uint32 destinationNetwork, - // 7: address destinationAddress, - // 8: uint256 amount, - // 9: bytes metadata - // ) - actualGlobalIndex, ok := data[1].(uint32) - if !ok { - return false, fmt.Errorf("unexpected type for actualGlobalIndex, expected uint32 got '%T'", data[1]) - } - - if new(big.Int).SetUint64(uint64(actualGlobalIndex)).Cmp(c.GlobalIndex) != 0 { - // not the claim we're looking for - return false, nil - } - - rawLERProof, ok := data[0].([types.DefaultHeight][common.HashLength]byte) - if !ok { - return false, fmt.Errorf("unexpected type for proofLERBytes, expected [32][32]byte got '%T'", data[0]) - } - - c.ProofLocalExitRoot = types.NewProof(rawLERProof) - - c.MainnetExitRoot, ok = data[2].([common.HashLength]byte) - if !ok { - return false, fmt.Errorf("unexpected type for 'MainnetExitRoot'. Expected '[32]byte', got '%T'", data[2]) - } - - c.RollupExitRoot, ok = data[3].([common.HashLength]byte) - if !ok { - return false, fmt.Errorf("unexpected type for 'RollupExitRoot'. 
Expected '[32]byte', got '%T'", data[3]) - } - - c.DestinationNetwork, ok = data[6].(uint32) - if !ok { - return false, fmt.Errorf("unexpected type for 'DestinationNetwork'. Expected 'uint32', got '%T'", data[6]) - } - - c.Metadata, ok = data[9].([]byte) - if !ok { - return false, fmt.Errorf("unexpected type for 'Metadata'. Expected '[]byte', got '%T'", data[9]) - } - - c.GlobalExitRoot = crypto.Keccak256Hash(c.MainnetExitRoot.Bytes(), c.RollupExitRoot.Bytes()) - - return true, nil -} - // TokenMapping representation of a NewWrappedToken event, that is emitted by the bridge contract type TokenMapping struct { BlockNum uint64 `meddler:"block_num"` @@ -471,50 +294,6 @@ func (r *RemoveLegacyToken) String() string { r.LegacyTokenAddress.String()) } -// UnsetClaim representation of an UpdatedUnsetGlobalIndexHashChain event, -// that is emitted by the bridge contract when a claim is unset. -type UnsetClaim struct { - BlockNum uint64 `meddler:"block_num"` - BlockPos uint64 `meddler:"block_pos"` - TxHash common.Hash `meddler:"tx_hash,hash"` - GlobalIndex *big.Int `meddler:"global_index,bigint"` - UnsetGlobalIndexHashChain common.Hash `meddler:"unset_global_index_hash_chain,hash"` - CreatedAt uint64 `meddler:"created_at"` -} - -func (u *UnsetClaim) String() string { - globalIndexStr := nilStr - if u.GlobalIndex != nil { - globalIndexStr = u.GlobalIndex.String() - } - - return fmt.Sprintf("UnsetClaim{BlockNum: %d, BlockPos: %d, TxHash: %s, "+ - "GlobalIndex: %s, UnsetGlobalIndexHashChain: %s, CreatedAt: %d}", - u.BlockNum, u.BlockPos, u.TxHash.String(), - globalIndexStr, u.UnsetGlobalIndexHashChain.String(), u.CreatedAt) -} - -// SetClaim representation of a SetClaim event, -// that is emitted by the L2 bridge contract when a claim is set. 
-type SetClaim struct { - BlockNum uint64 `meddler:"block_num"` - BlockPos uint64 `meddler:"block_pos"` - TxHash common.Hash `meddler:"tx_hash,hash"` - GlobalIndex *big.Int `meddler:"global_index,bigint"` - CreatedAt uint64 `meddler:"created_at"` -} - -func (s *SetClaim) String() string { - globalIndexStr := nilStr - if s.GlobalIndex != nil { - globalIndexStr = s.GlobalIndex.String() - } - return fmt.Sprintf("SetClaim{BlockNum: %d, BlockPos: %d, TxHash: %s, "+ - "GlobalIndex: %s, CreatedAt: %d}", - s.BlockNum, s.BlockPos, s.TxHash.String(), - globalIndexStr, s.CreatedAt) -} - // BackwardLET representation of a BackwardLET event, // that is emitted by the L2 bridge contract when a LET is rolled back. type BackwardLET struct { @@ -579,15 +358,17 @@ func (f *ForwardLET) String() string { // Event combination of bridge, claim, token mapping and legacy token migration events type Event struct { - Bridge *Bridge - Claim *Claim + Bridge *Bridge + TokenMapping *TokenMapping LegacyTokenMigration *LegacyTokenMigration RemoveLegacyToken *RemoveLegacyToken - UnsetClaim *UnsetClaim - SetClaim *SetClaim BackwardLET *BackwardLET ForwardLET *ForwardLET + //Claim *Claim + //UnsetClaim *UnsetClaim + //SetClaim *SetClaim + } func (e Event) String() string { @@ -595,9 +376,6 @@ func (e Event) String() string { if e.Bridge != nil { parts = append(parts, e.Bridge.String()) } - if e.Claim != nil { - parts = append(parts, e.Claim.String()) - } if e.TokenMapping != nil { parts = append(parts, e.TokenMapping.String()) } @@ -607,19 +385,13 @@ func (e Event) String() string { if e.RemoveLegacyToken != nil { parts = append(parts, e.RemoveLegacyToken.String()) } - if e.UnsetClaim != nil { - parts = append(parts, e.UnsetClaim.String()) - } - if e.SetClaim != nil { - parts = append(parts, e.SetClaim.String()) - } if e.BackwardLET != nil { parts = append(parts, e.BackwardLET.String()) } if e.ForwardLET != nil { parts = append(parts, e.ForwardLET.String()) } - return "Event{" + strings.Join(parts, 
", ") + "}" + return "bridgesync.Event{" + strings.Join(parts, ", ") + "}" } // BridgeSyncRuntimeData contains runtime environment data used for database compatibility checks. @@ -701,28 +473,6 @@ func (b BridgeSyncRuntimeData) IsCompatible(storage BridgeSyncRuntimeData) (*Bri return nil, nil } -type BridgeQuerier interface { - GetBoundaryBlockForClaimType(ctx context.Context, claimType ClaimType) (uint64, error) -} - -var _ BridgeQuerier = (*processor)(nil) - -// ClaimsSyncProcessor handles storage of claim-related events within a bridgesync transaction. -// Pass an implementation (e.g. from claimsync.NewEmbedded) to bridgesync to delegate claim -// storage and event parsing to claimsync, keeping the two components in sync atomically. -type ClaimsSyncProcessor interface { - // ProcessBlockWithTx stores Claim, UnsetClaim and SetClaim events using an existing tx. - // Bridgesync calls this from ProcessBlock, reusing its own tx so no new tx is needed. - // insertBlock must be false when bridgesync already inserted the block row. - ProcessBlockWithTx(ctx context.Context, tx dbtypes.Querier, block *sync.Block, insertBlock bool) error - // ReorgWithTx deletes claim data for all blocks >= firstReorgedBlock using the provided tx. - // The caller is responsible for commit and rollback. - ReorgWithTx(tx dbtypes.Querier, firstReorgedBlock uint64) (int64, error) - // BuildAppender returns the LogAppenderMap for claim-related log events. - // Bridgesync merges this into its own appender so claimsync's handlers are used. 
- BuildAppender() sync.LogAppenderMap -} - type processor struct { syncerID string db *sql.DB @@ -734,17 +484,12 @@ type processor struct { dbQueryTimeout time.Duration bridgeSubscriber aggkitcommon.PubSub[uint64] initialLER common.Hash - claimEventsProcessor ClaimsSyncProcessor + claimEventsProcessor claimsynctypes.EmbeddedProcessor + compatibility.CompatibilityDataStorager[BridgeSyncRuntimeData] } -func newProcessor( - dbPath string, - syncerID string, - logger *log.Logger, - dbQueryTimeout time.Duration, - claimEventsProcessor ClaimsSyncProcessor, -) (*processor, error) { +func newSqliteDB(dbPath string) (*sql.DB, error) { err := migrations.RunMigrations(dbPath) if err != nil { return nil, err @@ -753,7 +498,16 @@ func newProcessor( if err != nil { return nil, err } + return database, nil +} +func newProcessor( + database *sql.DB, + syncerID string, + logger *log.Logger, + dbQueryTimeout time.Duration, + claimEventsProcessor claimsynctypes.EmbeddedProcessor, +) (*processor, error) { exitTree := tree.NewAppendOnlyTree(database, "") return &processor{ @@ -804,165 +558,6 @@ func (p *processor) GetBridges( return bridges, nil } -func (p *processor) GetClaims(ctx context.Context, fromBlock, toBlock uint64) ([]Claim, error) { - // SQL query with compaction logic implementing three cases: - // Case 1: If unset_claim exists for a global_index, return all claims in range uncompacted - // Case 2: If no unset_claim exists and globally oldest is in range, return compacted claim - // Case 3: If globally oldest is outside range and no unset_claim exists, return nothing - query := fmt.Sprintf(` - WITH all_claims_ranked AS ( - SELECT - *, - ROW_NUMBER() OVER (PARTITION BY global_index ORDER BY block_num ASC, block_pos ASC) AS rn_oldest_global, - ROW_NUMBER() OVER (PARTITION BY global_index ORDER BY block_num DESC, block_pos DESC) AS rn_newest_global - FROM claim - ), - claims_in_range AS ( - SELECT * - FROM all_claims_ranked - WHERE block_num >= $1 AND block_num <= $2 - ), - 
claims_with_unset AS ( - -- Case 1: Return all claims in range if unset_claim exists (no compaction) - SELECT - c.%s - FROM claims_in_range c - WHERE EXISTS ( - SELECT 1 FROM unset_claim uc - WHERE uc.global_index = c.global_index - ) - ), - compactable_claims AS ( - -- Case 2 & 3: Handle claims without unset_claim - SELECT - %s - FROM claims_in_range o - JOIN claims_in_range n ON o.global_index = n.global_index AND n.rn_newest_global = 1 - WHERE o.rn_oldest_global = 1 -- Globally oldest claim must be in range - AND NOT EXISTS ( - SELECT 1 FROM unset_claim uc - WHERE uc.global_index = o.global_index - ) - ) - SELECT * FROM claims_with_unset - UNION ALL - SELECT * FROM compactable_claims - ORDER BY block_num ASC, block_pos ASC; -`, claimColumnsSQL, compactedClaimsSelectSQL) - - rows, err := p.queryBlockRange(ctx, p.db, fromBlock, toBlock, query) - if err != nil { - if errors.Is(err, db.ErrNotFound) { - p.log.Debugf("no claims were found for block range [%d..%d]", fromBlock, toBlock) - return []Claim{}, nil - } - p.log.Errorf("GetClaims: queryBlockRange failed for block range [%d..%d]: %v", fromBlock, toBlock, err) - return nil, err - } - - defer func() { - if cerr := rows.Close(); cerr != nil { - p.log.Errorf("error closing rows: %v", cerr) - } - }() - - claimPtrs := []*Claim{} - if err = meddler.ScanAll(rows, &claimPtrs); err != nil { - p.log.Errorf("GetClaims: meddler.ScanAll failed for block range [%d..%d]: %v", fromBlock, toBlock, err) - return nil, err - } - claimsIface := db.SlicePtrsToSlice(claimPtrs) - claims, ok := claimsIface.([]Claim) - if !ok { - p.log.Errorf("GetClaims: failed to convert from []*Claim to []Claim for block range [%d..%d]", fromBlock, toBlock) - return nil, errFailToConvertClaims - } - return claims, nil -} - -func (p *processor) GetClaimsByGlobalIndex(ctx context.Context, globalIndex *big.Int) ([]Claim, error) { - if globalIndex == nil { - return nil, fmt.Errorf("global index parameter cannot be nil") - } - - // SQL query with compaction 
logic implementing three cases: - // Case 1: If unset_claim exists for the global_index, return all claims uncompacted - // Case 2: If no unset_claim exists, return compacted claim (oldest metadata + newest proofs) - // Case 3: Same as case 2 (all claims for this global_index are considered "in range") - query := fmt.Sprintf(` - WITH all_claims_for_index AS ( - SELECT - *, - ROW_NUMBER() OVER (ORDER BY block_num ASC, block_pos ASC) AS rn_oldest, - ROW_NUMBER() OVER (ORDER BY block_num DESC, block_pos DESC) AS rn_newest - FROM claim - WHERE global_index = $1 - ), - claims_with_unset AS ( - -- Case 1: Return all claims if unset_claim exists (no compaction) - SELECT - c.%s - FROM all_claims_for_index c - WHERE EXISTS ( - SELECT 1 FROM unset_claim uc - WHERE uc.global_index = $1 - ) - ), - compactable_claims AS ( - -- Case 2: Handle claims without unset_claim (compact) - SELECT - %s - FROM all_claims_for_index o - JOIN all_claims_for_index n ON n.rn_newest = 1 - WHERE o.rn_oldest = 1 - AND NOT EXISTS ( - SELECT 1 FROM unset_claim uc - WHERE uc.global_index = $1 - ) - ) - SELECT * FROM claims_with_unset - UNION ALL - SELECT * FROM compactable_claims - ORDER BY block_num ASC, block_pos ASC; -`, claimColumnsSQL, compactedClaimsSelectSQL) - - // Create a context with database timeout - dbCtx, cancel := p.withDatabaseTimeout(ctx) - defer cancel() - - rows, err := p.db.QueryContext(dbCtx, query, globalIndex.String()) - if err != nil { - if errors.Is(err, sql.ErrNoRows) { - p.log.Debugf("no claims were found for global index: %s", globalIndex.String()) - return []Claim{}, nil - } - p.log.Errorf("GetClaimsByGlobalIndex: query failed for global index %s: %v", globalIndex.String(), err) - return nil, fmt.Errorf("failed to query claims by global index: %s: %w", globalIndex.String(), err) - } - - defer func() { - if cerr := rows.Close(); cerr != nil { - p.log.Errorf("error closing rows: %v", cerr) - } - }() - - claimPtrs := []*Claim{} - if err = meddler.ScanAll(rows, &claimPtrs); 
err != nil { - p.log.Errorf("GetClaimsByGlobalIndex: meddler.ScanAll failed for global index %s: %v", globalIndex.String(), err) - return nil, fmt.Errorf("failed to scan claims for global index: %s: %w", globalIndex.String(), err) - } - - claimsIface := db.SlicePtrsToSlice(claimPtrs) - claims, ok := claimsIface.([]Claim) - if !ok { - p.log.Errorf("GetClaimsByGlobalIndex: failed to convert from []*Claim to []Claim for global index: %s", - globalIndex.String()) - return nil, errFailToConvertClaims - } - - return claims, nil -} - func (p *processor) GetBridgesPaged( ctx context.Context, pageNumber, pageSize uint32, depositCount *uint64, networkIDs []uint32, fromAddress string, @@ -1053,118 +648,8 @@ func (p *processor) buildBridgesFilterClause(depositCount *uint64, networkIDs [] return "", nil } -func (p *processor) GetClaimsPaged( - ctx context.Context, pageNumber, pageSize uint32, - networkIDs []uint32, globalIndex *big.Int, -) ([]*Claim, int, error) { - whereClause := p.buildClaimsFilterClause(networkIDs, globalIndex) - claimsCount, err := p.getCompactedClaimsCount(ctx, whereClause) - if err != nil { - return nil, 0, err - } - - if claimsCount == 0 { - return []*Claim{}, 0, nil - } - - offset, err := p.calculateOffset(pageNumber, pageSize, claimsCount, "claims") - if err != nil { - return nil, 0, err - } - - // Create a context with database timeout - dbCtx, cancel := p.withDatabaseTimeout(ctx) - defer cancel() - - // Pagination query with compaction logic implementing three cases: - // Case 1: If unset_claim exists for a global_index, return all claims on page uncompacted - // Case 2: If no unset_claim exists and globally oldest is on page, return compacted claim - // Case 3: If globally oldest is outside page and no unset_claim exists, exclude from results - // - // This query: - // - Gets claims for the requested page (DESC order: newest first) - // - Ranks all claims globally by global_index to find oldest and newest - // - For claims with unset_claim: returns 
all instances on the page uncompacted - // - For claims without unset_claim: only returns compacted version if newest is on page - //nolint:gosec - query := fmt.Sprintf(` - WITH page_claims AS ( - SELECT * - FROM claim - %s - ORDER BY block_num DESC, block_pos DESC - LIMIT $1 OFFSET $2 - ), - all_claims_ranked AS ( - SELECT - *, - ROW_NUMBER() OVER (PARTITION BY global_index ORDER BY block_num ASC, block_pos ASC) AS rn_oldest_global, - ROW_NUMBER() OVER (PARTITION BY global_index ORDER BY block_num DESC, block_pos DESC) AS rn_newest_global - FROM claim - %s - ), - claims_with_unset_on_page AS ( - -- Case 1: Return all claims on page if unset_claim exists (no compaction) - SELECT - pc.%s - FROM page_claims pc - WHERE EXISTS ( - SELECT 1 FROM unset_claim uc - WHERE uc.global_index = pc.global_index - ) - ), - newest_on_page AS ( - SELECT DISTINCT pc.global_index - FROM page_claims pc - JOIN all_claims_ranked acr ON pc.global_index = acr.global_index AND acr.rn_newest_global = 1 - WHERE pc.block_num = acr.block_num AND pc.block_pos = acr.block_pos - AND NOT EXISTS ( - SELECT 1 FROM unset_claim uc - WHERE uc.global_index = pc.global_index - ) - ), - compactable_claims AS ( - -- Case 2 & 3: Handle claims without unset_claim - SELECT - %s - FROM all_claims_ranked o - JOIN all_claims_ranked n ON o.global_index = n.global_index AND n.rn_newest_global = 1 - WHERE o.rn_oldest_global = 1 -- Globally oldest claim - AND o.global_index IN (SELECT global_index FROM newest_on_page) - ) - SELECT * FROM claims_with_unset_on_page - UNION ALL - SELECT * FROM compactable_claims - ORDER BY block_num DESC, block_pos DESC; - `, whereClause, whereClause, claimColumnsSQL, compactedClaimsSelectSQL) - - rows, err := p.db.QueryContext(dbCtx, query, pageSize, offset) - if err != nil { - if errors.Is(err, sql.ErrNoRows) { - p.log.Debugf("no claims were found for provided parameters (pageNumber=%d, pageSize=%d)", - pageNumber, pageSize) - return nil, claimsCount, nil - } - 
p.log.Errorf("GetClaimsPaged: queryPaged failed for pageNumber=%d, pageSize=%d: %v", pageNumber, pageSize, err) - return nil, 0, err - } - defer func() { - if cerr := rows.Close(); cerr != nil { - p.log.Errorf("error closing rows: %v", cerr) - } - }() - - claims := []*Claim{} - if err = meddler.ScanAll(rows, &claims); err != nil { - p.log.Errorf("GetClaimsPaged: meddler.ScanAll failed for pageNumber=%d, pageSize=%d: %v", pageNumber, pageSize, err) - return nil, 0, err - } - - return claims, claimsCount, nil -} - // GetBoundaryBlockForClaimType returns the max (latest) block number for a given claim type -func (p *processor) GetBoundaryBlockForClaimType(ctx context.Context, claimType ClaimType) (uint64, error) { +func (p *processor) GetBoundaryBlockForClaimType(ctx context.Context, claimType claimsynctypes.ClaimType) (uint64, error) { dbCtx, cancel := p.withDatabaseTimeout(ctx) defer cancel() @@ -1182,54 +667,6 @@ func (p *processor) GetBoundaryBlockForClaimType(ctx context.Context, claimType return *blockNumber, nil } -// GetUnsetClaimsPaged returns a paginated list of unset claims -// -//nolint:dupl -func (p *processor) GetUnsetClaimsPaged( - ctx context.Context, pageNumber, pageSize uint32, - globalIndex *big.Int, -) ([]*UnsetClaim, int, error) { - whereClause := buildGlobalIndexFilterClause(globalIndex) - unclaimsCount, err := p.GetTotalNumberOfRecords(ctx, unsetClaimTableName, whereClause) - if err != nil { - return nil, 0, err - } - - if unclaimsCount == 0 { - return []*UnsetClaim{}, 0, nil - } - - offset, err := p.calculateOffset(pageNumber, pageSize, unclaimsCount, unsetClaimTableName) - if err != nil { - return nil, 0, err - } - - rows, err := p.queryPaged(ctx, p.db, offset, pageSize, unsetClaimTableName, orderByBlockDesc, whereClause) - if err != nil { - if errors.Is(err, db.ErrNotFound) { - p.log.Debugf("no unset claims were found for provided parameters (pageNumber=%d, pageSize=%d)", - pageNumber, pageSize) - return nil, unclaimsCount, nil - } - 
p.log.Errorf("GetUnsetClaimsPaged: queryPaged failed for pageNumber=%d, pageSize=%d: %v", pageNumber, pageSize, err) - return nil, 0, err - } - defer func() { - if cerr := rows.Close(); cerr != nil { - p.log.Errorf("error closing rows: %v", cerr) - } - }() - - unsetClaims := []*UnsetClaim{} - if err = meddler.ScanAll(rows, &unsetClaims); err != nil { - p.log.Errorf("GetUnsetClaimsPaged: meddler.ScanAll failed for pageNumber=%d, pageSize=%d: %v", - pageNumber, pageSize, err) - return nil, 0, err - } - - return unsetClaims, unclaimsCount, nil -} - // buildGlobalIndexFilterClause builds a WHERE clause for filtering by global_index func buildGlobalIndexFilterClause(globalIndex *big.Int) string { if globalIndex != nil { @@ -1239,75 +676,6 @@ func buildGlobalIndexFilterClause(globalIndex *big.Int) string { return "" } -// GetSetClaimsPaged returns a paginated list of set claims -// -//nolint:dupl -func (p *processor) GetSetClaimsPaged( - ctx context.Context, pageNumber, pageSize uint32, - globalIndex *big.Int, -) ([]*SetClaim, int, error) { - whereClause := buildGlobalIndexFilterClause(globalIndex) - setClaimsCount, err := p.GetTotalNumberOfRecords(ctx, setClaimTableName, whereClause) - if err != nil { - return nil, 0, err - } - - if setClaimsCount == 0 { - return []*SetClaim{}, 0, nil - } - - offset, err := p.calculateOffset(pageNumber, pageSize, setClaimsCount, setClaimTableName) - if err != nil { - return nil, 0, err - } - - rows, err := p.queryPaged(ctx, p.db, offset, pageSize, setClaimTableName, orderByBlockDesc, whereClause) - if err != nil { - if errors.Is(err, db.ErrNotFound) { - p.log.Debugf("no set claims were found for provided parameters (pageNumber=%d, pageSize=%d)", - pageNumber, pageSize) - return nil, setClaimsCount, nil - } - p.log.Errorf("GetSetClaimsPaged: queryPaged failed for pageNumber=%d, pageSize=%d: %v", pageNumber, pageSize, err) - return nil, 0, err - } - defer func() { - if cerr := rows.Close(); cerr != nil { - p.log.Errorf("error closing 
rows: %v", cerr) - } - }() - - setClaims := []*SetClaim{} - if err = meddler.ScanAll(rows, &setClaims); err != nil { - p.log.Errorf("GetSetClaimsPaged: meddler.ScanAll failed for pageNumber=%d, pageSize=%d: %v", - pageNumber, pageSize, err) - return nil, 0, err - } - - return setClaims, setClaimsCount, nil -} - -// buildClaimsFilterClause builds the WHERE clause for the claims table -// based on the provided networkIDs and globalIndex -func (p *processor) buildClaimsFilterClause(networkIDs []uint32, globalIndex *big.Int) string { - const clauseCapacity = 2 - clauses := make([]string, 0, clauseCapacity) - if len(networkIDs) > 0 { - clauses = append(clauses, buildNetworkIDsFilter(networkIDs, "origin_network")) - } - - if globalIndex != nil { - clauses = append(clauses, - fmt.Sprintf("global_index = '%s'", globalIndex.String()), - ) - } - - if len(clauses) > 0 { - return " WHERE " + strings.Join(clauses, " AND ") - } - return "" -} - // buildTokenMappingsFilterClause builds the WHERE clause for the token_mapping table // based on the provided originTokenAddress func (p *processor) buildTokenMappingsFilterClause(originTokenAddress string) string { @@ -1442,11 +810,11 @@ func (p *processor) queryPagedWithParams(ctx context.Context, tx dbtypes.Querier // GetLastProcessedBlock returns the last processed block by the processor, including blocks // that don't have events -func (p *processor) GetLastProcessedBlock(ctx context.Context) (uint64, error) { +func (p *processor) GetLastProcessedBlock(ctx context.Context) (uint64, bool, error) { return p.getLastProcessedBlockWithTx(ctx, p.db) } -func (p *processor) getLastProcessedBlockWithTx(ctx context.Context, tx dbtypes.Querier) (uint64, error) { +func (p *processor) getLastProcessedBlockWithTx(ctx context.Context, tx dbtypes.Querier) (uint64, bool, error) { var lastProcessedBlockNum uint64 // Create a context with database timeout @@ -1456,9 +824,9 @@ func (p *processor) getLastProcessedBlockWithTx(ctx context.Context, tx 
dbtypes. row := tx.QueryRowContext(dbCtx, "SELECT num FROM block ORDER BY num DESC LIMIT 1;") err := row.Scan(&lastProcessedBlockNum) if errors.Is(err, sql.ErrNoRows) { - return 0, nil + return 0, false, nil } - return lastProcessedBlockNum, err + return lastProcessedBlockNum, err == nil, err } // Reorg triggers a purge and reset process on the processor to leaf it on a state @@ -1668,12 +1036,23 @@ func (p *processor) ProcessBlock(ctx context.Context, block sync.Block) error { var blockPos *uint64 var hasAnyBridge bool - var claimEvents []Event for _, e := range block.Events { event, ok := e.(Event) if !ok { - p.log.Errorf("failed to convert event to Event type in block %d", block.Num) - return errors.New("failed to convert sync.Block.Event to Event") + // Try to process with embedded claimProcessor + if p.claimEventsProcessor != nil { + if err := p.claimEventsProcessor.ProcessBlockWithTx(dbCtx, tx, block, e); err != nil { + p.log.Errorf("ProcessBlock: failed to process event type %T using embedded claimProcessor in block %d: %v", + e, + block.Num, err) + return err + } + // It have been processed by embedded claimProcessor, do next item in loop + continue + } + err = fmt.Errorf("ProcessBlock: failed to convert event %T to Event type in block %d", e, block.Num) + p.log.Errorf(err.Error()) + return err } if event.Bridge != nil { @@ -1701,15 +1080,6 @@ func (p *processor) ProcessBlock(ctx context.Context, block sync.Block) error { // Mark that this block has at least one bridge hasAnyBridge = true } - // TODO: remove - if event.Claim != nil { - if p.claimEventsProcessor != nil { - claimEvents = append(claimEvents, event) - } else if err = meddler.Insert(tx, claimTableName, event.Claim); err != nil { - p.log.Errorf("failed to insert claim event at block %d: %v", block.Num, err) - return err - } - } if event.TokenMapping != nil { if err = meddler.Insert(tx, tokenMappingTableName, event.TokenMapping); err != nil { @@ -1732,24 +1102,6 @@ func (p *processor) 
ProcessBlock(ctx context.Context, block sync.Block) error { return err } } - // TODO: remove - if event.UnsetClaim != nil { - if p.claimEventsProcessor != nil { - claimEvents = append(claimEvents, event) - } else if err = meddler.Insert(tx, unsetClaimTableName, event.UnsetClaim); err != nil { - p.log.Errorf("failed to insert unset claim event at block %d: %v", block.Num, err) - return err - } - } - // TODO: remove - if event.SetClaim != nil { - if p.claimEventsProcessor != nil { - claimEvents = append(claimEvents, event) - } else if err = meddler.Insert(tx, setClaimTableName, event.SetClaim); err != nil { - p.log.Errorf("failed to insert set claim event at block %d: %v", block.Num, err) - return err - } - } if event.BackwardLET != nil { if err := p.insertBackwardLET(ctx, tx, block.Num, event.BackwardLET); err != nil { @@ -1766,13 +1118,6 @@ func (p *processor) ProcessBlock(ctx context.Context, block sync.Block) error { blockPos = &newBlockPos } - if p.claimEventsProcessor != nil { - // ProcessBlock(ctx context.Context, block sync.Block) - if err := p.claimEventsProcessor.ProcessBlockWithTx(ctx, tx, &block, false); err != nil { - p.log.Errorf("failed to process claim events for block %d: %v", block.Num, err) - return err - } - } } if err := tx.Commit(); err != nil { p.log.Errorf("failed to commit db transaction (block number %d): %v", block.Num, err) @@ -2194,73 +1539,6 @@ func (p *processor) fetchTokenMappings(ctx context.Context, pageSize uint32, off return tokenMappings, nil } -// getCompactedClaimsCount returns the count of claims with compaction logic applied. -// - If unset_claim exists for a global_index, count all claims with that global_index -// - If no unset_claim exists, count only one per global_index (compacted) -// The count represents the total across all pages, matching what would be returned -// if all pages were queried. 
-func (p *processor) getCompactedClaimsCount(ctx context.Context, whereClause string) (int, error) { - // Create a context with database timeout - dbCtx, cancel := p.withDatabaseTimeout(ctx) - defer cancel() - - // Count query with compaction logic matching GetClaimsPaged: - // 1. Count all claims with unset_claim (no compaction, all returned) - // 2. Count distinct global_index for claims without unset_claim (compacted, one per global_index) - //nolint:gosec - query := fmt.Sprintf(` - WITH filtered_claims AS ( - SELECT * FROM claim %s - ) - SELECT - (SELECT COUNT(*) FROM filtered_claims - WHERE EXISTS ( - SELECT 1 FROM unset_claim uc - WHERE uc.global_index = filtered_claims.global_index - )) + - (SELECT COUNT(DISTINCT global_index) FROM filtered_claims - WHERE NOT EXISTS ( - SELECT 1 FROM unset_claim uc - WHERE uc.global_index = filtered_claims.global_index - )) AS total_count; - `, whereClause) - - count := 0 - err := p.db.QueryRowContext(dbCtx, query).Scan(&count) - if err != nil { - return 0, err - } - - return count, nil -} - -// GetClaimsByGER returns all DetailedClaimEvent claims with the given global exit root, -// ordered by block_num/block_pos ascending. If the claim table does not exist (e.g. L1 -// processor), returns nil, nil gracefully. 
-func (p *processor) GetClaimsByGER(ctx context.Context, globalExitRoot common.Hash) ([]*Claim, error) { - dbCtx, cancel := p.withDatabaseTimeout(ctx) - defer cancel() - - rows, err := p.db.QueryContext(dbCtx, claimsByGERSQL, globalExitRoot.Hex(), DetailedClaimEvent) - if err != nil { - if strings.Contains(err.Error(), "no such table") { - return nil, nil - } - return nil, fmt.Errorf("GetClaimsByGER: %w", err) - } - defer func() { - if cerr := rows.Close(); cerr != nil { - p.log.Errorf("error closing rows: %v", cerr) - } - }() - - claims := []*Claim{} - if err = meddler.ScanAll(rows, &claims); err != nil { - return nil, fmt.Errorf("GetClaimsByGER: scan: %w", err) - } - return claims, nil -} - // GetBridgeByDepositCount returns the bridge with the given deposit count from the bridge table, // falling back to bridge_archive if not found. Returns db.ErrNotFound if absent in both tables. func (p *processor) GetBridgeByDepositCount(ctx context.Context, depositCount uint32) (*Bridge, error) { @@ -2363,15 +1641,6 @@ func (p *processor) GetBridgesByContent( return result, nil } -// buildNetworkIDsFilter builds SQL filter for the given network IDs -func buildNetworkIDsFilter(networkIDs []uint32, networkIDColumn string) string { - placeholders := make([]string, len(networkIDs)) - for i, id := range networkIDs { - placeholders[i] = fmt.Sprintf("%d", id) - } - return fmt.Sprintf("%s IN (%s)", networkIDColumn, strings.Join(placeholders, ", ")) -} - // GenerateGlobalIndexForNetworkID builds the "global index" used to identify bridges and claims. 
func GenerateGlobalIndexForNetworkID(networkID uint32, depositCount uint32) *big.Int { rollupIndex := uint32(0) diff --git a/bridgesync/types/types.go b/bridgesync/types/types.go index ae839f6ee..5aca6224b 100644 --- a/bridgesync/types/types.go +++ b/bridgesync/types/types.go @@ -2,7 +2,6 @@ package types import ( "fmt" - "math/big" "strings" "github.com/ethereum/go-ethereum/common" @@ -10,12 +9,6 @@ import ( var EmptyLER = common.HexToHash("0x27ae5ba08d7291c96c8cbddcc148bf48a6d68c7974b94356f53754ef6171d757") -type Unclaim struct { - GlobalIndex *big.Int `json:"global_index"` - BlockNumber uint64 `json:"block_number"` - LogIndex uint64 `json:"log_index"` -} - const ( LeafTypeAsset LeafType = iota LeafTypeMessage diff --git a/claimsync/claim_data.go b/claimsync/claim_data.go new file mode 100644 index 000000000..158f85746 --- /dev/null +++ b/claimsync/claim_data.go @@ -0,0 +1,22 @@ +package claimsync + +import ( + claimsynctypes "github.com/agglayer/aggkit/claimsync/types" +) + +// ClaimType is an alias for claimsynctypes.ClaimType +type ClaimType = claimsynctypes.ClaimType + +const ( + ClaimEvent ClaimType = claimsynctypes.ClaimEvent + DetailedClaimEvent ClaimType = claimsynctypes.DetailedClaimEvent +) + +// Claim is an alias for claimsynctypes.Claim +type Claim = claimsynctypes.Claim + +// UnsetClaim is an alias for claimsynctypes.UnsetClaim +type UnsetClaim = claimsynctypes.UnsetClaim + +// SetClaim is an alias for claimsynctypes.SetClaim +type SetClaim = claimsynctypes.SetClaim diff --git a/bridgesync/claimcalldata_test.go b/claimsync/claimcalldata_test.go similarity index 99% rename from bridgesync/claimcalldata_test.go rename to claimsync/claimcalldata_test.go index cdc9c40cc..c975b2539 100644 --- a/bridgesync/claimcalldata_test.go +++ b/claimsync/claimcalldata_test.go @@ -1,4 +1,4 @@ -package bridgesync +package claimsync import ( "context" @@ -1103,7 +1103,7 @@ func TestClaimCalldata(t *testing.T) { require.NoError(t, err) // Use setClaimCalldataFromRoot 
instead of setClaimCalldata - err = actualClaim.setClaimCalldataFromRoot(rootCall, bridgeAddr, logger) + err = setClaimCalldataFromRoot(&actualClaim, rootCall, bridgeAddr, logger) require.NoError(t, err) require.Equal(t, tc.expectedClaim, actualClaim) }) diff --git a/claimsync/claimsync.go b/claimsync/claimsync.go index 04598e952..8f1a06d4b 100644 --- a/claimsync/claimsync.go +++ b/claimsync/claimsync.go @@ -3,10 +3,10 @@ package claimsync import ( "context" "fmt" + "math/big" "time" "github.com/0xPolygon/cdk-contracts-tooling/contracts/aggchain-multisig/agglayerbridge" - "github.com/agglayer/aggkit/bridgesync" claimsyncStorage "github.com/agglayer/aggkit/claimsync/storage" claimsynctypes "github.com/agglayer/aggkit/claimsync/types" aggkitcommon "github.com/agglayer/aggkit/common" @@ -22,50 +22,36 @@ const ( defaultDBTimeout = 30 * time.Second ) -// ClaimSyncer is the interface for the claim syncer component used by aggsender. -type ClaimSyncer interface { - Start(ctx context.Context) -} - -// NewFromBridgeSync creates a ClaimSyncer backed by an existing BridgeSync that -// has an embedded claim processor. It returns nil if bs is nil. -func NewFromBridgeSync(bs *bridgesync.BridgeSync) ClaimSyncer { - if bs == nil { - return nil - } - return &bridgeSyncClaimSyncer{bs: bs} -} - -type bridgeSyncClaimSyncer struct { - bs *bridgesync.BridgeSync -} - -func (b *bridgeSyncClaimSyncer) Start(_ context.Context) {} - // ClaimSync is the standalone implementation that independently processes claim events. type ClaimSync struct { - processor *processor - driver *sync.EVMDriver + processor *processor + driver *sync.EVMDriver + reader claimsynctypes.ClaimsReader + ethClient aggkittypes.EthClienter + logger aggkitcommon.Logger + originNetwork uint32 } // NewStandaloneClaimSync creates a standalone ClaimSync that indexes claim events from the bridge contract directly. 
func NewStandaloneClaimSync( ctx context.Context, - cfg bridgesync.Config, + cfg ConfigStandalone, rd sync.ReorgDetector, ethClient aggkittypes.EthClienter, syncerID claimsynctypes.ClaimSyncerID, + originNetwork uint32, ) (*ClaimSync, error) { logger := log.WithFields("module", syncerID.String()) - return NewClaimSync(ctx, cfg, rd, ethClient, syncerID, logger) + return NewClaimSync(ctx, cfg, rd, ethClient, originNetwork, syncerID, logger) } // NewClaimSync creates a standalone ClaimSync that indexes claim events from the bridge contract directly. func NewClaimSync( ctx context.Context, - cfg bridgesync.Config, + cfg ConfigStandalone, rd sync.ReorgDetector, ethClient aggkittypes.EthClienter, + originNetwork uint32, syncerID claimsynctypes.ClaimSyncerID, logger aggkitcommon.Logger, ) (*ClaimSync, error) { @@ -74,7 +60,7 @@ func NewClaimSync( if dbQueryTimeout == 0 { dbQueryTimeout = defaultDBTimeout } - store, err := claimsyncStorage.NewStandalone(logger, cfg.DBPath, syncerID.String()) + store, err := claimsyncStorage.NewStandalone(logger, cfg.DBPath, syncerID.String(), cfg.DBQueryTimeout.Duration) if err != nil { return nil, fmt.Errorf("claimsync: failed to create storage: %w", err) } @@ -121,20 +107,20 @@ func NewClaimSync( if err != nil { return nil, fmt.Errorf("claimsync: failed to create EVMDownloader: %w", err) } - - lastBlock, err := proc.GetLastProcessedBlock(ctx) - if err != nil { - return nil, fmt.Errorf("claimsync: get last processed block: %w", err) - } - if lastBlock < cfg.InitialBlockNum { - header, err := ethClient.CustomHeaderByNumber(ctx, aggkittypes.NewBlockNumber(cfg.InitialBlockNum)) - if err != nil { - return nil, fmt.Errorf("claimsync: get initial block %d: %w", cfg.InitialBlockNum, err) - } - if err := proc.ProcessBlock(ctx, sync.Block{Num: cfg.InitialBlockNum, Hash: header.Hash}); err != nil { - return nil, fmt.Errorf("claimsync: process initial block %d: %w", cfg.InitialBlockNum, err) - } - } + // TODO: Remove + // lastBlock, _, err := 
proc.GetLastProcessedBlock(ctx) + // if err != nil { + // return nil, fmt.Errorf("claimsync: get last processed block: %w", err) + // } + // if lastBlock < cfg.InitialBlockNum { + // header, err := ethClient.CustomHeaderByNumber(ctx, aggkittypes.NewBlockNumber(cfg.InitialBlockNum)) + // if err != nil { + // return nil, fmt.Errorf("claimsync: get initial block %d: %w", cfg.InitialBlockNum, err) + // } + // if err := proc.ProcessBlock(ctx, sync.Block{Num: cfg.InitialBlockNum, Hash: header.Hash}); err != nil { + // return nil, fmt.Errorf("claimsync: process initial block %d: %w", cfg.InitialBlockNum, err) + // } + // } compatibilityChecker := compatibility.NewCompatibilityCheck( cfg.RequireStorageContentCompatibility, @@ -153,12 +139,81 @@ func NewClaimSync( ) return &ClaimSync{ - processor: proc, - driver: driver, + processor: proc, + driver: driver, + reader: store, + ethClient: ethClient, + logger: logger, + originNetwork: originNetwork, }, nil } // Start starts the synchronization process. 
func (c *ClaimSync) Start(ctx context.Context) { + c.logger.Info("starting claim synchronizer") c.driver.Sync(ctx) } + +// OriginNetwork returns the network ID of the origin chain + +func (c *ClaimSync) OriginNetwork() uint32 { + return c.originNetwork +} + +func (c *ClaimSync) SetNextRequiredBlock(ctx context.Context, blockNumber uint64) error { + lastBlock, found, err := c.processor.GetLastProcessedBlock(ctx) + if err != nil { + return fmt.Errorf("claimsync: failed to get last processed block: %w", err) + } + if !found { + if blockNumber == 0 { + err := fmt.Errorf("claimsync: cannot set next required block to 0, invalid block number") + c.logger.Error(err) + return err + } + if err := c.createStartingPoint(ctx, blockNumber-1); err != nil { + return fmt.Errorf("claimsync: failed to createStartingPoint: %w", err) + } + c.logger.Infof("Set next required block to %d (no processed blocks found)", blockNumber) + return nil + } + firstBlock, _, err := c.processor.GetFirstProcessedBlock(ctx) + if err != nil { + return fmt.Errorf("claimsync: failed to get first processed block: %w", err) + } + if blockNumber <= firstBlock { + return fmt.Errorf("claimsync: cannot set next required block to %d, it must be greater than the first block in DB (%d)", + blockNumber, firstBlock) + } + if blockNumber > lastBlock { + c.logger.Infof("Cannot set next required block to %d because is running,"+ + " last processed block is %d. 
Distance: %d", blockNumber, lastBlock, + blockNumber-lastBlock) + } + + return nil +} + +func (c *ClaimSync) GetLastProcessedBlock(ctx context.Context) (uint64, bool, error) { + return c.reader.GetLastProcessedBlock(ctx, nil) +} + +func (c *ClaimSync) GetClaims(ctx context.Context, fromBlock, toBlock uint64) ([]claimsynctypes.Claim, error) { + return c.reader.GetClaims(ctx, nil, fromBlock, toBlock) +} + +func (c *ClaimSync) GetClaimsByGlobalIndex(ctx context.Context, globalIndex *big.Int) ([]claimsynctypes.Claim, error) { + return c.reader.GetClaimsByGlobalIndex(ctx, nil, globalIndex) +} + +func (c *ClaimSync) createStartingPoint(ctx context.Context, blockNumber uint64) error { + c.logger.Infof("creating starting point at block %d:", blockNumber) + header, err := c.ethClient.CustomHeaderByNumber(ctx, aggkittypes.NewBlockNumber(blockNumber)) + if err != nil { + return fmt.Errorf("claimsync: get header for block %d: %w", blockNumber, err) + } + if err := c.processor.ProcessBlock(ctx, sync.Block{Num: blockNumber, Hash: header.Hash}); err != nil { + return fmt.Errorf("claimsync: process block %d: %w", blockNumber, err) + } + return nil +} diff --git a/claimsync/claimsync_rpc.go b/claimsync/claimsync_rpc.go new file mode 100644 index 000000000..122f66e76 --- /dev/null +++ b/claimsync/claimsync_rpc.go @@ -0,0 +1,115 @@ +package claimsync + +import ( + "context" + "fmt" + "math/big" + + jRPC "github.com/0xPolygon/cdk-rpc/rpc" + claimsynctypes "github.com/agglayer/aggkit/claimsync/types" + aggkitcommon "github.com/agglayer/aggkit/common" + "github.com/agglayer/aggkit/log" +) + +// ClaimSyncer is the interface required by ClaimSyncRPC. 
+type ClaimSyncer interface { + GetLastProcessedBlock(ctx context.Context) (uint64, bool, error) + GetClaims(ctx context.Context, fromBlock, toBlock uint64) ([]claimsynctypes.Claim, error) + GetClaimsByGlobalIndex(ctx context.Context, globalIndex *big.Int) ([]claimsynctypes.Claim, error) + SetNextRequiredBlock(ctx context.Context, blockNum uint64) error +} + +// ClaimSyncRPC is the RPC interface for the ClaimSync component. +type ClaimSyncRPC struct { + logger aggkitcommon.Logger + claimSync ClaimSyncer +} + +// NewClaimSyncRPC creates a new ClaimSyncRPC. +func NewClaimSyncRPC(logger aggkitcommon.Logger, claimSync ClaimSyncer) *ClaimSyncRPC { + return &ClaimSyncRPC{ + logger: logger, + claimSync: claimSync, + } +} + +// Status returns the sync status of the ClaimSync component. +// curl -X POST http://localhost:5576/ -H "Content-Type: application/json" \ +// -d '{"method":"l2claimsync_status", "params":[], "id":1}' +func (r *ClaimSyncRPC) Status() (interface{}, jRPC.Error) { + lastBlock, _, err := r.claimSync.GetLastProcessedBlock(context.Background()) + if err != nil { + return nil, jRPC.NewRPCError(jRPC.DefaultErrorCode, + "ClaimSyncRPC.Status: getting last processed block: %v", err) + } + info := struct { + Status string `json:"status"` + LastProcessedBlock uint64 `json:"lastProcessedBlock"` + }{ + Status: "running", + LastProcessedBlock: lastBlock, + } + return info, nil +} + +// GetClaims returns claims indexed between fromBlock and toBlock. 
+// curl -X POST http://localhost:5576/ -H "Content-Type: application/json" \ +// -d '{"method":"l2claimsync_getClaims", "params":[0, 1000], "id":1}' +func (r *ClaimSyncRPC) GetClaims(fromBlock, toBlock uint64) (interface{}, jRPC.Error) { + r.logger.Infof("RPC call: l2claimsync_getClaims(%d, %d)", fromBlock, toBlock) + claims, err := r.claimSync.GetClaims(context.Background(), fromBlock, toBlock) + if err != nil { + return nil, jRPC.NewRPCError(jRPC.DefaultErrorCode, + fmt.Sprintf("ClaimSyncRPC.GetClaims: %v", err)) + } + return claims, nil +} + +// GetClaimsByGlobalIndex returns claims for the given global index. +// curl -X POST http://localhost:5576/ -H "Content-Type: application/json" \ +// -d '{"method":"l2claimsync_getClaimsByGlobalIndex", "params":["123"], "id":1}' +func (r *ClaimSyncRPC) GetClaimsByGlobalIndex(globalIndexStr string) (interface{}, jRPC.Error) { + r.logger.Infof("RPC call: l2claimsync_getClaimsByGlobalIndex(%s)", globalIndexStr) + globalIndex := new(big.Int) + if _, ok := globalIndex.SetString(globalIndexStr, 10); !ok { + return nil, jRPC.NewRPCError(jRPC.DefaultErrorCode, + "ClaimSyncRPC.GetClaimsByGlobalIndex: invalid global index: %s", globalIndexStr) + } + claims, err := r.claimSync.GetClaimsByGlobalIndex(context.Background(), globalIndex) + if err != nil { + return nil, jRPC.NewRPCError(jRPC.DefaultErrorCode, + fmt.Sprintf("ClaimSyncRPC.GetClaimsByGlobalIndex: %v", err)) + } + if len(claims) == 0 { + return nil, jRPC.NewRPCError(jRPC.NotFoundErrorCode, + "no claims found for global index %s", globalIndexStr) + } + return claims, nil +} + +// SetNextRequiredBlock sets the next block number that the synchronizer must process. 
+// curl -X POST http://localhost:5576/ -H "Content-Type: application/json" \ +// -d '{"method":"l2claimsync_setNextRequiredBlock", "params":[1000], "id":1}' +func (r *ClaimSyncRPC) SetNextRequiredBlock(blockNum uint64) (interface{}, jRPC.Error) { + r.logger.Infof("RPC call: l2claimsync_setNextRequiredBlock(%d)", blockNum) + if err := r.claimSync.SetNextRequiredBlock(context.Background(), blockNum); err != nil { + return nil, jRPC.NewRPCError(jRPC.DefaultErrorCode, + fmt.Sprintf("ClaimSyncRPC.SetNextRequiredBlock: %s", err.Error())) + } + return struct { + Message string `json:"message"` + }{ + Message: fmt.Sprintf("next required block set to %d", blockNum), + }, nil +} + +// GetRPCServices returns the RPC services exposed by ClaimSync. +func (c *ClaimSync) GetRPCServices() []jRPC.Service { + logger := log.WithFields("module", "l2claimsync-rpc") + return []jRPC.Service{ + { + Name: "l2claimsync", + Service: NewClaimSyncRPC(logger, c), + }, + } +} diff --git a/claimsync/claimsync_test.go b/claimsync/claimsync_test.go new file mode 100644 index 000000000..8704805bd --- /dev/null +++ b/claimsync/claimsync_test.go @@ -0,0 +1,96 @@ +package claimsync + +import ( + "context" + "math/big" + "path" + "testing" + "time" + + claimsynctypes "github.com/agglayer/aggkit/claimsync/types" + configtypes "github.com/agglayer/aggkit/config/types" + "github.com/agglayer/aggkit/log" + "github.com/agglayer/aggkit/reorgdetector" + "github.com/agglayer/aggkit/test/contracts/claimmock" + tree "github.com/agglayer/aggkit/tree/types" + aggkittypes "github.com/agglayer/aggkit/types" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" +) + +// TestClaimSyncerWaitUntilSetNextRequiredBlock verifies the deferred start behavior of ClaimSyncer: +// it must not begin syncing until an explicit starting block is provided via SetNextRequiredBlock. +// +// Steps: +// 1. Spin up a local Geth node and deploy a Claimmock contract simulating the bridge. +// 2. 
Create a ClaimSyncer and start it in a goroutine. +// 3. Emit a ClaimAsset event on-chain and wait for the tx receipt. +// 4. Assert GetLastProcessedBlock returns found=false — the syncer is idle, waiting for a start signal. +// 5. Call SetNextRequiredBlock(ctx, 1) to unlock the syncer. +// 6. Assert GetLastProcessedBlock returns found=true — the syncer processed the blocks and captured the event. +func TestClaimSyncerWaitUntilSetNextRequiredBlock(t *testing.T) { + ctx, cancelFn := context.WithCancel(context.Background()) + // Setup Docker L1 + client, auth := startGeth(t, ctx, cancelFn) + // Deploy contracts + bridgeAddr, _, bridgeContract, err := claimmock.DeployClaimmock(auth, client) + require.NoError(t, err) + dbPathSyncer := path.Join(t.TempDir(), "claimsyncer.sqlite") + + cfg := ConfigStandalone{ + DBPath: dbPathSyncer, + BlockFinality: aggkittypes.LatestBlock, + InitialBlockNum: 0, + SyncBlockChunkSize: 100, + RetryAfterErrorPeriod: configtypes.NewDuration(time.Second), + WaitForNewBlocksPeriod: configtypes.NewDuration(time.Second), + RequireStorageContentCompatibility: true, + ConfigEmbedded: ConfigEmbedded{ + DBQueryTimeout: configtypes.NewDuration(5 * time.Second), + BridgeAddr: bridgeAddr, + }, + } + logger := log.WithFields("test", "TestClaimSync") + reorgDetector, err := reorgdetector.New(client, reorgdetector.Config{ + DBPath: path.Join(t.TempDir(), "reorgdetector.sqlite"), + CheckReorgsInterval: configtypes.NewDuration(30 * time.Second), + FinalizedBlock: aggkittypes.LatestBlock, + }, reorgdetector.L1) + require.NoError(t, err) + claimSyncer, err := NewClaimSync(ctx, cfg, reorgDetector, client, 0, claimsynctypes.L1ClaimSyncer, logger) + require.NoError(t, err) + go claimSyncer.Start(ctx) + globalIndex := big.NewInt(1) + mainnetExitRoot := common.HexToHash("beef") + rollupExitRoot := common.HexToHash("dead") + tx, err := bridgeContract.ClaimAsset( + auth, + [tree.DefaultHeight][common.HashLength]byte{}, // proofLocal + 
[tree.DefaultHeight][common.HashLength]byte{}, // proofRollup + globalIndex, + mainnetExitRoot, + rollupExitRoot, + uint32(0), // originNetwork + common.Address{}, // originTokenAddress/originAddress + uint32(0), // destinationNetwork + common.Address{}, // destinationAddress + big.NewInt(0), // amount + []byte("metadata"), // metadata + ) + require.NoError(t, err) + + _, err = waitForReceipt(ctx, client, tx.Hash(), 10) + require.NoError(t, err) + logger.Info("*** ClaimSyncer must be waiting to receive the starting point") + _, found, error := claimSyncer.GetLastProcessedBlock(ctx) + require.NoError(t, error) + require.False(t, found) + logger.Info("*** Setting next required block to 1, so must starting syncing and sync the ClaimAsset") + err = claimSyncer.SetNextRequiredBlock(ctx, 1) + require.NoError(t, err) + time.Sleep(time.Second * 5) + lastBlockProcessed, found, error := claimSyncer.GetLastProcessedBlock(ctx) + require.NoError(t, error) + require.True(t, found) + logger.Infof("*** Last block processed: %d", lastBlockProcessed) +} diff --git a/claimsync/config.go b/claimsync/config.go new file mode 100644 index 000000000..9e41b62f0 --- /dev/null +++ b/claimsync/config.go @@ -0,0 +1,54 @@ +package claimsync + +import ( + "fmt" + + "github.com/agglayer/aggkit/config/types" + aggkittypes "github.com/agglayer/aggkit/types" + "github.com/ethereum/go-ethereum/common" +) + +type ConfigEmbedded struct { + // DBQueryTimeout is the timeout for database operations (queries, transactions) + // This is separate from HTTP timeouts to allow database operations more time when needed + DBQueryTimeout types.Duration `mapstructure:"DBQueryTimeout"` + // BridgeAddr is the address of the bridge smart contract + BridgeAddr common.Address `mapstructure:"BridgeAddr"` +} + +type ConfigStandalone struct { + ConfigEmbedded + // DBPath path of the DB + DBPath string `mapstructure:"DBPath"` + // BlockFinality indicates the status of the blocks that will be queried in order to sync + 
BlockFinality aggkittypes.BlockNumberFinality `jsonschema:"enum=LatestBlock, enum=SafeBlock, enum=PendingBlock, enum=FinalizedBlock, enum=EarliestBlock" mapstructure:"BlockFinality"` //nolint:lll + // InitialBlockNum is the first block that will be queried when starting the synchronization from scratch. + // It should be a number equal to or below the creation of the bridge contract + InitialBlockNum uint64 `mapstructure:"InitialBlockNum"` + // SyncBlockChunkSize is the amount of blocks that will be queried to the client on each request + SyncBlockChunkSize uint64 `mapstructure:"SyncBlockChunkSize"` + // RetryAfterErrorPeriod is the time that will be waited when an unexpected error happens before retry + RetryAfterErrorPeriod types.Duration `mapstructure:"RetryAfterErrorPeriod"` + // MaxRetryAttemptsAfterError is the maximum number of consecutive attempts that will happen before panicking. + // Any number smaller than zero will be considered as unlimited retries + MaxRetryAttemptsAfterError int `mapstructure:"MaxRetryAttemptsAfterError"` + // WaitForNewBlocksPeriod time that will be waited when the synchronizer has reached the latest block + WaitForNewBlocksPeriod types.Duration `mapstructure:"WaitForNewBlocksPeriod"` + // RequireStorageContentCompatibility, if true, makes it mandatory that data stored in the database + // is compatible with the running environment + RequireStorageContentCompatibility bool `mapstructure:"RequireStorageContentCompatibility"` +} + +func (c ConfigEmbedded) Validate() error { + return nil +} + +func (c ConfigStandalone) Validate() error { + if err := c.ConfigEmbedded.Validate(); err != nil { + return err + } + if err := c.BlockFinality.Validate(); err != nil { + return fmt.Errorf("invalid BlockFinality configuration: %w", err) + } + return nil +} diff --git a/bridgesync/docker-compose.yml b/claimsync/docker-compose.yml similarity index 100% rename from bridgesync/docker-compose.yml rename to claimsync/docker-compose.yml diff --git 
a/claimsync/downloader.go b/claimsync/downloader.go index bcbb9be07..8ffd5696c 100644 --- a/claimsync/downloader.go +++ b/claimsync/downloader.go @@ -1,6 +1,7 @@ package claimsync import ( + "bytes" "context" "errors" "fmt" @@ -10,11 +11,12 @@ import ( "github.com/0xPolygon/cdk-contracts-tooling/contracts/aggchain-multisig/agglayerbridge" "github.com/0xPolygon/cdk-contracts-tooling/contracts/aggchain-multisig/agglayerbridgel2" "github.com/0xPolygon/cdk-contracts-tooling/contracts/aggchain-multisig/polygonzkevmbridge" - "github.com/agglayer/aggkit/bridgesync" + rpctypes "github.com/0xPolygon/cdk-rpc/types" bridgesynctypes "github.com/agglayer/aggkit/bridgesync/types" aggkitcommon "github.com/agglayer/aggkit/common" "github.com/agglayer/aggkit/db" dbtypes "github.com/agglayer/aggkit/db/types" + "github.com/agglayer/aggkit/log" "github.com/agglayer/aggkit/sync" treetypes "github.com/agglayer/aggkit/tree/types" aggkittypes "github.com/agglayer/aggkit/types" @@ -23,6 +25,7 @@ import ( "github.com/ethereum/go-ethereum/core/types" gethvm "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/crypto" + "github.com/golang-collections/collections/stack" ) var ( @@ -35,11 +38,31 @@ var ( )) unsetClaimEventSignature = crypto.Keccak256Hash([]byte("UpdatedUnsetGlobalIndexHashChain(bytes32,bytes32)")) setClaimEventSignature = crypto.Keccak256Hash([]byte("SetClaim(bytes32)")) + + claimAssetEtrogMethodID = common.Hex2Bytes("ccaa2d11") + claimMessageEtrogMethodID = common.Hex2Bytes("f5efcd79") + claimAssetPreEtrogMethodID = common.Hex2Bytes("2cffd02e") + claimMessagePreEtrogMethodID = common.Hex2Bytes("2d2c9d94") +) + +const ( + // DebugTraceTxEndpoint is the name of the debug method used to trace a transaction. + DebugTraceTxEndpoint = "debug_traceTransaction" + // GetTransactionByHashEndpoint is the name of the method used to get transaction details by hash. 
+ GetTransactionByHashEndpoint = "eth_getTransactionByHash" + // callTracerType is the name of the call tracer + callTracerType = "callTracer" + + // methodIDLength is the length of the method ID in bytes + methodIDLength = 4 + + bridgeLeafTypeMessage = uint8(bridgesynctypes.LeafTypeMessage) + bridgeLeafTypeAsset = uint8(bridgesynctypes.LeafTypeAsset) ) // claimQuerier is used by event handlers to check the DetailedClaimEvent boundary. type ClaimQuerier interface { - GetBoundaryBlockForClaimType(tx dbtypes.Querier, claimType bridgesync.ClaimType) (uint64, error) + GetBoundaryBlockForClaimType(ctx context.Context, tx dbtypes.Querier, claimType ClaimType) (uint64, error) } // buildAppender creates the LogAppenderMap for claim events from the bridge contract. @@ -57,10 +80,12 @@ func buildAppender( if err != nil { return nil, fmt.Errorf("claimsync: failed to create PolygonZkEVMBridge binding: %w", err) } - + // TODO: Check syncfullclaims + syncFullClaims := true appender := make(sync.LogAppenderMap) - appender[claimEventSignaturePreEtrog] = buildClaimEventHandlerPreEtrog(legacyBridge, log) - appender[claimEventSignature] = buildClaimEventHandler(ctx, agglayerBridgeContract, querier, log) + appender[claimEventSignaturePreEtrog] = buildClaimEventHandlerPreEtrog(legacyBridge, ethClient, bridgeAddr, syncFullClaims, log) + + appender[claimEventSignature] = buildClaimEventHandler(ctx, agglayerBridgeContract, ethClient, querier, bridgeAddr, syncFullClaims, log) if isSovereign { appender[detailedClaimEventSignature] = buildDetailedClaimEventHandler(agglayerBridgeL2Contract) @@ -97,12 +122,15 @@ func detectSovereignChain( func buildClaimEventHandler( ctx context.Context, contract *agglayerbridge.Agglayerbridge, + client aggkittypes.EthClienter, querier ClaimQuerier, + bridgeAddr common.Address, + syncFullClaims bool, log aggkitcommon.Logger, ) func(*sync.EVMBlock, types.Log) error { return func(b *sync.EVMBlock, l types.Log) error { // Skip if DetailedClaimEvent indexing has 
already started at this block - boundaryBlock, err := querier.GetBoundaryBlockForClaimType(nil, bridgesync.DetailedClaimEvent) + boundaryBlock, err := querier.GetBoundaryBlockForClaimType(ctx, nil, DetailedClaimEvent) if err != nil && !errors.Is(err, db.ErrNotFound) { return fmt.Errorf("claimsync: failed checking DetailedClaimEvent boundary: %w", err) } @@ -114,8 +142,8 @@ func buildClaimEventHandler( // Skip if a DetailedClaimEvent for the same tx is already in the block's events for _, raw := range b.Events { - if e, ok := raw.(bridgesync.Event); ok && e.Claim != nil && - e.Claim.Type == bridgesync.DetailedClaimEvent && e.Claim.TxHash == l.TxHash { + if e, ok := raw.(Event); ok && e.Claim != nil && + e.Claim.Type == DetailedClaimEvent && e.Claim.TxHash == l.TxHash { log.Debugf("claimsync: skipping ClaimEvent at block %d tx %s; DetailedClaimEvent already present", l.BlockNumber, l.TxHash.Hex()) return nil @@ -127,7 +155,7 @@ func buildClaimEventHandler( return fmt.Errorf("claimsync: error parsing ClaimEvent log: %w", err) } - b.Events = append(b.Events, bridgesync.Event{Claim: &bridgesync.Claim{ + claim := &Claim{ BlockNum: b.Num, BlockPos: uint64(l.Index), BlockTimestamp: b.Timestamp, @@ -137,8 +165,26 @@ func buildClaimEventHandler( OriginAddress: claimEvent.OriginAddress, DestinationAddress: claimEvent.DestinationAddress, Amount: claimEvent.Amount, - Type: bridgesync.ClaimEvent, - }}) + Type: ClaimEvent, + } + + // Extract root call for txn_sender and error checking + _, rootCall, err := extractCallData(client, bridgeAddr, l.TxHash, log, nil) + if err != nil { + return fmt.Errorf("failed to extract claim event tx sender (tx hash: %s): %w", l.TxHash, err) + } + // Check if the root call was successful + if rootCall.Err != nil { + return fmt.Errorf("execution reverted in root call (block %d, tx hash: %s): %s", b.Num, l.TxHash, *rootCall.Err) + } + + if syncFullClaims { + if err := setClaimCalldataFromRoot(claim, rootCall, bridgeAddr, log); err != nil { + return 
err + } + } + + b.Events = append(b.Events, Event{Claim: claim}) return nil } } @@ -153,7 +199,7 @@ func buildDetailedClaimEventHandler( return fmt.Errorf("claimsync: error parsing DetailedClaimEvent log: %w", err) } - claim := &bridgesync.Claim{ + claim := &Claim{ BlockNum: b.Num, BlockPos: uint64(l.Index), BlockTimestamp: b.Timestamp, @@ -171,20 +217,20 @@ func buildDetailedClaimEventHandler( ProofRollupExitRoot: treetypes.NewProof(claimEvent.SmtProofRollupExitRoot), GlobalExitRoot: crypto.Keccak256Hash(claimEvent.MainnetExitRoot[:], claimEvent.RollupExitRoot[:]), IsMessage: claimEvent.LeafType == uint8(bridgesynctypes.LeafTypeMessage), - Type: bridgesync.DetailedClaimEvent, + Type: DetailedClaimEvent, } // Remove any ClaimEvent for the same tx (DetailedClaimEvent takes precedence) newEvents := make([]interface{}, 0, len(b.Events)) for _, raw := range b.Events { - if e, ok := raw.(bridgesync.Event); ok && e.Claim != nil && - e.Claim.Type == bridgesync.ClaimEvent && e.Claim.TxHash == l.TxHash { + if e, ok := raw.(Event); ok && e.Claim != nil && + e.Claim.Type == ClaimEvent && e.Claim.TxHash == l.TxHash { continue } newEvents = append(newEvents, raw) } b.Events = newEvents - b.Events = append(b.Events, bridgesync.Event{Claim: claim}) + b.Events = append(b.Events, Event{Claim: claim}) return nil } } @@ -192,7 +238,10 @@ func buildDetailedClaimEventHandler( // buildClaimEventHandlerPreEtrog creates a handler for the pre-Etrog ClaimEvent log. 
func buildClaimEventHandlerPreEtrog( contract *polygonzkevmbridge.Polygonzkevmbridge, - log aggkitcommon.Logger, + client aggkittypes.EthClienter, + bridgeAddr common.Address, + syncFullClaims bool, + logger aggkitcommon.Logger, ) func(*sync.EVMBlock, types.Log) error { return func(b *sync.EVMBlock, l types.Log) error { claimEvent, err := contract.ParseClaimEvent(l) @@ -201,7 +250,7 @@ func buildClaimEventHandlerPreEtrog( } log.Debugf("claimsync: parsed pre-Etrog ClaimEvent: index %d block %d", claimEvent.Index, b.Num) - b.Events = append(b.Events, bridgesync.Event{Claim: &bridgesync.Claim{ + claim := &Claim{ BlockNum: b.Num, BlockPos: uint64(l.Index), BlockTimestamp: b.Timestamp, @@ -211,7 +260,24 @@ func buildClaimEventHandlerPreEtrog( OriginAddress: claimEvent.OriginAddress, DestinationAddress: claimEvent.DestinationAddress, Amount: claimEvent.Amount, - }}) + } + // Extract root call for txn_sender and error checking + _, rootCall, err := extractCallData(client, bridgeAddr, l.TxHash, logger, nil) + if err != nil { + return fmt.Errorf("failed to extract claim event tx sender (tx hash: %s): %w", l.TxHash, err) + } + // Check if the root call was successful + if rootCall.Err != nil { + return fmt.Errorf("execution reverted in root call (block %d, tx hash: %s): %s", b.Num, l.TxHash, *rootCall.Err) + } + + if syncFullClaims { + if err := setClaimCalldataFromRoot(claim, rootCall, bridgeAddr, logger); err != nil { + return err + } + } + + b.Events = append(b.Events, Event{Claim: claim}) return nil } } @@ -226,7 +292,7 @@ func buildUnsetClaimEventHandler( return fmt.Errorf("claimsync: error parsing UpdatedUnsetGlobalIndexHashChain log: %w", err) } - b.Events = append(b.Events, bridgesync.Event{UnsetClaim: &bridgesync.UnsetClaim{ + b.Events = append(b.Events, Event{UnsetClaim: &UnsetClaim{ BlockNum: b.Num, BlockPos: uint64(l.Index), TxHash: l.TxHash, @@ -247,7 +313,7 @@ func buildSetClaimEventHandler( return fmt.Errorf("claimsync: error parsing SetClaim log: %w", err) } 
- b.Events = append(b.Events, bridgesync.Event{SetClaim: &bridgesync.SetClaim{ + b.Events = append(b.Events, Event{SetClaim: &SetClaim{ BlockNum: b.Num, BlockPos: uint64(l.Index), TxHash: l.TxHash, @@ -256,3 +322,197 @@ func buildSetClaimEventHandler( return nil } } + +type Call struct { + From common.Address `json:"from"` + To common.Address `json:"to"` + Value *rpctypes.ArgBig `json:"value"` + Err *string `json:"error"` + Input rpctypes.ArgBytes `json:"input"` + Calls []Call `json:"calls"` +} + +type tracerCfg struct { + Tracer string `json:"tracer"` +} + +// findCall traverses the call trace using DFS and either returns the call or stops when a callback succeeds. +func findCall(rootCall Call, targetAddr common.Address, callback func(Call) (bool, error), logger aggkitcommon.Logger, +) ([]*Call, error) { + callStack := stack.New() + callStack.Push(rootCall) + matchingCalls := []*Call{} + for callStack.Len() > 0 { + currentCallInterface := callStack.Pop() + currentCall, ok := currentCallInterface.(Call) + if !ok { + return nil, fmt.Errorf("unexpected type for 'currentCall'. Expected 'call', got '%T'", currentCallInterface) + } + + // Skip reverted calls + if currentCall.Err != nil { + logger.Debugf("skipping reverted call to %s from %s: %s", + currentCall.To.Hex(), currentCall.From.Hex(), *currentCall.Err) + continue + } + + if currentCall.To == targetAddr { + if callback != nil { + found, err := callback(currentCall) + if err != nil { + return nil, err + } + if found { + matchingCalls = append(matchingCalls, ¤tCall) + } + } else { + matchingCalls = append(matchingCalls, ¤tCall) + } + } + + // Add non-reverted calls to the stack + for _, c := range currentCall.Calls { + if c.Err == nil { + callStack.Push(c) + } + } + } + if len(matchingCalls) > 0 { + return matchingCalls, nil + } + return nil, db.ErrNotFound +} + +// extractRootCall extracts the root call for a transaction using debug_traceTransaction. 
+func extractRootCall(client aggkittypes.RPCClienter, contractAddr common.Address, txHash common.Hash) (*Call, error) { + rootCall := &Call{To: contractAddr} + err := client.Call(rootCall, DebugTraceTxEndpoint, txHash, tracerCfg{Tracer: callTracerType}) + if err != nil { + return nil, err + } + return rootCall, nil +} + +func extractCallData( + client aggkittypes.RPCClienter, + bridgeAddr common.Address, + txHash common.Hash, + logger aggkitcommon.Logger, + callback func(c Call) (bool, error), +) (foundCalls []*Call, rootCall *Call, err error) { + // Extract root call first + rootCall, err = extractRootCall(client, bridgeAddr, txHash) + if err != nil { + return nil, nil, err + } + + // Find the specific call to the bridge contract + foundCalls, err = findCall(*rootCall, bridgeAddr, callback, logger) + if err != nil { + return nil, nil, err + } + + return foundCalls, rootCall, nil +} + +// setClaimCalldataFromRoot finds and decodes calldata for the given bridge address using an already traced root call. +// +// Parameters: +// - rootCall: Already traced root call. +// - bridge: Target contract address. +// - logger: Logger instance for debug logging. +// +// Returns an error if calldata isn't found. +func setClaimCalldataFromRoot( + c *Claim, + rootCall *Call, + bridge common.Address, + logger aggkitcommon.Logger, +) error { + _, err := findCall(*rootCall, bridge, + func(call Call) (bool, error) { + // Skip reverted calls + if call.Err != nil { + return false, nil + } + return tryDecodeClaimCalldata(c, call.Input, logger) + }, logger) + + return err +} + +// tryDecodeClaimCalldata attempts to find and decode the claim calldata from the provided input bytes. +// It checks if the method ID corresponds to either the claim asset or claim message methods. +// If a match is found, it decodes the calldata using the ABI of the bridge contract and updates the claim object. 
+// Returns true if the calldata is successfully decoded and matches the expected format, otherwise returns false. +func tryDecodeClaimCalldata(c *Claim, input []byte, logger aggkitcommon.Logger) (bool, error) { + if len(input) < methodIDLength { + return false, fmt.Errorf("input too short: %d bytes", len(input)) + } + methodID := input[:methodIDLength] + switch { + case bytes.Equal(methodID, claimAssetEtrogMethodID): + fallthrough + case bytes.Equal(methodID, claimMessageEtrogMethodID): + bridgeV2ABI, err := agglayerbridge.AgglayerbridgeMetaData.GetAbi() + if err != nil { + return false, err + } + // Recover Method from signature and ABI + method, err := bridgeV2ABI.MethodById(methodID) + if err != nil { + return false, err + } + + data, err := method.Inputs.Unpack(input[methodIDLength:]) + if err != nil { + return false, err + } + + found, err := c.DecodeEtrogCalldata(data) + if err != nil { + return false, err + } + + if found { + c.IsMessage = bytes.Equal(methodID, claimMessageEtrogMethodID) + } + + return found, nil + + case bytes.Equal(methodID, claimAssetPreEtrogMethodID): + fallthrough + case bytes.Equal(methodID, claimMessagePreEtrogMethodID): + bridgeABI, err := polygonzkevmbridge.PolygonzkevmbridgeMetaData.GetAbi() + if err != nil { + return false, err + } + + // Recover Method from signature and ABI + method, err := bridgeABI.MethodById(methodID) + if err != nil { + return false, err + } + + data, err := method.Inputs.Unpack(input[methodIDLength:]) + if err != nil { + return false, err + } + + found, err := c.DecodePreEtrogCalldata(data) + if err != nil { + return false, err + } + + if found { + c.IsMessage = bytes.Equal(methodID, claimMessagePreEtrogMethodID) + } + + return found, nil + + default: + // Log unrecognized method ID for debugging but returns false to continue searching (DFS) + logger.Debugf("unrecognized method ID encountered during claim calldata extraction: %x", methodID) + return false, nil + } +} diff --git a/claimsync/embedded.go 
b/claimsync/embedded.go index c7d26904a..4c5417436 100644 --- a/claimsync/embedded.go +++ b/claimsync/embedded.go @@ -4,10 +4,10 @@ import ( "context" "database/sql" "fmt" + "strings" "time" "github.com/0xPolygon/cdk-contracts-tooling/contracts/aggchain-multisig/agglayerbridge" - "github.com/agglayer/aggkit/bridgesync" claimsyncStorage "github.com/agglayer/aggkit/claimsync/storage" claimsynctypes "github.com/agglayer/aggkit/claimsync/types" aggkitcommon "github.com/agglayer/aggkit/common" @@ -31,22 +31,44 @@ func newEmbeddedProcessor(logger aggkitcommon.Logger, storage claimsynctypes.Cla // --- Embedded mode --- -// embeddedClaimSync is passed to bridgesync as a ClaimEventsProcessor. +// EmbeddedClaimSync is passed to bridgesync as a ClaimEventsProcessor. // It has no own EVMDriver; bridgesync drives event download and calls ProcessClaimEvents // from its own ProcessBlock, reusing bridgesync's transaction for atomicity. -type embeddedClaimSync struct { +type EmbeddedClaimSync struct { Appender sync.LogAppenderMap - Processor *claimEmbeddedProcessor + Processor claimsynctypes.EmbeddedProcessor Reader claimsynctypes.ClaimsReader } +// Event combination claim events +type Event struct { + Claim *Claim + UnsetClaim *UnsetClaim + SetClaim *SetClaim +} + +func (e Event) String() string { + parts := []string{} + if e.Claim != nil { + parts = append(parts, e.Claim.String()) + } + if e.UnsetClaim != nil { + parts = append(parts, e.UnsetClaim.String()) + } + if e.SetClaim != nil { + parts = append(parts, e.SetClaim.String()) + } + return "claimsync.Event{" + strings.Join(parts, ", ") + "}" +} + // NewClaimStorage creates a claim storage instance for embedded mode, using the provided database connection. 
func NewClaimStorage( database *sql.DB, logger aggkitcommon.Logger, syncerID claimsynctypes.ClaimSyncerID, + dbQueryTimeout time.Duration, ) (claimsynctypes.ClaimStorager, error) { - store, err := claimsyncStorage.New(logger, database, syncerID.String()) + store, err := claimsyncStorage.New(logger, database, syncerID.String(), dbQueryTimeout) if err != nil { return nil, fmt.Errorf("claimsync: failed to create storage: %w", err) } @@ -66,20 +88,19 @@ func NewEmbedded( syncerID claimsynctypes.ClaimSyncerID, dbQueryTimeout time.Duration, logger aggkitcommon.Logger, -) (*embeddedClaimSync, error) { +) (*EmbeddedClaimSync, error) { proc := newEmbeddedProcessor(logger, storage) agglayerBridgeContract, err := agglayerbridge.NewAgglayerbridge(bridgeAddr, ethClient) if err != nil { return nil, fmt.Errorf("claimsync embedded: failed to create AgglayerBridge binding: %w", err) } - reader := NewProcessorReader(logger, storage) isSovereign, agglayerBridgeL2Contract, err := detectSovereignChain(ctx, bridgeAddr, ethClient) if err != nil { return nil, fmt.Errorf("claimsync embedded: failed to detect chain type: %w", err) } - appender, err := buildAppender(ctx, ethClient, reader, bridgeAddr, + appender, err := buildAppender(ctx, ethClient, storage, bridgeAddr, agglayerBridgeContract, agglayerBridgeL2Contract, isSovereign, logger) if err != nil { return nil, fmt.Errorf("claimsync embedded: failed to build appender: %w", err) @@ -87,47 +108,39 @@ func NewEmbedded( logger.Infof("claimsync embedded created: bridgeAddr=%s sovereign=%t", bridgeAddr.String(), isSovereign) - return &embeddedClaimSync{ + return &EmbeddedClaimSync{ Processor: proc, - Reader: reader, + Reader: storage, Appender: appender}, nil } -func (p *claimEmbeddedProcessor) ProcessBlockWithTx(tx dbtypes.Querier, block *sync.Block, insertBlock bool) error { - if insertBlock { - if err := p.storage.InsertBlock(tx, block.Num, block.Hash.String()); err != nil { - p.log.Errorf("failed to insert block %d: %v", block.Num, err) 
- return err - } - } +func (p *claimEmbeddedProcessor) ProcessBlockWithTx(ctx context.Context, tx dbtypes.Querier, block sync.Block, eventRaw any) error { - for _, e := range block.Events { - event, ok := e.(bridgesync.Event) - if !ok { - p.log.Errorf("failed to convert event to bridgesync.Event type in block %d", block.Num) - return fmt.Errorf("claimsync ProcessBlock: unexpected event type %T in block %d", e, block.Num) - } + event, ok := eventRaw.(Event) + if !ok { + return fmt.Errorf("claimsync ProcessBlock: unexpected event type %T in block %d", event, block.Num) + } - if event.Claim != nil { - if err := p.storage.InsertClaim(tx, *event.Claim); err != nil { - p.log.Errorf("failed to insert claim event at block %d: %v", block.Num, err) - return err - } + if event.Claim != nil { + if err := p.storage.InsertClaim(ctx, tx, *event.Claim); err != nil { + p.log.Errorf("failed to insert claim event at block %d: %v", block.Num, err) + return err } + } - if event.UnsetClaim != nil { - if err := p.storage.InsertUnsetClaim(tx, *event.UnsetClaim); err != nil { - p.log.Errorf("failed to insert unset_claim event at block %d: %v", block.Num, err) - return err - } + if event.UnsetClaim != nil { + if err := p.storage.InsertUnsetClaim(ctx, tx, *event.UnsetClaim); err != nil { + p.log.Errorf("failed to insert unset_claim event at block %d: %v", block.Num, err) + return err } + } - if event.SetClaim != nil { - if err := p.storage.InsertSetClaim(tx, *event.SetClaim); err != nil { - p.log.Errorf("failed to insert set_claim event at block %d: %v", block.Num, err) - return err - } + if event.SetClaim != nil { + if err := p.storage.InsertSetClaim(ctx, tx, *event.SetClaim); err != nil { + p.log.Errorf("failed to insert set_claim event at block %d: %v", block.Num, err) + return err } } + return nil } @@ -137,12 +150,12 @@ func (p *claimEmbeddedProcessor) ProcessBlockWithTx(tx dbtypes.Querier, block *s // it returns: // - the number of rows affected (currently the number of blocks deleted) 
// - error if the deletion failed, or nil if successful -func (p *claimEmbeddedProcessor) ReorgWithTx(tx dbtypes.Querier, firstReorgedBlock uint64) (int64, error) { - return p.deleteBlocksFrom(tx, firstReorgedBlock) +func (p *claimEmbeddedProcessor) ReorgWithTx(ctx context.Context, tx dbtypes.Querier, firstReorgedBlock uint64) (int64, error) { + return p.deleteBlocksFrom(ctx, tx, firstReorgedBlock) } -func (p *claimEmbeddedProcessor) deleteBlocksFrom(tx dbtypes.Querier, firstReorgedBlock uint64) (int64, error) { - rowsAffected, err := p.storage.DeleteBlocksFrom(tx, firstReorgedBlock) +func (p *claimEmbeddedProcessor) deleteBlocksFrom(ctx context.Context, tx dbtypes.Querier, firstReorgedBlock uint64) (int64, error) { + rowsAffected, err := p.storage.DeleteBlocksFrom(ctx, tx, firstReorgedBlock) if err != nil { return 0, fmt.Errorf("claimsync deleteBlocksFrom: %w", err) } diff --git a/bridgesync/helpers_test.go b/claimsync/helpers_test.go similarity index 99% rename from bridgesync/helpers_test.go rename to claimsync/helpers_test.go index f38322cdd..25dd69724 100644 --- a/bridgesync/helpers_test.go +++ b/claimsync/helpers_test.go @@ -1,4 +1,4 @@ -package bridgesync +package claimsync import ( "context" diff --git a/claimsync/processor.go b/claimsync/processor.go index 4e373da40..8d611b3d9 100644 --- a/claimsync/processor.go +++ b/claimsync/processor.go @@ -7,7 +7,6 @@ import ( "fmt" "time" - "github.com/agglayer/aggkit/bridgesync" claimsynctypes "github.com/agglayer/aggkit/claimsync/types" aggkitcommon "github.com/agglayer/aggkit/common" "github.com/agglayer/aggkit/db/compatibility" @@ -49,9 +48,16 @@ func (p *processor) ProcessBlock(ctx context.Context, block sync.Block) error { p.rollbackTx(tx) } }() - result := p.embeddedProcessor.ProcessBlockWithTx(tx, &block, true) - if result != nil { - return result + + if err := p.storage.InsertBlock(ctx, tx, block.Num, block.Hash); err != nil { + p.log.Errorf("failed to insert block %d: %v", block.Num, err) + return err + } + 
for _, e := range block.Events { + result := p.embeddedProcessor.ProcessBlockWithTx(dbCtx, tx, block, e) + if result != nil { + return result + } } if err := tx.Commit(); err != nil { p.log.Errorf("failed to commit block %d: %v", block.Num, err) @@ -79,7 +85,7 @@ func (p *processor) Reorg(ctx context.Context, firstReorgedBlock uint64) error { } }() - rowsAffected, err := p.embeddedProcessor.ReorgWithTx(tx, firstReorgedBlock) + rowsAffected, err := p.embeddedProcessor.ReorgWithTx(dbCtx, tx, firstReorgedBlock) if err != nil { return fmt.Errorf("claimsync Reorg: %w", err) } @@ -93,14 +99,19 @@ func (p *processor) Reorg(ctx context.Context, firstReorgedBlock uint64) error { return nil } +// GetFirstProcessedBlock returns the lowest block number stored. +func (p *processor) GetFirstProcessedBlock(ctx context.Context) (uint64, bool, error) { + return p.storage.GetFirstProcessedBlock(ctx, nil) +} + // GetLastProcessedBlock returns the highest block number stored. -func (p *processor) GetLastProcessedBlock(_ context.Context) (uint64, error) { - return p.storage.GetLastProcessedBlock(nil) +func (p *processor) GetLastProcessedBlock(ctx context.Context) (uint64, bool, error) { + return p.storage.GetLastProcessedBlock(ctx, nil) } // GetBoundaryBlockForClaimType returns the max block_num for claims of the given type. 
-func (p *processor) GetBoundaryBlockForClaimType(tx dbtypes.Querier, claimType bridgesync.ClaimType) (uint64, error) { - return p.storage.GetBoundaryBlockForClaimType(tx, claimType) +func (p *processor) GetBoundaryBlockForClaimType(ctx context.Context, tx dbtypes.Querier, claimType ClaimType) (uint64, error) { + return p.storage.GetBoundaryBlockForClaimType(ctx, tx, claimType) } func (p *processor) withDatabaseTimeout(ctx context.Context) (context.Context, context.CancelFunc) { diff --git a/claimsync/reader.go b/claimsync/reader.go deleted file mode 100644 index f70eb9b68..000000000 --- a/claimsync/reader.go +++ /dev/null @@ -1,45 +0,0 @@ -package claimsync - -import ( - "math/big" - - "github.com/agglayer/aggkit/bridgesync" - claimsynctypes "github.com/agglayer/aggkit/claimsync/types" - aggkitcommon "github.com/agglayer/aggkit/common" - dbtypes "github.com/agglayer/aggkit/db/types" -) - -var _ claimsynctypes.ClaimsReader = (*processorReader)(nil) - -type processorReader struct { - storage claimsynctypes.ClaimStorager - log aggkitcommon.Logger -} - -func NewProcessorReader(logger aggkitcommon.Logger, storage claimsynctypes.ClaimStorager) *processorReader { - return &processorReader{ - storage: storage, - log: logger, - } -} - -// GetLastProcessedBlock returns the highest block number stored. -func (p *processorReader) GetLastProcessedBlock(tx dbtypes.Querier) (uint64, error) { - return p.storage.GetLastProcessedBlock(tx) -} - -// GetBoundaryBlockForClaimType returns the max block_num for claims of the given type. -// Returns db.ErrNotFound if no claims of that type exist. -func (p *processorReader) GetBoundaryBlockForClaimType(tx dbtypes.Querier, claimType bridgesync.ClaimType) (uint64, error) { - return p.storage.GetBoundaryBlockForClaimType(tx, claimType) -} - -// GetClaims returns claims in [fromBlock, toBlock] using compaction logic. 
-func (p *processorReader) GetClaims(tx dbtypes.Querier, fromBlock, toBlock uint64) ([]bridgesync.Claim, error) { - return p.storage.GetClaims(tx, fromBlock, toBlock) -} - -// GetClaimsByGlobalIndex returns claims for the given global index using compaction logic. -func (p *processorReader) GetClaimsByGlobalIndex(tx dbtypes.Querier, globalIndex *big.Int) ([]bridgesync.Claim, error) { - return p.storage.GetClaimsByGlobalIndex(tx, globalIndex) -} diff --git a/claimsync/storage/storage.go b/claimsync/storage/storage.go index 612e2a9ba..5b0066409 100644 --- a/claimsync/storage/storage.go +++ b/claimsync/storage/storage.go @@ -6,8 +6,9 @@ import ( "errors" "fmt" "math/big" + "strings" + "time" - "github.com/agglayer/aggkit/bridgesync" "github.com/agglayer/aggkit/claimsync/storage/migrations" claimsynctypes "github.com/agglayer/aggkit/claimsync/types" aggkitcommon "github.com/agglayer/aggkit/common" @@ -15,6 +16,7 @@ import ( "github.com/agglayer/aggkit/db/compatibility" dbtypes "github.com/agglayer/aggkit/db/types" aggsync "github.com/agglayer/aggkit/sync" + "github.com/ethereum/go-ethereum/common" "github.com/russross/meddler" ) @@ -27,6 +29,7 @@ type blockRecord struct { } const ( + // claimColumnsSQL is the list of all claim columns claimColumnsSQL = `block_num, block_pos, tx_hash, @@ -46,6 +49,8 @@ const ( block_timestamp, type` + // compactedClaimsSelectSQL is the SELECT clause for compacted claims + // It combines metadata from the oldest claim with proofs and exit roots from the newest claim compactedClaimsSelectSQL = ` o.block_num, o.block_pos, @@ -65,17 +70,25 @@ const ( o.is_message, o.block_timestamp, o.type` + + // claimsByGERSQL is the query used by GetClaimsByGER. 
+ claimsByGERSQL = "SELECT " + claimColumnsSQL + + " FROM claim WHERE global_exit_root = $1 AND type = $2" + + " ORDER BY block_num ASC, block_pos ASC" ) type claimStorage struct { - database dbtypes.DBer - compatStore compatibility.CompatibilityDataStorager[aggsync.RuntimeData] + database *sql.DB + compatStore compatibility.CompatibilityDataStorager[aggsync.RuntimeData] + log aggkitcommon.Logger + dbQueryTimeout time.Duration } // NewStandalone opens (or creates) the SQLite database at dbPath, runs all pending migrations, // and returns a ready-to-use Storage along with the underlying *sql.DB // (needed by the processor for transaction management). -func NewStandalone(logger aggkitcommon.Logger, dbPath string, ownerName string) (claimsynctypes.ClaimStorager, error) { +func NewStandalone(logger aggkitcommon.Logger, dbPath string, ownerName string, + dbQueryTimeout time.Duration) (claimsynctypes.ClaimStorager, error) { database, err := db.NewSQLiteDB(dbPath) if err != nil { return nil, fmt.Errorf("claimsync storage: failed to open SQLite DB at %s: %w", dbPath, err) @@ -87,16 +100,20 @@ func NewStandalone(logger aggkitcommon.Logger, dbPath string, ownerName string) } return &claimStorage{ - database: database, - compatStore: compatibility.NewKeyValueToCompatibilityStorage[aggsync.RuntimeData](db.NewKeyValueStorage(database), ownerName), + database: database, + compatStore: compatibility.NewKeyValueToCompatibilityStorage[aggsync.RuntimeData](db.NewKeyValueStorage(database), ownerName), + log: logger, + dbQueryTimeout: dbQueryTimeout, }, nil } // New creates a Storage using the provided sql.DB, so it can share -func New(logger aggkitcommon.Logger, database *sql.DB, ownerName string) (claimsynctypes.ClaimStorager, error) { +func New(logger aggkitcommon.Logger, database *sql.DB, ownerName string, dbQueryTimeout time.Duration) (claimsynctypes.ClaimStorager, error) { return &claimStorage{ - database: database, - compatStore: 
compatibility.NewKeyValueToCompatibilityStorage[aggsync.RuntimeData](db.NewKeyValueStorage(database), ownerName), + database: database, + compatStore: compatibility.NewKeyValueToCompatibilityStorage[aggsync.RuntimeData](db.NewKeyValueStorage(database), ownerName), + log: logger, + dbQueryTimeout: dbQueryTimeout, }, nil } @@ -124,15 +141,15 @@ func (s *claimStorage) getQuerier(tx dbtypes.Querier) dbtypes.Querier { } // InsertBlock inserts a block row using meddler. -func (s *claimStorage) InsertBlock(tx dbtypes.Querier, blockNum uint64, blockHash string) error { - if err := meddler.Insert(s.getQuerier(tx), "block", &blockRecord{Num: blockNum, Hash: blockHash}); err != nil { +func (s *claimStorage) InsertBlock(_ context.Context, tx dbtypes.Querier, blockNum uint64, blockHash common.Hash) error { + if err := meddler.Insert(s.getQuerier(tx), "block", &blockRecord{Num: blockNum, Hash: blockHash.Hex()}); err != nil { return fmt.Errorf("InsertBlock %d: %w", blockNum, err) } return nil } // InsertClaim persists a claim. The referenced block must already exist. -func (s *claimStorage) InsertClaim(tx dbtypes.Querier, claim bridgesync.Claim) error { +func (s *claimStorage) InsertClaim(_ context.Context, tx dbtypes.Querier, claim claimsynctypes.Claim) error { if err := meddler.Insert(s.getQuerier(tx), "claim", &claim); err != nil { return fmt.Errorf("InsertClaim (block %d, pos %d): %w", claim.BlockNum, claim.BlockPos, err) } @@ -140,7 +157,7 @@ func (s *claimStorage) InsertClaim(tx dbtypes.Querier, claim bridgesync.Claim) e } // InsertUnsetClaim persists an unset claim. The referenced block must already exist. 
-func (s *claimStorage) InsertUnsetClaim(tx dbtypes.Querier, u bridgesync.UnsetClaim) error { +func (s *claimStorage) InsertUnsetClaim(_ context.Context, tx dbtypes.Querier, u claimsynctypes.UnsetClaim) error { if err := meddler.Insert(s.getQuerier(tx), "unset_claim", &u); err != nil { return fmt.Errorf("InsertUnsetClaim (block %d, pos %d): %w", u.BlockNum, u.BlockPos, err) } @@ -148,7 +165,7 @@ func (s *claimStorage) InsertUnsetClaim(tx dbtypes.Querier, u bridgesync.UnsetCl } // InsertSetClaim persists a set claim. The referenced block must already exist. -func (s *claimStorage) InsertSetClaim(tx dbtypes.Querier, sc bridgesync.SetClaim) error { +func (s *claimStorage) InsertSetClaim(_ context.Context, tx dbtypes.Querier, sc claimsynctypes.SetClaim) error { if err := meddler.Insert(s.getQuerier(tx), "set_claim", &sc); err != nil { return fmt.Errorf("InsertSetClaim (block %d, pos %d): %w", sc.BlockNum, sc.BlockPos, err) } @@ -158,7 +175,7 @@ func (s *claimStorage) InsertSetClaim(tx dbtypes.Querier, sc bridgesync.SetClaim // GetClaims returns claims in [fromBlock, toBlock] using compaction logic: // claims with an unset_claim are returned uncompacted; others are compacted // (oldest metadata + newest proofs per global_index). 
-func (s *claimStorage) GetClaims(tx dbtypes.Querier, fromBlock, toBlock uint64) ([]bridgesync.Claim, error) { +func (s *claimStorage) GetClaims(ctx context.Context, tx dbtypes.Querier, fromBlock, toBlock uint64) ([]claimsynctypes.Claim, error) { query := fmt.Sprintf(` WITH all_claims_ranked AS ( SELECT @@ -188,10 +205,14 @@ func (s *claimStorage) GetClaims(tx dbtypes.Querier, fromBlock, toBlock uint64) ORDER BY block_num ASC, block_pos ASC; `, claimColumnsSQL, compactedClaimsSelectSQL) - rows, err := s.getQuerier(tx).Query(query, fromBlock, toBlock) + // Create a context with database timeout + dbCtx, cancel := s.withDatabaseTimeout(ctx) + defer cancel() + + rows, err := s.getQuerier(tx).QueryContext(dbCtx, query, fromBlock, toBlock) if err != nil { if errors.Is(err, sql.ErrNoRows) { - return []bridgesync.Claim{}, nil + return []claimsynctypes.Claim{}, nil } return nil, fmt.Errorf("GetClaims [%d, %d]: %w", fromBlock, toBlock, err) } @@ -201,7 +222,7 @@ func (s *claimStorage) GetClaims(tx dbtypes.Querier, fromBlock, toBlock uint64) } // GetClaimsByGlobalIndex returns claims for the given global index using compaction logic. 
-func (s *claimStorage) GetClaimsByGlobalIndex(tx dbtypes.Querier, globalIndex *big.Int) ([]bridgesync.Claim, error) { +func (s *claimStorage) GetClaimsByGlobalIndex(ctx context.Context, tx dbtypes.Querier, globalIndex *big.Int) ([]claimsynctypes.Claim, error) { if globalIndex == nil { return nil, errors.New("GetClaimsByGlobalIndex: globalIndex cannot be nil") } @@ -233,10 +254,13 @@ func (s *claimStorage) GetClaimsByGlobalIndex(tx dbtypes.Querier, globalIndex *b ORDER BY block_num ASC, block_pos ASC; `, claimColumnsSQL, compactedClaimsSelectSQL) - rows, err := s.getQuerier(tx).Query(query, globalIndex.String()) + dbCtx, cancel := s.withDatabaseTimeout(ctx) + defer cancel() + + rows, err := s.getQuerier(tx).QueryContext(dbCtx, query, globalIndex.String()) if err != nil { if errors.Is(err, sql.ErrNoRows) { - return []bridgesync.Claim{}, nil + return []claimsynctypes.Claim{}, nil } return nil, fmt.Errorf("GetClaimsByGlobalIndex %s: %w", globalIndex.String(), err) } @@ -245,21 +269,44 @@ func (s *claimStorage) GetClaimsByGlobalIndex(tx dbtypes.Querier, globalIndex *b return scanClaims(rows) } +// GetFirstProcessedBlock returns the lowest block number stored. +// Returns (0, false, nil) if there are no blocks on DB. +func (s *claimStorage) GetFirstProcessedBlock(ctx context.Context, tx dbtypes.Querier) (uint64, bool, error) { + dbCtx, cancel := s.withDatabaseTimeout(ctx) + defer cancel() + + var num uint64 + err := s.getQuerier(tx).QueryRowContext(dbCtx, `SELECT num FROM block ORDER BY num ASC LIMIT 1`).Scan(&num) + if errors.Is(err, sql.ErrNoRows) { + return 0, false, nil + } + + return num, err == nil, err +} + // GetLastProcessedBlock returns the highest block number stored. -func (s *claimStorage) GetLastProcessedBlock(tx dbtypes.Querier) (uint64, error) { +// Returns (0, false, nil) if there are no blocks on DB. 
+func (s *claimStorage) GetLastProcessedBlock(ctx context.Context, tx dbtypes.Querier) (uint64, bool, error) { + dbCtx, cancel := s.withDatabaseTimeout(ctx) + defer cancel() + var num uint64 - err := s.getQuerier(tx).QueryRow(`SELECT num FROM block ORDER BY num DESC LIMIT 1`).Scan(&num) + err := s.getQuerier(tx).QueryRowContext(dbCtx, `SELECT num FROM block ORDER BY num DESC LIMIT 1`).Scan(&num) if errors.Is(err, sql.ErrNoRows) { - return 0, nil + return 0, false, nil } - return num, err + + return num, err == nil, err } // GetBoundaryBlockForClaimType returns the max block_num for claims of the given type. // Returns db.ErrNotFound if no claims of that type exist. -func (s *claimStorage) GetBoundaryBlockForClaimType(tx dbtypes.Querier, claimType bridgesync.ClaimType) (uint64, error) { +func (s *claimStorage) GetBoundaryBlockForClaimType(ctx context.Context, tx dbtypes.Querier, claimType claimsynctypes.ClaimType) (uint64, error) { + dbCtx, cancel := s.withDatabaseTimeout(ctx) + defer cancel() + var blockNum *uint64 - if err := s.getQuerier(tx).QueryRow(`SELECT MAX(block_num) FROM claim WHERE type = $1`, claimType). + if err := s.getQuerier(tx).QueryRowContext(dbCtx, `SELECT MAX(block_num) FROM claim WHERE type = $1`, claimType). Scan(&blockNum); err != nil { return 0, err } @@ -269,10 +316,40 @@ func (s *claimStorage) GetBoundaryBlockForClaimType(tx dbtypes.Querier, claimTyp return *blockNum, nil } +// GetClaimsByGER returns all DetailedClaimEvent claims with the given global exit root, +// ordered by block_num/block_pos ascending. If the claim table does not exist (e.g. L1 +// processor), returns nil, nil gracefully. 
+func (p *claimStorage) GetClaimsByGER(ctx context.Context, globalExitRoot common.Hash) ([]*claimsynctypes.Claim, error) { + dbCtx, cancel := p.withDatabaseTimeout(ctx) + defer cancel() + + rows, err := p.database.QueryContext(dbCtx, claimsByGERSQL, globalExitRoot.Hex(), claimsynctypes.DetailedClaimEvent) + if err != nil { + if strings.Contains(err.Error(), "no such table") { + return nil, nil + } + return nil, fmt.Errorf("GetClaimsByGER: %w", err) + } + defer func() { + if cerr := rows.Close(); cerr != nil { + p.log.Errorf("error closing rows: %v", cerr) + } + }() + + claims := []*claimsynctypes.Claim{} + if err = meddler.ScanAll(rows, &claims); err != nil { + return nil, fmt.Errorf("GetClaimsByGER: scan: %w", err) + } + return claims, nil +} + // DeleteBlocksFrom deletes all blocks with num >= firstBlock and returns the count deleted. // Cascade constraints automatically remove associated claims, unset_claims and set_claims. -func (s *claimStorage) DeleteBlocksFrom(tx dbtypes.Querier, firstBlock uint64) (int64, error) { - res, err := s.getQuerier(tx).Exec(`DELETE FROM block WHERE num >= $1`, firstBlock) +func (s *claimStorage) DeleteBlocksFrom(ctx context.Context, tx dbtypes.Querier, firstBlock uint64) (int64, error) { + dbCtx, cancel := s.withDatabaseTimeout(ctx) + defer cancel() + + res, err := s.getQuerier(tx).ExecContext(dbCtx, `DELETE FROM block WHERE num >= $1`, firstBlock) if err != nil { return 0, fmt.Errorf("DeleteBlocksFrom %d: %w", firstBlock, err) } @@ -280,17 +357,30 @@ func (s *claimStorage) DeleteBlocksFrom(tx dbtypes.Querier, firstBlock uint64) ( return n, nil } -func scanClaims(rows *sql.Rows) ([]bridgesync.Claim, error) { - var ptrs []*bridgesync.Claim +func scanClaims(rows *sql.Rows) ([]claimsynctypes.Claim, error) { + var ptrs []*claimsynctypes.Claim if err := meddler.ScanAll(rows, &ptrs); err != nil { return nil, fmt.Errorf("scanClaims: %w", err) } iface := db.SlicePtrsToSlice(ptrs) - claims, ok := iface.([]bridgesync.Claim) + claims, ok := 
iface.([]claimsynctypes.Claim) if !ok { return nil, errors.New("scanClaims: type assertion from []*Claim to []Claim failed") } return claims, nil } + +// buildNetworkIDsFilter builds SQL filter for the given network IDs +func buildNetworkIDsFilter(networkIDs []uint32, networkIDColumn string) string { + placeholders := make([]string, len(networkIDs)) + for i, id := range networkIDs { + placeholders[i] = fmt.Sprintf("%d", id) + } + return fmt.Sprintf("%s IN (%s)", networkIDColumn, strings.Join(placeholders, ", ")) +} + +func (p *claimStorage) withDatabaseTimeout(ctx context.Context) (context.Context, context.CancelFunc) { + return context.WithTimeout(ctx, p.dbQueryTimeout) +} diff --git a/claimsync/storage/storage_paged.go b/claimsync/storage/storage_paged.go new file mode 100644 index 000000000..b06948c7c --- /dev/null +++ b/claimsync/storage/storage_paged.go @@ -0,0 +1,360 @@ +package storage + +import ( + "context" + "database/sql" + "errors" + "fmt" + "math/big" + "regexp" + "strings" + + claimsynctypes "github.com/agglayer/aggkit/claimsync/types" + "github.com/agglayer/aggkit/db" + dbtypes "github.com/agglayer/aggkit/db/types" + "github.com/russross/meddler" +) + +const ( + // orderByBlockDesc is the default order by clause for block-based queries + orderByBlockDesc = "block_num DESC, block_pos DESC" + // unsetClaimTableName is the name of the table that stores unset claim events + unsetClaimTableName = "unset_claim" + + // setClaimTableName is the name of the table that stores set claim events + setClaimTableName = "set_claim" +) + +var ( + // tableNameRegex is the regex pattern to validate table names + tableNameRegex = regexp.MustCompile(`^[a-zA-Z0-9_]+$`) +) + +func (p *claimStorage) GetSetClaimsPaged( + ctx context.Context, pageNumber, pageSize uint32, + globalIndex *big.Int, +) ([]*claimsynctypes.SetClaim, int, error) { + whereClause := buildGlobalIndexFilterClause(globalIndex) + setClaimsCount, err := p.GetTotalNumberOfRecords(ctx, setClaimTableName, 
whereClause) + if err != nil { + return nil, 0, err + } + + if setClaimsCount == 0 { + return []*claimsynctypes.SetClaim{}, 0, nil + } + + offset, err := p.calculateOffset(pageNumber, pageSize, setClaimsCount, setClaimTableName) + if err != nil { + return nil, 0, err + } + + rows, err := p.queryPaged(ctx, p.database, offset, pageSize, setClaimTableName, orderByBlockDesc, whereClause) + if err != nil { + if errors.Is(err, db.ErrNotFound) { + p.log.Debugf("no set claims were found for provided parameters (pageNumber=%d, pageSize=%d)", + pageNumber, pageSize) + return nil, setClaimsCount, nil + } + p.log.Errorf("GetSetClaimsPaged: queryPaged failed for pageNumber=%d, pageSize=%d: %v", pageNumber, pageSize, err) + return nil, 0, err + } + defer func() { + if cerr := rows.Close(); cerr != nil { + p.log.Errorf("error closing rows: %v", cerr) + } + }() + + setClaims := []*claimsynctypes.SetClaim{} + if err = meddler.ScanAll(rows, &setClaims); err != nil { + p.log.Errorf("GetSetClaimsPaged: meddler.ScanAll failed for pageNumber=%d, pageSize=%d: %v", + pageNumber, pageSize, err) + return nil, 0, err + } + + return setClaims, setClaimsCount, nil +} + +// GetUnsetClaimsPaged returns a paginated list of unset claims +// +//nolint:dupl +func (p *claimStorage) GetUnsetClaimsPaged( + ctx context.Context, pageNumber, pageSize uint32, + globalIndex *big.Int, +) ([]*claimsynctypes.UnsetClaim, int, error) { + whereClause := buildGlobalIndexFilterClause(globalIndex) + unclaimsCount, err := p.GetTotalNumberOfRecords(ctx, unsetClaimTableName, whereClause) + if err != nil { + return nil, 0, err + } + + if unclaimsCount == 0 { + return []*claimsynctypes.UnsetClaim{}, 0, nil + } + + offset, err := p.calculateOffset(pageNumber, pageSize, unclaimsCount, unsetClaimTableName) + if err != nil { + return nil, 0, err + } + + rows, err := p.queryPaged(ctx, p.database, offset, pageSize, unsetClaimTableName, orderByBlockDesc, whereClause) + if err != nil { + if errors.Is(err, db.ErrNotFound) { + 
p.log.Debugf("no unset claims were found for provided parameters (pageNumber=%d, pageSize=%d)", + pageNumber, pageSize) + return nil, unclaimsCount, nil + } + p.log.Errorf("GetUnsetClaimsPaged: queryPaged failed for pageNumber=%d, pageSize=%d: %v", pageNumber, pageSize, err) + return nil, 0, err + } + defer func() { + if cerr := rows.Close(); cerr != nil { + p.log.Errorf("error closing rows: %v", cerr) + } + }() + + unsetClaims := []*claimsynctypes.UnsetClaim{} + if err = meddler.ScanAll(rows, &unsetClaims); err != nil { + p.log.Errorf("GetUnsetClaimsPaged: meddler.ScanAll failed for pageNumber=%d, pageSize=%d: %v", + pageNumber, pageSize, err) + return nil, 0, err + } + + return unsetClaims, unclaimsCount, nil +} + +func (p *claimStorage) GetClaimsPaged( + ctx context.Context, pageNumber, pageSize uint32, + networkIDs []uint32, globalIndex *big.Int, +) ([]*claimsynctypes.Claim, int, error) { + whereClause := p.buildClaimsFilterClause(networkIDs, globalIndex) + claimsCount, err := p.getCompactedClaimsCount(ctx, whereClause) + if err != nil { + return nil, 0, err + } + + if claimsCount == 0 { + return []*claimsynctypes.Claim{}, 0, nil + } + + offset, err := p.calculateOffset(pageNumber, pageSize, claimsCount, "claims") + if err != nil { + return nil, 0, err + } + + // Create a context with database timeout + dbCtx, cancel := p.withDatabaseTimeout(ctx) + defer cancel() + + // Pagination query with compaction logic implementing three cases: + // Case 1: If unset_claim exists for a global_index, return all claims on page uncompacted + // Case 2: If no unset_claim exists and globally oldest is on page, return compacted claim + // Case 3: If globally oldest is outside page and no unset_claim exists, exclude from results + // + // This query: + // - Gets claims for the requested page (DESC order: newest first) + // - Ranks all claims globally by global_index to find oldest and newest + // - For claims with unset_claim: returns all instances on the page uncompacted + // - 
For claims without unset_claim: only returns compacted version if newest is on page + //nolint:gosec + query := fmt.Sprintf(` + WITH page_claims AS ( + SELECT * + FROM claim + %s + ORDER BY block_num DESC, block_pos DESC + LIMIT $1 OFFSET $2 + ), + all_claims_ranked AS ( + SELECT + *, + ROW_NUMBER() OVER (PARTITION BY global_index ORDER BY block_num ASC, block_pos ASC) AS rn_oldest_global, + ROW_NUMBER() OVER (PARTITION BY global_index ORDER BY block_num DESC, block_pos DESC) AS rn_newest_global + FROM claim + %s + ), + claims_with_unset_on_page AS ( + -- Case 1: Return all claims on page if unset_claim exists (no compaction) + SELECT + pc.%s + FROM page_claims pc + WHERE EXISTS ( + SELECT 1 FROM unset_claim uc + WHERE uc.global_index = pc.global_index + ) + ), + newest_on_page AS ( + SELECT DISTINCT pc.global_index + FROM page_claims pc + JOIN all_claims_ranked acr ON pc.global_index = acr.global_index AND acr.rn_newest_global = 1 + WHERE pc.block_num = acr.block_num AND pc.block_pos = acr.block_pos + AND NOT EXISTS ( + SELECT 1 FROM unset_claim uc + WHERE uc.global_index = pc.global_index + ) + ), + compactable_claims AS ( + -- Case 2 & 3: Handle claims without unset_claim + SELECT + %s + FROM all_claims_ranked o + JOIN all_claims_ranked n ON o.global_index = n.global_index AND n.rn_newest_global = 1 + WHERE o.rn_oldest_global = 1 -- Globally oldest claim + AND o.global_index IN (SELECT global_index FROM newest_on_page) + ) + SELECT * FROM claims_with_unset_on_page + UNION ALL + SELECT * FROM compactable_claims + ORDER BY block_num DESC, block_pos DESC; + `, whereClause, whereClause, claimColumnsSQL, compactedClaimsSelectSQL) + + rows, err := p.database.QueryContext(dbCtx, query, pageSize, offset) + if err != nil { + if errors.Is(err, sql.ErrNoRows) { + p.log.Debugf("no claims were found for provided parameters (pageNumber=%d, pageSize=%d)", + pageNumber, pageSize) + return nil, claimsCount, nil + } + p.log.Errorf("GetClaimsPaged: queryPaged failed for 
pageNumber=%d, pageSize=%d: %v", pageNumber, pageSize, err) + return nil, 0, err + } + defer func() { + if cerr := rows.Close(); cerr != nil { + p.log.Errorf("error closing rows: %v", cerr) + } + }() + + claims := []*claimsynctypes.Claim{} + if err = meddler.ScanAll(rows, &claims); err != nil { + p.log.Errorf("GetClaimsPaged: meddler.ScanAll failed for pageNumber=%d, pageSize=%d: %v", pageNumber, pageSize, err) + return nil, 0, err + } + + return claims, claimsCount, nil +} + +// buildGlobalIndexFilterClause builds a WHERE clause for filtering by global_index +func buildGlobalIndexFilterClause(globalIndex *big.Int) string { + if globalIndex != nil { + return " WHERE " + fmt.Sprintf("global_index = '%s'", globalIndex.String()) + } + + return "" +} + +// buildClaimsFilterClause builds the WHERE clause for the claims table +// based on the provided networkIDs and globalIndex +func (p *claimStorage) buildClaimsFilterClause(networkIDs []uint32, globalIndex *big.Int) string { + const clauseCapacity = 2 + clauses := make([]string, 0, clauseCapacity) + if len(networkIDs) > 0 { + clauses = append(clauses, buildNetworkIDsFilter(networkIDs, "origin_network")) + } + + if globalIndex != nil { + clauses = append(clauses, + fmt.Sprintf("global_index = '%s'", globalIndex.String()), + ) + } + + if len(clauses) > 0 { + return " WHERE " + strings.Join(clauses, " AND ") + } + return "" +} + +// getCompactedClaimsCount returns the count of claims with compaction logic applied. +// - If unset_claim exists for a global_index, count all claims with that global_index +// - If no unset_claim exists, count only one per global_index (compacted) +// The count represents the total across all pages, matching what would be returned +// if all pages were queried. 
+func (p *claimStorage) getCompactedClaimsCount(ctx context.Context, whereClause string) (int, error) { + // Create a context with database timeout + dbCtx, cancel := p.withDatabaseTimeout(ctx) + defer cancel() + + // Count query with compaction logic matching GetClaimsPaged: + // 1. Count all claims with unset_claim (no compaction, all returned) + // 2. Count distinct global_index for claims without unset_claim (compacted, one per global_index) + //nolint:gosec + query := fmt.Sprintf(` + WITH filtered_claims AS ( + SELECT * FROM claim %s + ) + SELECT + (SELECT COUNT(*) FROM filtered_claims + WHERE EXISTS ( + SELECT 1 FROM unset_claim uc + WHERE uc.global_index = filtered_claims.global_index + )) + + (SELECT COUNT(DISTINCT global_index) FROM filtered_claims + WHERE NOT EXISTS ( + SELECT 1 FROM unset_claim uc + WHERE uc.global_index = filtered_claims.global_index + )) AS total_count; + `, whereClause) + + count := 0 + err := p.database.QueryRowContext(dbCtx, query).Scan(&count) + if err != nil { + return 0, err + } + + return count, nil +} + +func (p *claimStorage) calculateOffset(pageNumber, pageSize uint32, + recordsCount int, tableName string) (uint32, error) { + offset := (pageNumber - 1) * pageSize + if offset >= uint32(recordsCount) { + msg := fmt.Sprintf("invalid page number for given page size and total number of %s (page=%d, size=%d, total=%d)", + tableName, pageNumber, pageSize, recordsCount) + p.log.Debugf(msg) + return 0, errors.New(msg) + } + return offset, nil +} + +// GetTotalNumberOfRecords returns the total number of records in the given table +func (p *claimStorage) GetTotalNumberOfRecords(ctx context.Context, tableName, whereClause string) (int, error) { + if !tableNameRegex.MatchString(tableName) { + return 0, fmt.Errorf("invalid table name '%s' provided", tableName) + } + + // Create a context with database timeout + dbCtx, cancel := p.withDatabaseTimeout(ctx) + defer cancel() + + count := 0 + err := p.database.QueryRowContext(dbCtx, 
fmt.Sprintf( + `SELECT COUNT(*) AS count FROM %s%s;`, tableName, whereClause, + )).Scan(&count) + if err != nil { + return 0, err + } + + return count, nil +} + +// queryPaged returns a paged result from the given table with context support +func (p *claimStorage) queryPaged(ctx context.Context, tx dbtypes.Querier, + offset, pageSize uint32, + table, orderByClause, whereClause string, +) (*sql.Rows, error) { + // Create a context with database timeout + dbCtx, _ := p.withDatabaseTimeout(ctx) + rows, err := tx.QueryContext(dbCtx, fmt.Sprintf(` + SELECT * + FROM %s + %s + ORDER BY %s + LIMIT $1 OFFSET $2; + `, table, whereClause, orderByClause), pageSize, offset) + if err != nil { + if errors.Is(err, sql.ErrNoRows) { + return nil, db.ErrNotFound + } + return nil, err + } + return rows, nil +} diff --git a/claimsync/types/claim_data.go b/claimsync/types/claim_data.go new file mode 100644 index 000000000..ead5a264e --- /dev/null +++ b/claimsync/types/claim_data.go @@ -0,0 +1,245 @@ +package types + +import ( + "fmt" + "math/big" + + treetypes "github.com/agglayer/aggkit/tree/types" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" +) + +const ( + // nilStr holds nil string + nilStr = "nil" +) + +// ClaimType represents the type of a claim event +type ClaimType string + +const ( + ClaimEvent ClaimType = "ClaimEvent" + DetailedClaimEvent ClaimType = "DetailedClaimEvent" +) + +// Claim representation of a claim event +type Claim struct { + BlockNum uint64 `meddler:"block_num"` + BlockPos uint64 `meddler:"block_pos"` + TxHash common.Hash `meddler:"tx_hash,hash"` + GlobalIndex *big.Int `meddler:"global_index,bigint"` + OriginNetwork uint32 `meddler:"origin_network"` + OriginAddress common.Address `meddler:"origin_address"` + DestinationAddress common.Address `meddler:"destination_address"` + Amount *big.Int `meddler:"amount,bigint"` + ProofLocalExitRoot treetypes.Proof `meddler:"proof_local_exit_root,merkleproof"` + ProofRollupExitRoot 
treetypes.Proof `meddler:"proof_rollup_exit_root,merkleproof"` + MainnetExitRoot common.Hash `meddler:"mainnet_exit_root,hash"` + RollupExitRoot common.Hash `meddler:"rollup_exit_root,hash"` + GlobalExitRoot common.Hash `meddler:"global_exit_root,hash"` + DestinationNetwork uint32 `meddler:"destination_network"` + Metadata []byte `meddler:"metadata"` + IsMessage bool `meddler:"is_message"` + BlockTimestamp uint64 `meddler:"block_timestamp"` + Type ClaimType `meddler:"type"` +} + +// String returns a string representation of the Claim. +func (c *Claim) String() string { + globalIndexStr := nilStr + if c.GlobalIndex != nil { + globalIndexStr = c.GlobalIndex.String() + } + + amountStr := nilStr + if c.Amount != nil { + amountStr = c.Amount.String() + } + + return fmt.Sprintf("Claim{BlockNum: %d, BlockPos: %d, TxHash: %s, GlobalIndex: %s, "+ + "OriginNetwork: %d, OriginAddress: %s, DestinationAddress: %s, Amount: %s, "+ + "ProofLocalExitRoot: %v, ProofRollupExitRoot: %v, MainnetExitRoot: %s, "+ + "RollupExitRoot: %s, GlobalExitRoot: %s, DestinationNetwork: %d, Metadata: %x, "+ + "IsMessage: %t, BlockTimestamp: %d, Type: %s}", + c.BlockNum, c.BlockPos, c.TxHash.String(), globalIndexStr, + c.OriginNetwork, c.OriginAddress.String(), c.DestinationAddress.String(), amountStr, + c.ProofLocalExitRoot.String(), c.ProofRollupExitRoot.String(), c.MainnetExitRoot.String(), + c.RollupExitRoot.String(), c.GlobalExitRoot.String(), c.DestinationNetwork, c.Metadata, + c.IsMessage, c.BlockTimestamp, c.Type) +} + +// DecodeEtrogCalldata decodes claim calldata for Etrog fork +func (c *Claim) DecodeEtrogCalldata(data []any) (bool, error) { + // Unpack method inputs. 
Note that both claimAsset and claimMessage have the same interface + // for the relevant parts + // claimAsset/claimMessage( + // 0: smtProofLocalExitRoot, + // 1: smtProofRollupExitRoot, + // 2: globalIndex, + // 3: mainnetExitRoot, + // 4: rollupExitRoot, + // 5: originNetwork, + // 6: originTokenAddress/originAddress, + // 7: destinationNetwork, + // 8: destinationAddress, + // 9: amount, + // 10: metadata, + // ) + + actualGlobalIndex, ok := data[2].(*big.Int) + if !ok { + return false, fmt.Errorf("unexpected type for actualGlobalIndex, expected *big.Int got '%T'", data[2]) + } + if actualGlobalIndex.Cmp(c.GlobalIndex) != 0 { + // not the claim we're looking for + return false, nil + } + + rawLERProof, ok := data[0].([treetypes.DefaultHeight][common.HashLength]byte) + if !ok { + return false, fmt.Errorf("unexpected type for rawLERProof, expected [32][32]byte got '%T'", data[0]) + } + + rawRERProof, ok := data[1].([treetypes.DefaultHeight][common.HashLength]byte) + if !ok { + return false, fmt.Errorf("unexpected type for rawRERProof, expected [32][32]byte got '%T'", data[1]) + } + + c.ProofLocalExitRoot = treetypes.NewProof(rawLERProof) + c.ProofRollupExitRoot = treetypes.NewProof(rawRERProof) + + c.MainnetExitRoot, ok = data[3].([common.HashLength]byte) + if !ok { + return false, fmt.Errorf("unexpected type for 'MainnetExitRoot'. Expected '[32]byte', got '%T'", data[3]) + } + + c.RollupExitRoot, ok = data[4].([common.HashLength]byte) + if !ok { + return false, fmt.Errorf("unexpected type for 'RollupExitRoot'. Expected '[32]byte', got '%T'", data[4]) + } + + c.DestinationNetwork, ok = data[7].(uint32) + if !ok { + return false, fmt.Errorf("unexpected type for 'DestinationNetwork'. Expected 'uint32', got '%T'", data[7]) + } + + c.Metadata, ok = data[10].([]byte) + if !ok { + return false, fmt.Errorf("unexpected type for 'claim Metadata'. 
Expected '[]byte', got '%T'", data[10]) + } + + c.GlobalExitRoot = crypto.Keccak256Hash(c.MainnetExitRoot.Bytes(), c.RollupExitRoot.Bytes()) + + return true, nil +} + +// DecodePreEtrogCalldata decodes the claim calldata for pre-Etrog forks +func (c *Claim) DecodePreEtrogCalldata(data []any) (bool, error) { + // claimMessage/claimAsset( + // 0: bytes32[32] smtProof, + // 1: uint32 index, + // 2: bytes32 mainnetExitRoot, + // 3: bytes32 rollupExitRoot, + // 4: uint32 originNetwork, + // 5: address originTokenAddress, + // 6: uint32 destinationNetwork, + // 7: address destinationAddress, + // 8: uint256 amount, + // 9: bytes metadata + // ) + actualGlobalIndex, ok := data[1].(uint32) + if !ok { + return false, fmt.Errorf("unexpected type for actualGlobalIndex, expected uint32 got '%T'", data[1]) + } + + if new(big.Int).SetUint64(uint64(actualGlobalIndex)).Cmp(c.GlobalIndex) != 0 { + // not the claim we're looking for + return false, nil + } + + rawLERProof, ok := data[0].([treetypes.DefaultHeight][common.HashLength]byte) + if !ok { + return false, fmt.Errorf("unexpected type for proofLERBytes, expected [32][32]byte got '%T'", data[0]) + } + + c.ProofLocalExitRoot = treetypes.NewProof(rawLERProof) + + c.MainnetExitRoot, ok = data[2].([common.HashLength]byte) + if !ok { + return false, fmt.Errorf("unexpected type for 'MainnetExitRoot'. Expected '[32]byte', got '%T'", data[2]) + } + + c.RollupExitRoot, ok = data[3].([common.HashLength]byte) + if !ok { + return false, fmt.Errorf("unexpected type for 'RollupExitRoot'. Expected '[32]byte', got '%T'", data[3]) + } + + c.DestinationNetwork, ok = data[6].(uint32) + if !ok { + return false, fmt.Errorf("unexpected type for 'DestinationNetwork'. Expected 'uint32', got '%T'", data[6]) + } + + c.Metadata, ok = data[9].([]byte) + if !ok { + return false, fmt.Errorf("unexpected type for 'Metadata'. 
Expected '[]byte', got '%T'", data[9]) + } + + c.GlobalExitRoot = crypto.Keccak256Hash(c.MainnetExitRoot.Bytes(), c.RollupExitRoot.Bytes()) + + return true, nil +} + +// UnsetClaim representation of an UpdatedUnsetGlobalIndexHashChain event, +// that is emitted by the bridge contract when a claim is unset. +type UnsetClaim struct { + BlockNum uint64 `meddler:"block_num"` + BlockPos uint64 `meddler:"block_pos"` + TxHash common.Hash `meddler:"tx_hash,hash"` + GlobalIndex *big.Int `meddler:"global_index,bigint"` + UnsetGlobalIndexHashChain common.Hash `meddler:"unset_global_index_hash_chain,hash"` + CreatedAt uint64 `meddler:"created_at"` +} + +// TODO: Why this struct is duplicated?? +// Unclaim: this was in file bridgesync/types/types.go +// UnsetClaim: this was in file bridgesync/processor.go +type Unclaim struct { + GlobalIndex *big.Int `json:"global_index"` + BlockNumber uint64 `json:"block_number"` + LogIndex uint64 `json:"log_index"` +} + +// String returns a string representation of the UnsetClaim. +func (u *UnsetClaim) String() string { + globalIndexStr := nilStr + if u.GlobalIndex != nil { + globalIndexStr = u.GlobalIndex.String() + } + + return fmt.Sprintf("UnsetClaim{BlockNum: %d, BlockPos: %d, TxHash: %s, "+ + "GlobalIndex: %s, UnsetGlobalIndexHashChain: %s, CreatedAt: %d}", + u.BlockNum, u.BlockPos, u.TxHash.String(), + globalIndexStr, u.UnsetGlobalIndexHashChain.String(), u.CreatedAt) +} + +// SetClaim representation of a SetClaim event, +// that is emitted by the L2 bridge contract when a claim is set. +type SetClaim struct { + BlockNum uint64 `meddler:"block_num"` + BlockPos uint64 `meddler:"block_pos"` + TxHash common.Hash `meddler:"tx_hash,hash"` + GlobalIndex *big.Int `meddler:"global_index,bigint"` + CreatedAt uint64 `meddler:"created_at"` +} + +// String returns a string representation of the SetClaim. 
+func (s *SetClaim) String() string { + globalIndexStr := nilStr + if s.GlobalIndex != nil { + globalIndexStr = s.GlobalIndex.String() + } + return fmt.Sprintf("SetClaim{BlockNum: %d, BlockPos: %d, TxHash: %s, "+ + "GlobalIndex: %s, CreatedAt: %d}", + s.BlockNum, s.BlockPos, s.TxHash.String(), + globalIndexStr, s.CreatedAt) +} diff --git a/claimsync/types/claim_reader.go b/claimsync/types/claim_reader.go index e62205326..031a23d57 100644 --- a/claimsync/types/claim_reader.go +++ b/claimsync/types/claim_reader.go @@ -1,16 +1,30 @@ package types import ( + "context" "math/big" - "github.com/agglayer/aggkit/bridgesync" dbtypes "github.com/agglayer/aggkit/db/types" + "github.com/ethereum/go-ethereum/common" ) // ClaimsReader provides read-only access type ClaimsReader interface { - GetLastProcessedBlock(tx dbtypes.Querier) (uint64, error) - GetBoundaryBlockForClaimType(tx dbtypes.Querier, claimType bridgesync.ClaimType) (uint64, error) - GetClaims(tx dbtypes.Querier, fromBlock, toBlock uint64) ([]bridgesync.Claim, error) - GetClaimsByGlobalIndex(tx dbtypes.Querier, globalIndex *big.Int) ([]bridgesync.Claim, error) + GetLastProcessedBlock(ctx context.Context, tx dbtypes.Querier) (uint64, bool, error) + GetBoundaryBlockForClaimType(ctx context.Context, tx dbtypes.Querier, claimType ClaimType) (uint64, error) + GetClaims(ctx context.Context, tx dbtypes.Querier, fromBlock, toBlock uint64) ([]Claim, error) + GetClaimsByGlobalIndex(ctx context.Context, tx dbtypes.Querier, globalIndex *big.Int) ([]Claim, error) + GetClaimsByGER(ctx context.Context, globalExitRoot common.Hash) ([]*Claim, error) + GetClaimsPaged( + ctx context.Context, pageNumber, pageSize uint32, + networkIDs []uint32, globalIndex *big.Int, + ) ([]*Claim, int, error) + GetSetClaimsPaged( + ctx context.Context, pageNumber, pageSize uint32, + globalIndex *big.Int, + ) ([]*SetClaim, int, error) + GetUnsetClaimsPaged( + ctx context.Context, pageNumber, pageSize uint32, + globalIndex *big.Int, + ) ([]*UnsetClaim, 
int, error) } diff --git a/claimsync/types/claim_storager.go b/claimsync/types/claim_storager.go index c888b4cae..71edd02cc 100644 --- a/claimsync/types/claim_storager.go +++ b/claimsync/types/claim_storager.go @@ -4,33 +4,69 @@ import ( "context" "math/big" - "github.com/agglayer/aggkit/bridgesync" "github.com/agglayer/aggkit/db/compatibility" dbtypes "github.com/agglayer/aggkit/db/types" aggsync "github.com/agglayer/aggkit/sync" + "github.com/ethereum/go-ethereum/common" ) // Storage defines the interface for claim storage operations. // Each method accepts an optional tx dbtypes.Querier; pass nil to use the default DB connection. type ClaimStorager interface { // InsertBlock records a block so claims can reference it via foreign key - InsertBlock(tx dbtypes.Querier, blockNum uint64, blockHash string) error + InsertBlock(ctx context.Context, tx dbtypes.Querier, blockNum uint64, blockHash common.Hash) error // InsertClaim persists a single claim record - InsertClaim(tx dbtypes.Querier, claim bridgesync.Claim) error + InsertClaim(ctx context.Context, tx dbtypes.Querier, claim Claim) error // InsertUnsetClaim persists an unset claim record - InsertUnsetClaim(tx dbtypes.Querier, u bridgesync.UnsetClaim) error + InsertUnsetClaim(ctx context.Context, tx dbtypes.Querier, u UnsetClaim) error // InsertSetClaim persists a set claim record - InsertSetClaim(tx dbtypes.Querier, s bridgesync.SetClaim) error + InsertSetClaim(ctx context.Context, tx dbtypes.Querier, s SetClaim) error // GetClaims returns claims in [fromBlock, toBlock] using compaction logic - GetClaims(tx dbtypes.Querier, fromBlock, toBlock uint64) ([]bridgesync.Claim, error) + GetClaims(ctx context.Context, tx dbtypes.Querier, fromBlock, toBlock uint64) ([]Claim, error) // GetClaimsByGlobalIndex returns claims for the given global index using compaction logic - GetClaimsByGlobalIndex(tx dbtypes.Querier, globalIndex *big.Int) ([]bridgesync.Claim, error) - // GetLastProcessedBlock returns the highest block number 
stored - GetLastProcessedBlock(tx dbtypes.Querier) (uint64, error) + GetClaimsByGlobalIndex(ctx context.Context, tx dbtypes.Querier, globalIndex *big.Int) ([]Claim, error) + // GetFirstProcessedBlock returns the lowest block number stored if any. + // Returns (0, false, nil) if there are no blocks. + GetFirstProcessedBlock(ctx context.Context, tx dbtypes.Querier) (uint64, bool, error) + // GetLastProcessedBlock returns the highest block number stored if any + // it returns: + // - the highest block number stored, or 0 if there are no blocks + // - a boolean indicating whether a block was found (false if there are no blocks) + // - error if the operation failed, or nil if successful + GetLastProcessedBlock(ctx context.Context, tx dbtypes.Querier) (uint64, bool, error) // GetBoundaryBlockForClaimType returns the max block_num for claims of the given type - GetBoundaryBlockForClaimType(tx dbtypes.Querier, claimType bridgesync.ClaimType) (uint64, error) + GetBoundaryBlockForClaimType(ctx context.Context, tx dbtypes.Querier, claimType ClaimType) (uint64, error) // DeleteBlocksFrom deletes all blocks with num >= firstBlock (cascade-deletes claims etc.) 
- DeleteBlocksFrom(tx dbtypes.Querier, firstBlock uint64) (int64, error) + DeleteBlocksFrom(ctx context.Context, tx dbtypes.Querier, firstBlock uint64) (int64, error) + // GetClaimsByGER returns all DetailedClaimEvent claims with the given global exit root + GetClaimsByGER(ctx context.Context, globalExitRoot common.Hash) ([]*Claim, error) + // GetClaimsPaged returns claims for the given page parameters and filters, + // it returns: + // - the list of claims for the requested page + // - the total count of claims matching the filters (ignoring pagination) + // - error if the operation failed, or nil if successful + GetClaimsPaged( + ctx context.Context, pageNumber, pageSize uint32, networkIDs []uint32, globalIndex *big.Int, + ) ([]*Claim, int, error) + // GetSetClaimsPaged returns set claims for the given page parameters and filters, + // it returns: + // - the list of set claims for the requested page + // - the total count of set claims matching the filters (ignoring pagination) + // - error if the operation failed, or nil if successful + GetSetClaimsPaged( + ctx context.Context, pageNumber, pageSize uint32, + globalIndex *big.Int, + ) ([]*SetClaim, int, error) + // GetUnsetClaimsPaged returns unset claims for the given page parameters and filters, + // it returns: + // - the list of unset claims for the requested page + // - the total count of unset claims matching the filters (ignoring pagination) + // - error if the operation failed, or nil if successful + GetUnsetClaimsPaged( + ctx context.Context, pageNumber, pageSize uint32, + globalIndex *big.Int, + ) ([]*UnsetClaim, int, error) + // NewTx begins a new database transaction. 
NewTx(ctx context.Context) (dbtypes.Txer, error)
 
 	compatibility.CompatibilityDataStorager[aggsync.RuntimeData]
diff --git a/claimsync/types/claim_syncer.go b/claimsync/types/claim_syncer.go
new file mode 100644
index 000000000..97c7931e8
--- /dev/null
+++ b/claimsync/types/claim_syncer.go
@@ -0,0 +1,21 @@
+package types
+
+import (
+	"context"
+	"math/big"
+)
+
+type ClaimSyncer interface {
+	OriginNetwork() uint32
+	// Deprecated: GetLastProcessedBlock is superseded by GetProcessedBlockRange.
+	GetLastProcessedBlock(ctx context.Context) (uint64, bool, error)
+	//GetStatus(ctx context.Context) (Status, error)
+	// SetNextRequiredBlock sets the next required block number. It is used by aggsender, which
+	// sets the next required block to the one following the previous settled certificate.
+	// If the syncer has no blocks yet, this value is used as the starting point.
+	// If the syncer already has blocks, it checks that `blockNumber` is higher than the first synced block.
+	SetNextRequiredBlock(ctx context.Context, blockNumber uint64) error
+
+	GetClaims(ctx context.Context, fromBlock, toBlock uint64) ([]Claim, error)
+	GetClaimsByGlobalIndex(ctx context.Context, globalIndex *big.Int) ([]Claim, error)
+}
diff --git a/claimsync/types/mocks/mock_claim_storager.go b/claimsync/types/mocks/mock_claim_storager.go
new file mode 100644
index 000000000..d256bba47
--- /dev/null
+++ b/claimsync/types/mocks/mock_claim_storager.go
@@ -0,0 +1,1037 @@
+// Code generated by mockery. DO NOT EDIT.
+ +package mocks + +import ( + big "math/big" + + claimsynctypes "github.com/agglayer/aggkit/claimsync/types" + common "github.com/ethereum/go-ethereum/common" + + context "context" + + mock "github.com/stretchr/testify/mock" + + sync "github.com/agglayer/aggkit/sync" + + types "github.com/agglayer/aggkit/db/types" +) + +// ClaimStorager is an autogenerated mock type for the ClaimStorager type +type ClaimStorager struct { + mock.Mock +} + +type ClaimStorager_Expecter struct { + mock *mock.Mock +} + +func (_m *ClaimStorager) EXPECT() *ClaimStorager_Expecter { + return &ClaimStorager_Expecter{mock: &_m.Mock} +} + +// DeleteBlocksFrom provides a mock function with given fields: ctx, tx, firstBlock +func (_m *ClaimStorager) DeleteBlocksFrom(ctx context.Context, tx types.Querier, firstBlock uint64) (int64, error) { + ret := _m.Called(ctx, tx, firstBlock) + + if len(ret) == 0 { + panic("no return value specified for DeleteBlocksFrom") + } + + var r0 int64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, types.Querier, uint64) (int64, error)); ok { + return rf(ctx, tx, firstBlock) + } + if rf, ok := ret.Get(0).(func(context.Context, types.Querier, uint64) int64); ok { + r0 = rf(ctx, tx, firstBlock) + } else { + r0 = ret.Get(0).(int64) + } + + if rf, ok := ret.Get(1).(func(context.Context, types.Querier, uint64) error); ok { + r1 = rf(ctx, tx, firstBlock) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ClaimStorager_DeleteBlocksFrom_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteBlocksFrom' +type ClaimStorager_DeleteBlocksFrom_Call struct { + *mock.Call +} + +// DeleteBlocksFrom is a helper method to define mock.On call +// - ctx context.Context +// - tx types.Querier +// - firstBlock uint64 +func (_e *ClaimStorager_Expecter) DeleteBlocksFrom(ctx interface{}, tx interface{}, firstBlock interface{}) *ClaimStorager_DeleteBlocksFrom_Call { + return &ClaimStorager_DeleteBlocksFrom_Call{Call: 
_e.mock.On("DeleteBlocksFrom", ctx, tx, firstBlock)} +} + +func (_c *ClaimStorager_DeleteBlocksFrom_Call) Run(run func(ctx context.Context, tx types.Querier, firstBlock uint64)) *ClaimStorager_DeleteBlocksFrom_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(types.Querier), args[2].(uint64)) + }) + return _c +} + +func (_c *ClaimStorager_DeleteBlocksFrom_Call) Return(_a0 int64, _a1 error) *ClaimStorager_DeleteBlocksFrom_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *ClaimStorager_DeleteBlocksFrom_Call) RunAndReturn(run func(context.Context, types.Querier, uint64) (int64, error)) *ClaimStorager_DeleteBlocksFrom_Call { + _c.Call.Return(run) + return _c +} + +// GetBoundaryBlockForClaimType provides a mock function with given fields: ctx, tx, claimType +func (_m *ClaimStorager) GetBoundaryBlockForClaimType(ctx context.Context, tx types.Querier, claimType claimsynctypes.ClaimType) (uint64, error) { + ret := _m.Called(ctx, tx, claimType) + + if len(ret) == 0 { + panic("no return value specified for GetBoundaryBlockForClaimType") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, types.Querier, claimsynctypes.ClaimType) (uint64, error)); ok { + return rf(ctx, tx, claimType) + } + if rf, ok := ret.Get(0).(func(context.Context, types.Querier, claimsynctypes.ClaimType) uint64); ok { + r0 = rf(ctx, tx, claimType) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(context.Context, types.Querier, claimsynctypes.ClaimType) error); ok { + r1 = rf(ctx, tx, claimType) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ClaimStorager_GetBoundaryBlockForClaimType_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetBoundaryBlockForClaimType' +type ClaimStorager_GetBoundaryBlockForClaimType_Call struct { + *mock.Call +} + +// GetBoundaryBlockForClaimType is a helper method to define mock.On call +// - ctx 
context.Context +// - tx types.Querier +// - claimType claimsynctypes.ClaimType +func (_e *ClaimStorager_Expecter) GetBoundaryBlockForClaimType(ctx interface{}, tx interface{}, claimType interface{}) *ClaimStorager_GetBoundaryBlockForClaimType_Call { + return &ClaimStorager_GetBoundaryBlockForClaimType_Call{Call: _e.mock.On("GetBoundaryBlockForClaimType", ctx, tx, claimType)} +} + +func (_c *ClaimStorager_GetBoundaryBlockForClaimType_Call) Run(run func(ctx context.Context, tx types.Querier, claimType claimsynctypes.ClaimType)) *ClaimStorager_GetBoundaryBlockForClaimType_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(types.Querier), args[2].(claimsynctypes.ClaimType)) + }) + return _c +} + +func (_c *ClaimStorager_GetBoundaryBlockForClaimType_Call) Return(_a0 uint64, _a1 error) *ClaimStorager_GetBoundaryBlockForClaimType_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *ClaimStorager_GetBoundaryBlockForClaimType_Call) RunAndReturn(run func(context.Context, types.Querier, claimsynctypes.ClaimType) (uint64, error)) *ClaimStorager_GetBoundaryBlockForClaimType_Call { + _c.Call.Return(run) + return _c +} + +// GetClaims provides a mock function with given fields: ctx, tx, fromBlock, toBlock +func (_m *ClaimStorager) GetClaims(ctx context.Context, tx types.Querier, fromBlock uint64, toBlock uint64) ([]claimsynctypes.Claim, error) { + ret := _m.Called(ctx, tx, fromBlock, toBlock) + + if len(ret) == 0 { + panic("no return value specified for GetClaims") + } + + var r0 []claimsynctypes.Claim + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, types.Querier, uint64, uint64) ([]claimsynctypes.Claim, error)); ok { + return rf(ctx, tx, fromBlock, toBlock) + } + if rf, ok := ret.Get(0).(func(context.Context, types.Querier, uint64, uint64) []claimsynctypes.Claim); ok { + r0 = rf(ctx, tx, fromBlock, toBlock) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]claimsynctypes.Claim) + } + } + + if rf, ok := 
ret.Get(1).(func(context.Context, types.Querier, uint64, uint64) error); ok { + r1 = rf(ctx, tx, fromBlock, toBlock) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ClaimStorager_GetClaims_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetClaims' +type ClaimStorager_GetClaims_Call struct { + *mock.Call +} + +// GetClaims is a helper method to define mock.On call +// - ctx context.Context +// - tx types.Querier +// - fromBlock uint64 +// - toBlock uint64 +func (_e *ClaimStorager_Expecter) GetClaims(ctx interface{}, tx interface{}, fromBlock interface{}, toBlock interface{}) *ClaimStorager_GetClaims_Call { + return &ClaimStorager_GetClaims_Call{Call: _e.mock.On("GetClaims", ctx, tx, fromBlock, toBlock)} +} + +func (_c *ClaimStorager_GetClaims_Call) Run(run func(ctx context.Context, tx types.Querier, fromBlock uint64, toBlock uint64)) *ClaimStorager_GetClaims_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(types.Querier), args[2].(uint64), args[3].(uint64)) + }) + return _c +} + +func (_c *ClaimStorager_GetClaims_Call) Return(_a0 []claimsynctypes.Claim, _a1 error) *ClaimStorager_GetClaims_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *ClaimStorager_GetClaims_Call) RunAndReturn(run func(context.Context, types.Querier, uint64, uint64) ([]claimsynctypes.Claim, error)) *ClaimStorager_GetClaims_Call { + _c.Call.Return(run) + return _c +} + +// GetClaimsByGER provides a mock function with given fields: ctx, globalExitRoot +func (_m *ClaimStorager) GetClaimsByGER(ctx context.Context, globalExitRoot common.Hash) ([]*claimsynctypes.Claim, error) { + ret := _m.Called(ctx, globalExitRoot) + + if len(ret) == 0 { + panic("no return value specified for GetClaimsByGER") + } + + var r0 []*claimsynctypes.Claim + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) ([]*claimsynctypes.Claim, error)); ok { + return rf(ctx, globalExitRoot) + } + if rf, 
ok := ret.Get(0).(func(context.Context, common.Hash) []*claimsynctypes.Claim); ok { + r0 = rf(ctx, globalExitRoot) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*claimsynctypes.Claim) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, common.Hash) error); ok { + r1 = rf(ctx, globalExitRoot) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ClaimStorager_GetClaimsByGER_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetClaimsByGER' +type ClaimStorager_GetClaimsByGER_Call struct { + *mock.Call +} + +// GetClaimsByGER is a helper method to define mock.On call +// - ctx context.Context +// - globalExitRoot common.Hash +func (_e *ClaimStorager_Expecter) GetClaimsByGER(ctx interface{}, globalExitRoot interface{}) *ClaimStorager_GetClaimsByGER_Call { + return &ClaimStorager_GetClaimsByGER_Call{Call: _e.mock.On("GetClaimsByGER", ctx, globalExitRoot)} +} + +func (_c *ClaimStorager_GetClaimsByGER_Call) Run(run func(ctx context.Context, globalExitRoot common.Hash)) *ClaimStorager_GetClaimsByGER_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(common.Hash)) + }) + return _c +} + +func (_c *ClaimStorager_GetClaimsByGER_Call) Return(_a0 []*claimsynctypes.Claim, _a1 error) *ClaimStorager_GetClaimsByGER_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *ClaimStorager_GetClaimsByGER_Call) RunAndReturn(run func(context.Context, common.Hash) ([]*claimsynctypes.Claim, error)) *ClaimStorager_GetClaimsByGER_Call { + _c.Call.Return(run) + return _c +} + +// GetClaimsByGlobalIndex provides a mock function with given fields: ctx, tx, globalIndex +func (_m *ClaimStorager) GetClaimsByGlobalIndex(ctx context.Context, tx types.Querier, globalIndex *big.Int) ([]claimsynctypes.Claim, error) { + ret := _m.Called(ctx, tx, globalIndex) + + if len(ret) == 0 { + panic("no return value specified for GetClaimsByGlobalIndex") + } + + var r0 []claimsynctypes.Claim + var r1 error 
+ if rf, ok := ret.Get(0).(func(context.Context, types.Querier, *big.Int) ([]claimsynctypes.Claim, error)); ok { + return rf(ctx, tx, globalIndex) + } + if rf, ok := ret.Get(0).(func(context.Context, types.Querier, *big.Int) []claimsynctypes.Claim); ok { + r0 = rf(ctx, tx, globalIndex) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]claimsynctypes.Claim) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, types.Querier, *big.Int) error); ok { + r1 = rf(ctx, tx, globalIndex) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ClaimStorager_GetClaimsByGlobalIndex_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetClaimsByGlobalIndex' +type ClaimStorager_GetClaimsByGlobalIndex_Call struct { + *mock.Call +} + +// GetClaimsByGlobalIndex is a helper method to define mock.On call +// - ctx context.Context +// - tx types.Querier +// - globalIndex *big.Int +func (_e *ClaimStorager_Expecter) GetClaimsByGlobalIndex(ctx interface{}, tx interface{}, globalIndex interface{}) *ClaimStorager_GetClaimsByGlobalIndex_Call { + return &ClaimStorager_GetClaimsByGlobalIndex_Call{Call: _e.mock.On("GetClaimsByGlobalIndex", ctx, tx, globalIndex)} +} + +func (_c *ClaimStorager_GetClaimsByGlobalIndex_Call) Run(run func(ctx context.Context, tx types.Querier, globalIndex *big.Int)) *ClaimStorager_GetClaimsByGlobalIndex_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(types.Querier), args[2].(*big.Int)) + }) + return _c +} + +func (_c *ClaimStorager_GetClaimsByGlobalIndex_Call) Return(_a0 []claimsynctypes.Claim, _a1 error) *ClaimStorager_GetClaimsByGlobalIndex_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *ClaimStorager_GetClaimsByGlobalIndex_Call) RunAndReturn(run func(context.Context, types.Querier, *big.Int) ([]claimsynctypes.Claim, error)) *ClaimStorager_GetClaimsByGlobalIndex_Call { + _c.Call.Return(run) + return _c +} + +// GetClaimsPaged provides a mock function 
with given fields: ctx, pageNumber, pageSize, networkIDs, globalIndex +func (_m *ClaimStorager) GetClaimsPaged(ctx context.Context, pageNumber uint32, pageSize uint32, networkIDs []uint32, globalIndex *big.Int) ([]*claimsynctypes.Claim, int, error) { + ret := _m.Called(ctx, pageNumber, pageSize, networkIDs, globalIndex) + + if len(ret) == 0 { + panic("no return value specified for GetClaimsPaged") + } + + var r0 []*claimsynctypes.Claim + var r1 int + var r2 error + if rf, ok := ret.Get(0).(func(context.Context, uint32, uint32, []uint32, *big.Int) ([]*claimsynctypes.Claim, int, error)); ok { + return rf(ctx, pageNumber, pageSize, networkIDs, globalIndex) + } + if rf, ok := ret.Get(0).(func(context.Context, uint32, uint32, []uint32, *big.Int) []*claimsynctypes.Claim); ok { + r0 = rf(ctx, pageNumber, pageSize, networkIDs, globalIndex) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*claimsynctypes.Claim) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, uint32, uint32, []uint32, *big.Int) int); ok { + r1 = rf(ctx, pageNumber, pageSize, networkIDs, globalIndex) + } else { + r1 = ret.Get(1).(int) + } + + if rf, ok := ret.Get(2).(func(context.Context, uint32, uint32, []uint32, *big.Int) error); ok { + r2 = rf(ctx, pageNumber, pageSize, networkIDs, globalIndex) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// ClaimStorager_GetClaimsPaged_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetClaimsPaged' +type ClaimStorager_GetClaimsPaged_Call struct { + *mock.Call +} + +// GetClaimsPaged is a helper method to define mock.On call +// - ctx context.Context +// - pageNumber uint32 +// - pageSize uint32 +// - networkIDs []uint32 +// - globalIndex *big.Int +func (_e *ClaimStorager_Expecter) GetClaimsPaged(ctx interface{}, pageNumber interface{}, pageSize interface{}, networkIDs interface{}, globalIndex interface{}) *ClaimStorager_GetClaimsPaged_Call { + return &ClaimStorager_GetClaimsPaged_Call{Call: 
_e.mock.On("GetClaimsPaged", ctx, pageNumber, pageSize, networkIDs, globalIndex)} +} + +func (_c *ClaimStorager_GetClaimsPaged_Call) Run(run func(ctx context.Context, pageNumber uint32, pageSize uint32, networkIDs []uint32, globalIndex *big.Int)) *ClaimStorager_GetClaimsPaged_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint32), args[2].(uint32), args[3].([]uint32), args[4].(*big.Int)) + }) + return _c +} + +func (_c *ClaimStorager_GetClaimsPaged_Call) Return(_a0 []*claimsynctypes.Claim, _a1 int, _a2 error) *ClaimStorager_GetClaimsPaged_Call { + _c.Call.Return(_a0, _a1, _a2) + return _c +} + +func (_c *ClaimStorager_GetClaimsPaged_Call) RunAndReturn(run func(context.Context, uint32, uint32, []uint32, *big.Int) ([]*claimsynctypes.Claim, int, error)) *ClaimStorager_GetClaimsPaged_Call { + _c.Call.Return(run) + return _c +} + +// GetCompatibilityData provides a mock function with given fields: ctx, tx +func (_m *ClaimStorager) GetCompatibilityData(ctx context.Context, tx types.Querier) (bool, sync.RuntimeData, error) { + ret := _m.Called(ctx, tx) + + if len(ret) == 0 { + panic("no return value specified for GetCompatibilityData") + } + + var r0 bool + var r1 sync.RuntimeData + var r2 error + if rf, ok := ret.Get(0).(func(context.Context, types.Querier) (bool, sync.RuntimeData, error)); ok { + return rf(ctx, tx) + } + if rf, ok := ret.Get(0).(func(context.Context, types.Querier) bool); ok { + r0 = rf(ctx, tx) + } else { + r0 = ret.Get(0).(bool) + } + + if rf, ok := ret.Get(1).(func(context.Context, types.Querier) sync.RuntimeData); ok { + r1 = rf(ctx, tx) + } else { + r1 = ret.Get(1).(sync.RuntimeData) + } + + if rf, ok := ret.Get(2).(func(context.Context, types.Querier) error); ok { + r2 = rf(ctx, tx) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// ClaimStorager_GetCompatibilityData_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetCompatibilityData' +type 
ClaimStorager_GetCompatibilityData_Call struct { + *mock.Call +} + +// GetCompatibilityData is a helper method to define mock.On call +// - ctx context.Context +// - tx types.Querier +func (_e *ClaimStorager_Expecter) GetCompatibilityData(ctx interface{}, tx interface{}) *ClaimStorager_GetCompatibilityData_Call { + return &ClaimStorager_GetCompatibilityData_Call{Call: _e.mock.On("GetCompatibilityData", ctx, tx)} +} + +func (_c *ClaimStorager_GetCompatibilityData_Call) Run(run func(ctx context.Context, tx types.Querier)) *ClaimStorager_GetCompatibilityData_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(types.Querier)) + }) + return _c +} + +func (_c *ClaimStorager_GetCompatibilityData_Call) Return(_a0 bool, _a1 sync.RuntimeData, _a2 error) *ClaimStorager_GetCompatibilityData_Call { + _c.Call.Return(_a0, _a1, _a2) + return _c +} + +func (_c *ClaimStorager_GetCompatibilityData_Call) RunAndReturn(run func(context.Context, types.Querier) (bool, sync.RuntimeData, error)) *ClaimStorager_GetCompatibilityData_Call { + _c.Call.Return(run) + return _c +} + +// GetFirstProcessedBlock provides a mock function with given fields: ctx, tx +func (_m *ClaimStorager) GetFirstProcessedBlock(ctx context.Context, tx types.Querier) (uint64, bool, error) { + ret := _m.Called(ctx, tx) + + if len(ret) == 0 { + panic("no return value specified for GetFirstProcessedBlock") + } + + var r0 uint64 + var r1 bool + var r2 error + if rf, ok := ret.Get(0).(func(context.Context, types.Querier) (uint64, bool, error)); ok { + return rf(ctx, tx) + } + if rf, ok := ret.Get(0).(func(context.Context, types.Querier) uint64); ok { + r0 = rf(ctx, tx) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(context.Context, types.Querier) bool); ok { + r1 = rf(ctx, tx) + } else { + r1 = ret.Get(1).(bool) + } + + if rf, ok := ret.Get(2).(func(context.Context, types.Querier) error); ok { + r2 = rf(ctx, tx) + } else { + r2 = ret.Error(2) + } + + 
return r0, r1, r2 +} + +// ClaimStorager_GetFirstProcessedBlock_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetFirstProcessedBlock' +type ClaimStorager_GetFirstProcessedBlock_Call struct { + *mock.Call +} + +// GetFirstProcessedBlock is a helper method to define mock.On call +// - ctx context.Context +// - tx types.Querier +func (_e *ClaimStorager_Expecter) GetFirstProcessedBlock(ctx interface{}, tx interface{}) *ClaimStorager_GetFirstProcessedBlock_Call { + return &ClaimStorager_GetFirstProcessedBlock_Call{Call: _e.mock.On("GetFirstProcessedBlock", ctx, tx)} +} + +func (_c *ClaimStorager_GetFirstProcessedBlock_Call) Run(run func(ctx context.Context, tx types.Querier)) *ClaimStorager_GetFirstProcessedBlock_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(types.Querier)) + }) + return _c +} + +func (_c *ClaimStorager_GetFirstProcessedBlock_Call) Return(_a0 uint64, _a1 bool, _a2 error) *ClaimStorager_GetFirstProcessedBlock_Call { + _c.Call.Return(_a0, _a1, _a2) + return _c +} + +func (_c *ClaimStorager_GetFirstProcessedBlock_Call) RunAndReturn(run func(context.Context, types.Querier) (uint64, bool, error)) *ClaimStorager_GetFirstProcessedBlock_Call { + _c.Call.Return(run) + return _c +} + +// GetLastProcessedBlock provides a mock function with given fields: ctx, tx +func (_m *ClaimStorager) GetLastProcessedBlock(ctx context.Context, tx types.Querier) (uint64, bool, error) { + ret := _m.Called(ctx, tx) + + if len(ret) == 0 { + panic("no return value specified for GetLastProcessedBlock") + } + + var r0 uint64 + var r1 bool + var r2 error + if rf, ok := ret.Get(0).(func(context.Context, types.Querier) (uint64, bool, error)); ok { + return rf(ctx, tx) + } + if rf, ok := ret.Get(0).(func(context.Context, types.Querier) uint64); ok { + r0 = rf(ctx, tx) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(context.Context, types.Querier) bool); ok { + r1 = rf(ctx, 
tx) + } else { + r1 = ret.Get(1).(bool) + } + + if rf, ok := ret.Get(2).(func(context.Context, types.Querier) error); ok { + r2 = rf(ctx, tx) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// ClaimStorager_GetLastProcessedBlock_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetLastProcessedBlock' +type ClaimStorager_GetLastProcessedBlock_Call struct { + *mock.Call +} + +// GetLastProcessedBlock is a helper method to define mock.On call +// - ctx context.Context +// - tx types.Querier +func (_e *ClaimStorager_Expecter) GetLastProcessedBlock(ctx interface{}, tx interface{}) *ClaimStorager_GetLastProcessedBlock_Call { + return &ClaimStorager_GetLastProcessedBlock_Call{Call: _e.mock.On("GetLastProcessedBlock", ctx, tx)} +} + +func (_c *ClaimStorager_GetLastProcessedBlock_Call) Run(run func(ctx context.Context, tx types.Querier)) *ClaimStorager_GetLastProcessedBlock_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(types.Querier)) + }) + return _c +} + +func (_c *ClaimStorager_GetLastProcessedBlock_Call) Return(_a0 uint64, _a1 bool, _a2 error) *ClaimStorager_GetLastProcessedBlock_Call { + _c.Call.Return(_a0, _a1, _a2) + return _c +} + +func (_c *ClaimStorager_GetLastProcessedBlock_Call) RunAndReturn(run func(context.Context, types.Querier) (uint64, bool, error)) *ClaimStorager_GetLastProcessedBlock_Call { + _c.Call.Return(run) + return _c +} + +// GetSetClaimsPaged provides a mock function with given fields: ctx, pageNumber, pageSize, globalIndex +func (_m *ClaimStorager) GetSetClaimsPaged(ctx context.Context, pageNumber uint32, pageSize uint32, globalIndex *big.Int) ([]*claimsynctypes.SetClaim, int, error) { + ret := _m.Called(ctx, pageNumber, pageSize, globalIndex) + + if len(ret) == 0 { + panic("no return value specified for GetSetClaimsPaged") + } + + var r0 []*claimsynctypes.SetClaim + var r1 int + var r2 error + if rf, ok := ret.Get(0).(func(context.Context, 
uint32, uint32, *big.Int) ([]*claimsynctypes.SetClaim, int, error)); ok { + return rf(ctx, pageNumber, pageSize, globalIndex) + } + if rf, ok := ret.Get(0).(func(context.Context, uint32, uint32, *big.Int) []*claimsynctypes.SetClaim); ok { + r0 = rf(ctx, pageNumber, pageSize, globalIndex) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*claimsynctypes.SetClaim) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, uint32, uint32, *big.Int) int); ok { + r1 = rf(ctx, pageNumber, pageSize, globalIndex) + } else { + r1 = ret.Get(1).(int) + } + + if rf, ok := ret.Get(2).(func(context.Context, uint32, uint32, *big.Int) error); ok { + r2 = rf(ctx, pageNumber, pageSize, globalIndex) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// ClaimStorager_GetSetClaimsPaged_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetSetClaimsPaged' +type ClaimStorager_GetSetClaimsPaged_Call struct { + *mock.Call +} + +// GetSetClaimsPaged is a helper method to define mock.On call +// - ctx context.Context +// - pageNumber uint32 +// - pageSize uint32 +// - globalIndex *big.Int +func (_e *ClaimStorager_Expecter) GetSetClaimsPaged(ctx interface{}, pageNumber interface{}, pageSize interface{}, globalIndex interface{}) *ClaimStorager_GetSetClaimsPaged_Call { + return &ClaimStorager_GetSetClaimsPaged_Call{Call: _e.mock.On("GetSetClaimsPaged", ctx, pageNumber, pageSize, globalIndex)} +} + +func (_c *ClaimStorager_GetSetClaimsPaged_Call) Run(run func(ctx context.Context, pageNumber uint32, pageSize uint32, globalIndex *big.Int)) *ClaimStorager_GetSetClaimsPaged_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint32), args[2].(uint32), args[3].(*big.Int)) + }) + return _c +} + +func (_c *ClaimStorager_GetSetClaimsPaged_Call) Return(_a0 []*claimsynctypes.SetClaim, _a1 int, _a2 error) *ClaimStorager_GetSetClaimsPaged_Call { + _c.Call.Return(_a0, _a1, _a2) + return _c +} + +func (_c 
*ClaimStorager_GetSetClaimsPaged_Call) RunAndReturn(run func(context.Context, uint32, uint32, *big.Int) ([]*claimsynctypes.SetClaim, int, error)) *ClaimStorager_GetSetClaimsPaged_Call { + _c.Call.Return(run) + return _c +} + +// GetUnsetClaimsPaged provides a mock function with given fields: ctx, pageNumber, pageSize, globalIndex +func (_m *ClaimStorager) GetUnsetClaimsPaged(ctx context.Context, pageNumber uint32, pageSize uint32, globalIndex *big.Int) ([]*claimsynctypes.UnsetClaim, int, error) { + ret := _m.Called(ctx, pageNumber, pageSize, globalIndex) + + if len(ret) == 0 { + panic("no return value specified for GetUnsetClaimsPaged") + } + + var r0 []*claimsynctypes.UnsetClaim + var r1 int + var r2 error + if rf, ok := ret.Get(0).(func(context.Context, uint32, uint32, *big.Int) ([]*claimsynctypes.UnsetClaim, int, error)); ok { + return rf(ctx, pageNumber, pageSize, globalIndex) + } + if rf, ok := ret.Get(0).(func(context.Context, uint32, uint32, *big.Int) []*claimsynctypes.UnsetClaim); ok { + r0 = rf(ctx, pageNumber, pageSize, globalIndex) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*claimsynctypes.UnsetClaim) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, uint32, uint32, *big.Int) int); ok { + r1 = rf(ctx, pageNumber, pageSize, globalIndex) + } else { + r1 = ret.Get(1).(int) + } + + if rf, ok := ret.Get(2).(func(context.Context, uint32, uint32, *big.Int) error); ok { + r2 = rf(ctx, pageNumber, pageSize, globalIndex) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// ClaimStorager_GetUnsetClaimsPaged_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetUnsetClaimsPaged' +type ClaimStorager_GetUnsetClaimsPaged_Call struct { + *mock.Call +} + +// GetUnsetClaimsPaged is a helper method to define mock.On call +// - ctx context.Context +// - pageNumber uint32 +// - pageSize uint32 +// - globalIndex *big.Int +func (_e *ClaimStorager_Expecter) GetUnsetClaimsPaged(ctx interface{}, 
pageNumber interface{}, pageSize interface{}, globalIndex interface{}) *ClaimStorager_GetUnsetClaimsPaged_Call { + return &ClaimStorager_GetUnsetClaimsPaged_Call{Call: _e.mock.On("GetUnsetClaimsPaged", ctx, pageNumber, pageSize, globalIndex)} +} + +func (_c *ClaimStorager_GetUnsetClaimsPaged_Call) Run(run func(ctx context.Context, pageNumber uint32, pageSize uint32, globalIndex *big.Int)) *ClaimStorager_GetUnsetClaimsPaged_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint32), args[2].(uint32), args[3].(*big.Int)) + }) + return _c +} + +func (_c *ClaimStorager_GetUnsetClaimsPaged_Call) Return(_a0 []*claimsynctypes.UnsetClaim, _a1 int, _a2 error) *ClaimStorager_GetUnsetClaimsPaged_Call { + _c.Call.Return(_a0, _a1, _a2) + return _c +} + +func (_c *ClaimStorager_GetUnsetClaimsPaged_Call) RunAndReturn(run func(context.Context, uint32, uint32, *big.Int) ([]*claimsynctypes.UnsetClaim, int, error)) *ClaimStorager_GetUnsetClaimsPaged_Call { + _c.Call.Return(run) + return _c +} + +// InsertBlock provides a mock function with given fields: ctx, tx, blockNum, blockHash +func (_m *ClaimStorager) InsertBlock(ctx context.Context, tx types.Querier, blockNum uint64, blockHash common.Hash) error { + ret := _m.Called(ctx, tx, blockNum, blockHash) + + if len(ret) == 0 { + panic("no return value specified for InsertBlock") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, types.Querier, uint64, common.Hash) error); ok { + r0 = rf(ctx, tx, blockNum, blockHash) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// ClaimStorager_InsertBlock_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'InsertBlock' +type ClaimStorager_InsertBlock_Call struct { + *mock.Call +} + +// InsertBlock is a helper method to define mock.On call +// - ctx context.Context +// - tx types.Querier +// - blockNum uint64 +// - blockHash common.Hash +func (_e *ClaimStorager_Expecter) InsertBlock(ctx 
interface{}, tx interface{}, blockNum interface{}, blockHash interface{}) *ClaimStorager_InsertBlock_Call { + return &ClaimStorager_InsertBlock_Call{Call: _e.mock.On("InsertBlock", ctx, tx, blockNum, blockHash)} +} + +func (_c *ClaimStorager_InsertBlock_Call) Run(run func(ctx context.Context, tx types.Querier, blockNum uint64, blockHash common.Hash)) *ClaimStorager_InsertBlock_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(types.Querier), args[2].(uint64), args[3].(common.Hash)) + }) + return _c +} + +func (_c *ClaimStorager_InsertBlock_Call) Return(_a0 error) *ClaimStorager_InsertBlock_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *ClaimStorager_InsertBlock_Call) RunAndReturn(run func(context.Context, types.Querier, uint64, common.Hash) error) *ClaimStorager_InsertBlock_Call { + _c.Call.Return(run) + return _c +} + +// InsertClaim provides a mock function with given fields: ctx, tx, claim +func (_m *ClaimStorager) InsertClaim(ctx context.Context, tx types.Querier, claim claimsynctypes.Claim) error { + ret := _m.Called(ctx, tx, claim) + + if len(ret) == 0 { + panic("no return value specified for InsertClaim") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, types.Querier, claimsynctypes.Claim) error); ok { + r0 = rf(ctx, tx, claim) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// ClaimStorager_InsertClaim_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'InsertClaim' +type ClaimStorager_InsertClaim_Call struct { + *mock.Call +} + +// InsertClaim is a helper method to define mock.On call +// - ctx context.Context +// - tx types.Querier +// - claim claimsynctypes.Claim +func (_e *ClaimStorager_Expecter) InsertClaim(ctx interface{}, tx interface{}, claim interface{}) *ClaimStorager_InsertClaim_Call { + return &ClaimStorager_InsertClaim_Call{Call: _e.mock.On("InsertClaim", ctx, tx, claim)} +} + +func (_c *ClaimStorager_InsertClaim_Call) Run(run 
func(ctx context.Context, tx types.Querier, claim claimsynctypes.Claim)) *ClaimStorager_InsertClaim_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(types.Querier), args[2].(claimsynctypes.Claim)) + }) + return _c +} + +func (_c *ClaimStorager_InsertClaim_Call) Return(_a0 error) *ClaimStorager_InsertClaim_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *ClaimStorager_InsertClaim_Call) RunAndReturn(run func(context.Context, types.Querier, claimsynctypes.Claim) error) *ClaimStorager_InsertClaim_Call { + _c.Call.Return(run) + return _c +} + +// InsertSetClaim provides a mock function with given fields: ctx, tx, s +func (_m *ClaimStorager) InsertSetClaim(ctx context.Context, tx types.Querier, s claimsynctypes.SetClaim) error { + ret := _m.Called(ctx, tx, s) + + if len(ret) == 0 { + panic("no return value specified for InsertSetClaim") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, types.Querier, claimsynctypes.SetClaim) error); ok { + r0 = rf(ctx, tx, s) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// ClaimStorager_InsertSetClaim_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'InsertSetClaim' +type ClaimStorager_InsertSetClaim_Call struct { + *mock.Call +} + +// InsertSetClaim is a helper method to define mock.On call +// - ctx context.Context +// - tx types.Querier +// - s claimsynctypes.SetClaim +func (_e *ClaimStorager_Expecter) InsertSetClaim(ctx interface{}, tx interface{}, s interface{}) *ClaimStorager_InsertSetClaim_Call { + return &ClaimStorager_InsertSetClaim_Call{Call: _e.mock.On("InsertSetClaim", ctx, tx, s)} +} + +func (_c *ClaimStorager_InsertSetClaim_Call) Run(run func(ctx context.Context, tx types.Querier, s claimsynctypes.SetClaim)) *ClaimStorager_InsertSetClaim_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(types.Querier), args[2].(claimsynctypes.SetClaim)) + }) + return _c +} + +func 
(_c *ClaimStorager_InsertSetClaim_Call) Return(_a0 error) *ClaimStorager_InsertSetClaim_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *ClaimStorager_InsertSetClaim_Call) RunAndReturn(run func(context.Context, types.Querier, claimsynctypes.SetClaim) error) *ClaimStorager_InsertSetClaim_Call { + _c.Call.Return(run) + return _c +} + +// InsertUnsetClaim provides a mock function with given fields: ctx, tx, u +func (_m *ClaimStorager) InsertUnsetClaim(ctx context.Context, tx types.Querier, u claimsynctypes.UnsetClaim) error { + ret := _m.Called(ctx, tx, u) + + if len(ret) == 0 { + panic("no return value specified for InsertUnsetClaim") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, types.Querier, claimsynctypes.UnsetClaim) error); ok { + r0 = rf(ctx, tx, u) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// ClaimStorager_InsertUnsetClaim_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'InsertUnsetClaim' +type ClaimStorager_InsertUnsetClaim_Call struct { + *mock.Call +} + +// InsertUnsetClaim is a helper method to define mock.On call +// - ctx context.Context +// - tx types.Querier +// - u claimsynctypes.UnsetClaim +func (_e *ClaimStorager_Expecter) InsertUnsetClaim(ctx interface{}, tx interface{}, u interface{}) *ClaimStorager_InsertUnsetClaim_Call { + return &ClaimStorager_InsertUnsetClaim_Call{Call: _e.mock.On("InsertUnsetClaim", ctx, tx, u)} +} + +func (_c *ClaimStorager_InsertUnsetClaim_Call) Run(run func(ctx context.Context, tx types.Querier, u claimsynctypes.UnsetClaim)) *ClaimStorager_InsertUnsetClaim_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(types.Querier), args[2].(claimsynctypes.UnsetClaim)) + }) + return _c +} + +func (_c *ClaimStorager_InsertUnsetClaim_Call) Return(_a0 error) *ClaimStorager_InsertUnsetClaim_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *ClaimStorager_InsertUnsetClaim_Call) RunAndReturn(run 
func(context.Context, types.Querier, claimsynctypes.UnsetClaim) error) *ClaimStorager_InsertUnsetClaim_Call { + _c.Call.Return(run) + return _c +} + +// NewTx provides a mock function with given fields: ctx +func (_m *ClaimStorager) NewTx(ctx context.Context) (types.Txer, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for NewTx") + } + + var r0 types.Txer + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (types.Txer, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) types.Txer); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(types.Txer) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ClaimStorager_NewTx_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'NewTx' +type ClaimStorager_NewTx_Call struct { + *mock.Call +} + +// NewTx is a helper method to define mock.On call +// - ctx context.Context +func (_e *ClaimStorager_Expecter) NewTx(ctx interface{}) *ClaimStorager_NewTx_Call { + return &ClaimStorager_NewTx_Call{Call: _e.mock.On("NewTx", ctx)} +} + +func (_c *ClaimStorager_NewTx_Call) Run(run func(ctx context.Context)) *ClaimStorager_NewTx_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *ClaimStorager_NewTx_Call) Return(_a0 types.Txer, _a1 error) *ClaimStorager_NewTx_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *ClaimStorager_NewTx_Call) RunAndReturn(run func(context.Context) (types.Txer, error)) *ClaimStorager_NewTx_Call { + _c.Call.Return(run) + return _c +} + +// SetCompatibilityData provides a mock function with given fields: ctx, tx, data +func (_m *ClaimStorager) SetCompatibilityData(ctx context.Context, tx types.Querier, data sync.RuntimeData) error { + ret := _m.Called(ctx, tx, data) + + if len(ret) == 0 { + panic("no 
return value specified for SetCompatibilityData") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, types.Querier, sync.RuntimeData) error); ok { + r0 = rf(ctx, tx, data) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// ClaimStorager_SetCompatibilityData_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SetCompatibilityData' +type ClaimStorager_SetCompatibilityData_Call struct { + *mock.Call +} + +// SetCompatibilityData is a helper method to define mock.On call +// - ctx context.Context +// - tx types.Querier +// - data sync.RuntimeData +func (_e *ClaimStorager_Expecter) SetCompatibilityData(ctx interface{}, tx interface{}, data interface{}) *ClaimStorager_SetCompatibilityData_Call { + return &ClaimStorager_SetCompatibilityData_Call{Call: _e.mock.On("SetCompatibilityData", ctx, tx, data)} +} + +func (_c *ClaimStorager_SetCompatibilityData_Call) Run(run func(ctx context.Context, tx types.Querier, data sync.RuntimeData)) *ClaimStorager_SetCompatibilityData_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(types.Querier), args[2].(sync.RuntimeData)) + }) + return _c +} + +func (_c *ClaimStorager_SetCompatibilityData_Call) Return(_a0 error) *ClaimStorager_SetCompatibilityData_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *ClaimStorager_SetCompatibilityData_Call) RunAndReturn(run func(context.Context, types.Querier, sync.RuntimeData) error) *ClaimStorager_SetCompatibilityData_Call { + _c.Call.Return(run) + return _c +} + +// NewClaimStorager creates a new instance of ClaimStorager. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewClaimStorager(t interface { + mock.TestingT + Cleanup(func()) +}) *ClaimStorager { + mock := &ClaimStorager{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/claimsync/types/mocks/mock_claim_syncer.go b/claimsync/types/mocks/mock_claim_syncer.go new file mode 100644 index 000000000..27a6b8bc8 --- /dev/null +++ b/claimsync/types/mocks/mock_claim_syncer.go @@ -0,0 +1,313 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + context "context" + big "math/big" + + mock "github.com/stretchr/testify/mock" + + types "github.com/agglayer/aggkit/claimsync/types" +) + +// ClaimSyncer is an autogenerated mock type for the ClaimSyncer type +type ClaimSyncer struct { + mock.Mock +} + +type ClaimSyncer_Expecter struct { + mock *mock.Mock +} + +func (_m *ClaimSyncer) EXPECT() *ClaimSyncer_Expecter { + return &ClaimSyncer_Expecter{mock: &_m.Mock} +} + +// GetClaims provides a mock function with given fields: ctx, fromBlock, toBlock +func (_m *ClaimSyncer) GetClaims(ctx context.Context, fromBlock uint64, toBlock uint64) ([]types.Claim, error) { + ret := _m.Called(ctx, fromBlock, toBlock) + + if len(ret) == 0 { + panic("no return value specified for GetClaims") + } + + var r0 []types.Claim + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint64, uint64) ([]types.Claim, error)); ok { + return rf(ctx, fromBlock, toBlock) + } + if rf, ok := ret.Get(0).(func(context.Context, uint64, uint64) []types.Claim); ok { + r0 = rf(ctx, fromBlock, toBlock) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]types.Claim) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, uint64, uint64) error); ok { + r1 = rf(ctx, fromBlock, toBlock) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ClaimSyncer_GetClaims_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetClaims' +type ClaimSyncer_GetClaims_Call struct { + *mock.Call +} + +// 
GetClaims is a helper method to define mock.On call +// - ctx context.Context +// - fromBlock uint64 +// - toBlock uint64 +func (_e *ClaimSyncer_Expecter) GetClaims(ctx interface{}, fromBlock interface{}, toBlock interface{}) *ClaimSyncer_GetClaims_Call { + return &ClaimSyncer_GetClaims_Call{Call: _e.mock.On("GetClaims", ctx, fromBlock, toBlock)} +} + +func (_c *ClaimSyncer_GetClaims_Call) Run(run func(ctx context.Context, fromBlock uint64, toBlock uint64)) *ClaimSyncer_GetClaims_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint64), args[2].(uint64)) + }) + return _c +} + +func (_c *ClaimSyncer_GetClaims_Call) Return(_a0 []types.Claim, _a1 error) *ClaimSyncer_GetClaims_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *ClaimSyncer_GetClaims_Call) RunAndReturn(run func(context.Context, uint64, uint64) ([]types.Claim, error)) *ClaimSyncer_GetClaims_Call { + _c.Call.Return(run) + return _c +} + +// GetClaimsByGlobalIndex provides a mock function with given fields: ctx, globalIndex +func (_m *ClaimSyncer) GetClaimsByGlobalIndex(ctx context.Context, globalIndex *big.Int) ([]types.Claim, error) { + ret := _m.Called(ctx, globalIndex) + + if len(ret) == 0 { + panic("no return value specified for GetClaimsByGlobalIndex") + } + + var r0 []types.Claim + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *big.Int) ([]types.Claim, error)); ok { + return rf(ctx, globalIndex) + } + if rf, ok := ret.Get(0).(func(context.Context, *big.Int) []types.Claim); ok { + r0 = rf(ctx, globalIndex) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]types.Claim) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *big.Int) error); ok { + r1 = rf(ctx, globalIndex) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ClaimSyncer_GetClaimsByGlobalIndex_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetClaimsByGlobalIndex' +type 
ClaimSyncer_GetClaimsByGlobalIndex_Call struct { + *mock.Call +} + +// GetClaimsByGlobalIndex is a helper method to define mock.On call +// - ctx context.Context +// - globalIndex *big.Int +func (_e *ClaimSyncer_Expecter) GetClaimsByGlobalIndex(ctx interface{}, globalIndex interface{}) *ClaimSyncer_GetClaimsByGlobalIndex_Call { + return &ClaimSyncer_GetClaimsByGlobalIndex_Call{Call: _e.mock.On("GetClaimsByGlobalIndex", ctx, globalIndex)} +} + +func (_c *ClaimSyncer_GetClaimsByGlobalIndex_Call) Run(run func(ctx context.Context, globalIndex *big.Int)) *ClaimSyncer_GetClaimsByGlobalIndex_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(*big.Int)) + }) + return _c +} + +func (_c *ClaimSyncer_GetClaimsByGlobalIndex_Call) Return(_a0 []types.Claim, _a1 error) *ClaimSyncer_GetClaimsByGlobalIndex_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *ClaimSyncer_GetClaimsByGlobalIndex_Call) RunAndReturn(run func(context.Context, *big.Int) ([]types.Claim, error)) *ClaimSyncer_GetClaimsByGlobalIndex_Call { + _c.Call.Return(run) + return _c +} + +// GetLastProcessedBlock provides a mock function with given fields: ctx +func (_m *ClaimSyncer) GetLastProcessedBlock(ctx context.Context) (uint64, bool, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for GetLastProcessedBlock") + } + + var r0 uint64 + var r1 bool + var r2 error + if rf, ok := ret.Get(0).(func(context.Context) (uint64, bool, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) uint64); ok { + r0 = rf(ctx) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(context.Context) bool); ok { + r1 = rf(ctx) + } else { + r1 = ret.Get(1).(bool) + } + + if rf, ok := ret.Get(2).(func(context.Context) error); ok { + r2 = rf(ctx) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// ClaimSyncer_GetLastProcessedBlock_Call is a *mock.Call that shadows Run/Return methods with 
type explicit version for method 'GetLastProcessedBlock' +type ClaimSyncer_GetLastProcessedBlock_Call struct { + *mock.Call +} + +// GetLastProcessedBlock is a helper method to define mock.On call +// - ctx context.Context +func (_e *ClaimSyncer_Expecter) GetLastProcessedBlock(ctx interface{}) *ClaimSyncer_GetLastProcessedBlock_Call { + return &ClaimSyncer_GetLastProcessedBlock_Call{Call: _e.mock.On("GetLastProcessedBlock", ctx)} +} + +func (_c *ClaimSyncer_GetLastProcessedBlock_Call) Run(run func(ctx context.Context)) *ClaimSyncer_GetLastProcessedBlock_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *ClaimSyncer_GetLastProcessedBlock_Call) Return(_a0 uint64, _a1 bool, _a2 error) *ClaimSyncer_GetLastProcessedBlock_Call { + _c.Call.Return(_a0, _a1, _a2) + return _c +} + +func (_c *ClaimSyncer_GetLastProcessedBlock_Call) RunAndReturn(run func(context.Context) (uint64, bool, error)) *ClaimSyncer_GetLastProcessedBlock_Call { + _c.Call.Return(run) + return _c +} + +// OriginNetwork provides a mock function with no fields +func (_m *ClaimSyncer) OriginNetwork() uint32 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for OriginNetwork") + } + + var r0 uint32 + if rf, ok := ret.Get(0).(func() uint32); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint32) + } + + return r0 +} + +// ClaimSyncer_OriginNetwork_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'OriginNetwork' +type ClaimSyncer_OriginNetwork_Call struct { + *mock.Call +} + +// OriginNetwork is a helper method to define mock.On call +func (_e *ClaimSyncer_Expecter) OriginNetwork() *ClaimSyncer_OriginNetwork_Call { + return &ClaimSyncer_OriginNetwork_Call{Call: _e.mock.On("OriginNetwork")} +} + +func (_c *ClaimSyncer_OriginNetwork_Call) Run(run func()) *ClaimSyncer_OriginNetwork_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c 
*ClaimSyncer_OriginNetwork_Call) Return(_a0 uint32) *ClaimSyncer_OriginNetwork_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *ClaimSyncer_OriginNetwork_Call) RunAndReturn(run func() uint32) *ClaimSyncer_OriginNetwork_Call { + _c.Call.Return(run) + return _c +} + +// SetNextRequiredBlock provides a mock function with given fields: ctx, blockNumber +func (_m *ClaimSyncer) SetNextRequiredBlock(ctx context.Context, blockNumber uint64) error { + ret := _m.Called(ctx, blockNumber) + + if len(ret) == 0 { + panic("no return value specified for SetNextRequiredBlock") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, uint64) error); ok { + r0 = rf(ctx, blockNumber) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// ClaimSyncer_SetNextRequiredBlock_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SetNextRequiredBlock' +type ClaimSyncer_SetNextRequiredBlock_Call struct { + *mock.Call +} + +// SetNextRequiredBlock is a helper method to define mock.On call +// - ctx context.Context +// - blockNumber uint64 +func (_e *ClaimSyncer_Expecter) SetNextRequiredBlock(ctx interface{}, blockNumber interface{}) *ClaimSyncer_SetNextRequiredBlock_Call { + return &ClaimSyncer_SetNextRequiredBlock_Call{Call: _e.mock.On("SetNextRequiredBlock", ctx, blockNumber)} +} + +func (_c *ClaimSyncer_SetNextRequiredBlock_Call) Run(run func(ctx context.Context, blockNumber uint64)) *ClaimSyncer_SetNextRequiredBlock_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint64)) + }) + return _c +} + +func (_c *ClaimSyncer_SetNextRequiredBlock_Call) Return(_a0 error) *ClaimSyncer_SetNextRequiredBlock_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *ClaimSyncer_SetNextRequiredBlock_Call) RunAndReturn(run func(context.Context, uint64) error) *ClaimSyncer_SetNextRequiredBlock_Call { + _c.Call.Return(run) + return _c +} + +// NewClaimSyncer creates a new instance of ClaimSyncer. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewClaimSyncer(t interface { + mock.TestingT + Cleanup(func()) +}) *ClaimSyncer { + mock := &ClaimSyncer{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/claimsync/types/mocks/mock_claims_reader.go b/claimsync/types/mocks/mock_claims_reader.go new file mode 100644 index 000000000..441100cd9 --- /dev/null +++ b/claimsync/types/mocks/mock_claims_reader.go @@ -0,0 +1,550 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + big "math/big" + + claimsynctypes "github.com/agglayer/aggkit/claimsync/types" + common "github.com/ethereum/go-ethereum/common" + + context "context" + + mock "github.com/stretchr/testify/mock" + + types "github.com/agglayer/aggkit/db/types" +) + +// ClaimsReader is an autogenerated mock type for the ClaimsReader type +type ClaimsReader struct { + mock.Mock +} + +type ClaimsReader_Expecter struct { + mock *mock.Mock +} + +func (_m *ClaimsReader) EXPECT() *ClaimsReader_Expecter { + return &ClaimsReader_Expecter{mock: &_m.Mock} +} + +// GetBoundaryBlockForClaimType provides a mock function with given fields: ctx, tx, claimType +func (_m *ClaimsReader) GetBoundaryBlockForClaimType(ctx context.Context, tx types.Querier, claimType claimsynctypes.ClaimType) (uint64, error) { + ret := _m.Called(ctx, tx, claimType) + + if len(ret) == 0 { + panic("no return value specified for GetBoundaryBlockForClaimType") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, types.Querier, claimsynctypes.ClaimType) (uint64, error)); ok { + return rf(ctx, tx, claimType) + } + if rf, ok := ret.Get(0).(func(context.Context, types.Querier, claimsynctypes.ClaimType) uint64); ok { + r0 = rf(ctx, tx, claimType) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := 
ret.Get(1).(func(context.Context, types.Querier, claimsynctypes.ClaimType) error); ok { + r1 = rf(ctx, tx, claimType) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ClaimsReader_GetBoundaryBlockForClaimType_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetBoundaryBlockForClaimType' +type ClaimsReader_GetBoundaryBlockForClaimType_Call struct { + *mock.Call +} + +// GetBoundaryBlockForClaimType is a helper method to define mock.On call +// - ctx context.Context +// - tx types.Querier +// - claimType claimsynctypes.ClaimType +func (_e *ClaimsReader_Expecter) GetBoundaryBlockForClaimType(ctx interface{}, tx interface{}, claimType interface{}) *ClaimsReader_GetBoundaryBlockForClaimType_Call { + return &ClaimsReader_GetBoundaryBlockForClaimType_Call{Call: _e.mock.On("GetBoundaryBlockForClaimType", ctx, tx, claimType)} +} + +func (_c *ClaimsReader_GetBoundaryBlockForClaimType_Call) Run(run func(ctx context.Context, tx types.Querier, claimType claimsynctypes.ClaimType)) *ClaimsReader_GetBoundaryBlockForClaimType_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(types.Querier), args[2].(claimsynctypes.ClaimType)) + }) + return _c +} + +func (_c *ClaimsReader_GetBoundaryBlockForClaimType_Call) Return(_a0 uint64, _a1 error) *ClaimsReader_GetBoundaryBlockForClaimType_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *ClaimsReader_GetBoundaryBlockForClaimType_Call) RunAndReturn(run func(context.Context, types.Querier, claimsynctypes.ClaimType) (uint64, error)) *ClaimsReader_GetBoundaryBlockForClaimType_Call { + _c.Call.Return(run) + return _c +} + +// GetClaims provides a mock function with given fields: ctx, tx, fromBlock, toBlock +func (_m *ClaimsReader) GetClaims(ctx context.Context, tx types.Querier, fromBlock uint64, toBlock uint64) ([]claimsynctypes.Claim, error) { + ret := _m.Called(ctx, tx, fromBlock, toBlock) + + if len(ret) == 0 { + panic("no return value 
specified for GetClaims") + } + + var r0 []claimsynctypes.Claim + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, types.Querier, uint64, uint64) ([]claimsynctypes.Claim, error)); ok { + return rf(ctx, tx, fromBlock, toBlock) + } + if rf, ok := ret.Get(0).(func(context.Context, types.Querier, uint64, uint64) []claimsynctypes.Claim); ok { + r0 = rf(ctx, tx, fromBlock, toBlock) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]claimsynctypes.Claim) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, types.Querier, uint64, uint64) error); ok { + r1 = rf(ctx, tx, fromBlock, toBlock) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ClaimsReader_GetClaims_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetClaims' +type ClaimsReader_GetClaims_Call struct { + *mock.Call +} + +// GetClaims is a helper method to define mock.On call +// - ctx context.Context +// - tx types.Querier +// - fromBlock uint64 +// - toBlock uint64 +func (_e *ClaimsReader_Expecter) GetClaims(ctx interface{}, tx interface{}, fromBlock interface{}, toBlock interface{}) *ClaimsReader_GetClaims_Call { + return &ClaimsReader_GetClaims_Call{Call: _e.mock.On("GetClaims", ctx, tx, fromBlock, toBlock)} +} + +func (_c *ClaimsReader_GetClaims_Call) Run(run func(ctx context.Context, tx types.Querier, fromBlock uint64, toBlock uint64)) *ClaimsReader_GetClaims_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(types.Querier), args[2].(uint64), args[3].(uint64)) + }) + return _c +} + +func (_c *ClaimsReader_GetClaims_Call) Return(_a0 []claimsynctypes.Claim, _a1 error) *ClaimsReader_GetClaims_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *ClaimsReader_GetClaims_Call) RunAndReturn(run func(context.Context, types.Querier, uint64, uint64) ([]claimsynctypes.Claim, error)) *ClaimsReader_GetClaims_Call { + _c.Call.Return(run) + return _c +} + +// GetClaimsByGER provides a mock function 
with given fields: ctx, globalExitRoot +func (_m *ClaimsReader) GetClaimsByGER(ctx context.Context, globalExitRoot common.Hash) ([]*claimsynctypes.Claim, error) { + ret := _m.Called(ctx, globalExitRoot) + + if len(ret) == 0 { + panic("no return value specified for GetClaimsByGER") + } + + var r0 []*claimsynctypes.Claim + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) ([]*claimsynctypes.Claim, error)); ok { + return rf(ctx, globalExitRoot) + } + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) []*claimsynctypes.Claim); ok { + r0 = rf(ctx, globalExitRoot) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*claimsynctypes.Claim) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, common.Hash) error); ok { + r1 = rf(ctx, globalExitRoot) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ClaimsReader_GetClaimsByGER_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetClaimsByGER' +type ClaimsReader_GetClaimsByGER_Call struct { + *mock.Call +} + +// GetClaimsByGER is a helper method to define mock.On call +// - ctx context.Context +// - globalExitRoot common.Hash +func (_e *ClaimsReader_Expecter) GetClaimsByGER(ctx interface{}, globalExitRoot interface{}) *ClaimsReader_GetClaimsByGER_Call { + return &ClaimsReader_GetClaimsByGER_Call{Call: _e.mock.On("GetClaimsByGER", ctx, globalExitRoot)} +} + +func (_c *ClaimsReader_GetClaimsByGER_Call) Run(run func(ctx context.Context, globalExitRoot common.Hash)) *ClaimsReader_GetClaimsByGER_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(common.Hash)) + }) + return _c +} + +func (_c *ClaimsReader_GetClaimsByGER_Call) Return(_a0 []*claimsynctypes.Claim, _a1 error) *ClaimsReader_GetClaimsByGER_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *ClaimsReader_GetClaimsByGER_Call) RunAndReturn(run func(context.Context, common.Hash) ([]*claimsynctypes.Claim, error)) 
*ClaimsReader_GetClaimsByGER_Call { + _c.Call.Return(run) + return _c +} + +// GetClaimsByGlobalIndex provides a mock function with given fields: ctx, tx, globalIndex +func (_m *ClaimsReader) GetClaimsByGlobalIndex(ctx context.Context, tx types.Querier, globalIndex *big.Int) ([]claimsynctypes.Claim, error) { + ret := _m.Called(ctx, tx, globalIndex) + + if len(ret) == 0 { + panic("no return value specified for GetClaimsByGlobalIndex") + } + + var r0 []claimsynctypes.Claim + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, types.Querier, *big.Int) ([]claimsynctypes.Claim, error)); ok { + return rf(ctx, tx, globalIndex) + } + if rf, ok := ret.Get(0).(func(context.Context, types.Querier, *big.Int) []claimsynctypes.Claim); ok { + r0 = rf(ctx, tx, globalIndex) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]claimsynctypes.Claim) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, types.Querier, *big.Int) error); ok { + r1 = rf(ctx, tx, globalIndex) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ClaimsReader_GetClaimsByGlobalIndex_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetClaimsByGlobalIndex' +type ClaimsReader_GetClaimsByGlobalIndex_Call struct { + *mock.Call +} + +// GetClaimsByGlobalIndex is a helper method to define mock.On call +// - ctx context.Context +// - tx types.Querier +// - globalIndex *big.Int +func (_e *ClaimsReader_Expecter) GetClaimsByGlobalIndex(ctx interface{}, tx interface{}, globalIndex interface{}) *ClaimsReader_GetClaimsByGlobalIndex_Call { + return &ClaimsReader_GetClaimsByGlobalIndex_Call{Call: _e.mock.On("GetClaimsByGlobalIndex", ctx, tx, globalIndex)} +} + +func (_c *ClaimsReader_GetClaimsByGlobalIndex_Call) Run(run func(ctx context.Context, tx types.Querier, globalIndex *big.Int)) *ClaimsReader_GetClaimsByGlobalIndex_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(types.Querier), args[2].(*big.Int)) + }) + 
return _c +} + +func (_c *ClaimsReader_GetClaimsByGlobalIndex_Call) Return(_a0 []claimsynctypes.Claim, _a1 error) *ClaimsReader_GetClaimsByGlobalIndex_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *ClaimsReader_GetClaimsByGlobalIndex_Call) RunAndReturn(run func(context.Context, types.Querier, *big.Int) ([]claimsynctypes.Claim, error)) *ClaimsReader_GetClaimsByGlobalIndex_Call { + _c.Call.Return(run) + return _c +} + +// GetClaimsPaged provides a mock function with given fields: ctx, pageNumber, pageSize, networkIDs, globalIndex +func (_m *ClaimsReader) GetClaimsPaged(ctx context.Context, pageNumber uint32, pageSize uint32, networkIDs []uint32, globalIndex *big.Int) ([]*claimsynctypes.Claim, int, error) { + ret := _m.Called(ctx, pageNumber, pageSize, networkIDs, globalIndex) + + if len(ret) == 0 { + panic("no return value specified for GetClaimsPaged") + } + + var r0 []*claimsynctypes.Claim + var r1 int + var r2 error + if rf, ok := ret.Get(0).(func(context.Context, uint32, uint32, []uint32, *big.Int) ([]*claimsynctypes.Claim, int, error)); ok { + return rf(ctx, pageNumber, pageSize, networkIDs, globalIndex) + } + if rf, ok := ret.Get(0).(func(context.Context, uint32, uint32, []uint32, *big.Int) []*claimsynctypes.Claim); ok { + r0 = rf(ctx, pageNumber, pageSize, networkIDs, globalIndex) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*claimsynctypes.Claim) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, uint32, uint32, []uint32, *big.Int) int); ok { + r1 = rf(ctx, pageNumber, pageSize, networkIDs, globalIndex) + } else { + r1 = ret.Get(1).(int) + } + + if rf, ok := ret.Get(2).(func(context.Context, uint32, uint32, []uint32, *big.Int) error); ok { + r2 = rf(ctx, pageNumber, pageSize, networkIDs, globalIndex) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// ClaimsReader_GetClaimsPaged_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetClaimsPaged' +type 
ClaimsReader_GetClaimsPaged_Call struct { + *mock.Call +} + +// GetClaimsPaged is a helper method to define mock.On call +// - ctx context.Context +// - pageNumber uint32 +// - pageSize uint32 +// - networkIDs []uint32 +// - globalIndex *big.Int +func (_e *ClaimsReader_Expecter) GetClaimsPaged(ctx interface{}, pageNumber interface{}, pageSize interface{}, networkIDs interface{}, globalIndex interface{}) *ClaimsReader_GetClaimsPaged_Call { + return &ClaimsReader_GetClaimsPaged_Call{Call: _e.mock.On("GetClaimsPaged", ctx, pageNumber, pageSize, networkIDs, globalIndex)} +} + +func (_c *ClaimsReader_GetClaimsPaged_Call) Run(run func(ctx context.Context, pageNumber uint32, pageSize uint32, networkIDs []uint32, globalIndex *big.Int)) *ClaimsReader_GetClaimsPaged_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint32), args[2].(uint32), args[3].([]uint32), args[4].(*big.Int)) + }) + return _c +} + +func (_c *ClaimsReader_GetClaimsPaged_Call) Return(_a0 []*claimsynctypes.Claim, _a1 int, _a2 error) *ClaimsReader_GetClaimsPaged_Call { + _c.Call.Return(_a0, _a1, _a2) + return _c +} + +func (_c *ClaimsReader_GetClaimsPaged_Call) RunAndReturn(run func(context.Context, uint32, uint32, []uint32, *big.Int) ([]*claimsynctypes.Claim, int, error)) *ClaimsReader_GetClaimsPaged_Call { + _c.Call.Return(run) + return _c +} + +// GetLastProcessedBlock provides a mock function with given fields: ctx, tx +func (_m *ClaimsReader) GetLastProcessedBlock(ctx context.Context, tx types.Querier) (uint64, bool, error) { + ret := _m.Called(ctx, tx) + + if len(ret) == 0 { + panic("no return value specified for GetLastProcessedBlock") + } + + var r0 uint64 + var r1 bool + var r2 error + if rf, ok := ret.Get(0).(func(context.Context, types.Querier) (uint64, bool, error)); ok { + return rf(ctx, tx) + } + if rf, ok := ret.Get(0).(func(context.Context, types.Querier) uint64); ok { + r0 = rf(ctx, tx) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := 
ret.Get(1).(func(context.Context, types.Querier) bool); ok { + r1 = rf(ctx, tx) + } else { + r1 = ret.Get(1).(bool) + } + + if rf, ok := ret.Get(2).(func(context.Context, types.Querier) error); ok { + r2 = rf(ctx, tx) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// ClaimsReader_GetLastProcessedBlock_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetLastProcessedBlock' +type ClaimsReader_GetLastProcessedBlock_Call struct { + *mock.Call +} + +// GetLastProcessedBlock is a helper method to define mock.On call +// - ctx context.Context +// - tx types.Querier +func (_e *ClaimsReader_Expecter) GetLastProcessedBlock(ctx interface{}, tx interface{}) *ClaimsReader_GetLastProcessedBlock_Call { + return &ClaimsReader_GetLastProcessedBlock_Call{Call: _e.mock.On("GetLastProcessedBlock", ctx, tx)} +} + +func (_c *ClaimsReader_GetLastProcessedBlock_Call) Run(run func(ctx context.Context, tx types.Querier)) *ClaimsReader_GetLastProcessedBlock_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(types.Querier)) + }) + return _c +} + +func (_c *ClaimsReader_GetLastProcessedBlock_Call) Return(_a0 uint64, _a1 bool, _a2 error) *ClaimsReader_GetLastProcessedBlock_Call { + _c.Call.Return(_a0, _a1, _a2) + return _c +} + +func (_c *ClaimsReader_GetLastProcessedBlock_Call) RunAndReturn(run func(context.Context, types.Querier) (uint64, bool, error)) *ClaimsReader_GetLastProcessedBlock_Call { + _c.Call.Return(run) + return _c +} + +// GetSetClaimsPaged provides a mock function with given fields: ctx, pageNumber, pageSize, globalIndex +func (_m *ClaimsReader) GetSetClaimsPaged(ctx context.Context, pageNumber uint32, pageSize uint32, globalIndex *big.Int) ([]*claimsynctypes.SetClaim, int, error) { + ret := _m.Called(ctx, pageNumber, pageSize, globalIndex) + + if len(ret) == 0 { + panic("no return value specified for GetSetClaimsPaged") + } + + var r0 []*claimsynctypes.SetClaim + var r1 int + var 
r2 error + if rf, ok := ret.Get(0).(func(context.Context, uint32, uint32, *big.Int) ([]*claimsynctypes.SetClaim, int, error)); ok { + return rf(ctx, pageNumber, pageSize, globalIndex) + } + if rf, ok := ret.Get(0).(func(context.Context, uint32, uint32, *big.Int) []*claimsynctypes.SetClaim); ok { + r0 = rf(ctx, pageNumber, pageSize, globalIndex) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*claimsynctypes.SetClaim) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, uint32, uint32, *big.Int) int); ok { + r1 = rf(ctx, pageNumber, pageSize, globalIndex) + } else { + r1 = ret.Get(1).(int) + } + + if rf, ok := ret.Get(2).(func(context.Context, uint32, uint32, *big.Int) error); ok { + r2 = rf(ctx, pageNumber, pageSize, globalIndex) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// ClaimsReader_GetSetClaimsPaged_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetSetClaimsPaged' +type ClaimsReader_GetSetClaimsPaged_Call struct { + *mock.Call +} + +// GetSetClaimsPaged is a helper method to define mock.On call +// - ctx context.Context +// - pageNumber uint32 +// - pageSize uint32 +// - globalIndex *big.Int +func (_e *ClaimsReader_Expecter) GetSetClaimsPaged(ctx interface{}, pageNumber interface{}, pageSize interface{}, globalIndex interface{}) *ClaimsReader_GetSetClaimsPaged_Call { + return &ClaimsReader_GetSetClaimsPaged_Call{Call: _e.mock.On("GetSetClaimsPaged", ctx, pageNumber, pageSize, globalIndex)} +} + +func (_c *ClaimsReader_GetSetClaimsPaged_Call) Run(run func(ctx context.Context, pageNumber uint32, pageSize uint32, globalIndex *big.Int)) *ClaimsReader_GetSetClaimsPaged_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint32), args[2].(uint32), args[3].(*big.Int)) + }) + return _c +} + +func (_c *ClaimsReader_GetSetClaimsPaged_Call) Return(_a0 []*claimsynctypes.SetClaim, _a1 int, _a2 error) *ClaimsReader_GetSetClaimsPaged_Call { + 
_c.Call.Return(_a0, _a1, _a2) + return _c +} + +func (_c *ClaimsReader_GetSetClaimsPaged_Call) RunAndReturn(run func(context.Context, uint32, uint32, *big.Int) ([]*claimsynctypes.SetClaim, int, error)) *ClaimsReader_GetSetClaimsPaged_Call { + _c.Call.Return(run) + return _c +} + +// GetUnsetClaimsPaged provides a mock function with given fields: ctx, pageNumber, pageSize, globalIndex +func (_m *ClaimsReader) GetUnsetClaimsPaged(ctx context.Context, pageNumber uint32, pageSize uint32, globalIndex *big.Int) ([]*claimsynctypes.UnsetClaim, int, error) { + ret := _m.Called(ctx, pageNumber, pageSize, globalIndex) + + if len(ret) == 0 { + panic("no return value specified for GetUnsetClaimsPaged") + } + + var r0 []*claimsynctypes.UnsetClaim + var r1 int + var r2 error + if rf, ok := ret.Get(0).(func(context.Context, uint32, uint32, *big.Int) ([]*claimsynctypes.UnsetClaim, int, error)); ok { + return rf(ctx, pageNumber, pageSize, globalIndex) + } + if rf, ok := ret.Get(0).(func(context.Context, uint32, uint32, *big.Int) []*claimsynctypes.UnsetClaim); ok { + r0 = rf(ctx, pageNumber, pageSize, globalIndex) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*claimsynctypes.UnsetClaim) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, uint32, uint32, *big.Int) int); ok { + r1 = rf(ctx, pageNumber, pageSize, globalIndex) + } else { + r1 = ret.Get(1).(int) + } + + if rf, ok := ret.Get(2).(func(context.Context, uint32, uint32, *big.Int) error); ok { + r2 = rf(ctx, pageNumber, pageSize, globalIndex) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// ClaimsReader_GetUnsetClaimsPaged_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetUnsetClaimsPaged' +type ClaimsReader_GetUnsetClaimsPaged_Call struct { + *mock.Call +} + +// GetUnsetClaimsPaged is a helper method to define mock.On call +// - ctx context.Context +// - pageNumber uint32 +// - pageSize uint32 +// - globalIndex *big.Int +func (_e 
*ClaimsReader_Expecter) GetUnsetClaimsPaged(ctx interface{}, pageNumber interface{}, pageSize interface{}, globalIndex interface{}) *ClaimsReader_GetUnsetClaimsPaged_Call { + return &ClaimsReader_GetUnsetClaimsPaged_Call{Call: _e.mock.On("GetUnsetClaimsPaged", ctx, pageNumber, pageSize, globalIndex)} +} + +func (_c *ClaimsReader_GetUnsetClaimsPaged_Call) Run(run func(ctx context.Context, pageNumber uint32, pageSize uint32, globalIndex *big.Int)) *ClaimsReader_GetUnsetClaimsPaged_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint32), args[2].(uint32), args[3].(*big.Int)) + }) + return _c +} + +func (_c *ClaimsReader_GetUnsetClaimsPaged_Call) Return(_a0 []*claimsynctypes.UnsetClaim, _a1 int, _a2 error) *ClaimsReader_GetUnsetClaimsPaged_Call { + _c.Call.Return(_a0, _a1, _a2) + return _c +} + +func (_c *ClaimsReader_GetUnsetClaimsPaged_Call) RunAndReturn(run func(context.Context, uint32, uint32, *big.Int) ([]*claimsynctypes.UnsetClaim, int, error)) *ClaimsReader_GetUnsetClaimsPaged_Call { + _c.Call.Return(run) + return _c +} + +// NewClaimsReader creates a new instance of ClaimsReader. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewClaimsReader(t interface { + mock.TestingT + Cleanup(func()) +}) *ClaimsReader { + mock := &ClaimsReader{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/claimsync/types/mocks/mock_embedded_processor.go b/claimsync/types/mocks/mock_embedded_processor.go new file mode 100644 index 000000000..f4282ca84 --- /dev/null +++ b/claimsync/types/mocks/mock_embedded_processor.go @@ -0,0 +1,146 @@ +// Code generated by mockery. DO NOT EDIT. 
+ +package mocks + +import ( + context "context" + + sync "github.com/agglayer/aggkit/sync" + mock "github.com/stretchr/testify/mock" + + types "github.com/agglayer/aggkit/db/types" +) + +// EmbeddedProcessor is an autogenerated mock type for the EmbeddedProcessor type +type EmbeddedProcessor struct { + mock.Mock +} + +type EmbeddedProcessor_Expecter struct { + mock *mock.Mock +} + +func (_m *EmbeddedProcessor) EXPECT() *EmbeddedProcessor_Expecter { + return &EmbeddedProcessor_Expecter{mock: &_m.Mock} +} + +// ProcessBlockWithTx provides a mock function with given fields: ctx, tx, block, eventRaw +func (_m *EmbeddedProcessor) ProcessBlockWithTx(ctx context.Context, tx types.Querier, block sync.Block, eventRaw any) error { + ret := _m.Called(ctx, tx, block, eventRaw) + + if len(ret) == 0 { + panic("no return value specified for ProcessBlockWithTx") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, types.Querier, sync.Block, any) error); ok { + r0 = rf(ctx, tx, block, eventRaw) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// EmbeddedProcessor_ProcessBlockWithTx_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ProcessBlockWithTx' +type EmbeddedProcessor_ProcessBlockWithTx_Call struct { + *mock.Call +} + +// ProcessBlockWithTx is a helper method to define mock.On call +// - ctx context.Context +// - tx types.Querier +// - block sync.Block +// - eventRaw any +func (_e *EmbeddedProcessor_Expecter) ProcessBlockWithTx(ctx interface{}, tx interface{}, block interface{}, eventRaw interface{}) *EmbeddedProcessor_ProcessBlockWithTx_Call { + return &EmbeddedProcessor_ProcessBlockWithTx_Call{Call: _e.mock.On("ProcessBlockWithTx", ctx, tx, block, eventRaw)} +} + +func (_c *EmbeddedProcessor_ProcessBlockWithTx_Call) Run(run func(ctx context.Context, tx types.Querier, block sync.Block, eventRaw any)) *EmbeddedProcessor_ProcessBlockWithTx_Call { + _c.Call.Run(func(args mock.Arguments) { + 
run(args[0].(context.Context), args[1].(types.Querier), args[2].(sync.Block), args[3].(any)) + }) + return _c +} + +func (_c *EmbeddedProcessor_ProcessBlockWithTx_Call) Return(_a0 error) *EmbeddedProcessor_ProcessBlockWithTx_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *EmbeddedProcessor_ProcessBlockWithTx_Call) RunAndReturn(run func(context.Context, types.Querier, sync.Block, any) error) *EmbeddedProcessor_ProcessBlockWithTx_Call { + _c.Call.Return(run) + return _c +} + +// ReorgWithTx provides a mock function with given fields: ctx, tx, firstReorgedBlock +func (_m *EmbeddedProcessor) ReorgWithTx(ctx context.Context, tx types.Querier, firstReorgedBlock uint64) (int64, error) { + ret := _m.Called(ctx, tx, firstReorgedBlock) + + if len(ret) == 0 { + panic("no return value specified for ReorgWithTx") + } + + var r0 int64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, types.Querier, uint64) (int64, error)); ok { + return rf(ctx, tx, firstReorgedBlock) + } + if rf, ok := ret.Get(0).(func(context.Context, types.Querier, uint64) int64); ok { + r0 = rf(ctx, tx, firstReorgedBlock) + } else { + r0 = ret.Get(0).(int64) + } + + if rf, ok := ret.Get(1).(func(context.Context, types.Querier, uint64) error); ok { + r1 = rf(ctx, tx, firstReorgedBlock) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EmbeddedProcessor_ReorgWithTx_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ReorgWithTx' +type EmbeddedProcessor_ReorgWithTx_Call struct { + *mock.Call +} + +// ReorgWithTx is a helper method to define mock.On call +// - ctx context.Context +// - tx types.Querier +// - firstReorgedBlock uint64 +func (_e *EmbeddedProcessor_Expecter) ReorgWithTx(ctx interface{}, tx interface{}, firstReorgedBlock interface{}) *EmbeddedProcessor_ReorgWithTx_Call { + return &EmbeddedProcessor_ReorgWithTx_Call{Call: _e.mock.On("ReorgWithTx", ctx, tx, firstReorgedBlock)} +} + +func (_c *EmbeddedProcessor_ReorgWithTx_Call) 
Run(run func(ctx context.Context, tx types.Querier, firstReorgedBlock uint64)) *EmbeddedProcessor_ReorgWithTx_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(types.Querier), args[2].(uint64)) + }) + return _c +} + +func (_c *EmbeddedProcessor_ReorgWithTx_Call) Return(_a0 int64, _a1 error) *EmbeddedProcessor_ReorgWithTx_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *EmbeddedProcessor_ReorgWithTx_Call) RunAndReturn(run func(context.Context, types.Querier, uint64) (int64, error)) *EmbeddedProcessor_ReorgWithTx_Call { + _c.Call.Return(run) + return _c +} + +// NewEmbeddedProcessor creates a new instance of EmbeddedProcessor. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewEmbeddedProcessor(t interface { + mock.TestingT + Cleanup(func()) +}) *EmbeddedProcessor { + mock := &EmbeddedProcessor{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/claimsync/types/processor.go b/claimsync/types/processor.go index 13a0151c2..7ab4d9a51 100644 --- a/claimsync/types/processor.go +++ b/claimsync/types/processor.go @@ -1,11 +1,13 @@ package types import ( + "context" + dbtypes "github.com/agglayer/aggkit/db/types" "github.com/agglayer/aggkit/sync" ) type EmbeddedProcessor interface { - ProcessBlockWithTx(tx dbtypes.Querier, block *sync.Block, insertBlock bool) error - ReorgWithTx(tx dbtypes.Querier, firstReorgedBlock uint64) (int64, error) + ProcessBlockWithTx(ctx context.Context, tx dbtypes.Querier, block sync.Block, eventRaw any) error + ReorgWithTx(ctx context.Context, tx dbtypes.Querier, firstReorgedBlock uint64) (int64, error) } diff --git a/cmd/run.go b/cmd/run.go index baf1f8459..d6e9e57b4 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -31,6 +31,7 @@ import ( "github.com/agglayer/aggkit/bridgeservice" "github.com/agglayer/aggkit/bridgesync" 
"github.com/agglayer/aggkit/claimsync" + claimsynctypes "github.com/agglayer/aggkit/claimsync/types" aggkitcommon "github.com/agglayer/aggkit/common" "github.com/agglayer/aggkit/config" "github.com/agglayer/aggkit/etherman" @@ -133,11 +134,18 @@ func start(cliCtx *cli.Context) error { if err != nil { return fmt.Errorf("failed to get initial local exit root: %w", err) } - l2BridgeSync := runBridgeSyncL2IfNeeded(ctx, components, cfg.BridgeL2Sync, reorgDetectorL2, + l2BridgeSync, l2ClaimSync := runBridgeSyncL2IfNeeded(ctx, components, cfg.BridgeL2Sync, reorgDetectorL2, l2Client, rollupDataQuerier.RollupID, initialLER, &backfillWg) l2GERSync := runL2GERSyncIfNeeded( ctx, components, cfg.L2GERSync, reorgDetectorL2, l2Client, l1InfoTreeSync, l1Client, ) + if l2ClaimSync == nil { + standaloneClaimSync := runClaimSyncL2IfNeeded(ctx, components, cfg.BridgeL2Sync, reorgDetectorL2, l2Client, rollupDataQuerier.RollupID) + if standaloneClaimSync != nil { + rpcServices = append(rpcServices, standaloneClaimSync.GetRPCServices()...) 
+ l2ClaimSync = standaloneClaimSync + } + } committeeQuerier := runAggsenderMultisigCommitteeIfNeeded(components, cfg.L1NetworkConfig.RollupAddr, l1Client, &cfg.AggSender.CommitteeOverride) @@ -197,7 +205,7 @@ func start(cliCtx *cli.Context) error { l1Client, l1InfoTreeSync, l2BridgeSync, - claimsync.NewFromBridgeSync(l2BridgeSync), + l2ClaimSync, l2Client, rollupDataQuerier, committeeQuerier, @@ -217,6 +225,7 @@ func start(cliCtx *cli.Context) error { l2Client, l1InfoTreeSync, l2BridgeSync, + l2ClaimSync, ) if err != nil { log.Fatal(err) @@ -229,7 +238,7 @@ func start(cliCtx *cli.Context) error { cfg.Validator, l1InfoTreeSync, l2BridgeSync, - claimsync.NewFromBridgeSync(l2BridgeSync), + l2ClaimSync, l1Client, l2Client, rollupDataQuerier, @@ -273,6 +282,7 @@ func createAggchainProofGen( l2Client aggkittypes.BaseEthereumClienter, l1InfoTreeSync *l1infotreesync.L1InfoTreeSync, l2Syncer *bridgesync.BridgeSync, + l2ClaimSync claimsynctypes.ClaimSyncer, ) (*prover.AggchainProofGenerationTool, error) { logger := log.WithFields("module", aggkitcommon.AGGCHAINPROOFGEN) @@ -283,6 +293,7 @@ func createAggchainProofGen( l1Client, l2Client, l2Syncer, + l2ClaimSync, l1InfoTreeSync, ) if err != nil { @@ -296,7 +307,7 @@ func createAggSenderValidator(ctx context.Context, cfg validator.Config, l1InfoTreeSync *l1infotreesync.L1InfoTreeSync, l2Syncer *bridgesync.BridgeSync, - claimSyncer claimsync.ClaimSyncer, + claimSyncer claimsynctypes.ClaimSyncer, l1Client aggkittypes.BaseEthereumClienter, l2Client aggkittypes.BaseEthereumClienter, rollupDataQuerier *ethermanquierier.RollupDataQuerier, @@ -335,6 +346,7 @@ func createAggSenderValidator(ctx context.Context, certQuerier := query.NewCertificateQuerier( l2Syncer, + claimSyncer, aggchainFEPQuerier, agglayerClient, initialLER, @@ -348,6 +360,7 @@ func createAggSenderValidator(ctx context.Context, l2Client, l1InfoTreeSync, l2Syncer, + claimSyncer, rollupDataQuerier, committeeQuerier, initialLER, @@ -364,7 +377,6 @@ func 
createAggSenderValidator(ctx context.Context, aggchainFEPQuerier, flowParams.InitialLER, flowParams.Signer, - claimSyncer, ) } @@ -374,7 +386,7 @@ func createAggSender( l1EthClient aggkittypes.BaseEthereumClienter, l1InfoTreeSync aggsendertypes.L1InfoTreeSyncer, l2Syncer aggsendertypes.L2BridgeSyncer, - claimSyncer claimsync.ClaimSyncer, + claimSyncer claimsynctypes.ClaimSyncer, l2Client aggkittypes.BaseEthereumClienter, rollupDataQuerier aggsendertypes.RollupDataQuerier, committeeQuerier aggsendertypes.MultisigQuerier, @@ -605,7 +617,8 @@ func runL2ClientIfNeeded(ctx context.Context, aggkitcommon.AGGSENDERVALIDATOR, aggkitcommon.AGGCHAINPROOFGEN, aggkitcommon.L2BRIDGESYNC, - aggkitcommon.L2GERSYNC}, components) { + aggkitcommon.L2GERSYNC, + aggkitcommon.L2CLAIMSYNC}, components) { return nil } logger := log.WithFields("module", "l2client") @@ -691,7 +704,8 @@ func runReorgDetectorL2IfNeeded( aggkitcommon.AGGSENDERVALIDATOR, aggkitcommon.AGGCHAINPROOFGEN, aggkitcommon.L2BRIDGESYNC, - aggkitcommon.L2GERSYNC}, components) { + aggkitcommon.L2GERSYNC, + aggkitcommon.L2CLAIMSYNC}, components) { return nil, nil } rd := newReorgDetector(cfg, l2Client, reorgdetector.L2) @@ -736,6 +750,20 @@ func runL2GERSyncIfNeeded( return l2GERSync } +func resolveL1BridgeConfig(cfg *bridgesync.Config, components []string, logprefix string) { + hasBridgeComponent := isNeeded([]string{aggkitcommon.BRIDGE}, components) + + syncFromInBridgesResolved := cfg.SyncFromInBridges.Resolve(hasBridgeComponent) + cfg.SyncFromInBridgesResolved = &syncFromInBridgesResolved + + embeddedClaimSyncResolved := cfg.EmbeddedClaimSync.Resolve(hasBridgeComponent) + cfg.EmbeddedClaimSyncResolved = &embeddedClaimSyncResolved + + for _, line := range cfg.ResolvedString() { + log.Info(logprefix+"BridgeConfig Resolved: ", line) + } +} + func runBridgeSyncL1IfNeeded( ctx context.Context, components []string, @@ -753,11 +781,7 @@ func runBridgeSyncL1IfNeeded( log.Fatalf("invalid BridgeL1Sync config: %v", err) } - // 
Resolve SyncFromInBridges mode based on components - hasBridgeComponent := isNeeded([]string{aggkitcommon.BRIDGE}, components) - syncFromInBridges := cfg.SyncFromInBridges.Resolve(hasBridgeComponent) - log.Infof("BridgeL1Sync SyncFromInBridges mode: %s, resolved to: %t (BRIDGE component active: %t)", - cfg.SyncFromInBridges, syncFromInBridges, hasBridgeComponent) + resolveL1BridgeConfig(&cfg, components, "L1") bridgeSyncL1, err := bridgesync.NewL1( ctx, @@ -765,7 +789,6 @@ func runBridgeSyncL1IfNeeded( reorgDetectorL1, l1Client, rollupID, - syncFromInBridges, ) if err != nil { log.Fatalf("error creating bridgeSyncL1: %s", err) @@ -786,6 +809,50 @@ func runBridgeSyncL1IfNeeded( return bridgeSyncL1 } +func runClaimSyncL1IfNeeded( + ctx context.Context, + components []string, + cfg bridgesync.Config, + reorgDetectorL1 bridgesync.ReorgDetector, + l1Client aggkittypes.EthClienter, + rollupID uint32, + wg *sync.WaitGroup, +) *claimsync.ClaimSync { + if !isNeeded([]string{aggkitcommon.BRIDGE, aggkitcommon.L1BRIDGESYNC}, components) { + return nil + } + if err := cfg.Validate(); err != nil { + log.Fatalf("invalid BridgeL1Sync config: %v", err) + } + cfgClaim := claimsync.ConfigStandalone{ + ConfigEmbedded: claimsync.ConfigEmbedded{ + DBQueryTimeout: cfg.DBQueryTimeout, + BridgeAddr: cfg.BridgeAddr, + }, + DBPath: cfg.DBPath + "_claim.sqlite", + BlockFinality: cfg.BlockFinality, + InitialBlockNum: 0, + SyncBlockChunkSize: 1000, + RetryAfterErrorPeriod: cfg.RetryAfterErrorPeriod, + MaxRetryAttemptsAfterError: cfg.MaxRetryAttemptsAfterError, + WaitForNewBlocksPeriod: cfg.WaitForNewBlocksPeriod, + RequireStorageContentCompatibility: cfg.RequireStorageContentCompatibility, + } + res, err := claimsync.NewStandaloneClaimSync( + ctx, + cfgClaim, + reorgDetectorL1, + l1Client, + claimsynctypes.L1ClaimSyncer, + rollupID, + ) + if err != nil { + log.Fatalf("error creating ClaimSyncL1: %s", err) + } + go res.Start(ctx) + return res +} + +func runBridgeSyncL2IfNeeded( + ctx 
context.Context, components []string, @@ -795,7 +862,7 @@ func runBridgeSyncL2IfNeeded( rollupID uint32, initialLER common.Hash, wg *sync.WaitGroup, -) *bridgesync.BridgeSync { +) (*bridgesync.BridgeSync, claimsynctypes.ClaimSyncer) { fullClaimsNeeded := isNeeded([]string{ aggkitcommon.BRIDGE, aggkitcommon.AGGSENDER, @@ -807,14 +874,11 @@ func runBridgeSyncL2IfNeeded( if !fullClaimsNeeded && !fullClaimsNotNeeded { // no bridge sync needed - return nil + return nil, nil } // Resolve SyncFromInBridges mode based on components - hasBridgeComponent := isNeeded([]string{aggkitcommon.BRIDGE}, components) - syncFromInBridges := cfg.SyncFromInBridges.Resolve(hasBridgeComponent) - log.Infof("BridgeL2Sync SyncFromInBridges mode: %s, resolved to: %t (BRIDGE component active: %t)", - cfg.SyncFromInBridges, syncFromInBridges, hasBridgeComponent) + resolveL1BridgeConfig(&cfg, components, "L2") bridgeSyncL2, err := bridgesync.NewL2( ctx, @@ -823,9 +887,7 @@ func runBridgeSyncL2IfNeeded( l2Client, rollupID, fullClaimsNeeded, - syncFromInBridges, initialLER, - nil, ) if err != nil { log.Fatalf("error creating bridgeSyncL2: %s", err) @@ -840,10 +902,58 @@ func runBridgeSyncL2IfNeeded( // Don't fail the entire process, just log the error and continue } }() - + log.Infof("Starting BridgeSyncL2 with SyncFromInBridges: %t EmbeddedClaimSyncResolved:%t", + *cfg.SyncFromInBridgesResolved, + *cfg.EmbeddedClaimSyncResolved) go bridgeSyncL2.Start(ctx) + if *cfg.EmbeddedClaimSyncResolved { + return bridgeSyncL2, bridgeSyncL2 + } + return bridgeSyncL2, nil +} - return bridgeSyncL2 +func runClaimSyncL2IfNeeded( + ctx context.Context, + components []string, + cfg bridgesync.Config, + reorgDetectorL2 *reorgdetector.ReorgDetector, + l2Client aggkittypes.EthClienter, + originNetwork uint32, +) *claimsync.ClaimSync { + if !isNeeded([]string{ + aggkitcommon.AGGSENDER, + aggkitcommon.AGGSENDERVALIDATOR, + aggkitcommon.AGGCHAINPROOFGEN, + aggkitcommon.L2CLAIMSYNC}, components) { + return nil + } + 
cfgClaim := claimsync.ConfigStandalone{ + ConfigEmbedded: claimsync.ConfigEmbedded{ + DBQueryTimeout: cfg.DBQueryTimeout, + BridgeAddr: cfg.BridgeAddr, + }, + DBPath: cfg.DBPath + "_claim.sqlite", + BlockFinality: cfg.BlockFinality, + InitialBlockNum: 0, + SyncBlockChunkSize: 1000, + RetryAfterErrorPeriod: cfg.RetryAfterErrorPeriod, + MaxRetryAttemptsAfterError: cfg.MaxRetryAttemptsAfterError, + WaitForNewBlocksPeriod: cfg.WaitForNewBlocksPeriod, + RequireStorageContentCompatibility: cfg.RequireStorageContentCompatibility, + } + res, err := claimsync.NewStandaloneClaimSync( + ctx, + cfgClaim, + reorgDetectorL2, + l2Client, + claimsynctypes.L2ClaimSyncer, + originNetwork, + ) + if err != nil { + log.Fatalf("error creating ClaimSyncL2: %s", err) + } + go res.Start(ctx) + return res } func runAggsenderMultisigCommitteeIfNeeded( diff --git a/common/components.go b/common/components.go index 6008be7fa..9c12c88b4 100644 --- a/common/components.go +++ b/common/components.go @@ -27,6 +27,8 @@ const ( AGGCHAINPROOFGEN = "aggchain-proof-gen" // AGGSENDERVALIDATOR runs aggsender certificate validator AGGSENDERVALIDATOR = "aggsender-validator" + // L2CLAIMSYNC name to identify the l2 claim sync component + L2CLAIMSYNC = "l2claimsync" ) // ValidateComponents validates that all provided components are known/supported. 
@@ -42,6 +44,7 @@ func ValidateComponents(components []string) error { L2GERSYNC: {}, AGGCHAINPROOFGEN: {}, AGGSENDERVALIDATOR: {}, + L2CLAIMSYNC: {}, } // build a sorted list of valid component names for error messages diff --git a/config/default.go b/config/default.go index c999feff6..4a67d93e8 100644 --- a/config/default.go +++ b/config/default.go @@ -163,6 +163,7 @@ WaitForNewBlocksPeriod = "3s" RequireStorageContentCompatibility = {{RequireStorageContentCompatibility}} DBQueryTimeout = "{{defaultDBQueryTimeout}}" SyncFromInBridges = "auto" +EmbeddedClaimSync = "auto" [BridgeL2Sync] DBPath = "{{PathRWData}}/bridgel2sync.sqlite" @@ -176,6 +177,7 @@ WaitForNewBlocksPeriod = "3s" RequireStorageContentCompatibility = {{RequireStorageContentCompatibility}} DBQueryTimeout = "{{defaultDBQueryTimeout}}" SyncFromInBridges = "auto" +EmbeddedClaimSync = "auto" [L2GERSync] DBPath = "{{PathRWData}}/l2gersync.sqlite" diff --git a/l1infotreesync/l1infotreesync.go b/l1infotreesync/l1infotreesync.go index 97b87438a..e7a23e9c9 100644 --- a/l1infotreesync/l1infotreesync.go +++ b/l1infotreesync/l1infotreesync.go @@ -158,7 +158,7 @@ func NewLegacy( } // TODO: get the initialBlock from L1 to simplify config - lastProcessedBlock, err := processor.GetLastProcessedBlock(ctx) + lastProcessedBlock, _, err := processor.GetLastProcessedBlock(ctx) if err != nil { return nil, err } @@ -363,7 +363,8 @@ func (s *L1InfoTreeSync) GetLastProcessedBlock(ctx context.Context) (uint64, err if s.processor.isHalted() { return 0, sync.ErrInconsistentState } - return s.processor.GetLastProcessedBlock(ctx) + num, _, err := s.processor.GetLastProcessedBlock(ctx) + return num, err } func (s *L1InfoTreeSync) GetLocalExitRoot( @@ -475,7 +476,7 @@ func (s *L1InfoTreeSync) IsUpToDate(ctx context.Context, l1Client aggkittypes.Ba return false, sync.ErrInconsistentState } - lastProcessedBlock, err := s.processor.GetLastProcessedBlock(ctx) + lastProcessedBlock, _, err := s.processor.GetLastProcessedBlock(ctx) if 
err != nil { return false, fmt.Errorf("failed to get last processed block: %w", err) } diff --git a/l1infotreesync/processor.go b/l1infotreesync/processor.go index 269557c37..0cae2f71a 100644 --- a/l1infotreesync/processor.go +++ b/l1infotreesync/processor.go @@ -248,9 +248,19 @@ func (p *processor) getInfoByIndexWithTx(tx dbtypes.DBer, index uint32) (*L1Info ) } -// GetLastProcessedBlock returns the last processed block -func (p *processor) GetLastProcessedBlock(ctx context.Context) (uint64, error) { - return p.getLastProcessedBlockWithTx(p.db) +// GetLastProcessedBlock returns the last processed block. +// Returns (0, false, nil) if no blocks have been processed yet. +func (p *processor) GetLastProcessedBlock(ctx context.Context) (uint64, bool, error) { + var lastProcessedBlockNum uint64 + row := p.db.QueryRow("SELECT num FROM BLOCK ORDER BY num DESC LIMIT 1;") + err := row.Scan(&lastProcessedBlockNum) + if errors.Is(err, sql.ErrNoRows) { + return 0, false, nil + } + if err != nil { + return 0, false, err + } + return lastProcessedBlockNum, true, nil } // GetLastProcessedBlockHeader returns the last processed block header diff --git a/l2gersync/l2_ger_syncer.go b/l2gersync/l2_ger_syncer.go index f1a7fd174..cc0a9651a 100644 --- a/l2gersync/l2_ger_syncer.go +++ b/l2gersync/l2_ger_syncer.go @@ -164,7 +164,8 @@ func (s *L2GERSync) GetInjectedGERsForRange(ctx context.Context, // GetLastProcessedBlock returns the last processed block number func (s *L2GERSync) GetLastProcessedBlock(ctx context.Context) (uint64, error) { - return s.processor.GetLastProcessedBlock(ctx) + num, _, err := s.processor.GetLastProcessedBlock(ctx) + return num, err } // GetRemoveGEREvents retrieves remove GER events from the database with optional filters diff --git a/l2gersync/processor.go b/l2gersync/processor.go index d3e6cfc03..f68e0d0d0 100644 --- a/l2gersync/processor.go +++ b/l2gersync/processor.go @@ -156,7 +156,8 @@ func (p *processor) handleGEREvent(tx dbtypes.Txer, gerInfo 
*GlobalExitRootInfo, // GetLastProcessedBlock retrieves the most recent block processed by the processor, // including those without events. -func (p *processor) GetLastProcessedBlock(ctx context.Context) (uint64, error) { +// Returns (0, false, nil) if no blocks have been processed yet. +func (p *processor) GetLastProcessedBlock(ctx context.Context) (uint64, bool, error) { var block BlockNum if err := meddler.QueryRow( p.database, @@ -164,11 +165,11 @@ func (p *processor) GetLastProcessedBlock(ctx context.Context) (uint64, error) { "SELECT num FROM block ORDER BY num DESC LIMIT 1;", ); err != nil { if errors.Is(err, sql.ErrNoRows) { - return 0, nil + return 0, false, nil } - return 0, err + return 0, false, err } - return block.Num, nil + return block.Num, true, nil } // getLatestL1InfoTreeIndex retrieves the highest L1InfoTreeIndex recorded in the imported_global_exit_root_v2 table diff --git a/multidownloader/evm_multidownloader.go b/multidownloader/evm_multidownloader.go index e60919952..03da6580f 100644 --- a/multidownloader/evm_multidownloader.go +++ b/multidownloader/evm_multidownloader.go @@ -413,6 +413,8 @@ func (dh *EVMMultidownloader) WaitForNewLatestBlocks(ctx context.Context) error lastBlockHeader = &aggkittypes.BlockHeader{ Number: latestSyncedBlockNumber, } + // Is not in DB, so must be finalized + finalized = true } dh.log.Infof("waiting new block (%s>%d)...", lastSyncedBlockTag.String(), latestSyncedBlockNumber) _, err = dh.waitForNewBlocks(ctx, lastSyncedBlockTag, lastBlockHeader, finalized) diff --git a/scripts/request_aggsender_status.sh b/scripts/request_aggsender_status.sh new file mode 100644 index 000000000..c56227f93 --- /dev/null +++ b/scripts/request_aggsender_status.sh @@ -0,0 +1,2 @@ +#!/bin/bash +curl -X POST http://localhost:33032/ -H "Content-Type: application/json" -d '{"method":"aggsender_status", "params":[], "id":1}' | jq . 
diff --git a/sync/evmdriver.go b/sync/evmdriver.go index 33d366ffe..e5f76bcdd 100644 --- a/sync/evmdriver.go +++ b/sync/evmdriver.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "time" aggkitcommon "github.com/agglayer/aggkit/common" "github.com/agglayer/aggkit/db/compatibility" @@ -79,7 +80,7 @@ func (r RuntimeData) IsCompatible(other RuntimeData) (*RuntimeData, error) { } type processorInterface interface { - GetLastProcessedBlock(ctx context.Context) (uint64, error) + GetLastProcessedBlock(ctx context.Context) (uint64, bool, error) ProcessBlock(ctx context.Context, block Block) error Reorg(ctx context.Context, firstReorgedBlock uint64) error } @@ -136,6 +137,7 @@ reset: lastProcessedBlock uint64 attempts int err error + found bool ) for { if err = d.compatibilityChecker.Check(ctx, nil); err != nil { @@ -148,13 +150,24 @@ reset: } for { - lastProcessedBlock, err = d.processor.GetLastProcessedBlock(ctx) + // Now we let to have no processed block and wait until appears + lastProcessedBlock, found, err = d.processor.GetLastProcessedBlock(ctx) if err != nil { attempts++ d.log.Error("error getting last processed block: ", err) d.rh.Handle(ctx, "Sync", attempts) continue } + if !found { + d.log.Infof("no processed blocks found, waiting %s", d.rh.RetryAfterErrorPeriod) + select { + case <-ctx.Done(): + d.log.Info("context done while waiting for processed blocks, exiting sync") + return + case <-time.After(d.rh.RetryAfterErrorPeriod): + continue + } + } break } diff --git a/sync/mock_processor_interface.go b/sync/mock_processor_interface.go index 96ece8d42..8a50486b8 100644 --- a/sync/mock_processor_interface.go +++ b/sync/mock_processor_interface.go @@ -22,7 +22,7 @@ func (_m *ProcessorMock) EXPECT() *ProcessorMock_Expecter { } // GetLastProcessedBlock provides a mock function with given fields: ctx -func (_m *ProcessorMock) GetLastProcessedBlock(ctx context.Context) (uint64, error) { +func (_m *ProcessorMock) GetLastProcessedBlock(ctx context.Context) (uint64, 
bool, error) { ret := _m.Called(ctx) if len(ret) == 0 { @@ -30,8 +30,9 @@ func (_m *ProcessorMock) GetLastProcessedBlock(ctx context.Context) (uint64, err } var r0 uint64 - var r1 error - if rf, ok := ret.Get(0).(func(context.Context) (uint64, error)); ok { + var r1 bool + var r2 error + if rf, ok := ret.Get(0).(func(context.Context) (uint64, bool, error)); ok { return rf(ctx) } if rf, ok := ret.Get(0).(func(context.Context) uint64); ok { @@ -40,13 +41,19 @@ func (_m *ProcessorMock) GetLastProcessedBlock(ctx context.Context) (uint64, err r0 = ret.Get(0).(uint64) } - if rf, ok := ret.Get(1).(func(context.Context) error); ok { + if rf, ok := ret.Get(1).(func(context.Context) bool); ok { r1 = rf(ctx) } else { - r1 = ret.Error(1) + r1 = ret.Get(1).(bool) } - return r0, r1 + if rf, ok := ret.Get(2).(func(context.Context) error); ok { + r2 = rf(ctx) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 } // ProcessorMock_GetLastProcessedBlock_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetLastProcessedBlock' @@ -67,12 +74,12 @@ func (_c *ProcessorMock_GetLastProcessedBlock_Call) Run(run func(ctx context.Con return _c } -func (_c *ProcessorMock_GetLastProcessedBlock_Call) Return(_a0 uint64, _a1 error) *ProcessorMock_GetLastProcessedBlock_Call { - _c.Call.Return(_a0, _a1) +func (_c *ProcessorMock_GetLastProcessedBlock_Call) Return(_a0 uint64, _a1 bool, _a2 error) *ProcessorMock_GetLastProcessedBlock_Call { + _c.Call.Return(_a0, _a1, _a2) return _c } -func (_c *ProcessorMock_GetLastProcessedBlock_Call) RunAndReturn(run func(context.Context) (uint64, error)) *ProcessorMock_GetLastProcessedBlock_Call { +func (_c *ProcessorMock_GetLastProcessedBlock_Call) RunAndReturn(run func(context.Context) (uint64, bool, error)) *ProcessorMock_GetLastProcessedBlock_Call { _c.Call.Return(run) return _c } diff --git a/test/e2e/bridge_test.go b/test/e2e/bridge_test.go new file mode 100644 index 000000000..f7ed9c9ca --- /dev/null +++ 
b/test/e2e/bridge_test.go @@ -0,0 +1,19 @@ +package e2e + +import ( + "os/exec" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestJustBridge(t *testing.T) { + if testing.Short() { + t.Skip("Skipping E2E test in short mode") + } + if _, err := exec.LookPath("cast"); err != nil { + t.Skip("cast not found in PATH, skipping TestJustBridge") + } + env := testEnv + require.NotNil(t, env, "testEnv must be set by TestMain") +} diff --git a/test/e2e/envs/loader.go b/test/e2e/envs/loader.go index 071220a6f..b301f90f4 100644 --- a/test/e2e/envs/loader.go +++ b/test/e2e/envs/loader.go @@ -211,12 +211,13 @@ func LoadEnv(ctx context.Context, envName ENVName) (*Env, error) { if err := json.Unmarshal(data, &summary); err != nil { return nil, fmt.Errorf("parse summary.json: %w", err) } - - // Ensure containers are down and data dir is clean, then start fresh - if err := ensureDockerComposeRunning(ctx, envDir); err != nil { - return nil, fmt.Errorf("start docker compose: %w", err) + isDockerRunning := os.Getenv("E2E_DOCKER_IS_RUNNING") == "1" + if !isDockerRunning { + // Ensure containers are down and data dir is clean, then start fresh + if err := ensureDockerComposeRunning(ctx, envDir); err != nil { + return nil, fmt.Errorf("start docker compose: %w", err) + } } - // Wait for services to be ready if err := waitForServices(ctx, &summary); err != nil { _ = stopDockerCompose(context.Background(), envDir) diff --git a/test/e2e/envs/op-pp/config_local/README.md b/test/e2e/envs/op-pp/config_local/README.md new file mode 100644 index 000000000..4f3e41469 --- /dev/null +++ b/test/e2e/envs/op-pp/config_local/README.md @@ -0,0 +1,20 @@ +# Configurations for run in local (vscode) + +## aggkit-parallel.toml +This configuration uses different ports to be able to run at the same time as docker `aggkit-001` +To launch using vscode add the following configuration to `.vscode/launch.json`: +``` + { + "name": "docker-compose aggsender", + "type": "go", + "request": "launch", + "mode": "auto", 
+ "program": "cmd/", + "cwd": "${workspaceFolder}", + "args":[ + "run", + "-cfg", "test/e2e/envs/op-pp/config_local/aggkit-parallel.toml", + "-components", "aggsender", + ] +}, +``` \ No newline at end of file diff --git a/test/e2e/envs/op-pp/config_local/aggkit-parallel.toml b/test/e2e/envs/op-pp/config_local/aggkit-parallel.toml new file mode 100644 index 000000000..730a0a63d --- /dev/null +++ b/test/e2e/envs/op-pp/config_local/aggkit-parallel.toml @@ -0,0 +1,122 @@ +# ============================================================================== +# Local aggkit config against the running Docker Compose environment. +# Run aggsender only: +# ./target/aggkit run --cfg=aggsender-local.toml --components=aggsender +# Run validator only: +# ./target/aggkit run --cfg=aggsender-local.toml --components=aggsender-validator +# Run both: +# ./target/aggkit run --cfg=aggsender-local.toml --components=aggsender,aggsender-validator +# ============================================================================== + +PathRWData = "/tmp/aggkit-local" + +# Docker containers are accessed via their mapped host ports: +# geth (L1) -> localhost:8545 +# op-geth-001 -> localhost:11545 +# op-node-001 -> localhost:11547 +# agglayer -> localhost:4443 +L1URL = "http://localhost:8545" +L2URL = "http://localhost:11545" +OpNodeURL = "http://localhost:11547" +AggLayerURL = "http://localhost:4443" + +rollupCreationBlockNumber = "37" +rollupManagerCreationBlockNumber = "37" +genesisBlockNumber = "37" + +# ------------------------------------------------------------------------------ +[L1Config] +URL = "http://localhost:8545" +chainId = "271828" +polygonZkEVMGlobalExitRootAddress = "0x1f7ad7caA53e35b4f0D138dC5CBF91aC108a2674" +polygonRollupManagerAddress = "0x6c6c009cC348976dB4A908c92B24433d4F6edA43" +polTokenAddress = "0xEdE9cf798E0fE25D35469493f43E88FeA4a5da0E" +polygonZkEVMAddress = "0x414e9E227e4b589aF92200508aF5399576530E4e" +BridgeAddr = "0xC8cbEBf950B9Df44d987c8619f092beA980fF038" + +# 
------------------------------------------------------------------------------ +[L2Config] +GlobalExitRootAddr = "0xa40d5f56745a118d0906a34e69aec8c0db1cb8fa" +BridgeAddr = "0xC8cbEBf950B9Df44d987c8619f092beA980fF038" + +# ------------------------------------------------------------------------------ +[Log] +Environment = "development" +Level = "debug" +Outputs = ["stderr"] + +# ------------------------------------------------------------------------------ +# Use different ports to avoid conflicts with the running aggkit container +# (which uses 5576/5577/9091) +[RPC] +Port = "5596" + +[REST] +Port = "5597" + +[Prometheus] +Enabled = true +Host = "localhost" +Port = 9096 +# ------------------------------------------------------------------------------ +[AggSender] +AggsenderPrivateKey = {Path = "test/e2e/envs/op-pp/config/001/sequencer.keystore", Password = "pSnv6Dh5s9ahuzGzH9RoCDrKAMddaX3m"} +Mode = "PessimisticProof" +CheckStatusCertificateInterval = "1s" +TriggerCertMode = "ASAP" + +[AggSender.ValidatorClient] +# Base gRPC config; actual per-signer URLs come from the agglayer committee +UseTLS = false + +[AggSender.AggkitProverClient] +UseTLS = false + +[AggSender.AgglayerClient] + +[[AggSender.AgglayerClient.APIRateLimits]] +MethodName = "SendCertificate" + +[AggSender.AgglayerClient.APIRateLimits.RateLimit] +NumRequests = 0 + +[AggSender.AgglayerClient.GRPC] +URL = "http://localhost:4443" +MinConnectTimeout = "5s" +RequestTimeout = "300s" +UseTLS = false + +[AggSender.AgglayerClient.GRPC.Retry] +InitialBackoff = "1s" +MaxBackoff = "10s" +BackoffMultiplier = 2.0 +MaxAttempts = 20 + +# ------------------------------------------------------------------------------ +[BridgeL2Sync] +BridgeAddr = "0xC8cbEBf950B9Df44d987c8619f092beA980fF038" +BlockFinality = "LatestBlock" + +[ReorgDetectorL2] +FinalizedBlock = "FinalizedBlock" + +# ------------------------------------------------------------------------------ +[L1InfoTreeSync] +InitialBlock = "37" + +# 
------------------------------------------------------------------------------ +[L2GERSync] +BlockFinality = "LatestBlock" + +# ------------------------------------------------------------------------------ +# Validator (aggsender-validator component) +# Most fields are resolved automatically from AggSender/L1Config/L2Config defaults. +# Port 5580 avoids conflict with the running container (which uses 5578). +[Validator] +EnableRPC = true +Signer = { Method = "local", Path = "test/e2e/envs/op-pp/config/001/sequencer.keystore", Password = "pSnv6Dh5s9ahuzGzH9RoCDrKAMddaX3m" } + +[Validator.ServerConfig] +Host = "0.0.0.0" +Port = 5581 +MaxDecodingMessageSize = 1073741824 # 1GB diff --git a/test/helpers/e2e.go b/test/helpers/e2e.go index 46078fc1a..c89fbaa9e 100644 --- a/test/helpers/e2e.go +++ b/test/helpers/e2e.go @@ -211,7 +211,7 @@ func L1Setup(t *testing.T, cfg *EnvironmentConfig) *L1Environment { RequireStorageContentCompatibility: true, DBQueryTimeout: cfgtypes.NewDuration(defaultDBQueryTimeout), } - bridgeL1Sync, err := bridgesync.NewL1(ctx, bridgeSyncCfg, rdL1, testClient, originNetwork, true) + bridgeL1Sync, err := bridgesync.NewL1(ctx, bridgeSyncCfg, rdL1, testClient, originNetwork) require.NoError(t, err) go bridgeL1Sync.Start(ctx) @@ -342,7 +342,7 @@ func L2Setup(t *testing.T, cfg *EnvironmentConfig, l1Setup *L1Environment) *L2En RequireStorageContentCompatibility: true, DBQueryTimeout: cfgtypes.NewDuration(defaultDBQueryTimeout), } - bridgeL2Sync, err := bridgesync.NewL2(ctx, bridgeSyncCfg, rdL2, testClient, originNetwork, false, false, bridgesynctypes.EmptyLER, nil) + bridgeL2Sync, err := bridgesync.NewL2(ctx, bridgeSyncCfg, rdL2, testClient, originNetwork, false, bridgesynctypes.EmptyLER) require.NoError(t, err) go bridgeL2Sync.Start(ctx) From aaaaf0b58ea041a4a6f09c3c2e52fec09b78f5f0 Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Tue, 17 Mar 2026 12:30:28 +0100 Subject: [PATCH 03/28] feat: add 
TrueFalseAutoMode, claimsync RPC by syncerID, AutoStart and bootstrap - Move TrueFalseAutoMode to config/types as a struct with Mode string and Resolved *bool (not read from config); add type alias + var re-exports in bridgesync - claimsync RPC: GetRPCServices() sets service name (l1claimsync/l2claimsync) based on syncerID stored in ClaimSync struct - claimsync config: add AutoStart TrueFalseAutoMode field; resolved against BRIDGE/L1BRIDGESYNC or BRIDGE/L2BRIDGESYNC components at startup - sync/EVMDriver: add SyncNextBlock(ctx, blockNum) to bootstrap the first block; returns ErrAlreadyBootstrapped if a block already exists (ignorable) - claimsync: Start() passes InitialBlockNum to Sync() when AutoStart=true; syncNextBlockInfinite() retries bootstrap until success or ctx cancellation - sync: add README.md documenting how to implement a new syncer Co-Authored-By: Claude Sonnet 4.6 --- bridgesync/bridgesync.go | 155 +---- bridgesync/bridgesync_test.go | 99 +--- bridgesync/config.go | 94 +-- bridgesync/config_test.go | 50 +- bridgesync/e2e_test.go | 48 +- claimsync/agglayer_bridge_l2_reader.go | 146 +++++ claimsync/agglayer_bridge_l2_reader_test.go | 611 ++++++++++++++++++++ claimsync/claimsync.go | 53 +- claimsync/claimsync_rpc.go | 14 +- claimsync/config.go | 11 +- cmd/run.go | 144 ++--- config/types/true_false_auto.go | 71 +++ sync/README.md | 247 ++++++++ sync/evmdriver.go | 54 +- test/helpers/e2e.go | 2 + 15 files changed, 1371 insertions(+), 428 deletions(-) create mode 100644 claimsync/agglayer_bridge_l2_reader.go create mode 100644 claimsync/agglayer_bridge_l2_reader_test.go create mode 100644 config/types/true_false_auto.go create mode 100644 sync/README.md diff --git a/bridgesync/bridgesync.go b/bridgesync/bridgesync.go index 923380588..ab438f7fc 100644 --- a/bridgesync/bridgesync.go +++ b/bridgesync/bridgesync.go @@ -10,8 +10,6 @@ import ( "github.com/0xPolygon/cdk-contracts-tooling/contracts/aggchain-multisig/agglayerbridge" 
"github.com/0xPolygon/cdk-contracts-tooling/contracts/aggchain-multisig/agglayerbridgel2" bridgesynctypes "github.com/agglayer/aggkit/bridgesync/types" - "github.com/agglayer/aggkit/claimsync" - claimsynctypes "github.com/agglayer/aggkit/claimsync/types" "github.com/agglayer/aggkit/db/compatibility" "github.com/agglayer/aggkit/log" "github.com/agglayer/aggkit/reorgdetector" @@ -68,15 +66,15 @@ type ReorgDetector interface { // BridgeSync manages the state of the exit tree for the bridge contract by processing Ethereum blockchain events. type BridgeSync struct { - processor *processor - driver *sync.EVMDriver - downloader *sync.EVMDownloader - claimReader claimsynctypes.ClaimsReader + processor *processor + driver *sync.EVMDriver + downloader *sync.EVMDownloader originNetwork uint32 reorgDetector ReorgDetector ethClient aggkittypes.EthClienter agglayerBridge *agglayerbridge.Agglayerbridge + cfg Config } // NewL1 creates a bridge syncer that synchronizes the mainnet exit tree @@ -87,6 +85,7 @@ func NewL1( ethClient aggkittypes.EthClienter, originNetwork uint32, ) (*BridgeSync, error) { + syncFromInBridges := cfg.SyncFromInBridges.Resolved != nil && *cfg.SyncFromInBridges.Resolved return newBridgeSync( ctx, cfg, @@ -96,14 +95,12 @@ func NewL1( L1BridgeSyncer, originNetwork, false, - *cfg.SyncFromInBridgesResolved, + syncFromInBridges, bridgesynctypes.EmptyLER, - *cfg.EmbeddedClaimSyncResolved, ) } // NewL2 creates a bridge syncer that synchronizes the local exit tree. -// Pass a non-nil claimEventsProcessor to delegate claim storage to claimsync. 
func NewL2( ctx context.Context, cfg Config, @@ -113,6 +110,7 @@ func NewL2( syncFullClaims bool, initialLER common.Hash, ) (*BridgeSync, error) { + syncFromInBridges := cfg.SyncFromInBridges.Resolved != nil && *cfg.SyncFromInBridges.Resolved return newBridgeSync( ctx, cfg, @@ -122,9 +120,8 @@ func NewL2( L2BridgeSyncer, originNetwork, syncFullClaims, - *cfg.SyncFromInBridgesResolved, + syncFromInBridges, initialLER, - *cfg.EmbeddedClaimSyncResolved, ) } @@ -139,7 +136,6 @@ func newBridgeSync( syncFullClaims bool, syncFromInBridges bool, initialLER common.Hash, - embeddedClaimSyncFlag bool, ) (*BridgeSync, error) { logger := log.WithFields("module", syncerID.String()) @@ -162,57 +158,12 @@ func newBridgeSync( return nil, fmt.Errorf("failed to create sqlite database %s: %w", cfg.DBPath, err) } - var embeddedClaimSync claimsync.EmbeddedClaimSync - if embeddedClaimSyncFlag { - claimID := claimsynctypes.ClaimSyncerID(syncerID) - logger.Info("initializing embedded claim sync for bridge sync %s", claimID) - claimStorage, err := claimsync.NewClaimStorage(database, logger, claimID, cfg.DBQueryTimeout.Duration) - if err != nil { - return nil, fmt.Errorf("failed to create claim storage: %w", err) - } - - embeddedClaimSyncObject, err := claimsync.NewEmbedded( - ctx, claimStorage, - cfg.BridgeAddr, - ethClient, - nil, - claimID, - cfg.DBQueryTimeout.Duration, - logger, - ) - if err != nil { - return nil, fmt.Errorf("failed to initialize embedded claim sync: %w", err) - } - embeddedClaimSync = *embeddedClaimSyncObject - - } - - processor, err := newProcessor(database, "bridge_sync_"+syncerID.String(), logger, cfg.DBQueryTimeout.Duration, embeddedClaimSync.Processor) + processor, err := newProcessor(database, "bridge_sync_"+syncerID.String(), logger, cfg.DBQueryTimeout.Duration) if err != nil { return nil, err } processor.initialLER = initialLER - lastProcessedBlock, found, err := processor.GetLastProcessedBlock(ctx) - if err != nil { - return nil, err - } - - if !found || 
(lastProcessedBlock < cfg.InitialBlockNum) { - header, err := ethClient.CustomHeaderByNumber(ctx, aggkittypes.NewBlockNumber(cfg.InitialBlockNum)) - if err != nil { - return nil, fmt.Errorf("failed to get initial block %d: %w", cfg.InitialBlockNum, err) - } - - err = processor.ProcessBlock(ctx, sync.Block{ - Num: cfg.InitialBlockNum, - Hash: header.Hash, - }) - if err != nil { - return nil, err - } - } - rh := &sync.RetryHandler{ MaxRetryAttemptsAfterError: cfg.MaxRetryAttemptsAfterError, RetryAfterErrorPeriod: cfg.RetryAfterErrorPeriod.Duration, @@ -223,7 +174,7 @@ func newBridgeSync( return nil, fmt.Errorf("failed to resolve bridge deployment. Reason: %w", err) } - appender, err := buildAppender(ctx, ethClient, cfg.BridgeAddr, syncFromInBridges, bridgeDeployment, logger, embeddedClaimSync.Appender) + appender, err := buildAppender(ctx, ethClient, cfg.BridgeAddr, syncFromInBridges, bridgeDeployment, logger) if err != nil { return nil, err } @@ -278,9 +229,7 @@ func newBridgeSync( " syncBlockChunkSize: %d\n"+ " ReorgDetector: %s\n"+ " waitForNewBlocksPeriod: %s\n"+ - " syncFullClaims: %t\n"+ - " syncFromInBridges: %t\n"+ - " embeddedClaimSyncFlag: %t", + " syncFromInBridges: %t", syncerID, cfg.DBPath, cfg.InitialBlockNum, @@ -292,9 +241,7 @@ func newBridgeSync( cfg.SyncBlockChunkSize, rd.String(), cfg.WaitForNewBlocksPeriod.String(), - syncFullClaims, syncFromInBridges, - embeddedClaimSyncFlag, ) return &BridgeSync{ @@ -305,6 +252,7 @@ func newBridgeSync( reorgDetector: rd, ethClient: ethClient, agglayerBridge: agglayerBridge, + cfg: cfg, }, nil } @@ -358,8 +306,8 @@ func resolveBridgeDeployment(ctx context.Context, // Start starts the synchronization process func (s *BridgeSync) Start(ctx context.Context) { - s.processor.log.Info("starting bridge synchronizer") - s.driver.Sync(ctx) + s.processor.log.Infof("starting bridge synchronizer InitialBlockNum: %d", s.cfg.InitialBlockNum) + s.driver.Sync(ctx, &s.cfg.InitialBlockNum) } func (s *BridgeSync) GetBridgesPaged( 
@@ -372,44 +320,6 @@ func (s *BridgeSync) GetBridgesPaged( return s.processor.GetBridgesPaged(ctx, page, pageSize, depositCount, networkIDs, fromAddress) } -func (s *BridgeSync) GetClaimsPaged( - ctx context.Context, - page, pageSize uint32, networkIDs []uint32, globalIndex *big.Int) ([]*claimsynctypes.Claim, int, error) { - return claimReaderDelegatePaged(s, func() ([]*claimsynctypes.Claim, int, error) { - return s.claimReader.GetClaimsPaged(ctx, page, pageSize, networkIDs, globalIndex) - }) -} - -func (s *BridgeSync) GetUnsetClaimsPaged( - ctx context.Context, - page, pageSize uint32, globalIndex *big.Int) ([]*claimsynctypes.UnsetClaim, int, error) { - return claimReaderDelegatePaged(s, func() ([]*claimsynctypes.UnsetClaim, int, error) { - return s.claimReader.GetUnsetClaimsPaged(ctx, page, pageSize, globalIndex) - }) -} - -func (s *BridgeSync) GetSetClaimsPaged( - ctx context.Context, - page, pageSize uint32, globalIndex *big.Int) ([]*claimsynctypes.SetClaim, int, error) { - return claimReaderDelegatePaged(s, func() ([]*claimsynctypes.SetClaim, int, error) { - return s.claimReader.GetSetClaimsPaged(ctx, page, pageSize, globalIndex) - }) -} - -func (c *BridgeSync) SetNextRequiredBlock(ctx context.Context, nextBlockNum uint64) error { - num, found, err := c.GetLastProcessedBlock(ctx) - if err != nil { - return fmt.Errorf("failed to get last processed block: %w", err) - } - if !found { - return fmt.Errorf("last processed block not found") - } - if nextBlockNum > num { - return fmt.Errorf("cannot set next required block to %d, last processed block is %d", nextBlockNum, num) - } - return nil -} - func (s *BridgeSync) GetLastProcessedBlock(ctx context.Context) (uint64, bool, error) { if s.processor.isHalted() { s.processor.log.Error("processor is halted, cannot get last processed block") @@ -425,18 +335,6 @@ func (s *BridgeSync) GetExitRootByHash(ctx context.Context, root common.Hash) (* return s.processor.exitTree.GetRootByHash(ctx, root) } -func (s *BridgeSync) 
GetClaimsByGlobalIndex(ctx context.Context, globalIndex *big.Int) ([]claimsynctypes.Claim, error) { - return claimReaderDelegate(s, func() ([]claimsynctypes.Claim, error) { - return s.claimReader.GetClaimsByGlobalIndex(ctx, nil, globalIndex) - }) -} - -func (s *BridgeSync) GetClaims(ctx context.Context, fromBlock, toBlock uint64) ([]claimsynctypes.Claim, error) { - return claimReaderDelegate(s, func() ([]claimsynctypes.Claim, error) { - return s.claimReader.GetClaims(ctx, nil, fromBlock, toBlock) - }) -} - func (s *BridgeSync) GetBridges(ctx context.Context, fromBlock, toBlock uint64) ([]Bridge, error) { if s.processor.isHalted() { return nil, sync.ErrInconsistentState @@ -607,31 +505,6 @@ func (s *BridgeSync) IsActive(ctx context.Context) bool { return !s.processor.isHalted() } -// GetClaimsByGER returns all DetailedClaimEvent claims for the given global exit root. -func (s *BridgeSync) GetClaimsByGER(ctx context.Context, globalExitRoot common.Hash) ([]*claimsynctypes.Claim, error) { - return claimReaderDelegate(s, func() ([]*claimsynctypes.Claim, error) { - return s.claimReader.GetClaimsByGER(ctx, globalExitRoot) - }) -} - -// claimReaderDelegate checks the halted state and, if healthy, calls fn. -func claimReaderDelegate[T any](s *BridgeSync, fn func() (T, error)) (T, error) { - if s.processor.isHalted() { - var zero T - return zero, sync.ErrInconsistentState - } - return fn() -} - -// claimReaderDelegatePaged is like claimReaderDelegate for functions returning (T, int, error). -func claimReaderDelegatePaged[T any](s *BridgeSync, fn func() (T, int, error)) (T, int, error) { - if s.processor.isHalted() { - var zero T - return zero, 0, sync.ErrInconsistentState - } - return fn() -} - // GetBridgeByDepositCount returns the bridge with the given deposit count (bridge or bridge_archive). 
func (s *BridgeSync) GetBridgeByDepositCount(ctx context.Context, depositCount uint32) (*Bridge, error) { return s.processor.GetBridgeByDepositCount(ctx, depositCount) diff --git a/bridgesync/bridgesync_test.go b/bridgesync/bridgesync_test.go index 2d208d4ed..5192154af 100644 --- a/bridgesync/bridgesync_test.go +++ b/bridgesync/bridgesync_test.go @@ -70,9 +70,13 @@ func TestNewLx(t *testing.T) { mockReorgDetector.EXPECT().Subscribe(mock.Anything).Return(nil, nil) mockReorgDetector.EXPECT().GetFinalizedBlockType().Return(blockFinalityType) mockReorgDetector.EXPECT().String().Return("mockReorgDetector") + // CustomHeaderByNumber is called once (for L1 on fresh DB; L2 reuses the same DB) + mockEthClient.EXPECT().CustomHeaderByNumber(mock.Anything, mock.Anything). + Return(aggkittypes.NewBlockHeader(0, common.Hash{}, 0, nil), nil).Once() dbQueryTimeout := 30 * time.Second + syncFromInBridgesResolved := testSyncFromInBridges bridgeSyncL1Cfg := Config{ DBPath: dbPath, BridgeAddr: bridge, @@ -85,6 +89,7 @@ func TestNewLx(t *testing.T) { RequireStorageContentCompatibility: true, DBQueryTimeout: cfgtypes.NewDuration(dbQueryTimeout), } + bridgeSyncL1Cfg.SyncFromInBridges.Resolved = &syncFromInBridgesResolved l1BridgeSync, err := NewL1( ctx, @@ -92,7 +97,6 @@ func TestNewLx(t *testing.T) { mockReorgDetector, mockEthClient, originNetwork, - false, ) require.NoError(t, err) @@ -111,16 +115,15 @@ func TestNewLx(t *testing.T) { RequireStorageContentCompatibility: true, DBQueryTimeout: cfgtypes.NewDuration(dbQueryTimeout), } + bridgeSyncL2Cfg.SyncFromInBridges.Resolved = &syncFromInBridgesResolved l2BridgdeSync, err := NewL2( ctx, bridgeSyncL2Cfg, mockReorgDetector, mockEthClient, originNetwork, - false, testSyncFromInBridges, bridgesynctypes.EmptyLER, - nil, ) require.NoError(t, err) @@ -137,10 +140,8 @@ func TestNewLx(t *testing.T) { mockReorgDetector, mockEthClient, originNetwork, - false, testSyncFromInBridges, bridgesynctypes.EmptyLER, - nil, ) require.Error(t, err) 
require.Nil(t, l2BridgeSyncer) @@ -151,7 +152,7 @@ func TestGetLastProcessedBlock(t *testing.T) { halted: true, log: log.WithFields("module", "L2BridgeSyncer"), }} - _, err := s.GetLastProcessedBlock(context.Background()) + _, _, err := s.GetLastProcessedBlock(context.Background()) require.ErrorIs(t, err, sync.ErrInconsistentState) } @@ -266,18 +267,6 @@ func TestGetExitRootByIndex(t *testing.T) { require.ErrorIs(t, err, sync.ErrInconsistentState) } -func TestGetClaims(t *testing.T) { - s := BridgeSync{processor: &processor{halted: true}} - _, err := s.GetClaims(context.Background(), 0, 0) - require.ErrorIs(t, err, sync.ErrInconsistentState) -} - -func TestGetClaimsByGlobalIndex(t *testing.T) { - s := BridgeSync{processor: &processor{halted: true}} - _, err := s.GetClaimsByGlobalIndex(context.Background(), new(big.Int)) - require.ErrorIs(t, err, sync.ErrInconsistentState) -} - func TestBridgeSync_GetTokenMappings(t *testing.T) { const ( syncBlockChunkSize = uint64(100) @@ -321,6 +310,8 @@ func TestBridgeSync_GetTokenMappings(t *testing.T) { mockReorgDetector.EXPECT().Subscribe(mock.Anything).Return(nil, nil) mockReorgDetector.EXPECT().GetFinalizedBlockType().Return(blockFinalityType) mockReorgDetector.EXPECT().String().Return("mockReorgDetector") + mockEthClient.EXPECT().CustomHeaderByNumber(mock.Anything, mock.Anything). 
+ Return(aggkittypes.NewBlockHeader(0, common.Hash{}, 0, nil), nil).Once() dbQueryTimeout := 30 * time.Second @@ -336,16 +327,15 @@ func TestBridgeSync_GetTokenMappings(t *testing.T) { RequireStorageContentCompatibility: false, DBQueryTimeout: cfgtypes.NewDuration(dbQueryTimeout), } + bridgeSyncCfg.SyncFromInBridges.Resolved = func() *bool { b := testSyncFromInBridges; return &b }() s, err := NewL2( ctx, bridgeSyncCfg, mockReorgDetector, mockEthClient, originNetwork, - false, testSyncFromInBridges, bridgesynctypes.EmptyLER, - nil, ) require.NoError(t, err) @@ -493,6 +483,8 @@ func TestBridgeSync_GetLegacyTokenMigrations(t *testing.T) { mockReorgDetector.EXPECT().Subscribe(mock.Anything).Return(nil, nil) mockReorgDetector.EXPECT().GetFinalizedBlockType().Return(blockFinalityType) mockReorgDetector.EXPECT().String().Return("mockReorgDetector") + mockEthClient.EXPECT().CustomHeaderByNumber(mock.Anything, mock.Anything). + Return(aggkittypes.NewBlockHeader(0, common.Hash{}, 0, nil), nil).Once() dbQueryTimeout := 30 * time.Second @@ -508,16 +500,15 @@ func TestBridgeSync_GetLegacyTokenMigrations(t *testing.T) { RequireStorageContentCompatibility: false, DBQueryTimeout: cfgtypes.NewDuration(dbQueryTimeout), } + bridgeSyncCfg.SyncFromInBridges.Resolved = func() *bool { b := testSyncFromInBridges; return &b }() s, err := NewL2( ctx, bridgeSyncCfg, mockReorgDetector, mockEthClient, originNetwork, - false, testSyncFromInBridges, bridgesynctypes.EmptyLER, - nil, ) require.NoError(t, err) @@ -606,24 +597,6 @@ func TestGetBridgePaged(t *testing.T) { require.ErrorIs(t, err, sync.ErrInconsistentState) } -func TestGetClaimPaged(t *testing.T) { - s := BridgeSync{processor: &processor{ - halted: true, - log: log.WithFields("module", "L2BridgeSyncer"), - }} - _, _, err := s.GetClaimsPaged(context.Background(), 0, 0, nil, nil) - require.ErrorIs(t, err, sync.ErrInconsistentState) -} - -func TestGetSetClaimPaged(t *testing.T) { - s := BridgeSync{processor: &processor{ - halted: true, - 
log: log.WithFields("module", "L2BridgeSyncer"), - }} - _, _, err := s.GetSetClaimsPaged(context.Background(), 0, 0, nil) - require.ErrorIs(t, err, sync.ErrInconsistentState) -} - func TestBridgeSync_GetLastReorgEvent(t *testing.T) { expectedReorgEvent := reorgdetector.ReorgEvent{ DetectedAt: int64(1710000000), @@ -700,6 +673,8 @@ func TestBridgeSync_GetLastRoot(t *testing.T) { mockReorgDetector.EXPECT().Subscribe(mock.Anything).Return(nil, nil) mockReorgDetector.EXPECT().GetFinalizedBlockType().Return(blockFinalityType) mockReorgDetector.EXPECT().String().Return("mockReorgDetector") + mockEthClient.EXPECT().CustomHeaderByNumber(mock.Anything, mock.Anything). + Return(aggkittypes.NewBlockHeader(0, common.Hash{}, 0, nil), nil).Once() dbQueryTimeout := 30 * time.Second @@ -715,16 +690,15 @@ func TestBridgeSync_GetLastRoot(t *testing.T) { RequireStorageContentCompatibility: false, DBQueryTimeout: cfgtypes.NewDuration(dbQueryTimeout), } + bridgeSyncCfg.SyncFromInBridges.Resolved = func() *bool { b := testSyncFromInBridges; return &b }() s, err := NewL2( ctx, bridgeSyncCfg, mockReorgDetector, mockEthClient, originNetwork, - false, testSyncFromInBridges, bridgesynctypes.EmptyLER, - nil, ) require.NoError(t, err) @@ -882,6 +856,8 @@ func TestBridgeSync_SubscribeToSync(t *testing.T) { mockReorgDetector.EXPECT().Subscribe(mock.Anything).Return(nil, nil) mockReorgDetector.EXPECT().GetFinalizedBlockType().Return(blockFinalityType) mockReorgDetector.EXPECT().String().Return("mockReorgDetector") + mockEthClient.EXPECT().CustomHeaderByNumber(mock.Anything, mock.Anything). 
+ Return(aggkittypes.NewBlockHeader(0, common.Hash{}, 0, nil), nil).Once() dbQueryTimeout := 30 * time.Second @@ -897,6 +873,7 @@ func TestBridgeSync_SubscribeToSync(t *testing.T) { RequireStorageContentCompatibility: false, DBQueryTimeout: cfgtypes.NewDuration(dbQueryTimeout), } + bridgeSyncCfg.SyncFromInBridges.Resolved = func() *bool { b := testSyncFromInBridges; return &b }() s, err := NewL2( ctx, @@ -904,10 +881,8 @@ func TestBridgeSync_SubscribeToSync(t *testing.T) { mockReorgDetector, mockEthClient, originNetwork, - false, testSyncFromInBridges, bridgesynctypes.EmptyLER, - nil, ) require.NoError(t, err) @@ -960,42 +935,6 @@ func TestBridgeSync_SubscribeToSync(t *testing.T) { }) } -func TestBridgeSync_GetClaimsByGER(t *testing.T) { - ctx := context.Background() - p := createTestProcessor(t, "test_bridgesync_get_claims_by_ger") - s := BridgeSync{processor: p} - - ger := common.HexToHash("0xaabbccdd11223344aabbccdd11223344aabbccdd11223344aabbccdd11223344") - - t.Run("returns empty slice for unknown GER", func(t *testing.T) { - claims, err := s.GetClaimsByGER(ctx, ger) - require.NoError(t, err) - require.Empty(t, claims) - }) - - t.Run("returns matching DetailedClaimEvent", func(t *testing.T) { - tx, err := p.db.BeginTx(ctx, nil) - require.NoError(t, err) - _, err = tx.Exec(`INSERT INTO block (num) VALUES ($1)`, uint64(1)) - require.NoError(t, err) - claim := &Claim{ - BlockNum: 1, - BlockPos: 0, - GlobalIndex: big.NewInt(42), - GlobalExitRoot: ger, - Type: DetailedClaimEvent, - Amount: big.NewInt(0), - } - require.NoError(t, meddler.Insert(tx, "claim", claim)) - require.NoError(t, tx.Commit()) - - claims, err := s.GetClaimsByGER(ctx, ger) - require.NoError(t, err) - require.Len(t, claims, 1) - require.Equal(t, int64(42), claims[0].GlobalIndex.Int64()) - }) -} - func TestBridgeSync_GetBridgeByDepositCount(t *testing.T) { ctx := context.Background() p := createTestProcessor(t, "test_bridgesync_get_bridge_by_deposit_count") diff --git a/bridgesync/config.go 
b/bridgesync/config.go index 175b9f536..b2c85fad9 100644 --- a/bridgesync/config.go +++ b/bridgesync/config.go @@ -2,70 +2,25 @@ package bridgesync import ( "fmt" - "strings" "github.com/agglayer/aggkit/config/types" aggkittypes "github.com/agglayer/aggkit/types" "github.com/ethereum/go-ethereum/common" ) -// TrueFalseAutoMode represents the mode for FromAddress extraction -type TrueFalseAutoMode string +// TrueFalseAutoMode is an alias for config/types.TrueFalseAutoMode. +type TrueFalseAutoMode = types.TrueFalseAutoMode -const ( +// Re-export the TrueFalseAutoMode values from config/types. +var ( // TrueMode always extracts FromAddress using debug_traceTransaction - TrueMode TrueFalseAutoMode = "true" + TrueMode = types.TrueMode // FalseMode never extracts FromAddress - FalseMode TrueFalseAutoMode = "false" + FalseMode = types.FalseMode // AutoMode decides automatically based on whether BRIDGE component is active - AutoMode TrueFalseAutoMode = "auto" + AutoMode = types.AutoMode ) -// UnmarshalText implements encoding.TextUnmarshaler -func (m *TrueFalseAutoMode) UnmarshalText(text []byte) error { - str := strings.ToLower(strings.TrimSpace(string(text))) - switch str { - case "true": - *m = TrueMode - case "false": - *m = FalseMode - case "auto": - *m = AutoMode - default: - return fmt.Errorf("invalid TrueFalseAutoMode: value %s (valid values: true, false, auto)", str) - } - return nil -} - -// String returns the string representation -func (m TrueFalseAutoMode) String() string { - return string(m) -} - -func (m TrueFalseAutoMode) Validate(fieldName string) error { - cpy := m - if err := cpy.UnmarshalText([]byte(m.String())); err != nil { - return fmt.Errorf("invalid %s configuration: %w", fieldName, err) - } - return nil -} - -// Resolve converts the mode to a boolean, using the provided components list to resolve "auto" -func (m TrueFalseAutoMode) Resolve(autoModeResult bool) bool { - switch m { - case TrueMode: - return true - case FalseMode: - return false - 
case AutoMode: - // Resolve to auto mode - return autoModeResult - default: - // Default to false - return false - } -} - type Config struct { // DBPath path of the DB DBPath string `mapstructure:"DBPath"` @@ -98,16 +53,8 @@ type Config struct { // - "auto": automatically decides based on whether BRIDGE component is active // Note: TxnSender and ToAddress are always extracted via standard eth_getTransactionByHash. // Default: "auto" - SyncFromInBridges TrueFalseAutoMode `jsonschema:"enum=true, enum=false, enum=auto" mapstructure:"SyncFromInBridges"` - // EmbeddedClaimSync controls whether to use embedded claim synchronization mode. - // If brridge-service is running then we must use embedded claim sync, if not it runs in standalone - EmbeddedClaimSync TrueFalseAutoMode `jsonschema:"enum=true, enum=false, enum=auto" mapstructure:"EmbeddedClaimSync"` - // SyncFromInBridgesResolved is the resolved boolean value of SyncFromInBridges after "auto" is evaluated. - // Not read from config file; set programmatically after resolution. - SyncFromInBridgesResolved *bool `mapstructure:"-"` - // EmbeddedClaimSyncResolved is the resolved boolean value of EmbeddedClaimSync after "auto" is evaluated. - // Not read from config file; set programmatically after resolution. - EmbeddedClaimSyncResolved *bool `mapstructure:"-"` + // SyncFromInBridges.Resolved is set programmatically after resolution; not read from config. 
+ SyncFromInBridges TrueFalseAutoMode `jsonschema:"enum=true, enum=false, enum=auto" mapstructure:"SyncFromInBridges"` //nolint:lll } // Validate checks if the configuration is valid @@ -115,13 +62,12 @@ func (c Config) Validate() error { if err := c.BlockFinality.Validate(); err != nil { return fmt.Errorf("invalid BlockFinality configuration: %w", err) } - // Validate SyncFromInBridges - if err := c.SyncFromInBridges.Validate("SyncFromInBridges"); err != nil { - return err - } - // Validate EmbeddedClaimSync - if err := c.EmbeddedClaimSync.Validate("EmbeddedClaimSync"); err != nil { - return err + // Validate SyncFromInBridges (empty is allowed — means not configured) + if c.SyncFromInBridges.Mode != "" { + var m TrueFalseAutoMode + if err := m.UnmarshalText([]byte(c.SyncFromInBridges.Mode)); err != nil { + return fmt.Errorf("invalid SyncFromInBridges value: %w", err) + } } return nil } @@ -130,16 +76,10 @@ func (c Config) Validate() error { // to log it func (c *Config) ResolvedString() []string { var result []string - if c.SyncFromInBridgesResolved != nil { - result = append(result, fmt.Sprintf("SyncFromInBridges:%s -> %t", c.SyncFromInBridges, *c.SyncFromInBridgesResolved)) + if c.SyncFromInBridges.Resolved != nil { + result = append(result, fmt.Sprintf("SyncFromInBridges:%s -> %t", c.SyncFromInBridges, *c.SyncFromInBridges.Resolved)) } else { result = append(result, fmt.Sprintf("SyncFromInBridges: %s -> ???", c.SyncFromInBridges)) } - if c.EmbeddedClaimSyncResolved != nil { - result = append(result, fmt.Sprintf("EmbeddedClaimSync:%s -> %t", c.EmbeddedClaimSync, *c.EmbeddedClaimSyncResolved)) - } else { - result = append(result, fmt.Sprintf("EmbeddedClaimSync: %s -> ???", c.EmbeddedClaimSync)) - } return result - } diff --git a/bridgesync/config_test.go b/bridgesync/config_test.go index 3c42513bf..cc6922976 100644 --- a/bridgesync/config_test.go +++ b/bridgesync/config_test.go @@ -65,50 +65,50 @@ func TestSyncFromInBridgesMode_UnmarshalText(t *testing.T) { { 
name: "auto lowercase", input: "auto", - expected: AutoValue, + expected: AutoMode, expectedError: "", }, { name: "auto uppercase", input: "AUTO", - expected: AutoValue, + expected: AutoMode, expectedError: "", }, { name: "auto mixed case", input: "AuTo", - expected: AutoValue, + expected: AutoMode, expectedError: "", }, { name: "auto with whitespace", input: " auto ", - expected: AutoValue, + expected: AutoMode, expectedError: "", }, { name: "invalid value", input: "invalid", - expected: "", - expectedError: "invalid SyncFromInBridgesMode: invalid (valid values: true, false, auto)", + expected: TrueFalseAutoMode{}, + expectedError: "invalid TrueFalseAutoMode: invalid (valid values: true, false, auto)", }, { name: "empty string", input: "", - expected: "", - expectedError: "invalid SyncFromInBridgesMode: (valid values: true, false, auto)", + expected: TrueFalseAutoMode{}, + expectedError: "invalid TrueFalseAutoMode: (valid values: true, false, auto)", }, { name: "numeric value", input: "1", - expected: "", - expectedError: "invalid SyncFromInBridgesMode: 1 (valid values: true, false, auto)", + expected: TrueFalseAutoMode{}, + expectedError: "invalid TrueFalseAutoMode: 1 (valid values: true, false, auto)", }, { name: "yes value", input: "yes", - expected: "", - expectedError: "invalid SyncFromInBridgesMode: yes (valid values: true, false, auto)", + expected: TrueFalseAutoMode{}, + expectedError: "invalid TrueFalseAutoMode: yes (valid values: true, false, auto)", }, } @@ -146,17 +146,17 @@ func TestSyncFromInBridgesMode_String(t *testing.T) { }, { name: "auto mode", - mode: AutoValue, + mode: AutoMode, expected: "auto", }, { name: "empty mode", - mode: TrueFalseAutoMode(""), + mode: TrueFalseAutoMode{}, expected: "", }, { name: "invalid mode", - mode: TrueFalseAutoMode("invalid"), + mode: TrueFalseAutoMode{Mode: "invalid"}, expected: "invalid", }, } @@ -202,37 +202,37 @@ func TestSyncFromInBridgesMode_Resolve(t *testing.T) { }, { name: "auto mode with bridge 
component", - mode: AutoValue, + mode: AutoMode, hasBridgeComponent: true, expected: true, }, { name: "auto mode without bridge component", - mode: AutoValue, + mode: AutoMode, hasBridgeComponent: false, expected: false, }, { name: "invalid mode with bridge component", - mode: TrueFalseAutoMode("invalid"), + mode: TrueFalseAutoMode{Mode: "invalid"}, hasBridgeComponent: true, expected: false, }, { name: "invalid mode without bridge component", - mode: TrueFalseAutoMode("invalid"), + mode: TrueFalseAutoMode{Mode: "invalid"}, hasBridgeComponent: false, expected: false, }, { name: "empty mode with bridge component", - mode: TrueFalseAutoMode(""), + mode: TrueFalseAutoMode{}, hasBridgeComponent: true, expected: false, }, { name: "empty mode without bridge component", - mode: TrueFalseAutoMode(""), + mode: TrueFalseAutoMode{}, hasBridgeComponent: false, expected: false, }, @@ -279,7 +279,7 @@ func TestConfig_Validate(t *testing.T) { name: "valid config with SyncFromInBridges auto", config: Config{ BlockFinality: aggkittypes.SafeBlock, - SyncFromInBridges: AutoValue, + SyncFromInBridges: AutoMode, }, expectedError: "", }, @@ -287,7 +287,7 @@ func TestConfig_Validate(t *testing.T) { name: "valid config with empty SyncFromInBridges", config: Config{ BlockFinality: aggkittypes.SafeBlock, - SyncFromInBridges: "", + SyncFromInBridges: TrueFalseAutoMode{}, }, expectedError: "", }, @@ -305,7 +305,7 @@ func TestConfig_Validate(t *testing.T) { name: "invalid config with invalid SyncFromInBridges", config: Config{ BlockFinality: aggkittypes.SafeBlock, - SyncFromInBridges: "invalid_value", + SyncFromInBridges: TrueFalseAutoMode{Mode: "invalid_value"}, }, expectedError: "invalid SyncFromInBridges value:", }, @@ -313,7 +313,7 @@ func TestConfig_Validate(t *testing.T) { name: "invalid config with numeric SyncFromInBridges", config: Config{ BlockFinality: aggkittypes.SafeBlock, - SyncFromInBridges: "123", + SyncFromInBridges: TrueFalseAutoMode{Mode: "123"}, }, expectedError: "invalid 
SyncFromInBridges value:", }, diff --git a/bridgesync/e2e_test.go b/bridgesync/e2e_test.go index 6ec2b6f7b..c2dc1c47a 100644 --- a/bridgesync/e2e_test.go +++ b/bridgesync/e2e_test.go @@ -33,6 +33,17 @@ var ( const testSyncFromInBridges = true +// bridgeSyncAdapter wraps BridgeSync to satisfy helpers.Processorer interface. +type bridgeSyncAdapter struct { + *bridgesync.BridgeSync +} + +func (a *bridgeSyncAdapter) GetLastProcessedBlock(ctx context.Context) (uint64, error) { + block, _, err := a.BridgeSync.GetLastProcessedBlock(ctx) + return block, err +} + + func mockClientCallGetTransactionByHash(t *testing.T, mockClient *mocks.RPCClienter, expectedTxHash common.Hash, fromAddress string, toAddress string) { @@ -67,7 +78,10 @@ func TestBridgeEventE2E(t *testing.T) { arg.Input = bridgesync.BridgeAssetMethodID }).Return(nil) - l1Setup, _ := helpers.NewSimulatedEVMEnvironment(t, &helpers.EnvironmentConfig{L1RPCClient: rpcClient}) + l1Setup, _ := helpers.NewSimulatedEVMEnvironment(t, &helpers.EnvironmentConfig{ + L1RPCClient: rpcClient, + L2GERManagerType: helpers.LegacyL2GERContract, + }) ctx := t.Context() // Send bridge txs bridgesSent := 0 @@ -132,12 +146,12 @@ func TestBridgeEventE2E(t *testing.T) { time.Sleep(time.Second * 2) // sleeping since the processor could be up to date, but have pending reorgs lb := getFinalizedBlockNumber(t, ctx, l1Setup.SimBackend.Client()) - helpers.RequireProcessorUpdated(t, l1Setup.BridgeSync, lb, etherman.NewDefaultEthClient(l1Setup.SimBackend.Client(), nil, nil)) + helpers.RequireProcessorUpdated(t, &bridgeSyncAdapter{l1Setup.BridgeSync}, lb, etherman.NewDefaultEthClient(l1Setup.SimBackend.Client(), nil, nil)) // Get bridges lastBlock, err := l1Setup.SimBackend.Client().BlockNumber(ctx) require.NoError(t, err) - lastProcessedBlock, err := l1Setup.BridgeSync.GetLastProcessedBlock(ctx) + lastProcessedBlock, _, err := l1Setup.BridgeSync.GetLastProcessedBlock(ctx) require.NoError(t, err) actualBridges, err := 
l1Setup.BridgeSync.GetBridges(ctx, 0, lastProcessedBlock) require.NoError(t, err) @@ -212,7 +226,7 @@ func TestBridgeL1SyncerWithReorgDetector(t *testing.T) { ethClient := etherman.NewDefaultEthClient(client.Client(), rpcClient, ethClientConfig) // Create the bridge syncer with reorg detector - syncer, err := bridgesync.NewL1(ctx, bridgeSyncCfg, rd, ethClient, originNetwork, testSyncFromInBridges) + syncer, err := bridgesync.NewL1(ctx, bridgeSyncCfg, rd, ethClient, originNetwork) require.NoError(t, err) require.NotNil(t, syncer) require.Equal(t, originNetwork, syncer.OriginNetwork()) @@ -248,7 +262,7 @@ func TestBridgeL1SyncerWithReorgDetector(t *testing.T) { t.Logf(" Block number after first bridge: %d", blockNum1) // Wait for syncer to process - helpers.WaitForSyncerToCatchUp(ctx, t, syncer, client) + helpers.WaitForSyncerToCatchUp(ctx, t, &bridgeSyncAdapter{syncer}, client) // Step 4: Record the block hash to fork from later (fork from the current block to ensure reorg detection) t.Log("Step 4: Recording block hash for fork point") @@ -285,10 +299,10 @@ func TestBridgeL1SyncerWithReorgDetector(t *testing.T) { t.Logf(" Block number after second bridge: %d", blockNum2) t.Logf(" Created bridge tx: %s", tx2.Hash().Hex()) - helpers.WaitForSyncerToCatchUp(ctx, t, syncer, client) + helpers.WaitForSyncerToCatchUp(ctx, t, &bridgeSyncAdapter{syncer}, client) // Check bridge count in L1 DB - lastProcessed, err := syncer.GetLastProcessedBlock(ctx) + lastProcessed, _, err := syncer.GetLastProcessedBlock(ctx) require.NoError(t, err) bridgesBeforeFork, err := syncer.GetBridges(ctx, 0, lastProcessed) require.NoError(t, err) @@ -333,11 +347,11 @@ func TestBridgeL1SyncerWithReorgDetector(t *testing.T) { require.NoError(t, err) t.Logf("Hash of the forked block: %s", forkedBlockHash.Hash().Hex()) t.Logf("After fork Current block number: %d", currBlockNum) - helpers.WaitForSyncerToCatchUp(ctx, t, syncer, client) + helpers.WaitForSyncerToCatchUp(ctx, t, &bridgeSyncAdapter{syncer}, 
client) // Step 9: Check bridge count after fork t.Log("Step 9: Checking bridge count after fork") - lastProcessedAfterFork, err := syncer.GetLastProcessedBlock(ctx) + lastProcessedAfterFork, _, err := syncer.GetLastProcessedBlock(ctx) require.NoError(t, err) bridgesAfterFork, err := syncer.GetBridges(ctx, 0, lastProcessedAfterFork) require.NoError(t, err) @@ -369,6 +383,7 @@ func TestReorgWithSameHashEdgeCase(t *testing.T) { // Create bridge syncer with reorg detector const originNetwork = uint32(1) + syncFromBridges := true bridgeSyncCfg := bridgesync.Config{ DBPath: dbPathSyncer, BridgeAddr: bridgeAddr, @@ -381,6 +396,7 @@ func TestReorgWithSameHashEdgeCase(t *testing.T) { RequireStorageContentCompatibility: true, DBQueryTimeout: cfgtypes.NewDuration(5 * time.Second), } + bridgeSyncCfg.SyncFromInBridges.Resolved = &syncFromBridges rpcClient := mocks.NewRPCClienter(t) // txReceipt To is not bridgeAddr, so must call debugTrace mockClientCallGetTransactionByHash(t, rpcClient, @@ -394,7 +410,7 @@ func TestReorgWithSameHashEdgeCase(t *testing.T) { arg.Input = bridgesync.BridgeAssetMethodID }).Return(nil) ethClient := etherman.NewDefaultEthClient(client.Client(), rpcClient, ethClientConfig) - syncer, err := bridgesync.NewL1(ctx, bridgeSyncCfg, rd, ethClient, originNetwork, testSyncFromInBridges) + syncer, err := bridgesync.NewL1(ctx, bridgeSyncCfg, rd, ethClient, originNetwork) require.NoError(t, err) require.NotNil(t, syncer) @@ -431,7 +447,7 @@ func TestReorgWithSameHashEdgeCase(t *testing.T) { helpers.CommitBlocks(t, client, 1, blockTime) t.Logf("Created tx: %s", tx.Hash().Hex()) - helpers.WaitForSyncerToCatchUp(ctx, t, syncer, client) + helpers.WaitForSyncerToCatchUp(ctx, t, &bridgeSyncAdapter{syncer}, client) // commit 3 blocks helpers.CommitBlocks(t, client, 3, blockTime) @@ -511,7 +527,7 @@ func TestBridgeL1SyncerWithMultipleReorgs(t *testing.T) { }).Return(nil) ethClient := etherman.NewDefaultEthClient(client.Client(), rpcClient, ethClientConfig) // Create the 
bridge syncer with reorg detector - syncer, err := bridgesync.NewL1(ctx, bridgeSyncCfg, rd, ethClient, originNetwork, testSyncFromInBridges) + syncer, err := bridgesync.NewL1(ctx, bridgeSyncCfg, rd, ethClient, originNetwork) require.NoError(t, err) require.NotNil(t, syncer) require.Equal(t, originNetwork, syncer.OriginNetwork()) @@ -581,7 +597,7 @@ func TestBridgeL1SyncerWithMultipleReorgs(t *testing.T) { helpers.CommitBlocks(t, client, 2, blockTime) // Check bridge count in L1 DB - lastProcessed, err := syncer.GetLastProcessedBlock(ctx) + lastProcessed, _, err := syncer.GetLastProcessedBlock(ctx) require.NoError(t, err) bridgesBeforeFork, err := syncer.GetBridges(ctx, 0, lastProcessed) require.NoError(t, err) @@ -621,7 +637,7 @@ func TestBridgeL1SyncerWithMultipleReorgs(t *testing.T) { // Step 9: Check bridge count after fork t.Log("Step 9: Checking bridge count after fork") - lastProcessedAfterFork, err := syncer.GetLastProcessedBlock(ctx) + lastProcessedAfterFork, _, err := syncer.GetLastProcessedBlock(ctx) require.NoError(t, err) bridgesAfterFork, err := syncer.GetBridges(ctx, 0, lastProcessedAfterFork) require.NoError(t, err) @@ -671,7 +687,7 @@ func TestBridgeL1SyncerWithMultipleReorgs(t *testing.T) { require.Equal(t, 2, reorgCount) // Check bridge count in L1 DB - lastProcessed, err = syncer.GetLastProcessedBlock(ctx) + lastProcessed, _, err = syncer.GetLastProcessedBlock(ctx) require.NoError(t, err) bridgesAfterFourthBridge, err := syncer.GetBridges(ctx, 0, lastProcessed) require.NoError(t, err) @@ -695,7 +711,7 @@ func TestBridgeL1SyncerWithMultipleReorgs(t *testing.T) { helpers.CommitBlocks(t, client, 2, blockTime) // Check bridge count in L1 DB - lastProcessed, err = syncer.GetLastProcessedBlock(ctx) + lastProcessed, _, err = syncer.GetLastProcessedBlock(ctx) require.NoError(t, err) bridgesAfterFifthBridge, err := syncer.GetBridges(ctx, 0, lastProcessed) require.NoError(t, err) diff --git a/claimsync/agglayer_bridge_l2_reader.go 
b/claimsync/agglayer_bridge_l2_reader.go new file mode 100644 index 000000000..563b4086e --- /dev/null +++ b/claimsync/agglayer_bridge_l2_reader.go @@ -0,0 +1,146 @@ +package claimsync + +import ( + "context" + "fmt" + "math/big" + + "github.com/0xPolygon/cdk-contracts-tooling/contracts/aggchain-multisig/agglayerbridgel2" + claimsynctypes "github.com/agglayer/aggkit/claimsync/types" + aggkitcommon "github.com/agglayer/aggkit/common" + "github.com/agglayer/aggkit/log" + aggkittypes "github.com/agglayer/aggkit/types" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" +) + +// AgglayerBridgeL2Reader provides functionality to read and interact with the AggLayer Bridge L2 contract. +// It encapsulates the contract instance and provides methods to query bridge-related data from the L2 chain. +type AgglayerBridgeL2Reader struct { + agglayerBridgeL2 *agglayerbridgel2.Agglayerbridgel2 + unsetClaimsMaxLogBlockRange uint64 +} + +// NewAgglayerBridgeL2Reader creates a new instance of AgglayerBridgeL2Reader. +// It initializes the contract instance using the provided bridge address and L2 client. +// +// Parameters: +// - bridgeAddr: The Ethereum address of the AggLayer Bridge L2 contract +// - l2Client: The Ethereum client for interacting with the L2 chain +// +// Returns: +// - *AgglayerBridgeL2Reader: A new reader instance +// - error: Any error that occurred during contract initialization +func NewAgglayerBridgeL2Reader( + bridgeAddr common.Address, + l2Client aggkittypes.BaseEthereumClienter, +) (*AgglayerBridgeL2Reader, error) { + return NewAgglayerBridgeL2ReaderWithMaxLogBlockRange(bridgeAddr, l2Client, 0) +} + +// NewAgglayerBridgeL2ReaderWithMaxLogBlockRange creates a new instance of AgglayerBridgeL2Reader +// with an optional proactive max block range for unset claims eth_getLogs queries. 
+func NewAgglayerBridgeL2ReaderWithMaxLogBlockRange( + bridgeAddr common.Address, + l2Client aggkittypes.BaseEthereumClienter, + unsetClaimsMaxLogBlockRange uint64, +) (*AgglayerBridgeL2Reader, error) { + agglayerBridgeL2Contract, err := agglayerbridgel2.NewAgglayerbridgel2(bridgeAddr, l2Client) + if err != nil { + return nil, err + } + + return &AgglayerBridgeL2Reader{ + agglayerBridgeL2: agglayerBridgeL2Contract, + unsetClaimsMaxLogBlockRange: unsetClaimsMaxLogBlockRange, + }, nil +} + +// GetUnsetClaimsForBlockRange retrieves all unset claims (unclaims) within a specified block range. +// It filters the UpdatedUnsetGlobalIndexHashChain events from the bridge contract and converts them +// into Unclaim objects for further processing. +// If the block range is too large, it automatically splits the request into smaller chunks. +// +// Parameters: +// - ctx: Context for cancellation and timeout control +// - fromBlock: The starting block number for the search range (inclusive) +// - toBlock: The ending block number for the search range (inclusive) +// +// Returns: +// - []types.Unclaim: A slice of Unclaim objects containing global index, block number, and block index +// - error: Any error that occurred during the event filtering or iteration +func (r *AgglayerBridgeL2Reader) GetUnsetClaimsForBlockRange(ctx context.Context, + fromBlock, toBlock uint64) ([]claimsynctypes.Unclaim, error) { + if fromBlock > toBlock { + return nil, fmt.Errorf("invalid block range: fromBlock(%d) > toBlock(%d)", fromBlock, toBlock) + } + + if r.unsetClaimsMaxLogBlockRange > 0 && toBlock-fromBlock >= r.unsetClaimsMaxLogBlockRange { + return r.getUnsetClaimsInChunks(ctx, fromBlock, toBlock, r.unsetClaimsMaxLogBlockRange) + } + + return r.fetchUnsetClaimsWithFallbackChunking(ctx, fromBlock, toBlock) +} + +func (r *AgglayerBridgeL2Reader) fetchUnsetClaimsWithFallbackChunking(ctx context.Context, + fromBlock, toBlock uint64) ([]claimsynctypes.Unclaim, error) { + unclaims, err := 
r.fetchUnsetClaims(ctx, fromBlock, toBlock) + if err != nil { + // Check if error is due to block range being too large + maxRange, isMaxRangeErr := aggkitcommon.ParseMaxRangeFromError(err.Error()) + if isMaxRangeErr { + return r.getUnsetClaimsInChunks(ctx, fromBlock, toBlock, maxRange) + } + + return nil, err + } + + return unclaims, nil +} + +func (r *AgglayerBridgeL2Reader) getUnsetClaimsInChunks(ctx context.Context, + fromBlock, toBlock, maxRange uint64) ([]claimsynctypes.Unclaim, error) { + log.Debugf("block range too large, splitting into chunks of max %d blocks", maxRange) + return aggkitcommon.ChunkedRangeQuery( + ctx, fromBlock, toBlock, maxRange, + r.fetchUnsetClaimsWithFallbackChunking, + func(all, chunk []claimsynctypes.Unclaim) []claimsynctypes.Unclaim { + return append(all, chunk...) + }, + make([]claimsynctypes.Unclaim, 0), + ) +} + +// fetchUnsetClaims performs the actual event filtering for a given block range +func (r *AgglayerBridgeL2Reader) fetchUnsetClaims(ctx context.Context, + fromBlock, toBlock uint64) ([]claimsynctypes.Unclaim, error) { + unclaimIterator, err := r.agglayerBridgeL2.FilterUpdatedUnsetGlobalIndexHashChain( + &bind.FilterOpts{Context: ctx, Start: fromBlock, End: &toBlock}) + if err != nil { + return nil, err + } + + defer func() { + if err := unclaimIterator.Close(); err != nil { + log.Errorf("failed to close UpdatedUnsetGlobalIndexHashChain iterator: %v", err) + } + }() + + unclaims := make([]claimsynctypes.Unclaim, 0) + for unclaimIterator.Next() { + globalIndex := unclaimIterator.Event.UnsetGlobalIndex + log.Infof("unset claim: %s at block %d, index %d", new(big.Int).SetBytes(globalIndex[:]), + unclaimIterator.Event.Raw.BlockNumber, unclaimIterator.Event.Raw.Index) + unclaims = append(unclaims, claimsynctypes.Unclaim{ + GlobalIndex: new(big.Int).SetBytes(globalIndex[:]), + BlockNumber: unclaimIterator.Event.Raw.BlockNumber, + LogIndex: uint64(unclaimIterator.Event.Raw.Index), + }) + } + + if unclaimIterator.Error() != nil { 
+ return nil, unclaimIterator.Error() + } + + return unclaims, nil +} diff --git a/claimsync/agglayer_bridge_l2_reader_test.go b/claimsync/agglayer_bridge_l2_reader_test.go new file mode 100644 index 000000000..22465bf2a --- /dev/null +++ b/claimsync/agglayer_bridge_l2_reader_test.go @@ -0,0 +1,611 @@ +package claimsync + +import ( + "context" + "errors" + "testing" + + claimsynctypes "github.com/agglayer/aggkit/claimsync/types" + aggkitcommon "github.com/agglayer/aggkit/common" + "github.com/agglayer/aggkit/etherman" + aggkittypes "github.com/agglayer/aggkit/types" + mocksethclient "github.com/agglayer/aggkit/types/mocks" + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/common" + ethtypes "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/ethclient/simulated" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +func TestNewAgglayerBridgeL2Reader(t *testing.T) { + tests := []struct { + name string + bridgeAddr common.Address + l2Client aggkittypes.BaseEthereumClienter + expectError bool + errorMsg string + }{ + { + name: "successful creation", + bridgeAddr: common.HexToAddress("0x1234567890abcdef1234567890abcdef12345678"), + l2Client: mocksethclient.NewBaseEthereumClienter(t), + expectError: false, + }, + { + name: "zero address", + bridgeAddr: common.Address{}, + l2Client: mocksethclient.NewBaseEthereumClienter(t), + expectError: false, // Zero address is valid, contract creation might still work + }, + { + name: "contract creation with valid mock client", + bridgeAddr: common.HexToAddress("0x1234567890abcdef1234567890abcdef12345678"), + l2Client: mocksethclient.NewBaseEthereumClienter(t), + expectError: false, // The contract creation should succeed with a valid mock client + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + reader, err := NewAgglayerBridgeL2Reader(tt.bridgeAddr, tt.l2Client) + + if tt.expectError { + require.Error(t, err) + 
require.Nil(t, reader) + if tt.errorMsg != "" { + require.Contains(t, err.Error(), tt.errorMsg) + } + } else { + require.NoError(t, err) + require.NotNil(t, reader) + require.NotNil(t, reader.agglayerBridgeL2) + } + }) + } +} + +func TestAgglayerBridgeL2Reader_GetUnsetClaimsForBlockRange_ProactiveChunkingByConfig(t *testing.T) { + ctx := context.Background() + bridgeAddr := common.HexToAddress("0x1234567890abcdef1234567890abcdef12345678") + + t.Run("configured max proactively chunks range", func(t *testing.T) { + mockClient := mocksethclient.NewBaseEthereumClienter(t) + reader, err := NewAgglayerBridgeL2ReaderWithMaxLogBlockRange(bridgeAddr, mockClient, 1000) + require.NoError(t, err) + + var ranges [][2]uint64 + mockClient.On("FilterLogs", mock.Anything, mock.Anything).Return([]ethtypes.Log{}, nil).Run(func(args mock.Arguments) { + q, ok := args.Get(1).(ethereum.FilterQuery) + require.True(t, ok) + require.NotNil(t, q.FromBlock) + require.NotNil(t, q.ToBlock) + ranges = append(ranges, [2]uint64{q.FromBlock.Uint64(), q.ToBlock.Uint64()}) + }).Times(3) + + unclaims, err := reader.GetUnsetClaimsForBlockRange(ctx, 0, 2500) + require.NoError(t, err) + require.NotNil(t, unclaims) + require.Equal(t, [][2]uint64{{0, 999}, {1000, 1999}, {2000, 2500}}, ranges) + }) + + t.Run("zero configured max keeps current non-proactive behavior", func(t *testing.T) { + mockClient := mocksethclient.NewBaseEthereumClienter(t) + reader, err := NewAgglayerBridgeL2ReaderWithMaxLogBlockRange(bridgeAddr, mockClient, 0) + require.NoError(t, err) + + mockClient.On("FilterLogs", mock.Anything, mock.Anything).Return([]ethtypes.Log{}, nil).Once() + + unclaims, err := reader.GetUnsetClaimsForBlockRange(ctx, 0, 2500) + require.NoError(t, err) + require.NotNil(t, unclaims) + }) +} + +func TestAgglayerBridgeL2Reader_GetUnsetClaimsForBlockRange_WithMockedClient(t *testing.T) { + ctx := context.Background() + bridgeAddr := common.HexToAddress("0x1234567890abcdef1234567890abcdef12345678") + mockClient := 
mocksethclient.NewBaseEthereumClienter(t) + + // Mock the FilterLogs method that will be called by the contract + mockClient.On("FilterLogs", mock.Anything, mock.Anything).Return([]ethtypes.Log{}, nil) + + reader, err := NewAgglayerBridgeL2Reader(bridgeAddr, mockClient) + require.NoError(t, err) + + t.Run("successful call with mocked client", func(t *testing.T) { + unclaims, err := reader.GetUnsetClaimsForBlockRange(ctx, 100, 200) + require.NoError(t, err) + require.NotNil(t, unclaims) + require.Empty(t, unclaims) // Should be empty since we mocked empty results + }) + + t.Run("zero block range", func(t *testing.T) { + unclaims, err := reader.GetUnsetClaimsForBlockRange(ctx, 0, 0) + require.NoError(t, err) + require.NotNil(t, unclaims) + require.Empty(t, unclaims) + }) + + t.Run("same from and to block", func(t *testing.T) { + unclaims, err := reader.GetUnsetClaimsForBlockRange(ctx, 100, 100) + require.NoError(t, err) + require.NotNil(t, unclaims) + require.Empty(t, unclaims) + }) + + t.Run("large block range", func(t *testing.T) { + unclaims, err := reader.GetUnsetClaimsForBlockRange(ctx, 0, ^uint64(0)) + require.NoError(t, err) + require.NotNil(t, unclaims) + require.Empty(t, unclaims) + }) + + mockClient.AssertExpectations(t) +} + +func TestAgglayerBridgeL2Reader_GetUnsetClaimsForBlockRange_ErrorHandling(t *testing.T) { + bridgeAddr := common.HexToAddress("0x1234567890abcdef1234567890abcdef12345678") + mockClient := mocksethclient.NewBaseEthereumClienter(t) + + reader, err := NewAgglayerBridgeL2Reader(bridgeAddr, mockClient) + require.NoError(t, err) + + t.Run("context cancellation", func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + cancel() // Cancel immediately + + // Mock the FilterLogs method + mockClient.On("FilterLogs", mock.Anything, mock.Anything).Return([]ethtypes.Log{}, nil) + + unclaims, err := reader.GetUnsetClaimsForBlockRange(ctx, 100, 200) + require.NoError(t, err) // Context cancellation doesn't cause error in this 
implementation + require.NotNil(t, unclaims) + require.Empty(t, unclaims) + }) + + t.Run("nil context handling", func(t *testing.T) { + // Test that nil context is handled gracefully + unclaims, err := reader.GetUnsetClaimsForBlockRange(context.TODO(), 100, 200) + require.NoError(t, err) // The function handles nil context gracefully + require.NotNil(t, unclaims) + require.Empty(t, unclaims) + }) + + mockClient.AssertExpectations(t) +} + +func TestAgglayerBridgeL2Reader_GetUnsetClaimsForBlockRange_InputValidation(t *testing.T) { + ctx := context.Background() + bridgeAddr := common.HexToAddress("0x1234567890abcdef1234567890abcdef12345678") + mockClient := mocksethclient.NewBaseEthereumClienter(t) + + // Mock the FilterLogs method + mockClient.On("FilterLogs", mock.Anything, mock.Anything).Return([]ethtypes.Log{}, nil) + + reader, err := NewAgglayerBridgeL2Reader(bridgeAddr, mockClient) + require.NoError(t, err) + + t.Run("fromBlock greater than toBlock", func(t *testing.T) { + unclaims, err := reader.GetUnsetClaimsForBlockRange(ctx, 200, 100) + // This should return an error as it's an invalid block range + require.Error(t, err) + require.Nil(t, unclaims) + require.Contains(t, err.Error(), "invalid block range") + require.Contains(t, err.Error(), "fromBlock(200) > toBlock(100)") + }) + + t.Run("maximum uint64 values", func(t *testing.T) { + unclaims, err := reader.GetUnsetClaimsForBlockRange(ctx, ^uint64(0), ^uint64(0)) + require.NoError(t, err) + require.NotNil(t, unclaims) + require.Empty(t, unclaims) + }) + + t.Run("minimum values", func(t *testing.T) { + unclaims, err := reader.GetUnsetClaimsForBlockRange(ctx, 0, 0) + require.NoError(t, err) + require.NotNil(t, unclaims) + require.Empty(t, unclaims) + }) + + mockClient.AssertExpectations(t) +} + +// Test error handling in GetUnsetClaimsForBlockRange +func TestAgglayerBridgeL2Reader_GetUnsetClaimsForBlockRange_FilterErrorHandling(t *testing.T) { + ctx := context.Background() + bridgeAddr := 
common.HexToAddress("0x1234567890abcdef1234567890abcdef12345678") + mockClient := mocksethclient.NewBaseEthereumClienter(t) + + reader, err := NewAgglayerBridgeL2Reader(bridgeAddr, mockClient) + require.NoError(t, err) + + t.Run("filter error", func(t *testing.T) { + // Mock FilterLogs to return an error + mockClient.On("FilterLogs", mock.Anything, mock.Anything).Return([]ethtypes.Log{}, errors.New("filter error")) + + unclaims, err := reader.GetUnsetClaimsForBlockRange(ctx, 100, 200) + require.Error(t, err) + require.Nil(t, unclaims) + require.Contains(t, err.Error(), "filter error") + }) + + mockClient.AssertExpectations(t) +} + +// Test iterator close error handling +func TestAgglayerBridgeL2Reader_GetUnsetClaimsForBlockRange_IteratorCloseError(t *testing.T) { + ctx := context.Background() + bridgeAddr := common.HexToAddress("0x1234567890abcdef1234567890abcdef12345678") + mockClient := mocksethclient.NewBaseEthereumClienter(t) + + // Mock FilterLogs to return empty results + mockClient.On("FilterLogs", mock.Anything, mock.Anything).Return([]ethtypes.Log{}, nil) + + reader, err := NewAgglayerBridgeL2Reader(bridgeAddr, mockClient) + require.NoError(t, err) + + // Test normal operation - iterator close error is logged but doesn't affect return + unclaims, err := reader.GetUnsetClaimsForBlockRange(ctx, 100, 200) + require.NoError(t, err) + require.NotNil(t, unclaims) + require.Empty(t, unclaims) + + mockClient.AssertExpectations(t) +} + +// Test with simulated backend to get real contract behavior +func TestAgglayerBridgeL2Reader_GetUnsetClaimsForBlockRange_SimulatedBackend(t *testing.T) { + ctx := context.Background() + bridgeAddr := common.HexToAddress("0x1234567890abcdef1234567890abcdef12345678") + + // Use a simulated backend to get real contract behavior + simulatedBackend := simulated.NewBackend(nil, simulated.WithBlockGasLimit(10000000)) + defer simulatedBackend.Close() + + // Use the client from the simulated backend + client := simulatedBackend.Client() + + 
reader, err := NewAgglayerBridgeL2Reader(bridgeAddr, etherman.NewDefaultEthClient(client, nil, nil)) + require.NoError(t, err) + + // Test with the simulated backend - need to mine some blocks first + simulatedBackend.Commit() // Mine the genesis block + + unclaims, err := reader.GetUnsetClaimsForBlockRange(ctx, 0, 1) + require.NoError(t, err) + require.NotNil(t, unclaims) + // Should be empty since no events were emitted + require.Empty(t, unclaims) +} + +// Test with real contract events to test iterator behavior +func TestAgglayerBridgeL2Reader_GetUnsetClaimsForBlockRange_WithRealEvents(t *testing.T) { + ctx := context.Background() + bridgeAddr := common.HexToAddress("0x1234567890abcdef1234567890abcdef12345678") + + // Use a simulated backend to get real contract behavior + simulatedBackend := simulated.NewBackend(nil, simulated.WithBlockGasLimit(10000000)) + defer simulatedBackend.Close() + + // Use the client from the simulated backend + client := simulatedBackend.Client() + + reader, err := NewAgglayerBridgeL2Reader(bridgeAddr, etherman.NewDefaultEthClient(client, nil, nil)) + require.NoError(t, err) + + // Mine some blocks to create a valid range + simulatedBackend.Commit() // Block 1 + simulatedBackend.Commit() // Block 2 + simulatedBackend.Commit() // Block 3 + + // Test with a valid block range + unclaims, err := reader.GetUnsetClaimsForBlockRange(ctx, 1, 3) + require.NoError(t, err) + require.NotNil(t, unclaims) + // Should be empty since no events were emitted, but this tests the iterator path + require.Empty(t, unclaims) +} + +// Test the actual iterator behavior by creating a test that exercises the iterator loop +func TestAgglayerBridgeL2Reader_GetUnsetClaimsForBlockRange_IteratorBehavior(t *testing.T) { + ctx := context.Background() + bridgeAddr := common.HexToAddress("0x1234567890abcdef1234567890abcdef12345678") + + // Use a simulated backend to get real contract behavior + simulatedBackend := simulated.NewBackend(nil, 
simulated.WithBlockGasLimit(10000000)) + defer simulatedBackend.Close() + + // Use the client from the simulated backend + client := simulatedBackend.Client() + + reader, err := NewAgglayerBridgeL2Reader(bridgeAddr, etherman.NewDefaultEthClient(client, nil, nil)) + require.NoError(t, err) + + // Mine some blocks to create a valid range + simulatedBackend.Commit() // Block 1 + simulatedBackend.Commit() // Block 2 + simulatedBackend.Commit() // Block 3 + + // Test with a valid block range - this will test the iterator behavior + // The iterator will be created and the Next() method will be called + // Even though there are no events, this tests the iterator loop structure + unclaims, err := reader.GetUnsetClaimsForBlockRange(ctx, 1, 3) + require.NoError(t, err) + require.NotNil(t, unclaims) + // Should be empty since no events were emitted, but this tests the iterator path + require.Empty(t, unclaims) + + // Test with a single block range + unclaims, err = reader.GetUnsetClaimsForBlockRange(ctx, 1, 1) + require.NoError(t, err) + require.NotNil(t, unclaims) + require.Empty(t, unclaims) +} + +// Test with different block ranges +func TestAgglayerBridgeL2Reader_GetUnsetClaimsForBlockRange_BlockRanges(t *testing.T) { + ctx := context.Background() + bridgeAddr := common.HexToAddress("0x1234567890abcdef1234567890abcdef12345678") + mockClient := mocksethclient.NewBaseEthereumClienter(t) + + // Mock FilterLogs to return empty results for all calls + mockClient.On("FilterLogs", mock.Anything, mock.Anything).Return([]ethtypes.Log{}, nil) + + reader, err := NewAgglayerBridgeL2Reader(bridgeAddr, mockClient) + require.NoError(t, err) + + testCases := []struct { + name string + fromBlock uint64 + toBlock uint64 + }{ + {"zero to zero", 0, 0}, + {"zero to max", 0, ^uint64(0)}, + {"max to max", ^uint64(0), ^uint64(0)}, + {"normal range", 100, 200}, + {"single block", 100, 100}, + {"large range", 0, 1000000}, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { 
+ unclaims, err := reader.GetUnsetClaimsForBlockRange(ctx, tc.fromBlock, tc.toBlock) + require.NoError(t, err) + require.NotNil(t, unclaims) + }) + } + + mockClient.AssertExpectations(t) +} + +// Test context handling +func TestAgglayerBridgeL2Reader_GetUnsetClaimsForBlockRange_ContextHandling(t *testing.T) { + bridgeAddr := common.HexToAddress("0x1234567890abcdef1234567890abcdef12345678") + mockClient := mocksethclient.NewBaseEthereumClienter(t) + + // Mock FilterLogs to return empty results + mockClient.On("FilterLogs", mock.Anything, mock.Anything).Return([]ethtypes.Log{}, nil) + + reader, err := NewAgglayerBridgeL2Reader(bridgeAddr, mockClient) + require.NoError(t, err) + + t.Run("cancelled context", func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + cancel() // Cancel immediately + + unclaims, err := reader.GetUnsetClaimsForBlockRange(ctx, 100, 200) + require.NoError(t, err) // Context cancellation doesn't cause error in this implementation + require.NotNil(t, unclaims) + }) + + t.Run("background context", func(t *testing.T) { + unclaims, err := reader.GetUnsetClaimsForBlockRange(context.Background(), 100, 200) + require.NoError(t, err) + require.NotNil(t, unclaims) + }) + + t.Run("TODO context", func(t *testing.T) { + unclaims, err := reader.GetUnsetClaimsForBlockRange(context.TODO(), 100, 200) + require.NoError(t, err) + require.NotNil(t, unclaims) + }) + + mockClient.AssertExpectations(t) +} + +func TestGetUnsetClaimsInChunks(t *testing.T) { + ctx := context.Background() + bridgeAddr := common.HexToAddress("0x1234567890abcdef1234567890abcdef12345678") + + t.Run("exact chunk boundaries", func(t *testing.T) { + mockClient := mocksethclient.NewBaseEthereumClienter(t) + reader, err := NewAgglayerBridgeL2Reader(bridgeAddr, mockClient) + require.NoError(t, err) + + // Mock 3 successful chunk calls (blocks 0-999, 1000-1999, 2000-2999) + mockClient.On("FilterLogs", mock.Anything, mock.MatchedBy(func(q interface{}) bool { + return true 
// Accept all filter queries for simplicity + })).Return([]ethtypes.Log{}, nil).Times(3) + + unclaims, err := aggkitcommon.ChunkedRangeQuery( + ctx, 0, 2999, 1000, + reader.fetchUnsetClaims, + func(all, chunk []claimsynctypes.Unclaim) []claimsynctypes.Unclaim { + return append(all, chunk...) + }, + []claimsynctypes.Unclaim{}, + ) + require.NoError(t, err) + require.NotNil(t, unclaims) + require.Empty(t, unclaims) // Empty results as we mocked empty logs + + mockClient.AssertExpectations(t) + }) + + t.Run("non-exact chunk boundaries", func(t *testing.T) { + mockClient := mocksethclient.NewBaseEthereumClienter(t) + reader, err := NewAgglayerBridgeL2Reader(bridgeAddr, mockClient) + require.NoError(t, err) + + // Mock 3 calls: 0-999, 1000-1999, 2000-2500 + mockClient.On("FilterLogs", mock.Anything, mock.Anything).Return([]ethtypes.Log{}, nil).Times(3) + + unclaims, err := aggkitcommon.ChunkedRangeQuery( + ctx, 0, 2500, 1000, + reader.fetchUnsetClaims, + func(all, chunk []claimsynctypes.Unclaim) []claimsynctypes.Unclaim { + return append(all, chunk...) + }, + []claimsynctypes.Unclaim{}, + ) + require.NoError(t, err) + require.NotNil(t, unclaims) + + mockClient.AssertExpectations(t) + }) + + t.Run("single chunk (range smaller than maxRange)", func(t *testing.T) { + mockClient := mocksethclient.NewBaseEthereumClienter(t) + reader, err := NewAgglayerBridgeL2Reader(bridgeAddr, mockClient) + require.NoError(t, err) + + mockClient.On("FilterLogs", mock.Anything, mock.Anything).Return([]ethtypes.Log{}, nil).Once() + + unclaims, err := aggkitcommon.ChunkedRangeQuery(ctx, 0, 500, 1000, + reader.fetchUnsetClaims, + func(all, chunk []claimsynctypes.Unclaim) []claimsynctypes.Unclaim { + return append(all, chunk...) 
+ }, + []claimsynctypes.Unclaim{}, + ) + require.NoError(t, err) + require.NotNil(t, unclaims) + + mockClient.AssertExpectations(t) + }) + + t.Run("error in middle chunk", func(t *testing.T) { + mockClient := mocksethclient.NewBaseEthereumClienter(t) + reader, err := NewAgglayerBridgeL2Reader(bridgeAddr, mockClient) + require.NoError(t, err) + + // First chunk succeeds + mockClient.On("FilterLogs", mock.Anything, mock.Anything).Return([]ethtypes.Log{}, nil).Once() + // Second chunk fails + mockClient.On("FilterLogs", mock.Anything, mock.Anything).Return(nil, errors.New("rpc error")).Once() + + unclaims, err := aggkitcommon.ChunkedRangeQuery(ctx, 0, 2000, 1000, + reader.fetchUnsetClaims, + func(all, chunk []claimsynctypes.Unclaim) []claimsynctypes.Unclaim { + return append(all, chunk...) + }, + []claimsynctypes.Unclaim{}, + ) + require.ErrorContains(t, err, "rpc error") + require.Empty(t, unclaims) + + mockClient.AssertExpectations(t) + }) + + t.Run("zero maxRange", func(t *testing.T) { + mockClient := mocksethclient.NewBaseEthereumClienter(t) + reader, err := NewAgglayerBridgeL2Reader(bridgeAddr, mockClient) + require.NoError(t, err) + + // Should return error immediately without making any calls + unclaims, err := aggkitcommon.ChunkedRangeQuery(ctx, 0, 1000, 0, + reader.fetchUnsetClaims, + func(all, chunk []claimsynctypes.Unclaim) []claimsynctypes.Unclaim { + return append(all, chunk...) 
+ }, + []claimsynctypes.Unclaim{}, + ) + require.ErrorContains(t, err, "maxRange must be greater than 0") + require.Empty(t, unclaims) + + // No FilterLogs calls should have been made + mockClient.AssertExpectations(t) + }) +} + +func TestGetUnsetClaimsForBlockRange_ChunkingIntegration(t *testing.T) { + ctx := context.Background() + bridgeAddr := common.HexToAddress("0x1234567890abcdef1234567890abcdef12345678") + + t.Run("normal fetch succeeds without chunking", func(t *testing.T) { + mockClient := mocksethclient.NewBaseEthereumClienter(t) + reader, err := NewAgglayerBridgeL2Reader(bridgeAddr, mockClient) + require.NoError(t, err) + + // Mock successful FilterLogs call + mockClient.On("FilterLogs", mock.Anything, mock.Anything).Return([]ethtypes.Log{}, nil).Once() + + unclaims, err := reader.GetUnsetClaimsForBlockRange(ctx, 0, 500) + require.NoError(t, err) + require.NotNil(t, unclaims) + + mockClient.AssertExpectations(t) + }) + + t.Run("block range too large triggers chunking", func(t *testing.T) { + mockClient := mocksethclient.NewBaseEthereumClienter(t) + reader, err := NewAgglayerBridgeL2Reader(bridgeAddr, mockClient) + require.NoError(t, err) + + // First call fails with "block range too large" + mockClient.On("FilterLogs", mock.Anything, mock.Anything). + Return(nil, errors.New("block range too large, max range: 1000")).Once() + + // Subsequent chunked calls succeed (0-999, 1000-1999, 2000-2500) + mockClient.On("FilterLogs", mock.Anything, mock.Anything). 
+ Return([]ethtypes.Log{}, nil).Times(3) + + unclaims, err := reader.GetUnsetClaimsForBlockRange(ctx, 0, 2500) + require.NoError(t, err) + require.NotNil(t, unclaims) + + mockClient.AssertExpectations(t) + }) + + t.Run("non-parseable error returns original error", func(t *testing.T) { + mockClient := mocksethclient.NewBaseEthereumClienter(t) + reader, err := NewAgglayerBridgeL2Reader(bridgeAddr, mockClient) + require.NoError(t, err) + + // Return an error that doesn't match the pattern + mockClient.On("FilterLogs", mock.Anything, mock.Anything). + Return(nil, errors.New("some other RPC error")).Once() + + unclaims, err := reader.GetUnsetClaimsForBlockRange(ctx, 0, 2500) + require.ErrorContains(t, err, "some other RPC error") + require.Nil(t, unclaims) + + mockClient.AssertExpectations(t) + }) + + t.Run("chunking fails partway through", func(t *testing.T) { + mockClient := mocksethclient.NewBaseEthereumClienter(t) + reader, err := NewAgglayerBridgeL2Reader(bridgeAddr, mockClient) + require.NoError(t, err) + + // First call triggers chunking + mockClient.On("FilterLogs", mock.Anything, mock.Anything). + Return(nil, errors.New("block range too large, max range: 1000")).Once() + + // First chunk succeeds + mockClient.On("FilterLogs", mock.Anything, mock.Anything). + Return([]ethtypes.Log{}, nil).Once() + + // Second chunk fails + mockClient.On("FilterLogs", mock.Anything, mock.Anything). 
+ Return(nil, errors.New("connection timeout")).Once() + + unclaims, err := reader.GetUnsetClaimsForBlockRange(ctx, 0, 2500) + require.ErrorContains(t, err, "connection timeout") + require.Empty(t, unclaims) + + mockClient.AssertExpectations(t) + }) +} diff --git a/claimsync/claimsync.go b/claimsync/claimsync.go index 8f1a06d4b..878262bc0 100644 --- a/claimsync/claimsync.go +++ b/claimsync/claimsync.go @@ -2,11 +2,11 @@ package claimsync import ( "context" + "errors" "fmt" "math/big" "time" - "github.com/0xPolygon/cdk-contracts-tooling/contracts/aggchain-multisig/agglayerbridge" claimsyncStorage "github.com/agglayer/aggkit/claimsync/storage" claimsynctypes "github.com/agglayer/aggkit/claimsync/types" aggkitcommon "github.com/agglayer/aggkit/common" @@ -30,6 +30,8 @@ type ClaimSync struct { ethClient aggkittypes.EthClienter logger aggkitcommon.Logger originNetwork uint32 + syncerID claimsynctypes.ClaimSyncerID + cfg ConfigStandalone } // NewStandaloneClaimSync creates a standalone ClaimSync that indexes claim events from the bridge contract directly. 
@@ -70,18 +72,12 @@ func NewClaimSync( return nil, err } - agglayerBridgeContract, err := agglayerbridge.NewAgglayerbridge(cfg.BridgeAddr, ethClient) - if err != nil { - return nil, fmt.Errorf("claimsync: failed to create AgglayerBridge binding: %w", err) - } - - isSovereign, agglayerBridgeL2Contract, err := detectSovereignChain(ctx, cfg.BridgeAddr, ethClient) + deployment, err := resolveBridgeDeployment(ctx, cfg.BridgeAddr, ethClient) if err != nil { return nil, fmt.Errorf("claimsync: failed to detect chain type: %w", err) } - appender, err := buildAppender(ctx, ethClient, proc, cfg.BridgeAddr, - agglayerBridgeContract, agglayerBridgeL2Contract, isSovereign, logger) + appender, err := buildAppender(ctx, ethClient, proc, cfg.BridgeAddr, deployment, logger) if err != nil { return nil, fmt.Errorf("claimsync: failed to build appender: %w", err) } @@ -135,7 +131,7 @@ func NewClaimSync( logger.Infof( "claimsync created: dbPath=%s initialBlock=%d blockFinality=%s bridgeAddr=%s sovereign=%t", - cfg.DBPath, cfg.InitialBlockNum, cfg.BlockFinality.String(), cfg.BridgeAddr.String(), isSovereign, + cfg.DBPath, cfg.InitialBlockNum, cfg.BlockFinality.String(), cfg.BridgeAddr.String(), deployment.kind == SovereignChain, ) return &ClaimSync{ @@ -145,13 +141,46 @@ func NewClaimSync( ethClient: ethClient, logger: logger, originNetwork: originNetwork, + syncerID: syncerID, + cfg: cfg, }, nil } // Start starts the synchronization process. 
func (c *ClaimSync) Start(ctx context.Context) { - c.logger.Info("starting claim synchronizer") - c.driver.Sync(ctx) + c.logger.Infof("starting claim synchronizer AutoStart: %t InitialBlock: %d", + *c.cfg.AutoStart.Resolved, c.cfg.InitialBlockNum) + if *c.cfg.AutoStart.Resolved == true { + c.driver.Sync(ctx, &c.cfg.InitialBlockNum) + } else { + c.driver.Sync(ctx, nil) + } +} + +func (c *ClaimSync) syncNextBlockInfinite(ctx context.Context, blockNumber uint64) { + c.logger.Infof("autoStartDownloading: bootstrapping block %d", blockNumber) + for { + err := c.driver.SyncNextBlock(ctx, blockNumber) + if err == nil || errors.Is(err, sync.ErrAlreadyBootstrapped) { + return + } + c.logger.Warnf("autoStartDownloading: failed to process block %d: %v — retrying in %s", + blockNumber, err, c.cfg.RetryAfterErrorPeriod.Duration) + select { + case <-ctx.Done(): + c.logger.Info("autoStartDownloading: context cancelled, stopping") + return + case <-time.After(c.cfg.RetryAfterErrorPeriod.Duration): + } + } +} + +// SyncNextBlock downloads and processes blockNum as a bootstrap step. +// Returns sync.ErrAlreadyBootstrapped (ignorable) if a processed block already exists. 
+func (c *ClaimSync) SyncNextBlock(ctx context.Context, blockNum uint64) error { + c.logger.Infof("SyncNextBlock: syncing block %d", blockNum) + c.syncNextBlockInfinite(ctx, blockNum) + return nil } // OriginNetwork returns the network ID of the origin chain diff --git a/claimsync/claimsync_rpc.go b/claimsync/claimsync_rpc.go index 122f66e76..ee1fa1b9e 100644 --- a/claimsync/claimsync_rpc.go +++ b/claimsync/claimsync_rpc.go @@ -56,7 +56,7 @@ func (r *ClaimSyncRPC) Status() (interface{}, jRPC.Error) { // curl -X POST http://localhost:5576/ -H "Content-Type: application/json" \ // -d '{"method":"l2claimsync_getClaims", "params":[0, 1000], "id":1}' func (r *ClaimSyncRPC) GetClaims(fromBlock, toBlock uint64) (interface{}, jRPC.Error) { - r.logger.Infof("RPC call: l2claimsync_getClaims(%d, %d)", fromBlock, toBlock) + r.logger.Infof("RPC call: lclaimsync_getClaims(%d, %d)", fromBlock, toBlock) claims, err := r.claimSync.GetClaims(context.Background(), fromBlock, toBlock) if err != nil { return nil, jRPC.NewRPCError(jRPC.DefaultErrorCode, @@ -69,7 +69,7 @@ func (r *ClaimSyncRPC) GetClaims(fromBlock, toBlock uint64) (interface{}, jRPC.E // curl -X POST http://localhost:5576/ -H "Content-Type: application/json" \ // -d '{"method":"l2claimsync_getClaimsByGlobalIndex", "params":["123"], "id":1}' func (r *ClaimSyncRPC) GetClaimsByGlobalIndex(globalIndexStr string) (interface{}, jRPC.Error) { - r.logger.Infof("RPC call: l2claimsync_getClaimsByGlobalIndex(%s)", globalIndexStr) + r.logger.Infof("RPC call: lclaimsync_getClaimsByGlobalIndex(%s)", globalIndexStr) globalIndex := new(big.Int) if _, ok := globalIndex.SetString(globalIndexStr, 10); !ok { return nil, jRPC.NewRPCError(jRPC.DefaultErrorCode, @@ -91,7 +91,7 @@ func (r *ClaimSyncRPC) GetClaimsByGlobalIndex(globalIndexStr string) (interface{ // curl -X POST http://localhost:5576/ -H "Content-Type: application/json" \ // -d '{"method":"l2claimsync_setNextRequiredBlock", "params":[1000], "id":1}' func (r *ClaimSyncRPC) 
SetNextRequiredBlock(blockNum uint64) (interface{}, jRPC.Error) { - r.logger.Infof("RPC call: l2claimsync_setNextRequiredBlock(%d)", blockNum) + r.logger.Infof("RPC call: lclaimsync_setNextRequiredBlock(%d)", blockNum) if err := r.claimSync.SetNextRequiredBlock(context.Background(), blockNum); err != nil { return nil, jRPC.NewRPCError(jRPC.DefaultErrorCode, fmt.Sprintf("ClaimSyncRPC.SetNextRequiredBlock: %s", err.Error())) @@ -105,10 +105,14 @@ func (r *ClaimSyncRPC) SetNextRequiredBlock(blockNum uint64) (interface{}, jRPC. // GetRPCServices returns the RPC services exposed by ClaimSync. func (c *ClaimSync) GetRPCServices() []jRPC.Service { - logger := log.WithFields("module", "l2claimsync-rpc") + name := "l1claimsync" + if c.syncerID == claimsynctypes.L2ClaimSyncer { + name = "l2claimsync" + } + logger := log.WithFields("module", name+"-rpc") return []jRPC.Service{ { - Name: "l2claimsync", + Name: name, Service: NewClaimSyncRPC(logger, c), }, } diff --git a/claimsync/config.go b/claimsync/config.go index 9e41b62f0..9ca44e487 100644 --- a/claimsync/config.go +++ b/claimsync/config.go @@ -17,7 +17,7 @@ type ConfigEmbedded struct { } type ConfigStandalone struct { - ConfigEmbedded + ConfigEmbedded `mapstructure:",squash"` // DBPath path of the DB DBPath string `mapstructure:"DBPath"` // BlockFinality indicates the status of the blocks that will be queried in order to sync @@ -37,6 +37,12 @@ type ConfigStandalone struct { // RequireStorageContentCompatibility is true it's mandatory that data stored in the database // is compatible with the running environment RequireStorageContentCompatibility bool `mapstructure:"RequireStorageContentCompatibility"` + // AutoStart controls whether the synchronizer should start automatically after initialization. 
+ // Possible values: + // - "true": automatically starts after initialization using InitialBlockNum + // - "false": does not start automatically; requires manual start + // - "auto": automatically decides based on which component is active + AutoStart types.TrueFalseAutoMode `jsonschema:"enum=true, enum=false, enum=auto" mapstructure:"AutoStart"` } func (c ConfigEmbedded) Validate() error { @@ -50,5 +56,8 @@ func (c ConfigStandalone) Validate() error { if err := c.BlockFinality.Validate(); err != nil { return fmt.Errorf("invalid BlockFinality configuration: %w", err) } + if err := c.AutoStart.Validate("AutoStart"); err != nil { + return err + } return nil } diff --git a/cmd/run.go b/cmd/run.go index d6e9e57b4..a3d92551e 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -52,6 +52,10 @@ import ( "github.com/urfave/cli/v2" ) +const ( + MainnetID = uint32(0) +) + func start(cliCtx *cli.Context) error { // Validate components first before loading configuration components := cliCtx.StringSlice(config.FlagComponents) @@ -102,7 +106,7 @@ func start(cliCtx *cli.Context) error { } }() var rpcServices []jRPC.Service - l1MultiDownloader, l1mdServices, err := runL1MultiDownloaderIfNeeded(l1Client, cfg.L1Multidownloader) + l1MultiDownloader, l1mdServices, err := runL1MultiDownloaderIfNeeded(components,l1Client, cfg.L1Multidownloader) if err != nil { return fmt.Errorf("failed to create L1MultiDownloader: %w", err) } @@ -110,7 +114,7 @@ func start(cliCtx *cli.Context) error { rpcServices = append(rpcServices, l1mdServices...) } - rollupDataQuerier, err := createRollupDataQuerier(cliCtx.Context, cfg.L1NetworkConfig, l1Client) + rollupDataQuerier, err := createRollupDataQuerier(cliCtx.Context, components, cfg.L1NetworkConfig, l1Client) if err != nil { return fmt.Errorf("failed to create rollup data querier: %w", err) } @@ -127,25 +131,30 @@ func start(cliCtx *cli.Context) error { if l1InfoTreeSync != nil { rpcServices = append(rpcServices, l1InfoTreeSync.GetRPCServices()...) 
} + + l1ClaimSync := runClaimSyncL1IfNeeded(ctx, components, cfg.ClaimL1Sync, reorgDetectorL1, l1Client, MainnetID) + if l1ClaimSync != nil { + rpcServices = append(rpcServices, l1ClaimSync.GetRPCServices()...) + } + l1BridgeSync := runBridgeSyncL1IfNeeded(ctx, components, cfg.BridgeL1Sync, reorgDetectorL1, - l1Client, 0, &backfillWg) + l1Client, MainnetID, &backfillWg) initialLER, err := query.NewLERDataQuerier( cfg.AggSender.RollupCreationBlockL1, rollupDataQuerier).GetInitialLocalExitRoot() if err != nil { return fmt.Errorf("failed to get initial local exit root: %w", err) } - l2BridgeSync, l2ClaimSync := runBridgeSyncL2IfNeeded(ctx, components, cfg.BridgeL2Sync, reorgDetectorL2, + + l2ClaimSync := runClaimSyncL2IfNeeded(ctx, components, cfg.ClaimL2Sync, reorgDetectorL2, l2Client, rollupDataQuerier.RollupID) + if l2ClaimSync != nil { + rpcServices = append(rpcServices, l2ClaimSync.GetRPCServices()...) + } + + l2BridgeSync := runBridgeSyncL2IfNeeded(ctx, components, cfg.BridgeL2Sync, reorgDetectorL2, l2Client, rollupDataQuerier.RollupID, initialLER, &backfillWg) l2GERSync := runL2GERSyncIfNeeded( ctx, components, cfg.L2GERSync, reorgDetectorL2, l2Client, l1InfoTreeSync, l1Client, ) - if l2ClaimSync == nil { - standaloneClaimSync := runClaimSyncL2IfNeeded(ctx, components, cfg.BridgeL2Sync, reorgDetectorL2, l2Client, rollupDataQuerier.RollupID) - if standaloneClaimSync != nil { - rpcServices = append(rpcServices, standaloneClaimSync.GetRPCServices()...) 
- l2ClaimSync = standaloneClaimSync - } - } committeeQuerier := runAggsenderMultisigCommitteeIfNeeded(components, cfg.L1NetworkConfig.RollupAddr, l1Client, &cfg.AggSender.CommitteeOverride) @@ -550,6 +559,16 @@ func isNeeded(casesWhereNeeded, actualCases []string) bool { return false } +func l1InfoTreeMustRun(components []string) bool { + if !isNeeded([]string{ + aggkitcommon.AGGORACLE, aggkitcommon.AGGSENDER, aggkitcommon.AGGSENDERVALIDATOR, + aggkitcommon.BRIDGE, aggkitcommon.L1INFOTREESYNC, + aggkitcommon.L2GERSYNC, aggkitcommon.AGGCHAINPROOFGEN}, components) { + return false + } + return true +} + func runL1InfoTreeSyncerIfNeeded( ctx context.Context, components []string, @@ -558,10 +577,7 @@ func runL1InfoTreeSyncerIfNeeded( l1EthClient aggkittypes.BaseEthereumClienter, l1MultiDownloader *multidownloader.EVMMultidownloader, ) *l1infotreesync.L1InfoTreeSync { - if !isNeeded([]string{ - aggkitcommon.AGGORACLE, aggkitcommon.AGGSENDER, aggkitcommon.AGGSENDERVALIDATOR, - aggkitcommon.BRIDGE, aggkitcommon.L1INFOTREESYNC, - aggkitcommon.L2GERSYNC, aggkitcommon.AGGCHAINPROOFGEN}, components) { + if !l1InfoTreeMustRun(components) { return nil } var l1InfoTreeSync *l1infotreesync.L1InfoTreeSync @@ -660,6 +676,7 @@ func runReorgDetectorL1IfNeeded( } func runL1MultiDownloaderIfNeeded( + components []string, l1Client aggkittypes.EthClienter, cfg multidownloader.Config, ) (*multidownloader.EVMMultidownloader, []jRPC.Service, error) { @@ -672,6 +689,10 @@ func runL1MultiDownloaderIfNeeded( log.Warnf("L1 MultiDownloader is disabled, don't creating the service.") return nil, nil, nil } + if !l1InfoTreeMustRun(components){ + log.Infof("L1 MultiDownloader not going to run because components: %v", components) + return nil, nil,nil + } logger := log.WithFields("module", "L1MultiDownloader") downloader, err := multidownloader.NewEVMMultidownloader( @@ -753,11 +774,7 @@ func runL2GERSyncIfNeeded( func resolveL1BridgeConfig(cfg *bridgesync.Config, components []string, logprefix 
string) { hasBridgeComponent := isNeeded([]string{aggkitcommon.BRIDGE}, components) - syncFromInBridgesResolved := cfg.SyncFromInBridges.Resolve(hasBridgeComponent) - cfg.SyncFromInBridgesResolved = &syncFromInBridgesResolved - - embeddedClaimSyncResolved := cfg.EmbeddedClaimSync.Resolve(hasBridgeComponent) - cfg.EmbeddedClaimSyncResolved = &embeddedClaimSyncResolved + cfg.SyncFromInBridges.Resolve(hasBridgeComponent) for _, line := range cfg.ResolvedString() { log.Info(logprefix+"BridgeConfig Resolved: ", line) @@ -812,11 +829,10 @@ func runBridgeSyncL1IfNeeded( func runClaimSyncL1IfNeeded( ctx context.Context, components []string, - cfg bridgesync.Config, + cfg claimsync.ConfigStandalone, reorgDetectorL1 bridgesync.ReorgDetector, l1Client aggkittypes.EthClienter, rollupID uint32, - wg *sync.WaitGroup, ) *claimsync.ClaimSync { if !isNeeded([]string{aggkitcommon.BRIDGE, aggkitcommon.L1BRIDGESYNC}, components) { return nil @@ -824,32 +840,26 @@ func runClaimSyncL1IfNeeded( if err := cfg.Validate(); err != nil { log.Fatalf("invalid BridgeL1Sync config: %v", err) } - cfgClaim := claimsync.ConfigStandalone{ - ConfigEmbedded: claimsync.ConfigEmbedded{ - DBQueryTimeout: cfg.DBQueryTimeout, - BridgeAddr: cfg.BridgeAddr, - }, - DBPath: cfg.DBPath + "_claim.sqlite", - BlockFinality: cfg.BlockFinality, - InitialBlockNum: 0, - SyncBlockChunkSize: 1000, - RetryAfterErrorPeriod: cfg.RetryAfterErrorPeriod, - MaxRetryAttemptsAfterError: cfg.MaxRetryAttemptsAfterError, - WaitForNewBlocksPeriod: cfg.WaitForNewBlocksPeriod, - RequireStorageContentCompatibility: cfg.RequireStorageContentCompatibility, - } + + autoStart := cfg.AutoStart.Resolve(isNeeded([]string{aggkitcommon.BRIDGE, aggkitcommon.L1BRIDGESYNC}, components)) + res, err := claimsync.NewStandaloneClaimSync( ctx, - cfgClaim, + cfg, reorgDetectorL1, l1Client, claimsynctypes.L1ClaimSyncer, rollupID, ) if err != nil { - log.Fatalf("error creating ClaimSyncL2: %s", err) + log.Fatalf("error creating ClaimSyncL1: %s", err) + } + 
if autoStart { + log.Infof("Starting ClaimSyncL1 (autoStart=true)") + go res.Start(ctx) + } else { + log.Infof("ClaimSyncL1 created (autoStart=false, on-demand)") } - go res.Start(ctx) return res } @@ -862,7 +872,7 @@ func runBridgeSyncL2IfNeeded( rollupID uint32, initialLER common.Hash, wg *sync.WaitGroup, -) (*bridgesync.BridgeSync, claimsynctypes.ClaimSyncer) { +) *bridgesync.BridgeSync { fullClaimsNeeded := isNeeded([]string{ aggkitcommon.BRIDGE, aggkitcommon.AGGSENDER, @@ -874,7 +884,7 @@ func runBridgeSyncL2IfNeeded( if !fullClaimsNeeded && !fullClaimsNotNeeded { // no bridge sync needed - return nil, nil + return nil } // Resolve SyncFromInBridges mode based on components @@ -902,20 +912,16 @@ func runBridgeSyncL2IfNeeded( // Don't fail the entire process, just log the error and continue } }() - log.Infof("Starting BridgeSyncL2 with SyncFromInBridges: %t EmbeddedClaimSyncResolved:%t", - *cfg.SyncFromInBridgesResolved, - *cfg.EmbeddedClaimSyncResolved) + log.Infof("Starting BridgeSyncL2 with SyncFromInBridges: %t", + *cfg.SyncFromInBridges.Resolved) go bridgeSyncL2.Start(ctx) - if *cfg.EmbeddedClaimSyncResolved { - return bridgeSyncL2, bridgeSyncL2 - } - return bridgeSyncL2, nil + return bridgeSyncL2 } func runClaimSyncL2IfNeeded( ctx context.Context, components []string, - cfg bridgesync.Config, + cfg claimsync.ConfigStandalone, reorgDetectorL2 *reorgdetector.ReorgDetector, l2Client aggkittypes.EthClienter, originNetwork uint32, @@ -927,23 +933,12 @@ func runClaimSyncL2IfNeeded( aggkitcommon.L2CLAIMSYNC}, components) { return nil } - cfgClaim := claimsync.ConfigStandalone{ - ConfigEmbedded: claimsync.ConfigEmbedded{ - DBQueryTimeout: cfg.DBQueryTimeout, - BridgeAddr: cfg.BridgeAddr, - }, - DBPath: cfg.DBPath + "_claim.sqlite", - BlockFinality: cfg.BlockFinality, - InitialBlockNum: 0, - SyncBlockChunkSize: 1000, - RetryAfterErrorPeriod: cfg.RetryAfterErrorPeriod, - MaxRetryAttemptsAfterError: cfg.MaxRetryAttemptsAfterError, - WaitForNewBlocksPeriod: 
cfg.WaitForNewBlocksPeriod, - RequireStorageContentCompatibility: cfg.RequireStorageContentCompatibility, - } + + autoStart := cfg.AutoStart.Resolve(isNeeded([]string{aggkitcommon.BRIDGE, aggkitcommon.L2BRIDGESYNC}, components)) + res, err := claimsync.NewStandaloneClaimSync( ctx, - cfgClaim, + cfg, reorgDetectorL2, l2Client, claimsynctypes.L2ClaimSyncer, @@ -952,7 +947,12 @@ func runClaimSyncL2IfNeeded( if err != nil { log.Fatalf("error creating ClaimSyncL2: %s", err) } - go res.Start(ctx) + if autoStart { + log.Infof("Starting ClaimSyncL2 (autoStart=true)") + go res.Start(ctx) + } else { + log.Infof("ClaimSyncL2 created (autoStart=false, on-demand)") + } return res } @@ -1044,10 +1044,22 @@ func startPrometheusHTTPServer(c prometheus.Config) { // (AGGORACLE, AGGCHAINPROOFGEN, AGGSENDER, BRIDGE) are needed. The client is configured with // the provided L1 network configuration and uses default implementations for creating Ethereum // clients and rollup manager contracts. Returns (nil, nil) if none of the required components are needed. 
-func createRollupDataQuerier(ctx context.Context, +func createRollupDataQuerier( + ctx context.Context, + components []string, cfg ethermanconfig.L1NetworkConfig, l1Client aggkittypes.BaseEthereumClienter, ) (*ethermanquierier.RollupDataQuerier, error) { + if !isNeeded([]string{ + aggkitcommon.AGGORACLE, + aggkitcommon.AGGSENDER, + aggkitcommon.AGGCHAINPROOFGEN, + aggkitcommon.BRIDGE, + aggkitcommon.L1BRIDGESYNC, + aggkitcommon.L2BRIDGESYNC, + }, components) { + return nil, nil + } return ethermanquierier.NewRollupDataQuerier(ctx, cfg, l1Client, func(rollupManagerAddr common.Address, client aggkittypes.BaseEthereumClienter) (ethermanquierier.RollupManagerContract, error) { diff --git a/config/types/true_false_auto.go b/config/types/true_false_auto.go new file mode 100644 index 000000000..85cd3a95a --- /dev/null +++ b/config/types/true_false_auto.go @@ -0,0 +1,71 @@ +package types + +import ( + "fmt" + "strings" +) + +// TrueFalseAutoMode represents a tri-state config value: true, false, or auto. +// Mode is set from the config file via UnmarshalText; Resolved is set programmatically. +type TrueFalseAutoMode struct { + Mode string `mapstructure:"-"` + Resolved *bool `mapstructure:"-"` +} + +var ( + // TrueMode always activates the feature. + TrueMode = TrueFalseAutoMode{Mode: "true"} + // FalseMode always deactivates the feature. + FalseMode = TrueFalseAutoMode{Mode: "false"} + // AutoMode decides automatically based on context. + AutoMode = TrueFalseAutoMode{Mode: "auto"} +) + +// UnmarshalText implements encoding.TextUnmarshaler. +func (m *TrueFalseAutoMode) UnmarshalText(text []byte) error { + str := strings.ToLower(strings.TrimSpace(string(text))) + switch str { + case "true": + m.Mode = "true" + case "false": + m.Mode = "false" + case "auto": + m.Mode = "auto" + default: + return fmt.Errorf("invalid TrueFalseAutoMode: %s (valid values: true, false, auto)", str) + } + return nil +} + +// String returns the string representation. 
+func (m TrueFalseAutoMode) String() string { + return m.Mode +} + +// Validate checks that the mode is a valid value. Empty mode is allowed. +func (m TrueFalseAutoMode) Validate(fieldName string) error { + if m.Mode == "" { + return nil + } + var cpy TrueFalseAutoMode + if err := cpy.UnmarshalText([]byte(m.Mode)); err != nil { + return fmt.Errorf("invalid %s configuration: %w", fieldName, err) + } + return nil +} + +// Resolve converts the mode to a boolean using autoModeResult for AutoMode, +// stores the result in Resolved, and returns it. +func (m *TrueFalseAutoMode) Resolve(autoModeResult bool) bool { + var result bool + switch m.Mode { + case "true": + result = true + case "false": + result = false + case "auto": + result = autoModeResult + } + m.Resolved = &result + return result +} diff --git a/sync/README.md b/sync/README.md new file mode 100644 index 000000000..c47a92e7f --- /dev/null +++ b/sync/README.md @@ -0,0 +1,247 @@ +# sync package + +Provides the building blocks for EVM-based block synchronizers. A syncer tracks events emitted by one or more smart contracts, persists them atomically per block, and handles chain reorganisations automatically. + +## Architecture overview + +``` +┌─────────────────────────────────────────────────────┐ +│ EVMDriver │ +│ - main sync loop (Sync) │ +│ - reorg detection & recovery │ +│ - retry logic │ +│ - block subscriber pub/sub │ +└────────────────┬──────────────────┬─────────────────┘ + │ │ + ┌──────────▼──────┐ ┌────────▼──────────┐ + │ EVMDownloader │ │ processor │ + │ (fetch blocks │ │ (store blocks + │ + │ + parse logs) │ │ events in DB) │ + └──────────┬──────┘ └───────────────────┘ + │ + ┌────────▼────────┐ + │ LogAppenderMap │ + │ (event topic → │ + │ handler func) │ + └─────────────────┘ +``` + +## How to implement a new syncer + +Three pieces are needed: an **Event struct**, a **`buildAppender` function**, and a **processor**. + +### 1. Event struct + +Define one struct per contract event you want to index. 
Use `any` as the type stored in `sync.Block.Events`. + +```go +// transfer.go + +// TransferEvent represents an ERC-20 Transfer event. +type TransferEvent struct { + From common.Address + To common.Address + Amount *big.Int +} +``` + +### 2. `buildAppender` function + +`buildAppender` returns a `sync.LogAppenderMap` — a map from event topic hash to a handler function. Each handler parses a raw `types.Log` and appends the decoded event to `b.Events`. + +```go +// downloader.go + +var transferEventSignature = crypto.Keccak256Hash([]byte("Transfer(address,address,uint256)")) + +func buildAppender( + contractABI *abi.ABI, +) (sync.LogAppenderMap, error) { + appender := make(sync.LogAppenderMap) + + appender[transferEventSignature] = func(b *sync.EVMBlock, l types.Log) error { + var ev TransferEvent + if err := contractABI.UnpackIntoInterface(&ev, "Transfer", l.Data); err != nil { + return fmt.Errorf("buildAppender Transfer: unpack: %w", err) + } + // Indexed topics are not in Data; decode them from Topics. + ev.From = common.BytesToAddress(l.Topics[1].Bytes()) + ev.To = common.BytesToAddress(l.Topics[2].Bytes()) + b.Events = append(b.Events, ev) + return nil + } + + return appender, nil +} +``` + +> For events with indexed parameters: topic[0] is always the event signature; topic[1], topic[2], … are the indexed arguments in declaration order. + +### 3. processor + +The processor must implement the `processorInterface` used by `EVMDriver`: + +```go +type processorInterface interface { + GetLastProcessedBlock(ctx context.Context) (uint64, bool, error) + ProcessBlock(ctx context.Context, block Block) error + Reorg(ctx context.Context, firstReorgedBlock uint64) error +} +``` + +A typical implementation: + +```go +// processor.go + +type processor struct { + storage MyStorager + log aggkitcommon.Logger + timeout time.Duration +} + +// GetLastProcessedBlock returns the highest block number stored in the DB. +// The bool indicates whether any block has been processed yet. 
+func (p *processor) GetLastProcessedBlock(ctx context.Context) (uint64, bool, error) { + block, err := p.storage.GetLastBlock(ctx) + if errors.Is(err, db.ErrNotFound) { + return 0, false, nil + } + if err != nil { + return 0, false, err + } + return block.Num, true, nil +} + +// ProcessBlock stores the block and all its events atomically. +func (p *processor) ProcessBlock(ctx context.Context, block sync.Block) error { + tx, err := p.storage.NewTx(ctx) + if err != nil { + return err + } + defer func() { /* rollback on error */ }() + + if err := p.storage.InsertBlock(ctx, tx, block.Num, block.Hash); err != nil { + return err + } + for _, e := range block.Events { + switch ev := e.(type) { + case TransferEvent: + if err := p.storage.InsertTransfer(ctx, tx, block.Num, ev); err != nil { + return err + } + } + } + return tx.Commit() +} + +// Reorg deletes all data for blocks >= firstReorgedBlock. +func (p *processor) Reorg(ctx context.Context, firstReorgedBlock uint64) error { + tx, err := p.storage.NewTx(ctx) + if err != nil { + return err + } + defer func() { /* rollback on error */ }() + if err := p.storage.DeleteBlocksFrom(ctx, tx, firstReorgedBlock); err != nil { + return err + } + return tx.Commit() +} +``` + +### 4. 
Wiring it together + +```go +// mysyncer.go + +type MySync struct { + driver *sync.EVMDriver + processor *processor +} + +func NewMySync( + ctx context.Context, + cfg Config, + rd sync.ReorgDetector, + ethClient aggkittypes.EthClienter, + syncerID string, +) (*MySync, error) { + store, err := storage.New(cfg.DBPath) + if err != nil { + return nil, err + } + + proc := &processor{storage: store, timeout: cfg.DBQueryTimeout.Duration} + + contractABI, err := loadABI() // load your contract ABI + if err != nil { + return nil, err + } + + appender, err := buildAppender(contractABI) + if err != nil { + return nil, err + } + + rh := &sync.RetryHandler{ + MaxRetryAttemptsAfterError: cfg.MaxRetryAttemptsAfterError, + RetryAfterErrorPeriod: cfg.RetryAfterErrorPeriod.Duration, + } + + downloader, err := sync.NewEVMDownloader( + syncerID, + sync.NewAdapterEthClientToMultidownloader(ethClient), + cfg.SyncBlockChunkSize, + cfg.BlockFinality, + cfg.WaitForNewBlocksPeriod.Duration, + appender, + []common.Address{cfg.ContractAddr}, // contracts to filter events from + rh, + rd.GetFinalizedBlockType(), + rd, + syncerID, + ) + if err != nil { + return nil, err + } + + compatibilityChecker := compatibility.NewCompatibilityCheck( + cfg.RequireStorageContentCompatibility, + downloader.RuntimeData, + proc, + ) + + driver, err := sync.NewEVMDriver(rd, proc, downloader, syncerID, bufferSize, rh, compatibilityChecker) + if err != nil { + return nil, err + } + + return &MySync{driver: driver, processor: proc}, nil +} + +func (s *MySync) Start(ctx context.Context) { + s.driver.Sync(ctx) +} +``` + +## Key types reference + +| Type | Package | Description | +|---|---|---| +| `EVMDriver` | `sync` | Main sync loop, reorg handling, retry | +| `EVMDownloader` | `sync` | Downloads blocks and parses logs via `LogAppenderMap` | +| `LogAppenderMap` | `sync` | `map[topic hash → handler]` — decodes logs into events | +| `Block` | `sync` | Block number + hash + `[]any` events | +| `EVMBlock` | `sync` | 
Internal block used during download (before processing) | +| `RetryHandler` | `sync` | Configures retry behaviour (max attempts, backoff period) | +| `RuntimeData` | `sync` | Chain ID + watched addresses, used for DB compatibility checks | + +## Reference implementation + +See [claimsync](../claimsync/) for a complete real-world example: + +| File | Role | +|---|---| +| [`downloader.go`](../claimsync/downloader.go) | Event structs, `buildAppender`, log handlers | +| [`processor.go`](../claimsync/processor.go) | `processorInterface` implementation | +| [`claimsync.go`](../claimsync/claimsync.go) | Wires everything together in `NewClaimSync` | diff --git a/sync/evmdriver.go b/sync/evmdriver.go index e5f76bcdd..ff7ef2a0d 100644 --- a/sync/evmdriver.go +++ b/sync/evmdriver.go @@ -14,7 +14,11 @@ import ( "github.com/ethereum/go-ethereum/common" ) -var ErrInconsistentState = errors.New("state is inconsistent, try again later once the state is consolidated") +var ( + ErrInconsistentState = errors.New("state is inconsistent, try again later once the state is consolidated") + ErrNoLastProcessedBlock = errors.New("no block has been processed yet") + ErrAlreadyBootstrapped = errors.New("SyncNextBlock: already bootstrapped, a processed block already exists") +) var _ fmt.Stringer = (*Block)(nil) @@ -131,7 +135,37 @@ func (d *EVMDriver) GetCompletionPercentage() *float64 { return nil } -func (d *EVMDriver) Sync(ctx context.Context) { +// SyncNextBlock downloads and processes the given blockNum as a bootstrap step. +// It requires that no block has been processed yet (GetLastProcessedBlock returns found=false). +// Returns ErrAlreadyBootstrapped if a processed block already exists — callers may safely ignore this. 
+func (d *EVMDriver) SyncNextBlock(ctx context.Context, blockNum uint64) error { + _, found, err := d.processor.GetLastProcessedBlock(ctx) + if err != nil { + return fmt.Errorf("SyncNextBlock: getting last processed block: %w", err) + } + if found { + return ErrAlreadyBootstrapped + } + + cancelCtx, cancel := context.WithCancel(ctx) + defer cancel() + + downloadCh := make(chan EVMBlock, 1) + go d.downloader.Download(cancelCtx, blockNum, downloadCh) + + select { + case <-ctx.Done(): + return ctx.Err() + case b, ok := <-downloadCh: + if !ok { + return fmt.Errorf("SyncNextBlock: download channel closed unexpectedly") + } + cancel() // stop the downloader after receiving the first block + return d.handleNewBlock(ctx, b) + } +} + +func (d *EVMDriver) Sync(ctx context.Context, firstBlockNumber *uint64) { reset: var ( lastProcessedBlock uint64 @@ -148,10 +182,14 @@ reset: } break } - + var nextBlock uint64 for { // Now we let to have no processed block and wait until appears lastProcessedBlock, found, err = d.processor.GetLastProcessedBlock(ctx) + if err == nil && found { + nextBlock = lastProcessedBlock + 1 + break + } if err != nil { attempts++ d.log.Error("error getting last processed block: ", err) @@ -159,6 +197,11 @@ reset: continue } if !found { + if firstBlockNumber != nil { + d.log.Infof("no processed blocks found, starting from configured initial block number %d", *firstBlockNumber) + nextBlock = *firstBlockNumber // we will start syncing from the initial block number + break + } d.log.Infof("no processed blocks found, waiting %s", d.rh.RetryAfterErrorPeriod) select { case <-ctx.Done(): @@ -175,11 +218,12 @@ reset: cancellableCtx, cancel := context.WithCancel(ctx) defer cancel() - d.log.Infof("Starting sync... lastProcessedBlock %d", lastProcessedBlock) + d.log.Infof("Starting sync... 
lastProcessedBlock %d NextBlock: %d",
+		lastProcessedBlock, nextBlock)
 	// start downloading
 	downloadCh := make(chan EVMBlock, d.downloadBufferSize)
 	go func() {
-		d.downloader.Download(cancellableCtx, lastProcessedBlock+1, downloadCh)
+		d.downloader.Download(cancellableCtx, nextBlock, downloadCh)
 		log.Warnf("downloader.Download exited, cancelling context")
 		cancel()
 	}()
diff --git a/test/helpers/e2e.go b/test/helpers/e2e.go
index c89fbaa9e..d355db090 100644
--- a/test/helpers/e2e.go
+++ b/test/helpers/e2e.go
@@ -199,6 +199,7 @@ func L1Setup(t *testing.T, cfg *EnvironmentConfig) *L1Environment {
 	testClient := NewTestClient(l1Client.Client(), WithRPCClienter(cfg.L1RPCClient))
 
 	dbPathBridgeSyncL1 := path.Join(t.TempDir(), "BridgeSyncL1.sqlite")
+	syncFromL1Bridges := cfg.L1RPCClient != nil
 	bridgeSyncCfg := bridgesync.Config{
 		DBPath:                             dbPathBridgeSyncL1,
 		BridgeAddr:                         bridgeL1Addr,
@@ -211,6 +212,7 @@ func L1Setup(t *testing.T, cfg *EnvironmentConfig) *L1Environment {
 		RequireStorageContentCompatibility: true,
 		DBQueryTimeout:                     cfgtypes.NewDuration(defaultDBQueryTimeout),
 	}
+	bridgeSyncCfg.SyncFromInBridges.Resolved = &syncFromL1Bridges
 	bridgeL1Sync, err := bridgesync.NewL1(ctx, bridgeSyncCfg, rdL1, testClient, originNetwork)
 	require.NoError(t, err)
 

From f7636973ddc5a973bf74fad6ae27536a98436603 Mon Sep 17 00:00:00 2001
From: joanestebanr <129153821+joanestebanr@users.noreply.github.com>
Date: Tue, 17 Mar 2026 12:34:47 +0100
Subject: [PATCH 04/28] feat: lxbridge doesn't sync claims; they have been
 split out into the new lxclaimsyncer

---
 aggsender/flows/builder_flow_factory.go      |    4 +-
 aggsender/prover/proof_generation_tool.go    |    4 +-
 bridgeservice/bridge.go                      |  377 --
 bridgeservice/bridge_interfaces.go           |    8 -
 bridgesync/agglayer_bridge_l2_reader.go      |  146 -
 bridgesync/agglayer_bridge_l2_reader_test.go |  611 ---
 bridgesync/downloader.go                     |    8 +-
 bridgesync/downloader_test.go                |  468 +--
 bridgesync/processor.go                      |   47 +-
 bridgesync/processor_test.go                 | 3920 ++----------------
 claimsync/downloader.go                      |
74 +- claimsync/embedded.go | 13 +- claimsync/processor.go | 1 + config/config.go | 5 + config/default.go | 30 + l1infotreesync/l1infotreesync.go | 28 +- l2gersync/l2_ger_syncer.go | 4 +- multidownloader/sync/evmdriver.go | 11 +- scripts/aggsender_request_last_cert.sh | 4 + scripts/aggsender_request_status.sh | 4 + scripts/l1claimsync_set_starting_block.sh | 7 + scripts/l2claimsync_get_claims.sh | 7 + scripts/request_aggsender_status.sh | 2 - test/e2e/envs/op-pp/config_local/README.md | 1 + 24 files changed, 381 insertions(+), 5403 deletions(-) delete mode 100644 bridgesync/agglayer_bridge_l2_reader.go delete mode 100644 bridgesync/agglayer_bridge_l2_reader_test.go create mode 100755 scripts/aggsender_request_last_cert.sh create mode 100755 scripts/aggsender_request_status.sh create mode 100755 scripts/l1claimsync_set_starting_block.sh create mode 100755 scripts/l2claimsync_get_claims.sh delete mode 100644 scripts/request_aggsender_status.sh diff --git a/aggsender/flows/builder_flow_factory.go b/aggsender/flows/builder_flow_factory.go index 6ea650e1f..823a61398 100644 --- a/aggsender/flows/builder_flow_factory.go +++ b/aggsender/flows/builder_flow_factory.go @@ -12,7 +12,7 @@ import ( "github.com/agglayer/aggkit/aggsender/optimistic" "github.com/agglayer/aggkit/aggsender/query" "github.com/agglayer/aggkit/aggsender/types" - "github.com/agglayer/aggkit/bridgesync" + "github.com/agglayer/aggkit/claimsync" claimsynctypes "github.com/agglayer/aggkit/claimsync/types" aggkitcommon "github.com/agglayer/aggkit/common" "github.com/agglayer/aggkit/l2gersync" @@ -194,7 +194,7 @@ func CreateCommonFlowComponents( return nil, err } - agglayerBridgeL2Reader, err := bridgesync.NewAgglayerBridgeL2ReaderWithMaxLogBlockRange( + agglayerBridgeL2Reader, err := claimsync.NewAgglayerBridgeL2ReaderWithMaxLogBlockRange( agglayerBridgeL2Addr, l2Client, unsetClaimsMaxLogBlockRange, diff --git a/aggsender/prover/proof_generation_tool.go b/aggsender/prover/proof_generation_tool.go index 
e8219e8c8..17c171682 100644 --- a/aggsender/prover/proof_generation_tool.go +++ b/aggsender/prover/proof_generation_tool.go @@ -10,7 +10,7 @@ import ( "github.com/agglayer/aggkit/aggsender/flows" "github.com/agglayer/aggkit/aggsender/query" "github.com/agglayer/aggkit/aggsender/types" - "github.com/agglayer/aggkit/bridgesync" + "github.com/agglayer/aggkit/claimsync" bridgesynctypes "github.com/agglayer/aggkit/bridgesync/types" claimsynctypes "github.com/agglayer/aggkit/claimsync/types" aggkitgrpc "github.com/agglayer/aggkit/grpc" @@ -102,7 +102,7 @@ func NewAggchainProofGenerationTool( return nil, fmt.Errorf("error creating L2 GER reader: %w", err) } - agglayerBridgeL2Reader, err := bridgesync.NewAgglayerBridgeL2Reader(cfg.AgglayerBridgeL2Addr, l2Client) + agglayerBridgeL2Reader, err := claimsync.NewAgglayerBridgeL2Reader(cfg.AgglayerBridgeL2Addr, l2Client) if err != nil { return nil, fmt.Errorf("failed to create bridge L2 sovereign reader: %w", err) } diff --git a/bridgeservice/bridge.go b/bridgeservice/bridge.go index a23dce425..7cb94d0cd 100644 --- a/bridgeservice/bridge.go +++ b/bridgeservice/bridge.go @@ -30,7 +30,6 @@ import ( "github.com/agglayer/aggkit/bridgeservice/metrics" "github.com/agglayer/aggkit/bridgeservice/types" "github.com/agglayer/aggkit/bridgesync" - claimsynctypes "github.com/agglayer/aggkit/claimsync/types" aggkitcommon "github.com/agglayer/aggkit/common" "github.com/agglayer/aggkit/db" "github.com/agglayer/aggkit/l1infotreesync" @@ -202,9 +201,6 @@ func (b *BridgeService) registerRoutes() { bridgeGroup := b.router.Group(BridgeV1Prefix) { bridgeGroup.GET("/bridges", b.GetBridgesHandler) - bridgeGroup.GET("/claims", b.GetClaimsHandler) - bridgeGroup.GET("/unset-claims", b.GetUnsetClaimsHandler) - bridgeGroup.GET("/set-claims", b.GetSetClaimsHandler) bridgeGroup.GET("/token-mappings", b.GetTokenMappingsHandler) bridgeGroup.GET("/legacy-token-migrations", b.GetLegacyTokenMigrationsHandler) bridgeGroup.GET("/l1-info-tree-index", 
b.L1InfoTreeIndexForBridgeHandler) @@ -213,7 +209,6 @@ func (b *BridgeService) registerRoutes() { bridgeGroup.GET("/last-reorg-event", b.GetLastReorgEventHandler) bridgeGroup.GET("/sync-status", b.GetSyncStatusHandler) bridgeGroup.GET("/removed-gers", b.GetRemoveGEREventsHandler) - bridgeGroup.GET("/claims-by-ger", b.GetClaimsByGERHandler) bridgeGroup.GET("/bridge-by-deposit-count", b.GetBridgeByDepositCountHandler) bridgeGroup.GET("/bridges-by-content", b.GetBridgesByContentHandler) @@ -415,290 +410,8 @@ func (b *BridgeService) GetBridgesHandler(c *gin.Context) { }) } -// GetClaimsHandler retrieves paginated claims for a given network. -// -// @Summary Get claims -// @Description Returns a paginated list of claims for the specified network. -// @Tags claims -// @Param network_id query uint32 true "Origin network ID" -// @Param page_number query uint32 false "Page number (default 1)" -// @Param page_size query uint32 false "Page size (default 100)" -// @Param network_ids query []uint32 false "Filter by one or more source network IDs (maximum 5 allowed)" -// @Param include_all_fields query bool false "Whether to include full response fields (default false)" -// @Param global_index query uint32 false "Filter by global index" -// @Produce json -// @Success 200 {object} types.ClaimsResult -// @Failure 400 {object} types.ErrorResponse "Bad Request" -// @Failure 500 {object} types.ErrorResponse "Internal Server Error" -// @Router /claims [get] -func (b *BridgeService) GetClaimsHandler(c *gin.Context) { - b.logger.Debugf("GetClaims request received (network id=%s, page number=%s, page size=%s, "+ - "include_all_fields=%s, global_index=%s)", - c.Query(networkIDParam), c.Query(pageNumberParam), c.Query(pageSizeParam), - c.Query(includeAllFields), c.Query(globalIndexParam)) - - statusCode := http.StatusOK - startTime := time.Now() - defer func() { - reportMetrics(metrics.GetClaimsReq, statusCode, startTime) - }() - - networkID, err := parseUintQuery(c, networkIDParam, true, 
uint32(0)) - if err != nil { - b.logger.Warnf(errNetworkID, err) - statusCode = http.StatusBadRequest - c.JSON(statusCode, gin.H{"error": err.Error()}) - return - } - - networkIDs, err := parseNetworkIDSliceParam(c, networkIDsParam) - if err != nil { - b.logger.Warnf("invalid network IDs parameter: %v", err) - statusCode = http.StatusBadRequest - c.JSON(statusCode, gin.H{"error": fmt.Sprintf("invalid %s parameter: %s", networkIDsParam, err)}) - return - } - - // Parse include_all_fields parameter (default to false) - includeAllFieldsFlag := false - if includeAllFieldsStr := c.Query(includeAllFields); includeAllFieldsStr != "" { - includeAllFieldsFlag, err = strconv.ParseBool(includeAllFieldsStr) - if err != nil { - b.logger.Warnf("invalid include_all_fields parameter: %v", err) - statusCode = http.StatusBadRequest - c.JSON(statusCode, gin.H{"error": "invalid include_all_fields parameter"}) - return - } - } - - globalIndex, ctx, cancel, pageNumber, pageSize, ok := b.parseGlobalIndexAndSetupRequest(c, &statusCode) - if !ok { - return - } - defer cancel() - - b.logger.Debugf( - "fetching claims (network id=%d, page=%d, size=%d, "+ - "network_ids=%v, include_all_fields=%t, global_index=%d)", - networkID, pageNumber, pageSize, networkIDs, includeAllFieldsFlag, globalIndex) - - var ( - claims []*claimsynctypes.Claim - count int - ) - - switch networkID { - case mainnetNetworkID: - if b.bridgeL1 == nil { - statusCode = http.StatusServiceUnavailable - c.JSON(statusCode, - gin.H{"error": "L1 bridge syncer is not available"}) - return - } - - claims, count, err = b.bridgeL1.GetClaimsPaged(ctx, pageNumber, pageSize, networkIDs, globalIndex) - if err != nil { - b.logger.Warnf("failed to get claims for L1 network: %v", err) - statusCode = http.StatusInternalServerError - c.JSON(statusCode, - gin.H{"error": fmt.Sprintf("failed to get claims for the L1 network, error: %s", err)}) - return - } - case b.networkID: - if b.bridgeL2 == nil { - statusCode = 
http.StatusServiceUnavailable - c.JSON(statusCode, - gin.H{"error": "L2 bridge syncer is not available"}) - return - } - - claims, count, err = b.bridgeL2.GetClaimsPaged(ctx, pageNumber, pageSize, networkIDs, globalIndex) - if err != nil { - b.logger.Warnf("failed to get claims for L2 network (ID=%d): %v", networkID, err) - statusCode = http.StatusInternalServerError - c.JSON(statusCode, - gin.H{"error": fmt.Sprintf("failed to get claims for the L2 network (ID=%d), error: %s", networkID, err)}) - return - } - default: - b.logger.Warnf(errNetworkID, networkID) - statusCode = http.StatusBadRequest - c.JSON(statusCode, gin.H{"error": fmt.Sprintf(errNetworkID, networkID)}) - return - } - - // Use conditional function to create claim responses - claimResponses := make([]*types.ClaimResponse, len(claims)) - for i, claim := range claims { - claimResponses[i] = NewClaimResponse(claim, includeAllFieldsFlag) - } - - c.JSON(statusCode, - types.ClaimsResult{ - Claims: claimResponses, - Count: count, - }) -} - -// @Summary Get unset claims -// @Description Returns unset claims for the configured L2 network, paginated. -// Note: unset claims are only available for L2 networks, not L1. 
-// @Tags unset-claims -// @Param page_number query int false "Page number" -// @Param page_size query int false "Page size" -// @Param global_index query string false "Filter by global index" -// @Produce json -// @Success 200 {object} types.UnsetClaimsResult -// @Failure 400 {object} types.ErrorResponse "Bad Request - Invalid parameters" -// @Failure 500 {object} types.ErrorResponse "Internal Server Error" -// @Failure 503 {object} types.ErrorResponse "Service Unavailable - L2 bridge syncer not available" -// @Router /unset-claims [get] -func (b *BridgeService) GetUnsetClaimsHandler(c *gin.Context) { - b.logger.Debugf("GetUnsetClaims request received (page number=%s, page size=%s, global_index=%s)", - c.Query(pageNumberParam), c.Query(pageSizeParam), c.Query(globalIndexParam)) - - statusCode := http.StatusOK - startTime := time.Now() - defer func() { - reportMetrics(metrics.GetUnsetClaimsReq, statusCode, startTime) - }() - - if b.bridgeL2 == nil { - statusCode = http.StatusServiceUnavailable - c.JSON(statusCode, - gin.H{"error": "L2 bridge syncer is not available"}) - return - } - - globalIndex, ctx, cancel, pageNumber, pageSize, ok := b.parseGlobalIndexAndSetupRequest(c, &statusCode) - if !ok { - return - } - defer cancel() - - b.logger.Debugf("fetching unset claims for L2 network (network id=%d, page=%d, size=%d, global_index=%v)", - b.networkID, pageNumber, pageSize, globalIndex) - - var ( - unsetClaims []*claimsynctypes.UnsetClaim - count int - err error - ) - - unsetClaims, count, err = b.bridgeL2.GetUnsetClaimsPaged(ctx, pageNumber, pageSize, globalIndex) - if err != nil { - b.logger.Warnf("failed to get unset claims for L2 network (ID=%d): %v", b.networkID, err) - statusCode = http.StatusInternalServerError - c.JSON(statusCode, - gin.H{"error": fmt.Sprintf("failed to get unset claims for the L2 network (ID=%d), error: %s", b.networkID, err)}) - return - } - - // Convert unset claims to response format - unsetClaimResponses := 
make([]*types.UnsetClaimResponse, len(unsetClaims)) - for i, unsetClaim := range unsetClaims { - unsetClaimResponses[i] = &types.UnsetClaimResponse{ - BlockNum: unsetClaim.BlockNum, - BlockPos: unsetClaim.BlockPos, - TxHash: types.Hash(unsetClaim.TxHash.Hex()), - GlobalIndex: types.BigIntString(unsetClaim.GlobalIndex.String()), - UnsetGlobalIndexHashChain: types.Hash(unsetClaim.UnsetGlobalIndexHashChain.Hex()), - CreatedAt: unsetClaim.CreatedAt, - } - } - c.JSON(statusCode, - types.UnsetClaimsResult{ - UnsetClaims: unsetClaimResponses, - Count: count, - }) -} - -// @Summary Get set claims -// @Description Returns set claims for the configured L2 network, paginated. -// Note: set claims are only available for L2 networks, not L1. -// @Tags set-claims -// @Param page_number query int false "Page number" -// @Param page_size query int false "Page size" -// @Param global_index query string false "Filter by global index" -// @Produce json -// @Success 200 {object} types.SetClaimsResult -// @Failure 400 {object} types.ErrorResponse "Bad Request - Invalid parameters" -// @Failure 500 {object} types.ErrorResponse "Internal Server Error" -// @Failure 503 {object} types.ErrorResponse "Service Unavailable - L2 bridge syncer not available" -// @Router /set-claims [get] -func (b *BridgeService) GetSetClaimsHandler(c *gin.Context) { - b.logger.Debugf("GetSetClaims request received (page number=%s, page size=%s, global_index=%s)", - c.Query(pageNumberParam), c.Query(pageSizeParam), c.Query(globalIndexParam)) - - statusCode := http.StatusOK - startTime := time.Now() - defer func() { - reportMetrics(metrics.GetSetClaimsReq, statusCode, startTime) - }() - - if b.bridgeL2 == nil { - statusCode = http.StatusServiceUnavailable - c.JSON(statusCode, - gin.H{"error": "L2 bridge syncer is not available"}) - return - } - - globalIndex, ctx, cancel, pageNumber, pageSize, ok := b.parseGlobalIndexAndSetupRequest(c, &statusCode) - if !ok { - return - } - defer cancel() - 
b.logger.Debugf("fetching set claims for L2 network (network id=%d, page=%d, size=%d, global_index=%v)", - b.networkID, pageNumber, pageSize, globalIndex) - - var ( - setClaims []*claimsynctypes.SetClaim - count int - err error - ) - - setClaims, count, err = b.bridgeL2.GetSetClaimsPaged(ctx, pageNumber, pageSize, globalIndex) - if err != nil { - b.logger.Warnf("failed to get set claims for L2 network (ID=%d): %v", b.networkID, err) - statusCode = http.StatusInternalServerError - c.JSON(statusCode, - gin.H{"error": fmt.Sprintf("failed to get set claims for the L2 network (ID=%d), error: %s", b.networkID, err)}) - return - } - - // Convert set claims to response format - setClaimResponses := make([]*types.SetClaimResponse, len(setClaims)) - for i, setClaim := range setClaims { - setClaimResponses[i] = &types.SetClaimResponse{ - BlockNum: setClaim.BlockNum, - BlockPos: setClaim.BlockPos, - TxHash: types.Hash(setClaim.TxHash.Hex()), - GlobalIndex: types.BigIntString(setClaim.GlobalIndex.String()), - CreatedAt: setClaim.CreatedAt, - } - } - - c.JSON(statusCode, - types.SetClaimsResult{ - SetClaims: setClaimResponses, - Count: count, - }) -} - -// @Summary Get token mappings -// @Description Returns token mappings for the given network, paginated -// @Tags token-mappings -// @Param network_id query int true "Network ID" -// @Param page_number query int false "Page number" -// @Param page_size query int false "Page size" -// @Param origin_token_address query string false "Filter by origin token address" -// @Produce json -// @Success 200 {object} types.TokenMappingsResult -// @Failure 400 {object} types.ErrorResponse "Bad Request" -// @Failure 500 {object} types.ErrorResponse "Internal Server Error" -// @Router /token-mappings [get] func (b *BridgeService) GetTokenMappingsHandler(c *gin.Context) { b.logger.Debugf( "GetTokenMappings request received (network id=%s, page number=%s, page size=%s, origin token address=%s)", @@ -1600,96 +1313,6 @@ func reportMetrics(handlerID 
string, statusCode int, startTime time.Time) { metrics.ObserveRequestLatencyHistogram(handlerID, startTime) } -// GetClaimsByGERHandler retrieves all DetailedClaimEvent claims that used the given global exit root. -// -// @Summary Get claims by global exit root -// @Description Returns all claims (DetailedClaimEvent type) recorded with the specified GER for the given network. -// @Tags claims -// @Param network_id query uint32 true "Network ID (0 for L1, L2 network ID otherwise)" -// @Param global_exit_root query string true "Global exit root (0x-prefixed 32-byte hex)" -// @Produce json -// @Success 200 {object} types.ClaimsByGERResult -// @Failure 400 {object} types.ErrorResponse "Bad Request" -// @Failure 500 {object} types.ErrorResponse "Internal Server Error" -// @Failure 503 {object} types.ErrorResponse "Service Unavailable" -// @Router /claims-by-ger [get] -func (b *BridgeService) GetClaimsByGERHandler(c *gin.Context) { - b.logger.Debugf("GetClaimsByGER request received") - - statusCode := http.StatusOK - startTime := time.Now() - defer func() { - reportMetrics(metrics.GetClaimsByGERReq, statusCode, startTime) - }() - - ctx, cancel := context.WithTimeout(c, b.readTimeout) - defer cancel() - - networkID, err := parseUintQuery(c, networkIDParam, true, uint32(0)) - if err != nil { - b.logger.Warnf(errNetworkID, err) - statusCode = http.StatusBadRequest - c.JSON(statusCode, gin.H{"error": err.Error()}) - return - } - - gerStr := c.Query("global_exit_root") - if gerStr == "" { - statusCode = http.StatusBadRequest - c.JSON(statusCode, gin.H{"error": "global_exit_root is mandatory"}) - return - } - if !isValidHexHash(gerStr) { - statusCode = http.StatusBadRequest - c.JSON(statusCode, gin.H{"error": "invalid global_exit_root parameter, must be a valid hex hash"}) - return - } - ger := common.HexToHash(gerStr) - - var claims []*claimsynctypes.Claim - switch networkID { - case mainnetNetworkID: - if b.bridgeL1 == nil { - statusCode = http.StatusServiceUnavailable - 
c.JSON(statusCode, gin.H{"error": "L1 bridge syncer is not available"}) - return - } - claims, err = b.bridgeL1.GetClaimsByGER(ctx, ger) - if err != nil { - b.logger.Errorf("failed to get claims by GER %s for L1 network: %v", gerStr, err) - statusCode = http.StatusInternalServerError - c.JSON(statusCode, gin.H{"error": fmt.Sprintf("failed to get claims by GER: %s", err)}) - return - } - case b.networkID: - if b.bridgeL2 == nil { - statusCode = http.StatusServiceUnavailable - c.JSON(statusCode, gin.H{"error": "L2 bridge syncer is not available"}) - return - } - claims, err = b.bridgeL2.GetClaimsByGER(ctx, ger) - if err != nil { - b.logger.Errorf("failed to get claims by GER %s for L2 network (ID=%d): %v", gerStr, networkID, err) - statusCode = http.StatusInternalServerError - c.JSON(statusCode, gin.H{"error": fmt.Sprintf("failed to get claims by GER: %s", err)}) - return - } - default: - b.logger.Warnf(errNetworkID, networkID) - statusCode = http.StatusBadRequest - c.JSON(statusCode, gin.H{"error": fmt.Sprintf(errNetworkID, networkID)}) - return - } - - claimResponses := make([]*types.ClaimResponse, 0, len(claims)) - for _, claim := range claims { - claimResponses = append(claimResponses, NewClaimResponse(claim, false)) - } - c.JSON(statusCode, types.ClaimsByGERResult{ - Claims: claimResponses, - Count: len(claimResponses), - }) -} // GetBridgeByDepositCountHandler retrieves a bridge by deposit count for the given network. 
// diff --git a/bridgeservice/bridge_interfaces.go b/bridgeservice/bridge_interfaces.go index 1be67e05e..c93473368 100644 --- a/bridgeservice/bridge_interfaces.go +++ b/bridgeservice/bridge_interfaces.go @@ -5,7 +5,6 @@ import ( "math/big" "github.com/agglayer/aggkit/bridgesync" - claimsynctypes "github.com/agglayer/aggkit/claimsync/types" "github.com/agglayer/aggkit/l1infotreesync" "github.com/agglayer/aggkit/l2gersync" tree "github.com/agglayer/aggkit/tree/types" @@ -22,18 +21,11 @@ type Bridger interface { originTokenAddress string) ([]*bridgesync.TokenMapping, int, error) GetLegacyTokenMigrations(ctx context.Context, pageNumber, pageSize uint32) ([]*bridgesync.LegacyTokenMigration, int, error) - GetClaimsPaged(ctx context.Context, page, pageSize uint32, - networkIDs []uint32, globalIndex *big.Int) ([]*claimsynctypes.Claim, int, error) - GetUnsetClaimsPaged(ctx context.Context, page, pageSize uint32, - globalIndex *big.Int) ([]*claimsynctypes.UnsetClaim, int, error) - GetSetClaimsPaged(ctx context.Context, page, pageSize uint32, - globalIndex *big.Int) ([]*claimsynctypes.SetClaim, int, error) GetLastReorgEvent(ctx context.Context) (*bridgesync.LastReorg, error) GetContractDepositCount(ctx context.Context) (uint32, error) GetLastProcessedBlock(ctx context.Context) (uint64, bool, error) GetLatestNetworkBlock(ctx context.Context) (uint64, error) IsActive(ctx context.Context) bool - GetClaimsByGER(ctx context.Context, globalExitRoot common.Hash) ([]*claimsynctypes.Claim, error) GetBridgeByDepositCount(ctx context.Context, depositCount uint32) (*bridgesync.Bridge, error) GetBridgesByContent(ctx context.Context, leafType uint8, originAddress common.Address, destinationNetwork uint32, destinationAddress common.Address, diff --git a/bridgesync/agglayer_bridge_l2_reader.go b/bridgesync/agglayer_bridge_l2_reader.go deleted file mode 100644 index 03b21fc27..000000000 --- a/bridgesync/agglayer_bridge_l2_reader.go +++ /dev/null @@ -1,146 +0,0 @@ -package bridgesync - -import 
( - "context" - "fmt" - "math/big" - - "github.com/0xPolygon/cdk-contracts-tooling/contracts/aggchain-multisig/agglayerbridgel2" - claimsynctypes "github.com/agglayer/aggkit/claimsync/types" - aggkitcommon "github.com/agglayer/aggkit/common" - "github.com/agglayer/aggkit/log" - aggkittypes "github.com/agglayer/aggkit/types" - "github.com/ethereum/go-ethereum/accounts/abi/bind" - "github.com/ethereum/go-ethereum/common" -) - -// AgglayerBridgeL2Reader provides functionality to read and interact with the AggLayer Bridge L2 contract. -// It encapsulates the contract instance and provides methods to query bridge-related data from the L2 chain. -type AgglayerBridgeL2Reader struct { - agglayerBridgeL2 *agglayerbridgel2.Agglayerbridgel2 - unsetClaimsMaxLogBlockRange uint64 -} - -// NewAgglayerBridgeL2Reader creates a new instance of AgglayerBridgeL2Reader. -// It initializes the contract instance using the provided bridge address and L2 client. -// -// Parameters: -// - bridgeAddr: The Ethereum address of the AggLayer Bridge L2 contract -// - l2Client: The Ethereum client for interacting with the L2 chain -// -// Returns: -// - *AgglayerBridgeL2Reader: A new reader instance -// - error: Any error that occurred during contract initialization -func NewAgglayerBridgeL2Reader( - bridgeAddr common.Address, - l2Client aggkittypes.BaseEthereumClienter, -) (*AgglayerBridgeL2Reader, error) { - return NewAgglayerBridgeL2ReaderWithMaxLogBlockRange(bridgeAddr, l2Client, 0) -} - -// NewAgglayerBridgeL2ReaderWithMaxLogBlockRange creates a new instance of AgglayerBridgeL2Reader -// with an optional proactive max block range for unset claims eth_getLogs queries. 
-func NewAgglayerBridgeL2ReaderWithMaxLogBlockRange( - bridgeAddr common.Address, - l2Client aggkittypes.BaseEthereumClienter, - unsetClaimsMaxLogBlockRange uint64, -) (*AgglayerBridgeL2Reader, error) { - agglayerBridgeL2Contract, err := agglayerbridgel2.NewAgglayerbridgel2(bridgeAddr, l2Client) - if err != nil { - return nil, err - } - - return &AgglayerBridgeL2Reader{ - agglayerBridgeL2: agglayerBridgeL2Contract, - unsetClaimsMaxLogBlockRange: unsetClaimsMaxLogBlockRange, - }, nil -} - -// GetUnsetClaimsForBlockRange retrieves all unset claims (unclaims) within a specified block range. -// It filters the UpdatedUnsetGlobalIndexHashChain events from the bridge contract and converts them -// into Unclaim objects for further processing. -// If the block range is too large, it automatically splits the request into smaller chunks. -// -// Parameters: -// - ctx: Context for cancellation and timeout control -// - fromBlock: The starting block number for the search range (inclusive) -// - toBlock: The ending block number for the search range (inclusive) -// -// Returns: -// - []types.Unclaim: A slice of Unclaim objects containing global index, block number, and block index -// - error: Any error that occurred during the event filtering or iteration -func (r *AgglayerBridgeL2Reader) GetUnsetClaimsForBlockRange(ctx context.Context, - fromBlock, toBlock uint64) ([]claimsynctypes.Unclaim, error) { - if fromBlock > toBlock { - return nil, fmt.Errorf("invalid block range: fromBlock(%d) > toBlock(%d)", fromBlock, toBlock) - } - - if r.unsetClaimsMaxLogBlockRange > 0 && toBlock-fromBlock >= r.unsetClaimsMaxLogBlockRange { - return r.getUnsetClaimsInChunks(ctx, fromBlock, toBlock, r.unsetClaimsMaxLogBlockRange) - } - - return r.fetchUnsetClaimsWithFallbackChunking(ctx, fromBlock, toBlock) -} - -func (r *AgglayerBridgeL2Reader) fetchUnsetClaimsWithFallbackChunking(ctx context.Context, - fromBlock, toBlock uint64) ([]claimsynctypes.Unclaim, error) { - unclaims, err := 
r.fetchUnsetClaims(ctx, fromBlock, toBlock) - if err != nil { - // Check if error is due to block range being too large - maxRange, isMaxRangeErr := aggkitcommon.ParseMaxRangeFromError(err.Error()) - if isMaxRangeErr { - return r.getUnsetClaimsInChunks(ctx, fromBlock, toBlock, maxRange) - } - - return nil, err - } - - return unclaims, nil -} - -func (r *AgglayerBridgeL2Reader) getUnsetClaimsInChunks(ctx context.Context, - fromBlock, toBlock, maxRange uint64) ([]claimsynctypes.Unclaim, error) { - log.Debugf("block range too large, splitting into chunks of max %d blocks", maxRange) - return aggkitcommon.ChunkedRangeQuery( - ctx, fromBlock, toBlock, maxRange, - r.fetchUnsetClaimsWithFallbackChunking, - func(all, chunk []claimsynctypes.Unclaim) []claimsynctypes.Unclaim { - return append(all, chunk...) - }, - make([]claimsynctypes.Unclaim, 0), - ) -} - -// fetchUnsetClaims performs the actual event filtering for a given block range -func (r *AgglayerBridgeL2Reader) fetchUnsetClaims(ctx context.Context, - fromBlock, toBlock uint64) ([]claimsynctypes.Unclaim, error) { - unclaimIterator, err := r.agglayerBridgeL2.FilterUpdatedUnsetGlobalIndexHashChain( - &bind.FilterOpts{Context: ctx, Start: fromBlock, End: &toBlock}) - if err != nil { - return nil, err - } - - defer func() { - if err := unclaimIterator.Close(); err != nil { - log.Errorf("failed to close UpdatedUnsetGlobalIndexHashChain iterator: %v", err) - } - }() - - unclaims := make([]claimsynctypes.Unclaim, 0) - for unclaimIterator.Next() { - globalIndex := unclaimIterator.Event.UnsetGlobalIndex - log.Infof("unset claim: %s at block %d, index %d", new(big.Int).SetBytes(globalIndex[:]), - unclaimIterator.Event.Raw.BlockNumber, unclaimIterator.Event.Raw.Index) - unclaims = append(unclaims, claimsynctypes.Unclaim{ - GlobalIndex: new(big.Int).SetBytes(globalIndex[:]), - BlockNumber: unclaimIterator.Event.Raw.BlockNumber, - LogIndex: uint64(unclaimIterator.Event.Raw.Index), - }) - } - - if unclaimIterator.Error() != nil { 
- return nil, unclaimIterator.Error() - } - - return unclaims, nil -} diff --git a/bridgesync/agglayer_bridge_l2_reader_test.go b/bridgesync/agglayer_bridge_l2_reader_test.go deleted file mode 100644 index e3217c1f1..000000000 --- a/bridgesync/agglayer_bridge_l2_reader_test.go +++ /dev/null @@ -1,611 +0,0 @@ -package bridgesync - -import ( - "context" - "errors" - "testing" - - claimsynctypes "github.com/agglayer/aggkit/claimsync/types" - aggkitcommon "github.com/agglayer/aggkit/common" - "github.com/agglayer/aggkit/etherman" - aggkittypes "github.com/agglayer/aggkit/types" - mocksethclient "github.com/agglayer/aggkit/types/mocks" - ethereum "github.com/ethereum/go-ethereum" - "github.com/ethereum/go-ethereum/common" - ethtypes "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/ethclient/simulated" - "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" -) - -func TestNewAgglayerBridgeL2Reader(t *testing.T) { - tests := []struct { - name string - bridgeAddr common.Address - l2Client aggkittypes.BaseEthereumClienter - expectError bool - errorMsg string - }{ - { - name: "successful creation", - bridgeAddr: common.HexToAddress("0x1234567890abcdef1234567890abcdef12345678"), - l2Client: mocksethclient.NewBaseEthereumClienter(t), - expectError: false, - }, - { - name: "zero address", - bridgeAddr: common.Address{}, - l2Client: mocksethclient.NewBaseEthereumClienter(t), - expectError: false, // Zero address is valid, contract creation might still work - }, - { - name: "contract creation with valid mock client", - bridgeAddr: common.HexToAddress("0x1234567890abcdef1234567890abcdef12345678"), - l2Client: mocksethclient.NewBaseEthereumClienter(t), - expectError: false, // The contract creation should succeed with a valid mock client - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - reader, err := NewAgglayerBridgeL2Reader(tt.bridgeAddr, tt.l2Client) - - if tt.expectError { - require.Error(t, err) 
- require.Nil(t, reader) - if tt.errorMsg != "" { - require.Contains(t, err.Error(), tt.errorMsg) - } - } else { - require.NoError(t, err) - require.NotNil(t, reader) - require.NotNil(t, reader.agglayerBridgeL2) - } - }) - } -} - -func TestAgglayerBridgeL2Reader_GetUnsetClaimsForBlockRange_ProactiveChunkingByConfig(t *testing.T) { - ctx := context.Background() - bridgeAddr := common.HexToAddress("0x1234567890abcdef1234567890abcdef12345678") - - t.Run("configured max proactively chunks range", func(t *testing.T) { - mockClient := mocksethclient.NewBaseEthereumClienter(t) - reader, err := NewAgglayerBridgeL2ReaderWithMaxLogBlockRange(bridgeAddr, mockClient, 1000) - require.NoError(t, err) - - var ranges [][2]uint64 - mockClient.On("FilterLogs", mock.Anything, mock.Anything).Return([]ethtypes.Log{}, nil).Run(func(args mock.Arguments) { - q, ok := args.Get(1).(ethereum.FilterQuery) - require.True(t, ok) - require.NotNil(t, q.FromBlock) - require.NotNil(t, q.ToBlock) - ranges = append(ranges, [2]uint64{q.FromBlock.Uint64(), q.ToBlock.Uint64()}) - }).Times(3) - - unclaims, err := reader.GetUnsetClaimsForBlockRange(ctx, 0, 2500) - require.NoError(t, err) - require.NotNil(t, unclaims) - require.Equal(t, [][2]uint64{{0, 999}, {1000, 1999}, {2000, 2500}}, ranges) - }) - - t.Run("zero configured max keeps current non-proactive behavior", func(t *testing.T) { - mockClient := mocksethclient.NewBaseEthereumClienter(t) - reader, err := NewAgglayerBridgeL2ReaderWithMaxLogBlockRange(bridgeAddr, mockClient, 0) - require.NoError(t, err) - - mockClient.On("FilterLogs", mock.Anything, mock.Anything).Return([]ethtypes.Log{}, nil).Once() - - unclaims, err := reader.GetUnsetClaimsForBlockRange(ctx, 0, 2500) - require.NoError(t, err) - require.NotNil(t, unclaims) - }) -} - -func TestAgglayerBridgeL2Reader_GetUnsetClaimsForBlockRange_WithMockedClient(t *testing.T) { - ctx := context.Background() - bridgeAddr := common.HexToAddress("0x1234567890abcdef1234567890abcdef12345678") - mockClient 
:= mocksethclient.NewBaseEthereumClienter(t) - - // Mock the FilterLogs method that will be called by the contract - mockClient.On("FilterLogs", mock.Anything, mock.Anything).Return([]ethtypes.Log{}, nil) - - reader, err := NewAgglayerBridgeL2Reader(bridgeAddr, mockClient) - require.NoError(t, err) - - t.Run("successful call with mocked client", func(t *testing.T) { - unclaims, err := reader.GetUnsetClaimsForBlockRange(ctx, 100, 200) - require.NoError(t, err) - require.NotNil(t, unclaims) - require.Empty(t, unclaims) // Should be empty since we mocked empty results - }) - - t.Run("zero block range", func(t *testing.T) { - unclaims, err := reader.GetUnsetClaimsForBlockRange(ctx, 0, 0) - require.NoError(t, err) - require.NotNil(t, unclaims) - require.Empty(t, unclaims) - }) - - t.Run("same from and to block", func(t *testing.T) { - unclaims, err := reader.GetUnsetClaimsForBlockRange(ctx, 100, 100) - require.NoError(t, err) - require.NotNil(t, unclaims) - require.Empty(t, unclaims) - }) - - t.Run("large block range", func(t *testing.T) { - unclaims, err := reader.GetUnsetClaimsForBlockRange(ctx, 0, ^uint64(0)) - require.NoError(t, err) - require.NotNil(t, unclaims) - require.Empty(t, unclaims) - }) - - mockClient.AssertExpectations(t) -} - -func TestAgglayerBridgeL2Reader_GetUnsetClaimsForBlockRange_ErrorHandling(t *testing.T) { - bridgeAddr := common.HexToAddress("0x1234567890abcdef1234567890abcdef12345678") - mockClient := mocksethclient.NewBaseEthereumClienter(t) - - reader, err := NewAgglayerBridgeL2Reader(bridgeAddr, mockClient) - require.NoError(t, err) - - t.Run("context cancellation", func(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - cancel() // Cancel immediately - - // Mock the FilterLogs method - mockClient.On("FilterLogs", mock.Anything, mock.Anything).Return([]ethtypes.Log{}, nil) - - unclaims, err := reader.GetUnsetClaimsForBlockRange(ctx, 100, 200) - require.NoError(t, err) // Context cancellation doesn't cause error in 
this implementation - require.NotNil(t, unclaims) - require.Empty(t, unclaims) - }) - - t.Run("nil context handling", func(t *testing.T) { - // Test that nil context is handled gracefully - unclaims, err := reader.GetUnsetClaimsForBlockRange(context.TODO(), 100, 200) - require.NoError(t, err) // The function handles nil context gracefully - require.NotNil(t, unclaims) - require.Empty(t, unclaims) - }) - - mockClient.AssertExpectations(t) -} - -func TestAgglayerBridgeL2Reader_GetUnsetClaimsForBlockRange_InputValidation(t *testing.T) { - ctx := context.Background() - bridgeAddr := common.HexToAddress("0x1234567890abcdef1234567890abcdef12345678") - mockClient := mocksethclient.NewBaseEthereumClienter(t) - - // Mock the FilterLogs method - mockClient.On("FilterLogs", mock.Anything, mock.Anything).Return([]ethtypes.Log{}, nil) - - reader, err := NewAgglayerBridgeL2Reader(bridgeAddr, mockClient) - require.NoError(t, err) - - t.Run("fromBlock greater than toBlock", func(t *testing.T) { - unclaims, err := reader.GetUnsetClaimsForBlockRange(ctx, 200, 100) - // This should return an error as it's an invalid block range - require.Error(t, err) - require.Nil(t, unclaims) - require.Contains(t, err.Error(), "invalid block range") - require.Contains(t, err.Error(), "fromBlock(200) > toBlock(100)") - }) - - t.Run("maximum uint64 values", func(t *testing.T) { - unclaims, err := reader.GetUnsetClaimsForBlockRange(ctx, ^uint64(0), ^uint64(0)) - require.NoError(t, err) - require.NotNil(t, unclaims) - require.Empty(t, unclaims) - }) - - t.Run("minimum values", func(t *testing.T) { - unclaims, err := reader.GetUnsetClaimsForBlockRange(ctx, 0, 0) - require.NoError(t, err) - require.NotNil(t, unclaims) - require.Empty(t, unclaims) - }) - - mockClient.AssertExpectations(t) -} - -// Test error handling in GetUnsetClaimsForBlockRange -func TestAgglayerBridgeL2Reader_GetUnsetClaimsForBlockRange_FilterErrorHandling(t *testing.T) { - ctx := context.Background() - bridgeAddr := 
common.HexToAddress("0x1234567890abcdef1234567890abcdef12345678") - mockClient := mocksethclient.NewBaseEthereumClienter(t) - - reader, err := NewAgglayerBridgeL2Reader(bridgeAddr, mockClient) - require.NoError(t, err) - - t.Run("filter error", func(t *testing.T) { - // Mock FilterLogs to return an error - mockClient.On("FilterLogs", mock.Anything, mock.Anything).Return([]ethtypes.Log{}, errors.New("filter error")) - - unclaims, err := reader.GetUnsetClaimsForBlockRange(ctx, 100, 200) - require.Error(t, err) - require.Nil(t, unclaims) - require.Contains(t, err.Error(), "filter error") - }) - - mockClient.AssertExpectations(t) -} - -// Test iterator close error handling -func TestAgglayerBridgeL2Reader_GetUnsetClaimsForBlockRange_IteratorCloseError(t *testing.T) { - ctx := context.Background() - bridgeAddr := common.HexToAddress("0x1234567890abcdef1234567890abcdef12345678") - mockClient := mocksethclient.NewBaseEthereumClienter(t) - - // Mock FilterLogs to return empty results - mockClient.On("FilterLogs", mock.Anything, mock.Anything).Return([]ethtypes.Log{}, nil) - - reader, err := NewAgglayerBridgeL2Reader(bridgeAddr, mockClient) - require.NoError(t, err) - - // Test normal operation - iterator close error is logged but doesn't affect return - unclaims, err := reader.GetUnsetClaimsForBlockRange(ctx, 100, 200) - require.NoError(t, err) - require.NotNil(t, unclaims) - require.Empty(t, unclaims) - - mockClient.AssertExpectations(t) -} - -// Test with simulated backend to get real contract behavior -func TestAgglayerBridgeL2Reader_GetUnsetClaimsForBlockRange_SimulatedBackend(t *testing.T) { - ctx := context.Background() - bridgeAddr := common.HexToAddress("0x1234567890abcdef1234567890abcdef12345678") - - // Use a simulated backend to get real contract behavior - simulatedBackend := simulated.NewBackend(nil, simulated.WithBlockGasLimit(10000000)) - defer simulatedBackend.Close() - - // Use the client from the simulated backend - client := simulatedBackend.Client() - - 
reader, err := NewAgglayerBridgeL2Reader(bridgeAddr, etherman.NewDefaultEthClient(client, nil, nil)) - require.NoError(t, err) - - // Test with the simulated backend - need to mine some blocks first - simulatedBackend.Commit() // Mine the genesis block - - unclaims, err := reader.GetUnsetClaimsForBlockRange(ctx, 0, 1) - require.NoError(t, err) - require.NotNil(t, unclaims) - // Should be empty since no events were emitted - require.Empty(t, unclaims) -} - -// Test with real contract events to test iterator behavior -func TestAgglayerBridgeL2Reader_GetUnsetClaimsForBlockRange_WithRealEvents(t *testing.T) { - ctx := context.Background() - bridgeAddr := common.HexToAddress("0x1234567890abcdef1234567890abcdef12345678") - - // Use a simulated backend to get real contract behavior - simulatedBackend := simulated.NewBackend(nil, simulated.WithBlockGasLimit(10000000)) - defer simulatedBackend.Close() - - // Use the client from the simulated backend - client := simulatedBackend.Client() - - reader, err := NewAgglayerBridgeL2Reader(bridgeAddr, etherman.NewDefaultEthClient(client, nil, nil)) - require.NoError(t, err) - - // Mine some blocks to create a valid range - simulatedBackend.Commit() // Block 1 - simulatedBackend.Commit() // Block 2 - simulatedBackend.Commit() // Block 3 - - // Test with a valid block range - unclaims, err := reader.GetUnsetClaimsForBlockRange(ctx, 1, 3) - require.NoError(t, err) - require.NotNil(t, unclaims) - // Should be empty since no events were emitted, but this tests the iterator path - require.Empty(t, unclaims) -} - -// Test the actual iterator behavior by creating a test that exercises the iterator loop -func TestAgglayerBridgeL2Reader_GetUnsetClaimsForBlockRange_IteratorBehavior(t *testing.T) { - ctx := context.Background() - bridgeAddr := common.HexToAddress("0x1234567890abcdef1234567890abcdef12345678") - - // Use a simulated backend to get real contract behavior - simulatedBackend := simulated.NewBackend(nil, 
simulated.WithBlockGasLimit(10000000)) - defer simulatedBackend.Close() - - // Use the client from the simulated backend - client := simulatedBackend.Client() - - reader, err := NewAgglayerBridgeL2Reader(bridgeAddr, etherman.NewDefaultEthClient(client, nil, nil)) - require.NoError(t, err) - - // Mine some blocks to create a valid range - simulatedBackend.Commit() // Block 1 - simulatedBackend.Commit() // Block 2 - simulatedBackend.Commit() // Block 3 - - // Test with a valid block range - this will test the iterator behavior - // The iterator will be created and the Next() method will be called - // Even though there are no events, this tests the iterator loop structure - unclaims, err := reader.GetUnsetClaimsForBlockRange(ctx, 1, 3) - require.NoError(t, err) - require.NotNil(t, unclaims) - // Should be empty since no events were emitted, but this tests the iterator path - require.Empty(t, unclaims) - - // Test with a single block range - unclaims, err = reader.GetUnsetClaimsForBlockRange(ctx, 1, 1) - require.NoError(t, err) - require.NotNil(t, unclaims) - require.Empty(t, unclaims) -} - -// Test with different block ranges -func TestAgglayerBridgeL2Reader_GetUnsetClaimsForBlockRange_BlockRanges(t *testing.T) { - ctx := context.Background() - bridgeAddr := common.HexToAddress("0x1234567890abcdef1234567890abcdef12345678") - mockClient := mocksethclient.NewBaseEthereumClienter(t) - - // Mock FilterLogs to return empty results for all calls - mockClient.On("FilterLogs", mock.Anything, mock.Anything).Return([]ethtypes.Log{}, nil) - - reader, err := NewAgglayerBridgeL2Reader(bridgeAddr, mockClient) - require.NoError(t, err) - - testCases := []struct { - name string - fromBlock uint64 - toBlock uint64 - }{ - {"zero to zero", 0, 0}, - {"zero to max", 0, ^uint64(0)}, - {"max to max", ^uint64(0), ^uint64(0)}, - {"normal range", 100, 200}, - {"single block", 100, 100}, - {"large range", 0, 1000000}, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { 
- unclaims, err := reader.GetUnsetClaimsForBlockRange(ctx, tc.fromBlock, tc.toBlock) - require.NoError(t, err) - require.NotNil(t, unclaims) - }) - } - - mockClient.AssertExpectations(t) -} - -// Test context handling -func TestAgglayerBridgeL2Reader_GetUnsetClaimsForBlockRange_ContextHandling(t *testing.T) { - bridgeAddr := common.HexToAddress("0x1234567890abcdef1234567890abcdef12345678") - mockClient := mocksethclient.NewBaseEthereumClienter(t) - - // Mock FilterLogs to return empty results - mockClient.On("FilterLogs", mock.Anything, mock.Anything).Return([]ethtypes.Log{}, nil) - - reader, err := NewAgglayerBridgeL2Reader(bridgeAddr, mockClient) - require.NoError(t, err) - - t.Run("cancelled context", func(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - cancel() // Cancel immediately - - unclaims, err := reader.GetUnsetClaimsForBlockRange(ctx, 100, 200) - require.NoError(t, err) // Context cancellation doesn't cause error in this implementation - require.NotNil(t, unclaims) - }) - - t.Run("background context", func(t *testing.T) { - unclaims, err := reader.GetUnsetClaimsForBlockRange(context.Background(), 100, 200) - require.NoError(t, err) - require.NotNil(t, unclaims) - }) - - t.Run("TODO context", func(t *testing.T) { - unclaims, err := reader.GetUnsetClaimsForBlockRange(context.TODO(), 100, 200) - require.NoError(t, err) - require.NotNil(t, unclaims) - }) - - mockClient.AssertExpectations(t) -} - -func TestGetUnsetClaimsInChunks(t *testing.T) { - ctx := context.Background() - bridgeAddr := common.HexToAddress("0x1234567890abcdef1234567890abcdef12345678") - - t.Run("exact chunk boundaries", func(t *testing.T) { - mockClient := mocksethclient.NewBaseEthereumClienter(t) - reader, err := NewAgglayerBridgeL2Reader(bridgeAddr, mockClient) - require.NoError(t, err) - - // Mock 3 successful chunk calls (blocks 0-999, 1000-1999, 2000-2999) - mockClient.On("FilterLogs", mock.Anything, mock.MatchedBy(func(q interface{}) bool { - return true 
// Accept all filter queries for simplicity - })).Return([]ethtypes.Log{}, nil).Times(3) - - unclaims, err := aggkitcommon.ChunkedRangeQuery( - ctx, 0, 2999, 1000, - reader.fetchUnsetClaims, - func(all, chunk []claimsynctypes.Unclaim) []claimsynctypes.Unclaim { - return append(all, chunk...) - }, - []claimsynctypes.Unclaim{}, - ) - require.NoError(t, err) - require.NotNil(t, unclaims) - require.Empty(t, unclaims) // Empty results as we mocked empty logs - - mockClient.AssertExpectations(t) - }) - - t.Run("non-exact chunk boundaries", func(t *testing.T) { - mockClient := mocksethclient.NewBaseEthereumClienter(t) - reader, err := NewAgglayerBridgeL2Reader(bridgeAddr, mockClient) - require.NoError(t, err) - - // Mock 3 calls: 0-999, 1000-1999, 2000-2500 - mockClient.On("FilterLogs", mock.Anything, mock.Anything).Return([]ethtypes.Log{}, nil).Times(3) - - unclaims, err := aggkitcommon.ChunkedRangeQuery( - ctx, 0, 2500, 1000, - reader.fetchUnsetClaims, - func(all, chunk []claimsynctypes.Unclaim) []claimsynctypes.Unclaim { - return append(all, chunk...) - }, - []claimsynctypes.Unclaim{}, - ) - require.NoError(t, err) - require.NotNil(t, unclaims) - - mockClient.AssertExpectations(t) - }) - - t.Run("single chunk (range smaller than maxRange)", func(t *testing.T) { - mockClient := mocksethclient.NewBaseEthereumClienter(t) - reader, err := NewAgglayerBridgeL2Reader(bridgeAddr, mockClient) - require.NoError(t, err) - - mockClient.On("FilterLogs", mock.Anything, mock.Anything).Return([]ethtypes.Log{}, nil).Once() - - unclaims, err := aggkitcommon.ChunkedRangeQuery(ctx, 0, 500, 1000, - reader.fetchUnsetClaims, - func(all, chunk []claimsynctypes.Unclaim) []claimsynctypes.Unclaim { - return append(all, chunk...) 
- }, - []claimsynctypes.Unclaim{}, - ) - require.NoError(t, err) - require.NotNil(t, unclaims) - - mockClient.AssertExpectations(t) - }) - - t.Run("error in middle chunk", func(t *testing.T) { - mockClient := mocksethclient.NewBaseEthereumClienter(t) - reader, err := NewAgglayerBridgeL2Reader(bridgeAddr, mockClient) - require.NoError(t, err) - - // First chunk succeeds - mockClient.On("FilterLogs", mock.Anything, mock.Anything).Return([]ethtypes.Log{}, nil).Once() - // Second chunk fails - mockClient.On("FilterLogs", mock.Anything, mock.Anything).Return(nil, errors.New("rpc error")).Once() - - unclaims, err := aggkitcommon.ChunkedRangeQuery(ctx, 0, 2000, 1000, - reader.fetchUnsetClaims, - func(all, chunk []claimsynctypes.Unclaim) []claimsynctypes.Unclaim { - return append(all, chunk...) - }, - []claimsynctypes.Unclaim{}, - ) - require.ErrorContains(t, err, "rpc error") - require.Empty(t, unclaims) - - mockClient.AssertExpectations(t) - }) - - t.Run("zero maxRange", func(t *testing.T) { - mockClient := mocksethclient.NewBaseEthereumClienter(t) - reader, err := NewAgglayerBridgeL2Reader(bridgeAddr, mockClient) - require.NoError(t, err) - - // Should return error immediately without making any calls - unclaims, err := aggkitcommon.ChunkedRangeQuery(ctx, 0, 1000, 0, - reader.fetchUnsetClaims, - func(all, chunk []claimsynctypes.Unclaim) []claimsynctypes.Unclaim { - return append(all, chunk...) 
- }, - []claimsynctypes.Unclaim{}, - ) - require.ErrorContains(t, err, "maxRange must be greater than 0") - require.Empty(t, unclaims) - - // No FilterLogs calls should have been made - mockClient.AssertExpectations(t) - }) -} - -func TestGetUnsetClaimsForBlockRange_ChunkingIntegration(t *testing.T) { - ctx := context.Background() - bridgeAddr := common.HexToAddress("0x1234567890abcdef1234567890abcdef12345678") - - t.Run("normal fetch succeeds without chunking", func(t *testing.T) { - mockClient := mocksethclient.NewBaseEthereumClienter(t) - reader, err := NewAgglayerBridgeL2Reader(bridgeAddr, mockClient) - require.NoError(t, err) - - // Mock successful FilterLogs call - mockClient.On("FilterLogs", mock.Anything, mock.Anything).Return([]ethtypes.Log{}, nil).Once() - - unclaims, err := reader.GetUnsetClaimsForBlockRange(ctx, 0, 500) - require.NoError(t, err) - require.NotNil(t, unclaims) - - mockClient.AssertExpectations(t) - }) - - t.Run("block range too large triggers chunking", func(t *testing.T) { - mockClient := mocksethclient.NewBaseEthereumClienter(t) - reader, err := NewAgglayerBridgeL2Reader(bridgeAddr, mockClient) - require.NoError(t, err) - - // First call fails with "block range too large" - mockClient.On("FilterLogs", mock.Anything, mock.Anything). - Return(nil, errors.New("block range too large, max range: 1000")).Once() - - // Subsequent chunked calls succeed (0-999, 1000-1999, 2000-2500) - mockClient.On("FilterLogs", mock.Anything, mock.Anything). 
- Return([]ethtypes.Log{}, nil).Times(3) - - unclaims, err := reader.GetUnsetClaimsForBlockRange(ctx, 0, 2500) - require.NoError(t, err) - require.NotNil(t, unclaims) - - mockClient.AssertExpectations(t) - }) - - t.Run("non-parseable error returns original error", func(t *testing.T) { - mockClient := mocksethclient.NewBaseEthereumClienter(t) - reader, err := NewAgglayerBridgeL2Reader(bridgeAddr, mockClient) - require.NoError(t, err) - - // Return an error that doesn't match the pattern - mockClient.On("FilterLogs", mock.Anything, mock.Anything). - Return(nil, errors.New("some other RPC error")).Once() - - unclaims, err := reader.GetUnsetClaimsForBlockRange(ctx, 0, 2500) - require.ErrorContains(t, err, "some other RPC error") - require.Nil(t, unclaims) - - mockClient.AssertExpectations(t) - }) - - t.Run("chunking fails partway through", func(t *testing.T) { - mockClient := mocksethclient.NewBaseEthereumClienter(t) - reader, err := NewAgglayerBridgeL2Reader(bridgeAddr, mockClient) - require.NoError(t, err) - - // First call triggers chunking - mockClient.On("FilterLogs", mock.Anything, mock.Anything). - Return(nil, errors.New("block range too large, max range: 1000")).Once() - - // First chunk succeeds - mockClient.On("FilterLogs", mock.Anything, mock.Anything). - Return([]ethtypes.Log{}, nil).Once() - - // Second chunk fails - mockClient.On("FilterLogs", mock.Anything, mock.Anything). 
- Return(nil, errors.New("connection timeout")).Once() - - unclaims, err := reader.GetUnsetClaimsForBlockRange(ctx, 0, 2500) - require.ErrorContains(t, err, "connection timeout") - require.Empty(t, unclaims) - - mockClient.AssertExpectations(t) - }) -} diff --git a/bridgesync/downloader.go b/bridgesync/downloader.go index 8b27ff050..74793a997 100644 --- a/bridgesync/downloader.go +++ b/bridgesync/downloader.go @@ -71,14 +71,8 @@ func buildAppender( syncFromInBridges bool, bridgeDeployment *bridgeDeployment, logger *logger.Logger, - claimAppender sync.LogAppenderMap, ) (sync.LogAppenderMap, error) { - var appender sync.LogAppenderMap - if claimAppender != nil { - appender = claimAppender - } else { - appender = make(sync.LogAppenderMap) - } + appender := make(sync.LogAppenderMap) // Add event handlers for the bridge contract appender[bridgeEventSignature] = buildBridgeEventHandler( diff --git a/bridgesync/downloader_test.go b/bridgesync/downloader_test.go index ce1b122ec..342a8d395 100644 --- a/bridgesync/downloader_test.go +++ b/bridgesync/downloader_test.go @@ -9,13 +9,10 @@ import ( "github.com/0xPolygon/cdk-contracts-tooling/contracts/aggchain-multisig/agglayerbridge" "github.com/0xPolygon/cdk-contracts-tooling/contracts/aggchain-multisig/agglayerbridgel2" - "github.com/0xPolygon/cdk-contracts-tooling/contracts/aggchain-multisig/polygonzkevmbridge" bridgetypes "github.com/agglayer/aggkit/bridgesync/types" - "github.com/agglayer/aggkit/db" "github.com/agglayer/aggkit/etherman" logger "github.com/agglayer/aggkit/log" "github.com/agglayer/aggkit/sync" - treetypes "github.com/agglayer/aggkit/tree/types" "github.com/agglayer/aggkit/types/mocks" "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/common" @@ -324,322 +321,6 @@ func TestBuildAppender(t *testing.T) { return l, nil }, }, - { - name: "claimEventSignaturePreEtrog appender", - eventSignature: claimEventSignaturePreEtrog, - deploymentKind: NonSovereignChain, - logsCount: 1, - logBuilder: func() 
(types.Log, error) { - bridgeV1Abi, err := polygonzkevmbridge.PolygonzkevmbridgeMetaData.GetAbi() - require.NoError(t, err) - - event, err := bridgeV1Abi.EventByID(claimEventSignaturePreEtrog) - if err != nil { - return types.Log{}, err - } - - index := uint32(5) - originNetwork := uint32(6) - originAddress := common.HexToAddress("0x20") - destinationAddress := common.HexToAddress("0x30") - amount := big.NewInt(10) - data, err := event.Inputs.Pack( - index, originNetwork, - originAddress, destinationAddress, amount) - if err != nil { - return types.Log{}, err - } - - l := types.Log{ - Topics: []common.Hash{claimEventSignaturePreEtrog}, - Data: data, - } - return l, nil - }, - }, - { - name: "claimEventSignature appender", - eventSignature: claimEventSignature, - deploymentKind: NonSovereignChain, - logsCount: 1, - buildQuerierMockFunc: func() *BridgeQuerierMock { - querierMock := NewBridgeQuerierMock(t) - querierMock.EXPECT(). - GetBoundaryBlockForClaimType(mock.Anything, mock.Anything). - Return(0, db.ErrNotFound). - Once() - - return querierMock - }, - logBuilder: func() (types.Log, error) { - event, err := bridgeL2Abi.EventByID(claimEventSignature) - if err != nil { - return types.Log{}, err - } - - globalIndex := big.NewInt(5) - originNetwork := uint32(6) - originAddress := common.HexToAddress("0x20") - destinationAddress := common.HexToAddress("0x30") - amount := big.NewInt(10) - data, err := event.Inputs.Pack( - globalIndex, originNetwork, - originAddress, destinationAddress, amount) - if err != nil { - return types.Log{}, err - } - - l := types.Log{ - Topics: []common.Hash{claimEventSignature}, - Data: data, - } - return l, nil - }, - }, - { - name: "claimEventSignature appender skipping due to boundary block", - eventSignature: claimEventSignature, - deploymentKind: SovereignChain, - logsCount: 0, - buildQuerierMockFunc: func() *BridgeQuerierMock { - querierMock := NewBridgeQuerierMock(t) - querierMock.EXPECT(). 
- GetBoundaryBlockForClaimType(mock.Anything, mock.Anything). - Return(10, nil). - Once() - - return querierMock - }, - logBuilder: func() (types.Log, error) { - event, err := bridgeL2Abi.EventByID(claimEventSignature) - if err != nil { - return types.Log{}, err - } - - globalIndex := big.NewInt(5) - originNetwork := uint32(6) - originAddress := common.HexToAddress("0x20") - destinationAddress := common.HexToAddress("0x30") - amount := big.NewInt(10) - data, err := event.Inputs.Pack( - globalIndex, originNetwork, - originAddress, destinationAddress, amount) - if err != nil { - return types.Log{}, err - } - - l := types.Log{ - BlockNumber: 12, - Topics: []common.Hash{claimEventSignature}, - Data: data, - } - return l, nil - }, - }, - { - name: "detailedClaimEventSignature appender", - eventSignature: detailedClaimEventSignature, - deploymentKind: SovereignChain, - logsCount: 1, - logBuilder: func() (types.Log, error) { - event, err := bridgeL2Abi.EventByID(detailedClaimEventSignature) - if err != nil { - return types.Log{}, err - } - - // indexed args - globalIndex := common.BigToHash(big.NewInt(5)) - destinationAddress := common.HexToHash(common.HexToAddress("0x30").Hex()) - - // non-indexed args - lerProof := [treetypes.DefaultHeight]common.Hash{} - rerProof := [treetypes.DefaultHeight]common.Hash{} - mainnetExitRoot := common.HexToHash("5ca1e") - rollupExitRoot := common.HexToHash("5ca1e1") - leafType := bridgetypes.LeafTypeAsset - originNet := uint32(6) - originAddress := common.HexToAddress("0x20") - destinationNet := uint32(7) - amount := big.NewInt(10) - metadata := []byte{} - data, err := event.Inputs.NonIndexed().Pack(lerProof, rerProof, mainnetExitRoot, rollupExitRoot, - leafType, originNet, originAddress, destinationNet, amount, metadata) - if err != nil { - return types.Log{}, err - } - - return types.Log{ - Topics: []common.Hash{ - detailedClaimEventSignature, - globalIndex, - destinationAddress, - }, - Data: data, - }, nil - }, - }, - { - name: 
"tokenMappingEventSignature appender", - eventSignature: tokenMappingEventSignature, - deploymentKind: NonSovereignChain, - logsCount: 1, - logBuilder: func() (types.Log, error) { - event, err := bridgeL2Abi.EventByID(tokenMappingEventSignature) - if err != nil { - return types.Log{}, err - } - - originNetwork := uint32(10) - originTokenAddress := common.HexToAddress("0x20") - wrappedTokenAddress := common.HexToAddress("0x30") - metadata := []byte{0x40} - data, err := event.Inputs.Pack( - originNetwork, originTokenAddress, - wrappedTokenAddress, metadata) - if err != nil { - return types.Log{}, err - } - - l := types.Log{ - Topics: []common.Hash{tokenMappingEventSignature}, - Data: data, - } - return l, nil - }, - }, - { - name: "setSovereignTokenAddress appender", - eventSignature: setSovereignTokenEventSignature, - deploymentKind: SovereignChain, - logsCount: 1, - logBuilder: func() (types.Log, error) { - event, err := bridgeL2Abi.EventByID(setSovereignTokenEventSignature) - if err != nil { - return types.Log{}, err - } - - originNetwork := uint32(15) - originTokenAddress := common.HexToAddress("0x25") - sovereignTokenAddress := common.HexToAddress("0x35") - isNotMintable := true - data, err := event.Inputs.Pack( - originNetwork, originTokenAddress, - sovereignTokenAddress, isNotMintable) - if err != nil { - return types.Log{}, err - } - - l := types.Log{ - Topics: []common.Hash{setSovereignTokenEventSignature}, - Data: data, - } - return l, nil - }, - }, - { - name: "legacyTokenMigration appender", - eventSignature: migrateLegacyTokenEventSignature, - deploymentKind: SovereignChain, - logsCount: 1, - logBuilder: func() (types.Log, error) { - event, err := bridgeL2Abi.EventByID(migrateLegacyTokenEventSignature) - if err != nil { - return types.Log{}, err - } - - senderAddr := common.HexToAddress("0x5") - legacyTokenAddr := common.HexToAddress("0x10") - updatedTokenAddr := common.HexToAddress("0x20") - amount := big.NewInt(150) - data, err := event.Inputs.Pack( - 
senderAddr, legacyTokenAddr, - updatedTokenAddr, amount) - if err != nil { - return types.Log{}, err - } - - l := types.Log{ - Topics: []common.Hash{migrateLegacyTokenEventSignature}, - Data: data, - } - return l, nil - }, - }, - { - name: "removeLegacySovereignTokenAddress appender", - eventSignature: removeLegacySovereignTokenEventSignature, - deploymentKind: SovereignChain, - logsCount: 1, - logBuilder: func() (types.Log, error) { - event, err := bridgeL2Abi.EventByID(removeLegacySovereignTokenEventSignature) - if err != nil { - return types.Log{}, err - } - - sovereignTokenAddr := common.HexToAddress("0x5") - data, err := event.Inputs.Pack(sovereignTokenAddr) - if err != nil { - return types.Log{}, err - } - - l := types.Log{ - Topics: []common.Hash{removeLegacySovereignTokenEventSignature}, - Data: data, - } - return l, nil - }, - }, - { - name: "unsetClaimEventSignature appender", - eventSignature: unsetClaimEventSignature, - deploymentKind: SovereignChain, - logsCount: 1, - logBuilder: func() (types.Log, error) { - event, err := bridgeL2Abi.EventByID(unsetClaimEventSignature) - if err != nil { - return types.Log{}, err - } - - unsetGlobalIndex := [32]byte{} - copy(unsetGlobalIndex[:], big.NewInt(12345).Bytes()) - newUnsetGlobalIndexHashChain := common.HexToHash("0x27ae5ba08d7291c96c8cbddcc148bf48a6d68c7974b94356f53754ef6171d757") - - data, err := event.Inputs.Pack(unsetGlobalIndex, newUnsetGlobalIndexHashChain) - if err != nil { - return types.Log{}, err - } - - l := types.Log{ - Topics: []common.Hash{unsetClaimEventSignature}, - Data: data, - } - return l, nil - }, - }, - { - name: "setClaimEventSignature appender", - eventSignature: setClaimEventSignature, - deploymentKind: SovereignChain, - logsCount: 1, - logBuilder: func() (types.Log, error) { - event, err := bridgeL2Abi.EventByID(setClaimEventSignature) - if err != nil { - return types.Log{}, err - } - - globalIndexBytes := 
common.HexToHash("0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef") - data, err := event.Inputs.Pack(globalIndexBytes) - if err != nil { - return types.Log{}, err - } - - l := types.Log{ - Topics: []common.Hash{setClaimEventSignature}, - Data: data, - } - return l, nil - }, - }, { name: "backwardLETSignature appender", eventSignature: backwardLETEventSignature, @@ -710,11 +391,7 @@ func TestBuildAppender(t *testing.T) { logger := logger.WithFields("module", "test") bridgeDeployment.kind = tt.deploymentKind - var querierMock *BridgeQuerierMock - if tt.buildQuerierMockFunc != nil { - querierMock = tt.buildQuerierMockFunc() - } - appenderMap, err := buildAppender(t.Context(), ethClient, querierMock, bridgeAddr, false, true, bridgeDeployment, logger) + appenderMap, err := buildAppender(t.Context(), ethClient, bridgeAddr, false, bridgeDeployment, logger) if tt.expectedErr == "" { require.NoError(t, err) require.NotNil(t, appenderMap) @@ -886,100 +563,6 @@ func TestFindCallWithOnlyUnrecognizedMethods(t *testing.T) { require.Contains(t, err.Error(), "not found") } -func TestTryDecodeClaimCalldata(t *testing.T) { - c := &Claim{} - logger := logger.WithFields("module", "test") - - // Short input should return false, error - found, err := c.tryDecodeClaimCalldata([]byte{0x01, 0x02, 0x03}, logger) - require.Error(t, err) - require.Contains(t, err.Error(), "input too short: 3 bytes") - require.False(t, found) - - // Unknown method ID should return false, nil (not error anymore) - input := make([]byte, methodIDLength) - copy(input, []byte{0xaa, 0xbb, 0xcc, 0xdd}) - found, err = c.tryDecodeClaimCalldata(input, logger) - require.NoError(t, err) // Should not return error anymore - require.False(t, found) - - // Test getProxiedTokensManager method ID (38b8fbbb) - getProxiedTokensManagerID := []byte{0x38, 0xb8, 0xfb, 0xbb} - found, err = c.tryDecodeClaimCalldata(getProxiedTokensManagerID, logger) - require.NoError(t, err) // Should not return error - 
require.False(t, found) // Should return false (not a claim method) - - // Valid method ID (simulate claimAssetEtrogMethodID) - copy(input, claimAssetEtrogMethodID) - // The rest of the input is not valid ABI, so it will error on unpack - found, err = c.tryDecodeClaimCalldata(input, logger) - require.Error(t, err) - require.False(t, found) -} - -func TestSetClaimCalldataFromRoot(t *testing.T) { - bridgeAddr := common.HexToAddress("0x10") - logger := logger.WithFields("module", "test") - - // Case 1: Root call successful, valid internal call - rootCall := &Call{ - To: common.HexToAddress("0x01"), - Err: nil, - Calls: []Call{ - { - To: bridgeAddr, - From: common.HexToAddress("0x20"), - Err: nil, - Input: append(claimAssetEtrogMethodID, []byte{0x00, 0x01, 0x02, 0x03}...), // not valid ABI, but triggers methodID match - }, - }, - } - - claim := &Claim{} - err := claim.setClaimCalldataFromRoot(rootCall, bridgeAddr, logger) - require.Error(t, err) - require.Contains(t, err.Error(), "length insufficient") - - // Case 2: Root call reverted - rootCall = &Call{ - To: bridgeAddr, - Err: strPtr("reverted"), - } - - claim = &Claim{} - err = claim.setClaimCalldataFromRoot(rootCall, bridgeAddr, logger) - require.Error(t, err) - require.Contains(t, err.Error(), "not found") - - // Case 3: All internal calls reverted - rootCall = &Call{ - To: common.HexToAddress("0x01"), - Err: nil, - Calls: []Call{ - { - To: bridgeAddr, - Err: strPtr("reverted"), - }, - }, - } - - claim = &Claim{} - err = claim.setClaimCalldataFromRoot(rootCall, bridgeAddr, logger) - require.Error(t, err) - require.Contains(t, err.Error(), "not found") - - // Case 4: No matching call - rootCall = &Call{ - To: common.HexToAddress("0x01"), - Err: nil, - Calls: []Call{}, - } - - claim = &Claim{} - err = claim.setClaimCalldataFromRoot(rootCall, bridgeAddr, logger) - require.Error(t, err) - require.Contains(t, err.Error(), "not found") -} func TestTxnSenderField(t *testing.T) { bridgeAddr := common.HexToAddress("0x10") 
@@ -989,11 +572,6 @@ func TestTxnSenderField(t *testing.T) { agglayerBridgeABI, err := agglayerbridge.AgglayerbridgeMetaData.GetAbi() require.NoError(t, err) - querierMock := NewBridgeQuerierMock(t) - querierMock.EXPECT(). - GetBoundaryBlockForClaimType(mock.Anything, mock.Anything). - Return(0, db.ErrNotFound). - Maybe() tests := []struct { name string @@ -1048,48 +626,6 @@ func TestTxnSenderField(t *testing.T) { return l, nil }, }, - { - name: "claimEventSignature with TxnSender", - eventSignature: claimEventSignature, - callFrame: Call{ - To: common.HexToAddress("0x01"), - From: expectedTxnSender, - Err: nil, - Calls: []Call{ - { - To: bridgeAddr, - From: common.HexToAddress("0x20"), - Err: nil, - Input: BridgeAssetMethodID, - }, - }, - }, - expectedTxnSender: expectedTxnSender, - logBuilder: func() (types.Log, error) { - event, err := agglayerBridgeABI.EventByID(claimEventSignature) - if err != nil { - return types.Log{}, err - } - - globalIndex := big.NewInt(5) - originNetwork := uint32(6) - originAddress := common.HexToAddress("0x20") - destinationAddress := common.HexToAddress("0x30") - amount := big.NewInt(10) - data, err := event.Inputs.Pack( - globalIndex, originNetwork, - originAddress, destinationAddress, amount) - if err != nil { - return types.Log{}, err - } - - l := types.Log{ - Topics: []common.Hash{claimEventSignature}, - Data: data, - } - return l, nil - }, - }, } for _, tt := range tests { @@ -1136,7 +672,7 @@ func TestTxnSenderField(t *testing.T) { kind: NonSovereignChain, agglayerBridge: agglayerBridge, } - appenderMap, err := buildAppender(t.Context(), ethClient, querierMock, bridgeAddr, false, true, bridgeDeployment, logger) + appenderMap, err := buildAppender(t.Context(), ethClient, bridgeAddr, false, bridgeDeployment, logger) require.NoError(t, err) require.NotNil(t, appenderMap) diff --git a/bridgesync/processor.go b/bridgesync/processor.go index a0258b9e9..0d0d6c703 100644 --- a/bridgesync/processor.go +++ b/bridgesync/processor.go @@ 
-14,7 +14,6 @@ import ( bridgetypes "github.com/agglayer/aggkit/bridgeservice/types" "github.com/agglayer/aggkit/bridgesync/migrations" - claimsynctypes "github.com/agglayer/aggkit/claimsync/types" aggkitcommon "github.com/agglayer/aggkit/common" "github.com/agglayer/aggkit/db" "github.com/agglayer/aggkit/db/compatibility" @@ -484,8 +483,6 @@ type processor struct { dbQueryTimeout time.Duration bridgeSubscriber aggkitcommon.PubSub[uint64] initialLER common.Hash - claimEventsProcessor claimsynctypes.EmbeddedProcessor - compatibility.CompatibilityDataStorager[BridgeSyncRuntimeData] } @@ -506,18 +503,16 @@ func newProcessor( syncerID string, logger *log.Logger, dbQueryTimeout time.Duration, - claimEventsProcessor claimsynctypes.EmbeddedProcessor, ) (*processor, error) { exitTree := tree.NewAppendOnlyTree(database, "") return &processor{ - syncerID: syncerID, - db: database, - exitTree: exitTree, - log: logger, - dbQueryTimeout: dbQueryTimeout, - bridgeSubscriber: aggkitcommon.NewGenericSubscriber[uint64](), - claimEventsProcessor: claimEventsProcessor, + syncerID: syncerID, + db: database, + exitTree: exitTree, + log: logger, + dbQueryTimeout: dbQueryTimeout, + bridgeSubscriber: aggkitcommon.NewGenericSubscriber[uint64](), CompatibilityDataStorager: compatibility.NewKeyValueToCompatibilityStorage[BridgeSyncRuntimeData]( db.NewKeyValueStorage(database), syncerID, @@ -648,25 +643,6 @@ func (p *processor) buildBridgesFilterClause(depositCount *uint64, networkIDs [] return "", nil } -// GetBoundaryBlockForClaimType returns the max (latest) block number for a given claim type -func (p *processor) GetBoundaryBlockForClaimType(ctx context.Context, claimType claimsynctypes.ClaimType) (uint64, error) { - dbCtx, cancel := p.withDatabaseTimeout(ctx) - defer cancel() - - query := `SELECT MAX(block_num) FROM claim WHERE type = $1;` - var blockNumber *uint64 - if err := p.db.QueryRowContext(dbCtx, query, claimType).Scan(&blockNumber); err != nil { - return 0, err - } - - if 
blockNumber == nil { - p.log.Debugf("no boundary block found for claim type %s", claimType) - return 0, db.ErrNotFound - } - - return *blockNumber, nil -} - // buildGlobalIndexFilterClause builds a WHERE clause for filtering by global_index func buildGlobalIndexFilterClause(globalIndex *big.Int) string { if globalIndex != nil { @@ -1039,17 +1015,6 @@ func (p *processor) ProcessBlock(ctx context.Context, block sync.Block) error { for _, e := range block.Events { event, ok := e.(Event) if !ok { - // Try to process with embedded claimProcessor - if p.claimEventsProcessor != nil { - if err := p.claimEventsProcessor.ProcessBlockWithTx(dbCtx, tx, block, e); err != nil { - p.log.Errorf("ProcessBlock: failed to process event type %T using embedded claimProcessor in block %d: %v", - e, - block.Num, err) - return err - } - // It have been processed by embedded claimProcessor, do next item in loop - continue - } err = fmt.Errorf("ProcessBlock: failed to convert event %T to Event type in block %d", e, block.Num) p.log.Errorf(err.Error()) return err diff --git a/bridgesync/processor_test.go b/bridgesync/processor_test.go index ae4e1e7bf..e8210d640 100644 --- a/bridgesync/processor_test.go +++ b/bridgesync/processor_test.go @@ -10,11 +10,8 @@ import ( "os" "path" "path/filepath" - "reflect" - "regexp" "slices" "sort" - "strings" "testing" "time" @@ -38,6 +35,15 @@ import ( const dbQueryTimeout = 30 * time.Second +// newTestProcessor is a test helper that creates a processor from a file path. 
+func newTestProcessor(dbPath string, syncerID string, logger *log.Logger, dbQueryTimeout time.Duration) (*processor, error) { + database, err := newSqliteDB(dbPath) + if err != nil { + return nil, err + } + return newProcessor(database, syncerID, logger, dbQueryTimeout) +} + func TestBigIntString(t *testing.T) { globalIndex := GenerateGlobalIndex(true, 0, 1093) fmt.Println(globalIndex.String()) @@ -97,7 +103,7 @@ func TestBigIntString(t *testing.T) { func TestProcessor(t *testing.T) { path := path.Join(t.TempDir(), "bridgeSyncerProcessor.db") logger := log.WithFields("module", "bridge-syncer") - p, err := newProcessor(path, "bridge-syncer", logger, dbQueryTimeout, nil) + p, err := newTestProcessor(path, "bridge-syncer", logger, dbQueryTimeout) require.NoError(t, err) actions := []processAction{ // processed: ~ @@ -134,15 +140,6 @@ func TestProcessor(t *testing.T) { expectedLastProcessedBlock: 1, expectedErr: nil, }, - &getClaims{ - p: p, - description: "after block1: range 1, 1", - ctx: context.Background(), - fromBlock: 1, - toBlock: 1, - expectedClaims: eventsToClaims(block1.Events), - expectedErr: nil, - }, &getBridges{ p: p, description: "after block1: range 1, 1", @@ -179,27 +176,6 @@ func TestProcessor(t *testing.T) { expectedLastProcessedBlock: 3, expectedErr: nil, }, - &getClaims{ - p: p, - description: "after block3: range 2, 2", - ctx: context.Background(), - fromBlock: 2, - toBlock: 2, - expectedClaims: []Claim{}, - expectedErr: nil, - }, - &getClaims{ - p: p, - description: "after block3: range 1, 3", - ctx: context.Background(), - fromBlock: 1, - toBlock: 3, - expectedClaims: append( - eventsToClaims(block1.Events), - eventsToClaims(block3.Events)..., - ), - expectedErr: nil, - }, &getBridges{ p: p, description: "after block3: range 2, 2", @@ -276,56 +252,6 @@ func TestProcessor(t *testing.T) { expectedLastProcessedBlock: 5, expectedErr: nil, }, - &getClaims{ - p: p, - description: "after block5: range 1, 3", - ctx: context.Background(), - fromBlock: 
1, - toBlock: 3, - expectedClaims: append( - eventsToClaims(block1.Events), - eventsToClaims(block3.Events)..., - ), - expectedErr: nil, - }, - &getClaims{ - p: p, - description: "after block5: range 4, 5", - ctx: context.Background(), - fromBlock: 4, - toBlock: 5, - expectedClaims: append( - eventsToClaims(block4.Events), - eventsToClaims(block5.Events)..., - ), - expectedErr: nil, - }, - &getClaims{ - p: p, - description: "after block5: range 0, 5", - ctx: context.Background(), - fromBlock: 0, - toBlock: 5, - expectedClaims: slices.Concat( - eventsToClaims(block1.Events), - eventsToClaims(block3.Events), - eventsToClaims(block4.Events), - eventsToClaims(block5.Events), - ), - expectedErr: nil, - }, - &getTotalRecordsAction{ - p: p, - description: "get number of claims after block5", - tableName: claimTableName, - expectedRecordsNum: len( - slices.Concat( - eventsToClaims(block1.Events), - eventsToClaims(block3.Events), - eventsToClaims(block4.Events), - eventsToClaims(block5.Events), - )), - }, &reorgAction{ p: p, description: "reorg the last block", @@ -366,17 +292,6 @@ var ( Metadata: common.Hex2Bytes("1"), DepositCount: 0, }}, - Event{Claim: &Claim{ - BlockNum: 1, - BlockPos: 1, - GlobalIndex: big.NewInt(1), - OriginNetwork: 1, - OriginAddress: common.HexToAddress("1"), - DestinationAddress: common.HexToAddress("1"), - Amount: big.NewInt(1), - MainnetExitRoot: common.Hash{}, - Type: DetailedClaimEvent, - }}, Event{TokenMapping: &TokenMapping{ BlockNum: 1, BlockPos: 2, @@ -451,26 +366,6 @@ var ( block5 = sync.Block{ Num: 5, Events: []any{ - Event{Claim: &Claim{ - BlockNum: 5, - BlockPos: 0, - GlobalIndex: big.NewInt(4), - OriginNetwork: 4, - OriginAddress: common.HexToAddress("04"), - DestinationAddress: common.HexToAddress("04"), - Amount: big.NewInt(4), - MainnetExitRoot: common.Hash{}, - }}, - Event{Claim: &Claim{ - BlockNum: 5, - BlockPos: 1, - GlobalIndex: big.NewInt(5), - OriginNetwork: 5, - OriginAddress: common.HexToAddress("05"), - DestinationAddress: 
common.HexToAddress("05"), - Amount: big.NewInt(5), - MainnetExitRoot: common.Hash{}, - }}, Event{LegacyTokenMigration: &LegacyTokenMigration{ BlockNum: 5, BlockPos: 2, @@ -504,33 +399,6 @@ type processAction interface { execute(t *testing.T) } -// GetClaims - -type getClaims struct { - p *processor - description string - ctx context.Context - fromBlock uint64 - toBlock uint64 - expectedClaims []Claim - expectedErr error -} - -func (a *getClaims) method() string { - return "GetClaims" -} - -func (a *getClaims) desc() string { - return a.description -} - -func (a *getClaims) execute(t *testing.T) { - t.Helper() - actualEvents, actualErr := a.p.GetClaims(a.ctx, a.fromBlock, a.toBlock) - require.Equal(t, a.expectedErr, actualErr) - require.Equal(t, a.expectedClaims, actualEvents) -} - // GetBridges type getBridges struct { @@ -579,7 +447,7 @@ func (a *getLastProcessedBlockAction) desc() string { func (a *getLastProcessedBlockAction) execute(t *testing.T) { t.Helper() - actualLastProcessedBlock, actualErr := a.p.GetLastProcessedBlock(a.ctx) + actualLastProcessedBlock, _, actualErr := a.p.GetLastProcessedBlock(a.ctx) require.Equal(t, a.expectedLastProcessedBlock, actualLastProcessedBlock) require.Equal(t, a.expectedErr, actualErr) } @@ -671,19 +539,6 @@ func eventsToBridges(events []any) []Bridge { return bridges } -func eventsToClaims(events []any) []Claim { - claims := []Claim{} - for _, event := range events { - e, ok := event.(Event) - if !ok { - panic("should be ok") - } - if e.Claim != nil { - claims = append(claims, *e.Claim) - } - } - return claims -} func TestHashBridge(t *testing.T) { data, err := os.ReadFile("../tree/testvectors/leaf-vectors.json") @@ -865,49 +720,6 @@ func TestDecodeGlobalIndex(t *testing.T) { } } -func TestInsertAndGetClaim(t *testing.T) { - path := path.Join(t.TempDir(), "TestInsertAndGetClaim.sqlite") - err := migrations.RunMigrations(path) - require.NoError(t, err) - logger := log.WithFields("bridge-syncer", "foo") - p, err := 
newProcessor(path, "foo", logger, dbQueryTimeout, nil) - require.NoError(t, err) - - tx, err := p.db.BeginTx(context.Background(), nil) - require.NoError(t, err) - - // insert test claim - testClaim := Claim{ - BlockNum: 1, - BlockPos: 0, - GlobalIndex: GenerateGlobalIndexForNetworkID(0, 1093), - OriginNetwork: 11, - OriginAddress: common.HexToAddress("0x11"), - DestinationAddress: common.HexToAddress("0x11"), - Amount: big.NewInt(11), - ProofLocalExitRoot: types.Proof{}, - ProofRollupExitRoot: types.Proof{}, - MainnetExitRoot: common.Hash{}, - RollupExitRoot: common.Hash{}, - GlobalExitRoot: common.Hash{}, - DestinationNetwork: 12, - Metadata: []byte("0x11"), - IsMessage: false, - Type: ClaimEvent, - } - - _, err = tx.Exec(`INSERT INTO block (num, hash) VALUES ($1, $2)`, testClaim.BlockNum, fmt.Sprintf("0x%x", testClaim.BlockNum)) - require.NoError(t, err) - require.NoError(t, meddler.Insert(tx, "claim", &testClaim)) - - require.NoError(t, tx.Commit()) - - // get test claim - claims, err := p.GetClaims(context.Background(), 1, 1) - require.NoError(t, err) - require.Len(t, claims, 1) - require.Equal(t, testClaim, claims[0]) -} func TestGetBridgesPublished(t *testing.T) { t.Parallel() @@ -956,7 +768,7 @@ func TestGetBridgesPublished(t *testing.T) { path := path.Join(t.TempDir(), fmt.Sprintf("bridgesyncTestGetBridgesPublished_%s.sqlite", tc.name)) require.NoError(t, migrations.RunMigrations(path)) logger := log.WithFields("bridge-syncer", "foo") - p, err := newProcessor(path, "foo", logger, dbQueryTimeout, nil) + p, err := newTestProcessor(path, "foo", logger, dbQueryTimeout) require.NoError(t, err) tx, err := p.db.BeginTx(context.Background(), nil) @@ -989,7 +801,7 @@ func TestGetBridgesPublished(t *testing.T) { func TestProcessBlockInvalidIndex(t *testing.T) { path := path.Join(t.TempDir(), "aggsenderTestProcessor.sqlite") logger := log.WithFields("bridge-syncer", "foo") - p, err := newProcessor(path, "foo", logger, dbQueryTimeout, nil) + p, err := 
newTestProcessor(path, "foo", logger, dbQueryTimeout) require.NoError(t, err) err = p.ProcessBlock(context.Background(), sync.Block{ Num: 0, @@ -1041,7 +853,7 @@ func TestGetBridgesPaged(t *testing.T) { path := path.Join(t.TempDir(), "bridgesyncGetBridgesPaged.sqlite") require.NoError(t, migrations.RunMigrations(path)) logger := log.WithFields("bridge-syncer", "foo") - p, err := newProcessor(path, "bridge-syncer", logger, dbQueryTimeout, nil) + p, err := newTestProcessor(path, "bridge-syncer", logger, dbQueryTimeout) require.NoError(t, err) tx, err := p.db.BeginTx(context.Background(), nil) @@ -1236,156 +1048,6 @@ func TestGetBridgesPaged(t *testing.T) { } } -func TestGetClaimsPaged(t *testing.T) { - t.Parallel() - fromBlock := uint64(1) - toBlock := uint64(10) - - // Compute uint256 max: 2^256 - 1 - uint256Max := new(big.Int).Sub(new(big.Int).Exp(big.NewInt(2), big.NewInt(256), nil), big.NewInt(1)) - // Compute uint64 max: 2^64 - 1 = 18446744073709551615 - uint64Max := new(big.Int).Sub(new(big.Int).Exp(big.NewInt(2), big.NewInt(64), nil), big.NewInt(1)) - num1 := new(big.Int) - num1.SetString("18446744073709551617", 10) - num2 := new(big.Int) - num2.SetString("18446744073709551618", 10) - - claims := []*Claim{ - {BlockNum: 1, GlobalIndex: num2, Amount: big.NewInt(1), OriginNetwork: 1, MainnetExitRoot: common.Hash{}}, - {BlockNum: 2, GlobalIndex: big.NewInt(2), Amount: big.NewInt(1), OriginNetwork: 1, MainnetExitRoot: common.Hash{}}, - {BlockNum: 3, GlobalIndex: uint64Max, Amount: big.NewInt(1), OriginNetwork: 2, MainnetExitRoot: common.Hash{}}, - {BlockNum: 4, GlobalIndex: num1, Amount: big.NewInt(1), OriginNetwork: 2, MainnetExitRoot: common.Hash{}}, - {BlockNum: 5, GlobalIndex: big.NewInt(5), Amount: big.NewInt(1), OriginNetwork: 3, MainnetExitRoot: common.Hash{}}, - {BlockNum: 6, GlobalIndex: uint256Max, Amount: big.NewInt(1), OriginNetwork: 4, MainnetExitRoot: common.Hash{}}, - } - - path := path.Join(t.TempDir(), "bridgesyncGetClaimsPaged.sqlite") - 
require.NoError(t, migrations.RunMigrations(path)) - logger := log.WithFields("module", "bridge-syncer") - p, err := newProcessor(path, "bridge-syncer", logger, dbQueryTimeout, nil) - require.NoError(t, err) - - tx, err := p.db.BeginTx(context.Background(), nil) - require.NoError(t, err) - - for i := fromBlock; i <= toBlock; i++ { - _, err = tx.Exec(`INSERT INTO block (num) VALUES ($1)`, i) - require.NoError(t, err) - } - - for _, claim := range claims { - require.NoError(t, meddler.Insert(tx, "claim", claim)) - } - require.NoError(t, tx.Commit()) - - testCases := []struct { - name string - pageSize uint32 - page uint32 - networkIDs []uint32 - globalIndex *big.Int - expectedCount int - expectedClaims []*Claim - expectedError string - }{ - { - name: "pagination: page 2, size 1", - pageSize: 1, - page: 2, - expectedCount: len(claims), - expectedClaims: []*Claim{claims[4]}, - expectedError: "", - }, - { - name: "all results on the same page", - pageSize: 20, - page: 1, - expectedCount: len(claims), - expectedClaims: []*Claim{claims[5], claims[4], claims[3], claims[2], claims[1], claims[0]}, - expectedError: "", - }, - { - name: "pagination: page 2, size 3", - pageSize: 3, - page: 2, - expectedCount: len(claims), - expectedClaims: []*Claim{claims[2], claims[1], claims[0]}, - expectedError: "", - }, - { - name: "invalid page size", - pageSize: 3, - page: 4, - expectedCount: 0, - expectedClaims: []*Claim{}, - expectedError: "invalid page number for given page size and total number of claims", - }, - { - name: "filter by network ids (all results within the same page)", - pageSize: 3, - page: 1, - networkIDs: []uint32{claims[0].OriginNetwork, claims[4].OriginNetwork}, - expectedCount: 3, - expectedClaims: []*Claim{claims[4], claims[1], claims[0]}, - expectedError: "", - }, - { - name: "filter by network ids (paginated results)", - pageSize: 1, - page: 2, - networkIDs: []uint32{claims[0].OriginNetwork, claims[4].OriginNetwork}, - expectedCount: 3, - expectedClaims: 
[]*Claim{claims[1]}, - expectedError: "", - }, - { - name: "filter by network ids (all results within the same page) and from address", - pageSize: 3, - page: 1, - networkIDs: []uint32{claims[0].OriginNetwork, claims[4].OriginNetwork}, - expectedCount: 3, - expectedClaims: []*Claim{claims[4], claims[1], claims[0]}, - expectedError: "", - }, - { - name: "filter by global index", - pageSize: 3, - page: 1, - globalIndex: big.NewInt(5), - expectedCount: 1, - expectedClaims: []*Claim{claims[4]}, - expectedError: "", - }, - { - name: "filter by network ids and global index", - pageSize: 3, - page: 1, - networkIDs: []uint32{2, 3, 4}, - globalIndex: uint64Max, - expectedCount: 1, - expectedClaims: []*Claim{claims[2]}, - expectedError: "", - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - t.Parallel() - - ctx := context.Background() - claims, count, err := p.GetClaimsPaged(ctx, tc.page, tc.pageSize, - tc.networkIDs, tc.globalIndex) - - if tc.expectedError != "" { - require.ErrorContains(t, err, tc.expectedError) - } else { - require.NoError(t, err) - require.Equal(t, tc.expectedClaims, claims) - require.Equal(t, tc.expectedCount, count) - } - }) - } -} func TestProcessor_GetTokenMappings(t *testing.T) { t.Parallel() @@ -1397,7 +1059,7 @@ func TestProcessor_GetTokenMappings(t *testing.T) { require.NoError(t, err) logger := log.WithFields("module", "bridge-syncer") - p, err := newProcessor(path, "bridge-syncer", logger, dbQueryTimeout, nil) + p, err := newTestProcessor(path, "bridge-syncer", logger, dbQueryTimeout) require.NoError(t, err) allTokenMappings := make([]*TokenMapping, 0, tokenMappingsCount) @@ -1496,7 +1158,7 @@ func TestProcessor_GetLegacyTokenMigrations(t *testing.T) { require.NoError(t, err) logger := log.WithFields("module", "bridge-syncer") - p, err := newProcessor(path, "bridge-syncer", logger, dbQueryTimeout, nil) + p, err := newTestProcessor(path, "bridge-syncer", logger, dbQueryTimeout) require.NoError(t, err) const ( @@ 
-1632,7 +1294,7 @@ func TestDecodePreEtrogCalldata_Valid(t *testing.T) { claimAssetData, err := method.Inputs.Unpack(claimAssetInput[4:]) require.NoError(t, err) - isFound, err := actualClaim.decodePreEtrogCalldata(claimAssetData) + isFound, err := actualClaim.DecodePreEtrogCalldata(claimAssetData) require.NoError(t, err) require.True(t, isFound) @@ -1829,7 +1491,7 @@ func TestDecodePreEtrogCalldata(t *testing.T) { Metadata: nil, } - match, err := claim.decodePreEtrogCalldata(tt.data) + match, err := claim.DecodePreEtrogCalldata(tt.data) if tt.expectError { require.Error(t, err) @@ -2026,7 +1688,7 @@ func TestDecodeEtrogCalldata(t *testing.T) { t.Run(tt.name, func(t *testing.T) { claim := &Claim{GlobalIndex: globalIndex} - isDecoded, err := claim.decodeEtrogCalldata(tt.data) + isDecoded, err := claim.DecodeEtrogCalldata(tt.data) if tt.expectError { require.Error(t, err) } else { @@ -2041,7 +1703,7 @@ func TestDecodeEtrogCalldata(t *testing.T) { func TestQueryBlockRangeOrdering(t *testing.T) { path := path.Join(t.TempDir(), "bridgeSyncerProcessorOrdering.db") logger := log.WithFields("module", "bridge-syncer") - p, err := newProcessor(path, "bridge-syncer", logger, dbQueryTimeout, nil) + p, err := newTestProcessor(path, "bridge-syncer", logger, dbQueryTimeout) require.NoError(t, err) // Create test data with events in different blocks and positions @@ -2259,3218 +1921,274 @@ func TestBridgeSyncRuntimeData_IsCompatible(t *testing.T) { } } -func TestGetClaimByGlobalIndex(t *testing.T) { - path := path.Join(t.TempDir(), "bridgesyncTestGetClaimByGlobalIndex.sqlite") - logger := log.WithFields("module", "bridge-syncer") - p, err := newProcessor(path, "bridge-syncer", logger, dbQueryTimeout, nil) - require.NoError(t, err) - ctx := context.Background() +func intPtr(i int) *int { + return &i +} - // Test case 1: Claim not found - t.Run("claim not found", func(t *testing.T) { - nonExistentGlobalIndex := big.NewInt(999999) - claims, err := p.GetClaimsByGlobalIndex(ctx, 
nonExistentGlobalIndex) - require.NoError(t, err) - require.Empty(t, claims) - }) +func uint64Ptr(i uint64) *uint64 { + return &i +} - // Test case 2: Insert claims and retrieve them - globalIndexToTest := GenerateGlobalIndex(true, 0, 2000) - testClaims := []*Claim{ - { - BlockNum: 1, - BlockPos: 0, - GlobalIndex: big.NewInt(1000), - OriginNetwork: 1, - OriginAddress: common.HexToAddress("0x11"), - DestinationAddress: common.HexToAddress("0x22"), - Amount: big.NewInt(100), - ProofLocalExitRoot: types.Proof{}, - ProofRollupExitRoot: types.Proof{}, - MainnetExitRoot: common.HexToHash("0xmainnet"), - RollupExitRoot: common.HexToHash("0xrollup"), - GlobalExitRoot: common.HexToHash("0xglobal"), - DestinationNetwork: 2, - Metadata: []byte("test metadata 1"), - IsMessage: false, - }, - { - BlockNum: 2, - BlockPos: 1, - GlobalIndex: globalIndexToTest, - OriginNetwork: 3, - OriginAddress: common.HexToAddress("0x33"), - DestinationAddress: common.HexToAddress("0x44"), - Amount: big.NewInt(200), - ProofLocalExitRoot: types.Proof{}, - ProofRollupExitRoot: types.Proof{}, - MainnetExitRoot: common.HexToHash("0xmainnet2"), - RollupExitRoot: common.HexToHash("0xrollup2"), - GlobalExitRoot: common.HexToHash("0xglobal2"), - DestinationNetwork: 4, - Metadata: []byte("test metadata 2"), - IsMessage: true, - }, - { - BlockNum: 3, - BlockPos: 1, - GlobalIndex: globalIndexToTest, // same global index as previous claim - OriginNetwork: 3, - OriginAddress: common.HexToAddress("0x33"), - DestinationAddress: common.HexToAddress("0x55"), - Amount: big.NewInt(200), - ProofLocalExitRoot: types.Proof{}, - ProofRollupExitRoot: types.Proof{}, - MainnetExitRoot: common.HexToHash("0xmainnet2"), - RollupExitRoot: common.HexToHash("0xrollup2"), - GlobalExitRoot: common.HexToHash("0xglobal2"), - DestinationNetwork: 4, - Metadata: []byte("test metadata 2"), - IsMessage: true, - }, - } +func boolPtr(b bool) *bool { + return &b +} - // Insert test claims - tx, err := p.db.BeginTx(ctx, nil) - 
require.NoError(t, err) +func TestProcessor_ErrorPathLogging(t *testing.T) { + t.Parallel() - // Insert blocks first - for _, claim := range testClaims { - _, err = tx.Exec(`INSERT INTO block (num, hash) VALUES ($1, $2)`, - claim.BlockNum, fmt.Sprintf("0x%x", claim.BlockNum)) - require.NoError(t, err) - } + t.Run("GetBridgesPaged error paths", func(t *testing.T) { + t.Parallel() + p := createTestProcessor(t, "GetBridgesPagedErrorPaths") - // Insert claims - for _, claim := range testClaims { - require.NoError(t, meddler.Insert(tx, "claim", claim)) - } + testBlock := sync.Block{ + Num: 1, + Hash: common.HexToHash("0x1"), + Events: []any{ + Event{Bridge: createTestBridge(1, 0)}, + }, + } + require.NoError(t, p.ProcessBlock(context.Background(), testBlock)) - require.NoError(t, tx.Commit()) + // Test invalid page number (page 10 with only 1 record and page size 5) + _, _, err := p.GetBridgesPaged(context.Background(), 10, 5, nil, nil, "") + require.Error(t, err) + require.Contains(t, err.Error(), "invalid page number") - // Test case 3: Retrieve existing claims by global index - t.Run("retrieve existing claims", func(t *testing.T) { - claims, err := p.GetClaimsByGlobalIndex(ctx, globalIndexToTest) + // Test successful case with valid page + bridges, count, err := p.GetBridgesPaged(context.Background(), 1, 5, nil, nil, "") require.NoError(t, err) - require.Len(t, claims, 1) - // no unset claim, so the claims got compacted - require.Equal(t, testClaims[1].BlockNum, claims[0].BlockNum) - require.Equal(t, testClaims[1].BlockPos, claims[0].BlockPos) - require.Equal(t, testClaims[1].GlobalIndex, claims[0].GlobalIndex) - require.Equal(t, testClaims[1].OriginAddress, claims[0].OriginAddress) - require.Equal(t, testClaims[1].DestinationAddress, claims[0].DestinationAddress) - require.Equal(t, testClaims[1].Amount, claims[0].Amount) - require.Equal(t, testClaims[1].Metadata, claims[0].Metadata) - require.Equal(t, testClaims[1].IsMessage, claims[0].IsMessage) - require.Equal(t, 
testClaims[2].MainnetExitRoot, claims[0].MainnetExitRoot) - require.Equal(t, testClaims[2].RollupExitRoot, claims[0].RollupExitRoot) - require.Equal(t, testClaims[2].GlobalExitRoot, claims[0].GlobalExitRoot) - require.Equal(t, testClaims[2].ProofLocalExitRoot, claims[0].ProofLocalExitRoot) - require.Equal(t, testClaims[2].ProofRollupExitRoot, claims[0].ProofRollupExitRoot) + require.Len(t, bridges, 1) + require.Equal(t, 1, count) }) - // Test case 4: Test with very large global index - t.Run("large global index", func(t *testing.T) { - // Create a very large global index - largeGlobalIndex := new(big.Int) - largeGlobalIndex.SetString("340282366920938463463374607431768211455", 10) // 2^128 - 1 - - largeClaim := &Claim{ - BlockNum: 4, - BlockPos: 0, - GlobalIndex: largeGlobalIndex, - OriginNetwork: 7, - OriginAddress: common.HexToAddress("0x77"), - DestinationAddress: common.HexToAddress("0x88"), - Amount: big.NewInt(400), - ProofLocalExitRoot: types.Proof{}, - ProofRollupExitRoot: types.Proof{}, - MainnetExitRoot: common.HexToHash("0xmainnet4"), - RollupExitRoot: common.HexToHash("0xrollup4"), - GlobalExitRoot: common.HexToHash("0xglobal4"), - DestinationNetwork: 8, - Metadata: []byte("large index test"), - IsMessage: false, - } - // Insert block and claim - tx, err := p.db.BeginTx(ctx, nil) - require.NoError(t, err) + t.Run("GetLegacyTokenMigrations error paths", func(t *testing.T) { + t.Parallel() + p := createTestProcessor(t, "GetLegacyTokenMigrationsErrorPaths") - _, err = tx.Exec(`INSERT INTO block (num, hash) VALUES ($1, $2)`, - largeClaim.BlockNum, fmt.Sprintf("0x%x", largeClaim.BlockNum)) - require.NoError(t, err) + testBlock := sync.Block{ + Num: 1, + Hash: common.HexToHash("0x1"), + Events: []any{ + Event{LegacyTokenMigration: &LegacyTokenMigration{ + BlockNum: 1, + BlockPos: 0, + BlockTimestamp: 1234567890, + TxHash: common.HexToHash("0x1234567890123456789012345678901234567890123456789012345678901234"), + Sender: 
common.HexToAddress("0x1234567890123456789012345678901234567890"), + LegacyTokenAddress: common.HexToAddress("0x1234567890123456789012345678901234567890"), + UpdatedTokenAddress: common.HexToAddress("0x1234567890123456789012345678901234567890"), + Amount: big.NewInt(1000000000000000000), + }}, + }, + } + require.NoError(t, p.ProcessBlock(context.Background(), testBlock)) - require.NoError(t, meddler.Insert(tx, "claim", largeClaim)) - require.NoError(t, tx.Commit()) + // Test invalid page number (page 10 with only 1 record and page size 5) + _, _, err := p.GetLegacyTokenMigrations(context.Background(), 10, 5) + require.Error(t, err) + require.Contains(t, err.Error(), "invalid page number") - // Retrieve the claim - retrievedClaims, err := p.GetClaimsByGlobalIndex(ctx, largeGlobalIndex) + // Test successful case with valid page + migrations, count, err := p.GetLegacyTokenMigrations(context.Background(), 1, 5) require.NoError(t, err) - require.Len(t, retrievedClaims, 1) // Should return one claim - require.Equal(t, *largeClaim, retrievedClaims[0]) + require.Len(t, migrations, 1) + require.Equal(t, 1, count) }) - // Test case 5: Test with zero global index - t.Run("zero global index", func(t *testing.T) { - zeroGlobalIndex := big.NewInt(0) - - zeroClaim := &Claim{ - BlockNum: 5, - BlockPos: 0, - GlobalIndex: zeroGlobalIndex, - OriginNetwork: 9, - OriginAddress: common.HexToAddress("0x99"), - DestinationAddress: common.HexToAddress("0xaa"), - Amount: big.NewInt(0), - ProofLocalExitRoot: types.Proof{}, - ProofRollupExitRoot: types.Proof{}, - MainnetExitRoot: common.Hash{}, - RollupExitRoot: common.Hash{}, - GlobalExitRoot: common.Hash{}, - DestinationNetwork: 10, - Metadata: []byte{}, - IsMessage: true, - } - - // Insert block and claim - tx, err := p.db.BeginTx(ctx, nil) - require.NoError(t, err) + t.Run("GetTokenMappings error paths", func(t *testing.T) { + t.Parallel() + p := createTestProcessor(t, "GetTokenMappingsErrorPaths") - _, err = tx.Exec(`INSERT INTO block 
(num, hash) VALUES ($1, $2)`, - zeroClaim.BlockNum, fmt.Sprintf("0x%x", zeroClaim.BlockNum)) - require.NoError(t, err) + testBlock := sync.Block{ + Num: 1, + Hash: common.HexToHash("0x1"), + Events: []any{ + Event{TokenMapping: createTestTokenMapping(1, 0)}, + }, + } + require.NoError(t, p.ProcessBlock(context.Background(), testBlock)) - require.NoError(t, meddler.Insert(tx, "claim", zeroClaim)) - require.NoError(t, tx.Commit()) + // Test invalid page number (page 10 with only 1 record and page size 5) + _, _, err := p.GetTokenMappings(context.Background(), 10, 5, "") + require.Error(t, err) + require.Contains(t, err.Error(), "invalid page number") - // Retrieve the claim - retrievedClaims, err := p.GetClaimsByGlobalIndex(ctx, zeroGlobalIndex) + // Test successful case with valid page + mappings, count, err := p.GetTokenMappings(context.Background(), 1, 5, "") require.NoError(t, err) - require.Len(t, retrievedClaims, 1) // Should return one claim - require.Equal(t, *zeroClaim, retrievedClaims[0]) - }) - - // Test case 6: Test with nil global index (should handle gracefully) - t.Run("nil global index", func(t *testing.T) { - claims, err := p.GetClaimsByGlobalIndex(ctx, nil) - require.ErrorContains(t, err, "global index parameter cannot be nil") - require.Empty(t, claims) - }) - - // Test case 7: db returns error - t.Run("db error", func(t *testing.T) { - p.db.Close() // Close the processor's DB to simulate an error - - // Attempt to retrieve claims with the invalid processor - claims, err := p.GetClaimsByGlobalIndex(ctx, globalIndexToTest) - require.Error(t, err) - require.Empty(t, claims) - }) -} - -// TestGetClaimsByGlobalIndex_Compact tests the compaction behavior of GetClaimsByGlobalIndex -// It mirrors the test cases from TestGetClaims_Compact to ensure consistent behavior -// -//nolint:dupl -func TestGetClaimsByGlobalIndex_Compact(t *testing.T) { - logger := log.WithFields("module", "bridge-syncer") - ctx := context.Background() - - // Define test claims used 
across test cases - oldProof := types.Proof{} - oldProof[0] = common.HexToHash("0x01") - - testCases := []struct { - name string - globalIndex *big.Int - setupBlocks func() []sync.Block - expectedCount int - validateResults func(t *testing.T, claims []Claim) - }{ - { - name: "Case 1: don't compact if unset_claim exists for global_index", - globalIndex: big.NewInt(100), - setupBlocks: func() []sync.Block { - return []sync.Block{ - { - Num: 1, - Hash: common.HexToHash("0x1"), - Events: []any{ - Event{Claim: &Claim{ - BlockNum: 1, - BlockPos: 0, - TxHash: common.HexToHash("0x111"), - GlobalIndex: big.NewInt(100), - OriginNetwork: 1, - OriginAddress: common.HexToAddress("0xaaa"), - DestinationAddress: common.HexToAddress("0xbbb"), - Amount: big.NewInt(100), - ProofLocalExitRoot: types.Proof{common.HexToHash("0x1a")}, - ProofRollupExitRoot: types.Proof{common.HexToHash("0x1b")}, - MainnetExitRoot: common.HexToHash("0x1c"), - RollupExitRoot: common.HexToHash("0x1d"), - GlobalExitRoot: common.HexToHash("0x1e"), - DestinationNetwork: 2, - Metadata: []byte("original_metadata"), - IsMessage: false, - BlockTimestamp: 1000, - }}, - }, - }, - { - Num: 2, - Hash: common.HexToHash("0x2"), - Events: []any{ - Event{UnsetClaim: &UnsetClaim{ - GlobalIndex: big.NewInt(100), - BlockNum: 2, - BlockPos: 0, - TxHash: common.Hash{}, - }}, - }, - }, - { - Num: 3, - Hash: common.HexToHash("0x3"), - Events: []any{ - Event{Claim: &Claim{ - BlockNum: 3, - BlockPos: 0, - TxHash: common.HexToHash("0x333"), - GlobalIndex: big.NewInt(100), - OriginNetwork: 77, - OriginAddress: common.HexToAddress("0x999"), - DestinationAddress: common.HexToAddress("0x888"), - Amount: big.NewInt(777), - ProofLocalExitRoot: types.Proof{common.HexToHash("0x3a")}, - ProofRollupExitRoot: types.Proof{common.HexToHash("0x3b")}, - MainnetExitRoot: common.HexToHash("0x3c"), - RollupExitRoot: common.HexToHash("0x3d"), - GlobalExitRoot: common.HexToHash("0x3e"), - DestinationNetwork: 66, - Metadata: []byte("newest_metadata"), 
- IsMessage: true, - BlockTimestamp: 3000, - }}, - }, - }, - } - }, - expectedCount: 2, // Should return all claims without compacting because unset_claim exists - validateResults: func(t *testing.T, claims []Claim) { - t.Helper() - require.Len(t, claims, 2, "should not compact when unset claim exists") - // Claims should be ordered by block_num ASC - require.Equal(t, uint64(1), claims[0].BlockNum) - require.Equal(t, []byte("original_metadata"), claims[0].Metadata) - require.Equal(t, uint64(3), claims[1].BlockNum) - require.Equal(t, []byte("newest_metadata"), claims[1].Metadata) - }, - }, - { - name: "Case 2: compact if no unset_claim exists", - globalIndex: big.NewInt(200), - setupBlocks: func() []sync.Block { - return []sync.Block{ - { - Num: 1, - Hash: common.HexToHash("0x1"), - Events: []any{ - Event{Claim: &Claim{ - BlockNum: 1, - BlockPos: 0, - TxHash: common.HexToHash("0x111"), - GlobalIndex: big.NewInt(200), - OriginNetwork: 1, - OriginAddress: common.HexToAddress("0xaaa"), - DestinationAddress: common.HexToAddress("0xbbb"), - Amount: big.NewInt(100), - ProofLocalExitRoot: types.Proof{common.HexToHash("0x1a")}, - ProofRollupExitRoot: types.Proof{common.HexToHash("0x1b")}, - MainnetExitRoot: common.HexToHash("0x1c"), - RollupExitRoot: common.HexToHash("0x1d"), - GlobalExitRoot: common.HexToHash("0x1e"), - DestinationNetwork: 2, - Metadata: []byte("original_metadata"), - IsMessage: false, - BlockTimestamp: 1000, - }}, - }, - }, - { - Num: 2, - Hash: common.HexToHash("0x2"), - Events: []any{ - Event{Claim: &Claim{ - BlockNum: 2, - BlockPos: 0, - TxHash: common.HexToHash("0x222"), - GlobalIndex: big.NewInt(200), - OriginNetwork: 99, - OriginAddress: common.HexToAddress("0xfff"), - DestinationAddress: common.HexToAddress("0xeee"), - Amount: big.NewInt(999), - ProofLocalExitRoot: types.Proof{common.HexToHash("0x2a")}, - ProofRollupExitRoot: types.Proof{common.HexToHash("0x2b")}, - MainnetExitRoot: common.HexToHash("0x2c"), - RollupExitRoot: 
common.HexToHash("0x2d"), - GlobalExitRoot: common.HexToHash("0x2e"), - DestinationNetwork: 88, - Metadata: []byte("middle_metadata"), - IsMessage: true, - BlockTimestamp: 2000, - }}, - }, - }, - { - Num: 3, - Hash: common.HexToHash("0x3"), - Events: []any{ - Event{Claim: &Claim{ - BlockNum: 3, - BlockPos: 0, - TxHash: common.HexToHash("0x333"), - GlobalIndex: big.NewInt(200), - OriginNetwork: 77, - OriginAddress: common.HexToAddress("0x999"), - DestinationAddress: common.HexToAddress("0x888"), - Amount: big.NewInt(777), - ProofLocalExitRoot: types.Proof{common.HexToHash("0x3a")}, - ProofRollupExitRoot: types.Proof{common.HexToHash("0x3b")}, - MainnetExitRoot: common.HexToHash("0x3c"), - RollupExitRoot: common.HexToHash("0x3d"), - GlobalExitRoot: common.HexToHash("0x3e"), - DestinationNetwork: 66, - Metadata: []byte("newest_metadata"), - IsMessage: true, - BlockTimestamp: 3000, - }}, - }, - }, - } - }, - expectedCount: 1, // Should return 1 compacted claim - validateResults: func(t *testing.T, claims []Claim) { - t.Helper() - require.Len(t, claims, 1, "should compact when no unset claim exists") - claim := claims[0] - require.Equal(t, big.NewInt(200), claim.GlobalIndex) - // Metadata from oldest (block 1) - require.Equal(t, uint64(1), claim.BlockNum, "should preserve oldest block") - require.Equal(t, uint64(0), claim.BlockPos, "should preserve oldest position") - require.Equal(t, []byte("original_metadata"), claim.Metadata, "should preserve oldest metadata") - require.Equal(t, big.NewInt(100), claim.Amount, "should preserve oldest amount") - require.Equal(t, uint32(1), claim.OriginNetwork, "should preserve oldest origin network") - // Proofs from newest (block 3) - require.Equal(t, common.HexToHash("0x3a"), claim.ProofLocalExitRoot[0], "should use newest proof") - require.Equal(t, common.HexToHash("0x3c"), claim.MainnetExitRoot, "should use newest MainnetExitRoot") - require.Equal(t, common.HexToHash("0x3d"), claim.RollupExitRoot, "should use newest 
RollupExitRoot") - require.Equal(t, common.HexToHash("0x3e"), claim.GlobalExitRoot, "should use newest GlobalExitRoot") - }, - }, - { - name: "Single claim - no compaction needed", - globalIndex: big.NewInt(300), - setupBlocks: func() []sync.Block { - return []sync.Block{ - { - Num: 1, - Hash: common.HexToHash("0x1"), - Events: []any{ - Event{Claim: &Claim{ - BlockNum: 1, - BlockPos: 0, - TxHash: common.HexToHash("0x111"), - GlobalIndex: big.NewInt(300), - OriginNetwork: 5, - OriginAddress: common.HexToAddress("0x555"), - DestinationAddress: common.HexToAddress("0x666"), - Amount: big.NewInt(500), - ProofLocalExitRoot: oldProof, - ProofRollupExitRoot: oldProof, - MainnetExitRoot: common.HexToHash("0xaaa"), - RollupExitRoot: common.HexToHash("0xbbb"), - GlobalExitRoot: common.HexToHash("0xccc"), - DestinationNetwork: 6, - Metadata: []byte("single_claim"), - IsMessage: false, - BlockTimestamp: 1000, - }}, - }, - }, - } - }, - expectedCount: 1, - validateResults: func(t *testing.T, claims []Claim) { - t.Helper() - require.Len(t, claims, 1) - require.Equal(t, big.NewInt(300), claims[0].GlobalIndex) - require.Equal(t, []byte("single_claim"), claims[0].Metadata) - }, - }, - { - name: "Non-existent global index", - globalIndex: big.NewInt(999999), - setupBlocks: func() []sync.Block { - return []sync.Block{ - { - Num: 1, - Hash: common.HexToHash("0x1"), - Events: []any{ - Event{Claim: &Claim{ - BlockNum: 1, - BlockPos: 0, - TxHash: common.HexToHash("0x111"), - GlobalIndex: big.NewInt(400), - OriginNetwork: 1, - OriginAddress: common.HexToAddress("0xaaa"), - DestinationAddress: common.HexToAddress("0xbbb"), - Amount: big.NewInt(100), - ProofLocalExitRoot: oldProof, - ProofRollupExitRoot: oldProof, - MainnetExitRoot: common.HexToHash("0x1c"), - RollupExitRoot: common.HexToHash("0x1d"), - GlobalExitRoot: common.HexToHash("0x1e"), - DestinationNetwork: 2, - Metadata: []byte("different_index"), - IsMessage: false, - BlockTimestamp: 1000, - }}, - }, - }, - } - }, - 
expectedCount: 0, - validateResults: func(t *testing.T, claims []Claim) { - t.Helper() - require.Empty(t, claims, "should return empty for non-existent global index") - }, - }, - { - name: "Multiple claims same block - compact using position", - globalIndex: big.NewInt(500), - setupBlocks: func() []sync.Block { - return []sync.Block{ - { - Num: 1, - Hash: common.HexToHash("0x1"), - Events: []any{ - Event{Claim: &Claim{ - BlockNum: 1, - BlockPos: 0, - TxHash: common.HexToHash("0x111"), - GlobalIndex: big.NewInt(500), - OriginNetwork: 1, - OriginAddress: common.HexToAddress("0xaaa"), - DestinationAddress: common.HexToAddress("0xbbb"), - Amount: big.NewInt(100), - ProofLocalExitRoot: types.Proof{common.HexToHash("0x1a")}, - ProofRollupExitRoot: types.Proof{common.HexToHash("0x1b")}, - MainnetExitRoot: common.HexToHash("0x1c"), - RollupExitRoot: common.HexToHash("0x1d"), - GlobalExitRoot: common.HexToHash("0x1e"), - DestinationNetwork: 2, - Metadata: []byte("pos_0_metadata"), - IsMessage: false, - BlockTimestamp: 1000, - }}, - Event{Claim: &Claim{ - BlockNum: 1, - BlockPos: 1, - TxHash: common.HexToHash("0x112"), - GlobalIndex: big.NewInt(500), - OriginNetwork: 1, - OriginAddress: common.HexToAddress("0xaaa"), - DestinationAddress: common.HexToAddress("0xbbb"), - Amount: big.NewInt(100), - ProofLocalExitRoot: types.Proof{common.HexToHash("0x2a")}, - ProofRollupExitRoot: types.Proof{common.HexToHash("0x2b")}, - MainnetExitRoot: common.HexToHash("0x2c"), - RollupExitRoot: common.HexToHash("0x2d"), - GlobalExitRoot: common.HexToHash("0x2e"), - DestinationNetwork: 2, - Metadata: []byte("pos_1_metadata"), - IsMessage: false, - BlockTimestamp: 1000, - }}, - }, - }, - } - }, - expectedCount: 1, - validateResults: func(t *testing.T, claims []Claim) { - t.Helper() - require.Len(t, claims, 1, "should compact multiple claims in same block") - claim := claims[0] - require.Equal(t, uint64(1), claim.BlockNum) - require.Equal(t, uint64(0), claim.BlockPos, "should use oldest 
position") - require.Equal(t, []byte("pos_0_metadata"), claim.Metadata, "should use oldest metadata") - require.Equal(t, common.HexToHash("0x2a"), claim.ProofLocalExitRoot[0], "should use newest proof") - }, - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - // Create a fresh database for each test case - dbPath := filepath.Join(t.TempDir(), "testcase.sqlite") - require.NoError(t, migrations.RunMigrations(dbPath)) - testP, err := newProcessor(dbPath, "bridge-syncer", logger, dbQueryTimeout, nil) - require.NoError(t, err) - - // Setup blocks - blocks := tc.setupBlocks() - for _, block := range blocks { - require.NoError(t, testP.ProcessBlock(ctx, block)) - } - - // Execute test - claims, err := testP.GetClaimsByGlobalIndex(ctx, tc.globalIndex) - require.NoError(t, err) - - // Validate results - require.Len(t, claims, tc.expectedCount) - if tc.validateResults != nil { - tc.validateResults(t, claims) - } - }) - } -} - -func intPtr(i int) *int { - return &i -} - -func uint64Ptr(i uint64) *uint64 { - return &i -} - -func boolPtr(b bool) *bool { - return &b -} - -func TestProcessor_ErrorPathLogging(t *testing.T) { - t.Parallel() - - t.Run("GetBridgesPaged error paths", func(t *testing.T) { - t.Parallel() - p := createTestProcessor(t, "GetBridgesPagedErrorPaths") - - testBlock := sync.Block{ - Num: 1, - Hash: common.HexToHash("0x1"), - Events: []any{ - Event{Bridge: createTestBridge(1, 0)}, - }, - } - require.NoError(t, p.ProcessBlock(context.Background(), testBlock)) - - // Test invalid page number (page 10 with only 1 record and page size 5) - _, _, err := p.GetBridgesPaged(context.Background(), 10, 5, nil, nil, "") - require.Error(t, err) - require.Contains(t, err.Error(), "invalid page number") - - // Test successful case with valid page - bridges, count, err := p.GetBridgesPaged(context.Background(), 1, 5, nil, nil, "") - require.NoError(t, err) - require.Len(t, bridges, 1) - require.Equal(t, 1, count) - }) - - t.Run("GetClaimsPaged 
error paths", func(t *testing.T) { - t.Parallel() - p := createTestProcessor(t, "GetClaimsPagedErrorPaths") - - testBlock := sync.Block{ - Num: 1, - Hash: common.HexToHash("0x1"), - Events: []any{ - Event{Claim: &Claim{ - BlockNum: 1, - BlockPos: 0, - BlockTimestamp: 1234567890, - TxHash: common.HexToHash("0x1234567890123456789012345678901234567890123456789012345678901234"), - GlobalIndex: big.NewInt(1000000000000000000), - OriginNetwork: 1, - OriginAddress: common.HexToAddress("0x1234567890123456789012345678901234567890"), - DestinationAddress: common.HexToAddress("0x1234567890123456789012345678901234567890"), - Amount: big.NewInt(1000000000000000000), - ProofLocalExitRoot: [common.HashLength]common.Hash{}, - ProofRollupExitRoot: [common.HashLength]common.Hash{}, - MainnetExitRoot: common.HexToHash("0x1234567890123456789012345678901234567890123456789012345678901234"), - RollupExitRoot: common.HexToHash("0x1234567890123456789012345678901234567890123456789012345678901234"), - GlobalExitRoot: common.HexToHash("0x1234567890123456789012345678901234567890123456789012345678901234"), - DestinationNetwork: 1, - Metadata: []byte{}, - IsMessage: false, - }}, - }, - } - require.NoError(t, p.ProcessBlock(context.Background(), testBlock)) - - // Test invalid page number (page 10 with only 1 record and page size 5) - _, _, err := p.GetClaimsPaged(context.Background(), 10, 5, nil, nil) - require.Error(t, err) - require.Contains(t, err.Error(), "invalid page number") - - // Test successful case with valid page - claims, count, err := p.GetClaimsPaged(context.Background(), 1, 5, nil, nil) - require.NoError(t, err) - require.Len(t, claims, 1) - require.Equal(t, 1, count) - }) - - t.Run("GetLegacyTokenMigrations error paths", func(t *testing.T) { - t.Parallel() - p := createTestProcessor(t, "GetLegacyTokenMigrationsErrorPaths") - - testBlock := sync.Block{ - Num: 1, - Hash: common.HexToHash("0x1"), - Events: []any{ - Event{LegacyTokenMigration: &LegacyTokenMigration{ - BlockNum: 1, - 
BlockPos: 0, - BlockTimestamp: 1234567890, - TxHash: common.HexToHash("0x1234567890123456789012345678901234567890123456789012345678901234"), - Sender: common.HexToAddress("0x1234567890123456789012345678901234567890"), - LegacyTokenAddress: common.HexToAddress("0x1234567890123456789012345678901234567890"), - UpdatedTokenAddress: common.HexToAddress("0x1234567890123456789012345678901234567890"), - Amount: big.NewInt(1000000000000000000), - }}, - }, - } - require.NoError(t, p.ProcessBlock(context.Background(), testBlock)) - - // Test invalid page number (page 10 with only 1 record and page size 5) - _, _, err := p.GetLegacyTokenMigrations(context.Background(), 10, 5) - require.Error(t, err) - require.Contains(t, err.Error(), "invalid page number") - - // Test successful case with valid page - migrations, count, err := p.GetLegacyTokenMigrations(context.Background(), 1, 5) - require.NoError(t, err) - require.Len(t, migrations, 1) - require.Equal(t, 1, count) - }) - - t.Run("GetTokenMappings error paths", func(t *testing.T) { - t.Parallel() - p := createTestProcessor(t, "GetTokenMappingsErrorPaths") - - testBlock := sync.Block{ - Num: 1, - Hash: common.HexToHash("0x1"), - Events: []any{ - Event{TokenMapping: createTestTokenMapping(1, 0)}, - }, - } - require.NoError(t, p.ProcessBlock(context.Background(), testBlock)) - - // Test invalid page number (page 10 with only 1 record and page size 5) - _, _, err := p.GetTokenMappings(context.Background(), 10, 5, "") - require.Error(t, err) - require.Contains(t, err.Error(), "invalid page number") - - // Test successful case with valid page - mappings, count, err := p.GetTokenMappings(context.Background(), 1, 5, "") - require.NoError(t, err) - require.Len(t, mappings, 1) - require.Equal(t, 1, count) - }) -} - -func TestProcessor_DatabaseConnectionErrors(t *testing.T) { - t.Parallel() - - t.Run("GetTotalNumberOfRecords with invalid table name", func(t *testing.T) { - t.Parallel() - p := createTestProcessor(t, 
"DatabaseConnectionErrors") - - // Test with invalid table name - _, err := p.GetTotalNumberOfRecords(context.Background(), "invalid_table_name", "") - require.Error(t, err) - require.Contains(t, err.Error(), "no such table") - }) - - t.Run("fetchTokenMappings with database errors", func(t *testing.T) { - t.Parallel() - p := createTestProcessor(t, "DatabaseConnectionErrors2") - - testBlock := sync.Block{ - Num: 1, - Hash: common.HexToHash("0x1"), - Events: []any{ - Event{TokenMapping: createTestTokenMapping(1, 0)}, - }, - } - require.NoError(t, p.ProcessBlock(context.Background(), testBlock)) - - // Now test with an offset that would cause a database error - p.db.Close() - _, err := p.fetchTokenMappings(context.Background(), 5, 0, "") - require.Error(t, err) - }) -} - -func TestProcessor_CalculateOffsetErrors(t *testing.T) { - t.Parallel() - - t.Run("GetTokenMappings with invalid offset calculation", func(t *testing.T) { - t.Parallel() - p := createTestProcessor(t, "CalculateOffsetErrors") - - testBlock := sync.Block{ - Num: 1, - Hash: common.HexToHash("0x1"), - Events: []any{ - Event{TokenMapping: createTestTokenMapping(1, 0)}, - }, - } - require.NoError(t, p.ProcessBlock(context.Background(), testBlock)) - - // Test with page number that would result in offset >= total records - _, _, err := p.GetTokenMappings(context.Background(), 10, 5, "") // page 10 with only 1 record and page size 5 - require.Error(t, err) - require.Contains(t, err.Error(), "invalid page number") - }) - - t.Run("GetBridgesPaged with invalid offset calculation", func(t *testing.T) { - t.Parallel() - p := createTestProcessor(t, "CalculateOffsetErrors2") - - testBlock := sync.Block{ - Num: 1, - Hash: common.HexToHash("0x1"), - Events: []any{ - Event{Bridge: createTestBridge(1, 0)}, - }, - } - require.NoError(t, p.ProcessBlock(context.Background(), testBlock)) - - // Test with page number that would result in offset >= total records - _, _, err := p.GetBridgesPaged(context.Background(), 10, 5, 
nil, nil, "") // page 10 with only 1 record and page size 5 - require.Error(t, err) - require.Contains(t, err.Error(), "invalid page number") - }) -} - -// Helper functions to reduce test redundancy - -// createTestProcessor creates a new processor for testing -func createTestProcessor(t *testing.T, dbName string) *processor { - t.Helper() - - path := path.Join(t.TempDir(), dbName+".db") - logger := log.WithFields("module", "bridge-syncer") - p, err := newProcessor(path, "bridge-syncer", logger, dbQueryTimeout, nil) - require.NoError(t, err) - return p -} - -// createTestBridge creates a test Bridge event -func createTestBridge(blockNum uint64, blockPos int) *Bridge { - return &Bridge{ - BlockNum: blockNum, - BlockPos: uint64(blockPos), - BlockTimestamp: 1234567890, - TxHash: common.HexToHash("0x1234567890123456789012345678901234567890123456789012345678901234"), - FromAddress: func() *common.Address { - addr := common.HexToAddress("0x1234567890123456789012345678901234567890") - return &addr - }(), - LeafType: 1, - OriginNetwork: 1, - OriginAddress: common.HexToAddress("0x1234567890123456789012345678901234567890"), - DestinationNetwork: 1, - DestinationAddress: common.HexToAddress("0x1234567890123456789012345678901234567890"), - Amount: big.NewInt(1000000000000000000), - Metadata: []byte{}, - DepositCount: 0, - } -} - -// createTestTokenMapping creates a test TokenMapping event -func createTestTokenMapping(blockNum uint64, blockPos int) *TokenMapping { - return &TokenMapping{ - BlockNum: blockNum, - BlockPos: uint64(blockPos), - BlockTimestamp: 1234567890, - TxHash: common.HexToHash("0x1234567890123456789012345678901234567890123456789012345678901234"), - OriginNetwork: 1, - OriginTokenAddress: common.HexToAddress("0x1234567890123456789012345678901234567890"), - WrappedTokenAddress: common.HexToAddress("0x1234567890123456789012345678901234567890"), - Metadata: []byte{}, - IsNotMintable: false, - Type: 0, - } -} - -func TestGetUnsetClaimsPaged(t *testing.T) { - 
t.Parallel() - - path := path.Join(t.TempDir(), "bridgesyncGetUnsetClaimsPaged.sqlite") - logger := log.WithFields("module", "bridge-syncer") - p, err := newProcessor(path, "bridge-syncer", logger, dbQueryTimeout, nil) - require.NoError(t, err) - - // Create test unset claims - unsetClaims := []*UnsetClaim{ - { - BlockNum: 1, - BlockPos: 0, - TxHash: common.HexToHash("0x123"), - GlobalIndex: big.NewInt(100), - UnsetGlobalIndexHashChain: common.HexToHash("0xabc123"), - }, - { - BlockNum: 2, - BlockPos: 0, - TxHash: common.HexToHash("0x456"), - GlobalIndex: big.NewInt(200), - UnsetGlobalIndexHashChain: common.HexToHash("0xdef456"), - }, - { - BlockNum: 3, - BlockPos: 0, - TxHash: common.HexToHash("0x789"), - GlobalIndex: big.NewInt(100), // Same global index as first - UnsetGlobalIndexHashChain: common.HexToHash("0x987654"), - }, - } - - // Insert test data by processing blocks - for i, unsetClaim := range unsetClaims { - block := sync.Block{ - Num: uint64(i + 1), - Hash: common.HexToHash(fmt.Sprintf("0x%d", i+1)), - Events: []any{ - Event{UnsetClaim: unsetClaim}, - }, - } - require.NoError(t, p.ProcessBlock(context.Background(), block)) - } - - testCases := []struct { - name string - pageSize uint32 - page uint32 - globalIndex *big.Int - expectedCount int - expectedUnsetClaims []*UnsetClaim - expectedError string - }{ - { - name: "all results on first page", - pageSize: 10, - page: 1, - globalIndex: nil, - expectedCount: 3, - expectedUnsetClaims: []*UnsetClaim{unsetClaims[2], unsetClaims[1], unsetClaims[0]}, // DESC order - expectedError: "", - }, - { - name: "pagination: page 2, size 1", - pageSize: 1, - page: 2, - globalIndex: nil, - expectedCount: 3, - expectedUnsetClaims: []*UnsetClaim{unsetClaims[1]}, // Second item in DESC order - expectedError: "", - }, - { - name: "filter by global index", - pageSize: 10, - page: 1, - globalIndex: big.NewInt(100), - expectedCount: 2, - expectedUnsetClaims: []*UnsetClaim{unsetClaims[2], unsetClaims[0]}, // DESC order, 
filtered by globalIndex=100 - expectedError: "", - }, - { - name: "filter by non-existent global index", - pageSize: 10, - page: 1, - globalIndex: big.NewInt(999), - expectedCount: 0, - expectedUnsetClaims: []*UnsetClaim{}, - expectedError: "", - }, - { - name: "invalid page number", - pageSize: 3, - page: 5, - globalIndex: nil, - expectedCount: 0, - expectedUnsetClaims: []*UnsetClaim{}, - expectedError: "invalid page number for given page size and total number of unset_claim", - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - t.Parallel() - - ctx := context.Background() - unsetClaims, count, err := p.GetUnsetClaimsPaged(ctx, tc.page, tc.pageSize, tc.globalIndex) - - if tc.expectedError != "" { - require.ErrorContains(t, err, tc.expectedError) - } else { - require.NoError(t, err) - require.Equal(t, tc.expectedUnsetClaims, unsetClaims) - require.Equal(t, tc.expectedCount, count) - } - }) - } -} - -func TestGetSetClaimsPaged(t *testing.T) { - t.Parallel() - - path := path.Join(t.TempDir(), "bridgesyncGetSetClaimsPaged.sqlite") - logger := log.WithFields("module", "bridge-syncer") - p, err := newProcessor(path, "bridge-syncer", logger, dbQueryTimeout, nil) - require.NoError(t, err) - - // Create test set claims - setClaims := []*SetClaim{ - { - BlockNum: 1, - BlockPos: 0, - TxHash: common.HexToHash("0x111"), - GlobalIndex: big.NewInt(100), - }, - { - BlockNum: 2, - BlockPos: 0, - TxHash: common.HexToHash("0x222"), - GlobalIndex: big.NewInt(200), - }, - { - BlockNum: 3, - BlockPos: 0, - TxHash: common.HexToHash("0x333"), - GlobalIndex: big.NewInt(100), // Same global index as first - }, - { - BlockNum: 4, - BlockPos: 0, - TxHash: common.HexToHash("0x444"), - GlobalIndex: big.NewInt(300), - }, - } - - // Insert test data by processing blocks - for i, setClaim := range setClaims { - block := sync.Block{ - Num: uint64(i + 1), - Hash: common.HexToHash(fmt.Sprintf("0x%d", i+1)), - Events: []any{ - Event{SetClaim: setClaim}, - }, - } - 
require.NoError(t, p.ProcessBlock(context.Background(), block)) - } - - testCases := []struct { - name string - pageSize uint32 - page uint32 - globalIndex *big.Int - expectedCount int - expectedClaims []*SetClaim - expectedError string - }{ - { - name: "all results on first page", - pageSize: 10, - page: 1, - globalIndex: nil, - expectedCount: 4, - expectedClaims: []*SetClaim{setClaims[3], setClaims[2], setClaims[1], setClaims[0]}, // DESC order - expectedError: "", - }, - { - name: "pagination: page 2, size 1", - pageSize: 1, - page: 2, - globalIndex: nil, - expectedCount: 4, - expectedClaims: []*SetClaim{setClaims[2]}, // Second item in DESC order - expectedError: "", - }, - { - name: "filter by global index", - pageSize: 10, - page: 1, - globalIndex: big.NewInt(100), - expectedCount: 2, - expectedClaims: []*SetClaim{setClaims[2], setClaims[0]}, // DESC order, filtered by globalIndex=100 - expectedError: "", - }, - { - name: "filter by non-existent global index", - pageSize: 10, - page: 1, - globalIndex: big.NewInt(999), - expectedCount: 0, - expectedClaims: []*SetClaim{}, - expectedError: "", - }, - { - name: "invalid page number", - pageSize: 4, - page: 5, - globalIndex: nil, - expectedCount: 0, - expectedClaims: []*SetClaim{}, - expectedError: "invalid page number for given page size and total number of set_claim", - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - t.Parallel() - - ctx := context.Background() - setClaims, count, err := p.GetSetClaimsPaged(ctx, tc.page, tc.pageSize, tc.globalIndex) - - if tc.expectedError != "" { - require.ErrorContains(t, err, tc.expectedError) - } else { - require.NoError(t, err) - require.Equal(t, tc.expectedClaims, setClaims) - require.Equal(t, tc.expectedCount, count) - } - }) - } -} - -func TestDatabaseQueryTimeout(t *testing.T) { - normalTimeout := 100 * time.Millisecond - shortTimeout := 1 * time.Nanosecond - - path := path.Join(t.TempDir(), "bridgeSyncerProcessorTimeout.db") - logger 
:= log.WithFields("module", "bridge-syncer-timeout") - - // Create processor with normal timeout for setup - p, err := newProcessor(path, "bridge-syncer-timeout", logger, normalTimeout, nil) - require.NoError(t, err) - - // Insert some test data to ensure the database is working - block := sync.Block{ - Num: 1, - Hash: common.HexToHash("0x123"), - Events: []any{}, - } - - ctx := context.Background() - err = p.ProcessBlock(ctx, block) - require.NoError(t, err) - - // Create a new processor with short timeout for testing timeout behavior - pShortTimeout, err := newProcessor(path, "bridge-syncer-short-timeout", logger, shortTimeout, nil) - require.NoError(t, err) - - // Test that operations timeout with short timeout - _, err = pShortTimeout.GetLastProcessedBlock(ctx) - require.Error(t, err) - require.Contains(t, err.Error(), "context deadline exceeded") - - _, err = pShortTimeout.GetBridges(ctx, 1, 1) - require.Error(t, err) - require.Contains(t, err.Error(), "context deadline exceeded") - - _, err = pShortTimeout.GetClaims(ctx, 1, 1) - require.Error(t, err) - require.Contains(t, err.Error(), "context deadline exceeded") -} - -//nolint:dupl -func TestGetClaims_Compact(t *testing.T) { - // Define all claims used across test cases - claims := []*Claim{ - // claims[0] - Basic claim with GlobalIndex=1 - { - BlockNum: 1, - BlockPos: 0, - TxHash: common.HexToHash("0x111"), - GlobalIndex: big.NewInt(1), - OriginNetwork: 1, - OriginAddress: common.HexToAddress("0xaaa"), - DestinationAddress: common.HexToAddress("0xbbb"), - Amount: big.NewInt(100), - ProofLocalExitRoot: types.Proof{common.HexToHash("0x1a")}, - ProofRollupExitRoot: types.Proof{common.HexToHash("0x1b")}, - MainnetExitRoot: common.HexToHash("0x1c"), - RollupExitRoot: common.HexToHash("0x1d"), - GlobalExitRoot: common.HexToHash("0x1e"), - DestinationNetwork: 2, - Metadata: []byte("metadata1"), - IsMessage: false, - BlockTimestamp: 1000, - Type: ClaimEvent, - }, - // claims[1] - Basic claim with GlobalIndex=2 - { 
- BlockNum: 2, - BlockPos: 0, - TxHash: common.HexToHash("0x222"), - GlobalIndex: big.NewInt(2), - OriginNetwork: 3, - OriginAddress: common.HexToAddress("0xccc"), - DestinationAddress: common.HexToAddress("0xddd"), - Amount: big.NewInt(200), - ProofLocalExitRoot: types.Proof{common.HexToHash("0x2a")}, - ProofRollupExitRoot: types.Proof{common.HexToHash("0x2b")}, - MainnetExitRoot: common.HexToHash("0x2c"), - RollupExitRoot: common.HexToHash("0x2d"), - GlobalExitRoot: common.HexToHash("0x2e"), - DestinationNetwork: 4, - Metadata: []byte("metadata2"), - IsMessage: true, - BlockTimestamp: 2000, - Type: ClaimEvent, - }, - // claims[2] - Oldest claim with GlobalIndex=100 (block 1) - { - BlockNum: 1, - BlockPos: 0, - TxHash: common.HexToHash("0x111"), - GlobalIndex: big.NewInt(100), - OriginNetwork: 1, - OriginAddress: common.HexToAddress("0xaaa"), - DestinationAddress: common.HexToAddress("0xbbb"), - Amount: big.NewInt(100), - ProofLocalExitRoot: types.Proof{common.HexToHash("0x1a")}, - ProofRollupExitRoot: types.Proof{common.HexToHash("0x1b")}, - MainnetExitRoot: common.HexToHash("0x1c"), - RollupExitRoot: common.HexToHash("0x1d"), - GlobalExitRoot: common.HexToHash("0x1e"), - DestinationNetwork: 2, - Metadata: []byte("original_metadata"), - IsMessage: false, - BlockTimestamp: 1000, - Type: ClaimEvent, - }, - // claims[3] - Middle claim with GlobalIndex=100 (block 2) - { - BlockNum: 2, - BlockPos: 0, - TxHash: common.HexToHash("0x222"), - GlobalIndex: big.NewInt(100), - OriginNetwork: 99, - OriginAddress: common.HexToAddress("0xfff"), - DestinationAddress: common.HexToAddress("0xeee"), - Amount: big.NewInt(999), - ProofLocalExitRoot: types.Proof{common.HexToHash("0x2a")}, - ProofRollupExitRoot: types.Proof{common.HexToHash("0x2b")}, - MainnetExitRoot: common.HexToHash("0x2c"), - RollupExitRoot: common.HexToHash("0x2d"), - GlobalExitRoot: common.HexToHash("0x2e"), - DestinationNetwork: 88, - Metadata: []byte("middle_metadata"), - IsMessage: true, - BlockTimestamp: 
2000, - Type: ClaimEvent, - }, - // claims[4] - Newest claim with GlobalIndex=100 (block 3) - { - BlockNum: 3, - BlockPos: 0, - TxHash: common.HexToHash("0x333"), - GlobalIndex: big.NewInt(100), - OriginNetwork: 77, - OriginAddress: common.HexToAddress("0x999"), - DestinationAddress: common.HexToAddress("0x888"), - Amount: big.NewInt(777), - ProofLocalExitRoot: types.Proof{common.HexToHash("0x3a")}, - ProofRollupExitRoot: types.Proof{common.HexToHash("0x3b")}, - MainnetExitRoot: common.HexToHash("0x3c"), - RollupExitRoot: common.HexToHash("0x3d"), - GlobalExitRoot: common.HexToHash("0x3e"), - DestinationNetwork: 66, - Metadata: []byte("newest_metadata"), - IsMessage: true, - BlockTimestamp: 3000, - Type: DetailedClaimEvent, - }, - // claims[5] - Oldest claim with GlobalIndex=100 (block 1, pos 0) - for multiple groups test - { - BlockNum: 1, - BlockPos: 0, - TxHash: common.HexToHash("0x111"), - GlobalIndex: big.NewInt(100), - OriginNetwork: 1, - OriginAddress: common.HexToAddress("0xa1"), - DestinationAddress: common.HexToAddress("0xb1"), - Amount: big.NewInt(100), - ProofLocalExitRoot: types.Proof{common.HexToHash("0x1a")}, - ProofRollupExitRoot: types.Proof{common.HexToHash("0x1b")}, - MainnetExitRoot: common.HexToHash("0x1c"), - RollupExitRoot: common.HexToHash("0x1d"), - GlobalExitRoot: common.HexToHash("0x1e"), - DestinationNetwork: 2, - Metadata: []byte("index1_old"), - IsMessage: false, - BlockTimestamp: 1000, - Type: ClaimEvent, - }, - // claims[6] - Oldest claim with GlobalIndex=200 (block 1, pos 1) - { - BlockNum: 1, - BlockPos: 1, - TxHash: common.HexToHash("0x112"), - GlobalIndex: big.NewInt(200), - OriginNetwork: 3, - OriginAddress: common.HexToAddress("0xa2"), - DestinationAddress: common.HexToAddress("0xb2"), - Amount: big.NewInt(200), - ProofLocalExitRoot: types.Proof{common.HexToHash("0x2a")}, - ProofRollupExitRoot: types.Proof{common.HexToHash("0x2b")}, - MainnetExitRoot: common.HexToHash("0x2c"), - RollupExitRoot: common.HexToHash("0x2d"), - 
GlobalExitRoot: common.HexToHash("0x2e"), - DestinationNetwork: 4, - Metadata: []byte("index2_old"), - IsMessage: true, - BlockTimestamp: 1001, - Type: ClaimEvent, - }, - // claims[7] - Newest claim with GlobalIndex=100 (block 2, pos 0) - { - BlockNum: 2, - BlockPos: 0, - TxHash: common.HexToHash("0x221"), - GlobalIndex: big.NewInt(100), - OriginNetwork: 99, - OriginAddress: common.HexToAddress("0xc1"), - DestinationAddress: common.HexToAddress("0xd1"), - Amount: big.NewInt(999), - ProofLocalExitRoot: types.Proof{common.HexToHash("0x3a")}, - ProofRollupExitRoot: types.Proof{common.HexToHash("0x3b")}, - MainnetExitRoot: common.HexToHash("0x3c"), - RollupExitRoot: common.HexToHash("0x3d"), - GlobalExitRoot: common.HexToHash("0x3e"), - DestinationNetwork: 88, - Metadata: []byte("index1_new"), - IsMessage: true, - BlockTimestamp: 2000, - Type: ClaimEvent, - }, - // claims[8] - Newest claim with GlobalIndex=200 (block 2, pos 1) - { - BlockNum: 2, - BlockPos: 1, - TxHash: common.HexToHash("0x222"), - GlobalIndex: big.NewInt(200), - OriginNetwork: 77, - OriginAddress: common.HexToAddress("0xc2"), - DestinationAddress: common.HexToAddress("0xd2"), - Amount: big.NewInt(777), - ProofLocalExitRoot: types.Proof{common.HexToHash("0x4a")}, - ProofRollupExitRoot: types.Proof{common.HexToHash("0x4b")}, - MainnetExitRoot: common.HexToHash("0x4c"), - RollupExitRoot: common.HexToHash("0x4d"), - GlobalExitRoot: common.HexToHash("0x4e"), - DestinationNetwork: 66, - Metadata: []byte("index2_new"), - IsMessage: false, - BlockTimestamp: 2001, - Type: DetailedClaimEvent, - }, - // claims[9] - Same block, pos 0 with GlobalIndex=123 - { - BlockNum: 1, - BlockPos: 0, - TxHash: common.HexToHash("0x111"), - GlobalIndex: big.NewInt(123), - OriginNetwork: 1, - OriginAddress: common.HexToAddress("0xaaa"), - DestinationAddress: common.HexToAddress("0xbbb"), - Amount: big.NewInt(100), - ProofLocalExitRoot: types.Proof{common.HexToHash("0x1a")}, - ProofRollupExitRoot: 
types.Proof{common.HexToHash("0x1b")}, - MainnetExitRoot: common.HexToHash("0x1c"), - RollupExitRoot: common.HexToHash("0x1d"), - GlobalExitRoot: common.HexToHash("0x1e"), - DestinationNetwork: 2, - Metadata: []byte("pos0"), - IsMessage: false, - BlockTimestamp: 1000, - Type: ClaimEvent, - }, - // claims[10] - Same block, pos 1 with GlobalIndex=123 - { - BlockNum: 1, - BlockPos: 1, - TxHash: common.HexToHash("0x112"), - GlobalIndex: big.NewInt(123), - OriginNetwork: 99, - OriginAddress: common.HexToAddress("0xccc"), - DestinationAddress: common.HexToAddress("0xddd"), - Amount: big.NewInt(200), - ProofLocalExitRoot: types.Proof{common.HexToHash("0x2a")}, - ProofRollupExitRoot: types.Proof{common.HexToHash("0x2b")}, - MainnetExitRoot: common.HexToHash("0x2c"), - RollupExitRoot: common.HexToHash("0x2d"), - GlobalExitRoot: common.HexToHash("0x2e"), - DestinationNetwork: 88, - Metadata: []byte("pos1"), - IsMessage: true, - BlockTimestamp: 1001, - Type: ClaimEvent, - }, - // claims[11] - Same block, pos 2 with GlobalIndex=123 - { - BlockNum: 1, - BlockPos: 2, - TxHash: common.HexToHash("0x113"), - GlobalIndex: big.NewInt(123), - OriginNetwork: 77, - OriginAddress: common.HexToAddress("0xeee"), - DestinationAddress: common.HexToAddress("0xfff"), - Amount: big.NewInt(300), - ProofLocalExitRoot: types.Proof{common.HexToHash("0x3a")}, - ProofRollupExitRoot: types.Proof{common.HexToHash("0x3b")}, - MainnetExitRoot: common.HexToHash("0x3c"), - RollupExitRoot: common.HexToHash("0x3d"), - GlobalExitRoot: common.HexToHash("0x3e"), - DestinationNetwork: 66, - Metadata: []byte("pos2"), - IsMessage: false, - BlockTimestamp: 1002, - Type: ClaimEvent, - }, - // claims[12] - Partial range GlobalIndex=456 (block 1) - { - BlockNum: 1, - BlockPos: 0, - TxHash: common.HexToHash("0x111"), - GlobalIndex: big.NewInt(456), - OriginNetwork: 1, - OriginAddress: common.HexToAddress("0xaaa"), - DestinationAddress: common.HexToAddress("0xbbb"), - Amount: big.NewInt(100), - ProofLocalExitRoot: 
types.Proof{common.HexToHash("0x1a")}, - ProofRollupExitRoot: types.Proof{common.HexToHash("0x1b")}, - MainnetExitRoot: common.HexToHash("0x1c"), - RollupExitRoot: common.HexToHash("0x1d"), - GlobalExitRoot: common.HexToHash("0x1e"), - DestinationNetwork: 2, - Metadata: []byte("block1"), - IsMessage: false, - BlockTimestamp: 1000, - Type: ClaimEvent, - }, - // claims[13] - Partial range GlobalIndex=456 (block 2) - { - BlockNum: 2, - BlockPos: 0, - TxHash: common.HexToHash("0x222"), - GlobalIndex: big.NewInt(456), - OriginNetwork: 99, - OriginAddress: common.HexToAddress("0xccc"), - DestinationAddress: common.HexToAddress("0xddd"), - Amount: big.NewInt(200), - ProofLocalExitRoot: types.Proof{common.HexToHash("0x2a")}, - ProofRollupExitRoot: types.Proof{common.HexToHash("0x2b")}, - MainnetExitRoot: common.HexToHash("0x2c"), - RollupExitRoot: common.HexToHash("0x2d"), - GlobalExitRoot: common.HexToHash("0x2e"), - DestinationNetwork: 88, - Metadata: []byte("block2"), - IsMessage: true, - BlockTimestamp: 2000, - Type: ClaimEvent, - }, - // claims[14] - Partial range GlobalIndex=456 (block 3) - { - BlockNum: 3, - BlockPos: 0, - TxHash: common.HexToHash("0x333"), - GlobalIndex: big.NewInt(456), - OriginNetwork: 77, - OriginAddress: common.HexToAddress("0xeee"), - DestinationAddress: common.HexToAddress("0xfff"), - Amount: big.NewInt(300), - ProofLocalExitRoot: types.Proof{common.HexToHash("0x3a")}, - ProofRollupExitRoot: types.Proof{common.HexToHash("0x3b")}, - MainnetExitRoot: common.HexToHash("0x3c"), - RollupExitRoot: common.HexToHash("0x3d"), - GlobalExitRoot: common.HexToHash("0x3e"), - DestinationNetwork: 66, - Metadata: []byte("block3"), - IsMessage: false, - BlockTimestamp: 3000, - Type: ClaimEvent, - }, - // claims[15] - Ordering test GlobalIndex=200 (block 1) - { - BlockNum: 1, - BlockPos: 0, - TxHash: common.HexToHash("0x111"), - GlobalIndex: big.NewInt(200), - OriginNetwork: 1, - OriginAddress: common.HexToAddress("0xa1"), - DestinationAddress: 
common.HexToAddress("0xb1"), - Amount: big.NewInt(100), - ProofLocalExitRoot: types.Proof{common.HexToHash("0x1a")}, - ProofRollupExitRoot: types.Proof{common.HexToHash("0x1b")}, - MainnetExitRoot: common.HexToHash("0x1c"), - RollupExitRoot: common.HexToHash("0x1d"), - GlobalExitRoot: common.HexToHash("0x1e"), - DestinationNetwork: 2, - Metadata: []byte("200"), - IsMessage: false, - BlockTimestamp: 1000, - Type: ClaimEvent, - }, - // claims[16] - Ordering test GlobalIndex=100 (block 2) - { - BlockNum: 2, - BlockPos: 0, - TxHash: common.HexToHash("0x222"), - GlobalIndex: big.NewInt(100), - OriginNetwork: 2, - OriginAddress: common.HexToAddress("0xa2"), - DestinationAddress: common.HexToAddress("0xb2"), - Amount: big.NewInt(200), - ProofLocalExitRoot: types.Proof{common.HexToHash("0x2a")}, - ProofRollupExitRoot: types.Proof{common.HexToHash("0x2b")}, - MainnetExitRoot: common.HexToHash("0x2c"), - RollupExitRoot: common.HexToHash("0x2d"), - GlobalExitRoot: common.HexToHash("0x2e"), - DestinationNetwork: 3, - Metadata: []byte("100"), - IsMessage: true, - BlockTimestamp: 2000, - Type: ClaimEvent, - }, - // claims[17] - Ordering test GlobalIndex=150 (block 3) - { - BlockNum: 3, - BlockPos: 0, - TxHash: common.HexToHash("0x333"), - GlobalIndex: big.NewInt(150), - OriginNetwork: 3, - OriginAddress: common.HexToAddress("0xa3"), - DestinationAddress: common.HexToAddress("0xb3"), - Amount: big.NewInt(300), - ProofLocalExitRoot: types.Proof{common.HexToHash("0x3a")}, - ProofRollupExitRoot: types.Proof{common.HexToHash("0x3b")}, - MainnetExitRoot: common.HexToHash("0x3c"), - RollupExitRoot: common.HexToHash("0x3d"), - GlobalExitRoot: common.HexToHash("0x3e"), - DestinationNetwork: 4, - Metadata: []byte("150"), - IsMessage: false, - BlockTimestamp: 3000, - Type: ClaimEvent, - }, - // claims[18] - block 3, pos 1 with GlobalIndex=200 - { - BlockNum: 3, - BlockPos: 1, - TxHash: common.HexToHash("0x112"), - GlobalIndex: big.NewInt(200), - OriginNetwork: 3, - OriginAddress: 
common.HexToAddress("0xccc"), - DestinationAddress: common.HexToAddress("0xddd"), - Amount: big.NewInt(200), - ProofLocalExitRoot: types.Proof{common.HexToHash("0x2ab")}, - ProofRollupExitRoot: types.Proof{common.HexToHash("0x2bc")}, - MainnetExitRoot: common.HexToHash("0x2ce"), - RollupExitRoot: common.HexToHash("0x2df"), - GlobalExitRoot: common.HexToHash("0x2ee"), - DestinationNetwork: 88, - Metadata: []byte("block3pos1"), - IsMessage: true, - BlockTimestamp: 3001, - Type: ClaimEvent, - }, - } - - testCases := []struct { - name string - setupBlocks func() []sync.Block - queryFrom uint64 - queryTo uint64 - expectedCount int - errorContains string - validateResults func(t *testing.T, claims []Claim) - }{ - { - name: "non-compacted mode with different claims", - setupBlocks: func() []sync.Block { - return []sync.Block{ - { - Num: 1, - Hash: common.HexToHash("0x1"), - Events: []any{ - Event{Claim: claims[0]}, - }, - }, - { - Num: 2, - Hash: common.HexToHash("0x2"), - Events: []any{ - Event{Claim: claims[1]}, - }, - }, - } - }, - queryFrom: 1, - queryTo: 2, - expectedCount: 2, - validateResults: func(t *testing.T, claims []Claim) { - t.Helper() - require.Len(t, claims, 2) - require.Equal(t, big.NewInt(1), claims[0].GlobalIndex) - require.Equal(t, big.NewInt(2), claims[1].GlobalIndex) - }, - }, - { - name: "compacted mode with no duplicates", - setupBlocks: func() []sync.Block { - return []sync.Block{ - { - Num: 1, - Hash: common.HexToHash("0x1"), - Events: []any{ - Event{Claim: claims[0]}, - }, - }, - { - Num: 2, - Hash: common.HexToHash("0x2"), - Events: []any{ - Event{Claim: claims[1]}, - }, - }, - } - }, - queryFrom: 1, - queryTo: 2, - expectedCount: 2, - validateResults: func(t *testing.T, claims []Claim) { - t.Helper() - require.Len(t, claims, 2) - require.Equal(t, big.NewInt(1), claims[0].GlobalIndex) - require.Equal(t, big.NewInt(2), claims[1].GlobalIndex) - }, - }, - { - name: "compacted mode with duplicates across blocks", - setupBlocks: func() []sync.Block 
{ - return []sync.Block{ - { - Num: 1, - Hash: common.HexToHash("0x1"), - Events: []any{ - Event{Claim: claims[2]}, - }, - }, - { - Num: 2, - Hash: common.HexToHash("0x2"), - Events: []any{ - Event{Claim: claims[3]}, - }, - }, - { - Num: 3, - Hash: common.HexToHash("0x3"), - Events: []any{ - Event{Claim: claims[4]}, - }, - }, - } - }, - queryFrom: 1, - queryTo: 3, - expectedCount: 1, - validateResults: func(t *testing.T, claims []Claim) { - t.Helper() - claim := claims[0] - // Fields from oldest claim (block 1) - should be preserved - require.Equal(t, uint64(1), claim.BlockNum, "BlockNum should be from oldest claim") - require.Equal(t, uint64(0), claim.BlockPos, "BlockPos should be from oldest claim") - require.Equal(t, common.HexToHash("0x111"), claim.TxHash, "TxHash should be from oldest claim") - require.Equal(t, big.NewInt(100), claim.GlobalIndex) - require.Equal(t, uint32(1), claim.OriginNetwork, "OriginNetwork should be from oldest claim") - require.Equal(t, common.HexToAddress("0xaaa"), claim.OriginAddress, "OriginAddress should be from oldest claim") - require.Equal(t, common.HexToAddress("0xbbb"), claim.DestinationAddress, "DestinationAddress should be from oldest claim") - require.Equal(t, big.NewInt(100), claim.Amount, "Amount should be from oldest claim") - require.Equal(t, uint32(2), claim.DestinationNetwork, "DestinationNetwork should be from oldest claim") - require.Equal(t, []byte("original_metadata"), claim.Metadata, "Metadata should be from oldest claim") - require.Equal(t, false, claim.IsMessage, "IsMessage should be from oldest claim") - require.Equal(t, uint64(1000), claim.BlockTimestamp, "BlockTimestamp should be from oldest claim") - // Fields from newest claim (block 3) - should be updated - require.Equal(t, common.HexToHash("0x3a"), claim.ProofLocalExitRoot[0], "ProofLocalExitRoot should be from newest claim") - require.Equal(t, common.HexToHash("0x3b"), claim.ProofRollupExitRoot[0], "ProofRollupExitRoot should be from newest claim") - 
require.Equal(t, common.HexToHash("0x3c"), claim.MainnetExitRoot, "MainnetExitRoot should be from newest claim") - require.Equal(t, common.HexToHash("0x3d"), claim.RollupExitRoot, "RollupExitRoot should be from newest claim") - require.Equal(t, common.HexToHash("0x3e"), claim.GlobalExitRoot, "GlobalExitRoot should be from newest claim") - }, - }, - { - name: "compacted mode with multiple duplicate groups", - setupBlocks: func() []sync.Block { - return []sync.Block{ - { - Num: 1, - Hash: common.HexToHash("0x1"), - Events: []any{ - Event{Claim: claims[5]}, // GlobalIndex=100, oldest - Event{Claim: claims[6]}, // GlobalIndex=200, oldest - }, - }, - { - Num: 2, - Hash: common.HexToHash("0x2"), - Events: []any{ - Event{Claim: claims[7]}, // GlobalIndex=100, newest - Event{Claim: claims[8]}, // GlobalIndex=200, newest - }, - }, - } - }, - queryFrom: 1, - queryTo: 2, - expectedCount: 2, - validateResults: func(t *testing.T, claims []Claim) { - t.Helper() - // First claim (globalIndex1=100) - claim1 := claims[0] - require.Equal(t, big.NewInt(100), claim1.GlobalIndex) - // Fields from oldest claim (block 1, pos 0) - should be preserved - require.Equal(t, uint64(1), claim1.BlockNum, "Claim1: BlockNum should be from oldest") - require.Equal(t, uint64(0), claim1.BlockPos, "Claim1: BlockPos should be from oldest") - require.Equal(t, uint32(1), claim1.OriginNetwork, "Claim1: OriginNetwork should be from oldest") - require.Equal(t, common.HexToAddress("0xa1"), claim1.OriginAddress, "Claim1: OriginAddress should be from oldest") - require.Equal(t, common.HexToAddress("0xb1"), claim1.DestinationAddress, "Claim1: DestinationAddress should be from oldest") - require.Equal(t, big.NewInt(100), claim1.Amount, "Claim1: Amount should be from oldest") - require.Equal(t, uint32(2), claim1.DestinationNetwork, "Claim1: DestinationNetwork should be from oldest") - require.Equal(t, []byte("index1_old"), claim1.Metadata, "Claim1: Metadata should be from oldest") - require.Equal(t, false, 
claim1.IsMessage, "Claim1: IsMessage should be from oldest") - require.Equal(t, uint64(1000), claim1.BlockTimestamp, "Claim1: BlockTimestamp should be from oldest") - // Fields from newest claim (block 2, pos 0) - should be updated - require.Equal(t, common.HexToHash("0x3a"), claim1.ProofLocalExitRoot[0], "Claim1: ProofLocalExitRoot should be from newest") - require.Equal(t, common.HexToHash("0x3b"), claim1.ProofRollupExitRoot[0], "Claim1: ProofRollupExitRoot should be from newest") - require.Equal(t, common.HexToHash("0x3c"), claim1.MainnetExitRoot, "Claim1: MainnetExitRoot should be from newest") - require.Equal(t, common.HexToHash("0x3d"), claim1.RollupExitRoot, "Claim1: RollupExitRoot should be from newest") - require.Equal(t, common.HexToHash("0x3e"), claim1.GlobalExitRoot, "Claim1: GlobalExitRoot should be from newest") - - // Second claim (globalIndex2=200) - claim2 := claims[1] - require.Equal(t, big.NewInt(200), claim2.GlobalIndex) - // Fields from oldest claim (block 1, pos 1) - should be preserved - require.Equal(t, uint64(1), claim2.BlockNum, "Claim2: BlockNum should be from oldest") - require.Equal(t, uint64(1), claim2.BlockPos, "Claim2: BlockPos should be from oldest") - require.Equal(t, uint32(3), claim2.OriginNetwork, "Claim2: OriginNetwork should be from oldest") - require.Equal(t, common.HexToAddress("0xa2"), claim2.OriginAddress, "Claim2: OriginAddress should be from oldest") - require.Equal(t, common.HexToAddress("0xb2"), claim2.DestinationAddress, "Claim2: DestinationAddress should be from oldest") - require.Equal(t, big.NewInt(200), claim2.Amount, "Claim2: Amount should be from oldest") - require.Equal(t, uint32(4), claim2.DestinationNetwork, "Claim2: DestinationNetwork should be from oldest") - require.Equal(t, []byte("index2_old"), claim2.Metadata, "Claim2: Metadata should be from oldest") - require.Equal(t, true, claim2.IsMessage, "Claim2: IsMessage should be from oldest") - require.Equal(t, uint64(1001), claim2.BlockTimestamp, "Claim2: 
BlockTimestamp should be from oldest") - // Fields from newest claim (block 2, pos 1) - should be updated - require.Equal(t, common.HexToHash("0x4a"), claim2.ProofLocalExitRoot[0], "Claim2: ProofLocalExitRoot should be from newest") - require.Equal(t, common.HexToHash("0x4b"), claim2.ProofRollupExitRoot[0], "Claim2: ProofRollupExitRoot should be from newest") - require.Equal(t, common.HexToHash("0x4c"), claim2.MainnetExitRoot, "Claim2: MainnetExitRoot should be from newest") - require.Equal(t, common.HexToHash("0x4d"), claim2.RollupExitRoot, "Claim2: RollupExitRoot should be from newest") - require.Equal(t, common.HexToHash("0x4e"), claim2.GlobalExitRoot, "Claim2: GlobalExitRoot should be from newest") - }, - }, - { - name: "compacted mode same block multiple positions", - setupBlocks: func() []sync.Block { - return []sync.Block{ - { - Num: 1, - Hash: common.HexToHash("0x1"), - Events: []any{ - Event{Claim: claims[9]}, // pos 0 - oldest - Event{Claim: claims[10]}, // pos 1 - middle - Event{Claim: claims[11]}, // pos 2 - newest - }, - }, - } - }, - queryFrom: 1, - queryTo: 1, - expectedCount: 1, - validateResults: func(t *testing.T, claims []Claim) { - t.Helper() - claim := claims[0] - // Fields from oldest claim (block 1, pos 0) - should be preserved - require.Equal(t, uint64(1), claim.BlockNum, "BlockNum should be from oldest") - require.Equal(t, uint64(0), claim.BlockPos, "BlockPos should be from oldest (pos 0)") - require.Equal(t, uint32(1), claim.OriginNetwork, "OriginNetwork should be from oldest") - require.Equal(t, common.HexToAddress("0xaaa"), claim.OriginAddress, "OriginAddress should be from oldest") - require.Equal(t, common.HexToAddress("0xbbb"), claim.DestinationAddress, "DestinationAddress should be from oldest") - require.Equal(t, big.NewInt(100), claim.Amount, "Amount should be from oldest") - require.Equal(t, uint32(2), claim.DestinationNetwork, "DestinationNetwork should be from oldest") - require.Equal(t, []byte("pos0"), claim.Metadata, "Metadata 
should be from oldest (pos0)") - require.Equal(t, false, claim.IsMessage, "IsMessage should be from oldest") - require.Equal(t, uint64(1000), claim.BlockTimestamp, "BlockTimestamp should be from oldest") - // Fields from newest claim (block 1, pos 2) - should be updated - require.Equal(t, common.HexToHash("0x3a"), claim.ProofLocalExitRoot[0], "ProofLocalExitRoot should be from newest (pos 2)") - require.Equal(t, common.HexToHash("0x3b"), claim.ProofRollupExitRoot[0], "ProofRollupExitRoot should be from newest (pos 2)") - require.Equal(t, common.HexToHash("0x3c"), claim.MainnetExitRoot, "MainnetExitRoot should be from newest (pos 2)") - require.Equal(t, common.HexToHash("0x3d"), claim.RollupExitRoot, "RollupExitRoot should be from newest (pos 2)") - require.Equal(t, common.HexToHash("0x3e"), claim.GlobalExitRoot, "GlobalExitRoot should be from newest (pos 2)") - }, - }, - { - name: "compacted mode empty range", - setupBlocks: func() []sync.Block { - return []sync.Block{ - { - Num: 1, - Hash: common.HexToHash("0x1"), - Events: []any{}, - }, - } - }, - queryFrom: 1, - queryTo: 1, - expectedCount: 0, - }, - { - name: "compacted mode partial range", - setupBlocks: func() []sync.Block { - return []sync.Block{ - { - Num: 1, - Hash: common.HexToHash("0x1"), - Events: []any{ - Event{Claim: claims[12]}, // block 1 - }, - }, - { - Num: 2, - Hash: common.HexToHash("0x2"), - Events: []any{ - Event{Claim: claims[13]}, // block 2 - }, - }, - { - Num: 3, - Hash: common.HexToHash("0x3"), - Events: []any{ - Event{Claim: claims[14]}, // block 3 - }, - }, - } - }, - queryFrom: 2, - queryTo: 3, - expectedCount: 0, // Changed from 1 to 0: globally oldest claim (block 1) is outside query range - validateResults: func(t *testing.T, claims []Claim) { - t.Helper() - // Case 3: Since globally oldest claim (block 1) is outside the query range (2-3), - // we should not return anything for this global_index (no unset claim exists) - require.Empty(t, claims, "should return no claims when 
globally oldest is outside range") - }, - }, - { - name: "ordering preserved by block number and position", - setupBlocks: func() []sync.Block { - return []sync.Block{ - { - Num: 1, - Hash: common.HexToHash("0x1"), - Events: []any{ - Event{Claim: claims[15]}, // GlobalIndex=200 - }, - }, - { - Num: 2, - Hash: common.HexToHash("0x2"), - Events: []any{ - Event{Claim: claims[16]}, // GlobalIndex=100 - }, - }, - { - Num: 3, - Hash: common.HexToHash("0x3"), - Events: []any{ - Event{Claim: claims[17]}, // GlobalIndex=150 - }, - }, - } - }, - queryFrom: 1, - queryTo: 3, - expectedCount: 3, - validateResults: func(t *testing.T, claims []Claim) { - t.Helper() - // Should be ordered by block_num ASC, not by global_index value - require.Equal(t, big.NewInt(200), claims[0].GlobalIndex) - require.Equal(t, big.NewInt(100), claims[1].GlobalIndex) - require.Equal(t, big.NewInt(150), claims[2].GlobalIndex) - }, - }, - { - name: "invalid block range - fromBlock greater than toBlock", - setupBlocks: func() []sync.Block { - return []sync.Block{ - { - Num: 1, - Hash: common.HexToHash("0x1"), - Events: []any{ - Event{Claim: claims[0]}, - }, - }, - { - Num: 2, - Hash: common.HexToHash("0x2"), - Events: []any{ - Event{Claim: claims[1]}, - }, - }, - { - Num: 3, - Hash: common.HexToHash("0x3"), - Events: []any{}, - }, - { - Num: 4, - Hash: common.HexToHash("0x4"), - Events: []any{}, - }, - { - Num: 5, - Hash: common.HexToHash("0x5"), - Events: []any{}, - }, - } - }, - queryFrom: 5, - queryTo: 3, - expectedCount: 0, - validateResults: func(t *testing.T, claims []Claim) { - t.Helper() - // Should return empty array for invalid range (WHERE block_num >= 5 AND block_num <= 3 returns nothing) - require.Empty(t, claims) - }, - }, - { - name: "fromBlock = 0 edge case", - setupBlocks: func() []sync.Block { - return []sync.Block{ - { - Num: 1, - Hash: common.HexToHash("0x1"), - Events: []any{ - Event{Claim: claims[0]}, - }, - }, - { - Num: 2, - Hash: common.HexToHash("0x2"), - Events: []any{ - 
Event{Claim: claims[1]}, - }, - }, - { - Num: 3, - Hash: common.HexToHash("0x3"), - Events: []any{ - Event{Claim: claims[4]}, // GlobalIndex=100, BlockNum=3 - }, - }, - } - }, - queryFrom: 0, - queryTo: 3, - expectedCount: 3, - validateResults: func(t *testing.T, claims []Claim) { - t.Helper() - // Should return all claims from block 1 onwards (block 0 doesn't exist, WHERE clause: block_num >= 0) - require.Len(t, claims, 3) - require.Equal(t, big.NewInt(1), claims[0].GlobalIndex) - require.Equal(t, uint64(1), claims[0].BlockNum) - require.Equal(t, big.NewInt(2), claims[1].GlobalIndex) - require.Equal(t, uint64(2), claims[1].BlockNum) - require.Equal(t, big.NewInt(100), claims[2].GlobalIndex) - require.Equal(t, uint64(3), claims[2].BlockNum) - }, - }, - { - name: "claims at both fromBlock and toBlock boundaries with compaction", - setupBlocks: func() []sync.Block { - return []sync.Block{ - { - Num: 1, - Hash: common.HexToHash("0x1"), - Events: []any{ - Event{Claim: claims[0]}, // GlobalIndex=1, outside range - }, - }, - { - Num: 2, - Hash: common.HexToHash("0x2"), - Events: []any{ - Event{Claim: claims[3]}, // GlobalIndex=100, at fromBlock boundary (oldest) - }, - }, - { - Num: 3, - Hash: common.HexToHash("0x3"), - Events: []any{ - Event{Claim: claims[4]}, // GlobalIndex=100, at toBlock boundary (newest, should provide proofs) - }, - }, - { - Num: 4, - Hash: common.HexToHash("0x4"), - Events: []any{}, - }, - } - }, - queryFrom: 2, - queryTo: 3, - expectedCount: 1, - validateResults: func(t *testing.T, claims []Claim) { - t.Helper() - // Should compact GlobalIndex=100 appearing at both boundaries (blocks 2 and 3) - require.Len(t, claims, 1) - require.Equal(t, big.NewInt(100), claims[0].GlobalIndex) - require.Equal(t, uint64(2), claims[0].BlockNum, "Should preserve oldest block (fromBlock boundary)") - require.Equal(t, uint64(0), claims[0].BlockPos, "Should preserve oldest BlockPos") - // Verify compaction: oldest metadata from block 2, newest proofs from block 3 - 
require.Equal(t, []byte("middle_metadata"), claims[0].Metadata, "Should preserve oldest metadata from block 2") - require.Equal(t, common.HexToHash("0x3a"), claims[0].ProofLocalExitRoot[0], "Should use newest proof from block 3") - require.Equal(t, common.HexToHash("0x3c"), claims[0].MainnetExitRoot, "Should use newest MainnetExitRoot from block 3") - }, - }, - { - name: "block range with gaps in processed blocks", - setupBlocks: func() []sync.Block { - return []sync.Block{ - { - Num: 1, - Hash: common.HexToHash("0x1"), - Events: []any{ - Event{Claim: claims[0]}, // GlobalIndex=1 - }, - }, - { - Num: 2, - Hash: common.HexToHash("0x2"), - Events: []any{}, // Block 2 has no claims (processed but empty) - }, - // Block 3 is completely skipped (not processed) - { - Num: 4, - Hash: common.HexToHash("0x4"), - Events: []any{ - Event{Claim: claims[1]}, // GlobalIndex=2 - }, - }, - { - Num: 5, - Hash: common.HexToHash("0x5"), - Events: []any{}, // Block 5 has no claims (processed but empty) - }, - } - }, - queryFrom: 1, - queryTo: 5, - expectedCount: 2, - validateResults: func(t *testing.T, claims []Claim) { - t.Helper() - // Should return claims from blocks that exist in the range - // Block 2 has no events, block 3 was not processed at all - // But claims from blocks 1 and 4 should still be returned - require.Len(t, claims, 2) - require.Equal(t, big.NewInt(1), claims[0].GlobalIndex, "First claim from block 1") - require.Equal(t, big.NewInt(2), claims[1].GlobalIndex, "Second claim - claims[1] was added to block 4 but has BlockNum=2 in its data") - require.Equal(t, uint64(1), claims[0].BlockNum) - require.Equal(t, uint64(2), claims[1].BlockNum, "BlockNum comes from claim data, not the block it was added to") - }, - }, - { - name: "Case 1: don't compact if unset claim exists for global_index", - setupBlocks: func() []sync.Block { - return []sync.Block{ - { - Num: 1, - Hash: common.HexToHash("0x1"), - Events: []any{ - Event{Claim: claims[2]}, // GlobalIndex=100, block 1, pos 
1 - }, - }, - { - Num: 2, - Hash: common.HexToHash("0x2"), - Events: []any{ - Event{UnsetClaim: &UnsetClaim{ // Unset claim for GlobalIndex=1 - GlobalIndex: big.NewInt(100), - BlockNum: 2, - BlockPos: 0, - TxHash: common.Hash{}, - UnsetGlobalIndexHashChain: common.Hash{}, - }}, - }, - }, - { - Num: 3, - Hash: common.HexToHash("0x3"), - Events: []any{ - Event{Claim: claims[4]}, // GlobalIndex=1, block 3, pos 0 - }, - }, - } - }, - queryFrom: 1, - queryTo: 3, - expectedCount: 2, // Should return all claims without compacting GlobalIndex=100 due to unset claim - validateResults: func(t *testing.T, resultClaims []Claim) { - t.Helper() - // Should return: claim (GI=100, block 1), claim (GI=100, block 3) - require.Len(t, resultClaims, 2, "should not compact GlobalIndex=100 when unset claim exists") - require.Equal(t, *claims[2], resultClaims[0]) - require.Equal(t, *claims[4], resultClaims[1]) - }, - }, - { - name: "Case 2: compact if no unset claim exists", - setupBlocks: func() []sync.Block { - return []sync.Block{ - { - Num: 1, - Hash: common.HexToHash("0x1"), - Events: []any{ - Event{Claim: claims[2]}, // GlobalIndex=100, block 1 (oldest) - }, - }, - { - Num: 2, - Hash: common.HexToHash("0x2"), - Events: []any{ - Event{Claim: claims[3]}, // GlobalIndex=100, block 2 - }, - }, - { - Num: 3, - Hash: common.HexToHash("0x3"), - Events: []any{ - Event{Claim: claims[4]}, // GlobalIndex=100, block 3 (newest) - // No unset claim - should compact - }, - }, - } - }, - queryFrom: 1, - queryTo: 3, - expectedCount: 1, // Should return 1 compacted claim - validateResults: func(t *testing.T, claims []Claim) { - t.Helper() - // Should compact all 3 claims into 1 - require.Len(t, claims, 1, "should compact when no unset claim exists") - claim := claims[0] - require.Equal(t, big.NewInt(100), claim.GlobalIndex) - // Metadata from oldest (block 1) - require.Equal(t, uint64(1), claim.BlockNum, "should preserve oldest block") - require.Equal(t, uint64(0), claim.BlockPos, "should preserve 
oldest position") - require.Equal(t, []byte("original_metadata"), claim.Metadata, "should preserve oldest metadata") - require.Equal(t, big.NewInt(100), claim.Amount, "should preserve oldest amount") - require.Equal(t, uint32(1), claim.OriginNetwork, "should preserve oldest origin network") - // Proofs from newest (block 3) - require.Equal(t, common.HexToHash("0x3a"), claim.ProofLocalExitRoot[0], "should use newest proof") - require.Equal(t, common.HexToHash("0x3c"), claim.MainnetExitRoot, "should use newest MainnetExitRoot") - require.Equal(t, common.HexToHash("0x3d"), claim.RollupExitRoot, "should use newest RollupExitRoot") - require.Equal(t, common.HexToHash("0x3e"), claim.GlobalExitRoot, "should use newest GlobalExitRoot") - }, - }, - { - name: "Case 3: don't return if globally oldest is outside query range", - setupBlocks: func() []sync.Block { - return []sync.Block{ - { - Num: 1, - Hash: common.HexToHash("0x1"), - Events: []any{ - Event{Claim: claims[2]}, // GlobalIndex=100, block 1 (globally oldest) - }, - }, - { - Num: 2, - Hash: common.HexToHash("0x2"), - Events: []any{ - Event{Claim: claims[3]}, // GlobalIndex=100, block 2 - }, - }, - { - Num: 3, - Hash: common.HexToHash("0x3"), - Events: []any{ - Event{Claim: claims[4]}, // GlobalIndex=100, block 3 (newest) - }, - }, - } - }, - queryFrom: 2, // Query starts at block 2, but globally oldest is at block 1 - queryTo: 3, - expectedCount: 0, // Should return nothing because globally oldest (block 1) is outside range - validateResults: func(t *testing.T, claims []Claim) { - t.Helper() - // Should return no claims because the globally oldest claim (block 1) is outside the query range (2-3) - require.Empty(t, claims, "should not return claims when globally oldest is outside query range") - }, - }, - { - name: "Case 3 exception: return if unset claim exists even when globally oldest is outside range", - setupBlocks: func() []sync.Block { - return []sync.Block{ - { - Num: 1, - Hash: common.HexToHash("0x1"), - 
Events: []any{ - Event{Claim: claims[0]}, // GlobalIndex=1, block 1, pos 0 - }, - }, - { - Num: 2, - Hash: common.HexToHash("0x2"), - Events: []any{ - Event{UnsetClaim: &UnsetClaim{ // Unset claim for GlobalIndex=100 - GlobalIndex: big.NewInt(1), - BlockNum: 1, - BlockPos: 1, - TxHash: common.Hash{}, - UnsetGlobalIndexHashChain: common.Hash{}, - }}, - }, - }, - { - Num: 3, - Hash: common.HexToHash("0x3"), - Events: []any{ - Event{Claim: claims[4]}, // GlobalIndex=1, block 3, pos 0 - }, - }, - } - }, - queryFrom: 3, // Query starts at block 3, globally oldest is at block 1 - queryTo: 3, - expectedCount: 1, // Should return claim from block 3 (uncompacted) because unset claim exists - validateResults: func(t *testing.T, resultClaims []Claim) { - t.Helper() - // Should return claim from block 3 even though globally oldest is outside range - // because an unset claim exists for this global_index - require.Len(t, resultClaims, 1, "should return claims when unset claim exists, even if globally oldest is outside range") - require.Equal(t, *claims[4], resultClaims[0]) - }, - }, - { - name: "Multiple global_indexes with different compaction rules", - setupBlocks: func() []sync.Block { - return []sync.Block{ - { - Num: 1, - Hash: common.HexToHash("0x1"), - Events: []any{ - Event{Claim: claims[2]}, // GlobalIndex=100, block 1, pos 0 (globally oldest) - Event{Claim: claims[6]}, // GlobalIndex=200, block 1, pos 1 (globally oldest) - }, - }, - { - Num: 2, - Hash: common.HexToHash("0x2"), - Events: []any{ - Event{UnsetClaim: &UnsetClaim{ // Unset claim for GlobalIndex=100 - GlobalIndex: big.NewInt(100), - BlockNum: 1, - BlockPos: 1, - }}, - }, - }, - { - Num: 3, - Hash: common.HexToHash("0x2"), - Events: []any{ - Event{Claim: claims[4]}, // GlobalIndex=100, block 3, pos 0 - Event{Claim: claims[18]}, // GlobalIndex=200, block 3, pos 1 - }, - }, - } - }, - queryFrom: 3, // Query block 3 - queryTo: 3, - expectedCount: 1, // GlobalIndex=100: 1 claim (uncompacted, block 2 due to unset 
claim) - // GlobalIndex=456: 0 claims (globally oldest is at block 1, outside range) - // GlobalIndex=1: 0 claims (only exists at block 1, outside range) - validateResults: func(t *testing.T, resultClaims []Claim) { - t.Helper() - require.Len(t, resultClaims, 1, "should apply different rules per global_index") - // Should be GlobalIndex=100 (the one with unset claim) - require.Equal(t, *claims[4], resultClaims[0]) - }, - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - ctx := context.Background() - dbPath := t.TempDir() + "/test.db" - p, err := newProcessor(dbPath, "test", log.GetDefaultLogger(), time.Second*10, nil) - require.NoError(t, err) - - // Setup blocks - blocks := tc.setupBlocks() - for _, block := range blocks { - err := p.ProcessBlock(ctx, block) - require.NoError(t, err) - } - - // Execute test - claims, err := p.GetClaims(ctx, tc.queryFrom, tc.queryTo) - - // Validate error expectations - if tc.errorContains != "" { - require.ErrorContains(t, err, tc.errorContains) - } else { - // Validate success expectations - require.NoError(t, err) - require.Len(t, claims, tc.expectedCount) - - // Run custom validations if provided - if tc.validateResults != nil { - tc.validateResults(t, claims) - } - } - }) - } -} - -// TestGetClaimsPaged_CompactionAcrossPages tests the compaction behavior when -// claims with the same global_index span across multiple pages -func TestGetClaimsPaged_CompactionAcrossPages(t *testing.T) { - path := path.Join(t.TempDir(), "claimsPaged_compaction.sqlite") - require.NoError(t, migrations.RunMigrations(path)) - logger := log.WithFields("module", "bridge-syncer") - p, err := newProcessor(path, "bridge-syncer", logger, dbQueryTimeout, nil) - require.NoError(t, err) - - ctx := context.Background() - - // Create test scenario: - // - Global index 100: claims at blocks 10 (oldest), 20, 30 (newest with updated proofs) - // - Global index 200: claims at blocks 15 (oldest), 25 (newest with updated proofs) - // 
- Global index 300: single claim at block 35 - // - Global index 400: single claim at block 5 - // - // When ordered DESC by block_num: [35, 30, 25, 20, 15, 10, 5] - // - // Page 1 (size 3): blocks [35, 30, 25] - // - Block 35: global_index=300 (newest) -> INCLUDE (compacted with itself) - // - Block 30: global_index=100 (newest) -> INCLUDE (compacted with block 10) - // - Block 25: global_index=200 (newest) -> INCLUDE (compacted with block 15) - // - // Page 2 (size 3): blocks [20, 15, 10] - // - Block 20: global_index=100 (NOT newest, 30 is newest) -> EXCLUDE - // - Block 15: global_index=200 (NOT newest, 25 is newest) -> EXCLUDE - // - Block 10: global_index=100 (NOT newest, 30 is newest) -> EXCLUDE - // - // Page 3 (size 3): blocks [5] - // - Block 5: global_index=400 (newest) -> INCLUDE (compacted with itself) - - oldProof := types.Proof{} - oldProof[0] = common.HexToHash("0x01") - - newProof := types.Proof{} - newProof[0] = common.HexToHash("0x02") - - claims := []*Claim{ - // Global index 100 - oldest (will be base for compaction) - { - BlockNum: 10, - BlockPos: 0, - TxHash: common.HexToHash("0xa1"), - GlobalIndex: big.NewInt(100), - OriginNetwork: 1, - OriginAddress: common.HexToAddress("0x1111"), - DestinationAddress: common.HexToAddress("0x2222"), - Amount: big.NewInt(1000), - ProofLocalExitRoot: oldProof, - ProofRollupExitRoot: oldProof, - MainnetExitRoot: common.HexToHash("0x3333"), - RollupExitRoot: common.HexToHash("0x4444"), - GlobalExitRoot: common.HexToHash("0x5555"), - DestinationNetwork: 2, - Metadata: []byte("old"), - IsMessage: false, - BlockTimestamp: 1000, - }, - // Global index 200 - oldest - { - BlockNum: 15, - BlockPos: 0, - TxHash: common.HexToHash("0xa2"), - GlobalIndex: big.NewInt(200), - OriginNetwork: 3, - OriginAddress: common.HexToAddress("0x3333"), - DestinationAddress: common.HexToAddress("0x4444"), - Amount: big.NewInt(2000), - ProofLocalExitRoot: oldProof, - ProofRollupExitRoot: oldProof, - MainnetExitRoot: 
common.HexToHash("0x6666"), - RollupExitRoot: common.HexToHash("0x7777"), - GlobalExitRoot: common.HexToHash("0x8888"), - DestinationNetwork: 4, - Metadata: []byte("metadata200"), - IsMessage: true, - BlockTimestamp: 1500, - }, - // Global index 100 - middle - { - BlockNum: 20, - BlockPos: 0, - TxHash: common.HexToHash("0xa3"), - GlobalIndex: big.NewInt(100), - OriginNetwork: 1, - OriginAddress: common.HexToAddress("0x1111"), - DestinationAddress: common.HexToAddress("0x2222"), - Amount: big.NewInt(1000), - ProofLocalExitRoot: oldProof, - ProofRollupExitRoot: oldProof, - MainnetExitRoot: common.HexToHash("0x3333"), - RollupExitRoot: common.HexToHash("0x4444"), - GlobalExitRoot: common.HexToHash("0x5555"), - DestinationNetwork: 2, - Metadata: []byte("should_not_matter"), - IsMessage: false, - BlockTimestamp: 2000, - }, - // Global index 200 - newest (has updated proofs) - { - BlockNum: 25, - BlockPos: 0, - TxHash: common.HexToHash("0xa4"), - GlobalIndex: big.NewInt(200), - OriginNetwork: 3, - OriginAddress: common.HexToAddress("0x3333"), - DestinationAddress: common.HexToAddress("0x4444"), - Amount: big.NewInt(2000), - ProofLocalExitRoot: newProof, // Updated proof - ProofRollupExitRoot: newProof, // Updated proof - MainnetExitRoot: common.HexToHash("0x9999"), // Updated - RollupExitRoot: common.HexToHash("0xaaaa"), // Updated - GlobalExitRoot: common.HexToHash("0xbbbb"), // Updated - DestinationNetwork: 4, - Metadata: []byte("should_not_matter"), - IsMessage: true, - BlockTimestamp: 2500, - }, - // Global index 100 - newest (has updated proofs) - { - BlockNum: 30, - BlockPos: 0, - TxHash: common.HexToHash("0xa5"), - GlobalIndex: big.NewInt(100), - OriginNetwork: 1, - OriginAddress: common.HexToAddress("0x1111"), - DestinationAddress: common.HexToAddress("0x2222"), - Amount: big.NewInt(1000), - ProofLocalExitRoot: newProof, // Updated proof - ProofRollupExitRoot: newProof, // Updated proof - MainnetExitRoot: common.HexToHash("0xcccc"), // Updated - RollupExitRoot: 
common.HexToHash("0xdddd"), // Updated - GlobalExitRoot: common.HexToHash("0xeeee"), // Updated - DestinationNetwork: 2, - Metadata: []byte("should_not_matter"), - IsMessage: false, - BlockTimestamp: 3000, - }, - // Global index 300 - single claim - { - BlockNum: 35, - BlockPos: 0, - TxHash: common.HexToHash("0xa6"), - GlobalIndex: big.NewInt(300), - OriginNetwork: 5, - OriginAddress: common.HexToAddress("0x5555"), - DestinationAddress: common.HexToAddress("0x6666"), - Amount: big.NewInt(3000), - ProofLocalExitRoot: newProof, - ProofRollupExitRoot: newProof, - MainnetExitRoot: common.HexToHash("0xffff"), - RollupExitRoot: common.HexToHash("0x0000"), - GlobalExitRoot: common.HexToHash("0x1111"), - DestinationNetwork: 6, - Metadata: []byte("metadata300"), - IsMessage: false, - BlockTimestamp: 3500, - }, - // Global index 400 - single claim - { - BlockNum: 5, - BlockPos: 0, - TxHash: common.HexToHash("0xa7"), - GlobalIndex: big.NewInt(400), - OriginNetwork: 7, - OriginAddress: common.HexToAddress("0x7777"), - DestinationAddress: common.HexToAddress("0x8888"), - Amount: big.NewInt(4000), - ProofLocalExitRoot: oldProof, - ProofRollupExitRoot: oldProof, - MainnetExitRoot: common.HexToHash("0x2222"), - RollupExitRoot: common.HexToHash("0x3333"), - GlobalExitRoot: common.HexToHash("0x4444"), - DestinationNetwork: 8, - Metadata: []byte("metadata400"), - IsMessage: true, - BlockTimestamp: 500, - }, - } - - // Insert all claims - tx, err := p.db.BeginTx(ctx, nil) - require.NoError(t, err) - - for i := uint64(1); i <= 40; i++ { - _, err = tx.Exec(`INSERT INTO block (num) VALUES ($1)`, i) - require.NoError(t, err) - } - - for _, claim := range claims { - require.NoError(t, meddler.Insert(tx, "claim", claim)) - } - require.NoError(t, tx.Commit()) - - // Test Page 1: Should return 3 compacted claims (300, 100 compacted, 200 compacted) - t.Run("Page 1 - newest claims on page", func(t *testing.T) { - result, count, err := p.GetClaimsPaged(ctx, 1, 3, nil, nil) - require.NoError(t, 
err) - require.Equal(t, 4, count) // Total compacted count: 4 distinct global_index values - - // Should get 3 claims: global_index 300, 100 (compacted), 200 (compacted) - require.Len(t, result, 3) - - // Build a map for easier testing - claimsByGlobalIndex := make(map[int64]*Claim) - for _, claim := range result { - claimsByGlobalIndex[claim.GlobalIndex.Int64()] = claim - } - - // Verify we have the expected global indices - require.Contains(t, claimsByGlobalIndex, int64(100)) - require.Contains(t, claimsByGlobalIndex, int64(200)) - require.Contains(t, claimsByGlobalIndex, int64(300)) - - // Check global_index 300 (block 35) - claim300 := claimsByGlobalIndex[300] - require.Equal(t, uint64(35), claim300.BlockNum) - require.Equal(t, []byte("metadata300"), claim300.Metadata) - - // Check global_index 100 (compacted: oldest block 10, newest proofs from block 30) - claim100 := claimsByGlobalIndex[100] - require.Equal(t, uint64(10), claim100.BlockNum) // Oldest claim's block - require.Equal(t, common.HexToHash("0xa1"), claim100.TxHash) // Oldest claim's tx - require.Equal(t, []byte("old"), claim100.Metadata) // Oldest claim's metadata - require.Equal(t, newProof, claim100.ProofLocalExitRoot) // Newest claim's proof - require.Equal(t, common.HexToHash("0xcccc"), claim100.MainnetExitRoot) // Newest claim's root - - // Check global_index 200 (compacted: oldest block 15, newest proofs from block 25) - claim200 := claimsByGlobalIndex[200] - require.Equal(t, uint64(15), claim200.BlockNum) // Oldest claim's block - require.Equal(t, []byte("metadata200"), claim200.Metadata) // Oldest claim's metadata - require.Equal(t, newProof, claim200.ProofLocalExitRoot) // Newest claim's proof - require.Equal(t, common.HexToHash("0x9999"), claim200.MainnetExitRoot) // Newest claim's root - }) - - // Test Page 2: Should return 0 claims (all claims on this page are NOT the newest) - t.Run("Page 2 - no newest claims on page", func(t *testing.T) { - result, count, err := p.GetClaimsPaged(ctx, 
2, 3, nil, nil) - require.NoError(t, err) - require.Equal(t, 4, count) // Total compacted count: 4 distinct global_index values - - // Should get 0 claims because blocks 20, 15, 10 are all older versions - require.Len(t, result, 0) - }) - - // Test with larger page size that captures everything - t.Run("Large page size - all newest claims", func(t *testing.T) { - result, count, err := p.GetClaimsPaged(ctx, 1, 100, nil, nil) - require.NoError(t, err) - require.Equal(t, 4, count) // Total compacted count: 4 distinct global_index values - - // Should get 4 compacted claims: 300, 100, 200, 400 - require.Len(t, result, 4) - - globalIndices := make(map[int64]bool) - for _, claim := range result { - globalIndices[claim.GlobalIndex.Int64()] = true - } - - require.True(t, globalIndices[100]) - require.True(t, globalIndices[200]) - require.True(t, globalIndices[300]) - require.True(t, globalIndices[400]) - }) - - // Test with network IDs filter - should only return claims from networks 1 and 3 - t.Run("Filter by network IDs", func(t *testing.T) { - networkIDs := []uint32{1, 3} // Only global_index 100 (network 1) and 200 (network 3) - result, count, err := p.GetClaimsPaged(ctx, 1, 100, networkIDs, nil) - require.NoError(t, err) - require.Equal(t, 2, count) // 2 distinct global_index values (100 and 200) after compaction - - // Should get 2 compacted claims: 100 and 200 - require.Len(t, result, 2) - - globalIndices := make(map[int64]bool) - for _, claim := range result { - globalIndices[claim.GlobalIndex.Int64()] = true - } - - require.True(t, globalIndices[100]) - require.True(t, globalIndices[200]) - require.False(t, globalIndices[300]) // Network 5 - excluded - require.False(t, globalIndices[400]) // Network 7 - excluded - }) - - // Test with network IDs and specific global index filter - t.Run("Filter by network IDs and global index", func(t *testing.T) { - networkIDs := []uint32{1, 3, 5} // Networks that include our target global_index - globalIndexFilter := 
big.NewInt(100) - result, count, err := p.GetClaimsPaged(ctx, 1, 100, networkIDs, globalIndexFilter) - require.NoError(t, err) - require.Equal(t, 1, count) // 1 compacted claim with global_index 100 - - // Should get 1 compacted claim: only global_index 100 (network 1 matches filter) - require.Len(t, result, 1) - require.Equal(t, big.NewInt(100), result[0].GlobalIndex) - require.Equal(t, uint32(1), result[0].OriginNetwork) - - // Verify compaction: oldest metadata with newest proofs - require.Equal(t, uint64(10), result[0].BlockNum) // Oldest claim's block - require.Equal(t, common.HexToHash("0xa1"), result[0].TxHash) // Oldest claim's tx - require.Equal(t, []byte("old"), result[0].Metadata) // Oldest claim's metadata - require.Equal(t, newProof, result[0].ProofLocalExitRoot) // Newest claim's proof - require.Equal(t, common.HexToHash("0xcccc"), result[0].MainnetExitRoot) // Newest claim's root - }) - - // Test with network IDs and global index that don't match - t.Run("Filter by network IDs and global index - no match", func(t *testing.T) { - networkIDs := []uint32{5, 7} // Networks 5 and 7 - globalIndexFilter := big.NewInt(100) // But global_index 100 is on network 1 - result, count, err := p.GetClaimsPaged(ctx, 1, 100, networkIDs, globalIndexFilter) - require.NoError(t, err) - require.Equal(t, 0, count) // No claims match both filters - - require.Len(t, result, 0) - }) - - // ========== Additional comprehensive test cases (mirroring TestGetClaims_Compact) ========== - - // Test Case 1: Don't compact if unset_claim exists for global_index - t.Run("Case 1: don't compact if unset_claim exists for global_index", func(t *testing.T) { - // Create a new database for this test - dbPath := filepath.Join(t.TempDir(), "case1.sqlite") - require.NoError(t, migrations.RunMigrations(dbPath)) - testP, err := newProcessor(dbPath, "bridge-syncer", logger, dbQueryTimeout, nil) - require.NoError(t, err) - - // Setup: Insert 3 claims with same global_index and 1 unset_claim - tx, 
err := testP.db.BeginTx(ctx, nil) - require.NoError(t, err) - - for i := uint64(1); i <= 3; i++ { - _, err = tx.Exec(`INSERT INTO block (num) VALUES ($1)`, i) - require.NoError(t, err) - } - - testClaims := []*Claim{ - { - BlockNum: 1, - BlockPos: 0, - TxHash: common.HexToHash("0x111"), - GlobalIndex: big.NewInt(1), - OriginNetwork: 1, - OriginAddress: common.HexToAddress("0xaaa"), - DestinationAddress: common.HexToAddress("0xbbb"), - Amount: big.NewInt(100), - ProofLocalExitRoot: types.Proof{common.HexToHash("0x1a")}, - ProofRollupExitRoot: types.Proof{common.HexToHash("0x1b")}, - MainnetExitRoot: common.HexToHash("0x1c"), - RollupExitRoot: common.HexToHash("0x1d"), - GlobalExitRoot: common.HexToHash("0x1e"), - DestinationNetwork: 2, - Metadata: []byte("metadata1"), - IsMessage: false, - BlockTimestamp: 1000, - }, - { - BlockNum: 2, - BlockPos: 0, - TxHash: common.HexToHash("0x222"), - GlobalIndex: big.NewInt(1), - OriginNetwork: 3, - OriginAddress: common.HexToAddress("0xccc"), - DestinationAddress: common.HexToAddress("0xddd"), - Amount: big.NewInt(200), - ProofLocalExitRoot: types.Proof{common.HexToHash("0x2a")}, - ProofRollupExitRoot: types.Proof{common.HexToHash("0x2b")}, - MainnetExitRoot: common.HexToHash("0x2c"), - RollupExitRoot: common.HexToHash("0x2d"), - GlobalExitRoot: common.HexToHash("0x2e"), - DestinationNetwork: 4, - Metadata: []byte("metadata2"), - IsMessage: true, - BlockTimestamp: 2000, - }, - } - - for _, claim := range testClaims { - require.NoError(t, meddler.Insert(tx, "claim", claim)) - } - - // Insert unset_claim for global_index 1 - unsetClaim := &UnsetClaim{ - BlockNum: 1, - BlockPos: 1, - GlobalIndex: big.NewInt(1), - } - require.NoError(t, meddler.Insert(tx, "unset_claim", unsetClaim)) - require.NoError(t, tx.Commit()) - - // Query: Should return all claims uncompacted because unset_claim exists - result, count, err := testP.GetClaimsPaged(ctx, 1, 10, nil, nil) - require.NoError(t, err) - require.Equal(t, 2, count) - require.Len(t, 
result, 2) - require.Equal(t, result[0], testClaims[1]) // they are returned in DESC order - require.Equal(t, result[1], testClaims[0]) - }) - - // Test Case 2: Compact if no unset_claim exists - t.Run("Case 2: compact if no unset_claim exists", func(t *testing.T) { - // Create a new database for this test - dbPath := filepath.Join(t.TempDir(), "case2.sqlite") - require.NoError(t, migrations.RunMigrations(dbPath)) - testP, err := newProcessor(dbPath, "bridge-syncer", logger, dbQueryTimeout, nil) - require.NoError(t, err) - - // Setup: Insert 3 claims with same global_index, NO unset_claim - tx, err := testP.db.BeginTx(ctx, nil) - require.NoError(t, err) - - for i := uint64(1); i <= 3; i++ { - _, err = tx.Exec(`INSERT INTO block (num) VALUES ($1)`, i) - require.NoError(t, err) - } - - testClaims := []*Claim{ - { - BlockNum: 1, - BlockPos: 0, - TxHash: common.HexToHash("0x111"), - GlobalIndex: big.NewInt(100), - OriginNetwork: 1, - OriginAddress: common.HexToAddress("0xaaa"), - DestinationAddress: common.HexToAddress("0xbbb"), - Amount: big.NewInt(100), - ProofLocalExitRoot: types.Proof{common.HexToHash("0x1a")}, - ProofRollupExitRoot: types.Proof{common.HexToHash("0x1b")}, - MainnetExitRoot: common.HexToHash("0x1c"), - RollupExitRoot: common.HexToHash("0x1d"), - GlobalExitRoot: common.HexToHash("0x1e"), - DestinationNetwork: 2, - Metadata: []byte("original_metadata"), - IsMessage: false, - BlockTimestamp: 1000, - }, - { - BlockNum: 2, - BlockPos: 0, - TxHash: common.HexToHash("0x222"), - GlobalIndex: big.NewInt(100), - OriginNetwork: 99, - OriginAddress: common.HexToAddress("0xfff"), - DestinationAddress: common.HexToAddress("0xeee"), - Amount: big.NewInt(999), - ProofLocalExitRoot: types.Proof{common.HexToHash("0x2a")}, - ProofRollupExitRoot: types.Proof{common.HexToHash("0x2b")}, - MainnetExitRoot: common.HexToHash("0x2c"), - RollupExitRoot: common.HexToHash("0x2d"), - GlobalExitRoot: common.HexToHash("0x2e"), - DestinationNetwork: 88, - Metadata: 
[]byte("middle_metadata"), - IsMessage: true, - BlockTimestamp: 2000, - }, - { - BlockNum: 3, - BlockPos: 0, - TxHash: common.HexToHash("0x333"), - GlobalIndex: big.NewInt(100), - OriginNetwork: 77, - OriginAddress: common.HexToAddress("0x999"), - DestinationAddress: common.HexToAddress("0x888"), - Amount: big.NewInt(777), - ProofLocalExitRoot: types.Proof{common.HexToHash("0x3a")}, - ProofRollupExitRoot: types.Proof{common.HexToHash("0x3b")}, - MainnetExitRoot: common.HexToHash("0x3c"), - RollupExitRoot: common.HexToHash("0x3d"), - GlobalExitRoot: common.HexToHash("0x3e"), - DestinationNetwork: 66, - Metadata: []byte("newest_metadata"), - IsMessage: true, - BlockTimestamp: 3000, - }, - } + require.Len(t, mappings, 1) + require.Equal(t, 1, count) + }) +} - for _, claim := range testClaims { - require.NoError(t, meddler.Insert(tx, "claim", claim)) - } - require.NoError(t, tx.Commit()) +func TestProcessor_DatabaseConnectionErrors(t *testing.T) { + t.Parallel() - // Query: Should return 1 compacted claim (oldest metadata + newest proofs) - result, count, err := testP.GetClaimsPaged(ctx, 1, 10, nil, nil) - require.NoError(t, err) - require.Equal(t, 1, count) // 1 compacted claim (3 raw claims compacted to 1) - require.Len(t, result, 1) + t.Run("GetTotalNumberOfRecords with invalid table name", func(t *testing.T) { + t.Parallel() + p := createTestProcessor(t, "DatabaseConnectionErrors") - // Verify compaction: oldest claim's metadata with newest claim's proofs - require.Equal(t, big.NewInt(100), result[0].GlobalIndex) - require.Equal(t, uint64(1), result[0].BlockNum) // Oldest claim's block - require.Equal(t, common.HexToHash("0x111"), result[0].TxHash) // Oldest claim's tx - require.Equal(t, []byte("original_metadata"), result[0].Metadata) // Oldest claim's metadata - require.Equal(t, types.Proof{common.HexToHash("0x3a")}, result[0].ProofLocalExitRoot) // Newest claim's proof - require.Equal(t, common.HexToHash("0x3c"), result[0].MainnetExitRoot) // Newest claim's root 
+ // Test with invalid table name + _, err := p.GetTotalNumberOfRecords(context.Background(), "invalid_table_name", "") + require.Error(t, err) + require.Contains(t, err.Error(), "no such table") }) - // Test Case 3: Don't return if newest is not on the page - // Original test intent: Verify that when the newest claim is not on the requested page, - // we return 0 results (even though older claims for that global_index might be on the page) - // Note: With compacted count, we need multiple global_indexes to create valid pagination - t.Run("Case 3: don't return if newest is not on the page", func(t *testing.T) { - // Create a new database for this test - dbPath := filepath.Join(t.TempDir(), "case3.sqlite") - require.NoError(t, migrations.RunMigrations(dbPath)) - testP, err := newProcessor(dbPath, "bridge-syncer", logger, dbQueryTimeout, nil) - require.NoError(t, err) - - // Setup: Insert claims with two global_indexes to create valid pagination - // global_index 100: blocks 1 (oldest), 2, 3 (newest) - newest on page 1 - // global_index 200: blocks 4 (oldest), 5 (newest) - newest on page 2 - tx, err := testP.db.BeginTx(ctx, nil) - require.NoError(t, err) - - for i := uint64(1); i <= 5; i++ { - _, err = tx.Exec(`INSERT INTO block (num) VALUES ($1)`, i) - require.NoError(t, err) - } + t.Run("fetchTokenMappings with database errors", func(t *testing.T) { + t.Parallel() + p := createTestProcessor(t, "DatabaseConnectionErrors2") - testClaims := []*Claim{ - // Global index 100 - oldest (block 1) - { - BlockNum: 1, - BlockPos: 0, - TxHash: common.HexToHash("0x111"), - GlobalIndex: big.NewInt(100), - OriginNetwork: 1, - OriginAddress: common.HexToAddress("0xaaa"), - DestinationAddress: common.HexToAddress("0xbbb"), - Amount: big.NewInt(100), - ProofLocalExitRoot: types.Proof{common.HexToHash("0x1a")}, - ProofRollupExitRoot: types.Proof{common.HexToHash("0x1b")}, - MainnetExitRoot: common.HexToHash("0x1c"), - RollupExitRoot: common.HexToHash("0x1d"), - GlobalExitRoot: 
common.HexToHash("0x1e"), - DestinationNetwork: 2, - Metadata: []byte("original_metadata"), - IsMessage: false, - BlockTimestamp: 1000, - }, - // Global index 100 - middle (block 2) - { - BlockNum: 2, - BlockPos: 0, - TxHash: common.HexToHash("0x222"), - GlobalIndex: big.NewInt(100), - OriginNetwork: 99, - OriginAddress: common.HexToAddress("0xfff"), - DestinationAddress: common.HexToAddress("0xeee"), - Amount: big.NewInt(999), - ProofLocalExitRoot: types.Proof{common.HexToHash("0x2a")}, - ProofRollupExitRoot: types.Proof{common.HexToHash("0x2b")}, - MainnetExitRoot: common.HexToHash("0x2c"), - RollupExitRoot: common.HexToHash("0x2d"), - GlobalExitRoot: common.HexToHash("0x2e"), - DestinationNetwork: 88, - Metadata: []byte("middle_metadata"), - IsMessage: true, - BlockTimestamp: 2000, - }, - // Global index 100 - newest (block 3) - { - BlockNum: 3, - BlockPos: 0, - TxHash: common.HexToHash("0x333"), - GlobalIndex: big.NewInt(100), - OriginNetwork: 77, - OriginAddress: common.HexToAddress("0x999"), - DestinationAddress: common.HexToAddress("0x888"), - Amount: big.NewInt(777), - ProofLocalExitRoot: types.Proof{common.HexToHash("0x3a")}, - ProofRollupExitRoot: types.Proof{common.HexToHash("0x3b")}, - MainnetExitRoot: common.HexToHash("0x3c"), - RollupExitRoot: common.HexToHash("0x3d"), - GlobalExitRoot: common.HexToHash("0x3e"), - DestinationNetwork: 66, - Metadata: []byte("newest_metadata"), - IsMessage: true, - BlockTimestamp: 3000, - }, - // Global index 200 - oldest (block 4) - { - BlockNum: 4, - BlockPos: 0, - TxHash: common.HexToHash("0x444"), - GlobalIndex: big.NewInt(200), - OriginNetwork: 2, - OriginAddress: common.HexToAddress("0xaaa"), - DestinationAddress: common.HexToAddress("0xbbb"), - Amount: big.NewInt(200), - ProofLocalExitRoot: types.Proof{common.HexToHash("0x4a")}, - ProofRollupExitRoot: types.Proof{common.HexToHash("0x4b")}, - MainnetExitRoot: common.HexToHash("0x4c"), - RollupExitRoot: common.HexToHash("0x4d"), - GlobalExitRoot: 
common.HexToHash("0x4e"), - DestinationNetwork: 3, - Metadata: []byte("index200_old"), - IsMessage: false, - BlockTimestamp: 4000, - }, - // Global index 200 - newest (block 5) - { - BlockNum: 5, - BlockPos: 0, - TxHash: common.HexToHash("0x555"), - GlobalIndex: big.NewInt(200), - OriginNetwork: 2, - OriginAddress: common.HexToAddress("0xccc"), - DestinationAddress: common.HexToAddress("0xddd"), - Amount: big.NewInt(200), - ProofLocalExitRoot: types.Proof{common.HexToHash("0x5a")}, - ProofRollupExitRoot: types.Proof{common.HexToHash("0x5b")}, - MainnetExitRoot: common.HexToHash("0x5c"), - RollupExitRoot: common.HexToHash("0x5d"), - GlobalExitRoot: common.HexToHash("0x5e"), - DestinationNetwork: 3, - Metadata: []byte("index200_new"), - IsMessage: false, - BlockTimestamp: 5000, + testBlock := sync.Block{ + Num: 1, + Hash: common.HexToHash("0x1"), + Events: []any{ + Event{TokenMapping: createTestTokenMapping(1, 0)}, }, } + require.NoError(t, p.ProcessBlock(context.Background(), testBlock)) - for _, claim := range testClaims { - require.NoError(t, meddler.Insert(tx, "claim", claim)) - } - require.NoError(t, tx.Commit()) - - // Query page 1 (size 1): Contains block 5 (newest for global_index 200) - // global_index 200's newest (block 5) is on page 1 → should return compacted claim - // global_index 100's newest (block 3) is NOT on page 1 → should NOT return - result, count, err := testP.GetClaimsPaged(ctx, 1, 1, nil, nil) - require.NoError(t, err) - require.Equal(t, 2, count) // 2 distinct global_index values (100 and 200) - - // Should return 1 claim: global_index 200 (its newest is on page 1) - require.Len(t, result, 1) - require.Equal(t, big.NewInt(200), result[0].GlobalIndex) - - // Query page 2 (size 1): Contains block 4 (oldest for global_index 200, not newest) - // This tests the original Case 3 concept: when the newest is NOT on the page, return 0 - // global_index 200's newest (block 5) is NOT on page 2 → should NOT return - // global_index 100's newest (block 
3) is NOT on page 2 → should NOT return - result2, count2, err := testP.GetClaimsPaged(ctx, 2, 1, nil, nil) - require.NoError(t, err) - require.Equal(t, 2, count2) // Same total count - - // Should return 0 claims because neither newest is on page 2 - // This preserves the original test's essence: don't return if newest is not on page - require.Len(t, result2, 0) + // Now test with an offset that would cause a database error + p.db.Close() + _, err := p.fetchTokenMappings(context.Background(), 5, 0, "") + require.Error(t, err) }) +} - // Test Case 3 Exception: Return if unset_claim exists even when globally oldest is outside range - t.Run("Case 3 exception: return if unset_claim exists even when globally oldest is outside range", func(t *testing.T) { - // Create a new database for this test - dbPath := filepath.Join(t.TempDir(), "case3_exception.sqlite") - require.NoError(t, migrations.RunMigrations(dbPath)) - testP, err := newProcessor(dbPath, "bridge-syncer", logger, dbQueryTimeout, nil) - require.NoError(t, err) - - // Setup: Insert claims + unset_claim - tx, err := testP.db.BeginTx(ctx, nil) - require.NoError(t, err) +func TestProcessor_CalculateOffsetErrors(t *testing.T) { + t.Parallel() - for i := uint64(1); i <= 3; i++ { - _, err = tx.Exec(`INSERT INTO block (num) VALUES ($1)`, i) - require.NoError(t, err) - } + t.Run("GetTokenMappings with invalid offset calculation", func(t *testing.T) { + t.Parallel() + p := createTestProcessor(t, "CalculateOffsetErrors") - testClaims := []*Claim{ - // Oldest claim (block 1) - { - BlockNum: 1, - BlockPos: 0, - TxHash: common.HexToHash("0x111"), - GlobalIndex: big.NewInt(1), - OriginNetwork: 1, - OriginAddress: common.HexToAddress("0xaaa"), - DestinationAddress: common.HexToAddress("0xbbb"), - Amount: big.NewInt(100), - ProofLocalExitRoot: types.Proof{common.HexToHash("0x1a")}, - ProofRollupExitRoot: types.Proof{common.HexToHash("0x1b")}, - MainnetExitRoot: common.HexToHash("0x1c"), - RollupExitRoot: 
common.HexToHash("0x1d"), - GlobalExitRoot: common.HexToHash("0x1e"), - DestinationNetwork: 2, - Metadata: []byte("metadata1"), - IsMessage: false, - BlockTimestamp: 1000, - }, - // Newest claim (block 3) - { - BlockNum: 3, - BlockPos: 0, - TxHash: common.HexToHash("0x333"), - GlobalIndex: big.NewInt(100), - OriginNetwork: 77, - OriginAddress: common.HexToAddress("0x999"), - DestinationAddress: common.HexToAddress("0x888"), - Amount: big.NewInt(777), - ProofLocalExitRoot: types.Proof{common.HexToHash("0x3a")}, - ProofRollupExitRoot: types.Proof{common.HexToHash("0x3b")}, - MainnetExitRoot: common.HexToHash("0x3c"), - RollupExitRoot: common.HexToHash("0x3d"), - GlobalExitRoot: common.HexToHash("0x3e"), - DestinationNetwork: 66, - Metadata: []byte("newest_metadata"), - IsMessage: true, - BlockTimestamp: 3000, + testBlock := sync.Block{ + Num: 1, + Hash: common.HexToHash("0x1"), + Events: []any{ + Event{TokenMapping: createTestTokenMapping(1, 0)}, }, } + require.NoError(t, p.ProcessBlock(context.Background(), testBlock)) - for _, claim := range testClaims { - require.NoError(t, meddler.Insert(tx, "claim", claim)) - } - - // Insert unset_claim for global_index 1 - unsetClaim := &UnsetClaim{ - BlockNum: 1, - BlockPos: 1, - GlobalIndex: big.NewInt(1), - } - require.NoError(t, meddler.Insert(tx, "unset_claim", unsetClaim)) - require.NoError(t, tx.Commit()) - - // Query: Even though oldest is outside page, should return claim because unset_claim exists - result, count, err := testP.GetClaimsPaged(ctx, 1, 10, nil, nil) - require.NoError(t, err) - require.Equal(t, 2, count) - - // Should return both claims uncompacted - require.Len(t, result, 2) - require.Equal(t, result[0], testClaims[1]) // they are returned in DESC order - require.Equal(t, result[1], testClaims[0]) + // Test with page number that would result in offset >= total records + _, _, err := p.GetTokenMappings(context.Background(), 10, 5, "") // page 10 with only 1 record and page size 5 + require.Error(t, err) + 
require.Contains(t, err.Error(), "invalid page number") }) - // Test: Multiple global_indexes with different compaction rules - t.Run("Multiple global_indexes with different compaction rules", func(t *testing.T) { - // Create a new database for this test - dbPath := filepath.Join(t.TempDir(), "multiple_indexes.sqlite") - require.NoError(t, migrations.RunMigrations(dbPath)) - testP, err := newProcessor(dbPath, "bridge-syncer", logger, dbQueryTimeout, nil) - require.NoError(t, err) - - // Setup: Multiple global indexes with different scenarios - tx, err := testP.db.BeginTx(ctx, nil) - require.NoError(t, err) - - for i := uint64(1); i <= 3; i++ { - _, err = tx.Exec(`INSERT INTO block (num) VALUES ($1)`, i) - require.NoError(t, err) - } + t.Run("GetBridgesPaged with invalid offset calculation", func(t *testing.T) { + t.Parallel() + p := createTestProcessor(t, "CalculateOffsetErrors2") - testClaims := []*Claim{ - // Global index 100 - oldest (block 1, pos 0) - { - BlockNum: 1, - BlockPos: 0, - TxHash: common.HexToHash("0x111"), - GlobalIndex: big.NewInt(100), - OriginNetwork: 1, - OriginAddress: common.HexToAddress("0xa1"), - DestinationAddress: common.HexToAddress("0xb1"), - Amount: big.NewInt(100), - ProofLocalExitRoot: types.Proof{common.HexToHash("0x1a")}, - ProofRollupExitRoot: types.Proof{common.HexToHash("0x1b")}, - MainnetExitRoot: common.HexToHash("0x1c"), - RollupExitRoot: common.HexToHash("0x1d"), - GlobalExitRoot: common.HexToHash("0x1e"), - DestinationNetwork: 2, - Metadata: []byte("index1_old"), - IsMessage: false, - BlockTimestamp: 1000, - }, - // Global index 200 - oldest (block 1, pos 1) - { - BlockNum: 1, - BlockPos: 1, - TxHash: common.HexToHash("0x112"), - GlobalIndex: big.NewInt(200), - OriginNetwork: 3, - OriginAddress: common.HexToAddress("0xa2"), - DestinationAddress: common.HexToAddress("0xb2"), - Amount: big.NewInt(200), - ProofLocalExitRoot: types.Proof{common.HexToHash("0x2a")}, - ProofRollupExitRoot: types.Proof{common.HexToHash("0x2b")}, - 
MainnetExitRoot: common.HexToHash("0x2c"), - RollupExitRoot: common.HexToHash("0x2d"), - GlobalExitRoot: common.HexToHash("0x2e"), - DestinationNetwork: 4, - Metadata: []byte("index2_old"), - IsMessage: true, - BlockTimestamp: 1001, - }, - // Global index 200 - newest (block 3, pos 1) - { - BlockNum: 3, - BlockPos: 1, - TxHash: common.HexToHash("0x112"), - GlobalIndex: big.NewInt(200), - OriginNetwork: 3, - OriginAddress: common.HexToAddress("0xccc"), - DestinationAddress: common.HexToAddress("0xddd"), - Amount: big.NewInt(200), - ProofLocalExitRoot: types.Proof{common.HexToHash("0x2ab")}, - ProofRollupExitRoot: types.Proof{common.HexToHash("0x2bc")}, - MainnetExitRoot: common.HexToHash("0x2ce"), - RollupExitRoot: common.HexToHash("0x2df"), - GlobalExitRoot: common.HexToHash("0x2ee"), - DestinationNetwork: 88, - Metadata: []byte("block3pos1"), - IsMessage: true, - BlockTimestamp: 3001, - }, - // Global index 100 - newest (block 3, pos 0) - { - BlockNum: 3, - BlockPos: 0, - TxHash: common.HexToHash("0x333"), - GlobalIndex: big.NewInt(100), - OriginNetwork: 77, - OriginAddress: common.HexToAddress("0xc2"), - DestinationAddress: common.HexToAddress("0xd2"), - Amount: big.NewInt(777), - ProofLocalExitRoot: types.Proof{common.HexToHash("0x4a")}, - ProofRollupExitRoot: types.Proof{common.HexToHash("0x4b")}, - MainnetExitRoot: common.HexToHash("0x4c"), - RollupExitRoot: common.HexToHash("0x4d"), - GlobalExitRoot: common.HexToHash("0x4e"), - DestinationNetwork: 66, - Metadata: []byte("index1_new"), - IsMessage: false, - BlockTimestamp: 2001, + testBlock := sync.Block{ + Num: 1, + Hash: common.HexToHash("0x1"), + Events: []any{ + Event{Bridge: createTestBridge(1, 0)}, }, } + require.NoError(t, p.ProcessBlock(context.Background(), testBlock)) - for _, claim := range testClaims { - require.NoError(t, meddler.Insert(tx, "claim", claim)) - } + // Test with page number that would result in offset >= total records + _, _, err := p.GetBridgesPaged(context.Background(), 10, 5, nil, 
nil, "") // page 10 with only 1 record and page size 5 + require.Error(t, err) + require.Contains(t, err.Error(), "invalid page number") + }) +} - // Insert unset_claim for global_index 100 only - unsetClaim := &UnsetClaim{ - BlockNum: 1, - BlockPos: 1, - GlobalIndex: big.NewInt(100), - } - require.NoError(t, meddler.Insert(tx, "unset_claim", unsetClaim)) - require.NoError(t, tx.Commit()) +// Helper functions to reduce test redundancy - // Query: Should return: - // - Global index 100: both claims uncompacted (because unset_claim exists) -> count as 2 - // - Global index 200: 1 compacted claim -> count as 1 - result, count, err := testP.GetClaimsPaged(ctx, 1, 10, nil, nil) - require.NoError(t, err) - require.Equal(t, 3, count) // 2 (unset_claim) + 1 (compacted) = 3 - require.Len(t, result, 3) // 2 for index 100 (uncompacted) + 1 for index 200 (compacted) +// createTestProcessor creates a new processor for testing +func createTestProcessor(t *testing.T, dbName string) *processor { + t.Helper() - // Count claims by global index - claimsByGlobalIndex := make(map[int64]int) - for _, claim := range result { - claimsByGlobalIndex[claim.GlobalIndex.Int64()]++ - } + path := path.Join(t.TempDir(), dbName+".db") + logger := log.WithFields("module", "bridge-syncer") + p, err := newTestProcessor(path, "bridge-syncer", logger, dbQueryTimeout) + require.NoError(t, err) + return p +} - require.Equal(t, 2, claimsByGlobalIndex[100]) // Uncompacted - require.Equal(t, 1, claimsByGlobalIndex[200]) // Compacted - }) +// createTestBridge creates a test Bridge event +func createTestBridge(blockNum uint64, blockPos int) *Bridge { + return &Bridge{ + BlockNum: blockNum, + BlockPos: uint64(blockPos), + BlockTimestamp: 1234567890, + TxHash: common.HexToHash("0x1234567890123456789012345678901234567890123456789012345678901234"), + FromAddress: func() *common.Address { + addr := common.HexToAddress("0x1234567890123456789012345678901234567890") + return &addr + }(), + LeafType: 1, + 
OriginNetwork: 1, + OriginAddress: common.HexToAddress("0x1234567890123456789012345678901234567890"), + DestinationNetwork: 1, + DestinationAddress: common.HexToAddress("0x1234567890123456789012345678901234567890"), + Amount: big.NewInt(1000000000000000000), + Metadata: []byte{}, + DepositCount: 0, + } } -// TestClaimColumnsSQL_ReflectionCheck verifies that all meddler-tagged fields -// in the Claim struct are present in the claimColumnsSQL constant. -// This test uses reflection to ensure maintainability - if a new field is added -// to Claim with a meddler tag, this test will fail until claimColumnsSQL is updated. -func TestClaimColumnsSQL_ReflectionCheck(t *testing.T) { - t.Parallel() +// createTestTokenMapping creates a test TokenMapping event +func createTestTokenMapping(blockNum uint64, blockPos int) *TokenMapping { + return &TokenMapping{ + BlockNum: blockNum, + BlockPos: uint64(blockPos), + BlockTimestamp: 1234567890, + TxHash: common.HexToHash("0x1234567890123456789012345678901234567890123456789012345678901234"), + OriginNetwork: 1, + OriginTokenAddress: common.HexToAddress("0x1234567890123456789012345678901234567890"), + WrappedTokenAddress: common.HexToAddress("0x1234567890123456789012345678901234567890"), + Metadata: []byte{}, + IsNotMintable: false, + Type: 0, + } +} - claimType := reflect.TypeFor[Claim]() - // Collect meddler-tagged column names - var meddlerColumns []string - for i := 0; i < claimType.NumField(); i++ { - tag := claimType.Field(i).Tag.Get("meddler") - if tag == "" { - continue - } - name := strings.Split(tag, ",")[0] - if name != "" && name != "-" { - meddlerColumns = append(meddlerColumns, name) - } - } - require.NotEmpty(t, meddlerColumns, "Claim struct should have meddler-tagged fields") +func TestDatabaseQueryTimeout(t *testing.T) { + normalTimeout := 100 * time.Millisecond + shortTimeout := 1 * time.Nanosecond + + path := path.Join(t.TempDir(), "bridgeSyncerProcessorTimeout.db") + logger := log.WithFields("module", 
"bridge-syncer-timeout") - // Normalize whitespace and split columns - ws := regexp.MustCompile(`\s+`) - normalized := strings.TrimSpace(ws.ReplaceAllString(claimColumnsSQL, " ")) + // Create processor with normal timeout for setup + p, err := newTestProcessor(path, "bridge-syncer-timeout", logger, normalTimeout) + require.NoError(t, err) - var sqlColumns []string - for col := range strings.SplitSeq(normalized, ",") { - if col = strings.TrimSpace(col); col != "" { - sqlColumns = append(sqlColumns, col) - } + // Insert some test data to ensure the database is working + block := sync.Block{ + Num: 1, + Hash: common.HexToHash("0x123"), + Events: []any{}, } - require.Equal(t, len(meddlerColumns), len(sqlColumns), - "SQL column count must match meddler-tagged field count") + ctx := context.Background() + err = p.ProcessBlock(ctx, block) + require.NoError(t, err) - // Turn SQL columns into a lookup set - sqlSet := make(map[string]struct{}, len(sqlColumns)) - for _, col := range sqlColumns { - sqlSet[col] = struct{}{} - } + // Create a new processor with short timeout for testing timeout behavior + pShortTimeout, err := newTestProcessor(path, "bridge-syncer-short-timeout", logger, shortTimeout) + require.NoError(t, err) + + // Test that operations timeout with short timeout + _, _, err = pShortTimeout.GetLastProcessedBlock(ctx) + require.Error(t, err) + require.Contains(t, err.Error(), "context deadline exceeded") + + _, err = pShortTimeout.GetBridges(ctx, 1, 1) + require.Error(t, err) + require.Contains(t, err.Error(), "context deadline exceeded") - // Ensure every struct tag column exists in SQL - for _, col := range meddlerColumns { - _, ok := sqlSet[col] - require.True(t, ok, "Missing SQL column for meddler-tag '%s'", col) - } } + func TestProcessor_BackwardLET(t *testing.T) { buildBlocksWithSequentialBridges := func(blocksCount, bridgesPerBlock uint64, blockNumOffset uint64, depositCountOffset uint32) []sync.Block { @@ -5785,7 +2503,7 @@ func 
TestProcessor_BackwardLET(t *testing.T) { t.Run(c.name, func(t *testing.T) { dbPath := filepath.Join(t.TempDir(), "backward_let_cases.sqlite") require.NoError(t, migrations.RunMigrations(dbPath)) - p, err := newProcessor(dbPath, "bridge-syncer", log.GetDefaultLogger(), dbQueryTimeout, nil) + p, err := newTestProcessor(dbPath, "bridge-syncer", log.GetDefaultLogger(), dbQueryTimeout) require.NoError(t, err) blocks := c.setupBlocks() @@ -5821,7 +2539,7 @@ func TestProcessor_BackwardLET(t *testing.T) { require.NoError(t, err) } - lastProcessedBlock, err := p.GetLastProcessedBlock(t.Context()) + lastProcessedBlock, _, err := p.GetLastProcessedBlock(t.Context()) require.NoError(t, err) expectedBridges := collectExpectedBridgesUpTo(t, blocks, c.skipBlocks, c.targetDepositCount) @@ -5832,113 +2550,6 @@ func TestProcessor_BackwardLET(t *testing.T) { } } -func TestGetBoundaryBlock(t *testing.T) { - insertBlockQuery := `INSERT INTO block (num, hash) VALUES ($1, $2) ON CONFLICT (num) DO UPDATE SET hash = $2` - - cases := []struct { - name string - claims []*Claim - claimType ClaimType - expectedBlock uint64 - expectedErr error - }{ - { - name: "no claims, not found error", - expectedErr: db.ErrNotFound, - }, - { - name: "detailed claim event exists, return its block", - claims: []*Claim{ - { - BlockNum: 1, - BlockPos: 1, - GlobalIndex: big.NewInt(100), - Type: DetailedClaimEvent, - }, - { - BlockNum: 6, - BlockPos: 1, - GlobalIndex: big.NewInt(101), - Type: DetailedClaimEvent, - }, - }, - claimType: DetailedClaimEvent, - expectedBlock: 6, - }, - { - name: "mixed claim types exist, return detailed claim event block", - claims: []*Claim{ - { - BlockNum: 1, - BlockPos: 1, - GlobalIndex: big.NewInt(100), - Type: ClaimEvent, - }, - { - BlockNum: 100, - BlockPos: 1, - GlobalIndex: big.NewInt(101), - Type: DetailedClaimEvent, - }, - { - BlockNum: 101, - BlockPos: 1, - GlobalIndex: big.NewInt(102), - Type: DetailedClaimEvent, - }, - }, - claimType: DetailedClaimEvent, - expectedBlock: 
101, - }, - { - name: "no corresponding claim types exist", - claims: []*Claim{ - { - BlockNum: 1, - BlockPos: 1, - GlobalIndex: big.NewInt(100), - Type: ClaimEvent, - }, - { - BlockNum: 100, - BlockPos: 1, - GlobalIndex: big.NewInt(101), - Type: ClaimEvent, - }, - }, - claimType: DetailedClaimEvent, - expectedErr: db.ErrNotFound, - }, - } - for _, tc := range cases { - t.Run(tc.name, func(t *testing.T) { - dbPath := filepath.Join(t.TempDir(), "get_boundary_block.sqlite") - require.NoError(t, migrations.RunMigrations(dbPath)) - p, err := newProcessor(dbPath, "bridge-syncer", log.GetDefaultLogger(), dbQueryTimeout, nil) - require.NoError(t, err) - - // Insert claims if any - if len(tc.claims) > 0 { - tx, err := p.db.BeginTx(t.Context(), nil) - require.NoError(t, err) - for _, claim := range tc.claims { - _, err = tx.Exec(insertBlockQuery, claim.BlockNum, common.HexToHash("0x0")) - require.NoError(t, err) - require.NoError(t, meddler.Insert(tx, "claim", claim)) - } - require.NoError(t, tx.Commit()) - } - - blockNum, err := p.GetBoundaryBlockForClaimType(t.Context(), tc.claimType) - if tc.expectedErr != nil { - require.ErrorIs(t, err, tc.expectedErr) - } else { - require.NoError(t, err) - require.Equal(t, tc.expectedBlock, blockNum) - } - }) - } -} func TestHandleForwardLETEvent(t *testing.T) { t.Run("successfully process single leaf with no archived bridge", func(t *testing.T) { @@ -6578,7 +3189,7 @@ func TestHandleForwardLETEvent(t *testing.T) { tempDBPath := filepath.Join(t.TempDir(), "temp_genesis.db") err = migrations.RunMigrations(tempDBPath) require.NoError(t, err) - tempP, err := newProcessor(tempDBPath, "test-genesis", log.WithFields("module", "test-genesis"), dbQueryTimeout) + tempP, err := newTestProcessor(tempDBPath, "test-genesis", log.WithFields("module", "test-genesis"), dbQueryTimeout) require.NoError(t, err) tempTx, err := db.NewTx(t.Context(), tempP.db) require.NoError(t, err) @@ -6621,7 +3232,7 @@ func setupProcessorWithTransaction(t *testing.T) 
(*processor, dbtypes.Txer) { require.NoError(t, err) logger := log.WithFields("module", "test") - p, err := newProcessor(dbPath, "test", logger, dbQueryTimeout, nil) + p, err := newTestProcessor(dbPath, "test", logger, dbQueryTimeout) require.NoError(t, err) p.initialLER = bridgesynctypes.EmptyLER @@ -6652,7 +3263,7 @@ func calculateExpectedRootAfterForwardLET(t *testing.T, initialDepositCount uint require.NoError(t, err) logger := log.WithFields("module", "test-calc") - tempP, err := newProcessor(tempDBPath, "test-calc", logger, dbQueryTimeout, nil) + tempP, err := newTestProcessor(tempDBPath, "test-calc", logger, dbQueryTimeout) require.NoError(t, err) tempTx, err := db.NewTx(t.Context(), tempP.db) @@ -6744,73 +3355,6 @@ func encodeLeafDataArrayForTest(t *testing.T, leaves []LeafData) []byte { return encodedBytes } -func TestProcessor_GetClaimsByGER(t *testing.T) { - t.Helper() - - ctx := context.Background() - p := createTestProcessor(t, "test_get_claims_by_ger") - - gerHash := common.HexToHash("0xaabbccdd") - otherGER := common.HexToHash("0x11223344") - - // Insert a block and two claims: one DetailedClaimEvent with gerHash, one ClaimEvent with gerHash, - // and one DetailedClaimEvent with a different GER. 
- tx, err := p.db.BeginTx(ctx, nil) - require.NoError(t, err) - _, err = tx.Exec(`INSERT INTO block (num) VALUES ($1)`, uint64(1)) - require.NoError(t, err) - _, err = tx.Exec(`INSERT INTO block (num) VALUES ($1)`, uint64(2)) - require.NoError(t, err) - _, err = tx.Exec(`INSERT INTO block (num) VALUES ($1)`, uint64(3)) - require.NoError(t, err) - - detailedClaim := &Claim{ - BlockNum: 1, - BlockPos: 0, - GlobalIndex: big.NewInt(100), - GlobalExitRoot: gerHash, - Type: DetailedClaimEvent, - Amount: big.NewInt(0), - } - require.NoError(t, meddler.Insert(tx, "claim", detailedClaim)) - - // A ClaimEvent with the same GER — should NOT be returned - claimEventSameGER := &Claim{ - BlockNum: 2, - BlockPos: 0, - GlobalIndex: big.NewInt(200), - GlobalExitRoot: gerHash, - Type: ClaimEvent, - Amount: big.NewInt(0), - } - require.NoError(t, meddler.Insert(tx, "claim", claimEventSameGER)) - - // A DetailedClaimEvent with a different GER — should NOT be returned - detailedOtherGER := &Claim{ - BlockNum: 3, - BlockPos: 0, - GlobalIndex: big.NewInt(300), - GlobalExitRoot: otherGER, - Type: DetailedClaimEvent, - Amount: big.NewInt(0), - } - require.NoError(t, meddler.Insert(tx, "claim", detailedOtherGER)) - require.NoError(t, tx.Commit()) - - t.Run("returns only DetailedClaimEvent with matching GER", func(t *testing.T) { - claims, err := p.GetClaimsByGER(ctx, gerHash) - require.NoError(t, err) - require.Len(t, claims, 1) - require.Equal(t, int64(100), claims[0].GlobalIndex.Int64()) - require.Equal(t, DetailedClaimEvent, claims[0].Type) - }) - - t.Run("returns nil for unknown GER", func(t *testing.T) { - claims, err := p.GetClaimsByGER(ctx, common.HexToHash("0xdeadbeef")) - require.NoError(t, err) - require.Empty(t, claims) - }) -} func TestProcessor_GetBridgeByDepositCount(t *testing.T) { t.Helper() diff --git a/claimsync/downloader.go b/claimsync/downloader.go index 8ffd5696c..2f20d2508 100644 --- a/claimsync/downloader.go +++ b/claimsync/downloader.go @@ -65,15 +65,28 @@ type 
ClaimQuerier interface { GetBoundaryBlockForClaimType(ctx context.Context, tx dbtypes.Querier, claimType ClaimType) (uint64, error) } +// BridgeDeployment represents the type of bridge contract deployment (sovereign vs non-sovereign). +type BridgeDeployment byte + +const ( + Unknown BridgeDeployment = iota + NonSovereignChain + SovereignChain +) + +type bridgeDeployment struct { + kind BridgeDeployment + agglayerBridge *agglayerbridge.Agglayerbridge + agglayerBridgeL2 *agglayerbridgel2.Agglayerbridgel2 +} + // buildAppender creates the LogAppenderMap for claim events from the bridge contract. func buildAppender( ctx context.Context, ethClient aggkittypes.EthClienter, querier ClaimQuerier, bridgeAddr common.Address, - agglayerBridgeContract *agglayerbridge.Agglayerbridge, - agglayerBridgeL2Contract *agglayerbridgel2.Agglayerbridgel2, - isSovereign bool, + deployment *bridgeDeployment, log aggkitcommon.Logger, ) (sync.LogAppenderMap, error) { legacyBridge, err := polygonzkevmbridge.NewPolygonzkevmbridge(bridgeAddr, ethClient) @@ -85,37 +98,62 @@ func buildAppender( appender := make(sync.LogAppenderMap) appender[claimEventSignaturePreEtrog] = buildClaimEventHandlerPreEtrog(legacyBridge, ethClient, bridgeAddr, syncFullClaims, log) - appender[claimEventSignature] = buildClaimEventHandler(ctx, agglayerBridgeContract, ethClient, querier, bridgeAddr, syncFullClaims, log) + appender[claimEventSignature] = buildClaimEventHandler(ctx, deployment.agglayerBridge, ethClient, querier, bridgeAddr, syncFullClaims, log) - if isSovereign { - appender[detailedClaimEventSignature] = buildDetailedClaimEventHandler(agglayerBridgeL2Contract) - appender[unsetClaimEventSignature] = buildUnsetClaimEventHandler(agglayerBridgeL2Contract) - appender[setClaimEventSignature] = buildSetClaimEventHandler(agglayerBridgeL2Contract) + if deployment.kind == SovereignChain { + appender[detailedClaimEventSignature] = buildDetailedClaimEventHandler(deployment.agglayerBridgeL2) + 
appender[unsetClaimEventSignature] = buildUnsetClaimEventHandler(deployment.agglayerBridgeL2) + appender[setClaimEventSignature] = buildSetClaimEventHandler(deployment.agglayerBridgeL2) } return appender, nil } -// detectSovereignChain returns true if bridgeAddr is a sovereign chain bridge (AgglayerBridgeL2). -// It also returns the AgglayerBridgeL2 binding regardless (always created). -func detectSovereignChain( +// resolveBridgeDeployment resolves which bridge contract flavor is deployed: +// AgglayerBridge => NonSovereign bridge +// AgglayerBridgeL2 => Sovereign bridge +func resolveBridgeDeployment( ctx context.Context, bridgeAddr common.Address, backend bind.ContractBackend, -) (bool, *agglayerbridgel2.Agglayerbridgel2, error) { - contract, err := agglayerbridgel2.NewAgglayerbridgel2(bridgeAddr, backend) +) (*bridgeDeployment, error) { + agglayerBridge, err := agglayerbridge.NewAgglayerbridge(bridgeAddr, backend) if err != nil { - return false, nil, fmt.Errorf("claimsync: failed to create AgglayerBridgeL2 binding: %w", err) + return nil, fmt.Errorf("claimsync: failed to create AgglayerBridge binding (%s): %w", bridgeAddr, err) + } + + agglayerBridgeL2, err := agglayerbridgel2.NewAgglayerbridgel2(bridgeAddr, backend) + if err != nil { + return nil, fmt.Errorf("claimsync: failed to create AgglayerBridgeL2 binding (%s): %w", bridgeAddr, err) } callOpts := &bind.CallOpts{Pending: false, Context: ctx} - if _, err := contract.BridgeManager(callOpts); err == nil { - return true, contract, nil + + // 1. 
Try calling bridgeManager function — only exists on AgglayerBridgeL2 + if _, err := agglayerBridgeL2.BridgeManager(callOpts); err == nil { + return &bridgeDeployment{ + kind: SovereignChain, + agglayerBridge: agglayerBridge, + agglayerBridgeL2: agglayerBridgeL2, + }, nil + } else if !strings.Contains(err.Error(), gethvm.ErrExecutionReverted.Error()) { + return nil, fmt.Errorf("claimsync: unexpected error querying AgglayerBridgeL2.BridgeManager (%s): %w", + bridgeAddr.Hex(), err) + } + + // 2. If that failed, try lastUpdatedDepositCount function — exists on base AgglayerBridge + if _, err := agglayerBridge.LastUpdatedDepositCount(callOpts); err == nil { + return &bridgeDeployment{ + kind: NonSovereignChain, + agglayerBridge: agglayerBridge, + agglayerBridgeL2: agglayerBridgeL2, + }, nil } else if !strings.Contains(err.Error(), gethvm.ErrExecutionReverted.Error()) { - return false, nil, fmt.Errorf("claimsync: unexpected error querying AgglayerBridgeL2.BridgeManager: %w", err) + return nil, fmt.Errorf("claimsync: unexpected error querying AgglayerBridge.lastUpdatedDepositCount (%s): %w", + bridgeAddr.Hex(), err) } - return false, contract, nil + return nil, fmt.Errorf("claimsync: unable to determine bridge contract type at address %s", bridgeAddr) } // buildClaimEventHandler creates a handler for the ClaimEvent log. 
diff --git a/claimsync/embedded.go b/claimsync/embedded.go index 4c5417436..46777f130 100644 --- a/claimsync/embedded.go +++ b/claimsync/embedded.go @@ -7,7 +7,6 @@ import ( "strings" "time" - "github.com/0xPolygon/cdk-contracts-tooling/contracts/aggchain-multisig/agglayerbridge" claimsyncStorage "github.com/agglayer/aggkit/claimsync/storage" claimsynctypes "github.com/agglayer/aggkit/claimsync/types" aggkitcommon "github.com/agglayer/aggkit/common" @@ -90,23 +89,17 @@ func NewEmbedded( logger aggkitcommon.Logger, ) (*EmbeddedClaimSync, error) { proc := newEmbeddedProcessor(logger, storage) - agglayerBridgeContract, err := agglayerbridge.NewAgglayerbridge(bridgeAddr, ethClient) - if err != nil { - return nil, fmt.Errorf("claimsync embedded: failed to create AgglayerBridge binding: %w", err) - } - - isSovereign, agglayerBridgeL2Contract, err := detectSovereignChain(ctx, bridgeAddr, ethClient) + deployment, err := resolveBridgeDeployment(ctx, bridgeAddr, ethClient) if err != nil { return nil, fmt.Errorf("claimsync embedded: failed to detect chain type: %w", err) } - appender, err := buildAppender(ctx, ethClient, storage, bridgeAddr, - agglayerBridgeContract, agglayerBridgeL2Contract, isSovereign, logger) + appender, err := buildAppender(ctx, ethClient, storage, bridgeAddr, deployment, logger) if err != nil { return nil, fmt.Errorf("claimsync embedded: failed to build appender: %w", err) } - logger.Infof("claimsync embedded created: bridgeAddr=%s sovereign=%t", bridgeAddr.String(), isSovereign) + logger.Infof("claimsync embedded created: bridgeAddr=%s sovereign=%t", bridgeAddr.String(), deployment.kind == SovereignChain) return &EmbeddedClaimSync{ Processor: proc, diff --git a/claimsync/processor.go b/claimsync/processor.go index 8d611b3d9..fcce9d171 100644 --- a/claimsync/processor.go +++ b/claimsync/processor.go @@ -64,6 +64,7 @@ func (p *processor) ProcessBlock(ctx context.Context, block sync.Block) error { return err } shouldRollback = false + 
p.log.Debugf("claimSyncer: successfully processed block %d with %d events", block.Num, len(block.Events)) return nil } diff --git a/config/config.go b/config/config.go index 40267afc0..cda05755e 100644 --- a/config/config.go +++ b/config/config.go @@ -14,6 +14,7 @@ import ( "github.com/agglayer/aggkit/aggsender/prover" validator "github.com/agglayer/aggkit/aggsender/validator" "github.com/agglayer/aggkit/bridgesync" + "github.com/agglayer/aggkit/claimsync" "github.com/agglayer/aggkit/common" ethermanconfig "github.com/agglayer/aggkit/etherman/config" "github.com/agglayer/aggkit/l1infotreesync" @@ -277,9 +278,13 @@ type Config struct { // BridgeL1Sync is the configuration for the synchronizer of the bridge of the L1 BridgeL1Sync bridgesync.Config + // ClaimL1Sync is the configuration for the synchronizer of the claims of the L1 + ClaimL1Sync claimsync.ConfigStandalone // BridgeL2Sync is the configuration for the synchronizer of the bridge of the L2 BridgeL2Sync bridgesync.Config + // ClaimL2Sync is the configuration for the synchronizer of the claims of the L2 + ClaimL2Sync claimsync.ConfigStandalone // L2GERSync is the config for the synchronizer in charge of syncing the GER injected on L2. 
// Needed for the bridge service (RPC) diff --git a/config/default.go b/config/default.go index 4a67d93e8..5b116352d 100644 --- a/config/default.go +++ b/config/default.go @@ -165,6 +165,21 @@ DBQueryTimeout = "{{defaultDBQueryTimeout}}" SyncFromInBridges = "auto" EmbeddedClaimSync = "auto" +[ClaimL1Sync] +DBPath = "{{PathRWData}}/claiml1sync.sqlite" +DBQueryTimeout = "{{BridgeL1Sync.DBQueryTimeout}}" + +BlockFinality = "{{BridgeL1Sync.BlockFinality}}" +InitialBlockNum = {{BridgeL1Sync.InitialBlockNum}} +AutoStart = "auto" + +BridgeAddr = "{{BridgeL1Sync.BridgeAddr}}" +SyncBlockChunkSize = {{BridgeL1Sync.SyncBlockChunkSize}} +RetryAfterErrorPeriod = "{{BridgeL1Sync.RetryAfterErrorPeriod}}" +MaxRetryAttemptsAfterError = {{BridgeL1Sync.MaxRetryAttemptsAfterError}} +WaitForNewBlocksPeriod = "{{BridgeL1Sync.WaitForNewBlocksPeriod}}" +RequireStorageContentCompatibility = {{BridgeL1Sync.RequireStorageContentCompatibility}} + [BridgeL2Sync] DBPath = "{{PathRWData}}/bridgel2sync.sqlite" BlockFinality = "LatestBlock" @@ -179,6 +194,21 @@ DBQueryTimeout = "{{defaultDBQueryTimeout}}" SyncFromInBridges = "auto" EmbeddedClaimSync = "auto" +[ClaimL2Sync] +DBPath = "{{PathRWData}}/claiml2sync.sqlite" +DBQueryTimeout = "{{BridgeL2Sync.DBQueryTimeout}}" + +BlockFinality = "{{BridgeL2Sync.BlockFinality}}" +InitialBlockNum = {{BridgeL2Sync.InitialBlockNum}} +AutoStart = "auto" + +BridgeAddr = "{{BridgeL2Sync.BridgeAddr}}" +SyncBlockChunkSize = {{BridgeL2Sync.SyncBlockChunkSize}} +RetryAfterErrorPeriod = "{{BridgeL2Sync.RetryAfterErrorPeriod}}" +MaxRetryAttemptsAfterError = {{BridgeL2Sync.MaxRetryAttemptsAfterError}} +WaitForNewBlocksPeriod = "{{BridgeL2Sync.WaitForNewBlocksPeriod}}" +RequireStorageContentCompatibility = {{BridgeL2Sync.RequireStorageContentCompatibility}} + [L2GERSync] DBPath = "{{PathRWData}}/l2gersync.sqlite" BlockFinality = "LatestBlock" diff --git a/l1infotreesync/l1infotreesync.go b/l1infotreesync/l1infotreesync.go index e7a23e9c9..5ec2caafc 100644 --- 
a/l1infotreesync/l1infotreesync.go +++ b/l1infotreesync/l1infotreesync.go @@ -39,7 +39,7 @@ var ( ) type DriverInterface interface { - Sync(ctx context.Context) + Sync(ctx context.Context, firstBlockNumber *uint64) GetCompletionPercentage() *float64 } @@ -51,6 +51,7 @@ type L1InfoTreeSync struct { processor *processor driver DriverInterface downloader DownloaderInterface + cfg Config } type RuntimeData = mdrsync.RuntimeData @@ -141,6 +142,7 @@ func NewMultidownloadBased( processor: processor, driver: driver, downloader: downloader, + cfg: cfg, }, nil } @@ -157,27 +159,6 @@ func NewLegacy( return nil, err } - // TODO: get the initialBlock from L1 to simplify config - lastProcessedBlock, _, err := processor.GetLastProcessedBlock(ctx) - if err != nil { - return nil, err - } - - parentBlockNumber := cfg.InitialBlock - 1 - if cfg.InitialBlock > 0 && lastProcessedBlock < parentBlockNumber { - block, err := l1Client.HeaderByNumber(ctx, aggkittypes.NewBlockNumber(parentBlockNumber)) - if err != nil { - return nil, fmt.Errorf("failed to get initial block %d: %w", parentBlockNumber, err) - } - - err = processor.ProcessBlock(ctx, sync.Block{ - Num: parentBlockNumber, - Hash: block.Hash, - }) - if err != nil { - return nil, err - } - } rh := &sync.RetryHandler{ RetryAfterErrorPeriod: cfg.RetryAfterErrorPeriod.Duration, MaxRetryAttemptsAfterError: cfg.MaxRetryAttemptsAfterError, @@ -237,6 +218,7 @@ func NewLegacy( processor: processor, driver: driver, downloader: downloader, + cfg: cfg, }, nil } @@ -263,7 +245,7 @@ func (a *L1InfoTreeSync) GetRPCServices() []jRPC.Service { // Start starts the synchronization process func (s *L1InfoTreeSync) Start(ctx context.Context) { s.processor.log.Info("starting l1infotreesync") - s.driver.Sync(ctx) + s.driver.Sync(ctx, &s.cfg.InitialBlock) } // GetRollupExitTreeMerkleProof creates a merkle proof for the rollup exit tree diff --git a/l2gersync/l2_ger_syncer.go b/l2gersync/l2_ger_syncer.go index cc0a9651a..7bdbc75bb 100644 --- 
a/l2gersync/l2_ger_syncer.go +++ b/l2gersync/l2_ger_syncer.go @@ -41,6 +41,7 @@ type L1InfoTreeQuerier interface { type L2GERSync struct { driver *sync.EVMDriver processor *processor + cfg Config } // New initializes and returns a new instance of L2GERSync @@ -109,6 +110,7 @@ func New( return &L2GERSync{ driver: driver, processor: processor, + cfg: cfg, }, nil } @@ -144,7 +146,7 @@ func resolveSyncMode(ctx context.Context, address common.Address, backend bind.C // Start initiates the synchronization process. func (s *L2GERSync) Start(ctx context.Context) { s.processor.log.Info("starting l2gersync") - s.driver.Sync(ctx) + s.driver.Sync(ctx, &s.cfg.InitialBlockNum) } // GetFirstGERAfterL1InfoTreeIndex returns the first GER after a specified L1 info tree index diff --git a/multidownloader/sync/evmdriver.go b/multidownloader/sync/evmdriver.go index b20b2d286..c4bf43a55 100644 --- a/multidownloader/sync/evmdriver.go +++ b/multidownloader/sync/evmdriver.go @@ -52,7 +52,16 @@ func NewEVMDriver( } } -func (d *EVMDriver) Sync(ctx context.Context) { +func (d *EVMDriver) Sync(ctx context.Context, firstBlockNumber *uint64) { + // firstBlockNumber is unused and not support in the current implementation + // it just check that is equal to syncerConfig.InitialBlockNum + if firstBlockNumber == nil { + d.logger.Fatalf("multidownloader doesnt support firstBlockNumber==nil") + } + if *firstBlockNumber != d.syncerConfig.FromBlock { + d.logger.Fatalf("multidownloader doesnt support firstBlockNumber different than FromBlock, got %d, expected %d", + *firstBlockNumber, d.syncerConfig.FromBlock) + } attempts := 0 for { if ctx.Err() != nil { diff --git a/scripts/aggsender_request_last_cert.sh b/scripts/aggsender_request_last_cert.sh new file mode 100755 index 000000000..121cbc4fd --- /dev/null +++ b/scripts/aggsender_request_last_cert.sh @@ -0,0 +1,4 @@ +#!/bin/bash +AGGKIT_URL=${AGGKIT_URL:-http://localhost:5576/} + +curl -X POST $AGGKIT_URL -H "Content-Type: application/json" -d 
'{"method":"aggsender_getCertificateHeaderPerHeight", "params":[], "id":1}' | jq . diff --git a/scripts/aggsender_request_status.sh b/scripts/aggsender_request_status.sh new file mode 100755 index 000000000..461e187dd --- /dev/null +++ b/scripts/aggsender_request_status.sh @@ -0,0 +1,4 @@ +#!/bin/bash +AGGKIT_URL=${AGGKIT_URL:-http://localhost:5576/} + +curl -X POST $AGGKIT_URL -H "Content-Type: application/json" -d '{"method":"aggsender_status", "params":[], "id":1}' | jq . diff --git a/scripts/l1claimsync_set_starting_block.sh b/scripts/l1claimsync_set_starting_block.sh new file mode 100755 index 000000000..e63cf067d --- /dev/null +++ b/scripts/l1claimsync_set_starting_block.sh @@ -0,0 +1,7 @@ +#!/bin/bash +AGGKIT_URL=${AGGKIT_URL:-http://localhost:5576/} + +START_BLOCK=${1:-1} + +echo "Requesting L1 Start block $START_BLOCK..." >&2 +curl -X POST $AGGKIT_URL -H "Content-Type: application/json" -d '{"method":"l1claimsync_setNextRequiredBlock", "params":['${START_BLOCK}'], "id":1}' 2>/dev/null | jq . diff --git a/scripts/l2claimsync_get_claims.sh b/scripts/l2claimsync_get_claims.sh new file mode 100755 index 000000000..66a744ffa --- /dev/null +++ b/scripts/l2claimsync_get_claims.sh @@ -0,0 +1,7 @@ +#!/bin/bash +AGGKIT_URL=${AGGKIT_URL:-http://localhost:5576/} + +START_BLOCK=${1:-0} +END_BLOCK=${2:-99999999} +echo "Requesting claims from block ${START_BLOCK} to ${END_BLOCK}..." >&2 +curl -X POST $AGGKIT_URL -H "Content-Type: application/json" -d '{"method":"l2claimsync_getClaims", "params":['${START_BLOCK},${END_BLOCK}'], "id":1}' 2>/dev/null | jq . diff --git a/scripts/request_aggsender_status.sh b/scripts/request_aggsender_status.sh deleted file mode 100644 index c56227f93..000000000 --- a/scripts/request_aggsender_status.sh +++ /dev/null @@ -1,2 +0,0 @@ -#!/bin/bash -curl -X POST http://localhost:33032/ -H "Con -application/json" -d '{"method":"aggsender_status", "params":[], "id":1}' | jq . 
diff --git a/test/e2e/envs/op-pp/config_local/README.md b/test/e2e/envs/op-pp/config_local/README.md index 4f3e41469..7a9cdf9fc 100644 --- a/test/e2e/envs/op-pp/config_local/README.md +++ b/test/e2e/envs/op-pp/config_local/README.md @@ -2,6 +2,7 @@ ## aggkit-parallel.toml This configuration differents ports to be able to run at the same time as docker `aggkit-001` + To launch using vscode add next configuration to `.vscode/launch.json`: ``` { From 289b379b22f0d3c6d5b0737b1dba8b29b963e48c Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Tue, 17 Mar 2026 14:45:45 +0100 Subject: [PATCH 05/28] fix: Unittest and linter --- aggsender/aggsender.go | 7 +- aggsender/aggsender_test.go | 15 +- aggsender/aggsender_validator.go | 5 +- .../imported_bridge_exit_converter_test.go | 21 +- .../builder_flow_aggchain_prover_test.go | 57 +-- aggsender/flows/builder_flow_factory.go | 3 +- aggsender/flows/builder_flow_factory_test.go | 1 + aggsender/flows/builder_flow_pp_test.go | 71 ++-- aggsender/flows/flow_base.go | 4 +- aggsender/flows/flow_base_test.go | 139 +++---- .../flows/max_l2blocknumber_limiter_test.go | 5 +- .../mocks/mock_aggsender_builder_flow.go | 226 ++++++----- aggsender/mocks/mock_aggsender_flow_baser.go | 110 ++--- .../mock_optimistic_signature_calculator.go | 18 +- aggsender/optimistic/optimistic_sign_test.go | 4 +- ...ulate_hash_commit_imported_bridges_test.go | 6 +- aggsender/prover/proof_generation_tool.go | 2 +- .../prover/proof_generation_tool_test.go | 35 +- aggsender/query/aggchain_proof_query_test.go | 25 +- aggsender/query/bridge_query.go | 11 +- aggsender/query/bridge_query_test.go | 49 +-- aggsender/query/certificate_query_test.go | 102 +++-- aggsender/trigger/trigger_by_bridge_test.go | 2 +- .../types/certificate_build_params_test.go | 34 +- aggsender/types/interfaces.go | 4 +- bridgeservice/bridge.go | 383 ++++++++++++++++++ bridgeservice/bridge_interfaces.go | 11 + bridgeservice/bridge_test.go | 101 
++--- bridgeservice/mocks/mock_bridger.go | 310 +------------- bridgeservice/mocks/mock_claimer.go | 267 ++++++++++++ bridgesync/bridgesync_test.go | 12 - bridgesync/claim.go | 51 --- bridgesync/downloader_test.go | 7 +- bridgesync/e2e_test.go | 3 - bridgesync/processor.go | 89 +--- bridgesync/processor_test.go | 52 +-- claimsync/claimcalldata_test.go | 2 +- claimsync/claimsync.go | 33 +- claimsync/claimsync_rpc.go | 3 +- claimsync/claimsync_test.go | 10 +- claimsync/downloader.go | 51 ++- claimsync/embedded.go | 15 +- claimsync/processor.go | 10 +- claimsync/storage/storage.go | 43 +- claimsync/storage/storage_paged.go | 1 + claimsync/types/claim_reader.go | 2 +- claimsync/types/claim_storager.go | 2 +- claimsync/types/claim_syncer.go | 2 +- claimsync/types/mocks/mock_claims_reader.go | 29 +- cmd/run.go | 22 +- config/types/true_false_auto.go | 30 +- l1infotreesync/mock_driver_interface.go | 17 +- l2gersync/processor_test.go | 2 +- multidownloader/e2e_test.go | 3 +- multidownloader/evm_multidownloader_test.go | 3 +- multidownloader/sync/evmdriver.go | 1 + sync/evmdriver_test.go | 16 +- tools/remove_ger/diagnosis.go | 17 +- 58 files changed, 1450 insertions(+), 1106 deletions(-) create mode 100644 bridgeservice/mocks/mock_claimer.go delete mode 100644 bridgesync/claim.go diff --git a/aggsender/aggsender.go b/aggsender/aggsender.go index 0c55e0331..76e4f216e 100644 --- a/aggsender/aggsender.go +++ b/aggsender/aggsender.go @@ -397,6 +397,10 @@ func (a *AggSender) sendCertificates(ctx context.Context, returnAfterNIterations } func (a *AggSender) setClaimSyncerNextRequiredBlock(ctx context.Context) { + if a.l2ClaimSyncer == nil { + a.log.Debugf("l2 claim syncer is nil, skipping setClaimSyncerNextRequiredBlock") + return + } for { select { case <-ctx.Done(): @@ -410,9 +414,6 @@ func (a *AggSender) setClaimSyncerNextRequiredBlock(ctx context.Context) { continue } a.log.Infof("Setting starting Claim L2 Syncer block to %d", nextBlock) - if a.l2ClaimSyncer == nil { - 
a.log.Fatalf("l2 claim syncer is nil, so we are not going to set the next required block for claim syncer") - } if err := a.l2ClaimSyncer.SetNextRequiredBlock(ctx, nextBlock); err != nil { a.log.Errorf("error setting next required block for claim syncer: %v", err) time.Sleep(a.cfg.DelayBetweenRetries.Duration) diff --git a/aggsender/aggsender_test.go b/aggsender/aggsender_test.go index 4a884c0c1..63432a270 100644 --- a/aggsender/aggsender_test.go +++ b/aggsender/aggsender_test.go @@ -25,6 +25,7 @@ import ( aggsendertypes "github.com/agglayer/aggkit/aggsender/types" "github.com/agglayer/aggkit/bridgesync" bridgetypes "github.com/agglayer/aggkit/bridgesync/types" + claimsynctypes "github.com/agglayer/aggkit/claimsync/types" aggkitcommon "github.com/agglayer/aggkit/common" "github.com/agglayer/aggkit/config/types" mocksdb "github.com/agglayer/aggkit/db/compatibility/mocks" @@ -95,7 +96,7 @@ func TestAggSenderStart(t *testing.T) { sendTrigger.EXPECT().Status().Return("test status").Once() sendTrigger.EXPECT().OnIdle().Maybe() bridgeL2SyncerMock.EXPECT().OriginNetwork().Return(uint32(2)) - bridgeL2SyncerMock.EXPECT().GetLastProcessedBlock(mock.Anything).Return(uint64(0), nil) + bridgeL2SyncerMock.EXPECT().GetLastProcessedBlock(mock.Anything).Return(uint64(0), true, nil) aggLayerMock.EXPECT().GetLatestPendingCertificateHeader(mock.Anything, mock.Anything).Return(nil, nil).Twice() aggLayerMock.EXPECT().GetLatestSettledCertificateHeader(mock.Anything, mock.Anything).Return(nil, nil).Twice() rollupQuerierMock.EXPECT().GetRollupChainID().Return(uint64(1234), nil) @@ -263,7 +264,7 @@ func TestSendCertificate_NoClaims(t *testing.T) { Status: agglayertypes.Settled, }, nil).Once() mockStorage.EXPECT().SaveLastSentCertificate(mock.Anything, mock.Anything).Return(nil).Once() - mockL2BridgeQuerier.EXPECT().GetLastProcessedBlock(mock.Anything).Return(uint64(50), nil) + mockL2BridgeQuerier.EXPECT().GetLastProcessedBlock(mock.Anything).Return(uint64(50), true, nil) 
mockL2BridgeQuerier.EXPECT().GetBridgesAndClaims(mock.Anything, uint64(11), uint64(50)).Return([]bridgesync.Bridge{ { BlockNum: 30, @@ -277,8 +278,8 @@ func TestSendCertificate_NoClaims(t *testing.T) { Metadata: []byte("metadata"), DepositCount: 1, }, - }, []bridgesync.Claim{}, nil).Once() - mockL2BridgeQuerier.EXPECT().GetUnsetClaimsForBlockRange(mock.Anything, uint64(11), uint64(50)).Return([]bridgetypes.Unclaim{}, nil).Once() + }, []claimsynctypes.Claim{}, nil).Once() + mockL2BridgeQuerier.EXPECT().GetUnsetClaimsForBlockRange(mock.Anything, uint64(11), uint64(50)).Return([]claimsynctypes.Unclaim{}, nil).Once() mockL1Querier.EXPECT().GetTargetL1InfoRoot(ctx).Return(&treetypes.Root{}, nil, nil).Once() mockL2BridgeQuerier.EXPECT().GetExitRootByIndex(mock.Anything, uint32(1)).Return(common.Hash{}, nil).Once() mockL2BridgeQuerier.EXPECT().OriginNetwork().Return(uint32(1)).Once() @@ -810,14 +811,14 @@ func NewBridgesData(t *testing.T, num int, blockNum []uint64) []bridgesync.Bridg return res } -func NewClaimData(t *testing.T, num int, blockNum []uint64) []bridgesync.Claim { +func NewClaimData(t *testing.T, num int, blockNum []uint64) []claimsynctypes.Claim { t.Helper() if num == 0 { num = len(blockNum) } - res := make([]bridgesync.Claim, 0) + res := make([]claimsynctypes.Claim, 0) for i := 0; i < num; i++ { - res = append(res, bridgesync.Claim{ + res = append(res, claimsynctypes.Claim{ BlockNum: blockNum[i%len(blockNum)], BlockPos: 0, }) diff --git a/aggsender/aggsender_validator.go b/aggsender/aggsender_validator.go index c432ae241..8203fb3db 100644 --- a/aggsender/aggsender_validator.go +++ b/aggsender/aggsender_validator.go @@ -4,9 +4,6 @@ import ( "context" "errors" - signertypes "github.com/agglayer/go_signer/signer/types" - ethcommon "github.com/ethereum/go-ethereum/common" - "github.com/agglayer/aggkit/agglayer" "github.com/agglayer/aggkit/aggsender/metrics" "github.com/agglayer/aggkit/aggsender/types" @@ -14,6 +11,8 @@ import ( v1 
"github.com/agglayer/aggkit/aggsender/validator/proto/v1" aggkitcommon "github.com/agglayer/aggkit/common" "github.com/agglayer/aggkit/grpc" + signertypes "github.com/agglayer/go_signer/signer/types" + ethcommon "github.com/ethereum/go-ethereum/common" ) var ( diff --git a/aggsender/converters/imported_bridge_exit_converter_test.go b/aggsender/converters/imported_bridge_exit_converter_test.go index 6ab9acc4b..2a3f83110 100644 --- a/aggsender/converters/imported_bridge_exit_converter_test.go +++ b/aggsender/converters/imported_bridge_exit_converter_test.go @@ -10,6 +10,7 @@ import ( "github.com/agglayer/aggkit/aggsender/mocks" "github.com/agglayer/aggkit/bridgesync" bridgetypes "github.com/agglayer/aggkit/bridgesync/types" + claimsynctypes "github.com/agglayer/aggkit/claimsync/types" "github.com/agglayer/aggkit/l1infotreesync" treetypes "github.com/agglayer/aggkit/tree/types" "github.com/ethereum/go-ethereum/common" @@ -23,13 +24,13 @@ func TestConvertClaimToImportedBridgeExit(t *testing.T) { tests := []struct { name string - claim bridgesync.Claim + claim claimsynctypes.Claim expectedError bool expectedExit *agglayertypes.ImportedBridgeExit }{ { name: "Asset claim", - claim: bridgesync.Claim{ + claim: claimsynctypes.Claim{ IsMessage: false, OriginNetwork: 1, OriginAddress: common.HexToAddress("0x123"), @@ -61,7 +62,7 @@ func TestConvertClaimToImportedBridgeExit(t *testing.T) { }, { name: "Message claim", - claim: bridgesync.Claim{ + claim: claimsynctypes.Claim{ IsMessage: true, OriginNetwork: 1, OriginAddress: common.HexToAddress("0x123"), @@ -118,14 +119,14 @@ func TestGetImportedBridgeExits(t *testing.T) { tests := []struct { name string - claims []bridgesync.Claim + claims []claimsynctypes.Claim mockFn func(*mocks.L1InfoTreeDataQuerier) expectedError bool expectedExits []*agglayertypes.ImportedBridgeExit }{ { name: "Single claim", - claims: []bridgesync.Claim{ + claims: []claimsynctypes.Claim{ { IsMessage: false, OriginNetwork: 1, @@ -199,7 +200,7 @@ func 
TestGetImportedBridgeExits(t *testing.T) { }, { name: "Multiple claims", - claims: []bridgesync.Claim{ + claims: []claimsynctypes.Claim{ { IsMessage: false, OriginNetwork: 1, @@ -326,13 +327,13 @@ func TestGetImportedBridgeExits(t *testing.T) { }, { name: "No claims", - claims: []bridgesync.Claim{}, + claims: []claimsynctypes.Claim{}, expectedError: false, expectedExits: []*agglayertypes.ImportedBridgeExit{}, }, { name: "error getting proof for GER", - claims: []bridgesync.Claim{ + claims: []claimsynctypes.Claim{ { IsMessage: false, OriginNetwork: 11, @@ -385,7 +386,7 @@ func TestGetImportedBridgeExits(t *testing.T) { func TestConvertToImportedBridgeExitsWithoutClaimData_NoClaims(t *testing.T) { t.Parallel() - exits, err := ConvertToImportedBridgeExitsWithoutClaimData([]bridgesync.Claim{}) + exits, err := ConvertToImportedBridgeExitsWithoutClaimData([]claimsynctypes.Claim{}) require.NoError(t, err) require.Equal(t, []*agglayertypes.ImportedBridgeExit{}, exits) } @@ -393,7 +394,7 @@ func TestConvertToImportedBridgeExitsWithoutClaimData_NoClaims(t *testing.T) { func TestConvertToImportedBridgeExitsWithoutClaimData_MultipleClaims(t *testing.T) { t.Parallel() - claims := []bridgesync.Claim{ + claims := []claimsynctypes.Claim{ { IsMessage: false, OriginNetwork: 1, diff --git a/aggsender/flows/builder_flow_aggchain_prover_test.go b/aggsender/flows/builder_flow_aggchain_prover_test.go index 1b1ad235e..c74f8d77e 100644 --- a/aggsender/flows/builder_flow_aggchain_prover_test.go +++ b/aggsender/flows/builder_flow_aggchain_prover_test.go @@ -16,6 +16,7 @@ import ( "github.com/agglayer/aggkit/aggsender/types" "github.com/agglayer/aggkit/bridgesync" bridgesynctypes "github.com/agglayer/aggkit/bridgesync/types" + claimsynctypes "github.com/agglayer/aggkit/claimsync/types" aggkitcommon "github.com/agglayer/aggkit/common" "github.com/agglayer/aggkit/l1infotreesync" "github.com/agglayer/aggkit/log" @@ -77,27 +78,27 @@ func Test_AggchainProverFlow_GetCertificateBuildParams(t 
*testing.T) { LastProvenBlock: 1, EndBlock: 10, }, nil).Once() - mockL2BridgeQuerier.EXPECT().GetBridgesAndClaims(ctx, uint64(1), uint64(10)).Return([]bridgesync.Bridge{{}}, []bridgesync.Claim{ + mockL2BridgeQuerier.EXPECT().GetBridgesAndClaims(ctx, uint64(1), uint64(10)).Return([]bridgesync.Bridge{{}}, []claimsynctypes.Claim{ { GlobalIndex: big.NewInt(1), GlobalExitRoot: ger, MainnetExitRoot: mer, RollupExitRoot: rer, }}, nil) - mockL2BridgeQuerier.EXPECT().GetUnsetClaimsForBlockRange(ctx, uint64(1), uint64(10)).Return([]bridgesynctypes.Unclaim{}, nil) + mockL2BridgeQuerier.EXPECT().GetUnsetClaimsForBlockRange(ctx, uint64(1), uint64(10)).Return([]claimsynctypes.Unclaim{}, nil) }, expectedParams: &types.CertificateBuildParams{ FromBlock: 1, ToBlock: 10, RetryCount: 1, Bridges: []bridgesync.Bridge{{}}, - Claims: []bridgesync.Claim{{ + Claims: []claimsynctypes.Claim{{ GlobalIndex: big.NewInt(1), RollupExitRoot: common.HexToHash("0x1"), MainnetExitRoot: common.HexToHash("0x2"), GlobalExitRoot: l1infotreesync.CalculateGER(common.HexToHash("0x2"), common.HexToHash("0x1")), }}, - Unclaims: []bridgesynctypes.Unclaim{}, + Unclaims: []claimsynctypes.Unclaim{}, L1InfoTreeRootFromWhichToProve: common.HexToHash("0x1"), AggchainProof: &types.AggchainProof{ SP1StarkProof: &types.SP1StarkProof{Proof: []byte("some-proof")}, @@ -134,14 +135,14 @@ func Test_AggchainProverFlow_GetCertificateBuildParams(t *testing.T) { FinalizedL1InfoTreeRoot: &finalizedL1Root, L1InfoTreeLeafCount: 11, }, nil, nil).Once() - mockL2BridgeQuerier.EXPECT().GetBridgesAndClaims(ctx, uint64(1), uint64(10)).Return([]bridgesync.Bridge{{}}, []bridgesync.Claim{ + mockL2BridgeQuerier.EXPECT().GetBridgesAndClaims(ctx, uint64(1), uint64(10)).Return([]bridgesync.Bridge{{}}, []claimsynctypes.Claim{ { GlobalIndex: big.NewInt(1), GlobalExitRoot: ger, MainnetExitRoot: mer, RollupExitRoot: rer, }}, nil) - mockL2BridgeQuerier.EXPECT().GetUnsetClaimsForBlockRange(ctx, uint64(1), 
uint64(10)).Return([]bridgesynctypes.Unclaim{}, nil) + mockL2BridgeQuerier.EXPECT().GetUnsetClaimsForBlockRange(ctx, uint64(1), uint64(10)).Return([]claimsynctypes.Unclaim{}, nil) mockAggchainProofQuerier.EXPECT().GenerateAggchainProof(context.Background(), uint64(0), uint64(10), mock.Anything). Return(&types.AggchainProof{ SP1StarkProof: &types.SP1StarkProof{Proof: []byte("some-proof")}, @@ -165,13 +166,13 @@ func Test_AggchainProverFlow_GetCertificateBuildParams(t *testing.T) { }, Bridges: []bridgesync.Bridge{{}}, L1InfoTreeLeafCount: 11, - Claims: []bridgesync.Claim{{ + Claims: []claimsynctypes.Claim{{ GlobalIndex: big.NewInt(1), RollupExitRoot: common.HexToHash("0x1"), MainnetExitRoot: common.HexToHash("0x2"), GlobalExitRoot: l1infotreesync.CalculateGER(common.HexToHash("0x2"), common.HexToHash("0x1")), }}, - Unclaims: []bridgesynctypes.Unclaim{}, + Unclaims: []claimsynctypes.Unclaim{}, L1InfoTreeRootFromWhichToProve: finalizedL1Root, AggchainProof: &types.AggchainProof{ SP1StarkProof: &types.SP1StarkProof{Proof: []byte("some-proof")}, @@ -193,15 +194,15 @@ func Test_AggchainProverFlow_GetCertificateBuildParams(t *testing.T) { mockStorage.EXPECT().GetLastSentCertificateHeader().Return(nil, nil).Once() mockL1InfoDataQuery.EXPECT().GetTargetL1InfoRoot(mock.Anything).Return( &treetypes.Root{Hash: finalizedL1Root, BlockNum: 10}, nil, nil) - mockL2BridgeQuerier.On("GetLastProcessedBlock", ctx).Return(uint64(10), nil) - mockL2BridgeQuerier.EXPECT().GetBridgesAndClaims(ctx, uint64(1), uint64(10)).Return([]bridgesync.Bridge{{}}, []bridgesync.Claim{ + mockL2BridgeQuerier.On("GetLastProcessedBlock", ctx).Return(uint64(10), true, nil) + mockL2BridgeQuerier.EXPECT().GetBridgesAndClaims(ctx, uint64(1), uint64(10)).Return([]bridgesync.Bridge{{}}, []claimsynctypes.Claim{ { GlobalIndex: big.NewInt(1), GlobalExitRoot: ger, MainnetExitRoot: mer, RollupExitRoot: rer, }}, nil) - mockL2BridgeQuerier.EXPECT().GetUnsetClaimsForBlockRange(ctx, uint64(1), 
uint64(10)).Return([]bridgesynctypes.Unclaim{}, nil) + mockL2BridgeQuerier.EXPECT().GetUnsetClaimsForBlockRange(ctx, uint64(1), uint64(10)).Return([]claimsynctypes.Unclaim{}, nil) mockL1InfoDataQuery.EXPECT().IsGERFinalized(ger, uint32(1)).Return(true, nil).Once() mockAggchainProofQuerier.EXPECT().GenerateAggchainProof(context.Background(), uint64(0), uint64(10), mock.Anything). Return(nil, errors.New("some error")) @@ -216,11 +217,11 @@ func Test_AggchainProverFlow_GetCertificateBuildParams(t *testing.T) { mockL1InfoDataQuery *mocks.L1InfoTreeDataQuerier) { mockStorage.EXPECT().GetLastSentCertificateHeaderWithProofIfInError(ctx).Return(nil, nil, nil).Once() mockStorage.EXPECT().GetLastSentCertificateHeader().Return(nil, nil).Once() - mockL2BridgeQuerier.EXPECT().GetLastProcessedBlock(ctx).Return(uint64(10), nil) + mockL2BridgeQuerier.EXPECT().GetLastProcessedBlock(ctx).Return(uint64(10), true, nil) mockL1InfoDataQuery.EXPECT().GetTargetL1InfoRoot(mock.Anything).Return( &treetypes.Root{Hash: finalizedL1Root, BlockNum: 10}, nil, nil) - mockL2BridgeQuerier.EXPECT().GetBridgesAndClaims(ctx, uint64(1), uint64(10)).Return([]bridgesync.Bridge{}, []bridgesync.Claim{}, nil) - mockL2BridgeQuerier.EXPECT().GetUnsetClaimsForBlockRange(ctx, uint64(1), uint64(10)).Return([]bridgesynctypes.Unclaim{}, nil) + mockL2BridgeQuerier.EXPECT().GetBridgesAndClaims(ctx, uint64(1), uint64(10)).Return([]bridgesync.Bridge{}, []claimsynctypes.Claim{}, nil) + mockL2BridgeQuerier.EXPECT().GetUnsetClaimsForBlockRange(ctx, uint64(1), uint64(10)).Return([]claimsynctypes.Unclaim{}, nil) wrappedErr := fmt.Errorf("wrapped error: %w", query.ErrNoProofBuiltYet) mockAggchainProofQuerier.EXPECT().GenerateAggchainProof(context.Background(), uint64(0), uint64(10), mock.Anything). 
Return(nil, wrappedErr) @@ -241,14 +242,14 @@ func Test_AggchainProverFlow_GetCertificateBuildParams(t *testing.T) { mockStorage.EXPECT().GetLastSentCertificateHeader().Return(&types.CertificateHeader{ToBlock: 5}, nil).Once() mockL1InfoDataQuery.EXPECT().GetTargetL1InfoRoot(mock.Anything).Return( &treetypes.Root{Hash: finalizedL1Root, BlockNum: 10, Index: 10}, nil, nil) - mockL2BridgeQuerier.On("GetLastProcessedBlock", ctx).Return(uint64(10), nil) - mockL2BridgeQuerier.EXPECT().GetBridgesAndClaims(ctx, uint64(6), uint64(10)).Return([]bridgesync.Bridge{{}}, []bridgesync.Claim{{ + mockL2BridgeQuerier.On("GetLastProcessedBlock", ctx).Return(uint64(10), true, nil) + mockL2BridgeQuerier.EXPECT().GetBridgesAndClaims(ctx, uint64(6), uint64(10)).Return([]bridgesync.Bridge{{}}, []claimsynctypes.Claim{{ GlobalIndex: big.NewInt(1), GlobalExitRoot: ger, MainnetExitRoot: mer, RollupExitRoot: rer, }}, nil) - mockL2BridgeQuerier.EXPECT().GetUnsetClaimsForBlockRange(ctx, uint64(6), uint64(10)).Return([]bridgesynctypes.Unclaim{}, nil) + mockL2BridgeQuerier.EXPECT().GetUnsetClaimsForBlockRange(ctx, uint64(6), uint64(10)).Return([]claimsynctypes.Unclaim{}, nil) mockL1InfoDataQuery.EXPECT().IsGERFinalized(ger, uint32(11)).Return(true, nil).Once() mockAggchainProofQuerier.EXPECT().GenerateAggchainProof(context.Background(), uint64(5), uint64(10), mock.Anything). 
Return(&types.AggchainProof{ @@ -266,13 +267,13 @@ func Test_AggchainProverFlow_GetCertificateBuildParams(t *testing.T) { }, Bridges: []bridgesync.Bridge{{}}, L1InfoTreeLeafCount: 11, - Claims: []bridgesync.Claim{{ + Claims: []claimsynctypes.Claim{{ GlobalIndex: big.NewInt(1), RollupExitRoot: common.HexToHash("0x1"), MainnetExitRoot: common.HexToHash("0x2"), GlobalExitRoot: l1infotreesync.CalculateGER(common.HexToHash("0x2"), common.HexToHash("0x1")), }}, - Unclaims: []bridgesynctypes.Unclaim{}, + Unclaims: []claimsynctypes.Unclaim{}, L1InfoTreeRootFromWhichToProve: finalizedL1Root, AggchainProof: &types.AggchainProof{ SP1StarkProof: &types.SP1StarkProof{Proof: []byte("some-proof")}, @@ -296,14 +297,14 @@ func Test_AggchainProverFlow_GetCertificateBuildParams(t *testing.T) { mockStorage.EXPECT().GetLastSentCertificateHeader().Return(&types.CertificateHeader{ToBlock: 5}, nil).Once() mockL1InfoDataQuery.EXPECT().GetTargetL1InfoRoot(mock.Anything).Return( &treetypes.Root{Hash: finalizedL1Root, BlockNum: 10, Index: 10}, nil, nil) - mockL2BridgeQuerier.On("GetLastProcessedBlock", ctx).Return(uint64(10), nil) + mockL2BridgeQuerier.On("GetLastProcessedBlock", ctx).Return(uint64(10), true, nil) mockL2BridgeQuerier.EXPECT().GetBridgesAndClaims(ctx, uint64(6), uint64(10)).Return( []bridgesync.Bridge{{BlockNum: 6}, {BlockNum: 10}}, - []bridgesync.Claim{ + []claimsynctypes.Claim{ {BlockNum: 8, GlobalIndex: big.NewInt(1), GlobalExitRoot: ger, MainnetExitRoot: mer, RollupExitRoot: rer}, {BlockNum: 9, GlobalIndex: big.NewInt(2), GlobalExitRoot: ger, MainnetExitRoot: mer, RollupExitRoot: rer}}, nil) - mockL2BridgeQuerier.EXPECT().GetUnsetClaimsForBlockRange(ctx, uint64(6), uint64(10)).Return([]bridgesynctypes.Unclaim{}, nil) + mockL2BridgeQuerier.EXPECT().GetUnsetClaimsForBlockRange(ctx, uint64(6), uint64(10)).Return([]claimsynctypes.Unclaim{}, nil) mockL1InfoDataQuery.EXPECT().IsGERFinalized(ger, uint32(11)).Return(true, nil).Once() 
mockAggchainProofQuerier.EXPECT().GenerateAggchainProof(context.Background(), uint64(5), uint64(10), mock.Anything). Return(&types.AggchainProof{ @@ -321,14 +322,14 @@ func Test_AggchainProverFlow_GetCertificateBuildParams(t *testing.T) { ToBlock: 5, }, Bridges: []bridgesync.Bridge{{BlockNum: 6}}, - Claims: []bridgesync.Claim{{ + Claims: []claimsynctypes.Claim{{ BlockNum: 8, GlobalIndex: big.NewInt(1), RollupExitRoot: common.HexToHash("0x1"), MainnetExitRoot: common.HexToHash("0x2"), GlobalExitRoot: l1infotreesync.CalculateGER(common.HexToHash("0x2"), common.HexToHash("0x1")), }}, - Unclaims: []bridgesynctypes.Unclaim{}, + Unclaims: []claimsynctypes.Unclaim{}, L1InfoTreeRootFromWhichToProve: finalizedL1Root, AggchainProof: &types.AggchainProof{ SP1StarkProof: &types.SP1StarkProof{Proof: []byte("some-proof")}, @@ -523,7 +524,7 @@ func Test_AggchainProverFlow_BuildCertificate(t *testing.T) { FromBlock: 1, ToBlock: 10, Bridges: []bridgesync.Bridge{{}}, - Claims: []bridgesync.Claim{}, + Claims: []claimsynctypes.Claim{}, L1InfoTreeRootFromWhichToProve: common.HexToHash("0x1"), }, expectedError: "error getting exit root by index", @@ -537,7 +538,7 @@ func Test_AggchainProverFlow_BuildCertificate(t *testing.T) { FromBlock: 1, ToBlock: 10, Bridges: []bridgesync.Bridge{}, - Claims: []bridgesync.Claim{}, + Claims: []claimsynctypes.Claim{}, CreatedAt: uint32(createdAt.Unix()), L1InfoTreeRootFromWhichToProve: common.HexToHash("0x1"), CertificateType: types.CertificateTypeFEP, @@ -837,7 +838,7 @@ func Test_AggchainProverFlow_GenerateBuildParams(t *testing.T) { ToBlock: 10, RetryCount: 0, Bridges: []bridgesync.Bridge{{}}, - Claims: []bridgesync.Claim{}, + Claims: []claimsynctypes.Claim{}, CreatedAt: timeNowUTCForTest(), CertificateType: types.CertificateTypeFEP, } @@ -851,7 +852,7 @@ func Test_AggchainProverFlow_GenerateBuildParams(t *testing.T) { ToBlock: 10, RetryCount: 0, Bridges: []bridgesync.Bridge{{}}, - Claims: []bridgesync.Claim{}, + Claims: []claimsynctypes.Claim{}, 
CreatedAt: timeNowUTCForTest(), CertificateType: types.CertificateTypeFEP, }, diff --git a/aggsender/flows/builder_flow_factory.go b/aggsender/flows/builder_flow_factory.go index 823a61398..047f71466 100644 --- a/aggsender/flows/builder_flow_factory.go +++ b/aggsender/flows/builder_flow_factory.go @@ -203,7 +203,8 @@ func CreateCommonFlowComponents( return nil, fmt.Errorf("failed to create bridge L2 sovereign reader: %w", err) } - l2BridgeQuerier := query.NewBridgeDataQuerier(logger, l2Syncer, l2ClaimSyncer, delayBetweenRetries, agglayerBridgeL2Reader) + l2BridgeQuerier := query.NewBridgeDataQuerier( + logger, l2Syncer, l2ClaimSyncer, delayBetweenRetries, agglayerBridgeL2Reader) l1InfoTreeQuerier, err := query.NewL1InfoTreeDataQuerier(l1Client, globalExitRootL1Addr, l1InfoTreeSyncer, blockFinalityForL1InfoTree) if err != nil { diff --git a/aggsender/flows/builder_flow_factory_test.go b/aggsender/flows/builder_flow_factory_test.go index f9019d49f..240c9bf08 100644 --- a/aggsender/flows/builder_flow_factory_test.go +++ b/aggsender/flows/builder_flow_factory_test.go @@ -195,6 +195,7 @@ func TestNewFlow(t *testing.T) { mockL2Client, mockL1InfoTreeSyncer, mockL2BridgeSyncer, + nil, // l2ClaimSyncer mockRollupDataQuerier, mockCommitteeQuerier, nil, // certQuerier diff --git a/aggsender/flows/builder_flow_pp_test.go b/aggsender/flows/builder_flow_pp_test.go index 3c83da5a1..47cb938ed 100644 --- a/aggsender/flows/builder_flow_pp_test.go +++ b/aggsender/flows/builder_flow_pp_test.go @@ -12,6 +12,7 @@ import ( "github.com/agglayer/aggkit/aggsender/types" "github.com/agglayer/aggkit/bridgesync" bridgetypes "github.com/agglayer/aggkit/bridgesync/types" + claimsynctypes "github.com/agglayer/aggkit/claimsync/types" "github.com/agglayer/aggkit/l1infotreesync" "github.com/agglayer/aggkit/log" treetypes "github.com/agglayer/aggkit/tree/types" @@ -33,7 +34,7 @@ func TestBuildCertificate(t *testing.T) { tests := []struct { name string bridges []bridgesync.Bridge - claims 
[]bridgesync.Claim + claims []claimsynctypes.Claim lastSentCertificate types.CertificateHeader fromBlock uint64 toBlock uint64 @@ -55,7 +56,7 @@ func TestBuildCertificate(t *testing.T) { DepositCount: 1, }, }, - claims: []bridgesync.Claim{ + claims: []claimsynctypes.Claim{ { IsMessage: false, OriginNetwork: 1, @@ -157,7 +158,7 @@ func TestBuildCertificate(t *testing.T) { { name: "No bridges or claims", bridges: []bridgesync.Bridge{}, - claims: []bridgesync.Claim{}, + claims: []claimsynctypes.Claim{}, lastSentCertificate: types.CertificateHeader{ NewLocalExitRoot: common.HexToHash("0x123"), Height: 1, @@ -179,7 +180,7 @@ func TestBuildCertificate(t *testing.T) { DepositCount: 1, }, }, - claims: []bridgesync.Claim{ + claims: []claimsynctypes.Claim{ { IsMessage: false, OriginNetwork: 1, @@ -279,7 +280,7 @@ func Test_PPFlow_GetCertificateBuildParams(t *testing.T) { mockL2BridgeQuerier *mocks.BridgeQuerier, mockL1InfoTreeQuerier *mocks.L1InfoTreeDataQuerier) { mockStorage.EXPECT().GetLastSentCertificateHeader().Return(&types.CertificateHeader{ToBlock: 5}, nil) - mockL2BridgeQuerier.EXPECT().GetLastProcessedBlock(ctx).Return(uint64(0), errors.New("some error")) + mockL2BridgeQuerier.EXPECT().GetLastProcessedBlock(ctx).Return(uint64(0), false, errors.New("some error")) }, expectedError: "error getting last processed block from l2: some error", }, @@ -297,7 +298,7 @@ func Test_PPFlow_GetCertificateBuildParams(t *testing.T) { mockFn: func(mockStorage *mocks.AggSenderStorage, mockL2BridgeQuerier *mocks.BridgeQuerier, mockL1InfoTreeQuerier *mocks.L1InfoTreeDataQuerier) { - mockL2BridgeQuerier.EXPECT().GetLastProcessedBlock(ctx).Return(uint64(10), nil) + mockL2BridgeQuerier.EXPECT().GetLastProcessedBlock(ctx).Return(uint64(10), true, nil) mockStorage.EXPECT().GetLastSentCertificateHeader().Return(&types.CertificateHeader{ToBlock: 10}, nil) }, expectedParams: nil, @@ -309,7 +310,7 @@ func Test_PPFlow_GetCertificateBuildParams(t *testing.T) { mockL1InfoTreeQuerier 
*mocks.L1InfoTreeDataQuerier) { mockL1InfoTreeQuerier.EXPECT().GetTargetL1InfoRoot(mock.Anything).Return( &treetypes.Root{Hash: common.HexToHash("0x123"), BlockNum: 10}, nil, nil) - mockL2BridgeQuerier.EXPECT().GetLastProcessedBlock(ctx).Return(uint64(10), nil) + mockL2BridgeQuerier.EXPECT().GetLastProcessedBlock(ctx).Return(uint64(10), true, nil) mockStorage.EXPECT().GetLastSentCertificateHeader().Return(&types.CertificateHeader{ToBlock: 5}, nil) mockL2BridgeQuerier.EXPECT().GetBridgesAndClaims(ctx, uint64(6), uint64(10)).Return(nil, nil, errors.New("some error")) }, @@ -322,10 +323,10 @@ func Test_PPFlow_GetCertificateBuildParams(t *testing.T) { mockL1InfoTreeQuerier *mocks.L1InfoTreeDataQuerier) { mockL1InfoTreeQuerier.EXPECT().GetTargetL1InfoRoot(mock.Anything).Return( &treetypes.Root{Hash: common.HexToHash("0x123"), BlockNum: 10}, nil, nil) - mockL2BridgeQuerier.EXPECT().GetLastProcessedBlock(ctx).Return(uint64(10), nil) + mockL2BridgeQuerier.EXPECT().GetLastProcessedBlock(ctx).Return(uint64(10), true, nil) mockStorage.EXPECT().GetLastSentCertificateHeader().Return(&types.CertificateHeader{ToBlock: 5}, nil) - mockL2BridgeQuerier.EXPECT().GetBridgesAndClaims(ctx, uint64(6), uint64(10)).Return([]bridgesync.Bridge{}, []bridgesync.Claim{}, nil) - mockL2BridgeQuerier.EXPECT().GetUnsetClaimsForBlockRange(ctx, uint64(6), uint64(10)).Return([]bridgetypes.Unclaim{}, nil) + mockL2BridgeQuerier.EXPECT().GetBridgesAndClaims(ctx, uint64(6), uint64(10)).Return([]bridgesync.Bridge{}, []claimsynctypes.Claim{}, nil) + mockL2BridgeQuerier.EXPECT().GetUnsetClaimsForBlockRange(ctx, uint64(6), uint64(10)).Return([]claimsynctypes.Unclaim{}, nil) }, expectedParams: nil, }, @@ -337,10 +338,10 @@ func Test_PPFlow_GetCertificateBuildParams(t *testing.T) { mockL1InfoTreeQuerier *mocks.L1InfoTreeDataQuerier) { mockL1InfoTreeQuerier.EXPECT().GetTargetL1InfoRoot(mock.Anything).Return( &treetypes.Root{Hash: common.HexToHash("0x123"), BlockNum: 10}, nil, nil) - 
mockL2BridgeQuerier.EXPECT().GetLastProcessedBlock(ctx).Return(uint64(10), nil) + mockL2BridgeQuerier.EXPECT().GetLastProcessedBlock(ctx).Return(uint64(10), true, nil) mockStorage.EXPECT().GetLastSentCertificateHeader().Return(&types.CertificateHeader{ToBlock: 5}, nil) - mockL2BridgeQuerier.EXPECT().GetBridgesAndClaims(ctx, uint64(6), uint64(10)).Return([]bridgesync.Bridge{}, []bridgesync.Claim{{GlobalExitRoot: common.HexToHash("0x1")}}, nil) - mockL2BridgeQuerier.EXPECT().GetUnsetClaimsForBlockRange(ctx, uint64(6), uint64(10)).Return([]bridgetypes.Unclaim{}, nil) + mockL2BridgeQuerier.EXPECT().GetBridgesAndClaims(ctx, uint64(6), uint64(10)).Return([]bridgesync.Bridge{}, []claimsynctypes.Claim{{GlobalExitRoot: common.HexToHash("0x1")}}, nil) + mockL2BridgeQuerier.EXPECT().GetUnsetClaimsForBlockRange(ctx, uint64(6), uint64(10)).Return([]claimsynctypes.Unclaim{}, nil) mockL1InfoTreeQuerier.EXPECT().IsGERFinalized(common.HexToHash("0x1"), uint32(1)).Return(true, nil).Once() }, expectedParams: nil, @@ -353,10 +354,10 @@ func Test_PPFlow_GetCertificateBuildParams(t *testing.T) { mockL1InfoTreeQuerier *mocks.L1InfoTreeDataQuerier) { mockL1InfoTreeQuerier.EXPECT().GetTargetL1InfoRoot(mock.Anything).Return( &treetypes.Root{Hash: common.HexToHash("0x123"), BlockNum: 10}, nil, nil) - mockL2BridgeQuerier.EXPECT().GetLastProcessedBlock(ctx).Return(uint64(10), nil) + mockL2BridgeQuerier.EXPECT().GetLastProcessedBlock(ctx).Return(uint64(10), true, nil) mockStorage.EXPECT().GetLastSentCertificateHeader().Return(&types.CertificateHeader{ToBlock: 5}, nil) - mockL2BridgeQuerier.EXPECT().GetBridgesAndClaims(ctx, uint64(6), uint64(10)).Return([]bridgesync.Bridge{}, []bridgesync.Claim{{GlobalExitRoot: common.HexToHash("0x1"), BlockNum: 10}}, nil) - mockL2BridgeQuerier.EXPECT().GetUnsetClaimsForBlockRange(ctx, uint64(6), uint64(10)).Return([]bridgetypes.Unclaim{}, nil) + mockL2BridgeQuerier.EXPECT().GetBridgesAndClaims(ctx, uint64(6), uint64(10)).Return([]bridgesync.Bridge{}, 
[]claimsynctypes.Claim{{GlobalExitRoot: common.HexToHash("0x1"), BlockNum: 10}}, nil) + mockL2BridgeQuerier.EXPECT().GetUnsetClaimsForBlockRange(ctx, uint64(6), uint64(10)).Return([]claimsynctypes.Unclaim{}, nil) mockL1InfoTreeQuerier.EXPECT().IsGERFinalized(common.HexToHash("0x1"), uint32(1)).Return(false, errors.New("some error")).Once() }, expectedParams: nil, @@ -368,7 +369,7 @@ func Test_PPFlow_GetCertificateBuildParams(t *testing.T) { mockFn: func(mockStorage *mocks.AggSenderStorage, mockL2BridgeQuerier *mocks.BridgeQuerier, mockL1InfoTreeQuerier *mocks.L1InfoTreeDataQuerier) { - mockL2BridgeQuerier.EXPECT().GetLastProcessedBlock(ctx).Return(uint64(10), nil) + mockL2BridgeQuerier.EXPECT().GetLastProcessedBlock(ctx).Return(uint64(10), true, nil) mockStorage.EXPECT().GetLastSentCertificateHeader().Return(&types.CertificateHeader{ToBlock: 5}, nil) rer1 := common.HexToHash("0x1") mer1 := common.HexToHash("0x2") @@ -376,7 +377,7 @@ func Test_PPFlow_GetCertificateBuildParams(t *testing.T) { rer2 := common.HexToHash("0x3") mer2 := common.HexToHash("0x4") ger2 := l1infotreesync.CalculateGER(mer2, rer2) - mockL2BridgeQuerier.EXPECT().GetBridgesAndClaims(ctx, uint64(6), uint64(10)).Return([]bridgesync.Bridge{}, []bridgesync.Claim{ + mockL2BridgeQuerier.EXPECT().GetBridgesAndClaims(ctx, uint64(6), uint64(10)).Return([]bridgesync.Bridge{}, []claimsynctypes.Claim{ { BlockNum: 9, GlobalExitRoot: ger1, @@ -389,7 +390,7 @@ func Test_PPFlow_GetCertificateBuildParams(t *testing.T) { RollupExitRoot: rer2, MainnetExitRoot: mer2, }}, nil) - mockL2BridgeQuerier.EXPECT().GetUnsetClaimsForBlockRange(ctx, uint64(6), uint64(10)).Return([]bridgetypes.Unclaim{}, nil) + mockL2BridgeQuerier.EXPECT().GetUnsetClaimsForBlockRange(ctx, uint64(6), uint64(10)).Return([]claimsynctypes.Unclaim{}, nil) mockL1InfoTreeQuerier.EXPECT().GetTargetL1InfoRoot(ctx).Return( &treetypes.Root{Hash: common.HexToHash("0x123"), BlockNum: 1}, nil, nil) mockL1InfoTreeQuerier.EXPECT().IsGERFinalized(ger1, 
uint32(1)).Return(true, nil).Once() @@ -404,7 +405,7 @@ func Test_PPFlow_GetCertificateBuildParams(t *testing.T) { CertificateType: types.CertificateTypePP, LastSentCertificate: &types.CertificateHeader{ToBlock: 5}, Bridges: []bridgesync.Bridge{}, - Claims: []bridgesync.Claim{ + Claims: []claimsynctypes.Claim{ { BlockNum: 9, RollupExitRoot: common.HexToHash("0x1"), @@ -412,7 +413,7 @@ func Test_PPFlow_GetCertificateBuildParams(t *testing.T) { GlobalExitRoot: l1infotreesync.CalculateGER(common.HexToHash("0x2"), common.HexToHash("0x1")), }, }, - Unclaims: []bridgetypes.Unclaim{}, + Unclaims: []claimsynctypes.Unclaim{}, CreatedAt: timeNowUTCForTest(), L1InfoTreeRootFromWhichToProve: common.HexToHash("0x123"), }, @@ -423,19 +424,19 @@ func Test_PPFlow_GetCertificateBuildParams(t *testing.T) { mockFn: func(mockStorage *mocks.AggSenderStorage, mockL2BridgeQuerier *mocks.BridgeQuerier, mockL1InfoTreeQuerier *mocks.L1InfoTreeDataQuerier) { - mockL2BridgeQuerier.EXPECT().GetLastProcessedBlock(ctx).Return(uint64(10), nil) + mockL2BridgeQuerier.EXPECT().GetLastProcessedBlock(ctx).Return(uint64(10), true, nil) mockStorage.EXPECT().GetLastSentCertificateHeader().Return(&types.CertificateHeader{ToBlock: 5}, nil) rer := common.HexToHash("0x1") mer := common.HexToHash("0x2") ger := l1infotreesync.CalculateGER(mer, rer) - mockL2BridgeQuerier.EXPECT().GetBridgesAndClaims(ctx, uint64(6), uint64(10)).Return([]bridgesync.Bridge{}, []bridgesync.Claim{ + mockL2BridgeQuerier.EXPECT().GetBridgesAndClaims(ctx, uint64(6), uint64(10)).Return([]bridgesync.Bridge{}, []claimsynctypes.Claim{ { BlockNum: 1, GlobalExitRoot: ger, RollupExitRoot: rer, MainnetExitRoot: mer, }}, nil) - mockL2BridgeQuerier.EXPECT().GetUnsetClaimsForBlockRange(ctx, uint64(6), uint64(10)).Return([]bridgetypes.Unclaim{}, nil) + mockL2BridgeQuerier.EXPECT().GetUnsetClaimsForBlockRange(ctx, uint64(6), uint64(10)).Return([]claimsynctypes.Unclaim{}, nil) mockL1InfoTreeQuerier.EXPECT().GetTargetL1InfoRoot(ctx).Return( 
&treetypes.Root{Hash: common.HexToHash("0x123"), BlockNum: 1}, nil, nil) mockL1InfoTreeQuerier.EXPECT().IsGERFinalized(ger, uint32(1)).Return(true, nil).Once() @@ -448,14 +449,14 @@ func Test_PPFlow_GetCertificateBuildParams(t *testing.T) { CertificateType: types.CertificateTypePP, LastSentCertificate: &types.CertificateHeader{ToBlock: 5}, Bridges: []bridgesync.Bridge{}, - Claims: []bridgesync.Claim{ + Claims: []claimsynctypes.Claim{ { BlockNum: 1, RollupExitRoot: common.HexToHash("0x1"), MainnetExitRoot: common.HexToHash("0x2"), GlobalExitRoot: l1infotreesync.CalculateGER(common.HexToHash("0x2"), common.HexToHash("0x1")), }}, - Unclaims: []bridgetypes.Unclaim{}, + Unclaims: []claimsynctypes.Unclaim{}, CreatedAt: timeNowUTCForTest(), L1InfoTreeRootFromWhichToProve: common.HexToHash("0x123"), }, @@ -467,11 +468,11 @@ func Test_PPFlow_GetCertificateBuildParams(t *testing.T) { mockL1InfoTreeQuerier *mocks.L1InfoTreeDataQuerier) { mockL1InfoTreeQuerier.EXPECT().GetTargetL1InfoRoot(mock.Anything).Return( &treetypes.Root{Hash: common.HexToHash("0x123"), BlockNum: 10}, nil, nil) - mockL2BridgeQuerier.EXPECT().GetLastProcessedBlock(ctx).Return(uint64(10), nil) + mockL2BridgeQuerier.EXPECT().GetLastProcessedBlock(ctx).Return(uint64(10), true, nil) mockStorage.EXPECT().GetLastSentCertificateHeader().Return(&types.CertificateHeader{ToBlock: 5}, nil) mockL2BridgeQuerier.EXPECT().GetBridgesAndClaims(ctx, uint64(6), uint64(10)).Return( - []bridgesync.Bridge{{}}, []bridgesync.Claim{{GlobalExitRoot: common.HexToHash("0x1")}}, nil) - mockL2BridgeQuerier.EXPECT().GetUnsetClaimsForBlockRange(ctx, uint64(6), uint64(10)).Return([]bridgetypes.Unclaim{}, nil) + []bridgesync.Bridge{{}}, []claimsynctypes.Claim{{GlobalExitRoot: common.HexToHash("0x1")}}, nil) + mockL2BridgeQuerier.EXPECT().GetUnsetClaimsForBlockRange(ctx, uint64(6), uint64(10)).Return([]claimsynctypes.Unclaim{}, nil) mockL1InfoTreeQuerier.EXPECT().IsGERFinalized(common.HexToHash("0x1"), uint32(1)).Return(true, nil).Once() 
}, expectedError: "GER mismatch", @@ -481,7 +482,7 @@ func Test_PPFlow_GetCertificateBuildParams(t *testing.T) { mockFn: func(mockStorage *mocks.AggSenderStorage, mockL2BridgeQuerier *mocks.BridgeQuerier, mockL1InfoTreeQuerier *mocks.L1InfoTreeDataQuerier) { - mockL2BridgeQuerier.EXPECT().GetLastProcessedBlock(ctx).Return(uint64(10), nil) + mockL2BridgeQuerier.EXPECT().GetLastProcessedBlock(ctx).Return(uint64(10), true, nil) mockStorage.EXPECT().GetLastSentCertificateHeader().Return(&types.CertificateHeader{ToBlock: 5}, nil) mockL1InfoTreeQuerier.On("GetTargetL1InfoRoot", ctx).Return(nil, nil, errors.New("some error")) }, @@ -495,15 +496,15 @@ func Test_PPFlow_GetCertificateBuildParams(t *testing.T) { rer := common.HexToHash("0x1") mer := common.HexToHash("0x2") ger := l1infotreesync.CalculateGER(mer, rer) - mockL2BridgeQuerier.EXPECT().GetLastProcessedBlock(ctx).Return(uint64(10), nil) + mockL2BridgeQuerier.EXPECT().GetLastProcessedBlock(ctx).Return(uint64(10), true, nil) mockStorage.EXPECT().GetLastSentCertificateHeader().Return(&types.CertificateHeader{ToBlock: 5}, nil) - mockL2BridgeQuerier.EXPECT().GetBridgesAndClaims(ctx, uint64(6), uint64(10)).Return([]bridgesync.Bridge{{}}, []bridgesync.Claim{ + mockL2BridgeQuerier.EXPECT().GetBridgesAndClaims(ctx, uint64(6), uint64(10)).Return([]bridgesync.Bridge{{}}, []claimsynctypes.Claim{ { GlobalExitRoot: ger, RollupExitRoot: rer, MainnetExitRoot: mer, }}, nil) - mockL2BridgeQuerier.EXPECT().GetUnsetClaimsForBlockRange(ctx, uint64(6), uint64(10)).Return([]bridgetypes.Unclaim{}, nil) + mockL2BridgeQuerier.EXPECT().GetUnsetClaimsForBlockRange(ctx, uint64(6), uint64(10)).Return([]claimsynctypes.Unclaim{}, nil) mockL1InfoTreeQuerier.EXPECT().GetTargetL1InfoRoot(ctx).Return( &treetypes.Root{Hash: common.HexToHash("0x123"), BlockNum: 10}, nil, nil) mockL1InfoTreeQuerier.EXPECT().IsGERFinalized(ger, uint32(1)).Return(true, nil).Once() @@ -516,13 +517,13 @@ func Test_PPFlow_GetCertificateBuildParams(t *testing.T) { 
CertificateType: types.CertificateTypePP, LastSentCertificate: &types.CertificateHeader{ToBlock: 5}, Bridges: []bridgesync.Bridge{{}}, - Claims: []bridgesync.Claim{ + Claims: []claimsynctypes.Claim{ { RollupExitRoot: common.HexToHash("0x1"), MainnetExitRoot: common.HexToHash("0x2"), GlobalExitRoot: l1infotreesync.CalculateGER(common.HexToHash("0x2"), common.HexToHash("0x1")), }}, - Unclaims: []bridgetypes.Unclaim{}, + Unclaims: []claimsynctypes.Unclaim{}, CreatedAt: timeNowUTCForTest(), L1InfoTreeRootFromWhichToProve: common.HexToHash("0x123"), }, diff --git a/aggsender/flows/flow_base.go b/aggsender/flows/flow_base.go index 2fb47052c..478f8de76 100644 --- a/aggsender/flows/flow_base.go +++ b/aggsender/flows/flow_base.go @@ -375,7 +375,9 @@ func (f *baseFlow) getNewLocalExitRoot( } // ConvertClaimToImportedBridgeExit converts a claim to an ImportedBridgeExit object -func (f *baseFlow) ConvertClaimToImportedBridgeExit(claim claimsynctypes.Claim) (*agglayertypes.ImportedBridgeExit, error) { +func (f *baseFlow) ConvertClaimToImportedBridgeExit( + claim claimsynctypes.Claim, +) (*agglayertypes.ImportedBridgeExit, error) { return converters.ConvertToImportedBridgeExitWithoutClaimData(claim) } diff --git a/aggsender/flows/flow_base_test.go b/aggsender/flows/flow_base_test.go index c98d7b905..7f0ce5d7e 100644 --- a/aggsender/flows/flow_base_test.go +++ b/aggsender/flows/flow_base_test.go @@ -12,6 +12,7 @@ import ( "github.com/agglayer/aggkit/aggsender/types" "github.com/agglayer/aggkit/bridgesync" bridgesynctypes "github.com/agglayer/aggkit/bridgesync/types" + claimsynctypes "github.com/agglayer/aggkit/claimsync/types" aggkitcommon "github.com/agglayer/aggkit/common" "github.com/agglayer/aggkit/db" "github.com/agglayer/aggkit/l1infotreesync" @@ -56,8 +57,8 @@ func Test_baseFlow_limitCertSize(t *testing.T) { FromBlock: 1, ToBlock: 9, Bridges: []bridgesync.Bridge{{BlockNum: 9}}, - Claims: []bridgesync.Claim{}, - Unclaims: []bridgesynctypes.Unclaim{}, + Claims: 
[]claimsynctypes.Claim{}, + Unclaims: []claimsynctypes.Unclaim{}, }, }, { @@ -72,8 +73,8 @@ func Test_baseFlow_limitCertSize(t *testing.T) { FromBlock: 1, ToBlock: 9, Bridges: []bridgesync.Bridge{}, - Claims: []bridgesync.Claim{}, - Unclaims: []bridgesynctypes.Unclaim{}, + Claims: []claimsynctypes.Claim{}, + Unclaims: []claimsynctypes.Unclaim{}, }, }, { @@ -111,13 +112,13 @@ func Test_baseFlow_limitCertSize(t *testing.T) { FromBlock: 1, ToBlock: 10, Bridges: []bridgesync.Bridge{{}, {}}, - Claims: []bridgesync.Claim{{}, {}}, + Claims: []claimsynctypes.Claim{{}, {}}, }, expectedCert: &types.CertificateBuildParams{ FromBlock: 1, ToBlock: 10, Bridges: []bridgesync.Bridge{{}, {}}, - Claims: []bridgesync.Claim{{}, {}}, + Claims: []claimsynctypes.Claim{{}, {}}, }, }, } @@ -518,7 +519,7 @@ func Test_baseFlow_VerifyBuildParams(t *testing.T) { buildParams: &types.CertificateBuildParams{ FromBlock: 1, ToBlock: 10, - Claims: []bridgesync.Claim{ + Claims: []claimsynctypes.Claim{ {GlobalExitRoot: common.HexToHash("0x123"), MainnetExitRoot: common.HexToHash("0x456"), RollupExitRoot: common.HexToHash("0x789")}, }, }, @@ -611,7 +612,7 @@ func Test_baseFlow_VerifyBlockRangeGaps(t *testing.T) { // gap is [16,16] mockL2BridgeQuerier.EXPECT(). GetBridgesAndClaims(ctx, uint64(16), uint64(16)). - Return([]bridgesync.Bridge{}, []bridgesync.Claim{}, nil) + Return([]bridgesync.Bridge{}, []claimsynctypes.Claim{}, nil) }, }, { @@ -628,7 +629,7 @@ func Test_baseFlow_VerifyBlockRangeGaps(t *testing.T) { mockFn: func(mockL2BridgeQuerier *mocks.BridgeQuerier) { mockL2BridgeQuerier.EXPECT(). GetBridgesAndClaims(ctx, uint64(16), uint64(16)). - Return([]bridgesync.Bridge{{}}, []bridgesync.Claim{}, nil) + Return([]bridgesync.Bridge{{}}, []claimsynctypes.Claim{}, nil) }, expectedError: "there are new bridges or claims in the gap", }, @@ -646,7 +647,7 @@ func Test_baseFlow_VerifyBlockRangeGaps(t *testing.T) { mockFn: func(mockL2BridgeQuerier *mocks.BridgeQuerier) { mockL2BridgeQuerier.EXPECT(). 
GetBridgesAndClaims(ctx, uint64(16), uint64(16)). - Return([]bridgesync.Bridge{}, []bridgesync.Claim{{}}, nil) + Return([]bridgesync.Bridge{}, []claimsynctypes.Claim{{}}, nil) }, expectedError: "there are new bridges or claims in the gap", }, @@ -665,7 +666,7 @@ func Test_baseFlow_VerifyBlockRangeGaps(t *testing.T) { mockFn: func(mockL2BridgeQuerier *mocks.BridgeQuerier) { mockL2BridgeQuerier.EXPECT(). GetBridgesAndClaims(ctx, uint64(16), uint64(16)). - Return([]bridgesync.Bridge{}, []bridgesync.Claim{}, nil) + Return([]bridgesync.Bridge{}, []claimsynctypes.Claim{}, nil) }, expectedError: "block gap detected", }, @@ -702,7 +703,7 @@ func Test_baseFlow_VerifyBlockRangeGaps(t *testing.T) { // lastSettledToBlock = 4, so gap is [5,6] mockL2BridgeQuerier.EXPECT(). GetBridgesAndClaims(ctx, uint64(5), uint64(6)). - Return([]bridgesync.Bridge{}, []bridgesync.Claim{}, nil) + Return([]bridgesync.Bridge{}, []claimsynctypes.Claim{}, nil) }, }, { @@ -764,8 +765,8 @@ func Test_baseFlow_getImportedBridgeExits(t *testing.T) { tests := []struct { name string - claims []bridgesync.Claim - unclaims []bridgesynctypes.Unclaim + claims []claimsynctypes.Claim + unclaims []claimsynctypes.Unclaim fullClaimsNeeded bool mockFn func(*mocks.L1InfoTreeDataQuerier) expectedCount int @@ -773,15 +774,15 @@ func Test_baseFlow_getImportedBridgeExits(t *testing.T) { }{ { name: "no claims, no unclaims", - claims: []bridgesync.Claim{}, - unclaims: []bridgesynctypes.Unclaim{}, + claims: []claimsynctypes.Claim{}, + unclaims: []claimsynctypes.Unclaim{}, fullClaimsNeeded: false, expectedCount: 0, expectedError: "", }, { name: "claims without unclaims - FullClaimsNeeded false", - claims: []bridgesync.Claim{ + claims: []claimsynctypes.Claim{ { BlockNum: 1, BlockPos: 0, @@ -799,14 +800,14 @@ func Test_baseFlow_getImportedBridgeExits(t *testing.T) { Amount: big.NewInt(2000), }, }, - unclaims: []bridgesynctypes.Unclaim{}, + unclaims: []claimsynctypes.Unclaim{}, fullClaimsNeeded: false, expectedCount: 2, 
expectedError: "", }, { name: "claims without unclaims - FullClaimsNeeded true", - claims: []bridgesync.Claim{ + claims: []claimsynctypes.Claim{ { BlockNum: 1, BlockPos: 0, @@ -821,7 +822,7 @@ func Test_baseFlow_getImportedBridgeExits(t *testing.T) { ProofRollupExitRoot: mockProof, }, }, - unclaims: []bridgesynctypes.Unclaim{}, + unclaims: []claimsynctypes.Unclaim{}, fullClaimsNeeded: true, mockFn: func(mockL1InfoTreeQuery *mocks.L1InfoTreeDataQuerier) { mockL1InfoTreeQuery.EXPECT().GetProofForGER(ctx, common.HexToHash("0xger1"), rootFromWhichToProve). @@ -838,7 +839,7 @@ func Test_baseFlow_getImportedBridgeExits(t *testing.T) { }, { name: "claims with unclaims canceling some claims", - claims: []bridgesync.Claim{ + claims: []claimsynctypes.Claim{ { BlockNum: 1, BlockPos: 0, @@ -861,7 +862,7 @@ func Test_baseFlow_getImportedBridgeExits(t *testing.T) { Amount: big.NewInt(3000), }, }, - unclaims: []bridgesynctypes.Unclaim{ + unclaims: []claimsynctypes.Unclaim{ {GlobalIndex: big.NewInt(100), BlockNumber: 10, LogIndex: 0}, }, fullClaimsNeeded: false, @@ -870,7 +871,7 @@ func Test_baseFlow_getImportedBridgeExits(t *testing.T) { }, { name: "claims with unclaims canceling all claims of same GlobalIndex", - claims: []bridgesync.Claim{ + claims: []claimsynctypes.Claim{ { BlockNum: 1, BlockPos: 0, @@ -893,7 +894,7 @@ func Test_baseFlow_getImportedBridgeExits(t *testing.T) { Amount: big.NewInt(3000), }, }, - unclaims: []bridgesynctypes.Unclaim{ + unclaims: []claimsynctypes.Unclaim{ {GlobalIndex: big.NewInt(100), BlockNumber: 10, LogIndex: 0}, {GlobalIndex: big.NewInt(100), BlockNumber: 11, LogIndex: 1}, }, @@ -903,7 +904,7 @@ func Test_baseFlow_getImportedBridgeExits(t *testing.T) { }, { name: "more unclaims than claims for a GlobalIndex", - claims: []bridgesync.Claim{ + claims: []claimsynctypes.Claim{ { BlockNum: 1, BlockPos: 0, @@ -912,7 +913,7 @@ func Test_baseFlow_getImportedBridgeExits(t *testing.T) { Amount: big.NewInt(1000), }, }, - unclaims: []bridgesynctypes.Unclaim{ 
+ unclaims: []claimsynctypes.Unclaim{ {GlobalIndex: big.NewInt(100), BlockNumber: 10, LogIndex: 0}, {GlobalIndex: big.NewInt(100), BlockNumber: 11, LogIndex: 1}, {GlobalIndex: big.NewInt(100), BlockNumber: 12, LogIndex: 2}, @@ -923,7 +924,7 @@ func Test_baseFlow_getImportedBridgeExits(t *testing.T) { }, { name: "multiple GlobalIndices with mixed cancellation", - claims: []bridgesync.Claim{ + claims: []claimsynctypes.Claim{ { BlockNum: 1, BlockPos: 0, @@ -953,7 +954,7 @@ func Test_baseFlow_getImportedBridgeExits(t *testing.T) { Amount: big.NewInt(4000), }, }, - unclaims: []bridgesynctypes.Unclaim{ + unclaims: []claimsynctypes.Unclaim{ {GlobalIndex: big.NewInt(100), BlockNumber: 10, LogIndex: 0}, {GlobalIndex: big.NewInt(200), BlockNumber: 11, LogIndex: 1}, }, @@ -963,7 +964,7 @@ func Test_baseFlow_getImportedBridgeExits(t *testing.T) { }, { name: "using GenerateGlobalIndex helper", - claims: []bridgesync.Claim{ + claims: []claimsynctypes.Claim{ { BlockNum: 1, BlockPos: 0, @@ -979,7 +980,7 @@ func Test_baseFlow_getImportedBridgeExits(t *testing.T) { Amount: big.NewInt(2000), }, }, - unclaims: []bridgesynctypes.Unclaim{ + unclaims: []claimsynctypes.Unclaim{ {GlobalIndex: bridgesync.GenerateGlobalIndex(false, 1, 1), BlockNumber: 10, LogIndex: 0}, }, fullClaimsNeeded: false, @@ -1117,7 +1118,7 @@ func Test_baseFlow_adjustCertificateIfNonFinalizedClaims_UnclaimValidation(t *te FromBlock: 1, ToBlock: 20, L1InfoTreeLeafCount: 10, - Claims: []bridgesync.Claim{ + Claims: []claimsynctypes.Claim{ { BlockNum: 5, GlobalIndex: globalIndex1, @@ -1129,7 +1130,7 @@ func Test_baseFlow_adjustCertificateIfNonFinalizedClaims_UnclaimValidation(t *te GlobalExitRoot: ger2, // Unfinalized, exists on L1 }, }, - Unclaims: []bridgesynctypes.Unclaim{ + Unclaims: []claimsynctypes.Unclaim{ { GlobalIndex: globalIndex1, BlockNumber: 10, // Unclaim appears BEFORE claim at block 15 @@ -1147,7 +1148,7 @@ func Test_baseFlow_adjustCertificateIfNonFinalizedClaims_UnclaimValidation(t *te FromBlock: 1, 
ToBlock: 20, L1InfoTreeLeafCount: 10, - Claims: []bridgesync.Claim{ + Claims: []claimsynctypes.Claim{ { BlockNum: 5, GlobalIndex: globalIndex1, @@ -1159,7 +1160,7 @@ func Test_baseFlow_adjustCertificateIfNonFinalizedClaims_UnclaimValidation(t *te GlobalExitRoot: ger2, // Unfinalized, exists on L1 }, }, - Unclaims: []bridgesynctypes.Unclaim{}, // No unclaim for first claim + Unclaims: []claimsynctypes.Unclaim{}, // No unclaim for first claim }, mockFn: func(mockL1InfoTreeQuerier *mocks.L1InfoTreeDataQuerier) { // First pass: check all claims (cached results will be reused in subsequent calls) @@ -1178,7 +1179,7 @@ func Test_baseFlow_adjustCertificateIfNonFinalizedClaims_UnclaimValidation(t *te FromBlock: 1, ToBlock: 20, L1InfoTreeLeafCount: 10, - Claims: []bridgesync.Claim{ + Claims: []claimsynctypes.Claim{ { BlockNum: 5, GlobalIndex: globalIndex1, @@ -1190,7 +1191,7 @@ func Test_baseFlow_adjustCertificateIfNonFinalizedClaims_UnclaimValidation(t *te GlobalExitRoot: ger2, }, }, - Unclaims: []bridgesynctypes.Unclaim{}, + Unclaims: []claimsynctypes.Unclaim{}, }, mockFn: func(mockL1InfoTreeQuerier *mocks.L1InfoTreeDataQuerier) { // First pass: both claims have finalized GERs (cached results will be reused in subsequent calls) @@ -1205,7 +1206,7 @@ func Test_baseFlow_adjustCertificateIfNonFinalizedClaims_UnclaimValidation(t *te FromBlock: 1, ToBlock: 20, L1InfoTreeLeafCount: 10, - Claims: []bridgesync.Claim{ + Claims: []claimsynctypes.Claim{ { BlockNum: 5, GlobalIndex: globalIndex1, @@ -1217,7 +1218,7 @@ func Test_baseFlow_adjustCertificateIfNonFinalizedClaims_UnclaimValidation(t *te GlobalExitRoot: ger2, // Unfinalized, exists on L1 }, }, - Unclaims: []bridgesynctypes.Unclaim{}, + Unclaims: []claimsynctypes.Unclaim{}, }, mockFn: func(mockL1InfoTreeQuerier *mocks.L1InfoTreeDataQuerier) { // First pass: check all claims @@ -1235,7 +1236,7 @@ func Test_baseFlow_adjustCertificateIfNonFinalizedClaims_UnclaimValidation(t *te FromBlock: 1, ToBlock: 30, L1InfoTreeLeafCount: 10, 
- Claims: []bridgesync.Claim{ + Claims: []claimsynctypes.Claim{ { BlockNum: 5, GlobalIndex: globalIndex1, @@ -1252,7 +1253,7 @@ func Test_baseFlow_adjustCertificateIfNonFinalizedClaims_UnclaimValidation(t *te GlobalExitRoot: ger3, // Unfinalized, exists on L1 }, }, - Unclaims: []bridgesynctypes.Unclaim{ + Unclaims: []claimsynctypes.Unclaim{ { GlobalIndex: globalIndex1, BlockNumber: 12, // Has unclaim @@ -1277,7 +1278,7 @@ func Test_baseFlow_adjustCertificateIfNonFinalizedClaims_UnclaimValidation(t *te FromBlock: 1, ToBlock: 30, L1InfoTreeLeafCount: 10, - Claims: []bridgesync.Claim{ + Claims: []claimsynctypes.Claim{ { BlockNum: 5, GlobalIndex: globalIndex1, @@ -1294,7 +1295,7 @@ func Test_baseFlow_adjustCertificateIfNonFinalizedClaims_UnclaimValidation(t *te GlobalExitRoot: ger3, // Unfinalized, exists on L1 }, }, - Unclaims: []bridgesynctypes.Unclaim{ + Unclaims: []claimsynctypes.Unclaim{ { GlobalIndex: globalIndex1, BlockNumber: 8, // Before block 15 @@ -1316,7 +1317,7 @@ func Test_baseFlow_adjustCertificateIfNonFinalizedClaims_UnclaimValidation(t *te FromBlock: 1, ToBlock: 30, L1InfoTreeLeafCount: 10, - Claims: []bridgesync.Claim{ + Claims: []claimsynctypes.Claim{ { BlockNum: 5, GlobalIndex: globalIndex1, @@ -1333,7 +1334,7 @@ func Test_baseFlow_adjustCertificateIfNonFinalizedClaims_UnclaimValidation(t *te GlobalExitRoot: ger3, // Unfinalized, exists on L1 }, }, - Unclaims: []bridgesynctypes.Unclaim{ + Unclaims: []claimsynctypes.Unclaim{ { GlobalIndex: globalIndex1, BlockNumber: 8, // Before block 15 - valid @@ -1355,7 +1356,7 @@ func Test_baseFlow_adjustCertificateIfNonFinalizedClaims_UnclaimValidation(t *te FromBlock: 1, ToBlock: 50, L1InfoTreeLeafCount: 10, - Claims: []bridgesync.Claim{ + Claims: []claimsynctypes.Claim{ { BlockNum: 5, GlobalIndex: globalIndex1, @@ -1382,7 +1383,7 @@ func Test_baseFlow_adjustCertificateIfNonFinalizedClaims_UnclaimValidation(t *te GlobalExitRoot: ger5, // Unfinalized, exists on L1 }, }, - Unclaims: []bridgesynctypes.Unclaim{ + 
Unclaims: []claimsynctypes.Unclaim{ { GlobalIndex: globalIndex2, BlockNumber: 12, // Before block 25 @@ -1412,7 +1413,7 @@ func Test_baseFlow_adjustCertificateIfNonFinalizedClaims_UnclaimValidation(t *te FromBlock: 1, ToBlock: 30, L1InfoTreeLeafCount: 10, - Claims: []bridgesync.Claim{ + Claims: []claimsynctypes.Claim{ { BlockNum: 5, GlobalIndex: globalIndex1, @@ -1434,7 +1435,7 @@ func Test_baseFlow_adjustCertificateIfNonFinalizedClaims_UnclaimValidation(t *te GlobalExitRoot: ger4, // Unfinalized, exists on L1 }, }, - Unclaims: []bridgesynctypes.Unclaim{ + Unclaims: []claimsynctypes.Unclaim{ { GlobalIndex: globalIndex1, BlockNumber: 15, // Has unclaim @@ -1457,7 +1458,7 @@ func Test_baseFlow_adjustCertificateIfNonFinalizedClaims_UnclaimValidation(t *te FromBlock: 1, ToBlock: 30, L1InfoTreeLeafCount: 10, - Claims: []bridgesync.Claim{ + Claims: []claimsynctypes.Claim{ { BlockNum: 5, GlobalIndex: globalIndex1, @@ -1474,7 +1475,7 @@ func Test_baseFlow_adjustCertificateIfNonFinalizedClaims_UnclaimValidation(t *te GlobalExitRoot: ger3, // Unfinalized, exists on L1 }, }, - Unclaims: []bridgesynctypes.Unclaim{ + Unclaims: []claimsynctypes.Unclaim{ { GlobalIndex: globalIndex1, BlockNumber: 14, // Exactly at boundary @@ -1496,7 +1497,7 @@ func Test_baseFlow_adjustCertificateIfNonFinalizedClaims_UnclaimValidation(t *te FromBlock: 1, ToBlock: 30, L1InfoTreeLeafCount: 10, - Claims: []bridgesync.Claim{ + Claims: []claimsynctypes.Claim{ { BlockNum: 5, GlobalIndex: globalIndex1, @@ -1508,7 +1509,7 @@ func Test_baseFlow_adjustCertificateIfNonFinalizedClaims_UnclaimValidation(t *te GlobalExitRoot: ger2, // Unfinalized, exists on L1 }, }, - Unclaims: []bridgesynctypes.Unclaim{ + Unclaims: []claimsynctypes.Unclaim{ { GlobalIndex: globalIndex1, BlockNumber: 15, // Exactly at block 15, but block 15 can't be included @@ -1530,7 +1531,7 @@ func Test_baseFlow_adjustCertificateIfNonFinalizedClaims_UnclaimValidation(t *te FromBlock: 1, ToBlock: 40, L1InfoTreeLeafCount: 10, - Claims: 
[]bridgesync.Claim{ + Claims: []claimsynctypes.Claim{ { BlockNum: 5, GlobalIndex: globalIndex1, @@ -1552,7 +1553,7 @@ func Test_baseFlow_adjustCertificateIfNonFinalizedClaims_UnclaimValidation(t *te GlobalExitRoot: ger4, // Unfinalized, exists on L1 }, }, - Unclaims: []bridgesynctypes.Unclaim{}, + Unclaims: []claimsynctypes.Unclaim{}, }, mockFn: func(mockL1InfoTreeQuerier *mocks.L1InfoTreeDataQuerier) { // First pass: check all claims (cached results will be reused in subsequent calls) @@ -1568,7 +1569,7 @@ func Test_baseFlow_adjustCertificateIfNonFinalizedClaims_UnclaimValidation(t *te FromBlock: 1, ToBlock: 30, L1InfoTreeLeafCount: 10, - Claims: []bridgesync.Claim{ + Claims: []claimsynctypes.Claim{ { BlockNum: 5, GlobalIndex: globalIndex1, @@ -1590,7 +1591,7 @@ func Test_baseFlow_adjustCertificateIfNonFinalizedClaims_UnclaimValidation(t *te GlobalExitRoot: ger4, // Unfinalized, exists on L1 }, }, - Unclaims: []bridgesynctypes.Unclaim{ + Unclaims: []claimsynctypes.Unclaim{ { GlobalIndex: globalIndex1, BlockNumber: 7, // C1 has unclaim @@ -1609,7 +1610,7 @@ func Test_baseFlow_adjustCertificateIfNonFinalizedClaims_UnclaimValidation(t *te FromBlock: 1, ToBlock: 20, L1InfoTreeLeafCount: 10, - Claims: []bridgesync.Claim{ + Claims: []claimsynctypes.Claim{ { BlockNum: 1, // At start block GlobalIndex: globalIndex1, @@ -1621,7 +1622,7 @@ func Test_baseFlow_adjustCertificateIfNonFinalizedClaims_UnclaimValidation(t *te GlobalExitRoot: ger2, // Unfinalized, exists on L1 }, }, - Unclaims: []bridgesynctypes.Unclaim{}, // No unclaim for C1 + Unclaims: []claimsynctypes.Unclaim{}, // No unclaim for C1 }, mockFn: func(mockL1InfoTreeQuerier *mocks.L1InfoTreeDataQuerier) { // First pass: check all claims (cached results will be reused in subsequent calls) @@ -1638,14 +1639,14 @@ func Test_baseFlow_adjustCertificateIfNonFinalizedClaims_UnclaimValidation(t *te FromBlock: 1, ToBlock: 20, L1InfoTreeLeafCount: 10, - Claims: []bridgesync.Claim{ + Claims: []claimsynctypes.Claim{ { 
BlockNum: 5, GlobalIndex: globalIndex1, GlobalExitRoot: ger1, }, }, - Unclaims: []bridgesynctypes.Unclaim{}, + Unclaims: []claimsynctypes.Unclaim{}, }, mockFn: func(mockL1InfoTreeQuerier *mocks.L1InfoTreeDataQuerier) { // First pass: check all claims (cached results will be reused in subsequent calls) @@ -1659,14 +1660,14 @@ func Test_baseFlow_adjustCertificateIfNonFinalizedClaims_UnclaimValidation(t *te FromBlock: 1, ToBlock: 20, L1InfoTreeLeafCount: 10, - Claims: []bridgesync.Claim{ + Claims: []claimsynctypes.Claim{ { BlockNum: 5, GlobalIndex: globalIndex1, GlobalExitRoot: ger1, }, }, - Unclaims: []bridgesynctypes.Unclaim{}, + Unclaims: []claimsynctypes.Unclaim{}, }, mockFn: func(mockL1InfoTreeQuerier *mocks.L1InfoTreeDataQuerier) { // First pass: check all claims (cached results will be reused in subsequent calls) @@ -1681,7 +1682,7 @@ func Test_baseFlow_adjustCertificateIfNonFinalizedClaims_UnclaimValidation(t *te FromBlock: 1, ToBlock: 20, L1InfoTreeLeafCount: 10, - Claims: []bridgesync.Claim{ + Claims: []claimsynctypes.Claim{ { BlockNum: 5, GlobalIndex: globalIndex1, @@ -1693,7 +1694,7 @@ func Test_baseFlow_adjustCertificateIfNonFinalizedClaims_UnclaimValidation(t *te GlobalExitRoot: ger2, }, }, - Unclaims: []bridgesynctypes.Unclaim{}, + Unclaims: []claimsynctypes.Unclaim{}, }, mockFn: func(mockL1InfoTreeQuerier *mocks.L1InfoTreeDataQuerier) { // First pass: check all claims (cached results will be reused in subsequent calls) @@ -1708,7 +1709,7 @@ func Test_baseFlow_adjustCertificateIfNonFinalizedClaims_UnclaimValidation(t *te FromBlock: 1, ToBlock: 20, L1InfoTreeLeafCount: 10, - Claims: []bridgesync.Claim{ + Claims: []claimsynctypes.Claim{ { BlockNum: 5, GlobalIndex: globalIndex1, @@ -1720,7 +1721,7 @@ func Test_baseFlow_adjustCertificateIfNonFinalizedClaims_UnclaimValidation(t *te GlobalExitRoot: ger2, }, }, - Unclaims: []bridgesynctypes.Unclaim{}, + Unclaims: []claimsynctypes.Unclaim{}, }, mockFn: func(mockL1InfoTreeQuerier *mocks.L1InfoTreeDataQuerier) { 
// First pass: check all claims (cached results will be reused in subsequent calls) @@ -1783,7 +1784,7 @@ func Test_baseFlow_NextCertificateBlockRange_CertQuerier(t *testing.T) { } // certQuerier re-derives the toBlock = 10. - mockBridgeQuerier.EXPECT().GetLastProcessedBlock(mock.Anything).Return(uint64(20), nil) + mockBridgeQuerier.EXPECT().GetLastProcessedBlock(mock.Anything).Return(uint64(20), true, nil) mockBridgeQuerier.EXPECT().OriginNetwork().Return(uint32(1)) mockCertQuerier.EXPECT().GetLastSettledCertificateToBlock(mock.Anything, mock.Anything). Return(uint64(10), nil) @@ -1813,7 +1814,7 @@ func Test_baseFlow_NextCertificateBlockRange_CertQuerier(t *testing.T) { Height: 2, } - mockBridgeQuerier.EXPECT().GetLastProcessedBlock(mock.Anything).Return(uint64(20), nil) + mockBridgeQuerier.EXPECT().GetLastProcessedBlock(mock.Anything).Return(uint64(20), true, nil) mockBridgeQuerier.EXPECT().OriginNetwork().Return(uint32(1)) mockCertQuerier.EXPECT().GetLastSettledCertificateToBlock(mock.Anything, mock.Anything). 
Return(uint64(0), errors.New("query failed")) @@ -1840,7 +1841,7 @@ func Test_baseFlow_NextCertificateBlockRange_CertQuerier(t *testing.T) { Height: 1, } - mockBridgeQuerier.EXPECT().GetLastProcessedBlock(mock.Anything).Return(uint64(20), nil) + mockBridgeQuerier.EXPECT().GetLastProcessedBlock(mock.Anything).Return(uint64(20), true, nil) f := &baseFlow{ l2BridgeQuerier: mockBridgeQuerier, diff --git a/aggsender/flows/max_l2blocknumber_limiter_test.go b/aggsender/flows/max_l2blocknumber_limiter_test.go index 8edc018af..ce6e5303c 100644 --- a/aggsender/flows/max_l2blocknumber_limiter_test.go +++ b/aggsender/flows/max_l2blocknumber_limiter_test.go @@ -5,6 +5,7 @@ import ( "github.com/agglayer/aggkit/aggsender/types" "github.com/agglayer/aggkit/bridgesync" + claimsynctypes "github.com/agglayer/aggkit/claimsync/types" "github.com/agglayer/aggkit/log" "github.com/stretchr/testify/require" ) @@ -160,7 +161,7 @@ func TestAdaptCertificateForPP(t *testing.T) { BlockNum: 120, }, }, - Claims: []bridgesync.Claim{ + Claims: []claimsynctypes.Claim{ { BlockNum: 120, }, @@ -189,7 +190,7 @@ func TestAdaptCertificateForPP(t *testing.T) { buildParams: &types.CertificateBuildParams{ FromBlock: 100, ToBlock: 200, - Claims: []bridgesync.Claim{ + Claims: []claimsynctypes.Claim{ { BlockNum: 120, }, diff --git a/aggsender/mocks/mock_aggsender_builder_flow.go b/aggsender/mocks/mock_aggsender_builder_flow.go index 91f7fea66..81b4670ed 100644 --- a/aggsender/mocks/mock_aggsender_builder_flow.go +++ b/aggsender/mocks/mock_aggsender_builder_flow.go @@ -191,6 +191,65 @@ func (_c *AggsenderBuilderFlow_GenerateBuildParams_Call) RunAndReturn(run func(c return _c } +// GeneratePreBuildParams provides a mock function with given fields: ctx, certType +func (_m *AggsenderBuilderFlow) GeneratePreBuildParams(ctx context.Context, certType types.CertificateType) (*types.CertificatePreBuildParams, error) { + ret := _m.Called(ctx, certType) + + if len(ret) == 0 { + panic("no return value specified for 
GeneratePreBuildParams") + } + + var r0 *types.CertificatePreBuildParams + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, types.CertificateType) (*types.CertificatePreBuildParams, error)); ok { + return rf(ctx, certType) + } + if rf, ok := ret.Get(0).(func(context.Context, types.CertificateType) *types.CertificatePreBuildParams); ok { + r0 = rf(ctx, certType) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.CertificatePreBuildParams) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, types.CertificateType) error); ok { + r1 = rf(ctx, certType) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// AggsenderBuilderFlow_GeneratePreBuildParams_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GeneratePreBuildParams' +type AggsenderBuilderFlow_GeneratePreBuildParams_Call struct { + *mock.Call +} + +// GeneratePreBuildParams is a helper method to define mock.On call +// - ctx context.Context +// - certType types.CertificateType +func (_e *AggsenderBuilderFlow_Expecter) GeneratePreBuildParams(ctx interface{}, certType interface{}) *AggsenderBuilderFlow_GeneratePreBuildParams_Call { + return &AggsenderBuilderFlow_GeneratePreBuildParams_Call{Call: _e.mock.On("GeneratePreBuildParams", ctx, certType)} +} + +func (_c *AggsenderBuilderFlow_GeneratePreBuildParams_Call) Run(run func(ctx context.Context, certType types.CertificateType)) *AggsenderBuilderFlow_GeneratePreBuildParams_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(types.CertificateType)) + }) + return _c +} + +func (_c *AggsenderBuilderFlow_GeneratePreBuildParams_Call) Return(_a0 *types.CertificatePreBuildParams, _a1 error) *AggsenderBuilderFlow_GeneratePreBuildParams_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *AggsenderBuilderFlow_GeneratePreBuildParams_Call) RunAndReturn(run func(context.Context, types.CertificateType) (*types.CertificatePreBuildParams, error)) 
*AggsenderBuilderFlow_GeneratePreBuildParams_Call { + _c.Call.Return(run) + return _c +} + // GetCertificateBuildParams provides a mock function with given fields: ctx func (_m *AggsenderBuilderFlow) GetCertificateBuildParams(ctx context.Context) (*types.CertificateBuildParams, error) { ret := _m.Called(ctx) @@ -249,6 +308,61 @@ func (_c *AggsenderBuilderFlow_GetCertificateBuildParams_Call) RunAndReturn(run return _c } +// GetNextBlockNumber provides a mock function with no fields +func (_m *AggsenderBuilderFlow) GetNextBlockNumber() (uint64, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetNextBlockNumber") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func() (uint64, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// AggsenderBuilderFlow_GetNextBlockNumber_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetNextBlockNumber' +type AggsenderBuilderFlow_GetNextBlockNumber_Call struct { + *mock.Call +} + +// GetNextBlockNumber is a helper method to define mock.On call +func (_e *AggsenderBuilderFlow_Expecter) GetNextBlockNumber() *AggsenderBuilderFlow_GetNextBlockNumber_Call { + return &AggsenderBuilderFlow_GetNextBlockNumber_Call{Call: _e.mock.On("GetNextBlockNumber")} +} + +func (_c *AggsenderBuilderFlow_GetNextBlockNumber_Call) Run(run func()) *AggsenderBuilderFlow_GetNextBlockNumber_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *AggsenderBuilderFlow_GetNextBlockNumber_Call) Return(_a0 uint64, _a1 error) *AggsenderBuilderFlow_GetNextBlockNumber_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *AggsenderBuilderFlow_GetNextBlockNumber_Call) RunAndReturn(run func() (uint64, error)) 
*AggsenderBuilderFlow_GetNextBlockNumber_Call { + _c.Call.Return(run) + return _c +} + // Signer provides a mock function with no fields func (_m *AggsenderBuilderFlow) Signer() signertypes.Signer { ret := _m.Called() @@ -343,118 +457,6 @@ func (_c *AggsenderBuilderFlow_UpdateAggchainData_Call) RunAndReturn(run func(*a return _c } -// GeneratePreBuildParams provides a mock function with given fields: ctx, certType -func (_m *AggsenderBuilderFlow) GeneratePreBuildParams(ctx context.Context, certType types.CertificateType) (*types.CertificatePreBuildParams, error) { - ret := _m.Called(ctx, certType) - - if len(ret) == 0 { - panic("no return value specified for GeneratePreBuildParams") - } - - var r0 *types.CertificatePreBuildParams - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, types.CertificateType) (*types.CertificatePreBuildParams, error)); ok { - return rf(ctx, certType) - } - if rf, ok := ret.Get(0).(func(context.Context, types.CertificateType) *types.CertificatePreBuildParams); ok { - r0 = rf(ctx, certType) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.CertificatePreBuildParams) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, types.CertificateType) error); ok { - r1 = rf(ctx, certType) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// AggsenderBuilderFlow_GeneratePreBuildParams_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GeneratePreBuildParams' -type AggsenderBuilderFlow_GeneratePreBuildParams_Call struct { - *mock.Call -} - -// GeneratePreBuildParams is a helper method to define mock.On call -func (_e *AggsenderBuilderFlow_Expecter) GeneratePreBuildParams(ctx interface{}, certType interface{}) *AggsenderBuilderFlow_GeneratePreBuildParams_Call { - return &AggsenderBuilderFlow_GeneratePreBuildParams_Call{Call: _e.mock.On("GeneratePreBuildParams", ctx, certType)} -} - -func (_c *AggsenderBuilderFlow_GeneratePreBuildParams_Call) Run(run func(ctx context.Context, 
certType types.CertificateType)) *AggsenderBuilderFlow_GeneratePreBuildParams_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(types.CertificateType)) - }) - return _c -} - -func (_c *AggsenderBuilderFlow_GeneratePreBuildParams_Call) Return(_a0 *types.CertificatePreBuildParams, _a1 error) *AggsenderBuilderFlow_GeneratePreBuildParams_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *AggsenderBuilderFlow_GeneratePreBuildParams_Call) RunAndReturn(run func(context.Context, types.CertificateType) (*types.CertificatePreBuildParams, error)) *AggsenderBuilderFlow_GeneratePreBuildParams_Call { - _c.Call.Return(run) - return _c -} - -// GetNextBlockNumber provides a mock function with no fields -func (_m *AggsenderBuilderFlow) GetNextBlockNumber() (uint64, error) { - ret := _m.Called() - - if len(ret) == 0 { - panic("no return value specified for GetNextBlockNumber") - } - - var r0 uint64 - var r1 error - if rf, ok := ret.Get(0).(func() (uint64, error)); ok { - return rf() - } - if rf, ok := ret.Get(0).(func() uint64); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(uint64) - } - - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// AggsenderBuilderFlow_GetNextBlockNumber_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetNextBlockNumber' -type AggsenderBuilderFlow_GetNextBlockNumber_Call struct { - *mock.Call -} - -// GetNextBlockNumber is a helper method to define mock.On call -func (_e *AggsenderBuilderFlow_Expecter) GetNextBlockNumber() *AggsenderBuilderFlow_GetNextBlockNumber_Call { - return &AggsenderBuilderFlow_GetNextBlockNumber_Call{Call: _e.mock.On("GetNextBlockNumber")} -} - -func (_c *AggsenderBuilderFlow_GetNextBlockNumber_Call) Run(run func()) *AggsenderBuilderFlow_GetNextBlockNumber_Call { - _c.Call.Run(func(args mock.Arguments) { - run() - }) - return _c -} - -func (_c 
*AggsenderBuilderFlow_GetNextBlockNumber_Call) Return(_a0 uint64, _a1 error) *AggsenderBuilderFlow_GetNextBlockNumber_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *AggsenderBuilderFlow_GetNextBlockNumber_Call) RunAndReturn(run func() (uint64, error)) *AggsenderBuilderFlow_GetNextBlockNumber_Call { - _c.Call.Return(run) - return _c -} - // NewAggsenderBuilderFlow creates a new instance of AggsenderBuilderFlow. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. // The first argument is typically a *testing.T value. func NewAggsenderBuilderFlow(t interface { diff --git a/aggsender/mocks/mock_aggsender_flow_baser.go b/aggsender/mocks/mock_aggsender_flow_baser.go index b558cb5a2..0b5aa5437 100644 --- a/aggsender/mocks/mock_aggsender_flow_baser.go +++ b/aggsender/mocks/mock_aggsender_flow_baser.go @@ -383,6 +383,61 @@ func (_c *AggsenderFlowBaser_GetNewLocalExitRoot_Call) RunAndReturn(run func(con return _c } +// GetNextBlockNumber provides a mock function with no fields +func (_m *AggsenderFlowBaser) GetNextBlockNumber() (uint64, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetNextBlockNumber") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func() (uint64, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// AggsenderFlowBaser_GetNextBlockNumber_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetNextBlockNumber' +type AggsenderFlowBaser_GetNextBlockNumber_Call struct { + *mock.Call +} + +// GetNextBlockNumber is a helper method to define mock.On call +func (_e *AggsenderFlowBaser_Expecter) GetNextBlockNumber() *AggsenderFlowBaser_GetNextBlockNumber_Call { + return 
&AggsenderFlowBaser_GetNextBlockNumber_Call{Call: _e.mock.On("GetNextBlockNumber")} +} + +func (_c *AggsenderFlowBaser_GetNextBlockNumber_Call) Run(run func()) *AggsenderFlowBaser_GetNextBlockNumber_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *AggsenderFlowBaser_GetNextBlockNumber_Call) Return(_a0 uint64, _a1 error) *AggsenderFlowBaser_GetNextBlockNumber_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *AggsenderFlowBaser_GetNextBlockNumber_Call) RunAndReturn(run func() (uint64, error)) *AggsenderFlowBaser_GetNextBlockNumber_Call { + _c.Call.Return(run) + return _c +} + // LimitCertSize provides a mock function with given fields: certParams func (_m *AggsenderFlowBaser) LimitCertSize(certParams *types.CertificateBuildParams) (*types.CertificateBuildParams, error) { ret := _m.Called(certParams) @@ -486,61 +541,6 @@ func (_c *AggsenderFlowBaser_StartL2Block_Call) RunAndReturn(run func() uint64) return _c } -// GetNextBlockNumber provides a mock function with no fields -func (_m *AggsenderFlowBaser) GetNextBlockNumber() (uint64, error) { - ret := _m.Called() - - if len(ret) == 0 { - panic("no return value specified for GetNextBlockNumber") - } - - var r0 uint64 - var r1 error - if rf, ok := ret.Get(0).(func() (uint64, error)); ok { - return rf() - } - if rf, ok := ret.Get(0).(func() uint64); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(uint64) - } - - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// AggsenderFlowBaser_GetNextBlockNumber_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetNextBlockNumber' -type AggsenderFlowBaser_GetNextBlockNumber_Call struct { - *mock.Call -} - -// GetNextBlockNumber is a helper method to define mock.On call -func (_e *AggsenderFlowBaser_Expecter) GetNextBlockNumber() *AggsenderFlowBaser_GetNextBlockNumber_Call { - return 
&AggsenderFlowBaser_GetNextBlockNumber_Call{Call: _e.mock.On("GetNextBlockNumber")} -} - -func (_c *AggsenderFlowBaser_GetNextBlockNumber_Call) Run(run func()) *AggsenderFlowBaser_GetNextBlockNumber_Call { - _c.Call.Run(func(args mock.Arguments) { - run() - }) - return _c -} - -func (_c *AggsenderFlowBaser_GetNextBlockNumber_Call) Return(_a0 uint64, _a1 error) *AggsenderFlowBaser_GetNextBlockNumber_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *AggsenderFlowBaser_GetNextBlockNumber_Call) RunAndReturn(run func() (uint64, error)) *AggsenderFlowBaser_GetNextBlockNumber_Call { - _c.Call.Return(run) - return _c -} - // VerifyBlockRangeGaps provides a mock function with given fields: ctx, lastSentCertificate, newFromBlock, newToBlock func (_m *AggsenderFlowBaser) VerifyBlockRangeGaps(ctx context.Context, lastSentCertificate *types.CertificateHeader, newFromBlock uint64, newToBlock uint64) error { ret := _m.Called(ctx, lastSentCertificate, newFromBlock, newToBlock) diff --git a/aggsender/optimistic/mocks/mock_optimistic_signature_calculator.go b/aggsender/optimistic/mocks/mock_optimistic_signature_calculator.go index 9f2e4d482..8608dc793 100644 --- a/aggsender/optimistic/mocks/mock_optimistic_signature_calculator.go +++ b/aggsender/optimistic/mocks/mock_optimistic_signature_calculator.go @@ -3,7 +3,7 @@ package mocks import ( - bridgesync "github.com/agglayer/aggkit/bridgesync" + claimsynctypes "github.com/agglayer/aggkit/claimsync/types" common "github.com/ethereum/go-ethereum/common" context "context" @@ -27,7 +27,7 @@ func (_m *OptimisticSignatureCalculator) EXPECT() *OptimisticSignatureCalculator } // Sign provides a mock function with given fields: ctx, aggchainReq, newLocalExitRoot, claims -func (_m *OptimisticSignatureCalculator) Sign(ctx context.Context, aggchainReq types.AggchainProofRequest, newLocalExitRoot common.Hash, claims []bridgesync.Claim) (common.Hash, error) { +func (_m *OptimisticSignatureCalculator) Sign(ctx context.Context, 
aggchainReq types.AggchainProofRequest, newLocalExitRoot common.Hash, claims []claimsynctypes.Claim) (common.Hash, error) { ret := _m.Called(ctx, aggchainReq, newLocalExitRoot, claims) if len(ret) == 0 { @@ -36,10 +36,10 @@ func (_m *OptimisticSignatureCalculator) Sign(ctx context.Context, aggchainReq t var r0 common.Hash var r1 error - if rf, ok := ret.Get(0).(func(context.Context, types.AggchainProofRequest, common.Hash, []bridgesync.Claim) (common.Hash, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, types.AggchainProofRequest, common.Hash, []claimsynctypes.Claim) (common.Hash, error)); ok { return rf(ctx, aggchainReq, newLocalExitRoot, claims) } - if rf, ok := ret.Get(0).(func(context.Context, types.AggchainProofRequest, common.Hash, []bridgesync.Claim) common.Hash); ok { + if rf, ok := ret.Get(0).(func(context.Context, types.AggchainProofRequest, common.Hash, []claimsynctypes.Claim) common.Hash); ok { r0 = rf(ctx, aggchainReq, newLocalExitRoot, claims) } else { if ret.Get(0) != nil { @@ -47,7 +47,7 @@ func (_m *OptimisticSignatureCalculator) Sign(ctx context.Context, aggchainReq t } } - if rf, ok := ret.Get(1).(func(context.Context, types.AggchainProofRequest, common.Hash, []bridgesync.Claim) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, types.AggchainProofRequest, common.Hash, []claimsynctypes.Claim) error); ok { r1 = rf(ctx, aggchainReq, newLocalExitRoot, claims) } else { r1 = ret.Error(1) @@ -65,14 +65,14 @@ type OptimisticSignatureCalculator_Sign_Call struct { // - ctx context.Context // - aggchainReq types.AggchainProofRequest // - newLocalExitRoot common.Hash -// - claims []bridgesync.Claim +// - claims []claimsynctypes.Claim func (_e *OptimisticSignatureCalculator_Expecter) Sign(ctx interface{}, aggchainReq interface{}, newLocalExitRoot interface{}, claims interface{}) *OptimisticSignatureCalculator_Sign_Call { return &OptimisticSignatureCalculator_Sign_Call{Call: _e.mock.On("Sign", ctx, aggchainReq, newLocalExitRoot, 
claims)} } -func (_c *OptimisticSignatureCalculator_Sign_Call) Run(run func(ctx context.Context, aggchainReq types.AggchainProofRequest, newLocalExitRoot common.Hash, claims []bridgesync.Claim)) *OptimisticSignatureCalculator_Sign_Call { +func (_c *OptimisticSignatureCalculator_Sign_Call) Run(run func(ctx context.Context, aggchainReq types.AggchainProofRequest, newLocalExitRoot common.Hash, claims []claimsynctypes.Claim)) *OptimisticSignatureCalculator_Sign_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(types.AggchainProofRequest), args[2].(common.Hash), args[3].([]bridgesync.Claim)) + run(args[0].(context.Context), args[1].(types.AggchainProofRequest), args[2].(common.Hash), args[3].([]claimsynctypes.Claim)) }) return _c } @@ -82,7 +82,7 @@ func (_c *OptimisticSignatureCalculator_Sign_Call) Return(_a0 common.Hash, _a1 e return _c } -func (_c *OptimisticSignatureCalculator_Sign_Call) RunAndReturn(run func(context.Context, types.AggchainProofRequest, common.Hash, []bridgesync.Claim) (common.Hash, error)) *OptimisticSignatureCalculator_Sign_Call { +func (_c *OptimisticSignatureCalculator_Sign_Call) RunAndReturn(run func(context.Context, types.AggchainProofRequest, common.Hash, []claimsynctypes.Claim) (common.Hash, error)) *OptimisticSignatureCalculator_Sign_Call { _c.Call.Return(run) return _c } diff --git a/aggsender/optimistic/optimistic_sign_test.go b/aggsender/optimistic/optimistic_sign_test.go index 545e641ba..0fd48bdb1 100644 --- a/aggsender/optimistic/optimistic_sign_test.go +++ b/aggsender/optimistic/optimistic_sign_test.go @@ -9,7 +9,7 @@ import ( "github.com/agglayer/aggkit/aggsender/mocks" "github.com/agglayer/aggkit/aggsender/types" - "github.com/agglayer/aggkit/bridgesync" + claimsynctypes "github.com/agglayer/aggkit/claimsync/types" "github.com/agglayer/aggkit/l1infotreesync" "github.com/agglayer/aggkit/log" "github.com/agglayer/go_signer/signer" @@ -142,7 +142,7 @@ func 
TestOptimisticSignatureCalculatorImpl_Sign(t *testing.T) { newLocalExitRoot := common.HexToHash("0xdef") certBuildParams := &types.CertificateBuildParams{ - Claims: []bridgesync.Claim{}, + Claims: []claimsynctypes.Claim{}, } testCases := []struct { diff --git a/aggsender/optimistic/optimistichash/calculate_hash_commit_imported_bridges_test.go b/aggsender/optimistic/optimistichash/calculate_hash_commit_imported_bridges_test.go index c8b6b1602..d28714be0 100644 --- a/aggsender/optimistic/optimistichash/calculate_hash_commit_imported_bridges_test.go +++ b/aggsender/optimistic/optimistichash/calculate_hash_commit_imported_bridges_test.go @@ -5,7 +5,7 @@ import ( "testing" "github.com/agglayer/aggkit/aggsender/converters" - "github.com/agglayer/aggkit/bridgesync" + claimsynctypes "github.com/agglayer/aggkit/claimsync/types" "github.com/ethereum/go-ethereum/common" "github.com/stretchr/testify/require" ) @@ -35,7 +35,7 @@ func TestSignatureOptimisticData_CommitImportedBrige(t *testing.T) { } func TestNewCommitImportedBrigesData(t *testing.T) { - claims := []bridgesync.Claim{ + claims := []claimsynctypes.Claim{ { GlobalIndex: big.NewInt(12345), IsMessage: false, @@ -70,7 +70,7 @@ func TestNewCommitImportedBrigesData(t *testing.T) { } func TestSetBridgeExitHash(t *testing.T) { - claim := &bridgesync.Claim{ + claim := &claimsynctypes.Claim{ GlobalIndex: big.NewInt(12345), IsMessage: false, OriginNetwork: 1, diff --git a/aggsender/prover/proof_generation_tool.go b/aggsender/prover/proof_generation_tool.go index 17c171682..fba50fca5 100644 --- a/aggsender/prover/proof_generation_tool.go +++ b/aggsender/prover/proof_generation_tool.go @@ -10,8 +10,8 @@ import ( "github.com/agglayer/aggkit/aggsender/flows" "github.com/agglayer/aggkit/aggsender/query" "github.com/agglayer/aggkit/aggsender/types" - "github.com/agglayer/aggkit/claimsync" bridgesynctypes "github.com/agglayer/aggkit/bridgesync/types" + "github.com/agglayer/aggkit/claimsync" claimsynctypes 
"github.com/agglayer/aggkit/claimsync/types" aggkitgrpc "github.com/agglayer/aggkit/grpc" "github.com/agglayer/aggkit/l2gersync" diff --git a/aggsender/prover/proof_generation_tool_test.go b/aggsender/prover/proof_generation_tool_test.go index bb1e7df9f..953443c68 100644 --- a/aggsender/prover/proof_generation_tool_test.go +++ b/aggsender/prover/proof_generation_tool_test.go @@ -7,7 +7,8 @@ import ( "github.com/agglayer/aggkit/aggsender/mocks" "github.com/agglayer/aggkit/aggsender/types" - "github.com/agglayer/aggkit/bridgesync" + claimsynctypes "github.com/agglayer/aggkit/claimsync/types" + claimsynctypesmocks "github.com/agglayer/aggkit/claimsync/types/mocks" aggkitgrpc "github.com/agglayer/aggkit/grpc" "github.com/agglayer/aggkit/log" aggkittypes "github.com/agglayer/aggkit/types" @@ -25,6 +26,7 @@ func TestGenerateAggchainProof(t *testing.T) { setupMocks func( ctx context.Context, mockL2Syncer *mocks.L2BridgeSyncer, + mockL2ClaimSyncer *claimsynctypesmocks.ClaimSyncer, mockAggchainProofClient *mocks.AggchainProofClientInterface, mockFlow *mocks.AggchainProofFlow, ) @@ -35,13 +37,14 @@ func TestGenerateAggchainProof(t *testing.T) { name: "Success", setupMocks: func(ctx context.Context, mockL2Syncer *mocks.L2BridgeSyncer, + mockL2ClaimSyncer *claimsynctypesmocks.ClaimSyncer, mockAggchainProofClient *mocks.AggchainProofClientInterface, mockFlow *mocks.AggchainProofFlow, ) { - mockL2Syncer.EXPECT().GetLastProcessedBlock(ctx).Return(uint64(20), nil) - mockL2Syncer.EXPECT().GetClaims(ctx, uint64(1), uint64(10)).Return([]bridgesync.Claim{}, nil) + mockL2Syncer.EXPECT().GetLastProcessedBlock(ctx).Return(uint64(20), true, nil) + mockL2ClaimSyncer.EXPECT().GetClaims(ctx, uint64(1), uint64(10)).Return([]claimsynctypes.Claim{}, nil) certBuildParams := &types.CertificateBuildParams{ - Claims: []bridgesync.Claim{}, + Claims: []claimsynctypes.Claim{}, } mockFlow.EXPECT().GenerateAggchainProof(ctx, uint64(0), uint64(10), certBuildParams).Return( 
&types.AggchainProof{SP1StarkProof: &types.SP1StarkProof{Proof: []byte("proof")}}, nil) @@ -52,10 +55,11 @@ func TestGenerateAggchainProof(t *testing.T) { name: "Failure_GetLastProcessedBlock", setupMocks: func(ctx context.Context, mockL2Syncer *mocks.L2BridgeSyncer, + mockL2ClaimSyncer *claimsynctypesmocks.ClaimSyncer, mockAggchainProofClient *mocks.AggchainProofClientInterface, mockFlow *mocks.AggchainProofFlow, ) { - mockL2Syncer.EXPECT().GetLastProcessedBlock(ctx).Return(uint64(0), errors.New("test error")) + mockL2Syncer.EXPECT().GetLastProcessedBlock(ctx).Return(uint64(0), false, errors.New("test error")) }, expectedError: "error getting last processed block from l2: test error", }, @@ -63,11 +67,12 @@ func TestGenerateAggchainProof(t *testing.T) { name: "Failure_GetClaims", setupMocks: func(ctx context.Context, mockL2Syncer *mocks.L2BridgeSyncer, + mockL2ClaimSyncer *claimsynctypesmocks.ClaimSyncer, mockAggchainProofClient *mocks.AggchainProofClientInterface, mockFlow *mocks.AggchainProofFlow, ) { - mockL2Syncer.EXPECT().GetLastProcessedBlock(ctx).Return(uint64(20), nil) - mockL2Syncer.EXPECT().GetClaims(ctx, uint64(1), uint64(10)).Return(nil, errors.New("test error")) + mockL2Syncer.EXPECT().GetLastProcessedBlock(ctx).Return(uint64(20), true, nil) + mockL2ClaimSyncer.EXPECT().GetClaims(ctx, uint64(1), uint64(10)).Return(nil, errors.New("test error")) }, expectedError: "error getting claims (imported bridge exits)", }, @@ -75,13 +80,14 @@ func TestGenerateAggchainProof(t *testing.T) { name: "Failure_GenerateAggchainProof", setupMocks: func(ctx context.Context, mockL2Syncer *mocks.L2BridgeSyncer, + mockL2ClaimSyncer *claimsynctypesmocks.ClaimSyncer, mockAggchainProofClient *mocks.AggchainProofClientInterface, mockFlow *mocks.AggchainProofFlow, ) { - mockL2Syncer.EXPECT().GetLastProcessedBlock(ctx).Return(uint64(20), nil) - mockL2Syncer.EXPECT().GetClaims(ctx, uint64(1), uint64(10)).Return([]bridgesync.Claim{}, nil) + 
mockL2Syncer.EXPECT().GetLastProcessedBlock(ctx).Return(uint64(20), true, nil) + mockL2ClaimSyncer.EXPECT().GetClaims(ctx, uint64(1), uint64(10)).Return([]claimsynctypes.Claim{}, nil) certBuildParams := &types.CertificateBuildParams{ - Claims: []bridgesync.Claim{}, + Claims: []claimsynctypes.Claim{}, } mockFlow.EXPECT().GenerateAggchainProof(ctx, uint64(0), uint64(10), certBuildParams).Return( nil, errors.New("test error")) @@ -102,17 +108,19 @@ func TestGenerateAggchainProof(t *testing.T) { mockLogger := log.WithFields("test", tt.name) mockL2Syncer := mocks.NewL2BridgeSyncer(t) + mockL2ClaimSyncer := claimsynctypesmocks.NewClaimSyncer(t) mockAggchainProofClient := mocks.NewAggchainProofClientInterface(t) mockFlow := mocks.NewAggchainProofFlow(t) tool := &AggchainProofGenerationTool{ logger: mockLogger, l2Syncer: mockL2Syncer, + l2ClaimSyncer: mockL2ClaimSyncer, aggchainProofClient: mockAggchainProofClient, flow: mockFlow, } - tt.setupMocks(ctx, mockL2Syncer, mockAggchainProofClient, mockFlow) + tt.setupMocks(ctx, mockL2Syncer, mockL2ClaimSyncer, mockAggchainProofClient, mockFlow) proof, err := tool.GenerateAggchainProof(ctx, lastProvenBlock, toBlock) if tt.expectedError != "" { @@ -158,6 +166,7 @@ func TestOptimisticModeQuerierAlwaysOff(t *testing.T) { func TestNewAggchainProofGenerationTool(t *testing.T) { mockL2Syncer := mocks.NewL2BridgeSyncer(t) + mockL2ClaimSyncer := claimsynctypesmocks.NewClaimSyncer(t) mockL1Client := aggkittypesmocks.NewBaseEthereumClienter(t) mockL2Client := aggkittypesmocks.NewBaseEthereumClienter(t) mockL1Client.EXPECT().CallContract(mock.Anything, mock.Anything, mock.Anything).Return(nil, nil).Maybe() @@ -168,7 +177,7 @@ func TestNewAggchainProofGenerationTool(t *testing.T) { mockL1InfoTreeSyncer.EXPECT().Finality().Return(aggkittypes.FinalizedBlock).Maybe() _, err := NewAggchainProofGenerationTool(context.TODO(), log.WithFields("module", "test"), - Config{AggkitProverClient: aggkitgrpc.DefaultConfig()}, mockL1Client, mockL2Client, 
mockL2Syncer, mockL1InfoTreeSyncer) + Config{AggkitProverClient: aggkitgrpc.DefaultConfig()}, mockL1Client, mockL2Client, mockL2Syncer, mockL2ClaimSyncer, mockL1InfoTreeSyncer) require.Error(t, err) cfg := Config{ @@ -177,6 +186,6 @@ func TestNewAggchainProofGenerationTool(t *testing.T) { } _, err = NewAggchainProofGenerationTool(context.TODO(), log.WithFields("module", "test"), - cfg, mockL1Client, mockL2Client, mockL2Syncer, mockL1InfoTreeSyncer) + cfg, mockL1Client, mockL2Client, mockL2Syncer, mockL2ClaimSyncer, mockL1InfoTreeSyncer) require.ErrorContains(t, err, "L2 GER reader") } diff --git a/aggsender/query/aggchain_proof_query_test.go b/aggsender/query/aggchain_proof_query_test.go index b0ebdfeb9..a0db4fc9e 100644 --- a/aggsender/query/aggchain_proof_query_test.go +++ b/aggsender/query/aggchain_proof_query_test.go @@ -11,6 +11,7 @@ import ( "github.com/agglayer/aggkit/aggsender/types" "github.com/agglayer/aggkit/bridgesync" bridgetypes "github.com/agglayer/aggkit/bridgesync/types" + claimsynctypes "github.com/agglayer/aggkit/claimsync/types" "github.com/agglayer/aggkit/l1infotreesync" "github.com/agglayer/aggkit/log" treetypes "github.com/agglayer/aggkit/tree/types" @@ -25,13 +26,13 @@ func TestGetImportedBridgeExitsForProver(t *testing.T) { testCases := []struct { name string - claims []bridgesync.Claim + claims []claimsynctypes.Claim expectedExits []*agglayertypes.ImportedBridgeExitWithBlockNumber expectedError string }{ { name: "success", - claims: []bridgesync.Claim{ + claims: []claimsynctypes.Claim{ { IsMessage: false, OriginNetwork: 1, @@ -273,7 +274,7 @@ func TestGenerateAggchainProof(t *testing.T) { { name: "error getting injected GERs", lastProvenBlock: 100, - buildParams: &types.CertificateBuildParams{L1InfoTreeLeafCount: 2, L1InfoTreeRootFromWhichToProve: common.HexToHash("0x1"), ToBlock: 200, Claims: []bridgesync.Claim{{}}}, + buildParams: &types.CertificateBuildParams{L1InfoTreeLeafCount: 2, L1InfoTreeRootFromWhichToProve: 
common.HexToHash("0x1"), ToBlock: 200, Claims: []claimsynctypes.Claim{{}}}, mockFn: func(aggchainProofClient *mocks.AggchainProofClientInterface, l1InfoTreeDataQuerier *mocks.L1InfoTreeDataQuerier, gerQuerier *mocks.GERQuerier, @@ -289,7 +290,7 @@ func TestGenerateAggchainProof(t *testing.T) { { name: "error getting aggchain proof", lastProvenBlock: 100, - buildParams: &types.CertificateBuildParams{L1InfoTreeLeafCount: 2, L1InfoTreeRootFromWhichToProve: common.HexToHash("0x123"), ToBlock: 200, Claims: []bridgesync.Claim{{GlobalIndex: big.NewInt(1)}}}, + buildParams: &types.CertificateBuildParams{L1InfoTreeLeafCount: 2, L1InfoTreeRootFromWhichToProve: common.HexToHash("0x123"), ToBlock: 200, Claims: []claimsynctypes.Claim{{GlobalIndex: big.NewInt(1)}}}, mockFn: func(aggchainProofClient *mocks.AggchainProofClientInterface, l1InfoTreeDataQuerier *mocks.L1InfoTreeDataQuerier, gerQuerier *mocks.GERQuerier, @@ -308,7 +309,7 @@ func TestGenerateAggchainProof(t *testing.T) { { name: "success", lastProvenBlock: 100, - buildParams: &types.CertificateBuildParams{L1InfoTreeLeafCount: 2, L1InfoTreeRootFromWhichToProve: common.HexToHash("0x123"), ToBlock: 200, Claims: []bridgesync.Claim{{GlobalIndex: big.NewInt(1)}}}, + buildParams: &types.CertificateBuildParams{L1InfoTreeLeafCount: 2, L1InfoTreeRootFromWhichToProve: common.HexToHash("0x123"), ToBlock: 200, Claims: []claimsynctypes.Claim{{GlobalIndex: big.NewInt(1)}}}, mockFn: func(aggchainProofClient *mocks.AggchainProofClientInterface, l1InfoTreeDataQuerier *mocks.L1InfoTreeDataQuerier, gerQuerier *mocks.GERQuerier, @@ -375,18 +376,18 @@ func TestConvertUnclaimsToAgglayerUnclaims(t *testing.T) { testCases := []struct { name string - unclaims []bridgetypes.Unclaim + unclaims []claimsynctypes.Unclaim expectedUnclaims []*agglayertypes.Unclaim expectedError string }{ { name: "empty map", - unclaims: []bridgetypes.Unclaim{}, + unclaims: []claimsynctypes.Unclaim{}, expectedUnclaims: []*agglayertypes.Unclaim{}, }, { name: "single 
unclaim with mainnet flag true", - unclaims: []bridgetypes.Unclaim{ + unclaims: []claimsynctypes.Unclaim{ { GlobalIndex: bridgesync.GenerateGlobalIndex(true, 0, 5), BlockNumber: 100, @@ -407,7 +408,7 @@ func TestConvertUnclaimsToAgglayerUnclaims(t *testing.T) { }, { name: "single unclaim with mainnet flag false and rollup index", - unclaims: []bridgetypes.Unclaim{ + unclaims: []claimsynctypes.Unclaim{ { GlobalIndex: bridgesync.GenerateGlobalIndex(false, 3, 7), BlockNumber: 200, @@ -428,7 +429,7 @@ func TestConvertUnclaimsToAgglayerUnclaims(t *testing.T) { }, { name: "multiple unclaims with different configurations", - unclaims: []bridgetypes.Unclaim{ + unclaims: []claimsynctypes.Unclaim{ { GlobalIndex: bridgesync.GenerateGlobalIndex(true, 0, 1), BlockNumber: 100, @@ -477,7 +478,7 @@ func TestConvertUnclaimsToAgglayerUnclaims(t *testing.T) { }, { name: "unclaim with zero global index", - unclaims: []bridgetypes.Unclaim{ + unclaims: []claimsynctypes.Unclaim{ { GlobalIndex: big.NewInt(0), BlockNumber: 100, @@ -498,7 +499,7 @@ func TestConvertUnclaimsToAgglayerUnclaims(t *testing.T) { }, { name: "unclaim with large values", - unclaims: []bridgetypes.Unclaim{ + unclaims: []claimsynctypes.Unclaim{ { GlobalIndex: bridgesync.GenerateGlobalIndex(false, 4294967295, 4294967295), // max uint32 values BlockNumber: 999999, diff --git a/aggsender/query/bridge_query.go b/aggsender/query/bridge_query.go index 33976e636..142743783 100644 --- a/aggsender/query/bridge_query.go +++ b/aggsender/query/bridge_query.go @@ -92,12 +92,16 @@ func (b *bridgeDataQuerier) GetExitRootByIndex(ctx context.Context, index uint32 func (b *bridgeDataQuerier) GetLastProcessedBlock(ctx context.Context) (uint64, bool, error) { bridgeBlock, found, err := b.bridgeSyncer.GetLastProcessedBlock(ctx) if err != nil { - return 0, false, fmt.Errorf("error getting bridge syncer last processed block: %w", err) + return 0, false, fmt.Errorf("error getting last processed block: %w", err) } if !found { return 0, false, 
nil } + if b.claimSyncer == nil { + return bridgeBlock, true, nil + } + claimBlock, claimFound, err := b.claimSyncer.GetLastProcessedBlock(ctx) if err != nil { return 0, false, fmt.Errorf("error getting claim syncer last processed block: %w", err) @@ -134,7 +138,7 @@ func (b *bridgeDataQuerier) WaitForSyncerToCatchUp(ctx context.Context, block ui for { bridgeReady, err := b.isSyncerCaughtUp(ctx, block) if err != nil { - return fmt.Errorf("bridgeDataQuerier - error checking bridge syncer: %w", err) + return fmt.Errorf("bridgeDataQuerier - error getting last processed block: %w", err) } claimReady, err := b.isClaimSyncerCaughtUp(ctx, block) @@ -180,6 +184,9 @@ func (b *bridgeDataQuerier) isSyncerCaughtUp(ctx context.Context, block uint64) // isClaimSyncerCaughtUp checks whether the claim syncer has processed up to the given block. // Returns true if caught up, false if not yet. func (b *bridgeDataQuerier) isClaimSyncerCaughtUp(ctx context.Context, block uint64) (bool, error) { + if b.claimSyncer == nil { + return true, nil + } lastProcessedBlock, found, err := b.claimSyncer.GetLastProcessedBlock(ctx) if err != nil { return false, err diff --git a/aggsender/query/bridge_query_test.go b/aggsender/query/bridge_query_test.go index 30ba00cca..d9845ec8f 100644 --- a/aggsender/query/bridge_query_test.go +++ b/aggsender/query/bridge_query_test.go @@ -8,6 +8,8 @@ import ( "github.com/agglayer/aggkit/aggsender/mocks" "github.com/agglayer/aggkit/bridgesync" + claimsynctypes "github.com/agglayer/aggkit/claimsync/types" + claimsynctypesmocks "github.com/agglayer/aggkit/claimsync/types/mocks" "github.com/agglayer/aggkit/log" treetypes "github.com/agglayer/aggkit/tree/types" "github.com/ethereum/go-ethereum/common" @@ -22,27 +24,27 @@ func TestGetBridgesAndClaims(t *testing.T) { name string fromBlock uint64 toBlock uint64 - mockFn func(*mocks.L2BridgeSyncer) + mockFn func(*mocks.L2BridgeSyncer, *claimsynctypesmocks.ClaimSyncer) expectedBridges []bridgesync.Bridge - expectedClaims 
[]bridgesync.Claim + expectedClaims []claimsynctypes.Claim expectedError string }{ { name: "success - valid bridges and claims", fromBlock: 100, toBlock: 200, - mockFn: func(mockSyncer *mocks.L2BridgeSyncer) { + mockFn: func(mockSyncer *mocks.L2BridgeSyncer, mockClaimSyncer *claimsynctypesmocks.ClaimSyncer) { mockSyncer.EXPECT().GetBridges(ctx, uint64(100), uint64(200)).Return([]bridgesync.Bridge{ {BlockNum: 100, BlockPos: 1}, }, nil) - mockSyncer.EXPECT().GetClaims(ctx, uint64(100), uint64(200)).Return([]bridgesync.Claim{ + mockClaimSyncer.EXPECT().GetClaims(ctx, uint64(100), uint64(200)).Return([]claimsynctypes.Claim{ {BlockNum: 200, BlockPos: 1}, }, nil) }, expectedBridges: []bridgesync.Bridge{ {BlockNum: 100, BlockPos: 1}, }, - expectedClaims: []bridgesync.Claim{ + expectedClaims: []claimsynctypes.Claim{ {BlockNum: 200, BlockPos: 1}, }, }, @@ -50,7 +52,7 @@ func TestGetBridgesAndClaims(t *testing.T) { name: "error - failed to fetch bridges", fromBlock: 100, toBlock: 200, - mockFn: func(mockSyncer *mocks.L2BridgeSyncer) { + mockFn: func(mockSyncer *mocks.L2BridgeSyncer, mockClaimSyncer *claimsynctypesmocks.ClaimSyncer) { mockSyncer.EXPECT().GetBridges(ctx, uint64(100), uint64(200)).Return(nil, errors.New("some error")) }, expectedBridges: nil, @@ -61,11 +63,11 @@ func TestGetBridgesAndClaims(t *testing.T) { name: "error - failed to fetch claims", fromBlock: 100, toBlock: 200, - mockFn: func(mockSyncer *mocks.L2BridgeSyncer) { + mockFn: func(mockSyncer *mocks.L2BridgeSyncer, mockClaimSyncer *claimsynctypesmocks.ClaimSyncer) { mockSyncer.EXPECT().GetBridges(ctx, uint64(100), uint64(200)).Return([]bridgesync.Bridge{ {BlockNum: 100, BlockPos: 1}, }, nil) - mockSyncer.EXPECT().GetClaims(ctx, uint64(100), uint64(200)).Return(nil, errors.New("some error")) + mockClaimSyncer.EXPECT().GetClaims(ctx, uint64(100), uint64(200)).Return(nil, errors.New("some error")) }, expectedError: "error getting claims: some error", }, @@ -73,9 +75,9 @@ func TestGetBridgesAndClaims(t 
*testing.T) { name: "no bridges and claims - empty cert", fromBlock: 100, toBlock: 200, - mockFn: func(mockSyncer *mocks.L2BridgeSyncer) { + mockFn: func(mockSyncer *mocks.L2BridgeSyncer, mockClaimSyncer *claimsynctypesmocks.ClaimSyncer) { mockSyncer.EXPECT().GetBridges(ctx, uint64(100), uint64(200)).Return(nil, nil) - mockSyncer.EXPECT().GetClaims(ctx, uint64(100), uint64(200)).Return(nil, nil) + mockClaimSyncer.EXPECT().GetClaims(ctx, uint64(100), uint64(200)).Return(nil, nil) }, expectedBridges: nil, expectedClaims: nil, @@ -90,10 +92,11 @@ func TestGetBridgesAndClaims(t *testing.T) { mockSyncer := new(mocks.L2BridgeSyncer) mockSyncer.EXPECT().OriginNetwork().Return(1).Once() + mockClaimSyncer := claimsynctypesmocks.NewClaimSyncer(t) AgglayerBridgeL2Reader := new(mocks.AgglayerBridgeL2Reader) - tc.mockFn(mockSyncer) + tc.mockFn(mockSyncer, mockClaimSyncer) - bridgeQuerier := NewBridgeDataQuerier(nil, mockSyncer, 0, AgglayerBridgeL2Reader) + bridgeQuerier := NewBridgeDataQuerier(nil, mockSyncer, mockClaimSyncer, 0, AgglayerBridgeL2Reader) bridges, claims, err := bridgeQuerier.GetBridgesAndClaims(ctx, tc.fromBlock, tc.toBlock) if tc.expectedError != "" { @@ -152,7 +155,7 @@ func TestGetExitRootByIndex(t *testing.T) { AgglayerBridgeL2Reader := new(mocks.AgglayerBridgeL2Reader) tc.mockFn(mockSyncer) - bridgeQuerier := NewBridgeDataQuerier(nil, mockSyncer, 0, AgglayerBridgeL2Reader) + bridgeQuerier := NewBridgeDataQuerier(nil, mockSyncer, nil, 0, AgglayerBridgeL2Reader) hash, err := bridgeQuerier.GetExitRootByIndex(ctx, tc.index) if tc.expectedError != "" { @@ -180,14 +183,14 @@ func TestGetLastProcessedBlock(t *testing.T) { { name: "success - valid last processed block", mockFn: func(mockSyncer *mocks.L2BridgeSyncer) { - mockSyncer.EXPECT().GetLastProcessedBlock(ctx).Return(uint64(150), nil) + mockSyncer.EXPECT().GetLastProcessedBlock(ctx).Return(uint64(150), true, nil) }, expectedBlock: 150, }, { name: "error - failed to fetch last processed block", mockFn: 
func(mockSyncer *mocks.L2BridgeSyncer) { - mockSyncer.EXPECT().GetLastProcessedBlock(ctx).Return(uint64(0), errors.New("some error")) + mockSyncer.EXPECT().GetLastProcessedBlock(ctx).Return(uint64(0), false, errors.New("some error")) }, expectedError: "error getting last processed block: some error", }, @@ -204,9 +207,9 @@ func TestGetLastProcessedBlock(t *testing.T) { AgglayerBridgeL2Reader := new(mocks.AgglayerBridgeL2Reader) tc.mockFn(mockSyncer) - bridgeQuerier := NewBridgeDataQuerier(nil, mockSyncer, 0, AgglayerBridgeL2Reader) + bridgeQuerier := NewBridgeDataQuerier(nil, mockSyncer, nil, 0, AgglayerBridgeL2Reader) - block, err := bridgeQuerier.GetLastProcessedBlock(ctx) + block, _, err := bridgeQuerier.GetLastProcessedBlock(ctx) if tc.expectedError != "" { require.ErrorContains(t, err, tc.expectedError) } else { @@ -227,7 +230,7 @@ func TestOriginNetwork(t *testing.T) { AgglayerBridgeL2Reader := new(mocks.AgglayerBridgeL2Reader) - bridgeQuerier := NewBridgeDataQuerier(nil, mockSyncer, 0, AgglayerBridgeL2Reader) + bridgeQuerier := NewBridgeDataQuerier(nil, mockSyncer, nil, 0, AgglayerBridgeL2Reader) originNetwork := bridgeQuerier.OriginNetwork() require.Equal(t, uint32(1), originNetwork) @@ -251,7 +254,7 @@ func TestWaitForSyncerToCatchUp(t *testing.T) { name: "fail to get last processed block", block: 100, mockFn: func(mockSyncer *mocks.L2BridgeSyncer) { - mockSyncer.EXPECT().GetLastProcessedBlock(ctx).Return(uint64(0), errors.New("some error")).Once() + mockSyncer.EXPECT().GetLastProcessedBlock(ctx).Return(uint64(0), false, errors.New("some error")).Once() }, expectedError: "bridgeDataQuerier - error getting last processed block: some error", }, @@ -260,7 +263,7 @@ func TestWaitForSyncerToCatchUp(t *testing.T) { delayBetweenRetries: 0, block: 10, mockFn: func(mockSyncer *mocks.L2BridgeSyncer) { - mockSyncer.EXPECT().GetLastProcessedBlock(ctx).Return(uint64(100), nil).Once() + mockSyncer.EXPECT().GetLastProcessedBlock(ctx).Return(uint64(100), true, nil).Once() 
}, }, { @@ -268,8 +271,8 @@ func TestWaitForSyncerToCatchUp(t *testing.T) { block: 10, delayBetweenRetries: 10 * time.Millisecond, mockFn: func(mockSyncer *mocks.L2BridgeSyncer) { - mockSyncer.EXPECT().GetLastProcessedBlock(ctx).Return(uint64(0), nil).Times(3) - mockSyncer.EXPECT().GetLastProcessedBlock(ctx).Return(uint64(10), nil).Once() + mockSyncer.EXPECT().GetLastProcessedBlock(ctx).Return(uint64(0), false, nil).Times(3) + mockSyncer.EXPECT().GetLastProcessedBlock(ctx).Return(uint64(10), true, nil).Once() }, }, } @@ -347,7 +350,7 @@ func TestGetUnsetClaimsForBlockRange(t *testing.T) { AgglayerBridgeL2Reader := new(mocks.AgglayerBridgeL2Reader) tc.mockFn(AgglayerBridgeL2Reader) - bridgeQuerier := NewBridgeDataQuerier(log.WithFields("test", "TestGetUnsetClaimsForBlockRange"), mockSyncer, 0, AgglayerBridgeL2Reader) + bridgeQuerier := NewBridgeDataQuerier(log.WithFields("test", "TestGetUnsetClaimsForBlockRange"), mockSyncer, nil, 0, AgglayerBridgeL2Reader) unclaims, err := bridgeQuerier.GetUnsetClaimsForBlockRange(ctx, tc.fromBlock, tc.toBlock) if tc.expectedError != "" { diff --git a/aggsender/query/certificate_query_test.go b/aggsender/query/certificate_query_test.go index 76fd11894..c90de7886 100644 --- a/aggsender/query/certificate_query_test.go +++ b/aggsender/query/certificate_query_test.go @@ -12,6 +12,8 @@ import ( "github.com/agglayer/aggkit/aggsender/types" "github.com/agglayer/aggkit/bridgesync" bridgesynctypes "github.com/agglayer/aggkit/bridgesync/types" + claimsynctypes "github.com/agglayer/aggkit/claimsync/types" + claimsynctypesmocks "github.com/agglayer/aggkit/claimsync/types/mocks" treetypes "github.com/agglayer/aggkit/tree/types" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" @@ -26,7 +28,7 @@ func TestGetLastSettledCertificateToBlock(t *testing.T) { testCases := []struct { name string certificate *agglayertypes.CertificateHeader - mockFn func(*mocks.AggchainFEPRollupQuerier, *agglayermocks.AgglayerClientMock, 
*mocks.L2BridgeSyncer) + mockFn func(*mocks.AggchainFEPRollupQuerier, *agglayermocks.AgglayerClientMock, *mocks.L2BridgeSyncer, *claimsynctypesmocks.ClaimSyncer) expectedErr string expectedBlock uint64 }{ @@ -43,7 +45,7 @@ func TestGetLastSettledCertificateToBlock(t *testing.T) { Status: agglayertypes.Settled, NewLocalExitRoot: common.HexToHash("0x123"), }, - mockFn: func(aggchainQuerier *mocks.AggchainFEPRollupQuerier, agglayerClient *agglayermocks.AgglayerClientMock, bridgeSyncer *mocks.L2BridgeSyncer) { + mockFn: func(aggchainQuerier *mocks.AggchainFEPRollupQuerier, agglayerClient *agglayermocks.AgglayerClientMock, bridgeSyncer *mocks.L2BridgeSyncer, claimSyncer *claimsynctypesmocks.ClaimSyncer) { bridgeSyncer.EXPECT().GetExitRootByHash(ctx, common.HexToHash("0x123")).Return(&treetypes.Root{ BlockNum: uint64(100), }, nil) @@ -56,7 +58,7 @@ func TestGetLastSettledCertificateToBlock(t *testing.T) { } agglayerClient.EXPECT().GetNetworkInfo(ctx, uint32(0)).Return(networkStatus, nil) - bridgeSyncer.EXPECT().GetClaimsByGlobalIndex(ctx, networkStatus.SettledImportedBridgeExit.GlobalIndex).Return([]bridgesync.Claim{ + claimSyncer.EXPECT().GetClaimsByGlobalIndex(ctx, networkStatus.SettledImportedBridgeExit.GlobalIndex).Return([]claimsynctypes.Claim{ { BlockNum: 150, GlobalIndex: bridgesync.GenerateGlobalIndex(true, 0, 1), @@ -73,7 +75,7 @@ func TestGetLastSettledCertificateToBlock(t *testing.T) { Status: agglayertypes.Settled, NewLocalExitRoot: bridgesynctypes.EmptyLER, }, - mockFn: func(aggchainQuerier *mocks.AggchainFEPRollupQuerier, agglayerClient *agglayermocks.AgglayerClientMock, bridgeSyncer *mocks.L2BridgeSyncer) { + mockFn: func(aggchainQuerier *mocks.AggchainFEPRollupQuerier, agglayerClient *agglayermocks.AgglayerClientMock, bridgeSyncer *mocks.L2BridgeSyncer, claimSyncer *claimsynctypesmocks.ClaimSyncer) { networkStatus := agglayertypes.NetworkInfo{ SettledImportedBridgeExit: &agglayertypes.SettledImportedBridgeExit{ GlobalIndex: 
bridgesync.GenerateGlobalIndex(true, 0, 1), @@ -81,7 +83,7 @@ func TestGetLastSettledCertificateToBlock(t *testing.T) { }, } agglayerClient.EXPECT().GetNetworkInfo(ctx, uint32(0)).Return(networkStatus, nil) - bridgeSyncer.EXPECT().GetClaimsByGlobalIndex(ctx, networkStatus.SettledImportedBridgeExit.GlobalIndex).Return([]bridgesync.Claim{ + claimSyncer.EXPECT().GetClaimsByGlobalIndex(ctx, networkStatus.SettledImportedBridgeExit.GlobalIndex).Return([]claimsynctypes.Claim{ { BlockNum: 50, GlobalIndex: bridgesync.GenerateGlobalIndex(true, 0, 1), @@ -97,7 +99,7 @@ func TestGetLastSettledCertificateToBlock(t *testing.T) { Status: agglayertypes.Settled, NewLocalExitRoot: common.HexToHash("0x456"), }, - mockFn: func(aggchainQuerier *mocks.AggchainFEPRollupQuerier, agglayerClient *agglayermocks.AgglayerClientMock, bridgeSyncer *mocks.L2BridgeSyncer) { + mockFn: func(aggchainQuerier *mocks.AggchainFEPRollupQuerier, agglayerClient *agglayermocks.AgglayerClientMock, bridgeSyncer *mocks.L2BridgeSyncer, claimSyncer *claimsynctypesmocks.ClaimSyncer) { bridgeSyncer.EXPECT().GetExitRootByHash(ctx, common.HexToHash("0x456")).Return(&treetypes.Root{ BlockNum: uint64(300), }, nil) @@ -113,7 +115,7 @@ func TestGetLastSettledCertificateToBlock(t *testing.T) { Status: agglayertypes.Settled, NewLocalExitRoot: common.HexToHash("0x789"), }, - mockFn: func(aggchainQuerier *mocks.AggchainFEPRollupQuerier, agglayerClient *agglayermocks.AgglayerClientMock, bridgeSyncer *mocks.L2BridgeSyncer) { + mockFn: func(aggchainQuerier *mocks.AggchainFEPRollupQuerier, agglayerClient *agglayermocks.AgglayerClientMock, bridgeSyncer *mocks.L2BridgeSyncer, claimSyncer *claimsynctypesmocks.ClaimSyncer) { bridgeSyncer.EXPECT().GetExitRootByHash(ctx, common.HexToHash("0x789")).Return(nil, errors.New("exit root not found")) }, expectedErr: "failed to resolve the bridge exit block number for NewLocalExitRoot", @@ -124,7 +126,7 @@ func TestGetLastSettledCertificateToBlock(t *testing.T) { Status: 
agglayertypes.Settled, NewLocalExitRoot: bridgesynctypes.EmptyLER, }, - mockFn: func(aggchainQuerier *mocks.AggchainFEPRollupQuerier, agglayerClient *agglayermocks.AgglayerClientMock, bridgeSyncer *mocks.L2BridgeSyncer) { + mockFn: func(aggchainQuerier *mocks.AggchainFEPRollupQuerier, agglayerClient *agglayermocks.AgglayerClientMock, bridgeSyncer *mocks.L2BridgeSyncer, claimSyncer *claimsynctypesmocks.ClaimSyncer) { agglayerClient.EXPECT().GetNetworkInfo(ctx, uint32(0)).Return(agglayertypes.NetworkInfo{}, errors.New("agglayer error")) }, expectedErr: "failed to get latest settled imported bridge exit from agglayer", @@ -135,7 +137,7 @@ func TestGetLastSettledCertificateToBlock(t *testing.T) { Status: agglayertypes.Settled, NewLocalExitRoot: bridgesynctypes.EmptyLER, }, - mockFn: func(aggchainQuerier *mocks.AggchainFEPRollupQuerier, agglayerClient *agglayermocks.AgglayerClientMock, bridgeSyncer *mocks.L2BridgeSyncer) { + mockFn: func(aggchainQuerier *mocks.AggchainFEPRollupQuerier, agglayerClient *agglayermocks.AgglayerClientMock, bridgeSyncer *mocks.L2BridgeSyncer, claimSyncer *claimsynctypesmocks.ClaimSyncer) { networkStatus := agglayertypes.NetworkInfo{ SettledImportedBridgeExit: &agglayertypes.SettledImportedBridgeExit{ GlobalIndex: bridgesync.GenerateGlobalIndex(true, 0, 1), @@ -143,7 +145,7 @@ func TestGetLastSettledCertificateToBlock(t *testing.T) { }, } agglayerClient.EXPECT().GetNetworkInfo(ctx, uint32(0)).Return(networkStatus, nil) - bridgeSyncer.EXPECT().GetClaimsByGlobalIndex(ctx, networkStatus.SettledImportedBridgeExit.GlobalIndex).Return(nil, errors.New("claim not found")) + claimSyncer.EXPECT().GetClaimsByGlobalIndex(ctx, networkStatus.SettledImportedBridgeExit.GlobalIndex).Return(nil, errors.New("claim not found")) }, expectedErr: "failed to get claim(s) by global index", }, @@ -153,7 +155,7 @@ func TestGetLastSettledCertificateToBlock(t *testing.T) { Status: agglayertypes.Settled, NewLocalExitRoot: bridgesynctypes.EmptyLER, }, - mockFn: 
func(aggchainQuerier *mocks.AggchainFEPRollupQuerier, agglayerClient *agglayermocks.AgglayerClientMock, bridgeSyncer *mocks.L2BridgeSyncer) { + mockFn: func(aggchainQuerier *mocks.AggchainFEPRollupQuerier, agglayerClient *agglayermocks.AgglayerClientMock, bridgeSyncer *mocks.L2BridgeSyncer, claimSyncer *claimsynctypesmocks.ClaimSyncer) { agglayerClient.EXPECT().GetNetworkInfo(ctx, uint32(0)).Return(agglayertypes.NetworkInfo{}, nil) aggchainQuerier.EXPECT().GetLastSettledL2Block().Return(uint64(0), errors.New("L2 block query failed")) }, @@ -165,7 +167,7 @@ func TestGetLastSettledCertificateToBlock(t *testing.T) { Status: agglayertypes.Settled, NewLocalExitRoot: bridgesynctypes.EmptyLER, }, - mockFn: func(aggchainQuerier *mocks.AggchainFEPRollupQuerier, agglayerClient *agglayermocks.AgglayerClientMock, bridgeSyncer *mocks.L2BridgeSyncer) { + mockFn: func(aggchainQuerier *mocks.AggchainFEPRollupQuerier, agglayerClient *agglayermocks.AgglayerClientMock, bridgeSyncer *mocks.L2BridgeSyncer, claimSyncer *claimsynctypesmocks.ClaimSyncer) { agglayerClient.EXPECT().GetNetworkInfo(ctx, uint32(0)).Return(agglayertypes.NetworkInfo{}, nil) aggchainQuerier.EXPECT().GetLastSettledL2Block().Return(uint64(0), nil) aggchainQuerier.EXPECT().StartL2Block().Return(uint64(0)) @@ -181,13 +183,15 @@ func TestGetLastSettledCertificateToBlock(t *testing.T) { mockAggchainFEPQuerier := mocks.NewAggchainFEPRollupQuerier(t) mockAgglayerClient := agglayermocks.NewAgglayerClientMock(t) mockL2BridgeSyncer := mocks.NewL2BridgeSyncer(t) + mockClaimSyncer := claimsynctypesmocks.NewClaimSyncer(t) if tc.mockFn != nil { - tc.mockFn(mockAggchainFEPQuerier, mockAgglayerClient, mockL2BridgeSyncer) + tc.mockFn(mockAggchainFEPQuerier, mockAgglayerClient, mockL2BridgeSyncer, mockClaimSyncer) } certRangeQuerier := NewCertificateQuerier( mockL2BridgeSyncer, + mockClaimSyncer, mockAggchainFEPQuerier, mockAgglayerClient, bridgesynctypes.EmptyLER, @@ -204,6 +208,7 @@ func TestGetLastSettledCertificateToBlock(t 
*testing.T) { mockAgglayerClient.AssertExpectations(t) mockL2BridgeSyncer.AssertExpectations(t) mockAggchainFEPQuerier.AssertExpectations(t) + mockClaimSyncer.AssertExpectations(t) }) } } @@ -213,7 +218,7 @@ func TestGetNewCertificateToBlock(t *testing.T) { ctx := t.Context() - testClaim := bridgesync.Claim{ + testClaim := claimsynctypes.Claim{ GlobalIndex: bridgesync.GenerateGlobalIndex(true, 0, 1), IsMessage: false, Metadata: crypto.Keccak256([]byte("test metadata")), @@ -230,7 +235,7 @@ func TestGetNewCertificateToBlock(t *testing.T) { testCases := []struct { name string certificate *agglayertypes.Certificate - mockFn func(*mocks.L2BridgeSyncer) + mockFn func(*mocks.L2BridgeSyncer, *claimsynctypesmocks.ClaimSyncer) expectedErr string expectedBlock uint64 }{ @@ -243,13 +248,13 @@ func TestGetNewCertificateToBlock(t *testing.T) { testIbe, }, }, - mockFn: func(bridgeSyncer *mocks.L2BridgeSyncer) { + mockFn: func(bridgeSyncer *mocks.L2BridgeSyncer, claimSyncer *claimsynctypesmocks.ClaimSyncer) { bridgeSyncer.EXPECT().GetExitRootByHash(ctx, common.HexToHash("0x123")).Return(&treetypes.Root{ BlockNum: uint64(100), }, nil) claim := testClaim claim.BlockNum = 150 // Simulate a claim with block number 150 - bridgeSyncer.EXPECT().GetClaimsByGlobalIndex(ctx, testIbe.GlobalIndex.ToBigInt()).Return([]bridgesync.Claim{claim}, nil) + claimSyncer.EXPECT().GetClaimsByGlobalIndex(ctx, testIbe.GlobalIndex.ToBigInt()).Return([]claimsynctypes.Claim{claim}, nil) }, expectedBlock: 150, // max of 100, 150 }, @@ -259,10 +264,10 @@ func TestGetNewCertificateToBlock(t *testing.T) { NewLocalExitRoot: bridgesynctypes.EmptyLER, ImportedBridgeExits: []*agglayertypes.ImportedBridgeExit{testIbe}, }, - mockFn: func(bridgeSyncer *mocks.L2BridgeSyncer) { + mockFn: func(bridgeSyncer *mocks.L2BridgeSyncer, claimSyncer *claimsynctypesmocks.ClaimSyncer) { claim := testClaim claim.BlockNum = 75 // Simulate a claim with block number 75 - bridgeSyncer.EXPECT().GetClaimsByGlobalIndex(ctx, 
testIbe.GlobalIndex.ToBigInt()).Return([]bridgesync.Claim{claim}, nil) + claimSyncer.EXPECT().GetClaimsByGlobalIndex(ctx, testIbe.GlobalIndex.ToBigInt()).Return([]claimsynctypes.Claim{claim}, nil) }, expectedBlock: 75, // max of 0, 75 }, @@ -272,7 +277,7 @@ func TestGetNewCertificateToBlock(t *testing.T) { NewLocalExitRoot: common.HexToHash("0x456"), ImportedBridgeExits: []*agglayertypes.ImportedBridgeExit{}, }, - mockFn: func(bridgeSyncer *mocks.L2BridgeSyncer) { + mockFn: func(bridgeSyncer *mocks.L2BridgeSyncer, claimSyncer *claimsynctypesmocks.ClaimSyncer) { bridgeSyncer.EXPECT().GetExitRootByHash(ctx, common.HexToHash("0x456")).Return(&treetypes.Root{ BlockNum: uint64(200), }, nil) @@ -293,7 +298,7 @@ func TestGetNewCertificateToBlock(t *testing.T) { NewLocalExitRoot: common.HexToHash("0x789"), ImportedBridgeExits: nil, }, - mockFn: func(bridgeSyncer *mocks.L2BridgeSyncer) { + mockFn: func(bridgeSyncer *mocks.L2BridgeSyncer, claimSyncer *claimsynctypesmocks.ClaimSyncer) { bridgeSyncer.EXPECT().GetExitRootByHash(ctx, common.HexToHash("0x789")).Return(&treetypes.Root{ BlockNum: uint64(300), }, nil) @@ -306,7 +311,7 @@ func TestGetNewCertificateToBlock(t *testing.T) { NewLocalExitRoot: common.HexToHash("0xabc"), ImportedBridgeExits: []*agglayertypes.ImportedBridgeExit{}, }, - mockFn: func(bridgeSyncer *mocks.L2BridgeSyncer) { + mockFn: func(bridgeSyncer *mocks.L2BridgeSyncer, claimSyncer *claimsynctypesmocks.ClaimSyncer) { bridgeSyncer.EXPECT().GetExitRootByHash(ctx, common.HexToHash("0xabc")).Return(nil, errors.New("exit root not found")) }, expectedErr: "failed to resolve the bridge exit block number for NewLocalExitRoot", @@ -317,8 +322,8 @@ func TestGetNewCertificateToBlock(t *testing.T) { NewLocalExitRoot: bridgesynctypes.EmptyLER, ImportedBridgeExits: []*agglayertypes.ImportedBridgeExit{testIbe}, }, - mockFn: func(bridgeSyncer *mocks.L2BridgeSyncer) { - bridgeSyncer.EXPECT().GetClaimsByGlobalIndex(ctx, testIbe.GlobalIndex.ToBigInt()).Return(nil, 
errors.New("claim not found")) + mockFn: func(bridgeSyncer *mocks.L2BridgeSyncer, claimSyncer *claimsynctypesmocks.ClaimSyncer) { + claimSyncer.EXPECT().GetClaimsByGlobalIndex(ctx, testIbe.GlobalIndex.ToBigInt()).Return(nil, errors.New("claim not found")) }, expectedErr: "failed to get claim(s) by global index", }, @@ -332,11 +337,11 @@ func TestGetNewCertificateToBlock(t *testing.T) { testIbe, // Last one - should be used }, }, - mockFn: func(bridgeSyncer *mocks.L2BridgeSyncer) { + mockFn: func(bridgeSyncer *mocks.L2BridgeSyncer, claimSyncer *claimsynctypesmocks.ClaimSyncer) { // Mock claim by global index for last imported bridge exit only claim := testClaim claim.BlockNum = 250 // Simulate a claim with block number 250 - bridgeSyncer.EXPECT().GetClaimsByGlobalIndex(ctx, testIbe.GlobalIndex.ToBigInt()).Return([]bridgesync.Claim{claim}, nil) + claimSyncer.EXPECT().GetClaimsByGlobalIndex(ctx, testIbe.GlobalIndex.ToBigInt()).Return([]claimsynctypes.Claim{claim}, nil) }, expectedBlock: 250, // max of 0, 250 }, @@ -346,14 +351,14 @@ func TestGetNewCertificateToBlock(t *testing.T) { NewLocalExitRoot: common.HexToHash("0xdef"), ImportedBridgeExits: []*agglayertypes.ImportedBridgeExit{testIbe}, }, - mockFn: func(bridgeSyncer *mocks.L2BridgeSyncer) { + mockFn: func(bridgeSyncer *mocks.L2BridgeSyncer, claimSyncer *claimsynctypesmocks.ClaimSyncer) { bridgeSyncer.EXPECT().GetExitRootByHash(ctx, common.HexToHash("0xdef")).Return(&treetypes.Root{ BlockNum: uint64(400), }, nil) claim := testClaim claim.BlockNum = 100 // Simulate a claim with block number 100 - bridgeSyncer.EXPECT().GetClaimsByGlobalIndex(ctx, testIbe.GlobalIndex.ToBigInt()).Return([]bridgesync.Claim{claim}, nil) + claimSyncer.EXPECT().GetClaimsByGlobalIndex(ctx, testIbe.GlobalIndex.ToBigInt()).Return([]claimsynctypes.Claim{claim}, nil) }, expectedBlock: 400, // max of 400, 100 }, @@ -366,13 +371,15 @@ func TestGetNewCertificateToBlock(t *testing.T) { mockAggchainFEPQuerier := 
mocks.NewAggchainFEPRollupQuerier(t) mockAgglayerClient := agglayermocks.NewAgglayerClientMock(t) mockL2BridgeSyncer := mocks.NewL2BridgeSyncer(t) + mockClaimSyncer := claimsynctypesmocks.NewClaimSyncer(t) if tc.mockFn != nil { - tc.mockFn(mockL2BridgeSyncer) + tc.mockFn(mockL2BridgeSyncer, mockClaimSyncer) } certQuerier := NewCertificateQuerier( mockL2BridgeSyncer, + mockClaimSyncer, mockAggchainFEPQuerier, mockAgglayerClient, bridgesynctypes.EmptyLER, @@ -387,6 +394,7 @@ func TestGetNewCertificateToBlock(t *testing.T) { } mockL2BridgeSyncer.AssertExpectations(t) + mockClaimSyncer.AssertExpectations(t) }) } } @@ -473,6 +481,7 @@ func TestCalculateCertificateTypeFromToBlock(t *testing.T) { mockAggchainFEPQuerier := mocks.NewAggchainFEPRollupQuerier(t) mockAgglayerClient := agglayermocks.NewAgglayerClientMock(t) mockL2BridgeSyncer := mocks.NewL2BridgeSyncer(t) + mockClaimSyncer := claimsynctypesmocks.NewClaimSyncer(t) // Always expect IsFEP call mockAggchainFEPQuerier.EXPECT().IsFEP().Return(tc.isFEP).Once() @@ -484,6 +493,7 @@ func TestCalculateCertificateTypeFromToBlock(t *testing.T) { certQuerier := NewCertificateQuerier( mockL2BridgeSyncer, + mockClaimSyncer, mockAggchainFEPQuerier, mockAgglayerClient, bridgesynctypes.EmptyLER, @@ -598,6 +608,7 @@ func TestCalculateCertificateType(t *testing.T) { mockAggchainFEPQuerier := mocks.NewAggchainFEPRollupQuerier(t) mockAgglayerClient := agglayermocks.NewAgglayerClientMock(t) mockL2BridgeSyncer := mocks.NewL2BridgeSyncer(t) + mockClaimSyncer := claimsynctypesmocks.NewClaimSyncer(t) // Only expect calls to querier if AggchainData is nil (fallback case) if tc.certificate.AggchainData == nil { @@ -609,6 +620,7 @@ func TestCalculateCertificateType(t *testing.T) { certQuerier := NewCertificateQuerier( mockL2BridgeSyncer, + mockClaimSyncer, mockAggchainFEPQuerier, mockAgglayerClient, bridgesynctypes.EmptyLER, @@ -627,7 +639,7 @@ func TestGetBlockNumFromGlobalIndex(t *testing.T) { ctx := t.Context() - testClaim := 
bridgesync.Claim{ + testClaim := claimsynctypes.Claim{ GlobalIndex: bridgesync.GenerateGlobalIndex(true, 0, 1), IsMessage: false, Metadata: crypto.Keccak256([]byte("test metadata")), @@ -646,7 +658,7 @@ func TestGetBlockNumFromGlobalIndex(t *testing.T) { name string globalIndex *agglayertypes.GlobalIndex bridgeExitHash common.Hash - mockFn func(*mocks.L2BridgeSyncer) + mockFn func(*claimsynctypesmocks.ClaimSyncer) expectedErr string expectedBlockNum uint64 }{ @@ -654,8 +666,8 @@ func TestGetBlockNumFromGlobalIndex(t *testing.T) { name: "successful match - single claim", globalIndex: testIbe.GlobalIndex, bridgeExitHash: testIbe.BridgeExit.Hash(), - mockFn: func(bridgeSyncer *mocks.L2BridgeSyncer) { - bridgeSyncer.EXPECT().GetClaimsByGlobalIndex(ctx, testIbe.GlobalIndex.ToBigInt()).Return([]bridgesync.Claim{testClaim}, nil) + mockFn: func(claimSyncer *claimsynctypesmocks.ClaimSyncer) { + claimSyncer.EXPECT().GetClaimsByGlobalIndex(ctx, testIbe.GlobalIndex.ToBigInt()).Return([]claimsynctypes.Claim{testClaim}, nil) }, expectedBlockNum: 150, }, @@ -663,7 +675,7 @@ func TestGetBlockNumFromGlobalIndex(t *testing.T) { name: "successful match - multiple claims with matching hash", globalIndex: testIbe.GlobalIndex, bridgeExitHash: testIbe.BridgeExit.Hash(), - mockFn: func(bridgeSyncer *mocks.L2BridgeSyncer) { + mockFn: func(claimSyncer *claimsynctypesmocks.ClaimSyncer) { claim1 := testClaim claim1.BlockNum = 100 claim1.Amount = big.NewInt(50) // Different amount to generate different hash @@ -671,7 +683,7 @@ func TestGetBlockNumFromGlobalIndex(t *testing.T) { claim2 := testClaim claim2.BlockNum = 200 // This should be returned - bridgeSyncer.EXPECT().GetClaimsByGlobalIndex(ctx, testIbe.GlobalIndex.ToBigInt()).Return([]bridgesync.Claim{claim1, claim2}, nil) + claimSyncer.EXPECT().GetClaimsByGlobalIndex(ctx, testIbe.GlobalIndex.ToBigInt()).Return([]claimsynctypes.Claim{claim1, claim2}, nil) }, expectedBlockNum: 200, }, @@ -679,8 +691,8 @@ func TestGetBlockNumFromGlobalIndex(t 
*testing.T) { name: "no matching bridge exit hash", globalIndex: testIbe.GlobalIndex, bridgeExitHash: common.HexToHash("0x999"), // Different hash that won't match - mockFn: func(bridgeSyncer *mocks.L2BridgeSyncer) { - bridgeSyncer.EXPECT().GetClaimsByGlobalIndex(ctx, testIbe.GlobalIndex.ToBigInt()).Return([]bridgesync.Claim{testClaim}, nil) + mockFn: func(claimSyncer *claimsynctypesmocks.ClaimSyncer) { + claimSyncer.EXPECT().GetClaimsByGlobalIndex(ctx, testIbe.GlobalIndex.ToBigInt()).Return([]claimsynctypes.Claim{testClaim}, nil) }, expectedErr: "no claim found for bridge exit hash", }, @@ -688,8 +700,8 @@ func TestGetBlockNumFromGlobalIndex(t *testing.T) { name: "empty claims slice", globalIndex: testIbe.GlobalIndex, bridgeExitHash: testIbe.BridgeExit.Hash(), - mockFn: func(bridgeSyncer *mocks.L2BridgeSyncer) { - bridgeSyncer.EXPECT().GetClaimsByGlobalIndex(ctx, testIbe.GlobalIndex.ToBigInt()).Return([]bridgesync.Claim{}, nil) + mockFn: func(claimSyncer *claimsynctypesmocks.ClaimSyncer) { + claimSyncer.EXPECT().GetClaimsByGlobalIndex(ctx, testIbe.GlobalIndex.ToBigInt()).Return([]claimsynctypes.Claim{}, nil) }, expectedErr: "no claim found for bridge exit hash", }, @@ -697,8 +709,8 @@ func TestGetBlockNumFromGlobalIndex(t *testing.T) { name: "error getting claims by global index", globalIndex: testIbe.GlobalIndex, bridgeExitHash: testIbe.BridgeExit.Hash(), - mockFn: func(bridgeSyncer *mocks.L2BridgeSyncer) { - bridgeSyncer.EXPECT().GetClaimsByGlobalIndex(ctx, testIbe.GlobalIndex.ToBigInt()).Return(nil, errors.New("database error")) + mockFn: func(claimSyncer *claimsynctypesmocks.ClaimSyncer) { + claimSyncer.EXPECT().GetClaimsByGlobalIndex(ctx, testIbe.GlobalIndex.ToBigInt()).Return(nil, errors.New("database error")) }, expectedErr: "failed to get claim(s) by global index", }, @@ -708,14 +720,14 @@ func TestGetBlockNumFromGlobalIndex(t *testing.T) { t.Run(tc.name, func(t *testing.T) { t.Parallel() - mockL2BridgeSyncer := mocks.NewL2BridgeSyncer(t) + mockClaimSyncer 
:= claimsynctypesmocks.NewClaimSyncer(t) if tc.mockFn != nil { - tc.mockFn(mockL2BridgeSyncer) + tc.mockFn(mockClaimSyncer) } certQuerier := &certificateQuerier{ - l2BridgeSyncer: mockL2BridgeSyncer, + l2ClaimSyncer: mockClaimSyncer, } blockNum, err := certQuerier.getBlockNumFromGlobalIndex(ctx, tc.globalIndex.ToBigInt(), tc.bridgeExitHash) @@ -726,7 +738,7 @@ func TestGetBlockNumFromGlobalIndex(t *testing.T) { require.Equal(t, tc.expectedBlockNum, blockNum) } - mockL2BridgeSyncer.AssertExpectations(t) + mockClaimSyncer.AssertExpectations(t) }) } } diff --git a/aggsender/trigger/trigger_by_bridge_test.go b/aggsender/trigger/trigger_by_bridge_test.go index b949dd59d..e027ce1b0 100644 --- a/aggsender/trigger/trigger_by_bridge_test.go +++ b/aggsender/trigger/trigger_by_bridge_test.go @@ -152,7 +152,7 @@ func TestPreconfTriggerForceTriggerEvent(t *testing.T) { // Create a mock subscription channel syncCh := make(chan sync.Block, 3) mockL2BridgeSync.EXPECT().SubscribeToSync("aggsender").Return(syncCh) - mockL2BridgeSync.EXPECT().GetLastProcessedBlock(mock.Anything).Return(uint64(12345), nil).Once() + mockL2BridgeSync.EXPECT().GetLastProcessedBlock(mock.Anything).Return(uint64(12345), true, nil).Once() sut := newPreconfTrigger( logger, mockL2BridgeSync, diff --git a/aggsender/types/certificate_build_params_test.go b/aggsender/types/certificate_build_params_test.go index 702ec5769..e70d73c26 100644 --- a/aggsender/types/certificate_build_params_test.go +++ b/aggsender/types/certificate_build_params_test.go @@ -5,7 +5,7 @@ import ( "testing" "github.com/agglayer/aggkit/bridgesync" - bridgesynctypes "github.com/agglayer/aggkit/bridgesync/types" + claimsynctypes "github.com/agglayer/aggkit/claimsync/types" "github.com/stretchr/testify/require" ) @@ -77,8 +77,8 @@ func TestCertificateBuildParamsString(t *testing.T) { FromBlock: 100, ToBlock: 200, Bridges: []bridgesync.Bridge{}, - Claims: []bridgesync.Claim{}, - Unclaims: []bridgesynctypes.Unclaim{}, + Claims: 
[]claimsynctypes.Claim{}, + Unclaims: []claimsynctypes.Unclaim{}, CreatedAt: 1234567890, }, expected: "Type: pp FromBlock: 100, ToBlock: 200, numBridges: 0, numClaims: 0, numUnclaims: 0, createdAt: 1234567890", @@ -93,10 +93,10 @@ func TestCertificateBuildParamsString(t *testing.T) { {BlockNum: 1001}, {BlockNum: 1002}, }, - Claims: []bridgesync.Claim{ + Claims: []claimsynctypes.Claim{ {BlockNum: 1500}, }, - Unclaims: []bridgesynctypes.Unclaim{ + Unclaims: []claimsynctypes.Unclaim{ {BlockNumber: 1600}, {BlockNumber: 1700}, {BlockNumber: 1800}, @@ -114,8 +114,8 @@ func TestCertificateBuildParamsString(t *testing.T) { Bridges: []bridgesync.Bridge{ {BlockNum: 550}, }, - Claims: []bridgesync.Claim{}, - Unclaims: []bridgesynctypes.Unclaim{}, + Claims: []claimsynctypes.Claim{}, + Unclaims: []claimsynctypes.Unclaim{}, CreatedAt: 1111111111, }, expected: "Type: optimistic FromBlock: 500, ToBlock: 600, numBridges: 1, numClaims: 0, numUnclaims: 0, createdAt: 1111111111", @@ -127,8 +127,8 @@ func TestCertificateBuildParamsString(t *testing.T) { FromBlock: 1, ToBlock: 10, Bridges: []bridgesync.Bridge{}, - Claims: []bridgesync.Claim{}, - Unclaims: []bridgesynctypes.Unclaim{}, + Claims: []claimsynctypes.Claim{}, + Unclaims: []claimsynctypes.Unclaim{}, CreatedAt: 0, }, expected: "Type: FromBlock: 1, ToBlock: 10, numBridges: 0, numClaims: 0, numUnclaims: 0, createdAt: 0", @@ -144,11 +144,11 @@ func TestCertificateBuildParamsString(t *testing.T) { {BlockNum: 2000000000}, {BlockNum: 3000000000}, }, - Claims: []bridgesync.Claim{ + Claims: []claimsynctypes.Claim{ {BlockNum: 4000000000}, {BlockNum: 5000000000}, }, - Unclaims: []bridgesynctypes.Unclaim{}, + Unclaims: []claimsynctypes.Unclaim{}, CreatedAt: 4294967295, }, expected: "Type: pp FromBlock: 999999999, ToBlock: 9999999999, numBridges: 3, numClaims: 2, numUnclaims: 0, createdAt: 4294967295", @@ -190,7 +190,7 @@ func TestAdjustToBlock(t *testing.T) { Bridges: []bridgesync.Bridge{ {BlockNum: 150, DepositCount: 1}, }, - Claims: 
[]bridgesync.Claim{ + Claims: []claimsynctypes.Claim{ {BlockNum: 180}, }, }, @@ -217,11 +217,11 @@ func TestAdjustToBlock(t *testing.T) { {BlockNum: 180, DepositCount: 2}, {BlockNum: 250, DepositCount: 3}, // This should be excluded }, - Claims: []bridgesync.Claim{ + Claims: []claimsynctypes.Claim{ {BlockNum: 150}, {BlockNum: 220}, // This should be excluded }, - Unclaims: []bridgesynctypes.Unclaim{ + Unclaims: []claimsynctypes.Unclaim{ {BlockNumber: 140}, {BlockNumber: 280}, // This should be excluded }, @@ -281,8 +281,8 @@ func TestAdjustToBlock(t *testing.T) { ToBlock: 200, CertificateType: CertificateTypeOptimistic, Bridges: []bridgesync.Bridge{}, - Claims: []bridgesync.Claim{}, - Unclaims: []bridgesynctypes.Unclaim{}, + Claims: []claimsynctypes.Claim{}, + Unclaims: []claimsynctypes.Unclaim{}, }, newToBlock: 150, validate: func(t *testing.T, result *CertificateBuildParams) { @@ -340,7 +340,7 @@ func TestEstimateSize(t *testing.T) { FromBlock: 100, ToBlock: 200, Bridges: make([]bridgesync.Bridge, 50), - Claims: make([]bridgesync.Claim, 150), + Claims: make([]claimsynctypes.Claim, 150), } estimatedSize := sut.EstimatedSize() diff --git a/aggsender/types/interfaces.go b/aggsender/types/interfaces.go index e145dda37..317b838e8 100644 --- a/aggsender/types/interfaces.go +++ b/aggsender/types/interfaces.go @@ -106,11 +106,11 @@ type L2BridgeSyncer interface { GetBlockByLER(ctx context.Context, ler common.Hash) (uint64, error) GetExitRootByIndex(ctx context.Context, index uint32) (treetypes.Root, error) GetBridges(ctx context.Context, fromBlock, toBlock uint64) ([]bridgesync.Bridge, error) - //GetClaims(ctx context.Context, fromBlock, toBlock uint64) ([]claimsynctypes.Claim, error) + // GetClaims(ctx context.Context, fromBlock, toBlock uint64) ([]claimsynctypes.Claim, error) OriginNetwork() uint32 GetLastProcessedBlock(ctx context.Context) (uint64, bool, error) GetExitRootByHash(ctx context.Context, root common.Hash) (*treetypes.Root, error) - 
//GetClaimsByGlobalIndex(ctx context.Context, globalIndex *big.Int) ([]claimsynctypes.Claim, error) + // GetClaimsByGlobalIndex(ctx context.Context, globalIndex *big.Int) ([]claimsynctypes.Claim, error) SubscribeToSync(subscriberID string) <-chan sync.Block SubscribeToNewBridge(subscriberID string) <-chan uint64 } diff --git a/bridgeservice/bridge.go b/bridgeservice/bridge.go index 7cb94d0cd..c20245038 100644 --- a/bridgeservice/bridge.go +++ b/bridgeservice/bridge.go @@ -30,6 +30,7 @@ import ( "github.com/agglayer/aggkit/bridgeservice/metrics" "github.com/agglayer/aggkit/bridgeservice/types" "github.com/agglayer/aggkit/bridgesync" + claimsynctypes "github.com/agglayer/aggkit/claimsync/types" aggkitcommon "github.com/agglayer/aggkit/common" "github.com/agglayer/aggkit/db" "github.com/agglayer/aggkit/l1infotreesync" @@ -105,6 +106,8 @@ type BridgeService struct { injectedGERs L2GERSyncer bridgeL1 Bridger bridgeL2 Bridger + claimL1 Claimer + claimL2 Claimer router *gin.Engine } @@ -116,7 +119,9 @@ func New( l1InfoTree L1InfoTreeSyncer, injectedGERs L2GERSyncer, bridgeL1 Bridger, + claimL1 Claimer, bridgeL2 Bridger, + claimL2 Claimer, ) *BridgeService { cfg.Logger.Infof("starting bridge service (network id=%d, address=%s)", cfg.NetworkID, cfg.Address) @@ -148,6 +153,8 @@ func New( injectedGERs: injectedGERs, bridgeL1: bridgeL1, bridgeL2: bridgeL2, + claimL1: claimL1, + claimL2: claimL2, router: router, } @@ -201,6 +208,9 @@ func (b *BridgeService) registerRoutes() { bridgeGroup := b.router.Group(BridgeV1Prefix) { bridgeGroup.GET("/bridges", b.GetBridgesHandler) + bridgeGroup.GET("/claims", b.GetClaimsHandler) + bridgeGroup.GET("/unset-claims", b.GetUnsetClaimsHandler) + bridgeGroup.GET("/set-claims", b.GetSetClaimsHandler) bridgeGroup.GET("/token-mappings", b.GetTokenMappingsHandler) bridgeGroup.GET("/legacy-token-migrations", b.GetLegacyTokenMigrationsHandler) bridgeGroup.GET("/l1-info-tree-index", b.L1InfoTreeIndexForBridgeHandler) @@ -209,6 +219,7 @@ func (b 
*BridgeService) registerRoutes() { bridgeGroup.GET("/last-reorg-event", b.GetLastReorgEventHandler) bridgeGroup.GET("/sync-status", b.GetSyncStatusHandler) bridgeGroup.GET("/removed-gers", b.GetRemoveGEREventsHandler) + bridgeGroup.GET("/claims-by-ger", b.GetClaimsByGERHandler) bridgeGroup.GET("/bridge-by-deposit-count", b.GetBridgeByDepositCountHandler) bridgeGroup.GET("/bridges-by-content", b.GetBridgesByContentHandler) @@ -410,8 +421,290 @@ func (b *BridgeService) GetBridgesHandler(c *gin.Context) { }) } +// GetClaimsHandler retrieves paginated claims for a given network. +// +// @Summary Get claims +// @Description Returns a paginated list of claims for the specified network. +// @Tags claims +// @Param network_id query uint32 true "Origin network ID" +// @Param page_number query uint32 false "Page number (default 1)" +// @Param page_size query uint32 false "Page size (default 100)" +// @Param network_ids query []uint32 false "Filter by one or more source network IDs (maximum 5 allowed)" +// @Param include_all_fields query bool false "Whether to include full response fields (default false)" +// @Param global_index query uint32 false "Filter by global index" +// @Produce json +// @Success 200 {object} types.ClaimsResult +// @Failure 400 {object} types.ErrorResponse "Bad Request" +// @Failure 500 {object} types.ErrorResponse "Internal Server Error" +// @Router /claims [get] +func (b *BridgeService) GetClaimsHandler(c *gin.Context) { + b.logger.Debugf("GetClaims request received (network id=%s, page number=%s, page size=%s, "+ + "include_all_fields=%s, global_index=%s)", + c.Query(networkIDParam), c.Query(pageNumberParam), c.Query(pageSizeParam), + c.Query(includeAllFields), c.Query(globalIndexParam)) + + statusCode := http.StatusOK + startTime := time.Now() + defer func() { + reportMetrics(metrics.GetClaimsReq, statusCode, startTime) + }() + + networkID, err := parseUintQuery(c, networkIDParam, true, uint32(0)) + if err != nil { + b.logger.Warnf(errNetworkID, 
err) + statusCode = http.StatusBadRequest + c.JSON(statusCode, gin.H{"error": err.Error()}) + return + } + + networkIDs, err := parseNetworkIDSliceParam(c, networkIDsParam) + if err != nil { + b.logger.Warnf("invalid network IDs parameter: %v", err) + statusCode = http.StatusBadRequest + c.JSON(statusCode, gin.H{"error": fmt.Sprintf("invalid %s parameter: %s", networkIDsParam, err)}) + return + } + + // Parse include_all_fields parameter (default to false) + includeAllFieldsFlag := false + if includeAllFieldsStr := c.Query(includeAllFields); includeAllFieldsStr != "" { + includeAllFieldsFlag, err = strconv.ParseBool(includeAllFieldsStr) + if err != nil { + b.logger.Warnf("invalid include_all_fields parameter: %v", err) + statusCode = http.StatusBadRequest + c.JSON(statusCode, gin.H{"error": "invalid include_all_fields parameter"}) + return + } + } + + globalIndex, ctx, cancel, pageNumber, pageSize, ok := b.parseGlobalIndexAndSetupRequest(c, &statusCode) + if !ok { + return + } + defer cancel() + + b.logger.Debugf( + "fetching claims (network id=%d, page=%d, size=%d, "+ + "network_ids=%v, include_all_fields=%t, global_index=%d)", + networkID, pageNumber, pageSize, networkIDs, includeAllFieldsFlag, globalIndex) + + var ( + claims []*claimsynctypes.Claim + count int + ) + + switch networkID { + case mainnetNetworkID: + if b.bridgeL1 == nil { + statusCode = http.StatusServiceUnavailable + c.JSON(statusCode, + gin.H{"error": "L1 bridge syncer is not available"}) + return + } + + claims, count, err = b.claimL1.GetClaimsPaged(ctx, pageNumber, pageSize, networkIDs, globalIndex) + if err != nil { + b.logger.Warnf("failed to get claims for L1 network: %v", err) + statusCode = http.StatusInternalServerError + c.JSON(statusCode, + gin.H{"error": fmt.Sprintf("failed to get claims for the L1 network, error: %s", err)}) + return + } + case b.networkID: + if b.bridgeL2 == nil { + statusCode = http.StatusServiceUnavailable + c.JSON(statusCode, + gin.H{"error": "L2 bridge syncer is 
not available"}) + return + } + + claims, count, err = b.claimL2.GetClaimsPaged(ctx, pageNumber, pageSize, networkIDs, globalIndex) + if err != nil { + b.logger.Warnf("failed to get claims for L2 network (ID=%d): %v", networkID, err) + statusCode = http.StatusInternalServerError + c.JSON(statusCode, + gin.H{"error": fmt.Sprintf("failed to get claims for the L2 network (ID=%d), error: %s", networkID, err)}) + return + } + default: + b.logger.Warnf(errNetworkID, networkID) + statusCode = http.StatusBadRequest + c.JSON(statusCode, gin.H{"error": fmt.Sprintf(errNetworkID, networkID)}) + return + } + + // Use conditional function to create claim responses + claimResponses := make([]*types.ClaimResponse, len(claims)) + for i, claim := range claims { + claimResponses[i] = NewClaimResponse(claim, includeAllFieldsFlag) + } + + c.JSON(statusCode, + types.ClaimsResult{ + Claims: claimResponses, + Count: count, + }) +} + +// @Summary Get unset claims +// @Description Returns unset claims for the configured L2 network, paginated. +// Note: unset claims are only available for L2 networks, not L1. 
+// @Tags unset-claims +// @Param page_number query int false "Page number" +// @Param page_size query int false "Page size" +// @Param global_index query string false "Filter by global index" +// @Produce json +// @Success 200 {object} types.UnsetClaimsResult +// @Failure 400 {object} types.ErrorResponse "Bad Request - Invalid parameters" +// @Failure 500 {object} types.ErrorResponse "Internal Server Error" +// @Failure 503 {object} types.ErrorResponse "Service Unavailable - L2 bridge syncer not available" +// @Router /unset-claims [get] +func (b *BridgeService) GetUnsetClaimsHandler(c *gin.Context) { + b.logger.Debugf("GetUnsetClaims request received (page number=%s, page size=%s, global_index=%s)", + c.Query(pageNumberParam), c.Query(pageSizeParam), c.Query(globalIndexParam)) + + statusCode := http.StatusOK + startTime := time.Now() + defer func() { + reportMetrics(metrics.GetUnsetClaimsReq, statusCode, startTime) + }() + + if b.bridgeL2 == nil { + statusCode = http.StatusServiceUnavailable + c.JSON(statusCode, + gin.H{"error": "L2 bridge syncer is not available"}) + return + } + + globalIndex, ctx, cancel, pageNumber, pageSize, ok := b.parseGlobalIndexAndSetupRequest(c, &statusCode) + if !ok { + return + } + defer cancel() + + b.logger.Debugf("fetching unset claims for L2 network (network id=%d, page=%d, size=%d, global_index=%v)", + b.networkID, pageNumber, pageSize, globalIndex) + + var ( + unsetClaims []*claimsynctypes.UnsetClaim + count int + err error + ) + + unsetClaims, count, err = b.claimL2.GetUnsetClaimsPaged(ctx, pageNumber, pageSize, globalIndex) + if err != nil { + b.logger.Warnf("failed to get unset claims for L2 network (ID=%d): %v", b.networkID, err) + statusCode = http.StatusInternalServerError + c.JSON(statusCode, + gin.H{"error": fmt.Sprintf("failed to get unset claims for the L2 network (ID=%d), error: %s", b.networkID, err)}) + return + } + + // Convert unset claims to response format + unsetClaimResponses := 
make([]*types.UnsetClaimResponse, len(unsetClaims)) + for i, unsetClaim := range unsetClaims { + unsetClaimResponses[i] = &types.UnsetClaimResponse{ + BlockNum: unsetClaim.BlockNum, + BlockPos: unsetClaim.BlockPos, + TxHash: types.Hash(unsetClaim.TxHash.Hex()), + GlobalIndex: types.BigIntString(unsetClaim.GlobalIndex.String()), + UnsetGlobalIndexHashChain: types.Hash(unsetClaim.UnsetGlobalIndexHashChain.Hex()), + CreatedAt: unsetClaim.CreatedAt, + } + } + + c.JSON(statusCode, + types.UnsetClaimsResult{ + UnsetClaims: unsetClaimResponses, + Count: count, + }) +} + +// @Summary Get set claims +// @Description Returns set claims for the configured L2 network, paginated. +// Note: set claims are only available for L2 networks, not L1. +// @Tags set-claims +// @Param page_number query int false "Page number" +// @Param page_size query int false "Page size" +// @Param global_index query string false "Filter by global index" +// @Produce json +// @Success 200 {object} types.SetClaimsResult +// @Failure 400 {object} types.ErrorResponse "Bad Request - Invalid parameters" +// @Failure 500 {object} types.ErrorResponse "Internal Server Error" +// @Failure 503 {object} types.ErrorResponse "Service Unavailable - L2 bridge syncer not available" +// @Router /set-claims [get] +func (b *BridgeService) GetSetClaimsHandler(c *gin.Context) { + b.logger.Debugf("GetSetClaims request received (page number=%s, page size=%s, global_index=%s)", + c.Query(pageNumberParam), c.Query(pageSizeParam), c.Query(globalIndexParam)) + + statusCode := http.StatusOK + startTime := time.Now() + defer func() { + reportMetrics(metrics.GetSetClaimsReq, statusCode, startTime) + }() + + if b.bridgeL2 == nil { + statusCode = http.StatusServiceUnavailable + c.JSON(statusCode, + gin.H{"error": "L2 bridge syncer is not available"}) + return + } + + globalIndex, ctx, cancel, pageNumber, pageSize, ok := b.parseGlobalIndexAndSetupRequest(c, &statusCode) + if !ok { + return + } + defer cancel() + + 
b.logger.Debugf("fetching set claims for L2 network (network id=%d, page=%d, size=%d, global_index=%v)", + b.networkID, pageNumber, pageSize, globalIndex) + + var ( + setClaims []*claimsynctypes.SetClaim + count int + err error + ) + + setClaims, count, err = b.claimL2.GetSetClaimsPaged(ctx, pageNumber, pageSize, globalIndex) + if err != nil { + b.logger.Warnf("failed to get set claims for L2 network (ID=%d): %v", b.networkID, err) + statusCode = http.StatusInternalServerError + c.JSON(statusCode, + gin.H{"error": fmt.Sprintf("failed to get set claims for the L2 network (ID=%d), error: %s", b.networkID, err)}) + return + } + // Convert set claims to response format + setClaimResponses := make([]*types.SetClaimResponse, len(setClaims)) + for i, setClaim := range setClaims { + setClaimResponses[i] = &types.SetClaimResponse{ + BlockNum: setClaim.BlockNum, + BlockPos: setClaim.BlockPos, + TxHash: types.Hash(setClaim.TxHash.Hex()), + GlobalIndex: types.BigIntString(setClaim.GlobalIndex.String()), + CreatedAt: setClaim.CreatedAt, + } + } + c.JSON(statusCode, + types.SetClaimsResult{ + SetClaims: setClaimResponses, + Count: count, + }) +} + +// @Summary Get token mappings +// @Description Returns token mappings for the given network, paginated +// @Tags token-mappings +// @Param network_id query int true "Network ID" +// @Param page_number query int false "Page number" +// @Param page_size query int false "Page size" +// @Param origin_token_address query string false "Filter by origin token address" +// @Produce json +// @Success 200 {object} types.TokenMappingsResult +// @Failure 400 {object} types.ErrorResponse "Bad Request" +// @Failure 500 {object} types.ErrorResponse "Internal Server Error" +// @Router /token-mappings [get] func (b *BridgeService) GetTokenMappingsHandler(c *gin.Context) { b.logger.Debugf( "GetTokenMappings request received (network id=%s, page number=%s, page size=%s, origin token address=%s)", @@ -1313,6 +1606,96 @@ func reportMetrics(handlerID 
string, statusCode int, startTime time.Time) { metrics.ObserveRequestLatencyHistogram(handlerID, startTime) } +// GetClaimsByGERHandler retrieves all DetailedClaimEvent claims that used the given global exit root. +// +// @Summary Get claims by global exit root +// @Description Returns all claims (DetailedClaimEvent type) recorded with the specified GER for the given network. +// @Tags claims +// @Param network_id query uint32 true "Network ID (0 for L1, L2 network ID otherwise)" +// @Param global_exit_root query string true "Global exit root (0x-prefixed 32-byte hex)" +// @Produce json +// @Success 200 {object} types.ClaimsByGERResult +// @Failure 400 {object} types.ErrorResponse "Bad Request" +// @Failure 500 {object} types.ErrorResponse "Internal Server Error" +// @Failure 503 {object} types.ErrorResponse "Service Unavailable" +// @Router /claims-by-ger [get] +func (b *BridgeService) GetClaimsByGERHandler(c *gin.Context) { + b.logger.Debugf("GetClaimsByGER request received") + + statusCode := http.StatusOK + startTime := time.Now() + defer func() { + reportMetrics(metrics.GetClaimsByGERReq, statusCode, startTime) + }() + + ctx, cancel := context.WithTimeout(c, b.readTimeout) + defer cancel() + + networkID, err := parseUintQuery(c, networkIDParam, true, uint32(0)) + if err != nil { + b.logger.Warnf(errNetworkID, err) + statusCode = http.StatusBadRequest + c.JSON(statusCode, gin.H{"error": err.Error()}) + return + } + + gerStr := c.Query("global_exit_root") + if gerStr == "" { + statusCode = http.StatusBadRequest + c.JSON(statusCode, gin.H{"error": "global_exit_root is mandatory"}) + return + } + if !isValidHexHash(gerStr) { + statusCode = http.StatusBadRequest + c.JSON(statusCode, gin.H{"error": "invalid global_exit_root parameter, must be a valid hex hash"}) + return + } + ger := common.HexToHash(gerStr) + + var claims []*claimsynctypes.Claim + switch networkID { + case mainnetNetworkID: + if b.claimL1 == nil { + statusCode = http.StatusServiceUnavailable + 
c.JSON(statusCode, gin.H{"error": "L1 claim syncer is not available"}) + return + } + claims, err = b.claimL1.GetClaimsByGER(ctx, ger) + if err != nil { + b.logger.Errorf("failed to get claims by GER %s for L1 network: %v", gerStr, err) + statusCode = http.StatusInternalServerError + c.JSON(statusCode, gin.H{"error": fmt.Sprintf("failed to get claims by GER: %s", err)}) + return + } + case b.networkID: + if b.claimL2 == nil { + statusCode = http.StatusServiceUnavailable + c.JSON(statusCode, gin.H{"error": "L2 claim syncer is not available"}) + return + } + claims, err = b.claimL2.GetClaimsByGER(ctx, ger) + if err != nil { + b.logger.Errorf("failed to get claims by GER %s for L2 network (ID=%d): %v", gerStr, networkID, err) + statusCode = http.StatusInternalServerError + c.JSON(statusCode, gin.H{"error": fmt.Sprintf("failed to get claims by GER: %s", err)}) + return + } + default: + b.logger.Warnf(errNetworkID, networkID) + statusCode = http.StatusBadRequest + c.JSON(statusCode, gin.H{"error": fmt.Sprintf(errNetworkID, networkID)}) + return + } + + claimResponses := make([]*types.ClaimResponse, 0, len(claims)) + for _, claim := range claims { + claimResponses = append(claimResponses, NewClaimResponse(claim, false)) + } + c.JSON(statusCode, types.ClaimsByGERResult{ + Claims: claimResponses, + Count: len(claimResponses), + }) +} // GetBridgeByDepositCountHandler retrieves a bridge by deposit count for the given network. 
// diff --git a/bridgeservice/bridge_interfaces.go b/bridgeservice/bridge_interfaces.go index c93473368..4cf3cb5db 100644 --- a/bridgeservice/bridge_interfaces.go +++ b/bridgeservice/bridge_interfaces.go @@ -5,6 +5,7 @@ import ( "math/big" "github.com/agglayer/aggkit/bridgesync" + claimsynctype "github.com/agglayer/aggkit/claimsync/types" "github.com/agglayer/aggkit/l1infotreesync" "github.com/agglayer/aggkit/l2gersync" tree "github.com/agglayer/aggkit/tree/types" @@ -32,6 +33,16 @@ type Bridger interface { amount *big.Int, metadata []byte) ([]*bridgesync.Bridge, error) } +type Claimer interface { + GetClaimsPaged(ctx context.Context, page, pageSize uint32, + networkIDs []uint32, globalIndex *big.Int) ([]*claimsynctype.Claim, int, error) + GetUnsetClaimsPaged(ctx context.Context, page, pageSize uint32, + globalIndex *big.Int) ([]*claimsynctype.UnsetClaim, int, error) + GetSetClaimsPaged(ctx context.Context, page, pageSize uint32, + globalIndex *big.Int) ([]*claimsynctype.SetClaim, int, error) + GetClaimsByGER(ctx context.Context, globalExitRoot common.Hash) ([]*claimsynctype.Claim, error) +} + type L2GERSyncer interface { GetFirstGERAfterL1InfoTreeIndex( ctx context.Context, atOrAfterL1InfoTreeIndex uint32, diff --git a/bridgeservice/bridge_test.go b/bridgeservice/bridge_test.go index 9a059b8aa..167a58328 100644 --- a/bridgeservice/bridge_test.go +++ b/bridgeservice/bridge_test.go @@ -19,6 +19,7 @@ import ( mocks "github.com/agglayer/aggkit/bridgeservice/mocks" bridgetypes "github.com/agglayer/aggkit/bridgeservice/types" "github.com/agglayer/aggkit/bridgesync" + claimsynctypes "github.com/agglayer/aggkit/claimsync/types" aggkitcommon "github.com/agglayer/aggkit/common" "github.com/agglayer/aggkit/db" "github.com/agglayer/aggkit/l1infotreesync" @@ -44,7 +45,9 @@ type bridgeWithMocks struct { l1InfoTree *mocks.L1InfoTreeSyncer injectedGERs *mocks.L2GERSyncer bridgeL1 *mocks.Bridger + claimL1 *mocks.Claimer bridgeL2 *mocks.Bridger + claimL2 *mocks.Claimer } func 
newBridgeWithMocks(t *testing.T, networkID uint32) bridgeWithMocks { @@ -54,7 +57,9 @@ func newBridgeWithMocks(t *testing.T, networkID uint32) bridgeWithMocks { l1InfoTree: mocks.NewL1InfoTreeSyncer(t), injectedGERs: mocks.NewL2GERSyncer(t), bridgeL1: mocks.NewBridger(t), + claimL1: mocks.NewClaimer(t), bridgeL2: mocks.NewBridger(t), + claimL2: mocks.NewClaimer(t), } logger := log.WithFields("module", "test bridge service") cfg := &Config{ @@ -64,7 +69,7 @@ func newBridgeWithMocks(t *testing.T, networkID uint32) bridgeWithMocks { WriteTimeout: 0, NetworkID: networkID, } - b.bridge = New(cfg, b.upgradeQuerier, b.l1InfoTree, b.injectedGERs, b.bridgeL1, b.bridgeL2) + b.bridge = New(cfg, b.upgradeQuerier, b.l1InfoTree, b.injectedGERs, b.bridgeL1, b.claimL1, b.bridgeL2, b.claimL2) return b } @@ -766,7 +771,7 @@ func TestGetClaimsHandler(t *testing.T) { bridgeMocks := newBridgeWithMocks(t, l2NetworkID) - expectedClaims := []*bridgesync.Claim{ + expectedClaims := []*claimsynctypes.Claim{ { BlockNum: 1, GlobalIndex: big.NewInt(1), @@ -778,11 +783,11 @@ func TestGetClaimsHandler(t *testing.T) { MainnetExitRoot: common.HexToHash("0xdefc...789"), }, } - claimsResp := aggkitcommon.MapSlice(expectedClaims, func(claim *bridgesync.Claim) *bridgetypes.ClaimResponse { + claimsResp := aggkitcommon.MapSlice(expectedClaims, func(claim *claimsynctypes.Claim) *bridgetypes.ClaimResponse { return NewClaimResponse(claim, false) }) - bridgeMocks.bridgeL1.EXPECT(). + bridgeMocks.claimL1.EXPECT(). GetClaimsPaged(mock.Anything, page, pageSize, mock.Anything, mock.Anything). 
Return(expectedClaims, len(expectedClaims), nil) @@ -808,7 +813,7 @@ func TestGetClaimsHandler(t *testing.T) { bridgeMocks := newBridgeWithMocks(t, l2NetworkID) - expectedClaims := []*bridgesync.Claim{ + expectedClaims := []*claimsynctypes.Claim{ { BlockNum: 1, GlobalIndex: big.NewInt(1), @@ -820,12 +825,12 @@ func TestGetClaimsHandler(t *testing.T) { MainnetExitRoot: common.HexToHash("0xdefc...789"), }, } - claimsResp := aggkitcommon.MapSlice(expectedClaims, func(claim *bridgesync.Claim) *bridgetypes.ClaimResponse { + claimsResp := aggkitcommon.MapSlice(expectedClaims, func(claim *claimsynctypes.Claim) *bridgetypes.ClaimResponse { return NewClaimResponse(claim, false) }) bridgeMocks.bridge.networkID = 10 - bridgeMocks.bridgeL2.EXPECT(). + bridgeMocks.claimL2.EXPECT(). GetClaimsPaged(mock.Anything, page, pageSize, mock.Anything, mock.Anything). Return(expectedClaims, len(expectedClaims), nil) @@ -858,7 +863,7 @@ func TestGetClaimsHandler(t *testing.T) { t.Run("GetClaims for L1 network failed", func(t *testing.T) { bridgeMocks := newBridgeWithMocks(t, l2NetworkID) - bridgeMocks.bridgeL1.EXPECT(). + bridgeMocks.claimL1.EXPECT(). GetClaimsPaged(mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). Return(nil, 0, errors.New(fooErrMsg)) @@ -874,7 +879,7 @@ func TestGetClaimsHandler(t *testing.T) { t.Run("GetClaims for L2 network failed", func(t *testing.T) { bridgeMocks := newBridgeWithMocks(t, l2NetworkID) - bridgeMocks.bridgeL2.EXPECT(). + bridgeMocks.claimL2.EXPECT(). GetClaimsPaged(mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). 
Return(nil, 0, errors.New(barErrMsg)) @@ -949,7 +954,7 @@ func TestGetClaimsHandler(t *testing.T) { bridgeMocks := newBridgeWithMocks(t, l2NetworkID) // Create claims with proof data - expectedClaims := []*bridgesync.Claim{ + expectedClaims := []*claimsynctypes.Claim{ { BlockNum: 1, GlobalIndex: big.NewInt(1), @@ -973,11 +978,11 @@ func TestGetClaimsHandler(t *testing.T) { }, }, } - claimsResp := aggkitcommon.MapSlice(expectedClaims, func(claim *bridgesync.Claim) *bridgetypes.ClaimResponse { + claimsResp := aggkitcommon.MapSlice(expectedClaims, func(claim *claimsynctypes.Claim) *bridgetypes.ClaimResponse { return NewClaimResponse(claim, true) }) - bridgeMocks.bridgeL1.EXPECT(). + bridgeMocks.claimL1.EXPECT(). GetClaimsPaged(mock.Anything, page, pageSize, mock.Anything, mock.Anything). Return(expectedClaims, len(expectedClaims), nil) @@ -1021,7 +1026,7 @@ func TestGetClaimsHandler(t *testing.T) { bridgeMocks := newBridgeWithMocks(t, l2NetworkID) // Create claims with proof data - expectedClaims := []*bridgesync.Claim{ + expectedClaims := []*claimsynctypes.Claim{ { BlockNum: 1, GlobalIndex: big.NewInt(1), @@ -1045,12 +1050,12 @@ func TestGetClaimsHandler(t *testing.T) { }, }, } - claimsResp := aggkitcommon.MapSlice(expectedClaims, func(claim *bridgesync.Claim) *bridgetypes.ClaimResponse { + claimsResp := aggkitcommon.MapSlice(expectedClaims, func(claim *claimsynctypes.Claim) *bridgetypes.ClaimResponse { return NewClaimResponse(claim, true) }) bridgeMocks.bridge.networkID = 10 - bridgeMocks.bridgeL2.EXPECT(). + bridgeMocks.claimL2.EXPECT(). GetClaimsPaged(mock.Anything, page, pageSize, mock.Anything, mock.Anything). 
Return(expectedClaims, len(expectedClaims), nil) @@ -1093,7 +1098,7 @@ func TestGetClaimsHandler(t *testing.T) { bridgeMocks := newBridgeWithMocks(t, l2NetworkID) // Create claims with proof data - expectedClaims := []*bridgesync.Claim{ + expectedClaims := []*claimsynctypes.Claim{ { BlockNum: 1, GlobalIndex: big.NewInt(1), @@ -1117,11 +1122,11 @@ func TestGetClaimsHandler(t *testing.T) { }, }, } - claimsResp := aggkitcommon.MapSlice(expectedClaims, func(claim *bridgesync.Claim) *bridgetypes.ClaimResponse { + claimsResp := aggkitcommon.MapSlice(expectedClaims, func(claim *claimsynctypes.Claim) *bridgetypes.ClaimResponse { return NewClaimResponse(claim, false) }) - bridgeMocks.bridgeL1.EXPECT(). + bridgeMocks.claimL1.EXPECT(). GetClaimsPaged(mock.Anything, page, pageSize, mock.Anything, mock.Anything). Return(expectedClaims, len(expectedClaims), nil) @@ -1205,7 +1210,7 @@ func TestGetClaimsHandler(t *testing.T) { // Create 2 claims with the same global_index (should be compacted to 1) globalIndex, _ := new(big.Int).SetString("18446744073709551617", 10) - expectedClaims := []*bridgesync.Claim{ + expectedClaims := []*claimsynctypes.Claim{ { BlockNum: 1, GlobalIndex: globalIndex, @@ -1219,10 +1224,10 @@ func TestGetClaimsHandler(t *testing.T) { } expectedCount := 1 - claimsResp := aggkitcommon.MapSlice(expectedClaims, func(claim *bridgesync.Claim) *bridgetypes.ClaimResponse { + claimsResp := aggkitcommon.MapSlice(expectedClaims, func(claim *claimsynctypes.Claim) *bridgetypes.ClaimResponse { return NewClaimResponse(claim, false) }) - bridgeMocks.bridgeL1.EXPECT(). + bridgeMocks.claimL1.EXPECT(). GetClaimsPaged(mock.Anything, page, pageSize, mock.Anything, mock.Anything). 
Return(expectedClaims, expectedCount, nil) @@ -1248,7 +1253,7 @@ func TestGetClaimsHandler(t *testing.T) { bridgeMocks := newBridgeWithMocks(t, l2NetworkID) // Create 3 claims with the same global_index but with unset_claim (all should be returned) - expectedClaims := []*bridgesync.Claim{ + expectedClaims := []*claimsynctypes.Claim{ { BlockNum: 1, GlobalIndex: big.NewInt(100), @@ -1283,11 +1288,11 @@ func TestGetClaimsHandler(t *testing.T) { // The count should be 3 (all claims, no compaction when unset_claim exists) expectedCount := 3 - claimsResp := aggkitcommon.MapSlice(expectedClaims, func(claim *bridgesync.Claim) *bridgetypes.ClaimResponse { + claimsResp := aggkitcommon.MapSlice(expectedClaims, func(claim *claimsynctypes.Claim) *bridgetypes.ClaimResponse { return NewClaimResponse(claim, false) }) - bridgeMocks.bridgeL1.EXPECT(). + bridgeMocks.claimL1.EXPECT(). GetClaimsPaged(mock.Anything, page, pageSize, mock.Anything, mock.Anything). Return(expectedClaims, expectedCount, nil) @@ -1317,7 +1322,7 @@ func TestGetClaimsHandler(t *testing.T) { // - global_index=100: 2 claims, no unset_claim → compacted to 1 // - global_index=200: 3 claims, has unset_claim → all 3 returned // - global_index=300: 1 claim, no unset_claim → 1 returned - expectedClaims := []*bridgesync.Claim{ + expectedClaims := []*claimsynctypes.Claim{ { BlockNum: 1, GlobalIndex: big.NewInt(100), @@ -1372,11 +1377,11 @@ func TestGetClaimsHandler(t *testing.T) { // Expected count: 1 (compacted) + 3 (all with unset_claim) + 1 (single) = 5 expectedCount := 5 - claimsResp := aggkitcommon.MapSlice(expectedClaims, func(claim *bridgesync.Claim) *bridgetypes.ClaimResponse { + claimsResp := aggkitcommon.MapSlice(expectedClaims, func(claim *claimsynctypes.Claim) *bridgetypes.ClaimResponse { return NewClaimResponse(claim, false) }) - bridgeMocks.bridgeL1.EXPECT(). + bridgeMocks.claimL1.EXPECT(). GetClaimsPaged(mock.Anything, page, pageSize, mock.Anything, mock.Anything). 
Return(expectedClaims, expectedCount, nil) @@ -1404,7 +1409,7 @@ func TestGetUnsetClaimsHandler(t *testing.T) { bridgeMocks := newBridgeWithMocks(t, l2NetworkID) - expectedUnsetClaims := []*bridgesync.UnsetClaim{ + expectedUnsetClaims := []*claimsynctypes.UnsetClaim{ { BlockNum: 1, BlockPos: 1, @@ -1415,7 +1420,7 @@ func TestGetUnsetClaimsHandler(t *testing.T) { }, } - bridgeMocks.bridgeL2.EXPECT(). + bridgeMocks.claimL2.EXPECT(). GetUnsetClaimsPaged(mock.Anything, page, pageSize, mock.Anything). Return(expectedUnsetClaims, len(expectedUnsetClaims), nil) @@ -1439,7 +1444,7 @@ func TestGetUnsetClaimsHandler(t *testing.T) { t.Run("GetUnsetClaims for L2 network failed", func(t *testing.T) { bridgeMocks := newBridgeWithMocks(t, l2NetworkID) - bridgeMocks.bridgeL2.EXPECT(). + bridgeMocks.claimL2.EXPECT(). GetUnsetClaimsPaged(mock.Anything, mock.Anything, mock.Anything, mock.Anything). Return(nil, 0, errors.New(barErrMsg)) @@ -1479,7 +1484,7 @@ func TestGetSetClaimsHandler(t *testing.T) { bridgeMocks := newBridgeWithMocks(t, l2NetworkID) - expectedSetClaims := []*bridgesync.SetClaim{ + expectedSetClaims := []*claimsynctypes.SetClaim{ { BlockNum: 1, BlockPos: 1, @@ -1489,7 +1494,7 @@ func TestGetSetClaimsHandler(t *testing.T) { }, } - bridgeMocks.bridgeL2.EXPECT(). + bridgeMocks.claimL2.EXPECT(). GetSetClaimsPaged(mock.Anything, page, pageSize, mock.Anything). Return(expectedSetClaims, len(expectedSetClaims), nil) @@ -1519,7 +1524,7 @@ func TestGetSetClaimsHandler(t *testing.T) { bridgeMocks := newBridgeWithMocks(t, l2NetworkID) - expectedSetClaims := []*bridgesync.SetClaim{ + expectedSetClaims := []*claimsynctypes.SetClaim{ { BlockNum: 2, BlockPos: 0, @@ -1529,7 +1534,7 @@ func TestGetSetClaimsHandler(t *testing.T) { }, } - bridgeMocks.bridgeL2.EXPECT(). + bridgeMocks.claimL2.EXPECT(). GetSetClaimsPaged(mock.Anything, page, pageSize, globalIndex). 
Return(expectedSetClaims, len(expectedSetClaims), nil) @@ -1554,7 +1559,7 @@ func TestGetSetClaimsHandler(t *testing.T) { t.Run("GetSetClaims for L2 network failed", func(t *testing.T) { bridgeMocks := newBridgeWithMocks(t, l2NetworkID) - bridgeMocks.bridgeL2.EXPECT(). + bridgeMocks.claimL2.EXPECT(). GetSetClaimsPaged(mock.Anything, mock.Anything, mock.Anything, mock.Anything). Return(nil, 0, errors.New(barErrMsg)) @@ -3805,7 +3810,7 @@ func TestGetClaimsByGERHandler(t *testing.T) { validGER := "0x27ae5ba08d7291c96c8cbddcc148bf48a6d68c7974b94356f53754ef6171d757" gerHash := common.HexToHash(validGER) - sampleClaim := &bridgesync.Claim{ + sampleClaim := &claimsynctypes.Claim{ BlockNum: 1, GlobalIndex: big.NewInt(1), OriginNetwork: 0, @@ -3818,9 +3823,9 @@ func TestGetClaimsByGERHandler(t *testing.T) { t.Run("L2 network success", func(t *testing.T) { bridgeMocks := newBridgeWithMocks(t, l2NetworkID) - expectedClaims := []*bridgesync.Claim{sampleClaim} + expectedClaims := []*claimsynctypes.Claim{sampleClaim} - bridgeMocks.bridgeL2.EXPECT(). + bridgeMocks.claimL2.EXPECT(). GetClaimsByGER(mock.Anything, gerHash). Return(expectedClaims, nil) @@ -3841,9 +3846,9 @@ func TestGetClaimsByGERHandler(t *testing.T) { t.Run("L1 network success", func(t *testing.T) { bridgeMocks := newBridgeWithMocks(t, l2NetworkID) - expectedClaims := []*bridgesync.Claim{sampleClaim} + expectedClaims := []*claimsynctypes.Claim{sampleClaim} - bridgeMocks.bridgeL1.EXPECT(). + bridgeMocks.claimL1.EXPECT(). GetClaimsByGER(mock.Anything, gerHash). 
Return(expectedClaims, nil) @@ -3900,7 +3905,7 @@ func TestGetClaimsByGERHandler(t *testing.T) { t.Run("L1 bridge nil", func(t *testing.T) { bridgeMocks := newBridgeWithMocks(t, l2NetworkID) - bridgeMocks.bridge.bridgeL1 = nil + bridgeMocks.bridge.claimL1 = nil queryParams := url.Values{} queryParams.Set(networkIDParam, "0") @@ -3909,12 +3914,12 @@ func TestGetClaimsByGERHandler(t *testing.T) { w := performRequest(t, bridgeMocks.bridge.router, http.MethodGet, fmt.Sprintf("%s/claims-by-ger?%s", BridgeV1Prefix, queryParams.Encode()), nil) require.Equal(t, http.StatusServiceUnavailable, w.Code) - require.Contains(t, w.Body.String(), "L1 bridge syncer is not available") + require.Contains(t, w.Body.String(), "L1 claim syncer is not available") }) t.Run("L2 bridge nil", func(t *testing.T) { bridgeMocks := newBridgeWithMocks(t, l2NetworkID) - bridgeMocks.bridge.bridgeL2 = nil + bridgeMocks.bridge.claimL2 = nil queryParams := url.Values{} queryParams.Set(networkIDParam, strconv.Itoa(int(l2NetworkID))) @@ -3923,13 +3928,13 @@ func TestGetClaimsByGERHandler(t *testing.T) { w := performRequest(t, bridgeMocks.bridge.router, http.MethodGet, fmt.Sprintf("%s/claims-by-ger?%s", BridgeV1Prefix, queryParams.Encode()), nil) require.Equal(t, http.StatusServiceUnavailable, w.Code) - require.Contains(t, w.Body.String(), "L2 bridge syncer is not available") + require.Contains(t, w.Body.String(), "L2 claim syncer is not available") }) t.Run("L1 service error", func(t *testing.T) { bridgeMocks := newBridgeWithMocks(t, l2NetworkID) - bridgeMocks.bridgeL1.EXPECT(). + bridgeMocks.claimL1.EXPECT(). GetClaimsByGER(mock.Anything, gerHash). Return(nil, errors.New("db error")) @@ -3946,7 +3951,7 @@ func TestGetClaimsByGERHandler(t *testing.T) { t.Run("L2 service error", func(t *testing.T) { bridgeMocks := newBridgeWithMocks(t, l2NetworkID) - bridgeMocks.bridgeL2.EXPECT(). + bridgeMocks.claimL2.EXPECT(). GetClaimsByGER(mock.Anything, gerHash). 
Return(nil, errors.New("db error")) @@ -3963,9 +3968,9 @@ func TestGetClaimsByGERHandler(t *testing.T) { t.Run("empty result", func(t *testing.T) { bridgeMocks := newBridgeWithMocks(t, l2NetworkID) - bridgeMocks.bridgeL2.EXPECT(). + bridgeMocks.claimL2.EXPECT(). GetClaimsByGER(mock.Anything, gerHash). - Return([]*bridgesync.Claim{}, nil) + Return([]*claimsynctypes.Claim{}, nil) queryParams := url.Values{} queryParams.Set(networkIDParam, strconv.Itoa(int(l2NetworkID))) diff --git a/bridgeservice/mocks/mock_bridger.go b/bridgeservice/mocks/mock_bridger.go index 5eae58095..bc837ea17 100644 --- a/bridgeservice/mocks/mock_bridger.go +++ b/bridgeservice/mocks/mock_bridger.go @@ -13,9 +13,7 @@ import ( mock "github.com/stretchr/testify/mock" - treetypes "github.com/agglayer/aggkit/tree/types" - - types "github.com/agglayer/aggkit/claimsync/types" + types "github.com/agglayer/aggkit/tree/types" ) // Bridger is an autogenerated mock type for the Bridger type @@ -224,134 +222,6 @@ func (_c *Bridger_GetBridgesPaged_Call) RunAndReturn(run func(context.Context, u return _c } -// GetClaimsByGER provides a mock function with given fields: ctx, globalExitRoot -func (_m *Bridger) GetClaimsByGER(ctx context.Context, globalExitRoot common.Hash) ([]*types.Claim, error) { - ret := _m.Called(ctx, globalExitRoot) - - if len(ret) == 0 { - panic("no return value specified for GetClaimsByGER") - } - - var r0 []*types.Claim - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, common.Hash) ([]*types.Claim, error)); ok { - return rf(ctx, globalExitRoot) - } - if rf, ok := ret.Get(0).(func(context.Context, common.Hash) []*types.Claim); ok { - r0 = rf(ctx, globalExitRoot) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]*types.Claim) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, common.Hash) error); ok { - r1 = rf(ctx, globalExitRoot) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// Bridger_GetClaimsByGER_Call is a *mock.Call that shadows Run/Return 
methods with type explicit version for method 'GetClaimsByGER' -type Bridger_GetClaimsByGER_Call struct { - *mock.Call -} - -// GetClaimsByGER is a helper method to define mock.On call -// - ctx context.Context -// - globalExitRoot common.Hash -func (_e *Bridger_Expecter) GetClaimsByGER(ctx interface{}, globalExitRoot interface{}) *Bridger_GetClaimsByGER_Call { - return &Bridger_GetClaimsByGER_Call{Call: _e.mock.On("GetClaimsByGER", ctx, globalExitRoot)} -} - -func (_c *Bridger_GetClaimsByGER_Call) Run(run func(ctx context.Context, globalExitRoot common.Hash)) *Bridger_GetClaimsByGER_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(common.Hash)) - }) - return _c -} - -func (_c *Bridger_GetClaimsByGER_Call) Return(_a0 []*types.Claim, _a1 error) *Bridger_GetClaimsByGER_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *Bridger_GetClaimsByGER_Call) RunAndReturn(run func(context.Context, common.Hash) ([]*types.Claim, error)) *Bridger_GetClaimsByGER_Call { - _c.Call.Return(run) - return _c -} - -// GetClaimsPaged provides a mock function with given fields: ctx, page, pageSize, networkIDs, globalIndex -func (_m *Bridger) GetClaimsPaged(ctx context.Context, page uint32, pageSize uint32, networkIDs []uint32, globalIndex *big.Int) ([]*types.Claim, int, error) { - ret := _m.Called(ctx, page, pageSize, networkIDs, globalIndex) - - if len(ret) == 0 { - panic("no return value specified for GetClaimsPaged") - } - - var r0 []*types.Claim - var r1 int - var r2 error - if rf, ok := ret.Get(0).(func(context.Context, uint32, uint32, []uint32, *big.Int) ([]*types.Claim, int, error)); ok { - return rf(ctx, page, pageSize, networkIDs, globalIndex) - } - if rf, ok := ret.Get(0).(func(context.Context, uint32, uint32, []uint32, *big.Int) []*types.Claim); ok { - r0 = rf(ctx, page, pageSize, networkIDs, globalIndex) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]*types.Claim) - } - } - - if rf, ok := 
ret.Get(1).(func(context.Context, uint32, uint32, []uint32, *big.Int) int); ok { - r1 = rf(ctx, page, pageSize, networkIDs, globalIndex) - } else { - r1 = ret.Get(1).(int) - } - - if rf, ok := ret.Get(2).(func(context.Context, uint32, uint32, []uint32, *big.Int) error); ok { - r2 = rf(ctx, page, pageSize, networkIDs, globalIndex) - } else { - r2 = ret.Error(2) - } - - return r0, r1, r2 -} - -// Bridger_GetClaimsPaged_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetClaimsPaged' -type Bridger_GetClaimsPaged_Call struct { - *mock.Call -} - -// GetClaimsPaged is a helper method to define mock.On call -// - ctx context.Context -// - page uint32 -// - pageSize uint32 -// - networkIDs []uint32 -// - globalIndex *big.Int -func (_e *Bridger_Expecter) GetClaimsPaged(ctx interface{}, page interface{}, pageSize interface{}, networkIDs interface{}, globalIndex interface{}) *Bridger_GetClaimsPaged_Call { - return &Bridger_GetClaimsPaged_Call{Call: _e.mock.On("GetClaimsPaged", ctx, page, pageSize, networkIDs, globalIndex)} -} - -func (_c *Bridger_GetClaimsPaged_Call) Run(run func(ctx context.Context, page uint32, pageSize uint32, networkIDs []uint32, globalIndex *big.Int)) *Bridger_GetClaimsPaged_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(uint32), args[2].(uint32), args[3].([]uint32), args[4].(*big.Int)) - }) - return _c -} - -func (_c *Bridger_GetClaimsPaged_Call) Return(_a0 []*types.Claim, _a1 int, _a2 error) *Bridger_GetClaimsPaged_Call { - _c.Call.Return(_a0, _a1, _a2) - return _c -} - -func (_c *Bridger_GetClaimsPaged_Call) RunAndReturn(run func(context.Context, uint32, uint32, []uint32, *big.Int) ([]*types.Claim, int, error)) *Bridger_GetClaimsPaged_Call { - _c.Call.Return(run) - return _c -} - // GetContractDepositCount provides a mock function with given fields: ctx func (_m *Bridger) GetContractDepositCount(ctx context.Context) (uint32, error) { ret := _m.Called(ctx) @@ -530,23 
+400,23 @@ func (_c *Bridger_GetLastReorgEvent_Call) RunAndReturn(run func(context.Context) } // GetLastRoot provides a mock function with given fields: ctx -func (_m *Bridger) GetLastRoot(ctx context.Context) (*treetypes.Root, error) { +func (_m *Bridger) GetLastRoot(ctx context.Context) (*types.Root, error) { ret := _m.Called(ctx) if len(ret) == 0 { panic("no return value specified for GetLastRoot") } - var r0 *treetypes.Root + var r0 *types.Root var r1 error - if rf, ok := ret.Get(0).(func(context.Context) (*treetypes.Root, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context) (*types.Root, error)); ok { return rf(ctx) } - if rf, ok := ret.Get(0).(func(context.Context) *treetypes.Root); ok { + if rf, ok := ret.Get(0).(func(context.Context) *types.Root); ok { r0 = rf(ctx) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*treetypes.Root) + r0 = ret.Get(0).(*types.Root) } } @@ -577,12 +447,12 @@ func (_c *Bridger_GetLastRoot_Call) Run(run func(ctx context.Context)) *Bridger_ return _c } -func (_c *Bridger_GetLastRoot_Call) Return(_a0 *treetypes.Root, _a1 error) *Bridger_GetLastRoot_Call { +func (_c *Bridger_GetLastRoot_Call) Return(_a0 *types.Root, _a1 error) *Bridger_GetLastRoot_Call { _c.Call.Return(_a0, _a1) return _c } -func (_c *Bridger_GetLastRoot_Call) RunAndReturn(run func(context.Context) (*treetypes.Root, error)) *Bridger_GetLastRoot_Call { +func (_c *Bridger_GetLastRoot_Call) RunAndReturn(run func(context.Context) (*types.Root, error)) *Bridger_GetLastRoot_Call { _c.Call.Return(run) return _c } @@ -711,23 +581,23 @@ func (_c *Bridger_GetLegacyTokenMigrations_Call) RunAndReturn(run func(context.C } // GetProof provides a mock function with given fields: ctx, depositCount, localExitRoot -func (_m *Bridger) GetProof(ctx context.Context, depositCount uint32, localExitRoot common.Hash) (treetypes.Proof, error) { +func (_m *Bridger) GetProof(ctx context.Context, depositCount uint32, localExitRoot common.Hash) (types.Proof, error) { ret := 
_m.Called(ctx, depositCount, localExitRoot) if len(ret) == 0 { panic("no return value specified for GetProof") } - var r0 treetypes.Proof + var r0 types.Proof var r1 error - if rf, ok := ret.Get(0).(func(context.Context, uint32, common.Hash) (treetypes.Proof, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, uint32, common.Hash) (types.Proof, error)); ok { return rf(ctx, depositCount, localExitRoot) } - if rf, ok := ret.Get(0).(func(context.Context, uint32, common.Hash) treetypes.Proof); ok { + if rf, ok := ret.Get(0).(func(context.Context, uint32, common.Hash) types.Proof); ok { r0 = rf(ctx, depositCount, localExitRoot) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(treetypes.Proof) + r0 = ret.Get(0).(types.Proof) } } @@ -760,34 +630,34 @@ func (_c *Bridger_GetProof_Call) Run(run func(ctx context.Context, depositCount return _c } -func (_c *Bridger_GetProof_Call) Return(_a0 treetypes.Proof, _a1 error) *Bridger_GetProof_Call { +func (_c *Bridger_GetProof_Call) Return(_a0 types.Proof, _a1 error) *Bridger_GetProof_Call { _c.Call.Return(_a0, _a1) return _c } -func (_c *Bridger_GetProof_Call) RunAndReturn(run func(context.Context, uint32, common.Hash) (treetypes.Proof, error)) *Bridger_GetProof_Call { +func (_c *Bridger_GetProof_Call) RunAndReturn(run func(context.Context, uint32, common.Hash) (types.Proof, error)) *Bridger_GetProof_Call { _c.Call.Return(run) return _c } // GetRootByLER provides a mock function with given fields: ctx, ler -func (_m *Bridger) GetRootByLER(ctx context.Context, ler common.Hash) (*treetypes.Root, error) { +func (_m *Bridger) GetRootByLER(ctx context.Context, ler common.Hash) (*types.Root, error) { ret := _m.Called(ctx, ler) if len(ret) == 0 { panic("no return value specified for GetRootByLER") } - var r0 *treetypes.Root + var r0 *types.Root var r1 error - if rf, ok := ret.Get(0).(func(context.Context, common.Hash) (*treetypes.Root, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) (*types.Root, 
error)); ok { return rf(ctx, ler) } - if rf, ok := ret.Get(0).(func(context.Context, common.Hash) *treetypes.Root); ok { + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) *types.Root); ok { r0 = rf(ctx, ler) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*treetypes.Root) + r0 = ret.Get(0).(*types.Root) } } @@ -819,80 +689,12 @@ func (_c *Bridger_GetRootByLER_Call) Run(run func(ctx context.Context, ler commo return _c } -func (_c *Bridger_GetRootByLER_Call) Return(_a0 *treetypes.Root, _a1 error) *Bridger_GetRootByLER_Call { +func (_c *Bridger_GetRootByLER_Call) Return(_a0 *types.Root, _a1 error) *Bridger_GetRootByLER_Call { _c.Call.Return(_a0, _a1) return _c } -func (_c *Bridger_GetRootByLER_Call) RunAndReturn(run func(context.Context, common.Hash) (*treetypes.Root, error)) *Bridger_GetRootByLER_Call { - _c.Call.Return(run) - return _c -} - -// GetSetClaimsPaged provides a mock function with given fields: ctx, page, pageSize, globalIndex -func (_m *Bridger) GetSetClaimsPaged(ctx context.Context, page uint32, pageSize uint32, globalIndex *big.Int) ([]*types.SetClaim, int, error) { - ret := _m.Called(ctx, page, pageSize, globalIndex) - - if len(ret) == 0 { - panic("no return value specified for GetSetClaimsPaged") - } - - var r0 []*types.SetClaim - var r1 int - var r2 error - if rf, ok := ret.Get(0).(func(context.Context, uint32, uint32, *big.Int) ([]*types.SetClaim, int, error)); ok { - return rf(ctx, page, pageSize, globalIndex) - } - if rf, ok := ret.Get(0).(func(context.Context, uint32, uint32, *big.Int) []*types.SetClaim); ok { - r0 = rf(ctx, page, pageSize, globalIndex) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]*types.SetClaim) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, uint32, uint32, *big.Int) int); ok { - r1 = rf(ctx, page, pageSize, globalIndex) - } else { - r1 = ret.Get(1).(int) - } - - if rf, ok := ret.Get(2).(func(context.Context, uint32, uint32, *big.Int) error); ok { - r2 = rf(ctx, page, pageSize, 
globalIndex) - } else { - r2 = ret.Error(2) - } - - return r0, r1, r2 -} - -// Bridger_GetSetClaimsPaged_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetSetClaimsPaged' -type Bridger_GetSetClaimsPaged_Call struct { - *mock.Call -} - -// GetSetClaimsPaged is a helper method to define mock.On call -// - ctx context.Context -// - page uint32 -// - pageSize uint32 -// - globalIndex *big.Int -func (_e *Bridger_Expecter) GetSetClaimsPaged(ctx interface{}, page interface{}, pageSize interface{}, globalIndex interface{}) *Bridger_GetSetClaimsPaged_Call { - return &Bridger_GetSetClaimsPaged_Call{Call: _e.mock.On("GetSetClaimsPaged", ctx, page, pageSize, globalIndex)} -} - -func (_c *Bridger_GetSetClaimsPaged_Call) Run(run func(ctx context.Context, page uint32, pageSize uint32, globalIndex *big.Int)) *Bridger_GetSetClaimsPaged_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(uint32), args[2].(uint32), args[3].(*big.Int)) - }) - return _c -} - -func (_c *Bridger_GetSetClaimsPaged_Call) Return(_a0 []*types.SetClaim, _a1 int, _a2 error) *Bridger_GetSetClaimsPaged_Call { - _c.Call.Return(_a0, _a1, _a2) - return _c -} - -func (_c *Bridger_GetSetClaimsPaged_Call) RunAndReturn(run func(context.Context, uint32, uint32, *big.Int) ([]*types.SetClaim, int, error)) *Bridger_GetSetClaimsPaged_Call { +func (_c *Bridger_GetRootByLER_Call) RunAndReturn(run func(context.Context, common.Hash) (*types.Root, error)) *Bridger_GetRootByLER_Call { _c.Call.Return(run) return _c } @@ -965,74 +767,6 @@ func (_c *Bridger_GetTokenMappings_Call) RunAndReturn(run func(context.Context, return _c } -// GetUnsetClaimsPaged provides a mock function with given fields: ctx, page, pageSize, globalIndex -func (_m *Bridger) GetUnsetClaimsPaged(ctx context.Context, page uint32, pageSize uint32, globalIndex *big.Int) ([]*types.UnsetClaim, int, error) { - ret := _m.Called(ctx, page, pageSize, globalIndex) - - if len(ret) == 0 { 
- panic("no return value specified for GetUnsetClaimsPaged") - } - - var r0 []*types.UnsetClaim - var r1 int - var r2 error - if rf, ok := ret.Get(0).(func(context.Context, uint32, uint32, *big.Int) ([]*types.UnsetClaim, int, error)); ok { - return rf(ctx, page, pageSize, globalIndex) - } - if rf, ok := ret.Get(0).(func(context.Context, uint32, uint32, *big.Int) []*types.UnsetClaim); ok { - r0 = rf(ctx, page, pageSize, globalIndex) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]*types.UnsetClaim) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, uint32, uint32, *big.Int) int); ok { - r1 = rf(ctx, page, pageSize, globalIndex) - } else { - r1 = ret.Get(1).(int) - } - - if rf, ok := ret.Get(2).(func(context.Context, uint32, uint32, *big.Int) error); ok { - r2 = rf(ctx, page, pageSize, globalIndex) - } else { - r2 = ret.Error(2) - } - - return r0, r1, r2 -} - -// Bridger_GetUnsetClaimsPaged_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetUnsetClaimsPaged' -type Bridger_GetUnsetClaimsPaged_Call struct { - *mock.Call -} - -// GetUnsetClaimsPaged is a helper method to define mock.On call -// - ctx context.Context -// - page uint32 -// - pageSize uint32 -// - globalIndex *big.Int -func (_e *Bridger_Expecter) GetUnsetClaimsPaged(ctx interface{}, page interface{}, pageSize interface{}, globalIndex interface{}) *Bridger_GetUnsetClaimsPaged_Call { - return &Bridger_GetUnsetClaimsPaged_Call{Call: _e.mock.On("GetUnsetClaimsPaged", ctx, page, pageSize, globalIndex)} -} - -func (_c *Bridger_GetUnsetClaimsPaged_Call) Run(run func(ctx context.Context, page uint32, pageSize uint32, globalIndex *big.Int)) *Bridger_GetUnsetClaimsPaged_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(uint32), args[2].(uint32), args[3].(*big.Int)) - }) - return _c -} - -func (_c *Bridger_GetUnsetClaimsPaged_Call) Return(_a0 []*types.UnsetClaim, _a1 int, _a2 error) *Bridger_GetUnsetClaimsPaged_Call 
{ - _c.Call.Return(_a0, _a1, _a2) - return _c -} - -func (_c *Bridger_GetUnsetClaimsPaged_Call) RunAndReturn(run func(context.Context, uint32, uint32, *big.Int) ([]*types.UnsetClaim, int, error)) *Bridger_GetUnsetClaimsPaged_Call { - _c.Call.Return(run) - return _c -} - // IsActive provides a mock function with given fields: ctx func (_m *Bridger) IsActive(ctx context.Context) bool { ret := _m.Called(ctx) diff --git a/bridgeservice/mocks/mock_claimer.go b/bridgeservice/mocks/mock_claimer.go new file mode 100644 index 000000000..0f68192c9 --- /dev/null +++ b/bridgeservice/mocks/mock_claimer.go @@ -0,0 +1,267 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + "context" + "math/big" + + claimsynctypes "github.com/agglayer/aggkit/claimsync/types" + common "github.com/ethereum/go-ethereum/common" + mock "github.com/stretchr/testify/mock" +) + +// Claimer is an autogenerated mock type for the Claimer type +type Claimer struct { + mock.Mock +} + +type Claimer_Expecter struct { + mock *mock.Mock +} + +func (_m *Claimer) EXPECT() *Claimer_Expecter { + return &Claimer_Expecter{mock: &_m.Mock} +} + +// NewClaimer creates a new instance of Claimer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewClaimer(t interface { + mock.TestingT + Cleanup(func()) +}) *Claimer { + mock := &Claimer{} + mock.Mock.Test(t) + t.Cleanup(func() { mock.AssertExpectations(t) }) + return mock +} + +// GetClaimsPaged provides a mock function with given fields: ctx, page, pageSize, networkIDs, globalIndex +func (_m *Claimer) GetClaimsPaged(ctx context.Context, page uint32, pageSize uint32, networkIDs []uint32, globalIndex *big.Int) ([]*claimsynctypes.Claim, int, error) { + ret := _m.Called(ctx, page, pageSize, networkIDs, globalIndex) + + if len(ret) == 0 { + panic("no return value specified for GetClaimsPaged") + } + + var r0 []*claimsynctypes.Claim + var r1 int + var r2 error + if rf, ok := ret.Get(0).(func(context.Context, uint32, uint32, []uint32, *big.Int) ([]*claimsynctypes.Claim, int, error)); ok { + return rf(ctx, page, pageSize, networkIDs, globalIndex) + } + if rf, ok := ret.Get(0).(func(context.Context, uint32, uint32, []uint32, *big.Int) []*claimsynctypes.Claim); ok { + r0 = rf(ctx, page, pageSize, networkIDs, globalIndex) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*claimsynctypes.Claim) + } + } + if rf, ok := ret.Get(1).(func(context.Context, uint32, uint32, []uint32, *big.Int) int); ok { + r1 = rf(ctx, page, pageSize, networkIDs, globalIndex) + } else { + r1 = ret.Get(1).(int) + } + if rf, ok := ret.Get(2).(func(context.Context, uint32, uint32, []uint32, *big.Int) error); ok { + r2 = rf(ctx, page, pageSize, networkIDs, globalIndex) + } else { + r2 = ret.Error(2) + } + return r0, r1, r2 +} + +type Claimer_GetClaimsPaged_Call struct { + *mock.Call +} + +func (_e *Claimer_Expecter) GetClaimsPaged(ctx interface{}, page interface{}, pageSize interface{}, networkIDs interface{}, globalIndex interface{}) *Claimer_GetClaimsPaged_Call { + return &Claimer_GetClaimsPaged_Call{Call: _e.mock.On("GetClaimsPaged", ctx, page, pageSize, networkIDs, globalIndex)} +} + +func (_c *Claimer_GetClaimsPaged_Call) Run(run func(ctx context.Context, page uint32, pageSize 
uint32, networkIDs []uint32, globalIndex *big.Int)) *Claimer_GetClaimsPaged_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint32), args[2].(uint32), args[3].([]uint32), args[4].(*big.Int)) + }) + return _c +} + +func (_c *Claimer_GetClaimsPaged_Call) Return(_a0 []*claimsynctypes.Claim, _a1 int, _a2 error) *Claimer_GetClaimsPaged_Call { + _c.Call.Return(_a0, _a1, _a2) + return _c +} + +func (_c *Claimer_GetClaimsPaged_Call) RunAndReturn(run func(context.Context, uint32, uint32, []uint32, *big.Int) ([]*claimsynctypes.Claim, int, error)) *Claimer_GetClaimsPaged_Call { + _c.Call.Return(run) + return _c +} + +// GetUnsetClaimsPaged provides a mock function with given fields: ctx, page, pageSize, globalIndex +func (_m *Claimer) GetUnsetClaimsPaged(ctx context.Context, page uint32, pageSize uint32, globalIndex *big.Int) ([]*claimsynctypes.UnsetClaim, int, error) { + ret := _m.Called(ctx, page, pageSize, globalIndex) + + if len(ret) == 0 { + panic("no return value specified for GetUnsetClaimsPaged") + } + + var r0 []*claimsynctypes.UnsetClaim + var r1 int + var r2 error + if rf, ok := ret.Get(0).(func(context.Context, uint32, uint32, *big.Int) ([]*claimsynctypes.UnsetClaim, int, error)); ok { + return rf(ctx, page, pageSize, globalIndex) + } + if rf, ok := ret.Get(0).(func(context.Context, uint32, uint32, *big.Int) []*claimsynctypes.UnsetClaim); ok { + r0 = rf(ctx, page, pageSize, globalIndex) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*claimsynctypes.UnsetClaim) + } + } + if rf, ok := ret.Get(1).(func(context.Context, uint32, uint32, *big.Int) int); ok { + r1 = rf(ctx, page, pageSize, globalIndex) + } else { + r1 = ret.Get(1).(int) + } + if rf, ok := ret.Get(2).(func(context.Context, uint32, uint32, *big.Int) error); ok { + r2 = rf(ctx, page, pageSize, globalIndex) + } else { + r2 = ret.Error(2) + } + return r0, r1, r2 +} + +type Claimer_GetUnsetClaimsPaged_Call struct { + *mock.Call +} + +func (_e 
*Claimer_Expecter) GetUnsetClaimsPaged(ctx interface{}, page interface{}, pageSize interface{}, globalIndex interface{}) *Claimer_GetUnsetClaimsPaged_Call { + return &Claimer_GetUnsetClaimsPaged_Call{Call: _e.mock.On("GetUnsetClaimsPaged", ctx, page, pageSize, globalIndex)} +} + +func (_c *Claimer_GetUnsetClaimsPaged_Call) Run(run func(ctx context.Context, page uint32, pageSize uint32, globalIndex *big.Int)) *Claimer_GetUnsetClaimsPaged_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint32), args[2].(uint32), args[3].(*big.Int)) + }) + return _c +} + +func (_c *Claimer_GetUnsetClaimsPaged_Call) Return(_a0 []*claimsynctypes.UnsetClaim, _a1 int, _a2 error) *Claimer_GetUnsetClaimsPaged_Call { + _c.Call.Return(_a0, _a1, _a2) + return _c +} + +func (_c *Claimer_GetUnsetClaimsPaged_Call) RunAndReturn(run func(context.Context, uint32, uint32, *big.Int) ([]*claimsynctypes.UnsetClaim, int, error)) *Claimer_GetUnsetClaimsPaged_Call { + _c.Call.Return(run) + return _c +} + +// GetSetClaimsPaged provides a mock function with given fields: ctx, page, pageSize, globalIndex +func (_m *Claimer) GetSetClaimsPaged(ctx context.Context, page uint32, pageSize uint32, globalIndex *big.Int) ([]*claimsynctypes.SetClaim, int, error) { + ret := _m.Called(ctx, page, pageSize, globalIndex) + + if len(ret) == 0 { + panic("no return value specified for GetSetClaimsPaged") + } + + var r0 []*claimsynctypes.SetClaim + var r1 int + var r2 error + if rf, ok := ret.Get(0).(func(context.Context, uint32, uint32, *big.Int) ([]*claimsynctypes.SetClaim, int, error)); ok { + return rf(ctx, page, pageSize, globalIndex) + } + if rf, ok := ret.Get(0).(func(context.Context, uint32, uint32, *big.Int) []*claimsynctypes.SetClaim); ok { + r0 = rf(ctx, page, pageSize, globalIndex) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*claimsynctypes.SetClaim) + } + } + if rf, ok := ret.Get(1).(func(context.Context, uint32, uint32, *big.Int) int); ok { + r1 = rf(ctx, 
page, pageSize, globalIndex) + } else { + r1 = ret.Get(1).(int) + } + if rf, ok := ret.Get(2).(func(context.Context, uint32, uint32, *big.Int) error); ok { + r2 = rf(ctx, page, pageSize, globalIndex) + } else { + r2 = ret.Error(2) + } + return r0, r1, r2 +} + +type Claimer_GetSetClaimsPaged_Call struct { + *mock.Call +} + +func (_e *Claimer_Expecter) GetSetClaimsPaged(ctx interface{}, page interface{}, pageSize interface{}, globalIndex interface{}) *Claimer_GetSetClaimsPaged_Call { + return &Claimer_GetSetClaimsPaged_Call{Call: _e.mock.On("GetSetClaimsPaged", ctx, page, pageSize, globalIndex)} +} + +func (_c *Claimer_GetSetClaimsPaged_Call) Run(run func(ctx context.Context, page uint32, pageSize uint32, globalIndex *big.Int)) *Claimer_GetSetClaimsPaged_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint32), args[2].(uint32), args[3].(*big.Int)) + }) + return _c +} + +func (_c *Claimer_GetSetClaimsPaged_Call) Return(_a0 []*claimsynctypes.SetClaim, _a1 int, _a2 error) *Claimer_GetSetClaimsPaged_Call { + _c.Call.Return(_a0, _a1, _a2) + return _c +} + +func (_c *Claimer_GetSetClaimsPaged_Call) RunAndReturn(run func(context.Context, uint32, uint32, *big.Int) ([]*claimsynctypes.SetClaim, int, error)) *Claimer_GetSetClaimsPaged_Call { + _c.Call.Return(run) + return _c +} + +// GetClaimsByGER provides a mock function with given fields: ctx, globalExitRoot +func (_m *Claimer) GetClaimsByGER(ctx context.Context, globalExitRoot common.Hash) ([]*claimsynctypes.Claim, error) { + ret := _m.Called(ctx, globalExitRoot) + + if len(ret) == 0 { + panic("no return value specified for GetClaimsByGER") + } + + var r0 []*claimsynctypes.Claim + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) ([]*claimsynctypes.Claim, error)); ok { + return rf(ctx, globalExitRoot) + } + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) []*claimsynctypes.Claim); ok { + r0 = rf(ctx, globalExitRoot) + } else { + if ret.Get(0) != 
nil { + r0 = ret.Get(0).([]*claimsynctypes.Claim) + } + } + if rf, ok := ret.Get(1).(func(context.Context, common.Hash) error); ok { + r1 = rf(ctx, globalExitRoot) + } else { + r1 = ret.Error(1) + } + return r0, r1 +} + +type Claimer_GetClaimsByGER_Call struct { + *mock.Call +} + +func (_e *Claimer_Expecter) GetClaimsByGER(ctx interface{}, globalExitRoot interface{}) *Claimer_GetClaimsByGER_Call { + return &Claimer_GetClaimsByGER_Call{Call: _e.mock.On("GetClaimsByGER", ctx, globalExitRoot)} +} + +func (_c *Claimer_GetClaimsByGER_Call) Run(run func(ctx context.Context, globalExitRoot common.Hash)) *Claimer_GetClaimsByGER_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(common.Hash)) + }) + return _c +} + +func (_c *Claimer_GetClaimsByGER_Call) Return(_a0 []*claimsynctypes.Claim, _a1 error) *Claimer_GetClaimsByGER_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *Claimer_GetClaimsByGER_Call) RunAndReturn(run func(context.Context, common.Hash) ([]*claimsynctypes.Claim, error)) *Claimer_GetClaimsByGER_Call { + _c.Call.Return(run) + return _c +} diff --git a/bridgesync/bridgesync_test.go b/bridgesync/bridgesync_test.go index 5192154af..20d9c8102 100644 --- a/bridgesync/bridgesync_test.go +++ b/bridgesync/bridgesync_test.go @@ -70,10 +70,6 @@ func TestNewLx(t *testing.T) { mockReorgDetector.EXPECT().Subscribe(mock.Anything).Return(nil, nil) mockReorgDetector.EXPECT().GetFinalizedBlockType().Return(blockFinalityType) mockReorgDetector.EXPECT().String().Return("mockReorgDetector") - // CustomHeaderByNumber is called once (for L1 on fresh DB; L2 reuses the same DB) - mockEthClient.EXPECT().CustomHeaderByNumber(mock.Anything, mock.Anything). 
- Return(aggkittypes.NewBlockHeader(0, common.Hash{}, 0, nil), nil).Once() - dbQueryTimeout := 30 * time.Second syncFromInBridgesResolved := testSyncFromInBridges @@ -310,8 +306,6 @@ func TestBridgeSync_GetTokenMappings(t *testing.T) { mockReorgDetector.EXPECT().Subscribe(mock.Anything).Return(nil, nil) mockReorgDetector.EXPECT().GetFinalizedBlockType().Return(blockFinalityType) mockReorgDetector.EXPECT().String().Return("mockReorgDetector") - mockEthClient.EXPECT().CustomHeaderByNumber(mock.Anything, mock.Anything). - Return(aggkittypes.NewBlockHeader(0, common.Hash{}, 0, nil), nil).Once() dbQueryTimeout := 30 * time.Second @@ -483,8 +477,6 @@ func TestBridgeSync_GetLegacyTokenMigrations(t *testing.T) { mockReorgDetector.EXPECT().Subscribe(mock.Anything).Return(nil, nil) mockReorgDetector.EXPECT().GetFinalizedBlockType().Return(blockFinalityType) mockReorgDetector.EXPECT().String().Return("mockReorgDetector") - mockEthClient.EXPECT().CustomHeaderByNumber(mock.Anything, mock.Anything). - Return(aggkittypes.NewBlockHeader(0, common.Hash{}, 0, nil), nil).Once() dbQueryTimeout := 30 * time.Second @@ -673,8 +665,6 @@ func TestBridgeSync_GetLastRoot(t *testing.T) { mockReorgDetector.EXPECT().Subscribe(mock.Anything).Return(nil, nil) mockReorgDetector.EXPECT().GetFinalizedBlockType().Return(blockFinalityType) mockReorgDetector.EXPECT().String().Return("mockReorgDetector") - mockEthClient.EXPECT().CustomHeaderByNumber(mock.Anything, mock.Anything). - Return(aggkittypes.NewBlockHeader(0, common.Hash{}, 0, nil), nil).Once() dbQueryTimeout := 30 * time.Second @@ -856,8 +846,6 @@ func TestBridgeSync_SubscribeToSync(t *testing.T) { mockReorgDetector.EXPECT().Subscribe(mock.Anything).Return(nil, nil) mockReorgDetector.EXPECT().GetFinalizedBlockType().Return(blockFinalityType) mockReorgDetector.EXPECT().String().Return("mockReorgDetector") - mockEthClient.EXPECT().CustomHeaderByNumber(mock.Anything, mock.Anything). 
- Return(aggkittypes.NewBlockHeader(0, common.Hash{}, 0, nil), nil).Once() dbQueryTimeout := 30 * time.Second diff --git a/bridgesync/claim.go b/bridgesync/claim.go deleted file mode 100644 index d36fb3a36..000000000 --- a/bridgesync/claim.go +++ /dev/null @@ -1,51 +0,0 @@ -package bridgesync - -import ( - claimsynctypes "github.com/agglayer/aggkit/claimsync/types" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/crypto" -) - -// Type aliases to maintain backward compatibility after types were moved to claimsync/types. - -// Claim is an alias for claimsynctypes.Claim. -type Claim = claimsynctypes.Claim - -// ClaimType is an alias for claimsynctypes.ClaimType. -type ClaimType = claimsynctypes.ClaimType - -// UnsetClaim is an alias for claimsynctypes.UnsetClaim. -type UnsetClaim = claimsynctypes.UnsetClaim - -// SetClaim is an alias for claimsynctypes.SetClaim. -type SetClaim = claimsynctypes.SetClaim - -const ( - // ClaimEvent is an alias for claimsynctypes.ClaimEvent. - ClaimEvent ClaimType = claimsynctypes.ClaimEvent - // DetailedClaimEvent is an alias for claimsynctypes.DetailedClaimEvent. 
- DetailedClaimEvent ClaimType = claimsynctypes.DetailedClaimEvent -) - -var ( - // claim event signatures (moved to claimsync package, re-exported here for test compatibility) - claimEventSignature = crypto.Keccak256Hash([]byte("ClaimEvent(uint256,uint32,address,address,uint256)")) - claimEventSignaturePreEtrog = crypto.Keccak256Hash([]byte("ClaimEvent(uint32,uint32,address,address,uint256)")) - detailedClaimEventSignature = crypto.Keccak256Hash([]byte( - "DetailedClaimEvent(bytes32[32],bytes32[32]," + - "uint256,bytes32,bytes32,uint8,uint32," + - "address,uint32,address,uint256,bytes)", - )) - unsetClaimEventSignature = crypto.Keccak256Hash([]byte( - "UpdatedUnsetGlobalIndexHashChain(bytes32,bytes32)", - )) - setClaimEventSignature = crypto.Keccak256Hash([]byte( - "SetClaim(bytes32)", - )) - - // claim method IDs (moved to claimsync package, re-exported here for test compatibility) - claimAssetEtrogMethodID = common.Hex2Bytes("ccaa2d11") - claimMessageEtrogMethodID = common.Hex2Bytes("f5efcd79") - claimAssetPreEtrogMethodID = common.Hex2Bytes("2cffd02e") - claimMessagePreEtrogMethodID = common.Hex2Bytes("2d2c9d94") -) diff --git a/bridgesync/downloader_test.go b/bridgesync/downloader_test.go index 342a8d395..47af83b7d 100644 --- a/bridgesync/downloader_test.go +++ b/bridgesync/downloader_test.go @@ -23,6 +23,11 @@ import ( "github.com/stretchr/testify/require" ) +var ( + claimAssetEtrogMethodID = common.Hex2Bytes("ccaa2d11") + claimMessageEtrogMethodID = common.Hex2Bytes("f5efcd79") +) + // mainnet: // case https://etherscan.io/tx/0x8db8e288d25102b64d8a37ad05769817d1b43f0384dd05da075d24d2cee9cb65 (bn: 19566985) -> fix // case: https://etherscan.io/tx/0x0b276867aa22d1c162c2700d35c500a124a6a953c7b24931a1d3efc63f7cd4ab (bn: 22770713) @@ -563,7 +568,6 @@ func TestFindCallWithOnlyUnrecognizedMethods(t *testing.T) { require.Contains(t, err.Error(), "not found") } - func TestTxnSenderField(t *testing.T) { bridgeAddr := common.HexToAddress("0x10") blockNum := uint64(1) 
@@ -572,7 +576,6 @@ func TestTxnSenderField(t *testing.T) { agglayerBridgeABI, err := agglayerbridge.AgglayerbridgeMetaData.GetAbi() require.NoError(t, err) - tests := []struct { name string eventSignature common.Hash diff --git a/bridgesync/e2e_test.go b/bridgesync/e2e_test.go index c2dc1c47a..6f48c09f7 100644 --- a/bridgesync/e2e_test.go +++ b/bridgesync/e2e_test.go @@ -31,8 +31,6 @@ var ( } ) -const testSyncFromInBridges = true - // bridgeSyncAdapter wraps BridgeSync to satisfy helpers.Processorer interface. type bridgeSyncAdapter struct { *bridgesync.BridgeSync @@ -43,7 +41,6 @@ func (a *bridgeSyncAdapter) GetLastProcessedBlock(ctx context.Context) (uint64, return block, err } - func mockClientCallGetTransactionByHash(t *testing.T, mockClient *mocks.RPCClienter, expectedTxHash common.Hash, fromAddress string, toAddress string) { diff --git a/bridgesync/processor.go b/bridgesync/processor.go index 0d0d6c703..ff5c4d973 100644 --- a/bridgesync/processor.go +++ b/bridgesync/processor.go @@ -36,21 +36,12 @@ const ( // bridgeTableName is the name of the table that stores bridge events bridgeTableName = "bridge" - // claimTableName is the name of the table that stores claim events - claimTableName = "claim" - // tokenMappingTableName is the name of the table that stores token mapping events tokenMappingTableName = "token_mapping" // legacyTokenMigrationTableName is the name of the table that stores legacy token migration events legacyTokenMigrationTableName = "legacy_token_migration" - // unsetClaimTableName is the name of the table that stores unset claim events - unsetClaimTableName = "unset_claim" - - // setClaimTableName is the name of the table that stores set claim events - setClaimTableName = "set_claim" - // backwardLETTableName is the name of the table that stores backward local exit tree events backwardLETTableName = "backward_let" @@ -65,48 +56,6 @@ const ( // orderByBlockDesc is the default order by clause for block-based queries orderByBlockDesc = 
"block_num DESC, block_pos DESC" - // claimColumnsSQL is the list of all claim columns - claimColumnsSQL = `block_num, - block_pos, - tx_hash, - global_index, - origin_network, - origin_address, - destination_address, - amount, - proof_local_exit_root, - proof_rollup_exit_root, - mainnet_exit_root, - rollup_exit_root, - global_exit_root, - destination_network, - metadata, - is_message, - block_timestamp, - type` - - // compactedClaimsSelectSQL is the SELECT clause for compacted claims - // It combines metadata from the oldest claim with proofs and exit roots from the newest claim - compactedClaimsSelectSQL = ` - o.block_num, - o.block_pos, - o.tx_hash, - o.global_index, - o.origin_network, - o.origin_address, - o.destination_address, - o.amount, - n.proof_local_exit_root, - n.proof_rollup_exit_root, - n.mainnet_exit_root, - n.rollup_exit_root, - n.global_exit_root, - o.destination_network, - o.metadata, - o.is_message, - o.block_timestamp, - o.type` - // bridgeByDepositCountSQL is the query used by GetBridgeByDepositCount for the main bridge table. // deposit_count is a unique monotonic counter per bridge event in the contract, so no // additional origin_network filter is needed (it would incorrectly exclude L2-native tokens). @@ -135,9 +84,6 @@ const ( ) var ( - // errFailToConvertClaims indicates that the conversion from []*Claim to []Claim failed. 
- errFailToConvertClaims = errors.New("failed to convert from []*Claim to []Claim") - // tableNameRegex is the regex pattern to validate table names tableNameRegex = regexp.MustCompile(`^[a-zA-Z0-9_]+$`) @@ -364,9 +310,9 @@ type Event struct { RemoveLegacyToken *RemoveLegacyToken BackwardLET *BackwardLET ForwardLET *ForwardLET - //Claim *Claim - //UnsetClaim *UnsetClaim - //SetClaim *SetClaim + // Claim *Claim + // UnsetClaim *UnsetClaim + // SetClaim *SetClaim } @@ -473,16 +419,16 @@ func (b BridgeSyncRuntimeData) IsCompatible(storage BridgeSyncRuntimeData) (*Bri } type processor struct { - syncerID string - db *sql.DB - exitTree types.FullTreer - log *log.Logger - mu mutex.RWMutex - halted bool - haltedReason string - dbQueryTimeout time.Duration - bridgeSubscriber aggkitcommon.PubSub[uint64] - initialLER common.Hash + syncerID string + db *sql.DB + exitTree types.FullTreer + log *log.Logger + mu mutex.RWMutex + halted bool + haltedReason string + dbQueryTimeout time.Duration + bridgeSubscriber aggkitcommon.PubSub[uint64] + initialLER common.Hash compatibility.CompatibilityDataStorager[BridgeSyncRuntimeData] } @@ -643,15 +589,6 @@ func (p *processor) buildBridgesFilterClause(depositCount *uint64, networkIDs [] return "", nil } -// buildGlobalIndexFilterClause builds a WHERE clause for filtering by global_index -func buildGlobalIndexFilterClause(globalIndex *big.Int) string { - if globalIndex != nil { - return " WHERE " + fmt.Sprintf("global_index = '%s'", globalIndex.String()) - } - - return "" -} - // buildTokenMappingsFilterClause builds the WHERE clause for the token_mapping table // based on the provided originTokenAddress func (p *processor) buildTokenMappingsFilterClause(originTokenAddress string) string { diff --git a/bridgesync/processor_test.go b/bridgesync/processor_test.go index e8210d640..db69964d7 100644 --- a/bridgesync/processor_test.go +++ b/bridgesync/processor_test.go @@ -20,6 +20,7 @@ import ( bridgetypes 
"github.com/agglayer/aggkit/bridgeservice/types" "github.com/agglayer/aggkit/bridgesync/migrations" bridgesynctypes "github.com/agglayer/aggkit/bridgesync/types" + claimsynctypes "github.com/agglayer/aggkit/claimsync/types" aggkitcommon "github.com/agglayer/aggkit/common" "github.com/agglayer/aggkit/db" dbtypes "github.com/agglayer/aggkit/db/types" @@ -62,7 +63,7 @@ func TestBigIntString(t *testing.T) { tx, err := db.BeginTx(ctx, nil) require.NoError(t, err) - claim := &Claim{ + claim := &claimsynctypes.Claim{ BlockNum: 1, BlockPos: 0, GlobalIndex: GenerateGlobalIndex(true, 0, 1093), @@ -76,7 +77,7 @@ func TestBigIntString(t *testing.T) { RollupExitRoot: common.Hash{}, GlobalExitRoot: common.Hash{}, DestinationNetwork: 12, - Type: ClaimEvent, + Type: claimsynctypes.ClaimEvent, } _, err = tx.Exec(`INSERT INTO block (num) VALUES ($1)`, claim.BlockNum) @@ -94,7 +95,7 @@ func TestBigIntString(t *testing.T) { `, claim.BlockNum, claim.BlockNum) require.NoError(t, err) - claimsFromDB := []*Claim{} + claimsFromDB := []*claimsynctypes.Claim{} require.NoError(t, meddler.ScanAll(rows, &claimsFromDB)) require.Len(t, claimsFromDB, 1) require.Equal(t, claim, claimsFromDB[0]) @@ -500,31 +501,6 @@ func (a *processBlockAction) execute(t *testing.T) { require.Equal(t, a.expectedErr, actualErr) } -// getTotalRecordsAction - -type getTotalRecordsAction struct { - p *processor - description string - tableName string - expectedRecordsNum int -} - -func (a *getTotalRecordsAction) method() string { - return "getTotalRecordsAction" -} - -func (a *getTotalRecordsAction) desc() string { - return a.description -} - -func (a *getTotalRecordsAction) execute(t *testing.T) { - t.Helper() - - recordsNum, err := a.p.GetTotalNumberOfRecords(context.Background(), a.tableName, "") - require.NoError(t, err) - require.Equal(t, a.expectedRecordsNum, recordsNum) -} - func eventsToBridges(events []any) []Bridge { bridges := []Bridge{} for _, event := range events { @@ -539,7 +515,6 @@ func 
eventsToBridges(events []any) []Bridge { return bridges } - func TestHashBridge(t *testing.T) { data, err := os.ReadFile("../tree/testvectors/leaf-vectors.json") require.NoError(t, err) @@ -720,7 +695,6 @@ func TestDecodeGlobalIndex(t *testing.T) { } } - func TestGetBridgesPublished(t *testing.T) { t.Parallel() @@ -1048,7 +1022,6 @@ func TestGetBridgesPaged(t *testing.T) { } } - func TestProcessor_GetTokenMappings(t *testing.T) { t.Parallel() @@ -1260,7 +1233,7 @@ func TestDecodePreEtrogCalldata_Valid(t *testing.T) { } } - expectedClaim := &Claim{ + expectedClaim := &claimsynctypes.Claim{ GlobalIndex: new(big.Int).SetUint64(uint64(globalIndex)), MainnetExitRoot: common.HexToHash("0xdead"), RollupExitRoot: common.HexToHash("0xbeef"), @@ -1285,9 +1258,10 @@ func TestDecodePreEtrogCalldata_Valid(t *testing.T) { ) require.NoError(t, err) - actualClaim := &Claim{ + actualClaim := &claimsynctypes.Claim{ GlobalIndex: new(big.Int).SetUint64(uint64(globalIndex)), } + claimAssetPreEtrogMethodID := common.Hex2Bytes("2cffd02e") method, err := bridgeV1ABI.MethodById(claimAssetPreEtrogMethodID) require.NoError(t, err) @@ -1483,7 +1457,7 @@ func TestDecodePreEtrogCalldata(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - claim := &Claim{ + claim := &claimsynctypes.Claim{ GlobalIndex: new(big.Int).SetUint64(uint64(globalIndex)), MainnetExitRoot: common.Hash{}, RollupExitRoot: common.Hash{}, @@ -1686,7 +1660,7 @@ func TestDecodeEtrogCalldata(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - claim := &Claim{GlobalIndex: globalIndex} + claim := &claimsynctypes.Claim{GlobalIndex: globalIndex} isDecoded, err := claim.DecodeEtrogCalldata(tt.data) if tt.expectError { @@ -1921,7 +1895,6 @@ func TestBridgeSyncRuntimeData_IsCompatible(t *testing.T) { } } - func intPtr(i int) *int { return &i } @@ -1962,7 +1935,6 @@ func TestProcessor_ErrorPathLogging(t *testing.T) { require.Equal(t, 1, count) }) - t.Run("GetLegacyTokenMigrations 
error paths", func(t *testing.T) { t.Parallel() p := createTestProcessor(t, "GetLegacyTokenMigrationsErrorPaths") @@ -2149,8 +2121,6 @@ func createTestTokenMapping(blockNum uint64, blockPos int) *TokenMapping { } } - - func TestDatabaseQueryTimeout(t *testing.T) { normalTimeout := 100 * time.Millisecond shortTimeout := 1 * time.Nanosecond @@ -2185,10 +2155,8 @@ func TestDatabaseQueryTimeout(t *testing.T) { _, err = pShortTimeout.GetBridges(ctx, 1, 1) require.Error(t, err) require.Contains(t, err.Error(), "context deadline exceeded") - } - func TestProcessor_BackwardLET(t *testing.T) { buildBlocksWithSequentialBridges := func(blocksCount, bridgesPerBlock uint64, blockNumOffset uint64, depositCountOffset uint32) []sync.Block { @@ -2550,7 +2518,6 @@ func TestProcessor_BackwardLET(t *testing.T) { } } - func TestHandleForwardLETEvent(t *testing.T) { t.Run("successfully process single leaf with no archived bridge", func(t *testing.T) { p, tx := setupProcessorWithTransaction(t) @@ -3355,7 +3322,6 @@ func encodeLeafDataArrayForTest(t *testing.T, leaves []LeafData) []byte { return encodedBytes } - func TestProcessor_GetBridgeByDepositCount(t *testing.T) { t.Helper() diff --git a/claimsync/claimcalldata_test.go b/claimsync/claimcalldata_test.go index c975b2539..8977c03a9 100644 --- a/claimsync/claimcalldata_test.go +++ b/claimsync/claimcalldata_test.go @@ -1099,7 +1099,7 @@ func TestClaimCalldata(t *testing.T) { logger := log.WithFields("module", "test") // Extract root call first using new function - _, rootCall, err := extractCallData(client, bridgeAddr, tc.log.TxHash, logger, nil) + rootCall, err := extractCallData(client, bridgeAddr, tc.log.TxHash, logger, nil) require.NoError(t, err) // Use setClaimCalldataFromRoot instead of setClaimCalldata diff --git a/claimsync/claimsync.go b/claimsync/claimsync.go index 878262bc0..f419c3603 100644 --- a/claimsync/claimsync.go +++ b/claimsync/claimsync.go @@ -57,7 +57,6 @@ func NewClaimSync( syncerID claimsynctypes.ClaimSyncerID, 
logger aggkitcommon.Logger, ) (*ClaimSync, error) { - dbQueryTimeout := cfg.DBQueryTimeout.Duration if dbQueryTimeout == 0 { dbQueryTimeout = defaultDBTimeout @@ -67,10 +66,7 @@ func NewClaimSync( return nil, fmt.Errorf("claimsync: failed to create storage: %w", err) } - proc, err := newProcessor(logger, store, dbQueryTimeout) - if err != nil { - return nil, err - } + proc := newProcessor(logger, store, dbQueryTimeout) deployment, err := resolveBridgeDeployment(ctx, cfg.BridgeAddr, ethClient) if err != nil { @@ -131,7 +127,8 @@ func NewClaimSync( logger.Infof( "claimsync created: dbPath=%s initialBlock=%d blockFinality=%s bridgeAddr=%s sovereign=%t", - cfg.DBPath, cfg.InitialBlockNum, cfg.BlockFinality.String(), cfg.BridgeAddr.String(), deployment.kind == SovereignChain, + cfg.DBPath, cfg.InitialBlockNum, cfg.BlockFinality.String(), + cfg.BridgeAddr.String(), deployment.kind == SovereignChain, ) return &ClaimSync{ @@ -150,7 +147,7 @@ func NewClaimSync( func (c *ClaimSync) Start(ctx context.Context) { c.logger.Infof("starting claim synchronizer AutoStart: %t InitialBlock: %d", *c.cfg.AutoStart.Resolved, c.cfg.InitialBlockNum) - if *c.cfg.AutoStart.Resolved == true { + if *c.cfg.AutoStart.Resolved { c.driver.Sync(ctx, &c.cfg.InitialBlockNum) } else { c.driver.Sync(ctx, nil) @@ -211,7 +208,8 @@ func (c *ClaimSync) SetNextRequiredBlock(ctx context.Context, blockNumber uint64 return fmt.Errorf("claimsync: failed to get first processed block: %w", err) } if blockNumber <= firstBlock { - return fmt.Errorf("claimsync: cannot set next required block to %d, it must be greater than the first block in DB (%d)", + return fmt.Errorf("claimsync: cannot set next required block to %d, "+ + "it must be greater than the first block in DB (%d)", blockNumber, firstBlock) } if blockNumber > lastBlock { @@ -235,6 +233,25 @@ func (c *ClaimSync) GetClaimsByGlobalIndex(ctx context.Context, globalIndex *big return c.reader.GetClaimsByGlobalIndex(ctx, nil, globalIndex) } +func (c *ClaimSync) 
GetClaimsByGER(ctx context.Context, globalExitRoot common.Hash) ([]*Claim, error) { + return c.reader.GetClaimsByGER(ctx, nil, globalExitRoot) +} + +func (c *ClaimSync) GetClaimsPaged(ctx context.Context, page, pageSize uint32, + networkIDs []uint32, globalIndex *big.Int) ([]*claimsynctypes.Claim, int, error) { + return c.reader.GetClaimsPaged(ctx, page, pageSize, networkIDs, globalIndex) +} + +func (c *ClaimSync) GetUnsetClaimsPaged(ctx context.Context, page, pageSize uint32, + globalIndex *big.Int) ([]*claimsynctypes.UnsetClaim, int, error) { + return c.reader.GetUnsetClaimsPaged(ctx, page, pageSize, globalIndex) +} + +func (c *ClaimSync) GetSetClaimsPaged(ctx context.Context, page, pageSize uint32, + globalIndex *big.Int) ([]*claimsynctypes.SetClaim, int, error) { + return c.reader.GetSetClaimsPaged(ctx, page, pageSize, globalIndex) +} + func (c *ClaimSync) createStartingPoint(ctx context.Context, blockNumber uint64) error { c.logger.Infof("creating starting point at block %d:", blockNumber) header, err := c.ethClient.CustomHeaderByNumber(ctx, aggkittypes.NewBlockNumber(blockNumber)) diff --git a/claimsync/claimsync_rpc.go b/claimsync/claimsync_rpc.go index ee1fa1b9e..72568d3d9 100644 --- a/claimsync/claimsync_rpc.go +++ b/claimsync/claimsync_rpc.go @@ -71,7 +71,8 @@ func (r *ClaimSyncRPC) GetClaims(fromBlock, toBlock uint64) (interface{}, jRPC.E func (r *ClaimSyncRPC) GetClaimsByGlobalIndex(globalIndexStr string) (interface{}, jRPC.Error) { r.logger.Infof("RPC call: lclaimsync_getClaimsByGlobalIndex(%s)", globalIndexStr) globalIndex := new(big.Int) - if _, ok := globalIndex.SetString(globalIndexStr, 10); !ok { + const decimalBase = 10 + if _, ok := globalIndex.SetString(globalIndexStr, decimalBase); !ok { return nil, jRPC.NewRPCError(jRPC.DefaultErrorCode, "ClaimSyncRPC.GetClaimsByGlobalIndex: invalid global index: %s", globalIndexStr) } diff --git a/claimsync/claimsync_test.go b/claimsync/claimsync_test.go index 8704805bd..cf2443eb3 100644 --- 
a/claimsync/claimsync_test.go +++ b/claimsync/claimsync_test.go @@ -82,15 +82,15 @@ func TestClaimSyncerWaitUntilSetNextRequiredBlock(t *testing.T) { _, err = waitForReceipt(ctx, client, tx.Hash(), 10) require.NoError(t, err) logger.Info("*** ClaimSyncer must be waiting to receive the starting point") - _, found, error := claimSyncer.GetLastProcessedBlock(ctx) - require.NoError(t, error) + _, found, err2 := claimSyncer.GetLastProcessedBlock(ctx) + require.NoError(t, err2) require.False(t, found) logger.Info("*** Setting next required block to 1, so must starting syncing and sync the ClaimAsset") err = claimSyncer.SetNextRequiredBlock(ctx, 1) - require.NoError(t, error) + require.NoError(t, err) time.Sleep(time.Second * 5) - lastBlockProcessed, found, error := claimSyncer.GetLastProcessedBlock(ctx) - require.NoError(t, error) + lastBlockProcessed, found, err2 := claimSyncer.GetLastProcessedBlock(ctx) + require.NoError(t, err2) require.True(t, found) logger.Infof("*** Last block processed: %d", lastBlockProcessed) } diff --git a/claimsync/downloader.go b/claimsync/downloader.go index 2f20d2508..2bc136819 100644 --- a/claimsync/downloader.go +++ b/claimsync/downloader.go @@ -55,9 +55,6 @@ const ( // methodIDLength is the length of the method ID in bytes methodIDLength = 4 - - bridgeLeafTypeMessage = uint8(bridgesynctypes.LeafTypeMessage) - bridgeLeafTypeAsset = uint8(bridgesynctypes.LeafTypeAsset) ) // claimQuerier is used by event handlers to check the DetailedClaimEvent boundary. 
@@ -96,9 +93,11 @@ func buildAppender( // TODO: Check syncfullclaims syncFullClaims := true appender := make(sync.LogAppenderMap) - appender[claimEventSignaturePreEtrog] = buildClaimEventHandlerPreEtrog(legacyBridge, ethClient, bridgeAddr, syncFullClaims, log) + appender[claimEventSignaturePreEtrog] = buildClaimEventHandlerPreEtrog( + legacyBridge, ethClient, bridgeAddr, syncFullClaims, log) - appender[claimEventSignature] = buildClaimEventHandler(ctx, deployment.agglayerBridge, ethClient, querier, bridgeAddr, syncFullClaims, log) + appender[claimEventSignature] = buildClaimEventHandler( + ctx, deployment.agglayerBridge, ethClient, querier, bridgeAddr, syncFullClaims, log) if deployment.kind == SovereignChain { appender[detailedClaimEventSignature] = buildDetailedClaimEventHandler(deployment.agglayerBridgeL2) @@ -207,7 +206,7 @@ func buildClaimEventHandler( } // Extract root call for txn_sender and error checking - _, rootCall, err := extractCallData(client, bridgeAddr, l.TxHash, log, nil) + rootCall, err := extractCallData(client, bridgeAddr, l.TxHash, log, nil) if err != nil { return fmt.Errorf("failed to extract claim event tx sender (tx hash: %s): %w", l.TxHash, err) } @@ -300,7 +299,7 @@ func buildClaimEventHandlerPreEtrog( Amount: claimEvent.Amount, } // Extract root call for txn_sender and error checking - _, rootCall, err := extractCallData(client, bridgeAddr, l.TxHash, logger, nil) + rootCall, err := extractCallData(client, bridgeAddr, l.TxHash, logger, nil) if err != nil { return fmt.Errorf("failed to extract claim event tx sender (tx hash: %s): %w", l.TxHash, err) } @@ -376,15 +375,15 @@ type tracerCfg struct { // findCall traverses the call trace using DFS and either returns the call or stops when a callback succeeds. 
func findCall(rootCall Call, targetAddr common.Address, callback func(Call) (bool, error), logger aggkitcommon.Logger, -) ([]*Call, error) { +) error { callStack := stack.New() callStack.Push(rootCall) - matchingCalls := []*Call{} + found := false for callStack.Len() > 0 { currentCallInterface := callStack.Pop() currentCall, ok := currentCallInterface.(Call) if !ok { - return nil, fmt.Errorf("unexpected type for 'currentCall'. Expected 'call', got '%T'", currentCallInterface) + return fmt.Errorf("unexpected type for 'currentCall'. Expected 'call', got '%T'", currentCallInterface) } // Skip reverted calls @@ -395,16 +394,15 @@ func findCall(rootCall Call, targetAddr common.Address, callback func(Call) (boo } if currentCall.To == targetAddr { + found = true if callback != nil { - found, err := callback(currentCall) + ok, err := callback(currentCall) if err != nil { - return nil, err + return err } - if found { - matchingCalls = append(matchingCalls, ¤tCall) + if !ok { + found = false } - } else { - matchingCalls = append(matchingCalls, ¤tCall) } } @@ -415,10 +413,10 @@ func findCall(rootCall Call, targetAddr common.Address, callback func(Call) (boo } } } - if len(matchingCalls) > 0 { - return matchingCalls, nil + if !found { + return db.ErrNotFound } - return nil, db.ErrNotFound + return nil } // extractRootCall extracts the root call for a transaction using debug_traceTransaction. 
@@ -437,20 +435,19 @@ func extractCallData( txHash common.Hash, logger aggkitcommon.Logger, callback func(c Call) (bool, error), -) (foundCalls []*Call, rootCall *Call, err error) { +) (*Call, error) { // Extract root call first - rootCall, err = extractRootCall(client, bridgeAddr, txHash) + rootCall, err := extractRootCall(client, bridgeAddr, txHash) if err != nil { - return nil, nil, err + return nil, err } // Find the specific call to the bridge contract - foundCalls, err = findCall(*rootCall, bridgeAddr, callback, logger) - if err != nil { - return nil, nil, err + if err = findCall(*rootCall, bridgeAddr, callback, logger); err != nil { + return nil, err } - return foundCalls, rootCall, nil + return rootCall, nil } // setClaimCalldataFromRoot finds and decodes calldata for the given bridge address using an already traced root call. @@ -467,7 +464,7 @@ func setClaimCalldataFromRoot( bridge common.Address, logger aggkitcommon.Logger, ) error { - _, err := findCall(*rootCall, bridge, + err := findCall(*rootCall, bridge, func(call Call) (bool, error) { // Skip reverted calls if call.Err != nil { diff --git a/claimsync/embedded.go b/claimsync/embedded.go index 46777f130..9842c58f3 100644 --- a/claimsync/embedded.go +++ b/claimsync/embedded.go @@ -99,15 +99,18 @@ func NewEmbedded( return nil, fmt.Errorf("claimsync embedded: failed to build appender: %w", err) } - logger.Infof("claimsync embedded created: bridgeAddr=%s sovereign=%t", bridgeAddr.String(), deployment.kind == SovereignChain) + logger.Infof("claimsync embedded created: bridgeAddr=%s sovereign=%t", + bridgeAddr.String(), deployment.kind == SovereignChain) return &EmbeddedClaimSync{ Processor: proc, Reader: storage, Appender: appender}, nil } -func (p *claimEmbeddedProcessor) ProcessBlockWithTx(ctx context.Context, tx dbtypes.Querier, block sync.Block, eventRaw any) error { +func (p *claimEmbeddedProcessor) ProcessBlockWithTx( + ctx context.Context, tx dbtypes.Querier, block sync.Block, eventRaw any, +) 
error { event, ok := eventRaw.(Event) if !ok { return fmt.Errorf("claimsync ProcessBlock: unexpected event type %T in block %d", event, block.Num) @@ -143,11 +146,15 @@ func (p *claimEmbeddedProcessor) ProcessBlockWithTx(ctx context.Context, tx dbty // it returns: // - the number of rows affected (currently the number of blocks deleted) // - error if the deletion failed, or nil if successful -func (p *claimEmbeddedProcessor) ReorgWithTx(ctx context.Context, tx dbtypes.Querier, firstReorgedBlock uint64) (int64, error) { +func (p *claimEmbeddedProcessor) ReorgWithTx( + ctx context.Context, tx dbtypes.Querier, firstReorgedBlock uint64, +) (int64, error) { return p.deleteBlocksFrom(ctx, tx, firstReorgedBlock) } -func (p *claimEmbeddedProcessor) deleteBlocksFrom(ctx context.Context, tx dbtypes.Querier, firstReorgedBlock uint64) (int64, error) { +func (p *claimEmbeddedProcessor) deleteBlocksFrom( + ctx context.Context, tx dbtypes.Querier, firstReorgedBlock uint64, +) (int64, error) { rowsAffected, err := p.storage.DeleteBlocksFrom(ctx, tx, firstReorgedBlock) if err != nil { return 0, fmt.Errorf("claimsync deleteBlocksFrom: %w", err) diff --git a/claimsync/processor.go b/claimsync/processor.go index fcce9d171..84272e499 100644 --- a/claimsync/processor.go +++ b/claimsync/processor.go @@ -22,14 +22,16 @@ type processor struct { embeddedProcessor claimsynctypes.EmbeddedProcessor } -func newProcessor(logger aggkitcommon.Logger, storage claimsynctypes.ClaimStorager, dbQueryTimeout time.Duration) (*processor, error) { +func newProcessor( + logger aggkitcommon.Logger, storage claimsynctypes.ClaimStorager, dbQueryTimeout time.Duration, +) *processor { return &processor{ storage: storage, log: logger, dbQueryTimeout: dbQueryTimeout, CompatibilityDataStorager: storage, embeddedProcessor: newEmbeddedProcessor(logger, storage), - }, nil + } } // ProcessBlock stores the block and its claim-related events atomically. 
@@ -111,7 +113,9 @@ func (p *processor) GetLastProcessedBlock(ctx context.Context) (uint64, bool, er } // GetBoundaryBlockForClaimType returns the max block_num for claims of the given type. -func (p *processor) GetBoundaryBlockForClaimType(ctx context.Context, tx dbtypes.Querier, claimType ClaimType) (uint64, error) { +func (p *processor) GetBoundaryBlockForClaimType( + ctx context.Context, tx dbtypes.Querier, claimType ClaimType, +) (uint64, error) { return p.storage.GetBoundaryBlockForClaimType(ctx, tx, claimType) } diff --git a/claimsync/storage/storage.go b/claimsync/storage/storage.go index 5b0066409..98d409864 100644 --- a/claimsync/storage/storage.go +++ b/claimsync/storage/storage.go @@ -95,23 +95,27 @@ func NewStandalone(logger aggkitcommon.Logger, dbPath string, ownerName string, } if err := migrations.RunMigrations(logger, database); err != nil { - database.Close() //nolint:errcheck + database.Close() return nil, fmt.Errorf("claimsync storage: failed to run migrations: %w", err) } return &claimStorage{ - database: database, - compatStore: compatibility.NewKeyValueToCompatibilityStorage[aggsync.RuntimeData](db.NewKeyValueStorage(database), ownerName), + database: database, + compatStore: compatibility.NewKeyValueToCompatibilityStorage[aggsync.RuntimeData]( + db.NewKeyValueStorage(database), ownerName), log: logger, dbQueryTimeout: dbQueryTimeout, }, nil } // New creates a Storage using the provided sql.DB, so it can share -func New(logger aggkitcommon.Logger, database *sql.DB, ownerName string, dbQueryTimeout time.Duration) (claimsynctypes.ClaimStorager, error) { +func New( + logger aggkitcommon.Logger, database *sql.DB, ownerName string, dbQueryTimeout time.Duration, +) (claimsynctypes.ClaimStorager, error) { return &claimStorage{ - database: database, - compatStore: compatibility.NewKeyValueToCompatibilityStorage[aggsync.RuntimeData](db.NewKeyValueStorage(database), ownerName), + database: database, + compatStore: 
compatibility.NewKeyValueToCompatibilityStorage[aggsync.RuntimeData]( + db.NewKeyValueStorage(database), ownerName), log: logger, dbQueryTimeout: dbQueryTimeout, }, nil @@ -123,7 +127,9 @@ func (s *claimStorage) NewTx(ctx context.Context) (dbtypes.Txer, error) { } // GetCompatibilityData implements claimsynctypes.ClaimStorager. -func (s *claimStorage) GetCompatibilityData(ctx context.Context, tx dbtypes.Querier) (bool, aggsync.RuntimeData, error) { +func (s *claimStorage) GetCompatibilityData( + ctx context.Context, tx dbtypes.Querier, +) (bool, aggsync.RuntimeData, error) { return s.compatStore.GetCompatibilityData(ctx, tx) } @@ -141,7 +147,9 @@ func (s *claimStorage) getQuerier(tx dbtypes.Querier) dbtypes.Querier { } // InsertBlock inserts a block row using meddler. -func (s *claimStorage) InsertBlock(_ context.Context, tx dbtypes.Querier, blockNum uint64, blockHash common.Hash) error { +func (s *claimStorage) InsertBlock( + _ context.Context, tx dbtypes.Querier, blockNum uint64, blockHash common.Hash, +) error { if err := meddler.Insert(s.getQuerier(tx), "block", &blockRecord{Num: blockNum, Hash: blockHash.Hex()}); err != nil { return fmt.Errorf("InsertBlock %d: %w", blockNum, err) } @@ -175,7 +183,9 @@ func (s *claimStorage) InsertSetClaim(_ context.Context, tx dbtypes.Querier, sc // GetClaims returns claims in [fromBlock, toBlock] using compaction logic: // claims with an unset_claim are returned uncompacted; others are compacted // (oldest metadata + newest proofs per global_index). 
-func (s *claimStorage) GetClaims(ctx context.Context, tx dbtypes.Querier, fromBlock, toBlock uint64) ([]claimsynctypes.Claim, error) { +func (s *claimStorage) GetClaims( + ctx context.Context, tx dbtypes.Querier, fromBlock, toBlock uint64, +) ([]claimsynctypes.Claim, error) { query := fmt.Sprintf(` WITH all_claims_ranked AS ( SELECT @@ -222,7 +232,9 @@ func (s *claimStorage) GetClaims(ctx context.Context, tx dbtypes.Querier, fromBl } // GetClaimsByGlobalIndex returns claims for the given global index using compaction logic. -func (s *claimStorage) GetClaimsByGlobalIndex(ctx context.Context, tx dbtypes.Querier, globalIndex *big.Int) ([]claimsynctypes.Claim, error) { +func (s *claimStorage) GetClaimsByGlobalIndex( + ctx context.Context, tx dbtypes.Querier, globalIndex *big.Int, +) ([]claimsynctypes.Claim, error) { if globalIndex == nil { return nil, errors.New("GetClaimsByGlobalIndex: globalIndex cannot be nil") } @@ -301,7 +313,9 @@ func (s *claimStorage) GetLastProcessedBlock(ctx context.Context, tx dbtypes.Que // GetBoundaryBlockForClaimType returns the max block_num for claims of the given type. // Returns db.ErrNotFound if no claims of that type exist. -func (s *claimStorage) GetBoundaryBlockForClaimType(ctx context.Context, tx dbtypes.Querier, claimType claimsynctypes.ClaimType) (uint64, error) { +func (s *claimStorage) GetBoundaryBlockForClaimType( + ctx context.Context, tx dbtypes.Querier, claimType claimsynctypes.ClaimType, +) (uint64, error) { dbCtx, cancel := s.withDatabaseTimeout(ctx) defer cancel() @@ -319,11 +333,14 @@ func (s *claimStorage) GetBoundaryBlockForClaimType(ctx context.Context, tx dbty // GetClaimsByGER returns all DetailedClaimEvent claims with the given global exit root, // ordered by block_num/block_pos ascending. If the claim table does not exist (e.g. L1 // processor), returns nil, nil gracefully. 
-func (p *claimStorage) GetClaimsByGER(ctx context.Context, globalExitRoot common.Hash) ([]*claimsynctypes.Claim, error) { +func (p *claimStorage) GetClaimsByGER( + ctx context.Context, tx dbtypes.Querier, globalExitRoot common.Hash, +) ([]*claimsynctypes.Claim, error) { dbCtx, cancel := p.withDatabaseTimeout(ctx) defer cancel() - rows, err := p.database.QueryContext(dbCtx, claimsByGERSQL, globalExitRoot.Hex(), claimsynctypes.DetailedClaimEvent) + rows, err := p.getQuerier(tx).QueryContext( + dbCtx, claimsByGERSQL, globalExitRoot.Hex(), claimsynctypes.DetailedClaimEvent) if err != nil { if strings.Contains(err.Error(), "no such table") { return nil, nil diff --git a/claimsync/storage/storage_paged.go b/claimsync/storage/storage_paged.go index b06948c7c..8bba612c5 100644 --- a/claimsync/storage/storage_paged.go +++ b/claimsync/storage/storage_paged.go @@ -30,6 +30,7 @@ var ( tableNameRegex = regexp.MustCompile(`^[a-zA-Z0-9_]+$`) ) +//nolint:dupl func (p *claimStorage) GetSetClaimsPaged( ctx context.Context, pageNumber, pageSize uint32, globalIndex *big.Int, diff --git a/claimsync/types/claim_reader.go b/claimsync/types/claim_reader.go index 031a23d57..9a949a1ce 100644 --- a/claimsync/types/claim_reader.go +++ b/claimsync/types/claim_reader.go @@ -14,7 +14,7 @@ type ClaimsReader interface { GetBoundaryBlockForClaimType(ctx context.Context, tx dbtypes.Querier, claimType ClaimType) (uint64, error) GetClaims(ctx context.Context, tx dbtypes.Querier, fromBlock, toBlock uint64) ([]Claim, error) GetClaimsByGlobalIndex(ctx context.Context, tx dbtypes.Querier, globalIndex *big.Int) ([]Claim, error) - GetClaimsByGER(ctx context.Context, globalExitRoot common.Hash) ([]*Claim, error) + GetClaimsByGER(ctx context.Context, tx dbtypes.Querier, globalExitRoot common.Hash) ([]*Claim, error) GetClaimsPaged( ctx context.Context, pageNumber, pageSize uint32, networkIDs []uint32, globalIndex *big.Int, diff --git a/claimsync/types/claim_storager.go b/claimsync/types/claim_storager.go 
index 71edd02cc..5e46b13f1 100644 --- a/claimsync/types/claim_storager.go +++ b/claimsync/types/claim_storager.go @@ -39,7 +39,7 @@ type ClaimStorager interface { // DeleteBlocksFrom deletes all blocks with num >= firstBlock (cascade-deletes claims etc.) DeleteBlocksFrom(ctx context.Context, tx dbtypes.Querier, firstBlock uint64) (int64, error) // GetClaimsByGER returns all DetailedClaimEvent claims with the given global exit root - GetClaimsByGER(ctx context.Context, globalExitRoot common.Hash) ([]*Claim, error) + GetClaimsByGER(ctx context.Context, tx dbtypes.Querier, globalExitRoot common.Hash) ([]*Claim, error) // GetClaimsPaged returns claims for the given page parameters and filters, // it returns: // - the list of claims for the requested page diff --git a/claimsync/types/claim_syncer.go b/claimsync/types/claim_syncer.go index 97c7931e8..d989cf6fb 100644 --- a/claimsync/types/claim_syncer.go +++ b/claimsync/types/claim_syncer.go @@ -9,7 +9,7 @@ type ClaimSyncer interface { OriginNetwork() uint32 // GetLastProcessedBlock is deprecated in favour GetProcessedBlockRange GetLastProcessedBlock(ctx context.Context) (uint64, bool, error) - //GetStatus(ctx context.Context) (Status, error) + // GetStatus(ctx context.Context) (Status, error) // SetNextRequiredBlock sets the next required block number. 
It is used by aggsender that // set the next required block to the next one from the previous settled certificate // If the syncer have no block yet is going to use this as starting point diff --git a/claimsync/types/mocks/mock_claims_reader.go b/claimsync/types/mocks/mock_claims_reader.go index 441100cd9..d5ff81c11 100644 --- a/claimsync/types/mocks/mock_claims_reader.go +++ b/claimsync/types/mocks/mock_claims_reader.go @@ -147,9 +147,9 @@ func (_c *ClaimsReader_GetClaims_Call) RunAndReturn(run func(context.Context, ty return _c } -// GetClaimsByGER provides a mock function with given fields: ctx, globalExitRoot -func (_m *ClaimsReader) GetClaimsByGER(ctx context.Context, globalExitRoot common.Hash) ([]*claimsynctypes.Claim, error) { - ret := _m.Called(ctx, globalExitRoot) +// GetClaimsByGER provides a mock function with given fields: ctx, tx, globalExitRoot +func (_m *ClaimsReader) GetClaimsByGER(ctx context.Context, tx types.Querier, globalExitRoot common.Hash) ([]*claimsynctypes.Claim, error) { + ret := _m.Called(ctx, tx, globalExitRoot) if len(ret) == 0 { panic("no return value specified for GetClaimsByGER") @@ -157,19 +157,19 @@ func (_m *ClaimsReader) GetClaimsByGER(ctx context.Context, globalExitRoot commo var r0 []*claimsynctypes.Claim var r1 error - if rf, ok := ret.Get(0).(func(context.Context, common.Hash) ([]*claimsynctypes.Claim, error)); ok { - return rf(ctx, globalExitRoot) + if rf, ok := ret.Get(0).(func(context.Context, types.Querier, common.Hash) ([]*claimsynctypes.Claim, error)); ok { + return rf(ctx, tx, globalExitRoot) } - if rf, ok := ret.Get(0).(func(context.Context, common.Hash) []*claimsynctypes.Claim); ok { - r0 = rf(ctx, globalExitRoot) + if rf, ok := ret.Get(0).(func(context.Context, types.Querier, common.Hash) []*claimsynctypes.Claim); ok { + r0 = rf(ctx, tx, globalExitRoot) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]*claimsynctypes.Claim) } } - if rf, ok := ret.Get(1).(func(context.Context, common.Hash) error); ok { - r1 = 
rf(ctx, globalExitRoot) + if rf, ok := ret.Get(1).(func(context.Context, types.Querier, common.Hash) error); ok { + r1 = rf(ctx, tx, globalExitRoot) } else { r1 = ret.Error(1) } @@ -184,14 +184,15 @@ type ClaimsReader_GetClaimsByGER_Call struct { // GetClaimsByGER is a helper method to define mock.On call // - ctx context.Context +// - tx types.Querier // - globalExitRoot common.Hash -func (_e *ClaimsReader_Expecter) GetClaimsByGER(ctx interface{}, globalExitRoot interface{}) *ClaimsReader_GetClaimsByGER_Call { - return &ClaimsReader_GetClaimsByGER_Call{Call: _e.mock.On("GetClaimsByGER", ctx, globalExitRoot)} +func (_e *ClaimsReader_Expecter) GetClaimsByGER(ctx interface{}, tx interface{}, globalExitRoot interface{}) *ClaimsReader_GetClaimsByGER_Call { + return &ClaimsReader_GetClaimsByGER_Call{Call: _e.mock.On("GetClaimsByGER", ctx, tx, globalExitRoot)} } -func (_c *ClaimsReader_GetClaimsByGER_Call) Run(run func(ctx context.Context, globalExitRoot common.Hash)) *ClaimsReader_GetClaimsByGER_Call { +func (_c *ClaimsReader_GetClaimsByGER_Call) Run(run func(ctx context.Context, tx types.Querier, globalExitRoot common.Hash)) *ClaimsReader_GetClaimsByGER_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(common.Hash)) + run(args[0].(context.Context), args[1].(types.Querier), args[2].(common.Hash)) }) return _c } @@ -201,7 +202,7 @@ func (_c *ClaimsReader_GetClaimsByGER_Call) Return(_a0 []*claimsynctypes.Claim, return _c } -func (_c *ClaimsReader_GetClaimsByGER_Call) RunAndReturn(run func(context.Context, common.Hash) ([]*claimsynctypes.Claim, error)) *ClaimsReader_GetClaimsByGER_Call { +func (_c *ClaimsReader_GetClaimsByGER_Call) RunAndReturn(run func(context.Context, types.Querier, common.Hash) ([]*claimsynctypes.Claim, error)) *ClaimsReader_GetClaimsByGER_Call { _c.Call.Return(run) return _c } diff --git a/cmd/run.go b/cmd/run.go index a3d92551e..453e135f1 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -106,7 +106,7 @@ func 
start(cliCtx *cli.Context) error { } }() var rpcServices []jRPC.Service - l1MultiDownloader, l1mdServices, err := runL1MultiDownloaderIfNeeded(components,l1Client, cfg.L1Multidownloader) + l1MultiDownloader, l1mdServices, err := runL1MultiDownloaderIfNeeded(components, l1Client, cfg.L1Multidownloader) if err != nil { return fmt.Errorf("failed to create L1MultiDownloader: %w", err) } @@ -145,7 +145,8 @@ func start(cliCtx *cli.Context) error { return fmt.Errorf("failed to get initial local exit root: %w", err) } - l2ClaimSync := runClaimSyncL2IfNeeded(ctx, components, cfg.ClaimL2Sync, reorgDetectorL2, l2Client, rollupDataQuerier.RollupID) + l2ClaimSync := runClaimSyncL2IfNeeded( + ctx, components, cfg.ClaimL2Sync, reorgDetectorL2, l2Client, rollupDataQuerier.RollupID) if l2ClaimSync != nil { rpcServices = append(rpcServices, l2ClaimSync.GetRPCServices()...) } @@ -179,6 +180,8 @@ func start(cliCtx *cli.Context) error { l2GERSync, l1BridgeSync, l2BridgeSync, + l1ClaimSync, + l2ClaimSync, ) go b.Start(ctx) log.Info("Bridge service started") @@ -560,13 +563,10 @@ func isNeeded(casesWhereNeeded, actualCases []string) bool { } func l1InfoTreeMustRun(components []string) bool { - if !isNeeded([]string{ + return isNeeded([]string{ aggkitcommon.AGGORACLE, aggkitcommon.AGGSENDER, aggkitcommon.AGGSENDERVALIDATOR, aggkitcommon.BRIDGE, aggkitcommon.L1INFOTREESYNC, - aggkitcommon.L2GERSYNC, aggkitcommon.AGGCHAINPROOFGEN}, components) { - return false - } - return true + aggkitcommon.L2GERSYNC, aggkitcommon.AGGCHAINPROOFGEN}, components) } func runL1InfoTreeSyncerIfNeeded( @@ -689,9 +689,9 @@ func runL1MultiDownloaderIfNeeded( log.Warnf("L1 MultiDownloader is disabled, don't creating the service.") return nil, nil, nil } - if !l1InfoTreeMustRun(components){ + if !l1InfoTreeMustRun(components) { log.Infof("L1 MultiDownloader not going to run because components: %v", components) - return nil, nil,nil + return nil, nil, nil } logger := log.WithFields("module", "L1MultiDownloader") @@ 
-982,6 +982,8 @@ func createBridgeService( injectedGERs bridgeservice.L2GERSyncer, bridgeL1 bridgeservice.Bridger, bridgeL2 bridgeservice.Bridger, + claimL1 bridgeservice.Claimer, + claimL2 bridgeservice.Claimer, ) *bridgeservice.BridgeService { logger := log.WithFields("module", aggkitcommon.BRIDGE) @@ -999,7 +1001,9 @@ func createBridgeService( l1InfoTree, injectedGERs, bridgeL1, + claimL1, bridgeL2, + claimL2, ) } diff --git a/config/types/true_false_auto.go b/config/types/true_false_auto.go index 85cd3a95a..ab68c03e0 100644 --- a/config/types/true_false_auto.go +++ b/config/types/true_false_auto.go @@ -12,25 +12,31 @@ type TrueFalseAutoMode struct { Resolved *bool `mapstructure:"-"` } +const ( + trueModeStr = "true" + falseModeStr = "false" + autoModeStr = "auto" +) + var ( // TrueMode always activates the feature. - TrueMode = TrueFalseAutoMode{Mode: "true"} + TrueMode = TrueFalseAutoMode{Mode: trueModeStr} // FalseMode always deactivates the feature. - FalseMode = TrueFalseAutoMode{Mode: "false"} + FalseMode = TrueFalseAutoMode{Mode: falseModeStr} // AutoMode decides automatically based on context. - AutoMode = TrueFalseAutoMode{Mode: "auto"} + AutoMode = TrueFalseAutoMode{Mode: autoModeStr} ) // UnmarshalText implements encoding.TextUnmarshaler. 
func (m *TrueFalseAutoMode) UnmarshalText(text []byte) error { str := strings.ToLower(strings.TrimSpace(string(text))) switch str { - case "true": - m.Mode = "true" - case "false": - m.Mode = "false" - case "auto": - m.Mode = "auto" + case trueModeStr: + m.Mode = trueModeStr + case falseModeStr: + m.Mode = falseModeStr + case autoModeStr: + m.Mode = autoModeStr default: return fmt.Errorf("invalid TrueFalseAutoMode: %s (valid values: true, false, auto)", str) } @@ -59,11 +65,11 @@ func (m TrueFalseAutoMode) Validate(fieldName string) error { func (m *TrueFalseAutoMode) Resolve(autoModeResult bool) bool { var result bool switch m.Mode { - case "true": + case trueModeStr: result = true - case "false": + case falseModeStr: result = false - case "auto": + case autoModeStr: result = autoModeResult } m.Resolved = &result diff --git a/l1infotreesync/mock_driver_interface.go b/l1infotreesync/mock_driver_interface.go index d8f4a5a0c..30340eec2 100644 --- a/l1infotreesync/mock_driver_interface.go +++ b/l1infotreesync/mock_driver_interface.go @@ -68,9 +68,9 @@ func (_c *DriverInterfaceMock_GetCompletionPercentage_Call) RunAndReturn(run fun return _c } -// Sync provides a mock function with given fields: ctx -func (_m *DriverInterfaceMock) Sync(ctx context.Context) { - _m.Called(ctx) +// Sync provides a mock function with given fields: ctx, firstBlockNumber +func (_m *DriverInterfaceMock) Sync(ctx context.Context, firstBlockNumber *uint64) { + _m.Called(ctx, firstBlockNumber) } // DriverInterfaceMock_Sync_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Sync' @@ -80,13 +80,14 @@ type DriverInterfaceMock_Sync_Call struct { // Sync is a helper method to define mock.On call // - ctx context.Context -func (_e *DriverInterfaceMock_Expecter) Sync(ctx interface{}) *DriverInterfaceMock_Sync_Call { - return &DriverInterfaceMock_Sync_Call{Call: _e.mock.On("Sync", ctx)} +// - firstBlockNumber *uint64 +func (_e *DriverInterfaceMock_Expecter) 
Sync(ctx interface{}, firstBlockNumber interface{}) *DriverInterfaceMock_Sync_Call { + return &DriverInterfaceMock_Sync_Call{Call: _e.mock.On("Sync", ctx, firstBlockNumber)} } -func (_c *DriverInterfaceMock_Sync_Call) Run(run func(ctx context.Context)) *DriverInterfaceMock_Sync_Call { +func (_c *DriverInterfaceMock_Sync_Call) Run(run func(ctx context.Context, firstBlockNumber *uint64)) *DriverInterfaceMock_Sync_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context)) + run(args[0].(context.Context), args[1].(*uint64)) }) return _c } @@ -96,7 +97,7 @@ func (_c *DriverInterfaceMock_Sync_Call) Return() *DriverInterfaceMock_Sync_Call return _c } -func (_c *DriverInterfaceMock_Sync_Call) RunAndReturn(run func(context.Context)) *DriverInterfaceMock_Sync_Call { +func (_c *DriverInterfaceMock_Sync_Call) RunAndReturn(run func(context.Context, *uint64)) *DriverInterfaceMock_Sync_Call { _c.Run(run) return _c } diff --git a/l2gersync/processor_test.go b/l2gersync/processor_test.go index 8bc39de6a..e8f9dc382 100644 --- a/l2gersync/processor_test.go +++ b/l2gersync/processor_test.go @@ -193,7 +193,7 @@ func TestReorg(t *testing.T) { err = processor.Reorg(context.TODO(), 2) require.NoError(t, err) - blockNum, err := processor.GetLastProcessedBlock(context.TODO()) + blockNum, _, err := processor.GetLastProcessedBlock(context.TODO()) require.NoError(t, err) require.Equal(t, uint64(1), blockNum) diff --git a/multidownloader/e2e_test.go b/multidownloader/e2e_test.go index 975513cdb..8904cc6d3 100644 --- a/multidownloader/e2e_test.go +++ b/multidownloader/e2e_test.go @@ -284,7 +284,8 @@ func TestE2E_CustomSyncer(t *testing.T) { } }() go func() { - driver.Sync(ctx) + fromBlock := syncerConfig.FromBlock + driver.Sync(ctx, &fromBlock) }() for numReorgs := 0; numReorgs < 3; numReorgs++ { diff --git a/multidownloader/evm_multidownloader_test.go b/multidownloader/evm_multidownloader_test.go index 041d8632d..48a3671fc 100644 --- 
a/multidownloader/evm_multidownloader_test.go +++ b/multidownloader/evm_multidownloader_test.go @@ -167,7 +167,8 @@ func TestEVMMultidownloaderExploratory(t *testing.T) { timer := aggkitcommon.TimeTracker{} timer.Start() if syncer != nil { - syncer.Sync(t.Context()) + fromBlock := uint64(5157574) + syncer.Sync(t.Context(), &fromBlock) } timer.Stop() log.Infof("L1InfoTree sync finished in %s", timer.String()) diff --git a/multidownloader/sync/evmdriver.go b/multidownloader/sync/evmdriver.go index c4bf43a55..a67c8c736 100644 --- a/multidownloader/sync/evmdriver.go +++ b/multidownloader/sync/evmdriver.go @@ -57,6 +57,7 @@ func (d *EVMDriver) Sync(ctx context.Context, firstBlockNumber *uint64) { // it just check that is equal to syncerConfig.InitialBlockNum if firstBlockNumber == nil { d.logger.Fatalf("multidownloader doesnt support firstBlockNumber==nil") + return } if *firstBlockNumber != d.syncerConfig.FromBlock { d.logger.Fatalf("multidownloader doesnt support firstBlockNumber different than FromBlock, got %d, expected %d", diff --git a/sync/evmdriver_test.go b/sync/evmdriver_test.go index de6d1b7c1..41a2f8958 100644 --- a/sync/evmdriver_test.go +++ b/sync/evmdriver_test.go @@ -84,7 +84,7 @@ func TestSync(t *testing.T) { }) // Mocking this actions, the driver should "store" all the blocks from the downloader - pm.EXPECT().GetLastProcessedBlock(ctx).Return(uint64(3), nil) + pm.EXPECT().GetLastProcessedBlock(ctx).Return(uint64(3), true, nil) rdm.EXPECT().AddBlockToTrack(mock.Anything, reorgDetectorID, expectedBlock1.Num, expectedBlock1.Hash).Return(nil) pm.EXPECT().ProcessBlock(mock.Anything, Block{Num: expectedBlock1.Num, Events: expectedBlock1.Events, Hash: expectedBlock1.Hash}). Return(nil) @@ -93,7 +93,7 @@ func TestSync(t *testing.T) { pm.EXPECT(). ProcessBlock(mock.Anything, Block{Num: expectedBlock2.Num, Events: expectedBlock2.Events, Hash: expectedBlock2.Hash}). 
Return(nil) - go driver.Sync(ctx) + go driver.Sync(ctx, nil) time.Sleep(time.Millisecond * 200) // time to download expectedBlock1 // Trigger reorg 1 @@ -169,7 +169,7 @@ func TestSync_ReorgCancelsRetryHandlerInHandleNewBlock(t *testing.T) { } }) - pm.EXPECT().GetLastProcessedBlock(ctx).Return(uint64(3), nil) + pm.EXPECT().GetLastProcessedBlock(ctx).Return(uint64(3), true, nil) // AddBlockToTrack always returns nil rdm.EXPECT().AddBlockToTrack(mock.Anything, reorgDetectorID, expectedBlock.Num, expectedBlock.Hash). @@ -188,7 +188,7 @@ func TestSync_ReorgCancelsRetryHandlerInHandleNewBlock(t *testing.T) { } }) - go driver.Sync(ctx) + go driver.Sync(ctx, nil) time.Sleep(300 * time.Millisecond) // Let it retry a few times @@ -392,12 +392,12 @@ func TestCheckCompatibility(t *testing.T) { driver.compatibilityChecker = compatibilityCheckerMock t.Run("pass compatibility check", func(t *testing.T) { compatibilityCheckerMock.EXPECT().Check(context.Background(), nil).Return(nil) - processorMock.EXPECT().GetLastProcessedBlock(context.Background()).Return(uint64(1), errUnittest) + processorMock.EXPECT().GetLastProcessedBlock(context.Background()).Return(uint64(1), false, errUnittest) LogFatalf = func(format string, args ...any) { panic("should not call log.Fatalf") } require.Panics(t, func() { - driver.Sync(context.Background()) + driver.Sync(context.Background(), nil) }, "should stop because GetLastProcessedBlock failed") }) t.Run("fails compatibility check ", func(t *testing.T) { @@ -406,7 +406,7 @@ func TestCheckCompatibility(t *testing.T) { panic("should not call log.Fatalf") } require.Panics(t, func() { - driver.Sync(context.Background()) + driver.Sync(context.Background(), nil) }, "should stop because GetLastProcessedBlock failed") }) } @@ -431,7 +431,7 @@ func TestEVMDriver_Sync(t *testing.T) { if err != nil { t.Fatalf("could not construct receiver type: %v", err) } - d.Sync(context.Background()) + d.Sync(context.Background(), nil) }) } } diff --git 
a/tools/remove_ger/diagnosis.go b/tools/remove_ger/diagnosis.go index 70e297cbf..d8d0c34f0 100644 --- a/tools/remove_ger/diagnosis.go +++ b/tools/remove_ger/diagnosis.go @@ -11,6 +11,7 @@ import ( "github.com/agglayer/aggkit/bridgeservice/client" bridgetypes "github.com/agglayer/aggkit/bridgeservice/types" "github.com/agglayer/aggkit/bridgesync" + claimsynctypes "github.com/agglayer/aggkit/claimsync/types" "github.com/agglayer/aggkit/l1infotreesync" "github.com/agglayer/aggkit/log" "github.com/ethereum/go-ethereum/accounts/abi/bind" @@ -148,7 +149,7 @@ func (e GERExistsOnL1Error) Error() string { // Exported so E2E tests can use the same query for wait and assertion as the tool. func GetClaimsByGER( ctx context.Context, bridgeService *client.Client, networkID uint32, gerHash common.Hash, -) ([]*bridgesync.Claim, error) { +) ([]*claimsynctypes.Claim, error) { res, err := bridgeService.GetClaimsByGER(ctx, networkID, gerHash.Hex()) if err != nil { return nil, fmt.Errorf("GetClaimsByGER: %w", err) @@ -156,15 +157,15 @@ func GetClaimsByGER( if res == nil || len(res.Claims) == 0 { return nil, nil } - claims := make([]*bridgesync.Claim, 0, len(res.Claims)) + claims := make([]*claimsynctypes.Claim, 0, len(res.Claims)) for _, cr := range res.Claims { claims = append(claims, claimResponseToClaim(cr)) } return claims, nil } -// claimResponseToClaim converts a bridge service ClaimResponse to a bridgesync.Claim. -func claimResponseToClaim(r *bridgetypes.ClaimResponse) *bridgesync.Claim { +// claimResponseToClaim converts a bridge service ClaimResponse to a claimsynctypes.Claim. 
+func claimResponseToClaim(r *bridgetypes.ClaimResponse) *claimsynctypes.Claim { globalIndex, ok := new(big.Int).SetString(string(r.GlobalIndex), decimalBase) if !ok { log.Warnf("claimResponseToClaim: failed to parse GlobalIndex %q, defaulting to 0", r.GlobalIndex) @@ -175,7 +176,7 @@ func claimResponseToClaim(r *bridgetypes.ClaimResponse) *bridgesync.Claim { log.Warnf("claimResponseToClaim: failed to parse Amount %q, defaulting to 0", r.Amount) amount = big.NewInt(0) } - return &bridgesync.Claim{ + return &claimsynctypes.Claim{ BlockNum: r.BlockNum, BlockTimestamp: r.BlockTimestamp, TxHash: common.HexToHash(string(r.TxHash)), @@ -190,7 +191,7 @@ func claimResponseToClaim(r *bridgetypes.ClaimResponse) *bridgesync.Claim { GlobalExitRoot: common.HexToHash(string(r.GlobalExitRoot)), Metadata: decodeMetadataHex(r.Metadata), IsMessage: r.IsMessage, - Type: bridgesync.DetailedClaimEvent, + Type: claimsynctypes.DetailedClaimEvent, } } @@ -226,7 +227,7 @@ func decodeMetadataHex(s string) []byte { } // classifyClaim classifies a single claim (A, B.1, B.2) using the runbook decision tree. -func classifyClaim(ctx context.Context, env *Env, claim *bridgesync.Claim) (ClaimDiagnosis, error) { +func classifyClaim(ctx context.Context, env *Env, claim *claimsynctypes.Claim) (ClaimDiagnosis, error) { cd := ClaimDiagnosis{ GlobalIndex: claim.GlobalIndex, OriginNetwork: claim.OriginNetwork, @@ -343,7 +344,7 @@ func classifyClaim(ctx context.Context, env *Env, claim *bridgesync.Claim) (Clai // (either not found or content mismatch). It searches all L1 bridges with the same content fields as the // claim. If a match is found at a different deposit_count, the claim is B.2. Otherwise Category A. 
func classifyByClaimContent( - ctx context.Context, env *Env, claim *bridgesync.Claim, claimLeafType uint8, cd ClaimDiagnosis, + ctx context.Context, env *Env, claim *claimsynctypes.Claim, claimLeafType uint8, cd ClaimDiagnosis, ) (ClaimDiagnosis, error) { cd.Category = ScenarioCategoryA // default From 0ee3e40c246878c3e279edb55e5ca1d40688c0d4 Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Tue, 17 Mar 2026 17:22:14 +0100 Subject: [PATCH 06/28] fix: unittest --- claimsync/claimsync.go | 28 ++++++++++++++-------------- claimsync/claimsync_test.go | 2 ++ claimsync/downloader.go | 27 ++++++++++++++++++++------- config/types/true_false_auto.go | 10 +++++++--- 4 files changed, 43 insertions(+), 24 deletions(-) diff --git a/claimsync/claimsync.go b/claimsync/claimsync.go index f419c3603..d3381765a 100644 --- a/claimsync/claimsync.go +++ b/claimsync/claimsync.go @@ -72,6 +72,9 @@ func NewClaimSync( if err != nil { return nil, fmt.Errorf("claimsync: failed to detect chain type: %w", err) } + if deployment.kind == Unknown { + logger.Warnf("unable to determine bridge contract type at address %s", cfg.BridgeAddr.Hex()) + } appender, err := buildAppender(ctx, ethClient, proc, cfg.BridgeAddr, deployment, logger) if err != nil { @@ -126,9 +129,8 @@ func NewClaimSync( } logger.Infof( - "claimsync created: dbPath=%s initialBlock=%d blockFinality=%s bridgeAddr=%s sovereign=%t", - cfg.DBPath, cfg.InitialBlockNum, cfg.BlockFinality.String(), - cfg.BridgeAddr.String(), deployment.kind == SovereignChain, + "claimsync created: dbPath=%s initialBlock=%d blockFinality=%s bridgeAddr=%s bridgeKind=%s", + cfg.DBPath, cfg.InitialBlockNum, cfg.BlockFinality.String(), cfg.BridgeAddr.String(), deployment.kind.String(), ) return &ClaimSync{ @@ -145,8 +147,8 @@ func NewClaimSync( // Start starts the synchronization process. 
func (c *ClaimSync) Start(ctx context.Context) { - c.logger.Infof("starting claim synchronizer AutoStart: %t InitialBlock: %d", - *c.cfg.AutoStart.Resolved, c.cfg.InitialBlockNum) + c.logger.Infof("starting claim synchronizer AutoStart: %s InitialBlock: %d", + c.cfg.AutoStart.String(), c.cfg.InitialBlockNum) if *c.cfg.AutoStart.Resolved { c.driver.Sync(ctx, &c.cfg.InitialBlockNum) } else { @@ -233,25 +235,23 @@ func (c *ClaimSync) GetClaimsByGlobalIndex(ctx context.Context, globalIndex *big return c.reader.GetClaimsByGlobalIndex(ctx, nil, globalIndex) } -func (c *ClaimSync) GetClaimsByGER(ctx context.Context, globalExitRoot common.Hash) ([]*Claim, error) { - return c.reader.GetClaimsByGER(ctx, nil, globalExitRoot) -} - func (c *ClaimSync) GetClaimsPaged(ctx context.Context, page, pageSize uint32, - networkIDs []uint32, globalIndex *big.Int) ([]*claimsynctypes.Claim, int, error) { + networkIDs []uint32, globalIndex *big.Int) ([]*Claim, int, error) { return c.reader.GetClaimsPaged(ctx, page, pageSize, networkIDs, globalIndex) } - func (c *ClaimSync) GetUnsetClaimsPaged(ctx context.Context, page, pageSize uint32, - globalIndex *big.Int) ([]*claimsynctypes.UnsetClaim, int, error) { + globalIndex *big.Int) ([]*UnsetClaim, int, error) { return c.reader.GetUnsetClaimsPaged(ctx, page, pageSize, globalIndex) } - func (c *ClaimSync) GetSetClaimsPaged(ctx context.Context, page, pageSize uint32, - globalIndex *big.Int) ([]*claimsynctypes.SetClaim, int, error) { + globalIndex *big.Int) ([]*SetClaim, int, error) { return c.reader.GetSetClaimsPaged(ctx, page, pageSize, globalIndex) } +func (c *ClaimSync) GetClaimsByGER(ctx context.Context, globalExitRoot common.Hash) ([]*Claim, error) { + return c.reader.GetClaimsByGER(ctx, nil, globalExitRoot) +} + func (c *ClaimSync) createStartingPoint(ctx context.Context, blockNumber uint64) error { c.logger.Infof("creating starting point at block %d:", blockNumber) header, err := c.ethClient.CustomHeaderByNumber(ctx, 
aggkittypes.NewBlockNumber(blockNumber)) diff --git a/claimsync/claimsync_test.go b/claimsync/claimsync_test.go index cf2443eb3..f56595cc7 100644 --- a/claimsync/claimsync_test.go +++ b/claimsync/claimsync_test.go @@ -35,6 +35,7 @@ func TestClaimSyncerWaitUntilSetNextRequiredBlock(t *testing.T) { // Deploy contracts bridgeAddr, _, bridgeContract, err := claimmock.DeployClaimmock(auth, client) require.NoError(t, err) + log.Infof("Deployed fake bridge contract %s", bridgeAddr.Hex()) dbPathSyncer := path.Join(t.TempDir(), "claimsyncer.sqlite") cfg := ConfigStandalone{ @@ -49,6 +50,7 @@ func TestClaimSyncerWaitUntilSetNextRequiredBlock(t *testing.T) { DBQueryTimeout: configtypes.NewDuration(5 * time.Second), BridgeAddr: bridgeAddr, }, + AutoStart: configtypes.FalseMode, } logger := log.WithFields("test", "TestClaimSync") reorgDetector, err := reorgdetector.New(client, reorgdetector.Config{ diff --git a/claimsync/downloader.go b/claimsync/downloader.go index 2bc136819..3f98290d2 100644 --- a/claimsync/downloader.go +++ b/claimsync/downloader.go @@ -71,6 +71,17 @@ const ( SovereignChain ) +func (b BridgeDeployment) String() string { + switch b { + case NonSovereignChain: + return "NonSovereignChain" + case SovereignChain: + return "SovereignChain" + default: + return "Unknown" + } +} + type bridgeDeployment struct { kind BridgeDeployment agglayerBridge *agglayerbridge.Agglayerbridge @@ -99,11 +110,9 @@ func buildAppender( appender[claimEventSignature] = buildClaimEventHandler( ctx, deployment.agglayerBridge, ethClient, querier, bridgeAddr, syncFullClaims, log) - if deployment.kind == SovereignChain { - appender[detailedClaimEventSignature] = buildDetailedClaimEventHandler(deployment.agglayerBridgeL2) - appender[unsetClaimEventSignature] = buildUnsetClaimEventHandler(deployment.agglayerBridgeL2) - appender[setClaimEventSignature] = buildSetClaimEventHandler(deployment.agglayerBridgeL2) - } + appender[detailedClaimEventSignature] = 
buildDetailedClaimEventHandler(deployment.agglayerBridgeL2) + appender[unsetClaimEventSignature] = buildUnsetClaimEventHandler(deployment.agglayerBridgeL2) + appender[setClaimEventSignature] = buildSetClaimEventHandler(deployment.agglayerBridgeL2) return appender, nil } @@ -151,8 +160,12 @@ func resolveBridgeDeployment( return nil, fmt.Errorf("claimsync: unexpected error querying AgglayerBridge.lastUpdatedDepositCount (%s): %w", bridgeAddr.Hex(), err) } - - return nil, fmt.Errorf("claimsync: unable to determine bridge contract type at address %s", bridgeAddr) + // It can't be determined if the bridge is non-sovereign or sovereign + return &bridgeDeployment{ + kind: Unknown, + agglayerBridge: agglayerBridge, + agglayerBridgeL2: agglayerBridgeL2, + }, nil } // buildClaimEventHandler creates a handler for the ClaimEvent log. diff --git a/config/types/true_false_auto.go b/config/types/true_false_auto.go index ab68c03e0..a75c10479 100644 --- a/config/types/true_false_auto.go +++ b/config/types/true_false_auto.go @@ -20,9 +20,9 @@ const ( var ( // TrueMode always activates the feature. - TrueMode = TrueFalseAutoMode{Mode: trueModeStr} + TrueMode = TrueFalseAutoMode{Mode: trueModeStr, Resolved: func() *bool { b := true; return &b }()} // FalseMode always deactivates the feature. - FalseMode = TrueFalseAutoMode{Mode: falseModeStr} + FalseMode = TrueFalseAutoMode{Mode: falseModeStr, Resolved: func() *bool { b := false; return &b }()} // AutoMode decides automatically based on context. AutoMode = TrueFalseAutoMode{Mode: autoModeStr} ) @@ -45,7 +45,11 @@ func (m *TrueFalseAutoMode) UnmarshalText(text []byte) error { // String returns the string representation. func (m TrueFalseAutoMode) String() string { - return m.Mode + if m.Resolved != nil { + return fmt.Sprintf("{Mode: %s, Resolved: %t}", m.Mode, *m.Resolved) + } else { + return fmt.Sprintf("{Mode: %s, Resolved: }", m.Mode) + } } // Validate checks that the mode is a valid value. Empty mode is allowed. 
From e3bf86b192e9cf8a7e09643cf0a3bab2e12dc3ce Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Tue, 17 Mar 2026 18:02:13 +0100 Subject: [PATCH 07/28] fix: unittest --- claimsync/claimsync.go | 21 ++------------------- claimsync/claimsync_test.go | 30 +++++++++++++++++++++++------- sync/evmdriver.go | 1 - 3 files changed, 25 insertions(+), 27 deletions(-) diff --git a/claimsync/claimsync.go b/claimsync/claimsync.go index d3381765a..7496ef146 100644 --- a/claimsync/claimsync.go +++ b/claimsync/claimsync.go @@ -194,15 +194,10 @@ func (c *ClaimSync) SetNextRequiredBlock(ctx context.Context, blockNumber uint64 return fmt.Errorf("claimsync: failed to get last processed block: %w", err) } if !found { - if blockNumber == 0 { - err := fmt.Errorf("claimsync: cannot set next required block to 0, invalid block number") - c.logger.Error(err) - return err - } - if err := c.createStartingPoint(ctx, blockNumber-1); err != nil { + c.logger.Infof("Starting to sync from block %d (no processed blocks found)", blockNumber) + if err := c.driver.SyncNextBlock(ctx, blockNumber); err != nil { return fmt.Errorf("claimsync: failed to createStartingPoint: %w", err) } - c.logger.Infof("Set next required block to %d (no processed blocks found)", blockNumber) return nil } firstBlock, _, err := c.processor.GetFirstProcessedBlock(ctx) @@ -251,15 +246,3 @@ func (c *ClaimSync) GetSetClaimsPaged(ctx context.Context, page, pageSize uint32 func (c *ClaimSync) GetClaimsByGER(ctx context.Context, globalExitRoot common.Hash) ([]*Claim, error) { return c.reader.GetClaimsByGER(ctx, nil, globalExitRoot) } - -func (c *ClaimSync) createStartingPoint(ctx context.Context, blockNumber uint64) error { - c.logger.Infof("creating starting point at block %d:", blockNumber) - header, err := c.ethClient.CustomHeaderByNumber(ctx, aggkittypes.NewBlockNumber(blockNumber)) - if err != nil { - return fmt.Errorf("claimsync: get header for block %d: %w", blockNumber, err) - } 
- if err := c.processor.ProcessBlock(ctx, sync.Block{Num: blockNumber, Hash: header.Hash}); err != nil { - return fmt.Errorf("claimsync: process block %d: %w", blockNumber, err) - } - return nil -} diff --git a/claimsync/claimsync_test.go b/claimsync/claimsync_test.go index f56595cc7..ccb063fc9 100644 --- a/claimsync/claimsync_test.go +++ b/claimsync/claimsync_test.go @@ -33,9 +33,11 @@ func TestClaimSyncerWaitUntilSetNextRequiredBlock(t *testing.T) { // Setup Docker L1 client, auth := startGeth(t, ctx, cancelFn) // Deploy contracts - bridgeAddr, _, bridgeContract, err := claimmock.DeployClaimmock(auth, client) + bridgeAddr, deployTx, bridgeContract, err := claimmock.DeployClaimmock(auth, client) require.NoError(t, err) - log.Infof("Deployed fake bridge contract %s", bridgeAddr.Hex()) + _, err = waitForReceipt(ctx, client, deployTx.Hash(), 10) + require.NoError(t, err) + log.Infof("*** Deployed fake bridge contract %s", bridgeAddr.Hex()) dbPathSyncer := path.Join(t.TempDir(), "claimsyncer.sqlite") cfg := ConfigStandalone{ @@ -43,8 +45,8 @@ func TestClaimSyncerWaitUntilSetNextRequiredBlock(t *testing.T) { BlockFinality: aggkittypes.LatestBlock, InitialBlockNum: 0, SyncBlockChunkSize: 100, - RetryAfterErrorPeriod: configtypes.NewDuration(time.Second), - WaitForNewBlocksPeriod: configtypes.NewDuration(time.Second), + RetryAfterErrorPeriod: configtypes.NewDuration(time.Millisecond * 100), + WaitForNewBlocksPeriod: configtypes.NewDuration(time.Millisecond * 100), RequireStorageContentCompatibility: true, ConfigEmbedded: ConfigEmbedded{ DBQueryTimeout: configtypes.NewDuration(5 * time.Second), @@ -81,18 +83,32 @@ func TestClaimSyncerWaitUntilSetNextRequiredBlock(t *testing.T) { ) require.NoError(t, err) - _, err = waitForReceipt(ctx, client, tx.Hash(), 10) + txReceipt, err := waitForReceipt(ctx, client, tx.Hash(), 10) require.NoError(t, err) + waitTillBlockNumber := txReceipt.BlockNumber.Uint64() logger.Info("*** ClaimSyncer must be waiting to receive the starting 
point") _, found, err2 := claimSyncer.GetLastProcessedBlock(ctx) require.NoError(t, err2) require.False(t, found) logger.Info("*** Setting next required block to 1, so must starting syncing and sync the ClaimAsset") - err = claimSyncer.SetNextRequiredBlock(ctx, 1) + err = claimSyncer.SetNextRequiredBlock(ctx, 0) require.NoError(t, err) - time.Sleep(time.Second * 5) + for i := 0; i < 10; i++ { + currentBlockNumber, _, err := claimSyncer.GetLastProcessedBlock(ctx) + require.NoError(t, err) + logger.Infof("*** Wait for block %d, current %d", waitTillBlockNumber, currentBlockNumber) + if currentBlockNumber >= waitTillBlockNumber { + break + } + time.Sleep(time.Second) + } lastBlockProcessed, found, err2 := claimSyncer.GetLastProcessedBlock(ctx) require.NoError(t, err2) require.True(t, found) + require.GreaterOrEqual(t, lastBlockProcessed, waitTillBlockNumber) logger.Infof("*** Last block processed: %d", lastBlockProcessed) + claims, err := claimSyncer.GetClaims(ctx, 0, lastBlockProcessed) + require.NoError(t, err) + logger.Infof("*** Claims retrieved: %v", claims) + require.Equal(t, 1, len(claims)) } diff --git a/sync/evmdriver.go b/sync/evmdriver.go index ff7ef2a0d..77733607e 100644 --- a/sync/evmdriver.go +++ b/sync/evmdriver.go @@ -213,7 +213,6 @@ reset: } break } - // setup context to cancel downloader and/or block processor cancellableCtx, cancel := context.WithCancel(ctx) defer cancel() From 3ccacd9edeab489068ab345bbaeb9732fafa0097 Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Wed, 18 Mar 2026 09:48:37 +0100 Subject: [PATCH 08/28] fix: unittest --- claimsync/claimcalldata_test.go | 3 +-- claimsync/downloader.go | 47 ++++++++++++++++++--------------- 2 files changed, 27 insertions(+), 23 deletions(-) diff --git a/claimsync/claimcalldata_test.go b/claimsync/claimcalldata_test.go index 8977c03a9..8565eb353 100644 --- a/claimsync/claimcalldata_test.go +++ b/claimsync/claimcalldata_test.go @@ -1099,10 +1099,9 @@ 
func TestClaimCalldata(t *testing.T) { logger := log.WithFields("module", "test") // Extract root call first using new function - rootCall, err := extractCallData(client, bridgeAddr, tc.log.TxHash, logger, nil) + _, rootCall, err := extractCallData(client, bridgeAddr, tc.log.TxHash, logger, nil) require.NoError(t, err) - // Use setClaimCalldataFromRoot instead of setClaimCalldata err = setClaimCalldataFromRoot(&actualClaim, rootCall, bridgeAddr, logger) require.NoError(t, err) require.Equal(t, tc.expectedClaim, actualClaim) diff --git a/claimsync/downloader.go b/claimsync/downloader.go index 3f98290d2..3a13f97d4 100644 --- a/claimsync/downloader.go +++ b/claimsync/downloader.go @@ -219,7 +219,7 @@ func buildClaimEventHandler( } // Extract root call for txn_sender and error checking - rootCall, err := extractCallData(client, bridgeAddr, l.TxHash, log, nil) + _, rootCall, err := extractCallData(client, bridgeAddr, l.TxHash, log, nil) if err != nil { return fmt.Errorf("failed to extract claim event tx sender (tx hash: %s): %w", l.TxHash, err) } @@ -312,7 +312,7 @@ func buildClaimEventHandlerPreEtrog( Amount: claimEvent.Amount, } // Extract root call for txn_sender and error checking - rootCall, err := extractCallData(client, bridgeAddr, l.TxHash, logger, nil) + _, rootCall, err := extractCallData(client, bridgeAddr, l.TxHash, logger, nil) if err != nil { return fmt.Errorf("failed to extract claim event tx sender (tx hash: %s): %w", l.TxHash, err) } @@ -387,16 +387,19 @@ type tracerCfg struct { } // findCall traverses the call trace using DFS and either returns the call or stops when a callback succeeds. 
-func findCall(rootCall Call, targetAddr common.Address, callback func(Call) (bool, error), logger aggkitcommon.Logger, -) error { +func findCall(rootCall Call, + targetAddr common.Address, + callback func(Call) (bool, error), + logger aggkitcommon.Logger, +) ([]*Call, error) { callStack := stack.New() callStack.Push(rootCall) - found := false + matchingCalls := []*Call{} for callStack.Len() > 0 { currentCallInterface := callStack.Pop() currentCall, ok := currentCallInterface.(Call) if !ok { - return fmt.Errorf("unexpected type for 'currentCall'. Expected 'call', got '%T'", currentCallInterface) + return nil, fmt.Errorf("unexpected type for 'currentCall'. Expected 'call', got '%T'", currentCallInterface) } // Skip reverted calls @@ -407,15 +410,16 @@ func findCall(rootCall Call, targetAddr common.Address, callback func(Call) (boo } if currentCall.To == targetAddr { - found = true if callback != nil { - ok, err := callback(currentCall) + found, err := callback(currentCall) if err != nil { - return err + return nil, err } - if !ok { - found = false + if found { + matchingCalls = append(matchingCalls, ¤tCall) } + } else { + matchingCalls = append(matchingCalls, ¤tCall) } } @@ -426,10 +430,10 @@ func findCall(rootCall Call, targetAddr common.Address, callback func(Call) (boo } } } - if !found { - return db.ErrNotFound + if len(matchingCalls) > 0 { + return matchingCalls, nil } - return nil + return nil, db.ErrNotFound } // extractRootCall extracts the root call for a transaction using debug_traceTransaction. 
@@ -448,19 +452,20 @@ func extractCallData( txHash common.Hash, logger aggkitcommon.Logger, callback func(c Call) (bool, error), -) (*Call, error) { +) (foundCalls []*Call, rootCall *Call, err error) { // Extract root call first - rootCall, err := extractRootCall(client, bridgeAddr, txHash) + rootCall, err = extractRootCall(client, bridgeAddr, txHash) if err != nil { - return nil, err + return nil, nil, err } // Find the specific call to the bridge contract - if err = findCall(*rootCall, bridgeAddr, callback, logger); err != nil { - return nil, err + foundCalls, err = findCall(*rootCall, bridgeAddr, callback, logger) + if err != nil { + return nil, nil, err } - return rootCall, nil + return foundCalls, rootCall, nil } // setClaimCalldataFromRoot finds and decodes calldata for the given bridge address using an already traced root call. @@ -477,7 +482,7 @@ func setClaimCalldataFromRoot( bridge common.Address, logger aggkitcommon.Logger, ) error { - err := findCall(*rootCall, bridge, + _, err := findCall(*rootCall, bridge, func(call Call) (bool, error) { // Skip reverted calls if call.Err != nil { From 1f7b33deab176cccaf1a7110399266dbbbce0914 Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Wed, 18 Mar 2026 10:37:33 +0100 Subject: [PATCH 09/28] fix: ut --- bridgeservice/mocks/mock_claimer.go | 229 ++++++---- bridgesync/config_test.go | 239 ---------- claimsync/claim_data.go | 3 + claimsync/downloader.go | 7 +- claimsync/downloader_test.go | 437 +++++++++++++++++++ claimsync/types/claim_querier.go | 12 + claimsync/types/mocks/mock_claim_querier.go | 98 +++++ claimsync/types/mocks/mock_claim_storager.go | 29 +- config/types/true_false_auto.go | 8 +- config/types/true_false_auto_test.go | 198 +++++++++ 10 files changed, 900 insertions(+), 360 deletions(-) create mode 100644 claimsync/downloader_test.go create mode 100644 claimsync/types/claim_querier.go create mode 100644 claimsync/types/mocks/mock_claim_querier.go 
create mode 100644 config/types/true_false_auto_test.go diff --git a/bridgeservice/mocks/mock_claimer.go b/bridgeservice/mocks/mock_claimer.go index 0f68192c9..1834b335e 100644 --- a/bridgeservice/mocks/mock_claimer.go +++ b/bridgeservice/mocks/mock_claimer.go @@ -3,12 +3,15 @@ package mocks import ( - "context" - "math/big" + big "math/big" - claimsynctypes "github.com/agglayer/aggkit/claimsync/types" common "github.com/ethereum/go-ethereum/common" + + context "context" + mock "github.com/stretchr/testify/mock" + + types "github.com/agglayer/aggkit/claimsync/types" ) // Claimer is an autogenerated mock type for the Claimer type @@ -24,174 +27,181 @@ func (_m *Claimer) EXPECT() *Claimer_Expecter { return &Claimer_Expecter{mock: &_m.Mock} } -// NewClaimer creates a new instance of Claimer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. -func NewClaimer(t interface { - mock.TestingT - Cleanup(func()) -}) *Claimer { - mock := &Claimer{} - mock.Mock.Test(t) - t.Cleanup(func() { mock.AssertExpectations(t) }) - return mock -} - -// GetClaimsPaged provides a mock function with given fields: ctx, page, pageSize, networkIDs, globalIndex -func (_m *Claimer) GetClaimsPaged(ctx context.Context, page uint32, pageSize uint32, networkIDs []uint32, globalIndex *big.Int) ([]*claimsynctypes.Claim, int, error) { - ret := _m.Called(ctx, page, pageSize, networkIDs, globalIndex) +// GetClaimsByGER provides a mock function with given fields: ctx, globalExitRoot +func (_m *Claimer) GetClaimsByGER(ctx context.Context, globalExitRoot common.Hash) ([]*types.Claim, error) { + ret := _m.Called(ctx, globalExitRoot) if len(ret) == 0 { - panic("no return value specified for GetClaimsPaged") + panic("no return value specified for GetClaimsByGER") } - var r0 []*claimsynctypes.Claim - var r1 int - var r2 error - if rf, ok := ret.Get(0).(func(context.Context, uint32, uint32, []uint32, 
*big.Int) ([]*claimsynctypes.Claim, int, error)); ok { - return rf(ctx, page, pageSize, networkIDs, globalIndex) + var r0 []*types.Claim + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) ([]*types.Claim, error)); ok { + return rf(ctx, globalExitRoot) } - if rf, ok := ret.Get(0).(func(context.Context, uint32, uint32, []uint32, *big.Int) []*claimsynctypes.Claim); ok { - r0 = rf(ctx, page, pageSize, networkIDs, globalIndex) + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) []*types.Claim); ok { + r0 = rf(ctx, globalExitRoot) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).([]*claimsynctypes.Claim) + r0 = ret.Get(0).([]*types.Claim) } } - if rf, ok := ret.Get(1).(func(context.Context, uint32, uint32, []uint32, *big.Int) int); ok { - r1 = rf(ctx, page, pageSize, networkIDs, globalIndex) - } else { - r1 = ret.Get(1).(int) - } - if rf, ok := ret.Get(2).(func(context.Context, uint32, uint32, []uint32, *big.Int) error); ok { - r2 = rf(ctx, page, pageSize, networkIDs, globalIndex) + + if rf, ok := ret.Get(1).(func(context.Context, common.Hash) error); ok { + r1 = rf(ctx, globalExitRoot) } else { - r2 = ret.Error(2) + r1 = ret.Error(1) } - return r0, r1, r2 + + return r0, r1 } -type Claimer_GetClaimsPaged_Call struct { +// Claimer_GetClaimsByGER_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetClaimsByGER' +type Claimer_GetClaimsByGER_Call struct { *mock.Call } -func (_e *Claimer_Expecter) GetClaimsPaged(ctx interface{}, page interface{}, pageSize interface{}, networkIDs interface{}, globalIndex interface{}) *Claimer_GetClaimsPaged_Call { - return &Claimer_GetClaimsPaged_Call{Call: _e.mock.On("GetClaimsPaged", ctx, page, pageSize, networkIDs, globalIndex)} +// GetClaimsByGER is a helper method to define mock.On call +// - ctx context.Context +// - globalExitRoot common.Hash +func (_e *Claimer_Expecter) GetClaimsByGER(ctx interface{}, globalExitRoot interface{}) *Claimer_GetClaimsByGER_Call 
{ + return &Claimer_GetClaimsByGER_Call{Call: _e.mock.On("GetClaimsByGER", ctx, globalExitRoot)} } -func (_c *Claimer_GetClaimsPaged_Call) Run(run func(ctx context.Context, page uint32, pageSize uint32, networkIDs []uint32, globalIndex *big.Int)) *Claimer_GetClaimsPaged_Call { +func (_c *Claimer_GetClaimsByGER_Call) Run(run func(ctx context.Context, globalExitRoot common.Hash)) *Claimer_GetClaimsByGER_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(uint32), args[2].(uint32), args[3].([]uint32), args[4].(*big.Int)) + run(args[0].(context.Context), args[1].(common.Hash)) }) return _c } -func (_c *Claimer_GetClaimsPaged_Call) Return(_a0 []*claimsynctypes.Claim, _a1 int, _a2 error) *Claimer_GetClaimsPaged_Call { - _c.Call.Return(_a0, _a1, _a2) +func (_c *Claimer_GetClaimsByGER_Call) Return(_a0 []*types.Claim, _a1 error) *Claimer_GetClaimsByGER_Call { + _c.Call.Return(_a0, _a1) return _c } -func (_c *Claimer_GetClaimsPaged_Call) RunAndReturn(run func(context.Context, uint32, uint32, []uint32, *big.Int) ([]*claimsynctypes.Claim, int, error)) *Claimer_GetClaimsPaged_Call { +func (_c *Claimer_GetClaimsByGER_Call) RunAndReturn(run func(context.Context, common.Hash) ([]*types.Claim, error)) *Claimer_GetClaimsByGER_Call { _c.Call.Return(run) return _c } -// GetUnsetClaimsPaged provides a mock function with given fields: ctx, page, pageSize, globalIndex -func (_m *Claimer) GetUnsetClaimsPaged(ctx context.Context, page uint32, pageSize uint32, globalIndex *big.Int) ([]*claimsynctypes.UnsetClaim, int, error) { - ret := _m.Called(ctx, page, pageSize, globalIndex) +// GetClaimsPaged provides a mock function with given fields: ctx, page, pageSize, networkIDs, globalIndex +func (_m *Claimer) GetClaimsPaged(ctx context.Context, page uint32, pageSize uint32, networkIDs []uint32, globalIndex *big.Int) ([]*types.Claim, int, error) { + ret := _m.Called(ctx, page, pageSize, networkIDs, globalIndex) if len(ret) == 0 { - panic("no return value 
specified for GetUnsetClaimsPaged") + panic("no return value specified for GetClaimsPaged") } - var r0 []*claimsynctypes.UnsetClaim + var r0 []*types.Claim var r1 int var r2 error - if rf, ok := ret.Get(0).(func(context.Context, uint32, uint32, *big.Int) ([]*claimsynctypes.UnsetClaim, int, error)); ok { - return rf(ctx, page, pageSize, globalIndex) + if rf, ok := ret.Get(0).(func(context.Context, uint32, uint32, []uint32, *big.Int) ([]*types.Claim, int, error)); ok { + return rf(ctx, page, pageSize, networkIDs, globalIndex) } - if rf, ok := ret.Get(0).(func(context.Context, uint32, uint32, *big.Int) []*claimsynctypes.UnsetClaim); ok { - r0 = rf(ctx, page, pageSize, globalIndex) + if rf, ok := ret.Get(0).(func(context.Context, uint32, uint32, []uint32, *big.Int) []*types.Claim); ok { + r0 = rf(ctx, page, pageSize, networkIDs, globalIndex) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).([]*claimsynctypes.UnsetClaim) + r0 = ret.Get(0).([]*types.Claim) } } - if rf, ok := ret.Get(1).(func(context.Context, uint32, uint32, *big.Int) int); ok { - r1 = rf(ctx, page, pageSize, globalIndex) + + if rf, ok := ret.Get(1).(func(context.Context, uint32, uint32, []uint32, *big.Int) int); ok { + r1 = rf(ctx, page, pageSize, networkIDs, globalIndex) } else { r1 = ret.Get(1).(int) } - if rf, ok := ret.Get(2).(func(context.Context, uint32, uint32, *big.Int) error); ok { - r2 = rf(ctx, page, pageSize, globalIndex) + + if rf, ok := ret.Get(2).(func(context.Context, uint32, uint32, []uint32, *big.Int) error); ok { + r2 = rf(ctx, page, pageSize, networkIDs, globalIndex) } else { r2 = ret.Error(2) } + return r0, r1, r2 } -type Claimer_GetUnsetClaimsPaged_Call struct { +// Claimer_GetClaimsPaged_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetClaimsPaged' +type Claimer_GetClaimsPaged_Call struct { *mock.Call } -func (_e *Claimer_Expecter) GetUnsetClaimsPaged(ctx interface{}, page interface{}, pageSize interface{}, globalIndex interface{}) 
*Claimer_GetUnsetClaimsPaged_Call { - return &Claimer_GetUnsetClaimsPaged_Call{Call: _e.mock.On("GetUnsetClaimsPaged", ctx, page, pageSize, globalIndex)} +// GetClaimsPaged is a helper method to define mock.On call +// - ctx context.Context +// - page uint32 +// - pageSize uint32 +// - networkIDs []uint32 +// - globalIndex *big.Int +func (_e *Claimer_Expecter) GetClaimsPaged(ctx interface{}, page interface{}, pageSize interface{}, networkIDs interface{}, globalIndex interface{}) *Claimer_GetClaimsPaged_Call { + return &Claimer_GetClaimsPaged_Call{Call: _e.mock.On("GetClaimsPaged", ctx, page, pageSize, networkIDs, globalIndex)} } -func (_c *Claimer_GetUnsetClaimsPaged_Call) Run(run func(ctx context.Context, page uint32, pageSize uint32, globalIndex *big.Int)) *Claimer_GetUnsetClaimsPaged_Call { +func (_c *Claimer_GetClaimsPaged_Call) Run(run func(ctx context.Context, page uint32, pageSize uint32, networkIDs []uint32, globalIndex *big.Int)) *Claimer_GetClaimsPaged_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(uint32), args[2].(uint32), args[3].(*big.Int)) + run(args[0].(context.Context), args[1].(uint32), args[2].(uint32), args[3].([]uint32), args[4].(*big.Int)) }) return _c } -func (_c *Claimer_GetUnsetClaimsPaged_Call) Return(_a0 []*claimsynctypes.UnsetClaim, _a1 int, _a2 error) *Claimer_GetUnsetClaimsPaged_Call { +func (_c *Claimer_GetClaimsPaged_Call) Return(_a0 []*types.Claim, _a1 int, _a2 error) *Claimer_GetClaimsPaged_Call { _c.Call.Return(_a0, _a1, _a2) return _c } -func (_c *Claimer_GetUnsetClaimsPaged_Call) RunAndReturn(run func(context.Context, uint32, uint32, *big.Int) ([]*claimsynctypes.UnsetClaim, int, error)) *Claimer_GetUnsetClaimsPaged_Call { +func (_c *Claimer_GetClaimsPaged_Call) RunAndReturn(run func(context.Context, uint32, uint32, []uint32, *big.Int) ([]*types.Claim, int, error)) *Claimer_GetClaimsPaged_Call { _c.Call.Return(run) return _c } // GetSetClaimsPaged provides a mock function with given 
fields: ctx, page, pageSize, globalIndex -func (_m *Claimer) GetSetClaimsPaged(ctx context.Context, page uint32, pageSize uint32, globalIndex *big.Int) ([]*claimsynctypes.SetClaim, int, error) { +func (_m *Claimer) GetSetClaimsPaged(ctx context.Context, page uint32, pageSize uint32, globalIndex *big.Int) ([]*types.SetClaim, int, error) { ret := _m.Called(ctx, page, pageSize, globalIndex) if len(ret) == 0 { panic("no return value specified for GetSetClaimsPaged") } - var r0 []*claimsynctypes.SetClaim + var r0 []*types.SetClaim var r1 int var r2 error - if rf, ok := ret.Get(0).(func(context.Context, uint32, uint32, *big.Int) ([]*claimsynctypes.SetClaim, int, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, uint32, uint32, *big.Int) ([]*types.SetClaim, int, error)); ok { return rf(ctx, page, pageSize, globalIndex) } - if rf, ok := ret.Get(0).(func(context.Context, uint32, uint32, *big.Int) []*claimsynctypes.SetClaim); ok { + if rf, ok := ret.Get(0).(func(context.Context, uint32, uint32, *big.Int) []*types.SetClaim); ok { r0 = rf(ctx, page, pageSize, globalIndex) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).([]*claimsynctypes.SetClaim) + r0 = ret.Get(0).([]*types.SetClaim) } } + if rf, ok := ret.Get(1).(func(context.Context, uint32, uint32, *big.Int) int); ok { r1 = rf(ctx, page, pageSize, globalIndex) } else { r1 = ret.Get(1).(int) } + if rf, ok := ret.Get(2).(func(context.Context, uint32, uint32, *big.Int) error); ok { r2 = rf(ctx, page, pageSize, globalIndex) } else { r2 = ret.Error(2) } + return r0, r1, r2 } +// Claimer_GetSetClaimsPaged_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetSetClaimsPaged' type Claimer_GetSetClaimsPaged_Call struct { *mock.Call } +// GetSetClaimsPaged is a helper method to define mock.On call +// - ctx context.Context +// - page uint32 +// - pageSize uint32 +// - globalIndex *big.Int func (_e *Claimer_Expecter) GetSetClaimsPaged(ctx interface{}, page interface{}, pageSize 
interface{}, globalIndex interface{}) *Claimer_GetSetClaimsPaged_Call { return &Claimer_GetSetClaimsPaged_Call{Call: _e.mock.On("GetSetClaimsPaged", ctx, page, pageSize, globalIndex)} } @@ -203,65 +213,94 @@ func (_c *Claimer_GetSetClaimsPaged_Call) Run(run func(ctx context.Context, page return _c } -func (_c *Claimer_GetSetClaimsPaged_Call) Return(_a0 []*claimsynctypes.SetClaim, _a1 int, _a2 error) *Claimer_GetSetClaimsPaged_Call { +func (_c *Claimer_GetSetClaimsPaged_Call) Return(_a0 []*types.SetClaim, _a1 int, _a2 error) *Claimer_GetSetClaimsPaged_Call { _c.Call.Return(_a0, _a1, _a2) return _c } -func (_c *Claimer_GetSetClaimsPaged_Call) RunAndReturn(run func(context.Context, uint32, uint32, *big.Int) ([]*claimsynctypes.SetClaim, int, error)) *Claimer_GetSetClaimsPaged_Call { +func (_c *Claimer_GetSetClaimsPaged_Call) RunAndReturn(run func(context.Context, uint32, uint32, *big.Int) ([]*types.SetClaim, int, error)) *Claimer_GetSetClaimsPaged_Call { _c.Call.Return(run) return _c } -// GetClaimsByGER provides a mock function with given fields: ctx, globalExitRoot -func (_m *Claimer) GetClaimsByGER(ctx context.Context, globalExitRoot common.Hash) ([]*claimsynctypes.Claim, error) { - ret := _m.Called(ctx, globalExitRoot) +// GetUnsetClaimsPaged provides a mock function with given fields: ctx, page, pageSize, globalIndex +func (_m *Claimer) GetUnsetClaimsPaged(ctx context.Context, page uint32, pageSize uint32, globalIndex *big.Int) ([]*types.UnsetClaim, int, error) { + ret := _m.Called(ctx, page, pageSize, globalIndex) if len(ret) == 0 { - panic("no return value specified for GetClaimsByGER") + panic("no return value specified for GetUnsetClaimsPaged") } - var r0 []*claimsynctypes.Claim - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, common.Hash) ([]*claimsynctypes.Claim, error)); ok { - return rf(ctx, globalExitRoot) + var r0 []*types.UnsetClaim + var r1 int + var r2 error + if rf, ok := ret.Get(0).(func(context.Context, uint32, uint32, *big.Int) 
([]*types.UnsetClaim, int, error)); ok { + return rf(ctx, page, pageSize, globalIndex) } - if rf, ok := ret.Get(0).(func(context.Context, common.Hash) []*claimsynctypes.Claim); ok { - r0 = rf(ctx, globalExitRoot) + if rf, ok := ret.Get(0).(func(context.Context, uint32, uint32, *big.Int) []*types.UnsetClaim); ok { + r0 = rf(ctx, page, pageSize, globalIndex) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).([]*claimsynctypes.Claim) + r0 = ret.Get(0).([]*types.UnsetClaim) } } - if rf, ok := ret.Get(1).(func(context.Context, common.Hash) error); ok { - r1 = rf(ctx, globalExitRoot) + + if rf, ok := ret.Get(1).(func(context.Context, uint32, uint32, *big.Int) int); ok { + r1 = rf(ctx, page, pageSize, globalIndex) } else { - r1 = ret.Error(1) + r1 = ret.Get(1).(int) } - return r0, r1 + + if rf, ok := ret.Get(2).(func(context.Context, uint32, uint32, *big.Int) error); ok { + r2 = rf(ctx, page, pageSize, globalIndex) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 } -type Claimer_GetClaimsByGER_Call struct { +// Claimer_GetUnsetClaimsPaged_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetUnsetClaimsPaged' +type Claimer_GetUnsetClaimsPaged_Call struct { *mock.Call } -func (_e *Claimer_Expecter) GetClaimsByGER(ctx interface{}, globalExitRoot interface{}) *Claimer_GetClaimsByGER_Call { - return &Claimer_GetClaimsByGER_Call{Call: _e.mock.On("GetClaimsByGER", ctx, globalExitRoot)} +// GetUnsetClaimsPaged is a helper method to define mock.On call +// - ctx context.Context +// - page uint32 +// - pageSize uint32 +// - globalIndex *big.Int +func (_e *Claimer_Expecter) GetUnsetClaimsPaged(ctx interface{}, page interface{}, pageSize interface{}, globalIndex interface{}) *Claimer_GetUnsetClaimsPaged_Call { + return &Claimer_GetUnsetClaimsPaged_Call{Call: _e.mock.On("GetUnsetClaimsPaged", ctx, page, pageSize, globalIndex)} } -func (_c *Claimer_GetClaimsByGER_Call) Run(run func(ctx context.Context, globalExitRoot common.Hash)) 
*Claimer_GetClaimsByGER_Call { +func (_c *Claimer_GetUnsetClaimsPaged_Call) Run(run func(ctx context.Context, page uint32, pageSize uint32, globalIndex *big.Int)) *Claimer_GetUnsetClaimsPaged_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(common.Hash)) + run(args[0].(context.Context), args[1].(uint32), args[2].(uint32), args[3].(*big.Int)) }) return _c } -func (_c *Claimer_GetClaimsByGER_Call) Return(_a0 []*claimsynctypes.Claim, _a1 error) *Claimer_GetClaimsByGER_Call { - _c.Call.Return(_a0, _a1) +func (_c *Claimer_GetUnsetClaimsPaged_Call) Return(_a0 []*types.UnsetClaim, _a1 int, _a2 error) *Claimer_GetUnsetClaimsPaged_Call { + _c.Call.Return(_a0, _a1, _a2) return _c } -func (_c *Claimer_GetClaimsByGER_Call) RunAndReturn(run func(context.Context, common.Hash) ([]*claimsynctypes.Claim, error)) *Claimer_GetClaimsByGER_Call { +func (_c *Claimer_GetUnsetClaimsPaged_Call) RunAndReturn(run func(context.Context, uint32, uint32, *big.Int) ([]*types.UnsetClaim, int, error)) *Claimer_GetUnsetClaimsPaged_Call { _c.Call.Return(run) return _c } + +// NewClaimer creates a new instance of Claimer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewClaimer(t interface { + mock.TestingT + Cleanup(func()) +}) *Claimer { + mock := &Claimer{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/bridgesync/config_test.go b/bridgesync/config_test.go index cc6922976..9167b987b 100644 --- a/bridgesync/config_test.go +++ b/bridgesync/config_test.go @@ -7,245 +7,6 @@ import ( "github.com/stretchr/testify/require" ) -func TestSyncFromInBridgesMode_UnmarshalText(t *testing.T) { - tests := []struct { - name string - input string - expected TrueFalseAutoMode - expectedError string - }{ - { - name: "true lowercase", - input: "true", - expected: TrueMode, - expectedError: "", - }, - { - name: "true uppercase", - input: "TRUE", - expected: TrueMode, - expectedError: "", - }, - { - name: "true mixed case", - input: "TrUe", - expected: TrueMode, - expectedError: "", - }, - { - name: "true with whitespace", - input: " true ", - expected: TrueMode, - expectedError: "", - }, - { - name: "false lowercase", - input: "false", - expected: FalseMode, - expectedError: "", - }, - { - name: "false uppercase", - input: "FALSE", - expected: FalseMode, - expectedError: "", - }, - { - name: "false mixed case", - input: "FaLsE", - expected: FalseMode, - expectedError: "", - }, - { - name: "false with whitespace", - input: " false ", - expected: FalseMode, - expectedError: "", - }, - { - name: "auto lowercase", - input: "auto", - expected: AutoMode, - expectedError: "", - }, - { - name: "auto uppercase", - input: "AUTO", - expected: AutoMode, - expectedError: "", - }, - { - name: "auto mixed case", - input: "AuTo", - expected: AutoMode, - expectedError: "", - }, - { - name: "auto with whitespace", - input: " auto ", - expected: AutoMode, - expectedError: "", - }, - { - name: "invalid value", - input: "invalid", - expected: TrueFalseAutoMode{}, - expectedError: "invalid TrueFalseAutoMode: invalid (valid values: true, false, auto)", - }, - { - name: "empty string", - input: "", - 
expected: TrueFalseAutoMode{}, - expectedError: "invalid TrueFalseAutoMode: (valid values: true, false, auto)", - }, - { - name: "numeric value", - input: "1", - expected: TrueFalseAutoMode{}, - expectedError: "invalid TrueFalseAutoMode: 1 (valid values: true, false, auto)", - }, - { - name: "yes value", - input: "yes", - expected: TrueFalseAutoMode{}, - expectedError: "invalid TrueFalseAutoMode: yes (valid values: true, false, auto)", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - var mode TrueFalseAutoMode - err := mode.UnmarshalText([]byte(tt.input)) - - if tt.expectedError == "" { - require.NoError(t, err) - require.Equal(t, tt.expected, mode) - } else { - require.Error(t, err) - require.Equal(t, tt.expectedError, err.Error()) - } - }) - } -} - -func TestSyncFromInBridgesMode_String(t *testing.T) { - tests := []struct { - name string - mode TrueFalseAutoMode - expected string - }{ - { - name: "true mode", - mode: TrueMode, - expected: "true", - }, - { - name: "false mode", - mode: FalseMode, - expected: "false", - }, - { - name: "auto mode", - mode: AutoMode, - expected: "auto", - }, - { - name: "empty mode", - mode: TrueFalseAutoMode{}, - expected: "", - }, - { - name: "invalid mode", - mode: TrueFalseAutoMode{Mode: "invalid"}, - expected: "invalid", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result := tt.mode.String() - require.Equal(t, tt.expected, result) - }) - } -} - -func TestSyncFromInBridgesMode_Resolve(t *testing.T) { - tests := []struct { - name string - mode TrueFalseAutoMode - hasBridgeComponent bool - expected bool - }{ - { - name: "true mode with bridge component", - mode: TrueMode, - hasBridgeComponent: true, - expected: true, - }, - { - name: "true mode without bridge component", - mode: TrueMode, - hasBridgeComponent: false, - expected: true, - }, - { - name: "false mode with bridge component", - mode: FalseMode, - hasBridgeComponent: true, - expected: false, - }, - { - 
name: "false mode without bridge component", - mode: FalseMode, - hasBridgeComponent: false, - expected: false, - }, - { - name: "auto mode with bridge component", - mode: AutoMode, - hasBridgeComponent: true, - expected: true, - }, - { - name: "auto mode without bridge component", - mode: AutoMode, - hasBridgeComponent: false, - expected: false, - }, - { - name: "invalid mode with bridge component", - mode: TrueFalseAutoMode{Mode: "invalid"}, - hasBridgeComponent: true, - expected: false, - }, - { - name: "invalid mode without bridge component", - mode: TrueFalseAutoMode{Mode: "invalid"}, - hasBridgeComponent: false, - expected: false, - }, - { - name: "empty mode with bridge component", - mode: TrueFalseAutoMode{}, - hasBridgeComponent: true, - expected: false, - }, - { - name: "empty mode without bridge component", - mode: TrueFalseAutoMode{}, - hasBridgeComponent: false, - expected: false, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result := tt.mode.Resolve(tt.hasBridgeComponent) - require.Equal(t, tt.expected, result) - }) - } -} - func TestConfig_Validate(t *testing.T) { tests := []struct { name string diff --git a/claimsync/claim_data.go b/claimsync/claim_data.go index 158f85746..0dbd33e3a 100644 --- a/claimsync/claim_data.go +++ b/claimsync/claim_data.go @@ -20,3 +20,6 @@ type UnsetClaim = claimsynctypes.UnsetClaim // SetClaim is an alias for claimsynctypes.SetClaim type SetClaim = claimsynctypes.SetClaim + +// ClaimQuerier is an alias for claimsynctypes.ClaimQuerier +type ClaimQuerier = claimsynctypes.ClaimQuerier diff --git a/claimsync/downloader.go b/claimsync/downloader.go index 3a13f97d4..c52b9c770 100644 --- a/claimsync/downloader.go +++ b/claimsync/downloader.go @@ -15,7 +15,6 @@ import ( bridgesynctypes "github.com/agglayer/aggkit/bridgesync/types" aggkitcommon "github.com/agglayer/aggkit/common" "github.com/agglayer/aggkit/db" - dbtypes "github.com/agglayer/aggkit/db/types" "github.com/agglayer/aggkit/log" 
"github.com/agglayer/aggkit/sync" treetypes "github.com/agglayer/aggkit/tree/types" @@ -57,11 +56,6 @@ const ( methodIDLength = 4 ) -// claimQuerier is used by event handlers to check the DetailedClaimEvent boundary. -type ClaimQuerier interface { - GetBoundaryBlockForClaimType(ctx context.Context, tx dbtypes.Querier, claimType ClaimType) (uint64, error) -} - // BridgeDeployment represents the type of bridge contract deployment (sovereign vs non-sovereign). type BridgeDeployment byte @@ -446,6 +440,7 @@ func extractRootCall(client aggkittypes.RPCClienter, contractAddr common.Address return rootCall, nil } +//nolint:unparam // foundCalls is part of the public API and may be used by callers func extractCallData( client aggkittypes.RPCClienter, bridgeAddr common.Address, diff --git a/claimsync/downloader_test.go b/claimsync/downloader_test.go new file mode 100644 index 000000000..0001bfc84 --- /dev/null +++ b/claimsync/downloader_test.go @@ -0,0 +1,437 @@ +package claimsync + +import ( + "bytes" + "fmt" + "math/big" + "testing" + + "github.com/0xPolygon/cdk-contracts-tooling/contracts/aggchain-multisig/agglayerbridge" + "github.com/0xPolygon/cdk-contracts-tooling/contracts/aggchain-multisig/agglayerbridgel2" + claimtypemocks "github.com/agglayer/aggkit/claimsync/types/mocks" + "github.com/agglayer/aggkit/db" + logger "github.com/agglayer/aggkit/log" + "github.com/agglayer/aggkit/sync" + tree "github.com/agglayer/aggkit/tree/types" + "github.com/agglayer/aggkit/types/mocks" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +func TestFindCall(t *testing.T) { + bridgeAddr := common.HexToAddress("0x10") + fromAddr := common.HexToAddress("0x20") + lg := logger.WithFields("module", "test") + + // Simple direct call + root := Call{ + To: bridgeAddr, + From: fromAddr, + Err: nil, + } + founds, err := 
findCall(root, bridgeAddr, nil, lg) + require.NoError(t, err) + require.NotNil(t, founds) + require.Equal(t, bridgeAddr, founds[0].To) + + // Reverted root call must be skipped — returns ErrNotFound + root = Call{ + To: bridgeAddr, + From: fromAddr, + Err: strPtr("reverted"), + } + _, err = findCall(root, bridgeAddr, nil, lg) + require.Error(t, err) + + // Nested calls: one valid, one reverted — only valid is returned + root = Call{ + To: common.HexToAddress("0x01"), + From: fromAddr, + Err: nil, + Calls: []Call{ + { + To: bridgeAddr, + From: fromAddr, + Err: nil, + }, + { + To: bridgeAddr, + From: fromAddr, + Err: strPtr("reverted"), + }, + }, + } + founds, err = findCall(root, bridgeAddr, nil, lg) + require.NoError(t, err) + require.Len(t, founds, 1) + require.Equal(t, bridgeAddr, founds[0].To) +} + +func TestFindCallWithMixedMethods(t *testing.T) { + bridgeAddr := common.HexToAddress("0x10") + fromAddr := common.HexToAddress("0x20") + lg := logger.WithFields("module", "test") + + // Transaction with three sub-calls: + // 1. unrecognized method + // 2. claimAsset (recognized) + // 3. 
claimMessage (recognized) + rootCall := Call{ + To: common.HexToAddress("0x01"), + From: fromAddr, + Err: nil, + Calls: []Call{ + { + To: bridgeAddr, + From: fromAddr, + Err: nil, + Input: []byte{0x38, 0xb8, 0xfb, 0xbb}, // unrecognized + }, + { + To: bridgeAddr, + From: fromAddr, + Err: nil, + Input: claimAssetEtrogMethodID, + }, + { + To: bridgeAddr, + From: fromAddr, + Err: nil, + Input: claimMessageEtrogMethodID, + }, + }, + } + + isClaimMethod := func(call Call) (bool, error) { + if len(call.Input) < methodIDLength { + return false, fmt.Errorf("input too short") + } + methodID := call.Input[:methodIDLength] + return bytes.Equal(methodID, claimAssetEtrogMethodID) || bytes.Equal(methodID, claimMessageEtrogMethodID), nil + } + + founds, err := findCall(rootCall, bridgeAddr, isClaimMethod, lg) + require.NoError(t, err) + require.NotNil(t, founds) + // DFS uses a stack so claimMessage (pushed last) is found first + require.Equal(t, claimMessageEtrogMethodID, []byte(founds[0].Input[:4])) +} + +func TestFindCallWithOnlyUnrecognizedMethods(t *testing.T) { + bridgeAddr := common.HexToAddress("0x10") + fromAddr := common.HexToAddress("0x20") + lg := logger.WithFields("module", "test") + + rootCall := Call{ + To: common.HexToAddress("0x01"), + From: fromAddr, + Err: nil, + Calls: []Call{ + { + To: bridgeAddr, + From: fromAddr, + Err: nil, + Input: []byte{0x38, 0xb8, 0xfb, 0xbb}, + }, + { + To: bridgeAddr, + From: fromAddr, + Err: nil, + Input: []byte{0xaa, 0xbb, 0xcc, 0xdd}, + }, + }, + } + + found, err := findCall(rootCall, bridgeAddr, func(call Call) (bool, error) { + if len(call.Input) < 4 { + return false, fmt.Errorf("input too short") + } + methodID := call.Input[:4] + if bytes.Equal(methodID, claimAssetEtrogMethodID) || bytes.Equal(methodID, claimMessageEtrogMethodID) { + return true, nil + } + return false, nil + }, lg) + + require.Error(t, err) + require.Nil(t, found) + require.Contains(t, err.Error(), "not found") +} + +func TestTryDecodeClaimCalldata(t 
*testing.T) { + lg := logger.WithFields("module", "test") + globalIndex := big.NewInt(42) + + agglayerBridgeABI, err := agglayerbridge.AgglayerbridgeMetaData.GetAbi() + require.NoError(t, err) + + packClaimInputs := func(method string, gi *big.Int) []byte { + data, packErr := agglayerBridgeABI.Methods[method].Inputs.Pack( + [tree.DefaultHeight][common.HashLength]byte{}, // smtProofLocalExitRoot + [tree.DefaultHeight][common.HashLength]byte{}, // smtProofRollupExitRoot + gi, + [common.HashLength]byte{}, // mainnetExitRoot + [common.HashLength]byte{}, // rollupExitRoot + uint32(10), + common.Address{}, + uint32(0), + common.Address{}, + big.NewInt(100), + []byte{}, + ) + require.NoError(t, packErr) + return data + } + + claimAssetInput := append(append([]byte{}, claimAssetEtrogMethodID...), packClaimInputs("claimAsset", globalIndex)...) + claimMessageInput := append(append([]byte{}, claimMessageEtrogMethodID...), packClaimInputs("claimMessage", globalIndex)...) + wrongGlobalInput := append(append([]byte{}, claimAssetEtrogMethodID...), packClaimInputs("claimAsset", big.NewInt(999))...) 
+ + tests := []struct { + name string + input []byte + globalIndex *big.Int + expectedFound bool + expectedMsg bool + expectErr bool + }{ + { + name: "input too short", + input: []byte{0x01, 0x02}, + expectErr: true, + }, + { + name: "unrecognized method ID", + input: []byte{0xaa, 0xbb, 0xcc, 0xdd}, + globalIndex: globalIndex, + expectedFound: false, + }, + { + name: "claimAsset matching globalIndex", + input: claimAssetInput, + globalIndex: globalIndex, + expectedFound: true, + expectedMsg: false, + }, + { + name: "claimMessage matching globalIndex", + input: claimMessageInput, + globalIndex: globalIndex, + expectedFound: true, + expectedMsg: true, + }, + { + name: "claimAsset non-matching globalIndex", + input: wrongGlobalInput, + globalIndex: globalIndex, + expectedFound: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + claim := &Claim{GlobalIndex: tt.globalIndex} + found, err := tryDecodeClaimCalldata(claim, tt.input, lg) + if tt.expectErr { + require.Error(t, err) + } else { + require.NoError(t, err) + require.Equal(t, tt.expectedFound, found) + if tt.expectedFound { + require.Equal(t, tt.expectedMsg, claim.IsMessage) + } + } + }) + } +} + +func TestBuildAppender(t *testing.T) { + bridgeAddr := common.HexToAddress("0x10") + blockNum := uint64(1) + lg := logger.WithFields("module", "test") + + l2ABI, err := agglayerbridgel2.Agglayerbridgel2MetaData.GetAbi() + require.NoError(t, err) + + agglayerBridgeABI, err := agglayerbridge.AgglayerbridgeMetaData.GetAbi() + require.NoError(t, err) + + claimGlobalIndex := big.NewInt(100) + + // Build claimAsset calldata that matches the ClaimEvent globalIndex + claimAssetCalldata, err := agglayerBridgeABI.Methods["claimAsset"].Inputs.Pack( + [tree.DefaultHeight][common.HashLength]byte{}, + [tree.DefaultHeight][common.HashLength]byte{}, + claimGlobalIndex, + [common.HashLength]byte{}, + [common.HashLength]byte{}, + uint32(10), + common.Address{}, + uint32(0), + common.Address{}, + 
big.NewInt(50), + []byte{}, + ) + require.NoError(t, err) + claimAssetInput := append(append([]byte{}, claimAssetEtrogMethodID...), claimAssetCalldata...) + + ethClient := mocks.NewEthClienter(t) + // Only called for the claimEventSignature subtest + ethClient.EXPECT(). + Call(mock.Anything, DebugTraceTxEndpoint, mock.Anything, mock.Anything). + Run(func(result any, method string, args ...any) { + arg, ok := result.(*Call) + require.True(t, ok) + *arg = Call{ + To: bridgeAddr, + From: common.HexToAddress("0x01"), + Input: claimAssetInput, + } + }). + Return(nil). + Maybe() + + agglayerBridge, err := agglayerbridge.NewAgglayerbridge(bridgeAddr, ethClient) + require.NoError(t, err) + agglayerBridgeL2, err := agglayerbridgel2.NewAgglayerbridgel2(bridgeAddr, ethClient) + require.NoError(t, err) + + deployment := &bridgeDeployment{ + kind: NonSovereignChain, + agglayerBridge: agglayerBridge, + agglayerBridgeL2: agglayerBridgeL2, + } + querier := claimtypemocks.NewClaimQuerier(t) + querier.EXPECT().GetBoundaryBlockForClaimType(mock.Anything, mock.Anything, mock.Anything). 
+ Return(uint64(0), db.ErrNotFound).Maybe() + + appenderMap, err := buildAppender(t.Context(), ethClient, querier, bridgeAddr, deployment, lg) + require.NoError(t, err) + require.NotNil(t, appenderMap) + + tests := []struct { + name string + eventSignature common.Hash + logsCount int + logBuilder func(t *testing.T) types.Log + }{ + { + name: "claimEventSignature", + eventSignature: claimEventSignature, + logsCount: 1, + logBuilder: func(t *testing.T) types.Log { + t.Helper() + event, err := agglayerBridgeABI.EventByID(claimEventSignature) + require.NoError(t, err) + data, err := event.Inputs.Pack( + claimGlobalIndex, uint32(10), common.Address{}, common.Address{}, big.NewInt(50), + ) + require.NoError(t, err) + return types.Log{ + Topics: []common.Hash{claimEventSignature}, + Data: data, + } + }, + }, + { + name: "detailedClaimEventSignature", + eventSignature: detailedClaimEventSignature, + logsCount: 1, + logBuilder: func(t *testing.T) types.Log { + t.Helper() + detailedEvent, err := l2ABI.EventByID(detailedClaimEventSignature) + require.NoError(t, err) + + var nonIndexed abi.Arguments + for _, inp := range detailedEvent.Inputs { + if !inp.Indexed { + nonIndexed = append(nonIndexed, inp) + } + } + // Non-indexed order: smtProofLocalExitRoot, smtProofRollupExitRoot, + // mainnetExitRoot, rollupExitRoot, leafType, originNetwork, + // originTokenAddress, destinationNetwork, amount, metadata + data, err := nonIndexed.Pack( + [tree.DefaultHeight][common.HashLength]byte{}, + [tree.DefaultHeight][common.HashLength]byte{}, + [common.HashLength]byte{}, + [common.HashLength]byte{}, + uint8(0), + uint32(10), + common.Address{}, + uint32(0), + big.NewInt(100), + []byte{}, + ) + require.NoError(t, err) + + destAddr := common.HexToAddress("0x30") + return types.Log{ + Topics: []common.Hash{ + detailedClaimEventSignature, + common.BigToHash(big.NewInt(200)), // globalIndex (indexed) + common.BytesToHash(destAddr.Bytes()), // destinationAddress (indexed) + }, + Data: data, + } + 
}, + }, + { + name: "unsetClaimEventSignature", + eventSignature: unsetClaimEventSignature, + logsCount: 1, + logBuilder: func(t *testing.T) types.Log { + t.Helper() + event, err := l2ABI.EventByID(unsetClaimEventSignature) + require.NoError(t, err) + data, err := event.Inputs.Pack( + common.HexToHash("0xdeadbeef"), // unsetGlobalIndex (bytes32) + common.HexToHash("0x5ca1e"), // newUnsetGlobalIndexHashChain (bytes32) + ) + require.NoError(t, err) + return types.Log{ + Topics: []common.Hash{unsetClaimEventSignature}, + Data: data, + } + }, + }, + { + name: "setClaimEventSignature", + eventSignature: setClaimEventSignature, + logsCount: 1, + logBuilder: func(t *testing.T) types.Log { + t.Helper() + event, err := l2ABI.EventByID(setClaimEventSignature) + require.NoError(t, err) + data, err := event.Inputs.Pack( + common.HexToHash("0xfeedcafe"), // globalIndex (bytes32) + ) + require.NoError(t, err) + return types.Log{ + Topics: []common.Hash{setClaimEventSignature}, + Data: data, + } + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + log := tt.logBuilder(t) + appenderFunc, exists := appenderMap[tt.eventSignature] + require.True(t, exists) + + block := &sync.EVMBlock{EVMBlockHeader: sync.EVMBlockHeader{Num: blockNum}} + err := appenderFunc(block, log) + require.NoError(t, err) + require.Equal(t, tt.logsCount, len(block.Events)) + }) + } +} + +func strPtr(s string) *string { + return &s +} diff --git a/claimsync/types/claim_querier.go b/claimsync/types/claim_querier.go new file mode 100644 index 000000000..974cde96c --- /dev/null +++ b/claimsync/types/claim_querier.go @@ -0,0 +1,12 @@ +package types + +import ( + "context" + + dbtypes "github.com/agglayer/aggkit/db/types" +) + +// ClaimQuerier is used by event handlers to check the DetailedClaimEvent boundary. 
+type ClaimQuerier interface { + GetBoundaryBlockForClaimType(ctx context.Context, tx dbtypes.Querier, claimType ClaimType) (uint64, error) +} diff --git a/claimsync/types/mocks/mock_claim_querier.go b/claimsync/types/mocks/mock_claim_querier.go new file mode 100644 index 000000000..0e843e9a8 --- /dev/null +++ b/claimsync/types/mocks/mock_claim_querier.go @@ -0,0 +1,98 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + context "context" + + claimsynctypes "github.com/agglayer/aggkit/claimsync/types" + + mock "github.com/stretchr/testify/mock" + + types "github.com/agglayer/aggkit/db/types" +) + +// ClaimQuerier is an autogenerated mock type for the ClaimQuerier type +type ClaimQuerier struct { + mock.Mock +} + +type ClaimQuerier_Expecter struct { + mock *mock.Mock +} + +func (_m *ClaimQuerier) EXPECT() *ClaimQuerier_Expecter { + return &ClaimQuerier_Expecter{mock: &_m.Mock} +} + +// GetBoundaryBlockForClaimType provides a mock function with given fields: ctx, tx, claimType +func (_m *ClaimQuerier) GetBoundaryBlockForClaimType(ctx context.Context, tx types.Querier, claimType claimsynctypes.ClaimType) (uint64, error) { + ret := _m.Called(ctx, tx, claimType) + + if len(ret) == 0 { + panic("no return value specified for GetBoundaryBlockForClaimType") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, types.Querier, claimsynctypes.ClaimType) (uint64, error)); ok { + return rf(ctx, tx, claimType) + } + if rf, ok := ret.Get(0).(func(context.Context, types.Querier, claimsynctypes.ClaimType) uint64); ok { + r0 = rf(ctx, tx, claimType) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(context.Context, types.Querier, claimsynctypes.ClaimType) error); ok { + r1 = rf(ctx, tx, claimType) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ClaimQuerier_GetBoundaryBlockForClaimType_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 
'GetBoundaryBlockForClaimType' +type ClaimQuerier_GetBoundaryBlockForClaimType_Call struct { + *mock.Call +} + +// GetBoundaryBlockForClaimType is a helper method to define mock.On call +// - ctx context.Context +// - tx types.Querier +// - claimType claimsynctypes.ClaimType +func (_e *ClaimQuerier_Expecter) GetBoundaryBlockForClaimType(ctx interface{}, tx interface{}, claimType interface{}) *ClaimQuerier_GetBoundaryBlockForClaimType_Call { + return &ClaimQuerier_GetBoundaryBlockForClaimType_Call{Call: _e.mock.On("GetBoundaryBlockForClaimType", ctx, tx, claimType)} +} + +func (_c *ClaimQuerier_GetBoundaryBlockForClaimType_Call) Run(run func(ctx context.Context, tx types.Querier, claimType claimsynctypes.ClaimType)) *ClaimQuerier_GetBoundaryBlockForClaimType_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(types.Querier), args[2].(claimsynctypes.ClaimType)) + }) + return _c +} + +func (_c *ClaimQuerier_GetBoundaryBlockForClaimType_Call) Return(_a0 uint64, _a1 error) *ClaimQuerier_GetBoundaryBlockForClaimType_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *ClaimQuerier_GetBoundaryBlockForClaimType_Call) RunAndReturn(run func(context.Context, types.Querier, claimsynctypes.ClaimType) (uint64, error)) *ClaimQuerier_GetBoundaryBlockForClaimType_Call { + _c.Call.Return(run) + return _c +} + +// NewClaimQuerier creates a new instance of ClaimQuerier. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewClaimQuerier(t interface { + mock.TestingT + Cleanup(func()) +}) *ClaimQuerier { + mock := &ClaimQuerier{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/claimsync/types/mocks/mock_claim_storager.go b/claimsync/types/mocks/mock_claim_storager.go index d256bba47..05298a7f8 100644 --- a/claimsync/types/mocks/mock_claim_storager.go +++ b/claimsync/types/mocks/mock_claim_storager.go @@ -207,9 +207,9 @@ func (_c *ClaimStorager_GetClaims_Call) RunAndReturn(run func(context.Context, t return _c } -// GetClaimsByGER provides a mock function with given fields: ctx, globalExitRoot -func (_m *ClaimStorager) GetClaimsByGER(ctx context.Context, globalExitRoot common.Hash) ([]*claimsynctypes.Claim, error) { - ret := _m.Called(ctx, globalExitRoot) +// GetClaimsByGER provides a mock function with given fields: ctx, tx, globalExitRoot +func (_m *ClaimStorager) GetClaimsByGER(ctx context.Context, tx types.Querier, globalExitRoot common.Hash) ([]*claimsynctypes.Claim, error) { + ret := _m.Called(ctx, tx, globalExitRoot) if len(ret) == 0 { panic("no return value specified for GetClaimsByGER") @@ -217,19 +217,19 @@ func (_m *ClaimStorager) GetClaimsByGER(ctx context.Context, globalExitRoot comm var r0 []*claimsynctypes.Claim var r1 error - if rf, ok := ret.Get(0).(func(context.Context, common.Hash) ([]*claimsynctypes.Claim, error)); ok { - return rf(ctx, globalExitRoot) + if rf, ok := ret.Get(0).(func(context.Context, types.Querier, common.Hash) ([]*claimsynctypes.Claim, error)); ok { + return rf(ctx, tx, globalExitRoot) } - if rf, ok := ret.Get(0).(func(context.Context, common.Hash) []*claimsynctypes.Claim); ok { - r0 = rf(ctx, globalExitRoot) + if rf, ok := ret.Get(0).(func(context.Context, types.Querier, common.Hash) []*claimsynctypes.Claim); ok { + r0 = rf(ctx, tx, globalExitRoot) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]*claimsynctypes.Claim) } } - if rf, ok := ret.Get(1).(func(context.Context, 
common.Hash) error); ok { - r1 = rf(ctx, globalExitRoot) + if rf, ok := ret.Get(1).(func(context.Context, types.Querier, common.Hash) error); ok { + r1 = rf(ctx, tx, globalExitRoot) } else { r1 = ret.Error(1) } @@ -244,14 +244,15 @@ type ClaimStorager_GetClaimsByGER_Call struct { // GetClaimsByGER is a helper method to define mock.On call // - ctx context.Context +// - tx types.Querier // - globalExitRoot common.Hash -func (_e *ClaimStorager_Expecter) GetClaimsByGER(ctx interface{}, globalExitRoot interface{}) *ClaimStorager_GetClaimsByGER_Call { - return &ClaimStorager_GetClaimsByGER_Call{Call: _e.mock.On("GetClaimsByGER", ctx, globalExitRoot)} +func (_e *ClaimStorager_Expecter) GetClaimsByGER(ctx interface{}, tx interface{}, globalExitRoot interface{}) *ClaimStorager_GetClaimsByGER_Call { + return &ClaimStorager_GetClaimsByGER_Call{Call: _e.mock.On("GetClaimsByGER", ctx, tx, globalExitRoot)} } -func (_c *ClaimStorager_GetClaimsByGER_Call) Run(run func(ctx context.Context, globalExitRoot common.Hash)) *ClaimStorager_GetClaimsByGER_Call { +func (_c *ClaimStorager_GetClaimsByGER_Call) Run(run func(ctx context.Context, tx types.Querier, globalExitRoot common.Hash)) *ClaimStorager_GetClaimsByGER_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(common.Hash)) + run(args[0].(context.Context), args[1].(types.Querier), args[2].(common.Hash)) }) return _c } @@ -261,7 +262,7 @@ func (_c *ClaimStorager_GetClaimsByGER_Call) Return(_a0 []*claimsynctypes.Claim, return _c } -func (_c *ClaimStorager_GetClaimsByGER_Call) RunAndReturn(run func(context.Context, common.Hash) ([]*claimsynctypes.Claim, error)) *ClaimStorager_GetClaimsByGER_Call { +func (_c *ClaimStorager_GetClaimsByGER_Call) RunAndReturn(run func(context.Context, types.Querier, common.Hash) ([]*claimsynctypes.Claim, error)) *ClaimStorager_GetClaimsByGER_Call { _c.Call.Return(run) return _c } diff --git a/config/types/true_false_auto.go b/config/types/true_false_auto.go index 
a75c10479..ee3586d31 100644 --- a/config/types/true_false_auto.go +++ b/config/types/true_false_auto.go @@ -43,13 +43,9 @@ func (m *TrueFalseAutoMode) UnmarshalText(text []byte) error { return nil } -// String returns the string representation. +// String returns the mode string ("true", "false", "auto", or ""). func (m TrueFalseAutoMode) String() string { - if m.Resolved != nil { - return fmt.Sprintf("{Mode: %s, Resolved: %t}", m.Mode, *m.Resolved) - } else { - return fmt.Sprintf("{Mode: %s, Resolved: }", m.Mode) - } + return m.Mode } // Validate checks that the mode is a valid value. Empty mode is allowed. diff --git a/config/types/true_false_auto_test.go b/config/types/true_false_auto_test.go new file mode 100644 index 000000000..544f6d534 --- /dev/null +++ b/config/types/true_false_auto_test.go @@ -0,0 +1,198 @@ +package types + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestTrueFalseAutoMode_UnmarshalText(t *testing.T) { + tests := []struct { + name string + input string + expected TrueFalseAutoMode + expectedError string + }{ + { + name: "true lowercase", + input: "true", + expected: TrueFalseAutoMode{Mode: "true"}, + }, + { + name: "true uppercase", + input: "TRUE", + expected: TrueFalseAutoMode{Mode: "true"}, + }, + { + name: "true mixed case", + input: "TrUe", + expected: TrueFalseAutoMode{Mode: "true"}, + }, + { + name: "true with whitespace", + input: " true ", + expected: TrueFalseAutoMode{Mode: "true"}, + }, + { + name: "false lowercase", + input: "false", + expected: TrueFalseAutoMode{Mode: "false"}, + }, + { + name: "false uppercase", + input: "FALSE", + expected: TrueFalseAutoMode{Mode: "false"}, + }, + { + name: "false mixed case", + input: "FaLsE", + expected: TrueFalseAutoMode{Mode: "false"}, + }, + { + name: "false with whitespace", + input: " false ", + expected: TrueFalseAutoMode{Mode: "false"}, + }, + { + name: "auto lowercase", + input: "auto", + expected: TrueFalseAutoMode{Mode: "auto"}, + }, + { + name: 
"auto uppercase", + input: "AUTO", + expected: TrueFalseAutoMode{Mode: "auto"}, + }, + { + name: "auto mixed case", + input: "AuTo", + expected: TrueFalseAutoMode{Mode: "auto"}, + }, + { + name: "auto with whitespace", + input: " auto ", + expected: TrueFalseAutoMode{Mode: "auto"}, + }, + { + name: "invalid value", + input: "invalid", + expectedError: "invalid TrueFalseAutoMode: invalid (valid values: true, false, auto)", + }, + { + name: "empty string", + input: "", + expectedError: "invalid TrueFalseAutoMode: (valid values: true, false, auto)", + }, + { + name: "numeric value", + input: "1", + expectedError: "invalid TrueFalseAutoMode: 1 (valid values: true, false, auto)", + }, + { + name: "yes value", + input: "yes", + expectedError: "invalid TrueFalseAutoMode: yes (valid values: true, false, auto)", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var mode TrueFalseAutoMode + err := mode.UnmarshalText([]byte(tt.input)) + + if tt.expectedError == "" { + require.NoError(t, err) + require.Equal(t, tt.expected, mode) + } else { + require.Error(t, err) + require.Equal(t, tt.expectedError, err.Error()) + } + }) + } +} + +func TestTrueFalseAutoMode_String(t *testing.T) { + tests := []struct { + name string + mode TrueFalseAutoMode + expected string + }{ + {name: "true mode", mode: TrueMode, expected: "true"}, + {name: "false mode", mode: FalseMode, expected: "false"}, + {name: "auto mode", mode: AutoMode, expected: "auto"}, + {name: "empty mode", mode: TrueFalseAutoMode{}, expected: ""}, + {name: "invalid mode", mode: TrueFalseAutoMode{Mode: "invalid"}, expected: "invalid"}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require.Equal(t, tt.expected, tt.mode.String()) + }) + } +} + +func TestTrueFalseAutoMode_Resolve(t *testing.T) { + tests := []struct { + name string + mode TrueFalseAutoMode + hasBridgeComponent bool + expected bool + }{ + {name: "true mode with bridge component", mode: TrueMode, 
hasBridgeComponent: true, expected: true}, + {name: "true mode without bridge component", mode: TrueMode, hasBridgeComponent: false, expected: true}, + {name: "false mode with bridge component", mode: FalseMode, hasBridgeComponent: true, expected: false}, + {name: "false mode without bridge component", mode: FalseMode, hasBridgeComponent: false, expected: false}, + {name: "auto mode with bridge component", mode: AutoMode, hasBridgeComponent: true, expected: true}, + {name: "auto mode without bridge component", mode: AutoMode, hasBridgeComponent: false, expected: false}, + {name: "invalid mode", mode: TrueFalseAutoMode{Mode: "invalid"}, hasBridgeComponent: true, expected: false}, + {name: "empty mode", mode: TrueFalseAutoMode{}, hasBridgeComponent: true, expected: false}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + mode := tt.mode + result := mode.Resolve(tt.hasBridgeComponent) + require.Equal(t, tt.expected, result) + require.NotNil(t, mode.Resolved) + require.Equal(t, tt.expected, *mode.Resolved) + }) + } +} + +func TestTrueFalseAutoMode_Validate(t *testing.T) { + tests := []struct { + name string + mode TrueFalseAutoMode + fieldName string + expectedError string + }{ + {name: "true mode", mode: TrueMode, fieldName: "TestField"}, + {name: "false mode", mode: FalseMode, fieldName: "TestField"}, + {name: "auto mode", mode: AutoMode, fieldName: "TestField"}, + {name: "empty mode is allowed", mode: TrueFalseAutoMode{}, fieldName: "TestField"}, + { + name: "invalid mode", + mode: TrueFalseAutoMode{Mode: "invalid_value"}, + fieldName: "TestField", + expectedError: "invalid TestField configuration:", + }, + { + name: "numeric mode", + mode: TrueFalseAutoMode{Mode: "123"}, + fieldName: "MyField", + expectedError: "invalid MyField configuration:", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := tt.mode.Validate(tt.fieldName) + if tt.expectedError == "" { + require.NoError(t, err) + } else { + 
require.Error(t, err) + require.Contains(t, err.Error(), tt.expectedError) + } + }) + } +} From d937b0af3dba04a18ee0db7d520f5c1c1d502038 Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Wed, 18 Mar 2026 11:24:51 +0100 Subject: [PATCH 10/28] feat: coverage --- claimsync/claimsync_test.go | 3 + claimsync/config_test.go | 102 +++ claimsync/storage/storage_test.go | 1195 ++++++++++++++++++++++++++++ claimsync/types/claim_data_test.go | 434 ++++++++++ 4 files changed, 1734 insertions(+) create mode 100644 claimsync/config_test.go create mode 100644 claimsync/storage/storage_test.go create mode 100644 claimsync/types/claim_data_test.go diff --git a/claimsync/claimsync_test.go b/claimsync/claimsync_test.go index ccb063fc9..c0c8af9e9 100644 --- a/claimsync/claimsync_test.go +++ b/claimsync/claimsync_test.go @@ -29,6 +29,9 @@ import ( // 5. Call SetNextRequiredBlock(ctx, 1) to unlock the syncer. // 6. Assert GetLastProcessedBlock returns found=true — the syncer processed the blocks and captured the event. 
func TestClaimSyncerWaitUntilSetNextRequiredBlock(t *testing.T) { + if testing.Short() { + t.Skip("Skipping test in short mode") + } ctx, cancelFn := context.WithCancel(context.Background()) // Setup Docker L1 client, auth := startGeth(t, ctx, cancelFn) diff --git a/claimsync/config_test.go b/claimsync/config_test.go new file mode 100644 index 000000000..e6fdf5307 --- /dev/null +++ b/claimsync/config_test.go @@ -0,0 +1,102 @@ +package claimsync + +import ( + "testing" + + configtypes "github.com/agglayer/aggkit/config/types" + aggkittypes "github.com/agglayer/aggkit/types" + "github.com/stretchr/testify/require" +) + +func TestConfigEmbedded_Validate(t *testing.T) { + c := ConfigEmbedded{} + require.NoError(t, c.Validate()) +} + +func TestConfigStandalone_Validate(t *testing.T) { + tests := []struct { + name string + config ConfigStandalone + expectedError string + }{ + { + name: "valid config", + config: ConfigStandalone{ + ConfigEmbedded: ConfigEmbedded{}, + BlockFinality: aggkittypes.SafeBlock, + }, + expectedError: "", + }, + { + name: "valid config with AutoStart true", + config: ConfigStandalone{ + BlockFinality: aggkittypes.SafeBlock, + AutoStart: configtypes.TrueMode, + }, + expectedError: "", + }, + { + name: "valid config with AutoStart false", + config: ConfigStandalone{ + BlockFinality: aggkittypes.SafeBlock, + AutoStart: configtypes.FalseMode, + }, + expectedError: "", + }, + { + name: "valid config with AutoStart auto", + config: ConfigStandalone{ + BlockFinality: aggkittypes.SafeBlock, + AutoStart: configtypes.AutoMode, + }, + expectedError: "", + }, + { + name: "valid config with empty AutoStart", + config: ConfigStandalone{ + BlockFinality: aggkittypes.SafeBlock, + AutoStart: configtypes.TrueFalseAutoMode{}, + }, + expectedError: "", + }, + { + name: "invalid BlockFinality", + config: ConfigStandalone{ + BlockFinality: aggkittypes.BlockNumberFinality{ + Block: aggkittypes.Latest, + Offset: 1, // Invalid: LatestBlock cannot have positive offset + }, 
+ }, + expectedError: "invalid BlockFinality configuration:", + }, + { + name: "invalid AutoStart value", + config: ConfigStandalone{ + BlockFinality: aggkittypes.SafeBlock, + AutoStart: configtypes.TrueFalseAutoMode{Mode: "invalid_value"}, + }, + expectedError: "invalid AutoStart configuration:", + }, + { + name: "invalid AutoStart numeric value", + config: ConfigStandalone{ + BlockFinality: aggkittypes.SafeBlock, + AutoStart: configtypes.TrueFalseAutoMode{Mode: "123"}, + }, + expectedError: "invalid AutoStart configuration:", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := tt.config.Validate() + + if tt.expectedError == "" { + require.NoError(t, err) + } else { + require.Error(t, err) + require.Contains(t, err.Error(), tt.expectedError) + } + }) + } +} diff --git a/claimsync/storage/storage_test.go b/claimsync/storage/storage_test.go new file mode 100644 index 000000000..621f7acbc --- /dev/null +++ b/claimsync/storage/storage_test.go @@ -0,0 +1,1195 @@ +package storage + +import ( + "context" + "database/sql" + "math/big" + "path/filepath" + "reflect" + "regexp" + "strings" + "testing" + "time" + + claimsynctypes "github.com/agglayer/aggkit/claimsync/types" + "github.com/agglayer/aggkit/db" + logger "github.com/agglayer/aggkit/log" + treetypes "github.com/agglayer/aggkit/tree/types" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" +) + +// newTestStorage creates a new test storage. The returned *sql.DB is the SAME +// connection used internally by the storage, so closing it will cause the storage +// to fail on subsequent operations. This is used for the "db error" tests. 
+func newTestStorage(t *testing.T) (claimsynctypes.ClaimStorager, *sql.DB) { + t.Helper() + + lg := logger.GetDefaultLogger() + dbPath := filepath.Join(t.TempDir(), "test.db") + + // Open the DB first so we have a handle to share + rawDB, err := db.NewSQLiteDB(dbPath) + require.NoError(t, err) + + // Run migrations manually via NewStandalone on the same path, + // then use New() to share the rawDB connection. + // We use NewStandalone to run migrations, then close that storage and reopen with shared DB. + setupStorage, err := NewStandalone(lg, dbPath, t.Name()+"-setup", 30*time.Second) + require.NoError(t, err) + _ = setupStorage // migrations are done; we don't need this handle + + // Now create storage using the shared rawDB + s, err := New(lg, rawDB, t.Name(), 30*time.Second) + require.NoError(t, err) + + return s, rawDB +} + +// insertBlockAndClaim inserts a block and claim using a transaction. +func insertBlockAndClaim(t *testing.T, ctx context.Context, s claimsynctypes.ClaimStorager, claim claimsynctypes.Claim) { + t.Helper() + + tx, err := s.NewTx(ctx) + require.NoError(t, err) + + err = s.InsertBlock(ctx, tx, claim.BlockNum, common.Hash{}) + require.NoError(t, err) + + err = s.InsertClaim(ctx, tx, claim) + require.NoError(t, err) + + require.NoError(t, tx.Commit()) +} + +func TestInsertAndGetClaim(t *testing.T) { + s, _ := newTestStorage(t) + ctx := context.Background() + + claim := claimsynctypes.Claim{ + BlockNum: 1, + BlockPos: 0, + TxHash: common.HexToHash("0xabc"), + GlobalIndex: new(big.Int).SetUint64(1093), + Amount: big.NewInt(100), + Type: claimsynctypes.ClaimEvent, + } + + insertBlockAndClaim(t, ctx, s, claim) + + got, err := s.GetClaims(ctx, nil, 1, 1) + require.NoError(t, err) + require.Len(t, got, 1) + require.Equal(t, claim.GlobalIndex, got[0].GlobalIndex) + require.Equal(t, claim.BlockNum, got[0].BlockNum) + require.Equal(t, claim.Type, got[0].Type) +} + +func TestGetClaimsByGlobalIndex(t *testing.T) { + t.Run("claim not found", func(t 
*testing.T) { + s, _ := newTestStorage(t) + ctx := context.Background() + + got, err := s.GetClaimsByGlobalIndex(ctx, nil, big.NewInt(9999)) + require.NoError(t, err) + require.Empty(t, got) + }) + + t.Run("retrieve existing claims", func(t *testing.T) { + s, _ := newTestStorage(t) + ctx := context.Background() + + bigIndex := new(big.Int).SetUint64(5000) + + claim1 := claimsynctypes.Claim{ + BlockNum: 1, + BlockPos: 0, + GlobalIndex: big.NewInt(1000), + Amount: big.NewInt(1), + Type: claimsynctypes.ClaimEvent, + } + insertBlockAndClaim(t, ctx, s, claim1) + + claim2 := claimsynctypes.Claim{ + BlockNum: 2, + BlockPos: 0, + GlobalIndex: new(big.Int).Set(bigIndex), + Amount: big.NewInt(2), + Metadata: []byte("meta2"), + ProofLocalExitRoot: treetypes.Proof{common.HexToHash("0x1a")}, + ProofRollupExitRoot: treetypes.Proof{common.HexToHash("0x1b")}, + MainnetExitRoot: common.HexToHash("0x2a"), + RollupExitRoot: common.HexToHash("0x2b"), + GlobalExitRoot: common.HexToHash("0x2c"), + Type: claimsynctypes.ClaimEvent, + } + insertBlockAndClaim(t, ctx, s, claim2) + + claim3 := claimsynctypes.Claim{ + BlockNum: 3, + BlockPos: 0, + GlobalIndex: new(big.Int).Set(bigIndex), + Amount: big.NewInt(3), + Metadata: []byte("meta3"), + ProofLocalExitRoot: treetypes.Proof{common.HexToHash("0x9a")}, + ProofRollupExitRoot: treetypes.Proof{common.HexToHash("0x9b")}, + MainnetExitRoot: common.HexToHash("0x9c"), + RollupExitRoot: common.HexToHash("0x9d"), + GlobalExitRoot: common.HexToHash("0x9e"), + Type: claimsynctypes.ClaimEvent, + } + insertBlockAndClaim(t, ctx, s, claim3) + + // Query bigIndex: should return compacted claim (oldest meta + newest proofs) + got, err := s.GetClaimsByGlobalIndex(ctx, nil, bigIndex) + require.NoError(t, err) + require.Len(t, got, 1) + + // Oldest block's metadata + require.Equal(t, claim2.BlockNum, got[0].BlockNum) + require.Equal(t, claim2.Metadata, got[0].Metadata) + // Newest block's proofs + require.Equal(t, claim3.ProofLocalExitRoot, 
got[0].ProofLocalExitRoot) + require.Equal(t, claim3.ProofRollupExitRoot, got[0].ProofRollupExitRoot) + require.Equal(t, claim3.MainnetExitRoot, got[0].MainnetExitRoot) + }) + + t.Run("large global index", func(t *testing.T) { + s, _ := newTestStorage(t) + ctx := context.Background() + + // 2^128 - 1 + largeIndex := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 128), big.NewInt(1)) + claim := claimsynctypes.Claim{ + BlockNum: 1, + BlockPos: 0, + GlobalIndex: largeIndex, + Amount: big.NewInt(1), + Type: claimsynctypes.ClaimEvent, + } + insertBlockAndClaim(t, ctx, s, claim) + + got, err := s.GetClaimsByGlobalIndex(ctx, nil, largeIndex) + require.NoError(t, err) + require.Len(t, got, 1) + require.Equal(t, 0, got[0].GlobalIndex.Cmp(largeIndex)) + }) + + t.Run("zero global index", func(t *testing.T) { + s, _ := newTestStorage(t) + ctx := context.Background() + + zeroIndex := big.NewInt(0) + claim := claimsynctypes.Claim{ + BlockNum: 1, + BlockPos: 0, + GlobalIndex: zeroIndex, + Amount: big.NewInt(1), + Type: claimsynctypes.ClaimEvent, + } + insertBlockAndClaim(t, ctx, s, claim) + + got, err := s.GetClaimsByGlobalIndex(ctx, nil, zeroIndex) + require.NoError(t, err) + require.Len(t, got, 1) + require.Equal(t, 0, got[0].GlobalIndex.Cmp(zeroIndex)) + }) + + t.Run("nil global index", func(t *testing.T) { + s, _ := newTestStorage(t) + ctx := context.Background() + + _, err := s.GetClaimsByGlobalIndex(ctx, nil, nil) + require.Error(t, err) + require.Contains(t, err.Error(), "globalIndex cannot be nil") + }) + + t.Run("db error", func(t *testing.T) { + s, rawDB := newTestStorage(t) + ctx := context.Background() + + // Close the underlying DB to force an error + require.NoError(t, rawDB.Close()) + + _, err := s.GetClaimsByGlobalIndex(ctx, nil, big.NewInt(1)) + require.Error(t, err) + }) +} + +func TestGetClaims_Compact(t *testing.T) { + // Build a proof with a single distinguishing hash + makeProof := func(h common.Hash) treetypes.Proof { + var p treetypes.Proof + p[0] = h + 
return p + } + + // Define the claims used across test cases. + // Note: claims[0] and claims[2] are both at block=1 but have different block_pos (0 and 1), + // since the primary key is (block_num, block_pos). + buildClaims := func() ([]claimsynctypes.Claim, claimsynctypes.UnsetClaim) { + claims := []claimsynctypes.Claim{ + { // claims[0]: block=1, pos=0, gi=1 + BlockNum: 1, + BlockPos: 0, + TxHash: common.HexToHash("0x01"), + GlobalIndex: big.NewInt(1), + Metadata: []byte("metadata1"), + ProofLocalExitRoot: makeProof(common.HexToHash("0x10")), + ProofRollupExitRoot: makeProof(common.HexToHash("0x11")), + MainnetExitRoot: common.HexToHash("0x12"), + RollupExitRoot: common.HexToHash("0x13"), + GlobalExitRoot: common.HexToHash("0x14"), + Amount: big.NewInt(1), + Type: claimsynctypes.ClaimEvent, + }, + { // claims[1]: block=2, pos=0, gi=2 + BlockNum: 2, + BlockPos: 0, + TxHash: common.HexToHash("0x02"), + GlobalIndex: big.NewInt(2), + Metadata: []byte("metadata2"), + ProofLocalExitRoot: makeProof(common.HexToHash("0x20")), + ProofRollupExitRoot: makeProof(common.HexToHash("0x21")), + MainnetExitRoot: common.HexToHash("0x22"), + RollupExitRoot: common.HexToHash("0x23"), + GlobalExitRoot: common.HexToHash("0x24"), + Amount: big.NewInt(2), + Type: claimsynctypes.ClaimEvent, + }, + { // claims[2]: block=1, pos=1, gi=100 (oldest for gi=100) + BlockNum: 1, + BlockPos: 1, + TxHash: common.HexToHash("0x03"), + GlobalIndex: big.NewInt(100), + Metadata: []byte("original_metadata"), + ProofLocalExitRoot: makeProof(common.HexToHash("0x1a")), + ProofRollupExitRoot: makeProof(common.HexToHash("0x1b")), + MainnetExitRoot: common.HexToHash("0x1c"), + RollupExitRoot: common.HexToHash("0x1d"), + GlobalExitRoot: common.HexToHash("0x1e"), + Amount: big.NewInt(3), + Type: claimsynctypes.ClaimEvent, + }, + { // claims[3]: block=2, pos=1, gi=100 (middle for gi=100) + BlockNum: 2, + BlockPos: 1, + TxHash: common.HexToHash("0x04"), + GlobalIndex: big.NewInt(100), + Metadata: 
[]byte("middle_metadata"), + ProofLocalExitRoot: makeProof(common.HexToHash("0x2a")), + ProofRollupExitRoot: makeProof(common.HexToHash("0x2b")), + MainnetExitRoot: common.HexToHash("0x2c"), + RollupExitRoot: common.HexToHash("0x2d"), + GlobalExitRoot: common.HexToHash("0x2e"), + Amount: big.NewInt(4), + Type: claimsynctypes.ClaimEvent, + }, + { // claims[4]: block=3, pos=0, gi=100 (newest for gi=100) + BlockNum: 3, + BlockPos: 0, + TxHash: common.HexToHash("0x05"), + GlobalIndex: big.NewInt(100), + Metadata: []byte("newest_metadata"), + ProofLocalExitRoot: makeProof(common.HexToHash("0x3a")), + ProofRollupExitRoot: makeProof(common.HexToHash("0x3b")), + MainnetExitRoot: common.HexToHash("0x3c"), + RollupExitRoot: common.HexToHash("0x3d"), + GlobalExitRoot: common.HexToHash("0x3e"), + DestinationNetwork: 5, + Amount: big.NewInt(5), + Type: claimsynctypes.DetailedClaimEvent, + }, + } + + unsetClaim := claimsynctypes.UnsetClaim{ + BlockNum: 5, + BlockPos: 0, + TxHash: common.HexToHash("0xaa"), + GlobalIndex: big.NewInt(100), + } + + return claims, unsetClaim + } + + insertClaims := func(t *testing.T, s claimsynctypes.ClaimStorager, ctx context.Context, toInsert []claimsynctypes.Claim) { + t.Helper() + // Track inserted blocks to avoid duplicates + insertedBlocks := map[uint64]bool{} + for _, c := range toInsert { + if !insertedBlocks[c.BlockNum] { + err := s.InsertBlock(ctx, nil, c.BlockNum, common.Hash{}) + require.NoError(t, err) + insertedBlocks[c.BlockNum] = true + } + err := s.InsertClaim(ctx, nil, c) + require.NoError(t, err) + } + } + + t.Run("single claim, no compaction", func(t *testing.T) { + s, _ := newTestStorage(t) + ctx := context.Background() + claims, _ := buildClaims() + + insertClaims(t, s, ctx, claims[:1]) + + got, err := s.GetClaims(ctx, nil, 1, 1) + require.NoError(t, err) + require.Len(t, got, 1) + require.Equal(t, claims[0].GlobalIndex, got[0].GlobalIndex) + require.Equal(t, claims[0].Metadata, got[0].Metadata) + }) + + t.Run("two distinct 
global indexes, no compaction", func(t *testing.T) { + s, _ := newTestStorage(t) + ctx := context.Background() + claims, _ := buildClaims() + + insertClaims(t, s, ctx, claims[:2]) + + got, err := s.GetClaims(ctx, nil, 1, 2) + require.NoError(t, err) + require.Len(t, got, 2) + require.Equal(t, claims[0].GlobalIndex, got[0].GlobalIndex) + require.Equal(t, claims[1].GlobalIndex, got[1].GlobalIndex) + }) + + t.Run("compact three claims with same global index", func(t *testing.T) { + s, _ := newTestStorage(t) + ctx := context.Background() + claims, _ := buildClaims() + + // Insert claims[2], claims[3], claims[4] — all gi=100 + insertClaims(t, s, ctx, []claimsynctypes.Claim{claims[2], claims[3], claims[4]}) + + got, err := s.GetClaims(ctx, nil, 1, 3) + require.NoError(t, err) + require.Len(t, got, 1) + + // Oldest metadata (block 1, pos 1 = claims[2]) + require.Equal(t, claims[2].BlockNum, got[0].BlockNum) + require.Equal(t, claims[2].BlockPos, got[0].BlockPos) + require.Equal(t, claims[2].Metadata, got[0].Metadata) + require.Equal(t, claims[2].TxHash, got[0].TxHash) + require.Equal(t, claims[2].Amount, got[0].Amount) + // Type comes from oldest (claims[2] = ClaimEvent) + require.Equal(t, claims[2].Type, got[0].Type) + + // Newest proofs (block 3 = claims[4]) + require.Equal(t, claims[4].ProofLocalExitRoot, got[0].ProofLocalExitRoot) + require.Equal(t, claims[4].ProofRollupExitRoot, got[0].ProofRollupExitRoot) + require.Equal(t, claims[4].MainnetExitRoot, got[0].MainnetExitRoot) + require.Equal(t, claims[4].RollupExitRoot, got[0].RollupExitRoot) + require.Equal(t, claims[4].GlobalExitRoot, got[0].GlobalExitRoot) + // DestinationNetwork comes from oldest (claims[2]), not newest (claims[4]) + require.Equal(t, claims[2].DestinationNetwork, got[0].DestinationNetwork) + }) + + t.Run("no compaction when unset_claim exists", func(t *testing.T) { + s, _ := newTestStorage(t) + ctx := context.Background() + claims, unsetClaim := buildClaims() + + // Insert 
claims[2]+claims[3]+claims[4] and an unset_claim for gi=100 + insertClaims(t, s, ctx, []claimsynctypes.Claim{claims[2], claims[3], claims[4]}) + + // Insert block 5 and then the unset_claim + require.NoError(t, s.InsertBlock(ctx, nil, 5, common.Hash{})) + require.NoError(t, s.InsertUnsetClaim(ctx, nil, unsetClaim)) + + // GetClaims for blocks 1-3: all 3 claims should be returned uncompacted + got, err := s.GetClaims(ctx, nil, 1, 3) + require.NoError(t, err) + require.Len(t, got, 3) + }) + + t.Run("query range excludes some blocks", func(t *testing.T) { + s, _ := newTestStorage(t) + ctx := context.Background() + claims, _ := buildClaims() + + // Insert: claims[0](b1,p0,gi=1), claims[2](b1,p1,gi=100), claims[1](b2,p0,gi=2), + // claims[3](b2,p1,gi=100), claims[4](b3,p0,gi=100) + insertClaims(t, s, ctx, []claimsynctypes.Claim{claims[0], claims[2], claims[1], claims[3], claims[4]}) + + // GetClaims for block range [2,2]: + // all_claims_ranked ranks over ALL claims in DB: + // gi=1: claims[0] → rn_oldest_global=1, rn_newest_global=1 + // gi=2: claims[1] → rn_oldest_global=1, rn_newest_global=1 + // gi=100: claims[2](b1) rn_oldest=1, claims[3](b2) rn_oldest=2, claims[4](b3) rn_newest=1 + // claims_in_range [2,2]: claims[1](gi=2), claims[3](gi=100,rn_oldest_global=2,rn_newest=2) + // compactable_claims: WHERE o.rn_oldest_global = 1 — claims[3] has rn_oldest_global=2 → excluded + // claims[1](gi=2) has rn_oldest_global=1 → included, joins with claims[1](rn_newest_global=1 in range) + // claims_with_unset: no unset_claims → empty + // Result: 1 compacted claim: gi=2 (claims[1]) + got, err := s.GetClaims(ctx, nil, 2, 2) + require.NoError(t, err) + require.Len(t, got, 1) + require.Equal(t, big.NewInt(2), got[0].GlobalIndex) + }) +} + +func TestGetClaimsByGlobalIndex_Compact(t *testing.T) { + makeProof := func(h common.Hash) treetypes.Proof { + var p treetypes.Proof + p[0] = h + return p + } + + buildClaims := func() ([]claimsynctypes.Claim, claimsynctypes.UnsetClaim) { + 
claims := []claimsynctypes.Claim{ + { // claims[2]: block=1, gi=100 (oldest) + BlockNum: 1, + BlockPos: 0, + TxHash: common.HexToHash("0x03"), + GlobalIndex: big.NewInt(100), + Metadata: []byte("original_metadata"), + ProofLocalExitRoot: makeProof(common.HexToHash("0x1a")), + ProofRollupExitRoot: makeProof(common.HexToHash("0x1b")), + MainnetExitRoot: common.HexToHash("0x1c"), + RollupExitRoot: common.HexToHash("0x1d"), + GlobalExitRoot: common.HexToHash("0x1e"), + Amount: big.NewInt(3), + Type: claimsynctypes.ClaimEvent, + }, + { // claims[3]: block=2, gi=100 (middle) + BlockNum: 2, + BlockPos: 0, + TxHash: common.HexToHash("0x04"), + GlobalIndex: big.NewInt(100), + Metadata: []byte("middle_metadata"), + ProofLocalExitRoot: makeProof(common.HexToHash("0x2a")), + ProofRollupExitRoot: makeProof(common.HexToHash("0x2b")), + MainnetExitRoot: common.HexToHash("0x2c"), + RollupExitRoot: common.HexToHash("0x2d"), + GlobalExitRoot: common.HexToHash("0x2e"), + Amount: big.NewInt(4), + Type: claimsynctypes.ClaimEvent, + }, + { // claims[4]: block=3, gi=100 (newest) + BlockNum: 3, + BlockPos: 0, + TxHash: common.HexToHash("0x05"), + GlobalIndex: big.NewInt(100), + Metadata: []byte("newest_metadata"), + ProofLocalExitRoot: makeProof(common.HexToHash("0x3a")), + ProofRollupExitRoot: makeProof(common.HexToHash("0x3b")), + MainnetExitRoot: common.HexToHash("0x3c"), + RollupExitRoot: common.HexToHash("0x3d"), + GlobalExitRoot: common.HexToHash("0x3e"), + Amount: big.NewInt(5), + Type: claimsynctypes.DetailedClaimEvent, + }, + } + + unsetClaim := claimsynctypes.UnsetClaim{ + BlockNum: 5, + BlockPos: 0, + TxHash: common.HexToHash("0xaa"), + GlobalIndex: big.NewInt(100), + } + + return claims, unsetClaim + } + + t.Run("no unset_claim -> compacted", func(t *testing.T) { + s, _ := newTestStorage(t) + ctx := context.Background() + claims, _ := buildClaims() + + insertedBlocks := map[uint64]bool{} + for _, c := range claims { + if !insertedBlocks[c.BlockNum] { + require.NoError(t, 
s.InsertBlock(ctx, nil, c.BlockNum, common.Hash{})) + insertedBlocks[c.BlockNum] = true + } + require.NoError(t, s.InsertClaim(ctx, nil, c)) + } + + got, err := s.GetClaimsByGlobalIndex(ctx, nil, big.NewInt(100)) + require.NoError(t, err) + require.Len(t, got, 1) + + // Oldest metadata (block 1) + require.Equal(t, claims[0].Metadata, got[0].Metadata) + require.Equal(t, claims[0].BlockNum, got[0].BlockNum) + // Newest proofs (block 3) + require.Equal(t, claims[2].ProofLocalExitRoot, got[0].ProofLocalExitRoot) + require.Equal(t, claims[2].MainnetExitRoot, got[0].MainnetExitRoot) + }) + + t.Run("with unset_claim -> uncompacted", func(t *testing.T) { + s, _ := newTestStorage(t) + ctx := context.Background() + claims, unsetClaim := buildClaims() + + insertedBlocks := map[uint64]bool{} + for _, c := range claims { + if !insertedBlocks[c.BlockNum] { + require.NoError(t, s.InsertBlock(ctx, nil, c.BlockNum, common.Hash{})) + insertedBlocks[c.BlockNum] = true + } + require.NoError(t, s.InsertClaim(ctx, nil, c)) + } + + require.NoError(t, s.InsertBlock(ctx, nil, 5, common.Hash{})) + require.NoError(t, s.InsertUnsetClaim(ctx, nil, unsetClaim)) + + got, err := s.GetClaimsByGlobalIndex(ctx, nil, big.NewInt(100)) + require.NoError(t, err) + require.Len(t, got, 3) + }) +} + +func TestDatabaseQueryTimeout(t *testing.T) { + lg := logger.GetDefaultLogger() + dbPath := filepath.Join(t.TempDir(), "timeout_test.db") + + // Create storage with normal timeout for setup + s, err := NewStandalone(lg, dbPath, "setup", 100*time.Millisecond) + require.NoError(t, err) + + ctx := context.Background() + require.NoError(t, s.InsertBlock(ctx, nil, 1, common.Hash{})) + + // Create second storage pointing to same dbPath but with 1ns timeout + s2, err := NewStandalone(lg, dbPath, "timeout_storage", time.Nanosecond) + require.NoError(t, err) + + _, _, err = s2.GetLastProcessedBlock(ctx, nil) + require.Error(t, err) + require.Contains(t, err.Error(), "context deadline exceeded") + + _, err = 
s2.GetClaims(ctx, nil, 1, 1) + require.Error(t, err) + require.Contains(t, err.Error(), "context deadline exceeded") +} + +func TestClaimColumnsSQL_ReflectionCheck(t *testing.T) { + t.Parallel() + + claimType := reflect.TypeFor[claimsynctypes.Claim]() + meddlerColumns := make([]string, 0, claimType.NumField()) + for i := range claimType.NumField() { + field := claimType.Field(i) + tag := field.Tag.Get("meddler") + if tag == "" || tag == "-" { + continue + } + // meddler tag format: "column_name" or "column_name,encoder" + parts := strings.SplitN(tag, ",", 2) + colName := parts[0] + if colName == "" || colName == "-" { + continue + } + meddlerColumns = append(meddlerColumns, colName) + } + + // Normalize whitespace in claimColumnsSQL and split by comma + normalized := regexp.MustCompile(`\s+`).ReplaceAllString(claimColumnsSQL, " ") + normalized = strings.TrimSpace(normalized) + rawCols := strings.Split(normalized, ",") + sqlColumnSet := make(map[string]bool, len(rawCols)) + for _, col := range rawCols { + col = strings.TrimSpace(col) + if col != "" { + sqlColumnSet[col] = true + } + } + + require.Equal(t, len(meddlerColumns), len(sqlColumnSet), + "number of meddler-tagged fields (%d) != number of SQL columns (%d)", + len(meddlerColumns), len(sqlColumnSet)) + + for _, col := range meddlerColumns { + require.True(t, sqlColumnSet[col], + "meddler column %q not found in claimColumnsSQL", col) + } +} + +func TestGetBoundaryBlockForClaimType(t *testing.T) { + testCases := []struct { + name string + claimType claimsynctypes.ClaimType + claimsToInsert []struct { + blockNum uint64 + claimType claimsynctypes.ClaimType + } + expectedBlock uint64 + expectError bool + errorIs error + }{ + { + name: "no claims -> db.ErrNotFound", + claimType: claimsynctypes.DetailedClaimEvent, + claimsToInsert: nil, + expectError: true, + errorIs: db.ErrNotFound, + }, + { + name: "DetailedClaimEvent at blocks 1 and 6 -> returns 6", + claimType: claimsynctypes.DetailedClaimEvent, + claimsToInsert: 
[]struct { + blockNum uint64 + claimType claimsynctypes.ClaimType + }{ + {1, claimsynctypes.DetailedClaimEvent}, + {6, claimsynctypes.DetailedClaimEvent}, + }, + expectedBlock: 6, + }, + { + name: "mixed types, DetailedClaimEvent at blocks 100 and 101 -> returns 101", + claimType: claimsynctypes.DetailedClaimEvent, + claimsToInsert: []struct { + blockNum uint64 + claimType claimsynctypes.ClaimType + }{ + {50, claimsynctypes.ClaimEvent}, + {100, claimsynctypes.DetailedClaimEvent}, + {101, claimsynctypes.DetailedClaimEvent}, + }, + expectedBlock: 101, + }, + { + name: "only ClaimEvent, ask DetailedClaimEvent -> db.ErrNotFound", + claimType: claimsynctypes.DetailedClaimEvent, + claimsToInsert: []struct { + blockNum uint64 + claimType claimsynctypes.ClaimType + }{ + {10, claimsynctypes.ClaimEvent}, + {20, claimsynctypes.ClaimEvent}, + }, + expectError: true, + errorIs: db.ErrNotFound, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + s, _ := newTestStorage(t) + ctx := context.Background() + + insertedBlocks := map[uint64]bool{} + for i, ci := range tc.claimsToInsert { + if !insertedBlocks[ci.blockNum] { + require.NoError(t, s.InsertBlock(ctx, nil, ci.blockNum, common.Hash{})) + insertedBlocks[ci.blockNum] = true + } + claim := claimsynctypes.Claim{ + BlockNum: ci.blockNum, + BlockPos: uint64(i), + GlobalIndex: big.NewInt(int64(i + 1)), + Amount: big.NewInt(0), + Type: ci.claimType, + } + require.NoError(t, s.InsertClaim(ctx, nil, claim)) + } + + gotBlock, err := s.GetBoundaryBlockForClaimType(ctx, nil, tc.claimType) + if tc.expectError { + require.Error(t, err) + if tc.errorIs != nil { + require.ErrorIs(t, err, tc.errorIs) + } + } else { + require.NoError(t, err) + require.Equal(t, tc.expectedBlock, gotBlock) + } + }) + } +} + +func TestGetClaimsByGER(t *testing.T) { + s, _ := newTestStorage(t) + ctx := context.Background() + + gerHash := common.HexToHash("0xaaaa1111") + otherGER := common.HexToHash("0xbbbb2222") + unknownGER := 
common.HexToHash("0xcccc3333") + + // Insert blocks + require.NoError(t, s.InsertBlock(ctx, nil, 1, common.Hash{})) + require.NoError(t, s.InsertBlock(ctx, nil, 2, common.Hash{})) + require.NoError(t, s.InsertBlock(ctx, nil, 3, common.Hash{})) + + // detailedClaim: block=1, gi=100, ger=gerHash, type=DetailedClaimEvent + detailedClaim := claimsynctypes.Claim{ + BlockNum: 1, + BlockPos: 0, + GlobalIndex: big.NewInt(100), + GlobalExitRoot: gerHash, + Amount: big.NewInt(0), + Type: claimsynctypes.DetailedClaimEvent, + } + require.NoError(t, s.InsertClaim(ctx, nil, detailedClaim)) + + // claimEventSameGER: block=2, gi=200, ger=gerHash, type=ClaimEvent (should NOT be returned) + claimEventSameGER := claimsynctypes.Claim{ + BlockNum: 2, + BlockPos: 0, + GlobalIndex: big.NewInt(200), + GlobalExitRoot: gerHash, + Amount: big.NewInt(0), + Type: claimsynctypes.ClaimEvent, + } + require.NoError(t, s.InsertClaim(ctx, nil, claimEventSameGER)) + + // detailedOtherGER: block=3, gi=300, ger=otherGER, type=DetailedClaimEvent (should NOT be returned) + detailedOtherGER := claimsynctypes.Claim{ + BlockNum: 3, + BlockPos: 0, + GlobalIndex: big.NewInt(300), + GlobalExitRoot: otherGER, + Amount: big.NewInt(0), + Type: claimsynctypes.DetailedClaimEvent, + } + require.NoError(t, s.InsertClaim(ctx, nil, detailedOtherGER)) + + t.Run("returns only DetailedClaimEvent with matching GER", func(t *testing.T) { + got, err := s.GetClaimsByGER(ctx, nil, gerHash) + require.NoError(t, err) + require.Len(t, got, 1) + require.Equal(t, big.NewInt(100), got[0].GlobalIndex) + require.Equal(t, claimsynctypes.DetailedClaimEvent, got[0].Type) + }) + + t.Run("returns empty for unknown GER", func(t *testing.T) { + got, err := s.GetClaimsByGER(ctx, nil, unknownGER) + require.NoError(t, err) + require.Empty(t, got) + }) +} + +func TestGetUnsetClaimsPaged(t *testing.T) { + s, _ := newTestStorage(t) + ctx := context.Background() + + unset := []claimsynctypes.UnsetClaim{ + { // unset[0]: block=1, gi=100 + BlockNum: 
1, + BlockPos: 0, + TxHash: common.HexToHash("0x123"), + GlobalIndex: big.NewInt(100), + UnsetGlobalIndexHashChain: common.HexToHash("0xabc123"), + }, + { // unset[1]: block=2, gi=200 + BlockNum: 2, + BlockPos: 0, + TxHash: common.HexToHash("0x456"), + GlobalIndex: big.NewInt(200), + UnsetGlobalIndexHashChain: common.HexToHash("0xdef456"), + }, + { // unset[2]: block=3, gi=100 (same gi as first) + BlockNum: 3, + BlockPos: 0, + TxHash: common.HexToHash("0x789"), + GlobalIndex: big.NewInt(100), + UnsetGlobalIndexHashChain: common.HexToHash("0x987654"), + }, + } + + for _, u := range unset { + require.NoError(t, s.InsertBlock(ctx, nil, u.BlockNum, common.Hash{})) + require.NoError(t, s.InsertUnsetClaim(ctx, nil, u)) + } + + testCases := []struct { + name string + pageNumber uint32 + pageSize uint32 + globalIndex *big.Int + expectedCount int + expectedLen int + expectedGIs []*big.Int + expectError bool + errorContains string + }{ + { + name: "all results", + pageNumber: 1, + pageSize: 10, + globalIndex: nil, + expectedCount: 3, + expectedLen: 3, + expectedGIs: []*big.Int{big.NewInt(100), big.NewInt(200), big.NewInt(100)}, + }, + { + name: "page 2 size 1", + pageNumber: 2, + pageSize: 1, + globalIndex: nil, + expectedCount: 3, + expectedLen: 1, + expectedGIs: []*big.Int{big.NewInt(200)}, + }, + { + name: "filter by gi=100", + pageNumber: 1, + pageSize: 10, + globalIndex: big.NewInt(100), + expectedCount: 2, + expectedLen: 2, + expectedGIs: []*big.Int{big.NewInt(100), big.NewInt(100)}, + }, + { + name: "non-existent gi", + pageNumber: 1, + pageSize: 10, + globalIndex: big.NewInt(9999), + expectedCount: 0, + expectedLen: 0, + }, + { + name: "invalid page", + pageNumber: 5, + pageSize: 3, + globalIndex: nil, + expectError: true, + errorContains: "invalid page number for given page size and total number of unset_claim", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + got, count, err := s.GetUnsetClaimsPaged(ctx, 
tc.pageNumber, tc.pageSize, tc.globalIndex) + if tc.expectError { + require.Error(t, err) + require.Contains(t, err.Error(), tc.errorContains) + return + } + + require.NoError(t, err) + require.Equal(t, tc.expectedCount, count) + require.Len(t, got, tc.expectedLen) + + // Results are in DESC order (highest block_num first) + for i, u := range got { + if i < len(tc.expectedGIs) { + require.Equal(t, tc.expectedGIs[i], u.GlobalIndex) + } + } + }) + } +} + +func TestGetSetClaimsPaged(t *testing.T) { + s, _ := newTestStorage(t) + ctx := context.Background() + + set := []claimsynctypes.SetClaim{ + { // set[0]: block=1, gi=100 + BlockNum: 1, + BlockPos: 0, + TxHash: common.HexToHash("0x111"), + GlobalIndex: big.NewInt(100), + }, + { // set[1]: block=2, gi=200 + BlockNum: 2, + BlockPos: 0, + TxHash: common.HexToHash("0x222"), + GlobalIndex: big.NewInt(200), + }, + { // set[2]: block=3, gi=100 + BlockNum: 3, + BlockPos: 0, + TxHash: common.HexToHash("0x333"), + GlobalIndex: big.NewInt(100), + }, + { // set[3]: block=4, gi=300 + BlockNum: 4, + BlockPos: 0, + TxHash: common.HexToHash("0x444"), + GlobalIndex: big.NewInt(300), + }, + } + + for _, sc := range set { + require.NoError(t, s.InsertBlock(ctx, nil, sc.BlockNum, common.Hash{})) + require.NoError(t, s.InsertSetClaim(ctx, nil, sc)) + } + + testCases := []struct { + name string + pageNumber uint32 + pageSize uint32 + globalIndex *big.Int + expectedCount int + expectedLen int + expectError bool + errorContains string + }{ + { + name: "all results", + pageNumber: 1, + pageSize: 10, + globalIndex: nil, + expectedCount: 4, + expectedLen: 4, + }, + { + name: "page 2 size 1", + pageNumber: 2, + pageSize: 1, + globalIndex: nil, + expectedCount: 4, + expectedLen: 1, + }, + { + name: "filter by gi=100", + pageNumber: 1, + pageSize: 10, + globalIndex: big.NewInt(100), + expectedCount: 2, + expectedLen: 2, + }, + { + name: "non-existent gi", + pageNumber: 1, + pageSize: 10, + globalIndex: big.NewInt(9999), + expectedCount: 0, + 
expectedLen: 0, + }, + { + name: "invalid page", + pageNumber: 5, + pageSize: 4, + globalIndex: nil, + expectError: true, + errorContains: "invalid page number for given page size and total number of set_claim", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + got, count, err := s.GetSetClaimsPaged(ctx, tc.pageNumber, tc.pageSize, tc.globalIndex) + if tc.expectError { + require.Error(t, err) + require.Contains(t, err.Error(), tc.errorContains) + return + } + + require.NoError(t, err) + require.Equal(t, tc.expectedCount, count) + require.Len(t, got, tc.expectedLen) + }) + } + + t.Run("all results descending order", func(t *testing.T) { + t.Parallel() + got, count, err := s.GetSetClaimsPaged(ctx, 1, 10, nil) + require.NoError(t, err) + require.Equal(t, 4, count) + require.Len(t, got, 4) + // DESC order: set[3](block4), set[2](block3), set[1](block2), set[0](block1) + require.Equal(t, set[3].TxHash, got[0].TxHash) + require.Equal(t, set[2].TxHash, got[1].TxHash) + require.Equal(t, set[1].TxHash, got[2].TxHash) + require.Equal(t, set[0].TxHash, got[3].TxHash) + }) +} + +func TestGetClaimsPaged(t *testing.T) { + s, _ := newTestStorage(t) + ctx := context.Background() + + // 2^64 - 1 + uint64Max := new(big.Int).SetUint64(^uint64(0)) + // 18446744073709551617 = 2^64 + 1 + num1 := new(big.Int).Add(new(big.Int).SetUint64(^uint64(0)), big.NewInt(2)) + // 18446744073709551618 = 2^64 + 2 + num2 := new(big.Int).Add(new(big.Int).SetUint64(^uint64(0)), big.NewInt(3)) + // 2^256 - 1 + uint256Max := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 256), big.NewInt(1)) + + claims := []claimsynctypes.Claim{ + { // claims[0]: block=1, gi=num2, originNetwork=1 + BlockNum: 1, + BlockPos: 0, + GlobalIndex: new(big.Int).Set(num2), + Amount: big.NewInt(1), + OriginNetwork: 1, + Type: claimsynctypes.ClaimEvent, + }, + { // claims[1]: block=2, gi=2, originNetwork=1 + BlockNum: 2, + BlockPos: 0, + GlobalIndex: big.NewInt(2), + Amount: 
big.NewInt(1), + OriginNetwork: 1, + Type: claimsynctypes.ClaimEvent, + }, + { // claims[2]: block=3, gi=uint64Max, originNetwork=2 + BlockNum: 3, + BlockPos: 0, + GlobalIndex: new(big.Int).Set(uint64Max), + Amount: big.NewInt(1), + OriginNetwork: 2, + Type: claimsynctypes.ClaimEvent, + }, + { // claims[3]: block=4, gi=num1, originNetwork=2 + BlockNum: 4, + BlockPos: 0, + GlobalIndex: new(big.Int).Set(num1), + Amount: big.NewInt(1), + OriginNetwork: 2, + Type: claimsynctypes.ClaimEvent, + }, + { // claims[4]: block=5, gi=5, originNetwork=3 + BlockNum: 5, + BlockPos: 0, + GlobalIndex: big.NewInt(5), + Amount: big.NewInt(1), + OriginNetwork: 3, + Type: claimsynctypes.ClaimEvent, + }, + { // claims[5]: block=6, gi=uint256Max, originNetwork=4 + BlockNum: 6, + BlockPos: 0, + GlobalIndex: new(big.Int).Set(uint256Max), + Amount: big.NewInt(1), + OriginNetwork: 4, + Type: claimsynctypes.ClaimEvent, + }, + } + + // Insert blocks 1-10 and claims + for i := uint64(1); i <= 10; i++ { + require.NoError(t, s.InsertBlock(ctx, nil, i, common.Hash{})) + } + for _, c := range claims { + require.NoError(t, s.InsertClaim(ctx, nil, c)) + } + + testCases := []struct { + name string + pageNumber uint32 + pageSize uint32 + networkIDs []uint32 + globalIndex *big.Int + expectedCount int + expectedLen int + expectedGIs []*big.Int + expectError bool + errorContains string + }{ + { + name: "page 2 size 1", + pageNumber: 2, + pageSize: 1, + networkIDs: nil, + globalIndex: nil, + expectedCount: 6, + expectedLen: 1, + // DESC: claims[5](b6), claims[4](b5), ... 
+ // page 2 size 1 = offset 1 = claims[4] + expectedGIs: []*big.Int{big.NewInt(5)}, + }, + { + name: "all on same page", + pageNumber: 1, + pageSize: 20, + networkIDs: nil, + globalIndex: nil, + expectedCount: 6, + expectedLen: 6, + // DESC order: claims[5](b6), claims[4](b5), claims[3](b4), claims[2](b3), claims[1](b2), claims[0](b1) + expectedGIs: []*big.Int{ + new(big.Int).Set(uint256Max), + big.NewInt(5), + new(big.Int).Set(num1), + new(big.Int).Set(uint64Max), + big.NewInt(2), + new(big.Int).Set(num2), + }, + }, + { + name: "page 2 size 3", + pageNumber: 2, + pageSize: 3, + networkIDs: nil, + globalIndex: nil, + expectedCount: 6, + expectedLen: 3, + // offset=3: claims[2](b3), claims[1](b2), claims[0](b1) + expectedGIs: []*big.Int{ + new(big.Int).Set(uint64Max), + big.NewInt(2), + new(big.Int).Set(num2), + }, + }, + { + name: "invalid page", + pageNumber: 4, + pageSize: 3, + networkIDs: nil, + globalIndex: nil, + expectError: true, + errorContains: "invalid page number for given page size and total number of claims", + }, + { + name: "filter by networkIDs [1,3]", + pageNumber: 1, + pageSize: 3, + networkIDs: []uint32{1, 3}, + globalIndex: nil, + expectedCount: 3, + expectedLen: 3, + // claims[4](b5,net3), claims[1](b2,net1), claims[0](b1,net1) + expectedGIs: []*big.Int{ + big.NewInt(5), + big.NewInt(2), + new(big.Int).Set(num2), + }, + }, + { + name: "filter by gi=5", + pageNumber: 1, + pageSize: 3, + networkIDs: nil, + globalIndex: big.NewInt(5), + expectedCount: 1, + expectedLen: 1, + expectedGIs: []*big.Int{big.NewInt(5)}, + }, + { + name: "filter by networkIDs [2,3,4] and gi=uint64Max", + pageNumber: 1, + pageSize: 3, + networkIDs: []uint32{2, 3, 4}, + globalIndex: new(big.Int).Set(uint64Max), + expectedCount: 1, + expectedLen: 1, + expectedGIs: []*big.Int{new(big.Int).Set(uint64Max)}, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + got, count, err := s.GetClaimsPaged(ctx, tc.pageNumber, tc.pageSize, 
tc.networkIDs, tc.globalIndex) + if tc.expectError { + require.Error(t, err) + require.Contains(t, err.Error(), tc.errorContains) + return + } + + require.NoError(t, err) + require.Equal(t, tc.expectedCount, count) + require.Len(t, got, tc.expectedLen) + + for i, gi := range tc.expectedGIs { + require.Equal(t, 0, gi.Cmp(got[i].GlobalIndex), + "index %d: expected gi=%s, got %s", i, gi.String(), got[i].GlobalIndex.String()) + } + }) + } +} diff --git a/claimsync/types/claim_data_test.go b/claimsync/types/claim_data_test.go new file mode 100644 index 000000000..f5ad045bf --- /dev/null +++ b/claimsync/types/claim_data_test.go @@ -0,0 +1,434 @@ +package types + +import ( + "fmt" + "math/big" + "testing" + + "github.com/0xPolygon/cdk-contracts-tooling/contracts/aggchain-multisig/polygonzkevmbridge" + treetypes "github.com/agglayer/aggkit/tree/types" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" + "github.com/stretchr/testify/require" +) + +func TestDecodePreEtrogCalldata_Valid(t *testing.T) { + bridgeV1ABI, err := polygonzkevmbridge.PolygonzkevmbridgeMetaData.GetAbi() + require.NoError(t, err) + + globalIndex := uint32(10) + originNetwork := uint32(5) + originAddress := common.HexToAddress("0x0a0a") + amount := big.NewInt(150) + destinationAddr := common.HexToAddress("0x0b0b") + + proof := treetypes.Proof{} + for i := range treetypes.DefaultHeight { + for j := range common.HashLength { + proof[i] = common.HexToHash(fmt.Sprintf("%x", (j+1)%common.HashLength)) + } + } + + expectedClaim := &Claim{ + GlobalIndex: new(big.Int).SetUint64(uint64(globalIndex)), + MainnetExitRoot: common.HexToHash("0xdead"), + RollupExitRoot: common.HexToHash("0xbeef"), + DestinationNetwork: uint32(6), + Metadata: common.Hex2Bytes("c001"), + ProofLocalExitRoot: proof, + } + expectedClaim.GlobalExitRoot = crypto.Keccak256Hash(expectedClaim.MainnetExitRoot.Bytes(), expectedClaim.RollupExitRoot.Bytes()) + + claimAssetInput, err := 
bridgeV1ABI.Pack("claimAsset", + expectedClaim.ProofLocalExitRoot, + globalIndex, + expectedClaim.MainnetExitRoot, + expectedClaim.RollupExitRoot, + originNetwork, + originAddress, + expectedClaim.DestinationNetwork, + destinationAddr, + amount, + expectedClaim.Metadata, + ) + require.NoError(t, err) + + claimAssetData, err := bridgeV1ABI.Methods["claimAsset"].Inputs.Unpack(claimAssetInput[4:]) + require.NoError(t, err) + + actualClaim := &Claim{GlobalIndex: new(big.Int).SetUint64(uint64(globalIndex))} + isFound, err := actualClaim.DecodePreEtrogCalldata(claimAssetData) + require.NoError(t, err) + require.True(t, isFound) + require.Equal(t, expectedClaim, actualClaim) +} + +func TestDecodePreEtrogCalldata(t *testing.T) { + var ( + globalIndex = uint32(12345) + mainnetExitRoot = common.HexToHash("0x11") + rollupExitRoot = common.HexToHash("0x22") + metadata = []byte("mock metadata") + destinationNetwork = uint32(1) + invalidTypePlaceholder = "invalidType" + ) + + tests := []struct { + name string + data []any + expectedIsDecoded bool + expectError bool + }{ + { + name: "Valid calldata", + data: []any{ + [treetypes.DefaultHeight][common.HashLength]byte{}, // Proof + globalIndex, // GlobalIndex + [common.HashLength]byte(mainnetExitRoot.Bytes()), // MainnetExitRoot + [common.HashLength]byte(rollupExitRoot.Bytes()), // RollupExitRoot + uint32(1), // OriginNetwork (not used) + common.Address{}, // OriginTokenAddress (not used) + destinationNetwork, + common.Address{}, // DestinationAddress (not used) + big.NewInt(0), // Amount (not used) + metadata, + }, + expectedIsDecoded: true, + expectError: false, + }, + { + name: "Mismatched GlobalIndex", + data: []any{ + [treetypes.DefaultHeight][common.HashLength]byte{}, + uint32(99999), // Wrong GlobalIndex + [common.HashLength]byte(mainnetExitRoot.Bytes()), + [common.HashLength]byte(rollupExitRoot.Bytes()), + uint32(1), + common.Address{}, + destinationNetwork, + common.Address{}, + big.NewInt(0), + metadata, + }, + 
expectedIsDecoded: false, + expectError: false, + }, + { + name: "Invalid GlobalIndex Type", + data: []any{ + [treetypes.DefaultHeight][common.HashLength]byte{}, + invalidTypePlaceholder, // Invalid GlobalIndex type + [common.HashLength]byte(mainnetExitRoot.Bytes()), + [common.HashLength]byte(rollupExitRoot.Bytes()), + uint32(1), + common.Address{}, + destinationNetwork, + common.Address{}, + big.NewInt(0), + metadata, + }, + expectedIsDecoded: false, + expectError: true, + }, + { + name: "Invalid Proof Type", + data: []any{ + invalidTypePlaceholder, // Invalid Proof type + globalIndex, + [common.HashLength]byte(mainnetExitRoot.Bytes()), + [common.HashLength]byte(rollupExitRoot.Bytes()), + uint32(1), + common.Address{}, + destinationNetwork, + common.Address{}, + big.NewInt(0), + metadata, + }, + expectedIsDecoded: false, + expectError: true, + }, + { + name: "Invalid MainnetExitRoot Type", + data: []any{ + [treetypes.DefaultHeight][common.HashLength]byte{}, + globalIndex, + invalidTypePlaceholder, // Invalid MainnetExitRoot type + [common.HashLength]byte(rollupExitRoot.Bytes()), + uint32(1), + common.Address{}, + destinationNetwork, + common.Address{}, + big.NewInt(0), + metadata, + }, + expectedIsDecoded: false, + expectError: true, + }, + { + name: "Invalid RollupExitRoot Type", + data: []any{ + [treetypes.DefaultHeight][common.HashLength]byte{}, + globalIndex, + [common.HashLength]byte(mainnetExitRoot.Bytes()), + invalidTypePlaceholder, // Invalid RollupExitRoot type + uint32(1), + common.Address{}, + destinationNetwork, + common.Address{}, + big.NewInt(0), + metadata, + }, + expectedIsDecoded: false, + expectError: true, + }, + { + name: "Invalid DestinationNetwork Type", + data: []any{ + [treetypes.DefaultHeight][common.HashLength]byte{}, + globalIndex, + [common.HashLength]byte(mainnetExitRoot.Bytes()), + [common.HashLength]byte(rollupExitRoot.Bytes()), + uint32(1), + common.Address{}, + invalidTypePlaceholder, // Invalid DestinationNetwork type + 
common.Address{}, + big.NewInt(0), + metadata, + }, + expectedIsDecoded: false, + expectError: true, + }, + { + name: "Invalid Metadata Type", + data: []any{ + [treetypes.DefaultHeight][common.HashLength]byte{}, + globalIndex, + [common.HashLength]byte(mainnetExitRoot.Bytes()), + [common.HashLength]byte(rollupExitRoot.Bytes()), + uint32(1), + common.Address{}, + destinationNetwork, + common.Address{}, + big.NewInt(0), + 123, // Invalid metadata type (should be []byte) + }, + expectedIsDecoded: false, + expectError: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + claim := &Claim{ + GlobalIndex: new(big.Int).SetUint64(uint64(globalIndex)), + MainnetExitRoot: common.Hash{}, + RollupExitRoot: common.Hash{}, + DestinationNetwork: 0, + Metadata: nil, + } + + match, err := claim.DecodePreEtrogCalldata(tt.data) + if tt.expectError { + require.Error(t, err) + } else { + require.NoError(t, err) + } + require.Equal(t, tt.expectedIsDecoded, match) + }) + } +} + +func TestDecodeEtrogCalldata(t *testing.T) { + var ( + globalIndex = big.NewInt(12345) + mainnetExitRoot = common.HexToHash("0x11") + rollupExitRoot = common.HexToHash("0x22") + metadata = []byte("mock metadata") + destinationNetwork = uint32(1) + invalidTypePlaceholder = "invalidType" + ) + + tests := []struct { + name string + data []any + expectedIsDecoded bool + expectError bool + }{ + { + name: "Valid calldata", + data: []any{ + [treetypes.DefaultHeight][common.HashLength]byte{}, // ProofLocalExitRoot + [treetypes.DefaultHeight][common.HashLength]byte{}, // ProofRollupExitRoot + globalIndex, + [common.HashLength]byte(mainnetExitRoot.Bytes()), // MainnetExitRoot + [common.HashLength]byte(rollupExitRoot.Bytes()), // RollupExitRoot + uint32(0), // OriginNetwork (not used) + common.Address{}, // OriginAddress (not used) + destinationNetwork, // DestinationNetwork + common.Address{}, // DestinationAddress (not used) + big.NewInt(0), // Amount (not used) + metadata, + }, + 
expectedIsDecoded: true, + expectError: false, + }, + { + name: "Mismatched GlobalIndex", + data: []any{ + [treetypes.DefaultHeight][common.HashLength]byte{}, + [treetypes.DefaultHeight][common.HashLength]byte{}, + big.NewInt(99999), // Wrong GlobalIndex + [common.HashLength]byte(mainnetExitRoot.Bytes()), + [common.HashLength]byte(rollupExitRoot.Bytes()), + uint32(0), + common.Address{}, + destinationNetwork, + common.Address{}, + big.NewInt(0), + metadata, + }, + expectedIsDecoded: false, + expectError: false, + }, + { + name: "Invalid GlobalIndex Type", + data: []any{ + [treetypes.DefaultHeight][common.HashLength]byte{}, + [treetypes.DefaultHeight][common.HashLength]byte{}, + invalidTypePlaceholder, // Invalid GlobalIndex type + mainnetExitRoot.Bytes(), + rollupExitRoot.Bytes(), + uint32(0), + common.Address{}, + destinationNetwork, + common.Address{}, + big.NewInt(0), + metadata, + }, + expectedIsDecoded: false, + expectError: true, + }, + { + name: "Invalid LocalExitRoot Proof Type", + data: []any{ + invalidTypePlaceholder, // Invalid ProofLocalExitRoot type + [treetypes.DefaultHeight][common.HashLength]byte{}, + globalIndex, + [common.HashLength]byte(mainnetExitRoot.Bytes()), + [common.HashLength]byte(rollupExitRoot.Bytes()), + uint32(0), + common.Address{}, + destinationNetwork, + common.Address{}, + big.NewInt(0), + metadata, + }, + expectedIsDecoded: false, + expectError: true, + }, + { + name: "Invalid RollupExitRoot Proof Type", + data: []any{ + [treetypes.DefaultHeight][common.HashLength]byte{}, + invalidTypePlaceholder, // Invalid RollupExitRoot proof type + globalIndex, + [common.HashLength]byte(mainnetExitRoot.Bytes()), + [common.HashLength]byte(rollupExitRoot.Bytes()), + uint32(0), + common.Address{}, + destinationNetwork, + common.Address{}, + big.NewInt(0), + metadata, + }, + expectedIsDecoded: false, + expectError: true, + }, + { + name: "Invalid MainnetExitRoot Type", + data: []any{ + [treetypes.DefaultHeight][common.HashLength]byte{}, + 
[treetypes.DefaultHeight][common.HashLength]byte{}, + globalIndex, + invalidTypePlaceholder, // MainnetExitRoot + [common.HashLength]byte(rollupExitRoot.Bytes()), // RollupExitRoot + uint32(0), + common.Address{}, + destinationNetwork, + common.Address{}, + big.NewInt(0), + metadata, + }, + expectedIsDecoded: false, + expectError: true, + }, + { + name: "Invalid RollupExitRoot Type", + data: []any{ + [treetypes.DefaultHeight][common.HashLength]byte{}, + [treetypes.DefaultHeight][common.HashLength]byte{}, + globalIndex, + [common.HashLength]byte(mainnetExitRoot.Bytes()), // MainnetExitRoot + invalidTypePlaceholder, // RollupExitRoot + uint32(0), + common.Address{}, + destinationNetwork, + common.Address{}, + big.NewInt(0), + metadata, + }, + expectedIsDecoded: false, + expectError: true, + }, + { + name: "Invalid DestinationNetwork Type", + data: []any{ + [treetypes.DefaultHeight][common.HashLength]byte{}, + [treetypes.DefaultHeight][common.HashLength]byte{}, + globalIndex, + [common.HashLength]byte(mainnetExitRoot.Bytes()), + [common.HashLength]byte(rollupExitRoot.Bytes()), + uint32(0), + common.Address{}, + invalidTypePlaceholder, // DestinationNetwork + common.Address{}, + big.NewInt(0), + metadata, + }, + expectedIsDecoded: false, + expectError: true, + }, + { + name: "Invalid Metadata Type", + data: []any{ + [treetypes.DefaultHeight][common.HashLength]byte{}, + [treetypes.DefaultHeight][common.HashLength]byte{}, + globalIndex, + [common.HashLength]byte(mainnetExitRoot.Bytes()), + [common.HashLength]byte(rollupExitRoot.Bytes()), + uint32(0), + common.Address{}, + destinationNetwork, + common.Address{}, + big.NewInt(0), + 123, // Invalid metadata type (should be []byte) + }, + expectedIsDecoded: false, + expectError: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + claim := &Claim{GlobalIndex: globalIndex} + + isDecoded, err := claim.DecodeEtrogCalldata(tt.data) + if tt.expectError { + require.Error(t, err) + } else { + 
require.NoError(t, err) + } + require.Equal(t, tt.expectedIsDecoded, isDecoded) + }) + } +} From dadffffb01a06aae19a1b0490579c413f801cac6 Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Wed, 18 Mar 2026 11:57:14 +0100 Subject: [PATCH 11/28] feat: coverage --- .mockery.yaml | 5 + claimsync/claimsync_rpc_test.go | 156 ++++++++++ claimsync/downloader_test.go | 286 ++++++++++++++++++ claimsync/mocks/mock_claim_syncer.go | 269 ++++++++++++++++ .../storage/migrations/migrations_test.go | 234 ++++++++++++++ 5 files changed, 950 insertions(+) create mode 100644 claimsync/claimsync_rpc_test.go create mode 100644 claimsync/mocks/mock_claim_syncer.go create mode 100644 claimsync/storage/migrations/migrations_test.go diff --git a/.mockery.yaml b/.mockery.yaml index 7fa9b1dd7..100c621bb 100644 --- a/.mockery.yaml +++ b/.mockery.yaml @@ -166,6 +166,11 @@ packages: config: dir: "{{ .InterfaceDir }}/mocks" all: true + github.com/agglayer/aggkit/claimsync: + config: + dir: "{{ .InterfaceDir }}/mocks" + interfaces: + ClaimSyncer: github.com/agglayer/aggkit/claimsync/types: config: dir: "{{ .InterfaceDir }}/mocks" diff --git a/claimsync/claimsync_rpc_test.go b/claimsync/claimsync_rpc_test.go new file mode 100644 index 000000000..20cec8b66 --- /dev/null +++ b/claimsync/claimsync_rpc_test.go @@ -0,0 +1,156 @@ +package claimsync + +import ( + "errors" + "math/big" + "testing" + + jRPC "github.com/0xPolygon/cdk-rpc/rpc" + claimsynctypes "github.com/agglayer/aggkit/claimsync/types" + "github.com/agglayer/aggkit/claimsync/mocks" + logger "github.com/agglayer/aggkit/log" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +func newTestRPC(t *testing.T) (*ClaimSyncRPC, *mocks.ClaimSyncer) { + t.Helper() + syncer := mocks.NewClaimSyncer(t) + lg := logger.WithFields("module", "test") + return NewClaimSyncRPC(lg, syncer), syncer +} + +// --- Status --- + +func TestClaimSyncRPC_Status_OK(t *testing.T) { + rpc, 
syncer := newTestRPC(t) + syncer.EXPECT().GetLastProcessedBlock(mock.Anything).Return(uint64(42), true, nil) + + result, rpcErr := rpc.Status() + require.Nil(t, rpcErr) + require.NotNil(t, result) + + status := result.(struct { + Status string `json:"status"` + LastProcessedBlock uint64 `json:"lastProcessedBlock"` + }) + require.Equal(t, "running", status.Status) + require.Equal(t, uint64(42), status.LastProcessedBlock) +} + +func TestClaimSyncRPC_Status_Error(t *testing.T) { + rpc, syncer := newTestRPC(t) + syncer.EXPECT().GetLastProcessedBlock(mock.Anything).Return(uint64(0), false, errors.New("db error")) + + result, rpcErr := rpc.Status() + require.Nil(t, result) + require.NotNil(t, rpcErr) + require.Equal(t, jRPC.DefaultErrorCode, rpcErr.ErrorCode()) + require.Contains(t, rpcErr.Error(), "getting last processed block") +} + +// --- GetClaims --- + +func TestClaimSyncRPC_GetClaims_OK(t *testing.T) { + rpc, syncer := newTestRPC(t) + expected := []claimsynctypes.Claim{ + {BlockNum: 1, GlobalIndex: big.NewInt(100)}, + {BlockNum: 2, GlobalIndex: big.NewInt(200)}, + } + syncer.EXPECT().GetClaims(mock.Anything, uint64(1), uint64(10)).Return(expected, nil) + + result, rpcErr := rpc.GetClaims(1, 10) + require.Nil(t, rpcErr) + require.Equal(t, expected, result) +} + +func TestClaimSyncRPC_GetClaims_Empty(t *testing.T) { + rpc, syncer := newTestRPC(t) + syncer.EXPECT().GetClaims(mock.Anything, uint64(1), uint64(10)).Return([]claimsynctypes.Claim{}, nil) + + result, rpcErr := rpc.GetClaims(1, 10) + require.Nil(t, rpcErr) + require.Equal(t, []claimsynctypes.Claim{}, result) +} + +func TestClaimSyncRPC_GetClaims_Error(t *testing.T) { + rpc, syncer := newTestRPC(t) + syncer.EXPECT().GetClaims(mock.Anything, uint64(1), uint64(10)).Return(nil, errors.New("storage error")) + + result, rpcErr := rpc.GetClaims(1, 10) + require.Nil(t, result) + require.NotNil(t, rpcErr) + require.Equal(t, jRPC.DefaultErrorCode, rpcErr.ErrorCode()) + require.Contains(t, rpcErr.Error(), 
"ClaimSyncRPC.GetClaims") +} + +// --- GetClaimsByGlobalIndex --- + +func TestClaimSyncRPC_GetClaimsByGlobalIndex_OK(t *testing.T) { + rpc, syncer := newTestRPC(t) + expected := []claimsynctypes.Claim{{BlockNum: 5, GlobalIndex: big.NewInt(123)}} + syncer.EXPECT().GetClaimsByGlobalIndex(mock.Anything, big.NewInt(123)).Return(expected, nil) + + result, rpcErr := rpc.GetClaimsByGlobalIndex("123") + require.Nil(t, rpcErr) + require.Equal(t, expected, result) +} + +func TestClaimSyncRPC_GetClaimsByGlobalIndex_InvalidInput(t *testing.T) { + rpc, _ := newTestRPC(t) + + result, rpcErr := rpc.GetClaimsByGlobalIndex("not-a-number") + require.Nil(t, result) + require.NotNil(t, rpcErr) + require.Equal(t, jRPC.DefaultErrorCode, rpcErr.ErrorCode()) + require.Contains(t, rpcErr.Error(), "invalid global index") +} + +func TestClaimSyncRPC_GetClaimsByGlobalIndex_NotFound(t *testing.T) { + rpc, syncer := newTestRPC(t) + syncer.EXPECT().GetClaimsByGlobalIndex(mock.Anything, big.NewInt(999)).Return([]claimsynctypes.Claim{}, nil) + + result, rpcErr := rpc.GetClaimsByGlobalIndex("999") + require.Nil(t, result) + require.NotNil(t, rpcErr) + require.Equal(t, jRPC.NotFoundErrorCode, rpcErr.ErrorCode()) + require.Contains(t, rpcErr.Error(), "no claims found") +} + +func TestClaimSyncRPC_GetClaimsByGlobalIndex_Error(t *testing.T) { + rpc, syncer := newTestRPC(t) + syncer.EXPECT().GetClaimsByGlobalIndex(mock.Anything, big.NewInt(1)).Return(nil, errors.New("db error")) + + result, rpcErr := rpc.GetClaimsByGlobalIndex("1") + require.Nil(t, result) + require.NotNil(t, rpcErr) + require.Equal(t, jRPC.DefaultErrorCode, rpcErr.ErrorCode()) + require.Contains(t, rpcErr.Error(), "ClaimSyncRPC.GetClaimsByGlobalIndex") +} + +// --- SetNextRequiredBlock --- + +func TestClaimSyncRPC_SetNextRequiredBlock_OK(t *testing.T) { + rpc, syncer := newTestRPC(t) + syncer.EXPECT().SetNextRequiredBlock(mock.Anything, uint64(500)).Return(nil) + + result, rpcErr := rpc.SetNextRequiredBlock(500) + require.Nil(t, 
rpcErr) + require.NotNil(t, result) + + msg := result.(struct { + Message string `json:"message"` + }) + require.Equal(t, "next required block set to 500", msg.Message) +} + +func TestClaimSyncRPC_SetNextRequiredBlock_Error(t *testing.T) { + rpc, syncer := newTestRPC(t) + syncer.EXPECT().SetNextRequiredBlock(mock.Anything, uint64(500)).Return(errors.New("forbidden")) + + result, rpcErr := rpc.SetNextRequiredBlock(500) + require.Nil(t, result) + require.NotNil(t, rpcErr) + require.Equal(t, jRPC.DefaultErrorCode, rpcErr.ErrorCode()) + require.Contains(t, rpcErr.Error(), "ClaimSyncRPC.SetNextRequiredBlock") +} diff --git a/claimsync/downloader_test.go b/claimsync/downloader_test.go index 0001bfc84..3b6723fd4 100644 --- a/claimsync/downloader_test.go +++ b/claimsync/downloader_test.go @@ -2,12 +2,15 @@ package claimsync import ( "bytes" + "context" + "errors" "fmt" "math/big" "testing" "github.com/0xPolygon/cdk-contracts-tooling/contracts/aggchain-multisig/agglayerbridge" "github.com/0xPolygon/cdk-contracts-tooling/contracts/aggchain-multisig/agglayerbridgel2" + "github.com/0xPolygon/cdk-contracts-tooling/contracts/aggchain-multisig/polygonzkevmbridge" claimtypemocks "github.com/agglayer/aggkit/claimsync/types/mocks" "github.com/agglayer/aggkit/db" logger "github.com/agglayer/aggkit/log" @@ -435,3 +438,286 @@ func TestBuildAppender(t *testing.T) { func strPtr(s string) *string { return &s } + +// --- BridgeDeployment.String() --- + +func TestBridgeDeploymentString(t *testing.T) { + require.Equal(t, "NonSovereignChain", NonSovereignChain.String()) + require.Equal(t, "SovereignChain", SovereignChain.String()) + require.Equal(t, "Unknown", Unknown.String()) + require.Equal(t, "Unknown", BridgeDeployment(99).String()) +} + +// --- resolveBridgeDeployment --- + +func TestResolveBridgeDeployment(t *testing.T) { + bridgeAddr := common.HexToAddress("0x10") + ctx := context.Background() + + // ABI-encoded zero-value returns: address(0) and uint32(0) are both 32 zero bytes + 
validReturn := make([]byte, 32) + revertErr := errors.New("execution reverted") + + tests := []struct { + name string + setupMock func(c *mocks.EthClienter) + expectedKind BridgeDeployment + expectErr bool + }{ + { + name: "SovereignChain: BridgeManager succeeds", + setupMock: func(c *mocks.EthClienter) { + c.EXPECT().CallContract(mock.Anything, mock.Anything, mock.Anything). + Return(validReturn, nil).Once() + }, + expectedKind: SovereignChain, + }, + { + name: "NonSovereignChain: BridgeManager reverts, LastUpdatedDepositCount succeeds", + setupMock: func(c *mocks.EthClienter) { + c.EXPECT().CallContract(mock.Anything, mock.Anything, mock.Anything). + Return(nil, revertErr).Once() + c.EXPECT().CallContract(mock.Anything, mock.Anything, mock.Anything). + Return(validReturn, nil).Once() + }, + expectedKind: NonSovereignChain, + }, + { + name: "Unknown: both calls revert", + setupMock: func(c *mocks.EthClienter) { + c.EXPECT().CallContract(mock.Anything, mock.Anything, mock.Anything). + Return(nil, revertErr).Once() + c.EXPECT().CallContract(mock.Anything, mock.Anything, mock.Anything). + Return(nil, revertErr).Once() + }, + expectedKind: Unknown, + }, + { + name: "error: BridgeManager returns unexpected error", + setupMock: func(c *mocks.EthClienter) { + c.EXPECT().CallContract(mock.Anything, mock.Anything, mock.Anything). + Return(nil, errors.New("connection refused")).Once() + }, + expectErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ethClient := mocks.NewEthClienter(t) + tt.setupMock(ethClient) + + deployment, err := resolveBridgeDeployment(ctx, bridgeAddr, ethClient) + if tt.expectErr { + require.Error(t, err) + return + } + require.NoError(t, err) + require.Equal(t, tt.expectedKind, deployment.kind) + }) + } +} + +// --- buildClaimEventHandler edge cases --- + +// buildClaimEventLog packs a valid etrog ClaimEvent log for the given globalIndex. 
+func buildClaimEventLog(t *testing.T, globalIndex *big.Int, txHash common.Hash, blockNum uint64) types.Log { + t.Helper() + agglayerBridgeABI, err := agglayerbridge.AgglayerbridgeMetaData.GetAbi() + require.NoError(t, err) + event, err := agglayerBridgeABI.EventByID(claimEventSignature) + require.NoError(t, err) + data, err := event.Inputs.Pack(globalIndex, uint32(1), common.Address{}, common.Address{}, big.NewInt(10)) + require.NoError(t, err) + return types.Log{ + Topics: []common.Hash{claimEventSignature}, + Data: data, + TxHash: txHash, + BlockNumber: blockNum, + } +} + +func TestBuildClaimEventHandler_BoundarySkip(t *testing.T) { + bridgeAddr := common.HexToAddress("0x10") + lg := logger.WithFields("module", "test") + txHash := common.HexToHash("0xABCD") + blockNum := uint64(5) + + ethClient := mocks.NewEthClienter(t) + agglayerBridgeContract, err := agglayerbridge.NewAgglayerbridge(bridgeAddr, ethClient) + require.NoError(t, err) + + querier := claimtypemocks.NewClaimQuerier(t) + // Boundary is at block 5 — log is also at block 5, so it should be skipped + querier.EXPECT().GetBoundaryBlockForClaimType(mock.Anything, mock.Anything, DetailedClaimEvent). 
+ Return(blockNum, nil) + + handler := buildClaimEventHandler(t.Context(), agglayerBridgeContract, ethClient, querier, bridgeAddr, true, lg) + + block := &sync.EVMBlock{EVMBlockHeader: sync.EVMBlockHeader{Num: blockNum}} + log := buildClaimEventLog(t, big.NewInt(100), txHash, blockNum) + + err = handler(block, log) + require.NoError(t, err) + require.Empty(t, block.Events, "ClaimEvent should be skipped when at or after DetailedClaimEvent boundary") +} + +func TestBuildClaimEventHandler_SameTxDetailedSkip(t *testing.T) { + bridgeAddr := common.HexToAddress("0x10") + lg := logger.WithFields("module", "test") + txHash := common.HexToHash("0xABCD") + blockNum := uint64(3) + + ethClient := mocks.NewEthClienter(t) + agglayerBridgeContract, err := agglayerbridge.NewAgglayerbridge(bridgeAddr, ethClient) + require.NoError(t, err) + + querier := claimtypemocks.NewClaimQuerier(t) + querier.EXPECT().GetBoundaryBlockForClaimType(mock.Anything, mock.Anything, DetailedClaimEvent). + Return(uint64(0), db.ErrNotFound) + + handler := buildClaimEventHandler(t.Context(), agglayerBridgeContract, ethClient, querier, bridgeAddr, true, lg) + + // Block already has a DetailedClaimEvent for the same tx + block := &sync.EVMBlock{EVMBlockHeader: sync.EVMBlockHeader{Num: blockNum}} + block.Events = append(block.Events, Event{Claim: &Claim{ + Type: DetailedClaimEvent, + TxHash: txHash, + }}) + + log := buildClaimEventLog(t, big.NewInt(100), txHash, blockNum) + + err = handler(block, log) + require.NoError(t, err) + require.Len(t, block.Events, 1, "ClaimEvent should be skipped; DetailedClaimEvent for same tx already present") + require.Equal(t, DetailedClaimEvent, block.Events[0].(Event).Claim.Type) +} + +// --- buildDetailedClaimEventHandler: removes ClaimEvent for same tx --- + +func TestBuildDetailedClaimEventHandler_RemovesClaimEvent(t *testing.T) { + bridgeAddr := common.HexToAddress("0x10") + txHash := common.HexToHash("0xDEAD") + + ethClient := mocks.NewEthClienter(t) + 
agglayerBridgeL2Contract, err := agglayerbridgel2.NewAgglayerbridgel2(bridgeAddr, ethClient) + require.NoError(t, err) + + handler := buildDetailedClaimEventHandler(agglayerBridgeL2Contract) + + l2ABI, err := agglayerbridgel2.Agglayerbridgel2MetaData.GetAbi() + require.NoError(t, err) + + detailedEvent, err := l2ABI.EventByID(detailedClaimEventSignature) + require.NoError(t, err) + + var nonIndexed abi.Arguments + for _, inp := range detailedEvent.Inputs { + if !inp.Indexed { + nonIndexed = append(nonIndexed, inp) + } + } + data, err := nonIndexed.Pack( + [tree.DefaultHeight][common.HashLength]byte{}, + [tree.DefaultHeight][common.HashLength]byte{}, + [common.HashLength]byte{}, + [common.HashLength]byte{}, + uint8(0), + uint32(1), + common.Address{}, + uint32(0), + big.NewInt(50), + []byte{}, + ) + require.NoError(t, err) + + log := types.Log{ + Topics: []common.Hash{ + detailedClaimEventSignature, + common.BigToHash(big.NewInt(42)), // globalIndex (indexed) + common.BytesToHash(common.Address{}.Bytes()), // destinationAddress (indexed) + }, + Data: data, + TxHash: txHash, + } + + // Block already contains a ClaimEvent for the same tx + block := &sync.EVMBlock{EVMBlockHeader: sync.EVMBlockHeader{Num: 1}} + block.Events = append(block.Events, Event{Claim: &Claim{ + Type: ClaimEvent, + TxHash: txHash, + }}) + + err = handler(block, log) + require.NoError(t, err) + require.Len(t, block.Events, 1) + require.Equal(t, DetailedClaimEvent, block.Events[0].(Event).Claim.Type, + "ClaimEvent should be replaced by DetailedClaimEvent for the same tx") +} + +// --- buildClaimEventHandlerPreEtrog --- + +func TestBuildClaimEventHandlerPreEtrog_OK(t *testing.T) { + bridgeAddr := common.HexToAddress("0x10") + lg := logger.WithFields("module", "test") + globalIndex := uint32(77) + txHash := common.HexToHash("0xBEEF") + + // Build pre-etrog ClaimEvent log + preEtrogABI, err := polygonzkevmbridge.PolygonzkevmbridgeMetaData.GetAbi() + require.NoError(t, err) + event, err := 
preEtrogABI.EventByID(claimEventSignaturePreEtrog) + require.NoError(t, err) + data, err := event.Inputs.Pack( + globalIndex, + uint32(1), + common.Address{}, + common.Address{}, + big.NewInt(10), + ) + require.NoError(t, err) + logEntry := types.Log{ + Topics: []common.Hash{claimEventSignaturePreEtrog}, + Data: data, + TxHash: txHash, + } + + // Build valid pre-etrog claimAsset calldata + claimAssetCalldata, err := preEtrogABI.Methods["claimAsset"].Inputs.Pack( + [tree.DefaultHeight][common.HashLength]byte{}, + globalIndex, + [common.HashLength]byte{}, + [common.HashLength]byte{}, + uint32(1), + common.Address{}, + uint32(0), + common.Address{}, + big.NewInt(10), + []byte{}, + ) + require.NoError(t, err) + claimAssetInput := append(append([]byte{}, claimAssetPreEtrogMethodID...), claimAssetCalldata...) + + ethClient := mocks.NewEthClienter(t) + ethClient.EXPECT().Call(mock.Anything, DebugTraceTxEndpoint, mock.Anything, mock.Anything). + Run(func(result any, method string, args ...any) { + arg, ok := result.(*Call) + require.True(t, ok) + *arg = Call{To: bridgeAddr, From: common.HexToAddress("0x01"), Input: claimAssetInput} + }). + Return(nil) + + legacyBridge, err := polygonzkevmbridge.NewPolygonzkevmbridge(bridgeAddr, ethClient) + require.NoError(t, err) + + handler := buildClaimEventHandlerPreEtrog(legacyBridge, ethClient, bridgeAddr, true, lg) + + block := &sync.EVMBlock{EVMBlockHeader: sync.EVMBlockHeader{Num: 1}} + err = handler(block, logEntry) + require.NoError(t, err) + require.Len(t, block.Events, 1) + + claim := block.Events[0].(Event).Claim + require.Equal(t, new(big.Int).SetUint64(uint64(globalIndex)), claim.GlobalIndex) + require.Equal(t, txHash, claim.TxHash) +} diff --git a/claimsync/mocks/mock_claim_syncer.go b/claimsync/mocks/mock_claim_syncer.go new file mode 100644 index 000000000..678f90819 --- /dev/null +++ b/claimsync/mocks/mock_claim_syncer.go @@ -0,0 +1,269 @@ +// Code generated by mockery. DO NOT EDIT. 
+ +package mocks + +import ( + big "math/big" + + context "context" + + mock "github.com/stretchr/testify/mock" + + types "github.com/agglayer/aggkit/claimsync/types" +) + +// ClaimSyncer is an autogenerated mock type for the ClaimSyncer type +type ClaimSyncer struct { + mock.Mock +} + +type ClaimSyncer_Expecter struct { + mock *mock.Mock +} + +func (_m *ClaimSyncer) EXPECT() *ClaimSyncer_Expecter { + return &ClaimSyncer_Expecter{mock: &_m.Mock} +} + +// GetClaims provides a mock function with given fields: ctx, fromBlock, toBlock +func (_m *ClaimSyncer) GetClaims(ctx context.Context, fromBlock uint64, toBlock uint64) ([]types.Claim, error) { + ret := _m.Called(ctx, fromBlock, toBlock) + + if len(ret) == 0 { + panic("no return value specified for GetClaims") + } + + var r0 []types.Claim + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint64, uint64) ([]types.Claim, error)); ok { + return rf(ctx, fromBlock, toBlock) + } + if rf, ok := ret.Get(0).(func(context.Context, uint64, uint64) []types.Claim); ok { + r0 = rf(ctx, fromBlock, toBlock) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]types.Claim) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, uint64, uint64) error); ok { + r1 = rf(ctx, fromBlock, toBlock) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ClaimSyncer_GetClaims_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetClaims' +type ClaimSyncer_GetClaims_Call struct { + *mock.Call +} + +// GetClaims is a helper method to define mock.On call +// - ctx context.Context +// - fromBlock uint64 +// - toBlock uint64 +func (_e *ClaimSyncer_Expecter) GetClaims(ctx interface{}, fromBlock interface{}, toBlock interface{}) *ClaimSyncer_GetClaims_Call { + return &ClaimSyncer_GetClaims_Call{Call: _e.mock.On("GetClaims", ctx, fromBlock, toBlock)} +} + +func (_c *ClaimSyncer_GetClaims_Call) Run(run func(ctx context.Context, fromBlock uint64, toBlock uint64)) 
*ClaimSyncer_GetClaims_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint64), args[2].(uint64)) + }) + return _c +} + +func (_c *ClaimSyncer_GetClaims_Call) Return(_a0 []types.Claim, _a1 error) *ClaimSyncer_GetClaims_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *ClaimSyncer_GetClaims_Call) RunAndReturn(run func(context.Context, uint64, uint64) ([]types.Claim, error)) *ClaimSyncer_GetClaims_Call { + _c.Call.Return(run) + return _c +} + +// GetClaimsByGlobalIndex provides a mock function with given fields: ctx, globalIndex +func (_m *ClaimSyncer) GetClaimsByGlobalIndex(ctx context.Context, globalIndex *big.Int) ([]types.Claim, error) { + ret := _m.Called(ctx, globalIndex) + + if len(ret) == 0 { + panic("no return value specified for GetClaimsByGlobalIndex") + } + + var r0 []types.Claim + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *big.Int) ([]types.Claim, error)); ok { + return rf(ctx, globalIndex) + } + if rf, ok := ret.Get(0).(func(context.Context, *big.Int) []types.Claim); ok { + r0 = rf(ctx, globalIndex) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]types.Claim) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *big.Int) error); ok { + r1 = rf(ctx, globalIndex) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ClaimSyncer_GetClaimsByGlobalIndex_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetClaimsByGlobalIndex' +type ClaimSyncer_GetClaimsByGlobalIndex_Call struct { + *mock.Call +} + +// GetClaimsByGlobalIndex is a helper method to define mock.On call +// - ctx context.Context +// - globalIndex *big.Int +func (_e *ClaimSyncer_Expecter) GetClaimsByGlobalIndex(ctx interface{}, globalIndex interface{}) *ClaimSyncer_GetClaimsByGlobalIndex_Call { + return &ClaimSyncer_GetClaimsByGlobalIndex_Call{Call: _e.mock.On("GetClaimsByGlobalIndex", ctx, globalIndex)} +} + +func (_c *ClaimSyncer_GetClaimsByGlobalIndex_Call) 
Run(run func(ctx context.Context, globalIndex *big.Int)) *ClaimSyncer_GetClaimsByGlobalIndex_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(*big.Int)) + }) + return _c +} + +func (_c *ClaimSyncer_GetClaimsByGlobalIndex_Call) Return(_a0 []types.Claim, _a1 error) *ClaimSyncer_GetClaimsByGlobalIndex_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *ClaimSyncer_GetClaimsByGlobalIndex_Call) RunAndReturn(run func(context.Context, *big.Int) ([]types.Claim, error)) *ClaimSyncer_GetClaimsByGlobalIndex_Call { + _c.Call.Return(run) + return _c +} + +// GetLastProcessedBlock provides a mock function with given fields: ctx +func (_m *ClaimSyncer) GetLastProcessedBlock(ctx context.Context) (uint64, bool, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for GetLastProcessedBlock") + } + + var r0 uint64 + var r1 bool + var r2 error + if rf, ok := ret.Get(0).(func(context.Context) (uint64, bool, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) uint64); ok { + r0 = rf(ctx) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(context.Context) bool); ok { + r1 = rf(ctx) + } else { + r1 = ret.Get(1).(bool) + } + + if rf, ok := ret.Get(2).(func(context.Context) error); ok { + r2 = rf(ctx) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// ClaimSyncer_GetLastProcessedBlock_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetLastProcessedBlock' +type ClaimSyncer_GetLastProcessedBlock_Call struct { + *mock.Call +} + +// GetLastProcessedBlock is a helper method to define mock.On call +// - ctx context.Context +func (_e *ClaimSyncer_Expecter) GetLastProcessedBlock(ctx interface{}) *ClaimSyncer_GetLastProcessedBlock_Call { + return &ClaimSyncer_GetLastProcessedBlock_Call{Call: _e.mock.On("GetLastProcessedBlock", ctx)} +} + +func (_c *ClaimSyncer_GetLastProcessedBlock_Call) Run(run 
func(ctx context.Context)) *ClaimSyncer_GetLastProcessedBlock_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *ClaimSyncer_GetLastProcessedBlock_Call) Return(_a0 uint64, _a1 bool, _a2 error) *ClaimSyncer_GetLastProcessedBlock_Call { + _c.Call.Return(_a0, _a1, _a2) + return _c +} + +func (_c *ClaimSyncer_GetLastProcessedBlock_Call) RunAndReturn(run func(context.Context) (uint64, bool, error)) *ClaimSyncer_GetLastProcessedBlock_Call { + _c.Call.Return(run) + return _c +} + +// SetNextRequiredBlock provides a mock function with given fields: ctx, blockNum +func (_m *ClaimSyncer) SetNextRequiredBlock(ctx context.Context, blockNum uint64) error { + ret := _m.Called(ctx, blockNum) + + if len(ret) == 0 { + panic("no return value specified for SetNextRequiredBlock") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, uint64) error); ok { + r0 = rf(ctx, blockNum) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// ClaimSyncer_SetNextRequiredBlock_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SetNextRequiredBlock' +type ClaimSyncer_SetNextRequiredBlock_Call struct { + *mock.Call +} + +// SetNextRequiredBlock is a helper method to define mock.On call +// - ctx context.Context +// - blockNum uint64 +func (_e *ClaimSyncer_Expecter) SetNextRequiredBlock(ctx interface{}, blockNum interface{}) *ClaimSyncer_SetNextRequiredBlock_Call { + return &ClaimSyncer_SetNextRequiredBlock_Call{Call: _e.mock.On("SetNextRequiredBlock", ctx, blockNum)} +} + +func (_c *ClaimSyncer_SetNextRequiredBlock_Call) Run(run func(ctx context.Context, blockNum uint64)) *ClaimSyncer_SetNextRequiredBlock_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint64)) + }) + return _c +} + +func (_c *ClaimSyncer_SetNextRequiredBlock_Call) Return(_a0 error) *ClaimSyncer_SetNextRequiredBlock_Call { + _c.Call.Return(_a0) + return _c +} + +func 
(_c *ClaimSyncer_SetNextRequiredBlock_Call) RunAndReturn(run func(context.Context, uint64) error) *ClaimSyncer_SetNextRequiredBlock_Call { + _c.Call.Return(run) + return _c +} + +// NewClaimSyncer creates a new instance of ClaimSyncer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewClaimSyncer(t interface { + mock.TestingT + Cleanup(func()) +}) *ClaimSyncer { + mock := &ClaimSyncer{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/claimsync/storage/migrations/migrations_test.go b/claimsync/storage/migrations/migrations_test.go new file mode 100644 index 000000000..b7e29779a --- /dev/null +++ b/claimsync/storage/migrations/migrations_test.go @@ -0,0 +1,234 @@ +package migrations + +import ( + "context" + "database/sql" + "path/filepath" + "testing" + + "github.com/agglayer/aggkit/db" + logger "github.com/agglayer/aggkit/log" + "github.com/stretchr/testify/require" +) + +func setupTestDB(t *testing.T) *sql.DB { + t.Helper() + + dbPath := filepath.Join(t.TempDir(), t.Name()+".sqlite") + lg := logger.WithFields("module", "test") + + database, err := db.NewSQLiteDB(dbPath) + require.NoError(t, err) + + err = RunMigrations(lg, database) + require.NoError(t, err) + + t.Cleanup(func() { database.Close() }) + return database +} + +func TestMigration0001_TablesExist(t *testing.T) { + database := setupTestDB(t) + ctx := context.Background() + + tx, err := database.BeginTx(ctx, nil) + require.NoError(t, err) + + // Insert into all four tables created by claimsync0001 + _, err = tx.Exec(`INSERT INTO block (num, hash) VALUES (1, '0xBLOCK1')`) + require.NoError(t, err) + + _, err = tx.Exec(` + INSERT INTO claim ( + block_num, block_pos, global_index, + origin_network, origin_address, destination_address, + amount, proof_local_exit_root, proof_rollup_exit_root, + mainnet_exit_root, rollup_exit_root, 
global_exit_root, + destination_network, metadata, is_message, tx_hash, block_timestamp, type + ) VALUES (1, 0, '100', 1, '0xORIGIN', '0xDEST', '50', + '0x00', '0x00', '0x01', '0x02', '0x03', + 2, NULL, FALSE, '0xTXHASH', 1000, 'ClaimEvent') + `) + require.NoError(t, err) + + _, err = tx.Exec(` + INSERT INTO unset_claim ( + block_num, block_pos, tx_hash, global_index, unset_global_index_hash_chain + ) VALUES (1, 1, '0xTXU', '200', '0xCHAIN') + `) + require.NoError(t, err) + + _, err = tx.Exec(` + INSERT INTO set_claim ( + block_num, block_pos, tx_hash, global_index + ) VALUES (1, 2, '0xTXS', '300') + `) + require.NoError(t, err) + + require.NoError(t, tx.Commit()) + + // Verify all four tables have the expected row counts + var count int + require.NoError(t, database.QueryRowContext(ctx, `SELECT COUNT(*) FROM block`).Scan(&count)) + require.Equal(t, 1, count) + + require.NoError(t, database.QueryRowContext(ctx, `SELECT COUNT(*) FROM claim`).Scan(&count)) + require.Equal(t, 1, count) + + require.NoError(t, database.QueryRowContext(ctx, `SELECT COUNT(*) FROM unset_claim`).Scan(&count)) + require.Equal(t, 1, count) + + require.NoError(t, database.QueryRowContext(ctx, `SELECT COUNT(*) FROM set_claim`).Scan(&count)) + require.Equal(t, 1, count) +} + +func TestMigration0001_ForeignKeyConstraint(t *testing.T) { + database := setupTestDB(t) + ctx := context.Background() + + // Inserting a claim without a matching block should fail + _, err := database.ExecContext(ctx, ` + INSERT INTO claim ( + block_num, block_pos, global_index, + origin_network, origin_address, destination_address, + amount, proof_local_exit_root, proof_rollup_exit_root, + mainnet_exit_root, rollup_exit_root, global_exit_root, + destination_network, metadata, is_message, tx_hash, block_timestamp, type + ) VALUES (999, 0, '1', 1, '0x0', '0x0', '0', '0x0', '0x0', '0x0', '0x0', '0x0', + 1, NULL, FALSE, '0x0', 0, 'ClaimEvent') + `) + require.Error(t, err, "inserting claim with non-existent block_num should 
fail") +} + +func TestMigration0001_PrimaryKeyConstraint(t *testing.T) { + database := setupTestDB(t) + ctx := context.Background() + + tx, err := database.BeginTx(ctx, nil) + require.NoError(t, err) + + _, err = tx.Exec(`INSERT INTO block (num, hash) VALUES (1, '0xBLOCK1')`) + require.NoError(t, err) + + insertClaim := ` + INSERT INTO claim ( + block_num, block_pos, global_index, + origin_network, origin_address, destination_address, + amount, proof_local_exit_root, proof_rollup_exit_root, + mainnet_exit_root, rollup_exit_root, global_exit_root, + destination_network, metadata, is_message, tx_hash, block_timestamp, type + ) VALUES (1, 0, '1', 1, '0x0', '0x0', '0', '0x0', '0x0', '0x0', '0x0', '0x0', + 1, NULL, FALSE, '0x0', 0, 'ClaimEvent')` + + _, err = tx.Exec(insertClaim) + require.NoError(t, err) + + // Inserting the same (block_num, block_pos) again should fail + _, err = tx.Exec(insertClaim) + require.Error(t, err, "duplicate primary key (block_num, block_pos) in claim should fail") + + require.NoError(t, tx.Rollback()) +} + +func TestMigration0001_CascadeDelete(t *testing.T) { + database := setupTestDB(t) + ctx := context.Background() + + tx, err := database.BeginTx(ctx, nil) + require.NoError(t, err) + + _, err = tx.Exec(`INSERT INTO block (num, hash) VALUES (1, '0xBLOCK1')`) + require.NoError(t, err) + + _, err = tx.Exec(` + INSERT INTO claim ( + block_num, block_pos, global_index, + origin_network, origin_address, destination_address, + amount, proof_local_exit_root, proof_rollup_exit_root, + mainnet_exit_root, rollup_exit_root, global_exit_root, + destination_network, metadata, is_message, tx_hash, block_timestamp, type + ) VALUES (1, 0, '100', 1, '0x0', '0x0', '0', '0x0', '0x0', '0x0', '0x0', '0x0', + 1, NULL, FALSE, '0x0', 0, 'ClaimEvent') + `) + require.NoError(t, err) + + _, err = tx.Exec(` + INSERT INTO unset_claim (block_num, block_pos, tx_hash, global_index, unset_global_index_hash_chain) + VALUES (1, 1, '0xTXU', '200', '0xCHAIN') + `) + 
require.NoError(t, err) + + _, err = tx.Exec(` + INSERT INTO set_claim (block_num, block_pos, tx_hash, global_index) + VALUES (1, 2, '0xTXS', '300') + `) + require.NoError(t, err) + + require.NoError(t, tx.Commit()) + + // Delete the block — claims, unset_claims and set_claims should cascade + _, err = database.ExecContext(ctx, `DELETE FROM block WHERE num = 1`) + require.NoError(t, err) + + var count int + require.NoError(t, database.QueryRowContext(ctx, `SELECT COUNT(*) FROM claim`).Scan(&count)) + require.Equal(t, 0, count, "claims should be deleted on block cascade") + + require.NoError(t, database.QueryRowContext(ctx, `SELECT COUNT(*) FROM unset_claim`).Scan(&count)) + require.Equal(t, 0, count, "unset_claims should be deleted on block cascade") + + require.NoError(t, database.QueryRowContext(ctx, `SELECT COUNT(*) FROM set_claim`).Scan(&count)) + require.Equal(t, 0, count, "set_claims should be deleted on block cascade") +} + +func TestMigration0001_IndexExists(t *testing.T) { + database := setupTestDB(t) + ctx := context.Background() + + // SQLite stores index info in sqlite_master + var name string + err := database.QueryRowContext(ctx, + `SELECT name FROM sqlite_master WHERE type='index' AND name='idx_claim_type_block'`, + ).Scan(&name) + require.NoError(t, err) + require.Equal(t, "idx_claim_type_block", name) +} + +func TestMigration0001_ClaimDefaultType(t *testing.T) { + database := setupTestDB(t) + ctx := context.Background() + + tx, err := database.BeginTx(ctx, nil) + require.NoError(t, err) + + _, err = tx.Exec(`INSERT INTO block (num, hash) VALUES (1, '0xBLOCK1')`) + require.NoError(t, err) + + // Insert a claim without specifying 'type' — should default to '' + _, err = tx.Exec(` + INSERT INTO claim ( + block_num, block_pos, global_index, + origin_network, origin_address, destination_address, + amount, destination_network + ) VALUES (1, 0, '1', 0, '0x0', '0x0', '0', 0) + `) + require.NoError(t, err) + require.NoError(t, tx.Commit()) + + var claimType 
string + require.NoError(t, database.QueryRowContext(ctx, `SELECT type FROM claim WHERE block_num = 1`).Scan(&claimType)) + require.Equal(t, "", claimType, "default type should be empty string") +} + +func TestMigration0001_Idempotent(t *testing.T) { + dbPath := filepath.Join(t.TempDir(), "idempotent.sqlite") + lg := logger.WithFields("module", "test") + + database, err := db.NewSQLiteDB(dbPath) + require.NoError(t, err) + defer database.Close() + + // Run migrations twice — should not error + require.NoError(t, RunMigrations(lg, database)) + require.NoError(t, RunMigrations(lg, database)) +} From 01947a3bfe396ba30338bbd6d3e791baffd8dba0 Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Wed, 18 Mar 2026 12:47:51 +0100 Subject: [PATCH 12/28] feat: coverage --- claimsync/claimsync_rpc_test.go | 8 +++--- claimsync/downloader_test.go | 14 +++++++--- claimsync/storage/storage_test.go | 42 +++++++++++++++++------------- claimsync/types/claim_data_test.go | 10 +++---- 4 files changed, 44 insertions(+), 30 deletions(-) diff --git a/claimsync/claimsync_rpc_test.go b/claimsync/claimsync_rpc_test.go index 20cec8b66..d51d0e63a 100644 --- a/claimsync/claimsync_rpc_test.go +++ b/claimsync/claimsync_rpc_test.go @@ -6,8 +6,8 @@ import ( "testing" jRPC "github.com/0xPolygon/cdk-rpc/rpc" - claimsynctypes "github.com/agglayer/aggkit/claimsync/types" "github.com/agglayer/aggkit/claimsync/mocks" + claimsynctypes "github.com/agglayer/aggkit/claimsync/types" logger "github.com/agglayer/aggkit/log" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" @@ -30,10 +30,11 @@ func TestClaimSyncRPC_Status_OK(t *testing.T) { require.Nil(t, rpcErr) require.NotNil(t, result) - status := result.(struct { + status, ok := result.(struct { Status string `json:"status"` LastProcessedBlock uint64 `json:"lastProcessedBlock"` }) + require.True(t, ok) require.Equal(t, "running", status.Status) require.Equal(t, uint64(42), 
status.LastProcessedBlock) } @@ -138,9 +139,10 @@ func TestClaimSyncRPC_SetNextRequiredBlock_OK(t *testing.T) { require.Nil(t, rpcErr) require.NotNil(t, result) - msg := result.(struct { + msg, ok := result.(struct { Message string `json:"message"` }) + require.True(t, ok) require.Equal(t, "next required block set to 500", msg.Message) } diff --git a/claimsync/downloader_test.go b/claimsync/downloader_test.go index 3b6723fd4..998d57a2d 100644 --- a/claimsync/downloader_test.go +++ b/claimsync/downloader_test.go @@ -590,7 +590,9 @@ func TestBuildClaimEventHandler_SameTxDetailedSkip(t *testing.T) { err = handler(block, log) require.NoError(t, err) require.Len(t, block.Events, 1, "ClaimEvent should be skipped; DetailedClaimEvent for same tx already present") - require.Equal(t, DetailedClaimEvent, block.Events[0].(Event).Claim.Type) + event, ok := block.Events[0].(Event) + require.True(t, ok) + require.Equal(t, DetailedClaimEvent, event.Claim.Type) } // --- buildDetailedClaimEventHandler: removes ClaimEvent for same tx --- @@ -634,7 +636,7 @@ func TestBuildDetailedClaimEventHandler_RemovesClaimEvent(t *testing.T) { log := types.Log{ Topics: []common.Hash{ detailedClaimEventSignature, - common.BigToHash(big.NewInt(42)), // globalIndex (indexed) + common.BigToHash(big.NewInt(42)), // globalIndex (indexed) common.BytesToHash(common.Address{}.Bytes()), // destinationAddress (indexed) }, Data: data, @@ -651,7 +653,9 @@ func TestBuildDetailedClaimEventHandler_RemovesClaimEvent(t *testing.T) { err = handler(block, log) require.NoError(t, err) require.Len(t, block.Events, 1) - require.Equal(t, DetailedClaimEvent, block.Events[0].(Event).Claim.Type, + ev, ok := block.Events[0].(Event) + require.True(t, ok) + require.Equal(t, DetailedClaimEvent, ev.Claim.Type, "ClaimEvent should be replaced by DetailedClaimEvent for the same tx") } @@ -717,7 +721,9 @@ func TestBuildClaimEventHandlerPreEtrog_OK(t *testing.T) { require.NoError(t, err) require.Len(t, block.Events, 1) - claim := 
block.Events[0].(Event).Claim + ev, ok := block.Events[0].(Event) + require.True(t, ok) + claim := ev.Claim require.Equal(t, new(big.Int).SetUint64(uint64(globalIndex)), claim.GlobalIndex) require.Equal(t, txHash, claim.TxHash) } diff --git a/claimsync/storage/storage_test.go b/claimsync/storage/storage_test.go index 621f7acbc..2632ee831 100644 --- a/claimsync/storage/storage_test.go +++ b/claimsync/storage/storage_test.go @@ -610,8 +610,8 @@ func TestClaimColumnsSQL_ReflectionCheck(t *testing.T) { func TestGetBoundaryBlockForClaimType(t *testing.T) { testCases := []struct { - name string - claimType claimsynctypes.ClaimType + name string + claimType claimsynctypes.ClaimType claimsToInsert []struct { blockNum uint64 claimType claimsynctypes.ClaimType @@ -621,11 +621,11 @@ func TestGetBoundaryBlockForClaimType(t *testing.T) { errorIs error }{ { - name: "no claims -> db.ErrNotFound", - claimType: claimsynctypes.DetailedClaimEvent, + name: "no claims -> db.ErrNotFound", + claimType: claimsynctypes.DetailedClaimEvent, claimsToInsert: nil, - expectError: true, - errorIs: db.ErrNotFound, + expectError: true, + errorIs: db.ErrNotFound, }, { name: "DetailedClaimEvent at blocks 1 and 6 -> returns 6", @@ -764,29 +764,31 @@ func TestGetClaimsByGER(t *testing.T) { } func TestGetUnsetClaimsPaged(t *testing.T) { + t.Parallel() + s, _ := newTestStorage(t) ctx := context.Background() unset := []claimsynctypes.UnsetClaim{ { // unset[0]: block=1, gi=100 - BlockNum: 1, - BlockPos: 0, - TxHash: common.HexToHash("0x123"), - GlobalIndex: big.NewInt(100), + BlockNum: 1, + BlockPos: 0, + TxHash: common.HexToHash("0x123"), + GlobalIndex: big.NewInt(100), UnsetGlobalIndexHashChain: common.HexToHash("0xabc123"), }, { // unset[1]: block=2, gi=200 - BlockNum: 2, - BlockPos: 0, - TxHash: common.HexToHash("0x456"), - GlobalIndex: big.NewInt(200), + BlockNum: 2, + BlockPos: 0, + TxHash: common.HexToHash("0x456"), + GlobalIndex: big.NewInt(200), UnsetGlobalIndexHashChain: 
common.HexToHash("0xdef456"), }, { // unset[2]: block=3, gi=100 (same gi as first) - BlockNum: 3, - BlockPos: 0, - TxHash: common.HexToHash("0x789"), - GlobalIndex: big.NewInt(100), + BlockNum: 3, + BlockPos: 0, + TxHash: common.HexToHash("0x789"), + GlobalIndex: big.NewInt(100), UnsetGlobalIndexHashChain: common.HexToHash("0x987654"), }, } @@ -878,6 +880,8 @@ func TestGetUnsetClaimsPaged(t *testing.T) { } func TestGetSetClaimsPaged(t *testing.T) { + t.Parallel() + s, _ := newTestStorage(t) ctx := context.Background() @@ -997,6 +1001,8 @@ func TestGetSetClaimsPaged(t *testing.T) { } func TestGetClaimsPaged(t *testing.T) { + t.Parallel() + s, _ := newTestStorage(t) ctx := context.Background() diff --git a/claimsync/types/claim_data_test.go b/claimsync/types/claim_data_test.go index f5ad045bf..f5f423864 100644 --- a/claimsync/types/claim_data_test.go +++ b/claimsync/types/claim_data_test.go @@ -82,10 +82,10 @@ func TestDecodePreEtrogCalldata(t *testing.T) { { name: "Valid calldata", data: []any{ - [treetypes.DefaultHeight][common.HashLength]byte{}, // Proof - globalIndex, // GlobalIndex - [common.HashLength]byte(mainnetExitRoot.Bytes()), // MainnetExitRoot - [common.HashLength]byte(rollupExitRoot.Bytes()), // RollupExitRoot + [treetypes.DefaultHeight][common.HashLength]byte{}, // Proof + globalIndex, // GlobalIndex + [common.HashLength]byte(mainnetExitRoot.Bytes()), // MainnetExitRoot + [common.HashLength]byte(rollupExitRoot.Bytes()), // RollupExitRoot uint32(1), // OriginNetwork (not used) common.Address{}, // OriginTokenAddress (not used) destinationNetwork, @@ -350,7 +350,7 @@ func TestDecodeEtrogCalldata(t *testing.T) { [treetypes.DefaultHeight][common.HashLength]byte{}, [treetypes.DefaultHeight][common.HashLength]byte{}, globalIndex, - invalidTypePlaceholder, // MainnetExitRoot + invalidTypePlaceholder, // MainnetExitRoot [common.HashLength]byte(rollupExitRoot.Bytes()), // RollupExitRoot uint32(0), common.Address{}, From 77bb924bafdec6b4c2aec55711a0390e5513b6e9 
Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Wed, 18 Mar 2026 13:23:41 +0100 Subject: [PATCH 13/28] feat: coverage --- claimsync/claimsync_test.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/claimsync/claimsync_test.go b/claimsync/claimsync_test.go index c0c8af9e9..ccb063fc9 100644 --- a/claimsync/claimsync_test.go +++ b/claimsync/claimsync_test.go @@ -29,9 +29,6 @@ import ( // 5. Call SetNextRequiredBlock(ctx, 1) to unlock the syncer. // 6. Assert GetLastProcessedBlock returns found=true — the syncer processed the blocks and captured the event. func TestClaimSyncerWaitUntilSetNextRequiredBlock(t *testing.T) { - if testing.Short() { - t.Skip("Skipping test in short mode") - } ctx, cancelFn := context.WithCancel(context.Background()) // Setup Docker L1 client, auth := startGeth(t, ctx, cancelFn) From d02662d099d18e470d26963140dfb87dc9e95f1a Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Wed, 18 Mar 2026 17:08:10 +0100 Subject: [PATCH 14/28] feat: coverage --- claimsync/embedded_test.go | 245 +++++++++++++++++++++++++++++++++++++ sync/evmdriver_test.go | 182 +++++++++++++++++++++++++++ 2 files changed, 427 insertions(+) create mode 100644 claimsync/embedded_test.go diff --git a/claimsync/embedded_test.go b/claimsync/embedded_test.go new file mode 100644 index 000000000..4bab56bec --- /dev/null +++ b/claimsync/embedded_test.go @@ -0,0 +1,245 @@ +package claimsync + +import ( + "errors" + "math/big" + "testing" + + claimsynctypes "github.com/agglayer/aggkit/claimsync/types" + claimstoragemocks "github.com/agglayer/aggkit/claimsync/types/mocks" + "github.com/agglayer/aggkit/db" + dbmocks "github.com/agglayer/aggkit/db/mocks" + logger "github.com/agglayer/aggkit/log" + "github.com/agglayer/aggkit/sync" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +var errEmbeddedUnittest = 
errors.New("embedded unittest error") + +func newTestEmbeddedProcessor(t *testing.T) (*claimEmbeddedProcessor, *claimstoragemocks.ClaimStorager) { + t.Helper() + storageMock := claimstoragemocks.NewClaimStorager(t) + lg := logger.WithFields("module", "test") + return newEmbeddedProcessor(lg, storageMock), storageMock +} + +// --- Event.String --- + +func TestEvent_String_AllNil(t *testing.T) { + t.Parallel() + e := Event{} + require.Equal(t, "claimsync.Event{}", e.String()) +} + +func TestEvent_String_ClaimOnly(t *testing.T) { + t.Parallel() + e := Event{Claim: &Claim{BlockNum: 1, GlobalIndex: big.NewInt(10)}} + s := e.String() + require.Contains(t, s, "claimsync.Event{") + require.Contains(t, s, "Claim{") +} + +func TestEvent_String_UnsetClaimOnly(t *testing.T) { + t.Parallel() + e := Event{UnsetClaim: &UnsetClaim{BlockNum: 2, GlobalIndex: big.NewInt(20)}} + s := e.String() + require.Contains(t, s, "claimsync.Event{") + require.Contains(t, s, "UnsetClaim{") +} + +func TestEvent_String_SetClaimOnly(t *testing.T) { + t.Parallel() + e := Event{SetClaim: &SetClaim{BlockNum: 3, GlobalIndex: big.NewInt(30)}} + s := e.String() + require.Contains(t, s, "claimsync.Event{") + require.Contains(t, s, "SetClaim{") +} + +func TestEvent_String_AllThree(t *testing.T) { + t.Parallel() + e := Event{ + Claim: &Claim{BlockNum: 1, GlobalIndex: big.NewInt(10)}, + UnsetClaim: &UnsetClaim{BlockNum: 1, GlobalIndex: big.NewInt(20)}, + SetClaim: &SetClaim{BlockNum: 1, GlobalIndex: big.NewInt(30)}, + } + s := e.String() + require.Contains(t, s, "Claim{") + require.Contains(t, s, "UnsetClaim{") + require.Contains(t, s, "SetClaim{") +} + +// --- ProcessBlockWithTx --- + +func TestProcessBlockWithTx_WrongEventType(t *testing.T) { + t.Parallel() + proc, _ := newTestEmbeddedProcessor(t) + block := sync.Block{Num: 1} + + err := proc.ProcessBlockWithTx(t.Context(), nil, block, "not-an-event") + require.Error(t, err) + require.Contains(t, err.Error(), "unexpected event type") +} + +func 
TestProcessBlockWithTx_EmptyEvent(t *testing.T) { + t.Parallel() + proc, _ := newTestEmbeddedProcessor(t) + block := sync.Block{Num: 1} + + err := proc.ProcessBlockWithTx(t.Context(), nil, block, Event{}) + require.NoError(t, err) +} + +func TestProcessBlockWithTx_ClaimOnly(t *testing.T) { + t.Parallel() + proc, storageMock := newTestEmbeddedProcessor(t) + tx := dbmocks.NewQuerier(t) + claim := Claim{BlockNum: 5, TxHash: common.HexToHash("0x1"), GlobalIndex: big.NewInt(100)} + block := sync.Block{Num: 5} + + storageMock.EXPECT().InsertClaim(mock.Anything, tx, claim).Return(nil) + + err := proc.ProcessBlockWithTx(t.Context(), tx, block, Event{Claim: &claim}) + require.NoError(t, err) +} + +func TestProcessBlockWithTx_UnsetClaimOnly(t *testing.T) { + t.Parallel() + proc, storageMock := newTestEmbeddedProcessor(t) + tx := dbmocks.NewQuerier(t) + unset := UnsetClaim{BlockNum: 5, TxHash: common.HexToHash("0x2"), GlobalIndex: big.NewInt(200)} + block := sync.Block{Num: 5} + + storageMock.EXPECT().InsertUnsetClaim(mock.Anything, tx, unset).Return(nil) + + err := proc.ProcessBlockWithTx(t.Context(), tx, block, Event{UnsetClaim: &unset}) + require.NoError(t, err) +} + +func TestProcessBlockWithTx_SetClaimOnly(t *testing.T) { + t.Parallel() + proc, storageMock := newTestEmbeddedProcessor(t) + tx := dbmocks.NewQuerier(t) + set := SetClaim{BlockNum: 5, TxHash: common.HexToHash("0x3"), GlobalIndex: big.NewInt(300)} + block := sync.Block{Num: 5} + + storageMock.EXPECT().InsertSetClaim(mock.Anything, tx, set).Return(nil) + + err := proc.ProcessBlockWithTx(t.Context(), tx, block, Event{SetClaim: &set}) + require.NoError(t, err) +} + +func TestProcessBlockWithTx_AllThreeEvents(t *testing.T) { + t.Parallel() + proc, storageMock := newTestEmbeddedProcessor(t) + tx := dbmocks.NewQuerier(t) + claim := Claim{BlockNum: 7, TxHash: common.HexToHash("0xA"), GlobalIndex: big.NewInt(1)} + unset := UnsetClaim{BlockNum: 7, TxHash: common.HexToHash("0xB"), GlobalIndex: big.NewInt(2)} + set := 
SetClaim{BlockNum: 7, TxHash: common.HexToHash("0xC"), GlobalIndex: big.NewInt(3)} + block := sync.Block{Num: 7} + + storageMock.EXPECT().InsertClaim(mock.Anything, tx, claim).Return(nil) + storageMock.EXPECT().InsertUnsetClaim(mock.Anything, tx, unset).Return(nil) + storageMock.EXPECT().InsertSetClaim(mock.Anything, tx, set).Return(nil) + + err := proc.ProcessBlockWithTx(t.Context(), tx, block, Event{ + Claim: &claim, UnsetClaim: &unset, SetClaim: &set, + }) + require.NoError(t, err) +} + +func TestProcessBlockWithTx_InsertClaimError(t *testing.T) { + t.Parallel() + proc, storageMock := newTestEmbeddedProcessor(t) + tx := dbmocks.NewQuerier(t) + claim := Claim{BlockNum: 1, GlobalIndex: big.NewInt(1)} + block := sync.Block{Num: 1} + + storageMock.EXPECT().InsertClaim(mock.Anything, tx, claim).Return(errEmbeddedUnittest) + + err := proc.ProcessBlockWithTx(t.Context(), tx, block, Event{Claim: &claim}) + require.ErrorIs(t, err, errEmbeddedUnittest) +} + +func TestProcessBlockWithTx_InsertUnsetClaimError(t *testing.T) { + t.Parallel() + proc, storageMock := newTestEmbeddedProcessor(t) + tx := dbmocks.NewQuerier(t) + unset := UnsetClaim{BlockNum: 1, GlobalIndex: big.NewInt(1)} + block := sync.Block{Num: 1} + + storageMock.EXPECT().InsertUnsetClaim(mock.Anything, tx, unset).Return(errEmbeddedUnittest) + + err := proc.ProcessBlockWithTx(t.Context(), tx, block, Event{UnsetClaim: &unset}) + require.ErrorIs(t, err, errEmbeddedUnittest) +} + +func TestProcessBlockWithTx_InsertSetClaimError(t *testing.T) { + t.Parallel() + proc, storageMock := newTestEmbeddedProcessor(t) + tx := dbmocks.NewQuerier(t) + set := SetClaim{BlockNum: 1, GlobalIndex: big.NewInt(1)} + block := sync.Block{Num: 1} + + storageMock.EXPECT().InsertSetClaim(mock.Anything, tx, set).Return(errEmbeddedUnittest) + + err := proc.ProcessBlockWithTx(t.Context(), tx, block, Event{SetClaim: &set}) + require.ErrorIs(t, err, errEmbeddedUnittest) +} + +// InsertClaim fails: InsertUnsetClaim and InsertSetClaim must not 
be called +func TestProcessBlockWithTx_ClaimErrorAborts(t *testing.T) { + t.Parallel() + proc, storageMock := newTestEmbeddedProcessor(t) + tx := dbmocks.NewQuerier(t) + claim := Claim{BlockNum: 1, GlobalIndex: big.NewInt(1)} + unset := UnsetClaim{BlockNum: 1, GlobalIndex: big.NewInt(2)} + block := sync.Block{Num: 1} + + storageMock.EXPECT().InsertClaim(mock.Anything, tx, claim).Return(errEmbeddedUnittest) + // InsertUnsetClaim must NOT be called after InsertClaim fails + + err := proc.ProcessBlockWithTx(t.Context(), tx, block, Event{Claim: &claim, UnsetClaim: &unset}) + require.ErrorIs(t, err, errEmbeddedUnittest) +} + +// --- ReorgWithTx --- + +func TestReorgWithTx_HappyPath(t *testing.T) { + t.Parallel() + proc, storageMock := newTestEmbeddedProcessor(t) + tx := dbmocks.NewQuerier(t) + + storageMock.EXPECT().DeleteBlocksFrom(mock.Anything, tx, uint64(10)).Return(int64(3), nil) + + rows, err := proc.ReorgWithTx(t.Context(), tx, 10) + require.NoError(t, err) + require.Equal(t, int64(3), rows) +} + +func TestReorgWithTx_Error(t *testing.T) { + t.Parallel() + proc, storageMock := newTestEmbeddedProcessor(t) + tx := dbmocks.NewQuerier(t) + + storageMock.EXPECT().DeleteBlocksFrom(mock.Anything, tx, uint64(5)).Return(int64(0), errEmbeddedUnittest) + + rows, err := proc.ReorgWithTx(t.Context(), tx, 5) + require.ErrorIs(t, err, errEmbeddedUnittest) + require.Equal(t, int64(0), rows) +} + +// --- NewClaimStorage --- + +func TestNewClaimStorage_OK(t *testing.T) { + t.Parallel() + lg := logger.WithFields("module", "test") + sqlDB, err := db.NewSQLiteDB(t.TempDir() + "/test.db") + require.NoError(t, err) + t.Cleanup(func() { sqlDB.Close() }) + + storage, err := NewClaimStorage(sqlDB, lg, claimsynctypes.L1ClaimSyncer, 0) + require.NoError(t, err) + require.NotNil(t, storage) +} diff --git a/sync/evmdriver_test.go b/sync/evmdriver_test.go index 41a2f8958..b035e839c 100644 --- a/sync/evmdriver_test.go +++ b/sync/evmdriver_test.go @@ -953,3 +953,185 @@ func 
TestRuntimeData_IsCompatible_NilAddresses(t *testing.T) { require.Contains(t, err.Error(), "addresses len mismatch") }) } + +// makeDriver is a helper that creates an EVMDriver with fresh mocks for each test. +func makeDriver(t *testing.T) (*EVMDriver, *ReorgDetectorMock, *ProcessorMock, *DownloaderMock) { + t.Helper() + rh := &RetryHandler{ + MaxRetryAttemptsAfterError: 5, + RetryAfterErrorPeriod: 10 * time.Millisecond, + } + rdm := NewReorgDetectorMock(t) + pm := NewProcessorMock(t) + dm := NewDownloaderMock(t) + compatMock := compmocks.NewCompatibilityChecker(t) + rdm.EXPECT().Subscribe(reorgDetectorID).Return(&reorgdetector.Subscription{}, nil) + driver, err := NewEVMDriver(rdm, pm, dm, reorgDetectorID, 10, rh, compatMock) + if err != nil { + t.Fatalf("could not construct EVMDriver: %v", err) + } + return driver, rdm, pm, dm +} + +// --- SyncNextBlock --- + +func TestSyncNextBlock_AlreadyBootstrapped(t *testing.T) { + t.Parallel() + driver, _, pm, _ := makeDriver(t) + pm.EXPECT().GetLastProcessedBlock(mock.Anything).Return(uint64(5), true, nil) + + err := driver.SyncNextBlock(t.Context(), 1) + require.ErrorIs(t, err, ErrAlreadyBootstrapped) +} + +func TestSyncNextBlock_GetLastProcessedBlockError(t *testing.T) { + t.Parallel() + driver, _, pm, _ := makeDriver(t) + pm.EXPECT().GetLastProcessedBlock(mock.Anything).Return(uint64(0), false, errUnittest) + + err := driver.SyncNextBlock(t.Context(), 1) + require.ErrorContains(t, err, "SyncNextBlock: getting last processed block") + require.ErrorIs(t, err, errUnittest) +} + +func TestSyncNextBlock_DownloadChannelClosedUnexpectedly(t *testing.T) { + t.Parallel() + driver, _, pm, dm := makeDriver(t) + pm.EXPECT().GetLastProcessedBlock(mock.Anything).Return(uint64(0), false, nil) + dm.EXPECT().Download(mock.Anything, uint64(5), mock.Anything). 
+ Run(func(_ context.Context, _ uint64, ch chan EVMBlock) { + close(ch) + }) + + err := driver.SyncNextBlock(t.Context(), 5) + require.ErrorContains(t, err, "download channel closed unexpectedly") +} + +func TestSyncNextBlock_ContextCancelledBeforeBlock(t *testing.T) { + t.Parallel() + driver, _, pm, dm := makeDriver(t) + ctx, cancel := context.WithCancel(t.Context()) + pm.EXPECT().GetLastProcessedBlock(mock.Anything).Return(uint64(0), false, nil) + // The goroutine may or may not start before the select returns ctx.Done() + dm.EXPECT().Download(mock.Anything, uint64(5), mock.Anything). + Run(func(downloadCtx context.Context, _ uint64, _ chan EVMBlock) { + <-downloadCtx.Done() + }).Maybe() + cancel() + + err := driver.SyncNextBlock(ctx, 5) + require.ErrorIs(t, err, context.Canceled) +} + +func TestSyncNextBlock_HappyPath(t *testing.T) { + t.Parallel() + driver, rdm, pm, dm := makeDriver(t) + ctx := t.Context() + expectedBlock := EVMBlock{ + EVMBlockHeader: EVMBlockHeader{Num: 5, Hash: common.HexToHash("0x5")}, + } + + pm.EXPECT().GetLastProcessedBlock(mock.Anything).Return(uint64(0), false, nil) + dm.EXPECT().Download(mock.Anything, uint64(5), mock.Anything). 
+ Run(func(downloadCtx context.Context, _ uint64, ch chan EVMBlock) { + ch <- expectedBlock + <-downloadCtx.Done() // wait for cancel() triggered inside SyncNextBlock + }) + rdm.EXPECT().AddBlockToTrack(mock.Anything, reorgDetectorID, expectedBlock.Num, expectedBlock.Hash).Return(nil) + pm.EXPECT().ProcessBlock(mock.Anything, Block{Num: expectedBlock.Num, Hash: expectedBlock.Hash}).Return(nil) + + err := driver.SyncNextBlock(ctx, 5) + require.NoError(t, err) +} + +// --- Sync with firstBlockNumber --- + +func TestSync_WithFirstBlockNumber_StartsFromGivenBlock(t *testing.T) { + t.Parallel() + + rh := &RetryHandler{ + MaxRetryAttemptsAfterError: 5, + RetryAfterErrorPeriod: 10 * time.Millisecond, + } + ctx, cancel := context.WithCancel(t.Context()) + defer cancel() + + rdm := NewReorgDetectorMock(t) + pm := NewProcessorMock(t) + dm := NewDownloaderMock(t) + compatMock := compmocks.NewCompatibilityChecker(t) + compatMock.EXPECT().Check(mock.Anything, mock.Anything).Return(nil) + rdm.EXPECT().Subscribe(reorgDetectorID).Return(&reorgdetector.Subscription{ + ReorgedBlock: make(chan uint64), + ReorgProcessed: make(chan bool), + }, nil) + + driver, err := NewEVMDriver(rdm, pm, dm, reorgDetectorID, 10, rh, compatMock) + require.NoError(t, err) + + firstBlockNum := uint64(42) + // no processed blocks exist yet + pm.EXPECT().GetLastProcessedBlock(mock.Anything).Return(uint64(0), false, nil) + + downloadStartedFrom := make(chan uint64, 1) + dm.EXPECT().Download(mock.Anything, mock.Anything, mock.Anything). 
+ Run(func(downloadCtx context.Context, fromBlock uint64, ch chan EVMBlock) { + downloadStartedFrom <- fromBlock + <-downloadCtx.Done() + close(ch) + }) + + go driver.Sync(ctx, &firstBlockNum) + + select { + case from := <-downloadStartedFrom: + require.Equal(t, firstBlockNum, from, "Download should start from firstBlockNumber when no processed blocks exist") + cancel() + case <-time.After(500 * time.Millisecond): + t.Fatal("timeout waiting for Download to be called with firstBlockNumber") + } +} + +// --- Sync waits when no processed blocks and no firstBlockNumber --- + +func TestSync_WaitsWhenNoProcessedBlockAndNoFirstBlock(t *testing.T) { + t.Parallel() + + rh := &RetryHandler{ + MaxRetryAttemptsAfterError: 5, + RetryAfterErrorPeriod: 10 * time.Millisecond, + } + ctx, cancel := context.WithCancel(t.Context()) + + rdm := NewReorgDetectorMock(t) + pm := NewProcessorMock(t) + dm := NewDownloaderMock(t) + compatMock := compmocks.NewCompatibilityChecker(t) + compatMock.EXPECT().Check(mock.Anything, mock.Anything).Return(nil) + rdm.EXPECT().Subscribe(reorgDetectorID).Return(&reorgdetector.Subscription{ + ReorgedBlock: make(chan uint64), + ReorgProcessed: make(chan bool), + }, nil) + + driver, err := NewEVMDriver(rdm, pm, dm, reorgDetectorID, 10, rh, compatMock) + require.NoError(t, err) + + // GetLastProcessedBlock returns not-found on every call + pm.EXPECT().GetLastProcessedBlock(mock.Anything).Return(uint64(0), false, nil).Maybe() + + done := make(chan struct{}) + go func() { + defer close(done) + driver.Sync(ctx, nil) + }() + + time.Sleep(50 * time.Millisecond) // let it loop a few times with RetryAfterErrorPeriod + cancel() + + select { + case <-done: + // good: Sync exited cleanly after context cancellation + case <-time.After(500 * time.Millisecond): + t.Fatal("Sync did not exit after context cancellation") + } +} From 247862fa1ed5532f52c118806c079e181ff54d4b Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: 
Wed, 18 Mar 2026 17:08:22 +0100 Subject: [PATCH 15/28] feat: coverage --- claimsync/processor_test.go | 398 ++++++++++++++++++++++++++++++++++++ 1 file changed, 398 insertions(+) create mode 100644 claimsync/processor_test.go diff --git a/claimsync/processor_test.go b/claimsync/processor_test.go new file mode 100644 index 000000000..23dd0ca21 --- /dev/null +++ b/claimsync/processor_test.go @@ -0,0 +1,398 @@ +package claimsync + +import ( + "context" + "database/sql" + "errors" + "math/big" + "path" + "testing" + "time" + + claimsyncStorage "github.com/agglayer/aggkit/claimsync/storage" + claimsynctypes "github.com/agglayer/aggkit/claimsync/types" + claimstoragemocks "github.com/agglayer/aggkit/claimsync/types/mocks" + dbmocks "github.com/agglayer/aggkit/db/mocks" + logger "github.com/agglayer/aggkit/log" + "github.com/agglayer/aggkit/sync" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +const processorTestDBQueryTimeout = 30 * time.Second + +func newTestProcessor(t *testing.T) *processor { + t.Helper() + lg := logger.WithFields("module", "test-processor") + store, err := claimsyncStorage.NewStandalone(lg, + path.Join(t.TempDir(), "processor.db"), + "test-syncer", + processorTestDBQueryTimeout, + ) + require.NoError(t, err) + return newProcessor(lg, store, processorTestDBQueryTimeout) +} + +// --- Test data --- + +var ( + procBlock1 = sync.Block{ + Num: 1, + Hash: common.HexToHash("0x01"), + Events: []any{ + Event{Claim: &Claim{ + BlockNum: 1, + BlockPos: 0, + TxHash: common.HexToHash("0xa1"), + GlobalIndex: big.NewInt(10), + }}, + Event{UnsetClaim: &UnsetClaim{ + BlockNum: 1, + BlockPos: 1, + TxHash: common.HexToHash("0xa2"), + GlobalIndex: big.NewInt(20), + UnsetGlobalIndexHashChain: common.HexToHash("0xff"), + }}, + }, + } + procBlock3 = sync.Block{ + Num: 3, + Hash: common.HexToHash("0x03"), + Events: []any{ + Event{Claim: &Claim{ + BlockNum: 3, + BlockPos: 0, + TxHash: 
common.HexToHash("0xb1"), + GlobalIndex: big.NewInt(30), + }}, + Event{SetClaim: &SetClaim{ + BlockNum: 3, + BlockPos: 1, + TxHash: common.HexToHash("0xb2"), + GlobalIndex: big.NewInt(40), + }}, + }, + } + procBlock4 = sync.Block{Num: 4, Hash: common.HexToHash("0x04"), Events: []any{}} + procBlock5 = sync.Block{ + Num: 5, + Hash: common.HexToHash("0x05"), + Events: []any{ + Event{Claim: &Claim{ + BlockNum: 5, + BlockPos: 0, + TxHash: common.HexToHash("0xc1"), + GlobalIndex: big.NewInt(50), + }}, + }, + } +) + +// --- Action interfaces (mirroring bridgesync/processor_test.go pattern) --- + +type procTestAction interface { + method() string + desc() string + execute(t *testing.T) +} + +// processBlockAction + +type procProcessBlockAction struct { + p *processor + description string + block sync.Block + expectedErr error +} + +func (a *procProcessBlockAction) method() string { return "ProcessBlock" } +func (a *procProcessBlockAction) desc() string { return a.description } +func (a *procProcessBlockAction) execute(t *testing.T) { + t.Helper() + err := a.p.ProcessBlock(context.Background(), a.block) + require.Equal(t, a.expectedErr, err) +} + +// reorgAction + +type procReorgAction struct { + p *processor + description string + firstReorgedBlock uint64 + expectedErr error +} + +func (a *procReorgAction) method() string { return "Reorg" } +func (a *procReorgAction) desc() string { return a.description } +func (a *procReorgAction) execute(t *testing.T) { + t.Helper() + err := a.p.Reorg(context.Background(), a.firstReorgedBlock) + require.Equal(t, a.expectedErr, err) +} + +// getLastProcessedBlockAction + +type procGetLastAction struct { + p *processor + description string + expectedBlock uint64 + expectedFound bool +} + +func (a *procGetLastAction) method() string { return "GetLastProcessedBlock" } +func (a *procGetLastAction) desc() string { return a.description } +func (a *procGetLastAction) execute(t *testing.T) { + t.Helper() + block, found, err := 
a.p.GetLastProcessedBlock(context.Background()) + require.NoError(t, err) + require.Equal(t, a.expectedFound, found) + if found { + require.Equal(t, a.expectedBlock, block) + } +} + +// getFirstProcessedBlockAction + +type procGetFirstAction struct { + p *processor + description string + expectedBlock uint64 + expectedFound bool +} + +func (a *procGetFirstAction) method() string { return "GetFirstProcessedBlock" } +func (a *procGetFirstAction) desc() string { return a.description } +func (a *procGetFirstAction) execute(t *testing.T) { + t.Helper() + block, found, err := a.p.GetFirstProcessedBlock(context.Background()) + require.NoError(t, err) + require.Equal(t, a.expectedFound, found) + if found { + require.Equal(t, a.expectedBlock, block) + } +} + +// --- Integration tests --- + +func TestProcessor(t *testing.T) { + p := newTestProcessor(t) + + actions := []procTestAction{ + // empty state + &procGetLastAction{p: p, description: "empty: no last block", expectedFound: false}, + &procGetFirstAction{p: p, description: "empty: no first block", expectedFound: false}, + &procReorgAction{p: p, description: "reorg on empty (block 0)", firstReorgedBlock: 0}, + &procReorgAction{p: p, description: "reorg on empty (block 1)", firstReorgedBlock: 1}, + + // process block1 + &procProcessBlockAction{p: p, description: "process block1", block: procBlock1}, + &procGetLastAction{p: p, description: "after block1: last=1", expectedFound: true, expectedBlock: 1}, + &procGetFirstAction{p: p, description: "after block1: first=1", expectedFound: true, expectedBlock: 1}, + + // reorg block1 away + &procReorgAction{p: p, description: "reorg block1", firstReorgedBlock: 1}, + &procGetLastAction{p: p, description: "after reorg block1: no last", expectedFound: false}, + + // process block1 again, then block3 + &procProcessBlockAction{p: p, description: "process block1 again", block: procBlock1}, + &procProcessBlockAction{p: p, description: "process block3", block: procBlock3}, + 
&procGetLastAction{p: p, description: "after block3: last=3", expectedFound: true, expectedBlock: 3}, + &procGetFirstAction{p: p, description: "after block3: first=1", expectedFound: true, expectedBlock: 1}, + + // reorg from block3 → only block1 remains + &procReorgAction{p: p, description: "reorg block3", firstReorgedBlock: 3}, + &procGetLastAction{p: p, description: "after reorg block3: last=1", expectedFound: true, expectedBlock: 1}, + + // process block3, block4, block5 + &procProcessBlockAction{p: p, description: "process block3 again", block: procBlock3}, + &procProcessBlockAction{p: p, description: "process empty block4", block: procBlock4}, + &procProcessBlockAction{p: p, description: "process block5", block: procBlock5}, + &procGetLastAction{p: p, description: "after block5: last=5", expectedFound: true, expectedBlock: 5}, + &procGetFirstAction{p: p, description: "after block5: first=1", expectedFound: true, expectedBlock: 1}, + + // reorg last block only + &procReorgAction{p: p, description: "reorg block5", firstReorgedBlock: 5}, + &procGetLastAction{p: p, description: "after reorg block5: last=4", expectedFound: true, expectedBlock: 4}, + } + + for _, a := range actions { + t.Logf("%s: %s", a.method(), a.desc()) + a.execute(t) + } +} + +// --- GetBoundaryBlockForClaimType --- + +func TestProcessor_GetBoundaryBlockForClaimType(t *testing.T) { + t.Parallel() + p := newTestProcessor(t) + ctx := context.Background() + + b1 := sync.Block{ + Num: 1, + Hash: common.HexToHash("0x01"), + Events: []any{ + Event{Claim: &Claim{BlockNum: 1, BlockPos: 0, TxHash: common.HexToHash("0x1"), GlobalIndex: big.NewInt(1), Type: claimsynctypes.ClaimEvent}}, + }, + } + b3 := sync.Block{ + Num: 3, + Hash: common.HexToHash("0x03"), + Events: []any{ + Event{Claim: &Claim{BlockNum: 3, BlockPos: 0, TxHash: common.HexToHash("0x3"), GlobalIndex: big.NewInt(3), Type: claimsynctypes.ClaimEvent}}, + }, + } + require.NoError(t, p.ProcessBlock(ctx, b1)) + require.NoError(t, 
p.ProcessBlock(ctx, b3)) + + blockNum, err := p.GetBoundaryBlockForClaimType(ctx, nil, claimsynctypes.ClaimEvent) + require.NoError(t, err) + require.Equal(t, uint64(3), blockNum) +} + +// --- ProcessBlock error paths (mocks) --- + +func newMockProcessor(t *testing.T) (*processor, *claimstoragemocks.ClaimStorager) { + t.Helper() + lg := logger.WithFields("module", "test-mock-processor") + storageMock := claimstoragemocks.NewClaimStorager(t) + proc := newProcessor(lg, storageMock, processorTestDBQueryTimeout) + return proc, storageMock +} + +func TestProcessBlock_NewTxError(t *testing.T) { + t.Parallel() + proc, storageMock := newMockProcessor(t) + storageMock.EXPECT().NewTx(mock.Anything).Return(nil, errors.New("connection failed")) + + err := proc.ProcessBlock(t.Context(), sync.Block{Num: 1}) + require.ErrorContains(t, err, "connection failed") +} + +func TestProcessBlock_InsertBlockError(t *testing.T) { + t.Parallel() + proc, storageMock := newMockProcessor(t) + tx := dbmocks.NewTxer(t) + + storageMock.EXPECT().NewTx(mock.Anything).Return(tx, nil) + storageMock.EXPECT().InsertBlock(mock.Anything, tx, uint64(1), common.Hash{}).Return(errors.New("insert block failed")) + tx.EXPECT().Rollback().Return(nil) + + err := proc.ProcessBlock(t.Context(), sync.Block{Num: 1}) + require.ErrorContains(t, err, "insert block failed") +} + +func TestProcessBlock_ProcessEventError(t *testing.T) { + t.Parallel() + proc, storageMock := newMockProcessor(t) + tx := dbmocks.NewTxer(t) + embMock := claimstoragemocks.NewEmbeddedProcessor(t) + proc.embeddedProcessor = embMock + + block := sync.Block{Num: 2, Events: []any{Event{}}} + storageMock.EXPECT().NewTx(mock.Anything).Return(tx, nil) + storageMock.EXPECT().InsertBlock(mock.Anything, tx, block.Num, block.Hash).Return(nil) + embMock.EXPECT().ProcessBlockWithTx(mock.Anything, tx, mock.Anything, Event{}).Return(errors.New("event error")) + tx.EXPECT().Rollback().Return(nil) + + err := proc.ProcessBlock(t.Context(), block) + 
require.ErrorContains(t, err, "event error") +} + +func TestProcessBlock_CommitError(t *testing.T) { + t.Parallel() + proc, storageMock := newMockProcessor(t) + tx := dbmocks.NewTxer(t) + + block := sync.Block{Num: 3} + storageMock.EXPECT().NewTx(mock.Anything).Return(tx, nil) + storageMock.EXPECT().InsertBlock(mock.Anything, tx, block.Num, block.Hash).Return(nil) + tx.EXPECT().Commit().Return(errors.New("commit failed")) + tx.EXPECT().Rollback().Return(nil) + + err := proc.ProcessBlock(t.Context(), block) + require.ErrorContains(t, err, "commit failed") +} + +func TestProcessBlock_RollbackErrTxDone(t *testing.T) { + t.Parallel() + // rollbackTx must not log an error when Rollback returns sql.ErrTxDone + proc, storageMock := newMockProcessor(t) + tx := dbmocks.NewTxer(t) + + storageMock.EXPECT().NewTx(mock.Anything).Return(tx, nil) + storageMock.EXPECT().InsertBlock(mock.Anything, tx, uint64(1), common.Hash{}).Return(errors.New("trigger rollback")) + tx.EXPECT().Rollback().Return(sql.ErrTxDone) // must be silenced + + err := proc.ProcessBlock(t.Context(), sync.Block{Num: 1}) + require.Error(t, err) // the InsertBlock error propagates +} + +// --- Reorg error paths (mocks) --- + +func TestReorg_NewTxError(t *testing.T) { + t.Parallel() + proc, storageMock := newMockProcessor(t) + storageMock.EXPECT().NewTx(mock.Anything).Return(nil, errors.New("tx error")) + + err := proc.Reorg(t.Context(), 5) + require.ErrorContains(t, err, "claimsync Reorg: start tx") + require.ErrorContains(t, err, "tx error") +} + +func TestReorg_EmbeddedProcessorError(t *testing.T) { + t.Parallel() + proc, storageMock := newMockProcessor(t) + tx := dbmocks.NewTxer(t) + embMock := claimstoragemocks.NewEmbeddedProcessor(t) + proc.embeddedProcessor = embMock + + storageMock.EXPECT().NewTx(mock.Anything).Return(tx, nil) + embMock.EXPECT().ReorgWithTx(mock.Anything, tx, uint64(5)).Return(int64(0), errors.New("delete failed")) + tx.EXPECT().Rollback().Return(nil) + + err := proc.Reorg(t.Context(), 5) 
+ require.ErrorContains(t, err, "claimsync Reorg") + require.ErrorContains(t, err, "delete failed") +} + +func TestReorg_CommitError(t *testing.T) { + t.Parallel() + proc, storageMock := newMockProcessor(t) + tx := dbmocks.NewTxer(t) + embMock := claimstoragemocks.NewEmbeddedProcessor(t) + proc.embeddedProcessor = embMock + + storageMock.EXPECT().NewTx(mock.Anything).Return(tx, nil) + embMock.EXPECT().ReorgWithTx(mock.Anything, tx, uint64(5)).Return(int64(3), nil) + tx.EXPECT().Commit().Return(errors.New("commit failed")) + tx.EXPECT().Rollback().Return(nil) + + err := proc.Reorg(t.Context(), 5) + require.ErrorContains(t, err, "claimsync Reorg: commit") +} + +// --- GetLastProcessedBlock / GetFirstProcessedBlock via mock --- + +func TestGetLastProcessedBlock_Delegates(t *testing.T) { + t.Parallel() + proc, storageMock := newMockProcessor(t) + storageMock.EXPECT().GetLastProcessedBlock(mock.Anything, nil).Return(uint64(42), true, nil) + + block, found, err := proc.GetLastProcessedBlock(t.Context()) + require.NoError(t, err) + require.True(t, found) + require.Equal(t, uint64(42), block) +} + +func TestGetFirstProcessedBlock_Delegates(t *testing.T) { + t.Parallel() + proc, storageMock := newMockProcessor(t) + storageMock.EXPECT().GetFirstProcessedBlock(mock.Anything, nil).Return(uint64(1), true, nil) + + block, found, err := proc.GetFirstProcessedBlock(t.Context()) + require.NoError(t, err) + require.True(t, found) + require.Equal(t, uint64(1), block) +} From c670f926f0c711d1be1c99b8a4ca0973267c533f Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Wed, 18 Mar 2026 17:45:30 +0100 Subject: [PATCH 16/28] fix: validator nil ptr --- cmd/run.go | 1 + common/components.go | 3 --- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/cmd/run.go b/cmd/run.go index 453e135f1..86061cbba 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -1057,6 +1057,7 @@ func createRollupDataQuerier( if !isNeeded([]string{ 
aggkitcommon.AGGORACLE, aggkitcommon.AGGSENDER, + aggkitcommon.AGGSENDERVALIDATOR, aggkitcommon.AGGCHAINPROOFGEN, aggkitcommon.BRIDGE, aggkitcommon.L1BRIDGESYNC, diff --git a/common/components.go b/common/components.go index 9c12c88b4..441b91af1 100644 --- a/common/components.go +++ b/common/components.go @@ -11,8 +11,6 @@ const ( AGGORACLE = "aggoracle" // BRIDGE name to identify the bridge component (have RPC) BRIDGE = "bridge" - // PROVER name to identify the prover component - PROVER = "prover" // AGGSENDER name to identify the aggsender component AGGSENDER = "aggsender" // L1INFOTREESYNC name to identify the l1infotreesync component @@ -36,7 +34,6 @@ func ValidateComponents(components []string) error { validComponents := map[string]struct{}{ AGGORACLE: {}, BRIDGE: {}, - PROVER: {}, AGGSENDER: {}, L1INFOTREESYNC: {}, L2BRIDGESYNC: {}, From 76bdf02333f38c594e6d5aa54ca096dcfeead6a0 Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Thu, 19 Mar 2026 10:15:24 +0100 Subject: [PATCH 17/28] fix: claimsync issue if first block has no events --- claimsync/claimsync.go | 12 +- claimsync/claimsync_test.go | 5 +- l2gersync/evm_downloader_legacy.go | 2 +- l2gersync/evm_downloader_sovereign.go | 2 +- l2gersync/evm_downloader_sovereign_test.go | 2 +- sync/evmdownloader.go | 66 +++++++- sync/evmdownloader_test.go | 171 ++++++++++++++++++++- sync/evmdriver.go | 9 +- sync/evmdriver_test.go | 24 +-- sync/mock_downloader.go | 22 ++- 10 files changed, 275 insertions(+), 40 deletions(-) diff --git a/claimsync/claimsync.go b/claimsync/claimsync.go index 7496ef146..0d2530714 100644 --- a/claimsync/claimsync.go +++ b/claimsync/claimsync.go @@ -204,16 +204,14 @@ func (c *ClaimSync) SetNextRequiredBlock(ctx context.Context, blockNumber uint64 if err != nil { return fmt.Errorf("claimsync: failed to get first processed block: %w", err) } - if blockNumber <= firstBlock { + if blockNumber < firstBlock { return fmt.Errorf("claimsync: cannot set 
next required block to %d, "+ - "it must be greater than the first block in DB (%d)", + "it must be greater or equal than the first block in DB (%d)", blockNumber, firstBlock) } - if blockNumber > lastBlock { - c.logger.Infof("Cannot set next required block to %d because is running,"+ - " last processed block is %d. Distance: %d", blockNumber, lastBlock, - blockNumber-lastBlock) - } + + c.logger.Infof("Cannot set next required block to %d because is running, but is included. "+ + " Processed blocks [%d - %d]", blockNumber, firstBlock, lastBlock) return nil } diff --git a/claimsync/claimsync_test.go b/claimsync/claimsync_test.go index ccb063fc9..be3e9f23d 100644 --- a/claimsync/claimsync_test.go +++ b/claimsync/claimsync_test.go @@ -90,7 +90,7 @@ func TestClaimSyncerWaitUntilSetNextRequiredBlock(t *testing.T) { _, found, err2 := claimSyncer.GetLastProcessedBlock(ctx) require.NoError(t, err2) require.False(t, found) - logger.Info("*** Setting next required block to 1, so must starting syncing and sync the ClaimAsset") + logger.Info("*** Setting next required block to 0, so must starting syncing and sync the ClaimAsset") err = claimSyncer.SetNextRequiredBlock(ctx, 0) require.NoError(t, err) for i := 0; i < 10; i++ { @@ -111,4 +111,7 @@ func TestClaimSyncerWaitUntilSetNextRequiredBlock(t *testing.T) { require.NoError(t, err) logger.Infof("*** Claims retrieved: %v", claims) require.Equal(t, 1, len(claims)) + logger.Info("*** Setting next required block to 0, it have started but 0 is synced so no error") + err = claimSyncer.SetNextRequiredBlock(ctx, 0) + require.NoError(t, err) } diff --git a/l2gersync/evm_downloader_legacy.go b/l2gersync/evm_downloader_legacy.go index 73bbf0f81..acf277e9b 100644 --- a/l2gersync/evm_downloader_legacy.go +++ b/l2gersync/evm_downloader_legacy.go @@ -90,7 +90,7 @@ func (d *downloaderLegacy) RuntimeData(ctx context.Context) (sync.RuntimeData, e }, nil } -func (d *downloaderLegacy) Download(ctx context.Context, fromBlock uint64, downloadedCh 
chan sync.EVMBlock) { +func (d *downloaderLegacy) Download(ctx context.Context, fromBlock uint64, downloadedCh chan sync.EVMBlock, _ *uint64, _ bool) { var ( attempts int nextL1InfoTreeIndex uint32 diff --git a/l2gersync/evm_downloader_sovereign.go b/l2gersync/evm_downloader_sovereign.go index 243bfbeda..308646aa8 100644 --- a/l2gersync/evm_downloader_sovereign.go +++ b/l2gersync/evm_downloader_sovereign.go @@ -91,7 +91,7 @@ func (d *downloaderSovereign) RuntimeData(ctx context.Context) (sync.RuntimeData }, nil } -func (d *downloaderSovereign) Download(ctx context.Context, fromBlock uint64, downloadedCh chan sync.EVMBlock) { +func (d *downloaderSovereign) Download(ctx context.Context, fromBlock uint64, downloadedCh chan sync.EVMBlock, _ *uint64, _ bool) { for { select { case <-ctx.Done(): diff --git a/l2gersync/evm_downloader_sovereign_test.go b/l2gersync/evm_downloader_sovereign_test.go index 5e72a8398..13ae3bbac 100644 --- a/l2gersync/evm_downloader_sovereign_test.go +++ b/l2gersync/evm_downloader_sovereign_test.go @@ -101,7 +101,7 @@ func TestDownloaderSovereign_Download(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() - downloader.Download(ctx, fromBlock, downloadedCh) + downloader.Download(ctx, fromBlock, downloadedCh, nil, false) // Collect blocks sent through the channel for block := range downloadedCh { diff --git a/sync/evmdownloader.go b/sync/evmdownloader.go index cfad11382..6c71f3609 100644 --- a/sync/evmdownloader.go +++ b/sync/evmdownloader.go @@ -141,7 +141,7 @@ func (d *EVMDownloader) RuntimeData(ctx context.Context) (RuntimeData, error) { }, nil } -func (d *EVMDownloader) Download(ctx context.Context, fromBlock uint64, downloadedCh chan EVMBlock) { +func (d *EVMDownloader) Download(ctx context.Context, fromBlock uint64, downloadedCh chan EVMBlock, lastBlockNum *uint64, includeEmptyFirstBlock bool) { timeTracker := aggkitcommon.NewTimeTracker() timeTracker.Start() defer func() { @@ -149,7 +149,14 
@@ func (d *EVMDownloader) Download(ctx context.Context, fromBlock uint64, download d.log.Infof("EVMDownloader.Download finished in %s", timeTracker.String()) }() lastBlock := d.WaitForNewBlocks(ctx, 0) + initialFromBlock := fromBlock + // initialBlockSent tracks whether fromBlock has been sent on the channel. + // When includeEmptyFirstBlock is false we treat it as already sent (no special handling needed). + initialBlockSent := !includeEmptyFirstBlock toBlock := fromBlock + d.syncBlockChunkSize + if lastBlockNum != nil { + toBlock = min(toBlock, *lastBlockNum) + } iteration := 0 reachTop := false for { @@ -172,6 +179,9 @@ func (d *EVMDownloader) Download(ctx context.Context, fromBlock uint64, download if fromBlock-toBlock < d.syncBlockChunkSize { toBlock = fromBlock + d.syncBlockChunkSize + if lastBlockNum != nil { + toBlock = min(toBlock, *lastBlockNum) + } } } reachTop = false @@ -188,39 +198,85 @@ func (d *EVMDownloader) Download(ctx context.Context, fromBlock uint64, download requestToBlock = lastBlock reachTop = true } + if lastBlockNum != nil && requestToBlock > *lastBlockNum { + requestToBlock = *lastBlockNum + } d.log.Debugf("getting events from blocks [%d to %d] toBlock: %d. lastFinalizedBlock: %d lastBlock: %d", fromBlock, requestToBlock, toBlock, lastFinalizedBlockNumber, lastBlock) blocks := d.GetEventsByBlockRange(ctx, fromBlock, requestToBlock) d.log.Debugf("result events from blocks [%d to %d] -> len(blocks)=%d", fromBlock, requestToBlock, len(blocks)) + + // Force-report initialFromBlock if not yet sent (even when it has no events). + // preReportedInitial tracks whether we sent it as empty this iteration, + // so we can avoid double-reporting it in the normal reporting paths below. 
+ preReportedInitial := false + if !initialBlockSent && requestToBlock >= initialFromBlock { + initialInBlocks := false + for _, b := range blocks { + if b.Num == initialFromBlock { + initialInBlocks = true + break + } + } + if !initialInBlocks { + d.reportEmptyBlock(ctx, downloadedCh, initialFromBlock, lastFinalizedBlockNumber) + preReportedInitial = true + } + initialBlockSent = true + } + if requestToBlock <= lastFinalizedBlockNumber { d.log.Debugf("range is in a safe zone (requestToBlock: %d <= finalized: %d)", requestToBlock, lastFinalizedBlockNumber) d.reportBlocks(downloadedCh, blocks, lastFinalizedBlockNumber) - if blocks.Len() == 0 || blocks[blocks.Len()-1].Num < requestToBlock { + skipEmpty := preReportedInitial && requestToBlock == initialFromBlock + if !skipEmpty && (blocks.Len() == 0 || blocks[blocks.Len()-1].Num < requestToBlock) { d.reportEmptyBlock(ctx, downloadedCh, requestToBlock, lastFinalizedBlockNumber) } fromBlock = requestToBlock + 1 toBlock = fromBlock + d.syncBlockChunkSize + if lastBlockNum != nil { + toBlock = min(toBlock, *lastBlockNum) + } } else { d.log.Debugf("range is not in a safe zone (requestToBlock: %d > finalized: %d)", requestToBlock, lastFinalizedBlockNumber) if blocks.Len() == 0 { if lastFinalizedBlockNumber >= fromBlock { emptyBlock := lastFinalizedBlockNumber - d.reportEmptyBlock(ctx, downloadedCh, emptyBlock, lastFinalizedBlockNumber) + skipEmpty := preReportedInitial && emptyBlock == initialFromBlock + if !skipEmpty { + d.reportEmptyBlock(ctx, downloadedCh, emptyBlock, lastFinalizedBlockNumber) + } fromBlock = emptyBlock + 1 toBlock = fromBlock + d.syncBlockChunkSize + if lastBlockNum != nil { + toBlock = min(toBlock, *lastBlockNum) + } } else { - // Extend range until find logs or reach the last finalized block - toBlock += d.syncBlockChunkSize + if lastBlockNum == nil { + // Extend range until find logs or reach the last finalized block + toBlock += d.syncBlockChunkSize + } + // If lastBlockNum is set, don't extend; 
stop condition below handles it } } else { d.reportBlocks(downloadedCh, blocks, lastFinalizedBlockNumber) fromBlock = blocks[blocks.Len()-1].Num + 1 toBlock = fromBlock + d.syncBlockChunkSize + if lastBlockNum != nil { + toBlock = min(toBlock, *lastBlockNum) + } } } + + // Stop once we have processed up to lastBlockNum + if lastBlockNum != nil && (fromBlock > *lastBlockNum || requestToBlock >= *lastBlockNum) { + close(downloadedCh) + return + } + iteration++ if d.stopDownloaderOnIterationN != 0 && iteration >= d.stopDownloaderOnIterationN { d.log.Infof("stop downloader on iteration %d", iteration) diff --git a/sync/evmdownloader_test.go b/sync/evmdownloader_test.go index 773164ba8..a080d8793 100644 --- a/sync/evmdownloader_test.go +++ b/sync/evmdownloader_test.go @@ -710,7 +710,7 @@ func runSteps(t *testing.T, fromBlock uint64, steps []evmTestStep) { }) } } - downloader.Download(ctx1, fromBlock, downloadCh) + downloader.Download(ctx1, fromBlock, downloadCh, nil, false) mockEthDownloader.AssertExpectations(t) for _, expectedBlock := range expectedBlocks { log.Debugf("waiting block %d ", expectedBlock.Num) @@ -826,3 +826,172 @@ func TestGetLastFinalizedBlock(t *testing.T) { require.Equal(t, uint64(200), blockNumber) }) } + +// newMockDownloader returns an EVMDownloader with a mocked EVMDownloaderInterface injected. 
+func newMockDownloader(t *testing.T) (*EVMDownloader, *EVMDownloaderMock) { + t.Helper() + downloader, _ := NewTestDownloader(t, time.Millisecond) + iface := NewEVMDownloaderMock(t) + downloader.EVMDownloaderInterface = iface + return downloader, iface +} + +func blockHeader(num uint64) EVMBlockHeader { + return EVMBlockHeader{ + Num: num, + Hash: common.HexToHash(fmt.Sprintf("0x%x", num)), + } +} + +func drainChannel(ch chan EVMBlock) []EVMBlock { + var result []EVMBlock + for b := range ch { + result = append(result, b) + } + return result +} + +// TestDownload_LastBlockNum_BlockWithEvents verifies that when lastBlockNum is set and the +// target block has events, the block is reported and Download stops. +func TestDownload_LastBlockNum_BlockWithEvents(t *testing.T) { + t.Parallel() + downloader, iface := newMockDownloader(t) + + ctx, cancel := context.WithTimeout(t.Context(), 5*time.Second) + defer cancel() + + lastBlock := uint64(5) + // Initial WaitForNewBlocks called with 0 + iface.EXPECT().WaitForNewBlocks(mock.Anything, uint64(0)).Return(uint64(10)).Once() + iface.EXPECT().GetLastFinalizedBlock(mock.Anything).Return(uint64(10), nil).Once() + eventsBlock := &EVMBlock{ + IsFinalizedBlock: true, + EVMBlockHeader: blockHeader(5), + Events: []any{testEvent(common.HexToHash("0xAA"))}, + } + iface.EXPECT().GetEventsByBlockRange(mock.Anything, uint64(5), uint64(5)).Return(EVMBlocks{eventsBlock}).Once() + + downloadCh := make(chan EVMBlock, 10) + downloader.Download(ctx, 5, downloadCh, &lastBlock, false) + + received := drainChannel(downloadCh) + require.Len(t, received, 1) + require.Equal(t, uint64(5), received[0].Num) + require.Equal(t, eventsBlock.Events, received[0].Events) + require.True(t, received[0].IsFinalizedBlock) +} + +// TestDownload_LastBlockNum_EmptyFinalizedBlock verifies that when lastBlockNum is set and the +// target block is empty in the finalized zone, an empty block is reported and Download stops. 
+func TestDownload_LastBlockNum_EmptyFinalizedBlock(t *testing.T) { + t.Parallel() + downloader, iface := newMockDownloader(t) + + ctx, cancel := context.WithTimeout(t.Context(), 5*time.Second) + defer cancel() + + lastBlock := uint64(5) + iface.EXPECT().WaitForNewBlocks(mock.Anything, uint64(0)).Return(uint64(10)).Once() + iface.EXPECT().GetLastFinalizedBlock(mock.Anything).Return(uint64(10), nil).Once() + iface.EXPECT().GetEventsByBlockRange(mock.Anything, uint64(5), uint64(5)).Return(EVMBlocks{}).Once() + hdr := blockHeader(5) + iface.EXPECT().GetBlockHeader(mock.Anything, uint64(5)).Return(hdr, false).Once() + + downloadCh := make(chan EVMBlock, 10) + downloader.Download(ctx, 5, downloadCh, &lastBlock, false) + + received := drainChannel(downloadCh) + require.Len(t, received, 1) + require.Equal(t, uint64(5), received[0].Num) + require.Empty(t, received[0].Events) + require.True(t, received[0].IsFinalizedBlock) +} + +// TestDownload_IncludeEmptyFirstBlock_FinalizedZone verifies that with includeEmptyFirstBlock=true, +// the initial block is reported via the pre-report path (not doubled) when in the finalized zone. 
+func TestDownload_IncludeEmptyFirstBlock_FinalizedZone(t *testing.T) { + t.Parallel() + downloader, iface := newMockDownloader(t) + + ctx, cancel := context.WithTimeout(t.Context(), 5*time.Second) + defer cancel() + + lastBlock := uint64(5) + iface.EXPECT().WaitForNewBlocks(mock.Anything, uint64(0)).Return(uint64(10)).Once() + iface.EXPECT().GetLastFinalizedBlock(mock.Anything).Return(uint64(10), nil).Once() + iface.EXPECT().GetEventsByBlockRange(mock.Anything, uint64(5), uint64(5)).Return(EVMBlocks{}).Once() + // GetBlockHeader called exactly once — by the pre-report path, not duplicated by the finalized zone path + hdr := blockHeader(5) + iface.EXPECT().GetBlockHeader(mock.Anything, uint64(5)).Return(hdr, false).Once() + + downloadCh := make(chan EVMBlock, 10) + downloader.Download(ctx, 5, downloadCh, &lastBlock, true) + + received := drainChannel(downloadCh) + require.Len(t, received, 1) + require.Equal(t, uint64(5), received[0].Num) + require.Empty(t, received[0].Events) + require.True(t, received[0].IsFinalizedBlock) +} + +// TestDownload_IncludeEmptyFirstBlock_NotFinalizedZone verifies that with includeEmptyFirstBlock=true, +// the initial block is reported immediately even when it is not yet finalized. 
+func TestDownload_IncludeEmptyFirstBlock_NotFinalizedZone(t *testing.T) { + t.Parallel() + downloader, iface := newMockDownloader(t) + + ctx, cancel := context.WithTimeout(t.Context(), 5*time.Second) + defer cancel() + + lastBlock := uint64(5) + // finalizedBlock=3 is below fromBlock=5 → not-finalized zone + iface.EXPECT().WaitForNewBlocks(mock.Anything, uint64(0)).Return(uint64(10)).Once() + iface.EXPECT().GetLastFinalizedBlock(mock.Anything).Return(uint64(3), nil).Once() + iface.EXPECT().GetEventsByBlockRange(mock.Anything, uint64(5), uint64(5)).Return(EVMBlocks{}).Once() + hdr := blockHeader(5) + iface.EXPECT().GetBlockHeader(mock.Anything, uint64(5)).Return(hdr, false).Once() + + downloadCh := make(chan EVMBlock, 10) + downloader.Download(ctx, 5, downloadCh, &lastBlock, true) + + received := drainChannel(downloadCh) + require.Len(t, received, 1) + require.Equal(t, uint64(5), received[0].Num) + require.Empty(t, received[0].Events) + // Block 5 > finalizedBlock 3 → not finalized + require.False(t, received[0].IsFinalizedBlock) +} + +// TestDownload_LastBlockNum_MultipleBlocksInRange verifies that when lastBlockNum > fromBlock, +// all blocks up to lastBlockNum are reported and Download stops after that. 
+func TestDownload_LastBlockNum_MultipleBlocksInRange(t *testing.T) { + t.Parallel() + downloader, iface := newMockDownloader(t) + + ctx, cancel := context.WithTimeout(t.Context(), 5*time.Second) + defer cancel() + + lastBlock := uint64(7) + iface.EXPECT().WaitForNewBlocks(mock.Anything, uint64(0)).Return(uint64(10)).Once() + iface.EXPECT().GetLastFinalizedBlock(mock.Anything).Return(uint64(10), nil).Once() + // Block 6 has events; block 7 is empty → reportEmptyBlock(7) called + eventsBlock6 := &EVMBlock{ + IsFinalizedBlock: true, + EVMBlockHeader: blockHeader(6), + Events: []any{testEvent(common.HexToHash("0xBB"))}, + } + iface.EXPECT().GetEventsByBlockRange(mock.Anything, uint64(5), uint64(7)).Return(EVMBlocks{eventsBlock6}).Once() + hdr7 := blockHeader(7) + iface.EXPECT().GetBlockHeader(mock.Anything, uint64(7)).Return(hdr7, false).Once() + + downloadCh := make(chan EVMBlock, 10) + downloader.Download(ctx, 5, downloadCh, &lastBlock, false) + + received := drainChannel(downloadCh) + require.Len(t, received, 2) + require.Equal(t, uint64(6), received[0].Num) + require.Equal(t, eventsBlock6.Events, received[0].Events) + require.Equal(t, uint64(7), received[1].Num) + require.Empty(t, received[1].Events) + require.True(t, received[1].IsFinalizedBlock) +} diff --git a/sync/evmdriver.go b/sync/evmdriver.go index 77733607e..2c9ca0d19 100644 --- a/sync/evmdriver.go +++ b/sync/evmdriver.go @@ -34,7 +34,10 @@ func (b Block) String() string { } type Downloader interface { - Download(ctx context.Context, fromBlock uint64, downloadedCh chan EVMBlock) + // Download downloads blocks starting from fromBlock, sending them to downloadedCh. + // If lastBlockNum is not nil, it stops after processing that block. + // If includeEmptyFirstBlock is true, fromBlock is always sent even if it has no events. 
+ Download(ctx context.Context, fromBlock uint64, downloadedCh chan EVMBlock, lastBlockNum *uint64, includeEmptyFirstBlock bool) // RuntimeData returns the runtime data from this downloader // this is used to check that DB is compatible with the runtime data RuntimeData(ctx context.Context) (RuntimeData, error) @@ -151,7 +154,7 @@ func (d *EVMDriver) SyncNextBlock(ctx context.Context, blockNum uint64) error { defer cancel() downloadCh := make(chan EVMBlock, 1) - go d.downloader.Download(cancelCtx, blockNum, downloadCh) + go d.downloader.Download(cancelCtx, blockNum, downloadCh, &blockNum, true) select { case <-ctx.Done(): @@ -222,7 +225,7 @@ reset: // start downloading downloadCh := make(chan EVMBlock, d.downloadBufferSize) go func() { - d.downloader.Download(cancellableCtx, nextBlock, downloadCh) + d.downloader.Download(cancellableCtx, nextBlock, downloadCh, nil, false) log.Warnf("downloader.Download exited, cancelling context") cancel() }() diff --git a/sync/evmdriver_test.go b/sync/evmdriver_test.go index b035e839c..42a05c66b 100644 --- a/sync/evmdriver_test.go +++ b/sync/evmdriver_test.go @@ -60,8 +60,8 @@ func TestSync(t *testing.T) { green bool } reorg1Completed := reorgSemaphore{} - dm.EXPECT().Download(mock.Anything, mock.Anything, mock.Anything). - Run(func(ctx context.Context, _ uint64, downloadedCh chan EVMBlock) { + dm.EXPECT().Download(mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). + Run(func(ctx context.Context, _ uint64, downloadedCh chan EVMBlock, _ *uint64, _ bool) { log.Info("entering mock loop") for { select { @@ -155,8 +155,8 @@ func TestSync_ReorgCancelsRetryHandlerInHandleNewBlock(t *testing.T) { cancelObserved := make(chan struct{}) // infinite loop that keeps feeding the same block - dm.EXPECT().Download(mock.Anything, mock.Anything, mock.Anything). - Run(func(ctx context.Context, _ uint64, ch chan EVMBlock) { + dm.EXPECT().Download(mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). 
+ Run(func(ctx context.Context, _ uint64, ch chan EVMBlock, _ *uint64, _ bool) { for { ch <- expectedBlock select { @@ -998,8 +998,8 @@ func TestSyncNextBlock_DownloadChannelClosedUnexpectedly(t *testing.T) { t.Parallel() driver, _, pm, dm := makeDriver(t) pm.EXPECT().GetLastProcessedBlock(mock.Anything).Return(uint64(0), false, nil) - dm.EXPECT().Download(mock.Anything, uint64(5), mock.Anything). - Run(func(_ context.Context, _ uint64, ch chan EVMBlock) { + dm.EXPECT().Download(mock.Anything, uint64(5), mock.Anything, mock.Anything, mock.Anything). + Run(func(_ context.Context, _ uint64, ch chan EVMBlock, _ *uint64, _ bool) { close(ch) }) @@ -1013,8 +1013,8 @@ func TestSyncNextBlock_ContextCancelledBeforeBlock(t *testing.T) { ctx, cancel := context.WithCancel(t.Context()) pm.EXPECT().GetLastProcessedBlock(mock.Anything).Return(uint64(0), false, nil) // The goroutine may or may not start before the select returns ctx.Done() - dm.EXPECT().Download(mock.Anything, uint64(5), mock.Anything). - Run(func(downloadCtx context.Context, _ uint64, _ chan EVMBlock) { + dm.EXPECT().Download(mock.Anything, uint64(5), mock.Anything, mock.Anything, mock.Anything). + Run(func(downloadCtx context.Context, _ uint64, _ chan EVMBlock, _ *uint64, _ bool) { <-downloadCtx.Done() }).Maybe() cancel() @@ -1032,8 +1032,8 @@ func TestSyncNextBlock_HappyPath(t *testing.T) { } pm.EXPECT().GetLastProcessedBlock(mock.Anything).Return(uint64(0), false, nil) - dm.EXPECT().Download(mock.Anything, uint64(5), mock.Anything). - Run(func(downloadCtx context.Context, _ uint64, ch chan EVMBlock) { + dm.EXPECT().Download(mock.Anything, uint64(5), mock.Anything, mock.Anything, mock.Anything). 
+ Run(func(downloadCtx context.Context, _ uint64, ch chan EVMBlock, _ *uint64, _ bool) { ch <- expectedBlock <-downloadCtx.Done() // wait for cancel() triggered inside SyncNextBlock }) @@ -1074,8 +1074,8 @@ func TestSync_WithFirstBlockNumber_StartsFromGivenBlock(t *testing.T) { pm.EXPECT().GetLastProcessedBlock(mock.Anything).Return(uint64(0), false, nil) downloadStartedFrom := make(chan uint64, 1) - dm.EXPECT().Download(mock.Anything, mock.Anything, mock.Anything). - Run(func(downloadCtx context.Context, fromBlock uint64, ch chan EVMBlock) { + dm.EXPECT().Download(mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). + Run(func(downloadCtx context.Context, fromBlock uint64, ch chan EVMBlock, _ *uint64, _ bool) { downloadStartedFrom <- fromBlock <-downloadCtx.Done() close(ch) diff --git a/sync/mock_downloader.go b/sync/mock_downloader.go index c197d450b..b91dd85a4 100644 --- a/sync/mock_downloader.go +++ b/sync/mock_downloader.go @@ -21,9 +21,9 @@ func (_m *DownloaderMock) EXPECT() *DownloaderMock_Expecter { return &DownloaderMock_Expecter{mock: &_m.Mock} } -// Download provides a mock function with given fields: ctx, fromBlock, downloadedCh -func (_m *DownloaderMock) Download(ctx context.Context, fromBlock uint64, downloadedCh chan EVMBlock) { - _m.Called(ctx, fromBlock, downloadedCh) +// Download provides a mock function with given fields: ctx, fromBlock, downloadedCh, lastBlockNum, includeEmptyFirstBlock +func (_m *DownloaderMock) Download(ctx context.Context, fromBlock uint64, downloadedCh chan EVMBlock, lastBlockNum *uint64, includeEmptyFirstBlock bool) { + _m.Called(ctx, fromBlock, downloadedCh, lastBlockNum, includeEmptyFirstBlock) } // DownloaderMock_Download_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Download' @@ -35,13 +35,19 @@ type DownloaderMock_Download_Call struct { // - ctx context.Context // - fromBlock uint64 // - downloadedCh chan EVMBlock -func (_e *DownloaderMock_Expecter) 
Download(ctx interface{}, fromBlock interface{}, downloadedCh interface{}) *DownloaderMock_Download_Call { - return &DownloaderMock_Download_Call{Call: _e.mock.On("Download", ctx, fromBlock, downloadedCh)} +// - lastBlockNum *uint64 +// - includeEmptyFirstBlock bool +func (_e *DownloaderMock_Expecter) Download(ctx interface{}, fromBlock interface{}, downloadedCh interface{}, lastBlockNum interface{}, includeEmptyFirstBlock interface{}) *DownloaderMock_Download_Call { + return &DownloaderMock_Download_Call{Call: _e.mock.On("Download", ctx, fromBlock, downloadedCh, lastBlockNum, includeEmptyFirstBlock)} } -func (_c *DownloaderMock_Download_Call) Run(run func(ctx context.Context, fromBlock uint64, downloadedCh chan EVMBlock)) *DownloaderMock_Download_Call { +func (_c *DownloaderMock_Download_Call) Run(run func(ctx context.Context, fromBlock uint64, downloadedCh chan EVMBlock, lastBlockNum *uint64, includeEmptyFirstBlock bool)) *DownloaderMock_Download_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(uint64), args[2].(chan EVMBlock)) + var lastBlockNum *uint64 + if args[3] != nil { + lastBlockNum = args[3].(*uint64) + } + run(args[0].(context.Context), args[1].(uint64), args[2].(chan EVMBlock), lastBlockNum, args[4].(bool)) }) return _c } @@ -51,7 +57,7 @@ func (_c *DownloaderMock_Download_Call) Return() *DownloaderMock_Download_Call { return _c } -func (_c *DownloaderMock_Download_Call) RunAndReturn(run func(context.Context, uint64, chan EVMBlock)) *DownloaderMock_Download_Call { +func (_c *DownloaderMock_Download_Call) RunAndReturn(run func(context.Context, uint64, chan EVMBlock, *uint64, bool)) *DownloaderMock_Download_Call { _c.Run(run) return _c } From 1ea89c108481ecb38e9c2974e15cdee9b1432350 Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Thu, 19 Mar 2026 11:05:34 +0100 Subject: [PATCH 18/28] fix: lint --- l2gersync/evm_downloader_legacy.go | 4 +- 
l2gersync/evm_downloader_sovereign.go | 4 +- sync/evmdownloader.go | 13 +-- sync/evmdownloader_test.go | 119 +++++++++----------------- sync/evmdriver.go | 3 +- 5 files changed, 55 insertions(+), 88 deletions(-) diff --git a/l2gersync/evm_downloader_legacy.go b/l2gersync/evm_downloader_legacy.go index acf277e9b..85540976c 100644 --- a/l2gersync/evm_downloader_legacy.go +++ b/l2gersync/evm_downloader_legacy.go @@ -90,7 +90,9 @@ func (d *downloaderLegacy) RuntimeData(ctx context.Context) (sync.RuntimeData, e }, nil } -func (d *downloaderLegacy) Download(ctx context.Context, fromBlock uint64, downloadedCh chan sync.EVMBlock, _ *uint64, _ bool) { +func (d *downloaderLegacy) Download( + ctx context.Context, fromBlock uint64, downloadedCh chan sync.EVMBlock, _ *uint64, _ bool, +) { var ( attempts int nextL1InfoTreeIndex uint32 diff --git a/l2gersync/evm_downloader_sovereign.go b/l2gersync/evm_downloader_sovereign.go index 308646aa8..3c940025f 100644 --- a/l2gersync/evm_downloader_sovereign.go +++ b/l2gersync/evm_downloader_sovereign.go @@ -91,7 +91,9 @@ func (d *downloaderSovereign) RuntimeData(ctx context.Context) (sync.RuntimeData }, nil } -func (d *downloaderSovereign) Download(ctx context.Context, fromBlock uint64, downloadedCh chan sync.EVMBlock, _ *uint64, _ bool) { +func (d *downloaderSovereign) Download( + ctx context.Context, fromBlock uint64, downloadedCh chan sync.EVMBlock, _ *uint64, _ bool, +) { for { select { case <-ctx.Done(): diff --git a/sync/evmdownloader.go b/sync/evmdownloader.go index 6c71f3609..b430e52d2 100644 --- a/sync/evmdownloader.go +++ b/sync/evmdownloader.go @@ -141,7 +141,10 @@ func (d *EVMDownloader) RuntimeData(ctx context.Context) (RuntimeData, error) { }, nil } -func (d *EVMDownloader) Download(ctx context.Context, fromBlock uint64, downloadedCh chan EVMBlock, lastBlockNum *uint64, includeEmptyFirstBlock bool) { +func (d *EVMDownloader) Download( + ctx context.Context, fromBlock uint64, downloadedCh chan EVMBlock, + lastBlockNum 
*uint64, includeEmptyFirstBlock bool, +) { timeTracker := aggkitcommon.NewTimeTracker() timeTracker.Start() defer func() { @@ -254,11 +257,9 @@ func (d *EVMDownloader) Download(ctx context.Context, fromBlock uint64, download if lastBlockNum != nil { toBlock = min(toBlock, *lastBlockNum) } - } else { - if lastBlockNum == nil { - // Extend range until find logs or reach the last finalized block - toBlock += d.syncBlockChunkSize - } + } else if lastBlockNum == nil { + // Extend range until find logs or reach the last finalized block + toBlock += d.syncBlockChunkSize // If lastBlockNum is set, don't extend; stop condition below handles it } } else { diff --git a/sync/evmdownloader_test.go b/sync/evmdownloader_test.go index a080d8793..438cff90f 100644 --- a/sync/evmdownloader_test.go +++ b/sync/evmdownloader_test.go @@ -844,7 +844,7 @@ func blockHeader(num uint64) EVMBlockHeader { } func drainChannel(ch chan EVMBlock) []EVMBlock { - var result []EVMBlock + result := make([]EVMBlock, 0) for b := range ch { result = append(result, b) } @@ -881,85 +881,46 @@ func TestDownload_LastBlockNum_BlockWithEvents(t *testing.T) { require.True(t, received[0].IsFinalizedBlock) } -// TestDownload_LastBlockNum_EmptyFinalizedBlock verifies that when lastBlockNum is set and the -// target block is empty in the finalized zone, an empty block is reported and Download stops. -func TestDownload_LastBlockNum_EmptyFinalizedBlock(t *testing.T) { +// TestDownload_EmptyBlock verifies empty-block reporting for every combination of +// includeEmptyFirstBlock and finality zone. GetBlockHeader must always be called exactly once. 
+func TestDownload_EmptyBlock(t *testing.T) { t.Parallel() - downloader, iface := newMockDownloader(t) - - ctx, cancel := context.WithTimeout(t.Context(), 5*time.Second) - defer cancel() - - lastBlock := uint64(5) - iface.EXPECT().WaitForNewBlocks(mock.Anything, uint64(0)).Return(uint64(10)).Once() - iface.EXPECT().GetLastFinalizedBlock(mock.Anything).Return(uint64(10), nil).Once() - iface.EXPECT().GetEventsByBlockRange(mock.Anything, uint64(5), uint64(5)).Return(EVMBlocks{}).Once() - hdr := blockHeader(5) - iface.EXPECT().GetBlockHeader(mock.Anything, uint64(5)).Return(hdr, false).Once() - - downloadCh := make(chan EVMBlock, 10) - downloader.Download(ctx, 5, downloadCh, &lastBlock, false) - - received := drainChannel(downloadCh) - require.Len(t, received, 1) - require.Equal(t, uint64(5), received[0].Num) - require.Empty(t, received[0].Events) - require.True(t, received[0].IsFinalizedBlock) -} - -// TestDownload_IncludeEmptyFirstBlock_FinalizedZone verifies that with includeEmptyFirstBlock=true, -// the initial block is reported via the pre-report path (not doubled) when in the finalized zone. 
-func TestDownload_IncludeEmptyFirstBlock_FinalizedZone(t *testing.T) { - t.Parallel() - downloader, iface := newMockDownloader(t) - - ctx, cancel := context.WithTimeout(t.Context(), 5*time.Second) - defer cancel() - - lastBlock := uint64(5) - iface.EXPECT().WaitForNewBlocks(mock.Anything, uint64(0)).Return(uint64(10)).Once() - iface.EXPECT().GetLastFinalizedBlock(mock.Anything).Return(uint64(10), nil).Once() - iface.EXPECT().GetEventsByBlockRange(mock.Anything, uint64(5), uint64(5)).Return(EVMBlocks{}).Once() - // GetBlockHeader called exactly once — by the pre-report path, not duplicated by the finalized zone path - hdr := blockHeader(5) - iface.EXPECT().GetBlockHeader(mock.Anything, uint64(5)).Return(hdr, false).Once() - - downloadCh := make(chan EVMBlock, 10) - downloader.Download(ctx, 5, downloadCh, &lastBlock, true) - - received := drainChannel(downloadCh) - require.Len(t, received, 1) - require.Equal(t, uint64(5), received[0].Num) - require.Empty(t, received[0].Events) - require.True(t, received[0].IsFinalizedBlock) -} - -// TestDownload_IncludeEmptyFirstBlock_NotFinalizedZone verifies that with includeEmptyFirstBlock=true, -// the initial block is reported immediately even when it is not yet finalized. 
-func TestDownload_IncludeEmptyFirstBlock_NotFinalizedZone(t *testing.T) { - t.Parallel() - downloader, iface := newMockDownloader(t) - - ctx, cancel := context.WithTimeout(t.Context(), 5*time.Second) - defer cancel() - - lastBlock := uint64(5) - // finalizedBlock=3 is below fromBlock=5 → not-finalized zone - iface.EXPECT().WaitForNewBlocks(mock.Anything, uint64(0)).Return(uint64(10)).Once() - iface.EXPECT().GetLastFinalizedBlock(mock.Anything).Return(uint64(3), nil).Once() - iface.EXPECT().GetEventsByBlockRange(mock.Anything, uint64(5), uint64(5)).Return(EVMBlocks{}).Once() - hdr := blockHeader(5) - iface.EXPECT().GetBlockHeader(mock.Anything, uint64(5)).Return(hdr, false).Once() - - downloadCh := make(chan EVMBlock, 10) - downloader.Download(ctx, 5, downloadCh, &lastBlock, true) - - received := drainChannel(downloadCh) - require.Len(t, received, 1) - require.Equal(t, uint64(5), received[0].Num) - require.Empty(t, received[0].Events) - // Block 5 > finalizedBlock 3 → not finalized - require.False(t, received[0].IsFinalizedBlock) + cases := []struct { + name string + finalizedBlock uint64 + includeEmptyFirst bool + wantIsFinalizedBlock bool + }{ + // finalized zone — normal path (includeEmptyFirstBlock=false) + {"finalized/noFlag", 10, false, true}, + // finalized zone — pre-report path (includeEmptyFirstBlock=true); GetBlockHeader called once, not twice + {"finalized/withFlag", 10, true, true}, + // not-finalized zone — pre-report path forces immediate report without waiting for finality + {"notFinalized/withFlag", 3, true, false}, + } + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + downloader, iface := newMockDownloader(t) + ctx, cancel := context.WithTimeout(t.Context(), 5*time.Second) + defer cancel() + + lastBlock := uint64(5) + iface.EXPECT().WaitForNewBlocks(mock.Anything, uint64(0)).Return(uint64(10)).Once() + iface.EXPECT().GetLastFinalizedBlock(mock.Anything).Return(tc.finalizedBlock, nil).Once() + 
iface.EXPECT().GetEventsByBlockRange(mock.Anything, uint64(5), uint64(5)).Return(EVMBlocks{}).Once() + iface.EXPECT().GetBlockHeader(mock.Anything, uint64(5)).Return(blockHeader(5), false).Once() + + downloadCh := make(chan EVMBlock, 10) + downloader.Download(ctx, 5, downloadCh, &lastBlock, tc.includeEmptyFirst) + + received := drainChannel(downloadCh) + require.Len(t, received, 1) + require.Equal(t, uint64(5), received[0].Num) + require.Empty(t, received[0].Events) + require.Equal(t, tc.wantIsFinalizedBlock, received[0].IsFinalizedBlock) + }) + } } // TestDownload_LastBlockNum_MultipleBlocksInRange verifies that when lastBlockNum > fromBlock, diff --git a/sync/evmdriver.go b/sync/evmdriver.go index 2c9ca0d19..acb83eb98 100644 --- a/sync/evmdriver.go +++ b/sync/evmdriver.go @@ -37,7 +37,8 @@ type Downloader interface { // Download downloads blocks starting from fromBlock, sending them to downloadedCh. // If lastBlockNum is not nil, it stops after processing that block. // If includeEmptyFirstBlock is true, fromBlock is always sent even if it has no events. 
- Download(ctx context.Context, fromBlock uint64, downloadedCh chan EVMBlock, lastBlockNum *uint64, includeEmptyFirstBlock bool) + Download(ctx context.Context, fromBlock uint64, downloadedCh chan EVMBlock, + lastBlockNum *uint64, includeEmptyFirstBlock bool) // RuntimeData returns the runtime data from this downloader // this is used to check that DB is compatible with the runtime data RuntimeData(ctx context.Context) (RuntimeData, error) From f018cd491271683fc01ba40f22670390c72b7417 Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Thu, 19 Mar 2026 11:46:51 +0100 Subject: [PATCH 19/28] fix: error in FEP that get stuck because claimsyncer was not started --- aggsender/aggsender.go | 10 ++- aggsender/query/bridge_query.go | 4 +- aggsender/types/status.go | 21 +++++- claimsync/claim_data_test.go | 78 +++++++++++++++++++++ claimsync/claimsync.go | 4 ++ claimsync/claimsync_rpc.go | 21 ++++-- claimsync/mocks/mock_claim_syncer.go | 63 +++++++++++++++++ claimsync/types/claim_reader.go | 1 + claimsync/types/mocks/mock_claims_reader.go | 64 +++++++++++++++++ scripts/local_config_fep | 2 +- sync/mock_downloader.go | 6 +- test/config/fep-config.toml.template | 5 -- 12 files changed, 255 insertions(+), 24 deletions(-) create mode 100644 claimsync/claim_data_test.go diff --git a/aggsender/aggsender.go b/aggsender/aggsender.go index 76e4f216e..8ed59e90a 100644 --- a/aggsender/aggsender.go +++ b/aggsender/aggsender.go @@ -277,9 +277,14 @@ func (a *AggSender) Start(ctx context.Context) { a.log.Info("AggSender started") metrics.Register() a.status.Start(time.Now().UTC()) - + a.status.SetStatus(types.StatusCheckingDBCompatibility, a.log) a.checkDBCompatibility(ctx) + a.status.SetStatus(types.StatusCheckingInitialStage, a.log) a.certStatusChecker.CheckInitialStatus(ctx, a.cfg.DelayBetweenRetries.Duration, a.status) + a.status.SetStatus(types.StartingClaimSyncerStage, a.log) + a.setClaimSyncerNextRequiredBlock(ctx) + + 
a.status.SetStatus(types.StatusFlowCheckingInitialStage, a.log) if err := a.flow.CheckInitialStatus(ctx); err != nil { a.log.Panicf("error checking flow Initial Status: %v", err) } @@ -327,9 +332,8 @@ func (a *AggSender) sendCertificates(ctx context.Context, returnAfterNIterations a.log.Debugf("AggSender: OnIdle") a.certificateSendTrigger.OnIdle() } - a.setClaimSyncerNextRequiredBlock(ctx) - a.status.Status = types.StatusCertificateStage + a.status.SetStatus(types.StatusCertificateStage, a.log) iteration := 0 for { select { diff --git a/aggsender/query/bridge_query.go b/aggsender/query/bridge_query.go index 142743783..5da9d1f22 100644 --- a/aggsender/query/bridge_query.go +++ b/aggsender/query/bridge_query.go @@ -149,7 +149,9 @@ func (b *bridgeDataQuerier) WaitForSyncerToCatchUp(ctx context.Context, block ui if bridgeReady && claimReady { return nil } - + b.log.Infof("bridgeDataQuerier - waiting for syncers to catch up to block: %d "+ + "(bridgeReady: %t, claimReady: %t), retrying in %s", + block, bridgeReady, claimReady, b.delayBetweenRetries) select { case <-ctx.Done(): return ctx.Err() diff --git a/aggsender/types/status.go b/aggsender/types/status.go index 510d27afe..3e14ea2c2 100644 --- a/aggsender/types/status.go +++ b/aggsender/types/status.go @@ -4,14 +4,18 @@ import ( "time" "github.com/agglayer/aggkit" + aggkitcommon "github.com/agglayer/aggkit/common" ) type AggsenderStatusType string const ( - StatusNone AggsenderStatusType = "none" - StatusCheckingInitialStage AggsenderStatusType = "checking_initial_stage" - StatusCertificateStage AggsenderStatusType = "certificate_stage" + StatusNone AggsenderStatusType = "none" + StatusCheckingDBCompatibility AggsenderStatusType = "checking_db_compatibility" + StatusCheckingInitialStage AggsenderStatusType = "checking_initial_stage" + StartingClaimSyncerStage AggsenderStatusType = "starting_claim_syncer_stage" + StatusFlowCheckingInitialStage AggsenderStatusType = "checking_flow_initial_stage" + 
StatusCertificateStage AggsenderStatusType = "certificate_stage" ) type AggsenderStatus struct { @@ -34,6 +38,17 @@ func (a *AggsenderStatus) Start(startTime time.Time) { a.StartTime = startTime } +func (a *AggsenderStatus) SetStatus(status AggsenderStatusType, logger aggkitcommon.Logger) { + a.Status = status + if logger != nil { + logger.Infof("Aggsender status changed to: %s", status) + } +} + +func (a *AggsenderStatus) GetStatus() AggsenderStatusType { + return a.Status +} + func (a *AggsenderStatus) SetLastError(err error) { if err == nil { a.LastError = "" diff --git a/claimsync/claim_data_test.go b/claimsync/claim_data_test.go new file mode 100644 index 000000000..c87afc9d7 --- /dev/null +++ b/claimsync/claim_data_test.go @@ -0,0 +1,78 @@ +package claimsync + +import ( + "math/big" + "testing" + + claimsynctypes "github.com/agglayer/aggkit/claimsync/types" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" +) + +// --- Constants --- + +func TestClaimTypeConstants(t *testing.T) { + t.Parallel() + require.Equal(t, ClaimType("ClaimEvent"), ClaimEvent) + require.Equal(t, ClaimType("DetailedClaimEvent"), DetailedClaimEvent) + require.NotEqual(t, ClaimEvent, DetailedClaimEvent) +} + +func TestClaimTypeConstants_MatchUnderlyingPackage(t *testing.T) { + t.Parallel() + require.Equal(t, claimsynctypes.ClaimEvent, ClaimEvent) + require.Equal(t, claimsynctypes.DetailedClaimEvent, DetailedClaimEvent) +} + +// --- Type aliases --- +// These tests verify that the aliases are truly interchangeable with their +// underlying types by passing claimsynctypes values to functions that accept +// the alias types — a compile error here would mean the alias is broken. 
+ +func requireClaim(t *testing.T, c Claim, blockNum uint64, claimType ClaimType) { + t.Helper() + require.Equal(t, blockNum, c.BlockNum) + require.Equal(t, claimType, c.Type) +} + +func requireUnsetClaim(t *testing.T, u UnsetClaim, blockNum uint64) { + t.Helper() + require.Equal(t, blockNum, u.BlockNum) +} + +func requireSetClaim(t *testing.T, s SetClaim, blockNum uint64) { + t.Helper() + require.Equal(t, blockNum, s.BlockNum) +} + +func TestClaimAlias_AssignableFromUnderlying(t *testing.T) { + t.Parallel() + c := claimsynctypes.Claim{ + BlockNum: 1, + GlobalIndex: big.NewInt(42), + TxHash: common.HexToHash("0xdeadbeef"), + Type: claimsynctypes.ClaimEvent, + } + // passing claimsynctypes.Claim where Claim is expected proves alias identity + requireClaim(t, c, 1, ClaimEvent) +} + +func TestUnsetClaimAlias_AssignableFromUnderlying(t *testing.T) { + t.Parallel() + u := claimsynctypes.UnsetClaim{ + BlockNum: 2, + GlobalIndex: big.NewInt(10), + TxHash: common.HexToHash("0xabc"), + } + requireUnsetClaim(t, u, 2) +} + +func TestSetClaimAlias_AssignableFromUnderlying(t *testing.T) { + t.Parallel() + s := claimsynctypes.SetClaim{ + BlockNum: 3, + GlobalIndex: big.NewInt(20), + TxHash: common.HexToHash("0x123"), + } + requireSetClaim(t, s, 3) +} diff --git a/claimsync/claimsync.go b/claimsync/claimsync.go index 0d2530714..98ca2b2a3 100644 --- a/claimsync/claimsync.go +++ b/claimsync/claimsync.go @@ -220,6 +220,10 @@ func (c *ClaimSync) GetLastProcessedBlock(ctx context.Context) (uint64, bool, er return c.reader.GetLastProcessedBlock(ctx, nil) } +func (c *ClaimSync) GetFirstProcessedBlock(ctx context.Context) (uint64, bool, error) { + return c.reader.GetFirstProcessedBlock(ctx, nil) +} + func (c *ClaimSync) GetClaims(ctx context.Context, fromBlock, toBlock uint64) ([]claimsynctypes.Claim, error) { return c.reader.GetClaims(ctx, nil, fromBlock, toBlock) } diff --git a/claimsync/claimsync_rpc.go b/claimsync/claimsync_rpc.go index 72568d3d9..4d5e8ac73 100644 --- 
a/claimsync/claimsync_rpc.go +++ b/claimsync/claimsync_rpc.go @@ -14,6 +14,7 @@ import ( // ClaimSyncer is the interface required by ClaimSyncRPC. type ClaimSyncer interface { GetLastProcessedBlock(ctx context.Context) (uint64, bool, error) + GetFirstProcessedBlock(ctx context.Context) (uint64, bool, error) GetClaims(ctx context.Context, fromBlock, toBlock uint64) ([]claimsynctypes.Claim, error) GetClaimsByGlobalIndex(ctx context.Context, globalIndex *big.Int) ([]claimsynctypes.Claim, error) SetNextRequiredBlock(ctx context.Context, blockNum uint64) error @@ -37,17 +38,25 @@ func NewClaimSyncRPC(logger aggkitcommon.Logger, claimSync ClaimSyncer) *ClaimSy // curl -X POST http://localhost:5576/ -H "Content-Type: application/json" \ // -d '{"method":"l2claimsync_status", "params":[], "id":1}' func (r *ClaimSyncRPC) Status() (interface{}, jRPC.Error) { - lastBlock, _, err := r.claimSync.GetLastProcessedBlock(context.Background()) + lastBlock, foundLast, err := r.claimSync.GetLastProcessedBlock(context.Background()) if err != nil { return nil, jRPC.NewRPCError(jRPC.DefaultErrorCode, "ClaimSyncRPC.Status: getting last processed block: %v", err) } + firstBlock, foundFirst, err := r.claimSync.GetFirstProcessedBlock(context.Background()) + if err != nil { + return nil, jRPC.NewRPCError(jRPC.DefaultErrorCode, + "ClaimSyncRPC.Status: getting first processed block: %v", err) + } info := struct { - Status string `json:"status"` - LastProcessedBlock uint64 `json:"lastProcessedBlock"` - }{ - Status: "running", - LastProcessedBlock: lastBlock, + FirstProcessedBlock *uint64 `json:"firstProcessedBlock,omitempty"` + LastProcessedBlock *uint64 `json:"lastProcessedBlock"` + }{} + if foundFirst { + info.FirstProcessedBlock = &firstBlock + } + if foundLast { + info.LastProcessedBlock = &lastBlock } return info, nil } diff --git a/claimsync/mocks/mock_claim_syncer.go b/claimsync/mocks/mock_claim_syncer.go index 678f90819..726f8dbb3 100644 --- a/claimsync/mocks/mock_claim_syncer.go +++ 
b/claimsync/mocks/mock_claim_syncer.go @@ -144,6 +144,69 @@ func (_c *ClaimSyncer_GetClaimsByGlobalIndex_Call) RunAndReturn(run func(context return _c } +// GetFirstProcessedBlock provides a mock function with given fields: ctx +func (_m *ClaimSyncer) GetFirstProcessedBlock(ctx context.Context) (uint64, bool, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for GetFirstProcessedBlock") + } + + var r0 uint64 + var r1 bool + var r2 error + if rf, ok := ret.Get(0).(func(context.Context) (uint64, bool, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) uint64); ok { + r0 = rf(ctx) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(context.Context) bool); ok { + r1 = rf(ctx) + } else { + r1 = ret.Get(1).(bool) + } + + if rf, ok := ret.Get(2).(func(context.Context) error); ok { + r2 = rf(ctx) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// ClaimSyncer_GetFirstProcessedBlock_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetFirstProcessedBlock' +type ClaimSyncer_GetFirstProcessedBlock_Call struct { + *mock.Call +} + +// GetFirstProcessedBlock is a helper method to define mock.On call +// - ctx context.Context +func (_e *ClaimSyncer_Expecter) GetFirstProcessedBlock(ctx interface{}) *ClaimSyncer_GetFirstProcessedBlock_Call { + return &ClaimSyncer_GetFirstProcessedBlock_Call{Call: _e.mock.On("GetFirstProcessedBlock", ctx)} +} + +func (_c *ClaimSyncer_GetFirstProcessedBlock_Call) Run(run func(ctx context.Context)) *ClaimSyncer_GetFirstProcessedBlock_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *ClaimSyncer_GetFirstProcessedBlock_Call) Return(_a0 uint64, _a1 bool, _a2 error) *ClaimSyncer_GetFirstProcessedBlock_Call { + _c.Call.Return(_a0, _a1, _a2) + return _c +} + +func (_c *ClaimSyncer_GetFirstProcessedBlock_Call) RunAndReturn(run 
func(context.Context) (uint64, bool, error)) *ClaimSyncer_GetFirstProcessedBlock_Call { + _c.Call.Return(run) + return _c +} + // GetLastProcessedBlock provides a mock function with given fields: ctx func (_m *ClaimSyncer) GetLastProcessedBlock(ctx context.Context) (uint64, bool, error) { ret := _m.Called(ctx) diff --git a/claimsync/types/claim_reader.go b/claimsync/types/claim_reader.go index 9a949a1ce..5dea927ac 100644 --- a/claimsync/types/claim_reader.go +++ b/claimsync/types/claim_reader.go @@ -10,6 +10,7 @@ import ( // ClaimsReader provides read-only access type ClaimsReader interface { + GetFirstProcessedBlock(ctx context.Context, tx dbtypes.Querier) (uint64, bool, error) GetLastProcessedBlock(ctx context.Context, tx dbtypes.Querier) (uint64, bool, error) GetBoundaryBlockForClaimType(ctx context.Context, tx dbtypes.Querier, claimType ClaimType) (uint64, error) GetClaims(ctx context.Context, tx dbtypes.Querier, fromBlock, toBlock uint64) ([]Claim, error) diff --git a/claimsync/types/mocks/mock_claims_reader.go b/claimsync/types/mocks/mock_claims_reader.go index d5ff81c11..96c0e499b 100644 --- a/claimsync/types/mocks/mock_claims_reader.go +++ b/claimsync/types/mocks/mock_claims_reader.go @@ -336,6 +336,70 @@ func (_c *ClaimsReader_GetClaimsPaged_Call) RunAndReturn(run func(context.Contex return _c } +// GetFirstProcessedBlock provides a mock function with given fields: ctx, tx +func (_m *ClaimsReader) GetFirstProcessedBlock(ctx context.Context, tx types.Querier) (uint64, bool, error) { + ret := _m.Called(ctx, tx) + + if len(ret) == 0 { + panic("no return value specified for GetFirstProcessedBlock") + } + + var r0 uint64 + var r1 bool + var r2 error + if rf, ok := ret.Get(0).(func(context.Context, types.Querier) (uint64, bool, error)); ok { + return rf(ctx, tx) + } + if rf, ok := ret.Get(0).(func(context.Context, types.Querier) uint64); ok { + r0 = rf(ctx, tx) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(context.Context, 
types.Querier) bool); ok { + r1 = rf(ctx, tx) + } else { + r1 = ret.Get(1).(bool) + } + + if rf, ok := ret.Get(2).(func(context.Context, types.Querier) error); ok { + r2 = rf(ctx, tx) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// ClaimsReader_GetFirstProcessedBlock_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetFirstProcessedBlock' +type ClaimsReader_GetFirstProcessedBlock_Call struct { + *mock.Call +} + +// GetFirstProcessedBlock is a helper method to define mock.On call +// - ctx context.Context +// - tx types.Querier +func (_e *ClaimsReader_Expecter) GetFirstProcessedBlock(ctx interface{}, tx interface{}) *ClaimsReader_GetFirstProcessedBlock_Call { + return &ClaimsReader_GetFirstProcessedBlock_Call{Call: _e.mock.On("GetFirstProcessedBlock", ctx, tx)} +} + +func (_c *ClaimsReader_GetFirstProcessedBlock_Call) Run(run func(ctx context.Context, tx types.Querier)) *ClaimsReader_GetFirstProcessedBlock_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(types.Querier)) + }) + return _c +} + +func (_c *ClaimsReader_GetFirstProcessedBlock_Call) Return(_a0 uint64, _a1 bool, _a2 error) *ClaimsReader_GetFirstProcessedBlock_Call { + _c.Call.Return(_a0, _a1, _a2) + return _c +} + +func (_c *ClaimsReader_GetFirstProcessedBlock_Call) RunAndReturn(run func(context.Context, types.Querier) (uint64, bool, error)) *ClaimsReader_GetFirstProcessedBlock_Call { + _c.Call.Return(run) + return _c +} + // GetLastProcessedBlock provides a mock function with given fields: ctx, tx func (_m *ClaimsReader) GetLastProcessedBlock(ctx context.Context, tx types.Querier) (uint64, bool, error) { ret := _m.Called(ctx, tx) diff --git a/scripts/local_config_fep b/scripts/local_config_fep index c0e4cc88a..e128133d7 100755 --- a/scripts/local_config_fep +++ b/scripts/local_config_fep @@ -2,7 +2,7 @@ source $(dirname $0)/local_config_helper export KURTOSIS_ENCLAVE=op -export 
AGGKIT_CONFIG_ARTIFACT_NAME=aggkit-config-artifact +export AGGKIT_CONFIG_ARTIFACT_NAME=${AGGKIT_CONFIG_ARTIFACT_NAME:=aggkit-config-001} ############################################################################### function export_values_of_aggkit_config() { local AGGKIT_CONFIG_FILE="$1" diff --git a/sync/mock_downloader.go b/sync/mock_downloader.go index b91dd85a4..9baa4f242 100644 --- a/sync/mock_downloader.go +++ b/sync/mock_downloader.go @@ -43,11 +43,7 @@ func (_e *DownloaderMock_Expecter) Download(ctx interface{}, fromBlock interface func (_c *DownloaderMock_Download_Call) Run(run func(ctx context.Context, fromBlock uint64, downloadedCh chan EVMBlock, lastBlockNum *uint64, includeEmptyFirstBlock bool)) *DownloaderMock_Download_Call { _c.Call.Run(func(args mock.Arguments) { - var lastBlockNum *uint64 - if args[3] != nil { - lastBlockNum = args[3].(*uint64) - } - run(args[0].(context.Context), args[1].(uint64), args[2].(chan EVMBlock), lastBlockNum, args[4].(bool)) + run(args[0].(context.Context), args[1].(uint64), args[2].(chan EVMBlock), args[3].(*uint64), args[4].(bool)) }) return _c } diff --git a/test/config/fep-config.toml.template b/test/config/fep-config.toml.template index 9ca6628ee..20eb821e2 100644 --- a/test/config/fep-config.toml.template +++ b/test/config/fep-config.toml.template @@ -319,11 +319,6 @@ PrivateKeys = [{Path = "{{.zkevm_aggoracle_privatekey_path}}", Password = "{{.zk # ------------------------------------------------------------------------------ # FinalizedStatusL1NumberOfBlocks = 10 -[AggOracle.EVMSender.EthTxManager.Etherman] -# ------------------------------------------------------------------------------ -# Needs to be set to be the sovereign L2 chain id -# ------------------------------------------------------------------------------ -L1ChainID = "{{.l2_chain_id}}" # ============================================================================== # ____ ____ ___ ____ ____ _____ _ ____ ______ ___ _ ____ From 
f883259d3e55b9a58ac34721de0037f75073b017 Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Thu, 19 Mar 2026 15:48:32 +0100 Subject: [PATCH 20/28] fix: start claimsync --- aggsender/aggsender.go | 1 - claimsync/claimsync.go | 14 -------------- cmd/run.go | 21 +++++++-------------- 3 files changed, 7 insertions(+), 29 deletions(-) diff --git a/aggsender/aggsender.go b/aggsender/aggsender.go index 8ed59e90a..1785b5dd1 100644 --- a/aggsender/aggsender.go +++ b/aggsender/aggsender.go @@ -283,7 +283,6 @@ func (a *AggSender) Start(ctx context.Context) { a.certStatusChecker.CheckInitialStatus(ctx, a.cfg.DelayBetweenRetries.Duration, a.status) a.status.SetStatus(types.StartingClaimSyncerStage, a.log) a.setClaimSyncerNextRequiredBlock(ctx) - a.status.SetStatus(types.StatusFlowCheckingInitialStage, a.log) if err := a.flow.CheckInitialStatus(ctx); err != nil { a.log.Panicf("error checking flow Initial Status: %v", err) diff --git a/claimsync/claimsync.go b/claimsync/claimsync.go index 98ca2b2a3..9fe05d413 100644 --- a/claimsync/claimsync.go +++ b/claimsync/claimsync.go @@ -102,20 +102,6 @@ func NewClaimSync( if err != nil { return nil, fmt.Errorf("claimsync: failed to create EVMDownloader: %w", err) } - // TODO: Remove - // lastBlock, _, err := proc.GetLastProcessedBlock(ctx) - // if err != nil { - // return nil, fmt.Errorf("claimsync: get last processed block: %w", err) - // } - // if lastBlock < cfg.InitialBlockNum { - // header, err := ethClient.CustomHeaderByNumber(ctx, aggkittypes.NewBlockNumber(cfg.InitialBlockNum)) - // if err != nil { - // return nil, fmt.Errorf("claimsync: get initial block %d: %w", cfg.InitialBlockNum, err) - // } - // if err := proc.ProcessBlock(ctx, sync.Block{Num: cfg.InitialBlockNum, Hash: header.Hash}); err != nil { - // return nil, fmt.Errorf("claimsync: process initial block %d: %w", cfg.InitialBlockNum, err) - // } - // } compatibilityChecker := compatibility.NewCompatibilityCheck( 
cfg.RequireStorageContentCompatibility, diff --git a/cmd/run.go b/cmd/run.go index 86061cbba..35b9ae26b 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -841,7 +841,7 @@ func runClaimSyncL1IfNeeded( log.Fatalf("invalid BridgeL1Sync config: %v", err) } - autoStart := cfg.AutoStart.Resolve(isNeeded([]string{aggkitcommon.BRIDGE, aggkitcommon.L1BRIDGESYNC}, components)) + cfg.AutoStart.Resolve(isNeeded([]string{aggkitcommon.BRIDGE, aggkitcommon.L1BRIDGESYNC}, components)) res, err := claimsync.NewStandaloneClaimSync( ctx, @@ -854,12 +854,8 @@ func runClaimSyncL1IfNeeded( if err != nil { log.Fatalf("error creating ClaimSyncL1: %s", err) } - if autoStart { - log.Infof("Starting ClaimSyncL1 (autoStart=true)") - go res.Start(ctx) - } else { - log.Infof("ClaimSyncL1 created (autoStart=false, on-demand)") - } + log.Infof("Starting ClaimSyncL1 (autoStart=%t)", *cfg.AutoStart.Resolved) + go res.Start(ctx) return res } @@ -934,7 +930,7 @@ func runClaimSyncL2IfNeeded( return nil } - autoStart := cfg.AutoStart.Resolve(isNeeded([]string{aggkitcommon.BRIDGE, aggkitcommon.L2BRIDGESYNC}, components)) + cfg.AutoStart.Resolve(isNeeded([]string{aggkitcommon.BRIDGE, aggkitcommon.L2BRIDGESYNC}, components)) res, err := claimsync.NewStandaloneClaimSync( ctx, @@ -947,12 +943,9 @@ func runClaimSyncL2IfNeeded( if err != nil { log.Fatalf("error creating ClaimSyncL2: %s", err) } - if autoStart { - log.Infof("Starting ClaimSyncL2 (autoStart=true)") - go res.Start(ctx) - } else { - log.Infof("ClaimSyncL2 created (autoStart=false, on-demand)") - } + + log.Infof("Starting ClaimSyncL2 (autoStart=%t)", *cfg.AutoStart.Resolved) + go res.Start(ctx) return res } From 71d18bdbcc92b0fe6bc75395eb4da7c9435fad9b Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Fri, 20 Mar 2026 10:16:04 +0100 Subject: [PATCH 21/28] feat: refactor claimsync initial block setup via InitialBlockClaimSyncerSetter - Remove GetNextBlockNumber from AggsenderBuilderFlow and 
AggsenderFlowBaser interfaces and their implementations - Introduce InitialBlockClaimSyncerSetter interface and implementation to encapsulate the logic of setting the initial claim syncer block - AggsenderValidator uses the new interface with a RetryHandler for retry/sleep management - Rename claimsync/claim_data.go to claimsync/claimdata.go Co-Authored-By: Claude Sonnet 4.6 --- aggsender/aggsender.go | 68 ++++++-------- aggsender/aggsender_validator.go | 34 +++++-- .../flows/builder_flow_aggchain_prover.go | 5 -- aggsender/flows/builder_flow_pp.go | 5 -- aggsender/flows/flow_base.go | 11 --- .../mocks/mock_aggsender_builder_flow.go | 55 ------------ aggsender/mocks/mock_aggsender_flow_baser.go | 55 ------------ .../mock_initial_block_claim_syncer_setter.go | 88 +++++++++++++++++++ .../initial_block_to_claimsync_setter.go | 85 ++++++++++++++++++ aggsender/types/interfaces.go | 13 ++- claimsync/claim_data_test.go | 78 ---------------- claimsync/claimcalldata_test.go | 67 ++++++++++++++ claimsync/{claim_data.go => claimdata.go} | 0 claimsync/config.go | 1 + cmd/run.go | 24 +++-- grpc/server.go | 2 + 16 files changed, 319 insertions(+), 272 deletions(-) create mode 100644 aggsender/mocks/mock_initial_block_claim_syncer_setter.go create mode 100644 aggsender/query/initial_block_to_claimsync_setter.go delete mode 100644 claimsync/claim_data_test.go rename claimsync/{claim_data.go => claimdata.go} (100%) diff --git a/aggsender/aggsender.go b/aggsender/aggsender.go index 1785b5dd1..d497d45ca 100644 --- a/aggsender/aggsender.go +++ b/aggsender/aggsender.go @@ -38,14 +38,15 @@ type RateLimiter interface { type AggSender struct { log aggkitcommon.Logger - storage db.AggSenderStorage - aggLayerClient agglayer.AgglayerClientInterface - compatibilityStoragedChecker compatibility.CompatibilityChecker - certStatusChecker types.CertificateStatusChecker - certQuerier types.CertificateQuerier - rollupDataQuerier types.RollupDataQuerier - validatorPoller types.ValidatorPoller - 
localValidator types.CertificateValidateAndSigner + storage db.AggSenderStorage + aggLayerClient agglayer.AgglayerClientInterface + compatibilityStoragedChecker compatibility.CompatibilityChecker + certStatusChecker types.CertificateStatusChecker + certQuerier types.CertificateQuerier + rollupDataQuerier types.RollupDataQuerier + initialBlockClaimSyncerSetter types.InitialBlockClaimSyncerSetter + validatorPoller types.ValidatorPoller + localValidator types.CertificateValidateAndSigner l1Client aggkittypes.BaseEthereumClienter l1InfoTreeSyncer types.L1InfoTreeSyncer @@ -154,6 +155,13 @@ func newAggsender( aggLayerClient, initialLER, ) + l2OriginNetwork := l2Syncer.OriginNetwork() + initialBlockClaimSyncerSetter := query.NewSetInitialBlockToClaimSyncer( + certQuerier, + aggLayerClient, + l2OriginNetwork, + logger, + ) flowManager, err := flows.NewBuilderFlow( ctx, @@ -176,8 +184,6 @@ func newAggsender( logger.Infof("Aggsender Config: %s.", cfg.String()) - l2OriginNetwork := l2Syncer.OriginNetwork() - compatibilityStoragedChecker := compatibility.NewCompatibilityCheck( cfg.RequireStorageContentCompatibility, func(ctx context.Context) (db.RuntimeData, error) { @@ -235,10 +241,11 @@ func newAggsender( ), certStatusChecker: statuschecker.NewCertStatusChecker( logger, storage, aggLayerClient, certQuerier, l2OriginNetwork), - l1Client: l1Client, - l1InfoTreeSyncer: l1InfoTreeSyncer, - l2ClaimSyncer: l2ClaimSyncer, - certificateSendTrigger: certificateSendTrigger, + l1Client: l1Client, + l1InfoTreeSyncer: l1InfoTreeSyncer, + l2ClaimSyncer: l2ClaimSyncer, + certificateSendTrigger: certificateSendTrigger, + initialBlockClaimSyncerSetter: initialBlockClaimSyncerSetter, }, nil } @@ -282,7 +289,10 @@ func (a *AggSender) Start(ctx context.Context) { a.status.SetStatus(types.StatusCheckingInitialStage, a.log) a.certStatusChecker.CheckInitialStatus(ctx, a.cfg.DelayBetweenRetries.Duration, a.status) a.status.SetStatus(types.StartingClaimSyncerStage, a.log) - 
a.setClaimSyncerNextRequiredBlock(ctx) + err := a.initialBlockClaimSyncerSetter.SetClaimSyncerNextRequiredBlock(ctx, a.l2ClaimSyncer, nil) + if err != nil { + a.log.Panicf("error setting next required block for claim syncer: %v", err) + } a.status.SetStatus(types.StatusFlowCheckingInitialStage, a.log) if err := a.flow.CheckInitialStatus(ctx); err != nil { a.log.Panicf("error checking flow Initial Status: %v", err) @@ -399,34 +409,6 @@ func (a *AggSender) sendCertificates(ctx context.Context, returnAfterNIterations } } -func (a *AggSender) setClaimSyncerNextRequiredBlock(ctx context.Context) { - if a.l2ClaimSyncer == nil { - a.log.Debugf("l2 claim syncer is nil, skipping setClaimSyncerNextRequiredBlock") - return - } - for { - select { - case <-ctx.Done(): - return - default: - } - nextBlock, err := a.flow.GetNextBlockNumber() - if err != nil { - a.log.Errorf("error getting next block number for claim syncer: %v", err) - time.Sleep(a.cfg.DelayBetweenRetries.Duration) - continue - } - a.log.Infof("Setting starting Claim L2 Syncer block to %d", nextBlock) - if err := a.l2ClaimSyncer.SetNextRequiredBlock(ctx, nextBlock); err != nil { - a.log.Errorf("error setting next required block for claim syncer: %v", err) - time.Sleep(a.cfg.DelayBetweenRetries.Duration) - continue - } - a.log.Infof("Set next required block for claim syncer to %d", nextBlock) - break - } -} - func (a *AggSender) sendCertificateWithRetries(ctx context.Context) (*agglayertypes.Certificate, error) { retryHandler, err := a.cfg.RetriesToBuildAndSendCertificate.NewRetryHandler() if err != nil { diff --git a/aggsender/aggsender_validator.go b/aggsender/aggsender_validator.go index 8203fb3db..0ee429ba8 100644 --- a/aggsender/aggsender_validator.go +++ b/aggsender/aggsender_validator.go @@ -9,7 +9,9 @@ import ( "github.com/agglayer/aggkit/aggsender/types" "github.com/agglayer/aggkit/aggsender/validator" v1 "github.com/agglayer/aggkit/aggsender/validator/proto/v1" + claimsynctypes 
"github.com/agglayer/aggkit/claimsync/types" aggkitcommon "github.com/agglayer/aggkit/common" + configtypes "github.com/agglayer/aggkit/config/types" "github.com/agglayer/aggkit/grpc" signertypes "github.com/agglayer/go_signer/signer/types" ethcommon "github.com/ethereum/go-ethereum/common" @@ -21,22 +23,26 @@ var ( ) type AggsenderValidator struct { - log aggkitcommon.Logger - validator types.CertificateValidator - validatorService *grpc.Server - cfg validator.Config + log aggkitcommon.Logger + validator types.CertificateValidator + validatorService *grpc.Server + initialBlockClaimSyncerSetter types.InitialBlockClaimSyncerSetter + l2ClaimSyncer claimsynctypes.ClaimSyncer + cfg validator.Config } func NewAggsenderValidator(ctx context.Context, logger aggkitcommon.Logger, cfg validator.Config, + l2ClaimSyncer claimsynctypes.ClaimSyncer, flow types.AggsenderVerifierFlow, l1InfoTreeDataQuerier validator.L1InfoTreeRootByLeafQuerier, aggLayerClient agglayer.AggLayerClientCertificateIDQuerier, certQuerier types.CertificateQuerier, aggchainFEPQuerier types.AggchainFEPRollupQuerier, initialLER ethcommon.Hash, - signer signertypes.Signer) (*AggsenderValidator, error) { + signer signertypes.Signer, + initialBlockClaimSyncerSetter types.InitialBlockClaimSyncerSetter) (*AggsenderValidator, error) { validatorCert := validator.NewAggsenderValidator( logger, flow, l1InfoTreeDataQuerier, certQuerier, initialLER) grpcServer, err := grpc.NewServer(cfg.ServerConfig) @@ -51,14 +57,24 @@ func NewAggsenderValidator(ctx context.Context, signer, )) return &AggsenderValidator{ - log: logger, - validator: validatorCert, - validatorService: grpcServer, - cfg: cfg, + log: logger, + validator: validatorCert, + validatorService: grpcServer, + cfg: cfg, + l2ClaimSyncer: l2ClaimSyncer, + initialBlockClaimSyncerSetter: initialBlockClaimSyncerSetter, }, nil } func (a *AggsenderValidator) Start(ctx context.Context) { metrics.Register() + // This is hardcoded because validator to just do 1 retry if 
fails it and stop + rh := aggkitcommon.NewRetryHandler([]configtypes.Duration{{Duration: a.cfg.DelayBetweenRetries.Duration}}, + 1) + err := a.initialBlockClaimSyncerSetter.SetClaimSyncerNextRequiredBlock(ctx, a.l2ClaimSyncer, rh) + if err != nil { + a.log.Fatalf("failed to set claim syncer next required block: %v", err) + return + } a.validatorService.Start(ctx) } diff --git a/aggsender/flows/builder_flow_aggchain_prover.go b/aggsender/flows/builder_flow_aggchain_prover.go index 373093a45..50b0662bb 100644 --- a/aggsender/flows/builder_flow_aggchain_prover.go +++ b/aggsender/flows/builder_flow_aggchain_prover.go @@ -392,11 +392,6 @@ func (a *AggchainProverBuilderFlow) GeneratePreBuildParams(ctx context.Context, return a.baseFlow.GeneratePreBuildParams(ctx, certType) } -// GetNextBlockNumber returns the first block number of the next certificate to generate -func (a *AggchainProverBuilderFlow) GetNextBlockNumber() (uint64, error) { - return a.baseFlow.GetNextBlockNumber() -} - // Signer returns the signer used to sign the certificate func (a *AggchainProverBuilderFlow) Signer() signertypes.Signer { return a.certificateSigner diff --git a/aggsender/flows/builder_flow_pp.go b/aggsender/flows/builder_flow_pp.go index 94b5b5113..8159a3351 100644 --- a/aggsender/flows/builder_flow_pp.go +++ b/aggsender/flows/builder_flow_pp.go @@ -153,11 +153,6 @@ func (p *PPBuilderFlow) GeneratePreBuildParams(ctx context.Context, return p.baseFlow.GeneratePreBuildParams(ctx, certType) } -// GetNextBlockNumber returns the first block number of the next certificate to generate -func (p *PPBuilderFlow) GetNextBlockNumber() (uint64, error) { - return p.baseFlow.GetNextBlockNumber() -} - // Signer returns the signer used to sign the certificate func (p *PPBuilderFlow) Signer() signertypes.Signer { return p.certificateSigner diff --git a/aggsender/flows/flow_base.go b/aggsender/flows/flow_base.go index 478f8de76..40b987579 100644 --- a/aggsender/flows/flow_base.go +++ 
b/aggsender/flows/flow_base.go @@ -110,17 +110,6 @@ func (f *baseFlow) StartL2Block() uint64 { return f.cfg.StartL2Block } -// GetNextBlockNumber returns the first block number of the next certificate to generate. -// It reads the last sent certificate from storage to determine the starting block. -func (f *baseFlow) GetNextBlockNumber() (uint64, error) { - lastSentCertificate, err := f.storage.GetLastSentCertificateHeader() - if err != nil { - return 0, fmt.Errorf("error getting last sent certificate: %w", err) - } - previousToBlock, _ := f.getLastSentBlockAndRetryCount(lastSentCertificate) - return previousToBlock + 1, nil -} - // NextCertificateBlockRange returns the block range and retryCount for the next certificate func (f *baseFlow) NextCertificateBlockRange(ctx context.Context, lastSentCertificate *types.CertificateHeader) (aggkitcommon.BlockRange, int, error) { diff --git a/aggsender/mocks/mock_aggsender_builder_flow.go b/aggsender/mocks/mock_aggsender_builder_flow.go index 81b4670ed..c88b0b4e7 100644 --- a/aggsender/mocks/mock_aggsender_builder_flow.go +++ b/aggsender/mocks/mock_aggsender_builder_flow.go @@ -308,61 +308,6 @@ func (_c *AggsenderBuilderFlow_GetCertificateBuildParams_Call) RunAndReturn(run return _c } -// GetNextBlockNumber provides a mock function with no fields -func (_m *AggsenderBuilderFlow) GetNextBlockNumber() (uint64, error) { - ret := _m.Called() - - if len(ret) == 0 { - panic("no return value specified for GetNextBlockNumber") - } - - var r0 uint64 - var r1 error - if rf, ok := ret.Get(0).(func() (uint64, error)); ok { - return rf() - } - if rf, ok := ret.Get(0).(func() uint64); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(uint64) - } - - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// AggsenderBuilderFlow_GetNextBlockNumber_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetNextBlockNumber' -type 
AggsenderBuilderFlow_GetNextBlockNumber_Call struct { - *mock.Call -} - -// GetNextBlockNumber is a helper method to define mock.On call -func (_e *AggsenderBuilderFlow_Expecter) GetNextBlockNumber() *AggsenderBuilderFlow_GetNextBlockNumber_Call { - return &AggsenderBuilderFlow_GetNextBlockNumber_Call{Call: _e.mock.On("GetNextBlockNumber")} -} - -func (_c *AggsenderBuilderFlow_GetNextBlockNumber_Call) Run(run func()) *AggsenderBuilderFlow_GetNextBlockNumber_Call { - _c.Call.Run(func(args mock.Arguments) { - run() - }) - return _c -} - -func (_c *AggsenderBuilderFlow_GetNextBlockNumber_Call) Return(_a0 uint64, _a1 error) *AggsenderBuilderFlow_GetNextBlockNumber_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *AggsenderBuilderFlow_GetNextBlockNumber_Call) RunAndReturn(run func() (uint64, error)) *AggsenderBuilderFlow_GetNextBlockNumber_Call { - _c.Call.Return(run) - return _c -} - // Signer provides a mock function with no fields func (_m *AggsenderBuilderFlow) Signer() signertypes.Signer { ret := _m.Called() diff --git a/aggsender/mocks/mock_aggsender_flow_baser.go b/aggsender/mocks/mock_aggsender_flow_baser.go index 0b5aa5437..b37170477 100644 --- a/aggsender/mocks/mock_aggsender_flow_baser.go +++ b/aggsender/mocks/mock_aggsender_flow_baser.go @@ -383,61 +383,6 @@ func (_c *AggsenderFlowBaser_GetNewLocalExitRoot_Call) RunAndReturn(run func(con return _c } -// GetNextBlockNumber provides a mock function with no fields -func (_m *AggsenderFlowBaser) GetNextBlockNumber() (uint64, error) { - ret := _m.Called() - - if len(ret) == 0 { - panic("no return value specified for GetNextBlockNumber") - } - - var r0 uint64 - var r1 error - if rf, ok := ret.Get(0).(func() (uint64, error)); ok { - return rf() - } - if rf, ok := ret.Get(0).(func() uint64); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(uint64) - } - - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// 
AggsenderFlowBaser_GetNextBlockNumber_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetNextBlockNumber' -type AggsenderFlowBaser_GetNextBlockNumber_Call struct { - *mock.Call -} - -// GetNextBlockNumber is a helper method to define mock.On call -func (_e *AggsenderFlowBaser_Expecter) GetNextBlockNumber() *AggsenderFlowBaser_GetNextBlockNumber_Call { - return &AggsenderFlowBaser_GetNextBlockNumber_Call{Call: _e.mock.On("GetNextBlockNumber")} -} - -func (_c *AggsenderFlowBaser_GetNextBlockNumber_Call) Run(run func()) *AggsenderFlowBaser_GetNextBlockNumber_Call { - _c.Call.Run(func(args mock.Arguments) { - run() - }) - return _c -} - -func (_c *AggsenderFlowBaser_GetNextBlockNumber_Call) Return(_a0 uint64, _a1 error) *AggsenderFlowBaser_GetNextBlockNumber_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *AggsenderFlowBaser_GetNextBlockNumber_Call) RunAndReturn(run func() (uint64, error)) *AggsenderFlowBaser_GetNextBlockNumber_Call { - _c.Call.Return(run) - return _c -} - // LimitCertSize provides a mock function with given fields: certParams func (_m *AggsenderFlowBaser) LimitCertSize(certParams *types.CertificateBuildParams) (*types.CertificateBuildParams, error) { ret := _m.Called(certParams) diff --git a/aggsender/mocks/mock_initial_block_claim_syncer_setter.go b/aggsender/mocks/mock_initial_block_claim_syncer_setter.go new file mode 100644 index 000000000..e5d2db655 --- /dev/null +++ b/aggsender/mocks/mock_initial_block_claim_syncer_setter.go @@ -0,0 +1,88 @@ +// Code generated by mockery. DO NOT EDIT. 
+ +package mocks + +import ( + context "context" + + commontypes "github.com/agglayer/aggkit/common/types" + + mock "github.com/stretchr/testify/mock" + + types "github.com/agglayer/aggkit/claimsync/types" +) + +// InitialBlockClaimSyncerSetter is an autogenerated mock type for the InitialBlockClaimSyncerSetter type +type InitialBlockClaimSyncerSetter struct { + mock.Mock +} + +type InitialBlockClaimSyncerSetter_Expecter struct { + mock *mock.Mock +} + +func (_m *InitialBlockClaimSyncerSetter) EXPECT() *InitialBlockClaimSyncerSetter_Expecter { + return &InitialBlockClaimSyncerSetter_Expecter{mock: &_m.Mock} +} + +// SetClaimSyncerNextRequiredBlock provides a mock function with given fields: ctx, l2ClaimSyncer, retryHandler +func (_m *InitialBlockClaimSyncerSetter) SetClaimSyncerNextRequiredBlock(ctx context.Context, l2ClaimSyncer types.ClaimSyncer, retryHandler commontypes.RetryHandler) error { + ret := _m.Called(ctx, l2ClaimSyncer, retryHandler) + + if len(ret) == 0 { + panic("no return value specified for SetClaimSyncerNextRequiredBlock") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, types.ClaimSyncer, commontypes.RetryHandler) error); ok { + r0 = rf(ctx, l2ClaimSyncer, retryHandler) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// InitialBlockClaimSyncerSetter_SetClaimSyncerNextRequiredBlock_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SetClaimSyncerNextRequiredBlock' +type InitialBlockClaimSyncerSetter_SetClaimSyncerNextRequiredBlock_Call struct { + *mock.Call +} + +// SetClaimSyncerNextRequiredBlock is a helper method to define mock.On call +// - ctx context.Context +// - l2ClaimSyncer types.ClaimSyncer +// - retryHandler commontypes.RetryHandler +func (_e *InitialBlockClaimSyncerSetter_Expecter) SetClaimSyncerNextRequiredBlock(ctx interface{}, l2ClaimSyncer interface{}, retryHandler interface{}) *InitialBlockClaimSyncerSetter_SetClaimSyncerNextRequiredBlock_Call { + return 
&InitialBlockClaimSyncerSetter_SetClaimSyncerNextRequiredBlock_Call{Call: _e.mock.On("SetClaimSyncerNextRequiredBlock", ctx, l2ClaimSyncer, retryHandler)} +} + +func (_c *InitialBlockClaimSyncerSetter_SetClaimSyncerNextRequiredBlock_Call) Run(run func(ctx context.Context, l2ClaimSyncer types.ClaimSyncer, retryHandler commontypes.RetryHandler)) *InitialBlockClaimSyncerSetter_SetClaimSyncerNextRequiredBlock_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(types.ClaimSyncer), args[2].(commontypes.RetryHandler)) + }) + return _c +} + +func (_c *InitialBlockClaimSyncerSetter_SetClaimSyncerNextRequiredBlock_Call) Return(_a0 error) *InitialBlockClaimSyncerSetter_SetClaimSyncerNextRequiredBlock_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *InitialBlockClaimSyncerSetter_SetClaimSyncerNextRequiredBlock_Call) RunAndReturn(run func(context.Context, types.ClaimSyncer, commontypes.RetryHandler) error) *InitialBlockClaimSyncerSetter_SetClaimSyncerNextRequiredBlock_Call { + _c.Call.Return(run) + return _c +} + +// NewInitialBlockClaimSyncerSetter creates a new instance of InitialBlockClaimSyncerSetter. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewInitialBlockClaimSyncerSetter(t interface { + mock.TestingT + Cleanup(func()) +}) *InitialBlockClaimSyncerSetter { + mock := &InitialBlockClaimSyncerSetter{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/aggsender/query/initial_block_to_claimsync_setter.go b/aggsender/query/initial_block_to_claimsync_setter.go new file mode 100644 index 000000000..88b619ab1 --- /dev/null +++ b/aggsender/query/initial_block_to_claimsync_setter.go @@ -0,0 +1,85 @@ +package query + +import ( + "context" + "fmt" + "time" + + "github.com/agglayer/aggkit/agglayer" + "github.com/agglayer/aggkit/aggsender/types" + claimsynctypes "github.com/agglayer/aggkit/claimsync/types" + aggkitcommon "github.com/agglayer/aggkit/common" + commontypes "github.com/agglayer/aggkit/common/types" + configtypes "github.com/agglayer/aggkit/config/types" +) + +type SetInitialBlockToClaimSyncer struct { + certQuerier types.CertificateQuerier + agglayerClient agglayer.AgglayerClientInterface + l2OriginNetwork uint32 + logger aggkitcommon.Logger +} + +func NewSetInitialBlockToClaimSyncer( + certQuerier types.CertificateQuerier, + agglayerClient agglayer.AgglayerClientInterface, + l2OriginNetwork uint32, + logger aggkitcommon.Logger, +) *SetInitialBlockToClaimSyncer { + return &SetInitialBlockToClaimSyncer{ + certQuerier: certQuerier, + agglayerClient: agglayerClient, + l2OriginNetwork: l2OriginNetwork, + logger: logger, + } +} + +func (n *SetInitialBlockToClaimSyncer) SetClaimSyncerNextRequiredBlock( + ctx context.Context, + l2ClaimSyncer claimsynctypes.ClaimSyncer, + retryHandler commontypes.RetryHandler) error { + if l2ClaimSyncer == nil { + n.logger.Debugf("l2 claim syncer is nil, skipping setClaimSyncerNextRequiredBlock") + return nil + } + if retryHandler == nil { + retryHandler = aggkitcommon.NewRetryHandler( + []configtypes.Duration{{Duration: time.Second}}, + aggkitcommon.MaxAttemptsInfinite, + ) + } + _, err := 
aggkitcommon.Execute(retryHandler, + ctx, + n.logger.Infof, + "Setting next required block for claim syncer based on agglayer's latest settled certificate", + func() (bool, error) { + nextBlock, err := n.getNextBlockNumber(ctx) + if err != nil { + return true, fmt.Errorf("error getting next block number for claim syncer: %v", err) + } + if err := l2ClaimSyncer.SetNextRequiredBlock(ctx, nextBlock); err != nil { + return true, fmt.Errorf("error setting next required block for claim syncer: %v", err) + } + n.logger.Infof("Set next required block for claim syncer to %d", nextBlock) + return true, nil + }) + if err != nil { + return fmt.Errorf("error setting next required block for claim syncer: %v", err) + } + return nil +} + +// getNextBlockNumber returns the first block number of the next certificate to generate. +// It reads the last sent certificate from agglayer to determine the starting block. +func (n *SetInitialBlockToClaimSyncer) getNextBlockNumber(ctx context.Context) (uint64, error) { + certHeader, err := n.agglayerClient.GetLatestSettledCertificateHeader(ctx, n.l2OriginNetwork) + if err != nil { + return 0, fmt.Errorf("error getting latest settled certificate header from agglayer: %w", err) + } + // Even if certHeader is nil it returns the first block number + toBlock, err := n.certQuerier.GetLastSettledCertificateToBlock(ctx, certHeader) + if err != nil { + return 0, fmt.Errorf("error getting last settled certificate to block: %w", err) + } + return toBlock, nil +} diff --git a/aggsender/types/interfaces.go b/aggsender/types/interfaces.go index 317b838e8..297535034 100644 --- a/aggsender/types/interfaces.go +++ b/aggsender/types/interfaces.go @@ -11,6 +11,7 @@ import ( agglayertypes "github.com/agglayer/aggkit/agglayer/types" "github.com/agglayer/aggkit/bridgesync" claimsynctypes "github.com/agglayer/aggkit/claimsync/types" + commontypes "github.com/agglayer/aggkit/common/types" "github.com/agglayer/aggkit/l1infotreesync" 
"github.com/agglayer/aggkit/l2gersync" "github.com/agglayer/aggkit/sync" @@ -41,8 +42,6 @@ type AggsenderBuilderFlow interface { UpdateAggchainData(cert *agglayertypes.Certificate, multisig *agglayertypes.Multisig) error // Signer is the signer used to sign the certificate Signer() signertypes.Signer - // GetNextBlockNumber returns the first block number of the next certificate to generate - GetNextBlockNumber() (uint64, error) } // AggsenderVerifierFlow is an interface that defines the methods to verify the certificate @@ -77,8 +76,6 @@ type AggsenderFlowBaser interface { newFromBlock, newToBlock uint64) error ConvertClaimToImportedBridgeExit(claim claimsynctypes.Claim) (*agglayertypes.ImportedBridgeExit, error) StartL2Block() uint64 - // GetNextBlockNumber returns the first block number of the next certificate to generate - GetNextBlockNumber() (uint64, error) GeneratePreBuildParams(ctx context.Context, certType CertificateType) (*CertificatePreBuildParams, error) GenerateBuildParams(ctx context.Context, @@ -413,3 +410,11 @@ type CertificateSendTrigger interface { // OnIdle Aggsender is waiting for a trigger to generate a new certificate OnIdle() } + +// InitialBlockClaimSyncerSetter is an interface that defines the method to set the initial block for the claim syncer +type InitialBlockClaimSyncerSetter interface { + SetClaimSyncerNextRequiredBlock( + ctx context.Context, + l2ClaimSyncer claimsynctypes.ClaimSyncer, + retryHandler commontypes.RetryHandler) error +} diff --git a/claimsync/claim_data_test.go b/claimsync/claim_data_test.go deleted file mode 100644 index c87afc9d7..000000000 --- a/claimsync/claim_data_test.go +++ /dev/null @@ -1,78 +0,0 @@ -package claimsync - -import ( - "math/big" - "testing" - - claimsynctypes "github.com/agglayer/aggkit/claimsync/types" - "github.com/ethereum/go-ethereum/common" - "github.com/stretchr/testify/require" -) - -// --- Constants --- - -func TestClaimTypeConstants(t *testing.T) { - t.Parallel() - require.Equal(t, 
ClaimType("ClaimEvent"), ClaimEvent) - require.Equal(t, ClaimType("DetailedClaimEvent"), DetailedClaimEvent) - require.NotEqual(t, ClaimEvent, DetailedClaimEvent) -} - -func TestClaimTypeConstants_MatchUnderlyingPackage(t *testing.T) { - t.Parallel() - require.Equal(t, claimsynctypes.ClaimEvent, ClaimEvent) - require.Equal(t, claimsynctypes.DetailedClaimEvent, DetailedClaimEvent) -} - -// --- Type aliases --- -// These tests verify that the aliases are truly interchangeable with their -// underlying types by passing claimsynctypes values to functions that accept -// the alias types — a compile error here would mean the alias is broken. - -func requireClaim(t *testing.T, c Claim, blockNum uint64, claimType ClaimType) { - t.Helper() - require.Equal(t, blockNum, c.BlockNum) - require.Equal(t, claimType, c.Type) -} - -func requireUnsetClaim(t *testing.T, u UnsetClaim, blockNum uint64) { - t.Helper() - require.Equal(t, blockNum, u.BlockNum) -} - -func requireSetClaim(t *testing.T, s SetClaim, blockNum uint64) { - t.Helper() - require.Equal(t, blockNum, s.BlockNum) -} - -func TestClaimAlias_AssignableFromUnderlying(t *testing.T) { - t.Parallel() - c := claimsynctypes.Claim{ - BlockNum: 1, - GlobalIndex: big.NewInt(42), - TxHash: common.HexToHash("0xdeadbeef"), - Type: claimsynctypes.ClaimEvent, - } - // passing claimsynctypes.Claim where Claim is expected proves alias identity - requireClaim(t, c, 1, ClaimEvent) -} - -func TestUnsetClaimAlias_AssignableFromUnderlying(t *testing.T) { - t.Parallel() - u := claimsynctypes.UnsetClaim{ - BlockNum: 2, - GlobalIndex: big.NewInt(10), - TxHash: common.HexToHash("0xabc"), - } - requireUnsetClaim(t, u, 2) -} - -func TestSetClaimAlias_AssignableFromUnderlying(t *testing.T) { - t.Parallel() - s := claimsynctypes.SetClaim{ - BlockNum: 3, - GlobalIndex: big.NewInt(20), - TxHash: common.HexToHash("0x123"), - } - requireSetClaim(t, s, 3) -} diff --git a/claimsync/claimcalldata_test.go b/claimsync/claimcalldata_test.go index 
8565eb353..b3a22aa78 100644 --- a/claimsync/claimcalldata_test.go +++ b/claimsync/claimcalldata_test.go @@ -5,6 +5,7 @@ import ( "math/big" "testing" + claimsynctypes "github.com/agglayer/aggkit/claimsync/types" "github.com/agglayer/aggkit/log" "github.com/agglayer/aggkit/test/contracts/claimmock" "github.com/agglayer/aggkit/test/contracts/claimmockcaller" @@ -1118,3 +1119,69 @@ func encodeClaimCalldata(claimMockABI *abi.ABI, funcName string, claim.DestinationNetwork, claim.DestinationAddress, claim.Amount, claim.Metadata) } + +func TestClaimTypeConstants(t *testing.T) { + t.Parallel() + require.Equal(t, ClaimType("ClaimEvent"), ClaimEvent) + require.Equal(t, ClaimType("DetailedClaimEvent"), DetailedClaimEvent) + require.NotEqual(t, ClaimEvent, DetailedClaimEvent) +} + +func TestClaimTypeConstants_MatchUnderlyingPackage(t *testing.T) { + t.Parallel() + require.Equal(t, claimsynctypes.ClaimEvent, ClaimEvent) + require.Equal(t, claimsynctypes.DetailedClaimEvent, DetailedClaimEvent) +} + +// --- Type aliases --- +// These tests verify that the aliases are truly interchangeable with their +// underlying types by passing claimsynctypes values to functions that accept +// the alias types — a compile error here would mean the alias is broken. 
+ +func requireClaim(t *testing.T, c Claim, blockNum uint64, claimType ClaimType) { + t.Helper() + require.Equal(t, blockNum, c.BlockNum) + require.Equal(t, claimType, c.Type) +} + +func requireUnsetClaim(t *testing.T, u UnsetClaim, blockNum uint64) { + t.Helper() + require.Equal(t, blockNum, u.BlockNum) +} + +func requireSetClaim(t *testing.T, s SetClaim, blockNum uint64) { + t.Helper() + require.Equal(t, blockNum, s.BlockNum) +} + +func TestClaimAlias_AssignableFromUnderlying(t *testing.T) { + t.Parallel() + c := claimsynctypes.Claim{ + BlockNum: 1, + GlobalIndex: big.NewInt(42), + TxHash: common.HexToHash("0xdeadbeef"), + Type: claimsynctypes.ClaimEvent, + } + // passing claimsynctypes.Claim where Claim is expected proves alias identity + requireClaim(t, c, 1, ClaimEvent) +} + +func TestUnsetClaimAlias_AssignableFromUnderlying(t *testing.T) { + t.Parallel() + u := claimsynctypes.UnsetClaim{ + BlockNum: 2, + GlobalIndex: big.NewInt(10), + TxHash: common.HexToHash("0xabc"), + } + requireUnsetClaim(t, u, 2) +} + +func TestSetClaimAlias_AssignableFromUnderlying(t *testing.T) { + t.Parallel() + s := claimsynctypes.SetClaim{ + BlockNum: 3, + GlobalIndex: big.NewInt(20), + TxHash: common.HexToHash("0x123"), + } + requireSetClaim(t, s, 3) +} diff --git a/claimsync/claim_data.go b/claimsync/claimdata.go similarity index 100% rename from claimsync/claim_data.go rename to claimsync/claimdata.go diff --git a/claimsync/config.go b/claimsync/config.go index 9ca44e487..a9812cdcd 100644 --- a/claimsync/config.go +++ b/claimsync/config.go @@ -32,6 +32,7 @@ type ConfigStandalone struct { // MaxRetryAttemptsAfterError is the maximum number of consecutive attempts that will happen before panicing. 
// Any number smaller than zero will be considered as unlimited retries MaxRetryAttemptsAfterError int `mapstructure:"MaxRetryAttemptsAfterError"` + // WaitForNewBlocksPeriod time that will be waited when the synchronizer has reached the latest block WaitForNewBlocksPeriod types.Duration `mapstructure:"WaitForNewBlocksPeriod"` // RequireStorageContentCompatibility is true it's mandatory that data stored in the database diff --git a/cmd/run.go b/cmd/run.go index 35b9ae26b..8fee536ea 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -318,8 +318,8 @@ func createAggchainProofGen( func createAggSenderValidator(ctx context.Context, cfg validator.Config, l1InfoTreeSync *l1infotreesync.L1InfoTreeSync, - l2Syncer *bridgesync.BridgeSync, - claimSyncer claimsynctypes.ClaimSyncer, + l2BridgeSyncer *bridgesync.BridgeSync, + l2ClaimSyncer claimsynctypes.ClaimSyncer, l1Client aggkittypes.BaseEthereumClienter, l2Client aggkittypes.BaseEthereumClienter, rollupDataQuerier *ethermanquierier.RollupDataQuerier, @@ -357,8 +357,8 @@ func createAggSenderValidator(ctx context.Context, } certQuerier := query.NewCertificateQuerier( - l2Syncer, - claimSyncer, + l2BridgeSyncer, + l2ClaimSyncer, aggchainFEPQuerier, agglayerClient, initialLER, @@ -371,8 +371,8 @@ func createAggSenderValidator(ctx context.Context, l1Client, l2Client, l1InfoTreeSync, - l2Syncer, - claimSyncer, + l2BridgeSyncer, + l2ClaimSyncer, rollupDataQuerier, committeeQuerier, initialLER, @@ -380,15 +380,25 @@ func createAggSenderValidator(ctx context.Context, if err != nil { return nil, fmt.Errorf("failed to create verifier flow: %w", err) } + l2OriginNetwork := l2BridgeSyncer.OriginNetwork() + + nextBlockQuerier := query.NewSetInitialBlockToClaimSyncer( + certQuerier, + agglayerClient, + l2OriginNetwork, + logger) return aggsender.NewAggsenderValidator( - ctx, logger, cfg, flow, + ctx, logger, cfg, + l2ClaimSyncer, + flow, flowParams.L1InfoTreeDataQuerier, agglayerClient, certQuerier, aggchainFEPQuerier, flowParams.InitialLER, 
flowParams.Signer, + nextBlockQuerier, ) } diff --git a/grpc/server.go b/grpc/server.go index 742043d12..487999e83 100644 --- a/grpc/server.go +++ b/grpc/server.go @@ -70,6 +70,8 @@ func (s *Server) Start(ctx context.Context) { s.stop() }() + // TODO: Set starting block for claimsyncer + if err := s.grpcServer.Serve(s.listener); err != nil { log.Errorf("failed to start gRPC server: %v", err) } From 489b7333cb46f2acce07100e71f3d90ea9e9b632 Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Fri, 20 Mar 2026 12:28:58 +0100 Subject: [PATCH 22/28] feat: fix ut, reduce verbosity --- aggsender/aggsender_test.go | 3 + .../initial_block_to_claimsync_setter.go | 6 +- .../initial_block_to_claimsync_setter_test.go | 105 ++++++++++++++++++ claimsync/claimsync_rpc_test.go | 11 +- multidownloader/evm_multidownloader.go | 14 +-- multidownloader/sync/evmdriver.go | 16 +-- 6 files changed, 133 insertions(+), 22 deletions(-) create mode 100644 aggsender/query/initial_block_to_claimsync_setter_test.go diff --git a/aggsender/aggsender_test.go b/aggsender/aggsender_test.go index 63432a270..236504741 100644 --- a/aggsender/aggsender_test.go +++ b/aggsender/aggsender_test.go @@ -578,6 +578,9 @@ func TestAggSenderStartFailFlowCheckInitialStatus(t *testing.T) { testData := newAggsenderTestData(t, testDataFlagMockStorage|testDataFlagMockFlow|testDataFlagMockStatusChecker) testData.sut.cfg.RequireStorageContentCompatibility = false testData.certStatusCheckerMock.EXPECT().CheckInitialStatus(mock.Anything, mock.Anything, testData.sut.status).Once() + mockInitialBlockSetter := mocks.NewInitialBlockClaimSyncerSetter(t) + mockInitialBlockSetter.EXPECT().SetClaimSyncerNextRequiredBlock(mock.Anything, mock.Anything, mock.Anything).Return(nil).Once() + testData.sut.initialBlockClaimSyncerSetter = mockInitialBlockSetter testData.flowMock.EXPECT().CheckInitialStatus(mock.Anything).Return(fmt.Errorf("error")).Once() require.Panics(t, func() { diff --git 
a/aggsender/query/initial_block_to_claimsync_setter.go b/aggsender/query/initial_block_to_claimsync_setter.go index 88b619ab1..c02e70160 100644 --- a/aggsender/query/initial_block_to_claimsync_setter.go +++ b/aggsender/query/initial_block_to_claimsync_setter.go @@ -55,16 +55,16 @@ func (n *SetInitialBlockToClaimSyncer) SetClaimSyncerNextRequiredBlock( func() (bool, error) { nextBlock, err := n.getNextBlockNumber(ctx) if err != nil { - return true, fmt.Errorf("error getting next block number for claim syncer: %v", err) + return true, fmt.Errorf("error getting next block number for claim syncer: %w", err) } if err := l2ClaimSyncer.SetNextRequiredBlock(ctx, nextBlock); err != nil { - return true, fmt.Errorf("error setting next required block for claim syncer: %v", err) + return true, fmt.Errorf("error setting next required block for claim syncer: %w", err) } n.logger.Infof("Set next required block for claim syncer to %d", nextBlock) return true, nil }) if err != nil { - return fmt.Errorf("error setting next required block for claim syncer: %v", err) + return fmt.Errorf("error setting next required block for claim syncer: %w", err) } return nil } diff --git a/aggsender/query/initial_block_to_claimsync_setter_test.go b/aggsender/query/initial_block_to_claimsync_setter_test.go new file mode 100644 index 000000000..948f8a38e --- /dev/null +++ b/aggsender/query/initial_block_to_claimsync_setter_test.go @@ -0,0 +1,105 @@ +package query + +import ( + "errors" + "testing" + + agglayermocks "github.com/agglayer/aggkit/agglayer/mocks" + agglayertypes "github.com/agglayer/aggkit/agglayer/types" + "github.com/agglayer/aggkit/aggsender/mocks" + claimsynctypesmocks "github.com/agglayer/aggkit/claimsync/types/mocks" + aggkitcommon "github.com/agglayer/aggkit/common" + "github.com/agglayer/aggkit/log" + "github.com/stretchr/testify/require" +) + +func newTestSetter(t *testing.T) ( + *SetInitialBlockToClaimSyncer, + *mocks.CertificateQuerier, + *agglayermocks.AgglayerClientMock, +) { 
+ t.Helper() + certQuerier := mocks.NewCertificateQuerier(t) + agglayerClient := agglayermocks.NewAgglayerClientMock(t) + logger := log.WithFields("module", "test") + setter := NewSetInitialBlockToClaimSyncer(certQuerier, agglayerClient, uint32(1), logger) + return setter, certQuerier, agglayerClient +} + +// noRetryHandler executes exactly once with no sleep. +func noRetryHandler() *aggkitcommon.RetryHandlerDelays { + return aggkitcommon.NewRetryHandler(nil, 0) +} + +func TestSetClaimSyncerNextRequiredBlock_NilClaimSyncer(t *testing.T) { + t.Parallel() + setter, _, _ := newTestSetter(t) + + err := setter.SetClaimSyncerNextRequiredBlock(t.Context(), nil, noRetryHandler()) + require.NoError(t, err) +} + +func TestSetClaimSyncerNextRequiredBlock_Success(t *testing.T) { + t.Parallel() + ctx := t.Context() + setter, certQuerier, agglayerClient := newTestSetter(t) + + certHeader := &agglayertypes.CertificateHeader{} + agglayerClient.EXPECT().GetLatestSettledCertificateHeader(ctx, uint32(1)).Return(certHeader, nil) + certQuerier.EXPECT().GetLastSettledCertificateToBlock(ctx, certHeader).Return(uint64(42), nil) + + claimSyncer := claimsynctypesmocks.NewClaimSyncer(t) + claimSyncer.EXPECT().SetNextRequiredBlock(ctx, uint64(42)).Return(nil) + + err := setter.SetClaimSyncerNextRequiredBlock(ctx, claimSyncer, noRetryHandler()) + require.NoError(t, err) +} + +func TestSetClaimSyncerNextRequiredBlock_GetLatestSettledCertHeaderError(t *testing.T) { + t.Parallel() + ctx := t.Context() + setter, _, agglayerClient := newTestSetter(t) + + agglayerClient.EXPECT().GetLatestSettledCertificateHeader(ctx, uint32(1)). 
+ Return(nil, errors.New("agglayer unavailable")) + + claimSyncer := claimsynctypesmocks.NewClaimSyncer(t) + + err := setter.SetClaimSyncerNextRequiredBlock(ctx, claimSyncer, noRetryHandler()) + require.ErrorIs(t, err, aggkitcommon.ErrExecutionFails) + require.ErrorContains(t, err, "agglayer unavailable") +} + +func TestSetClaimSyncerNextRequiredBlock_GetLastSettledCertToBlockError(t *testing.T) { + t.Parallel() + ctx := t.Context() + setter, certQuerier, agglayerClient := newTestSetter(t) + + certHeader := &agglayertypes.CertificateHeader{} + agglayerClient.EXPECT().GetLatestSettledCertificateHeader(ctx, uint32(1)).Return(certHeader, nil) + certQuerier.EXPECT().GetLastSettledCertificateToBlock(ctx, certHeader). + Return(uint64(0), errors.New("db error")) + + claimSyncer := claimsynctypesmocks.NewClaimSyncer(t) + + err := setter.SetClaimSyncerNextRequiredBlock(ctx, claimSyncer, noRetryHandler()) + require.ErrorIs(t, err, aggkitcommon.ErrExecutionFails) + require.ErrorContains(t, err, "db error") +} + +func TestSetClaimSyncerNextRequiredBlock_SetNextRequiredBlockError(t *testing.T) { + t.Parallel() + ctx := t.Context() + setter, certQuerier, agglayerClient := newTestSetter(t) + + certHeader := &agglayertypes.CertificateHeader{} + agglayerClient.EXPECT().GetLatestSettledCertificateHeader(ctx, uint32(1)).Return(certHeader, nil) + certQuerier.EXPECT().GetLastSettledCertificateToBlock(ctx, certHeader).Return(uint64(10), nil) + + claimSyncer := claimsynctypesmocks.NewClaimSyncer(t) + claimSyncer.EXPECT().SetNextRequiredBlock(ctx, uint64(10)).Return(errors.New("syncer error")) + + err := setter.SetClaimSyncerNextRequiredBlock(ctx, claimSyncer, noRetryHandler()) + require.ErrorIs(t, err, aggkitcommon.ErrExecutionFails) + require.ErrorContains(t, err, "syncer error") +} diff --git a/claimsync/claimsync_rpc_test.go b/claimsync/claimsync_rpc_test.go index d51d0e63a..e439cb971 100644 --- a/claimsync/claimsync_rpc_test.go +++ b/claimsync/claimsync_rpc_test.go @@ -25,18 +25,21 
@@ func newTestRPC(t *testing.T) (*ClaimSyncRPC, *mocks.ClaimSyncer) { func TestClaimSyncRPC_Status_OK(t *testing.T) { rpc, syncer := newTestRPC(t) syncer.EXPECT().GetLastProcessedBlock(mock.Anything).Return(uint64(42), true, nil) + syncer.EXPECT().GetFirstProcessedBlock(mock.Anything).Return(uint64(10), true, nil) result, rpcErr := rpc.Status() require.Nil(t, rpcErr) require.NotNil(t, result) status, ok := result.(struct { - Status string `json:"status"` - LastProcessedBlock uint64 `json:"lastProcessedBlock"` + FirstProcessedBlock *uint64 `json:"firstProcessedBlock,omitempty"` + LastProcessedBlock *uint64 `json:"lastProcessedBlock"` }) require.True(t, ok) - require.Equal(t, "running", status.Status) - require.Equal(t, uint64(42), status.LastProcessedBlock) + require.NotNil(t, status.FirstProcessedBlock) + require.Equal(t, uint64(10), *status.FirstProcessedBlock) + require.NotNil(t, status.LastProcessedBlock) + require.Equal(t, uint64(42), *status.LastProcessedBlock) } func TestClaimSyncRPC_Status_Error(t *testing.T) { diff --git a/multidownloader/evm_multidownloader.go b/multidownloader/evm_multidownloader.go index 03da6580f..6a962b988 100644 --- a/multidownloader/evm_multidownloader.go +++ b/multidownloader/evm_multidownloader.go @@ -382,19 +382,19 @@ func (dh *EVMMultidownloader) StartStep(ctx context.Context) error { } safePendingBlockRange, unsafePendingBlockRange := pendingBlockRange.SplitByBlockNumber(finalizedBlockNumber) if !safePendingBlockRange.IsEmpty() { - dh.log.Infof("🛡️ StartStep: Safe sync for pending range %s", safePendingBlockRange.String()) + dh.log.Debugf("🛡️ StartStep: Safe sync for pending range %s", safePendingBlockRange.String()) _, err = dh.StepSafe(ctx) return err } if !unsafePendingBlockRange.IsEmpty() { - dh.log.Infof("😈 StartStep: Unsafe sync for pending range %s", unsafePendingBlockRange.String()) + dh.log.Debugf("😈 StartStep: Unsafe sync for pending range %s", unsafePendingBlockRange.String()) _, err = dh.StepUnsafe(ctx) return err } 
} else { dh.log.Debugf("StartStep: no pending blocks to sync") } - dh.log.Infof("⏳StartStep: waiting new block...") + dh.log.Debugf("⏳StartStep: waiting new block...") if err = dh.WaitForNewLatestBlocks(ctx); err != nil { return err } @@ -416,7 +416,7 @@ func (dh *EVMMultidownloader) WaitForNewLatestBlocks(ctx context.Context) error // Is not in DB, so must be finalized finalized = true } - dh.log.Infof("waiting new block (%s>%d)...", lastSyncedBlockTag.String(), latestSyncedBlockNumber) + dh.log.Debugf("waiting new block (%s>%d)...", lastSyncedBlockTag.String(), latestSyncedBlockNumber) _, err = dh.waitForNewBlocks(ctx, lastSyncedBlockTag, lastBlockHeader, finalized) return err } @@ -670,7 +670,7 @@ func (dh *EVMMultidownloader) StepUnsafe(ctx context.Context) (bool, error) { dh.state = newState finished := dh.state.IsSyncFinished() totalBlocksPendingToSync := dh.state.TotalBlocksPendingToSync() - dh.log.Infof("Unsafe/Step: elapsed=%s finished br=%s logs=%d blocksHeaders=%d pendingBlocks=%d ETA=%s ", + dh.log.Debugf("Unsafe/Step: elapsed=%s finished br=%s logs=%d blocksHeaders=%d pendingBlocks=%d ETA=%s ", dh.statistics.ElapsedSyncing().String(), pendingBlockRange.String(), len(logs), @@ -740,7 +740,7 @@ func (dh *EVMMultidownloader) StepSafe(ctx context.Context) (bool, error) { dh.state = newState finished := dh.state.IsSyncFinished() totalBlocksPendingToSync := dh.state.TotalBlocksPendingToSync() - dh.log.Infof("Safe/Step: elapsed=%s finished br=%s logs=%d blocksHeaders=%d pendingBlocks=%d ETA=%s ", + dh.log.Debugf("Safe/Step: elapsed=%s finished br=%s logs=%d blocksHeaders=%d pendingBlocks=%d ETA=%s ", dh.statistics.ElapsedSyncing().String(), logQueryData.BlockRange.String(), len(logs), @@ -1029,7 +1029,7 @@ func (dh *EVMMultidownloader) moveUnsafeToSafeIfPossible(ctx context.Context) er if err != nil { return fmt.Errorf("moveUnsafeToSafeIfPossible: cannot update is_final for block bases: %w", err) } - dh.log.Infof("moveUnsafeToSafeIfPossible: 
finalizedBlockNumber=%d, "+ + dh.log.Debugf("moveUnsafeToSafeIfPossible: finalizedBlockNumber=%d, "+ "block moved to safe zone: %s (len=%d)", finalizedBlockNumber, blocks.BlockRange().String(), blocks.Len()) committed = true if err := tx.Commit(); err != nil { diff --git a/multidownloader/sync/evmdriver.go b/multidownloader/sync/evmdriver.go index a67c8c736..86782391f 100644 --- a/multidownloader/sync/evmdriver.go +++ b/multidownloader/sync/evmdriver.go @@ -81,7 +81,7 @@ func (d *EVMDriver) Sync(ctx context.Context, firstBlockNumber *uint64) { func (d *EVMDriver) syncStep(ctx context.Context) error { if d.compatibilityChecker != nil { if err := d.compatibilityChecker.Check(ctx, nil); err != nil { - err := fmt.Errorf("EVMDriver: error checking compatibility data between downloader (runtime)"+ + err := fmt.Errorf("Multidownloader_EVMDriver: error checking compatibility data between downloader (runtime)"+ " and processor (db): %w", err) return err } @@ -90,9 +90,9 @@ func (d *EVMDriver) syncStep(ctx context.Context) error { lastBlockHeader, err := d.processor.GetLastProcessedBlockHeader(ctx) if err != nil { - return fmt.Errorf("EVMDriver: error getting last processed block from processor: %w", err) + return fmt.Errorf("Multidownloader_EVMDriver: error getting last processed block from processor: %w", err) } - d.logger.Infof("EVMDriver: starting sync from last processed block: %s", lastBlockHeader.Brief()) + d.logger.Debugf("Multidownloader_EVMDriver: starting sync from last processed block: %s", lastBlockHeader.Brief()) blocks, err := d.downloader.DownloadNextBlocks(ctx, lastBlockHeader, d.syncBlockChunkSize, @@ -102,23 +102,23 @@ func (d *EVMDriver) syncStep(ctx context.Context) error { switch { case mdrtypes.IsReorgedError(err): if reorgErr := d.handleReorg(ctx, mdrtypes.CastReorgedError(err)); reorgErr != nil { - return fmt.Errorf("EVMDriver: error handling reorg: %w", reorgErr) + return fmt.Errorf("Multidownloader_EVMDriver: error handling reorg: %w", reorgErr) } 
// Reorg processed return nil case errors.Is(err, ErrLogsNotAvailable): - d.logger.Debug("EVMDriver: no logs available yet, waiting to retry") + d.logger.Debug("Multidownloader_EVMDriver: no logs available yet, waiting to retry") return nil default: - return fmt.Errorf("EVMDriver: error downloading blocks: %w", err) + return fmt.Errorf("Multidownloader_EVMDriver: error downloading blocks: %w", err) } } if err = d.processBlocks(ctx, blocks); err != nil { - return fmt.Errorf("EVMDriver: error processing blocks: %w", err) + return fmt.Errorf("Multidownloader_EVMDriver: error processing blocks: %w", err) } if blocks != nil { LastProcessedBlock := blocks.Data.LastBlock() - d.logger.Infof("EVMDriver: processed %d blocks, percent %.2f%% complete. LastBlock: %s", + d.logger.Debugf("Multidownloader_EVMDriver: processed %d blocks, percent %.2f%% complete. LastBlock: %s", len(blocks.Data), blocks.CompletionPercentage, LastProcessedBlock.Brief()) } return nil From f74f02adfbb0ac2c06b9471636999c94856805cc Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Fri, 20 Mar 2026 15:40:24 +0100 Subject: [PATCH 23/28] fix: remove unneeded function GeneratePreBuildParams --- .../flows/builder_flow_aggchain_prover.go | 6 -- aggsender/flows/builder_flow_pp.go | 6 -- .../mocks/mock_aggsender_builder_flow.go | 59 ------------------- aggsender/types/interfaces.go | 3 - cmd/run.go | 22 +++++-- l2gersync/evm_downloader_sovereign.go | 4 +- l2gersync/l2_ger_syncer.go | 2 +- multidownloader/sync/evmdownloader.go | 27 +++++---- 8 files changed, 37 insertions(+), 92 deletions(-) diff --git a/aggsender/flows/builder_flow_aggchain_prover.go b/aggsender/flows/builder_flow_aggchain_prover.go index 50b0662bb..6bf071bef 100644 --- a/aggsender/flows/builder_flow_aggchain_prover.go +++ b/aggsender/flows/builder_flow_aggchain_prover.go @@ -386,12 +386,6 @@ func (a *AggchainProverBuilderFlow) getLastProvenBlock( return fromBlock - 1 } -// GeneratePreBuildParams 
generates the pre-build parameters delegating to the base flow -func (a *AggchainProverBuilderFlow) GeneratePreBuildParams(ctx context.Context, - certType types.CertificateType) (*types.CertificatePreBuildParams, error) { - return a.baseFlow.GeneratePreBuildParams(ctx, certType) -} - // Signer returns the signer used to sign the certificate func (a *AggchainProverBuilderFlow) Signer() signertypes.Signer { return a.certificateSigner diff --git a/aggsender/flows/builder_flow_pp.go b/aggsender/flows/builder_flow_pp.go index 8159a3351..cf0ce811e 100644 --- a/aggsender/flows/builder_flow_pp.go +++ b/aggsender/flows/builder_flow_pp.go @@ -147,12 +147,6 @@ func (p *PPBuilderFlow) UpdateAggchainData( return nil } -// GeneratePreBuildParams generates the pre-build parameters delegating to the base flow -func (p *PPBuilderFlow) GeneratePreBuildParams(ctx context.Context, - certType types.CertificateType) (*types.CertificatePreBuildParams, error) { - return p.baseFlow.GeneratePreBuildParams(ctx, certType) -} - // Signer returns the signer used to sign the certificate func (p *PPBuilderFlow) Signer() signertypes.Signer { return p.certificateSigner diff --git a/aggsender/mocks/mock_aggsender_builder_flow.go b/aggsender/mocks/mock_aggsender_builder_flow.go index c88b0b4e7..2963c3653 100644 --- a/aggsender/mocks/mock_aggsender_builder_flow.go +++ b/aggsender/mocks/mock_aggsender_builder_flow.go @@ -191,65 +191,6 @@ func (_c *AggsenderBuilderFlow_GenerateBuildParams_Call) RunAndReturn(run func(c return _c } -// GeneratePreBuildParams provides a mock function with given fields: ctx, certType -func (_m *AggsenderBuilderFlow) GeneratePreBuildParams(ctx context.Context, certType types.CertificateType) (*types.CertificatePreBuildParams, error) { - ret := _m.Called(ctx, certType) - - if len(ret) == 0 { - panic("no return value specified for GeneratePreBuildParams") - } - - var r0 *types.CertificatePreBuildParams - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, 
types.CertificateType) (*types.CertificatePreBuildParams, error)); ok { - return rf(ctx, certType) - } - if rf, ok := ret.Get(0).(func(context.Context, types.CertificateType) *types.CertificatePreBuildParams); ok { - r0 = rf(ctx, certType) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.CertificatePreBuildParams) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, types.CertificateType) error); ok { - r1 = rf(ctx, certType) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// AggsenderBuilderFlow_GeneratePreBuildParams_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GeneratePreBuildParams' -type AggsenderBuilderFlow_GeneratePreBuildParams_Call struct { - *mock.Call -} - -// GeneratePreBuildParams is a helper method to define mock.On call -// - ctx context.Context -// - certType types.CertificateType -func (_e *AggsenderBuilderFlow_Expecter) GeneratePreBuildParams(ctx interface{}, certType interface{}) *AggsenderBuilderFlow_GeneratePreBuildParams_Call { - return &AggsenderBuilderFlow_GeneratePreBuildParams_Call{Call: _e.mock.On("GeneratePreBuildParams", ctx, certType)} -} - -func (_c *AggsenderBuilderFlow_GeneratePreBuildParams_Call) Run(run func(ctx context.Context, certType types.CertificateType)) *AggsenderBuilderFlow_GeneratePreBuildParams_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(types.CertificateType)) - }) - return _c -} - -func (_c *AggsenderBuilderFlow_GeneratePreBuildParams_Call) Return(_a0 *types.CertificatePreBuildParams, _a1 error) *AggsenderBuilderFlow_GeneratePreBuildParams_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *AggsenderBuilderFlow_GeneratePreBuildParams_Call) RunAndReturn(run func(context.Context, types.CertificateType) (*types.CertificatePreBuildParams, error)) *AggsenderBuilderFlow_GeneratePreBuildParams_Call { - _c.Call.Return(run) - return _c -} - // GetCertificateBuildParams provides a mock function 
with given fields: ctx func (_m *AggsenderBuilderFlow) GetCertificateBuildParams(ctx context.Context) (*types.CertificateBuildParams, error) { ret := _m.Called(ctx) diff --git a/aggsender/types/interfaces.go b/aggsender/types/interfaces.go index 297535034..45306c1f6 100644 --- a/aggsender/types/interfaces.go +++ b/aggsender/types/interfaces.go @@ -32,9 +32,6 @@ type AggsenderBuilderFlow interface { // BuildCertificate builds a certificate based on the buildParams BuildCertificate(ctx context.Context, buildParams *CertificateBuildParams) (*agglayertypes.Certificate, error) - // GeneratePreBuildParams generates the pre-build parameters based on the certificate type - GeneratePreBuildParams(ctx context.Context, - certType CertificateType) (*CertificatePreBuildParams, error) // GenerateBuildParams generates the build parameters based on the preParams GenerateBuildParams(ctx context.Context, preParams *CertificatePreBuildParams) (*CertificateBuildParams, error) diff --git a/cmd/run.go b/cmd/run.go index 8fee536ea..87d2c0c82 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -139,8 +139,7 @@ func start(cliCtx *cli.Context) error { l1BridgeSync := runBridgeSyncL1IfNeeded(ctx, components, cfg.BridgeL1Sync, reorgDetectorL1, l1Client, MainnetID, &backfillWg) - initialLER, err := query.NewLERDataQuerier( - cfg.AggSender.RollupCreationBlockL1, rollupDataQuerier).GetInitialLocalExitRoot() + initialLER, err := GetInitialLER(cfg.AggSender.RollupCreationBlockL1, rollupDataQuerier) if err != nil { return fmt.Errorf("failed to get initial local exit root: %w", err) } @@ -152,7 +151,7 @@ func start(cliCtx *cli.Context) error { } l2BridgeSync := runBridgeSyncL2IfNeeded(ctx, components, cfg.BridgeL2Sync, reorgDetectorL2, - l2Client, rollupDataQuerier.RollupID, initialLER, &backfillWg) + l2Client, rollupDataQuerier.RollupID, *initialLER, &backfillWg) l2GERSync := runL2GERSyncIfNeeded( ctx, components, cfg.L2GERSync, reorgDetectorL2, l2Client, l1InfoTreeSync, l1Client, ) @@ -221,7 +220,7 @@ 
func start(cliCtx *cli.Context) error { l2Client, rollupDataQuerier, committeeQuerier, - initialLER, + *initialLER, ) if err != nil { log.Fatalf("failed to create AggSender: %v", err) @@ -255,7 +254,7 @@ func start(cliCtx *cli.Context) error { l2Client, rollupDataQuerier, committeeQuerier, - initialLER, + *initialLER, ) if err != nil { log.Fatal(err) @@ -791,6 +790,17 @@ func resolveL1BridgeConfig(cfg *bridgesync.Config, components []string, logprefi } } +func GetInitialLER( + rollupCreationBlockL1 uint64, + rollupDataQuerier *ethermanquierier.RollupDataQuerier) (*common.Hash, error) { + if rollupDataQuerier == nil { + return nil, nil + } + lerQuery := query.NewLERDataQuerier(rollupCreationBlockL1, rollupDataQuerier) + ler, err := lerQuery.GetInitialLocalExitRoot() + return &ler, err +} + func runBridgeSyncL1IfNeeded( ctx context.Context, components []string, @@ -1064,7 +1074,9 @@ func createRollupDataQuerier( aggkitcommon.AGGCHAINPROOFGEN, aggkitcommon.BRIDGE, aggkitcommon.L1BRIDGESYNC, + aggkitcommon.L1INFOTREESYNC, aggkitcommon.L2BRIDGESYNC, + aggkitcommon.L2GERSYNC, }, components) { return nil, nil } diff --git a/l2gersync/evm_downloader_sovereign.go b/l2gersync/evm_downloader_sovereign.go index 3c940025f..a6bf83075 100644 --- a/l2gersync/evm_downloader_sovereign.go +++ b/l2gersync/evm_downloader_sovereign.go @@ -148,8 +148,8 @@ func (d *downloaderSovereign) buildAppender( l1InfoTreeLeaf, err := d.l1InfoTreeSync.GetInfoByGlobalExitRoot(insertGEREvent.NewGlobalExitRoot) if err != nil { - log.Errorf("failed to fetch l1 info tree for global exit root %s: %v", - common.Hash(insertGEREvent.NewGlobalExitRoot).Hex(), err) + log.Errorf("failed to fetch l1 info tree for global exit root %s (block: %d): %v", + common.Hash(insertGEREvent.NewGlobalExitRoot).Hex(), b.Num, err) ctx := context.Background() isUpToDate, upToDateErr := d.l1InfoTreeSync.IsUpToDate(ctx, d.l1Client) if upToDateErr != nil { diff --git a/l2gersync/l2_ger_syncer.go b/l2gersync/l2_ger_syncer.go index 
7bdbc75bb..d65edcff0 100644 --- a/l2gersync/l2_ger_syncer.go +++ b/l2gersync/l2_ger_syncer.go @@ -145,7 +145,7 @@ func resolveSyncMode(ctx context.Context, address common.Address, backend bind.C // Start initiates the synchronization process. func (s *L2GERSync) Start(ctx context.Context) { - s.processor.log.Info("starting l2gersync") + s.processor.log.Info("starting l2gersync at block %d", s.cfg.InitialBlockNum) s.driver.Sync(ctx, &s.cfg.InitialBlockNum) } diff --git a/multidownloader/sync/evmdownloader.go b/multidownloader/sync/evmdownloader.go index 0718fff38..38ad6fcb3 100644 --- a/multidownloader/sync/evmdownloader.go +++ b/multidownloader/sync/evmdownloader.go @@ -88,18 +88,20 @@ func (d *EVMDownloader) DownloadNextBlocks(ctx context.Context, return true, nil }) if errors.Is(err, aggkitcommon.ErrTimeoutReached) { - return nil, fmt.Errorf("EVMDownloader.DownloadNextBlocks: logs not available for query: %s after waiting %s: %w", + return nil, fmt.Errorf("Multidownloader_EVMDownloader.DownloadNextBlocks: "+ + "logs not available for query: %s after waiting %s: %w", maxLogQuery.String(), d.waitPeriodToCatchUpMaximumLogRange.String(), ErrLogsNotAvailable) } if err != nil { return nil, err } if !conditionMet { - return nil, fmt.Errorf("EVMDownloader.DownloadNextBlocks: logs not available for query: %s. Err: %w", + return nil, fmt.Errorf("Multidownloader_EVMDownloader.DownloadNextBlocks: "+ + "logs not available for query: %s. Err: %w", maxLogQuery.String(), ErrLogsNotAvailable) } if result == nil { - return nil, fmt.Errorf("EVMDownloader.DownloadNextBlocks: executeLogQuery "+ + return nil, fmt.Errorf("Multidownloader_EVMDownloader.DownloadNextBlocks: executeLogQuery "+ "return result=nil. 
Range: %s", maxLogQuery.BlockRange.String()) } // Before returning we check again that lastBlockHeader is not reorged @@ -145,9 +147,10 @@ func (d *EVMDownloader) executeLogQuery(ctx context.Context, err = d.addLastBlockIfNotIncluded(ctx, result, logQueryResponse.ResponseRange, logQueryResponse.UnsafeRange) if err != nil { - return nil, fmt.Errorf("EVMDownloader.executeLogQuery: adding last block: %w", err) + return nil, fmt.Errorf("Multidownloader_EVMDownloader.executeLogQuery: adding last block: %w", err) } - d.logger.Infof("EVMDownloader.executeLogQuery(block:%s): len(logs)= %d", logQuery.BlockRange.String(), totalLogs) + d.logger.Debugf("Multidownloader_EVMDownloader.executeLogQuery(block:%s): "+ + "len(logs)= %d", logQuery.BlockRange.String(), totalLogs) return result, nil } @@ -155,7 +158,7 @@ func (d *EVMDownloader) getFullBlockRange(ctx context.Context, syncerConfig aggkittypes.SyncerConfig) (*aggkitcommon.BlockRange, error) { blockTo, err := d.multidownloader.HeaderByNumber(ctx, &syncerConfig.ToBlock) if err != nil || blockTo == nil { - return nil, fmt.Errorf("EVMDownloader.getFullBlockRange: error getting 'to' block header: %w", err) + return nil, fmt.Errorf("Multidownloader_EVMDownloader.getFullBlockRange: error getting 'to' block header: %w", err) } br := aggkitcommon.NewBlockRange(syncerConfig.FromBlock, blockTo.Number) return &br, nil @@ -166,7 +169,8 @@ func (d *EVMDownloader) calculatePercentCompletation(ctx context.Context, syncerConfig aggkittypes.SyncerConfig, lastRange aggkitcommon.BlockRange) (float64, error) { fullRange, err := d.getFullBlockRange(ctx, syncerConfig) if err != nil { - return 0, fmt.Errorf("EVMDownloader.calculatePercentCompletation: error getting full block range: %w", err) + return 0, fmt.Errorf("Multidownloader_EVMDownloader.calculatePercentCompletation: "+ + "error getting full block range: %w", err) } totalBlocks := fullRange.CountBlocks() pendingRange := aggkitcommon.NewBlockRange(lastRange.ToBlock+1, fullRange.ToBlock) @@ 
-192,7 +196,8 @@ func (d *EVMDownloader) addLastBlockIfNotIncluded(ctx context.Context, hdr, _, err := d.multidownloader.StorageHeaderByNumber(ctx, aggkittypes.NewBlockNumber(lastBlockNumber)) if err != nil { - d.logger.Errorf("EVMDownloader: error getting block header for block number %d: %v", lastBlockNumber, err) + d.logger.Errorf("Multidownloader_EVMDownloader: error getting block header for block number %d: %v", + lastBlockNumber, err) return nil } isFinalizedBlock := !unsafeRange.ContainsBlockNumber(lastBlockNumber) @@ -200,7 +205,8 @@ func (d *EVMDownloader) addLastBlockIfNotIncluded(ctx context.Context, // Check that we are not in the unsafe zone. Because in that case we can't fake the Hash and it's an error // because the block must in in storage if !isFinalizedBlock { - err := fmt.Errorf("EVMDownloader: cannot get block header for block number %d in unsafe zone", lastBlockNumber) + err := fmt.Errorf("Multidownloader_EVMDownloader: "+ + "cannot get block header for block number %d in unsafe zone", lastBlockNumber) d.logger.Error(err) return err } @@ -224,7 +230,8 @@ func (d *EVMDownloader) addLastBlockIfNotIncluded(ctx context.Context, if hdr.ParentHash != nil { emptyBlock.ParentHash = *hdr.ParentHash } - d.logger.Debugf("EVMDownloader.addLastBlockIfNotIncluded: to response %s adding empty block number %d / %s", + d.logger.Debugf("Multidownloader_EVMDownloader.addLastBlockIfNotIncluded: "+ + "to response %s adding empty block number %d / %s", responseRange.String(), lastBlockNumber, hdr.Hash.Hex()) result.Data = append(result.Data, emptyBlock) From 3133a3017a5923d2a266d4f26961e216b983dccb Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Fri, 20 Mar 2026 15:51:57 +0100 Subject: [PATCH 24/28] fix: nil pointer in bridge service --- cmd/run.go | 1 + 1 file changed, 1 insertion(+) diff --git a/cmd/run.go b/cmd/run.go index 87d2c0c82..58642b146 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -946,6 +946,7 @@ func 
runClaimSyncL2IfNeeded( aggkitcommon.AGGSENDER, aggkitcommon.AGGSENDERVALIDATOR, aggkitcommon.AGGCHAINPROOFGEN, + aggkitcommon.BRIDGE, aggkitcommon.L2CLAIMSYNC}, components) { return nil } From 9956a2cdfb9155d355c2b8812c7907a0a51260eb Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Fri, 20 Mar 2026 16:26:20 +0100 Subject: [PATCH 25/28] feat: coverage --- aggsender/aggsender_validator.go | 3 +- aggsender/aggsender_validator_test.go | 95 ++++++++++++++++++ aggsender/query/bridge_query_test.go | 128 +++++++++++++++++++------ bridgesync/config.go | 17 +--- bridgesync/config_test.go | 13 +-- multidownloader/sync/evmdriver.go | 18 ++-- multidownloader/sync/evmdriver_test.go | 39 ++++++++ 7 files changed, 256 insertions(+), 57 deletions(-) create mode 100644 aggsender/aggsender_validator_test.go diff --git a/aggsender/aggsender_validator.go b/aggsender/aggsender_validator.go index 0ee429ba8..fe6407fd7 100644 --- a/aggsender/aggsender_validator.go +++ b/aggsender/aggsender_validator.go @@ -72,8 +72,7 @@ func (a *AggsenderValidator) Start(ctx context.Context) { 1) err := a.initialBlockClaimSyncerSetter.SetClaimSyncerNextRequiredBlock(ctx, a.l2ClaimSyncer, rh) if err != nil { - a.log.Fatalf("failed to set claim syncer next required block: %v", err) - return + a.log.Panicf("failed to set claim syncer next required block: %v", err) } a.validatorService.Start(ctx) } diff --git a/aggsender/aggsender_validator_test.go b/aggsender/aggsender_validator_test.go new file mode 100644 index 000000000..f5d12b1a7 --- /dev/null +++ b/aggsender/aggsender_validator_test.go @@ -0,0 +1,95 @@ +package aggsender + +import ( + "context" + "errors" + "testing" + + agglayertypes "github.com/agglayer/aggkit/agglayer/types" + "github.com/agglayer/aggkit/aggsender/mocks" + aggsendertypes "github.com/agglayer/aggkit/aggsender/types" + validatorcfg "github.com/agglayer/aggkit/aggsender/validator" + aggkitgrpc "github.com/agglayer/aggkit/grpc" + 
"github.com/agglayer/aggkit/log" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +func newTestAggsenderValidator(t *testing.T) *AggsenderValidator { + t.Helper() + grpcServer, err := aggkitgrpc.NewServer(aggkitgrpc.ServerConfig{ + Host: "127.0.0.1", + Port: 0, + }) + require.NoError(t, err) + return &AggsenderValidator{ + log: log.WithFields("module", "aggsender-validator-test"), + validatorService: grpcServer, + cfg: validatorcfg.Config{}, + } +} + +// TestAggsenderValidatorStart_SetClaimSyncerFails checks that Start panics (via Panicf) +// when SetClaimSyncerNextRequiredBlock returns an error. +func TestAggsenderValidatorStart_SetClaimSyncerFails(t *testing.T) { + sut := newTestAggsenderValidator(t) + mockSetter := mocks.NewInitialBlockClaimSyncerSetter(t) + mockSetter.EXPECT(). + SetClaimSyncerNextRequiredBlock(mock.Anything, mock.Anything, mock.Anything). + Return(errors.New("setter error")).Once() + sut.initialBlockClaimSyncerSetter = mockSetter + + require.Panics(t, func() { + sut.Start(t.Context()) + }) +} + +// TestAggsenderValidatorStart_Success checks that Start succeeds when +// SetClaimSyncerNextRequiredBlock returns nil and the gRPC server starts normally. +func TestAggsenderValidatorStart_Success(t *testing.T) { + sut := newTestAggsenderValidator(t) + mockSetter := mocks.NewInitialBlockClaimSyncerSetter(t) + mockSetter.EXPECT(). + SetClaimSyncerNextRequiredBlock(mock.Anything, mock.Anything, mock.Anything). + Return(nil).Once() + sut.initialBlockClaimSyncerSetter = mockSetter + + // Cancel the context immediately so the gRPC server stops right after starting. + ctx, cancel := context.WithCancel(t.Context()) + cancel() + + require.NotPanics(t, func() { + sut.Start(ctx) + }) +} + +// TestAggsenderValidatorValidateCertificate_Success checks that ValidateCertificate +// delegates to the inner validator and returns nil on success.
+func TestAggsenderValidatorValidateCertificate_Success(t *testing.T) { + sut := newTestAggsenderValidator(t) + mockValidator := mocks.NewCertificateValidator(t) + params := aggsendertypes.VerifyIncomingRequest{ + Certificate: &agglayertypes.Certificate{}, + } + mockValidator.EXPECT().ValidateCertificate(mock.Anything, params).Return(nil).Once() + sut.validator = mockValidator + + err := sut.ValidateCertificate(t.Context(), params) + require.NoError(t, err) +} + +// TestAggsenderValidatorValidateCertificate_Error checks that ValidateCertificate +// propagates the error returned by the inner validator. +func TestAggsenderValidatorValidateCertificate_Error(t *testing.T) { + sut := newTestAggsenderValidator(t) + mockValidator := mocks.NewCertificateValidator(t) + expectedErr := errors.New("validation failed") + params := aggsendertypes.VerifyIncomingRequest{ + Certificate: &agglayertypes.Certificate{}, + } + mockValidator.EXPECT().ValidateCertificate(mock.Anything, params).Return(expectedErr).Once() + sut.validator = mockValidator + + err := sut.ValidateCertificate(t.Context(), params) + require.ErrorIs(t, err, expectedErr) +} diff --git a/aggsender/query/bridge_query_test.go b/aggsender/query/bridge_query_test.go index d9845ec8f..a791203f1 100644 --- a/aggsender/query/bridge_query_test.go +++ b/aggsender/query/bridge_query_test.go @@ -174,50 +174,124 @@ func TestGetLastProcessedBlock(t *testing.T) { t.Parallel() ctx := context.Background() - testCases := []struct { - name string - mockFn func(*mocks.L2BridgeSyncer) - expectedBlock uint64 - expectedError string - }{ + + type testCase struct { + name string + claimSyncer claimsynctypes.ClaimSyncer + setupBridge func(*mocks.L2BridgeSyncer) + expectedBlock uint64 + expectedFound bool + expectedErrMsg string + } + + tests := []testCase{ { - name: "success - valid last processed block", - mockFn: func(mockSyncer *mocks.L2BridgeSyncer) { - mockSyncer.EXPECT().GetLastProcessedBlock(ctx).Return(uint64(150), true, nil) + name: 
"bridge error", + setupBridge: func(m *mocks.L2BridgeSyncer) { + m.EXPECT().GetLastProcessedBlock(ctx).Return(uint64(0), false, errors.New("bridge error")) + }, + expectedErrMsg: "error getting last processed block: bridge error", + }, + { + name: "bridge not found", + setupBridge: func(m *mocks.L2BridgeSyncer) { + m.EXPECT().GetLastProcessedBlock(ctx).Return(uint64(0), false, nil) + }, + expectedFound: false, + }, + { + name: "no claim syncer returns bridge block", + setupBridge: func(m *mocks.L2BridgeSyncer) { + m.EXPECT().GetLastProcessedBlock(ctx).Return(uint64(150), true, nil) }, expectedBlock: 150, + expectedFound: true, }, { - name: "error - failed to fetch last processed block", - mockFn: func(mockSyncer *mocks.L2BridgeSyncer) { - mockSyncer.EXPECT().GetLastProcessedBlock(ctx).Return(uint64(0), false, errors.New("some error")) + name: "claim syncer error", + setupBridge: func(m *mocks.L2BridgeSyncer) { + m.EXPECT().GetLastProcessedBlock(ctx).Return(uint64(150), true, nil) + }, + claimSyncer: func() claimsynctypes.ClaimSyncer { + m := claimsynctypesmocks.NewClaimSyncer(t) + m.EXPECT().GetLastProcessedBlock(ctx).Return(uint64(0), false, errors.New("claim error")) + return m + }(), + expectedErrMsg: "error getting claim syncer last processed block: claim error", + }, + { + name: "claim syncer not found", + setupBridge: func(m *mocks.L2BridgeSyncer) { + m.EXPECT().GetLastProcessedBlock(ctx).Return(uint64(150), true, nil) + }, + claimSyncer: func() claimsynctypes.ClaimSyncer { + m := claimsynctypesmocks.NewClaimSyncer(t) + m.EXPECT().GetLastProcessedBlock(ctx).Return(uint64(0), false, nil) + return m + }(), + expectedFound: false, + }, + { + name: "claim block is behind bridge block - returns claim block", + setupBridge: func(m *mocks.L2BridgeSyncer) { + m.EXPECT().GetLastProcessedBlock(ctx).Return(uint64(150), true, nil) + }, + claimSyncer: func() claimsynctypes.ClaimSyncer { + m := claimsynctypesmocks.NewClaimSyncer(t) + 
m.EXPECT().GetLastProcessedBlock(ctx).Return(uint64(100), true, nil) + return m + }(), + expectedBlock: 100, + expectedFound: true, + }, + { + name: "claim block is ahead of bridge block - returns bridge block", + setupBridge: func(m *mocks.L2BridgeSyncer) { + m.EXPECT().GetLastProcessedBlock(ctx).Return(uint64(150), true, nil) }, - expectedError: "error getting last processed block: some error", + claimSyncer: func() claimsynctypes.ClaimSyncer { + m := claimsynctypesmocks.NewClaimSyncer(t) + m.EXPECT().GetLastProcessedBlock(ctx).Return(uint64(200), true, nil) + return m + }(), + expectedBlock: 150, + expectedFound: true, + }, + { + name: "claim block equals bridge block - returns bridge block", + setupBridge: func(m *mocks.L2BridgeSyncer) { + m.EXPECT().GetLastProcessedBlock(ctx).Return(uint64(150), true, nil) + }, + claimSyncer: func() claimsynctypes.ClaimSyncer { + m := claimsynctypesmocks.NewClaimSyncer(t) + m.EXPECT().GetLastProcessedBlock(ctx).Return(uint64(150), true, nil) + return m + }(), + expectedBlock: 150, + expectedFound: true, }, } - for _, tc := range testCases { - tc := tc - + for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { t.Parallel() - mockSyncer := new(mocks.L2BridgeSyncer) - mockSyncer.EXPECT().OriginNetwork().Return(1).Once() - AgglayerBridgeL2Reader := new(mocks.AgglayerBridgeL2Reader) - tc.mockFn(mockSyncer) + mockBridge := mocks.NewL2BridgeSyncer(t) + mockBridge.EXPECT().OriginNetwork().Return(uint32(1)).Once() + tc.setupBridge(mockBridge) - bridgeQuerier := NewBridgeDataQuerier(nil, mockSyncer, nil, 0, AgglayerBridgeL2Reader) + querier := NewBridgeDataQuerier(nil, mockBridge, tc.claimSyncer, 0, mocks.NewAgglayerBridgeL2Reader(t)) - block, _, err := bridgeQuerier.GetLastProcessedBlock(ctx) - if tc.expectedError != "" { - require.ErrorContains(t, err, tc.expectedError) + block, found, err := querier.GetLastProcessedBlock(ctx) + if tc.expectedErrMsg != "" { + require.ErrorContains(t, err, tc.expectedErrMsg) } else { 
require.NoError(t, err) - require.Equal(t, tc.expectedBlock, block) + require.Equal(t, tc.expectedFound, found) + if tc.expectedFound { + require.Equal(t, tc.expectedBlock, block) + } } - - mockSyncer.AssertExpectations(t) }) } } diff --git a/bridgesync/config.go b/bridgesync/config.go index b2c85fad9..03b2f6ad2 100644 --- a/bridgesync/config.go +++ b/bridgesync/config.go @@ -8,19 +8,6 @@ import ( "github.com/ethereum/go-ethereum/common" ) -// TrueFalseAutoMode is an alias for config/types.TrueFalseAutoMode. -type TrueFalseAutoMode = types.TrueFalseAutoMode - -// Re-export the TrueFalseAutoMode values from config/types. -var ( - // TrueMode always extracts FromAddress using debug_traceTransaction - TrueMode = types.TrueMode - // FalseMode never extracts FromAddress - FalseMode = types.FalseMode - // AutoMode decides automatically based on whether BRIDGE component is active - AutoMode = types.AutoMode -) - type Config struct { // DBPath path of the DB DBPath string `mapstructure:"DBPath"` @@ -54,7 +41,7 @@ type Config struct { // Note: TxnSender and ToAddress are always extracted via standard eth_getTransactionByHash. // Default: "auto" // SyncFromInBridges.Resolved is set programmatically after resolution; not read from config. 
- SyncFromInBridges TrueFalseAutoMode `jsonschema:"enum=true, enum=false, enum=auto" mapstructure:"SyncFromInBridges"` //nolint:lll + SyncFromInBridges types.TrueFalseAutoMode `jsonschema:"enum=true, enum=false, enum=auto" mapstructure:"SyncFromInBridges"` //nolint:lll } // Validate checks if the configuration is valid @@ -64,7 +51,7 @@ func (c Config) Validate() error { } // Validate SyncFromInBridges (empty is allowed — means not configured) if c.SyncFromInBridges.Mode != "" { - var m TrueFalseAutoMode + var m types.TrueFalseAutoMode if err := m.UnmarshalText([]byte(c.SyncFromInBridges.Mode)); err != nil { return fmt.Errorf("invalid SyncFromInBridges value: %w", err) } diff --git a/bridgesync/config_test.go b/bridgesync/config_test.go index 9167b987b..104ca52b7 100644 --- a/bridgesync/config_test.go +++ b/bridgesync/config_test.go @@ -3,6 +3,7 @@ package bridgesync import ( "testing" + "github.com/agglayer/aggkit/config/types" aggkittypes "github.com/agglayer/aggkit/types" "github.com/stretchr/testify/require" ) @@ -24,7 +25,7 @@ func TestConfig_Validate(t *testing.T) { name: "valid config with SyncFromInBridges true", config: Config{ BlockFinality: aggkittypes.SafeBlock, - SyncFromInBridges: TrueMode, + SyncFromInBridges: types.TrueMode, }, expectedError: "", }, @@ -32,7 +33,7 @@ func TestConfig_Validate(t *testing.T) { name: "valid config with SyncFromInBridges false", config: Config{ BlockFinality: aggkittypes.SafeBlock, - SyncFromInBridges: FalseMode, + SyncFromInBridges: types.FalseMode, }, expectedError: "", }, @@ -40,7 +41,7 @@ func TestConfig_Validate(t *testing.T) { name: "valid config with SyncFromInBridges auto", config: Config{ BlockFinality: aggkittypes.SafeBlock, - SyncFromInBridges: AutoMode, + SyncFromInBridges: types.AutoMode, }, expectedError: "", }, @@ -48,7 +49,7 @@ func TestConfig_Validate(t *testing.T) { name: "valid config with empty SyncFromInBridges", config: Config{ BlockFinality: aggkittypes.SafeBlock, - SyncFromInBridges: 
TrueFalseAutoMode{}, + SyncFromInBridges: types.TrueFalseAutoMode{}, }, expectedError: "", }, @@ -66,7 +67,7 @@ func TestConfig_Validate(t *testing.T) { name: "invalid config with invalid SyncFromInBridges", config: Config{ BlockFinality: aggkittypes.SafeBlock, - SyncFromInBridges: TrueFalseAutoMode{Mode: "invalid_value"}, + SyncFromInBridges: types.TrueFalseAutoMode{Mode: "invalid_value"}, }, expectedError: "invalid SyncFromInBridges value:", }, @@ -74,7 +75,7 @@ func TestConfig_Validate(t *testing.T) { name: "invalid config with numeric SyncFromInBridges", config: Config{ BlockFinality: aggkittypes.SafeBlock, - SyncFromInBridges: TrueFalseAutoMode{Mode: "123"}, + SyncFromInBridges: types.TrueFalseAutoMode{Mode: "123"}, }, expectedError: "invalid SyncFromInBridges value:", }, diff --git a/multidownloader/sync/evmdriver.go b/multidownloader/sync/evmdriver.go index 86782391f..33c3a9826 100644 --- a/multidownloader/sync/evmdriver.go +++ b/multidownloader/sync/evmdriver.go @@ -51,18 +51,22 @@ func NewEVMDriver( compatibilityChecker: compatibilityChecker, } } - -func (d *EVMDriver) Sync(ctx context.Context, firstBlockNumber *uint64) { - // firstBlockNumber is unused and not support in the current implementation - // it just check that is equal to syncerConfig.InitialBlockNum +func (d *EVMDriver) checkFirstBlockNumberParams(firstBlockNumber *uint64) error { if firstBlockNumber == nil { - d.logger.Fatalf("multidownloader doesnt support firstBlockNumber==nil") - return + return fmt.Errorf("multidownloader doesnt support firstBlockNumber==nil") } if *firstBlockNumber != d.syncerConfig.FromBlock { - d.logger.Fatalf("multidownloader doesnt support firstBlockNumber different than FromBlock, got %d, expected %d", + return fmt.Errorf("multidownloader doesnt support firstBlockNumber different than FromBlock, got %d, expected %d", *firstBlockNumber, d.syncerConfig.FromBlock) } + return nil +} +func (d *EVMDriver) Sync(ctx context.Context, firstBlockNumber *uint64) { + // 
firstBlockNumber is unused and not supported in the current implementation + // it just checks that it is equal to syncerConfig.InitialBlockNum + if err := d.checkFirstBlockNumberParams(firstBlockNumber); err != nil { + d.logger.Fatalf("error in firstBlockNumber parameter: %v", err) + } attempts := 0 for { if ctx.Err() != nil { diff --git a/multidownloader/sync/evmdriver_test.go b/multidownloader/sync/evmdriver_test.go index 908b8a016..cc9c05e7e 100644 --- a/multidownloader/sync/evmdriver_test.go +++ b/multidownloader/sync/evmdriver_test.go @@ -63,6 +63,45 @@ func newEVMDriverTestData(t *testing.T, compatibilityCheckExpectations bool) *ev } } +func TestEVMDriver_CheckFirstBlockNumberParams(t *testing.T) { + fromBlock := uint64(100) + syncerConfig := aggkittypes.SyncerConfig{FromBlock: fromBlock} + + tests := []struct { + name string + firstBlockNumber *uint64 + expectErr bool + }{ + { + name: "nil firstBlockNumber returns error", + firstBlockNumber: nil, + expectErr: true, + }, + { + name: "firstBlockNumber different from FromBlock returns error", + firstBlockNumber: func() *uint64 { v := uint64(99); return &v }(), + expectErr: true, + }, + { + name: "firstBlockNumber equal to FromBlock returns nil", + firstBlockNumber: &fromBlock, + expectErr: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + driver := &EVMDriver{syncerConfig: syncerConfig} + err := driver.checkFirstBlockNumberParams(tt.firstBlockNumber) + if tt.expectErr { + require.Error(t, err) + } else { + require.NoError(t, err) + } + }) + } +} + func TestNewEVMDriver_SyncStep(t *testing.T) { t.Run("fail compatibility check", func(t *testing.T) { testData := newEVMDriverTestData(t, false) From 3cb717378bd303124253dc2b061151a1a081eb31 Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Fri, 20 Mar 2026 16:50:28 +0100 Subject: [PATCH 26/28] fix: PR comments --- aggsender/aggsender_validator.go | 5 ----- claimsync/embedded.go | 2 +-
claimsync/processor.go | 2 +- claimsync/storage/migrations/migrations.go | 7 ++----- claimsync/types/syncer_id.go | 5 +++++ l2gersync/l2_ger_syncer.go | 2 +- 6 files changed, 10 insertions(+), 13 deletions(-) diff --git a/aggsender/aggsender_validator.go b/aggsender/aggsender_validator.go index fe6407fd7..754ca979b 100644 --- a/aggsender/aggsender_validator.go +++ b/aggsender/aggsender_validator.go @@ -2,7 +2,6 @@ package aggsender import ( "context" - "errors" "github.com/agglayer/aggkit/agglayer" "github.com/agglayer/aggkit/aggsender/metrics" @@ -17,10 +16,6 @@ import ( ethcommon "github.com/ethereum/go-ethereum/common" ) -var ( - ErrNilCertificate = errors.New("aggsender-validator nil certificate") - ErrMetadataNotCompatible = errors.New("aggsender-validator metadata not compatible with the current version") -) type AggsenderValidator struct { log aggkitcommon.Logger diff --git a/claimsync/embedded.go b/claimsync/embedded.go index 9842c58f3..39bf1eff2 100644 --- a/claimsync/embedded.go +++ b/claimsync/embedded.go @@ -113,7 +113,7 @@ func (p *claimEmbeddedProcessor) ProcessBlockWithTx( ) error { event, ok := eventRaw.(Event) if !ok { - return fmt.Errorf("claimsync ProcessBlock: unexpected event type %T in block %d", event, block.Num) + return fmt.Errorf("claimsync ProcessBlock: unexpected event type %T in block %d", eventRaw, block.Num) } if event.Claim != nil { diff --git a/claimsync/processor.go b/claimsync/processor.go index 84272e499..25657c429 100644 --- a/claimsync/processor.go +++ b/claimsync/processor.go @@ -51,7 +51,7 @@ func (p *processor) ProcessBlock(ctx context.Context, block sync.Block) error { } }() - if err := p.storage.InsertBlock(ctx, tx, block.Num, block.Hash); err != nil { + if err := p.storage.InsertBlock(dbCtx, tx, block.Num, block.Hash); err != nil { p.log.Errorf("failed to insert block %d: %v", block.Num, err) return err } diff --git a/claimsync/storage/migrations/migrations.go b/claimsync/storage/migrations/migrations.go index 
a84f67274..76458c000 100644 --- a/claimsync/storage/migrations/migrations.go +++ b/claimsync/storage/migrations/migrations.go @@ -10,17 +10,14 @@ import ( "github.com/agglayer/aggkit/db/types" ) -// ClaimSync0001 is public because bridegsync needs it to -// set the migrations:this 0001 is equivalent to bridgesync0014, -// //go:embed claimsync0001.sql -var ClaimSync0001 string +var claimSync0001 string func GetClaimSyncMigrations() []types.Migration { return []types.Migration{ { ID: "claimsync0001", - SQL: ClaimSync0001, + SQL: claimSync0001, }, } } diff --git a/claimsync/types/syncer_id.go b/claimsync/types/syncer_id.go index e9d016a2d..dd4cf87f1 100644 --- a/claimsync/types/syncer_id.go +++ b/claimsync/types/syncer_id.go @@ -1,5 +1,7 @@ package types +import "fmt" + // ClaimSyncerID represents the type of bridge syncer type ClaimSyncerID int @@ -14,5 +16,8 @@ const ( ) func (b ClaimSyncerID) String() string { + if b < L1ClaimSyncer || b > L2ClaimSyncer { + return fmt.Sprintf("UnknownClaimSyncerID(%d)", b) + } return [...]string{"L1ClaimSyncer", "L2ClaimSyncer"}[b] } diff --git a/l2gersync/l2_ger_syncer.go b/l2gersync/l2_ger_syncer.go index d65edcff0..0b8cdc395 100644 --- a/l2gersync/l2_ger_syncer.go +++ b/l2gersync/l2_ger_syncer.go @@ -145,7 +145,7 @@ func resolveSyncMode(ctx context.Context, address common.Address, backend bind.C // Start initiates the synchronization process. 
func (s *L2GERSync) Start(ctx context.Context) { - s.processor.log.Info("starting l2gersync at block %d", s.cfg.InitialBlockNum) + s.processor.log.Infof("starting l2gersync at block %d", s.cfg.InitialBlockNum) s.driver.Sync(ctx, &s.cfg.InitialBlockNum) } From fcc47895d5843b83540dc7f1b135fd209389197b Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Fri, 20 Mar 2026 17:05:36 +0100 Subject: [PATCH 27/28] fix: PR comments --- aggsender/aggsender_validator.go | 5 +++-- aggsender/query/initial_block_to_claimsync_setter.go | 4 ++-- aggsender/types/interfaces.go | 2 -- claimsync/claimsync.go | 10 ++++------ claimsync/config.go | 4 ++-- claimsync/types/syncer_id.go | 8 ++++++-- 6 files changed, 17 insertions(+), 16 deletions(-) diff --git a/aggsender/aggsender_validator.go b/aggsender/aggsender_validator.go index 754ca979b..7e17296ea 100644 --- a/aggsender/aggsender_validator.go +++ b/aggsender/aggsender_validator.go @@ -16,7 +16,6 @@ import ( ethcommon "github.com/ethereum/go-ethereum/common" ) - type AggsenderValidator struct { log aggkitcommon.Logger validator types.CertificateValidator @@ -60,9 +59,11 @@ func NewAggsenderValidator(ctx context.Context, initialBlockClaimSyncerSetter: initialBlockClaimSyncerSetter, }, nil } + +// Start starts the AggsenderValidator service. func (a *AggsenderValidator) Start(ctx context.Context) { metrics.Register() - // This is hardcoded because validator to just do 1 retry if fails it and stop + // The validator only attempts once: if it fails, it stops. 
rh := aggkitcommon.NewRetryHandler([]configtypes.Duration{{Duration: a.cfg.DelayBetweenRetries.Duration}}, 1) err := a.initialBlockClaimSyncerSetter.SetClaimSyncerNextRequiredBlock(ctx, a.l2ClaimSyncer, rh) diff --git a/aggsender/query/initial_block_to_claimsync_setter.go b/aggsender/query/initial_block_to_claimsync_setter.go index c02e70160..6f8569646 100644 --- a/aggsender/query/initial_block_to_claimsync_setter.go +++ b/aggsender/query/initial_block_to_claimsync_setter.go @@ -69,8 +69,8 @@ func (n *SetInitialBlockToClaimSyncer) SetClaimSyncerNextRequiredBlock( return nil } -// getNextBlockNumber returns the first block number of the next certificate to generate. -// It reads the last sent certificate from agglayer to determine the starting block. +// getNextBlockNumber returns the starting block number for the claim syncer. +// It queries the latest settled certificate from agglayer to determine from which block claims must be synced. func (n *SetInitialBlockToClaimSyncer) getNextBlockNumber(ctx context.Context) (uint64, error) { certHeader, err := n.agglayerClient.GetLatestSettledCertificateHeader(ctx, n.l2OriginNetwork) if err != nil { diff --git a/aggsender/types/interfaces.go b/aggsender/types/interfaces.go index 45306c1f6..8591f625f 100644 --- a/aggsender/types/interfaces.go +++ b/aggsender/types/interfaces.go @@ -100,11 +100,9 @@ type L2BridgeSyncer interface { GetBlockByLER(ctx context.Context, ler common.Hash) (uint64, error) GetExitRootByIndex(ctx context.Context, index uint32) (treetypes.Root, error) GetBridges(ctx context.Context, fromBlock, toBlock uint64) ([]bridgesync.Bridge, error) - // GetClaims(ctx context.Context, fromBlock, toBlock uint64) ([]claimsynctypes.Claim, error) OriginNetwork() uint32 GetLastProcessedBlock(ctx context.Context) (uint64, bool, error) GetExitRootByHash(ctx context.Context, root common.Hash) (*treetypes.Root, error) - // GetClaimsByGlobalIndex(ctx context.Context, globalIndex *big.Int) ([]claimsynctypes.Claim, error) 
SubscribeToSync(subscriberID string) <-chan sync.Block SubscribeToNewBridge(subscriberID string) <-chan uint64 } diff --git a/claimsync/claimsync.go b/claimsync/claimsync.go index 9fe05d413..52a791af7 100644 --- a/claimsync/claimsync.go +++ b/claimsync/claimsync.go @@ -161,15 +161,13 @@ func (c *ClaimSync) syncNextBlockInfinite(ctx context.Context, blockNumber uint6 } // SyncNextBlock downloads and processes blockNum as a bootstrap step. -// Returns sync.ErrAlreadyBootstrapped (ignorable) if a processed block already exists. func (c *ClaimSync) SyncNextBlock(ctx context.Context, blockNum uint64) error { c.logger.Infof("SyncNextBlock: syncing block %d", blockNum) c.syncNextBlockInfinite(ctx, blockNum) return nil } -// OriginNetwork returns the network ID of the origin chain - +// OriginNetwork returns the network ID of the origin chain. func (c *ClaimSync) OriginNetwork() uint32 { return c.originNetwork } @@ -182,7 +180,7 @@ func (c *ClaimSync) SetNextRequiredBlock(ctx context.Context, blockNumber uint64 if !found { c.logger.Infof("Starting to sync from block %d (no processed blocks found)", blockNumber) if err := c.driver.SyncNextBlock(ctx, blockNumber); err != nil { - return fmt.Errorf("claimsync: failed to createStartingPoint: %w", err) + return fmt.Errorf("claimsync: failed to create starting point: %w", err) } return nil } @@ -196,8 +194,8 @@ func (c *ClaimSync) SetNextRequiredBlock(ctx context.Context, blockNumber uint64 blockNumber, firstBlock) } - c.logger.Infof("Cannot set next required block to %d because is running, but is included. 
"+ - " Processed blocks [%d - %d]", blockNumber, firstBlock, lastBlock) + c.logger.Infof("Syncer is already running; block %d is within the processed range [%d - %d], no action needed", + blockNumber, firstBlock, lastBlock) return nil } diff --git a/claimsync/config.go b/claimsync/config.go index a9812cdcd..02292a68a 100644 --- a/claimsync/config.go +++ b/claimsync/config.go @@ -23,13 +23,13 @@ type ConfigStandalone struct { // BlockFinality indicates the status of the blocks that will be queried in order to sync BlockFinality aggkittypes.BlockNumberFinality `jsonschema:"enum=LatestBlock, enum=SafeBlock, enum=PendingBlock, enum=FinalizedBlock, enum=EarliestBlock" mapstructure:"BlockFinality"` //nolint:lll // InitialBlockNum is the first block that will be queried when starting the synchronization from scratch. - // It should be a number equal or bellow the creation of the bridge contract + // It should be a number equal or below the creation of the bridge contract InitialBlockNum uint64 `mapstructure:"InitialBlockNum"` // SyncBlockChunkSize is the amount of blocks that will be queried to the client on each request SyncBlockChunkSize uint64 `mapstructure:"SyncBlockChunkSize"` // RetryAfterErrorPeriod is the time that will be waited when an unexpected error happens before retry RetryAfterErrorPeriod types.Duration `mapstructure:"RetryAfterErrorPeriod"` - // MaxRetryAttemptsAfterError is the maximum number of consecutive attempts that will happen before panicing. + // MaxRetryAttemptsAfterError is the maximum number of consecutive attempts that will happen before panicking. 
// Any number smaller than zero will be considered as unlimited retries MaxRetryAttemptsAfterError int `mapstructure:"MaxRetryAttemptsAfterError"` diff --git a/claimsync/types/syncer_id.go b/claimsync/types/syncer_id.go index dd4cf87f1..dcc08d3a6 100644 --- a/claimsync/types/syncer_id.go +++ b/claimsync/types/syncer_id.go @@ -16,8 +16,12 @@ const ( ) func (b ClaimSyncerID) String() string { - if b < L1ClaimSyncer || b > L2ClaimSyncer { + switch b { + case L1ClaimSyncer: + return "L1ClaimSyncer" + case L2ClaimSyncer: + return "L2ClaimSyncer" + default: return fmt.Sprintf("UnknownClaimSyncerID(%d)", b) } - return [...]string{"L1ClaimSyncer", "L2ClaimSyncer"}[b] } From d3a7b4ca6ad7d03989c2b7a7cbfaecd47037ebaa Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Fri, 20 Mar 2026 17:24:50 +0100 Subject: [PATCH 28/28] fix: coverage --- claimsync/types/claim_data_test.go | 58 ++++++++++++++++++++++++++++++ claimsync/types/syncer_id_test.go | 25 +++++++++++++ 2 files changed, 83 insertions(+) create mode 100644 claimsync/types/syncer_id_test.go diff --git a/claimsync/types/claim_data_test.go b/claimsync/types/claim_data_test.go index f5f423864..95d762e6d 100644 --- a/claimsync/types/claim_data_test.go +++ b/claimsync/types/claim_data_test.go @@ -432,3 +432,61 @@ func TestDecodeEtrogCalldata(t *testing.T) { }) } } + +func TestClaim_String(t *testing.T) { + t.Run("nil GlobalIndex and Amount", func(t *testing.T) { + c := &Claim{} + s := c.String() + require.Contains(t, s, "GlobalIndex: nil") + require.Contains(t, s, "Amount: nil") + }) + + t.Run("with GlobalIndex and Amount set", func(t *testing.T) { + c := &Claim{ + BlockNum: 10, + BlockPos: 2, + TxHash: common.HexToHash("0xaabb"), + GlobalIndex: big.NewInt(42), + OriginNetwork: 1, + OriginAddress: common.HexToAddress("0x1111"), + DestinationAddress: common.HexToAddress("0x2222"), + Amount: big.NewInt(1000), + DestinationNetwork: 3, + IsMessage: true, + BlockTimestamp: 9999, + 
Type: ClaimEvent, + } + s := c.String() + require.Contains(t, s, "BlockNum: 10") + require.Contains(t, s, "BlockPos: 2") + require.Contains(t, s, "GlobalIndex: 42") + require.Contains(t, s, "Amount: 1000") + require.Contains(t, s, "OriginNetwork: 1") + require.Contains(t, s, "DestinationNetwork: 3") + require.Contains(t, s, "IsMessage: true") + require.Contains(t, s, "BlockTimestamp: 9999") + require.Contains(t, s, fmt.Sprintf("Type: %s", ClaimEvent)) + }) +} + +func TestSetClaim_String(t *testing.T) { + t.Run("nil GlobalIndex", func(t *testing.T) { + s := (&SetClaim{}).String() + require.Contains(t, s, "GlobalIndex: nil") + }) + + t.Run("with all fields set", func(t *testing.T) { + sc := &SetClaim{ + BlockNum: 5, + BlockPos: 1, + TxHash: common.HexToHash("0xccdd"), + GlobalIndex: big.NewInt(7), + CreatedAt: 12345, + } + s := sc.String() + require.Contains(t, s, "BlockNum: 5") + require.Contains(t, s, "BlockPos: 1") + require.Contains(t, s, "GlobalIndex: 7") + require.Contains(t, s, "CreatedAt: 12345") + }) +} diff --git a/claimsync/types/syncer_id_test.go b/claimsync/types/syncer_id_test.go new file mode 100644 index 000000000..8584fc5fa --- /dev/null +++ b/claimsync/types/syncer_id_test.go @@ -0,0 +1,25 @@ +package types + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestClaimSyncerID_String(t *testing.T) { + tests := []struct { + id ClaimSyncerID + expected string + }{ + {L1ClaimSyncer, "L1ClaimSyncer"}, + {L2ClaimSyncer, "L2ClaimSyncer"}, + {ClaimSyncerID(99), fmt.Sprintf("UnknownClaimSyncerID(%d)", 99)}, + } + + for _, tt := range tests { + t.Run(tt.expected, func(t *testing.T) { + require.Equal(t, tt.expected, tt.id.String()) + }) + } +}