diff --git a/core/bittorrent_trackers.go b/core/bittorrent_trackers.go index 4c2e718..6933be0 100644 --- a/core/bittorrent_trackers.go +++ b/core/bittorrent_trackers.go @@ -11,7 +11,7 @@ import ( "crypto/rand" "encoding/hex" "fmt" - "io/ioutil" + "io" "log" "net/http" "net/url" @@ -81,7 +81,7 @@ func addPeerToSwarm(peerID string, infoHash string, port int) error { } defer resp.Body.Close() - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) if err != nil { return err } diff --git a/core/nakamoto/netpeer.go b/core/nakamoto/netpeer.go index 02436b2..cc5b778 100644 --- a/core/nakamoto/netpeer.go +++ b/core/nakamoto/netpeer.go @@ -50,6 +50,7 @@ type PeerCore struct { OnGetTip func(msg GetTipMessage) (BlockHeader, error) OnSyncGetTipAtDepth func(msg SyncGetTipAtDepthMessage) (SyncGetTipAtDepthReply, error) OnSyncGetData func(msg SyncGetBlockDataMessage) (SyncGetBlockDataReply, error) + OnHasBlock func(msg HasBlockMessage) (bool, error) peerLogger log.Logger } @@ -213,6 +214,27 @@ func NewPeerCore(config PeerConfig) *PeerCore { return reply, nil }) + p.server.RegisterMesageHandler("has_block", func(message []byte) (interface{}, error) { + var msg HasBlockMessage + if err := json.Unmarshal(message, &msg); err != nil { + return nil, err + } + + if p.OnHasBlock == nil { + return nil, fmt.Errorf("HasBlock callback not set") + } + + has, err := p.OnHasBlock(msg) + if err != nil { + return nil, err + } + + return HasBlockReply{ + Type: "has_block", + Has: has, + }, nil + }) + p.server.RegisterMesageHandler("gossip_peers", func(message []byte) (interface{}, error) { var msg GossipPeersMessage if err := json.Unmarshal(message, &msg); err != nil { @@ -411,7 +433,7 @@ func (p *PeerCore) SyncGetBlockData(peer Peer, fromBlock [32]byte, heights core. func (p *PeerCore) HasBlock(peer Peer, blockhash [32]byte) (bool, error) { msg := HasBlockMessage{ Type: "has_block", - BlockHash: fmt.Sprintf("%x", blockhash), + BlockHash: blockhash, } res, err := SendMessageToPeer(peer.Addr, msg, &p.peerLogger) if err != nil { diff --git a/core/nakamoto/netpeer_server.go b/core/nakamoto/netpeer_server.go index f55e382..7932421 100644 --- a/core/nakamoto/netpeer_server.go +++ b/core/nakamoto/netpeer_server.go @@ -5,7 +5,7 @@ import ( "context" "encoding/json" "fmt" - "io/ioutil" + "io" "log" "net/http" "sort" @@ -88,7 +88,7 @@ func (s *PeerServer) inboxHandler(w http.ResponseWriter, r *http.Request) { return } - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) if err != nil { http.Error(w, "Failed to read request body", http.StatusBadRequest) return @@ -168,7 +168,7 @@ func SendMessageToPeer(peerUrl string, message any, log *log.Logger) ([]byte, er defer resp.Body.Close() // Read response. - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) if err != nil { return nil, fmt.Errorf("failed to read response: %v", err) } diff --git a/core/nakamoto/node_test.go b/core/nakamoto/node_test.go index 8d198dc..8a9ec59 100644 --- a/core/nakamoto/node_test.go +++ b/core/nakamoto/node_test.go @@ -3,7 +3,6 @@ package nakamoto import ( "encoding/binary" "encoding/json" - "math" "testing" "time" @@ -220,108 +219,6 @@ func TestNodeSyncMissingBlocks(t *testing.T) { assert.Equal(tip1, tip2) } -// One part of the block sync algorithm is determining the common ancestor of two chains: -// -// Chain 1: the chain we have on our local node. -// Chain 2: the chain of a remote peer who has a more recent tip. 
-// -// We determine the common ancestor in order to download the most minimal set of block headers required to sync to the latest tip. -// There are a few approaches to this: -// - naive approach: download all headers from the tip to the remote peer's genesis block, and then compare the headers to find the common ancestor. This is O(N) where N is the length of the longest chain. -// - naive approach 2: send the peer the block we have at (height - 6), which is according to Nakamoto's calculations, "probabilistically final" and unlikely to be reorg-ed. Ask them if they have this block, and if so, sync the remaining 6 blocks. This fails when there is ongoing volatile reorgs, as well as doesn't work for a full sync. -// - slightly less naive approach: send the peer "checkpoints" at a regular interval. So for the full list of block hashes, we send H/I where I is the interval size, and use this to sync. This is O(H/I). -// - slightly slightly less naive approach: send the peer a list of "checkpoints" at exponentially decreasing intervals. This is smart since the finality of a block increases exponentially with the number of confirmations. This is O(H/log(H)). -// - the most efficient approach. Interactively binary search with the node. At each step of the binary search, we split their view of the chain hash list in half, and ask them if they have the block at the midpoint. -// -// Let me explain the binary search. -// <------------------------> our view -// <------------------------> their view -// n=1 -// <------------|-----------> their view -// <------------------|-----> their view -// <---------------|--------> their view -// At each iteration we ask: do you have a block at height/2 with this hash? -// - if the answer is yes, we move to the right half. -// - if the answer is no, we move to the left half. -// We continue until the length of our search space = 1. -// -// Now for some modelling. -// Finding the common ancestor is O(log N). Each message is (blockhash [32]byte, height uint64). Message size is 40 bytes. -// Total networking cost is O(40 * log N), bitcoin's chain height is 850585, O(40 * log 850585) = O(40 * 20) = O(800) bytes. -// Less than 1KB of data to find common ancestor. -func TestInteractiveBinarySearchFindCommonAncestor(t *testing.T) { - local_chainhashes := [][32]byte{} - remote_chainhashes := [][32]byte{} - - // Populate blockhashes for test. - for i := 0; i < 100; i++ { - local_chainhashes = append(local_chainhashes, uint64To32ByteArray(uint64(i))) - remote_chainhashes = append(remote_chainhashes, uint64To32ByteArray(uint64(i))) - } - // Set remote to branch at block height 90. - for i := 90; i < 100; i++ { - remote_chainhashes[i] = uint64To32ByteArray(uint64(i + 1000)) - } - - // Print both for debugging. - t.Logf("Local chainhashes:\n") - for _, x := range local_chainhashes { - t.Logf("%x", x) - } - t.Logf("\n") - t.Logf("Remote chainhashes:\n") - for _, x := range remote_chainhashes { - t.Logf("%x", x) - } - t.Logf("\n") - - // Peer method. - hasBlockhash := func(blockhash [32]byte) bool { - for _, x := range remote_chainhashes { - if x == blockhash { - return true - } - } - return false - } - - // - // Find the common ancestor. - // - - // This is a classical binary search algorithm. 
- floor := 0 - ceil := len(local_chainhashes) - n_iterations := 0 - - for (floor + 1) < ceil { - guess_idx := (floor + ceil) / 2 - guess_value := local_chainhashes[guess_idx] - - t.Logf("Iteration %d: floor=%d, ceil=%d, guess_idx=%d, guess_value=%x", n_iterations, floor, ceil, guess_idx, guess_value) - n_iterations += 1 - - // Send our tip's blockhash - // Peer responds with "SEEN" or "NOT SEEN" - // If "SEEN", we move to the right half. - // If "NOT SEEN", we move to the left half. - if hasBlockhash(guess_value) { - // Move to the right half. - floor = guess_idx - } else { - // Move to the left half. - ceil = guess_idx - } - } - - ancestor := local_chainhashes[floor] - t.Logf("Common ancestor: %x", ancestor) - t.Logf("Found in %d iterations.", n_iterations) - - expectedIterations := math.Ceil(math.Log2(float64(len(local_chainhashes)))) - t.Logf("Expected iterations: %f", expectedIterations) -} - func uint64To32ByteArray(num uint64) [32]byte { var arr [32]byte binary.BigEndian.PutUint64(arr[24:], num) // Store the uint64 in the last 8 bytes of the array diff --git a/core/nakamoto/sync.go b/core/nakamoto/sync.go index dfe8dbe..5008eda 100644 --- a/core/nakamoto/sync.go +++ b/core/nakamoto/sync.go @@ -2,7 +2,7 @@ package nakamoto import ( "fmt" - "math/big" + "slices" "sync" "github.com/liamzebedee/tinychain-go/core" @@ -212,82 +212,6 @@ func (n *Node) Sync() int { // The sync algorithm is a greedy iterative search. // We continue downloading block headers from a peer until we reach their tip. - // TODO handle peers joining. - WINDOW_SIZE := 2048 - - // Greedily searches the block DAG from a tip hash, downloading headers in parallel from peers from all subbranches up to a depth. - // The depth is referred to as the "window size", and is a constant value of 2048 blocks. - search := func(currentTipHash [32]byte) int { - // 1. Get the tips from all our peers and bucket them. - peersTips, err := n.getPeerTips(currentTipHash, uint64(WINDOW_SIZE), 1) - if err != nil { - n.syncLog.Printf("Failed to get peer tips: %s\n", err) - return 0 - } - - // 2. For each tip, download a window of headers and ingest them. - downloaded := 0 - for _, peers := range peersTips { - // 2a. Identify heights. - heights := core.NewBitset(WINDOW_SIZE) - for i := 0; i < WINDOW_SIZE; i++ { - heights.Insert(i) - } - - // 2b. Download headers. - headers, _, err := n.SyncDownloadData(currentTipHash, *heights, peers, true, false) - if err != nil { - n.syncLog.Printf("Failed to download headers: %s\n", err) - continue - } - - // 2c. Validate headers. - // Sanity-check: verify we have all the headers for the heights in order. TODO. - headers2 := orderValidateHeaders(currentTipHash, headers) - - // 2d. Ingest headers. - for _, header := range headers2 { - err := n.Dag.IngestHeader(header) - if err != nil { - // Skip. We will not be able to download the bodies. - continue - } - - downloaded += 1 - } - - n.syncLog.Printf("Downloaded %d headers\n", downloaded) - - // Now get the bodies. - // Filter through missing bodies for headers. - heights2 := core.NewBitset(WINDOW_SIZE) - for i, _ := range headers2 { - heights2.Insert(i) - } - _, bodies, err := n.SyncDownloadData(currentTipHash, *heights2, peers, false, true) - if err != nil { - n.syncLog.Printf("Failed to download bodies: %s\n", err) - continue - } - - // Print the bdoeis and exit. - n.syncLog.Printf("Downloaded bodies n=%d\n", len(bodies)) - - // 2d. Ingest bodies. - for i, body := range bodies { - err := n.Dag.IngestBlockBody(body) - if err != nil { - // Skip. 
We will not be able to download the bodies.
-					n.syncLog.Printf("Failed to ingest body %d: %s\n", i, err)
-					continue
-				}
-			}
-		}
-
-		// 3. Return the number of headers downloaded.
-		return downloaded
-	}
-
 	currentTip, err := n.Dag.GetLatestHeadersTip()
 	if err != nil {
 		n.syncLog.Printf("Failed to get latest tip: %s\n", err)
@@ -298,7 +222,7 @@ func (n *Node) Sync() int {
 	for {
 		// Search for headers from current tip.
-		downloaded := search(currentTip.Hash)
+		downloaded := n.sync_search(currentTip.Hash)
 		totalSynced += downloaded
 
 		// Exit when there are no more headers to download.
@@ -313,21 +237,93 @@
 	return totalSynced
 }
 
+func (n *Node) sync_search(baseBlock [32]byte) int {
+	// The size of the window we are searching.
+	WINDOW_SIZE := 2048
+
+	// Greedily search the block DAG from a tip hash, downloading headers in parallel from peers across all subbranches up to a fixed depth.
+	// The depth is referred to as the "window size", and is a constant value of 2048 blocks.
+	// 1. Get the tips from all our peers and bucket them.
+	peersTips, err := n.getPeerTips(baseBlock, uint64(WINDOW_SIZE), 1)
+	if err != nil {
+		n.syncLog.Printf("Failed to get peer tips: %s\n", err)
+		return 0
+	}
+
+	// 2. For each tip, download a window of headers and ingest them.
+	downloaded := 0
+	for _, peers := range peersTips {
+		// 2a. Identify heights.
+		heights := core.NewBitset(WINDOW_SIZE)
+		for i := 0; i < WINDOW_SIZE; i++ {
+			heights.Insert(i)
+		}
+
+		// 2b. Download headers.
+		headers, _, err := n.SyncDownloadData(baseBlock, *heights, peers, true, false)
+		if err != nil {
+			n.syncLog.Printf("Failed to download headers: %s\n", err)
+			continue
+		}
+
+		// 2c. Validate headers.
+		// Sanity-check: verify we have all the headers for the heights in order. TODO.
+		headers2 := orderValidateHeaders(baseBlock, headers)
+
+		// 2d. Ingest headers.
+		for _, header := range headers2 {
+			err := n.Dag.IngestHeader(header)
+			if err != nil {
+				// Skip. We will not be able to download the bodies.
+				continue
+			}
+
+			downloaded += 1
+		}
+
+		n.syncLog.Printf("Downloaded %d headers\n", downloaded)
+
+		// Now get the bodies.
+		// Identify the heights of the headers whose bodies are missing.
+		heights2 := core.NewBitset(WINDOW_SIZE)
+		for i := range headers2 {
+			heights2.Insert(i)
+		}
+		_, bodies, err := n.SyncDownloadData(baseBlock, *heights2, peers, false, true)
+		if err != nil {
+			n.syncLog.Printf("Failed to download bodies: %s\n", err)
+			continue
+		}
+
+		// Log the number of downloaded bodies.
+		n.syncLog.Printf("Downloaded bodies n=%d\n", len(bodies))
+
+		// 2e. Ingest bodies.
+		for i, body := range bodies {
+			err := n.Dag.IngestBlockBody(body)
+			if err != nil {
+				// Skip bodies that fail to ingest.
+				n.syncLog.Printf("Failed to ingest body %d: %s\n", i, err)
+				continue
+			}
+		}
+	}
+
+	// 3. Return the number of headers downloaded.
+	return downloaded
+}
+
 // Contacts all our peers in parallel, gets the block header of their tip, and returns the best tip based on total work.
-func (n *Node) sync_getBestTipFromPeers() [32]byte {
+func (n *Node) sync_getBestTipFromPeers(peers []Peer) (BlockHeader, error) {
 	syncLog := NewLogger("node", "sync")
-
-	// 1. Contact all our peers.
-	// 2. Get their current tips in parallel.
+
+	// 1. Contact all our peers and get their current tips in parallel.
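+	// Fan-out/fan-in: each peer is queried in its own goroutine. Results are
+	// collected over a channel buffered to the peer count so no sender blocks,
+	// and a WaitGroup signals when every query has returned.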
syncLog.Printf("Getting tips from %d peers...\n", len(n.Peer.peers)) - var wg sync.WaitGroup - - tips := make([]BlockHeader, 0) tipsChan := make(chan BlockHeader, len(n.Peer.peers)) - // timeout := time.After(5 * time.Second) + tips := make([]BlockHeader, 0) - for _, peer := range n.Peer.peers { + for _, peer := range peers { wg.Add(1) go func(peer Peer) { defer wg.Done() @@ -341,62 +337,34 @@ func (n *Node) sync_getBestTipFromPeers() [32]byte { }(peer) } - go func() { - wg.Wait() - close(tipsChan) - }() - - // TODO WIP - // for { - // select { - // case tip, ok := <-tipsChan: - // if !ok { - // break - // } - // tips = append(tips, tip) - // case <-timeout: - // syncLog.Printf("Timed out getting tips from peers\n") - // } - // } + wg.Wait() + close(tipsChan) + for tip := range tipsChan { + tips = append(tips, tip) + } syncLog.Printf("Received %d tips\n", len(tips)) if len(tips) == 0 { syncLog.Printf("No tips received. Exiting sync.\n") - return [32]byte{} // TODO, should return error + return BlockHeader{}, fmt.Errorf("No tips received") } - // 3. Sort the tips by max(work). - // 4. Reduce the tips to (tip, work, num_peers). - // 5. Choose the tip with the highest work and the most peers mining on it. - numPeersOnTip := make(map[[32]byte]int) - tipWork := make(map[[32]byte]*big.Int) - - highestWork := big.NewInt(0) - bestTipHash := [32]byte{} - - for _, tip := range tips { - hash := tip.BlockHash() - // TODO embed difficulty into block header so we can verify POW. - work := CalculateWork(Bytes32ToBigInt(hash)) - - // -1 if x < y - // highestWork < work - if highestWork.Cmp(work) == -1 { - highestWork = work - bestTipHash = hash - } - - numPeersOnTip[hash] += 1 - tipWork[hash] = work - } - - syncLog.Printf("Best tip: %s\n", bestTipHash) - return bestTipHash + // 2. Sort the tips by max(work). + slices.SortFunc(tips, func(bh1, bh2 BlockHeader) int { + w1 := CalculateWork(Bytes32ToBigInt(bh1.BlockHash())) + w2 := CalculateWork(Bytes32ToBigInt(bh2.BlockHash())) + return w1.Cmp(w2) + }) + + // 3. Return the best tip. + bestTip := tips[len(tips)-1] + syncLog.Printf("Best tip: %x\n", bestTip.BlockHash()) + return bestTip, nil } // Computes the common ancestor of our local canonical chain and a remote peer's canonical chain through an interactive binary search. // O(log N * query_size). -func (n *Node) sync_computeCommonAncestorWithPeer(peer Peer, local_chainhashes *[][32]byte) [32]byte { +func GetPeerCommonAncestor(localPeer *PeerCore, remotePeer Peer, local_chainhashes *[][32]byte) (ancestor [32]byte, nIterations int, err error) { syncLog := NewLogger("node", "sync") // 6a. Compute the common ancestor (interactive binary search). @@ -416,10 +384,10 @@ func (n *Node) sync_computeCommonAncestorWithPeer(peer Peer, local_chainhashes * // Peer responds with "SEEN" or "NOT SEEN" // If "SEEN", we move to the right half. // If "NOT SEEN", we move to the left half. - has, err := n.Peer.HasBlock(peer, guess_value) + has, err := localPeer.HasBlock(remotePeer, guess_value) if err != nil { syncLog.Printf("Failed to get block from peer: %s\n", err) - continue + return [32]byte{}, 0, err } if has { // Move to the right half. 
@@ -430,8 +398,8 @@ func (n *Node) sync_computeCommonAncestorWithPeer(peer Peer, local_chainhashes *
 		}
 	}
 
-	ancestor := (*local_chainhashes)[floor]
+	ancestor = (*local_chainhashes)[floor]
 	syncLog.Printf("Common ancestor: %x", ancestor)
 	syncLog.Printf("Found in %d iterations.", n_iterations)
-	return ancestor
+	return ancestor, n_iterations, nil
 }
diff --git a/core/nakamoto/sync_test.go b/core/nakamoto/sync_test.go
index 8bc81c7..92bbc2a 100644
--- a/core/nakamoto/sync_test.go
+++ b/core/nakamoto/sync_test.go
@@ -3,6 +3,7 @@
 import (
 	"context"
 	"fmt"
+	"math"
 	"testing"
 	"time"
 
@@ -371,7 +372,6 @@ func TestSyncScheduleDownloadWork1(t *testing.T) {
 
 func TestSyncSyncDownloadDataHeaders(t *testing.T) {
 	// After getting the tips, then we need to divide them into work units.
-	assert := assert.New(t)
 
 	peers := setupTestNetwork(t)
 
@@ -470,7 +470,6 @@ func TestSyncSyncDownloadDataHeaders(t *testing.T) {
 
 func TestSyncSync(t *testing.T) {
 	// After getting the tips, then we need to divide them into work units.
-	assert := assert.New(t)
 
 	peers := setupTestNetwork(t)
 
@@ -530,5 +529,325 @@ func TestSyncSync(t *testing.T) {
 	assertIntEqual(t, 0, downloaded2)
 	downloaded3 := node3.Sync()
 	assertIntEqual(t, 0, downloaded3)
+}
+
+
+// One part of the block sync algorithm is determining the common ancestor of two chains:
+//
+// Chain 1: the chain we have on our local node.
+// Chain 2: the chain of a remote peer who has a more recent tip.
+//
+// We determine the common ancestor in order to download the minimal set of block headers required to sync to the latest tip.
+// There are a few approaches to this:
+// - naive approach: download all headers from the tip to the remote peer's genesis block, and then compare the headers to find the common ancestor. This is O(N) where N is the length of the longest chain.
+// - naive approach 2: send the peer the block we have at (height - 6), which is, according to Nakamoto's calculations, "probabilistically final" and unlikely to be reorg-ed. Ask them if they have this block, and if so, sync the remaining 6 blocks. This fails when there are ongoing volatile reorgs, and it doesn't work for a full sync.
+// - slightly less naive approach: send the peer "checkpoints" at a regular interval. So for the full list of block hashes, we send H/I checkpoints, where H is the chain height and I is the interval size, and use this to sync. This is O(H/I).
+// - slightly slightly less naive approach: send the peer a list of "checkpoints" at exponentially decreasing intervals. This is smart since the finality of a block increases exponentially with the number of confirmations. This is O(log H).
+// - the most efficient approach: interactively binary search with the node. At each step of the binary search, we split their view of the chain hash list in half, and ask them if they have the block at the midpoint.
+//
+// Let me explain the binary search.
+// <------------------------> our view
+// <------------------------> their view
+// n=1
+// <------------|-----------> their view
+// <------------------|-----> their view
+// <---------------|--------> their view
+// At each iteration we ask: do you have the block at the midpoint height with this hash?
+// - if the answer is yes, we move to the right half.
+// - if the answer is no, we move to the left half.
+// We continue until the length of our search space = 1.
+//
+// Now for some modelling.
+// Finding the common ancestor is O(log N). Each message is (blockhash [32]byte, height uint64). Message size is 40 bytes.
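+// (32 bytes for the blockhash plus 8 bytes for the uint64 height.)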
+// Total networking cost is 40 bytes * log2(N). Bitcoin's chain height is 850585, so 40 * log2(850585) ≈ 40 * 20 = 800 bytes.
+// Less than 1KB of data to find the common ancestor.
+func TestInteractiveBinarySearchFindCommonAncestor(t *testing.T) {
+	local_chainhashes := [][32]byte{}
+	remote_chainhashes := [][32]byte{}
+
+	// Populate blockhashes for test.
+	for i := 0; i < 100; i++ {
+		local_chainhashes = append(local_chainhashes, uint64To32ByteArray(uint64(i)))
+		remote_chainhashes = append(remote_chainhashes, uint64To32ByteArray(uint64(i)))
+	}
+	// Set remote to branch at block height 90.
+	for i := 90; i < 100; i++ {
+		remote_chainhashes[i] = uint64To32ByteArray(uint64(i + 1000))
+	}
+
+	// Print both for debugging.
+	t.Logf("Local chainhashes:\n")
+	for _, x := range local_chainhashes {
+		t.Logf("%x", x)
+	}
+	t.Logf("\n")
+	t.Logf("Remote chainhashes:\n")
+	for _, x := range remote_chainhashes {
+		t.Logf("%x", x)
+	}
+	t.Logf("\n")
+
+	// Peer method.
+	hasBlockhash := func(blockhash [32]byte) bool {
+		for _, x := range remote_chainhashes {
+			if x == blockhash {
+				return true
+			}
+		}
+		return false
+	}
+
+	//
+	// Find the common ancestor.
+	//
+
+	// This is a classical binary search algorithm.
+	floor := 0
+	ceil := len(local_chainhashes)
+	n_iterations := 0
+
+	for (floor + 1) < ceil {
+		guess_idx := (floor + ceil) / 2
+		guess_value := local_chainhashes[guess_idx]
+
+		t.Logf("Iteration %d: floor=%d, ceil=%d, guess_idx=%d, guess_value=%x", n_iterations, floor, ceil, guess_idx, guess_value)
+		n_iterations += 1
+
+		// Send the peer the blockhash at the midpoint of our search range.
+		// Peer responds with "SEEN" or "NOT SEEN"
+		// If "SEEN", we move to the right half.
+		// If "NOT SEEN", we move to the left half.
+		if hasBlockhash(guess_value) {
+			// Move to the right half.
+			floor = guess_idx
+		} else {
+			// Move to the left half.
+			ceil = guess_idx
+		}
+	}
+
+	ancestor := local_chainhashes[floor]
+	t.Logf("Common ancestor: %x", ancestor)
+	t.Logf("Found in %d iterations.", n_iterations)
+
+	expectedIterations := math.Ceil(math.Log2(float64(len(local_chainhashes))))
+	t.Logf("Expected iterations: %f", expectedIterations)
+}
+
+func TestGetPeerCommonAncestor(t *testing.T) {
+	local_chainhashes := [][32]byte{}
+	remote_chainhashes := [][32]byte{}
+
+	// Populate blockhashes for test.
+	for i := 0; i < 100; i++ {
+		local_chainhashes = append(local_chainhashes, uint64To32ByteArray(uint64(i)))
+		remote_chainhashes = append(remote_chainhashes, uint64To32ByteArray(uint64(i)))
+	}
+	// Set remote to branch at block height 90.
+	for i := 90; i < 100; i++ {
+		remote_chainhashes[i] = uint64To32ByteArray(uint64(i + 1000))
+	}
+
+	// Print both for debugging.
+	t.Logf("Local chainhashes:\n")
+	for _, x := range local_chainhashes {
+		t.Logf("%x", x)
+	}
+	t.Logf("\n")
+	t.Logf("Remote chainhashes:\n")
+	for _, x := range remote_chainhashes {
+		t.Logf("%x", x)
+	}
+	t.Logf("\n")
+
+	// Peer mock.
+	peer1 := NewPeerCore(PeerConfig{ipAddress: "127.0.0.1", port: getRandomPort()})
+	peer2 := NewPeerCore(PeerConfig{ipAddress: "127.0.0.1", port: getRandomPort()})
+
+	go peer1.Start()
+	go peer2.Start()
+
+	// Wait for peers online.
+	waitForPeersOnline([]*PeerCore{peer1, peer2})
+
+	// Bootstrap.
+	peer1.Bootstrap([]string{peer2.GetLocalAddr()})
+
+	peer2.OnHasBlock = func(msg HasBlockMessage) (bool, error) {
+		blockhash := msg.BlockHash
+		for _, x := range remote_chainhashes {
+			if x == blockhash {
+				return true, nil
+			}
+		}
+		return false, nil
+	}
+
+	//
+	// Find the common ancestor.
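+	// peer1 drives the interactive binary search below, querying peer2's
+	// OnHasBlock handler over the network at each step.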
+	//
+
+	remotePeer := peer1.GetPeers()[0]
+	ancestor, n_iterations, err := GetPeerCommonAncestor(peer1, remotePeer, &local_chainhashes)
+	if err != nil {
+		t.Fatalf("Error finding common ancestor: %s", err)
+	}
+	t.Logf("Common ancestor: %x", ancestor)
+	t.Logf("Found in %d iterations.", n_iterations)
+
+	expectedIterations := math.Ceil(math.Log2(float64(len(local_chainhashes))))
+	t.Logf("Expected iterations: %f", expectedIterations)
+
+	// Now we assert the common ancestor.
+	assert := assert.New(t)
+	assert.Equal(local_chainhashes[89], ancestor)
+	assertIntEqual(t, int(expectedIterations), n_iterations)
+}
+
+
+
+// Sync scenarios:
+//
+// SCENARIO 1
+// ===========
+// DESCRIPTION: local tip is behind remote tip, same branch. remote tip is heavier.
+// NETWORK STATE:
+// node1: a -> b -> c -> d -> e           (work=100)
+// node2: a -> b -> c -> d -> e -> f -> g (work=150)
+//
+// SCENARIO 2
+// ===========
+// DESCRIPTION: local and remote tips are on fork branches. remote branch is heavier.
+// NETWORK STATE:
+// node1: a -> b -> ca -> da -> ea (work=100)
+// node2: a -> b -> cb -> db -> eb (work=105)
+//
+// SCENARIO 3
+// ===========
+// DESCRIPTION: local and remote tips are on fork branches. local branch is heavier.
+// NETWORK STATE:
+// node1: a -> b -> ca -> da -> ea (work=105)
+// node2: a -> b -> cb -> db -> eb (work=100)
+//
+
+
+func printBlockchainView(t *testing.T, label string, dag *BlockDAG) {
+	// Print the entire hash chain according to this DAG.
+	hashlist, err := dag.GetLongestChainHashList(dag.FullTip.Hash, dag.FullTip.Height+10)
+	if err != nil {
+		t.Fatalf("Error getting longest chain: %s", err)
+	}
+	t.Logf("")
+	t.Logf("Longest chain (%s):", label)
+	for i, hash := range hashlist {
+		t.Logf("Block #%d: %x", i, hash)
+	}
+	t.Logf("")
+}
+
+func TestSyncRemoteForkBranchRemoteHeavier(t *testing.T) {
+	assert := assert.New(t)
+	peers := setupTestNetwork(t)
+
+	node1 := peers[0]
+	node2 := peers[1]
+
+	// Check the initial tips.
+	tip1 := node1.Dag.FullTip
+	tip2 := node2.Dag.FullTip
+
+	// Print the height of the tips.
+	t.Logf("Tip 1 height: %d", tip1.Height)
+	t.Logf("Tip 2 height: %d", tip2.Height)
+
+	// Check that the tips are the same.
+	assert.Equal(tip1.HashStr(), tip2.HashStr())
+
+	// Node 1 mines 15 blocks and gossips them to node 2.
+	node1.Miner.Start(15)
+	// Wait for nodes [1,2] to sync.
+	err := waitForNodesToSyncSameTip([]*Node{node1, node2})
+	assert.Nil(err)
+
+	// Disable syncing between the nodes.
+	node1.Peer.OnNewBlock = nil
+	node2.Peer.OnNewBlock = nil
+
+	// Node 1 mines 1 block on its own branch.
+	// Node 2 mines 5 blocks on its own branch.
+	node1.Miner.Start(1)
+	node2.Miner.Start(5)
+
+	// Assert state.
+	tip1 = node1.Dag.FullTip
+	tip2 = node2.Dag.FullTip
+	assertIntEqual(t, 16, tip1.Height)
+	assertIntEqual(t, 20, tip2.Height)
+	assert.NotEqual(tip1.HashStr(), tip2.HashStr())
+
+	// Now print both hash chains.
+	printBlockchainView(t, "Node 1", node1.Dag)
+	printBlockchainView(t, "Node 2", node2.Dag)
+
+	// Now sync node 2 to node 1.
+	// Get the heavier tip.
+	// nodes := []*Node{node1, node2}
+	tips := []Block{tip1, tip2}
+	var heavierTipIndex int = -1
+	if tips[0].AccumulatedWork.Cmp(&tips[1].AccumulatedWork) == -1 {
+		heavierTipIndex = 1
+	} else if tips[1].AccumulatedWork.Cmp(&tips[0].AccumulatedWork) == -1 {
+		heavierTipIndex = 0
+	} else if tips[0].AccumulatedWork.Cmp(&tips[1].AccumulatedWork) == 0 {
+		t.Errorf("Tips have the same work. Re-run test.")
+	}
+	t.Logf("Heavier tip index: %d", heavierTipIndex)
+	assertIntEqual(t, 1, heavierTipIndex)
+
+	// The common ancestor should be the old shared tip at height 15, where the two branches diverged.
+}
+
+
+func TestSyncGetBestTipFromPeers(t *testing.T) {
+	assert := assert.New(t)
+	peers := setupTestNetwork(t)
+
+	node1 := peers[0]
+	node2 := peers[1]
+	node3 := peers[2]
+
+	// Base case: all tips are the same.
+	bestTip, err := node1.sync_getBestTipFromPeers(node1.Peer.peers)
+	assert.Nil(err)
+	assert.Equal(node1.Dag.FullTip.HashStr(), bestTip.BlockHashStr())
+	assert.Equal(node2.Dag.FullTip.HashStr(), bestTip.BlockHashStr())
+	assert.Equal(node3.Dag.FullTip.HashStr(), bestTip.BlockHashStr())
+
+	// Now we test the case where one peer has a different tip.
+	// Node 1 mines 5 blocks; node 2 mines 10 blocks.
+	node1.Miner.Start(5)
+	node2.Miner.Start(10)
+
+	// Node 2 should have the best tip.
+	bestTip, err = node1.sync_getBestTipFromPeers(node1.Peer.peers)
+	assert.Nil(err)
+	assert.Equal(node2.Dag.FullTip.HashStr(), bestTip.BlockHashStr())
+}
+
+// Sync process:
+// 1. Ask all peers for tips
+// 2. Choose the tip with the most work
+// 3. Find the common ancestor
+// 4. Sync from this base block
+
+
+// Sync needs to distinguish between two scenarios:
+// 1) live sync: the node is syncing in real-time with the network.
+// 2) cold sync: the node is syncing from a cold start, and needs to download all blocks from the network.
+// What changes in each scenario?
+// - live sync:
+// -- we validate timestamps
+// -- we download just one branch
+// - cold sync:
+// -- we download all branches
diff --git a/core/nakamoto/types.go b/core/nakamoto/types.go
index 0a11cd5..6861ed6 100644
--- a/core/nakamoto/types.go
+++ b/core/nakamoto/types.go
@@ -96,7 +96,7 @@ type GetBlocksReply struct {
 // has_block
 type HasBlockMessage struct {
 	Type      string `json:"type"` // "have_block"
-	BlockHash string `json:"blockHash"`
+	BlockHash [32]byte `json:"blockHash"`
 }
 
 type HasBlockReply struct {