From b2ec5ead421d303e1bcf83b549a213364abbeaa0 Mon Sep 17 00:00:00 2001 From: Garand Tyson Date: Wed, 18 Mar 2026 09:49:20 -0700 Subject: [PATCH 1/4] fix mEnqueueTimes race --- src/history/HistoryManagerImpl.cpp | 26 ++++++++++++++++---------- src/history/HistoryManagerImpl.h | 5 ++++- 2 files changed, 20 insertions(+), 11 deletions(-) diff --git a/src/history/HistoryManagerImpl.cpp b/src/history/HistoryManagerImpl.cpp index 6b7375eb97..170e029e3a 100644 --- a/src/history/HistoryManagerImpl.cpp +++ b/src/history/HistoryManagerImpl.cpp @@ -337,7 +337,10 @@ HistoryManagerImpl::queueCurrentHistory(uint32_t ledger, uint32_t ledgerVers) } CLOG_INFO(History, "Queueing publish state for ledger {}", ledger); - mEnqueueTimes.emplace(ledger, std::chrono::steady_clock::now()); + { + LOCK_GUARD(mEnqueueTimesMtx, guard); + mEnqueueTimes.emplace(ledger, std::chrono::steady_clock::now()); + } // We queue history inside ledger commit, so do not finalize the file yet writeCheckpointFile(mApp, has, /* finalize */ false); @@ -587,16 +590,19 @@ HistoryManagerImpl::historyPublished( ZoneScoped; if (success) { - auto iter = mEnqueueTimes.find(ledgerSeq); - if (iter != mEnqueueTimes.end()) { - auto now = std::chrono::steady_clock::now(); - CLOG_INFO( - Perf, "Published history for ledger {} in {} seconds", - ledgerSeq, - std::chrono::duration<double>(now - iter->second).count()); - mEnqueueToPublishTimer.Update(now - iter->second); - mEnqueueTimes.erase(iter); + LOCK_GUARD(mEnqueueTimesMtx, guard); + auto iter = mEnqueueTimes.find(ledgerSeq); + if (iter != mEnqueueTimes.end()) + { + auto now = std::chrono::steady_clock::now(); + CLOG_INFO( + Perf, "Published history for ledger {} in {} seconds", + ledgerSeq, + std::chrono::duration<double>(now - iter->second).count()); + mEnqueueToPublishTimer.Update(now - iter->second); + mEnqueueTimes.erase(iter); + } } this->mPublishSuccess.Mark(); diff --git a/src/history/HistoryManagerImpl.h b/src/history/HistoryManagerImpl.h index f5c495b8a2..5a41150b67 100644 --- 
a/src/history/HistoryManagerImpl.h +++ b/src/history/HistoryManagerImpl.h @@ -6,6 +6,7 @@ #include "history/CheckpointBuilder.h" #include "history/HistoryManager.h" +#include "util/ThreadAnnotations.h" #include "util/TmpDir.h" #include "work/Work.h" #include @@ -32,7 +33,9 @@ class HistoryManagerImpl : public HistoryManager medida::Meter& mPublishFailure; medida::Timer& mEnqueueToPublishTimer; - UnorderedMap<uint32_t, std::chrono::steady_clock::time_point> mEnqueueTimes; + ANNOTATED_MUTEX(mEnqueueTimesMtx); + UnorderedMap<uint32_t, std::chrono::steady_clock::time_point> + mEnqueueTimes GUARDED_BY(mEnqueueTimesMtx); CheckpointBuilder mCheckpointBuilder; #ifdef BUILD_TESTS From e707da0ab8aab944f1be14f2abf8ed44c938ee6e Mon Sep 17 00:00:00 2001 From: Garand Tyson Date: Wed, 18 Mar 2026 09:51:56 -0700 Subject: [PATCH 2/4] Fix edge case where VerifyBucketWork gets wedged --- src/historywork/VerifyBucketWork.cpp | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/src/historywork/VerifyBucketWork.cpp b/src/historywork/VerifyBucketWork.cpp index 8a8e62038f..073c526d5e 100644 --- a/src/historywork/VerifyBucketWork.cpp +++ b/src/historywork/VerifyBucketWork.cpp @@ -49,6 +49,19 @@ VerifyBucketWork::onRun() } spawnVerifier(); + + // Handle synchronous completion (e.g., oversized bucket rejection). + // spawnVerifier() may set mDone immediately without posting async work, + // so we must check before returning WORK_WAITING. 
+ if (mDone) + { + if (mEc) + { + return State::WORK_FAILURE; + } + return State::WORK_SUCCESS; + } + return State::WORK_WAITING; } From c132c3e96834ffaa77bae3ce356334e56af4dfc4 Mon Sep 17 00:00:00 2001 From: Garand Tyson Date: Wed, 18 Mar 2026 09:57:13 -0700 Subject: [PATCH 3/4] Limit shadow hashes --- src/bucket/FutureBucket.h | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/src/bucket/FutureBucket.h b/src/bucket/FutureBucket.h index e052fabc46..70f9e43f36 100644 --- a/src/bucket/FutureBucket.h +++ b/src/bucket/FutureBucket.h @@ -133,6 +133,11 @@ template <typename BucketT> class FutureBucket // Return all hashes referenced by this future. std::vector<std::string> getHashes() const; + // Maximum number of shadow hashes allowed during deserialization. + // Shadows were removed in protocol 12; this cap prevents OOM from + // crafted HAS JSON with enormous shadow arrays. + static constexpr size_t MAX_SHADOW_HASHES = 32; + template <class Archive> void load(Archive& ar) @@ -145,9 +150,26 @@ template <typename BucketT> class FutureBucket ar(cereal::make_nvp("curr", mInputCurrBucketHash)); ar(cereal::make_nvp("snap", mInputSnapBucketHash)); ar(cereal::make_nvp("shadow", mInputShadowBucketHashes)); + // Validate required fields before checkState to avoid + // releaseAssert abort on malformed archive data + if (mInputCurrBucketHash.empty() || mInputSnapBucketHash.empty()) + { + throw std::runtime_error( + "FutureBucket FB_HASH_INPUTS has empty curr or snap hash"); + } + if (mInputShadowBucketHashes.size() > MAX_SHADOW_HASHES) + { + throw std::runtime_error( + "FutureBucket has too many shadow hashes"); + } break; case FB_HASH_OUTPUT: ar(cereal::make_nvp("output", mOutputBucketHash)); + if (mOutputBucketHash.empty()) + { + throw std::runtime_error( + "FutureBucket FB_HASH_OUTPUT has empty output hash"); + } break; case FB_CLEAR: break; From 15e5d284a1bccadf84c1042dc81e39ad3a62312d Mon Sep 17 00:00:00 2001 From: Garand Tyson Date: Wed, 18 Mar 2026 13:16:17 -0700 Subject: [PATCH 4/4] HistoryArchive hardening --- 
src/history/HistoryArchive.cpp | 117 +++- src/history/HistoryArchive.h | 13 + ....testnet.6714239.networkPassphrase.v2.json | 2 +- .../test/HistoryArchiveFormatTests.cpp | 561 ++++++++++++++++++ src/util/Fs.cpp | 2 +- 5 files changed, 686 insertions(+), 9 deletions(-) create mode 100644 src/history/test/HistoryArchiveFormatTests.cpp diff --git a/src/history/HistoryArchive.cpp b/src/history/HistoryArchive.cpp index 8fe966d390..ab6b27929d 100644 --- a/src/history/HistoryArchive.cpp +++ b/src/history/HistoryArchive.cpp @@ -23,9 +23,11 @@ #include #include +#include #include #include #include +#include #include #include #include @@ -148,10 +150,107 @@ HistoryArchiveState::toString() const return out.str(); } +static bool +isValidHexHash(std::string const& s) +{ + if (s.size() != 64) + { + return false; + } + for (unsigned char c : s) + { + if (!std::isxdigit(c)) + { + return false; + } + } + return true; +} + +static void +validateHASAfterDeserialization(HistoryArchiveState const& has) +{ + if (has.version != HistoryArchiveState:: + HISTORY_ARCHIVE_STATE_VERSION_BEFORE_HOT_ARCHIVE && + has.version != + HistoryArchiveState::HISTORY_ARCHIVE_STATE_VERSION_WITH_HOT_ARCHIVE) + { + CLOG_ERROR(History, "Unexpected history archive state version: {}", + has.version); + throw std::runtime_error("unexpected history archive state version"); + } + + if (has.currentBuckets.size() != LiveBucketList::kNumLevels) + { + throw std::runtime_error( + fmt::format(FMT_STRING("Invalid currentBuckets count: {}"), + has.currentBuckets.size())); + } + + if (has.hasHotArchiveBuckets() && + has.hotArchiveBuckets.size() != HotArchiveBucketList::kNumLevels) + { + throw std::runtime_error( + fmt::format(FMT_STRING("Invalid hotArchiveBuckets count: {}"), + has.hotArchiveBuckets.size())); + } + + // Prevent integer overflow in downstream CheckpointRange calculations + if (has.currentLedger > HistoryArchiveState::MAX_CURRENT_LEDGER) + { + throw std::runtime_error(fmt::format( + FMT_STRING("currentLedger 
{} is too large"), has.currentLedger)); + } + + // Validate all bucket hash strings are well-formed 64-character hex + auto validateHashesInBuckets = [](auto const& buckets, + std::string const& name) { + for (size_t i = 0; i < buckets.size(); ++i) + { + auto const& level = buckets[i]; + if (!isValidHexHash(level.curr)) + { + throw std::runtime_error(fmt::format( + FMT_STRING("Invalid {} curr hash at level {}"), name, i)); + } + if (!isValidHexHash(level.snap)) + { + throw std::runtime_error(fmt::format( + FMT_STRING("Invalid {} snap hash at level {}"), name, i)); + } + for (auto const& h : level.next.getHashes()) + { + if (!isValidHexHash(h)) + { + throw std::runtime_error(fmt::format( + FMT_STRING("Invalid {} next hash at level {}"), name, + i)); + } + } + } + }; + + validateHashesInBuckets(has.currentBuckets, "currentBuckets"); + if (has.hasHotArchiveBuckets()) + { + validateHashesInBuckets(has.hotArchiveBuckets, "hotArchiveBuckets"); + } +} + void HistoryArchiveState::load(std::string const& inFile) { ZoneScoped; + + // Check file size before parsing to prevent OOM from crafted JSON + auto fileSize = std::filesystem::file_size(inFile); + if (fileSize > MAX_HAS_FILE_SIZE) + { + throw std::runtime_error( + fmt::format(FMT_STRING("HAS file size {} exceeds maximum {}"), + fileSize, MAX_HAS_FILE_SIZE)); + } + std::ifstream in(inFile); if (!in) { @@ -161,22 +260,26 @@ HistoryArchiveState::load(std::string const& inFile) in.exceptions(std::ios::badbit); cereal::JSONInputArchive ar(in); serialize(ar); - if (version != HISTORY_ARCHIVE_STATE_VERSION_BEFORE_HOT_ARCHIVE && - version != HISTORY_ARCHIVE_STATE_VERSION_WITH_HOT_ARCHIVE) - { - CLOG_ERROR(History, "Unexpected history archive state version: {}", - version); - throw std::runtime_error("unexpected history archive state version"); - } + validateHASAfterDeserialization(*this); } void HistoryArchiveState::fromString(std::string const& str) { ZoneScoped; + + // Check string size before parsing to prevent OOM from 
crafted JSON + if (str.size() > MAX_HAS_FILE_SIZE) + { + throw std::runtime_error( + fmt::format(FMT_STRING("HAS string size {} exceeds maximum {}"), + str.size(), MAX_HAS_FILE_SIZE)); + } + std::istringstream in(str); cereal::JSONInputArchive ar(in); serialize(ar); + validateHASAfterDeserialization(*this); } std::string diff --git a/src/history/HistoryArchive.h b/src/history/HistoryArchive.h index a7b89ba442..18d524c134 100644 --- a/src/history/HistoryArchive.h +++ b/src/history/HistoryArchive.h @@ -80,6 +80,19 @@ struct HistoryArchiveState static constexpr size_t MAX_HISTORY_ARCHIVE_BUCKET_SIZE = 1024ull * 1024ull * 1024ull * 100ull; // 100 GB + // Maximum allowed size for a History Archive State (HAS) JSON + // file or string. This applies only to the HAS JSON document itself + // (stellar-history.json), not to bucket files or other archive + // contents. Checked before parsing to prevent OOM from crafted JSON + // with oversized arrays or strings. A valid HAS is typically under + // 50KB; 10MB is extremely generous. + static constexpr size_t MAX_HAS_FILE_SIZE = 10 * 1024 * 1024; // 10 MB + + // Upper bound on currentLedger to prevent uint32_t overflow in + // downstream arithmetic. 
+ static constexpr uint32_t MAX_CURRENT_LEDGER = std::numeric_limits<uint32_t>::max() - 256; + static inline unsigned const HISTORY_ARCHIVE_STATE_VERSION_BEFORE_HOT_ARCHIVE = 1; static inline unsigned const diff --git a/src/history/serialize-tests/stellar-history.testnet.6714239.networkPassphrase.v2.json b/src/history/serialize-tests/stellar-history.testnet.6714239.networkPassphrase.v2.json index 18fb8d3b29..ec2c63366e 100644 --- a/src/history/serialize-tests/stellar-history.testnet.6714239.networkPassphrase.v2.json +++ b/src/history/serialize-tests/stellar-history.testnet.6714239.networkPassphrase.v2.json @@ -176,7 +176,7 @@ "curr": "ae7e4814b50e176d8e3532e462e2e9db02f218adebd74603d7e349cc19f489e2", "next": { "state": 1, - "output": "50abed8a9d86c072cfe8388246b7a378dc355fe996fd7384a5ee57e8da2ad52" + "output": "50abed8a9d86c072cfe8388246b7a378dc355fe996fd7384a5ee57e8da2ad52d" }, "snap": "0000000000000000000000000000000000000000000000000000000000000000" } diff --git a/src/history/test/HistoryArchiveFormatTests.cpp b/src/history/test/HistoryArchiveFormatTests.cpp new file mode 100644 index 0000000000..75192bed44 --- /dev/null +++ b/src/history/test/HistoryArchiveFormatTests.cpp @@ -0,0 +1,561 @@ +// Copyright 2026 Stellar Development Foundation and contributors. Licensed +// under the Apache License, Version 2.0. See the COPYING file at the root +// of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 + +// Comprehensive tests for History Archive State (HAS) format validation. +// Ensures that all malformed or crafted HAS JSON inputs are rejected +// gracefully (via exception) rather than crashing. 
+ +#include "bucket/HotArchiveBucketList.h" +#include "bucket/LiveBucketList.h" +#include "history/HistoryArchive.h" +#include "main/Application.h" +#include "test/TestUtils.h" +#include "test/test.h" +#include "util/Fs.h" +#include +#include +#include +#include +#include +#include + +using namespace stellar; + +namespace +{ + +std::string const ZERO_HASH(64, '0'); + +// Per-level bucket fields for the HAS JSON builder. +struct TestBucketLevel +{ + std::string curr = ZERO_HASH; + std::string snap = ZERO_HASH; + std::string next = "{\"state\": 0}"; +}; + +// Emit a JSON array of bucket levels. +void +emitBucketArray(std::ostringstream& json, + std::vector const& levels) +{ + json << "[\n"; + for (size_t i = 0; i < levels.size(); ++i) + { + auto const& l = levels[i]; + json << " {\"curr\": \"" << l.curr << "\", \"next\": " << l.next + << ", \"snap\": \"" << l.snap << "\"}"; + if (i + 1 < levels.size()) + { + json << ","; + } + json << "\n"; + } + json << " ]"; +} + +// Core builder: emits a HAS JSON string from explicit parameters. +// Every makeHAS* helper delegates here. For version 2, pass hotLevels +// to include the hotArchiveBuckets and networkPassphrase fields. +std::string +buildHASJson(uint32_t version, uint32_t currentLedger, + std::vector const& levels, + std::vector const* hotLevels = nullptr) +{ + std::ostringstream json; + json << "{\n"; + json << " \"version\": " << version << ",\n"; + json << " \"server\": \"test\",\n"; + json << " \"currentLedger\": " << currentLedger << ",\n"; + if (hotLevels) + { + json << " \"networkPassphrase\": \"Test SDF Network ; September " + "2015\",\n"; + } + json << " \"currentBuckets\": "; + emitBucketArray(json, levels); + if (hotLevels) + { + json << ",\n \"hotArchiveBuckets\": "; + emitBucketArray(json, *hotLevels); + } + json << "\n}\n"; + return json.str(); +} + +// Default all-zeros bucket list matching LiveBucketList structure. 
+std::vector<TestBucketLevel> +defaultLevels() +{ + return std::vector<TestBucketLevel>(LiveBucketList::kNumLevels); +} + +// Convenience: valid HAS with optional ledger override. +std::string +makeHASJson(uint32_t currentLedger = 63) +{ + return buildHASJson(1, currentLedger, defaultLevels()); +} + +// Override a single level's fields via a mutation lambda. +std::string +makeHASJsonWithLevel(int level, + std::function<void(TestBucketLevel&)> const& mutate) +{ + auto levels = defaultLevels(); + mutate(levels.at(level)); + return buildHASJson(1, 63, levels); +} + +// Override the number of levels. +std::string +makeHASJsonWithLevelCount(int numLevels) +{ + return buildHASJson(1, 63, std::vector<TestBucketLevel>(numLevels)); +} + +// Build a valid version 2 HAS with hot archive buckets. +std::string +makeHASJsonV2() +{ + auto hot = std::vector<TestBucketLevel>(HotArchiveBucketList::kNumLevels); + return buildHASJson(2, 63, defaultLevels(), &hot); +} +} + +TEST_CASE("valid HAS round-trips through fromString", + "[history][archive-format]") +{ + auto json = makeHASJson(63); + HistoryArchiveState has; + REQUIRE_NOTHROW(has.fromString(json)); + CHECK(has.currentLedger == 63); + CHECK(has.currentBuckets.size() == LiveBucketList::kNumLevels); +} + +TEST_CASE("valid HAS with currentLedger 0", "[history][archive-format]") +{ + auto json = makeHASJson(0); + HistoryArchiveState has; + REQUIRE_NOTHROW(has.fromString(json)); + CHECK(has.currentLedger == 0); +} + +TEST_CASE("HAS rejects invalid version", "[history][archive-format]") +{ + auto withVersion = [](int v) { + std::string json = makeHASJson(63); + auto pos = json.find("\"version\": 1"); + REQUIRE(pos != std::string::npos); + json.replace(pos, 12, fmt::format("\"version\": {}", v)); + return json; + }; + + // version 0 + { + HistoryArchiveState has; + REQUIRE_THROWS_AS(has.fromString(withVersion(0)), std::runtime_error); + } + + // version 3 + { + HistoryArchiveState has; + REQUIRE_THROWS_AS(has.fromString(withVersion(3)), std::runtime_error); + } +} + +TEST_CASE("HAS rejects extreme currentLedger", 
"[history][archive-format]") { // UINT32_MAX { auto json = makeHASJson(std::numeric_limits<uint32_t>::max()); HistoryArchiveState has; REQUIRE_THROWS_AS(has.fromString(json), std::runtime_error); } // Just above MAX_CURRENT_LEDGER { auto json = makeHASJson(HistoryArchiveState::MAX_CURRENT_LEDGER + 1); HistoryArchiveState has; REQUIRE_THROWS_AS(has.fromString(json), std::runtime_error); } // Exactly at MAX_CURRENT_LEDGER (should be accepted) { auto json = makeHASJson(HistoryArchiveState::MAX_CURRENT_LEDGER); HistoryArchiveState has; REQUIRE_NOTHROW(has.fromString(json)); } } TEST_CASE("HAS rejects wrong-sized bucket vectors", "[history][archive-format]") { // too many currentBuckets { auto json = makeHASJsonWithLevelCount(12); HistoryArchiveState has; REQUIRE_THROWS_AS(has.fromString(json), std::runtime_error); } // too few currentBuckets { auto json = makeHASJsonWithLevelCount(5); HistoryArchiveState has; REQUIRE_THROWS_AS(has.fromString(json), std::runtime_error); } // zero currentBuckets { auto json = makeHASJsonWithLevelCount(0); HistoryArchiveState has; REQUIRE_THROWS_AS(has.fromString(json), std::runtime_error); } } TEST_CASE("HAS rejects oversized input", "[history][archive-format]") { // oversized string { std::string oversized(HistoryArchiveState::MAX_HAS_FILE_SIZE + 1, 'x'); HistoryArchiveState has; REQUIRE_THROWS_AS(has.fromString(oversized), std::runtime_error); } // oversized file { VirtualClock clock; auto cfg = getTestConfig(); auto app = createTestApplication(clock, cfg); auto tmpDir = app->getTmpDirManager().tmpDir("has-test"); std::string filename = tmpDir.getName() + "/oversized-has.json"; { std::ofstream out(filename); std::string padding(HistoryArchiveState::MAX_HAS_FILE_SIZE + 1, ' '); out << padding; } HistoryArchiveState has; REQUIRE_THROWS_AS(has.load(filename), std::runtime_error); } } TEST_CASE("HAS rejects 
malformed JSON", "[history][archive-format]") { // not JSON at all { HistoryArchiveState has; REQUIRE_THROWS(has.fromString("this is not json")); } // empty string { HistoryArchiveState has; REQUIRE_THROWS(has.fromString("")); } // truncated JSON { auto json = makeHASJson(63); // Truncate in the middle json = json.substr(0, json.size() / 2); HistoryArchiveState has; REQUIRE_THROWS(has.fromString(json)); } // missing currentBuckets field { std::string json = "{\n" " \"version\": 1,\n" " \"server\": \"test\",\n" " \"currentLedger\": 63\n" "}\n"; HistoryArchiveState has; REQUIRE_THROWS(has.fromString(json)); } // missing version field { std::string json = "{\n" " \"server\": \"test\",\n" " \"currentLedger\": 63,\n" " \"currentBuckets\": []\n" "}\n"; HistoryArchiveState has; REQUIRE_THROWS(has.fromString(json)); } // missing currentLedger field { std::string json = "{\n" " \"version\": 1,\n" " \"server\": \"test\",\n" " \"currentBuckets\": []\n" "}\n"; HistoryArchiveState has; REQUIRE_THROWS(has.fromString(json)); } // version 2 missing networkPassphrase { // version 2 requires networkPassphrase; omitting it should fail auto hot = std::vector<TestBucketLevel>(HotArchiveBucketList::kNumLevels); // Build manually without networkPassphrase std::ostringstream json; json << "{\n"; json << " \"version\": 2,\n"; json << " \"server\": \"test\",\n"; json << " \"currentLedger\": 63,\n"; json << " \"currentBuckets\": "; emitBucketArray(json, defaultLevels()); json << ",\n \"hotArchiveBuckets\": "; emitBucketArray(json, hot); json << "\n}\n"; HistoryArchiveState has; REQUIRE_THROWS(has.fromString(json.str())); } // version 2 missing hotArchiveBuckets { // version 2 expects hotArchiveBuckets; omitting it should fail std::ostringstream json; json << "{\n"; json << " \"version\": 2,\n"; json << " \"server\": \"test\",\n"; json << " 
\"currentLedger\": 63,\n"; + json << " \"networkPassphrase\": \"Test SDF Network ; September " + "2015\",\n"; + json << " \"currentBuckets\": "; + emitBucketArray(json, defaultLevels()); + json << "\n}\n"; + + HistoryArchiveState has; + REQUIRE_THROWS(has.fromString(json.str())); + } + + // version 1 valid without networkPassphrase + { + // version 1 should work fine without networkPassphrase + auto json = makeHASJson(63); + HistoryArchiveState has; + REQUIRE_NOTHROW(has.fromString(json)); + } + + // version 2 valid with all fields + { + auto json = makeHASJsonV2(); + HistoryArchiveState has; + REQUIRE_NOTHROW(has.fromString(json)); + } +} + +// =========================================================================== +// FutureBucket state validation +// =========================================================================== + +TEST_CASE("FutureBucket rejects invalid state values", + "[history][archive-format]") +{ + // state 3 (FB_LIVE_OUTPUT - never serialized) + { + auto json = makeHASJsonWithLevel( + 1, [](TestBucketLevel& l) { l.next = "{\"state\": 3}"; }); + HistoryArchiveState has; + REQUIRE_THROWS_AS(has.fromString(json), std::runtime_error); + } + + // state 4 (FB_LIVE_INPUTS - never serialized) + { + auto json = makeHASJsonWithLevel( + 1, [](TestBucketLevel& l) { l.next = "{\"state\": 4}"; }); + HistoryArchiveState has; + REQUIRE_THROWS_AS(has.fromString(json), std::runtime_error); + } + + // state 99 (out of range) + { + auto json = makeHASJsonWithLevel( + 1, [](TestBucketLevel& l) { l.next = "{\"state\": 99}"; }); + HistoryArchiveState has; + REQUIRE_THROWS_AS(has.fromString(json), std::runtime_error); + } +} + +TEST_CASE("FutureBucket rejects empty output hash", "[history][archive-format]") +{ + auto json = makeHASJsonWithLevel(1, [](TestBucketLevel& l) { + l.next = "{\"state\": 1, \"output\": \"\"}"; + }); + HistoryArchiveState has; + REQUIRE_THROWS_AS(has.fromString(json), std::runtime_error); +} + +TEST_CASE("FutureBucket rejects empty input 
hashes", + "[history][archive-format]") +{ + // empty curr hash + { + auto json = makeHASJsonWithLevel(1, [](TestBucketLevel& l) { + l.next = "{\"state\": 2, \"curr\": \"\", \"snap\": \"" + ZERO_HASH + + "\", \"shadow\": []}"; + }); + HistoryArchiveState has; + REQUIRE_THROWS_AS(has.fromString(json), std::runtime_error); + } + + // empty snap hash + { + auto json = makeHASJsonWithLevel(1, [](TestBucketLevel& l) { + l.next = "{\"state\": 2, \"curr\": \"" + ZERO_HASH + + "\", \"snap\": \"\", \"shadow\": []}"; + }); + HistoryArchiveState has; + REQUIRE_THROWS_AS(has.fromString(json), std::runtime_error); + } +} + +TEST_CASE("FutureBucket rejects too many shadow hashes", + "[history][archive-format]") +{ + // Build a shadow array with 33 entries (just over MAX_SHADOW_HASHES=32) + std::ostringstream shadows; + shadows << "["; + for (int i = 0; i < 33; ++i) + { + if (i > 0) + { + shadows << ", "; + } + shadows << "\"" << ZERO_HASH << "\""; + } + shadows << "]"; + + auto shadowStr = shadows.str(); + auto json = makeHASJsonWithLevel(1, [&](TestBucketLevel& l) { + l.next = "{\"state\": 2, \"curr\": \"" + ZERO_HASH + + "\", \"snap\": \"" + ZERO_HASH + + "\", \"shadow\": " + shadowStr + "}"; + }); + HistoryArchiveState has; + REQUIRE_THROWS_AS(has.fromString(json), std::runtime_error); +} + +// =========================================================================== +// Hash string format validation +// =========================================================================== + +TEST_CASE("HAS rejects non-hex curr hash", "[history][archive-format]") +{ + // contains non-hex characters + { + std::string badHash = + "gg00000000000000000000000000000000000000000000000000000000000000"; + auto json = makeHASJsonWithLevel( + 0, [&](TestBucketLevel& l) { l.curr = badHash; }); + HistoryArchiveState has; + REQUIRE_THROWS_AS(has.fromString(json), std::runtime_error); + } + + // special characters + { + std::string badHash = + 
"../../../../../../etc/passwd00000000000000000000000000000000"; + auto json = makeHASJsonWithLevel( + 0, [&](TestBucketLevel& l) { l.curr = badHash; }); + HistoryArchiveState has; + REQUIRE_THROWS_AS(has.fromString(json), std::runtime_error); + } +} + +TEST_CASE("HAS rejects non-hex snap hash", "[history][archive-format]") +{ + std::string badHash = + "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"; + auto json = + makeHASJsonWithLevel(0, [&](TestBucketLevel& l) { l.snap = badHash; }); + HistoryArchiveState has; + REQUIRE_THROWS_AS(has.fromString(json), std::runtime_error); +} + +TEST_CASE("HAS rejects wrong-length hash strings", "[history][archive-format]") +{ + // too short + { + auto json = makeHASJsonWithLevel( + 0, [](TestBucketLevel& l) { l.curr = "aabb"; }); + HistoryArchiveState has; + REQUIRE_THROWS_AS(has.fromString(json), std::runtime_error); + } + + // too long + { + std::string longHash(128, 'a'); + auto json = makeHASJsonWithLevel( + 0, [&](TestBucketLevel& l) { l.curr = longHash; }); + HistoryArchiveState has; + REQUIRE_THROWS_AS(has.fromString(json), std::runtime_error); + } + + // empty hash + { + auto json = + makeHASJsonWithLevel(0, [](TestBucketLevel& l) { l.curr = ""; }); + HistoryArchiveState has; + REQUIRE_THROWS_AS(has.fromString(json), std::runtime_error); + } +} + +TEST_CASE("HAS rejects non-hex FutureBucket output hash", + "[history][archive-format]") +{ + std::string badHash = + "ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ"; + auto json = makeHASJsonWithLevel(1, [&](TestBucketLevel& l) { + l.next = "{\"state\": 1, \"output\": \"" + badHash + "\"}"; + }); + HistoryArchiveState has; + REQUIRE_THROWS_AS(has.fromString(json), std::runtime_error); +} + +TEST_CASE("HAS rejects non-hex FutureBucket input hashes", + "[history][archive-format]") +{ + std::string badHash = + "ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ"; + + // bad curr in FB_HASH_INPUTS + { + auto json = 
makeHASJsonWithLevel(1, [&](TestBucketLevel& l) { + l.next = "{\"state\": 2, \"curr\": \"" + badHash + + "\", \"snap\": \"" + ZERO_HASH + "\", \"shadow\": []}"; + }); + HistoryArchiveState has; + REQUIRE_THROWS_AS(has.fromString(json), std::runtime_error); + } + + // bad shadow hash + { + auto json = makeHASJsonWithLevel(1, [&](TestBucketLevel& l) { + l.next = "{\"state\": 2, \"curr\": \"" + ZERO_HASH + + "\", \"snap\": \"" + ZERO_HASH + "\", \"shadow\": [\"" + + badHash + "\"]}"; + }); + HistoryArchiveState has; + REQUIRE_THROWS_AS(has.fromString(json), std::runtime_error); + } +} + +TEST_CASE("hexDir rejects non-hex strings", "[history][archive-format]") +{ + CHECK_NOTHROW(fs::hexDir("aabbcc")); + CHECK_NOTHROW(fs::hexDir( + "aabbccdd0011223344556677889900aabbccdd0011223344556677889900aabb")); + + CHECK_THROWS_AS(fs::hexDir(""), std::runtime_error); + CHECK_THROWS_AS(fs::hexDir("zz"), std::runtime_error); + CHECK_THROWS_AS(fs::hexDir("not-a-hex-string"), std::runtime_error); + CHECK_THROWS_AS(fs::hexDir("gg0000"), std::runtime_error); +} diff --git a/src/util/Fs.cpp b/src/util/Fs.cpp index ada28d9ec9..8de6a79e6c 100644 --- a/src/util/Fs.cpp +++ b/src/util/Fs.cpp @@ -364,7 +364,7 @@ hexDir(std::string const& hexStr) "([[:xdigit:]]{2})([[:xdigit:]]{2})([[:xdigit:]]{2}).*"); std::smatch sm; bool matched = std::regex_match(hexStr, sm, rx); - releaseAssert(matched); + releaseAssertOrThrow(matched); return (std::string(sm[1]) + "/" + std::string(sm[2]) + "/" + std::string(sm[3])); }