diff --git a/.github/workflows/ci-deploy.yaml b/.github/workflows/ci-deploy.yaml index 025d1efa..4d500644 100644 --- a/.github/workflows/ci-deploy.yaml +++ b/.github/workflows/ci-deploy.yaml @@ -50,6 +50,9 @@ jobs: submodules: recursive - name: Run sccache-cache uses: mozilla-actions/sccache-action@v0.0.9 + - name: Install libuuid + run: | + yum -y install libuuid-devel - name: Configure run: | python3 -m venv venv && . ./venv/bin/activate diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml index 11f22b05..85564cf6 100644 --- a/.github/workflows/coverage.yml +++ b/.github/workflows/coverage.yml @@ -20,7 +20,8 @@ jobs: uses: mozilla-actions/sccache-action@v0.0.9 - name: Install dependencies run: | - sudo apt-get install ninja-build + sudo apt-get update + sudo apt-get install -y ninja-build uuid-dev pip install gcovr gcovr --version - name: Configure diff --git a/CMakeLists.txt b/CMakeLists.txt index 1d73bb5b..db6ed39b 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -7,12 +7,16 @@ project(mapget LANGUAGES CXX C) # Allow version to be set from command line for CI/CD # For local development, use the default version if(NOT DEFINED MAPGET_VERSION) - set(MAPGET_VERSION 2025.5.1) + set(MAPGET_VERSION 2026.1.0) endif() set(CMAKE_CXX_STANDARD 20) set(CMAKE_CXX_STANDARD_REQUIRED ON) +if (WIN32) + add_compile_definitions(NOMINMAX) +endif() + include(FetchContent) include(GNUInstallDirs) @@ -22,12 +26,21 @@ if (CMAKE_CURRENT_SOURCE_DIR STREQUAL CMAKE_SOURCE_DIR) set(MAPGET_WITH_HTTPLIB ON CACHE BOOL "Enable mapget-http-datasource and mapget-http-service libraries.") set(MAPGET_ENABLE_TESTING ON CACHE BOOL "Enable testing.") set(MAPGET_BUILD_EXAMPLES ON CACHE BOOL "Build examples.") + # Prevent CPM dependencies (e.g. Drogon/Trantor) from registering their own + # tests into this project's ctest run. 
+ set(BUILD_TESTING OFF CACHE BOOL "Disable dependency tests" FORCE) endif() option(MAPGET_WITH_WHEEL "Enable mapget Python wheel (output to WHEEL_DEPLOY_DIRECTORY).") option(MAPGET_WITH_SERVICE "Enable mapget-service library. Requires threads.") option(MAPGET_WITH_HTTPLIB "Enable mapget-http-datasource and mapget-http-service libraries.") +if (MAPGET_ENABLE_TESTING) + # Enable testing before adding CPM dependencies so stale/third-party CTest + # files don't linger in the build tree. + enable_testing() +endif() + set(Python3_FIND_STRATEGY LOCATION) if (NOT MSVC) @@ -112,7 +125,6 @@ endif() # tests if (MAPGET_ENABLE_TESTING) - enable_testing() add_subdirectory(test/unit) if (MAPGET_WITH_WHEEL) diff --git a/cmake/deps.cmake b/cmake/deps.cmake index 7473f2b9..c0e1e194 100644 --- a/cmake/deps.cmake +++ b/cmake/deps.cmake @@ -14,17 +14,53 @@ CPMAddPackage( OPTIONS "EXPECTED_BUILD_TESTS OFF" "EXPECTED_BUILD_PACKAGE_DEB OFF") -CPMAddPackage( - URI "gh:Klebert-Engineering/simfil@0.6.2" - OPTIONS - "SIMFIL_WITH_MODEL_JSON ON" - "SIMFIL_SHARED OFF") + +set(MAPGET_SIMFIL_SOURCE_DIR "" CACHE PATH + "Local simfil source directory to use instead of fetching from Git.") + +set(_mapget_simfil_source_dir "${MAPGET_SIMFIL_SOURCE_DIR}") +if ("${_mapget_simfil_source_dir}" STREQUAL "" + AND EXISTS "${CMAKE_CURRENT_LIST_DIR}/../../simfil/CMakeLists.txt") + set(_mapget_simfil_source_dir "${CMAKE_CURRENT_LIST_DIR}/../../simfil") +endif() + +if (NOT "${_mapget_simfil_source_dir}" STREQUAL "") + message(STATUS "Using local simfil from ${_mapget_simfil_source_dir}") + CPMAddPackage( + NAME simfil + SOURCE_DIR "${_mapget_simfil_source_dir}" + OPTIONS + "SIMFIL_WITH_MODEL_JSON ON" + "SIMFIL_SHARED OFF") +else() + CPMAddPackage( + URI "gh:Klebert-Engineering/simfil#v0.7.0" + OPTIONS + "SIMFIL_WITH_MODEL_JSON ON" + "SIMFIL_SHARED OFF") +endif() + CPMAddPackage( URI "gl:eidheim/tiny-process-library#8bbb5a" # Switch to release > 2.0.4 once available OPTIONS "BUILD_TESTING OFF") if 
(MAPGET_WITH_WHEEL OR MAPGET_WITH_HTTPLIB OR MAPGET_ENABLE_TESTING) + # OpenSSL's Configure script needs a "full" Perl distribution. Git for + # Windows ships a minimal perl that is missing required modules (e.g. + # Locale::Maketext::Simple), causing OpenSSL builds to fail. + if (WIN32) + if (NOT DEFINED PERL_EXECUTABLE OR PERL_EXECUTABLE MATCHES "[\\\\/]Git[\\\\/]usr[\\\\/]bin[\\\\/]perl\\.exe$") + find_program(_MAPGET_STRAWBERRY_PERL + NAMES perl.exe + PATHS "C:/Strawberry/perl/bin" + NO_DEFAULT_PATH) + if (_MAPGET_STRAWBERRY_PERL) + set(PERL_EXECUTABLE "${_MAPGET_STRAWBERRY_PERL}" CACHE FILEPATH "" FORCE) + endif() + endif() + endif() + set (OPENSSL_VERSION openssl-3.5.2) CPMAddPackage("gh:klebert-engineering/openssl-cmake@1.0.0") CPMAddPackage( @@ -40,19 +76,67 @@ if (MAPGET_WITH_WHEEL OR MAPGET_WITH_HTTPLIB OR MAPGET_ENABLE_TESTING) endif() CPMAddPackage( - URI "gh:yhirose/cpp-httplib@0.15.3" + NAME jsoncpp + GIT_REPOSITORY https://github.com/open-source-parsers/jsoncpp + GIT_TAG 1.9.5 + GIT_SHALLOW ON + OPTIONS + "JSONCPP_WITH_TESTS OFF" + "JSONCPP_WITH_POST_BUILD_UNITTEST OFF" + "JSONCPP_WITH_PKGCONFIG_SUPPORT OFF" + "JSONCPP_WITH_CMAKE_PACKAGE OFF" + "BUILD_SHARED_LIBS OFF" + "BUILD_STATIC_LIBS ON" + "BUILD_OBJECT_LIBS OFF") + # Help Drogon's FindJsoncpp.cmake locate jsoncpp when built via CPM. + set(JSONCPP_INCLUDE_DIRS "${jsoncpp_SOURCE_DIR}/include" CACHE PATH "" FORCE) + set(JSONCPP_LIBRARIES jsoncpp_static CACHE STRING "" FORCE) + # CPM generates a dummy package redirect config at + # `${CMAKE_FIND_PACKAGE_REDIRECTS_DIR}/jsoncpp-config.cmake`. Drogon uses + # `find_package(Jsoncpp)` (config-first), so make that redirect actually + # define the expected `Jsoncpp_lib` target. 
+ if (DEFINED CMAKE_FIND_PACKAGE_REDIRECTS_DIR) + file(MAKE_DIRECTORY "${CMAKE_FIND_PACKAGE_REDIRECTS_DIR}") + file(WRITE "${CMAKE_FIND_PACKAGE_REDIRECTS_DIR}/jsoncpp-extra.cmake" [=[ +if(NOT TARGET Jsoncpp_lib) + add_library(Jsoncpp_lib INTERFACE) + target_include_directories(Jsoncpp_lib INTERFACE "${JSONCPP_INCLUDE_DIRS}") + target_link_libraries(Jsoncpp_lib INTERFACE ${JSONCPP_LIBRARIES}) +endif() +]=]) + endif() + + # Drogon defines install(EXPORT ...) rules unconditionally, which fail when + # used as a subproject with CPM-provided dependencies (zlib/jsoncpp/etc). + # Since mapget only needs Drogon for building, temporarily suppress install + # rule generation while configuring Drogon. + set(_MAPGET_PREV_SKIP_INSTALL_RULES "${CMAKE_SKIP_INSTALL_RULES}") + if (DEFINED BUILD_TESTING) + set(_MAPGET_PREV_BUILD_TESTING "${BUILD_TESTING}") + endif() + set(CMAKE_SKIP_INSTALL_RULES ON) + set(BUILD_TESTING OFF) + + CPMAddPackage( + URI "gh:drogonframework/drogon@1.9.7" OPTIONS - "CPPHTTPLIB_USE_POLL ON" - "HTTPLIB_USE_CERTS_FROM_MACOSX_KEYCHAIN OFF" - "HTTPLIB_INSTALL OFF" - "HTTPLIB_USE_OPENSSL_IF_AVAILABLE OFF" - "HTTPLIB_USE_ZLIB_IF_AVAILABLE OFF") - # Manually enable openssl/zlib in httplib to avoid FindPackage calls. - target_compile_definitions(httplib INTERFACE - CPPHTTPLIB_OPENSSL_SUPPORT - CPPHTTPLIB_ZLIB_SUPPORT) - target_link_libraries(httplib INTERFACE - OpenSSL::SSL OpenSSL::Crypto ZLIB::ZLIB) + "BUILD_CTL OFF" + "BUILD_EXAMPLES OFF" + "BUILD_ORM OFF" + "BUILD_BROTLI OFF" + "BUILD_YAML_CONFIG OFF" + "BUILD_SHARED_LIBS OFF" + "USE_SUBMODULE ON" + "USE_STATIC_LIBS_ONLY OFF" + "USE_POSTGRESQL OFF" + "USE_MYSQL OFF" + "USE_SQLITE3 OFF" + GIT_SUBMODULES "trantor") + + set(CMAKE_SKIP_INSTALL_RULES "${_MAPGET_PREV_SKIP_INSTALL_RULES}") + if (DEFINED _MAPGET_PREV_BUILD_TESTING) + set(BUILD_TESTING "${_MAPGET_PREV_BUILD_TESTING}") + endif() CPMAddPackage( URI "gh:jbeder/yaml-cpp#aa8d4e@0.8.0" # Use > 0.8.0 once available. 
@@ -64,6 +148,7 @@ if (MAPGET_WITH_WHEEL OR MAPGET_WITH_HTTPLIB OR MAPGET_ENABLE_TESTING) CPMAddPackage("gh:CLIUtils/CLI11@2.5.0") CPMAddPackage("gh:pboettch/json-schema-validator#2.3.0") CPMAddPackage("gh:okdshin/PicoSHA2@1.0.1") + endif () if (MAPGET_WITH_WHEEL AND NOT TARGET pybind11) @@ -76,7 +161,7 @@ if (MAPGET_WITH_SERVICE OR MAPGET_WITH_HTTPLIB OR MAPGET_ENABLE_TESTING) endif() if (MAPGET_WITH_WHEEL AND NOT TARGET python-cmake-wheel) - CPMAddPackage("gh:Klebert-Engineering/python-cmake-wheel@1.1.0") + CPMAddPackage("gh:Klebert-Engineering/python-cmake-wheel#v1.2.7@1.2.7") endif() if (MAPGET_ENABLE_TESTING) diff --git a/docs/mapget-api.md b/docs/mapget-api.md index e4e7ce11..cf39bc8a 100644 --- a/docs/mapget-api.md +++ b/docs/mapget-api.md @@ -1,12 +1,12 @@ -# REST/GeoJSON API Guide +# HTTP / WebSocket API Guide -Mapget exposes a small HTTP API that lets clients discover datasources, stream tiles, abort long‑running requests, locate features by ID and inspect or update the running configuration. This guide describes the endpoints and their request and response formats. +Mapget exposes a small HTTP + WebSocket API that lets clients discover datasources, stream tiles, locate features by ID and inspect or update the running configuration. Interactive tile streaming now uses a WebSocket control channel plus `/tiles/next` pull requests for the binary tile data. This guide describes the endpoints and their request and response formats. ## Base URL and formats The server started by `mapget serve` listens on the configured host and port (by default on all interfaces and an automatically chosen port). All endpoints are rooted at that host and port. -Requests that send JSON use `Content-Type: application/json`. Tile streaming supports two response encodings, selected via the `Accept` header: +Requests that send JSON use `Content-Type: application/json`. 
HTTP tile streaming supports two response encodings, selected via the `Accept` header: - `Accept: application/jsonl` returns a JSON‑Lines stream where each line is one JSON object. - `Accept: application/binary` returns a compact binary stream optimized for high-volume traffic. @@ -21,28 +21,61 @@ The binary format and the logical feature model are described in more detail in - **Request body:** none - **Response:** `application/json` array of datasource descriptors -Each item contains map ID, available layers and basic metadata. This endpoint is typically used by frontends to discover which maps and layers can be requested via `/tiles`. +Each item contains map ID, available layers and basic metadata. Each layer entry includes its type, `zoomLevels`, `coverage`, staged-loading metadata (`stages`, optional `stageLabels`, `highFidelityStage`) and feature-type information. This endpoint is typically used by frontends to discover which maps and layers can be requested via `/tiles`. -## `/tiles` – stream tiles +## `/tiles` – stream tiles (HTTP) -`POST /tiles` streams tiles for one or more map–layer combinations. It is the main data retrieval endpoint used by clients such as erdblick. +`POST /tiles` streams tiles for one or more map–layer combinations. - **Method:** `POST` - **Request body (JSON):** - `requests`: array of objects, each with: - `mapId`: string, ID of the map to query. - `layerId`: string, ID of the layer within that map. - - `tileIds`: array of numeric tile IDs in mapget’s tiling scheme. + - either `tileIds`: array of numeric tile IDs in mapget’s tiling scheme. This is an **unstaged** request shape: the service does not expand it into one backend fetch per advertised stage and returns one tile response per requested tile with no explicit stage affinity. + - or `tileIdsByNextStage`: array of arrays where bucket `i` lists tiles whose next missing stage is `i`. 
This is the **staged** request shape: the service expands each tile to stage `i` and all higher stages advertised by the layer. - `stringPoolOffsets` (optional): dictionary from datasource node ID to last known string ID. Used by advanced clients to avoid receiving the same field names repeatedly in the binary stream. - - `clientId` (optional): arbitrary string identifying this client connection for abort handling. - **Response:** - `application/jsonl` if `Accept: application/jsonl` is sent. - `application/binary` if `Accept: application/binary` is sent, using the tile stream protocol. Tiles are streamed as they become available. In JSONL mode, each line is the JSON representation of one tile layer. In binary mode, the response is a sequence of versioned messages that can be decoded using the tile stream protocol from `mapget-model.md`. +For staged feature-layer clients, `tileIdsByNextStage` must be used even when only bucket `0` is non-empty. Collapsing such a request to plain `tileIds` changes its semantics to an unstaged request. + If `Accept-Encoding: gzip` is set, the server compresses responses where possible, which is especially useful for JSONL streams. +To cancel an in-flight HTTP stream, close the HTTP connection. + +## `/tiles` – interactive control channel (WebSocket) + +`GET /tiles` supports WebSocket upgrades. This endpoint is the control channel for interactive clients. It carries request updates and lightweight status/control frames; binary tile data is pulled separately via `/tiles/next`. + +- **Connect:** `ws://<host>:<port>/tiles` +- **Client → Server:** send one *text* message containing the same JSON body as for `POST /tiles` (`requests`, optional `stringPoolOffsets`). + - `stringPoolOffsets` is optional; the server remembers the latest offsets per WebSocket connection. Clients may re-send it to reset/resync offsets. +- **Server → Client:** sends *binary* WebSocket messages carrying VTLV control frames. 
+ - `RequestContext` frames contain a UTF-8 JSON payload with `requestId` and `clientId`. The `clientId` is then used for `/tiles/next`. + - `Status` frames contain UTF-8 JSON describing per-request `RequestStatus` transitions and a human-readable message. The final status frame has `"allDone": true`. + - `LoadStateChange` exists in the protocol but is currently not emitted by the HTTP service. + +To cancel, either send a new request message on the same connection (which replaces the current one) or close the WebSocket connection. + +## `/tiles/next` – pull binary tile frames + +`GET /tiles/next` (also accepts `POST`) returns the next available binary tile frame batch for an active interactive `/tiles` session. + +- **Method:** `GET` or `POST` +- **Query parameters:** + - `clientId` (required): numeric client id received via the websocket `RequestContext` frame. + - `waitMs` (optional): long-poll timeout in milliseconds. Defaults to 25000 and is clamped to 30000. + - `maxBytes` (optional): batch size budget. If greater than zero, the response may concatenate multiple VTLV frames up to that byte budget (capped at 5 MiB). + - `compress` (optional): set to `1` to enable gzip compression when the client also sends `Accept-Encoding: gzip`. +- **Response:** + - `200 application/octet-stream` with one or more concatenated `TileLayerStream` VTLV frames. + - `204 No Content` if the long-poll timed out before any frame became available. + - `410 Gone` if the interactive session no longer exists. + ### Why JSONL instead of JSON? JSON Lines is better suited to streaming large responses than a single JSON array. Clients can start processing the first tiles immediately, do not need to buffer the complete response in memory, and can naturally consume the stream with incremental parsers. @@ -87,16 +120,6 @@ Each line in the JSONL response is a GeoJSON-like FeatureCollection with additio The `error` object is only present if an error occurred while filling the tile. 
When present, the `features` array may be empty or contain partial data. -## `/abort` – cancel tile streaming - -`POST /abort` cancels a running `/tiles` request that was started with a matching `clientId`. It is useful when the viewport changes and the previous stream should be abandoned. - -- **Method:** `POST` -- **Request body (JSON):** `{ "clientId": "" }` -- **Response:** `text/plain` confirmation; a 400 status code if `clientId` is missing. - -Internally the service marks the matching tile requests as aborted and stops scheduling further work for them. - ### Curl Call Example For example, the following curl call could be used to stream GeoJSON feature objects @@ -179,7 +202,24 @@ Keep in mind, that you can also run a `mapget` service without any RPCs in your - **Request body:** none - **Response:** `text/html` -The page shows the number of active datasources and worker threads, the size of the active request queue and cache statistics such as hit/miss counters. This endpoint is primarily used during development and debugging. +The page shows the number of active datasources and worker threads, cache statistics, websocket/pull metrics, and optional tile-size-distribution data. It refreshes by polling `/status-data`. This endpoint is primarily used during development and debugging. + +## `/status-data` – machine-readable diagnostics + +`GET /status-data` returns the JSON payload that powers `/status`. + +- **Method:** `GET` +- **Query parameters:** + - `includeTileSizeDistribution` (optional, default `false`): include the heavy cached-tile size histogram / distribution calculations. + - `includeCachedFeatureTreeBytes` (optional, default `true`): include cached feature-tree byte breakdowns. 
+- **Response:** `application/json` + +The response contains: + +- `timestampMs` +- `service`: service statistics, datasource info, cache occupancy, and optional tile-size-distribution data +- `cache`: cache hit/miss counters and cache sizes +- `tilesWebsocket`: control-channel metrics such as active sessions, pending queued frames for `/tiles/next`, blocked pull requests, and total forwarded bytes / frames ## `/locate` – resolve external feature IDs diff --git a/docs/mapget-cache.md b/docs/mapget-cache.md index fd72fea8..10e46e98 100644 --- a/docs/mapget-cache.md +++ b/docs/mapget-cache.md @@ -33,10 +33,9 @@ Advanced clients can take advantage of this by setting the `stringPoolOffsets` f ## Inspecting cache statistics -The easiest way to see how the cache behaves is to call `GET /status` on the running server. The HTML status page contains: +The easiest way to see how the cache behaves is to call `GET /status` on the running server or query `GET /status-data` directly. The HTML status page contains: - Global service information such as the number of active datasources and worker threads. - Cache statistics, including `cache-hits`, `cache-misses` and the number of loaded string pools. When the in‑memory cache is used, additional fields show the current number of cached tiles and the size of the FIFO queue. These values provide a quick indication of whether the chosen cache size is appropriate for the workload. - diff --git a/docs/mapget-config.md b/docs/mapget-config.md index d44fddb2..9fa54eb8 100644 --- a/docs/mapget-config.md +++ b/docs/mapget-config.md @@ -194,7 +194,7 @@ The generator will produce deterministic but varied features for any requested t -`GeoJsonFolder` serves tiles from a directory containing GeoJSON files. +`GeoJsonFolder` serves tiles from a directory containing GeoJSON files. It accepts the usual GeoJSON geometry families, including `Point`, `MultiPoint`, `LineString`, `MultiLineString`, `Polygon`, `MultiPolygon`, and `GeometryCollection`. 
Required fields: @@ -203,7 +203,8 @@ Required fields: Optional fields: -- `withAttrLayers`: boolean flag. If `true`, nested objects in the GeoJSON `properties` are interpreted as attribute layers; if `false`, only scalar top‑level properties are emitted. +- `mapId`: optional map ID override. If omitted, mapget derives a display name from the input directory. +- `withAttrLayers` (default: `true`): boolean flag. If `true`, nested objects in the GeoJSON `properties` are converted to mapget attribute layers; if `false`, only scalar top‑level properties are emitted and nested objects are silently dropped. Example: @@ -211,6 +212,7 @@ Example: sources: - type: GeoJsonFolder folder: /data/tiles + mapId: Road Test Data withAttrLayers: true ``` diff --git a/docs/mapget-dev-guide.md b/docs/mapget-dev-guide.md index d7c9b905..36dab7c2 100644 --- a/docs/mapget-dev-guide.md +++ b/docs/mapget-dev-guide.md @@ -141,6 +141,8 @@ Jobs are keyed by `MapTileKey` so that concurrent requests for the same tile can The following sequence diagram summarises what happens when a client requests tiles through the HTTP service: +The plain HTTP `/tiles` path looks like this: + ```mermaid sequenceDiagram participant Client @@ -150,7 +152,7 @@ sequenceDiagram participant Ds as DataSource participant Cache - Client->>Http: POST /tiles
requests, clientId + Client->>Http: POST /tiles
requests Http->>Service: request(requests, headers) Service->>Worker: enqueue jobs per datasource loop per tile @@ -169,30 +171,52 @@ sequenceDiagram Service-->>Http: request complete ``` -If a client supplies a `clientId` in the `/tiles` request, the HTTP layer uses it to track open requests and to implement `/abort`. +For interactive clients, the transport is split into a control channel and a pull channel: + +```mermaid +sequenceDiagram + participant Client + participant Ws as WebSocket /tiles + participant Http as /tiles/next + participant Service as Service + participant Worker as Service Worker + + Client->>Ws: connect + send request JSON
(tileIds or tileIdsByNextStage) + Ws->>Service: request(requests, headers) + Ws-->>Client: RequestContext(requestId, clientId) + Ws-->>Client: Status(requests, allDone=false) + loop while more frames are needed + Client->>Http: GET /tiles/next?clientId=...&maxBytes=... + Http->>Ws: pop queued VTLV frame batch + Worker-->>Ws: enqueue StringPool / TileLayer frames + Http-->>Client: binary VTLV batch + end + Ws-->>Client: Status(allDone=true) +``` + +For interactive clients, tile streaming uses WebSocket `GET /tiles` as a control channel. Clients send request updates there, receive `RequestContext` / `Status` control frames back, and pull the actual binary tile frames via `/tiles/next`. Sending a new request message replaces the current in-flight request on that connection. ## HTTP service internals `mapget::HttpService` binds the core service to an HTTP server implementation. Its responsibilities are: -- map HTTP endpoints to service calls (`/sources`, `/tiles`, `/abort`, `/status`, `/locate`, `/config`), +- map HTTP/WebSocket endpoints to service calls (`/sources`, `/tiles`, `/tiles/next`, `/status`, `/status-data`, `/locate`, `/config`), - parse JSON requests and build `LayerTilesRequest` objects, - serialize tile responses as JSONL or binary streams, -- manage per‑client state such as `clientId` for abort handling, and - provide `/config` as a JSON view on the YAML config file. ### Tile streaming For `/tiles`, the HTTP layer: -- parses the JSON body to extract `requests`, `stringPoolOffsets` and an optional `clientId`, +- parses the JSON body to extract `requests` (`tileIds` or stage-aware `tileIdsByNextStage`) and optional `stringPoolOffsets`, - constructs one `LayerTilesRequest` per map–layer combination, - attaches callbacks that feed results into a shared `HttpTilesRequestState`, and - sends out each tile as soon as it is produced by the service. In JSONL mode the response is a sequence of newline‑separated JSON objects. 
In binary mode the HTTP layer uses `TileLayerStream::Writer` to serialize string pool updates and tile blobs. Binary responses can optionally be compressed using gzip if the client sends `Accept-Encoding: gzip`. -The `/abort` endpoint uses the `clientId` mechanism to cancel all open tile requests for a given client and to prevent further work from being scheduled for them. +WebSocket `/tiles` uses the same request JSON shape but serves only as the control plane: it emits `RequestContext` and `Status` VTLV frames, while `/tiles/next` performs the long-poll delivery of one or more binary tile frames (optionally batched up to `maxBytes`). ### Configuration endpoints @@ -207,7 +231,7 @@ These endpoints are guarded by command‑line flags: `--no-get-config` disables The model library provides both the binary tile encoding and the simfil query integration: -- `TileLayerStream::Writer` and `TileLayerStream::Reader` handle versioned, type‑tagged messages for string pools and tile layers. Each message starts with a protocol version, a `MessageType` (string pool, feature tile, SourceData tile, end-of-stream), and a payload size. +- `TileLayerStream::Writer` and `TileLayerStream::Reader` handle versioned, type‑tagged messages for string pools and tile layers. Each message starts with a protocol version, a `MessageType` (string pool, feature tile, SourceData tile, status, request-context, optional load-state change, end-of-stream), and a payload size. - `TileFeatureLayer` derives from `simfil::ModelPool` and exposes methods such as `evaluate(...)` and `complete(...)` to run simfil expressions and obtain completion candidates. String pools are streamed incrementally. The server keeps a `StringPoolOffsetMap` that tracks, for each ongoing tile request, the highest string ID known to a given client per datasource node id. 
When a tile is written, `TileLayerStream::Writer` compares that offset with the current `StringPool::highest()` value: @@ -230,7 +254,7 @@ The details of simfil itself are covered in the simfil language and developer gu For development and operations it is important to understand how to observe a running mapget instance: -- The `/status` endpoint shows JSON dumps of service and cache statistics embedded in a simple HTML page. +- The `/status` endpoint renders a live HTML diagnostics page backed by `/status-data`. - Environment variables such as `MAPGET_LOG_LEVEL`, `MAPGET_LOG_FILE` and `MAPGET_LOG_FILE_MAXSIZE` control logging behaviour and are honoured by both the Python entry point and the native binary. - Cache statistics expose hit and miss counts, and in memory mode additional metrics about cached tiles. diff --git a/docs/mapget-model.md b/docs/mapget-model.md index 1c80c90e..27d044b1 100644 --- a/docs/mapget-model.md +++ b/docs/mapget-model.md @@ -9,7 +9,7 @@ The atomic unit of data in mapget is the feature. Conceptually, a feature is clo - layered attributes with their own validity information, and - explicit relations and source data references. -The `properties.layers` tree in a feature holds these layered attributes and their validity arrays, while top-level entries under `properties` are regular attributes without layering. +The `properties.layers` tree in a feature holds these layered attributes and their validity entries, while top-level entries under `properties` are regular attributes without layering. To make this as fast as possible, mapget uses the simfil binary format with a small VTLV (Version-Type-Length-Value) message wrapper. This is explained in the following section. 
@@ -34,6 +34,9 @@ classDiagram +LayerType type +vector zoomLevels +vector coverage + +uint32_t stages + +vector stageLabels + +uint32_t highFidelityStage +bool canRead +bool canWrite +Version version @@ -65,7 +68,7 @@ classDiagram ``` - **`DataSourceInfo`** identifies the datasource node, the map ID that node serves, all attached layers and operational limits such as `maxParallelJobs`. When a datasource is marked as `isAddOn`, the service chains it behind the main datasource for the same map. -- **`LayerInfo`** describes a single layer: type (`Features` or `SourceData`), zoom levels, coverage rectangles, read/write flags and the semantic version. The service uses this to validate client requests, and the reader/writer uses it when parsing tile streams. +- **`LayerInfo`** describes a single layer: type (`Features` or `SourceData`), advertised zoom levels, coverage rectangles, staged-loading metadata (`stages`, optional `stageLabels`, `highFidelityStage`), read/write flags and the semantic version. The service uses this to validate client requests, and the reader/writer uses it when parsing tile streams. - **`FeatureTypeInfo`** and **`IdPart`** list the allowed unique ID compositions per feature type, which is why clients can rely on the ID schemes described earlier. - **`Coverage`** entries describe filled tile ranges so that caches and clients can reason about availability without probing every tile if a dataset is sparse. @@ -83,6 +86,45 @@ Add‑on datasources are registered with `isAddOn` and must share the same `mapI Clients see both base and add‑on entries in the `/sources` response (add‑ons are marked `isAddOn`), but the base datasource remains the entry point for tile requests. This mechanism is used by Python LiveSource overlays that attach Road and Lane attribute layers to an existing NDS.Live or NDS.Classic base map. 
+## Staged loading and feature LOD + +Mapget layer metadata can describe staged loading as well as a per-feature level-of-detail (LOD) signal used by low-fidelity renderers. + +```mermaid +flowchart LR + LayerInfo["LayerInfo
stages / stageLabels / highFidelityStage"] + ClientState["Client tracks nextMissingStage
per tile"] + RequestJson["/tiles request
tileIdsByNextStage[]"] + Service["Service expands each tile to
requested stages"] + Stage0["Stage 0
early geometry payload"] + Stage1["Stage 1
full geometry / non-ADAS enrichment"] + Stage2["Stage 2
ADAS enrichment"] + FeatureLOD["Feature.lod
LOD_0..LOD_7"] + ClientPolicy["Low-fi client policy
may cap rendered LOD"] + + LayerInfo --> ClientState + ClientState --> RequestJson --> Service + LayerInfo --> Service + Service --> Stage0 + Service --> Stage1 + Service --> Stage2 + Stage0 --> FeatureLOD + FeatureLOD --> ClientPolicy +``` + +- `LayerInfo.stages` declares how many stages exist for a layer. `stageLabels` are presentation metadata only. `highFidelityStage` is the actual rule-fidelity cutover used by consumers: stages below it are low-fidelity, stages at/above it are high-fidelity. +- Clients request staged tiles with `tileIdsByNextStage`: bucket `i` contains tiles whose next missing stage is `i`. The service expands each tile to the remaining stages for that layer. +- Plain `tileIds` are an unstaged request form. They do not mean “bucket 0 only”; they mean “request this tile without stage-bucket expansion”. +- Therefore a staged client must preserve `tileIdsByNextStage` even when only bucket `0` is populated. +- Payload partitioning is datasource-defined. In current `mapget-live-cpp`, the common patterns are: + - `SINGLE_STAGE`: stage `0` carries the complete feature payload. + - `GEOMETRY_THEN_ATTRIBUTES`: stage `0` carries full geometry/internal relations, stage `1` carries non-ADAS attributes and relations. + - `LOW_FI_HIGH_FI_ADAS`: stage `0` carries the low-fidelity geometry payload, stage `1` carries full geometry plus non-ADAS enrichment, stage `2` carries ADAS-only enrichment. + - `LOW_FI_FULL_GEOM_HIGH_FI_ADAS`: stage `0` already carries the canonical base geometry, stage `1` adds non-ADAS enrichment, stage `2` adds ADAS-only enrichment. +- A consequence of the last two patterns: stage number and stage label do not, by themselves, tell you whether a stage is “high fidelity”. Use `highFidelityStage` instead. +- Each feature also carries a backend `lod` (`LOD_0..LOD_7`). This is independent of stage: a stage answers “which payload slice arrived?”, while `lod` answers “how aggressively may a low-fidelity renderer cull this feature?”. 
+- `TileFeatureLayer::newFeature(...)` defaults stage-`0` features to `LOD_0` and later-stage feature records to `MAX_LOD`. Converters may override the stage-`0` value semantically (for example by road class). During stage merge, the stage-`0` feature data remains authoritative for `lod`. + ## Feature IDs Every feature in mapget is uniquely identified by a composite ID. Logically, it is made up of: @@ -119,7 +161,7 @@ Mapget supports a range of geometry types, including: - Lines and polylines. - Polygons and derived meshes. -All geometries may carry three‑dimensional coordinates. Internally, the model represents them as a geometry collection so that a feature can combine several geometry primitives if necessary. Each geometry may also have a `name`, which can be referenced by attribute validity information. +All geometries may carry three‑dimensional coordinates. Internally, the model stores either a single geometry directly or a geometry collection when a feature combines several primitives. Each geometry may also have a `name`, which can be referenced by attribute validity information. Validity information describes where and how an attribute or relation applies along a feature. A validity entry may include: @@ -130,22 +172,32 @@ Validity information describes where and how an attribute or relation applies al Together, these fields allow datasources to express, for example, that a speed limit applies only along part of a road, or that a relation to another feature is valid only within a spatial region. -At the JSON level, validity entries inside `properties.layers` look like this: +At the JSON level, a single validity is exposed as an object. Multiple validity entries are exposed as an array in request order. The same flattening rule applies to attribute `validity` as well as relation `sourceValidity` / `targetValidity`. 
```json -"validity": [ - { - "direction": "POSITIVE", - "offsetType": "MetricLengthOffset", - "start": 31.0, - "end": 57.6, - "geometryName": "centerline" - } -] +"validity": { + "direction": "POSITIVE", + "offsetType": "MetricLengthOffset", + "start": 31.0, + "end": 57.6, + "geometryName": "centerline" +} ``` Here `offsetType` describes how `start` and `end` should be interpreted (for example as metric distance along the line string or as relative length fractions). +Transition-number validities are exposed semantically rather than as baked helper geometry: + +```json +"validity": { + "from": "Road.545555028.1", + "fromConnectedEnd": "END", + "to": "Road.545555028.2", + "toConnectedEnd": "START", + "transitionNumber": 1 +} +``` + ### Validity internals The validity objects exposed in JSON map directly to the `Validity` C++ class: @@ -153,6 +205,7 @@ The validity objects exposed in JSON map directly to the `Validity` C++ class: - **Geometry description** indicates how a validity links to geometry: - `SimpleGeometry` embeds or references a complete geometry object. - `OffsetPointValidity` and `OffsetRangeValidity` point into an existing geometry by name and add offsets. + - `FeatureTransition` references two features plus their connected ends; mapget derives the rendered transition polyline from that semantic payload. - `NoGeometry` is used when only direction or feature references are available. - **Geometry offset type** controls the coordinate space used for offsets: @@ -163,9 +216,9 @@ The validity objects exposed in JSON map directly to the `Validity` C++ class: | `RelativeLengthOffset` | Values represent fractions (0–1) along the total geometry length. | | `MetricLengthOffset` | Values represent metres along the geometry (requires a polyline geometry). | -- **Direction** (`POSITIVE`, `NEGATIVE`, `BOTH`, `NONE`) describes whether the attribute applies relative to the digitisation direction of the referenced geometry. 
+- **Direction** (`POSITIVE`, `NEGATIVE`, `COMPLETE`, `NONE`) describes whether the attribute applies relative to the digitisation direction of the referenced geometry. `EMPTY` remains an internal sentinel and is not serialized. -Attributes and Relations can attach their own `MultiValidity` lists, so a datasource can mix and match: an attribute may reference a geometric sub‑range via `OffsetRangeValidity`, while the relation that connects two features uses a separate `SimpleGeometry` to express a polygon of influence. +Attributes and relations can attach their own validity lists, so a datasource can mix and match: an attribute may reference a geometric sub‑range via `OffsetRangeValidity`, while another attribute or relation may carry a semantic `FeatureTransition`. ## Source data references and relations @@ -221,6 +274,8 @@ classDiagram class Feature { +string_view typeId() + +LOD lod() + +void setLod(LOD) +model_ptr~FeatureId~ id() +model_ptr~GeometryCollection~ geom() +model_ptr~AttributeLayerList~ attributeLayers() @@ -265,6 +320,10 @@ classDiagram +model_ptr~Validity~ newPoint(...) +model_ptr~Validity~ newRange(...) +model_ptr~Validity~ newGeometry(...) + +model_ptr~Validity~ newFeatureId(...) + +model_ptr~Validity~ newGeomStage(...) + +model_ptr~Validity~ newFeatureTransition(...) + +model_ptr~Validity~ newComplete(...) +model_ptr~Validity~ newDirection(...) 
} @@ -317,6 +376,7 @@ classDiagram Attribute "0..1" *-- "1" MultiValidity Relation "0..1" *-- "1" MultiValidity : source Relation "0..1" *-- "1" MultiValidity : target + Validity ..> Feature : transition refs MultiValidity "1" *-- "many" Validity Geometry "0..1" *-- "1" SourceDataReferenceCollection Attribute "0..1" *-- "1" SourceDataReferenceCollection @@ -394,6 +454,13 @@ Each feature inside that tile looks like this: "start": 0.1, "end": 0.5, "geometryName": "centerline" + }, + { + "direction": "NEGATIVE", + "offsetType": "RelativeLengthOffset", + "start": 0.6, + "end": 1.0, + "geometryName": "centerline" } ] } diff --git a/docs/mapget-user-guide.md b/docs/mapget-user-guide.md index 978cf6c9..5385df53 100644 --- a/docs/mapget-user-guide.md +++ b/docs/mapget-user-guide.md @@ -10,7 +10,7 @@ The guide is split into several focused documents: - [**Setup Guide**](mapget-setup.md) explains how to install mapget via `pip`, how to build the native executable from source, and how to start a server or use the built‑in `fetch` client for quick experiments. - [**Configuration Guide**](mapget-config.md) documents the YAML configuration file used with `--config`, the supported datasource types (`DataSourceHost`, `DataSourceProcess`, `GridDataSource`, `GeoJsonFolder) and the optional `http-settings` section used by tools and UIs. -- [**REST API Guide**](mapget-api.md) describes the HTTP endpoints exposed by `mapget serve`, including `/sources`, `/tiles`, `/abort`, `/status`, `/locate` and `/config`, along with their request and response formats and example calls. +- [**HTTP / WebSocket API Guide**](mapget-api.md) describes the endpoints exposed by `mapget serve`, including `/sources`, `/tiles`, `/tiles/next`, `/status`, `/status-data`, `/locate` and `/config`, along with their request and response formats and example calls. 
- [**Caching Guide**](mapget-cache.md) covers the available cache modes (`memory`, `persistent`, `none`), explains how to configure cache size and location, and shows how to inspect cache statistics via the status endpoint. - [**Simfil Language Extensions**](mapget-simfil-extensions.md) introduces the feature model, tiling scheme, geometry and validity concepts, and the binary tile stream format. This chapter is especially relevant if you are writing datasources or low‑level clients. - [**Layered Data Model**](mapget-model.md) introduces the feature model, tiling scheme, geometry and validity concepts, and the binary tile stream format. This chapter is especially relevant if you are writing datasources or low‑level clients. diff --git a/libs/geojsonsource/src/geojsonsource.cpp b/libs/geojsonsource/src/geojsonsource.cpp index 6a0bd37c..5fa8be20 100644 --- a/libs/geojsonsource/src/geojsonsource.cpp +++ b/libs/geojsonsource/src/geojsonsource.cpp @@ -32,13 +32,13 @@ simfil::ModelNode::Ptr jsonToMapget( // NOLINT (recursive) if (j.is_null()) return {}; if (j.is_object()) { - auto subObject = tfl->newObject(j.size()); + auto subObject = tfl->newObject(j.size(), true); for (auto& el : j.items()) subObject->addField(el.key(), jsonToMapget(tfl, el.value())); return subObject; } if (j.is_array()) { - auto subArray = tfl->newArray(j.size()); + auto subArray = tfl->newArray(j.size(), true); for (auto& el : j.items()) subArray->append(jsonToMapget(tfl, el.value())); return subArray; @@ -335,18 +335,57 @@ void GeoJsonSource::fill(const mapget::TileFeatureLayer::Ptr& tile) auto feature = tile->newFeature(featureTypeName, {{"featureIndex", featureId}}); featureId++; - // Get geometry data - auto geometry = feature_data["geometry"]; - if (geometry["type"] == "Point") { - auto coordinates = geometry["coordinates"]; - feature->addPoint({coordinates[0], coordinates[1]}); - } - else if (geometry["type"] == "LineString") { - auto line = feature->geom()->newGeometry(GeomType::Line, 2); - 
for (auto& coordinates : geometry["coordinates"]) { - line->append({coordinates[0], coordinates[1]}); + // Parse geometry data (recursive lambda to support GeometryCollection) + std::function addGeometry; + addGeometry = [&](nlohmann::json const& geom) { + if (!geom.is_object() || !geom.contains("type")) + return; + auto const type = geom["type"].get(); + if (type == "Point") { + auto const& c = geom["coordinates"]; + feature->addPoint({c[0], c[1]}); } - } + else if (type == "MultiPoint") { + auto points = feature->geom()->newGeometry(GeomType::Points, geom["coordinates"].size()); + for (auto const& c : geom["coordinates"]) + points->append({c[0], c[1]}); + } + else if (type == "LineString") { + auto line = feature->geom()->newGeometry(GeomType::Line, geom["coordinates"].size()); + for (auto const& c : geom["coordinates"]) + line->append({c[0], c[1]}); + } + else if (type == "MultiLineString") { + for (auto const& coords : geom["coordinates"]) { + auto line = feature->geom()->newGeometry(GeomType::Line, coords.size()); + for (auto const& c : coords) + line->append({c[0], c[1]}); + } + } + else if (type == "Polygon") { + if (!geom["coordinates"].empty()) { + auto const& ring = geom["coordinates"][0]; + auto poly = feature->geom()->newGeometry(GeomType::Polygon, ring.size()); + for (auto const& c : ring) + poly->append({c[0], c[1]}); + } + } + else if (type == "MultiPolygon") { + for (auto const& polygon : geom["coordinates"]) { + if (!polygon.empty()) { + auto const& ring = polygon[0]; + auto poly = feature->geom()->newGeometry(GeomType::Polygon, ring.size()); + for (auto const& c : ring) + poly->append({c[0], c[1]}); + } + } + } + else if (type == "GeometryCollection") { + for (auto const& child : geom["geometries"]) + addGeometry(child); + } + }; + addGeometry(feature_data["geometry"]); // Add top-level properties as attributes for (auto& property : feature_data["properties"].items()) { @@ -372,7 +411,7 @@ void GeoJsonSource::fill(const 
mapget::TileFeatureLayer::Ptr& tile) std::string dir = attr.value()["_direction"]; auto validDir = dir == "POSITIVE" ? Validity::Positive : dir == "NEGATIVE" ? Validity::Negative : - dir == "BOTH" ? Validity::Both : + (dir == "COMPLETE" || dir == "BOTH") ? Validity::Both : Validity::Empty; attribute->validity()->newDirection(validDir); } diff --git a/libs/gridsource/src/gridsource.cpp b/libs/gridsource/src/gridsource.cpp index a2de04fc..08dce71d 100644 --- a/libs/gridsource/src/gridsource.cpp +++ b/libs/gridsource/src/gridsource.cpp @@ -946,7 +946,7 @@ void GridDataSource::generateIntersections(TileSpatialContext& ctx, {{config.featureType + "Id", intersection.id}}); // Create point geometry (Points type with single point) - auto points = feature->geom()->newGeometry(GeomType::Points, 1); + auto points = feature->geom()->newGeometry(GeomType::Points, 1, true); points->append(intersection.position); // Add relations to connected roads diff --git a/libs/http-datasource/CMakeLists.txt b/libs/http-datasource/CMakeLists.txt index e46557b5..7b323329 100644 --- a/libs/http-datasource/CMakeLists.txt +++ b/libs/http-datasource/CMakeLists.txt @@ -17,7 +17,7 @@ target_include_directories(mapget-http-datasource target_link_libraries(mapget-http-datasource PUBLIC - httplib::httplib + drogon mapget-model mapget-service tiny-process-library) diff --git a/libs/http-datasource/include/mapget/detail/http-server.h b/libs/http-datasource/include/mapget/detail/http-server.h index c80b7b62..cc6db8cb 100644 --- a/libs/http-datasource/include/mapget/detail/http-server.h +++ b/libs/http-datasource/include/mapget/detail/http-server.h @@ -1,11 +1,12 @@ #pragma once +#include #include #include -// Pre-declare httplib::Server to avoid including httplib.h in header -namespace httplib { -class Server; +// Forward declare Drogon app type to avoid including drogon headers in public headers. 
+namespace drogon { +class HttpAppFramework; } namespace mapget { @@ -76,7 +77,7 @@ class HttpServer * This function is called upon the first call to go(), * and allows any derived server class to add endpoints. */ - virtual void setup(httplib::Server&) = 0; + virtual void setup(drogon::HttpAppFramework&) = 0; /** * Derived servers can use this to control whether diff --git a/libs/http-datasource/include/mapget/http-datasource/datasource-client.h b/libs/http-datasource/include/mapget/http-datasource/datasource-client.h index 5bb03aee..5a00d000 100644 --- a/libs/http-datasource/include/mapget/http-datasource/datasource-client.h +++ b/libs/http-datasource/include/mapget/http-datasource/datasource-client.h @@ -3,15 +3,24 @@ #include "mapget/model/sourcedatalayer.h" #include "mapget/model/featurelayer.h" #include "mapget/service/datasource.h" -#include "httplib.h" #include +#include #include +#include namespace TinyProcessLib { class Process; } +namespace drogon { +class HttpClient; +} + +namespace trantor { +class EventLoopThread; +} + namespace mapget { @@ -32,12 +41,17 @@ class RemoteDataSource : public DataSource * fails for any reason. 
*/ RemoteDataSource(std::string const& host, uint16_t port); + ~RemoteDataSource(); // DataSource method overrides DataSourceInfo info() override; void fill(TileFeatureLayer::Ptr const& featureTile) override; void fill(TileSourceDataLayer::Ptr const& blobTile) override; - TileLayer::Ptr get(MapTileKey const& k, Cache::Ptr& cache, DataSourceInfo const& info) override; + TileLayer::Ptr get( + MapTileKey const& k, + Cache::Ptr& cache, + DataSourceInfo const& info, + TileLayer::LoadStateCallback loadStateCallback = {}) override; std::vector locate(const mapget::LocateRequest &req) override; private: @@ -48,7 +62,8 @@ class RemoteDataSource : public DataSource std::string error_; // Multiple http clients allow parallel GET requests - std::vector httpClients_; + std::unique_ptr httpClientLoop_; + std::vector> httpClients_; std::atomic_uint64_t nextClient_{0}; }; @@ -76,7 +91,11 @@ class RemoteDataSourceProcess : public DataSource DataSourceInfo info() override; void fill(TileFeatureLayer::Ptr const& featureTile) override; void fill(TileSourceDataLayer::Ptr const& sourceDataLayer) override; - TileLayer::Ptr get(MapTileKey const& k, Cache::Ptr& cache, DataSourceInfo const& info) override; + TileLayer::Ptr get( + MapTileKey const& k, + Cache::Ptr& cache, + DataSourceInfo const& info, + TileLayer::LoadStateCallback loadStateCallback = {}) override; std::vector locate(const mapget::LocateRequest &req) override; private: diff --git a/libs/http-datasource/include/mapget/http-datasource/datasource-server.h b/libs/http-datasource/include/mapget/http-datasource/datasource-server.h index ed11adde..a0deda49 100644 --- a/libs/http-datasource/include/mapget/http-datasource/datasource-server.h +++ b/libs/http-datasource/include/mapget/http-datasource/datasource-server.h @@ -50,7 +50,7 @@ class DataSourceServer : public HttpServer DataSourceInfo const& info(); private: - void setup(httplib::Server&) override; + void setup(drogon::HttpAppFramework&) override; struct Impl; std::unique_ptr 
impl_; diff --git a/libs/http-datasource/src/datasource-client.cpp b/libs/http-datasource/src/datasource-client.cpp index 1199c48c..9fc99f22 100644 --- a/libs/http-datasource/src/datasource-client.cpp +++ b/libs/http-datasource/src/datasource-client.cpp @@ -3,6 +3,10 @@ #include "process.hpp" #include "mapget/log.h" +#include +#include +#include + #include #include @@ -11,25 +15,43 @@ namespace mapget RemoteDataSource::RemoteDataSource(const std::string& host, uint16_t port) { + httpClientLoop_ = std::make_unique("MapgetRemoteDataSource"); + httpClientLoop_->run(); + + const auto hostString = fmt::format("http://{}:{}/", host, port); + // Fetch data source info. - httplib::Client client(host, port); - auto fetchedInfoJson = client.Get("/info"); - if (!fetchedInfoJson || fetchedInfoJson->status >= 300) - raise("Failed to fetch datasource info."); - info_ = DataSourceInfo::fromJson(nlohmann::json::parse(fetchedInfoJson->body)); + auto infoClient = drogon::HttpClient::newHttpClient(hostString, httpClientLoop_->getLoop()); + auto infoReq = drogon::HttpRequest::newHttpRequest(); + infoReq->setMethod(drogon::Get); + infoReq->setPath("/info"); + + auto [result, fetchedInfoResp] = infoClient->sendRequest(infoReq); + if (result != drogon::ReqResult::Ok || !fetchedInfoResp) { + raise(fmt::format("Failed to fetch datasource info: [{}]", drogon::to_string_view(result))); + } + if ((int)fetchedInfoResp->statusCode() >= 300) { + raise(fmt::format("Failed to fetch datasource info: [{}]", (int)fetchedInfoResp->statusCode())); + } + info_ = DataSourceInfo::fromJson(nlohmann::json::parse(std::string(fetchedInfoResp->body()))); if (info_.nodeId_.empty()) { // Unique node IDs are required for the string pool offsets. raise( fmt::format("Remote data source is missing node ID! Source info: {}", - fetchedInfoJson->body)); + std::string(fetchedInfoResp->body()))); } // Create as many clients as parallel requests are allowed. 
- for (auto i = 0; i < std::max(info_.maxParallelJobs_, 1); ++i) - httpClients_.emplace_back(host, port); + const auto clientCount = (std::max)(info_.maxParallelJobs_, 1); + httpClients_.reserve(clientCount); + for (auto i = 0; i < clientCount; ++i) { + httpClients_.emplace_back(drogon::HttpClient::newHttpClient(hostString, httpClientLoop_->getLoop())); + } } +RemoteDataSource::~RemoteDataSource() = default; + DataSourceInfo RemoteDataSource::info() { return info_; @@ -48,42 +70,43 @@ void RemoteDataSource::fill(const TileSourceDataLayer::Ptr& blobTile) } TileLayer::Ptr -RemoteDataSource::get(const MapTileKey& k, Cache::Ptr& cache, const DataSourceInfo& info) +RemoteDataSource::get( + const MapTileKey& k, + Cache::Ptr& cache, + const DataSourceInfo& info, + TileLayer::LoadStateCallback loadStateCallback) { // Round-robin usage of http clients to facilitate parallel requests. auto& client = httpClients_[(nextClient_++) % httpClients_.size()]; // Send a GET tile request. - auto tileResponse = client.Get(fmt::format( - "/tile?layer={}&tileId={}&stringPoolOffset={}", + auto tileReq = drogon::HttpRequest::newHttpRequest(); + tileReq->setMethod(drogon::Get); + tileReq->setPath(fmt::format( + "/tile?layer={}&tileId={}&stage={}&stringPoolOffset={}", k.layerId_, k.tileId_.value_, + k.stage_, cachedStringPoolOffset(info.nodeId_, cache))); + auto [resultCode, tileResponse] = client->sendRequest(tileReq); // Check that the response is OK. - if (!tileResponse || tileResponse->status >= 300) { + if (resultCode != drogon::ReqResult::Ok || !tileResponse || (int)tileResponse->statusCode() >= 300) { // Forward to base class get(). This will instantiate a // default TileLayer and call fill(). In our implementation // of fill, we set an error. 
- if (tileResponse) { - if (tileResponse->has_header("HTTPLIB_ERROR")) { - error_ = tileResponse->get_header_value("HTTPLIB_ERROR"); - } - else if (tileResponse->has_header("EXCEPTION_WHAT")) { - error_ = tileResponse->get_header_value("EXCEPTION_WHAT"); - } - else { - error_ = fmt::format("Code {}", tileResponse->status); - } - } - else { + if (resultCode != drogon::ReqResult::Ok) { + error_ = drogon::to_string(resultCode); + } else if (tileResponse) { + error_ = fmt::format("Code {}", (int)tileResponse->statusCode()); + } else { error_ = "No remote response."; } // Use tile instantiation logic of the base class, // the error is then set in fill(). - return DataSource::get(k, cache, info); + return DataSource::get(k, cache, info, std::move(loadStateCallback)); } // Check the response body for expected content. @@ -92,8 +115,11 @@ RemoteDataSource::get(const MapTileKey& k, Cache::Ptr& cache, const DataSourceIn [&](auto&& mapId, auto&& layerId) { return info.getLayer(std::string(layerId)); }, [&](auto&& tile) { result = tile; }, cache); - reader.read(tileResponse->body); + reader.read(std::string(tileResponse->body())); + if (result && loadStateCallback) { + result->setLoadStateCallback(std::move(loadStateCallback)); + } return result; } @@ -102,12 +128,15 @@ std::vector RemoteDataSource::locate(const LocateRequest& req) // Round-robin usage of http clients to facilitate parallel requests. auto& client = httpClients_[(nextClient_++) % httpClients_.size()]; - // Send a GET tile request. - auto locateResponse = client.Post( - fmt::format("/locate"), req.serialize().dump(), "application/json"); + auto locateReq = drogon::HttpRequest::newHttpRequest(); + locateReq->setMethod(drogon::Post); + locateReq->setPath("/locate"); + locateReq->setContentTypeCode(drogon::CT_APPLICATION_JSON); + locateReq->setBody(req.serialize().dump()); + auto [resultCode, locateResponse] = client->sendRequest(locateReq); // Check that the response is OK. 
- if (!locateResponse || locateResponse->status >= 300) { + if (resultCode != drogon::ReqResult::Ok || !locateResponse || (int)locateResponse->statusCode() >= 300) { // Forward to base class get(). This will instantiate a // default TileFeatureLayer and call fill(). In our implementation // of fill, we set an error. @@ -116,7 +145,7 @@ std::vector RemoteDataSource::locate(const LocateRequest& req) } // Check the response body for expected content. - auto responseJson = nlohmann::json::parse(locateResponse->body); + auto responseJson = nlohmann::json::parse(std::string(locateResponse->body())); if (responseJson.is_null()) { return {}; } @@ -222,11 +251,15 @@ void RemoteDataSourceProcess::fill(TileSourceDataLayer::Ptr const& sourceDataLay } TileLayer::Ptr -RemoteDataSourceProcess::get(MapTileKey const& k, Cache::Ptr& cache, DataSourceInfo const& info) +RemoteDataSourceProcess::get( + MapTileKey const& k, + Cache::Ptr& cache, + DataSourceInfo const& info, + TileLayer::LoadStateCallback loadStateCallback) { if (!remoteSource_) raise("Remote data source is not initialized."); - return remoteSource_->get(k, cache, info); + return remoteSource_->get(k, cache, info, std::move(loadStateCallback)); } std::vector RemoteDataSourceProcess::locate(const LocateRequest& req) diff --git a/libs/http-datasource/src/datasource-server.cpp b/libs/http-datasource/src/datasource-server.cpp index f6ea10f0..1d93813c 100644 --- a/libs/http-datasource/src/datasource-server.cpp +++ b/libs/http-datasource/src/datasource-server.cpp @@ -1,54 +1,52 @@ #include "datasource-server.h" -#include "mapget/detail/http-server.h" -#include "mapget/model/sourcedatalayer.h" -#include "mapget/model/featurelayer.h" + +#include "mapget/log.h" #include "mapget/model/info.h" -#include "mapget/model/layer.h" #include "mapget/model/stream.h" -#include "httplib.h" +#include +#include + #include #include +#include -namespace mapget { +#include "fmt/format.h" + +namespace mapget +{ struct DataSourceServer::Impl { 
DataSourceInfo info_; - std::function tileFeatureCallback_ = [](auto&&) - { + std::function tileFeatureCallback_ = [](auto&&) { throw std::runtime_error("TileFeatureLayer callback is unset!"); }; - std::function tileSourceDataCallback_ = [](auto&&) - { + std::function tileSourceDataCallback_ = [](auto&&) { throw std::runtime_error("TileSourceDataLayer callback is unset!"); }; std::function(const LocateRequest&)> locateCallback_; std::shared_ptr strings_; - explicit Impl(DataSourceInfo info) - : info_(std::move(info)), strings_(std::make_shared(info_.nodeId_)) + explicit Impl(DataSourceInfo info) : info_(std::move(info)), strings_(std::make_shared(info_.nodeId_)) { } }; -DataSourceServer::DataSourceServer(DataSourceInfo const& info) - : HttpServer(), impl_(new Impl(info)) +DataSourceServer::DataSourceServer(DataSourceInfo const& info) : HttpServer(), impl_(new Impl(info)) { printPortToStdOut(true); } DataSourceServer::~DataSourceServer() = default; -DataSourceServer& -DataSourceServer::onTileFeatureRequest(std::function const& callback) +DataSourceServer& DataSourceServer::onTileFeatureRequest(std::function const& callback) { impl_->tileFeatureCallback_ = callback; return *this; } -DataSourceServer& -DataSourceServer::onTileSourceDataRequest(std::function const& callback) +DataSourceServer& DataSourceServer::onTileSourceDataRequest(std::function const& callback) { impl_->tileSourceDataCallback_ = callback; return *this; @@ -61,97 +59,141 @@ DataSourceServer& DataSourceServer::onLocateRequest( return *this; } -DataSourceInfo const& DataSourceServer::info() { - return impl_->info_; -} +DataSourceInfo const& DataSourceServer::info() { return impl_->info_; } -void DataSourceServer::setup(httplib::Server& server) +void DataSourceServer::setup(drogon::HttpAppFramework& app) { - // Set up GET /tile endpoint - server.Get( + app.registerHandler( "/tile", - [this](const httplib::Request& req, httplib::Response& res) { - // Extract parameters from request. 
- auto layerIdParam = req.get_param_value("layer"); - auto layer = impl_->info_.getLayer(layerIdParam); - - auto tileIdParam = TileId{std::stoull(req.get_param_value("tileId"))}; - auto stringPoolOffsetParam = (simfil::StringId)0; - if (req.has_param("stringPoolOffset")) - stringPoolOffsetParam = (simfil::StringId) - std::stoul(req.get_param_value("stringPoolOffset")); - - std::string responseType = "binary"; - if (req.has_param("responseType")) - responseType = req.get_param_value("responseType"); - - // Create response TileFeatureLayer. - auto tileLayer = [&]() -> std::shared_ptr { - switch (layer->type_) { - case mapget::LayerType::Features: { - auto tileFeatureLayer = std::make_shared( - tileIdParam, - impl_->info_.nodeId_, - impl_->info_.mapId_, - layer, - impl_->strings_); - impl_->tileFeatureCallback_(tileFeatureLayer); - return tileFeatureLayer; + [this](const drogon::HttpRequestPtr& req, std::function&& callback) + { + try { + auto const& layerIdParam = req->getParameter("layer"); + auto const& tileIdParam = req->getParameter("tileId"); + + if (layerIdParam.empty() || tileIdParam.empty()) { + auto resp = drogon::HttpResponse::newHttpResponse(); + resp->setStatusCode(drogon::k400BadRequest); + resp->setContentTypeCode(drogon::CT_TEXT_PLAIN); + resp->setBody("Missing query parameter: layer and/or tileId"); + callback(resp); + return; + } + + auto layer = impl_->info_.getLayer(layerIdParam); + TileId tileId{std::stoull(tileIdParam)}; + + auto stageParam = 0u; + auto const& stageStr = req->getParameter("stage"); + if (!stageStr.empty()) { + stageParam = std::stoul(stageStr); } - case mapget::LayerType::SourceData: { - auto tileSourceLayer = std::make_shared( - tileIdParam, - impl_->info_.nodeId_, - impl_->info_.mapId_, - layer, - impl_->strings_); - impl_->tileSourceDataCallback_(tileSourceLayer); - return tileSourceLayer; + + auto stringPoolOffsetParam = (simfil::StringId)0; + auto const& stringPoolOffsetStr = req->getParameter("stringPoolOffset"); + if 
(!stringPoolOffsetStr.empty()) { + stringPoolOffsetParam = (simfil::StringId)std::stoul(stringPoolOffsetStr); } - default: - throw std::runtime_error(fmt::format("Unsupported layer type {}", (int)layer->type_)); + + std::string responseType = "binary"; + auto const& responseTypeStr = req->getParameter("responseType"); + if (!responseTypeStr.empty()) + responseType = responseTypeStr; + + auto tileLayer = [&]() -> std::shared_ptr + { + switch (layer->type_) { + case mapget::LayerType::Features: { + auto tileFeatureLayer = std::make_shared( + tileId, impl_->info_.nodeId_, impl_->info_.mapId_, layer, impl_->strings_); + if (layer->stages_ > 1) { + tileFeatureLayer->setStage(stageParam); + } + impl_->tileFeatureCallback_(tileFeatureLayer); + return tileFeatureLayer; + } + case mapget::LayerType::SourceData: { + auto tileSourceLayer = std::make_shared( + tileId, impl_->info_.nodeId_, impl_->info_.mapId_, layer, impl_->strings_); + impl_->tileSourceDataCallback_(tileSourceLayer); + return tileSourceLayer; + } + default: + throw std::runtime_error(fmt::format("Unsupported layer type {}", (int)layer->type_)); + } + }(); + + auto resp = drogon::HttpResponse::newHttpResponse(); + resp->setStatusCode(drogon::k200OK); + + if (responseType == "binary") { + std::string content; + TileLayerStream::StringPoolOffsetMap stringPoolOffsets{{impl_->info_.nodeId_, stringPoolOffsetParam}}; + TileLayerStream::Writer layerWriter{ + [&](std::string const& bytes, TileLayerStream::MessageType) { content.append(bytes); }, + stringPoolOffsets}; + layerWriter.write(tileLayer); + + resp->setContentTypeString("application/binary"); + resp->setBody(std::move(content)); + } else { + resp->setContentTypeCode(drogon::CT_APPLICATION_JSON); + resp->setBody(tileLayer->toJson().dump()); } - }(); - - // Serialize TileLayer using TileLayerStream. 
- if (responseType == "binary") { - std::stringstream content; - TileLayerStream::StringPoolOffsetMap stringPoolOffsets{ - {impl_->info_.nodeId_, stringPoolOffsetParam}}; - TileLayerStream::Writer layerWriter{ - [&](auto&& msg, auto&& msgType) { content << msg; }, - stringPoolOffsets}; - layerWriter.write(tileLayer); - res.set_content(content.str(), "application/binary"); + + callback(resp); } - else { - res.set_content(nlohmann::to_string(tileLayer->toJson()), "application/json"); + catch (std::exception const& e) { + auto resp = drogon::HttpResponse::newHttpResponse(); + resp->setStatusCode(drogon::k500InternalServerError); + resp->setContentTypeCode(drogon::CT_TEXT_PLAIN); + resp->setBody(std::string("Error: ") + e.what()); + callback(resp); } - }); + }, + {drogon::Get}); - // Set up GET /info endpoint - server.Get( + app.registerHandler( "/info", - [this](const httplib::Request&, httplib::Response& res) { - nlohmann::json j = impl_->info_.toJson(); - res.set_content(j.dump(), "application/json"); - }); - - // Set up POST /locate endpoint - server.Post( + [this](const drogon::HttpRequestPtr&, std::function&& callback) + { + auto resp = drogon::HttpResponse::newHttpResponse(); + resp->setStatusCode(drogon::k200OK); + resp->setContentTypeCode(drogon::CT_APPLICATION_JSON); + resp->setBody(impl_->info_.toJson().dump()); + callback(resp); + }, + {drogon::Get}); + + app.registerHandler( "/locate", - [this](const httplib::Request& req, httplib::Response& res) { - LocateRequest parsedReq(nlohmann::json::parse(req.body)); - auto responseJson = nlohmann::json::array(); - - if (impl_->locateCallback_) { - for (auto const& response : impl_->locateCallback_(parsedReq)) { - responseJson.emplace_back(response.serialize()); + [this](const drogon::HttpRequestPtr& req, std::function&& callback) + { + try { + LocateRequest parsedReq(nlohmann::json::parse(std::string(req->body()))); + auto responseJson = nlohmann::json::array(); + + if (impl_->locateCallback_) { + for (auto const& 
response : impl_->locateCallback_(parsedReq)) { + responseJson.emplace_back(response.serialize()); + } } - } - res.set_content(responseJson.dump(), "application/json"); - }); + auto resp = drogon::HttpResponse::newHttpResponse(); + resp->setStatusCode(drogon::k200OK); + resp->setContentTypeCode(drogon::CT_APPLICATION_JSON); + resp->setBody(responseJson.dump()); + callback(resp); + } + catch (std::exception const& e) { + auto resp = drogon::HttpResponse::newHttpResponse(); + resp->setStatusCode(drogon::k400BadRequest); + resp->setContentTypeCode(drogon::CT_TEXT_PLAIN); + resp->setBody(std::string("Invalid request: ") + e.what()); + callback(resp); + } + }, + {drogon::Post}); } } // namespace mapget diff --git a/libs/http-datasource/src/http-server.cpp b/libs/http-datasource/src/http-server.cpp index 6b49eb66..422ab1b6 100644 --- a/libs/http-datasource/src/http-server.cpp +++ b/libs/http-datasource/src/http-server.cpp @@ -1,130 +1,280 @@ #include "mapget/detail/http-server.h" #include "mapget/log.h" -#include "httplib.h" -#include +#include + +#include #include -#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include #include "fmt/format.h" namespace mapget { -// initialize the atomic activeHttpServer with nullptr +// Used by waitForSignal() so the signal handler knows what to stop. static std::atomic activeHttpServer = nullptr; +// Drogon uses a singleton app instance; running multiple independent servers +// in-process is not supported. 
+static std::atomic activeDrogonServer = nullptr; + +namespace +{ + +struct MountPoint +{ + std::string urlPrefix; + std::filesystem::path fsRoot; +}; + +[[nodiscard]] bool looksLikeWindowsDrivePath(std::string_view s) +{ + if (s.size() < 3) + return false; + const unsigned char drive = static_cast(s[0]); + return std::isalpha(drive) && s[1] == ':' && (s[2] == '\\' || s[2] == '/'); +} + +[[nodiscard]] std::string normalizeUrlPrefix(std::string prefix) +{ + if (prefix.empty()) + prefix = "/"; + if (prefix.front() != '/') + prefix.insert(prefix.begin(), '/'); + if (prefix.size() > 1 && prefix.back() == '/') + prefix.pop_back(); + return prefix; +} + +} // namespace + struct HttpServer::Impl { - httplib::Server server_; std::thread serverThread_; + std::atomic_bool running_{false}; + + std::mutex startMutex_; + std::condition_variable startCv_; + bool startNotified_ = false; + std::string startError_; + uint16_t port_ = 0; - bool setupWasCalled_ = false; bool printPortToStdout_ = false; + bool startedOnce_ = false; + + std::mutex mountsMutex_; + std::vector mounts_; static void handleSignal(int) { - // Temporarily holds the current active HttpServer auto* expected = activeHttpServer.load(); - - // Stop the active instance when a signal is received. - // We use compare_exchange_strong to make the operation atomic. 
if (activeHttpServer.compare_exchange_strong(expected, nullptr)) { if (expected) { expected->stop(); } } } + + void notifyStart(std::string errorMessage = {}) + { + std::lock_guard lock(startMutex_); + startError_ = std::move(errorMessage); + startNotified_ = true; + startCv_.notify_one(); + } }; HttpServer::HttpServer() : impl_(new Impl()) {} -HttpServer::~HttpServer() { - if (isRunning()) - stop(); +HttpServer::~HttpServer() +{ + stop(); } -void HttpServer::go( - std::string const& interfaceAddr, - uint16_t port, - uint32_t waitMs) +void HttpServer::go(std::string const& interfaceAddr, uint16_t port, uint32_t waitMs) { - if (!impl_->setupWasCalled_) { - // Allow derived class to set up the server - setup(impl_->server_); - impl_->setupWasCalled_ = true; - } - - if (impl_->server_.is_running() || impl_->serverThread_.joinable()) + if (impl_->running_ || impl_->serverThread_.joinable()) raise("HttpServer is already running"); + if (impl_->startedOnce_) + raise("HttpServer cannot be restarted in-process (Drogon singleton)"); - if (port == 0) { - impl_->port_ = impl_->server_.bind_to_any_port(interfaceAddr); - } - else { - impl_->port_ = port; - impl_->server_.bind_to_port(interfaceAddr, port); + HttpServer* expected = nullptr; + if (!activeDrogonServer.compare_exchange_strong(expected, this)) + raise("Only one HttpServer can run per process (Drogon singleton)"); + + impl_->startedOnce_ = true; + + // Reset start state. + { + std::lock_guard lock(impl_->startMutex_); + impl_->startNotified_ = false; + impl_->startError_.clear(); } impl_->serverThread_ = std::thread( - [this, interfaceAddr] + [this, interfaceAddr, port] { - if (impl_->printPortToStdout_) - std::cout << "====== Running on port " << impl_->port_ << " ======" << std::endl; - else - log().info("====== Running on port {} ======", impl_->port_); - impl_->server_.listen_after_bind(); + try { + auto& app = drogon::app(); + + // Copy mounts to avoid locking after the server thread starts. 
+ std::vector mountsCopy; + { + std::lock_guard lock(impl_->mountsMutex_); + mountsCopy = impl_->mounts_; + } + + if (!mountsCopy.empty()) { + std::sort( + mountsCopy.begin(), + mountsCopy.end(), + [](MountPoint const& a, MountPoint const& b) { return a.urlPrefix.size() > b.urlPrefix.size(); }); + + // Using empty document root makes addALocation's "alias" parameter + // work with absolute Windows paths (e.g. "C:/path"). + app.setDocumentRoot(""); + + for (auto const& m : mountsCopy) { + app.addALocation(m.urlPrefix, "", m.fsRoot.generic_string()); + } + } + + // Allow derived class to set up the server. + setup(app); + + app.addListener(interfaceAddr, port); + + app.registerBeginningAdvice([this]() { + // Beginning advice runs before listeners start. Post the actual + // startup notification to run after startListening() completed. + drogon::app().getLoop()->queueInLoop([this]() { + auto listeners = drogon::app().getListeners(); + if (listeners.empty()) { + impl_->notifyStart("HttpServer started without listeners"); + return; + } + + impl_->port_ = listeners.front().toPort(); + impl_->running_ = true; + impl_->notifyStart(); + + if (impl_->printPortToStdout_) + std::cout << "====== Running on port " << impl_->port_ << " ======" << std::endl; + else + log().info("====== Running on port {} ======", impl_->port_); + }); + }); + + app.run(); + } + catch (std::exception const& e) { + impl_->notifyStart(e.what()); + } + + impl_->running_ = false; + + HttpServer* expected = this; + (void)activeDrogonServer.compare_exchange_strong(expected, nullptr); }); - std::this_thread::sleep_for(std::chrono::milliseconds(waitMs)); - if (!impl_->server_.is_running() || !impl_->server_.is_valid()) - raise(fmt::format("Could not start HttpServer on {}:{}", interfaceAddr, port)); + std::unique_lock lk(impl_->startMutex_); + if (!impl_->startCv_.wait_for( + lk, + std::chrono::milliseconds(waitMs), + [this] { return impl_->startNotified_; })) { + raise(fmt::format("Could not start 
HttpServer on {}:{} (timeout)", interfaceAddr, port)); + } + + if (!impl_->startError_.empty()) + raise(impl_->startError_); } -bool HttpServer::isRunning() { - return impl_->server_.is_running(); +bool HttpServer::isRunning() +{ + return impl_->running_; } -void HttpServer::stop() { - if (!impl_->server_.is_running()) +void HttpServer::stop() +{ + if (!impl_->serverThread_.joinable()) return; - impl_->server_.stop(); - impl_->serverThread_.join(); + if (drogon::app().isRunning()) { + drogon::app().quit(); + } + + if (impl_->serverThread_.get_id() != std::this_thread::get_id()) + impl_->serverThread_.join(); } -uint16_t HttpServer::port() const { +uint16_t HttpServer::port() const +{ return impl_->port_; } -void HttpServer::waitForSignal() { - // So the signal handler knows what to call +void HttpServer::waitForSignal() +{ activeHttpServer = this; - // Set the signal handler for SIGINT and SIGTERM. std::signal(SIGINT, Impl::handleSignal); std::signal(SIGTERM, Impl::handleSignal); - // Wait for the signal handler to stop us, or the server to shut down on its own. 
while (isRunning()) { - std::this_thread::sleep_for(std::chrono::milliseconds (200)); + std::this_thread::sleep_for(std::chrono::milliseconds(200)); } activeHttpServer = nullptr; } -bool HttpServer::mountFileSystem(const std::string& pathFromTo) +bool HttpServer::mountFileSystem(std::string const& pathFromTo) { - using namespace std::ranges; - auto parts = pathFromTo | views::split(':') | views::transform([](auto&& s){return std::string(&*s.begin(), distance(s));}); - auto partsVec = std::vector(parts.begin(), parts.end()); + std::string urlPrefix; + std::string fsRootStr; + + const auto firstColon = pathFromTo.find(':'); + if (firstColon == std::string::npos || looksLikeWindowsDrivePath(pathFromTo)) { + urlPrefix = "/"; + fsRootStr = pathFromTo; + } else { + urlPrefix = pathFromTo.substr(0, firstColon); + fsRootStr = pathFromTo.substr(firstColon + 1); + if (fsRootStr.empty()) + return false; + } + + urlPrefix = normalizeUrlPrefix(std::move(urlPrefix)); - if (partsVec.size() == 1) - return impl_->server_.set_mount_point("/", partsVec[0]); - return impl_->server_.set_mount_point(partsVec[0], partsVec[1]); + std::filesystem::path fsRoot(fsRootStr); + std::error_code ec; + fsRoot = std::filesystem::absolute(fsRoot, ec); + if (ec) + return false; + + auto exists = std::filesystem::exists(fsRoot, ec); + if (!exists || ec) + return false; + auto isDirectory = std::filesystem::is_directory(fsRoot, ec); + if (!isDirectory || ec) + return false; + + std::scoped_lock lock(impl_->mountsMutex_); + impl_->mounts_.emplace_back(MountPoint{std::move(urlPrefix), std::move(fsRoot)}); + return true; } -void HttpServer::printPortToStdOut(bool enabled) { +void HttpServer::printPortToStdOut(bool enabled) +{ impl_->printPortToStdout_ = enabled; } diff --git a/libs/http-service/CMakeLists.txt b/libs/http-service/CMakeLists.txt index ce3d733e..7259d0b9 100644 --- a/libs/http-service/CMakeLists.txt +++ b/libs/http-service/CMakeLists.txt @@ -6,7 +6,17 @@ add_library(mapget-http-service 
STATIC include/mapget/http-service/cli.h src/http-service.cpp + src/http-service-impl.h + src/http-service-impl.cpp + src/config-handler.cpp src/http-client.cpp + src/locate-handler.cpp + src/sources-handler.cpp + src/status-handler.cpp + src/tiles-request-json.h + src/tiles-request-json.cpp + src/tiles-http-handler.cpp + src/tiles-ws-controller.cpp src/cli.cpp) target_include_directories(mapget-http-service @@ -17,7 +27,7 @@ target_include_directories(mapget-http-service target_link_libraries(mapget-http-service PUBLIC - httplib::httplib + drogon yaml-cpp CLI11::CLI11 nlohmann_json_schema_validator diff --git a/libs/http-service/include/mapget/http-service/http-client.h b/libs/http-service/include/mapget/http-service/http-client.h index 67c293d6..7fad612d 100644 --- a/libs/http-service/include/mapget/http-service/http-client.h +++ b/libs/http-service/include/mapget/http-service/http-client.h @@ -18,7 +18,7 @@ class HttpClient * endpoint, and caches the result for the lifetime of this object. 
* @param enableCompression Enable gzip compression for responses (default: true) */ - explicit HttpClient(std::string const& host, uint16_t port, httplib::Headers headers = {}, bool enableCompression = true); + explicit HttpClient(std::string const& host, uint16_t port, AuthHeaders headers = {}, bool enableCompression = true); ~HttpClient(); /** diff --git a/libs/http-service/include/mapget/http-service/http-service.h b/libs/http-service/include/mapget/http-service/http-service.h index 41f834c8..61ea4c8e 100644 --- a/libs/http-service/include/mapget/http-service/http-service.h +++ b/libs/http-service/include/mapget/http-service/http-service.h @@ -1,6 +1,5 @@ #pragma once -#include "httplib.h" #include "mapget/detail/http-server.h" #include "mapget/model/featurelayer.h" #include "mapget/model/stream.h" @@ -56,7 +55,7 @@ class HttpService : public HttpServer, public Service ~HttpService() override; protected: - void setup(httplib::Server& server) override; + void setup(drogon::HttpAppFramework& app) override; private: struct Impl; diff --git a/libs/http-service/src/cli.cpp b/libs/http-service/src/cli.cpp index 7e51c540..90e92e76 100644 --- a/libs/http-service/src/cli.cpp +++ b/libs/http-service/src/cli.cpp @@ -92,9 +92,16 @@ nlohmann::json geoJsonFolderSchema() {"title", "Folder"}, {"description", "Path to a folder containing GeoJSON tiles."} }}, + {"mapId", { + {"type", "string"}, + {"title", "Map ID"}, + {"description", "Custom map identifier. If not provided, derived from folder path."} + }}, {"withAttrLayers", { {"type", "boolean"}, - {"title", "With Attribute Layers"} + {"title", "With Attribute Layers"}, + {"description", "Convert nested GeoJSON property objects to mapget attribute layers. 
Default: true."}, + {"default", true} }} }}, {"required", nlohmann::json::array({"folder"})}, @@ -233,10 +240,13 @@ void registerDefaultDatasourceTypes() { "GeoJsonFolder", [](YAML::Node const& config) -> DataSource::Ptr { if (auto folder = config["folder"]) { - bool withAttributeLayers = false; + bool withAttributeLayers = true; if (auto withAttributeLayersNode = config["withAttrLayers"]) withAttributeLayers = withAttributeLayersNode.as(); - return std::make_shared(folder.as(), withAttributeLayers); + std::string mapId; + if (auto mapIdNode = config["mapId"]) + mapId = mapIdNode.as(); + return std::make_shared(folder.as(), withAttributeLayers, mapId); } throw std::runtime_error("Missing `folder` field."); }, @@ -446,7 +456,7 @@ struct ServeCommand log().info("Webapp: {}", webapp_); if (!srv.mountFileSystem(webapp_)) { log().error(" ...failed to mount!"); - exit(1); + raise("Failed to mount webapp filesystem path."); } } diff --git a/libs/http-service/src/config-handler.cpp b/libs/http-service/src/config-handler.cpp new file mode 100644 index 00000000..82ac86e6 --- /dev/null +++ b/libs/http-service/src/config-handler.cpp @@ -0,0 +1,229 @@ +#include "http-service-impl.h" + +#include "cli.h" +#include "mapget/log.h" +#include "mapget/service/config.h" + +#include +#include + +#include +#include +#include +#include +#include + +#include "nlohmann/json.hpp" +#include "yaml-cpp/yaml.h" + +namespace mapget +{ + +drogon::HttpResponsePtr HttpService::Impl::openConfigFile(std::ifstream& configFile) +{ + auto configFilePath = DataSourceConfigService::get().getConfigFilePath(); + if (!configFilePath.has_value()) { + auto resp = drogon::HttpResponse::newHttpResponse(); + resp->setStatusCode(drogon::k404NotFound); + resp->setContentTypeCode(drogon::CT_TEXT_PLAIN); + resp->setBody("The config file path is not set. 
Check the server configuration."); + return resp; + } + + std::filesystem::path path = *configFilePath; + if (!std::filesystem::exists(path)) { + auto resp = drogon::HttpResponse::newHttpResponse(); + resp->setStatusCode(drogon::k404NotFound); + resp->setContentTypeCode(drogon::CT_TEXT_PLAIN); + resp->setBody("The server does not have a config file."); + return resp; + } + + configFile.open(*configFilePath); + if (!configFile) { + auto resp = drogon::HttpResponse::newHttpResponse(); + resp->setStatusCode(drogon::k500InternalServerError); + resp->setContentTypeCode(drogon::CT_TEXT_PLAIN); + resp->setBody("Failed to open config file."); + return resp; + } + + return nullptr; +} + +void HttpService::Impl::handleGetConfigRequest( + const drogon::HttpRequestPtr& /*req*/, + std::function&& callback) +{ + if (!isGetConfigEndpointEnabled()) { + auto resp = drogon::HttpResponse::newHttpResponse(); + resp->setStatusCode(drogon::k403Forbidden); + resp->setContentTypeCode(drogon::CT_TEXT_PLAIN); + resp->setBody("The GET /config endpoint is disabled by the server administrator."); + callback(resp); + return; + } + + std::ifstream configFile; + if (auto errorResp = openConfigFile(configFile)) { + callback(errorResp); + return; + } + + nlohmann::json jsonSchema = DataSourceConfigService::get().getDataSourceConfigSchema(); + + try { + YAML::Node configYaml = YAML::Load(configFile); + nlohmann::json jsonConfig; + std::unordered_map maskedSecretMap; + for (const auto& key : DataSourceConfigService::get().topLevelDataSourceConfigKeys()) { + if (auto configYamlEntry = configYaml[key]) + jsonConfig[key] = yamlToJson(configYaml[key], true, &maskedSecretMap); + } + + nlohmann::json combinedJson; + combinedJson["schema"] = jsonSchema; + combinedJson["model"] = jsonConfig; + combinedJson["readOnly"] = !isPostConfigEndpointEnabled(); + + auto resp = drogon::HttpResponse::newHttpResponse(); + resp->setStatusCode(drogon::k200OK); + resp->setContentTypeCode(drogon::CT_APPLICATION_JSON); + 
resp->setBody(combinedJson.dump(2)); + callback(resp); + } + catch (const std::exception& e) { + auto resp = drogon::HttpResponse::newHttpResponse(); + resp->setStatusCode(drogon::k500InternalServerError); + resp->setContentTypeCode(drogon::CT_TEXT_PLAIN); + resp->setBody(std::string("Error processing config file: ") + e.what()); + callback(resp); + } +} + +void HttpService::Impl::handlePostConfigRequest( + const drogon::HttpRequestPtr& req, + std::function&& callback) const +{ + if (!isPostConfigEndpointEnabled()) { + auto resp = drogon::HttpResponse::newHttpResponse(); + resp->setStatusCode(drogon::k403Forbidden); + resp->setContentTypeCode(drogon::CT_TEXT_PLAIN); + resp->setBody("The POST /config endpoint is not enabled by the server administrator."); + callback(resp); + return; + } + + struct ConfigUpdateState : std::enable_shared_from_this + { + trantor::EventLoop* loop = nullptr; + std::atomic_bool done{false}; + std::atomic_bool wroteConfig{false}; + std::unique_ptr subscription; + std::function callback; + }; + + std::ifstream configFile; + if (auto errorResp = openConfigFile(configFile)) { + callback(errorResp); + return; + } + + nlohmann::json jsonConfig; + try { + jsonConfig = nlohmann::json::parse(std::string(req->body())); + } + catch (const nlohmann::json::parse_error& e) { + auto resp = drogon::HttpResponse::newHttpResponse(); + resp->setStatusCode(drogon::k400BadRequest); + resp->setContentTypeCode(drogon::CT_TEXT_PLAIN); + resp->setBody(std::string("Invalid JSON format: ") + e.what()); + callback(resp); + return; + } + + try { + DataSourceConfigService::get().validateDataSourceConfig(jsonConfig); + } + catch (const std::exception& e) { + auto resp = drogon::HttpResponse::newHttpResponse(); + resp->setStatusCode(drogon::k500InternalServerError); + resp->setContentTypeCode(drogon::CT_TEXT_PLAIN); + resp->setBody(std::string("Validation failed: ") + e.what()); + callback(resp); + return; + } + + auto yamlConfig = YAML::Load(configFile); + 
std::unordered_map maskedSecrets; + yamlToJson(yamlConfig, true, &maskedSecrets); + + for (auto const& key : DataSourceConfigService::get().topLevelDataSourceConfigKeys()) { + if (jsonConfig.contains(key)) + yamlConfig[key] = jsonToYaml(jsonConfig[key], maskedSecrets); + } + + auto state = std::make_shared(); + state->loop = drogon::app().getLoop(); + state->callback = std::move(callback); + + state->subscription = DataSourceConfigService::get().subscribe( + [state](std::vector const&) mutable { + if (!state->wroteConfig) { + return; + } + if (state->done.exchange(true)) + return; + state->loop->queueInLoop([state]() mutable { + auto resp = drogon::HttpResponse::newHttpResponse(); + resp->setStatusCode(drogon::k200OK); + resp->setContentTypeCode(drogon::CT_TEXT_PLAIN); + resp->setBody("Configuration updated and applied successfully."); + state->callback(resp); + state->subscription.reset(); + }); + }, + [state](std::string const& error) mutable { + if (!state->wroteConfig) { + return; + } + if (state->done.exchange(true)) + return; + state->loop->queueInLoop([state, error]() mutable { + auto resp = drogon::HttpResponse::newHttpResponse(); + resp->setStatusCode(drogon::k500InternalServerError); + resp->setContentTypeCode(drogon::CT_TEXT_PLAIN); + resp->setBody(std::string("Error applying the configuration: ") + error); + state->callback(resp); + state->subscription.reset(); + }); + }); + + configFile.close(); + log().trace("Writing new config."); + state->wroteConfig = true; + if (auto configFilePath = DataSourceConfigService::get().getConfigFilePath()) { + std::ofstream newConfigFile(*configFilePath); + newConfigFile << yamlConfig; + newConfigFile.close(); + } + + std::thread([weak = state->weak_from_this()]() { + std::this_thread::sleep_for(std::chrono::seconds(60)); + if (auto state = weak.lock()) { + if (state->done.exchange(true)) + return; + state->loop->queueInLoop([state]() mutable { + auto resp = drogon::HttpResponse::newHttpResponse(); + 
resp->setStatusCode(drogon::k500InternalServerError); + resp->setContentTypeCode(drogon::CT_TEXT_PLAIN); + resp->setBody("Timeout while waiting for config to update."); + state->callback(resp); + state->subscription.reset(); + }); + } + }).detach(); +} + +} // namespace mapget + diff --git a/libs/http-service/src/http-client.cpp b/libs/http-service/src/http-client.cpp index a002c34f..1f2c4c99 100644 --- a/libs/http-service/src/http-client.cpp +++ b/libs/http-service/src/http-client.cpp @@ -1,48 +1,173 @@ #include "http-client.h" -#include "httplib.h" + #include "mapget/log.h" +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "fmt/format.h" + +#include + namespace mapget { +namespace +{ + +void applyHeaders(drogon::HttpRequestPtr const& req, AuthHeaders const& headers) +{ + for (auto const& [k, v] : headers) { + req->addHeader(k, v); + } +} + +[[nodiscard]] bool hasGzipContentEncoding(std::string_view contentEncoding) +{ + if (contentEncoding.empty()) { + return false; + } + + std::string normalized(contentEncoding); + std::transform( + normalized.begin(), + normalized.end(), + normalized.begin(), + [](unsigned char ch) { return static_cast(std::tolower(ch)); }); + return normalized.find("gzip") != std::string::npos; +} + +[[nodiscard]] bool looksLikeGzip(std::string_view bytes) +{ + return bytes.size() >= 2 && + static_cast(bytes[0]) == 0x1f && + static_cast(bytes[1]) == 0x8b; +} + +[[nodiscard]] std::optional gunzip(std::string_view input) +{ + if (input.empty()) { + return std::string{}; + } + if (input.size() > static_cast(std::numeric_limits::max())) { + return std::nullopt; + } + + z_stream stream{}; + stream.next_in = reinterpret_cast(const_cast(input.data())); + stream.avail_in = static_cast(input.size()); + + // 16 + MAX_WBITS enables gzip container decoding. 
+ if (inflateInit2(&stream, 16 + MAX_WBITS) != Z_OK) { + return std::nullopt; + } + + std::string output; + output.reserve(input.size() * 2); + + char outBuffer[8192]; + int inflateResult = Z_OK; + do { + stream.next_out = reinterpret_cast(outBuffer); + stream.avail_out = sizeof(outBuffer); + inflateResult = inflate(&stream, Z_NO_FLUSH); + if (inflateResult != Z_OK && inflateResult != Z_STREAM_END) { + inflateEnd(&stream); + return std::nullopt; + } + output.append(outBuffer, sizeof(outBuffer) - stream.avail_out); + } while (inflateResult != Z_STREAM_END); + + inflateEnd(&stream); + return output; +} + +[[nodiscard]] std::optional decodeResponseBody(const drogon::HttpResponsePtr& resp) +{ + if (!resp) { + return std::nullopt; + } + + auto body = std::string_view(resp->body().data(), resp->body().size()); + auto contentEncoding = resp->getHeader("Content-Encoding"); + if (contentEncoding.empty()) { + contentEncoding = resp->getHeader("content-encoding"); + } + + const bool headerSaysGzip = hasGzipContentEncoding(contentEncoding); + const bool bodyLooksGzip = looksLikeGzip(body); + + // Drogon may already have decompressed the payload before exposing body(). + // If the header says gzip but bytes are not gzip-framed, treat body as decoded. 
+ if (headerSaysGzip && !bodyLooksGzip) { + return std::string(body); + } + + if (!headerSaysGzip && !bodyLooksGzip) { + return std::string(body); + } + + return gunzip(body); +} + +} // namespace + struct HttpClient::Impl { - httplib::Client client_; + std::unique_ptr loopThread_; + drogon::HttpClientPtr client_; std::unordered_map sources_; std::shared_ptr stringPoolProvider_; - httplib::Headers headers_; + AuthHeaders headers_; - Impl(std::string const& host, uint16_t port, httplib::Headers headers, bool enableCompression) : - client_(host, port), - headers_(std::move(headers)) + Impl(std::string const& host, uint16_t port, AuthHeaders headers, bool enableCompression) : headers_(std::move(headers)) { - // Add Accept-Encoding header if compression is enabled and not already present - if (enableCompression) { - bool hasAcceptEncoding = false; - for (const auto& [key, value] : headers_) { - if (key == "Accept-Encoding") { - hasAcceptEncoding = true; - break; - } - } - if (!hasAcceptEncoding) { - headers_.emplace("Accept-Encoding", "gzip"); - } + if (enableCompression && !(headers_.contains("Accept-Encoding") || headers_.contains("accept-encoding"))) { + headers_.emplace("Accept-Encoding", "gzip"); } - + + loopThread_ = std::make_unique("MapgetHttpClient"); + loopThread_->run(); + + const auto hostString = fmt::format("http://{}:{}/", host, port); + client_ = drogon::HttpClient::newHttpClient(hostString, loopThread_->getLoop()); + stringPoolProvider_ = std::make_shared(); - client_.set_keep_alive(false); - auto sourcesJson = client_.Get("/sources", headers_); - if (!sourcesJson || sourcesJson->status != 200) - raise( - fmt::format("Failed to fetch sources: [{}]", sourcesJson->status)); - for (auto const& info : nlohmann::json::parse(sourcesJson->body)) { + + // Fetch data sources (/sources). 
+ auto req = drogon::HttpRequest::newHttpRequest(); + req->setMethod(drogon::Get); + req->setPath("/sources"); + applyHeaders(req, headers_); + + auto [result, resp] = client_->sendRequest(req); + if (result != drogon::ReqResult::Ok || !resp) { + raise(fmt::format("Failed to fetch sources: [{}]", drogon::to_string_view(result))); + } + if (resp->statusCode() != drogon::k200OK) { + raise(fmt::format("Failed to fetch sources: [{}]", (int)resp->statusCode())); + } + + auto decodedBody = decodeResponseBody(resp); + if (!decodedBody) { + raise("Failed to decode /sources response body"); + } + + for (auto const& info : nlohmann::json::parse(*decodedBody)) { auto parsedInfo = DataSourceInfo::fromJson(info); sources_.emplace(parsedInfo.mapId_, parsedInfo); } } - [[nodiscard]] std::shared_ptr - resolve(std::string_view const& map, std::string_view const& layer) const + [[nodiscard]] std::shared_ptr resolve(std::string_view const& map, std::string_view const& layer) const { auto mapIt = sources_.find(std::string(map)); if (mapIt == sources_.end()) @@ -51,8 +176,10 @@ struct HttpClient::Impl { } }; -HttpClient::HttpClient(const std::string& host, uint16_t port, httplib::Headers headers, bool enableCompression) : impl_( - std::make_unique(host, port, std::move(headers), enableCompression)) {} +HttpClient::HttpClient(const std::string& host, uint16_t port, AuthHeaders headers, bool enableCompression) + : impl_(std::make_unique(host, port, std::move(headers), enableCompression)) +{ +} HttpClient::~HttpClient() = default; @@ -73,43 +200,75 @@ LayerTilesRequest::Ptr HttpClient::request(const LayerTilesRequest::Ptr& request } auto reader = std::make_unique( - [this](auto&& mapId, auto&& layerId){return impl_->resolve(mapId, layerId);}, + [this](auto&& mapId, auto&& layerId) { return impl_->resolve(mapId, layerId); }, [request](auto&& result) { request->notifyResult(result); }, impl_->stringPoolProvider_); using namespace nlohmann; - // TODO: Currently, cpp-httplib client-POST does 
not support async responses. - // Those are only supported by GET. So, currently, this HttpClient - // does not profit from the streaming response. However, erdblick is - // is fully able to process async responses as it uses the browser fetch()-API. - auto tileResponse = impl_->client_.Post( - "/tiles", - impl_->headers_, - json::object({ - {"requests", json::array({request->toJson()})}, - {"stringPoolOffsets", reader->stringPoolCache()->stringPoolOffsets()} - }).dump(), - "application/json"); - - if (tileResponse) { - if (tileResponse->status == 200) { - reader->read(tileResponse->body); - } - else if (tileResponse->status == 400) { + auto body = json::object({ + {"requests", json::array({request->toJson()})}, + {"stringPoolOffsets", reader->stringPoolCache()->stringPoolOffsets()}, + }).dump(); + + auto httpReq = drogon::HttpRequest::newHttpRequest(); + httpReq->setMethod(drogon::Post); + httpReq->setPath("/tiles"); + httpReq->setContentTypeCode(drogon::CT_APPLICATION_JSON); + httpReq->addHeader("Accept", "application/binary"); + httpReq->setBody(std::move(body)); + applyHeaders(httpReq, impl_->headers_); + + auto [result, resp] = impl_->client_->sendRequest(httpReq); + if (result == drogon::ReqResult::Ok && resp) { + if (resp->statusCode() == drogon::k200OK) { + try { + auto const layerInfo = impl_->resolve(request->mapId_, request->layerId_); + request->prepareResolvedLayer(layerInfo->type_, layerInfo->stages_); + } + catch (const std::exception& e) { + log().error("Failed to resolve request layer context: {}", e.what()); + request->setStatus(RequestStatus::Aborted); + return request; + } + + auto decodedBody = decodeResponseBody(resp); + if (!decodedBody) { + log().error("HttpClient /tiles decode failed"); + request->setStatus(RequestStatus::Aborted); + return request; + } + + // TODO: Support streamed/chunked tile responses. + // Drogon's `HttpClient` API only provides the full buffered body. 
+ // True streaming would require a custom client built on + // `trantor::TcpClient` (still within the Drogon dependency). + try { + reader->read(*decodedBody); + } + catch (const std::exception& e) { + log().error("Failed to parse /tiles response: {}", e.what()); + request->setStatus(RequestStatus::Aborted); + return request; + } + + if (!request->isDone()) { + // HttpClient performs one fully-buffered request. If parsing did + // not resolve request status by now, no more bytes will arrive. + request->setStatus(RequestStatus::Aborted); + } + } else if (resp->statusCode() == drogon::k400BadRequest) { request->setStatus(RequestStatus::NoDataSource); - } - else if (tileResponse->status == 403) { + } else if (resp->statusCode() == drogon::k403Forbidden) { request->setStatus(RequestStatus::Unauthorized); + } else { + request->setStatus(RequestStatus::Aborted); } - // TODO if multiple LayerTileRequests are ever sent by this client, - // additionally handle RequestStatus::Aborted. - } - else { + } else { request->setStatus(RequestStatus::Aborted); } return request; } -} +} // namespace mapget diff --git a/libs/http-service/src/http-service-impl.cpp b/libs/http-service/src/http-service-impl.cpp new file mode 100644 index 00000000..1f323b31 --- /dev/null +++ b/libs/http-service/src/http-service-impl.cpp @@ -0,0 +1,39 @@ +#include "http-service-impl.h" + +#include "mapget/log.h" + +#ifdef __linux__ +#include +#endif + +namespace mapget +{ + +HttpService::Impl::Impl(HttpService& self, const HttpServiceConfig& config) : self_(self), config_(config) {} + +void HttpService::Impl::tryMemoryTrim(ResponseType responseType) const +{ + uint64_t interval = + (responseType == ResponseType::Binary) ? config_.memoryTrimIntervalBinary : config_.memoryTrimIntervalJson; + + if (interval == 0) { + return; + } + + auto& counter = (responseType == ResponseType::Binary) ? 
binaryRequestCounter_ : jsonRequestCounter_; + auto count = counter.fetch_add(1, std::memory_order_relaxed); + if ((count % interval) != 0) { + return; + } + +#ifdef __linux__ +#ifndef NDEBUG + const char* typeStr = (responseType == ResponseType::Binary) ? "binary" : "JSON"; + log().debug("Trimming memory after {} {} requests (interval: {})", count, typeStr, interval); +#endif + malloc_trim(0); +#endif +} + +} // namespace mapget + diff --git a/libs/http-service/src/http-service-impl.h b/libs/http-service/src/http-service-impl.h new file mode 100644 index 00000000..88d2d1af --- /dev/null +++ b/libs/http-service/src/http-service-impl.h @@ -0,0 +1,76 @@ +#pragma once + +#include "http-service.h" + +#include +#include + +#include +#include +#include +#include + +namespace mapget +{ + +namespace detail +{ + +[[nodiscard]] inline AuthHeaders authHeadersFromRequest(const drogon::HttpRequestPtr& req) +{ + AuthHeaders headers; + for (auto const& [k, v] : req->headers()) { + headers.emplace(k, v); + } + return headers; +} + +} // namespace detail + +struct HttpService::Impl +{ + HttpService& self_; + HttpServiceConfig config_; + mutable std::atomic binaryRequestCounter_{0}; + mutable std::atomic jsonRequestCounter_{0}; + + explicit Impl(HttpService& self, const HttpServiceConfig& config); + + enum class ResponseType { Binary, Json }; + + void tryMemoryTrim(ResponseType responseType) const; + + struct TilesStreamState; + + void handleTilesRequest( + const drogon::HttpRequestPtr& req, + std::function&& callback) const; + + void handleSourcesRequest( + const drogon::HttpRequestPtr& req, + std::function&& callback) const; + + void handleStatusRequest( + const drogon::HttpRequestPtr& req, + std::function&& callback) const; + + void handleStatusDataRequest( + const drogon::HttpRequestPtr& req, + std::function&& callback) const; + + void handleLocateRequest( + const drogon::HttpRequestPtr& req, + std::function&& callback) const; + + static drogon::HttpResponsePtr 
openConfigFile(std::ifstream& configFile); + + static void handleGetConfigRequest( + const drogon::HttpRequestPtr& req, + std::function&& callback); + + void handlePostConfigRequest( + const drogon::HttpRequestPtr& req, + std::function&& callback) const; +}; + +} // namespace mapget diff --git a/libs/http-service/src/http-service.cpp b/libs/http-service/src/http-service.cpp index 0137a1b7..bc46f860 100644 --- a/libs/http-service/src/http-service.cpp +++ b/libs/http-service/src/http-service.cpp @@ -1,668 +1,81 @@ -#include "http-service.h" -#include "mapget/log.h" -#include "mapget/service/config.h" +#include "http-service-impl.h" -#include -#include -#include -#include -#include -#include -#include -#include "cli.h" -#include "httplib.h" -#include "nlohmann/json-schema.hpp" -#include "nlohmann/json.hpp" -#include "yaml-cpp/yaml.h" -#include +#include "tiles-ws-controller.h" -#ifdef __linux__ -#include -#endif +#include +#include +#include -namespace mapget -{ - -namespace -{ - -/** - * Simple gzip compressor for streaming compression - */ -class GzipCompressor { -public: - GzipCompressor() { - strm_.zalloc = Z_NULL; - strm_.zfree = Z_NULL; - strm_.opaque = Z_NULL; - // 16+MAX_WBITS enables gzip format (not just deflate) - int ret = deflateInit2(&strm_, Z_DEFAULT_COMPRESSION, Z_DEFLATED, - 16 + MAX_WBITS, 8, Z_DEFAULT_STRATEGY); - if (ret != Z_OK) { - throw std::runtime_error("Failed to initialize gzip compressor"); - } - } - - ~GzipCompressor() { - deflateEnd(&strm_); - } - - std::string compress(const char* data, size_t size, int flush_mode = Z_NO_FLUSH) { - std::string result; - if (size == 0 && flush_mode == Z_NO_FLUSH) { - return result; - } - - strm_.avail_in = static_cast(size); - strm_.next_in = reinterpret_cast(const_cast(data)); - - char outbuf[8192]; - do { - strm_.avail_out = sizeof(outbuf); - strm_.next_out = reinterpret_cast(outbuf); - - int ret = deflate(&strm_, flush_mode); - if (ret == Z_STREAM_ERROR) { - throw std::runtime_error("Gzip compression 
failed"); - } - - size_t have = sizeof(outbuf) - strm_.avail_out; - result.append(outbuf, have); - } while (strm_.avail_out == 0); - - return result; - } - - std::string finish() { - return compress(nullptr, 0, Z_FINISH); - } +#include -private: - z_stream strm_{}; -}; - -/** - * Recursively convert a YAML node to a JSON object, - * with special handling for sensitive fields. - * The function returns a nlohmann::json object and updates maskedSecretMap. - */ -} // namespace - -struct HttpService::Impl +namespace mapget { - HttpService& self_; - HttpServiceConfig config_; - mutable std::atomic binaryRequestCounter_{0}; - mutable std::atomic jsonRequestCounter_{0}; - - explicit Impl(HttpService& self, const HttpServiceConfig& config) - : self_(self), config_(config) {} - - enum class ResponseType { - Binary, - Json - }; - - void tryMemoryTrim(ResponseType responseType) const { - uint64_t interval = (responseType == ResponseType::Binary) - ? config_.memoryTrimIntervalBinary - : config_.memoryTrimIntervalJson; - - if (interval > 0) { - auto& counter = (responseType == ResponseType::Binary) - ? binaryRequestCounter_ - : jsonRequestCounter_; - - auto count = counter.fetch_add(1, std::memory_order_relaxed); - if ((count % interval) == 0) { -#ifdef __linux__ - // Only log in debug builds to reduce overhead - #ifndef NDEBUG - const char* typeStr = (responseType == ResponseType::Binary) ? "binary" : "JSON"; - log().debug("Trimming memory after {} {} requests (interval: {})", count, typeStr, interval); - #endif - malloc_trim(0); -#endif - // On non-Linux platforms, this is a no-op but we still track the counter - } - } - } - - // Use a shared buffer for the responses and a mutex for thread safety. 
- struct HttpTilesRequestState - { - static constexpr auto binaryMimeType = "application/binary"; - static constexpr auto jsonlMimeType = "application/jsonl"; - static constexpr auto anyMimeType = "*/*"; - - std::mutex mutex_; - std::condition_variable resultEvent_; - - uint64_t requestId_; - std::stringstream buffer_; - std::string responseType_; - std::unique_ptr writer_; - std::vector requests_; - TileLayerStream::StringPoolOffsetMap stringOffsets_; - std::unique_ptr compressor_; // Store compressor per request - - HttpTilesRequestState() - { - static std::atomic_uint64_t nextRequestId; - writer_ = std::make_unique( - [&, this](auto&& msg, auto&& msgType) { buffer_ << msg; }, - stringOffsets_); - requestId_ = nextRequestId++; - } - - void parseRequestFromJson(nlohmann::json const& requestJson) - { - std::string mapId = requestJson["mapId"]; - std::string layerId = requestJson["layerId"]; - std::vector tileIds; - tileIds.reserve(requestJson["tileIds"].size()); - for (auto const& tid : requestJson["tileIds"].get>()) - tileIds.emplace_back(tid); - requests_ - .push_back(std::make_shared(mapId, layerId, std::move(tileIds))); - } - - void setResponseType(std::string const& s) - { - responseType_ = s; - if (responseType_ == HttpTilesRequestState::binaryMimeType) - return; - if (responseType_ == HttpTilesRequestState::jsonlMimeType) - return; - if (responseType_ == HttpTilesRequestState::anyMimeType) { - responseType_ = binaryMimeType; - return; - } - raise(fmt::format("Unknown Accept-Header value {}", responseType_)); - } - - void addResult(TileLayer::Ptr const& result) - { - std::unique_lock lock(mutex_); - log().debug("Response ready: {}", MapTileKey(*result).toString()); - if (responseType_ == binaryMimeType) { - // Binary response - writer_->write(result); - } - else { - // JSON response - optimize with compact dump settings - // TODO: Implement direct streaming with result->writeGeoJsonTo(buffer_) - // to avoid intermediate JSON object creation entirely - auto 
json = result->toJson(); - // Use compact dump: no indentation, no spaces, ignore errors - // This reduces string allocation overhead - buffer_ << json.dump(-1, ' ', false, nlohmann::json::error_handler_t::ignore) << "\n"; - } - resultEvent_.notify_one(); - } - }; - - mutable std::mutex clientRequestMapMutex_; - mutable std::unordered_map> requestStatePerClientId_; - - void abortRequestsForClientId(std::string clientId, std::shared_ptr newState = nullptr) const - { - std::unique_lock clientRequestMapAccess(clientRequestMapMutex_); - auto clientRequestIt = requestStatePerClientId_.find(clientId); - if (clientRequestIt != requestStatePerClientId_.end()) { - // Ensure that any previous requests from the same clientId - // are finished post-haste! - bool anySoftAbort = false; - for (auto const& req : clientRequestIt->second->requests_) { - if (!req->isDone()) { - self_.abort(req); - anySoftAbort = true; - } - } - if (anySoftAbort) - log().warn("Soft-aborting tiles request {}", clientRequestIt->second->requestId_); - requestStatePerClientId_.erase(clientRequestIt); - } - if (newState) { - requestStatePerClientId_.emplace(clientId, newState); - } - } - - /** - * Wraps around the generic mapget service's request() function - * to include httplib request decoding and response encoding. - */ - void handleTilesRequest(const httplib::Request& req, httplib::Response& res) const - { - // Parse the JSON request. - nlohmann::json j = nlohmann::json::parse(req.body); - auto requestsJson = j["requests"]; - - // TODO: Limit number of requests to avoid DoS to other users. - // Within one HTTP request, all requested tiles from the same map+layer - // combination should be in a single LayerTilesRequest. - auto state = std::make_shared(); - log().info("Processing tiles request {}", state->requestId_); - for (auto& requestJson : requestsJson) { - state->parseRequestFromJson(requestJson); - } - - // Parse stringPoolOffsets. 
- if (j.contains("stringPoolOffsets")) { - for (auto& item : j["stringPoolOffsets"].items()) { - state->stringOffsets_[item.key()] = item.value().get(); - } - } - - // Determine response type. - state->setResponseType(req.get_header_value("Accept")); - - // Process requests. - for (auto& request : state->requests_) { - request->onFeatureLayer([state](auto&& layer) { state->addResult(layer); }); - request->onSourceDataLayer([state](auto&& layer) { state->addResult(layer); }); - request->onDone_ = [state](RequestStatus r) - { - state->resultEvent_.notify_one(); - }; - } - auto canProcess = self_.request( - state->requests_, - AuthHeaders{req.headers.begin(), req.headers.end()}); - - if (!canProcess) { - // Send a status report detailing for each request - // whether its data source is unavailable or it was aborted. - res.status = 400; - std::vector> requestStatuses{}; - for (const auto& r : state->requests_) { - requestStatuses.push_back(static_cast>(r->getStatus())); - if (r->getStatus() == RequestStatus::Unauthorized) { - res.status = 403; // Forbidden. - } - } - res.set_content( - nlohmann::json::object({{"requestStatuses", requestStatuses}}).dump(), - "application/json"); - return; - } - - // Parse/Process clientId. 
- if (j.contains("clientId")) { - auto clientId = j["clientId"].get(); - abortRequestsForClientId(clientId, state); - } - - // Check if client accepts gzip compression - bool enableGzip = false; - if (req.has_header("Accept-Encoding")) { - std::string acceptEncoding = req.get_header_value("Accept-Encoding"); - enableGzip = acceptEncoding.find("gzip") != std::string::npos; - log().debug("Accept-Encoding header: '{}', enableGzip: {}", acceptEncoding, enableGzip); - } else { - log().debug("No Accept-Encoding header present"); - } - - // Set Content-Encoding header if compression is enabled - if (enableGzip) { - res.set_header("Content-Encoding", "gzip"); - state->compressor_ = std::make_unique(); - log().debug("Set Content-Encoding: gzip header"); - } - - // For efficiency, set up httplib to stream tile layer responses to client: - // (1) Lambda continuously supplies response data to httplib's DataSink, - // picking up data from state->buffer_ until all tile requests are done. - // Then, signal sink->done() to close the stream with a 200 status. - // Using chunked transfer encoding with optional manual compression. - // (2) Lambda acts as a cleanup routine, triggered by httplib upon request wrap-up. - // The success flag indicates if wrap-up was due to sink->done() or external factors - // like network errors or request aborts in lengthy tile requests (e.g., map-viewer). - res.set_chunked_content_provider( - state->responseType_, - [state](size_t offset, httplib::DataSink& sink) - { - std::unique_lock lock(state->mutex_); - - // Wait until there is data to be read. 
- std::string strBuf; - bool allDone = false; - state->resultEvent_.wait( - lock, - [&] - { - allDone = std::all_of( - state->requests_.begin(), - state->requests_.end(), - [](const auto& r) { return r->isDone(); }); - if (allDone && state->responseType_ == HttpTilesRequestState::binaryMimeType) - state->writer_->sendEndOfStream(); - strBuf = state->buffer_.str(); - return !strBuf.empty() || allDone; - }); - - if (!strBuf.empty()) { - // Compress data if gzip is enabled - if (state->compressor_) { - std::string compressed = state->compressor_->compress(strBuf.data(), strBuf.size()); - if (!compressed.empty()) { - log().debug("Compressing: {} bytes -> {} bytes (request {})", - strBuf.size(), compressed.size(), state->requestId_); - sink.write(compressed.data(), compressed.size()); - } - } else { - log().debug("Streaming {} bytes (no compression)...", strBuf.size()); - sink.write(strBuf.data(), strBuf.size()); - } - sink.os.flush(); - state->buffer_.str(""); // Clear buffer content - state->buffer_.clear(); // Clear error flags - // Force release of internal buffer memory - std::stringstream().swap(state->buffer_); - } - - // Call sink.done() when all requests are done. - if (allDone) { - // Finish compression if enabled - if (state->compressor_) { - std::string finalChunk = state->compressor_->finish(); - log().debug( - "Final compression chunk is {} bytes.", - strBuf.size(), finalChunk.size(), state->requestId_); - if (!finalChunk.empty()) { - sink.write(finalChunk.data(), finalChunk.size()); - } - } - sink.done(); - } - - return true; - }, - // Network error/timeout of request to datasource: - // cleanup callback to abort the requests. 
- [state, this](bool success) - { - if (!success) { - log().warn("Aborting tiles request {}", state->requestId_); - for (auto& request : state->requests_) { - self_.abort(request); - } - } - else { - log().info("Tiles request {} was successful.", state->requestId_); - // Determine response type and trim accordingly - ResponseType respType = (state->responseType_ == HttpTilesRequestState::binaryMimeType) - ? ResponseType::Binary - : ResponseType::Json; - tryMemoryTrim(respType); - } - }); - } - - void handleAbortRequest(const httplib::Request& req, httplib::Response& res) const - { - // Parse the JSON request. - nlohmann::json j = nlohmann::json::parse(req.body); - if (j.contains("clientId")) { - auto const clientId = j["clientId"].get(); - abortRequestsForClientId(clientId); - } - else { - res.status = 400; - res.set_content("Missing clientId", "text/plain"); - } - } - - void handleSourcesRequest(const httplib::Request& req, httplib::Response& res) const - { - auto sourcesInfo = nlohmann::json::array(); - for (auto& source : self_.info(AuthHeaders{req.headers.begin(), req.headers.end()})) { - sourcesInfo.push_back(source.toJson()); - } - res.set_content(sourcesInfo.dump(), "application/json"); - } - - void handleStatusRequest(const httplib::Request&, httplib::Response& res) const - { - auto serviceStats = self_.getStatistics(); - auto cacheStats = self_.cache()->getStatistics(); - - std::ostringstream oss; - oss << ""; - oss << "

Status Information

"; - - // Output serviceStats - oss << "

Service Statistics

"; - oss << "
" << serviceStats.dump(4) << "
"; // Indentation of 4 for pretty printing - - // Output cacheStats - oss << "

Cache Statistics

"; - oss << "
" << cacheStats.dump(4) << "
"; // Indentation of 4 for pretty printing - - oss << ""; - res.set_content(oss.str(), "text/html"); - } - - void handleLocateRequest(const httplib::Request& req, httplib::Response& res) const - { - // Parse the JSON request. - nlohmann::json j = nlohmann::json::parse(req.body); - auto requestsJson = j["requests"]; - auto allResponsesJson = nlohmann::json::array(); - - for (auto const& locateReqJson : requestsJson) { - LocateRequest locateReq{locateReqJson}; - auto responsesJson = nlohmann::json::array(); - for (auto const& resp : self_.locate(locateReq)) - responsesJson.emplace_back(resp.serialize()); - allResponsesJson.emplace_back(responsesJson); - } - - res.set_content( - nlohmann::json::object({{"responses", allResponsesJson}}).dump(), - "application/json"); - } - - static bool openConfigFile(std::ifstream& configFile, httplib::Response& res) - { - auto configFilePath = DataSourceConfigService::get().getConfigFilePath(); - if (!configFilePath.has_value()) { - res.status = 404; // Not found. - res.set_content( - "The config file path is not set. Check the server configuration.", - "text/plain"); - return false; - } - - std::filesystem::path path = *configFilePath; - if (!configFilePath || !std::filesystem::exists(path)) { - res.status = 404; // Not found. - res.set_content("The server does not have a config file.", "text/plain"); - return false; - } - - configFile.open(*configFilePath); - if (!configFile) { - res.status = 500; // Internal Server Error. - res.set_content("Failed to open config file.", "text/plain"); - return false; - } - - return true; - } - - static void handleGetConfigRequest(const httplib::Request& req, httplib::Response& res) - { - if (!isGetConfigEndpointEnabled()) { - res.status = 403; // Forbidden. 
- res.set_content( - "The GET /config endpoint is disabled by the server administrator.", - "text/plain"); - return; - } - - std::ifstream configFile; - if (!openConfigFile(configFile, res)) { - return; - } - nlohmann::json jsonSchema = DataSourceConfigService::get().getDataSourceConfigSchema(); - - try { - // Load config YAML, expose the parts which clients may edit. - YAML::Node configYaml = YAML::Load(configFile); - nlohmann::json jsonConfig; - std::unordered_map maskedSecretMap; - for (const auto& key : DataSourceConfigService::get().topLevelDataSourceConfigKeys()) { - if (auto configYamlEntry = configYaml[key]) - jsonConfig[key] = yamlToJson(configYaml[key], true, &maskedSecretMap); - } - - nlohmann::json combinedJson; - combinedJson["schema"] = jsonSchema; - combinedJson["model"] = jsonConfig; - combinedJson["readOnly"] = !isPostConfigEndpointEnabled(); - - // Set the response - res.status = 200; // OK - res.set_content(combinedJson.dump(2), "application/json"); - } - catch (const std::exception& e) { - res.status = 500; // Internal Server Error - res.set_content("Error processing config file: " + std::string(e.what()), "text/plain"); - } - } - - static void handlePostConfigRequest(const httplib::Request& req, httplib::Response& res) - { - if (!isPostConfigEndpointEnabled()) { - res.status = 403; // Forbidden. - res.set_content( - "The POST /config endpoint is not enabled by the server administrator.", - "text/plain"); - return; - } - - std::mutex mtx; - std::condition_variable cv; - bool update_done = false; - - std::ifstream configFile; - if (!openConfigFile(configFile, res)) { - return; - } - - // Subscribe to configuration changes. 
- auto subscription = DataSourceConfigService::get().subscribe( - [&](const std::vector& serviceConfigNodes) - { - std::lock_guard lock(mtx); - res.status = 200; - res.set_content("Configuration updated and applied successfully.", "text/plain"); - update_done = true; - cv.notify_one(); - }, - [&](const std::string& error) - { - std::lock_guard lock(mtx); - res.status = 500; - res.set_content("Error applying the configuration: " + error, "text/plain"); - update_done = true; - cv.notify_one(); - }); - - // Parse the JSON from the request body. - nlohmann::json jsonConfig; - try { - jsonConfig = nlohmann::json::parse(req.body); - } - catch (const nlohmann::json::parse_error& e) { - res.status = 400; // Bad Request - res.set_content("Invalid JSON format: " + std::string(e.what()), "text/plain"); - return; - } - - // Validate JSON against schema. - try { - DataSourceConfigService::get().validateDataSourceConfig(jsonConfig); - } - catch (const std::exception& e) { - res.status = 500; // Internal Server Error. - res.set_content("Validation failed: " + std::string(e.what()), "text/plain"); - return; - } - - // Load the YAML, parse the secrets. - auto yamlConfig = YAML::Load(configFile); - std::unordered_map maskedSecrets; - yamlToJson(yamlConfig, true, &maskedSecrets); - - // Create YAML nodes from JSON nodes. - for (auto const& key : DataSourceConfigService::get().topLevelDataSourceConfigKeys()) { - if (jsonConfig.contains(key)) - yamlConfig[key] = jsonToYaml(jsonConfig[key], maskedSecrets); - } - - // Write the YAML to configFilePath. - update_done = false; - configFile.close(); - log().trace("Writing new config."); - std::ofstream newConfigFile(*DataSourceConfigService::get().getConfigFilePath()); - newConfigFile << yamlConfig; - newConfigFile.close(); - - // Wait for the subscription callback. - std::unique_lock lk(mtx); - if (!cv.wait_for(lk, std::chrono::seconds(60), [&] { return update_done; })) { - res.status = 500; // Internal Server Error. 
- res.set_content("Timeout while waiting for config to update.", "text/plain"); - } - } -}; HttpService::HttpService(Cache::Ptr cache, const HttpServiceConfig& config) - : Service(std::move(cache), config.watchConfig, config.defaultTtl), - impl_(std::make_unique(*this, config)) + : Service(std::move(cache), config.watchConfig, config.defaultTtl), impl_(std::make_unique(*this, config)) { } HttpService::~HttpService() = default; -void HttpService::setup(httplib::Server& server) +void HttpService::setup(drogon::HttpAppFramework& app) { - server.Post( - "/tiles", - [&](const httplib::Request& req, httplib::Response& res) - { impl_->handleTilesRequest(req, res); }); + detail::registerTilesWebSocketController(app, *this); - server.Post( - "/abort", - [&](const httplib::Request& req, httplib::Response& res) - { impl_->handleAbortRequest(req, res); }); + app.registerHandler( + "/tiles", + [this](const drogon::HttpRequestPtr& req, std::function&& callback) { + impl_->handleTilesRequest(req, std::move(callback)); + }, + {drogon::Post}); - server.Get( + app.registerHandler( "/sources", - [this](const httplib::Request& req, httplib::Response& res) - { impl_->handleSourcesRequest(req, res); }); + [this](const drogon::HttpRequestPtr& req, std::function&& callback) { + impl_->handleSourcesRequest(req, std::move(callback)); + }, + {drogon::Get}); - server.Get( + app.registerHandler( "/status", - [this](const httplib::Request& req, httplib::Response& res) - { impl_->handleStatusRequest(req, res); }); - - server.Post( + [this](const drogon::HttpRequestPtr& req, std::function&& callback) { + impl_->handleStatusRequest(req, std::move(callback)); + }, + {drogon::Get}); + + app.registerHandler( + "/status-data", + [this](const drogon::HttpRequestPtr& req, std::function&& callback) { + impl_->handleStatusDataRequest(req, std::move(callback)); + }, + {drogon::Get}); + + app.registerHandler( "/locate", - [this](const httplib::Request& req, httplib::Response& res) - { 
impl_->handleLocateRequest(req, res); }); + [this](const drogon::HttpRequestPtr& req, std::function&& callback) { + impl_->handleLocateRequest(req, std::move(callback)); + }, + {drogon::Post}); - server.Get( + app.registerHandler( "/config", - [this](const httplib::Request& req, httplib::Response& res) - { impl_->handleGetConfigRequest(req, res); }); + [this](const drogon::HttpRequestPtr& req, std::function&& callback) { + if (req->method() == drogon::Get) { + Impl::handleGetConfigRequest(req, std::move(callback)); + return; + } + if (req->method() == drogon::Post) { + impl_->handlePostConfigRequest(req, std::move(callback)); + return; + } - server.Post( - "/config", - [this](const httplib::Request& req, httplib::Response& res) - { impl_->handlePostConfigRequest(req, res); }); + auto resp = drogon::HttpResponse::newHttpResponse(); + resp->setStatusCode(drogon::k405MethodNotAllowed); + resp->setContentTypeCode(drogon::CT_TEXT_PLAIN); + resp->setBody("Method not allowed"); + callback(resp); + }, + {drogon::Get, drogon::Post}); } } // namespace mapget diff --git a/libs/http-service/src/locate-handler.cpp b/libs/http-service/src/locate-handler.cpp new file mode 100644 index 00000000..81ad9ea5 --- /dev/null +++ b/libs/http-service/src/locate-handler.cpp @@ -0,0 +1,43 @@ +#include "http-service-impl.h" + +#include + +#include "nlohmann/json.hpp" + +namespace mapget +{ + +void HttpService::Impl::handleLocateRequest( + const drogon::HttpRequestPtr& req, + std::function&& callback) const +{ + try { + nlohmann::json j = nlohmann::json::parse(std::string(req->body())); + auto requestsJson = j["requests"]; + auto allResponsesJson = nlohmann::json::array(); + + for (auto const& locateReqJson : requestsJson) { + LocateRequest locateReq{locateReqJson}; + auto responsesJson = nlohmann::json::array(); + for (auto const& resp : self_.locate(locateReq)) + responsesJson.emplace_back(resp.serialize()); + allResponsesJson.emplace_back(responsesJson); + } + + auto resp = 
drogon::HttpResponse::newHttpResponse(); + resp->setStatusCode(drogon::k200OK); + resp->setContentTypeCode(drogon::CT_APPLICATION_JSON); + resp->setBody(nlohmann::json::object({{"responses", allResponsesJson}}).dump()); + callback(resp); + } + catch (const std::exception& e) { + auto resp = drogon::HttpResponse::newHttpResponse(); + resp->setStatusCode(drogon::k400BadRequest); + resp->setContentTypeCode(drogon::CT_TEXT_PLAIN); + resp->setBody(std::string("Invalid JSON: ") + e.what()); + callback(resp); + } +} + +} // namespace mapget + diff --git a/libs/http-service/src/sources-handler.cpp b/libs/http-service/src/sources-handler.cpp new file mode 100644 index 00000000..d085ac3f --- /dev/null +++ b/libs/http-service/src/sources-handler.cpp @@ -0,0 +1,27 @@ +#include "http-service-impl.h" + +#include + +#include "nlohmann/json.hpp" + +namespace mapget +{ + +void HttpService::Impl::handleSourcesRequest( + const drogon::HttpRequestPtr& req, + std::function&& callback) const +{ + auto sourcesInfo = nlohmann::json::array(); + for (auto& source : self_.info(detail::authHeadersFromRequest(req))) { + sourcesInfo.push_back(source.toJson()); + } + + auto resp = drogon::HttpResponse::newHttpResponse(); + resp->setStatusCode(drogon::k200OK); + resp->setContentTypeCode(drogon::CT_APPLICATION_JSON); + resp->setBody(sourcesInfo.dump()); + callback(resp); +} + +} // namespace mapget + diff --git a/libs/http-service/src/status-handler.cpp b/libs/http-service/src/status-handler.cpp new file mode 100644 index 00000000..291bdfce --- /dev/null +++ b/libs/http-service/src/status-handler.cpp @@ -0,0 +1,543 @@ +#include "http-service-impl.h" + +#include "tiles-ws-controller.h" + +#include + +#include +#include +#include + +namespace mapget +{ +namespace +{ + +[[nodiscard]] bool parseBoolParameter(const drogon::HttpRequestPtr& req, std::string_view key, bool defaultValue = false) +{ + const std::string raw = req->getParameter(std::string(key)); + if (raw.empty()) + return defaultValue; + + 
if (raw == "1" || raw == "true" || raw == "TRUE" || raw == "yes" || raw == "on") { + return true; + } + if (raw == "0" || raw == "false" || raw == "FALSE" || raw == "no" || raw == "off") { + return false; + } + return defaultValue; +} + +[[nodiscard]] std::string statusPageHtml() +{ + return R"HTML( + + + + + +mapget status + + + +

Status Information

+
+ + + + Never updated + +
+ +

Tiles WebSocket Metrics

+
+ + + +
MetricValue
+
+ `pending-controller-*` covers tile frames currently queued for `/tiles/next` pulls. + `pending-pull-requests` counts currently blocked long-poll pull requests. + `total-forwarded-*` counts tile frames/bytes already served through `/tiles/next`. +
+
+ + + +

Service Statistics

+
+ +

Cache Statistics

+
+ +
+

Tile Size Distribution

+
+ + + +
MetricValue
+
+
+ + + +
BinCountShare
+
+
+ + + + +)HTML"; +} + +} // namespace + +void HttpService::Impl::handleStatusDataRequest( + const drogon::HttpRequestPtr& req, + std::function&& callback) const +{ + const bool includeTileSizeDistribution = + parseBoolParameter(req, "includeTileSizeDistribution", false); + const bool includeCachedFeatureTreeBytes = + parseBoolParameter(req, "includeCachedFeatureTreeBytes", true); + + const auto payload = nlohmann::json::object({ + {"timestampMs", + std::chrono::duration_cast( + std::chrono::system_clock::now().time_since_epoch()) + .count()}, + {"service", self_.getStatistics(includeCachedFeatureTreeBytes, includeTileSizeDistribution)}, + {"cache", self_.cache()->getStatistics()}, + {"tilesWebsocket", detail::tilesWebSocketMetricsSnapshot()}, + }); + + auto resp = drogon::HttpResponse::newHttpResponse(); + resp->setStatusCode(drogon::k200OK); + resp->setContentTypeCode(drogon::CT_APPLICATION_JSON); + resp->setBody(payload.dump()); + callback(resp); +} + +void HttpService::Impl::handleStatusRequest( + const drogon::HttpRequestPtr& /*req*/, + std::function&& callback) const +{ + auto resp = drogon::HttpResponse::newHttpResponse(); + resp->setStatusCode(drogon::k200OK); + resp->setContentTypeCode(drogon::CT_TEXT_HTML); + resp->setBody(statusPageHtml()); + callback(resp); +} + +} // namespace mapget diff --git a/libs/http-service/src/tiles-http-handler.cpp b/libs/http-service/src/tiles-http-handler.cpp new file mode 100644 index 00000000..fe65efc0 --- /dev/null +++ b/libs/http-service/src/tiles-http-handler.cpp @@ -0,0 +1,438 @@ +#include "http-service-impl.h" +#include "tiles-request-json.h" + +#include "mapget/log.h" + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "nlohmann/json.hpp" + +#include + +namespace mapget +{ +namespace +{ + +class GzipCompressor +{ +public: + GzipCompressor() + { + strm_.zalloc = Z_NULL; + strm_.zfree = Z_NULL; + strm_.opaque = Z_NULL; + // 
16+MAX_WBITS enables gzip format (not just deflate) + int ret = deflateInit2( + &strm_, Z_DEFAULT_COMPRESSION, Z_DEFLATED, 16 + MAX_WBITS, 8, Z_DEFAULT_STRATEGY); + if (ret != Z_OK) { + throw std::runtime_error("Failed to initialize gzip compressor"); + } + } + + ~GzipCompressor() { deflateEnd(&strm_); } + + GzipCompressor(GzipCompressor const&) = delete; + GzipCompressor(GzipCompressor&&) = delete; + + std::string compress(const char* data, size_t size, int flush_mode = Z_NO_FLUSH) + { + std::string result; + if (size == 0 && flush_mode == Z_NO_FLUSH) { + return result; + } + + strm_.avail_in = static_cast(size); + strm_.next_in = reinterpret_cast(const_cast(data)); + + char outbuf[8192]; + do { + strm_.avail_out = sizeof(outbuf); + strm_.next_out = reinterpret_cast(outbuf); + + int ret = deflate(&strm_, flush_mode); + if (ret == Z_STREAM_ERROR) { + throw std::runtime_error("Gzip compression failed"); + } + + size_t have = sizeof(outbuf) - strm_.avail_out; + result.append(outbuf, have); + } while (strm_.avail_out == 0); + + return result; + } + + std::string finish() { return compress(nullptr, 0, Z_FINISH); } + +private: + z_stream strm_{}; +}; + +[[nodiscard]] bool containsGzip(std::string_view acceptEncoding) +{ + return !acceptEncoding.empty() && acceptEncoding.find("gzip") != std::string_view::npos; +} + +} // namespace + +struct HttpService::Impl::TilesStreamState : std::enable_shared_from_this +{ + static constexpr auto binaryMimeType = "application/binary"; + static constexpr auto jsonlMimeType = "application/jsonl"; + static constexpr auto anyMimeType = "*/*"; + + explicit TilesStreamState(Impl const& impl, trantor::EventLoop* loop) : impl_(impl), loop_(loop) + { + static std::atomic_uint64_t nextRequestId; + requestId_ = nextRequestId++; + writer_ = std::make_unique( + [this](auto&& msg, auto&& /*msgType*/) { appendOutgoingUnlocked(msg); }, stringOffsets_); + } + + void attachStream(drogon::ResponseStreamPtr stream) + { + { + std::lock_guard lock(mutex_); 
+ if (aborted_ || responseEnded_) { + if (stream) + stream->close(); + return; + } + stream_ = std::move(stream); + } + scheduleDrain(); + } + + void parseRequestFromJson(nlohmann::json const& requestJson) + { + auto parsed = detail::parseLayerTilesRequestJson(requestJson); + if (parsed.usesStageBuckets) { + requests_.push_back(std::make_shared( + std::move(parsed.mapId), + std::move(parsed.layerId), + std::move(parsed.tileIdsByNextStage))); + } else { + auto tileIds = parsed.tileIdsByNextStage.empty() + ? std::vector{} + : std::move(parsed.tileIdsByNextStage.front()); + requests_.push_back(std::make_shared( + std::move(parsed.mapId), + std::move(parsed.layerId), + std::move(tileIds))); + } + } + + [[nodiscard]] bool setResponseTypeFromAccept(std::string_view acceptHeader, std::string& error) + { + responseType_ = std::string(acceptHeader); + if (responseType_.empty()) + responseType_ = anyMimeType; + if (responseType_ == anyMimeType) + responseType_ = binaryMimeType; + + if (responseType_ == binaryMimeType) { + trimResponseType_ = HttpService::Impl::ResponseType::Binary; + return true; + } + if (responseType_ == jsonlMimeType) { + trimResponseType_ = HttpService::Impl::ResponseType::Json; + return true; + } + + error = "Unknown Accept header value: " + responseType_; + return false; + } + + void enableGzip() { compressor_ = std::make_unique(); } + + void onAborted() + { + if (aborted_.exchange(true)) + return; + for (auto const& req : requests_) { + if (!req->isDone()) { + impl_.self_.abort(req); + } + } + drogon::ResponseStreamPtr stream; + { + std::lock_guard lock(mutex_); + if (responseEnded_.exchange(true)) + return; + stream = std::move(stream_); + } + if (stream) + stream->close(); + } + + void addResult(TileLayer::Ptr const& result) + { + { + std::lock_guard lock(mutex_); + if (aborted_) + return; + + log().debug("Response ready: {}", MapTileKey(*result).toString()); + if (responseType_ == binaryMimeType) { + writer_->write(result); + } else { + auto dumped 
= result->toJson().dump(-1, ' ', false, nlohmann::json::error_handler_t::ignore); + appendOutgoingUnlocked(dumped); + appendOutgoingUnlocked("\n"); + } + } + scheduleDrain(); + } + + void onRequestDone() + { + { + std::lock_guard lock(mutex_); + if (aborted_) + return; + + bool allDoneNow = + std::all_of(requests_.begin(), requests_.end(), [](auto const& r) { return r->isDone(); }); + + if (allDoneNow && !allDone_) { + allDone_ = true; + if (responseType_ == binaryMimeType && !endOfStreamSent_) { + writer_->sendEndOfStream(); + endOfStreamSent_ = true; + } + } + } + scheduleDrain(); + } + + void scheduleDrain() + { + if (aborted_ || responseEnded_) + return; + if (drainScheduled_.exchange(true)) + return; + + auto weak = weak_from_this(); + loop_->queueInLoop([weak = std::move(weak)]() mutable { + if (auto self = weak.lock()) { + self->drainOnLoop(); + } + }); + } + + void drainOnLoop() + { + drainScheduled_ = false; + if (aborted_ || responseEnded_) + return; + + constexpr size_t maxChunk = 64 * 1024; + + for (;;) { + std::string chunk; + bool done = false; + bool needAbort = false; + bool scheduleAgain = false; + drogon::ResponseStreamPtr streamToClose; + { + std::lock_guard lock(mutex_); + if (!stream_) + return; + + if (!pending_.empty()) { + size_t n = std::min(pending_.size(), maxChunk); + chunk.assign(pending_.data(), n); + pending_.erase(0, n); + } else { + if (allDone_ && compressor_ && !compressionFinished_) { + pending_.append(compressor_->finish()); + compressionFinished_ = true; + continue; + } + done = allDone_; + } + + if (!chunk.empty()) { + if (!stream_->send(chunk)) { + needAbort = true; + } else if (!pending_.empty() || allDone_) { + scheduleAgain = true; + } + } else if (done) { + responseEnded_ = true; + streamToClose = std::move(stream_); + } + } + + if (needAbort) { + onAborted(); + return; + } + + if (done) { + if (streamToClose) + streamToClose->close(); + impl_.tryMemoryTrim(trimResponseType_); + return; + } + if (scheduleAgain) + 
scheduleDrain(); + return; + } + } + + void appendOutgoingUnlocked(std::string_view bytes) + { + if (bytes.empty()) + return; + + if (compressor_) { + pending_.append(compressor_->compress(bytes.data(), bytes.size())); + } else { + pending_.append(bytes); + } + } + + Impl const& impl_; + trantor::EventLoop* loop_; + + std::mutex mutex_; + uint64_t requestId_ = 0; + + std::string responseType_; + HttpService::Impl::ResponseType trimResponseType_ = HttpService::Impl::ResponseType::Binary; + + std::string pending_; + drogon::ResponseStreamPtr stream_; + std::unique_ptr writer_; + std::vector requests_; + TileLayerStream::StringPoolOffsetMap stringOffsets_; + + std::unique_ptr compressor_; + bool compressionFinished_ = false; + bool endOfStreamSent_ = false; + bool allDone_ = false; + + std::atomic_bool aborted_{false}; + std::atomic_bool drainScheduled_{false}; + std::atomic_bool responseEnded_{false}; +}; + +void HttpService::Impl::handleTilesRequest( + const drogon::HttpRequestPtr& req, + std::function&& callback) const +{ + auto state = std::make_shared(*this, drogon::app().getLoop()); + + const std::string accept = req->getHeader("accept"); + const std::string acceptEncoding = req->getHeader("accept-encoding"); + auto clientHeaders = detail::authHeadersFromRequest(req); + + nlohmann::json j; + try { + j = nlohmann::json::parse(std::string(req->body())); + } + catch (const std::exception& e) { + auto resp = drogon::HttpResponse::newHttpResponse(); + resp->setStatusCode(drogon::k400BadRequest); + resp->setContentTypeCode(drogon::CT_TEXT_PLAIN); + resp->setBody(std::string("Invalid JSON: ") + e.what()); + callback(resp); + return; + } + + auto requestsIt = j.find("requests"); + if (requestsIt == j.end() || !requestsIt->is_array()) { + auto resp = drogon::HttpResponse::newHttpResponse(); + resp->setStatusCode(drogon::k400BadRequest); + resp->setContentTypeCode(drogon::CT_TEXT_PLAIN); + resp->setBody("Missing or invalid 'requests' array"); + callback(resp); + return; + 
} + + log().info("Processing tiles request {}", state->requestId_); + try { + for (auto& requestJson : *requestsIt) { + state->parseRequestFromJson(requestJson); + } + } + catch (const std::exception& e) { + auto resp = drogon::HttpResponse::newHttpResponse(); + resp->setStatusCode(drogon::k400BadRequest); + resp->setContentTypeCode(drogon::CT_TEXT_PLAIN); + resp->setBody(std::string("Invalid request JSON: ") + e.what()); + callback(resp); + return; + } + + if (j.contains("stringPoolOffsets")) { + for (auto& item : j["stringPoolOffsets"].items()) { + state->stringOffsets_[item.key()] = item.value().get(); + } + } + + std::string acceptError; + if (!state->setResponseTypeFromAccept(accept, acceptError)) { + auto resp = drogon::HttpResponse::newHttpResponse(); + resp->setStatusCode(drogon::k400BadRequest); + resp->setContentTypeCode(drogon::CT_TEXT_PLAIN); + resp->setBody(std::move(acceptError)); + callback(resp); + return; + } + + const bool gzip = containsGzip(acceptEncoding); + if (gzip) { + state->enableGzip(); + } + + for (auto& request : state->requests_) { + request->onFeatureLayer([state](auto&& layer) { state->addResult(layer); }); + request->onSourceDataLayer([state](auto&& layer) { state->addResult(layer); }); + request->onDone_ = [state](RequestStatus) { state->onRequestDone(); }; + } + + const auto canProcess = self_.request(state->requests_, clientHeaders); + if (!canProcess) { + std::vector> requestStatuses{}; + bool anyUnauthorized = false; + for (auto const& r : state->requests_) { + auto status = r->getStatus(); + requestStatuses.emplace_back(static_cast>(status)); + anyUnauthorized |= (status == RequestStatus::Unauthorized); + } + + auto resp = drogon::HttpResponse::newHttpResponse(); + resp->setStatusCode(anyUnauthorized ? 
drogon::k403Forbidden : drogon::k400BadRequest); + resp->setContentTypeCode(drogon::CT_APPLICATION_JSON); + resp->setBody(nlohmann::json::object({{"status", requestStatuses}}).dump()); + callback(resp); + return; + } + + auto resp = drogon::HttpResponse::newAsyncStreamResponse( + [state](drogon::ResponseStreamPtr stream) { state->attachStream(std::move(stream)); }, + true); + resp->setStatusCode(drogon::k200OK); + resp->setContentTypeString(state->responseType_); + if (gzip) { + resp->addHeader("Content-Encoding", "gzip"); + } + callback(resp); +} + +} // namespace mapget diff --git a/libs/http-service/src/tiles-request-json.cpp b/libs/http-service/src/tiles-request-json.cpp new file mode 100644 index 00000000..31143510 --- /dev/null +++ b/libs/http-service/src/tiles-request-json.cpp @@ -0,0 +1,96 @@ +#include "tiles-request-json.h" + +#include +#include +#include + +namespace mapget::detail +{ + +ParsedLayerTilesRequest parseLayerTilesRequestJson(const nlohmann::json& requestJson) +{ + ParsedLayerTilesRequest result; + result.mapId = requestJson.at("mapId").get(); + result.layerId = requestJson.at("layerId").get(); + + if (auto stagedIt = requestJson.find("tileIdsByNextStage"); + stagedIt != requestJson.end()) + { + result.usesStageBuckets = true; + if (!stagedIt->is_array()) { + throw std::runtime_error("tileIdsByNextStage must be an array"); + } + result.tileIdsByNextStage.reserve(stagedIt->size()); + for (auto const& bucketJson : *stagedIt) { + if (!bucketJson.is_array()) { + throw std::runtime_error("tileIdsByNextStage entries must be arrays"); + } + std::vector bucket; + bucket.reserve(bucketJson.size()); + for (auto const& tileIdJson : bucketJson) { + bucket.emplace_back(tileIdJson.get()); + } + result.tileIdsByNextStage.push_back(std::move(bucket)); + } + return result; + } + + auto const& tileIdsJson = requestJson.at("tileIds"); + if (!tileIdsJson.is_array()) { + throw std::runtime_error("tileIds must be an array"); + } + + std::vector tileIds; + 
tileIds.reserve(tileIdsJson.size()); + for (auto const& tileIdJson : tileIdsJson) { + tileIds.emplace_back(tileIdJson.get()); + } + result.tileIdsByNextStage.push_back(std::move(tileIds)); + return result; +} + +std::vector expandLayerTilesRequestKeys( + const ParsedLayerTilesRequest& request, + LayerType layerType, + uint32_t stageCount) +{ + std::vector result; + std::set seen; + + if (!request.usesStageBuckets) { + if (!request.tileIdsByNextStage.empty()) { + for (auto const& tileId : request.tileIdsByNextStage.front()) { + MapTileKey key( + layerType, + request.mapId, + request.layerId, + tileId, + UnspecifiedStage); + if (seen.insert(key).second) { + result.push_back(std::move(key)); + } + } + } + return result; + } + + auto const normalizedStageCount = std::max(1U, stageCount); + for (uint32_t stage = 0; stage < normalizedStageCount; ++stage) { + for (size_t bucketIndex = 0; bucketIndex < request.tileIdsByNextStage.size(); ++bucketIndex) { + auto const nextMissingStage = static_cast(bucketIndex); + if (nextMissingStage > stage || nextMissingStage >= normalizedStageCount) { + continue; + } + for (auto const& tileId : request.tileIdsByNextStage[bucketIndex]) { + MapTileKey key(layerType, request.mapId, request.layerId, tileId, stage); + if (seen.insert(key).second) { + result.push_back(std::move(key)); + } + } + } + } + + return result; +} + +} // namespace mapget::detail diff --git a/libs/http-service/src/tiles-request-json.h b/libs/http-service/src/tiles-request-json.h new file mode 100644 index 00000000..d82b74f4 --- /dev/null +++ b/libs/http-service/src/tiles-request-json.h @@ -0,0 +1,28 @@ +#pragma once + +#include "mapget/model/layer.h" + +#include +#include + +#include "nlohmann/json.hpp" + +namespace mapget::detail +{ + +struct ParsedLayerTilesRequest +{ + std::string mapId; + std::string layerId; + std::vector> tileIdsByNextStage; + bool usesStageBuckets = false; +}; + +ParsedLayerTilesRequest parseLayerTilesRequestJson(const nlohmann::json& 
requestJson); + +std::vector expandLayerTilesRequestKeys( + const ParsedLayerTilesRequest& request, + LayerType layerType, + uint32_t stageCount); + +} // namespace mapget::detail diff --git a/libs/http-service/src/tiles-ws-controller.cpp b/libs/http-service/src/tiles-ws-controller.cpp new file mode 100644 index 00000000..c402f741 --- /dev/null +++ b/libs/http-service/src/tiles-ws-controller.cpp @@ -0,0 +1,1560 @@ +#include "tiles-ws-controller.h" + +#include "mapget/http-service/http-service.h" +#include "tiles-request-json.h" + +#include "mapget/log.h" +#include "mapget/model/stream.h" + +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "fmt/format.h" +#include "nlohmann/json.hpp" + +#include + +namespace mapget::detail +{ +namespace +{ + +struct TilesWsMetrics +{ + std::atomic activeConnections{0}; + std::atomic activeSessions{0}; + std::atomic totalQueuedFrames{0}; + std::atomic totalQueuedBytes{0}; + std::atomic totalForwardedFrames{0}; + std::atomic totalForwardedBytes{0}; + std::atomic totalDroppedFrames{0}; + std::atomic totalDroppedBytes{0}; + std::atomic replacedRequests{0}; + std::atomic totalPullRequests{0}; + std::atomic totalPullTimeouts{0}; + std::atomic totalPullSessionMisses{0}; +}; + +TilesWsMetrics gTilesWsMetrics; +std::mutex gTrackedSessionsMutex; +std::vector> gTrackedSessions; +std::mutex gSessionRegistryMutex; +std::unordered_map> gSessionRegistry; +std::atomic gNextClientId{1}; + +constexpr int64_t DEFAULT_PULL_WAIT_MS = 25000; +constexpr int64_t MAX_PULL_WAIT_MS = 30000; +constexpr int64_t MAX_PULL_BATCH_BYTES = 5 * 1024 * 1024; +constexpr LayerType REQUEST_TILE_LAYER_TYPE = LayerType::Features; +constexpr int64_t LOWEST_TILE_PRIORITY = std::numeric_limits::max(); +constexpr bool EMIT_LOAD_STATE_FRAMES = false; + +/// Clamp an 
atomic metric value to zero to avoid exposing negative snapshots. +[[nodiscard]] int64_t nonNegative(std::atomic const& value) +{ + const auto v = value.load(std::memory_order_relaxed); + return v < 0 ? 0 : v; +} + +/// Copy inbound HTTP headers so backend requests can preserve auth context. +[[nodiscard]] AuthHeaders authHeadersFromRequest(const drogon::HttpRequestPtr& req) +{ + AuthHeaders headers; + for (auto const& [k, v] : req->headers()) { + headers.emplace(k, v); + } + return headers; +} + +/// Convert internal request status enum values to stable UI-facing strings. +[[nodiscard]] std::string_view requestStatusToString(RequestStatus s) +{ + switch (s) { + case RequestStatus::Open: + return "Open"; + case RequestStatus::Success: + return "Success"; + case RequestStatus::NoDataSource: + return "NoDataSource"; + case RequestStatus::Unauthorized: + return "Unauthorized"; + case RequestStatus::Aborted: + return "Aborted"; + } + return "Unknown"; +} + +/// Convert tile load-state enum values to stable UI-facing strings. +[[nodiscard]] std::string_view loadStateToString(TileLayer::LoadState s) +{ + switch (s) { + case TileLayer::LoadState::LoadingQueued: + return "LoadingQueued"; + case TileLayer::LoadState::BackendFetching: + return "BackendFetching"; + case TileLayer::LoadState::BackendConverting: + return "BackendConverting"; + } + return "Unknown"; +} + +/// Encode one mapget VTLV frame with protocol header plus payload bytes. +[[nodiscard]] std::string encodeStreamMessage(TileLayerStream::MessageType type, std::string_view payload) +{ + std::ostringstream headerStream; + bitsery::Serializer s(headerStream); + s.object(TileLayerStream::CurrentProtocolVersion); + s.value1b(type); + s.value4b(static_cast(payload.size())); + + auto message = headerStream.str(); + message.append(payload); + return message; +} + +/// Check whether the HTTP Accept-Encoding header allows gzip responses. 
+[[nodiscard]] bool containsGzip(std::string_view acceptEncoding) +{ + return !acceptEncoding.empty() && acceptEncoding.find("gzip") != std::string_view::npos; +} + +/// Compress one payload as gzip (RFC 1952). Returns nullopt on zlib failure. +[[nodiscard]] std::optional gzipCompress(std::string_view input) +{ + z_stream stream{}; + stream.zalloc = Z_NULL; + stream.zfree = Z_NULL; + stream.opaque = Z_NULL; + + // 16 + MAX_WBITS enables gzip framing instead of raw deflate. + const int initResult = deflateInit2( + &stream, + Z_DEFAULT_COMPRESSION, + Z_DEFLATED, + 16 + MAX_WBITS, + 8, + Z_DEFAULT_STRATEGY); + if (initResult != Z_OK) { + return std::nullopt; + } + + stream.next_in = reinterpret_cast(const_cast(input.data())); + stream.avail_in = static_cast(input.size()); + + std::string compressed; + compressed.reserve(input.size() / 2 + 128); + + char outBuffer[8192]; + int deflateResult = Z_OK; + do { + stream.next_out = reinterpret_cast(outBuffer); + stream.avail_out = sizeof(outBuffer); + deflateResult = deflate(&stream, Z_FINISH); + if (deflateResult != Z_OK && deflateResult != Z_STREAM_END) { + deflateEnd(&stream); + return std::nullopt; + } + compressed.append(outBuffer, sizeof(outBuffer) - stream.avail_out); + } while (deflateResult != Z_STREAM_END); + + deflateEnd(&stream); + return compressed; +} + +/// Parse a JSON numeric field into non-negative int64 while handling missing keys. +[[nodiscard]] int64_t parseNonNegativeInt64(const nlohmann::json& j, std::string_view key) +{ + const auto keyString = std::string(key); + const auto it = j.find(keyString); + if (it == j.end()) { + return 0; + } + if (it->is_number_unsigned()) { + const auto raw = it->get(); + const auto max = static_cast(std::numeric_limits::max()); + return static_cast(std::min(raw, max)); + } + if (it->is_number_integer()) { + const auto raw = it->get(); + return std::max(0, raw); + } + return 0; +} + +/// Build a canonical request key using map/layer/tile while normalizing layer type. 
+[[nodiscard]] MapTileKey makeCanonicalRequestedTileKey( + std::string_view mapId, + std::string_view layerId, + TileId tileId, + uint32_t stage = 0) +{ + return MapTileKey( + REQUEST_TILE_LAYER_TYPE, + std::string(mapId), + std::string(layerId), + tileId, + stage); +} + +/// Normalize an existing map tile key so request matching ignores source layer type. +[[nodiscard]] MapTileKey makeCanonicalRequestedTileKey(MapTileKey key) +{ + key.layer_ = REQUEST_TILE_LAYER_TYPE; + return key; +} + +class TilesWsSession : public std::enable_shared_from_this +{ +public: + /// Construct one websocket session object bound 1:1 to a websocket connection. + TilesWsSession( + HttpService& service, + std::weak_ptr conn, + AuthHeaders authHeaders) + : service_(service), + conn_(std::move(conn)), + authHeaders_(std::move(authHeaders)), + writer_( + std::make_unique( + [this](std::string msg, TileLayerStream::MessageType type) { onWriterMessage(std::move(msg), type); }, + writerOffsets_)) + { + gTilesWsMetrics.activeSessions.fetch_add(1, std::memory_order_relaxed); + } + + /// Destroy the session and abort any in-flight backend work. + ~TilesWsSession() + { + { + std::lock_guard lock(gSessionRegistryMutex); + gSessionRegistry.erase(clientId_); + } + gTilesWsMetrics.activeSessions.fetch_sub(1, std::memory_order_relaxed); + // Best-effort cleanup: abort any in-flight requests if the session is destroyed. + cancelNoStatus(); + } + + TilesWsSession(TilesWsSession const&) = delete; + TilesWsSession& operator=(TilesWsSession const&) = delete; + + /// Register this session in the global weak list used for `/status-data` snapshots. + void registerForMetrics() + { + std::lock_guard lock(gTrackedSessionsMutex); + gTrackedSessions.push_back(weak_from_this()); + } + + /// Return currently queued controller frames/bytes. 
+ [[nodiscard]] std::pair pendingSnapshot() + { + std::lock_guard lock(mutex_); + int64_t pendingFrames = static_cast(outgoing_.size()); + int64_t pendingBytes = 0; + for (auto const& frame : outgoing_) { + pendingBytes += static_cast(frame.bytes.size()); + } + return {pendingFrames, pendingBytes}; + } + + /// Return numeric client id used by `/tiles/next` pull requests. + [[nodiscard]] int64_t clientId() const + { + return clientId_; + } + + /// Return current number of blocked `/tiles/next` long-poll requests. + [[nodiscard]] int64_t pendingPullRequestCount() const + { + std::lock_guard lock(mutex_); + return static_cast(pendingPullWaiters_.size()); + } + + struct PullFrameResult + { + enum class Status { + Frame, + Timeout, + Closed, + }; + + Status status = Status::Timeout; + std::string frameBytes; + }; + + using PullResultCallback = std::function; + + /// Resolve one `/tiles/next` request immediately or register an async waiter. + void requestNextTileFrameAsync( + std::chrono::milliseconds waitTimeout, + size_t maxBatchBytes, + PullResultCallback callback) + { + PullResultCallback immediateCallback; + std::optional immediateResult; + uint64_t timeoutWaiterId = 0; + double timeoutSeconds = 0.0; + + { + std::lock_guard lock(mutex_); + if (cancelled_) { + immediateCallback = std::move(callback); + immediateResult = PullFrameResult{.status = PullFrameResult::Status::Closed}; + } + else if (!outgoing_.empty()) { + immediateCallback = std::move(callback); + immediateResult = popFrameBatchLocked(maxBatchBytes); + } + else if (waitTimeout.count() <= 0) { + immediateCallback = std::move(callback); + immediateResult = PullFrameResult{.status = PullFrameResult::Status::Timeout}; + } + else { + timeoutWaiterId = nextPullWaiterId_++; + pendingPullWaiterOrder_.push_back(timeoutWaiterId); + pendingPullWaiters_.emplace(timeoutWaiterId, PullWaiter{ + .waiterId = timeoutWaiterId, + .maxBatchBytes = maxBatchBytes, + .callback = std::move(callback), + }); + timeoutSeconds = 
std::chrono::duration(waitTimeout).count(); + } + } + + if (immediateResult) { + dispatchPullResult(std::move(immediateCallback), std::move(*immediateResult)); + return; + } + + if (timeoutWaiterId == 0) { + return; + } + const auto weak = weak_from_this(); + drogon::app().getLoop()->runAfter(timeoutSeconds, [weak, timeoutWaiterId]() { + if (auto self = weak.lock()) { + self->onPullWaiterTimeout(timeoutWaiterId); + } + }); + } + + /// Patch per-connection string-pool offsets supplied by the client request. + [[nodiscard]] bool applyStringPoolOffsetsPatch(const nlohmann::json& offsetsJson, std::string& errorMessage) + { + if (!offsetsJson.is_object()) { + errorMessage = "stringPoolOffsets must be an object."; + return false; + } + + try { + std::lock_guard lock(mutex_); + for (auto const& item : offsetsJson.items()) { + const auto value = item.value().get(); + committedStringPoolOffsets_[item.key()] = value; + writerOffsets_[item.key()] = value; + } + return true; + } + catch (const std::exception& e) { + errorMessage = fmt::format("Invalid stringPoolOffsets: {}", e.what()); + return false; + } + } + + /// Allocate a request id while respecting optional client-provided request ids. + [[nodiscard]] uint64_t allocateRequestId(const nlohmann::json& requestJson) + { + uint64_t requestId = nextRequestId_++; + if (auto requestIdIt = requestJson.find("requestId"); + requestIdIt != requestJson.end() + && (requestIdIt->is_number_integer() || requestIdIt->is_number_unsigned())) { + const auto parsedRequestId = parseNonNegativeInt64(requestJson, "requestId"); + if (parsedRequestId > 0) { + requestId = static_cast(parsedRequestId); + nextRequestId_ = std::max(nextRequestId_, requestId + 1); + } + } + return requestId; + } + + /// Parse and apply a full logical tile request update from the client. 
+ void updateFromClientRequest(const nlohmann::json& j, uint64_t requestId) + { + auto requestsIt = j.find("requests"); + if (requestsIt == j.end() || !requestsIt->is_array()) { + // Invalid request payload: publish an immediate status error for observability. + { + std::lock_guard lock(mutex_); + requestId_ = requestId; + requestInfos_.clear(); + requestStatuses_.clear(); + statusEmissionEnabled_ = true; + } + queueRequestContextMessage(); + queueStatusMessage("Missing or invalid 'requests' array"); + return; + } + + struct ParsedRequest + { + detail::ParsedLayerTilesRequest request; + LayerRequestContext context; + }; + std::vector parsedRequests; + std::set desiredTileKeys; + std::map nextTilePriorityRanks; + + try { + parsedRequests.reserve(requestsIt->size()); + for (auto const& requestJson : *requestsIt) { + int64_t nextPriorityRank = 0; + auto parsedRequest = detail::parseLayerTilesRequestJson(requestJson); + auto layerContext = service_.resolveLayerRequest( + parsedRequest.mapId, + parsedRequest.layerId, + authHeaders_); + auto expandedTileKeys = detail::expandLayerTilesRequestKeys( + parsedRequest, + REQUEST_TILE_LAYER_TYPE, + layerContext.stages_); + + // Priority ranks are layer-local: rank 0 means "highest priority" + // within that layer request, then increases with request order/stage. 
+ for (auto const& tileKey : expandedTileKeys) { + auto requestedTileKey = makeCanonicalRequestedTileKey(tileKey); + desiredTileKeys.insert(requestedTileKey); + if (nextTilePriorityRanks.find(requestedTileKey) == nextTilePriorityRanks.end()) { + nextTilePriorityRanks.emplace(requestedTileKey, nextPriorityRank++); + } + } + + parsedRequests.push_back(ParsedRequest{ + .request = std::move(parsedRequest), + .context = std::move(layerContext), + }); + } + } + catch (const std::exception& e) { + { + std::lock_guard lock(mutex_); + requestId_ = requestId; + requestInfos_.clear(); + requestStatuses_.clear(); + statusEmissionEnabled_ = true; + } + queueRequestContextMessage(); + queueStatusMessage(fmt::format("Invalid request JSON: {}", e.what())); + return; + } + + std::vector serviceRequests; + std::vector nextActiveRequests; + std::vector nextRequestStatuses(parsedRequests.size(), RequestStatus::Success); + std::vector nextRequestInfos; + nextRequestInfos.reserve(parsedRequests.size()); + + for (size_t index = 0; index < parsedRequests.size(); ++index) { + auto& parsed = parsedRequests[index]; + nextRequestInfos.push_back(RequestInfo{ + .mapId = parsed.request.mapId, + .layerId = parsed.request.layerId, + }); + + std::vector> tileIdsByNextStageToFetch; + std::vector unstagedTileIdsToFetch; + bool hasTilesToFetch = false; + auto stageCount = std::max(1U, parsed.context.stages_); + { + std::lock_guard lock(mutex_); + if (parsed.request.usesStageBuckets) { + tileIdsByNextStageToFetch.resize(parsed.request.tileIdsByNextStage.size()); + for (size_t bucketIndex = 0; bucketIndex < parsed.request.tileIdsByNextStage.size(); ++bucketIndex) { + auto nextMissingStage = static_cast(bucketIndex); + if (nextMissingStage >= stageCount) { + continue; + } + for (auto const& tileId : parsed.request.tileIdsByNextStage[bucketIndex]) { + bool needsBackendFetch = false; + for (uint32_t stage = nextMissingStage; stage < stageCount; ++stage) { + auto requestedTileKey = 
makeCanonicalRequestedTileKey( + parsed.request.mapId, + parsed.request.layerId, + tileId, + stage); + const bool alreadyQueued = + queuedTileFrameRefCount_.find(requestedTileKey) != queuedTileFrameRefCount_.end(); + if (!alreadyQueued) { + needsBackendFetch = true; + break; + } + } + if (needsBackendFetch) { + tileIdsByNextStageToFetch[bucketIndex].push_back(tileId); + hasTilesToFetch = true; + } + } + } + } else if (!parsed.request.tileIdsByNextStage.empty()) { + for (auto const& tileId : parsed.request.tileIdsByNextStage.front()) { + auto requestedTileKey = makeCanonicalRequestedTileKey( + parsed.request.mapId, + parsed.request.layerId, + tileId, + UnspecifiedStage); + const bool alreadyQueued = + queuedTileFrameRefCount_.find(requestedTileKey) != queuedTileFrameRefCount_.end(); + if (!alreadyQueued) { + unstagedTileIdsToFetch.push_back(tileId); + hasTilesToFetch = true; + } + } + } + } + + if (!hasTilesToFetch) { + continue; + } + + LayerTilesRequest::Ptr request; + if (parsed.request.usesStageBuckets) { + request = std::make_shared( + parsed.request.mapId, + parsed.request.layerId, + std::move(tileIdsByNextStageToFetch)); + } else { + request = std::make_shared( + parsed.request.mapId, + parsed.request.layerId, + std::move(unstagedTileIdsToFetch)); + } + serviceRequests.push_back(request); + nextActiveRequests.push_back(request); + nextRequestStatuses[index] = RequestStatus::Open; + + const auto weak = weak_from_this(); + const auto expectedRequestId = requestId; + request->onFeatureLayer([weak](auto&& layer) { + if (auto self = weak.lock()) { + self->onTileLayer(std::forward(layer)); + } + }); + request->onSourceDataLayer([weak](auto&& layer) { + if (auto self = weak.lock()) { + self->onTileLayer(std::forward(layer)); + } + }); + if (EMIT_LOAD_STATE_FRAMES) { + request->onLayerLoadStateChanged([weak](MapTileKey const& key, TileLayer::LoadState state) { + if (auto self = weak.lock()) { + self->onLoadStateChanged(key, state); + } + }); + } + request->onDone_ = 
[weak, index, expectedRequestId, request](RequestStatus status) { + if (auto self = weak.lock()) { + self->onRequestDone(index, expectedRequestId, request, status); + } + }; + } + + std::vector replacedRequests; + { + std::lock_guard lock(mutex_); + replacedRequests = std::move(activeRequests_); + activeRequests_ = std::move(nextActiveRequests); + requestId_ = requestId; + requestInfos_ = std::move(nextRequestInfos); + requestStatuses_ = std::move(nextRequestStatuses); + desiredTileKeys_ = std::move(desiredTileKeys); + tilePriorityRanks_ = std::move(nextTilePriorityRanks); + // When request scope shrinks, remove stale tile data already queued for send. + filterOutgoingByDesiredLocked(); + // Refresh ordering so queued tiles follow the latest request priority. + reprioritizeOutgoingLocked(); + statusEmissionEnabled_ = true; + } + + if (!replacedRequests.empty()) { + gTilesWsMetrics.replacedRequests.fetch_add( + static_cast(replacedRequests.size()), + std::memory_order_relaxed); + abortRequests(std::move(replacedRequests)); + } + + queueRequestContextMessage(); + if (!serviceRequests.empty()) { + (void)service_.request(serviceRequests, authHeaders_); + } + queueStatusMessage({}); + } + + /// Cancel current requests, clear queued frames, and emit a terminal status. + void cancel(std::string reason) + { + cancelled_ = true; + std::vector requestsToAbort; + std::vector pullDispatches; + + // Stop sending any queued tile frames from this session. + { + std::lock_guard lock(mutex_); + clearOutgoingLocked(); + requestsToAbort = std::move(activeRequests_); + activeRequests_.clear(); + collectAllPullWaitersLocked(PullFrameResult::Status::Closed, pullDispatches); + } + + // Abort in-flight requests (best-effort). + abortRequests(std::move(requestsToAbort)); + + // Refresh locally cached statuses after aborting. 
+ { + std::lock_guard lock(mutex_); + for (auto& status : requestStatuses_) { + if (status == RequestStatus::Open) { + status = RequestStatus::Aborted; + } + } + } + + dispatchPullResults(std::move(pullDispatches)); + queueStatusMessage(std::move(reason)); + } + +private: + /// Lightweight metadata emitted in status payloads for each logical request. + struct RequestInfo + { + std::string mapId; + std::string layerId; + }; + + /// One queued websocket frame plus metadata used for bookkeeping. + struct OutgoingFrame + { + std::string bytes; + TileLayerStream::MessageType type{TileLayerStream::MessageType::None}; + std::optional> stringPoolCommit; + std::optional requestedTileKey; + int64_t priorityRank = LOWEST_TILE_PRIORITY; + }; + + /// Batched writer output captured while serializing one tile layer. + struct WriterMessage + { + std::string bytes; + TileLayerStream::MessageType type{TileLayerStream::MessageType::None}; + }; + + /// One pending `/tiles/next` callback waiting for a frame or timeout. + struct PullWaiter + { + uint64_t waiterId = 0; + size_t maxBatchBytes = 0; + PullResultCallback callback; + }; + + /// One completed pull waiter callback plus the result to emit. + struct PullDispatch + { + PullResultCallback callback; + PullFrameResult result; + }; + + /// Increment queued/sent reference counters for one canonical tile key. + void incrementFrameRefCount(std::map& counts, const MapTileKey& key) + { + counts[key] += 1; + } + + /// Decrement queued/sent reference counters and erase exhausted entries. + void decrementFrameRefCount(std::map& counts, const MapTileKey& key) + { + auto it = counts.find(key); + if (it == counts.end()) { + return; + } + it->second -= 1; + if (it->second <= 0) { + counts.erase(it); + } + } + + /// Mark a frame as queued so request updates can avoid duplicate backend fetches. 
+ void trackQueuedFrameLocked(const OutgoingFrame& frame) + { + if (frame.requestedTileKey) { + incrementFrameRefCount(queuedTileFrameRefCount_, *frame.requestedTileKey); + } + } + + /// Remove a frame from queued bookkeeping once it is dequeued or dropped. + void untrackQueuedFrameLocked(const OutgoingFrame& frame) + { + if (frame.requestedTileKey) { + decrementFrameRefCount(queuedTileFrameRefCount_, *frame.requestedTileKey); + } + } + + /// Pop the highest-priority queued tile frame and account forwarding metrics. + [[nodiscard]] PullFrameResult popNextFrameLocked() + { + if (outgoing_.empty()) { + return PullFrameResult{.status = PullFrameResult::Status::Timeout}; + } + + auto frame = std::move(outgoing_.front()); + outgoing_.pop_front(); + untrackQueuedFrameLocked(frame); + if (frame.stringPoolCommit) { + committedStringPoolOffsets_[frame.stringPoolCommit->first] = frame.stringPoolCommit->second; + } + + const auto frameBytes = static_cast(frame.bytes.size()); + gTilesWsMetrics.totalForwardedFrames.fetch_add(1, std::memory_order_relaxed); + gTilesWsMetrics.totalForwardedBytes.fetch_add(frameBytes, std::memory_order_relaxed); + + return PullFrameResult{ + .status = PullFrameResult::Status::Frame, + .frameBytes = std::move(frame.bytes), + }; + } + + /// Pop and concatenate queued frames up to one batch byte budget. 
+ [[nodiscard]] PullFrameResult popFrameBatchLocked(size_t maxBatchBytes) + { + if (outgoing_.empty()) { + return PullFrameResult{.status = PullFrameResult::Status::Timeout}; + } + if (maxBatchBytes == 0) { + return popNextFrameLocked(); + } + + std::string batchBytes; + batchBytes.reserve(std::min(maxBatchBytes, outgoing_.front().bytes.size())); + + size_t appendedBytes = 0; + bool appendedAny = false; + while (!outgoing_.empty()) { + const auto& nextFrame = outgoing_.front(); + const auto nextBytes = nextFrame.bytes.size(); + if (appendedAny && appendedBytes + nextBytes > maxBatchBytes) { + break; + } + auto frameResult = popNextFrameLocked(); + if (frameResult.status != PullFrameResult::Status::Frame) { + break; + } + appendedBytes += frameResult.frameBytes.size(); + batchBytes.append(frameResult.frameBytes); + appendedAny = true; + if (appendedBytes >= maxBatchBytes) { + break; + } + } + + if (!appendedAny) { + return PullFrameResult{.status = PullFrameResult::Status::Timeout}; + } + return PullFrameResult{ + .status = PullFrameResult::Status::Frame, + .frameBytes = std::move(batchBytes), + }; + } + + /// Pop the next valid waiter in arrival order, skipping stale order entries. + [[nodiscard]] bool popNextPullWaiterLocked(PullWaiter& out) + { + while (!pendingPullWaiterOrder_.empty()) { + const auto waiterId = pendingPullWaiterOrder_.front(); + pendingPullWaiterOrder_.pop_front(); + auto waiterIt = pendingPullWaiters_.find(waiterId); + if (waiterIt == pendingPullWaiters_.end()) { + continue; + } + out = std::move(waiterIt->second); + pendingPullWaiters_.erase(waiterIt); + return true; + } + return false; + } + + /// Remove one waiter id from the FIFO order list. + void erasePullWaiterOrderEntryLocked(uint64_t waiterId) + { + pendingPullWaiterOrder_.erase( + std::remove(pendingPullWaiterOrder_.begin(), pendingPullWaiterOrder_.end(), waiterId), + pendingPullWaiterOrder_.end()); + } + + /// Complete queued pull waiters while both waiters and frames are available. 
+ void drainReadyPullWaitersLocked(std::vector& dispatches) + { + PullWaiter waiter; + while (!outgoing_.empty() && popNextPullWaiterLocked(waiter)) { + dispatches.push_back(PullDispatch{ + .callback = std::move(waiter.callback), + .result = popFrameBatchLocked(waiter.maxBatchBytes), + }); + waiter = PullWaiter{}; + } + } + + /// Complete all currently pending pull waiters with one terminal status. + void collectAllPullWaitersLocked(PullFrameResult::Status status, std::vector& dispatches) + { + PullWaiter waiter; + while (popNextPullWaiterLocked(waiter)) { + dispatches.push_back(PullDispatch{ + .callback = std::move(waiter.callback), + .result = PullFrameResult{.status = status}, + }); + waiter = PullWaiter{}; + } + pendingPullWaiters_.clear(); + } + + /// Dispatch one completed pull callback on Drogon's loop. + static void dispatchPullResult(PullResultCallback callback, PullFrameResult result) + { + if (!callback) { + return; + } + drogon::app().getLoop()->queueInLoop( + [callback = std::move(callback), result = std::move(result)]() mutable { + callback(std::move(result)); + }); + } + + /// Dispatch a batch of completed pull callbacks on Drogon's loop. + static void dispatchPullResults(std::vector dispatches) + { + for (auto& dispatch : dispatches) { + dispatchPullResult(std::move(dispatch.callback), std::move(dispatch.result)); + } + } + + /// Resolve one waiter with timeout if it is still pending. 
+ void onPullWaiterTimeout(uint64_t waiterId) + { + PullResultCallback timeoutCallback; + { + std::lock_guard lock(mutex_); + auto waiterIt = pendingPullWaiters_.find(waiterId); + if (waiterIt == pendingPullWaiters_.end()) { + return; + } + timeoutCallback = std::move(waiterIt->second.callback); + pendingPullWaiters_.erase(waiterIt); + erasePullWaiterOrderEntryLocked(waiterId); + } + + dispatchPullResult( + std::move(timeoutCallback), + PullFrameResult{.status = PullFrameResult::Status::Timeout}); + } + + /// Look up the current priority rank for one tile key, defaulting to lowest priority. + [[nodiscard]] int64_t tilePriorityRankLocked(const MapTileKey& tileKey) const + { + const auto it = tilePriorityRanks_.find(tileKey); + if (it == tilePriorityRanks_.end()) { + return LOWEST_TILE_PRIORITY; + } + return it->second; + } + + /// Refresh one queued frame's cached priority rank against the latest request priorities. + void refreshFramePriorityLocked(OutgoingFrame& frame) const + { + if (!frame.requestedTileKey) { + frame.priorityRank = LOWEST_TILE_PRIORITY; + return; + } + frame.priorityRank = tilePriorityRankLocked(*frame.requestedTileKey); + } + + /// Compare two frames for queue order; returns true if lhs should be sent before rhs. + [[nodiscard]] static bool framePrecedes(const OutgoingFrame& lhs, const OutgoingFrame& rhs) + { + const bool lhsIsStringPool = lhs.type == TileLayerStream::MessageType::StringPool; + const bool rhsIsStringPool = rhs.type == TileLayerStream::MessageType::StringPool; + if (lhsIsStringPool != rhsIsStringPool) { + // String pool updates always outrank everything else. + return lhsIsStringPool; + } + + const bool lhsHasTile = lhs.requestedTileKey.has_value(); + const bool rhsHasTile = rhs.requestedTileKey.has_value(); + if (lhsHasTile != rhsHasTile) { + // Keep non-tile control frames ahead of regular tile data frames. 
+ return !lhsHasTile; + } + if (!lhsHasTile) { + return false; + } + return lhs.priorityRank < rhs.priorityRank; + } + + /// Drop queued tile data frames that no longer belong to the latest request set. + void filterOutgoingByDesiredLocked() + { + if (outgoing_.empty()) { + return; + } + + int64_t droppedFrames = 0; + int64_t droppedBytes = 0; + std::deque filtered; + for (auto& frame : outgoing_) { + const bool dropLoadStateFrame = !EMIT_LOAD_STATE_FRAMES + && frame.type == TileLayerStream::MessageType::LoadStateChange; + const bool dropStaleTileFrame = frame.requestedTileKey + && desiredTileKeys_.find(*frame.requestedTileKey) == desiredTileKeys_.end(); + const bool dropFrame = dropLoadStateFrame || dropStaleTileFrame; + if (dropFrame) { + ++droppedFrames; + droppedBytes += static_cast(frame.bytes.size()); + untrackQueuedFrameLocked(frame); + continue; + } + filtered.push_back(std::move(frame)); + } + outgoing_ = std::move(filtered); + if (droppedFrames > 0) { + gTilesWsMetrics.totalDroppedFrames.fetch_add(droppedFrames, std::memory_order_relaxed); + gTilesWsMetrics.totalDroppedBytes.fetch_add(droppedBytes, std::memory_order_relaxed); + } + } + + /// Reorder queued frames according to string-pool and tile-priority policy. + void reprioritizeOutgoingLocked() + { + if (outgoing_.size() < 2) { + return; + } + + for (auto& frame : outgoing_) { + refreshFramePriorityLocked(frame); + } + + std::vector reordered; + reordered.reserve(outgoing_.size()); + for (auto& frame : outgoing_) { + reordered.push_back(std::move(frame)); + } + + std::stable_sort( + reordered.begin(), + reordered.end(), + [](const OutgoingFrame& lhs, const OutgoingFrame& rhs) { return framePrecedes(lhs, rhs); }); + + outgoing_.clear(); + for (auto& frame : reordered) { + outgoing_.push_back(std::move(frame)); + } + } + + /// Append one frame to the websocket controller queue and update counters. 
+ void enqueueOutgoingLocked(OutgoingFrame&& frame) + { + refreshFramePriorityLocked(frame); + trackQueuedFrameLocked(frame); + const auto bytes = static_cast(frame.bytes.size()); + auto insertIt = outgoing_.end(); + for (auto it = outgoing_.begin(); it != outgoing_.end(); ++it) { + if (framePrecedes(frame, *it)) { + insertIt = it; + break; + } + } + outgoing_.insert(insertIt, std::move(frame)); + gTilesWsMetrics.totalQueuedFrames.fetch_add(1, std::memory_order_relaxed); + gTilesWsMetrics.totalQueuedBytes.fetch_add(bytes, std::memory_order_relaxed); + } + + /// Drop all queued frames and account them as controller-side drops. + void clearOutgoingLocked() + { + if (outgoing_.empty()) { + return; + } + + int64_t droppedFrames = 0; + int64_t droppedBytes = 0; + for (auto const& frame : outgoing_) { + ++droppedFrames; + droppedBytes += static_cast(frame.bytes.size()); + untrackQueuedFrameLocked(frame); + } + outgoing_.clear(); + + gTilesWsMetrics.totalDroppedFrames.fetch_add(droppedFrames, std::memory_order_relaxed); + gTilesWsMetrics.totalDroppedBytes.fetch_add(droppedBytes, std::memory_order_relaxed); + } + + /// Internal cancel path used by destructor/connection tear-down (no status emission). + void cancelNoStatus() + { + if (cancelled_.exchange(true)) + return; + std::vector requestsToAbort; + std::vector pullDispatches; + + // Ensure we stop emitting any further frames. + { + std::lock_guard lock(mutex_); + clearOutgoingLocked(); + requestsToAbort = std::move(activeRequests_); + activeRequests_.clear(); + collectAllPullWaitersLocked(PullFrameResult::Status::Closed, pullDispatches); + } + + abortRequests(std::move(requestsToAbort)); + dispatchPullResults(std::move(pullDispatches)); + } + + /// Abort a batch of backend requests outside `mutex_` to avoid lock inversion. 
+ void abortRequests(std::vector requests) + { + for (auto const& request : requests) { + if (!request || request->isDone()) { + continue; + } + service_.abort(request); + } + } + + /// Collect writer callbacks generated while serializing one tile layer. + void onWriterMessage(std::string msg, TileLayerStream::MessageType type) + { + // Writer messages are only generated from within onTileLayer under mutex_. + if (!currentWriteBatch_.has_value()) { + raise("TilesWsSession writer callback used out-of-band"); + } + currentWriteBatch_->push_back(WriterMessage{std::move(msg), type}); + } + + /// Convert one backend tile layer into outgoing websocket frames. + void onTileLayer(TileLayer::Ptr const& layer) + { + if (cancelled_) + return; + if (!layer) + return; + + const auto requestedTileKey = makeCanonicalRequestedTileKey(layer->id()); + std::optional> stringPoolCommit; + std::vector pullDispatches; + + { + std::lock_guard lock(mutex_); + if (cancelled_) + return; + // Late-arriving tile for an outdated request: drop before serialization work. + if (desiredTileKeys_.find(requestedTileKey) == desiredTileKeys_.end()) { + return; + } + + if (currentWriteBatch_.has_value()) { + raise("TilesWsSession writer callback re-entered"); + } + currentWriteBatch_.emplace(); + writer_->write(layer); + auto batch = std::move(*currentWriteBatch_); + currentWriteBatch_.reset(); + + // If a StringPool message was generated, the writer updates writerOffsets_ + // to the new highest string ID for this node after emitting it. 
+ const auto nodeId = layer->nodeId(); + const auto it = writerOffsets_.find(nodeId); + if (it != writerOffsets_.end()) { + const auto newOffset = it->second; + for (auto const& m : batch) { + if (m.type == TileLayerStream::MessageType::StringPool) { + stringPoolCommit = std::make_pair(nodeId, newOffset); + break; + } + } + } + + for (auto& m : batch) { + OutgoingFrame frame; + frame.bytes = std::move(m.bytes); + frame.type = m.type; + if (m.type == TileLayerStream::MessageType::StringPool) { + frame.stringPoolCommit = stringPoolCommit; + frame.requestedTileKey = requestedTileKey; + } + if (m.type == TileLayerStream::MessageType::TileFeatureLayer + || m.type == TileLayerStream::MessageType::TileSourceDataLayer) { + frame.requestedTileKey = requestedTileKey; + } + enqueueOutgoingLocked(std::move(frame)); + } + // Newly queued frames can immediately satisfy blocked pull waiters. + drainReadyPullWaitersLocked(pullDispatches); + } + dispatchPullResults(std::move(pullDispatches)); + } + + /// Update per-request completion state and emit status when it changes. + void onRequestDone( + size_t requestIndex, + uint64_t expectedRequestId, + const LayerTilesRequest::Ptr& completedRequest, + RequestStatus status) + { + if (cancelled_) + return; + + bool shouldEmit = false; + { + std::lock_guard lock(mutex_); + if (cancelled_) + return; + activeRequests_.erase( + std::remove_if( + activeRequests_.begin(), + activeRequests_.end(), + [&](const LayerTilesRequest::Ptr& req) { + return !req || req == completedRequest || req->isDone(); + }), + activeRequests_.end()); + if (expectedRequestId == requestId_ && requestIndex < requestStatuses_.size()) { + if (requestStatuses_[requestIndex] == status) { + return; + } + requestStatuses_[requestIndex] = status; + shouldEmit = statusEmissionEnabled_; + } + } + + if (shouldEmit) { + queueStatusMessage({}); + } + } + + /// Send a websocket control frame immediately (status/request-context/load-state). 
+ void sendControlMessage(TileLayerStream::MessageType type, std::string payload) + { + auto conn = conn_.lock(); + if (!conn || conn->disconnected()) { + cancelNoStatus(); + return; + } + conn->send(encodeStreamMessage(type, payload), drogon::WebSocketMessageType::Binary); + } + + /// Send a status frame describing the current request statuses. + void queueStatusMessage(std::string message) + { + sendControlMessage(TileLayerStream::MessageType::Status, buildStatusPayload(std::move(message))); + } + + /// Send a request-context frame so the client can track the active request id + client id. + void queueRequestContextMessage() + { + sendControlMessage(TileLayerStream::MessageType::RequestContext, buildRequestContextPayload()); + } + + /// Forward backend tile load-state changes for tiles still requested by the client. + void onLoadStateChanged(MapTileKey const& key, TileLayer::LoadState state) + { + if (!EMIT_LOAD_STATE_FRAMES) { + return; + } + if (cancelled_) + return; + const auto requestedTileKey = makeCanonicalRequestedTileKey(key); + { + std::lock_guard lock(mutex_); + // Keep load-state traffic scoped to the currently requested tile set. + if (desiredTileKeys_.find(requestedTileKey) == desiredTileKeys_.end()) { + return; + } + } + + sendControlMessage( + TileLayerStream::MessageType::LoadStateChange, + buildLoadStatePayload(key, state)); + } + + /// Build the JSON payload for `mapget.tiles.status`. + [[nodiscard]] std::string buildStatusPayload(std::string message) + { + nlohmann::json requestsJson = nlohmann::json::array(); + bool allDone = true; + + { + std::lock_guard lock(mutex_); + for (size_t i = 0; i < requestInfos_.size(); ++i) { + const auto status = (i < requestStatuses_.size()) ? 
requestStatuses_[i] : RequestStatus::Open; + allDone &= (status != RequestStatus::Open); + + nlohmann::json reqJson = nlohmann::json::object(); + reqJson["index"] = i; + reqJson["mapId"] = requestInfos_[i].mapId; + reqJson["layerId"] = requestInfos_[i].layerId; + reqJson["status"] = static_cast>(status); + reqJson["statusText"] = std::string(requestStatusToString(status)); + requestsJson.push_back(std::move(reqJson)); + } + } + + return nlohmann::json::object({ + {"type", "mapget.tiles.status"}, + {"requestId", requestId_}, + {"allDone", allDone}, + {"requests", std::move(requestsJson)}, + {"message", std::move(message)}, + }).dump(); + } + + /// Build the JSON payload for `mapget.tiles.load-state`. + [[nodiscard]] std::string buildLoadStatePayload(MapTileKey const& key, TileLayer::LoadState state) const + { + return nlohmann::json::object({ + {"type", "mapget.tiles.load-state"}, + {"requestId", requestId_}, + {"mapId", key.mapId_}, + {"layerId", key.layerId_}, + {"tileId", key.tileId_.value_}, + {"stage", key.stage_}, + {"state", static_cast(state)}, + {"stateText", std::string(loadStateToString(state))}, + }).dump(); + } + + /// Build the JSON payload for `mapget.tiles.request-context`. 
+ [[nodiscard]] std::string buildRequestContextPayload() const + { + return nlohmann::json::object({ + {"type", "mapget.tiles.request-context"}, + {"requestId", requestId_}, + {"clientId", clientId_}, + }).dump(); + } + + HttpService& service_; + std::weak_ptr conn_; + int64_t clientId_ = gNextClientId.fetch_add(1, std::memory_order_relaxed); + uint64_t requestId_ = 0; + uint64_t nextRequestId_ = 1; + + AuthHeaders authHeaders_; + + mutable std::mutex mutex_; + uint64_t nextPullWaiterId_ = 1; + std::deque pendingPullWaiterOrder_; + std::unordered_map pendingPullWaiters_; + std::deque outgoing_; + std::vector requestInfos_; + std::vector requestStatuses_; + std::vector activeRequests_; + std::set desiredTileKeys_; + std::map tilePriorityRanks_; + std::map queuedTileFrameRefCount_; + bool statusEmissionEnabled_ = false; + + TileLayerStream::StringPoolOffsetMap committedStringPoolOffsets_; + TileLayerStream::StringPoolOffsetMap writerOffsets_; + std::unique_ptr writer_; + std::optional> currentWriteBatch_; + + std::atomic_bool cancelled_{false}; +}; + +class TilesWebSocketController final : public drogon::WebSocketController +{ +public: + /// Build the websocket controller bound to one shared HttpService instance. + explicit TilesWebSocketController(HttpService& service) : service_(service) {} + + /// Create and attach one `TilesWsSession` per accepted websocket connection. + void handleNewConnection(const drogon::HttpRequestPtr& req, const drogon::WebSocketConnectionPtr& conn) override + { + gTilesWsMetrics.activeConnections.fetch_add(1, std::memory_order_relaxed); + auto session = std::make_shared(service_, conn, authHeadersFromRequest(req)); + session->registerForMetrics(); + { + std::lock_guard lock(gSessionRegistryMutex); + gSessionRegistry[session->clientId()] = session; + } + conn->setContext(std::move(session)); + } + + /// Handle control and request messages from the websocket client. 
+ void handleNewMessage( + const drogon::WebSocketConnectionPtr& conn, + std::string&& message, + const drogon::WebSocketMessageType& type) override + { + auto session = conn->getContext(); + if (!session) { + // This is a defensive fallback for unexpected context loss. + session = std::make_shared(service_, conn, AuthHeaders{}); + session->registerForMetrics(); + { + std::lock_guard lock(gSessionRegistryMutex); + gSessionRegistry[session->clientId()] = session; + } + conn->setContext(session); + } + + if (type != drogon::WebSocketMessageType::Text) { + const auto payload = nlohmann::json::object({ + {"type", "mapget.tiles.status"}, + {"allDone", true}, + {"requests", nlohmann::json::array()}, + {"message", "Expected a text message containing JSON."}, + }).dump(); + conn->send(encodeStreamMessage(TileLayerStream::MessageType::Status, payload), drogon::WebSocketMessageType::Binary); + return; + } + + nlohmann::json j; + try { + j = nlohmann::json::parse(message); + } + catch (const std::exception& e) { + const auto payload = nlohmann::json::object({ + {"type", "mapget.tiles.status"}, + {"allDone", true}, + {"requests", nlohmann::json::array()}, + {"message", fmt::format("Invalid JSON: {}", e.what())}, + }).dump(); + conn->send(encodeStreamMessage(TileLayerStream::MessageType::Status, payload), drogon::WebSocketMessageType::Binary); + return; + } + + // Patch per-connection string pool offsets if supplied. 
+ if (j.contains("stringPoolOffsets")) { + std::string errorMessage; + if (!session->applyStringPoolOffsetsPatch(j["stringPoolOffsets"], errorMessage)) { + const auto payload = nlohmann::json::object({ + {"type", "mapget.tiles.status"}, + {"allDone", true}, + {"requests", nlohmann::json::array()}, + {"message", std::move(errorMessage)}, + }).dump(); + conn->send(encodeStreamMessage(TileLayerStream::MessageType::Status, payload), drogon::WebSocketMessageType::Binary); + return; + } + } + + const auto requestId = session->allocateRequestId(j); + session->updateFromClientRequest(j, requestId); + } + + /// Abort outstanding backend work once the websocket is closed. + void handleConnectionClosed(const drogon::WebSocketConnectionPtr& conn) override + { + gTilesWsMetrics.activeConnections.fetch_sub(1, std::memory_order_relaxed); + if (auto session = conn->getContext()) { + { + std::lock_guard lock(gSessionRegistryMutex); + gSessionRegistry.erase(session->clientId()); + } + session->cancel("WebSocket connection closed."); + } + } + + WS_PATH_LIST_BEGIN + WS_PATH_ADD("/tiles", drogon::Get); + WS_PATH_LIST_END + +private: + HttpService& service_; +}; + +} // namespace + +namespace +{ + +[[nodiscard]] std::shared_ptr findSessionByClientId(int64_t clientId) +{ + std::lock_guard lock(gSessionRegistryMutex); + auto it = gSessionRegistry.find(clientId); + if (it == gSessionRegistry.end()) { + return {}; + } + auto session = it->second.lock(); + if (!session) { + gSessionRegistry.erase(it); + return {}; + } + return session; +} + +[[nodiscard]] int64_t parseClampedInt64Parameter( + const drogon::HttpRequestPtr& req, + std::string_view key, + int64_t defaultValue, + int64_t minValue, + int64_t maxValue) +{ + const auto rawValue = req->getParameter(std::string(key)); + if (rawValue.empty()) { + return defaultValue; + } + try { + const auto parsed = static_cast(std::stoll(rawValue)); + return std::clamp(parsed, minValue, maxValue); + } + catch (const std::exception&) { + return 
defaultValue; + } +} + +void handleTilesNextRequest( + const drogon::HttpRequestPtr& req, + std::function&& callback) +{ + gTilesWsMetrics.totalPullRequests.fetch_add(1, std::memory_order_relaxed); + + const auto clientId = parseClampedInt64Parameter(req, "clientId", 0, 0, std::numeric_limits::max()); + if (clientId <= 0) { + auto resp = drogon::HttpResponse::newHttpResponse(); + resp->setStatusCode(drogon::k400BadRequest); + resp->setContentTypeCode(drogon::CT_TEXT_PLAIN); + resp->setBody("Missing or invalid clientId parameter."); + callback(resp); + return; + } + + auto session = findSessionByClientId(clientId); + if (!session) { + gTilesWsMetrics.totalPullSessionMisses.fetch_add(1, std::memory_order_relaxed); + auto resp = drogon::HttpResponse::newHttpResponse(); + resp->setStatusCode(drogon::k410Gone); + callback(resp); + return; + } + + const auto waitMs = parseClampedInt64Parameter( + req, + "waitMs", + DEFAULT_PULL_WAIT_MS, + 0, + MAX_PULL_WAIT_MS); + const auto maxBytes = parseClampedInt64Parameter( + req, + "maxBytes", + 0, + 0, + MAX_PULL_BATCH_BYTES); + const bool compressRequested = parseClampedInt64Parameter(req, "compress", 0, 0, 1) != 0; + const bool enableGzip = compressRequested && containsGzip(req->getHeader("Accept-Encoding")); + session->requestNextTileFrameAsync( + std::chrono::milliseconds(waitMs), + static_cast(maxBytes), + [callback = std::move(callback), enableGzip](TilesWsSession::PullFrameResult result) mutable { + auto resp = drogon::HttpResponse::newHttpResponse(); + switch (result.status) { + case TilesWsSession::PullFrameResult::Status::Frame: + resp->setStatusCode(drogon::k200OK); + resp->setContentTypeCode(drogon::CT_APPLICATION_OCTET_STREAM); + if (enableGzip) { + if (auto compressed = gzipCompress(result.frameBytes)) { + resp->setBody(std::move(*compressed)); + resp->addHeader("Content-Encoding", "gzip"); + resp->addHeader("Vary", "Accept-Encoding"); + } else { + resp->setBody(std::move(result.frameBytes)); + } + } else { + 
resp->setBody(std::move(result.frameBytes)); + } + break; + case TilesWsSession::PullFrameResult::Status::Timeout: + gTilesWsMetrics.totalPullTimeouts.fetch_add(1, std::memory_order_relaxed); + resp->setStatusCode(drogon::k204NoContent); + break; + case TilesWsSession::PullFrameResult::Status::Closed: + resp->setStatusCode(drogon::k410Gone); + break; + } + callback(resp); + }); +} + +} // namespace + +/// Register the `/tiles` websocket controller with Drogon. +void registerTilesWebSocketController(drogon::HttpAppFramework& app, HttpService& service) +{ + app.registerController(std::make_shared(service)); + app.registerHandler( + "/tiles/next", + [](const drogon::HttpRequestPtr& req, std::function&& callback) { + handleTilesNextRequest(req, std::move(callback)); + }, + {drogon::Get, drogon::Post}); +} + +/// Build the websocket metrics payload consumed by `/status-data`. +nlohmann::json tilesWebSocketMetricsSnapshot() +{ + int64_t pendingControllerFrames = 0; + int64_t pendingControllerBytes = 0; + int64_t pendingPullRequests = 0; + { + std::lock_guard lock(gTrackedSessionsMutex); + auto out = gTrackedSessions.begin(); + for (auto it = gTrackedSessions.begin(); it != gTrackedSessions.end(); ++it) { + if (auto session = it->lock()) { + auto [frames, bytes] = session->pendingSnapshot(); + pendingControllerFrames += frames; + pendingControllerBytes += bytes; + pendingPullRequests += session->pendingPullRequestCount(); + *out++ = *it; + } + } + gTrackedSessions.erase(out, gTrackedSessions.end()); + } + + return nlohmann::json::object({ + {"active-connections", nonNegative(gTilesWsMetrics.activeConnections)}, + {"active-sessions", nonNegative(gTilesWsMetrics.activeSessions)}, + {"pending-controller-frames", pendingControllerFrames}, + {"pending-controller-bytes", pendingControllerBytes}, + {"pending-pull-requests", pendingPullRequests}, + {"total-queued-frames", nonNegative(gTilesWsMetrics.totalQueuedFrames)}, + {"total-queued-bytes", 
nonNegative(gTilesWsMetrics.totalQueuedBytes)}, + {"total-forwarded-frames", nonNegative(gTilesWsMetrics.totalForwardedFrames)}, + {"total-forwarded-bytes", nonNegative(gTilesWsMetrics.totalForwardedBytes)}, + {"total-dropped-frames", nonNegative(gTilesWsMetrics.totalDroppedFrames)}, + {"total-dropped-bytes", nonNegative(gTilesWsMetrics.totalDroppedBytes)}, + {"total-pull-requests", nonNegative(gTilesWsMetrics.totalPullRequests)}, + {"total-pull-timeouts", nonNegative(gTilesWsMetrics.totalPullTimeouts)}, + {"total-pull-session-misses", nonNegative(gTilesWsMetrics.totalPullSessionMisses)}, + {"replaced-requests", nonNegative(gTilesWsMetrics.replacedRequests)}, + }); +} + +} // namespace mapget::detail diff --git a/libs/http-service/src/tiles-ws-controller.h b/libs/http-service/src/tiles-ws-controller.h new file mode 100644 index 00000000..7f274e6a --- /dev/null +++ b/libs/http-service/src/tiles-ws-controller.h @@ -0,0 +1,21 @@ +#pragma once + +#include "nlohmann/json_fwd.hpp" + +namespace drogon +{ +class HttpAppFramework; +} + +namespace mapget +{ +class HttpService; +} + +namespace mapget::detail +{ + +void registerTilesWebSocketController(drogon::HttpAppFramework& app, HttpService& service); +[[nodiscard]] nlohmann::json tilesWebSocketMetricsSnapshot(); + +} // namespace mapget::detail diff --git a/libs/model/CMakeLists.txt b/libs/model/CMakeLists.txt index 6ae8ab52..6fffa562 100644 --- a/libs/model/CMakeLists.txt +++ b/libs/model/CMakeLists.txt @@ -14,6 +14,7 @@ add_library(mapget-model STATIC include/mapget/model/relation.h include/mapget/model/point.h include/mapget/model/pointnode.h + include/mapget/model/geometry-data.h include/mapget/model/geometry.h include/mapget/model/simfilgeometry.h include/mapget/model/simfilutil.h @@ -21,6 +22,7 @@ add_library(mapget-model STATIC include/mapget/model/sourcedatalayer.h include/mapget/model/sourceinfo.h include/mapget/model/sourcedatareference.h + include/mapget/model/validity-data.h include/mapget/model/validity.h 
include/mapget/model/hash.h diff --git a/libs/model/include/mapget/model/attr.h b/libs/model/include/mapget/model/attr.h index 5fef0b17..125567c2 100644 --- a/libs/model/include/mapget/model/attr.h +++ b/libs/model/include/mapget/model/attr.h @@ -17,7 +17,6 @@ class Geometry; class Attribute : public simfil::ProceduralObject<2, Attribute, TileFeatureLayer> { friend class TileFeatureLayer; - template friend struct simfil::model_ptr; public: /** @@ -49,26 +48,27 @@ class Attribute : public simfil::ProceduralObject<2, Attribute, TileFeatureLayer /** Actual per-attribute data that is stored in the model's attributes-column. */ struct Data { + MODEL_COLUMN_TYPE(16); + simfil::ModelNodeAddress validities_; - simfil::ArrayIndex fields_ = -1; + simfil::ArrayIndex fields_ = simfil::InvalidArrayIndex; simfil::StringId name_ = 0; simfil::ModelNodeAddress sourceDataRefs_; - - template - void serialize(S& s) { - s.object(validities_); - s.value4b(fields_); - s.value2b(name_); - s.object(sourceDataRefs_); - } }; - Attribute(Data* data, simfil::ModelConstPtr l, simfil::ModelNodeAddress a); - Attribute() = default; +public: + explicit Attribute(simfil::detail::mp_key key) + : simfil::ProceduralObject<2, Attribute, TileFeatureLayer>(key) {} + Attribute(Data* data, + simfil::ModelConstPtr l, + simfil::ModelNodeAddress a, + simfil::detail::mp_key key); + Attribute() = delete; /** * Pointer to the actual data stored for the attribute. */ +protected: Data* data_ = nullptr; }; diff --git a/libs/model/include/mapget/model/attrlayer.h b/libs/model/include/mapget/model/attrlayer.h index f0225ef0..3f59c550 100644 --- a/libs/model/include/mapget/model/attrlayer.h +++ b/libs/model/include/mapget/model/attrlayer.h @@ -2,6 +2,7 @@ #include "featureid.h" #include "attr.h" +#include "merged-array-view.h" namespace mapget { @@ -14,17 +15,22 @@ class AttributeLayerList; * as speed limits, might belong to the same attribute layer. 
* TODO: Convert to use BaseObject */ -class AttributeLayer : public simfil::Object +class AttributeLayer : public simfil::BaseObject { friend class TileFeatureLayer; friend class bitsery::Access; - template friend struct simfil::model_ptr; public: + using BaseObject::addField; + using BaseObject::get; + /** * Create a new attribute and immediately insert it into the layer. */ - model_ptr newAttribute(std::string_view const& name, size_t initialCapacity=8); + model_ptr newAttribute( + std::string_view const& name, + size_t initialCapacity=8, + bool fixedSize=false); /** * Add an attribute to the layer which was created before - note: @@ -40,9 +46,13 @@ class AttributeLayer : public simfil::Object */ bool forEachAttribute(std::function const& attr)> const& cb) const; -protected: - AttributeLayer(simfil::ArrayIndex i, simfil::ModelConstPtr l, simfil::ModelNodeAddress a); - AttributeLayer() = default; +public: + explicit AttributeLayer(simfil::detail::mp_key key) : BaseObject(key) {} + AttributeLayer(simfil::ArrayIndex i, + simfil::ModelConstPtr l, + simfil::ModelNodeAddress a, + simfil::detail::mp_key key); + AttributeLayer() = delete; }; /** @@ -50,18 +60,20 @@ class AttributeLayer : public simfil::Object * stores (layer-name, layer) pairs. * TODO: Convert to use BaseObject */ -class AttributeLayerList : public simfil::Object +class AttributeLayerList : public MergedArrayView { friend class TileFeatureLayer; friend class bitsery::Access; friend class Feature; - template friend struct simfil::model_ptr; public: /** * Create a new named layer and immediately insert it into the collection. */ - model_ptr newLayer(std::string_view const& name, size_t initialCapacity=8); + model_ptr newLayer( + std::string_view const& name, + size_t initialCapacity=8, + bool fixedSize=false); /** * Add an attribute layer to the collection which was previously created. 
@@ -69,6 +81,12 @@ class AttributeLayerList : public simfil::Object */ void addLayer(std::string_view const& name, model_ptr l); + /** + * Backward-compatible alias for addLayer. + */ + tl::expected, simfil::Error> + addField(std::string_view const& name, model_ptr l); + /** * Iterate over the stored layers. The passed lambda must return * true to continue iterating, or false to abort iteration. @@ -78,9 +96,32 @@ class AttributeLayerList : public simfil::Object std::function const& layer)> const& cb ) const; -protected: - AttributeLayerList(simfil::ArrayIndex i, simfil::ModelConstPtr l, simfil::ModelNodeAddress a); - AttributeLayerList() = default; +public: + explicit AttributeLayerList(simfil::detail::mp_key key) + : MergedArrayView(key) + { + } + + AttributeLayerList(simfil::ArrayIndex i, + simfil::ModelConstPtr l, + simfil::ModelNodeAddress a, + simfil::detail::mp_key key); + AttributeLayerList() = delete; + + [[nodiscard]] simfil::ValueType type() const override; + [[nodiscard]] simfil::ModelNode::Ptr at(int64_t i) const override; + [[nodiscard]] uint32_t size() const override; + [[nodiscard]] simfil::ModelNode::Ptr get(const simfil::StringId& field) const override; + [[nodiscard]] simfil::StringId keyAt(int64_t i) const override; + bool iterate(simfil::ModelNode::IterCallback const& cb) const override; + +private: + [[nodiscard]] simfil::model_ptr localObject() const; + [[nodiscard]] uint32_t localMergedSize() const override; + [[nodiscard]] simfil::ModelNode::Ptr localMergedAt(int64_t i) const override; + bool localMergedIterate(simfil::ModelNode::IterCallback const& cb) const override; + + simfil::ArrayIndex members_ = simfil::InvalidArrayIndex; }; } diff --git a/libs/model/include/mapget/model/feature.h b/libs/model/include/mapget/model/feature.h index 8e5dbc69..2e7b20b5 100644 --- a/libs/model/include/mapget/model/feature.h +++ b/libs/model/include/mapget/model/feature.h @@ -8,14 +8,42 @@ #include "tileid.h" #include "relation.h" #include "geometry.h" 
+#include "merged-array-view.h" #include "tl/expected.hpp" #include "sfl/small_vector.hpp" #include "nlohmann/json.hpp" +#include +#include +#include namespace mapget { +class RelationArrayView : public MergedArrayView +{ +public: + explicit RelationArrayView(simfil::detail::mp_key key) + : MergedArrayView(key) + { + } + + RelationArrayView( + simfil::ModelConstPtr pool, + simfil::ModelNodeAddress address, + simfil::detail::mp_key key) + : MergedArrayView(std::move(pool), address, key) + { + } + + RelationArrayView() = delete; + +private: + [[nodiscard]] uint32_t localMergedSize() const override; + [[nodiscard]] simfil::ModelNode::Ptr localMergedAt(int64_t i) const override; + bool localMergedIterate(simfil::ModelNode::IterCallback const& cb) const override; +}; + /** * View onto a feature which belongs to a TileFeatureLayer. * You can create a feature through the TileFeatureLayer::newFeature function. @@ -55,11 +83,28 @@ class Feature : public simfil::MandatoryDerivedModelNodeBase friend class bitsery::Access; friend class TileFeatureLayer; friend class BoundFeature; - template friend struct simfil::model_ptr; + friend class RelationArrayView; public: + struct MergedBasicAttributesView; + + enum class LOD : uint8_t { + LOD_0 = 0, + LOD_1 = 1, + LOD_2 = 2, + LOD_3 = 3, + LOD_4 = 4, + LOD_5 = 5, + LOD_6 = 6, + LOD_7 = 7 + }; + + static constexpr LOD MAX_LOD = LOD::LOD_7; + /** Get the name of this feature's type. */ [[nodiscard]] std::string_view typeId() const; + [[nodiscard]] LOD lod() const; + void setLod(LOD lod); /** Get this feature's ID. */ [[nodiscard]] model_ptr id() const; @@ -71,6 +116,7 @@ class Feature : public simfil::MandatoryDerivedModelNodeBase model_ptr geom(); [[nodiscard]] model_ptr geomOrNull() const; [[nodiscard]] SelfContainedGeometry firstGeometry() const; + [[nodiscard]] SelfContainedGeometry preferredGeometry() const; /** * Get this feature's Attribute layers. 
The non-const version adds a @@ -85,10 +131,14 @@ class Feature : public simfil::MandatoryDerivedModelNodeBase */ model_ptr attributes(); [[nodiscard]] model_ptr attributesOrNull() const; + [[nodiscard]] model_ptr mergedAttributesOrNull() const; /** Add a point to the feature. */ void addPoint(Point const& p); + /** Attach an existing geometry to the feature. */ + void addGeometry(model_ptr const& geom); + /** Add multiple points to the feature. */ void addPoints(std::vector const& points); @@ -156,6 +206,10 @@ class Feature : public simfil::MandatoryDerivedModelNodeBase [[nodiscard]] model_ptr sourceDataReferences() const; void setSourceDataReferences(simfil::ModelNode::Ptr const& addresses); + [[nodiscard]] model_ptr extension() const; + void setExtension(model_ptr extension); + void setExtensionAddress(TileFeatureLayer const* extensionModel, simfil::ModelNodeAddress extensionAddress); + protected: /** * Simfil Model-Node Functions @@ -176,43 +230,83 @@ class Feature : public simfil::MandatoryDerivedModelNodeBase */ [[nodiscard]] model_ptr relations(); [[nodiscard]] model_ptr relationsOrNull() const; + [[nodiscard]] model_ptr mergedRelationsOrNull() const; + void refreshExtensionBindingFromOverlay() const; + + struct TypeIdAndLOD + { + MODEL_COLUMN_TYPE(4); + + simfil::StringId typeId_ = 0; + uint8_t lod_ = static_cast(MAX_LOD); + }; + + /** + * Feature data that is always allocated. + */ + struct BasicData + { + MODEL_COLUMN_TYPE(12); + + TypeIdAndLOD typeIdAndLod_{}; + simfil::ArrayIndex idPartValues_ = simfil::InvalidArrayIndex; + simfil::ModelNodeAddress geom_{}; + }; /** - * Feature Data + * Feature data that is allocated lazily only once needed. 
*/ - struct Data + struct ComplexData { - simfil::ModelNodeAddress id_; - simfil::ModelNodeAddress geom_; - simfil::ModelNodeAddress attrLayers_; - simfil::ModelNodeAddress attrs_; - simfil::ModelNodeAddress relations_; - simfil::ModelNodeAddress sourceData_; - - template - void serialize(S& s) - { - s.object(id_); - s.object(geom_); - s.object(attrLayers_); - s.object(attrs_); - s.object(relations_); - s.object(sourceData_); - } + MODEL_COLUMN_TYPE(16); + + simfil::ModelNodeAddress attrLayers_{}; + simfil::ModelNodeAddress attrs_{}; + simfil::ModelNodeAddress relations_{}; + simfil::ModelNodeAddress sourceData_{}; }; - Feature(Data& d, simfil::ModelConstPtr l, simfil::ModelNodeAddress a); - Feature() = default; +public: + explicit Feature(simfil::detail::mp_key key) + : simfil::MandatoryDerivedModelNodeBase(key) {} + Feature(BasicData& d, + ComplexData* c, + simfil::ModelConstPtr l, + simfil::ModelNodeAddress a, + simfil::detail::mp_key key); + Feature() = delete; - Data* data_ = nullptr; +protected: + BasicData* basicData_ = nullptr; + mutable ComplexData* complexData_ = nullptr; + mutable TileFeatureLayer const* extensionModel_ = nullptr; + mutable simfil::ModelNodeAddress extensionAddress_; // We keep the fields in a tiny vector on the stack, // because their number is dynamic, as a variable number // of id-part fields is adopted from the feature id. 
- sfl::small_vector, 32> fields_; - void updateFields(); + mutable sfl::small_vector, 32> fields_; + mutable bool fieldsDirty_ = true; + void ensureFieldsReady() const; + [[nodiscard]] simfil::ModelNodeAddress featureIdNodeAddress() const; + [[nodiscard]] simfil::ModelNodeAddress sourceDataNodeAddress() const; + [[nodiscard]] simfil::ModelNodeAddress geometryNodeAddress() const; + [[nodiscard]] simfil::ModelNodeAddress& geometryNodeAddress(); + [[nodiscard]] simfil::ModelNodeAddress attributeLayerNodeAddress() const; + [[nodiscard]] simfil::ModelNodeAddress& attributeLayerNodeAddress(); + [[nodiscard]] simfil::ModelNodeAddress attributeNodeAddress() const; + [[nodiscard]] simfil::ModelNodeAddress& attributeNodeAddress(); + [[nodiscard]] simfil::ModelNodeAddress relationNodeAddress() const; + [[nodiscard]] simfil::ModelNodeAddress& relationNodeAddress(); + void updateFields() const; + void materializeGeometryCollection(); + model_ptr appendGeometry( + GeomType type, + size_t initialCapacity, + bool fixedSize = false); - struct FeaturePropertyView : public simfil::MandatoryDerivedModelNodeBase +public: + struct MergedBasicAttributesView : public simfil::MandatoryDerivedModelNodeBase { [[nodiscard]] simfil::ValueType type() const override; [[nodiscard]] ModelNode::Ptr at(int64_t) const override; @@ -221,11 +315,38 @@ class Feature : public simfil::MandatoryDerivedModelNodeBase [[nodiscard]] simfil::StringId keyAt(int64_t) const override; [[nodiscard]] bool iterate(IterCallback const& cb) const override; - FeaturePropertyView(Data& d, simfil::ModelConstPtr l, simfil::ModelNodeAddress a); - FeaturePropertyView() = default; + explicit MergedBasicAttributesView(simfil::detail::mp_key key) + : simfil::MandatoryDerivedModelNodeBase(key) {} + MergedBasicAttributesView( + simfil::ModelConstPtr model, + simfil::ModelNodeAddress address, + simfil::detail::mp_key key); + MergedBasicAttributesView() = delete; + + private: + using AttrField = std::pair; + mutable std::vector 
mergedFields_; + mutable bool mergedFieldsDirty_ = true; + + void ensureMergedFieldsReady() const; + void rebuildMergedFields() const; + }; + +protected: + struct FeaturePropertyView : public simfil::MandatoryDerivedModelNodeBase + { + [[nodiscard]] simfil::ValueType type() const override; + [[nodiscard]] ModelNode::Ptr at(int64_t) const override; + [[nodiscard]] uint32_t size() const override; + [[nodiscard]] ModelNode::Ptr get(const simfil::StringId&) const override; + [[nodiscard]] simfil::StringId keyAt(int64_t) const override; + [[nodiscard]] bool iterate(IterCallback const& cb) const override; - Data* data_ = nullptr; - model_ptr attrs_; + explicit FeaturePropertyView(simfil::detail::mp_key key) + : simfil::MandatoryDerivedModelNodeBase(key) {} + FeaturePropertyView(model_ptr feature, + simfil::detail::mp_key key); + FeaturePropertyView() = delete; }; }; diff --git a/libs/model/include/mapget/model/featureid.h b/libs/model/include/mapget/model/featureid.h index e9c18b0e..16cee92c 100644 --- a/libs/model/include/mapget/model/featureid.h +++ b/libs/model/include/mapget/model/featureid.h @@ -2,6 +2,7 @@ #include "simfil/model/nodes.h" #include "info.h" +#include namespace mapget { @@ -24,7 +25,6 @@ class FeatureId : public simfil::MandatoryDerivedModelNodeBase friend class Feature; friend class Relation; friend class bitsery::Access; - template friend struct simfil::model_ptr; public: /** Convert the FeatureId to a string like `....` */ @@ -36,6 +36,15 @@ class FeatureId : public simfil::MandatoryDerivedModelNodeBase /** Get all id-part key-value-pairs (including the common prefix). 
*/ [[nodiscard]] KeyValueViewPairs keyValuePairs() const; + struct Data { + MODEL_COLUMN_TYPE(8); + + bool useCommonTilePrefix_ = false; + uint8_t idCompositionIndex_ = 0; + simfil::StringId typeId_ = 0; + simfil::ArrayIndex idPartValues_ = simfil::InvalidArrayIndex; + }; + protected: /** * Internal Node Access APIs @@ -48,25 +57,24 @@ class FeatureId : public simfil::MandatoryDerivedModelNodeBase [[nodiscard]] simfil::StringId keyAt(int64_t) const override; [[nodiscard]] bool iterate(IterCallback const& cb) const override; - struct Data { - bool useCommonTilePrefix_ = false; - simfil::StringId typeId_ = 0; - simfil::ModelNodeAddress idParts_; - - template - void serialize(S& s) { - s.value1b(useCommonTilePrefix_); - s.value2b(typeId_); - s.object(idParts_); - } - }; - - FeatureId(Data& data, simfil::ModelConstPtr l, simfil::ModelNodeAddress a); - FeatureId() = default; - - Data* data_ = nullptr; +public: + explicit FeatureId(simfil::detail::mp_key key) + : simfil::MandatoryDerivedModelNodeBase(key) {} + FeatureId(Data& data, + simfil::ModelConstPtr l, + simfil::ModelNodeAddress a, + simfil::detail::mp_key key); + FeatureId(Data const& data, + simfil::ModelConstPtr l, + simfil::ModelNodeAddress a, + simfil::detail::mp_key key); + FeatureId() = delete; - model_ptr fields_; +protected: + Data data_{}; + model_ptr values_; + std::vector partNames_; + std::vector visibleValueIndices_; }; } diff --git a/libs/model/include/mapget/model/featurelayer.h b/libs/model/include/mapget/model/featurelayer.h index 97c4f569..8f942849 100644 --- a/libs/model/include/mapget/model/featurelayer.h +++ b/libs/model/include/mapget/model/featurelayer.h @@ -2,6 +2,9 @@ #include #include +#include +#include +#include #include "tl/expected.hpp" @@ -20,6 +23,7 @@ #include "geometry.h" #include "sourcedatareference.h" #include "pointnode.h" +#include "nlohmann/json.hpp" namespace mapget { @@ -31,6 +35,8 @@ namespace mapget */ class TileFeatureLayer : public TileLayer, public simfil::ModelPool { 
+ template + friend class MergedArrayView; friend class Feature; friend class FeatureId; friend class Relation; @@ -48,8 +54,16 @@ class TileFeatureLayer : public TileLayer, public simfil::ModelPool friend class SourceDataReferenceCollection; friend class SourceDataReferenceItem; friend class Validity; + template + friend model_ptr resolveInternal( + simfil::res::tag, + TileFeatureLayer const&, + simfil::ModelNode const&); public: + // Keep ModelPool::resolve overloads visible alongside the override below. + using ModelPool::resolve; + /** * This constructor initializes a new TileFeatureLayer instance. * Each instance is associated with a specific TileId, nodeId, and mapId. @@ -73,15 +87,15 @@ class TileFeatureLayer : public TileLayer, public simfil::ModelPool std::shared_ptr const& strings); /** - * Constructor which parses a TileFeatureLayer from a binary stream. - * @param inputStream The binary stream to parse. + * Constructor which parses a TileFeatureLayer from a binary byte buffer. + * @param input The binary bytes to parse. * @param layerInfoResolveFun Function which will be called to retrieve * a layerInfo object for the layer name stored for the tile. * @param stringPoolGetter Function which will be called to retrieve * a string pool for the node name of the tile. */ TileFeatureLayer( - std::istream& inputStream, + const std::vector& input, LayerInfoResolveFun const& layerInfoResolveFun, StringPoolResolveFun const& stringPoolGetter ); @@ -92,6 +106,7 @@ class TileFeatureLayer : public TileLayer, public simfil::ModelPool */ void setIdPrefix(KeyValueViewPairs const& prefix); model_ptr getIdPrefix(); + model_ptr getIdPrefix() const; /** Destructor for the TileFeatureLayer class. */ ~TileFeatureLayer() override; @@ -130,22 +145,25 @@ class TileFeatureLayer : public TileLayer, public simfil::ModelPool /** * Create a new named attribute, which may be inserted into an attribute layer. 
*/ - model_ptr newAttribute(std::string_view const& name, size_t initialCapacity=8); + model_ptr newAttribute( + std::string_view const& name, + size_t initialCapacity=8, + bool fixedSize=false); /** * Create a new attribute layer, which may be inserted into a feature. */ - model_ptr newAttributeLayer(size_t initialCapacity=8); + model_ptr newAttributeLayer(size_t initialCapacity=8, bool fixedSize=false); /** * Create a new geometry collection. */ - model_ptr newGeometryCollection(size_t initialCapacity=1); + model_ptr newGeometryCollection(size_t initialCapacity=2, bool fixedSize=false); /** * Create a new geometry. */ - model_ptr newGeometry(GeomType geomType, size_t initialCapacity=1); + model_ptr newGeometry(GeomType geomType, size_t initialCapacity=2, bool fixedSize=false); /** * Create a new geometry view. @@ -165,7 +183,16 @@ class TileFeatureLayer : public TileLayer, public simfil::ModelPool /** * Create a new validity collection. */ - model_ptr newValidityCollection(size_t initialCapacity = 1); + model_ptr newValidityCollection(size_t initialCapacity = 2, bool fixedSize=false); + + /** + * Internal validity upgrade helpers used by Validity. + */ + simfil::ModelNodeAddress materializeSimpleValidity( + simfil::ModelNodeAddress simpleAddress, + Validity::Direction direction); + std::optional upgradedSimpleValidityAddress( + simfil::ModelNodeAddress simpleAddress) const; /** * Return type for begin() and end() methods to support range-based @@ -209,9 +236,19 @@ class TileFeatureLayer : public TileLayer, public simfil::ModelPool /** Convert to (Geo-) JSON. */ nlohmann::json toJson() const override; + /** Report serialized size stats for feature-layer data and model-pool columns. */ + [[nodiscard]] nlohmann::json serializationSizeStats() const; + /** Access number of stored features */ size_t size() const; + /** Access total number of geometry vertices across this tile. 
*/ + [[nodiscard]] uint64_t numVertices() const; + + /** Access layer-wide geometry anchor used for anchor-relative vertex encoding. */ + [[nodiscard]] Point geometryAnchor() const; + void setGeometryAnchor(Point const& anchor); + /** Access feature at index i */ model_ptr at(size_t i) const; @@ -223,13 +260,39 @@ class TileFeatureLayer : public TileLayer, public simfil::ModelPool /** Shared pointer type */ using Ptr = std::shared_ptr; + /** Optional staged-loading index (0-based) for this feature tile. */ + [[nodiscard]] std::optional stage() const override; + void setStage(std::optional stage) override; + + /** + * Configure expected feature-id sequence for strict staged overlay validation. + * When configured, every newFeature call must match the next expected id. + */ + void setExpectedFeatureSequence(std::vector expectedFeatureIds); + void clearExpectedFeatureSequence(); + [[nodiscard]] bool hasExpectedFeatureSequence() const; + void validateExpectedFeatureSequenceComplete() const; + + /** + * Attach an overlay tile. Overlay tiles must have the same features in the + * same positions. Additional attribute layers, geometries and relations from + * overlay features are attached to the base features efficiently and lazily + * when retrieving the feature from the base layer. + * If this tile already has an overlay, the new overlay gets attached at the + * tail of the overlay chain. + */ + void attachOverlay(TileFeatureLayer::Ptr const& overlay); + + /** Get the next overlay tile in the chain (if any). */ + [[nodiscard]] TileFeatureLayer::Ptr overlay() const; + /** * Evaluate a (potentially cached) simfil query on this pool * * @param query Simfil query * @param node Model root node to query * @param anyMode Auto-wrap expression in `any(...)` - * @param autoWildcard Auto expand constant expressions to `** = ` + * @param autoWildcard Auto expand constant expressions to `** == ` */ struct QueryResult { // The list of values resulting from the query evaluation. 
@@ -293,45 +356,28 @@ class TileFeatureLayer : public TileLayer, public simfil::ModelPool TileFeatureLayer::Ptr const& otherLayer, simfil::ModelNode::Ptr const& otherNode); - /** - * Node resolution functions. - */ - model_ptr resolveAttributeLayer(simfil::ModelNode const& n) const; - model_ptr resolveAttributeLayerList(simfil::ModelNode const& n) const; - model_ptr resolveAttribute(simfil::ModelNode const& n) const; - model_ptr resolveFeature(simfil::ModelNode const& n) const; - model_ptr resolveFeatureId(simfil::ModelNode const& n) const; - model_ptr resolveRelation(simfil::ModelNode const& n) const; - model_ptr resolvePoint(const simfil::ModelNode& n) const; - model_ptr resolvePointBuffer(const simfil::ModelNode& n) const; - model_ptr resolveGeometry(simfil::ModelNode const& n) const; - model_ptr resolveGeometryCollection(simfil::ModelNode const& n) const; - model_ptr resolveMesh(simfil::ModelNode const& n) const; - model_ptr resolveMeshTriangleCollection(simfil::ModelNode const& n) const; - model_ptr resolveMeshTriangleLinearRing(simfil::ModelNode const& n) const; - model_ptr resolvePolygon(simfil::ModelNode const& n) const; - model_ptr resolveLinearRing(simfil::ModelNode const& n) const; - model_ptr resolveSourceDataReferenceCollection(simfil::ModelNode const& n) const; - model_ptr resolveSourceDataReferenceItem(simfil::ModelNode const& n) const; - model_ptr resolveValidityPoint(const simfil::ModelNode& n) const; - model_ptr resolveValidity(simfil::ModelNode const& n) const; - model_ptr resolveValidityCollection(simfil::ModelNode const& n) const; - /** * The ColumnId enum provides identifiers for different * types of columns that can be associated with feature data. 
*/ struct ColumnId { enum : uint8_t { Features = FirstCustomColumnId, + FeatureComplexData, FeatureProperties, FeatureIds, + ExternalFeatureIds, Attributes, AttributeLayers, AttributeLayerLists, Relations, Points, PointBuffers, - Geometries, + PointBuffersView, + PointGeometries, + LineGeometries, + PolygonGeometries, + MeshGeometries, + GeometryViews, GeometryCollections, Mesh, MeshTriangleCollection, @@ -343,6 +389,11 @@ class TileFeatureLayer : public TileLayer, public simfil::ModelPool Validities, ValidityPoints, ValidityCollections, + FeatureRelationsView, + GeometryArrayView, + // Compact validity form without backing struct storage. + // Direction is encoded in ModelNodeAddress::index(). + SimpleValidity, }; }; protected: @@ -353,7 +404,7 @@ class TileFeatureLayer : public TileLayer, public simfil::ModelPool /** * Create a new attribute layer collection. */ - model_ptr newAttributeLayers(size_t initialCapacity=8); + model_ptr newAttributeLayers(size_t initialCapacity=8, bool fixedSize=false); /** * Generic node resolution overload. 
@@ -361,8 +412,35 @@ class TileFeatureLayer : public TileLayer, public simfil::ModelPool tl::expected resolve(const simfil::ModelNode &n, const ResolveFn &cb) const override; Geometry::Storage& vertexBufferStorage(); + [[nodiscard]] Geometry::ViewData const* geometryViewData(simfil::ModelNodeAddress address) const; + [[nodiscard]] std::optional geometryStage(simfil::ModelNodeAddress address) const; + void setGeometryStage(simfil::ModelNodeAddress address, std::optional stage); + [[nodiscard]] simfil::ModelNodeAddress geometrySourceDataReferences(simfil::ModelNodeAddress address) const; + void setGeometrySourceDataReferences(simfil::ModelNodeAddress address, simfil::ModelNodeAddress refsAddress); + [[nodiscard]] Feature::ComplexData const* featureComplexDataOrNull(uint32_t featureIndex) const; + [[nodiscard]] Feature::ComplexData* featureComplexDataOrNull(uint32_t featureIndex); + Feature::ComplexData& ensureFeatureComplexData(uint32_t featureIndex); + + void setMergedArrayExtension( + simfil::ModelNodeAddress baseAddress, + TileFeatureLayer const* extensionModel, + simfil::ModelNodeAddress extensionAddress); + void clearMergedArrayExtension(simfil::ModelNodeAddress baseAddress); + [[nodiscard]] std::optional> + mergedArrayExtension(simfil::ModelNodeAddress baseAddress) const; struct Impl; std::unique_ptr impl_; + std::optional stage_; + TileFeatureLayer::Ptr overlay_; + std::vector expectedFeatureIds_; }; + +// Primary template for ADL-based resolve hooks (specialized in featurelayer.cpp). 
+template +simfil::model_ptr resolveInternal( + simfil::res::tag, + TileFeatureLayer const& model, + simfil::ModelNode const& node); + } diff --git a/libs/model/include/mapget/model/geometry-data.h b/libs/model/include/mapget/model/geometry-data.h new file mode 100644 index 00000000..a2655267 --- /dev/null +++ b/libs/model/include/mapget/model/geometry-data.h @@ -0,0 +1,47 @@ +#pragma once + +#include "simfil/model/nodes.h" + +#include +#include + +#include + +namespace mapget +{ + +enum class GeomType : uint8_t { + Points, // Point-cloud + Line, // Line-string + Polygon, // Auto-closed polygon + Mesh // Collection of triangles +}; + +struct GeometryViewData +{ + MODEL_COLUMN_TYPE(20); + + GeometryViewData() = default; + GeometryViewData( + GeomType t, + uint32_t offset, + uint32_t size, + simfil::ModelNodeAddress base) + : type_(t), + offset_(offset), + size_(size), + baseGeometry_(base) + {} + + GeomType type_ = GeomType::Points; + + // View range in base geometry point buffer. + uint32_t offset_ = 0; + uint32_t size_ = 0; + + // Address of referenced geometry (may itself be a view). 
+ simfil::ModelNodeAddress baseGeometry_{}; + simfil::ModelNodeAddress sourceDataReferences_{}; +}; + +} // namespace mapget diff --git a/libs/model/include/mapget/model/geometry.h b/libs/model/include/mapget/model/geometry.h index c90a43f3..4f0884ec 100644 --- a/libs/model/include/mapget/model/geometry.h +++ b/libs/model/include/mapget/model/geometry.h @@ -2,13 +2,16 @@ #include "simfil/model/nodes.h" +#include "geometry-data.h" #include "point.h" #include "featureid.h" #include "sourcedatareference.h" #include "sourceinfo.h" +#include "merged-array-view.h" #include #include +#include using simfil::ValueType; using simfil::ModelNode; @@ -16,17 +19,18 @@ using simfil::ModelNodeAddress; using simfil::ModelConstPtr; using simfil::StringId; +namespace simfil::detail +{ +template <> +struct is_model_column_external_type : std::true_type +{}; +} + namespace mapget { class TileFeatureLayer; - -enum class GeomType: uint8_t { - Points, // Point-cloud - Line, // Line-string - Polygon, // Auto-closed polygon - Mesh // Collection of triangles -}; +class GeometryArrayView; /** * Small interface container type which may be used @@ -45,7 +49,6 @@ struct SelfContainedGeometry class Geometry final : public simfil::MandatoryDerivedModelNodeBase { public: - template friend struct simfil::model_ptr; friend class TileFeatureLayer; friend class PointNode; friend class LinearRingNode; @@ -69,14 +72,14 @@ class Geometry final : public simfil::MandatoryDerivedModelNodeBase name() const; + /** Get a hash of the geometry. **/ [[nodiscard]] uint64_t getHash() const; - /** - * Get and set geometry name. - */ - [[nodiscard]] std::optional name() const; - void setName(const std::string_view &newName); + /** Get the persisted logical stage of this geometry, if any. */ + [[nodiscard]] std::optional stage() const; /** Iterate over all Points in the geometry. * @param callback Function which is called for each contained point. 
@@ -135,7 +138,7 @@ class Geometry final : public simfil::MandatoryDerivedModelNodeBase; - // Address of the referenced geometry - may be a view itself. - ModelNodeAddress baseGeometry_; - } view_; - } detail_; - - ModelNodeAddress sourceDataReferences_; - - template - void serialize(S& s) { - s.value1b(isView_); - s.value1b(type_); - s.value2b(geomName_); - if (!isView_) { - s.value4b(detail_.geom_.vertexArray_); - s.object(detail_.geom_.offset_); - } - else { - s.value4b(detail_.view_.offset_); - s.value4b(detail_.view_.size_); - s.object(detail_.view_.baseGeometry_); - } - s.object(sourceDataReferences_); - } - }; - - using Storage = simfil::ArrayArena; - - Data* geomData_ = nullptr; + ViewData* geomViewData_ = nullptr; Storage* storage_ = nullptr; - Geometry() = default; - Geometry(Data* data, ModelConstPtr pool, ModelNodeAddress a); +public: + explicit Geometry(simfil::detail::mp_key key) + : simfil::MandatoryDerivedModelNodeBase(key) {} + Geometry(ModelConstPtr pool, + ModelNodeAddress a, + simfil::detail::mp_key key); + Geometry(ViewData* data, + ModelConstPtr pool, + ModelNodeAddress a, + simfil::detail::mp_key key); + Geometry() = delete; }; /** GeometryCollection node has `type` and `geometries` fields. */ -class GeometryCollection : public simfil::MandatoryDerivedModelNodeBase +class GeometryCollection : public MergedArrayView { public: - template friend struct simfil::model_ptr; friend class TileFeatureLayer; friend class Feature; using Storage = simfil::Array::Storage; /** Adds a new Geometry to the collection and returns a reference. */ - model_ptr newGeometry(GeomType type, size_t initialCapacity=4); + model_ptr newGeometry( + GeomType type, + size_t initialCapacity=4, + bool fixedSize=false); /** Append an existing Geometry to the collection. 
*/ void addGeometry(model_ptr const& geom); @@ -244,6 +188,40 @@ class GeometryCollection : public simfil::MandatoryDerivedModelNodeBase preferredGeometryStage( + std::optional stageOverride = std::nullopt) const; + + /** + * Find the first geometry of the requested type at the preferred stage. + * When `stageOverride` is omitted, the layer's `highFidelityStage_` is used. + */ + [[nodiscard]] model_ptr geometryOfTypeAtPreferredStage( + GeomType type, + std::optional stageOverride = std::nullopt) const; + + /** Iterate over all geometries at the preferred stage. */ + template + bool forEachGeometryAtPreferredStage( + std::optional stageOverride, + LambdaType const& callback) const + { + auto const preferredStage = preferredGeometryStage(stageOverride); + if (!preferredStage) { + return true; + } + return forEachGeometry([&](model_ptr const& geom) { + if (geom->stage().value_or(0U) != *preferredStage) { + return true; + } + return callback(geom); + }); + } + /** Iterate over all Geometries in the collection. * @param callback Function which is called for each contained geometry. * Must return true to continue iteration, false to abort iteration. 
@@ -258,16 +236,34 @@ class GeometryCollection : public simfil::MandatoryDerivedModelNodeBase bool forEachGeometry(LambdaType const& callback) const { - auto geomArray = modelPtr()->arrayMemberStorage().range((simfil::ArrayIndex)addr().index()); - return std::all_of(geomArray.begin(), geomArray.end(), [this, &callback](auto&& geomNodeAddress){ - return callback(modelPtr()->resolveGeometry(*ModelNode::Ptr::make(model_, geomNodeAddress))); - }); + const auto localCount = this->localMergedSize(); + for (uint32_t i = 0; i < localCount; ++i) { + auto localGeom = localGeometryAt(i); + if (!localGeom) { + continue; + } + if (!callback(modelPtr()->template resolve(*localGeom))) { + return false; + } + } + if (auto ext = extension()) { + return ext->forEachGeometry(callback); + } + return true; } -private: - GeometryCollection() = default; - GeometryCollection(ModelConstPtr pool, ModelNodeAddress); +public: + explicit GeometryCollection(simfil::detail::mp_key key) + : MergedArrayView(key) {} + GeometryCollection(ModelConstPtr pool, ModelNodeAddress, simfil::detail::mp_key key); + GeometryCollection() = delete; +private: + [[nodiscard]] uint32_t localMergedSize() const override; + [[nodiscard]] ModelNode::Ptr localMergedAt(int64_t i) const override; + bool localMergedIterate(IterCallback const& cb) const override; // NOLINT (allow discard) + [[nodiscard]] ModelNode::Ptr localGeometryAt(int64_t i) const; + [[nodiscard]] model_ptr mergedGeometryArray() const; [[nodiscard]] ValueType type() const override; [[nodiscard]] ModelNode::Ptr at(int64_t) const override; [[nodiscard]] uint32_t size() const override; @@ -278,16 +274,54 @@ class GeometryCollection : public simfil::MandatoryDerivedModelNodeBase +{ +public: + explicit GeometryArrayView(simfil::detail::mp_key key) + : MergedArrayView(key) + { + } + + GeometryArrayView( + ModelConstPtr pool, + ModelNodeAddress address, + simfil::detail::mp_key key) + : MergedArrayView(std::move(pool), address, key) + { + } + + 
GeometryArrayView( + ModelConstPtr pool, + ModelNodeAddress address, + ModelNodeAddress singleGeometryAddress, + simfil::detail::mp_key key) + : MergedArrayView(std::move(pool), address, key), + singleGeometryAddress_(singleGeometryAddress) + { + } + + GeometryArrayView() = delete; + +private: + [[nodiscard]] uint32_t localMergedSize() const override; + [[nodiscard]] ModelNode::Ptr localMergedAt(int64_t i) const override; + bool localMergedIterate(IterCallback const& cb) const override; // NOLINT (allow discard) + + ModelNodeAddress singleGeometryAddress_; +}; + /** VertexBuffer Node */ class PointBufferNode final : public simfil::MandatoryDerivedModelNodeBase { public: - template friend struct simfil::model_ptr; friend class TileFeatureLayer; friend class Geometry; friend class MeshNode; + explicit PointBufferNode(simfil::detail::mp_key key) + : simfil::MandatoryDerivedModelNodeBase(key) {} + [[nodiscard]] ValueType type() const override; [[nodiscard]] ModelNode::Ptr at(int64_t) const override; [[nodiscard]] uint32_t size() const override; @@ -296,13 +330,18 @@ class PointBufferNode final : public simfil::MandatoryDerivedModelNodeBase { public: - template friend struct simfil::model_ptr; friend class TileFeatureLayer; friend class Geometry; + explicit PolygonNode(simfil::detail::mp_key key) + : simfil::MandatoryDerivedModelNodeBase(key) {} + [[nodiscard]] ValueType type() const override; [[nodiscard]] ModelNode::Ptr at(int64_t) const override; [[nodiscard]] uint32_t size() const override; @@ -327,8 +368,8 @@ class PolygonNode final : public simfil::MandatoryDerivedModelNodeBase { public: - template friend struct simfil::model_ptr; friend class TileFeatureLayer; friend class Geometry; + explicit MeshNode(simfil::detail::mp_key key) + : simfil::MandatoryDerivedModelNodeBase(key) {} + [[nodiscard]] ValueType type() const override; [[nodiscard]] ModelNode::Ptr at(int64_t) const override; [[nodiscard]] uint32_t size() const override; @@ -349,20 +392,24 @@ class 
MeshNode final : public simfil::MandatoryDerivedModelNodeBase { public: - template friend struct simfil::model_ptr; friend class TileFeatureLayer; friend class Geometry; + explicit MeshTriangleCollectionNode(simfil::detail::mp_key key) + : simfil::MandatoryDerivedModelNodeBase(key) {} + [[nodiscard]] ValueType type() const override; [[nodiscard]] ModelNode::Ptr at(int64_t) const override; [[nodiscard]] uint32_t size() const override; @@ -370,9 +417,10 @@ class MeshTriangleCollectionNode : public simfil::MandatoryDerivedModelNodeBase< MeshTriangleCollectionNode() = delete; -private: - explicit MeshTriangleCollectionNode(const ModelNode& base); +public: + explicit MeshTriangleCollectionNode(const ModelNode& base, simfil::detail::mp_key key); +private: uint32_t index_ = 0; }; @@ -384,10 +432,12 @@ class MeshTriangleCollectionNode : public simfil::MandatoryDerivedModelNodeBase< class LinearRingNode : public simfil::MandatoryDerivedModelNodeBase { public: - template friend struct simfil::model_ptr; friend class TileFeatureLayer; friend class Geometry; + explicit LinearRingNode(simfil::detail::mp_key key) + : simfil::MandatoryDerivedModelNodeBase(key) {} + [[nodiscard]] ValueType type() const override; [[nodiscard]] ModelNode::Ptr at(int64_t) const override; [[nodiscard]] uint32_t size() const override; @@ -397,9 +447,11 @@ class LinearRingNode : public simfil::MandatoryDerivedModelNodeBase length = {}); +public: + explicit LinearRingNode(const ModelNode& base, simfil::detail::mp_key key); + LinearRingNode(const ModelNode& base, std::optional length, simfil::detail::mp_key key); +private: model_ptr vertexBuffer() const; enum class Orientation : uint8_t { CW, CCW }; diff --git a/libs/model/include/mapget/model/info.h b/libs/model/include/mapget/model/info.h index 5ca122a0..a53bef4c 100644 --- a/libs/model/include/mapget/model/info.h +++ b/libs/model/include/mapget/model/info.h @@ -3,6 +3,7 @@ #include #include #include +#include #include #include "sfl/small_vector.hpp" 
#include @@ -159,6 +160,17 @@ struct IdPart size_t matchLength, bool requireCompositionEnd, std::string* error = nullptr); + + /** + * Return the composition index directly after the matched ID part sequence. + * Optional parts may be skipped while matching. + */ + static std::optional compositionMatchEndIndex( + std::vector const& candidateComposition, + uint32_t compositionMatchStartIdx, + KeyValueViewPairs const& featureIdParts, + size_t matchLength, + std::string* error = nullptr); }; /** Structure to represent the feature type info */ @@ -268,7 +280,7 @@ struct LayerInfo std::vector featureTypes_; /** Utility function to get some feature type info by name. */ - FeatureTypeInfo const* getTypeInfo(std::string_view const& sv, bool throwIfMissing=true); + FeatureTypeInfo const* getTypeInfo(std::string_view const& sv, bool throwIfMissing=true) const; /** List of zoom levels */ std::vector zoomLevels_; @@ -279,6 +291,23 @@ struct LayerInfo */ std::vector coverage_; + /** + * Number of staged-loading levels available for this layer. + * Stage indices are zero-based and the minimum valid value is 1. + */ + uint32_t stages_ = 1; + + /** + * Optional human-readable labels for stages (index-aligned with stage number). + */ + std::vector stageLabels_; + + /** + * Stage index at which this layer should be considered high-fidelity. + * Stage indices below this threshold are low-fidelity. + */ + uint32_t highFidelityStage_ = 0; + /** Can this layer be read from? */ bool canRead_ = true; @@ -289,18 +318,30 @@ struct LayerInfo Version version_; /** - * Validate that a unique id composition exists that matches this feature id. + * Return the index of the first unique ID composition that matches this feature ID. * The field values must match the limitations of the IdPartDataType, and - * The order of values in KeyValuePairs must be the same as in the composition! - * @param typeId Feature type id, throws error if the type was not registered. 
+ * the order of values in KeyValuePairs must match the order in the composition. + * @param typeId Feature type id, throws if the type was not registered. + * @param featureIdParts Uniquely identifying information for the feature. + * @param validateForNewFeature True if only the primary composition may match. + */ + [[nodiscard]] std::optional matchingFeatureIdCompositionIndex( + const std::string_view& typeId, + KeyValueViewPairs const& featureIdParts, + bool validateForNewFeature) const; + + /** + * Validate that a unique ID composition exists that matches this feature ID. + * This is a convenience wrapper around matchingFeatureIdCompositionIndex(). + * @param typeId Feature type id, throws if the type was not registered. * @param featureIdParts Uniquely identifying information for the feature. - * @param validateForNewFeature True if the id should be evaluated with this tile's prefix prepended. + * @param validateForNewFeature True if only the primary composition may match. */ + [[nodiscard]] bool validFeatureId( const std::string_view& typeId, KeyValueViewPairs const& featureIdParts, - bool validateForNewFeature, - uint32_t compositionMatchStartIndex = 0); + bool validateForNewFeature) const; /** Create LayerInfo from JSON. */ static std::shared_ptr fromJson(const nlohmann::json& j, std::string const& layerId=""); @@ -351,6 +392,9 @@ struct DataSourceInfo * "featureTypes": [...], // Mandatory: A list of feature type information. * "zoomLevels": [...], // Optional: A list of zoom levels. Defaults to empty list. * "coverage": [...], // Optional: A list of coverage objects. Defaults to empty list. + * "stages": , // Optional: Number of staged-loading levels. Defaults to 1. + * "stageLabels": [...], // Optional: Human-readable labels for stages. + * "highFidelityStage": , // Optional: First stage considered high-fidelity. * "canRead": , // Optional: Whether the layer can be read. Defaults to true. * "canWrite": , // Optional: Whether the layer can be written. 
Defaults to false. * "version": { // Optional: The version of the layer. diff --git a/libs/model/include/mapget/model/layer.h b/libs/model/include/mapget/model/layer.h index 5a2acfa5..dd3c04d8 100644 --- a/libs/model/include/mapget/model/layer.h +++ b/libs/model/include/mapget/model/layer.h @@ -10,6 +10,9 @@ #include #include #include +#include +#include +#include #include namespace simfil { struct StringPool; } @@ -17,6 +20,8 @@ namespace simfil { struct StringPool; } namespace mapget { +constexpr uint32_t UnspecifiedStage = std::numeric_limits::max(); + /** * Callback type for a function which returns a string pool instance * for a given node identifier. @@ -46,6 +51,9 @@ struct MapTileKey // The tile's associated map tile id TileId tileId_; + // Staged-loading index for this tile/layer request (0-based). + uint32_t stage_ = 0; + /** Constructor to parse the key from a string, as returned by toString. */ explicit MapTileKey(std::string const& str); @@ -53,17 +61,18 @@ struct MapTileKey explicit MapTileKey(TileLayer const& data); /** Constructor to create the cache key from raw components. */ - explicit MapTileKey(LayerType layer, std::string mapId, std::string layerId, TileId tileId); + explicit MapTileKey(LayerType layer, std::string mapId, std::string layerId, TileId tileId, uint32_t stage = 0); /** Allow default ctor. */ MapTileKey() = default; /** Convert the key to a string. The string will be in the form of - * "(0):(1):(2):(3)", with + * "(0):(1):(2):(3):(4)", with * (0) being the layer type enum name, * (1) being the map id, * (2) being the layer id, - * (3) being the hexadecimal tile id. + * (3) being the hexadecimal tile id, + * (4) being the decimal stage index. 
*/ [[nodiscard]] std::string toString() const; @@ -85,6 +94,12 @@ class TileLayer { public: using Ptr = std::shared_ptr; + enum class LoadState : uint8_t { + LoadingQueued = 0, + BackendFetching = 1, + BackendConverting = 2 + }; + using LoadStateCallback = std::function; /** * Constructor that takes tileId_, nodeId_, mapId_, layerInfo_, @@ -97,13 +112,14 @@ class TileLayer const std::shared_ptr& info); /** - * Parse a tile layer from an input stream. Will throw if + * Parse a tile layer from a binary byte buffer. Will throw if * the resolved major-minor version of the TileLayer is not the same - * as the one read from the stream. + * as the one read from the input. */ TileLayer( - std::istream& inputStream, - LayerInfoResolveFun const& layerInfoResolveFun); + const std::vector& input, + LayerInfoResolveFun const& layerInfoResolveFun, + size_t* bytesRead = nullptr); virtual ~TileLayer() = default; @@ -192,7 +208,24 @@ class TileLayer virtual tl::expected write(std::ostream& outputStream); virtual nlohmann::json toJson() const; + /** + * Set a load-state callback. Used by the service to forward state changes. + * Not serialized with the tile. + */ + void setLoadStateCallback(LoadStateCallback cb); + + /** Emit a load-state change (if a callback is registered). */ + void setLoadState(LoadState state); + + /** + * Optional staged-loading index for feature tiles. + * Base TileLayer implementation has no stage. 
+ */ + [[nodiscard]] virtual std::optional stage() const; + virtual void setStage(std::optional stage); + protected: + size_t deserializationOffsetBytes_ = 0; Version mapVersion_{0, 0, 0}; TileId tileId_; std::string nodeId_; // Identifier of the string-pool/datasource instance @@ -204,6 +237,7 @@ class TileLayer std::optional ttl_; nlohmann::json info_; std::optional legalInfo_; // Copyright-related information + LoadStateCallback onLoadStateChanged_; }; } diff --git a/libs/model/include/mapget/model/merged-array-view.h b/libs/model/include/mapget/model/merged-array-view.h new file mode 100644 index 00000000..6cd634bb --- /dev/null +++ b/libs/model/include/mapget/model/merged-array-view.h @@ -0,0 +1,127 @@ +#pragma once + +#include "simfil/model/nodes.h" + +namespace mapget +{ + +class TileFeatureLayer; + +/** + * Generic forward-linked merged array view. + * + * The local entries can be customized by derived classes via + * localMerged* methods. By default this wraps the BaseArray storage. 
+ */ +template +class MergedArrayView : public simfil::BaseArray +{ +public: + using Base = simfil::BaseArray; + using ExtensionPtr = simfil::model_ptr; + + explicit MergedArrayView(simfil::detail::mp_key key) + : Base(key) + { + } + + MergedArrayView( + simfil::ModelConstPtr pool, + simfil::ModelNodeAddress address, + simfil::detail::mp_key key) + : Base(std::move(pool), address, key) + { + } + + void setExtension(ExtensionPtr extension) + { + if (!extension) { + this->model().clearMergedArrayExtension(this->addr()); + return; + } + this->model().setMergedArrayExtension( + this->addr(), + &extension->model(), + extension->addr()); + } + + [[nodiscard]] ExtensionPtr extension() const + { + auto link = this->model().mergedArrayExtension(this->addr()); + if (!link || !link->first || !link->second) { + return {}; + } + return link->first->template resolve(link->second); + } + + [[nodiscard]] uint32_t mergedSize() const + { + auto size = localMergedSize(); + if (auto ext = extension()) { + size += ext->mergedSize(); + } + return size; + } + + [[nodiscard]] simfil::ModelNode::Ptr mergedAt(int64_t i) const + { + if (i < 0) { + return {}; + } + + auto localSize = static_cast(localMergedSize()); + if (i < localSize) { + return localMergedAt(i); + } + + auto ext = extension(); + if (!ext) { + return {}; + } + return ext->mergedAt(i - localSize); + } + + bool mergedIterate(simfil::ModelNode::IterCallback const& cb) const + { + if (!localMergedIterate(cb)) { + return false; + } + if (auto ext = extension()) { + return ext->mergedIterate(cb); + } + return true; + } + + [[nodiscard]] simfil::ModelNode::Ptr at(int64_t i) const override + { + return mergedAt(i); + } + + [[nodiscard]] uint32_t size() const override + { + return mergedSize(); + } + + bool iterate(simfil::ModelNode::IterCallback const& cb) const override + { + return mergedIterate(cb); + } + +protected: + [[nodiscard]] virtual uint32_t localMergedSize() const + { + return Base::size(); + } + + [[nodiscard]] virtual 
simfil::ModelNode::Ptr localMergedAt(int64_t i) const + { + return Base::at(i); + } + + virtual bool localMergedIterate(simfil::ModelNode::IterCallback const& cb) const + { + return Base::iterate(cb); + } +}; + +} // namespace mapget diff --git a/libs/model/include/mapget/model/pointnode.h b/libs/model/include/mapget/model/pointnode.h index 16b090b2..12f924ea 100644 --- a/libs/model/include/mapget/model/pointnode.h +++ b/libs/model/include/mapget/model/pointnode.h @@ -13,11 +13,13 @@ namespace mapget class PointNode final : public simfil::MandatoryDerivedModelNodeBase { public: - template friend struct simfil::model_ptr; friend class TileFeatureLayer; friend class Geometry; friend class PointBufferNode; + explicit PointNode(simfil::detail::mp_key key) + : simfil::MandatoryDerivedModelNodeBase(key) {} + [[nodiscard]] ValueType type() const override; [[nodiscard]] ModelNode::Ptr at(int64_t) const override; [[nodiscard]] uint32_t size() const override; @@ -27,22 +29,24 @@ class PointNode final : public simfil::MandatoryDerivedModelNodeBase bool Geometry::forEachPoint(LambdaType const& callback) const { - PointBufferNode vertexBufferNode{geomData_, model_, {ModelType::ColumnId::PointBuffers, addr_.index()}}; - for (auto i = 0; i < vertexBufferNode.size(); ++i) { - PointNode vertex{*vertexBufferNode.at(i), vertexBufferNode.baseGeomData_}; - if (!callback(vertex.point_)) + for (size_t i = 0; i < numPoints(); ++i) { + if (!callback(pointAt(i))) return false; } return true; } -} \ No newline at end of file +} diff --git a/libs/model/include/mapget/model/relation.h b/libs/model/include/mapget/model/relation.h index 6cd9df8e..83acd644 100644 --- a/libs/model/include/mapget/model/relation.h +++ b/libs/model/include/mapget/model/relation.h @@ -20,7 +20,6 @@ class Relation : public simfil::ProceduralObject<6, Relation, TileFeatureLayer> { friend class TileFeatureLayer; friend class Feature; - template friend struct simfil::model_ptr; public: /** @@ -56,25 +55,25 @@ class 
Relation : public simfil::ProceduralObject<6, Relation, TileFeatureLayer> protected: /** Actual per-attribute data that is stored in the model's attributes-column. */ struct Data { + MODEL_COLUMN_TYPE(20); + simfil::StringId name_ = 0; simfil::ModelNodeAddress targetFeatureId_; simfil::ModelNodeAddress sourceValidity_; simfil::ModelNodeAddress targetValidity_; simfil::ModelNodeAddress sourceData_; - - template - void serialize(S& s) { - s.value2b(name_); - s.object(targetFeatureId_); - s.object(sourceValidity_); - s.object(targetValidity_); - s.object(sourceData_); - } }; - Relation(Data* data, simfil::ModelConstPtr l, simfil::ModelNodeAddress a); - Relation() = default; +public: + explicit Relation(simfil::detail::mp_key key) + : simfil::ProceduralObject<6, Relation, TileFeatureLayer>(key) {} + Relation(Data* data, + simfil::ModelConstPtr l, + simfil::ModelNodeAddress a, + simfil::detail::mp_key key); + Relation() = delete; +protected: /** Reference to the actual data stored for the relation. 
*/ Data* data_{}; }; diff --git a/libs/model/include/mapget/model/sourcedata.h b/libs/model/include/mapget/model/sourcedata.h index c7088c52..933ee8a0 100644 --- a/libs/model/include/mapget/model/sourcedata.h +++ b/libs/model/include/mapget/model/sourcedata.h @@ -20,7 +20,6 @@ class SourceDataCompoundNode : public simfil::MandatoryDerivedModelNodeBase; public: SourceDataCompoundNode() = delete; @@ -52,24 +51,28 @@ class SourceDataCompoundNode : public simfil::MandatoryDerivedModelNodeBase(key), + data_(nullptr) {} + SourceDataCompoundNode(Data* data, + TileSourceDataLayer::ConstPtr model, + simfil::ModelNodeAddress address, + simfil::detail::mp_key key); + SourceDataCompoundNode(Data* data, + TileSourceDataLayer::Ptr model, + simfil::ModelNodeAddress address, + size_t initialSize, + simfil::detail::mp_key key); private: struct Data { + MODEL_COLUMN_TYPE(16); + simfil::ModelNodeAddress object_; simfil::StringId schemaName_ = {}; SourceDataAddress sourceAddress_; - - template - void serialize(S& s) - { - s.object(object_); - s.value2b(schemaName_); - s.object(sourceAddress_); - } }; Data* const data_; diff --git a/libs/model/include/mapget/model/sourcedatalayer.h b/libs/model/include/mapget/model/sourcedatalayer.h index 4cb58928..19a5fd11 100644 --- a/libs/model/include/mapget/model/sourcedatalayer.h +++ b/libs/model/include/mapget/model/sourcedatalayer.h @@ -1,6 +1,7 @@ #pragma once #include +#include #include "simfil/model/model.h" #include "simfil/environment.h" @@ -16,12 +17,21 @@ class SourceDataCompoundNode; class TileSourceDataLayer : public TileLayer, public simfil::ModelPool { public: + // Keep ModelPool::resolve overloads visible alongside the override below. 
+ using ModelPool::resolve; + using Ptr = std::shared_ptr; using ConstPtr = std::shared_ptr; template using model_ptr = simfil::model_ptr; + template + friend model_ptr resolveInternal( + simfil::res::tag, + TileSourceDataLayer const&, + simfil::ModelNode const&); + /** * ModelPool colunm ids */ @@ -37,7 +47,7 @@ class TileSourceDataLayer : public TileLayer, public simfil::ModelPool std::shared_ptr const& stringPool); TileSourceDataLayer( - std::istream&, + const std::vector& input, LayerInfoResolveFun const& layerInfoResolveFun, StringPoolResolveFun const& stringPoolGetter); @@ -47,7 +57,6 @@ class TileSourceDataLayer : public TileLayer, public simfil::ModelPool * Node factory interface */ model_ptr newCompound(size_t initialSize); - model_ptr resolveCompound(simfil::ModelNode const&) const; /** * Get this pool's simfil evaluation environment. @@ -88,4 +97,11 @@ class TileSourceDataLayer : public TileLayer, public simfil::ModelPool std::unique_ptr impl_; }; +// Primary template for ADL-based resolve hooks (specialized in sourcedatalayer.cpp). 
+template +simfil::model_ptr resolveInternal( + simfil::res::tag, + TileSourceDataLayer const& model, + simfil::ModelNode const& node); + } diff --git a/libs/model/include/mapget/model/sourcedatareference.h b/libs/model/include/mapget/model/sourcedatareference.h index 4f1ac6e2..00cc9757 100644 --- a/libs/model/include/mapget/model/sourcedatareference.h +++ b/libs/model/include/mapget/model/sourcedatareference.h @@ -20,15 +20,11 @@ class TileFeatureLayer; class SourceDataReferenceItem; struct QualifiedSourceDataReference { + MODEL_COLUMN_TYPE(12); + + SourceDataAddress address_; + StringId layerId_; StringId qualifier_; - SourceDataReference reference_; - - template - void serialize(S& s) - { - s.value2b(qualifier_); - s.object(reference_); - } }; /** @@ -37,7 +33,6 @@ struct QualifiedSourceDataReference { class SourceDataReferenceCollection final : public simfil::MandatoryDerivedModelNodeBase { public: - template friend struct simfil::model_ptr; friend class TileFeatureLayer; ValueType type() const override; @@ -51,10 +46,17 @@ class SourceDataReferenceCollection final : public simfil::MandatoryDerivedModel */ void forEachReference(std::function fn) const; -private: - SourceDataReferenceCollection() = default; - SourceDataReferenceCollection(uint32_t offset, uint32_t size, ModelConstPtr pool, ModelNodeAddress a); +public: + explicit SourceDataReferenceCollection(simfil::detail::mp_key key) + : simfil::MandatoryDerivedModelNodeBase(key) {} + SourceDataReferenceCollection(uint32_t offset, + uint32_t size, + ModelConstPtr pool, + ModelNodeAddress a, + simfil::detail::mp_key key); + SourceDataReferenceCollection() = delete; +private: uint32_t offset_ = {}; uint32_t size_ = {}; }; @@ -65,7 +67,6 @@ class SourceDataReferenceCollection final : public simfil::MandatoryDerivedModel class SourceDataReferenceItem final : public simfil::MandatoryDerivedModelNodeBase { public: - template friend struct simfil::model_ptr; friend class SourceDataReferenceCollection; friend class 
TileFeatureLayer; @@ -83,10 +84,16 @@ class SourceDataReferenceItem final : public simfil::MandatoryDerivedModelNodeBa std::string_view layerId() const; SourceDataAddress address() const; -private: - SourceDataReferenceItem() = default; - SourceDataReferenceItem(const QualifiedSourceDataReference* data, ModelConstPtr pool, ModelNodeAddress a); +public: + explicit SourceDataReferenceItem(simfil::detail::mp_key key) + : simfil::MandatoryDerivedModelNodeBase(key) {} + SourceDataReferenceItem(const QualifiedSourceDataReference* data, + ModelConstPtr pool, + ModelNodeAddress a, + simfil::detail::mp_key key); + SourceDataReferenceItem() = delete; +private: const QualifiedSourceDataReference* const data_ = {}; }; diff --git a/libs/model/include/mapget/model/sourceinfo.h b/libs/model/include/mapget/model/sourceinfo.h index b350de31..7fee8660 100644 --- a/libs/model/include/mapget/model/sourceinfo.h +++ b/libs/model/include/mapget/model/sourceinfo.h @@ -2,6 +2,7 @@ #include #include +#include "simfil/model/column.h" #include "simfil/model/string-pool.h" namespace mapget @@ -13,14 +14,23 @@ namespace mapget */ struct SourceDataAddress { + MODEL_COLUMN_TYPE(8); + static constexpr uint64_t BitMask = 0xffffffff; - uint64_t value_ = 0u; + uint32_t bitOffset_ = 0u; + uint32_t bitSize_ = 0u; SourceDataAddress() = default; + SourceDataAddress(uint32_t bitOffset, uint32_t bitSize) + : bitOffset_(bitOffset), + bitSize_(bitSize) + {} + explicit SourceDataAddress(uint64_t value) - : value_(value) + : bitOffset_(static_cast((value >> 32) & BitMask)), + bitSize_(static_cast(value & BitMask)) {} /** @@ -33,22 +43,24 @@ struct SourceDataAddress assert((offset & BitMask) == offset); assert((size & BitMask) == size); - return SourceDataAddress{(static_cast(offset) << 32) | (size & BitMask)}; + return SourceDataAddress{ + static_cast(offset), + static_cast(size)}; } uint64_t u64() const { - return value_; + return (static_cast(bitOffset_) << 32) | bitSize_; } uint32_t bitSize() const { - 
return value_ & BitMask; + return bitSize_; } uint32_t bitOffset() const { - return (value_ >> 32) & BitMask; + return bitOffset_; } /** @@ -57,7 +69,8 @@ struct SourceDataAddress template void serialize(S& s) { - s.value8b(value_); + s.value4b(bitOffset_); + s.value4b(bitSize_); } }; @@ -69,17 +82,19 @@ struct SourceDataAddress */ struct SourceDataReference { - /** Layer Id */ - simfil::StringId layerId_; + MODEL_COLUMN_TYPE(12); /** Region in the source blob */ SourceDataAddress address_; + /** Layer Id */ + simfil::StringId layerId_; + template void serialize(S& s) { - s.value2b(layerId_); s.object(address_); + s.value2b(layerId_); } }; diff --git a/libs/model/include/mapget/model/stream.h b/libs/model/include/mapget/model/stream.h index 4568e117..89439c67 100644 --- a/libs/model/include/mapget/model/stream.h +++ b/libs/model/include/mapget/model/stream.h @@ -4,8 +4,9 @@ #include "stringpool.h" #include -#include +#include #include +#include namespace mapget { @@ -29,13 +30,48 @@ class TileLayerStream StringPool = 1, TileFeatureLayer = 2, TileSourceDataLayer = 3, + /** + * JSON-encoded status updates, e.g. for WebSocket /tiles. + * + * Payload: UTF-8 JSON bytes (not null-terminated). + */ + Status = 4, + /** + * JSON-encoded load-state updates for individual tiles. + * + * Payload: UTF-8 JSON bytes (not null-terminated). + */ + LoadStateChange = 5, + /** + * JSON-encoded request-context marker for WebSocket /tiles streams. + * + * Payload: UTF-8 JSON bytes (not null-terminated). + */ + RequestContext = 6, EndOfStream = 128 }; struct StringPoolCache; - /** Protocol Version which parsed blobs must be compatible with. */ - static constexpr Version CurrentProtocolVersion{0, 1, 1}; + /** + * Protocol Version which parsed blobs must be compatible with. 
+ * Version History: + * - Version 1.0: + * + Added TileFeatureLayer Message + * + Added StringPool Message + * + Added TileSourceDataLayer Message + * + Added EndOfStream Message + * - Version 1.1: + * + Added errorCode field to TileLayer + * + Added Status Message + * + Added LoadStateChange Message + * - Version 1.2: + * - Removed LoadStateChange Message. + * + Added tile load stage. + * + Feature geometry reference may point directly to a Geometry + * (single-geometry fast-path) or to a GeometryCollection. + */ + static constexpr Version CurrentProtocolVersion{1, 2, 0}; /** Map to keep track of the highest sent string id per datasource node. */ using StringPoolOffsetMap = std::unordered_map; @@ -71,7 +107,11 @@ class TileLayerStream * size, or false, if no sufficient bytes are available. Throws if the protocol version * in the header does not match the version currently used by mapget. */ - static bool readMessageHeader(std::stringstream& stream, MessageType& outType, uint32_t& outSize); + static bool readMessageHeader( + std::span bytes, + MessageType& outType, + uint32_t& outSize, + size_t* bytesRead = nullptr); private: enum class Phase { ReadHeader, ReadValue }; @@ -86,7 +126,8 @@ class TileLayerStream */ bool continueReading(); - std::stringstream buffer_; + std::vector buffer_; + size_t readOffset_ = 0; LayerInfoResolveFun layerInfoProvider_; std::shared_ptr stringPoolProvider_; std::function onParsedLayer_; diff --git a/libs/model/include/mapget/model/stringpool.h b/libs/model/include/mapget/model/stringpool.h index 434a9b60..337200c8 100644 --- a/libs/model/include/mapget/model/stringpool.h +++ b/libs/model/include/mapget/model/stringpool.h @@ -1,6 +1,7 @@ #pragma once #include "simfil/model/string-pool.h" +#include namespace mapget { @@ -23,6 +24,7 @@ struct StringPool : public simfil::StringPool enum StaticStringIds : simfil::StringId { IdStr = NextStaticId, TypeIdStr, + LodStr, MapIdStr, LayerIdStr, LayerStr, @@ -49,7 +51,13 @@ struct StringPool : 
public simfil::StringPool StartStr, EndStr, PointStr, - FeatureIdStr + FeatureIdStr, + FromStr, + ToStr, + ConnectedEndStr, + FromConnectedEndStr, + ToConnectedEndStr, + TransitionNumberStr }; explicit StringPool(const std::string_view& nodeId); @@ -63,10 +71,12 @@ struct StringPool : public simfil::StringPool write(std::ostream& outputStream, simfil::StringId offset) const override; /** - * Call this before calling read() to figure out which strings- - * object to call read() with. + * Parse the datasource node id prefix from a serialized StringPool message. */ - static std::string readDataSourceNodeId(std::istream& inputStream); + static std::string readDataSourceNodeId( + const std::vector& input, + size_t offset = 0, + size_t* bytesRead = nullptr); std::string const nodeId_; }; diff --git a/libs/model/include/mapget/model/validity-data.h b/libs/model/include/mapget/model/validity-data.h new file mode 100644 index 00000000..cab0de87 --- /dev/null +++ b/libs/model/include/mapget/model/validity-data.h @@ -0,0 +1,84 @@ +#pragma once + +#include "point.h" + +#include "simfil/model/nodes.h" + +#include + +namespace mapget +{ + +struct ValidityData +{ + MODEL_COLUMN_TYPE(64); + + /** + * Validity direction values - may be used as flags. + */ + enum Direction : uint8_t { + Empty = 0x0, // No set direction + Positive = 0x1, // Positive (digitization) direction + Negative = 0x2, // Negative (against digitization) direction + Both = 0x3, // Both positive and negative direction + None = 0x4, // Not in any direction + }; + + /** + * Validity offset type enumeration. OffsetPointValidity and OffsetRangeValidity + * may be combined with one of GeoPosOffset, BufferOffset, RelativeLengthOffset + * or MetricLengthOffset. In this case, the validity geometry is based on + * an offset (range) of a feature's geometry. If SimpleGeometry is used, + * then the validity just references a whole Geometry object. 
+ */ + enum GeometryDescriptionType : uint8_t { + NoGeometry = 0, + SimpleGeometry = 1, + OffsetPointValidity = 2, + OffsetRangeValidity = 3, + FeatureTransition = 4, + }; + enum GeometryOffsetType : uint8_t { + InvalidOffsetType = 0, + GeoPosOffset = 1, + BufferOffset = 2, + RelativeLengthOffset = 3, + MetricLengthOffset = 4, + }; + + struct Range { + Point first; + Point second; + }; + + enum TransitionEnd : uint8_t { + Start = 0, + End = 1, + }; + + struct FeatureTransitionDescription { + simfil::ModelNodeAddress fromFeature_; + simfil::ModelNodeAddress toFeature_; + uint32_t transitionNumber_ = 0; + // Bit 0 encodes the connected end of fromFeature_, bit 1 the end of toFeature_. + uint8_t connectedEnds_ = 0; + }; + + union GeometryDescription { + GeometryDescription() : simpleGeometry_() {} + simfil::ModelNodeAddress simpleGeometry_; + Range range_; + Point point_; + FeatureTransitionDescription featureTransition_; + }; + + Direction direction_ = Empty; + GeometryDescriptionType geomDescrType_ = NoGeometry; + GeometryOffsetType geomOffsetType_ = InvalidOffsetType; + GeometryDescription geomDescr_{}; + static constexpr int8_t InvalidReferencedStage = -1; + int8_t referencedStage_ = InvalidReferencedStage; + simfil::ModelNodeAddress featureAddress_; +}; + +} // namespace mapget diff --git a/libs/model/include/mapget/model/validity.h b/libs/model/include/mapget/model/validity.h index 218420f7..3933ef29 100644 --- a/libs/model/include/mapget/model/validity.h +++ b/libs/model/include/mapget/model/validity.h @@ -2,54 +2,50 @@ #include "geometry.h" #include "sourcedatareference.h" +#include "validity-data.h" namespace mapget { class Geometry; +class Feature; +class FeatureId; /** * Represents an attribute or relation validity with respect to a feature's geometry. 
*/ -class Validity : public simfil::ProceduralObject<6, Validity, TileFeatureLayer> +class Validity : public simfil::ProceduralObject<7, Validity, TileFeatureLayer> { friend class TileFeatureLayer; - template - friend struct simfil::model_ptr; friend class PointNode; public: - /** - * Validity direction values - may be used as flags. - */ - enum Direction : uint8_t { - Empty = 0x0, // No set direction - Positive = 0x1, // Positive (digitization) direction - Negative = 0x2, // Negative (against digitization) direction - Both = 0x3, // Both positive and negative direction - None = 0x4, // Not in any direction - }; - - /** - * Validity offset type enumeration. OffsetPointValidity and OffsetRangeValidity - * may be combined with one of GeoPosOffset, BufferOffset, RelativeLengthOffset - * or MetricLengthOffset. In this case, the validity geometry is based on - * an offset (range) of a feature's geometry. If SimpleGeometry is used, - * then the validity just references a whole Geometry object. - */ - enum GeometryDescriptionType : uint8_t { - NoGeometry = 0, - SimpleGeometry = 1, - OffsetPointValidity = 2, - OffsetRangeValidity = 3, - }; - enum GeometryOffsetType : uint8_t { - InvalidOffsetType = 0, - GeoPosOffset = 1, - BufferOffset = 2, - RelativeLengthOffset = 3, - MetricLengthOffset = 4, - }; + using Direction = ValidityData::Direction; + using GeometryDescriptionType = ValidityData::GeometryDescriptionType; + using GeometryOffsetType = ValidityData::GeometryOffsetType; + using TransitionEnd = ValidityData::TransitionEnd; + + // Keep existing Validity::Empty-style API surface. 
+ static constexpr Direction Empty = ValidityData::Empty; + static constexpr Direction Positive = ValidityData::Positive; + static constexpr Direction Negative = ValidityData::Negative; + static constexpr Direction Both = ValidityData::Both; + static constexpr Direction None = ValidityData::None; + + static constexpr GeometryDescriptionType NoGeometry = ValidityData::NoGeometry; + static constexpr GeometryDescriptionType SimpleGeometry = ValidityData::SimpleGeometry; + static constexpr GeometryDescriptionType OffsetPointValidity = ValidityData::OffsetPointValidity; + static constexpr GeometryDescriptionType OffsetRangeValidity = ValidityData::OffsetRangeValidity; + static constexpr GeometryDescriptionType FeatureTransition = ValidityData::FeatureTransition; + + static constexpr GeometryOffsetType InvalidOffsetType = ValidityData::InvalidOffsetType; + static constexpr GeometryOffsetType GeoPosOffset = ValidityData::GeoPosOffset; + static constexpr GeometryOffsetType BufferOffset = ValidityData::BufferOffset; + static constexpr GeometryOffsetType RelativeLengthOffset = ValidityData::RelativeLengthOffset; + static constexpr GeometryOffsetType MetricLengthOffset = ValidityData::MetricLengthOffset; + + static constexpr TransitionEnd Start = ValidityData::Start; + static constexpr TransitionEnd End = ValidityData::End; /** * Feature on which the validity applies. @@ -70,10 +66,11 @@ class Validity : public simfil::ProceduralObject<6, Validity, TileFeatureLayer> [[nodiscard]] GeometryDescriptionType geometryDescriptionType() const; /** - * Referenced geometry name accessors. + * Referenced geometry stage accessors. + * If unset, validity resolution falls back to the first matching line geometry. */ - [[nodiscard]] std::optional geometryName() const; - void setGeometryName(std::optional const& geometryName); + [[nodiscard]] std::optional geometryStage() const; + void setGeometryStage(std::optional geometryStage); /** * Single offset point accessors. 
Note for the getter: @@ -99,93 +96,61 @@ class Validity : public simfil::ProceduralObject<6, Validity, TileFeatureLayer> void setSimpleGeometry(model_ptr); [[nodiscard]] model_ptr simpleGeometry() const; + /** + * Get or set a semantic feature transition validity. + * The connected ends indicate which endpoint of each referenced feature touches the transition. + */ + void setFeatureTransition( + model_ptr const& fromFeature, + TransitionEnd fromConnectedEnd, + model_ptr const& toFeature, + TransitionEnd toConnectedEnd, + uint32_t transitionNumber); + [[nodiscard]] model_ptr transitionFromFeature() const; + [[nodiscard]] model_ptr transitionToFeature() const; + [[nodiscard]] std::optional transitionFromConnectedEnd() const; + [[nodiscard]] std::optional transitionToConnectedEnd() const; + [[nodiscard]] std::optional transitionNumber() const; + /** * Compute the actual shape-points of the validity with respect to one * of the geometries in the given collection, or the geometry collection of * the directly referenced feature. The geometry is picked based - * on the validity's geometryName. The return value may be one of the following: + * on the validity's geometryStage when available. The return value may be one of the following: * - An empty vector, indicating that the validity could not be applied. * If an error string was passed, then it would be set to an error message. * - A vector containing a single point, if the validity resolved to a point geometry. * - A vector containing more than one point, if the validity resolved to a poly-line. */ - SelfContainedGeometry computeGeometry(model_ptr geometryCollection, std::string* error=nullptr) const; + SelfContainedGeometry computeGeometry( + model_ptr geometryCollection, + std::string* error=nullptr, + std::optional defaultGeometryStage = std::nullopt) const; protected: - /** Actual per-validity data that is stored in the model's attributes-column. 
*/ - struct Data - { - using Range = std::pair; - using GeometryDescription = std::variant; - - Direction direction_; - GeometryDescriptionType geomDescrType_ = NoGeometry; - GeometryOffsetType geomOffsetType_ = InvalidOffsetType; - GeometryDescription geomDescr_; - StringId referencedGeomName_ = 0; - ModelNodeAddress featureAddress_; - - template - static T& get_or_default_construct(std::variant& v) { - if (!std::holds_alternative(v)) { - v.template emplace(); - } - return std::get(v); - } - - template - void serialize(S& s) - { - s.value1b(direction_); - s.value1b(geomDescrType_); - s.value1b(geomOffsetType_); - - if (geomDescrType_ == SimpleGeometry) { - assert(geomOffsetType_ == InvalidOffsetType); - s.object(get_or_default_construct(geomDescr_)); - return; - } - - // The referenced geometry name is only used if the validity - // does not directly reference a geometry by a ModelNodeAddress. - s.value2b(referencedGeomName_); - - auto serializeOffsetPoint = [this, &s](Point& p) { - switch (geomOffsetType_) { - case InvalidOffsetType: - break; - case GeoPosOffset: - s.object(p); - break; - case BufferOffset: - case RelativeLengthOffset: - case MetricLengthOffset: - s.value8b(p.x); - break; - } - }; - - if (geomDescrType_ == OffsetRangeValidity) { - auto& [start, end] = get_or_default_construct(geomDescr_); - serializeOffsetPoint(start); - serializeOffsetPoint(end); - } - else if (geomDescrType_ == OffsetPointValidity) { - serializeOffsetPoint(get_or_default_construct(geomDescr_)); - } - - s.object(featureAddress_); - } - }; + using Data = ValidityData; -protected: - Validity(Data* data, simfil::ModelConstPtr layer, simfil::ModelNodeAddress a); - Validity() = default; +public: + explicit Validity(simfil::detail::mp_key key) + : simfil::ProceduralObject<7, Validity, TileFeatureLayer>(key) {} + Validity(Direction direction, + simfil::ModelConstPtr layer, + simfil::ModelNodeAddress a, + simfil::detail::mp_key key); + Validity(Data* data, + simfil::ModelConstPtr layer, 
+ simfil::ModelNodeAddress a, + simfil::detail::mp_key key); + Validity() = delete; +protected: /** * Pointer to the actual data stored for the attribute. */ + void ensureMaterialized(); + Data* data_ = nullptr; + Direction simpleDirection_ = Empty; }; /** @@ -194,15 +159,13 @@ class Validity : public simfil::ProceduralObject<6, Validity, TileFeatureLayer> struct MultiValidity : public simfil::BaseArray { friend class TileFeatureLayer; - template - friend struct simfil::model_ptr; /** * Append a new line position validity based on an absolute geographic position. */ model_ptr newPoint( Point pos, - std::string_view geomName = {}, + std::optional geometryStage = std::nullopt, Validity::Direction direction = Validity::Empty); /** @@ -211,7 +174,7 @@ struct MultiValidity : public simfil::BaseArray model_ptr newRange( Point start, Point end, - std::string_view geomName = {}, + std::optional geometryStage = std::nullopt, Validity::Direction direction = Validity::Empty); /** @@ -223,7 +186,7 @@ struct MultiValidity : public simfil::BaseArray model_ptr newPoint( Validity::GeometryOffsetType offsetType, double pos, - std::string_view geomName = {}, + std::optional geometryStage = std::nullopt, Validity::Direction direction = Validity::Empty); /** @@ -234,7 +197,7 @@ struct MultiValidity : public simfil::BaseArray model_ptr newPoint( Validity::GeometryOffsetType offsetType, int32_t pos, - std::string_view geomName = {}, + std::optional geometryStage = std::nullopt, Validity::Direction direction = Validity::Empty); /** @@ -247,7 +210,7 @@ struct MultiValidity : public simfil::BaseArray Validity::GeometryOffsetType offsetType, double start, double end, - std::string_view geomName = {}, + std::optional geometryStage = std::nullopt, Validity::Direction direction = Validity::Empty); /** @@ -259,7 +222,7 @@ struct MultiValidity : public simfil::BaseArray Validity::GeometryOffsetType offsetType, int32_t start, int32_t end, - std::string_view geomName = {}, + std::optional 
geometryStage = std::nullopt, Validity::Direction direction = Validity::Empty); /** @@ -268,6 +231,36 @@ struct MultiValidity : public simfil::BaseArray model_ptr newGeometry(model_ptr, Validity::Direction direction = Validity::Empty); + /** + * Append a validity that references a feature ID without restricting the geometry. + * The referenced feature's geometry is resolved when the validity is evaluated. + */ + model_ptr + newFeatureId(model_ptr const& featureId, Validity::Direction direction = Validity::Empty); + + /** + * Append a validity that references a staged geometry in the current feature context. + */ + model_ptr + newGeomStage(uint32_t geometryStage, Validity::Direction direction = Validity::Empty); + + /** + * Append a semantic transition validity connecting two feature endpoints. + */ + model_ptr newFeatureTransition( + model_ptr const& fromFeature, + Validity::TransitionEnd fromConnectedEnd, + model_ptr const& toFeature, + Validity::TransitionEnd toConnectedEnd, + uint32_t transitionNumber, + Validity::Direction direction = Validity::Empty); + + /** + * Append a complete validity. If no explicit direction is given, + * it is represented as complete coverage in both directions. + */ + model_ptr newComplete(Validity::Direction direction = Validity::Empty); + /** * Append a direction validity without further restricting the range. 
* The direction value controls, in which direction along the referenced @@ -280,4 +273,4 @@ struct MultiValidity : public simfil::BaseArray using simfil::BaseArray::BaseArray; }; -} \ No newline at end of file +} diff --git a/libs/model/src/attr.cpp b/libs/model/src/attr.cpp index 70c3c359..8c13533e 100644 --- a/libs/model/src/attr.cpp +++ b/libs/model/src/attr.cpp @@ -5,14 +5,34 @@ namespace mapget { -Attribute::Attribute(Attribute::Data* data, simfil::ModelConstPtr l, simfil::ModelNodeAddress a) - : simfil::ProceduralObject<2, Attribute, TileFeatureLayer>(data->fields_, std::move(l), a), data_(data) +namespace +{ +simfil::ModelNode::Ptr exposedValidityNode( + TileFeatureLayer const& model, + simfil::ModelNodeAddress const& validityCollectionAddress) +{ + auto validities = model.resolve(validityCollectionAddress); + if (validities && validities->size() == 1) { + if (auto validity = validities->at(0)) { + return validity; + } + } + return model.resolve(validityCollectionAddress); +} +} + +Attribute::Attribute(Attribute::Data* data, + simfil::ModelConstPtr l, + simfil::ModelNodeAddress a, + simfil::detail::mp_key key) + : simfil::ProceduralObject<2, Attribute, TileFeatureLayer>(data->fields_, std::move(l), a, key), + data_(data) { if (data_->validities_) fields_.emplace_back( StringPool::ValidityStr, [](Attribute const& self) { - return model_ptr::make(self.model_, self.data_->validities_); + return exposedValidityNode(self.model(), self.data_->validities_); }); } @@ -48,7 +68,8 @@ bool Attribute::forEachField( model_ptr Attribute::sourceDataReferences() const { if (data_->sourceDataRefs_) { - return model().resolveSourceDataReferenceCollection(*model_ptr::make(model_, data_->sourceDataRefs_)); + return model().resolve( + *model_ptr::make(model_, data_->sourceDataRefs_)); } return {}; } @@ -63,7 +84,7 @@ model_ptr Attribute::validity() if (auto returnValue = validityOrNull()) { return returnValue; } - auto returnValue = model().newValidityCollection(1); + auto 
returnValue = model().newValidityCollection(2); data_->validities_ = returnValue->addr(); return returnValue; } @@ -73,7 +94,7 @@ model_ptr Attribute::validityOrNull() const if (!data_->validities_) { return {}; } - return model().resolveValidityCollection(*ModelNode::Ptr::make(model_, data_->validities_)); + return model().resolve(data_->validities_); } void Attribute::setValidity(const model_ptr& validities) const diff --git a/libs/model/src/attrlayer.cpp b/libs/model/src/attrlayer.cpp index 43f901df..6ae5ca14 100644 --- a/libs/model/src/attrlayer.cpp +++ b/libs/model/src/attrlayer.cpp @@ -8,23 +8,30 @@ namespace mapget AttributeLayer::AttributeLayer( simfil::ArrayIndex i, simfil::ModelConstPtr l, - simfil::ModelNodeAddress a + simfil::ModelNodeAddress a, + simfil::detail::mp_key key ) - : simfil::Object(i, std::move(l), a) + : simfil::BaseObject(i, std::move(l), a, key) { } model_ptr -AttributeLayer::newAttribute(const std::string_view& name, size_t initialCapacity) +AttributeLayer::newAttribute( + const std::string_view& name, + size_t initialCapacity, + bool fixedSize) { - auto result = static_cast(model()).newAttribute(name, initialCapacity); + auto result = static_cast(model()).newAttribute( + name, + initialCapacity, + fixedSize); addAttribute(result); return result; } void AttributeLayer::addAttribute(model_ptr a) { - addField(a->name(), simfil::ModelNode::Ptr(a)); + addField(a->name(), a); } bool AttributeLayer::forEachAttribute(const std::function&)>& cb) const @@ -36,7 +43,7 @@ bool AttributeLayer::forEachAttribute(const std::function(model()).resolveAttribute(*value); + auto attr = static_cast(model()).resolve(*value); if (!cb(attr)) return false; } @@ -46,23 +53,38 @@ bool AttributeLayer::forEachAttribute(const std::function(std::move(l), a, key), + members_(i) { } model_ptr -AttributeLayerList::newLayer(const std::string_view& name, size_t initialCapacity) +AttributeLayerList::newLayer( + const std::string_view& name, + size_t initialCapacity, + bool 
fixedSize) { - auto result = modelPtr()->newAttributeLayer(initialCapacity); + auto result = modelPtr()->newAttributeLayer(initialCapacity, fixedSize); addLayer(name, result); return result; } void AttributeLayerList::addLayer(const std::string_view& name, model_ptr l) { - addField(name, simfil::ModelNode::Ptr(std::move(l))); + (void) localObject()->addField(name, l); +} + +tl::expected, simfil::Error> +AttributeLayerList::addField(std::string_view const& name, model_ptr l) +{ + auto result = localObject()->addField(name, l); + if (!result) { + return tl::unexpected(result.error()); + } + return std::ref(*this); } bool AttributeLayerList::forEachLayer( @@ -76,7 +98,7 @@ bool AttributeLayerList::forEachLayer( log().warn("Don't add anything other than AttributeLayers into AttributeLayerLists!"); continue; } - auto attrLayer = static_cast(model()).resolveAttributeLayer(*value); + auto attrLayer = static_cast(model()).resolve(*value); if (!cb(*layerName, attrLayer)) return false; } @@ -84,4 +106,95 @@ bool AttributeLayerList::forEachLayer( return true; } +simfil::model_ptr AttributeLayerList::localObject() const +{ + return simfil::model_ptr::make(members_, model_, addr_); +} + +uint32_t AttributeLayerList::localMergedSize() const +{ + return localObject()->size(); +} + +simfil::ModelNode::Ptr AttributeLayerList::localMergedAt(int64_t i) const +{ + return localObject()->at(i); +} + +bool AttributeLayerList::localMergedIterate(simfil::ModelNode::IterCallback const& cb) const +{ + return localObject()->iterate(cb); +} + +simfil::ValueType AttributeLayerList::type() const +{ + return simfil::ValueType::Object; +} + +simfil::ModelNode::Ptr AttributeLayerList::at(int64_t i) const +{ + if (i < 0) { + return {}; + } + + auto localSize = static_cast(localObject()->size()); + if (i < localSize) { + return localObject()->at(i); + } + + if (auto ext = extension()) { + return ext->at(i - localSize); + } + return {}; +} + +uint32_t AttributeLayerList::size() const +{ + auto result 
= localObject()->size(); + if (auto ext = extension()) { + result += ext->size(); + } + return result; +} + +simfil::ModelNode::Ptr AttributeLayerList::get(const simfil::StringId& field) const +{ + auto local = localObject()->get(field); + if (local) { + return local; + } + if (auto ext = extension()) { + return ext->get(field); + } + return {}; +} + +simfil::StringId AttributeLayerList::keyAt(int64_t i) const +{ + if (i < 0) { + return {}; + } + + auto localSize = static_cast(localObject()->size()); + if (i < localSize) { + return localObject()->keyAt(i); + } + + if (auto ext = extension()) { + return ext->keyAt(i - localSize); + } + return {}; +} + +bool AttributeLayerList::iterate(simfil::ModelNode::IterCallback const& cb) const +{ + if (!localObject()->iterate(cb)) { + return false; + } + if (auto ext = extension()) { + return ext->iterate(cb); + } + return true; +} + } diff --git a/libs/model/src/feature.cpp b/libs/model/src/feature.cpp index 11b4b76f..923aa825 100644 --- a/libs/model/src/feature.cpp +++ b/libs/model/src/feature.cpp @@ -8,85 +8,208 @@ #include "stringpool.h" #include "tl/expected.hpp" +#include +#include + namespace mapget { -Feature::Feature(Feature::Data& d, simfil::ModelConstPtr l, simfil::ModelNodeAddress a) - : simfil::MandatoryDerivedModelNodeBase(std::move(l), a), data_(&d) +namespace { - updateFields(); +model_ptr resolveFeatureByRootIndex(TileFeatureLayer const& model, uint32_t index) +{ + auto rootResult = model.root(index); + if (!rootResult || !*rootResult) { + return {}; + } + return model.resolve(**rootResult); +} +} + +uint32_t RelationArrayView::localMergedSize() const +{ + auto feature = resolveFeatureByRootIndex(model(), addr().index()); + if (!feature) { + return 0; + } + if (auto rel = feature->relationsOrNull()) { + return rel->size(); + } + return 0; +} + +simfil::ModelNode::Ptr RelationArrayView::localMergedAt(int64_t i) const +{ + if (i < 0) { + return {}; + } + auto feature = resolveFeatureByRootIndex(model(), 
addr().index()); + if (!feature) { + return {}; + } + auto rel = feature->relationsOrNull(); + if (!rel || i >= static_cast(rel->size())) { + return {}; + } + return rel->at(i); +} + +bool RelationArrayView::localMergedIterate(simfil::ModelNode::IterCallback const& cb) const +{ + auto feature = resolveFeatureByRootIndex(model(), addr().index()); + if (!feature) { + return true; + } + if (auto rel = feature->relationsOrNull()) { + return rel->iterate(cb); + } + return true; +} + +Feature::Feature(Feature::BasicData& d, + Feature::ComplexData* c, + simfil::ModelConstPtr l, + simfil::ModelNodeAddress a, + simfil::detail::mp_key key) + : simfil::MandatoryDerivedModelNodeBase(std::move(l), a, key), + basicData_(&d), + complexData_(c) +{ + fieldsDirty_ = true; } model_ptr Feature::id() const { - return model().resolveFeatureId(*Ptr::make(model_, data_->id_)); + auto featureIdAddress = featureIdNodeAddress(); + if (featureIdAddress) { + return model().resolve(featureIdAddress); + } + if (auto ext = extension()) { + return ext->id(); + } + return {}; } std::string_view mapget::Feature::typeId() const { - return model().resolveFeatureId(*Ptr::make(model_, data_->id_))->typeId(); + if (basicData_) { + if (auto s = model().strings()->resolve(basicData_->typeIdAndLod_.typeId_)) + return *s; + } + return id()->typeId(); +} + +Feature::LOD Feature::lod() const +{ + if (!basicData_) { + return MAX_LOD; + } + auto const raw = std::min( + basicData_->typeIdAndLod_.lod_, + static_cast(MAX_LOD)); + return static_cast(raw); +} + +void Feature::setLod(LOD newLod) +{ + if (!basicData_) { + return; + } + basicData_->typeIdAndLod_.lod_ = static_cast(newLod); + fieldsDirty_ = true; } model_ptr Feature::geom() { - if (!data_->geom_) { + auto& geomAddress = geometryNodeAddress(); + if (!geomAddress) { auto result = model().newGeometryCollection(); - data_->geom_ = result->addr(); - updateFields(); + geomAddress = result->addr(); + fieldsDirty_ = true; return result; } + 
materializeGeometryCollection(); return const_cast(this)->geomOrNull(); } model_ptr Feature::geomOrNull() const { - if (!data_->geom_) - return {}; - return model().resolveGeometryCollection(*Ptr::make(model_, data_->geom_)); + model_ptr local; + auto localGeomAddress = geometryNodeAddress(); + if (localGeomAddress) { + local = model().resolve(localGeomAddress); + } + + auto extFeature = extension(); + auto ext = extFeature ? extFeature->geomOrNull() : model_ptr{}; + if (!local) { + return ext; + } + local->setExtension(ext); + return local; } model_ptr Feature::attributeLayers() { - if (!data_->attrLayers_) { + if (!attributeLayerNodeAddress()) { auto result = model().newAttributeLayers(); - data_->attrLayers_ = result->addr(); - updateFields(); + attributeLayerNodeAddress() = result->addr(); + fieldsDirty_ = true; return result; } - return const_cast(this)->attributeLayersOrNull(); + return attributeLayersOrNull(); } model_ptr Feature::attributeLayersOrNull() const { - if (!data_->attrLayers_) - return {}; - return model().resolveAttributeLayerList(*Ptr::make(model_, data_->attrLayers_)); + model_ptr local; + if (auto localAddress = attributeLayerNodeAddress()) { + local = model().resolve(localAddress); + } + + auto extFeature = extension(); + auto ext = extFeature ? 
extFeature->attributeLayersOrNull() : model_ptr{}; + if (!local) { + return ext; + } + local->setExtension(ext); + return local; } model_ptr Feature::attributes() { - if (!data_->attrs_) { + if (!attributeNodeAddress()) { auto result = model().newObject(8); - data_->attrs_ = result->addr(); - updateFields(); + attributeNodeAddress() = result->addr(); + fieldsDirty_ = true; return result; } - return const_cast(this)->attributesOrNull(); + return attributesOrNull(); } model_ptr Feature::attributesOrNull() const { - if (!data_->attrs_) + auto localAddress = attributeNodeAddress(); + if (!localAddress) + return {}; + return model().resolve(localAddress); +} + +model_ptr Feature::mergedAttributesOrNull() const +{ + auto extFeature = extension(); + if (!attributeNodeAddress() && !extFeature) { return {}; - return model().resolveObject(Ptr::make(model_, data_->attrs_)); + } + return model_ptr::make(model_, addr()); } model_ptr Feature::relations() { - if (!data_->relations_) { + if (!relationNodeAddress()) { auto result = model().newArray(8); - data_->relations_ = result->addr(); - updateFields(); + relationNodeAddress() = result->addr(); + fieldsDirty_ = true; return result; } return const_cast(this)->relationsOrNull(); @@ -94,9 +217,24 @@ model_ptr Feature::relations() model_ptr Feature::relationsOrNull() const { - if (!data_->relations_) + auto localAddress = relationNodeAddress(); + if (!localAddress) + return {}; + return model().resolve(localAddress); +} + +model_ptr Feature::mergedRelationsOrNull() const +{ + auto extFeature = extension(); + auto ext = extFeature ? 
extFeature->mergedRelationsOrNull() : model_ptr{}; + if (!relationNodeAddress() && !ext) { return {}; - return model().resolveArray(Ptr::make(model_, data_->relations_)); + } + auto result = model_ptr::make( + model_, + simfil::ModelNodeAddress{TileFeatureLayer::ColumnId::FeatureRelationsView, addr().index()}); + result->setExtension(ext); + return result; } tl::expected, simfil::Error> @@ -132,7 +270,9 @@ simfil::ValueType Feature::type() const simfil::ModelNode::Ptr Feature::at(int64_t i) const { - if (data_->sourceData_) { + ensureFieldsReady(); + auto sourceDataAddress = sourceDataNodeAddress(); + if (sourceDataAddress) { if (i == 0) return get(StringPool::SourceDataStr); i -= 1; @@ -144,13 +284,19 @@ simfil::ModelNode::Ptr Feature::at(int64_t i) const uint32_t Feature::size() const { - return fields_.size() + (data_->sourceData_ ? 1 : 0); + ensureFieldsReady(); + return fields_.size() + (sourceDataNodeAddress() ? 1 : 0); } simfil::ModelNode::Ptr Feature::get(const simfil::StringId& f) const { - if (f == StringPool::SourceDataStr) - return ModelNode::Ptr::make(model().shared_from_this(), data_->sourceData_); + ensureFieldsReady(); + if (f == StringPool::SourceDataStr) { + auto sourceDataAddress = sourceDataNodeAddress(); + if (sourceDataAddress) { + return model().resolve(sourceDataAddress); + } + } for (auto const& [fieldName, fieldValue] : fields_) if (fieldName == f) @@ -161,7 +307,8 @@ simfil::ModelNode::Ptr Feature::get(const simfil::StringId& f) const simfil::StringId Feature::keyAt(int64_t i) const { - if (data_->sourceData_) { + ensureFieldsReady(); + if (sourceDataNodeAddress()) { if (i == 0) return StringPool::SourceDataStr; i -= 1; @@ -173,6 +320,7 @@ simfil::StringId Feature::keyAt(int64_t i) const bool Feature::iterate(const simfil::ModelNode::IterCallback& cb) const { + ensureFieldsReady(); for (auto i = 0; i < size(); ++i) if (!cb(*at(i))) return false; @@ -180,21 +328,126 @@ bool Feature::iterate(const simfil::ModelNode::IterCallback& cb) const 
return true; } -void Feature::updateFields() { +simfil::ModelNodeAddress Feature::featureIdNodeAddress() const +{ + using Col = TileFeatureLayer::ColumnId; + if (!basicData_) { + return {}; + } + return {Col::FeatureIds, addr_.index()}; +} + +simfil::ModelNodeAddress Feature::sourceDataNodeAddress() const +{ + if (!complexData_ && basicData_) { + complexData_ = model().featureComplexDataOrNull(addr().index()); + } + if (complexData_) { + return complexData_->sourceData_; + } + return {}; +} + +simfil::ModelNodeAddress Feature::geometryNodeAddress() const +{ + return basicData_ ? basicData_->geom_ : simfil::ModelNodeAddress{}; +} + +simfil::ModelNodeAddress& Feature::geometryNodeAddress() +{ + if (!basicData_) { + throw std::runtime_error("Feature has no mutable geometry storage."); + } + return basicData_->geom_; +} + +simfil::ModelNodeAddress Feature::attributeLayerNodeAddress() const +{ + if (!complexData_ && basicData_) { + complexData_ = model().featureComplexDataOrNull(addr().index()); + } + if (complexData_) { + return complexData_->attrLayers_; + } + return {}; +} + +simfil::ModelNodeAddress& Feature::attributeLayerNodeAddress() +{ + if (!basicData_) { + throw std::runtime_error("Feature has no mutable attribute-layer storage."); + } + if (!complexData_) { + complexData_ = &model().ensureFeatureComplexData(addr().index()); + } + return complexData_->attrLayers_; +} + +simfil::ModelNodeAddress Feature::attributeNodeAddress() const +{ + if (!complexData_ && basicData_) { + complexData_ = model().featureComplexDataOrNull(addr().index()); + } + if (complexData_) { + return complexData_->attrs_; + } + return {}; +} + +simfil::ModelNodeAddress& Feature::attributeNodeAddress() +{ + if (!basicData_) { + throw std::runtime_error("Feature has no mutable attribute storage."); + } + if (!complexData_) { + complexData_ = &model().ensureFeatureComplexData(addr().index()); + } + return complexData_->attrs_; +} + +simfil::ModelNodeAddress Feature::relationNodeAddress() 
const +{ + if (!complexData_ && basicData_) { + complexData_ = model().featureComplexDataOrNull(addr().index()); + } + if (complexData_) { + return complexData_->relations_; + } + return {}; +} + +simfil::ModelNodeAddress& Feature::relationNodeAddress() +{ + if (!basicData_) { + throw std::runtime_error("Feature has no mutable relation storage."); + } + if (!complexData_) { + complexData_ = &model().ensureFeatureComplexData(addr().index()); + } + return complexData_->relations_; +} + +void Feature::updateFields() const { fields_.clear(); // Add type field - fields_.emplace_back(StringPool::TypeStr, simfil::ValueNode(std::string_view("Feature"), model_)); + fields_.emplace_back( + StringPool::TypeStr, + simfil::model_ptr::make(std::string_view("Feature"), model_)); // Add id field - fields_.emplace_back(StringPool::IdStr, Ptr::make(model_, data_->id_)); - auto idNode = model().resolveFeatureId(*fields_.back().second); + fields_.emplace_back(StringPool::IdStr, Ptr::make(model_, featureIdNodeAddress())); + auto idNode = model().resolve(*fields_.back().second); // Add type id field fields_.emplace_back( StringPool::TypeIdStr, model_ptr::make(idNode->typeId(), model_)); + fields_.emplace_back( + StringPool::LodStr, + model_ptr::make(static_cast(lod()), model_)); + // Add map and layer ids. 
fields_.emplace_back( StringPool::MapIdStr, @@ -216,52 +469,135 @@ void Feature::updateFields() { } // Add other fields - if (data_->geom_) - fields_.emplace_back(StringPool::GeometryStr, Ptr::make(model_, data_->geom_)); - if (data_->attrLayers_ || data_->attrs_) + if (auto geomNode = geomOrNull()) { + fields_.emplace_back(StringPool::GeometryStr, geomNode); + } + bool hasExtensionProperties = false; + if (auto extFeature = extension()) { + hasExtensionProperties = + extFeature->attributeLayersOrNull() || + extFeature->attributesOrNull(); + } + auto const localAttrLayerAddress = attributeLayerNodeAddress(); + auto const localAttrAddress = attributeNodeAddress(); + if (localAttrLayerAddress || localAttrAddress || hasExtensionProperties) fields_.emplace_back( StringPool::PropertiesStr, Ptr::make( model_, simfil::ModelNodeAddress{TileFeatureLayer::ColumnId::FeatureProperties, addr().index()})); - if (data_->relations_) - fields_.emplace_back(StringPool::RelationsStr, Ptr::make(model_, data_->relations_)); + if (auto rel = mergedRelationsOrNull()) { + fields_.emplace_back(StringPool::RelationsStr, rel); + } + fieldsDirty_ = false; } nlohmann::json Feature::toJson() const { - return simfil::MandatoryDerivedModelNodeBase::toJson(); + auto json = simfil::MandatoryDerivedModelNodeBase::toJson(); + json.erase("lod"); + return json; } void Feature::addPoint(const Point& p) { - auto newGeom = geom()->newGeometry(GeomType::Points, 0); + auto newGeom = appendGeometry(GeomType::Points, 1, true); newGeom->append(p); } +void Feature::addGeometry(const model_ptr& geom) +{ + if (!geom) { + return; + } + + auto& geomAddress = geometryNodeAddress(); + if (!geomAddress) { + geomAddress = geom->addr(); + fieldsDirty_ = true; + return; + } + + materializeGeometryCollection(); + if (geometryNodeAddress().column() != TileFeatureLayer::ColumnId::GeometryCollections) { + simfil::raise( + "Feature geometry reference is neither Geometry nor GeometryCollection."); + } + + auto collection = 
model().resolve(geometryNodeAddress()); + collection->addGeometry(geom); +} + void Feature::addPoints(const std::vector& points) { - auto newGeom = geom()->newGeometry(GeomType::Points, points.size()-1); + auto newGeom = appendGeometry(GeomType::Points, points.size()); for (auto const& p : points) newGeom->append(p); } void Feature::addLine(const std::vector& points) { - auto newGeom = geom()->newGeometry(GeomType::Line, points.size()-1); + auto newGeom = appendGeometry(GeomType::Line, points.size()); for (auto const& p : points) newGeom->append(p); } void Feature::addMesh(const std::vector& points) { - auto newGeom = geom()->newGeometry(GeomType::Mesh, points.size()-1); + auto newGeom = appendGeometry(GeomType::Mesh, points.size()); for (auto const& p : points) newGeom->append(p); } void Feature::addPoly(const std::vector& points) { - auto newGeom = geom()->newGeometry(GeomType::Polygon, points.size()-1); + auto newGeom = appendGeometry(GeomType::Polygon, points.size()); for (auto const& p : points) newGeom->append(p); } +void Feature::materializeGeometryCollection() +{ + auto const isBaseGeometryColumn = [](uint8_t column) { + using Col = TileFeatureLayer::ColumnId; + return column == Col::PointGeometries || + column == Col::LineGeometries || + column == Col::PolygonGeometries || + column == Col::MeshGeometries; + }; + auto currentGeomAddress = geometryNodeAddress(); + if (!currentGeomAddress || + (!isBaseGeometryColumn(currentGeomAddress.column()) && + currentGeomAddress.column() != TileFeatureLayer::ColumnId::GeometryViews)) { + return; + } + auto existingGeometry = model().resolve(currentGeomAddress); + auto collection = model().newGeometryCollection(2); + collection->addGeometry(existingGeometry); + geometryNodeAddress() = collection->addr(); + fieldsDirty_ = true; +} + +model_ptr Feature::appendGeometry( + GeomType type, + size_t initialCapacity, + bool fixedSize) +{ + auto& geomAddress = geometryNodeAddress(); + if (!geomAddress) { + auto geom = 
model().newGeometry(type, initialCapacity, fixedSize); + geomAddress = geom->addr(); + fieldsDirty_ = true; + return geom; + } + + materializeGeometryCollection(); + if (geometryNodeAddress().column() != TileFeatureLayer::ColumnId::GeometryCollections) { + simfil::raise( + "Feature geometry reference is neither Geometry nor GeometryCollection."); + } + + auto collection = model().resolve(geometryNodeAddress()); + auto geom = model().newGeometry(type, initialCapacity, fixedSize); + collection->addGeometry(geom); + return geom; +} + model_ptr Feature::addRelation( const std::string_view& name, const std::string_view& targetType, @@ -283,26 +619,47 @@ model_ptr Feature::addRelation(const model_ptr& relation) uint32_t Feature::numRelations() const { - if (data_->relations_) - return relationsOrNull()->size(); - return 0; + auto localCount = relationNodeAddress() ? relationsOrNull()->size() : 0U; + if (auto extFeature = extension()) { + localCount += extFeature->numRelations(); + } + return localCount; } model_ptr Feature::getRelation(uint32_t index) const { - if (data_->relations_) - return model().resolveRelation(*relationsOrNull()->at(index)); + if (relationNodeAddress()) { + auto localRelations = relationsOrNull(); + auto localCount = localRelations->size(); + if (index < localCount) { + return model().resolve(*localRelations->at(index)); + } + index -= localCount; + } + + if (auto extFeature = extension()) { + return extFeature->getRelation(index); + } return {}; } bool Feature::forEachRelation(std::function&)> const& callback) const { - auto relationsPtr = relationsOrNull(); - if (!relationsPtr || !callback) + if (!callback) return true; - for (auto const& relation : *relationsPtr) { - if (!callback(model().resolveRelation(*relation))) + + if (relationNodeAddress()) { + auto relationsPtr = relationsOrNull(); + for (auto const& relation : *relationsPtr) { + if (!callback(model().resolve(*relation))) + return false; + } + } + + if (auto extFeature = extension()) { + 
if (!extFeature->forEachRelation(callback)) { return false; + } } return true; } @@ -323,6 +680,32 @@ SelfContainedGeometry Feature::firstGeometry() const return {}; } +SelfContainedGeometry Feature::preferredGeometry() const +{ + model_ptr result; + if (auto geometryCollection = geomOrNull()) { + geometryCollection->forEachGeometryAtPreferredStage( + std::nullopt, + [&result](auto&& geometry) + { + result = geometry; + return false; + }); + if (!result) { + geometryCollection->forEachGeometry( + [&result](auto&& geometry) + { + result = geometry; + return false; + }); + } + } + if (result) { + return result->toSelfContained(); + } + return {}; +} + std::optional>> Feature::filterRelations(const std::string_view& name) const { @@ -342,29 +725,216 @@ Feature::filterRelations(const std::string_view& name) const model_ptr Feature::sourceDataReferences() const { - if (data_->sourceData_) - return model().resolveSourceDataReferenceCollection(*model_ptr::make(model_, data_->sourceData_)); + if (auto sourceDataAddress = sourceDataNodeAddress()) + return model().resolve( + *model_ptr::make(model_, sourceDataAddress)); return {}; } void Feature::setSourceDataReferences(simfil::ModelNode::Ptr const& addresses) { - data_->sourceData_ = addresses->addr(); + if (!basicData_) { + throw std::runtime_error("Cannot attach source-data references to a feature without basic storage."); + } + if (!complexData_) { + complexData_ = &model().ensureFeatureComplexData(addr().index()); + } + complexData_->sourceData_ = addresses->addr(); +} + +void Feature::refreshExtensionBindingFromOverlay() const +{ + if (extensionModel_ || extensionAddress_) { + return; + } + if (addr().column() != TileFeatureLayer::ColumnId::Features) { + return; + } + auto const overlay = model().overlay(); + if (!overlay || addr().index() >= overlay->size()) { + return; + } + extensionModel_ = overlay.get(); + extensionAddress_ = simfil::ModelNodeAddress{ + TileFeatureLayer::ColumnId::Features, + 
static_cast(addr().index())}; + fieldsDirty_ = true; +} + +model_ptr Feature::extension() const +{ + refreshExtensionBindingFromOverlay(); + if (!extensionModel_ || !extensionAddress_) { + return {}; + } + return extensionModel_->resolve(extensionAddress_); +} + +void Feature::setExtension(model_ptr extension) +{ + if (!extension) { + extensionModel_ = nullptr; + extensionAddress_ = {}; + fieldsDirty_ = true; + return; + } + extensionModel_ = &extension->model(); + extensionAddress_ = extension->addr(); + fieldsDirty_ = true; +} + +void Feature::setExtensionAddress(TileFeatureLayer const* extensionModel, simfil::ModelNodeAddress extensionAddress) +{ + if (!extensionModel || !extensionAddress) { + extensionModel_ = nullptr; + extensionAddress_ = {}; + fieldsDirty_ = true; + return; + } + extensionModel_ = extensionModel; + extensionAddress_ = extensionAddress; + fieldsDirty_ = true; +} + +void Feature::ensureFieldsReady() const +{ + refreshExtensionBindingFromOverlay(); + if (!fieldsDirty_) { + return; + } + updateFields(); } ////////////////////////////////////////// -Feature::FeaturePropertyView::FeaturePropertyView( - Feature::Data& d, - simfil::ModelConstPtr l, - simfil::ModelNodeAddress a -) - : simfil::MandatoryDerivedModelNodeBase(std::move(l), a), data_(&d) +Feature::MergedBasicAttributesView::MergedBasicAttributesView( + simfil::ModelConstPtr model, + simfil::ModelNodeAddress address, + simfil::detail::mp_key key) + : simfil::MandatoryDerivedModelNodeBase(std::move(model), address, key) { - if (data_->attrs_) - attrs_ = model().resolveObject(Ptr::make(model_, data_->attrs_)); } +void Feature::MergedBasicAttributesView::ensureMergedFieldsReady() const +{ + if (!mergedFieldsDirty_) { + return; + } + rebuildMergedFields(); +} + +void Feature::MergedBasicAttributesView::rebuildMergedFields() const +{ + mergedFields_.clear(); + + model_ptr feature; + if (addr().column() == TileFeatureLayer::ColumnId::Features) { + feature = model().resolve(addr()); + } + else { 
+ auto rootResult = model().root(addr().index()); + if (rootResult && *rootResult) { + feature = model().resolve(**rootResult); + } + } + + if (feature) { + if (auto attrs = feature->attributesOrNull()) { + mergedFields_.reserve(attrs->size()); + for (auto i = 0U; i < attrs->size(); ++i) { + mergedFields_.emplace_back( + attrs->keyAt(static_cast(i)), + attrs->at(static_cast(i))); + } + } + + if (auto extFeature = feature->extension()) { + if (auto ext = extFeature->mergedAttributesOrNull()) { + ext->ensureMergedFieldsReady(); + for (auto const& [key, value] : ext->mergedFields_) { + auto it = std::find_if( + mergedFields_.begin(), + mergedFields_.end(), + [&](const AttrField& existing) { return existing.first == key; }); + if (it == mergedFields_.end()) { + mergedFields_.emplace_back(key, value); + } + else { + it->second = value; + } + } + } + } + } + + mergedFieldsDirty_ = false; +} + +simfil::ValueType Feature::MergedBasicAttributesView::type() const +{ + return simfil::ValueType::Object; +} + +simfil::ModelNode::Ptr Feature::MergedBasicAttributesView::at(int64_t i) const +{ + ensureMergedFieldsReady(); + if (i < 0 || i >= static_cast(mergedFields_.size())) { + return {}; + } + return mergedFields_[static_cast(i)].second; +} + +uint32_t Feature::MergedBasicAttributesView::size() const +{ + ensureMergedFieldsReady(); + return static_cast(mergedFields_.size()); +} + +simfil::ModelNode::Ptr Feature::MergedBasicAttributesView::get(const simfil::StringId& f) const +{ + ensureMergedFieldsReady(); + auto it = std::find_if( + mergedFields_.begin(), + mergedFields_.end(), + [&](const AttrField& field) { return field.first == f; }); + if (it == mergedFields_.end()) { + return {}; + } + return it->second; +} + +simfil::StringId Feature::MergedBasicAttributesView::keyAt(int64_t i) const +{ + ensureMergedFieldsReady(); + if (i < 0 || i >= static_cast(mergedFields_.size())) { + return {}; + } + return mergedFields_[static_cast(i)].first; +} + +bool 
Feature::MergedBasicAttributesView::iterate(const simfil::ModelNode::IterCallback& cb) const +{ + ensureMergedFieldsReady(); + for (auto const& [_, value] : mergedFields_) { + if (!value || !cb(*value)) { + return false; + } + } + return true; +} + +////////////////////////////////////////// + +Feature::FeaturePropertyView::FeaturePropertyView( + model_ptr feature, + simfil::detail::mp_key key +) + : simfil::MandatoryDerivedModelNodeBase( + feature->model().shared_from_this(), + feature->addr(), + key) +{} + simfil::ValueType Feature::FeaturePropertyView::type() const { return simfil::ValueType::Object; @@ -372,50 +942,82 @@ simfil::ValueType Feature::FeaturePropertyView::type() const simfil::ModelNode::Ptr Feature::FeaturePropertyView::at(int64_t i) const { - if (data_->attrLayers_) { + auto feature = resolveFeatureByRootIndex(model(), addr().index()); + if (!feature) { + return {}; + } + auto mergedLayers = feature->attributeLayersOrNull(); + if (mergedLayers) { if (i == 0) - return Ptr::make(model_, data_->attrLayers_); + return mergedLayers; i -= 1; } - if (attrs_) - return attrs_->at(i); + if (auto mergedAttrs = feature->mergedAttributesOrNull()) { + return mergedAttrs->at(i); + } return {}; } uint32_t Feature::FeaturePropertyView::size() const { - return (data_->attrLayers_ ? 1 : 0) + (attrs_ ? attrs_->size() : 0); + auto feature = resolveFeatureByRootIndex(model(), addr().index()); + if (!feature) { + return 0; + } + auto mergedAttrs = feature->mergedAttributesOrNull(); + return (feature->attributeLayersOrNull() ? 1 : 0) + (mergedAttrs ? 
mergedAttrs->size() : 0U); } simfil::ModelNode::Ptr Feature::FeaturePropertyView::get(const simfil::StringId& f) const { - if (f == StringPool::LayerStr && data_->attrLayers_) - return Ptr::make(model_, data_->attrLayers_); - if (attrs_) - return attrs_->get(f); + auto feature = resolveFeatureByRootIndex(model(), addr().index()); + if (!feature) { + return {}; + } + if (f == StringPool::LayerStr) { + auto mergedLayers = feature->attributeLayersOrNull(); + if (mergedLayers) { + return mergedLayers; + } + } + if (auto mergedAttrs = feature->mergedAttributesOrNull()) { + return mergedAttrs->get(f); + } return {}; } simfil::StringId Feature::FeaturePropertyView::keyAt(int64_t i) const { - if (data_->attrLayers_) { + auto feature = resolveFeatureByRootIndex(model(), addr().index()); + if (!feature) { + return {}; + } + if (feature->attributeLayersOrNull()) { if (i == 0) return StringPool::LayerStr; i -= 1; } - if (attrs_) - return attrs_->keyAt(i); + if (auto mergedAttrs = feature->mergedAttributesOrNull()) { + return mergedAttrs->keyAt(i); + } return {}; } bool Feature::FeaturePropertyView::iterate(const simfil::ModelNode::IterCallback& cb) const { - if (data_->attrLayers_) { - if (!cb(*model().resolveAttributeLayerList(*Ptr::make(model_, data_->attrLayers_)))) + auto feature = resolveFeatureByRootIndex(model(), addr().index()); + if (!feature) { + return true; + } + if (auto mergedLayers = feature->attributeLayersOrNull()) { + if (!cb(*mergedLayers)) + return false; + } + if (auto mergedAttrs = feature->mergedAttributesOrNull()) { + if (!mergedAttrs->iterate(cb)) { return false; + } } - if (attrs_) - return attrs_->iterate(cb); return true; } diff --git a/libs/model/src/featureid.cpp b/libs/model/src/featureid.cpp index ebd3f877..fd3d8320 100644 --- a/libs/model/src/featureid.cpp +++ b/libs/model/src/featureid.cpp @@ -1,22 +1,208 @@ #include "featureid.h" #include "featurelayer.h" +#include #include +#include "mapget/log.h" + namespace mapget { 
-FeatureId::FeatureId(FeatureId::Data& data, simfil::ModelConstPtr l, simfil::ModelNodeAddress a) - : simfil::MandatoryDerivedModelNodeBase(l, a), - data_(&data), - fields_(model().resolveObject(Ptr::make(l, data_->idParts_))) +namespace +{ +std::vector const* resolveComposition( + TileFeatureLayer const& model, + simfil::StringId typeId, + uint8_t idCompositionIndex) +{ + auto typeName = model.strings()->resolve(typeId); + if (!typeName) { + return nullptr; + } + + auto typeInfo = model.layerInfo()->getTypeInfo(*typeName, false); + if (!typeInfo || typeInfo->uniqueIdCompositions_.empty()) { + return nullptr; + } + + auto const compositionIndex = std::min( + idCompositionIndex, + typeInfo->uniqueIdCompositions_.size() - 1U); + return &typeInfo->uniqueIdCompositions_[compositionIndex]; +} + +void resolveVisiblePartLayout( + TileFeatureLayer const& model, + FeatureId::Data const& data, + model_ptr const& values, + std::vector& partNames, + std::vector& visibleValueIndices) +{ + partNames.clear(); + visibleValueIndices.clear(); + + if (!values) { + return; + } + + auto const* composition = resolveComposition(model, data.typeId_, data.idCompositionIndex_); + if (!composition) { + return; + } + + uint32_t localStartIndex = 0U; + if (data.useCommonTilePrefix_) { + KeyValueViewPairs prefixFeatureIdParts; + if (auto const idPrefix = model.getIdPrefix()) { + prefixFeatureIdParts.reserve(idPrefix->size()); + for (auto const& [key, value] : idPrefix->fields()) { + auto const keyStr = model.strings()->resolve(key); + if (!keyStr || !value) { + continue; + } + + std::visit( + [&](auto&& v) + { + using T = std::decay_t; + if constexpr (std::is_same_v || + std::is_same_v || + std::is_same_v) { + } + else if constexpr (std::is_same_v || + std::is_same_v) { + prefixFeatureIdParts.emplace_back(*keyStr, std::string_view(v)); + } + else { + prefixFeatureIdParts.emplace_back(*keyStr, static_cast(v)); + } + }, + value->value()); + } + } + + if (!prefixFeatureIdParts.empty()) { + auto 
const matchEndIndex = IdPart::compositionMatchEndIndex( + *composition, + 0, + prefixFeatureIdParts, + prefixFeatureIdParts.size()); + if (!matchEndIndex) { + return; + } + localStartIndex = *matchEndIndex; + } + } + + auto const maxSlots = std::min( + values->size(), + static_cast(composition->size()) - std::min( + localStartIndex, + static_cast(composition->size()))); + + for (uint32_t slot = 0; slot < maxSlots; ++slot) { + auto const valueNode = values->at(static_cast(slot)); + if (!valueNode || + std::holds_alternative(valueNode->value())) { + continue; + } + + auto sid = model.strings()->emplace((*composition)[localStartIndex + slot].idPartLabel_); + if (!sid) { + break; + } + partNames.push_back(*sid); + visibleValueIndices.push_back(slot); + } +} + +template +void appendTypedKeyValue( + TileFeatureLayer const& model, + simfil::StringId key, + simfil::ModelNode::Ptr const& valueNode, + Fn&& fn) { + auto keyStr = model.strings()->resolve(key); + if (!keyStr || !valueNode) { + return; + } + + std::visit( + [&](auto&& v) + { + using T = std::decay_t; + if constexpr (std::is_same_v) { + raiseFmt("FeatureId part '{}' cannot be a ByteArray.", *keyStr); + } + else if constexpr (!std::is_same_v && !std::is_same_v) { + fn(*keyStr, v); + } + }, + valueNode->value()); +} + +void appendNodeValueToString(std::stringstream& out, simfil::ModelNode::Ptr const& node) +{ + if (!node) { + return; + } + + std::visit( + [&out](auto&& v) + { + using T = std::decay_t; + if constexpr (std::is_same_v) { + raiseFmt("FeatureId part value 'b\"{}\"' cannot be a ByteArray.", v.toHex()); + } + else if constexpr (!std::is_same_v) { + out << "." 
<< v; + } + }, + node->value()); +} +} + +FeatureId::FeatureId(FeatureId::Data& data, + simfil::ModelConstPtr l, + simfil::ModelNodeAddress a, + simfil::detail::mp_key key) + : simfil::MandatoryDerivedModelNodeBase(l, a, key), + data_(data) +{ + if (data_.idPartValues_ != simfil::InvalidArrayIndex) { + values_ = model().resolve( + simfil::ModelNodeAddress{ + simfil::ModelPool::ColumnId::Arrays, + static_cast(data_.idPartValues_)}); + } + + resolveVisiblePartLayout(model(), data_, values_, partNames_, visibleValueIndices_); +} + +FeatureId::FeatureId(FeatureId::Data const& data, + simfil::ModelConstPtr l, + simfil::ModelNodeAddress a, + simfil::detail::mp_key key) + : simfil::MandatoryDerivedModelNodeBase(l, a, key), + data_(data) +{ + if (data_.idPartValues_ != simfil::InvalidArrayIndex) { + values_ = model().resolve( + simfil::ModelNodeAddress{ + simfil::ModelPool::ColumnId::Arrays, + static_cast(data_.idPartValues_)}); + } + + resolveVisiblePartLayout(model(), data_, values_, partNames_, visibleValueIndices_); } std::string_view FeatureId::typeId() const { - if (auto s = model().strings()->resolve(data_->typeId_)) + if (auto s = model().strings()->resolve(data_.typeId_)) { return *s; + } return "err-unresolved-typename"; } @@ -25,23 +211,19 @@ std::string FeatureId::toString() const std::stringstream result; result << typeId(); - auto addIdPart = [&result](auto&& v) - { - if constexpr (!std::is_same_v, std::monostate>) - result << "." 
<< v; - }; - - // Add common id-part fields - if (data_->useCommonTilePrefix_) { + if (data_.useCommonTilePrefix_) { if (auto idPrefix = model().getIdPrefix()) { - for (auto const& [_, value] : idPrefix->fields()) - std::visit(addIdPart, value->value()); + for (auto const& [_, value] : idPrefix->fields()) { + appendNodeValueToString(result, value); + } } } - // Add specific id-part fields - for (auto const& [_, value] : fields()) - std::visit(addIdPart, value->value()); + if (values_) { + for (auto const& value : *values_) { + appendNodeValueToString(result, value); + } + } return result.str(); } @@ -58,26 +240,48 @@ simfil::ScalarValueType FeatureId::value() const simfil::ModelNode::Ptr FeatureId::at(int64_t i) const { - return fields_->at(i); + if (i < 0 || !values_ || i >= static_cast(visibleValueIndices_.size())) { + return {}; + } + return values_->at(static_cast(visibleValueIndices_[static_cast(i)])); } uint32_t FeatureId::size() const { - return fields_->size(); + return static_cast(visibleValueIndices_.size()); } simfil::ModelNode::Ptr FeatureId::get(const simfil::StringId& f) const { - return fields_->get(f); + if (!values_) { + return {}; + } + + for (size_t i = 0; i < partNames_.size(); ++i) { + if (partNames_[i] == f && i < visibleValueIndices_.size()) { + return values_->at(static_cast(visibleValueIndices_[i])); + } + } + + return {}; } simfil::StringId FeatureId::keyAt(int64_t i) const { - return fields_->keyAt(i); + if (i < 0 || i >= static_cast(partNames_.size())) { + return {}; + } + return partNames_[static_cast(i)]; } bool FeatureId::iterate(const simfil::ModelNode::IterCallback& cb) const { + for (auto i = 0U; i < size(); ++i) { + auto node = at(static_cast(i)); + if (node && !cb(*node)) { + return false; + } + } return true; } @@ -85,31 +289,27 @@ KeyValueViewPairs FeatureId::keyValuePairs() const { KeyValueViewPairs result; - auto objectFieldsToKeyValuePairs = [&result, this](simfil::ModelNode::FieldRange fields){ - for (auto const& [key, 
value] : fields) { - auto keyStr = model().strings()->resolve(key); - std::visit( - [&result, &keyStr](auto&& v) - { - if constexpr (!std::is_same_v, std::monostate> && !std::is_same_v, double>) { - result.emplace_back(*keyStr, v); - } - }, - value->value()); - } - }; - - // Add common id-part fields. - if (data_->useCommonTilePrefix_) { + if (data_.useCommonTilePrefix_) { if (auto idPrefix = model().getIdPrefix()) { - objectFieldsToKeyValuePairs(idPrefix->fields()); + for (auto const& [key, value] : idPrefix->fields()) { + appendTypedKeyValue(model(), key, value, [&](std::string_view keyName, auto&& v) { + result.emplace_back(keyName, v); + }); + } } } - // Add specific id-part fields. - objectFieldsToKeyValuePairs(fields()); + if (values_) { + auto const limit = std::min(partNames_.size(), visibleValueIndices_.size()); + for (size_t i = 0; i < limit; ++i) { + auto valueNode = values_->at(static_cast(visibleValueIndices_[i])); + appendTypedKeyValue(model(), partNames_[i], valueNode, [&](std::string_view keyName, auto&& v) { + result.emplace_back(keyName, v); + }); + } + } - return std::move(result); + return result; } } diff --git a/libs/model/src/featurelayer.cpp b/libs/model/src/featurelayer.cpp index 826883b4..e05c6a58 100644 --- a/libs/model/src/featurelayer.cpp +++ b/libs/model/src/featurelayer.cpp @@ -1,21 +1,28 @@ #include "featurelayer.h" #include +#include #include #include #include #include +#include #include #include +#include #include #include +#include +#include +#include #include #include +#include #include +#include #include #include -#include "sfl/segmented_vector.hpp" #include "simfil/environment.h" #include "simfil/model/arena.h" @@ -34,23 +41,19 @@ namespace bitsery { template -void serialize(S& s, glm::fvec3& v) { +void serialize(S& s, glm::vec3& v) { s.value4b(v.x); s.value4b(v.y); s.value4b(v.z); } -template -void serialize(S& s, mapget::Point& v) { - s.value8b(v.x); - s.value8b(v.y); - s.value8b(v.z); -} - } namespace { + using 
GeometryPointBufferArena = + simfil::ArrayArena; + /** * Views into the sourceDataAddresses_ array are stored as a single u32, which * uses 20 bits for the index and 4 bits for the length. @@ -59,7 +62,6 @@ namespace constexpr uint32_t SourceAddressArenaIndexMax = (~static_cast(0)) >> (32 - SourceAddressArenaIndexBits); constexpr uint32_t SourceAddressArenaSizeBits = 4; constexpr uint32_t SourceAddressArenaSizeMax = (~static_cast(0)) >> (32 - SourceAddressArenaSizeBits); - std::tuple modelAddressToSourceDataAddressList(uint32_t addr) { const auto index = addr >> SourceAddressArenaSizeBits; @@ -81,42 +83,168 @@ namespace namespace mapget { +namespace +{ +bool isBufferedGeometryColumn(uint8_t column) +{ + using Col = TileFeatureLayer::ColumnId; + return column == Col::LineGeometries || + column == Col::PolygonGeometries || + column == Col::MeshGeometries; +} + +bool isBaseGeometryColumn(uint8_t column) +{ + using Col = TileFeatureLayer::ColumnId; + return column == Col::PointGeometries || isBufferedGeometryColumn(column); +} + +GeomType geometryTypeForColumn(uint8_t column) +{ + using Col = TileFeatureLayer::ColumnId; + switch (column) { + case Col::PointGeometries: + return GeomType::Points; + case Col::LineGeometries: + return GeomType::Line; + case Col::PolygonGeometries: + return GeomType::Polygon; + case Col::MeshGeometries: + return GeomType::Mesh; + default: + raiseFmt("Unexpected geometry column {}.", column); + return GeomType::Points; + } +} + +void ensureGeometrySourceRefCapacity( + simfil::ModelColumn& refs, + simfil::ArrayIndex index) +{ + if (index == simfil::InvalidArrayIndex) { + raiseFmt("Invalid geometry buffer index {}.", index); + } + while (refs.size() <= static_cast(index)) { + refs.emplace_back(simfil::ModelNodeAddress{}); + } +} + +constexpr uint8_t InvalidGeometryStage = std::numeric_limits::max(); + +void ensureGeometryStageCapacity( + simfil::ModelColumn& stages, + simfil::ArrayIndex index) +{ + if (index == simfil::InvalidArrayIndex) { + 
raiseFmt("Invalid geometry buffer index {}.", index); + } + while (stages.size() <= static_cast(index)) { + stages.emplace_back(InvalidGeometryStage); + } +} + +uint32_t extraGeometryDataStorageIndex(simfil::ArrayIndex geometryIndex) +{ + if (geometryIndex == simfil::InvalidArrayIndex) { + raiseFmt("Invalid geometry buffer index {}.", geometryIndex); + } + + // Base geometries use point-buffer array handles as their geometry index. + // Regular handles and singleton handles live in disjoint source domains: + // regular arrays are plain indices, while singleton handles are encoded as + // 0x00800000 | payload. We remap them into one collision-free auxiliary + // storage space for geometry stages and extra geometry data by reserving + // even indices for regular arrays and odd indices for singleton payloads: + // regular n -> 2 * n + // singleton p -> 2 * p + 1 + // This keeps the storage compact without materializing huge sparse columns. + if (GeometryPointBufferArena::is_singleton_handle(geometryIndex)) { + return GeometryPointBufferArena::singleton_payload(geometryIndex) * 2U + 1U; + } + + return geometryIndex * 2U; +} + +simfil::ModelNodeAddress geometrySourceRefsAt( + simfil::ModelColumn const& refs, + uint32_t index) +{ + if (index < refs.size()) { + return refs.at(index); + } + return {}; +} + +std::optional geometryStageAt( + simfil::ModelColumn const& stages, + uint32_t index) +{ + if (index >= stages.size()) { + return std::nullopt; + } + auto const storedStage = stages.at(index); + if (storedStage == InvalidGeometryStage) { + return std::nullopt; + } + return storedStage; +} + +void ensureFeatureComplexDataRefCapacity( + simfil::ModelColumn& refs, + uint32_t index) +{ + while (refs.size() <= static_cast(index)) { + refs.emplace_back(simfil::ModelNodeAddress{}); + } +} +} + +struct FeatureAddrWithIdHash +{ + MODEL_COLUMN_TYPE(8); + + ModelNodeAddress featureAddr_{}; + uint32_t idHash_ = 0; + + FeatureAddrWithIdHash() = default; + 
FeatureAddrWithIdHash(ModelNodeAddress featureAddr, uint32_t idHash) + : featureAddr_(featureAddr), + idHash_(idHash) + {} + + bool operator< (FeatureAddrWithIdHash const& other) const { + return std::tie(idHash_, featureAddr_) < std::tie(other.idHash_, other.featureAddr_); + } +}; + struct TileFeatureLayer::Impl { - simfil::ModelNodeAddress featureIdPrefix_; - - sfl::segmented_vector features_; - sfl::segmented_vector attributes_; - sfl::segmented_vector validities_; - sfl::segmented_vector featureIds_; - sfl::segmented_vector attrLayers_; - sfl::segmented_vector attrLayerLists_; - sfl::segmented_vector relations_; - sfl::segmented_vector geom_; - sfl::segmented_vector sourceDataReferences_; + ModelNodeAddress featureIdPrefix_; + Point geometryAnchor_{}; + + simfil::ModelColumn features_; + simfil::ModelColumn complexFeatureData_; + simfil::ModelColumn complexFeatureDataRefs_; + simfil::ModelColumn attributes_; + simfil::ModelColumn validities_; + simfil::ModelColumn featureIds_; + simfil::ModelColumn attrLayers_; + simfil::ModelColumn attrLayerLists_; + simfil::ModelColumn relations_; + simfil::ModelColumn geomSourceDataRefs_; + simfil::ModelColumn geomStages_; + simfil::ModelColumn geomViews_; + simfil::ModelColumn sourceDataReferences_; Geometry::Storage pointBuffers_; + std::unordered_map> mergedArrayExtensions_; /** * Indexing of features by their id hash. The hash-feature pairs are kept * in a vector, which is kept in a sorted state. This allows finding a * feature by its id in O(log(n)) time. 
*/ - struct FeatureAddrWithIdHash - { - simfil::ModelNodeAddress featureAddr_; - uint64_t idHash_ = 0; - - template - void serialize(S& s) { - s.object(featureAddr_); - s.value8b(idHash_); - } - - bool operator< (FeatureAddrWithIdHash const& other) const { - return std::tie(idHash_, featureAddr_) < std::tie(other.idHash_, other.featureAddr_); - } - }; - sfl::segmented_vector featureHashIndex_; + simfil::ModelColumn featureHashIndex_; bool featureHashIndexNeedsSorting_ = false; + std::unordered_map upgradedSimpleValidityAddresses_; void sortFeatureHashIndex() { if (!featureHashIndexNeedsSorting_) @@ -131,20 +259,26 @@ struct TileFeatureLayer::Impl { // (De-)Serialization template void readWrite(S& s) { - constexpr size_t maxColumnSize = std::numeric_limits::max(); - s.container(features_, maxColumnSize); - s.container(attributes_, maxColumnSize); - s.container(validities_, maxColumnSize); - s.container(featureIds_, maxColumnSize); - s.container(attrLayers_, maxColumnSize); - s.container(attrLayerLists_, maxColumnSize); + s.value8b(geometryAnchor_.x); + s.value8b(geometryAnchor_.y); + s.value8b(geometryAnchor_.z); + s.object(features_); + s.object(complexFeatureData_); + s.object(complexFeatureDataRefs_); + s.object(attributes_); + s.object(validities_); + s.object(featureIds_); + s.object(attrLayers_); + s.object(attrLayerLists_); s.object(featureIdPrefix_); - s.container(relations_, maxColumnSize); + s.object(relations_); sortFeatureHashIndex(); - s.container(featureHashIndex_, maxColumnSize); - s.container(geom_, maxColumnSize); + s.object(featureHashIndex_); + s.object(geomSourceDataRefs_); + s.object(geomViews_); s.ext(pointBuffers_, bitsery::ext::ArrayArenaExt{}); - s.container(sourceDataReferences_, maxColumnSize); + s.object(sourceDataReferences_); + s.object(geomStages_); } explicit Impl(std::shared_ptr stringPool) @@ -164,29 +298,202 @@ TileFeatureLayer::TileFeatureLayer( impl_(std::make_unique(strings)), TileLayer(tileId, nodeId, mapId, layerInfo) { + 
impl_->geometryAnchor_ = tileId.center(); } TileFeatureLayer::TileFeatureLayer( - std::istream& inputStream, + const std::vector& input, LayerInfoResolveFun const& layerInfoResolveFun, StringPoolResolveFun const& stringPoolGetter ) : - TileLayer(inputStream, layerInfoResolveFun), + TileLayer(input, layerInfoResolveFun, &deserializationOffsetBytes_), ModelPool(stringPoolGetter(nodeId_)), impl_(std::make_unique(stringPoolGetter(nodeId_))) { - bitsery::Deserializer s(inputStream); + impl_->geometryAnchor_ = tileId_.center(); + using Adapter = bitsery::InputBufferAdapter>; + if (deserializationOffsetBytes_ > input.size()) { + raise("Failed to read TileFeatureLayer: invalid deserialization offset."); + } + bitsery::Deserializer s(Adapter( + input.begin() + static_cast(deserializationOffsetBytes_), + input.end())); + s.ext4b(stage_, bitsery::ext::StdOptional{}); impl_->readWrite(s); if (s.adapter().error() != bitsery::ReaderError::NoError) { raise(fmt::format( "Failed to read TileFeatureLayer: Error {}", static_cast>(s.adapter().error()))); } - ModelPool::read(inputStream); + const auto modelOffset = deserializationOffsetBytes_ + s.adapter().currentReadPos(); + if (auto result = ModelPool::read(input, modelOffset); !result) { + raise(result.error().message); + } +} + +Point TileFeatureLayer::geometryAnchor() const +{ + return impl_->geometryAnchor_; +} + +void TileFeatureLayer::setGeometryAnchor(Point const& anchor) +{ + impl_->geometryAnchor_ = anchor; } TileFeatureLayer::~TileFeatureLayer() = default; +std::optional TileFeatureLayer::stage() const +{ + return stage_; +} + +void TileFeatureLayer::setStage(std::optional stage) +{ + stage_ = stage; +} + +void TileFeatureLayer::setExpectedFeatureSequence(std::vector expectedFeatureIds) +{ + expectedFeatureIds_ = std::move(expectedFeatureIds); +} + +void TileFeatureLayer::clearExpectedFeatureSequence() +{ + expectedFeatureIds_.clear(); +} + +bool TileFeatureLayer::hasExpectedFeatureSequence() const +{ + return 
!expectedFeatureIds_.empty(); +} + +void TileFeatureLayer::validateExpectedFeatureSequenceComplete() const +{ + if (expectedFeatureIds_.empty()) { + return; + } + + auto const createdFeatureCount = impl_->features_.size(); + if (createdFeatureCount == expectedFeatureIds_.size()) { + return; + } + + if (createdFeatureCount < expectedFeatureIds_.size()) { + auto const& nextExpectedId = expectedFeatureIds_[createdFeatureCount]; + raiseFmt( + "Feature sequence incomplete: created {} of {} expected features. Next expected id: {}.", + createdFeatureCount, + expectedFeatureIds_.size(), + nextExpectedId); + } + + raiseFmt( + "Feature sequence overflow: created {} features, expected {}.", + createdFeatureCount, + expectedFeatureIds_.size()); +} + +Feature::ComplexData const* TileFeatureLayer::featureComplexDataOrNull(uint32_t featureIndex) const +{ + if (featureIndex >= impl_->complexFeatureDataRefs_.size()) { + return nullptr; + } + auto const addr = impl_->complexFeatureDataRefs_.at(featureIndex); + if (!addr) { + return nullptr; + } + if (addr.column() != ColumnId::FeatureComplexData) { + raiseFmt("Invalid complex feature data address column {}.", addr.column()); + } + if (addr.index() >= impl_->complexFeatureData_.size()) { + raiseFmt("Complex feature data index {} is out of bounds.", addr.index()); + } + return &impl_->complexFeatureData_.at(addr.index()); +} + +Feature::ComplexData* TileFeatureLayer::featureComplexDataOrNull(uint32_t featureIndex) +{ + return const_cast( + static_cast(*this).featureComplexDataOrNull(featureIndex)); +} + +Feature::ComplexData& TileFeatureLayer::ensureFeatureComplexData(uint32_t featureIndex) +{ + ensureFeatureComplexDataRefCapacity(impl_->complexFeatureDataRefs_, featureIndex); + auto& addr = impl_->complexFeatureDataRefs_.at(featureIndex); + if (!addr) { + auto const complexIndex = static_cast(impl_->complexFeatureData_.size()); + impl_->complexFeatureData_.emplace_back(Feature::ComplexData{}); + addr = 
simfil::ModelNodeAddress{ColumnId::FeatureComplexData, complexIndex}; + } + if (addr.column() != ColumnId::FeatureComplexData) { + raiseFmt("Invalid complex feature data address column {}.", addr.column()); + } + return impl_->complexFeatureData_.at(addr.index()); +} + +void TileFeatureLayer::attachOverlay(TileFeatureLayer::Ptr const& overlay) +{ + if (!overlay) { + return; + } + + if (overlay->size() < size()) { + raiseFmt( + "Overlay feature count {} is smaller than base feature count {}.", + overlay->size(), + size()); + } + + if (overlay_) { + overlay_->attachOverlay(overlay); + return; + } + + overlay_ = overlay; +} + +TileFeatureLayer::Ptr TileFeatureLayer::overlay() const +{ + return overlay_; +} + +void TileFeatureLayer::setMergedArrayExtension( + ModelNodeAddress baseAddress, + TileFeatureLayer const* extensionModel, + ModelNodeAddress extensionAddress) +{ + if (!baseAddress || !extensionModel || !extensionAddress) { + clearMergedArrayExtension(baseAddress); + return; + } + impl_->mergedArrayExtensions_[baseAddress.value_] = { + extensionModel, + extensionAddress}; +} + +void TileFeatureLayer::clearMergedArrayExtension(ModelNodeAddress baseAddress) +{ + if (!baseAddress) { + return; + } + impl_->mergedArrayExtensions_.erase(baseAddress.value_); +} + +std::optional> +TileFeatureLayer::mergedArrayExtension(ModelNodeAddress baseAddress) const +{ + if (!baseAddress) { + return {}; + } + auto it = impl_->mergedArrayExtensions_.find(baseAddress.value_); + if (it == impl_->mergedArrayExtensions_.end()) { + return {}; + } + return it->second; +} + namespace { @@ -239,6 +546,59 @@ stripOptionalIdParts(KeyValueViewPairs const& keysAndValues, std::vector return result; } +/** + * Materialize per-feature ID values aligned to the composition suffix that starts + * after the tile-level common prefix. Omitted optional parts in that suffix are + * stored as null sentinels to keep the local feature ID shape stable. 
+ */ +simfil::ArrayIndex idPartValuesToArrayIndex( + TileFeatureLayer& layer, + std::vector const& composition, + KeyValueViewPairs const& idParts, + uint32_t compositionStartIndex = 0) +{ + auto idValues = layer.newArray( + composition.size() - std::min( + compositionStartIndex, + static_cast(composition.size())), + true); + auto idPartsIter = idParts.begin(); + + for (uint32_t compositionIndex = compositionStartIndex; + compositionIndex < composition.size(); + ++compositionIndex) { + auto const& idPart = composition[compositionIndex]; + if (idPartsIter != idParts.end() && idPart.idPartLabel_ == idPartsIter->first) { + idValues->append(std::visit( + [&](auto&& v) -> simfil::ModelNode::Ptr { + return layer.newValue(v); + }, + idPartsIter->second)); + ++idPartsIter; + continue; + } + + if (!idPart.isOptional_) { + raiseFmt( + "Missing non-optional ID part '{}' while materializing feature ID values.", + idPart.idPartLabel_); + } + + idValues->append( + layer.resolve( + simfil::ModelNodeAddress{simfil::Model::Null, 1}, + simfil::ScalarValueType{})); + } + + if (idPartsIter != idParts.end()) { + raiseFmt( + "Unexpected trailing ID part '{}' while materializing feature ID values.", + idPartsIter->first); + } + + return static_cast(idValues->addr().index()); +} + } // namespace simfil::model_ptr TileFeatureLayer::newFeature( @@ -249,60 +609,118 @@ simfil::model_ptr TileFeatureLayer::newFeature( raise("Tried to create an empty feature ID."); } - uint32_t idPrefixLength = 0; - if (auto const idPrefix = getIdPrefix()) - idPrefixLength = idPrefix->size(); + KeyValueViewPairs fullFeatureIdParts; + KeyValueViewPairs prefixFeatureIdParts; + if (auto const idPrefix = getIdPrefix()) { + fullFeatureIdParts.reserve(idPrefix->size() + featureIdParts.size()); + prefixFeatureIdParts.reserve(idPrefix->size()); + for (auto const& [key, value] : idPrefix->fields()) { + auto const keyStr = strings()->resolve(key); + if (!keyStr || !value) { + continue; + } - if 
(!layerInfo_->validFeatureId(typeId, featureIdParts, true, idPrefixLength)) { + std::visit( + [&](auto&& v) + { + using T = std::decay_t; + if constexpr (std::is_same_v || + std::is_same_v || + std::is_same_v) { + } + else if constexpr (std::is_same_v || + std::is_same_v) { + fullFeatureIdParts.emplace_back(*keyStr, std::string_view(v)); + prefixFeatureIdParts.emplace_back(*keyStr, std::string_view(v)); + } + else { + fullFeatureIdParts.emplace_back(*keyStr, static_cast(v)); + prefixFeatureIdParts.emplace_back(*keyStr, static_cast(v)); + } + }, + value->value()); + } + } + fullFeatureIdParts.insert(fullFeatureIdParts.end(), featureIdParts.begin(), featureIdParts.end()); + + if (!layerInfo_->validFeatureId(typeId, fullFeatureIdParts, true)) { raise(fmt::format( "Could not find a matching ID composition of type {} with parts {}.", typeId, - idPartsToString(featureIdParts))); + idPartsToString(fullFeatureIdParts))); } - - auto featureIdIndex = impl_->featureIds_.size(); - auto featureIdObject = newObject(featureIdParts.size()); + auto const& primaryIdComposition = getPrimaryIdComposition(typeId); + auto const localStartIndex = prefixFeatureIdParts.empty() + ? 0U + : *IdPart::compositionMatchEndIndex( + primaryIdComposition, + 0, + prefixFeatureIdParts, + prefixFeatureIdParts.size()); + auto idPartValues = idPartValuesToArrayIndex( + *this, + primaryIdComposition, + featureIdParts, + localStartIndex); auto res = strings()->emplace(typeId); if (!res) raise(res.error().message); - impl_->featureIds_.emplace_back(FeatureId::Data{ - true, - *res, - featureIdObject->addr() - }); - for (auto const& [k, v] : featureIdParts) { - auto&& kk = k; - std::visit([&](auto&& x){ - featureIdObject->addField(kk, x); - }, v); + // Initial backend LOD strategy: + // - stage 0 ("Low-Fi"): default to LOD_0 (no random culling); converters can + // override per-feature LOD semantically (e.g. road classes). + // - other stages: default to MAX_LOD. 
During stage merge, stage-0 feature data + // remains authoritative for LOD. + auto lodValue = static_cast(Feature::MAX_LOD); + if (stage_ && *stage_ == 0) { + lodValue = static_cast(Feature::LOD::LOD_0); } auto featureIndex = impl_->features_.size(); - impl_->features_.emplace_back(Feature::Data{ - simfil::ModelNodeAddress{ColumnId::FeatureIds, (uint32_t)featureIdIndex}, - simfil::ModelNodeAddress{Null, 0}, - simfil::ModelNodeAddress{Null, 0}, - simfil::ModelNodeAddress{Null, 0}, - simfil::ModelNodeAddress{Null, 0}, + impl_->features_.emplace_back(Feature::BasicData{ + Feature::TypeIdAndLOD{ + *res, + lodValue}, + idPartValues, + ModelNodeAddress{Null, 0}, }); auto result = Feature( impl_->features_.back(), + nullptr, shared_from_this(), - simfil::ModelNodeAddress{ColumnId::Features, (uint32_t)featureIndex}); + ModelNodeAddress{ColumnId::Features, (uint32_t)featureIndex}, + mpKey_); + + if (!expectedFeatureIds_.empty()) { + if (featureIndex >= expectedFeatureIds_.size()) { + raiseFmt( + "Feature sequence mismatch: unexpected extra feature at index {}: {}.", + featureIndex, + result.id()->toString()); + } + + auto const& expectedFeatureId = expectedFeatureIds_[featureIndex]; + auto const actualFeatureId = result.id()->toString(); + if (actualFeatureId != expectedFeatureId) { + raiseFmt( + "Feature sequence mismatch at index {}: expected {}, got {}.", + featureIndex, + expectedFeatureId, + actualFeatureId); + } + } // Add feature hash index entry. 
- auto const& primaryIdComposition = getPrimaryIdComposition(typeId); auto fullStrippedFeatureId = stripOptionalIdParts(result.id()->keyValuePairs(), primaryIdComposition); - auto hash = Hash().mix(typeId).mix(fullStrippedFeatureId).value(); - impl_->featureHashIndex_.emplace_back(TileFeatureLayer::Impl::FeatureAddrWithIdHash{result.addr(), hash}); + auto hash = static_cast(Hash().mix(typeId).mix(fullStrippedFeatureId).value()); + impl_->featureHashIndex_.emplace_back(FeatureAddrWithIdHash{result.addr(), hash}); impl_->featureHashIndexNeedsSorting_ = true; // Note: Here we rely on the assertion that the root_ collection // contains only references to feature nodes, in the order // of the feature node column. - addRoot(simfil::ModelNode::Ptr(result)); - setInfo("num-features", numRoots()); + addRoot(ModelNode::Ptr(result)); + setInfo("Size/Features#features", numRoots()); return result; } @@ -318,23 +736,25 @@ TileFeatureLayer::newFeatureId( idPartsToString(featureIdParts))); } - auto featureIdObject = newObject(featureIdParts.size()); auto featureIdIndex = impl_->featureIds_.size(); auto typeIdStringId = strings()->emplace(typeId); if (!typeIdStringId) raise(typeIdStringId.error().message); + auto const idCompositionIndex = + *layerInfo_->matchingFeatureIdCompositionIndex(typeId, featureIdParts, false); + auto const& composition = + layerInfo_->getTypeInfo(typeId)->uniqueIdCompositions_[idCompositionIndex]; impl_->featureIds_.emplace_back(FeatureId::Data{ false, + idCompositionIndex, *typeIdStringId, - featureIdObject->addr() + idPartValuesToArrayIndex(*this, composition, featureIdParts) }); - for (auto const& [k, v] : featureIdParts) { - auto&& kk = k; - std::visit([&](auto&& x){ - featureIdObject->addField(kk, x); - }, v); - } - return FeatureId(impl_->featureIds_.back(), shared_from_this(), {ColumnId::FeatureIds, (uint32_t)featureIdIndex}); + return FeatureId( + impl_->featureIds_.back(), + shared_from_this(), + {ColumnId::ExternalFeatureIds, 
static_cast(featureIdIndex)}, + mpKey_); } model_ptr @@ -348,18 +768,30 @@ TileFeatureLayer::newRelation(const std::string_view& name, const model_ptraddr() }); - return Relation(&impl_->relations_.back(), shared_from_this(), {ColumnId::Relations, (uint32_t)relationIndex}); + return Relation( + &impl_->relations_.back(), + shared_from_this(), + {ColumnId::Relations, (uint32_t)relationIndex}, + mpKey_); } model_ptr TileFeatureLayer::getIdPrefix() +{ + return static_cast(*this).getIdPrefix(); +} + +model_ptr TileFeatureLayer::getIdPrefix() const { if (impl_->featureIdPrefix_) - return resolveObject(simfil::ModelNode::Ptr::make(shared_from_this(), impl_->featureIdPrefix_)); + return resolve(impl_->featureIdPrefix_); return {}; } model_ptr -TileFeatureLayer::newAttribute(const std::string_view& name, size_t initialCapacity) +TileFeatureLayer::newAttribute( + const std::string_view& name, + size_t initialCapacity, + bool fixedSize) { auto attrIndex = impl_->attributes_.size(); auto nameStringId = strings()->emplace(name); @@ -367,51 +799,96 @@ TileFeatureLayer::newAttribute(const std::string_view& name, size_t initialCapac raise(nameStringId.error().message); impl_->attributes_.emplace_back(Attribute::Data{ {Null, 0}, - objectMemberStorage().new_array(initialCapacity), + objectMemberStorage().new_array(initialCapacity, fixedSize), *nameStringId, }); return Attribute( &impl_->attributes_.back(), shared_from_this(), - {ColumnId::Attributes, (uint32_t)attrIndex}); + {ColumnId::Attributes, (uint32_t)attrIndex}, + mpKey_); } -model_ptr TileFeatureLayer::newAttributeLayer(size_t initialCapacity) +model_ptr TileFeatureLayer::newAttributeLayer(size_t initialCapacity, bool fixedSize) { auto layerIndex = impl_->attrLayers_.size(); - impl_->attrLayers_.emplace_back(objectMemberStorage().new_array(initialCapacity)); + impl_->attrLayers_.emplace_back(objectMemberStorage().new_array(initialCapacity, fixedSize)); return AttributeLayer( impl_->attrLayers_.back(), shared_from_this(), - 
{ColumnId::AttributeLayers, (uint32_t)layerIndex}); + {ColumnId::AttributeLayers, (uint32_t)layerIndex}, + mpKey_); } -model_ptr TileFeatureLayer::newAttributeLayers(size_t initialCapacity) +model_ptr TileFeatureLayer::newAttributeLayers(size_t initialCapacity, bool fixedSize) { auto listIndex = impl_->attrLayerLists_.size(); - impl_->attrLayerLists_.emplace_back(objectMemberStorage().new_array(initialCapacity)); + impl_->attrLayerLists_.emplace_back(objectMemberStorage().new_array(initialCapacity, fixedSize)); return AttributeLayerList( impl_->attrLayerLists_.back(), shared_from_this(), - {ColumnId::AttributeLayerLists, (uint32_t)listIndex}); + {ColumnId::AttributeLayerLists, (uint32_t)listIndex}, + mpKey_); } -model_ptr TileFeatureLayer::newGeometryCollection(size_t initialCapacity) +model_ptr TileFeatureLayer::newGeometryCollection(size_t initialCapacity, bool fixedSize) { - auto listIndex = arrayMemberStorage().new_array(initialCapacity); + auto listIndex = arrayMemberStorage().new_array(initialCapacity, fixedSize); return GeometryCollection( shared_from_this(), - {ColumnId::GeometryCollections, (uint32_t)listIndex}); + {ColumnId::GeometryCollections, (uint32_t)listIndex}, + mpKey_); } -model_ptr TileFeatureLayer::newGeometry(GeomType geomType, size_t initialCapacity) +model_ptr TileFeatureLayer::newGeometry( + GeomType geomType, + size_t initialCapacity, + bool fixedSize) { - initialCapacity = std::max((size_t)1, initialCapacity); - impl_->geom_.emplace_back(geomType, initialCapacity); - return Geometry( - &impl_->geom_.back(), - shared_from_this(), - {ColumnId::Geometries, (uint32_t)impl_->geom_.size() - 1}); + initialCapacity = std::max(1, initialCapacity); + + auto const currentGeometryStage = [this]() -> std::optional + { + auto stage = stage_.value_or(layerInfo_ ? 
layerInfo_->highFidelityStage_ : 0U); + if (stage > std::numeric_limits::max()) { + raiseFmt("Geometry stage {} exceeds uint8_t range.", stage); + } + return static_cast(stage); + }(); + + auto makeGeometry = + [this, currentGeometryStage](uint8_t column, simfil::ArrayIndex vertexArray) + { + auto const geometryAddress = + simfil::ModelNodeAddress{column, static_cast(vertexArray)}; + setGeometryStage(geometryAddress, currentGeometryStage); + return Geometry(shared_from_this(), geometryAddress, mpKey_); + }; + + switch (geomType) { + case GeomType::Points: { + auto const vertexArray = impl_->pointBuffers_.new_array(initialCapacity, fixedSize); + return makeGeometry(ColumnId::PointGeometries, vertexArray); + } + case GeomType::Line: + { + auto const vertexArray = impl_->pointBuffers_.new_array(initialCapacity, fixedSize); + return makeGeometry(ColumnId::LineGeometries, vertexArray); + } + case GeomType::Polygon: + { + auto const vertexArray = impl_->pointBuffers_.new_array(initialCapacity, fixedSize); + return makeGeometry(ColumnId::PolygonGeometries, vertexArray); + } + case GeomType::Mesh: + { + auto const vertexArray = impl_->pointBuffers_.new_array(initialCapacity, fixedSize); + return makeGeometry(ColumnId::MeshGeometries, vertexArray); + } + } + + raise("Unsupported geometry type."); + return {}; } model_ptr TileFeatureLayer::newGeometryView( @@ -420,11 +897,12 @@ model_ptr TileFeatureLayer::newGeometryView( uint32_t size, const model_ptr& base) { - impl_->geom_.emplace_back(geomType, offset, size, base->addr()); + impl_->geomViews_.emplace_back(geomType, offset, size, base->addr()); return Geometry( - &impl_->geom_.back(), + &impl_->geomViews_.back(), shared_from_this(), - {ColumnId::Geometries, (uint32_t)impl_->geom_.size() - 1}); + {ColumnId::GeometryViews, (uint32_t)impl_->geomViews_.size() - 1}, + mpKey_); } model_ptr TileFeatureLayer::newSourceDataReferenceCollection(std::span list) @@ -437,7 +915,8 @@ model_ptr TileFeatureLayer::newSourceDataReferenc 
return { SourceDataReferenceCollection(index, size, shared_from_this(), - ModelNodeAddress(ColumnId::SourceDataReferenceCollections, sourceDataAddressListToModelAddress(index, size)))}; + ModelNodeAddress(ColumnId::SourceDataReferenceCollections, sourceDataAddressListToModelAddress(index, size)), + mpKey_)}; } model_ptr TileFeatureLayer::newValidity() @@ -446,256 +925,484 @@ model_ptr TileFeatureLayer::newValidity() return Validity( &impl_->validities_.back(), shared_from_this(), - {ColumnId::Validities, (uint32_t)impl_->validities_.size() - 1}); + {ColumnId::Validities, (uint32_t)impl_->validities_.size() - 1}, + mpKey_); } -model_ptr TileFeatureLayer::newValidityCollection(size_t initialCapacity) +model_ptr TileFeatureLayer::newValidityCollection(size_t initialCapacity, bool fixedSize) { - auto validityArrId = arrayMemberStorage().new_array(initialCapacity); + auto validityArrId = arrayMemberStorage().new_array(initialCapacity, fixedSize); return MultiValidity( shared_from_this(), - {ColumnId::ValidityCollections, (uint32_t)validityArrId}); + {ColumnId::ValidityCollections, (uint32_t)validityArrId}, + mpKey_); } -model_ptr TileFeatureLayer::resolveAttributeLayer(simfil::ModelNode const& n) const +std::optional TileFeatureLayer::upgradedSimpleValidityAddress( + ModelNodeAddress simpleAddress) const { - if (n.addr().column() != ColumnId::AttributeLayers) + if (simpleAddress.column() != ColumnId::SimpleValidity) { + return std::nullopt; + } + if (auto existing = impl_->upgradedSimpleValidityAddresses_.find(simpleAddress.value_); + existing != impl_->upgradedSimpleValidityAddresses_.end()) { + return existing->second; + } + return std::nullopt; +} + +ModelNodeAddress TileFeatureLayer::materializeSimpleValidity( + ModelNodeAddress simpleAddress, + Validity::Direction direction) +{ + if (simpleAddress.column() != ColumnId::SimpleValidity) { + raise("Cannot materialize non-simple validity node."); + } + ModelNodeAddress upgradedAddress{}; + if (auto existing = 
upgradedSimpleValidityAddress(simpleAddress)) { + upgradedAddress = *existing; + } + else { + impl_->validities_.emplace_back(); + upgradedAddress = ModelNodeAddress{ + ColumnId::Validities, + static_cast(impl_->validities_.size() - 1)}; + auto& upgraded = impl_->validities_.back(); + upgraded.direction_ = direction; + impl_->upgradedSimpleValidityAddresses_.emplace(simpleAddress.value_, upgradedAddress); + } + + // Rebind every simple validity address reference to the upgraded full node. + auto& members = arrayMemberStorage(); + auto rebindArrayMembers = [&](simfil::ArrayIndex arrayIndex) + { + members.iterate( + arrayIndex, + [&](ModelNodeAddress& memberAddress) + { + if (memberAddress.value_ == simpleAddress.value_) { + memberAddress = upgradedAddress; + } + }); + }; + + for (simfil::ArrayIndex arrayIndex = simfil::FirstRegularArrayIndex; + arrayIndex < static_cast(members.size()); + ++arrayIndex) { + rebindArrayMembers(arrayIndex); + } + for (simfil::ArrayIndex singletonOrdinal = 0; + singletonOrdinal < static_cast(members.singleton_handle_count()); + ++singletonOrdinal) { + rebindArrayMembers(simfil::SingletonArrayHandleMask | singletonOrdinal); + } + + return upgradedAddress; +} + +// Short aliases to keep resolve hook signatures compact. 
+using simfil::ModelNode; +using simfil::res::tag; + +template<> +model_ptr resolveInternal(tag, TileFeatureLayer const& model, ModelNode const& node) +{ + if (node.addr().column() != TileFeatureLayer::ColumnId::AttributeLayers) raise("Cannot cast this node to an AttributeLayer."); return AttributeLayer( - impl_->attrLayers_[n.addr().index()], - shared_from_this(), - n.addr()); + model.impl_->attrLayers_[node.addr().index()], + model.shared_from_this(), + node.addr(), + model.mpKey_); } -model_ptr TileFeatureLayer::resolveAttributeLayerList(simfil::ModelNode const& n) const +template<> +model_ptr resolveInternal(tag, TileFeatureLayer const& model, ModelNode const& node) { - if (n.addr().column() != ColumnId::AttributeLayerLists) + if (node.addr().column() != TileFeatureLayer::ColumnId::AttributeLayerLists) raise("Cannot cast this node to an AttributeLayerList."); return AttributeLayerList( - impl_->attrLayerLists_[n.addr().index()], - shared_from_this(), - n.addr()); + model.impl_->attrLayerLists_[node.addr().index()], + model.shared_from_this(), + node.addr(), + model.mpKey_); } -model_ptr TileFeatureLayer::resolveAttribute(simfil::ModelNode const& n) const +template<> +model_ptr resolveInternal(tag, TileFeatureLayer const& model, ModelNode const& node) { - if (n.addr().column() != ColumnId::Attributes) + if (node.addr().column() != TileFeatureLayer::ColumnId::Attributes) raise("Cannot cast this node to an Attribute."); return Attribute( - &impl_->attributes_[n.addr().index()], - shared_from_this(), - n.addr()); + &model.impl_->attributes_[node.addr().index()], + model.shared_from_this(), + node.addr(), + model.mpKey_); } -model_ptr TileFeatureLayer::resolveFeature(simfil::ModelNode const& n) const +template<> +model_ptr resolveInternal(tag, TileFeatureLayer const& model, ModelNode const& node) { - if (n.addr().column() != ColumnId::Features) + if (node.addr().column() != TileFeatureLayer::ColumnId::Features) { raise("Cannot cast this node to a Feature."); - 
return Feature( - impl_->features_[n.addr().index()], - shared_from_this(), - n.addr()); + } + auto* complexData = + const_cast(model).featureComplexDataOrNull(node.addr().index()); + model_ptr result = Feature( + model.impl_->features_[node.addr().index()], + complexData, + model.shared_from_this(), + node.addr(), + model.mpKey_); + + if (model.overlay_ && node.addr().index() < model.overlay_->size()) { + result->setExtensionAddress( + model.overlay_.get(), + ModelNodeAddress{ + TileFeatureLayer::ColumnId::Features, + static_cast(node.addr().index())}); + } + return result; } -model_ptr TileFeatureLayer::resolveFeatureId(simfil::ModelNode const& n) const +template<> +model_ptr resolveInternal(tag, TileFeatureLayer const& model, ModelNode const& node) { - if (n.addr().column() != ColumnId::FeatureIds) + if (node.addr().column() != TileFeatureLayer::ColumnId::FeatureRelationsView) + raise("Cannot cast this node to a RelationArrayView."); + + auto result = RelationArrayView( + model.shared_from_this(), + node.addr(), + model.mpKey_); + + if (model.overlay_ && node.addr().index() < model.overlay_->size()) { + result.setExtension(model.overlay_->resolve( + ModelNodeAddress{TileFeatureLayer::ColumnId::FeatureRelationsView, node.addr().index()})); + } else { + result.setExtension({}); + } + return result; +} + +template<> +model_ptr resolveInternal(tag, TileFeatureLayer const& model, ModelNode const& node) +{ + switch (node.addr().column()) { + case TileFeatureLayer::ColumnId::FeatureIds: { + auto const& featureData = model.impl_->features_[node.addr().index()]; + return FeatureId( + FeatureId::Data{ + true, + 0, + featureData.typeIdAndLod_.typeId_, + featureData.idPartValues_}, + model.shared_from_this(), + node.addr(), + model.mpKey_); + } + case TileFeatureLayer::ColumnId::ExternalFeatureIds: + return FeatureId( + model.impl_->featureIds_[node.addr().index()], + model.shared_from_this(), + node.addr(), + model.mpKey_); + default: raise("Cannot cast this node to a 
FeatureId."); - return FeatureId( - impl_->featureIds_[n.addr().index()], - shared_from_this(), - n.addr()); + } } -model_ptr TileFeatureLayer::resolveRelation(const simfil::ModelNode& n) const +template<> +model_ptr resolveInternal(tag, TileFeatureLayer const& model, ModelNode const& node) { - if (n.addr().column() != ColumnId::Relations) + if (node.addr().column() != TileFeatureLayer::ColumnId::Relations) raise("Cannot cast this node to a Relation."); return Relation( - &impl_->relations_[n.addr().index()], - shared_from_this(), - n.addr()); + &model.impl_->relations_[node.addr().index()], + model.shared_from_this(), + node.addr(), + model.mpKey_); } -model_ptr TileFeatureLayer::resolvePoint(const simfil::ModelNode& n) const +template<> +model_ptr resolveInternal(tag, TileFeatureLayer const& model, ModelNode const& node) { - if (n.addr().column() != ColumnId::Points) + switch (node.addr().column()) { + case TileFeatureLayer::ColumnId::Points: + return PointNode( + node, + static_cast(node.addr().index()), + model.mpKey_); + case TileFeatureLayer::ColumnId::ValidityPoints: + return PointNode(node, &model.impl_->validities_.at(node.addr().index()), model.mpKey_); + default: raise("Cannot cast this node to a Point."); - return PointNode( - n, &impl_->geom_.at(n.addr().index())); -} - -model_ptr TileFeatureLayer::resolveValidityPoint(const simfil::ModelNode& n) const -{ - if (n.addr().column() != ColumnId::ValidityPoints) - raise("Cannot cast this node to a ValidityPoint."); - return PointNode( - n, &impl_->validities_.at(n.addr().index())); + } } -model_ptr TileFeatureLayer::resolveValidity(simfil::ModelNode const& n) const +template<> +model_ptr resolveInternal(tag, TileFeatureLayer const& model, ModelNode const& node) { - if (n.addr().column() != ColumnId::Validities) - raise("Cannot cast this node to a Validity."); - return Validity( - &impl_->validities_[n.addr().index()], - shared_from_this(), - n.addr()); + if (auto existing = dynamic_cast(&node)) { + return 
PointBufferNode( + model.shared_from_this(), + existing->baseGeometryAddress(), + model.mpKey_); + } + switch (node.addr().column()) { + case TileFeatureLayer::ColumnId::PointBuffers: + return PointBufferNode( + model.shared_from_this(), + ModelNodeAddress{ + TileFeatureLayer::ColumnId::PointGeometries, + node.addr().index()}, + model.mpKey_); + case TileFeatureLayer::ColumnId::PointBuffersView: + return PointBufferNode( + model.shared_from_this(), + ModelNodeAddress{TileFeatureLayer::ColumnId::GeometryViews, node.addr().index()}, + model.mpKey_); + default: + raise("Cannot cast this node to a PointBuffer."); + } } -model_ptr TileFeatureLayer::resolveValidityCollection(const simfil::ModelNode& n) const +template<> +model_ptr resolveInternal(tag, TileFeatureLayer const& model, ModelNode const& node) { - if (n.addr().column() != ColumnId::ValidityCollections) - raise("Cannot cast this node to a ValidityCollection."); - return MultiValidity( - shared_from_this(), - n.addr()); + switch (node.addr().column()) { + case TileFeatureLayer::ColumnId::PointGeometries: + case TileFeatureLayer::ColumnId::LineGeometries: + case TileFeatureLayer::ColumnId::PolygonGeometries: + case TileFeatureLayer::ColumnId::MeshGeometries: + { + return Geometry( + model.shared_from_this(), + node.addr(), + model.mpKey_); + } + case TileFeatureLayer::ColumnId::GeometryViews: { + auto* geomData = &model.impl_->geomViews_.at(node.addr().index()); + using MutableGeomData = + std::remove_const_t>; + return Geometry( + const_cast(geomData), // FIXME: const_cast?! 
+ model.shared_from_this(), + node.addr(), + model.mpKey_); + } + default: + raise("Cannot cast this node to a Geometry."); + } } -model_ptr TileFeatureLayer::resolvePointBuffer(const simfil::ModelNode& n) const +template<> +model_ptr resolveInternal(tag, TileFeatureLayer const& model, ModelNode const& node) { - return PointBufferNode( - &impl_->geom_.at(n.addr().index()), - shared_from_this(), - n.addr()); + if (node.addr().column() != TileFeatureLayer::ColumnId::GeometryCollections && + !isBaseGeometryColumn(node.addr().column()) && + node.addr().column() != TileFeatureLayer::ColumnId::GeometryViews) { + raise("Cannot cast this node to a GeometryCollection."); + } + return GeometryCollection( + model.shared_from_this(), node.addr(), model.mpKey_); } -model_ptr TileFeatureLayer::resolvePolygon(const simfil::ModelNode& n) const +template<> +model_ptr resolveInternal(tag, TileFeatureLayer const& model, ModelNode const& node) { - return PolygonNode( - shared_from_this(), - n.addr()); + if (node.addr().column() != TileFeatureLayer::ColumnId::GeometryArrayView) + raise("Cannot cast this node to a GeometryArrayView."); + return GeometryArrayView( + model.shared_from_this(), + node.addr(), + model.mpKey_); } -model_ptr TileFeatureLayer::resolveMesh(const simfil::ModelNode& n) const +template<> +model_ptr resolveInternal(tag, TileFeatureLayer const& model, ModelNode const& node) { return MeshNode( - &impl_->geom_.at(n.addr().index()), - shared_from_this(), - n.addr()); + model.shared_from_this(), + node.addr(), + model.mpKey_); } -model_ptr TileFeatureLayer::resolveLinearRing(const simfil::ModelNode& n) const +template<> +model_ptr resolveInternal(tag, TileFeatureLayer const& model, ModelNode const& node) { - return LinearRingNode(n); + return MeshTriangleCollectionNode(node, model.mpKey_); } -model_ptr TileFeatureLayer::resolveGeometry(const simfil::ModelNode& n) const +template<> +model_ptr resolveInternal(tag, TileFeatureLayer const& model, ModelNode const& node) { - 
return Geometry( - &const_cast(impl_->geom_.at(n.addr().index())), // FIXME: const_cast?! - shared_from_this(), - n.addr()); + switch (node.addr().column()) { + case TileFeatureLayer::ColumnId::LinearRing: + return LinearRingNode(node, model.mpKey_); + case TileFeatureLayer::ColumnId::MeshTriangleLinearRing: + return LinearRingNode(node, 3, model.mpKey_); + default: + raise("Cannot cast this node to a LinearRing."); + } } -model_ptr TileFeatureLayer::resolveMeshTriangleLinearRing(const simfil::ModelNode& n) const +template<> +model_ptr resolveInternal(tag, TileFeatureLayer const& model, ModelNode const& node) { - return LinearRingNode(n, 3); + return PolygonNode( + model.shared_from_this(), + node.addr(), + model.mpKey_); } -model_ptr TileFeatureLayer::resolveMeshTriangleCollection(const simfil::ModelNode& n) const +template<> +model_ptr resolveInternal(tag, TileFeatureLayer const& model, ModelNode const& node) { - return MeshTriangleCollectionNode(n); -} + if (node.addr().column() != TileFeatureLayer::ColumnId::SourceDataReferenceCollections) + raise("Cannot cast this node to an SourceDataReferenceCollection."); -model_ptr -TileFeatureLayer::resolveGeometryCollection(const simfil::ModelNode& n) const -{ - return GeometryCollection( - shared_from_this(), n.addr()); + auto [index, size] = modelAddressToSourceDataAddressList(node.addr().index()); + return SourceDataReferenceCollection(index, size, model.shared_from_this(), node.addr(), model.mpKey_); } -model_ptr -TileFeatureLayer::resolveSourceDataReferenceCollection(const simfil::ModelNode& n) const +template<> +model_ptr resolveInternal(tag, TileFeatureLayer const& model, ModelNode const& node) { - if (n.addr().column() != ColumnId::SourceDataReferenceCollections) - raise("Cannot cast this node to an SourceDataReferenceCollection."); + if (node.addr().column() != TileFeatureLayer::ColumnId::SourceDataReferences) + raise("Cannot cast this node to an SourceDataReferenceItem."); - auto [index, size] = 
modelAddressToSourceDataAddressList(n.addr().index()); - const auto& data = impl_->sourceDataReferences_; - return SourceDataReferenceCollection(index, size, shared_from_this(), n.addr()); + const auto* data = &model.impl_->sourceDataReferences_.at(node.addr().index()); + return SourceDataReferenceItem(data, model.shared_from_this(), node.addr(), model.mpKey_); } -model_ptr -TileFeatureLayer::resolveSourceDataReferenceItem(const simfil::ModelNode& n) const +template<> +model_ptr resolveInternal(tag, TileFeatureLayer const& model, ModelNode const& node) { - if (n.addr().column() != ColumnId::SourceDataReferences) - raise("Cannot cast this node to an SourceDataReferenceItem."); + switch (node.addr().column()) { + case TileFeatureLayer::ColumnId::Validities: + return Validity( + &model.impl_->validities_[node.addr().index()], + model.shared_from_this(), + node.addr(), + model.mpKey_); + case TileFeatureLayer::ColumnId::SimpleValidity: { + if (auto upgraded = model.upgradedSimpleValidityAddress(node.addr())) { + return Validity( + &model.impl_->validities_[upgraded->index()], + model.shared_from_this(), + *upgraded, + model.mpKey_); + } + auto const direction = static_cast(node.addr().index()); + if (direction < Validity::Empty || direction > Validity::None) { + raiseFmt( + "Cannot cast this node to a Validity: invalid simple validity direction value {}.", + node.addr().index()); + } + return Validity(direction, model.shared_from_this(), node.addr(), model.mpKey_); + } + default: + raise("Cannot cast this node to a Validity."); + } +} - const auto* data = &impl_->sourceDataReferences_.at(n.addr().index()); - return SourceDataReferenceItem(data, shared_from_this(), n.addr()); +template<> +model_ptr resolveInternal(tag, TileFeatureLayer const& model, ModelNode const& node) +{ + if (node.addr().column() != TileFeatureLayer::ColumnId::ValidityCollections) + raise("Cannot cast this node to a ValidityCollection."); + return MultiValidity( + model.shared_from_this(), + 
node.addr(), + model.mpKey_); } -tl::expected TileFeatureLayer::resolve(const simfil::ModelNode& n, const simfil::Model::ResolveFn& cb) const +tl::expected TileFeatureLayer::resolve(const ModelNode& n, const simfil::Model::ResolveFn& cb) const { switch (n.addr().column()) { case ColumnId::Features: - cb(*resolveFeature(n)); + cb(*resolve(n)); return {}; case ColumnId::FeatureProperties: - cb(Feature::FeaturePropertyView( - impl_->features_[n.addr().index()], - shared_from_this(), - n.addr() - )); + { + auto rootResult = root(n.addr().index()); + if (!rootResult || !*rootResult) { + raise("FeatureProperties index out of bounds."); + } + cb(Feature::FeaturePropertyView(resolve(**rootResult), mpKey_)); + return {}; + } + case ColumnId::FeatureRelationsView: + cb(*resolve(n)); return {}; case ColumnId::FeatureIds: - cb(*resolveFeatureId(n)); + case ColumnId::ExternalFeatureIds: + cb(*resolve(n)); return {}; case ColumnId::Attributes: - cb(*resolveAttribute(n)); + cb(*resolve(n)); return {}; case ColumnId::AttributeLayers: - cb(*resolveAttributeLayer(n)); + cb(*resolve(n)); return {}; case ColumnId::AttributeLayerLists: - cb(*resolveAttributeLayerList(n)); + cb(*resolve(n)); return {}; case ColumnId::Relations: - cb(*resolveRelation(n)); + cb(*resolve(n)); return {}; case ColumnId::Points: - cb(*resolvePoint(n)); + cb(*resolve(n)); return {}; case ColumnId::PointBuffers: - cb(*resolvePointBuffer(n)); + case ColumnId::PointBuffersView: + cb(*resolve(n)); return {}; - case ColumnId::Geometries: - cb(*resolveGeometry(n)); + case ColumnId::PointGeometries: + case ColumnId::LineGeometries: + case ColumnId::PolygonGeometries: + case ColumnId::MeshGeometries: + case ColumnId::GeometryViews: + cb(*resolve(n)); return {}; case ColumnId::GeometryCollections: - cb(*resolveGeometryCollection(n)); + cb(*resolve(n)); + return {}; + case ColumnId::GeometryArrayView: + cb(*resolve(n)); return {}; case ColumnId::Polygon: - cb(*resolvePolygon(n)); + cb(*resolve(n)); return {}; case 
ColumnId::Mesh: - cb(*resolveMesh(n)); + cb(*resolve(n)); return {}; case ColumnId::MeshTriangleCollection: - cb(*resolveMeshTriangleCollection(n)); + cb(*resolve(n)); return {}; case ColumnId::MeshTriangleLinearRing: - cb(*resolveMeshTriangleLinearRing(n)); + cb(*resolve(n)); return {}; case ColumnId::LinearRing: - cb(*resolveLinearRing(n)); + cb(*resolve(n)); return {}; case ColumnId::SourceDataReferenceCollections: - cb(*resolveSourceDataReferenceCollection(n)); + cb(*resolve(n)); return {}; case ColumnId::SourceDataReferences: - cb(*resolveSourceDataReferenceItem(n)); + cb(*resolve(n)); return {}; case ColumnId::Validities: - cb(*resolveValidity(n)); + case ColumnId::SimpleValidity: + cb(*resolve(n)); return {}; case ColumnId::ValidityPoints: - cb(*resolveValidityPoint(n)); + cb(*resolve(n)); return {}; case ColumnId::ValidityCollections: - cb(*resolveValidityCollection(n)); + cb(*resolve(n)); return {}; } @@ -733,7 +1440,7 @@ TileFeatureLayer::complete(std::string_view query, int point, ModelNode const& n void TileFeatureLayer::setIdPrefix(const KeyValueViewPairs& prefix) { // The prefix must be set, before any feature is added. - if (!impl_->features_.empty()) + if (numRoots() > 0) throw std::runtime_error("Cannot set feature id prefix after a feature was added."); // Check that the prefix is compatible with all primary id composites. 
@@ -758,7 +1465,7 @@ void TileFeatureLayer::setIdPrefix(const KeyValueViewPairs& prefix) } } - auto idPrefix = newObject(prefix.size()); + auto idPrefix = newObject(prefix.size(), true); for (auto const& [k, v] : prefix) { auto&& kk = k; std::visit([&](auto&& x){ @@ -782,6 +1489,7 @@ tl::expected TileFeatureLayer::write(std::ostream& outputSt { TileLayer::write(outputStream); bitsery::Serializer s(outputStream); + s.ext4b(stage_, bitsery::ext::StdOptional{}); impl_->readWrite(s); return ModelPool::write(outputStream); } @@ -794,6 +1502,10 @@ nlohmann::json TileFeatureLayer::toJson() const result["mapgetTileId"] = tileId_.value_; result["mapId"] = mapId_; result["mapgetLayerId"] = layerInfo_->layerId_; + result["geometryAnchor"] = { + impl_->geometryAnchor_.x, + impl_->geometryAnchor_.y, + impl_->geometryAnchor_.z}; // Add ID prefix if set if (impl_->featureIdPrefix_) { @@ -841,17 +1553,365 @@ nlohmann::json TileFeatureLayer::toJson() const return result; } +nlohmann::json TileFeatureLayer::serializationSizeStats() const +{ + auto featureLayer = nlohmann::json::object(); + + featureLayer["features"] = impl_->features_.byte_size(); + featureLayer["feature-complex-data"] = impl_->complexFeatureData_.byte_size(); + featureLayer["feature-complex-data-refs"] = impl_->complexFeatureDataRefs_.byte_size(); + featureLayer["attributes"] = impl_->attributes_.byte_size(); + featureLayer["validities"] = impl_->validities_.byte_size(); + featureLayer["feature-ids"] = impl_->featureIds_.byte_size(); + featureLayer["attribute-layers"] = impl_->attrLayers_.byte_size(); + featureLayer["attribute-layer-lists"] = impl_->attrLayerLists_.byte_size(); + featureLayer["relations"] = impl_->relations_.byte_size(); + featureLayer["feature-hash-index"] = impl_->featureHashIndex_.byte_size(); + featureLayer["point-geometries"] = 0; + featureLayer["geometries"] = impl_->geomViews_.byte_size(); + featureLayer["geometry-source-data-references"] = impl_->geomSourceDataRefs_.byte_size(); + 
featureLayer["geometry-stages"] = impl_->geomStages_.byte_size(); + featureLayer["geometry-views"] = impl_->geomViews_.byte_size(); + featureLayer["point-buffers"] = impl_->pointBuffers_.byte_size(); + featureLayer["source-data-references"] = impl_->sourceDataReferences_.byte_size(); + + auto singletonStatsToJson = [](auto const& stats) { + return nlohmann::json::object({ + {"handles", stats.handleCount}, + {"occupied", stats.occupiedCount}, + {"empty", stats.emptyCount}, + {"singleton-storage-bytes", stats.singletonStorageBytes}, + {"hypothetical-regular-bytes", stats.hypotheticalRegularBytes}, + {"estimated-saved-bytes", stats.estimatedSavedBytes}, + }); + }; + + auto pointBufferSingletonStats = impl_->pointBuffers_.singleton_stats(); + auto objectMemberSingletonStats = objectMemberStorage().singleton_stats(); + auto arrayMemberSingletonStats = arrayMemberStorage().singleton_stats(); + auto arrayArenaSingletons = nlohmann::json::object({ + {"point-buffers", singletonStatsToJson(pointBufferSingletonStats)}, + {"object-members", singletonStatsToJson(objectMemberSingletonStats)}, + {"array-members", singletonStatsToJson(arrayMemberSingletonStats)}, + }); + + auto geometryUsage = nlohmann::json::object({ + {"total", 0}, + {"base", 0}, + {"view", 0}, + {"with-source-data-references", 0}, + {"base-vertex-buffer-allocated", 0}, + {"base-vertex-buffer-unallocated", 0}, + {"by-type", nlohmann::json::object({ + {"points", 0}, + {"line", 0}, + {"polygon", 0}, + {"mesh", 0}, + })}, + {"base-by-type", nlohmann::json::object({ + {"points", 0}, + {"line", 0}, + {"polygon", 0}, + {"mesh", 0}, + })}, + {"view-by-type", nlohmann::json::object({ + {"points", 0}, + {"line", 0}, + {"polygon", 0}, + {"mesh", 0}, + })}, + }); + + auto validityUsage = nlohmann::json::object({ + {"total", 0}, + {"simple-column", 0}, + {"direction-only", 0}, + {"with-direction", 0}, + {"with-geometry-stage", 0}, + {"with-feature-id", 0}, + {"simple-geometry-with-address", 0}, + {"by-direction", 
nlohmann::json::object({ + {"empty", 0}, + {"positive", 0}, + {"negative", 0}, + {"both", 0}, + {"none", 0}, + })}, + {"by-geometry-description", nlohmann::json::object({ + {"none", 0}, + {"simple-geometry", 0}, + {"offset-point", 0}, + {"offset-range", 0}, + })}, + {"by-offset-type", nlohmann::json::object({ + {"invalid", 0}, + {"geo-pos", 0}, + {"buffer", 0}, + {"relative-length", 0}, + {"metric-length", 0}, + })}, + }); + + auto featureLodUsage = nlohmann::json::object(); + for (uint32_t lod = 0; lod <= static_cast(Feature::MAX_LOD); ++lod) { + featureLodUsage[fmt::format("lod_{}", lod)] = 0; + } + + auto increment = [](nlohmann::json& obj, std::string_view key) { + obj[std::string(key)] = obj[std::string(key)].get() + 1; + }; + + auto geometryTypeKey = [](GeomType type) -> const char* { + switch (type) { + case GeomType::Points: return "points"; + case GeomType::Line: return "line"; + case GeomType::Polygon: return "polygon"; + case GeomType::Mesh: return "mesh"; + } + return "points"; + }; + + std::unordered_set countedGeometryAddresses; + auto countGeometryAddress = [&](auto&& self, simfil::ModelNodeAddress geometryAddress) -> void + { + if (!geometryAddress) { + return; + } + + if (geometryAddress.column() == ColumnId::GeometryCollections) { + auto collection = resolve(geometryAddress); + if (!collection) { + return; + } + collection->forEachGeometry( + [&](auto&& geometry) + { + self(self, geometry->addr()); + return true; + }); + return; + } + + if (!isBaseGeometryColumn(geometryAddress.column()) && + geometryAddress.column() != ColumnId::GeometryViews) { + return; + } + + if (!countedGeometryAddresses.insert(geometryAddress.value_).second) { + return; + } + + auto geometryType = geometryAddress.column() == ColumnId::GeometryViews + ? 
impl_->geomViews_.at(geometryAddress.index()).type_ + : geometryTypeForColumn(geometryAddress.column()); + auto typeKey = geometryTypeKey(geometryType); + + increment(geometryUsage, "total"); + increment(geometryUsage["by-type"], typeKey); + if (geometrySourceDataReferences(geometryAddress)) { + increment(geometryUsage, "with-source-data-references"); + } + + if (geometryAddress.column() == ColumnId::GeometryViews) { + increment(geometryUsage, "view"); + increment(geometryUsage["view-by-type"], typeKey); + return; + } + + increment(geometryUsage, "base"); + increment(geometryUsage["base-by-type"], typeKey); + increment(geometryUsage, "base-vertex-buffer-allocated"); + }; + + for (auto const& feature : *this) { + if (auto geometry = feature->geomOrNull()) { + geometry->forEachGeometry( + [&](auto&& geometryEntry) + { + countGeometryAddress(countGeometryAddress, geometryEntry->addr()); + return true; + }); + } + } + + for (auto const& featureData : impl_->features_) { + auto const lod = std::min( + featureData.typeIdAndLod_.lod_, + static_cast(Feature::MAX_LOD)); + increment(featureLodUsage, fmt::format("lod_{}", lod)); + } + + for (auto const& validity : impl_->validities_) { + increment(validityUsage, "total"); + + switch (validity.direction_) { + case Validity::Empty: increment(validityUsage["by-direction"], "empty"); break; + case Validity::Positive: increment(validityUsage["by-direction"], "positive"); break; + case Validity::Negative: increment(validityUsage["by-direction"], "negative"); break; + case Validity::Both: increment(validityUsage["by-direction"], "both"); break; + case Validity::None: increment(validityUsage["by-direction"], "none"); break; + } + + if (validity.direction_ != Validity::Empty) { + increment(validityUsage, "with-direction"); + } + if (validity.referencedStage_ != ValidityData::InvalidReferencedStage) { + increment(validityUsage, "with-geometry-stage"); + } + if (validity.featureAddress_) { + increment(validityUsage, "with-feature-id"); 
+ } + + switch (validity.geomDescrType_) { + case Validity::NoGeometry: + increment(validityUsage["by-geometry-description"], "none"); + break; + case Validity::SimpleGeometry: + increment(validityUsage["by-geometry-description"], "simple-geometry"); + if (validity.geomDescr_.simpleGeometry_) { + increment(validityUsage, "simple-geometry-with-address"); + } + break; + case Validity::OffsetPointValidity: + increment(validityUsage["by-geometry-description"], "offset-point"); + break; + case Validity::OffsetRangeValidity: + increment(validityUsage["by-geometry-description"], "offset-range"); + break; + case Validity::FeatureTransition: + increment(validityUsage["by-geometry-description"], "feature-transition"); + break; + } + + switch (validity.geomOffsetType_) { + case Validity::InvalidOffsetType: + increment(validityUsage["by-offset-type"], "invalid"); + break; + case Validity::GeoPosOffset: + increment(validityUsage["by-offset-type"], "geo-pos"); + break; + case Validity::BufferOffset: + increment(validityUsage["by-offset-type"], "buffer"); + break; + case Validity::RelativeLengthOffset: + increment(validityUsage["by-offset-type"], "relative-length"); + break; + case Validity::MetricLengthOffset: + increment(validityUsage["by-offset-type"], "metric-length"); + break; + } + + if (validity.direction_ != Validity::Empty && + validity.geomDescrType_ == Validity::NoGeometry && + validity.geomOffsetType_ == Validity::InvalidOffsetType && + validity.referencedStage_ == ValidityData::InvalidReferencedStage && + !validity.featureAddress_) { + increment(validityUsage, "direction-only"); + } + } + + auto countSimpleValidity = [&](Validity::Direction direction) { + increment(validityUsage, "total"); + increment(validityUsage, "simple-column"); + + switch (direction) { + case Validity::Empty: increment(validityUsage["by-direction"], "empty"); break; + case Validity::Positive: increment(validityUsage["by-direction"], "positive"); break; + case Validity::Negative: 
increment(validityUsage["by-direction"], "negative"); break; + case Validity::Both: increment(validityUsage["by-direction"], "both"); break; + case Validity::None: increment(validityUsage["by-direction"], "none"); break; + } + if (direction != Validity::Empty) { + increment(validityUsage, "with-direction"); + } + + increment(validityUsage["by-geometry-description"], "none"); + increment(validityUsage["by-offset-type"], "invalid"); + increment(validityUsage, "direction-only"); + }; + + std::unordered_set seenValidityCollections; + auto collectSimpleValidities = [&](simfil::ModelNodeAddress const& validityCollectionAddress) { + if (!validityCollectionAddress) { + return; + } + if (!seenValidityCollections.insert(validityCollectionAddress.value_).second) { + return; + } + + auto collection = resolve(validityCollectionAddress); + if (!collection) { + return; + } + + for (auto const& validity : *collection) { + auto const validityAddress = validity->addr(); + if (validityAddress.column() != ColumnId::SimpleValidity) { + continue; + } + if (validityAddress.index() > static_cast(Validity::None)) { + continue; + } + countSimpleValidity(static_cast(validityAddress.index())); + } + }; + + for (auto const& attribute : impl_->attributes_) { + collectSimpleValidities(attribute.validities_); + } + for (auto const& relation : impl_->relations_) { + collectSimpleValidities(relation.sourceValidity_); + collectSimpleValidities(relation.targetValidity_); + } + + int64_t featureLayerTotal = 0; + for (const auto& [_, value] : featureLayer.items()) { + if (value.is_number_integer()) + featureLayerTotal += value.get(); + } + + auto modelStats = ModelPool::serializationSizeStats(); + auto modelPool = nlohmann::json::object({ + {"roots", static_cast(modelStats.rootsBytes)}, + {"int64", static_cast(modelStats.int64Bytes)}, + {"double", static_cast(modelStats.doubleBytes)}, + {"string-data", static_cast(modelStats.stringDataBytes)}, + {"string-ranges", 
static_cast(modelStats.stringRangeBytes)}, + {"object-members", static_cast(modelStats.objectMemberBytes)}, + {"array-members", static_cast(modelStats.arrayMemberBytes)}, + }); + + int64_t modelPoolTotal = static_cast(modelStats.totalBytes()); + + return { + {"feature-layer", featureLayer}, + {"geometry-usage", geometryUsage}, + {"feature-lod-usage", featureLodUsage}, + {"validity-usage", validityUsage}, + {"array-arena-singletons", arrayArenaSingletons}, + {"model-pool", modelPool}, + {"feature-layer-total-bytes", featureLayerTotal}, + {"model-pool-total-bytes", modelPoolTotal}, + {"total-bytes", featureLayerTotal + modelPoolTotal} + }; +} + size_t TileFeatureLayer::size() const { return numRoots(); } +uint64_t TileFeatureLayer::numVertices() const +{ + return static_cast(impl_->pointBuffers_.byte_size() / sizeof(glm::vec3)); +} + model_ptr TileFeatureLayer::at(size_t i) const { auto rootResult = root(i); if (!rootResult) return {}; - return resolveFeature(**rootResult); + return resolve(**rootResult); } model_ptr @@ -859,19 +1919,19 @@ TileFeatureLayer::find(const std::string_view& type, const KeyValueViewPairs& qu { auto const& primaryIdComposition = getPrimaryIdComposition(type); auto queryIdPartsStripped = stripOptionalIdParts(queryIdParts, primaryIdComposition); - auto hash = Hash().mix(type).mix(queryIdPartsStripped).value(); + auto hash = static_cast(Hash().mix(type).mix(queryIdPartsStripped).value()); impl_->sortFeatureHashIndex(); auto it = std::lower_bound( impl_->featureHashIndex_.begin(), impl_->featureHashIndex_.end(), - Impl::FeatureAddrWithIdHash{0, hash}, + FeatureAddrWithIdHash{ModelNodeAddress{0, 0}, hash}, [](auto&& l, auto&& r) { return l.idHash_ < r.idHash_; }); // Iterate through potential matches to handle hash collisions. 
while (it != impl_->featureHashIndex_.end() && it->idHash_ == hash) { - auto feature = resolveFeature(*simfil::ModelNode::Ptr::make(shared_from_this(), it->featureAddr_)); + auto feature = resolve(it->featureAddr_); if (feature->id()->typeId() == type) { auto featureIdParts = stripOptionalIdParts(feature->id()->keyValuePairs(), primaryIdComposition); // Ensure that ID parts match exactly, not just the hash. @@ -940,18 +2000,18 @@ TileFeatureLayer::setStrings(std::shared_ptr const& newDict) return tl::unexpected(res.error()); } } - for (auto& validity : impl_->validities_) { - if (auto resolvedName = strings()->resolve(validity.referencedGeomName_)) { + for (auto& fid : impl_->featureIds_) { + if (auto resolvedName = oldDict->resolve(fid.typeId_)) { if (auto res = newDict->emplace(*resolvedName)) - validity.referencedGeomName_ = *res; + fid.typeId_ = *res; else return tl::unexpected(res.error()); } } - for (auto& fid : impl_->featureIds_) { - if (auto resolvedName = oldDict->resolve(fid.typeId_)) { + for (auto& feature : impl_->features_) { + if (auto resolvedName = oldDict->resolve(feature.typeIdAndLod_.typeId_)) { if (auto res = newDict->emplace(*resolvedName)) - fid.typeId_ = *res; + feature.typeIdAndLod_.typeId_ = *res; else return tl::unexpected(res.error()); } @@ -967,10 +2027,10 @@ TileFeatureLayer::setStrings(std::shared_ptr const& newDict) return {}; } -simfil::ModelNode::Ptr TileFeatureLayer::clone( - std::unordered_map& cache, +ModelNode::Ptr TileFeatureLayer::clone( + std::unordered_map& cache, const TileFeatureLayer::Ptr& otherLayer, - const simfil::ModelNode::Ptr& otherNode) + const ModelNode::Ptr& otherNode) { auto it = cache.find(otherNode->addr().value_); if (it != cache.end()) { @@ -981,8 +2041,8 @@ simfil::ModelNode::Ptr TileFeatureLayer::clone( ModelNode::Ptr& newCacheNode = cache[otherNode->addr().value_]; switch (otherNode->addr().column()) { case Objects: { - auto resolved = otherLayer->resolveObject(otherNode); - auto newNode = 
newObject(resolved->size()); + auto resolved = otherLayer->resolve(otherNode); + auto newNode = newObject(resolved->size(), true); newCacheNode = newNode; for (auto [key, value] : resolved->fields()) { if (auto keyStr = otherLayer->strings()->resolve(key)) { @@ -992,20 +2052,32 @@ simfil::ModelNode::Ptr TileFeatureLayer::clone( break; } case Arrays: { - auto resolved = otherLayer->resolveArray(otherNode); - auto newNode = newArray(resolved->size()); + auto resolved = otherLayer->resolve(otherNode); + auto newNode = newArray(resolved->size(), true); newCacheNode = newNode; for (auto value : *resolved) { newNode->append(clone(cache, otherLayer, value)); } break; } - case ColumnId::Geometries: { + case ColumnId::GeometryArrayView: { + auto resolved = otherLayer->resolve(*otherNode); + auto newNode = newArray(resolved->size(), true); + newCacheNode = newNode; + for (auto value : *resolved) { + newNode->append(clone(cache, otherLayer, value)); + } + break; + } + case ColumnId::PointGeometries: + case ColumnId::LineGeometries: + case ColumnId::PolygonGeometries: + case ColumnId::MeshGeometries: + case ColumnId::GeometryViews: { // TODO: This implementation is not great, because it does not respect // Geometry views - it just converts every Geometry to a self-contained one. - // TODO: Clone geometry name. 
- auto resolved = otherLayer->resolveGeometry(*otherNode); - auto newNode = newGeometry(resolved->geomType(), resolved->numPoints()); + auto resolved = otherLayer->resolve(*otherNode); + auto newNode = newGeometry(resolved->geomType(), resolved->numPoints(), true); newCacheNode = newNode; resolved->forEachPoint( [&newNode](auto&& pt) @@ -1013,16 +2085,22 @@ simfil::ModelNode::Ptr TileFeatureLayer::clone( newNode->append(pt); return true; }); + if (auto geometryStage = resolved->stage()) { + if (*geometryStage > std::numeric_limits::max()) { + raiseFmt("Geometry stage {} exceeds uint8_t range during clone.", *geometryStage); + } + setGeometryStage(newNode->addr(), static_cast(*geometryStage)); + } break; } case ColumnId::GeometryCollections: { - auto resolved = otherLayer->resolveGeometryCollection(*otherNode); - auto newNode = newGeometryCollection(resolved->numGeometries()); + auto resolved = otherLayer->resolve(*otherNode); + auto newNode = newGeometryCollection(resolved->numGeometries(), true); newCacheNode = newNode; resolved->forEachGeometry( [this, &newNode, &cache, &otherLayer](auto&& geom) { - newNode->addGeometry(resolveGeometry(*clone(cache, otherLayer, geom))); + newNode->addGeometry(resolve(*clone(cache, otherLayer, geom))); return true; }); break; @@ -1055,19 +2133,34 @@ simfil::ModelNode::Ptr TileFeatureLayer::clone( case ColumnId::FeatureProperties: { raise("Cannot clone entire feature yet."); } + case ColumnId::FeatureRelationsView: { + auto resolved = otherLayer->resolve(*otherNode); + auto newNode = newArray(resolved->size(), true); + newCacheNode = newNode; + for (auto value : *resolved) { + newNode->append(clone(cache, otherLayer, value)); + } + break; + } case ColumnId::FeatureIds: { - auto resolved = otherLayer->resolveFeatureId(*otherNode); + auto resolved = otherLayer->resolve(*otherNode); + auto newNode = newFeatureId(resolved->typeId(), resolved->keyValuePairs()); + newCacheNode = newNode; + break; + } + case ColumnId::ExternalFeatureIds: 
{ + auto resolved = otherLayer->resolve(*otherNode); auto newNode = newFeatureId(resolved->typeId(), resolved->keyValuePairs()); newCacheNode = newNode; break; } case ColumnId::Attributes: { - auto resolved = otherLayer->resolveAttribute(*otherNode); + auto resolved = otherLayer->resolve(*otherNode); auto newNode = newAttribute(resolved->name()); newCacheNode = newNode; if (resolved->validityOrNull()) { newNode->setValidity( - resolveValidityCollection(*clone(cache, otherLayer, resolved->validityOrNull()))); + resolve(*clone(cache, otherLayer, resolved->validityOrNull()))); } resolved->forEachField( [this, &newNode, &cache, &otherLayer](auto&& key, auto&& value) @@ -1078,7 +2171,7 @@ simfil::ModelNode::Ptr TileFeatureLayer::clone( break; } case ColumnId::Validities: { - auto resolved = otherLayer->resolveValidity(*otherNode); + auto resolved = otherLayer->resolve(*otherNode); auto newNode = newValidity(); newCacheNode = newNode; newNode->setDirection(resolved->direction()); @@ -1086,7 +2179,8 @@ simfil::ModelNode::Ptr TileFeatureLayer::clone( case Validity::NoGeometry: break; case Validity::SimpleGeometry: - newNode->setSimpleGeometry(resolveGeometry(*clone(cache, otherLayer, resolved->simpleGeometry()))); + newNode->setSimpleGeometry(resolve( + *clone(cache, otherLayer, resolved->simpleGeometry()))); break; case Validity::OffsetPointValidity: if (resolved->geometryOffsetType() == Validity::GeoPosOffset) { @@ -1104,58 +2198,77 @@ simfil::ModelNode::Ptr TileFeatureLayer::clone( newNode->setOffsetRange(resolved->geometryOffsetType(), resolved->offsetRange()->first.x, resolved->offsetRange()->second.x); } break; + case Validity::FeatureTransition: + newNode->setFeatureTransition( + resolve(*clone(cache, otherLayer, resolved->transitionFromFeature())), + *resolved->transitionFromConnectedEnd(), + resolve(*clone(cache, otherLayer, resolved->transitionToFeature())), + *resolved->transitionToConnectedEnd(), + *resolved->transitionNumber()); + break; } break; } + case 
ColumnId::SimpleValidity: { + auto resolved = otherLayer->resolve(*otherNode); + newCacheNode = model_ptr::make( + shared_from_this(), + ModelNodeAddress{ + ColumnId::SimpleValidity, + static_cast(resolved->direction())}); + break; + } case ColumnId::ValidityCollections: { - auto resolved = otherLayer->resolveValidityCollection(*otherNode); - auto newNode = newValidityCollection(resolved->size()); + auto resolved = otherLayer->resolve(*otherNode); + auto newNode = newValidityCollection(resolved->size(), true); newCacheNode = newNode; for (auto value : *resolved) { - newNode->append(resolveValidity(*clone(cache, otherLayer, value))); + newNode->append(resolve(*clone(cache, otherLayer, value))); } break; } case ColumnId::AttributeLayers: { - auto resolved = otherLayer->resolveAttributeLayer(*otherNode); - auto newNode = newAttributeLayer(resolved->size()); + auto resolved = otherLayer->resolve(*otherNode); + auto newNode = newAttributeLayer(resolved->size(), true); newCacheNode = newNode; for (auto [key, value] : resolved->fields()) { if (auto keyStr = otherLayer->strings()->resolve(key)) { - newNode->addField(*keyStr, clone(cache, otherLayer, value)); + auto cloned = clone(cache, otherLayer, value); + newNode->addField(*keyStr, resolve(*cloned)); } } break; } case ColumnId::AttributeLayerLists: { - auto resolved = otherLayer->resolveAttributeLayerList(*otherNode); - auto newNode = newAttributeLayers(resolved->size()); + auto resolved = otherLayer->resolve(*otherNode); + auto newNode = newAttributeLayers(resolved->size(), true); newCacheNode = newNode; for (auto [key, value] : resolved->fields()) { if (auto keyStr = otherLayer->strings()->resolve(key)) { - newNode->addField(*keyStr, clone(cache, otherLayer, value)); + auto cloned = clone(cache, otherLayer, value); + newNode->addLayer(*keyStr, resolve(*cloned)); } } break; } case ColumnId::Relations: { - auto resolved = otherLayer->resolveRelation(*otherNode); + auto resolved = otherLayer->resolve(*otherNode); auto 
newNode = newRelation( resolved->name(), - resolveFeatureId(*clone(cache, otherLayer, resolved->target()))); + resolve(*clone(cache, otherLayer, resolved->target()))); if (resolved->sourceValidityOrNull()) { - newNode->setSourceValidity(resolveValidityCollection( + newNode->setSourceValidity(resolve( *clone(cache, otherLayer, resolved->sourceValidityOrNull()))); } if (resolved->targetValidityOrNull()) { - newNode->setTargetValidity(resolveValidityCollection( + newNode->setTargetValidity(resolve( *clone(cache, otherLayer, resolved->targetValidityOrNull()))); } newCacheNode = newNode; break; } case ColumnId::SourceDataReferenceCollections: { - auto resolved = otherLayer->resolveSourceDataReferenceCollection(*otherNode); + auto resolved = otherLayer->resolve(*otherNode); auto items = std::vector( otherLayer->impl_->sourceDataReferences_.begin() + resolved->offset_, otherLayer->impl_->sourceDataReferences_.begin() + resolved->offset_ + resolved->size_); @@ -1169,11 +2282,12 @@ simfil::ModelNode::Ptr TileFeatureLayer::clone( case ColumnId::Polygon: case ColumnId::LinearRing: case ColumnId::PointBuffers: + case ColumnId::PointBuffersView: case ColumnId::SourceDataReferences: case ColumnId::ValidityPoints: raiseFmt("Encountered unexpected column type {} in clone().", otherNode->addr().column()); default: { - newCacheNode = ModelNode::Ptr::make(shared_from_this(), otherNode->addr()); + newCacheNode = resolve(otherNode->addr()); } } cache.insert({otherNode->addr().value_, newCacheNode}); @@ -1181,7 +2295,7 @@ simfil::ModelNode::Ptr TileFeatureLayer::clone( } void TileFeatureLayer::clone( - std::unordered_map& clonedModelNodes, + std::unordered_map& clonedModelNodes, const TileFeatureLayer::Ptr& otherLayer, const Feature& otherFeature, const std::string_view& type, @@ -1198,7 +2312,7 @@ void TileFeatureLayer::clone( } auto lookupOrClone = - [&](simfil::ModelNode::Ptr const& n) -> simfil::ModelNode::Ptr + [&](ModelNode::Ptr const& n) -> ModelNode::Ptr { return 
clone(clonedModelNodes, otherLayer, n); }; @@ -1218,7 +2332,7 @@ void TileFeatureLayer::clone( auto baseAttrLayers = cloneTarget->attributeLayers(); for (auto const& [key, value] : attrLayers->fields()) { if (auto keyStr = otherLayer->strings()->resolve(key)) { - baseAttrLayers->addField(*keyStr, lookupOrClone(value)); + baseAttrLayers->addLayer(*keyStr, resolve(*lookupOrClone(value))); } } } @@ -1230,7 +2344,7 @@ void TileFeatureLayer::clone( [this, &baseGeom, &lookupOrClone](auto&& geomElement) { baseGeom->addGeometry( - resolveGeometry(*lookupOrClone(geomElement))); + resolve(*lookupOrClone(geomElement))); return true; }); } @@ -1240,7 +2354,7 @@ void TileFeatureLayer::clone( otherFeature.forEachRelation( [this, &cloneTarget, &lookupOrClone](auto&& rel) { - auto newRel = resolveRelation(*lookupOrClone(rel)); + auto newRel = resolve(*lookupOrClone(rel)); cloneTarget->addRelation(newRel); return true; }); @@ -1252,6 +2366,98 @@ Geometry::Storage& TileFeatureLayer::vertexBufferStorage() return impl_->pointBuffers_; } +Geometry::ViewData const* TileFeatureLayer::geometryViewData(simfil::ModelNodeAddress address) const +{ + if (address.column() != ColumnId::GeometryViews) { + return nullptr; + } + return &impl_->geomViews_.at(address.index()); +} + +std::optional TileFeatureLayer::geometryStage(simfil::ModelNodeAddress address) const +{ + switch (address.column()) { + case ColumnId::PointGeometries: + case ColumnId::LineGeometries: + case ColumnId::PolygonGeometries: + case ColumnId::MeshGeometries: { + auto const compactIndex = extraGeometryDataStorageIndex(address.index()); + if (auto storedStage = geometryStageAt(impl_->geomStages_, compactIndex)) { + return storedStage; + } + auto fallbackStage = stage_.value_or(layerInfo_ ? 
layerInfo_->highFidelityStage_ : 0U); + if (fallbackStage > std::numeric_limits::max()) { + raiseFmt("Geometry stage {} exceeds uint8_t range.", fallbackStage); + } + return static_cast(fallbackStage); + } + case ColumnId::GeometryViews: + return geometryStage(impl_->geomViews_.at(address.index()).baseGeometry_); + default: + return std::nullopt; + } +} + +void TileFeatureLayer::setGeometryStage( + simfil::ModelNodeAddress address, + std::optional stage) +{ + switch (address.column()) { + case ColumnId::PointGeometries: + case ColumnId::LineGeometries: + case ColumnId::PolygonGeometries: + case ColumnId::MeshGeometries: { + auto const compactIndex = extraGeometryDataStorageIndex(address.index()); + ensureGeometryStageCapacity(impl_->geomStages_, compactIndex); + impl_->geomStages_.at(compactIndex) = stage.value_or(InvalidGeometryStage); + break; + } + case ColumnId::GeometryViews: + break; + default: + break; + } +} + +simfil::ModelNodeAddress TileFeatureLayer::geometrySourceDataReferences(simfil::ModelNodeAddress address) const +{ + switch (address.column()) { + case ColumnId::PointGeometries: + case ColumnId::LineGeometries: + case ColumnId::PolygonGeometries: + case ColumnId::MeshGeometries: { + auto const compactIndex = extraGeometryDataStorageIndex(address.index()); + return geometrySourceRefsAt(impl_->geomSourceDataRefs_, compactIndex); + } + case ColumnId::GeometryViews: + return impl_->geomViews_.at(address.index()).sourceDataReferences_; + default: + return {}; + } +} + +void TileFeatureLayer::setGeometrySourceDataReferences( + simfil::ModelNodeAddress address, + simfil::ModelNodeAddress refsAddress) +{ + switch (address.column()) { + case ColumnId::PointGeometries: + case ColumnId::LineGeometries: + case ColumnId::PolygonGeometries: + case ColumnId::MeshGeometries: { + auto const compactIndex = extraGeometryDataStorageIndex(address.index()); + ensureGeometrySourceRefCapacity(impl_->geomSourceDataRefs_, compactIndex); + 
impl_->geomSourceDataRefs_.at(compactIndex) = refsAddress; + break; + } + case ColumnId::GeometryViews: + impl_->geomViews_.at(address.index()).sourceDataReferences_ = refsAddress; + break; + default: + break; + } +} + model_ptr TileFeatureLayer::find(const std::string_view& featureId) const { using namespace std::ranges; diff --git a/libs/model/src/geometry.cpp b/libs/model/src/geometry.cpp index d0a59d30..350cef3a 100644 --- a/libs/model/src/geometry.cpp +++ b/libs/model/src/geometry.cpp @@ -9,6 +9,8 @@ #include "mapget/log.h" #include +#include +#include #include #include #include @@ -60,10 +62,62 @@ namespace mapget using namespace simfil; +namespace +{ +bool isBaseGeometryColumn(uint8_t column) +{ + using Col = TileFeatureLayer::ColumnId; + return column == Col::PointGeometries || + column == Col::LineGeometries || + column == Col::PolygonGeometries || + column == Col::MeshGeometries; +} + +GeomType geometryTypeForColumn(uint8_t column) +{ + using Col = TileFeatureLayer::ColumnId; + switch (column) { + case Col::PointGeometries: + return GeomType::Points; + case Col::LineGeometries: + return GeomType::Line; + case Col::PolygonGeometries: + return GeomType::Polygon; + case Col::MeshGeometries: + return GeomType::Mesh; + default: + raiseFmt("Unexpected geometry column {}.", column); + return GeomType::Points; + } +} + +std::optional geometryNameForStage( + TileFeatureLayer const& model, + std::optional geometryStage) +{ + if (!geometryStage || !model.layerInfo()) { + return std::nullopt; + } + auto const& layerInfo = *model.layerInfo(); + if (*geometryStage <= layerInfo.highFidelityStage_) { + return std::nullopt; + } + if (*geometryStage >= layerInfo.stageLabels_.size()) { + return std::nullopt; + } + auto const& label = layerInfo.stageLabels_.at(*geometryStage); + if (label.empty()) { + return std::nullopt; + } + return label; +} + +} + /** Model node impls. 
for GeometryCollection */ -GeometryCollection::GeometryCollection(ModelConstPtr pool_, ModelNodeAddress a) - : simfil::MandatoryDerivedModelNodeBase(std::move(pool_), a) +GeometryCollection::GeometryCollection(ModelConstPtr pool_, ModelNodeAddress a, simfil::detail::mp_key key) + : MergedArrayView(std::move(pool_), a, key) {} ValueType GeometryCollection::type() const { @@ -73,8 +127,8 @@ ValueType GeometryCollection::type() const { ModelNode::Ptr GeometryCollection::at(int64_t i) const { if (auto singleGeomEntry = singleGeom()) return singleGeomEntry->at(i); - if (i == 0) return ValueNode(GeometryCollectionStr, model_); - if (i == 1) return ModelNode::Ptr::make(model_, ModelNodeAddress{simfil::ModelPool::Arrays, addr_.index()}); + if (i == 0) return model_ptr::make(GeometryCollectionStr, model_); + if (i == 1) return mergedGeometryArray(); throw std::out_of_range("geom collection: Out of range."); } @@ -88,7 +142,7 @@ ModelNode::Ptr GeometryCollection::get(const StringId& f) const { if (auto singleGeomEntry = singleGeom()) return singleGeomEntry->get(f); if (f == StringPool::TypeStr) return at(0); - if (f == StringPool::GeometriesStr) return at(1); + if (f == StringPool::GeometriesStr) return mergedGeometryArray(); return {}; } @@ -97,14 +151,20 @@ StringId GeometryCollection::keyAt(int64_t i) const { return singleGeomEntry->keyAt(i); if (i == 0) return StringPool::TypeStr; if (i == 1) return StringPool::GeometriesStr; - if (i == 1) return StringPool::SourceDataStr; throw std::out_of_range("geom collection: Out of range."); } -model_ptr GeometryCollection::newGeometry(GeomType type, size_t initialCapacity) { - auto result = model().newGeometry(type, initialCapacity); - auto arrayPtr = ModelNode::Ptr::make(model_, ModelNodeAddress{simfil::ModelPool::Arrays, addr_.index()}); - model().resolveArray(arrayPtr)->append(result); +model_ptr GeometryCollection::newGeometry( + GeomType type, + size_t initialCapacity, + bool fixedSize) +{ + if (addr_.column() != 
TileFeatureLayer::ColumnId::GeometryCollections) { + raise("Cannot append to a single-geometry view."); + } + auto result = model().newGeometry(type, initialCapacity, fixedSize); + auto array = model().resolve(ModelNodeAddress{simfil::ModelPool::Arrays, addr_.index()}); + array->append(result); return result; } @@ -119,28 +179,186 @@ bool GeometryCollection::iterate(const IterCallback& cb) const ModelNode::Ptr GeometryCollection::singleGeom() const { + if (extension()) { + return {}; + } + if (isBaseGeometryColumn(addr_.column()) || + addr_.column() == TileFeatureLayer::ColumnId::GeometryViews) { + return model().resolve(addr_); + } if (model().arrayMemberStorage().size((ArrayIndex)addr_.index()) == 1) { - auto arrayPtr = ModelNode::Ptr::make(model_, ModelNodeAddress{simfil::ModelPool::Arrays, addr_.index()}); - return model().resolveArray(arrayPtr)->at(0); + auto array = model().resolve(ModelNodeAddress{simfil::ModelPool::Arrays, addr_.index()}); + return array->at(0); } return {}; } void GeometryCollection::addGeometry(const model_ptr& geom) { - auto arrayPtr = ModelNode::Ptr::make(model_, ModelNodeAddress{simfil::ModelPool::Arrays, addr_.index()}); - model().resolveArray(arrayPtr)->append(ModelNode::Ptr(geom)); + if (addr_.column() != TileFeatureLayer::ColumnId::GeometryCollections) { + raise("Cannot append to a single-geometry view."); + } + auto array = model().resolve(ModelNodeAddress{simfil::ModelPool::Arrays, addr_.index()}); + array->append(ModelNode::Ptr(geom)); } size_t GeometryCollection::numGeometries() const { - return model().arrayMemberStorage().size((ArrayIndex)addr().index()); + auto result = localMergedSize(); + if (auto ext = extension()) { + result += ext->numGeometries(); + } + return result; +} + +std::optional GeometryCollection::preferredGeometryStage( + std::optional stageOverride) const +{ + if (stageOverride) { + return stageOverride; + } + + std::optional preferredStage; + forEachGeometry([&](model_ptr const& geom) { + preferredStage = 
geom->model().layerInfo()->highFidelityStage_; + return false; + }); + return preferredStage; +} + +model_ptr GeometryCollection::geometryOfTypeAtPreferredStage( + GeomType type, + std::optional stageOverride) const +{ + auto const preferredStage = preferredGeometryStage(stageOverride); + model_ptr result; + if (!preferredStage) { + return result; + } + + forEachGeometry([&](model_ptr const& geom) { + if (geom->geomType() != type) { + return true; + } + auto const geometryStage = geom->stage().value_or(0U); + if (geometryStage != *preferredStage) { + return true; + } + result = geom; + return false; + }); + return result; +} + +ModelNode::Ptr GeometryCollection::localGeometryAt(int64_t i) const +{ + if (i < 0) { + return {}; + } + if (isBaseGeometryColumn(addr_.column()) || + addr_.column() == TileFeatureLayer::ColumnId::GeometryViews) { + if (i == 0) { + return model().resolve(addr_); + } + return {}; + } + auto array = model().resolve(ModelNodeAddress{simfil::ModelPool::Arrays, addr_.index()}); + if (i >= static_cast(array->size())) { + return {}; + } + return array->at(i); +} + +model_ptr GeometryCollection::mergedGeometryArray() const +{ + auto result = (isBaseGeometryColumn(addr_.column()) || + addr_.column() == TileFeatureLayer::ColumnId::GeometryViews) + ? 
model_ptr::make( + model_, + ModelNodeAddress{TileFeatureLayer::ColumnId::GeometryArrayView, addr_.index()}, + addr_) + : model_ptr::make( + model_, + ModelNodeAddress{TileFeatureLayer::ColumnId::GeometryArrayView, addr_.index()}); + if (auto ext = extension()) { + result->setExtension(ext->mergedGeometryArray()); + } else { + result->setExtension({}); + } + return result; +} + +uint32_t GeometryCollection::localMergedSize() const +{ + if (isBaseGeometryColumn(addr_.column()) || + addr_.column() == TileFeatureLayer::ColumnId::GeometryViews) { + return 1; + } + return model().arrayMemberStorage().size(static_cast(addr_.index())); +} + +ModelNode::Ptr GeometryCollection::localMergedAt(int64_t i) const +{ + return localGeometryAt(i); +} + +bool GeometryCollection::localMergedIterate(const IterCallback& cb) const +{ + const auto localCount = localMergedSize(); + for (uint32_t i = 0; i < localCount; ++i) { + if (auto node = localMergedAt(i)) { + if (!cb(*node)) { + return false; + } + } + } + return true; +} + +uint32_t GeometryArrayView::localMergedSize() const +{ + if (singleGeometryAddress_) { + return 1; + } + return MergedArrayView::Base::size(); +} + +ModelNode::Ptr GeometryArrayView::localMergedAt(int64_t i) const +{ + if (singleGeometryAddress_) { + if (i == 0) { + return this->model().resolve(singleGeometryAddress_); + } + return {}; + } + return MergedArrayView::Base::at(i); +} + +bool GeometryArrayView::localMergedIterate(const IterCallback& cb) const +{ + if (singleGeometryAddress_) { + if (auto node = this->model().resolve(singleGeometryAddress_)) { + return cb(*node); + } + return true; + } + return MergedArrayView::Base::iterate(cb); } /** ModelNode impls. 
for Geometry */ -Geometry::Geometry(Data* data, ModelConstPtr pool_, ModelNodeAddress a) - : simfil::MandatoryDerivedModelNodeBase(std::move(pool_), a), geomData_(data) +Geometry::Geometry( + ModelConstPtr pool_, + ModelNodeAddress a, + simfil::detail::mp_key key) + : simfil::MandatoryDerivedModelNodeBase(std::move(pool_), a, key) +{ + storage_ = &model().vertexBufferStorage(); +} + +Geometry::Geometry(ViewData* data, ModelConstPtr pool_, ModelNodeAddress a, simfil::detail::mp_key key) + : simfil::MandatoryDerivedModelNodeBase(std::move(pool_), a, key), + geomViewData_(data) { storage_ = &model().vertexBufferStorage(); } @@ -156,6 +374,19 @@ uint64_t Geometry::getHash() const return result.value(); } +std::optional Geometry::name() const +{ + return geometryNameForStage(model(), stage()); +} + +std::optional Geometry::stage() const +{ + if (auto geometryStage = model().geometryStage(addr_)) { + return *geometryStage; + } + return std::nullopt; +} + SelfContainedGeometry Geometry::toSelfContained() const { SelfContainedGeometry result{{}, geomType()}; @@ -173,111 +404,120 @@ ValueType Geometry::type() const { } ModelNode::Ptr Geometry::at(int64_t i) const { - if (geomData_->sourceDataReferences_) { + auto const sourceDataReferences = model().geometrySourceDataReferences(addr_); + auto const geometryName = name(); + if (sourceDataReferences) { if (i == 0) return get(StringPool::SourceDataStr); i -= 1; } + if (geometryName) { + if (i == 0) + return get(StringPool::GeometryNameStr); + i -= 1; + } if (i == 0) return get(StringPool::TypeStr); if (i == 1) return get(StringPool::CoordinatesStr); - if (i == 2) - return get(StringPool::NameStr); throw std::out_of_range("geom: Out of range."); } uint32_t Geometry::size() const { - return 3 + (geomData_->sourceDataReferences_ ? 1 : 0); + auto const sourceDataReferences = model().geometrySourceDataReferences(addr_); + auto const geometryName = name(); + return 2 + (sourceDataReferences ? 1 : 0) + (geometryName ? 
1 : 0); } ModelNode::Ptr Geometry::get(const StringId& f) const { - if (f == StringPool::SourceDataStr && geomData_->sourceDataReferences_) { - return ModelNode::Ptr::make(model_, geomData_->sourceDataReferences_); + auto const sourceDataReferences = model().geometrySourceDataReferences(addr_); + auto const geometryName = name(); + auto const type = geomViewData_ ? geomViewData_->type_ : geometryTypeForColumn(addr_.column()); + if (f == StringPool::SourceDataStr && sourceDataReferences) { + return model().resolve(sourceDataReferences); + } + if (f == StringPool::GeometryNameStr && geometryName) { + return model_ptr::make(*geometryName, model_); } if (f == StringPool::TypeStr) { - return ValueNode( - geomData_->type_ == GeomType::Points ? MultiPointStr : - geomData_->type_ == GeomType::Line ? LineStringStr : - geomData_->type_ == GeomType::Polygon ? PolygonStr : - geomData_->type_ == GeomType::Mesh ? MultiPolygonStr : "", + return model_ptr::make( + type == GeomType::Points ? MultiPointStr : + type == GeomType::Line ? LineStringStr : + type == GeomType::Polygon ? PolygonStr : + type == GeomType::Mesh ? 
MultiPolygonStr : "", model_); } if (f == StringPool::CoordinatesStr) { - switch (geomData_->type_) { + switch (type) { case GeomType::Polygon: - return ModelNode::Ptr::make( - model_, ModelNodeAddress{TileFeatureLayer::ColumnId::Polygon, addr_.index()}); + if (geomViewData_) { + break; + } + return model().resolve( + ModelNodeAddress{TileFeatureLayer::ColumnId::Polygon, addr_.index()}); case GeomType::Mesh: - return ModelNode::Ptr::make( - model_, ModelNodeAddress{TileFeatureLayer::ColumnId::Mesh, addr_.index()}); + if (geomViewData_) { + break; + } + return model().resolve( + ModelNodeAddress{TileFeatureLayer::ColumnId::Mesh, addr_.index()}); default: - return ModelNode::Ptr::make( - model_, ModelNodeAddress{TileFeatureLayer::ColumnId::PointBuffers, addr_.index()}); + return model_ptr::make(model_, addr_); } - } - if (f == StringPool::NameStr) { - auto resolvedString = model().strings()->resolve(geomData_->geomName_); - return model_ptr::make( - resolvedString ? - *resolvedString : - std::string_view(""), - model_); + return model_ptr::make(model_, addr_); } return {}; } StringId Geometry::keyAt(int64_t i) const { - if (geomData_->sourceDataReferences_) { + auto const sourceDataReferences = model().geometrySourceDataReferences(addr_); + auto const geometryName = name(); + if (sourceDataReferences) { if (i == 0) return StringPool::SourceDataStr; i -= 1; } + if (geometryName) { + if (i == 0) + return StringPool::GeometryNameStr; + i -= 1; + } if (i == 0) return StringPool::TypeStr; if (i == 1) return StringPool::CoordinatesStr; - if (i == 2) return StringPool::NameStr; throw std::out_of_range("geom: Out of range."); } model_ptr Geometry::sourceDataReferences() const { - if (geomData_->sourceDataReferences_) - return model().resolveSourceDataReferenceCollection(*model_ptr::make(model_, geomData_->sourceDataReferences_)); + auto const sourceDataReferences = model().geometrySourceDataReferences(addr_); + if (sourceDataReferences) + return 
model().resolve(sourceDataReferences); return {}; } void Geometry::setSourceDataReferences(simfil::ModelNode::Ptr const& refs) { - geomData_->sourceDataReferences_ = refs->addr(); + model().setGeometrySourceDataReferences(addr_, refs->addr()); } void Geometry::append(Point const& p) { - if (geomData_->isView_) + if (geomViewData_) throw std::runtime_error("Cannot append to geometry view."); - auto& geomData = geomData_->detail_.geom_; + auto const anchor = model().geometryAnchor(); + auto const anchoredPoint = glm::vec3{ + static_cast(p.x - anchor.x), + static_cast(p.y - anchor.y), + static_cast(p.z - anchor.z)}; - // Before the geometry is assigned with a vertex array, - // a negative array handle denotes the desired initial - // capacity, +1, because there is always the additional - // offset point. - if (geomData.vertexArray_ < 0) { - auto initialCapacity = abs(geomData_->detail_.geom_.vertexArray_); - geomData.vertexArray_ = storage_->new_array(initialCapacity-1); - geomData.offset_ = p; - return; - } storage_->emplace_back( - geomData.vertexArray_, - glm::fvec3{ - static_cast(p.x - geomData.offset_.x), - static_cast(p.y - geomData.offset_.y), - static_cast(p.z - geomData.offset_.z)}); + static_cast(addr_.index()), + anchoredPoint); } GeomType Geometry::geomType() const { - return geomData_->type_; + return geomViewData_ ? 
geomViewData_->type_ : geometryTypeForColumn(addr_.column()); } bool Geometry::iterate(const IterCallback& cb) const @@ -290,31 +530,14 @@ bool Geometry::iterate(const IterCallback& cb) const size_t Geometry::numPoints() const { - PointBufferNode vertexBufferNode{geomData_, model_, {TileFeatureLayer::ColumnId::PointBuffers, addr_.index()}}; - return vertexBufferNode.size(); + auto vertexBufferNode = model_ptr::make(model_, addr_); + return vertexBufferNode->size(); } Point Geometry::pointAt(size_t index) const { - PointBufferNode vertexBufferNode{geomData_, model_, {TileFeatureLayer::ColumnId::PointBuffers, addr_.index()}}; - PointNode vertex{*vertexBufferNode.at((int64_t)index), vertexBufferNode.baseGeomData_}; - return vertex.point_; -} - -std::optional Geometry::name() const -{ - if (geomData_->geomName_ == StringPool::Empty) { - return {}; - } - return model().strings()->resolve(geomData_->geomName_); -} - -void Geometry::setName(const std::string_view& newName) -{ - auto newNameId = model().strings()->emplace(newName); - if (!newNameId) - raise(newNameId.error().message); - geomData_->geomName_ = *newNameId; + auto vertexBufferNode = model_ptr::make(model_, addr_); + return vertexBufferNode->pointAt(static_cast(index)); } double Geometry::length() const @@ -473,8 +696,8 @@ Point Geometry::percentagePositionFromGeometries(std::vector /** ModelNode impls. 
for PolygonNode */ -PolygonNode::PolygonNode(ModelConstPtr pool, ModelNodeAddress const& a) - : simfil::MandatoryDerivedModelNodeBase(std::move(pool), a) +PolygonNode::PolygonNode(ModelConstPtr pool, ModelNodeAddress const& a, simfil::detail::mp_key key) + : simfil::MandatoryDerivedModelNodeBase(std::move(pool), a, key) {} ValueType PolygonNode::type() const @@ -486,8 +709,8 @@ ModelNode::Ptr PolygonNode::at(int64_t index) const { // Index 0 is the outer ring, all following rings are holes if (index == 0) - return ModelNode::Ptr::make( - model_, ModelNodeAddress{TileFeatureLayer::ColumnId::LinearRing, addr_.index()}); + return model().resolve( + ModelNodeAddress{TileFeatureLayer::ColumnId::LinearRing, addr_.index()}); throw std::out_of_range("PolygonNode: index out of bounds."); } @@ -515,13 +738,16 @@ bool PolygonNode::iterate(IterCallback const& cb) const /** ModelNode impls. for MeshNode */ -MeshNode::MeshNode(Geometry::Data const* geomData, ModelConstPtr pool, ModelNodeAddress const& a) - : simfil::MandatoryDerivedModelNodeBase(std::move(pool), a), geomData_(geomData) +MeshNode::MeshNode(ModelConstPtr pool, + ModelNodeAddress const& a, + simfil::detail::mp_key key) + : simfil::MandatoryDerivedModelNodeBase(std::move(pool), a, key) { - auto vertex_buffer = PointBufferNode{ - geomData_, model_, {TileFeatureLayer::ColumnId::PointBuffers, addr_.index()}}; - assert(vertex_buffer.size() % 3 == 0); - size_ = vertex_buffer.size() / 3; + auto vertex_buffer = model_ptr::make( + model_, + ModelNodeAddress{TileFeatureLayer::ColumnId::MeshGeometries, addr_.index()}); + assert(vertex_buffer->size() % 3 == 0); + size_ = vertex_buffer->size() / 3; } ValueType MeshNode::type() const @@ -532,8 +758,9 @@ ValueType MeshNode::type() const ModelNode::Ptr MeshNode::at(int64_t index) const { if (0 <= index && index < size_) - return ModelNode::Ptr::make( - model_, ModelNodeAddress{TileFeatureLayer::ColumnId::MeshTriangleCollection, addr_.index()}, index); + return model().resolve( + 
ModelNodeAddress{TileFeatureLayer::ColumnId::MeshTriangleCollection, addr_.index()}, + index); throw std::out_of_range("MeshNode: index out of bounds."); } @@ -551,9 +778,9 @@ bool MeshNode::iterate(IterCallback const& cb) const return true; } -MeshTriangleCollectionNode::MeshTriangleCollectionNode(const ModelNode& base) - : simfil::MandatoryDerivedModelNodeBase(base) - , index_(std::get(data_) * 3) +MeshTriangleCollectionNode::MeshTriangleCollectionNode(const ModelNode& base, simfil::detail::mp_key key) + : simfil::MandatoryDerivedModelNodeBase(base, key), + index_(std::get(data_) * 3) {} ValueType MeshTriangleCollectionNode::type() const @@ -564,7 +791,9 @@ ValueType MeshTriangleCollectionNode::type() const ModelNode::Ptr MeshTriangleCollectionNode::at(int64_t index) const { if (index == 0) - return ModelNode::Ptr::make(model_, ModelNodeAddress{TileFeatureLayer::ColumnId::MeshTriangleLinearRing, addr_.index()}, index_); + return model().resolve( + ModelNodeAddress{TileFeatureLayer::ColumnId::MeshTriangleLinearRing, addr_.index()}, + index_); throw std::out_of_range("MeshTriangleCollectionNode: index out of bounds."); } @@ -583,8 +812,12 @@ bool MeshTriangleCollectionNode::iterate(IterCallback const& cb) const /** ModelNode impls. 
for LinearRingNode (a closed, simple polygon in CCW order) */ -LinearRingNode::LinearRingNode(const ModelNode& base, std::optional length) - : simfil::MandatoryDerivedModelNodeBase(base) +LinearRingNode::LinearRingNode(const ModelNode& base, simfil::detail::mp_key key) + : LinearRingNode(base, std::optional{}, key) +{} + +LinearRingNode::LinearRingNode(const ModelNode& base, std::optional length, simfil::detail::mp_key key) + : simfil::MandatoryDerivedModelNodeBase(base, key) { if (std::get_if(&data_)) offset_ = std::get(data_); @@ -687,38 +920,73 @@ uint32_t LinearRingNode::size() const model_ptr LinearRingNode::vertexBuffer() const { - auto ptr = ModelNode::Ptr::make( - model_, ModelNodeAddress{TileFeatureLayer::ColumnId::PointBuffers, addr_.index()}, 0); - return model().resolvePointBuffer(*ptr); + using Col = TileFeatureLayer::ColumnId; + switch (addr_.column()) { + case Col::LinearRing: + return model_ptr::make( + model_, + ModelNodeAddress{Col::PolygonGeometries, addr_.index()}); + case Col::MeshTriangleLinearRing: + return model_ptr::make( + model_, + ModelNodeAddress{Col::MeshGeometries, addr_.index()}); + default: + return model_ptr::make(model_, addr_); + } } /** ModelNode impls. for VertexBufferNode */ -PointBufferNode::PointBufferNode(Geometry::Data const* geomData, ModelConstPtr pool_, ModelNodeAddress const& a) - : simfil::MandatoryDerivedModelNodeBase(std::move(pool_), a), baseGeomData_(geomData), baseGeomAddress_(a) +PointBufferNode::PointBufferNode( + ModelConstPtr pool_, + ModelNodeAddress const& baseGeometryAddress, + simfil::detail::mp_key key) + : simfil::MandatoryDerivedModelNodeBase( + std::move(pool_), + ModelNodeAddress{ + baseGeometryAddress.column() == TileFeatureLayer::ColumnId::GeometryViews ? 
+ TileFeatureLayer::ColumnId::PointBuffersView : + TileFeatureLayer::ColumnId::PointBuffers, + baseGeometryAddress.index()}, + key), + baseGeomAddress_(baseGeometryAddress) { storage_ = &model().vertexBufferStorage(); - // Resolve geometry view to actual geometry, process - // actual offset and length. - if (baseGeomData_->isView_) { - size_ = baseGeomData_->detail_.view_.size_; - - while (baseGeomData_->isView_) { - offset_ += baseGeomData_->detail_.view_.offset_; - baseGeomAddress_ = baseGeomData_->detail_.view_.baseGeometry_; - baseGeomData_ = model().resolveGeometry( - *ModelNode::Ptr::make(model_, baseGeomData_->detail_.view_.baseGeometry_))->geomData_; + // Resolve geometry views to their base geometry while preserving the + // selected point range. + if (baseGeomAddress_.column() == TileFeatureLayer::ColumnId::GeometryViews) { + auto const* viewData = model().geometryViewData(baseGeomAddress_); + if (!viewData) { + throw std::runtime_error("Failed to resolve geometry view."); + } + offset_ = viewData->offset_; + size_ = viewData->size_; + baseGeomAddress_ = viewData->baseGeometry_; + + while (baseGeomAddress_.column() == TileFeatureLayer::ColumnId::GeometryViews) { + viewData = model().geometryViewData(baseGeomAddress_); + if (!viewData) { + throw std::runtime_error("Failed to resolve nested geometry view."); + } + offset_ += viewData->offset_; + baseGeomAddress_ = viewData->baseGeometry_; } - auto maxSize = 1 + storage_->size(baseGeomData_->detail_.geom_.vertexArray_); + if (!isBaseGeometryColumn(baseGeomAddress_.column())) { + throw std::runtime_error("Geometry view must resolve to a base geometry."); + } + baseVertexArray_ = static_cast(baseGeomAddress_.index()); + auto maxSize = storage_->size(baseVertexArray_); if (offset_ + size_ > maxSize) throw std::runtime_error("Geometry view is out of bounds."); } else { - // Just get the correct length. 
- if (baseGeomData_->detail_.geom_.vertexArray_ >= 0) - size_ = 1 + storage_->size(baseGeomData_->detail_.geom_.vertexArray_); + if (!isBaseGeometryColumn(baseGeomAddress_.column())) { + throw std::runtime_error("PointBuffer expects geometry or geometry-view address."); + } + baseVertexArray_ = static_cast(baseGeomAddress_.index()); + size_ = storage_->size(baseVertexArray_); } } @@ -730,7 +998,12 @@ ModelNode::Ptr PointBufferNode::at(int64_t i) const { if (i < 0 || i >= size()) throw std::out_of_range("vertex-buffer: Out of range."); i += offset_; - return ModelNode::Ptr::make(model_, ModelNodeAddress{TileFeatureLayer::ColumnId::Points, baseGeomAddress_.index()}, i); + auto const pointNodeAddress = ModelNodeAddress{ + TileFeatureLayer::ColumnId::Points, + baseGeomAddress_.index()}; + return model().resolve( + pointNodeAddress, + i); } uint32_t PointBufferNode::size() const { @@ -751,9 +1024,13 @@ bool PointBufferNode::iterate(const IterCallback& cb) const auto resolveAndCb = Model::Lambda([&cb, &cont](auto && node){ cont = cb(node); }); + auto const pointNodeAddress = ModelNodeAddress{ + TileFeatureLayer::ColumnId::Points, + baseGeomAddress_.index()}; for (auto i = 0u; i < size_; ++i) { - resolveAndCb(*ModelNode::Ptr::make( - model_, ModelNodeAddress{TileFeatureLayer::ColumnId::Points, baseGeomAddress_.index()}, (int64_t)i+offset_)); + resolveAndCb(*model().resolve( + pointNodeAddress, + static_cast(i) + offset_)); if (!cont) break; } @@ -762,8 +1039,18 @@ bool PointBufferNode::iterate(const IterCallback& cb) const Point PointBufferNode::pointAt(int64_t index) const { - PointNode vertex{*at(index), baseGeomData_}; - return vertex.point_; + if (index < 0 || index >= static_cast(size_)) { + throw std::out_of_range("vertex-buffer: Out of range."); + } + auto vertexResult = storage_->at( + baseVertexArray_, + static_cast(index + offset_)); + if (!vertexResult) { + throw std::out_of_range("vertex-buffer: Out of range."); + } + auto point = model().geometryAnchor(); + 
point += vertexResult->get(); + return point; } } diff --git a/libs/model/src/info.cpp b/libs/model/src/info.cpp index 7088b84d..c7832d48 100644 --- a/libs/model/src/info.cpp +++ b/libs/model/src/info.cpp @@ -138,12 +138,11 @@ nlohmann::json IdPart::toJson() const {"isOptional", isOptional_}}; } -bool IdPart::idPartsMatchComposition( +std::optional IdPart::compositionMatchEndIndex( const std::vector& candidateComposition, uint32_t compositionMatchStartIdx, const KeyValueViewPairs& featureIdParts, size_t matchLength, - bool requireCompositionEnd, std::string* error) { auto featureIdIter = featureIdParts.begin(); @@ -157,7 +156,7 @@ bool IdPart::idPartsMatchComposition( while (matchLength > 0 && compositionIter != candidateComposition.end()) { // Have we exhausted feature ID parts? if (featureIdIter == featureIdParts.end()) { - return false; + return std::nullopt; } auto [idPartKey, idPartValue] = *featureIdIter; @@ -168,26 +167,54 @@ bool IdPart::idPartsMatchComposition( ++compositionIter; continue; } - return false; + return std::nullopt; } // Does the ID part's value match? 
if (!compositionIter->validate(idPartValue, error)) - return false; + return std::nullopt; ++featureIdIter; ++compositionIter; --matchLength; } + if (matchLength != 0) { + return std::nullopt; + } + + return static_cast(std::distance(candidateComposition.begin(), compositionIter)); +} + +bool IdPart::idPartsMatchComposition( + const std::vector& candidateComposition, + uint32_t compositionMatchStartIdx, + const KeyValueViewPairs& featureIdParts, + size_t matchLength, + bool requireCompositionEnd, + std::string* error) +{ + auto const matchEndIndex = compositionMatchEndIndex( + candidateComposition, + compositionMatchStartIdx, + featureIdParts, + matchLength, + error); + if (!matchEndIndex) { + return false; + } + if (requireCompositionEnd) { + auto compositionIter = candidateComposition.begin() + *matchEndIndex; while (compositionIter != candidateComposition.end()) { - if (!compositionIter->isOptional_) + if (!compositionIter->isOptional_) { return false; + } + ++compositionIter; } } - return matchLength == 0; + return true; } bool IdPart::validate(std::variant& val, std::string* error) const @@ -358,12 +385,28 @@ std::shared_ptr LayerInfo::fromJson(const nlohmann::json& j, std::str coverages.push_back(Coverage::fromJson(item)); } + const auto stages = std::max(1U, j.value("stages", 1U)); + auto stageLabels = j.value("stageLabels", std::vector{}); + if (stageLabels.size() < stages) { + stageLabels.reserve(stages); + for (uint32_t i = static_cast(stageLabels.size()); i < stages; ++i) { + stageLabels.emplace_back(fmt::format("Stage {}", i)); + } + } + const auto defaultHighFidelityStage = stages > 1U ? 
1U : 0U; + const auto highFidelityStage = std::min( + stages - 1U, + j.value("highFidelityStage", defaultHighFidelityStage)); + return std::make_shared(LayerInfo{ j.value("layerId", layerId), type, featureTypes, j.value("zoomLevels", std::vector()), coverages, + stages, + stageLabels, + highFidelityStage, j.value("canRead", true), j.value("canWrite", false), Version::fromJson(j.value("version", Version().toJson()))}); @@ -393,12 +436,15 @@ nlohmann::json LayerInfo::toJson() const {"featureTypes", featureTypes}, {"zoomLevels", zoomLevels_}, {"coverage", coverages}, + {"stages", stages_}, + {"stageLabels", stageLabels_}, + {"highFidelityStage", highFidelityStage_}, {"canRead", canRead_}, {"canWrite", canWrite_}, {"version", version_.toJson()}}; } -FeatureTypeInfo const* LayerInfo::getTypeInfo(const std::string_view& sv, bool throwIfMissing) +FeatureTypeInfo const* LayerInfo::getTypeInfo(const std::string_view& sv, bool throwIfMissing) const { auto typeIt = std::find_if( featureTypes_.begin(), @@ -412,32 +458,42 @@ FeatureTypeInfo const* LayerInfo::getTypeInfo(const std::string_view& sv, bool t return nullptr; } -bool LayerInfo::validFeatureId( +std::optional LayerInfo::matchingFeatureIdCompositionIndex( const std::string_view& typeId, KeyValueViewPairs const& featureIdParts, - bool validateForNewFeature, - uint32_t compositionMatchStartIndex) + bool validateForNewFeature) const { auto typeInfo = getTypeInfo(typeId); - for (auto& candidateComposition : typeInfo->uniqueIdCompositions_) { + for (uint32_t compositionIndex = 0; + compositionIndex < typeInfo->uniqueIdCompositions_.size(); + ++compositionIndex) { + auto const& candidateComposition = typeInfo->uniqueIdCompositions_[compositionIndex]; if (IdPart::idPartsMatchComposition( candidateComposition, - compositionMatchStartIndex, + 0, featureIdParts, featureIdParts.size(), true)) { - return true; + return static_cast(std::min(compositionIndex, 255)); } // References may use alternative ID compositions, // but the 
feature itself must always use the first (primary) one. if (validateForNewFeature) - return false; + return std::nullopt; } - return false; + return std::nullopt; +} + +bool LayerInfo::validFeatureId( + const std::string_view& typeId, + KeyValueViewPairs const& featureIdParts, + bool validateForNewFeature) const +{ + return matchingFeatureIdCompositionIndex(typeId, featureIdParts, validateForNewFeature).has_value(); } std::shared_ptr DataSourceInfo::getLayer(std::string const& layerId, bool throwIfMissing) const diff --git a/libs/model/src/layer.cpp b/libs/model/src/layer.cpp index be94dbac..9dd08a45 100644 --- a/libs/model/src/layer.cpp +++ b/libs/model/src/layer.cpp @@ -2,8 +2,10 @@ #include "mapget/log.h" #include +#include #include #include +#include #include "simfil/model/bitsery-traits.h" @@ -28,14 +30,27 @@ MapTileKey::MapTileKey(const std::string& str) if (partsVec.size() < 4) raise(fmt::format("Invalid cache tile id: {}", str)); - layer_ = nlohmann::json(std::string_view(&*partsVec[1].begin(), distance(partsVec[1]))).get(); + layer_ = nlohmann::json(std::string_view(&*partsVec[0].begin(), distance(partsVec[0]))).get(); mapId_ = std::string_view(&*partsVec[1].begin(), distance(partsVec[1])); layerId_ = std::string_view(&*partsVec[2].begin(), distance(partsVec[2])); std::from_chars(&*partsVec[3].begin(), &*partsVec[3].begin() + distance(partsVec[3]), tileId_.value_, 16); + if (partsVec.size() >= 5) { + uint32_t parsedStage = 0; + auto* stageBegin = &*partsVec[4].begin(); + auto* stageEnd = stageBegin + distance(partsVec[4]); + auto parseResult = std::from_chars( + stageBegin, + stageEnd, + parsedStage, + 10); + if (parseResult.ec == std::errc() && parseResult.ptr == stageEnd) { + stage_ = parsedStage; + } + } } -MapTileKey::MapTileKey(LayerType layer, std::string mapId, std::string layerId, TileId tileId) : - layer_(layer), mapId_(std::move(mapId)), layerId_(std::move(layerId)), tileId_(tileId) +MapTileKey::MapTileKey(LayerType layer, std::string mapId, 
std::string layerId, TileId tileId, uint32_t stage) : + layer_(layer), mapId_(std::move(mapId)), layerId_(std::move(layerId)), tileId_(tileId), stage_(stage) {} MapTileKey::MapTileKey(const TileLayer& data) @@ -44,28 +59,30 @@ MapTileKey::MapTileKey(const TileLayer& data) mapId_ = data.mapId(); layerId_ = data.layerInfo()->layerId_; tileId_ = data.tileId(); + stage_ = data.stage().value_or(UnspecifiedStage); } std::string MapTileKey::toString() const { return fmt::format( - "{}:{}:{}:{:0x}", + "{}:{}:{}:{:0x}:{}", nlohmann::json(layer_).get(), mapId_, layerId_, - tileId_.value_); + tileId_.value_, + stage_); } bool MapTileKey::operator<(const MapTileKey& other) const { - return std::tie(layer_, mapId_, layerId_, tileId_) < - std::tie(other.layer_, other.mapId_, other.layerId_, other.tileId_); + return std::tie(layer_, mapId_, layerId_, tileId_, stage_) < + std::tie(other.layer_, other.mapId_, other.layerId_, other.tileId_, other.stage_); } bool MapTileKey::operator==(const MapTileKey& other) const { - return std::tie(layer_, mapId_, layerId_, tileId_) == - std::tie(other.layer_, other.mapId_, other.layerId_, other.tileId_); + return std::tie(layer_, mapId_, layerId_, tileId_, stage_) == + std::tie(other.layer_, other.mapId_, other.layerId_, other.tileId_, other.stage_); } bool MapTileKey::operator!=(const MapTileKey& other) const @@ -89,14 +106,16 @@ TileLayer::TileLayer( } TileLayer::TileLayer( - std::istream& inputStream, - const LayerInfoResolveFun& layerInfoResolveFun + const std::vector& input, + const LayerInfoResolveFun& layerInfoResolveFun, + size_t* bytesRead ) : tileId_(0) { using namespace std::chrono; using namespace nlohmann; - bitsery::Deserializer s(inputStream); + using Adapter = bitsery::InputBufferAdapter>; + bitsery::Deserializer s(Adapter(input.begin(), input.end())); s.text1b(mapId_, std::numeric_limits::max()); std::string layerName; s.text1b(layerName, std::numeric_limits::max()); @@ -151,6 +170,15 @@ TileLayer::TileLayer( legalInfo_ = ""; // 
Tell the optional that it has a value. s.text1b(*legalInfo_, std::numeric_limits::max()); } + + if (s.adapter().error() != bitsery::ReaderError::NoError) { + raise(fmt::format( + "Failed to read TileLayer: Error {}", + static_cast>(s.adapter().error()))); + } + if (bytesRead != nullptr) { + *bytesRead = s.adapter().currentReadPos(); + } } TileId TileLayer::tileId() const { @@ -243,6 +271,28 @@ void TileLayer::setLegalInfo(const std::string& legalInfoString) legalInfo_ = legalInfoString; } +void TileLayer::setLoadStateCallback(LoadStateCallback cb) +{ + onLoadStateChanged_ = std::move(cb); +} + +void TileLayer::setLoadState(LoadState state) +{ + if (onLoadStateChanged_) { + onLoadStateChanged_(state); + } +} + +std::optional TileLayer::stage() const +{ + return {}; +} + +void TileLayer::setStage(std::optional /*stage*/) +{ + // Base TileLayer does not carry stage information. +} + tl::expected TileLayer::write(std::ostream& outputStream) { using namespace std::chrono; diff --git a/libs/model/src/pointnode.cpp b/libs/model/src/pointnode.cpp index abb0a531..6e9300be 100644 --- a/libs/model/src/pointnode.cpp +++ b/libs/model/src/pointnode.cpp @@ -9,32 +9,35 @@ namespace mapget /** Model node impls for VertexNode. 
*/ -PointNode::PointNode(ModelNode const& baseNode, Geometry::Data const* geomData) - : simfil::MandatoryDerivedModelNodeBase(baseNode) +PointNode::PointNode( + ModelNode const& baseNode, + simfil::ArrayIndex vertexArray, + simfil::detail::mp_key key) + : simfil::MandatoryDerivedModelNodeBase(baseNode, key) { - if (geomData->isView_) - throw std::runtime_error("Point must be constructed through VertexBuffer which resolves view to geometry."); auto i = std::get(data_); - point_ = geomData->detail_.geom_.offset_; - if (i > 0) { - auto vertexResult = model().vertexBufferStorage().at(geomData->detail_.geom_.vertexArray_, i - 1); - if (!vertexResult) { - raise("Failed to get vertex from buffer"); - } - point_ += vertexResult->get(); + point_ = model().geometryAnchor(); + auto vertexResult = model().vertexBufferStorage().at( + vertexArray, + static_cast(i)); + if (!vertexResult) { + raise("Failed to get vertex from buffer"); } + point_ += vertexResult->get(); } -PointNode::PointNode(ModelNode const& baseNode, Validity::Data const* geomData) - : simfil::MandatoryDerivedModelNodeBase(baseNode) +PointNode::PointNode(ModelNode const& baseNode, + Validity::Data const* geomData, + simfil::detail::mp_key key) + : simfil::MandatoryDerivedModelNodeBase(baseNode, key) { auto i = std::get(data_); // The extracted point index may point to a validity's single point // or to one of its range points. These magic indices are used in validity.cpp. 
switch (i) { - case 0: point_ = std::get(geomData->geomDescr_); break; - case 1: point_ = std::get(geomData->geomDescr_).first; break; - case 2: point_ = std::get(geomData->geomDescr_).second; break; + case 0: point_ = geomData->geomDescr_.point_; break; + case 1: point_ = geomData->geomDescr_.range_.first; break; + case 2: point_ = geomData->geomDescr_.range_.second; break; default: mapget::raiseFmt("Invalid validity point index {}", i); } @@ -71,10 +74,10 @@ StringId PointNode::keyAt(int64_t i) const { bool PointNode::iterate(const IterCallback& cb) const { - if (!cb(ValueNode(point_.x, model_))) return false; - if (!cb(ValueNode(point_.y, model_))) return false; - if (!cb(ValueNode(point_.z, model_))) return false; + if (!cb(*model_ptr::make(point_.x, model_))) return false; + if (!cb(*model_ptr::make(point_.y, model_))) return false; + if (!cb(*model_ptr::make(point_.z, model_))) return false; return true; } -} \ No newline at end of file +} diff --git a/libs/model/src/relation.cpp b/libs/model/src/relation.cpp index e686e3d6..8586343d 100644 --- a/libs/model/src/relation.cpp +++ b/libs/model/src/relation.cpp @@ -6,8 +6,28 @@ namespace mapget { -Relation::Relation(Relation::Data* data, simfil::ModelConstPtr l, simfil::ModelNodeAddress a) - : simfil::ProceduralObject<6, Relation, TileFeatureLayer>(std::move(l), a), data_(data) +namespace +{ +simfil::ModelNode::Ptr exposedValidityNode( + TileFeatureLayer const& model, + simfil::ModelNodeAddress const& validityCollectionAddress) +{ + auto validities = model.resolve(validityCollectionAddress); + if (validities && validities->size() == 1) { + if (auto validity = validities->at(0)) { + return validity; + } + } + return model.resolve(validityCollectionAddress); +} +} + +Relation::Relation(Relation::Data* data, + simfil::ModelConstPtr l, + simfil::ModelNodeAddress a, + simfil::detail::mp_key key) + : simfil::ProceduralObject<6, Relation, TileFeatureLayer>(std::move(l), a, key), + data_(data) { fields_.emplace_back( 
StringPool::NameStr, @@ -18,25 +38,25 @@ Relation::Relation(Relation::Data* data, simfil::ModelConstPtr l, simfil::ModelN fields_.emplace_back( StringPool::TargetStr, [](Relation const& self) { - return ModelNode::Ptr::make(self.model().shared_from_this(), self.data_->targetFeatureId_); + return self.model().resolve(self.data_->targetFeatureId_); }); if (data_->sourceValidity_) fields_.emplace_back( StringPool::SourceValidityStr, [](Relation const& self) { - return ModelNode::Ptr::make(self.model().shared_from_this(), self.data_->sourceValidity_); + return exposedValidityNode(self.model(), self.data_->sourceValidity_); }); if (data_->targetValidity_) fields_.emplace_back( StringPool::TargetValidityStr, [](Relation const& self) { - return ModelNode::Ptr::make(self.model().shared_from_this(), self.data_->targetValidity_); + return exposedValidityNode(self.model(), self.data_->targetValidity_); }); if (data_->sourceData_) fields_.emplace_back( StringPool::SourceDataStr, [](Relation const& self) { - return ModelNode::Ptr::make(self.model().shared_from_this(), self.data_->sourceData_); + return self.model().resolve(self.data_->sourceData_); }); } @@ -45,7 +65,7 @@ model_ptr Relation::sourceValidity() if (data_->sourceValidity_) { return sourceValidityOrNull(); } - auto returnValue = model().newValidityCollection(1); + auto returnValue = model().newValidityCollection(2); data_->sourceValidity_ = returnValue->addr(); return returnValue; } @@ -54,7 +74,8 @@ model_ptr Relation::sourceValidityOrNull() const { if (!data_->sourceValidity_) return {}; - return model().resolveValidityCollection(*model_ptr::make(model_, data_->sourceValidity_)); + return model().resolve( + *model_ptr::make(model_, data_->sourceValidity_)); } void Relation::setSourceValidity(const model_ptr& validityGeom) @@ -67,7 +88,7 @@ model_ptr Relation::targetValidity() if (data_->targetValidity_) { return targetValidityOrNull(); } - auto returnValue = model().newValidityCollection(1); + auto returnValue = 
model().newValidityCollection(2); data_->targetValidity_ = returnValue->addr(); return returnValue; } @@ -77,7 +98,8 @@ model_ptr Relation::targetValidityOrNull() const { if (!data_->targetValidity_) return {}; - return model().resolveValidityCollection(*model_ptr::make(model_, data_->targetValidity_)); + return model().resolve( + *model_ptr::make(model_, data_->targetValidity_)); } void Relation::setTargetValidity(const model_ptr& validityGeom) @@ -94,13 +116,15 @@ std::string_view Relation::name() const model_ptr Relation::target() const { - return model().resolveFeatureId(*model_ptr::make(model_, data_->targetFeatureId_)); + return model().resolve( + *model_ptr::make(model_, data_->targetFeatureId_)); } model_ptr Relation::sourceDataReferences() const { if (data_->sourceData_) - return model().resolveSourceDataReferenceCollection(*model_ptr::make(model_, data_->sourceData_)); + return model().resolve( + *model_ptr::make(model_, data_->sourceData_)); return {}; } diff --git a/libs/model/src/simfilexpressioncache.h b/libs/model/src/simfilexpressioncache.h index 0be5bdc8..d56b22ef 100644 --- a/libs/model/src/simfilexpressioncache.h +++ b/libs/model/src/simfilexpressioncache.h @@ -94,11 +94,11 @@ struct SimfilExpressionCache auto diagnostics(std::string_view query, const simfil::Diagnostics& diag, bool anyMode) -> tl::expected, simfil::Error> { - auto ast = compile(query, true); + auto ast = compile(query, anyMode); if (!ast) return tl::unexpected(std::move(ast.error())); - return simfil::diagnostics(environment(), *ast->get(), diag); + return simfil::diagnostics(diag); } auto completions(std::string_view query, size_t point, simfil::ModelNode const& node, simfil::CompletionOptions const& opts) -> tl::expected, simfil::Error> diff --git a/libs/model/src/sourcedata.cpp b/libs/model/src/sourcedata.cpp index b9d9ebf8..ebf33471 100644 --- a/libs/model/src/sourcedata.cpp +++ b/libs/model/src/sourcedata.cpp @@ -50,7 +50,7 @@ simfil::model_ptr 
SourceDataCompoundNode::object() const { if (!data_->object_) return {}; - return model().resolveObject(ModelNode::Ptr::make(model_, data_->object_)); + return model().resolve(data_->object_); } ValueType SourceDataCompoundNode::type() const @@ -94,14 +94,23 @@ bool SourceDataCompoundNode::iterate(IterCallback const& cb) const return true; } -SourceDataCompoundNode::SourceDataCompoundNode(Data* data, TileSourceDataLayer::ConstPtr model, simfil::ModelNodeAddress addr) - : simfil::MandatoryDerivedModelNodeBase(std::move(model), addr), data_(data) +SourceDataCompoundNode::SourceDataCompoundNode(Data* data, + TileSourceDataLayer::ConstPtr model, + simfil::ModelNodeAddress addr, + simfil::detail::mp_key key) + : simfil::MandatoryDerivedModelNodeBase(std::move(model), addr, key), + data_(data) { assert(data_); } -SourceDataCompoundNode::SourceDataCompoundNode(Data* data, TileSourceDataLayer::Ptr model, simfil::ModelNodeAddress addr, size_t initialSize) - : simfil::MandatoryDerivedModelNodeBase(model, addr), data_(data) +SourceDataCompoundNode::SourceDataCompoundNode(Data* data, + TileSourceDataLayer::Ptr model, + simfil::ModelNodeAddress addr, + size_t initialSize, + simfil::detail::mp_key key) + : simfil::MandatoryDerivedModelNodeBase(model, addr, key), + data_(data) { assert(data_); assert(!data_->object_); diff --git a/libs/model/src/sourcedatalayer.cpp b/libs/model/src/sourcedatalayer.cpp index b6d3e250..4a96f741 100644 --- a/libs/model/src/sourcedatalayer.cpp +++ b/libs/model/src/sourcedatalayer.cpp @@ -4,13 +4,13 @@ #include #include "bitsery/bitsery.h" -#include "bitsery/adapter/stream.h" +#include "bitsery/adapter/buffer.h" #include "bitsery/adapter/stream.h" #include "bitsery/deserializer.h" #include "bitsery/serializer.h" #include "bitsery/traits/string.h" #include "bitsery/traits/vector.h" -#include "simfil/model/bitsery-traits.h" // segmented_vector traits +#include "simfil/model/bitsery-traits.h" #include "mapget/log.h" #include "sourcedata.h" @@ -31,7 +31,7 
@@ namespace mapget struct TileSourceDataLayer::Impl { SourceDataAddressFormat format_; - sfl::segmented_vector compounds_; + simfil::ModelColumn compounds_; // Simfil compiled expression and environment SimfilExpressionCache expressionCache_; @@ -44,8 +44,7 @@ struct TileSourceDataLayer::Impl // Bitsery (de-)serialization interface template void readWrite(S& s) { - constexpr size_t maxColumnSize = std::numeric_limits::max(); - s.container(compounds_, maxColumnSize); + s.object(compounds_); s.value1b(format_); } }; @@ -62,22 +61,31 @@ TileSourceDataLayer::TileSourceDataLayer( {} TileSourceDataLayer::TileSourceDataLayer( - std::istream& in, + const std::vector& input, LayerInfoResolveFun const& layerInfoResolveFun, StringPoolResolveFun const& stringPoolGetter ) : - TileLayer(in, layerInfoResolveFun), + TileLayer(input, layerInfoResolveFun, &deserializationOffsetBytes_), ModelPool(stringPoolGetter(nodeId_)), impl_(std::make_unique(stringPoolGetter(nodeId_))) { - bitsery::Deserializer s(in); + using Adapter = bitsery::InputBufferAdapter>; + if (deserializationOffsetBytes_ > input.size()) { + raise("Failed to read TileSourceDataLayer: invalid deserialization offset."); + } + bitsery::Deserializer s(Adapter( + input.begin() + static_cast(deserializationOffsetBytes_), + input.end())); impl_->readWrite(s); if (s.adapter().error() != bitsery::ReaderError::NoError) { raiseFmt( "Failed to read TileFeatureLayer: Error {}", static_cast>(s.adapter().error())); } - ModelPool::read(in); + const auto modelOffset = deserializationOffsetBytes_ + s.adapter().currentReadPos(); + if (auto result = ModelPool::read(input, modelOffset); !result) { + raise(result.error().message); + } } TileSourceDataLayer::~TileSourceDataLayer() = default; @@ -96,21 +104,31 @@ model_ptr TileSourceDataLayer::newCompound(size_t initia &data, std::static_pointer_cast(shared_from_this()), ModelNodeAddress(Compound, static_cast(index)), - initialSize); + initialSize, + mpKey_); } -model_ptr 
TileSourceDataLayer::resolveCompound(simfil::ModelNode const& n) const +// Short aliases to keep resolve hook signatures compact. +using simfil::ModelNode; +using simfil::res::tag; + +template<> +model_ptr resolveInternal(tag, TileSourceDataLayer const& model, ModelNode const& node) { - assert(n.addr().column() == Compound && "Unexpected column type!"); + assert(node.addr().column() == TileSourceDataLayer::Compound && "Unexpected column type!"); - auto& data = impl_->compounds_.at(n.addr().index()); - return SourceDataCompoundNode(&data, std::static_pointer_cast(shared_from_this()), n.addr()); + auto& data = model.impl_->compounds_.at(node.addr().index()); + return SourceDataCompoundNode( + &data, + std::static_pointer_cast(model.shared_from_this()), + node.addr(), + model.mpKey_); } tl::expected TileSourceDataLayer::resolve(const simfil::ModelNode& n, const ResolveFn& cb) const { if (n.addr().column() == Compound) { - cb(*resolveCompound(n)); + cb(*resolve(n)); return {}; } return ModelPool::resolve(n, cb); diff --git a/libs/model/src/sourcedatareference.cpp b/libs/model/src/sourcedatareference.cpp index 95b771ea..67149c87 100644 --- a/libs/model/src/sourcedatareference.cpp +++ b/libs/model/src/sourcedatareference.cpp @@ -20,8 +20,8 @@ ModelNode::Ptr SourceDataReferenceCollection::at(int64_t index) const if (index < 0 || index >= size_ || (offset_ + index) > 0xffffff) throw std::out_of_range("Index out of range"); - return ModelNode::Ptr::make( - model_, ModelNodeAddress{TileFeatureLayer::ColumnId::SourceDataReferences, static_cast(offset_ + index)}); + return model().resolve( + ModelNodeAddress{TileFeatureLayer::ColumnId::SourceDataReferences, static_cast(offset_ + index)}); } uint32_t SourceDataReferenceCollection::size() const @@ -41,12 +41,18 @@ void SourceDataReferenceCollection::forEachReference(std::function(*at(i))); } } -SourceDataReferenceCollection::SourceDataReferenceCollection(uint32_t offset, uint32_t size, ModelConstPtr pool, ModelNodeAddress a) - : 
simfil::MandatoryDerivedModelNodeBase(pool, a), offset_(offset), size_(size) +SourceDataReferenceCollection::SourceDataReferenceCollection(uint32_t offset, + uint32_t size, + ModelConstPtr pool, + ModelNodeAddress a, + simfil::detail::mp_key key) + : simfil::MandatoryDerivedModelNodeBase(pool, a, key), + offset_(offset), + size_(size) {} ValueType SourceDataReferenceItem::type() const @@ -70,9 +76,11 @@ ModelNode::Ptr SourceDataReferenceItem::get(const StringId& key) const { switch (key) { case StringPool::AddressStr: - return model_ptr::make(static_cast(data_->reference_.address_.u64()), model().shared_from_this()); + return model_ptr::make( + static_cast(data_->address_.u64()), + model().shared_from_this()); case StringPool::LayerIdStr: - if (auto layerId = model().strings()->resolve(data_->reference_.layerId_)) + if (auto layerId = model().strings()->resolve(data_->layerId_)) return model_ptr::make(*layerId, model().shared_from_this()); return {}; case StringPool::QualifierStr: @@ -110,18 +118,22 @@ std::string_view SourceDataReferenceItem::qualifier() const std::string_view SourceDataReferenceItem::layerId() const { - if (auto str = model().strings()->resolve(data_->reference_.layerId_)) + if (auto str = model().strings()->resolve(data_->layerId_)) return *str; return {}; } SourceDataAddress SourceDataReferenceItem::address() const { - return data_->reference_.address_; + return data_->address_; } -SourceDataReferenceItem::SourceDataReferenceItem(const QualifiedSourceDataReference* const data, const ModelConstPtr pool, const ModelNodeAddress a) - : simfil::MandatoryDerivedModelNodeBase(pool, a), data_(data) +SourceDataReferenceItem::SourceDataReferenceItem(const QualifiedSourceDataReference* const data, + const ModelConstPtr pool, + const ModelNodeAddress a, + simfil::detail::mp_key key) + : simfil::MandatoryDerivedModelNodeBase(pool, a, key), + data_(data) {} } diff --git a/libs/model/src/stream.cpp b/libs/model/src/stream.cpp index f7d78c18..828d002a 100644 
--- a/libs/model/src/stream.cpp +++ b/libs/model/src/stream.cpp @@ -5,8 +5,10 @@ #include "simfil/model/nodes.h" #include +#include #include #include +#include #include #include "featurelayer.h" @@ -29,20 +31,37 @@ TileLayerStream::Reader::Reader( void TileLayerStream::Reader::read(const std::string_view& bytes) { - buffer_ << bytes; - while (continueReading()); + buffer_.insert( + buffer_.end(), + reinterpret_cast(bytes.data()), + reinterpret_cast(bytes.data()) + bytes.size()); + while (continueReading()) {} + + if (readOffset_ == buffer_.size()) { + buffer_.clear(); + readOffset_ = 0; + } + else if (readOffset_ > 65536 && (readOffset_ * 2 > buffer_.size())) { + buffer_.erase( + buffer_.begin(), + buffer_.begin() + static_cast(readOffset_)); + readOffset_ = 0; + } } bool TileLayerStream::Reader::eos() { - return (buffer_.tellp() - buffer_.tellg()) == 0; + return readOffset_ == buffer_.size(); } bool TileLayerStream::Reader::continueReading() { if (currentPhase_ == Phase::ReadHeader) { - if (readMessageHeader(buffer_, nextValueType_, nextValueSize_)) { + size_t headerBytesRead = 0; + auto unreadBytes = std::span(buffer_).subspan(readOffset_); + if (readMessageHeader(unreadBytes, nextValueType_, nextValueSize_, &headerBytesRead)) { + readOffset_ += headerBytesRead; currentPhase_ = Phase::ReadValue; } else { @@ -50,15 +69,19 @@ bool TileLayerStream::Reader::continueReading() } } - bitsery::Deserializer s(buffer_); - auto numUnreadBytes = buffer_.tellp() - buffer_.tellg(); + auto numUnreadBytes = buffer_.size() - readOffset_; if (numUnreadBytes < nextValueSize_) return false; + std::vector payload( + buffer_.begin() + static_cast(readOffset_), + buffer_.begin() + static_cast(readOffset_ + nextValueSize_)); + readOffset_ += nextValueSize_; + if (nextValueType_ == MessageType::TileFeatureLayer) { auto start = std::chrono::system_clock::now(); - auto layer = std::make_shared(buffer_, layerInfoProvider_, [this](auto&& nodeId) { + auto layer = std::make_shared(payload, 
layerInfoProvider_, [this](auto&& nodeId) { return stringPoolProvider_->getStringPool(nodeId); }); @@ -69,16 +92,19 @@ bool TileLayerStream::Reader::continueReading() } else if (nextValueType_ == MessageType::TileSourceDataLayer) { - auto layer = std::make_shared(buffer_, layerInfoProvider_, [this](auto&& nodeId) { + auto layer = std::make_shared(payload, layerInfoProvider_, [this](auto&& nodeId) { return stringPoolProvider_->getStringPool(nodeId); }); onParsedLayer_(layer); } else if (nextValueType_ == MessageType::StringPool) { - // Read the node id which identifies the string pool. - std::string stringPoolNodeId = StringPool::readDataSourceNodeId(buffer_); - stringPoolProvider_->getStringPool(stringPoolNodeId)->read(buffer_); + size_t nodeIdBytesRead = 0; + auto stringPoolNodeId = StringPool::readDataSourceNodeId(payload, 0, &nodeIdBytesRead); + auto result = stringPoolProvider_->getStringPool(stringPoolNodeId)->read(payload, nodeIdBytesRead); + if (!result) { + raise(result.error().message); + } } currentPhase_ = Phase::ReadHeader; @@ -90,26 +116,39 @@ std::shared_ptr TileLayerStream::Reader::strin return stringPoolProvider_; } -bool TileLayerStream::Reader::readMessageHeader(std::stringstream & stream, MessageType& outType, uint32_t& outSize) +bool TileLayerStream::Reader::readMessageHeader( + std::span bytes, + MessageType& outType, + uint32_t& outSize, + size_t* bytesRead) { - bitsery::Deserializer s(stream); - auto numUnreadBytes = stream.tellp() - stream.tellg(); - // Version: 6B, Type: 1B, Size: 4B - constexpr auto headerSize = 6 + 1 + 4; - if (numUnreadBytes < headerSize) + constexpr size_t headerSize = 6 + 1 + 4; + if (bytes.size() < headerSize) return false; + std::vector header(bytes.begin(), bytes.begin() + static_cast(headerSize)); + using Adapter = bitsery::InputBufferAdapter>; + bitsery::Deserializer s(Adapter(header.begin(), header.end())); + Version protocolVersion; s.object(protocolVersion); + s.value1b(outType); + s.value4b(outSize); + if 
(s.adapter().error() != bitsery::ReaderError::NoError) { + raise(fmt::format( + "Failed to read stream message header: Error {}", + static_cast>(s.adapter().error()))); + } if (!protocolVersion.isCompatible(CurrentProtocolVersion)) { raise(fmt::format( "Unable to read message with version {} using version {}.", protocolVersion.toString(), CurrentProtocolVersion.toString())); } - s.value1b(outType); - s.value4b(outSize); + if (bytesRead != nullptr) { + *bytesRead = s.adapter().currentReadPos(); + } return true; } diff --git a/libs/model/src/stringpool.cpp b/libs/model/src/stringpool.cpp index 56664ad7..3946bc53 100644 --- a/libs/model/src/stringpool.cpp +++ b/libs/model/src/stringpool.cpp @@ -1,8 +1,11 @@ #include "stringpool.h" +#include "mapget/log.h" #include +#include #include #include +#include namespace mapget { @@ -10,6 +13,7 @@ namespace mapget StringPool::StringPool(const std::string_view& nodeId) : nodeId_(nodeId) { addStaticKey(IdStr, "id"); addStaticKey(TypeIdStr, "typeId"); + addStaticKey(LodStr, "lod"); addStaticKey(MapIdStr, "mapId"); addStaticKey(LayerIdStr, "layerId"); addStaticKey(LayerStr, "layer"); @@ -37,6 +41,12 @@ StringPool::StringPool(const std::string_view& nodeId) : nodeId_(nodeId) { addStaticKey(EndStr, "end"); addStaticKey(PointStr, "point"); addStaticKey(FeatureIdStr, "featureId"); + addStaticKey(FromStr, "from"); + addStaticKey(ToStr, "to"); + addStaticKey(ConnectedEndStr, "connectedEnd"); + addStaticKey(FromConnectedEndStr, "fromConnectedEnd"); + addStaticKey(ToConnectedEndStr, "toConnectedEnd"); + addStaticKey(TransitionNumberStr, "transitionNumber"); } tl::expected @@ -48,11 +58,31 @@ StringPool::write(std::ostream& outputStream, simfil::StringId offset) const return simfil::StringPool::write(outputStream, offset); } -std::string StringPool::readDataSourceNodeId(std::istream& inputStream) { +std::string StringPool::readDataSourceNodeId( + const std::vector& input, + size_t offset, + size_t* bytesRead) +{ + if (offset > input.size()) 
{ + raise("Failed to read StringPool node id: invalid input offset."); + } + + using Adapter = bitsery::InputBufferAdapter>; + bitsery::Deserializer s(Adapter( + input.begin() + static_cast(offset), + input.end())); + // Read the node id which identifies the string pool. - bitsery::Deserializer s(inputStream); std::string stringPoolNodeId; s.text1b(stringPoolNodeId, std::numeric_limits::max()); + if (s.adapter().error() != bitsery::ReaderError::NoError) { + raiseFmt( + "Failed to read StringPool node id: Error {}", + static_cast>(s.adapter().error())); + } + if (bytesRead != nullptr) { + *bytesRead = s.adapter().currentReadPos(); + } return stringPoolNodeId; } diff --git a/libs/model/src/validity.cpp b/libs/model/src/validity.cpp index 5ad07f0a..3165fb45 100644 --- a/libs/model/src/validity.cpp +++ b/libs/model/src/validity.cpp @@ -3,6 +3,9 @@ #include "stringpool.h" #include "featurelayer.h" +#include +#include + namespace mapget { @@ -14,24 +17,166 @@ std::string_view directionToString(Validity::Direction const& d) case Validity::Empty: return "EMPTY"; case Validity::Positive: return "POSITIVE"; case Validity::Negative: return "NEGATIVE"; - case Validity::Both: return "BOTH"; + case Validity::Both: return "COMPLETE"; case Validity::None: return "NONE"; } return "?"; } +std::string_view transitionEndToString(Validity::TransitionEnd const& end) +{ + switch (end) { + case Validity::Start: return "START"; + case Validity::End: return "END"; + } + return "?"; +} + +uint8_t packTransitionEnds( + Validity::TransitionEnd fromConnectedEnd, + Validity::TransitionEnd toConnectedEnd) +{ + return static_cast( + static_cast(fromConnectedEnd) | + (static_cast(toConnectedEnd) << 1U)); +} + +Validity::TransitionEnd unpackFromConnectedEnd(uint8_t packedEnds) +{ + return (packedEnds & 0x1U) != 0 ? Validity::End : Validity::Start; +} + +Validity::TransitionEnd unpackToConnectedEnd(uint8_t packedEnds) +{ + return (packedEnds & 0x2U) != 0 ? 
Validity::End : Validity::Start; +} + +model_ptr resolveLineGeometry( + model_ptr const& geometryCollection, + std::optional referencedStage) +{ + if (!geometryCollection) { + return {}; + } + return geometryCollection->geometryOfTypeAtPreferredStage(GeomType::Line, referencedStage); +} + +struct TransitionSegment +{ + Point outer_; + Point inner_; +}; + +bool pointsCoincide(Point const& left, Point const& right) +{ + return left.distanceTo(right) < 1e-9; +} + +std::optional resolveTransitionSegment( + model_ptr const& feature, + Validity::TransitionEnd connectedEnd, + std::optional referencedStage) +{ + if (!feature) { + return std::nullopt; + } + + auto geometry = resolveLineGeometry(feature->geomOrNull(), referencedStage); + if (!geometry || geometry->numPoints() == 0) { + return std::nullopt; + } + + const auto numPoints = geometry->numPoints(); + const auto innerIndex = connectedEnd == Validity::End ? numPoints - 1U : 0U; + auto outerIndex = innerIndex; + auto const innerPoint = geometry->pointAt(innerIndex); + if (connectedEnd == Validity::End) { + for (auto pointIndex = innerIndex; pointIndex-- > 0;) { + if (!pointsCoincide(geometry->pointAt(pointIndex), innerPoint)) { + outerIndex = pointIndex; + break; + } + } + } else { + for (auto pointIndex = innerIndex + 1U; pointIndex < numPoints; ++pointIndex) { + if (!pointsCoincide(geometry->pointAt(pointIndex), innerPoint)) { + outerIndex = pointIndex; + break; + } + } + } + return TransitionSegment{ + geometry->pointAt(outerIndex), + innerPoint, + }; +} + +SelfContainedGeometry applyDirectionToGeometry( + SelfContainedGeometry geometry, + Validity::Direction direction) +{ + if (direction == Validity::Negative && geometry.points_.size() > 1) { + std::reverse(geometry.points_.begin(), geometry.points_.end()); + } + return geometry; +} + +std::optional geometryNameForStage( + TileFeatureLayer const& model, + std::optional geometryStage) +{ + if (!geometryStage || !model.layerInfo()) { + return std::nullopt; + } + 
auto const& layerInfo = *model.layerInfo(); + if (*geometryStage <= layerInfo.highFidelityStage_) { + return std::nullopt; + } + if (*geometryStage >= layerInfo.stageLabels_.size()) { + return std::nullopt; + } + auto const& label = layerInfo.stageLabels_.at(*geometryStage); + if (label.empty()) { + return std::nullopt; + } + return label; +} + +} + +void Validity::ensureMaterialized() +{ + if (data_) { + return; + } + + auto const simpleAddress = addr(); + if (simpleAddress.column() != TileFeatureLayer::ColumnId::SimpleValidity) { + raise("Cannot materialize validity from non-simple address."); + } + + auto upgradedAddress = model().materializeSimpleValidity(simpleAddress, simpleDirection_); + auto upgraded = model().resolve(upgradedAddress); + if (!upgraded || !upgraded->data_) { + raise("Failed to materialize simple validity."); + } + data_ = upgraded->data_; } model_ptr Validity::featureId() const { + if (!data_) { + return {}; + } if (!data_->featureAddress_) { return {}; } - return model().resolveFeatureId(*Ptr::make(model_, data_->featureAddress_)); + return model().resolve(data_->featureAddress_); } void Validity::setFeatureId(model_ptr featureId) { + ensureMaterialized(); if (!featureId) { data_->featureAddress_ = {}; return; @@ -39,10 +184,32 @@ void Validity::setFeatureId(model_ptr featureId) data_->featureAddress_ = featureId->addr(); } +Validity::Validity( + Validity::Direction direction, + simfil::ModelConstPtr layer, + simfil::ModelNodeAddress a, + simfil::detail::mp_key key) + : simfil::ProceduralObject<7, Validity, TileFeatureLayer>(std::move(layer), a, key), + simpleDirection_(direction) +{ + if (direction != Empty) { + fields_.emplace_back( + StringPool::DirectionStr, + [](Validity const& self) + { + return model_ptr::make( + directionToString(self.direction()), + self.model_); + }); + } +} + Validity::Validity(Validity::Data* data, simfil::ModelConstPtr layer, - simfil::ModelNodeAddress a) - : simfil::ProceduralObject<6, Validity, 
TileFeatureLayer>(std::move(layer), a), data_(data) + simfil::ModelNodeAddress a, + simfil::detail::mp_key key) + : simfil::ProceduralObject<7, Validity, TileFeatureLayer>(std::move(layer), a, key), + data_(data) { if (data_->direction_) fields_.emplace_back( @@ -50,35 +217,74 @@ Validity::Validity(Validity::Data* data, [](Validity const& self) { return model_ptr::make( - directionToString(self.data_->direction_), + directionToString(self.direction()), self.model_); }); + if (auto geometryName = geometryNameForStage(model(), geometryStage())) { + fields_.emplace_back( + StringPool::GeometryNameStr, + [geometryName](Validity const& self) + { + return model_ptr::make(*geometryName, self.model_); + }); + } + if (data_->geomDescrType_ == SimpleGeometry) { fields_.emplace_back( StringPool::GeometryStr, [](Validity const& self) { - return ModelNode::Ptr::make( - self.model_, - std::get(self.data_->geomDescr_)); + return self.model().resolve( + self.data_->geomDescr_.simpleGeometry_); }); return; } - if (data_->referencedGeomName_) { + if (data_->geomDescrType_ == FeatureTransition) { fields_.emplace_back( - StringPool::GeometryNameStr, + StringPool::TransitionNumberStr, + [](Validity const& self) + { + return model_ptr::make( + static_cast(*self.transitionNumber()), + self.model_); + }); + fields_.emplace_back( + StringPool::FromStr, + [](Validity const& self) + { + auto fromFeature = self.transitionFromFeature(); + return model_ptr::make( + fromFeature ? fromFeature->id()->toString() : std::string{}, + self.model_); + }); + fields_.emplace_back( + StringPool::FromConnectedEndStr, + [](Validity const& self) + { + return model_ptr::make( + transitionEndToString(*self.transitionFromConnectedEnd()), + self.model_); + }); + fields_.emplace_back( + StringPool::ToStr, + [](Validity const& self) + { + auto toFeature = self.transitionToFeature(); + return model_ptr::make( + toFeature ? 
toFeature->id()->toString() : std::string{}, + self.model_); + }); + fields_.emplace_back( + StringPool::ToConnectedEndStr, [](Validity const& self) { - auto resolvedString = - self.model().strings()->resolve(self.data_->referencedGeomName_); return model_ptr::make( - resolvedString ? - *resolvedString : - std::string_view(""), + transitionEndToString(*self.transitionToConnectedEnd()), self.model_); }); + return; } if (data_->geomOffsetType_ != InvalidOffsetType) { @@ -104,11 +310,10 @@ Validity::Validity(Validity::Data* data, fieldName, [pointIndex, p](Validity const& self) -> ModelNode::Ptr { - switch (self.data_->geomOffsetType_) { + switch (self.geometryOffsetType()) { case InvalidOffsetType: return ModelNode::Ptr{}; case GeoPosOffset: - return ModelNode::Ptr::make( - self.model_, + return self.model().resolve( ModelNodeAddress{ TileFeatureLayer::ColumnId::ValidityPoints, self.addr().index()}, @@ -123,12 +328,13 @@ Validity::Validity(Validity::Data* data, }; if (data_->geomDescrType_ == OffsetRangeValidity) { - auto& [start, end] = std::get(data_->geomDescr_); + auto& start = data_->geomDescr_.range_.first; + auto& end = data_->geomDescr_.range_.second; exposeOffsetPoint(StringPool::StartStr, 1, start); exposeOffsetPoint(StringPool::EndStr, 2, end); } else if (data_->geomDescrType_ == OffsetPointValidity) { - exposeOffsetPoint(StringPool::PointStr, 0, std::get(data_->geomDescr_)); + exposeOffsetPoint(StringPool::PointStr, 0, data_->geomDescr_.point_); } if (data_->featureAddress_) { @@ -145,122 +351,291 @@ Validity::Validity(Validity::Data* data, void Validity::setDirection(const Validity::Direction& v) { + ensureMaterialized(); data_->direction_ = v; } Validity::Direction Validity::direction() const { + if (!data_) { + return simpleDirection_; + } return data_->direction_; } Validity::GeometryOffsetType Validity::geometryOffsetType() const { + if (!data_) { + return InvalidOffsetType; + } return data_->geomOffsetType_; } Validity::GeometryDescriptionType 
Validity::geometryDescriptionType() const { + if (!data_) { + return NoGeometry; + } return data_->geomDescrType_; } -void Validity::setGeometryName(const std::optional& geometryName) +void Validity::setGeometryStage(std::optional geometryStage) { - simfil::StringId nameId = simfil::StringPool::Empty; - if (geometryName) - if (auto res = model().strings()->emplace(*geometryName); res) - nameId = *res; - - data_->referencedGeomName_ = nameId; + ensureMaterialized(); + if (!geometryStage) { + data_->referencedStage_ = Data::InvalidReferencedStage; + return; + } + if (*geometryStage > static_cast(std::numeric_limits::max())) { + raise("Validity::setGeometryStage: stage is out of int8_t range."); + } + data_->referencedStage_ = static_cast(*geometryStage); } -std::optional Validity::geometryName() const +std::optional Validity::geometryStage() const { - if (!data_->referencedGeomName_) { + if (!data_) { + return {}; + } + if (data_->referencedStage_ == Data::InvalidReferencedStage) { return {}; } - return model().strings()->resolve(data_->referencedGeomName_); + return static_cast(data_->referencedStage_); } void Validity::setOffsetPoint(Point pos) { + ensureMaterialized(); data_->geomDescrType_ = OffsetPointValidity; data_->geomOffsetType_ = GeoPosOffset; - data_->geomDescr_ = pos; + data_->geomDescr_.point_ = pos; } void Validity::setOffsetPoint(Validity::GeometryOffsetType offsetType, double pos) { + ensureMaterialized(); assert(offsetType != InvalidOffsetType && offsetType != GeoPosOffset); data_->geomDescrType_ = OffsetPointValidity; data_->geomOffsetType_ = offsetType; - data_->geomDescr_ = Point{pos, 0, 0}; + data_->geomDescr_.point_ = Point{pos, 0, 0}; } std::optional Validity::offsetPoint() const { + if (!data_) { + return {}; + } if (data_->geomDescrType_ != OffsetPointValidity) { return {}; } - return std::get(data_->geomDescr_); + return data_->geomDescr_.point_; } void Validity::setOffsetRange(Point start, Point end) { + ensureMaterialized(); 
data_->geomDescrType_ = OffsetRangeValidity; data_->geomOffsetType_ = GeoPosOffset; - data_->geomDescr_ = std::make_pair(start, end); + data_->geomDescr_.range_ = {start, end}; } void Validity::setOffsetRange(Validity::GeometryOffsetType offsetType, double start, double end) { + ensureMaterialized(); assert(offsetType != InvalidOffsetType && offsetType != GeoPosOffset); data_->geomDescrType_ = OffsetRangeValidity; data_->geomOffsetType_ = offsetType; - data_->geomDescr_ = std::make_pair(Point{start, 0, 0}, Point{end, 0, 0}); + data_->geomDescr_.range_ = {Point{start, 0, 0}, Point{end, 0, 0}}; } std::optional> Validity::offsetRange() const { + if (!data_) { + return {}; + } if (data_->geomDescrType_ != OffsetRangeValidity) { return {}; } - return std::get(data_->geomDescr_); + return std::pair{data_->geomDescr_.range_.first, data_->geomDescr_.range_.second}; } void Validity::setSimpleGeometry(model_ptr geom) { + ensureMaterialized(); if (geom) { data_->geomDescrType_ = SimpleGeometry; + data_->geomDescr_.simpleGeometry_ = geom->addr(); } else { data_->geomDescrType_ = NoGeometry; + data_->geomDescr_.simpleGeometry_ = {}; } data_->geomOffsetType_ = InvalidOffsetType; - data_->geomDescr_ = geom->addr(); } model_ptr Validity::simpleGeometry() const { + if (!data_) { + return {}; + } if (data_->geomDescrType_ != SimpleGeometry) { return {}; } - return model().resolveGeometry(*ModelNode::Ptr::make(model_, std::get(data_->geomDescr_))); + return model().resolve(data_->geomDescr_.simpleGeometry_); +} + +void Validity::setFeatureTransition( + model_ptr const& fromFeature, + TransitionEnd fromConnectedEnd, + model_ptr const& toFeature, + TransitionEnd toConnectedEnd, + uint32_t transitionNumber) +{ + ensureMaterialized(); + if (!fromFeature || !toFeature) { + raise("Validity::setFeatureTransition requires both from/to features."); + } + data_->geomDescrType_ = FeatureTransition; + data_->geomOffsetType_ = InvalidOffsetType; + data_->referencedStage_ = 
Data::InvalidReferencedStage; + data_->featureAddress_ = {}; + data_->geomDescr_.featureTransition_ = { + fromFeature->addr(), + toFeature->addr(), + transitionNumber, + packTransitionEnds(fromConnectedEnd, toConnectedEnd), + }; +} + +model_ptr Validity::transitionFromFeature() const +{ + if (!data_ || data_->geomDescrType_ != FeatureTransition) { + return {}; + } + return model().resolve(data_->geomDescr_.featureTransition_.fromFeature_); +} + +model_ptr Validity::transitionToFeature() const +{ + if (!data_ || data_->geomDescrType_ != FeatureTransition) { + return {}; + } + return model().resolve(data_->geomDescr_.featureTransition_.toFeature_); +} + +std::optional Validity::transitionFromConnectedEnd() const +{ + if (!data_ || data_->geomDescrType_ != FeatureTransition) { + return std::nullopt; + } + return unpackFromConnectedEnd(data_->geomDescr_.featureTransition_.connectedEnds_); +} + +std::optional Validity::transitionToConnectedEnd() const +{ + if (!data_ || data_->geomDescrType_ != FeatureTransition) { + return std::nullopt; + } + return unpackToConnectedEnd(data_->geomDescr_.featureTransition_.connectedEnds_); +} + +std::optional Validity::transitionNumber() const +{ + if (!data_ || data_->geomDescrType_ != FeatureTransition) { + return std::nullopt; + } + return data_->geomDescr_.featureTransition_.transitionNumber_; } SelfContainedGeometry Validity::computeGeometry( model_ptr geometryCollection, - std::string* error) const + std::string* error, + std::optional defaultGeometryStage) const { - if (data_->geomDescrType_ == SimpleGeometry) { + if (geometryDescriptionType() == SimpleGeometry) { // Return the self-contained geometry points. auto simpleGeom = simpleGeometry(); assert(simpleGeom); - return simpleGeom->toSelfContained(); + return applyDirectionToGeometry(simpleGeom->toSelfContained(), direction()); + } + + const auto referencedStage = geometryStage().has_value() + ? 
geometryStage() + : defaultGeometryStage; + + if (geometryDescriptionType() == FeatureTransition) { + auto fromFeature = transitionFromFeature(); + auto toFeature = transitionToFeature(); + auto fromConnectedEnd = transitionFromConnectedEnd(); + auto toConnectedEnd = transitionToConnectedEnd(); + if (!fromFeature || !toFeature || !fromConnectedEnd || !toConnectedEnd) { + if (error) { + *error = "Failed to resolve semantic feature transition validity."; + } + return {}; + } + + auto fromSegment = resolveTransitionSegment(fromFeature, *fromConnectedEnd, referencedStage); + if (!fromSegment) { + if (error) { + *error = fmt::format( + "Failed to resolve transition source geometry for feature {}.", + fromFeature->id()->toString()); + } + return {}; + } + + auto toSegment = resolveTransitionSegment(toFeature, *toConnectedEnd, referencedStage); + if (!toSegment) { + if (error) { + *error = fmt::format( + "Failed to resolve transition target geometry for feature {}.", + toFeature->id()->toString()); + } + return {}; + } + + std::vector points; + points.reserve(3); + auto appendIfNotDuplicate = [&](Point const& point) + { + if (points.empty() || !pointsCoincide(points.back(), point)) { + points.emplace_back(point); + } + }; + // Render the transition as outer-from -> shared transition midpoint -> outer-to. + // The midpoint prefers the hosting feature geometry (for example an intersection point) + // and otherwise falls back to the connected road endpoints. 
+ appendIfNotDuplicate(fromSegment->outer_); + bool appendedHostMidpoint = false; + if (geometryCollection) { + geometryCollection->forEachGeometry([&](auto&& geom) { + if (geom->geomType() != GeomType::Points || geom->numPoints() == 0) { + return true; + } + appendIfNotDuplicate(geom->pointAt(0)); + appendedHostMidpoint = true; + return false; + }); + } + if (!appendedHostMidpoint) { + if (pointsCoincide(fromSegment->inner_, toSegment->inner_)) { + appendIfNotDuplicate(fromSegment->inner_); + } else { + appendIfNotDuplicate(Point{ + (fromSegment->inner_.x + toSegment->inner_.x) * 0.5, + (fromSegment->inner_.y + toSegment->inner_.y) * 0.5, + (fromSegment->inner_.z + toSegment->inner_.z) * 0.5, + }); + } + } + appendIfNotDuplicate(toSegment->outer_); + return {points, points.size() > 1 ? GeomType::Line : GeomType::Points}; } // If this validity references some feature directly, // use the geometry collection of that feature. - if (data_->featureAddress_) { - auto feature = model().find(featureId()->typeId(), featureId()->keyValuePairs()); + if (auto featureIdNode = featureId()) { + auto feature = model().find(featureIdNode->typeId(), featureIdNode->keyValuePairs()); if (feature) { geometryCollection = feature->geomOrNull(); } else { - log().warn("Could not find feature by its ID {}", featureId()->toString()); + log().warn("Could not find feature by its ID {}", featureIdNode->toString()); } } @@ -268,28 +643,26 @@ SelfContainedGeometry Validity::computeGeometry( return {}; } - // Find a geometry with a matching name. - auto requiredGeomName = geometryName(); - model_ptr geometry; - geometryCollection->forEachGeometry([&requiredGeomName, &geometry](auto&& geom){ - if (geom->name() == requiredGeomName && geom->geomType() == GeomType::Line) { - geometry = geom; - return false; // abort iteration. - } - return true; - }); + // Resolve validity geometry by stage first (if specified), then by line type. 
+ auto geometry = resolveLineGeometry(geometryCollection, referencedStage); if (!geometry) { if (error) { - *error = fmt::format("Failed to find geometry for {}", requiredGeomName ? *requiredGeomName : ""); + if (referencedStage) { + *error = fmt::format( + "Failed to find line geometry for validity stage {}.", + *referencedStage); + } else { + *error = "Failed to find line geometry for validity at the configured high-fidelity stage."; + } } return {}; } // No geometry description from the attribute - just return the whole // geometry from the collection. - if (data_->geomDescrType_ == NoGeometry) { - return geometry->toSelfContained(); + if (geometryDescriptionType() == NoGeometry) { + return applyDirectionToGeometry(geometry->toSelfContained(), direction()); } // Now we have OffsetPointValidity or OffsetRangeValidity @@ -303,7 +676,7 @@ SelfContainedGeometry Validity::computeGeometry( Point startPoint; std::optional endPoint; - if (data_->geomDescrType_ == OffsetPointValidity) { + if (geometryDescriptionType() == OffsetPointValidity) { startPoint = *offsetPoint(); } else { @@ -315,7 +688,7 @@ SelfContainedGeometry Validity::computeGeometry( // Handle GeoPosOffset (a range of the geometry line, bound by two positions). if (offsetType == GeoPosOffset) { auto points = geometry->pointsFromPositionBound(startPoint, endPoint); - return {points, points.size() > 1 ? GeomType::Line : GeomType::Points}; + return applyDirectionToGeometry({points, points.size() > 1 ? GeomType::Line : GeomType::Points}, direction()); } // Handle BufferOffset (a range of the geometry bound by two indices). @@ -346,7 +719,7 @@ SelfContainedGeometry Validity::computeGeometry( for (auto pointIndex = startPointIndex; pointIndex <= endPointIndex; ++pointIndex) { points.emplace_back(geometry->pointAt(pointIndex)); } - return {points, points.size() > 1 ? GeomType::Line : GeomType::Points}; + return applyDirectionToGeometry({points, points.size() > 1 ? 
GeomType::Line : GeomType::Points}, direction()); } // Handle RelativeLengthOffset (a percentage range of the geometry). @@ -362,7 +735,7 @@ SelfContainedGeometry Validity::computeGeometry( // Handle MetricLengthOffset (a length range of the geometry in meters). if (offsetType == MetricLengthOffset || offsetType == RelativeLengthOffset) { auto points = geometry->pointsFromLengthBound(startPoint.x, endPoint ? std::optional(endPoint->x) : std::optional()); - return {points, points.size() > 1 ? GeomType::Line : GeomType::Points}; + return applyDirectionToGeometry({points, points.size() > 1 ? GeomType::Line : GeomType::Points}, direction()); } if (error) { @@ -372,11 +745,11 @@ SelfContainedGeometry Validity::computeGeometry( } model_ptr -MultiValidity::newPoint(Point pos, std::string_view geomName, Validity::Direction direction) +MultiValidity::newPoint(Point pos, std::optional geometryStage, Validity::Direction direction) { auto result = model().newValidity(); result->setOffsetPoint(pos); - result->setGeometryName(geomName); + result->setGeometryStage(geometryStage); result->setDirection(direction); append(result); return result; @@ -385,12 +758,12 @@ MultiValidity::newPoint(Point pos, std::string_view geomName, Validity::Directio model_ptr MultiValidity::newRange( Point start, Point end, - std::string_view geomName, + std::optional geometryStage, Validity::Direction direction) { auto result = model().newValidity(); result->setOffsetRange(start, end); - result->setGeometryName(geomName); + result->setGeometryStage(geometryStage); result->setDirection(direction); append(result); return result; @@ -399,12 +772,12 @@ model_ptr MultiValidity::newRange( model_ptr MultiValidity::newPoint( Validity::GeometryOffsetType offsetType, double pos, - std::string_view geomName, + std::optional geometryStage, Validity::Direction direction) { auto result = model().newValidity(); result->setOffsetPoint(offsetType, pos); - result->setGeometryName(geomName); + 
result->setGeometryStage(geometryStage); result->setDirection(direction); append(result); return result; @@ -413,22 +786,22 @@ model_ptr MultiValidity::newPoint( model_ptr MultiValidity::newPoint( Validity::GeometryOffsetType offsetType, int32_t pos, - std::string_view geomName, + std::optional geometryStage, Validity::Direction direction) { - return newPoint(offsetType, static_cast(pos), geomName, direction); + return newPoint(offsetType, static_cast(pos), geometryStage, direction); } model_ptr MultiValidity::newRange( Validity::GeometryOffsetType offsetType, double start, double end, - std::string_view geomName, + std::optional geometryStage, Validity::Direction direction) { auto result = model().newValidity(); result->setOffsetRange(offsetType, start, end); - result->setGeometryName(geomName); + result->setGeometryStage(geometryStage); result->setDirection(direction); append(result); return result; @@ -438,14 +811,14 @@ model_ptr MultiValidity::newRange( Validity::GeometryOffsetType offsetType, int32_t start, int32_t end, - std::string_view geomName, + std::optional geometryStage, Validity::Direction direction) { return newRange( offsetType, static_cast(start), static_cast(end), - geomName, + geometryStage, direction); } @@ -459,12 +832,62 @@ MultiValidity::newGeometry(model_ptr geom, Validity::Direction directi return result; } -model_ptr MultiValidity::newDirection(Validity::Direction direction) +model_ptr +MultiValidity::newFeatureId(model_ptr const& featureId, Validity::Direction direction) +{ + auto result = model().newValidity(); + result->setFeatureId(featureId); + result->setDirection(direction); + append(result); + return result; +} + +model_ptr +MultiValidity::newGeomStage(uint32_t geometryStage, Validity::Direction direction) +{ + auto result = model().newValidity(); + result->setGeometryStage(geometryStage); + result->setDirection(direction); + append(result); + return result; +} + +model_ptr MultiValidity::newFeatureTransition( + model_ptr const& 
fromFeature, + Validity::TransitionEnd fromConnectedEnd, + model_ptr const& toFeature, + Validity::TransitionEnd toConnectedEnd, + uint32_t transitionNumber, + Validity::Direction direction) { auto result = model().newValidity(); + result->setFeatureTransition( + fromFeature, + fromConnectedEnd, + toFeature, + toConnectedEnd, + transitionNumber); result->setDirection(direction); append(result); return result; } +model_ptr MultiValidity::newComplete(Validity::Direction direction) +{ + return newDirection(direction == Validity::Empty ? Validity::Both : direction); +} + +model_ptr MultiValidity::newDirection(Validity::Direction direction) +{ + const auto simpleAddr = simfil::ModelNodeAddress{ + TileFeatureLayer::ColumnId::SimpleValidity, + static_cast(direction)}; + if (auto upgradedAddress = model().upgradedSimpleValidityAddress(simpleAddr)) { + appendInternal(model_ptr::make(model_, *upgradedAddress)); + return model().resolve(*upgradedAddress); + } + appendInternal(model_ptr::make(model_, simpleAddr)); + return model().resolve(simpleAddr); +} + } diff --git a/libs/pymapget/CMakeLists.txt b/libs/pymapget/CMakeLists.txt index 8d9a2016..fb7b8066 100644 --- a/libs/pymapget/CMakeLists.txt +++ b/libs/pymapget/CMakeLists.txt @@ -28,17 +28,7 @@ target_compile_features(pymapget INTERFACE cxx_std_17) -FetchContent_GetProperties(cpp-httplib) -if (MSVC AND CPP-HTTPLIB_POPULATED) - # Required because cpp-httplib speaks https via OpenSSL. - # Only needed if httplib came via FetchContent. 
- set(DEPLOY_FILES - "${OPENSSL_INCLUDE_DIR}/../libcrypto-1_1-x64.dll" - "${OPENSSL_INCLUDE_DIR}/../libssl-1_1-x64.dll" - "${CMAKE_CURRENT_LIST_DIR}/__main__.py") -else() - set(DEPLOY_FILES "${CMAKE_CURRENT_LIST_DIR}/__main__.py") -endif() +set(DEPLOY_FILES "${CMAKE_CURRENT_LIST_DIR}/__main__.py") add_wheel(pymapget NAME mapget diff --git a/libs/pymapget/binding/py-layer.h b/libs/pymapget/binding/py-layer.h index bc641193..3d5bcaa7 100644 --- a/libs/pymapget/binding/py-layer.h +++ b/libs/pymapget/binding/py-layer.h @@ -96,8 +96,17 @@ void bindTileLayer(py::module_& m) std::visit( [&](auto&& vv) { - if constexpr (!std::is_same_v, std::monostate>) + using V = std::decay_t; + if constexpr (std::is_same_v) { + return; + } + else if constexpr (std::is_same_v) { + // Store bytes in hex to keep JSON valid and readable. + self.setInfo(k, vv.toHex()); + } + else { self.setInfo(k, vv); + } }, v); }, @@ -106,7 +115,7 @@ void bindTileLayer(py::module_& m) R"pbdoc( Set a JSON field to store sizes, construction times, and other arbitrary meta-information. The value may be - bool, int, double or string. + bool, int, double or string. ByteArray values are stored as hex strings. )pbdoc") .def( "set_prefix", @@ -215,5 +224,12 @@ void bindTileLayer(py::module_& m) { return self.toJson().dump(); }, R"pbdoc( Convert this tile to a GeoJSON feature collection. 
- )pbdoc"); + )pbdoc") + .def("__len__", [](TileFeatureLayer const& self) { return self.size(); }) + .def("__getitem__", [](TileFeatureLayer const& self, int64_t i) { + auto sz = (int64_t)self.size(); + if (i < 0) i += sz; + if (i < 0 || i >= sz) throw py::index_error(); + return BoundFeature(self.at((size_t)i)); + }); } diff --git a/libs/pymapget/binding/py-model.h b/libs/pymapget/binding/py-model.h index 92e7c931..a78d9399 100644 --- a/libs/pymapget/binding/py-model.h +++ b/libs/pymapget/binding/py-model.h @@ -16,6 +16,8 @@ using namespace simfil; namespace mapget { +py::object nodeToPython(model_ptr const& n, TileFeatureLayer& fl, bool checkMultimap = false); + struct BoundModelNode { virtual ~BoundModelNode() = default; @@ -48,11 +50,98 @@ struct BoundModelNodeBase : public BoundModelNode py::class_(m, "ModelNode") .def( "value", - [](const model_ptr& node) { return node->value(); }, + [](BoundModelNode& self) { + if (auto n = self.node()) + return n->value(); + return ScalarValueType{}; + }, R"pbdoc( Get the node's scalar value if it has one. 
- )pbdoc"); - py::class_(m, "ModelNodeBase"); + )pbdoc") + .def( + "to_json", + [](BoundModelNode& self) { + if (auto n = self.node()) + return n->toJson().dump(); + return std::string("null"); + }, + "Convert this node to a JSON string.") + .def( + "to_dict", + [](BoundModelNode& self) -> py::object { + if (auto n = self.node()) { + auto& fl = self.featureLayer(); + return nodeToPython(n, fl); + } + return py::none(); + }, + "Convert this node to a Python dict/list/scalar."); + py::enum_(m, "ValueType") + .value("UNDEF", ValueType::Undef) + .value("NULL_", ValueType::Null) + .value("BOOL", ValueType::Bool) + .value("INT", ValueType::Int) + .value("FLOAT", ValueType::Float) + .value("STRING", ValueType::String) + .value("BYTES", ValueType::Bytes) + .value("OBJECT", ValueType::Object) + .value("ARRAY", ValueType::Array); + + py::class_(m, "ModelNodeBase") + .def("__len__", [](BoundModelNodeBase& self) { + return self.modelNodePtr_->size(); + }) + .def("__getitem__", [](BoundModelNodeBase& self, std::string_view const& key) { + auto& fl = self.featureLayer(); + for (auto const& [fieldId, child] : self.modelNodePtr_->fields()) { + if (auto resolved = fl.lookupStringId(fieldId)) { + if (*resolved == key) { + BoundModelNodeBase node; + node.modelNodePtr_ = child; + return node; + } + } + } + throw py::key_error(std::string(key)); + }, py::arg("key")) + .def("__getitem__", [](BoundModelNodeBase& self, int64_t i) { + auto sz = (int64_t)self.modelNodePtr_->size(); + if (i < 0) i += sz; + if (i < 0 || i >= sz) throw py::index_error(); + BoundModelNodeBase node; + node.modelNodePtr_ = self.modelNodePtr_->at(i); + return node; + }, py::arg("index")) + .def("__iter__", [](BoundModelNodeBase& self) { + auto n = self.modelNodePtr_; + auto type = n->type(); + if (type == ValueType::Object) { + py::list result; + auto& fl = self.featureLayer(); + for (auto const& [fieldId, child] : n->fields()) { + if (auto key = fl.lookupStringId(fieldId)) { + BoundModelNodeBase node; + 
node.modelNodePtr_ = child; + result.append(py::make_tuple(std::string(*key), node)); + } + } + return py::iter(result); + } + else if (type == ValueType::Array) { + py::list result; + for (uint32_t i = 0; i < n->size(); ++i) { + BoundModelNodeBase node; + node.modelNodePtr_ = n->at(i); + result.append(node); + } + return py::iter(result); + } + py::list empty; + return py::iter(empty); + }) + .def("type", [](BoundModelNodeBase& self) { + return self.modelNodePtr_->type(); + }); } ModelNode::Ptr node() override { return modelNodePtr_; } @@ -89,7 +178,7 @@ ModelVariant pyValueToModel(py::object const& pyValue, TileFeatureLayer& model) else if (py::isinstance(pyValue)) { // Recursively convert Python list to array. auto list = pyValue.cast(); - auto arr = model.newArray(list.size()); + auto arr = model.newArray(list.size(), true); for (auto const& item : list) { auto value = pyValueToModel(py::reinterpret_borrow(item), model); @@ -103,7 +192,7 @@ ModelVariant pyValueToModel(py::object const& pyValue, TileFeatureLayer& model) else if (py::isinstance(pyValue)) { // Recursively convert Python dict to object. 
auto dict = pyValue.cast(); - auto obj = model.newObject(dict.size()); + auto obj = model.newObject(dict.size(), true); for (auto const& [anyKey, anyValue] : dict) { std::string key = py::str(anyKey); @@ -164,6 +253,31 @@ struct BoundObject : public BoundModelNode { auto boundClass = py::class_(m, "Object"); bindObjectMethods(boundClass); + boundClass + .def("__len__", [](BoundObject& self) { return self.modelNodePtr_->size(); }) + .def( + "__getitem__", + [](BoundObject& self, std::string_view const& key) { + auto result = self.modelNodePtr_->get(key); + if (!result) throw py::key_error(std::string(key)); + BoundModelNodeBase node; + node.modelNodePtr_ = *result; + return node; + }, + py::arg("key"), + "Get a field by name.") + .def("__iter__", [](BoundObject& self) { + py::list result; + auto& fl = self.featureLayer(); + for (auto const& [fieldId, childNode] : self.modelNodePtr_->fields()) { + if (auto resolved = fl.lookupStringId(fieldId)) { + BoundModelNodeBase node; + node.modelNodePtr_ = childNode; + result.append(py::make_tuple(std::string(*resolved), node)); + } + } + return py::iter(result); + }); } ModelNode::Ptr node() override { return modelNodePtr_; } @@ -185,7 +299,30 @@ struct BoundArray : public BoundModelNode std::visit([&self](auto&& value) { self.modelNodePtr_->append(value); }, vv); }, py::arg("value"), - "Append a value to the array."); + "Append a value to the array.") + .def("__len__", [](BoundArray& self) { return self.modelNodePtr_->size(); }) + .def( + "__getitem__", + [](BoundArray& self, int64_t i) { + auto sz = (int64_t)self.modelNodePtr_->size(); + if (i < 0) i += sz; + if (i < 0 || i >= sz) throw py::index_error(); + BoundModelNodeBase node; + node.modelNodePtr_ = self.modelNodePtr_->at(i); + return node; + }, + py::arg("index"), + "Get an element by index.") + .def("__iter__", [](BoundArray& self) { + py::list result; + auto sz = self.modelNodePtr_->size(); + for (uint32_t i = 0; i < sz; ++i) { + BoundModelNodeBase node; + 
node.modelNodePtr_ = self.modelNodePtr_->at(i); + result.append(node); + } + return py::iter(result); + }); } ModelNode::Ptr node() override { return modelNodePtr_; } @@ -225,7 +362,22 @@ struct BoundGeometry : public BoundModelNode py::arg("point"), R"pbdoc( Append a point to the geometry. - )pbdoc"); + )pbdoc") + .def("geom_type", [](BoundGeometry& self) { return self.modelNodePtr_->geomType(); }, + "Get the type of the geometry.") + .def("num_points", [](BoundGeometry& self) { return self.modelNodePtr_->numPoints(); }, + "Get the number of points.") + .def("point_at", [](BoundGeometry& self, size_t i) { return self.modelNodePtr_->pointAt(i); }, + py::arg("index"), "Get a point at an index.") + .def("__len__", [](BoundGeometry& self) { return self.modelNodePtr_->numPoints(); }) + .def("__getitem__", [](BoundGeometry& self, int64_t i) { + auto n = (int64_t)self.modelNodePtr_->numPoints(); + if (i < 0) i += n; + if (i < 0 || i >= n) throw py::index_error(); + return self.modelNodePtr_->pointAt(i); + }) + .def("length", [](BoundGeometry& self) { return self.modelNodePtr_->length(); }, + "Get total length in metres (for polylines)."); } ModelNode::Ptr node() override { return modelNodePtr_; } @@ -245,7 +397,19 @@ struct BoundGeometryCollection : public BoundModelNode [](BoundGeometryCollection& self, GeomType const& geomType) { return BoundGeometry(self.modelNodePtr_->newGeometry(geomType)); }, py::arg("geom_type"), - "Create and insert a new geometry into the collection."); + "Create and insert a new geometry into the collection.") + .def("__len__", [](BoundGeometryCollection& self) { + return self.modelNodePtr_->numGeometries(); + }) + .def("__iter__", [](BoundGeometryCollection& self) { + py::list result; + self.modelNodePtr_->forEachGeometry( + [&result](model_ptr const& geom) { + result.append(BoundGeometry(geom)); + return true; + }); + return py::iter(result); + }); } ModelNode::Ptr node() override { return modelNodePtr_; } @@ -265,7 +429,7 @@ struct 
BoundAttribute : public BoundObject .value("EMPTY", Validity::Direction::Empty) .value("POSITIVE", Validity::Direction::Positive) .value("NEGATIVE", Validity::Direction::Negative) - .value("BOTH", Validity::Direction::Both) + .value("COMPLETE", Validity::Direction::Both) .value("NONE", Validity::Direction::None); auto boundClass = @@ -280,6 +444,18 @@ struct BoundAttribute : public BoundObject "Get the name of the attribute."); bindObjectMethods(boundClass); + boundClass.def("__iter__", [](BoundAttribute& self) { + py::list result; + auto& fl = self.featureLayer(); + for (auto const& [fieldId, childNode] : self.modelNodePtr_->fields()) { + if (auto resolved = fl.lookupStringId(fieldId)) { + BoundModelNodeBase node; + node.modelNodePtr_ = childNode; + result.append(py::make_tuple(std::string(*resolved), node)); + } + } + return py::iter(result); + }); } explicit BoundAttribute(model_ptr const& ptr) : BoundObject(ptr) {} @@ -301,7 +477,23 @@ struct BoundAttributeLayer : public BoundModelNode [](BoundAttributeLayer& self, BoundAttribute const& a) { self.modelNodePtr_->addAttribute(a.modelNodePtr_); }, py::arg("a"), - "Add an existing attribute to the layer."); + "Add an existing attribute to the layer.") + .def("__iter__", [](BoundAttributeLayer& self) { + py::list result; + self.modelNodePtr_->forEachAttribute( + [&result](model_ptr const& attr) { + result.append(BoundAttribute(attr)); + return true; + }); + return py::iter(result); + }) + .def("to_dict", [](BoundAttributeLayer& self) -> py::object { + if (auto n = self.node()) { + auto& fl = self.featureLayer(); + return nodeToPython(n, fl, true); + } + return py::none(); + }, "Convert this layer to a Python dict (handles duplicate attribute names)."); } ModelNode::Ptr node() override { return modelNodePtr_; } @@ -330,7 +522,26 @@ struct BoundAttributeLayerList : public BoundModelNode { self.modelNodePtr_->addLayer(name, l.modelNodePtr_); }, py::arg("name"), py::arg("layer"), - "Add an existing layer to the 
collection."); + "Add an existing layer to the collection.") + .def("__iter__", [](BoundAttributeLayerList& self) { + py::list result; + self.modelNodePtr_->forEachLayer( + [&result](std::string_view name, model_ptr const& layer) { + result.append(py::make_tuple(std::string(name), BoundAttributeLayer(layer))); + return true; + }); + return py::iter(result); + }) + .def("to_dict", [](BoundAttributeLayerList& self) -> py::object { + py::dict d; + auto& fl = self.featureLayer(); + self.modelNodePtr_->forEachLayer( + [&](std::string_view name, model_ptr const& layer) { + d[py::str(std::string(name))] = nodeToPython(layer, fl, true); + return true; + }); + return d; + }, "Convert all layers to a Python dict (handles duplicate attribute names)."); } ModelNode::Ptr node() override { return modelNodePtr_; } @@ -342,6 +553,28 @@ struct BoundAttributeLayerList : public BoundModelNode model_ptr modelNodePtr_; }; +struct BoundFeatureId : public BoundModelNode +{ + static void bind(py::module_& m) + { + py::class_(m, "FeatureId") + .def( + "to_string", + [](BoundFeatureId& self) { return self.modelNodePtr_->toString(); }, + "Convert the FeatureId to a string.") + .def( + "type_id", + [](BoundFeatureId& self) { return self.modelNodePtr_->typeId(); }, + "Get the feature ID's type ID."); + } + + ModelNode::Ptr node() override { return modelNodePtr_; } + + explicit BoundFeatureId(model_ptr const& ptr) : modelNodePtr_(ptr) {} + + model_ptr modelNodePtr_; +}; + struct BoundFeature : public BoundModelNode { static void bind(py::module_& m) @@ -352,20 +585,13 @@ struct BoundFeature : public BoundModelNode [](BoundFeature& self) { return self.modelNodePtr_->typeId(); }, "Get the type ID of the feature.") .def( - "evaluate", - [](BoundFeature& self, std::string_view const& expression) - { - auto res = self.modelNodePtr_->evaluate(expression); - if (!res) - throw std::runtime_error(res.error().message); - return res->getScalar(); - }, - py::arg("expression"), - "Evaluate a filter 
expression on this feature.") + "id", + [](BoundFeature& self) { return BoundFeatureId(self.modelNodePtr_->id()); }, + "Get the feature's unique ID.") .def( "to_json", - [](BoundFeature& self) { return self.modelNodePtr_->toJson(); }, - "Convert the Feature to JSON.") + [](BoundFeature& self) { return self.modelNodePtr_->toJson().dump(); }, + "Convert the Feature to a JSON string.") .def( "geom", [](BoundFeature& self) @@ -428,27 +654,94 @@ struct BoundFeature : public BoundModelNode model_ptr modelNodePtr_; }; -struct BoundFeatureId : public BoundModelNode +/// Recursively convert a ModelNode tree to native Python objects. +/// Mirrors simfil's ModelNode::toJson() (nodes.cpp) but builds +/// py::dict/py::list/scalars directly, avoiding JSON serialization. +/// +/// Handles two special cases that toJson() also handles: +/// - Multimap objects: when checkMultimap is true and an Object has +/// duplicate keys, values are grouped into lists with a "_multimap" +/// marker. Only AttributeLayer-level objects can have duplicates, +/// so callers should only pass true at that level to avoid overhead. +/// - ByteArray scalars: converted to {"_bytes": True, "hex": ..., "number": ...}. +py::object nodeToPython(model_ptr const& n, TileFeatureLayer& fl, bool checkMultimap) { - static void bind(py::module_& m) - { - py::class_(m, "FeatureId") - .def( - "to_string", - [](BoundFeatureId& self) { return self.modelNodePtr_->toString(); }, - "Convert the FeatureId to a string.") - .def( - "type_id", - [](BoundFeatureId& self) { return self.modelNodePtr_->typeId(); }, - "Get the feature ID's type ID."); + auto type = n->type(); + if (type == ValueType::Object) { + py::dict d; + if (checkMultimap) { + // Pre-scan for duplicate field IDs using stack-allocated + // array with cheap uint16_t comparison. Only called for + // AttributeLayer-level objects, not recursively. 
+ bool isMultiMap = false; + { + uint16_t seen[32]; + size_t count = 0; + for (auto const& [fieldId, _] : n->fields()) { + for (size_t j = 0; j < count; ++j) { + if (seen[j] == fieldId) { + isMultiMap = true; + break; + } + } + if (isMultiMap) break; + if (count < 32) + seen[count++] = fieldId; + } + } + if (isMultiMap) { + for (auto const& [fieldId, child] : n->fields()) { + if (auto key = fl.lookupStringId(fieldId)) { + auto pyKey = py::str(std::string(*key)); + if (d.contains(pyKey)) + d[pyKey].cast().append(nodeToPython(child, fl)); + else + d[pyKey] = py::list(py::make_tuple(nodeToPython(child, fl))); + } + } + d[py::str("_multimap")] = py::bool_(true); + return d; + } + } + for (auto const& [fieldId, child] : n->fields()) { + if (auto key = fl.lookupStringId(fieldId)) + d[py::str(std::string(*key))] = nodeToPython(child, fl); + } + return d; } - - ModelNode::Ptr node() override { return modelNodePtr_; } - - explicit BoundFeatureId(model_ptr const& ptr) : modelNodePtr_(ptr) {} - - model_ptr modelNodePtr_; -}; + if (type == ValueType::Array) { + py::list l; + for (uint32_t i = 0; i < n->size(); ++i) + l.append(nodeToPython(n->at(i), fl)); + return l; + } + auto v = n->value(); + return std::visit([](auto&& val) -> py::object { + using T = std::decay_t; + if constexpr (std::is_same_v) + return py::bool_(val); + else if constexpr (std::is_same_v) + return py::int_(val); + else if constexpr (std::is_same_v) + return py::float_(val); + else if constexpr (std::is_same_v) + return py::str(val); + else if constexpr (std::is_same_v) + return py::str(std::string(val)); + else if constexpr (std::is_same_v) { + py::dict d; + d["_bytes"] = py::bool_(true); + d["hex"] = py::str(val.toHex(false)); + if (auto decoded = val.decodeBigEndianI64()) + d["number"] = py::int_(*decoded); + else + d["number"] = py::none(); + return d; + } + else + return py::none(); + }, v); +} } // namespace mapget @@ -462,6 +755,6 @@ void bindModel(py::module& m) mapget::BoundAttribute::bind(m); 
mapget::BoundAttributeLayer::bind(m); mapget::BoundAttributeLayerList::bind(m); - mapget::BoundFeature::bind(m); mapget::BoundFeatureId::bind(m); + mapget::BoundFeature::bind(m); } diff --git a/libs/service/include/mapget/service/cache.h b/libs/service/include/mapget/service/cache.h index bee02b1d..8ffe471b 100644 --- a/libs/service/include/mapget/service/cache.h +++ b/libs/service/include/mapget/service/cache.h @@ -4,6 +4,8 @@ #include #include #include +#include +#include #include "mapget/model/info.h" #include "mapget/model/featurelayer.h" @@ -29,6 +31,7 @@ class Cache : public TileLayerStream::StringPoolCache, public std::enable_shared }; using Ptr = std::shared_ptr; + using TileBlobVisitor = std::function; // The following methods are already implemented, // they forward to the virtual methods on-demand. @@ -52,6 +55,9 @@ class Cache : public TileLayerStream::StringPoolCache, public std::enable_shared /** Abstract: Upsert (update or insert) a TileLayer blob. */ virtual void putTileLayerBlob(MapTileKey const& k, std::string const& v) = 0; + /** Abstract: Iterate through all cached tile layer blobs. */ + virtual void forEachTileLayerBlob(const TileBlobVisitor& cb) const = 0; + /** Abstract: Retrieve a string-pool blob for a sourceNodeId. */ virtual std::optional getStringPoolBlob(std::string_view const& sourceNodeId) = 0; @@ -78,8 +84,8 @@ class Cache : public TileLayerStream::StringPoolCache, public std::enable_shared TileLayerStream::StringPoolOffsetMap stringPoolOffsets_; // Statistics - int64_t cacheHits_ = 0; - int64_t cacheMisses_ = 0; + std::atomic cacheHits_{0}; + std::atomic cacheMisses_{0}; }; } diff --git a/libs/service/include/mapget/service/config.h b/libs/service/include/mapget/service/config.h index ba3b4108..fbc27f05 100644 --- a/libs/service/include/mapget/service/config.h +++ b/libs/service/include/mapget/service/config.h @@ -189,7 +189,7 @@ class DataSourceConfigService // Atomic flag to control the file watching thread. 
std::atomic watching_ = false; - // Thread which is watching the config file changed-timestamp. + // Thread which polls the config file for content changes. std::optional watchThread_; // Mutex to ensure that currentConfig_ and subscriptions_ are safely accessed. diff --git a/libs/service/include/mapget/service/datasource.h b/libs/service/include/mapget/service/datasource.h index a76c172e..dadf5ae7 100644 --- a/libs/service/include/mapget/service/datasource.h +++ b/libs/service/include/mapget/service/datasource.h @@ -58,7 +58,11 @@ class DataSource virtual std::vector locate(LocateRequest const& req); /** Called by mapget::Service worker. Dispatches to Cache or fill(...) on miss. */ - virtual TileLayer::Ptr get(MapTileKey const& k, Cache::Ptr& cache, DataSourceInfo const& info); + virtual TileLayer::Ptr get( + MapTileKey const& k, + Cache::Ptr& cache, + DataSourceInfo const& info, + TileLayer::LoadStateCallback loadStateCallback = {}); /** Add an authorization header-regex pair for this datasource. */ void requireAuthHeaderRegexMatchOption(std::string header, std::regex re); diff --git a/libs/service/include/mapget/service/memcache.h b/libs/service/include/mapget/service/memcache.h index 283c0e11..bdc2b452 100644 --- a/libs/service/include/mapget/service/memcache.h +++ b/libs/service/include/mapget/service/memcache.h @@ -29,6 +29,9 @@ class MemCache : public Cache /** Upsert a TileLayer blob. */ void putTileLayerBlob(MapTileKey const& k, std::string const& v) override; + /** Iterate over cached tile layer blobs. */ + void forEachTileLayerBlob(const TileBlobVisitor& cb) const override; + /** Retrieve a string-pool blob for a sourceNodeId -> No-Op */ std::optional getStringPoolBlob(std::string_view const& sourceNodeId) override {return {};} @@ -40,10 +43,10 @@ class MemCache : public Cache private: // Cached tile blobs. 
- std::shared_mutex cacheMutex_; + mutable std::shared_mutex cacheMutex_; std::unordered_map cachedTiles_; std::deque fifo_; uint32_t maxCachedTiles_ = 0; }; -} \ No newline at end of file +} diff --git a/libs/service/include/mapget/service/nullcache.h b/libs/service/include/mapget/service/nullcache.h index 00aa8e1a..66e29b71 100644 --- a/libs/service/include/mapget/service/nullcache.h +++ b/libs/service/include/mapget/service/nullcache.h @@ -20,6 +20,9 @@ class NullCache : public Cache /** Upsert a TileLayer blob - does nothing. */ void putTileLayerBlob(MapTileKey const& k, std::string const& v) override; + /** Iterate cached tile blobs - no-op. */ + void forEachTileLayerBlob(const TileBlobVisitor& cb) const override; + /** Retrieve a string-pool blob for a sourceNodeId - always returns empty. */ std::optional getStringPoolBlob(std::string_view const& sourceNodeId) override; @@ -27,4 +30,4 @@ class NullCache : public Cache void putStringPoolBlob(std::string_view const& sourceNodeId, std::string const& v) override; }; -} \ No newline at end of file +} diff --git a/libs/service/include/mapget/service/service.h b/libs/service/include/mapget/service/service.h index 79c9689e..6b738422 100644 --- a/libs/service/include/mapget/service/service.h +++ b/libs/service/include/mapget/service/service.h @@ -10,6 +10,7 @@ #include #include #include +#include namespace mapget { @@ -22,6 +23,12 @@ enum class RequestStatus { Aborted = 0x4 /** Canceled, e.g. because a bundled request cannot be fulfilled. */ }; +struct LayerRequestContext { + RequestStatus status_ = RequestStatus::NoDataSource; + LayerType layerType_ = LayerType::Features; + uint32_t stages_ = 1; +}; + /** * Client request for map data, which consists of a map id, * a map layer id, an array of tile ids, and a callback function @@ -41,6 +48,12 @@ class LayerTilesRequest std::string layerId, std::vector tiles); + /** Construct a staged request with tile IDs grouped by next missing stage. 
*/ + LayerTilesRequest( + std::string mapId, + std::string layerId, + std::vector> tileIdsByNextStage); + /** Get the current status of the request. */ RequestStatus getStatus(); @@ -57,10 +70,17 @@ class LayerTilesRequest std::string layerId_; /** - * The map tile ids for which this request is dedicated. - * Must not be empty. Result tiles will be processed in the given order. + * The map tile IDs for this request, grouped by next missing stage. + * Bucket index N means: send stage N and all higher stages for these IDs. */ - std::vector tiles_; + std::vector> tileIdsByNextStage_; + + /** + * True iff the request explicitly uses `tileIdsByNextStage`. + * Legacy `tileIds` requests keep unstaged semantics and must not be + * expanded into one backend request per advertised stage. + */ + bool usesStageBuckets_ = false; /** * The callback function which is called when all tiles have been processed. @@ -76,30 +96,47 @@ class LayerTilesRequest template LayerTilesRequest& onSourceDataLayer(Fun&& callback) { onSourceDataLayer_ = std::forward(callback); return *this; } + /** + * Callback for per-tile load-state changes. + */ + template + LayerTilesRequest& onLayerLoadStateChanged(Fun&& callback) { onLoadStateChanged_ = std::forward(callback); return *this; } + protected: virtual void notifyResult(TileLayer::Ptr); + void notifyLoadState(MapTileKey const& key, TileLayer::LoadState state) const; void setStatus(RequestStatus s); void notifyStatus(); nlohmann::json toJson(); private: + /** Resolve staged tile IDs into concrete stage-qualified tile keys. */ + void prepareResolvedLayer(LayerType layerType, uint32_t stages); + /** * The callback functions which are called when a result tile is available. */ std::function onFeatureLayer_; std::function onSourceDataLayer_; + std::function onLoadStateChanged_; - // So the service can track which tileId index from tiles_ + // So the service can track which tile index from resolvedTileKeys_ // is next in line to be processed. 
size_t nextTileIndex_ = 0; + // Resolved staged tile keys in scheduling order. + std::vector resolvedTileKeys_; + + // Track which resolved tile keys still need to be scheduled/served. + std::set tileKeysNotStarted_; + // So the requester can track how many results have been received. size_t resultCount_ = 0; // Mutex/condition variable for reading/setting request status. std::mutex statusMutex_; std::condition_variable statusConditionVariable_; - RequestStatus status_ = RequestStatus::Open; + std::atomic status_ = RequestStatus::Open; }; /** @@ -185,6 +222,14 @@ class Service std::string const& layerId, std::optional const& clientHeaders) const; + /** + * Resolve request context (status, layer type, stage count) for one map+layer. + */ + [[nodiscard]] LayerRequestContext resolveLayerRequest( + std::string const& mapId, + std::string const& layerId, + std::optional const& clientHeaders) const; + /** * Get Statistics about the operation of this service. * Returns the following values: @@ -194,6 +239,17 @@ class Service */ [[nodiscard]] nlohmann::json getStatistics() const; + /** + * Variant of getStatistics() with optional expensive analyses: + * - includeCachedFeatureTreeBytes: Parse cached feature tiles and aggregate + * detailed subtree sizes. + * - includeTileSizeDistribution: Build cached feature-tile size histogram + * and percentiles. + */ + [[nodiscard]] nlohmann::json getStatistics( + bool includeCachedFeatureTreeBytes, + bool includeTileSizeDistribution) const; + /** Get the Cache which this service was constructed with. 
*/ [[nodiscard]] Cache::Ptr cache(); diff --git a/libs/service/include/mapget/service/sqlitecache.h b/libs/service/include/mapget/service/sqlitecache.h index 55b42879..85387dcd 100644 --- a/libs/service/include/mapget/service/sqlitecache.h +++ b/libs/service/include/mapget/service/sqlitecache.h @@ -25,6 +25,7 @@ class SQLiteCache : public Cache std::optional getTileLayerBlob(MapTileKey const& k) override; void putTileLayerBlob(MapTileKey const& k, std::string const& v) override; + void forEachTileLayerBlob(const TileBlobVisitor& cb) const override; std::optional getStringPoolBlob(std::string_view const& sourceNodeId) override; void putStringPoolBlob(std::string_view const& sourceNodeId, std::string const& v) override; @@ -53,4 +54,4 @@ class SQLiteCache : public Cache } stmts_; }; -} // namespace mapget \ No newline at end of file +} // namespace mapget diff --git a/libs/service/src/cache.cpp b/libs/service/src/cache.cpp index 0382debc..f44aa464 100644 --- a/libs/service/src/cache.cpp +++ b/libs/service/src/cache.cpp @@ -29,22 +29,38 @@ std::shared_ptr Cache::getStringPool(const std::string_view& nodeId) std::shared_ptr stringPool = std::make_shared(nodeId); auto cachedStringsBlob = getStringPoolBlob(nodeId); if (cachedStringsBlob) { - // Read the string pool from the stream. - std::stringstream stream; - stream << *cachedStringsBlob; + std::vector bytes(cachedStringsBlob->begin(), cachedStringsBlob->end()); // First, read the header and the datasource node id. // These must match what we expect. 
TileLayerStream::MessageType streamMessageType; uint32_t streamMessageSize; - TileLayerStream::Reader::readMessageHeader(stream, streamMessageType, streamMessageSize); - auto streamDataSourceNodeId = StringPool::readDataSourceNodeId(stream); + size_t headerBytesRead = 0; + if (!TileLayerStream::Reader::readMessageHeader( + std::span(bytes), + streamMessageType, + streamMessageSize, + &headerBytesRead)) { + raise("Stream header error while parsing string pool."); + } + if (headerBytesRead + streamMessageSize > bytes.size()) { + raise("Invalid StringPool message size while parsing cache blob."); + } + + std::vector payload( + bytes.begin() + static_cast(headerBytesRead), + bytes.begin() + static_cast(headerBytesRead + streamMessageSize)); + size_t nodeIdBytesRead = 0; + auto streamDataSourceNodeId = StringPool::readDataSourceNodeId(payload, 0, &nodeIdBytesRead); if (streamMessageType != TileLayerStream::MessageType::StringPool || streamDataSourceNodeId != nodeId) { raise("Stream header error while parsing string pool."); } // Now, actually read the string pool message. 
- stringPool->read(stream); + auto readResult = stringPool->read(payload, nodeIdBytesRead); + if (!readResult) { + raise(readResult.error().message); + } stringPoolOffsets_.emplace(nodeId, stringPool->highest()); } auto [itNew, _] = stringPoolPerNodeId_.emplace(nodeId, stringPool); @@ -54,8 +70,8 @@ std::shared_ptr Cache::getStringPool(const std::string_view& nodeId) nlohmann::json Cache::getStatistics() const { return { - {"cache-hits", cacheHits_}, - {"cache-misses", cacheMisses_}, + {"cache-hits", cacheHits_.load()}, + {"cache-misses", cacheMisses_.load()}, {"loaded-string-pools", (int64_t)stringPoolOffsets().size()} }; } @@ -65,7 +81,7 @@ Cache::LookupResult Cache::getTileLayer(const MapTileKey& tileKey, DataSourceInf LookupResult result; auto tileBlob = getTileLayerBlob(tileKey); if (!tileBlob) { - ++cacheMisses_; + cacheMisses_.fetch_add(1, std::memory_order_relaxed); return result; } TileLayer::Ptr tile; @@ -85,17 +101,27 @@ Cache::LookupResult Cache::getTileLayer(const MapTileKey& tileKey, DataSourceInf tileReader.read(*tileBlob); if (tile) { + if (auto layerInfo = dataSource.getLayer(tileKey.layerId_); + layerInfo && + layerInfo->type_ == LayerType::Features && + layerInfo->stages_ > 1 && + tileKey.stage_ != UnspecifiedStage) + { + tile->setStage(tileKey.stage_); + } else { + tile->setStage(std::nullopt); + } auto ttl = tile->ttl(); if (ttl && ttl->count() > 0) { auto expiresAt = tile->timestamp() + *ttl; if (std::chrono::system_clock::now() > expiresAt) { log().debug("Cache entry expired for {}", tileKey.toString()); - ++cacheMisses_; + cacheMisses_.fetch_add(1, std::memory_order_relaxed); result.expiredAt = expiresAt; return result; } } - ++cacheHits_; + cacheHits_.fetch_add(1, std::memory_order_relaxed); log().debug("Returned tile from cache: {}", tileKey.tileId_.value_); result.tile = tile; } diff --git a/libs/service/src/config.cpp b/libs/service/src/config.cpp index 7842567c..378f745b 100644 --- a/libs/service/src/config.cpp +++ 
b/libs/service/src/config.cpp @@ -121,7 +121,7 @@ void DataSourceConfigService::loadConfig() if (sha256 == lastConfigSHA256_) { - log().info("Config file unchanged. No need to reload."); + log().trace("Config file unchanged. No need to reload."); return; } @@ -503,10 +503,6 @@ void DataSourceConfigService::startConfigFileWatchThread() }; auto lastModTime = modTime(path); - if (lastModTime) - loadConfig(); - else - log().debug("The config file does not exist yet."); while (watching_) { std::this_thread::sleep_for(std::chrono::milliseconds(500)); @@ -515,7 +511,6 @@ void DataSourceConfigService::startConfigFileWatchThread() if (currentModTime && !lastModTime) { // The file has appeared since the last check. log().debug("The config file exists now (t={}).", toStr(*currentModTime)); - loadConfig(); } else if (!currentModTime && lastModTime) { log().debug("The config file disappeared."); @@ -527,7 +522,6 @@ void DataSourceConfigService::startConfigFileWatchThread() "The config file changed (t0={} vs t1={}).", toStr(*currentModTime), toStr(*lastModTime)); - loadConfig(); } else log().trace( @@ -536,6 +530,13 @@ void DataSourceConfigService::startConfigFileWatchThread() toStr(*lastModTime)); } + if (currentModTime) { + // Reload by content hash, not only by timestamp. Fast rewrites can + // otherwise be missed if the file system timestamp granularity is + // coarse or the watch thread starts after the rewrite has landed. 
+ loadConfig(); + } + lastModTime = currentModTime; } }); diff --git a/libs/service/src/datasource.cpp b/libs/service/src/datasource.cpp index 2e15d0a6..34944cf9 100644 --- a/libs/service/src/datasource.cpp +++ b/libs/service/src/datasource.cpp @@ -1,14 +1,21 @@ #include "datasource.h" +#include +#include #include #include #include +#include #include "mapget/model/sourcedatalayer.h" #include "mapget/model/info.h" namespace mapget { -TileLayer::Ptr DataSource::get(const MapTileKey& k, Cache::Ptr& cache, DataSourceInfo const& info) +TileLayer::Ptr DataSource::get( + const MapTileKey& k, + Cache::Ptr& cache, + DataSourceInfo const& info, + TileLayer::LoadStateCallback loadStateCallback) { auto layerInfo = info.getLayer(k.layerId_); if (!layerInfo) @@ -25,7 +32,31 @@ TileLayer::Ptr DataSource::get(const MapTileKey& k, Cache::Ptr& cache, DataSourc info.mapId_, info.getLayer(k.layerId_), cache->getStringPool(info.nodeId_)); + if (loadStateCallback) { + tileFeatureLayer->setLoadStateCallback(loadStateCallback); + } + if (layerInfo->stages_ > 1 && k.stage_ != UnspecifiedStage) { + tileFeatureLayer->setStage(k.stage_); + if (k.stage_ > 0) { + auto stageZeroKey = k; + stageZeroKey.stage_ = 0; + auto stageZeroLookup = cache->getTileLayer(stageZeroKey, info); + auto stageZeroLayer = + std::dynamic_pointer_cast(stageZeroLookup.tile); + if (stageZeroLayer) { + std::vector expectedFeatureIds; + expectedFeatureIds.reserve(stageZeroLayer->size()); + for (auto const& feature : *stageZeroLayer) { + expectedFeatureIds.emplace_back(feature->id()->toString()); + } + tileFeatureLayer->setExpectedFeatureSequence(std::move(expectedFeatureIds)); + } + } + } else { + tileFeatureLayer->setStage(std::nullopt); + } fill(tileFeatureLayer); + tileFeatureLayer->validateExpectedFeatureSequenceComplete(); result = tileFeatureLayer; break; } @@ -36,6 +67,9 @@ TileLayer::Ptr DataSource::get(const MapTileKey& k, Cache::Ptr& cache, DataSourc info.mapId_, info.getLayer(k.layerId_), 
cache->getStringPool(info.nodeId_)); + if (loadStateCallback) { + tileSourceDataLayer->setLoadStateCallback(loadStateCallback); + } fill(tileSourceDataLayer); result = tileSourceDataLayer; break; @@ -47,13 +81,14 @@ TileLayer::Ptr DataSource::get(const MapTileKey& k, Cache::Ptr& cache, DataSourc // Notify the tile how long it took to fill. if (result) { auto duration = std::chrono::steady_clock::now() - start; - result->setInfo("fill-time-ms", std::chrono::duration_cast(duration).count()); + result->setInfo("Load+Convert/Total#ms", std::chrono::duration_cast(duration).count()); } return result; } void DataSource::requireAuthHeaderRegexMatchOption(std::string header, std::regex re) { + std::ranges::transform(header, header.begin(), [](unsigned char c) { return (char)std::tolower(c); }); authHeaderAlternatives_.insert({std::move(header), std::move(re)}); } @@ -64,7 +99,9 @@ bool DataSource::isDataSourceAuthorized( return true; for (auto const& [k, v] : clientHeaders) { - auto authHeaderPatternIt = authHeaderAlternatives_.find(k); + auto key = k; + std::ranges::transform(key, key.begin(), [](unsigned char c) { return (char)std::tolower(c); }); + auto authHeaderPatternIt = authHeaderAlternatives_.find(key); if (authHeaderPatternIt != authHeaderAlternatives_.end()) { if (std::regex_match(v, authHeaderPatternIt->second)) { return true; diff --git a/libs/service/src/memcache.cpp b/libs/service/src/memcache.cpp index d9a49dc8..f3d95a88 100644 --- a/libs/service/src/memcache.cpp +++ b/libs/service/src/memcache.cpp @@ -31,6 +31,14 @@ void MemCache::putTileLayerBlob(const MapTileKey& k, const std::string& v) } } +void MemCache::forEachTileLayerBlob(const TileBlobVisitor& cb) const +{ + std::shared_lock cacheLock(cacheMutex_); + for (const auto& [key, value] : cachedTiles_) { + cb(MapTileKey(key), value); + } +} + nlohmann::json MemCache::getStatistics() const { auto result = Cache::getStatistics(); result["memcache-map-size"] = (int64_t)cachedTiles_.size(); @@ -38,4 +46,4 @@ 
nlohmann::json MemCache::getStatistics() const { return result; } -} \ No newline at end of file +} diff --git a/libs/service/src/nullcache.cpp b/libs/service/src/nullcache.cpp index 0862a5ea..e1dc99e6 100644 --- a/libs/service/src/nullcache.cpp +++ b/libs/service/src/nullcache.cpp @@ -13,6 +13,11 @@ void NullCache::putTileLayerBlob(MapTileKey const& k, std::string const& v) // Do nothing - no caching } +void NullCache::forEachTileLayerBlob(const TileBlobVisitor& cb) const +{ + // No cached tiles. +} + std::optional NullCache::getStringPoolBlob(std::string_view const& sourceNodeId) { return std::nullopt; @@ -23,4 +28,4 @@ void NullCache::putStringPoolBlob(std::string_view const& sourceNodeId, std::str // Do nothing - no caching } -} \ No newline at end of file +} diff --git a/libs/service/src/service.cpp b/libs/service/src/service.cpp index c8449d8c..f90bf6b1 100644 --- a/libs/service/src/service.cpp +++ b/libs/service/src/service.cpp @@ -11,61 +11,161 @@ #include #include -#include #include #include #include #include #include +#include +#include +#include +#include #include "simfil/types.h" namespace mapget { +namespace { + +/** Ensure that Tile IDs are unique across all stages. 
*/ +std::vector> normalizeTileBuckets(std::vector> buckets) +{ + std::set seenTileIds; + for (auto& bucket : buckets) { + std::vector uniqueTiles; + uniqueTiles.reserve(bucket.size()); + for (auto const& tileId : bucket) { + if (seenTileIds.insert(tileId).second) { + uniqueTiles.push_back(tileId); + } + } + bucket.swap(uniqueTiles); + } + return buckets; +} + +} // namespace + LayerTilesRequest::LayerTilesRequest( std::string mapId, std::string layerId, std::vector tiles) + : LayerTilesRequest( + std::move(mapId), + std::move(layerId), + std::vector>{std::move(tiles)}) +{ + usesStageBuckets_ = false; +} + +LayerTilesRequest::LayerTilesRequest( + std::string mapId, + std::string layerId, + std::vector> tileIdsByNextStage) : mapId_(std::move(mapId)), layerId_(std::move(layerId)), - tiles_(std::move(tiles)) + tileIdsByNextStage_(normalizeTileBuckets(std::move(tileIdsByNextStage))) { - if (tiles_.empty()) { + usesStageBuckets_ = true; + bool hasAnyTileIds = false; + for (auto const& bucket : tileIdsByNextStage_) { + if (!bucket.empty()) { + hasAnyTileIds = true; + break; + } + } + + if (!hasAnyTileIds) { // An empty request is always set to success, but the client/service // is responsible for triggering notifyStatus() in that case. status_ = RequestStatus::Success; } } +void LayerTilesRequest::prepareResolvedLayer(LayerType layerType, uint32_t stages) +{ + nextTileIndex_ = 0; + resultCount_ = 0; + resolvedTileKeys_.clear(); + tileKeysNotStarted_.clear(); + + const auto normalizedStages = std::max(1U, stages); + + if (!usesStageBuckets_) { + if (!tileIdsByNextStage_.empty()) { + for (auto const& tileId : tileIdsByNextStage_.front()) { + MapTileKey key( + layerType, + mapId_, + layerId_, + tileId, + UnspecifiedStage); + if (tileKeysNotStarted_.insert(key).second) { + resolvedTileKeys_.push_back(std::move(key)); + } + } + } + status_ = resolvedTileKeys_.empty() ? 
RequestStatus::Success : RequestStatus::Open; + return; + } + + for (uint32_t stage = 0; stage < normalizedStages; ++stage) { + // For all tiles in bucket 0, we need to enqueue N stages. + // For tiles in bucket 1, we need to enqueue N-1 stages. + // etc. + for (size_t bucketIndex = 0; bucketIndex < tileIdsByNextStage_.size(); ++bucketIndex) { + const auto nextMissingStage = static_cast(bucketIndex); + if (nextMissingStage > stage || nextMissingStage >= normalizedStages) { + continue; + } + for (auto const& tileId : tileIdsByNextStage_[bucketIndex]) { + MapTileKey key(layerType, mapId_, layerId_, tileId, stage); + if (tileKeysNotStarted_.insert(key).second) { + resolvedTileKeys_.push_back(std::move(key)); + } + } + } + } + + status_ = resolvedTileKeys_.empty() ? RequestStatus::Success : RequestStatus::Open; +} + void LayerTilesRequest::notifyResult(TileLayer::Ptr r) { + if (isDone()) { + return; + } + const auto type = r->layerInfo()->type_; switch (type) { - case mapget::LayerType::Features: + case LayerType::Features: if (onFeatureLayer_) - onFeatureLayer_(std::move(std::static_pointer_cast(r))); + onFeatureLayer_(std::move(std::static_pointer_cast(r))); break; - case mapget::LayerType::SourceData: + case LayerType::SourceData: if (onSourceDataLayer_) - onSourceDataLayer_(std::move(std::static_pointer_cast(r))); + onSourceDataLayer_(std::move(std::static_pointer_cast(r))); break; default: - mapget::log().error(fmt::format("Unhandled layer type {}, no matching callback!", static_cast(type))); + log().error(fmt::format("Unhandled layer type {}, no matching callback!", static_cast(type))); break; } ++resultCount_; - if (resultCount_ == tiles_.size()) { + if (resultCount_ == resolvedTileKeys_.size()) { setStatus(RequestStatus::Success); } } +void LayerTilesRequest::notifyLoadState(MapTileKey const& key, TileLayer::LoadState state) const { + if (onLoadStateChanged_) { + onLoadStateChanged_(key, state); + } +} + void LayerTilesRequest::setStatus(RequestStatus s) { - { 
- std::unique_lock statusLock(statusMutex_); - this->status_ = s; - } + this->status_ = s; notifyStatus(); } @@ -90,14 +190,32 @@ void LayerTilesRequest::wait() nlohmann::json LayerTilesRequest::toJson() { - auto tileIds = nlohmann::json::array(); - for (auto const& tid : tiles_) - tileIds.emplace_back(tid.value_); - return nlohmann::json::object({ + auto requestJson = nlohmann::json::object({ {"mapId", mapId_}, - {"layerId", layerId_}, - {"tileIds", tileIds} + {"layerId", layerId_} }); + + if (!usesStageBuckets_) { + auto tileIds = nlohmann::json::array(); + if (!tileIdsByNextStage_.empty()) { + for (auto const& tileId : tileIdsByNextStage_.front()) { + tileIds.emplace_back(tileId.value_); + } + } + requestJson["tileIds"] = std::move(tileIds); + return requestJson; + } + + auto tileIdsByNextStage = nlohmann::json::array(); + for (auto const& bucket : tileIdsByNextStage_) { + auto tileIds = nlohmann::json::array(); + for (auto const& tileId : bucket) { + tileIds.emplace_back(tileId.value_); + } + tileIdsByNextStage.push_back(std::move(tileIds)); + } + requestJson["tileIdsByNextStage"] = std::move(tileIdsByNextStage); + return requestJson; } RequestStatus LayerTilesRequest::getStatus() @@ -112,13 +230,16 @@ bool LayerTilesRequest::isDone() struct Service::Controller { + virtual ~Controller() = default; + struct Job { MapTileKey tileKey; - LayerTilesRequest::Ptr request; + std::vector waitingRequests; std::optional cacheExpiredAt; + TileLayer::LoadState loadStatus = TileLayer::LoadState::LoadingQueued; }; - std::set jobsInProgress_; // Set of jobs currently in progress + std::map> jobsInProgress_; // Jobs currently in progress + interested requests Cache::Ptr cache_; // The cache for the service std::optional defaultTtl_; // Default TTL applied when datasource does not override std::list requests_; // List of requests currently being processed @@ -127,80 +248,158 @@ struct Service::Controller explicit Controller(Cache::Ptr cache, std::optional defaultTtl) : 
cache_(std::move(cache)), - defaultTtl_(std::move(defaultTtl)) + defaultTtl_(defaultTtl) { if (!cache_) raise("Cache must not be null!"); } - std::optional nextJob(DataSourceInfo const& i) + struct Candidate { + std::list::const_iterator requestIt_; + LayerTilesRequest::Ptr request_; + MapTileKey tileKey_; + size_t nextTileIndex_ = 0; + }; + + static bool requestMatchesDataSource( + LayerTilesRequest::Ptr const& request, + DataSourceInfo const& info) { - // Workers call the nextJob function when they are free. - // Note: For thread safety, jobsMutex_ must be held - // when calling this function. - - std::optional result; - - // Return next job, if available. - bool cachedTilesServed = false; - do { - cachedTilesServed = false; - for (auto reqIt = requests_.begin(); reqIt != requests_.end(); ++reqIt) { - auto const& request = *reqIt; - auto layerIt = i.layers_.find(request->layerId_); - - // Are there tiles left to be processed in the request? - if (request->mapId_ == i.mapId_ && layerIt != i.layers_.end()) { - if (request->nextTileIndex_ >= request->tiles_.size()) { - continue; - } + if (!request || request->isDone()) + return false; + if (request->mapId_ != info.mapId_) + return false; + return info.layers_.find(request->layerId_) != info.layers_.end(); + } - // Create result wrapper object. - auto tileId = request->tiles_[request->nextTileIndex_++]; - result = Job{MapTileKey(), request, std::nullopt}; - result->tileKey.layer_ = layerIt->second->type_; - result->tileKey.mapId_ = request->mapId_; - result->tileKey.layerId_ = request->layerId_; - result->tileKey.tileId_ = tileId; - - // Cache lookup. 
- auto cachedResult = cache_->getTileLayer(result->tileKey, i); - if (cachedResult.tile) { - log().debug("Serving cached tile: {}", result->tileKey.toString()); - request->notifyResult(cachedResult.tile); - result.reset(); - cachedTilesServed = true; - continue; - } - result->cacheExpiredAt = cachedResult.expiredAt; - - if (jobsInProgress_.find(result->tileKey) != jobsInProgress_.end()) { - // Don't work on something that is already being worked on. - // Wait for the work to finish, then send the (hopefully cached) result. - log().debug("Delaying tile with job in progress: {}", - result->tileKey.toString()); - --request->nextTileIndex_; - result.reset(); - continue; - } + [[nodiscard]] std::optional nextPendingTileKey(LayerTilesRequest const& request) const + { + auto keyIndex = request.nextTileIndex_; + while (keyIndex < request.resolvedTileKeys_.size()) { + auto const& candidate = request.resolvedTileKeys_[keyIndex]; + if (request.tileKeysNotStarted_.find(candidate) != request.tileKeysNotStarted_.end()) { + return keyIndex; + } + ++keyIndex; + } + return {}; + } - // Enter into the jobs-in-progress set. - jobsInProgress_.insert(result->tileKey); + [[nodiscard]] std::optional bestCandidate(DataSourceInfo const& info) const + { + std::optional best; + + for (auto reqIt = requests_.begin(); reqIt != requests_.end(); ++reqIt) { + auto const& request = *reqIt; + if (!requestMatchesDataSource(request, info)) + continue; + + auto pendingIndex = nextPendingTileKey(*request); + if (!pendingIndex) + continue; + auto pendingKey = request->resolvedTileKeys_[*pendingIndex]; + + if (!best || pendingKey.stage_ < best->tileKey_.stage_) { + best = Candidate{ + .requestIt_ = reqIt, + .request_ = request, + .tileKey_ = pendingKey, + .nextTileIndex_ = *pendingIndex, + }; + } + } - // Move this request to the end of the list, so others gain priority. 
- requests_.splice(requests_.end(), requests_, reqIt); + return best; + } - log().debug("Working on tile: {}", result->tileKey.toString()); - break; - } - } + void attachMatchingRequests( + LayerTilesRequest::Ptr const& selectedRequest, + MapTileKey const& tileKey, + std::vector& waitingRequests) const + { + for (auto const& otherRequest : requests_) { + if (!otherRequest || otherRequest == selectedRequest) + continue; + if (otherRequest->mapId_ != selectedRequest->mapId_ || otherRequest->layerId_ != selectedRequest->layerId_) + continue; + if (otherRequest->tileKeysNotStarted_.erase(tileKey) == 0) + continue; + waitingRequests.push_back(otherRequest); } - while (cachedTilesServed && !result); + } - // Clean up done requests. - requests_.remove_if([](auto&& r) {return r->nextTileIndex_ == r->tiles_.size(); }); + [[nodiscard]] std::shared_ptr dispatchCandidate( + Candidate const& candidate, + DataSourceInfo const& info) + { + // Commit this candidate as "consumed" for the request before dispatching it. + // The tile is then either satisfied immediately (cache/in-progress) or started. 
+ auto const& request = candidate.request_; + request->nextTileIndex_ = candidate.nextTileIndex_; + request->tileKeysNotStarted_.erase(candidate.tileKey_); + + auto cachedResult = cache_->getTileLayer(candidate.tileKey_, info); + if (cachedResult.tile) { + log().debug("Serving cached tile: {}", candidate.tileKey_.toString()); + request->notifyResult(cachedResult.tile); + return nullptr; + } - return result; + if (auto inProgress = jobsInProgress_.find(candidate.tileKey_); + inProgress != jobsInProgress_.end()) + { + log().debug("Joining tile with job in progress: {}", candidate.tileKey_.toString()); + request->notifyLoadState(candidate.tileKey_, inProgress->second->loadStatus); + inProgress->second->waitingRequests.push_back(request); + return nullptr; + } + + auto startedJob = std::make_shared(Job{candidate.tileKey_, {request}, cachedResult.expiredAt}); + attachMatchingRequests(request, candidate.tileKey_, startedJob->waitingRequests); + jobsInProgress_.emplace(startedJob->tileKey, startedJob); + + // Move this request to the end of the list, so others gain priority. + requests_.splice(requests_.end(), requests_, candidate.requestIt_); + log().debug("Working on tile: {}", startedJob->tileKey.toString()); + + return startedJob; + } + + void removeCompletedRequests() + { + requests_.remove_if([](auto const& request) { + return !request || request->tileKeysNotStarted_.empty(); + }); + } + + std::shared_ptr nextJob(DataSourceInfo const& i, std::unique_lock& lock) + { + // Workers call the nextJob function when they are free. + // Note: For thread safety, jobsMutex_ must be held + // when calling this function. The lock may be released/re-acquired + // between sweeps to allow external updates. + + while (true) { + // 1) Pick highest-priority pending tile for this datasource worker. + auto candidate = bestCandidate(i); + if (!candidate) + break; + + // 2) Dispatch that tile: cache hit, join running job, or start backend work. 
+ if (auto dispatchResult = dispatchCandidate(*candidate, i)) { + removeCompletedRequests(); + return dispatchResult; + } + + // 3) No backend job started yet, so yield lock and sweep again. + // Cached/in-progress work was handled without starting a new backend job. + // Let external threads update requests_ before the next sweep. + lock.unlock(); + lock.lock(); + } + + removeCompletedRequests(); + return {}; } virtual void loadAddOnTiles(TileFeatureLayer::Ptr const& baseTile, DataSource& baseDataSource) = 0; @@ -229,7 +428,7 @@ struct Service::Worker bool work() { - std::optional nextJob; + std::shared_ptr nextJob; { std::unique_lock lock(controller_.jobsMutex_); @@ -244,8 +443,8 @@ struct Service::Worker // is removed. All worker instances are expected to terminate. return true; } - nextJob = controller_.nextJob(info_); - return nextJob.has_value(); + nextJob = controller_.nextJob(info_, lock); + return !!nextJob; }); } @@ -260,7 +459,28 @@ struct Service::Worker dataSource_->onCacheExpired(job.tileKey, *job.cacheExpiredAt); } - auto layer = dataSource_->get(job.tileKey, controller_.cache_, info_); + auto notifyWaitingRequests = [&](TileLayer::LoadState state) { + std::vector waiting; + { + std::unique_lock lock(controller_.jobsMutex_); + job.loadStatus = state; + waiting = job.waitingRequests; + } + for (auto const& req : waiting) { + if (req) { + req->notifyLoadState(job.tileKey, state); + } + } + }; + + notifyWaitingRequests(TileLayer::LoadState::BackendFetching); + auto layer = dataSource_->get( + job.tileKey, + controller_.cache_, + info_, + [¬ifyWaitingRequests](TileLayer::LoadState state) { + notifyWaitingRequests(state); + }); if (!layer) raise("DataSource::get() returned null."); @@ -282,14 +502,20 @@ struct Service::Worker controller_.cache_->putTileLayer(layer); + std::vector notifyRequests; { std::unique_lock lock(controller_.jobsMutex_); controller_.jobsInProgress_.erase(job.tileKey); - job.request->notifyResult(layer); - // As we entered a tile 
into the cache, notify other workers - // that this tile can be served. - controller_.jobsAvailable_.notify_all(); + notifyRequests = job.waitingRequests; } + for (auto const& req : notifyRequests) { + if (req) { + req->notifyResult(layer); + } + } + // As we entered a tile into the cache, notify other workers + // that this tile can be served. + controller_.jobsAvailable_.notify_all(); } catch (std::exception& e) { log().error("Could not load tile {}: {}", @@ -307,6 +533,7 @@ struct Service::Impl : public Service::Controller std::map> dataSourceWorkers_; std::list addOnDataSources_; + mutable std::shared_mutex dataSourcesMutex_; std::unique_ptr configSubscription_; std::vector dataSourcesFromConfig_; @@ -314,26 +541,32 @@ struct Service::Impl : public Service::Controller Cache::Ptr cache, bool useDataSourceConfig, std::optional defaultTtl) - : Controller(std::move(cache), std::move(defaultTtl)) + : Controller(std::move(cache), defaultTtl) { if (!useDataSourceConfig) return; configSubscription_ = DataSourceConfigService::get().subscribe( [this](auto&& dataSourceConfigNodes) { + std::vector previousDataSources; + { + std::unique_lock lock(dataSourcesMutex_); + previousDataSources.swap(dataSourcesFromConfig_); + } + // Remove previous datasources. log().info("Config changed. Removing previous datasources."); - for (auto const& datasource : dataSourcesFromConfig_) { + for (auto const& datasource : previousDataSources) { removeDataSource(datasource); } - dataSourcesFromConfig_.clear(); // Add datasources present in the new configuration. 
auto index = 0; + std::vector configuredDataSources; for (const auto& configNode : dataSourceConfigNodes) { if (auto dataSource = DataSourceConfigService::get().makeDataSource(configNode)) { addDataSource(dataSource); - dataSourcesFromConfig_.push_back(dataSource); + configuredDataSources.push_back(dataSource); } else { log().error( @@ -341,33 +574,45 @@ struct Service::Impl : public Service::Controller } ++index; } + + std::unique_lock lock(dataSourcesMutex_); + dataSourcesFromConfig_ = std::move(configuredDataSources); }); } - ~Impl() + ~Impl() override { // Ensure that no new datasources are added while we are cleaning up. configSubscription_.reset(); - for (auto& dataSourceAndWorkers : dataSourceWorkers_) { - for (auto& worker : dataSourceAndWorkers.second) { - worker->shouldTerminate_ = true; + std::vector workersToJoin; + { + std::unique_lock lock(dataSourcesMutex_); + for (auto& [_, workers] : dataSourceWorkers_) { + for (auto& worker : workers) { + worker->shouldTerminate_ = true; + workersToJoin.push_back(worker); + } } + dataSourceWorkers_.clear(); + dataSourceInfo_.clear(); + addOnDataSources_.clear(); + dataSourcesFromConfig_.clear(); } // Wake up all workers to check shouldTerminate_. jobsAvailable_.notify_all(); - for (auto& dataSourceAndWorkers : dataSourceWorkers_) { - for (auto& worker : dataSourceAndWorkers.second) { - if (worker->thread_.joinable()) { - worker->thread_.join(); - } + for (auto& worker : workersToJoin) { + if (worker->thread_.joinable()) { + worker->thread_.join(); } } } void addDataSource(DataSource::Ptr const& dataSource) { + std::unique_lock lock(dataSourcesMutex_); + if (dataSource->info().nodeId_.empty()) { // Unique node IDs are required for the string pool offsets. 
raise("Tried to create service worker for an unnamed node!"); @@ -403,27 +648,29 @@ struct Service::Impl : public Service::Controller void removeDataSource(DataSource::Ptr const& dataSource) { - dataSourceInfo_.erase(dataSource); - addOnDataSources_.remove(dataSource); - - auto workers = dataSourceWorkers_.find(dataSource); - if (workers != dataSourceWorkers_.end()) + std::vector workersToJoin; { - // Signal each worker thread to terminate. + std::unique_lock lock(dataSourcesMutex_); + dataSourceInfo_.erase(dataSource); + addOnDataSources_.remove(dataSource); + + auto workers = dataSourceWorkers_.find(dataSource); + if (workers == dataSourceWorkers_.end()) { + return; + } for (auto& worker : workers->second) { worker->shouldTerminate_ = true; + workersToJoin.push_back(worker); } - jobsAvailable_.notify_all(); + dataSourceWorkers_.erase(workers); + } - // Wait for each worker thread to terminate. - for (auto& worker : workers->second) { - if (worker->thread_.joinable()) { - worker->thread_.join(); - } - } + jobsAvailable_.notify_all(); - // Remove workers. - dataSourceWorkers_.erase(workers); + for (auto& worker : workersToJoin) { + if (worker->thread_.joinable()) { + worker->thread_.join(); + } } } @@ -447,47 +694,55 @@ struct Service::Impl : public Service::Controller void abortRequest(LayerTilesRequest::Ptr const& r) { - std::unique_lock lock(jobsMutex_); - // Remove the request from the list of requests. - auto numRemoved = requests_.remove_if([r](auto&& request) { return r == request; }); - // Clear its jobs to mark it as done. - if (numRemoved) { - r->setStatus(RequestStatus::Aborted); + // Mark the request as aborted. + r->setStatus(RequestStatus::Aborted); + + // Remove the request from the list of requests (needs lock). 
+ { + std::unique_lock lock(jobsMutex_); + requests_.remove_if([r](auto&& request) { return r == request; }); } } std::vector getDataSourceInfos(std::optional const& clientHeaders) { std::vector infos; + std::shared_lock lock(dataSourcesMutex_); infos.reserve(dataSourceInfo_.size()); for (const auto& [dataSource, info] : dataSourceInfo_) { if (!clientHeaders || dataSource->isDataSourceAuthorized(*clientHeaders)) { infos.push_back(info); } } - return std::move(infos); + return infos; } void loadAddOnTiles(TileFeatureLayer::Ptr const& baseTile, DataSource& baseDataSource) override { - for (auto const& auxDataSource : addOnDataSources_) { + std::vector addOnDataSources; + { + std::shared_lock lock(dataSourcesMutex_); + addOnDataSources.assign(addOnDataSources_.begin(), addOnDataSources_.end()); + } + + for (auto const& auxDataSource : addOnDataSources) { if (auxDataSource->info().mapId_ == baseTile->mapId()) { auto auxTile = [&]() -> TileFeatureLayer::Ptr { - auto auxTile = auxDataSource->get(baseTile->id(), cache_, auxDataSource->info()); - if (!auxTile) { + auto result = auxDataSource->get(baseTile->id(), cache_, auxDataSource->info()); + if (!result) { log().warn("auxDataSource returned null for {}", baseTile->id().toString()); return {}; } - if (auxTile->error()) { - log().warn("Error while fetching addon tile {}: {}", baseTile->id().toString(), *auxTile->error()); + if (result->error()) { + log().warn("Error while fetching addon tile {}: {}", baseTile->id().toString(), *result->error()); return {}; } - if (auxTile->layerInfo()->type_ != LayerType::Features) { + if (result->layerInfo()->type_ != LayerType::Features) { log().warn("Addon tile is not a feature layer"); return {}; } - return std::static_pointer_cast(auxTile); + return std::static_pointer_cast(result); }(); if (!auxTile) { @@ -502,7 +757,7 @@ struct Service::Impl : public Service::Controller // artificial node id. 
auto auxBaseNodeId = baseTile->nodeId() + "|" + auxTile->nodeId(); auto auxBaseStringPool = cache_->getStringPool(auxBaseNodeId); - baseTile->setStrings(auxBaseStringPool); + (void) baseTile->setStrings(auxBaseStringPool); baseTile->setNodeId(auxBaseNodeId); // Adopt new attributes, features and relations for the base feature @@ -561,7 +816,7 @@ struct Service::Impl : public Service::Controller }; Service::Service(Cache::Ptr cache, bool useDataSourceConfig, std::optional defaultTtl) - : impl_(std::make_unique(std::move(cache), useDataSourceConfig, std::move(defaultTtl))) + : impl_(std::make_unique(std::move(cache), useDataSourceConfig, defaultTtl)) { } @@ -581,8 +836,8 @@ bool Service::request(std::vector const& requests, std:: { bool dataSourcesAvailable = true; for (const auto& r : requests) { - switch (hasLayerAndCanAccess(r->mapId_, r->layerId_, clientHeaders)) - { + auto context = resolveLayerRequest(r->mapId_, r->layerId_, clientHeaders); + switch (context.status_) { case RequestStatus::NoDataSource: dataSourcesAvailable = false; log().debug("No data source can provide requested map and layer: {}::{}", @@ -597,8 +852,13 @@ bool Service::request(std::vector const& requests, std:: r->layerId_); r->setStatus(RequestStatus::Unauthorized); break; - default: {} + default: { // Nothing to do. 
+ r->prepareResolvedLayer(context.layerType_, context.stages_); + if (r->isDone()) { + r->notifyStatus(); + } + } } } @@ -611,7 +871,9 @@ bool Service::request(std::vector const& requests, std:: } } else { - impl_->addRequest(r); + if (!r->isDone()) { + impl_->addRequest(r); + } } } return dataSourcesAvailable; @@ -620,7 +882,13 @@ bool Service::request(std::vector const& requests, std:: std::vector Service::locate(LocateRequest const& req) { std::vector results; - for (auto const& [ds, info] : impl_->dataSourceInfo_) + std::vector> dataSources; + { + std::shared_lock lock(impl_->dataSourcesMutex_); + dataSources.assign(impl_->dataSourceInfo_.begin(), impl_->dataSourceInfo_.end()); + } + + for (auto const& [ds, info] : dataSources) if (info.mapId_ == req.mapId_ && !info.isAddOn_) { for (auto const& location : ds->locate(req)) results.emplace_back(location); @@ -648,35 +916,298 @@ RequestStatus Service::hasLayerAndCanAccess( std::string const& layerId, std::optional const& clientHeaders) const { - std::unique_lock lock(impl_->jobsMutex_); - // Check that one of the data sources can fulfill the request. 
- for (auto& [ds, info] : impl_->dataSourceInfo_) { + return resolveLayerRequest(mapId, layerId, clientHeaders).status_; +} + +LayerRequestContext Service::resolveLayerRequest( + std::string const& mapId, + std::string const& layerId, + std::optional const& clientHeaders) const +{ + LayerRequestContext result; + + std::shared_lock lock(impl_->dataSourcesMutex_); + bool layerExists = false; + bool unauthorized = false; + bool foundAuthorizedLayer = false; + for (auto const& [ds, info] : impl_->dataSourceInfo_) { if (mapId != info.mapId_) continue; - if (info.layers_.find(layerId) != info.layers_.end()) { - if (clientHeaders && !ds->isDataSourceAuthorized(*clientHeaders)) { - return RequestStatus::Unauthorized; + + auto layerIt = info.layers_.find(layerId); + if (layerIt == info.layers_.end()) + continue; + + layerExists = true; + if (clientHeaders && !ds->isDataSourceAuthorized(*clientHeaders)) { + unauthorized = true; + continue; + } + + if (!foundAuthorizedLayer) { + result.status_ = RequestStatus::Success; + result.layerType_ = layerIt->second->type_; + result.stages_ = std::max(1U, layerIt->second->stages_); + foundAuthorizedLayer = true; + continue; + } + + if (result.layerType_ != layerIt->second->type_) { + log().warn( + "Conflicting layer types for {}::{} across data sources ({} vs {}).", + mapId, + layerId, + static_cast(result.layerType_), + static_cast(layerIt->second->type_)); + } + result.stages_ = std::max( + result.stages_, + std::max(1U, layerIt->second->stages_)); + } + + if (foundAuthorizedLayer) { + return result; + } + + if (layerExists && unauthorized) { + result.status_ = RequestStatus::Unauthorized; + } + else { + result.status_ = RequestStatus::NoDataSource; + } + return result; +} + +namespace +{ + +[[nodiscard]] nlohmann::json buildTileSizeDistribution(std::vector tileSizes) +{ + if (tileSizes.empty()) + return nlohmann::json::object(); + + std::sort(tileSizes.begin(), tileSizes.end()); + + const int64_t totalBytes = 
std::accumulate(tileSizes.begin(), tileSizes.end(), int64_t{0}); + const int64_t tileCount = static_cast(tileSizes.size()); + const int64_t meanBytes = totalBytes / tileCount; + + struct HistogramBin { + int64_t upperBound; + const char* label; + int64_t count = 0; + }; + std::vector bins = { + {16 * 1024, "<=16 KiB"}, + {32 * 1024, "16-32 KiB"}, + {64 * 1024, "32-64 KiB"}, + {128 * 1024, "64-128 KiB"}, + {256 * 1024, "128-256 KiB"}, + {512 * 1024, "256-512 KiB"}, + {1024 * 1024, "512 KiB-1 MiB"}, + {2 * 1024 * 1024, "1-2 MiB"}, + {4 * 1024 * 1024, "2-4 MiB"}, + }; + int64_t overflowCount = 0; + + for (const auto bytes : tileSizes) { + bool assigned = false; + for (auto& bin : bins) { + if (bytes <= bin.upperBound) { + ++bin.count; + assigned = true; + break; } - return RequestStatus::Success; + } + if (!assigned) { + ++overflowCount; } } - return RequestStatus::NoDataSource; + + auto histogram = nlohmann::json::array(); + for (const auto& bin : bins) { + histogram.push_back(nlohmann::json::object({ + {"label", bin.label}, + {"count", bin.count}, + })); + } + histogram.push_back(nlohmann::json::object({ + {"label", ">4 MiB"}, + {"count", overflowCount}, + })); + + return nlohmann::json::object({ + {"tile-count", tileCount}, + {"total-tile-bytes", totalBytes}, + {"min-bytes", tileSizes.front()}, + {"max-bytes", tileSizes.back()}, + {"mean-bytes", meanBytes}, + {"histogram", std::move(histogram)}, + }); } +} // namespace + nlohmann::json Service::getStatistics() const { + // Preserve old behavior for existing callers. 
+ return getStatistics(true, false); +} + +nlohmann::json Service::getStatistics(bool includeCachedFeatureTreeBytes, bool includeTileSizeDistribution) const +{ + std::vector> dataSources; + { + std::shared_lock lock(impl_->dataSourcesMutex_); + dataSources.reserve(impl_->dataSourceInfo_.size()); + for (auto const& [dataSource, info] : impl_->dataSourceInfo_) { + auto workersIt = impl_->dataSourceWorkers_.find(dataSource); + auto workerCount = workersIt == impl_->dataSourceWorkers_.end() + ? size_t{0} + : workersIt->second.size(); + dataSources.emplace_back(info, workerCount); + } + } + auto datasources = nlohmann::json::array(); - for (auto const& [dataSource, info] : impl_->dataSourceInfo_) { + for (auto const& [info, workerCount] : dataSources) { datasources.push_back({ {"name", info.mapId_}, - {"workers", impl_->dataSourceWorkers_[dataSource].size()} + {"workers", workerCount} }); } - return { + size_t activeRequests = 0; + { + std::unique_lock lock(impl_->jobsMutex_); + activeRequests = impl_->requests_.size(); + } + auto result = nlohmann::json{ {"datasources", datasources}, - {"active-requests", impl_->requests_.size()} + {"active-requests", activeRequests} }; + + if (!includeCachedFeatureTreeBytes && !includeTileSizeDistribution) { + return result; + } + + auto featureLayerTotals = nlohmann::json::object(); + auto modelPoolTotals = nlohmann::json::object(); + auto geometryUsageTotals = nlohmann::json::object(); + auto validityUsageTotals = nlohmann::json::object(); + auto arrayArenaSingletonTotals = nlohmann::json::object(); + int64_t parsedTiles = 0; + int64_t totalTileBytes = 0; + int64_t parseErrors = 0; + std::vector tileSizes; + + auto addTotals = [](nlohmann::json& totals, const nlohmann::json& stats, const auto& self) -> void { + for (const auto& [key, value] : stats.items()) { + if (value.is_number_integer()) { + totals[key] = totals.template value(key, 0) + value.template get(); + } else if (value.is_number_float()) { + totals[key] = totals.template 
value(key, .0) + value.template get(); + } else if (value.is_object()) { + if (!totals.contains(key) || !totals[key].is_object()) { + totals[key] = nlohmann::json::object(); + } + self(totals[key], value, self); + } + } + }; + + std::unique_ptr tileReader; + if (includeCachedFeatureTreeBytes) { + auto layerInfoByMap = + std::unordered_map>>{}; + std::vector infos; + { + std::shared_lock lock(impl_->dataSourcesMutex_); + infos.reserve(impl_->dataSourceInfo_.size()); + for (auto const& [_, info] : impl_->dataSourceInfo_) { + infos.push_back(info); + } + } + for (auto const& info : infos) { + auto& layers = layerInfoByMap[info.mapId_]; + for (auto const& [layerId, layerInfo] : info.layers_) { + layers[layerId] = layerInfo; + } + } + + auto resolveLayerInfo = [layerInfoByMap](std::string_view mapId, std::string_view layerId) + -> std::shared_ptr { + auto mapIt = layerInfoByMap.find(std::string(mapId)); + if (mapIt == layerInfoByMap.end()) + return std::make_shared(); + auto layerIt = mapIt->second.find(std::string(layerId)); + if (layerIt == mapIt->second.end()) { + auto fallback = std::make_shared(); + fallback->layerId_ = std::string(layerId); + return fallback; + } + return layerIt->second; + }; + + tileReader = std::make_unique( + resolveLayerInfo, + [&](auto&& parsedLayer) { + auto tile = std::dynamic_pointer_cast(parsedLayer); + if (!tile) { + ++parseErrors; + return; + } + auto sizeStats = tile->serializationSizeStats(); + addTotals(featureLayerTotals, sizeStats["feature-layer"], addTotals); + addTotals(modelPoolTotals, sizeStats["model-pool"], addTotals); + addTotals(geometryUsageTotals, sizeStats["geometry-usage"], addTotals); + addTotals(validityUsageTotals, sizeStats["validity-usage"], addTotals); + addTotals(arrayArenaSingletonTotals, sizeStats["array-arena-singletons"], addTotals); + }, + impl_->cache_); + } + + impl_->cache_->forEachTileLayerBlob([&](const MapTileKey& key, const std::string& blob) { + if (key.layer_ != LayerType::Features) + return; + + 
const int64_t tileBytes = static_cast(blob.size()); + ++parsedTiles; + totalTileBytes += tileBytes; + + if (includeTileSizeDistribution) { + tileSizes.push_back(tileBytes); + } + + if (!includeCachedFeatureTreeBytes) { + return; + } + + try { + tileReader->read(blob); + } catch (const std::exception&) { + ++parseErrors; + } + }); + + if (includeCachedFeatureTreeBytes && parsedTiles > 0) { + result["cached-feature-tree-bytes"] = nlohmann::json{ + {"tile-count", parsedTiles}, + {"total-tile-bytes", totalTileBytes}, + {"parse-errors", parseErrors}, + {"feature-layer", featureLayerTotals}, + {"model-pool", modelPoolTotals}, + {"geometry-usage", geometryUsageTotals}, + {"validity-usage", validityUsageTotals}, + {"array-arena-singletons", arrayArenaSingletonTotals} + }; + } + + if (includeTileSizeDistribution && !tileSizes.empty()) { + result["cached-feature-tile-size-distribution"] = buildTileSizeDistribution(std::move(tileSizes)); + } + + return result; } } // namespace mapget diff --git a/libs/service/src/sqlitecache.cpp b/libs/service/src/sqlitecache.cpp index 74b2b308..5ed2d8fc 100644 --- a/libs/service/src/sqlitecache.cpp +++ b/libs/service/src/sqlitecache.cpp @@ -217,11 +217,11 @@ std::optional SQLiteCache::getTileLayerBlob(MapTileKey const& k) std::string result(static_cast(data), size); log().trace(fmt::format("Key: {} | Layer size: {}", k.toString(), size)); - log().debug("Cache hits: {}, cache misses: {}", cacheHits_, cacheMisses_); + log().debug("Cache hits: {}, cache misses: {}", cacheHits_.load(), cacheMisses_.load()); return result; } else if (rc == SQLITE_DONE) { - log().debug("Cache hits: {}, cache misses: {}", cacheHits_, cacheMisses_); + log().debug("Cache hits: {}, cache misses: {}", cacheHits_.load(), cacheMisses_.load()); return {}; } else { @@ -245,7 +245,7 @@ void SQLiteCache::putTileLayerBlob(MapTileKey const& k, std::string const& v) raise(fmt::format("Error writing to database: {}", sqlite3_errmsg(db_))); } - log().debug("Cache hits: {}, cache 
misses: {}", cacheHits_, cacheMisses_); + log().debug("Cache hits: {}, cache misses: {}", cacheHits_.load(), cacheMisses_.load()); // Check if we need to evict old tiles if (maxTileCount_ > 0) { @@ -259,6 +259,32 @@ void SQLiteCache::putTileLayerBlob(MapTileKey const& k, std::string const& v) } } +void SQLiteCache::forEachTileLayerBlob(const TileBlobVisitor& cb) const +{ + std::lock_guard lock(dbMutex_); + + sqlite3_stmt* stmt = nullptr; + int rc = sqlite3_prepare_v2(db_, "SELECT key, data FROM tiles", -1, &stmt, nullptr); + if (rc != SQLITE_OK) { + raise(fmt::format("Failed to prepare tile iteration statement: {}", sqlite3_errmsg(db_))); + } + + while ((rc = sqlite3_step(stmt)) == SQLITE_ROW) { + const char* key = reinterpret_cast(sqlite3_column_text(stmt, 0)); + const void* data = sqlite3_column_blob(stmt, 1); + int size = sqlite3_column_bytes(stmt, 1); + if (key && data && size >= 0) { + cb(MapTileKey(key), std::string(static_cast(data), size)); + } + } + if (rc != SQLITE_DONE) { + sqlite3_finalize(stmt); + raise(fmt::format("Error iterating cached tiles: {}", sqlite3_errmsg(db_))); + } + + sqlite3_finalize(stmt); +} + void SQLiteCache::cleanupOldestTiles() { // Delete the oldest tile @@ -315,4 +341,4 @@ void SQLiteCache::putStringPoolBlob(std::string_view const& sourceNodeId, std::s } } -} // namespace mapget \ No newline at end of file +} // namespace mapget diff --git a/test/integration/CMakeLists.txt b/test/integration/CMakeLists.txt index 6ce5ba32..1af0caf9 100644 --- a/test/integration/CMakeLists.txt +++ b/test/integration/CMakeLists.txt @@ -1,9 +1,7 @@ project(test.mapget.integration CXX) -# TODO: Figure out a way to do this without assuming free ports. 
-set (MAPGET_SERVER_PORT 61852) -set (DATASOURCE_CPP_PORT 61853) -set (DATASOURCE_PY_PORT 61854) +set(MAPGET_PICK_PORTS_PY "${CMAKE_CURRENT_LIST_DIR}/detect-ports-and-prepare-config-yaml.py") +set(MAPGET_RUN_WITH_PORTS "${CMAKE_CURRENT_LIST_DIR}/run-with-ports.bash") add_wheel_test(test-local-example WORKING_DIRECTORY @@ -20,20 +18,23 @@ if (NOT WITH_COVERAGE) WORKING_DIRECTORY "${CMAKE_RUNTIME_OUTPUT_DIRECTORY}" COMMANDS + # Pick ports at test runtime (reduces CI flakiness vs. hard-coded ports) + -f "python ${MAPGET_PICK_PORTS_PY} --out-dir .integration/test-cli-cpp" + # Run Python datasource - -b "python ${CMAKE_CURRENT_LIST_DIR}/../../examples/python/datasource.py ${DATASOURCE_PY_PORT}" + -b "bash ${MAPGET_RUN_WITH_PORTS} .integration/test-cli-cpp/ports.env python ${CMAKE_CURRENT_LIST_DIR}/../../examples/python/datasource.py $DATASOURCE_PY_PORT" # Run C++ datasource - -b "./cpp-sample-http-datasource ${DATASOURCE_CPP_PORT}" + -b "bash ${MAPGET_RUN_WITH_PORTS} .integration/test-cli-cpp/ports.env ./cpp-sample-http-datasource $DATASOURCE_CPP_PORT" # Run service - -b "./mapget --log-level trace serve -p ${MAPGET_SERVER_PORT} -d 127.0.0.1:${DATASOURCE_CPP_PORT} -d 127.0.0.1:${DATASOURCE_PY_PORT}" + -b "bash ${MAPGET_RUN_WITH_PORTS} .integration/test-cli-cpp/ports.env ./mapget --log-level trace serve -p $MAPGET_SERVER_PORT -d 127.0.0.1:$DATASOURCE_CPP_PORT -d 127.0.0.1:$DATASOURCE_PY_PORT" # Request from cpp datasource - -f "./mapget --log-level trace fetch -s 127.0.0.1:${MAPGET_SERVER_PORT} -m Tropico -l WayLayer -t 12345" + -f "bash ${MAPGET_RUN_WITH_PORTS} .integration/test-cli-cpp/ports.env ./mapget --log-level trace fetch -s 127.0.0.1:$MAPGET_SERVER_PORT -m Tropico -l WayLayer -t 12345" # Request from py datasource - -f "./mapget --log-level trace fetch -s 127.0.0.1:${MAPGET_SERVER_PORT} -m TestMap -l WayLayer -t 12345" + -f "bash ${MAPGET_RUN_WITH_PORTS} .integration/test-cli-cpp/ports.env ./mapget --log-level trace fetch -s 127.0.0.1:$MAPGET_SERVER_PORT -m 
TestMap -l WayLayer -t 12345" ) endif () @@ -42,11 +43,14 @@ if (NOT WITH_COVERAGE) WORKING_DIRECTORY "${CMAKE_RUNTIME_OUTPUT_DIRECTORY}" COMMANDS + # Pick ports at test runtime (reduces CI flakiness vs. hard-coded ports) + -f "python ${MAPGET_PICK_PORTS_PY} --out-dir .integration/test-cli-datasource-exe" + # Run service with auto-launched python datasource - -b "${CMAKE_CURRENT_LIST_DIR}/mapget-exec-datasource.bash ${MAPGET_SERVER_PORT}" + -b "bash ${MAPGET_RUN_WITH_PORTS} .integration/test-cli-datasource-exe/ports.env ${CMAKE_CURRENT_LIST_DIR}/mapget-exec-datasource.bash $MAPGET_SERVER_PORT" # Request from py datasource - -f "./mapget --log-level trace fetch -s 127.0.0.1:${MAPGET_SERVER_PORT} -m TestMap -l WayLayer -t 12345" + -f "bash ${MAPGET_RUN_WITH_PORTS} .integration/test-cli-datasource-exe/ports.env ./mapget --log-level trace fetch -s 127.0.0.1:$MAPGET_SERVER_PORT -m TestMap -l WayLayer -t 12345" ) endif () @@ -56,17 +60,20 @@ if (NOT WITH_COVERAGE) WORKING_DIRECTORY "${CMAKE_RUNTIME_OUTPUT_DIRECTORY}" COMMANDS + # Pick ports at test runtime (reduces CI flakiness vs. 
hard-coded ports) + -f "python ${MAPGET_PICK_PORTS_PY} --out-dir .integration/test-cli-python" + # Run Python datasource - -b "python ${CMAKE_CURRENT_LIST_DIR}/../../examples/python/datasource.py ${DATASOURCE_PY_PORT}" + -b "bash ${MAPGET_RUN_WITH_PORTS} .integration/test-cli-python/ports.env python ${CMAKE_CURRENT_LIST_DIR}/../../examples/python/datasource.py $DATASOURCE_PY_PORT" # Run C++ datasource - -b "./cpp-sample-http-datasource ${DATASOURCE_CPP_PORT}" + -b "bash ${MAPGET_RUN_WITH_PORTS} .integration/test-cli-python/ports.env ./cpp-sample-http-datasource $DATASOURCE_CPP_PORT" # Run service - -b "python -m mapget --log-level trace serve -p ${MAPGET_SERVER_PORT} -d 127.0.0.1:${DATASOURCE_CPP_PORT} -d 127.0.0.1:${DATASOURCE_PY_PORT}" + -b "bash ${MAPGET_RUN_WITH_PORTS} .integration/test-cli-python/ports.env python -m mapget --log-level trace serve -p $MAPGET_SERVER_PORT -d 127.0.0.1:$DATASOURCE_CPP_PORT -d 127.0.0.1:$DATASOURCE_PY_PORT" # Request from py datasource - -f "python -m mapget --log-level trace fetch -s 127.0.0.1:${MAPGET_SERVER_PORT} -m TestMap -l WayLayer -t 12345" + -f "bash ${MAPGET_RUN_WITH_PORTS} .integration/test-cli-python/ports.env python -m mapget --log-level trace fetch -s 127.0.0.1:$MAPGET_SERVER_PORT -m TestMap -l WayLayer -t 12345" ) endif() @@ -76,20 +83,23 @@ if (NOT WITH_COVERAGE) WORKING_DIRECTORY "${CMAKE_RUNTIME_OUTPUT_DIRECTORY}" COMMANDS + # Pick ports + write config YAMLs at test runtime. 
+ -f "python ${MAPGET_PICK_PORTS_PY} --out-dir .integration/test-config-cpp" + # Run Python datasource - -b "python ${CMAKE_CURRENT_LIST_DIR}/../../examples/python/datasource.py ${DATASOURCE_PY_PORT}" + -b "bash ${MAPGET_RUN_WITH_PORTS} .integration/test-config-cpp/ports.env python ${CMAKE_CURRENT_LIST_DIR}/../../examples/python/datasource.py $DATASOURCE_PY_PORT" # Run C++ datasource - -b "./cpp-sample-http-datasource ${DATASOURCE_CPP_PORT}" + -b "bash ${MAPGET_RUN_WITH_PORTS} .integration/test-config-cpp/ports.env ./cpp-sample-http-datasource $DATASOURCE_CPP_PORT" # Run service - -b "./mapget --config ${CMAKE_CURRENT_LIST_DIR}/../../examples/config/sample-service.yaml serve" + -b "./mapget --config .integration/test-config-cpp/sample-service.yaml serve" # Request from py datasource - -f "./mapget --config ${CMAKE_CURRENT_LIST_DIR}/../../examples/config/sample-second-datasource.yaml fetch" + -f "./mapget --config .integration/test-config-cpp/sample-second-datasource.yaml fetch" # Request from cpp datasource - -f "./mapget --config ${CMAKE_CURRENT_LIST_DIR}/../../examples/config/sample-first-datasource.yaml fetch" + -f "./mapget --config .integration/test-config-cpp/sample-first-datasource.yaml fetch" ) endif () @@ -99,19 +109,22 @@ if (NOT WITH_COVERAGE) WORKING_DIRECTORY "${CMAKE_RUNTIME_OUTPUT_DIRECTORY}" COMMANDS + # Pick ports + write config YAMLs at test runtime. 
+ -f "python ${MAPGET_PICK_PORTS_PY} --out-dir .integration/test-config-py" + # Run Python datasource - -b "python ${CMAKE_CURRENT_LIST_DIR}/../../examples/python/datasource.py ${DATASOURCE_PY_PORT}" + -b "bash ${MAPGET_RUN_WITH_PORTS} .integration/test-config-py/ports.env python ${CMAKE_CURRENT_LIST_DIR}/../../examples/python/datasource.py $DATASOURCE_PY_PORT" # Run C++ datasource - -b "./cpp-sample-http-datasource ${DATASOURCE_CPP_PORT}" + -b "bash ${MAPGET_RUN_WITH_PORTS} .integration/test-config-py/ports.env ./cpp-sample-http-datasource $DATASOURCE_CPP_PORT" # Run service - -b "python -m mapget --config ${CMAKE_CURRENT_LIST_DIR}/../../examples/config/sample-service.yaml serve" + -b "python -m mapget --config .integration/test-config-py/sample-service.yaml serve" # Request from py datasource - -f "python -m mapget --config ${CMAKE_CURRENT_LIST_DIR}/../../examples/config/sample-second-datasource.yaml fetch" + -f "python -m mapget --config .integration/test-config-py/sample-second-datasource.yaml fetch" # Request from cpp datasource - -f "python -m mapget --config ${CMAKE_CURRENT_LIST_DIR}/../../examples/config/sample-first-datasource.yaml fetch" + -f "python -m mapget --config .integration/test-config-py/sample-first-datasource.yaml fetch" ) endif () diff --git a/test/integration/detect-ports-and-prepare-config-yaml.py b/test/integration/detect-ports-and-prepare-config-yaml.py new file mode 100644 index 00000000..df8e652a --- /dev/null +++ b/test/integration/detect-ports-and-prepare-config-yaml.py @@ -0,0 +1,160 @@ +#!/usr/bin/env python3 + +from __future__ import annotations + +import argparse +import os +import re +import socket +from pathlib import Path + + +_PORT_RANGE_START = 20000 +_PORT_RANGE_END = 30000 + + +def _pick_free_tcp_ports(count: int, seed: str) -> list[int]: + if count <= 0: + raise ValueError("count must be > 0") + if count > (_PORT_RANGE_END - _PORT_RANGE_START): + raise ValueError("count exceeds available port range") + + sockets: 
list[socket.socket] = [] + ports: list[int] = [] + port_span = _PORT_RANGE_END - _PORT_RANGE_START + start_offset = (sum(seed.encode("utf-8")) + os.getpid()) % port_span + try: + for candidate_offset in range(port_span): + if len(ports) == count: + break + + port = _PORT_RANGE_START + ((start_offset + candidate_offset) % port_span) + s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + try: + s.bind(("127.0.0.1", port)) + except OSError: + s.close() + continue + + sockets.append(s) + ports.append(port) + finally: + for s in sockets: + try: + s.close() + except Exception: + pass + + if len(ports) != count: + raise RuntimeError( + f"Could not reserve {count} TCP ports in range {_PORT_RANGE_START}-{_PORT_RANGE_END - 1}" + ) + if len(set(ports)) != len(ports): + raise RuntimeError(f"Port picker returned duplicates: {ports}") + + return ports + + +def _patch_sample_service_yaml(text: str, mapget_port: int, datasource_cpp_port: int, datasource_py_port: int) -> str: + text = re.sub( + r"(?m)^(\s*port:\s*)\d+(\s*)$", + rf"\g<1>{mapget_port}\g<2>", + text, + count=1, + ) + text = text.replace("127.0.0.1:61853", f"127.0.0.1:{datasource_cpp_port}") + text = text.replace("127.0.0.1:61854", f"127.0.0.1:{datasource_py_port}") + return text + + +def _patch_cache_dir(text: str, cache_path: str) -> str: + # Prefer updating an existing cache-dir value, otherwise insert after cache-type. 
+ escaped_path = cache_path.replace("'", "''") + cache_value = f"'{escaped_path}'" + if re.search(r"(?m)^\s*cache-dir:\s*.*$", text): + return re.sub( + r"(?m)^(\s*cache-dir:\s*).*$", + lambda match: f"{match.group(1)}{cache_value}", + text, + count=1, + ) + match = re.search(r"(?m)^(\s*)cache-type:\s*.*$", text) + if not match: + return text + indent = match.group(1) + insert_line = f"{indent}cache-dir: {cache_value}" + return re.sub( + r"(?m)^(\s*cache-type:\s*.*)$", + lambda match: f"{match.group(1)}\n{insert_line}", + text, + count=1, + ) + + +def _patch_sample_fetch_yaml(text: str, mapget_port: int) -> str: + return re.sub( + r"(?m)^(\s*server:\s*127\.0\.0\.1:)\d+(\s*)$", + rf"\g<1>{mapget_port}\g<2>", + text, + count=1, + ) + + +def main() -> int: + parser = argparse.ArgumentParser() + parser.add_argument("--out-dir", required=True, help="Output directory for generated files (created if needed).") + args = parser.parse_args() + + out_dir = Path(args.out_dir) + out_dir.mkdir(parents=True, exist_ok=True) + + # Pick ports at test runtime (reduces collision risk vs. configure-time selection). 
+ mapget_port, datasource_cpp_port, datasource_py_port = _pick_free_tcp_ports(3, str(out_dir)) + + ports_env = out_dir / "ports.env" + ports_env.write_text( + "\n".join( + [ + f"export MAPGET_SERVER_PORT={mapget_port}", + f"export DATASOURCE_CPP_PORT={datasource_cpp_port}", + f"export DATASOURCE_PY_PORT={datasource_py_port}", + "", + ] + ), + encoding="utf-8", + newline="\n", + ) + + repo_root = Path(__file__).resolve().parents[2] + examples_config = repo_root / "examples" / "config" + + sample_service = (examples_config / "sample-service.yaml").read_text(encoding="utf-8") + cache_path = str((out_dir / "mapget-cache.db").resolve()) + (out_dir / "sample-service.yaml").write_text( + _patch_cache_dir( + _patch_sample_service_yaml(sample_service, mapget_port, datasource_cpp_port, datasource_py_port), + cache_path, + ), + encoding="utf-8", + newline="\n", + ) + + sample_first = (examples_config / "sample-first-datasource.yaml").read_text(encoding="utf-8") + (out_dir / "sample-first-datasource.yaml").write_text( + _patch_sample_fetch_yaml(sample_first, mapget_port), + encoding="utf-8", + newline="\n", + ) + + sample_second = (examples_config / "sample-second-datasource.yaml").read_text(encoding="utf-8") + (out_dir / "sample-second-datasource.yaml").write_text( + _patch_sample_fetch_yaml(sample_second, mapget_port), + encoding="utf-8", + newline="\n", + ) + + return 0 + + +if __name__ == "__main__": + raise SystemExit(main()) diff --git a/test/integration/run-with-ports.bash b/test/integration/run-with-ports.bash new file mode 100644 index 00000000..b0f17c54 --- /dev/null +++ b/test/integration/run-with-ports.bash @@ -0,0 +1,20 @@ +#!/usr/bin/env bash + +set -euo pipefail + +if [[ $# -lt 2 ]]; then + echo "Usage: $0 " >&2 + exit 2 +fi + +ports_env="$1" +shift + +# shellcheck disable=SC1090 +source "$ports_env" + +# Note: The wheel test harness passes commands as a single string, so `$MAPGET_SERVER_PORT` +# etc. are not expanded. 
We intentionally use `eval` here so the variables sourced above +# are expanded before executing the command. +eval "exec $*" + diff --git a/test/unit/CMakeLists.txt b/test/unit/CMakeLists.txt index 8a2deff4..af35f5cb 100644 --- a/test/unit/CMakeLists.txt +++ b/test/unit/CMakeLists.txt @@ -1,6 +1,7 @@ project(test.mapget.unit CXX) add_executable(test.mapget + test-main.cpp test-model.cpp test-model-geometry.cpp test-simfil-geometry.cpp @@ -16,6 +17,9 @@ add_executable(test.mapget add_executable(test.mapget.filelog test-file-logging.cpp) +add_executable(test.mapget.datasource-server + test-datasource-server.cpp) + target_link_libraries(test.mapget PUBLIC mapget-log @@ -23,7 +27,16 @@ target_link_libraries(test.mapget mapget-http-datasource mapget-http-service geojsonsource - Catch2::Catch2WithMain) + Catch2::Catch2) + +target_link_libraries(test.mapget.datasource-server + PUBLIC + mapget-log + mapget-http-datasource) + +target_compile_definitions(test.mapget + PRIVATE + MAPGET_TEST_DATASOURCE_SERVER_EXE=\"$\") target_link_libraries(test.mapget.filelog PUBLIC diff --git a/test/unit/test-cache.cpp b/test/unit/test-cache.cpp index 43a9e333..4e590b25 100644 --- a/test/unit/test-cache.cpp +++ b/test/unit/test-cache.cpp @@ -76,6 +76,9 @@ namespace { layerInfo->featureTypes_, std::vector{0, 1, 2}, std::vector{{1, 2, {}}, {3, 3, {}}}, + 1, + std::vector{"Complete"}, + 0, true, false, Version{0, 0, 0}}); @@ -113,11 +116,14 @@ namespace { // Helper for creating temporary cache paths std::filesystem::path createTempCachePath(const std::string& prefix, bool needsDbExtension = true) { - auto now = std::chrono::system_clock::now(); - auto epoch_time = std::chrono::system_clock::to_time_t(now); + static std::atomic counter{0}; + auto now = std::chrono::system_clock::now().time_since_epoch(); + auto epoch_nanos = std::chrono::duration_cast(now).count(); + auto unique_counter = counter.fetch_add(1, std::memory_order_relaxed); std::filesystem::path test_cache = 
std::filesystem::temp_directory_path() / - (prefix + std::to_string(epoch_time) + (needsDbExtension ? ".db" : "")); + (prefix + std::to_string(epoch_nanos) + "-" + std::to_string(unique_counter) + + (needsDbExtension ? ".db" : "")); // Delete cache if it already exists if (std::filesystem::exists(test_cache)) { @@ -404,7 +410,10 @@ TEST_CASE("SQLiteCache", "[Cache]") TEST_CASE("SQLiteCache Concurrent Access", "[Cache][Concurrent]") { - mapget::setLogLevel("trace", log()); + // This test intentionally pounds the cache from multiple threads. + // Trace logging turns each cache hit into CI output and makes the Windows + // job spend most of its time flushing logs instead of running the test. + mapget::setLogLevel("warn", log()); // Use common test data setup auto layerInfo = createTestLayerInfo(); @@ -416,46 +425,65 @@ TEST_CASE("SQLiteCache Concurrent Access", "[Cache][Concurrent]") // Create test data auto nodeId = "ConcurrentTestNode"; auto mapId = "ConcurrentMap"; - auto strings = std::make_shared(nodeId); DataSourceInfo info = createTestDataSourceInfo(nodeId, mapId, layerInfo); // Create multiple tiles for testing - std::vector> tiles; + std::vector tileIds; + std::vector tileKeys; for (int i = 0; i < 10; ++i) { auto tileId = TileId::fromWgs84(42.0 + i * 0.1, 11.0, 13); - tiles.push_back(createTestTile(tileId, nodeId, mapId, layerInfo, strings, 1)); - } - - // Write initial tiles - for (const auto& tile : tiles) { + auto tile = createTestTile( + tileId, + nodeId, + mapId, + layerInfo, + std::make_shared(nodeId), + 1); + tileIds.push_back(tileId); + tileKeys.push_back(tile->id()); cache->putTileLayer(tile); } - + ConcurrentTestMetrics metrics; std::atomic updatedFeaturesFound{0}; + std::atomic start{false}; + std::atomic completedWrites{0}; + constexpr int kTotalWrites = 100; // Track the maximum feature count seen for each tile - std::vector> maxFeatureCounts(tiles.size()); - for (size_t i = 0; i < tiles.size(); ++i) { + std::vector> 
maxFeatureCounts(tileIds.size()); + std::vector> expectedFeatureCounts(tileIds.size()); + for (size_t i = 0; i < tileIds.size(); ++i) { maxFeatureCounts[i] = 1; // Initial count + expectedFeatureCounts[i] = 1; } // Start multiple reader threads std::vector readers; for (int i = 0; i < 4; ++i) { - readers.emplace_back([&cache, &tiles, &info, &metrics, - &updatedFeaturesFound, &maxFeatureCounts]() { - for (int j = 0; j < 100; ++j) { + readers.emplace_back([&cache, &tileKeys, &info, &metrics, + &updatedFeaturesFound, &maxFeatureCounts, + &start, &completedWrites]() { + while (!start.load(std::memory_order_acquire)) { + std::this_thread::yield(); + } + + bool observedUpdate = false; + int j = 0; + do { try { // Read random tiles - int tileIndex = j % tiles.size(); - auto tile = cache->getTileLayer(tiles[tileIndex]->id(), info); + size_t tileIndex = static_cast(j % tileKeys.size()); + auto tile = cache->getTileLayer(tileKeys[tileIndex], info); if (tile.tile) { metrics.successfulReads++; auto featureLayer = std::static_pointer_cast(tile.tile); // Verify the tile data is correct - REQUIRE(featureLayer->size() > 0); + if (featureLayer->size() == 0) { + metrics.readErrors++; + continue; + } // Track if we see increasing feature counts size_t currentCount = featureLayer->size(); @@ -465,62 +493,70 @@ TEST_CASE("SQLiteCache Concurrent Access", "[Cache][Concurrent]") // Update previousMax and retry } - // Check if we see features added by writers (wayId >= 1000) - // For simplicity, just count features beyond the initial one per tile - // Since writers add features with wayId >= 1000, any feature beyond - // the first one in a tile must be from a writer if (currentCount > 1) { - // Additional features beyond initial must be from writers updatedFeaturesFound += (currentCount - 1); + observedUpdate = true; } } } catch (...) 
{ metrics.readErrors++; } - } + ++j; + } while (completedWrites.load(std::memory_order_acquire) < kTotalWrites || !observedUpdate); }); } // Start writer threads that update tiles std::vector writers; for (int i = 0; i < 2; ++i) { - writers.emplace_back([&cache, &tiles, &metrics, i]() { + writers.emplace_back([&cache, &tileIds, &layerInfo, &metrics, i, + nodeId, mapId, &start, &completedWrites, + &expectedFeatureCounts]() { + while (!start.load(std::memory_order_acquire)) { + std::this_thread::yield(); + } + for (int j = 0; j < 50; ++j) { try { - // Update tiles by adding more features - int tileIndex = j % tiles.size(); - auto& tile = tiles[tileIndex]; - // Store the area ID to ensure it outlives the string_view - auto areaId = "UpdatedArea" + std::to_string(i) + "_" + std::to_string(j); - tile->newFeature("Way", { - {"areaId", areaId}, - {"wayId", 1000 + i * 100 + j} - }); + // Keep per-tile writes ordered by assigning each tile to one writer. + const size_t tileIndex = static_cast((i + 2 * j) % tileIds.size()); + const size_t featureCount = + expectedFeatureCounts[tileIndex].fetch_add(1, std::memory_order_relaxed) + 1; + auto tile = createTestTile( + tileIds[tileIndex], + nodeId, + mapId, + layerInfo, + std::make_shared(nodeId), + static_cast(featureCount)); cache->putTileLayer(tile); metrics.successfulWrites++; + completedWrites.fetch_add(1, std::memory_order_release); } catch (...) 
{ metrics.writeErrors++; } } }); } + + start.store(true, std::memory_order_release); // Wait for all threads to complete - joinThreads(readers); joinThreads(writers); + joinThreads(readers); // Verify no errors occurred REQUIRE(metrics.readErrors == 0); REQUIRE(metrics.writeErrors == 0); REQUIRE(metrics.successfulReads > 0); - REQUIRE(metrics.successfulWrites == 100); // 2 writers * 50 writes each + REQUIRE(metrics.successfulWrites == kTotalWrites); // 2 writers * 50 writes each // Verify that readers saw updates from writers REQUIRE(updatedFeaturesFound > 0); // Check that at least some tiles have more than the initial 1 feature size_t tilesWithUpdates = 0; - for (size_t i = 0; i < tiles.size(); ++i) { + for (size_t i = 0; i < tileIds.size(); ++i) { if (maxFeatureCounts[i].load() > 1) { tilesWithUpdates++; } @@ -528,13 +564,11 @@ TEST_CASE("SQLiteCache Concurrent Access", "[Cache][Concurrent]") REQUIRE(tilesWithUpdates > 0); // Verify the final state - all tiles should have multiple features - for (size_t i = 0; i < tiles.size(); ++i) { - auto [tile, expiredAt] = cache->getTileLayer(tiles[i]->id(), info); + for (size_t i = 0; i < tileIds.size(); ++i) { + auto [tile, expiredAt] = cache->getTileLayer(tileKeys[i], info); REQUIRE(tile != nullptr); auto featureLayer = std::static_pointer_cast(tile); - // Each tile should have more than the initial 1 feature - // Writers add features, so final count should be > 1 - REQUIRE(featureLayer->size() > 1); + REQUIRE(featureLayer->size() == expectedFeatureCounts[i].load()); } // Clean up - reset cache to close DB connection before removing file @@ -663,7 +697,9 @@ TEST_CASE("SQLiteCache Concurrent Access", "[Cache][Concurrent]") metrics.successfulReads++; // Verify we can cast and access the data auto featureLayer = std::static_pointer_cast(tile.tile); - REQUIRE(featureLayer->size() == 1); + if (featureLayer->size() != 1) { + metrics.readErrors++; + } } } } catch (...) 
{ diff --git a/test/unit/test-datasource-server.cpp b/test/unit/test-datasource-server.cpp new file mode 100644 index 00000000..e5ebfc4f --- /dev/null +++ b/test/unit/test-datasource-server.cpp @@ -0,0 +1,70 @@ +#include "mapget/http-datasource/datasource-server.h" +#include "mapget/log.h" + +#include "nlohmann/json.hpp" + +using namespace mapget; +using namespace nlohmann; + +int main() +{ + setLogLevel("trace", log()); + + auto info = DataSourceInfo::fromJson(R"( + { + "nodeId": "test-datasource", + "mapId": "Tropico", + "layers": { + "WayLayer": { + "featureTypes": + [ + { + "name": "Way", + "uniqueIdCompositions": + [ + [ + { + "partId": "areaId", + "description": "String which identifies the map area.", + "datatype": "STR" + }, + { + "partId": "wayId", + "description": "Globally Unique 32b integer.", + "datatype": "U32" + } + ] + ] + } + ] + }, + "SourceData-WayLayer": { + "type": "SourceData" + } + } + } + )"_json); + + DataSourceServer ds(info); + ds.onTileFeatureRequest( + [&](const auto& tile) + { + auto f = tile->newFeature("Way", {{"areaId", "Area42"}, {"wayId", 0}}); + auto g = f->geom()->newGeometry(GeomType::Line); + g->append({42., 11}); + g->append({42., 12}); + }); + ds.onTileSourceDataRequest([&](const auto&) {}); + ds.onLocateRequest( + [&](LocateRequest const& request) -> std::vector + { + LocateResponse response(request); + response.tileKey_.layerId_ = "WayLayer"; + response.tileKey_.tileId_.value_ = 1; + return {response}; + }); + + ds.go("127.0.0.1", 0, 5000); + ds.waitForSignal(); + return 0; +} diff --git a/test/unit/test-http-datasource.cpp b/test/unit/test-http-datasource.cpp index 8f58c08b..3e3d8d1d 100644 --- a/test/unit/test-http-datasource.cpp +++ b/test/unit/test-http-datasource.cpp @@ -1,34 +1,423 @@ #include + +#include +#include +#include +#include #include +#include +#include +#include +#include +#include +#include +#include +#include #include -#include +#include + #ifndef _WIN32 -#include #include +#include #endif -#include 
"httplib.h" -#include "mapget/log.h" -#include "nlohmann/json.hpp" -#include "utility.h" +#include +#include +#include +#include + +#include "process.hpp" + #include "mapget/http-datasource/datasource-client.h" -#include "mapget/http-datasource/datasource-server.h" +#include "mapget/http-service/cli.h" #include "mapget/http-service/http-client.h" #include "mapget/http-service/http-service.h" +#include "mapget/log.h" +#include "mapget/model/info.h" #include "mapget/model/stream.h" #include "mapget/service/config.h" -#include "mapget/http-service/cli.h" + +#include "nlohmann/json.hpp" + +#include "test-http-service-fixture.h" +#include "utility.h" using namespace mapget; namespace fs = std::filesystem; -TEST_CASE("HttpDataSource", "[HttpDataSource]") +namespace { - setLogLevel("trace", log()); - // Create DataSourceInfo. - auto info = DataSourceInfo::fromJson(R"( +class SyncHttpClient +{ +public: + SyncHttpClient(std::string host, uint16_t port) + { + loopThread_ = std::make_unique("MapgetTestHttpClient"); + loopThread_->run(); + + client_ = drogon::HttpClient::newHttpClient( + fmt::format("http://{}:{}/", host, port), + loopThread_->getLoop()); + } + + std::pair get(std::string path) + { + auto req = drogon::HttpRequest::newHttpRequest(); + req->setMethod(drogon::Get); + req->setPath(std::move(path)); + return client_->sendRequest(req); + } + + std::pair postJson(std::string path, std::string body) + { + auto req = drogon::HttpRequest::newHttpRequest(); + req->setMethod(drogon::Post); + req->setPath(std::move(path)); + req->setContentTypeCode(drogon::CT_APPLICATION_JSON); + req->setBody(std::move(body)); + return client_->sendRequest(req); + } + +private: + std::unique_ptr loopThread_; + drogon::HttpClientPtr client_; +}; + +class WsTilesClient +{ +public: + WsTilesClient(uint16_t port, std::shared_ptr layerInfo, bool requireFeatureLayer = true) + : pullClient_("127.0.0.1", port), + layerInfo_(std::move(layerInfo)), + requireFeatureLayer_(requireFeatureLayer), + 
reader_( + [this](auto&&, auto&&) { return layerInfo_; }, + [this](auto&& tile) { + if (requireFeatureLayer_ && tile->id().layer_ != LayerType::Features) { + setError("Unexpected tile layer type"); + return; + } + receivedTileCount_.fetch_add(1, std::memory_order_relaxed); + }) + { + loopThread_ = std::make_unique("MapgetTestWsClient"); + loopThread_->run(); + + client_ = drogon::WebSocketClient::newWebSocketClient( + fmt::format("ws://127.0.0.1:{}", port), + loopThread_->getLoop()); + + client_->setMessageHandler( + [this](std::string&& msg, + const drogon::WebSocketClientPtr&, + const drogon::WebSocketMessageType& msgType) { + if (msgType != drogon::WebSocketMessageType::Binary) { + return; + } + handleBinaryMessage(std::move(msg)); + }); + } + + bool connect(bool sendAuthHeader) + { + auto connectReq = drogon::HttpRequest::newHttpRequest(); + connectReq->setMethod(drogon::Get); + connectReq->setPath("/tiles"); + if (sendAuthHeader) { + connectReq->addHeader("X-USER-ROLE", "Tropico-Viewer"); + } + + std::promise connectPromise; + auto connectFuture = connectPromise.get_future(); + client_->connectToServer( + connectReq, + [&connectPromise]( + drogon::ReqResult result, + const drogon::HttpResponsePtr&, + const drogon::WebSocketClientPtr&) { connectPromise.set_value(result); }); + + if (connectFuture.wait_for(std::chrono::seconds(5)) != std::future_status::ready) { + return false; + } + return connectFuture.get() == drogon::ReqResult::Ok; + } + + drogon::WebSocketConnectionPtr connection() const { return client_->getConnection(); } + + void send(std::string_view payload) + { + auto conn = connection(); + if (conn && conn->connected()) { + conn->send(std::string(payload), drogon::WebSocketMessageType::Text); + } + } + + [[nodiscard]] bool waitForDone(std::chrono::seconds timeout) + { + const auto deadline = std::chrono::steady_clock::now() + timeout; + while (std::chrono::steady_clock::now() < deadline) { + { + std::lock_guard lock(mutex_); + if (!error_.empty() || 
(lastStatus_.has_value() && lastStatus_->value("allDone", false))) { + return true; + } + } + + const auto remainingMs = static_cast( + std::chrono::duration_cast(deadline - std::chrono::steady_clock::now()).count()); + if (remainingMs <= 0) { + break; + } + + const auto clientId = clientId_.load(std::memory_order_relaxed); + if (clientId > 0) { + const auto waitMs = std::clamp(remainingMs, 1, 1000); + const auto [result, resp] = pullClient_.get(fmt::format( + "/tiles/next?clientId={}&waitMs={}&maxBytes={}", + clientId, + waitMs, + 5 * 1024 * 1024)); + + if (result != drogon::ReqResult::Ok || !resp) { + setError("Failed to pull next tile frame"); + return true; + } + + if (resp->statusCode() == drogon::k200OK) { + try { + std::lock_guard readerLock(readerMutex_); + reader_.read(std::string(resp->body())); + } + catch (const std::exception& e) { + setError(std::string("Failed to parse pulled tile stream: ") + e.what()); + return true; + } + } + else if (resp->statusCode() == drogon::k204NoContent) { + continue; + } + else if (resp->statusCode() == drogon::k410Gone) { + setError("Tiles pull session closed"); + return true; + } + else { + setError(fmt::format("Unexpected /tiles/next response status: {}", static_cast(resp->statusCode()))); + return true; + } + + continue; + } + + std::unique_lock lock(mutex_); + cv_.wait_for( + lock, + std::chrono::milliseconds(std::min(remainingMs, 50)), + [this] { + return !error_.empty() + || clientId_.load(std::memory_order_relaxed) > 0 + || (lastStatus_.has_value() && lastStatus_->value("allDone", false)); + }); + } + + std::lock_guard lock(mutex_); + return !error_.empty() || (lastStatus_.has_value() && lastStatus_->value("allDone", false)); + } + + void resetStatus() + { + std::lock_guard lock(mutex_); + lastStatus_.reset(); + error_.clear(); + } + + void resetTileCount() { receivedTileCount_.store(0, std::memory_order_relaxed); } + + std::optional lastStatus() const + { + std::lock_guard lock(mutex_); + return lastStatus_; + } 
+ + std::string error() const + { + std::lock_guard lock(mutex_); + return error_; + } + + int receivedTileCount() const { return receivedTileCount_.load(std::memory_order_relaxed); } + + void stop() { client_->stop(); } + +private: + void handleBinaryMessage(std::string&& msg) + { + TileLayerStream::MessageType type = TileLayerStream::MessageType::None; + uint32_t payloadSize = 0; + size_t headerBytes = 0; + auto bytes = std::span{ + reinterpret_cast(msg.data()), + msg.size()}; + if (!TileLayerStream::Reader::readMessageHeader(bytes, type, payloadSize, &headerBytes)) { + setError("Failed to read stream message header"); + return; + } + if (bytes.size() < headerBytes + payloadSize) { + setError("Invalid stream message size"); + return; + } + + if (type == TileLayerStream::MessageType::Status) { + auto payload = std::string_view{ + msg.data() + static_cast(headerBytes), + payloadSize}; + try { + auto parsed = nlohmann::json::parse(payload); + { + std::lock_guard lock(mutex_); + lastStatus_ = std::move(parsed); + } + cv_.notify_all(); + } + catch (const std::exception& e) { + setError(std::string("Failed to parse status JSON: ") + e.what()); + } + return; + } + + if (type == TileLayerStream::MessageType::RequestContext) { + auto payload = std::string_view{ + msg.data() + static_cast(headerBytes), + payloadSize}; + try { + auto parsed = nlohmann::json::parse(payload); + if (parsed.contains("clientId") && parsed["clientId"].is_number_integer()) { + clientId_.store(parsed["clientId"].get(), std::memory_order_relaxed); + cv_.notify_all(); + } + } + catch (const std::exception& e) { + setError(std::string("Failed to parse request-context JSON: ") + e.what()); + } + return; + } + + try { + std::lock_guard readerLock(readerMutex_); + reader_.read(msg); + } + catch (const std::exception& e) { + setError(std::string("Failed to parse tile stream: ") + e.what()); + } + } + + void setError(std::string message) + { + { + std::lock_guard lock(mutex_); + error_ = 
std::move(message); + } + cv_.notify_all(); + } + + mutable std::mutex mutex_; + std::condition_variable cv_; + std::optional lastStatus_; + std::string error_; + std::mutex readerMutex_; + std::atomic_int receivedTileCount_{0}; + std::atomic_int64_t clientId_{0}; + std::unique_ptr loopThread_; + drogon::WebSocketClientPtr client_; + SyncHttpClient pullClient_; + std::shared_ptr layerInfo_; + bool requireFeatureLayer_{true}; + TileLayerStream::Reader reader_; +}; + +class ChildProcessWithPort +{ +public: + explicit ChildProcessWithPort(std::string exePath) + { + auto stderrCallback = [](const char* bytes, size_t n) { + auto output = std::string(bytes, n); + output.erase(output.find_last_not_of(" \n\r\t") + 1); + if (!output.empty()) + std::cerr << output << std::endl; + }; + + auto stdoutCallback = [this](const char* bytes, size_t n) { + std::lock_guard lock(mutex_); + stdoutBuffer_.append(bytes, n); + + for (;;) { + auto nl = stdoutBuffer_.find_first_of("\r\n"); + if (nl == std::string::npos) + break; + + auto line = stdoutBuffer_.substr(0, nl); + stdoutBuffer_.erase(0, nl + 1); + line.erase(line.find_last_not_of(" \n\r\t") + 1); + + if (!portReady_) { + std::regex portRegex(R"(Running on port (\d+))"); + std::smatch matches; + if (std::regex_search(line, matches, portRegex) && matches.size() > 1) { + port_ = static_cast(std::stoi(matches.str(1))); + portReady_ = true; + cv_.notify_all(); + } + } + } + }; + + process_ = std::make_unique( + fmt::format("\"{}\"", exePath), + "", + stdoutCallback, + stderrCallback, + true); + + std::unique_lock lock(mutex_); +#if defined(NDEBUG) + if (!cv_.wait_for(lock, std::chrono::seconds(10), [this] { return portReady_; })) { + raise("Timeout waiting for the child process to start listening."); + } +#else + log().warn("Using Debug build: will wait forever!"); + cv_.wait(lock, [this] { return portReady_; }); +#endif + } + + ~ChildProcessWithPort() + { + if (process_) { + process_->kill(true); + process_->get_exit_status(); + } + } 
+ + [[nodiscard]] uint16_t port() const { + return port_; + } + +private: + std::unique_ptr process_; + mutable std::mutex mutex_; + std::condition_variable cv_; + std::string stdoutBuffer_; + uint16_t port_ = 0; + bool portReady_ = false; +}; + +nlohmann::json testDataSourceInfoJson() +{ + using nlohmann::json; + return json::parse(R"( + { + "nodeId": "test-datasource", "mapId": "Tropico", "layers": { "WayLayer": { @@ -59,69 +448,41 @@ TEST_CASE("HttpDataSource", "[HttpDataSource]") } } } - )"_json); + )"); +} - // Initialize a DataSource. - DataSourceServer ds(info); - std::atomic_uint32_t dataSourceFeatureRequestCount = 0; - std::atomic_uint32_t dataSourceSourceDataRequestCount = 0; - ds.onTileFeatureRequest( - [&](const auto& tile) - { - auto f = tile->newFeature("Way", {{"areaId", "Area42"}, {"wayId", 0}}); - auto g = f->geom()->newGeometry(GeomType::Line); - g->append({42., 11}); - g->append({42., 12}); - ++dataSourceFeatureRequestCount; - }); - ds.onTileSourceDataRequest( - [&](const auto& tile) { - ++dataSourceSourceDataRequestCount; - }); - ds.onLocateRequest( - [&](LocateRequest const& request) -> std::vector - { - REQUIRE(request.mapId_ == "Tropico"); - REQUIRE(request.typeId_ == "Way"); - REQUIRE(request.featureId_ == KeyValuePairs{{"wayId", 0}}); +} // namespace + +TEST_CASE("HttpDataSource", "[HttpDataSource]") +{ + setLogLevel("trace", log()); - LocateResponse response(request); - response.tileKey_.layerId_ = "WayLayer"; - response.tileKey_.tileId_.value_ = 1; - return {response}; - }); + // Start datasource server in a separate process (Drogon is singleton). + ChildProcessWithPort dsProc(MAPGET_TEST_DATASOURCE_SERVER_EXE); - // Launch the DataSource on a separate thread. - ds.go(); + // Expected datasource info. + auto info = DataSourceInfo::fromJson(testDataSourceInfoJson()); - // Ensure the DataSource is running. 
- REQUIRE(ds.isRunning() == true); + SyncHttpClient dsClient("127.0.0.1", dsProc.port()); - SECTION("Fetch /info") + // Fetch /info { - // Initialize an httplib client. - httplib::Client cli("localhost", ds.port()); + auto [result, resp] = dsClient.get("/info"); + REQUIRE(result == drogon::ReqResult::Ok); + REQUIRE(resp != nullptr); + REQUIRE(resp->statusCode() == drogon::k200OK); - // Send a GET info request. - auto fetchedInfoJson = cli.Get("/info"); - auto fetchedInfo = - DataSourceInfo::fromJson(nlohmann::json::parse(fetchedInfoJson->body)); + auto fetchedInfo = DataSourceInfo::fromJson(nlohmann::json::parse(std::string(resp->body()))); REQUIRE(fetchedInfo.toJson() == info.toJson()); } - SECTION("Fetch /tile") + // Fetch /tile { - // Initialize an httplib client. - httplib::Client cli("localhost", ds.port()); - - // Send a GET tile request. - auto tileResponse = cli.Get("/tile?layer=WayLayer&tileId=1"); - - // Check that the response is OK. - REQUIRE(tileResponse != nullptr); - REQUIRE(tileResponse->status == 200); + auto [result, resp] = dsClient.get("/tile?layer=WayLayer&tileId=1"); + REQUIRE(result == drogon::ReqResult::Ok); + REQUIRE(resp != nullptr); + REQUIRE(resp->statusCode() == drogon::k200OK); - // Check the response body for expected content. auto receivedTileCount = 0; TileLayerStream::Reader reader( [&](auto&& mapId, auto&& layerId) @@ -133,24 +494,18 @@ TEST_CASE("HttpDataSource", "[HttpDataSource]") REQUIRE(tile->id().layer_ == LayerType::Features); receivedTileCount++; }); - reader.read(tileResponse->body); + reader.read(std::string(resp->body())); REQUIRE(receivedTileCount == 1); } - SECTION("Fetch /tile SourceData") + // Fetch /tile SourceData { - // Initialize an httplib client. 
- httplib::Client cli("localhost", ds.port()); + auto [result, resp] = dsClient.get("/tile?layer=SourceData-WayLayer&tileId=1"); + REQUIRE(result == drogon::ReqResult::Ok); + REQUIRE(resp != nullptr); + REQUIRE(resp->statusCode() == drogon::k200OK); - // Send a GET tile request - auto tileResponse = cli.Get("/tile?layer=SourceData-WayLayer&tileId=1"); - - // Check that the response is OK. - REQUIRE(tileResponse != nullptr); - REQUIRE(tileResponse->status == 200); - - // Check the response body for expected content. auto receivedTileCount = 0; TileLayerStream::Reader reader( [&](auto&& mapId, auto&& layerId) @@ -162,113 +517,98 @@ TEST_CASE("HttpDataSource", "[HttpDataSource]") REQUIRE(tile->id().layer_ == LayerType::SourceData); receivedTileCount++; }); - reader.read(tileResponse->body); + reader.read(std::string(resp->body())); REQUIRE(receivedTileCount == 1); } - SECTION("Fetch /locate") + // Fetch /locate { - // Initialize an httplib client. - httplib::Client cli("localhost", ds.port()); - - // Send a POST locate request. - auto response = cli.Post("/locate", R"({ - "mapId": "Tropico", - "typeId": "Way", - "featureId": ["wayId", 0] - })", "application/json"); - - // Check that the response is OK. - REQUIRE(response != nullptr); - REQUIRE(response->status == 200); - - // Check the response body for expected content. 
- LocateResponse responseParsed(nlohmann::json::parse(response->body)[0]); + auto [result, resp] = dsClient.postJson( + "/locate", + R"({ + "mapId": "Tropico", + "typeId": "Way", + "featureId": ["wayId", 0] + })"); + + REQUIRE(result == drogon::ReqResult::Ok); + REQUIRE(resp != nullptr); + REQUIRE(resp->statusCode() == drogon::k200OK); + + LocateResponse responseParsed(nlohmann::json::parse(std::string(resp->body()))[0]); REQUIRE(responseParsed.tileKey_.mapId_ == "Tropico"); REQUIRE(responseParsed.tileKey_.layer_ == LayerType::Features); REQUIRE(responseParsed.tileKey_.layerId_ == "WayLayer"); REQUIRE(responseParsed.tileKey_.tileId_.value_ == 1); } - SECTION("Query mapget HTTP service") + // Query mapget HTTP service (in-process, started once for entire test binary) { + auto& service = test::httpService(); + auto remoteDataSource = std::make_shared("127.0.0.1", dsProc.port()); + service.add(remoteDataSource); + auto countReceivedTiles = [](auto& client, auto mapId, auto layerId, auto tiles) { auto tileCount = 0; - auto request = std::make_shared(mapId, layerId, tiles); - request->onFeatureLayer([&](auto&& tile) { tileCount++; }); - //request->onSourceDataLayer([&](auto&& tile) { tileCount++; }); - + request->onFeatureLayer([&](auto&&) { tileCount++; }); client.request(request)->wait(); return std::make_tuple(request, tileCount); }; - HttpService service; - auto remoteDataSource = std::make_shared("localhost", ds.port()); - service.add(remoteDataSource); - - service.go(); - - SECTION("Query through mapget HTTP service") + // Query through mapget HTTP service { - HttpClient client("localhost", service.port()); + HttpClient client("127.0.0.1", service.port()); auto [request, receivedTileCount] = countReceivedTiles( client, "Tropico", "WayLayer", - std::vector{{1234, 5678, 9112, 1234}}); + std::vector{1234, 5678, 9112}); - REQUIRE(receivedTileCount == 4); - // One tile requested twice, so the cache was used. 
- REQUIRE(dataSourceFeatureRequestCount == 3); + REQUIRE(receivedTileCount == 3); + REQUIRE(request->getStatus() == RequestStatus::Success); } - SECTION("Trigger 400 responses") + // Trigger 400 responses { - HttpClient client("localhost", service.port()); + HttpClient client("127.0.0.1", service.port()); { - auto [request, receivedTileCount] = countReceivedTiles( - client, - "UnknownMap", - "WayLayer", - std::vector{{1234}}); + auto [request, receivedTileCount] = + countReceivedTiles(client, "UnknownMap", "WayLayer", std::vector{{1234}}); REQUIRE(request->getStatus() == RequestStatus::NoDataSource); REQUIRE(receivedTileCount == 0); } { - auto [request, receivedTileCount] = countReceivedTiles( - client, - "Tropico", - "UnknownLayer", - std::vector{{1234}}); + auto [request, receivedTileCount] = + countReceivedTiles(client, "Tropico", "UnknownLayer", std::vector{{1234}}); REQUIRE(request->getStatus() == RequestStatus::NoDataSource); REQUIRE(receivedTileCount == 0); } } - SECTION("Run /locate through service") + // Run /locate through service { - httplib::Client client("localhost", service.port()); - - // Send a POST locate request. - auto response = client.Post("/locate", R"({ - "requests": [{ - "mapId": "Tropico", - "typeId": "Way", - "featureId": ["wayId", 0] - }] - })", "application/json"); - - // Check that the response is OK. - REQUIRE(response != nullptr); - REQUIRE(response->status == 200); - - // Check the response body for expected content. 
- auto responseJsonLists = nlohmann::json::parse(response->body)["responses"]; + SyncHttpClient serviceClient("127.0.0.1", service.port()); + + auto [result, resp] = serviceClient.postJson( + "/locate", + R"({ + "requests": [{ + "mapId": "Tropico", + "typeId": "Way", + "featureId": ["wayId", 0] + }] + })"); + + REQUIRE(result == drogon::ReqResult::Ok); + REQUIRE(resp != nullptr); + REQUIRE(resp->statusCode() == drogon::k200OK); + + auto responseJsonLists = nlohmann::json::parse(std::string(resp->body()))["responses"]; REQUIRE(responseJsonLists.size() == 1); auto responseJsonList = responseJsonLists[0]; REQUIRE(responseJsonList.size() == 1); @@ -279,79 +619,159 @@ TEST_CASE("HttpDataSource", "[HttpDataSource]") REQUIRE(responseParsed.tileKey_.tileId_.value_ == 1); } - SECTION("Test auth header requirement") + // Test auth header requirement { - remoteDataSource->requireAuthHeaderRegexMatchOption( - "X-USER-ROLE", - std::regex("\\bTropico-Viewer\\b")); + remoteDataSource->requireAuthHeaderRegexMatchOption("X-USER-ROLE", std::regex("\\bTropico-Viewer\\b")); - HttpClient badClient("localhost", service.port()); - HttpClient goodClient("localhost", service.port(), {{"X-USER-ROLE", "Tropico-Viewer"}}); + HttpClient badClient("127.0.0.1", service.port()); + HttpClient goodClient("127.0.0.1", service.port(), {{"X-USER-ROLE", "Tropico-Viewer"}}); - // Check sources REQUIRE(badClient.sources().empty()); REQUIRE(goodClient.sources().size() == 1); - // Try to load tiles with bad client { - auto [request, receivedTileCount] = countReceivedTiles( - badClient, - "Tropico", - "WayLayer", - std::vector{{1234}}); + auto [request, receivedTileCount] = + countReceivedTiles(badClient, "Tropico", "WayLayer", std::vector{{1234}}); REQUIRE(request->getStatus() == RequestStatus::Unauthorized); REQUIRE(receivedTileCount == 0); } - // Try to load tiles with good client { - auto [request, receivedTileCount] = countReceivedTiles( - goodClient, - "Tropico", - "WayLayer", - std::vector{{1234}}); 
+ auto [request, receivedTileCount] = + countReceivedTiles(goodClient, "Tropico", "WayLayer", std::vector{{1234}}); REQUIRE(request->getStatus() == RequestStatus::Success); REQUIRE(receivedTileCount == 1); } - } - service.stop(); - REQUIRE(service.isRunning() == false); - } + const auto dsInfo = remoteDataSource->info(); + const auto layerInfo = dsInfo.getLayer("WayLayer"); + REQUIRE(layerInfo != nullptr); - SECTION("Wait for data source") - { - auto waitThread = std::thread([&] { ds.waitForSignal(); }); - ds.stop(); - waitThread.join(); - REQUIRE(ds.isRunning() == false); - } + auto requireConnected = [](WsTilesClient& wsClient) { + auto conn = wsClient.connection(); + if (!conn || !conn->connected()) { + wsClient.stop(); + FAIL("WebSocket connection not established"); + } + return conn; + }; + + auto runWsTilesRequest = [&](bool sendAuthHeader, const std::string& requestJson) { + WsTilesClient wsClient(service.port(), layerInfo); + + REQUIRE(wsClient.connect(sendAuthHeader)); + requireConnected(wsClient)->send(requestJson, drogon::WebSocketMessageType::Text); + + REQUIRE(wsClient.waitForDone(std::chrono::seconds(10))); + if (!wsClient.error().empty()) { + wsClient.stop(); + FAIL(wsClient.error()); + } + + auto status = wsClient.lastStatus(); + wsClient.stop(); - ds.stop(); - REQUIRE(ds.isRunning() == false); + REQUIRE(status.has_value()); + return std::make_tuple(*status, wsClient.receivedTileCount()); + }; + + // WebSocket tiles: unauthorized without auth header. 
+ { + auto req = nlohmann::json::object({ + {"requests", nlohmann::json::array({nlohmann::json::object({ + {"mapId", "Tropico"}, + {"layerId", "WayLayer"}, + {"tileIds", nlohmann::json::array({1234})}, + })})}, + }).dump(); + + auto [status, wsTileCount] = runWsTilesRequest(false, req); + REQUIRE(wsTileCount == 0); + REQUIRE(status["requests"].size() == 1); + REQUIRE(status["requests"][0]["status"].get() == + static_cast(RequestStatus::Unauthorized)); + } + + // WebSocket tiles: invalid request stays on the same connection, then succeeds. + { + WsTilesClient wsClient(service.port(), layerInfo); + REQUIRE(wsClient.connect(true)); + + auto conn = requireConnected(wsClient); + + // Invalid JSON: should yield a Status message but keep the socket open. + { + conn->send("{not json", drogon::WebSocketMessageType::Text); + REQUIRE(wsClient.waitForDone(std::chrono::seconds(5))); + if (!wsClient.error().empty()) { + wsClient.stop(); + FAIL(wsClient.error()); + } + + auto status = wsClient.lastStatus(); + REQUIRE(status.has_value()); + REQUIRE(status->value("message", "").find("Invalid JSON") != std::string::npos); + REQUIRE(conn->connected()); + } + + // Valid request should succeed afterwards. 
+ { + wsClient.resetStatus(); + wsClient.resetTileCount(); + + auto req = nlohmann::json::object({ + {"requests", nlohmann::json::array({nlohmann::json::object({ + {"mapId", "Tropico"}, + {"layerId", "WayLayer"}, + {"tileIds", nlohmann::json::array({1234})}, + })})}, + }).dump(); + + conn->send(req, drogon::WebSocketMessageType::Text); + + REQUIRE(wsClient.waitForDone(std::chrono::seconds(10))); + if (!wsClient.error().empty()) { + wsClient.stop(); + FAIL(wsClient.error()); + } + + auto status = wsClient.lastStatus(); + REQUIRE(wsClient.receivedTileCount() == 1); + REQUIRE(status.has_value()); + REQUIRE(status->contains("requests")); + REQUIRE((*status)["requests"].size() == 1); + REQUIRE((*status)["requests"][0]["status"].get() == + static_cast(RequestStatus::Success)); + } + + wsClient.stop(); + } + } + + service.remove(remoteDataSource); + } } TEST_CASE("Configuration Endpoint Tests", "[Configuration]") { + auto& service = test::httpService(); + REQUIRE(service.isRunning() == true); + + SyncHttpClient cli("127.0.0.1", service.port()); + auto tempDir = fs::temp_directory_path() / test::generateTimestampedDirectoryName("mapget_test_http_config"); fs::create_directory(tempDir); auto tempConfigPath = tempDir / "temp_config.yaml"; - // Setting up the server and client. - HttpService service; - service.go(); - REQUIRE(service.isRunning() == true); - httplib::Client cli("localhost", service.port()); - // Set up the config file. DataSourceConfigService::get().reset(); + struct ConfigWatchGuard { + ~ConfigWatchGuard() { DataSourceConfigService::get().end(); } + } configWatchGuard; struct SchemaPatchGuard { - ~SchemaPatchGuard() { - DataSourceConfigService::get().setDataSourceConfigSchemaPatch(nlohmann::json::object()); - } + ~SchemaPatchGuard() { DataSourceConfigService::get().setDataSourceConfigSchemaPatch(nlohmann::json::object()); } } schemaPatchGuard; - // Emulate the CLI-provided config-schema patch so http-settings participates in the auto schema. 
auto schemaPatch = nlohmann::json::parse(R"( { "properties": { @@ -364,110 +784,127 @@ TEST_CASE("Configuration Endpoint Tests", "[Configuration]") )"); DataSourceConfigService::get().setDataSourceConfigSchemaPatch(schemaPatch); - SECTION("Get Configuration - Config File Not Found") { + SECTION("Get Configuration - Config File Not Found") + { DataSourceConfigService::get().loadConfig(tempConfigPath.string()); - auto res = cli.Get("/config"); + auto [result, res] = cli.get("/config"); + REQUIRE(result == drogon::ReqResult::Ok); REQUIRE(res != nullptr); - REQUIRE(res->status == 404); - REQUIRE(res->body == "The server does not have a config file."); + REQUIRE(res->statusCode() == drogon::k404NotFound); + REQUIRE(std::string(res->body()) == "The server does not have a config file."); } // Create config file for tests that need it { std::ofstream configFile(tempConfigPath); - configFile << "sources: []\nhttp-settings: [{'password': 'hunter2'}]"; // Update http-settings to an array. + configFile << "sources: []\nhttp-settings: [{'password': 'hunter2'}]"; configFile.flush(); configFile.close(); - - // Ensure file is synced to disk - #ifndef _WIN32 + +#ifndef _WIN32 int fd = open(tempConfigPath.c_str(), O_RDONLY); if (fd != -1) { fsync(fd); close(fd); } - #endif +#endif std::this_thread::sleep_for(std::chrono::milliseconds(100)); } - - // Load the config after file is created + DataSourceConfigService::get().loadConfig(tempConfigPath.string()); - - // Give the config watcher time to detect the file std::this_thread::sleep_for(std::chrono::milliseconds(500)); - SECTION("Get Configuration - Not allowed") { + SECTION("Get Configuration - Not allowed") + { setGetConfigEndpointEnabled(false); - auto res = cli.Get("/config"); + auto [result, res] = cli.get("/config"); + REQUIRE(result == drogon::ReqResult::Ok); REQUIRE(res != nullptr); - REQUIRE(res->status == 403); + REQUIRE(res->statusCode() == drogon::k403Forbidden); } - SECTION("Get Configuration - No Config File Path Set") 
{ + SECTION("Get Configuration - No Config File Path Set") + { setGetConfigEndpointEnabled(true); - DataSourceConfigService::get().loadConfig(""); // Simulate no config path set. - auto res = cli.Get("/config"); + DataSourceConfigService::get().loadConfig(""); + auto [result, res] = cli.get("/config"); + REQUIRE(result == drogon::ReqResult::Ok); REQUIRE(res != nullptr); - REQUIRE(res->status == 404); - REQUIRE(res->body == "The config file path is not set. Check the server configuration."); + REQUIRE(res->statusCode() == drogon::k404NotFound); + REQUIRE(std::string(res->body()) == + "The config file path is not set. Check the server configuration."); } - SECTION("Get Configuration - Success") { - auto res = cli.Get("/config"); + SECTION("Get Configuration - Success") + { + setGetConfigEndpointEnabled(true); + auto [result, res] = cli.get("/config"); + REQUIRE(result == drogon::ReqResult::Ok); REQUIRE(res != nullptr); - REQUIRE(res->status == 200); - REQUIRE(res->body.find("sources") != std::string::npos); - REQUIRE(res->body.find("http-settings") != std::string::npos); - - // Ensure that the password is masked as SHA256. 
- REQUIRE(res->body.find("hunter2") == std::string::npos); - REQUIRE(res->body.find("MASKED:0:f52fbd32b2b3b86ff88ef6c490628285f482af15ddcb29541f94bcf526a3f6c7") != std::string::npos); + REQUIRE(res->statusCode() == drogon::k200OK); + + auto body = std::string(res->body()); + REQUIRE(body.find("sources") != std::string::npos); + REQUIRE(body.find("http-settings") != std::string::npos); + REQUIRE(body.find("hunter2") == std::string::npos); + REQUIRE( + body.find("MASKED:0:f52fbd32b2b3b86ff88ef6c490628285f482af15ddcb29541f94bcf526a3f6c7") != + std::string::npos); } - SECTION("Post Configuration - Not Enabled") { + SECTION("Post Configuration - Not Enabled") + { setPostConfigEndpointEnabled(false); - auto res = cli.Post("/config", "", "application/json"); + auto [result, res] = cli.postJson("/config", ""); + REQUIRE(result == drogon::ReqResult::Ok); REQUIRE(res != nullptr); - REQUIRE(res->status == 403); + REQUIRE(res->statusCode() == drogon::k403Forbidden); } - SECTION("Post Configuration - Invalid JSON Format") { + SECTION("Post Configuration - Invalid JSON Format") + { setPostConfigEndpointEnabled(true); - std::string invalidJson = "this is not valid json"; - auto res = cli.Post("/config", invalidJson, "application/json"); + auto [result, res] = cli.postJson("/config", "this is not valid json"); + REQUIRE(result == drogon::ReqResult::Ok); REQUIRE(res != nullptr); - REQUIRE(res->status == 400); - REQUIRE(res->body.find("Invalid JSON format") != std::string::npos); + REQUIRE(res->statusCode() == drogon::k400BadRequest); + REQUIRE(std::string(res->body()).find("Invalid JSON format") != std::string::npos); } - SECTION("Post Configuration - Missing Sources") { - std::string newConfig = R"({"http-settings": []})"; - auto res = cli.Post("/config", newConfig, "application/json"); + SECTION("Post Configuration - Missing Sources") + { + setPostConfigEndpointEnabled(true); + auto [result, res] = cli.postJson("/config", R"({"http-settings": []})"); + REQUIRE(result == 
drogon::ReqResult::Ok); REQUIRE(res != nullptr); - REQUIRE(res->status == 500); - REQUIRE(res->body.starts_with("Validation failed")); + REQUIRE(res->statusCode() == drogon::k500InternalServerError); + REQUIRE(std::string(res->body()).starts_with("Validation failed")); } - SECTION("Post Configuration - Missing Http Settings") { - std::string newConfig = R"({"sources": []})"; - auto res = cli.Post("/config", newConfig, "application/json"); + SECTION("Post Configuration - Missing Http Settings") + { + setPostConfigEndpointEnabled(true); + auto [result, res] = cli.postJson("/config", R"({"sources": []})"); + REQUIRE(result == drogon::ReqResult::Ok); REQUIRE(res != nullptr); - REQUIRE(res->status == 500); - REQUIRE(res->body.starts_with("Validation failed")); + REQUIRE(res->statusCode() == drogon::k500InternalServerError); + REQUIRE(std::string(res->body()).starts_with("Validation failed")); } - SECTION("Post Configuration - Valid JSON Config") { + SECTION("Post Configuration - Valid JSON Config") + { + setPostConfigEndpointEnabled(true); std::string newConfig = R"({ "sources": [{"type": "TestDataSource"}], "http-settings": [{"scope": "https://example.com", "password": "MASKED:0:f52fbd32b2b3b86ff88ef6c490628285f482af15ddcb29541f94bcf526a3f6c7"}] })"; - log().set_level(spdlog::level::trace); - auto res = cli.Post("/config", newConfig, "application/json"); + + auto [result, res] = cli.postJson("/config", newConfig); + REQUIRE(result == drogon::ReqResult::Ok); REQUIRE(res != nullptr); - REQUIRE(res->status == 200); - REQUIRE(res->body == "Configuration updated and applied successfully."); + REQUIRE(res->statusCode() == drogon::k200OK); + REQUIRE(std::string(res->body()) == "Configuration updated and applied successfully."); - // Check that the password SHA was re-substituted. 
std::ifstream config(*mapget::DataSourceConfigService::get().getConfigFilePath()); std::stringstream configContentStream; configContentStream << config.rdbuf(); @@ -475,9 +912,6 @@ TEST_CASE("Configuration Endpoint Tests", "[Configuration]") REQUIRE(configContent.find("hunter2") != std::string::npos); } - service.stop(); - REQUIRE(service.isRunning() == false); - - // Clean up the test configuration files. + DataSourceConfigService::get().end(); fs::remove(tempConfigPath); } diff --git a/test/unit/test-http-service-fixture.h b/test/unit/test-http-service-fixture.h new file mode 100644 index 00000000..bbb9b0c3 --- /dev/null +++ b/test/unit/test-http-service-fixture.h @@ -0,0 +1,17 @@ +#pragma once + +#include "mapget/http-service/http-service.h" + +namespace mapget::test +{ + +// Starts the HTTP service lazily (on first use) and keeps it alive for the +// lifetime of the test process. `shutdownHttpService()` stops the server and +// joins its server thread to avoid Drogon shutdown issues. +HttpService& httpService(); + +// Safe to call even if the service was never started. 
+void shutdownHttpService(); + +} // namespace mapget::test + diff --git a/test/unit/test-info.cpp b/test/unit/test-info.cpp index ca15a9e3..e0cac24a 100644 --- a/test/unit/test-info.cpp +++ b/test/unit/test-info.cpp @@ -19,6 +19,9 @@ TEST_CASE("InfoToJson", "[DataSourceInfo]") std::vector(), std::vector{0, 1, 2}, std::vector{{1, 2, {}}, {3, 3, {}}}, + 1, + std::vector{"Complete"}, + 0, true, false, Version{1, 0, 0}}); diff --git a/test/unit/test-main.cpp b/test/unit/test-main.cpp new file mode 100644 index 00000000..eb67e32f --- /dev/null +++ b/test/unit/test-main.cpp @@ -0,0 +1,50 @@ +#define CATCH_CONFIG_RUNNER + +#include + +#include + +#include "mapget/log.h" +#include "test-http-service-fixture.h" + +namespace mapget::test +{ +namespace +{ + +std::mutex serviceMutex; +HttpService* servicePtr = nullptr; + +} // namespace + +HttpService& httpService() +{ + std::lock_guard lock(serviceMutex); + + if (!servicePtr) { + // Intentionally leaked to avoid destructor ordering issues at process shutdown. 
+ servicePtr = new HttpService(); + servicePtr->go("127.0.0.1", 0, 5000); + } + + return *servicePtr; +} + +void shutdownHttpService() +{ + std::lock_guard lock(serviceMutex); + if (!servicePtr) + return; + + servicePtr->stop(); +} + +} // namespace mapget::test + +int main(int argc, char* argv[]) +{ + auto result = Catch::Session().run(argc, argv); + mapget::test::shutdownHttpService(); + return result; +} + diff --git a/test/unit/test-model-geometry.cpp b/test/unit/test-model-geometry.cpp index 82a2e940..8c7176bc 100644 --- a/test/unit/test-model-geometry.cpp +++ b/test/unit/test-model-geometry.cpp @@ -168,7 +168,7 @@ TEST_CASE("GeometryCollection", "[geom.collection]") SECTION("Construct GeometryCollection") { REQUIRE(asModelNode(geometry_collection).type() == ValueType::Object); - REQUIRE(asModelNode(geometry_collection).size() == 3); // 'type' and 'geometries' fields + REQUIRE(asModelNode(geometry_collection).size() == 2); // 'type' and 'geometries' fields } SECTION("Recover geometry") @@ -187,7 +187,7 @@ TEST_CASE("GeometryCollection", "[geom.collection]") // Since the collection only contains one geometry, // it hides itself and directly presents the nested geometry, // conforming to GeoJSON (a collection must have >1 geometries). 
- REQUIRE(asModelNode(geometry_collection).size() == 3); // 'type' and 'geometry' fields + REQUIRE(asModelNode(geometry_collection).size() == 2); // 'type' and 'geometry' fields REQUIRE(asModelNode(geometry_collection).at(1)->type() == ValueType::Array); // 'geometry' field REQUIRE(asModelNode(geometry_collection).at(1)->size() == 4); // four points @@ -238,6 +238,35 @@ TEST_CASE("Spatial Operators", "[spatial.ops]") { } } +TEST_CASE("GeoJSON geometry names are derived from non-default stages", "[geometry][geojson]") +{ + auto tile = makeTile(); + tile->layerInfo()->stages_ = 3; + tile->layerInfo()->stageLabels_ = {"Low-Fi", "High-Fi", "ADAS"}; + tile->layerInfo()->highFidelityStage_ = 1; + + auto feature = tile->newFeature("Way", {{"wayId", 77}}); + + tile->setStage(1U); + auto baseGeometry = feature->geom()->newGeometry(GeomType::Line, 2); + baseGeometry->append({0., 0., 0.}); + baseGeometry->append({1., 0., 0.}); + + tile->setStage(2U); + auto adasGeometry = feature->geom()->newGeometry(GeomType::Line, 2); + adasGeometry->append({1., 0., 0.}); + adasGeometry->append({2., 0., 0.}); + + tile->setStage(std::nullopt); + + auto json = feature->toJson(); + REQUIRE_FALSE(json.contains("lod")); + auto const& geometries = json.at("geometry").at("geometries"); + REQUIRE(geometries.size() == 2); + REQUIRE_FALSE(geometries[0].contains("geometryName")); + REQUIRE(geometries[1].at("geometryName") == "ADAS"); +} + TEST_CASE("GeometryCollection Multiple Geometries", "[geom.collection.multiple]") { auto model_pool = makeTile(); @@ -331,6 +360,45 @@ TEST_CASE("GeometryCollection Multiple Geometries", "[geom.collection.multiple]" } } +TEST_CASE("Feature Geometry Direct Storage Upgrade", "[geom.collection][feature]") +{ + auto modelPool = makeTile(); + auto feature = modelPool->newFeature("Way", {{"wayId", 42}}); + + feature->addPoint({1.0, 2.0, 3.0}); + + auto single = feature->geomOrNull(); + REQUIRE(single); + REQUIRE(single->addr().column() == 
TileFeatureLayer::ColumnId::PointGeometries); + REQUIRE(single->numGeometries() == 1); + + auto singleAsGeometry = modelPool->resolve(single->addr()); + REQUIRE(singleAsGeometry); + REQUIRE(singleAsGeometry->geomType() == GeomType::Points); + REQUIRE(singleAsGeometry->numPoints() == 1); + REQUIRE(singleAsGeometry->pointAt(0) == Point{1.0, 2.0, 3.0}); + + feature->addLine({{10.0, 20.0, 0.0}, {11.0, 21.0, 0.0}}); + + auto upgraded = feature->geomOrNull(); + REQUIRE(upgraded); + REQUIRE(upgraded->addr().column() == TileFeatureLayer::ColumnId::GeometryCollections); + REQUIRE(upgraded->numGeometries() == 2); + + auto upgradedGeoms = asModelNode(upgraded).get(StringPool::GeometriesStr); + REQUIRE(upgradedGeoms); + REQUIRE(upgradedGeoms->size() == 2); + auto upgradedFirst = modelPool->resolve(*upgradedGeoms->at(0)); + auto upgradedSecond = modelPool->resolve(*upgradedGeoms->at(1)); + REQUIRE(upgradedFirst->geomType() == GeomType::Points); + REQUIRE(upgradedFirst->numPoints() == 1); + REQUIRE(upgradedFirst->pointAt(0) == Point{1.0, 2.0, 3.0}); + REQUIRE(upgradedSecond->geomType() == GeomType::Line); + REQUIRE(upgradedSecond->numPoints() == 2); + REQUIRE(upgradedSecond->pointAt(0) == Point{10.0, 20.0, 0.0}); + REQUIRE(upgradedSecond->pointAt(1) == Point{11.0, 21.0, 0.0}); +} + TEST_CASE("Attribute Validity", "[validity]") { auto modelPool = makeTile(); @@ -343,12 +411,11 @@ TEST_CASE("Attribute Validity", "[validity]") { linestringGeom->append({.5, .5}); linestringGeom->append({1., 1.}); - // Create and add LineString geometry with name. + // Create and add second LineString geometry. auto linestringGeomNamed = geometryCollection->newGeometry(GeomType::Line); linestringGeomNamed->append({-0., -0.}); linestringGeomNamed->append({-.5, -.5}); linestringGeomNamed->append({-1., -1.}); - linestringGeomNamed->setName("BestGeometry"); // Create a validity collection. 
auto metresAtFortyPercent = Point({-0., -0.}).geographicDistanceTo(Point({-1., -1.})) * 0.4; @@ -366,20 +433,8 @@ TEST_CASE("Attribute Validity", "[validity]") { validities ->newRange(Validity::MetricLengthOffset, metresAtFortyPercent, metresAtEightyPercent); validities->newGeometry(linestringGeomNamed); - validities->newPoint({-.2, -.25}, "BestGeometry"); - validities->newRange({-.2, -.25}, {-.75, -.7}, "BestGeometry"); - validities->newPoint(Validity::BufferOffset, 0, "BestGeometry"); - validities->newPoint(Validity::RelativeLengthOffset, .4, "BestGeometry"); - validities->newPoint(Validity::MetricLengthOffset, metresAtFortyPercent, "BestGeometry"); - validities->newRange(Validity::BufferOffset, 0, 1, "BestGeometry"); - validities->newRange(Validity::RelativeLengthOffset, .4, .8, "BestGeometry"); - validities->newRange( - Validity::MetricLengthOffset, - metresAtFortyPercent, - metresAtEightyPercent, - "BestGeometry"); auto json = validities->toJson(); - REQUIRE(json.size() == 19); + REQUIRE(json.size() == 11); // Fill out the expectedGeometry vector. 
std::vector> expectedGeometry = { @@ -405,22 +460,6 @@ TEST_CASE("Attribute Validity", "[validity]") { {{0.39999238400870357,0.39999238400870357,0.0}, {0.5,0.5,0.0}, {0.7999961908806066,0.7999961908806066,0.0}}, // linestringGeomNamed 💚 {{-0.0,-0.0,0.0}, {-0.5,-0.5,0.0}, {-1.0,-1.0,0.0}}, - // {-.2, -.25}, "BestGeometry" 💚 - {{-0.225,-0.225,0.0}}, - // {-.2, -.25}, {-.75, -.7}, "BestGeometry" 💚 - {{-0.225,-0.225,0.0}, {-0.5,-0.5,0.0}, {-0.725,-0.725,0.0}}, - // Validity::BufferOffset, 0, "BestGeometry" 💚 - {{-0.0,-0.0,0.0}}, - // Validity::RelativeLengthOffset, .4, "BestGeometry" 💚 - {{-0.39999238466117465,-0.39999238466117465,0.0}}, - // Validity::MetricLengthOffset, metresAtFortyPercent, "BestGeometry" 💚 - {{-0.39999238400870357,-0.39999238400870357,0.0}}, - // Validity::BufferOffset, 0, 1, "BestGeometry" 💚 - {{-0.0,-0.0,0.0}, {-0.5,-0.5,0.0}}, - // Validity::RelativeLengthOffset, .4, .8, "BestGeometry" 💚 - {{-0.39999238466117465,-0.39999238466117465,0.0}, {-0.5,-0.5,0.0}, {-0.7999961921855985,-0.7999961921855985,0.0}}, - // Validity::MetricLengthOffset, metresAtFortyPercent, metresAtEightyPercent, "BestGeometry" 💚 - {{-0.39999238400870357,-0.39999238400870357,0.0}, {-0.5,-0.5,0.0}, {-0.7999961908806066,-0.7999961908806066,0.0}}, }; // Compare expected validity geometries against computed ones. @@ -445,3 +484,129 @@ TEST_CASE("Attribute Validity", "[validity]") { return true; }); } + +TEST_CASE("Simple Validity Self Upgrade", "[validity]") { + auto modelPool = makeTile(); + auto validities = modelPool->newValidityCollection(); + + auto simple = validities->newDirection(Validity::Direction::Positive); + REQUIRE(simple->addr().column() == TileFeatureLayer::ColumnId::SimpleValidity); + REQUIRE(simple->geometryDescriptionType() == Validity::NoGeometry); + + // Any geometry/feature setter must materialize the simple validity. 
+ simple->setOffsetPoint(Validity::BufferOffset, 1.0); + + auto firstNode = validities->at(0); + REQUIRE(firstNode); + auto upgraded = modelPool->resolve(*firstNode); + REQUIRE(upgraded->addr().column() == TileFeatureLayer::ColumnId::Validities); + REQUIRE(upgraded->direction() == Validity::Direction::Positive); + REQUIRE(upgraded->geometryDescriptionType() == Validity::OffsetPointValidity); + REQUIRE(upgraded->geometryOffsetType() == Validity::BufferOffset); + REQUIRE(upgraded->offsetPoint().has_value()); + REQUIRE(upgraded->offsetPoint()->x == 1.0); +} + +TEST_CASE("Semantic feature transition validities compute transition geometry", "[validity]") { + auto modelPool = makeTile(); + + auto fromFeature = modelPool->newFeature("Way", {{"wayId", int64_t(1)}}); + auto fromGeometry = fromFeature->geom()->newGeometry(GeomType::Line, 2); + fromGeometry->append({0.0, 0.0, 0.0}); + fromGeometry->append({1.0, 0.0, 0.0}); + + auto toFeature = modelPool->newFeature("Way", {{"wayId", int64_t(2)}}); + auto toGeometry = toFeature->geom()->newGeometry(GeomType::Line, 2); + toGeometry->append({1.0, 0.0, 0.0}); + toGeometry->append({2.0, 0.0, 0.0}); + + auto intersection = modelPool->newFeature("Way", {{"wayId", int64_t(3)}}); + auto validity = intersection->attributeLayers() + ->newLayer("rules") + ->newAttribute("turn") + ->validity() + ->newFeatureTransition( + fromFeature, + Validity::End, + toFeature, + Validity::Start, + 7); + + auto geometry = validity->computeGeometry(intersection->geomOrNull()); + REQUIRE(geometry.geomType_ == GeomType::Line); + REQUIRE(geometry.points_.size() == 3); + REQUIRE(geometry.points_[0] == Point{0.0, 0.0, 0.0}); + REQUIRE(geometry.points_[1] == Point{1.0, 0.0, 0.0}); + REQUIRE(geometry.points_[2] == Point{2.0, 0.0, 0.0}); +} + +TEST_CASE("Semantic feature transition validities skip duplicate endpoint points", "[validity]") { + auto modelPool = makeTile(); + + auto fromFeature = modelPool->newFeature("Way", {{"wayId", int64_t(1)}}); + auto 
fromGeometry = fromFeature->geom()->newGeometry(GeomType::Line, 3); + fromGeometry->append({0.0, 0.0, 0.0}); + fromGeometry->append({1.0, 0.0, 0.0}); + fromGeometry->append({1.0, 0.0, 0.0}); + + auto toFeature = modelPool->newFeature("Way", {{"wayId", int64_t(2)}}); + auto toGeometry = toFeature->geom()->newGeometry(GeomType::Line, 3); + toGeometry->append({1.0, 0.0, 0.0}); + toGeometry->append({1.0, 0.0, 0.0}); + toGeometry->append({2.0, 0.0, 0.0}); + + auto intersection = modelPool->newFeature("Way", {{"wayId", int64_t(3)}}); + auto validity = intersection->attributeLayers() + ->newLayer("rules") + ->newAttribute("turn") + ->validity() + ->newFeatureTransition( + fromFeature, + Validity::End, + toFeature, + Validity::Start, + 7); + + auto geometry = validity->computeGeometry(intersection->geomOrNull()); + REQUIRE(geometry.geomType_ == GeomType::Line); + REQUIRE(geometry.points_.size() == 3); + REQUIRE(geometry.points_[0] == Point{0.0, 0.0, 0.0}); + REQUIRE(geometry.points_[1] == Point{1.0, 0.0, 0.0}); + REQUIRE(geometry.points_[2] == Point{2.0, 0.0, 0.0}); +} + +TEST_CASE("Semantic feature transition validities prefer host geometry as midpoint", "[validity]") { + auto modelPool = makeTile(); + + auto fromFeature = modelPool->newFeature("Way", {{"wayId", int64_t(1)}}); + auto fromGeometry = fromFeature->geom()->newGeometry(GeomType::Line, 2); + fromGeometry->append({0.0, 0.0, 0.0}); + fromGeometry->append({1.0, 0.0, 0.0}); + + auto toFeature = modelPool->newFeature("Way", {{"wayId", int64_t(2)}}); + auto toGeometry = toFeature->geom()->newGeometry(GeomType::Line, 2); + toGeometry->append({1.1, 0.0, 0.0}); + toGeometry->append({2.0, 0.0, 0.0}); + + auto intersection = modelPool->newFeature("Way", {{"wayId", int64_t(3)}}); + auto intersectionGeometry = intersection->geom()->newGeometry(GeomType::Points, 1); + intersectionGeometry->append({1.0, 0.5, 0.0}); + + auto validity = intersection->attributeLayers() + ->newLayer("rules") + ->newAttribute("turn") + 
->validity() + ->newFeatureTransition( + fromFeature, + Validity::End, + toFeature, + Validity::Start, + 7); + + auto geometry = validity->computeGeometry(intersection->geomOrNull()); + REQUIRE(geometry.geomType_ == GeomType::Line); + REQUIRE(geometry.points_.size() == 3); + REQUIRE(geometry.points_[0] == Point{0.0, 0.0, 0.0}); + REQUIRE(geometry.points_[1] == Point{1.0, 0.5, 0.0}); + REQUIRE(geometry.points_[2] == Point{2.0, 0.0, 0.0}); +} diff --git a/test/unit/test-model.cpp b/test/unit/test-model.cpp index 5f86d734..a2b8e726 100644 --- a/test/unit/test-model.cpp +++ b/test/unit/test-model.cpp @@ -1,6 +1,7 @@ #include #include "mapget/model/featurelayer.h" +#include "mapget/model/sourcedatareference.h" #include "mapget/model/stream.h" #include "nlohmann/json.hpp" #include "mapget/log.h" @@ -147,17 +148,17 @@ TEST_CASE("FeatureLayer", "[test.featurelayer]") { constexpr auto expected = R"({"areaId":"TheBestArea","geometry":{"geometries":[)" - R"({"coordinates":[[41.0,10.0,0.0],[43.0,11.0,0.0]],"name":"","type":"LineString"},)" - R"({"coordinates":[[41.5,10.5,0.0]],"name":"","type":"MultiPoint"},)" - R"({"coordinates":[[41.5,10.5,0.0],[41.600000001490116,10.700000002980232,0.0]],"name":"","type":"MultiPoint"},)" - R"({"coordinates":[[41.5,10.5,0.0],[41.600000001490116,10.700000002980232,0.0]],"name":"","type":"LineString"},)" - R"({"coordinates":[[[[41.5,10.5,0.0],[41.5,10.299999997019768,0.0],[41.600000001490116,10.700000002980232,0.0],[41.5,10.5,0.0]]]],"name":"","type":"MultiPolygon"},)" - R"({"coordinates":[[[41.5,10.5,0.0],[41.600000001490116,10.700000002980232,0.0],[41.5,10.299999997019768,0.0],[41.80000001192093,10.900000005960464,0.0],[41.5,10.5,0.0]]],"name":"","type":"Polygon"},)" - R"({"coordinates":[[[0,1,0],[0,0,0],[1,0,0],[1,1,0],[0,1,0]]],"name":"","type":"Polygon"},)" // Unclosed, CW - R"({"coordinates":[[[1,0,0],[2,0,0],[2,1,0],[1,1,0],[1,0,0]]],"name":"","type":"Polygon"},)" // Closed, CCW - 
R"({"coordinates":[[[2,1,0],[3,1,1],[3,0,2],[2,0,3],[2,1,0]]],"name":"","type":"Polygon"},)" // Closed, CW, Z!=0 - R"({"coordinates":[[[[3,0,0],[4,0,0],[4,1,0],[3,0,0]]],[[[4,1,0],[3,0,0],[3,1,0],[4,1,0]]]],"name":"","type":"MultiPolygon"})" // Mesh - R"(],"type":"GeometryCollection"},"id":"Way.TheBestArea.42","properties":{"layer":{"cheese":{"mozzarella":{"smell":"neutral","validity":[{"direction":"POSITIVE"}]}}},"main_ingredient":"Pepper"},"type":"Feature","typeId":"Way","wayId":42,)" + R"({"coordinates":[[41.0,10.0,0.0],[43.0,11.0,0.0]],"type":"LineString"},)" + R"({"coordinates":[[41.5,10.5,0.0]],"type":"MultiPoint"},)" + R"({"coordinates":[[41.5,10.5,0.0],[41.599999994039536,10.699999988079071,0.0]],"type":"MultiPoint"},)" + R"({"coordinates":[[41.5,10.5,0.0],[41.599999994039536,10.699999988079071,0.0]],"type":"LineString"},)" + R"({"coordinates":[[[[41.5,10.5,0.0],[41.5,10.300000011920929,0.0],[41.599999994039536,10.699999988079071,0.0],[41.5,10.5,0.0]]]],"type":"MultiPolygon"},)" + R"({"coordinates":[[[41.5,10.5,0.0],[41.599999994039536,10.699999988079071,0.0],[41.5,10.300000011920929,0.0],[41.79999999701977,10.899999998509884,0.0],[41.5,10.5,0.0]]],"type":"Polygon"},)" + R"({"coordinates":[[[0,1,0],[0,0,0],[1,0,0],[1,1,0],[0,1,0]]],"type":"Polygon"},)" // Unclosed, CW + R"({"coordinates":[[[1,0,0],[2,0,0],[2,1,0],[1,1,0],[1,0,0]]],"type":"Polygon"},)" // Closed, CCW + R"({"coordinates":[[[2,1,0],[3,1,1],[3,0,2],[2,0,3],[2,1,0]]],"type":"Polygon"},)" // Closed, CW, Z!=0 + R"({"coordinates":[[[[3,0,0],[4,0,0],[4,1,0],[3,0,0]]],[[[4,1,0],[3,0,0],[3,1,0],[4,1,0]]]],"type":"MultiPolygon"})" // Mesh + R"(],"type":"GeometryCollection"},"id":"Way.TheBestArea.42","properties":{"layer":{"cheese":{"mozzarella":{"smell":"neutral","validity":{"direction":"POSITIVE"}}}},"main_ingredient":"Pepper"},"type":"Feature","typeId":"Way","wayId":42,)" R"("layerId":"WayLayer","mapId":"Tropico"})"; auto res = feature1->toJson(); @@ -173,6 +174,19 @@ TEST_CASE("FeatureLayer", 
"[test.featurelayer]") REQUIRE(feature1->id()->toString() == "Way.TheBestArea.42"); } + SECTION("Secondary feature ID compositions keep all labeled parts") + { + auto const keyValuePairs = featureForId1->keyValuePairs(); + REQUIRE(featureForId1->toString() == "Way.42.84.0123456789abcdef"); + REQUIRE(keyValuePairs.size() == 3); + REQUIRE(keyValuePairs[0].first == "wayIdU32"); + REQUIRE(std::get(keyValuePairs[0].second) == 42); + REQUIRE(keyValuePairs[1].first == "wayIdU64"); + REQUIRE(std::get(keyValuePairs[1].second) == 84); + REQUIRE(keyValuePairs[2].first == "wayIdUUID128"); + REQUIRE(std::get(keyValuePairs[2].second) == "0123456789abcdef"); + } + SECTION("Evaluate simfil filter") { REQUIRE(feature1->evaluate("**.mozzarella.smell").value().toString() == "neutral"); @@ -217,9 +231,11 @@ TEST_CASE("FeatureLayer", "[test.featurelayer]") { std::stringstream tileBytes; tile->write(tileBytes); + auto serializedTile = tileBytes.str(); + std::vector tileBuffer(serializedTile.begin(), serializedTile.end()); auto deserializedTile = std::make_shared( - tileBytes, + tileBuffer, [&](auto&& mapName, auto&& layerName){ REQUIRE(mapName == "Tropico"); REQUIRE(layerName == "WayLayer"); @@ -246,6 +262,18 @@ TEST_CASE("FeatureLayer", "[test.featurelayer]") for (auto feature : *deserializedTile) { REQUIRE(feature->id()->toString().substr(0, 16) == "Way.TheBestArea."); } + + auto deserializedFeatureId = deserializedTile->resolve( + simfil::ModelNodeAddress{TileFeatureLayer::ColumnId::ExternalFeatureIds, 0}); + REQUIRE(deserializedFeatureId); + auto const deserializedKeyValuePairs = deserializedFeatureId->keyValuePairs(); + REQUIRE(deserializedKeyValuePairs.size() == 3); + REQUIRE(deserializedKeyValuePairs[0].first == "wayIdU32"); + REQUIRE(std::get(deserializedKeyValuePairs[0].second) == 42); + REQUIRE(deserializedKeyValuePairs[1].first == "wayIdU64"); + REQUIRE(std::get(deserializedKeyValuePairs[1].second) == 84); + REQUIRE(deserializedKeyValuePairs[2].first == "wayIdUUID128"); + 
REQUIRE(std::get(deserializedKeyValuePairs[2].second) == "0123456789abcdef"); } SECTION("Stream") @@ -389,9 +417,11 @@ TEST_CASE("FeatureLayer", "[test.featurelayer]") std::stringstream tileBytes; tile->write(tileBytes); + auto serializedTile = tileBytes.str(); + std::vector tileBuffer(serializedTile.begin(), serializedTile.end()); auto deserializedTile = std::make_shared( - tileBytes, + tileBuffer, [&](auto&& mapName, auto&& layerName){ return layerInfo; }, @@ -407,6 +437,570 @@ TEST_CASE("FeatureLayer", "[test.featurelayer]") REQUIRE(deserializedTile->ttl() == tile->ttl()); REQUIRE(deserializedTile->ttl().value().count() == 60000); } + + SECTION("Serialization with stage") + { + tile->setStage(3U); + + std::stringstream tileBytes; + tile->write(tileBytes); + auto serializedTile = tileBytes.str(); + std::vector tileBuffer(serializedTile.begin(), serializedTile.end()); + + auto deserializedTile = std::make_shared( + tileBuffer, + [&](auto&&, auto&&) { + return layerInfo; + }, + [&](auto&&) { + return strings; + } + ); + + REQUIRE(deserializedTile->stage().has_value()); + REQUIRE(deserializedTile->stage().value() == 3U); + } + + SECTION("Serialization without stage") + { + tile->setStage({}); + + std::stringstream tileBytes; + tile->write(tileBytes); + auto serializedTile = tileBytes.str(); + std::vector tileBuffer(serializedTile.begin(), serializedTile.end()); + + auto deserializedTile = std::make_shared( + tileBuffer, + [&](auto&&, auto&&) { + return layerInfo; + }, + [&](auto&&) { + return strings; + } + ); + + REQUIRE_FALSE(deserializedTile->stage().has_value()); + } +} + +TEST_CASE("FeatureLayer stores geometry source-data refs compactly for singleton geometries", + "[test.featurelayer][test.featurelayer.sourcedatarefs]") +{ + auto layerInfo = LayerInfo::fromJson(R"({ + "layerId": "WayLayer", + "type": "Features", + "featureTypes": [ + { + "name": "Way", + "uniqueIdCompositions": [ + [ + { + "partId": "wayId", + "description": "Globally unique 32b integer.", + 
"datatype": "U32" + } + ] + ] + } + ] + })"_json); + + auto strings = std::make_shared("SourceDataRefNode"); + auto tile = std::make_shared( + TileId::fromWgs84(42., 11., 13), + "SourceDataRefNode", + "SourceDataRefMap", + layerInfo, + strings); + + auto feature = tile->newFeature("Way", {{"wayId", 42}}); + auto singletonPoint = feature->geom()->newGeometry(GeomType::Points, 1, true); + singletonPoint->append({42., 11., 0.}); + auto line = feature->geom()->newGeometry(GeomType::Line, 2); + line->append({42., 11., 0.}); + line->append({42.1, 11.1, 0.}); + + QualifiedSourceDataReference singletonPointRef{ + .address_ = SourceDataAddress::fromBitPosition(8, 16), + .layerId_ = strings->emplace("DisplayLayer").value(), + .qualifier_ = strings->emplace("Position2D").value(), + }; + QualifiedSourceDataReference lineRef{ + .address_ = SourceDataAddress::fromBitPosition(24, 32), + .layerId_ = strings->emplace("DisplayLayer").value(), + .qualifier_ = strings->emplace("Line2D").value(), + }; + + singletonPoint->setSourceDataReferences( + tile->newSourceDataReferenceCollection({&singletonPointRef, 1})); + line->setSourceDataReferences( + tile->newSourceDataReferenceCollection({&lineRef, 1})); + + SECTION("refs are accessible before serialization") + { + REQUIRE(singletonPoint->sourceDataReferences()); + REQUIRE(singletonPoint->sourceDataReferences()->size() == 1); + std::string singletonPointQualifier; + singletonPoint->sourceDataReferences()->forEachReference([&](auto const& ref) { + singletonPointQualifier = std::string(ref.qualifier()); + }); + REQUIRE(singletonPointQualifier == "Position2D"); + + REQUIRE(line->sourceDataReferences()); + REQUIRE(line->sourceDataReferences()->size() == 1); + std::string lineQualifier; + line->sourceDataReferences()->forEachReference([&](auto const& ref) { + lineQualifier = std::string(ref.qualifier()); + }); + REQUIRE(lineQualifier == "Line2D"); + + auto sizeStats = tile->serializationSizeStats(); + 
REQUIRE(sizeStats["feature-layer"]["geometry-source-data-references"].get() < 1024); + } + + SECTION("refs survive serialization roundtrip") + { + std::stringstream tileBytes; + REQUIRE(tile->write(tileBytes).has_value()); + auto serializedTile = tileBytes.str(); + std::vector tileBuffer(serializedTile.begin(), serializedTile.end()); + + auto deserializedTile = std::make_shared( + tileBuffer, + [&](auto&&, auto&&) { + return layerInfo; + }, + [&](auto&&) { + return strings; + }); + + auto deserializedFeature = deserializedTile->at(0); + REQUIRE(deserializedFeature); + auto deserializedGeometries = deserializedFeature->geomOrNull(); + REQUIRE(deserializedGeometries); + REQUIRE(deserializedGeometries->numGeometries() == 2); + + std::vector> deserializedGeometryList; + deserializedGeometries->forEachGeometry([&](auto const& geometry) { + deserializedGeometryList.push_back(geometry); + return true; + }); + REQUIRE(deserializedGeometryList.size() == 2); + + auto const& deserializedPoint = deserializedGeometryList[0]; + auto const& deserializedLine = deserializedGeometryList[1]; + REQUIRE(deserializedPoint->sourceDataReferences()); + REQUIRE(deserializedLine->sourceDataReferences()); + + std::string deserializedPointQualifier; + deserializedPoint->sourceDataReferences()->forEachReference([&](auto const& ref) { + deserializedPointQualifier = std::string(ref.qualifier()); + }); + REQUIRE(deserializedPointQualifier == "Position2D"); + + std::string deserializedLineQualifier; + deserializedLine->sourceDataReferences()->forEachReference([&](auto const& ref) { + deserializedLineQualifier = std::string(ref.qualifier()); + }); + REQUIRE(deserializedLineQualifier == "Line2D"); + + auto sizeStats = deserializedTile->serializationSizeStats(); + REQUIRE(sizeStats["feature-layer"]["geometry-source-data-references"].get() < 1024); + } +} + +TEST_CASE("Feature LOD Field", "[test.featurelayer][test.feature.lod]") +{ + auto layerInfo = LayerInfo::fromJson(R"({ + "layerId": "WayLayer", + 
"type": "Features", + "featureTypes": [ + { + "name": "Way", + "uniqueIdCompositions": [[ + {"partId": "areaId", "datatype": "STR"}, + {"partId": "wayId", "datatype": "U32"} + ]] + } + ] + })"_json); + + auto strings = std::make_shared("FeatureLodNode"); + auto tile = std::make_shared( + TileId::fromWgs84(42., 11., 13), + "FeatureLodNode", + "Tropico", + layerInfo, + strings); + tile->setIdPrefix({{"areaId", "A"}}); + + auto feature = tile->newFeature("Way", {{"wayId", int64_t(42)}}); + auto lodValueResult = feature->evaluate("lod"); + REQUIRE(lodValueResult.has_value()); + auto lodValue = lodValueResult.value().as(); + REQUIRE(lodValue >= 0); + REQUIRE(lodValue <= 7); + REQUIRE(static_cast(feature->lod()) == static_cast(lodValue)); +} + +TEST_CASE("Feature IDs infill optional primary parts", "[test.featurelayer][test.feature.id.optionals]") +{ + auto layerInfo = LayerInfo::fromJson(R"({ + "layerId": "WayLayer", + "type": "Features", + "featureTypes": [ + { + "name": "Way", + "uniqueIdCompositions": [[ + {"partId": "areaId", "datatype": "STR"}, + {"partId": "sideId", "datatype": "U32", "isOptional": true}, + {"partId": "wayId", "datatype": "U32"} + ]] + } + ] + })"_json); + + auto strings = std::make_shared("FeatureOptionalIdNode"); + auto tile = std::make_shared( + TileId::fromWgs84(42., 11., 13), + "FeatureOptionalIdNode", + "Tropico", + layerInfo, + strings); + tile->setIdPrefix({{"areaId", "A"}}); + + auto withoutOptional = tile->newFeature("Way", {{"wayId", int64_t(42)}}); + auto withOptional = tile->newFeature("Way", {{"sideId", int64_t(7)}, {"wayId", int64_t(43)}}); + + auto const withoutOptionalPairs = withoutOptional->id()->keyValuePairs(); + REQUIRE(withoutOptional->id()->toString() == "Way.A.42"); + REQUIRE(withoutOptionalPairs.size() == 2); + REQUIRE(withoutOptionalPairs[0].first == "areaId"); + REQUIRE(std::get(withoutOptionalPairs[0].second) == "A"); + REQUIRE(withoutOptionalPairs[1].first == "wayId"); + 
REQUIRE(std::get(withoutOptionalPairs[1].second) == 42); + + auto const withOptionalPairs = withOptional->id()->keyValuePairs(); + REQUIRE(withOptional->id()->toString() == "Way.A.7.43"); + REQUIRE(withOptionalPairs.size() == 3); + REQUIRE(withOptionalPairs[0].first == "areaId"); + REQUIRE(std::get(withOptionalPairs[0].second) == "A"); + REQUIRE(withOptionalPairs[1].first == "sideId"); + REQUIRE(std::get(withOptionalPairs[1].second) == 7); + REQUIRE(withOptionalPairs[2].first == "wayId"); + REQUIRE(std::get(withOptionalPairs[2].second) == 43); +} + +TEST_CASE("Single-entry validity collections are exposed as singular nodes", "[test.featurelayer.validity]") +{ + auto layerInfo = LayerInfo::fromJson(R"({ + "layerId": "WayLayer", + "type": "Features", + "featureTypes": [ + { + "name": "Way", + "uniqueIdCompositions": [[ + {"partId": "wayId", "description": "way id", "datatype": "U32"} + ]] + } + ] + })"_json); + + auto strings = std::make_shared("ValidityNode"); + auto tile = std::make_shared( + TileId::fromWgs84(42., 11., 13), + "ValidityNode", + "Tropico", + layerInfo, + strings); + auto feature = tile->newFeature("Way", {{"wayId", 1}}); + + auto attr = feature->attributeLayers()->newLayer("limits")->newAttribute("speed"); + attr->validity()->newDirection(Validity::Direction::Both); + + auto relation = tile->newRelation("connectedTo", tile->newFeatureId("Way", {{"wayId", 2}})); + relation->sourceValidity()->newDirection(Validity::Direction::Positive); + relation->targetValidity()->newDirection(Validity::Direction::Negative); + feature->addRelation(relation); + + auto materializedAttr = tile->resolve(attr->addr()); + REQUIRE(materializedAttr); + auto const& attrNode = static_cast(*materializedAttr); + auto const attrValidityNode = attrNode.get(StringPool::ValidityStr); + REQUIRE(attrValidityNode); + REQUIRE(attrValidityNode->toJson() == nlohmann::json{{"direction", "COMPLETE"}}); + + auto materializedRelation = tile->resolve(relation->addr()); + 
REQUIRE(materializedRelation); + auto const& relationNode = static_cast(*materializedRelation); + auto const sourceValidityNode = relationNode.get(StringPool::SourceValidityStr); + REQUIRE(sourceValidityNode); + REQUIRE(sourceValidityNode->toJson() == nlohmann::json{{"direction", "POSITIVE"}}); + + auto const targetValidityNode = relationNode.get(StringPool::TargetValidityStr); + REQUIRE(targetValidityNode); + REQUIRE(targetValidityNode->toJson() == nlohmann::json{{"direction", "NEGATIVE"}}); +} + +TEST_CASE("Semantic feature transition validities expose semantic nodes", "[test.featurelayer.validity]") +{ + auto layerInfo = LayerInfo::fromJson(R"({ + "layerId": "WayLayer", + "type": "Features", + "featureTypes": [ + { + "name": "Way", + "uniqueIdCompositions": [[ + {"partId": "wayId", "description": "way id", "datatype": "U32"} + ]] + } + ] + })"_json); + + auto strings = std::make_shared("TransitionValidityNode"); + auto tile = std::make_shared( + TileId::fromWgs84(42., 11., 13), + "TransitionValidityNode", + "Tropico", + layerInfo, + strings); + + auto fromFeature = tile->newFeature("Way", {{"wayId", 1}}); + auto fromGeometry = fromFeature->geom()->newGeometry(GeomType::Line, 2); + fromGeometry->append({0., 0., 0.}); + fromGeometry->append({1., 0., 0.}); + + auto toFeature = tile->newFeature("Way", {{"wayId", 2}}); + auto toGeometry = toFeature->geom()->newGeometry(GeomType::Line, 2); + toGeometry->append({1., 0., 0.}); + toGeometry->append({2., 0., 0.}); + + auto intersection = tile->newFeature("Way", {{"wayId", 3}}); + auto attr = intersection->attributeLayers()->newLayer("rules")->newAttribute("turn"); + attr->validity()->newFeatureTransition( + fromFeature, + Validity::End, + toFeature, + Validity::Start, + 7); + + auto materializedAttr = tile->resolve(attr->addr()); + REQUIRE(materializedAttr); + auto const& attrNode = static_cast(*materializedAttr); + auto const attrValidityNode = attrNode.get(StringPool::ValidityStr); + REQUIRE(attrValidityNode); + 
REQUIRE(attrValidityNode->toJson() == nlohmann::json{ + {"from", "Way.1"}, + {"fromConnectedEnd", "END"}, + {"to", "Way.2"}, + {"toConnectedEnd", "START"}, + {"transitionNumber", 7}, + }); +} + +TEST_CASE("Validity GeoJSON exposes stage labels only beyond the default stage", "[test.featurelayer.validity]") +{ + auto layerInfo = LayerInfo::fromJson(R"({ + "layerId": "WayLayer", + "type": "Features", + "featureTypes": [ + { + "name": "Way", + "uniqueIdCompositions": [[ + {"partId": "wayId", "description": "way id", "datatype": "U32"} + ]] + } + ], + "stages": 3, + "stageLabels": ["Low-Fi", "High-Fi", "ADAS"], + "highFidelityStage": 1 + })"_json); + + auto tile = std::make_shared( + TileId::fromWgs84(42., 11., 13), + "StageValidityNode", + "Tropico", + layerInfo, + std::make_shared("StageValidityNode")); + + auto highFiValidity = tile->newValidity(); + highFiValidity->setGeometryStage(1U); + highFiValidity->setDirection(Validity::Positive); + auto resolvedHighFiValidity = tile->resolve(highFiValidity->addr()); + REQUIRE(resolvedHighFiValidity); + REQUIRE(resolvedHighFiValidity->toJson() == nlohmann::json{{"direction", "POSITIVE"}}); + + auto adasValidity = tile->newValidity(); + adasValidity->setGeometryStage(2U); + adasValidity->setDirection(Validity::Positive); + auto resolvedAdasValidity = tile->resolve(adasValidity->addr()); + REQUIRE(resolvedAdasValidity); + REQUIRE(resolvedAdasValidity->toJson() == nlohmann::json{ + {"direction", "POSITIVE"}, + {"geometryName", "ADAS"}, + }); +} + +TEST_CASE("FeatureLayer Overlay Merged Views", "[test.featurelayer.overlay]") +{ + auto layerInfo = LayerInfo::fromJson(R"({ + "layerId": "WayLayer", + "type": "Features", + "featureTypes": [ + { + "name": "Way", + "uniqueIdCompositions": [ + [ + { + "partId": "wayId", + "description": "Globally unique 32b integer.", + "datatype": "U32" + } + ] + ] + } + ] + })"_json); + + auto strings = std::make_shared("OverlayNode"); + + auto makeTile = [&](std::string const& nodeName) { + return 
std::make_shared( + TileId::fromWgs84(42., 11., 13), + nodeName, + "OverlayMap", + layerInfo, + strings); + }; + + auto base = makeTile("OverlayNode"); + auto overlayStage1 = makeTile("OverlayNode"); + auto overlayStage2 = makeTile("OverlayNode"); + + auto baseFeature = base->newFeature("Way", {{"wayId", 1}}); + auto baseGeom = baseFeature->geom()->newGeometry(GeomType::Points, 1); + baseGeom->append({10., 10., 0.}); + REQUIRE(baseFeature->attributes()->addField("plainA", "base").has_value()); + REQUIRE(baseFeature->attributes()->addField("overrideA", "base").has_value()); + auto baseLayer = baseFeature->attributeLayers()->newLayer("baseLayer"); + auto baseAttr = baseLayer->newAttribute("baseAttr"); + REQUIRE(baseAttr->addField("value", "base").has_value()); + baseFeature->addRelation("baseRel", base->newFeatureId("Way", {{"wayId", 100}})); + + auto overlayFeature1 = overlayStage1->newFeature("Way", {{"wayId", 1}}); + auto overlayGeom1 = overlayFeature1->geom()->newGeometry(GeomType::Points, 1); + overlayGeom1->append({20., 20., 0.}); + REQUIRE(overlayFeature1->attributes()->addField("overrideA", "overlay1").has_value()); + auto overlayLayer1 = overlayFeature1->attributeLayers()->newLayer("overlayLayer1"); + auto overlayAttr1 = overlayLayer1->newAttribute("overlayAttr1"); + REQUIRE(overlayAttr1->addField("value", "overlay1").has_value()); + overlayFeature1->addRelation("overlayRel1", overlayStage1->newFeatureId("Way", {{"wayId", 101}})); + + auto overlayFeature2 = overlayStage2->newFeature("Way", {{"wayId", 1}}); + auto overlayGeom2 = overlayFeature2->geom()->newGeometry(GeomType::Points, 1); + overlayGeom2->append({30., 30., 0.}); + REQUIRE(overlayFeature2->attributes()->addField("plainB", "overlay2").has_value()); + REQUIRE(overlayFeature2->attributes()->addField("overrideA", "overlay2").has_value()); + auto overlayLayer2 = overlayFeature2->attributeLayers()->newLayer("overlayLayer2"); + auto overlayAttr2 = overlayLayer2->newAttribute("overlayAttr2"); + 
REQUIRE(overlayAttr2->addField("value", "overlay2").has_value()); + overlayFeature2->addRelation("overlayRel2", overlayStage2->newFeatureId("Way", {{"wayId", 102}})); + + base->attachOverlay(overlayStage1); + base->attachOverlay(overlayStage2); + + auto mergedFeature = base->at(0); + REQUIRE(mergedFeature); + + SECTION("Typed access sees merged data") + { + REQUIRE(mergedFeature->geomOrNull()->numGeometries() == 3); + REQUIRE(mergedFeature->mergedAttributesOrNull()->size() == 3); + REQUIRE(mergedFeature->evaluate("properties.plainA").value().toString() == "base"); + REQUIRE(mergedFeature->evaluate("properties.plainB").value().toString() == "overlay2"); + REQUIRE(mergedFeature->evaluate("properties.overrideA").value().toString() == "overlay2"); + REQUIRE(mergedFeature->attributeLayersOrNull()->size() == 3); + REQUIRE(mergedFeature->numRelations() == 3); + } + + SECTION("ModelNode access sees merged geometry and relations") + { + auto const& mergedFeatureNode = static_cast(*mergedFeature); + + auto geometryNode = mergedFeatureNode.get(StringPool::GeometryStr); + REQUIRE(geometryNode); + auto geometryArrayNode = geometryNode->get(StringPool::GeometriesStr); + REQUIRE(geometryArrayNode); + REQUIRE(geometryArrayNode->size() == 3); + + auto relationsNode = mergedFeatureNode.get(StringPool::RelationsStr); + REQUIRE(relationsNode); + REQUIRE(relationsNode->size() == 3); + + auto relationsJson = relationsNode->toJson(); + REQUIRE(relationsJson[0]["name"] == "baseRel"); + REQUIRE(relationsJson[1]["name"] == "overlayRel1"); + REQUIRE(relationsJson[2]["name"] == "overlayRel2"); + } + + SECTION("ModelNode access sees merged attribute layers") + { + auto const& mergedFeatureNode = static_cast(*mergedFeature); + + auto propertiesNode = mergedFeatureNode.get(StringPool::PropertiesStr); + REQUIRE(propertiesNode); + auto layersNode = propertiesNode->get(StringPool::LayerStr); + REQUIRE(layersNode); + REQUIRE(layersNode->size() == 3); + + auto layersJson = layersNode->toJson(); + 
REQUIRE(layersJson.contains("baseLayer")); + REQUIRE(layersJson.contains("overlayLayer1")); + REQUIRE(layersJson.contains("overlayLayer2")); + } +} + +TEST_CASE("FeatureLayer Overlay Size Check", "[test.featurelayer.overlay]") +{ + auto layerInfo = LayerInfo::fromJson(R"({ + "layerId": "WayLayer", + "type": "Features", + "featureTypes": [ + { + "name": "Way", + "uniqueIdCompositions": [ + [ + { + "partId": "wayId", + "description": "Globally unique 32b integer.", + "datatype": "U32" + } + ] + ] + } + ] + })"_json); + + auto strings = std::make_shared("OverlayNode"); + auto base = std::make_shared( + TileId::fromWgs84(42., 11., 13), + "OverlayNode", + "OverlayMap", + layerInfo, + strings); + auto overlay = std::make_shared( + TileId::fromWgs84(42., 11., 13), + "OverlayNode", + "OverlayMap", + layerInfo, + strings); + + base->newFeature("Way", {{"wayId", 1}}); + base->newFeature("Way", {{"wayId", 2}}); + overlay->newFeature("Way", {{"wayId", 1}}); + + REQUIRE_THROWS(base->attachOverlay(overlay)); } // Helper function to compare two points with some tolerance diff --git a/test/unit/test-service-ttl.cpp b/test/unit/test-service-ttl.cpp index 2c04fd8c..8311cd46 100644 --- a/test/unit/test-service-ttl.cpp +++ b/test/unit/test-service-ttl.cpp @@ -84,6 +84,13 @@ class TestTtlDataSource : public DataSource std::optional tileTtlOverride_; }; +class TestLayerTilesRequest : public LayerTilesRequest +{ +public: + using LayerTilesRequest::LayerTilesRequest; + using LayerTilesRequest::toJson; +}; + TEST_CASE("Service TTL behavior", "[Service][TTL]") { auto cache = std::make_shared(1024); @@ -173,3 +180,35 @@ TEST_CASE("Service TTL behavior", "[Service][TTL]") } } +TEST_CASE("LayerTilesRequest preserves staged intent in JSON", "[Service][JSON]") +{ + SECTION("Legacy unstaged requests serialize as tileIds") + { + auto request = std::make_shared( + "Tropico", + "WayLayer", + std::vector{TileId(12345)}); + + REQUIRE(request->toJson() == nlohmann::json{ + {"mapId", "Tropico"}, + 
{"layerId", "WayLayer"}, + {"tileIds", nlohmann::json::array({12345})}, + }); + } + + SECTION("Single-bucket staged requests serialize as tileIdsByNextStage") + { + auto request = std::make_shared( + "Tropico", + "WayLayer", + std::vector>{{TileId(12345)}}); + + REQUIRE(request->toJson() == nlohmann::json{ + {"mapId", "Tropico"}, + {"layerId", "WayLayer"}, + {"tileIdsByNextStage", nlohmann::json::array({ + nlohmann::json::array({12345}) + })}, + }); + } +}