From 7e2a456f3c96c292e1fef831f87061e51d4f2d5f Mon Sep 17 00:00:00 2001 From: jamjamjon Date: Tue, 17 Mar 2026 15:09:18 +0800 Subject: [PATCH 1/3] chore: upgrade ort to v2.0.0-rc12 with multiversioning support --- .github/workflows/rust-ci.yml | 2 +- Cargo.toml | 14 ++- docs/cargo-features/ep.md | 95 ++++++++++++++++++++ docs/cargo-features/ort.md | 111 +++++------------------- mkdocs.yml | 3 +- src/models/vision/pipeline/basemodel.rs | 2 +- src/models/vlm/sam3_image/impl.rs | 6 +- src/ort/dtype.rs | 2 +- src/ort/engine.rs | 36 ++++---- src/ort/inputs.rs | 2 +- src/ort/x.rs | 12 +-- src/ort/x_any.rs | 2 +- src/ort/xs.rs | 4 +- 13 files changed, 169 insertions(+), 122 deletions(-) create mode 100644 docs/cargo-features/ep.md diff --git a/.github/workflows/rust-ci.yml b/.github/workflows/rust-ci.yml index 479d462..3154661 100644 --- a/.github/workflows/rust-ci.yml +++ b/.github/workflows/rust-ci.yml @@ -49,7 +49,7 @@ jobs: - name: Clippy run: | if [ "${{ matrix.feature }}" = "all-features" ]; then - cargo clippy --no-default-features --features "all-models,video,viewer,annotator,ort-download-binaries,ort-load-dynamic" --all-targets -- -D warnings + cargo clippy --no-default-features --features "all-models,video,viewer,annotator,ort-download-binaries,ort-load-dynamic,ort-api-24" --all-targets -- -D warnings elif [ "${{ matrix.feature }}" = "" ]; then cargo clippy --no-default-features --all-targets -- -D warnings else diff --git a/Cargo.toml b/Cargo.toml index d94691a..01a34d9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -40,7 +40,7 @@ fast_image_resize = { version = "5.5.0", default-features = false, features = [" minifb = { version = "0.28.0", optional = true } video-rs = { version = "0.10.5", features = ["ndarray"], optional = true } ndarray-npy = { version = "0.10", optional = true } -ort = { version = "=2.0.0-rc.11", default-features = false, features = [ +ort = { version = "=2.0.0-rc.12", default-features = false, features = [ "tls-rustls", "copy-dylibs", "half", 
@@ -75,12 +75,22 @@ strip = true [features] -default = ["ort-download-binaries", "vision", "annotator"] +default = ["ort-download-binaries", "vision", "annotator", "ort-api-24"] # ONNXRuntime loading strategies ort-download-binaries = ["ort/download-binaries"] ort-load-dynamic = ["ort/load-dynamic"] +# ONNXRuntime API version selection +ort-api-17 = ["ort/api-17"] +ort-api-18 = ["ort/api-18"] +ort-api-19 = ["ort/api-19"] +ort-api-20 = ["ort/api-20"] +ort-api-21 = ["ort/api-21"] +ort-api-22 = ["ort/api-22"] +ort-api-23 = ["ort/api-23"] +ort-api-24 = ["ort/api-24"] + # Cuda features (Internal use) cuda-runtime = ["dep:cudarc"] cuda-runtime-11040 = ["cuda-runtime", "cudarc/cuda-11040"] diff --git a/docs/cargo-features/ep.md b/docs/cargo-features/ep.md new file mode 100644 index 0000000..9992cf5 --- /dev/null +++ b/docs/cargo-features/ep.md @@ -0,0 +1,95 @@ +# Execution Providers + +Hardware acceleration for inference. Enable the one matching your hardware. + +## Execution Providers + +| Feature | Platform | Description | +|---------|----------|-------------| +| `cuda` | NVIDIA GPU | CUDA execution provider | +| `tensorrt` | NVIDIA GPU | TensorRT execution provider | +| `nvrtx` | NVIDIA GPU | NVRTX execution provider | +| `coreml` | Apple Silicon | macOS/iOS inference | +| `openvino` | Intel | CPU/GPU/VPU acceleration | +| `directml` | Windows | DirectML acceleration | +| `rocm` | AMD GPU | ROCm acceleration | +| `onednn` | Intel | Deep Neural Network Library | +| `cann` | Huawei | Ascend NPU | +| `rknpu` | Rockchip | NPU acceleration | +| `armnn` | ARM | Neural Network SDK | +| `xnnpack` | Mobile | CPU optimization | +| `webgpu` | Web | WebGPU/Chrome | +| `nnapi` | Android | Neural Networks API | +| `qnn` | Qualcomm | SNPE acceleration | +| `tvm` | - | Apache TVM | +| `azure` | Azure | ML execution provider | +| `migraphx` | AMD | MIGraphX | +| `vitis` | Xilinx | Vitis AI | + +--- + +## CUDA Image Processor + +!!! 
info "Prerequisites" + Requires [cudarc](https://github.com/coreylowman/cudarc) for CUDA kernels. + +Enable GPU-accelerated image preprocessing: + +| Pattern | Description | Example | +|---------|-------------|---------| +| `<ep>-full` | Auto-detect CUDA version via `nvcc` | `cuda-full`, `tensorrt-full` | +| `<ep>-cuda-<version>` | Specific CUDA version | `cuda-12040`, `tensorrt-cuda-12040` | + +- **`<ep>`**: `cuda`, `tensorrt`, or `nvrtx` +- **`<version>`**: Specific CUDA version + +### Supported CUDA Versions + +| Version | Features | +|---------|----------| +| 11.x | `cuda-11040`, `cuda-11050`, `cuda-11060`, `cuda-11070`, `cuda-11080` | +| 12.x | `cuda-12000`, `cuda-12010`, `cuda-12020`, `cuda-12030`, `cuda-12040`, `cuda-12050`, `cuda-12060`, `cuda-12080`, `cuda-12090` | +| 13.x | `cuda-13000`, `cuda-13010` | + +!!! note "TensorRT/NVRTX Versions" + Replace `cuda-` with `tensorrt-cuda-` or `nvrtx-cuda-` for TensorRT/NVRTX versions. + Example: `tensorrt-cuda-12040`, `nvrtx-cuda-12080` + +### Feature & Device Combinations + +| Scenario | Feature | Model Device | Processor | Speed | +|----------|---------|--------------|-----------|-------| +| CPU Only | `vision` (default) | `cpu` | `cpu` | Baseline | +| CUDA | `cuda` | `cuda` | `cpu` | Slow preprocess | +| CUDA (fast) | `cuda-full` | `cuda` | `cuda` | Fast preprocess | +| TensorRT | `tensorrt` | `tensorrt` | `cpu` | Slow preprocess | +| TensorRT (fast) | `tensorrt-full` | `tensorrt` | `cuda` | Fast preprocess | + +!!! tip "TensorRT EP + CUDA EP + CUDA Image Processor" + ```toml + features = ["tensorrt-full", "cuda"] + # Or + features = ["tensorrt", "cuda-full"] + ``` + +!!! warning "Device Consistency" + Different EPs can use different devices (e.g., `tensorrt:0` + `cuda:1`). + + However, when using **NVIDIA EP + CUDA image processor**, they **MUST** use the **same GPU ID**: + ```toml + # ✅ Correct: same GPU + --device cuda:0 --processor-device cuda:0 + + # ❌ Wrong: different GPUs + --device cuda:0 --processor-device cuda:1 + ``` + + +!!! 
danger "Don't mix CUDA versions" + ```toml + # ❌ Wrong + features = ["cuda-12040", "cuda-11080"] + + # ✅ Correct + features = ["tensorrt-full"] + ``` diff --git a/docs/cargo-features/ort.md b/docs/cargo-features/ort.md index a94d8ca..c2a53a0 100644 --- a/docs/cargo-features/ort.md +++ b/docs/cargo-features/ort.md @@ -1,102 +1,37 @@ -# Execution Providers +# ONNX Runtime +ONNX Runtime configuration and API version management. -Hardware acceleration for inference. Enable the one matching your hardware. - -## ONNX Runtime +## Configuration | Feature | Description | Default | |---------|-------------|:-------:| | `ort-download-binaries` | Auto-download ONNX Runtime binaries from [pyke](https://ort.pyke.io) | ✓ | | `ort-load-dynamic` | Manual linking for custom builds. See [Linking Guide](https://ort.pyke.io/setup/linking) | x | -## Execution Providers - -| Feature | Platform | Description | -|---------|----------|-------------| -| `cuda` | NVIDIA GPU | CUDA execution provider | -| `tensorrt` | NVIDIA GPU | TensorRT execution provider | -| `nvrtx` | NVIDIA GPU | NVRTX execution provider | -| `coreml` | Apple Silicon | macOS/iOS inference | -| `openvino` | Intel | CPU/GPU/VPU acceleration | -| `directml` | Windows | DirectML acceleration | -| `rocm` | AMD GPU | ROCm acceleration | -| `onednn` | Intel | Deep Neural Network Library | -| `cann` | Huawei | Ascend NPU | -| `rknpu` | Rockchip | NPU acceleration | -| `armnn` | ARM | Neural Network SDK | -| `xnnpack` | Mobile | CPU optimization | -| `webgpu` | Web | WebGPU/Chrome | -| `nnapi` | Android | Neural Networks API | -| `qnn` | Qualcomm | SNPE acceleration | -| `tvm` | - | Apache TVM | -| `azure` | Azure | ML execution provider | -| `migraphx` | AMD | MIGraphX | -| `vitis` | Xilinx | Vitis AI | - ---- - -## CUDA Image Processor - -!!! info "Prerequisites" - Requires [cudarc](https://github.com/coreylowman/cudarc) for CUDA kernels. 
- -Enable GPU-accelerated image preprocessing: - -| Pattern | Description | Example | -|---------|-------------|---------| -| `<ep>-full` | Auto-detect CUDA version via `nvcc` | `cuda-full`, `tensorrt-full` | -| `<ep>-cuda-<version>` | Specific CUDA version | `cuda-12040`, `tensorrt-cuda-12040` | - -- **`<ep>`**: `cuda`, `tensorrt`, or `nvrtx` -- **`<version>`**: Specific CUDA version - -### Supported CUDA Versions - -| Version | Features | -|---------|----------| -| 11.x | `cuda-11040`, `cuda-11050`, `cuda-11060`, `cuda-11070`, `cuda-11080` | -| 12.x | `cuda-12000`, `cuda-12010`, `cuda-12020`, `cuda-12030`, `cuda-12040`, `cuda-12050`, `cuda-12060`, `cuda-12080`, `cuda-12090` | -| 13.x | `cuda-13000`, `cuda-13010` | +### API Version Selection -!!! note "TensorRT/NVRTX Versions" - Replace `cuda-` with `tensorrt-cuda-` or `nvrtx-cuda-` for TensorRT/NVRTX versions. - Example: `tensorrt-cuda-12040`, `nvrtx-cuda-12080` +This library supports ONNX Runtime versions 1.17 through 1.24 via API version features. -### Feature & Device Combinations +| Feature | ONNX Runtime | Requirements | +|---------|--------------|--------------| +| `ort-api-17` | v1.17 | Baseline | +| `ort-api-18` | v1.18 | - | +| `ort-api-19` | v1.19 | - | +| `ort-api-20` | v1.20 | Adapter API available | +| `ort-api-21` | v1.21 | - | +| `ort-api-22` | v1.22 | - | +| `ort-api-23` | v1.23 | - | +| `ort-api-24` | v1.24 | **Default** - Latest features | -| Scenario | Feature | Model Device | Processor | Speed | -|----------|---------|--------------|-----------|-------| -| CPU Only | `vision` (default) | `cpu` | `cpu` | Baseline | -| CUDA | `cuda` | `cuda` | `cpu` | Slow preprocess | -| CUDA (fast) | `cuda-full` | `cuda` | `cuda` | Fast preprocess | -| TensorRT | `tensorrt` | `tensorrt` | `cpu` | Slow preprocess | -| TensorRT (fast) | `tensorrt-full` | `tensorrt` | `cuda` | Fast preprocess | - -!!! 
tip "TensorRT EP + CUDA EP + CUDA Image Processor" - ```toml - features = ["tensorrt-full", "cuda"] - # Or - features = ["tensorrt", "cuda-full"] - ``` - -!!! warning "Device Consistency" - Different EPs can use different devices (e.g., `tensorrt:0` + `cuda:1`). - - However, when using **NVIDIA EP + CUDA image processor**, they **MUST** use the **same GPU ID**: +!!! tip "API Version Selection" ```toml - # ✅ Correct: same GPU - --device cuda:0 --processor-device cuda:0 + # Default uses api-24 (latest) + usls = { version = "0.2", features = ["vision"] } - # ❌ Wrong: different GPUs - --device cuda:0 --processor-device cuda:1 + # Specify API version explicitly + usls = { version = "0.2", features = ["vision", "ort-api-20"] } ``` - -!!! danger "Don't mix CUDA versions" - ```toml - # ❌ Wrong - features = ["cuda-12040", "cuda-11080"] - - # ✅ Correct - features = ["tensorrt-full"] - ``` +!!! note "Version Compatibility" + - Each API version includes all features from previous versions + - Check [ORT multiversion docs](https://ort.pyke.io/setup/multiversion) for minimum version requirements \ No newline at end of file diff --git a/mkdocs.yml b/mkdocs.yml index 549a728..b530cb2 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -69,7 +69,8 @@ nav: - Integration: getting-started/integration.md - Cargo Features: - Overview: cargo-features/overview.md - - ONNX Runtime & EP: cargo-features/ort.md + - ONNX Runtime Version: cargo-features/ort.md + - Execution Provider: cargo-features/ep.md - Image Formats: cargo-features/image-formats.md - Model Categories: cargo-features/models.md - Utilities: cargo-features/utils.md diff --git a/src/models/vision/pipeline/basemodel.rs b/src/models/vision/pipeline/basemodel.rs index 2ff2353..67f0c7c 100644 --- a/src/models/vision/pipeline/basemodel.rs +++ b/src/models/vision/pipeline/basemodel.rs @@ -1,5 +1,5 @@ use anyhow::Result; -use ort::tensor::TensorElementType; +use ort::value::TensorElementType; use crate::{ Config, Device, Engine, Engines, 
FromConfig, Image, ImageProcessor, Model, Module, Scale, Task, diff --git a/src/models/vlm/sam3_image/impl.rs b/src/models/vlm/sam3_image/impl.rs index 4d034c4..6881a19 100644 --- a/src/models/vlm/sam3_image/impl.rs +++ b/src/models/vlm/sam3_image/impl.rs @@ -29,7 +29,7 @@ pub struct Sam3Image { impl Sam3Image { fn extract_f32(val: &DynValue) -> Result> { - use ort::tensor::TensorElementType as TE; + use ort::value::TensorElementType as TE; use ort::value::ValueType; match val.dtype() { ValueType::Tensor { ty, .. } => match ty { @@ -54,7 +54,7 @@ impl Sam3Image { } use ort::memory::AllocationDevice; - use ort::tensor::TensorElementType as TE; + use ort::value::TensorElementType as TE; use ort::value::ValueType; let owned = text_feat @@ -198,7 +198,7 @@ impl Sam3Image { let mut res = Vec::with_capacity(texts.len()); for chunk in texts.chunks(self.text_batch) { use ort::memory::AllocationDevice; - use ort::tensor::TensorElementType as TE; + use ort::value::TensorElementType as TE; use ort::value::ValueType; let encs = self.text_processor.encode_texts(chunk, true)?; diff --git a/src/ort/dtype.rs b/src/ort/dtype.rs index 1f004a5..589c773 100644 --- a/src/ort/dtype.rs +++ b/src/ort/dtype.rs @@ -1,4 +1,4 @@ -use ort::tensor::TensorElementType; +use ort::value::TensorElementType; impl From for crate::DType { fn from(dtype: TensorElementType) -> Self { diff --git a/src/ort/engine.rs b/src/ort/engine.rs index 9ca7383..7263edd 100644 --- a/src/ort/engine.rs +++ b/src/ort/engine.rs @@ -4,9 +4,8 @@ use half::{bf16, f16}; use ndarray::{Array, IxDyn}; use ort::{ execution_providers::ExecutionProvider, - session::{builder::GraphOptimizationLevel, input::SessionInputs, Session, SessionInputValue}, - tensor::TensorElementType, - value::{DynValue, Value}, + session::{builder::GraphOptimizationLevel, Session, SessionInputValue, SessionInputs}, + value::{DynValue, TensorElementType, Value}, }; use std::{ collections::{HashMap, HashSet}, @@ -493,7 +492,7 @@ impl Engine { dtype: 
&TensorElementType, ) -> Result> { use ort::memory::{AllocationDevice, AllocatorType, MemoryInfo, MemoryType}; - use ort::tensor::Shape; + use ort::value::Shape; use ort::value::TensorRefMut; // Only f32 is supported for now (can extend later) @@ -1129,25 +1128,32 @@ impl Engine { } // threads - builder = - builder.with_intra_threads(config.num_intra_threads.unwrap_or(n_threads_available))?; - builder = builder.with_inter_threads(config.num_inter_threads.unwrap_or(8))?; + builder = builder + .with_intra_threads(config.num_intra_threads.unwrap_or(n_threads_available)) + .map_err(|e| anyhow::anyhow!("Failed to set intra threads: {e}"))?; + builder = builder + .with_inter_threads(config.num_inter_threads.unwrap_or(8)) + .map_err(|e| anyhow::anyhow!("Failed to set inter threads: {e}"))?; // optimization #[cfg(not(feature = "tensorrt"))] if let Some(level) = config.graph_opt_level { - builder = builder.with_optimization_level(match level { - 0 => GraphOptimizationLevel::Disable, - 1 => GraphOptimizationLevel::Level1, - 2 => GraphOptimizationLevel::Level2, - 3 => GraphOptimizationLevel::Level3, - _ => anyhow::bail!("Invalid graph optimization level: {level}"), - })?; + builder = builder + .with_optimization_level(match level { + 0 => GraphOptimizationLevel::Disable, + 1 => GraphOptimizationLevel::Level1, + 2 => GraphOptimizationLevel::Level2, + 3 => GraphOptimizationLevel::Level3, + _ => anyhow::bail!("Invalid graph optimization level: {level}"), + }) + .map_err(|e| anyhow::anyhow!("Failed to set graph optimization level: {e}"))?; } #[cfg(feature = "tensorrt")] { tracing::info!("Disabling ort graph optimization for TensorRT. 
`ort_graph_opt_level` setting is ignored."); - builder = builder.with_optimization_level(GraphOptimizationLevel::Disable)?; + builder = builder + .with_optimization_level(GraphOptimizationLevel::Disable) + .map_err(|e| anyhow::anyhow!("Failed to disable graph optimization: {e}"))?; } let session = builder.commit_from_file(model_file)?; diff --git a/src/ort/inputs.rs b/src/ort/inputs.rs index 0b554a6..e355370 100644 --- a/src/ort/inputs.rs +++ b/src/ort/inputs.rs @@ -1,4 +1,4 @@ -use ort::session::{input::SessionInputs, SessionInputValue}; +use ort::session::{SessionInputValue, SessionInputs}; use crate::{XAny, X}; diff --git a/src/ort/x.rs b/src/ort/x.rs index 69a4bfa..f42161b 100644 --- a/src/ort/x.rs +++ b/src/ort/x.rs @@ -15,7 +15,7 @@ use ::ort::value::Tensor as OrtTensor; impl TryFrom<&crate::X> for OrtTensor where - A: Clone + 'static + ::ort::tensor::PrimitiveTensorElementType + std::fmt::Debug, + A: Clone + 'static + ::ort::value::PrimitiveTensorElementType + std::fmt::Debug, { type Error = anyhow::Error; @@ -29,7 +29,7 @@ where impl TryFrom<&&crate::X> for OrtTensor where - A: Clone + 'static + ::ort::tensor::PrimitiveTensorElementType + std::fmt::Debug, + A: Clone + 'static + ::ort::value::PrimitiveTensorElementType + std::fmt::Debug, { type Error = anyhow::Error; @@ -40,7 +40,7 @@ where impl TryFrom> for OrtTensor where - A: Clone + 'static + ::ort::tensor::PrimitiveTensorElementType + std::fmt::Debug, + A: Clone + 'static + ::ort::value::PrimitiveTensorElementType + std::fmt::Debug, { type Error = anyhow::Error; @@ -69,7 +69,7 @@ where impl TryFrom> for ::ort::session::SessionInputValue<'static> where - A: Clone + 'static + ::ort::tensor::PrimitiveTensorElementType + std::fmt::Debug, + A: Clone + 'static + ::ort::value::PrimitiveTensorElementType + std::fmt::Debug, { type Error = anyhow::Error; @@ -84,7 +84,7 @@ where /// - **Non-contiguous layout**: auto-fallback to copy (creates owned tensor) impl<'a, A> TryFrom> for 
::ort::session::SessionInputValue<'a> where - A: Clone + 'static + ::ort::tensor::PrimitiveTensorElementType + std::fmt::Debug, + A: Clone + 'static + ::ort::value::PrimitiveTensorElementType + std::fmt::Debug, { type Error = anyhow::Error; @@ -118,7 +118,7 @@ where /// ``` impl TryFrom<&crate::X> for ::ort::session::SessionInputValue<'static> where - A: Clone + 'static + ::ort::tensor::PrimitiveTensorElementType + std::fmt::Debug, + A: Clone + 'static + ::ort::value::PrimitiveTensorElementType + std::fmt::Debug, { type Error = anyhow::Error; diff --git a/src/ort/x_any.rs b/src/ort/x_any.rs index 0322ec1..fa62eac 100644 --- a/src/ort/x_any.rs +++ b/src/ort/x_any.rs @@ -26,7 +26,7 @@ impl<'a> TryFrom<&'a XAny> for ort::session::SessionInputValue<'a> { XAny::Device(cuda_tensor) => { // Zero-copy CUDA path: create ORT CUDA tensor directly use ort::memory::{AllocationDevice, AllocatorType, MemoryInfo, MemoryType}; - use ort::tensor::Shape; + use ort::value::Shape; use ort::value::TensorRefMut; // Create MemoryInfo for CUDA device diff --git a/src/ort/xs.rs b/src/ort/xs.rs index ae15e45..892864c 100644 --- a/src/ort/xs.rs +++ b/src/ort/xs.rs @@ -6,8 +6,8 @@ use half::{bf16, f16}; use ndarray::Array; use num_traits::{cast, NumCast}; use ort::{ - memory::AllocationDevice, session::SessionOutputs, tensor::PrimitiveTensorElementType, - tensor::TensorElementType, value::ValueType, + memory::AllocationDevice, session::SessionOutputs, value::PrimitiveTensorElementType, + value::TensorElementType, value::ValueType, }; use rayon::prelude::*; use std::{ From bebf8e1a65b626a4480c6fa1a2d6a4bdbc522e8f Mon Sep 17 00:00:00 2001 From: jamjamjon Date: Tue, 17 Mar 2026 15:28:50 +0800 Subject: [PATCH 2/3] fix ci --- .github/workflows/rust-ci.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/rust-ci.yml b/.github/workflows/rust-ci.yml index 3154661..92d09d1 100644 --- a/.github/workflows/rust-ci.yml +++ b/.github/workflows/rust-ci.yml @@ -74,7 
+74,7 @@ jobs: uses: dtolnay/rust-toolchain@stable - name: Check - run: cargo check --no-default-features --features "all-models,video,viewer,annotator,ort-download-binaries,ort-load-dynamic" --all-targets + run: cargo check --no-default-features --features "all-models,video,viewer,annotator,ort-download-binaries,ort-load-dynamic,ort-api-24" --all-targets test: name: cargo-test @@ -94,7 +94,7 @@ jobs: uses: dtolnay/rust-toolchain@nightly - name: Test - run: cargo +nightly test --no-default-features --features "all-models,video,viewer,annotator,ort-download-binaries,ort-load-dynamic" --all-targets + run: cargo +nightly test --no-default-features --features "all-models,video,viewer,annotator,ort-download-binaries,ort-load-dynamic,ort-api-24" --all-targets build-linux: needs: test @@ -120,4 +120,4 @@ jobs: uses: dtolnay/rust-toolchain@stable - name: Build - run: cargo build --no-default-features --features "all-models,video,viewer,annotator,ort-download-binaries,ort-load-dynamic" \ No newline at end of file + run: cargo build --no-default-features --features "all-models,video,viewer,annotator,ort-download-binaries,ort-load-dynamic,ort-api-24" \ No newline at end of file From 4069944836f48819e98f09ee328f37149db841bf Mon Sep 17 00:00:00 2001 From: jamjamjon Date: Tue, 17 Mar 2026 15:37:42 +0800 Subject: [PATCH 3/3] fix ci --- .github/workflows/rust-ci.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/rust-ci.yml b/.github/workflows/rust-ci.yml index 92d09d1..6d39d52 100644 --- a/.github/workflows/rust-ci.yml +++ b/.github/workflows/rust-ci.yml @@ -22,8 +22,8 @@ jobs: - "" - "vision" - "vlm" - - "ort-download-binaries" - - "ort-load-dynamic" + - "ort-download-binaries,ort-api-24" + - "ort-load-dynamic,ort-api-24" - "video" - "viewer" - "annotator"