Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
904 changes: 888 additions & 16 deletions Cargo.lock

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion Cargo.toml
Original file line number Diff line number Diff line change
@@ -1,3 +1,3 @@
[workspace]
members = ["core", "daemon/openastrovizd"]
members = ["core", "daemon/openastrovizd", "webgpu-compute"]
resolver = "2"
25 changes: 25 additions & 0 deletions core/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,31 @@ pub enum PropagationError {
Propagation(#[from] sgp4::Error),
}

/// Errors surfaced by GPU backend implementations.
#[derive(Debug, Error)]
pub enum GpuBackendError {
    /// The backend has not finished initialization and cannot accept work yet.
    #[error("backend is not ready")]
    NotReady,
    /// The requested dispatch batch size is unusable (e.g. zero); carries the
    /// rejected value.
    #[error("invalid dispatch batch size: {0}")]
    InvalidBatchSize(u32),
    /// The backend failed to compile its compute shader; carries the
    /// backend-specific compiler message.
    #[error("shader compilation failed: {0}")]
    ShaderCompile(String),
    /// Encoding or submitting a compute dispatch failed; carries the
    /// backend-specific error text.
    #[error("compute dispatch failed: {0}")]
    Dispatch(String),
}

/// Contract shared by CUDA and WebGPU compute backends.
pub trait GpuBackend {
    /// User-visible backend name (e.g. for logging/UI display).
    fn name(&self) -> &'static str;

    /// Whether this backend has been initialized and can accept work.
    fn is_ready(&self) -> bool;

    /// Dispatch one FP32 SGP4 kernel step for the given orbital batch size.
    ///
    /// # Errors
    ///
    /// Implementations return a [`GpuBackendError`] when the batch size is
    /// invalid or the dispatch cannot be encoded/submitted.
    fn dispatch_sgp4_fp32_step(&self, batch_size: u32) -> Result<(), GpuBackendError>;
}

/// Thin wrapper around the Vallado SGP4 implementation from the [`sgp4`] crate.
#[derive(Debug, Clone)]
pub struct Sgp4Propagator {
Expand Down
10 changes: 10 additions & 0 deletions web/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,16 @@ VITE_DAEMON_WS="wss://example.com/ws/orbits" \
yarn dev
```



## Build WebGPU compute wasm artifact

```bash
yarn build:wasm
```

This runs `wasm-pack` against `../webgpu-compute`. Note that `--out-dir` is resolved relative to the crate being built, so `--out-dir pkg` emits the browser-loadable bindings into `webgpu-compute/pkg/`; pass `--out-dir ../web/pkg` instead if the artifacts should land in `web/pkg/`.

## Build & lint

```bash
Expand Down
3 changes: 2 additions & 1 deletion web/package.json
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,8 @@
"dev": "vite",
"build": "tsc --noEmit && vite build",
"preview": "vite preview",
"lint": "eslint . --ext ts,tsx --max-warnings 0"
"lint": "eslint . --ext ts,tsx --max-warnings 0",
"build:wasm": "wasm-pack build ../webgpu-compute --target web --release --out-dir pkg"
},
"dependencies": {
"@radix-ui/react-scroll-area": "^1.0.5",
Expand Down
21 changes: 21 additions & 0 deletions webgpu-compute/Cargo.toml
Original file line number Diff line number Diff line change
@@ -0,0 +1,21 @@
[package]
name = "openastroviz-webgpu-compute"
version = "0.1.0"
edition = "2021"
description = "WebGPU compute scaffolding for OpenAstroViz GPU backend"
authors = ["OpenAstroViz Contributors"]
license = "MIT"

[lib]
# cdylib: the wasm artifact wasm-pack/wasm-bindgen packages for the browser.
# rlib: lets other workspace crates depend on this crate natively.
crate-type = ["cdylib", "rlib"]

[dependencies]
openastroviz-core = { path = "../core" }
thiserror = "1"
# "wgsl" enables the WGSL shader frontend used by the bundled kernel.
wgpu = { version = "0.20", features = ["wgsl"] }
wasm-bindgen = "0.2"
wasm-bindgen-futures = "0.4"

# Browser API bindings needed to reach navigator.gpu from wasm.
[dependencies.web-sys]
version = "0.3"
features = ["Window", "Navigator", "Gpu"]
15 changes: 15 additions & 0 deletions webgpu-compute/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,15 @@
# openastroviz-webgpu-compute

WebGPU compute scaffolding for OpenAstroViz. This crate introduces a browser-native
`GpuBackend` implementation with a WGSL kernel entrypoint intended for the FP32
SGP4 port from CUDA.

## Build to WebAssembly with wasm-pack

```bash
wasm-pack build webgpu-compute --target web --release --out-dir pkg
```

The generated `pkg/` directory can be consumed directly by the React/Vite frontend:
call `initWebGpuBackend()` to obtain a backend handle, then dispatch compute work
on that handle via `dispatchSgp4Fp32Step(batchSize)`.
11 changes: 11 additions & 0 deletions webgpu-compute/shaders/sgp4_fp32.wgsl
Original file line number Diff line number Diff line change
@@ -0,0 +1,11 @@
// OpenAstroViz FP32 SGP4 WebGPU scaffold.
//
// The body here intentionally keeps a lightweight placeholder while we port the
// full CUDA FP32 SGP4 math and data-layout bindings. It validates workgroup
// sizing and dispatch wiring for browser compute execution.

// NOTE: the workgroup size of 64 must stay in sync with WORKGROUP_SIZE in the
// Rust host code, which rounds batch sizes up to whole workgroups on dispatch.
@compute @workgroup_size(64)
fn main(@builtin(global_invocation_id) gid: vec3<u32>) {
    // Placeholder so the compiler keeps the invocation alive.
    let _lane = gid.x;
}
126 changes: 126 additions & 0 deletions webgpu-compute/src/lib.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,126 @@
use openastroviz_core::{GpuBackend, GpuBackendError};
use wasm_bindgen::prelude::*;

/// Invocations per workgroup; must match the `@workgroup_size` attribute in
/// the WGSL kernel so dispatch rounding covers every batch element exactly.
const WORKGROUP_SIZE: u32 = 64;

/// WGSL compute kernel scaffold for the FP32 SGP4 pipeline.
///
/// TODO: Port the existing CUDA FP32 orbital update math fully; this shader currently
/// establishes the dispatch and invocation shape so the React client can drive it.
const SGP4_FP32_WGSL: &str = include_str!("../shaders/sgp4_fp32.wgsl");

/// Browser-native WebGPU implementation of the `GpuBackend` contract.
#[derive(Debug)]
pub struct WebGpuBackend {
    // Logical GPU device used to create resources and encode commands.
    device: wgpu::Device,
    // Queue the encoded compute work is submitted to.
    queue: wgpu::Queue,
    // Pre-built pipeline for the FP32 SGP4 WGSL kernel.
    pipeline: wgpu::ComputePipeline,
}

impl WebGpuBackend {
    /// Acquire a WebGPU adapter/device pair and build the FP32 SGP4 compute
    /// pipeline from the bundled WGSL source.
    ///
    /// # Errors
    ///
    /// Returns [`GpuBackendError::Dispatch`] when no WebGPU adapter is
    /// available or when the device request fails.
    pub async fn initialize() -> Result<Self, GpuBackendError> {
        let instance = wgpu::Instance::default();

        // Prefer a discrete/high-performance adapter; no surface is needed
        // since this backend only runs compute work.
        let adapter_options = wgpu::RequestAdapterOptions {
            power_preference: wgpu::PowerPreference::HighPerformance,
            force_fallback_adapter: false,
            compatible_surface: None,
        };
        let adapter = match instance.request_adapter(&adapter_options).await {
            Some(found) => found,
            None => {
                return Err(GpuBackendError::Dispatch(
                    "no WebGPU adapter available".to_string(),
                ))
            }
        };

        // Downlevel-default limits keep the device request portable across
        // browsers; no optional features are required for the scaffold.
        let device_descriptor = wgpu::DeviceDescriptor {
            label: Some("openastroviz-webgpu-device"),
            required_features: wgpu::Features::empty(),
            required_limits: wgpu::Limits::downlevel_defaults(),
        };
        let (device, queue) = match adapter.request_device(&device_descriptor, None).await {
            Ok(pair) => pair,
            Err(err) => return Err(GpuBackendError::Dispatch(err.to_string())),
        };

        let module = device.create_shader_module(wgpu::ShaderModuleDescriptor {
            label: Some("openastroviz-sgp4-fp32"),
            source: wgpu::ShaderSource::Wgsl(SGP4_FP32_WGSL.into()),
        });

        // `layout: None` lets wgpu infer the (currently empty) bind group
        // layout directly from the shader.
        let pipeline = device.create_compute_pipeline(&wgpu::ComputePipelineDescriptor {
            label: Some("openastroviz-sgp4-fp32-pipeline"),
            layout: None,
            module: &module,
            entry_point: "main",
            compilation_options: wgpu::PipelineCompilationOptions::default(),
        });

        Ok(Self { device, queue, pipeline })
    }
}

impl GpuBackend for WebGpuBackend {
    /// User-visible backend name.
    fn name(&self) -> &'static str {
        "webgpu"
    }

    /// Always ready: `initialize` only yields a backend after the device,
    /// queue, and pipeline have all been created successfully.
    fn is_ready(&self) -> bool {
        true
    }

    /// Encode and submit one FP32 SGP4 compute dispatch covering `batch_size`
    /// invocations, rounded up to whole workgroups of `WORKGROUP_SIZE`.
    ///
    /// # Errors
    ///
    /// Returns [`GpuBackendError::InvalidBatchSize`] when `batch_size` is zero
    /// or requires more workgroups along the X dimension than the device
    /// allows (which would otherwise surface as an opaque validation error at
    /// submit time).
    fn dispatch_sgp4_fp32_step(&self, batch_size: u32) -> Result<(), GpuBackendError> {
        if batch_size == 0 {
            return Err(GpuBackendError::InvalidBatchSize(batch_size));
        }

        let workgroups = batch_size.div_ceil(WORKGROUP_SIZE);
        // Reject oversized batches up front with a typed error instead of
        // letting wgpu raise a device-side validation error on dispatch.
        if workgroups > self.device.limits().max_compute_workgroups_per_dimension {
            return Err(GpuBackendError::InvalidBatchSize(batch_size));
        }

        let mut encoder = self
            .device
            .create_command_encoder(&wgpu::CommandEncoderDescriptor {
                label: Some("openastroviz-sgp4-fp32-encoder"),
            });

        {
            // Scoped so the pass ends (on drop) before the encoder is finished.
            let mut pass = encoder.begin_compute_pass(&wgpu::ComputePassDescriptor {
                label: Some("openastroviz-sgp4-fp32-pass"),
                timestamp_writes: None,
            });
            pass.set_pipeline(&self.pipeline);
            pass.dispatch_workgroups(workgroups, 1, 1);
        }

        self.queue.submit(Some(encoder.finish()));
        Ok(())
    }
}

/// JS-facing wrapper owning an initialized `WebGpuBackend`.
#[wasm_bindgen]
pub struct WebGpuBackendHandle {
    backend: WebGpuBackend,
}

#[wasm_bindgen]
impl WebGpuBackendHandle {
    /// User-visible backend name (e.g. `"webgpu"`).
    #[wasm_bindgen(js_name = backendName)]
    pub fn backend_name(&self) -> String {
        self.backend.name().to_string()
    }

    /// Whether the backend can accept dispatch work. Added for parity with
    /// the `GpuBackend` trait so JS callers can probe readiness.
    #[wasm_bindgen(js_name = isReady)]
    pub fn is_ready(&self) -> bool {
        self.backend.is_ready()
    }

    /// Dispatch one FP32 SGP4 kernel step for `batch_size` orbital elements.
    /// Backend errors are stringified into a `JsValue` for the JS caller.
    #[wasm_bindgen(js_name = dispatchSgp4Fp32Step)]
    pub fn dispatch_sgp4_fp32_step(&self, batch_size: u32) -> Result<(), JsValue> {
        self.backend
            .dispatch_sgp4_fp32_step(batch_size)
            .map_err(|err| JsValue::from_str(&err.to_string()))
    }
}

/// JS entry point: create and initialize the WebGPU backend, returning a
/// handle the frontend can use to dispatch compute work.
#[wasm_bindgen(js_name = initWebGpuBackend)]
pub async fn init_webgpu_backend() -> Result<WebGpuBackendHandle, JsValue> {
    WebGpuBackend::initialize()
        .await
        .map(|backend| WebGpuBackendHandle { backend })
        .map_err(|err| JsValue::from_str(&err.to_string()))
}