Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 8 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -130,3 +130,11 @@ dmypy.json
# Pyre type checker
.pyre/
.DS_Store

# Claude Code settings
.claude/*

# Additional testing artifacts
*traj_vis/
*.npz
*.npy
344 changes: 344 additions & 0 deletions poetry.lock

Large diffs are not rendered by default.

73 changes: 73 additions & 0 deletions pyproject.toml
Original file line number Diff line number Diff line change
@@ -0,0 +1,73 @@
# Packaging and tooling configuration (Poetry + pytest + coverage) for the
# DynaCam evaluation/visualization project.

[tool.poetry]
name = "dynacam-evaluation"
version = "0.1.0"
description = "3D Human Pose Estimation Evaluation and Visualization for DynaCam Dataset"
# NOTE(review): placeholder author — replace before publishing.
authors = ["Your Name <your.email@example.com>"]
readme = "README.md"
# Both top-level packages ship in the distribution.
packages = [{include = "evaluation"}, {include = "visualization"}]

[tool.poetry.dependencies]
python = "^3.9"
numpy = "^1.21.0"

# Test-only dependencies; installed with `poetry install --with dev`.
[tool.poetry.group.dev.dependencies]
pytest = "^7.4.0"
pytest-cov = "^4.1.0"
pytest-mock = "^3.11.0"

# Convenience entry points: `poetry run test` / `poetry run tests`.
[tool.poetry.scripts]
test = "pytest:main"
tests = "pytest:main"

[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"

[tool.pytest.ini_options]
testpaths = ["tests"]
python_files = ["test_*.py", "*_test.py"]
python_classes = ["Test*"]
python_functions = ["test_*"]
addopts = [
    "--strict-markers",            # unknown markers are collection errors
    "--strict-config",             # config typos are errors
    "--verbose",
    "--cov=evaluation",
    "--cov=visualization",
    "--cov-report=html:htmlcov",
    "--cov-report=xml:coverage.xml",
    "--cov-report=term-missing",
    "--cov-fail-under=80"          # fail the whole run below 80% coverage
]
markers = [
    "unit: Unit tests",
    "integration: Integration tests",
    "slow: Tests that take a long time to run"
]

[tool.coverage.run]
source = ["evaluation", "visualization"]
omit = [
    "tests/*",
    "*/test_*",
    "*/__pycache__/*",
    "*/site-packages/*"
]

[tool.coverage.report]
# Lines matching these patterns are excluded from coverage accounting.
exclude_lines = [
    "pragma: no cover",
    "def __repr__",
    "raise AssertionError",
    "raise NotImplementedError",
    "if __name__ == .__main__.:",
    "@abstract"
]
show_missing = true
precision = 2

[tool.coverage.html]
directory = "htmlcov"

[tool.coverage.xml]
output = "coverage.xml"
1 change: 1 addition & 0 deletions tests/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
# Tests module for DynaCam evaluation and visualization
151 changes: 151 additions & 0 deletions tests/conftest.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,151 @@
"""
Shared pytest fixtures for DynaCam evaluation and visualization tests.
"""

import os
import tempfile
import shutil
from pathlib import Path
from typing import Dict, Any

import pytest
import numpy as np


@pytest.fixture
def temp_dir():
    """Create a temporary directory that is removed after the test.

    Yields:
        str: Path to a freshly created temporary directory.
    """
    temp_dir = tempfile.mkdtemp()
    yield temp_dir
    # ignore_errors=True: the test itself may have deleted the directory
    # (or left read-only files behind on Windows); teardown must never
    # mask the test result with a cleanup failure.
    shutil.rmtree(temp_dir, ignore_errors=True)


@pytest.fixture
def sample_data_dir(temp_dir):
    """Build a minimal on-disk data layout for tests.

    Creates ``annotations``, ``video_frames`` and ``predictions``
    subdirectories under a ``test_data`` root inside *temp_dir*.

    Yields:
        str: Path to the ``test_data`` directory.
    """
    data_root = Path(temp_dir) / "test_data"
    data_root.mkdir(parents=True, exist_ok=True)

    for subdir in ("annotations", "video_frames", "predictions"):
        (data_root / subdir).mkdir()

    yield str(data_root)


@pytest.fixture
def sample_annotations():
    """Random annotation data shaped like a 1-subject, 5-frame, 24-joint sequence.

    Returns:
        dict: Annotation arrays keyed by field name; all values are ndarrays.
    """
    n_frames, n_joints = 5, 24
    annotations = {
        'frame_ids': np.arange(n_frames),
        'world_grots': np.random.randn(1, n_frames, 3),
        'world_trans': np.random.randn(1, n_frames, 3),
        'kp2ds': np.random.randn(1, n_frames, n_joints, 3),
        'kp3ds': np.random.randn(1, n_frames, n_joints, 3),
        'poses': np.random.randn(1, n_frames, n_joints, 3, 3),
        'betas': np.random.randn(1, 10),
        'camera_intrinsics': np.eye(3),
        'camera_extrinsics': np.eye(4),
    }
    return annotations


@pytest.fixture
def sample_trajectory():
    """Return ``(trajectory, timestamps)`` for a random 10-sample path.

    ``trajectory`` is a (10, 7) array: 3 position + 4 quaternion components
    per sample; ``timestamps`` spans [0, 1] uniformly.
    """
    timestamps = np.linspace(0, 1, 10)
    positions = np.random.randn(10, 3)
    orientations = np.random.randn(10, 4)  # quaternion components (unnormalized)
    return np.concatenate([positions, orientations], axis=1), timestamps


@pytest.fixture
def mock_config():
    """Mock configuration dict covering visualization and evaluation settings."""
    visualization_cfg = {
        'width': 640,
        'height': 480,
        'fps': 30,
    }
    evaluation_cfg = {
        'missing_punish': [2, 4],
        'joint_num': 24,
        'align_trajectories': True,
    }
    return {
        'root_dir': '/tmp/test_data',
        'prediction_dir': '/tmp/test_predictions',
        'coverage_threshold': 80,
        'test_timeout': 30,
        'visualization': visualization_cfg,
        'evaluation': evaluation_cfg,
    }


@pytest.fixture
def sample_image_paths(temp_dir):
    """Create five empty ``frame_XXXXXX.jpg`` files and return their paths.

    Returns:
        list[str]: Absolute paths to the created placeholder files.
    """
    image_dir = Path(temp_dir) / "images"
    image_dir.mkdir(parents=True, exist_ok=True)

    paths = []
    for frame_idx in range(5):
        frame_file = image_dir / f"frame_{frame_idx:06d}.jpg"
        frame_file.touch()  # an empty file is enough for path-based tests
        paths.append(str(frame_file))

    return paths


@pytest.fixture
def mock_sequence_data():
    """Two mock sequences of different lengths (4 and 5 frames)."""
    def _make_sequence(n_frames):
        # One subject, n_frames frames, 3-vector rotation/translation per frame.
        return {
            'frame_ids': np.arange(n_frames),
            'world_grots': np.random.randn(1, n_frames, 3),
            'world_trans': np.random.randn(1, n_frames, 3),
        }

    return {
        'seq_001': _make_sequence(4),
        'seq_002': _make_sequence(5),
    }


@pytest.fixture
def prediction_results():
    """Mock per-sequence prediction results.

    ``seq_002`` is deliberately ``None`` to exercise the missing-prediction
    code path in consumers.
    """
    frame2ind = {frame: frame for frame in range(4)}
    seq_001_result = (
        frame2ind,
        np.random.randn(4, 24, 3),  # kp_2d_pred
        np.random.randn(4, 3),      # root_trans_world
        np.random.randn(4, 3),      # root_rot_world
    )
    return {
        'seq_001': seq_001_result,
        'seq_002': None,  # missing prediction case
    }


def pytest_configure(config):
    """Pytest hook run once at session start.

    Pins numpy's print options so array reprs in test output and failure
    messages are stable between runs.
    """
    print_options = {'precision': 3, 'suppress': True}
    np.set_printoptions(**print_options)


# Applied to every test automatically (autouse).
@pytest.fixture(autouse=True)
def setup_test_environment(monkeypatch):
    """Set environment variables common to all tests (auto-reverted by monkeypatch)."""
    env_overrides = {
        "PYTEST_RUNNING": "true",
        "NUMEXPR_MAX_THREADS": "1",  # keep numexpr single-threaded in tests
    }
    for name, value in env_overrides.items():
        monkeypatch.setenv(name, value)


@pytest.fixture
def numpy_random_seed():
    """Seed numpy's global RNG (seed 42) for the duration of one test."""
    np.random.seed(42)
    try:
        yield
    finally:
        # Re-randomize so later tests are not accidentally deterministic.
        np.random.seed(None)
1 change: 1 addition & 0 deletions tests/integration/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
# Integration tests for DynaCam evaluation and visualization
125 changes: 125 additions & 0 deletions tests/test_setup_validation.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,125 @@
"""
Test file to validate the testing infrastructure setup.
"""

import pytest
import numpy as np
import os
import sys
from pathlib import Path

# Make the repository root importable so these tests can import the
# `evaluation` and `visualization` packages without installing them first.
project_root = Path(__file__).parent.parent
sys.path.insert(0, str(project_root))


class TestSetupValidation:
    """Smoke tests that validate the testing infrastructure itself."""

    def test_pytest_working(self):
        """pytest collects and runs tests from this module."""
        assert True

    def test_fixtures_available(self, temp_dir, sample_annotations, mock_config):
        """Custom conftest fixtures resolve and contain the expected data."""
        assert Path(temp_dir).exists()
        assert isinstance(sample_annotations, dict)
        assert 'frame_ids' in sample_annotations
        assert isinstance(mock_config, dict)
        assert 'root_dir' in mock_config

    def test_numpy_available(self):
        """numpy imports and basic array math works."""
        values = np.array([1, 2, 3, 4, 5])
        assert values.mean() == 3.0

    def test_markers_configured(self):
        """Custom markers are available (registered in pyproject.toml)."""
        assert hasattr(pytest, 'mark')

    @pytest.mark.unit
    def test_unit_marker(self):
        """The 'unit' marker is registered and usable."""
        assert True

    @pytest.mark.integration
    def test_integration_marker(self):
        """The 'integration' marker is registered and usable."""
        assert True

    @pytest.mark.slow
    def test_slow_marker(self):
        """The 'slow' marker is registered and usable."""
        assert True

    def test_project_structure(self):
        """The repository layout contains the packages under test."""
        repo_root = Path(__file__).parent.parent

        # Main source packages must exist next to the tests directory.
        assert (repo_root / "evaluation").is_dir()
        assert (repo_root / "visualization").is_dir()

        # Project configuration must be present at the repository root.
        assert (repo_root / "pyproject.toml").is_file()

    def test_temp_directory_fixture(self, temp_dir):
        """temp_dir yields an existing, writable directory."""
        tmp = Path(temp_dir)
        assert tmp.exists()
        assert tmp.is_dir()

        # The directory must accept new files.
        target = tmp / "test_file.txt"
        with open(target, "w") as fh:
            fh.write("test content")
        assert target.exists()

    def test_sample_data_fixture(self, sample_data_dir):
        """sample_data_dir creates the expected subdirectory layout."""
        data_root = Path(sample_data_dir)
        assert data_root.exists()

        for subdir in ("annotations", "video_frames", "predictions"):
            assert (data_root / subdir).exists()

    def test_sample_annotations_fixture(self, sample_annotations):
        """sample_annotations supplies ndarrays for every required key."""
        required = ('frame_ids', 'world_grots', 'world_trans', 'kp2ds', 'kp3ds')
        for key in required:
            assert key in sample_annotations
            assert isinstance(sample_annotations[key], np.ndarray)

    def test_trajectory_fixture(self, sample_trajectory):
        """sample_trajectory returns aligned (N, 7) poses and N timestamps."""
        trajectory, timestamps = sample_trajectory

        assert isinstance(trajectory, np.ndarray)
        assert isinstance(timestamps, np.ndarray)
        assert trajectory.shape[1] == 7  # 3 position + 4 orientation
        assert len(trajectory) == len(timestamps)

    def test_numpy_seeding(self, numpy_random_seed):
        """numpy_random_seed makes the global RNG reproducible."""
        # The fixture has already seeded with 42, so the first draw here
        # must match a fresh draw after re-seeding with the same value.
        first_draw = np.random.rand()

        np.random.seed(42)
        second_draw = np.random.rand()

        assert first_draw == second_draw

    def test_mock_config_structure(self, mock_config):
        """mock_config exposes the expected top-level and nested keys."""
        for key in ('root_dir', 'prediction_dir', 'visualization', 'evaluation'):
            assert key in mock_config

        # Nested sections carry their own settings.
        assert 'width' in mock_config['visualization']
        assert 'missing_punish' in mock_config['evaluation']
1 change: 1 addition & 0 deletions tests/unit/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
# Unit tests for DynaCam evaluation and visualization