diff --git a/.circleci/config.yml b/.circleci/config.yml deleted file mode 100644 index 1a44a79..0000000 --- a/.circleci/config.yml +++ /dev/null @@ -1,38 +0,0 @@ - -deps-run: &deps-install - name: Install Python dependencies - command: | - python3 -m venv venv - . venv/bin/activate - pip install --user --quiet -r requirements/docs.txt - pip install --user . - -doc-test: &doc-test - name: Run doctets - command: make doctest - working_directory: doc - -doc-run: &doc-build - name: Build documentation - command: make html SPHINXOPTS='-q' - working_directory: doc - -version: 2 -jobs: - build: - docker: - - image: circleci/python:3.6 - steps: - - checkout - - run: *deps-install - - run: *doc-test - - run: *doc-build - - store_artifacts: - path: doc/build/html - - run: - name: "Built documentation is available at:" - command: echo "${CIRCLE_BUILD_URL}/artifacts/${CIRCLE_NODE_INDEX}/${CIRCLE_WORKING_DIRECTORY/#\~/$HOME}/doc/build/html/index.html" - -notify: - webhooks: - - url: https://giles.cadair.dev/circleci diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index 834abe9..46c3d2d 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -9,8 +9,8 @@ } }, "forwardPorts": [6080], - //Pip install the requriements, and then install the pre-commit hooks - "postCreateCommand": "pip install -e .[tests,dev] && pre-commit install", + //Pip install the requirements, and then install the pre-commit hooks + "postCreateCommand": "pip install -e .[tests,dev,docs] && pre-commit install", "customizations": { "vscode": { "extensions": [ diff --git a/.github/copilot-instructions.md b/.github/copilot-instructions.md new file mode 100644 index 0000000..50b125f --- /dev/null +++ b/.github/copilot-instructions.md @@ -0,0 +1,74 @@ +# GitHub Copilot Instructions for cdflib + +This repository contains `cdflib`, a pure Python library for reading and writing NASA Common Data Format (CDF) files. It does not rely on the NASA CDF C library. + +## Project Architecture & Core Components + +- **Pure Python Implementation**: The core logic uses `numpy` and `struct` to parse binary CDF files. No C extensions are used. +- **Reading (`cdflib/cdfread.py`)**: + - The `CDF` class is the main entry point for reading. + - Supports reading from local files, URLs, and S3 buckets. + - **Key Method**: `CDF.varget(variable_name)` retrieves variable data. + - **Key Method**: `CDF.cdf_info()` returns global file information. +- **Writing (`cdflib/cdfwrite.py`)**: + - The `CDF` class in this module is used to create new CDF files. + - Requires a `cdf_spec` dictionary to define file properties (encoding, majority, etc.). +- **Xarray Integration (`cdflib/xarray/`)**: + - `cdf_to_xarray`: Converts CDF files to `xarray.Dataset` objects, mapping ISTP attributes to xarray conventions. + - `xarray_to_cdf`: Converts `xarray.Dataset` objects back to CDF files. +- **Time Handling (`cdflib/epochs.py`)**: + - `CDFepoch` class handles conversions between CDF time types (CDF_EPOCH, CDF_EPOCH16, TT2000) and Python `datetime`, `numpy.datetime64`, or Unix timestamps. + +## Developer Workflows + +- **Dependency Management**: Dependencies are defined in `pyproject.toml`. + - Core: `numpy` + - Optional/Test: `xarray`, `astropy`, `hypothesis`, `pytest` +- **Testing**: + - Run tests using `tox` to test across multiple Python versions. + - Run specific tests with `pytest`: `pytest tests/test_cdfread.py`. + - Tests are located in the `tests/` directory. 
+- **Formatting**: + - The project uses `black` for code formatting and `isort` for import sorting. + - Configuration is in `pyproject.toml`. + +## Coding Conventions & Patterns + +- **Type Hinting**: Use Python type hints extensively (e.g., `Union[str, Path]`, `npt.ArrayLike`). +- **Path Handling**: Support both `str` and `pathlib.Path` objects for file paths. Internally, paths are often resolved to strings or `Path` objects. +- **Numpy Usage**: Use `numpy` for all array operations. Avoid explicit loops over data where possible. +- **S3 Support**: When handling S3 paths (`s3://`), use the internal `cdflib.s3` module logic. + +## Common Code Examples + +### Reading a CDF File +```python +import cdflib +cdf_file = cdflib.CDF('/path/to/file.cdf') +info = cdf_file.cdf_info() +data = cdf_file.varget("VariableName") +``` + +### Converting to Xarray +```python +from cdflib import cdf_to_xarray +ds = cdf_to_xarray('/path/to/file.cdf', to_datetime=True) +``` + +### Time Conversion +```python +from cdflib.epochs import CDFepoch +# Convert CDF epoch to datetime +dt = CDFepoch.to_datetime(cdf_epoch_value) +# Convert datetime to CDF epoch +epoch = CDFepoch.compute(dt) +``` + +### Writing a CDF File +```python +from cdflib.cdfwrite import CDF +spec = {'Majority': 'row_major', 'Encoding': 6} # 6 is IBMPC_ENCODING +with CDF('new_file.cdf', cdf_spec=spec) as cdf: + cdf.write_globalattrs(global_attrs) + cdf.write_var(var_spec, var_attrs, var_data) +``` diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 50158f6..9c03fb1 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -1,36 +1,49 @@ -name: Run tests +name: Tests on: push: - branches: - - 'main' + branches: [ main ] pull_request: - # Allow manual runs through the web UI workflow_dispatch: -# Only allow one run per git ref at a time concurrency: - group: '${{ github.workflow }}-${{ github.ref }}' + group: ${{ github.workflow }}-${{ github.ref }} cancel-in-progress: true jobs: - core: - uses: OpenAstronomy/github-actions-workflows/.github/workflows/tox.yml@v1 - with: - submodules: false - coverage: codecov - envs: | - - linux: py39 - #- linux: py310 - #- linux: py311 - - linux: py312 - #- windows: py39 - - windows: py310 - #- windows: py311 - #- windows: py312 - #- macos: py38 - #- macos: py39 - #- macos: py310 - - macos: py311-online - - macos: py312 - - windows: py313-devdeps + tests: + name: ${{ matrix.os }} / ${{ matrix.python-version }} + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + os: [ubuntu-latest, windows-latest, macos-latest] + python-version: ["3.10", "3.11", "3.12", "3.13"] + include: + - os: ubuntu-latest + python-version: "3.13" + tox-env: devdeps + + steps: + - uses: actions/checkout@v4 + + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python-version }} + cache: 'pip' + + - name: Install Tools + run: | + python -m pip install --upgrade pip + python -m pip install tox + + - name: Run Tests via Tox + # If a specific tox-env is set in matrix, use it; otherwise use standard py version + run: | + if [ -z "${{ matrix.tox-env }}" ]; then + tox -e py$(echo ${{ matrix.python-version }} | tr -d .) 
+ else + tox -e ${{ matrix.tox-env }} + fi + shell: bash diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml new file mode 100644 index 0000000..fc93ce7 --- /dev/null +++ b/.github/workflows/docs.yml @@ -0,0 +1,26 @@ +name: docs +on: + push: + branches: + - main +permissions: + contents: write +jobs: + build-and-deploy-docs: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: 3.x + + - name: Install dependencies + run: | + pip install -e .[tests,dev,docs] + + - name: Build and deploy + run: | + mkdocs gh-deploy --force diff --git a/.github/workflows/pypi-build.yaml b/.github/workflows/pypi-build.yaml index 713a0fc..8ecf643 100644 --- a/.github/workflows/pypi-build.yaml +++ b/.github/workflows/pypi-build.yaml @@ -4,25 +4,29 @@ on: release: types: [created] - jobs: - build: + build-and-publish: runs-on: ubuntu-latest + # REQUIRED: This permission allows the OIDC token to be generated + permissions: + id-token: write + contents: read + steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 + - name: Set up Python - uses: actions/setup-python@v3 + uses: actions/setup-python@v5 with: python-version: '3.x' - - name: Install dependencies + + - name: Install build dependencies run: | python -m pip install --upgrade pip - python -m pip install twine build - - name: Build + python -m pip install build + + - name: Build package run: python -m build - - name: Publish - env: - TWINE_USERNAME: __token__ - TWINE_PASSWORD: ${{ secrets.pypi_password }} - run: twine upload dist/* + - name: Publish to PyPI + uses: pypa/gh-action-pypi-publish@release/v1 diff --git a/.github/workflows/remote-tests.yaml b/.github/workflows/remote-tests.yaml new file mode 100644 index 0000000..6158be6 --- /dev/null +++ b/.github/workflows/remote-tests.yaml @@ -0,0 +1,27 @@ +name: Remote Data Tests + +on: + # Allow manual trigger via the "Run Workflow" button in UI + workflow_dispatch: + # Run automatically every Monday at 6am UTC + schedule: + - cron: '0 6 * * 1' + +jobs: + remote-tests: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: "3.12" + + - name: Install dependencies + run: | + pip install .[tests] + + - name: Run Remote Tests + # We explicitly tell pytest to run ONLY the remote_data tests + run: pytest -m remote_data diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 3360ee6..ab6665a 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,3 +1,7 @@ +ci: + autofix_prs: false + autoupdate_schedule: "quarterly" + skip: [no-commit-to-branch, ccv] repos: - repo: https://github.com/myint/autoflake rev: v2.1.1 @@ -10,11 +14,11 @@ repos: rev: 5.12.0 hooks: - id: isort - args: ['--sp','setup.cfg'] + args: ['--sp','pyproject.toml'] exclude: ".*(.fits|.fts|.fit|.txt|tca.*|extern.*|.rst|.md|.svg)$" - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.4.0 + rev: v6.0.0 hooks: - id: check-ast - id: check-case-conflict @@ -32,8 +36,8 @@ repos: hooks: - id: black - - repo: https://github.com/pre-commit/mirrors-mypy - rev: 'v1.3.0' - hooks: - - id: mypy - additional_dependencies: [xarray] + - repo: https://github.com/pre-commit/mirrors-mypy + rev: 'v1.18.2' + hooks: + - id: mypy + additional_dependencies: [xarray] diff --git a/.readthedocs.yml b/.readthedocs.yml deleted file mode 100644 index 134d35f..0000000 --- a/.readthedocs.yml +++ /dev/null @@ -1,25 
+0,0 @@ -# .readthedocs.yaml -# Read the Docs configuration file -# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details - -# Required -version: 2 - -# Set the version of Python and other tools you might need -build: - os: ubuntu-20.04 - tools: - python: "3.9" - -# Build documentation in the docs/ directory with Sphinx -sphinx: - configuration: doc/conf.py - - -# Optionally declare the Python requirements required to build your docs -python: - install: - - method: pip - path: . - extra_requirements: - - docs diff --git a/LICENSE b/LICENSE index 863452d..3aca72e 100644 --- a/LICENSE +++ b/LICENSE @@ -1,6 +1,6 @@ MIT License -Copyright (c) 2024 Regents of the University of Colorado +Copyright (c) 2025 Regents of the University of Colorado Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/README.md b/README.md index c806524..50c8778 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,5 @@ -[![Run tests](https://github.com/MAVENSDC/cdflib/actions/workflows/ci.yml/badge.svg)](https://github.com/MAVENSDC/cdflib/actions/workflows/ci.yml) -[![codecov](https://codecov.io/gh/MAVENSDC/cdflib/branch/master/graph/badge.svg?token=IJ6moGc40e)](https://codecov.io/gh/MAVENSDC/cdflib) +[![Run tests](https://github.com/lasp/cdflib/actions/workflows/ci.yml/badge.svg)](https://github.com/lasp/cdflib/actions/workflows/ci.yml) +[![codecov](https://codecov.io/gh/lasp/cdflib/branch/master/graph/badge.svg?token=IJ6moGc40e)](https://codecov.io/gh/lasp/cdflib) [![DOI](https://zenodo.org/badge/102912691.svg)](https://zenodo.org/badge/latestdoi/102912691) [![Documentation Status](https://readthedocs.org/projects/cdflib/badge/?version=latest)](https://cdflib.readthedocs.io/en/latest/?badge=latest) @@ -20,6 +20,6 @@ pip install cdflib ## Documentation -The full documentation can be found here: +The full documentation can be found here -[https://cdflib.readthedocs.io/en/latest/](https://cdflib.readthedocs.io/en/latest/) +[https://lasp.github.io/cdflib/](https://lasp.github.io/cdflib/) diff --git a/archive/.appveyor.yml b/archive/.appveyor.yml deleted file mode 100644 index 016ac84..0000000 --- a/archive/.appveyor.yml +++ /dev/null @@ -1,19 +0,0 @@ -image: -- Visual Studio 2017 -- Ubuntu1804 - -stack: python 3 - -environment: - PY_DIR: C:\Python37-x64 - -clone_depth: 3 - -build: off - -init: -- cmd: set PATH=%PY_DIR%;%PY_DIR%\Scripts;%PATH% - -install: pip install -e .[tests] - -test_script: pytest diff --git a/asv.conf.json b/asv.conf.json deleted file mode 100644 index 9655436..0000000 --- a/asv.conf.json +++ /dev/null @@ -1,160 +0,0 @@ -{ - // The version of the config file format. Do not change, unless - // you know what you are doing. - "version": 1, - - // The name of the project being benchmarked - "project": "cdflib", - - // The project's homepage - "project_url": "https://github.com/MAVENSDC/cdflib", - - // The URL or local path of the source code repository for the - // project being benchmarked - "repo": ".", - - // The Python project's subdirectory in your repo. If missing or - // the empty string, the project is assumed to be located at the root - // of the repository. - // "repo_subdir": "", - - // Customizable commands for building, installing, and - // uninstalling the project. See asv.conf.json documentation. 
- // - // "install_command": ["in-dir={env_dir} python -mpip install {wheel_file}"], - // "uninstall_command": ["return-code=any python -mpip uninstall -y {project}"], - // "build_command": [ - // "python setup.py build", - // "PIP_NO_BUILD_ISOLATION=false python -mpip wheel --no-deps --no-index -w {build_cache_dir} {build_dir}" - // ], - - // List of branches to benchmark. If not provided, defaults to "master" - // (for git) or "default" (for mercurial). - // "branches": ["master"], // for git - // "branches": ["default"], // for mercurial - - // The DVCS being used. If not set, it will be automatically - // determined from "repo" by looking at the protocol in the URL - // (if remote), or by looking for special directories, such as - // ".git" (if local). - // "dvcs": "git", - - // The tool to use to create environments. May be "conda", - // "virtualenv" or other value depending on the plugins in use. - // If missing or the empty string, the tool will be automatically - // determined by looking for tools on the PATH environment - // variable. - "environment_type": "virtualenv", - - // timeout in seconds for installing any dependencies in environment - // defaults to 10 min - //"install_timeout": 600, - - // the base URL to show a commit for the project. - // "show_commit_url": "http://github.com/owner/project/commit/", - - // The Pythons you'd like to test against. If not provided, defaults - // to the current version of Python used to run `asv`. - // "pythons": ["2.7", "3.6"], - - // The list of conda channel names to be searched for benchmark - // dependency packages in the specified order - // "conda_channels": ["conda-forge", "defaults"], - - // The matrix of dependencies to test. Each key is the name of a - // package (in PyPI) and the values are version numbers. An empty - // list or empty string indicates to just test against the default - // (latest) version. null indicates that the package is to not be - // installed. If the package to be tested is only available from - // PyPi, and the 'environment_type' is conda, then you can preface - // the package name by 'pip+', and the package will be installed via - // pip (with all the conda available packages installed first, - // followed by the pip installed packages). - // - // "matrix": { - // "numpy": ["1.6", "1.7"], - // "six": ["", null], // test with and without six installed - // "pip+emcee": [""], // emcee is only available for install with pip. - // }, - - // Combinations of libraries/python versions can be excluded/included - // from the set to test. Each entry is a dictionary containing additional - // key-value pairs to include/exclude. - // - // An exclude entry excludes entries where all values match. The - // values are regexps that should match the whole string. - // - // An include entry adds an environment. Only the packages listed - // are installed. The 'python' key is required. The exclude rules - // do not apply to includes. - // - // In addition to package names, the following keys are available: - // - // - python - // Python version, as in the *pythons* variable above. - // - environment_type - // Environment type, as above. - // - sys_platform - // Platform, as in sys.platform. Possible values for the common - // cases: 'linux2', 'win32', 'cygwin', 'darwin'. 
- // - // "exclude": [ - // {"python": "3.2", "sys_platform": "win32"}, // skip py3.2 on windows - // {"environment_type": "conda", "six": null}, // don't run without six on conda - // ], - // - // "include": [ - // // additional env for python2.7 - // {"python": "2.7", "numpy": "1.8"}, - // // additional env if run on windows+conda - // {"platform": "win32", "environment_type": "conda", "python": "2.7", "libpython": ""}, - // ], - - // The directory (relative to the current directory) that benchmarks are - // stored in. If not provided, defaults to "benchmarks" - // "benchmark_dir": "benchmarks", - - // The directory (relative to the current directory) to cache the Python - // environments in. If not provided, defaults to "env" - "env_dir": ".asv/env", - - // The directory (relative to the current directory) that raw benchmark - // results are stored in. If not provided, defaults to "results". - "results_dir": ".asv/results", - - // The directory (relative to the current directory) that the html tree - // should be written to. If not provided, defaults to "html". - "html_dir": ".asv/html", - - // The number of characters to retain in the commit hashes. - // "hash_length": 8, - - // `asv` will cache results of the recent builds in each - // environment, making them faster to install next time. This is - // the number of builds to keep, per environment. - // "build_cache_size": 2, - - // The commits after which the regression search in `asv publish` - // should start looking for regressions. Dictionary whose keys are - // regexps matching to benchmark names, and values corresponding to - // the commit (exclusive) after which to start looking for - // regressions. The default is to start from the first commit - // with results. If the commit is `null`, regression detection is - // skipped for the matching benchmark. - // - // "regressions_first_commits": { - // "some_benchmark": "352cdf", // Consider regressions only after this commit - // "another_benchmark": null, // Skip regression detection altogether - // }, - - // The thresholds for relative change in results, after which `asv - // publish` starts reporting regressions. Dictionary of the same - // form as in ``regressions_first_commits``, with values - // indicating the thresholds. If multiple entries match, the - // maximum is taken. If no entry matches, the default is 5%. - // - // "regressions_thresholds": { - // "some_benchmark": 0.01, // Threshold of 1% - // "another_benchmark": 0.5, // Threshold of 50% - // }, -} diff --git a/benchmarks/__init__.py b/benchmarks/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/benchmarks/benchmarks.py b/benchmarks/benchmarks.py deleted file mode 100644 index 858827c..0000000 --- a/benchmarks/benchmarks.py +++ /dev/null @@ -1,23 +0,0 @@ -import numpy as np - -from cdflib.epochs import CDFepoch as cdfepoch - - -class TimeSuite: - """ - An example benchmark that times the performance of various kinds - of iterating over dictionaries in Python. 
- """ - - def setup(self): - self.epochs = np.ones(1000) * 62567898765432.0 - self.epochs_tt2000 = (np.ones(1000) * 186999622360321123).astype(int) - - def time_epoch_encode(self): - cdfepoch.encode(self.epochs) - - def time_epoch_to_datetime(self): - cdfepoch.to_datetime(self.epochs) - - def time_epoch_to_datetime_tt2000(self): - cdfepoch.to_datetime(self.epochs_tt2000) diff --git a/cdflib/cdfread.py b/cdflib/cdfread.py index e1a2c20..6384b28 100644 --- a/cdflib/cdfread.py +++ b/cdflib/cdfread.py @@ -36,10 +36,12 @@ class CDF: Example ------- + ```python >>> import cdflib >>> cdf_file = cdflib.CDF('/path/to/cdf_file.cdf') >>> cdf_file.cdf_info() >>> x = cdf_file.varget("NameOfVariable", startrec=0, endrec=150) + ``` """ def __init__(self, path: Union[str, Path], validate: bool = False, string_encoding: str = "ascii", s3_read_method: int = 1): diff --git a/cdflib/cdfwrite.py b/cdflib/cdfwrite.py index 98507fe..4706b9d 100644 --- a/cdflib/cdfwrite.py +++ b/cdflib/cdfwrite.py @@ -1,6 +1,5 @@ import binascii import hashlib -import io import logging import math import numbers @@ -11,7 +10,7 @@ from functools import wraps from numbers import Number from pathlib import Path -from typing import Any, Dict, List, Optional, Tuple, Union +from typing import Any, BinaryIO, Dict, List, Optional, Tuple, Union import numpy as np import numpy.typing as npt @@ -822,7 +821,7 @@ def write_var(self, var_spec, var_attrs=None, var_data=None): if maxRec < varMaxRec: self._update_offset_value(f, self.gdr_head + 52, 4, varMaxRec) - def _write_var_attrs(self, f: io.BufferedWriter, varNum: int, var_attrs: Dict[str, Any], zVar: bool) -> None: + def _write_var_attrs(self, f: BinaryIO, varNum: int, var_attrs: Dict[str, Any], zVar: bool) -> None: """ Writes ADRs and AEDRs for variables @@ -944,7 +943,7 @@ def _write_var_attrs(self, f: io.BufferedWriter, varNum: int, var_attrs: Dict[st def _write_var_data_nonsparse( self, - f: io.BufferedWriter, + f: BinaryIO, zVar: bool, var: int, dataType: int, @@ -1106,7 +1105,7 @@ def _write_var_data_nonsparse( def _write_var_data_sparse( self, - f: io.BufferedWriter, + f: BinaryIO, zVar: bool, var: int, dataType: int, @@ -1193,7 +1192,7 @@ def _write_var_data_sparse( return rec_end - def _create_vxr(self, f: io.BufferedWriter, recStart: int, recEnd: int, currentVDR: int, priorVXR: int, vvrOffset: int) -> int: + def _create_vxr(self, f: BinaryIO, recStart: int, recEnd: int, currentVDR: int, priorVXR: int, vvrOffset: int) -> int: """ Create a VXR AND use a VXR @@ -1229,7 +1228,7 @@ def _create_vxr(self, f: io.BufferedWriter, recStart: int, recEnd: int, currentV self._update_offset_value(f, currentVDR + 36, 8, vxroffset) return vxroffset - def _use_vxrentry(self, f: io.BufferedWriter, VXRoffset: int, recStart: int, recEnd: int, offset: int) -> int: + def _use_vxrentry(self, f: BinaryIO, VXRoffset: int, recStart: int, recEnd: int, offset: int) -> int: """ Adds a VVR pointer to a VXR """ @@ -1250,7 +1249,7 @@ def _use_vxrentry(self, f: io.BufferedWriter, VXRoffset: int, recStart: int, rec self._update_offset_value(f, VXRoffset + 24, 4, usedEntries) return usedEntries - def _add_vxr_levels_r(self, f: io.BufferedWriter, vxrhead: int, numVXRs: int) -> Tuple[int, int]: + def _add_vxr_levels_r(self, f: BinaryIO, vxrhead: int, numVXRs: int) -> Tuple[int, int]: """ Build a new level of VXRs... 
make VXRs more tree-like @@ -1321,7 +1320,7 @@ def _add_vxr_levels_r(self, f: io.BufferedWriter, vxrhead: int, numVXRs: int) -> else: return newvxrhead, newvxroff - def _update_vdr_vxrheadtail(self, f: io.BufferedWriter, vdr_offset: int, VXRoffset: int) -> None: + def _update_vdr_vxrheadtail(self, f: BinaryIO, vdr_offset: int, VXRoffset: int) -> None: """ This sets a VXR to be the first and last VXR in the VDR """ @@ -1330,7 +1329,7 @@ def _update_vdr_vxrheadtail(self, f: io.BufferedWriter, vdr_offset: int, VXRoffs # VDR's VXRtail self._update_offset_value(f, vdr_offset + 36, 8, VXRoffset) - def _get_recrange(self, f: io.BufferedWriter, VXRoffset: int) -> Tuple[int, int]: + def _get_recrange(self, f: BinaryIO, VXRoffset: int) -> Tuple[int, int]: """ Finds the first and last record numbers pointed by the VXR Assumes the VXRs are in order @@ -1505,7 +1504,7 @@ def _sparse_token(sparse: str) -> int: except Exception: return 0 - def _write_cdr(self, f: io.BufferedWriter, major: int, encoding: int, checksum: int) -> int: + def _write_cdr(self, f: BinaryIO, major: int, encoding: int, checksum: int) -> int: f.seek(0, 2) byte_loc = f.tell() block_size = self.CDR_BASE_SIZE64 + self.CDF_COPYRIGHT_LEN @@ -1552,7 +1551,7 @@ def _write_cdr(self, f: io.BufferedWriter, major: int, encoding: int, checksum: return byte_loc - def _write_gdr(self, f: io.BufferedWriter) -> int: + def _write_gdr(self, f: BinaryIO) -> int: f.seek(0, 2) byte_loc = f.tell() block_size = self.GDR_BASE_SIZE64 + 4 * self.num_rdim @@ -1594,7 +1593,7 @@ def _write_gdr(self, f: io.BufferedWriter) -> int: return byte_loc - def _write_adr(self, f: io.BufferedWriter, gORv: bool, name: str) -> Tuple[int, int]: + def _write_adr(self, f: BinaryIO, gORv: bool, name: str) -> Tuple[int, int]: """ Writes and ADR to the end of the file. @@ -1676,7 +1675,7 @@ def _write_adr(self, f: io.BufferedWriter, gORv: bool, name: str) -> Tuple[int, def _write_aedr( self, - f: io.BufferedWriter, + f: BinaryIO, gORz: bool, attrNum: int, entryNum: int, @@ -1790,7 +1789,7 @@ def _write_aedr( def _write_vdr( self, - f: io.BufferedWriter, + f: BinaryIO, cdataType: int, numElems: int, numDims: int, @@ -1977,7 +1976,7 @@ def _write_vdr( return num, byte_loc - def _write_vxr(self, f: io.BufferedWriter, numEntries: Optional[int] = None) -> int: + def _write_vxr(self, f: BinaryIO, numEntries: Optional[int] = None) -> int: """ Creates a VXR at the end of the file. Returns byte location of the VXR @@ -2012,7 +2011,7 @@ def _write_vxr(self, f: io.BufferedWriter, numEntries: Optional[int] = None) -> f.write(vxr) return byte_loc - def _write_vvr(self, f: io.BufferedWriter, data: bytes) -> int: + def _write_vvr(self, f: BinaryIO, data: bytes) -> int: """ Writes a vvr to the end of file "f" with the byte stream "data". """ @@ -2029,7 +2028,7 @@ def _write_vvr(self, f: io.BufferedWriter, data: bytes) -> int: return byte_loc - def _write_cpr(self, f: io.BufferedWriter, cType: int, parameter: int) -> int: + def _write_cpr(self, f: BinaryIO, cType: int, parameter: int) -> int: """ Write compression info to the end of the file in a CPR. 
""" @@ -2051,7 +2050,7 @@ def _write_cpr(self, f: io.BufferedWriter, cType: int, parameter: int) -> int: return byte_loc - def _write_cvvr(self, f: io.BufferedWriter, data: Any) -> int: + def _write_cvvr(self, f: BinaryIO, data: Any) -> int: """ Write compressed "data" variable to the end of the file in a CVVR """ @@ -2072,7 +2071,7 @@ def _write_cvvr(self, f: io.BufferedWriter, data: Any) -> int: return byte_loc - def _write_ccr(self, f: io.BufferedWriter, g: io.BufferedWriter, level: int) -> None: + def _write_ccr(self, f: BinaryIO, g: BinaryIO, level: int) -> None: """ Write a CCR to file "g" from file "f" with level "level". Currently, only handles gzip compression. @@ -2479,7 +2478,7 @@ def _num_values(self, zVar: bool, varNum: int) -> int: values = values * dimSizes[x] return values - def _read_offset_value(self, f: io.BufferedWriter, offset: int, size: int) -> int: + def _read_offset_value(self, f: BinaryIO, offset: int, size: int) -> int: """ Reads an integer value from file "f" at location "offset". """ @@ -2489,7 +2488,7 @@ def _read_offset_value(self, f: io.BufferedWriter, offset: int, size: int) -> in else: return int.from_bytes(f.read(4), "big", signed=True) - def _update_offset_value(self, f: io.BufferedWriter, offset: int, size: int, value: Any) -> None: + def _update_offset_value(self, f: BinaryIO, offset: int, size: int, value: Any) -> None: """ Writes "value" into location "offset" in file "f". """ @@ -2499,7 +2498,7 @@ def _update_offset_value(self, f: io.BufferedWriter, offset: int, size: int, val else: f.write(struct.pack(">i", value)) - def _update_aedr_link(self, f: io.BufferedWriter, attrNum: int, zVar: bool, varNum: int, offset: int) -> None: + def _update_aedr_link(self, f: BinaryIO, attrNum: int, zVar: bool, varNum: int, offset: int) -> None: """ Updates variable aedr links @@ -2615,7 +2614,7 @@ def _checklistofNums(obj: Any) -> bool: else: return isinstance(obj, numbers.Number) or isinstance(obj, np.datetime64) - def _md5_compute(self, f: io.BufferedWriter) -> bytes: + def _md5_compute(self, f: BinaryIO) -> bytes: """ Computes the checksum of the file """ diff --git a/cdflib/epochs.py b/cdflib/epochs.py index 118e369..5a8c4cc 100644 --- a/cdflib/epochs.py +++ b/cdflib/epochs.py @@ -44,11 +44,13 @@ class CDFepoch: Example ------- + ```python >>> import cdflib - # Convert to an epoch + >>> # Convert to an epoch >>> epoch = cdflib.cdfepoch.compute_epoch([2017,1,1,1,1,1,111]) - # Convert from an epoch + >>> # Convert from an epoch >>> time = cdflib.cdfepoch.to_datetime(epoch) # Or pass epochs via CDF.varget. + ``` """ version = 3 @@ -474,7 +476,7 @@ def breakdown_tt2000(tt2000: cdf_tt2000_type) -> np.ndarray: Parameters ---------- - epochs : array-like + tt2000 : array-like Single, list, tuple, or np.array of tt2000 values Returns diff --git a/cdflib/epochs_astropy.py b/cdflib/epochs_astropy.py index 2b0512c..cd4e97f 100644 --- a/cdflib/epochs_astropy.py +++ b/cdflib/epochs_astropy.py @@ -2,8 +2,6 @@ ################## CDF Astropy Epochs ################## - -@author: Bryan Harter """ import datetime from datetime import timezone diff --git a/cdflib/xarray/cdf_to_xarray.py b/cdflib/xarray/cdf_to_xarray.py index ab16c5f..b06b950 100644 --- a/cdflib/xarray/cdf_to_xarray.py +++ b/cdflib/xarray/cdf_to_xarray.py @@ -689,59 +689,73 @@ def cdf_to_xarray(filename: str, to_datetime: bool = True, to_unixtime: bool = F """ This function converts CDF files into XArray Dataset Objects. 
- Parameters: - filename (str): The path to the CDF file to read - to_datetime (bool, optional): Whether or not to convert CDF_EPOCH/EPOCH_16/TT2000 to datetime64, or leave them as is - to_unixtime (bool, optional): Whether or not to convert CDF_EPOCH/EPOCH_16/TT2000 to unixtime, or leave them as is - fillval_to_nan (bool, optional): If True, any data values that match the FILLVAL attribute for a variable will be set to NaN - - Returns: - An XArray Dataset Object - - Example MMS: - >>> # Import necessary libraries - >>> import cdflib.xarray - >>> import xarray as xr - >>> import os - >>> import urllib.request - - >>> # Download a CDF file - >>> fname = 'mms2_fgm_srvy_l2_20160809_v4.47.0.cdf' - >>> url = ("https://lasp.colorado.edu/maven/sdc/public/data/sdc/web/cdflib_testing/mms2_fgm_srvy_l2_20160809_v4.47.0.cdf") - >>> if not os.path.exists(fname): - >>> urllib.request.urlretrieve(url, fname) - - >>> # Load in and display the CDF file - >>> mms_data = cdflib.xarray.cdf_to_xarray("mms2_fgm_srvy_l2_20160809_v4.47.0.cdf", to_unixtime=True, fillval_to_nan=True) - - >>> # Show off XArray functionality - >>> - >>> # Slice the data using built in XArray functions - >>> mms_data2 = mms_data.isel(dim0=0) - >>> # Plot the sliced data using built in XArray functions - >>> mms_data2['mms2_fgm_b_gse_srvy_l2'].plot() - >>> # Zoom in on the slices data in time using built in XArray functions - >>> mms_data3 = mms_data2.isel(Epoch=slice(716000,717000)) - >>> # Plot the zoomed in sliced data using built in XArray functionality - >>> mms_data3['mms2_fgm_b_gse_srvy_l2'].plot() - - Example THEMIS: - >>> # Import necessary libraries - >>> import cdflib.xarray - >>> import xarray as xr - >>> import os - >>> import urllib.request - - >>> # Download a CDF file - >>> fname = 'thg_l2_mag_amd_20070323_v01.cdf' - >>> url = ("https://lasp.colorado.edu/maven/sdc/public/data/sdc/web/cdflib_testing/thg_l2_mag_amd_20070323_v01.cdf") - >>> if not os.path.exists(fname): - >>> urllib.request.urlretrieve(url, fname) - - >>> # Load in and display the CDF file - >>> thg_data = cdflib.xarray.cdf_to_xarray(fname, to_unixtime=True, fillval_to_nan=True) - - Processing Steps: + Parameters + ---------- + filename : str + The path to the CDF file to read + to_datetime : bool, optional + Whether or not to convert CDF_EPOCH/EPOCH_16/TT2000 to datetime64, or leave them as is + to_unixtime : bool, optional + Whether or not to convert CDF_EPOCH/EPOCH_16/TT2000 to unixtime, or leave them as is + fillval_to_nan : bool, optional + If True, any data values that match the FILLVAL attribute for a variable will be set to NaN + + Returns + ------- + dataset : xarray.Dataset + An XArray Dataset object containing all of the data and attributes from the CDF file + + Example MMS + ----------- + ```python + >>> # Import necessary libraries + >>> import cdflib.xarray + >>> import xarray as xr + >>> import os + >>> import urllib.request + + >>> # Download a CDF file + >>> fname = 'mms2_fgm_srvy_l2_20160809_v4.47.0.cdf' + >>> url = ("https://lasp.colorado.edu/maven/sdc/public/data/sdc/web/cdflib_testing/mms2_fgm_srvy_l2_20160809_v4.47.0.cdf") + >>> if not os.path.exists(fname): + >>> urllib.request.urlretrieve(url, fname) + + >>> # Load in and display the CDF file + >>> mms_data = cdflib.xarray.cdf_to_xarray("mms2_fgm_srvy_l2_20160809_v4.47.0.cdf", to_unixtime=True, fillval_to_nan=True) + + >>> # Show off XArray functionality + + >>> # Slice the data using built in XArray functions + >>> mms_data2 = mms_data.isel(dim0=0) + >>> # Plot the sliced data 
using built in XArray functions + >>> mms_data2['mms2_fgm_b_gse_srvy_l2'].plot() + >>> # Zoom in on the sliced data in time using built in XArray functions + >>> mms_data3 = mms_data2.isel(Epoch=slice(716000,717000)) + >>> # Plot the zoomed in sliced data using built in XArray functionality + >>> mms_data3['mms2_fgm_b_gse_srvy_l2'].plot() + ``` + + Example THEMIS + -------------- + ```python + >>> # Import necessary libraries + >>> import cdflib.xarray + >>> import xarray as xr + >>> import os + >>> import urllib.request + + >>> # Download a CDF file + >>> fname = 'thg_l2_mag_amd_20070323_v01.cdf' + >>> url = ("https://lasp.colorado.edu/maven/sdc/public/data/sdc/web/cdflib_testing/thg_l2_mag_amd_20070323_v01.cdf") + >>> if not os.path.exists(fname): + >>> urllib.request.urlretrieve(url, fname) + + >>> # Load in and display the CDF file + >>> thg_data = cdflib.xarray.cdf_to_xarray(fname, to_unixtime=True, fillval_to_nan=True) + ``` + + Processing Steps + ---------------- 1. For each variable in the CDF file 1. Determine the name of the dimension that spans the data "records" - Check if the variable itself might be a dimension diff --git a/cdflib/xarray/xarray_to_cdf.py b/cdflib/xarray/xarray_to_cdf.py index 9cb9901..b9fae86 100644 --- a/cdflib/xarray/xarray_to_cdf.py +++ b/cdflib/xarray/xarray_to_cdf.py @@ -891,85 +891,105 @@ def xarray_to_cdf( """ This function converts XArray Dataset objects into CDF files. - Parameters: - xarray_dataset (xarray.Dataset): The XArray Dataset object that you'd like to convert into a CDF file - file_name (str): The path to the place the newly created CDF file - unix_time_to_cdf_time (bool, optional): Whether or not to assume variables that will become a CDF_EPOCH/EPOCH16/TT2000 are a unix timestamp - istp (bool, optional): Whether or not to do checks on the Dataset object to attempt to enforce CDF compliance - terminate_on_warning (bool, optional): Whether or not to throw an error when given warnings or to continue trying to make the file - auto_fix_depends (bool, optional): Whether or not to automatically add dependencies - record_dimensions (list of str, optional): If the code cannot determine which dimensions should be made into CDF records, you may provide a list of them here - compression (int, optional): The level of compression to gzip the data in the variables. Default is no compression, standard is 6. - nan_to_fillval (bool, optional): Convert all np.nan and np.datetime64('NaT') to the standard CDF FILLVALs.
- Returns: - None, but generates a CDF file - - Example CDF file from scratch: - >>> # Import the needed libraries - >>> from cdflib.xarray import xarray_to_cdf - >>> import xarray as xr - >>> import os - >>> import urllib.request - - >>> # Create some fake data - >>> var_data = [[1, 2, 3], [1, 2, 3], [1, 2, 3]] - >>> var_dims = ['epoch', 'direction'] - >>> data = xr.Variable(var_dims, var_data) - - >>> # Create fake epoch data - >>> epoch_data = [1, 2, 3] - >>> epoch_dims = ['epoch'] - >>> epoch = xr.Variable(epoch_dims, epoch_data) - - >>> # Combine the two into an xarray Dataset and export as CDF (this will print out many ISTP warnings) - >>> ds = xr.Dataset(data_vars={'data': data, 'epoch': epoch}) - >>> xarray_to_cdf(ds, 'hello.cdf') - - >>> # Add some global attributes - >>> global_attributes = {'Project': 'Hail Mary', - >>> 'Source_name': 'Thin Air', - >>> 'Discipline': 'None', - >>> 'Data_type': 'counts', - >>> 'Descriptor': 'Midichlorians in unicorn blood', - >>> 'Data_version': '3.14', - >>> 'Logical_file_id': 'SEVENTEEN', - >>> 'PI_name': 'Darth Vader', - >>> 'PI_affiliation': 'Dark Side', - >>> 'TEXT': 'AHHHHH', - >>> 'Instrument_type': 'Banjo', - >>> 'Mission_group': 'Impossible', - >>> 'Logical_source': ':)', - >>> 'Logical_source_description': ':('} - - >>> # Lets add a new coordinate variable for the "direction" - >>> dir_data = [1, 2, 3] - >>> dir_dims = ['direction'] - >>> direction = xr.Variable(dir_dims, dir_data) - - >>> # Recreate the Dataset with this new objects, and recreate the CDF - >>> ds = xr.Dataset(data_vars={'data': data, 'epoch': epoch, 'direction':direction}, attrs=global_attributes) - >>> os.remove('hello.cdf') - >>> xarray_to_cdf(ds, 'hello.cdf') - - Example netCDF -> CDF conversion: - >>> # Download a netCDF file (if needed) - >>> fname = 'dn_magn-l2-hires_g17_d20211219_v1-0-1.nc' - >>> url = ("https://lasp.colorado.edu/maven/sdc/public/data/sdc/web/cdflib_testing/dn_magn-l2-hires_g17_d20211219_v1-0-1.nc") - >>> if not os.path.exists(fname): - >>> urllib.request.urlretrieve(url, fname) - - >>> # Load in the dataset, and set VAR_TYPES attributes (the most important attribute as far as this code is concerned) - >>> goes_r_mag = xr.load_dataset("dn_magn-l2-hires_g17_d20211219_v1-0-1.nc") - >>> for var in goes_r_mag: - >>> goes_r_mag[var].attrs['VAR_TYPE'] = 'data' - >>> goes_r_mag['coordinate'].attrs['VAR_TYPE'] = 'support_data' - >>> goes_r_mag['time'].attrs['VAR_TYPE'] = 'support_data' - >>> goes_r_mag['time_orbit'].attrs['VAR_TYPE'] = 'support_data' - - >>> # Create the CDF file - >>> xarray_to_cdf(goes_r_mag, 'hello.cdf') - - Processing Steps: + Parameters + ---------- + xarray_dataset : xarray.Dataset + The XArray Dataset object that you'd like to convert into a CDF file + file_name : str + The path to the place the newly created CDF file + unix_time_to_cdf_time : bool, optional + Whether or not to assume variables that will become a CDF_EPOCH/EPOCH16/TT2000 are a unix timestamp + istp : bool, optional + Whether or not to do checks on the Dataset object to attempt to enforce CDF compliance + terminate_on_warning : bool, optional + Whether or not to throw an error when given warnings or to continue trying to make the file + auto_fix_depends : bool, optional + Whether or not to automatically add dependencies + record_dimensions : list of str, optional + If the code cannot determine which dimensions should be made into CDF records, you may provide a list of them here + compression : int, optional + The level of compression to gzip the data in the 
variables. Default is no compression, standard is 6. + nan_to_fillval : bool, optional + Convert all np.nan and np.datetime64('NaT') to the standard CDF FILLVALs. + + Returns + ------- + None + Function generates a CDF file + + Example CDF file from scratch + ------------------------------ + ```python + >>> # Import the needed libraries + >>> from cdflib.xarray import xarray_to_cdf + >>> import xarray as xr + >>> import os + >>> import urllib.request + + >>> # Create some fake data + >>> var_data = [[1, 2, 3], [1, 2, 3], [1, 2, 3]] + >>> var_dims = ['epoch', 'direction'] + >>> data = xr.Variable(var_dims, var_data) + + >>> # Create fake epoch data + >>> epoch_data = [1, 2, 3] + >>> epoch_dims = ['epoch'] + >>> epoch = xr.Variable(epoch_dims, epoch_data) + + >>> # Combine the two into an xarray Dataset and export as CDF (this will print out many ISTP warnings) + >>> ds = xr.Dataset(data_vars={'data': data, 'epoch': epoch}) + >>> xarray_to_cdf(ds, 'hello.cdf') + + >>> # Add some global attributes + >>> global_attributes = {'Project': 'Hail Mary', + >>> 'Source_name': 'Thin Air', + >>> 'Discipline': 'None', + >>> 'Data_type': 'counts', + >>> 'Descriptor': 'Midichlorians in unicorn blood', + >>> 'Data_version': '3.14', + >>> 'Logical_file_id': 'SEVENTEEN', + >>> 'PI_name': 'Darth Vader', + >>> 'PI_affiliation': 'Dark Side', + >>> 'TEXT': 'AHHHHH', + >>> 'Instrument_type': 'Banjo', + >>> 'Mission_group': 'Impossible', + >>> 'Logical_source': ':)', + >>> 'Logical_source_description': ':('} + + >>> # Lets add a new coordinate variable for the "direction" + >>> dir_data = [1, 2, 3] + >>> dir_dims = ['direction'] + >>> direction = xr.Variable(dir_dims, dir_data) + + >>> # Recreate the Dataset with this new objects, and recreate the CDF + >>> ds = xr.Dataset(data_vars={'data': data, 'epoch': epoch, 'direction':direction}, attrs=global_attributes) + >>> os.remove('hello.cdf') + >>> xarray_to_cdf(ds, 'hello.cdf') + ``` + + Example netCDF -> CDF conversion + -------------------------------- + ```python + >>> # Download a netCDF file (if needed) + >>> fname = 'dn_magn-l2-hires_g17_d20211219_v1-0-1.nc' + >>> url = ("https://lasp.colorado.edu/maven/sdc/public/data/sdc/web/cdflib_testing/dn_magn-l2-hires_g17_d20211219_v1-0-1.nc") + >>> if not os.path.exists(fname): + >>> urllib.request.urlretrieve(url, fname) + + >>> # Load in the dataset, and set VAR_TYPES attributes (the most important attribute as far as this code is concerned) + >>> goes_r_mag = xr.load_dataset("dn_magn-l2-hires_g17_d20211219_v1-0-1.nc") + >>> for var in goes_r_mag: + >>> goes_r_mag[var].attrs['VAR_TYPE'] = 'data' + >>> goes_r_mag['coordinate'].attrs['VAR_TYPE'] = 'support_data' + >>> goes_r_mag['time'].attrs['VAR_TYPE'] = 'support_data' + >>> goes_r_mag['time_orbit'].attrs['VAR_TYPE'] = 'support_data' + + >>> # Create the CDF file + >>> xarray_to_cdf(goes_r_mag, 'hello.cdf') + ``` + + Processing Steps + ---------------- 1. Determines the list of dimensions that represent time-varying dimensions. These ultimately become the "records" of the CDF file - If it is named "epoch" or "epoch_N", it is considered time-varying - If a variable points to another variable with a DEPEND_0 attribute, it is considered time-varying @@ -984,7 +1004,8 @@ def xarray_to_cdf( 6. Optionally, convert variables with the name "epoch" or "epoch_N" to CDF_TT2000 7. Write all variables and global attributes to the CDF file! 
- ISTP Warnings: + ISTP Warnings + ------------- If ISTP=true, these are some of the common things it will check: - Missing or invalid VAR_TYPE variable attributes @@ -995,7 +1016,8 @@ def xarray_to_cdf( - Missing an "epoch" dimension - DEPEND_N attribute pointing to a variable with uncompatible dimensions - CDF Data Types: + CDF Data Types + -------------- All variable data is automatically converted to one of the following CDF types, based on the type of data in the xarray Dataset: ============= =============== @@ -1020,7 +1042,7 @@ def xarray_to_cdf( ============= =============== If you want to attempt to cast your data to a different type, you need to add an attribute to your variable called "CDF_DATA_TYPE". - xarray_to_cdf will read this attribute and override the default conversions. Valid choices are: + xarray_to_cdf will read this attribute and override the default conversions. Valid choices are - Integers: CDF_INT1, CDF_INT2, CDF_INT4, CDF_INT8 - Unsigned Integers: CDF_UINT1, CDF_UINT2, CDF_UINT4 @@ -1098,10 +1120,10 @@ def xarray_to_cdf( if len(d[var].dims) > 0: if var in time_varying_dimensions or var in depend_0_vars: - dim_sizes = d[var].shape[1:] # type: ignore + dim_sizes = list(d[var].shape[1:]) record_vary = True else: - dim_sizes = d[var].shape + dim_sizes = list(d[var].shape) record_vary = False else: dim_sizes = [] @@ -1146,7 +1168,7 @@ def xarray_to_cdf( "Data_Type": cdf_data_type, "Num_Elements": cdf_num_elements, "Rec_Vary": record_vary, - "Dim_Sizes": list(dim_sizes), + "Dim_Sizes": dim_sizes, "Compress": compression, } diff --git a/codecov.yml b/codecov.yml deleted file mode 100644 index 8dfc929..0000000 --- a/codecov.yml +++ /dev/null @@ -1 +0,0 @@ -comment: true diff --git a/doc/Makefile b/doc/Makefile deleted file mode 100644 index a95a274..0000000 --- a/doc/Makefile +++ /dev/null @@ -1,26 +0,0 @@ -# Minimal makefile for Sphinx documentation -# - -# You can set these variables from the command line. -SPHINXOPTS = -W --keep-going -SPHINXBUILD = sphinx-build -SPHINXPROJ = cdflib -SOURCEDIR = . -BUILDDIR = _build - -# Put it first so that "make" without argument is like "make help". -help: - @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) - - -clean: - @echo Removing files created by sphinx-build - rm -rf $(BUILDDIR) - rm -rf $(SOURCEDIR)/api/ - -.PHONY: help Makefile - -# Catch-all target: route all unknown targets to Sphinx using the new -# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). -%: Makefile - @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/doc/conf.py b/doc/conf.py deleted file mode 100644 index 448533e..0000000 --- a/doc/conf.py +++ /dev/null @@ -1,163 +0,0 @@ -# -# Configuration file for the Sphinx documentation builder. -# -# This file does only contain a selection of the most common options. For a -# full list see the documentation: -# http://www.sphinx-doc.org/en/master/config - -# -- Path setup -------------------------------------------------------------- - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. 
-# -import cdflib - -# -- Project information ----------------------------------------------------- - -project = "cdflib" -author = "Bryan Harter, Michael Liu" -version = str(cdflib.__version__) - -# The short X.Y version -# version = '' -# The full version, including alpha/beta/rc tags -release = "" - - -# -- General configuration --------------------------------------------------- - -# If your documentation needs a minimal Sphinx version, state it here. -# -# needs_sphinx = '1.0' - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom -# ones. -extensions = [ - "sphinx.ext.autodoc", - "sphinx.ext.viewcode", - "sphinx.ext.napoleon", - "sphinx.ext.autosummary", - "sphinx_copybutton", - "sphinx.ext.linkcode", - "sphinx.ext.intersphinx", - "sphinx_automodapi.automodapi", -] - -numpydoc_class_members_toctree = False -numpydoc_show_class_members = False -autodoc_typehints = "description" - -# Add any paths that contain templates here, relative to this directory. -templates_path = ["_templates"] -default_role = "any" -nitpicky = True -nitpick_ignore = [("py:class", "array-like"), ("py:class", "optional")] -# The suffix(es) of source filenames. -# You can specify multiple suffix as a list of string: -# -# source_suffix = ['.rst', '.md'] -source_suffix = ".rst" - -# The master toctree document. -master_doc = "index" - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -# -# This is also used if you do content translation via gettext catalogs. -# Usually you set "language" from the command line for these cases. -language = "en" - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -# This pattern also affects html_static_path and html_extra_path . -exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"] - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = "sphinx" - - -# -- Options for HTML output ------------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -# -html_theme = "sphinx_rtd_theme" - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -# -# html_theme_options = {} - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -# html_static_path = ['_static'] - -# Custom sidebar templates, must be a dictionary that maps document names -# to template names. -# -# The default sidebars (for documents that don't match any pattern) are -# defined by theme itself. Builtin themes are using these templates by -# default: ``['localtoc.html', 'relations.html', 'sourcelink.html', -# 'searchbox.html']``. -# -# html_sidebars = {} - - -# -- Options for HTMLHelp output --------------------------------------------- - -# Output file base name for HTML help builder. -htmlhelp_basename = "cdflibdoc" - - -# -- Options for LaTeX output ------------------------------------------------ - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, -# author, documentclass [howto, manual, or own class]). 
-latex_documents = [ - (master_doc, "cdflib.tex", "cdflib Documentation", "Bryan Harter, Michael Liu", "manual"), -] - - -# -- Options for manual page output ------------------------------------------ - -# One entry per manual page. List of tuples -# (source start file, name, description, authors, manual section). -man_pages = [(master_doc, "cdflib", "cdflib Documentation", [author], 1)] - - -# -- Options for Texinfo output ---------------------------------------------- - -# Grouping the document tree into Texinfo files. List of tuples -# (source start file, target name, title, author, -# dir menu entry, description, category) -texinfo_documents = [ - (master_doc, "cdflib", "cdflib Documentation", author, "cdflib", "One line description of project.", "Miscellaneous"), -] - - -# -- Extension configuration ------------------------------------------------- - -# -- Options for intersphinx extension --------------------------------------- - -# Example configuration for intersphinx: refer to the Python standard library. -intersphinx_mapping = { - "python": ("https://docs.python.org/3", None), - "numpy": ("https://numpy.org/doc/stable", None), - "astropy": ("https://docs.astropy.org/en/stable/", None), - "xarray": ("https://docs.xarray.dev/en/stable/", None), -} - - -def linkcode_resolve(domain, info): - if domain != "py": - return None - if not info["module"]: - return None - filename = info["module"].replace(".", "/") - return "https://github.com/MAVENSDC/cdflib/tree/master/%s.py" % filename diff --git a/doc/development.rst b/doc/development.rst deleted file mode 100644 index f131406..0000000 --- a/doc/development.rst +++ /dev/null @@ -1,15 +0,0 @@ -Developing cdflib -================= - -Documentation -------------- -To build the documentation you will need to install the documentation -requirements using:: - - pip install .[docs] - -This will install cdflib and all the packages need to make the documenation. - -Versioning ----------- -The package version is automatically determined using `setuptools_scm `__, so does not need to be manually incremented when doing a new release. diff --git a/doc/index.rst b/doc/index.rst deleted file mode 100644 index 7d09813..0000000 --- a/doc/index.rst +++ /dev/null @@ -1,49 +0,0 @@ -cdflib -====== - -A python package to read CDF files without needing to install the CDF NASA library. - -**Last Built**: |today| | **Version**: |version| | **Source**: `github`_ | **Archive**: `zenodo`_. - -.. _github: https://github.com/MAVENSDC/cdflib -.. _zenodo: https://zenodo.org/record/4746617#.Y5NfWXbMKF4 - -Installing ----------- -cdflib requires python 3 and numpy. To install run - -.. code:: - bash - - python3 -m pip install cdflib - - -What is cdflib? ------------------- - -cdflib is an effort to replicate the CDF libraries using a pure python implementation. -This means users do not need to install the `CDF NASA libraries `_. - -The only module you need to install is ``numpy``, but there are a few things you can do with ``astropy`` and ``xarray``. - -While this originally started as a way to read PDS-archive compliant CDF files, thanks to many contributors, it has grown to be able to handle every type of CDF file. - -What can cdflib do? 
-------------------- - -- Ability to read variables and attributes from CDF files (see ``CDF Reader Class``) -- Writes CDF version 3 files (see ``CDF Writer Class``) -- Can convert between CDF time types (EPOCH/EPOCH16/TT2000) to other common time formats (see ``CDF Time Conversions``) -- Can convert CDF files into XArray Dataset objects and vice versa, attempting to maintain ISTP compliance (see ``Working with XArray``) - -.. note:: - While we try to simplify things in this documentation, the full API description of each module can be found in the ``API Reference`` section - - -.. toctree:: - :maxdepth: 1 - :caption: Contents: - - modules/index - changelog - development diff --git a/doc/make.bat b/doc/make.bat deleted file mode 100644 index 870fa73..0000000 --- a/doc/make.bat +++ /dev/null @@ -1,36 +0,0 @@ -@ECHO OFF - -pushd %~dp0 - -REM Command file for Sphinx documentation - -if "%SPHINXBUILD%" == "" ( - set SPHINXBUILD=sphinx-build -) -set SOURCEDIR=. -set BUILDDIR=_build -set SPHINXPROJ=cdflib - -if "%1" == "" goto help - -%SPHINXBUILD% >NUL 2>NUL -if errorlevel 9009 ( - echo. - echo.The 'sphinx-build' command was not found. Make sure you have Sphinx - echo.installed, then set the SPHINXBUILD environment variable to point - echo.to the full path of the 'sphinx-build' executable. Alternatively you - echo.may add the Sphinx directory to PATH. - echo. - echo.If you don't have Sphinx installed, grab it from - echo.http://sphinx-doc.org/ - exit /b 1 -) - -%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% -goto end - -:help -%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% - -:end -popd diff --git a/doc/modules/dataclasses.rst b/doc/modules/dataclasses.rst deleted file mode 100644 index c3c4f79..0000000 --- a/doc/modules/dataclasses.rst +++ /dev/null @@ -1,8 +0,0 @@ -.. _dataclasses: - -Dataclasses -=========== -These classes store information returned from reading a CDF file. - -.. automodule:: cdflib.dataclasses - :members: diff --git a/doc/modules/index.rst b/doc/modules/index.rst deleted file mode 100644 index ded64bb..0000000 --- a/doc/modules/index.rst +++ /dev/null @@ -1,11 +0,0 @@ -API Reference -============= - -.. toctree:: - :maxdepth: 1 - - cdfread - cdfwrite - cdfepoch - dataclasses - xarray diff --git a/doc/modules/cdfepoch.rst b/docs/cdfepoch.md similarity index 65% rename from doc/modules/cdfepoch.rst rename to docs/cdfepoch.md index 55b69d5..3da2da1 100644 --- a/doc/modules/cdfepoch.rst +++ b/docs/cdfepoch.md @@ -1,6 +1,4 @@ -********************** -CDF Time Conversions -********************** +# cdfepoch There are three (3) unique epoch data types in CDF: CDF_EPOCH, CDF_EPOCH16 and CDF_TIME_TT2000. @@ -10,9 +8,4 @@ There are three (3) unique epoch data types in CDF: CDF_EPOCH, CDF_EPOCH16 and C The following two classes contain functions to convert those times into formats that are in more standard use. - -.. automodapi:: cdflib.epochs - :no-inheritance-diagram: - -.. automodapi:: cdflib.epochs_astropy - :no-inheritance-diagram: +::: cdflib.epochs diff --git a/doc/modules/cdfread.rst b/docs/cdfread.md similarity index 79% rename from doc/modules/cdfread.rst rename to docs/cdfread.md index a12dc87..58c50e4 100644 --- a/doc/modules/cdfread.rst +++ b/docs/cdfread.md @@ -1,9 +1,6 @@ -CDF Reader Class -================ +# cdfread -.. autoclass:: cdflib.cdfread.CDF - :members: - :undoc-members: +::: cdflib.cdfread Sample Usage ------------ @@ -11,13 +8,17 @@ Sample Usage To begin accessing the data within a CDF file, first create a new CDF class. 
This can be done with the following commands +```python >>> import cdflib >>> cdf_file = cdflib.CDF('/path/to/cdf_file.cdf') +``` Then, you can call various functions on the variable. -For example: +For example +```python >>> x = cdf_file.varget("NameOfVariable", startrec = 0, endrec = 150) +``` This command will return all data inside of the variable ``Variable1``, from records 0 to 150. diff --git a/doc/modules/cdfwrite.rst b/docs/cdfwrite.md similarity index 98% rename from doc/modules/cdfwrite.rst rename to docs/cdfwrite.md index 8038fcf..1d254ed 100644 --- a/doc/modules/cdfwrite.rst +++ b/docs/cdfwrite.md @@ -1,13 +1,11 @@ -CDF Writer Class -================= - -.. autoclass:: cdflib.cdfwrite.CDF - :members: +# cdfwrite +::: cdflib.cdfwrite Sample Usage ------------ +```python >>> import cdfwrite >>> import cdfread >>> import numpy as np @@ -90,3 +88,4 @@ Sample Usage >>> var_data=[varrecs,vardata]) >>> cdf_master.close() >>> cdf_file.close() +``` diff --git a/doc/changelog.rst b/docs/changelog.md similarity index 99% rename from doc/changelog.rst rename to docs/changelog.md index fbe1978..8e1a5b8 100644 --- a/doc/changelog.rst +++ b/docs/changelog.md @@ -1,11 +1,10 @@ -========= Changelog ========= 1.3.6 ===== cdf_to_xarray -------------- +-------------- - Stopping uncertainty "DELTA_VAR" variables from becoming coordinate variables cdfwrite diff --git a/docs/dataclasses.md b/docs/dataclasses.md new file mode 100644 index 0000000..750d54c --- /dev/null +++ b/docs/dataclasses.md @@ -0,0 +1,3 @@ +# dataclasses + +::: cdflib.dataclasses diff --git a/docs/development.md b/docs/development.md new file mode 100644 index 0000000..094d304 --- /dev/null +++ b/docs/development.md @@ -0,0 +1,74 @@ +# Developing cdflib + +## Developer Environment + +Typically, development for cdflib is done right on GitHub, using GitHub Codespaces. Codespaces is ultimately just a slightly fancier devcontainer, so you can also use your local system if you have devcontainers set up. However, CU has weird licensing agreements about Docker Desktop, so I try to avoid it altogether by using Codespaces. As CU employees, we get 60 free hours of codespace use every month. + +The setup that Codespaces uses is located in the .devcontainer folder. The devcontainer installs all requirements for cdflib, sets up pre-commit, and also starts up a virtual desktop. The reason we start a virtual desktop in the devcontainer is in case we want to perform any plotting with matplotlib. The plots will show up within the browser. + +A new codespace should be created for every PR you are working on. Once the PR is merged, you should delete the codespace (in addition to the branch). + +## Tests + +Unit testing is simply done through pytest. All unit tests are located in the `/tests` folder. + +### Remote Data + +Some of the unit tests run using sample CDF files that need to be downloaded from the MAVEN Science Data Center website. To run these tests in particular, you can use the command: + +``` +pytest -m remote_data +``` + +However, be warned: the tests will take approximately 15 minutes to run with the ~20 or so CDF files that have caused issues at various points in development.
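+As a reference, the sketch below shows how a remote-data test can be marked so that `pytest -m remote_data` selects it. This is a minimal illustration, not one of the actual tests in `/tests` (it assumes the `remote_data` marker is registered in the pytest configuration); the sample URL is borrowed from the docstring examples elsewhere in this repository.
+
+```python
+import urllib.request
+
+import pytest
+
+import cdflib
+
+
+@pytest.mark.remote_data  # selected by `pytest -m remote_data`
+def test_read_remote_sample(tmp_path):
+    # Download one of the sample CDF files hosted on the MAVEN SDC server
+    url = (
+        "https://lasp.colorado.edu/maven/sdc/public/data/sdc/web/cdflib_testing/"
+        "thg_l2_mag_amd_20070323_v01.cdf"
+    )
+    fname = tmp_path / "sample.cdf"
+    urllib.request.urlretrieve(url, fname)
+
+    # Reading the downloaded file should succeed and report basic file info
+    cdf_file = cdflib.CDF(fname)
+    assert cdf_file.cdf_info() is not None
+```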
+
+## Working on a Ticket
+
+The recommended workflow when you want to work on a ticket:
+
+1) Create a new branch (there should be a button to do so on the GitHub issue page itself)
+2) Start a new Codespace on the branch (again, there should be a button to do so after you create a branch)
+3) Make your changes
+4) If your changes significantly alter the way CDF files are read or written, run `pytest -m remote_data` to run the unit tests against the CDF files stored on the MAVEN SDC server.
+5) Ensure your code passes the pre-commit checks (don't worry, they will tell you if it doesn't)
+6) Create a new PR in the repository, and assign Bryan Harter as a reviewer.
+7) Once I have approved the changes, perform a "Squash and Merge", delete your branch, and delete your Codespace. (All of these should be buttons on the page for your specific Pull Request)
+
+## Documentation
+Documentation is built using "mkdocs" whenever there is a new release of the library. See `.github/workflows/docs.yaml`.
+
+The above workflow updates the branch `gh-pages` to build documentation from the markdown files in the "docs" folder. The configuration for mkdocs is located in the file `mkdocs.yml` in the root directory of the project.
+
+To build the documentation, you will need to install the documentation requirements using
+
+```
+pip install .[docs]
+```
+
+If you have made changes to the documentation that you would like to check prior to merging, you can serve the documentation to yourself using
+
+```
+mkdocs serve
+```
+
+Note that this works even on Codespaces. A pop-up will tell you that there is a new port open, and you can connect to it.
+
+## Pre-commit
+
+This repository is set up to run checks on the code prior to it being committed to git. The setup for pre-commit is in `.pre-commit-config.yaml` in the root of the project. In particular, the key processes that run before a commit are:
+
+1) `Autoflake` to remove unused imports and variables. Configuration for autoflake is in the `.flake8` file in the root directory.
+2) `isort` to keep imports in alphabetical order.
+3) `Black` to restyle the code automatically.
+4) `mypy` to use our static type hints to check the code and ensure all types match the functions they are passed into. Configuration for mypy is in the `mypy.ini` file in the root directory.
+
+## PyPI Release
+
+New versions of cdflib are released onto PyPI at [https://pypi.org/project/cdflib/](https://pypi.org/project/cdflib/).
+
+This project lives under LASP's PyPI organization, so all members and admins from that organization can make modifications to the PyPI project. The LASP PyPI organization is run by LASP's Data Systems division.
+
+New versions are released to PyPI when a GitHub release occurs; see the workflow in `.github/workflows/pypi-build.yaml`. There are no secret keys required; PyPI has been configured to trust deployments from `https://github.com/lasp/cdflib/.github/workflows/pypi-build.yaml`.
+
+### Versioning
+The package version is automatically determined using [setuptools_scm](https://github.com/pypa/setuptools_scm), so it does not need to be manually incremented when doing a new release.
diff --git a/docs/index.md b/docs/index.md
new file mode 100644
index 0000000..64d6dff
--- /dev/null
+++ b/docs/index.md
@@ -0,0 +1,26 @@
+# cdflib
+
+A Python package to read and write CDF files without needing to install the CDF NASA library.
+
+**Source**: [github](https://github.com/lasp/cdflib) | **Archive**: [zenodo](https://zenodo.org/record/4746617#.Y5NfWXbMKF4).
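+
+As a quick, hedged sketch of what a typical read session looks like (the file and variable names here are hypothetical placeholders):
+
+```python
+import cdflib
+
+cdf = cdflib.CDF("example.cdf")
+info = cdf.cdf_info()                      # file-level metadata (a dataclass)
+print(info.zVariables)                     # names of the zVariables in the file
+print(cdf.globalattsget())                 # global attributes as a dict
+print(cdf.varattsget(info.zVariables[0]))  # attributes attached to one variable
+```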
+
+## Installing
+cdflib requires Python 3.9+ and numpy. To install, run
+
+```bash
+python3 -m pip install cdflib
+```
+
+## What is cdflib?
+
+cdflib is an effort to replicate the CDF libraries using a pure Python implementation.
+This means users do not need to install the [CDF NASA libraries](https://cdf.gsfc.nasa.gov/).
+
+The only module you need to install is `numpy`, but there are a few extra things you can do with `astropy` and `xarray`.
+
+## What can cdflib do?
+
+- Read variables and attributes from CDF files (see [CDF Reading](cdfread.md))
+- Write CDF version 3 files (see [CDF Writing](cdfwrite.md))
+- Convert CDF time types (EPOCH/EPOCH16/TT2000) to other common time formats (see [CDF Time Conversions](cdfepoch.md))
+- Convert CDF files into XArray Dataset objects and vice versa, attempting to maintain ISTP compliance (see [Working with XArray](xarray.md))
diff --git a/doc/modules/xarray.rst b/docs/xarray.md
similarity index 51%
rename from doc/modules/xarray.rst
rename to docs/xarray.md
index 2c59788..3cc5ec5 100644
--- a/doc/modules/xarray.rst
+++ b/docs/xarray.md
@@ -1,12 +1,11 @@
-Working with XArray
-===================
+# xarray
 
 There are two functions for working with XArray Datasets,
-one for converting a CDF to a DataSet, and one for going the other way.
+one for converting a CDF to a Dataset, and one for going the other way.
 
 To use these you need the ``xarray`` package installed.
 
 These will attempt to determine any
-`ISTP Compliance <https://spdf.gsfc.nasa.gov/istp_guide/istp_guide.html>`_, and incorporate that into the output.
+[ISTP Compliance](https://spdf.gsfc.nasa.gov/istp_guide/istp_guide.html), and incorporate that into the output.
 
-.. automodapi:: cdflib.xarray
-   :no-inheritance-diagram:
+::: cdflib.xarray.cdf_to_xarray.cdf_to_xarray
+::: cdflib.xarray.xarray_to_cdf.xarray_to_cdf
diff --git a/meta.yaml b/meta.yaml
deleted file mode 100644
index a4af9ee..0000000
--- a/meta.yaml
+++ /dev/null
@@ -1,35 +0,0 @@
-{% set name = "cdflib" %}
-{% set version = "1.3.6" %}
-
-package:
-  name: "{{ name|lower }}"
-  version: "{{ version }}"
-
-source:
-  git_url: https://github.com/MAVENSDC/cdflib.git
-  git_depth: 20
-  git_rev: 1.3.6
-
-build:
-  number: 0
-  script: "{{ PYTHON }} -m pip install . -vv"
-
-requirements:
-  build:
-    - setuptools
-    - python
-    - pip
-  run:
-    - python
-    - numpy
-
-about:
-  home: "https://github.com/MAVENSDC/cdflib"
-  license: "MIT"
-  summary: "cdflib is a python module to read/write CDF (Common Data Format .cdf) files without needing to install the CDF NASA library."
-  doc_url: "https://cdflib.readthedocs.io"
-  dev_url: "https://github.com/MAVENSDC/cdflib"
-
-extra:
-  recipe-maintainers:
-    - MAVENSDC
diff --git a/mkdocs.yml b/mkdocs.yml
new file mode 100644
index 0000000..e5f1307
--- /dev/null
+++ b/mkdocs.yml
@@ -0,0 +1,123 @@
+site_name: cdflib
+theme:
+  name: material
+  features:
+    - announce.dismiss
+    - content.action.edit
+    - content.action.view
+    - content.code.annotate
+    - content.code.copy
+    - content.tooltips
+    - navigation.footer
+    - navigation.instant.preview
+    - navigation.path
+    - navigation.sections
+    - navigation.tabs
+    - navigation.tabs.sticky
+    - navigation.top
+    - search.highlight
+    - search.suggest
+    - toc.follow
+  palette:
+    - media: "(prefers-color-scheme)"
+      toggle:
+        icon: material/brightness-auto
+        name: Switch to light mode
+    - media: "(prefers-color-scheme: light)"
+      scheme: default
+      primary: teal
+      accent: purple
+      toggle:
+        icon: material/weather-sunny
+        name: Switch to dark mode
+    - media: "(prefers-color-scheme: dark)"
+      scheme: slate
+      primary: black
+      accent: lime
+      toggle:
+        icon: material/weather-night
+        name: Switch to system preference
+repo_url: https://github.com/lasp/cdflib
+nav:
+  - Home: index.md
+  - Tools:
+    - Reading CDFs: cdfread.md
+    - Writing CDFs: cdfwrite.md
+    - Time Conversions: cdfepoch.md
+    - dataclasses: dataclasses.md
+    - xarray: xarray.md
+  - Changelog: changelog.md
+  - Development: development.md
+
+markdown_extensions:
+  - abbr
+  - attr_list
+  - admonition
+  - footnotes
+  - md_in_html
+  - pymdownx.blocks.admonition
+  - pymdownx.blocks.details
+  - pymdownx.blocks.tab:
+      alternate_style: true
+      slugify: !!python/object/apply:pymdownx.slugs.slugify
+        kwds:
+          case: lower
+  - pymdownx.emoji:
+      emoji_index: !!python/name:material.extensions.emoji.twemoji
+      emoji_generator: !!python/name:material.extensions.emoji.to_svg
+  - pymdownx.highlight:
+      pygments_lang_class: true
+  - pymdownx.magiclink
+  - pymdownx.superfences:
+      custom_fences:
+        - name: mermaid
+          class: mermaid
+          format: !!python/name:pymdownx.superfences.fence_code_format
+  - pymdownx.tabbed:
+      alternate_style: true
+      slugify: !!python/object/apply:pymdownx.slugs.slugify
+        kwds:
+          case: lower
+  - pymdownx.tasklist:
+      custom_checkbox: true
+  - toc:
+      permalink: "¤"
+
+plugins:
+- search
+- mkdocstrings:
+    handlers:
+      python:
+        paths: [.]
+        inventories:
+        - https://docs.python.org/3/objects.inv
+        - https://mkdocstrings.github.io/objects.inv
+        - https://mkdocstrings.github.io/autorefs/objects.inv
+        - https://mkdocstrings.github.io/griffe/objects.inv
+        - https://python-markdown.github.io/objects.inv
+        options:
+          backlinks: tree
+          docstring_style: numpy
+          filters: public
+          heading_level: 1
+          inherited_members: true
+          line_length: 88
+          merge_init_into_class: true
+          parameter_headings: true
+          type_parameter_headings: true
+          preload_modules: [mkdocstrings]
+          relative_crossrefs: true
+          scoped_crossrefs: true
+          separate_signature: true
+          show_bases: false
+          show_inheritance_diagram: true
+          show_root_heading: true
+          show_root_full_path: false
+          show_signature_annotations: true
+          show_signature_type_parameters: true
+          show_source: true
+          show_symbol_type_heading: true
+          show_symbol_type_toc: true
+          signature_crossrefs: true
+          summary: true
diff --git a/pyproject.toml b/pyproject.toml
index 2180d63..f303587 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -2,8 +2,85 @@
 requires = ["setuptools>=45", "setuptools_scm[toml]>=6.2"]
 build-backend = "setuptools.build_meta"
 
+[project]
+name = "cdflib"
+authors = [
+    { name = "Bryan Harter", email = "harter@lasp.colorado.edu" },
+]
+description = "A python CDF reader toolkit"
+readme = "README.md"
+requires-python = ">= 3.9"
+keywords = ["CDF", "maven", "lasp", "PDS", "GSFC"]
+license = { file = "LICENSE" }
+classifiers = [
+    "Development Status :: 5 - Production/Stable",
+    "Environment :: Console",
+    "Intended Audience :: Science/Research",
+    "Operating System :: OS Independent",
+    "Programming Language :: Python :: 3.9",
+    "Programming Language :: Python :: 3.10",
+    "Programming Language :: Python :: 3.11",
+    "Programming Language :: Python :: 3.12",
+    "Programming Language :: Python :: 3.13",
+    "Programming Language :: Python :: 3 :: Only",
+    "Topic :: Utilities",
+]
+dependencies = [
+    "numpy >= 1.21",
+]
+dynamic = ["version"]
+
+[project.urls]
+Homepage = "https://github.com/lasp/cdflib"
+
+[project.optional-dependencies]
+tests = [
+    "astropy",
+    "hypothesis",
+    "pytest >= 3.9",
+    "pytest-cov",
+    "pytest-remotedata",
+    "xarray",
+    "h5netcdf",
+    "netcdf4",
+    "pooch"
+]
+docs = [
+    "mkdocs",
+    "mkdocs-material",
+    "mkdocstrings[python]",
+]
+dev = [
+    "ipython",
+    "pre-commit",
+    "matplotlib",
+]
+
+[tool.setuptools]
+include-package-data = true
+packages = ["cdflib"]
+
 [tool.setuptools_scm]
 write_to = "cdflib/_version.py"
 
 [tool.black]
 line-length = 132
+
+[tool.pytest.ini_options]
+minversion = "3.9"
+addopts = "-ra --cov=cdflib --cov-report=xml"
+filterwarnings = [
+    "ignore:ERFA function",
+    "ignore:numpy.ndarray size changed",
+]
+
+[tool.isort]
+balanced_wrapping = true
+default_section = "THIRDPARTY"
+include_trailing_comma = true
+known_first_party = ["cdflib"]
+length_sort = false
+line_length = 80
+multi_line_output = 3
+no_lines_before = "LOCALFOLDER"
+sections = ["STDLIB", "THIRDPARTY", "FIRSTPARTY", "LOCALFOLDER"]
diff --git a/setup.cfg b/setup.cfg
deleted file mode 100644
index 38acd7b..0000000
--- a/setup.cfg
+++ /dev/null
@@ -1,79 +0,0 @@
-[metadata]
-name = cdflib
-author = MAVEN SDC
-author_email = mavensdc@lasp.colorado.edu
-description = A python CDF reader toolkit
-url = https://github.com/MAVENSDC/cdflib
-keywords =
-    CDF
-    maven
-    lasp
-    PDS
-    GSFC
-classifiers =
-    Development Status :: 5 - Production/Stable
-    Environment :: Console
-    Intended Audience :: Science/Research
-    Operating System :: OS Independent
-    Programming Language :: Python :: 3.9
-    Programming Language :: Python :: 3.10
-    Programming Language :: Python :: 3.11
-    Programming Language :: Python :: 3.12
-    Programming Language :: Python :: 3 :: Only
-    Topic :: Utilities
-license_file = LICENSE
-long_description = file: README.md
-long_description_content_type = text/markdown
-
-[options]
-python_requires = >= 3.9
-include_package_data = True
-packages = cdflib
-install_requires =
-    numpy >= 1.21
-
-[options.extras_require]
-tests =
-    astropy
-    hypothesis
-    pytest >= 3.9
-    pytest-cov
-    pytest-remotedata
-    xarray
-    h5netcdf
-    netcdf4
-docs =
-    astropy
-    xarray
-    netcdf4
-    sphinx
-    sphinx-automodapi
-    sphinx-copybutton
-    sphinx_rtd_theme
-dev =
-    ipython
-    pre-commit
-    matplotlib
-
-[tool:pytest]
-minversion = 3.9
-addopts = -ra --cov=cdflib --cov-report=xml
-filterwarnings =
-    # Astropy emits various warnings when dealing with dates/times, which are
-    # not an issue
-    ignore:ERFA function
-    # This is expected but safe when using binaries compiled with different
-    # numpy versions
-    ignore:numpy.ndarray size changed
-
-[isort]
-balanced_wrapping = True
-skip =
-default_section = THIRDPARTY
-include_trailing_comma = True
-known_first_party = cdflib
-length_sort = False
-line_length = 80
-multi_line_output = 3
-no_lines_before = LOCALFOLDER
-sections = STDLIB, THIRDPARTY, FIRSTPARTY, LOCALFOLDER
diff --git a/tox.ini b/tox.ini
index 2fe49ca..4d29114 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,69 +1,25 @@
 [tox]
-min_version = 4.0
-envlist =
-    py{39,310,311,312,313}{,-online}
-    #py39-oldestdeps
-    py313-devdeps
-    build_docs
+envlist = py310, py311, py312, py313
+isolated_build = True
 
 [testenv]
-# tox environments are constructed with so-called 'factors' (or terms)
-# separated by hyphens, e.g. test-devdeps-cov. Lines below starting with factor:
-# will only take effect if that factor is included in the environment name. To
-# see a list of example environments that can be run, along with a description,
-# run:
-#
-#     tox -l -v
-#
-description =
-    run tests
-    oldestdeps: with the oldest supported version of key dependencies
-    devdeps: with the latest developer version of key dependencies
-    online: that require remote data (as well as the offline ones)
-
-# Run the tests in a temporary directory to not pollute the working directory with files or other downloads
-changedir = .tmp/{envname}
-
-# Pass through the following environment variables which may be needed for the CI
-pass_env =
-    # A variable to tell tests we are on a CI system
-    CI
-    # Custom compiler locations (such as ccache)
-    CC
-    # Location of locales (needed by sphinx on some systems)
-    LOCALE_ARCHIVE
-    # If the user has set a LC override we should follow it
-    # (note LANG is automatically passed through by tox)
-    LC_ALL
-
-set_env =
-    PYTEST_COMMAND = pytest --cov=cdflib {toxinidir}/tests/ {toxinidir}/doc/
-    devdeps: PIP_EXTRA_INDEX_URL = https://pypi.anaconda.org/astropy/simple https://pypi.anaconda.org/scientific-python-nightly-wheels/simple
-
-deps =
-    oldestdeps: minimum_dependencies
-    devdeps: astropy>=0.0.dev0
-    devdeps: numpy>=0.0.dev0
-    devdeps: pandas>=0.0.dev0 # needed to prevent pandas pulling in numpy<2, this can be removed after pandas does a release supporting 2.0
-
-# The following indicates which extras_require from setup.cfg will be installed
-extras =
-    tests
+# Installs the library + "tests" extra from pyproject.toml
+extras = tests
+commands =
+    pytest {posargs}
 
+# Optional: test against nightly builds of key dependencies
+[testenv:devdeps]
+description = Run against nightly versions of dependencies
 commands_pre =
-    oldestdeps: minimum_dependencies cdflib --filename requirements-min.txt
-    oldestdeps: pip install -r requirements-min.txt
-    pip freeze
-
+    pip install --extra-index-url https://pypi.anaconda.org/scientific-python-nightly-wheels/simple numpy xarray
 commands =
-    !online: {env:PYTEST_COMMAND} {posargs}
-    online: {env:PYTEST_COMMAND} --remote-data=any {posargs}
+    pytest {posargs}
 
+# Builds the documentation with mkdocs
 [testenv:build_docs]
-description = invoke sphinx-build to build the HTML docs
-change_dir =
-    doc
-extras =
-    docs
+description = invoke mkdocs to build the HTML docs
+# Installs the library + "docs" extra from pyproject.toml
+extras = docs
 commands =
-    sphinx-build -j auto --color -W --keep-going -b html -d _build/.doctrees . _build/html {posargs}
+    mkdocs build
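+
+# Example local invocations (a hedged sketch; the env names come from this file):
+#   tox -e py312        run the test suite under Python 3.12
+#   tox -e devdeps      run against nightly builds of numpy and xarray
+#   tox -e build_docs   build the HTML documentation with mkdocs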