diff --git a/alembic.ini b/alembic.ini index 113574be..07e86ef0 100644 --- a/alembic.ini +++ b/alembic.ini @@ -49,8 +49,8 @@ sqlalchemy.url = postgresql://metnorth@db.pcic.uvic.ca:5433/metnorth?keepalives= [metnorth_test] sqlalchemy.url = postgresql://metnorth@dbtest02.pcic.uvic.ca/metnorth?keepalives=1&keepalives_idle=300&keepalives_interval=300&keepalives_count=9 -[crmp_dbtest01] -sqlalchemy.url = postgresql://crmp@dbtest01.pcic.uvic.ca/crmp?keepalives=1&keepalives_idle=300&keepalives_interval=300&keepalives_count=9 +[crmp_new_cluster] +sqlalchemy.url = postgresql://crmp@/crmp?host=pg01.pcic.uvic.ca,pg02.pcic.uvic.ca&port=5432,5432&target_session_attrs=read-write [crmp_dbtest02_hx] sqlalchemy.url = postgresql://crmp@dbtest02.pcic.uvic.ca:5432/crmp_hx?keepalives=1&keepalives_idle=300&keepalives_interval=300&keepalives_count=9 diff --git a/pycds/__init__.py b/pycds/__init__.py index 69d27671..278213f7 100644 --- a/pycds/__init__.py +++ b/pycds/__init__.py @@ -74,6 +74,9 @@ from pycds.context import get_schema_name, get_su_role_name from pycds.util import schema_func, variable_tags +# Import database module to make it accessible as pycds.database for mocking in tests +from pycds import database as database + from .orm.tables import ( Base, Network, diff --git a/pycds/alembic/change_history_utils.py b/pycds/alembic/change_history_utils.py index a9cc2571..f732eafe 100644 --- a/pycds/alembic/change_history_utils.py +++ b/pycds/alembic/change_history_utils.py @@ -208,6 +208,28 @@ def create_primary_table_triggers(collection_name: str, prefix: str = "t100_"): ) +def toggle_primary_table_triggers( + collection_name: str, enable: bool, prefix: str = "t100_" +): + action = "ENABLE" if enable else "DISABLE" + op.execute( + f"ALTER TABLE {main_table_name(collection_name)} " + f"{action} TRIGGER {prefix}primary_control_hx_cols" + ) + op.execute( + f"ALTER TABLE {main_table_name(collection_name)} " + f"{action} TRIGGER {prefix}primary_ops_to_hx" + ) + + +def 
disable_primary_table_triggers(collection_name: str, prefix: str = "t100_"): + toggle_primary_table_triggers(collection_name, enable=False, prefix=prefix) + + +def enable_primary_table_triggers(collection_name: str, prefix: str = "t100_"): + toggle_primary_table_triggers(collection_name, enable=True, prefix=prefix) + + def create_history_table_triggers( collection_name: str, foreign_tables: list, prefix: str = "t100_" ): diff --git a/pycds/alembic/versions/33179b5ae85a_add_network_key_column_to_meta_network.py b/pycds/alembic/versions/33179b5ae85a_add_network_key_column_to_meta_network.py new file mode 100644 index 00000000..57cb7ef7 --- /dev/null +++ b/pycds/alembic/versions/33179b5ae85a_add_network_key_column_to_meta_network.py @@ -0,0 +1,247 @@ +"""add network_key column to meta_network + +Revision ID: 33179b5ae85a +Revises: 8c05da87cb79 +Create Date: 2026-01-07 20:25:34.314026 + +Notes: This process was made more complicated by some assumptions made by the history tracking code. +In particular it assumes that the primary and history table have the same column order with the +exception that history tables have additional columns at the end. When adding a new column it is added +at the end and therefore breaks the assumption. To work around this, we have to recreate the history table +with the correct column order. This involves renaming the existing history table, creating a new one with +the correct structure, copying the data over, and then dropping the old table. + +This is needed because at the current time neither postgres nor alembic support adding a column at a specific +position in the table. 
+ +""" + +from alembic import op +import sqlalchemy as sa +from sqlalchemy import text +from pycds.context import get_schema_name +from pycds.alembic.change_history_utils import ( + disable_primary_table_triggers, + create_history_table, + create_history_table_triggers, + create_history_table_indexes, + enable_primary_table_triggers, +) +from pycds.alembic.util import grant_standard_table_privileges + + +# revision identifiers, used by Alembic. +revision = "33179b5ae85a" +down_revision = "8c05da87cb79" +branch_labels = None +depends_on = None + +schema_name = get_schema_name() + + +def upgrade(): + # Create a function to generate network key from network name + # Replicates the behavior of Network.gen_key_from_name() in orm Tables.py + op.execute( + text( + f""" + CREATE OR REPLACE FUNCTION {schema_name}.gen_network_key_from_name(name text) + RETURNS text + LANGUAGE sql + IMMUTABLE + AS $$ + SELECT lower(replace(replace(trim(name), ' ', '_'), '-', '_')) + $$ + """ + ) + ) + + # Disable existing triggers before modifying table structure so that we don't accidentally track + # the intermediate states + disable_primary_table_triggers("meta_network") + + # Rename the existing history table to preserve existing history data + # We'll copy data from this into the new table with the correct column order + op.execute( + text(f"ALTER TABLE {schema_name}.meta_network_hx RENAME TO meta_network_hx_old") + ) + + # Drop the identity property, which also drops the associated sequence + op.execute( + text( + f"ALTER TABLE {schema_name}.meta_network_hx_old ALTER COLUMN meta_network_hx_id DROP IDENTITY" + ) + ) + + op.add_column( + "meta_network", + sa.Column( + "network_key", + sa.String(), + nullable=True, + ), + schema=schema_name, + ) + + op.execute( + text( + f""" + UPDATE {schema_name}.meta_network + SET network_key = {schema_name}.gen_network_key_from_name(network_name) + """ + ) + ) + + op.create_unique_constraint( + "uq_meta_network_network_key", + "meta_network", + 
["network_key"], + schema=schema_name, + ) + + # Create a trigger function to auto-populate network_key on INSERT. Must be a trigger as + # Deault values can't call functions that access other columns. + op.execute( + text( + f""" + CREATE OR REPLACE FUNCTION {schema_name}.set_network_key_default() + RETURNS trigger + LANGUAGE plpgsql + AS $$ + BEGIN + IF NEW.network_key IS NULL THEN + NEW.network_key := {schema_name}.gen_network_key_from_name(NEW.network_name); + END IF; + RETURN NEW; + END; + $$ + """ + ) + ) + + # Create trigger to run before INSERT + op.execute( + text( + f""" + CREATE TRIGGER set_network_key_default_trigger + BEFORE INSERT ON {schema_name}.meta_network + FOR EACH ROW + EXECUTE FUNCTION {schema_name}.set_network_key_default() + """ + ) + ) + + # Recreate the history table with the new column structure + create_history_table("meta_network", foreign_tables=None) + grant_standard_table_privileges(f"{schema_name}.meta_network_hx") + + # Copy existing history data from the old table to the new one + op.execute( + text( + f""" + INSERT INTO {schema_name}.meta_network_hx + (network_id, network_name, description, virtual, + publish, col_hex, mod_time, mod_user, + network_key, deleted, meta_network_hx_id) + SELECT + network_id, network_name, description, virtual, + publish, col_hex, mod_time, mod_user, + {schema_name}.gen_network_key_from_name(network_name), + deleted, meta_network_hx_id + FROM {schema_name}.meta_network_hx_old + ORDER BY meta_network_hx_id + """ + ) + ) + + # Update foreign key references in dependent tables to point to the new history table + # meta_station_hx and meta_vars_hx have foreign keys to meta_network_hx + + # Drop the foreign key constraints from dependent tables + op.execute( + text( + f"ALTER TABLE {schema_name}.meta_station_hx DROP CONSTRAINT meta_station_hx_meta_network_hx_id_fkey" + ) + ) + op.execute( + text( + f"ALTER TABLE {schema_name}.meta_vars_hx DROP CONSTRAINT meta_vars_hx_meta_network_hx_id_fkey" + ) + ) + + # 
Drop the old history table now that data has been copied and FKs removed + op.execute(text(f"DROP TABLE {schema_name}.meta_network_hx_old")) + + # Set the sequence to continue from the next ID after the last copied record + op.execute( + text( + f""" + SELECT setval( + '{schema_name}.meta_network_hx_meta_network_hx_id_seq', + (SELECT COALESCE(MAX(meta_network_hx_id), 1) FROM {schema_name}.meta_network_hx), + true + ) + """ + ) + ) + + # Recreate the foreign key constraints pointing to the new history table + op.execute( + text( + f""" + ALTER TABLE {schema_name}.meta_station_hx + ADD CONSTRAINT meta_station_hx_meta_network_hx_id_fkey + FOREIGN KEY (meta_network_hx_id) + REFERENCES {schema_name}.meta_network_hx(meta_network_hx_id) + """ + ) + ) + op.execute( + text( + f""" + ALTER TABLE {schema_name}.meta_vars_hx + ADD CONSTRAINT meta_vars_hx_meta_network_hx_id_fkey + FOREIGN KEY (meta_network_hx_id) + REFERENCES {schema_name}.meta_network_hx(meta_network_hx_id) + """ + ) + ) + + # Recreate the history tracking triggers + enable_primary_table_triggers("meta_network") + create_history_table_triggers("meta_network", foreign_tables=None) + + # Create indexes on the history table + create_history_table_indexes( + "meta_network", "network_id", foreign_tables=None, extras=None + ) + + +def downgrade(): + # Drop the trigger and trigger function + op.execute( + text( + f"DROP TRIGGER IF EXISTS set_network_key_default_trigger ON {schema_name}.meta_network" + ) + ) + op.execute(text(f"DROP FUNCTION IF EXISTS {schema_name}.set_network_key_default()")) + + # Drop the constraint and column from primary table + op.drop_constraint( + "uq_meta_network_network_key", + "meta_network", + type_="unique", + schema=schema_name, + ) + + # When dropping we don't have the same issues with column order so we can safely just drop the + # column to return to the pre-migration state + op.drop_column("meta_network", "network_key", schema=schema_name) + + # Drop the column from history table + 
op.drop_column("meta_network_hx", "network_key", schema=schema_name) + + # Drop the key generation function + op.execute( + text(f"DROP FUNCTION IF EXISTS {schema_name}.gen_network_key_from_name(text)") + ) diff --git a/pycds/database.py b/pycds/database.py index dd6ba733..8afe4d5d 100644 --- a/pycds/database.py +++ b/pycds/database.py @@ -10,7 +10,7 @@ def check_migration_version( - executor, schema_name=get_schema_name(), version="8c05da87cb79" + executor, schema_name=get_schema_name(), version="33179b5ae85a" ): """Check that the migration version of the database schema is compatible with the current version of this package. diff --git a/pycds/orm/tables/__init__.py b/pycds/orm/tables/__init__.py new file mode 100644 index 00000000..fda60a06 --- /dev/null +++ b/pycds/orm/tables/__init__.py @@ -0,0 +1,39 @@ +""" +ORM declarations for tables. + +This module provides access to table ORM definitions, with support for +retrieving definitions at specific migration revisions for testing purposes. + +By default, imports from this module provide the current (head) version of tables. +Tests can set a specific revision using set_global_table_version() before importing. 
+""" + +from pycds.orm.versioning import ( + get_global_table_version, +) + +# Check if a specific version has been requested +_requested_version = get_global_table_version() + +if _requested_version is None: + # No specific version requested - import from (current/head) + from .version_33179b5ae85a import * +else: + # Specific version requested - import from that version module + import importlib + try: + _version_module = importlib.import_module( + f"pycds.orm.tables.version_{_requested_version}" + ) + + # Import all public members from the version module + for _name in dir(_version_module): + if not _name.startswith("_"): + globals()[_name] = getattr(_version_module, _name) + + del importlib, _version_module, _name + except ModuleNotFoundError as e: + raise ImportError( + f"Table version module for revision '{_requested_version}' not found. Ensure that " + f"the migration revision exists and that the corresponding version module has been created." + ) from e diff --git a/pycds/orm/tables.py b/pycds/orm/tables/base.py similarity index 88% rename from pycds/orm/tables.py rename to pycds/orm/tables/base.py index 531fc939..32977a9c 100644 --- a/pycds/orm/tables.py +++ b/pycds/orm/tables/base.py @@ -48,86 +48,33 @@ # string templating functions for check functions applied against multiple columns def no_newline_ck_name(column): - return f"ck_{column}_no_newlines" + return f"{column}_nolinebreak" def no_newline_ck_check(column): return f"{column} !~ '[\r\n]'" -class Network(Base): - """This class maps to the table which represents various `networks` of - data for the Climate Related Monitoring Program. There is one - network row for each data provider, typically a BC Ministry, crown - corporation or private company. +class Contact(Base): + """This class maps to the table which represents contact people and + representatives for the networks of the Climate Related Monitoring + Program. 
""" - __tablename__ = "meta_network" - - # Columns - id = Column("network_id", Integer, primary_key=True) - name = Column("network_name", String) - long_name = Column("description", String) - virtual = Column(String(255)) - publish = Column(Boolean) - color = Column("col_hex", String) - contact_id = Column(Integer, ForeignKey("meta_contact.contact_id")) - mod_time = Column(DateTime, nullable=False, server_default=func.now()) - mod_user = Column( - String(64), nullable=False, server_default=literal_column("current_user") - ) + __tablename__ = "meta_contact" + id = Column("contact_id", Integer, primary_key=True) + name = Column("name", String) + title = Column("title", String) + organization = Column("organization", String) + email = Column("email", String) + phone = Column("phone", String) - # Relationships - stations = relationship( - "Station", - order_by="Station.id", - back_populates="network", - cascade_backrefs=False, - ) - meta_station = synonym("stations") - variables = relationship( - "Variable", back_populates="network", cascade_backrefs=False - ) - meta_vars = synonym("variables") - contact = relationship("Contact", back_populates="networks", cascade_backrefs=False) - meta_contact = synonym("contact") # Retain backwards compatibility - native_flags = relationship( - "NativeFlag", - order_by="NativeFlag.id", - back_populates="network", + networks = relationship( + "Network", + order_by="Network.id", + back_populates="contact", cascade_backrefs=False, ) - meta_native_flag = synonym("native_flags") # Retain backwards compatibility - - def __str__(self): - return f"" - - -class NetworkHistory(Base): - """This class maps to the history table for table Network.""" - - __tablename__ = hx_table_name(Network.__tablename__, schema=None) - - # Columns - network_id = Column(Integer, nullable=False, index=True) - name = Column("network_name", String) - long_name = Column("description", String) - virtual = Column(String(255)) - publish = Column(Boolean) - color = 
Column("col_hex", String) - contact_id = Column(Integer) - mod_time = Column(DateTime, nullable=False, server_default=func.now()) - mod_user = Column( - String(64), nullable=False, server_default=literal_column("current_user") - ) - deleted = Column(Boolean, default=False) - meta_network_hx_id = Column(Integer, primary_key=True) - - def __str__(self): - return f"" - - -class Contact(Base): """This class maps to the table which represents contact people and representatives for the networks of the Climate Related Monitoring Program. diff --git a/pycds/orm/tables/version_33179b5ae85a.py b/pycds/orm/tables/version_33179b5ae85a.py new file mode 100644 index 00000000..dd9022e6 --- /dev/null +++ b/pycds/orm/tables/version_33179b5ae85a.py @@ -0,0 +1,124 @@ +""" +ORM table definitions at migration version 33179b5ae85a. + +This version includes the network_key column added to Network and NetworkHistory tables. +All other tables are imported from base.py since they haven't changed at this revision. 
+""" + +from sqlalchemy import ( + Boolean, + Column, + DateTime, + Integer, + String, + ForeignKey, + func, +) +from sqlalchemy.sql import literal_column +from sqlalchemy.orm import relationship, synonym + +from pycds.alembic.change_history_utils import hx_table_name + +# Import Base and all other tables from base module +from .base import ( + Base, + metadata, + no_newline_ck_name, + no_newline_ck_check, + Contact, + Station, + StationHistory, + History, + HistoryHistory, + ObsRawNativeFlags, + ObsRawPCICFlags, + MetaSensor, + Obs, + ObsHistory, + TimeBound, + Variable, + VariableHistory, + NativeFlag, + PCICFlag, + DerivedValue, +) + + +class Network(Base): + """Network table ORM at revision 33179b5ae85a""" + + __tablename__ = "meta_network" + + # Columns - order must match physical column order in database after migrations + # The network_key column was added at the END via ALTER TABLE ADD COLUMN + id = Column("network_id", Integer, primary_key=True) + name = Column("network_name", String) + long_name = Column("description", String) + virtual = Column(String(255)) + publish = Column(Boolean) + color = Column("col_hex", String) + contact_id = Column(Integer, ForeignKey("meta_contact.contact_id")) + mod_time = Column(DateTime, nullable=False, server_default=func.now()) + mod_user = Column( + String(64), nullable=False, server_default=literal_column("current_user") + ) + key = Column("network_key", String, unique=True) # Added at end by ALTER TABLE + + # Relationships + stations = relationship( + "Station", + order_by="Station.id", + back_populates="network", + cascade_backrefs=False, + ) + meta_station = synonym("stations") + variables = relationship( + "Variable", back_populates="network", cascade_backrefs=False + ) + meta_vars = synonym("variables") + contact = relationship("Contact", back_populates="networks", cascade_backrefs=False) + meta_contact = synonym("contact") + native_flags = relationship( + "NativeFlag", + order_by="NativeFlag.id", + 
back_populates="network", + cascade_backrefs=False, + ) + meta_native_flag = synonym("native_flags") + + def __str__(self): + return f"" + + @staticmethod + def gen_key_from_name(name): + """Normalize network name for use as network key.""" + return name.strip().lower().replace(" ", "_").replace("-", "_") + + +class NetworkHistory(Base): + """NetworkHistory table ORM at revision""" + + __tablename__ = hx_table_name("meta_network", schema=None) + + # Columns - order must match physical column order in database after migrations + # The network_key column was added at the END via ALTER TABLE ADD COLUMN + network_id = Column(Integer, nullable=False, index=True) + name = Column("network_name", String) + long_name = Column("description", String) + virtual = Column(String(255)) + publish = Column(Boolean) + color = Column("col_hex", String) + contact_id = Column(Integer) + mod_time = Column(DateTime, nullable=False, server_default=func.now()) + mod_user = Column( + String(64), nullable=False, server_default=literal_column("current_user") + ) + key = Column("network_key", String) # Added at end by ALTER TABLE + deleted = Column(Boolean, default=False) + meta_network_hx_id = Column(Integer, primary_key=True) + + def __str__(self): + return f"" + + +# import other tables from base diff --git a/pycds/orm/tables/version_a59d64cf16ca.py b/pycds/orm/tables/version_a59d64cf16ca.py new file mode 100644 index 00000000..ec9c4245 --- /dev/null +++ b/pycds/orm/tables/version_a59d64cf16ca.py @@ -0,0 +1,112 @@ +""" +ORM table definitions at migration version a59d64cf16ca. + +This version is before network_key was added to Network and NetworkHistory tables. +All other tables are imported from base.py since they haven't changed at this revision. 
+""" + +from sqlalchemy import ( + Boolean, + Column, + DateTime, + Integer, + String, + ForeignKey, + func, +) +from sqlalchemy.sql import literal_column +from sqlalchemy.orm import relationship, synonym + +from pycds.alembic.change_history_utils import hx_table_name + +# Import Base and all other tables from base module +from .base import ( + Base, + metadata, + no_newline_ck_name, + no_newline_ck_check, + Contact, + Station, + StationHistory, + History, + HistoryHistory, + ObsRawNativeFlags, + ObsRawPCICFlags, + MetaSensor, + Obs, + ObsHistory, + TimeBound, + Variable, + VariableHistory, + NativeFlag, + PCICFlag, + DerivedValue, +) + + +class Network(Base): + """Network table ORM at revision a59d64cf16ca (before network_key)""" + + __tablename__ = "meta_network" + + # Columns + id = Column("network_id", Integer, primary_key=True) + name = Column("network_name", String) + long_name = Column("description", String) + virtual = Column(String(255)) + publish = Column(Boolean) + color = Column("col_hex", String) + contact_id = Column(Integer, ForeignKey("meta_contact.contact_id")) + mod_time = Column(DateTime, nullable=False, server_default=func.now()) + mod_user = Column( + String(64), nullable=False, server_default=literal_column("current_user") + ) + + # Relationships + stations = relationship( + "Station", + order_by="Station.id", + back_populates="network", + cascade_backrefs=False, + ) + meta_station = synonym("stations") + variables = relationship( + "Variable", back_populates="network", cascade_backrefs=False + ) + meta_vars = synonym("variables") + contact = relationship("Contact", back_populates="networks", cascade_backrefs=False) + meta_contact = synonym("contact") + native_flags = relationship( + "NativeFlag", + order_by="NativeFlag.id", + back_populates="network", + cascade_backrefs=False, + ) + meta_native_flag = synonym("native_flags") + + def __str__(self): + return f"" + + +class NetworkHistory(Base): + """NetworkHistory table ORM at revision 
a59d64cf16ca""" + + __tablename__ = hx_table_name("meta_network", schema=None) + + # Columns + network_id = Column(Integer, nullable=False, index=True) + name = Column("network_name", String) + long_name = Column("description", String) + virtual = Column(String(255)) + publish = Column(Boolean) + color = Column("col_hex", String) + contact_id = Column(Integer) + mod_time = Column(DateTime, nullable=False, server_default=func.now()) + mod_user = Column( + String(64), nullable=False, server_default=literal_column("current_user") + ) + deleted = Column(Boolean, default=False) + meta_network_hx_id = Column(Integer, primary_key=True) + + def __str__(self): + return f"" diff --git a/pycds/orm/versioning.py b/pycds/orm/versioning.py new file mode 100644 index 00000000..02f16fdc --- /dev/null +++ b/pycds/orm/versioning.py @@ -0,0 +1,131 @@ +""" +ORM table versioning system. + +Provides access to table ORM definitions at specific migration revisions. +This allows tests to use the correct schema version when testing migrations. +""" + +import importlib +import sys +from typing import Optional + + +class TableVersionManager: + """ + Manages versioned ORM table definitions. 
+ + Usage: + # Get tables at specific revision + tables = get_tables_at_revision("a59d64cf16ca") + Network = tables.Network + NetworkHistory = tables.NetworkHistory + + # Or use current version (default) + tables = get_tables_at_revision() # Returns current version + Network = tables.Network + """ + + def __init__(self, revision: Optional[str] = None): + self.revision = revision + self._module = None + + def _load_module(self): + """Lazily load the versioned module.""" + if self._module is None: + if self.revision is None: + # Import current/head version from head version module + self._module = importlib.import_module("pycds.orm.tables") + else: + # Import specific version + module_name = f"pycds.orm.tables.version_{self.revision}" + self._module = importlib.import_module(module_name) + return self._module + + def __getattr__(self, name): + """Dynamically access classes from the versioned module.""" + module = self._load_module() + try: + return getattr(module, name) + except AttributeError: + raise AttributeError( + f"Class '{name}' not found in tables module " + f"{'at current version' if self.revision is None else f'at revision {self.revision}'}" + ) + + +def get_tables_at_revision(revision: Optional[str] = None) -> TableVersionManager: + """ + Get ORM table classes at a specific migration revision. + + Args: + revision: Alembic revision ID (e.g., "a59d64cf16ca"). + If None, returns current version. + + Returns: + TableVersionManager that provides access to versioned table classes. 
+ + Examples: + # Get tables at old revision (before network_key) + tables = get_tables_at_revision("a59d64cf16ca") + Network = tables.Network + assert not hasattr(Network, 'key') + + # Get current tables + tables = get_tables_at_revision() + Network = tables.Network + assert hasattr(Network, 'key') + """ + return TableVersionManager(revision) + + +# Convenience for setting a global version context +_global_version: Optional[str] = None + + +def set_global_table_version(revision: Optional[str] = None): + """ + Set the global table version that will be used by default. + + This is useful for test fixtures that need to ensure all table + references use a specific version. + + IMPORTANT: This clears the module cache for pycds.orm.tables and related + modules to ensure the new version is loaded on next import. + + Args: + revision: Alembic revision ID, or None for current version. + """ + global _global_version + _global_version = revision + + # Clear module cache for tables/views/matviews modules to force reload with new version + # We need to clear modules that import from pycds.orm.tables (like main pycds module) + # but NOT modules that don't depend on tables (like pycds.database, pycds.context, etc.) + # since they may have been imported by test files and clearing them breaks references. + modules_to_clear = [ + key + for key in sys.modules.keys() + if ( + key.startswith("pycds.orm.tables") + or key.startswith("pycds.orm.views") + or key.startswith("pycds.orm.native_matviews") + or key.startswith("pycds.orm.manual_matviews") + or key == "pycds" + ) + ] + for module_name in modules_to_clear: + del sys.modules[module_name] + + +def get_global_table_version() -> Optional[str]: + """Get the currently set global table version.""" + return _global_version + + +def get_default_tables() -> TableVersionManager: + """ + Get tables using the global version if set, otherwise current version. + + This respects the global version set by set_global_table_version(). 
+ """ + return TableVersionManager(_global_version) diff --git a/tests/alembic_migrations/test_check_migration_version.py b/tests/alembic_migrations/test_check_migration_version.py index 6c5237fa..a3069c57 100644 --- a/tests/alembic_migrations/test_check_migration_version.py +++ b/tests/alembic_migrations/test_check_migration_version.py @@ -8,7 +8,7 @@ @pytest.mark.update20 def test_get_current_head(): - assert get_current_head() == "8c05da87cb79" + assert get_current_head() == "33179b5ae85a" @pytest.mark.update20 diff --git a/tests/alembic_migrations/versions/v_33179b5ae85a_add_network_key_column_to_meta_network/__init__.py b/tests/alembic_migrations/versions/v_33179b5ae85a_add_network_key_column_to_meta_network/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/alembic_migrations/versions/v_33179b5ae85a_add_network_key_column_to_meta_network/test_data_preservation.py b/tests/alembic_migrations/versions/v_33179b5ae85a_add_network_key_column_to_meta_network/test_data_preservation.py new file mode 100644 index 00000000..279b6abc --- /dev/null +++ b/tests/alembic_migrations/versions/v_33179b5ae85a_add_network_key_column_to_meta_network/test_data_preservation.py @@ -0,0 +1,265 @@ +"""Data preservation and sequence tests: +- Check that existing history data is preserved during migration +- Verify that the sequence is correctly set after migration +- Ensure subsequent insertions work correctly +""" + +import logging +import pytest +from sqlalchemy import text, inspect + +logger = logging.getLogger("tests") + + +@pytest.mark.update20 +def test_history_data_preservation_and_sequence( + alembic_engine, alembic_runner, schema_name +): + """Test that history data is preserved and sequence works after migration.""" + + # Start at the previous revision + alembic_runner.migrate_up_to("8c05da87cb79") + + # Insert test data into meta_network + with alembic_engine.begin() as conn: + # Insert a network + conn.execute( + text( + f""" + INSERT INTO 
{schema_name}.meta_network + (network_name, description, publish) + VALUES ('Test Network', 'Test Description', true) + """ + ) + ) + + # Get the network_id + result = conn.execute( + text( + f""" + SELECT network_id FROM {schema_name}.meta_network + WHERE network_name = 'Test Network' + """ + ) + ) + network_id = result.scalar() + + # Update the network to create history entries + conn.execute( + text( + f""" + UPDATE {schema_name}.meta_network + SET description = 'Updated Description' + WHERE network_id = {network_id} + """ + ) + ) + + conn.execute( + text( + f""" + UPDATE {schema_name}.meta_network + SET description = 'Final Description' + WHERE network_id = {network_id} + """ + ) + ) + + # Count history records before migration + result = conn.execute( + text( + f""" + SELECT COUNT(*) FROM {schema_name}.meta_network_hx + WHERE network_id = {network_id} + """ + ) + ) + history_count_before = result.scalar() + + # Get the max history ID before migration + result = conn.execute( + text( + f""" + SELECT MAX(meta_network_hx_id) FROM {schema_name}.meta_network_hx + """ + ) + ) + max_hx_id_before = result.scalar() + + # Run the migration + alembic_runner.migrate_up_to("33179b5ae85a") + + with alembic_engine.begin() as conn: + # Verify history records still exist + result = conn.execute( + text( + f""" + SELECT COUNT(*) FROM {schema_name}.meta_network_hx + WHERE network_id = {network_id} + """ + ) + ) + history_count_after = result.scalar() + assert history_count_after == history_count_before, ( + f"History count changed: before={history_count_before}, " + f"after={history_count_after}" + ) + + # Verify network_key was generated for history records + result = conn.execute( + text( + f""" + SELECT COUNT(*) FROM {schema_name}.meta_network_hx + WHERE network_id = {network_id} AND network_key IS NOT NULL + """ + ) + ) + history_with_key_count = result.scalar() + assert ( + history_with_key_count == history_count_after + ), "Not all history records have network_key 
populated" + + # Verify the network_key matches expected format + result = conn.execute( + text( + f""" + SELECT network_key FROM {schema_name}.meta_network_hx + WHERE network_id = {network_id} + LIMIT 1 + """ + ) + ) + network_key = result.scalar() + assert ( + network_key == "test_network" + ), f"Generated network_key '{network_key}' doesn't match expected 'test_network'" + + # Verify the sequence is set correctly by inserting a new network + # This should trigger the history tracking and use the next sequence value + conn.execute( + text( + f""" + INSERT INTO {schema_name}.meta_network + (network_name, description, publish) + VALUES ('New Test Network', 'New Description', true) + """ + ) + ) + + # Get the new network_id + result = conn.execute( + text( + f""" + SELECT network_id FROM {schema_name}.meta_network + WHERE network_name = 'New Test Network' + """ + ) + ) + new_network_id = result.scalar() + + # Update it to create a history entry - this will test the sequence + conn.execute( + text( + f""" + UPDATE {schema_name}.meta_network + SET description = 'Updated New Description' + WHERE network_id = {new_network_id} + """ + ) + ) + + # Verify the new history record was created successfully + result = conn.execute( + text( + f""" + SELECT meta_network_hx_id FROM {schema_name}.meta_network_hx + WHERE network_id = {new_network_id} + ORDER BY meta_network_hx_id + """ + ) + ) + new_hx_ids = [row[0] for row in result.fetchall()] + + assert len(new_hx_ids) > 0, "No history records created for new network" + + # Verify the new history IDs are greater than the old max + for hx_id in new_hx_ids: + assert ( + hx_id > max_hx_id_before + ), f"New history ID {hx_id} is not greater than old max {max_hx_id_before}" + + # Verify network_key was auto-generated for the new network + result = conn.execute( + text( + f""" + SELECT network_key FROM {schema_name}.meta_network + WHERE network_id = {new_network_id} + """ + ) + ) + new_network_key = result.scalar() + assert ( + 
@pytest.mark.update20
def test_sequence_continuity_with_no_gaps(alembic_engine, alembic_runner, schema_name):
    """Test that the history-id sequence has no gaps after migration.

    After migrating to 33179b5ae85a (which recreates meta_network_hx and
    re-seeds its id sequence), the sequence must be positioned at the
    pre-migration maximum id (minimum 1), so the next generated id
    continues without gaps or collisions.

    :param alembic_engine: engine connected to the test database (fixture)
    :param alembic_runner: pytest-alembic runner (fixture)
    :param schema_name: schema under migration (fixture)
    """
    # Start at the previous revision.
    alembic_runner.migrate_up_to("8c05da87cb79")

    with alembic_engine.begin() as conn:
        # Max history id before migration; COALESCE makes an empty table yield 0.
        result = conn.execute(
            text(
                f"""
                SELECT COALESCE(MAX(meta_network_hx_id), 0)
                FROM {schema_name}.meta_network_hx
                """
            )
        )
        max_id_before = result.scalar()

    # Run the migration under test.
    alembic_runner.migrate_up_to("33179b5ae85a")

    with alembic_engine.begin() as conn:
        # Inspect the recreated sequence's state directly.
        result = conn.execute(
            text(
                f"""
                SELECT last_value, is_called
                FROM {schema_name}.meta_network_hx_meta_network_hx_id_seq
                """
            )
        )
        last_value, is_called = result.fetchone()

        # When is_called is true, last_value has been consumed and the next
        # nextval() returns last_value + 1. The migration seeds with
        # setval(..., COALESCE(MAX(...), 1), true), so the minimum is 1 even
        # for an empty table.
        expected_last_value = max(max_id_before, 1)

        # Idiomatic truthiness check instead of `== True` (flake8 E712).
        assert is_called, "Sequence should be marked as called after setval with true"
        assert last_value == expected_last_value, (
            f"Sequence last_value {last_value} doesn't match expected {expected_last_value} "
            f"(max_id_before was {max_id_before})"
        )
# -*- coding: utf-8 -*-
"""Smoke tests:
- Upgrade adds network_key column to meta_network table and meta_network_hx table
- Downgrade drops network_key column from meta_network table and meta_network_hx table
"""

import logging

import pytest
from sqlalchemy import inspect


logger = logging.getLogger("tests")

column_name = "network_key"
table_name = "meta_network"
history_table_name = "meta_network_hx"


def check_if_column_exists(col_name, sch_cols):
    """Return the inspector column dict named ``col_name``, or None if absent.

    Bug fix: the original returned ``sqlalchemy.null`` — a function for
    building SQL NULL expressions, not a Python sentinel — and callers
    compared with ``== null``, which only "passed" by object identity and
    would raise TypeError if the sentinel were ever subscripted. ``None``
    is the correct not-found sentinel.

    :param col_name: column name to look for
    :param sch_cols: list of column dicts as returned by
        ``Inspector.get_columns``
    """
    return next((col for col in sch_cols if col["name"] == col_name), None)


@pytest.mark.update20
def test_upgrade(
    alembic_engine,
    alembic_runner,
    schema_name,
):
    """Test the schema migration from 8c05da87cb79 to 33179b5ae85a."""
    alembic_runner.migrate_up_to("33179b5ae85a")

    # Check that the nullable column has been added to meta_network.
    meta_network_cols = inspect(alembic_engine).get_columns(
        table_name, schema=schema_name
    )
    col = check_if_column_exists(column_name, meta_network_cols)
    assert col is not None, f"{column_name} missing from {table_name}"
    assert col["nullable"] is True

    # Check that the nullable column has been added to meta_network_hx.
    meta_network_hx_cols = inspect(alembic_engine).get_columns(
        history_table_name, schema=schema_name
    )
    col_hx = check_if_column_exists(column_name, meta_network_hx_cols)
    assert col_hx is not None, f"{column_name} missing from {history_table_name}"
    assert col_hx["nullable"] is True


@pytest.mark.update20
def test_downgrade(alembic_engine, alembic_runner, schema_name):
    """Test the schema migration from 33179b5ae85a down to 8c05da87cb79."""
    alembic_runner.migrate_up_to("33179b5ae85a")

    # Downgrade to revision 8c05da87cb79.
    alembic_runner.migrate_down_one()

    # Check that the column has been removed from meta_network.
    meta_network_cols = inspect(alembic_engine).get_columns(
        table_name, schema=schema_name
    )
    assert check_if_column_exists(column_name, meta_network_cols) is None

    # Check that the column has been removed from meta_network_hx.
    meta_network_hx_cols = inspect(alembic_engine).get_columns(
        history_table_name, schema=schema_name
    )
    assert check_if_column_exists(column_name, meta_network_hx_cols) is None
@pytest.mark.update20
@pytest.mark.parametrize("supports_matviews", [True, False])
def test_mock(mocker, supports_matviews):
    """Verify that patching ``pycds.database.db_supports_matviews`` takes effect.

    :param mocker: pytest-mock fixture
    :param supports_matviews: value the patched function should report
    """
    # The patched function ignores its arguments, so None stands in for
    # the connection/engine argument the real function takes.
    mock_func = mocker.patch(
        "pycds.database.db_supports_matviews", return_value=supports_matviews
    )
    assert pycds.database.db_supports_matviews(None) is supports_matviews
    # Fix: the mock handle was previously assigned but never used; use it
    # to confirm the patched function was actually the one invoked.
    mock_func.assert_called_once_with(None)
""" +# IMPORTANT: Set table version BEFORE any pycds imports +# This test needs the table schema at revision a59d64cf16ca (before network_key was added) +from pycds.orm.versioning import set_global_table_version + +set_global_table_version("a59d64cf16ca") + import logging import pytest diff --git a/tests/alembic_migrations/versions/v_bdc28573df56_add_obs_raw_indexes/test_smoke.py b/tests/alembic_migrations/versions/v_bdc28573df56_add_obs_raw_indexes/test_smoke.py index 509181ce..dc2064c8 100644 --- a/tests/alembic_migrations/versions/v_bdc28573df56_add_obs_raw_indexes/test_smoke.py +++ b/tests/alembic_migrations/versions/v_bdc28573df56_add_obs_raw_indexes/test_smoke.py @@ -6,8 +6,8 @@ # -*- coding: utf-8 -*- import logging import pytest +from pycds import database as pycds_database import pycds.database -from pycds.database import get_schema_item_names logger = logging.getLogger("tests") @@ -23,7 +23,10 @@ @pytest.mark.update20 @pytest.mark.parametrize("item_names", [set(), {"alpha", "beta"}]) def test_mock(mocker, item_names): - mocker.patch("pycds.database.get_schema_item_names", return_value=item_names) + mock_func = mocker.patch( + "pycds.database.get_schema_item_names", return_value=item_names + ) + # Call the mock directly since pycds module reference may be stale assert pycds.database.get_schema_item_names() == item_names @@ -34,7 +37,7 @@ def test_upgrade(alembic_engine, alembic_runner, schema_name): with alembic_engine.begin() as conn: # Check that indexes have been added - before_upgrade_index_names = get_schema_item_names( + before_upgrade_index_names = pycds.database.get_schema_item_names( conn, "indexes", table_name="obs_raw", schema_name=schema_name ) assert not (index_names <= set() | before_upgrade_index_names) @@ -42,7 +45,7 @@ def test_upgrade(alembic_engine, alembic_runner, schema_name): alembic_runner.migrate_up_one() with alembic_engine.begin() as conn: - after_upgrade_index_names = get_schema_item_names( + after_upgrade_index_names = 
pycds.database.get_schema_item_names( conn, "indexes", table_name="obs_raw", schema_name=schema_name ) @@ -62,7 +65,7 @@ def test_downgrade( alembic_runner.migrate_up_before("bdc28573df56") with alembic_engine.begin() as conn: - before_upgrade_index_names = get_schema_item_names( + before_upgrade_index_names = pycds.database.get_schema_item_names( conn, "indexes", table_name="obs_raw", schema_name=schema_name ) @@ -71,7 +74,7 @@ def test_downgrade( alembic_runner.migrate_up_one() # Upgrade to bdc28573df56, indexes added with alembic_engine.begin() as conn: - after_upgrade_index_names = get_schema_item_names( + after_upgrade_index_names = pycds.database.get_schema_item_names( conn, "indexes", table_name="obs_raw", schema_name=schema_name ) @@ -80,7 +83,7 @@ def test_downgrade( alembic_runner.migrate_down_one() with alembic_engine.begin() as conn: - after_downgrade_index_names = get_schema_item_names( + after_downgrade_index_names = pycds.database.get_schema_item_names( conn, "indexes", table_name="obs_raw", schema_name=schema_name ) diff --git a/tests/conftest.py b/tests/conftest.py index 5e80d146..7a9f4995 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1,6 +1,5 @@ import logging import sys -import logging from pytest import fixture