Skip to content
4 changes: 2 additions & 2 deletions alembic.ini
Original file line number Diff line number Diff line change
Expand Up @@ -49,8 +49,8 @@ sqlalchemy.url = postgresql://metnorth@db.pcic.uvic.ca:5433/metnorth?keepalives=
[metnorth_test]
sqlalchemy.url = postgresql://metnorth@dbtest02.pcic.uvic.ca/metnorth?keepalives=1&keepalives_idle=300&keepalives_interval=300&keepalives_count=9

[crmp_dbtest01]
sqlalchemy.url = postgresql://crmp@dbtest01.pcic.uvic.ca/crmp?keepalives=1&keepalives_idle=300&keepalives_interval=300&keepalives_count=9
[crmp_new_cluster]
sqlalchemy.url = postgresql://crmp@/crmp?host=pg01.pcic.uvic.ca,pg02.pcic.uvic.ca&port=5432,5432&target_session_attrs=read-write

[crmp_dbtest02_hx]
sqlalchemy.url = postgresql://crmp@dbtest02.pcic.uvic.ca:5432/crmp_hx?keepalives=1&keepalives_idle=300&keepalives_interval=300&keepalives_count=9
Expand Down
3 changes: 3 additions & 0 deletions pycds/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -74,6 +74,9 @@
from pycds.context import get_schema_name, get_su_role_name
from pycds.util import schema_func, variable_tags

# Import database module to make it accessible as pycds.database for mocking in tests
from pycds import database as database

from .orm.tables import (
Base,
Network,
Expand Down
22 changes: 22 additions & 0 deletions pycds/alembic/change_history_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -208,6 +208,28 @@ def create_primary_table_triggers(collection_name: str, prefix: str = "t100_"):
)


def toggle_primary_table_triggers(
    collection_name: str, enable: bool, prefix: str = "t100_"
):
    """Enable or disable the history-tracking triggers on a primary table.

    :param collection_name: Name of the table collection whose primary
        table carries the triggers.
    :param enable: True to ENABLE the triggers, False to DISABLE them.
    :param prefix: Trigger-name prefix used when the triggers were created
        (must match the prefix passed to ``create_primary_table_triggers``).
    """
    action = "ENABLE" if enable else "DISABLE"
    table = main_table_name(collection_name)
    # Toggle both triggers together so the table is never left in a
    # partially tracked state.
    for trigger in ("primary_control_hx_cols", "primary_ops_to_hx"):
        op.execute(f"ALTER TABLE {table} {action} TRIGGER {prefix}{trigger}")


def disable_primary_table_triggers(collection_name: str, prefix: str = "t100_"):
    """Convenience wrapper: turn off history tracking on a primary table."""
    toggle_primary_table_triggers(collection_name, False, prefix=prefix)


def enable_primary_table_triggers(collection_name: str, prefix: str = "t100_"):
    """Convenience wrapper: turn on history tracking on a primary table."""
    toggle_primary_table_triggers(collection_name, True, prefix=prefix)


def create_history_table_triggers(
collection_name: str, foreign_tables: list, prefix: str = "t100_"
):
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,247 @@
"""add network_key column to meta_network

Revision ID: 33179b5ae85a
Revises: 8c05da87cb79
Create Date: 2026-01-07 20:25:34.314026

Notes: This process was made more complicated by some assumptions made by the history tracking code.
In particular it assumes that the primary and history table have the same column order with the
exception that history tables have additional columns at the end. When adding a new column it is added
at the end and therefore breaks the assumption. To work around this, we have to recreate the history table
with the correct column order. This involves renaming the existing history table, creating a new one with
the correct structure, copying the data over, and then dropping the old table.

This is needed because, at the current time, neither Postgres nor Alembic supports adding a column at a
specific position in a table.

"""

from alembic import op
import sqlalchemy as sa
from sqlalchemy import text
from pycds.context import get_schema_name
from pycds.alembic.change_history_utils import (
disable_primary_table_triggers,
create_history_table,
create_history_table_triggers,
create_history_table_indexes,
enable_primary_table_triggers,
)
from pycds.alembic.util import grant_standard_table_privileges


# revision identifiers, used by Alembic.
revision = "33179b5ae85a"
down_revision = "8c05da87cb79"
branch_labels = None
depends_on = None

# Target schema, resolved once at import time from the pycds context.
schema_name = get_schema_name()


def upgrade():
    """Add a ``network_key`` column to ``meta_network`` and rebuild its history table.

    The history-tracking machinery assumes the primary table and its history
    table share the same column order, with the history table's extra columns
    at the end.  ``ADD COLUMN`` always appends, so after adding ``network_key``
    to the primary table the history table must be rebuilt (rename old ->
    create new -> copy data -> drop old) to restore that column-order
    invariant.
    """
    # Create a function to generate a network key from a network name.
    # Replicates the behavior of Network.gen_key_from_name() in the ORM tables.
    op.execute(
        text(
            f"""
            CREATE OR REPLACE FUNCTION {schema_name}.gen_network_key_from_name(name text)
            RETURNS text
            LANGUAGE sql
            IMMUTABLE
            AS $$
            SELECT lower(replace(replace(trim(name), ' ', '_'), '-', '_'))
            $$
            """
        )
    )

    # Disable existing triggers before modifying the table structure so that
    # the intermediate states are not accidentally recorded as history.
    disable_primary_table_triggers("meta_network")

    # Rename the existing history table to preserve existing history data;
    # its rows are copied into the rebuilt table below.
    op.execute(
        text(f"ALTER TABLE {schema_name}.meta_network_hx RENAME TO meta_network_hx_old")
    )

    # Drop the identity property, which also drops the associated sequence,
    # so the rebuilt table can own a fresh identity/sequence of the same name.
    op.execute(
        text(
            f"ALTER TABLE {schema_name}.meta_network_hx_old ALTER COLUMN meta_network_hx_id DROP IDENTITY"
        )
    )

    # Add the new column. It must be nullable here: a Postgres DEFAULT cannot
    # reference other columns (even via a function), so a BEFORE INSERT
    # trigger, created below, supplies the value instead.
    op.add_column(
        "meta_network",
        sa.Column(
            "network_key",
            sa.String(),
            nullable=True,
        ),
        schema=schema_name,
    )

    # Backfill the key for all existing rows from their network_name.
    op.execute(
        text(
            f"""
            UPDATE {schema_name}.meta_network
            SET network_key = {schema_name}.gen_network_key_from_name(network_name)
            """
        )
    )

    op.create_unique_constraint(
        "uq_meta_network_network_key",
        "meta_network",
        ["network_key"],
        schema=schema_name,
    )

    # Create a trigger function to auto-populate network_key on INSERT. Must be
    # a trigger as Default values can't call functions that access other columns.
    op.execute(
        text(
            f"""
            CREATE OR REPLACE FUNCTION {schema_name}.set_network_key_default()
            RETURNS trigger
            LANGUAGE plpgsql
            AS $$
            BEGIN
                IF NEW.network_key IS NULL THEN
                    NEW.network_key := {schema_name}.gen_network_key_from_name(NEW.network_name);
                END IF;
                RETURN NEW;
            END;
            $$
            """
        )
    )

    # Create trigger to run before INSERT
    op.execute(
        text(
            f"""
            CREATE TRIGGER set_network_key_default_trigger
            BEFORE INSERT ON {schema_name}.meta_network
            FOR EACH ROW
            EXECUTE FUNCTION {schema_name}.set_network_key_default()
            """
        )
    )

    # Recreate the history table with the new column structure
    create_history_table("meta_network", foreign_tables=None)
    grant_standard_table_privileges(f"{schema_name}.meta_network_hx")

    # Copy existing history data from the old table to the new one, deriving
    # network_key for historical rows from their network_name.
    op.execute(
        text(
            f"""
            INSERT INTO {schema_name}.meta_network_hx
                (network_id, network_name, description, virtual,
                 publish, col_hex, mod_time, mod_user,
                 network_key, deleted, meta_network_hx_id)
            SELECT
                network_id, network_name, description, virtual,
                publish, col_hex, mod_time, mod_user,
                {schema_name}.gen_network_key_from_name(network_name),
                deleted, meta_network_hx_id
            FROM {schema_name}.meta_network_hx_old
            ORDER BY meta_network_hx_id
            """
        )
    )

    # meta_station_hx and meta_vars_hx hold foreign keys to meta_network_hx;
    # drop those constraints so the old history table can be removed, then
    # recreate them below against the rebuilt table.
    op.execute(
        text(
            f"ALTER TABLE {schema_name}.meta_station_hx DROP CONSTRAINT meta_station_hx_meta_network_hx_id_fkey"
        )
    )
    op.execute(
        text(
            f"ALTER TABLE {schema_name}.meta_vars_hx DROP CONSTRAINT meta_vars_hx_meta_network_hx_id_fkey"
        )
    )

    # Drop the old history table now that data has been copied and FKs removed
    op.execute(text(f"DROP TABLE {schema_name}.meta_network_hx_old"))

    # Set the sequence to continue from the next ID after the last copied record
    op.execute(
        text(
            f"""
            SELECT setval(
                '{schema_name}.meta_network_hx_meta_network_hx_id_seq',
                (SELECT COALESCE(MAX(meta_network_hx_id), 1) FROM {schema_name}.meta_network_hx),
                true
            )
            """
        )
    )

    # Recreate the foreign key constraints pointing to the new history table
    op.execute(
        text(
            f"""
            ALTER TABLE {schema_name}.meta_station_hx
            ADD CONSTRAINT meta_station_hx_meta_network_hx_id_fkey
            FOREIGN KEY (meta_network_hx_id)
            REFERENCES {schema_name}.meta_network_hx(meta_network_hx_id)
            """
        )
    )
    op.execute(
        text(
            f"""
            ALTER TABLE {schema_name}.meta_vars_hx
            ADD CONSTRAINT meta_vars_hx_meta_network_hx_id_fkey
            FOREIGN KEY (meta_network_hx_id)
            REFERENCES {schema_name}.meta_network_hx(meta_network_hx_id)
            """
        )
    )

    # Re-enable the primary-table triggers and recreate the history-table
    # triggers on the rebuilt table.
    enable_primary_table_triggers("meta_network")
    create_history_table_triggers("meta_network", foreign_tables=None)

    # Create indexes on the history table
    create_history_table_indexes(
        "meta_network", "network_id", foreign_tables=None, extras=None
    )


def downgrade():
    """Remove ``network_key`` and its supporting trigger/function/constraint.

    Reverses the schema changes of :func:`upgrade`.  Note that the history
    table rebuilt by ``upgrade`` is NOT swapped back for the original table;
    only the added column is dropped, which is sufficient because column
    order is unaffected when dropping (see comment below).
    """
    # Drop the trigger and trigger function
    op.execute(
        text(
            f"DROP TRIGGER IF EXISTS set_network_key_default_trigger ON {schema_name}.meta_network"
        )
    )
    op.execute(text(f"DROP FUNCTION IF EXISTS {schema_name}.set_network_key_default()"))

    # Drop the constraint and column from primary table
    op.drop_constraint(
        "uq_meta_network_network_key",
        "meta_network",
        type_="unique",
        schema=schema_name,
    )

    # When dropping we don't have the same issues with column order so we can safely just drop the
    # column to return to the pre-migration state
    # NOTE(review): the primary-table history triggers remain enabled during
    # these drops — confirm the ops_to_hx trigger tolerates the column removal.
    op.drop_column("meta_network", "network_key", schema=schema_name)

    # Drop the column from history table
    op.drop_column("meta_network_hx", "network_key", schema=schema_name)

    # Drop the key generation function
    op.execute(
        text(f"DROP FUNCTION IF EXISTS {schema_name}.gen_network_key_from_name(text)")
    )
2 changes: 1 addition & 1 deletion pycds/database.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@


def check_migration_version(
executor, schema_name=get_schema_name(), version="8c05da87cb79"
executor, schema_name=get_schema_name(), version="33179b5ae85a"
):
"""Check that the migration version of the database schema is compatible
with the current version of this package.
Expand Down
39 changes: 39 additions & 0 deletions pycds/orm/tables/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,39 @@
"""
ORM declarations for tables.

This module provides access to table ORM definitions, with support for
retrieving definitions at specific migration revisions for testing purposes.

By default, imports from this module provide the current (head) version of tables.
Tests can set a specific revision using set_global_table_version() before importing.
"""

from pycds.orm.versioning import (
get_global_table_version,
)

# Check whether a specific table revision has been requested (set by tests
# via set_global_table_version() before this module is first imported).
_requested_version = get_global_table_version()

if _requested_version is None:
    # No specific version requested - import from (current/head)
    from .version_33179b5ae85a import *
else:
    # Specific version requested - import from that version module.
    import importlib

    # Keep the try block narrow: only the module import can legitimately
    # raise ModuleNotFoundError; a failure in the re-export loop below must
    # not be misreported as a missing version module.
    try:
        _version_module = importlib.import_module(
            f"pycds.orm.tables.version_{_requested_version}"
        )
    except ModuleNotFoundError as e:
        raise ImportError(
            f"Table version module for revision '{_requested_version}' not found. Ensure that "
            f"the migration revision exists and that the corresponding version module has been created."
        ) from e

    # Re-export all public members of the version module, mirroring a star
    # import.
    for _name in dir(_version_module):
        if not _name.startswith("_"):
            globals()[_name] = getattr(_version_module, _name)

    # Clean up so these helpers do not linger in the module namespace.
    del importlib, _version_module, _name
Loading
Loading