@@ -137,6 +137,23 @@ This builds all images from local source and enables file watching:
Dependency changes (`pyproject.toml`, `uv.lock`, `package.json`) trigger a full container rebuild.
+### 🧹 Code Quality
+
+Pre-commit hooks enforce formatting and linting automatically on every commit.
+
+```bash
+# One-time setup
+bash scripts/setup-hooks.sh
+
+# Run all hooks manually
+pre-commit run --all-files
+```
+
+**Hooks included:**
+- **Backend**: [Ruff](https://docs.astral.sh/ruff/) lint + format
+- **Frontend**: [ESLint](https://eslint.org/) + [Prettier](https://prettier.io/) with Tailwind CSS plugin
+- **General**: trailing whitespace, EOF fixer, YAML/JSON validation, merge conflict detection
+
### 🧪 Running Tests
```bash
diff --git a/backend/README.md b/backend/README.md
index 84982b6..1c14470 100644
Binary files a/backend/README.md and b/backend/README.md differ
diff --git a/backend/alembic/README b/backend/alembic/README
index 98e4f9c..2500aa1 100644
--- a/backend/alembic/README
+++ b/backend/alembic/README
@@ -1 +1 @@
-Generic single-database configuration.
\ No newline at end of file
+Generic single-database configuration.
diff --git a/backend/alembic/versions/00ebcf349edc_add_feature_completion_tracking.py b/backend/alembic/versions/00ebcf349edc_add_feature_completion_tracking.py
index a87e32a..b038052 100644
--- a/backend/alembic/versions/00ebcf349edc_add_feature_completion_tracking.py
+++ b/backend/alembic/versions/00ebcf349edc_add_feature_completion_tracking.py
@@ -5,16 +5,17 @@
Create Date: 2025-12-05 14:21:20.764109
"""
+
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
+from alembic import op
# revision identifiers, used by Alembic.
-revision: str = '00ebcf349edc'
-down_revision: Union[str, Sequence[str], None] = '6baa75dcb961'
+revision: str = "00ebcf349edc"
+down_revision: Union[str, Sequence[str], None] = "6baa75dcb961"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
@@ -23,48 +24,21 @@ def upgrade() -> None:
"""Add feature completion tracking fields."""
# Create the feature_completion_status enum for PostgreSQL
bind = op.get_bind()
- if bind.dialect.name == 'postgresql':
+ if bind.dialect.name == "postgresql":
feature_completion_status = postgresql.ENUM(
- 'pending', 'in_progress', 'completed',
- name='feature_completion_status',
- create_type=False
+ "pending", "in_progress", "completed", name="feature_completion_status", create_type=False
)
feature_completion_status.create(bind, checkfirst=True)
# Add completion tracking columns to features table
- op.add_column(
- 'features',
- sa.Column(
- 'completion_status',
- sa.String(20),
- server_default='pending',
- nullable=False
- )
- )
- op.add_column(
- 'features',
- sa.Column('completion_summary', sa.Text(), nullable=True)
- )
- op.add_column(
- 'features',
- sa.Column('completed_at', sa.DateTime(timezone=True), nullable=True)
- )
- op.add_column(
- 'features',
- sa.Column(
- 'completed_by_id',
- sa.UUID(),
- nullable=True
- )
- )
+ op.add_column("features", sa.Column("completion_status", sa.String(20), server_default="pending", nullable=False))
+ op.add_column("features", sa.Column("completion_summary", sa.Text(), nullable=True))
+ op.add_column("features", sa.Column("completed_at", sa.DateTime(timezone=True), nullable=True))
+ op.add_column("features", sa.Column("completed_by_id", sa.UUID(), nullable=True))
# Add foreign key constraint (PostgreSQL only due to SQLite limitations)
- if bind.dialect.name == 'postgresql':
- op.create_foreign_key(
- 'fk_features_completed_by_id',
- 'features', 'users',
- ['completed_by_id'], ['id']
- )
+ if bind.dialect.name == "postgresql":
+ op.create_foreign_key("fk_features_completed_by_id", "features", "users", ["completed_by_id"], ["id"])
def downgrade() -> None:
@@ -72,15 +46,15 @@ def downgrade() -> None:
bind = op.get_bind()
# Drop foreign key constraint (PostgreSQL only)
- if bind.dialect.name == 'postgresql':
- op.drop_constraint('fk_features_completed_by_id', 'features', type_='foreignkey')
+ if bind.dialect.name == "postgresql":
+ op.drop_constraint("fk_features_completed_by_id", "features", type_="foreignkey")
# Drop columns
- op.drop_column('features', 'completed_by_id')
- op.drop_column('features', 'completed_at')
- op.drop_column('features', 'completion_summary')
- op.drop_column('features', 'completion_status')
+ op.drop_column("features", "completed_by_id")
+ op.drop_column("features", "completed_at")
+ op.drop_column("features", "completion_summary")
+ op.drop_column("features", "completion_status")
# Drop the enum type (PostgreSQL only)
- if bind.dialect.name == 'postgresql':
+ if bind.dialect.name == "postgresql":
op.execute("DROP TYPE IF EXISTS feature_completion_status")
diff --git a/backend/alembic/versions/021b37581165_add_completion_summary_and_triggered_by_.py b/backend/alembic/versions/021b37581165_add_completion_summary_and_triggered_by_.py
index 1aee3e9..8e42599 100644
--- a/backend/alembic/versions/021b37581165_add_completion_summary_and_triggered_by_.py
+++ b/backend/alembic/versions/021b37581165_add_completion_summary_and_triggered_by_.py
@@ -5,26 +5,27 @@
Create Date: 2025-11-20 13:03:09.128947
"""
+
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
+from alembic import op
# revision identifiers, used by Alembic.
-revision: str = '021b37581165'
-down_revision: Union[str, Sequence[str], None] = 'ed7322775e46'
+revision: str = "021b37581165"
+down_revision: Union[str, Sequence[str], None] = "ed7322775e46"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
"""Upgrade schema."""
- op.add_column('implementation_phases', sa.Column('completion_summary', sa.Text(), nullable=True))
- op.add_column('implementation_phases', sa.Column('triggered_by', sa.String(length=50), nullable=True))
+ op.add_column("implementation_phases", sa.Column("completion_summary", sa.Text(), nullable=True))
+ op.add_column("implementation_phases", sa.Column("triggered_by", sa.String(length=50), nullable=True))
def downgrade() -> None:
"""Downgrade schema."""
- op.drop_column('implementation_phases', 'triggered_by')
- op.drop_column('implementation_phases', 'completion_summary')
+ op.drop_column("implementation_phases", "triggered_by")
+ op.drop_column("implementation_phases", "completion_summary")
diff --git a/backend/alembic/versions/0c4f46a254f8_drop_conversation_generation_triggers_.py b/backend/alembic/versions/0c4f46a254f8_drop_conversation_generation_triggers_.py
index a540909..2260abc 100644
--- a/backend/alembic/versions/0c4f46a254f8_drop_conversation_generation_triggers_.py
+++ b/backend/alembic/versions/0c4f46a254f8_drop_conversation_generation_triggers_.py
@@ -9,16 +9,17 @@
instead of using a batched scheduler with trigger records.
"""
+
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
+from alembic import op
# revision identifiers, used by Alembic.
-revision: str = '0c4f46a254f8'
-down_revision: Union[str, Sequence[str], None] = 'u0v1w2x3y4z5'
+revision: str = "0c4f46a254f8"
+down_revision: Union[str, Sequence[str], None] = "u0v1w2x3y4z5"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
diff --git a/backend/alembic/versions/0c5625ec8ba1_add_project_memberships_table.py b/backend/alembic/versions/0c5625ec8ba1_add_project_memberships_table.py
index 82dc9cc..2cbbae4 100644
--- a/backend/alembic/versions/0c5625ec8ba1_add_project_memberships_table.py
+++ b/backend/alembic/versions/0c5625ec8ba1_add_project_memberships_table.py
@@ -5,15 +5,16 @@
Create Date: 2025-11-20 10:37:38.546482
"""
+
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
+from alembic import op
# revision identifiers, used by Alembic.
-revision: str = '0c5625ec8ba1'
-down_revision: Union[str, Sequence[str], None] = 'a60117da6409'
+revision: str = "0c5625ec8ba1"
+down_revision: Union[str, Sequence[str], None] = "a60117da6409"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
@@ -22,28 +23,32 @@ def upgrade() -> None:
"""Upgrade schema."""
# Create project_memberships table
op.create_table(
- 'project_memberships',
- sa.Column('id', sa.UUID(), nullable=False),
- sa.Column('project_id', sa.UUID(), nullable=False),
- sa.Column('user_id', sa.UUID(), nullable=False),
- sa.Column('role', sa.String(length=20), nullable=False),
- sa.Column('created_at', sa.DateTime(timezone=True), server_default=sa.text('now()'), nullable=False),
- sa.ForeignKeyConstraint(['project_id'], ['projects.id'], name=op.f('fk_project_memberships_project_id_projects'), ondelete='CASCADE'),
- sa.ForeignKeyConstraint(['user_id'], ['users.id'], name=op.f('fk_project_memberships_user_id_users'), ondelete='CASCADE'),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_project_memberships')),
- sa.UniqueConstraint('project_id', 'user_id', name=op.f('uq_project_memberships_project_id_user_id'))
+ "project_memberships",
+ sa.Column("id", sa.UUID(), nullable=False),
+ sa.Column("project_id", sa.UUID(), nullable=False),
+ sa.Column("user_id", sa.UUID(), nullable=False),
+ sa.Column("role", sa.String(length=20), nullable=False),
+ sa.Column("created_at", sa.DateTime(timezone=True), server_default=sa.text("now()"), nullable=False),
+ sa.ForeignKeyConstraint(
+ ["project_id"], ["projects.id"], name=op.f("fk_project_memberships_project_id_projects"), ondelete="CASCADE"
+ ),
+ sa.ForeignKeyConstraint(
+ ["user_id"], ["users.id"], name=op.f("fk_project_memberships_user_id_users"), ondelete="CASCADE"
+ ),
+ sa.PrimaryKeyConstraint("id", name=op.f("pk_project_memberships")),
+ sa.UniqueConstraint("project_id", "user_id", name=op.f("uq_project_memberships_project_id_user_id")),
)
# Create indexes for foreign keys
- op.create_index(op.f('ix_project_memberships_project_id'), 'project_memberships', ['project_id'], unique=False)
- op.create_index(op.f('ix_project_memberships_user_id'), 'project_memberships', ['user_id'], unique=False)
+ op.create_index(op.f("ix_project_memberships_project_id"), "project_memberships", ["project_id"], unique=False)
+ op.create_index(op.f("ix_project_memberships_user_id"), "project_memberships", ["user_id"], unique=False)
def downgrade() -> None:
"""Downgrade schema."""
# Drop indexes
- op.drop_index(op.f('ix_project_memberships_user_id'), table_name='project_memberships')
- op.drop_index(op.f('ix_project_memberships_project_id'), table_name='project_memberships')
+ op.drop_index(op.f("ix_project_memberships_user_id"), table_name="project_memberships")
+ op.drop_index(op.f("ix_project_memberships_project_id"), table_name="project_memberships")
# Drop table
- op.drop_table('project_memberships')
+ op.drop_table("project_memberships")
diff --git a/backend/alembic/versions/0d2b4de48ed2_add_org_plan_fields.py b/backend/alembic/versions/0d2b4de48ed2_add_org_plan_fields.py
index fe7680e..06b2cc2 100644
--- a/backend/alembic/versions/0d2b4de48ed2_add_org_plan_fields.py
+++ b/backend/alembic/versions/0d2b4de48ed2_add_org_plan_fields.py
@@ -8,67 +8,41 @@
Create Date: 2025-12-16 19:19:14.846155
"""
+
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
+from alembic import op
# revision identifiers, used by Alembic.
-revision: str = '0d2b4de48ed2'
-down_revision: Union[str, Sequence[str], None] = '4691251c9f11'
+revision: str = "0d2b4de48ed2"
+down_revision: Union[str, Sequence[str], None] = "4691251c9f11"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
"""Add plan/subscription fields to organizations table."""
- op.add_column(
- 'organizations',
- sa.Column('plan_name', sa.String(100), nullable=True)
- )
- op.add_column(
- 'organizations',
- sa.Column('plan_llm_tokens_per_month', sa.Integer, nullable=True)
- )
- op.add_column(
- 'organizations',
- sa.Column('plan_llm_tokens_total', sa.Integer, nullable=True)
- )
- op.add_column(
- 'organizations',
- sa.Column('plan_llm_tokens_used', sa.Integer, nullable=False, server_default='0')
- )
- op.add_column(
- 'organizations',
- sa.Column('plan_max_projects', sa.Integer, nullable=True)
- )
- op.add_column(
- 'organizations',
- sa.Column('plan_max_users', sa.Integer, nullable=True)
- )
- op.add_column(
- 'organizations',
- sa.Column('plan_start_date', sa.DateTime(timezone=True), nullable=True)
- )
- op.add_column(
- 'organizations',
- sa.Column('plan_end_date', sa.DateTime(timezone=True), nullable=True)
- )
- op.add_column(
- 'organizations',
- sa.Column('plan_billing_cycle_start', sa.Integer, nullable=True, server_default='1')
- )
+ op.add_column("organizations", sa.Column("plan_name", sa.String(100), nullable=True))
+ op.add_column("organizations", sa.Column("plan_llm_tokens_per_month", sa.Integer, nullable=True))
+ op.add_column("organizations", sa.Column("plan_llm_tokens_total", sa.Integer, nullable=True))
+ op.add_column("organizations", sa.Column("plan_llm_tokens_used", sa.Integer, nullable=False, server_default="0"))
+ op.add_column("organizations", sa.Column("plan_max_projects", sa.Integer, nullable=True))
+ op.add_column("organizations", sa.Column("plan_max_users", sa.Integer, nullable=True))
+ op.add_column("organizations", sa.Column("plan_start_date", sa.DateTime(timezone=True), nullable=True))
+ op.add_column("organizations", sa.Column("plan_end_date", sa.DateTime(timezone=True), nullable=True))
+ op.add_column("organizations", sa.Column("plan_billing_cycle_start", sa.Integer, nullable=True, server_default="1"))
def downgrade() -> None:
"""Remove plan/subscription fields from organizations table."""
- op.drop_column('organizations', 'plan_billing_cycle_start')
- op.drop_column('organizations', 'plan_end_date')
- op.drop_column('organizations', 'plan_start_date')
- op.drop_column('organizations', 'plan_max_users')
- op.drop_column('organizations', 'plan_max_projects')
- op.drop_column('organizations', 'plan_llm_tokens_used')
- op.drop_column('organizations', 'plan_llm_tokens_total')
- op.drop_column('organizations', 'plan_llm_tokens_per_month')
- op.drop_column('organizations', 'plan_name')
+ op.drop_column("organizations", "plan_billing_cycle_start")
+ op.drop_column("organizations", "plan_end_date")
+ op.drop_column("organizations", "plan_start_date")
+ op.drop_column("organizations", "plan_max_users")
+ op.drop_column("organizations", "plan_max_projects")
+ op.drop_column("organizations", "plan_llm_tokens_used")
+ op.drop_column("organizations", "plan_llm_tokens_total")
+ op.drop_column("organizations", "plan_llm_tokens_per_month")
+ op.drop_column("organizations", "plan_name")
diff --git a/backend/alembic/versions/1227d87646fe_fix_spec_version_unique_constraint_per_.py b/backend/alembic/versions/1227d87646fe_fix_spec_version_unique_constraint_per_.py
index 42812dd..c07e6dd 100644
--- a/backend/alembic/versions/1227d87646fe_fix_spec_version_unique_constraint_per_.py
+++ b/backend/alembic/versions/1227d87646fe_fix_spec_version_unique_constraint_per_.py
@@ -5,15 +5,14 @@
Create Date: 2025-12-19 21:56:40.605043
"""
+
from typing import Sequence, Union
from alembic import op
-import sqlalchemy as sa
-
# revision identifiers, used by Alembic.
-revision: str = '1227d87646fe'
-down_revision: Union[str, Sequence[str], None] = '8bba664c9d7b'
+revision: str = "1227d87646fe"
+down_revision: Union[str, Sequence[str], None] = "8bba664c9d7b"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
@@ -26,24 +25,18 @@ def upgrade() -> None:
its own version sequence (v1, v2, v3...).
"""
# Drop old constraint (per project)
- op.drop_constraint('uq_project_spec_version', 'spec_versions', type_='unique')
+ op.drop_constraint("uq_project_spec_version", "spec_versions", type_="unique")
# Create new constraint (per phase)
op.create_unique_constraint(
- 'uq_phase_spec_version',
- 'spec_versions',
- ['brainstorming_phase_id', 'spec_type', 'version']
+ "uq_phase_spec_version", "spec_versions", ["brainstorming_phase_id", "spec_type", "version"]
)
def downgrade() -> None:
"""Downgrade schema."""
# Drop new constraint
- op.drop_constraint('uq_phase_spec_version', 'spec_versions', type_='unique')
+ op.drop_constraint("uq_phase_spec_version", "spec_versions", type_="unique")
# Restore old constraint
- op.create_unique_constraint(
- 'uq_project_spec_version',
- 'spec_versions',
- ['project_id', 'spec_type', 'version']
- )
+ op.create_unique_constraint("uq_project_spec_version", "spec_versions", ["project_id", "spec_type", "version"])
diff --git a/backend/alembic/versions/178e7cb83afe_add_implementation_phases_table.py b/backend/alembic/versions/178e7cb83afe_add_implementation_phases_table.py
index f6f2d21..92a4957 100644
--- a/backend/alembic/versions/178e7cb83afe_add_implementation_phases_table.py
+++ b/backend/alembic/versions/178e7cb83afe_add_implementation_phases_table.py
@@ -5,15 +5,14 @@
Create Date: 2025-11-20 11:47:32.962280
"""
+
from typing import Sequence, Union
from alembic import op
-import sqlalchemy as sa
-
# revision identifiers, used by Alembic.
-revision: str = '178e7cb83afe'
-down_revision: Union[str, Sequence[str], None] = '8e970e133ccd'
+revision: str = "178e7cb83afe"
+down_revision: Union[str, Sequence[str], None] = "8e970e133ccd"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
@@ -45,18 +44,18 @@ def upgrade() -> None:
""")
# Create indexes
- op.create_index('ix_implementation_phases_project_id', 'implementation_phases', ['project_id'])
- op.create_index('ix_implementation_phases_status', 'implementation_phases', ['project_id', 'status'])
+ op.create_index("ix_implementation_phases_project_id", "implementation_phases", ["project_id"])
+ op.create_index("ix_implementation_phases_status", "implementation_phases", ["project_id", "status"])
def downgrade() -> None:
"""Downgrade schema."""
# Drop indexes
- op.drop_index('ix_implementation_phases_status', table_name='implementation_phases')
- op.drop_index('ix_implementation_phases_project_id', table_name='implementation_phases')
+ op.drop_index("ix_implementation_phases_status", table_name="implementation_phases")
+ op.drop_index("ix_implementation_phases_project_id", table_name="implementation_phases")
# Drop table
- op.drop_table('implementation_phases')
+ op.drop_table("implementation_phases")
# Drop ENUM
op.execute("DROP TYPE phase_status")
diff --git a/backend/alembic/versions/17e2d7ed64e2_add_testing_debug_settings.py b/backend/alembic/versions/17e2d7ed64e2_add_testing_debug_settings.py
index a33a377..65ad74d 100644
--- a/backend/alembic/versions/17e2d7ed64e2_add_testing_debug_settings.py
+++ b/backend/alembic/versions/17e2d7ed64e2_add_testing_debug_settings.py
@@ -5,15 +5,16 @@
Create Date: 2025-11-22 09:37:51.442393
"""
+
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
+from alembic import op
# revision identifiers, used by Alembic.
-revision: str = '17e2d7ed64e2'
-down_revision: Union[str, Sequence[str], None] = '840545b82f16'
+revision: str = "17e2d7ed64e2"
+down_revision: Union[str, Sequence[str], None] = "840545b82f16"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
@@ -22,40 +23,33 @@ def upgrade() -> None:
"""Add testing & debugging settings to llm_preferences table."""
# Add mock_discovery_enabled column (boolean, default false)
op.add_column(
- 'llm_preferences',
- sa.Column('mock_discovery_enabled', sa.Boolean(), nullable=False, server_default='false')
+ "llm_preferences", sa.Column("mock_discovery_enabled", sa.Boolean(), nullable=False, server_default="false")
)
# Add mock_discovery_question_limit column (integer, default 10, check constraint)
op.add_column(
- 'llm_preferences',
- sa.Column('mock_discovery_question_limit', sa.Integer(), nullable=False, server_default='10')
+ "llm_preferences", sa.Column("mock_discovery_question_limit", sa.Integer(), nullable=False, server_default="10")
)
op.create_check_constraint(
- 'ck_mock_discovery_question_limit',
- 'llm_preferences',
- 'mock_discovery_question_limit IN (10, 20, 30)'
+ "ck_mock_discovery_question_limit", "llm_preferences", "mock_discovery_question_limit IN (10, 20, 30)"
)
# Add mock_discovery_delay_seconds column (integer, default 5, check constraint)
op.add_column(
- 'llm_preferences',
- sa.Column('mock_discovery_delay_seconds', sa.Integer(), nullable=False, server_default='5')
+ "llm_preferences", sa.Column("mock_discovery_delay_seconds", sa.Integer(), nullable=False, server_default="5")
)
op.create_check_constraint(
- 'ck_mock_discovery_delay_seconds',
- 'llm_preferences',
- 'mock_discovery_delay_seconds IN (5, 10, 20)'
+ "ck_mock_discovery_delay_seconds", "llm_preferences", "mock_discovery_delay_seconds IN (5, 10, 20)"
)
def downgrade() -> None:
"""Remove testing & debugging settings from llm_preferences table."""
# Drop check constraints first
- op.drop_constraint('ck_mock_discovery_delay_seconds', 'llm_preferences', type_='check')
- op.drop_constraint('ck_mock_discovery_question_limit', 'llm_preferences', type_='check')
+ op.drop_constraint("ck_mock_discovery_delay_seconds", "llm_preferences", type_="check")
+ op.drop_constraint("ck_mock_discovery_question_limit", "llm_preferences", type_="check")
# Drop columns
- op.drop_column('llm_preferences', 'mock_discovery_delay_seconds')
- op.drop_column('llm_preferences', 'mock_discovery_question_limit')
- op.drop_column('llm_preferences', 'mock_discovery_enabled')
+ op.drop_column("llm_preferences", "mock_discovery_delay_seconds")
+ op.drop_column("llm_preferences", "mock_discovery_question_limit")
+ op.drop_column("llm_preferences", "mock_discovery_enabled")
diff --git a/backend/alembic/versions/1931abc02c3f_add_user_auth_fields.py b/backend/alembic/versions/1931abc02c3f_add_user_auth_fields.py
index 89a4c95..36abdca 100644
--- a/backend/alembic/versions/1931abc02c3f_add_user_auth_fields.py
+++ b/backend/alembic/versions/1931abc02c3f_add_user_auth_fields.py
@@ -5,15 +5,16 @@
Create Date: 2025-11-20 09:16:02.362772
"""
+
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
+from alembic import op
# revision identifiers, used by Alembic.
-revision: str = '1931abc02c3f'
-down_revision: Union[str, Sequence[str], None] = 'a8845f795cf1'
+revision: str = "1931abc02c3f"
+down_revision: Union[str, Sequence[str], None] = "a8845f795cf1"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
@@ -21,16 +22,16 @@
def upgrade() -> None:
"""Upgrade schema."""
# Add password_hash field
- op.add_column('users', sa.Column('password_hash', sa.String(length=255), nullable=False))
+ op.add_column("users", sa.Column("password_hash", sa.String(length=255), nullable=False))
# Add display_name field
- op.add_column('users', sa.Column('display_name', sa.String(length=100), nullable=True))
+ op.add_column("users", sa.Column("display_name", sa.String(length=100), nullable=True))
def downgrade() -> None:
"""Downgrade schema."""
# Remove display_name field
- op.drop_column('users', 'display_name')
+ op.drop_column("users", "display_name")
# Remove password_hash field
- op.drop_column('users', 'password_hash')
+ op.drop_column("users", "password_hash")
diff --git a/backend/alembic/versions/19fa5dd5cb52_add_organization_id_to_organizations.py b/backend/alembic/versions/19fa5dd5cb52_add_organization_id_to_organizations.py
index a9b43ee..379ba42 100644
--- a/backend/alembic/versions/19fa5dd5cb52_add_organization_id_to_organizations.py
+++ b/backend/alembic/versions/19fa5dd5cb52_add_organization_id_to_organizations.py
@@ -5,15 +5,16 @@
Create Date: 2025-12-24 00:20:21.946817
"""
+
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
+from alembic import op
# revision identifiers, used by Alembic.
-revision: str = '19fa5dd5cb52'
-down_revision: Union[str, Sequence[str], None] = 'j1k2l3m4n5o6'
+revision: str = "19fa5dd5cb52"
+down_revision: Union[str, Sequence[str], None] = "j1k2l3m4n5o6"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
@@ -21,22 +22,14 @@
def upgrade() -> None:
"""Upgrade schema."""
# Add organization_id column to organizations table
- op.add_column(
- 'organizations',
- sa.Column('organization_id', sa.String(length=255), nullable=True)
- )
+ op.add_column("organizations", sa.Column("organization_id", sa.String(length=255), nullable=True))
# Create unique index on organization_id
- op.create_index(
- 'ix_organizations_organization_id',
- 'organizations',
- ['organization_id'],
- unique=True
- )
+ op.create_index("ix_organizations_organization_id", "organizations", ["organization_id"], unique=True)
def downgrade() -> None:
"""Downgrade schema."""
# Drop the index first
- op.drop_index('ix_organizations_organization_id', table_name='organizations')
+ op.drop_index("ix_organizations_organization_id", table_name="organizations")
# Drop the column
- op.drop_column('organizations', 'organization_id')
+ op.drop_column("organizations", "organization_id")
diff --git a/backend/alembic/versions/1cfb3bffcc2a_phase3_autogen_discovery_schema.py b/backend/alembic/versions/1cfb3bffcc2a_phase3_autogen_discovery_schema.py
index f63471d..516fd3d 100644
--- a/backend/alembic/versions/1cfb3bffcc2a_phase3_autogen_discovery_schema.py
+++ b/backend/alembic/versions/1cfb3bffcc2a_phase3_autogen_discovery_schema.py
@@ -5,15 +5,16 @@
Create Date: 2025-11-21 10:58:33.178811
"""
+
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
+from alembic import op
# revision identifiers, used by Alembic.
-revision: str = '1cfb3bffcc2a'
-down_revision: Union[str, Sequence[str], None] = '6ebda2d1112d'
+revision: str = "1cfb3bffcc2a"
+down_revision: Union[str, Sequence[str], None] = "6ebda2d1112d"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
@@ -71,37 +72,46 @@ def upgrade() -> None:
op.execute("DROP TYPE discoverystatus_old;")
# 3. Add new columns for Phase 3
- op.add_column('discovery_questions', sa.Column('type', sa.String(), nullable=False, server_default='mcq_with_custom_input'))
- op.add_column('discovery_questions', sa.Column('allows_custom_input', sa.Boolean(), nullable=False, server_default='true'))
- op.add_column('discovery_questions', sa.Column('followup_to', sa.dialects.postgresql.UUID(as_uuid=True), nullable=True))
- op.add_column('discovery_questions', sa.Column('depth', sa.Integer(), nullable=False, server_default='0'))
- op.add_column('discovery_questions', sa.Column('explanation', sa.Text(), nullable=True))
- op.add_column('discovery_questions', sa.Column('internal_agent_notes', sa.Text(), nullable=True))
- op.add_column('discovery_questions', sa.Column('discussion_thread_id', sa.dialects.postgresql.UUID(as_uuid=True), nullable=True))
- op.add_column('discovery_questions', sa.Column('suggested_follow_ups', sa.JSON(), nullable=True))
+ op.add_column(
+ "discovery_questions", sa.Column("type", sa.String(), nullable=False, server_default="mcq_with_custom_input")
+ )
+ op.add_column(
+ "discovery_questions", sa.Column("allows_custom_input", sa.Boolean(), nullable=False, server_default="true")
+ )
+ op.add_column(
+ "discovery_questions", sa.Column("followup_to", sa.dialects.postgresql.UUID(as_uuid=True), nullable=True)
+ )
+ op.add_column("discovery_questions", sa.Column("depth", sa.Integer(), nullable=False, server_default="0"))
+ op.add_column("discovery_questions", sa.Column("explanation", sa.Text(), nullable=True))
+ op.add_column("discovery_questions", sa.Column("internal_agent_notes", sa.Text(), nullable=True))
+ op.add_column(
+ "discovery_questions",
+ sa.Column("discussion_thread_id", sa.dialects.postgresql.UUID(as_uuid=True), nullable=True),
+ )
+ op.add_column("discovery_questions", sa.Column("suggested_follow_ups", sa.JSON(), nullable=True))
# 4. Add foreign key for followup_to
op.create_foreign_key(
- 'fk_discovery_questions_followup_to',
- 'discovery_questions',
- 'discovery_questions',
- ['followup_to'],
- ['id'],
- ondelete='CASCADE'
+ "fk_discovery_questions_followup_to",
+ "discovery_questions",
+ "discovery_questions",
+ ["followup_to"],
+ ["id"],
+ ondelete="CASCADE",
)
# 5. Add foreign key for discussion_thread_id
op.create_foreign_key(
- 'fk_discovery_questions_discussion_thread_id',
- 'discovery_questions',
- 'threads',
- ['discussion_thread_id'],
- ['id'],
- ondelete='SET NULL'
+ "fk_discovery_questions_discussion_thread_id",
+ "discovery_questions",
+ "threads",
+ ["discussion_thread_id"],
+ ["id"],
+ ondelete="SET NULL",
)
# 6. Create index on followup_to for better query performance
- op.create_index('ix_discovery_questions_followup_to', 'discovery_questions', ['followup_to'])
+ op.create_index("ix_discovery_questions_followup_to", "discovery_questions", ["followup_to"])
# 7. Migrate existing data: map depends_on to followup_to
op.execute("""
@@ -112,16 +122,16 @@ def upgrade() -> None:
""")
# 8. Add intent and complexity to projects table for caching
- op.add_column('projects', sa.Column('discovery_intent', sa.String(), nullable=True))
- op.add_column('projects', sa.Column('discovery_complexity', sa.String(), nullable=True))
+ op.add_column("projects", sa.Column("discovery_intent", sa.String(), nullable=True))
+ op.add_column("projects", sa.Column("discovery_complexity", sa.String(), nullable=True))
def downgrade() -> None:
"""Downgrade schema to Phase 1."""
# 1. Remove columns from projects table
- op.drop_column('projects', 'discovery_complexity')
- op.drop_column('projects', 'discovery_intent')
+ op.drop_column("projects", "discovery_complexity")
+ op.drop_column("projects", "discovery_intent")
# 2. Migrate followup_to back to depends_on
op.execute("""
@@ -131,19 +141,19 @@ def downgrade() -> None:
""")
# 3. Drop indexes and foreign keys
- op.drop_index('ix_discovery_questions_followup_to', table_name='discovery_questions')
- op.drop_constraint('fk_discovery_questions_discussion_thread_id', 'discovery_questions', type_='foreignkey')
- op.drop_constraint('fk_discovery_questions_followup_to', 'discovery_questions', type_='foreignkey')
+ op.drop_index("ix_discovery_questions_followup_to", table_name="discovery_questions")
+ op.drop_constraint("fk_discovery_questions_discussion_thread_id", "discovery_questions", type_="foreignkey")
+ op.drop_constraint("fk_discovery_questions_followup_to", "discovery_questions", type_="foreignkey")
# 4. Drop new columns
- op.drop_column('discovery_questions', 'suggested_follow_ups')
- op.drop_column('discovery_questions', 'discussion_thread_id')
- op.drop_column('discovery_questions', 'internal_agent_notes')
- op.drop_column('discovery_questions', 'explanation')
- op.drop_column('discovery_questions', 'depth')
- op.drop_column('discovery_questions', 'followup_to')
- op.drop_column('discovery_questions', 'allows_custom_input')
- op.drop_column('discovery_questions', 'type')
+ op.drop_column("discovery_questions", "suggested_follow_ups")
+ op.drop_column("discovery_questions", "discussion_thread_id")
+ op.drop_column("discovery_questions", "internal_agent_notes")
+ op.drop_column("discovery_questions", "explanation")
+ op.drop_column("discovery_questions", "depth")
+ op.drop_column("discovery_questions", "followup_to")
+ op.drop_column("discovery_questions", "allows_custom_input")
+ op.drop_column("discovery_questions", "type")
# 5. Revert status enum
op.execute("""
diff --git a/backend/alembic/versions/1e859fbdbd74_add_spec_coverage_reports.py b/backend/alembic/versions/1e859fbdbd74_add_spec_coverage_reports.py
index 8ce7dac..1b64a0d 100644
--- a/backend/alembic/versions/1e859fbdbd74_add_spec_coverage_reports.py
+++ b/backend/alembic/versions/1e859fbdbd74_add_spec_coverage_reports.py
@@ -5,16 +5,17 @@
Create Date: 2025-11-24 11:43:58.546628
"""
+
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
-from sqlalchemy.dialects.postgresql import UUID, JSON
+from sqlalchemy.dialects.postgresql import JSON, UUID
+from alembic import op
# revision identifiers, used by Alembic.
-revision: str = '1e859fbdbd74'
-down_revision: Union[str, Sequence[str], None] = 'bc6ddbf8b5b7'
+revision: str = "1e859fbdbd74"
+down_revision: Union[str, Sequence[str], None] = "bc6ddbf8b5b7"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
@@ -22,21 +23,23 @@
def upgrade() -> None:
"""Upgrade schema."""
op.create_table(
- 'spec_coverage_reports',
- sa.Column('id', UUID(as_uuid=True), primary_key=True),
- sa.Column('spec_version_id', UUID(as_uuid=True), sa.ForeignKey('spec_versions.id'), nullable=False, unique=True),
- sa.Column('ok', sa.Boolean(), nullable=False),
- sa.Column('uncovered_must_have_questions', JSON(), nullable=False, server_default='[]'),
- sa.Column('weak_coverage_warnings', JSON(), nullable=False, server_default='[]'),
- sa.Column('contradictions_found', JSON(), nullable=False, server_default='[]'),
- sa.Column('suggested_rewrites', JSON(), nullable=False, server_default='[]'),
- sa.Column('created_at', sa.DateTime(timezone=True), nullable=False, server_default=sa.text('now()')),
+ "spec_coverage_reports",
+ sa.Column("id", UUID(as_uuid=True), primary_key=True),
+ sa.Column(
+ "spec_version_id", UUID(as_uuid=True), sa.ForeignKey("spec_versions.id"), nullable=False, unique=True
+ ),
+ sa.Column("ok", sa.Boolean(), nullable=False),
+ sa.Column("uncovered_must_have_questions", JSON(), nullable=False, server_default="[]"),
+ sa.Column("weak_coverage_warnings", JSON(), nullable=False, server_default="[]"),
+ sa.Column("contradictions_found", JSON(), nullable=False, server_default="[]"),
+ sa.Column("suggested_rewrites", JSON(), nullable=False, server_default="[]"),
+ sa.Column("created_at", sa.DateTime(timezone=True), nullable=False, server_default=sa.text("now()")),
)
# Create index on spec_version_id for fast lookups
- op.create_index('ix_spec_coverage_reports_spec_version_id', 'spec_coverage_reports', ['spec_version_id'])
+ op.create_index("ix_spec_coverage_reports_spec_version_id", "spec_coverage_reports", ["spec_version_id"])
def downgrade() -> None:
"""Downgrade schema."""
- op.drop_index('ix_spec_coverage_reports_spec_version_id', table_name='spec_coverage_reports')
- op.drop_table('spec_coverage_reports')
+ op.drop_index("ix_spec_coverage_reports_spec_version_id", table_name="spec_coverage_reports")
+ op.drop_table("spec_coverage_reports")
diff --git a/backend/alembic/versions/22633d1c969b_add_feature_key_number.py b/backend/alembic/versions/22633d1c969b_add_feature_key_number.py
index 9418a73..3f874d6 100644
--- a/backend/alembic/versions/22633d1c969b_add_feature_key_number.py
+++ b/backend/alembic/versions/22633d1c969b_add_feature_key_number.py
@@ -5,22 +5,23 @@
Create Date: 2025-01-01 00:00:00.000000
"""
+
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
+from alembic import op
# revision identifiers, used by Alembic.
-revision: str = '22633d1c969b'
-down_revision: Union[str, None] = 'c8d9e0f1g2h3'
+revision: str = "22633d1c969b"
+down_revision: Union[str, None] = "c8d9e0f1g2h3"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
# Add feature_key_number column (nullable initially for backfill)
- op.add_column('features', sa.Column('feature_key_number', sa.Integer(), nullable=True))
+ op.add_column("features", sa.Column("feature_key_number", sa.Integer(), nullable=True))
# Backfill existing rows by extracting the numeric part after the last hyphen
# e.g., "1F40-090" -> 90, "USR-1234" -> 1234
@@ -33,15 +34,15 @@ def upgrade() -> None:
""")
# Make it non-nullable after backfill
- op.alter_column('features', 'feature_key_number', nullable=False, server_default='0')
+ op.alter_column("features", "feature_key_number", nullable=False, server_default="0")
# Remove the server default (it was just for the alter)
- op.alter_column('features', 'feature_key_number', server_default=None)
+ op.alter_column("features", "feature_key_number", server_default=None)
# Add index for efficient sorting
- op.create_index('ix_features_feature_key_number', 'features', ['feature_key_number'])
+ op.create_index("ix_features_feature_key_number", "features", ["feature_key_number"])
def downgrade() -> None:
- op.drop_index('ix_features_feature_key_number', table_name='features')
- op.drop_column('features', 'feature_key_number')
+ op.drop_index("ix_features_feature_key_number", table_name="features")
+ op.drop_column("features", "feature_key_number")
diff --git a/backend/alembic/versions/23893c2e92fa_add_llm_usage_tracking_to_jobs.py b/backend/alembic/versions/23893c2e92fa_add_llm_usage_tracking_to_jobs.py
index bb6d2e7..eef497c 100644
--- a/backend/alembic/versions/23893c2e92fa_add_llm_usage_tracking_to_jobs.py
+++ b/backend/alembic/versions/23893c2e92fa_add_llm_usage_tracking_to_jobs.py
@@ -5,30 +5,31 @@
Create Date: 2025-12-05 16:14:42.646001
"""
+
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
+from alembic import op
# revision identifiers, used by Alembic.
-revision: str = '23893c2e92fa'
-down_revision: Union[str, Sequence[str], None] = '00ebcf349edc'
+revision: str = "23893c2e92fa"
+down_revision: Union[str, Sequence[str], None] = "00ebcf349edc"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
"""Add LLM usage tracking columns to jobs table."""
- op.add_column('jobs', sa.Column('model_used', sa.String(length=100), nullable=True))
- op.add_column('jobs', sa.Column('total_prompt_tokens', sa.Integer(), nullable=True))
- op.add_column('jobs', sa.Column('total_completion_tokens', sa.Integer(), nullable=True))
- op.add_column('jobs', sa.Column('total_cost_usd', sa.Numeric(precision=10, scale=6), nullable=True))
+ op.add_column("jobs", sa.Column("model_used", sa.String(length=100), nullable=True))
+ op.add_column("jobs", sa.Column("total_prompt_tokens", sa.Integer(), nullable=True))
+ op.add_column("jobs", sa.Column("total_completion_tokens", sa.Integer(), nullable=True))
+ op.add_column("jobs", sa.Column("total_cost_usd", sa.Numeric(precision=10, scale=6), nullable=True))
def downgrade() -> None:
"""Remove LLM usage tracking columns from jobs table."""
- op.drop_column('jobs', 'total_cost_usd')
- op.drop_column('jobs', 'total_completion_tokens')
- op.drop_column('jobs', 'total_prompt_tokens')
- op.drop_column('jobs', 'model_used')
+ op.drop_column("jobs", "total_cost_usd")
+ op.drop_column("jobs", "total_completion_tokens")
+ op.drop_column("jobs", "total_prompt_tokens")
+ op.drop_column("jobs", "model_used")
diff --git a/backend/alembic/versions/264f9632081a_add_notification_tables.py b/backend/alembic/versions/264f9632081a_add_notification_tables.py
index 57888db..aa52f45 100644
--- a/backend/alembic/versions/264f9632081a_add_notification_tables.py
+++ b/backend/alembic/versions/264f9632081a_add_notification_tables.py
@@ -5,15 +5,16 @@
Create Date: 2025-11-20 00:00:00.000000
"""
+
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
+from alembic import op
# revision identifiers, used by Alembic.
-revision: str = '264f9632081a'
-down_revision: Union[str, Sequence[str], None] = '2aec09f57c3e'
+revision: str = "264f9632081a"
+down_revision: Union[str, Sequence[str], None] = "2aec09f57c3e"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
@@ -22,61 +23,61 @@ def upgrade() -> None:
"""Upgrade schema."""
# Create notification_preferences table
op.create_table(
- 'notification_preferences',
- sa.Column('id', sa.UUID(), nullable=False),
- sa.Column('user_id', sa.UUID(), nullable=False),
- sa.Column('channel', sa.Enum('EMAIL', 'SLACK', 'TEAMS', name='notificationchannel'), nullable=False),
- sa.Column('enabled', sa.Boolean(), nullable=False, server_default=sa.true()),
- sa.Column('channel_config', sa.String(), nullable=True),
- sa.Column('created_at', sa.DateTime(timezone=True), server_default=sa.text('now()'), nullable=False),
- sa.Column('updated_at', sa.DateTime(timezone=True), server_default=sa.text('now()'), nullable=False),
- sa.ForeignKeyConstraint(['user_id'], ['users.id'], ondelete='CASCADE'),
- sa.PrimaryKeyConstraint('id')
+ "notification_preferences",
+ sa.Column("id", sa.UUID(), nullable=False),
+ sa.Column("user_id", sa.UUID(), nullable=False),
+ sa.Column("channel", sa.Enum("EMAIL", "SLACK", "TEAMS", name="notificationchannel"), nullable=False),
+ sa.Column("enabled", sa.Boolean(), nullable=False, server_default=sa.true()),
+ sa.Column("channel_config", sa.String(), nullable=True),
+ sa.Column("created_at", sa.DateTime(timezone=True), server_default=sa.text("now()"), nullable=False),
+ sa.Column("updated_at", sa.DateTime(timezone=True), server_default=sa.text("now()"), nullable=False),
+ sa.ForeignKeyConstraint(["user_id"], ["users.id"], ondelete="CASCADE"),
+ sa.PrimaryKeyConstraint("id"),
)
- op.create_index('ix_notification_preferences_user_id', 'notification_preferences', ['user_id'])
+ op.create_index("ix_notification_preferences_user_id", "notification_preferences", ["user_id"])
# Create notification_project_mutes table
op.create_table(
- 'notification_project_mutes',
- sa.Column('id', sa.UUID(), nullable=False),
- sa.Column('user_id', sa.UUID(), nullable=False),
- sa.Column('project_id', sa.UUID(), nullable=False),
- sa.Column('created_at', sa.DateTime(timezone=True), server_default=sa.text('now()'), nullable=False),
- sa.ForeignKeyConstraint(['user_id'], ['users.id'], ondelete='CASCADE'),
- sa.ForeignKeyConstraint(['project_id'], ['projects.id'], ondelete='CASCADE'),
- sa.PrimaryKeyConstraint('id'),
- sa.UniqueConstraint('user_id', 'project_id', name='uq_user_project_mute')
+ "notification_project_mutes",
+ sa.Column("id", sa.UUID(), nullable=False),
+ sa.Column("user_id", sa.UUID(), nullable=False),
+ sa.Column("project_id", sa.UUID(), nullable=False),
+ sa.Column("created_at", sa.DateTime(timezone=True), server_default=sa.text("now()"), nullable=False),
+ sa.ForeignKeyConstraint(["user_id"], ["users.id"], ondelete="CASCADE"),
+ sa.ForeignKeyConstraint(["project_id"], ["projects.id"], ondelete="CASCADE"),
+ sa.PrimaryKeyConstraint("id"),
+ sa.UniqueConstraint("user_id", "project_id", name="uq_user_project_mute"),
)
- op.create_index('ix_notification_project_mutes_user_id', 'notification_project_mutes', ['user_id'])
- op.create_index('ix_notification_project_mutes_project_id', 'notification_project_mutes', ['project_id'])
+ op.create_index("ix_notification_project_mutes_user_id", "notification_project_mutes", ["user_id"])
+ op.create_index("ix_notification_project_mutes_project_id", "notification_project_mutes", ["project_id"])
# Create notification_thread_watches table
op.create_table(
- 'notification_thread_watches',
- sa.Column('id', sa.UUID(), nullable=False),
- sa.Column('user_id', sa.UUID(), nullable=False),
- sa.Column('thread_id', sa.UUID(), nullable=False),
- sa.Column('created_at', sa.DateTime(timezone=True), server_default=sa.text('now()'), nullable=False),
- sa.ForeignKeyConstraint(['user_id'], ['users.id'], ondelete='CASCADE'),
- sa.ForeignKeyConstraint(['thread_id'], ['threads.id'], ondelete='CASCADE'),
- sa.PrimaryKeyConstraint('id'),
- sa.UniqueConstraint('user_id', 'thread_id', name='uq_user_thread_watch')
+ "notification_thread_watches",
+ sa.Column("id", sa.UUID(), nullable=False),
+ sa.Column("user_id", sa.UUID(), nullable=False),
+ sa.Column("thread_id", sa.UUID(), nullable=False),
+ sa.Column("created_at", sa.DateTime(timezone=True), server_default=sa.text("now()"), nullable=False),
+ sa.ForeignKeyConstraint(["user_id"], ["users.id"], ondelete="CASCADE"),
+ sa.ForeignKeyConstraint(["thread_id"], ["threads.id"], ondelete="CASCADE"),
+ sa.PrimaryKeyConstraint("id"),
+ sa.UniqueConstraint("user_id", "thread_id", name="uq_user_thread_watch"),
)
- op.create_index('ix_notification_thread_watches_user_id', 'notification_thread_watches', ['user_id'])
- op.create_index('ix_notification_thread_watches_thread_id', 'notification_thread_watches', ['thread_id'])
+ op.create_index("ix_notification_thread_watches_user_id", "notification_thread_watches", ["user_id"])
+ op.create_index("ix_notification_thread_watches_thread_id", "notification_thread_watches", ["thread_id"])
def downgrade() -> None:
"""Downgrade schema."""
- op.drop_index('ix_notification_thread_watches_thread_id', table_name='notification_thread_watches')
- op.drop_index('ix_notification_thread_watches_user_id', table_name='notification_thread_watches')
- op.drop_table('notification_thread_watches')
+ op.drop_index("ix_notification_thread_watches_thread_id", table_name="notification_thread_watches")
+ op.drop_index("ix_notification_thread_watches_user_id", table_name="notification_thread_watches")
+ op.drop_table("notification_thread_watches")
- op.drop_index('ix_notification_project_mutes_project_id', table_name='notification_project_mutes')
- op.drop_index('ix_notification_project_mutes_user_id', table_name='notification_project_mutes')
- op.drop_table('notification_project_mutes')
+ op.drop_index("ix_notification_project_mutes_project_id", table_name="notification_project_mutes")
+ op.drop_index("ix_notification_project_mutes_user_id", table_name="notification_project_mutes")
+ op.drop_table("notification_project_mutes")
- op.drop_index('ix_notification_preferences_user_id', table_name='notification_preferences')
- op.drop_table('notification_preferences')
+ op.drop_index("ix_notification_preferences_user_id", table_name="notification_preferences")
+ op.drop_table("notification_preferences")
- op.execute('DROP TYPE notificationchannel')
+ op.execute("DROP TYPE notificationchannel")
diff --git a/backend/alembic/versions/2aec09f57c3e_add_integration_configs_and_bug_sync_.py b/backend/alembic/versions/2aec09f57c3e_add_integration_configs_and_bug_sync_.py
index 777041f..e22660f 100644
--- a/backend/alembic/versions/2aec09f57c3e_add_integration_configs_and_bug_sync_.py
+++ b/backend/alembic/versions/2aec09f57c3e_add_integration_configs_and_bug_sync_.py
@@ -5,15 +5,16 @@
Create Date: 2025-11-20 13:18:11.302275
"""
+
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
+from alembic import op
# revision identifiers, used by Alembic.
-revision: str = '2aec09f57c3e'
-down_revision: Union[str, Sequence[str], None] = '021b37581165'
+revision: str = "2aec09f57c3e"
+down_revision: Union[str, Sequence[str], None] = "021b37581165"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
@@ -22,42 +23,42 @@ def upgrade() -> None:
"""Upgrade schema."""
# Create integration_configs table
op.create_table(
- 'integration_configs',
- sa.Column('id', sa.UUID(), nullable=False),
- sa.Column('organization_id', sa.UUID(), nullable=False),
- sa.Column('provider', sa.String(length=50), nullable=False),
- sa.Column('encrypted_token', sa.Text(), nullable=False),
- sa.Column('config_json', sa.JSON(), nullable=True),
- sa.Column('created_at', sa.DateTime(timezone=True), server_default=sa.text('now()'), nullable=False),
- sa.Column('updated_at', sa.DateTime(timezone=True), server_default=sa.text('now()'), nullable=False),
- sa.ForeignKeyConstraint(['organization_id'], ['organizations.id'], ondelete='CASCADE'),
- sa.PrimaryKeyConstraint('id'),
- sa.UniqueConstraint('organization_id', 'provider', name='uq_org_provider')
+ "integration_configs",
+ sa.Column("id", sa.UUID(), nullable=False),
+ sa.Column("organization_id", sa.UUID(), nullable=False),
+ sa.Column("provider", sa.String(length=50), nullable=False),
+ sa.Column("encrypted_token", sa.Text(), nullable=False),
+ sa.Column("config_json", sa.JSON(), nullable=True),
+ sa.Column("created_at", sa.DateTime(timezone=True), server_default=sa.text("now()"), nullable=False),
+ sa.Column("updated_at", sa.DateTime(timezone=True), server_default=sa.text("now()"), nullable=False),
+ sa.ForeignKeyConstraint(["organization_id"], ["organizations.id"], ondelete="CASCADE"),
+ sa.PrimaryKeyConstraint("id"),
+ sa.UniqueConstraint("organization_id", "provider", name="uq_org_provider"),
)
- op.create_index('ix_integration_configs_organization_id', 'integration_configs', ['organization_id'])
+ op.create_index("ix_integration_configs_organization_id", "integration_configs", ["organization_id"])
# Create bug_sync_history table
op.create_table(
- 'bug_sync_history',
- sa.Column('id', sa.UUID(), nullable=False),
- sa.Column('project_id', sa.UUID(), nullable=False),
- sa.Column('synced_at', sa.DateTime(timezone=True), server_default=sa.text('now()'), nullable=False),
- sa.Column('status', sa.String(length=20), nullable=False),
- sa.Column('imported_data_json', sa.JSON(), nullable=True),
- sa.Column('error_message', sa.Text(), nullable=True),
- sa.Column('triggered_by', sa.String(length=20), nullable=False),
- sa.ForeignKeyConstraint(['project_id'], ['projects.id'], ondelete='CASCADE'),
- sa.PrimaryKeyConstraint('id')
+ "bug_sync_history",
+ sa.Column("id", sa.UUID(), nullable=False),
+ sa.Column("project_id", sa.UUID(), nullable=False),
+ sa.Column("synced_at", sa.DateTime(timezone=True), server_default=sa.text("now()"), nullable=False),
+ sa.Column("status", sa.String(length=20), nullable=False),
+ sa.Column("imported_data_json", sa.JSON(), nullable=True),
+ sa.Column("error_message", sa.Text(), nullable=True),
+ sa.Column("triggered_by", sa.String(length=20), nullable=False),
+ sa.ForeignKeyConstraint(["project_id"], ["projects.id"], ondelete="CASCADE"),
+ sa.PrimaryKeyConstraint("id"),
)
- op.create_index('ix_bug_sync_history_project_id', 'bug_sync_history', ['project_id'])
- op.create_index('ix_bug_sync_history_synced_at', 'bug_sync_history', ['synced_at'])
+ op.create_index("ix_bug_sync_history_project_id", "bug_sync_history", ["project_id"])
+ op.create_index("ix_bug_sync_history_synced_at", "bug_sync_history", ["synced_at"])
def downgrade() -> None:
"""Downgrade schema."""
- op.drop_index('ix_bug_sync_history_synced_at', table_name='bug_sync_history')
- op.drop_index('ix_bug_sync_history_project_id', table_name='bug_sync_history')
- op.drop_table('bug_sync_history')
+ op.drop_index("ix_bug_sync_history_synced_at", table_name="bug_sync_history")
+ op.drop_index("ix_bug_sync_history_project_id", table_name="bug_sync_history")
+ op.drop_table("bug_sync_history")
- op.drop_index('ix_integration_configs_organization_id', table_name='integration_configs')
- op.drop_table('integration_configs')
+ op.drop_index("ix_integration_configs_organization_id", table_name="integration_configs")
+ op.drop_table("integration_configs")
diff --git a/backend/alembic/versions/2f8c2d246f32_merge_llm_usage_and_call_logs.py b/backend/alembic/versions/2f8c2d246f32_merge_llm_usage_and_call_logs.py
index ed703ad..9e39db5 100644
--- a/backend/alembic/versions/2f8c2d246f32_merge_llm_usage_and_call_logs.py
+++ b/backend/alembic/versions/2f8c2d246f32_merge_llm_usage_and_call_logs.py
@@ -5,15 +5,12 @@
Create Date: 2025-12-05 17:42:23.198841
"""
-from typing import Sequence, Union
-
-from alembic import op
-import sqlalchemy as sa
+from typing import Sequence, Union
# revision identifiers, used by Alembic.
-revision: str = '2f8c2d246f32'
-down_revision: Union[str, Sequence[str], None] = ('23893c2e92fa', 'f5g6h7i8j9k0')
+revision: str = "2f8c2d246f32"
+down_revision: Union[str, Sequence[str], None] = ("23893c2e92fa", "f5g6h7i8j9k0")
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
diff --git a/backend/alembic/versions/307472938827_remove_completion_summary_from_features.py b/backend/alembic/versions/307472938827_remove_completion_summary_from_features.py
index 6437f27..b75d87e 100644
--- a/backend/alembic/versions/307472938827_remove_completion_summary_from_features.py
+++ b/backend/alembic/versions/307472938827_remove_completion_summary_from_features.py
@@ -5,15 +5,16 @@
Create Date: 2026-01-09 20:25:10.479596
"""
+
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
+from alembic import op
# revision identifiers, used by Alembic.
-revision: str = '307472938827'
-down_revision: Union[str, Sequence[str], None] = 'implcs01'
+revision: str = "307472938827"
+down_revision: Union[str, Sequence[str], None] = "implcs01"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
@@ -24,9 +25,9 @@ def upgrade() -> None:
completion_summary is now stored at the implementation level and
auto-generated by the grounding agent when notes are written.
"""
- op.drop_column('features', 'completion_summary')
+ op.drop_column("features", "completion_summary")
def downgrade() -> None:
"""Re-add completion_summary column to features table."""
- op.add_column('features', sa.Column('completion_summary', sa.Text(), nullable=True))
+ op.add_column("features", sa.Column("completion_summary", sa.Text(), nullable=True))
diff --git a/backend/alembic/versions/3e35a2b90829_add_prompt_plan_coverage_reports_table.py b/backend/alembic/versions/3e35a2b90829_add_prompt_plan_coverage_reports_table.py
index 877bc4c..0c76484 100644
--- a/backend/alembic/versions/3e35a2b90829_add_prompt_plan_coverage_reports_table.py
+++ b/backend/alembic/versions/3e35a2b90829_add_prompt_plan_coverage_reports_table.py
@@ -5,15 +5,16 @@
Create Date: 2025-11-24 16:47:11.941231
"""
+
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
+from alembic import op
# revision identifiers, used by Alembic.
-revision: str = '3e35a2b90829'
-down_revision: Union[str, Sequence[str], None] = '1e859fbdbd74'
+revision: str = "3e35a2b90829"
+down_revision: Union[str, Sequence[str], None] = "1e859fbdbd74"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
@@ -21,22 +22,25 @@
def upgrade() -> None:
"""Upgrade schema."""
op.create_table(
- 'prompt_plan_coverage_reports',
- sa.Column('id', sa.UUID(), nullable=False),
- sa.Column('spec_version_id', sa.UUID(), nullable=False),
- sa.Column('ok', sa.Boolean(), nullable=False),
- sa.Column('missing_phases', sa.JSON(), nullable=False),
- sa.Column('missing_mcp_methods', sa.JSON(), nullable=False),
- sa.Column('hallucinated_constraints', sa.JSON(), nullable=False),
- sa.Column('weak_sections', sa.JSON(), nullable=False),
- sa.Column('suggested_improvements', sa.JSON(), nullable=False),
- sa.Column('created_at', sa.DateTime(timezone=True), nullable=True),
- sa.ForeignKeyConstraint(['spec_version_id'], ['spec_versions.id'], ),
- sa.PrimaryKeyConstraint('id'),
- sa.UniqueConstraint('spec_version_id')
+ "prompt_plan_coverage_reports",
+ sa.Column("id", sa.UUID(), nullable=False),
+ sa.Column("spec_version_id", sa.UUID(), nullable=False),
+ sa.Column("ok", sa.Boolean(), nullable=False),
+ sa.Column("missing_phases", sa.JSON(), nullable=False),
+ sa.Column("missing_mcp_methods", sa.JSON(), nullable=False),
+ sa.Column("hallucinated_constraints", sa.JSON(), nullable=False),
+ sa.Column("weak_sections", sa.JSON(), nullable=False),
+ sa.Column("suggested_improvements", sa.JSON(), nullable=False),
+ sa.Column("created_at", sa.DateTime(timezone=True), nullable=True),
+ sa.ForeignKeyConstraint(
+ ["spec_version_id"],
+ ["spec_versions.id"],
+ ),
+ sa.PrimaryKeyConstraint("id"),
+ sa.UniqueConstraint("spec_version_id"),
)
def downgrade() -> None:
"""Downgrade schema."""
- op.drop_table('prompt_plan_coverage_reports')
+ op.drop_table("prompt_plan_coverage_reports")
diff --git a/backend/alembic/versions/4631a4a3270c_add_organizations_and_memberships.py b/backend/alembic/versions/4631a4a3270c_add_organizations_and_memberships.py
index cd1c93c..41dc338 100644
--- a/backend/alembic/versions/4631a4a3270c_add_organizations_and_memberships.py
+++ b/backend/alembic/versions/4631a4a3270c_add_organizations_and_memberships.py
@@ -5,15 +5,16 @@
Create Date: 2025-11-20 09:48:16.630938
"""
+
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
+from alembic import op
# revision identifiers, used by Alembic.
-revision: str = '4631a4a3270c'
-down_revision: Union[str, Sequence[str], None] = '1931abc02c3f'
+revision: str = "4631a4a3270c"
+down_revision: Union[str, Sequence[str], None] = "1931abc02c3f"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
@@ -22,39 +23,43 @@ def upgrade() -> None:
"""Upgrade schema."""
# Create organizations table
op.create_table(
- 'organizations',
- sa.Column('id', sa.UUID(), nullable=False),
- sa.Column('name', sa.String(length=255), nullable=False),
- sa.Column('created_at', sa.DateTime(timezone=True), server_default=sa.text('now()'), nullable=False),
- sa.Column('updated_at', sa.DateTime(timezone=True), server_default=sa.text('now()'), nullable=False),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_organizations'))
+ "organizations",
+ sa.Column("id", sa.UUID(), nullable=False),
+ sa.Column("name", sa.String(length=255), nullable=False),
+ sa.Column("created_at", sa.DateTime(timezone=True), server_default=sa.text("now()"), nullable=False),
+ sa.Column("updated_at", sa.DateTime(timezone=True), server_default=sa.text("now()"), nullable=False),
+ sa.PrimaryKeyConstraint("id", name=op.f("pk_organizations")),
)
# Create org_memberships table
op.create_table(
- 'org_memberships',
- sa.Column('id', sa.UUID(), nullable=False),
- sa.Column('org_id', sa.UUID(), nullable=False),
- sa.Column('user_id', sa.UUID(), nullable=False),
- sa.Column('role', sa.String(length=20), nullable=False),
- sa.Column('created_at', sa.DateTime(timezone=True), server_default=sa.text('now()'), nullable=False),
- sa.ForeignKeyConstraint(['org_id'], ['organizations.id'], name=op.f('fk_org_memberships_org_id_organizations'), ondelete='CASCADE'),
- sa.ForeignKeyConstraint(['user_id'], ['users.id'], name=op.f('fk_org_memberships_user_id_users'), ondelete='CASCADE'),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_org_memberships')),
- sa.UniqueConstraint('org_id', 'user_id', name=op.f('uq_org_memberships_org_id_user_id'))
+ "org_memberships",
+ sa.Column("id", sa.UUID(), nullable=False),
+ sa.Column("org_id", sa.UUID(), nullable=False),
+ sa.Column("user_id", sa.UUID(), nullable=False),
+ sa.Column("role", sa.String(length=20), nullable=False),
+ sa.Column("created_at", sa.DateTime(timezone=True), server_default=sa.text("now()"), nullable=False),
+ sa.ForeignKeyConstraint(
+ ["org_id"], ["organizations.id"], name=op.f("fk_org_memberships_org_id_organizations"), ondelete="CASCADE"
+ ),
+ sa.ForeignKeyConstraint(
+ ["user_id"], ["users.id"], name=op.f("fk_org_memberships_user_id_users"), ondelete="CASCADE"
+ ),
+ sa.PrimaryKeyConstraint("id", name=op.f("pk_org_memberships")),
+ sa.UniqueConstraint("org_id", "user_id", name=op.f("uq_org_memberships_org_id_user_id")),
)
# Create indexes for foreign keys
- op.create_index(op.f('ix_org_memberships_org_id'), 'org_memberships', ['org_id'], unique=False)
- op.create_index(op.f('ix_org_memberships_user_id'), 'org_memberships', ['user_id'], unique=False)
+ op.create_index(op.f("ix_org_memberships_org_id"), "org_memberships", ["org_id"], unique=False)
+ op.create_index(op.f("ix_org_memberships_user_id"), "org_memberships", ["user_id"], unique=False)
def downgrade() -> None:
"""Downgrade schema."""
# Drop indexes
- op.drop_index(op.f('ix_org_memberships_user_id'), table_name='org_memberships')
- op.drop_index(op.f('ix_org_memberships_org_id'), table_name='org_memberships')
+ op.drop_index(op.f("ix_org_memberships_user_id"), table_name="org_memberships")
+ op.drop_index(op.f("ix_org_memberships_org_id"), table_name="org_memberships")
# Drop tables (reverse order)
- op.drop_table('org_memberships')
- op.drop_table('organizations')
+ op.drop_table("org_memberships")
+ op.drop_table("organizations")
diff --git a/backend/alembic/versions/4691251c9f11_add_user_trial_started_at.py b/backend/alembic/versions/4691251c9f11_add_user_trial_started_at.py
index 1c3cf71..76b85d0 100644
--- a/backend/alembic/versions/4691251c9f11_add_user_trial_started_at.py
+++ b/backend/alembic/versions/4691251c9f11_add_user_trial_started_at.py
@@ -8,28 +8,26 @@
Create Date: 2025-12-16 17:21:42.122199
"""
+
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
+from alembic import op
# revision identifiers, used by Alembic.
-revision: str = '4691251c9f11'
-down_revision: Union[str, Sequence[str], None] = 'd1cf77c4c1fa'
+revision: str = "4691251c9f11"
+down_revision: Union[str, Sequence[str], None] = "d1cf77c4c1fa"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
"""Add trial_started_at column to users table."""
- op.add_column(
- 'users',
- sa.Column('trial_started_at', sa.DateTime(timezone=True), nullable=True)
- )
+ op.add_column("users", sa.Column("trial_started_at", sa.DateTime(timezone=True), nullable=True))
# Existing users stay NULL (no trial = grandfathered with perpetual access)
def downgrade() -> None:
"""Remove trial_started_at column from users table."""
- op.drop_column('users', 'trial_started_at')
+ op.drop_column("users", "trial_started_at")
diff --git a/backend/alembic/versions/489421eb9675_fix_brainstorm_module_feature_types_data.py b/backend/alembic/versions/489421eb9675_fix_brainstorm_module_feature_types_data.py
index d6098ce..7e26df6 100644
--- a/backend/alembic/versions/489421eb9675_fix_brainstorm_module_feature_types_data.py
+++ b/backend/alembic/versions/489421eb9675_fix_brainstorm_module_feature_types_data.py
@@ -5,15 +5,14 @@
Create Date: 2025-12-05 07:29:11.649749
"""
+
from typing import Sequence, Union
from alembic import op
-import sqlalchemy as sa
-
# revision identifiers, used by Alembic.
-revision: str = '489421eb9675'
-down_revision: Union[str, Sequence[str], None] = '77d468f92bb4'
+revision: str = "489421eb9675"
+down_revision: Union[str, Sequence[str], None] = "77d468f92bb4"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
diff --git a/backend/alembic/versions/5464eccf3da8_add_key_lookup_hash_to_api_keys.py b/backend/alembic/versions/5464eccf3da8_add_key_lookup_hash_to_api_keys.py
index 223cf50..70d784c 100644
--- a/backend/alembic/versions/5464eccf3da8_add_key_lookup_hash_to_api_keys.py
+++ b/backend/alembic/versions/5464eccf3da8_add_key_lookup_hash_to_api_keys.py
@@ -8,16 +8,17 @@
Create Date: 2026-02-14 18:58:17.183207
"""
+
import hashlib
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
+from alembic import op
# revision identifiers, used by Alembic.
-revision: str = '5464eccf3da8'
-down_revision: Union[str, Sequence[str], None] = 'slack02'
+revision: str = "5464eccf3da8"
+down_revision: Union[str, Sequence[str], None] = "slack02"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
@@ -25,17 +26,15 @@
def upgrade() -> None:
"""Add key_lookup_hash column and backfill from encrypted keys."""
# 1. Add the column (nullable for backwards compatibility)
- op.add_column('api_keys', sa.Column('key_lookup_hash', sa.String(64), nullable=True))
+ op.add_column("api_keys", sa.Column("key_lookup_hash", sa.String(64), nullable=True))
# 2. Add index for fast lookups
- op.create_index('idx_api_keys_key_lookup_hash', 'api_keys', ['key_lookup_hash'])
+ op.create_index("idx_api_keys_key_lookup_hash", "api_keys", ["key_lookup_hash"])
    # 3. Backfill: decrypt key_encrypted → compute SHA-256 → store
# This runs in-database using Python for the crypto operations
conn = op.get_bind()
- rows = conn.execute(
- sa.text("SELECT id, key_encrypted FROM api_keys WHERE key_encrypted IS NOT NULL")
- ).fetchall()
+ rows = conn.execute(sa.text("SELECT id, key_encrypted FROM api_keys WHERE key_encrypted IS NOT NULL")).fetchall()
if rows:
# Import decryption utils (requires ENCRYPTION_KEY to be set)
@@ -61,5 +60,5 @@ def upgrade() -> None:
def downgrade() -> None:
"""Remove key_lookup_hash column."""
- op.drop_index('idx_api_keys_key_lookup_hash', table_name='api_keys')
- op.drop_column('api_keys', 'key_lookup_hash')
+ op.drop_index("idx_api_keys_key_lookup_hash", table_name="api_keys")
+ op.drop_column("api_keys", "key_lookup_hash")
diff --git a/backend/alembic/versions/58b3923aa347_add_thread_ai_response_flag.py b/backend/alembic/versions/58b3923aa347_add_thread_ai_response_flag.py
index 88c447b..7f1f4ad 100644
--- a/backend/alembic/versions/58b3923aa347_add_thread_ai_response_flag.py
+++ b/backend/alembic/versions/58b3923aa347_add_thread_ai_response_flag.py
@@ -5,15 +5,16 @@
Create Date: 2026-01-04 13:21:04.973636
"""
+
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
+from alembic import op
# revision identifiers, used by Alembic.
-revision: str = '58b3923aa347'
-down_revision: Union[str, Sequence[str], None] = 'ppd06'
+revision: str = "58b3923aa347"
+down_revision: Union[str, Sequence[str], None] = "ppd06"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
@@ -24,11 +25,10 @@ def upgrade() -> None:
# This flag is set TRUE when user triggers @MFBTAI AI mention,
# and cleared to FALSE when job completes (success or failure).
op.add_column(
- 'threads',
- sa.Column('is_generating_ai_response', sa.Boolean(), nullable=False, server_default='false')
+ "threads", sa.Column("is_generating_ai_response", sa.Boolean(), nullable=False, server_default="false")
)
def downgrade() -> None:
"""Downgrade schema."""
- op.drop_column('threads', 'is_generating_ai_response')
+ op.drop_column("threads", "is_generating_ai_response")
diff --git a/backend/alembic/versions/58b5bf6d73fc_add_discovery_tables.py b/backend/alembic/versions/58b5bf6d73fc_add_discovery_tables.py
index 0498dec..19ce9dd 100644
--- a/backend/alembic/versions/58b5bf6d73fc_add_discovery_tables.py
+++ b/backend/alembic/versions/58b5bf6d73fc_add_discovery_tables.py
@@ -5,15 +5,16 @@
Create Date: 2025-11-20 10:57:24.440539
"""
+
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
+from alembic import op
# revision identifiers, used by Alembic.
-revision: str = '58b5bf6d73fc'
-down_revision: Union[str, Sequence[str], None] = '0c5625ec8ba1'
+revision: str = "58b5bf6d73fc"
+down_revision: Union[str, Sequence[str], None] = "0c5625ec8ba1"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
@@ -22,52 +23,64 @@ def upgrade() -> None:
"""Upgrade schema."""
# Create discovery_questions table
op.create_table(
- 'discovery_questions',
- sa.Column('id', sa.UUID(), nullable=False),
- sa.Column('project_id', sa.UUID(), nullable=False),
- sa.Column('category', sa.String(), nullable=True),
- sa.Column('priority', sa.Enum('LOW', 'MEDIUM', 'HIGH', name='discoverypriority'), nullable=False),
- sa.Column('question_text', sa.Text(), nullable=False),
- sa.Column('is_multiple_choice', sa.Boolean(), nullable=False),
- sa.Column('options', sa.JSON(), nullable=False),
- sa.Column('depends_on', sa.UUID(), nullable=True),
- sa.Column('created_by', sa.UUID(), nullable=False),
- sa.Column('created_at', sa.DateTime(timezone=True), nullable=False),
- sa.Column('updated_at', sa.DateTime(timezone=True), nullable=False),
- sa.Column('status', sa.Enum('OPEN', 'RESOLVED', 'NA', name='discoverystatus'), nullable=False),
- sa.Column('resolved_by', sa.UUID(), nullable=True),
- sa.Column('resolved_at', sa.DateTime(timezone=True), nullable=True),
- sa.ForeignKeyConstraint(['created_by'], ['users.id'], ),
- sa.ForeignKeyConstraint(['depends_on'], ['discovery_questions.id'], ),
- sa.ForeignKeyConstraint(['project_id'], ['projects.id'], ondelete='CASCADE'),
- sa.ForeignKeyConstraint(['resolved_by'], ['users.id'], ),
- sa.PrimaryKeyConstraint('id')
+ "discovery_questions",
+ sa.Column("id", sa.UUID(), nullable=False),
+ sa.Column("project_id", sa.UUID(), nullable=False),
+ sa.Column("category", sa.String(), nullable=True),
+ sa.Column("priority", sa.Enum("LOW", "MEDIUM", "HIGH", name="discoverypriority"), nullable=False),
+ sa.Column("question_text", sa.Text(), nullable=False),
+ sa.Column("is_multiple_choice", sa.Boolean(), nullable=False),
+ sa.Column("options", sa.JSON(), nullable=False),
+ sa.Column("depends_on", sa.UUID(), nullable=True),
+ sa.Column("created_by", sa.UUID(), nullable=False),
+ sa.Column("created_at", sa.DateTime(timezone=True), nullable=False),
+ sa.Column("updated_at", sa.DateTime(timezone=True), nullable=False),
+ sa.Column("status", sa.Enum("OPEN", "RESOLVED", "NA", name="discoverystatus"), nullable=False),
+ sa.Column("resolved_by", sa.UUID(), nullable=True),
+ sa.Column("resolved_at", sa.DateTime(timezone=True), nullable=True),
+ sa.ForeignKeyConstraint(
+ ["created_by"],
+ ["users.id"],
+ ),
+ sa.ForeignKeyConstraint(
+ ["depends_on"],
+ ["discovery_questions.id"],
+ ),
+ sa.ForeignKeyConstraint(["project_id"], ["projects.id"], ondelete="CASCADE"),
+ sa.ForeignKeyConstraint(
+ ["resolved_by"],
+ ["users.id"],
+ ),
+ sa.PrimaryKeyConstraint("id"),
)
- op.create_index(op.f('ix_discovery_questions_project_id'), 'discovery_questions', ['project_id'], unique=False)
- op.create_index(op.f('ix_discovery_questions_status'), 'discovery_questions', ['status'], unique=False)
+ op.create_index(op.f("ix_discovery_questions_project_id"), "discovery_questions", ["project_id"], unique=False)
+ op.create_index(op.f("ix_discovery_questions_status"), "discovery_questions", ["status"], unique=False)
# Create discovery_answers table
op.create_table(
- 'discovery_answers',
- sa.Column('id', sa.UUID(), nullable=False),
- sa.Column('question_id', sa.UUID(), nullable=False),
- sa.Column('selected_option_id', sa.String(), nullable=True),
- sa.Column('free_text', sa.Text(), nullable=True),
- sa.Column('answered_by', sa.UUID(), nullable=False),
- sa.Column('created_at', sa.DateTime(timezone=True), nullable=False),
- sa.ForeignKeyConstraint(['answered_by'], ['users.id'], ),
- sa.ForeignKeyConstraint(['question_id'], ['discovery_questions.id'], ondelete='CASCADE'),
- sa.PrimaryKeyConstraint('id')
+ "discovery_answers",
+ sa.Column("id", sa.UUID(), nullable=False),
+ sa.Column("question_id", sa.UUID(), nullable=False),
+ sa.Column("selected_option_id", sa.String(), nullable=True),
+ sa.Column("free_text", sa.Text(), nullable=True),
+ sa.Column("answered_by", sa.UUID(), nullable=False),
+ sa.Column("created_at", sa.DateTime(timezone=True), nullable=False),
+ sa.ForeignKeyConstraint(
+ ["answered_by"],
+ ["users.id"],
+ ),
+ sa.ForeignKeyConstraint(["question_id"], ["discovery_questions.id"], ondelete="CASCADE"),
+ sa.PrimaryKeyConstraint("id"),
)
- op.create_index(op.f('ix_discovery_answers_question_id'), 'discovery_answers', ['question_id'], unique=False)
+ op.create_index(op.f("ix_discovery_answers_question_id"), "discovery_answers", ["question_id"], unique=False)
def downgrade() -> None:
"""Downgrade schema."""
- op.drop_index(op.f('ix_discovery_answers_question_id'), table_name='discovery_answers')
- op.drop_table('discovery_answers')
- op.drop_index(op.f('ix_discovery_questions_status'), table_name='discovery_questions')
- op.drop_index(op.f('ix_discovery_questions_project_id'), table_name='discovery_questions')
- op.drop_table('discovery_questions')
- op.execute('DROP TYPE discoverystatus')
- op.execute('DROP TYPE discoverypriority')
+ op.drop_index(op.f("ix_discovery_answers_question_id"), table_name="discovery_answers")
+ op.drop_table("discovery_answers")
+ op.drop_index(op.f("ix_discovery_questions_status"), table_name="discovery_questions")
+ op.drop_index(op.f("ix_discovery_questions_project_id"), table_name="discovery_questions")
+ op.drop_table("discovery_questions")
+ op.execute("DROP TYPE discoverystatus")
+ op.execute("DROP TYPE discoverypriority")
diff --git a/backend/alembic/versions/616549379b06_add_generation_flags_to_impl_and_thread.py b/backend/alembic/versions/616549379b06_add_generation_flags_to_impl_and_thread.py
index d811e8a..67b01c4 100644
--- a/backend/alembic/versions/616549379b06_add_generation_flags_to_impl_and_thread.py
+++ b/backend/alembic/versions/616549379b06_add_generation_flags_to_impl_and_thread.py
@@ -5,15 +5,16 @@
Create Date: 2026-01-03 15:28:40.447898
"""
+
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
+from alembic import op
# revision identifiers, used by Alembic.
-revision: str = '616549379b06'
-down_revision: Union[str, Sequence[str], None] = 'a049ffa5b22b'
+revision: str = "616549379b06"
+down_revision: Union[str, Sequence[str], None] = "a049ffa5b22b"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
@@ -22,23 +23,20 @@ def upgrade() -> None:
"""Add generation status flags to implementations and threads tables."""
# Add is_generating_spec and is_generating_prompt_plan to implementations
op.add_column(
- 'implementations',
- sa.Column('is_generating_spec', sa.Boolean(), nullable=False, server_default='false')
+ "implementations", sa.Column("is_generating_spec", sa.Boolean(), nullable=False, server_default="false")
)
op.add_column(
- 'implementations',
- sa.Column('is_generating_prompt_plan', sa.Boolean(), nullable=False, server_default='false')
+ "implementations", sa.Column("is_generating_prompt_plan", sa.Boolean(), nullable=False, server_default="false")
)
# Add is_generating_decision_summary to threads
op.add_column(
- 'threads',
- sa.Column('is_generating_decision_summary', sa.Boolean(), nullable=False, server_default='false')
+ "threads", sa.Column("is_generating_decision_summary", sa.Boolean(), nullable=False, server_default="false")
)
def downgrade() -> None:
"""Remove generation status flags from implementations and threads tables."""
- op.drop_column('threads', 'is_generating_decision_summary')
- op.drop_column('implementations', 'is_generating_prompt_plan')
- op.drop_column('implementations', 'is_generating_spec')
+ op.drop_column("threads", "is_generating_decision_summary")
+ op.drop_column("implementations", "is_generating_prompt_plan")
+ op.drop_column("implementations", "is_generating_spec")
diff --git a/backend/alembic/versions/63256c2c0d52_add_threads_and_comments_tables.py b/backend/alembic/versions/63256c2c0d52_add_threads_and_comments_tables.py
index 65912f5..0b403ae 100644
--- a/backend/alembic/versions/63256c2c0d52_add_threads_and_comments_tables.py
+++ b/backend/alembic/versions/63256c2c0d52_add_threads_and_comments_tables.py
@@ -5,15 +5,16 @@
Create Date: 2025-11-20 12:03:13.033739
"""
+
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
+from alembic import op
# revision identifiers, used by Alembic.
-revision: str = '63256c2c0d52'
-down_revision: Union[str, Sequence[str], None] = '178e7cb83afe'
+revision: str = "63256c2c0d52"
+down_revision: Union[str, Sequence[str], None] = "178e7cb83afe"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
@@ -22,47 +23,42 @@ def upgrade() -> None:
"""Upgrade schema."""
# Create threads table
op.create_table(
- 'threads',
- sa.Column('id', sa.UUID(), nullable=False),
- sa.Column('project_id', sa.UUID(), nullable=False),
- sa.Column('context_type', sa.String(), nullable=False),
- sa.Column('context_id', sa.String(), nullable=True),
- sa.Column('title', sa.String(), nullable=True),
- sa.Column('created_by', sa.UUID(), nullable=False),
- sa.Column('created_at', sa.DateTime(timezone=True), nullable=False),
- sa.Column('updated_at', sa.DateTime(timezone=True), nullable=False),
- sa.ForeignKeyConstraint(['created_by'], ['users.id']),
- sa.ForeignKeyConstraint(['project_id'], ['projects.id'], ondelete='CASCADE'),
- sa.PrimaryKeyConstraint('id')
- )
- op.create_index('ix_threads_project_id', 'threads', ['project_id'])
- op.create_index(
- 'ix_threads_context',
- 'threads',
- ['project_id', 'context_type', 'context_id'],
- unique=False
+ "threads",
+ sa.Column("id", sa.UUID(), nullable=False),
+ sa.Column("project_id", sa.UUID(), nullable=False),
+ sa.Column("context_type", sa.String(), nullable=False),
+ sa.Column("context_id", sa.String(), nullable=True),
+ sa.Column("title", sa.String(), nullable=True),
+ sa.Column("created_by", sa.UUID(), nullable=False),
+ sa.Column("created_at", sa.DateTime(timezone=True), nullable=False),
+ sa.Column("updated_at", sa.DateTime(timezone=True), nullable=False),
+ sa.ForeignKeyConstraint(["created_by"], ["users.id"]),
+ sa.ForeignKeyConstraint(["project_id"], ["projects.id"], ondelete="CASCADE"),
+ sa.PrimaryKeyConstraint("id"),
)
+ op.create_index("ix_threads_project_id", "threads", ["project_id"])
+ op.create_index("ix_threads_context", "threads", ["project_id", "context_type", "context_id"], unique=False)
# Create comments table
op.create_table(
- 'comments',
- sa.Column('id', sa.UUID(), nullable=False),
- sa.Column('thread_id', sa.UUID(), nullable=False),
- sa.Column('author_id', sa.UUID(), nullable=False),
- sa.Column('body_markdown', sa.Text(), nullable=False),
- sa.Column('created_at', sa.DateTime(timezone=True), nullable=False),
- sa.Column('updated_at', sa.DateTime(timezone=True), nullable=True),
- sa.ForeignKeyConstraint(['author_id'], ['users.id']),
- sa.ForeignKeyConstraint(['thread_id'], ['threads.id'], ondelete='CASCADE'),
- sa.PrimaryKeyConstraint('id')
+ "comments",
+ sa.Column("id", sa.UUID(), nullable=False),
+ sa.Column("thread_id", sa.UUID(), nullable=False),
+ sa.Column("author_id", sa.UUID(), nullable=False),
+ sa.Column("body_markdown", sa.Text(), nullable=False),
+ sa.Column("created_at", sa.DateTime(timezone=True), nullable=False),
+ sa.Column("updated_at", sa.DateTime(timezone=True), nullable=True),
+ sa.ForeignKeyConstraint(["author_id"], ["users.id"]),
+ sa.ForeignKeyConstraint(["thread_id"], ["threads.id"], ondelete="CASCADE"),
+ sa.PrimaryKeyConstraint("id"),
)
- op.create_index('ix_comments_thread_id', 'comments', ['thread_id'])
+ op.create_index("ix_comments_thread_id", "comments", ["thread_id"])
def downgrade() -> None:
"""Downgrade schema."""
- op.drop_index('ix_comments_thread_id', table_name='comments')
- op.drop_table('comments')
- op.drop_index('ix_threads_context', table_name='threads')
- op.drop_index('ix_threads_project_id', table_name='threads')
- op.drop_table('threads')
+ op.drop_index("ix_comments_thread_id", table_name="comments")
+ op.drop_table("comments")
+ op.drop_index("ix_threads_context", table_name="threads")
+ op.drop_index("ix_threads_project_id", table_name="threads")
+ op.drop_table("threads")
diff --git a/backend/alembic/versions/6baa75dcb961_fix_module_feature_types_properly.py b/backend/alembic/versions/6baa75dcb961_fix_module_feature_types_properly.py
index 0124b24..7e5dbf2 100644
--- a/backend/alembic/versions/6baa75dcb961_fix_module_feature_types_properly.py
+++ b/backend/alembic/versions/6baa75dcb961_fix_module_feature_types_properly.py
@@ -5,15 +5,14 @@
Create Date: 2025-12-05 07:30:43.900157
"""
+
from typing import Sequence, Union
from alembic import op
-import sqlalchemy as sa
-
# revision identifiers, used by Alembic.
-revision: str = '6baa75dcb961'
-down_revision: Union[str, Sequence[str], None] = '489421eb9675'
+revision: str = "6baa75dcb961"
+down_revision: Union[str, Sequence[str], None] = "489421eb9675"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
diff --git a/backend/alembic/versions/6ebda2d1112d_add_llm_preference_table.py b/backend/alembic/versions/6ebda2d1112d_add_llm_preference_table.py
index c8ffc4b..34445a6 100644
--- a/backend/alembic/versions/6ebda2d1112d_add_llm_preference_table.py
+++ b/backend/alembic/versions/6ebda2d1112d_add_llm_preference_table.py
@@ -5,16 +5,17 @@
Create Date: 2025-11-20 19:06:43.742518
"""
+
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
+from alembic import op
# revision identifiers, used by Alembic.
-revision: str = '6ebda2d1112d'
-down_revision: Union[str, Sequence[str], None] = '83c377d42a07'
+revision: str = "6ebda2d1112d"
+down_revision: Union[str, Sequence[str], None] = "83c377d42a07"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
@@ -22,20 +23,20 @@
def upgrade() -> None:
"""Upgrade schema."""
op.create_table(
- 'llm_preferences',
- sa.Column('id', postgresql.UUID(as_uuid=True), primary_key=True),
- sa.Column('organization_id', postgresql.UUID(as_uuid=True), nullable=False, index=True),
- sa.Column('main_llm_config_id', postgresql.UUID(as_uuid=True), nullable=True),
- sa.Column('lightweight_llm_config_id', postgresql.UUID(as_uuid=True), nullable=True),
- sa.Column('created_at', postgresql.TIMESTAMP(timezone=True), nullable=False),
- sa.Column('updated_at', postgresql.TIMESTAMP(timezone=True), nullable=False),
- sa.ForeignKeyConstraint(['organization_id'], ['organizations.id'], ondelete='CASCADE'),
- sa.ForeignKeyConstraint(['main_llm_config_id'], ['integration_configs.id'], ondelete='SET NULL'),
- sa.ForeignKeyConstraint(['lightweight_llm_config_id'], ['integration_configs.id'], ondelete='SET NULL'),
- sa.UniqueConstraint('organization_id', name='uq_org_llm_preference'),
+ "llm_preferences",
+ sa.Column("id", postgresql.UUID(as_uuid=True), primary_key=True),
+ sa.Column("organization_id", postgresql.UUID(as_uuid=True), nullable=False, index=True),
+ sa.Column("main_llm_config_id", postgresql.UUID(as_uuid=True), nullable=True),
+ sa.Column("lightweight_llm_config_id", postgresql.UUID(as_uuid=True), nullable=True),
+ sa.Column("created_at", postgresql.TIMESTAMP(timezone=True), nullable=False),
+ sa.Column("updated_at", postgresql.TIMESTAMP(timezone=True), nullable=False),
+ sa.ForeignKeyConstraint(["organization_id"], ["organizations.id"], ondelete="CASCADE"),
+ sa.ForeignKeyConstraint(["main_llm_config_id"], ["integration_configs.id"], ondelete="SET NULL"),
+ sa.ForeignKeyConstraint(["lightweight_llm_config_id"], ["integration_configs.id"], ondelete="SET NULL"),
+ sa.UniqueConstraint("organization_id", name="uq_org_llm_preference"),
)
def downgrade() -> None:
"""Downgrade schema."""
- op.drop_table('llm_preferences')
+ op.drop_table("llm_preferences")
diff --git a/backend/alembic/versions/77d468f92bb4_add_module_and_feature_type_columns.py b/backend/alembic/versions/77d468f92bb4_add_module_and_feature_type_columns.py
index 622d1da..7f44d80 100644
--- a/backend/alembic/versions/77d468f92bb4_add_module_and_feature_type_columns.py
+++ b/backend/alembic/versions/77d468f92bb4_add_module_and_feature_type_columns.py
@@ -5,15 +5,16 @@
Create Date: 2025-12-04 18:33:23.985570
"""
+
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
+from alembic import op
# revision identifiers, used by Alembic.
-revision: str = '77d468f92bb4'
-down_revision: Union[str, Sequence[str], None] = 'e4f5g6h7i8j9'
+revision: str = "77d468f92bb4"
+down_revision: Union[str, Sequence[str], None] = "e4f5g6h7i8j9"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
@@ -21,21 +22,15 @@
def upgrade() -> None:
"""Add module_type and feature_type columns with data migration."""
# Create the enum types
- moduletype_enum = sa.Enum('conversation', 'implementation', name='moduletype')
- featuretype_enum = sa.Enum('conversation', 'implementation', name='featuretype')
+ moduletype_enum = sa.Enum("conversation", "implementation", name="moduletype")
+ featuretype_enum = sa.Enum("conversation", "implementation", name="featuretype")
moduletype_enum.create(op.get_bind(), checkfirst=True)
featuretype_enum.create(op.get_bind(), checkfirst=True)
# Add columns as nullable first
- op.add_column(
- 'modules',
- sa.Column('module_type', moduletype_enum, nullable=True)
- )
- op.add_column(
- 'features',
- sa.Column('feature_type', featuretype_enum, nullable=True)
- )
+ op.add_column("modules", sa.Column("module_type", moduletype_enum, nullable=True))
+ op.add_column("features", sa.Column("feature_type", featuretype_enum, nullable=True))
# Data migration: Set feature_type based on Thread association
# Features with BRAINSTORM_FEATURE threads are conversation questions
@@ -69,24 +64,14 @@ def upgrade() -> None:
""")
# Make columns NOT NULL with defaults
- op.alter_column(
- 'modules',
- 'module_type',
- nullable=False,
- server_default='implementation'
- )
- op.alter_column(
- 'features',
- 'feature_type',
- nullable=False,
- server_default='implementation'
- )
+ op.alter_column("modules", "module_type", nullable=False, server_default="implementation")
+ op.alter_column("features", "feature_type", nullable=False, server_default="implementation")
def downgrade() -> None:
"""Remove module_type and feature_type columns."""
- op.drop_column('features', 'feature_type')
- op.drop_column('modules', 'module_type')
+ op.drop_column("features", "feature_type")
+ op.drop_column("modules", "module_type")
# Drop the enum types
op.execute("DROP TYPE IF EXISTS featuretype")
diff --git a/backend/alembic/versions/8058002151ba_add_thread_items_and_followup_timestamps.py b/backend/alembic/versions/8058002151ba_add_thread_items_and_followup_timestamps.py
index 02ec8bd..5ad70e6 100644
--- a/backend/alembic/versions/8058002151ba_add_thread_items_and_followup_timestamps.py
+++ b/backend/alembic/versions/8058002151ba_add_thread_items_and_followup_timestamps.py
@@ -5,16 +5,17 @@
Create Date: 2025-11-22 17:33:59.041505
"""
+
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
+from alembic import op
# revision identifiers, used by Alembic.
-revision: str = '8058002151ba'
-down_revision: Union[str, Sequence[str], None] = '17e2d7ed64e2'
+revision: str = "8058002151ba"
+down_revision: Union[str, Sequence[str], None] = "17e2d7ed64e2"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
@@ -22,25 +23,29 @@
def upgrade() -> None:
"""Upgrade schema."""
# Add timestamp columns to threads table
- op.add_column('threads', sa.Column('last_followup_check_at', sa.DateTime(timezone=True), nullable=True))
- op.add_column('threads', sa.Column('last_user_comment_at', sa.DateTime(timezone=True), nullable=True))
+ op.add_column("threads", sa.Column("last_followup_check_at", sa.DateTime(timezone=True), nullable=True))
+ op.add_column("threads", sa.Column("last_user_comment_at", sa.DateTime(timezone=True), nullable=True))
# Create thread_items table
op.create_table(
- 'thread_items',
- sa.Column('id', postgresql.UUID(), nullable=False),
- sa.Column('thread_id', postgresql.UUID(), nullable=False),
- sa.Column('item_type', sa.Enum('comment', 'mcq_followup', 'no_followup_message', name='threaditemtype'), nullable=False),
- sa.Column('author_id', postgresql.UUID(), nullable=False),
- sa.Column('content_data', sa.JSON(), nullable=False),
- sa.Column('created_at', sa.DateTime(timezone=True), nullable=False),
- sa.Column('updated_at', sa.DateTime(timezone=True), nullable=True),
- sa.ForeignKeyConstraint(['author_id'], ['users.id']),
- sa.ForeignKeyConstraint(['thread_id'], ['threads.id'], ondelete='CASCADE'),
- sa.PrimaryKeyConstraint('id')
+ "thread_items",
+ sa.Column("id", postgresql.UUID(), nullable=False),
+ sa.Column("thread_id", postgresql.UUID(), nullable=False),
+ sa.Column(
+ "item_type",
+ sa.Enum("comment", "mcq_followup", "no_followup_message", name="threaditemtype"),
+ nullable=False,
+ ),
+ sa.Column("author_id", postgresql.UUID(), nullable=False),
+ sa.Column("content_data", sa.JSON(), nullable=False),
+ sa.Column("created_at", sa.DateTime(timezone=True), nullable=False),
+ sa.Column("updated_at", sa.DateTime(timezone=True), nullable=True),
+ sa.ForeignKeyConstraint(["author_id"], ["users.id"]),
+ sa.ForeignKeyConstraint(["thread_id"], ["threads.id"], ondelete="CASCADE"),
+ sa.PrimaryKeyConstraint("id"),
)
- op.create_index('ix_thread_items_thread_id', 'thread_items', ['thread_id'])
- op.create_index('ix_thread_items_item_type', 'thread_items', ['item_type'])
+ op.create_index("ix_thread_items_thread_id", "thread_items", ["thread_id"])
+ op.create_index("ix_thread_items_item_type", "thread_items", ["item_type"])
# Migrate existing comments to thread_items
op.execute("""
@@ -71,13 +76,13 @@ def upgrade() -> None:
def downgrade() -> None:
"""Downgrade schema."""
# Drop thread_items table and indexes
- op.drop_index('ix_thread_items_item_type', table_name='thread_items')
- op.drop_index('ix_thread_items_thread_id', table_name='thread_items')
- op.drop_table('thread_items')
+ op.drop_index("ix_thread_items_item_type", table_name="thread_items")
+ op.drop_index("ix_thread_items_thread_id", table_name="thread_items")
+ op.drop_table("thread_items")
# Drop enum type
op.execute("DROP TYPE threaditemtype")
# Drop timestamp columns from threads
- op.drop_column('threads', 'last_user_comment_at')
- op.drop_column('threads', 'last_followup_check_at')
+ op.drop_column("threads", "last_user_comment_at")
+ op.drop_column("threads", "last_followup_check_at")
diff --git a/backend/alembic/versions/83c377d42a07_add_display_name_to_integration_config.py b/backend/alembic/versions/83c377d42a07_add_display_name_to_integration_config.py
index d9f266a..afed7e8 100644
--- a/backend/alembic/versions/83c377d42a07_add_display_name_to_integration_config.py
+++ b/backend/alembic/versions/83c377d42a07_add_display_name_to_integration_config.py
@@ -5,15 +5,16 @@
Create Date: 2025-11-20 19:05:19.168793
"""
+
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
+from alembic import op
# revision identifiers, used by Alembic.
-revision: str = '83c377d42a07'
-down_revision: Union[str, Sequence[str], None] = 'a04c6c0f1117'
+revision: str = "83c377d42a07"
+down_revision: Union[str, Sequence[str], None] = "a04c6c0f1117"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
@@ -21,30 +22,30 @@
def upgrade() -> None:
"""Upgrade schema."""
# Add display_name column (nullable initially to allow backfill)
- op.add_column('integration_configs', sa.Column('display_name', sa.String(length=100), nullable=True))
+ op.add_column("integration_configs", sa.Column("display_name", sa.String(length=100), nullable=True))
# Backfill display_name with provider name for existing rows
op.execute("UPDATE integration_configs SET display_name = provider WHERE display_name IS NULL")
# Make display_name non-nullable
- op.alter_column('integration_configs', 'display_name', nullable=False)
+ op.alter_column("integration_configs", "display_name", nullable=False)
# Drop old unique constraint
- op.drop_constraint('uq_org_provider', 'integration_configs', type_='unique')
+ op.drop_constraint("uq_org_provider", "integration_configs", type_="unique")
# Add new unique constraint with display_name
- op.create_unique_constraint('uq_org_provider_name', 'integration_configs',
- ['organization_id', 'provider', 'display_name'])
+ op.create_unique_constraint(
+ "uq_org_provider_name", "integration_configs", ["organization_id", "provider", "display_name"]
+ )
def downgrade() -> None:
"""Downgrade schema."""
# Drop new unique constraint
- op.drop_constraint('uq_org_provider_name', 'integration_configs', type_='unique')
+ op.drop_constraint("uq_org_provider_name", "integration_configs", type_="unique")
# Recreate old unique constraint
- op.create_unique_constraint('uq_org_provider', 'integration_configs',
- ['organization_id', 'provider'])
+ op.create_unique_constraint("uq_org_provider", "integration_configs", ["organization_id", "provider"])
# Drop display_name column
- op.drop_column('integration_configs', 'display_name')
+ op.drop_column("integration_configs", "display_name")
diff --git a/backend/alembic/versions/840545b82f16_add_deleted_at_and_key_constraints_to_.py b/backend/alembic/versions/840545b82f16_add_deleted_at_and_key_constraints_to_.py
index 519590e..6005a1f 100644
--- a/backend/alembic/versions/840545b82f16_add_deleted_at_and_key_constraints_to_.py
+++ b/backend/alembic/versions/840545b82f16_add_deleted_at_and_key_constraints_to_.py
@@ -5,15 +5,16 @@
Create Date: 2025-11-22 07:09:28.843970
"""
+
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
+from alembic import op
# revision identifiers, used by Alembic.
-revision: str = '840545b82f16'
-down_revision: Union[str, Sequence[str], None] = '1cfb3bffcc2a'
+revision: str = "840545b82f16"
+down_revision: Union[str, Sequence[str], None] = "1cfb3bffcc2a"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
@@ -21,7 +22,7 @@
def upgrade() -> None:
"""Upgrade schema."""
# Add deleted_at column
- op.add_column('projects', sa.Column('deleted_at', sa.TIMESTAMP(timezone=True), nullable=True))
+ op.add_column("projects", sa.Column("deleted_at", sa.TIMESTAMP(timezone=True), nullable=True))
# Backfill NULL keys with auto-generated values
# Format: PROJ-{first 8 chars of UUID}
@@ -32,25 +33,25 @@ def upgrade() -> None:
""")
# Make key non-nullable now that all rows have values
- op.alter_column('projects', 'key', nullable=False)
+ op.alter_column("projects", "key", nullable=False)
# Add unique constraint on (org_id, key) to ensure uniqueness within org
- op.create_unique_constraint('uq_projects_org_key', 'projects', ['org_id', 'key'])
+ op.create_unique_constraint("uq_projects_org_key", "projects", ["org_id", "key"])
# Add index on deleted_at for efficient filtering
- op.create_index('ix_projects_deleted_at', 'projects', ['deleted_at'])
+ op.create_index("ix_projects_deleted_at", "projects", ["deleted_at"])
def downgrade() -> None:
"""Downgrade schema."""
# Drop index on deleted_at
- op.drop_index('ix_projects_deleted_at', 'projects')
+ op.drop_index("ix_projects_deleted_at", "projects")
# Drop unique constraint
- op.drop_constraint('uq_projects_org_key', 'projects', type_='unique')
+ op.drop_constraint("uq_projects_org_key", "projects", type_="unique")
# Make key nullable again
- op.alter_column('projects', 'key', nullable=True)
+ op.alter_column("projects", "key", nullable=True)
# Drop deleted_at column
- op.drop_column('projects', 'deleted_at')
+ op.drop_column("projects", "deleted_at")
diff --git a/backend/alembic/versions/8bba664c9d7b_set_freemium_max_users_default.py b/backend/alembic/versions/8bba664c9d7b_set_freemium_max_users_default.py
index 9c6f051..b480c77 100644
--- a/backend/alembic/versions/8bba664c9d7b_set_freemium_max_users_default.py
+++ b/backend/alembic/versions/8bba664c9d7b_set_freemium_max_users_default.py
@@ -5,15 +5,14 @@
Create Date: 2025-12-19 18:33:47.267904
"""
+
from typing import Sequence, Union
from alembic import op
-import sqlalchemy as sa
-
# revision identifiers, used by Alembic.
-revision: str = '8bba664c9d7b'
-down_revision: Union[str, Sequence[str], None] = 'ie4f5g6h7i8j'
+revision: str = "8bba664c9d7b"
+down_revision: Union[str, Sequence[str], None] = "ie4f5g6h7i8j"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
diff --git a/backend/alembic/versions/8e970e133ccd_add_spec_versions_table.py b/backend/alembic/versions/8e970e133ccd_add_spec_versions_table.py
index 8733cfe..4396675 100644
--- a/backend/alembic/versions/8e970e133ccd_add_spec_versions_table.py
+++ b/backend/alembic/versions/8e970e133ccd_add_spec_versions_table.py
@@ -5,15 +5,14 @@
Create Date: 2025-11-20 11:27:52.760625
"""
+
from typing import Sequence, Union
from alembic import op
-import sqlalchemy as sa
-
# revision identifiers, used by Alembic.
-revision: str = '8e970e133ccd'
-down_revision: Union[str, Sequence[str], None] = '58b5bf6d73fc'
+revision: str = "8e970e133ccd"
+down_revision: Union[str, Sequence[str], None] = "58b5bf6d73fc"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
@@ -43,18 +42,18 @@ def upgrade() -> None:
""")
# Create indexes
- op.create_index('ix_spec_versions_project_id', 'spec_versions', ['project_id'])
- op.create_index('ix_spec_versions_active', 'spec_versions', ['project_id', 'spec_type', 'is_active'])
+ op.create_index("ix_spec_versions_project_id", "spec_versions", ["project_id"])
+ op.create_index("ix_spec_versions_active", "spec_versions", ["project_id", "spec_type", "is_active"])
def downgrade() -> None:
"""Downgrade schema."""
# Drop indexes
- op.drop_index('ix_spec_versions_active', table_name='spec_versions')
- op.drop_index('ix_spec_versions_project_id', table_name='spec_versions')
+ op.drop_index("ix_spec_versions_active", table_name="spec_versions")
+ op.drop_index("ix_spec_versions_project_id", table_name="spec_versions")
# Drop table
- op.drop_table('spec_versions')
+ op.drop_table("spec_versions")
# Drop ENUM
op.execute("DROP TYPE spec_type")
diff --git a/backend/alembic/versions/8eebe3a100d3_add_mcp_oauth_tables.py b/backend/alembic/versions/8eebe3a100d3_add_mcp_oauth_tables.py
index bd4726b..4e486a4 100644
--- a/backend/alembic/versions/8eebe3a100d3_add_mcp_oauth_tables.py
+++ b/backend/alembic/versions/8eebe3a100d3_add_mcp_oauth_tables.py
@@ -5,15 +5,16 @@
Create Date: 2026-01-09 07:46:45.073505
"""
+
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
+from alembic import op
# revision identifiers, used by Alembic.
-revision: str = '8eebe3a100d3'
-down_revision: Union[str, Sequence[str], None] = '58b3923aa347'
+revision: str = "8eebe3a100d3"
+down_revision: Union[str, Sequence[str], None] = "58b3923aa347"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
diff --git a/backend/alembic/versions/9b373c88f9ac_add_email_verification_fields.py b/backend/alembic/versions/9b373c88f9ac_add_email_verification_fields.py
index c5c70d9..88f7f72 100644
--- a/backend/alembic/versions/9b373c88f9ac_add_email_verification_fields.py
+++ b/backend/alembic/versions/9b373c88f9ac_add_email_verification_fields.py
@@ -5,15 +5,16 @@
Create Date: 2025-12-16 16:30:15.630889
"""
+
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
+from alembic import op
# revision identifiers, used by Alembic.
-revision: str = '9b373c88f9ac'
-down_revision: Union[str, Sequence[str], None] = 'b7c8d9e0f1a2'
+revision: str = "9b373c88f9ac"
+down_revision: Union[str, Sequence[str], None] = "b7c8d9e0f1a2"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
diff --git a/backend/alembic/versions/a049ffa5b22b_add_suggested_implementation_name_to_.py b/backend/alembic/versions/a049ffa5b22b_add_suggested_implementation_name_to_.py
index 257399a..b78c2a8 100644
--- a/backend/alembic/versions/a049ffa5b22b_add_suggested_implementation_name_to_.py
+++ b/backend/alembic/versions/a049ffa5b22b_add_suggested_implementation_name_to_.py
@@ -5,15 +5,16 @@
Create Date: 2026-01-03 12:32:14.162163
"""
+
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
+from alembic import op
# revision identifiers, used by Alembic.
-revision: str = 'a049ffa5b22b'
-down_revision: Union[str, Sequence[str], None] = 'impl02'
+revision: str = "a049ffa5b22b"
+down_revision: Union[str, Sequence[str], None] = "impl02"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
diff --git a/backend/alembic/versions/a04c6c0f1117_add_implementation_notes_tables.py b/backend/alembic/versions/a04c6c0f1117_add_implementation_notes_tables.py
index c415ca8..0c5fa56 100644
--- a/backend/alembic/versions/a04c6c0f1117_add_implementation_notes_tables.py
+++ b/backend/alembic/versions/a04c6c0f1117_add_implementation_notes_tables.py
@@ -5,15 +5,16 @@
Create Date: 2025-11-20 14:59:14.842098
"""
+
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
+from alembic import op
# revision identifiers, used by Alembic.
-revision: str = 'a04c6c0f1117'
-down_revision: Union[str, Sequence[str], None] = '264f9632081a'
+revision: str = "a04c6c0f1117"
+down_revision: Union[str, Sequence[str], None] = "264f9632081a"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
@@ -22,66 +23,66 @@ def upgrade() -> None:
"""Upgrade schema."""
# Create project_implementation_notes table
op.create_table(
- 'project_implementation_notes',
- sa.Column('id', sa.UUID(), nullable=False),
- sa.Column('project_id', sa.UUID(), nullable=False),
- sa.Column('title', sa.Text(), nullable=False),
- sa.Column('content_markdown', sa.Text(), nullable=False),
- sa.Column('created_by', sa.UUID(), nullable=False),
- sa.Column('created_at', sa.DateTime(timezone=True), server_default=sa.text('now()'), nullable=False),
- sa.Column('promoted_from_phase_id', sa.UUID(), nullable=True),
- sa.Column('is_active', sa.Boolean(), nullable=False, server_default=sa.true()),
- sa.PrimaryKeyConstraint('id'),
- sa.ForeignKeyConstraint(['project_id'], ['projects.id'], ondelete='CASCADE'),
- sa.ForeignKeyConstraint(['created_by'], ['users.id']),
- sa.ForeignKeyConstraint(['promoted_from_phase_id'], ['implementation_phases.id'], ondelete='SET NULL'),
+ "project_implementation_notes",
+ sa.Column("id", sa.UUID(), nullable=False),
+ sa.Column("project_id", sa.UUID(), nullable=False),
+ sa.Column("title", sa.Text(), nullable=False),
+ sa.Column("content_markdown", sa.Text(), nullable=False),
+ sa.Column("created_by", sa.UUID(), nullable=False),
+ sa.Column("created_at", sa.DateTime(timezone=True), server_default=sa.text("now()"), nullable=False),
+ sa.Column("promoted_from_phase_id", sa.UUID(), nullable=True),
+ sa.Column("is_active", sa.Boolean(), nullable=False, server_default=sa.true()),
+ sa.PrimaryKeyConstraint("id"),
+ sa.ForeignKeyConstraint(["project_id"], ["projects.id"], ondelete="CASCADE"),
+ sa.ForeignKeyConstraint(["created_by"], ["users.id"]),
+ sa.ForeignKeyConstraint(["promoted_from_phase_id"], ["implementation_phases.id"], ondelete="SET NULL"),
)
- op.create_index('ix_project_implementation_notes_project_id', 'project_implementation_notes', ['project_id'])
+ op.create_index("ix_project_implementation_notes_project_id", "project_implementation_notes", ["project_id"])
# Create phase_notes table
op.create_table(
- 'phase_notes',
- sa.Column('id', sa.UUID(), nullable=False),
- sa.Column('project_id', sa.UUID(), nullable=False),
- sa.Column('implementation_phase_id', sa.UUID(), nullable=False),
- sa.Column('content_markdown', sa.Text(), nullable=False),
- sa.Column('created_by', sa.UUID(), nullable=False),
- sa.Column('created_at', sa.DateTime(timezone=True), server_default=sa.text('now()'), nullable=False),
- sa.Column('updated_at', sa.DateTime(timezone=True), server_default=sa.text('now()'), nullable=False),
- sa.PrimaryKeyConstraint('id'),
- sa.ForeignKeyConstraint(['project_id'], ['projects.id'], ondelete='CASCADE'),
- sa.ForeignKeyConstraint(['implementation_phase_id'], ['implementation_phases.id'], ondelete='CASCADE'),
- sa.ForeignKeyConstraint(['created_by'], ['users.id']),
+ "phase_notes",
+ sa.Column("id", sa.UUID(), nullable=False),
+ sa.Column("project_id", sa.UUID(), nullable=False),
+ sa.Column("implementation_phase_id", sa.UUID(), nullable=False),
+ sa.Column("content_markdown", sa.Text(), nullable=False),
+ sa.Column("created_by", sa.UUID(), nullable=False),
+ sa.Column("created_at", sa.DateTime(timezone=True), server_default=sa.text("now()"), nullable=False),
+ sa.Column("updated_at", sa.DateTime(timezone=True), server_default=sa.text("now()"), nullable=False),
+ sa.PrimaryKeyConstraint("id"),
+ sa.ForeignKeyConstraint(["project_id"], ["projects.id"], ondelete="CASCADE"),
+ sa.ForeignKeyConstraint(["implementation_phase_id"], ["implementation_phases.id"], ondelete="CASCADE"),
+ sa.ForeignKeyConstraint(["created_by"], ["users.id"]),
)
- op.create_index('ix_phase_notes_project_id', 'phase_notes', ['project_id'])
- op.create_index('ix_phase_notes_implementation_phase_id', 'phase_notes', ['implementation_phase_id'])
+ op.create_index("ix_phase_notes_project_id", "phase_notes", ["project_id"])
+ op.create_index("ix_phase_notes_implementation_phase_id", "phase_notes", ["implementation_phase_id"])
# Create thread_notes table
op.create_table(
- 'thread_notes',
- sa.Column('id', sa.UUID(), nullable=False),
- sa.Column('thread_id', sa.UUID(), nullable=False),
- sa.Column('content_markdown', sa.Text(), nullable=False),
- sa.Column('created_by', sa.UUID(), nullable=False),
- sa.Column('created_at', sa.DateTime(timezone=True), server_default=sa.text('now()'), nullable=False),
- sa.PrimaryKeyConstraint('id'),
- sa.ForeignKeyConstraint(['thread_id'], ['threads.id'], ondelete='CASCADE'),
- sa.ForeignKeyConstraint(['created_by'], ['users.id']),
+ "thread_notes",
+ sa.Column("id", sa.UUID(), nullable=False),
+ sa.Column("thread_id", sa.UUID(), nullable=False),
+ sa.Column("content_markdown", sa.Text(), nullable=False),
+ sa.Column("created_by", sa.UUID(), nullable=False),
+ sa.Column("created_at", sa.DateTime(timezone=True), server_default=sa.text("now()"), nullable=False),
+ sa.PrimaryKeyConstraint("id"),
+ sa.ForeignKeyConstraint(["thread_id"], ["threads.id"], ondelete="CASCADE"),
+ sa.ForeignKeyConstraint(["created_by"], ["users.id"]),
)
- op.create_index('ix_thread_notes_thread_id', 'thread_notes', ['thread_id'])
+ op.create_index("ix_thread_notes_thread_id", "thread_notes", ["thread_id"])
def downgrade() -> None:
"""Downgrade schema."""
# Drop thread_notes table
- op.drop_index('ix_thread_notes_thread_id', 'thread_notes')
- op.drop_table('thread_notes')
+ op.drop_index("ix_thread_notes_thread_id", "thread_notes")
+ op.drop_table("thread_notes")
# Drop phase_notes table
- op.drop_index('ix_phase_notes_implementation_phase_id', 'phase_notes')
- op.drop_index('ix_phase_notes_project_id', 'phase_notes')
- op.drop_table('phase_notes')
+ op.drop_index("ix_phase_notes_implementation_phase_id", "phase_notes")
+ op.drop_index("ix_phase_notes_project_id", "phase_notes")
+ op.drop_table("phase_notes")
# Drop project_implementation_notes table
- op.drop_index('ix_project_implementation_notes_project_id', 'project_implementation_notes')
- op.drop_table('project_implementation_notes')
+ op.drop_index("ix_project_implementation_notes_project_id", "project_implementation_notes")
+ op.drop_table("project_implementation_notes")
diff --git a/backend/alembic/versions/a1b2c3d4e5f6_add_brainstorming_phases_modules_features.py b/backend/alembic/versions/a1b2c3d4e5f6_add_brainstorming_phases_modules_features.py
index 10ce70e..891d152 100644
--- a/backend/alembic/versions/a1b2c3d4e5f6_add_brainstorming_phases_modules_features.py
+++ b/backend/alembic/versions/a1b2c3d4e5f6_add_brainstorming_phases_modules_features.py
@@ -5,15 +5,14 @@
Create Date: 2025-12-03 10:00:00.000000
"""
+
from typing import Sequence, Union
from alembic import op
-import sqlalchemy as sa
-
# revision identifiers, used by Alembic.
-revision: str = 'a1b2c3d4e5f6'
-down_revision: Union[str, Sequence[str], None] = 'd5f8b2c9e1a3'
+revision: str = "a1b2c3d4e5f6"
+down_revision: Union[str, Sequence[str], None] = "d5f8b2c9e1a3"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
@@ -42,7 +41,7 @@ def upgrade() -> None:
FOREIGN KEY (created_by) REFERENCES users (id)
)
""")
- op.create_index('ix_brainstorming_phases_project_id', 'brainstorming_phases', ['project_id'])
+ op.create_index("ix_brainstorming_phases_project_id", "brainstorming_phases", ["project_id"])
# Create modules table
op.execute("""
@@ -63,8 +62,8 @@ def upgrade() -> None:
FOREIGN KEY (created_by) REFERENCES users (id)
)
""")
- op.create_index('ix_modules_project_id', 'modules', ['project_id'])
- op.create_index('ix_modules_brainstorming_phase_id', 'modules', ['brainstorming_phase_id'])
+ op.create_index("ix_modules_project_id", "modules", ["project_id"])
+ op.create_index("ix_modules_brainstorming_phase_id", "modules", ["brainstorming_phase_id"])
# Create features table
op.execute("""
@@ -86,23 +85,23 @@ def upgrade() -> None:
FOREIGN KEY (created_by) REFERENCES users (id)
)
""")
- op.create_index('ix_features_module_id', 'features', ['module_id'])
- op.create_index('ix_features_feature_key', 'features', ['feature_key'])
+ op.create_index("ix_features_module_id", "features", ["module_id"])
+ op.create_index("ix_features_feature_key", "features", ["feature_key"])
def downgrade() -> None:
"""Drop brainstorming_phases, modules, and features tables."""
# Drop indexes
- op.drop_index('ix_features_feature_key', table_name='features')
- op.drop_index('ix_features_module_id', table_name='features')
- op.drop_index('ix_modules_brainstorming_phase_id', table_name='modules')
- op.drop_index('ix_modules_project_id', table_name='modules')
- op.drop_index('ix_brainstorming_phases_project_id', table_name='brainstorming_phases')
+ op.drop_index("ix_features_feature_key", table_name="features")
+ op.drop_index("ix_features_module_id", table_name="features")
+ op.drop_index("ix_modules_brainstorming_phase_id", table_name="modules")
+ op.drop_index("ix_modules_project_id", table_name="modules")
+ op.drop_index("ix_brainstorming_phases_project_id", table_name="brainstorming_phases")
# Drop tables (in correct order due to foreign key dependencies)
- op.drop_table('features')
- op.drop_table('modules')
- op.drop_table('brainstorming_phases')
+ op.drop_table("features")
+ op.drop_table("modules")
+ op.drop_table("brainstorming_phases")
# Drop ENUMs
op.execute("DROP TYPE feature_status")
diff --git a/backend/alembic/versions/a1b2c3d4e5f7_add_module_key.py b/backend/alembic/versions/a1b2c3d4e5f7_add_module_key.py
index a3d90e6..c9a32c7 100644
--- a/backend/alembic/versions/a1b2c3d4e5f7_add_module_key.py
+++ b/backend/alembic/versions/a1b2c3d4e5f7_add_module_key.py
@@ -8,10 +8,10 @@
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
from sqlalchemy import text
+from alembic import op
# revision identifiers, used by Alembic.
revision: str = "a1b2c3d4e5f7"
@@ -39,7 +39,8 @@ def upgrade() -> None:
if dialect == "postgresql":
# PostgreSQL version with LPAD
- conn.execute(text("""
+ conn.execute(
+ text("""
WITH numbered_modules AS (
SELECT
m.id,
@@ -57,10 +58,12 @@ def upgrade() -> None:
module_key = 'M' || nm.project_key || '-' || LPAD(CAST(nm.row_num AS TEXT), 3, '0')
FROM numbered_modules nm
WHERE modules.id = nm.id
- """))
+ """)
+ )
else:
# SQLite version with printf
- conn.execute(text("""
+ conn.execute(
+ text("""
WITH numbered_modules AS (
SELECT
m.id,
@@ -76,7 +79,8 @@ def upgrade() -> None:
SET
module_key_number = (SELECT nm.row_num FROM numbered_modules nm WHERE nm.id = modules.id),
module_key = 'M' || (SELECT nm.project_key FROM numbered_modules nm WHERE nm.id = modules.id) || '-' || printf('%03d', (SELECT nm.row_num FROM numbered_modules nm WHERE nm.id = modules.id))
- """))
+ """)
+ )
# Make columns non-nullable
op.alter_column("modules", "module_key", nullable=False)
diff --git a/backend/alembic/versions/a60117da6409_add_projects_table.py b/backend/alembic/versions/a60117da6409_add_projects_table.py
index c224c9c..1045c4d 100644
--- a/backend/alembic/versions/a60117da6409_add_projects_table.py
+++ b/backend/alembic/versions/a60117da6409_add_projects_table.py
@@ -5,15 +5,16 @@
Create Date: 2025-11-20 10:10:50.652293
"""
+
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
+from alembic import op
# revision identifiers, used by Alembic.
-revision: str = 'a60117da6409'
-down_revision: Union[str, Sequence[str], None] = '4631a4a3270c'
+revision: str = "a60117da6409"
+down_revision: Union[str, Sequence[str], None] = "4631a4a3270c"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
@@ -22,41 +23,41 @@ def upgrade() -> None:
"""Upgrade schema."""
# Create projects table
op.create_table(
- 'projects',
- sa.Column('id', sa.UUID(), nullable=False),
- sa.Column('org_id', sa.UUID(), nullable=False),
- sa.Column('parent_project_id', sa.UUID(), nullable=True),
- sa.Column('type', sa.String(20), nullable=False),
- sa.Column('key', sa.String(100), nullable=True),
- sa.Column('name', sa.String(255), nullable=False),
- sa.Column('short_description', sa.Text(), nullable=True),
- sa.Column('idea_text', sa.Text(), nullable=True),
- sa.Column('status', sa.String(30), nullable=False, server_default='draft'),
- sa.Column('external_ticket_id', sa.String(100), nullable=True),
- sa.Column('external_system', sa.String(50), nullable=True),
- sa.Column('created_by', sa.UUID(), nullable=False),
- sa.Column('created_at', sa.TIMESTAMP(timezone=True), server_default=sa.text('now()'), nullable=False),
- sa.Column('updated_at', sa.TIMESTAMP(timezone=True), server_default=sa.text('now()'), nullable=False),
- sa.PrimaryKeyConstraint('id'),
- sa.ForeignKeyConstraint(['org_id'], ['organizations.id'], ondelete='CASCADE'),
- sa.ForeignKeyConstraint(['parent_project_id'], ['projects.id'], ondelete='CASCADE'),
- sa.ForeignKeyConstraint(['created_by'], ['users.id']),
+ "projects",
+ sa.Column("id", sa.UUID(), nullable=False),
+ sa.Column("org_id", sa.UUID(), nullable=False),
+ sa.Column("parent_project_id", sa.UUID(), nullable=True),
+ sa.Column("type", sa.String(20), nullable=False),
+ sa.Column("key", sa.String(100), nullable=True),
+ sa.Column("name", sa.String(255), nullable=False),
+ sa.Column("short_description", sa.Text(), nullable=True),
+ sa.Column("idea_text", sa.Text(), nullable=True),
+ sa.Column("status", sa.String(30), nullable=False, server_default="draft"),
+ sa.Column("external_ticket_id", sa.String(100), nullable=True),
+ sa.Column("external_system", sa.String(50), nullable=True),
+ sa.Column("created_by", sa.UUID(), nullable=False),
+ sa.Column("created_at", sa.TIMESTAMP(timezone=True), server_default=sa.text("now()"), nullable=False),
+ sa.Column("updated_at", sa.TIMESTAMP(timezone=True), server_default=sa.text("now()"), nullable=False),
+ sa.PrimaryKeyConstraint("id"),
+ sa.ForeignKeyConstraint(["org_id"], ["organizations.id"], ondelete="CASCADE"),
+ sa.ForeignKeyConstraint(["parent_project_id"], ["projects.id"], ondelete="CASCADE"),
+ sa.ForeignKeyConstraint(["created_by"], ["users.id"]),
)
# Create indexes
- op.create_index('ix_projects_org_id', 'projects', ['org_id'])
- op.create_index('ix_projects_type', 'projects', ['type'])
- op.create_index('ix_projects_status', 'projects', ['status'])
- op.create_index('ix_projects_parent_project_id', 'projects', ['parent_project_id'])
+ op.create_index("ix_projects_org_id", "projects", ["org_id"])
+ op.create_index("ix_projects_type", "projects", ["type"])
+ op.create_index("ix_projects_status", "projects", ["status"])
+ op.create_index("ix_projects_parent_project_id", "projects", ["parent_project_id"])
def downgrade() -> None:
"""Downgrade schema."""
# Drop indexes
- op.drop_index('ix_projects_parent_project_id', 'projects')
- op.drop_index('ix_projects_status', 'projects')
- op.drop_index('ix_projects_type', 'projects')
- op.drop_index('ix_projects_org_id', 'projects')
+ op.drop_index("ix_projects_parent_project_id", "projects")
+ op.drop_index("ix_projects_status", "projects")
+ op.drop_index("ix_projects_type", "projects")
+ op.drop_index("ix_projects_org_id", "projects")
# Drop table
- op.drop_table('projects')
+ op.drop_table("projects")
diff --git a/backend/alembic/versions/a6b7c8d9e0f1_user_level_api_keys.py b/backend/alembic/versions/a6b7c8d9e0f1_user_level_api_keys.py
index 4b46277..1fd23f2 100644
--- a/backend/alembic/versions/a6b7c8d9e0f1_user_level_api_keys.py
+++ b/backend/alembic/versions/a6b7c8d9e0f1_user_level_api_keys.py
@@ -7,9 +7,9 @@
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
+from alembic import op
# revision identifiers, used by Alembic.
revision: str = "a6b7c8d9e0f1"
diff --git a/backend/alembic/versions/a8845f795cf1_add_jobs_table.py b/backend/alembic/versions/a8845f795cf1_add_jobs_table.py
index 002d3a5..fb4d5f3 100644
--- a/backend/alembic/versions/a8845f795cf1_add_jobs_table.py
+++ b/backend/alembic/versions/a8845f795cf1_add_jobs_table.py
@@ -5,15 +5,16 @@
Create Date: 2025-11-20 08:11:53.926713
"""
+
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
+from alembic import op
# revision identifiers, used by Alembic.
-revision: str = 'a8845f795cf1'
-down_revision: Union[str, Sequence[str], None] = 'f9a38e4b733d'
+revision: str = "a8845f795cf1"
+down_revision: Union[str, Sequence[str], None] = "f9a38e4b733d"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
@@ -22,40 +23,40 @@ def upgrade() -> None:
"""Upgrade schema."""
# Create jobs table
op.create_table(
- 'jobs',
- sa.Column('id', sa.UUID(), nullable=False),
- sa.Column('org_id', sa.UUID(), nullable=True),
- sa.Column('project_id', sa.UUID(), nullable=True),
- sa.Column('job_type', sa.String(length=50), nullable=False),
- sa.Column('status', sa.String(length=20), nullable=False),
- sa.Column('payload', sa.JSON, nullable=False),
- sa.Column('result', sa.JSON, nullable=True),
- sa.Column('error_message', sa.Text(), nullable=True),
- sa.Column('attempts', sa.Integer(), nullable=False, server_default='0'),
- sa.Column('max_attempts', sa.Integer(), nullable=False, server_default='3'),
- sa.Column('created_at', sa.DateTime(timezone=True), server_default=sa.text('now()'), nullable=False),
- sa.Column('updated_at', sa.DateTime(timezone=True), server_default=sa.text('now()'), nullable=False),
- sa.Column('started_at', sa.DateTime(timezone=True), nullable=True),
- sa.Column('finished_at', sa.DateTime(timezone=True), nullable=True),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_jobs'))
+ "jobs",
+ sa.Column("id", sa.UUID(), nullable=False),
+ sa.Column("org_id", sa.UUID(), nullable=True),
+ sa.Column("project_id", sa.UUID(), nullable=True),
+ sa.Column("job_type", sa.String(length=50), nullable=False),
+ sa.Column("status", sa.String(length=20), nullable=False),
+ sa.Column("payload", sa.JSON, nullable=False),
+ sa.Column("result", sa.JSON, nullable=True),
+ sa.Column("error_message", sa.Text(), nullable=True),
+ sa.Column("attempts", sa.Integer(), nullable=False, server_default="0"),
+ sa.Column("max_attempts", sa.Integer(), nullable=False, server_default="3"),
+ sa.Column("created_at", sa.DateTime(timezone=True), server_default=sa.text("now()"), nullable=False),
+ sa.Column("updated_at", sa.DateTime(timezone=True), server_default=sa.text("now()"), nullable=False),
+ sa.Column("started_at", sa.DateTime(timezone=True), nullable=True),
+ sa.Column("finished_at", sa.DateTime(timezone=True), nullable=True),
+ sa.PrimaryKeyConstraint("id", name=op.f("pk_jobs")),
)
# Create indexes
- op.create_index(op.f('ix_jobs_org_id'), 'jobs', ['org_id'], unique=False)
- op.create_index(op.f('ix_jobs_project_id'), 'jobs', ['project_id'], unique=False)
- op.create_index(op.f('ix_jobs_job_type'), 'jobs', ['job_type'], unique=False)
- op.create_index(op.f('ix_jobs_status'), 'jobs', ['status'], unique=False)
- op.create_index(op.f('ix_jobs_created_at'), 'jobs', ['created_at'], unique=False)
+ op.create_index(op.f("ix_jobs_org_id"), "jobs", ["org_id"], unique=False)
+ op.create_index(op.f("ix_jobs_project_id"), "jobs", ["project_id"], unique=False)
+ op.create_index(op.f("ix_jobs_job_type"), "jobs", ["job_type"], unique=False)
+ op.create_index(op.f("ix_jobs_status"), "jobs", ["status"], unique=False)
+ op.create_index(op.f("ix_jobs_created_at"), "jobs", ["created_at"], unique=False)
def downgrade() -> None:
"""Downgrade schema."""
# Drop indexes
- op.drop_index(op.f('ix_jobs_created_at'), table_name='jobs')
- op.drop_index(op.f('ix_jobs_status'), table_name='jobs')
- op.drop_index(op.f('ix_jobs_job_type'), table_name='jobs')
- op.drop_index(op.f('ix_jobs_project_id'), table_name='jobs')
- op.drop_index(op.f('ix_jobs_org_id'), table_name='jobs')
+ op.drop_index(op.f("ix_jobs_created_at"), table_name="jobs")
+ op.drop_index(op.f("ix_jobs_status"), table_name="jobs")
+ op.drop_index(op.f("ix_jobs_job_type"), table_name="jobs")
+ op.drop_index(op.f("ix_jobs_project_id"), table_name="jobs")
+ op.drop_index(op.f("ix_jobs_org_id"), table_name="jobs")
# Drop table
- op.drop_table('jobs')
+ op.drop_table("jobs")
diff --git a/backend/alembic/versions/b2c3d4e5f6g7_add_draft_final_version_tables.py b/backend/alembic/versions/b2c3d4e5f6g7_add_draft_final_version_tables.py
index 3535452..1805e22 100644
--- a/backend/alembic/versions/b2c3d4e5f6g7_add_draft_final_version_tables.py
+++ b/backend/alembic/versions/b2c3d4e5f6g7_add_draft_final_version_tables.py
@@ -5,15 +5,16 @@
Create Date: 2025-12-03 11:00:00.000000
"""
+
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
+from alembic import op
# revision identifiers, used by Alembic.
-revision: str = 'b2c3d4e5f6g7'
-down_revision: Union[str, Sequence[str], None] = 'a1b2c3d4e5f6'
+revision: str = "b2c3d4e5f6g7"
+down_revision: Union[str, Sequence[str], None] = "a1b2c3d4e5f6"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
@@ -21,24 +22,24 @@
def upgrade() -> None:
"""Add brainstorming_phase_id and blocks to spec_versions, create final tables."""
# Add columns to spec_versions
- op.add_column('spec_versions', sa.Column('brainstorming_phase_id', sa.UUID(), nullable=True))
- op.add_column('spec_versions', sa.Column('blocks', sa.JSON(), nullable=True))
+ op.add_column("spec_versions", sa.Column("brainstorming_phase_id", sa.UUID(), nullable=True))
+ op.add_column("spec_versions", sa.Column("blocks", sa.JSON(), nullable=True))
# Create foreign key constraint
op.create_foreign_key(
- 'fk_spec_versions_brainstorming_phase_id',
- 'spec_versions',
- 'brainstorming_phases',
- ['brainstorming_phase_id'],
- ['id'],
- ondelete='CASCADE'
+ "fk_spec_versions_brainstorming_phase_id",
+ "spec_versions",
+ "brainstorming_phases",
+ ["brainstorming_phase_id"],
+ ["id"],
+ ondelete="CASCADE",
)
# Create index on brainstorming_phase_id
- op.create_index('ix_spec_versions_brainstorming_phase_id', 'spec_versions', ['brainstorming_phase_id'])
+ op.create_index("ix_spec_versions_brainstorming_phase_id", "spec_versions", ["brainstorming_phase_id"])
# Make project_id nullable (for brainstorming phase-based specs)
- op.alter_column('spec_versions', 'project_id', nullable=True)
+ op.alter_column("spec_versions", "project_id", nullable=True)
# Create final_specs table
op.execute("""
@@ -56,7 +57,7 @@ def upgrade() -> None:
FOREIGN KEY (created_by) REFERENCES users (id)
)
""")
- op.create_index('ix_final_specs_brainstorming_phase_id', 'final_specs', ['brainstorming_phase_id'])
+ op.create_index("ix_final_specs_brainstorming_phase_id", "final_specs", ["brainstorming_phase_id"])
# Create final_prompt_plans table
op.execute("""
@@ -74,23 +75,23 @@ def upgrade() -> None:
FOREIGN KEY (created_by) REFERENCES users (id)
)
""")
- op.create_index('ix_final_prompt_plans_brainstorming_phase_id', 'final_prompt_plans', ['brainstorming_phase_id'])
+ op.create_index("ix_final_prompt_plans_brainstorming_phase_id", "final_prompt_plans", ["brainstorming_phase_id"])
def downgrade() -> None:
"""Remove final tables and spec_versions modifications."""
# Drop final tables
- op.drop_index('ix_final_prompt_plans_brainstorming_phase_id', table_name='final_prompt_plans')
- op.drop_table('final_prompt_plans')
+ op.drop_index("ix_final_prompt_plans_brainstorming_phase_id", table_name="final_prompt_plans")
+ op.drop_table("final_prompt_plans")
- op.drop_index('ix_final_specs_brainstorming_phase_id', table_name='final_specs')
- op.drop_table('final_specs')
+ op.drop_index("ix_final_specs_brainstorming_phase_id", table_name="final_specs")
+ op.drop_table("final_specs")
# Make project_id not nullable again
- op.alter_column('spec_versions', 'project_id', nullable=False)
+ op.alter_column("spec_versions", "project_id", nullable=False)
# Drop new columns from spec_versions
- op.drop_index('ix_spec_versions_brainstorming_phase_id', table_name='spec_versions')
- op.drop_constraint('fk_spec_versions_brainstorming_phase_id', 'spec_versions', type_='foreignkey')
- op.drop_column('spec_versions', 'blocks')
- op.drop_column('spec_versions', 'brainstorming_phase_id')
+ op.drop_index("ix_spec_versions_brainstorming_phase_id", table_name="spec_versions")
+ op.drop_constraint("fk_spec_versions_brainstorming_phase_id", "spec_versions", type_="foreignkey")
+ op.drop_column("spec_versions", "blocks")
+ op.drop_column("spec_versions", "brainstorming_phase_id")
diff --git a/backend/alembic/versions/b7c8d9e0f1a2_add_summary_to_grounding_files.py b/backend/alembic/versions/b7c8d9e0f1a2_add_summary_to_grounding_files.py
index 07a5a9c..186a560 100644
--- a/backend/alembic/versions/b7c8d9e0f1a2_add_summary_to_grounding_files.py
+++ b/backend/alembic/versions/b7c8d9e0f1a2_add_summary_to_grounding_files.py
@@ -8,11 +8,12 @@
grounding files (primarily agents.md). The summary describes what's
in the file, not what changed.
"""
+
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
+from alembic import op
# revision identifiers, used by Alembic.
revision: str = "b7c8d9e0f1a2"
diff --git a/backend/alembic/versions/b7c8d9e0f1g2_add_coding_agent_name_to_mcp_logs.py b/backend/alembic/versions/b7c8d9e0f1g2_add_coding_agent_name_to_mcp_logs.py
index 52ae154..2356029 100644
--- a/backend/alembic/versions/b7c8d9e0f1g2_add_coding_agent_name_to_mcp_logs.py
+++ b/backend/alembic/versions/b7c8d9e0f1g2_add_coding_agent_name_to_mcp_logs.py
@@ -7,11 +7,12 @@
Adds coding_agent_name column to track which coding agent (e.g., Claude Code,
Cursor, Cline) made each MCP tool call. This enables analytics on agent usage.
"""
+
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
+from alembic import op
# revision identifiers, used by Alembic.
revision: str = "b7c8d9e0f1g2"
diff --git a/backend/alembic/versions/b894089ee371_add_api_key_encrypted_column.py b/backend/alembic/versions/b894089ee371_add_api_key_encrypted_column.py
index c543c71..795f8fa 100644
--- a/backend/alembic/versions/b894089ee371_add_api_key_encrypted_column.py
+++ b/backend/alembic/versions/b894089ee371_add_api_key_encrypted_column.py
@@ -8,9 +8,9 @@
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
+from alembic import op
# revision identifiers, used by Alembic.
revision: str = "b894089ee371"
diff --git a/backend/alembic/versions/ba27eeb7dc05_merge_cexp08_and_icshare01.py b/backend/alembic/versions/ba27eeb7dc05_merge_cexp08_and_icshare01.py
index 6b33dce..9d01a25 100644
--- a/backend/alembic/versions/ba27eeb7dc05_merge_cexp08_and_icshare01.py
+++ b/backend/alembic/versions/ba27eeb7dc05_merge_cexp08_and_icshare01.py
@@ -5,15 +5,12 @@
Create Date: 2026-01-15 08:18:33.406116
"""
-from typing import Sequence, Union
-
-from alembic import op
-import sqlalchemy as sa
+from typing import Sequence, Union
# revision identifiers, used by Alembic.
-revision: str = 'ba27eeb7dc05'
-down_revision: Union[str, Sequence[str], None] = ('cexp08', 'icshare01')
+revision: str = "ba27eeb7dc05"
+down_revision: Union[str, Sequence[str], None] = ("cexp08", "icshare01")
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
diff --git a/backend/alembic/versions/bc6ddbf8b5b7_add_api_keys_table_for_mcp_http_.py b/backend/alembic/versions/bc6ddbf8b5b7_add_api_keys_table_for_mcp_http_.py
index 0e64e3a..21706c8 100644
--- a/backend/alembic/versions/bc6ddbf8b5b7_add_api_keys_table_for_mcp_http_.py
+++ b/backend/alembic/versions/bc6ddbf8b5b7_add_api_keys_table_for_mcp_http_.py
@@ -5,15 +5,16 @@
Create Date: 2025-11-24 07:58:36.774569
"""
+
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
+from alembic import op
# revision identifiers, used by Alembic.
-revision: str = 'bc6ddbf8b5b7'
-down_revision: Union[str, Sequence[str], None] = '8058002151ba'
+revision: str = "bc6ddbf8b5b7"
+down_revision: Union[str, Sequence[str], None] = "8058002151ba"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
@@ -22,33 +23,33 @@ def upgrade() -> None:
"""Upgrade schema."""
# Create api_keys table
op.create_table(
- 'api_keys',
- sa.Column('id', sa.UUID(), nullable=False),
- sa.Column('user_id', sa.UUID(), nullable=False),
- sa.Column('project_id', sa.UUID(), nullable=False),
- sa.Column('name', sa.String(length=200), nullable=False),
- sa.Column('key_hash', sa.String(length=255), nullable=False),
- sa.Column('created_at', sa.TIMESTAMP(timezone=True), nullable=False),
- sa.Column('last_used_at', sa.TIMESTAMP(timezone=True), nullable=True),
- sa.Column('revoked', sa.Boolean(), nullable=False, server_default='false'),
- sa.ForeignKeyConstraint(['project_id'], ['projects.id'], ondelete='CASCADE'),
- sa.ForeignKeyConstraint(['user_id'], ['users.id'], ondelete='CASCADE'),
- sa.PrimaryKeyConstraint('id'),
- sa.UniqueConstraint('key_hash')
+ "api_keys",
+ sa.Column("id", sa.UUID(), nullable=False),
+ sa.Column("user_id", sa.UUID(), nullable=False),
+ sa.Column("project_id", sa.UUID(), nullable=False),
+ sa.Column("name", sa.String(length=200), nullable=False),
+ sa.Column("key_hash", sa.String(length=255), nullable=False),
+ sa.Column("created_at", sa.TIMESTAMP(timezone=True), nullable=False),
+ sa.Column("last_used_at", sa.TIMESTAMP(timezone=True), nullable=True),
+ sa.Column("revoked", sa.Boolean(), nullable=False, server_default="false"),
+ sa.ForeignKeyConstraint(["project_id"], ["projects.id"], ondelete="CASCADE"),
+ sa.ForeignKeyConstraint(["user_id"], ["users.id"], ondelete="CASCADE"),
+ sa.PrimaryKeyConstraint("id"),
+ sa.UniqueConstraint("key_hash"),
)
# Create indexes
- op.create_index('idx_api_keys_key_hash', 'api_keys', ['key_hash'])
- op.create_index('idx_api_keys_project_id', 'api_keys', ['project_id'])
- op.create_index('idx_api_keys_user_id', 'api_keys', ['user_id'])
+ op.create_index("idx_api_keys_key_hash", "api_keys", ["key_hash"])
+ op.create_index("idx_api_keys_project_id", "api_keys", ["project_id"])
+ op.create_index("idx_api_keys_user_id", "api_keys", ["user_id"])
def downgrade() -> None:
"""Downgrade schema."""
# Drop indexes
- op.drop_index('idx_api_keys_user_id', table_name='api_keys')
- op.drop_index('idx_api_keys_project_id', table_name='api_keys')
- op.drop_index('idx_api_keys_key_hash', table_name='api_keys')
+ op.drop_index("idx_api_keys_user_id", table_name="api_keys")
+ op.drop_index("idx_api_keys_project_id", table_name="api_keys")
+ op.drop_index("idx_api_keys_key_hash", table_name="api_keys")
# Drop table
- op.drop_table('api_keys')
+ op.drop_table("api_keys")
diff --git a/backend/alembic/versions/c3d4e5f6g7h8_add_thread_version_anchoring.py b/backend/alembic/versions/c3d4e5f6g7h8_add_thread_version_anchoring.py
index 0668a80..7b3cf8a 100644
--- a/backend/alembic/versions/c3d4e5f6g7h8_add_thread_version_anchoring.py
+++ b/backend/alembic/versions/c3d4e5f6g7h8_add_thread_version_anchoring.py
@@ -5,15 +5,16 @@
Create Date: 2025-12-03 12:00:00.000000
"""
+
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
+from alembic import op
# revision identifiers, used by Alembic.
-revision: str = 'c3d4e5f6g7h8'
-down_revision: Union[str, Sequence[str], None] = 'b2c3d4e5f6g7'
+revision: str = "c3d4e5f6g7h8"
+down_revision: Union[str, Sequence[str], None] = "b2c3d4e5f6g7"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
@@ -21,14 +22,14 @@
def upgrade() -> None:
"""Add version_id and block_id columns to threads, update context_type enum."""
# Add new columns to threads
- op.add_column('threads', sa.Column('version_id', sa.String(), nullable=True))
- op.add_column('threads', sa.Column('block_id', sa.String(), nullable=True))
+ op.add_column("threads", sa.Column("version_id", sa.String(), nullable=True))
+ op.add_column("threads", sa.Column("block_id", sa.String(), nullable=True))
# Create index on version_id
- op.create_index('ix_threads_version_id', 'threads', ['version_id'])
+ op.create_index("ix_threads_version_id", "threads", ["version_id"])
# Create composite index on (version_id, block_id) for efficient lookups
- op.create_index('ix_threads_version_block', 'threads', ['version_id', 'block_id'])
+ op.create_index("ix_threads_version_block", "threads", ["version_id", "block_id"])
# Note: context_type is stored as VARCHAR, not a PostgreSQL enum type,
# so no ALTER TYPE is needed. New values ('spec_draft', 'prompt_plan_draft')
@@ -38,12 +39,12 @@ def upgrade() -> None:
def downgrade() -> None:
"""Remove version_id and block_id columns from threads."""
# Drop indexes
- op.drop_index('ix_threads_version_block', table_name='threads')
- op.drop_index('ix_threads_version_id', table_name='threads')
+ op.drop_index("ix_threads_version_block", table_name="threads")
+ op.drop_index("ix_threads_version_id", table_name="threads")
# Drop columns
- op.drop_column('threads', 'block_id')
- op.drop_column('threads', 'version_id')
+ op.drop_column("threads", "block_id")
+ op.drop_column("threads", "version_id")
# Note: PostgreSQL doesn't support removing enum values easily.
# The new enum values will remain but won't be used after downgrade.
diff --git a/backend/alembic/versions/c4a5e7f8d123_add_archived_at_to_brainstorming_phases.py b/backend/alembic/versions/c4a5e7f8d123_add_archived_at_to_brainstorming_phases.py
index 68504b5..45402c8 100644
--- a/backend/alembic/versions/c4a5e7f8d123_add_archived_at_to_brainstorming_phases.py
+++ b/backend/alembic/versions/c4a5e7f8d123_add_archived_at_to_brainstorming_phases.py
@@ -7,9 +7,10 @@
"""
from typing import Sequence, Union
-from alembic import op
+
import sqlalchemy as sa
+from alembic import op
revision: str = "c4a5e7f8d123"
down_revision: Union[str, None] = "b894089ee371"
diff --git a/backend/alembic/versions/c8d9e0f1g2h3_add_feature_updated_at.py b/backend/alembic/versions/c8d9e0f1g2h3_add_feature_updated_at.py
index 9543f59..b3cb267 100644
--- a/backend/alembic/versions/c8d9e0f1g2h3_add_feature_updated_at.py
+++ b/backend/alembic/versions/c8d9e0f1g2h3_add_feature_updated_at.py
@@ -8,11 +8,12 @@
are modified. This enables sorting features by "recently changed".
"""
+
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
+from alembic import op
# revision identifiers, used by Alembic.
revision: str = "c8d9e0f1g2h3"
@@ -24,22 +25,16 @@
def upgrade() -> None:
# Add updated_at column with server default of now()
op.add_column(
- 'features',
- sa.Column(
- 'updated_at',
- sa.DateTime(timezone=True),
- nullable=False,
- server_default=sa.func.now()
- )
+ "features", sa.Column("updated_at", sa.DateTime(timezone=True), nullable=False, server_default=sa.func.now())
)
# Backfill existing rows with created_at value
op.execute("UPDATE features SET updated_at = created_at")
# Add index for efficient sorting by updated_at
- op.create_index('ix_features_updated_at', 'features', ['updated_at'])
+ op.create_index("ix_features_updated_at", "features", ["updated_at"])
def downgrade() -> None:
- op.drop_index('ix_features_updated_at', table_name='features')
- op.drop_column('features', 'updated_at')
+ op.drop_index("ix_features_updated_at", table_name="features")
+ op.drop_column("features", "updated_at")
diff --git a/backend/alembic/versions/cec01_add_code_exploration_cache.py b/backend/alembic/versions/cec01_add_code_exploration_cache.py
index 248df9f..a838b55 100644
--- a/backend/alembic/versions/cec01_add_code_exploration_cache.py
+++ b/backend/alembic/versions/cec01_add_code_exploration_cache.py
@@ -14,8 +14,8 @@
from typing import Sequence, Union
import sqlalchemy as sa
-from alembic import op
+from alembic import op
# revision identifiers, used by Alembic.
revision: str = "cec01"
diff --git a/backend/alembic/versions/cexp01_add_code_explorer.py b/backend/alembic/versions/cexp01_add_code_explorer.py
index 7bddc2e..d3fbe98 100644
--- a/backend/alembic/versions/cexp01_add_code_explorer.py
+++ b/backend/alembic/versions/cexp01_add_code_explorer.py
@@ -12,10 +12,10 @@
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
+from alembic import op
# revision identifiers, used by Alembic.
revision: str = "cexp01"
diff --git a/backend/alembic/versions/cexp06_add_raw_output_to_code_exploration.py b/backend/alembic/versions/cexp06_add_raw_output_to_code_exploration.py
index 2e7bc23..e05088c 100644
--- a/backend/alembic/versions/cexp06_add_raw_output_to_code_exploration.py
+++ b/backend/alembic/versions/cexp06_add_raw_output_to_code_exploration.py
@@ -5,11 +5,12 @@
Create Date: 2026-01-11
"""
+
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
+from alembic import op
# revision identifiers, used by Alembic.
revision: str = "cexp06"
diff --git a/backend/alembic/versions/cexp07_add_thread_code_exploration.py b/backend/alembic/versions/cexp07_add_thread_code_exploration.py
index 714857b..98c3227 100644
--- a/backend/alembic/versions/cexp07_add_thread_code_exploration.py
+++ b/backend/alembic/versions/cexp07_add_thread_code_exploration.py
@@ -8,12 +8,13 @@
Revises: cexp06, ppd07
Create Date: 2025-01-12
"""
+
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects.postgresql import UUID
+from alembic import op
# revision identifiers, used by Alembic.
revision: str = "cexp07"
diff --git a/backend/alembic/versions/cexp08_move_code_explorer_to_project.py b/backend/alembic/versions/cexp08_move_code_explorer_to_project.py
index 9fd586b..d316789 100644
--- a/backend/alembic/versions/cexp08_move_code_explorer_to_project.py
+++ b/backend/alembic/versions/cexp08_move_code_explorer_to_project.py
@@ -8,12 +8,13 @@
Revises: cexp07
Create Date: 2025-01-14
"""
+
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects.postgresql import UUID
+from alembic import op
# revision identifiers, used by Alembic.
revision: str = "cexp08"
diff --git a/backend/alembic/versions/d1cf77c4c1fa_add_llm_usage_logs_table.py b/backend/alembic/versions/d1cf77c4c1fa_add_llm_usage_logs_table.py
index 893edc2..f077ef8 100644
--- a/backend/alembic/versions/d1cf77c4c1fa_add_llm_usage_logs_table.py
+++ b/backend/alembic/versions/d1cf77c4c1fa_add_llm_usage_logs_table.py
@@ -5,16 +5,17 @@
Create Date: 2025-12-16 17:04:42.006885
"""
+
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
+from alembic import op
# revision identifiers, used by Alembic.
-revision: str = 'd1cf77c4c1fa'
-down_revision: Union[str, Sequence[str], None] = '9b373c88f9ac'
+revision: str = "d1cf77c4c1fa"
+down_revision: Union[str, Sequence[str], None] = "9b373c88f9ac"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
diff --git a/backend/alembic/versions/d4e5f6g7h8i9_add_activity_logs.py b/backend/alembic/versions/d4e5f6g7h8i9_add_activity_logs.py
index dacde96..d034597 100644
--- a/backend/alembic/versions/d4e5f6g7h8i9_add_activity_logs.py
+++ b/backend/alembic/versions/d4e5f6g7h8i9_add_activity_logs.py
@@ -5,15 +5,14 @@
Create Date: 2025-12-03 13:00:00.000000
"""
+
from typing import Sequence, Union
from alembic import op
-import sqlalchemy as sa
-
# revision identifiers, used by Alembic.
-revision: str = 'd4e5f6g7h8i9'
-down_revision: Union[str, Sequence[str], None] = 'c3d4e5f6g7h8'
+revision: str = "d4e5f6g7h8i9"
+down_revision: Union[str, Sequence[str], None] = "c3d4e5f6g7h8"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
@@ -34,14 +33,14 @@ def upgrade() -> None:
""")
# Create indexes for efficient querying
- op.create_index('ix_activity_logs_entity', 'activity_logs', ['entity_type', 'entity_id'])
- op.create_index('ix_activity_logs_event_type', 'activity_logs', ['event_type'])
- op.create_index('ix_activity_logs_created_at', 'activity_logs', ['created_at'])
+ op.create_index("ix_activity_logs_entity", "activity_logs", ["entity_type", "entity_id"])
+ op.create_index("ix_activity_logs_event_type", "activity_logs", ["event_type"])
+ op.create_index("ix_activity_logs_created_at", "activity_logs", ["created_at"])
def downgrade() -> None:
"""Drop activity_logs table."""
- op.drop_index('ix_activity_logs_created_at', table_name='activity_logs')
- op.drop_index('ix_activity_logs_event_type', table_name='activity_logs')
- op.drop_index('ix_activity_logs_entity', table_name='activity_logs')
- op.drop_table('activity_logs')
+ op.drop_index("ix_activity_logs_created_at", table_name="activity_logs")
+ op.drop_index("ix_activity_logs_event_type", table_name="activity_logs")
+ op.drop_index("ix_activity_logs_entity", table_name="activity_logs")
+ op.drop_table("activity_logs")
diff --git a/backend/alembic/versions/d5b6e7f8g123_add_decision_summary_short.py b/backend/alembic/versions/d5b6e7f8g123_add_decision_summary_short.py
index 3770f41..5d634ce 100644
--- a/backend/alembic/versions/d5b6e7f8g123_add_decision_summary_short.py
+++ b/backend/alembic/versions/d5b6e7f8g123_add_decision_summary_short.py
@@ -7,9 +7,10 @@
"""
from typing import Sequence, Union
-from alembic import op
+
import sqlalchemy as sa
+from alembic import op
revision: str = "d5b6e7f8g123"
down_revision: Union[str, None] = "c4a5e7f8d123"
diff --git a/backend/alembic/versions/d5f8b2c9e1a3_add_non_goals_violations_column.py b/backend/alembic/versions/d5f8b2c9e1a3_add_non_goals_violations_column.py
index 5ff0745..c5fc88e 100644
--- a/backend/alembic/versions/d5f8b2c9e1a3_add_non_goals_violations_column.py
+++ b/backend/alembic/versions/d5f8b2c9e1a3_add_non_goals_violations_column.py
@@ -5,15 +5,16 @@
Create Date: 2025-11-25 16:35:00.000000
"""
+
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
-from sqlalchemy.dialects import postgresql
+
+from alembic import op
# revision identifiers, used by Alembic.
-revision: str = 'd5f8b2c9e1a3'
-down_revision: Union[str, Sequence[str], None] = '3e35a2b90829'
+revision: str = "d5f8b2c9e1a3"
+down_revision: Union[str, Sequence[str], None] = "3e35a2b90829"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
@@ -21,11 +22,11 @@
def upgrade() -> None:
"""Add non_goals_violations JSON column to prompt_plan_coverage_reports table."""
op.add_column(
- 'prompt_plan_coverage_reports',
- sa.Column('non_goals_violations', sa.JSON(), nullable=False, server_default='[]')
+ "prompt_plan_coverage_reports",
+ sa.Column("non_goals_violations", sa.JSON(), nullable=False, server_default="[]"),
)
def downgrade() -> None:
"""Remove non_goals_violations column from prompt_plan_coverage_reports table."""
- op.drop_column('prompt_plan_coverage_reports', 'non_goals_violations')
+ op.drop_column("prompt_plan_coverage_reports", "non_goals_violations")
diff --git a/backend/alembic/versions/df4d2a8eedf1_add_show_create_implementation_button_.py b/backend/alembic/versions/df4d2a8eedf1_add_show_create_implementation_button_.py
index c8e3804..88da725 100644
--- a/backend/alembic/versions/df4d2a8eedf1_add_show_create_implementation_button_.py
+++ b/backend/alembic/versions/df4d2a8eedf1_add_show_create_implementation_button_.py
@@ -5,15 +5,16 @@
Create Date: 2026-01-03 16:53:37.917190
"""
+
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
+from alembic import op
# revision identifiers, used by Alembic.
-revision: str = 'df4d2a8eedf1'
-down_revision: Union[str, Sequence[str], None] = '616549379b06'
+revision: str = "df4d2a8eedf1"
+down_revision: Union[str, Sequence[str], None] = "616549379b06"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
@@ -21,11 +22,10 @@
def upgrade() -> None:
"""Add show_create_implementation_button flag to threads table."""
op.add_column(
- 'threads',
- sa.Column('show_create_implementation_button', sa.Boolean(), nullable=False, server_default='false')
+ "threads", sa.Column("show_create_implementation_button", sa.Boolean(), nullable=False, server_default="false")
)
def downgrade() -> None:
"""Remove show_create_implementation_button flag from threads table."""
- op.drop_column('threads', 'show_create_implementation_button')
+ op.drop_column("threads", "show_create_implementation_button")
diff --git a/backend/alembic/versions/dus01_daily_usage_summary_table.py b/backend/alembic/versions/dus01_daily_usage_summary_table.py
index d93bcca..cc678cf 100644
--- a/backend/alembic/versions/dus01_daily_usage_summary_table.py
+++ b/backend/alembic/versions/dus01_daily_usage_summary_table.py
@@ -12,10 +12,10 @@
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
+from alembic import op
# revision identifiers, used by Alembic.
revision: str = "dus01"
diff --git a/backend/alembic/versions/dus02_realtime_aggregation_trigger.py b/backend/alembic/versions/dus02_realtime_aggregation_trigger.py
index dac99ad..a039905 100644
--- a/backend/alembic/versions/dus02_realtime_aggregation_trigger.py
+++ b/backend/alembic/versions/dus02_realtime_aggregation_trigger.py
@@ -13,8 +13,6 @@
from typing import Sequence, Union
from alembic import op
-import sqlalchemy as sa
-
# revision identifiers, used by Alembic.
revision: str = "dus02"
diff --git a/backend/alembic/versions/dus03_fix_trigger_on_conflict.py b/backend/alembic/versions/dus03_fix_trigger_on_conflict.py
index 678d553..372a748 100644
--- a/backend/alembic/versions/dus03_fix_trigger_on_conflict.py
+++ b/backend/alembic/versions/dus03_fix_trigger_on_conflict.py
@@ -14,7 +14,6 @@
from alembic import op
-
# revision identifiers, used by Alembic.
revision: str = "dus03"
down_revision: Union[str, None] = "rec01"
diff --git a/backend/alembic/versions/e4f5g6h7i8j9_add_feature_priority_category.py b/backend/alembic/versions/e4f5g6h7i8j9_add_feature_priority_category.py
index ea194e8..c7812b9 100644
--- a/backend/alembic/versions/e4f5g6h7i8j9_add_feature_priority_category.py
+++ b/backend/alembic/versions/e4f5g6h7i8j9_add_feature_priority_category.py
@@ -5,15 +5,16 @@
Create Date: 2025-12-04 07:20:00.000000
"""
+
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
+from alembic import op
# revision identifiers, used by Alembic.
-revision: str = 'e4f5g6h7i8j9'
-down_revision: Union[str, Sequence[str], None] = 'd4e5f6g7h8i9'
+revision: str = "e4f5g6h7i8j9"
+down_revision: Union[str, Sequence[str], None] = "d4e5f6g7h8i9"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
@@ -21,34 +22,20 @@
def upgrade() -> None:
"""Add priority and category columns to features table."""
# Create the enum type for feature priority
- feature_priority_enum = sa.Enum(
- 'must_have', 'important', 'optional',
- name='feature_priority'
- )
+ feature_priority_enum = sa.Enum("must_have", "important", "optional", name="feature_priority")
feature_priority_enum.create(op.get_bind(), checkfirst=True)
# Add priority column with default 'important'
- op.add_column(
- 'features',
- sa.Column(
- 'priority',
- feature_priority_enum,
- nullable=False,
- server_default='important'
- )
- )
+ op.add_column("features", sa.Column("priority", feature_priority_enum, nullable=False, server_default="important"))
# Add category column (nullable string)
- op.add_column(
- 'features',
- sa.Column('category', sa.String(100), nullable=True)
- )
+ op.add_column("features", sa.Column("category", sa.String(100), nullable=True))
def downgrade() -> None:
"""Remove priority and category columns from features table."""
- op.drop_column('features', 'category')
- op.drop_column('features', 'priority')
+ op.drop_column("features", "category")
+ op.drop_column("features", "priority")
# Drop the enum type
- sa.Enum(name='feature_priority').drop(op.get_bind(), checkfirst=True)
+ sa.Enum(name="feature_priority").drop(op.get_bind(), checkfirst=True)
diff --git a/backend/alembic/versions/e6f7g8h9i0j1_add_email_templates_table.py b/backend/alembic/versions/e6f7g8h9i0j1_add_email_templates_table.py
index c67719f..06dc14a 100644
--- a/backend/alembic/versions/e6f7g8h9i0j1_add_email_templates_table.py
+++ b/backend/alembic/versions/e6f7g8h9i0j1_add_email_templates_table.py
@@ -7,10 +7,11 @@
"""
from typing import Sequence, Union
-from alembic import op
+
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
+from alembic import op
revision: str = "e6f7g8h9i0j1"
down_revision: Union[str, None] = "d5b6e7f8g123"
diff --git a/backend/alembic/versions/ea0f87fc2305_add_is_sample_to_projects.py b/backend/alembic/versions/ea0f87fc2305_add_is_sample_to_projects.py
index 365554e..8c5101d 100644
--- a/backend/alembic/versions/ea0f87fc2305_add_is_sample_to_projects.py
+++ b/backend/alembic/versions/ea0f87fc2305_add_is_sample_to_projects.py
@@ -7,11 +7,12 @@
Adds a boolean flag to identify sample onboarding projects created
for new users during signup.
"""
+
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
+from alembic import op
# revision identifiers, used by Alembic.
revision: str = "ea0f87fc2305"
diff --git a/backend/alembic/versions/ed7322775e46_add_pending_approval_to_threads.py b/backend/alembic/versions/ed7322775e46_add_pending_approval_to_threads.py
index 4edd369..f57684a 100644
--- a/backend/alembic/versions/ed7322775e46_add_pending_approval_to_threads.py
+++ b/backend/alembic/versions/ed7322775e46_add_pending_approval_to_threads.py
@@ -5,24 +5,25 @@
Create Date: 2025-11-20 13:01:24.678760
"""
+
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
+from alembic import op
# revision identifiers, used by Alembic.
-revision: str = 'ed7322775e46'
-down_revision: Union[str, Sequence[str], None] = '63256c2c0d52'
+revision: str = "ed7322775e46"
+down_revision: Union[str, Sequence[str], None] = "63256c2c0d52"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
"""Upgrade schema."""
- op.add_column('threads', sa.Column('pending_approval', sa.Boolean(), nullable=False, server_default=sa.false()))
+ op.add_column("threads", sa.Column("pending_approval", sa.Boolean(), nullable=False, server_default=sa.false()))
def downgrade() -> None:
"""Downgrade schema."""
- op.drop_column('threads', 'pending_approval')
+ op.drop_column("threads", "pending_approval")
diff --git a/backend/alembic/versions/f5g6h7i8j9k0_add_llm_call_logs_table.py b/backend/alembic/versions/f5g6h7i8j9k0_add_llm_call_logs_table.py
index ac73ee8..4fa9ac5 100644
--- a/backend/alembic/versions/f5g6h7i8j9k0_add_llm_call_logs_table.py
+++ b/backend/alembic/versions/f5g6h7i8j9k0_add_llm_call_logs_table.py
@@ -5,16 +5,17 @@
Create Date: 2025-12-05
"""
+
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
+from alembic import op
# revision identifiers, used by Alembic.
-revision: str = 'f5g6h7i8j9k0'
-down_revision: Union[str, Sequence[str], None] = 'e4f5g6h7i8j9'
+revision: str = "f5g6h7i8j9k0"
+down_revision: Union[str, Sequence[str], None] = "e4f5g6h7i8j9"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
@@ -22,44 +23,46 @@
def upgrade() -> None:
"""Create llm_call_logs table."""
op.create_table(
- 'llm_call_logs',
- sa.Column('id', postgresql.UUID(as_uuid=True), primary_key=True),
- sa.Column('job_id', postgresql.UUID(as_uuid=True), sa.ForeignKey('jobs.id', ondelete='CASCADE'), nullable=False),
+ "llm_call_logs",
+ sa.Column("id", postgresql.UUID(as_uuid=True), primary_key=True),
+ sa.Column(
+ "job_id", postgresql.UUID(as_uuid=True), sa.ForeignKey("jobs.id", ondelete="CASCADE"), nullable=False
+ ),
# Agent identification
- sa.Column('agent_name', sa.String(100), nullable=False),
- sa.Column('agent_display_name', sa.String(255), nullable=True),
+ sa.Column("agent_name", sa.String(100), nullable=False),
+ sa.Column("agent_display_name", sa.String(255), nullable=True),
# Request details
- sa.Column('request_messages', postgresql.JSONB, nullable=False),
- sa.Column('request_model', sa.String(100), nullable=False),
- sa.Column('request_temperature', sa.Numeric(3, 2), nullable=True),
- sa.Column('request_max_tokens', sa.Integer(), nullable=True),
+ sa.Column("request_messages", postgresql.JSONB, nullable=False),
+ sa.Column("request_model", sa.String(100), nullable=False),
+ sa.Column("request_temperature", sa.Numeric(3, 2), nullable=True),
+ sa.Column("request_max_tokens", sa.Integer(), nullable=True),
# Response details
- sa.Column('response_content', sa.Text(), nullable=True),
- sa.Column('response_finish_reason', sa.String(50), nullable=True),
- sa.Column('response_tool_calls', postgresql.JSONB, nullable=True),
+ sa.Column("response_content", sa.Text(), nullable=True),
+ sa.Column("response_finish_reason", sa.String(50), nullable=True),
+ sa.Column("response_tool_calls", postgresql.JSONB, nullable=True),
# Usage metrics
- sa.Column('prompt_tokens', sa.Integer(), nullable=False),
- sa.Column('completion_tokens', sa.Integer(), nullable=False),
- sa.Column('cost_usd', sa.Numeric(10, 6), nullable=True),
+ sa.Column("prompt_tokens", sa.Integer(), nullable=False),
+ sa.Column("completion_tokens", sa.Integer(), nullable=False),
+ sa.Column("cost_usd", sa.Numeric(10, 6), nullable=True),
# Timing
- sa.Column('started_at', sa.DateTime(timezone=True), nullable=False),
- sa.Column('finished_at', sa.DateTime(timezone=True), nullable=False),
- sa.Column('duration_ms', sa.Integer(), nullable=False),
+ sa.Column("started_at", sa.DateTime(timezone=True), nullable=False),
+ sa.Column("finished_at", sa.DateTime(timezone=True), nullable=False),
+ sa.Column("duration_ms", sa.Integer(), nullable=False),
# Metadata
- sa.Column('created_at', sa.DateTime(timezone=True), server_default=sa.func.now(), nullable=False),
+ sa.Column("created_at", sa.DateTime(timezone=True), server_default=sa.func.now(), nullable=False),
)
# Create indexes
- op.create_index('ix_llm_call_logs_job_id', 'llm_call_logs', ['job_id'])
- op.create_index('ix_llm_call_logs_agent_name', 'llm_call_logs', ['agent_name'])
- op.create_index('ix_llm_call_logs_created_at', 'llm_call_logs', ['created_at'])
- op.create_index('ix_llm_call_logs_job_created', 'llm_call_logs', ['job_id', 'created_at'])
+ op.create_index("ix_llm_call_logs_job_id", "llm_call_logs", ["job_id"])
+ op.create_index("ix_llm_call_logs_agent_name", "llm_call_logs", ["agent_name"])
+ op.create_index("ix_llm_call_logs_created_at", "llm_call_logs", ["created_at"])
+ op.create_index("ix_llm_call_logs_job_created", "llm_call_logs", ["job_id", "created_at"])
def downgrade() -> None:
"""Drop llm_call_logs table."""
- op.drop_index('ix_llm_call_logs_job_created')
- op.drop_index('ix_llm_call_logs_created_at')
- op.drop_index('ix_llm_call_logs_agent_name')
- op.drop_index('ix_llm_call_logs_job_id')
- op.drop_table('llm_call_logs')
+ op.drop_index("ix_llm_call_logs_job_created")
+ op.drop_index("ix_llm_call_logs_created_at")
+ op.drop_index("ix_llm_call_logs_agent_name")
+ op.drop_index("ix_llm_call_logs_job_id")
+ op.drop_table("llm_call_logs")
diff --git a/backend/alembic/versions/f7g8h9i0j1k2_seed_email_templates.py b/backend/alembic/versions/f7g8h9i0j1k2_seed_email_templates.py
index 06e591e..d5a3714 100644
--- a/backend/alembic/versions/f7g8h9i0j1k2_seed_email_templates.py
+++ b/backend/alembic/versions/f7g8h9i0j1k2_seed_email_templates.py
@@ -5,11 +5,13 @@
Create Date: 2025-01-21
"""
-from alembic import op
-import sqlalchemy as sa
-from datetime import datetime, timezone
-import uuid
import json
+import uuid
+from datetime import datetime, timezone
+
+import sqlalchemy as sa
+
+from alembic import op
revision = "f7g8h9i0j1k2"
down_revision = "e6f7g8h9i0j1"
@@ -147,10 +149,9 @@ def upgrade() -> None:
id=str(uuid.uuid4()),
body=MENTION_NOTIFICATION_BODY,
mandatory_vars=json.dumps(["mentioned_by_name", "context_title", "view_url"]),
- available_vars=json.dumps([
- "mentioned_by_name", "context_title", "context_description",
- "recent_messages", "view_url"
- ]),
+ available_vars=json.dumps(
+ ["mentioned_by_name", "context_title", "context_description", "recent_messages", "view_url"]
+ ),
now=now,
)
)
diff --git a/backend/alembic/versions/f9a38e4b733d_initial_migration_create_users_table.py b/backend/alembic/versions/f9a38e4b733d_initial_migration_create_users_table.py
index 8f9b215..32252a6 100644
--- a/backend/alembic/versions/f9a38e4b733d_initial_migration_create_users_table.py
+++ b/backend/alembic/versions/f9a38e4b733d_initial_migration_create_users_table.py
@@ -1,18 +1,19 @@
"""Initial migration - create users table
Revision ID: f9a38e4b733d
-Revises:
+Revises:
Create Date: 2025-11-20 07:58:16.443565
"""
+
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
+from alembic import op
# revision identifiers, used by Alembic.
-revision: str = 'f9a38e4b733d'
+revision: str = "f9a38e4b733d"
down_revision: Union[str, Sequence[str], None] = None
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
@@ -22,18 +23,18 @@ def upgrade() -> None:
"""Upgrade schema."""
# Create users table
op.create_table(
- 'users',
- sa.Column('id', sa.UUID(), nullable=False),
- sa.Column('email', sa.String(length=255), nullable=False),
- sa.Column('created_at', sa.DateTime(timezone=True), server_default=sa.text('now()'), nullable=False),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_users')),
- sa.UniqueConstraint('email', name=op.f('uq_users_email'))
+ "users",
+ sa.Column("id", sa.UUID(), nullable=False),
+ sa.Column("email", sa.String(length=255), nullable=False),
+ sa.Column("created_at", sa.DateTime(timezone=True), server_default=sa.text("now()"), nullable=False),
+ sa.PrimaryKeyConstraint("id", name=op.f("pk_users")),
+ sa.UniqueConstraint("email", name=op.f("uq_users_email")),
)
- op.create_index(op.f('ix_email'), 'users', ['email'], unique=False)
+ op.create_index(op.f("ix_email"), "users", ["email"], unique=False)
def downgrade() -> None:
"""Downgrade schema."""
# Drop users table
- op.drop_index(op.f('ix_email'), table_name='users')
- op.drop_table('users')
+ op.drop_index(op.f("ix_email"), table_name="users")
+ op.drop_table("users")
diff --git a/backend/alembic/versions/fb1c2d3e4f5g_add_notes_updated_fields.py b/backend/alembic/versions/fb1c2d3e4f5g_add_notes_updated_fields.py
index 5ba7090..ad1b5ae 100644
--- a/backend/alembic/versions/fb1c2d3e4f5g_add_notes_updated_fields.py
+++ b/backend/alembic/versions/fb1c2d3e4f5g_add_notes_updated_fields.py
@@ -5,11 +5,12 @@
Create Date: 2024-12-18 12:00:00.000000
"""
+
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
+from alembic import op
# revision identifiers, used by Alembic.
revision: str = "fb1c2d3e4f5g"
diff --git a/backend/alembic/versions/fd30aac0413b_add_created_by_to_pre_phase_message.py b/backend/alembic/versions/fd30aac0413b_add_created_by_to_pre_phase_message.py
index 4173927..f0c524b 100644
--- a/backend/alembic/versions/fd30aac0413b_add_created_by_to_pre_phase_message.py
+++ b/backend/alembic/versions/fd30aac0413b_add_created_by_to_pre_phase_message.py
@@ -5,36 +5,28 @@
Create Date: 2026-01-14 17:13:02.981564
"""
+
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
+from alembic import op
# revision identifiers, used by Alembic.
-revision: str = 'fd30aac0413b'
-down_revision: Union[str, Sequence[str], None] = 'sid01'
+revision: str = "fd30aac0413b"
+down_revision: Union[str, Sequence[str], None] = "sid01"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
"""Add created_by column to pre_phase_messages for user tracking."""
- op.add_column(
- 'pre_phase_messages',
- sa.Column('created_by', postgresql.UUID(as_uuid=True), nullable=True)
- )
- op.create_foreign_key(
- 'fk_pre_phase_messages_created_by',
- 'pre_phase_messages',
- 'users',
- ['created_by'],
- ['id']
- )
+ op.add_column("pre_phase_messages", sa.Column("created_by", postgresql.UUID(as_uuid=True), nullable=True))
+ op.create_foreign_key("fk_pre_phase_messages_created_by", "pre_phase_messages", "users", ["created_by"], ["id"])
def downgrade() -> None:
"""Remove created_by column from pre_phase_messages."""
- op.drop_constraint('fk_pre_phase_messages_created_by', 'pre_phase_messages', type_='foreignkey')
- op.drop_column('pre_phase_messages', 'created_by')
+ op.drop_constraint("fk_pre_phase_messages_created_by", "pre_phase_messages", type_="foreignkey")
+ op.drop_column("pre_phase_messages", "created_by")
diff --git a/backend/alembic/versions/freemium01_increase_max_users_to_5.py b/backend/alembic/versions/freemium01_increase_max_users_to_5.py
index cdfdf3d..1eb674d 100644
--- a/backend/alembic/versions/freemium01_increase_max_users_to_5.py
+++ b/backend/alembic/versions/freemium01_increase_max_users_to_5.py
@@ -5,14 +5,14 @@
Create Date: 2025-02-02
"""
+
from typing import Sequence, Union
from alembic import op
-
# revision identifiers, used by Alembic.
-revision: str = 'freemium01'
-down_revision: Union[str, None] = 'grnotes01'
+revision: str = "freemium01"
+down_revision: Union[str, None] = "grnotes01"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
diff --git a/backend/alembic/versions/g6h7i8j9k0l1_add_mcp_call_logs_table.py b/backend/alembic/versions/g6h7i8j9k0l1_add_mcp_call_logs_table.py
index 44efb6c..b1ad141 100644
--- a/backend/alembic/versions/g6h7i8j9k0l1_add_mcp_call_logs_table.py
+++ b/backend/alembic/versions/g6h7i8j9k0l1_add_mcp_call_logs_table.py
@@ -5,16 +5,17 @@
Create Date: 2025-12-06
"""
+
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
+from alembic import op
# revision identifiers, used by Alembic.
-revision: str = 'g6h7i8j9k0l1'
-down_revision: Union[str, Sequence[str], None] = '2f8c2d246f32'
+revision: str = "g6h7i8j9k0l1"
+down_revision: Union[str, Sequence[str], None] = "2f8c2d246f32"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
@@ -22,58 +23,70 @@
def upgrade() -> None:
"""Create mcp_call_logs table."""
op.create_table(
- 'mcp_call_logs',
- sa.Column('id', postgresql.UUID(as_uuid=True), primary_key=True),
+ "mcp_call_logs",
+ sa.Column("id", postgresql.UUID(as_uuid=True), primary_key=True),
# Who made the call
- sa.Column('user_id', postgresql.UUID(as_uuid=True),
- sa.ForeignKey('users.id', ondelete='CASCADE'), nullable=False),
- sa.Column('api_key_id', postgresql.UUID(as_uuid=True),
- sa.ForeignKey('api_keys.id', ondelete='SET NULL'), nullable=True),
- sa.Column('api_key_name', sa.String(200), nullable=False),
+ sa.Column(
+ "user_id", postgresql.UUID(as_uuid=True), sa.ForeignKey("users.id", ondelete="CASCADE"), nullable=False
+ ),
+ sa.Column(
+ "api_key_id",
+ postgresql.UUID(as_uuid=True),
+ sa.ForeignKey("api_keys.id", ondelete="SET NULL"),
+ nullable=True,
+ ),
+ sa.Column("api_key_name", sa.String(200), nullable=False),
# Where (context)
- sa.Column('org_id', postgresql.UUID(as_uuid=True),
- sa.ForeignKey('organizations.id', ondelete='CASCADE'), nullable=False),
- sa.Column('project_id', postgresql.UUID(as_uuid=True),
- sa.ForeignKey('projects.id', ondelete='CASCADE'), nullable=False),
+ sa.Column(
+ "org_id",
+ postgresql.UUID(as_uuid=True),
+ sa.ForeignKey("organizations.id", ondelete="CASCADE"),
+ nullable=False,
+ ),
+ sa.Column(
+ "project_id",
+ postgresql.UUID(as_uuid=True),
+ sa.ForeignKey("projects.id", ondelete="CASCADE"),
+ nullable=False,
+ ),
# What was called
- sa.Column('tool_name', sa.String(100), nullable=False),
- sa.Column('jsonrpc_method', sa.String(100), nullable=False),
+ sa.Column("tool_name", sa.String(100), nullable=False),
+ sa.Column("jsonrpc_method", sa.String(100), nullable=False),
# Request/Response
- sa.Column('request_params', postgresql.JSONB, nullable=True),
- sa.Column('response_result', postgresql.JSONB, nullable=True),
- sa.Column('response_error', postgresql.JSONB, nullable=True),
- sa.Column('is_error', sa.Boolean(), nullable=False, server_default='false'),
+ sa.Column("request_params", postgresql.JSONB, nullable=True),
+ sa.Column("response_result", postgresql.JSONB, nullable=True),
+ sa.Column("response_error", postgresql.JSONB, nullable=True),
+ sa.Column("is_error", sa.Boolean(), nullable=False, server_default="false"),
# Timing
- sa.Column('started_at', sa.DateTime(timezone=True), nullable=False),
- sa.Column('finished_at', sa.DateTime(timezone=True), nullable=False),
- sa.Column('duration_ms', sa.Integer(), nullable=False),
+ sa.Column("started_at", sa.DateTime(timezone=True), nullable=False),
+ sa.Column("finished_at", sa.DateTime(timezone=True), nullable=False),
+ sa.Column("duration_ms", sa.Integer(), nullable=False),
# Metadata
- sa.Column('created_at', sa.DateTime(timezone=True),
- server_default=sa.func.now(), nullable=False),
+ sa.Column("created_at", sa.DateTime(timezone=True), server_default=sa.func.now(), nullable=False),
)
# Create indexes
- op.create_index('ix_mcp_call_logs_user_id', 'mcp_call_logs', ['user_id'])
- op.create_index('ix_mcp_call_logs_api_key_id', 'mcp_call_logs', ['api_key_id'])
- op.create_index('ix_mcp_call_logs_org_id', 'mcp_call_logs', ['org_id'])
- op.create_index('ix_mcp_call_logs_project_id', 'mcp_call_logs', ['project_id'])
- op.create_index('ix_mcp_call_logs_tool_name', 'mcp_call_logs', ['tool_name'])
- op.create_index('ix_mcp_call_logs_created_at', 'mcp_call_logs', ['created_at'])
+ op.create_index("ix_mcp_call_logs_user_id", "mcp_call_logs", ["user_id"])
+ op.create_index("ix_mcp_call_logs_api_key_id", "mcp_call_logs", ["api_key_id"])
+ op.create_index("ix_mcp_call_logs_org_id", "mcp_call_logs", ["org_id"])
+ op.create_index("ix_mcp_call_logs_project_id", "mcp_call_logs", ["project_id"])
+ op.create_index("ix_mcp_call_logs_tool_name", "mcp_call_logs", ["tool_name"])
+ op.create_index("ix_mcp_call_logs_created_at", "mcp_call_logs", ["created_at"])
# Composite indexes
- op.create_index('ix_mcp_call_logs_org_created', 'mcp_call_logs', ['org_id', 'created_at'])
- op.create_index('ix_mcp_call_logs_project_created', 'mcp_call_logs', ['project_id', 'created_at'])
- op.create_index('ix_mcp_call_logs_user_created', 'mcp_call_logs', ['user_id', 'created_at'])
+ op.create_index("ix_mcp_call_logs_org_created", "mcp_call_logs", ["org_id", "created_at"])
+ op.create_index("ix_mcp_call_logs_project_created", "mcp_call_logs", ["project_id", "created_at"])
+ op.create_index("ix_mcp_call_logs_user_created", "mcp_call_logs", ["user_id", "created_at"])
def downgrade() -> None:
"""Drop mcp_call_logs table."""
- op.drop_index('ix_mcp_call_logs_user_created')
- op.drop_index('ix_mcp_call_logs_project_created')
- op.drop_index('ix_mcp_call_logs_org_created')
- op.drop_index('ix_mcp_call_logs_created_at')
- op.drop_index('ix_mcp_call_logs_tool_name')
- op.drop_index('ix_mcp_call_logs_project_id')
- op.drop_index('ix_mcp_call_logs_org_id')
- op.drop_index('ix_mcp_call_logs_api_key_id')
- op.drop_index('ix_mcp_call_logs_user_id')
- op.drop_table('mcp_call_logs')
+ op.drop_index("ix_mcp_call_logs_user_created")
+ op.drop_index("ix_mcp_call_logs_project_created")
+ op.drop_index("ix_mcp_call_logs_org_created")
+ op.drop_index("ix_mcp_call_logs_created_at")
+ op.drop_index("ix_mcp_call_logs_tool_name")
+ op.drop_index("ix_mcp_call_logs_project_id")
+ op.drop_index("ix_mcp_call_logs_org_id")
+ op.drop_index("ix_mcp_call_logs_api_key_id")
+ op.drop_index("ix_mcp_call_logs_user_id")
+ op.drop_table("mcp_call_logs")
diff --git a/backend/alembic/versions/g8h9i0j1k2l3_add_triggered_by_and_duration.py b/backend/alembic/versions/g8h9i0j1k2l3_add_triggered_by_and_duration.py
index 870f10a..5c0fdd4 100644
--- a/backend/alembic/versions/g8h9i0j1k2l3_add_triggered_by_and_duration.py
+++ b/backend/alembic/versions/g8h9i0j1k2l3_add_triggered_by_and_duration.py
@@ -5,12 +5,14 @@
Create Date: 2025-12-21 14:00:00.000000
"""
+
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
+from alembic import op
+
# revision identifiers, used by Alembic.
revision: str = "g8h9i0j1k2l3"
down_revision: Union[str, None] = "f7g8h9i0j1k2"
@@ -24,9 +26,7 @@ def upgrade() -> None:
"jobs",
sa.Column("triggered_by_user_id", postgresql.UUID(as_uuid=True), nullable=True),
)
- op.create_index(
- "ix_jobs_triggered_by_user_id", "jobs", ["triggered_by_user_id"], unique=False
- )
+ op.create_index("ix_jobs_triggered_by_user_id", "jobs", ["triggered_by_user_id"], unique=False)
# Add triggered_by_user_id and duration_ms to llm_usage_logs table
op.add_column(
@@ -55,9 +55,7 @@ def upgrade() -> None:
def downgrade() -> None:
# Remove from llm_usage_logs
- op.drop_constraint(
- "fk_llm_usage_logs_triggered_by_user_id", "llm_usage_logs", type_="foreignkey"
- )
+ op.drop_constraint("fk_llm_usage_logs_triggered_by_user_id", "llm_usage_logs", type_="foreignkey")
op.drop_index("ix_llm_usage_logs_triggered_by_user_id", table_name="llm_usage_logs")
op.drop_column("llm_usage_logs", "duration_ms")
op.drop_column("llm_usage_logs", "triggered_by_user_id")
diff --git a/backend/alembic/versions/gc2d3e4f5g6h_add_thread_ai_error_fields.py b/backend/alembic/versions/gc2d3e4f5g6h_add_thread_ai_error_fields.py
index a412316..0fd5af3 100644
--- a/backend/alembic/versions/gc2d3e4f5g6h_add_thread_ai_error_fields.py
+++ b/backend/alembic/versions/gc2d3e4f5g6h_add_thread_ai_error_fields.py
@@ -5,25 +5,26 @@
Create Date: 2025-01-01 00:00:00.000000
"""
-from alembic import op
+
import sqlalchemy as sa
+from alembic import op
# revision identifiers, used by Alembic.
-revision = 'gc2d3e4f5g6h'
-down_revision = 'fb1c2d3e4f5g'
+revision = "gc2d3e4f5g6h"
+down_revision = "fb1c2d3e4f5g"
branch_labels = None
depends_on = None
def upgrade() -> None:
# Add AI error state columns for persisting error state across page refreshes
- op.add_column('threads', sa.Column('ai_error_message', sa.Text(), nullable=True))
- op.add_column('threads', sa.Column('ai_error_job_id', sa.String(), nullable=True))
- op.add_column('threads', sa.Column('ai_error_user_message', sa.Text(), nullable=True))
+ op.add_column("threads", sa.Column("ai_error_message", sa.Text(), nullable=True))
+ op.add_column("threads", sa.Column("ai_error_job_id", sa.String(), nullable=True))
+ op.add_column("threads", sa.Column("ai_error_user_message", sa.Text(), nullable=True))
def downgrade() -> None:
- op.drop_column('threads', 'ai_error_user_message')
- op.drop_column('threads', 'ai_error_job_id')
- op.drop_column('threads', 'ai_error_message')
+ op.drop_column("threads", "ai_error_user_message")
+ op.drop_column("threads", "ai_error_job_id")
+ op.drop_column("threads", "ai_error_message")
diff --git a/backend/alembic/versions/gen01flags_add_generation_status_flags.py b/backend/alembic/versions/gen01flags_add_generation_status_flags.py
index 76f1d15..caf4a8d 100644
--- a/backend/alembic/versions/gen01flags_add_generation_status_flags.py
+++ b/backend/alembic/versions/gen01flags_add_generation_status_flags.py
@@ -11,9 +11,9 @@
when users switch tabs during generation.
"""
-from alembic import op
import sqlalchemy as sa
+from alembic import op
# revision identifiers, used by Alembic.
revision = "gen01flags"
diff --git a/backend/alembic/versions/ghoauth01_add_github_oauth_states.py b/backend/alembic/versions/ghoauth01_add_github_oauth_states.py
index 3570aa6..4db2d64 100644
--- a/backend/alembic/versions/ghoauth01_add_github_oauth_states.py
+++ b/backend/alembic/versions/ghoauth01_add_github_oauth_states.py
@@ -10,10 +10,10 @@
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects.postgresql import UUID
+from alembic import op
revision: str = "ghoauth01"
down_revision: Union[str, Sequence[str], None] = "ba27eeb7dc05"
diff --git a/backend/alembic/versions/ghoauth02_add_github_oauth_to_platform_settings.py b/backend/alembic/versions/ghoauth02_add_github_oauth_to_platform_settings.py
index 613668e..7914465 100644
--- a/backend/alembic/versions/ghoauth02_add_github_oauth_to_platform_settings.py
+++ b/backend/alembic/versions/ghoauth02_add_github_oauth_to_platform_settings.py
@@ -15,9 +15,9 @@
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
+from alembic import op
# revision identifiers, used by Alembic.
revision: str = "ghoauth02"
diff --git a/backend/alembic/versions/grdbr01_add_grounding_file_branches.py b/backend/alembic/versions/grdbr01_add_grounding_file_branches.py
index 2b2beac..0d3739e 100644
--- a/backend/alembic/versions/grdbr01_add_grounding_file_branches.py
+++ b/backend/alembic/versions/grdbr01_add_grounding_file_branches.py
@@ -8,10 +8,10 @@
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
+from alembic import op
# revision identifiers, used by Alembic.
revision: str = "grdbr01"
diff --git a/backend/alembic/versions/grdbr02_add_is_merging_flag.py b/backend/alembic/versions/grdbr02_add_is_merging_flag.py
index 7d93440..b9007a4 100644
--- a/backend/alembic/versions/grdbr02_add_is_merging_flag.py
+++ b/backend/alembic/versions/grdbr02_add_is_merging_flag.py
@@ -8,9 +8,9 @@
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
+from alembic import op
# revision identifiers, used by Alembic.
revision: str = "grdbr02"
diff --git a/backend/alembic/versions/grdbr03_add_content_updated_at.py b/backend/alembic/versions/grdbr03_add_content_updated_at.py
index 10d93a3..a6ac1eb 100644
--- a/backend/alembic/versions/grdbr03_add_content_updated_at.py
+++ b/backend/alembic/versions/grdbr03_add_content_updated_at.py
@@ -8,9 +8,9 @@
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
+from alembic import op
# revision identifiers, used by Alembic.
revision: str = "grdbr03"
@@ -31,9 +31,7 @@ def upgrade() -> None:
)
# Set existing rows to use their updated_at value
- op.execute(
- "UPDATE grounding_file_branches SET content_updated_at = updated_at WHERE content_updated_at IS NULL"
- )
+ op.execute("UPDATE grounding_file_branches SET content_updated_at = updated_at WHERE content_updated_at IS NULL")
# Make the column non-nullable with a default for new rows
op.alter_column(
diff --git a/backend/alembic/versions/grdbr04_add_global_content_updated_at.py b/backend/alembic/versions/grdbr04_add_global_content_updated_at.py
index 94cba4b..330efbb 100644
--- a/backend/alembic/versions/grdbr04_add_global_content_updated_at.py
+++ b/backend/alembic/versions/grdbr04_add_global_content_updated_at.py
@@ -8,9 +8,9 @@
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
+from alembic import op
# revision identifiers, used by Alembic.
revision: str = "grdbr04"
@@ -31,9 +31,7 @@ def upgrade() -> None:
)
# Set existing rows to use their updated_at value
- op.execute(
- "UPDATE grounding_files SET content_updated_at = updated_at WHERE content_updated_at IS NULL"
- )
+ op.execute("UPDATE grounding_files SET content_updated_at = updated_at WHERE content_updated_at IS NULL")
# Make the column non-nullable with a default for new rows
op.alter_column(
diff --git a/backend/alembic/versions/grdbr05_add_last_synced_with_global_at.py b/backend/alembic/versions/grdbr05_add_last_synced_with_global_at.py
index 563800e..5ca1bce 100644
--- a/backend/alembic/versions/grdbr05_add_last_synced_with_global_at.py
+++ b/backend/alembic/versions/grdbr05_add_last_synced_with_global_at.py
@@ -8,9 +8,9 @@
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
+from alembic import op
# revision identifiers, used by Alembic.
revision: str = "grdbr05"
diff --git a/backend/alembic/versions/grnotes01_add_grounding_note_versions.py b/backend/alembic/versions/grnotes01_add_grounding_note_versions.py
index 0cbf21b..f6b25d8 100644
--- a/backend/alembic/versions/grnotes01_add_grounding_note_versions.py
+++ b/backend/alembic/versions/grnotes01_add_grounding_note_versions.py
@@ -5,15 +5,17 @@
Create Date: 2025-01-28
"""
+
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
+from alembic import op
+
# revision identifiers, used by Alembic.
-revision: str = 'grnotes01'
-down_revision: Union[str, None] = 'phc04'
+revision: str = "grnotes01"
+down_revision: Union[str, None] = "phc04"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
@@ -21,27 +23,27 @@
def upgrade() -> None:
# Create grounding_note_versions table
op.create_table(
- 'grounding_note_versions',
- sa.Column('id', postgresql.UUID(as_uuid=True), nullable=False),
- sa.Column('project_id', postgresql.UUID(as_uuid=True), nullable=False),
- sa.Column('version', sa.Integer(), nullable=False),
- sa.Column('content_markdown', sa.Text(), nullable=False),
- sa.Column('is_active', sa.Boolean(), nullable=False, server_default='true'),
- sa.Column('edit_source', sa.String(length=50), nullable=True),
- sa.Column('created_by', postgresql.UUID(as_uuid=True), nullable=True),
- sa.Column('created_at', sa.DateTime(timezone=True), nullable=False, server_default=sa.func.now()),
- sa.ForeignKeyConstraint(['created_by'], ['users.id'], ondelete='SET NULL'),
- sa.ForeignKeyConstraint(['project_id'], ['projects.id'], ondelete='CASCADE'),
- sa.PrimaryKeyConstraint('id'),
- sa.UniqueConstraint('project_id', 'version', name='uq_grounding_note_version'),
+ "grounding_note_versions",
+ sa.Column("id", postgresql.UUID(as_uuid=True), nullable=False),
+ sa.Column("project_id", postgresql.UUID(as_uuid=True), nullable=False),
+ sa.Column("version", sa.Integer(), nullable=False),
+ sa.Column("content_markdown", sa.Text(), nullable=False),
+ sa.Column("is_active", sa.Boolean(), nullable=False, server_default="true"),
+ sa.Column("edit_source", sa.String(length=50), nullable=True),
+ sa.Column("created_by", postgresql.UUID(as_uuid=True), nullable=True),
+ sa.Column("created_at", sa.DateTime(timezone=True), nullable=False, server_default=sa.func.now()),
+ sa.ForeignKeyConstraint(["created_by"], ["users.id"], ondelete="SET NULL"),
+ sa.ForeignKeyConstraint(["project_id"], ["projects.id"], ondelete="CASCADE"),
+ sa.PrimaryKeyConstraint("id"),
+ sa.UniqueConstraint("project_id", "version", name="uq_grounding_note_version"),
)
# Create indexes
- op.create_index('ix_grounding_note_versions_project_id', 'grounding_note_versions', ['project_id'])
- op.create_index('ix_grounding_note_active', 'grounding_note_versions', ['project_id', 'is_active'])
+ op.create_index("ix_grounding_note_versions_project_id", "grounding_note_versions", ["project_id"])
+ op.create_index("ix_grounding_note_active", "grounding_note_versions", ["project_id", "is_active"])
def downgrade() -> None:
- op.drop_index('ix_grounding_note_active', table_name='grounding_note_versions')
- op.drop_index('ix_grounding_note_versions_project_id', table_name='grounding_note_versions')
- op.drop_table('grounding_note_versions')
+ op.drop_index("ix_grounding_note_active", table_name="grounding_note_versions")
+ op.drop_index("ix_grounding_note_versions_project_id", table_name="grounding_note_versions")
+ op.drop_table("grounding_note_versions")
diff --git a/backend/alembic/versions/h7i8j9k0l1m2_add_vfs_metadata_table.py b/backend/alembic/versions/h7i8j9k0l1m2_add_vfs_metadata_table.py
index 89afe14..0fb505e 100644
--- a/backend/alembic/versions/h7i8j9k0l1m2_add_vfs_metadata_table.py
+++ b/backend/alembic/versions/h7i8j9k0l1m2_add_vfs_metadata_table.py
@@ -8,10 +8,10 @@
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
+from alembic import op
# revision identifiers, used by Alembic.
revision: str = "h7i8j9k0l1m2"
diff --git a/backend/alembic/versions/h9i0j1k2l3m4_add_thread_item_summary_snapshots.py b/backend/alembic/versions/h9i0j1k2l3m4_add_thread_item_summary_snapshots.py
index fb24400..cbfd0fb 100644
--- a/backend/alembic/versions/h9i0j1k2l3m4_add_thread_item_summary_snapshots.py
+++ b/backend/alembic/versions/h9i0j1k2l3m4_add_thread_item_summary_snapshots.py
@@ -5,11 +5,12 @@
Create Date: 2025-12-21
"""
+
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
+from alembic import op
# revision identifiers, used by Alembic.
revision: str = "h9i0j1k2l3m4"
diff --git a/backend/alembic/versions/hd3e4f5g6h7i_add_session_ai_error_fields.py b/backend/alembic/versions/hd3e4f5g6h7i_add_session_ai_error_fields.py
index 0801e7c..26f9afa 100644
--- a/backend/alembic/versions/hd3e4f5g6h7i_add_session_ai_error_fields.py
+++ b/backend/alembic/versions/hd3e4f5g6h7i_add_session_ai_error_fields.py
@@ -5,26 +5,27 @@
Create Date: 2025-01-01 00:00:00.000000
"""
-from alembic import op
+
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
+from alembic import op
# revision identifiers, used by Alembic.
-revision = 'hd3e4f5g6h7i'
-down_revision = 'gc2d3e4f5g6h'
+revision = "hd3e4f5g6h7i"
+down_revision = "gc2d3e4f5g6h"
branch_labels = None
depends_on = None
def upgrade() -> None:
# Add AI error state columns for persisting error state across page refreshes
- op.add_column('user_question_sessions', sa.Column('ai_error_message', sa.Text(), nullable=True))
- op.add_column('user_question_sessions', sa.Column('ai_error_job_id', postgresql.UUID(as_uuid=True), nullable=True))
- op.add_column('user_question_sessions', sa.Column('ai_error_user_prompt', sa.Text(), nullable=True))
+ op.add_column("user_question_sessions", sa.Column("ai_error_message", sa.Text(), nullable=True))
+ op.add_column("user_question_sessions", sa.Column("ai_error_job_id", postgresql.UUID(as_uuid=True), nullable=True))
+ op.add_column("user_question_sessions", sa.Column("ai_error_user_prompt", sa.Text(), nullable=True))
def downgrade() -> None:
- op.drop_column('user_question_sessions', 'ai_error_user_prompt')
- op.drop_column('user_question_sessions', 'ai_error_job_id')
- op.drop_column('user_question_sessions', 'ai_error_message')
+ op.drop_column("user_question_sessions", "ai_error_user_prompt")
+ op.drop_column("user_question_sessions", "ai_error_job_id")
+ op.drop_column("user_question_sessions", "ai_error_message")
diff --git a/backend/alembic/versions/i0j1k2l3m4n5_add_mcp_image_submissions.py b/backend/alembic/versions/i0j1k2l3m4n5_add_mcp_image_submissions.py
index 51cfd5e..988c592 100644
--- a/backend/alembic/versions/i0j1k2l3m4n5_add_mcp_image_submissions.py
+++ b/backend/alembic/versions/i0j1k2l3m4n5_add_mcp_image_submissions.py
@@ -5,15 +5,17 @@
Create Date: 2025-12-22 12:00:00.000000
"""
+
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
+from alembic import op
+
# revision identifiers, used by Alembic.
-revision: str = 'i0j1k2l3m4n5'
-down_revision: Union[str, None] = 'h9i0j1k2l3m4'
+revision: str = "i0j1k2l3m4n5"
+down_revision: Union[str, None] = "h9i0j1k2l3m4"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
@@ -21,38 +23,40 @@
def upgrade() -> None:
"""Create mcp_image_submissions table for staging VFS image uploads."""
op.create_table(
- 'mcp_image_submissions',
- sa.Column('id', postgresql.UUID(as_uuid=True), nullable=False),
- sa.Column('submission_id', sa.String(length=100), nullable=False),
- sa.Column('project_id', postgresql.UUID(as_uuid=True), nullable=False),
- sa.Column('feature_id', postgresql.UUID(as_uuid=True), nullable=False),
- sa.Column('user_id', postgresql.UUID(as_uuid=True), nullable=False),
- sa.Column('filename', sa.String(length=255), nullable=False),
- sa.Column('content_type', sa.String(length=50), nullable=False),
- sa.Column('image_data', sa.LargeBinary(), nullable=False),
- sa.Column('size_bytes', sa.Integer(), nullable=False),
- sa.Column('created_at', sa.DateTime(timezone=True), nullable=False),
- sa.Column('expires_at', sa.DateTime(timezone=True), nullable=False),
- sa.ForeignKeyConstraint(['feature_id'], ['features.id'], ondelete='CASCADE'),
- sa.ForeignKeyConstraint(['project_id'], ['projects.id'], ondelete='CASCADE'),
- sa.ForeignKeyConstraint(['user_id'], ['users.id'], ondelete='CASCADE'),
- sa.PrimaryKeyConstraint('id'),
+ "mcp_image_submissions",
+ sa.Column("id", postgresql.UUID(as_uuid=True), nullable=False),
+ sa.Column("submission_id", sa.String(length=100), nullable=False),
+ sa.Column("project_id", postgresql.UUID(as_uuid=True), nullable=False),
+ sa.Column("feature_id", postgresql.UUID(as_uuid=True), nullable=False),
+ sa.Column("user_id", postgresql.UUID(as_uuid=True), nullable=False),
+ sa.Column("filename", sa.String(length=255), nullable=False),
+ sa.Column("content_type", sa.String(length=50), nullable=False),
+ sa.Column("image_data", sa.LargeBinary(), nullable=False),
+ sa.Column("size_bytes", sa.Integer(), nullable=False),
+ sa.Column("created_at", sa.DateTime(timezone=True), nullable=False),
+ sa.Column("expires_at", sa.DateTime(timezone=True), nullable=False),
+ sa.ForeignKeyConstraint(["feature_id"], ["features.id"], ondelete="CASCADE"),
+ sa.ForeignKeyConstraint(["project_id"], ["projects.id"], ondelete="CASCADE"),
+ sa.ForeignKeyConstraint(["user_id"], ["users.id"], ondelete="CASCADE"),
+ sa.PrimaryKeyConstraint("id"),
)
# Indexes
- op.create_index('ix_mcp_image_submissions_submission_id', 'mcp_image_submissions', ['submission_id'], unique=True)
- op.create_index('ix_mcp_image_submissions_project_id', 'mcp_image_submissions', ['project_id'], unique=False)
- op.create_index('ix_mcp_image_submissions_feature_id', 'mcp_image_submissions', ['feature_id'], unique=False)
- op.create_index('ix_mcp_image_submissions_user_id', 'mcp_image_submissions', ['user_id'], unique=False)
- op.create_index('ix_mcp_image_submissions_expires', 'mcp_image_submissions', ['expires_at'], unique=False)
- op.create_index('ix_mcp_image_submissions_feature_user', 'mcp_image_submissions', ['feature_id', 'user_id'], unique=False)
+ op.create_index("ix_mcp_image_submissions_submission_id", "mcp_image_submissions", ["submission_id"], unique=True)
+ op.create_index("ix_mcp_image_submissions_project_id", "mcp_image_submissions", ["project_id"], unique=False)
+ op.create_index("ix_mcp_image_submissions_feature_id", "mcp_image_submissions", ["feature_id"], unique=False)
+ op.create_index("ix_mcp_image_submissions_user_id", "mcp_image_submissions", ["user_id"], unique=False)
+ op.create_index("ix_mcp_image_submissions_expires", "mcp_image_submissions", ["expires_at"], unique=False)
+ op.create_index(
+ "ix_mcp_image_submissions_feature_user", "mcp_image_submissions", ["feature_id", "user_id"], unique=False
+ )
def downgrade() -> None:
"""Drop mcp_image_submissions table."""
- op.drop_index('ix_mcp_image_submissions_feature_user', table_name='mcp_image_submissions')
- op.drop_index('ix_mcp_image_submissions_expires', table_name='mcp_image_submissions')
- op.drop_index('ix_mcp_image_submissions_user_id', table_name='mcp_image_submissions')
- op.drop_index('ix_mcp_image_submissions_feature_id', table_name='mcp_image_submissions')
- op.drop_index('ix_mcp_image_submissions_project_id', table_name='mcp_image_submissions')
- op.drop_index('ix_mcp_image_submissions_submission_id', table_name='mcp_image_submissions')
- op.drop_table('mcp_image_submissions')
+ op.drop_index("ix_mcp_image_submissions_feature_user", table_name="mcp_image_submissions")
+ op.drop_index("ix_mcp_image_submissions_expires", table_name="mcp_image_submissions")
+ op.drop_index("ix_mcp_image_submissions_user_id", table_name="mcp_image_submissions")
+ op.drop_index("ix_mcp_image_submissions_feature_id", table_name="mcp_image_submissions")
+ op.drop_index("ix_mcp_image_submissions_project_id", table_name="mcp_image_submissions")
+ op.drop_index("ix_mcp_image_submissions_submission_id", table_name="mcp_image_submissions")
+ op.drop_table("mcp_image_submissions")
diff --git a/backend/alembic/versions/i8j9k0l1m2n3_add_team_roles_tables.py b/backend/alembic/versions/i8j9k0l1m2n3_add_team_roles_tables.py
index d061f15..98b4636 100644
--- a/backend/alembic/versions/i8j9k0l1m2n3_add_team_roles_tables.py
+++ b/backend/alembic/versions/i8j9k0l1m2n3_add_team_roles_tables.py
@@ -8,10 +8,10 @@
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
+from alembic import op
# revision identifiers, used by Alembic.
revision: str = "i8j9k0l1m2n3"
diff --git a/backend/alembic/versions/icshare01_add_integration_config_sharing.py b/backend/alembic/versions/icshare01_add_integration_config_sharing.py
index 36b8b7f..b826aef 100644
--- a/backend/alembic/versions/icshare01_add_integration_config_sharing.py
+++ b/backend/alembic/versions/icshare01_add_integration_config_sharing.py
@@ -12,10 +12,10 @@
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects.postgresql import UUID
+from alembic import op
revision: str = "icshare01"
down_revision: Union[str, Sequence[str], None] = "fd30aac0413b"
diff --git a/backend/alembic/versions/ie4f5g6h7i8j_convert_trial_to_freemium.py b/backend/alembic/versions/ie4f5g6h7i8j_convert_trial_to_freemium.py
index a6aef70..85814df 100644
--- a/backend/alembic/versions/ie4f5g6h7i8j_convert_trial_to_freemium.py
+++ b/backend/alembic/versions/ie4f5g6h7i8j_convert_trial_to_freemium.py
@@ -8,14 +8,14 @@
Create Date: 2025-01-01 00:00:00.000000
"""
-from alembic import op
+
import sqlalchemy as sa
-from sqlalchemy.dialects import postgresql
+from alembic import op
# revision identifiers, used by Alembic.
-revision = 'ie4f5g6h7i8j'
-down_revision = 'hd3e4f5g6h7i'
+revision = "ie4f5g6h7i8j"
+down_revision = "hd3e4f5g6h7i"
branch_labels = None
depends_on = None
@@ -23,45 +23,45 @@
def upgrade() -> None:
# 1. Add freemium configuration to platform_settings
op.add_column(
- 'platform_settings',
+ "platform_settings",
sa.Column(
- 'freemium_initial_tokens',
+ "freemium_initial_tokens",
sa.Integer(),
nullable=False,
- server_default='5000000',
- comment='Initial tokens granted to new users on signup'
- )
+ server_default="5000000",
+ comment="Initial tokens granted to new users on signup",
+ ),
)
op.add_column(
- 'platform_settings',
+ "platform_settings",
sa.Column(
- 'freemium_weekly_topup_tokens',
+ "freemium_weekly_topup_tokens",
sa.Integer(),
nullable=False,
- server_default='10000000',
- comment='Tokens added each Monday (additive, up to max)'
- )
+ server_default="10000000",
+ comment="Tokens added each Monday (additive, up to max)",
+ ),
)
op.add_column(
- 'platform_settings',
+ "platform_settings",
sa.Column(
- 'freemium_max_tokens',
+ "freemium_max_tokens",
sa.Integer(),
nullable=False,
- server_default='10000000',
- comment='Maximum token balance for freemium users'
- )
+ server_default="10000000",
+ comment="Maximum token balance for freemium users",
+ ),
)
# 2. Add last top-up tracking to organizations
op.add_column(
- 'organizations',
+ "organizations",
sa.Column(
- 'last_freemium_topup_at',
+ "last_freemium_topup_at",
sa.DateTime(timezone=True),
nullable=True,
- comment='When the org last received a weekly freemium token top-up'
- )
+ comment="When the org last received a weekly freemium token top-up",
+ ),
)
# 3. Convert existing trial orgs to freemium
@@ -87,7 +87,7 @@ def downgrade() -> None:
""")
# Remove columns
- op.drop_column('organizations', 'last_freemium_topup_at')
- op.drop_column('platform_settings', 'freemium_max_tokens')
- op.drop_column('platform_settings', 'freemium_weekly_topup_tokens')
- op.drop_column('platform_settings', 'freemium_initial_tokens')
+ op.drop_column("organizations", "last_freemium_topup_at")
+ op.drop_column("platform_settings", "freemium_max_tokens")
+ op.drop_column("platform_settings", "freemium_weekly_topup_tokens")
+ op.drop_column("platform_settings", "freemium_initial_tokens")
diff --git a/backend/alembic/versions/impl01_add_implementations_table.py b/backend/alembic/versions/impl01_add_implementations_table.py
index aaee021..2ce7ca5 100644
--- a/backend/alembic/versions/impl01_add_implementations_table.py
+++ b/backend/alembic/versions/impl01_add_implementations_table.py
@@ -6,14 +6,14 @@
"""
-from typing import Sequence, Union
import uuid
from datetime import datetime, timezone
+from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
+from alembic import op
# revision identifiers, used by Alembic.
revision: str = "impl01"
@@ -134,7 +134,7 @@ def upgrade() -> None:
"notes_updated_at": notes_updated_at,
"created_by": str(created_by) if created_by else None,
"created_at": created_at or datetime.now(timezone.utc),
- }
+ },
)
diff --git a/backend/alembic/versions/impl02_add_implementation_created_enum.py b/backend/alembic/versions/impl02_add_implementation_created_enum.py
index 0873c34..001ac65 100644
--- a/backend/alembic/versions/impl02_add_implementation_created_enum.py
+++ b/backend/alembic/versions/impl02_add_implementation_created_enum.py
@@ -5,11 +5,11 @@
Create Date: 2025-01-02
"""
+
from typing import Sequence, Union
from alembic import op
-
# revision identifiers, used by Alembic.
revision: str = "impl02"
down_revision: Union[str, None] = "impl01"
diff --git a/backend/alembic/versions/implcs01_add_completion_summary_to_implementations.py b/backend/alembic/versions/implcs01_add_completion_summary_to_implementations.py
index 6b47566..485fc0b 100644
--- a/backend/alembic/versions/implcs01_add_completion_summary_to_implementations.py
+++ b/backend/alembic/versions/implcs01_add_completion_summary_to_implementations.py
@@ -5,23 +5,21 @@
Create Date: 2026-01-09
"""
-from alembic import op
+
import sqlalchemy as sa
+from alembic import op
# revision identifiers, used by Alembic.
-revision = 'implcs01'
-down_revision = '8eebe3a100d3'
+revision = "implcs01"
+down_revision = "8eebe3a100d3"
branch_labels = None
depends_on = None
def upgrade() -> None:
- op.add_column(
- 'implementations',
- sa.Column('completion_summary', sa.Text(), nullable=True)
- )
+ op.add_column("implementations", sa.Column("completion_summary", sa.Text(), nullable=True))
def downgrade() -> None:
- op.drop_column('implementations', 'completion_summary')
+ op.drop_column("implementations", "completion_summary")
diff --git a/backend/alembic/versions/j1k2l3m4n5o6_add_form_drafts_table.py b/backend/alembic/versions/j1k2l3m4n5o6_add_form_drafts_table.py
index ad2a16c..1ef51af 100644
--- a/backend/alembic/versions/j1k2l3m4n5o6_add_form_drafts_table.py
+++ b/backend/alembic/versions/j1k2l3m4n5o6_add_form_drafts_table.py
@@ -5,12 +5,14 @@
Create Date: 2025-12-23 12:00:00.000000
"""
+
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
+from alembic import op
+
# revision identifiers, used by Alembic.
revision: str = "j1k2l3m4n5o6"
down_revision: Union[str, None] = "i0j1k2l3m4n5"
diff --git a/backend/alembic/versions/j9k0l1m2n3o4_make_team_roles_dynamic.py b/backend/alembic/versions/j9k0l1m2n3o4_make_team_roles_dynamic.py
index 04c8c70..caef178 100644
--- a/backend/alembic/versions/j9k0l1m2n3o4_make_team_roles_dynamic.py
+++ b/backend/alembic/versions/j9k0l1m2n3o4_make_team_roles_dynamic.py
@@ -9,11 +9,10 @@
from typing import Sequence, Union
from uuid import uuid4
-from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
-from sqlalchemy.sql import table, column
+from alembic import op
# revision identifiers, used by Alembic.
revision: str = "j9k0l1m2n3o4"
@@ -59,18 +58,14 @@ def upgrade() -> None:
for role_key, title, description, order_index in DEFAULT_ROLES:
# Check if this role already exists for this org
existing = conn.execute(
- sa.text(
- "SELECT id FROM team_role_definitions WHERE org_id = :org_id AND role_key = :role_key"
- ),
+ sa.text("SELECT id FROM team_role_definitions WHERE org_id = :org_id AND role_key = :role_key"),
{"org_id": org_id, "role_key": role_key},
).fetchone()
if existing:
# Mark existing as default
conn.execute(
- sa.text(
- "UPDATE team_role_definitions SET is_default = true WHERE id = :id"
- ),
+ sa.text("UPDATE team_role_definitions SET is_default = true WHERE id = :id"),
{"id": existing[0]},
)
else:
diff --git a/backend/alembic/versions/k0l1m2n3o4p5_add_grounding_files_table.py b/backend/alembic/versions/k0l1m2n3o4p5_add_grounding_files_table.py
index bc2acef..95e3fd7 100644
--- a/backend/alembic/versions/k0l1m2n3o4p5_add_grounding_files_table.py
+++ b/backend/alembic/versions/k0l1m2n3o4p5_add_grounding_files_table.py
@@ -8,10 +8,10 @@
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
+from alembic import op
# revision identifiers, used by Alembic.
revision: str = "k0l1m2n3o4p5"
diff --git a/backend/alembic/versions/l1m2n3o4p5q6_add_feature_content_versions.py b/backend/alembic/versions/l1m2n3o4p5q6_add_feature_content_versions.py
index 75eeb0e..57bef10 100644
--- a/backend/alembic/versions/l1m2n3o4p5q6_add_feature_content_versions.py
+++ b/backend/alembic/versions/l1m2n3o4p5q6_add_feature_content_versions.py
@@ -6,14 +6,14 @@
"""
-from typing import Sequence, Union
import uuid
from datetime import datetime, timezone
+from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
+from alembic import op
# revision identifiers, used by Alembic.
revision: str = "l1m2n3o4p5q6"
@@ -25,7 +25,9 @@
def upgrade() -> None:
# Create the enum type
content_type_enum = postgresql.ENUM(
- "spec", "prompt_plan", "implementation_notes",
+ "spec",
+ "prompt_plan",
+ "implementation_notes",
name="featurecontenttype",
create_type=False,
)
@@ -119,7 +121,7 @@ def upgrade() -> None:
"content": spec_text,
"created_by": str(created_by) if created_by else None,
"created_at": created_at or datetime.now(timezone.utc),
- }
+ },
)
if prompt_plan_text:
@@ -135,7 +137,7 @@ def upgrade() -> None:
"content": prompt_plan_text,
"created_by": str(created_by) if created_by else None,
"created_at": created_at or datetime.now(timezone.utc),
- }
+ },
)
if implementation_notes:
@@ -151,7 +153,7 @@ def upgrade() -> None:
"content": implementation_notes,
"created_by": str(created_by) if created_by else None,
"created_at": created_at or datetime.now(timezone.utc),
- }
+ },
)
diff --git a/backend/alembic/versions/m2n3o4p5q6r7_add_feature_import_fields.py b/backend/alembic/versions/m2n3o4p5q6r7_add_feature_import_fields.py
index 90b00de..8cd2bc5 100644
--- a/backend/alembic/versions/m2n3o4p5q6r7_add_feature_import_fields.py
+++ b/backend/alembic/versions/m2n3o4p5q6r7_add_feature_import_fields.py
@@ -8,10 +8,10 @@
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
+from alembic import op
# revision identifiers, used by Alembic.
revision: str = "m2n3o4p5q6r7"
diff --git a/backend/alembic/versions/mcqans01_add_mcq_answer_item_type.py b/backend/alembic/versions/mcqans01_add_mcq_answer_item_type.py
index 507ad51..b18a6fe 100644
--- a/backend/alembic/versions/mcqans01_add_mcq_answer_item_type.py
+++ b/backend/alembic/versions/mcqans01_add_mcq_answer_item_type.py
@@ -5,11 +5,11 @@
Create Date: 2024-12-29
"""
+
from typing import Sequence, Union
from alembic import op
-
# revision identifiers, used by Alembic.
revision: str = "mcqans01"
down_revision: Union[str, None] = "ppd04"
diff --git a/backend/alembic/versions/n3o4p5q6r7s8_add_identity_provider_tables.py b/backend/alembic/versions/n3o4p5q6r7s8_add_identity_provider_tables.py
index 1f60a4c..78ddb87 100644
--- a/backend/alembic/versions/n3o4p5q6r7s8_add_identity_provider_tables.py
+++ b/backend/alembic/versions/n3o4p5q6r7s8_add_identity_provider_tables.py
@@ -8,10 +8,10 @@
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
+from alembic import op
# revision identifiers, used by Alembic.
revision: str = "n3o4p5q6r7s8"
diff --git a/backend/alembic/versions/o4p5q6r7s8t9_seed_identity_providers.py b/backend/alembic/versions/o4p5q6r7s8t9_seed_identity_providers.py
index ca3e86e..cc2eaf1 100644
--- a/backend/alembic/versions/o4p5q6r7s8t9_seed_identity_providers.py
+++ b/backend/alembic/versions/o4p5q6r7s8t9_seed_identity_providers.py
@@ -5,10 +5,12 @@
Create Date: 2025-01-20
"""
-from alembic import op
-import sqlalchemy as sa
-from datetime import datetime, timezone
import uuid
+from datetime import datetime, timezone
+
+import sqlalchemy as sa
+
+from alembic import op
revision = "o4p5q6r7s8t9"
down_revision = "n3o4p5q6r7s8"
diff --git a/backend/alembic/versions/p5q6r7s8t9u0_add_platform_settings_tables.py b/backend/alembic/versions/p5q6r7s8t9u0_add_platform_settings_tables.py
index 2289775..3838dbb 100644
--- a/backend/alembic/versions/p5q6r7s8t9u0_add_platform_settings_tables.py
+++ b/backend/alembic/versions/p5q6r7s8t9u0_add_platform_settings_tables.py
@@ -5,11 +5,13 @@
Create Date: 2025-01-20
"""
-from alembic import op
+import uuid
+from datetime import datetime, timezone
+
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
-from datetime import datetime, timezone
-import uuid
+
+from alembic import op
revision = "p5q6r7s8t9u0"
down_revision = "o4p5q6r7s8t9"
@@ -40,9 +42,7 @@ def upgrade() -> None:
nullable=False,
default=lambda: datetime.now(timezone.utc),
),
- sa.UniqueConstraint(
- "connector_type", "provider", "display_name", name="uq_platform_connector_name"
- ),
+ sa.UniqueConstraint("connector_type", "provider", "display_name", name="uq_platform_connector_name"),
)
# Create platform_settings table (singleton)
diff --git a/backend/alembic/versions/pc01_rename_pre_phase_to_project_chat.py b/backend/alembic/versions/pc01_rename_pre_phase_to_project_chat.py
index d30a8e4..79a2b1e 100644
--- a/backend/alembic/versions/pc01_rename_pre_phase_to_project_chat.py
+++ b/backend/alembic/versions/pc01_rename_pre_phase_to_project_chat.py
@@ -15,7 +15,6 @@
from alembic import op
-
# revision identifiers, used by Alembic.
revision: str = "pc01"
down_revision: Union[str, None] = "ghoauth02"
@@ -39,31 +38,20 @@ def upgrade() -> None:
)
# Rename indexes on project_chats (formerly pre_phase_discussions)
+ op.execute("ALTER INDEX IF EXISTS ix_pre_phase_discussions_org_id RENAME TO ix_project_chats_org_id")
+ op.execute("ALTER INDEX IF EXISTS ix_pre_phase_discussions_project_id RENAME TO ix_project_chats_project_id")
op.execute(
- "ALTER INDEX IF EXISTS ix_pre_phase_discussions_org_id "
- "RENAME TO ix_project_chats_org_id"
- )
- op.execute(
- "ALTER INDEX IF EXISTS ix_pre_phase_discussions_project_id "
- "RENAME TO ix_project_chats_project_id"
- )
- op.execute(
- "ALTER INDEX IF EXISTS ix_pre_phase_discussions_created_phase_id "
- "RENAME TO ix_project_chats_created_phase_id"
+ "ALTER INDEX IF EXISTS ix_pre_phase_discussions_created_phase_id RENAME TO ix_project_chats_created_phase_id"
)
op.execute(
"ALTER INDEX IF EXISTS ix_pre_phase_discussions_created_project_id "
"RENAME TO ix_project_chats_created_project_id"
)
- op.execute(
- "ALTER INDEX IF EXISTS ix_pre_phase_discussions_short_id "
- "RENAME TO ix_project_chats_short_id"
- )
+ op.execute("ALTER INDEX IF EXISTS ix_pre_phase_discussions_short_id RENAME TO ix_project_chats_short_id")
# Rename indexes on project_chat_messages (formerly pre_phase_messages)
op.execute(
- "ALTER INDEX IF EXISTS ix_pre_phase_messages_discussion_id "
- "RENAME TO ix_project_chat_messages_project_chat_id"
+ "ALTER INDEX IF EXISTS ix_pre_phase_messages_discussion_id RENAME TO ix_project_chat_messages_project_chat_id"
)
# Rename index on code_exploration_results
@@ -96,31 +84,20 @@ def downgrade() -> None:
# Rename indexes on project_chat_messages back
op.execute(
- "ALTER INDEX IF EXISTS ix_project_chat_messages_project_chat_id "
- "RENAME TO ix_pre_phase_messages_discussion_id"
+ "ALTER INDEX IF EXISTS ix_project_chat_messages_project_chat_id RENAME TO ix_pre_phase_messages_discussion_id"
)
# Rename indexes on project_chats back
- op.execute(
- "ALTER INDEX IF EXISTS ix_project_chats_short_id "
- "RENAME TO ix_pre_phase_discussions_short_id"
- )
+ op.execute("ALTER INDEX IF EXISTS ix_project_chats_short_id RENAME TO ix_pre_phase_discussions_short_id")
op.execute(
"ALTER INDEX IF EXISTS ix_project_chats_created_project_id "
"RENAME TO ix_pre_phase_discussions_created_project_id"
)
op.execute(
- "ALTER INDEX IF EXISTS ix_project_chats_created_phase_id "
- "RENAME TO ix_pre_phase_discussions_created_phase_id"
- )
- op.execute(
- "ALTER INDEX IF EXISTS ix_project_chats_project_id "
- "RENAME TO ix_pre_phase_discussions_project_id"
- )
- op.execute(
- "ALTER INDEX IF EXISTS ix_project_chats_org_id "
- "RENAME TO ix_pre_phase_discussions_org_id"
+ "ALTER INDEX IF EXISTS ix_project_chats_created_phase_id RENAME TO ix_pre_phase_discussions_created_phase_id"
)
+ op.execute("ALTER INDEX IF EXISTS ix_project_chats_project_id RENAME TO ix_pre_phase_discussions_project_id")
+ op.execute("ALTER INDEX IF EXISTS ix_project_chats_org_id RENAME TO ix_pre_phase_discussions_org_id")
# Rename foreign key column in code_exploration_results back
op.alter_column(
diff --git a/backend/alembic/versions/pc02_add_retry_status_to_project_chat.py b/backend/alembic/versions/pc02_add_retry_status_to_project_chat.py
index bcaf091..3ea9d25 100644
--- a/backend/alembic/versions/pc02_add_retry_status_to_project_chat.py
+++ b/backend/alembic/versions/pc02_add_retry_status_to_project_chat.py
@@ -11,9 +11,9 @@
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
+from alembic import op
# revision identifiers, used by Alembic.
revision: str = "pc02"
@@ -30,7 +30,7 @@ def upgrade() -> None:
"retry_status",
sa.String(100),
nullable=True,
- comment="Current retry status message for UI display (e.g., 'Invalid response. Retrying 2/3')"
+ comment="Current retry status message for UI display (e.g., 'Invalid response. Retrying 2/3')",
),
)
diff --git a/backend/alembic/versions/phc01_add_phase_containers_table.py b/backend/alembic/versions/phc01_add_phase_containers_table.py
index 06fd0d7..dae84e4 100644
--- a/backend/alembic/versions/phc01_add_phase_containers_table.py
+++ b/backend/alembic/versions/phc01_add_phase_containers_table.py
@@ -11,9 +11,9 @@
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
+from alembic import op
# revision identifiers, used by Alembic.
revision: str = "phc01"
diff --git a/backend/alembic/versions/phc02_add_container_fields_to_phases.py b/backend/alembic/versions/phc02_add_container_fields_to_phases.py
index e2080ff..e05e89d 100644
--- a/backend/alembic/versions/phc02_add_container_fields_to_phases.py
+++ b/backend/alembic/versions/phc02_add_container_fields_to_phases.py
@@ -11,9 +11,9 @@
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
+from alembic import op
# revision identifiers, used by Alembic.
revision: str = "phc02"
diff --git a/backend/alembic/versions/phc03_migrate_phases_to_containers.py b/backend/alembic/versions/phc03_migrate_phases_to_containers.py
index c974136..fc9bfc4 100644
--- a/backend/alembic/versions/phc03_migrate_phases_to_containers.py
+++ b/backend/alembic/versions/phc03_migrate_phases_to_containers.py
@@ -9,13 +9,13 @@
"""
+from datetime import datetime, timezone
from typing import Sequence, Union
from uuid import uuid4
-from datetime import datetime, timezone
-from alembic import op
import sqlalchemy as sa
+from alembic import op
# revision identifiers, used by Alembic.
revision: str = "phc03"
@@ -28,8 +28,9 @@ def generate_short_id(length: int = 11) -> str:
"""Generate a short ID for URL-friendly identifiers."""
import random
import string
+
chars = string.ascii_letters + string.digits
- return ''.join(random.choice(chars) for _ in range(length))
+ return "".join(random.choice(chars) for _ in range(length))
def upgrade() -> None:
diff --git a/backend/alembic/versions/phc04_add_target_container_to_project_chats.py b/backend/alembic/versions/phc04_add_target_container_to_project_chats.py
index b1701ea..df7dd30 100644
--- a/backend/alembic/versions/phc04_add_target_container_to_project_chats.py
+++ b/backend/alembic/versions/phc04_add_target_container_to_project_chats.py
@@ -11,9 +11,9 @@
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
+from alembic import op
# revision identifiers, used by Alembic.
revision: str = "phc04"
diff --git a/backend/alembic/versions/ppc01_add_visibility_to_project_chats.py b/backend/alembic/versions/ppc01_add_visibility_to_project_chats.py
index caa4b19..7bcc9be 100644
--- a/backend/alembic/versions/ppc01_add_visibility_to_project_chats.py
+++ b/backend/alembic/versions/ppc01_add_visibility_to_project_chats.py
@@ -14,10 +14,10 @@
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
+from alembic import op
# revision identifiers, used by Alembic.
revision: str = "ppc01"
@@ -29,7 +29,8 @@
def upgrade() -> None:
# Create the visibility enum type
visibility_enum = postgresql.ENUM(
- "private", "team",
+ "private",
+ "team",
name="projectchatvisibility",
create_type=False,
)
diff --git a/backend/alembic/versions/ppd01_add_pre_phase_discussions.py b/backend/alembic/versions/ppd01_add_pre_phase_discussions.py
index 06716bb..2982685 100644
--- a/backend/alembic/versions/ppd01_add_pre_phase_discussions.py
+++ b/backend/alembic/versions/ppd01_add_pre_phase_discussions.py
@@ -9,12 +9,13 @@
Create Date: 2024-12-27
"""
+
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
+from alembic import op
# revision identifiers, used by Alembic.
revision: str = "ppd01"
@@ -26,7 +27,8 @@
def upgrade() -> None:
# Create message_type enum for pre_phase_messages
message_type_enum = postgresql.ENUM(
- "user", "bot",
+ "user",
+ "bot",
name="prephasemessagetype",
create_type=False,
)
diff --git a/backend/alembic/versions/ppd02_add_discussion_phase_link.py b/backend/alembic/versions/ppd02_add_discussion_phase_link.py
index f321450..a3b8254 100644
--- a/backend/alembic/versions/ppd02_add_discussion_phase_link.py
+++ b/backend/alembic/versions/ppd02_add_discussion_phase_link.py
@@ -10,12 +10,13 @@
Create Date: 2024-12-28
"""
+
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
+from alembic import op
# revision identifiers, used by Alembic.
revision: str = "ppd02"
diff --git a/backend/alembic/versions/ppd03_add_pre_phase_feature_fields.py b/backend/alembic/versions/ppd03_add_pre_phase_feature_fields.py
index ef75203..b8f4e03 100644
--- a/backend/alembic/versions/ppd03_add_pre_phase_feature_fields.py
+++ b/backend/alembic/versions/ppd03_add_pre_phase_feature_fields.py
@@ -11,12 +11,13 @@
Create Date: 2024-12-28
"""
+
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
+from alembic import op
# revision identifiers, used by Alembic.
revision: str = "ppd03"
diff --git a/backend/alembic/versions/ppd04_add_org_scoped_discussions.py b/backend/alembic/versions/ppd04_add_org_scoped_discussions.py
index 9f7f90b..e1621f1 100644
--- a/backend/alembic/versions/ppd04_add_org_scoped_discussions.py
+++ b/backend/alembic/versions/ppd04_add_org_scoped_discussions.py
@@ -13,12 +13,13 @@
Create Date: 2024-12-28
"""
+
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
+from alembic import op
# revision identifiers, used by Alembic.
revision: str = "ppd04"
diff --git a/backend/alembic/versions/ppd05_add_image_attachments.py b/backend/alembic/versions/ppd05_add_image_attachments.py
index 0310156..51aab23 100644
--- a/backend/alembic/versions/ppd05_add_image_attachments.py
+++ b/backend/alembic/versions/ppd05_add_image_attachments.py
@@ -13,12 +13,13 @@
Create Date: 2024-12-30
"""
+
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
+from alembic import op
# revision identifiers, used by Alembic.
revision: str = "ppd05"
diff --git a/backend/alembic/versions/ppd06_add_chat_title.py b/backend/alembic/versions/ppd06_add_chat_title.py
index e6882f7..d22845b 100644
--- a/backend/alembic/versions/ppd06_add_chat_title.py
+++ b/backend/alembic/versions/ppd06_add_chat_title.py
@@ -9,11 +9,12 @@
Create Date: 2025-01-04
"""
+
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
+from alembic import op
# revision identifiers, used by Alembic.
revision: str = "ppd06"
diff --git a/backend/alembic/versions/ppd08_add_summary_snapshot.py b/backend/alembic/versions/ppd08_add_summary_snapshot.py
index 221858b..fb7f62d 100644
--- a/backend/alembic/versions/ppd08_add_summary_snapshot.py
+++ b/backend/alembic/versions/ppd08_add_summary_snapshot.py
@@ -6,9 +6,9 @@
"""
-from alembic import op
import sqlalchemy as sa
+from alembic import op
# revision identifiers, used by Alembic.
revision = "ppd08"
diff --git a/backend/alembic/versions/pts01_add_project_tech_stack.py b/backend/alembic/versions/pts01_add_project_tech_stack.py
index 201f72e..6d00f5f 100644
--- a/backend/alembic/versions/pts01_add_project_tech_stack.py
+++ b/backend/alembic/versions/pts01_add_project_tech_stack.py
@@ -10,9 +10,9 @@
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
+from alembic import op
revision: str = "pts01"
down_revision: Union[str, Sequence[str], None] = "ghoauth01"
diff --git a/backend/alembic/versions/q6r7s8t9u0v1_add_base_url_to_platform_settings.py b/backend/alembic/versions/q6r7s8t9u0v1_add_base_url_to_platform_settings.py
index dde1c7e..4242ad7 100644
--- a/backend/alembic/versions/q6r7s8t9u0v1_add_base_url_to_platform_settings.py
+++ b/backend/alembic/versions/q6r7s8t9u0v1_add_base_url_to_platform_settings.py
@@ -5,9 +5,10 @@
Create Date: 2025-01-20
"""
-from alembic import op
import sqlalchemy as sa
+from alembic import op
+
revision = "q6r7s8t9u0v1"
down_revision = "p5q6r7s8t9u0"
branch_labels = None
diff --git a/backend/alembic/versions/qrs02_add_grounding_file_is_generating.py b/backend/alembic/versions/qrs02_add_grounding_file_is_generating.py
index ba4c6a5..21921c4 100644
--- a/backend/alembic/versions/qrs02_add_grounding_file_is_generating.py
+++ b/backend/alembic/versions/qrs02_add_grounding_file_is_generating.py
@@ -6,9 +6,9 @@
"""
-from alembic import op
import sqlalchemy as sa
+from alembic import op
# revision identifiers, used by Alembic.
revision = "qrs02"
diff --git a/backend/alembic/versions/qst01phase_add_phase_question_stats.py b/backend/alembic/versions/qst01phase_add_phase_question_stats.py
index 850d083..3c4c319 100644
--- a/backend/alembic/versions/qst01phase_add_phase_question_stats.py
+++ b/backend/alembic/versions/qst01phase_add_phase_question_stats.py
@@ -5,11 +5,12 @@
Create Date: 2024-12-26
"""
+
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
+from alembic import op
# revision identifiers, used by Alembic.
revision: str = "qst01phase"
@@ -53,14 +54,13 @@ def upgrade() -> None:
connection = op.get_bind()
# Get all phases
- phases = connection.execute(
- sa.text("SELECT id FROM brainstorming_phases")
- ).fetchall()
+ phases = connection.execute(sa.text("SELECT id FROM brainstorming_phases")).fetchall()
for (phase_id,) in phases:
# Count active questions using a subquery for module_ids
- active_total = connection.execute(
- sa.text("""
+ active_total = (
+ connection.execute(
+ sa.text("""
SELECT COUNT(*) FROM features f
WHERE f.module_id IN (
SELECT m.id FROM modules m
@@ -71,12 +71,15 @@ def upgrade() -> None:
AND f.visibility_status = 'active'
AND f.archived_at IS NULL
"""),
- {"phase_id": phase_id}
- ).scalar() or 0
+ {"phase_id": phase_id},
+ ).scalar()
+ or 0
+ )
# Count pending questions
- pending_total = connection.execute(
- sa.text("""
+ pending_total = (
+ connection.execute(
+ sa.text("""
SELECT COUNT(*) FROM features f
WHERE f.module_id IN (
SELECT m.id FROM modules m
@@ -87,12 +90,15 @@ def upgrade() -> None:
AND f.visibility_status = 'pending'
AND f.archived_at IS NULL
"""),
- {"phase_id": phase_id}
- ).scalar() or 0
+ {"phase_id": phase_id},
+ ).scalar()
+ or 0
+ )
# Count answered questions (have MCQ with selected_option_id set)
- active_answered = connection.execute(
- sa.text("""
+ active_answered = (
+ connection.execute(
+ sa.text("""
SELECT COUNT(DISTINCT t.context_id)
FROM threads t
JOIN thread_items ti ON ti.thread_id = t.id
@@ -111,8 +117,10 @@ def upgrade() -> None:
AND ti.item_type = 'mcq_followup'
AND ti.content_data->>'selected_option_id' IS NOT NULL
"""),
- {"phase_id": phase_id}
- ).scalar() or 0
+ {"phase_id": phase_id},
+ ).scalar()
+ or 0
+ )
# Update the phase
connection.execute(
@@ -128,7 +136,7 @@ def upgrade() -> None:
"active_answered": active_answered,
"active_total": active_total,
"pending_total": pending_total,
- }
+ },
)
diff --git a/backend/alembic/versions/r7s8t9u0v1w2_add_invitations_and_groups.py b/backend/alembic/versions/r7s8t9u0v1w2_add_invitations_and_groups.py
index a653526..b4bd35f 100644
--- a/backend/alembic/versions/r7s8t9u0v1w2_add_invitations_and_groups.py
+++ b/backend/alembic/versions/r7s8t9u0v1w2_add_invitations_and_groups.py
@@ -12,10 +12,11 @@
Create Date: 2025-01-20
"""
-from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects.postgresql import JSON, UUID
+from alembic import op
+
revision = "r7s8t9u0v1w2"
down_revision = "q6r7s8t9u0v1"
branch_labels = None
@@ -158,9 +159,7 @@ def upgrade() -> None:
nullable=False,
server_default=sa.func.now(),
),
- sa.UniqueConstraint(
- "project_id", "subject_type", "subject_id", name="uq_project_subject"
- ),
+ sa.UniqueConstraint("project_id", "subject_type", "subject_id", name="uq_project_subject"),
)
# 5. Migrate data from project_memberships to project_shares
@@ -202,9 +201,7 @@ def upgrade() -> None:
sa.Column("role", sa.String(20), nullable=False),
sa.Column("token", sa.String(64), nullable=False, unique=True, index=True),
sa.Column("expires_at", sa.DateTime(timezone=True), nullable=False),
- sa.Column(
- "status", sa.String(20), nullable=False, server_default="pending"
- ),
+ sa.Column("status", sa.String(20), nullable=False, server_default="pending"),
sa.Column(
"accepted_by_user_id",
UUID(as_uuid=True),
diff --git a/backend/alembic/versions/rec01_plan_recommendations.py b/backend/alembic/versions/rec01_plan_recommendations.py
index a6fd76a..7a3bb41 100644
--- a/backend/alembic/versions/rec01_plan_recommendations.py
+++ b/backend/alembic/versions/rec01_plan_recommendations.py
@@ -11,10 +11,10 @@
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
+from alembic import op
# revision identifiers, used by Alembic.
revision: str = "rec01"
diff --git a/backend/alembic/versions/repo01_add_project_repositories.py b/backend/alembic/versions/repo01_add_project_repositories.py
index 56c7051..1c4f507 100644
--- a/backend/alembic/versions/repo01_add_project_repositories.py
+++ b/backend/alembic/versions/repo01_add_project_repositories.py
@@ -11,13 +11,14 @@
Create Date: 2026-01-16
"""
+
import json
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
+from alembic import op
# revision identifiers, used by Alembic.
revision: str = "repo01"
@@ -98,6 +99,7 @@ def upgrade() -> None:
slug = url_path.split("/")[-1].lower() if "/" in url_path else "repo"
# Sanitize: replace non-alphanumeric with hyphens
import re
+
slug = re.sub(r"[^a-zA-Z0-9_-]", "-", slug)
slug = re.sub(r"-+", "-", slug).strip("-") or "repo"
diff --git a/backend/alembic/versions/s8t9u0v1w2x3_add_current_org_id_to_users.py b/backend/alembic/versions/s8t9u0v1w2x3_add_current_org_id_to_users.py
index a4053c3..b045517 100644
--- a/backend/alembic/versions/s8t9u0v1w2x3_add_current_org_id_to_users.py
+++ b/backend/alembic/versions/s8t9u0v1w2x3_add_current_org_id_to_users.py
@@ -8,9 +8,9 @@
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
+from alembic import op
# revision identifiers, used by Alembic.
revision: str = "s8t9u0v1w2x3"
diff --git a/backend/alembic/versions/sid01_add_short_ids.py b/backend/alembic/versions/sid01_add_short_ids.py
index 131cba4..f3359f4 100644
--- a/backend/alembic/versions/sid01_add_short_ids.py
+++ b/backend/alembic/versions/sid01_add_short_ids.py
@@ -18,10 +18,9 @@
import string
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
-from sqlalchemy.dialects import postgresql
+from alembic import op
# revision identifiers, used by Alembic.
revision: str = "sid01"
diff --git a/backend/alembic/versions/slack01_add_slack_tables.py b/backend/alembic/versions/slack01_add_slack_tables.py
index 4e5cfec..ae63d6d 100644
--- a/backend/alembic/versions/slack01_add_slack_tables.py
+++ b/backend/alembic/versions/slack01_add_slack_tables.py
@@ -5,12 +5,13 @@
Create Date: 2026-02-05
"""
+
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
+from alembic import op
# revision identifiers, used by Alembic.
revision: str = "slack01"
diff --git a/backend/alembic/versions/slack02_add_oauth_tables.py b/backend/alembic/versions/slack02_add_oauth_tables.py
index a0f376a..5cc449c 100644
--- a/backend/alembic/versions/slack02_add_oauth_tables.py
+++ b/backend/alembic/versions/slack02_add_oauth_tables.py
@@ -8,11 +8,12 @@
Create Date: 2026-02-05
"""
+
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
+from alembic import op
# revision identifiers, used by Alembic.
revision: str = "slack02"
diff --git a/backend/alembic/versions/sys01_add_system_thread_item_type.py b/backend/alembic/versions/sys01_add_system_thread_item_type.py
index f9d6bc6..6a33e5d 100644
--- a/backend/alembic/versions/sys01_add_system_thread_item_type.py
+++ b/backend/alembic/versions/sys01_add_system_thread_item_type.py
@@ -5,11 +5,11 @@
Create Date: 2025-01-13
"""
+
from typing import Sequence, Union
from alembic import op
-
# revision identifiers, used by Alembic.
revision: str = "sys01"
down_revision: Union[str, None] = "cexp07"
diff --git a/backend/alembic/versions/t9u0v1w2x3y4_add_thread_decision_summary.py b/backend/alembic/versions/t9u0v1w2x3y4_add_thread_decision_summary.py
index 7d5d147..d297894 100644
--- a/backend/alembic/versions/t9u0v1w2x3y4_add_thread_decision_summary.py
+++ b/backend/alembic/versions/t9u0v1w2x3y4_add_thread_decision_summary.py
@@ -8,9 +8,9 @@
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
+from alembic import op
# revision identifiers, used by Alembic.
revision: str = "t9u0v1w2x3y4"
diff --git a/backend/alembic/versions/tep01_add_exploration_prompt_search_query.py b/backend/alembic/versions/tep01_add_exploration_prompt_search_query.py
index d23420a..157a290 100644
--- a/backend/alembic/versions/tep01_add_exploration_prompt_search_query.py
+++ b/backend/alembic/versions/tep01_add_exploration_prompt_search_query.py
@@ -11,9 +11,9 @@
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
+from alembic import op
# revision identifiers, used by Alembic.
revision: str = "tep01"
diff --git a/backend/alembic/versions/th01_add_retry_status_to_threads.py b/backend/alembic/versions/th01_add_retry_status_to_threads.py
index aa4d9ec..568b862 100644
--- a/backend/alembic/versions/th01_add_retry_status_to_threads.py
+++ b/backend/alembic/versions/th01_add_retry_status_to_threads.py
@@ -11,9 +11,9 @@
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
+from alembic import op
# revision identifiers, used by Alembic.
revision: str = "th01"
diff --git a/backend/alembic/versions/u0v1w2x3y4z5_add_proactive_conversation_features.py b/backend/alembic/versions/u0v1w2x3y4z5_add_proactive_conversation_features.py
index bf23a1a..7d8d774 100644
--- a/backend/alembic/versions/u0v1w2x3y4z5_add_proactive_conversation_features.py
+++ b/backend/alembic/versions/u0v1w2x3y4z5_add_proactive_conversation_features.py
@@ -12,10 +12,10 @@
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
+from alembic import op
# revision identifiers, used by Alembic.
revision: str = "u0v1w2x3y4z5"
@@ -27,7 +27,8 @@
def upgrade() -> None:
# Create feature_visibility_status enum
visibility_status_enum = postgresql.ENUM(
- "pending", "active",
+ "pending",
+ "active",
name="featurevisibilitystatus",
create_type=False,
)
@@ -46,7 +47,9 @@ def upgrade() -> None:
# Create trigger_type enum
trigger_type_enum = postgresql.ENUM(
- "phase_created", "mcq_answered", "user_comment",
+ "phase_created",
+ "mcq_answered",
+ "user_comment",
name="triggertype",
create_type=False,
)
diff --git a/backend/alembic/versions/uchat01_add_project_chat_fields_to_threads.py b/backend/alembic/versions/uchat01_add_project_chat_fields_to_threads.py
index 473fd70..58af7f0 100644
--- a/backend/alembic/versions/uchat01_add_project_chat_fields_to_threads.py
+++ b/backend/alembic/versions/uchat01_add_project_chat_fields_to_threads.py
@@ -12,9 +12,9 @@
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
+from alembic import op
# revision identifiers, used by Alembic.
revision: str = "uchat01"
diff --git a/backend/alembic/versions/v1w2x3y4z5a6_add_user_question_sessions.py b/backend/alembic/versions/v1w2x3y4z5a6_add_user_question_sessions.py
index 230bc92..20619cd 100644
--- a/backend/alembic/versions/v1w2x3y4z5a6_add_user_question_sessions.py
+++ b/backend/alembic/versions/v1w2x3y4z5a6_add_user_question_sessions.py
@@ -12,10 +12,10 @@
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
+from alembic import op
# revision identifiers, used by Alembic.
revision: str = "v1w2x3y4z5a6"
@@ -27,7 +27,8 @@
def upgrade() -> None:
# Create session_status enum
session_status_enum = postgresql.ENUM(
- "active", "archived",
+ "active",
+ "archived",
name="userquestionsessionstatus",
create_type=False,
)
@@ -35,7 +36,8 @@ def upgrade() -> None:
# Create message_role enum
message_role_enum = postgresql.ENUM(
- "user", "assistant",
+ "user",
+ "assistant",
name="messagerole",
create_type=False,
)
diff --git a/backend/alembic/versions/w2x3y4z5a6b7_remove_thread_followup_columns.py b/backend/alembic/versions/w2x3y4z5a6b7_remove_thread_followup_columns.py
index 0954b3a..d8adb18 100644
--- a/backend/alembic/versions/w2x3y4z5a6b7_remove_thread_followup_columns.py
+++ b/backend/alembic/versions/w2x3y4z5a6b7_remove_thread_followup_columns.py
@@ -8,9 +8,9 @@
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
+from alembic import op
# revision identifiers, used by Alembic.
revision: str = "w2x3y4z5a6b7"
diff --git a/backend/alembic/versions/wsrch01_add_web_search_support.py b/backend/alembic/versions/wsrch01_add_web_search_support.py
index 83a86ae..55de82a 100644
--- a/backend/alembic/versions/wsrch01_add_web_search_support.py
+++ b/backend/alembic/versions/wsrch01_add_web_search_support.py
@@ -14,10 +14,10 @@
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
+from alembic import op
# revision identifiers, used by Alembic.
revision: str = "wsrch01"
diff --git a/backend/alembic/versions/x3y4z5a6b7c8_add_feature_description.py b/backend/alembic/versions/x3y4z5a6b7c8_add_feature_description.py
index a392fc4..2673ec2 100644
--- a/backend/alembic/versions/x3y4z5a6b7c8_add_feature_description.py
+++ b/backend/alembic/versions/x3y4z5a6b7c8_add_feature_description.py
@@ -8,9 +8,9 @@
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
+from alembic import op
# revision identifiers, used by Alembic.
revision: str = "x3y4z5a6b7c8"
diff --git a/backend/alembic/versions/y4z5a6b7c8d9_add_conversation_rerun_flag.py b/backend/alembic/versions/y4z5a6b7c8d9_add_conversation_rerun_flag.py
index fa7195b..78e1051 100644
--- a/backend/alembic/versions/y4z5a6b7c8d9_add_conversation_rerun_flag.py
+++ b/backend/alembic/versions/y4z5a6b7c8d9_add_conversation_rerun_flag.py
@@ -5,11 +5,12 @@
Create Date: 2024-12-13
"""
+
from typing import Sequence, Union
-from alembic import op
import sqlalchemy as sa
+from alembic import op
# revision identifiers, used by Alembic.
revision: str = "y4z5a6b7c8d9"
diff --git a/backend/alembic/versions/z5a6b7c8d9e0_convert_project_key_to_prefix.py b/backend/alembic/versions/z5a6b7c8d9e0_convert_project_key_to_prefix.py
index 17c18e0..20177f7 100644
--- a/backend/alembic/versions/z5a6b7c8d9e0_convert_project_key_to_prefix.py
+++ b/backend/alembic/versions/z5a6b7c8d9e0_convert_project_key_to_prefix.py
@@ -9,11 +9,10 @@
now store ticket prefixes that are used for feature key generation.
"""
+
from typing import Sequence, Union
from alembic import op
-import sqlalchemy as sa
-
# revision identifiers, used by Alembic.
revision: str = "z5a6b7c8d9e0"
diff --git a/backend/app/agents/brainstorm/__init__.py b/backend/app/agents/brainstorm/__init__.py
index ee0e67d..da9d377 100644
--- a/backend/app/agents/brainstorm/__init__.py
+++ b/backend/app/agents/brainstorm/__init__.py
@@ -22,17 +22,17 @@
- Initial MCQs for each question to start the conversation thread
"""
-from app.agents.brainstorm.types import (
- BrainstormContext,
- GeneratedMCQ,
- GeneratedClarificationQuestion,
- GeneratedAspect,
- BrainstormResult,
-)
from app.agents.brainstorm.orchestrator import (
BrainstormOrchestrator,
create_orchestrator,
)
+from app.agents.brainstorm.types import (
+ BrainstormContext,
+ BrainstormResult,
+ GeneratedAspect,
+ GeneratedClarificationQuestion,
+ GeneratedMCQ,
+)
__all__ = [
"BrainstormContext",
diff --git a/backend/app/agents/brainstorm/generator.py b/backend/app/agents/brainstorm/generator.py
index 8e569fa..06baaa3 100644
--- a/backend/app/agents/brainstorm/generator.py
+++ b/backend/app/agents/brainstorm/generator.py
@@ -25,24 +25,23 @@
from autogen_core import CancellationToken
from autogen_core.models import ChatCompletionClient
+# Import from common module
+from app.agents.response_parser import strip_markdown_json
+
from .types import (
+ MAX_ASPECTS,
+ MAX_MCQ_CHOICES,
+ MAX_QUESTIONS_PER_ASPECT,
+ MIN_ASPECTS,
+ MIN_MCQ_CHOICES,
+ MIN_QUESTIONS_PER_ASPECT,
BrainstormContext,
BrainstormResult,
GeneratedAspect,
GeneratedClarificationQuestion,
GeneratedMCQ,
- MIN_ASPECTS,
- MAX_ASPECTS,
- MIN_QUESTIONS_PER_ASPECT,
- MAX_QUESTIONS_PER_ASPECT,
- MIN_MCQ_CHOICES,
- MAX_MCQ_CHOICES,
)
-# Import from common module
-from app.agents.response_parser import strip_markdown_json
-
-
logger = logging.getLogger(__name__)
@@ -147,23 +146,16 @@ async def generate(self, context: BrainstormContext) -> BrainstormResult:
Raises:
ValueError: If generation fails or returns invalid JSON
"""
- logger.info(
- f"[brainstorm.generator] Starting generation for phase {context.brainstorming_phase_id}"
- )
+ logger.info(f"[brainstorm.generator] Starting generation for phase {context.brainstorming_phase_id}")
try:
# Build the prompt
prompt = self._build_prompt(context)
- logger.debug(
- f"[brainstorm.generator] Calling LLM with prompt length: {len(prompt)}"
- )
+ logger.debug(f"[brainstorm.generator] Calling LLM with prompt length: {len(prompt)}")
# Call the agent
- response = await self.agent.on_messages(
- [TextMessage(content=prompt, source="user")],
- CancellationToken()
- )
+ response = await self.agent.on_messages([TextMessage(content=prompt, source="user")], CancellationToken())
response_text = response.chat_message.content
if isinstance(response_text, list):
@@ -176,9 +168,7 @@ async def generate(self, context: BrainstormContext) -> BrainstormResult:
try:
generated_data = json.loads(cleaned_response)
except json.JSONDecodeError as e:
- logger.error(
- f"[brainstorm.generator] Failed to parse response as JSON: {e}"
- )
+ logger.error(f"[brainstorm.generator] Failed to parse response as JSON: {e}")
logger.error(f"[brainstorm.generator] Raw response: {response_text[:500]}")
raise ValueError(f"Failed to parse generator response as JSON: {e}")
@@ -284,8 +274,7 @@ def _build_result(self, generated_data: dict) -> BrainstormResult:
async def create_generator_agent(
- model_client: ChatCompletionClient,
- project_id: Optional[str] = None
+ model_client: ChatCompletionClient, project_id: Optional[str] = None
) -> BrainstormGeneratorAgent:
"""
Factory function to create a Brainstorm Generator Agent.
diff --git a/backend/app/agents/brainstorm/orchestrator.py b/backend/app/agents/brainstorm/orchestrator.py
index 5f5de76..cba203f 100644
--- a/backend/app/agents/brainstorm/orchestrator.py
+++ b/backend/app/agents/brainstorm/orchestrator.py
@@ -13,17 +13,16 @@
"""
import logging
-from typing import Optional, Callable, Dict, Any
+from typing import Any, Callable, Dict, Optional
from autogen_core.models import ChatCompletionClient
+from .generator import BrainstormGeneratorAgent
from .types import (
BrainstormContext,
BrainstormResult,
validate_brainstorm_result,
)
-from .generator import BrainstormGeneratorAgent
-
logger = logging.getLogger(__name__)
@@ -43,7 +42,7 @@ def __init__(
self,
model_client: ChatCompletionClient,
progress_callback: Optional[Callable[[Dict[str, Any]], None]] = None,
- project_id: Optional[str] = None
+ project_id: Optional[str] = None,
):
"""
Initialize the Brainstorm Orchestrator.
@@ -73,9 +72,7 @@ async def generate_brainstorm(self, context: BrainstormContext) -> BrainstormRes
Raises:
Exception: If generation fails
"""
- logger.info(
- f"[brainstorm.orchestrator] Starting generation for phase {context.brainstorming_phase_id}"
- )
+ logger.info(f"[brainstorm.orchestrator] Starting generation for phase {context.brainstorming_phase_id}")
try:
# Step 1: Analyzing (0-20%)
@@ -99,9 +96,7 @@ async def generate_brainstorm(self, context: BrainstormContext) -> BrainstormRes
issues = validate_brainstorm_result(result)
if issues:
- logger.warning(
- f"[brainstorm.orchestrator] Validation issues: {issues}"
- )
+ logger.warning(f"[brainstorm.orchestrator] Validation issues: {issues}")
# Add issues to generation notes
result.generation_notes.extend([f"Validation: {issue}" for issue in issues])
@@ -152,7 +147,7 @@ async def create_orchestrator(
api_key: str,
config: dict,
progress_callback: Optional[Callable[[Dict[str, Any]], None]] = None,
- project_id: Optional[str] = None
+ project_id: Optional[str] = None,
) -> BrainstormOrchestrator:
"""
Factory function to create a Brainstorm Orchestrator with LLM client.
diff --git a/backend/app/agents/brainstorm/types.py b/backend/app/agents/brainstorm/types.py
index 3ca1ed5..5b917f6 100644
--- a/backend/app/agents/brainstorm/types.py
+++ b/backend/app/agents/brainstorm/types.py
@@ -14,10 +14,9 @@
"""
from dataclasses import dataclass, field
-from typing import List, Optional, Dict, Any
+from typing import Dict, List, Optional
from uuid import UUID
-
# Configuration constants
MIN_ASPECTS = 2
MAX_ASPECTS = 8
@@ -37,6 +36,7 @@ class BrainstormContext:
The agent uses the phase description as its primary input to generate
aspects and clarification questions.
"""
+
project_id: UUID
brainstorming_phase_id: UUID
phase_title: str
@@ -54,6 +54,7 @@ class GeneratedMCQ:
Each clarification question gets a thread with an initial MCQ
that allows users to provide structured input.
"""
+
question_text: str
choices: List[Dict[str, str]] # [{"id": "a", "label": "..."}]
explanation: Optional[str] = None # Why this question is important
@@ -67,6 +68,7 @@ class GeneratedClarificationQuestion:
Clarification questions represent specific topics to discuss
within an aspect. Each one gets its own conversation thread.
"""
+
title: str
description: str
initial_mcq: GeneratedMCQ # The MCQ to start the thread
@@ -80,6 +82,7 @@ class GeneratedAspect:
Aspects represent logical areas to explore during brainstorming.
Each aspect contains multiple clarification questions.
"""
+
title: str
description: str
clarification_questions: List[GeneratedClarificationQuestion]
@@ -93,6 +96,7 @@ class BrainstormResult:
Contains all generated aspects with their clarification questions.
"""
+
aspects: List[GeneratedAspect]
total_clarification_questions: int = 0
generation_notes: List[str] = field(default_factory=list)
@@ -100,9 +104,7 @@ class BrainstormResult:
def __post_init__(self):
"""Calculate total clarification questions if not provided."""
if self.total_clarification_questions == 0:
- self.total_clarification_questions = sum(
- len(aspect.clarification_questions) for aspect in self.aspects
- )
+ self.total_clarification_questions = sum(len(aspect.clarification_questions) for aspect in self.aspects)
def validate_brainstorm_result(result: BrainstormResult) -> List[str]:
@@ -121,13 +123,9 @@ def validate_brainstorm_result(result: BrainstormResult) -> List[str]:
# Check total questions
if result.total_clarification_questions < MIN_TOTAL_QUESTIONS:
- issues.append(
- f"Too few questions: {result.total_clarification_questions} < {MIN_TOTAL_QUESTIONS}"
- )
+ issues.append(f"Too few questions: {result.total_clarification_questions} < {MIN_TOTAL_QUESTIONS}")
if result.total_clarification_questions > MAX_TOTAL_QUESTIONS:
- issues.append(
- f"Too many questions: {result.total_clarification_questions} > {MAX_TOTAL_QUESTIONS}"
- )
+ issues.append(f"Too many questions: {result.total_clarification_questions} > {MAX_TOTAL_QUESTIONS}")
# Check each aspect
for i, aspect in enumerate(result.aspects):
@@ -136,13 +134,9 @@ def validate_brainstorm_result(result: BrainstormResult) -> List[str]:
q_count = len(aspect.clarification_questions)
if q_count < MIN_QUESTIONS_PER_ASPECT:
- issues.append(
- f"Aspect '{aspect.title}' has too few questions: {q_count} < {MIN_QUESTIONS_PER_ASPECT}"
- )
+ issues.append(f"Aspect '{aspect.title}' has too few questions: {q_count} < {MIN_QUESTIONS_PER_ASPECT}")
if q_count > MAX_QUESTIONS_PER_ASPECT:
- issues.append(
- f"Aspect '{aspect.title}' has too many questions: {q_count} > {MAX_QUESTIONS_PER_ASPECT}"
- )
+ issues.append(f"Aspect '{aspect.title}' has too many questions: {q_count} > {MAX_QUESTIONS_PER_ASPECT}")
# Check each question
for j, question in enumerate(aspect.clarification_questions):
@@ -152,13 +146,9 @@ def validate_brainstorm_result(result: BrainstormResult) -> List[str]:
# Check MCQ choices
choice_count = len(question.initial_mcq.choices)
if choice_count < MIN_MCQ_CHOICES:
- issues.append(
- f"Question '{question.title}' MCQ has too few choices: {choice_count}"
- )
+ issues.append(f"Question '{question.title}' MCQ has too few choices: {choice_count}")
if choice_count > MAX_MCQ_CHOICES:
- issues.append(
- f"Question '{question.title}' MCQ has too many choices: {choice_count}"
- )
+ issues.append(f"Question '{question.title}' MCQ has too many choices: {choice_count}")
# Check for duplicate aspect titles
aspect_titles = [a.title.lower().strip() for a in result.aspects]
diff --git a/backend/app/agents/brainstorm_conversation/__init__.py b/backend/app/agents/brainstorm_conversation/__init__.py
index cab4f21..74db348 100644
--- a/backend/app/agents/brainstorm_conversation/__init__.py
+++ b/backend/app/agents/brainstorm_conversation/__init__.py
@@ -34,49 +34,47 @@
result = await orchestrator.generate_brainstorm_conversations(context)
"""
+from .aspect_generator import AspectGeneratorAgent, create_aspect_generator
+from .classifier import ComplexityClassifierAgent, create_complexity_classifier
+from .critic_pruner import CriticPrunerAgent, create_critic_pruner
+from .input_validator import InputValidatorAgent, ValidationResult, create_input_validator
+from .orchestrator import (
+ BrainstormConversationOrchestrator,
+ create_orchestrator,
+)
+from .question_generator import QuestionGeneratorAgent, create_question_generator
+from .summarizer import SummarizerAgent, create_summarizer_agent
from .types import (
- # Enums
- PhaseType,
- PhaseComplexity,
- AspectCategory,
- QuestionPriority,
- # Dataclasses
- MCQChoice,
- GeneratedMCQ,
- GeneratedClarificationQuestion,
- GeneratedAspect,
- BrainstormConversationContext,
- SummarizedPhaseContext,
- ClassificationResult,
- ComplexityCaps,
- BrainstormConversationResult,
- AgentInfo,
- CodeExplorationContext,
# Constants
AGENT_METADATA,
- WORKFLOW_STEPS,
COMPLEXITY_CAPS_CONFIG,
- STANDARD_MCQ_CHOICES,
- MIN_MCQ_CHOICES,
MAX_MCQ_CHOICES,
+ MIN_MCQ_CHOICES,
+ STANDARD_MCQ_CHOICES,
+ WORKFLOW_STEPS,
+ AgentInfo,
+ AspectCategory,
+ BrainstormConversationContext,
+ BrainstormConversationResult,
+ ClassificationResult,
+ CodeExplorationContext,
+ ComplexityCaps,
+ GeneratedAspect,
+ GeneratedClarificationQuestion,
+ GeneratedMCQ,
+ # Dataclasses
+ MCQChoice,
+ PhaseComplexity,
+ # Enums
+ PhaseType,
+ QuestionPriority,
+ SummarizedPhaseContext,
+ create_mcq_choices,
# Helper functions
get_caps_for_complexity,
- create_mcq_choices,
validate_brainstorm_result,
)
-from .orchestrator import (
- BrainstormConversationOrchestrator,
- create_orchestrator,
-)
-
-from .summarizer import SummarizerAgent, create_summarizer_agent
-from .classifier import ComplexityClassifierAgent, create_complexity_classifier
-from .aspect_generator import AspectGeneratorAgent, create_aspect_generator
-from .question_generator import QuestionGeneratorAgent, create_question_generator
-from .critic_pruner import CriticPrunerAgent, create_critic_pruner
-from .input_validator import InputValidatorAgent, ValidationResult, create_input_validator
-
__all__ = [
# Main orchestrator
"BrainstormConversationOrchestrator",
diff --git a/backend/app/agents/brainstorm_conversation/aspect_generator.py b/backend/app/agents/brainstorm_conversation/aspect_generator.py
index 65f6950..4cf2f1a 100644
--- a/backend/app/agents/brainstorm_conversation/aspect_generator.py
+++ b/backend/app/agents/brainstorm_conversation/aspect_generator.py
@@ -13,21 +13,21 @@
from autogen_core import CancellationToken
from autogen_core.models import ChatCompletionClient
+from .logging_config import get_agent_logger
from .types import (
- GeneratedAspect,
AspectCategory,
- SummarizedPhaseContext,
ClassificationResult,
+ CodeExplorationContext,
ComplexityCaps,
- ExistingConversationContext,
- UserInitiatedContext,
CrossProjectContext,
- TechStackContext,
- CodeExplorationContext,
+ ExistingConversationContext,
+ GeneratedAspect,
SiblingPhasesContext,
+ SummarizedPhaseContext,
+ TechStackContext,
+ UserInitiatedContext,
calculate_aspect_coverage_level,
)
-from .logging_config import get_agent_logger
from .utils import strip_markdown_json
@@ -138,7 +138,7 @@ async def generate_aspects(
tech_stack_context: Optional[TechStackContext] = None,
code_exploration_context: Optional[CodeExplorationContext] = None,
sibling_phases_context: Optional[SiblingPhasesContext] = None,
- project_id: Optional[str] = None
+ project_id: Optional[str] = None,
) -> List[GeneratedAspect]:
"""
Generate aspects (exploration areas) for brainstorming.
@@ -168,7 +168,7 @@ async def generate_aspects(
complexity=classification.complexity.value,
target_range=f"{caps.min_aspects}-{caps.max_aspects}",
existing_count=existing_count,
- num_focus_areas=len(classification.suggested_focus_areas)
+ num_focus_areas=len(classification.suggested_focus_areas),
)
try:
@@ -183,20 +183,13 @@ async def generate_aspects(
cross_project_context,
tech_stack_context,
code_exploration_context,
- sibling_phases_context
+ sibling_phases_context,
)
# Call the agent
- self.logger.log_llm_call(
- prompt=prompt,
- model=str(self.model_client),
- operation="generate_aspects"
- )
+ self.logger.log_llm_call(prompt=prompt, model=str(self.model_client), operation="generate_aspects")
- response = await self.agent.on_messages(
- [TextMessage(content=prompt, source="user")],
- CancellationToken()
- )
+ response = await self.agent.on_messages([TextMessage(content=prompt, source="user")], CancellationToken())
response_text = response.chat_message.content
if isinstance(response_text, list):
@@ -209,10 +202,9 @@ async def generate_aspects(
try:
aspects_data = json.loads(cleaned_response)
except json.JSONDecodeError as e:
- self.logger.log_error(e, {
- "raw_response": response_text[:500],
- "cleaned_response": cleaned_response[:500]
- })
+ self.logger.log_error(
+ e, {"raw_response": response_text[:500], "cleaned_response": cleaned_response[:500]}
+ )
raise ValueError(f"Failed to parse aspect generator response as JSON: {e}")
# Convert to GeneratedAspect objects
@@ -228,7 +220,7 @@ async def generate_aspects(
self.logger.log_agent_complete(
generated_count=len(aspects),
categories=list(set(a.category.value for a in aspects)),
- aspect_titles=[a.title for a in aspects]
+ aspect_titles=[a.title for a in aspects],
)
return aspects
@@ -237,10 +229,7 @@ async def generate_aspects(
self.logger.log_error(e, {"project_id": project_id})
raise
- def _build_existing_context_section(
- self,
- existing_context: ExistingConversationContext
- ) -> str:
+ def _build_existing_context_section(self, existing_context: ExistingConversationContext) -> str:
"""
Build the existing aspects context section for the prompt.
@@ -260,20 +249,30 @@ def _build_existing_context_section(
# Section 1: Existing Aspects Overview
sections.append("### EXISTING ASPECTS - DO NOT DUPLICATE:")
- sections.append("The following aspects have already been created. DO NOT create new aspects with the same or similar names.")
- sections.append("If you want to add more questions to an existing topic, you MUST skip it - questions will be added separately.")
+ sections.append(
+ "The following aspects have already been created. DO NOT create new aspects with the same or similar names."
+ )
+ sections.append(
+ "If you want to add more questions to an existing topic, you MUST skip it - questions will be added separately."
+ )
sections.append("")
for aspect in existing_context.aspects:
coverage = calculate_aspect_coverage_level(aspect.total_questions)
- sections.append(f"**{aspect.title}** ({aspect.total_questions} questions, {aspect.answered_questions} answered) - {coverage.upper()} coverage")
- sections.append(f" Description: {aspect.description[:150]}..." if len(aspect.description) > 150 else f" Description: {aspect.description}")
+ sections.append(
+ f"**{aspect.title}** ({aspect.total_questions} questions, {aspect.answered_questions} answered) - {coverage.upper()} coverage"
+ )
+ sections.append(
+ f" Description: {aspect.description[:150]}..."
+ if len(aspect.description) > 150
+ else f" Description: {aspect.description}"
+ )
# Show questions with decision summaries
for q in aspect.questions:
if q.status == "answered" and q.decision_summary:
sections.append(f" - Q: {q.question_title}")
- sections.append(f" Decision: \"{q.decision_summary}\"")
+ sections.append(f' Decision: "{q.decision_summary}"')
# Show unresolved points if any (limit to 2 for brevity)
if q.unresolved_points:
sections.append(f" Open questions: {', '.join(q.unresolved_points[:2])}")
@@ -285,17 +284,14 @@ def _build_existing_context_section(
# Section 2: Decisions that expand valid scope
decisions_with_content = [
- (aspect, q)
- for aspect in existing_context.aspects
- for q in aspect.questions
- if q.decision_summary
+ (aspect, q) for aspect in existing_context.aspects for q in aspect.questions if q.decision_summary
]
if decisions_with_content:
sections.append("### DECISIONS THAT EXPAND VALID SCOPE:")
sections.append("These decisions enable follow-up aspects on related topics:")
for aspect, q in decisions_with_content[:10]: # Cap at 10
- sections.append(f"- \"{q.decision_summary}\" β follow-up on this topic is VALID")
+ sections.append(f'- "{q.decision_summary}" β follow-up on this topic is VALID')
sections.append("")
# Section 3: Generation Guidelines
@@ -303,16 +299,15 @@ def _build_existing_context_section(
sections.append("- DO NOT create aspects with names similar to the ones listed above")
sections.append("- Focus on NEW areas of exploration not yet covered")
sections.append("- You MAY create aspects related to the decisions listed above")
- sections.append("- Think like a human reviewer who already has these aspects - what NEW areas need exploration?")
+ sections.append(
+ "- Think like a human reviewer who already has these aspects - what NEW areas need exploration?"
+ )
sections.append("- If all major areas are covered, generate fewer aspects or focus on niche areas")
sections.append("")
return "\n".join(sections)
- def _build_user_initiated_section(
- self,
- user_initiated_context: UserInitiatedContext
- ) -> str:
+ def _build_user_initiated_section(self, user_initiated_context: UserInitiatedContext) -> str:
"""
Build the user-initiated context section for the prompt.
@@ -394,11 +389,15 @@ def _build_tech_stack_section(self, tech_stack_context: TechStackContext) -> str
sections = []
sections.append("### TECHNOLOGY STACK CONTEXT")
sections.append("")
- sections.append(f"The user indicated this tech stack during initial discussion: **{tech_stack_context.proposed_stack}**")
+ sections.append(
+ f"The user indicated this tech stack during initial discussion: **{tech_stack_context.proposed_stack}**"
+ )
sections.append("")
if tech_stack_context.has_grounding:
- sections.append("NOTE: This project has grounding documentation (agents.md), which may already document tech decisions.")
+ sections.append(
+ "NOTE: This project has grounding documentation (agents.md), which may already document tech decisions."
+ )
sections.append("Consider whether tech stack exploration is still needed.")
sections.append("")
@@ -408,10 +407,7 @@ def _build_tech_stack_section(self, tech_stack_context: TechStackContext) -> str
return "\n".join(sections)
- def _build_code_exploration_section(
- self,
- code_exploration_context: CodeExplorationContext
- ) -> str:
+ def _build_code_exploration_section(self, code_exploration_context: CodeExplorationContext) -> str:
"""
Build the code exploration context section for the prompt.
@@ -436,10 +432,7 @@ def _build_code_exploration_section(
return "\n".join(sections)
- def _build_cross_project_section(
- self,
- cross_project_context: CrossProjectContext
- ) -> str:
+ def _build_cross_project_section(self, cross_project_context: CrossProjectContext) -> str:
"""
Build the cross-phase and project-level context section for the prompt.
@@ -475,7 +468,9 @@ def _build_cross_project_section(
sections.append(f" {phase.phase_description}")
for decision in phase.decisions[:10]: # Cap at 10 per phase for display
- sections.append(f" - [{decision.aspect_title}] {decision.question_title}: \"{decision.decision_summary_short}\"")
+ sections.append(
+ f' - [{decision.aspect_title}] {decision.question_title}: "{decision.decision_summary_short}"'
+ )
sections.append("")
# Section 2: Project-level features
@@ -485,7 +480,7 @@ def _build_cross_project_section(
sections.append("")
for feat in cross_project_context.project_features[:15]: # Cap at 15
- sections.append(f" - [{feat.module_title}] {feat.feature_title}: \"{feat.decision_summary_short}\"")
+ sections.append(f' - [{feat.module_title}] {feat.feature_title}: "{feat.decision_summary_short}"')
sections.append("")
# Guidance for the LLM
@@ -497,10 +492,7 @@ def _build_cross_project_section(
return "\n".join(sections)
- def _build_sibling_phases_section(
- self,
- sibling_phases_context: SiblingPhasesContext
- ) -> str:
+ def _build_sibling_phases_section(self, sibling_phases_context: SiblingPhasesContext) -> str:
"""
Build the sibling phases context section for the prompt.
@@ -518,7 +510,7 @@ def _build_sibling_phases_section(
return ""
sections = []
- sections.append(f"### SIBLING PHASES (same container: \"{sibling_phases_context.container_title}\"):")
+ sections.append(f'### SIBLING PHASES (same container: "{sibling_phases_context.container_title}"):')
sections.append("These phases are part of the same feature group and share related context:")
sections.append("")
@@ -530,7 +522,9 @@ def _build_sibling_phases_section(
if phase.decisions:
for decision in phase.decisions[:10]:
- sections.append(f" - [{decision.aspect_title}] {decision.question_title}: \"{decision.decision_summary_short}\"")
+ sections.append(
+ f' - [{decision.aspect_title}] {decision.question_title}: "{decision.decision_summary_short}"'
+ )
if phase.implementation_analysis:
truncated = phase.implementation_analysis[:300]
@@ -558,7 +552,7 @@ def _build_prompt(
cross_project_context: Optional[CrossProjectContext] = None,
tech_stack_context: Optional[TechStackContext] = None,
code_exploration_context: Optional[CodeExplorationContext] = None,
- sibling_phases_context: Optional[SiblingPhasesContext] = None
+ sibling_phases_context: Optional[SiblingPhasesContext] = None,
) -> str:
"""
Build the aspect generation prompt with existing context awareness.
@@ -685,13 +679,12 @@ def _parse_aspect(self, a_data: dict, order_index: int) -> GeneratedAspect:
category=category,
order_index=order_index,
clarification_questions=[], # Will be filled by question generator
- internal_agent_notes=internal_notes
+ internal_agent_notes=internal_notes,
)
async def create_aspect_generator(
- model_client: ChatCompletionClient,
- project_id: Optional[str] = None
+ model_client: ChatCompletionClient, project_id: Optional[str] = None
) -> AspectGeneratorAgent:
"""
Factory function to create an Aspect Generator Agent.
diff --git a/backend/app/agents/brainstorm_conversation/classifier.py b/backend/app/agents/brainstorm_conversation/classifier.py
index 9a7c33e..c9e3c80 100644
--- a/backend/app/agents/brainstorm_conversation/classifier.py
+++ b/backend/app/agents/brainstorm_conversation/classifier.py
@@ -6,15 +6,15 @@
"""
import json
-from typing import Optional, List
+from typing import Optional
from autogen_agentchat.agents import AssistantAgent
from autogen_agentchat.messages import TextMessage
from autogen_core import CancellationToken
from autogen_core.models import ChatCompletionClient
-from .types import PhaseComplexity, ClassificationResult, SummarizedPhaseContext, PhaseType
from .logging_config import get_agent_logger
+from .types import ClassificationResult, PhaseComplexity, PhaseType, SummarizedPhaseContext
from .utils import strip_markdown_json
@@ -111,10 +111,7 @@ def _get_system_message(self) -> str:
The suggested_focus_areas should be 3-5 key areas that deserve exploration based on the phase description."""
async def classify(
- self,
- summarized_context: SummarizedPhaseContext,
- phase_type: PhaseType,
- project_id: Optional[str] = None
+ self, summarized_context: SummarizedPhaseContext, phase_type: PhaseType, project_id: Optional[str] = None
) -> ClassificationResult:
"""
Classify the brainstorming phase's complexity.
@@ -134,35 +131,25 @@ async def classify(
project_id=project_id,
phase_type=phase_type.value,
num_objectives=len(summarized_context.key_objectives),
- num_constraints=len(summarized_context.constraints)
+ num_constraints=len(summarized_context.constraints),
)
try:
# Apply heuristics first
- heuristic_complexity = self._apply_heuristics(
- phase_type,
- summarized_context
- )
+ heuristic_complexity = self._apply_heuristics(phase_type, summarized_context)
# Build the prompt
- prompt = self._build_prompt(
- summarized_context,
- phase_type,
- heuristic_complexity
- )
+ prompt = self._build_prompt(summarized_context, phase_type, heuristic_complexity)
# Call the agent
self.logger.log_llm_call(
prompt=prompt,
model=str(self.model_client),
operation="classify_complexity",
- heuristic_complexity=heuristic_complexity
+ heuristic_complexity=heuristic_complexity,
)
- response = await self.agent.on_messages(
- [TextMessage(content=prompt, source="user")],
- CancellationToken()
- )
+ response = await self.agent.on_messages([TextMessage(content=prompt, source="user")], CancellationToken())
response_text = response.chat_message.content
if isinstance(response_text, list):
@@ -190,22 +177,17 @@ async def classify(
raise ValueError(f"Invalid complexity value: {e}")
result = ClassificationResult(
- complexity=complexity,
- rationale=rationale,
- suggested_focus_areas=suggested_focus_areas
+ complexity=complexity, rationale=rationale, suggested_focus_areas=suggested_focus_areas
)
self.logger.log_decision(
decision=f"complexity={complexity.value}",
rationale=rationale,
project_id=project_id,
- focus_areas=suggested_focus_areas
+ focus_areas=suggested_focus_areas,
)
- self.logger.log_agent_complete(
- complexity=complexity.value,
- num_focus_areas=len(suggested_focus_areas)
- )
+ self.logger.log_agent_complete(complexity=complexity.value, num_focus_areas=len(suggested_focus_areas))
return result
@@ -213,11 +195,7 @@ async def classify(
self.logger.log_error(e, {"project_id": project_id})
raise
- def _apply_heuristics(
- self,
- phase_type: PhaseType,
- summarized_context: SummarizedPhaseContext
- ) -> Optional[str]:
+ def _apply_heuristics(self, phase_type: PhaseType, summarized_context: SummarizedPhaseContext) -> Optional[str]:
"""
Apply simple heuristics to guess complexity.
@@ -262,28 +240,30 @@ def _apply_heuristics(
# High complexity indicators
high_keywords = [
- "architecture", "multiple systems", "integration", "complex",
- "greenfield", "from scratch", "new platform", "many users",
- "enterprise", "scalability", "migration"
+ "architecture",
+ "multiple systems",
+ "integration",
+ "complex",
+ "greenfield",
+ "from scratch",
+ "new platform",
+ "many users",
+ "enterprise",
+ "scalability",
+ "migration",
]
if any(keyword in summary_lower for keyword in high_keywords):
return PhaseComplexity.HIGH.value
# Low complexity indicators
- low_keywords = [
- "simple", "single", "small", "quick", "minor",
- "tweak", "adjustment", "one feature"
- ]
+ low_keywords = ["simple", "single", "small", "quick", "minor", "tweak", "adjustment", "one feature"]
if any(keyword in summary_lower for keyword in low_keywords):
return PhaseComplexity.LOW.value
return base_complexity.value
def _build_prompt(
- self,
- summarized_context: SummarizedPhaseContext,
- phase_type: PhaseType,
- heuristic_complexity: Optional[str]
+ self, summarized_context: SummarizedPhaseContext, phase_type: PhaseType, heuristic_complexity: Optional[str]
) -> str:
"""
Build the classification prompt.
@@ -340,8 +320,7 @@ def _build_prompt(
async def create_complexity_classifier(
- model_client: ChatCompletionClient,
- project_id: Optional[str] = None
+ model_client: ChatCompletionClient, project_id: Optional[str] = None
) -> ComplexityClassifierAgent:
"""
Factory function to create a Complexity Classifier Agent.
diff --git a/backend/app/agents/brainstorm_conversation/code_explorer_stage.py b/backend/app/agents/brainstorm_conversation/code_explorer_stage.py
index 9388a80..9aa837d 100644
--- a/backend/app/agents/brainstorm_conversation/code_explorer_stage.py
+++ b/backend/app/agents/brainstorm_conversation/code_explorer_stage.py
@@ -15,22 +15,22 @@
import hashlib
import logging
from datetime import datetime, timezone
-from typing import Optional, List, TYPE_CHECKING
+from typing import TYPE_CHECKING, List, Optional
from uuid import UUID
from sqlalchemy.orm import Session
-from app.models.project import Project
from app.models.platform_settings import PlatformSettings
+from app.models.project import Project
from app.services.code_explorer_client import code_explorer_client
if TYPE_CHECKING:
from app.models.brainstorming_phase import BrainstormingPhase
from .types import (
+ ClassificationResult,
CodeExplorationContext,
SummarizedPhaseContext,
- ClassificationResult,
)
logger = logging.getLogger(__name__)
@@ -135,22 +135,20 @@ async def _build_repos_list(
github_token = None
if repo.github_integration_config_id:
try:
- github_token = await get_github_token_for_org(
- db, project.org_id, repo.github_integration_config_id
- )
+ github_token = await get_github_token_for_org(db, project.org_id, repo.github_integration_config_id)
except Exception as e:
# Continue without token (works for public repos)
- logger.warning(
- f"Failed to get GitHub token for repo {repo.slug}: {e}"
- )
-
- repos.append({
- "slug": repo.slug,
- "repo_url": repo.repo_url,
- "branch": repo.default_branch or "main",
- "github_token": github_token,
- "user_remarks": repo.user_remarks,
- })
+ logger.warning(f"Failed to get GitHub token for repo {repo.slug}: {e}")
+
+ repos.append(
+ {
+ "slug": repo.slug,
+ "repo_url": repo.repo_url,
+ "branch": repo.default_branch or "main",
+ "github_token": github_token,
+ "user_remarks": repo.user_remarks,
+ }
+ )
return repos
@@ -181,10 +179,10 @@ def _log_code_exploration_usage(
response_content: The exploration output (for response log)
"""
try:
+ from app.models.job import Job
from app.services.llm_call_log_service import LLMCallLogService
from app.services.llm_usage_log_service import LLMUsageLogService
- from app.models.job import Job
- from workers.handlers.code_explorer import calculate_claude_cost, CODE_EXPLORER_MODEL
+ from workers.handlers.code_explorer import CODE_EXPLORER_MODEL, calculate_claude_cost
# Get org_id, project_id, triggered_by_user_id from job
job = db.query(Job).filter(Job.id == job_id).first()
@@ -243,10 +241,7 @@ def _log_code_exploration_usage(
duration_ms=duration_ms,
)
- logger.debug(
- f"Logged code exploration usage: {prompt_tokens} prompt + "
- f"{completion_tokens} completion tokens"
- )
+ logger.debug(f"Logged code exploration usage: {prompt_tokens} prompt + {completion_tokens} completion tokens")
except Exception as e:
# Log error but don't disrupt agent execution
logger.warning(f"Failed to log code exploration usage: {e}")
@@ -288,29 +283,21 @@ async def get_or_run_code_exploration(
# 1. Check if Code Explorer is enabled in platform settings
settings = db.query(PlatformSettings).first()
if not settings or not settings.code_explorer_enabled:
- logger.debug(
- f"Code Explorer disabled for project {project.id}, skipping exploration"
- )
+ logger.debug(f"Code Explorer disabled for project {project.id}, skipping exploration")
return None
# 2. Build repos list
repos = await _build_repos_list(db, project)
if not repos:
- logger.debug(
- f"No repositories configured for project {project.id}, skipping exploration"
- )
+ logger.debug(f"No repositories configured for project {project.id}, skipping exploration")
return None
repos_hash = _compute_repos_hash(repos)
# 3. Check cache validity
- if (
- phase.code_exploration_output
- and phase.code_exploration_repos_hash == repos_hash
- ):
+ if phase.code_exploration_output and phase.code_exploration_repos_hash == repos_hash:
logger.info(
- f"Using cached code exploration for phase {phase.id} "
- f"(cached at {phase.code_exploration_cached_at})"
+ f"Using cached code exploration for phase {phase.id} (cached at {phase.code_exploration_cached_at})"
)
return CodeExplorationContext(
output=phase.code_exploration_output,
@@ -323,9 +310,7 @@ async def get_or_run_code_exploration(
)
# 4. Run new exploration
- logger.info(
- f"Running new code exploration for phase {phase.id} with {len(repos)} repos"
- )
+ logger.info(f"Running new code exploration for phase {phase.id} with {len(repos)} repos")
started_at = datetime.now(timezone.utc)
result = await run_code_exploration(
db=db,
@@ -343,10 +328,7 @@ async def get_or_run_code_exploration(
phase.code_exploration_cached_at = datetime.now(timezone.utc)
phase.code_exploration_repos_hash = repos_hash
db.commit()
- logger.info(
- f"Cached code exploration for phase {phase.id} "
- f"({len(result.output)} chars)"
- )
+ logger.info(f"Cached code exploration for phase {phase.id} ({len(result.output)} chars)")
# Log LLM usage for the code exploration call
if job_id and result.prompt_tokens and result.completion_tokens:
@@ -400,17 +382,13 @@ async def run_code_exploration(
# 1. Check if Code Explorer is enabled in platform settings
settings = db.query(PlatformSettings).first()
if not settings or not settings.code_explorer_enabled:
- logger.debug(
- f"Code Explorer disabled for project {project.id}, skipping exploration"
- )
+ logger.debug(f"Code Explorer disabled for project {project.id}, skipping exploration")
return None
# 2. Check if project has repositories
repositories = project.repositories
if not repositories:
- logger.debug(
- f"No repositories configured for project {project.id}, skipping exploration"
- )
+ logger.debug(f"No repositories configured for project {project.id}, skipping exploration")
return None
# 3. Get Anthropic API key from platform settings
@@ -418,9 +396,7 @@ async def run_code_exploration(
anthropic_key = get_code_explorer_api_key(db)
if not anthropic_key:
- logger.warning(
- f"Code Explorer API key not configured, skipping exploration for project {project.id}"
- )
+ logger.warning(f"Code Explorer API key not configured, skipping exploration for project {project.id}")
return None
# 4. Build repos list with GitHub tokens
@@ -431,22 +407,20 @@ async def run_code_exploration(
github_token = None
if repo.github_integration_config_id:
try:
- github_token = await get_github_token_for_org(
- db, project.org_id, repo.github_integration_config_id
- )
+ github_token = await get_github_token_for_org(db, project.org_id, repo.github_integration_config_id)
except Exception as e:
# Continue without token (works for public repos)
- logger.warning(
- f"Failed to get GitHub token for repo {repo.slug}: {e}"
- )
-
- repos.append({
- "slug": repo.slug,
- "repo_url": repo.repo_url,
- "branch": repo.default_branch or "main",
- "github_token": github_token,
- "user_remarks": repo.user_remarks,
- })
+ logger.warning(f"Failed to get GitHub token for repo {repo.slug}: {e}")
+
+ repos.append(
+ {
+ "slug": repo.slug,
+ "repo_url": repo.repo_url,
+ "branch": repo.default_branch or "main",
+ "github_token": github_token,
+ "user_remarks": repo.user_remarks,
+ }
+ )
# 5. Build exploration prompt
exploration_prompt = _build_exploration_prompt(
@@ -456,9 +430,7 @@ async def run_code_exploration(
classification=classification,
)
- logger.info(
- f"Running code exploration for project {project.id} with {len(repos)} repos"
- )
+ logger.info(f"Running code exploration for project {project.id} with {len(repos)} repos")
# 6. Call code explorer service
try:
@@ -478,16 +450,13 @@ async def run_code_exploration(
# Check for success and actual output
if not result["success"]:
logger.warning(
- f"Code exploration failed for project {project.id}: "
- f"{result.get('error') or result.get('error_code')}"
+ f"Code exploration failed for project {project.id}: {result.get('error') or result.get('error_code')}"
)
return None
output = result.get("output")
if not output or not output.strip():
- logger.warning(
- f"Code exploration returned empty output for project {project.id}"
- )
+ logger.warning(f"Code exploration returned empty output for project {project.id}")
return None
logger.info(
@@ -508,7 +477,5 @@ async def run_code_exploration(
except Exception as e:
# Log but don't fail - exploration is optional context
- logger.warning(
- f"Code exploration error for project {project.id}: {e}"
- )
+ logger.warning(f"Code exploration error for project {project.id}: {e}")
return None
diff --git a/backend/app/agents/brainstorm_conversation/critic_pruner.py b/backend/app/agents/brainstorm_conversation/critic_pruner.py
index 2ca2660..89c0172 100644
--- a/backend/app/agents/brainstorm_conversation/critic_pruner.py
+++ b/backend/app/agents/brainstorm_conversation/critic_pruner.py
@@ -20,18 +20,17 @@
from autogen_core import CancellationToken
from autogen_core.models import ChatCompletionClient
+from .logging_config import get_agent_logger
from .types import (
+ AspectCategory,
+ ComplexityCaps,
+ ExistingConversationContext,
GeneratedAspect,
GeneratedClarificationQuestion,
GeneratedMCQ,
MCQChoice,
QuestionPriority,
- AspectCategory,
- ComplexityCaps,
- PhaseComplexity,
- ExistingConversationContext,
)
-from .logging_config import get_agent_logger
from .utils import strip_markdown_json
@@ -159,7 +158,7 @@ async def prune_and_refine(
project_id: Optional[str] = None,
phase_summary: Optional[str] = None,
phase_description: Optional[str] = None,
- existing_context: Optional[ExistingConversationContext] = None
+ existing_context: Optional[ExistingConversationContext] = None,
) -> List[GeneratedAspect]:
"""
Refine the generated aspects and questions for quality.
@@ -188,7 +187,7 @@ async def prune_and_refine(
aspect_count=len(aspects),
question_count=total_questions,
max_aspects=caps.max_aspects,
- max_questions=caps.total_max_questions
+ max_questions=caps.total_max_questions,
)
try:
@@ -199,15 +198,10 @@ async def prune_and_refine(
# Call the agent
self.logger.log_llm_call(
- prompt=prompt,
- model=str(self.model_client),
- operation="prune_aspects_and_questions"
+ prompt=prompt, model=str(self.model_client), operation="prune_aspects_and_questions"
)
- response = await self.agent.on_messages(
- [TextMessage(content=prompt, source="user")],
- CancellationToken()
- )
+ response = await self.agent.on_messages([TextMessage(content=prompt, source="user")], CancellationToken())
response_text = response.chat_message.content
if isinstance(response_text, list):
@@ -220,10 +214,9 @@ async def prune_and_refine(
try:
result_data = json.loads(cleaned_response)
except json.JSONDecodeError as e:
- self.logger.log_error(e, {
- "raw_response": response_text[:500],
- "cleaned_response": cleaned_response[:500]
- })
+ self.logger.log_error(
+ e, {"raw_response": response_text[:500], "cleaned_response": cleaned_response[:500]}
+ )
raise ValueError(f"Failed to parse pruner response as JSON: {e}")
# Extract refined aspects
@@ -253,16 +246,13 @@ async def prune_and_refine(
initial_questions=total_questions,
final_questions=final_questions,
pruned_items=[], # Could track specific items if needed
- pruning_summary=pruning_summary
+ pruning_summary=pruning_summary,
)
self.logger.log_agent_complete(
kept_aspects=len(refined_aspects),
kept_questions=final_questions,
- within_caps=(
- len(refined_aspects) <= caps.max_aspects and
- final_questions <= caps.total_max_questions
- )
+ within_caps=(len(refined_aspects) <= caps.max_aspects and final_questions <= caps.total_max_questions),
)
return refined_aspects
@@ -278,7 +268,7 @@ def _build_prompt(
phase_summary: Optional[str] = None,
phase_description: Optional[str] = None,
existing_context: Optional[ExistingConversationContext] = None,
- is_user_initiated: bool = False
+ is_user_initiated: bool = False,
) -> str:
"""
Build the quality review prompt.
@@ -326,7 +316,7 @@ def _build_prompt(
prompt += "\n"
# Add caps information
- prompt += f"### Limits:\n"
+ prompt += "### Limits:\n"
prompt += f"- Complexity level: {caps.complexity.value}\n"
prompt += f"- Max aspects: {caps.max_aspects}\n"
prompt += f"- Max questions per aspect: {caps.max_questions_per_aspect}\n"
@@ -409,7 +399,7 @@ def _parse_aspect(self, a_data: dict, order_index: int) -> GeneratedAspect:
category=category,
order_index=order_index,
clarification_questions=questions,
- internal_agent_notes=a_data.get("pruning_note")
+ internal_agent_notes=a_data.get("pruning_note"),
)
def _parse_question(self, q_data: dict) -> GeneratedClarificationQuestion:
@@ -441,7 +431,7 @@ def _parse_question(self, q_data: dict) -> GeneratedClarificationQuestion:
description=description,
priority=priority,
initial_mcq=mcq,
- internal_agent_notes=q_data.get("pruning_note")
+ internal_agent_notes=q_data.get("pruning_note"),
)
def _parse_mcq(self, mcq_data: dict) -> GeneratedMCQ:
@@ -497,8 +487,7 @@ def _parse_mcq(self, mcq_data: dict) -> GeneratedMCQ:
)
def _sort_questions_by_priority(
- self,
- questions: List[GeneratedClarificationQuestion]
+ self, questions: List[GeneratedClarificationQuestion]
) -> List[GeneratedClarificationQuestion]:
"""
Sort questions by priority (must_have first, then important, then optional).
@@ -517,10 +506,7 @@ def _sort_questions_by_priority(
return sorted(questions, key=lambda q: priority_order.get(q.priority, 1))
def _enforce_hard_caps(
- self,
- aspects: List[GeneratedAspect],
- caps: ComplexityCaps,
- is_user_initiated: bool = False
+ self, aspects: List[GeneratedAspect], caps: ComplexityCaps, is_user_initiated: bool = False
) -> List[GeneratedAspect]:
"""
Enforce hard caps if LLM didn't follow instructions.
@@ -567,7 +553,7 @@ def _enforce_hard_caps(
self.logger.logger.warning(
f"LLM returned {len(aspects)} aspects but max is {caps.max_aspects}. Truncating."
)
- aspects = aspects[:caps.max_aspects]
+ aspects = aspects[: caps.max_aspects]
# Limit questions per aspect and total questions (priority-aware)
total_questions = 0
@@ -580,7 +566,7 @@ def _enforce_hard_caps(
self.logger.logger.info(
f"Aspect '{aspect.title}': removing {removed_count} lowest-priority questions to meet per-aspect cap"
)
- sorted_questions = sorted_questions[:caps.max_questions_per_aspect]
+ sorted_questions = sorted_questions[: caps.max_questions_per_aspect]
# Check total cap
remaining_budget = caps.total_max_questions - total_questions
@@ -598,8 +584,7 @@ def _enforce_hard_caps(
async def create_critic_pruner(
- model_client: ChatCompletionClient,
- project_id: Optional[str] = None
+ model_client: ChatCompletionClient, project_id: Optional[str] = None
) -> CriticPrunerAgent:
"""
Factory function to create a Critic/Pruner Agent.
diff --git a/backend/app/agents/brainstorm_conversation/logging_config.py b/backend/app/agents/brainstorm_conversation/logging_config.py
index dd081c0..8d91cea 100644
--- a/backend/app/agents/brainstorm_conversation/logging_config.py
+++ b/backend/app/agents/brainstorm_conversation/logging_config.py
@@ -4,10 +4,10 @@
Provides structured logging for all agent decisions, LLM calls, and workflow steps.
"""
-import logging
import json
-from typing import Any, Dict, Optional
+import logging
from datetime import datetime, timezone
+from typing import Any, Dict, Optional
class BrainstormAgentLogger:
@@ -33,19 +33,12 @@ def __init__(self, agent_name: str, project_id: Optional[str] = None):
# Ensure structured output
if not self.logger.handlers:
handler = logging.StreamHandler()
- formatter = logging.Formatter(
- '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
- )
+ formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
handler.setFormatter(formatter)
self.logger.addHandler(handler)
self.logger.setLevel(logging.INFO)
- def _structured_log(
- self,
- level: str,
- event: str,
- extra_data: Optional[Dict[str, Any]] = None
- ) -> None:
+ def _structured_log(self, level: str, event: str, extra_data: Optional[Dict[str, Any]] = None) -> None:
"""
Log a structured event.
@@ -78,12 +71,7 @@ def log_agent_complete(self, **kwargs: Any) -> None:
self._structured_log("info", f"{self.agent_name}_complete", kwargs)
def log_llm_call(
- self,
- prompt: str,
- model: str,
- response: Optional[str] = None,
- tokens_used: Optional[int] = None,
- **kwargs: Any
+ self, prompt: str, model: str, response: Optional[str] = None, tokens_used: Optional[int] = None, **kwargs: Any
) -> None:
"""
Log an LLM API call.
@@ -99,7 +87,7 @@ def log_llm_call(
"model": model,
"prompt_preview": prompt[:200] + "..." if len(prompt) > 200 else prompt,
"prompt_length": len(prompt),
- **kwargs
+ **kwargs,
}
if response:
@@ -120,11 +108,7 @@ def log_decision(self, decision: str, rationale: str, **kwargs: Any) -> None:
rationale: Explanation of why
**kwargs: Additional context
"""
- data = {
- "decision": decision,
- "rationale": rationale,
- **kwargs
- }
+ data = {"decision": decision, "rationale": rationale, **kwargs}
self._structured_log("info", "agent_decision", data)
def log_pruning_stats(
@@ -134,7 +118,7 @@ def log_pruning_stats(
initial_questions: int,
final_questions: int,
pruned_items: list,
- **kwargs: Any
+ **kwargs: Any,
) -> None:
"""
Log aspect and question pruning statistics.
@@ -155,7 +139,7 @@ def log_pruning_stats(
"pruned_aspects": initial_aspects - final_aspects,
"pruned_questions": initial_questions - final_questions,
"pruned_items": pruned_items,
- **kwargs
+ **kwargs,
}
self._structured_log("info", "pruning_stats", data)
@@ -186,11 +170,7 @@ def log_workflow_transition(self, from_state: str, to_state: str, **kwargs: Any)
to_state: New state
**kwargs: Additional context
"""
- data = {
- "from_state": from_state,
- "to_state": to_state,
- **kwargs
- }
+ data = {"from_state": from_state, "to_state": to_state, **kwargs}
self._structured_log("info", "workflow_transition", data)
diff --git a/backend/app/agents/brainstorm_conversation/orchestrator.py b/backend/app/agents/brainstorm_conversation/orchestrator.py
index 7ca0bad..6da657a 100644
--- a/backend/app/agents/brainstorm_conversation/orchestrator.py
+++ b/backend/app/agents/brainstorm_conversation/orchestrator.py
@@ -11,36 +11,37 @@
"""
import asyncio
-from typing import Optional, Dict, Any, List, Callable, TYPE_CHECKING
+from typing import TYPE_CHECKING, Any, Callable, Dict, Optional
from uuid import UUID
from autogen_core.models import ChatCompletionClient
if TYPE_CHECKING:
from sqlalchemy.orm import Session
- from app.models.project import Project
+
from app.models.brainstorming_phase import BrainstormingPhase
+ from app.models.project import Project
-from app.agents.llm_client import create_litellm_client, LLMCallLogger
+from app.agents.llm_client import LLMCallLogger, create_litellm_client
+from .aspect_generator import AspectGeneratorAgent
+from .classifier import ComplexityClassifierAgent
+from .critic_pruner import CriticPrunerAgent
+from .logging_config import get_agent_logger
+from .question_generator import QuestionGeneratorAgent
+from .summarizer import SummarizerAgent
from .types import (
+ AGENT_METADATA,
+ USER_INITIATED_CAPS,
+ WORKFLOW_STEP_DISPLAY_NAMES,
+ WORKFLOW_STEPS,
BrainstormConversationContext,
BrainstormConversationResult,
+ ClassificationResult,
GeneratedAspect,
PhaseComplexity,
get_caps_for_complexity,
- AGENT_METADATA,
- WORKFLOW_STEPS,
- WORKFLOW_STEP_DISPLAY_NAMES,
- USER_INITIATED_CAPS,
- ClassificationResult,
)
-from .summarizer import SummarizerAgent
-from .classifier import ComplexityClassifierAgent
-from .aspect_generator import AspectGeneratorAgent
-from .question_generator import QuestionGeneratorAgent
-from .critic_pruner import CriticPrunerAgent
-from .logging_config import get_agent_logger
class BrainstormConversationOrchestrator:
@@ -105,6 +106,7 @@ def __init__(
self.call_logger = None
if job_id and not mock_mode_enabled:
from app.database import SessionLocal
+
self.call_logger = LLMCallLogger(
db_session_factory=SessionLocal,
job_id=job_id,
@@ -134,16 +136,10 @@ def __init__(
self.critic_pruner = None
self.logger.log_agent_start(
- model=str(model_client) if model_client else "mock",
- provider=provider if not mock_mode_enabled else "mock"
+ model=str(model_client) if model_client else "mock", provider=provider if not mock_mode_enabled else "mock"
)
- def _create_model_client(
- self,
- provider: str,
- api_key: str,
- config: Dict[str, Any]
- ) -> ChatCompletionClient:
+ def _create_model_client(self, provider: str, api_key: str, config: Dict[str, Any]) -> ChatCompletionClient:
"""
Create a model client for the specified provider using LiteLLM.
@@ -251,10 +247,7 @@ async def generate_brainstorm_conversations(
# **NORMAL MODE**: Proceed with standard LLM workflow
self.logger.log_workflow_transition(
- from_state="start",
- to_state="summarizing",
- project_id=project_id_str,
- phase_id=phase_id_str
+ from_state="start", to_state="summarizing", project_id=project_id_str, phase_id=phase_id_str
)
try:
@@ -273,7 +266,7 @@ async def generate_brainstorm_conversations(
from_state="summarizing",
to_state="generating_aspects",
project_id=project_id_str,
- user_initiated=True
+ user_initiated=True,
)
complexity = PhaseComplexity.LOW
@@ -283,21 +276,19 @@ async def generate_brainstorm_conversations(
classification = ClassificationResult(
complexity=complexity,
rationale="User-initiated mode - conservative generation",
- suggested_focus_areas=[context.user_initiated_context.user_prompt[:100]]
+ suggested_focus_areas=[context.user_initiated_context.user_prompt[:100]],
)
self.logger.log_decision(
decision="User-initiated mode: using conservative caps",
rationale=f"User request: {context.user_initiated_context.user_prompt[:100]}",
project_id=project_id_str,
- user_initiated=True
+ user_initiated=True,
)
else:
# Normal mode: Run classifier
self.logger.log_workflow_transition(
- from_state="summarizing",
- to_state="classifying",
- project_id=project_id_str
+ from_state="summarizing", to_state="classifying", project_id=project_id_str
)
# Step 2: Classify complexity
@@ -305,9 +296,7 @@ async def generate_brainstorm_conversations(
if self.call_logger:
self.call_logger.set_agent("classifier", "Classifier")
classification = await self.classifier.classify(
- summarized_context,
- phase_type=context.phase_type,
- project_id=project_id_str
+ summarized_context, phase_type=context.phase_type, project_id=project_id_str
)
complexity = classification.complexity
@@ -317,14 +306,14 @@ async def generate_brainstorm_conversations(
decision=f"Classified as {complexity.value} complexity",
rationale=classification.rationale,
project_id=project_id_str,
- suggested_focus_areas=classification.suggested_focus_areas
+ suggested_focus_areas=classification.suggested_focus_areas,
)
self.logger.log_workflow_transition(
from_state="classifying",
to_state="exploring_code",
project_id=project_id_str,
- complexity=complexity.value
+ complexity=complexity.value,
)
# Step 2.5: Code Exploration (optional - provides codebase context)
@@ -354,15 +343,11 @@ async def generate_brainstorm_conversations(
project_id=project_id_str,
)
else:
- self.logger.logger.debug(
- f"Code exploration returned no results for project {project_id_str}"
- )
+ self.logger.logger.debug(f"Code exploration returned no results for project {project_id_str}")
except Exception as e:
# Log but continue without code exploration context
self.logger.log_error(e, {"context": "code_exploration_stage"})
- self.logger.logger.warning(
- f"Code exploration failed, continuing without: {e}"
- )
+ self.logger.logger.warning(f"Code exploration failed, continuing without: {e}")
self.logger.log_workflow_transition(
from_state="exploring_code",
@@ -390,14 +375,14 @@ async def generate_brainstorm_conversations(
complexity=complexity.value,
total_aspects=0,
total_questions=0,
- note="Phase fully explored - cumulative cap reached"
+ note="Phase fully explored - cumulative cap reached",
)
return BrainstormConversationResult(
aspects=[],
complexity=complexity,
total_aspects=0,
total_questions=0,
- generation_notes=["Phase is fully explored. No additional questions needed."]
+ generation_notes=["Phase is fully explored. No additional questions needed."],
)
# Low engagement early stop: don't generate more if user isn't answering existing questions
@@ -416,7 +401,7 @@ async def generate_brainstorm_conversations(
complexity=complexity.value,
total_aspects=0,
total_questions=0,
- note=f"Low engagement - only {answered_ratio:.1%} answered"
+ note=f"Low engagement - only {answered_ratio:.1%} answered",
)
return BrainstormConversationResult(
aspects=[],
@@ -426,13 +411,14 @@ async def generate_brainstorm_conversations(
generation_notes=[
f"Only {answered_count}/{existing_questions} questions answered. "
"Please answer existing questions before generating more."
- ]
+ ],
)
# Create adjusted caps for this generation based on remaining budget
# For user-initiated, keep USER_INITIATED_CAPS as-is (already conservative)
if not is_user_initiated:
from .types import ComplexityCaps
+
caps = ComplexityCaps(
complexity=caps.complexity,
min_aspects=min(caps.min_aspects, remaining_aspect_budget),
@@ -457,14 +443,14 @@ async def generate_brainstorm_conversations(
tech_stack_context=context.tech_stack_context, # Pass tech stack for 2nd-order questions
code_exploration_context=code_exploration_context, # Pass codebase analysis
sibling_phases_context=context.sibling_phases_context, # Pass sibling phase context
- project_id=project_id_str
+ project_id=project_id_str,
)
self.logger.log_workflow_transition(
from_state="generating_aspects",
to_state="generating_questions",
project_id=project_id_str,
- aspect_count=len(aspects)
+ aspect_count=len(aspects),
)
# Step 4: Generate questions for each aspect
@@ -482,7 +468,7 @@ async def generate_brainstorm_conversations(
tech_stack_context=context.tech_stack_context, # Pass tech stack for 2nd-order questions
code_exploration_context=code_exploration_context, # Pass codebase analysis
sibling_phases_context=context.sibling_phases_context, # Pass sibling phase context
- project_id=project_id_str
+ project_id=project_id_str,
)
total_questions = sum(len(a.clarification_questions) for a in aspects_with_questions)
@@ -491,7 +477,7 @@ async def generate_brainstorm_conversations(
from_state="generating_questions",
to_state="pruning",
project_id=project_id_str,
- total_questions=total_questions
+ total_questions=total_questions,
)
# Step 5: Refine for quality
@@ -505,7 +491,7 @@ async def generate_brainstorm_conversations(
project_id=project_id_str,
phase_summary=summarized_context.phase_summary,
phase_description=context.phase_description, # Pass original for full context
- existing_context=context.existing_context
+ existing_context=context.existing_context,
)
final_question_count = sum(len(a.clarification_questions) for a in refined_aspects)
@@ -515,7 +501,7 @@ async def generate_brainstorm_conversations(
to_state="complete",
project_id=project_id_str,
final_aspects=len(refined_aspects),
- final_questions=final_question_count
+ final_questions=final_question_count,
)
# Step 6: Complete
@@ -529,15 +515,15 @@ async def generate_brainstorm_conversations(
total_questions=final_question_count,
generation_notes=[
f"Complexity: {complexity.value}",
- f"Suggested focus areas: {', '.join(classification.suggested_focus_areas)}"
- ]
+ f"Suggested focus areas: {', '.join(classification.suggested_focus_areas)}",
+ ],
)
self.logger.log_agent_complete(
project_id=project_id_str,
complexity=complexity.value,
total_aspects=len(refined_aspects),
- total_questions=final_question_count
+ total_questions=final_question_count,
)
return result
@@ -545,17 +531,11 @@ async def generate_brainstorm_conversations(
except Exception as e:
self.logger.log_error(e, {"project_id": project_id_str})
self.logger.log_workflow_transition(
- from_state="error",
- to_state="failed",
- project_id=project_id_str,
- error=str(e)
+ from_state="error", to_state="failed", project_id=project_id_str, error=str(e)
)
raise
- async def _run_mock_workflow(
- self,
- context: BrainstormConversationContext
- ) -> BrainstormConversationResult:
+ async def _run_mock_workflow(self, context: BrainstormConversationContext) -> BrainstormConversationResult:
"""
Run mock workflow for testing without LLM calls.
@@ -565,9 +545,7 @@ async def _run_mock_workflow(
Returns:
Mock BrainstormConversationResult
"""
- self.logger.logger.info(
- f"Mock mode enabled: simulating workflow with {self.mock_delay_seconds}s delays"
- )
+ self.logger.logger.info(f"Mock mode enabled: simulating workflow with {self.mock_delay_seconds}s delays")
# Step through workflow with delays to simulate real processing
workflow_steps_with_agents = [
@@ -586,11 +564,10 @@ async def _run_mock_workflow(
# Return mock result
from .types import (
- GeneratedAspect,
+ AspectCategory,
GeneratedClarificationQuestion,
GeneratedMCQ,
MCQChoice,
- AspectCategory,
QuestionPriority,
)
@@ -613,10 +590,10 @@ async def _run_mock_workflow(
MCQChoice(id="c", label="Both internal and external"),
MCQChoice(id="d", label="Something else"),
],
- explanation="Understanding the target users is critical for design"
- )
+ explanation="Understanding the target users is critical for design",
+ ),
)
- ]
+ ],
),
GeneratedAspect(
title="Technical Architecture",
@@ -635,11 +612,11 @@ async def _run_mock_workflow(
MCQChoice(id="b", label="Evaluate new technologies"),
MCQChoice(id="c", label="Something else"),
],
- explanation="Technology choices impact development timeline"
- )
+ explanation="Technology choices impact development timeline",
+ ),
)
- ]
- )
+ ],
+ ),
]
return BrainstormConversationResult(
@@ -647,7 +624,7 @@ async def _run_mock_workflow(
complexity=PhaseComplexity.MEDIUM,
total_aspects=len(mock_aspects),
total_questions=sum(len(a.clarification_questions) for a in mock_aspects),
- generation_notes=["Mock mode - using test data"]
+ generation_notes=["Mock mode - using test data"],
)
async def close(self):
diff --git a/backend/app/agents/brainstorm_conversation/question_generator.py b/backend/app/agents/brainstorm_conversation/question_generator.py
index 7075d34..bb2667c 100644
--- a/backend/app/agents/brainstorm_conversation/question_generator.py
+++ b/backend/app/agents/brainstorm_conversation/question_generator.py
@@ -14,23 +14,23 @@
from autogen_core import CancellationToken
from autogen_core.models import ChatCompletionClient
+from .logging_config import get_agent_logger
from .types import (
+ STANDARD_MCQ_CHOICES,
+ CodeExplorationContext,
+ ComplexityCaps,
+ CrossProjectContext,
+ ExistingConversationContext,
GeneratedAspect,
GeneratedClarificationQuestion,
GeneratedMCQ,
MCQChoice,
QuestionPriority,
+ SiblingPhasesContext,
SummarizedPhaseContext,
- ComplexityCaps,
- STANDARD_MCQ_CHOICES,
- ExistingConversationContext,
- UserInitiatedContext,
- CrossProjectContext,
TechStackContext,
- CodeExplorationContext,
- SiblingPhasesContext,
+ UserInitiatedContext,
)
-from .logging_config import get_agent_logger
from .utils import strip_markdown_json
@@ -158,7 +158,7 @@ async def generate_questions_for_aspect(
tech_stack_context: Optional[TechStackContext] = None,
code_exploration_context: Optional[CodeExplorationContext] = None,
sibling_phases_context: Optional[SiblingPhasesContext] = None,
- project_id: Optional[str] = None
+ project_id: Optional[str] = None,
) -> List[GeneratedClarificationQuestion]:
"""
Generate clarification questions for a specific aspect.
@@ -185,15 +185,22 @@ async def generate_questions_for_aspect(
project_id=project_id,
aspect_title=aspect.title,
complexity=caps.complexity.value,
- target_range=f"{caps.min_questions_per_aspect}-{caps.max_questions_per_aspect}"
+ target_range=f"{caps.min_questions_per_aspect}-{caps.max_questions_per_aspect}",
)
try:
# Build the prompt
prompt = self._build_prompt(
- aspect, summarized_context, caps, existing_context, user_initiated_context,
- grounding_context, cross_project_context, tech_stack_context,
- code_exploration_context, sibling_phases_context
+ aspect,
+ summarized_context,
+ caps,
+ existing_context,
+ user_initiated_context,
+ grounding_context,
+ cross_project_context,
+ tech_stack_context,
+ code_exploration_context,
+ sibling_phases_context,
)
# Call the agent
@@ -201,7 +208,7 @@ async def generate_questions_for_aspect(
prompt=prompt,
model=str(self.model_client),
operation="generate_questions_for_aspect",
- aspect=aspect.title
+ aspect=aspect.title,
)
# Create fresh agent to avoid conversation history accumulation
@@ -212,10 +219,7 @@ async def generate_questions_for_aspect(
system_message=self._get_system_message(),
model_client=self.model_client,
)
- response = await fresh_agent.on_messages(
- [TextMessage(content=prompt, source="user")],
- CancellationToken()
- )
+ response = await fresh_agent.on_messages([TextMessage(content=prompt, source="user")], CancellationToken())
response_text = response.chat_message.content
if isinstance(response_text, list):
@@ -228,10 +232,9 @@ async def generate_questions_for_aspect(
try:
questions_data = json.loads(cleaned_response)
except json.JSONDecodeError as e:
- self.logger.log_error(e, {
- "raw_response": response_text[:500],
- "cleaned_response": cleaned_response[:500]
- })
+ self.logger.log_error(
+ e, {"raw_response": response_text[:500], "cleaned_response": cleaned_response[:500]}
+ )
raise ValueError(f"Failed to parse question generator response as JSON: {e}")
# Convert to GeneratedClarificationQuestion objects
@@ -245,9 +248,7 @@ async def generate_questions_for_aspect(
# Skip invalid questions but continue processing
self.logger.log_agent_complete(
- aspect=aspect.title,
- generated_count=len(questions),
- priorities=[q.priority.value for q in questions]
+ aspect=aspect.title, generated_count=len(questions), priorities=[q.priority.value for q in questions]
)
return questions
@@ -268,7 +269,7 @@ async def generate_questions_for_all_aspects(
tech_stack_context: Optional[TechStackContext] = None,
code_exploration_context: Optional[CodeExplorationContext] = None,
sibling_phases_context: Optional[SiblingPhasesContext] = None,
- project_id: Optional[str] = None
+ project_id: Optional[str] = None,
) -> List[GeneratedAspect]:
"""
Generate clarification questions for all aspects.
@@ -304,7 +305,7 @@ async def generate_questions_for_all_aspects(
tech_stack_context=tech_stack_context,
code_exploration_context=code_exploration_context,
sibling_phases_context=sibling_phases_context,
- project_id=project_id
+ project_id=project_id,
)
for aspect in aspects
]
@@ -323,15 +324,13 @@ async def generate_questions_for_all_aspects(
self.logger.log_decision(
decision=f"Generated {total_questions} questions for {len(aspects)} aspects",
rationale=f"Complexity: {caps.complexity.value}",
- project_id=project_id
+ project_id=project_id,
)
return aspects
def _build_existing_questions_section(
- self,
- existing_context: ExistingConversationContext,
- aspect_title: str
+ self, existing_context: ExistingConversationContext, aspect_title: str
) -> str:
"""
Build the existing questions context section for the prompt.
@@ -365,13 +364,15 @@ def _build_existing_questions_section(
if related_questions:
sections.append("### EXISTING QUESTIONS - DO NOT DUPLICATE:")
- sections.append("The following questions have already been asked. DO NOT create questions with the same or similar meaning.")
+ sections.append(
+ "The following questions have already been asked. DO NOT create questions with the same or similar meaning."
+ )
sections.append("")
for q in related_questions:
if q.status == "answered" and q.decision_summary:
sections.append(f"- Q: {q.question_title}")
- sections.append(f" Decision: \"{q.decision_summary}\"")
+ sections.append(f' Decision: "{q.decision_summary}"')
# Show unresolved points if any (limit to 2 for brevity)
if q.unresolved_points:
sections.append(f" Open: {', '.join(q.unresolved_points[:2])}")
@@ -389,10 +390,7 @@ def _build_existing_questions_section(
return "\n".join(sections)
- def _build_user_initiated_section(
- self,
- user_initiated_context: UserInitiatedContext
- ) -> str:
+ def _build_user_initiated_section(self, user_initiated_context: UserInitiatedContext) -> str:
"""
Build the user-initiated context section for the prompt.
@@ -468,11 +466,15 @@ def _build_tech_stack_section(self, tech_stack_context: TechStackContext) -> str
sections = []
sections.append("### TECHNOLOGY STACK CONTEXT")
sections.append("")
- sections.append(f"The user indicated this tech stack during initial discussion: **{tech_stack_context.proposed_stack}**")
+ sections.append(
+ f"The user indicated this tech stack during initial discussion: **{tech_stack_context.proposed_stack}**"
+ )
sections.append("")
if tech_stack_context.has_grounding:
- sections.append("NOTE: This project has grounding documentation (agents.md), which may already document tech decisions.")
+ sections.append(
+ "NOTE: This project has grounding documentation (agents.md), which may already document tech decisions."
+ )
sections.append("Be conservative - only ask stack questions if they seem genuinely unresolved.")
sections.append("")
@@ -493,11 +495,7 @@ def _build_tech_stack_section(self, tech_stack_context: TechStackContext) -> str
return "\n".join(sections)
- def _build_cross_project_section(
- self,
- cross_project_context: CrossProjectContext,
- aspect_title: str
- ) -> str:
+ def _build_cross_project_section(self, cross_project_context: CrossProjectContext, aspect_title: str) -> str:
"""
Build the cross-phase and project-level context section for the prompt.
@@ -531,7 +529,8 @@ def _build_cross_project_section(
for phase in cross_project_context.other_phases:
# Filter to show decisions most relevant to this aspect
relevant_decisions = [
- d for d in phase.decisions
+ d
+ for d in phase.decisions
if any(word in d.aspect_title.lower() for word in aspect_title.lower().split())
or any(word in d.question_title.lower() for word in aspect_title.lower().split())
][:5] # Cap at 5 relevant decisions
@@ -543,7 +542,7 @@ def _build_cross_project_section(
if relevant_decisions:
sections.append(f"**{phase.phase_title}:**")
for decision in relevant_decisions:
- sections.append(f" - {decision.question_title}: \"{decision.decision_summary_short}\"")
+ sections.append(f' - {decision.question_title}: "{decision.decision_summary_short}"')
sections.append("")
# Section 2: Project-level features
@@ -552,7 +551,7 @@ def _build_cross_project_section(
sections.append("")
for feat in cross_project_context.project_features[:10]: # Cap at 10
- sections.append(f" - [{feat.module_title}] {feat.feature_title}: \"{feat.decision_summary_short}\"")
+ sections.append(f' - [{feat.module_title}] {feat.feature_title}: "{feat.decision_summary_short}"')
sections.append("")
# Guidance
@@ -563,11 +562,7 @@ def _build_cross_project_section(
return "\n".join(sections)
- def _build_sibling_phases_section(
- self,
- sibling_phases_context: SiblingPhasesContext,
- aspect_title: str
- ) -> str:
+ def _build_sibling_phases_section(self, sibling_phases_context: SiblingPhasesContext, aspect_title: str) -> str:
"""
Build the sibling phases context section for the question generation prompt.
@@ -586,7 +581,7 @@ def _build_sibling_phases_section(
return ""
sections = []
- sections.append(f"### SIBLING PHASE DECISIONS (container: \"{sibling_phases_context.container_title}\"):")
+ sections.append(f'### SIBLING PHASE DECISIONS (container: "{sibling_phases_context.container_title}"):')
sections.append("Decisions from related phases in the same container. Avoid duplicating these questions:")
sections.append("")
@@ -595,7 +590,8 @@ def _build_sibling_phases_section(
for phase in sibling_phases_context.sibling_phases:
# Filter decisions by keyword overlap with current aspect
relevant_decisions = [
- d for d in phase.decisions
+ d
+ for d in phase.decisions
if any(word in d.aspect_title.lower() for word in aspect_words)
or any(word in d.question_title.lower() for word in aspect_words)
][:5]
@@ -607,7 +603,7 @@ def _build_sibling_phases_section(
if relevant_decisions:
sections.append(f"**{phase.phase_title}:**")
for decision in relevant_decisions:
- sections.append(f" - {decision.question_title}: \"{decision.decision_summary_short}\"")
+ sections.append(f' - {decision.question_title}: "{decision.decision_summary_short}"')
sections.append("")
sections.append("Avoid asking questions already answered in sibling phases above.")
@@ -615,10 +611,7 @@ def _build_sibling_phases_section(
return "\n".join(sections)
- def _build_code_exploration_section(
- self,
- code_exploration_context: CodeExplorationContext
- ) -> str:
+ def _build_code_exploration_section(self, code_exploration_context: CodeExplorationContext) -> str:
"""
Build the code exploration context section for the prompt.
@@ -654,7 +647,7 @@ def _build_prompt(
cross_project_context: Optional[CrossProjectContext] = None,
tech_stack_context: Optional[TechStackContext] = None,
code_exploration_context: Optional[CodeExplorationContext] = None,
- sibling_phases_context: Optional[SiblingPhasesContext] = None
+ sibling_phases_context: Optional[SiblingPhasesContext] = None,
) -> str:
"""
Build the question generation prompt for a specific aspect.
@@ -677,7 +670,7 @@ def _build_prompt(
prompt += f"clarification questions for the aspect: **{aspect.title}**\n\n"
# Add aspect details
- prompt += f"### Aspect Details:\n"
+ prompt += "### Aspect Details:\n"
prompt += f"- Title: {aspect.title}\n"
prompt += f"- Category: {aspect.category.value}\n"
prompt += f"- Description: {aspect.description}\n\n"
@@ -780,7 +773,7 @@ def _parse_question(self, q_data: dict) -> GeneratedClarificationQuestion:
description=description,
priority=priority,
initial_mcq=mcq,
- internal_agent_notes=internal_notes
+ internal_agent_notes=internal_notes,
)
def _parse_mcq(self, mcq_data: dict) -> GeneratedMCQ:
@@ -801,10 +794,10 @@ def _parse_mcq(self, mcq_data: dict) -> GeneratedMCQ:
for i, choice in enumerate(choices_data):
if isinstance(choice, dict):
- choice_id = choice.get("id", chr(ord('a') + i))
+ choice_id = choice.get("id", chr(ord("a") + i))
choice_label = choice.get("label", f"Option {i + 1}")
else:
- choice_id = chr(ord('a') + i)
+ choice_id = chr(ord("a") + i)
choice_label = str(choice)
choices.append(MCQChoice(id=choice_id, label=choice_label))
@@ -850,8 +843,7 @@ def _parse_mcq(self, mcq_data: dict) -> GeneratedMCQ:
async def create_question_generator(
- model_client: ChatCompletionClient,
- project_id: Optional[str] = None
+ model_client: ChatCompletionClient, project_id: Optional[str] = None
) -> QuestionGeneratorAgent:
"""
Factory function to create a Question Generator Agent.
diff --git a/backend/app/agents/brainstorm_conversation/summarizer.py b/backend/app/agents/brainstorm_conversation/summarizer.py
index 4393f3a..34db4d8 100644
--- a/backend/app/agents/brainstorm_conversation/summarizer.py
+++ b/backend/app/agents/brainstorm_conversation/summarizer.py
@@ -13,8 +13,8 @@
from autogen_core import CancellationToken
from autogen_core.models import ChatCompletionClient
-from .types import BrainstormConversationContext, SummarizedPhaseContext
from .logging_config import get_agent_logger
+from .types import BrainstormConversationContext, SummarizedPhaseContext
from .utils import strip_markdown_json
@@ -87,7 +87,7 @@ async def summarize(self, context: BrainstormConversationContext) -> SummarizedP
project_id=str(context.project_id),
phase_id=str(context.brainstorming_phase_id),
phase_type=context.phase_type.value,
- description_length=len(context.phase_description)
+ description_length=len(context.phase_description),
)
try:
@@ -95,16 +95,9 @@ async def summarize(self, context: BrainstormConversationContext) -> SummarizedP
prompt = self._build_prompt(context)
# Call the agent
- self.logger.log_llm_call(
- prompt=prompt,
- model=str(self.model_client),
- operation="summarize_phase_context"
- )
+ self.logger.log_llm_call(prompt=prompt, model=str(self.model_client), operation="summarize_phase_context")
- response = await self.agent.on_messages(
- [TextMessage(content=prompt, source="user")],
- CancellationToken()
- )
+ response = await self.agent.on_messages([TextMessage(content=prompt, source="user")], CancellationToken())
response_text = response.chat_message.content
if isinstance(response_text, list):
@@ -126,7 +119,7 @@ async def summarize(self, context: BrainstormConversationContext) -> SummarizedP
key_objectives=summary_data.get("key_objectives", []),
constraints=summary_data.get("constraints", []),
target_users=summary_data.get("target_users"),
- technical_context=summary_data.get("technical_context")
+ technical_context=summary_data.get("technical_context"),
)
self.logger.log_agent_complete(
@@ -134,7 +127,7 @@ async def summarize(self, context: BrainstormConversationContext) -> SummarizedP
num_objectives=len(result.key_objectives),
num_constraints=len(result.constraints),
has_target_users=bool(result.target_users),
- has_technical_context=bool(result.technical_context)
+ has_technical_context=bool(result.technical_context),
)
return result
@@ -183,8 +176,7 @@ def _build_prompt(self, context: BrainstormConversationContext) -> str:
async def create_summarizer_agent(
- model_client: ChatCompletionClient,
- project_id: Optional[str] = None
+ model_client: ChatCompletionClient, project_id: Optional[str] = None
) -> SummarizerAgent:
"""
Factory function to create a Summarizer Agent.
diff --git a/backend/app/agents/brainstorm_conversation/types.py b/backend/app/agents/brainstorm_conversation/types.py
index 16b6c2f..af1b234 100644
--- a/backend/app/agents/brainstorm_conversation/types.py
+++ b/backend/app/agents/brainstorm_conversation/types.py
@@ -15,26 +15,28 @@
from dataclasses import dataclass, field
from enum import Enum
-from typing import List, Optional, Dict, Any, TYPE_CHECKING
+from typing import TYPE_CHECKING, Any, Dict, List, Optional
from uuid import UUID
if TYPE_CHECKING:
- from sqlalchemy.orm import Session
- from app.models.project import Project
+ pass
# ============================
# Enums
# ============================
+
class PhaseType(str, Enum):
"""Type of brainstorming phase."""
+
INITIAL = "initial" # Greenfield project brainstorming
FEATURE_SPECIFIC = "feature_specific" # Adding features to existing project
class PhaseComplexity(str, Enum):
"""Brainstorming phase complexity level determined by the classifier."""
+
LOW = "low"
MEDIUM = "medium"
HIGH = "high"
@@ -42,6 +44,7 @@ class PhaseComplexity(str, Enum):
class AspectCategory(str, Enum):
"""Categories for aspects (exploration areas)."""
+
USER_EXPERIENCE = "User_Experience"
TECHNICAL_ARCHITECTURE = "Technical_Architecture"
DATA_MANAGEMENT = "Data_Management"
@@ -54,18 +57,21 @@ class AspectCategory(str, Enum):
class QuestionPriority(str, Enum):
"""3-level priority model for clarification questions."""
+
MUST_HAVE = "must_have" # Critical; should not be pruned
- IMPORTANT = "important" # High value; can be pruned only in low-complexity
- OPTIONAL = "optional" # Helpful but non-essential; first candidate for pruning
+ IMPORTANT = "important" # High value; can be pruned only in low-complexity
+ OPTIONAL = "optional" # Helpful but non-essential; first candidate for pruning
# ============================
# Dataclasses
# ============================
+
@dataclass
class MCQChoice:
"""A single choice option for an MCQ question."""
+
id: str
label: str
@@ -76,6 +82,7 @@ class GeneratedMCQ:
An MCQ (Multiple Choice Question) to start a conversation thread.
Each clarification question gets a thread with an initial MCQ.
"""
+
question_text: str
choices: List[MCQChoice]
explanation: Optional[str] = None
@@ -89,6 +96,7 @@ class GeneratedClarificationQuestion:
A clarification question generated by the Question Generator.
This will be stored as a Feature record.
"""
+
title: str
description: str
priority: QuestionPriority
@@ -102,6 +110,7 @@ class GeneratedAspect:
An aspect (exploration area) generated by the Aspect Generator.
This will be stored as a Module record.
"""
+
title: str
description: str
category: AspectCategory
@@ -118,6 +127,7 @@ class UserInitiatedContext:
When present, the pipeline uses conservative caps and incorporates
the user's specific prompt into aspect/question generation.
"""
+
user_prompt: str # User's focus area / request
num_questions: int = 3 # 1-5, default 3
session_history: List[Dict[str, Any]] = field(default_factory=list) # Previous messages
@@ -168,6 +178,7 @@ class BrainstormConversationContext:
Raw context passed to the Orchestrator for generating brainstorm conversations.
This is the full input data available for the multi-agent workflow.
"""
+
project_id: UUID
brainstorming_phase_id: UUID
phase_title: str
@@ -205,6 +216,7 @@ class SummarizedPhaseContext:
Output from the Summarizer Agent.
Condensed summary of the phase description and objectives (300-600 tokens).
"""
+
phase_summary: str
key_objectives: List[str]
constraints: List[str]
@@ -217,6 +229,7 @@ class ClassificationResult:
"""
Output from the Complexity Classification Agent.
"""
+
complexity: PhaseComplexity
rationale: str
suggested_focus_areas: List[str]
@@ -228,6 +241,7 @@ class ComplexityCaps:
Configuration caps based on complexity level.
Defines limits for aspect and question generation.
"""
+
complexity: PhaseComplexity
# Aspects
@@ -248,6 +262,7 @@ class BrainstormConversationResult:
Final result from the Brainstorm Conversation Orchestrator.
Contains all generated aspects with their clarification questions.
"""
+
aspects: List[GeneratedAspect]
complexity: PhaseComplexity
total_aspects: int
@@ -259,12 +274,14 @@ class BrainstormConversationResult:
# Agent Metadata for UI
# ============================
+
@dataclass
class AgentInfo:
"""
UI metadata for an agent in the Brainstorm Conversation workflow.
Used for progress tracking and visual representation.
"""
+
name: str
description: str
color: str # Hex color for UI tag
@@ -275,37 +292,37 @@ class AgentInfo:
"orchestrator": AgentInfo(
name="Orchestrator",
description="Coordinating the brainstorming workflow",
- color="#8B5CF6" # Purple
+ color="#8B5CF6", # Purple
),
"summarizer": AgentInfo(
name="Summarizer",
description="Analyzing phase description and objectives",
- color="#3B82F6" # Blue
+ color="#3B82F6", # Blue
),
"classifier": AgentInfo(
name="Classifier",
description="Determining phase complexity and focus",
- color="#10B981" # Green
+ color="#10B981", # Green
),
"aspect_generator": AgentInfo(
name="Aspect Generator",
description="Identifying key areas to explore",
- color="#F59E0B" # Amber
+ color="#F59E0B", # Amber
),
"question_generator": AgentInfo(
name="Question Generator",
description="Creating clarification questions with MCQs",
- color="#6366F1" # Indigo
+ color="#6366F1", # Indigo
),
"critic_pruner": AgentInfo(
name="Critic",
description="Refining and optimizing question quality",
- color="#EC4899" # Pink
+ color="#EC4899", # Pink
),
"code_explorer": AgentInfo(
name="Code Explorer",
description="Analyzing codebase for relevant patterns",
- color="#14B8A6" # Teal
+ color="#14B8A6", # Teal
),
}
@@ -319,7 +336,7 @@ class AgentInfo:
"generating_aspects",
"generating_questions",
"pruning",
- "complete"
+ "complete",
]
# User-friendly display names for workflow steps
@@ -395,6 +412,7 @@ class AgentInfo:
# Helper Functions
# ============================
+
def get_caps_for_complexity(complexity: PhaseComplexity) -> ComplexityCaps:
"""Get the configuration caps for a given complexity level."""
return COMPLEXITY_CAPS_CONFIG[complexity]
@@ -414,10 +432,7 @@ def create_mcq_choices(options: List[str]) -> List[MCQChoice]:
if not options or len(options) < 1 or len(options) > 3:
raise ValueError("Must provide 1-3 specific options for MCQ question")
- choices = [
- MCQChoice(id=f"option_{i}", label=label)
- for i, label in enumerate(options, start=1)
- ]
+ choices = [MCQChoice(id=f"option_{i}", label=label) for i, label in enumerate(options, start=1)]
# Append standard choices
choices.extend(STANDARD_MCQ_CHOICES)
@@ -429,12 +444,14 @@ def create_mcq_choices(options: List[str]) -> List[MCQChoice]:
# Existing Context Types (for Generate Additional)
# ============================
+
@dataclass
class ExistingQuestionWithAnswer:
"""
A single existing clarification question with its decision context.
Uses thread decision summaries for richer, more scalable context.
"""
+
question_id: str
question_title: str
question_description: str
@@ -453,6 +470,7 @@ class ExistingAspect:
An existing aspect (module) with its questions.
Used to provide context to generators about what's already been explored.
"""
+
aspect_id: str
title: str
description: str
@@ -468,6 +486,7 @@ class ExistingConversationContext:
Complete context about existing aspects and questions for generators.
Enables aspect-aware generation that builds on previous conversations.
"""
+
aspects: List[ExistingAspect]
total_aspects: int
total_questions: int
@@ -479,12 +498,14 @@ class ExistingConversationContext:
# Cross-Phase Context Types
# ============================
+
@dataclass
class CrossPhaseDecision:
"""
A decision from another phase's thread.
Uses decision_summary_short for compact cross-phase context.
"""
+
question_title: str
decision_summary_short: str # Single-sentence summary (~100-150 chars)
aspect_title: str
@@ -496,6 +517,7 @@ class CrossPhaseContext:
Context from another brainstorming phase in the same project.
Contains the phase info and its ACTIVE thread decisions.
"""
+
phase_id: str
phase_title: str
phase_description: str # Truncated to ~200 chars for compactness
@@ -508,6 +530,7 @@ class ProjectFeatureDecision:
A decision from a project-level feature thread.
Project features are IMPLEMENTATION features with module.brainstorming_phase_id IS NULL.
"""
+
feature_title: str
module_title: str
decision_summary_short: str
@@ -521,6 +544,7 @@ class CrossProjectContext:
1. Decisions from OTHER brainstorming phases (not the current one)
2. Decisions from project-level implementation features
"""
+
other_phases: List[CrossPhaseContext]
project_features: List[ProjectFeatureDecision]
@@ -529,6 +553,7 @@ class CrossProjectContext:
# Sibling Phase Context Types (for phases in same container)
# ============================
+
@dataclass
class SiblingPhaseContext:
"""
@@ -537,6 +562,7 @@ class SiblingPhaseContext:
Sibling phases share a container and have container_sequence ordering,
allowing agents to understand the progression of related phases.
"""
+
phase_id: str
phase_title: str
phase_subtype: str # "INITIAL_SPEC" or "EXTENSION"
@@ -554,6 +580,7 @@ class SiblingPhasesContext:
This provides agents with rich context about related phases when
generating content for extension phases within a container.
"""
+
container_id: str
container_title: str
sibling_phases: List[SiblingPhaseContext]
@@ -605,20 +632,16 @@ def validate_brainstorm_result(result: BrainstormConversationResult) -> List[str
# Check total questions
if result.total_questions > caps.total_max_questions:
- issues.append(
- f"Too many questions: {result.total_questions} > {caps.total_max_questions}"
- )
+ issues.append(f"Too many questions: {result.total_questions} > {caps.total_max_questions}")
# Check each aspect
for aspect in result.aspects:
if not aspect.title.strip():
- issues.append(f"Aspect has empty title")
+ issues.append("Aspect has empty title")
q_count = len(aspect.clarification_questions)
if q_count < caps.min_questions_per_aspect:
- issues.append(
- f"Aspect '{aspect.title}' has too few questions: {q_count} < {caps.min_questions_per_aspect}"
- )
+ issues.append(f"Aspect '{aspect.title}' has too few questions: {q_count} < {caps.min_questions_per_aspect}")
if q_count > caps.max_questions_per_aspect:
issues.append(
f"Aspect '{aspect.title}' has too many questions: {q_count} > {caps.max_questions_per_aspect}"
@@ -632,13 +655,9 @@ def validate_brainstorm_result(result: BrainstormConversationResult) -> List[str
# Check MCQ choices
choice_count = len(question.initial_mcq.choices)
if choice_count < MIN_MCQ_CHOICES:
- issues.append(
- f"Question '{question.title}' MCQ has too few choices: {choice_count}"
- )
+ issues.append(f"Question '{question.title}' MCQ has too few choices: {choice_count}")
if choice_count > MAX_MCQ_CHOICES:
- issues.append(
- f"Question '{question.title}' MCQ has too many choices: {choice_count}"
- )
+ issues.append(f"Question '{question.title}' MCQ has too many choices: {choice_count}")
# Check for duplicate aspect titles
aspect_titles = [a.title.lower().strip() for a in result.aspects]
diff --git a/backend/app/agents/brainstorm_conversation/utils.py b/backend/app/agents/brainstorm_conversation/utils.py
index 7962f3d..469fc1d 100644
--- a/backend/app/agents/brainstorm_conversation/utils.py
+++ b/backend/app/agents/brainstorm_conversation/utils.py
@@ -5,9 +5,9 @@
import re
# Import from common module and re-export for backwards compatibility
-from app.agents.response_parser import strip_markdown_json, normalize_response_content
+from app.agents.response_parser import normalize_response_content, strip_markdown_json
-__all__ = ['strip_markdown_json', 'normalize_response_content', 'truncate_text', 'normalize_whitespace']
+__all__ = ["strip_markdown_json", "normalize_response_content", "truncate_text", "normalize_whitespace"]
def truncate_text(text: str, max_length: int = 500) -> str:
@@ -25,7 +25,7 @@ def truncate_text(text: str, max_length: int = 500) -> str:
return text
# Truncate at word boundary
- truncated = text[:max_length].rsplit(' ', 1)[0]
+ truncated = text[:max_length].rsplit(" ", 1)[0]
return truncated + "..."
@@ -40,5 +40,5 @@ def normalize_whitespace(text: str) -> str:
Normalized text
"""
# Replace multiple whitespace with single space
- text = re.sub(r'\s+', ' ', text)
+ text = re.sub(r"\s+", " ", text)
return text.strip()
diff --git a/backend/app/agents/brainstorm_prompt_plan/__init__.py b/backend/app/agents/brainstorm_prompt_plan/__init__.py
index 497eebd..24a4cc4 100644
--- a/backend/app/agents/brainstorm_prompt_plan/__init__.py
+++ b/backend/app/agents/brainstorm_prompt_plan/__init__.py
@@ -11,54 +11,49 @@
4. QA - Validates completeness and actionability
"""
-from .types import (
- # Enums
- PromptPlanSectionId,
- # Dataclasses
- BrainstormPromptPlanContext,
- AnalyzedContext,
- PromptPlanOutlineSection,
- BrainstormPromptPlanOutline,
- PromptPlanSectionContent,
- BrainstormPromptPlan,
- PromptPlanValidationReport,
- BrainstormPromptPlanResult,
- AgentInfo,
- # Constants
- AGENT_METADATA,
- WORKFLOW_STEPS,
- FIXED_SECTIONS,
- ASPECT_CATEGORY_TO_SECTION_MAPPING,
- # Helper functions
- get_section_by_id,
- get_outline_section_by_id,
- build_section_tree,
+from .analyzer import (
+ AnalyzerAgent,
+ create_analyzer,
)
-
from .orchestrator import (
BrainstormPromptPlanOrchestrator,
create_orchestrator,
)
-
-from .analyzer import (
- AnalyzerAgent,
- create_analyzer,
-)
-
from .planner import (
PlannerAgent,
create_planner,
)
-
-from .writer import (
- WriterAgent,
- create_writer,
-)
-
from .qa import (
QAAgent,
create_qa,
)
+from .types import (
+ # Constants
+ AGENT_METADATA,
+ ASPECT_CATEGORY_TO_SECTION_MAPPING,
+ FIXED_SECTIONS,
+ WORKFLOW_STEPS,
+ AgentInfo,
+ AnalyzedContext,
+ BrainstormPromptPlan,
+ # Dataclasses
+ BrainstormPromptPlanContext,
+ BrainstormPromptPlanOutline,
+ BrainstormPromptPlanResult,
+ PromptPlanOutlineSection,
+ PromptPlanSectionContent,
+ # Enums
+ PromptPlanSectionId,
+ PromptPlanValidationReport,
+ build_section_tree,
+ get_outline_section_by_id,
+ # Helper functions
+ get_section_by_id,
+)
+from .writer import (
+ WriterAgent,
+ create_writer,
+)
__all__ = [
# Enums
diff --git a/backend/app/agents/brainstorm_prompt_plan/analyzer.py b/backend/app/agents/brainstorm_prompt_plan/analyzer.py
index 4b38f76..7ce9fda 100644
--- a/backend/app/agents/brainstorm_prompt_plan/analyzer.py
+++ b/backend/app/agents/brainstorm_prompt_plan/analyzer.py
@@ -13,11 +13,11 @@
from autogen_core import CancellationToken
from autogen_core.models import ChatCompletionClient
+from .logging_config import get_agent_logger
from .types import (
- BrainstormPromptPlanContext,
AnalyzedContext,
+ BrainstormPromptPlanContext,
)
-from .logging_config import get_agent_logger
from .utils import strip_markdown_json
@@ -102,11 +102,7 @@ def _get_system_message(self) -> str:
Return ONLY the JSON object, no additional text."""
- async def analyze(
- self,
- context: BrainstormPromptPlanContext,
- project_id: Optional[str] = None
- ) -> AnalyzedContext:
+ async def analyze(self, context: BrainstormPromptPlanContext, project_id: Optional[str] = None) -> AnalyzedContext:
"""
Analyze brainstorming data and extract implementation components.
@@ -125,7 +121,7 @@ async def analyze(
phase_title=context.phase_title,
aspects_count=len(context.aspects),
questions_count=len(context.clarification_questions),
- threads_count=len(context.thread_discussions)
+ threads_count=len(context.thread_discussions),
)
try:
@@ -136,13 +132,10 @@ async def analyze(
self.logger.log_llm_call(
prompt=prompt[:500] + "..." if len(prompt) > 500 else prompt,
model=str(self.model_client),
- operation="analyze_brainstorm_context"
+ operation="analyze_brainstorm_context",
)
- response = await self.agent.on_messages(
- [TextMessage(content=prompt, source="user")],
- CancellationToken()
- )
+ response = await self.agent.on_messages([TextMessage(content=prompt, source="user")], CancellationToken())
response_text = response.chat_message.content
if isinstance(response_text, list):
@@ -155,10 +148,9 @@ async def analyze(
try:
result_data = json.loads(cleaned_response)
except json.JSONDecodeError as e:
- self.logger.log_error(e, {
- "raw_response": response_text[:500],
- "cleaned_response": cleaned_response[:500]
- })
+ self.logger.log_error(
+ e, {"raw_response": response_text[:500], "cleaned_response": cleaned_response[:500]}
+ )
raise ValueError(f"Failed to parse analyzer response as JSON: {e}")
# Convert to AnalyzedContext
@@ -202,7 +194,7 @@ def _build_prompt(self, context: BrainstormPromptPlanContext) -> str:
Returns:
Formatted prompt string
"""
- prompt = f"Analyze the following brainstorming data to extract implementation components.\n\n"
+ prompt = "Analyze the following brainstorming data to extract implementation components.\n\n"
# Add phase info
prompt += f"### Phase Title: {context.phase_title}\n"
@@ -225,11 +217,11 @@ def _build_prompt(self, context: BrainstormPromptPlanContext) -> str:
prompt += f" Question: {q.get('spec_text', q.get('description', 'No description'))}\n"
# Include ALL answered MCQs for this feature
- answered_mcqs = q.get('answered_mcqs', [])
+ answered_mcqs = q.get("answered_mcqs", [])
for mcq in answered_mcqs:
- question_text = mcq.get('question_text', '')
- selected_label = mcq.get('selected_label', '')
- free_text = mcq.get('free_text')
+ question_text = mcq.get("question_text", "")
+ selected_label = mcq.get("selected_label", "")
+ free_text = mcq.get("free_text")
prompt += f" - MCQ: {question_text}\n"
prompt += f" Answer: {selected_label}\n"
@@ -243,22 +235,22 @@ def _build_prompt(self, context: BrainstormPromptPlanContext) -> str:
for thread in context.thread_discussions:
prompt += f"- **{thread.get('title', 'Untitled Thread')}**\n"
# Prefer decision summary if available
- if thread.get('decision_summary'):
+ if thread.get("decision_summary"):
prompt += f" {thread['decision_summary']}\n"
- if thread.get('unresolved_points'):
+ if thread.get("unresolved_points"):
prompt += " **Unresolved:**\n"
- for point in thread['unresolved_points']:
- question = point.get('question', '') if isinstance(point, dict) else str(point)
+ for point in thread["unresolved_points"]:
+ question = point.get("question", "") if isinstance(point, dict) else str(point)
prompt += f" - {question}\n"
else:
# Fallback to raw comments if no decision summary
- comments = thread.get('comments', [])
+ comments = thread.get("comments", [])
for comment in comments[:5]: # Limit to first 5 comments per thread
prompt += f" - {comment.get('content', '')[:200]}\n"
# Note any images attached to this thread
- if thread.get('images'):
+ if thread.get("images"):
prompt += " **Attached Images:**\n"
- for img in thread['images'][:5]: # Limit to first 5 images
+ for img in thread["images"][:5]: # Limit to first 5 images
prompt += f" - {img.get('filename', 'unnamed')} (ID: {img.get('id', 'unknown')})\n"
prompt += "\n"
@@ -307,10 +299,7 @@ def _build_sibling_implementation_section(sibling_phases_context) -> str:
if not sibling_phases_context:
return ""
- phases_with_analysis = [
- p for p in sibling_phases_context.sibling_phases
- if p.implementation_analysis
- ]
+ phases_with_analysis = [p for p in sibling_phases_context.sibling_phases if p.implementation_analysis]
if not phases_with_analysis:
return ""
@@ -327,10 +316,7 @@ def _build_sibling_implementation_section(sibling_phases_context) -> str:
return "\n".join(sections)
-async def create_analyzer(
- model_client: ChatCompletionClient,
- project_id: Optional[str] = None
-) -> AnalyzerAgent:
+async def create_analyzer(model_client: ChatCompletionClient, project_id: Optional[str] = None) -> AnalyzerAgent:
"""
Factory function to create an Analyzer Agent.
diff --git a/backend/app/agents/brainstorm_prompt_plan/logging_config.py b/backend/app/agents/brainstorm_prompt_plan/logging_config.py
index dd081c0..8d91cea 100644
--- a/backend/app/agents/brainstorm_prompt_plan/logging_config.py
+++ b/backend/app/agents/brainstorm_prompt_plan/logging_config.py
@@ -4,10 +4,10 @@
Provides structured logging for all agent decisions, LLM calls, and workflow steps.
"""
-import logging
import json
-from typing import Any, Dict, Optional
+import logging
from datetime import datetime, timezone
+from typing import Any, Dict, Optional
class BrainstormAgentLogger:
@@ -33,19 +33,12 @@ def __init__(self, agent_name: str, project_id: Optional[str] = None):
# Ensure structured output
if not self.logger.handlers:
handler = logging.StreamHandler()
- formatter = logging.Formatter(
- '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
- )
+ formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
handler.setFormatter(formatter)
self.logger.addHandler(handler)
self.logger.setLevel(logging.INFO)
- def _structured_log(
- self,
- level: str,
- event: str,
- extra_data: Optional[Dict[str, Any]] = None
- ) -> None:
+ def _structured_log(self, level: str, event: str, extra_data: Optional[Dict[str, Any]] = None) -> None:
"""
Log a structured event.
@@ -78,12 +71,7 @@ def log_agent_complete(self, **kwargs: Any) -> None:
self._structured_log("info", f"{self.agent_name}_complete", kwargs)
def log_llm_call(
- self,
- prompt: str,
- model: str,
- response: Optional[str] = None,
- tokens_used: Optional[int] = None,
- **kwargs: Any
+ self, prompt: str, model: str, response: Optional[str] = None, tokens_used: Optional[int] = None, **kwargs: Any
) -> None:
"""
Log an LLM API call.
@@ -99,7 +87,7 @@ def log_llm_call(
"model": model,
"prompt_preview": prompt[:200] + "..." if len(prompt) > 200 else prompt,
"prompt_length": len(prompt),
- **kwargs
+ **kwargs,
}
if response:
@@ -120,11 +108,7 @@ def log_decision(self, decision: str, rationale: str, **kwargs: Any) -> None:
rationale: Explanation of why
**kwargs: Additional context
"""
- data = {
- "decision": decision,
- "rationale": rationale,
- **kwargs
- }
+ data = {"decision": decision, "rationale": rationale, **kwargs}
self._structured_log("info", "agent_decision", data)
def log_pruning_stats(
@@ -134,7 +118,7 @@ def log_pruning_stats(
initial_questions: int,
final_questions: int,
pruned_items: list,
- **kwargs: Any
+ **kwargs: Any,
) -> None:
"""
Log aspect and question pruning statistics.
@@ -155,7 +139,7 @@ def log_pruning_stats(
"pruned_aspects": initial_aspects - final_aspects,
"pruned_questions": initial_questions - final_questions,
"pruned_items": pruned_items,
- **kwargs
+ **kwargs,
}
self._structured_log("info", "pruning_stats", data)
@@ -186,11 +170,7 @@ def log_workflow_transition(self, from_state: str, to_state: str, **kwargs: Any)
to_state: New state
**kwargs: Additional context
"""
- data = {
- "from_state": from_state,
- "to_state": to_state,
- **kwargs
- }
+ data = {"from_state": from_state, "to_state": to_state, **kwargs}
self._structured_log("info", "workflow_transition", data)
diff --git a/backend/app/agents/brainstorm_prompt_plan/orchestrator.py b/backend/app/agents/brainstorm_prompt_plan/orchestrator.py
index 4f84e99..0878e6e 100644
--- a/backend/app/agents/brainstorm_prompt_plan/orchestrator.py
+++ b/backend/app/agents/brainstorm_prompt_plan/orchestrator.py
@@ -11,23 +11,22 @@
"""
import logging
-from typing import Callable, Dict, Any, List, Optional
+from typing import Any, Callable, Dict, Optional
from uuid import UUID
+# Re-use the same exception from brainstorm_spec
+from app.agents.brainstorm_spec import JobCancelledException
+
+from .analyzer import create_analyzer
+from .logging_config import get_agent_logger
+from .planner import create_planner
+from .qa import create_qa
from .types import (
+ AGENT_METADATA,
BrainstormPromptPlanContext,
BrainstormPromptPlanResult,
- AGENT_METADATA,
- WORKFLOW_STEPS,
)
-from .analyzer import create_analyzer
-from .planner import create_planner
from .writer import create_writer
-from .qa import create_qa
-from .logging_config import get_agent_logger
-
-# Re-use the same exception from brainstorm_spec
-from app.agents.brainstorm_spec import JobCancelledException
logger = logging.getLogger(__name__)
@@ -76,8 +75,8 @@ def _check_cancelled(self) -> None:
if not self.job_id:
return
- from app.services.job_service import JobService
from app.database import SessionLocal
+ from app.services.job_service import JobService
db = SessionLocal()
try:
@@ -88,10 +87,7 @@ def _check_cancelled(self) -> None:
db.close()
def _report_progress(
- self,
- workflow_step: str,
- agent_key: Optional[str] = None,
- extra_data: Optional[Dict[str, Any]] = None
+ self, workflow_step: str, agent_key: Optional[str] = None, extra_data: Optional[Dict[str, Any]] = None
) -> None:
"""
Report progress to the callback.
@@ -139,10 +135,7 @@ def _report_progress(
except Exception as e:
self.logger.log_error(e, {"context": "progress_callback"})
- async def generate_brainstorm_prompt_plan(
- self,
- context: BrainstormPromptPlanContext
- ) -> BrainstormPromptPlanResult:
+ async def generate_brainstorm_prompt_plan(self, context: BrainstormPromptPlanContext) -> BrainstormPromptPlanResult:
"""
Generate a complete brainstorm prompt plan.
@@ -188,11 +181,7 @@ async def generate_brainstorm_prompt_plan(
if self.call_logger:
self.call_logger.set_agent("planner", "Planner")
planner = await create_planner(self.model_client, project_id)
- outline = await planner.create_outline(
- analyzed_context,
- context.clarification_questions,
- project_id
- )
+ outline = await planner.create_outline(analyzed_context, context.clarification_questions, project_id)
# Check for cancellation before next step
self._check_cancelled()
@@ -205,10 +194,7 @@ async def generate_brainstorm_prompt_plan(
self.call_logger.set_agent("writer", "Writer")
writer = await create_writer(self.model_client, project_id)
prompt_plan = await writer.write_prompt_plan(
- outline,
- analyzed_context,
- context.clarification_questions,
- project_id
+ outline, analyzed_context, context.clarification_questions, project_id
)
# Check for cancellation before next step
@@ -221,12 +207,7 @@ async def generate_brainstorm_prompt_plan(
if self.call_logger:
self.call_logger.set_agent("qa", "QA Validator")
qa = await create_qa(self.model_client, project_id)
- validation_report = await qa.validate(
- prompt_plan,
- outline,
- context.clarification_questions,
- project_id
- )
+ validation_report = await qa.validate(prompt_plan, outline, context.clarification_questions, project_id)
# Complete
self._report_progress("complete", "orchestrator")
@@ -276,7 +257,7 @@ async def create_orchestrator(
Returns:
Initialized BrainstormPromptPlanOrchestrator instance
"""
- from app.agents.llm_client import create_litellm_client, LLMCallLogger
+ from app.agents.llm_client import LLMCallLogger, create_litellm_client
config = config or {}
model = config.get("model")
@@ -287,6 +268,7 @@ async def create_orchestrator(
call_logger = None
if job_id:
from app.database import SessionLocal
+
call_logger = LLMCallLogger(
db_session_factory=SessionLocal,
job_id=job_id,
diff --git a/backend/app/agents/brainstorm_prompt_plan/planner.py b/backend/app/agents/brainstorm_prompt_plan/planner.py
index 73d5ae1..263be3e 100644
--- a/backend/app/agents/brainstorm_prompt_plan/planner.py
+++ b/backend/app/agents/brainstorm_prompt_plan/planner.py
@@ -13,15 +13,15 @@
from autogen_core import CancellationToken
from autogen_core.models import ChatCompletionClient
+from .logging_config import get_agent_logger
from .types import (
+ ASPECT_CATEGORY_TO_SECTION_MAPPING,
+ FIXED_SECTIONS,
AnalyzedContext,
BrainstormPromptPlanOutline,
- PromptPlanOutlineSection,
PhaseDomainMapping,
- FIXED_SECTIONS,
- ASPECT_CATEGORY_TO_SECTION_MAPPING,
+ PromptPlanOutlineSection,
)
-from .logging_config import get_agent_logger
from .utils import strip_markdown_json
@@ -161,10 +161,7 @@ def _get_system_message(self) -> str:
Return ONLY the JSON object."""
async def create_outline(
- self,
- analyzed_context: AnalyzedContext,
- clarification_questions: List[dict],
- project_id: Optional[str] = None
+ self, analyzed_context: AnalyzedContext, clarification_questions: List[dict], project_id: Optional[str] = None
) -> BrainstormPromptPlanOutline:
"""
Create the prompt plan outline from analyzed context.
@@ -180,10 +177,7 @@ async def create_outline(
Raises:
ValueError: If planning fails or returns invalid JSON
"""
- self.logger.log_agent_start(
- project_id=project_id,
- questions_count=len(clarification_questions)
- )
+ self.logger.log_agent_start(project_id=project_id, questions_count=len(clarification_questions))
try:
# Build the prompt
@@ -193,13 +187,10 @@ async def create_outline(
self.logger.log_llm_call(
prompt=prompt[:500] + "..." if len(prompt) > 500 else prompt,
model=str(self.model_client),
- operation="create_prompt_plan_outline"
+ operation="create_prompt_plan_outline",
)
- response = await self.agent.on_messages(
- [TextMessage(content=prompt, source="user")],
- CancellationToken()
- )
+ response = await self.agent.on_messages([TextMessage(content=prompt, source="user")], CancellationToken())
response_text = response.chat_message.content
if isinstance(response_text, list):
@@ -212,10 +203,9 @@ async def create_outline(
try:
result_data = json.loads(cleaned_response)
except json.JSONDecodeError as e:
- self.logger.log_error(e, {
- "raw_response": response_text[:500],
- "cleaned_response": cleaned_response[:500]
- })
+ self.logger.log_error(
+ e, {"raw_response": response_text[:500], "cleaned_response": cleaned_response[:500]}
+ )
# Fall back to default outline
return self._create_default_outline(analyzed_context, clarification_questions)
@@ -230,12 +220,14 @@ async def create_outline(
# Parse phase_domain_mapping
phase_domain_mapping = []
for m_data in result_data.get("phase_domain_mapping", []):
- phase_domain_mapping.append(PhaseDomainMapping(
- phase_title=m_data.get("phase_title", ""),
- phase_index=m_data.get("phase_index", 0),
- domains=m_data.get("domains", []),
- keywords=m_data.get("keywords", []),
- ))
+ phase_domain_mapping.append(
+ PhaseDomainMapping(
+ phase_title=m_data.get("phase_title", ""),
+ phase_index=m_data.get("phase_index", 0),
+ domains=m_data.get("domains", []),
+ keywords=m_data.get("keywords", []),
+ )
+ )
# Ensure all fixed sections are present
outline = BrainstormPromptPlanOutline(
@@ -246,7 +238,7 @@ async def create_outline(
self.logger.log_agent_complete(
sections_count=len(outline.sections),
- total_subsections=sum(len(s.subsections) for s in outline.sections)
+ total_subsections=sum(len(s.subsections) for s in outline.sections),
)
return outline
@@ -256,11 +248,7 @@ async def create_outline(
# Return default outline on error
return self._create_default_outline(analyzed_context, clarification_questions)
- def _build_prompt(
- self,
- analyzed_context: AnalyzedContext,
- clarification_questions: List[dict]
- ) -> str:
+ def _build_prompt(self, analyzed_context: AnalyzedContext, clarification_questions: List[dict]) -> str:
"""
Build the planning prompt.
@@ -315,9 +303,9 @@ def _build_prompt(
if clarification_questions:
prompt += "### Clarification Questions (link these to sections):\n"
for q in clarification_questions:
- q_id = q.get('id', 'unknown')
- q_title = q.get('title', 'Untitled')
- q_category = q.get('category', 'General')
+ q_id = q.get("id", "unknown")
+ q_title = q.get("title", "Untitled")
+ q_category = q.get("category", "General")
prompt += f"- [{q_id}] {q_title} (Category: {q_category})\n"
prompt += "\n"
@@ -356,13 +344,15 @@ def _ensure_all_sections(self, outline: BrainstormPromptPlanOutline) -> Brainsto
for fixed in FIXED_SECTIONS:
if fixed["id"] not in existing_ids:
- outline.sections.append(PromptPlanOutlineSection(
- id=fixed["id"],
- title=fixed["title"],
- description="",
- subsections=[],
- linked_questions=[],
- ))
+ outline.sections.append(
+ PromptPlanOutlineSection(
+ id=fixed["id"],
+ title=fixed["title"],
+ description="",
+ subsections=[],
+ linked_questions=[],
+ )
+ )
# Sort by fixed order
section_order = {s["id"]: i for i, s in enumerate(FIXED_SECTIONS)}
@@ -371,9 +361,7 @@ def _ensure_all_sections(self, outline: BrainstormPromptPlanOutline) -> Brainsto
return outline
def _create_default_outline(
- self,
- analyzed_context: AnalyzedContext,
- clarification_questions: List[dict]
+ self, analyzed_context: AnalyzedContext, clarification_questions: List[dict]
) -> BrainstormPromptPlanOutline:
"""Create a default outline when LLM fails."""
sections = []
@@ -381,8 +369,8 @@ def _create_default_outline(
# Create question-to-section mapping based on categories
question_section_links = {}
for q in clarification_questions:
- q_id = q.get('id', '')
- category = q.get('category', 'Business_Logic')
+ q_id = q.get("id", "")
+ category = q.get("category", "Business_Logic")
target_sections = ASPECT_CATEGORY_TO_SECTION_MAPPING.get(category, [])
for section_id in target_sections:
if section_id.value not in question_section_links:
@@ -390,21 +378,20 @@ def _create_default_outline(
question_section_links[section_id.value].append(q_id)
for fixed in FIXED_SECTIONS:
- sections.append(PromptPlanOutlineSection(
- id=fixed["id"],
- title=fixed["title"],
- description="",
- subsections=[],
- linked_questions=question_section_links.get(fixed["id"], []),
- ))
+ sections.append(
+ PromptPlanOutlineSection(
+ id=fixed["id"],
+ title=fixed["title"],
+ description="",
+ subsections=[],
+ linked_questions=question_section_links.get(fixed["id"], []),
+ )
+ )
return BrainstormPromptPlanOutline(sections=sections, phase_domain_mapping=[])
-async def create_planner(
- model_client: ChatCompletionClient,
- project_id: Optional[str] = None
-) -> PlannerAgent:
+async def create_planner(model_client: ChatCompletionClient, project_id: Optional[str] = None) -> PlannerAgent:
"""
Factory function to create a Planner Agent.
diff --git a/backend/app/agents/brainstorm_prompt_plan/qa.py b/backend/app/agents/brainstorm_prompt_plan/qa.py
index 6d1c1e3..47ecf31 100644
--- a/backend/app/agents/brainstorm_prompt_plan/qa.py
+++ b/backend/app/agents/brainstorm_prompt_plan/qa.py
@@ -13,12 +13,12 @@
from autogen_core import CancellationToken
from autogen_core.models import ChatCompletionClient
+from .logging_config import get_agent_logger
from .types import (
BrainstormPromptPlan,
BrainstormPromptPlanOutline,
PromptPlanValidationReport,
)
-from .logging_config import get_agent_logger
from .utils import strip_markdown_json
@@ -92,7 +92,7 @@ async def validate(
prompt_plan: BrainstormPromptPlan,
outline: BrainstormPromptPlanOutline,
clarification_questions: List[dict],
- project_id: Optional[str] = None
+ project_id: Optional[str] = None,
) -> PromptPlanValidationReport:
"""
Validate the prompt plan for completeness and quality.
@@ -112,7 +112,7 @@ async def validate(
self.logger.log_agent_start(
project_id=project_id,
sections_count=len(prompt_plan.sections),
- questions_count=len(clarification_questions)
+ questions_count=len(clarification_questions),
)
try:
@@ -123,13 +123,10 @@ async def validate(
self.logger.log_llm_call(
prompt=prompt[:500] + "..." if len(prompt) > 500 else prompt,
model=str(self.model_client),
- operation="validate_prompt_plan"
+ operation="validate_prompt_plan",
)
- response = await self.agent.on_messages(
- [TextMessage(content=prompt, source="user")],
- CancellationToken()
- )
+ response = await self.agent.on_messages([TextMessage(content=prompt, source="user")], CancellationToken())
response_text = response.chat_message.content
if isinstance(response_text, list):
@@ -142,10 +139,9 @@ async def validate(
try:
result_data = json.loads(cleaned_response)
except json.JSONDecodeError as e:
- self.logger.log_error(e, {
- "raw_response": response_text[:500],
- "cleaned_response": cleaned_response[:500]
- })
+ self.logger.log_error(
+ e, {"raw_response": response_text[:500], "cleaned_response": cleaned_response[:500]}
+ )
# Return a default report on parse failure
return self._create_fallback_report(prompt_plan, outline)
@@ -176,7 +172,7 @@ def _build_prompt(
self,
prompt_plan: BrainstormPromptPlan,
outline: BrainstormPromptPlanOutline,
- clarification_questions: List[dict]
+ clarification_questions: List[dict],
) -> str:
"""
Build the validation prompt.
@@ -195,14 +191,18 @@ def _build_prompt(
prompt += "### Generated Prompt Plan:\n"
for section in prompt_plan.sections:
prompt += f"\n**{section.title}** (ID: {section.id})\n"
- prompt += f"{section.body_markdown[:500]}...\n" if len(section.body_markdown) > 500 else f"{section.body_markdown}\n"
+ prompt += (
+ f"{section.body_markdown[:500]}...\n"
+ if len(section.body_markdown) > 500
+ else f"{section.body_markdown}\n"
+ )
if section.linked_questions:
prompt += f"Linked Questions: {', '.join(section.linked_questions)}\n"
prompt += "\n### Clarification Questions:\n"
for q in clarification_questions:
- q_id = q.get('id', 'unknown')
- q_title = q.get('title', 'Untitled')
+ q_id = q.get("id", "unknown")
+ q_title = q.get("title", "Untitled")
prompt += f"- [{q_id}] {q_title}\n"
prompt += "\nValidate completeness, clarity, and dependencies. Return ONLY the JSON object."
@@ -210,9 +210,7 @@ def _build_prompt(
return prompt
def _create_fallback_report(
- self,
- prompt_plan: BrainstormPromptPlan,
- outline: BrainstormPromptPlanOutline
+ self, prompt_plan: BrainstormPromptPlan, outline: BrainstormPromptPlanOutline
) -> PromptPlanValidationReport:
"""
Create a fallback validation report using simple heuristics.
@@ -235,16 +233,13 @@ def _create_fallback_report(
missing_components=[],
unclear_instructions=empty_sections,
dependency_issues=[],
- suggested_improvements=[
- "Fallback validation: Consider reviewing prompt plan for completeness"
- ] if empty_sections else [],
+ suggested_improvements=["Fallback validation: Consider reviewing prompt plan for completeness"]
+ if empty_sections
+ else [],
)
-async def create_qa(
- model_client: ChatCompletionClient,
- project_id: Optional[str] = None
-) -> QAAgent:
+async def create_qa(model_client: ChatCompletionClient, project_id: Optional[str] = None) -> QAAgent:
"""
Factory function to create a QA Agent.
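
The QA agent above follows a parse-or-fallback flow: strip any markdown fence from the LLM reply, attempt `json.loads`, and return a heuristic report when parsing fails. A minimal standalone sketch of that flow; the fence-stripping helper here is an illustrative stand-in for the repo's `app.agents.response_parser.strip_markdown_json`, not its actual implementation:

```python
import json
from typing import Any, Dict

FENCE = "`" * 3  # markdown code fence marker


def strip_markdown_json(text: str) -> str:
    """Drop a surrounding markdown code fence, if present (illustrative helper)."""
    cleaned = text.strip()
    if cleaned.startswith(FENCE):
        # Remove the opening fence line (which may carry a language tag) and the closing fence.
        cleaned = cleaned.split("\n", 1)[1] if "\n" in cleaned else ""
        if cleaned.rstrip().endswith(FENCE):
            cleaned = cleaned.rstrip()[: -len(FENCE)]
    return cleaned.strip()


def parse_report(raw_response: str, fallback: Dict[str, Any]) -> Dict[str, Any]:
    """Parse the LLM reply as JSON; on failure, return a heuristic fallback report."""
    cleaned = strip_markdown_json(raw_response)
    try:
        return json.loads(cleaned)
    except json.JSONDecodeError:
        # Mirrors the agent: record the bad payload and degrade gracefully.
        return fallback


reply = FENCE + 'json\n{"ok": true}\n' + FENCE
print(parse_report(reply, fallback={"ok": False, "suggested_improvements": []}))  # {'ok': True}
```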
diff --git a/backend/app/agents/brainstorm_prompt_plan/types.py b/backend/app/agents/brainstorm_prompt_plan/types.py
index d94e9fc..51da56a 100644
--- a/backend/app/agents/brainstorm_prompt_plan/types.py
+++ b/backend/app/agents/brainstorm_prompt_plan/types.py
@@ -14,16 +14,17 @@
from dataclasses import dataclass, field
from enum import Enum
-from typing import List, Optional, Dict, Any
+from typing import Any, Dict, List, Optional
from uuid import UUID
-
# ============================
# Enums
# ============================
+
class PromptPlanSectionId(str, Enum):
"""Fixed section IDs for the brainstorm prompt plan outline."""
+
INTRODUCTION = "introduction"
PROJECT_OVERVIEW = "project_overview"
IMPLEMENTATION_PHASES = "implementation_phases"
@@ -40,12 +41,14 @@ class PromptPlanSectionId(str, Enum):
# Dataclasses
# ============================
+
@dataclass
class BrainstormPromptPlanContext:
"""
Raw context passed to the Orchestrator for generating Brainstorm Prompt Plan.
This is the full input data from a brainstorming phase.
"""
+
project_id: UUID
brainstorming_phase_id: UUID
phase_title: str
@@ -90,6 +93,7 @@ class AnalyzedContext:
Output from the Analyzer Agent.
Extracted implementation components from brainstorming data.
"""
+
phase_summary: str
implementation_goals: List[str] = field(default_factory=list)
components_to_build: List[str] = field(default_factory=list)
@@ -115,10 +119,11 @@ class PromptPlanOutlineSection:
A single section in the prompt plan outline.
Can have nested subsections and linked questions.
"""
+
id: str
title: str
description: str = ""
- subsections: List['PromptPlanOutlineSection'] = field(default_factory=list)
+ subsections: List["PromptPlanOutlineSection"] = field(default_factory=list)
linked_questions: List[str] = field(default_factory=list)
# Q&A-aware generation: tracks whether section has sufficient answered questions
has_qa_backing: bool = True # Default True for backwards compatibility
@@ -131,6 +136,7 @@ class PhaseDomainMapping:
Mapping of a prompt plan phase to its relevant domains and keywords.
Used by module_feature extraction to match requirements to phases.
"""
+
phase_title: str
phase_index: int
domains: List[str] = field(default_factory=list) # e.g., ["Authentication", "Security"]
@@ -143,6 +149,7 @@ class BrainstormPromptPlanOutline:
Full prompt plan outline generated by the Planner Agent.
Contains hierarchical section tree and phase-domain mapping for downstream extraction.
"""
+
sections: List[PromptPlanOutlineSection] = field(default_factory=list)
# Mapping of phases to domains/keywords for module_feature extraction
phase_domain_mapping: List[PhaseDomainMapping] = field(default_factory=list)
@@ -154,6 +161,7 @@ class PromptPlanSectionContent:
A single prompt plan section with rendered markdown content.
Output from the Writer Agent.
"""
+
id: str
title: str
body_markdown: str
@@ -166,6 +174,7 @@ class BrainstormPromptPlan:
Complete prompt plan document generated by the Writer Agent.
Contains all sections with rendered markdown.
"""
+
sections: List[PromptPlanSectionContent] = field(default_factory=list)
# Phase-domain mapping for downstream module_feature extraction
phase_domain_mapping: List[PhaseDomainMapping] = field(default_factory=list)
@@ -218,6 +227,7 @@ class PromptPlanValidationReport:
Quality assurance report generated by the QA Agent.
Validates completeness and actionability of the prompt plan.
"""
+
ok: bool
missing_components: List[str] = field(default_factory=list)
unclear_instructions: List[str] = field(default_factory=list)
@@ -231,6 +241,7 @@ class BrainstormPromptPlanResult:
Final result returned by the Orchestrator.
Includes the complete prompt plan, outline, and validation report.
"""
+
prompt_plan: BrainstormPromptPlan
outline: BrainstormPromptPlanOutline
analyzed_context: AnalyzedContext
@@ -241,12 +252,14 @@ class BrainstormPromptPlanResult:
# Agent Metadata for UI
# ============================
+
@dataclass
class AgentInfo:
"""
UI metadata for an agent in the Brainstorm Prompt Plan generation workflow.
Used for progress tracking and visual representation.
"""
+
name: str
description: str
color: str # Hex color for UI tag
@@ -257,40 +270,33 @@ class AgentInfo:
"orchestrator": AgentInfo(
name="Orchestrator",
description="Coordinating prompt plan generation workflow",
- color="#8B5CF6" # Purple
+ color="#8B5CF6", # Purple
),
"analyzer": AgentInfo(
name="Analyzer",
description="Extracting implementation components from brainstorming",
- color="#3B82F6" # Blue
+ color="#3B82F6", # Blue
),
"planner": AgentInfo(
name="Planner",
description="Creating prompt plan outline with implementation phases",
- color="#10B981" # Green
+ color="#10B981", # Green
),
"writer": AgentInfo(
name="Writer",
description="Generating instructional content for each section",
- color="#F59E0B" # Amber
+ color="#F59E0B", # Amber
),
"qa": AgentInfo(
name="QA",
description="Validating completeness and actionability",
- color="#EC4899" # Pink
+ color="#EC4899", # Pink
),
}
# Workflow step definitions for progress tracking
-WORKFLOW_STEPS = [
- "start",
- "analyzing",
- "planning",
- "writing",
- "validating",
- "complete"
-]
+WORKFLOW_STEPS = ["start", "analyzing", "planning", "writing", "validating", "complete"]
# ============================
@@ -346,6 +352,7 @@ class AgentInfo:
# Helper Functions
# ============================
+
def get_section_by_id(sections: List[PromptPlanSectionContent], section_id: str) -> Optional[PromptPlanSectionContent]:
"""Get a section by its ID."""
for section in sections:
@@ -354,7 +361,9 @@ def get_section_by_id(sections: List[PromptPlanSectionContent], section_id: str)
return None
-def get_outline_section_by_id(sections: List[PromptPlanOutlineSection], section_id: str) -> Optional[PromptPlanOutlineSection]:
+def get_outline_section_by_id(
+ sections: List[PromptPlanOutlineSection], section_id: str
+) -> Optional[PromptPlanOutlineSection]:
"""Get an outline section by its ID (supports nested subsections)."""
for section in sections:
if section.id == section_id:
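
`get_outline_section_by_id` is documented as supporting nested subsections; only the top-level loop is visible in the hunk, so the following is a self-contained sketch of how such a lookup typically recurses over the dataclass tree, with a simplified field set:

```python
from dataclasses import dataclass, field
from typing import List, Optional


@dataclass
class Section:
    """Simplified stand-in for PromptPlanOutlineSection."""

    id: str
    title: str
    subsections: List["Section"] = field(default_factory=list)


def find_section(sections: List[Section], section_id: str) -> Optional[Section]:
    """Depth-first lookup by ID, descending into subsections."""
    for section in sections:
        if section.id == section_id:
            return section
        found = find_section(section.subsections, section_id)
        if found is not None:
            return found
    return None


outline = [Section("introduction", "Introduction", [Section("scope", "Scope")])]
print(find_section(outline, "scope").title)  # Scope
```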
diff --git a/backend/app/agents/brainstorm_prompt_plan/utils.py b/backend/app/agents/brainstorm_prompt_plan/utils.py
index 9818f3e..4ce4b13 100644
--- a/backend/app/agents/brainstorm_prompt_plan/utils.py
+++ b/backend/app/agents/brainstorm_prompt_plan/utils.py
@@ -5,9 +5,9 @@
import re
# Import from common module and re-export for backwards compatibility
-from app.agents.response_parser import strip_markdown_json, normalize_response_content
+from app.agents.response_parser import normalize_response_content, strip_markdown_json
-__all__ = ['strip_markdown_json', 'normalize_response_content', 'truncate_text', 'normalize_whitespace']
+__all__ = ["strip_markdown_json", "normalize_response_content", "truncate_text", "normalize_whitespace"]
def truncate_text(text: str, max_length: int = 500) -> str:
@@ -25,7 +25,7 @@ def truncate_text(text: str, max_length: int = 500) -> str:
return text
# Truncate at word boundary
- truncated = text[:max_length].rsplit(' ', 1)[0]
+ truncated = text[:max_length].rsplit(" ", 1)[0]
return truncated + "..."
@@ -40,5 +40,5 @@ def normalize_whitespace(text: str) -> str:
Normalized text
"""
# Replace multiple whitespace with single space
- text = re.sub(r'\s+', ' ', text)
+ text = re.sub(r"\s+", " ", text)
return text.strip()
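
Both helpers are small enough that a usage example shows the intent directly: truncation snaps back to the previous word boundary before appending an ellipsis, and whitespace normalization collapses any run of whitespace to a single space.

```python
import re


def truncate_text(text: str, max_length: int = 500) -> str:
    if len(text) <= max_length:
        return text
    # Truncate at a word boundary, then mark the cut with an ellipsis.
    return text[:max_length].rsplit(" ", 1)[0] + "..."


def normalize_whitespace(text: str) -> str:
    return re.sub(r"\s+", " ", text).strip()


print(truncate_text("alpha beta gamma delta", max_length=12))  # "alpha beta..."
print(normalize_whitespace("  spread \n across\t lines  "))     # "spread across lines"
```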
diff --git a/backend/app/agents/brainstorm_prompt_plan/writer.py b/backend/app/agents/brainstorm_prompt_plan/writer.py
index 2028504..16ecf9a 100644
--- a/backend/app/agents/brainstorm_prompt_plan/writer.py
+++ b/backend/app/agents/brainstorm_prompt_plan/writer.py
@@ -6,7 +6,6 @@
"""
import asyncio
-import json
from typing import List, Optional
from autogen_agentchat.agents import AssistantAgent
@@ -14,15 +13,14 @@
from autogen_core import CancellationToken
from autogen_core.models import ChatCompletionClient
+from .logging_config import get_agent_logger
from .types import (
AnalyzedContext,
- BrainstormPromptPlanOutline,
BrainstormPromptPlan,
- PromptPlanSectionContent,
+ BrainstormPromptPlanOutline,
PromptPlanOutlineSection,
+ PromptPlanSectionContent,
)
-from .logging_config import get_agent_logger
-from .utils import strip_markdown_json
class WriterAgent:
@@ -126,7 +124,7 @@ async def write_prompt_plan(
outline: BrainstormPromptPlanOutline,
analyzed_context: AnalyzedContext,
clarification_questions: List[dict],
- project_id: Optional[str] = None
+ project_id: Optional[str] = None,
) -> BrainstormPromptPlan:
"""
Write the full prompt plan from outline and context.
@@ -143,19 +141,11 @@ async def write_prompt_plan(
Raises:
Exception: If writing fails for all sections
"""
- self.logger.log_agent_start(
- project_id=project_id,
- sections_count=len(outline.sections)
- )
+ self.logger.log_agent_start(project_id=project_id, sections_count=len(outline.sections))
# Create tasks for ALL sections to run in parallel
tasks = [
- self._write_section(
- section,
- analyzed_context,
- clarification_questions,
- project_id
- )
+ self._write_section(section, analyzed_context, clarification_questions, project_id)
for section in outline.sections
]
@@ -169,12 +159,14 @@ async def write_prompt_plan(
if isinstance(result, Exception):
self.logger.log_error(result, {"section_id": section.id})
# Create a placeholder for failed sections
- sections.append(PromptPlanSectionContent(
- id=section.id,
- title=section.title,
- body_markdown=f"*Content generation failed: {str(result)}*",
- linked_questions=section.linked_questions,
- ))
+ sections.append(
+ PromptPlanSectionContent(
+ id=section.id,
+ title=section.title,
+ body_markdown=f"*Content generation failed: {str(result)}*",
+ linked_questions=section.linked_questions,
+ )
+ )
else:
sections.append(result)
@@ -185,8 +177,7 @@ async def write_prompt_plan(
)
self.logger.log_agent_complete(
- sections_written=len(sections),
- total_markdown_length=sum(len(s.body_markdown) for s in sections)
+ sections_written=len(sections), total_markdown_length=sum(len(s.body_markdown) for s in sections)
)
return prompt_plan
@@ -196,7 +187,7 @@ async def _write_section(
section: PromptPlanOutlineSection,
analyzed_context: AnalyzedContext,
clarification_questions: List[dict],
- project_id: Optional[str] = None
+ project_id: Optional[str] = None,
) -> PromptPlanSectionContent:
"""
Write content for a single section.
@@ -218,7 +209,7 @@ async def _write_section(
prompt=prompt[:300] + "..." if len(prompt) > 300 else prompt,
model=str(self.model_client),
operation="write_section",
- section_id=section.id
+ section_id=section.id,
)
# Create a FRESH agent for each section to avoid conversation history accumulation
@@ -230,10 +221,7 @@ async def _write_section(
model_client=self.model_client,
)
- response = await section_agent.on_messages(
- [TextMessage(content=prompt, source="user")],
- CancellationToken()
- )
+ response = await section_agent.on_messages([TextMessage(content=prompt, source="user")], CancellationToken())
response_text = response.chat_message.content
if isinstance(response_text, list):
@@ -257,10 +245,7 @@ async def _write_section(
)
def _build_section_prompt(
- self,
- section: PromptPlanOutlineSection,
- analyzed_context: AnalyzedContext,
- clarification_questions: List[dict]
+ self, section: PromptPlanOutlineSection, analyzed_context: AnalyzedContext, clarification_questions: List[dict]
) -> str:
"""
Build the prompt for writing a section.
@@ -279,10 +264,10 @@ def _build_section_prompt(
prompt += f"Section Description: {section.description}\n\n"
# Check if section has Q&A backing - if not, generate placeholder
- if hasattr(section, 'has_qa_backing') and not section.has_qa_backing:
+ if hasattr(section, "has_qa_backing") and not section.has_qa_backing:
prompt += "### BLOCKED ON PENDING TOPICS:\n"
prompt += "This section lacks sufficient Q&A backing. The following topics need user decisions:\n"
- if hasattr(section, 'blocked_on') and section.blocked_on:
+ if hasattr(section, "blocked_on") and section.blocked_on:
for topic in section.blocked_on:
prompt += f"- {topic}\n"
else:
@@ -376,20 +361,20 @@ def _build_section_prompt(
for q_id in section.linked_questions:
# Find the question
for q in clarification_questions:
- if q.get('id') == q_id:
+ if q.get("id") == q_id:
prompt += f"- **{q.get('title', 'Untitled')}**\n"
prompt += f" {q.get('spec_text', q.get('description', ''))}\n"
# Include answer if available
- mcq_data = q.get('mcq_data', {})
+ mcq_data = q.get("mcq_data", {})
if mcq_data:
- selected = mcq_data.get('selected_option_id')
+ selected = mcq_data.get("selected_option_id")
if selected:
- choices = mcq_data.get('choices', [])
+ choices = mcq_data.get("choices", [])
for choice in choices:
- if choice.get('id') == selected:
+ if choice.get("id") == selected:
prompt += f" Answer: {choice.get('label', selected)}\n"
break
- free_text = mcq_data.get('free_text')
+ free_text = mcq_data.get("free_text")
if free_text:
prompt += f" Additional: {free_text}\n"
break
@@ -420,10 +405,7 @@ def _build_section_prompt(
return prompt
-async def create_writer(
- model_client: ChatCompletionClient,
- project_id: Optional[str] = None
-) -> WriterAgent:
+async def create_writer(model_client: ChatCompletionClient, project_id: Optional[str] = None) -> WriterAgent:
"""
Factory function to create a Writer Agent.
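
The writer fans out one task per outline section and converts any per-section exception into a placeholder rather than failing the whole plan. A reduced asyncio sketch of that gather-with-placeholders pattern; the section names and the simulated failure are invented for illustration:

```python
import asyncio
from typing import List


async def write_section(title: str) -> str:
    if title == "risks":  # simulate one failing section
        raise RuntimeError("model timeout")
    return f"## {title}\n\n...generated content..."


async def write_all(titles: List[str]) -> List[str]:
    results = await asyncio.gather(*(write_section(t) for t in titles), return_exceptions=True)
    sections = []
    for title, result in zip(titles, results):
        if isinstance(result, Exception):
            # Keep the document complete; flag the failed section instead of aborting.
            sections.append(f"## {title}\n\n*Content generation failed: {result}*")
        else:
            sections.append(result)
    return sections


print(asyncio.run(write_all(["introduction", "risks", "milestones"]))[1])
```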
diff --git a/backend/app/agents/brainstorm_spec/__init__.py b/backend/app/agents/brainstorm_spec/__init__.py
index 2bcb537..35bdd15 100644
--- a/backend/app/agents/brainstorm_spec/__init__.py
+++ b/backend/app/agents/brainstorm_spec/__init__.py
@@ -11,56 +11,51 @@
4. QA/Coverage - Validates completeness and coverage
"""
-from .types import (
- # Enums
- BrainstormSpecSectionId,
- # Dataclasses
- BrainstormSpecContext,
- NormalizedBrainstormContext,
- SpecOutlineSection,
- BrainstormSpecOutline,
- SpecSectionContent,
- BrainstormSpecification,
- CoverageReport,
- BrainstormSpecResult,
- AgentInfo,
- # Constants
- AGENT_METADATA,
- WORKFLOW_STEPS,
- FIXED_SECTIONS,
- ASPECT_CATEGORY_TO_SECTION_MAPPING,
- # Helper functions
- get_section_by_id,
- get_outline_section_by_id,
- build_section_tree,
-)
-
from .orchestrator import (
BrainstormSpecOrchestrator,
- create_orchestrator,
JobCancelledException,
+ create_orchestrator,
+)
+from .planner import (
+ PlannerAgent,
+ create_planner,
+)
+from .qa_coverage import (
+ QACoverageAgent,
+ create_qa_coverage,
)
-
from .summarizer import (
SummarizerAgent,
create_summarizer,
)
-
-from .planner import (
- PlannerAgent,
- create_planner,
+from .types import (
+ # Constants
+ AGENT_METADATA,
+ ASPECT_CATEGORY_TO_SECTION_MAPPING,
+ FIXED_SECTIONS,
+ WORKFLOW_STEPS,
+ AgentInfo,
+ # Dataclasses
+ BrainstormSpecContext,
+ BrainstormSpecification,
+ BrainstormSpecOutline,
+ BrainstormSpecResult,
+ # Enums
+ BrainstormSpecSectionId,
+ CoverageReport,
+ NormalizedBrainstormContext,
+ SpecOutlineSection,
+ SpecSectionContent,
+ build_section_tree,
+ get_outline_section_by_id,
+ # Helper functions
+ get_section_by_id,
)
-
from .writer import (
WriterAgent,
create_writer,
)
-from .qa_coverage import (
- QACoverageAgent,
- create_qa_coverage,
-)
-
__all__ = [
# Enums
"BrainstormSpecSectionId",
diff --git a/backend/app/agents/brainstorm_spec/logging_config.py b/backend/app/agents/brainstorm_spec/logging_config.py
index dd081c0..8d91cea 100644
--- a/backend/app/agents/brainstorm_spec/logging_config.py
+++ b/backend/app/agents/brainstorm_spec/logging_config.py
@@ -4,10 +4,10 @@
Provides structured logging for all agent decisions, LLM calls, and workflow steps.
"""
-import logging
import json
-from typing import Any, Dict, Optional
+import logging
from datetime import datetime, timezone
+from typing import Any, Dict, Optional
class BrainstormAgentLogger:
@@ -33,19 +33,12 @@ def __init__(self, agent_name: str, project_id: Optional[str] = None):
# Ensure structured output
if not self.logger.handlers:
handler = logging.StreamHandler()
- formatter = logging.Formatter(
- '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
- )
+ formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
handler.setFormatter(formatter)
self.logger.addHandler(handler)
self.logger.setLevel(logging.INFO)
- def _structured_log(
- self,
- level: str,
- event: str,
- extra_data: Optional[Dict[str, Any]] = None
- ) -> None:
+ def _structured_log(self, level: str, event: str, extra_data: Optional[Dict[str, Any]] = None) -> None:
"""
Log a structured event.
@@ -78,12 +71,7 @@ def log_agent_complete(self, **kwargs: Any) -> None:
self._structured_log("info", f"{self.agent_name}_complete", kwargs)
def log_llm_call(
- self,
- prompt: str,
- model: str,
- response: Optional[str] = None,
- tokens_used: Optional[int] = None,
- **kwargs: Any
+ self, prompt: str, model: str, response: Optional[str] = None, tokens_used: Optional[int] = None, **kwargs: Any
) -> None:
"""
Log an LLM API call.
@@ -99,7 +87,7 @@ def log_llm_call(
"model": model,
"prompt_preview": prompt[:200] + "..." if len(prompt) > 200 else prompt,
"prompt_length": len(prompt),
- **kwargs
+ **kwargs,
}
if response:
@@ -120,11 +108,7 @@ def log_decision(self, decision: str, rationale: str, **kwargs: Any) -> None:
rationale: Explanation of why
**kwargs: Additional context
"""
- data = {
- "decision": decision,
- "rationale": rationale,
- **kwargs
- }
+ data = {"decision": decision, "rationale": rationale, **kwargs}
self._structured_log("info", "agent_decision", data)
def log_pruning_stats(
@@ -134,7 +118,7 @@ def log_pruning_stats(
initial_questions: int,
final_questions: int,
pruned_items: list,
- **kwargs: Any
+ **kwargs: Any,
) -> None:
"""
Log aspect and question pruning statistics.
@@ -155,7 +139,7 @@ def log_pruning_stats(
"pruned_aspects": initial_aspects - final_aspects,
"pruned_questions": initial_questions - final_questions,
"pruned_items": pruned_items,
- **kwargs
+ **kwargs,
}
self._structured_log("info", "pruning_stats", data)
@@ -186,11 +170,7 @@ def log_workflow_transition(self, from_state: str, to_state: str, **kwargs: Any)
to_state: New state
**kwargs: Additional context
"""
- data = {
- "from_state": from_state,
- "to_state": to_state,
- **kwargs
- }
+ data = {"from_state": from_state, "to_state": to_state, **kwargs}
self._structured_log("info", "workflow_transition", data)
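
The logger above emits one JSON event per call, merging fixed fields (agent name, project ID, timestamp) with arbitrary keyword context. A minimal sketch of that structured-event approach; the field names follow the hunks, but the exact payload shape is an assumption:

```python
import json
import logging
from datetime import datetime, timezone
from typing import Any, Dict, Optional


class StructuredAgentLogger:
    def __init__(self, agent_name: str, project_id: Optional[str] = None) -> None:
        self.agent_name = agent_name
        self.project_id = project_id
        self.logger = logging.getLogger(f"agents.{agent_name}")
        if not self.logger.handlers:
            handler = logging.StreamHandler()
            handler.setFormatter(logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s"))
            self.logger.addHandler(handler)
            self.logger.setLevel(logging.INFO)

    def _structured_log(self, level: str, event: str, extra: Optional[Dict[str, Any]] = None) -> None:
        payload = {
            "event": event,
            "agent": self.agent_name,
            "project_id": self.project_id,
            "timestamp": datetime.now(timezone.utc).isoformat(),
            **(extra or {}),
        }
        getattr(self.logger, level)(json.dumps(payload))

    def log_decision(self, decision: str, rationale: str, **kwargs: Any) -> None:
        self._structured_log("info", "agent_decision", {"decision": decision, "rationale": rationale, **kwargs})


StructuredAgentLogger("planner", "proj-123").log_decision("use_default_outline", "LLM returned invalid JSON")
```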
diff --git a/backend/app/agents/brainstorm_spec/orchestrator.py b/backend/app/agents/brainstorm_spec/orchestrator.py
index 0b83fc4..5697e41 100644
--- a/backend/app/agents/brainstorm_spec/orchestrator.py
+++ b/backend/app/agents/brainstorm_spec/orchestrator.py
@@ -11,26 +11,26 @@
"""
import logging
-from typing import Callable, Dict, Any, List, Optional
+from typing import Any, Callable, Dict, Optional
from uuid import UUID
+from .logging_config import get_agent_logger
+from .planner import create_planner
+from .qa_coverage import create_qa_coverage
+from .summarizer import create_summarizer
from .types import (
+ AGENT_METADATA,
BrainstormSpecContext,
BrainstormSpecResult,
- AGENT_METADATA,
- WORKFLOW_STEPS,
)
-from .summarizer import create_summarizer
-from .planner import create_planner
from .writer import create_writer
-from .qa_coverage import create_qa_coverage
-from .logging_config import get_agent_logger
logger = logging.getLogger(__name__)
class JobCancelledException(Exception):
"""Raised when a job is cancelled during pipeline execution."""
+
pass
@@ -78,8 +78,8 @@ def _check_cancelled(self) -> None:
if not self.job_id:
return
- from app.services.job_service import JobService
from app.database import SessionLocal
+ from app.services.job_service import JobService
db = SessionLocal()
try:
@@ -90,10 +90,7 @@ def _check_cancelled(self) -> None:
db.close()
def _report_progress(
- self,
- workflow_step: str,
- agent_key: Optional[str] = None,
- extra_data: Optional[Dict[str, Any]] = None
+ self, workflow_step: str, agent_key: Optional[str] = None, extra_data: Optional[Dict[str, Any]] = None
) -> None:
"""
Report progress to the callback.
@@ -141,10 +138,7 @@ def _report_progress(
except Exception as e:
self.logger.log_error(e, {"context": "progress_callback"})
- async def generate_brainstorm_spec(
- self,
- context: BrainstormSpecContext
- ) -> BrainstormSpecResult:
+ async def generate_brainstorm_spec(self, context: BrainstormSpecContext) -> BrainstormSpecResult:
"""
Generate a complete brainstorm specification.
@@ -190,11 +184,7 @@ async def generate_brainstorm_spec(
if self.call_logger:
self.call_logger.set_agent("planner", "Planner")
planner = await create_planner(self.model_client, project_id)
- outline = await planner.create_outline(
- normalized_context,
- context.clarification_questions,
- project_id
- )
+ outline = await planner.create_outline(normalized_context, context.clarification_questions, project_id)
# Check for cancellation before next step
self._check_cancelled()
@@ -207,10 +197,7 @@ async def generate_brainstorm_spec(
self.call_logger.set_agent("writer", "Writer")
writer = await create_writer(self.model_client, project_id)
specification = await writer.write_specification(
- outline,
- normalized_context,
- context.clarification_questions,
- project_id
+ outline, normalized_context, context.clarification_questions, project_id
)
# Check for cancellation before next step
@@ -223,12 +210,7 @@ async def generate_brainstorm_spec(
if self.call_logger:
self.call_logger.set_agent("qa_coverage", "QA Coverage")
qa = await create_qa_coverage(self.model_client, project_id)
- coverage_report = await qa.validate(
- specification,
- outline,
- context.clarification_questions,
- project_id
- )
+ coverage_report = await qa.validate(specification, outline, context.clarification_questions, project_id)
# Complete
self._report_progress("complete", "orchestrator")
@@ -278,7 +260,7 @@ async def create_orchestrator(
Returns:
Initialized BrainstormSpecOrchestrator instance
"""
- from app.agents.llm_client import create_litellm_client, LLMCallLogger
+ from app.agents.llm_client import LLMCallLogger, create_litellm_client
config = config or {}
model = config.get("model")
@@ -289,6 +271,7 @@ async def create_orchestrator(
call_logger = None
if job_id:
from app.database import SessionLocal
+
call_logger = LLMCallLogger(
db_session_factory=SessionLocal,
job_id=job_id,
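
Between agent steps the orchestrator checks whether the job was cancelled and raises `JobCancelledException` to stop the pipeline. A schematic version of that gate; the real code opens a short-lived database session via `JobService`, which is abstracted here into a status-lookup callable:

```python
from typing import Callable, Optional


class JobCancelledException(Exception):
    """Raised when a job is cancelled during pipeline execution."""


def check_cancelled(job_id: Optional[str], get_status: Callable[[str], str]) -> None:
    """Raise if the job was cancelled; a no-op when no job is attached."""
    if not job_id:
        return
    if get_status(job_id) == "cancelled":
        raise JobCancelledException(f"Job {job_id} was cancelled")


# Called between each agent step, e.g. before planning, writing, and validation:
check_cancelled("job-42", get_status=lambda job_id: "running")
```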
diff --git a/backend/app/agents/brainstorm_spec/planner.py b/backend/app/agents/brainstorm_spec/planner.py
index 8182cb3..d21e251 100644
--- a/backend/app/agents/brainstorm_spec/planner.py
+++ b/backend/app/agents/brainstorm_spec/planner.py
@@ -13,14 +13,14 @@
from autogen_core import CancellationToken
from autogen_core.models import ChatCompletionClient
+from .logging_config import get_agent_logger
from .types import (
- NormalizedBrainstormContext,
+ ASPECT_CATEGORY_TO_SECTION_MAPPING,
+ FIXED_SECTIONS,
BrainstormSpecOutline,
+ NormalizedBrainstormContext,
SpecOutlineSection,
- FIXED_SECTIONS,
- ASPECT_CATEGORY_TO_SECTION_MAPPING,
)
-from .logging_config import get_agent_logger
from .utils import strip_markdown_json
@@ -133,7 +133,7 @@ async def create_outline(
self,
normalized_context: NormalizedBrainstormContext,
clarification_questions: List[dict],
- project_id: Optional[str] = None
+ project_id: Optional[str] = None,
) -> BrainstormSpecOutline:
"""
Create the specification outline from normalized context.
@@ -149,10 +149,7 @@ async def create_outline(
Raises:
ValueError: If planning fails or returns invalid JSON
"""
- self.logger.log_agent_start(
- project_id=project_id,
- questions_count=len(clarification_questions)
- )
+ self.logger.log_agent_start(project_id=project_id, questions_count=len(clarification_questions))
try:
# Build the prompt
@@ -162,13 +159,10 @@ async def create_outline(
self.logger.log_llm_call(
prompt=prompt[:500] + "..." if len(prompt) > 500 else prompt,
model=str(self.model_client),
- operation="create_spec_outline"
+ operation="create_spec_outline",
)
- response = await self.agent.on_messages(
- [TextMessage(content=prompt, source="user")],
- CancellationToken()
- )
+ response = await self.agent.on_messages([TextMessage(content=prompt, source="user")], CancellationToken())
response_text = response.chat_message.content
if isinstance(response_text, list):
@@ -181,10 +175,9 @@ async def create_outline(
try:
result_data = json.loads(cleaned_response)
except json.JSONDecodeError as e:
- self.logger.log_error(e, {
- "raw_response": response_text[:500],
- "cleaned_response": cleaned_response[:500]
- })
+ self.logger.log_error(
+ e, {"raw_response": response_text[:500], "cleaned_response": cleaned_response[:500]}
+ )
# Fall back to default outline
return self._create_default_outline(clarification_questions)
@@ -202,7 +195,7 @@ async def create_outline(
self.logger.log_agent_complete(
sections_count=len(outline.sections),
- total_subsections=sum(len(s.subsections) for s in outline.sections)
+ total_subsections=sum(len(s.subsections) for s in outline.sections),
)
return outline
@@ -213,9 +206,7 @@ async def create_outline(
return self._create_default_outline(clarification_questions)
def _build_prompt(
- self,
- normalized_context: NormalizedBrainstormContext,
- clarification_questions: List[dict]
+ self, normalized_context: NormalizedBrainstormContext, clarification_questions: List[dict]
) -> str:
"""
Build the planning prompt.
@@ -257,9 +248,9 @@ def _build_prompt(
if clarification_questions:
prompt += "### Clarification Questions (link these to sections):\n"
for q in clarification_questions:
- q_id = q.get('id', 'unknown')
- q_title = q.get('title', 'Untitled')
- q_category = q.get('category', 'General')
+ q_id = q.get("id", "unknown")
+ q_title = q.get("title", "Untitled")
+ q_category = q.get("category", "General")
prompt += f"- [{q_id}] {q_title} (Category: {q_category})\n"
prompt += "\n"
@@ -271,7 +262,9 @@ def _build_prompt(
prompt += "\n"
prompt += "Create the outline with all 11 sections. "
- prompt += "For sections affected by unanswered topics, set has_qa_backing: false and list pending_clarifications. "
+ prompt += (
+ "For sections affected by unanswered topics, set has_qa_backing: false and list pending_clarifications. "
+ )
prompt += "Return ONLY the JSON object."
return prompt
@@ -298,13 +291,15 @@ def _ensure_all_sections(self, outline: BrainstormSpecOutline) -> BrainstormSpec
for fixed in FIXED_SECTIONS:
if fixed["id"] not in existing_ids:
- outline.sections.append(SpecOutlineSection(
- id=fixed["id"],
- title=fixed["title"],
- description="",
- subsections=[],
- linked_questions=[],
- ))
+ outline.sections.append(
+ SpecOutlineSection(
+ id=fixed["id"],
+ title=fixed["title"],
+ description="",
+ subsections=[],
+ linked_questions=[],
+ )
+ )
# Sort by fixed order
section_order = {s["id"]: i for i, s in enumerate(FIXED_SECTIONS)}
@@ -319,8 +314,8 @@ def _create_default_outline(self, clarification_questions: List[dict]) -> Brains
# Create question-to-section mapping based on categories
question_section_links = {}
for q in clarification_questions:
- q_id = q.get('id', '')
- category = q.get('category', 'Business_Logic')
+ q_id = q.get("id", "")
+ category = q.get("category", "Business_Logic")
target_sections = ASPECT_CATEGORY_TO_SECTION_MAPPING.get(category, [])
for section_id in target_sections:
if section_id.value not in question_section_links:
@@ -328,21 +323,20 @@ def _create_default_outline(self, clarification_questions: List[dict]) -> Brains
question_section_links[section_id.value].append(q_id)
for fixed in FIXED_SECTIONS:
- sections.append(SpecOutlineSection(
- id=fixed["id"],
- title=fixed["title"],
- description="",
- subsections=[],
- linked_questions=question_section_links.get(fixed["id"], []),
- ))
+ sections.append(
+ SpecOutlineSection(
+ id=fixed["id"],
+ title=fixed["title"],
+ description="",
+ subsections=[],
+ linked_questions=question_section_links.get(fixed["id"], []),
+ )
+ )
return BrainstormSpecOutline(sections=sections)
-async def create_planner(
- model_client: ChatCompletionClient,
- project_id: Optional[str] = None
-) -> PlannerAgent:
+async def create_planner(model_client: ChatCompletionClient, project_id: Optional[str] = None) -> PlannerAgent:
"""
Factory function to create a Planner Agent.
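
When the LLM outline cannot be parsed, the planner builds a default outline by routing each clarification question to fixed sections via its category. A condensed sketch of that routing step; the categories and the mapping below are placeholders standing in for `ASPECT_CATEGORY_TO_SECTION_MAPPING`:

```python
from typing import Dict, List

# Placeholder for ASPECT_CATEGORY_TO_SECTION_MAPPING: category -> target section IDs.
CATEGORY_TO_SECTIONS: Dict[str, List[str]] = {
    "Business_Logic": ["functional_requirements"],
    "Security": ["non_functional_requirements", "constraints_and_assumptions"],
}


def link_questions_to_sections(questions: List[dict]) -> Dict[str, List[str]]:
    """Group question IDs by the section each category maps to."""
    links: Dict[str, List[str]] = {}
    for q in questions:
        q_id = q.get("id", "")
        category = q.get("category", "Business_Logic")
        for section_id in CATEGORY_TO_SECTIONS.get(category, []):
            links.setdefault(section_id, []).append(q_id)
    return links


print(link_questions_to_sections([{"id": "q1", "category": "Security"}, {"id": "q2"}]))
```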
diff --git a/backend/app/agents/brainstorm_spec/qa_coverage.py b/backend/app/agents/brainstorm_spec/qa_coverage.py
index e5bc4eb..ec2beb7 100644
--- a/backend/app/agents/brainstorm_spec/qa_coverage.py
+++ b/backend/app/agents/brainstorm_spec/qa_coverage.py
@@ -13,12 +13,12 @@
from autogen_core import CancellationToken
from autogen_core.models import ChatCompletionClient
+from .logging_config import get_agent_logger
from .types import (
BrainstormSpecification,
BrainstormSpecOutline,
CoverageReport,
)
-from .logging_config import get_agent_logger
from .utils import strip_markdown_json
@@ -92,7 +92,7 @@ async def validate(
specification: BrainstormSpecification,
outline: BrainstormSpecOutline,
clarification_questions: List[dict],
- project_id: Optional[str] = None
+ project_id: Optional[str] = None,
) -> CoverageReport:
"""
Validate the specification for coverage and quality.
@@ -112,7 +112,7 @@ async def validate(
self.logger.log_agent_start(
project_id=project_id,
sections_count=len(specification.sections),
- questions_count=len(clarification_questions)
+ questions_count=len(clarification_questions),
)
try:
@@ -123,13 +123,10 @@ async def validate(
self.logger.log_llm_call(
prompt=prompt[:500] + "..." if len(prompt) > 500 else prompt,
model=str(self.model_client),
- operation="validate_coverage"
+ operation="validate_coverage",
)
- response = await self.agent.on_messages(
- [TextMessage(content=prompt, source="user")],
- CancellationToken()
- )
+ response = await self.agent.on_messages([TextMessage(content=prompt, source="user")], CancellationToken())
response_text = response.chat_message.content
if isinstance(response_text, list):
@@ -142,14 +139,11 @@ async def validate(
try:
result_data = json.loads(cleaned_response)
except json.JSONDecodeError as e:
- self.logger.log_error(e, {
- "raw_response": response_text[:500],
- "cleaned_response": cleaned_response[:500]
- })
- # Return a default report on parse failure
- return self._create_fallback_report(
- specification, outline, clarification_questions
+ self.logger.log_error(
+ e, {"raw_response": response_text[:500], "cleaned_response": cleaned_response[:500]}
)
+ # Return a default report on parse failure
+ return self._create_fallback_report(specification, outline, clarification_questions)
# Convert to CoverageReport
report = CoverageReport(
@@ -172,15 +166,13 @@ async def validate(
except Exception as e:
self.logger.log_error(e, {"project_id": project_id})
# Return fallback report on error
- return self._create_fallback_report(
- specification, outline, clarification_questions
- )
+ return self._create_fallback_report(specification, outline, clarification_questions)
def _build_prompt(
self,
specification: BrainstormSpecification,
outline: BrainstormSpecOutline,
- clarification_questions: List[dict]
+ clarification_questions: List[dict],
) -> str:
"""
Build the validation prompt.
@@ -199,18 +191,22 @@ def _build_prompt(
prompt += "### Generated Specification:\n"
for section in specification.sections:
prompt += f"\n**{section.title}** (ID: {section.id})\n"
- prompt += f"{section.body_markdown[:500]}...\n" if len(section.body_markdown) > 500 else f"{section.body_markdown}\n"
+ prompt += (
+ f"{section.body_markdown[:500]}...\n"
+ if len(section.body_markdown) > 500
+ else f"{section.body_markdown}\n"
+ )
if section.linked_questions:
prompt += f"Linked Questions: {', '.join(section.linked_questions)}\n"
prompt += "\n### Clarification Questions:\n"
must_have_questions = []
for q in clarification_questions:
- q_id = q.get('id', 'unknown')
- q_title = q.get('title', 'Untitled')
- q_priority = q.get('priority', 'optional')
+ q_id = q.get("id", "unknown")
+ q_title = q.get("title", "Untitled")
+ q_priority = q.get("priority", "optional")
prompt += f"- [{q_id}] {q_title} (Priority: {q_priority})\n"
- if q_priority == 'must_have':
+ if q_priority == "must_have":
must_have_questions.append(q_id)
if must_have_questions:
@@ -224,7 +220,7 @@ def _create_fallback_report(
self,
specification: BrainstormSpecification,
outline: BrainstormSpecOutline,
- clarification_questions: List[dict]
+ clarification_questions: List[dict],
) -> CoverageReport:
"""
Create a fallback coverage report using simple heuristics.
@@ -247,9 +243,9 @@ def _create_fallback_report(
# Find uncovered must-have questions
uncovered_must_have = []
for q in clarification_questions:
- q_id = q.get('id', '')
- priority = q.get('priority', 'optional')
- if priority == 'must_have' and q_id not in covered_questions:
+ q_id = q.get("id", "")
+ priority = q.get("priority", "optional")
+ if priority == "must_have" and q_id not in covered_questions:
uncovered_must_have.append(q_id)
return CoverageReport(
@@ -257,16 +253,13 @@ def _create_fallback_report(
uncovered_must_have_questions=uncovered_must_have,
weak_coverage_warnings=[],
contradictions_found=[],
- suggested_improvements=[
- "Fallback validation: Consider reviewing specification for completeness"
- ] if uncovered_must_have else [],
+ suggested_improvements=["Fallback validation: Consider reviewing specification for completeness"]
+ if uncovered_must_have
+ else [],
)
-async def create_qa_coverage(
- model_client: ChatCompletionClient,
- project_id: Optional[str] = None
-) -> QACoverageAgent:
+async def create_qa_coverage(model_client: ChatCompletionClient, project_id: Optional[str] = None) -> QACoverageAgent:
"""
Factory function to create a QA/Coverage Agent.
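
The fallback coverage check is a pure set computation: any must_have question whose ID never appears in a section's linked_questions is flagged as uncovered. Stated as a standalone helper over a simplified section shape:

```python
from typing import List, Set


def uncovered_must_have(sections: List[dict], questions: List[dict]) -> List[str]:
    """Return IDs of must_have questions not linked from any specification section."""
    covered: Set[str] = set()
    for section in sections:
        covered.update(section.get("linked_questions", []))
    return [
        q.get("id", "")
        for q in questions
        if q.get("priority", "optional") == "must_have" and q.get("id", "") not in covered
    ]


sections = [{"id": "executive_summary", "linked_questions": ["q1"]}]
questions = [{"id": "q1", "priority": "must_have"}, {"id": "q2", "priority": "must_have"}]
print(uncovered_must_have(sections, questions))  # ['q2']
```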
diff --git a/backend/app/agents/brainstorm_spec/summarizer.py b/backend/app/agents/brainstorm_spec/summarizer.py
index 18a7bf2..1304c8d 100644
--- a/backend/app/agents/brainstorm_spec/summarizer.py
+++ b/backend/app/agents/brainstorm_spec/summarizer.py
@@ -14,11 +14,11 @@
from autogen_core import CancellationToken
from autogen_core.models import ChatCompletionClient
+from .logging_config import get_agent_logger
from .types import (
BrainstormSpecContext,
NormalizedBrainstormContext,
)
-from .logging_config import get_agent_logger
from .utils import strip_markdown_json
@@ -100,9 +100,7 @@ def _get_system_message(self) -> str:
Return ONLY the JSON object, no additional text."""
async def normalize(
- self,
- context: BrainstormSpecContext,
- project_id: Optional[str] = None
+ self, context: BrainstormSpecContext, project_id: Optional[str] = None
) -> NormalizedBrainstormContext:
"""
Normalize brainstorming data into structured context.
@@ -122,7 +120,7 @@ async def normalize(
phase_title=context.phase_title,
aspects_count=len(context.aspects),
questions_count=len(context.clarification_questions),
- threads_count=len(context.thread_discussions)
+ threads_count=len(context.thread_discussions),
)
try:
@@ -133,13 +131,10 @@ async def normalize(
self.logger.log_llm_call(
prompt=prompt[:500] + "..." if len(prompt) > 500 else prompt,
model=str(self.model_client),
- operation="normalize_brainstorm_context"
+ operation="normalize_brainstorm_context",
)
- response = await self.agent.on_messages(
- [TextMessage(content=prompt, source="user")],
- CancellationToken()
- )
+ response = await self.agent.on_messages([TextMessage(content=prompt, source="user")], CancellationToken())
response_text = response.chat_message.content
if isinstance(response_text, list):
@@ -152,10 +147,9 @@ async def normalize(
try:
result_data = json.loads(cleaned_response)
except json.JSONDecodeError as e:
- self.logger.log_error(e, {
- "raw_response": response_text[:500],
- "cleaned_response": cleaned_response[:500]
- })
+ self.logger.log_error(
+ e, {"raw_response": response_text[:500], "cleaned_response": cleaned_response[:500]}
+ )
raise ValueError(f"Failed to parse summarizer response as JSON: {e}")
# Convert to NormalizedBrainstormContext
@@ -199,7 +193,7 @@ def _build_prompt(self, context: BrainstormSpecContext) -> str:
Returns:
Formatted prompt string
"""
- prompt = f"Normalize the following brainstorming data into structured context.\n\n"
+ prompt = "Normalize the following brainstorming data into structured context.\n\n"
# Add phase info
prompt += f"### Phase Title: {context.phase_title}\n"
@@ -222,11 +216,11 @@ def _build_prompt(self, context: BrainstormSpecContext) -> str:
prompt += f" Question: {q.get('spec_text', q.get('description', 'No description'))}\n"
# Include ALL answered MCQs for this feature
- answered_mcqs = q.get('answered_mcqs', [])
+ answered_mcqs = q.get("answered_mcqs", [])
for mcq in answered_mcqs:
- question_text = mcq.get('question_text', '')
- selected_label = mcq.get('selected_label', '')
- free_text = mcq.get('free_text')
+ question_text = mcq.get("question_text", "")
+ selected_label = mcq.get("selected_label", "")
+ free_text = mcq.get("free_text")
prompt += f" - MCQ: {question_text}\n"
prompt += f" Answer: {selected_label}\n"
@@ -240,22 +234,22 @@ def _build_prompt(self, context: BrainstormSpecContext) -> str:
for thread in context.thread_discussions:
prompt += f"- **{thread.get('title', 'Untitled Thread')}**\n"
# Prefer decision summary if available
- if thread.get('decision_summary'):
+ if thread.get("decision_summary"):
prompt += f" {thread['decision_summary']}\n"
- if thread.get('unresolved_points'):
+ if thread.get("unresolved_points"):
prompt += " **Unresolved:**\n"
- for point in thread['unresolved_points']:
- question = point.get('question', '') if isinstance(point, dict) else str(point)
+ for point in thread["unresolved_points"]:
+ question = point.get("question", "") if isinstance(point, dict) else str(point)
prompt += f" - {question}\n"
else:
# Fallback to raw comments if no decision summary
- comments = thread.get('comments', [])
+ comments = thread.get("comments", [])
for comment in comments[:5]: # Limit to first 5 comments per thread
prompt += f" - {comment.get('content', '')[:200]}\n"
# Note any images attached to this thread
- if thread.get('images'):
+ if thread.get("images"):
prompt += " **Attached Images:**\n"
- for img in thread['images'][:5]: # Limit to first 5 images
+ for img in thread["images"][:5]: # Limit to first 5 images
prompt += f" - {img.get('filename', 'unnamed')} (ID: {img.get('id', 'unknown')})\n"
prompt += "\n"
@@ -292,10 +286,7 @@ def _build_sibling_implementation_section(sibling_phases_context) -> str:
if not sibling_phases_context:
return ""
- phases_with_analysis = [
- p for p in sibling_phases_context.sibling_phases
- if p.implementation_analysis
- ]
+ phases_with_analysis = [p for p in sibling_phases_context.sibling_phases if p.implementation_analysis]
if not phases_with_analysis:
return ""
@@ -312,10 +303,7 @@ def _build_sibling_implementation_section(sibling_phases_context) -> str:
return "\n".join(sections)
-async def create_summarizer(
- model_client: ChatCompletionClient,
- project_id: Optional[str] = None
-) -> SummarizerAgent:
+async def create_summarizer(model_client: ChatCompletionClient, project_id: Optional[str] = None) -> SummarizerAgent:
"""
Factory function to create a Summarizer Agent.
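
The summarizer's prompt builder flattens each question's answered MCQs into indented question/answer lines. A reduced version of that rendering loop; the dict keys follow the hunks above, and the sample data is invented:

```python
from typing import List


def render_answered_mcqs(questions: List[dict]) -> str:
    """Render answered MCQs per question in the same indented layout the prompt uses."""
    prompt = ""
    for q in questions:
        prompt += f"- **{q.get('title', 'Untitled')}**\n"
        for mcq in q.get("answered_mcqs", []):
            prompt += f"  - MCQ: {mcq.get('question_text', '')}\n"
            prompt += f"    Answer: {mcq.get('selected_label', '')}\n"
            if mcq.get("free_text"):
                prompt += f"    Additional: {mcq['free_text']}\n"
    return prompt


print(render_answered_mcqs([
    {"title": "Auth method", "answered_mcqs": [{"question_text": "Which provider?", "selected_label": "OAuth"}]}
]))
```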
diff --git a/backend/app/agents/brainstorm_spec/types.py b/backend/app/agents/brainstorm_spec/types.py
index 77faba6..34f3e30 100644
--- a/backend/app/agents/brainstorm_spec/types.py
+++ b/backend/app/agents/brainstorm_spec/types.py
@@ -14,16 +14,17 @@
from dataclasses import dataclass, field
from enum import Enum
-from typing import List, Optional, Dict, Any
+from typing import Any, Dict, List, Optional
from uuid import UUID
-
# ============================
# Enums
# ============================
+
class BrainstormSpecSectionId(str, Enum):
"""Fixed section IDs for the brainstorm specification outline."""
+
EXECUTIVE_SUMMARY = "executive_summary"
PROBLEM_STATEMENT = "problem_statement"
GOALS_AND_NON_GOALS = "goals_and_non_goals"
@@ -41,12 +42,14 @@ class BrainstormSpecSectionId(str, Enum):
# Dataclasses
# ============================
+
@dataclass
class BrainstormSpecContext:
"""
Raw context passed to the Orchestrator for generating Brainstorm Specification.
This is the full input data from a brainstorming phase.
"""
+
project_id: UUID
brainstorming_phase_id: UUID
phase_title: str
@@ -84,6 +87,7 @@ class NormalizedBrainstormContext:
Output from the Summarizer/Normalizer Agent.
Condensed summaries of all brainstorming inputs (300-600 tokens per category).
"""
+
phase_summary: str
key_objectives: List[str] = field(default_factory=list)
user_personas: List[str] = field(default_factory=list)
@@ -111,10 +115,11 @@ class SpecOutlineSection:
A single section in the specification outline.
Can have nested subsections and linked clarification questions.
"""
+
id: str
title: str
description: str = ""
- subsections: List['SpecOutlineSection'] = field(default_factory=list)
+ subsections: List["SpecOutlineSection"] = field(default_factory=list)
linked_questions: List[str] = field(default_factory=list) # Question IDs
# Q&A-aware generation: tracks whether section has sufficient answered questions
has_qa_backing: bool = True # Default True for backwards compatibility
@@ -127,6 +132,7 @@ class BrainstormSpecOutline:
Full specification outline generated by the Planner Agent.
Contains hierarchical section tree with linked questions.
"""
+
sections: List[SpecOutlineSection] = field(default_factory=list)
@@ -136,6 +142,7 @@ class SpecSectionContent:
A single specification section with rendered markdown content.
Output from the Writer Agent.
"""
+
id: str
title: str
body_markdown: str
@@ -149,6 +156,7 @@ class BrainstormSpecification:
Complete specification document generated by the Writer Agent.
Contains all sections with rendered markdown.
"""
+
sections: List[SpecSectionContent] = field(default_factory=list)
def to_markdown(self) -> str:
@@ -192,7 +200,7 @@ def to_summary_markdown(self) -> str:
# Fallback: use first 300 chars of body if no summary
truncated = section.body_markdown[:300]
if len(section.body_markdown) > 300:
- truncated = truncated.rsplit(' ', 1)[0] + "..."
+ truncated = truncated.rsplit(" ", 1)[0] + "..."
lines.append(truncated)
lines.append("")
return "\n".join(lines)
@@ -204,6 +212,7 @@ class CoverageReport:
Quality assurance and coverage report generated by the QA Agent.
Validates completeness, consistency, and quality of the specification.
"""
+
ok: bool
uncovered_must_have_questions: List[str] = field(default_factory=list)
weak_coverage_warnings: List[str] = field(default_factory=list)
@@ -217,6 +226,7 @@ class BrainstormSpecResult:
Final result returned by the Orchestrator.
Includes the complete specification, outline, and coverage report.
"""
+
specification: BrainstormSpecification
outline: BrainstormSpecOutline
normalized_context: NormalizedBrainstormContext
@@ -227,12 +237,14 @@ class BrainstormSpecResult:
# Agent Metadata for UI
# ============================
+
@dataclass
class AgentInfo:
"""
UI metadata for an agent in the Brainstorm Spec generation workflow.
Used for progress tracking and visual representation.
"""
+
name: str
description: str
color: str # Hex color for UI tag
@@ -243,40 +255,33 @@ class AgentInfo:
"orchestrator": AgentInfo(
name="Orchestrator",
description="Coordinating specification generation workflow",
- color="#8B5CF6" # Purple
+ color="#8B5CF6", # Purple
),
"summarizer": AgentInfo(
name="Summarizer",
description="Normalizing brainstorm discussions into structured context",
- color="#3B82F6" # Blue
+ color="#3B82F6", # Blue
),
"planner": AgentInfo(
name="Planner",
description="Creating specification outline with linked questions",
- color="#10B981" # Green
+ color="#10B981", # Green
),
"writer": AgentInfo(
name="Writer",
description="Generating specification content for each section",
- color="#F59E0B" # Amber
+ color="#F59E0B", # Amber
),
"qa_coverage": AgentInfo(
name="QA/Coverage",
description="Validating completeness and coverage of specification",
- color="#EC4899" # Pink
+ color="#EC4899", # Pink
),
}
# Workflow step definitions for progress tracking
-WORKFLOW_STEPS = [
- "start",
- "normalizing",
- "planning",
- "writing",
- "validating",
- "complete"
-]
+WORKFLOW_STEPS = ["start", "normalizing", "planning", "writing", "validating", "complete"]
# ============================
@@ -335,6 +340,7 @@ class AgentInfo:
# Helper Functions
# ============================
+
def get_section_by_id(sections: List[SpecSectionContent], section_id: str) -> Optional[SpecSectionContent]:
"""Get a section by its ID."""
for section in sections:
diff --git a/backend/app/agents/brainstorm_spec/utils.py b/backend/app/agents/brainstorm_spec/utils.py
index 37b931a..c1b8642 100644
--- a/backend/app/agents/brainstorm_spec/utils.py
+++ b/backend/app/agents/brainstorm_spec/utils.py
@@ -5,9 +5,9 @@
import re
# Import from common module and re-export for backwards compatibility
-from app.agents.response_parser import strip_markdown_json, normalize_response_content
+from app.agents.response_parser import normalize_response_content, strip_markdown_json
-__all__ = ['strip_markdown_json', 'normalize_response_content', 'truncate_text', 'normalize_whitespace']
+__all__ = ["strip_markdown_json", "normalize_response_content", "truncate_text", "normalize_whitespace"]
def truncate_text(text: str, max_length: int = 500) -> str:
@@ -25,7 +25,7 @@ def truncate_text(text: str, max_length: int = 500) -> str:
return text
# Truncate at word boundary
- truncated = text[:max_length].rsplit(' ', 1)[0]
+ truncated = text[:max_length].rsplit(" ", 1)[0]
return truncated + "..."
@@ -40,5 +40,5 @@ def normalize_whitespace(text: str) -> str:
Normalized text
"""
# Replace multiple whitespace with single space
- text = re.sub(r'\s+', ' ', text)
+ text = re.sub(r"\s+", " ", text)
return text.strip()
diff --git a/backend/app/agents/brainstorm_spec/writer.py b/backend/app/agents/brainstorm_spec/writer.py
index 1724af5..db20d3b 100644
--- a/backend/app/agents/brainstorm_spec/writer.py
+++ b/backend/app/agents/brainstorm_spec/writer.py
@@ -14,14 +14,14 @@
from autogen_core import CancellationToken
from autogen_core.models import ChatCompletionClient
+from .logging_config import get_agent_logger
from .types import (
- NormalizedBrainstormContext,
- BrainstormSpecOutline,
BrainstormSpecification,
- SpecSectionContent,
+ BrainstormSpecOutline,
+ NormalizedBrainstormContext,
SpecOutlineSection,
+ SpecSectionContent,
)
-from .logging_config import get_agent_logger
from .utils import strip_markdown_json
@@ -142,7 +142,7 @@ async def write_specification(
outline: BrainstormSpecOutline,
normalized_context: NormalizedBrainstormContext,
clarification_questions: List[dict],
- project_id: Optional[str] = None
+ project_id: Optional[str] = None,
) -> BrainstormSpecification:
"""
Write the full specification from outline and context.
@@ -159,19 +159,11 @@ async def write_specification(
Raises:
Exception: If writing fails for all sections
"""
- self.logger.log_agent_start(
- project_id=project_id,
- sections_count=len(outline.sections)
- )
+ self.logger.log_agent_start(project_id=project_id, sections_count=len(outline.sections))
# Create tasks for all sections to execute in parallel
tasks = [
- self._write_section(
- section,
- normalized_context,
- clarification_questions,
- project_id
- )
+ self._write_section(section, normalized_context, clarification_questions, project_id)
for section in outline.sections
]
@@ -185,20 +177,21 @@ async def write_specification(
if isinstance(result, Exception):
self.logger.log_error(result, {"section_id": section.id})
# Create a placeholder for failed sections
- sections.append(SpecSectionContent(
- id=section.id,
- title=section.title,
- body_markdown=f"*Content generation failed: {str(result)}*",
- linked_questions=section.linked_questions,
- ))
+ sections.append(
+ SpecSectionContent(
+ id=section.id,
+ title=section.title,
+ body_markdown=f"*Content generation failed: {str(result)}*",
+ linked_questions=section.linked_questions,
+ )
+ )
else:
sections.append(result)
specification = BrainstormSpecification(sections=sections)
self.logger.log_agent_complete(
- sections_written=len(sections),
- total_markdown_length=sum(len(s.body_markdown) for s in sections)
+ sections_written=len(sections), total_markdown_length=sum(len(s.body_markdown) for s in sections)
)
return specification
@@ -208,7 +201,7 @@ async def _write_section(
section: SpecOutlineSection,
normalized_context: NormalizedBrainstormContext,
clarification_questions: List[dict],
- project_id: Optional[str] = None
+ project_id: Optional[str] = None,
) -> SpecSectionContent:
"""
Write content for a single section.
@@ -230,7 +223,7 @@ async def _write_section(
prompt=prompt[:300] + "..." if len(prompt) > 300 else prompt,
model=str(self.model_client),
operation="write_section",
- section_id=section.id
+ section_id=section.id,
)
# Create a FRESH agent for each section to avoid conversation history accumulation
@@ -242,10 +235,7 @@ async def _write_section(
model_client=self.model_client,
)
- response = await section_agent.on_messages(
- [TextMessage(content=prompt, source="user")],
- CancellationToken()
- )
+ response = await section_agent.on_messages([TextMessage(content=prompt, source="user")], CancellationToken())
response_text = response.chat_message.content
if isinstance(response_text, list):
@@ -291,8 +281,7 @@ def _parse_writer_response(self, response_text: str, section_id: str) -> tuple:
# Validate content exists
if not content:
self.logger.log_error(
- ValueError("Empty content in JSON response"),
- {"section_id": section_id, "response": cleaned[:500]}
+ ValueError("Empty content in JSON response"), {"section_id": section_id, "response": cleaned[:500]}
)
# Fallback: treat entire response as content
return self._clean_markdown(cleaned), ""
@@ -307,7 +296,7 @@ def _parse_writer_response(self, response_text: str, section_id: str) -> tuple:
# Fallback: treat response as raw markdown (legacy behavior)
self.logger.log_error(
ValueError("Failed to parse JSON, using raw markdown"),
- {"section_id": section_id, "response": cleaned[:200]}
+ {"section_id": section_id, "response": cleaned[:200]},
)
body_markdown = self._clean_markdown(cleaned)
summary = self._generate_fallback_summary(body_markdown)
@@ -329,14 +318,14 @@ def _generate_fallback_summary(self, content: str, max_chars: int = 300) -> str:
if len(content) <= max_chars:
return content
# Take first N characters, break at word boundary
- truncated = content[:max_chars].rsplit(' ', 1)[0]
+ truncated = content[:max_chars].rsplit(" ", 1)[0]
return truncated + "..."
def _build_section_prompt(
self,
section: SpecOutlineSection,
normalized_context: NormalizedBrainstormContext,
- clarification_questions: List[dict]
+ clarification_questions: List[dict],
) -> str:
"""
Build the prompt for writing a section.
@@ -355,10 +344,10 @@ def _build_section_prompt(
prompt += f"Section Description: {section.description}\n\n"
# Check if section has Q&A backing - if not, generate placeholder
- if hasattr(section, 'has_qa_backing') and not section.has_qa_backing:
+ if hasattr(section, "has_qa_backing") and not section.has_qa_backing:
prompt += "### PENDING CLARIFICATIONS:\n"
prompt += "This section lacks sufficient Q&A backing. The following topics need clarification:\n"
- if hasattr(section, 'pending_clarifications') and section.pending_clarifications:
+ if hasattr(section, "pending_clarifications") and section.pending_clarifications:
for clarification in section.pending_clarifications:
prompt += f"- {clarification}\n"
else:
@@ -418,7 +407,9 @@ def _build_section_prompt(
if normalized_context.system_context_summary:
prompt += "\n### System Context (for coherence, not direct requirements):\n"
prompt += normalized_context.system_context_summary
- prompt += "\n\nUse this to ensure consistency with other phases. Do NOT add requirements from other phases.\n"
+ prompt += (
+ "\n\nUse this to ensure consistency with other phases. Do NOT add requirements from other phases.\n"
+ )
elif "data" in section_id:
if normalized_context.data_requirements:
@@ -441,7 +432,9 @@ def _build_section_prompt(
if normalized_context.system_context_summary:
prompt += "\n### System Context (for coherence, not direct requirements):\n"
prompt += normalized_context.system_context_summary
- prompt += "\n\nUse this to ensure consistency with other phases. Do NOT add requirements from other phases.\n"
+ prompt += (
+ "\n\nUse this to ensure consistency with other phases. Do NOT add requirements from other phases.\n"
+ )
elif "constraint" in section_id or "assumption" in section_id:
if normalized_context.constraints:
@@ -463,20 +456,20 @@ def _build_section_prompt(
for q_id in section.linked_questions:
# Find the question
for q in clarification_questions:
- if q.get('id') == q_id:
+ if q.get("id") == q_id:
prompt += f"- **{q.get('title', 'Untitled')}**\n"
prompt += f" {q.get('spec_text', q.get('description', ''))}\n"
# Include answer if available
- mcq_data = q.get('mcq_data', {})
+ mcq_data = q.get("mcq_data", {})
if mcq_data:
- selected = mcq_data.get('selected_option_id')
+ selected = mcq_data.get("selected_option_id")
if selected:
- choices = mcq_data.get('choices', [])
+ choices = mcq_data.get("choices", [])
for choice in choices:
- if choice.get('id') == selected:
+ if choice.get("id") == selected:
prompt += f" Answer: {choice.get('label', selected)}\n"
break
- free_text = mcq_data.get('free_text')
+ free_text = mcq_data.get("free_text")
if free_text:
prompt += f" Additional: {free_text}\n"
break
@@ -507,10 +500,7 @@ def _build_section_prompt(
return prompt
-async def create_writer(
- model_client: ChatCompletionClient,
- project_id: Optional[str] = None
-) -> WriterAgent:
+async def create_writer(model_client: ChatCompletionClient, project_id: Optional[str] = None) -> WriterAgent:
"""
Factory function to create a Writer Agent.
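
`_parse_writer_response` expects a JSON object carrying the section body and a summary, and falls back to treating the whole reply as raw markdown with a word-boundary summary when parsing fails. A compact sketch of that two-branch parse; the `content`/`summary` keys are inferred from the error paths shown above, so treat them as assumptions:

```python
import json
from typing import Tuple


def fallback_summary(content: str, max_chars: int = 300) -> str:
    if len(content) <= max_chars:
        return content
    return content[:max_chars].rsplit(" ", 1)[0] + "..."


def parse_writer_response(raw: str) -> Tuple[str, str]:
    """Return (body_markdown, summary); fall back to raw markdown if JSON parsing fails."""
    try:
        data = json.loads(raw)
        content = data.get("content", "")
        if content:
            return content, data.get("summary", fallback_summary(content))
    except json.JSONDecodeError:
        pass
    # Legacy behaviour: treat the whole reply as markdown and derive a summary from it.
    return raw, fallback_summary(raw)


print(parse_writer_response('{"content": "## Goals\\n...", "summary": "Lists goals."}'))
print(parse_writer_response("## Goals (raw markdown reply)"))
```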
diff --git a/backend/app/agents/collab_thread_assistant/__init__.py b/backend/app/agents/collab_thread_assistant/__init__.py
index 5d3d14a..55fe50b 100644
--- a/backend/app/agents/collab_thread_assistant/__init__.py
+++ b/backend/app/agents/collab_thread_assistant/__init__.py
@@ -16,30 +16,30 @@
- Phase 8: Production Hardening & Observability
"""
-from .types import CollabThreadContext, AssistantResponse
-from .validators import ResponseValidator, ValidationResult
+from .assistant import SYSTEM_PROMPT, CollabThreadAssistant
from .config import (
- TOKEN_THRESHOLD,
- SUMMARY_MAX_TOKENS,
- RECENT_MESSAGES_COUNT,
+ ENABLE_DEBUG_LOGGING,
MAX_RETRIES,
+ RECENT_MESSAGES_COUNT,
RETRY_BACKOFF_MS,
- ENABLE_DEBUG_LOGGING,
+ SUMMARY_MAX_TOKENS,
+ TOKEN_THRESHOLD,
)
-from .context_loader import load_thread, load_files, token_count, load_spec_draft_context
-from .summarizer import SummarizerAgent
-from .assistant import CollabThreadAssistant, SYSTEM_PROMPT
-from .orchestrator import build_context, call_assistant, handle_ai_mention
-from .spec_draft_handler import handle_spec_draft_ai_mention
-from .spec_draft_assistant import SpecDraftAssistant
-from .retry import with_retry, with_retry_sync, RetryError
+from .context_loader import load_files, load_spec_draft_context, load_thread, token_count
from .instrumentation import (
CollabThreadAssistantLogger,
- get_assistant_logger,
DebugInfo,
- SummarizationEvent,
RetryEvent,
+ SummarizationEvent,
+ get_assistant_logger,
)
+from .orchestrator import build_context, call_assistant, handle_ai_mention
+from .retry import RetryError, with_retry, with_retry_sync
+from .spec_draft_assistant import SpecDraftAssistant
+from .spec_draft_handler import handle_spec_draft_ai_mention
+from .summarizer import SummarizerAgent
+from .types import AssistantResponse, CollabThreadContext
+from .validators import ResponseValidator, ValidationResult
__all__ = [
# Types
diff --git a/backend/app/agents/collab_thread_assistant/assistant.py b/backend/app/agents/collab_thread_assistant/assistant.py
index 4406302..81ce5a3 100644
--- a/backend/app/agents/collab_thread_assistant/assistant.py
+++ b/backend/app/agents/collab_thread_assistant/assistant.py
@@ -8,20 +8,21 @@
"""
import logging
-from typing import List, Optional, TYPE_CHECKING
+from typing import TYPE_CHECKING, List, Optional
from autogen_agentchat.agents import AssistantAgent
from autogen_agentchat.messages import TextMessage
from autogen_core import CancellationToken
from autogen_core.models import ChatCompletionClient
+from app.agents.brainstorm_conversation.types import CrossProjectContext
+
from .types import (
- CollabThreadContext,
- ThreadMessage,
BrainstormingPhaseContext,
+ CollabThreadContext,
CurrentThreadContext,
+ ThreadMessage,
)
-from app.agents.brainstorm_conversation.types import CrossProjectContext
if TYPE_CHECKING:
from app.agents.llm_client import LLMCallLogger
@@ -169,9 +170,7 @@ def _create_agent(self) -> AssistantAgent:
model_client=self.model_client,
)
- def _format_brainstorming_context(
- self, brainstorming_context: BrainstormingPhaseContext
- ) -> str:
+ def _format_brainstorming_context(self, brainstorming_context: BrainstormingPhaseContext) -> str:
"""
Format brainstorming phase context for the prompt.
@@ -222,9 +221,7 @@ def _format_brainstorming_context(
return "\n".join(lines)
- def _format_cross_project_context(
- self, cross_project_context: CrossProjectContext
- ) -> str:
+ def _format_cross_project_context(self, cross_project_context: CrossProjectContext) -> str:
"""
Format cross-project context (decisions from other phases + project features).
@@ -256,8 +253,7 @@ def _format_cross_project_context(
for decision in phase_ctx.decisions:
lines.append(
- f"- **{decision.question_title}** ({decision.aspect_title}): "
- f"{decision.decision_summary_short}"
+ f"- **{decision.question_title}** ({decision.aspect_title}): {decision.decision_summary_short}"
)
lines.append("")
@@ -267,8 +263,7 @@ def _format_cross_project_context(
lines.append("")
for feature in cross_project_context.project_features:
lines.append(
- f"- **{feature.feature_title}** ({feature.module_title}): "
- f"{feature.decision_summary_short}"
+ f"- **{feature.feature_title}** ({feature.module_title}): {feature.decision_summary_short}"
)
lines.append("")
@@ -305,8 +300,7 @@ def _format_context(self, context: CollabThreadContext) -> str:
# Cross-project context (decisions from OTHER phases + project features)
# Placed early to give the assistant broad project awareness
if context.cross_project_context and (
- context.cross_project_context.other_phases
- or context.cross_project_context.project_features
+ context.cross_project_context.other_phases or context.cross_project_context.project_features
):
sections.append(self._format_cross_project_context(context.cross_project_context))
sections.append("")
@@ -399,9 +393,7 @@ def _format_recent_messages(self, messages: List[ThreadMessage]) -> str:
lines.append("")
return "\n".join(lines)
- def _format_current_thread_context(
- self, current_thread_context: CurrentThreadContext
- ) -> str:
+ def _format_current_thread_context(self, current_thread_context: CurrentThreadContext) -> str:
"""
Format the current thread context for the User's Question section.
@@ -499,9 +491,7 @@ def _build_prompt(
# Format current thread context for User's Question section
current_thread_section = ""
if context.current_thread_context:
- current_thread_section = self._format_current_thread_context(
- context.current_thread_context
- )
+ current_thread_section = self._format_current_thread_context(context.current_thread_context)
current_thread_section += "\n"
# Add additional context if provided (e.g., MCQ answer context)
@@ -687,10 +677,7 @@ async def respond(
"""
# Set agent context for LLM call logging
if self.llm_call_logger:
- self.llm_call_logger.set_agent(
- "collab_thread_assistant",
- "Collab Thread Assistant (@MFBTAI)"
- )
+ self.llm_call_logger.set_agent("collab_thread_assistant", "Collab Thread Assistant (@MFBTAI)")
# Create a fresh agent for this call
agent = self._create_agent()
@@ -743,7 +730,7 @@ def _generate_fallback_response(self, user_message: str, error: str) -> str:
## Key Points
-- Your question: {user_message[:200]}{'...' if len(user_message) > 200 else ''}
+- Your question: {user_message[:200]}{"..." if len(user_message) > 200 else ""}
- I was unable to generate a complete response due to a technical issue.
## Next Steps
diff --git a/backend/app/agents/collab_thread_assistant/context_loader.py b/backend/app/agents/collab_thread_assistant/context_loader.py
index cc37649..947f86e 100644
--- a/backend/app/agents/collab_thread_assistant/context_loader.py
+++ b/backend/app/agents/collab_thread_assistant/context_loader.py
@@ -15,31 +15,29 @@
import litellm
from sqlalchemy.orm import Session, joinedload
-from app.models.thread import Thread, ContextType
-from app.models.thread_item import ThreadItem, ThreadItemType
-from app.models.feature import Feature, FeatureVisibilityStatus, FeatureType
-from app.models.module import Module, ModuleType
-from app.models.brainstorming_phase import BrainstormingPhase
-from app.services.grounding_service import GroundingService
-from app.services.agent_utils import AGENT_EMAIL
-
from app.agents.brainstorm_conversation.types import (
- CrossProjectContext,
CrossPhaseContext,
CrossPhaseDecision,
+ CrossProjectContext,
ProjectFeatureDecision,
)
+from app.models.brainstorming_phase import BrainstormingPhase
+from app.models.feature import Feature, FeatureType, FeatureVisibilityStatus
+from app.models.module import Module, ModuleType
+from app.models.thread import ContextType, Thread
+from app.models.thread_item import ThreadItem, ThreadItemType
+from app.services.agent_utils import AGENT_EMAIL
+from app.services.grounding_service import GroundingService
+from .config import RECENT_MESSAGES_COUNT, TOKEN_COUNT_MODEL
from .types import (
- ThreadMessage,
- BrainstormingPhaseContext,
AnsweredQuestion,
- ThreadDiscussionSummary,
- MCQChoice,
- FeatureContext,
+ BrainstormingPhaseContext,
CurrentThreadContext,
+ FeatureContext,
+ MCQChoice,
+ ThreadMessage,
)
-from .config import RECENT_MESSAGES_COUNT, TOKEN_COUNT_MODEL
logger = logging.getLogger(__name__)
@@ -130,9 +128,7 @@ def load_thread(
thread = (
db.query(Thread)
.filter(Thread.id == thread_id)
- .options(
- joinedload(Thread.items).joinedload(ThreadItem.author)
- )
+ .options(joinedload(Thread.items).joinedload(ThreadItem.author))
.first()
)
@@ -145,10 +141,7 @@ def load_thread(
ThreadItemType.CODE_EXPLORATION.value,
ThreadItemType.WEB_SEARCH.value,
]
- relevant_items = [
- item for item in thread.items
- if item.item_type in allowed_types
- ]
+ relevant_items = [item for item in thread.items if item.item_type in allowed_types]
# Convert to ThreadMessage objects
messages: List[ThreadMessage] = []
@@ -370,12 +363,7 @@ def load_files(
# Infer project_id from feature if not provided
if project_id is None:
- feature = (
- db.query(Feature)
- .options(joinedload(Feature.module))
- .filter(Feature.id == feature_id)
- .first()
- )
+ feature = db.query(Feature).options(joinedload(Feature.module)).filter(Feature.id == feature_id).first()
if feature and feature.module:
project_id = str(feature.module.project_id)
@@ -416,11 +404,13 @@ def _extract_mcq_choices(choices: List[dict], selected_option_id: Optional[str])
"""Extract MCQ choices with selection status."""
mcq_choices = []
for choice in choices:
- mcq_choices.append(MCQChoice(
- id=choice.get("id", ""),
- label=choice.get("label", ""),
- is_selected=(choice.get("id") == selected_option_id) if selected_option_id else False,
- ))
+ mcq_choices.append(
+ MCQChoice(
+ id=choice.get("id", ""),
+ label=choice.get("label", ""),
+ is_selected=(choice.get("id") == selected_option_id) if selected_option_id else False,
+ )
+ )
return mcq_choices
@@ -429,7 +419,7 @@ def _extract_unresolved_points(thread: Thread) -> List[str]:
unresolved = []
if thread.unresolved_points:
for point in thread.unresolved_points:
- question = point.get('question', '') if isinstance(point, dict) else str(point)
+ question = point.get("question", "") if isinstance(point, dict) else str(point)
if question:
unresolved.append(question)
return unresolved
@@ -437,10 +427,7 @@ def _extract_unresolved_points(thread: Thread) -> List[str]:
def _extract_key_points_fallback(thread: Thread) -> List[str]:
"""Extract key points from comments as fallback when no decision summary."""
- comment_items = [
- item for item in thread.items
- if item.item_type == ThreadItemType.COMMENT.value
- ]
+ comment_items = [item for item in thread.items if item.item_type == ThreadItemType.COMMENT.value]
if not comment_items:
return []
@@ -453,11 +440,7 @@ def _extract_key_points_fallback(thread: Thread) -> List[str]:
if body:
# Truncate long comments
truncated = body[:200] + "..." if len(body) > 200 else body
- author_name = (
- item.author.display_name
- if item.author and item.author.display_name
- else "User"
- )
+ author_name = item.author.display_name if item.author and item.author.display_name else "User"
key_points.append(f"{author_name}: {truncated}")
return key_points
@@ -489,9 +472,7 @@ def load_brainstorming_phase_context(
# Load feature with module and brainstorming phase
feature = (
db.query(Feature)
- .options(
- joinedload(Feature.module).joinedload(Module.brainstorming_phase)
- )
+ .options(joinedload(Feature.module).joinedload(Module.brainstorming_phase))
.filter(Feature.id == feature_id)
.first()
)
@@ -586,14 +567,16 @@ def load_brainstorming_phase_context(
# Create FeatureContext combining questions + summary
if feature_questions or decision_summary or key_points:
- feature_contexts.append(FeatureContext(
- feature_id=str(conv_feature.id),
- feature_title=conv_feature.title,
- answered_questions=feature_questions,
- decision_summary=decision_summary,
- unresolved_points=unresolved,
- key_points=key_points,
- ))
+ feature_contexts.append(
+ FeatureContext(
+ feature_id=str(conv_feature.id),
+ feature_title=conv_feature.title,
+ answered_questions=feature_questions,
+ decision_summary=decision_summary,
+ unresolved_points=unresolved,
+ key_points=key_points,
+ )
+ )
# Apply limit
feature_contexts = feature_contexts[:MAX_FEATURE_CONTEXTS]
@@ -645,12 +628,7 @@ def load_current_thread_context(
return None
# Load thread with items
- thread = (
- db.query(Thread)
- .filter(Thread.id == thread_id)
- .options(joinedload(Thread.items))
- .first()
- )
+ thread = db.query(Thread).filter(Thread.id == thread_id).options(joinedload(Thread.items)).first()
if not thread:
logger.debug(f"Thread {thread_id} not found for current thread context")
return None
@@ -675,13 +653,15 @@ def load_current_thread_context(
mcq_choices = _extract_mcq_choices(choices, selected_option_id)
- mcq_questions.append(AnsweredQuestion(
- question_text=content_data.get("question_text", ""),
- selected_label=selected_label,
- choices=mcq_choices,
- free_text=content_data.get("free_text"),
- feature_title=feature.title,
- ))
+ mcq_questions.append(
+ AnsweredQuestion(
+ question_text=content_data.get("question_text", ""),
+ selected_label=selected_label,
+ choices=mcq_choices,
+ free_text=content_data.get("free_text"),
+ feature_title=feature.title,
+ )
+ )
# Extract decision summary and unresolved points
unresolved = _extract_unresolved_points(thread)
@@ -740,41 +720,40 @@ def load_cross_project_context(
# 1. Query all brainstorming phases (not archived)
phase_query = db.query(BrainstormingPhase).filter(
- BrainstormingPhase.project_id == project_id,
- BrainstormingPhase.archived_at.is_(None)
+ BrainstormingPhase.project_id == project_id, BrainstormingPhase.archived_at.is_(None)
)
# Exclude current phase if specified
if exclude_phase_id:
phase_query = phase_query.filter(BrainstormingPhase.id != exclude_phase_id)
- all_phases = phase_query.order_by(
- BrainstormingPhase.created_at
- ).limit(MAX_PHASES_FOR_CROSS_CONTEXT).all()
+ all_phases = phase_query.order_by(BrainstormingPhase.created_at).limit(MAX_PHASES_FOR_CROSS_CONTEXT).all()
for phase in all_phases:
decisions: List[CrossPhaseDecision] = []
# Get modules for this phase
- modules = db.query(Module).filter(
- Module.brainstorming_phase_id == phase.id,
- Module.archived_at.is_(None)
- ).all()
+ modules = db.query(Module).filter(Module.brainstorming_phase_id == phase.id, Module.archived_at.is_(None)).all()
for module in modules:
# Get ACTIVE features (questions) with threads that have decisions
- features = db.query(Feature).filter(
- Feature.module_id == module.id,
- Feature.visibility_status == FeatureVisibilityStatus.ACTIVE,
- Feature.archived_at.is_(None)
- ).all()
+ features = (
+ db.query(Feature)
+ .filter(
+ Feature.module_id == module.id,
+ Feature.visibility_status == FeatureVisibilityStatus.ACTIVE,
+ Feature.archived_at.is_(None),
+ )
+ .all()
+ )
for feature in features:
# Get thread for this feature
- thread = db.query(Thread).filter(
- Thread.context_type == ContextType.BRAINSTORM_FEATURE,
- Thread.context_id == str(feature.id)
- ).first()
+ thread = (
+ db.query(Thread)
+ .filter(Thread.context_type == ContextType.BRAINSTORM_FEATURE, Thread.context_id == str(feature.id))
+ .first()
+ )
# Only include if thread has decision_summary_short or decision_summary
if thread and (thread.decision_summary_short or thread.decision_summary):
@@ -785,11 +764,13 @@ def load_cross_project_context(
else thread.decision_summary
)
if summary:
- decisions.append(CrossPhaseDecision(
- question_title=feature.title,
- decision_summary_short=summary,
- aspect_title=module.title,
- ))
+ decisions.append(
+ CrossPhaseDecision(
+ question_title=feature.title,
+ decision_summary_short=summary,
+ aspect_title=module.title,
+ )
+ )
# Cap decisions per phase
if len(decisions) >= MAX_DECISIONS_PER_PHASE:
@@ -805,34 +786,38 @@ def load_cross_project_context(
if len(description) > 200:
description = description[:200] + "..."
- phases_context.append(CrossPhaseContext(
- phase_id=str(phase.id),
- phase_title=phase.title,
- phase_description=description,
- decisions=decisions,
- ))
+ phases_context.append(
+ CrossPhaseContext(
+ phase_id=str(phase.id),
+ phase_title=phase.title,
+ phase_description=description,
+ decisions=decisions,
+ )
+ )
# 2. Query project-level features (module.brainstorming_phase_id IS NULL)
- project_modules = db.query(Module).filter(
- Module.project_id == project_id,
- Module.brainstorming_phase_id.is_(None),
- Module.archived_at.is_(None)
- ).all()
+ project_modules = (
+ db.query(Module)
+ .filter(Module.project_id == project_id, Module.brainstorming_phase_id.is_(None), Module.archived_at.is_(None))
+ .all()
+ )
for module in project_modules:
# Get IMPLEMENTATION features (not CONVERSATION)
- features = db.query(Feature).filter(
- Feature.module_id == module.id,
- Feature.feature_type == FeatureType.IMPLEMENTATION,
- Feature.visibility_status == FeatureVisibilityStatus.ACTIVE,
- Feature.archived_at.is_(None)
- ).all()
+ features = (
+ db.query(Feature)
+ .filter(
+ Feature.module_id == module.id,
+ Feature.feature_type == FeatureType.IMPLEMENTATION,
+ Feature.visibility_status == FeatureVisibilityStatus.ACTIVE,
+ Feature.archived_at.is_(None),
+ )
+ .all()
+ )
for feature in features:
# Get thread for this feature (could be SPEC or GENERAL context type)
- thread = db.query(Thread).filter(
- Thread.context_id == str(feature.id)
- ).first()
+ thread = db.query(Thread).filter(Thread.context_id == str(feature.id)).first()
# Only include if thread has decision summary
if thread and (thread.decision_summary_short or thread.decision_summary):
@@ -842,11 +827,13 @@ def load_cross_project_context(
else thread.decision_summary
)
if summary:
- project_features_context.append(ProjectFeatureDecision(
- feature_title=feature.title,
- module_title=module.title,
- decision_summary_short=summary,
- ))
+ project_features_context.append(
+ ProjectFeatureDecision(
+ feature_title=feature.title,
+ module_title=module.title,
+ decision_summary_short=summary,
+ )
+ )
# Cap project features
if len(project_features_context) >= MAX_PROJECT_FEATURES_FOR_CROSS_CONTEXT:
@@ -908,8 +895,8 @@ def load_spec_draft_context(
Raises:
ValueError: If draft version or phase not found.
"""
- from app.services.draft_version_service import DraftVersionService
from app.services.brainstorming_phase_service import BrainstormingPhaseService
+ from app.services.draft_version_service import DraftVersionService
# Load draft version
draft = DraftVersionService.get_draft(db, UUID(version_id))
@@ -931,10 +918,7 @@ def load_spec_draft_context(
# Load grounding files for project context
grounding_data = load_grounding_files(db, str(phase.project_id))
- logger.info(
- f"Loaded spec draft context for version {version_id}, "
- f"block {block_id}, phase {phase.title}"
- )
+ logger.info(f"Loaded spec draft context for version {version_id}, block {block_id}, phase {phase.title}")
return {
"full_document": draft.content_markdown or "",
diff --git a/backend/app/agents/collab_thread_assistant/exploration_parser.py b/backend/app/agents/collab_thread_assistant/exploration_parser.py
index bbbdcf2..8317a79 100644
--- a/backend/app/agents/collab_thread_assistant/exploration_parser.py
+++ b/backend/app/agents/collab_thread_assistant/exploration_parser.py
@@ -60,8 +60,7 @@ def parse_exploration_request(response_text: str) -> Optional[CodeExplorationReq
for fallback in fallback_patterns:
if re.search(fallback, response_text.lower()):
logger.warning(
- f"Detected exploration intent without proper block format. "
- f"Response contains: '{fallback}'"
+ f"Detected exploration intent without proper block format. Response contains: '{fallback}'"
)
return None
diff --git a/backend/app/agents/collab_thread_assistant/instrumentation.py b/backend/app/agents/collab_thread_assistant/instrumentation.py
index 383d378..8a3a322 100644
--- a/backend/app/agents/collab_thread_assistant/instrumentation.py
+++ b/backend/app/agents/collab_thread_assistant/instrumentation.py
@@ -7,11 +7,11 @@
retry attempts, and debug information collection.
"""
-import logging
import json
+import logging
from dataclasses import dataclass, field
-from typing import Any, Dict, List, Optional
from datetime import datetime, timezone
+from typing import Any, Dict, List, Optional
@dataclass
@@ -110,9 +110,7 @@ def __init__(
# Ensure structured output
if not self.logger.handlers:
handler = logging.StreamHandler()
- formatter = logging.Formatter(
- "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
- )
+ formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
handler.setFormatter(formatter)
self.logger.addHandler(handler)
self.logger.setLevel(logging.INFO)
@@ -158,11 +156,7 @@ def log_request_start(self, message_preview: str) -> None:
Args:
message_preview: Truncated preview of the user message.
"""
- preview = (
- message_preview[:200] + "..."
- if len(message_preview) > 200
- else message_preview
- )
+ preview = message_preview[:200] + "..." if len(message_preview) > 200 else message_preview
self._structured_log(
"info",
"request_start",
@@ -274,11 +268,7 @@ def log_summarization_triggered(
)
)
- reduction_pct = (
- round((1 - summarized_tokens / original_tokens) * 100, 1)
- if original_tokens > 0
- else 0
- )
+ reduction_pct = round((1 - summarized_tokens / original_tokens) * 100, 1) if original_tokens > 0 else 0
self._structured_log(
"info",
diff --git a/backend/app/agents/collab_thread_assistant/mcq_parser.py b/backend/app/agents/collab_thread_assistant/mcq_parser.py
index f0bea3f..69d1d6d 100644
--- a/backend/app/agents/collab_thread_assistant/mcq_parser.py
+++ b/backend/app/agents/collab_thread_assistant/mcq_parser.py
@@ -17,15 +17,13 @@
MAX_MCQS_PER_RESPONSE = 3
# Regex pattern to find MCQ blocks
-MCQ_BLOCK_PATTERN = re.compile(
- r'\[MFBT_MCQ\](.*?)\[/MFBT_MCQ\]',
- re.DOTALL
-)
+MCQ_BLOCK_PATTERN = re.compile(r"\[MFBT_MCQ\](.*?)\[/MFBT_MCQ\]", re.DOTALL)
@dataclass
class ParsedMCQ:
"""A single parsed MCQ from an MFBTAI response."""
+
question_text: str
choices: list[dict] # [{"id": "option_1", "label": "..."}]
explanation: Optional[str] = None
@@ -36,6 +34,7 @@ class ParsedMCQ:
@dataclass
class ParsedResponse:
"""Result of parsing an MFBTAI response for MCQ blocks."""
+
preamble_text: Optional[str] = None # Text before MCQ block
mcqs: list[ParsedMCQ] = field(default_factory=list)
has_mcq_block: bool = False
@@ -107,9 +106,7 @@ def _parse_mcq_json(json_str: str) -> tuple[list[ParsedMCQ], Optional[str]]:
return [], "'questions' must be an array"
if len(questions) > MAX_MCQS_PER_RESPONSE:
- logger.warning(
- f"MCQ block has {len(questions)} questions, limiting to {MAX_MCQS_PER_RESPONSE}"
- )
+ logger.warning(f"MCQ block has {len(questions)} questions, limiting to {MAX_MCQS_PER_RESPONSE}")
questions = questions[:MAX_MCQS_PER_RESPONSE]
parsed_mcqs = []
@@ -133,13 +130,15 @@ def _parse_mcq_json(json_str: str) -> tuple[list[ParsedMCQ], Optional[str]]:
recommended_option_id = None
recommended_reason = None
- parsed_mcqs.append(ParsedMCQ(
- question_text=mcq_data["question_text"],
- choices=mcq_data["choices"],
- explanation=mcq_data.get("explanation"),
- recommended_option_id=recommended_option_id,
- recommended_reason=recommended_reason,
- ))
+ parsed_mcqs.append(
+ ParsedMCQ(
+ question_text=mcq_data["question_text"],
+ choices=mcq_data["choices"],
+ explanation=mcq_data.get("explanation"),
+ recommended_option_id=recommended_option_id,
+ recommended_reason=recommended_reason,
+ )
+ )
return parsed_mcqs, None
@@ -179,7 +178,7 @@ def parse_mfbtai_response(response_text: str) -> ParsedResponse:
)
# Extract preamble (text before MCQ block)
- preamble = response_text[:match.start()].strip()
+ preamble = response_text[: match.start()].strip()
preamble_text = preamble if preamble else None
# Parse MCQ JSON
diff --git a/backend/app/agents/collab_thread_assistant/orchestrator.py b/backend/app/agents/collab_thread_assistant/orchestrator.py
index 844e556..e3ef9a9 100644
--- a/backend/app/agents/collab_thread_assistant/orchestrator.py
+++ b/backend/app/agents/collab_thread_assistant/orchestrator.py
@@ -14,41 +14,41 @@
import asyncio
import logging
import time
-from typing import Any, Awaitable, Callable, Dict, List, Optional, Tuple, TYPE_CHECKING
+from typing import TYPE_CHECKING, Any, Awaitable, Callable, Dict, List, Optional, Tuple
from uuid import UUID
from autogen_core.models import ChatCompletionClient
from sqlalchemy.orm import Session, joinedload
from app.models.feature import Feature
-from app.models.thread import Thread, ContextType
-from app.models.project import Project
from app.models.module import Module
+from app.models.project import Project
+from app.models.thread import ContextType, Thread
-from .types import AssistantResponse, CollabThreadContext
-from .context_loader import (
- load_thread,
- load_feature_files,
- load_grounding_files,
- load_brainstorming_phase_context,
- load_current_thread_context,
- load_cross_project_context,
- token_count,
-)
-from .summarizer import SummarizerAgent
from .assistant import CollabThreadAssistant
-from .retry import with_retry, RetryError
from .config import (
- TOKEN_THRESHOLD,
- SUMMARY_MAX_TOKENS,
- RECENT_MESSAGES_COUNT,
- ENABLE_SUMMARIZATION,
ENABLE_DEBUG_LOGGING,
- MIN_MESSAGES_FOR_SUMMARY,
+ ENABLE_SUMMARIZATION,
MAX_RETRIES,
+ MIN_MESSAGES_FOR_SUMMARY,
+ RECENT_MESSAGES_COUNT,
RETRY_BACKOFF_MS,
+ SUMMARY_MAX_TOKENS,
+ TOKEN_THRESHOLD,
+)
+from .context_loader import (
+ load_brainstorming_phase_context,
+ load_cross_project_context,
+ load_current_thread_context,
+ load_feature_files,
+ load_grounding_files,
+ load_thread,
+ token_count,
)
from .instrumentation import CollabThreadAssistantLogger, get_assistant_logger
+from .retry import RetryError, with_retry
+from .summarizer import SummarizerAgent
+from .types import AssistantResponse, CollabThreadContext
if TYPE_CHECKING:
from app.agents.llm_client import LLMCallLogger
@@ -102,12 +102,7 @@ async def build_context(
# Infer project_id from feature if not provided
if project_id is None:
- feature = (
- db.query(Feature)
- .options(joinedload(Feature.module))
- .filter(Feature.id == feature_id)
- .first()
- )
+ feature = db.query(Feature).options(joinedload(Feature.module)).filter(Feature.id == feature_id).first()
if feature and feature.module:
project_id = str(feature.module.project_id)
@@ -134,6 +129,7 @@ async def build_context(
# Check if web search is available
from app.services.platform_settings_service import is_web_search_available_sync
+
web_search_enabled = is_web_search_available_sync(db)
# Load brainstorming context if this is a BRAINSTORM_FEATURE thread
@@ -187,9 +183,9 @@ async def build_context(
exclude_phase_id=current_phase_id, # Exclude current phase to avoid duplication
)
if cross_project_context:
- total_decisions = sum(
- len(phase.decisions) for phase in cross_project_context.other_phases
- ) + len(cross_project_context.project_features)
+ total_decisions = sum(len(phase.decisions) for phase in cross_project_context.other_phases) + len(
+ cross_project_context.project_features
+ )
logger.info(
f"Loaded cross-project context: {len(cross_project_context.other_phases)} phases, "
f"{len(cross_project_context.project_features)} project features, "
@@ -233,57 +229,53 @@ async def build_context(
messages_to_summarize = None
# Check if thread needs summarization
- if (
- token_counts["thread"] > TOKEN_THRESHOLD
- and len(all_messages) >= MIN_MESSAGES_FOR_SUMMARY
- ):
+ if token_counts["thread"] > TOKEN_THRESHOLD and len(all_messages) >= MIN_MESSAGES_FOR_SUMMARY:
# Get messages excluding recent ones (they'll be included verbatim)
messages_to_summarize = (
- all_messages[:-RECENT_MESSAGES_COUNT]
- if len(all_messages) > RECENT_MESSAGES_COUNT
- else all_messages
+ all_messages[:-RECENT_MESSAGES_COUNT] if len(all_messages) > RECENT_MESSAGES_COUNT else all_messages
)
if messages_to_summarize:
logger.info(
f"Summarizing thread {thread_id}: {len(messages_to_summarize)} messages "
f"({token_counts['thread']} tokens)"
)
- summarization_tasks.append((
- "thread",
- summarizer.summarize_thread(messages_to_summarize, max_tokens=SUMMARY_MAX_TOKENS)
- ))
+ summarization_tasks.append(
+ ("thread", summarizer.summarize_thread(messages_to_summarize, max_tokens=SUMMARY_MAX_TOKENS))
+ )
# Check if spec needs summarization
if token_counts["spec"] > TOKEN_THRESHOLD:
logger.info(f"Summarizing spec for feature {feature_id}: {token_counts['spec']} tokens")
- summarization_tasks.append((
- "spec",
- summarizer.summarize(spec, max_tokens=SUMMARY_MAX_TOKENS, context_type="spec")
- ))
+ summarization_tasks.append(
+ ("spec", summarizer.summarize(spec, max_tokens=SUMMARY_MAX_TOKENS, context_type="spec"))
+ )
# Check if prompt_plan needs summarization
if token_counts["prompt_plan"] > TOKEN_THRESHOLD:
logger.info(f"Summarizing prompt_plan for feature {feature_id}: {token_counts['prompt_plan']} tokens")
- summarization_tasks.append((
- "prompt_plan",
- summarizer.summarize(prompt_plan, max_tokens=SUMMARY_MAX_TOKENS, context_type="prompt_plan")
- ))
+ summarization_tasks.append(
+ (
+ "prompt_plan",
+ summarizer.summarize(prompt_plan, max_tokens=SUMMARY_MAX_TOKENS, context_type="prompt_plan"),
+ )
+ )
# Check if notes needs summarization
if token_counts["notes"] > TOKEN_THRESHOLD:
logger.info(f"Summarizing notes for feature {feature_id}: {token_counts['notes']} tokens")
- summarization_tasks.append((
- "notes",
- summarizer.summarize(notes, max_tokens=SUMMARY_MAX_TOKENS, context_type="notes")
- ))
+ summarization_tasks.append(
+ ("notes", summarizer.summarize(notes, max_tokens=SUMMARY_MAX_TOKENS, context_type="notes"))
+ )
# Check if grounding needs summarization
if token_counts["grounding"] > TOKEN_THRESHOLD:
logger.info(f"Summarizing grounding for project {project_id}: {token_counts['grounding']} tokens")
- summarization_tasks.append((
- "grounding",
- summarizer.summarize(grounding_combined, max_tokens=SUMMARY_MAX_TOKENS, context_type="grounding")
- ))
+ summarization_tasks.append(
+ (
+ "grounding",
+ summarizer.summarize(grounding_combined, max_tokens=SUMMARY_MAX_TOKENS, context_type="grounding"),
+ )
+ )
# Execute all summarization tasks in parallel
if summarization_tasks:
@@ -460,7 +452,7 @@ def _generate_graceful_fallback(
## Key Points
-- Your question: {message[:200]}{'...' if len(message) > 200 else ''}
+- Your question: {message[:200]}{"..." if len(message) > 200 else ""}
- Thread ID: {thread_id}
- Feature ID: {feature_id}
- Multiple retry attempts were made before this fallback response.
@@ -681,10 +673,7 @@ def on_assistant_retry(attempt: int, error: Exception) -> None:
tokens_used = sum(context.token_counts.values())
assistant_logger.log_request_complete(latency_ms, success=False, tokens_used=tokens_used)
- summarized_parts = [
- key for key, was_summarized in context.summarization_applied.items()
- if was_summarized
- ]
+ summarized_parts = [key for key, was_summarized in context.summarization_applied.items() if was_summarized]
metadata = {
"implemented": True,
@@ -720,10 +709,7 @@ def on_assistant_retry(attempt: int, error: Exception) -> None:
latency_ms = (time.time() - start_time) * 1000
# Determine which parts were summarized
- summarized_parts = [
- key for key, was_summarized in context.summarization_applied.items()
- if was_summarized
- ]
+ summarized_parts = [key for key, was_summarized in context.summarization_applied.items() if was_summarized]
# Log successful request completion
tokens_used = sum(context.token_counts.values())
@@ -800,14 +786,14 @@ async def handle_ai_mention(
Raises:
ValueError: If thread not found, feature not found, or no LLM configured.
"""
- from app.models import Thread, Project
from app.agents.llm_client import (
+ DEFAULT_LLM_REQUEST_TIMEOUT_SECONDS,
LiteLLMChatCompletionClient,
LLMCallLogger,
- DEFAULT_LLM_REQUEST_TIMEOUT_SECONDS,
)
- from app.services.platform_settings_service import require_llm_config_sync
from app.database import SessionLocal
+ from app.models import Project, Thread
+ from app.services.platform_settings_service import require_llm_config_sync
# Get thread to find project and org
thread = db.query(Thread).filter(Thread.id == thread_id).first()
diff --git a/backend/app/agents/collab_thread_assistant/retry.py b/backend/app/agents/collab_thread_assistant/retry.py
index 8367456..c734e9c 100644
--- a/backend/app/agents/collab_thread_assistant/retry.py
+++ b/backend/app/agents/collab_thread_assistant/retry.py
@@ -11,14 +11,14 @@
# Re-export everything from the shared retry module
from app.agents.retry import (
+ # Constants (re-export from config for compatibility)
+ LEGACY_BACKOFF_MS,
# Exception
RetryError,
+ calculate_backoff_delay,
# Functions
with_retry,
with_retry_sync,
- calculate_backoff_delay,
- # Constants (re-export from config for compatibility)
- LEGACY_BACKOFF_MS,
)
# Re-export config values for code that imports them from here
diff --git a/backend/app/agents/collab_thread_assistant/spec_draft_assistant.py b/backend/app/agents/collab_thread_assistant/spec_draft_assistant.py
index 74c29f9..e9c08ac 100644
--- a/backend/app/agents/collab_thread_assistant/spec_draft_assistant.py
+++ b/backend/app/agents/collab_thread_assistant/spec_draft_assistant.py
@@ -6,7 +6,7 @@
"""
import logging
-from typing import Any, Dict, List, Optional, TYPE_CHECKING
+from typing import TYPE_CHECKING, Any, Dict, List, Optional
from autogen_agentchat.agents import AssistantAgent
from autogen_core.models import ChatCompletionClient
@@ -190,6 +190,7 @@ async def respond(
elif isinstance(content, dict):
# JSON response - convert to string
import json
+
return json.dumps(content)
return str(content)
diff --git a/backend/app/agents/collab_thread_assistant/spec_draft_handler.py b/backend/app/agents/collab_thread_assistant/spec_draft_handler.py
index 4be1499..0cb6ba5 100644
--- a/backend/app/agents/collab_thread_assistant/spec_draft_handler.py
+++ b/backend/app/agents/collab_thread_assistant/spec_draft_handler.py
@@ -54,15 +54,16 @@ async def handle_spec_draft_ai_mention(
Raises:
ValueError: If thread not found, version not found, or no LLM configured.
"""
- from app.models import Thread, Project
from app.agents.llm_client import (
+ DEFAULT_LLM_REQUEST_TIMEOUT_SECONDS,
LiteLLMChatCompletionClient,
LLMCallLogger,
- DEFAULT_LLM_REQUEST_TIMEOUT_SECONDS,
)
- from app.services.platform_settings_service import require_llm_config_sync
from app.database import SessionLocal
- from .context_loader import load_spec_draft_context, load_thread, load_grounding_files
+ from app.models import Project, Thread
+ from app.services.platform_settings_service import require_llm_config_sync
+
+ from .context_loader import load_spec_draft_context, load_thread
from .spec_draft_assistant import SpecDraftAssistant
# Get thread to find project
diff --git a/backend/app/agents/collab_thread_assistant/summarizer.py b/backend/app/agents/collab_thread_assistant/summarizer.py
index 1a43ef9..36cd9c3 100644
--- a/backend/app/agents/collab_thread_assistant/summarizer.py
+++ b/backend/app/agents/collab_thread_assistant/summarizer.py
@@ -8,15 +8,15 @@
"""
import logging
-from typing import List, Optional, TYPE_CHECKING
+from typing import TYPE_CHECKING, List, Optional
from autogen_agentchat.agents import AssistantAgent
from autogen_agentchat.messages import TextMessage
from autogen_core import CancellationToken
from autogen_core.models import ChatCompletionClient
-from .types import ThreadMessage
from .config import SUMMARY_MAX_TOKENS
+from .types import ThreadMessage
if TYPE_CHECKING:
from app.agents.llm_client import LLMCallLogger
@@ -167,7 +167,7 @@ async def summarize(
agent = self._create_agent(system_prompt, "file_summarizer")
# Build the prompt
- prompt = f"""Please summarize the following {context_type or 'document'}:
+ prompt = f"""Please summarize the following {context_type or "document"}:
---
{text}
diff --git a/backend/app/agents/collab_thread_assistant/types.py b/backend/app/agents/collab_thread_assistant/types.py
index 454a077..550c79e 100644
--- a/backend/app/agents/collab_thread_assistant/types.py
+++ b/backend/app/agents/collab_thread_assistant/types.py
@@ -6,7 +6,7 @@
"""
from dataclasses import dataclass, field
-from typing import Any, Dict, List, Optional, TYPE_CHECKING
+from typing import TYPE_CHECKING, Any, Dict, List, Optional
from uuid import UUID
if TYPE_CHECKING:
diff --git a/backend/app/agents/collab_thread_assistant/web_search_parser.py b/backend/app/agents/collab_thread_assistant/web_search_parser.py
index d54ac52..f91495d 100644
--- a/backend/app/agents/collab_thread_assistant/web_search_parser.py
+++ b/backend/app/agents/collab_thread_assistant/web_search_parser.py
@@ -59,8 +59,7 @@ def parse_web_search_request(response_text: str) -> Optional[WebSearchRequest]:
for fallback in fallback_patterns:
if re.search(fallback, response_text.lower()):
logger.warning(
- f"Detected web search intent without proper block format. "
- f"Response contains: '{fallback}'"
+ f"Detected web search intent without proper block format. Response contains: '{fallback}'"
)
return None
diff --git a/backend/app/agents/collab_thread_decision_summarizer/orchestrator.py b/backend/app/agents/collab_thread_decision_summarizer/orchestrator.py
index ae7745c..61723c7 100644
--- a/backend/app/agents/collab_thread_decision_summarizer/orchestrator.py
+++ b/backend/app/agents/collab_thread_decision_summarizer/orchestrator.py
@@ -17,10 +17,9 @@
create_litellm_client,
)
from app.database import SessionLocal
-from app.models.thread import Thread, ContextType
-from app.models.thread_item import ThreadItem, ThreadItemType
from app.models.feature import Feature
-from app.models.user import User
+from app.models.thread import ContextType, Thread
+from app.models.thread_item import ThreadItem, ThreadItemType
from app.services.agent_utils import AGENT_EMAIL
from .config import (
@@ -137,9 +136,7 @@ async def summarize_thread(
all_items = sorted(thread.items, key=lambda x: x.created_at)
# Find unprocessed items (after last_summarized_item_id)
- unprocessed_items = self._get_unprocessed_items(
- all_items, thread.last_summarized_item_id
- )
+ unprocessed_items = self._get_unprocessed_items(all_items, thread.last_summarized_item_id)
if not unprocessed_items:
logger.info(f"Thread {thread_id}: No unprocessed items")
@@ -153,9 +150,7 @@ async def summarize_thread(
final_suggested_implementation_name=thread.suggested_implementation_name,
)
- logger.info(
- f"Thread {thread_id}: Processing {len(unprocessed_items)} unprocessed items"
- )
+ logger.info(f"Thread {thread_id}: Processing {len(unprocessed_items)} unprocessed items")
# Load existing state
current_summary = thread.decision_summary
@@ -172,9 +167,7 @@ async def summarize_thread(
{
"current_item": i + 1,
"total_items": len(unprocessed_items),
- "progress_percentage": 10 + int(
- (i / len(unprocessed_items)) * 70
- ),
+ "progress_percentage": 10 + int((i / len(unprocessed_items)) * 70),
},
)
@@ -420,9 +413,7 @@ def _update_thread_summary(
db.commit()
db.refresh(thread)
- logger.debug(
- f"Updated thread {thread.id}: last_summarized_item_id={last_processed_id}"
- )
+ logger.debug(f"Updated thread {thread.id}: last_summarized_item_id={last_processed_id}")
async def create_orchestrator(
diff --git a/backend/app/agents/collab_thread_decision_summarizer/summarizer.py b/backend/app/agents/collab_thread_decision_summarizer/summarizer.py
index c63e4f0..284197b 100644
--- a/backend/app/agents/collab_thread_decision_summarizer/summarizer.py
+++ b/backend/app/agents/collab_thread_decision_summarizer/summarizer.py
@@ -6,20 +6,19 @@
import json
import logging
-from typing import Awaitable, Callable, List, Optional, Tuple, TYPE_CHECKING
+from typing import TYPE_CHECKING, Awaitable, Callable, Optional, Tuple
from autogen_agentchat.agents import AssistantAgent
from autogen_agentchat.messages import TextMessage
from autogen_core import CancellationToken
from autogen_core.models import ChatCompletionClient
+from .config import SUMMARY_MAX_TOKENS
from .types import (
DecisionSummaryContext,
DecisionSummaryResult,
UnresolvedPoint,
- UnresolvedStatus,
)
-from .config import SUMMARY_MAX_TOKENS
if TYPE_CHECKING:
from app.agents.llm_client import LLMCallLogger
@@ -195,10 +194,7 @@ async def process_item(
# Set agent context for LLM call logging
if self.llm_call_logger:
retry_suffix = f" (retry {attempt})" if attempt > 0 else ""
- self.llm_call_logger.set_agent(
- "decision_summarizer",
- f"Decision Summarizer{retry_suffix}"
- )
+ self.llm_call_logger.set_agent("decision_summarizer", f"Decision Summarizer{retry_suffix}")
# Create fresh agent for each attempt (AutoGen agents accumulate history)
agent = self._create_agent()
@@ -225,10 +221,7 @@ async def process_item(
if json_parsed:
# Successfully parsed JSON
- logger.debug(
- f"Processed item {context.new_item.item_id}: "
- f"summary_changed={result.summary_changed}"
- )
+ logger.debug(f"Processed item {context.new_item.item_id}: summary_changed={result.summary_changed}")
return result
# JSON parsing failed - retry if not last attempt
@@ -242,9 +235,7 @@ async def process_item(
continue
else:
# Last attempt - use fallback
- logger.warning(
- f"LLM did not return valid JSON after {MAX_JSON_RETRIES} attempts, using fallback"
- )
+ logger.warning(f"LLM did not return valid JSON after {MAX_JSON_RETRIES} attempts, using fallback")
return result
except Exception as e:
@@ -303,7 +294,7 @@ def _build_prompt(self, context: DecisionSummaryContext) -> str:
item_content = f"""Type: MCQ Answer (EXPLICIT DECISION)
Question: {item.mcq_question}
Selected Answer: {item.mcq_selected_answer}
-Additional Notes: {item.mcq_free_text or 'None provided'}"""
+Additional Notes: {item.mcq_free_text or "None provided"}"""
else:
item_content = f"""Type: Comment
Content: {item.content}"""
@@ -317,7 +308,7 @@ def _build_prompt(self, context: DecisionSummaryContext) -> str:
{unresolved_json}
## NEW MESSAGE:
-Author: {item.author}{' (AI Assistant)' if item.is_ai else ''}
+Author: {item.author}{" (AI Assistant)" if item.is_ai else ""}
Timestamp: {item.created_at.isoformat()}
{item_content}
diff --git a/backend/app/agents/collab_thread_decision_summarizer/types.py b/backend/app/agents/collab_thread_decision_summarizer/types.py
index 6a7eb96..a723638 100644
--- a/backend/app/agents/collab_thread_decision_summarizer/types.py
+++ b/backend/app/agents/collab_thread_decision_summarizer/types.py
@@ -1,6 +1,6 @@
"""Type definitions for the CollabThreadDecisionSummarizer agent."""
-from dataclasses import dataclass, field
+from dataclasses import dataclass
from datetime import datetime
from enum import Enum
from typing import Any, Dict, List, Optional
diff --git a/backend/app/agents/feature_content/__init__.py b/backend/app/agents/feature_content/__init__.py
index 1d5c79e..906d173 100644
--- a/backend/app/agents/feature_content/__init__.py
+++ b/backend/app/agents/feature_content/__init__.py
@@ -29,26 +29,24 @@
print(result.content_markdown)
"""
-from .types import (
- ContentType,
- FeatureContentContext,
- ContentGenerationResult,
- FeatureInfo,
- ModuleInfo,
- ThreadItem,
- AGENT_METADATA,
- WORKFLOW_STEPS,
-)
-
from .context_loader import (
- load_feature_context,
format_thread_items,
+ load_feature_context,
)
-
from .orchestrator import (
FeatureContentOrchestrator,
create_orchestrator,
)
+from .types import (
+ AGENT_METADATA,
+ WORKFLOW_STEPS,
+ ContentGenerationResult,
+ ContentType,
+ FeatureContentContext,
+ FeatureInfo,
+ ModuleInfo,
+ ThreadItem,
+)
__all__ = [
# Types
diff --git a/backend/app/agents/feature_content/context_loader.py b/backend/app/agents/feature_content/context_loader.py
index 396623e..6048dc1 100644
--- a/backend/app/agents/feature_content/context_loader.py
+++ b/backend/app/agents/feature_content/context_loader.py
@@ -12,15 +12,14 @@
from sqlalchemy.orm import Session, joinedload
-from app.models.feature import Feature
-from app.models.module import Module
-from app.models.project import Project
-from app.models.implementation import Implementation
-from app.models.thread import Thread, ContextType
-from app.models.thread_item import ThreadItem as ThreadItemModel, ThreadItemType
from app.agents.collab_thread_assistant.context_loader import (
load_grounding_files,
)
+from app.models.feature import Feature
+from app.models.implementation import Implementation
+from app.models.project import Project
+from app.models.thread import ContextType, Thread
+from app.models.thread_item import ThreadItem as ThreadItemModel
from .types import (
FeatureContentContext,
@@ -52,7 +51,7 @@ def _find_implementation_segment(
# Find all IMPLEMENTATION_CREATED markers ordered by created_at
markers = []
for item in thread_items_db:
- item_type_str = item.item_type.value if hasattr(item.item_type, 'value') else item.item_type
+ item_type_str = item.item_type.value if hasattr(item.item_type, "value") else item.item_type
if item_type_str == "implementation_created":
markers.append(item)
@@ -76,7 +75,7 @@ def _find_implementation_segment(
)
return (None, None, True)
- is_first = (target_index == 0)
+ is_first = target_index == 0
# Segment end is this marker's timestamp
segment_end = target_marker.created_at
@@ -111,12 +110,7 @@ def load_feature_context(
ValueError: If feature, module, project, or thread not found
"""
# Load feature with module
- feature = (
- db.query(Feature)
- .options(joinedload(Feature.module))
- .filter(Feature.id == feature_id)
- .first()
- )
+ feature = db.query(Feature).options(joinedload(Feature.module)).filter(Feature.id == feature_id).first()
if not feature:
raise ValueError(f"Feature {feature_id} not found")
@@ -242,7 +236,7 @@ def _convert_thread_items(
items = []
for item in thread_items_db:
# item_type may be an enum or string depending on how it's loaded
- item_type_str = item.item_type.value if hasattr(item.item_type, 'value') else item.item_type
+ item_type_str = item.item_type.value if hasattr(item.item_type, "value") else item.item_type
content_data = item.content_data or {}
# Determine if this item is in the focus segment
@@ -255,12 +249,14 @@ def _convert_thread_items(
if item_type_str == "comment":
author = item.author.display_name if item.author else "Unknown"
- items.append(ThreadItem(
- item_type="comment",
- author_name=author,
- body=content_data.get("body_markdown", ""),
- is_focus_segment=is_focus,
- ))
+ items.append(
+ ThreadItem(
+ item_type="comment",
+ author_name=author,
+ body=content_data.get("body_markdown", ""),
+ is_focus_segment=is_focus,
+ )
+ )
elif item_type_str == "mcq_followup":
question = content_data.get("question_text", "")
selected_option_id = content_data.get("selected_option_id")
@@ -273,31 +269,37 @@ def _convert_thread_items(
selected_label = choice.get("label", "")
break
- items.append(ThreadItem(
- item_type="mcq",
- author_name="System",
- mcq_question=question,
- mcq_selected_label=selected_label,
- mcq_free_text=content_data.get("free_text"),
- is_focus_segment=is_focus,
- ))
+ items.append(
+ ThreadItem(
+ item_type="mcq",
+ author_name="System",
+ mcq_question=question,
+ mcq_selected_label=selected_label,
+ mcq_free_text=content_data.get("free_text"),
+ is_focus_segment=is_focus,
+ )
+ )
elif item_type_str == "code_exploration":
# Include full code exploration results (prompt + output)
- items.append(ThreadItem(
- item_type="code_exploration",
- author_name="Code Explorer",
- exploration_prompt=content_data.get("prompt", ""),
- exploration_output=content_data.get("output", ""),
- is_focus_segment=is_focus,
- ))
+ items.append(
+ ThreadItem(
+ item_type="code_exploration",
+ author_name="Code Explorer",
+ exploration_prompt=content_data.get("prompt", ""),
+ exploration_output=content_data.get("output", ""),
+ is_focus_segment=is_focus,
+ )
+ )
elif item_type_str == "web_search":
# Include web search query only - the conclusion comes as MFBTAI comment
- items.append(ThreadItem(
- item_type="web_search",
- author_name="Web Search",
- exploration_prompt=content_data.get("query", ""),
- is_focus_segment=is_focus,
- ))
+ items.append(
+ ThreadItem(
+ item_type="web_search",
+ author_name="Web Search",
+ exploration_prompt=content_data.get("query", ""),
+ is_focus_segment=is_focus,
+ )
+ )
return items
diff --git a/backend/app/agents/feature_content/orchestrator.py b/backend/app/agents/feature_content/orchestrator.py
index 44b3e8d..acdffd8 100644
--- a/backend/app/agents/feature_content/orchestrator.py
+++ b/backend/app/agents/feature_content/orchestrator.py
@@ -14,17 +14,16 @@
from autogen_agentchat.messages import TextMessage
from autogen_core.models import ChatCompletionClient
-from app.agents.llm_client import create_litellm_client, LLMCallLogger
from app.agents.brainstorm_spec import JobCancelledException
+from app.agents.llm_client import LLMCallLogger, create_litellm_client
from .types import (
AGENT_METADATA,
- WORKFLOW_STEPS,
+ PROMPT_PLAN_SYSTEM_MESSAGE,
+ SPEC_SYSTEM_MESSAGE,
+ ContentGenerationResult,
ContentType,
FeatureContentContext,
- ContentGenerationResult,
- SPEC_SYSTEM_MESSAGE,
- PROMPT_PLAN_SYSTEM_MESSAGE,
)
logger = logging.getLogger(__name__)
@@ -128,8 +127,8 @@ def _check_cancelled(self) -> None:
if not self.job_id:
return
- from app.services.job_service import JobService
from app.database import SessionLocal
+ from app.services.job_service import JobService
db = SessionLocal()
try:
@@ -154,9 +153,9 @@ def _build_spec_prompt(self, context: FeatureContentContext) -> str:
- **Feature Key**: {context.feature.feature_key}
- **Title**: {context.feature.title}
- **Module**: {context.module.title}
-- **Module Description**: {context.module.description or 'N/A'}
-- **Category**: {context.feature.category or 'N/A'}
-- **Priority**: {context.feature.priority or 'N/A'}
+- **Module Description**: {context.module.description or "N/A"}
+- **Category**: {context.feature.category or "N/A"}
+- **Priority**: {context.feature.priority or "N/A"}
"""
# Add feature description if available
@@ -170,7 +169,7 @@ def _build_spec_prompt(self, context: FeatureContentContext) -> str:
if context.implementation_id:
prompt += f"""
## Implementation Context
-- **Implementation Name**: {context.implementation_name or 'N/A'}
+- **Implementation Name**: {context.implementation_name or "N/A"}
- **Type**: {"First implementation" if context.is_first_implementation else "Subsequent implementation"}
"""
if not context.is_first_implementation:
@@ -226,9 +225,9 @@ def _build_prompt_plan_prompt(self, context: FeatureContentContext) -> str:
- **Feature Key**: {context.feature.feature_key}
- **Title**: {context.feature.title}
- **Module**: {context.module.title}
-- **Module Description**: {context.module.description or 'N/A'}
-- **Category**: {context.feature.category or 'N/A'}
-- **Priority**: {context.feature.priority or 'N/A'}
+- **Module Description**: {context.module.description or "N/A"}
+- **Category**: {context.feature.category or "N/A"}
+- **Priority**: {context.feature.priority or "N/A"}
"""
# Add feature description if available
@@ -242,7 +241,7 @@ def _build_prompt_plan_prompt(self, context: FeatureContentContext) -> str:
if context.implementation_id:
prompt += f"""
## Implementation Context
-- **Implementation Name**: {context.implementation_name or 'N/A'}
+- **Implementation Name**: {context.implementation_name or "N/A"}
- **Type**: {"First implementation" if context.is_first_implementation else "Subsequent implementation"}
"""
if not context.is_first_implementation:
@@ -325,9 +324,7 @@ async def generate(
Raises:
ValueError: If generation fails
"""
- logger.info(
- f"Generating {content_type.value} for feature {context.feature.feature_key}"
- )
+ logger.info(f"Generating {content_type.value} for feature {context.feature.feature_key}")
# Determine agent key based on content type
agent_key = "spec_generator" if content_type == ContentType.SPEC else "prompt_plan_generator"
@@ -363,10 +360,7 @@ async def generate(
self._check_cancelled()
# Run the agent
- response = await agent.on_messages(
- [TextMessage(content=prompt, source="user")],
- cancellation_token=None
- )
+ response = await agent.on_messages([TextMessage(content=prompt, source="user")], cancellation_token=None)
# Extract response text
response_text = response.chat_message.content
@@ -380,19 +374,15 @@ async def generate(
self._report_progress("complete", 100, agent_key)
logger.info(
- f"Generated {content_type.value} for feature {context.feature.feature_key}: "
- f"{len(content_markdown)} chars"
+ f"Generated {content_type.value} for feature {context.feature.feature_key}: {len(content_markdown)} chars"
)
# Build metadata
metadata = {}
- if hasattr(self.model_client, 'get_usage_stats'):
+ if hasattr(self.model_client, "get_usage_stats"):
usage_stats = self.model_client.get_usage_stats()
metadata["model"] = usage_stats.get("model")
- metadata["tokens_used"] = (
- usage_stats.get("prompt_tokens", 0) +
- usage_stats.get("completion_tokens", 0)
- )
+ metadata["tokens_used"] = usage_stats.get("prompt_tokens", 0) + usage_stats.get("completion_tokens", 0)
metadata["cost_usd"] = usage_stats.get("cost_usd")
return ContentGenerationResult(
@@ -403,7 +393,7 @@ async def generate(
def get_usage_stats(self) -> Dict[str, Any]:
"""Get LLM usage statistics from the model client."""
- if hasattr(self.model_client, 'get_usage_stats'):
+ if hasattr(self.model_client, "get_usage_stats"):
return self.model_client.get_usage_stats()
return {}
diff --git a/backend/app/agents/feature_content/types.py b/backend/app/agents/feature_content/types.py
index ae1a67f..ae9211f 100644
--- a/backend/app/agents/feature_content/types.py
+++ b/backend/app/agents/feature_content/types.py
@@ -14,6 +14,7 @@
class ContentType(str, Enum):
"""Type of content to generate."""
+
SPEC = "spec"
PROMPT_PLAN = "prompt_plan"
@@ -21,6 +22,7 @@ class ContentType(str, Enum):
@dataclass
class ThreadItem:
"""A single item from a feature's discussion thread."""
+
item_type: str # "comment", "mcq", "code_exploration", or "web_search"
author_name: str
body: Optional[str] = None # For comments
@@ -36,6 +38,7 @@ class ThreadItem:
@dataclass
class FeatureInfo:
"""Basic feature information for context."""
+
feature_id: UUID
feature_key: str # e.g., "USER-001"
title: str
@@ -48,6 +51,7 @@ class FeatureInfo:
@dataclass
class ModuleInfo:
"""Module information for context."""
+
module_id: UUID
title: str
description: Optional[str] = None
@@ -61,6 +65,7 @@ class FeatureContentContext:
Contains all information needed to generate a specification or prompt plan
from a feature's discussion thread.
"""
+
# Identifiers
feature_id: UUID
project_id: UUID
@@ -92,6 +97,7 @@ class ContentGenerationResult:
"""
Result of content generation by the agent.
"""
+
content_markdown: str # The generated spec or prompt plan
content_type: ContentType
metadata: Dict[str, Any] = field(default_factory=dict)
@@ -105,6 +111,7 @@ class ContentGenerationResult:
@dataclass
class AgentInfo:
"""Agent display information for UI progress tracking."""
+
name: str
description: str
color: str
@@ -115,23 +122,17 @@ class AgentInfo:
"spec_generator": AgentInfo(
name="Spec Writer",
description="Generating feature specification from conversation",
- color="#3B82F6" # Blue
+ color="#3B82F6", # Blue
),
"prompt_plan_generator": AgentInfo(
name="Prompt Plan Writer",
description="Creating implementation prompt plan from specification",
- color="#10B981" # Green
+ color="#10B981", # Green
),
}
# Workflow step definitions for progress tracking
-WORKFLOW_STEPS = [
- "start",
- "gathering_context",
- "generating_content",
- "saving_version",
- "complete"
-]
+WORKFLOW_STEPS = ["start", "gathering_context", "generating_content", "saving_version", "complete"]
# System messages for different content types
diff --git a/backend/app/agents/grounding/__init__.py b/backend/app/agents/grounding/__init__.py
index b8933dd..8d805f4 100644
--- a/backend/app/agents/grounding/__init__.py
+++ b/backend/app/agents/grounding/__init__.py
@@ -1,11 +1,11 @@
"""Grounding update agent for maintaining agents.md grounding files."""
+from .orchestrator import GroundingUpdateOrchestrator, create_orchestrator
from .types import (
+ GroundingChanges,
GroundingUpdateContext,
GroundingUpdateResult,
- GroundingChanges,
)
-from .orchestrator import create_orchestrator, GroundingUpdateOrchestrator
__all__ = [
"GroundingUpdateContext",
diff --git a/backend/app/agents/grounding/merge_orchestrator.py b/backend/app/agents/grounding/merge_orchestrator.py
index 5a43aa3..3ad4b1a 100644
--- a/backend/app/agents/grounding/merge_orchestrator.py
+++ b/backend/app/agents/grounding/merge_orchestrator.py
@@ -8,19 +8,20 @@
import json
from dataclasses import dataclass, field
-from typing import Optional, Dict, Any, Callable, List
+from typing import Any, Callable, Dict, List, Optional
from uuid import UUID
from autogen_agentchat.agents import AssistantAgent
from autogen_agentchat.messages import TextMessage
from autogen_core.models import ChatCompletionClient
-from app.agents.llm_client import create_litellm_client, LLMCallLogger
+from app.agents.llm_client import LLMCallLogger, create_litellm_client
@dataclass
class GroundingMergeChanges:
"""Summary of changes made during merge."""
+
added: List[str] = field(default_factory=list)
updated: List[str] = field(default_factory=list)
kept: List[str] = field(default_factory=list)
@@ -29,6 +30,7 @@ class GroundingMergeChanges:
@dataclass
class GroundingMergeResult:
"""Result of grounding merge by the agent."""
+
merged_content: str
changes: GroundingMergeChanges
summary: str
@@ -184,12 +186,7 @@ def __init__(
system_message=MERGE_SYSTEM_PROMPT,
)
- def _create_model_client(
- self,
- provider: str,
- api_key: str,
- config: Dict[str, Any]
- ) -> ChatCompletionClient:
+ def _create_model_client(self, provider: str, api_key: str, config: Dict[str, Any]) -> ChatCompletionClient:
"""Create a model client for the specified provider."""
model = config.get("model")
if not model:
@@ -246,15 +243,9 @@ def _parse_response(self, response_text: str) -> GroundingMergeResult:
try:
data = json.loads(response_text)
except json.JSONDecodeError as e:
- is_truncated = (
- "Unterminated string" in str(e) or
- not response_text.rstrip().endswith("}")
- )
+ is_truncated = "Unterminated string" in str(e) or not response_text.rstrip().endswith("}")
if is_truncated:
- raise ValueError(
- f"LLM response was truncated. The files may be too large. "
- f"Parse error: {e}"
- )
+ raise ValueError(f"LLM response was truncated. The files may be too large. Parse error: {e}")
raise ValueError(f"Failed to parse JSON response: {e}")
changes_data = data.get("changes", {})
@@ -304,10 +295,7 @@ async def merge_grounding(
self.llm_call_logger.set_agent("grounding_merger", "Grounding Merger")
# Run the agent
- response = await self.agent.on_messages(
- [TextMessage(content=prompt, source="user")],
- cancellation_token=None
- )
+ response = await self.agent.on_messages([TextMessage(content=prompt, source="user")], cancellation_token=None)
if progress_callback:
progress_callback("Processing response...", 80)
@@ -387,10 +375,7 @@ async def pull_from_global(
self.llm_call_logger.set_agent("grounding_puller", "Grounding Puller")
# Run the agent
- response = await pull_agent.on_messages(
- [TextMessage(content=prompt, source="user")],
- cancellation_token=None
- )
+ response = await pull_agent.on_messages([TextMessage(content=prompt, source="user")], cancellation_token=None)
if progress_callback:
progress_callback("Processing response...", 80)
diff --git a/backend/app/agents/grounding/orchestrator.py b/backend/app/agents/grounding/orchestrator.py
index 7425be6..65fa4f4 100644
--- a/backend/app/agents/grounding/orchestrator.py
+++ b/backend/app/agents/grounding/orchestrator.py
@@ -7,22 +7,21 @@
"""
import json
-from typing import Optional, Dict, Any, Callable
+from typing import Any, Callable, Dict, Optional
from uuid import UUID
from autogen_agentchat.agents import AssistantAgent
from autogen_agentchat.messages import TextMessage
from autogen_core.models import ChatCompletionClient
-from app.agents.llm_client import create_litellm_client, LLMCallLogger
+from app.agents.llm_client import LLMCallLogger, create_litellm_client
from .types import (
+ GroundingChanges,
GroundingUpdateContext,
GroundingUpdateResult,
- GroundingChanges,
)
-
# System prompt for the grounding updater agent
SYSTEM_PROMPT = """You are an expert grounding file updater for a software project management platform.
@@ -149,12 +148,7 @@ def __init__(
system_message=SYSTEM_PROMPT,
)
- def _create_model_client(
- self,
- provider: str,
- api_key: str,
- config: Dict[str, Any]
- ) -> ChatCompletionClient:
+ def _create_model_client(self, provider: str, api_key: str, config: Dict[str, Any]) -> ChatCompletionClient:
"""
Create a model client for the specified provider using LiteLLM.
@@ -248,10 +242,7 @@ def _parse_response(self, response_text: str) -> GroundingUpdateResult:
data = json.loads(response_text)
except json.JSONDecodeError as e:
# Check if this looks like a truncated response
- is_truncated = (
- "Unterminated string" in str(e) or
- not response_text.rstrip().endswith("}")
- )
+ is_truncated = "Unterminated string" in str(e) or not response_text.rstrip().endswith("}")
if is_truncated:
raise ValueError(
f"LLM response was truncated (likely exceeded max_tokens). "
@@ -276,9 +267,7 @@ def _parse_response(self, response_text: str) -> GroundingUpdateResult:
)
async def update_grounding(
- self,
- context: GroundingUpdateContext,
- progress_callback: Optional[Callable[[str, int], None]] = None
+ self, context: GroundingUpdateContext, progress_callback: Optional[Callable[[str, int], None]] = None
) -> GroundingUpdateResult:
"""
Analyze feature notes and update the grounding file.
@@ -307,10 +296,7 @@ async def update_grounding(
self.llm_call_logger.set_agent("grounding_updater", "Grounding Updater")
# Run the agent
- response = await self.agent.on_messages(
- [TextMessage(content=prompt, source="user")],
- cancellation_token=None
- )
+ response = await self.agent.on_messages([TextMessage(content=prompt, source="user")], cancellation_token=None)
if progress_callback:
progress_callback("Processing response...", 80)
@@ -329,9 +315,7 @@ async def close(self):
pass
async def summarize_content(
- self,
- content: str,
- progress_callback: Optional[Callable[[str, int], None]] = None
+ self, content: str, progress_callback: Optional[Callable[[str, int], None]] = None
) -> str:
"""
Generate a summary of agents.md content without modifying it.
@@ -374,8 +358,7 @@ async def summarize_content(
# Run the agent
response = await summarize_agent.on_messages(
- [TextMessage(content=prompt, source="user")],
- cancellation_token=None
+ [TextMessage(content=prompt, source="user")], cancellation_token=None
)
if progress_callback:
diff --git a/backend/app/agents/grounding/types.py b/backend/app/agents/grounding/types.py
index 6f75c2a..1b8e4ea 100644
--- a/backend/app/agents/grounding/types.py
+++ b/backend/app/agents/grounding/types.py
@@ -15,6 +15,7 @@ class GroundingUpdateContext:
"""
Context passed to the grounding agent for analysis.
"""
+
project_id: UUID
feature_id: UUID
feature_key: str # e.g., "USER-001"
@@ -29,6 +30,7 @@ class GroundingChanges:
"""
Summary of changes made to agents.md.
"""
+
added: List[str] = field(default_factory=list) # New items added
updated: List[str] = field(default_factory=list) # Items that were modified
removed: List[str] = field(default_factory=list) # Items that were removed
@@ -39,6 +41,7 @@ class GroundingUpdateResult:
"""
Result of grounding update by the agent.
"""
+
updated_content: str # The new agents.md content
changes: GroundingChanges
summary: str # Brief summary of what was changed
diff --git a/backend/app/agents/image_annotator/annotator.py b/backend/app/agents/image_annotator/annotator.py
index e8d1dfe..e079efc 100644
--- a/backend/app/agents/image_annotator/annotator.py
+++ b/backend/app/agents/image_annotator/annotator.py
@@ -6,11 +6,12 @@
"""
import logging
-from typing import Optional, TYPE_CHECKING
+from typing import TYPE_CHECKING, Optional
from autogen_agentchat.agents import AssistantAgent
from autogen_agentchat.messages import MultiModalMessage
-from autogen_core import CancellationToken, Image as AGImage
+from autogen_core import CancellationToken
+from autogen_core import Image as AGImage
from autogen_core.models import ChatCompletionClient
from .types import ImageAnnotationContext, ImageAnnotationResult
@@ -139,10 +140,7 @@ async def annotate(
# Set agent name for call logging
if self.llm_call_logger:
- self.llm_call_logger.set_agent(
- "image_annotator",
- "Image Annotator"
- )
+ self.llm_call_logger.set_agent("image_annotator", "Image Annotator")
# Build multimodal message with text and image
# Parse data URI to extract base64 data and create AutoGen Image
@@ -184,10 +182,7 @@ async def annotate(
if annotation.startswith('"') and annotation.endswith('"'):
annotation = annotation[1:-1]
- logger.info(
- f"Generated annotation for image {context.image_id}: "
- f"{annotation[:100]}..."
- )
+ logger.info(f"Generated annotation for image {context.image_id}: {annotation[:100]}...")
return ImageAnnotationResult(annotation=annotation)
diff --git a/backend/app/agents/image_annotator/orchestrator.py b/backend/app/agents/image_annotator/orchestrator.py
index cce0009..1f365bd 100644
--- a/backend/app/agents/image_annotator/orchestrator.py
+++ b/backend/app/agents/image_annotator/orchestrator.py
@@ -5,22 +5,20 @@
in pre-phase discussions.
"""
-import base64
import logging
-from typing import Any, Dict, Optional, TYPE_CHECKING
+from typing import TYPE_CHECKING, Any, Dict, Optional
from uuid import UUID
-from sqlalchemy.orm import Session
-
from autogen_core.models import ChatCompletionClient
+from sqlalchemy.orm import Session
-from app.models.project_chat import ProjectChat
-from app.models.project import Project
-from app.models.organization import Organization
from app.models.grounding_file import GroundingFile
+from app.models.organization import Organization
+from app.models.project import Project
+from app.models.project_chat import ProjectChat
-from .types import ImageAnnotationContext, ImageAnnotationResult
from .annotator import ImageAnnotatorAgent
+from .types import ImageAnnotationContext, ImageAnnotationResult
if TYPE_CHECKING:
from app.agents.llm_client import LLMCallLogger
@@ -54,17 +52,13 @@ def load_context(
ValueError: If discussion not found.
"""
# Load discussion
- discussion = db.query(ProjectChat).filter(
- ProjectChat.id == project_chat_id
- ).first()
+ discussion = db.query(ProjectChat).filter(ProjectChat.id == project_chat_id).first()
if not discussion:
raise ValueError(f"Discussion {project_chat_id} not found")
# Load organization
- organization = db.query(Organization).filter(
- Organization.id == discussion.org_id
- ).first()
+ organization = db.query(Organization).filter(Organization.id == discussion.org_id).first()
if not organization:
raise ValueError(f"Organization {discussion.org_id} not found")
@@ -88,9 +82,7 @@ def load_context(
)
# Project-scoped discussion - load project context
- project = db.query(Project).filter(
- Project.id == discussion.project_id
- ).first()
+ project = db.query(Project).filter(Project.id == discussion.project_id).first()
if not project:
raise ValueError(f"Project {discussion.project_id} not found")
@@ -99,10 +91,11 @@ def load_context(
has_grounding = False
grounding_summary = None
- grounding_file = db.query(GroundingFile).filter(
- GroundingFile.project_id == project.id,
- GroundingFile.filename == "agents.md"
- ).first()
+ grounding_file = (
+ db.query(GroundingFile)
+ .filter(GroundingFile.project_id == project.id, GroundingFile.filename == "agents.md")
+ .first()
+ )
if grounding_file and grounding_file.summary:
has_grounding = True
@@ -223,7 +216,7 @@ async def handle_image_annotation(
# Get usage stats from model client
usage_stats = {}
- if hasattr(model_client, 'get_usage_stats'):
+ if hasattr(model_client, "get_usage_stats"):
usage_stats = model_client.get_usage_stats()
return {
diff --git a/backend/app/agents/image_annotator/types.py b/backend/app/agents/image_annotator/types.py
index 856ee85..9a95fd2 100644
--- a/backend/app/agents/image_annotator/types.py
+++ b/backend/app/agents/image_annotator/types.py
@@ -18,6 +18,7 @@ class ImageAnnotationContext:
Contains all information the agent needs to generate a meaningful
annotation for an uploaded image.
"""
+
# Image info
image_id: str
image_filename: str
@@ -51,6 +52,7 @@ class ImageAnnotationResult:
Contains the generated annotation for the image.
"""
+
# The annotation text describing the image
annotation: str
diff --git a/backend/app/agents/llm_client.py b/backend/app/agents/llm_client.py
index 8579b0a..a6f8203 100644
--- a/backend/app/agents/llm_client.py
+++ b/backend/app/agents/llm_client.py
@@ -25,13 +25,10 @@
import litellm
from autogen_core import CancellationToken, FunctionCall
-
-from app.agents.retry import llm_retry
from autogen_core.models import (
AssistantMessage,
ChatCompletionClient,
CreateResult,
- FunctionExecutionResult,
FunctionExecutionResultMessage,
LLMMessage,
ModelCapabilities,
@@ -44,6 +41,8 @@
from autogen_core.tools import Tool, ToolSchema
from pydantic import BaseModel
+from app.agents.retry import llm_retry
+
logger = logging.getLogger(__name__)
# Suppress LiteLLM's verbose logging
@@ -189,9 +188,9 @@ def log_call(
try:
# Import here to avoid circular imports
+ from app.models.job import Job
from app.services.llm_call_log_service import LLMCallLogService
from app.services.llm_usage_log_service import LLMUsageLogService
- from app.models.job import Job
db = self.db_session_factory()
try:
@@ -324,11 +323,17 @@ def __init__(
def _supports_vision(self) -> bool:
"""Check if model supports vision/images."""
model_lower = self._model.lower()
- return any(x in model_lower for x in [
- "gpt-4o", "gpt-4-turbo", "gpt-4-vision",
- "claude-3", "claude-3.5",
- "gemini",
- ])
+ return any(
+ x in model_lower
+ for x in [
+ "gpt-4o",
+ "gpt-4-turbo",
+ "gpt-4-vision",
+ "claude-3",
+ "claude-3.5",
+ "gemini",
+ ]
+ )
def _convert_messages(self, messages: Sequence[LLMMessage]) -> List[Dict[str, Any]]:
"""
@@ -344,17 +349,21 @@ def _convert_messages(self, messages: Sequence[LLMMessage]) -> List[Dict[str, An
for msg in messages:
if isinstance(msg, SystemMessage):
- result.append({
- "role": "system",
- "content": msg.content,
- })
+ result.append(
+ {
+ "role": "system",
+ "content": msg.content,
+ }
+ )
elif isinstance(msg, UserMessage):
# Handle string or list content (for images)
if isinstance(msg.content, str):
- result.append({
- "role": "user",
- "content": msg.content,
- })
+ result.append(
+ {
+ "role": "user",
+ "content": msg.content,
+ }
+ )
else:
# Handle multimodal content (text + images)
content_parts = []
@@ -363,45 +372,52 @@ def _convert_messages(self, messages: Sequence[LLMMessage]) -> List[Dict[str, An
content_parts.append({"type": "text", "text": part})
else:
# Image object - convert to base64 URL
- content_parts.append({
- "type": "image_url",
- "image_url": {"url": part.data_uri}
- })
- result.append({
- "role": "user",
- "content": content_parts,
- })
+ content_parts.append({"type": "image_url", "image_url": {"url": part.data_uri}})
+ result.append(
+ {
+ "role": "user",
+ "content": content_parts,
+ }
+ )
elif isinstance(msg, AssistantMessage):
if isinstance(msg.content, str):
- result.append({
- "role": "assistant",
- "content": msg.content,
- })
+ result.append(
+ {
+ "role": "assistant",
+ "content": msg.content,
+ }
+ )
else:
# Function calls
tool_calls = []
for fc in msg.content:
- tool_calls.append({
- "id": fc.id,
- "type": "function",
- "function": {
- "name": fc.name,
- "arguments": fc.arguments,
+ tool_calls.append(
+ {
+ "id": fc.id,
+ "type": "function",
+ "function": {
+ "name": fc.name,
+ "arguments": fc.arguments,
+ },
}
- })
- result.append({
- "role": "assistant",
- "content": None,
- "tool_calls": tool_calls,
- })
+ )
+ result.append(
+ {
+ "role": "assistant",
+ "content": None,
+ "tool_calls": tool_calls,
+ }
+ )
elif isinstance(msg, FunctionExecutionResultMessage):
# Add tool results
for fr in msg.content:
- result.append({
- "role": "tool",
- "tool_call_id": fr.call_id,
- "content": fr.content,
- })
+ result.append(
+ {
+ "role": "tool",
+ "tool_call_id": fr.call_id,
+ "content": fr.content,
+ }
+ )
return result
@@ -414,14 +430,16 @@ def _convert_tools(self, tools: Sequence[Tool | ToolSchema]) -> List[Dict[str, A
else:
schema = tool
- result.append({
- "type": "function",
- "function": {
- "name": schema["name"],
- "description": schema.get("description", ""),
- "parameters": schema.get("parameters", {"type": "object", "properties": {}}),
+ result.append(
+ {
+ "type": "function",
+ "function": {
+ "name": schema["name"],
+ "description": schema.get("description", ""),
+ "parameters": schema.get("parameters", {"type": "object", "properties": {}}),
+ },
}
- })
+ )
return result
def _parse_response(self, response: Any, cached: bool = False) -> CreateResult:
@@ -467,7 +485,9 @@ def _parse_response(self, response: Any, cached: bool = False) -> CreateResult:
thought=getattr(message, "reasoning_content", None),
)
- def _normalize_finish_reason(self, reason: Optional[str]) -> Literal["stop", "length", "function_calls", "content_filter", "unknown"]:
+ def _normalize_finish_reason(
+ self, reason: Optional[str]
+ ) -> Literal["stop", "length", "function_calls", "content_filter", "unknown"]:
"""Normalize finish reason to AutoGen's expected values."""
if reason is None:
return "unknown"
@@ -585,10 +605,7 @@ async def create(
if tools:
kwargs["tools"] = self._convert_tools(tools)
if isinstance(tool_choice, Tool):
- kwargs["tool_choice"] = {
- "type": "function",
- "function": {"name": tool_choice.schema["name"]}
- }
+ kwargs["tool_choice"] = {"type": "function", "function": {"name": tool_choice.schema["name"]}}
elif tool_choice != "auto":
kwargs["tool_choice"] = tool_choice
@@ -675,10 +692,7 @@ async def create_stream(
if tools:
kwargs["tools"] = self._convert_tools(tools)
if isinstance(tool_choice, Tool):
- kwargs["tool_choice"] = {
- "type": "function",
- "function": {"name": tool_choice.schema["name"]}
- }
+ kwargs["tool_choice"] = {"type": "function", "function": {"name": tool_choice.schema["name"]}}
elif tool_choice != "auto":
kwargs["tool_choice"] = tool_choice
@@ -702,16 +716,16 @@ async def create_stream(
try:
async for chunk in response:
- if hasattr(chunk, 'choices') and chunk.choices:
+ if hasattr(chunk, "choices") and chunk.choices:
delta = chunk.choices[0].delta
# Yield content chunks
- if hasattr(delta, 'content') and delta.content:
+ if hasattr(delta, "content") and delta.content:
full_content += delta.content
yield delta.content
# Accumulate tool calls
- if hasattr(delta, 'tool_calls') and delta.tool_calls:
+ if hasattr(delta, "tool_calls") and delta.tool_calls:
for tc in delta.tool_calls:
idx = tc.index
while len(tool_calls) <= idx:
@@ -729,7 +743,7 @@ async def create_stream(
finish_reason = chunk.choices[0].finish_reason
# Get usage from final chunk if available
- if hasattr(chunk, 'usage') and chunk.usage:
+ if hasattr(chunk, "usage") and chunk.usage:
prompt_tokens = chunk.usage.prompt_tokens
completion_tokens = chunk.usage.completion_tokens
except Exception as e:
@@ -818,10 +832,7 @@ def count_tokens(
return count
except Exception:
# Fallback: rough estimate
- total_chars = sum(
- len(str(m.get("content", "")))
- for m in litellm_messages
- )
+ total_chars = sum(len(str(m.get("content", ""))) for m in litellm_messages)
return total_chars // 4 # Rough estimate: 4 chars per token
def remaining_tokens(
diff --git a/backend/app/agents/module_feature/__init__.py b/backend/app/agents/module_feature/__init__.py
index c71e6bf..015ea36 100644
--- a/backend/app/agents/module_feature/__init__.py
+++ b/backend/app/agents/module_feature/__init__.py
@@ -15,41 +15,41 @@
- prompt_plan_text: HOW to build (step-by-step instructions)
"""
+from .merger import MergerAgent
+from .orchestrator import ModuleFeatureOrchestrator, create_orchestrator
+from .plan_structurer import PlanStructurerAgent
+from .spec_analyzer import SpecAnalyzerAgent
from .types import (
- # Input/Output types
- ModuleFeatureContext,
- ExtractedModule,
+ AGENT_METADATA,
+ # UI metadata
+ WORKFLOW_STEPS,
+ AgentInfo,
+ CoverageReport,
ExtractedFeature,
+ ExtractedModule,
ExtractionResult,
- # Agent intermediate types
- SpecAnalysis,
- SpecRequirement,
- PlanStructure,
+ FeatureCategoryType,
+ FeatureContent,
+ FeatureMapping,
+ FeaturePriorityLevel,
ImplementationPhase,
ImplementationStep,
MergedMapping,
- ModuleMapping,
- FeatureMapping,
- FeatureContent,
- WriterOutput,
- CoverageReport,
# Enums
ModuleCategory,
- FeaturePriorityLevel,
- FeatureCategoryType,
- # UI metadata
- WORKFLOW_STEPS,
- AGENT_METADATA,
- AgentInfo,
+ # Input/Output types
+ ModuleFeatureContext,
+ ModuleMapping,
+ PlanStructure,
+ # Agent intermediate types
+ SpecAnalysis,
+ SpecRequirement,
+ WriterOutput,
# Helpers
validate_extraction_result,
)
-from .orchestrator import ModuleFeatureOrchestrator, create_orchestrator
-from .spec_analyzer import SpecAnalyzerAgent
-from .plan_structurer import PlanStructurerAgent
-from .merger import MergerAgent
-from .writer import WriterAgent
from .validator import ValidatorAgent
+from .writer import WriterAgent
__all__ = [
# Input/Output Types
diff --git a/backend/app/agents/module_feature/logging_config.py b/backend/app/agents/module_feature/logging_config.py
index 37722d0..76460aa 100644
--- a/backend/app/agents/module_feature/logging_config.py
+++ b/backend/app/agents/module_feature/logging_config.py
@@ -12,10 +12,10 @@
- Coverage Validator (Agent 5)
"""
-import logging
import json
-from typing import Any, Dict, List, Optional
+import logging
from datetime import datetime, timezone
+from typing import Any, Dict, List, Optional
class ModuleFeatureAgentLogger:
@@ -41,19 +41,12 @@ def __init__(self, agent_name: str, project_id: Optional[str] = None):
# Ensure structured output
if not self.logger.handlers:
handler = logging.StreamHandler()
- formatter = logging.Formatter(
- '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
- )
+ formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
handler.setFormatter(formatter)
self.logger.addHandler(handler)
self.logger.setLevel(logging.INFO)
- def _structured_log(
- self,
- level: str,
- event: str,
- extra_data: Optional[Dict[str, Any]] = None
- ) -> None:
+ def _structured_log(self, level: str, event: str, extra_data: Optional[Dict[str, Any]] = None) -> None:
"""
Log a structured event.
@@ -86,12 +79,7 @@ def log_agent_complete(self, **kwargs: Any) -> None:
self._structured_log("info", f"{self.agent_name}_complete", kwargs)
def log_llm_call(
- self,
- prompt: str,
- model: str,
- response: Optional[str] = None,
- tokens_used: Optional[int] = None,
- **kwargs: Any
+ self, prompt: str, model: str, response: Optional[str] = None, tokens_used: Optional[int] = None, **kwargs: Any
) -> None:
"""
Log an LLM API call.
@@ -107,7 +95,7 @@ def log_llm_call(
"model": model,
"prompt_preview": prompt[:200] + "..." if len(prompt) > 200 else prompt,
"prompt_length": len(prompt),
- **kwargs
+ **kwargs,
}
if response:
@@ -119,13 +107,7 @@ def log_llm_call(
self._structured_log("info", "llm_call", data)
- def log_extraction_stats(
- self,
- modules_count: int,
- total_features: int,
- spec_length: int,
- **kwargs: Any
- ) -> None:
+ def log_extraction_stats(self, modules_count: int, total_features: int, spec_length: int, **kwargs: Any) -> None:
"""
Log extraction statistics.
@@ -140,7 +122,7 @@ def log_extraction_stats(
"total_features": total_features,
"spec_length": spec_length,
"avg_features_per_module": round(total_features / modules_count, 2) if modules_count > 0 else 0,
- **kwargs
+ **kwargs,
}
self._structured_log("info", "extraction_stats", data)
@@ -171,11 +153,7 @@ def log_workflow_transition(self, from_state: str, to_state: str, **kwargs: Any)
to_state: New state
**kwargs: Additional context
"""
- data = {
- "from_state": from_state,
- "to_state": to_state,
- **kwargs
- }
+ data = {"from_state": from_state, "to_state": to_state, **kwargs}
self._structured_log("info", "workflow_transition", data)
def log_validation_issues(self, issues: list, **kwargs: Any) -> None:
@@ -190,7 +168,7 @@ def log_validation_issues(self, issues: list, **kwargs: Any) -> None:
"issues_count": len(issues),
"issues": issues[:10], # Limit to first 10 issues
"has_issues": len(issues) > 0,
- **kwargs
+ **kwargs,
}
self._structured_log("warning" if issues else "info", "validation_issues", data)
@@ -200,7 +178,7 @@ def log_spec_analysis(
domain_areas: List[str],
data_models_count: int,
api_endpoints_count: int,
- **kwargs: Any
+ **kwargs: Any,
) -> None:
"""
Log spec analysis results from Agent 1.
@@ -218,16 +196,12 @@ def log_spec_analysis(
"domain_areas_count": len(domain_areas),
"data_models_count": data_models_count,
"api_endpoints_count": api_endpoints_count,
- **kwargs
+ **kwargs,
}
self._structured_log("info", "spec_analysis_complete", data)
def log_plan_structure(
- self,
- phases_count: int,
- total_steps: int,
- cross_cutting_concerns: List[str],
- **kwargs: Any
+ self, phases_count: int, total_steps: int, cross_cutting_concerns: List[str], **kwargs: Any
) -> None:
"""
Log plan structure results from Agent 2.
@@ -243,17 +217,12 @@ def log_plan_structure(
"total_steps": total_steps,
"cross_cutting_concerns": cross_cutting_concerns,
"avg_steps_per_phase": round(total_steps / phases_count, 2) if phases_count > 0 else 0,
- **kwargs
+ **kwargs,
}
self._structured_log("info", "plan_structure_complete", data)
def log_merge_result(
- self,
- modules_count: int,
- features_count: int,
- unmapped_requirements: int,
- unmapped_steps: int,
- **kwargs: Any
+ self, modules_count: int, features_count: int, unmapped_requirements: int, unmapped_steps: int, **kwargs: Any
) -> None:
"""
Log merge results from Agent 3.
@@ -272,17 +241,15 @@ def log_merge_result(
"unmapped_steps": unmapped_steps,
"mapping_success_rate": round(
(features_count / (features_count + unmapped_requirements + unmapped_steps)) * 100, 2
- ) if (features_count + unmapped_requirements + unmapped_steps) > 0 else 100,
- **kwargs
+ )
+ if (features_count + unmapped_requirements + unmapped_steps) > 0
+ else 100,
+ **kwargs,
}
self._structured_log("info", "merge_complete", data)
def log_content_written(
- self,
- features_processed: int,
- avg_spec_text_length: int,
- avg_prompt_plan_text_length: int,
- **kwargs: Any
+ self, features_processed: int, avg_spec_text_length: int, avg_prompt_plan_text_length: int, **kwargs: Any
) -> None:
"""
Log content writing results from Agent 4.
@@ -297,7 +264,7 @@ def log_content_written(
"features_processed": features_processed,
"avg_spec_text_length": avg_spec_text_length,
"avg_prompt_plan_text_length": avg_prompt_plan_text_length,
- **kwargs
+ **kwargs,
}
self._structured_log("info", "content_written", data)
@@ -308,7 +275,7 @@ def log_coverage_report(
uncovered_requirements: int,
uncovered_steps: int,
content_issues: int,
- **kwargs: Any
+ **kwargs: Any,
) -> None:
"""
Log coverage validation results from Agent 5.
@@ -327,7 +294,7 @@ def log_coverage_report(
"uncovered_requirements": uncovered_requirements,
"uncovered_steps": uncovered_steps,
"content_issues": content_issues,
- **kwargs
+ **kwargs,
}
level = "info" if ok else "warning"
self._structured_log(level, "coverage_report", data)
diff --git a/backend/app/agents/module_feature/merger.py b/backend/app/agents/module_feature/merger.py
index 794bf96..21205ac 100644
--- a/backend/app/agents/module_feature/merger.py
+++ b/backend/app/agents/module_feature/merger.py
@@ -10,26 +10,25 @@
import asyncio
import json
from collections import defaultdict
-from typing import Optional, List, Dict, Any, Set
+from typing import Any, Dict, List, Optional, Set
from autogen_agentchat.agents import AssistantAgent
from autogen_agentchat.messages import TextMessage
from autogen_core import CancellationToken
from autogen_core.models import ChatCompletionClient
+from .logging_config import get_agent_logger
from .types import (
- ModuleFeatureContext,
- SpecAnalysis,
- SpecRequirement,
- PlanStructure,
+ FeatureMapping,
ImplementationPhase,
MergedMapping,
+ ModuleFeatureContext,
ModuleMapping,
- FeatureMapping,
+ PlanStructure,
+ SpecAnalysis,
+ SpecRequirement,
)
-from .logging_config import get_agent_logger
-from .utils import strip_markdown_json, generate_unique_id, generate_semantic_id, parse_json_with_repair
-
+from .utils import generate_semantic_id, parse_json_with_repair, strip_markdown_json
# Default domain to phase keyword mapping (fallback when phase_domain_mapping not available)
DEFAULT_DOMAIN_KEYWORDS = {
@@ -207,7 +206,7 @@ async def merge(
project_id=str(context.project_id),
requirements_count=len(spec_analysis.requirements),
phases_count=len(plan_structure.phases),
- total_steps=plan_structure.total_steps
+ total_steps=plan_structure.total_steps,
)
try:
@@ -244,17 +243,13 @@ async def _merge_parallel(
Returns:
Combined MergedMapping from all phases
"""
- self.logger.logger.info(
- f"Using parallel phase processing for {len(plan_structure.phases)} phases"
- )
+ self.logger.logger.info(f"Using parallel phase processing for {len(plan_structure.phases)} phases")
# Extract phase_domain_mapping from prompt plan
phase_mappings = self._extract_phase_domain_mapping(context)
# Create mapping dict for quick lookup
- mapping_by_index = {
- m.get("phase_index"): m for m in phase_mappings
- }
+ mapping_by_index = {m.get("phase_index"): m for m in phase_mappings}
# SINGLE-BEST MATCHING: Pre-compute requirement assignments sequentially
# This ensures each requirement is assigned to only ONE phase
@@ -272,9 +267,7 @@ async def _merge_parallel(
break
# Match requirements to this phase
- matching_reqs = self._match_requirements_to_phase(
- phase, spec_analysis.requirements, phase_mapping
- )
+ matching_reqs = self._match_requirements_to_phase(phase, spec_analysis.requirements, phase_mapping)
# Filter out requirements already matched to earlier phases
matching_reqs = [r for r in matching_reqs if r.id not in matched_requirement_ids]
@@ -289,9 +282,7 @@ async def _merge_parallel(
# Create parallel tasks using pre-computed assignments
tasks = []
for phase in plan_structure.phases:
- tasks.append(
- self._merge_phase(phase, phase_requirements[phase.phase_index], spec_analysis, context)
- )
+ tasks.append(self._merge_phase(phase, phase_requirements[phase.phase_index], spec_analysis, context))
# Run all phases in parallel
results = await asyncio.gather(*tasks, return_exceptions=True)
@@ -300,32 +291,39 @@ async def _merge_parallel(
phase_results = []
for i, result in enumerate(results):
if isinstance(result, Exception):
- self.logger.log_error(result, {
- "phase_index": plan_structure.phases[i].phase_index,
- "phase_title": plan_structure.phases[i].title,
- })
+ self.logger.log_error(
+ result,
+ {
+ "phase_index": plan_structure.phases[i].phase_index,
+ "phase_title": plan_structure.phases[i].title,
+ },
+ )
# Use fallback for failed phases
phase = plan_structure.phases[i]
- phase_results.append({
- "phase_index": phase.phase_index,
- "module": {
- "title": phase.title,
- "description": phase.objective,
- "order_index": phase.phase_index,
- "category": "phase",
- "phase_reference": phase.phase_index,
- },
- "features": [{
- "title": f"Implement {phase.title}",
+ phase_results.append(
+ {
"phase_index": phase.phase_index,
- "step_index": 1,
- "spec_requirement_ids": [],
- "plan_step_id": f"P{phase.phase_index}-S1",
- "priority": "important",
- "category": "Other",
- }],
- "unmapped_requirements": [],
- })
+ "module": {
+ "title": phase.title,
+ "description": phase.objective,
+ "order_index": phase.phase_index,
+ "category": "phase",
+ "phase_reference": phase.phase_index,
+ },
+ "features": [
+ {
+ "title": f"Implement {phase.title}",
+ "phase_index": phase.phase_index,
+ "step_index": 1,
+ "spec_requirement_ids": [],
+ "plan_step_id": f"P{phase.phase_index}-S1",
+ "priority": "important",
+ "category": "Other",
+ }
+ ],
+ "unmapped_requirements": [],
+ }
+ )
else:
phase_results.append(result)
@@ -385,13 +383,10 @@ async def _merge_single_call(
self.logger.log_llm_call(
prompt=prompt[:500] + "..." if len(prompt) > 500 else prompt,
model=str(self.model_client),
- operation="merge_spec_and_plan"
+ operation="merge_spec_and_plan",
)
- response = await self.agent.on_messages(
- [TextMessage(content=prompt, source="user")],
- CancellationToken()
- )
+ response = await self.agent.on_messages([TextMessage(content=prompt, source="user")], CancellationToken())
response_text = response.chat_message.content
if isinstance(response_text, list):
@@ -402,12 +397,15 @@ async def _merge_single_call(
result_data = parse_json_with_repair(response_text)
except json.JSONDecodeError as e:
cleaned = strip_markdown_json(response_text)
- self.logger.log_error(e, {
- "raw_response": response_text[:500],
- "cleaned_response": cleaned[:500],
- "response_length": len(response_text),
- "error_position": e.pos if hasattr(e, 'pos') else None
- })
+ self.logger.log_error(
+ e,
+ {
+ "raw_response": response_text[:500],
+ "cleaned_response": cleaned[:500],
+ "response_length": len(response_text),
+ "error_position": e.pos if hasattr(e, "pos") else None,
+ },
+ )
raise ValueError(f"Failed to parse merger response as JSON: {e}")
# Convert to MergedMapping
@@ -415,32 +413,36 @@ async def _merge_single_call(
raw_modules = result_data.get("modules", [])
for idx, mod_data in enumerate(raw_modules, start=1):
mod_title = mod_data.get("title", f"Module {idx}")
- modules.append(ModuleMapping(
- module_id=generate_semantic_id("MOD", mod_title, idx),
- title=mod_title,
- description=mod_data.get("description", ""),
- order_index=mod_data.get("order_index", idx),
- category=mod_data.get("category", "phase"),
- phase_reference=mod_data.get("phase_reference"),
- feature_ids=mod_data.get("feature_ids", []),
- ))
+ modules.append(
+ ModuleMapping(
+ module_id=generate_semantic_id("MOD", mod_title, idx),
+ title=mod_title,
+ description=mod_data.get("description", ""),
+ order_index=mod_data.get("order_index", idx),
+ category=mod_data.get("category", "phase"),
+ phase_reference=mod_data.get("phase_reference"),
+ feature_ids=mod_data.get("feature_ids", []),
+ )
+ )
features = []
raw_features = result_data.get("features", [])
for idx, feat_data in enumerate(raw_features, start=1):
feat_title = feat_data.get("title", f"Feature {idx}")
- features.append(FeatureMapping(
- feature_id=generate_semantic_id("FEAT", feat_title, idx),
- title=feat_title,
- module_id=feat_data.get("module_id", "MOD-001"),
- phase_index=feat_data.get("phase_index", 0),
- step_index=feat_data.get("step_index", idx),
- global_order=feat_data.get("global_order", idx),
- spec_requirement_ids=feat_data.get("spec_requirement_ids", []),
- plan_step_id=feat_data.get("plan_step_id"),
- priority=feat_data.get("priority", "important"),
- category=feat_data.get("category", "Other"),
- ))
+ features.append(
+ FeatureMapping(
+ feature_id=generate_semantic_id("FEAT", feat_title, idx),
+ title=feat_title,
+ module_id=feat_data.get("module_id", "MOD-001"),
+ phase_index=feat_data.get("phase_index", 0),
+ step_index=feat_data.get("step_index", idx),
+ global_order=feat_data.get("global_order", idx),
+ spec_requirement_ids=feat_data.get("spec_requirement_ids", []),
+ plan_step_id=feat_data.get("plan_step_id"),
+ priority=feat_data.get("priority", "important"),
+ category=feat_data.get("category", "Other"),
+ )
+ )
merged = MergedMapping(
modules=modules,
@@ -464,10 +466,7 @@ async def _merge_single_call(
return merged
def _build_prompt(
- self,
- spec_analysis: SpecAnalysis,
- plan_structure: PlanStructure,
- context: ModuleFeatureContext
+ self, spec_analysis: SpecAnalysis, plan_structure: PlanStructure, context: ModuleFeatureContext
) -> str:
"""
Build the merge prompt from spec analysis and plan structure.
@@ -534,9 +533,9 @@ def _build_prompt(
prompt += "\n"
# Add cross-phase context if available (for awareness only)
- if hasattr(context, 'cross_project_context') and context.cross_project_context:
+ if hasattr(context, "cross_project_context") and context.cross_project_context:
cross_ctx = context.cross_project_context
- if hasattr(cross_ctx, 'other_phases') and cross_ctx.other_phases:
+ if hasattr(cross_ctx, "other_phases") and cross_ctx.other_phases:
prompt += "## DECISIONS FROM OTHER PHASES (for awareness only):\n\n"
prompt += "Use these for consistency. Do NOT add their requirements to this phase.\n\n"
for phase_ctx in cross_ctx.other_phases[:3]: # Limit to top 3 phases
@@ -720,10 +719,7 @@ async def _merge_phase(
model_client=self.model_client,
)
- response = await phase_agent.on_messages(
- [TextMessage(content=prompt, source="user")],
- CancellationToken()
- )
+ response = await phase_agent.on_messages([TextMessage(content=prompt, source="user")], CancellationToken())
response_text = response.chat_message.content
if isinstance(response_text, list):
@@ -734,10 +730,7 @@ async def _merge_phase(
result["phase_index"] = phase.phase_index
return result
except json.JSONDecodeError as e:
- self.logger.log_error(e, {
- "phase": phase.title,
- "response_preview": response_text[:300]
- })
+ self.logger.log_error(e, {"phase": phase.title, "response_preview": response_text[:300]})
# Return minimal fallback
return {
"phase_index": phase.phase_index,
@@ -809,18 +802,20 @@ def _combine_phase_results(
feature_id = generate_semantic_id("FEAT", feat_title, feat_counter)
module.feature_ids.append(feature_id)
- features.append(FeatureMapping(
- feature_id=feature_id,
- title=feat_title,
- module_id=module.module_id,
- phase_index=feat_data.get("phase_index", result.get("phase_index", 0)),
- step_index=feat_data.get("step_index", feat_counter),
- global_order=feat_counter,
- spec_requirement_ids=feat_data.get("spec_requirement_ids", []),
- plan_step_id=feat_data.get("plan_step_id"),
- priority=feat_data.get("priority", "important"),
- category=feat_data.get("category", "Other"),
- ))
+ features.append(
+ FeatureMapping(
+ feature_id=feature_id,
+ title=feat_title,
+ module_id=module.module_id,
+ phase_index=feat_data.get("phase_index", result.get("phase_index", 0)),
+ step_index=feat_data.get("step_index", feat_counter),
+ global_order=feat_counter,
+ spec_requirement_ids=feat_data.get("spec_requirement_ids", []),
+ plan_step_id=feat_data.get("plan_step_id"),
+ priority=feat_data.get("priority", "important"),
+ category=feat_data.get("category", "Other"),
+ )
+ )
modules.append(module)
@@ -902,10 +897,7 @@ def _deduplicate_features(self, merged: MergedMapping) -> MergedMapping:
return merged
-async def create_merger(
- model_client: ChatCompletionClient,
- project_id: Optional[str] = None
-) -> MergerAgent:
+async def create_merger(model_client: ChatCompletionClient, project_id: Optional[str] = None) -> MergerAgent:
"""
Factory function to create a Merger Agent.
diff --git a/backend/app/agents/module_feature/orchestrator.py b/backend/app/agents/module_feature/orchestrator.py
index df56140..76d61ef 100644
--- a/backend/app/agents/module_feature/orchestrator.py
+++ b/backend/app/agents/module_feature/orchestrator.py
@@ -11,29 +11,29 @@
import asyncio
import logging
-from typing import Optional, Callable, Dict, Any
+from typing import Any, Callable, Dict, Optional
from uuid import UUID
from autogen_core.models import ChatCompletionClient
+# Re-use the same exception from brainstorm_spec
+from app.agents.brainstorm_spec import JobCancelledException
+
+from .logging_config import get_agent_logger
+from .merger import MergerAgent
+from .plan_structurer import PlanStructurerAgent
+from .spec_analyzer import SpecAnalyzerAgent
from .types import (
- ModuleFeatureContext,
- ExtractionResult,
- ExtractedModule,
- ExtractedFeature,
- WORKFLOW_STEPS,
AGENT_METADATA,
+ WORKFLOW_STEPS,
+ ExtractedFeature,
+ ExtractedModule,
+ ExtractionResult,
+ ModuleFeatureContext,
validate_extraction_result,
)
-from .logging_config import get_agent_logger
-from .spec_analyzer import SpecAnalyzerAgent
-from .plan_structurer import PlanStructurerAgent
-from .merger import MergerAgent
-from .writer import WriterAgent
from .validator import ValidatorAgent
-
-# Re-use the same exception from brainstorm_spec
-from app.agents.brainstorm_spec import JobCancelledException
+from .writer import WriterAgent
logger = logging.getLogger(__name__)
@@ -87,8 +87,8 @@ def _check_cancelled(self) -> None:
if not self.job_id:
return
- from app.services.job_service import JobService
from app.database import SessionLocal
+ from app.services.job_service import JobService
db = SessionLocal()
try:
@@ -121,7 +121,7 @@ async def extract_modules_features(self, context: ModuleFeatureContext) -> Extra
self.logger.log_workflow_transition(
from_state=WORKFLOW_STEPS[0], # "start"
- to_state=WORKFLOW_STEPS[1] # "analyzing_spec"
+ to_state=WORKFLOW_STEPS[1], # "analyzing_spec"
)
try:
@@ -142,12 +142,12 @@ async def extract_modules_features(self, context: ModuleFeatureContext) -> Extra
self._report_progress(
f"Extracted {len(spec_analysis.requirements)} requirements and {len(plan_structure.phases)} phases",
30,
- "plan_structurer"
+ "plan_structurer",
)
self.logger.log_workflow_transition(
from_state=WORKFLOW_STEPS[1], # "analyzing_spec"
- to_state=WORKFLOW_STEPS[3] # "merging" (skip structuring_plan since parallel)
+ to_state=WORKFLOW_STEPS[3], # "merging" (skip structuring_plan since parallel)
)
# Step 3: Merge Spec and Plan (30-50%)
@@ -158,7 +158,7 @@ async def extract_modules_features(self, context: ModuleFeatureContext) -> Extra
self._report_progress(
f"Merger: Created {len(merged_mapping.modules)} modules with {len(merged_mapping.features)} features",
50,
- "merger"
+ "merger",
)
# Check for cancellation after merger
@@ -166,20 +166,16 @@ async def extract_modules_features(self, context: ModuleFeatureContext) -> Extra
self.logger.log_workflow_transition(
from_state=WORKFLOW_STEPS[3], # "merging"
- to_state=WORKFLOW_STEPS[4] # "writing_content"
+ to_state=WORKFLOW_STEPS[4], # "writing_content"
)
# Step 4: Write Feature Content (50-85%)
self._report_progress("Content Writer: Generating spec_text and prompt_plan_text", 50, "writer")
if self.call_logger:
self.call_logger.set_agent("writer", "Content Writer")
- writer_output = await self.writer.write_all(
- merged_mapping, spec_analysis, plan_structure, context
- )
+ writer_output = await self.writer.write_all(merged_mapping, spec_analysis, plan_structure, context)
self._report_progress(
- f"Content Writer: Generated content for {len(writer_output.feature_contents)} features",
- 85,
- "writer"
+ f"Content Writer: Generated content for {len(writer_output.feature_contents)} features", 85, "writer"
)
# Check for cancellation after writer
@@ -187,7 +183,7 @@ async def extract_modules_features(self, context: ModuleFeatureContext) -> Extra
self.logger.log_workflow_transition(
from_state=WORKFLOW_STEPS[4], # "writing_content"
- to_state=WORKFLOW_STEPS[5] # "validating"
+ to_state=WORKFLOW_STEPS[5], # "validating"
)
# Step 5: Validate Coverage (85-100%)
@@ -195,11 +191,7 @@ async def extract_modules_features(self, context: ModuleFeatureContext) -> Extra
coverage_report = await self.validator.validate(
spec_analysis, plan_structure, merged_mapping, writer_output
)
- self._report_progress(
- f"Validator: Coverage {coverage_report.coverage_percentage}%",
- 95,
- "validator"
- )
+ self._report_progress(f"Validator: Coverage {coverage_report.coverage_percentage}%", 95, "validator")
# Build final result (pass context for image resolution)
result = self._build_final_result(merged_mapping, writer_output, coverage_report, context)
@@ -214,7 +206,7 @@ async def extract_modules_features(self, context: ModuleFeatureContext) -> Extra
self.logger.log_workflow_transition(
from_state=WORKFLOW_STEPS[5], # "validating"
- to_state=WORKFLOW_STEPS[6] # "complete"
+ to_state=WORKFLOW_STEPS[6], # "complete"
)
self.logger.log_agent_complete(
@@ -227,10 +219,7 @@ async def extract_modules_features(self, context: ModuleFeatureContext) -> Extra
return result
except Exception as e:
- self.logger.log_error(e, {
- "project_id": str(context.project_id),
- "workflow_step": "unknown"
- })
+ self.logger.log_error(e, {"project_id": str(context.project_id), "workflow_step": "unknown"})
raise
def _build_final_result(
@@ -253,16 +242,11 @@ def _build_final_result(
Final ExtractionResult ready for persistence
"""
# Build content lookup
- content_map = {
- fc.feature_id: fc
- for fc in writer_output.feature_contents
- }
+ content_map = {fc.feature_id: fc for fc in writer_output.feature_contents}
# Build image attachment lookup by ID for quick resolution
image_attachment_map = {
- img.get("id"): img
- for img in context.phase_description_image_attachments
- if img.get("id")
+ img.get("id"): img for img in context.phase_description_image_attachments if img.get("id")
}
# Build module-to-features lookup
@@ -282,9 +266,15 @@ def _build_final_result(
features = []
for feat in module_features.get(mod.module_id, []):
content = content_map.get(feat.feature_id)
- description = content.description if content else f"This feature enables {feat.title.lower()} functionality for users."
+ description = (
+ content.description
+ if content
+ else f"This feature enables {feat.title.lower()} functionality for users."
+ )
spec_text = content.spec_text if content else f"## {feat.title}\n\nImplement as specified."
- prompt_plan_text = content.prompt_plan_text if content else f"## Implementation\n\n1. Implement {feat.title}"
+ prompt_plan_text = (
+ content.prompt_plan_text if content else f"## Implementation\n\n1. Implement {feat.title}"
+ )
# Resolve relevant_image_ids to full image attachment dicts
description_image_attachments = []
@@ -293,26 +283,30 @@ def _build_final_result(
if img_id in image_attachment_map:
description_image_attachments.append(image_attachment_map[img_id])
- features.append(ExtractedFeature(
- title=feat.title,
- description=description,
- spec_text=spec_text,
- prompt_plan_text=prompt_plan_text,
- priority=feat.priority,
- category=feat.category,
- order_index=feat.step_index,
- spec_requirement_refs=feat.spec_requirement_ids,
- description_image_attachments=description_image_attachments,
- ))
-
- modules.append(ExtractedModule(
- title=mod.title,
- description=mod.description,
- order_index=mod.order_index,
- module_category=mod.category,
- phase_reference=mod.phase_reference,
- features=features,
- ))
+ features.append(
+ ExtractedFeature(
+ title=feat.title,
+ description=description,
+ spec_text=spec_text,
+ prompt_plan_text=prompt_plan_text,
+ priority=feat.priority,
+ category=feat.category,
+ order_index=feat.step_index,
+ spec_requirement_refs=feat.spec_requirement_ids,
+ description_image_attachments=description_image_attachments,
+ )
+ )
+
+ modules.append(
+ ExtractedModule(
+ title=mod.title,
+ description=mod.description,
+ order_index=mod.order_index,
+ module_category=mod.category,
+ phase_reference=mod.phase_reference,
+ features=features,
+ )
+ )
# Sort modules by order_index
modules.sort(key=lambda m: m.order_index)
@@ -394,7 +388,7 @@ async def create_orchestrator(
Raises:
ValueError: If provider is unsupported
"""
- from app.agents.llm_client import create_litellm_client, LLMCallLogger
+ from app.agents.llm_client import LLMCallLogger, create_litellm_client
model = config.get("model")
if not model:
@@ -415,6 +409,7 @@ async def create_orchestrator(
call_logger = None
if job_id:
from app.database import SessionLocal
+
call_logger = LLMCallLogger(
db_session_factory=SessionLocal,
job_id=job_id,
diff --git a/backend/app/agents/module_feature/plan_structurer.py b/backend/app/agents/module_feature/plan_structurer.py
index 3a19601..2529898 100644
--- a/backend/app/agents/module_feature/plan_structurer.py
+++ b/backend/app/agents/module_feature/plan_structurer.py
@@ -9,21 +9,21 @@
import asyncio
import json
-from typing import Optional, List, Dict, Any
+from typing import Any, Dict, List, Optional
from autogen_agentchat.agents import AssistantAgent
from autogen_agentchat.messages import TextMessage
from autogen_core import CancellationToken
from autogen_core.models import ChatCompletionClient
+from .logging_config import get_agent_logger
from .types import (
- ModuleFeatureContext,
- PlanStructure,
ImplementationPhase,
ImplementationStep,
+ ModuleFeatureContext,
+ PlanStructure,
)
-from .logging_config import get_agent_logger
-from .utils import strip_markdown_json, extract_markdown_sections
+from .utils import extract_markdown_sections, strip_markdown_json
class PlanStructurerAgent:
@@ -112,7 +112,7 @@ async def structure(
self.logger.log_agent_start(
project_id=str(context.project_id),
plan_length=len(context.prompt_plan_markdown),
- has_json_plan=context.prompt_plan_json is not None
+ has_json_plan=context.prompt_plan_json is not None,
)
try:
@@ -123,9 +123,7 @@ async def structure(
if phase_mapping:
# Use phase_domain_mapping to guide phase extraction
- self.logger.logger.info(
- f"Using phase_domain_mapping with {len(phase_mapping)} defined phases"
- )
+ self.logger.logger.info(f"Using phase_domain_mapping with {len(phase_mapping)} defined phases")
return await self._structure_with_phase_mapping(context, phase_mapping)
# Fallback: Split plan into sections (legacy behavior)
@@ -133,19 +131,12 @@ async def structure(
if not sections:
# Fallback: treat entire plan as one section
- sections = [{
- 'id': 'full_plan',
- 'title': 'Full Prompt Plan',
- 'content': context.prompt_plan_markdown
- }]
+ sections = [{"id": "full_plan", "title": "Full Prompt Plan", "content": context.prompt_plan_markdown}]
self.logger.logger.info(f"Processing {len(sections)} sections in parallel (no phase_domain_mapping)")
# Process all sections in parallel
- tasks = [
- self._structure_section(section, context)
- for section in sections
- ]
+ tasks = [self._structure_section(section, context) for section in sections]
section_results = await asyncio.gather(*tasks, return_exceptions=True)
# Merge results from all sections
@@ -154,16 +145,14 @@ async def structure(
for idx, (section, result) in enumerate(zip(sections, section_results)):
if isinstance(result, Exception):
- self.logger.log_error(result, {
- "section_id": section['id'],
- "section_title": section['title'],
- "error": str(result)
- })
+ self.logger.log_error(
+ result, {"section_id": section["id"], "section_title": section["title"], "error": str(result)}
+ )
continue
# Collect phases from this section
for phase in result.get("phases", []):
- phase["source_section"] = section['title']
+ phase["source_section"] = section["title"]
all_phases.append(phase)
all_cross_cutting.extend(result.get("cross_cutting_concerns", []))
@@ -178,7 +167,9 @@ def dedupe_list(items):
if key not in seen:
seen.add(key)
if isinstance(item, dict):
- result.append(item.get("name") or item.get("concern") or item.get("description") or str(item))
+ result.append(
+ item.get("name") or item.get("concern") or item.get("description") or str(item)
+ )
else:
result.append(item)
return result
@@ -195,21 +186,25 @@ def dedupe_list(items):
raw_steps = phase_data.get("steps", [])
for step_idx, step_data in enumerate(raw_steps, start=1):
- steps.append(ImplementationStep(
- step_id=f"P{new_idx}-S{step_idx}",
- title=step_data.get("title", "Untitled Step"),
- description=step_data.get("description", ""),
- expected_artifacts=step_data.get("expected_artifacts", []),
- completion_criteria=step_data.get("completion_criteria", []),
- ))
-
- phases.append(ImplementationPhase(
- phase_index=new_idx,
- title=phase_data.get("title", f"Phase {new_idx}"),
- objective=phase_data.get("objective", ""),
- steps=steps,
- dependencies=phase_data.get("dependencies", []),
- ))
+ steps.append(
+ ImplementationStep(
+ step_id=f"P{new_idx}-S{step_idx}",
+ title=step_data.get("title", "Untitled Step"),
+ description=step_data.get("description", ""),
+ expected_artifacts=step_data.get("expected_artifacts", []),
+ completion_criteria=step_data.get("completion_criteria", []),
+ )
+ )
+
+ phases.append(
+ ImplementationPhase(
+ phase_index=new_idx,
+ title=phase_data.get("title", f"Phase {new_idx}"),
+ objective=phase_data.get("objective", ""),
+ steps=steps,
+ dependencies=phase_data.get("dependencies", []),
+ )
+ )
structure = PlanStructure(
phases=phases,
@@ -253,10 +248,7 @@ async def _structure_with_phase_mapping(
PlanStructure with exactly the number of phases in phase_mapping
"""
# Process each phase in parallel to extract steps
- tasks = [
- self._extract_phase_steps(context, phase_def, idx + 1)
- for idx, phase_def in enumerate(phase_mapping)
- ]
+ tasks = [self._extract_phase_steps(context, phase_def, idx + 1) for idx, phase_def in enumerate(phase_mapping)]
phase_results = await asyncio.gather(*tasks, return_exceptions=True)
# Build phases from results
@@ -267,38 +259,41 @@ async def _structure_with_phase_mapping(
phase_index = idx + 1
if isinstance(result, Exception):
- self.logger.log_error(result, {
- "phase_title": phase_def.get("phase_title"),
- "error": str(result)
- })
+ self.logger.log_error(result, {"phase_title": phase_def.get("phase_title"), "error": str(result)})
# Create minimal phase on error
- phases.append(ImplementationPhase(
- phase_index=phase_index,
- title=phase_def.get("phase_title", f"Phase {phase_index}"),
- objective=f"Implement {phase_def.get('phase_title', 'phase')}",
- steps=[],
- dependencies=[],
- ))
+ phases.append(
+ ImplementationPhase(
+ phase_index=phase_index,
+ title=phase_def.get("phase_title", f"Phase {phase_index}"),
+ objective=f"Implement {phase_def.get('phase_title', 'phase')}",
+ steps=[],
+ dependencies=[],
+ )
+ )
continue
# Build steps from result
steps = []
for step_idx, step_data in enumerate(result.get("steps", []), start=1):
- steps.append(ImplementationStep(
- step_id=f"P{phase_index}-S{step_idx}",
- title=step_data.get("title", "Untitled Step"),
- description=step_data.get("description", ""),
- expected_artifacts=step_data.get("expected_artifacts", []),
- completion_criteria=step_data.get("completion_criteria", []),
- ))
-
- phases.append(ImplementationPhase(
- phase_index=phase_index,
- title=phase_def.get("phase_title", f"Phase {phase_index}"),
- objective=result.get("objective", f"Implement {phase_def.get('phase_title', 'phase')}"),
- steps=steps,
- dependencies=result.get("dependencies", []),
- ))
+ steps.append(
+ ImplementationStep(
+ step_id=f"P{phase_index}-S{step_idx}",
+ title=step_data.get("title", "Untitled Step"),
+ description=step_data.get("description", ""),
+ expected_artifacts=step_data.get("expected_artifacts", []),
+ completion_criteria=step_data.get("completion_criteria", []),
+ )
+ )
+
+ phases.append(
+ ImplementationPhase(
+ phase_index=phase_index,
+ title=phase_def.get("phase_title", f"Phase {phase_index}"),
+ objective=result.get("objective", f"Implement {phase_def.get('phase_title', 'phase')}"),
+ steps=steps,
+ dependencies=result.get("dependencies", []),
+ )
+ )
all_cross_cutting.extend(result.get("cross_cutting_concerns", []))
@@ -354,8 +349,8 @@ async def _extract_phase_steps(
# Build prompt to extract steps for this specific phase
prompt = f"""Extract implementation steps for Phase {phase_index}: {phase_title}
-**Phase Keywords:** {', '.join(keywords)}
-**Domains:** {', '.join(domains)}
+**Phase Keywords:** {", ".join(keywords)}
+**Domains:** {", ".join(domains)}
**Prompt Plan Content:**
{context.prompt_plan_markdown[:15000]}
@@ -385,9 +380,7 @@ async def _extract_phase_steps(
Return ONLY the JSON object."""
self.logger.log_llm_call(
- prompt=f"Phase {phase_index}: {phase_title}",
- model=str(self.model_client),
- operation="extract_phase_steps"
+ prompt=f"Phase {phase_index}: {phase_title}", model=str(self.model_client), operation="extract_phase_steps"
)
# Create fresh agent for this phase
@@ -397,10 +390,7 @@ async def _extract_phase_steps(
model_client=self.model_client,
)
- response = await phase_agent.on_messages(
- [TextMessage(content=prompt, source="user")],
- CancellationToken()
- )
+ response = await phase_agent.on_messages([TextMessage(content=prompt, source="user")], CancellationToken())
response_text = response.chat_message.content
if isinstance(response_text, list):
@@ -411,21 +401,20 @@ async def _extract_phase_steps(
try:
result = json.loads(cleaned)
except json.JSONDecodeError as e:
- self.logger.log_error(e, {
- "phase_title": phase_title,
- "response_preview": cleaned[:200]
- })
+ self.logger.log_error(e, {"phase_title": phase_title, "response_preview": cleaned[:200]})
# Return minimal result
return {
"objective": f"Implement {phase_title}",
- "steps": [{
- "title": f"Implement {phase_title}",
- "description": f"Complete implementation for {phase_title}",
- "expected_artifacts": [],
- "completion_criteria": [f"{phase_title} complete"]
- }],
+ "steps": [
+ {
+ "title": f"Implement {phase_title}",
+ "description": f"Complete implementation for {phase_title}",
+ "expected_artifacts": [],
+ "completion_criteria": [f"{phase_title} complete"],
+ }
+ ],
"dependencies": [],
- "cross_cutting_concerns": []
+ "cross_cutting_concerns": [],
}
return result
@@ -446,18 +435,15 @@ async def _structure_section(
Dict with phases and cross_cutting_concerns
"""
# Skip empty sections
- if not section['content'].strip():
- return {
- "phases": [],
- "cross_cutting_concerns": []
- }
+ if not section["content"].strip():
+ return {"phases": [], "cross_cutting_concerns": []}
prompt = self._build_section_prompt(section, context)
self.logger.log_llm_call(
prompt=f"Section: {section['title']} ({len(section['content'])} chars)",
model=str(self.model_client),
- operation="structure_section"
+ operation="structure_section",
)
# Create a FRESH agent for each section to avoid conversation history accumulation
@@ -469,10 +455,7 @@ async def _structure_section(
model_client=self.model_client,
)
- response = await section_agent.on_messages(
- [TextMessage(content=prompt, source="user")],
- CancellationToken()
- )
+ response = await section_agent.on_messages([TextMessage(content=prompt, source="user")], CancellationToken())
response_text = response.chat_message.content
if isinstance(response_text, list):
@@ -483,25 +466,21 @@ async def _structure_section(
try:
result = json.loads(cleaned)
except json.JSONDecodeError as e:
- self.logger.log_error(e, {
- "section_id": section['id'],
- "section_title": section['title'],
- "response_length": len(response_text),
- "response_preview": cleaned[:200]
- })
+ self.logger.log_error(
+ e,
+ {
+ "section_id": section["id"],
+ "section_title": section["title"],
+ "response_length": len(response_text),
+ "response_preview": cleaned[:200],
+ },
+ )
# Return empty result rather than failing entirely
- return {
- "phases": [],
- "cross_cutting_concerns": []
- }
+ return {"phases": [], "cross_cutting_concerns": []}
return result
- def _build_section_prompt(
- self,
- section: Dict[str, str],
- context: ModuleFeatureContext
- ) -> str:
+ def _build_section_prompt(self, section: Dict[str, str], context: ModuleFeatureContext) -> str:
"""
Build the structuring prompt for a single section.
@@ -512,7 +491,7 @@ def _build_section_prompt(
Returns:
Formatted prompt string
"""
- prompt = f"Extract implementation phases and steps from this prompt plan section.\n\n"
+ prompt = "Extract implementation phases and steps from this prompt plan section.\n\n"
# Add project context
if context.project_name:
@@ -520,7 +499,7 @@ def _build_section_prompt(
# Add section
prompt += f"## {section['title']}\n\n"
- prompt += section['content']
+ prompt += section["content"]
prompt += "\n\n"
prompt += "Extract phases and steps from THIS SECTION. "
@@ -530,8 +509,7 @@ def _build_section_prompt(
async def create_plan_structurer(
- model_client: ChatCompletionClient,
- project_id: Optional[str] = None
+ model_client: ChatCompletionClient, project_id: Optional[str] = None
) -> PlanStructurerAgent:
"""
Factory function to create a Plan Structurer Agent.
diff --git a/backend/app/agents/module_feature/spec_analyzer.py b/backend/app/agents/module_feature/spec_analyzer.py
index 61a881d..3c4f4b2 100644
--- a/backend/app/agents/module_feature/spec_analyzer.py
+++ b/backend/app/agents/module_feature/spec_analyzer.py
@@ -9,23 +9,23 @@
import asyncio
import json
-from typing import Optional, List, Dict, Any
+from typing import Any, Dict, List, Optional
from autogen_agentchat.agents import AssistantAgent
from autogen_agentchat.messages import TextMessage
from autogen_core import CancellationToken
from autogen_core.models import ChatCompletionClient
+from .logging_config import get_agent_logger
from .types import (
ModuleFeatureContext,
SpecAnalysis,
SpecRequirement,
)
-from .logging_config import get_agent_logger
from .utils import (
- strip_markdown_json,
- generate_unique_id,
extract_markdown_sections,
+ generate_unique_id,
+ strip_markdown_json,
)
@@ -111,7 +111,7 @@ async def analyze(
self.logger.log_agent_start(
project_id=str(context.project_id),
spec_length=len(context.final_spec_markdown),
- has_json_spec=context.final_spec_json is not None
+ has_json_spec=context.final_spec_json is not None,
)
try:
@@ -120,19 +120,12 @@ async def analyze(
if not sections:
# Fallback: treat entire spec as one section
- sections = [{
- 'id': 'full_spec',
- 'title': 'Full Specification',
- 'content': context.final_spec_markdown
- }]
+ sections = [{"id": "full_spec", "title": "Full Specification", "content": context.final_spec_markdown}]
self.logger.logger.info(f"Processing {len(sections)} sections in parallel")
# Process all sections in parallel
- tasks = [
- self._analyze_section(section, context)
- for section in sections
- ]
+ tasks = [self._analyze_section(section, context) for section in sections]
section_results = await asyncio.gather(*tasks, return_exceptions=True)
# Merge results from all sections
@@ -145,17 +138,15 @@ async def analyze(
for idx, (section, result) in enumerate(zip(sections, section_results)):
if isinstance(result, Exception):
- self.logger.log_error(result, {
- "section_id": section['id'],
- "section_title": section['title'],
- "error": str(result)
- })
+ self.logger.log_error(
+ result, {"section_id": section["id"], "section_title": section["title"], "error": str(result)}
+ )
continue
# Add section info to each requirement
for req in result.get("requirements", []):
- req["section_id"] = section['id']
- req["section_title"] = section['title']
+ req["section_id"] = section["id"]
+ req["section_title"] = section["title"]
all_requirements.append(req)
if req.get("domain_area"):
all_domain_areas.add(req["domain_area"])
@@ -177,7 +168,13 @@ def dedupe_list(items):
seen.add(key)
# For dicts, extract just the name/text if possible
if isinstance(item, dict):
- result.append(item.get("component_name") or item.get("name") or item.get("constraint_type") or item.get("description") or str(item))
+ result.append(
+ item.get("component_name")
+ or item.get("name")
+ or item.get("constraint_type")
+ or item.get("description")
+ or str(item)
+ )
else:
result.append(item)
return result
@@ -190,14 +187,16 @@ def dedupe_list(items):
# Convert to SpecRequirement objects with unique IDs
requirements = []
for idx, req_data in enumerate(all_requirements, start=1):
- requirements.append(SpecRequirement(
- id=generate_unique_id("REQ", idx),
- section_id=req_data.get("section_id", "unknown"),
- section_title=req_data.get("section_title", "Unknown Section"),
- requirement_text=req_data.get("requirement_text", ""),
- domain_area=req_data.get("domain_area", "Other"),
- implied_components=req_data.get("implied_components", []),
- ))
+ requirements.append(
+ SpecRequirement(
+ id=generate_unique_id("REQ", idx),
+ section_id=req_data.get("section_id", "unknown"),
+ section_title=req_data.get("section_title", "Unknown Section"),
+ requirement_text=req_data.get("requirement_text", ""),
+ domain_area=req_data.get("domain_area", "Other"),
+ implied_components=req_data.get("implied_components", []),
+ )
+ )
analysis = SpecAnalysis(
requirements=requirements,
@@ -243,21 +242,15 @@ async def _analyze_section(
Dict with requirements, data_models, api_endpoints, etc.
"""
# Skip empty sections
- if not section['content'].strip():
- return {
- "requirements": [],
- "data_models": [],
- "api_endpoints": [],
- "ui_components": [],
- "constraints": []
- }
+ if not section["content"].strip():
+ return {"requirements": [], "data_models": [], "api_endpoints": [], "ui_components": [], "constraints": []}
prompt = self._build_section_prompt(section, context)
self.logger.log_llm_call(
prompt=f"Section: {section['title']} ({len(section['content'])} chars)",
model=str(self.model_client),
- operation="analyze_section"
+ operation="analyze_section",
)
# Create a FRESH agent for each section to avoid conversation history accumulation
@@ -269,10 +262,7 @@ async def _analyze_section(
model_client=self.model_client,
)
- response = await section_agent.on_messages(
- [TextMessage(content=prompt, source="user")],
- CancellationToken()
- )
+ response = await section_agent.on_messages([TextMessage(content=prompt, source="user")], CancellationToken())
response_text = response.chat_message.content
if isinstance(response_text, list):
@@ -284,28 +274,21 @@ async def _analyze_section(
try:
result = json.loads(cleaned)
except json.JSONDecodeError as e:
- self.logger.log_error(e, {
- "section_id": section['id'],
- "section_title": section['title'],
- "response_length": len(response_text),
- "response_preview": cleaned[:200]
- })
+ self.logger.log_error(
+ e,
+ {
+ "section_id": section["id"],
+ "section_title": section["title"],
+ "response_length": len(response_text),
+ "response_preview": cleaned[:200],
+ },
+ )
# Return empty result rather than failing entirely
- return {
- "requirements": [],
- "data_models": [],
- "api_endpoints": [],
- "ui_components": [],
- "constraints": []
- }
+ return {"requirements": [], "data_models": [], "api_endpoints": [], "ui_components": [], "constraints": []}
return result
- def _build_section_prompt(
- self,
- section: Dict[str, str],
- context: ModuleFeatureContext
- ) -> str:
+ def _build_section_prompt(self, section: Dict[str, str], context: ModuleFeatureContext) -> str:
"""
Build the analysis prompt for a single section.
@@ -316,7 +299,7 @@ def _build_section_prompt(
Returns:
Formatted prompt string
"""
- prompt = f"Extract requirements from this specification section.\n\n"
+ prompt = "Extract requirements from this specification section.\n\n"
# Add project context
if context.project_name:
@@ -324,7 +307,7 @@ def _build_section_prompt(
# Add section
prompt += f"## {section['title']}\n\n"
- prompt += section['content']
+ prompt += section["content"]
prompt += "\n\n"
prompt += "Extract all implementable requirements from THIS SECTION. "
@@ -334,8 +317,7 @@ def _build_section_prompt(
async def create_spec_analyzer(
- model_client: ChatCompletionClient,
- project_id: Optional[str] = None
+ model_client: ChatCompletionClient, project_id: Optional[str] = None
) -> SpecAnalyzerAgent:
"""
Factory function to create a Spec Analyzer Agent.
diff --git a/backend/app/agents/module_feature/types.py b/backend/app/agents/module_feature/types.py
index 84054e1..d4921c9 100644
--- a/backend/app/agents/module_feature/types.py
+++ b/backend/app/agents/module_feature/types.py
@@ -16,22 +16,24 @@
from dataclasses import dataclass, field
from enum import Enum
-from typing import List, Optional, Dict, Any
+from typing import Any, Dict, List, Optional
from uuid import UUID
-
# ============================
# Enums
# ============================
+
class ModuleCategory(str, Enum):
"""Category of module based on how it was derived."""
+
PHASE = "phase" # Maps to an implementation phase from the prompt plan
CROSS_CUTTING = "cross_cutting" # Cross-cutting concern (config, testing, etc.)
class FeaturePriorityLevel(str, Enum):
"""Priority level for features."""
+
MUST_HAVE = "must_have"
IMPORTANT = "important"
OPTIONAL = "optional"
@@ -39,6 +41,7 @@ class FeaturePriorityLevel(str, Enum):
class FeatureCategoryType(str, Enum):
"""Category of feature based on technical domain."""
+
DATA_MODEL = "Data Model"
API = "API"
UI = "UI"
@@ -56,11 +59,13 @@ class FeatureCategoryType(str, Enum):
# Input Context
# ============================
+
@dataclass
class ImageAttachmentInfo:
"""
Lightweight image info for the LLM to understand available images.
"""
+
id: str # UUID of the image
filename: str
description: str = "" # Optional description if available
@@ -74,6 +79,7 @@ class ModuleFeatureContext:
Contains all the information needed to extract modules and features
from a finalized specification AND prompt plan.
"""
+
project_id: UUID
brainstorming_phase_id: UUID
@@ -106,11 +112,13 @@ class ModuleFeatureContext:
# Agent 1: Spec Analyzer Types
# ============================
+
@dataclass
class SpecRequirement:
"""
A single requirement extracted from the specification.
"""
+
id: str # Unique ID for traceability (e.g., "REQ-001")
section_id: str # Source section ID (e.g., "functional_requirements")
section_title: str
@@ -125,6 +133,7 @@ class SpecAnalysis:
Output from the Spec Analyzer Agent.
Parsed specification with extracted requirements by domain.
"""
+
requirements: List[SpecRequirement] = field(default_factory=list)
domain_areas: List[str] = field(default_factory=list)
data_models: List[str] = field(default_factory=list)
@@ -142,11 +151,13 @@ def __post_init__(self):
# Agent 2: Plan Structurer Types
# ============================
+
@dataclass
class ImplementationStep:
"""
A single step within an implementation phase.
"""
+
step_id: str # Unique ID (e.g., "P1-S1" for Phase 1, Step 1)
title: str
description: str
@@ -159,6 +170,7 @@ class ImplementationPhase:
"""
An implementation phase from the prompt plan.
"""
+
phase_index: int # 1-based index
title: str
objective: str
@@ -172,6 +184,7 @@ class PlanStructure:
Output from the Plan Structurer Agent.
Parsed prompt plan with implementation phases and steps.
"""
+
phases: List[ImplementationPhase] = field(default_factory=list)
total_steps: int = 0
cross_cutting_concerns: List[str] = field(default_factory=list) # Config, testing, etc.
@@ -185,11 +198,13 @@ def __post_init__(self):
# Agent 3: Merger Types
# ============================
+
@dataclass
class FeatureMapping:
"""
Mapping of a feature to its sources in spec and plan.
"""
+
feature_id: str # Unique ID (e.g., "FEAT-001")
title: str
module_id: str # Which module this belongs to
@@ -213,6 +228,7 @@ class ModuleMapping:
"""
Mapping of a module to its source phase.
"""
+
module_id: str # Unique ID (e.g., "MOD-001")
title: str
description: str
@@ -228,6 +244,7 @@ class MergedMapping:
Output from the Merger Agent.
Aligned modules and features from spec and plan.
"""
+
modules: List[ModuleMapping] = field(default_factory=list)
features: List[FeatureMapping] = field(default_factory=list)
@@ -240,12 +257,14 @@ class MergedMapping:
# Agent 4: Content Writer Types
# ============================
+
@dataclass
class FeatureContent:
"""
Rich content for a single feature.
Generated by the Content Writer Agent.
"""
+
feature_id: str
description: str # User-friendly description of what the feature does
spec_text: str # WHAT to build - requirements, acceptance criteria
@@ -260,6 +279,7 @@ class WriterOutput:
Output from the Content Writer Agent.
All features with their content populated.
"""
+
feature_contents: List[FeatureContent] = field(default_factory=list)
@@ -267,12 +287,14 @@ class WriterOutput:
# Agent 5: Coverage Validator Types
# ============================
+
@dataclass
class CoverageReport:
"""
Quality assurance report from the Coverage Validator Agent.
Validates completeness and ordering.
"""
+
ok: bool
# Coverage issues
@@ -300,12 +322,14 @@ class CoverageReport:
# Final Output Types
# ============================
+
@dataclass
class ExtractedFeature:
"""
A fully extracted feature with all content.
Final output for persistence.
"""
+
title: str
description: str # User-friendly description of what the feature does
spec_text: str # WHAT: requirements, acceptance criteria
@@ -324,6 +348,7 @@ class ExtractedModule:
A fully extracted module with all features.
Final output for persistence.
"""
+
title: str
description: str
order_index: int
@@ -338,6 +363,7 @@ class ExtractionResult:
Final result from the module/feature extraction process.
Includes all modules, features, and validation report.
"""
+
modules: List[ExtractedModule] = field(default_factory=list)
total_features: int = 0
coverage_report: Optional[CoverageReport] = None
@@ -352,12 +378,14 @@ def __post_init__(self):
# Agent Metadata for UI
# ============================
+
@dataclass
class AgentInfo:
"""
UI metadata for an agent in the extraction workflow.
Used for progress tracking and visual representation.
"""
+
name: str
description: str
color: str # Hex color for UI tag
@@ -368,46 +396,38 @@ class AgentInfo:
"orchestrator": AgentInfo(
name="Orchestrator",
description="Coordinating the module/feature extraction workflow",
- color="#8B5CF6" # Purple
+ color="#8B5CF6", # Purple
),
"spec_analyzer": AgentInfo(
name="Spec Analyzer",
description="Parsing specification to extract requirements",
- color="#3B82F6" # Blue
+ color="#3B82F6", # Blue
),
"plan_structurer": AgentInfo(
name="Plan Structurer",
description="Parsing prompt plan to extract implementation phases",
- color="#10B981" # Green
+ color="#10B981", # Green
),
"merger": AgentInfo(
name="Merger",
description="Aligning spec requirements with plan phases",
- color="#F59E0B" # Amber
+ color="#F59E0B", # Amber
),
"writer": AgentInfo(
name="Content Writer",
description="Generating spec_text and prompt_plan_text for features",
- color="#EF4444" # Red
+ color="#EF4444", # Red
),
"validator": AgentInfo(
name="Validator",
description="Validating coverage and ordering",
- color="#EC4899" # Pink
+ color="#EC4899", # Pink
),
}
# Workflow step definitions for progress tracking
-WORKFLOW_STEPS = [
- "start",
- "analyzing_spec",
- "structuring_plan",
- "merging",
- "writing_content",
- "validating",
- "complete"
-]
+WORKFLOW_STEPS = ["start", "analyzing_spec", "structuring_plan", "merging", "writing_content", "validating", "complete"]
# ============================
@@ -437,6 +457,7 @@ class AgentInfo:
# Helper Functions
# ============================
+
def get_module_by_id(modules: List[ModuleMapping], module_id: str) -> Optional[ModuleMapping]:
"""Get a module mapping by its ID."""
for module in modules:
@@ -508,7 +529,9 @@ def validate_extraction_result(result: ExtractionResult) -> List[str]:
issues.append(f"Feature '{feature.title}' has spec_text too short ({len(feature.spec_text)} chars)")
if len(feature.prompt_plan_text) < MIN_PROMPT_PLAN_TEXT_LENGTH:
- issues.append(f"Feature '{feature.title}' has prompt_plan_text too short ({len(feature.prompt_plan_text)} chars)")
+ issues.append(
+ f"Feature '{feature.title}' has prompt_plan_text too short ({len(feature.prompt_plan_text)} chars)"
+ )
# Check for duplicate module titles
titles = [m.title.lower() for m in result.modules]
diff --git a/backend/app/agents/module_feature/utils.py b/backend/app/agents/module_feature/utils.py
index cfbcd00..9615167 100644
--- a/backend/app/agents/module_feature/utils.py
+++ b/backend/app/agents/module_feature/utils.py
@@ -4,16 +4,16 @@
Provides JSON parsing, text processing, and markdown extraction utilities.
"""
-import re
import json
import logging
+import re
import traceback
from typing import Any, Dict, List, Optional
from json_repair import repair_json
# Import from common module and re-export for backwards compatibility
-from app.agents.response_parser import strip_markdown_json, normalize_response_content
+from app.agents.response_parser import strip_markdown_json
# Get logger for JSON repair warnings
_repair_logger = logging.getLogger("module_feature.json_repair")
@@ -87,7 +87,7 @@ def repair_truncated_json(text: str) -> str:
escape_next = False
continue
- if char == '\\':
+ if char == "\\":
escape_next = True
continue
@@ -98,15 +98,15 @@ def repair_truncated_json(text: str) -> str:
if in_string:
continue
- if char == '{':
- stack.append('}')
- elif char == '[':
- stack.append(']')
- elif char == '}':
- if stack and stack[-1] == '}':
+ if char == "{":
+ stack.append("}")
+ elif char == "[":
+ stack.append("]")
+ elif char == "}":
+ if stack and stack[-1] == "}":
stack.pop()
- elif char == ']':
- if stack and stack[-1] == ']':
+ elif char == "]":
+ if stack and stack[-1] == "]":
stack.pop()
# If we're inside a string, close it first
@@ -138,7 +138,7 @@ def fix_common_json_errors(text: str) -> str:
"""
# Remove trailing commas before closing brackets/braces
# Pattern: comma followed by optional whitespace then ] or }
- text = re.sub(r',(\s*[\]}])', r'\1', text)
+ text = re.sub(r",(\s*[\]}])", r"\1", text)
# Fix missing colons after object keys
# Pattern: "key" followed by whitespace then another "value" without a colon
@@ -164,7 +164,7 @@ def fix_common_json_errors(text: str) -> str:
i += 1
continue
- if char == '\\':
+ if char == "\\":
escape_next = True
result.append(char)
i += 1
@@ -178,15 +178,15 @@ def fix_common_json_errors(text: str) -> str:
if in_string:
# Check for control characters that need escaping
- if char == '\n':
- result.append('\\n')
- elif char == '\r':
- result.append('\\r')
- elif char == '\t':
- result.append('\\t')
+ if char == "\n":
+ result.append("\\n")
+ elif char == "\r":
+ result.append("\\r")
+ elif char == "\t":
+ result.append("\\t")
elif ord(char) < 32:
# Other control characters - escape as unicode
- result.append(f'\\u{ord(char):04x}')
+ result.append(f"\\u{ord(char):04x}")
else:
result.append(char)
else:
@@ -194,7 +194,7 @@ def fix_common_json_errors(text: str) -> str:
i += 1
- return ''.join(result)
+ return "".join(result)
def _log_json_repair_banner(strategy: str, text_preview: str, text_length: int, extra_info: str = ""):
@@ -210,10 +210,10 @@ def _log_json_repair_banner(strategy: str, text_preview: str, text_length: int,
║ Strategy: {strategy:<67}║
║ Text length: {text_length:<64}║
║ Preview: {text_preview[:60]:<68}║
-{f'║ Info: {extra_info:<71}║' if extra_info else ''}
+{f"║ Info: {extra_info:<71}║" if extra_info else ""}
╠══════════════════════════════════════════════════════════════════════════════╣
║ CALL STACK: ║
-{chr(10).join(f'║ {line:<77}║' for line in caller_info.split(chr(10)))}
+{chr(10).join(f"║ {line:<77}║" for line in caller_info.split(chr(10)))}
╚══════════════════════════════════════════════════════════════════════════════╝
"""
_repair_logger.warning(banner)
@@ -255,18 +255,18 @@ def parse_json_with_repair(text: str) -> Dict[str, Any]:
if isinstance(repaired, dict):
_log_json_repair_banner(
"Strategy 2: json-repair library",
- cleaned[:100].replace('\n', '\\n'),
+ cleaned[:100].replace("\n", "\\n"),
len(cleaned),
- f"Initial error: {initial_error[:50]}"
+ f"Initial error: {initial_error[:50]}",
)
return repaired
elif isinstance(repaired, str):
result = json.loads(repaired)
_log_json_repair_banner(
"Strategy 2: json-repair library (string result)",
- cleaned[:100].replace('\n', '\\n'),
+ cleaned[:100].replace("\n", "\\n"),
len(cleaned),
- f"Initial error: {initial_error[:50]}"
+ f"Initial error: {initial_error[:50]}",
)
return result
except Exception:
@@ -278,9 +278,9 @@ def parse_json_with_repair(text: str) -> Dict[str, Any]:
result = json.loads(fixed_and_repaired)
_log_json_repair_banner(
"Strategy 3: fix_common_errors + repair_truncated",
- cleaned[:100].replace('\n', '\\n'),
+ cleaned[:100].replace("\n", "\\n"),
len(cleaned),
- f"Initial error: {initial_error[:50]}"
+ f"Initial error: {initial_error[:50]}",
)
return result
except json.JSONDecodeError:
@@ -290,7 +290,7 @@ def parse_json_with_repair(text: str) -> Dict[str, Any]:
# Sometimes LLMs add extra text before or after the JSON
try:
# Find the first { and match to its closing }
- start = cleaned.find('{')
+ start = cleaned.find("{")
if start != -1:
# Track braces to find matching close
depth = 0
@@ -302,16 +302,16 @@ def parse_json_with_repair(text: str) -> Dict[str, Any]:
if escape_next:
escape_next = False
continue
- if char == '\\':
+ if char == "\\":
escape_next = True
continue
if char == '"' and not escape_next:
in_string = not in_string
continue
if not in_string:
- if char == '{':
+ if char == "{":
depth += 1
- elif char == '}':
+ elif char == "}":
depth -= 1
if depth == 0:
end = i + 1
@@ -325,9 +325,9 @@ def parse_json_with_repair(text: str) -> Dict[str, Any]:
if isinstance(repaired, dict):
_log_json_repair_banner(
"Strategy 4: Extract JSON + json-repair",
- cleaned[:100].replace('\n', '\\n'),
+ cleaned[:100].replace("\n", "\\n"),
len(cleaned),
- f"Extracted {end - start} chars from position {start}"
+ f"Extracted {end - start} chars from position {start}",
)
return repaired
except Exception:
@@ -335,9 +335,9 @@ def parse_json_with_repair(text: str) -> Dict[str, Any]:
result = json.loads(extracted)
_log_json_repair_banner(
"Strategy 4: Extract JSON object",
- cleaned[:100].replace('\n', '\\n'),
+ cleaned[:100].replace("\n", "\\n"),
len(cleaned),
- f"Extracted {end - start} chars from position {start}"
+ f"Extracted {end - start} chars from position {start}",
)
return result
except json.JSONDecodeError:
@@ -346,9 +346,9 @@ def parse_json_with_repair(text: str) -> Dict[str, Any]:
# All strategies failed - raise with the original cleaned text
_log_json_repair_banner(
"ALL STRATEGIES FAILED - raising exception",
- cleaned[:100].replace('\n', '\\n'),
+ cleaned[:100].replace("\n", "\\n"),
len(cleaned),
- f"Initial error: {initial_error[:50]}"
+ f"Initial error: {initial_error[:50]}",
)
return json.loads(cleaned)
@@ -368,7 +368,7 @@ def truncate_text(text: str, max_length: int = 500) -> str:
return text
# Truncate at word boundary
- truncated = text[:max_length].rsplit(' ', 1)[0]
+ truncated = text[:max_length].rsplit(" ", 1)[0]
return truncated + "..."
@@ -383,7 +383,7 @@ def normalize_whitespace(text: str) -> str:
Normalized text
"""
# Replace multiple whitespace with single space
- text = re.sub(r'\s+', ' ', text)
+ text = re.sub(r"\s+", " ", text)
return text.strip()
@@ -401,18 +401,20 @@ def extract_markdown_sections(markdown: str) -> List[Dict[str, str]]:
current_section = None
current_content = []
- for line in markdown.split('\n'):
+ for line in markdown.split("\n"):
# Check for ## header
- header_match = re.match(r'^##\s+(.+)$', line)
+ header_match = re.match(r"^##\s+(.+)$", line)
if header_match:
# Save previous section if exists
if current_section is not None:
- sections.append({
- 'id': slugify(current_section),
- 'title': current_section,
- 'content': '\n'.join(current_content).strip()
- })
+ sections.append(
+ {
+ "id": slugify(current_section),
+ "title": current_section,
+ "content": "\n".join(current_content).strip(),
+ }
+ )
current_section = header_match.group(1).strip()
current_content = []
@@ -421,11 +423,9 @@ def extract_markdown_sections(markdown: str) -> List[Dict[str, str]]:
# Save final section
if current_section is not None:
- sections.append({
- 'id': slugify(current_section),
- 'title': current_section,
- 'content': '\n'.join(current_content).strip()
- })
+ sections.append(
+ {"id": slugify(current_section), "title": current_section, "content": "\n".join(current_content).strip()}
+ )
return sections
@@ -451,13 +451,13 @@ def slugify(text: str) -> str:
# Convert to lowercase
text = text.lower()
# Replace spaces with underscores
- text = re.sub(r'\s+', '_', text)
+ text = re.sub(r"\s+", "_", text)
# Remove non-alphanumeric characters (except underscores)
- text = re.sub(r'[^a-z0-9_]', '', text)
+ text = re.sub(r"[^a-z0-9_]", "", text)
# Remove consecutive underscores
- text = re.sub(r'_+', '_', text)
+ text = re.sub(r"_+", "_", text)
# Remove leading/trailing underscores
- text = text.strip('_')
+ text = text.strip("_")
return text
@@ -480,10 +480,7 @@ def extract_phase_steps_from_markdown(markdown: str) -> List[Dict[str, Any]]:
current_steps = []
# Patterns for phase headers
- phase_pattern = re.compile(
- r'^(?:#{1,4}\s*)?(?:Phase\s*)?(\d+)[.:]\s*(.+?)$',
- re.IGNORECASE | re.MULTILINE
- )
+ phase_pattern = re.compile(r"^(?:#{1,4}\s*)?(?:Phase\s*)?(\d+)[.:]\s*(.+?)$", re.IGNORECASE | re.MULTILINE)
# Split by phase headers
parts = phase_pattern.split(markdown)
@@ -500,11 +497,7 @@ def extract_phase_steps_from_markdown(markdown: str) -> List[Dict[str, Any]]:
# Extract steps from content
steps = extract_steps_from_content(phase_content)
- phases.append({
- 'index': phase_index,
- 'title': phase_title,
- 'steps': steps
- })
+ phases.append({"index": phase_index, "title": phase_title, "steps": steps})
i += 3
@@ -526,17 +519,16 @@ def extract_steps_from_content(content: str) -> List[Dict[str, str]]:
steps = []
# Pattern for list items (numbered or bulleted)
- item_pattern = re.compile(
- r'^(?:\d+[.)]|\*|-)\s+(.+?)$',
- re.MULTILINE
- )
+ item_pattern = re.compile(r"^(?:\d+[.)]|\*|-)\s+(.+?)$", re.MULTILINE)
for match in item_pattern.finditer(content):
step_text = match.group(1).strip()
- steps.append({
- 'title': step_text,
- 'description': '' # Could be enhanced to capture following lines
- })
+ steps.append(
+ {
+ "title": step_text,
+ "description": "", # Could be enhanced to capture following lines
+ }
+ )
return steps
@@ -583,14 +575,14 @@ def generate_semantic_id(prefix: str, title: str, index: int, max_len: int = 20)
return f"{prefix}-{index:03d}"
# Convert to slug: uppercase, alphanumeric + hyphens only
- slug = re.sub(r'[^a-zA-Z0-9]+', '-', title.strip())
- slug = slug.strip('-').upper()
+ slug = re.sub(r"[^a-zA-Z0-9]+", "-", title.strip())
+ slug = slug.strip("-").upper()
# Truncate if too long
if len(slug) > max_len:
# Try to break at a hyphen
truncated = slug[:max_len]
- last_hyphen = truncated.rfind('-')
+ last_hyphen = truncated.rfind("-")
if last_hyphen > max_len // 2:
slug = truncated[:last_hyphen]
else:
@@ -613,7 +605,7 @@ def chunk_list(items: List[Any], chunk_size: int) -> List[List[Any]]:
Returns:
List of chunks
"""
- return [items[i:i + chunk_size] for i in range(0, len(items), chunk_size)]
+ return [items[i : i + chunk_size] for i in range(0, len(items), chunk_size)]
def merge_markdown_sections(sections: List[Dict[str, str]]) -> str:
@@ -630,6 +622,6 @@ def merge_markdown_sections(sections: List[Dict[str, str]]) -> str:
for section in sections:
lines.append(f"## {section['title']}")
lines.append("")
- lines.append(section['content'])
+ lines.append(section["content"])
lines.append("")
return "\n".join(lines)
diff --git a/backend/app/agents/module_feature/validator.py b/backend/app/agents/module_feature/validator.py
index 4b9b020..ad9355b 100644
--- a/backend/app/agents/module_feature/validator.py
+++ b/backend/app/agents/module_feature/validator.py
@@ -7,16 +7,16 @@
from typing import List, Optional, Set
+from .logging_config import get_agent_logger
from .types import (
- SpecAnalysis,
- PlanStructure,
+ MIN_PROMPT_PLAN_TEXT_LENGTH,
+ MIN_SPEC_TEXT_LENGTH,
+ CoverageReport,
MergedMapping,
+ PlanStructure,
+ SpecAnalysis,
WriterOutput,
- CoverageReport,
- MIN_SPEC_TEXT_LENGTH,
- MIN_PROMPT_PLAN_TEXT_LENGTH,
)
-from .logging_config import get_agent_logger
class ValidatorAgent:
@@ -68,23 +68,14 @@ async def validate(
try:
# Build content lookup
- content_map = {
- fc.feature_id: fc
- for fc in writer_output.feature_contents
- }
+ content_map = {fc.feature_id: fc for fc in writer_output.feature_contents}
# Validate coverage
- uncovered_requirements = self._check_requirement_coverage(
- spec_analysis, merged_mapping
- )
- uncovered_steps = self._check_step_coverage(
- plan_structure, merged_mapping
- )
+ uncovered_requirements = self._check_requirement_coverage(spec_analysis, merged_mapping)
+ uncovered_steps = self._check_step_coverage(plan_structure, merged_mapping)
# Validate ordering
- ordering_issues = self._check_ordering(
- plan_structure, merged_mapping
- )
+ ordering_issues = self._check_ordering(plan_structure, merged_mapping)
# Content quality checks removed - relying on prompt-based prevention
# in Writer agent (TBD markers) rather than post-hoc pattern matching
@@ -115,10 +106,7 @@ async def validate(
must_have_without_content.append(feature.feature_id)
# Calculate coverage percentage
- total_items = (
- len(spec_analysis.requirements) +
- plan_structure.total_steps
- )
+ total_items = len(spec_analysis.requirements) + plan_structure.total_steps
covered_items = total_items - len(uncovered_requirements) - len(uncovered_steps)
coverage_percentage = (covered_items / total_items * 100) if total_items > 0 else 100
@@ -134,8 +122,7 @@ async def validate(
# Determine if validation passed
# OK if: no must_have requirements uncovered, no must_have features missing content
must_have_reqs_uncovered = [
- req_id for req_id in uncovered_requirements
- if self._is_must_have_requirement(spec_analysis, req_id)
+ req_id for req_id in uncovered_requirements if self._is_must_have_requirement(spec_analysis, req_id)
]
ok = len(must_have_reqs_uncovered) == 0 and len(must_have_without_content) == 0
@@ -357,26 +344,20 @@ def _generate_suggestions(
if uncovered_requirements:
suggestions.append(
f"Consider adding features for {len(uncovered_requirements)} uncovered requirements: "
- f"{', '.join(uncovered_requirements[:5])}"
- + ("..." if len(uncovered_requirements) > 5 else "")
+ f"{', '.join(uncovered_requirements[:5])}" + ("..." if len(uncovered_requirements) > 5 else "")
)
if uncovered_steps:
suggestions.append(
f"Consider adding features for {len(uncovered_steps)} uncovered plan steps: "
- f"{', '.join(uncovered_steps[:5])}"
- + ("..." if len(uncovered_steps) > 5 else "")
+ f"{', '.join(uncovered_steps[:5])}" + ("..." if len(uncovered_steps) > 5 else "")
)
if ordering_issues:
- suggestions.append(
- f"Review feature ordering to ensure phase dependencies are respected"
- )
+ suggestions.append("Review feature ordering to ensure phase dependencies are respected")
if empty_spec_text:
- suggestions.append(
- f"Enhance spec_text for {len(empty_spec_text)} features with insufficient content"
- )
+ suggestions.append(f"Enhance spec_text for {len(empty_spec_text)} features with insufficient content")
if empty_prompt_plan_text:
suggestions.append(
@@ -389,9 +370,7 @@ def _generate_suggestions(
return suggestions
-async def create_validator(
- project_id: Optional[str] = None
-) -> ValidatorAgent:
+async def create_validator(project_id: Optional[str] = None) -> ValidatorAgent:
"""
Factory function to create a Validator Agent.
diff --git a/backend/app/agents/module_feature/writer.py b/backend/app/agents/module_feature/writer.py
index 3e46d2e..f65b86d 100644
--- a/backend/app/agents/module_feature/writer.py
+++ b/backend/app/agents/module_feature/writer.py
@@ -7,24 +7,24 @@
import asyncio
import json
-from typing import List, Optional, Tuple
+from typing import List, Optional
from autogen_agentchat.agents import AssistantAgent
from autogen_agentchat.messages import TextMessage
from autogen_core import CancellationToken
from autogen_core.models import ChatCompletionClient
+from .logging_config import get_agent_logger
from .types import (
+ FeatureContent,
+ FeatureMapping,
+ MergedMapping,
ModuleFeatureContext,
- SpecAnalysis,
PlanStructure,
- MergedMapping,
- FeatureMapping,
- FeatureContent,
+ SpecAnalysis,
WriterOutput,
)
-from .logging_config import get_agent_logger
-from .utils import strip_markdown_json, chunk_list
+from .utils import chunk_list, strip_markdown_json
class WriterAgent:
@@ -131,10 +131,7 @@ async def write_all(
Raises:
ValueError: If writing fails
"""
- self.logger.log_agent_start(
- project_id=str(context.project_id),
- features_count=len(merged_mapping.features)
- )
+ self.logger.log_agent_start(project_id=str(context.project_id), features_count=len(merged_mapping.features))
try:
# Process features individually (1 per call) in PARALLEL
@@ -149,12 +146,7 @@ async def write_all(
# Create tasks for parallel processing
tasks = [
self._write_chunk_with_index(
- chunk_idx,
- feature_chunk,
- spec_analysis,
- plan_structure,
- context,
- len(feature_chunks)
+ chunk_idx, feature_chunk, spec_analysis, plan_structure, context, len(feature_chunks)
)
for chunk_idx, feature_chunk in enumerate(feature_chunks)
]
@@ -166,18 +158,17 @@ async def write_all(
all_contents: List[FeatureContent] = []
for chunk_idx, result in enumerate(chunk_results):
if isinstance(result, Exception):
- self.logger.log_error(result, {
- "chunk_index": chunk_idx,
- "error": str(result)
- })
+ self.logger.log_error(result, {"chunk_index": chunk_idx, "error": str(result)})
# Generate fallback content for failed chunks
for feature in feature_chunks[chunk_idx]:
- all_contents.append(FeatureContent(
- feature_id=feature.feature_id,
- description=f"This feature enables {feature.title.lower()} functionality for users.",
- spec_text=f"## {feature.title}\n\nImplement this feature as specified.",
- prompt_plan_text=f"## Implementation\n\n1. Implement {feature.title}\n2. Write tests\n3. Verify functionality",
- ))
+ all_contents.append(
+ FeatureContent(
+ feature_id=feature.feature_id,
+ description=f"This feature enables {feature.title.lower()} functionality for users.",
+ spec_text=f"## {feature.title}\n\nImplement this feature as specified.",
+ prompt_plan_text=f"## Implementation\n\n1. Implement {feature.title}\n2. Write tests\n3. Verify functionality",
+ )
+ )
else:
all_contents.extend(result)
@@ -194,9 +185,7 @@ async def write_all(
avg_prompt_plan_text_length=avg_plan_len,
)
- self.logger.log_agent_complete(
- features_written=len(all_contents)
- )
+ self.logger.log_agent_complete(features_written=len(all_contents))
return WriterOutput(feature_contents=all_contents)
@@ -231,12 +220,10 @@ async def _write_chunk_with_index(
prompt=f"Processing chunk {chunk_idx + 1}/{total_chunks} ({len(features)} features)",
model=str(self.model_client),
operation="write_feature_content",
- chunk_size=len(features)
+ chunk_size=len(features),
)
- return await self._write_chunk(
- features, spec_analysis, plan_structure, context
- )
+ return await self._write_chunk(features, spec_analysis, plan_structure, context)
async def _write_chunk(
self,
@@ -268,10 +255,7 @@ async def _write_chunk(
model_client=self.model_client,
)
- response = await chunk_agent.on_messages(
- [TextMessage(content=prompt, source="user")],
- CancellationToken()
- )
+ response = await chunk_agent.on_messages([TextMessage(content=prompt, source="user")], CancellationToken())
response_text = response.chat_message.content
if isinstance(response_text, list):
@@ -283,11 +267,14 @@ async def _write_chunk(
try:
result_data = json.loads(cleaned)
except json.JSONDecodeError as e:
- self.logger.log_error(e, {
- "response_length": len(response_text),
- "response_preview": cleaned[:300],
- "error_position": e.pos if hasattr(e, 'pos') else None
- })
+ self.logger.log_error(
+ e,
+ {
+ "response_length": len(response_text),
+ "response_preview": cleaned[:300],
+ "error_position": e.pos if hasattr(e, "pos") else None,
+ },
+ )
# Return fallback content on parse failure
return [
FeatureContent(
@@ -307,7 +294,7 @@ async def _write_chunk(
"result_keys": list(result_data.keys()) if isinstance(result_data, dict) else None,
"response_preview": response_text[:500],
"response_length": len(response_text),
- }
+ },
)
# Convert to FeatureContent objects
@@ -322,21 +309,18 @@ async def _write_chunk(
else:
self.logger.log_error(
ValueError(f"Invalid feature entry: expected dict with feature_id, got {type(f).__name__}"),
- {"raw_value": str(f)[:200] if f else None}
+ {"raw_value": str(f)[:200] if f else None},
)
# Log if we had to filter out invalid entries
if len(valid_features) < len(raw_features):
self.logger.log_error(
ValueError(f"Filtered out {len(raw_features) - len(valid_features)} invalid feature entries"),
- {"total_raw": len(raw_features), "valid_count": len(valid_features)}
+ {"total_raw": len(raw_features), "valid_count": len(valid_features)},
)
# Create a map for easy lookup
- content_map = {
- f.get("feature_id", ""): f
- for f in valid_features
- }
+ content_map = {f.get("feature_id", ""): f for f in valid_features}
for feature in features:
if feature.feature_id in content_map:
@@ -347,22 +331,30 @@ async def _write_chunk(
raw_image_ids = []
relevant_image_ids = [str(img_id) for img_id in raw_image_ids if img_id]
- contents.append(FeatureContent(
- feature_id=feature.feature_id,
- description=data.get("description", f"This feature enables {feature.title.lower()} functionality for users."),
- spec_text=data.get("spec_text", f"## {feature.title}\n\nImplement as specified."),
- prompt_plan_text=data.get("prompt_plan_text", f"## Implementation\n\n1. Implement {feature.title}"),
- relevant_image_ids=relevant_image_ids,
- ))
+ contents.append(
+ FeatureContent(
+ feature_id=feature.feature_id,
+ description=data.get(
+ "description", f"This feature enables {feature.title.lower()} functionality for users."
+ ),
+ spec_text=data.get("spec_text", f"## {feature.title}\n\nImplement as specified."),
+ prompt_plan_text=data.get(
+ "prompt_plan_text", f"## Implementation\n\n1. Implement {feature.title}"
+ ),
+ relevant_image_ids=relevant_image_ids,
+ )
+ )
else:
# Fallback for missing features
- contents.append(FeatureContent(
- feature_id=feature.feature_id,
- description=f"This feature enables {feature.title.lower()} functionality for users.",
- spec_text=f"## {feature.title}\n\nImplement this feature according to the specification.",
- prompt_plan_text=f"## Implementation\n\n1. Implement {feature.title}\n2. Write unit tests\n3. Verify functionality",
- relevant_image_ids=[],
- ))
+ contents.append(
+ FeatureContent(
+ feature_id=feature.feature_id,
+ description=f"This feature enables {feature.title.lower()} functionality for users.",
+ spec_text=f"## {feature.title}\n\nImplement this feature according to the specification.",
+ prompt_plan_text=f"## Implementation\n\n1. Implement {feature.title}\n2. Write unit tests\n3. Verify functionality",
+ relevant_image_ids=[],
+ )
+ )
return contents
@@ -400,7 +392,7 @@ def _build_prompt(
prompt += "\n"
# Add pending clarification topics if available
- if hasattr(context, 'topics_pending_clarification') and context.topics_pending_clarification:
+ if hasattr(context, "topics_pending_clarification") and context.topics_pending_clarification:
prompt += "## PENDING CLARIFICATION TOPICS:\n\n"
prompt += "The following topics were not fully decided in brainstorming. Do NOT invent solutions:\n"
for topic in context.topics_pending_clarification:
@@ -470,10 +462,7 @@ def _build_prompt(
return prompt
-async def create_writer(
- model_client: ChatCompletionClient,
- project_id: Optional[str] = None
-) -> WriterAgent:
+async def create_writer(model_client: ChatCompletionClient, project_id: Optional[str] = None) -> WriterAgent:
"""
Factory function to create a Writer Agent.
diff --git a/backend/app/agents/project_chat_assistant/__init__.py b/backend/app/agents/project_chat_assistant/__init__.py
index 5857d20..4c109b0 100644
--- a/backend/app/agents/project_chat_assistant/__init__.py
+++ b/backend/app/agents/project_chat_assistant/__init__.py
@@ -7,17 +7,17 @@
description when ready.
"""
-from .types import (
- ProjectChatContext,
- ProjectChatAssistantResponse,
- MCQOption,
- ExistingPhaseInfo,
-)
-from .assistant import ProjectChatAssistant, SYSTEM_PROMPT
+from .assistant import SYSTEM_PROMPT, ProjectChatAssistant
from .orchestrator import (
- load_context,
generate_response,
handle_user_message,
+ load_context,
+)
+from .types import (
+ ExistingPhaseInfo,
+ MCQOption,
+ ProjectChatAssistantResponse,
+ ProjectChatContext,
)
__all__ = [
diff --git a/backend/app/agents/project_chat_assistant/assistant.py b/backend/app/agents/project_chat_assistant/assistant.py
index 2e2a0a3..8e9bdf3 100644
--- a/backend/app/agents/project_chat_assistant/assistant.py
+++ b/backend/app/agents/project_chat_assistant/assistant.py
@@ -8,28 +8,24 @@
import json
import logging
import re
-from typing import Awaitable, Callable, List, Optional, TYPE_CHECKING
+from typing import TYPE_CHECKING, Awaitable, Callable, List, Optional
from autogen_agentchat.agents import AssistantAgent
-from autogen_agentchat.messages import StructuredMessage, TextMessage
+from autogen_agentchat.messages import TextMessage
from autogen_core import CancellationToken
-from pydantic import ValidationError
-from autogen_core.models import ChatCompletionClient, UserMessage, AssistantMessage, LLMMessage
from autogen_core.model_context import TokenLimitedChatCompletionContext
+from autogen_core.models import AssistantMessage, ChatCompletionClient, LLMMessage, UserMessage
-from .types import (
- ProjectChatContext,
- ProjectChatAssistantResponse,
- ContainerContext,
- MCQOption,
- ExistingPhaseInfo,
- ExistingModuleInfo,
-)
from app.agents.collab_thread_assistant.web_search_parser import has_web_search_block
from app.agents.response_parser import (
- strip_markdown_json,
- normalize_response_content,
extract_json_from_text,
+ strip_markdown_json,
+)
+
+from .types import (
+ ContainerContext,
+ ProjectChatAssistantResponse,
+ ProjectChatContext,
)
if TYPE_CHECKING:
@@ -350,7 +346,9 @@ def _build_grounding_section(grounding_context: str) -> str:
sections.append("")
sections.append(grounding_context)
sections.append("")
- sections.append("Use this context to understand the project's technical foundation when helping users define what to build.")
+ sections.append(
+ "Use this context to understand the project's technical foundation when helping users define what to build."
+ )
return "\n".join(sections)
@@ -360,7 +358,7 @@ def _build_extension_context_section(container_ctx: ContainerContext) -> str:
lines = [
"### EXTENSION MODE",
"",
- f"You are helping create an **extension** for the container **\"{container_ctx.container_title}\"**.",
+ f'You are helping create an **extension** for the container **"{container_ctx.container_title}"**.',
"",
"An extension explores a new aspect that builds upon or complements the initial spec.",
"Focus on what NEW aspect extends the initial spec - avoid duplicating what's already covered.",
@@ -447,9 +445,7 @@ def _build_context_section(context: ProjectChatContext) -> str:
)
sections.append(exploration_info)
else:
- sections.append(
- "This is a GREENFIELD project - starting fresh with no existing codebase."
- )
+ sections.append("This is a GREENFIELD project - starting fresh with no existing codebase.")
# Existing phases
if context.existing_phases:
@@ -472,7 +468,7 @@ def _build_context_section(context: ProjectChatContext) -> str:
for decision in phase.decisions[:5]: # Limit to 5 decisions per phase
decision_lines.append(
f"- [{decision.aspect_title}] {decision.question_title}: "
- f"\"{decision.decision_summary_short}\""
+ f'"{decision.decision_summary_short}"'
)
if len(decision_lines) > 1: # Has actual decisions
sections.append("\n".join(decision_lines))
@@ -480,10 +476,7 @@ def _build_context_section(context: ProjectChatContext) -> str:
if cross_ctx.project_features:
feature_lines = ["Project-level feature decisions:"]
for feat in cross_ctx.project_features[:5]: # Limit to 5
- feature_lines.append(
- f"- [{feat.module_title}] {feat.feature_title}: "
- f"\"{feat.decision_summary_short}\""
- )
+ feature_lines.append(f'- [{feat.module_title}] {feat.feature_title}: "{feat.decision_summary_short}"')
sections.append("\n".join(feature_lines))
# Extension mode context
@@ -728,9 +721,7 @@ def _parse_response(self, response_text: str) -> tuple[ProjectChatAssistantRespo
try:
inner_data = json.loads(inner_json_str)
if isinstance(inner_data, dict) and "reply_text" in inner_data:
- logger.warning(
- "Detected JSON in reply_text field - extracting inner reply_text"
- )
+ logger.warning("Detected JSON in reply_text field - extracting inner reply_text")
# Use the inner data instead
data = inner_data
except (json.JSONDecodeError, TypeError):
@@ -739,7 +730,11 @@ def _parse_response(self, response_text: str) -> tuple[ProjectChatAssistantRespo
# Fallback: Detect exploration intent when JSON parsing failed or was incomplete
# This catches cases where the agent says "Let me explore..." without proper JSON
# Skip if web search is requested (via JSON field or block) - "let me search" should NOT trigger code exploration
- if not data.get("wants_code_exploration") and not data.get("wants_web_search") and not has_web_search_block(response_text):
+ if (
+ not data.get("wants_code_exploration")
+ and not data.get("wants_web_search")
+ and not has_web_search_block(response_text)
+ ):
reply_text = data.get("reply_text", response_text).lower()
original_reply = data.get("reply_text", response_text)
@@ -870,9 +865,7 @@ async def generate_response(
# While adding: smart token-aware trimming from the middle
initial_messages = None
if context.recent_messages:
- initial_messages = self._convert_messages_to_llm_format(
- context.recent_messages
- )
+ initial_messages = self._convert_messages_to_llm_format(context.recent_messages)
# Only send the current user message - history is in the model_context
user_message = context.user_message
@@ -894,10 +887,7 @@ async def generate_response(
# Set agent name for call logging
if self.llm_call_logger:
retry_suffix = f" (retry {attempt})" if attempt > 0 else ""
- self.llm_call_logger.set_agent(
- "project_chat",
- f"Pre-Phase Discussion Assistant{retry_suffix}"
- )
+ self.llm_call_logger.set_agent("project_chat", f"Pre-Phase Discussion Assistant{retry_suffix}")
try:
# Always append schema as suffix - it's the last thing the LLM sees
diff --git a/backend/app/agents/project_chat_assistant/orchestrator.py b/backend/app/agents/project_chat_assistant/orchestrator.py
index fcd625b..799f241 100644
--- a/backend/app/agents/project_chat_assistant/orchestrator.py
+++ b/backend/app/agents/project_chat_assistant/orchestrator.py
@@ -6,33 +6,37 @@
"""
import logging
-from typing import Any, Awaitable, Callable, Dict, Optional, TYPE_CHECKING
+from typing import TYPE_CHECKING, Any, Awaitable, Callable, Dict, Optional
from uuid import UUID
+from autogen_core.models import ChatCompletionClient
from sqlalchemy import func
from sqlalchemy.orm import Session
-from autogen_core.models import ChatCompletionClient
-
-from app.models.project_chat import ProjectChat
-from app.models.project import Project
-from app.models.organization import Organization
-from app.models.brainstorming_phase import BrainstormingPhase
-from app.models.grounding_file import GroundingFile
-from app.models.module import Module, ModuleType
-from app.models.feature import Feature, FeatureVisibilityStatus, FeatureType
-from app.models.thread import Thread, ContextType
-from app.models.platform_settings import PlatformSettings
-
from app.agents.brainstorm_conversation.types import (
- CrossProjectContext,
CrossPhaseContext,
CrossPhaseDecision,
+ CrossProjectContext,
ProjectFeatureDecision,
)
+from app.models.brainstorming_phase import BrainstormingPhase
+from app.models.feature import Feature, FeatureType, FeatureVisibilityStatus
+from app.models.grounding_file import GroundingFile
+from app.models.module import Module, ModuleType
+from app.models.organization import Organization
+from app.models.platform_settings import PlatformSettings
+from app.models.project import Project
+from app.models.project_chat import ProjectChat
+from app.models.thread import ContextType, Thread
-from .types import ProjectChatContext, ProjectChatAssistantResponse, ExistingPhaseInfo, ExistingModuleInfo, ContainerContext
from .assistant import ProjectChatAssistant
+from .types import (
+ ContainerContext,
+ ExistingModuleInfo,
+ ExistingPhaseInfo,
+ ProjectChatAssistantResponse,
+ ProjectChatContext,
+)
if TYPE_CHECKING:
from app.agents.llm_client import LLMCallLogger
@@ -67,47 +71,55 @@ def _build_cross_project_context_for_project_chat(
project_features_context = []
# 1. Query ALL brainstorming phases (not archived)
- all_phases = db.query(BrainstormingPhase).filter(
- BrainstormingPhase.project_id == project_id,
- BrainstormingPhase.archived_at.is_(None)
- ).order_by(BrainstormingPhase.created_at).limit(max_phases).all()
+ all_phases = (
+ db.query(BrainstormingPhase)
+ .filter(BrainstormingPhase.project_id == project_id, BrainstormingPhase.archived_at.is_(None))
+ .order_by(BrainstormingPhase.created_at)
+ .limit(max_phases)
+ .all()
+ )
for phase in all_phases:
decisions = []
# Get modules for this phase
- modules = db.query(Module).filter(
- Module.brainstorming_phase_id == phase.id,
- Module.archived_at.is_(None)
- ).all()
+ modules = db.query(Module).filter(Module.brainstorming_phase_id == phase.id, Module.archived_at.is_(None)).all()
for module in modules:
# Get ACTIVE features (questions) with threads that have decisions
- features = db.query(Feature).filter(
- Feature.module_id == module.id,
- Feature.visibility_status == FeatureVisibilityStatus.ACTIVE,
- Feature.archived_at.is_(None)
- ).all()
+ features = (
+ db.query(Feature)
+ .filter(
+ Feature.module_id == module.id,
+ Feature.visibility_status == FeatureVisibilityStatus.ACTIVE,
+ Feature.archived_at.is_(None),
+ )
+ .all()
+ )
for feature in features:
# Get thread for this feature
- thread = db.query(Thread).filter(
- Thread.context_type == ContextType.BRAINSTORM_FEATURE,
- Thread.context_id == str(feature.id)
- ).first()
+ thread = (
+ db.query(Thread)
+ .filter(Thread.context_type == ContextType.BRAINSTORM_FEATURE, Thread.context_id == str(feature.id))
+ .first()
+ )
# Only include if thread has decision_summary_short or decision_summary
if thread and (thread.decision_summary_short or thread.decision_summary):
summary = thread.decision_summary_short or (
- thread.decision_summary[:100] + "..." if len(thread.decision_summary or "") > 100
+ thread.decision_summary[:100] + "..."
+ if len(thread.decision_summary or "") > 100
else thread.decision_summary
)
if summary:
- decisions.append(CrossPhaseDecision(
- question_title=feature.title,
- decision_summary_short=summary,
- aspect_title=module.title,
- ))
+ decisions.append(
+ CrossPhaseDecision(
+ question_title=feature.title,
+ decision_summary_short=summary,
+ aspect_title=module.title,
+ )
+ )
# Cap decisions per phase
if len(decisions) >= max_decisions_per_phase:
@@ -123,47 +135,54 @@ def _build_cross_project_context_for_project_chat(
if len(description) > 200:
description = description[:200] + "..."
- phases_context.append(CrossPhaseContext(
- phase_id=str(phase.id),
- phase_title=phase.title,
- phase_description=description,
- decisions=decisions,
- ))
+ phases_context.append(
+ CrossPhaseContext(
+ phase_id=str(phase.id),
+ phase_title=phase.title,
+ phase_description=description,
+ decisions=decisions,
+ )
+ )
# 2. Query project-level features (module.brainstorming_phase_id IS NULL)
- project_modules = db.query(Module).filter(
- Module.project_id == project_id,
- Module.brainstorming_phase_id.is_(None),
- Module.archived_at.is_(None)
- ).all()
+ project_modules = (
+ db.query(Module)
+ .filter(Module.project_id == project_id, Module.brainstorming_phase_id.is_(None), Module.archived_at.is_(None))
+ .all()
+ )
for module in project_modules:
# Get IMPLEMENTATION features (not CONVERSATION)
- features = db.query(Feature).filter(
- Feature.module_id == module.id,
- Feature.feature_type == FeatureType.IMPLEMENTATION,
- Feature.visibility_status == FeatureVisibilityStatus.ACTIVE,
- Feature.archived_at.is_(None)
- ).all()
+ features = (
+ db.query(Feature)
+ .filter(
+ Feature.module_id == module.id,
+ Feature.feature_type == FeatureType.IMPLEMENTATION,
+ Feature.visibility_status == FeatureVisibilityStatus.ACTIVE,
+ Feature.archived_at.is_(None),
+ )
+ .all()
+ )
for feature in features:
# Get thread for this feature (could be SPEC or GENERAL context type)
- thread = db.query(Thread).filter(
- Thread.context_id == str(feature.id)
- ).first()
+ thread = db.query(Thread).filter(Thread.context_id == str(feature.id)).first()
# Only include if thread has decision summary
if thread and (thread.decision_summary_short or thread.decision_summary):
summary = thread.decision_summary_short or (
- thread.decision_summary[:100] + "..." if len(thread.decision_summary or "") > 100
+ thread.decision_summary[:100] + "..."
+ if len(thread.decision_summary or "") > 100
else thread.decision_summary
)
if summary:
- project_features_context.append(ProjectFeatureDecision(
- feature_title=feature.title,
- module_title=module.title,
- decision_summary_short=summary,
- ))
+ project_features_context.append(
+ ProjectFeatureDecision(
+ feature_title=feature.title,
+ module_title=module.title,
+ decision_summary_short=summary,
+ )
+ )
# Cap project features
if len(project_features_context) >= max_project_features:
@@ -205,9 +224,7 @@ def load_context(
ValueError: If discussion not found or not associated with a project.
"""
# Load discussion
- discussion = db.query(ProjectChat).filter(
- ProjectChat.id == project_chat_id
- ).first()
+ discussion = db.query(ProjectChat).filter(ProjectChat.id == project_chat_id).first()
if not discussion:
raise ValueError(f"Discussion {project_chat_id} not found")
@@ -220,9 +237,7 @@ def load_context(
)
# Load organization (always required)
- organization = db.query(Organization).filter(
- Organization.id == discussion.org_id
- ).first()
+ organization = db.query(Organization).filter(Organization.id == discussion.org_id).first()
if not organization:
raise ValueError(f"Organization {discussion.org_id} not found")
@@ -243,9 +258,7 @@ def load_context(
recent_messages.append(msg_data)
# Load project context
- project = db.query(Project).filter(
- Project.id == discussion.project_id
- ).first()
+ project = db.query(Project).filter(Project.id == discussion.project_id).first()
if not project:
raise ValueError(f"Project {discussion.project_id} not found")
@@ -255,10 +268,11 @@ def load_context(
grounding_summary = None
grounding_context = None
- grounding_file = db.query(GroundingFile).filter(
- GroundingFile.project_id == project.id,
- GroundingFile.filename == "agents.md"
- ).first()
+ grounding_file = (
+ db.query(GroundingFile)
+ .filter(GroundingFile.project_id == project.id, GroundingFile.filename == "agents.md")
+ .first()
+ )
if grounding_file:
# Prefer summary when available (saves tokens), fall back to full content
@@ -274,8 +288,7 @@ def load_context(
has_grounding = True
grounding_context = grounding_file.content
logger.info(
- f"Using full agents.md for project chat {project_chat_id} "
- f"({len(grounding_file.content)} chars)"
+ f"Using full agents.md for project chat {project_chat_id} ({len(grounding_file.content)} chars)"
)
# Check for repositories (brownfield indicator)
@@ -294,14 +307,16 @@ def load_context(
if sum(languages.values()) > 0:
has_repositories = True
- repositories.append({
- "slug": repo.slug,
- "display_name": repo.display_name,
- "repo_url": repo.repo_url,
- "default_branch": repo.default_branch,
- "user_remarks": repo.user_remarks,
- "primary_language": primary_language,
- })
+ repositories.append(
+ {
+ "slug": repo.slug,
+ "display_name": repo.display_name,
+ "repo_url": repo.repo_url,
+ "default_branch": repo.default_branch,
+ "user_remarks": repo.user_remarks,
+ "primary_language": primary_language,
+ }
+ )
# Check if code explorer is enabled
code_explorer_enabled = False
@@ -311,6 +326,7 @@ def load_context(
# Check if web search is enabled
from app.services.platform_settings_service import is_web_search_available_sync
+
web_search_enabled = is_web_search_available_sync(db)
# Note: We no longer load last_exploration_output/prompt here because
@@ -318,10 +334,13 @@ def load_context(
# messages, providing proper chronological context to the agent.
# Load existing phases
- existing_phases_db = db.query(BrainstormingPhase).filter(
- BrainstormingPhase.project_id == project.id,
- BrainstormingPhase.archived_at.is_(None)
- ).order_by(BrainstormingPhase.created_at.desc()).limit(10).all()
+ existing_phases_db = (
+ db.query(BrainstormingPhase)
+ .filter(BrainstormingPhase.project_id == project.id, BrainstormingPhase.archived_at.is_(None))
+ .order_by(BrainstormingPhase.created_at.desc())
+ .limit(10)
+ .all()
+ )
existing_phases = [
ExistingPhaseInfo(
@@ -333,27 +352,40 @@ def load_context(
]
# Load project-level modules (for feature placement proposals)
- existing_modules_db = db.query(Module).filter(
- Module.project_id == project.id,
- Module.brainstorming_phase_id.is_(None), # Project-level modules only
- Module.archived_at.is_(None),
- Module.module_type == ModuleType.IMPLEMENTATION,
- ).order_by(Module.created_at.desc()).limit(20).all()
+ existing_modules_db = (
+ db.query(Module)
+ .filter(
+ Module.project_id == project.id,
+ Module.brainstorming_phase_id.is_(None), # Project-level modules only
+ Module.archived_at.is_(None),
+ Module.module_type == ModuleType.IMPLEMENTATION,
+ )
+ .order_by(Module.created_at.desc())
+ .limit(20)
+ .all()
+ )
existing_modules = []
for module in existing_modules_db:
# Count features in this module
- feature_count = db.query(func.count(Feature.id)).filter(
- Feature.module_id == module.id,
- Feature.archived_at.is_(None),
- ).scalar() or 0
-
- existing_modules.append(ExistingModuleInfo(
- module_id=str(module.id),
- title=module.title,
- description=module.description[:200] if module.description else None,
- feature_count=feature_count,
- ))
+ feature_count = (
+ db.query(func.count(Feature.id))
+ .filter(
+ Feature.module_id == module.id,
+ Feature.archived_at.is_(None),
+ )
+ .scalar()
+ or 0
+ )
+
+ existing_modules.append(
+ ExistingModuleInfo(
+ module_id=str(module.id),
+ title=module.title,
+ description=module.description[:200] if module.description else None,
+ feature_count=feature_count,
+ )
+ )
# Build cross-project context (decisions from existing phases)
cross_project_context = _build_cross_project_context_for_project_chat(db, project.id)
@@ -363,9 +395,7 @@ def load_context(
if discussion.target_container_id:
from app.services.phase_container_service import PhaseContainerService
- preview = PhaseContainerService.get_extension_preview(
- db, discussion.target_container_id
- )
+ preview = PhaseContainerService.get_extension_preview(db, discussion.target_container_id)
if preview:
target_container = ContainerContext(
container_id=preview["container_id"],
@@ -436,7 +466,9 @@ async def generate_response(
# Load context
logger.info(f"Loading context for discussion {project_chat_id}")
context = load_context(
- db, project_chat_id, user_message,
+ db,
+ project_chat_id,
+ user_message,
is_exploration_followup=is_exploration_followup,
is_web_search_followup=is_web_search_followup,
)
@@ -497,7 +529,7 @@ async def handle_user_message(
# Get usage stats from model client
usage_stats = {}
- if hasattr(model_client, 'get_usage_stats'):
+ if hasattr(model_client, "get_usage_stats"):
usage_stats = model_client.get_usage_stats()
return {
diff --git a/backend/app/agents/project_chat_assistant/types.py b/backend/app/agents/project_chat_assistant/types.py
index ccf0280..ad8c1b1 100644
--- a/backend/app/agents/project_chat_assistant/types.py
+++ b/backend/app/agents/project_chat_assistant/types.py
@@ -6,10 +6,11 @@
"""
from dataclasses import dataclass, field
-from typing import Any, Dict, List, Optional, TYPE_CHECKING
+from typing import TYPE_CHECKING, Any, Dict, List, Optional
from uuid import UUID
-from pydantic import BaseModel, Field as PydanticField
+from pydantic import BaseModel
+from pydantic import Field as PydanticField
if TYPE_CHECKING:
from app.agents.brainstorm_conversation.types import CrossProjectContext
@@ -18,6 +19,7 @@
@dataclass
class ExistingPhaseInfo:
"""Summary info about an existing brainstorming phase."""
+
phase_id: str
title: str
description: Optional[str] = None
@@ -26,6 +28,7 @@ class ExistingPhaseInfo:
@dataclass
class ExistingModuleInfo:
"""Summary info about an existing project-level module."""
+
module_id: str
title: str
description: Optional[str] = None
@@ -34,6 +37,7 @@ class ExistingModuleInfo:
class MCQOption(BaseModel):
"""A single option for an MCQ question."""
+
id: str
text: str
@@ -41,6 +45,7 @@ class MCQOption(BaseModel):
@dataclass
class ContainerContext:
"""Context about a target container for extension creation."""
+
container_id: str
container_title: str
initial_spec_summary: Optional[str] = None
@@ -60,6 +65,7 @@ class ProjectChatContext:
Note: Project creation is handled by the ProjectWizard, not by project-chat discussions.
Pre-phase discussions always operate within an existing project.
"""
+
# Identifiers
project_chat_id: UUID
org_id: UUID
@@ -78,7 +84,9 @@ class ProjectChatContext:
grounding_context: Optional[str] = None # Full agents.md content or summary (for LLM context)
# Repository info (brownfield indicator) - supports multiple repos
- repositories: List[Dict[str, Any]] = field(default_factory=list) # List of repos with slug, display_name, repo_url, default_branch, user_remarks, primary_language
+ repositories: List[Dict[str, Any]] = field(
+ default_factory=list
+ ) # List of repos with slug, display_name, repo_url, default_branch, user_remarks, primary_language
has_repositories: bool = False # True if project has any repos with code
# Code exploration capability
@@ -126,6 +134,7 @@ class ProjectChatAssistantResponse(BaseModel):
This is a Pydantic model to enable AutoGen's output_content_type feature
for structured JSON output from the LLM.
"""
+
# The reply text to show the user
reply_text: str
@@ -175,10 +184,7 @@ def to_response_data(self) -> Dict[str, Any]:
}
if self.mcq_options:
- data["mcq_options"] = [
- {"id": opt.id, "text": opt.text}
- for opt in self.mcq_options
- ]
+ data["mcq_options"] = [{"id": opt.id, "text": opt.text} for opt in self.mcq_options]
# Phase fields
if self.proposed_title:
diff --git a/backend/app/agents/project_chat_gating/agent.py b/backend/app/agents/project_chat_gating/agent.py
index 02cfe66..e378fc8 100644
--- a/backend/app/agents/project_chat_gating/agent.py
+++ b/backend/app/agents/project_chat_gating/agent.py
@@ -15,7 +15,7 @@
from .types import GatingResponse
if TYPE_CHECKING:
- from app.agents.llm_client import LLMCallLogger, LiteLLMChatCompletionClient
+ from app.agents.llm_client import LiteLLMChatCompletionClient, LLMCallLogger
logger = logging.getLogger(__name__)
diff --git a/backend/app/agents/response_parser.py b/backend/app/agents/response_parser.py
index cbec62f..7539d61 100644
--- a/backend/app/agents/response_parser.py
+++ b/backend/app/agents/response_parser.py
@@ -12,7 +12,6 @@
import json
import logging
-import re
from typing import Any, Dict, List, Optional, Union
logger = logging.getLogger(__name__)
@@ -55,7 +54,7 @@ def strip_markdown_json(text: str) -> str:
if not text:
return text
- lines = text.split('\n')
+ lines = text.split("\n")
# Need at least 2 lines for opening and closing fences
if len(lines) < 2:
@@ -66,18 +65,18 @@ def strip_markdown_json(text: str) -> str:
# Check for opening fence (```, ```json, ```JSON, etc.)
# and closing fence (must be exactly ```)
- if first_line.startswith('```') and last_line == '```':
+ if first_line.startswith("```") and last_line == "```":
# Remove first and last lines only, preserving everything in between
- inner_content = '\n'.join(lines[1:-1])
+ inner_content = "\n".join(lines[1:-1])
return inner_content.strip()
# Handle case where there's trailing content after closing fence
# e.g., "```json\n{...}\n```\n\nSome explanation"
# Find the last line that is exactly "```"
for i in range(len(lines) - 1, 0, -1):
- if lines[i].strip() == '```':
- if first_line.startswith('```'):
- inner_content = '\n'.join(lines[1:i])
+ if lines[i].strip() == "```":
+ if first_line.startswith("```"):
+ inner_content = "\n".join(lines[1:i])
return inner_content.strip()
break
@@ -101,7 +100,7 @@ def strip_markdown_content(text: str) -> str:
if not text:
return text
- lines = text.split('\n')
+ lines = text.split("\n")
if len(lines) < 2:
return text
@@ -110,18 +109,18 @@ def strip_markdown_content(text: str) -> str:
last_line = lines[-1].strip()
# Check for opening fence and closing fence
- opens_with_fence = first_line.startswith('```')
- closes_with_fence = last_line == '```'
+ opens_with_fence = first_line.startswith("```")
+ closes_with_fence = last_line == "```"
if opens_with_fence and closes_with_fence:
- inner_content = '\n'.join(lines[1:-1])
+ inner_content = "\n".join(lines[1:-1])
return inner_content.strip()
# Handle trailing content after fence
for i in range(len(lines) - 1, 0, -1):
- if lines[i].strip() == '```':
+ if lines[i].strip() == "```":
if opens_with_fence:
- inner_content = '\n'.join(lines[1:i])
+ inner_content = "\n".join(lines[1:i])
return inner_content.strip()
break
@@ -169,8 +168,8 @@ def extract_json_from_text(text: str) -> Optional[str]:
text = text.strip()
# Find the start of JSON (either { or [)
- obj_start = text.find('{')
- arr_start = text.find('[')
+ obj_start = text.find("{")
+ arr_start = text.find("[")
if obj_start == -1 and arr_start == -1:
return None
@@ -178,17 +177,17 @@ def extract_json_from_text(text: str) -> Optional[str]:
# Determine which comes first
if obj_start == -1:
start = arr_start
- open_char, close_char = '[', ']'
+ open_char, close_char = "[", "]"
elif arr_start == -1:
start = obj_start
- open_char, close_char = '{', '}'
+ open_char, close_char = "{", "}"
else:
if obj_start < arr_start:
start = obj_start
- open_char, close_char = '{', '}'
+ open_char, close_char = "{", "}"
else:
start = arr_start
- open_char, close_char = '[', ']'
+ open_char, close_char = "[", "]"
# Track brackets to find matching close
depth = 0
@@ -200,7 +199,7 @@ def extract_json_from_text(text: str) -> Optional[str]:
escape_next = False
continue
- if char == '\\':
+ if char == "\\":
escape_next = True
continue
@@ -214,15 +213,12 @@ def extract_json_from_text(text: str) -> Optional[str]:
elif char == close_char:
depth -= 1
if depth == 0:
- return text[start:i + 1]
+ return text[start : i + 1]
return None
-def parse_json_response(
- text: str,
- fallback_to_raw: bool = False
-) -> Union[Dict[str, Any], List[Any], str]:
+def parse_json_response(text: str, fallback_to_raw: bool = False) -> Union[Dict[str, Any], List[Any], str]:
"""
Parse a JSON response from an LLM with multiple fallback strategies.
@@ -268,23 +264,15 @@ def parse_json_response(
# Strategy 4: Fallback to raw text
if fallback_to_raw:
- logger.warning(
- f"Failed to parse JSON from response, returning raw text. "
- f"Preview: {cleaned[:100]}..."
- )
+ logger.warning(f"Failed to parse JSON from response, returning raw text. Preview: {cleaned[:100]}...")
return cleaned
# All strategies failed
- raise json.JSONDecodeError(
- f"Failed to parse JSON from response",
- cleaned,
- 0
- )
+ raise json.JSONDecodeError("Failed to parse JSON from response", cleaned, 0)
def safe_parse_json(
- text: str,
- default: Optional[Union[Dict[str, Any], List[Any]]] = None
+ text: str, default: Optional[Union[Dict[str, Any], List[Any]]] = None
) -> Union[Dict[str, Any], List[Any]]:
"""
Safely parse JSON, returning a default value on failure.
diff --git a/backend/app/agents/retry.py b/backend/app/agents/retry.py
index a850762..ec2e831 100644
--- a/backend/app/agents/retry.py
+++ b/backend/app/agents/retry.py
@@ -18,7 +18,7 @@ async def create(self, messages):
import asyncio
import logging
-from typing import Any, Awaitable, Callable, List, Optional, Tuple, Type, TypeVar
+from typing import Awaitable, Callable, List, Optional, Tuple, Type, TypeVar
import litellm.exceptions as litellm_exc
from tenacity import (
@@ -242,9 +242,7 @@ def before_sleep_callback(retry_state: RetryCallState) -> None:
with attempt:
result = await func()
if attempt.retry_state.attempt_number > 1:
- logger.info(
- f"Retry succeeded on attempt {attempt.retry_state.attempt_number}"
- )
+ logger.info(f"Retry succeeded on attempt {attempt.retry_state.attempt_number}")
return result
except Exception as e:
# Check if it's a retryable exception that exhausted all attempts
@@ -281,9 +279,7 @@ async def _with_retry_legacy(
return result
except Exception as e:
last_exception = e
- logger.warning(
- f"Attempt {attempt + 1}/{max_attempts} failed: {type(e).__name__}: {e}"
- )
+ logger.warning(f"Attempt {attempt + 1}/{max_attempts} failed: {type(e).__name__}: {e}")
# Call the on_retry callback if provided
if on_retry is not None:
@@ -306,9 +302,7 @@ async def _with_retry_legacy(
)
-def _calculate_legacy_backoff(
- attempt: int, backoff_ms: Optional[List[int]] = None
-) -> float:
+def _calculate_legacy_backoff(attempt: int, backoff_ms: Optional[List[int]] = None) -> float:
"""Calculate backoff delay for legacy mode."""
if backoff_ms is None:
backoff_ms = LEGACY_BACKOFF_MS
@@ -318,9 +312,7 @@ def _calculate_legacy_backoff(
# Backward-compatible alias
-def calculate_backoff_delay(
- attempt: int, backoff_ms: Optional[List[int]] = None
-) -> float:
+def calculate_backoff_delay(attempt: int, backoff_ms: Optional[List[int]] = None) -> float:
"""
Calculate the backoff delay for a given attempt.
diff --git a/backend/app/auth/__init__.py b/backend/app/auth/__init__.py
index 32ae8fa..da826d1 100644
--- a/backend/app/auth/__init__.py
+++ b/backend/app/auth/__init__.py
@@ -1,6 +1,7 @@
"""
Authentication utilities and dependencies.
"""
+
from app.auth.providers import (
KNOWN_PROVIDERS,
get_configured_providers,
diff --git a/backend/app/auth/api_key_utils.py b/backend/app/auth/api_key_utils.py
index 015c930..ff1b6e7 100644
--- a/backend/app/auth/api_key_utils.py
+++ b/backend/app/auth/api_key_utils.py
@@ -2,6 +2,7 @@
import hashlib
import uuid
+
import bcrypt
diff --git a/backend/app/auth/dependencies.py b/backend/app/auth/dependencies.py
index 1081e25..54f0e5e 100644
--- a/backend/app/auth/dependencies.py
+++ b/backend/app/auth/dependencies.py
@@ -4,28 +4,27 @@
Provides dependency functions for extracting and validating the current user
from JWT tokens, session cookies, or API keys in requests.
"""
+
from datetime import datetime, timezone
from typing import Annotated
-from uuid import UUID
-from fastapi import Cookie, Depends, HTTPException, Request, status, Header
-from fastapi.security import OAuth2PasswordBearer, HTTPBearer
+from fastapi import Cookie, Depends, Header, HTTPException, status
+from fastapi.security import HTTPBearer, OAuth2PasswordBearer
from jose import JWTError
from sqlalchemy.orm import Session
-from app.auth.utils import decode_access_token
-from app.auth.api_key_utils import verify_api_key, hash_api_key_sha256
+from app.auth.api_key_utils import hash_api_key_sha256, verify_api_key
from app.auth.encryption_utils import verify_api_key_encrypted
+from app.auth.utils import decode_access_token
from app.config import settings
from app.database import get_db
-from app.models.user import User
from app.models.api_key import ApiKey
from app.models.project import Project
-from app.services.user_service import UserService
+from app.models.user import User
+from app.services.mcp_oauth_service import MCPOAuthService
from app.services.project_service import ProjectService
from app.services.project_share_service import ProjectShareService
-from app.services.mcp_oauth_service import MCPOAuthService
-
+from app.services.user_service import UserService
# OAuth2 scheme for token extraction (makes it optional so we can fall back to cookie)
oauth2_scheme = OAuth2PasswordBearer(tokenUrl="/api/v1/auth/login", auto_error=False)
@@ -207,11 +206,7 @@ async def __call__(
# Fast O(1) lookup via SHA-256 index
lookup_hash = hash_api_key_sha256(api_key)
- matched_key = (
- db.query(ApiKey)
- .filter(ApiKey.key_lookup_hash == lookup_hash, ApiKey.revoked == False)
- .first()
- )
+ matched_key = db.query(ApiKey).filter(ApiKey.key_lookup_hash == lookup_hash, ApiKey.revoked == False).first()
if matched_key:
# Verify the key actually matches (SHA-256 collision safety)
@@ -223,11 +218,7 @@ async def __call__(
# Fallback: scan legacy keys that don't have a lookup hash yet
if not matched_key:
- legacy_keys = (
- db.query(ApiKey)
- .filter(ApiKey.key_lookup_hash.is_(None), ApiKey.revoked == False)
- .all()
- )
+ legacy_keys = db.query(ApiKey).filter(ApiKey.key_lookup_hash.is_(None), ApiKey.revoked == False).all()
for key in legacy_keys:
if key.key_encrypted:
if verify_api_key_encrypted(api_key, key.key_encrypted):
@@ -262,9 +253,7 @@ async def __call__(
)
# Verify user has access to this project (direct, via group, or via org)
- has_access = ProjectShareService.user_has_project_access(
- db, project.id, user.id
- )
+ has_access = ProjectShareService.user_has_project_access(db, project.id, user.id)
if not has_access:
# Return 404 for privacy (don't reveal project exists)
raise HTTPException(
@@ -329,9 +318,7 @@ async def __call__(
credentials_exception = HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Invalid or missing credentials",
- headers={
- "WWW-Authenticate": f'Bearer resource="{resource_url}"'
- },
+ headers={"WWW-Authenticate": f'Bearer resource="{resource_url}"'},
)
if not authorization:
@@ -351,6 +338,7 @@ async def __call__(
# Try to decode as JWT
import logging
+
logger = logging.getLogger(__name__)
try:
@@ -392,9 +380,7 @@ async def __call__(
project = ProjectService.get_by_identifier(db, self.project_id)
if project:
# Verify user has access to this project (direct or via group)
- has_access = ProjectShareService.user_has_project_access(
- db, project.id, user.id
- )
+ has_access = ProjectShareService.user_has_project_access(db, project.id, user.id)
if has_access:
return user, project, None
diff --git a/backend/app/auth/domain_validation.py b/backend/app/auth/domain_validation.py
index 8f5007f..1048e59 100644
--- a/backend/app/auth/domain_validation.py
+++ b/backend/app/auth/domain_validation.py
@@ -1,4 +1,5 @@
"""Email domain validation for signup."""
+
from app.config import settings
diff --git a/backend/app/auth/platform_admin.py b/backend/app/auth/platform_admin.py
index ac4b3a1..63ea31f 100644
--- a/backend/app/auth/platform_admin.py
+++ b/backend/app/auth/platform_admin.py
@@ -1,4 +1,5 @@
"""Platform admin authorization helpers."""
+
from typing import Annotated
from fastapi import Depends, HTTPException, status
diff --git a/backend/app/auth/providers.py b/backend/app/auth/providers.py
index 1891161..a7e0bba 100644
--- a/backend/app/auth/providers.py
+++ b/backend/app/auth/providers.py
@@ -7,12 +7,12 @@
Enterprise auth providers (e.g. Scalekit SSO) are registered via the plugin
 registry – see app/plugin_registry.py.
"""
+
from authlib.integrations.starlette_client import OAuth
from app.config import settings
from app.schemas.oauth import NormalizedUserInfo
-
# Global OAuth registry instance
oauth = OAuth()
diff --git a/backend/app/auth/service.py b/backend/app/auth/service.py
index c157ba8..326f970 100644
--- a/backend/app/auth/service.py
+++ b/backend/app/auth/service.py
@@ -32,6 +32,7 @@
- Check expires_at and refresh tokens proactively
- Consistent with existing OrgBugTracker token encryption pattern
"""
+
from datetime import datetime, timezone
from sqlalchemy.orm import Session
@@ -42,7 +43,6 @@
from app.models.user_identity import UserIdentity
from app.schemas.oauth import NormalizedUserInfo
-
# Hardcoded provider mapping for Phase 1
# In Phase 2, this could be read from the database for external IdPs
PROVIDER_CONFIG = {
@@ -103,16 +103,11 @@ def get_or_create_identity_provider(
# Validate slug
if provider_slug not in PROVIDER_CONFIG:
raise ValueError(
- f"Unknown provider: '{provider_slug}'. "
- f"Valid providers are: {', '.join(PROVIDER_CONFIG.keys())}"
+ f"Unknown provider: '{provider_slug}'. Valid providers are: {', '.join(PROVIDER_CONFIG.keys())}"
)
# Try to find existing provider
- provider = (
- db.query(IdentityProvider)
- .filter(IdentityProvider.slug == provider_slug)
- .first()
- )
+ provider = db.query(IdentityProvider).filter(IdentityProvider.slug == provider_slug).first()
if provider:
return provider
@@ -248,8 +243,4 @@ def get_user_identities(
Returns:
List of UserIdentity instances
"""
- return (
- db.query(UserIdentity)
- .filter(UserIdentity.user_id == user_id)
- .all()
- )
+ return db.query(UserIdentity).filter(UserIdentity.user_id == user_id).all()
diff --git a/backend/app/auth/trial.py b/backend/app/auth/trial.py
index 442351d..2a2d8f5 100644
--- a/backend/app/auth/trial.py
+++ b/backend/app/auth/trial.py
@@ -15,7 +15,8 @@
- No token limits enforced
- No trial expiration checks
"""
-from datetime import datetime, timezone, timedelta
+
+from datetime import datetime, timedelta, timezone
from typing import Annotated, Optional
from fastapi import Depends, HTTPException, status
@@ -33,6 +34,7 @@
def _has_plan_plugin() -> bool:
"""Check if the plan enforcement plugin is registered."""
from app.plugin_registry import get_plugin_registry
+
return get_plugin_registry().plan_plugin is not None
diff --git a/backend/app/auth/utils.py b/backend/app/auth/utils.py
index b0651d5..1505d46 100644
--- a/backend/app/auth/utils.py
+++ b/backend/app/auth/utils.py
@@ -3,8 +3,9 @@
Provides password hashing and JWT token management utilities.
"""
-from datetime import datetime, timedelta, UTC
-from typing import Dict, Any
+
+from datetime import UTC, datetime, timedelta
+from typing import Any, Dict
import bcrypt
from jose import JWTError, jwt
@@ -23,14 +24,14 @@ def hash_password(password: str) -> str:
Bcrypt hashed password string
"""
# Convert password to bytes (bcrypt requires bytes)
- password_bytes = password.encode('utf-8')
+ password_bytes = password.encode("utf-8")
# Generate salt and hash
salt = bcrypt.gensalt()
hashed = bcrypt.hashpw(password_bytes, salt)
# Return as string for database storage
- return hashed.decode('utf-8')
+ return hashed.decode("utf-8")
def verify_password(plain_password: str, hashed_password: str) -> bool:
@@ -45,8 +46,8 @@ def verify_password(plain_password: str, hashed_password: str) -> bool:
True if the password matches, False otherwise
"""
# Convert both to bytes
- password_bytes = plain_password.encode('utf-8')
- hashed_bytes = hashed_password.encode('utf-8')
+ password_bytes = plain_password.encode("utf-8")
+ hashed_bytes = hashed_password.encode("utf-8")
# Verify
return bcrypt.checkpw(password_bytes, hashed_bytes)
diff --git a/backend/app/config.py b/backend/app/config.py
index cb6cd06..171834f 100644
--- a/backend/app/config.py
+++ b/backend/app/config.py
@@ -44,13 +44,9 @@ class Settings(BaseSettings):
default="postgresql://mfbt:iammfbt@localhost:5432/mfbt_dev",
description="PostgreSQL connection URL",
)
- database_echo: bool = Field(
- default=False, description="Echo SQL queries (for debugging)"
- )
+ database_echo: bool = Field(default=False, description="Echo SQL queries (for debugging)")
database_pool_size: int = Field(default=10, description="Database connection pool size")
- database_max_overflow: int = Field(
- default=20, description="Max overflow connections"
- )
+ database_max_overflow: int = Field(default=20, description="Max overflow connections")
# Testing
test_database_url: PostgresDsn = Field(
@@ -86,9 +82,7 @@ def kafka_bootstrap_servers(self) -> str:
default="",
description="Encryption key for API keys and integration tokens (used with PBKDF2)",
)
- access_token_expire_minutes: int = Field(
- default=129600, description="Access token expiration time in minutes"
- )
+ access_token_expire_minutes: int = Field(default=129600, description="Access token expiration time in minutes")
# OAuth Providers (Phase 9)
google_client_id: str | None = Field(
@@ -235,6 +229,7 @@ def _default_slack_command_name(self) -> "Settings":
if not self.slack_command_name:
self.slack_command_name = "/mfbt"
return self
+
slack_oauth_redirect_url: str | None = Field(
default=None,
description="Override for Slack OAuth redirect URL (e.g. ngrok tunnel). If empty, constructed from BASE_URL.",
@@ -253,33 +248,21 @@ def platform_admin_emails(self) -> set[str]:
"""Parse platform admin emails into a set for fast lookup."""
if not self.platform_admins:
return set()
- return {
- email.strip().lower()
- for email in self.platform_admins.split(",")
- if email.strip()
- }
+ return {email.strip().lower() for email in self.platform_admins.split(",") if email.strip()}
@property
def permitted_domains(self) -> set[str]:
"""Parse permitted signup domains into a set for fast lookup."""
if not self.permitted_signup_domains:
return set()
- return {
- domain.strip().lower()
- for domain in self.permitted_signup_domains.split(",")
- if domain.strip()
- }
+ return {domain.strip().lower() for domain in self.permitted_signup_domains.split(",") if domain.strip()}
@property
def trial_exempted_emails(self) -> set[str]:
"""Parse trial-exempted emails into a set for fast lookup."""
if not self.trial_mode_exempted_user_emails:
return set()
- return {
- email.strip().lower()
- for email in self.trial_mode_exempted_user_emails.split(",")
- if email.strip()
- }
+ return {email.strip().lower() for email in self.trial_mode_exempted_user_emails.split(",") if email.strip()}
@property
def trial_exempted_domains(self) -> set[str]:
@@ -287,9 +270,7 @@ def trial_exempted_domains(self) -> set[str]:
if not self.trial_mode_exempted_email_domains:
return set()
return {
- domain.strip().lower()
- for domain in self.trial_mode_exempted_email_domains.split(",")
- if domain.strip()
+ domain.strip().lower() for domain in self.trial_mode_exempted_email_domains.split(",") if domain.strip()
}
@property
@@ -331,10 +312,7 @@ def has_slack_oauth_config(self) -> bool:
@property
def has_github_oauth_env_config(self) -> bool:
"""Check if GitHub OAuth env vars are configured with non-empty values."""
- return bool(
- self.github_integration_oauth_client_id
- and self.github_integration_oauth_client_secret
- )
+ return bool(self.github_integration_oauth_client_id and self.github_integration_oauth_client_secret)
@lru_cache
diff --git a/backend/app/database.py b/backend/app/database.py
index d249ad3..2069737 100644
--- a/backend/app/database.py
+++ b/backend/app/database.py
@@ -3,17 +3,16 @@
Provides SQLAlchemy engine, session factory, and base model class.
"""
+
from contextlib import asynccontextmanager
from typing import AsyncGenerator, Generator
-from sqlalchemy import create_engine, MetaData
+from sqlalchemy import MetaData, create_engine
from sqlalchemy.ext.asyncio import AsyncSession, async_sessionmaker, create_async_engine
-from sqlalchemy.orm import DeclarativeBase, sessionmaker, Session
-from sqlalchemy.pool import NullPool
+from sqlalchemy.orm import DeclarativeBase, Session, sessionmaker
from app.config import settings
-
# Naming convention for constraints (helpful for Alembic migrations)
NAMING_CONVENTION = {
"ix": "ix_%(column_0_label)s",
diff --git a/backend/app/integrations/__init__.py b/backend/app/integrations/__init__.py
index 3458ede..1e68add 100644
--- a/backend/app/integrations/__init__.py
+++ b/backend/app/integrations/__init__.py
@@ -1,4 +1,5 @@
"""Bug tracker integrations."""
+
from app.integrations.base import BugTrackerAdapter, TicketData
__all__ = ["BugTrackerAdapter", "TicketData"]
diff --git a/backend/app/integrations/base.py b/backend/app/integrations/base.py
index 53f1373..646a20d 100644
--- a/backend/app/integrations/base.py
+++ b/backend/app/integrations/base.py
@@ -1,4 +1,5 @@
"""Base interface for bug tracker adapters."""
+
from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from typing import Any
diff --git a/backend/app/integrations/factory.py b/backend/app/integrations/factory.py
index 4e8c022..1e4c3d4 100644
--- a/backend/app/integrations/factory.py
+++ b/backend/app/integrations/factory.py
@@ -1,4 +1,5 @@
"""Factory for creating bug tracker adapters."""
+
from typing import Any
from app.integrations.base import BugTrackerAdapter
@@ -7,9 +8,7 @@
from app.integrations.jira import JiraAdapter
-def get_adapter(
- provider: str, token: str, config: dict[str, Any] | None = None
-) -> BugTrackerAdapter:
+def get_adapter(provider: str, token: str, config: dict[str, Any] | None = None) -> BugTrackerAdapter:
"""Get the appropriate bug tracker adapter for the given provider.
Args:
@@ -31,9 +30,6 @@ def get_adapter(
adapter_class = adapters.get(provider.lower())
if not adapter_class:
- raise ValueError(
- f"Unsupported provider: {provider}. "
- f"Supported providers: {', '.join(adapters.keys())}"
- )
+ raise ValueError(f"Unsupported provider: {provider}. Supported providers: {', '.join(adapters.keys())}")
return adapter_class(token=token, config=config)
diff --git a/backend/app/integrations/github.py b/backend/app/integrations/github.py
index 0100c26..ce78b6d 100644
--- a/backend/app/integrations/github.py
+++ b/backend/app/integrations/github.py
@@ -1,4 +1,5 @@
"""GitHub Issues adapter with PAT, GitHub App, and OAuth authentication support."""
+
import re
import time
from typing import Any
@@ -261,9 +262,7 @@ async def fetch_ticket(self, external_id: str) -> TicketData:
# Parse external_id (format: "owner/repo#123")
match = re.match(r"^([^/]+)/([^#]+)#(\d+)$", external_id)
if not match:
- raise ValueError(
- f"Invalid GitHub issue format: {external_id}. Expected 'owner/repo#123'"
- )
+ raise ValueError(f"Invalid GitHub issue format: {external_id}. Expected 'owner/repo#123'")
owner, repo, issue_number = match.groups()
@@ -281,9 +280,7 @@ async def fetch_ticket(self, external_id: str) -> TicketData:
comments_url = issue_data.get("comments_url")
comments_data = []
if comments_url and issue_data.get("comments", 0) > 0:
- comments_response = await client.get(
- comments_url, headers=headers, timeout=30.0
- )
+ comments_response = await client.get(comments_url, headers=headers, timeout=30.0)
comments_response.raise_for_status()
raw_comments = comments_response.json()
comments_data = [
@@ -310,13 +307,8 @@ async def fetch_ticket(self, external_id: str) -> TicketData:
"updated_at": issue_data.get("updated_at"),
"closed_at": issue_data.get("closed_at"),
"author": issue_data.get("user", {}).get("login", "unknown"),
- "assignees": [
- assignee.get("login")
- for assignee in issue_data.get("assignees", [])
- ],
- "milestone": issue_data.get("milestone", {}).get("title")
- if issue_data.get("milestone")
- else None,
+ "assignees": [assignee.get("login") for assignee in issue_data.get("assignees", [])],
+ "milestone": issue_data.get("milestone", {}).get("title") if issue_data.get("milestone") else None,
}
return TicketData(
@@ -349,9 +341,7 @@ async def list_repositories(self, page: int = 1, per_page: int = 100) -> list[di
# For GitHub App, use installation repositories endpoint
url = f"{self.BASE_URL}/installation/repositories"
params = {"per_page": per_page, "page": page}
- response = await client.get(
- url, headers=headers, params=params, timeout=30.0
- )
+ response = await client.get(url, headers=headers, params=params, timeout=30.0)
response.raise_for_status()
data = response.json()
return data.get("repositories", [])
@@ -370,9 +360,7 @@ async def list_repositories(self, page: int = 1, per_page: int = 100) -> list[di
"sort": "updated",
"affiliation": "owner,collaborator,organization_member",
}
- response = await client.get(
- url, headers=headers, params=params, timeout=30.0
- )
+ response = await client.get(url, headers=headers, params=params, timeout=30.0)
response.raise_for_status()
return response.json()
@@ -400,9 +388,7 @@ async def search_issues(
"""
# Validate repo format
if "/" not in repo_or_project:
- raise ValueError(
- f"Invalid repository format: {repo_or_project}. Expected 'owner/repo'"
- )
+ raise ValueError(f"Invalid repository format: {repo_or_project}. Expected 'owner/repo'")
# Build search query
# GitHub search syntax: repo:owner/repo is:issue query state:open
diff --git a/backend/app/integrations/gitlab.py b/backend/app/integrations/gitlab.py
index 4d9dc89..163d271 100644
--- a/backend/app/integrations/gitlab.py
+++ b/backend/app/integrations/gitlab.py
@@ -1,4 +1,5 @@
"""GitLab adapter (stub implementation)."""
+
from typing import Any
from app.integrations.base import BugTrackerAdapter, IssueSearchResult, TicketData
diff --git a/backend/app/integrations/jira.py b/backend/app/integrations/jira.py
index 47f098f..cda77d3 100644
--- a/backend/app/integrations/jira.py
+++ b/backend/app/integrations/jira.py
@@ -1,4 +1,5 @@
"""Jira adapter."""
+
import base64
import logging
import re
@@ -121,18 +122,14 @@ async def test_connection(self) -> dict[str, Any]:
# Log response body for debugging non-200 responses
try:
response_text = response.text[:500]
- logger.warning(
- f"Jira test_connection: non-200 response body: {response_text}"
- )
+ logger.warning(f"Jira test_connection: non-200 response body: {response_text}")
except Exception:
pass
response.raise_for_status()
user_data = response.json()
- display_name = user_data.get(
- "displayName", user_data.get("emailAddress", "unknown")
- )
+ display_name = user_data.get("displayName", user_data.get("emailAddress", "unknown"))
logger.info(f"Jira test_connection: success, connected as {display_name}")
return {
"success": True,
@@ -146,9 +143,7 @@ async def test_connection(self) -> dict[str, Any]:
error_detail = ""
try:
error_body = e.response.text[:500]
- logger.error(
- f"Jira test_connection: HTTP {status_code} error, body: {error_body}"
- )
+ logger.error(f"Jira test_connection: HTTP {status_code} error, body: {error_body}")
error_detail = f" - {error_body}" if error_body else ""
except Exception:
logger.error(f"Jira test_connection: HTTP {status_code} error")
@@ -222,9 +217,7 @@ async def fetch_ticket(self, external_id: str) -> TicketData:
else:
# Validate issue key format (e.g., "PROJ-123")
if not re.match(r"^[A-Z]+-\d+$", external_id):
- raise ValueError(
- f"Invalid Jira issue key: {external_id}. Expected 'PROJECTKEY-123'"
- )
+ raise ValueError(f"Invalid Jira issue key: {external_id}. Expected 'PROJECTKEY-123'")
issue_key = external_id
# Fetch issue data
@@ -240,9 +233,7 @@ async def fetch_ticket(self, external_id: str) -> TicketData:
"fields": "summary,description,status,labels,comment,created,updated,resolution,assignee,reporter,issuetype"
}
- response = await client.get(
- api_url, headers=headers, params=params, timeout=30.0
- )
+ response = await client.get(api_url, headers=headers, params=params, timeout=30.0)
response.raise_for_status()
issue_data = response.json()
@@ -273,11 +264,13 @@ async def fetch_ticket(self, external_id: str) -> TicketData:
comment_body = self._extract_text_from_adf(comment_body)
elif comment_body is None:
comment_body = ""
- comments_data.append({
- "author": author_info.get("displayName") or author_info.get("emailAddress", "unknown"),
- "body": comment_body,
- "created_at": comment.get("created"),
- })
+ comments_data.append(
+ {
+ "author": author_info.get("displayName") or author_info.get("emailAddress", "unknown"),
+ "body": comment_body,
+ "created_at": comment.get("created"),
+ }
+ )
# Build metadata (use site_url for human-readable browse links)
metadata = {
@@ -467,9 +460,7 @@ async def search_issues(
"maxResults": 30, # Limit for import UI
}
- response = await client.post(
- api_url, headers=headers, json=payload, timeout=30.0
- )
+ response = await client.post(api_url, headers=headers, json=payload, timeout=30.0)
response.raise_for_status()
data = response.json()
@@ -534,9 +525,7 @@ async def list_projects(self) -> list[dict]:
api_url = f"{api_base_url}/rest/api/3/project"
params = {"maxResults": 100, "orderBy": "name"}
- response = await client.get(
- api_url, headers=headers, params=params, timeout=30.0
- )
+ response = await client.get(api_url, headers=headers, params=params, timeout=30.0)
response.raise_for_status()
projects = response.json()
diff --git a/backend/app/main.py b/backend/app/main.py
index ae63291..038f909 100644
--- a/backend/app/main.py
+++ b/backend/app/main.py
@@ -6,21 +6,64 @@
To run the server:
uv run uvicorn app.main:app --reload
"""
+
import asyncio
import logging
from contextlib import asynccontextmanager
from fastapi import Depends, FastAPI
from fastapi.middleware.cors import CORSMiddleware
-from fastapi.responses import JSONResponse
from starlette.middleware.sessions import SessionMiddleware
from app.auth.trial import require_active_trial, require_tokens_available
from app.config import settings
-from app.routers import auth, orgs, projects, project_repositories, threads, thread_items, integrations, llm_preferences, jobs, websocket, testing, api_keys, mcp_http
-from app.routers import brainstorming_phases, modules, features, drafts, activity, agent_api, llm_call_logs, mcp_call_logs, team_roles, grounding, feature_content_versions, platform_settings, email_templates, implementations, phase_containers, grounding_notes
-from app.routers import invitations, invite_acceptance, project_shares, user_groups, user_question_sessions, thread_images, dashboard, images, mcp_images, form_drafts, project_chats, org_chats, project_chat_images, conversations
-from app.routers import analytics, plan_recommendations
+from app.routers import (
+ activity,
+ agent_api,
+ analytics,
+ api_keys,
+ auth,
+ brainstorming_phases,
+ conversations,
+ dashboard,
+ drafts,
+ email_templates,
+ feature_content_versions,
+ features,
+ form_drafts,
+ grounding,
+ grounding_notes,
+ images,
+ implementations,
+ integrations,
+ invitations,
+ invite_acceptance,
+ jobs,
+ llm_call_logs,
+ llm_preferences,
+ mcp_call_logs,
+ mcp_http,
+ mcp_images,
+ modules,
+ org_chats,
+ orgs,
+ phase_containers,
+ plan_recommendations,
+ platform_settings,
+ project_chat_images,
+ project_chats,
+ project_repositories,
+ project_shares,
+ projects,
+ team_roles,
+ testing,
+ thread_images,
+ thread_items,
+ threads,
+ user_groups,
+ user_question_sessions,
+ websocket,
+)
logger = logging.getLogger(__name__)
@@ -33,7 +76,7 @@ async def lifespan(app: FastAPI):
Starts the WebSocket broadcast consumer on startup and stops it on shutdown.
"""
# Startup
- with open('/tmp/mfbt_lifespan.log', 'a') as f:
+ with open("/tmp/mfbt_lifespan.log", "a") as f:
f.write("=" * 80 + "\n")
f.write("LIFESPAN STARTUP CALLED\n")
f.write("=" * 80 + "\n")
@@ -44,8 +87,8 @@ async def lifespan(app: FastAPI):
logger.info("Starting WebSocket broadcast consumer...")
try:
- from app.websocket.broadcast_consumer import get_broadcast_consumer
from app.services.kafka_producer import get_sync_kafka_producer
+ from app.websocket.broadcast_consumer import get_broadcast_consumer
# Start sync Kafka producer
sync_producer = get_sync_kafka_producer()
@@ -64,6 +107,7 @@ async def lifespan(app: FastAPI):
except Exception as e:
print(f"ERROR creating broadcast consumer: {e}")
import traceback
+
traceback.print_exc()
raise
diff --git a/backend/app/mcp/server.py b/backend/app/mcp/server.py
index ef41aa1..f8a86d2 100644
--- a/backend/app/mcp/server.py
+++ b/backend/app/mcp/server.py
@@ -5,7 +5,7 @@
from mcp.server import FastMCP
from sqlalchemy import create_engine
-from sqlalchemy.orm import Session, sessionmaker
+from sqlalchemy.orm import sessionmaker
from app.mcp.tools.append_feature_note import append_feature_note
from app.mcp.tools.create_clarification_question import create_clarification_question
@@ -132,9 +132,7 @@ async def handle_get_toc(
) -> dict:
"""Get table of contents for spec or prompt plan."""
with get_db() as db:
- return get_toc(
- db, project_id=project_id, project_key=project_key, target=target
- )
+ return get_toc(db, project_id=project_id, project_key=project_key, target=target)
# Register getSection tool
@mcp.tool(
diff --git a/backend/app/mcp/tools/append_feature_note.py b/backend/app/mcp/tools/append_feature_note.py
index f44dffc..f151fb7 100644
--- a/backend/app/mcp/tools/append_feature_note.py
+++ b/backend/app/mcp/tools/append_feature_note.py
@@ -1,7 +1,8 @@
"""appendFeatureNote MCP tool - append a note to a feature's implementation notes."""
-from typing import Optional, Dict, Any
+from typing import Any, Dict, Optional
from uuid import UUID
+
from sqlalchemy.orm import Session
from app.mcp.utils.project_resolver import resolve_project
diff --git a/backend/app/mcp/tools/create_clarification_question.py b/backend/app/mcp/tools/create_clarification_question.py
index 9ede8c0..ce96486 100644
--- a/backend/app/mcp/tools/create_clarification_question.py
+++ b/backend/app/mcp/tools/create_clarification_question.py
@@ -1,11 +1,12 @@
"""createClarificationQuestion MCP tool - create a clarification question thread."""
-from typing import Optional, Dict, Any
+from typing import Any, Dict, Optional
+
from sqlalchemy.orm import Session
from app.mcp.utils.project_resolver import resolve_project
-from app.services.thread_service import ThreadService
from app.models import ContextType
+from app.services.thread_service import ThreadService
def create_clarification_question(
@@ -88,7 +89,7 @@ def create_clarification_question(
"project_key": project.key,
"title": thread.title,
"pending_approval": thread.pending_approval,
- "context_type": thread.context_type.value if hasattr(thread.context_type, 'value') else thread.context_type,
+ "context_type": thread.context_type.value if hasattr(thread.context_type, "value") else thread.context_type,
"context_id": thread.context_id,
"created_at": thread.created_at.isoformat(),
"comment_preview": body[:200] + "..." if len(body) > 200 else body,
diff --git a/backend/app/mcp/tools/get_context.py b/backend/app/mcp/tools/get_context.py
index adfb1ec..74cbfe4 100644
--- a/backend/app/mcp/tools/get_context.py
+++ b/backend/app/mcp/tools/get_context.py
@@ -1,14 +1,15 @@
"""getContext MCP tool - retrieve full project context."""
-from typing import Optional, Dict, Any, List
+from typing import Any, Dict, List, Optional
+
from sqlalchemy.orm import Session
from app.mcp.utils.project_resolver import resolve_project
from app.models.brainstorming_phase import BrainstormingPhase
-from app.models.final_spec import FinalSpec
+from app.models.feature import Feature, FeatureStatus, FeatureType
from app.models.final_prompt_plan import FinalPromptPlan
+from app.models.final_spec import FinalSpec
from app.models.module import Module, ModuleType
-from app.models.feature import Feature, FeatureType, FeatureStatus
def get_context(
@@ -45,7 +46,7 @@ def get_context(
# Build project metadata
project_info = {
"id": str(project.id),
- "type": project.type.value if hasattr(project.type, 'value') else project.type,
+ "type": project.type.value if hasattr(project.type, "value") else project.type,
"name": project.name,
"key": project.key,
"parent_application_key": None,
@@ -68,18 +69,10 @@ def get_context(
phases_info: List[Dict[str, Any]] = []
for phase in phases:
# Get final spec for this phase
- final_spec = (
- db.query(FinalSpec)
- .filter(FinalSpec.brainstorming_phase_id == phase.id)
- .first()
- )
+ final_spec = db.query(FinalSpec).filter(FinalSpec.brainstorming_phase_id == phase.id).first()
# Get final prompt plan for this phase
- final_plan = (
- db.query(FinalPromptPlan)
- .filter(FinalPromptPlan.brainstorming_phase_id == phase.id)
- .first()
- )
+ final_plan = db.query(FinalPromptPlan).filter(FinalPromptPlan.brainstorming_phase_id == phase.id).first()
# Get implementation modules and features for this phase
modules = (
@@ -109,45 +102,47 @@ def get_context(
features_info: List[Dict[str, Any]] = []
for feature in features:
- features_info.append({
- "id": str(feature.id),
- "feature_key": feature.feature_key,
- "title": feature.title,
- "priority": feature.priority.value if hasattr(feature.priority, 'value') else feature.priority,
- "category": feature.category,
- "completion_status": feature.completion_status.value if hasattr(feature.completion_status, 'value') else feature.completion_status,
- "spec_text": feature.spec_text,
- "prompt_plan_text": feature.prompt_plan_text,
- "has_implementation_notes": bool(feature.implementation_notes),
- })
-
- modules_info.append({
- "id": str(module.id),
- "title": module.title,
- "description": module.description,
- "order_index": module.order_index,
- "features": features_info,
- "feature_count": len(features_info),
- })
+ features_info.append(
+ {
+ "id": str(feature.id),
+ "feature_key": feature.feature_key,
+ "title": feature.title,
+ "priority": feature.priority.value if hasattr(feature.priority, "value") else feature.priority,
+ "category": feature.category,
+ "completion_status": feature.completion_status.value
+ if hasattr(feature.completion_status, "value")
+ else feature.completion_status,
+ "spec_text": feature.spec_text,
+ "prompt_plan_text": feature.prompt_plan_text,
+ "has_implementation_notes": bool(feature.implementation_notes),
+ }
+ )
+
+ modules_info.append(
+ {
+ "id": str(module.id),
+ "title": module.title,
+ "description": module.description,
+ "order_index": module.order_index,
+ "features": features_info,
+ "feature_count": len(features_info),
+ }
+ )
# Calculate completion stats
total_features = sum(len(m["features"]) for m in modules_info)
completed_features = sum(
- 1 for m in modules_info
- for f in m["features"]
- if f["completion_status"] == "completed"
+ 1 for m in modules_info for f in m["features"] if f["completion_status"] == "completed"
)
in_progress_features = sum(
- 1 for m in modules_info
- for f in m["features"]
- if f["completion_status"] == "in_progress"
+ 1 for m in modules_info for f in m["features"] if f["completion_status"] == "in_progress"
)
phase_info = {
"id": str(phase.id),
"title": phase.title,
"description": phase.description,
- "phase_type": phase.phase_type.value if hasattr(phase.phase_type, 'value') else phase.phase_type,
+ "phase_type": phase.phase_type.value if hasattr(phase.phase_type, "value") else phase.phase_type,
"final_spec": {
"available": final_spec is not None,
"content_markdown": final_spec.content_markdown if final_spec else None,
diff --git a/backend/app/mcp/tools/get_feature_notes.py b/backend/app/mcp/tools/get_feature_notes.py
index 8903eef..1b56ce3 100644
--- a/backend/app/mcp/tools/get_feature_notes.py
+++ b/backend/app/mcp/tools/get_feature_notes.py
@@ -1,7 +1,8 @@
"""getFeatureNotes MCP tool - retrieve implementation notes for a feature."""
-from typing import Optional, Dict, Any
+from typing import Any, Dict, Optional
from uuid import UUID
+
from sqlalchemy.orm import Session
from app.mcp.utils.project_resolver import resolve_project
diff --git a/backend/app/mcp/tools/get_section.py b/backend/app/mcp/tools/get_section.py
index aa1238c..5d03ad6 100644
--- a/backend/app/mcp/tools/get_section.py
+++ b/backend/app/mcp/tools/get_section.py
@@ -1,11 +1,12 @@
"""getSection MCP tool - retrieve specific section from spec or prompt plan."""
-from typing import Optional, Dict, Any
+from typing import Any, Dict, Optional
+
from sqlalchemy.orm import Session
-from app.mcp.utils.project_resolver import resolve_project
from app.mcp.utils.markdown_parser import extract_section
-from app.models import SpecType, BrainstormingPhase
+from app.mcp.utils.project_resolver import resolve_project
+from app.models import BrainstormingPhase, SpecType
from app.services.spec_service import SpecService
@@ -66,17 +67,13 @@ def get_section(
# Fallback: Legacy project-level specs (SpecVersion with project_id)
if not content_markdown:
spec_type = SpecType.SPECIFICATION if target == "spec" else SpecType.PROMPT_PLAN
- active_spec = SpecService.get_active_spec(
- db, project_id=project.id, spec_type=spec_type
- )
+ active_spec = SpecService.get_active_spec(db, project_id=project.id, spec_type=spec_type)
if active_spec:
content_markdown = active_spec.content_markdown
content_json = active_spec.content_json
if not content_markdown:
- raise ValueError(
- f"No active {target} found for project {project.id}"
- )
+ raise ValueError(f"No active {target} found for project {project.id}")
# Extract section
section_markdown = None
@@ -92,8 +89,7 @@ def get_section(
# List available section IDs to help with debugging
available_ids = [s["id"] for s in content_json["sections"]]
raise ValueError(
- f"Section '{section_id}' not found in {target}. "
- f"Available section IDs: {', '.join(available_ids)}"
+ f"Section '{section_id}' not found in {target}. Available section IDs: {', '.join(available_ids)}"
)
else:
# Fallback: extract from markdown using header matching
diff --git a/backend/app/mcp/tools/get_toc.py b/backend/app/mcp/tools/get_toc.py
index 2630f27..5a96cb1 100644
--- a/backend/app/mcp/tools/get_toc.py
+++ b/backend/app/mcp/tools/get_toc.py
@@ -1,11 +1,12 @@
"""getToc MCP tool - retrieve table of contents for spec or prompt plan."""
-from typing import Optional, Dict, Any, List
+from typing import Any, Dict, List, Optional
+
from sqlalchemy.orm import Session
+from app.mcp.utils.markdown_parser import TocEntry, extract_toc
from app.mcp.utils.project_resolver import resolve_project
-from app.mcp.utils.markdown_parser import extract_toc, TocEntry
-from app.models import SpecType, FinalSpec, FinalPromptPlan, BrainstormingPhase
+from app.models import BrainstormingPhase, SpecType
from app.services.spec_service import SpecService
@@ -67,9 +68,7 @@ def get_toc(
# Fallback: Legacy project-level specs (SpecVersion with project_id)
if not content_markdown:
spec_type = SpecType.SPECIFICATION if target == "spec" else SpecType.PROMPT_PLAN
- active_spec = SpecService.get_active_spec(
- db, project_id=project.id, spec_type=spec_type
- )
+ active_spec = SpecService.get_active_spec(db, project_id=project.id, spec_type=spec_type)
if active_spec:
content_markdown = active_spec.content_markdown
content_json = active_spec.content_json
@@ -79,12 +78,14 @@ def get_toc(
# If content_json exists with sections, use it for structured TOC
if content_json and "sections" in content_json:
for section in content_json["sections"]:
- toc.append({
- "id": section["id"],
- "title": section["title"],
- "has_content": bool(section.get("body_markdown")),
- "linked_questions_count": len(section.get("linked_questions", []))
- })
+ toc.append(
+ {
+ "id": section["id"],
+ "title": section["title"],
+ "has_content": bool(section.get("body_markdown")),
+ "linked_questions_count": len(section.get("linked_questions", [])),
+ }
+ )
else:
# Fallback: parse markdown headers
toc = extract_toc(content_markdown)
diff --git a/backend/app/mcp/tools/vfs_cat.py b/backend/app/mcp/tools/vfs_cat.py
index 9422f2b..99b215b 100644
--- a/backend/app/mcp/tools/vfs_cat.py
+++ b/backend/app/mcp/tools/vfs_cat.py
@@ -5,7 +5,7 @@
from sqlalchemy.orm import Session
-from app.mcp.vfs import resolve_path, PathNotFoundError
+from app.mcp.vfs import PathNotFoundError, resolve_path
from app.mcp.vfs.content import get_file_content
@@ -58,11 +58,7 @@ def vfs_cat(
# This ensures users see their own version, not global
from app.mcp.vfs import NodeType
- if (
- resolved.node_type == NodeType.GROUNDING_FILE
- and resolved.grounding_filename == "agents.md"
- and user_id
- ):
+ if resolved.node_type == NodeType.GROUNDING_FILE and resolved.grounding_filename == "agents.md" and user_id:
from app.services.grounding_service import GroundingService
user_uuid = UUID(user_id)
@@ -98,10 +94,7 @@ def vfs_cat(
return {"error": str(e)}
# Add note for agents.md global fallback (when no branch version exists)
- if (
- resolved.node_type == NodeType.GROUNDING_FILE
- and resolved.grounding_filename == "agents.md"
- ):
+ if resolved.node_type == NodeType.GROUNDING_FILE and resolved.grounding_filename == "agents.md":
result["note"] = (
"Always include branch_name when reading or writing this file. "
"This is the global starting-point version. Your branch-specific "
diff --git a/backend/app/mcp/tools/vfs_find.py b/backend/app/mcp/tools/vfs_find.py
index 7906037..475b35b 100644
--- a/backend/app/mcp/tools/vfs_find.py
+++ b/backend/app/mcp/tools/vfs_find.py
@@ -6,12 +6,12 @@
from sqlalchemy.orm import Session
-from app.mcp.vfs import resolve_path, PathNotFoundError, NodeType, slugify, feature_dir_name, module_dir_name
+from app.mcp.vfs import NodeType, PathNotFoundError, feature_dir_name, module_dir_name, resolve_path, slugify
from app.mcp.vfs.content import list_directory
from app.models.brainstorming_phase import BrainstormingPhase
-from app.models.module import Module, ModuleType
-from app.models.feature import Feature, FeatureType, FeatureStatus, FeatureProvenance
+from app.models.feature import Feature, FeatureProvenance, FeatureStatus, FeatureType
from app.models.implementation import Implementation
+from app.models.module import Module, ModuleType
from app.services.grounding_service import GroundingService
@@ -113,9 +113,7 @@ def _collect_all_paths(
elif resolved.node_type == NodeType.SYSTEM_GENERATED_DIR:
# /phases/system-generated/ lists all phases
- phases = db.query(BrainstormingPhase).filter(
- BrainstormingPhase.project_id == project_id
- ).all()
+ phases = db.query(BrainstormingPhase).filter(BrainstormingPhase.project_id == project_id).all()
for phase in phases:
phase_slug = slugify(phase.title)
paths.extend(_collect_all_paths(db, project_id, f"/phases/system-generated/{phase_slug}"))
@@ -143,22 +141,30 @@ def _collect_all_paths(
elif resolved.node_type == NodeType.FEATURES_DIR:
# System-generated features directory
- modules = db.query(Module).filter(
- Module.brainstorming_phase_id == resolved.phase_id,
- Module.module_type == ModuleType.IMPLEMENTATION,
- Module.archived_at.is_(None),
- ).all()
+ modules = (
+ db.query(Module)
+ .filter(
+ Module.brainstorming_phase_id == resolved.phase_id,
+ Module.module_type == ModuleType.IMPLEMENTATION,
+ Module.archived_at.is_(None),
+ )
+ .all()
+ )
for module in modules:
module_slug = module_dir_name(module.module_key, module.title)
paths.extend(_collect_all_paths(db, project_id, f"{resolved.path}/{module_slug}"))
elif resolved.node_type == NodeType.MODULE_DIR:
# System-generated module directory (only IMPLEMENTATION features)
- features = db.query(Feature).filter(
- Feature.module_id == resolved.module_id,
- Feature.feature_type == FeatureType.IMPLEMENTATION,
- Feature.status == FeatureStatus.ACTIVE,
- ).all()
+ features = (
+ db.query(Feature)
+ .filter(
+ Feature.module_id == resolved.module_id,
+ Feature.feature_type == FeatureType.IMPLEMENTATION,
+ Feature.status == FeatureStatus.ACTIVE,
+ )
+ .all()
+ )
for feature in features:
feature_slug = feature_dir_name(feature.feature_key, feature.title)
paths.extend(_collect_all_paths(db, project_id, f"{resolved.path}/{feature_slug}"))
@@ -174,35 +180,43 @@ def _collect_all_paths(
elif resolved.node_type == NodeType.USER_DEFINED_FEATURES_DIR:
# /phases/user-defined/features/ lists modules with user-defined features
- modules = db.query(Module).filter(
- Module.project_id == project_id,
- Module.module_type == ModuleType.IMPLEMENTATION,
- Module.archived_at.is_(None),
- ).all()
+ modules = (
+ db.query(Module)
+ .filter(
+ Module.project_id == project_id,
+ Module.module_type == ModuleType.IMPLEMENTATION,
+ Module.archived_at.is_(None),
+ )
+ .all()
+ )
for module in modules:
# Check if this module has any user-defined features
- has_user_features = db.query(Feature).filter(
- Feature.module_id == module.id,
- Feature.feature_type == FeatureType.IMPLEMENTATION,
- Feature.status == FeatureStatus.ACTIVE,
- ).filter(
- (Feature.provenance == FeatureProvenance.USER)
- | (Feature.external_provider.isnot(None))
- ).first()
+ has_user_features = (
+ db.query(Feature)
+ .filter(
+ Feature.module_id == module.id,
+ Feature.feature_type == FeatureType.IMPLEMENTATION,
+ Feature.status == FeatureStatus.ACTIVE,
+ )
+ .filter((Feature.provenance == FeatureProvenance.USER) | (Feature.external_provider.isnot(None)))
+ .first()
+ )
if has_user_features:
module_slug = module_dir_name(module.module_key, module.title)
paths.extend(_collect_all_paths(db, project_id, f"/phases/user-defined/features/{module_slug}"))
elif resolved.node_type == NodeType.USER_DEFINED_MODULE_DIR:
# /phases/user-defined/features/{module}/ lists user-defined features
- features = db.query(Feature).filter(
- Feature.module_id == resolved.module_id,
- Feature.feature_type == FeatureType.IMPLEMENTATION,
- Feature.status == FeatureStatus.ACTIVE,
- ).filter(
- (Feature.provenance == FeatureProvenance.USER)
- | (Feature.external_provider.isnot(None))
- ).all()
+ features = (
+ db.query(Feature)
+ .filter(
+ Feature.module_id == resolved.module_id,
+ Feature.feature_type == FeatureType.IMPLEMENTATION,
+ Feature.status == FeatureStatus.ACTIVE,
+ )
+ .filter((Feature.provenance == FeatureProvenance.USER) | (Feature.external_provider.isnot(None)))
+ .all()
+ )
for feature in features:
feature_slug = feature_dir_name(feature.feature_key, feature.title)
paths.extend(_collect_all_paths(db, project_id, f"{resolved.path}/{feature_slug}"))
diff --git a/backend/app/mcp/tools/vfs_grep.py b/backend/app/mcp/tools/vfs_grep.py
index e39530d..84f479a 100644
--- a/backend/app/mcp/tools/vfs_grep.py
+++ b/backend/app/mcp/tools/vfs_grep.py
@@ -6,12 +6,12 @@
from sqlalchemy.orm import Session
-from app.mcp.vfs import resolve_path, PathNotFoundError, NodeType, slugify, feature_dir_name, module_dir_name
+from app.mcp.vfs import NodeType, PathNotFoundError, feature_dir_name, module_dir_name, resolve_path, slugify
from app.mcp.vfs.content import get_file_content, list_directory
from app.models.brainstorming_phase import BrainstormingPhase
-from app.models.module import Module, ModuleType
-from app.models.feature import Feature, FeatureType, FeatureStatus, FeatureProvenance
+from app.models.feature import Feature, FeatureProvenance, FeatureStatus, FeatureType
from app.models.implementation import Implementation
+from app.models.module import Module, ModuleType
from app.services.grounding_service import GroundingService
@@ -116,9 +116,7 @@ def _collect_files(db: Session, project_id: UUID, path: str) -> List[str]:
# /phases/system-generated/ lists all phases
elif resolved.node_type == NodeType.SYSTEM_GENERATED_DIR:
- phases = db.query(BrainstormingPhase).filter(
- BrainstormingPhase.project_id == project_id
- ).all()
+ phases = db.query(BrainstormingPhase).filter(BrainstormingPhase.project_id == project_id).all()
for phase in phases:
phase_slug = slugify(phase.title)
files.extend(_collect_files(db, project_id, f"/phases/system-generated/{phase_slug}"))
@@ -149,22 +147,30 @@ def _collect_files(db: Session, project_id: UUID, path: str) -> List[str]:
# Get features directory contents
elif resolved.node_type == NodeType.FEATURES_DIR:
- modules = db.query(Module).filter(
- Module.brainstorming_phase_id == resolved.phase_id,
- Module.module_type == ModuleType.IMPLEMENTATION,
- Module.archived_at.is_(None),
- ).all()
+ modules = (
+ db.query(Module)
+ .filter(
+ Module.brainstorming_phase_id == resolved.phase_id,
+ Module.module_type == ModuleType.IMPLEMENTATION,
+ Module.archived_at.is_(None),
+ )
+ .all()
+ )
for module in modules:
module_slug = module_dir_name(module.module_key, module.title)
files.extend(_collect_files(db, project_id, f"{resolved.path}/{module_slug}"))
# Get module contents
elif resolved.node_type == NodeType.MODULE_DIR:
- features = db.query(Feature).filter(
- Feature.module_id == resolved.module_id,
- Feature.feature_type == FeatureType.IMPLEMENTATION,
- Feature.status == FeatureStatus.ACTIVE,
- ).all()
+ features = (
+ db.query(Feature)
+ .filter(
+ Feature.module_id == resolved.module_id,
+ Feature.feature_type == FeatureType.IMPLEMENTATION,
+ Feature.status == FeatureStatus.ACTIVE,
+ )
+ .all()
+ )
for feature in features:
feature_slug = feature_dir_name(feature.feature_key, feature.title)
files.extend(_collect_files(db, project_id, f"{resolved.path}/{feature_slug}"))
@@ -180,35 +186,43 @@ def _collect_files(db: Session, project_id: UUID, path: str) -> List[str]:
# /phases/user-defined/features/ lists modules with user-defined features
elif resolved.node_type == NodeType.USER_DEFINED_FEATURES_DIR:
- modules = db.query(Module).filter(
- Module.project_id == project_id,
- Module.module_type == ModuleType.IMPLEMENTATION,
- Module.archived_at.is_(None),
- ).all()
+ modules = (
+ db.query(Module)
+ .filter(
+ Module.project_id == project_id,
+ Module.module_type == ModuleType.IMPLEMENTATION,
+ Module.archived_at.is_(None),
+ )
+ .all()
+ )
for module in modules:
# Check if this module has any user-defined features
- has_user_features = db.query(Feature).filter(
- Feature.module_id == module.id,
- Feature.feature_type == FeatureType.IMPLEMENTATION,
- Feature.status == FeatureStatus.ACTIVE,
- ).filter(
- (Feature.provenance == FeatureProvenance.USER)
- | (Feature.external_provider.isnot(None))
- ).first()
+ has_user_features = (
+ db.query(Feature)
+ .filter(
+ Feature.module_id == module.id,
+ Feature.feature_type == FeatureType.IMPLEMENTATION,
+ Feature.status == FeatureStatus.ACTIVE,
+ )
+ .filter((Feature.provenance == FeatureProvenance.USER) | (Feature.external_provider.isnot(None)))
+ .first()
+ )
if has_user_features:
module_slug = module_dir_name(module.module_key, module.title)
files.extend(_collect_files(db, project_id, f"/phases/user-defined/features/{module_slug}"))
# /phases/user-defined/features/{module}/ lists user-defined features
elif resolved.node_type == NodeType.USER_DEFINED_MODULE_DIR:
- features = db.query(Feature).filter(
- Feature.module_id == resolved.module_id,
- Feature.feature_type == FeatureType.IMPLEMENTATION,
- Feature.status == FeatureStatus.ACTIVE,
- ).filter(
- (Feature.provenance == FeatureProvenance.USER)
- | (Feature.external_provider.isnot(None))
- ).all()
+ features = (
+ db.query(Feature)
+ .filter(
+ Feature.module_id == resolved.module_id,
+ Feature.feature_type == FeatureType.IMPLEMENTATION,
+ Feature.status == FeatureStatus.ACTIVE,
+ )
+ .filter((Feature.provenance == FeatureProvenance.USER) | (Feature.external_provider.isnot(None)))
+ .all()
+ )
for feature in features:
feature_slug = feature_dir_name(feature.feature_key, feature.title)
files.extend(_collect_files(db, project_id, f"{resolved.path}/{feature_slug}"))
@@ -281,7 +295,7 @@ def _search_content(
start = max(0, i - context_lines)
end = min(len(lines), i + context_lines + 1)
match["context_before"] = lines[start:i]
- match["context_after"] = lines[i + 1:end]
+ match["context_after"] = lines[i + 1 : end]
matches.append(match)
diff --git a/backend/app/mcp/tools/vfs_head.py b/backend/app/mcp/tools/vfs_head.py
index 83a8873..2ec257c 100644
--- a/backend/app/mcp/tools/vfs_head.py
+++ b/backend/app/mcp/tools/vfs_head.py
@@ -5,7 +5,7 @@
from sqlalchemy.orm import Session
-from app.mcp.vfs import resolve_path, PathNotFoundError
+from app.mcp.vfs import PathNotFoundError, resolve_path
from app.mcp.vfs.content import get_file_content
diff --git a/backend/app/mcp/tools/vfs_ls.py b/backend/app/mcp/tools/vfs_ls.py
index be9c153..7ec97e1 100644
--- a/backend/app/mcp/tools/vfs_ls.py
+++ b/backend/app/mcp/tools/vfs_ls.py
@@ -1,11 +1,11 @@
"""VFS ls tool - list directory contents."""
-from typing import Any, Dict, Optional
+from typing import Any, Dict
from uuid import UUID
from sqlalchemy.orm import Session
-from app.mcp.vfs import resolve_path, PathNotFoundError, NotADirectoryError
+from app.mcp.vfs import PathNotFoundError, resolve_path
from app.mcp.vfs.content import list_directory
@@ -64,22 +64,13 @@ def vfs_ls(
# If not long format, simplify entries
if not long:
- result["entries"] = [
- {"name": e["name"], "type": e["type"]}
- for e in result["entries"]
- ]
+ result["entries"] = [{"name": e["name"], "type": e["type"]} for e in result["entries"]]
# If not all, filter hidden files
if not all:
- result["entries"] = [
- e for e in result["entries"]
- if not e["name"].startswith(".")
- ]
+ result["entries"] = [e for e in result["entries"] if not e["name"].startswith(".")]
# Also update text output
lines = result.get("text", "").split("\n")
- result["text"] = "\n".join(
- line for line in lines
- if not any(part.startswith(".") for part in line.split())
- )
+ result["text"] = "\n".join(line for line in lines if not any(part.startswith(".") for part in line.split()))
return result
diff --git a/backend/app/mcp/tools/vfs_sed.py b/backend/app/mcp/tools/vfs_sed.py
index 2bdeacc..6c82e51 100644
--- a/backend/app/mcp/tools/vfs_sed.py
+++ b/backend/app/mcp/tools/vfs_sed.py
@@ -6,7 +6,7 @@
from sqlalchemy.orm import Session
-from app.mcp.vfs import resolve_path, PathNotFoundError, NodeType
+from app.mcp.vfs import NodeType, PathNotFoundError, resolve_path
from app.mcp.vfs.content import get_file_content
from app.models.feature import Feature
@@ -120,8 +120,8 @@ def _sed_feature_notes(
db.refresh(feature)
# Broadcast and trigger grounding update
- from app.services.feature_service import FeatureService
from app.mcp.tools.vfs_write import _trigger_grounding_update
+ from app.services.feature_service import FeatureService
FeatureService._broadcast_feature_update(db, feature, "notes")
_trigger_grounding_update(db, project_id, feature.id)
@@ -147,8 +147,8 @@ def _sed_grounding_file(
matches: int,
) -> Dict[str, Any]:
"""Apply sed to grounding file."""
- from app.services.grounding_service import GroundingService
from app.mcp.tools.vfs_write import _trigger_grounding_summarize
+ from app.services.grounding_service import GroundingService
filename = resolved.grounding_filename
grounding_file = GroundingService.get_file(db, project_id, filename)
diff --git a/backend/app/mcp/tools/vfs_set_metadata.py b/backend/app/mcp/tools/vfs_set_metadata.py
index e1fc2ef..ecdfdb9 100644
--- a/backend/app/mcp/tools/vfs_set_metadata.py
+++ b/backend/app/mcp/tools/vfs_set_metadata.py
@@ -5,9 +5,9 @@
from sqlalchemy.orm import Session
-from app.mcp.vfs import resolve_path, PathNotFoundError
-from app.mcp.vfs.metadata import set_metadata, COMPUTED_KEYS, SIDE_EFFECT_KEYS
-from app.mcp.vfs.errors import PermissionDeniedError, InvalidPathError
+from app.mcp.vfs import PathNotFoundError, resolve_path
+from app.mcp.vfs.errors import InvalidPathError, PermissionDeniedError
+from app.mcp.vfs.metadata import COMPUTED_KEYS, set_metadata
def vfs_set_metadata(
diff --git a/backend/app/mcp/tools/vfs_tail.py b/backend/app/mcp/tools/vfs_tail.py
index 13c1ee7..7fa1100 100644
--- a/backend/app/mcp/tools/vfs_tail.py
+++ b/backend/app/mcp/tools/vfs_tail.py
@@ -5,7 +5,7 @@
from sqlalchemy.orm import Session
-from app.mcp.vfs import resolve_path, PathNotFoundError
+from app.mcp.vfs import PathNotFoundError, resolve_path
from app.mcp.vfs.content import get_file_content
diff --git a/backend/app/mcp/tools/vfs_tree.py b/backend/app/mcp/tools/vfs_tree.py
index a5419d7..0a0d71a 100644
--- a/backend/app/mcp/tools/vfs_tree.py
+++ b/backend/app/mcp/tools/vfs_tree.py
@@ -5,12 +5,20 @@
from sqlalchemy.orm import Session
-from app.mcp.vfs import resolve_path, PathNotFoundError, NodeType, slugify, feature_dir_name, module_dir_name, ResolvedPath
+from app.mcp.vfs import (
+ NodeType,
+ PathNotFoundError,
+ ResolvedPath,
+ feature_dir_name,
+ module_dir_name,
+ resolve_path,
+ slugify,
+)
from app.mcp.vfs.content import list_directory
from app.models.brainstorming_phase import BrainstormingPhase
-from app.models.module import Module, ModuleType
-from app.models.feature import Feature, FeatureType, FeatureStatus, FeatureProvenance
+from app.models.feature import Feature, FeatureProvenance, FeatureStatus, FeatureType
from app.models.implementation import Implementation
+from app.models.module import Module, ModuleType
from app.services.grounding_service import GroundingService
@@ -106,9 +114,7 @@ def _build_tree(
for subdir in ["phases", "project-info", "system-info", "for-coding-agents"]:
try:
child_resolved = resolve_path(db, project_id, f"/{subdir}")
- child_node, child_stats = _build_tree(
- db, project_id, child_resolved, max_depth, current_depth + 1
- )
+ child_node, child_stats = _build_tree(db, project_id, child_resolved, max_depth, current_depth + 1)
children.append(child_node)
stats["directories"] += child_stats["directories"]
stats["files"] += child_stats["files"]
@@ -120,9 +126,7 @@ def _build_tree(
for subdir in ["system-generated", "user-defined"]:
try:
child_resolved = resolve_path(db, project_id, f"/phases/{subdir}")
- child_node, child_stats = _build_tree(
- db, project_id, child_resolved, max_depth, current_depth + 1
- )
+ child_node, child_stats = _build_tree(db, project_id, child_resolved, max_depth, current_depth + 1)
children.append(child_node)
stats["directories"] += child_stats["directories"]
stats["files"] += child_stats["files"]
@@ -131,16 +135,17 @@ def _build_tree(
elif resolved.node_type == NodeType.SYSTEM_GENERATED_DIR:
# /phases/system-generated/ lists all phases
- phases = db.query(BrainstormingPhase).filter(
- BrainstormingPhase.project_id == project_id
- ).order_by(BrainstormingPhase.created_at).all()
+ phases = (
+ db.query(BrainstormingPhase)
+ .filter(BrainstormingPhase.project_id == project_id)
+ .order_by(BrainstormingPhase.created_at)
+ .all()
+ )
for phase in phases:
phase_slug = slugify(phase.title)
try:
child_resolved = resolve_path(db, project_id, f"/phases/system-generated/{phase_slug}")
- child_node, child_stats = _build_tree(
- db, project_id, child_resolved, max_depth, current_depth + 1
- )
+ child_node, child_stats = _build_tree(db, project_id, child_resolved, max_depth, current_depth + 1)
children.append(child_node)
stats["directories"] += child_stats["directories"]
stats["files"] += child_stats["files"]
@@ -151,9 +156,7 @@ def _build_tree(
for subdir in ["phase-spec", "phase-prompt-plan", "features"]:
try:
child_resolved = resolve_path(db, project_id, f"{resolved.path}/{subdir}")
- child_node, child_stats = _build_tree(
- db, project_id, child_resolved, max_depth, current_depth + 1
- )
+ child_node, child_stats = _build_tree(db, project_id, child_resolved, max_depth, current_depth + 1)
children.append(child_node)
stats["directories"] += child_stats["directories"]
stats["files"] += child_stats["files"]
@@ -175,9 +178,7 @@ def _build_tree(
if current_depth + 1 < max_depth:
try:
child_resolved = resolve_path(db, project_id, f"{resolved.path}/by-section")
- child_node, child_stats = _build_tree(
- db, project_id, child_resolved, max_depth, current_depth + 1
- )
+ child_node, child_stats = _build_tree(db, project_id, child_resolved, max_depth, current_depth + 1)
# Replace the placeholder with actual tree
children[0] = child_node
stats["directories"] += child_stats["directories"] - 1
@@ -189,28 +190,33 @@ def _build_tree(
try:
result = list_directory(db, project_id, resolved)
for entry in result.get("entries", []):
- children.append({
- "name": entry["name"],
- "type": "file",
- })
+ children.append(
+ {
+ "name": entry["name"],
+ "type": "file",
+ }
+ )
stats["files"] += 1
except Exception:
pass
elif resolved.node_type == NodeType.FEATURES_DIR:
- modules = db.query(Module).filter(
- Module.brainstorming_phase_id == resolved.phase_id,
- Module.module_type == ModuleType.IMPLEMENTATION,
- Module.archived_at.is_(None),
- ).order_by(Module.order_index).all()
+ modules = (
+ db.query(Module)
+ .filter(
+ Module.brainstorming_phase_id == resolved.phase_id,
+ Module.module_type == ModuleType.IMPLEMENTATION,
+ Module.archived_at.is_(None),
+ )
+ .order_by(Module.order_index)
+ .all()
+ )
for module in modules:
module_slug = module_dir_name(module.module_key, module.title)
try:
child_resolved = resolve_path(db, project_id, f"{resolved.path}/{module_slug}")
- child_node, child_stats = _build_tree(
- db, project_id, child_resolved, max_depth, current_depth + 1
- )
+ child_node, child_stats = _build_tree(db, project_id, child_resolved, max_depth, current_depth + 1)
children.append(child_node)
stats["directories"] += child_stats["directories"]
stats["files"] += child_stats["files"]
@@ -218,19 +224,22 @@ def _build_tree(
pass
elif resolved.node_type == NodeType.MODULE_DIR:
- features = db.query(Feature).filter(
- Feature.module_id == resolved.module_id,
- Feature.feature_type == FeatureType.IMPLEMENTATION,
- Feature.status == FeatureStatus.ACTIVE,
- ).order_by(Feature.created_at).all()
+ features = (
+ db.query(Feature)
+ .filter(
+ Feature.module_id == resolved.module_id,
+ Feature.feature_type == FeatureType.IMPLEMENTATION,
+ Feature.status == FeatureStatus.ACTIVE,
+ )
+ .order_by(Feature.created_at)
+ .all()
+ )
for feature in features:
feature_slug = feature_dir_name(feature.feature_key, feature.title)
try:
child_resolved = resolve_path(db, project_id, f"{resolved.path}/{feature_slug}")
- child_node, child_stats = _build_tree(
- db, project_id, child_resolved, max_depth, current_depth + 1
- )
+ child_node, child_stats = _build_tree(db, project_id, child_resolved, max_depth, current_depth + 1)
children.append(child_node)
stats["directories"] += child_stats["directories"]
stats["files"] += child_stats["files"]
@@ -248,9 +257,7 @@ def _build_tree(
for dirname in ["implementations", "conversations"]:
try:
child_resolved = resolve_path(db, project_id, f"{resolved.path}/{dirname}")
- child_node, child_stats = _build_tree(
- db, project_id, child_resolved, max_depth, current_depth + 1
- )
+ child_node, child_stats = _build_tree(db, project_id, child_resolved, max_depth, current_depth + 1)
# Replace placeholder with actual tree
for i, c in enumerate(children):
if c["name"] == dirname:
@@ -265,9 +272,7 @@ def _build_tree(
# /phases/user-defined/ has: features/
try:
child_resolved = resolve_path(db, project_id, "/phases/user-defined/features")
- child_node, child_stats = _build_tree(
- db, project_id, child_resolved, max_depth, current_depth + 1
- )
+ child_node, child_stats = _build_tree(db, project_id, child_resolved, max_depth, current_depth + 1)
children.append(child_node)
stats["directories"] += child_stats["directories"]
stats["files"] += child_stats["files"]
@@ -276,30 +281,35 @@ def _build_tree(
elif resolved.node_type == NodeType.USER_DEFINED_FEATURES_DIR:
# /phases/user-defined/features/ lists modules with user-defined features
- modules = db.query(Module).filter(
- Module.project_id == project_id,
- Module.module_type == ModuleType.IMPLEMENTATION,
- Module.archived_at.is_(None),
- ).order_by(Module.order_index).all()
+ modules = (
+ db.query(Module)
+ .filter(
+ Module.project_id == project_id,
+ Module.module_type == ModuleType.IMPLEMENTATION,
+ Module.archived_at.is_(None),
+ )
+ .order_by(Module.order_index)
+ .all()
+ )
for module in modules:
# Check if this module has any user-defined features
- has_user_features = db.query(Feature).filter(
- Feature.module_id == module.id,
- Feature.feature_type == FeatureType.IMPLEMENTATION,
- Feature.status == FeatureStatus.ACTIVE,
- ).filter(
- (Feature.provenance == FeatureProvenance.USER)
- | (Feature.external_provider.isnot(None))
- ).first()
+ has_user_features = (
+ db.query(Feature)
+ .filter(
+ Feature.module_id == module.id,
+ Feature.feature_type == FeatureType.IMPLEMENTATION,
+ Feature.status == FeatureStatus.ACTIVE,
+ )
+ .filter((Feature.provenance == FeatureProvenance.USER) | (Feature.external_provider.isnot(None)))
+ .first()
+ )
if has_user_features:
module_slug = module_dir_name(module.module_key, module.title)
try:
child_resolved = resolve_path(db, project_id, f"/phases/user-defined/features/{module_slug}")
- child_node, child_stats = _build_tree(
- db, project_id, child_resolved, max_depth, current_depth + 1
- )
+ child_node, child_stats = _build_tree(db, project_id, child_resolved, max_depth, current_depth + 1)
children.append(child_node)
stats["directories"] += child_stats["directories"]
stats["files"] += child_stats["files"]
@@ -308,22 +318,23 @@ def _build_tree(
elif resolved.node_type == NodeType.USER_DEFINED_MODULE_DIR:
# /phases/user-defined/features/{module}/ lists user-defined features
- features = db.query(Feature).filter(
- Feature.module_id == resolved.module_id,
- Feature.feature_type == FeatureType.IMPLEMENTATION,
- Feature.status == FeatureStatus.ACTIVE,
- ).filter(
- (Feature.provenance == FeatureProvenance.USER)
- | (Feature.external_provider.isnot(None))
- ).order_by(Feature.created_at).all()
+ features = (
+ db.query(Feature)
+ .filter(
+ Feature.module_id == resolved.module_id,
+ Feature.feature_type == FeatureType.IMPLEMENTATION,
+ Feature.status == FeatureStatus.ACTIVE,
+ )
+ .filter((Feature.provenance == FeatureProvenance.USER) | (Feature.external_provider.isnot(None)))
+ .order_by(Feature.created_at)
+ .all()
+ )
for feature in features:
feature_slug = feature_dir_name(feature.feature_key, feature.title)
try:
child_resolved = resolve_path(db, project_id, f"{resolved.path}/{feature_slug}")
- child_node, child_stats = _build_tree(
- db, project_id, child_resolved, max_depth, current_depth + 1
- )
+ child_node, child_stats = _build_tree(db, project_id, child_resolved, max_depth, current_depth + 1)
children.append(child_node)
stats["directories"] += child_stats["directories"]
stats["files"] += child_stats["files"]
@@ -341,9 +352,7 @@ def _build_tree(
for dirname in ["implementations", "conversations"]:
try:
child_resolved = resolve_path(db, project_id, f"{resolved.path}/{dirname}")
- child_node, child_stats = _build_tree(
- db, project_id, child_resolved, max_depth, current_depth + 1
- )
+ child_node, child_stats = _build_tree(db, project_id, child_resolved, max_depth, current_depth + 1)
# Replace placeholder with actual tree
for i, c in enumerate(children):
if c["name"] == dirname:
@@ -363,9 +372,7 @@ def _build_tree(
# /system-info/ has: users/
try:
child_resolved = resolve_path(db, project_id, "/system-info/users")
- child_node, child_stats = _build_tree(
- db, project_id, child_resolved, max_depth, current_depth + 1
- )
+ child_node, child_stats = _build_tree(db, project_id, child_resolved, max_depth, current_depth + 1)
children.append(child_node)
stats["directories"] += child_stats["directories"]
stats["files"] += child_stats["files"]
@@ -396,9 +403,7 @@ def _build_tree(
impl_slug = slugify(impl.name)
try:
child_resolved = resolve_path(db, project_id, f"{resolved.path}/{impl_slug}")
- child_node, child_stats = _build_tree(
- db, project_id, child_resolved, max_depth, current_depth + 1
- )
+ child_node, child_stats = _build_tree(db, project_id, child_resolved, max_depth, current_depth + 1)
children.append(child_node)
stats["directories"] += child_stats["directories"]
stats["files"] += child_stats["files"]
diff --git a/backend/app/mcp/tools/vfs_write.py b/backend/app/mcp/tools/vfs_write.py
index 820a8af..f80c25e 100644
--- a/backend/app/mcp/tools/vfs_write.py
+++ b/backend/app/mcp/tools/vfs_write.py
@@ -5,9 +5,7 @@
from sqlalchemy.orm import Session
-from app.config import settings
-from app.mcp.vfs import resolve_path, PathNotFoundError, NodeType
-from app.mcp.vfs.errors import ReadOnlyError
+from app.mcp.vfs import NodeType, PathNotFoundError, resolve_path
from app.models.feature import Feature
from app.models.implementation import Implementation
@@ -77,29 +75,43 @@ def vfs_write(
# Handle grounding files (/for-coding-agents/*)
if resolved.node_type == NodeType.GROUNDING_FILE:
return _write_grounding_file(
- db, project_uuid, user_uuid, resolved, content, append,
- branch_name=branch_name, repo_path=repo_path
+ db, project_uuid, user_uuid, resolved, content, append, branch_name=branch_name, repo_path=repo_path
)
# Handle feature notes.md files (both system-generated and user-defined)
- if resolved.node_type in (NodeType.FEATURE_FILE, NodeType.USER_DEFINED_FEATURE_FILE) and resolved.file_name == "notes.md":
+ if (
+ resolved.node_type in (NodeType.FEATURE_FILE, NodeType.USER_DEFINED_FEATURE_FILE)
+ and resolved.file_name == "notes.md"
+ ):
return _write_feature_notes(
- db, project_uuid, user_uuid, resolved, content, append,
- coding_agent_name, branch_name=branch_name, repo_path=repo_path
+ db,
+ project_uuid,
+ user_uuid,
+ resolved,
+ content,
+ append,
+ coding_agent_name,
+ branch_name=branch_name,
+ repo_path=repo_path,
)
# Handle implementation notes.md files
if resolved.node_type == NodeType.IMPLEMENTATION_NOTES_FILE:
return _write_implementation_notes(
- db, project_uuid, user_uuid, resolved, content, append,
- coding_agent_name, branch_name=branch_name, repo_path=repo_path
+ db,
+ project_uuid,
+ user_uuid,
+ resolved,
+ content,
+ append,
+ coding_agent_name,
+ branch_name=branch_name,
+ repo_path=repo_path,
)
# Handle conversations.md writes (create thread comments)
if resolved.node_type == NodeType.CONVERSATIONS_FILE:
- return _write_conversation_comment(
- db, project_uuid, user_uuid, resolved, content, coding_agent_name
- )
+ return _write_conversation_comment(db, project_uuid, user_uuid, resolved, content, coding_agent_name)
# All other files are read-only
return {
@@ -154,14 +166,14 @@ def _write_feature_notes(
# Broadcast feature update to WebSocket clients
from app.services.feature_service import FeatureService
+
FeatureService._broadcast_feature_update(db, feature, "notes")
# Trigger grounding update job targeting user's branch-specific agents.md
# Default to "main" branch to ensure user isolation
effective_branch = branch_name or "main"
_trigger_grounding_update(
- db, project_id, feature.id,
- user_id=user_id, branch_name=effective_branch, repo_path=repo_path
+ db, project_id, feature.id, user_id=user_id, branch_name=effective_branch, repo_path=repo_path
)
return {
@@ -194,9 +206,7 @@ def _write_implementation_notes(
if not resolved.implementation_id:
return {"error": "Implementation not found"}
- impl = db.query(Implementation).filter(
- Implementation.id == resolved.implementation_id
- ).first()
+ impl = db.query(Implementation).filter(Implementation.id == resolved.implementation_id).first()
if not impl:
return {"error": "Implementation not found"}
@@ -226,14 +236,19 @@ def _write_implementation_notes(
# Broadcast implementation update to WebSocket clients
from app.services.implementation_service import ImplementationService
+
ImplementationService.broadcast_implementation_updated(db, impl, "notes")
# Trigger grounding update job targeting user's branch-specific agents.md
# Default to "main" branch to ensure user isolation
effective_branch = branch_name or "main"
_trigger_grounding_update(
- db, project_id, feature.id,
- user_id=user_id, branch_name=effective_branch, repo_path=repo_path,
+ db,
+ project_id,
+ feature.id,
+ user_id=user_id,
+ branch_name=effective_branch,
+ repo_path=repo_path,
implementation_id=impl.id,
)
@@ -270,9 +285,9 @@ def _trigger_grounding_update(
"""
import logging
+ from app.models.job import JobType
from app.services.job_service import JobService
from app.services.project_service import ProjectService
- from app.models.job import JobType
from workers.core.helpers import publish_job_to_kafka
logger = logging.getLogger(__name__)
@@ -318,9 +333,7 @@ def _trigger_grounding_update(
if success:
branch_info = f", branch={branch_name}" if branch_name else ""
- logger.info(
- f"Triggered grounding update job {job.id} for feature {feature_id}{branch_info}"
- )
+ logger.info(f"Triggered grounding update job {job.id} for feature {feature_id}{branch_info}")
else:
logger.warning(f"Failed to publish grounding update job {job.id} to Kafka")
@@ -340,9 +353,9 @@ def _trigger_grounding_summarize(db: Session, project_id: UUID) -> None:
"""
import logging
+ from app.models.job import JobType
from app.services.job_service import JobService
from app.services.project_service import ProjectService
- from app.models.job import JobType
from workers.core.helpers import publish_job_to_kafka
logger = logging.getLogger(__name__)
@@ -382,9 +395,7 @@ def _trigger_grounding_summarize(db: Session, project_id: UUID) -> None:
logger.error(f"Failed to trigger grounding summarize: {e}")
-def _trigger_grounding_branch_summarize(
- db: Session, project_id: UUID, user_id: UUID, branch_name: str
-) -> None:
+def _trigger_grounding_branch_summarize(db: Session, project_id: UUID, user_id: UUID, branch_name: str) -> None:
"""
Trigger an async job to regenerate the summary for a branch-specific agents.md.
@@ -396,9 +407,9 @@ def _trigger_grounding_branch_summarize(
"""
import logging
+ from app.models.job import JobType
from app.services.job_service import JobService
from app.services.project_service import ProjectService
- from app.models.job import JobType
from workers.core.helpers import publish_job_to_kafka
logger = logging.getLogger(__name__)
@@ -407,9 +418,7 @@ def _trigger_grounding_branch_summarize(
# Get project to find org_id
project = ProjectService.get_project_by_id(db, project_id)
if not project:
- logger.warning(
- f"Could not trigger branch grounding summarize: project {project_id} not found"
- )
+ logger.warning(f"Could not trigger branch grounding summarize: project {project_id} not found")
return
# Create the job
@@ -435,13 +444,10 @@ def _trigger_grounding_branch_summarize(
if success:
logger.info(
- f"Triggered branch grounding summarize job {job.id} for "
- f"project {project_id}, branch {branch_name}"
+ f"Triggered branch grounding summarize job {job.id} for project {project_id}, branch {branch_name}"
)
else:
- logger.warning(
- f"Failed to publish branch grounding summarize job {job.id} to Kafka"
- )
+ logger.warning(f"Failed to publish branch grounding summarize job {job.id} to Kafka")
except Exception as e:
logger.error(f"Failed to trigger branch grounding summarize: {e}")
@@ -474,15 +480,12 @@ def _write_grounding_file(
if filename == "agents.md":
effective_branch = branch_name or "main"
branch_file = GroundingService.update_branch_file(
- db, project_id, user_id, effective_branch, content,
- append=append, repo_path=repo_path, filename=filename
+ db, project_id, user_id, effective_branch, content, append=append, repo_path=repo_path, filename=filename
)
action = "appended" if append else "written"
# Broadcast branch file update to WebSocket clients
- GroundingService._broadcast_branch_grounding_update(
- db, project_id, branch_file, action
- )
+ GroundingService._broadcast_branch_grounding_update(db, project_id, branch_file, action)
# Trigger summarization for branch file
_trigger_grounding_branch_summarize(db, project_id, user_id, effective_branch)
@@ -500,9 +503,7 @@ def _write_grounding_file(
if grounding_file:
# Update existing file
- grounding_file = GroundingService.update_file(
- db, project_id, filename, content, append=append
- )
+ grounding_file = GroundingService.update_file(db, project_id, filename, content, append=append)
action = "appended" if append else "written"
else:
# Create new file (only for valid extensions)
@@ -511,9 +512,7 @@ def _write_grounding_file(
"error": f"Invalid file extension for: {filename}",
"hint": "Allowed: .md, .txt, .json, .yaml, .yml",
}
- grounding_file = GroundingService.create_file(
- db, project_id, filename, content, user_id
- )
+ grounding_file = GroundingService.create_file(db, project_id, filename, content, user_id)
action = "created"
# Broadcast grounding file update to WebSocket clients
@@ -551,12 +550,11 @@ def _write_conversation_comment(
from sqlalchemy.orm.attributes import flag_modified
- from app.services.thread_service import ThreadService
- from app.services.project_share_service import ProjectShareService
- from app.services.mention_utils import extract_user_mentions
- from app.models.thread import Thread, ContextType
+ from app.models.thread import Thread
from app.models.user import User
- from app.models.project import Project
+ from app.services.mention_utils import extract_user_mentions
+ from app.services.project_share_service import ProjectShareService
+ from app.services.thread_service import ThreadService
logger = logging.getLogger(__name__)
@@ -677,8 +675,8 @@ def _get_or_create_feature_thread(
user_id: UUID,
):
"""Get existing thread or create new one for a feature."""
- from app.models.thread import Thread, ContextType
from app.models.feature import Feature
+ from app.models.thread import ContextType, Thread
# Check if thread already exists
thread = (
@@ -761,9 +759,13 @@ def run_async_safely(coro):
for ref in image_refs:
# Find staged image
- submission = db.query(MCPImageSubmission).filter(
- MCPImageSubmission.submission_id == ref,
- ).first()
+ submission = (
+ db.query(MCPImageSubmission)
+ .filter(
+ MCPImageSubmission.submission_id == ref,
+ )
+ .first()
+ )
if not submission:
return {
@@ -786,12 +788,14 @@ def run_async_safely(coro):
# Upload to S3 using async service
try:
+
async def upload_to_s3():
# Create a fresh async engine for this event loop to avoid
# "Future attached to a different loop" errors when running
# in a thread pool with a new event loop via asyncio.run()
- from sqlalchemy.ext.asyncio import create_async_engine, AsyncSession
+ from sqlalchemy.ext.asyncio import AsyncSession, create_async_engine
from sqlalchemy.orm import sessionmaker
+
from app.config import settings
db_url = str(settings.database_url)
@@ -826,10 +830,7 @@ async def upload_to_s3():
processed_images.append(result.metadata.to_dict())
submissions_to_delete.append(submission)
- logger.info(
- f"Uploaded staged image to S3: submission_id={ref}, "
- f"s3_key={result.metadata.s3_key}"
- )
+ logger.info(f"Uploaded staged image to S3: submission_id={ref}, s3_key={result.metadata.s3_key}")
except Exception as e:
logger.error(f"Failed to upload staged image {ref}: {e}", exc_info=True)
diff --git a/backend/app/mcp/utils/markdown_parser.py b/backend/app/mcp/utils/markdown_parser.py
index 6aea5d9..190ecdb 100644
--- a/backend/app/mcp/utils/markdown_parser.py
+++ b/backend/app/mcp/utils/markdown_parser.py
@@ -1,7 +1,7 @@
"""Markdown parsing utilities for TOC extraction and section extraction."""
import re
-from typing import List, Dict, TypedDict
+from typing import List, TypedDict
class TocEntry(TypedDict):
@@ -27,10 +27,10 @@ def _heading_to_id(heading_text: str) -> str:
text = heading_text.replace("'", "")
# Replace special characters and spaces with hyphens
- text = re.sub(r'[^a-zA-Z0-9\s-]', '', text)
+ text = re.sub(r"[^a-zA-Z0-9\s-]", "", text)
# Convert to lowercase and replace spaces/multiple hyphens with single hyphen
- text = re.sub(r'[\s-]+', '-', text.strip().lower())
+ text = re.sub(r"[\s-]+", "-", text.strip().lower())
# Prefix with "sec-"
return f"sec-{text}"
@@ -59,7 +59,7 @@ def extract_toc(markdown: str) -> List[TocEntry]:
return []
# Regex to match markdown headings: ^(#{1,6})\s+(.+)$
- heading_pattern = re.compile(r'^(#{1,6})\s+(.+)$', re.MULTILINE)
+ heading_pattern = re.compile(r"^(#{1,6})\s+(.+)$", re.MULTILINE)
matches = heading_pattern.findall(markdown)
if not matches:
@@ -138,10 +138,10 @@ def extract_section(markdown: str, section_id: str) -> str:
# We extract from the target heading until:
# - The next heading at any level (for "direct content only" semantics)
# - OR end of document
- lines = markdown.split('\n')
+ lines = markdown.split("\n")
section_lines: List[str] = []
in_section = False
- heading_marker = '#' * target_entry["level"]
+ heading_marker = "#" * target_entry["level"]
heading_line = f"{heading_marker} {target_entry['title']}"
for line in lines:
@@ -154,7 +154,7 @@ def extract_section(markdown: str, section_id: str) -> str:
# If in section, collect lines until ANY next heading
if in_section:
# Check if this line is ANY heading
- if line.strip().startswith('#') and re.match(r'^#{1,6}\s+.+$', line.strip()):
+ if line.strip().startswith("#") and re.match(r"^#{1,6}\s+.+$", line.strip()):
# This is a heading - stop collecting
break
@@ -163,4 +163,4 @@ def extract_section(markdown: str, section_id: str) -> str:
if not section_lines:
raise ValueError(f"Section '{section_id}' not found in markdown")
- return '\n'.join(section_lines)
+ return "\n".join(section_lines)
diff --git a/backend/app/mcp/utils/project_resolver.py b/backend/app/mcp/utils/project_resolver.py
index 938a565..6dc459b 100644
--- a/backend/app/mcp/utils/project_resolver.py
+++ b/backend/app/mcp/utils/project_resolver.py
@@ -2,6 +2,7 @@
from typing import Optional
from uuid import UUID
+
from sqlalchemy.orm import Session
from app.models import Project
diff --git a/backend/app/mcp/vfs/__init__.py b/backend/app/mcp/vfs/__init__.py
index 1033f75..60cee13 100644
--- a/backend/app/mcp/vfs/__init__.py
+++ b/backend/app/mcp/vfs/__init__.py
@@ -35,21 +35,21 @@
"""
from app.mcp.vfs.errors import (
- VFSError,
- PathNotFoundError,
+ InvalidPathError,
NotADirectoryError,
NotAFileError,
+ PathNotFoundError,
PermissionDeniedError,
- InvalidPathError,
+ VFSError,
)
from app.mcp.vfs.path_resolver import (
NodeType,
ResolvedPath,
- resolve_path,
- slugify,
+ build_path,
feature_dir_name,
module_dir_name,
- build_path,
+ resolve_path,
+ slugify,
)
__all__ = [
diff --git a/backend/app/mcp/vfs/content.py b/backend/app/mcp/vfs/content.py
index 19950a9..b2306ff 100644
--- a/backend/app/mcp/vfs/content.py
+++ b/backend/app/mcp/vfs/content.py
@@ -7,23 +7,23 @@
from sqlalchemy.orm import Session
-from app.models.brainstorming_phase import BrainstormingPhase
-from app.models.module import Module, ModuleType
-from app.models.feature import Feature, FeatureType, FeatureStatus, FeatureProvenance
-from app.models.implementation import Implementation
-from app.models.project import Project
-from app.models.org_membership import OrgMembership
-from app.models.spec_version import SpecVersion, SpecType
-from app.models.user import User
+from app.mcp.utils.markdown_parser import extract_section, extract_toc
+from app.mcp.vfs.errors import PathNotFoundError
from app.mcp.vfs.path_resolver import (
NodeType,
ResolvedPath,
- slugify,
feature_dir_name,
module_dir_name,
+ slugify,
)
-from app.mcp.vfs.errors import PathNotFoundError
-from app.mcp.utils.markdown_parser import extract_toc, extract_section
+from app.models.brainstorming_phase import BrainstormingPhase
+from app.models.feature import Feature, FeatureProvenance, FeatureStatus, FeatureType
+from app.models.implementation import Implementation
+from app.models.module import Module, ModuleType
+from app.models.org_membership import OrgMembership
+from app.models.project import Project
+from app.models.spec_version import SpecType, SpecVersion
+from app.models.user import User
from app.services.team_role_service import TeamRoleService
@@ -111,7 +111,7 @@ def _resolve_image_references(markdown: str) -> str:
from app.services.image_service import ImageService
# Pattern matches IMAGE_REF:uuid-format
- pattern = r'IMAGE_REF:([a-f0-9-]+)'
+ pattern = r"IMAGE_REF:([a-f0-9-]+)"
def replacer(match):
image_id = match.group(1)
@@ -331,9 +331,7 @@ def replacer(match):
}
-def _wrap_prompt_plan_with_instructions(
- content: str, feature: Feature, resolved: ResolvedPath
-) -> str:
+def _wrap_prompt_plan_with_instructions(content: str, feature: Feature, resolved: ResolvedPath) -> str:
"""Wrap prompt plan with preamble/postscript instructions for coding agents."""
feature_dir = resolved.path.rsplit("/", 1)[0] + "/"
@@ -466,13 +464,9 @@ def get_file_content(
raise ValueError(f"Cannot get file content for node type: {resolved.node_type}")
-def _get_full_document_content(
- db: Session, resolved: ResolvedPath
-) -> Dict[str, Any]:
+def _get_full_document_content(db: Session, resolved: ResolvedPath) -> Dict[str, Any]:
"""Get content for full.md files (spec or prompt plan)."""
- phase = db.query(BrainstormingPhase).filter(
- BrainstormingPhase.id == resolved.phase_id
- ).first()
+ phase = db.query(BrainstormingPhase).filter(BrainstormingPhase.id == resolved.phase_id).first()
if not phase:
raise PathNotFoundError(resolved.path)
@@ -483,9 +477,7 @@ def _get_full_document_content(
# Resolve any IMAGE_REF:xxx references to signed URLs
content = _resolve_image_references(content_markdown)
# Append any phase description images that weren't embedded by the LLM
- content = _append_description_images(
- content, phase.description_image_attachments, "phase description"
- )
+ content = _append_description_images(content, phase.description_image_attachments, "phase description")
elif resolved.document_type == "spec":
content = "# No specification available\n\nThis phase does not have a specification yet."
else:
@@ -502,15 +494,11 @@ def _get_full_document_content(
}
-def _get_summary_document_content(
- db: Session, resolved: ResolvedPath
-) -> Dict[str, Any]:
+def _get_summary_document_content(db: Session, resolved: ResolvedPath) -> Dict[str, Any]:
"""Get content for summary.md files (spec only)."""
from app.services.brainstorming_phase_service import _build_spec_summary_from_json
- phase = db.query(BrainstormingPhase).filter(
- BrainstormingPhase.id == resolved.phase_id
- ).first()
+ phase = db.query(BrainstormingPhase).filter(BrainstormingPhase.id == resolved.phase_id).first()
if not phase:
raise PathNotFoundError(resolved.path)
@@ -523,6 +511,7 @@ def _get_summary_document_content(
# Create a simple object with content_json attribute to use existing function
class ContentHolder:
pass
+
holder = ContentHolder()
holder.content_json = content_json
content = _build_spec_summary_from_json(holder)
@@ -543,9 +532,7 @@ class ContentHolder:
def _get_section_content(db: Session, resolved: ResolvedPath) -> Dict[str, Any]:
"""Get content for section files (by-section/*.md)."""
- phase = db.query(BrainstormingPhase).filter(
- BrainstormingPhase.id == resolved.phase_id
- ).first()
+ phase = db.query(BrainstormingPhase).filter(BrainstormingPhase.id == resolved.phase_id).first()
if not phase:
raise PathNotFoundError(resolved.path)
@@ -601,17 +588,13 @@ def _get_feature_file_content(db: Session, resolved: ResolvedPath) -> Dict[str,
# Resolve any IMAGE_REF:xxx references to signed URLs
content = _resolve_image_references(content)
# Append any feature description images that weren't embedded
- content = _append_description_images(
- content, feature.description_image_attachments, "feature"
- )
+ content = _append_description_images(content, feature.description_image_attachments, "feature")
elif resolved.file_name == "prompt_plan.md":
if feature.prompt_plan_text:
# Resolve image references before wrapping with instructions
resolved_text = _resolve_image_references(feature.prompt_plan_text)
# Append any feature description images that weren't embedded
- resolved_text = _append_description_images(
- resolved_text, feature.description_image_attachments, "feature"
- )
+ resolved_text = _append_description_images(resolved_text, feature.description_image_attachments, "feature")
content = _wrap_prompt_plan_with_instructions(resolved_text, feature, resolved)
else:
content = "# No prompt plan\n\nThis feature does not have a prompt plan."
@@ -633,9 +616,7 @@ def _get_feature_file_content(db: Session, resolved: ResolvedPath) -> Dict[str,
}
-def list_directory(
- db: Session, project_id: UUID, resolved: ResolvedPath
-) -> Dict[str, Any]:
+def list_directory(db: Session, project_id: UUID, resolved: ResolvedPath) -> Dict[str, Any]:
"""
List contents of a virtual directory.
@@ -691,9 +672,7 @@ def list_directory(
raise ValueError(f"Cannot list directory for node type: {resolved.node_type}")
-def _list_root(
- db: Session, project_id: UUID, resolved: ResolvedPath
-) -> Dict[str, Any]:
+def _list_root(db: Session, project_id: UUID, resolved: ResolvedPath) -> Dict[str, Any]:
"""List root directory (top-level directories)."""
entries = [
{"name": "phases/", "type": "d", "description": "System-generated and user-defined features"},
@@ -717,16 +696,10 @@ def _list_root(
}
-def _list_phases_dir(
- db: Session, project_id: UUID, resolved: ResolvedPath
-) -> Dict[str, Any]:
+def _list_phases_dir(db: Session, project_id: UUID, resolved: ResolvedPath) -> Dict[str, Any]:
"""List /phases/ directory (system-generated and user-defined)."""
# Count system-generated phases
- phase_count = (
- db.query(BrainstormingPhase)
- .filter(BrainstormingPhase.project_id == project_id)
- .count()
- )
+ phase_count = db.query(BrainstormingPhase).filter(BrainstormingPhase.project_id == project_id).count()
# Count user-defined features
user_feature_count = (
@@ -739,10 +712,7 @@ def _list_phases_dir(
Feature.feature_type == FeatureType.IMPLEMENTATION,
Feature.status == FeatureStatus.ACTIVE,
)
- .filter(
- (Feature.provenance == FeatureProvenance.USER)
- | (Feature.external_provider.isnot(None))
- )
+ .filter((Feature.provenance == FeatureProvenance.USER) | (Feature.external_provider.isnot(None)))
.count()
)
@@ -777,9 +747,7 @@ def _list_phases_dir(
}
-def _list_system_generated_dir(
- db: Session, project_id: UUID, resolved: ResolvedPath
-) -> Dict[str, Any]:
+def _list_system_generated_dir(db: Session, project_id: UUID, resolved: ResolvedPath) -> Dict[str, Any]:
"""List /phases/system-generated/ directory (all phases)."""
phases = (
db.query(BrainstormingPhase)
@@ -798,14 +766,16 @@ def _list_system_generated_dir(
progress = PhaseProgressService.get_phase_progress(db, phase.id)
- entries.append({
- "name": f"{phase_slug}/",
- "type": "d",
- "title": phase.title,
- "total_features": progress.total_features,
- "completed_features": progress.completed_features,
- "progress": progress.progress_percent,
- })
+ entries.append(
+ {
+ "name": f"{phase_slug}/",
+ "type": "d",
+ "title": phase.title,
+ "total_features": progress.total_features,
+ "completed_features": progress.completed_features,
+ "progress": progress.progress_percent,
+ }
+ )
text_lines.append(f"drwxr-xr-x {phase_slug}/")
return {
@@ -818,9 +788,7 @@ def _list_system_generated_dir(
}
-def _list_user_defined_dir(
- db: Session, project_id: UUID, resolved: ResolvedPath
-) -> Dict[str, Any]:
+def _list_user_defined_dir(db: Session, project_id: UUID, resolved: ResolvedPath) -> Dict[str, Any]:
"""List /phases/user-defined/ directory."""
# Count user-defined features
user_features = (
@@ -833,10 +801,7 @@ def _list_user_defined_dir(
Feature.feature_type == FeatureType.IMPLEMENTATION,
Feature.status == FeatureStatus.ACTIVE,
)
- .filter(
- (Feature.provenance == FeatureProvenance.USER)
- | (Feature.external_provider.isnot(None))
- )
+ .filter((Feature.provenance == FeatureProvenance.USER) | (Feature.external_provider.isnot(None)))
.all()
)
@@ -872,9 +837,7 @@ def _list_user_defined_dir(
}
-def _list_user_defined_features_dir(
- db: Session, project_id: UUID, resolved: ResolvedPath
-) -> Dict[str, Any]:
+def _list_user_defined_features_dir(db: Session, project_id: UUID, resolved: ResolvedPath) -> Dict[str, Any]:
"""List /phases/user-defined/features/ directory (modules with user-defined features)."""
# Get all modules in project
modules = (
@@ -904,10 +867,7 @@ def _list_user_defined_features_dir(
Feature.feature_type == FeatureType.IMPLEMENTATION,
Feature.status == FeatureStatus.ACTIVE,
)
- .filter(
- (Feature.provenance == FeatureProvenance.USER)
- | (Feature.external_provider.isnot(None))
- )
+ .filter((Feature.provenance == FeatureProvenance.USER) | (Feature.external_provider.isnot(None)))
.all()
)
@@ -915,22 +875,22 @@ def _list_user_defined_features_dir(
continue # Skip modules with no user-defined features
feature_count = len(features)
- completed = sum(
- 1 for f in features if f.completion_status and f.completion_status.value == "completed"
- )
+ completed = sum(1 for f in features if f.completion_status and f.completion_status.value == "completed")
progress = (completed / feature_count * 100) if feature_count > 0 else 0
total_features += feature_count
completed_features += completed
- entries.append({
- "name": f"{module_slug}/",
- "type": "d",
- "title": module.title,
- "feature_count": feature_count,
- "completed": completed,
- "progress": round(progress, 1),
- })
+ entries.append(
+ {
+ "name": f"{module_slug}/",
+ "type": "d",
+ "title": module.title,
+ "feature_count": feature_count,
+ "completed": completed,
+ "progress": round(progress, 1),
+ }
+ )
text_lines.append(f"drwxr-xr-x {module_slug}/")
overall_progress = (completed_features / total_features * 100) if total_features > 0 else 0
@@ -959,10 +919,7 @@ def _list_user_defined_module(db: Session, resolved: ResolvedPath) -> Dict[str,
Feature.feature_type == FeatureType.IMPLEMENTATION,
Feature.status == FeatureStatus.ACTIVE,
)
- .filter(
- (Feature.provenance == FeatureProvenance.USER)
- | (Feature.external_provider.isnot(None))
- )
+ .filter((Feature.provenance == FeatureProvenance.USER) | (Feature.external_provider.isnot(None)))
.order_by(Feature.created_at)
.all()
)
@@ -980,16 +937,18 @@ def _list_user_defined_module(db: Session, resolved: ResolvedPath) -> Dict[str,
if next_feature is None and status == "pending":
next_feature = feature.feature_key
- entries.append({
- "name": f"{feature_slug}/",
- "type": "d",
- "feature_key": feature.feature_key,
- "title": feature.title,
- "status": status,
- "priority": priority,
- "provenance": provenance,
- "external_provider": feature.external_provider,
- })
+ entries.append(
+ {
+ "name": f"{feature_slug}/",
+ "type": "d",
+ "feature_key": feature.feature_key,
+ "title": feature.title,
+ "status": status,
+ "priority": priority,
+ "provenance": provenance,
+ "external_provider": feature.external_provider,
+ }
+ )
text_lines.append(f"drwxr-xr-x {feature_slug}/")
completed = sum(1 for f in features if f.completion_status and f.completion_status.value == "completed")
@@ -1011,7 +970,7 @@ def _list_user_defined_module(db: Session, resolved: ResolvedPath) -> Dict[str,
def _list_user_defined_feature(db: Session, resolved: ResolvedPath) -> Dict[str, Any]:
"""List user-defined feature directory (implementations/, conversations/)."""
- from app.models.thread import Thread, ContextType
+ from app.models.thread import ContextType, Thread
from app.models.thread_item import ThreadItem
feature = db.query(Feature).filter(Feature.id == resolved.feature_id).first()
@@ -1019,11 +978,7 @@ def _list_user_defined_feature(db: Session, resolved: ResolvedPath) -> Dict[str,
# Count implementations
implementation_count = 0
if feature:
- implementation_count = (
- db.query(Implementation)
- .filter(Implementation.feature_id == feature.id)
- .count()
- )
+ implementation_count = db.query(Implementation).filter(Implementation.feature_id == feature.id).count()
# Check if feature has a conversation thread with items
has_conversation = False
@@ -1037,11 +992,7 @@ def _list_user_defined_feature(db: Session, resolved: ResolvedPath) -> Dict[str,
.first()
)
if thread:
- item_count = (
- db.query(ThreadItem)
- .filter(ThreadItem.thread_id == thread.id)
- .count()
- )
+ item_count = db.query(ThreadItem).filter(ThreadItem.thread_id == thread.id).count()
has_conversation = item_count > 0
entries = [
@@ -1187,14 +1138,16 @@ def _get_team_info_json(db: Session, project_id: UUID) -> Dict[str, Any]:
}
for a in assignments
]
- roles.append({
- "id": str(role_def.id),
- "role_key": role_def.role_key,
- "title": role_def.title,
- "description": role_def.description,
- "is_default": role_def.is_default,
- "members": members,
- })
+ roles.append(
+ {
+ "id": str(role_def.id),
+ "role_key": role_def.role_key,
+ "title": role_def.title,
+ "description": role_def.description,
+ "is_default": role_def.is_default,
+ "members": members,
+ }
+ )
content = json.dumps({"roles": roles}, indent=2)
@@ -1220,9 +1173,7 @@ def _list_phase(db: Session, resolved: ResolvedPath) -> Dict[str, Any]:
]
# Get phase metadata
- phase = db.query(BrainstormingPhase).filter(
- BrainstormingPhase.id == resolved.phase_id
- ).first()
+ phase = db.query(BrainstormingPhase).filter(BrainstormingPhase.id == resolved.phase_id).first()
from app.services.phase_progress_service import PhaseProgressService
@@ -1273,9 +1224,7 @@ def _list_document_dir(db: Session, resolved: ResolvedPath) -> Dict[str, Any]:
def _list_sections(db: Session, resolved: ResolvedPath) -> Dict[str, Any]:
"""List by-section/ directory with all available sections."""
- phase = db.query(BrainstormingPhase).filter(
- BrainstormingPhase.id == resolved.phase_id
- ).first()
+ phase = db.query(BrainstormingPhase).filter(BrainstormingPhase.id == resolved.phase_id).first()
sections = []
@@ -1290,29 +1239,35 @@ def _list_sections(db: Session, resolved: ResolvedPath) -> Dict[str, Any]:
# Try structured content first
if content_json and isinstance(content_json, dict) and "sections" in content_json:
for section in content_json["sections"]:
- sections.append({
- "id": section.get("id", "unknown"),
- "title": section.get("title", "Untitled"),
- })
+ sections.append(
+ {
+ "id": section.get("id", "unknown"),
+ "title": section.get("title", "Untitled"),
+ }
+ )
elif full_content:
# Fall back to markdown parsing
toc = extract_toc(full_content)
for entry in toc:
- sections.append({
- "id": entry["id"],
- "title": entry["title"],
- })
+ sections.append(
+ {
+ "id": entry["id"],
+ "title": entry["title"],
+ }
+ )
entries = []
text_lines = []
for section in sections:
file_name = f"{section['id']}.md"
- entries.append({
- "name": file_name,
- "type": "f",
- "title": section["title"],
- })
+ entries.append(
+ {
+ "name": file_name,
+ "type": "f",
+ "title": section["title"],
+ }
+ )
text_lines.append(f"-rw-r--r-- {file_name}")
return {
@@ -1338,14 +1293,16 @@ def _list_features_dir(db: Session, resolved: ResolvedPath) -> Dict[str, Any]:
for mod in phase_progress.modules:
module_slug = module_dir_name(mod.module_key, mod.title)
- entries.append({
- "name": f"{module_slug}/",
- "type": "d",
- "title": mod.title,
- "feature_count": mod.total_features,
- "completed": mod.completed_features,
- "progress": mod.progress_percent,
- })
+ entries.append(
+ {
+ "name": f"{module_slug}/",
+ "type": "d",
+ "title": mod.title,
+ "feature_count": mod.total_features,
+ "completed": mod.completed_features,
+ "progress": mod.progress_percent,
+ }
+ )
text_lines.append(f"drwxr-xr-x {module_slug}/")
return {
@@ -1378,9 +1335,7 @@ def _list_module(db: Session, resolved: ResolvedPath) -> Dict[str, Any]:
from app.services.phase_progress_service import PhaseProgressService
- total, completed, pending, in_prog, pct, next_feature = (
- PhaseProgressService.compute_feature_stats(features)
- )
+ total, completed, pending, in_prog, pct, next_feature = PhaseProgressService.compute_feature_stats(features)
entries = []
text_lines = []
@@ -1391,16 +1346,18 @@ def _list_module(db: Session, resolved: ResolvedPath) -> Dict[str, Any]:
priority = feature.priority.value if feature.priority else "important"
provenance = feature.provenance.value if feature.provenance else "system"
- entries.append({
- "name": f"{feature_slug}/",
- "type": "d",
- "feature_key": feature.feature_key,
- "title": feature.title,
- "status": f_status,
- "priority": priority,
- "provenance": provenance,
- "external_provider": feature.external_provider,
- })
+ entries.append(
+ {
+ "name": f"{feature_slug}/",
+ "type": "d",
+ "feature_key": feature.feature_key,
+ "title": feature.title,
+ "status": f_status,
+ "priority": priority,
+ "provenance": provenance,
+ "external_provider": feature.external_provider,
+ }
+ )
text_lines.append(f"drwxr-xr-x {feature_slug}/")
return {
@@ -1419,7 +1376,7 @@ def _list_module(db: Session, resolved: ResolvedPath) -> Dict[str, Any]:
def _list_feature(db: Session, resolved: ResolvedPath) -> Dict[str, Any]:
"""List feature directory (implementations/, conversations/)."""
- from app.models.thread import Thread, ContextType
+ from app.models.thread import ContextType, Thread
from app.models.thread_item import ThreadItem
feature = db.query(Feature).filter(Feature.id == resolved.feature_id).first()
@@ -1427,11 +1384,7 @@ def _list_feature(db: Session, resolved: ResolvedPath) -> Dict[str, Any]:
# Count implementations
implementation_count = 0
if feature:
- implementation_count = (
- db.query(Implementation)
- .filter(Implementation.feature_id == feature.id)
- .count()
- )
+ implementation_count = db.query(Implementation).filter(Implementation.feature_id == feature.id).count()
# Check if feature has a conversation thread with items
has_conversation = False
@@ -1445,11 +1398,7 @@ def _list_feature(db: Session, resolved: ResolvedPath) -> Dict[str, Any]:
.first()
)
if thread:
- item_count = (
- db.query(ThreadItem)
- .filter(ThreadItem.thread_id == thread.id)
- .count()
- )
+ item_count = db.query(ThreadItem).filter(ThreadItem.thread_id == thread.id).count()
has_conversation = item_count > 0
entries = [
@@ -1484,9 +1433,7 @@ def _list_feature(db: Session, resolved: ResolvedPath) -> Dict[str, Any]:
}
-def _list_grounding_dir(
- db: Session, project_id: UUID, resolved: ResolvedPath
-) -> Dict[str, Any]:
+def _list_grounding_dir(db: Session, project_id: UUID, resolved: ResolvedPath) -> Dict[str, Any]:
"""List /for-coding-agents/ directory."""
from app.services.grounding_service import GroundingService
@@ -1497,22 +1444,26 @@ def _list_grounding_dir(
for gf in files:
permissions = "-rw-r--r--" if gf.is_protected else "-rw-rw-rw-"
- entries.append({
- "name": gf.filename,
- "type": "f",
- "is_protected": gf.is_protected,
- "updated_at": gf.updated_at.isoformat() if gf.updated_at else None,
- "writable": True,
- })
+ entries.append(
+ {
+ "name": gf.filename,
+ "type": "f",
+ "is_protected": gf.is_protected,
+ "updated_at": gf.updated_at.isoformat() if gf.updated_at else None,
+ "writable": True,
+ }
+ )
text_lines.append(f"{permissions} {gf.filename}")
# Add static mfbt-usage-guide directory
- entries.append({
- "name": "mfbt-usage-guide/",
- "type": "d",
- "description": "MFBT usage guides for coding agents",
- "is_static": True,
- })
+ entries.append(
+ {
+ "name": "mfbt-usage-guide/",
+ "type": "d",
+ "description": "MFBT usage guides for coding agents",
+ "is_static": True,
+ }
+ )
text_lines.append("drwxr-xr-x mfbt-usage-guide/")
return {
@@ -1526,9 +1477,7 @@ def _list_grounding_dir(
}
-def _get_grounding_file_content(
- db: Session, project_id: UUID, resolved: ResolvedPath
-) -> Dict[str, Any]:
+def _get_grounding_file_content(db: Session, project_id: UUID, resolved: ResolvedPath) -> Dict[str, Any]:
"""Get content for grounding files (/for-coding-agents/*)."""
from app.services.grounding_service import GroundingService
@@ -1615,6 +1564,7 @@ def _get_extension_from_content_type(content_type: str) -> str:
def _transform_mentions_for_vfs(body: str) -> str:
"""Transform @[Name](uuid) to @Name (user_id: uuid) for coding agents."""
import re
+
# Match @[Name](id) where id can be any alphanumeric+hyphen string
pattern = re.compile(r"@\[([^\]]+)\]\(([a-zA-Z0-9-]+)\)")
return pattern.sub(r"@\1 (user_id: \2)", body)
@@ -1645,9 +1595,7 @@ def _get_thread_images(db: Session, thread_id) -> List[dict]:
return images
-def _list_conversations_dir(
- db: Session, resolved: ResolvedPath
-) -> Dict[str, Any]:
+def _list_conversations_dir(db: Session, resolved: ResolvedPath) -> Dict[str, Any]:
"""List {feature}/conversations/ directory.
Note: Images are now embedded directly in conversations.md as markdown
@@ -1673,11 +1621,7 @@ def _list_conversations_dir(
if resolved.thread_id:
thread = db.query(Thread).filter(Thread.id == resolved.thread_id).first()
if thread:
- item_count = (
- db.query(ThreadItem)
- .filter(ThreadItem.thread_id == resolved.thread_id)
- .count()
- )
+ item_count = db.query(ThreadItem).filter(ThreadItem.thread_id == resolved.thread_id).count()
return {
"path": resolved.path,
@@ -1707,9 +1651,10 @@ def _get_conversations_file_content(
The upload token is only generated when both project_id, user_id,
and resolved.feature_id are available.
"""
+ from sqlalchemy.orm import joinedload
+
from app.models.thread import Thread
from app.models.thread_item import ThreadItem, ThreadItemType
- from sqlalchemy.orm import joinedload
if not resolved.thread_id:
return {
@@ -1718,11 +1663,7 @@ def _get_conversations_file_content(
"content": "# No Conversation\n\nNo discussion has occurred for this feature yet.",
}
- thread = (
- db.query(Thread)
- .filter(Thread.id == resolved.thread_id)
- .first()
- )
+ thread = db.query(Thread).filter(Thread.id == resolved.thread_id).first()
if not thread:
return {
@@ -1782,9 +1723,7 @@ def _get_conversations_file_content(
width = img.get("width", 0)
height = img.get("height", 0)
if width and height:
- markdown_lines.append(
- f"\n\n"
- )
+ markdown_lines.append(f"\n\n")
else:
markdown_lines.append(f"\n\n")
@@ -1823,8 +1762,8 @@ def _get_conversations_file_content(
# Generate upload token if we have the required context
if project_id and user_id and resolved.feature_id:
- from app.services.image_service import ImageService
from app.config import settings
+ from app.services.image_service import ImageService
upload_token = ImageService.generate_upload_token(
project_id=project_id,
@@ -1834,10 +1773,10 @@ def _get_conversations_file_content(
base_url = settings.base_url.rstrip("/")
markdown_lines.append("1. (Optional) Upload images via API:\n")
- markdown_lines.append(f" curl -X POST \"{base_url}/api/v1/mcp/images/upload\" \\\n")
- markdown_lines.append(f" -H \"Authorization: Bearer {upload_token}\" \\\n")
- markdown_lines.append(" -F \"file=@/path/to/screenshot.png\"\n\n")
- markdown_lines.append(" Response: {\"image_id\": \"abc123\", \"expires_in_hours\": 1}\n\n")
+ markdown_lines.append(f' curl -X POST "{base_url}/api/v1/mcp/images/upload" \\\n')
+ markdown_lines.append(f' -H "Authorization: Bearer {upload_token}" \\\n')
+ markdown_lines.append(' -F "file=@/path/to/screenshot.png"\n\n')
+ markdown_lines.append(' Response: {"image_id": "abc123", "expires_in_hours": 1}\n\n')
markdown_lines.append(" Supported formats: png, jpg, jpeg, gif, webp (max 10MB)\n")
markdown_lines.append(" Token valid for 1 hour.\n\n")
else:
@@ -1845,7 +1784,9 @@ def _get_conversations_file_content(
markdown_lines.append("2. Post comment (with optional image references):\n")
markdown_lines.append("```json\n")
- markdown_lines.append('{"action": "add_comment", "body_markdown": "Your text. Use @[Name](user_id) for mentions.", "images": ["Loading...
Access denied
+
Platform usage analytics and plan recommendations
{error}
+
View LLM call details for debugging, troubleshooting, and analysis
+
{selectedUserId || selectedProjectId
? "Try adjusting your filters to see more results."
: "LLM call logs will appear here after running generation tasks like brainstorming, spec generation, or feature extraction."}
diff --git a/frontend/app/dashboard/page.tsx b/frontend/app/dashboard/page.tsx
index d1374e8..6936159 100644
--- a/frontend/app/dashboard/page.tsx
+++ b/frontend/app/dashboard/page.tsx
@@ -46,8 +46,7 @@ export default function DashboardPage() {
prevStats.llm_usage_this_month.total_prompt_tokens + usageLog.prompt_tokens,
total_completion_tokens:
prevStats.llm_usage_this_month.total_completion_tokens + usageLog.completion_tokens,
- total_tokens:
- prevStats.llm_usage_this_month.total_tokens + newTokens,
+ total_tokens: prevStats.llm_usage_this_month.total_tokens + newTokens,
total_cost_usd:
usageLog.cost_usd !== null
? (prevStats.llm_usage_this_month.total_cost_usd || 0) + usageLog.cost_usd
@@ -85,7 +84,7 @@ export default function DashboardPage() {
try {
      const data = await apiClient.get