4 changes: 2 additions & 2 deletions .github/PULL_REQUEST_TEMPLATE.md
@@ -16,11 +16,11 @@

## Database Migrations

-
-

## Env Config

-
-

## Relevant Docs

39 changes: 39 additions & 0 deletions .github/workflows/lint.yml
@@ -0,0 +1,39 @@
name: Lint

on:
  push:
    branches: [main]
  pull_request:
    branches: [main]

jobs:
  backend-lint:
    name: Backend (Ruff)
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: astral-sh/ruff-action@v3
        with:
          args: check
          src: backend/ code-explorer/
      - uses: astral-sh/ruff-action@v3
        with:
          args: format --check
          src: backend/ code-explorer/

  frontend-lint:
    name: Frontend (ESLint + Prettier)
    runs-on: ubuntu-latest
    defaults:
      run:
        working-directory: frontend
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-node@v4
        with:
          node-version: 22
          cache: npm
          cache-dependency-path: frontend/package-lock.json
      - run: npm ci
      - run: npx eslint .
      - run: npx prettier --check .
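
The checks in this workflow can be reproduced locally before pushing. A minimal sketch based only on the steps above, assuming `ruff`, Node 22, and the frontend dependencies are already installed:

```bash
# Backend: same checks as the two ruff-action steps
ruff check backend/ code-explorer/
ruff format --check backend/ code-explorer/

# Frontend: same checks as the frontend-lint job (run from the repo root)
cd frontend
npm ci
npx eslint .
npx prettier --check .
```
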
3 changes: 3 additions & 0 deletions .gitignore
@@ -32,6 +32,9 @@ env/
# UV
.python-version

# Linting
.ruff_cache/

# Testing
.pytest_cache/
.coverage
44 changes: 44 additions & 0 deletions .pre-commit-config.yaml
@@ -0,0 +1,44 @@
repos:
  # Backend: Ruff lint + format
  - repo: https://github.com/astral-sh/ruff-pre-commit
    rev: v0.9.7
    hooks:
      - id: ruff
        name: "backend: ruff lint"
        args: [--fix, --exit-non-zero-on-fix]
        files: ^(backend|code-explorer)/
        types_or: [python, pyi]
      - id: ruff-format
        name: "backend: ruff format"
        files: ^(backend|code-explorer)/
        types_or: [python, pyi]

  # Frontend: ESLint + Prettier
  - repo: local
    hooks:
      - id: frontend-eslint
        name: "frontend: eslint"
        entry: bash -c 'args=(); for f in "$@"; do args+=("${f#frontend/}"); done; cd frontend && npx eslint --fix "${args[@]}"' --
        language: system
        files: ^frontend/.*\.(ts|tsx|js|jsx)$
        types: [file]
      - id: frontend-prettier
        name: "frontend: prettier"
        entry: bash -c 'args=(); for f in "$@"; do args+=("${f#frontend/}"); done; cd frontend && npx prettier --write "${args[@]}"' --
        language: system
        files: ^frontend/.*\.(ts|tsx|js|jsx|css|json|md)$
        types: [file]

  # General checks
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v5.0.0
    hooks:
      - id: trailing-whitespace
        args: [--markdown-linebreak-ext=md]
      - id: end-of-file-fixer
      - id: check-yaml
      - id: check-json
        exclude: ^frontend/package-lock\.json$
      - id: check-merge-conflict
      - id: check-added-large-files
        args: [--maxkb=500]
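
The frontend hooks above are `repo: local` entries whose `entry` wrappers strip the `frontend/` prefix from each staged path and then run ESLint or Prettier from inside `frontend/`, so the tools pick up the project's own configs. Individual hooks from this file can also be run directly through pre-commit's CLI; a short sketch (the file path below is hypothetical):

```bash
# Run a single hook by its id, across the whole repo
pre-commit run ruff --all-files
pre-commit run frontend-prettier --all-files

# Or restrict a hook to specific files (example path is hypothetical)
pre-commit run frontend-eslint --files frontend/src/App.tsx
```
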
19 changes: 18 additions & 1 deletion README.md
@@ -25,7 +25,7 @@ Need to give your existing projects the vibe coding boost? All you need to do is


## 🔍 Grounded and relevant
-mfbt includes Code Exploration and Web Search agents to ensure that the implementation plans that are generated are not just based on your conversations, but are also based on real code exploration and web searches.
+mfbt includes Code Exploration and Web Search agents to ensure that the implementation plans that are generated are not just based on your conversations, but are also based on real code exploration and web searches.

<img src="assets/screenshots/ocr_conversation.png" style="width: 600px">

@@ -137,6 +137,23 @@ This builds all images from local source and enables file watching:

Dependency changes (`pyproject.toml`, `uv.lock`, `package.json`) trigger a full container rebuild.

### 🧹 Code Quality

Pre-commit hooks enforce formatting and linting automatically on every commit.

```bash
# One-time setup
bash scripts/setup-hooks.sh

# Run all hooks manually
pre-commit run --all-files
```

**Hooks included:**
- **Backend**: [Ruff](https://docs.astral.sh/ruff/) lint + format
- **Frontend**: [ESLint](https://eslint.org/) + [Prettier](https://prettier.io/) with Tailwind CSS plugin
- **General**: trailing whitespace, EOF fixer, YAML/JSON validation, merge conflict detection
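
`scripts/setup-hooks.sh` is referenced above but is not included in this diff. A minimal sketch of what such a setup script typically contains, assuming it only installs pre-commit and registers the git hook:

```bash
#!/usr/bin/env bash
# Hypothetical sketch: the real scripts/setup-hooks.sh is not shown in this PR.
set -euo pipefail

# Install pre-commit if it is not already on PATH
command -v pre-commit >/dev/null 2>&1 || pip install pre-commit

# Register the git hook defined by .pre-commit-config.yaml
pre-commit install
```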

### 🧪 Running Tests

```bash
Binary file modified backend/README.md
Binary file not shown.
2 changes: 1 addition & 1 deletion backend/alembic/README
@@ -1 +1 @@
-Generic single-database configuration.
+Generic single-database configuration.
@@ -5,16 +5,17 @@
Create Date: 2025-12-05 14:21:20.764109

"""
+
from typing import Sequence, Union

-from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql

+from alembic import op

# revision identifiers, used by Alembic.
-revision: str = '00ebcf349edc'
-down_revision: Union[str, Sequence[str], None] = '6baa75dcb961'
+revision: str = "00ebcf349edc"
+down_revision: Union[str, Sequence[str], None] = "6baa75dcb961"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None

@@ -23,64 +24,37 @@ def upgrade() -> None:
"""Add feature completion tracking fields."""
# Create the feature_completion_status enum for PostgreSQL
bind = op.get_bind()
if bind.dialect.name == 'postgresql':
if bind.dialect.name == "postgresql":
feature_completion_status = postgresql.ENUM(
'pending', 'in_progress', 'completed',
name='feature_completion_status',
create_type=False
"pending", "in_progress", "completed", name="feature_completion_status", create_type=False
)
feature_completion_status.create(bind, checkfirst=True)

# Add completion tracking columns to features table
op.add_column(
'features',
sa.Column(
'completion_status',
sa.String(20),
server_default='pending',
nullable=False
)
)
op.add_column(
'features',
sa.Column('completion_summary', sa.Text(), nullable=True)
)
op.add_column(
'features',
sa.Column('completed_at', sa.DateTime(timezone=True), nullable=True)
)
op.add_column(
'features',
sa.Column(
'completed_by_id',
sa.UUID(),
nullable=True
)
)
op.add_column("features", sa.Column("completion_status", sa.String(20), server_default="pending", nullable=False))
op.add_column("features", sa.Column("completion_summary", sa.Text(), nullable=True))
op.add_column("features", sa.Column("completed_at", sa.DateTime(timezone=True), nullable=True))
op.add_column("features", sa.Column("completed_by_id", sa.UUID(), nullable=True))

# Add foreign key constraint (PostgreSQL only due to SQLite limitations)
if bind.dialect.name == 'postgresql':
op.create_foreign_key(
'fk_features_completed_by_id',
'features', 'users',
['completed_by_id'], ['id']
)
if bind.dialect.name == "postgresql":
op.create_foreign_key("fk_features_completed_by_id", "features", "users", ["completed_by_id"], ["id"])


def downgrade() -> None:
"""Remove feature completion tracking fields."""
bind = op.get_bind()

# Drop foreign key constraint (PostgreSQL only)
if bind.dialect.name == 'postgresql':
op.drop_constraint('fk_features_completed_by_id', 'features', type_='foreignkey')
if bind.dialect.name == "postgresql":
op.drop_constraint("fk_features_completed_by_id", "features", type_="foreignkey")

# Drop columns
op.drop_column('features', 'completed_by_id')
op.drop_column('features', 'completed_at')
op.drop_column('features', 'completion_summary')
op.drop_column('features', 'completion_status')
op.drop_column("features", "completed_by_id")
op.drop_column("features", "completed_at")
op.drop_column("features", "completion_summary")
op.drop_column("features", "completion_status")

# Drop the enum type (PostgreSQL only)
if bind.dialect.name == 'postgresql':
if bind.dialect.name == "postgresql":
op.execute("DROP TYPE IF EXISTS feature_completion_status")
@@ -5,26 +5,27 @@
Create Date: 2025-11-20 13:03:09.128947

"""
+
from typing import Sequence, Union

-from alembic import op
import sqlalchemy as sa

+from alembic import op

# revision identifiers, used by Alembic.
-revision: str = '021b37581165'
-down_revision: Union[str, Sequence[str], None] = 'ed7322775e46'
+revision: str = "021b37581165"
+down_revision: Union[str, Sequence[str], None] = "ed7322775e46"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None


def upgrade() -> None:
    """Upgrade schema."""
-    op.add_column('implementation_phases', sa.Column('completion_summary', sa.Text(), nullable=True))
-    op.add_column('implementation_phases', sa.Column('triggered_by', sa.String(length=50), nullable=True))
+    op.add_column("implementation_phases", sa.Column("completion_summary", sa.Text(), nullable=True))
+    op.add_column("implementation_phases", sa.Column("triggered_by", sa.String(length=50), nullable=True))


def downgrade() -> None:
    """Downgrade schema."""
-    op.drop_column('implementation_phases', 'triggered_by')
-    op.drop_column('implementation_phases', 'completion_summary')
+    op.drop_column("implementation_phases", "triggered_by")
+    op.drop_column("implementation_phases", "completion_summary")
@@ -9,16 +9,17 @@
instead of using a batched scheduler with trigger records.

"""
+
from typing import Sequence, Union

-from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql

+from alembic import op

# revision identifiers, used by Alembic.
-revision: str = '0c4f46a254f8'
-down_revision: Union[str, Sequence[str], None] = 'u0v1w2x3y4z5'
+revision: str = "0c4f46a254f8"
+down_revision: Union[str, Sequence[str], None] = "u0v1w2x3y4z5"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None

@@ -5,15 +5,16 @@
Create Date: 2025-11-20 10:37:38.546482

"""
+
from typing import Sequence, Union

-from alembic import op
import sqlalchemy as sa

+from alembic import op

# revision identifiers, used by Alembic.
-revision: str = '0c5625ec8ba1'
-down_revision: Union[str, Sequence[str], None] = 'a60117da6409'
+revision: str = "0c5625ec8ba1"
+down_revision: Union[str, Sequence[str], None] = "a60117da6409"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None

@@ -22,28 +23,32 @@ def upgrade() -> None:
"""Upgrade schema."""
# Create project_memberships table
op.create_table(
'project_memberships',
sa.Column('id', sa.UUID(), nullable=False),
sa.Column('project_id', sa.UUID(), nullable=False),
sa.Column('user_id', sa.UUID(), nullable=False),
sa.Column('role', sa.String(length=20), nullable=False),
sa.Column('created_at', sa.DateTime(timezone=True), server_default=sa.text('now()'), nullable=False),
sa.ForeignKeyConstraint(['project_id'], ['projects.id'], name=op.f('fk_project_memberships_project_id_projects'), ondelete='CASCADE'),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], name=op.f('fk_project_memberships_user_id_users'), ondelete='CASCADE'),
sa.PrimaryKeyConstraint('id', name=op.f('pk_project_memberships')),
sa.UniqueConstraint('project_id', 'user_id', name=op.f('uq_project_memberships_project_id_user_id'))
"project_memberships",
sa.Column("id", sa.UUID(), nullable=False),
sa.Column("project_id", sa.UUID(), nullable=False),
sa.Column("user_id", sa.UUID(), nullable=False),
sa.Column("role", sa.String(length=20), nullable=False),
sa.Column("created_at", sa.DateTime(timezone=True), server_default=sa.text("now()"), nullable=False),
sa.ForeignKeyConstraint(
["project_id"], ["projects.id"], name=op.f("fk_project_memberships_project_id_projects"), ondelete="CASCADE"
),
sa.ForeignKeyConstraint(
["user_id"], ["users.id"], name=op.f("fk_project_memberships_user_id_users"), ondelete="CASCADE"
),
sa.PrimaryKeyConstraint("id", name=op.f("pk_project_memberships")),
sa.UniqueConstraint("project_id", "user_id", name=op.f("uq_project_memberships_project_id_user_id")),
)

# Create indexes for foreign keys
op.create_index(op.f('ix_project_memberships_project_id'), 'project_memberships', ['project_id'], unique=False)
op.create_index(op.f('ix_project_memberships_user_id'), 'project_memberships', ['user_id'], unique=False)
op.create_index(op.f("ix_project_memberships_project_id"), "project_memberships", ["project_id"], unique=False)
op.create_index(op.f("ix_project_memberships_user_id"), "project_memberships", ["user_id"], unique=False)


def downgrade() -> None:
"""Downgrade schema."""
# Drop indexes
op.drop_index(op.f('ix_project_memberships_user_id'), table_name='project_memberships')
op.drop_index(op.f('ix_project_memberships_project_id'), table_name='project_memberships')
op.drop_index(op.f("ix_project_memberships_user_id"), table_name="project_memberships")
op.drop_index(op.f("ix_project_memberships_project_id"), table_name="project_memberships")

# Drop table
op.drop_table('project_memberships')
op.drop_table("project_memberships")