diff --git a/community_mcp_servers/.env.example b/community_mcp_servers/.env.example
new file mode 100644
index 00000000..ea8e1fd8
--- /dev/null
+++ b/community_mcp_servers/.env.example
@@ -0,0 +1,122 @@
+# LLM Provider Configuration (Required)
+
+# Choose your LLM provider: gemini, groq, openai, or lightning
+LLM_PROVIDER=gemini
+
+# API Keys (provide based on your LLM_PROVIDER choice)
+GOOGLE_API_KEY=your_google_api_key_here
+# GROQ_API_KEY=your_groq_api_key_here
+# OPENAI_API_KEY=your_openai_api_key_here
+# LIGHTNING_API_KEY=your_lightning_api_key_here
+
+# Optional: Custom model name
+# MODEL=models/gemini-2.0-flash-exp
+
+# Optional: Lightning AI custom endpoint
+# LIGHTNING_BASE_URL=https://lightning.ai/api/v1
+
+# GitHub Agent Configuration
+
+# Required: GitHub Personal Access Token
+# Create at: https://github.com/settings/tokens
+GITHUB_PERSONAL_ACCESS_TOKEN=your_github_token_here
+
+# Optional: GitHub MCP Server Configuration
+# GITHUB_MCP_BEARER_TOKEN=your_bearer_token
+# GITHUB_MCP_SERVER_URL=https://api.githubcopilot.com/mcp/
+
+# Slack Agent Configuration
+# See: https://github.com/korotovsky/slack-mcp-server
+
+# Authentication: Choose ONE of these three methods:
+
+# Method 1: User OAuth Token (Recommended - full access)
+# Create at: https://api.slack.com/apps
+SLACK_MCP_XOXP_TOKEN=xoxp-your-token-here
+
+# Method 2: Bot Token (Limited access - invited channels only, no search)
+# SLACK_MCP_XOXB_TOKEN=xoxb-your-bot-token-here
+
+# Method 3: Browser Tokens (Stealth mode - no permissions needed)
+# Extract from browser DevTools while logged into Slack
+# SLACK_MCP_XOXC_TOKEN=xoxc-your-browser-token-here
+# SLACK_MCP_XOXD_TOKEN=xoxd-your-browser-cookie-here
+
+# Optional: Enable message posting
+# Options: empty/false (disabled), true (all channels), or comma-separated channel IDs
+# Use ! prefix to allow all EXCEPT specified channels: !C123,C456
+# SLACK_MCP_ADD_MESSAGE_TOOL=false
+
+# Optional: Auto-mark sent messages as read
+# SLACK_MCP_ADD_MESSAGE_MARK=true
+
+# Optional: Transport Configuration
+# SLACK_MCP_TRANSPORT=stdio
+# SLACK_MCP_SERVER_URL=http://127.0.0.1:13080/sse
+# SLACK_MCP_API_KEY=your-api-key-here
+# SLACK_MCP_HOST=127.0.0.1
+# SLACK_MCP_PORT=13080
+
+# Optional: Enterprise Slack settings
+# SLACK_MCP_USER_AGENT=your-browser-user-agent
+# SLACK_MCP_CUSTOM_TLS=true
+# SLACK_MCP_PROXY=http://proxy.example.com:8080
+
+# Optional: Cache configuration
+# SLACK_MCP_USERS_CACHE=/path/to/users_cache.json
+# SLACK_MCP_CHANNELS_CACHE=/path/to/channels_cache_v2.json
+
+# Optional: Logging
+# SLACK_MCP_LOG_LEVEL=info
+
+# Jira Agent Configuration
+# Uses mcp-atlassian package via uvx (Python)
+# See: https://github.com/sooperset/mcp-atlassian
+# Configured for: https://mifosforge.jira.com
+
+# Required: Jira Instance URL
+JIRA_URL=https://mifosforge.jira.com
+
+# Authentication: Choose ONE method
+
+# Method 1: API Token (Recommended for Cloud)
+# Create at: https://id.atlassian.com/manage-profile/security/api-tokens
+JIRA_USERNAME=your.email@company.com
+JIRA_API_TOKEN=your_jira_api_token_here
+
+# Method 2: Personal Access Token (Server/Data Center)
+# Go to: Profile → Personal Access Tokens → Create token
+# JIRA_PERSONAL_TOKEN=your_personal_access_token_here
+
+# Optional: SSL verification (set to false for self-signed certs)
+# JIRA_SSL_VERIFY=true
+
+# Optional: Project filtering (limit to specific projects)
+# JIRA_PROJECTS_FILTER=MIFOS,FINERACT
+
+# Optional: Read-only mode (disable write operations)
+# READ_ONLY_MODE=true
+
+# Optional: Enable only specific tools
+# ENABLED_TOOLS=jira_search,jira_get_issue
+
+# Optional: Transport Configuration
+# Default: stdio (uses uvx mcp-atlassian)
+# JIRA_MCP_TRANSPORT=stdio
+
+# Optional: Use Docker instead of uvx
+# JIRA_MCP_USE_DOCKER=false
+# JIRA_MCP_DOCKER_IMAGE=ghcr.io/sooperset/mcp-atlassian:latest
+
+# Optional: Proxy settings
+# HTTP_PROXY=http://proxy.example.com:8080
+# HTTPS_PROXY=http://proxy.example.com:8080
+# NO_PROXY=localhost,127.0.0.1
+
+# Optional: Custom HTTP headers (for corporate environments)
+# Format: key=value,key2=value2
+# JIRA_CUSTOM_HEADERS=X-Custom-Header=value
+
+# Optional: Logging
+# MCP_VERBOSE=true
+# MCP_VERY_VERBOSE=false
diff --git a/community_mcp_servers/.gitignore b/community_mcp_servers/.gitignore
new file mode 100644
index 00000000..b46cdcbb
--- /dev/null
+++ b/community_mcp_servers/.gitignore
@@ -0,0 +1,51 @@
+*.env
+
+# Python
+__pycache__/
+*.py[cod]
+*$py.class
+*.so
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+.venv/
+venv/
+ENV/
+env/
+
+# uv
+uv.lock
+.python-version
+
+# IDEs
+.vscode/
+
+# Node
+node_modules/
+package-lock.json
+npm-debug.log*
+yarn-debug.log*
+yarn-error.log*
+.pnpm-debug.log*
+
+# Testing
+.pytest_cache/
+.coverage
+htmlcov/
+*.cover
+
+# Logs
+*.log
+
diff --git a/community_mcp_servers/README.md b/community_mcp_servers/README.md
new file mode 100644
index 00000000..f663e78b
--- /dev/null
+++ b/community_mcp_servers/README.md
@@ -0,0 +1,195 @@
+# Community AI MCP Agent
+
+A unified Python CLI for interacting with GitHub, Slack, and Jira using the Model Context Protocol (MCP) and LangChain.
+
+## Features
+
+- **GitHub Agent**: Repository management, issues, PRs, and code operations
+- **Slack Agent**: Channel management, messaging, and workspace interactions
+- **Jira Agent**: Issue tracking, project management, and workflow automation
+- **Multi-LLM Support**: Works with Gemini, Groq, OpenAI, and Lightning AI
+- **Extensible Architecture**: Built on LangChain and LangGraph for easy customization
+
+## Quick Start
+
+### 1. Installation
+
+```bash
+# Clone the repository
+cd community_mcp_servers
+
+# Install Python dependencies (using uv or pip)
+uv sync
+# or
+pip install -r requirements.txt
+
+# Install Node.js dependencies for Slack agent (requires Node.js)
+cd agents
+npm install
+cd ..
+```
+
+### 2. Configuration
+
+Copy the example environment file and configure your credentials:
+
+```bash
+cp .env.example .env
+```
+
+**Minimum required configuration:**
+
+```env
+# Choose your LLM provider
+LLM_PROVIDER=gemini
+GOOGLE_API_KEY=your_key_here
+
+# Enable agents as needed
+GITHUB_PERSONAL_ACCESS_TOKEN=your_token_here
+SLACK_MCP_XOXP_TOKEN=xoxp-your-token-here
+JIRA_URL=https://your-company.atlassian.net
+JIRA_USERNAME=your.email@company.com
+JIRA_API_TOKEN=your_token_here
+```
+
+### 3. Usage
+
+```bash
+# Run the GitHub agent
+python main.py github
+
+# Run the Slack agent
+python main.py slack
+
+# Run the Jira agent
+python main.py jira
+
+# Get help for any agent
+python main.py github --help
+```
+
+## Agent Details
+
+### GitHub Agent
+
+Interact with GitHub repositories using your personal access token.
+
+**Required:**
+
+- `GITHUB_PERSONAL_ACCESS_TOKEN` - [Create here](https://github.com/settings/tokens)
+
+**Capabilities:**
+
+- Repository operations (create, clone, search)
+- Issue and PR management
+- Code search and file operations
+- Workflow automation
+
+### Slack Agent
+
+Connect to Slack workspaces and manage communications.
+
+**Required:**
+
+- `SLACK_MCP_XOXP_TOKEN` - [Create Slack app](https://api.slack.com/apps)
+
+**Capabilities:**
+
+- Channel and user management
+- Message posting and retrieval
+- Workspace information
+
+**Optional:** Enable message posting with `SLACK_MCP_ADD_MESSAGE_TOOL=true`
+
+### Jira Agent
+
+Manage Jira projects and issues programmatically.
+
+**Required:**
+
+- `JIRA_URL` - Your Jira instance URL
+- `JIRA_USERNAME` - Your email
+- `JIRA_API_TOKEN` - [Create here](https://id.atlassian.com/manage-profile/security/api-tokens)
+
+**Capabilities:**
+
+- Issue CRUD operations
+- Project and sprint management
+- Advanced JQL searches
+- Custom field handling
+
+## Implementation Details
+
+- **MCP integration:** Agents use an internal MCP-based client flow implemented in [community_mcp_servers/lib/base_agent.py](community_mcp_servers/lib/base_agent.py). The CLI shell for each agent is created with the helper in [community_mcp_servers/lib/base_mcp.py](community_mcp_servers/lib/base_mcp.py) which exposes commands like `list-tools`, `chat`, `invoke-tool`, and `health`.
+
+- **MCP client used:** The code constructs a `MultiServerMCPClient` (from the `langchain_mcp_adapters` package) inside `BaseAgent.initialize()` to discover and load remote MCP tools. Tools discovered from the MCP endpoints are converted into LangGraph/LangChain-compatible tool definitions and used to create a React-style agent via `langgraph.prebuilt.create_react_agent`.
+
+- **Transport options:** Agents support multiple transport modes:
+  - `stdio` — runs a local process (an `npx`/`uvx` package or a Docker image) and communicates over stdio. Examples:
+    - Slack: runs `npx -y slack-mcp-server@latest --transport stdio` (see [community_mcp_servers/agents/slack_agent.py](community_mcp_servers/agents/slack_agent.py)).
+    - Jira: can run `ghcr.io/sooperset/mcp-atlassian:latest` (Docker) or `uvx mcp-atlassian` (see [community_mcp_servers/agents/jira_agent.py](community_mcp_servers/agents/jira_agent.py)).
+ - `sse` / `streamable_http` — connects to an HTTP/SSE MCP server endpoint. Default example endpoints used by the code:
+ - Slack HTTP default: `http://127.0.0.1:13080/sse`
+    - Jira HTTP default: `http://127.0.0.1:8000/sse`
+    The HTTP transport builder is implemented in [community_mcp_servers/lib/base_transport.py](community_mcp_servers/lib/base_transport.py).
+
+- **LLM & agent creation:** The LLM provider is chosen by `LLM_PROVIDER` (see [community_mcp_servers/lib/base_agent.py](community_mcp_servers/lib/base_agent.py)) and the repository includes provider adapters in [community_mcp_servers/llm_providers/](community_mcp_servers/llm_providers/) (Gemini, Groq, Lightning). The selected LLM is passed into `create_react_agent` alongside the loaded MCP tools to form the agent executor.
+
+### GitHub Agent
+
+- `https://github.com/github/github-mcp-server` — GitHub MCP server repository.
+- `https://api.githubcopilot.com/mcp/` — default GitHub MCP endpoint used as the service URL in `agents/github_agent.py`.
+- `langchain_mcp_adapters` — MCP client package used via `MultiServerMCPClient` (see `lib/base_agent.py`).
+- `langgraph` / `langchain` — used to create the React-style agent (`langgraph.prebuilt.create_react_agent`).
+
+### Slack Agent
+
+- `https://github.com/korotovsky/slack-mcp-server` — Slack MCP server repository.
+- `npx -y slack-mcp-server@latest` — npm package invoked in `agents/slack_agent.py` when using the `stdio` transport (runs `slack-mcp-server --transport stdio`).
+- `SLACK_*` environment variables (tokens) map to Slack credentials; see the standard Slack developer docs at https://api.slack.com/apps.
+- Default local HTTP/SSE endpoint in code: `http://127.0.0.1:13080/sse` (used when `SLACK_MCP_TRANSPORT` is set to `sse`/`streamable_http`).
+
+### Jira Agent
+
+- `https://github.com/sooperset/mcp-atlassian` — Jira MCP server repository.
+- `ghcr.io/sooperset/mcp-atlassian:latest` — Docker image referenced in `agents/jira_agent.py` for the `stdio` Docker transport.
+- `uvx mcp-atlassian` — Python package runner (uvx) fallback for `stdio` (non-Docker) mode, as used in `agents/jira_agent.py`.
+- Jira API tokens can be created at https://id.atlassian.com/manage-profile/security/api-tokens.
+- Default local HTTP/SSE endpoint in code: `http://127.0.0.1:8000/sse` (used when `JIRA_MCP_TRANSPORT` is `sse`/`streamable_http`).
+
+## Project Structure
+
+```
+community_mcp_servers/
+├── agents/ # Agent implementations
+│ ├── github_agent.py
+│ ├── jira_agent.py
+│ └── slack_agent.py
+├── lib/ # Core library
+│ ├── base_agent.py # Base agent class
+│ ├── base_mcp.py # MCP integration
+│ └── utils.py # Utilities
+├── llm_providers/ # LLM provider implementations
+│ ├── gemini.py
+│ ├── groq_llm.py
+│ ├── lightning_llm.py
+│ └── __init__.py
+├── main.py # CLI entry point
+├── .env.example # Configuration template
+└── requirements.txt # Python dependencies
+```
+
+## Requirements
+
+- Python >= 3.12
+- Valid API keys for your chosen LLM provider
+- Agent-specific credentials (GitHub token, Slack token, Jira credentials)
+
+## Getting API Keys
+
+- **GitHub**: [Settings → Developer settings → Personal access tokens](https://github.com/settings/tokens)
+- **Slack**: [Create a Slack app](https://api.slack.com/apps) → Install to workspace → Copy OAuth token
+- **Jira**: [Account security → API tokens](https://id.atlassian.com/manage-profile/security/api-tokens)
+- **Gemini**: [Google AI Studio](https://aistudio.google.com/app/apikey)
+- **Groq**: [Groq Console](https://console.groq.com/keys)
+- **OpenAI**: [OpenAI API Keys](https://platform.openai.com/api-keys)
diff --git a/community_mcp_servers/agents/__init__.py b/community_mcp_servers/agents/__init__.py
new file mode 100644
index 00000000..44063ea9
--- /dev/null
+++ b/community_mcp_servers/agents/__init__.py
@@ -0,0 +1,5 @@
+from .github_agent import get_github_agent
+from .jira_agent import get_jira_agent
+from .slack_agent import get_slack_agent
+
+__all__ = ["get_github_agent", "get_jira_agent", "get_slack_agent"]
diff --git a/community_mcp_servers/agents/github_agent.py b/community_mcp_servers/agents/github_agent.py
new file mode 100644
index 00000000..b24812bc
--- /dev/null
+++ b/community_mcp_servers/agents/github_agent.py
@@ -0,0 +1,25 @@
+import os
+from lib.base_agent import BaseAgent
+
+__all__ = ["get_github_agent"]
+
+
+def get_github_agent() -> BaseAgent:
+ if not (
+ os.getenv("GITHUB_PERSONAL_ACCESS_TOKEN")
+ or os.getenv("GITHUB_MCP_BEARER_TOKEN")
+ ):
+ required_vars = [
+ "GITHUB_PERSONAL_ACCESS_TOKEN or GITHUB_MCP_BEARER_TOKEN"
+ ]
+ else:
+ required_vars = []
+
+ return BaseAgent(
+ service_name="github",
+ required_env_vars=required_vars,
+ server_url_env="GITHUB_MCP_SERVER_URL",
+ default_server_url="https://api.githubcopilot.com/mcp/",
+ token_env="GITHUB_PERSONAL_ACCESS_TOKEN",
+ bearer_token_env="GITHUB_MCP_BEARER_TOKEN",
+ )
diff --git a/community_mcp_servers/agents/jira_agent.py b/community_mcp_servers/agents/jira_agent.py
new file mode 100644
index 00000000..5d612a25
--- /dev/null
+++ b/community_mcp_servers/agents/jira_agent.py
@@ -0,0 +1,174 @@
+import os
+from typing import Any
+from lib.base_agent import BaseAgent
+from lib.base_transport import StdioTransportMixin, HttpTransportMixin
+
+__all__ = ["get_jira_agent"]
+
+# Environment variables for Jira MCP (mcp-atlassian package)
+# See: https://github.com/sooperset/mcp-atlassian
+# Configured for Jira-only usage (no Confluence)
+JIRA_ENV_VARS = [
+ # Connection settings
+ "JIRA_URL", # Jira instance URL
+ # Cloud authentication (API Token)
+ "JIRA_USERNAME", # Email for Cloud
+ "JIRA_API_TOKEN", # API token from id.atlassian.com
+ # Server/Data Center authentication (PAT)
+ "JIRA_PERSONAL_TOKEN", # Personal Access Token
+ # SSL and proxy settings
+ "JIRA_SSL_VERIFY", # SSL verification (true/false)
+ "HTTP_PROXY", # HTTP proxy URL
+ "HTTPS_PROXY", # HTTPS proxy URL
+ "JIRA_HTTPS_PROXY", # Jira-specific HTTPS proxy
+ "NO_PROXY", # Hosts to bypass proxy
+ "SOCKS_PROXY", # SOCKS proxy URL
+ # Custom headers (for corporate environments)
+ "JIRA_CUSTOM_HEADERS", # Format: key=value,key2=value2
+ # Filtering and access control
+ "JIRA_PROJECTS_FILTER", # Limit to specific projects
+ "ENABLED_TOOLS", # Enable specific tools only
+ "READ_ONLY_MODE", # Disable write operations
+ # Server options
+ "TRANSPORT", # Transport type for the server
+ "PORT", # Port for HTTP transports (default: 8000)
+ "HOST", # Host for HTTP transports (default: 0.0.0.0)
+ "STATELESS", # Enable stateless mode
+ # Logging
+ "MCP_VERBOSE", # Enable verbose logging
+ "MCP_VERY_VERBOSE", # Enable debug logging
+ "MCP_LOGGING_STDOUT", # Log to stdout instead of stderr
+]
+
+
+def get_jira_agent() -> BaseAgent:
+ """Create and return a Jira MCP agent.
+
+ Uses mcp-atlassian package via uvx (Python).
+ Configured for Jira-only access (no Confluence).
+
+ Supports two authentication methods:
+ 1. API Token (Cloud): JIRA_USERNAME + JIRA_API_TOKEN
+ 2. Personal Access Token (Server/DC): JIRA_PERSONAL_TOKEN
+
+ Transport options:
+ - stdio (default): Direct subprocess communication
+ - sse: Server-Sent Events over HTTP
+ - streamable_http: HTTP transport
+ """
+ has_url = bool(os.getenv("JIRA_URL"))
+ has_api_token = bool(
+ os.getenv("JIRA_USERNAME") and os.getenv("JIRA_API_TOKEN")
+ )
+ has_pat = bool(os.getenv("JIRA_PERSONAL_TOKEN"))
+
+ # Build list of missing required variables
+ required_vars = []
+ if not has_url:
+ required_vars.append("JIRA_URL")
+ if not has_api_token and not has_pat:
+ required_vars.append(
+ "(JIRA_USERNAME and JIRA_API_TOKEN) or JIRA_PERSONAL_TOKEN"
+ )
+
+ transport = os.getenv("JIRA_MCP_TRANSPORT", "stdio").lower()
+
+ if transport == "stdio":
+ return _create_stdio_agent(required_vars)
+ elif transport in ("sse", "streamable_http"):
+ return _create_http_agent(required_vars, transport)
+ else:
+ raise ValueError(
+ f"Unsupported JIRA_MCP_TRANSPORT: {transport}. "
+ f"Use 'stdio', 'sse', or 'streamable_http'"
+ )
+
+
+def _create_stdio_agent(required_vars: list[str]) -> BaseAgent:
+ """Create a Jira agent using stdio transport (uvx subprocess)."""
+
+ class JiraStdioAgent(BaseAgent, StdioTransportMixin):
+ def get_connection_config(self) -> dict[str, Any]:
+ use_docker = os.getenv("JIRA_MCP_USE_DOCKER", "").lower()
+ if use_docker in ("true", "1", "yes"):
+ return self._get_docker_config()
+ return self._get_uvx_config()
+
+ def _get_docker_config(self) -> dict[str, Any]:
+ """Build Docker configuration for mcp-atlassian."""
+ docker_args = ["run", "-i", "--rm"]
+
+ # Forward all configured Jira environment variables
+ for var in JIRA_ENV_VARS:
+ if os.getenv(var):
+ docker_args.extend(["-e", var])
+
+ image = os.getenv(
+ "JIRA_MCP_DOCKER_IMAGE",
+ "ghcr.io/sooperset/mcp-atlassian:latest",
+ )
+ docker_args.append(image)
+
+ # Build env dict for subprocess
+ env = {var: os.getenv(var, "") for var in JIRA_ENV_VARS}
+ env["PATH"] = os.environ.get("PATH", "")
+
+ return self.build_stdio_config("docker", docker_args, env)
+
+ def _get_uvx_config(self) -> dict[str, Any]:
+ """Build uvx configuration for mcp-atlassian.
+
+ Uses uvx (Python package runner) instead of npx.
+ Official usage: uvx mcp-atlassian
+ """
+ # Build environment with all Jira variables
+ env: dict[str, str] = {
+ "PATH": os.environ.get("PATH", ""),
+ }
+
+ # Forward all configured Jira environment variables
+ for var in JIRA_ENV_VARS:
+ value = os.getenv(var)
+ if value:
+ env[var] = value
+
+ # Use uvx to run mcp-atlassian
+ # Note: Use --python=3.12 if Python 3.14+ causes issues
+ return self.build_stdio_config(
+ "uvx",
+ ["mcp-atlassian"],
+ env
+ )
+
+ return JiraStdioAgent(
+ service_name="jira", required_env_vars=required_vars
+ )
+
+
+def _create_http_agent(required_vars: list[str], transport: str) -> BaseAgent:
+ """Create a Jira agent using SSE or HTTP transport.
+
+ Requires a running mcp-atlassian server.
+ Default: http://127.0.0.1:8000
+ """
+
+ class JiraHttpAgent(BaseAgent, HttpTransportMixin):
+ def __init__(self, transport_type: str, **kwargs):
+ super().__init__(**kwargs)
+ self.transport_type = transport_type
+
+ def get_connection_config(self) -> dict[str, Any]:
+ # Get server URL from environment or use default
+ host = os.getenv("HOST", "127.0.0.1")
+ port = os.getenv("PORT", "8000")
+ default_url = f"http://{host}:{port}/sse"
+
+ return self.build_http_config(
+ "jira", default_url, self.transport_type
+ )
+
+ return JiraHttpAgent(
+ transport_type=transport,
+ service_name="jira",
+ required_env_vars=required_vars,
+ )
diff --git a/community_mcp_servers/agents/slack_agent.py b/community_mcp_servers/agents/slack_agent.py
new file mode 100644
index 00000000..8fc2570c
--- /dev/null
+++ b/community_mcp_servers/agents/slack_agent.py
@@ -0,0 +1,136 @@
+import os
+from typing import Any
+from lib.base_agent import BaseAgent
+from lib.base_transport import StdioTransportMixin, HttpTransportMixin
+
+__all__ = ["get_slack_agent"]
+
+# All environment variables supported by slack-mcp-server
+# See: https://github.com/korotovsky/slack-mcp-server
+SLACK_ENV_VARS = [
+ # Authentication tokens (one required)
+ "SLACK_MCP_XOXP_TOKEN", # User OAuth token (xoxp-...)
+ "SLACK_MCP_XOXB_TOKEN", # Bot token (xoxb-...) - limited
+ "SLACK_MCP_XOXC_TOKEN", # Browser token (xoxc-...)
+ "SLACK_MCP_XOXD_TOKEN", # Browser cookie d (xoxd-...)
+ # Server configuration
+ "SLACK_MCP_PORT", # Port for SSE/HTTP (default: 13080)
+ "SLACK_MCP_HOST", # Host for SSE/HTTP (default: 127.0.0.1)
+ "SLACK_MCP_API_KEY", # Bearer token for SSE/HTTP transports
+ # Proxy and network settings
+ "SLACK_MCP_PROXY", # Proxy URL for outgoing requests
+ "SLACK_MCP_USER_AGENT", # Custom User-Agent (Enterprise)
+ "SLACK_MCP_CUSTOM_TLS", # Custom TLS (Enterprise Slack)
+ # TLS/SSL settings
+ "SLACK_MCP_SERVER_CA", # Path to CA certificate
+ "SLACK_MCP_SERVER_CA_TOOLKIT", # HTTPToolkit CA
+ "SLACK_MCP_SERVER_CA_INSECURE", # Trust insecure (NOT RECOMMENDED)
+ # Message posting settings
+ "SLACK_MCP_ADD_MESSAGE_TOOL", # Enable posting
+ "SLACK_MCP_ADD_MESSAGE_MARK", # Auto-mark as read
+ "SLACK_MCP_ADD_MESSAGE_UNFURLING", # Enable link unfurling
+ # Cache configuration
+ "SLACK_MCP_USERS_CACHE", # Path to users cache file
+ "SLACK_MCP_CHANNELS_CACHE", # Path to channels cache
+ # Logging
+ "SLACK_MCP_LOG_LEVEL", # debug, info, warn, error
+]
+
+
+def get_slack_agent() -> BaseAgent:
+ """Create and return a Slack MCP agent.
+
+ Supports three authentication methods:
+ 1. XOXP token (User OAuth token)
+ 2. XOXB token (Bot token - limited access, no search)
+ 3. XOXC + XOXD tokens (Browser tokens - stealth mode)
+
+ Transport options:
+ - stdio (default): Direct subprocess communication
+ - sse: Server-Sent Events over HTTP
+ - streamable_http: HTTP transport
+ """
+ has_xoxp = bool(os.getenv("SLACK_MCP_XOXP_TOKEN"))
+ has_xoxb = bool(os.getenv("SLACK_MCP_XOXB_TOKEN"))
+ has_xoxc = bool(os.getenv("SLACK_MCP_XOXC_TOKEN"))
+ has_xoxd = bool(os.getenv("SLACK_MCP_XOXD_TOKEN"))
+
+ # Validate authentication: need xoxp OR xoxb OR (xoxc AND xoxd)
+ if not has_xoxp and not has_xoxb and not (has_xoxc and has_xoxd):
+ required_vars = [
+ "SLACK_MCP_XOXP_TOKEN or SLACK_MCP_XOXB_TOKEN or "
+ "(SLACK_MCP_XOXC_TOKEN and SLACK_MCP_XOXD_TOKEN)"
+ ]
+ else:
+ required_vars = []
+
+ transport = os.getenv("SLACK_MCP_TRANSPORT", "stdio").lower()
+
+ if transport == "stdio":
+ return _create_stdio_agent(required_vars)
+ elif transport in ("sse", "streamable_http"):
+ return _create_http_agent(required_vars, transport)
+ else:
+ raise ValueError(
+ f"Unsupported SLACK_MCP_TRANSPORT: {transport}. "
+ f"Use 'stdio', 'sse', or 'streamable_http'"
+ )
+
+
+def _create_stdio_agent(required_vars: list[str]) -> BaseAgent:
+ """Create a Slack agent using stdio transport (npx subprocess)."""
+
+ class SlackStdioAgent(BaseAgent, StdioTransportMixin):
+ def get_connection_config(self) -> dict[str, Any]:
+ # Build environment with all supported Slack MCP variables
+ env: dict[str, str] = {
+ "PATH": os.environ.get("PATH", ""),
+ }
+
+ # Forward all configured Slack environment variables
+ for var in SLACK_ENV_VARS:
+ value = os.getenv(var)
+ if value:
+ env[var] = value
+
+ # Use npx with -y flag to auto-confirm install
+ return self.build_stdio_config(
+ "npx",
+ ["-y", "slack-mcp-server@latest", "--transport", "stdio"],
+ env
+ )
+
+ return SlackStdioAgent(
+ service_name="slack", required_env_vars=required_vars
+ )
+
+
+def _create_http_agent(required_vars: list[str], transport: str) -> BaseAgent:
+ """Create a Slack agent using SSE or HTTP transport.
+
+ Requires SLACK_MCP_SERVER_URL to be set, pointing to a running
+ slack-mcp-server instance (e.g., http://127.0.0.1:13080/sse).
+
+ Optionally use SLACK_MCP_API_KEY for authentication.
+ """
+
+ class SlackHttpAgent(BaseAgent, HttpTransportMixin):
+ def __init__(self, transport_type: str, **kwargs):
+ super().__init__(**kwargs)
+ self.transport_type = transport_type
+
+ def get_connection_config(self) -> dict[str, Any]:
+ # Get server URL from environment or use default
+ host = os.getenv("SLACK_MCP_HOST", "127.0.0.1")
+ port = os.getenv("SLACK_MCP_PORT", "13080")
+ default_url = f"http://{host}:{port}/sse"
+
+ return self.build_http_config(
+ "slack", default_url, self.transport_type
+ )
+
+ return SlackHttpAgent(
+ transport_type=transport,
+ service_name="slack",
+ required_env_vars=required_vars,
+ )
diff --git a/community_mcp_servers/lib/__init__.py b/community_mcp_servers/lib/__init__.py
new file mode 100644
index 00000000..d0e38ee6
--- /dev/null
+++ b/community_mcp_servers/lib/__init__.py
@@ -0,0 +1,3 @@
+"""Common modules for MCP agents."""
+
+__all__ = ["state", "utils", "base_agent", "base_commands", "base_mcp", "base_transport"]
diff --git a/community_mcp_servers/lib/base_agent.py b/community_mcp_servers/lib/base_agent.py
new file mode 100644
index 00000000..a0ef32de
--- /dev/null
+++ b/community_mcp_servers/lib/base_agent.py
@@ -0,0 +1,188 @@
+import os
+from typing import Any, cast
+
+from langchain_core.messages import AIMessage, BaseMessage
+from langchain_mcp_adapters.client import MultiServerMCPClient
+from langgraph.prebuilt import create_react_agent
+
+from .state import RuntimeState
+from .utils import (
+ build_connection_config,
+ sanitize_tool_name,
+ schema_from_model,
+)
+
+
+__all__ = [
+ "BaseAgent",
+ "initialize_agent",
+ "ensure_agent_initialized",
+ "stream_agent_response",
+]
+
+
+def _get_llm_provider():
+ from llm_providers import gemini
+ from llm_providers import lightning_llm
+ from llm_providers import groq_llm
+
+ provider = os.getenv("LLM_PROVIDER", "gemini").lower()
+
+ if provider == "lightning":
+ return lightning_llm.get_llm
+ elif provider == "groq":
+ return groq_llm.get_llm
+ elif provider == "gemini":
+ return gemini.get_llm
+ else:
+ raise ValueError(
+ f"Unsupported LLM_PROVIDER: {provider}. "
+ f"Supported values: 'lightning', 'groq', 'gemini'"
+ )
+
+
+class BaseAgent:
+ """Base class for MCP agents providing common functionality."""
+
+ def __init__(
+ self,
+ service_name: str,
+ required_env_vars: list[str] | None = None,
+ server_url_env: str | None = None,
+ default_server_url: str | None = None,
+ token_env: str | None = None,
+ bearer_token_env: str | None = None,
+ ):
+ """Initialize base agent with service-specific configuration.
+
+ Args:
+ service_name: Name of the service (e.g., "github", "jira", "slack")
+ required_env_vars: List of required environment variables
+ server_url_env: Environment variable name for server URL
+ default_server_url: Default server URL if not specified
+ token_env: Primary token environment variable name
+ bearer_token_env: Optional bearer token environment variable name
+ """
+ self.service_name = service_name
+ self.required_env_vars = required_env_vars or []
+ self.server_url_env = server_url_env
+ self.default_server_url = default_server_url
+ self.token_env = token_env
+ self.bearer_token_env = bearer_token_env
+
+ def validate_environment(self) -> None:
+ """Validate required environment variables."""
+ missing_vars: list[str] = []
+ for var in self.required_env_vars:
+ if not os.getenv(var):
+ missing_vars.append(var)
+
+ if missing_vars:
+ missing_str = ", ".join(sorted(set(missing_vars)))
+ raise ValueError(
+ "Missing required environment variables: " + missing_str
+ )
+
+ def get_connection_config(self) -> dict[str, Any]:
+ """Get the connection configuration for this service."""
+ return build_connection_config(
+ service_name=self.service_name,
+ server_url_env=self.server_url_env or f"{self.service_name.upper()}_MCP_SERVER_URL",
+ default_server_url=self.default_server_url or f"https://api.{self.service_name}.com/mcp/",
+ token_env=self.token_env or f"{self.service_name.upper()}_PERSONAL_ACCESS_TOKEN",
+ bearer_token_env=self.bearer_token_env,
+ )
+
+ async def initialize(self, state: RuntimeState) -> None:
+ """Initialize the agent with MCP client and tools."""
+ self.validate_environment()
+
+ connection = self.get_connection_config()
+ state.mcp_client = MultiServerMCPClient({self.service_name: connection})
+
+ client = state.mcp_client
+ if client is None:
+ raise RuntimeError("Failed to initialize MCP client.")
+
+ # Get tools from MCP client
+ tools = await client.get_tools()
+
+ state.tool_summaries = []
+ state.tool_map = {}
+ state.tool_details = {}
+
+ for tool in tools:
+ original_name = tool.name
+ sanitized_name = sanitize_tool_name(original_name)
+
+ args_schema = schema_from_model(getattr(tool, "args_schema", None))
+ metadata = getattr(tool, "metadata", {}) or {}
+
+ state.tool_map[sanitized_name] = tool
+ state.tool_details[sanitized_name] = {
+ "name": sanitized_name,
+ "original_name": original_name,
+ "description": getattr(tool, "description", ""),
+ "metadata": metadata,
+ "args_schema": args_schema,
+ }
+ state.tool_summaries.append(
+ {
+ "name": sanitized_name,
+ "original_name": original_name,
+ "description": getattr(tool, "description", ""),
+ }
+ )
+
+ # Get LLM and create agent with tools
+ get_llm = _get_llm_provider()
+ llm = get_llm()
+
+ # Create React agent with model and tools
+ state.agent_executor = create_react_agent(llm, tools)
+
+
+async def initialize_agent(
+ state: RuntimeState,
+ agent: BaseAgent,
+) -> None:
+ """Async Initialize an agent using the base agent class."""
+ await agent.initialize(state)
+
+
+async def ensure_agent_initialized(
+ state: RuntimeState,
+ agent: BaseAgent,
+) -> None:
+ """Ensure agent is initialized, initializing if necessary."""
+ if state.agent_executor is None or state.mcp_client is None:
+ await initialize_agent(state, agent)
+ if state.agent_executor is None or state.mcp_client is None:
+ raise RuntimeError("Agent failed to initialize")
+
+
+async def stream_agent_response(
+ state: RuntimeState,
+ session_history: list[BaseMessage],
+) -> AIMessage:
+ """Stream agent response for a given session history."""
+ if state.agent_executor is None:
+ raise RuntimeError("Agent executor is not initialized.")
+
+ executor = cast(Any, state.agent_executor)
+
+ # Use ainvoke for async execution with the agent
+ result = await executor.ainvoke({"messages": session_history})
+
+ # Extract the last AI message from the result
+ messages = result.get("messages", [])
+ last_ai_message: AIMessage | None = None
+
+ for message in reversed(messages):
+ if isinstance(message, AIMessage):
+ last_ai_message = message
+ break
+
+ if last_ai_message is None:
+ raise RuntimeError("The agent did not return a response.")
+ return last_ai_message
diff --git a/community_mcp_servers/lib/base_commands.py b/community_mcp_servers/lib/base_commands.py
new file mode 100644
index 00000000..42af9d5f
--- /dev/null
+++ b/community_mcp_servers/lib/base_commands.py
@@ -0,0 +1,297 @@
+import asyncio
+import json
+import sys
+from pathlib import Path
+from typing import Any
+
+from langchain_core.messages import HumanMessage
+
+from .base_agent import BaseAgent, ensure_agent_initialized, stream_agent_response
+from .state import RuntimeState
+from .utils import (
+ extract_message_text,
+ sanitize_tool_name,
+ _eprint,
+)
+
+
+__all__ = [
+ "list_tools",
+ "tool_info",
+ "invoke_tool",
+ "chat",
+ "chat_loop",
+ "sessions",
+ "clear_session",
+ "export_session",
+ "health",
+]
+
+
async def list_tools(state: RuntimeState, agent: BaseAgent) -> int:
    """Print one summary line per available MCP tool; returns 0."""
    await ensure_agent_initialized(state, agent)

    summaries = state.tool_summaries
    if not summaries:
        print("No tools available.")
        return 0

    for summary in summaries:
        desc = summary.get("description") or "(no description)"
        original = summary.get("original_name", "?")
        print(f"- {summary['name']} (original: {original}): {desc}")
    return 0
+
+
async def tool_info(state: RuntimeState, agent: BaseAgent, tool_identifier: str) -> int:
    """Print the detail record (names, description, metadata, schema) for one tool.

    Returns 0 on success, 1 when the tool is unknown.
    """
    await ensure_agent_initialized(state, agent)

    detail = _find_tool_detail(state, tool_identifier)
    if detail is None:
        _eprint(
            f"Tool '{tool_identifier}' not found. "
            "Run list-tools to see available IDs.",
        )
        return 1

    print(f"CLI name: {detail['name']}")
    print(f"Original name: {detail['original_name']}")

    description = detail.get("description")
    if description:
        print(f"Description: {description}")

    metadata = detail.get("metadata") or {}
    if metadata:
        print("Metadata:")
        for meta_key, meta_value in metadata.items():
            print(f" - {meta_key}: {meta_value}")

    schema = detail.get("args_schema")
    if schema:
        print("Args schema:", json.dumps(schema, indent=2))
    return 0
+
+
def _find_tool_detail(
    state: RuntimeState,
    identifier: str,
) -> dict[str, Any] | None:
    """Resolve a tool record by sanitized CLI name, then by original name."""
    sanitized = sanitize_tool_name(identifier)
    if sanitized in state.tool_details:
        return state.tool_details[sanitized]
    return next(
        (
            detail
            for detail in state.tool_details.values()
            if detail.get("original_name") == identifier
        ),
        None,
    )
+
+
async def invoke_tool(
    state: RuntimeState,
    agent: BaseAgent,
    tool_identifier: str,
    args_json: str | None = None,
) -> int:
    """Invoke a tool directly with JSON-encoded arguments and print the result.

    Args:
        state: Shared runtime state holding the loaded tools.
        agent: Agent used for lazy initialization.
        tool_identifier: Sanitized CLI name or original tool name.
        args_json: Optional JSON object with the tool's arguments.

    Returns:
        0 on success; 1 for unknown tools or malformed arguments.
    """
    await ensure_agent_initialized(state, agent)
    detail = _find_tool_detail(state, tool_identifier)
    if detail is None:
        _eprint(
            f"Tool '{tool_identifier}' not found. "
            "Run list-tools to see available IDs.",
        )
        return 1

    tool = state.tool_map.get(detail["name"])
    if tool is None:
        _eprint(f"Tool '{tool_identifier}' is not loaded in the agent.")
        return 1

    arguments: dict[str, Any] = {}
    if args_json:
        try:
            parsed_args = json.loads(args_json)
        except json.JSONDecodeError as exc:
            _eprint(f"Failed to parse args JSON: {exc}")
            return 1
        if not isinstance(parsed_args, dict):
            # Consistency fix: use _eprint like every other error path in
            # this module instead of a raw print(..., file=sys.stderr).
            _eprint("Tool arguments must be provided as a JSON object.")
            return 1
        arguments = parsed_args

    result = await tool.ainvoke(arguments)
    # A 2-tuple result is treated as a (content, artifacts) pair; anything
    # else is printed as-is.
    if isinstance(result, tuple) and len(result) == 2:
        text_part, artifacts = result
        if text_part:
            print(text_part)
        if artifacts:
            print(
                "Artifacts:",
                json.dumps([repr(a) for a in artifacts], indent=2),
            )
    else:
        print(result)
    return 0
+
+
async def chat(
    state: RuntimeState,
    agent: BaseAgent,
    message: str,
    session_id: str = "default",
) -> int:
    """Send one message to the agent and print its reply.

    The human message is recorded before the agent runs; if the agent
    fails, the message is rolled back so the session history stays
    consistent.

    Returns:
        0 on success, 1 when the agent raised a RuntimeError.
    """
    await ensure_agent_initialized(state, agent)

    human_message = HumanMessage(content=message)
    session_history = state.record_message(session_id, human_message)

    # Keep the try body minimal: only the agent call can raise the
    # RuntimeError we handle here.
    try:
        last_ai_message = await stream_agent_response(state, session_history)
    except RuntimeError as exc:
        # Roll back the unanswered human message before reporting failure.
        state.pop_last_message(session_id)
        # Consistency fix: report via _eprint like the rest of the module.
        _eprint(str(exc))
        return 1

    state.record_message(session_id, last_ai_message)
    print(extract_message_text(last_ai_message))
    return 0
+
+
async def chat_loop(
    state: RuntimeState,
    agent: BaseAgent,
    session_id: str = "default",
    exit_command: str = "/exit",
    reset_command: str = "/reset",
    prompt_prefix: str | None = None,
) -> int:
    """Run an interactive chat REPL until the user exits.

    Args:
        state: Shared runtime state.
        agent: Agent used for lazy initialization.
        session_id: History bucket for this conversation.
        exit_command: Input (case-insensitive) that ends the loop.
        reset_command: Input (case-insensitive) that clears the session.
        prompt_prefix: Optional custom prompt; defaults to "[<session>]> ".

    Returns:
        0 when the user exits (command, Ctrl-C, or EOF).
    """
    await ensure_agent_initialized(state, agent)

    normalized_exit = exit_command.strip().lower()
    normalized_reset = reset_command.strip().lower()
    prompt_template = prompt_prefix or f"[{session_id}]> "

    print("Interactive chat started. Type your message and press Enter.")
    print(
        f"Use '{exit_command}' to quit and '{reset_command}' to clear the session."
    )

    loop = asyncio.get_running_loop()

    while True:
        try:
            # input() blocks, so run it in the default executor to keep the
            # event loop responsive.
            user_message = await loop.run_in_executor(
                None, input, prompt_template
            )
        except (KeyboardInterrupt, EOFError):
            print("\nExiting chat loop.")
            return 0

        # Dead-code fix: input() never returns None, so the original
        # `if user_message is None: continue` guard was removed.
        stripped = user_message.strip()
        if not stripped:
            continue

        lowered = stripped.lower()
        if lowered == normalized_exit:
            print("Ending chat loop.")
            return 0

        if lowered == normalized_reset:
            state.clear_session(session_id, persist=True)
            print(f"Session '{session_id}' reset.")
            continue

        human_message = HumanMessage(content=user_message)
        session_history = state.record_message(session_id, human_message)

        try:
            last_ai_message = await stream_agent_response(
                state, session_history
            )
        except RuntimeError as exc:
            # Consistency fix: stderr output goes through _eprint; roll the
            # unanswered human message back out of the history.
            _eprint(str(exc))
            state.pop_last_message(session_id)
            continue

        state.record_message(session_id, last_ai_message)
        print(extract_message_text(last_ai_message))
+
+
async def sessions(state: RuntimeState) -> int:
    """Print a summary line for each active chat session; returns 0."""
    summaries = state.list_sessions()
    if not summaries:
        print("No active sessions.")
        return 0

    print("Active sessions:")
    for summary in summaries:
        line = "- {sid} ({count} messages, updated {updated})".format(
            sid=summary["session_id"],
            count=summary.get("message_count", 0),
            updated=summary.get("updated_at") or "unknown",
        )
        print(line)
    return 0
+
+
async def clear_session(state: RuntimeState, session_id: str) -> int:
    """Delete one chat session.

    Returns:
        0 when the session existed and was cleared, 1 when it was unknown.
    """
    # Guard clause first; consistency fix: unknown-session errors go
    # through _eprint like the other commands in this module.
    if session_id not in state.chat_sessions:
        _eprint(f"Session '{session_id}' not found.")
        return 1

    state.clear_session(session_id, persist=True)
    print(f"Cleared session '{session_id}'.")
    return 0
+
+
async def export_session(
    state: RuntimeState,
    session_id: str,
    output_path: str | None = None,
) -> int:
    """Export a chat session as JSON to stdout or to *output_path*.

    Args:
        state: Shared runtime state.
        session_id: Session to export.
        output_path: Optional file path; parent directories are created.

    Returns:
        0 on success, 1 for an unknown session or a write failure.
    """
    export_payload = state.serialize_session(session_id)
    if export_payload is None:
        # Consistency fix: stderr output goes through _eprint.
        _eprint(f"Session '{session_id}' not found.")
        return 1

    payload_text = json.dumps(export_payload, indent=2, ensure_ascii=False)

    if not output_path:
        print(payload_text)
        return 0

    # Build the path outside the try block so the success message below can
    # never reference a name that was not assigned.
    path = Path(output_path).expanduser()
    try:
        path.parent.mkdir(parents=True, exist_ok=True)
        path.write_text(payload_text + "\n", encoding="utf-8")
    except OSError as exc:
        _eprint(f"Failed to export session: {exc}")
        return 1

    print(f"Session '{session_id}' exported to '{path}'.")
    return 0
+
+
async def health(state: RuntimeState) -> int:
    """Report whether the agent is initialized and how many tools are loaded.

    Returns:
        Always 0 (reporting never fails).
    """
    # Fix: the original built a status dict whose "agent_initialized" entry
    # was never read; compute only what is actually printed.
    tools_available = len(state.tool_summaries)
    if state.agent_executor is None:
        print(
            "Agent not initialized. Run a command that initializes it"
            " (e.g. list-tools)."
        )
    else:
        print("Agent initialized.")
    print(f"Tools available: {tools_available}")
    return 0
diff --git a/community_mcp_servers/lib/base_mcp.py b/community_mcp_servers/lib/base_mcp.py
new file mode 100644
index 00000000..cc58790f
--- /dev/null
+++ b/community_mcp_servers/lib/base_mcp.py
@@ -0,0 +1,88 @@
+import asyncio
+import typer
+from typing import Callable
+
+from .base_agent import BaseAgent
+from .state import RuntimeState
+from . import base_commands
+
+
+__all__ = ["create_mcp_cli"]
+
+
def create_mcp_cli(
    service_name: str,
    agent_factory: Callable[[], BaseAgent],
    description: str | None = None,
) -> typer.Typer:
    """Build a Typer sub-app exposing the shared base commands for one service.

    Args:
        service_name: Key used to namespace the service's runtime state.
        agent_factory: Zero-argument callable producing the service's agent.
        description: Optional CLI help text; defaults to
            "<Service> MCP Agent CLI".

    Returns:
        A configured typer.Typer application.
    """
    if description is None:
        description = f"{service_name.title()} MCP Agent CLI"

    # One state container and one agent instance are shared by every
    # command of this sub-app.
    state = RuntimeState(service_name=service_name)
    agent = agent_factory()

    app = typer.Typer(help=description)

    def _run(coro) -> None:
        """Run *coro* and surface its integer status as the process exit code.

        Bug fix: Typer/Click ignore return values from command callbacks,
        so the original `return asyncio.run(...)` always exited with status
        0 even when a command reported failure. Raising typer.Exit makes
        the non-zero status reach the shell.
        """
        exit_code = asyncio.run(coro)
        if exit_code:
            raise typer.Exit(code=exit_code)

    @app.command("list-tools")
    def list_tools() -> None:
        _run(base_commands.list_tools(state, agent))

    @app.command("chat")
    def chat(
        message: str = typer.Argument(..., help="Message to send to the agent."),
        session_id: str = typer.Option("default", help="Chat session identifier."),
    ) -> None:
        _run(base_commands.chat(state, agent, message, session_id))

    @app.command("chat-loop")
    def chat_loop(
        session_id: str = typer.Option("default", help="Chat session identifier."),
        exit_command: str = typer.Option("/exit", help="Command to end the loop."),
        reset_command: str = typer.Option("/reset", help="Command to reset session."),
        prompt_prefix: str | None = typer.Option(None, help="Custom prompt prefix."),
    ) -> None:
        _run(
            base_commands.chat_loop(
                state, agent, session_id, exit_command, reset_command, prompt_prefix
            )
        )

    @app.command("tool-info")
    def tool_info(
        tool_identifier: str = typer.Argument(..., help="Tool CLI or original name.")
    ) -> None:
        _run(base_commands.tool_info(state, agent, tool_identifier))

    @app.command("invoke-tool")
    def invoke_tool(
        tool_identifier: str = typer.Argument(..., help="Tool to invoke."),
        args_json: str = typer.Option("{}", help="JSON arguments for the tool."),
    ) -> None:
        _run(base_commands.invoke_tool(state, agent, tool_identifier, args_json))

    @app.command("sessions")
    def sessions() -> None:
        _run(base_commands.sessions(state))

    @app.command("clear-session")
    def clear_session(
        session_id: str = typer.Argument(..., help="Session id to clear."),
    ) -> None:
        _run(base_commands.clear_session(state, session_id))

    @app.command("export-session")
    def export_session(
        session_id: str = typer.Argument(..., help="Session id to export."),
        output_path: str | None = typer.Option(None, "--output", "-o", help="Output path."),
    ) -> None:
        _run(base_commands.export_session(state, session_id, output_path))

    @app.command("health")
    def health() -> None:
        _run(base_commands.health(state))

    return app
diff --git a/community_mcp_servers/lib/base_transport.py b/community_mcp_servers/lib/base_transport.py
new file mode 100644
index 00000000..3d07bd4e
--- /dev/null
+++ b/community_mcp_servers/lib/base_transport.py
@@ -0,0 +1,54 @@
+import os
+from typing import Any
+
+
+__all__ = [
+ "StdioTransportMixin",
+ "HttpTransportMixin",
+]
+
+
class StdioTransportMixin:
    """Mixin providing a stdio-based MCP connection configuration."""

    def build_stdio_config(
        self,
        command: str,
        args: list[str],
        env_vars: dict[str, str],
    ) -> dict[str, Any]:
        """Return a stdio transport config for *command* with *args*/*env_vars*."""
        config: dict[str, Any] = {"transport": "stdio"}
        config["command"] = command
        config["args"] = args
        config["env"] = env_vars
        return config
+
+
class HttpTransportMixin:
    """Mixin building an HTTP(S) MCP connection config from environment vars."""

    def build_http_config(
        self,
        service_name: str,
        default_url: str,
        transport_type: str = "streamable_http",
    ) -> dict[str, Any]:
        """Build an HTTP connection config for *service_name*.

        Requires <SERVICE>_MCP_SERVER_URL to be set. Authentication prefers
        <SERVICE>_MCP_API_KEY and falls back to <SERVICE>_TOKEN.

        Raises:
            ValueError: when the server-URL variable is unset.
        """
        from .utils import build_connection_config

        prefix = service_name.upper()
        server_url_env = f"{prefix}_MCP_SERVER_URL"
        api_key_env = f"{prefix}_MCP_API_KEY"

        if not os.getenv(server_url_env):
            raise ValueError(
                f"{server_url_env} is required for {transport_type}"
            )

        # Bug fix: the original passed `token_env=api_key_env or
        # f"{...}_TOKEN"`, but api_key_env is always a non-empty string, so
        # the <SERVICE>_TOKEN fallback was dead code. Pass it explicitly;
        # build_connection_config already prefers the bearer-token env var.
        config = build_connection_config(
            service_name=service_name,
            server_url_env=server_url_env,
            default_server_url=default_url,
            token_env=f"{prefix}_TOKEN",
            bearer_token_env=api_key_env,
        )

        if transport_type == "sse":
            config["transport"] = "sse"

        return config
diff --git a/community_mcp_servers/lib/state.py b/community_mcp_servers/lib/state.py
new file mode 100644
index 00000000..b5185878
--- /dev/null
+++ b/community_mcp_servers/lib/state.py
@@ -0,0 +1,297 @@
+import importlib
+import json
+import os
+import sys
+from datetime import datetime, timezone
+from pathlib import Path
+from typing import Any, cast
+
+from langchain_core.messages import (
+ BaseMessage,
+ AIMessage,
+ HumanMessage,
+ SystemMessage,
+ ToolMessage,
+ ChatMessage,
+ FunctionMessage
+)
+
+from .utils import truthy
+
+
+__all__ = ["RuntimeState", "MESSAGE_TYPE_REGISTRY"]
+
+
# Environment variables controlling where (and whether) chat sessions are
# persisted between CLI invocations.
_STATE_FILE_ENV = "MCP_STATE_FILE"  # explicit path to the state file
_STATE_DIR_ENV = "MCP_STATE_DIR"  # directory holding per-service state files
_STATE_DISABLE_ENV = "MCP_DISABLE_PERSISTENCE"  # truthy value disables persistence
# NOTE(review): _DEFAULT_STATE_FILENAME appears unused — _resolve_state_file_path
# builds "<service>_sessions.json" instead; confirm before removing.
_DEFAULT_STATE_FILENAME = "mcp_sessions.json"
_DEFAULT_STATE_SUBDIR = ".mcp"  # fallback directory under the user's home
+
# Maps the LangChain message "type" discriminator to the concrete class used
# when deserializing persisted sessions.
#
# Fix: the original populated "function" and "chat" through a guard loop
# checking `cls is not None` and key absence — both conditions were always
# true, because FunctionMessage and ChatMessage are imported unconditionally
# at the top of this module (an unavailable class would already have raised
# ImportError there). The entries belong directly in the literal.
MESSAGE_TYPE_REGISTRY: dict[str, type[BaseMessage]] = {
    "ai": AIMessage,
    "human": HumanMessage,
    "system": SystemMessage,
    "tool": ToolMessage,
    "function": FunctionMessage,
    "chat": ChatMessage,
}
+
+
def _persistence_disabled() -> bool:
    """Return True when the user disabled session persistence via env var."""
    flag = os.getenv(_STATE_DISABLE_ENV)
    return truthy(flag)
+
+
def _resolve_state_file_path(service_name: str = "mcp") -> Path:
    """Pick the JSON file used to persist sessions for *service_name*.

    Precedence: MCP_STATE_FILE, then MCP_STATE_DIR, then ~/.mcp. When
    persistence is disabled the null device is returned. A failure to
    create the parent directory is reported but not fatal.
    """
    if _persistence_disabled():
        return Path(os.devnull)

    explicit_file = os.getenv(_STATE_FILE_ENV)
    if explicit_file:
        path = Path(explicit_file).expanduser()
    else:
        configured_dir = os.getenv(_STATE_DIR_ENV)
        base_path = (
            Path(configured_dir).expanduser()
            if configured_dir
            else Path.home() / _DEFAULT_STATE_SUBDIR
        )
        path = base_path / f"{service_name}_sessions.json"

    try:
        path.parent.mkdir(parents=True, exist_ok=True)
    except OSError as exc:
        print(f"Unable to prepare state directory: {exc}", file=sys.stderr)
    return path
+
+
def _utc_timestamp() -> str:
    """Return the current UTC time as a second-resolution ISO-8601 string."""
    now = datetime.now(timezone.utc)
    return now.isoformat(timespec="seconds")
+
+
def _serialize_message(message: BaseMessage) -> dict[str, Any]:
    """Convert a LangChain message into a JSON-friendly envelope.

    The envelope carries the message "type" discriminator, the fully
    qualified class path (for fallback lookup on load), and the field data
    with the redundant "type" key removed.
    """
    try:
        payload = json.loads(message.json())
    except (AttributeError, ValueError, TypeError):
        # Fall back to .dict() for objects without .json(), then to the
        # bare content as a last resort.
        if hasattr(message, "dict"):
            payload = cast(Any, message).dict()
        else:
            payload = {"content": getattr(message, "content", "")}
    payload.pop("type", None)
    class_path = (
        f"{message.__class__.__module__}."
        f"{message.__class__.__name__}"
    )
    return {"type": message.type, "class_path": class_path, "data": payload}
+
+
def _locate_message_class(class_path: str) -> type[BaseMessage] | None:
    """Import and return the BaseMessage subclass named by *class_path*.

    Returns None for malformed paths, missing modules, or attributes that
    are not BaseMessage subclasses.
    """
    module_name, _, class_name = class_path.rpartition(".")
    if not (module_name and class_name):
        return None
    try:
        module = importlib.import_module(module_name)
    except ImportError:
        return None
    candidate = getattr(module, class_name, None)
    if isinstance(candidate, type) and issubclass(candidate, BaseMessage):
        return cast(type[BaseMessage], candidate)
    return None
+
+
def _deserialize_message(payload: dict[str, Any]) -> BaseMessage | None:
    """Rebuild a message object from a _serialize_message envelope.

    Resolution order: the type registry, then the stored class path.
    Returns None when the payload is malformed or no class can be found;
    on constructor errors the textual content is preserved as a fallback.
    """
    raw_data = payload.get("data") or {}
    if not isinstance(raw_data, dict):
        return None

    message_type = payload.get("type")
    message_cls: type[BaseMessage] | None = None
    if isinstance(message_type, str):
        message_cls = MESSAGE_TYPE_REGISTRY.get(message_type)
    if message_cls is None:
        class_path = payload.get("class_path")
        if isinstance(class_path, str):
            message_cls = _locate_message_class(class_path)
    if message_cls is None:
        return None

    kwargs = raw_data.copy()
    kwargs.pop("type", None)
    try:
        return message_cls(**kwargs)
    except (TypeError, ValueError):
        # Last resort: keep at least the textual content.
        return message_cls(content=kwargs.get("content", ""))
+
+
class RuntimeState:
    """Holds runtime information shared across CLI commands."""

    def __init__(self, service_name: str = "mcp") -> None:
        """Create an empty state container and load any persisted sessions.

        Args:
            service_name: Namespaces the persisted-session file on disk.
        """
        self.service_name = service_name
        # Agent executor; None until initialization populates it.
        self.agent_executor: Any = None
        # MCP client owning the tool connections; None until initialized.
        self.mcp_client: Any | None = None
        # session_id -> ordered message history.
        self.chat_sessions: dict[str, list[BaseMessage]] = {}
        # session_id -> {"created_at", "updated_at", "message_count"}.
        self.session_metadata: dict[str, dict[str, Any]] = {}
        # Lightweight per-tool records used by list-style commands.
        self.tool_summaries: list[dict[str, object]] = []
        # sanitized tool name -> loaded tool object.
        self.tool_map: dict[str, Any] = {}
        # sanitized tool name -> full detail record (schema, metadata, ...).
        self.tool_details: dict[str, dict[str, Any]] = {}
        self.persistence_enabled: bool = not _persistence_disabled()
        self.state_file_path: Path = _resolve_state_file_path(service_name)
        if self.persistence_enabled:
            self._load_persisted_sessions()

    def record_message(
        self,
        session_id: str,
        message: BaseMessage,
    ) -> list[BaseMessage]:
        """Append *message* to the session, persist, and return the history."""
        history = self._ensure_session(session_id)
        history.append(message)
        self._touch_session(session_id, persist=True)
        return history

    def pop_last_message(self, session_id: str) -> None:
        """Drop the most recent message (rollback for failed agent turns).

        Removing the last remaining message deletes the whole session.
        """
        history = self.chat_sessions.get(session_id)
        if not history:
            return
        history.pop()
        if history:
            self._touch_session(session_id, persist=True)
            return
        self.clear_session(session_id, persist=True)

    def clear_session(self, session_id: str, *, persist: bool = False) -> None:
        """Forget a session's history and metadata, optionally persisting."""
        self.chat_sessions.pop(session_id, None)
        self.session_metadata.pop(session_id, None)
        if persist and self.persistence_enabled:
            self._persist_sessions()

    def serialize_session(self, session_id: str) -> dict[str, Any] | None:
        """Return a JSON-safe export of one session, or None if unknown."""
        history = self.chat_sessions.get(session_id)
        if history is None:
            return None
        metadata = self.session_metadata.get(session_id, {})
        return {
            "session_id": session_id,
            "metadata": {
                "created_at": metadata.get("created_at"),
                "updated_at": metadata.get("updated_at"),
                "message_count": len(history),
            },
            "messages": [_serialize_message(m) for m in history],
        }

    def list_sessions(self) -> list[dict[str, Any]]:
        """Summarize all sessions, most recently updated first."""
        sessions = [
            {
                "session_id": sid,
                "message_count": len(hist),
                "created_at": (
                    self.session_metadata.get(sid, {})
                    .get("created_at")
                ),
                "updated_at": (
                    self.session_metadata.get(sid, {})
                    .get("updated_at")
                ),
            }
            for sid, hist in self.chat_sessions.items()
        ]
        # Missing timestamps sort as empty strings, i.e. last.
        sessions.sort(
            key=lambda item: item.get("updated_at") or "",
            reverse=True,
        )
        return sessions

    def _ensure_session(self, session_id: str) -> list[BaseMessage]:
        """Return the session's history, creating metadata on first use."""
        history = self.chat_sessions.setdefault(session_id, [])
        if not history:
            # Brand-new (or emptied) session: stamp fresh metadata and
            # persist immediately so the session is visible on disk.
            now = _utc_timestamp()
            self.session_metadata[session_id] = {
                "created_at": now,
                "updated_at": now,
                "message_count": 0,
            }
            self._persist_sessions()
        return history

    def _touch_session(self, session_id: str, *, persist: bool) -> None:
        """Refresh a session's updated_at/message_count, optionally persisting."""
        metadata = self.session_metadata.setdefault(session_id, {})
        metadata.setdefault("created_at", _utc_timestamp())
        metadata["updated_at"] = _utc_timestamp()
        metadata["message_count"] = len(self.chat_sessions.get(session_id, []))
        if persist and self.persistence_enabled:
            self._persist_sessions()

    def _load_persisted_sessions(self) -> None:
        """Populate sessions from the state file, tolerating bad data.

        Corrupt files, non-dict records, and undeserializable messages are
        skipped (with a stderr note for file-level failures) rather than
        raising.
        """
        if not self.persistence_enabled:
            return
        path = self.state_file_path
        if not path.exists():
            return
        try:
            raw = path.read_text(encoding="utf-8")
            payload = json.loads(raw)
        except (OSError, json.JSONDecodeError) as exc:
            print(
                f"Failed to load persisted chat sessions: {exc}",
                file=sys.stderr,
            )
            return

        sessions = payload.get("sessions")
        if not isinstance(sessions, dict):
            return

        for session_id, record in sessions.items():
            if not isinstance(session_id, str) or not isinstance(record, dict):
                continue
            messages_payload = record.get("messages", [])
            if not isinstance(messages_payload, list):
                messages_payload = []
            history: list[BaseMessage] = []
            for message_payload in messages_payload:
                if not isinstance(message_payload, dict):
                    continue
                message = _deserialize_message(message_payload)
                if message is not None:
                    history.append(message)
            self.chat_sessions[session_id] = history
            # Repair missing timestamps and recompute the count, since some
            # messages may have been dropped above.
            metadata = dict(record.get("metadata") or {})
            metadata.setdefault("created_at", _utc_timestamp())
            metadata.setdefault("updated_at", metadata["created_at"])
            metadata["message_count"] = len(history)
            self.session_metadata[session_id] = metadata

    def _persist_sessions(self) -> None:
        """Write every session to the state file via a temp-file replace."""
        if not self.persistence_enabled:
            return
        payload = {
            "version": 1,
            "sessions": {
                session_id: {
                    "messages": [
                        _serialize_message(message)
                        for message in history
                    ],
                    "metadata": {
                        **self.session_metadata.get(session_id, {}),
                        "message_count": len(history),
                    },
                }
                for session_id, history in self.chat_sessions.items()
            },
        }
        try:
            # Write to a sibling .tmp file first, then replace, so a crash
            # mid-write cannot corrupt the existing state file.
            tmp_path = self.state_file_path.with_suffix(
                self.state_file_path.suffix + ".tmp"
            )
            tmp_path.write_text(
                json.dumps(payload, indent=2, ensure_ascii=False),
                encoding="utf-8",
            )
            tmp_path.replace(self.state_file_path)
        except OSError as exc:
            print(f"Failed to persist chat sessions: {exc}", file=sys.stderr)
diff --git a/community_mcp_servers/lib/utils.py b/community_mcp_servers/lib/utils.py
new file mode 100644
index 00000000..d5a10b8e
--- /dev/null
+++ b/community_mcp_servers/lib/utils.py
@@ -0,0 +1,176 @@
+import json
+import os
+import sys
+import re
+from typing import Any, Callable
+
+from langchain_core.messages import BaseMessage
+
+__all__ = [
+ "sanitize_tool_name",
+ "truthy",
+ "load_json_env",
+ "build_connection_config",
+ "schema_from_model",
+ "extract_message_text",
+ "_eprint",
+]
+
+
def sanitize_tool_name(name: str) -> str:
    """Lowercase *name* and collapse runs of disallowed characters to '_'.

    Letters, digits, underscores, and hyphens are kept; leading/trailing
    underscores are stripped.
    """
    lowered = name.lower()
    collapsed = re.sub(r"[^a-zA-Z0-9_-]+", "_", lowered)
    return collapsed.strip("_")
+
+
+def truthy(value: str | None) -> bool:
+ if value is None:
+ return False
+ return value.strip().lower() in {"1", "true", "t", "yes", "y", "on"}
+
+
+def load_json_env(
+ env_name: str, *, value_validator: Callable[[Any], Any] | None = None
+) -> dict[str, str]:
+ raw = os.getenv(env_name)
+ if not raw:
+ return {}
+ try:
+ parsed = json.loads(raw)
+ except json.JSONDecodeError as exc:
+ raise ValueError(
+ f"Environment variable {env_name} must contain valid JSON."
+ ) from exc
+ if not isinstance(parsed, dict):
+ raise ValueError(
+ (
+ f"Environment variable {env_name} must be a JSON object "
+ "with string keys."
+ )
+ )
+ if value_validator is None:
+ return {str(k): str(v) for k, v in parsed.items()}
+ validated: dict[str, str] = {}
+ for key, value in parsed.items():
+ validated[str(key)] = str(value_validator(value))
+ return validated
+
+
def build_connection_config(
    service_name: str = "github",
    server_url_env: str = "GITHUB_MCP_SERVER_URL",
    default_server_url: str = "https://api.githubcopilot.com/mcp/",
    token_env: str = "GITHUB_PERSONAL_ACCESS_TOKEN",
    bearer_token_env: str | None = "GITHUB_MCP_BEARER_TOKEN",
) -> dict[str, Any]:
    """Assemble the MCP connection dict (url/transport/headers/timeout).

    All optional settings are read from "<SERVICE>_MCP_*" environment
    variables derived from *service_name*.

    Args:
        service_name: Name of the service (for env-var prefixing).
        server_url_env: Environment variable name for the server URL.
        default_server_url: Default server URL if not specified.
        token_env: Primary token environment variable name.
        bearer_token_env: Optional bearer token environment variable name;
            takes precedence over *token_env* when set.

    Raises:
        ValueError: for an empty URL, an unknown transport, or a
            non-positive/unparsable timeout.
    """
    prefix = service_name.upper()

    server_url = os.getenv(server_url_env, default_server_url).strip()
    if not server_url:
        raise ValueError(f"{server_url_env} cannot be empty.")

    # Optionally force the read-only endpoint variant.
    if truthy(os.getenv(f"{prefix}_MCP_USE_READONLY_PATH")):
        if not server_url.endswith("/readonly"):
            server_url = server_url.rstrip("/") + "/readonly"

    transport_env = f"{prefix}_MCP_TRANSPORT"
    transport = os.getenv(transport_env, "streamable_http").strip().lower()
    if transport not in {"streamable_http", "sse"}:
        raise ValueError(
            f"Unsupported {transport_env}. Use 'streamable_http' or 'sse'."
        )

    headers: dict[str, str] = {}

    # Bearer token wins; an unset or empty bearer falls back to the
    # primary token variable.
    bearer = os.getenv(bearer_token_env) if bearer_token_env else None
    auth_token = bearer or os.getenv(token_env)
    if auth_token:
        headers["Authorization"] = f"Bearer {auth_token}"

    toolsets = os.getenv(f"{prefix}_MCP_TOOLSETS")
    if toolsets:
        headers["X-MCP-Toolsets"] = toolsets.strip()

    if truthy(os.getenv(f"{prefix}_MCP_READONLY")):
        headers["X-MCP-Readonly"] = "true"

    user_agent = os.getenv(f"{prefix}_MCP_USER_AGENT")
    if user_agent:
        headers["User-Agent"] = user_agent.strip()

    # Arbitrary extra headers supplied as a JSON object.
    headers.update(load_json_env(f"{prefix}_MCP_EXTRA_HEADERS"))

    connection: dict[str, Any] = {"url": server_url, "transport": transport}
    if headers:
        connection["headers"] = headers

    timeout_env = f"{prefix}_MCP_TIMEOUT_SECONDS"
    raw_timeout = os.getenv(timeout_env)
    if raw_timeout:
        try:
            timeout_value = float(raw_timeout)
        except ValueError as exc:
            raise ValueError(
                f"{timeout_env} must be a positive number"
            ) from exc
        if timeout_value <= 0:
            raise ValueError(
                f"{timeout_env} must be greater than zero"
            )
        connection["timeout"] = timeout_value

    return connection
+
+
+def schema_from_model(model: Any) -> dict[str, Any] | None:
+ if model is None:
+ return None
+ for attr in ("model_json_schema", "schema"):
+ schema_fn = getattr(model, attr, None)
+ if callable(schema_fn):
+ schema = schema_fn()
+ if isinstance(schema, dict):
+ return schema
+ return None
+
+
def extract_message_text(message: BaseMessage) -> str:
    """Flatten a message's content to plain text.

    String content is returned as-is; list content has its "text" chunks
    joined with newlines; anything else (including a list with no text
    chunks) is stringified.
    """
    content = message.content
    if isinstance(content, str):
        return content
    if isinstance(content, list):
        chunks = [
            part.get("text", "")
            for part in content
            if isinstance(part, dict) and part.get("type") == "text"
        ]
        if chunks:
            return "\n".join(chunks)
    return str(content)
+
+
def _eprint(
    *args: object,
    sep: str | None = None,
    end: str | None = None,
    flush: bool = False,
) -> None:
    """print() wrapper that writes to stderr instead of stdout."""
    print(*args, sep=sep, end=end, file=sys.stderr, flush=flush)
diff --git a/community_mcp_servers/llm_providers/__init__.py b/community_mcp_servers/llm_providers/__init__.py
new file mode 100644
index 00000000..95b40e88
--- /dev/null
+++ b/community_mcp_servers/llm_providers/__init__.py
@@ -0,0 +1,3 @@
+"""LLM provider modules for MCP agents."""
+
+__all__ = ["gemini", "groq_llm", "lightning_llm"]
diff --git a/community_mcp_servers/llm_providers/gemini.py b/community_mcp_servers/llm_providers/gemini.py
new file mode 100644
index 00000000..c883733f
--- /dev/null
+++ b/community_mcp_servers/llm_providers/gemini.py
@@ -0,0 +1,22 @@
+import os
+from langchain_google_genai import ChatGoogleGenerativeAI
+from langchain_core.language_models import BaseChatModel
+from pydantic import SecretStr
+
def get_llm() -> BaseChatModel:
    """Build a Gemini chat model from environment configuration.

    Requires GOOGLE_API_KEY; MODEL optionally overrides the default
    model id.

    Raises:
        ValueError: when GOOGLE_API_KEY is missing or MODEL is blank.
    """
    key = os.getenv("GOOGLE_API_KEY")
    if not key:
        raise ValueError(
            "Missing required environment variable: GOOGLE_API_KEY"
        )

    model_name = os.getenv("MODEL", "models/gemini-3-pro-preview").strip()
    if not model_name:
        raise ValueError("Model ID cannot be empty.")

    # NOTE(review): "tool_calling" disables streaming only for tool-call
    # responses — presumably a workaround for tool-call streaming issues;
    # confirm before changing.
    return ChatGoogleGenerativeAI(
        model=model_name,
        api_key=SecretStr(key),
        disable_streaming="tool_calling",
    )
diff --git a/community_mcp_servers/llm_providers/groq_llm.py b/community_mcp_servers/llm_providers/groq_llm.py
new file mode 100644
index 00000000..6b052c68
--- /dev/null
+++ b/community_mcp_servers/llm_providers/groq_llm.py
@@ -0,0 +1,23 @@
+import os
+
+from pydantic import SecretStr
+from langchain_groq import ChatGroq
+from langchain_core.language_models import BaseChatModel
+
+
def get_llm() -> BaseChatModel:
    """Build a Groq chat model from environment configuration.

    Requires GROQ_API_KEY; MODEL optionally overrides the default
    model id.

    Raises:
        ValueError: when GROQ_API_KEY is missing or MODEL is blank.
    """
    key = os.getenv("GROQ_API_KEY")
    if not key:
        raise ValueError(
            "Missing required environment variable: GROQ_API_KEY"
        )

    model_name = os.getenv("MODEL", "llama-3.1-8b-instant").strip()
    if not model_name:
        raise ValueError("Model ID cannot be empty.")

    return ChatGroq(
        model=model_name,
        api_key=SecretStr(key),
    )
diff --git a/community_mcp_servers/llm_providers/lightning_llm.py b/community_mcp_servers/llm_providers/lightning_llm.py
new file mode 100644
index 00000000..00ceb759
--- /dev/null
+++ b/community_mcp_servers/llm_providers/lightning_llm.py
@@ -0,0 +1,31 @@
+import os
+from langchain_openai import ChatOpenAI
+from langchain_core.language_models import BaseChatModel
+from pydantic import SecretStr
+
+
# Public endpoint documented in .env.example as the LIGHTNING_BASE_URL default.
_DEFAULT_LIGHTNING_BASE_URL = "https://lightning.ai/api/v1"


def get_llm() -> BaseChatModel:
    """Build a Lightning AI chat model (OpenAI-compatible endpoint).

    Reads LIGHTNING_API_KEY (required), LIGHTNING_BASE_URL (optional),
    and MODEL (optional).

    Raises:
        ValueError: when LIGHTNING_API_KEY is missing or MODEL is blank.
    """
    api_key = os.getenv("LIGHTNING_API_KEY")
    if not api_key:
        raise ValueError(
            "Missing required environment variable: LIGHTNING_API_KEY"
        )

    # Fix: .env.example documents LIGHTNING_BASE_URL as optional with this
    # default, but the original raised when it was unset. Fall back to the
    # documented endpoint instead of failing hard.
    base_url = os.getenv("LIGHTNING_BASE_URL") or _DEFAULT_LIGHTNING_BASE_URL

    model_id = os.getenv(
        "MODEL", "meta-llama/Llama-3.3-70B-Instruct"
    ).strip()
    if not model_id:
        raise ValueError("Model ID cannot be empty.")

    return ChatOpenAI(
        api_key=SecretStr(api_key),
        base_url=base_url,
        model=model_id,
    )
diff --git a/community_mcp_servers/main.py b/community_mcp_servers/main.py
new file mode 100644
index 00000000..028a08c4
--- /dev/null
+++ b/community_mcp_servers/main.py
@@ -0,0 +1,38 @@
import sys
from pathlib import Path

import typer
from dotenv import load_dotenv

# Load .env first so every later import and command sees the variables.
load_dotenv()

# Make sibling packages (lib/, agents/) importable when this file is run as
# a script rather than as an installed package module.
sys.path.insert(0, str(Path(__file__).parent))

from lib.base_mcp import create_mcp_cli
from agents import get_github_agent, get_jira_agent, get_slack_agent


# Root CLI; each service is mounted as a sub-app built from the shared base.
app = typer.Typer(
    help="Community AI MCP Agent - Unified access to GitHub, Slack, and more"
)

app.add_typer(
    create_mcp_cli("github", get_github_agent, "GitHub MCP Agent CLI"),
    name="github",
    help="GitHub MCP Agent - Interact with GitHub repositories",
)

app.add_typer(
    create_mcp_cli("jira", get_jira_agent, "Jira MCP Agent CLI"),
    name="jira",
    help="Jira MCP Agent - Interact with Jira projects",
)

app.add_typer(
    create_mcp_cli("slack", get_slack_agent, "Slack MCP Agent CLI"),
    name="slack",
    help="Slack MCP Agent - Interact with Slack workspaces",
)

if __name__ == "__main__":
    app()
diff --git a/community_mcp_servers/package.json b/community_mcp_servers/package.json
new file mode 100644
index 00000000..e207da29
--- /dev/null
+++ b/community_mcp_servers/package.json
@@ -0,0 +1,9 @@
+{
+ "name": "mcp-agents",
+ "version": "1.0.0",
+ "description": "Node.js MCP server dependencies for Slack and other stdio-based agents",
+ "private": true,
+ "dependencies": {
+ "slack-mcp-server": "latest"
+ }
+}
diff --git a/community_mcp_servers/pyproject.toml b/community_mcp_servers/pyproject.toml
new file mode 100644
index 00000000..5f0a8c0c
--- /dev/null
+++ b/community_mcp_servers/pyproject.toml
@@ -0,0 +1,28 @@
+[project]
+name = "mcp_impl"
+version = "0.1.0"
+description = "Community AI MCP Agent"
+readme = "README.md"
+requires-python = ">=3.12"
+dependencies = [
+ "langchain",
+ "langchain-community",
+ "langchain-core",
+ "langchain-google-genai",
+ "langchain-text-splitters",
+ "langchain-mcp-adapters",
+ "langsmith",
+ "langgraph",
+ "langgraph-checkpoint",
+ "langgraph-prebuilt",
+ "langgraph-sdk",
+ "google-genai",
+ "pygithub",
+ "slack-sdk",
+ "pydantic",
+ "python-dotenv",
+ "sqlalchemy",
+ "typer",
+ "langchain-groq",
+ "langchain-openai>=1.1.0",
+]
diff --git a/community_mcp_servers/requirements.txt b/community_mcp_servers/requirements.txt
new file mode 100644
index 00000000..6c0956dc
--- /dev/null
+++ b/community_mcp_servers/requirements.txt
@@ -0,0 +1,30 @@
+# LangChain
+langchain
+langchain-community
+langchain-core
+langchain-google-genai
+langchain-openai>=1.1.0
+langchain-text-splitters
+langchain-mcp-adapters
+langsmith
+
+# LangGraph
+langgraph
+langgraph-checkpoint
+langgraph-prebuilt
+langgraph-sdk
+
+google-genai
+
+# External APIs
+pygithub
+slack-sdk
+
+# Utilities
+pydantic
+python-dotenv
+sqlalchemy
+typer
+
+# Groq
+langchain-groq