From aafd0f01177f7e40ed68436ecdf35ed22e7f113b Mon Sep 17 00:00:00 2001
From: Raghav Gupta <142162663+Raghav-56@users.noreply.github.com>
Date: Sat, 22 Nov 2025 23:18:20 +0530
Subject: [PATCH 1/5] feat(mcp): openMF#46 add GitHub MCP CLI chat
- GitHub MCP server integration test via cli
- Add gemini, lightning and groq llm support
- Set up CLI interface for chat
Uses typer and chat loop
The gemini 3 one works, tested that
---
community_chatbot/mcp/.gitignore | 2 +
community_chatbot/mcp/github/.env.example | 20 ++
community_chatbot/mcp/github/.gitignore | 2 +
community_chatbot/mcp/github/agent.py | 133 ++++++++
community_chatbot/mcp/github/commands.py | 286 +++++++++++++++++
community_chatbot/mcp/github/gemini.py | 22 ++
community_chatbot/mcp/github/github_mcp.py | 137 ++++++++
community_chatbot/mcp/github/groq_llm.py | 22 ++
community_chatbot/mcp/github/lightning_llm.py | 39 +++
community_chatbot/mcp/github/state.py | 296 ++++++++++++++++++
community_chatbot/mcp/github/utils.py | 148 +++++++++
community_chatbot/mcp/requirements.txt | 28 ++
12 files changed, 1135 insertions(+)
create mode 100644 community_chatbot/mcp/.gitignore
create mode 100644 community_chatbot/mcp/github/.env.example
create mode 100644 community_chatbot/mcp/github/.gitignore
create mode 100644 community_chatbot/mcp/github/agent.py
create mode 100644 community_chatbot/mcp/github/commands.py
create mode 100644 community_chatbot/mcp/github/gemini.py
create mode 100644 community_chatbot/mcp/github/github_mcp.py
create mode 100644 community_chatbot/mcp/github/groq_llm.py
create mode 100644 community_chatbot/mcp/github/lightning_llm.py
create mode 100644 community_chatbot/mcp/github/state.py
create mode 100644 community_chatbot/mcp/github/utils.py
create mode 100644 community_chatbot/mcp/requirements.txt
diff --git a/community_chatbot/mcp/.gitignore b/community_chatbot/mcp/.gitignore
new file mode 100644
index 00000000..634f145f
--- /dev/null
+++ b/community_chatbot/mcp/.gitignore
@@ -0,0 +1,2 @@
+*.env
+__pycache__/
diff --git a/community_chatbot/mcp/github/.env.example b/community_chatbot/mcp/github/.env.example
new file mode 100644
index 00000000..faf0086a
--- /dev/null
+++ b/community_chatbot/mcp/github/.env.example
@@ -0,0 +1,20 @@
+GITHUB_PERSONAL_ACCESS_TOKEN=your_github_token_here
+# OPENAI_API_KEY=your_openai_api_key_here
+
+GOOGLE_API_KEY=your_google_api_key_here
+
+GROQ_API_KEY=your_groq_api_key_here
+
+LLM_PROVIDER=gemini
+
+LIGHTNING_API_KEY=your_lightning_api_key_here
+
+# Format(for custom models): https://lightning.ai/raghav-56/model-apis/models/your-model-id
+LIGHTNING_BASE_URL=https://lightning.ai/api/v1
+
+# Model name (optional)
+MODEL="models/gemini-3-pro-preview"
+
+GITHUB_APP_PRIVATE_KEY="your_github_app_private_key_here"
+
+MCP_LOG_LEVEL=debug
diff --git a/community_chatbot/mcp/github/.gitignore b/community_chatbot/mcp/github/.gitignore
new file mode 100644
index 00000000..634f145f
--- /dev/null
+++ b/community_chatbot/mcp/github/.gitignore
@@ -0,0 +1,2 @@
+*.env
+__pycache__/
diff --git a/community_chatbot/mcp/github/agent.py b/community_chatbot/mcp/github/agent.py
new file mode 100644
index 00000000..eda1eeda
--- /dev/null
+++ b/community_chatbot/mcp/github/agent.py
@@ -0,0 +1,133 @@
+import os
+from typing import Any, cast
+
+from langchain_core.messages import AIMessage, BaseMessage
+from langchain_mcp_adapters.client import MultiServerMCPClient
+from langgraph.prebuilt import create_react_agent
+
+from state import RuntimeState
+from utils import (
+ build_connection_config,
+ sanitize_tool_name,
+ schema_from_model,
+)
+
+
+__all__ = [
+ "initialize_agent",
+ "ensure_agent_initialized",
+ "stream_agent_response",
+]
+
+
+def _get_llm_provider():
+
+ provider = os.getenv("LLM_PROVIDER", "lightning").lower()
+
+ if provider == "lightning":
+ from lightning_llm import get_llm
+ elif provider == "groq":
+ from groq_llm import get_llm
+ elif provider == "gemini":
+ from gemini import get_llm
+ else:
+ raise ValueError(
+ f"Unsupported LLM_PROVIDER: {provider}. "
+ f"Supported values: 'lightning', 'groq', 'gemini'"
+ )
+
+ return get_llm
+
+
+async def initialize_agent(state: RuntimeState) -> None:
+ missing_vars: list[str] = []
+ if not (
+ os.getenv("GITHUB_PERSONAL_ACCESS_TOKEN")
+ or os.getenv("GITHUB_MCP_BEARER_TOKEN")
+ ):
+ missing_vars.append("GITHUB_PERSONAL_ACCESS_TOKEN")
+
+ if missing_vars:
+ missing_str = ", ".join(sorted(set(missing_vars)))
+ raise ValueError(
+ "Missing required environment variables: " + missing_str
+ )
+
+ connection = build_connection_config()
+
+ state.mcp_client = MultiServerMCPClient({"github": connection})
+
+ client = state.mcp_client
+ if client is None:
+ raise RuntimeError("Failed to initialize MCP client.")
+
+ # Get tools from MCP client - no server_name parameter needed
+ tools = await client.get_tools()
+
+ state.tool_summaries = []
+ state.tool_map = {}
+ state.tool_details = {}
+
+ for tool in tools:
+ original_name = tool.name
+ sanitized_name = sanitize_tool_name(original_name)
+
+ args_schema = schema_from_model(getattr(tool, "args_schema", None))
+ metadata = getattr(tool, "metadata", {}) or {}
+
+ state.tool_map[sanitized_name] = tool
+ state.tool_details[sanitized_name] = {
+ "name": sanitized_name,
+ "original_name": original_name,
+ "description": getattr(tool, "description", ""),
+ "metadata": metadata,
+ "args_schema": args_schema,
+ }
+ state.tool_summaries.append(
+ {
+ "name": sanitized_name,
+ "original_name": original_name,
+ "description": getattr(tool, "description", ""),
+ }
+ )
+
+ # Get LLM and create agent with tools
+ get_llm = _get_llm_provider()
+ llm = get_llm()
+
+ # Create React agent with model and tools
+ # The agent will automatically bind tools to the model
+ state.agent_executor = create_react_agent(llm, tools)
+
+
+async def ensure_agent_initialized(state: RuntimeState) -> None:
+ if state.agent_executor is None or state.mcp_client is None:
+ await initialize_agent(state)
+ if state.agent_executor is None or state.mcp_client is None:
+ raise RuntimeError("Agent failed to initialize")
+
+
+async def stream_agent_response(
+ state: RuntimeState,
+ session_history: list[BaseMessage],
+) -> AIMessage:
+ if state.agent_executor is None:
+ raise RuntimeError("Agent executor is not initialized.")
+
+ executor = cast(Any, state.agent_executor)
+
+ # Use ainvoke for async execution with the agent
+ result = await executor.ainvoke({"messages": session_history})
+
+ # Extract the last AI message from the result
+ messages = result.get("messages", [])
+ last_ai_message: AIMessage | None = None
+
+ for message in reversed(messages):
+ if isinstance(message, AIMessage):
+ last_ai_message = message
+ break
+
+ if last_ai_message is None:
+ raise RuntimeError("The agent did not return a response.")
+ return last_ai_message
diff --git a/community_chatbot/mcp/github/commands.py b/community_chatbot/mcp/github/commands.py
new file mode 100644
index 00000000..f1673e02
--- /dev/null
+++ b/community_chatbot/mcp/github/commands.py
@@ -0,0 +1,286 @@
+import asyncio
+import json
+import sys
+from pathlib import Path
+from typing import Any
+
+from langchain_core.messages import HumanMessage
+
+from agent import ensure_agent_initialized, stream_agent_response
+from state import RuntimeState
+from utils import (
+ extract_message_text,
+ sanitize_tool_name,
+ _eprint,
+)
+
+
+__all__ = [
+ "list_tools",
+ "tool_info",
+ "invoke_tool",
+ "chat",
+ "chat_loop",
+ "sessions",
+ "clear_session",
+ "export_session",
+ "health",
+]
+
+
+
+
+async def list_tools(state: RuntimeState) -> int:
+ await ensure_agent_initialized(state)
+ if not state.tool_summaries:
+ print("No tools available.")
+ return 0
+
+ for tool in state.tool_summaries:
+ description = tool.get("description") or "(no description)"
+ original_name = tool.get("original_name", "?")
+ print(f"- {tool['name']} (original: {original_name}): {description}")
+ return 0
+
+
+async def tool_info(state: RuntimeState, tool_identifier: str) -> int:
+ await ensure_agent_initialized(state)
+ detail = _find_tool_detail(state, tool_identifier)
+ if detail is None:
+ _eprint(
+ f"Tool '{tool_identifier}' not found. "
+ "Run list-tools to see available IDs.",
+ )
+ return 1
+
+ print(f"CLI name: {detail['name']}")
+ print(f"Original name: {detail['original_name']}")
+ if detail.get("description"):
+ print(f"Description: {detail['description']}")
+ metadata = detail.get("metadata") or {}
+ if metadata:
+ print("Metadata:")
+ for key, value in metadata.items():
+ print(f" - {key}: {value}")
+ args_schema = detail.get("args_schema")
+ if args_schema:
+ print("Args schema:", json.dumps(args_schema, indent=2))
+ return 0
+
+
+async def invoke_tool(
+ state: RuntimeState,
+ tool_identifier: str,
+ args_json: str | None = None,
+) -> int:
+ await ensure_agent_initialized(state)
+ detail = _find_tool_detail(state, tool_identifier)
+ if detail is None:
+ _eprint(
+ f"Tool '{tool_identifier}' not found. "
+ "Run list-tools to see available IDs.",
+ )
+ return 1
+
+ tool = state.tool_map.get(detail["name"])
+ if tool is None:
+ _eprint(f"Tool '{tool_identifier}' is not loaded in the agent.")
+ return 1
+
+ arguments: dict[str, Any] = {}
+ if args_json:
+ try:
+ parsed_args = json.loads(args_json)
+ except json.JSONDecodeError as exc:
+ _eprint(f"Failed to parse args JSON: {exc}")
+ return 1
+ if not isinstance(parsed_args, dict):
+ print(
+ "Tool arguments must be provided as a JSON object.",
+ file=sys.stderr,
+ )
+ return 1
+ arguments = parsed_args
+
+ result = await tool.ainvoke(arguments)
+ if isinstance(result, tuple) and len(result) == 2:
+ text_part, artifacts = result
+ if text_part:
+ print(text_part)
+ if artifacts:
+ print(
+ "Artifacts:",
+ json.dumps([repr(a) for a in artifacts], indent=2),
+ )
+ else:
+ print(result)
+ return 0
+
+
+async def chat(
+ state: RuntimeState,
+ message: str,
+ session_id: str = "default",
+) -> int:
+ await ensure_agent_initialized(state)
+
+ human_message = HumanMessage(content=message)
+ session_history = state.record_message(session_id, human_message)
+
+ try:
+ last_ai_message = await stream_agent_response(state, session_history)
+ state.record_message(session_id, last_ai_message)
+ print(extract_message_text(last_ai_message))
+ return 0
+ except RuntimeError as exc:
+ state.pop_last_message(session_id)
+ print(str(exc), file=sys.stderr)
+ return 1
+
+
+async def chat_loop(
+ state: RuntimeState,
+ session_id: str = "default",
+ exit_command: str = "/exit",
+ reset_command: str = "/reset",
+ prompt_prefix: str | None = None,
+) -> int:
+ await ensure_agent_initialized(state)
+
+ normalized_exit = exit_command.strip().lower()
+ normalized_reset = reset_command.strip().lower()
+ prompt_template = prompt_prefix or f"[{session_id}]> "
+
+ print("Interactive chat started. Type your message and press Enter.")
+ print(
+ "Use '{exit}' to quit and '{reset}' to clear the session.".format(
+ exit=exit_command,
+ reset=reset_command,
+ )
+ )
+
+ loop = asyncio.get_running_loop()
+
+ while True:
+ try:
+ user_message = await loop.run_in_executor(
+ None, input, prompt_template
+ )
+ except (KeyboardInterrupt, EOFError):
+ print("\nExiting chat loop.")
+ return 0
+
+ if user_message is None:
+ continue
+
+ stripped = user_message.strip()
+ if not stripped:
+ continue
+
+ lowered = stripped.lower()
+ if lowered == normalized_exit:
+ print("Ending chat loop.")
+ return 0
+
+ if lowered == normalized_reset:
+ state.clear_session(session_id, persist=True)
+ print(f"Session '{session_id}' reset.")
+ continue
+
+ human_message = HumanMessage(content=user_message)
+ session_history = state.record_message(session_id, human_message)
+
+ try:
+ last_ai_message = await stream_agent_response(
+ state, session_history
+ )
+ state.record_message(session_id, last_ai_message)
+ print(extract_message_text(last_ai_message))
+ except RuntimeError as exc:
+ print(str(exc), file=sys.stderr)
+ state.pop_last_message(session_id)
+
+
+async def sessions(state: RuntimeState) -> int:
+ summaries = state.list_sessions()
+ if not summaries:
+ print("No active sessions.")
+ return 0
+
+ print("Active sessions:")
+ for info in summaries:
+ session_id = info["session_id"]
+ count = info.get("message_count", 0)
+ updated = info.get("updated_at") or "unknown"
+ print(f"- {session_id} ({count} messages, updated {updated})")
+ return 0
+
+
+async def clear_session(state: RuntimeState, session_id: str) -> int:
+ if session_id in state.chat_sessions:
+ state.clear_session(session_id, persist=True)
+ print(f"Cleared session '{session_id}'.")
+ return 0
+
+ print(f"Session '{session_id}' not found.", file=sys.stderr)
+ return 1
+
+
+async def export_session(
+ state: RuntimeState,
+ session_id: str,
+ output_path: str | None = None,
+) -> int:
+ export_payload = state.serialize_session(session_id)
+ if export_payload is None:
+ print(
+ f"Session '{session_id}' not found.",
+ file=sys.stderr,
+ )
+ return 1
+
+ payload_text = json.dumps(export_payload, indent=2, ensure_ascii=False)
+
+ if not output_path:
+ print(payload_text)
+ return 0
+
+ try:
+ path = Path(output_path).expanduser()
+ path.parent.mkdir(parents=True, exist_ok=True)
+ path.write_text(payload_text + "\n", encoding="utf-8")
+ except OSError as exc:
+ _eprint(f"Failed to export session: {exc}")
+ return 1
+
+ print(f"Session '{session_id}' exported to '{path}'.")
+ return 0
+
+
+async def health(state: RuntimeState) -> int:
+ status = {
+ "agent_initialized": state.agent_executor is not None,
+ "tools_available": len(state.tool_summaries),
+ }
+ if state.agent_executor is None:
+ print(
+ "Agent not initialized. Run a command that initializes it"
+ " (e.g. list-tools)."
+ )
+ else:
+ print("Agent initialized.")
+ print(f"Tools available: {status['tools_available']}")
+ return 0
+
+
+def _find_tool_detail(
+ state: RuntimeState,
+ identifier: str,
+) -> dict[str, Any] | None:
+ normalized = sanitize_tool_name(identifier)
+ if normalized in state.tool_details:
+ return state.tool_details[normalized]
+ for detail in state.tool_details.values():
+ if detail.get("original_name") == identifier:
+ return detail
+ return None
diff --git a/community_chatbot/mcp/github/gemini.py b/community_chatbot/mcp/github/gemini.py
new file mode 100644
index 00000000..212e75a6
--- /dev/null
+++ b/community_chatbot/mcp/github/gemini.py
@@ -0,0 +1,22 @@
+import os
+from langchain_google_genai import ChatGoogleGenerativeAI
+from langchain_core.language_models import BaseChatModel
+
+def get_llm() -> BaseChatModel:
+ """
+ Returns a configured LangChain chat model instance for Gemini.
+ Raises ValueError if required environment variables are missing.
+ """
+ api_key = os.getenv("GOOGLE_API_KEY")
+ if not api_key:
+ raise ValueError("Missing required environment variable: GOOGLE_API_KEY")
+
+    model_id = os.getenv("MODEL", "gemini-2.5-pro").strip()
+ if not model_id:
+ raise ValueError("Model ID cannot be empty.")
+
+ return ChatGoogleGenerativeAI(
+ model=model_id,
+ api_key=api_key,
+ streaming=True,
+ )
diff --git a/community_chatbot/mcp/github/github_mcp.py b/community_chatbot/mcp/github/github_mcp.py
new file mode 100644
index 00000000..4d29f1d7
--- /dev/null
+++ b/community_chatbot/mcp/github/github_mcp.py
@@ -0,0 +1,137 @@
+import asyncio
+
+from dotenv import load_dotenv
+import typer
+
+
+from state import RuntimeState
+import commands
+
+
+load_dotenv()
+
+state = RuntimeState()
+
+app = typer.Typer(help="GitHub MCP Agent CLI")
+
+
+@app.command("list-tools")
+def list_tools() -> None:
+    """List the available MCP tools from the GitHub server."""
+    raise typer.Exit(code=asyncio.run(commands.list_tools(state)))
+
+
+@app.command("chat")
+def chat(
+ message: str = typer.Argument(..., help="Message to send to the agent."),
+ session_id: str = typer.Option("default", help="Chat session identifier."),
+) -> None:
+    """Send a message to the agent and stream the response."""
+    raise typer.Exit(code=asyncio.run(commands.chat(state, message=message, session_id=session_id)))
+
+
+@app.command("chat-loop")
+def chat_loop(
+ session_id: str = typer.Option(
+ "default",
+ help="Chat session identifier to use for the loop.",
+ ),
+ exit_command: str = typer.Option(
+ "/exit",
+ help="Command typed alone on a line to end the loop.",
+ ),
+ reset_command: str = typer.Option(
+ "/reset",
+ help="Command typed alone on a line to reset the session history.",
+ ),
+ prompt_prefix: str | None = typer.Option(
+ None,
+ help="Optional custom prompt prefix displayed before user input.",
+ ),
+) -> None:
+    """Start an interactive multi-turn chat session."""
+    raise typer.Exit(code=asyncio.run(
+        commands.chat_loop(
+            state,
+            session_id=session_id,
+            exit_command=exit_command,
+            reset_command=reset_command,
+            prompt_prefix=prompt_prefix,
+        )
+    ))
+
+
+@app.command("tool-info")
+def tool_info(
+ tool_identifier: str = typer.Argument(
+ ..., help="Tool CLI or original name."
+ )
+) -> None:
+    """Show detailed information about a tool."""
+    raise typer.Exit(code=asyncio.run(commands.tool_info(state, tool_identifier=tool_identifier)))
+
+
+@app.command("invoke-tool")
+def invoke_tool(
+ tool_identifier: str = typer.Argument(
+ ..., help="Tool CLI or original name to invoke."
+ ),
+ args_json: str = typer.Option(
+ "{}",
+ help="JSON object containing arguments for the tool.",
+ ),
+) -> None:
+    """Invoke a GitHub MCP tool directly."""
+    raise typer.Exit(code=asyncio.run(
+        commands.invoke_tool(
+            state,
+            tool_identifier=tool_identifier,
+            args_json=args_json,
+        )
+    ))
+
+
+@app.command("sessions")
+def sessions() -> None:
+    """List active chat session identifiers."""
+    raise typer.Exit(code=asyncio.run(commands.sessions(state)))
+
+
+@app.command("clear-session")
+def clear_session(
+ session_id: str = typer.Argument(..., help="Session id to clear."),
+) -> None:
+    """Clear a stored chat session."""
+    raise typer.Exit(code=asyncio.run(commands.clear_session(state, session_id=session_id)))
+
+
+@app.command("export-session")
+def export_session(
+ session_id: str = typer.Argument(
+ ..., help="Session id to export."
+ ),
+ output_path: str | None = typer.Option(
+ None,
+ "--output",
+ "-o",
+ help="Optional path to write the exported JSON transcript.",
+ ),
+) -> None:
+    """Export a chat session transcript for testing or archival."""
+    raise typer.Exit(code=asyncio.run(
+        commands.export_session(
+            state,
+            session_id=session_id,
+            output_path=output_path,
+        )
+    ))
+
+
+@app.command("health")
+def health() -> None:
+    """Show initialization status and tool count."""
+    raise typer.Exit(code=asyncio.run(commands.health(state)))
+
+
+if __name__ == "__main__":
+ app()
diff --git a/community_chatbot/mcp/github/groq_llm.py b/community_chatbot/mcp/github/groq_llm.py
new file mode 100644
index 00000000..f3cb790f
--- /dev/null
+++ b/community_chatbot/mcp/github/groq_llm.py
@@ -0,0 +1,22 @@
+import os
+from langchain_groq import ChatGroq
+from langchain_core.language_models import BaseChatModel
+
+def get_llm() -> BaseChatModel:
+ """
+ Returns a configured LangChain chat model instance for Groq.
+ Raises ValueError if required environment variables are missing.
+ """
+ api_key = os.getenv("GROQ_API_KEY")
+ if not api_key:
+ raise ValueError("Missing required environment variable: GROQ_API_KEY")
+
+ model_id = os.getenv("MODEL", "llama-3.1-8b-instant").strip()
+ if not model_id:
+ raise ValueError("Model ID cannot be empty.")
+
+ return ChatGroq(
+ model=model_id,
+ api_key=api_key,
+ streaming=True
+ )
diff --git a/community_chatbot/mcp/github/lightning_llm.py b/community_chatbot/mcp/github/lightning_llm.py
new file mode 100644
index 00000000..6d2e3b75
--- /dev/null
+++ b/community_chatbot/mcp/github/lightning_llm.py
@@ -0,0 +1,39 @@
+import os
+from langchain_openai import ChatOpenAI
+from langchain_core.language_models import BaseChatModel
+
+
+def get_llm() -> BaseChatModel:
+ api_key = os.getenv("LIGHTNING_API_KEY")
+ if not api_key:
+ raise ValueError(
+ "Missing required environment variable: LIGHTNING_API_KEY"
+ )
+
+ base_url = os.getenv("LIGHTNING_BASE_URL")
+ if not base_url:
+ raise ValueError(
+ "Missing required environment variable: LIGHTNING_BASE_URL"
+ )
+
+ # Model name from your Lightning AI deployment
+ # Can be overridden via environment variable
+ model_id = os.getenv(
+ "MODEL", "meta-llama/Llama-3.3-70B-Instruct"
+ ).strip()
+ if not model_id:
+ raise ValueError("Model ID cannot be empty.")
+
+ # Optional: timeout and max_retries for robustness
+ timeout = int(os.getenv("LIGHTNING_TIMEOUT", "60"))
+ max_retries = int(os.getenv("LIGHTNING_MAX_RETRIES", "3"))
+
+ return ChatOpenAI(
+ api_key=api_key,
+ base_url=base_url,
+ model=model_id,
+ temperature=0.7,
+ streaming=True,
+ timeout=timeout,
+ max_retries=max_retries,
+ )
diff --git a/community_chatbot/mcp/github/state.py b/community_chatbot/mcp/github/state.py
new file mode 100644
index 00000000..108bec7c
--- /dev/null
+++ b/community_chatbot/mcp/github/state.py
@@ -0,0 +1,296 @@
+import importlib
+import json
+import os
+import sys
+from datetime import datetime, timezone
+from pathlib import Path
+from typing import Any, cast
+
+from langchain_core.messages import (
+ AIMessage,
+ BaseMessage,
+ HumanMessage,
+ SystemMessage,
+ ToolMessage,
+)
+
+from langchain_core.messages import ChatMessage, FunctionMessage
+
+from utils import truthy
+
+
+__all__ = ["RuntimeState", "MESSAGE_TYPE_REGISTRY"]
+
+
+_STATE_FILE_ENV = "GITHUB_MCP_STATE_FILE"
+_STATE_DIR_ENV = "GITHUB_MCP_STATE_DIR"
+_STATE_DISABLE_ENV = "GITHUB_MCP_DISABLE_PERSISTENCE"
+_DEFAULT_STATE_FILENAME = "github_mcp_sessions.json"
+_DEFAULT_STATE_SUBDIR = ".github_mcp"
+
+MESSAGE_TYPE_REGISTRY: dict[str, type[BaseMessage]] = {
+ "ai": AIMessage,
+ "human": HumanMessage,
+ "system": SystemMessage,
+ "tool": ToolMessage,
+}
+for key, cls in (("function", FunctionMessage), ("chat", ChatMessage)):
+ if key not in MESSAGE_TYPE_REGISTRY and cls is not None:
+ MESSAGE_TYPE_REGISTRY[key] = cast(type[BaseMessage], cls)
+
+
+def _persistence_disabled() -> bool:
+ return truthy(os.getenv(_STATE_DISABLE_ENV))
+
+
+def _resolve_state_file_path() -> Path:
+ if _persistence_disabled():
+ return Path(os.devnull)
+
+ configured_file = os.getenv(_STATE_FILE_ENV)
+ if configured_file:
+ path = Path(configured_file).expanduser()
+ else:
+ base_dir = os.getenv(_STATE_DIR_ENV)
+ if base_dir:
+ base_path = Path(base_dir).expanduser()
+ else:
+ base_path = Path.home() / _DEFAULT_STATE_SUBDIR
+ path = base_path / _DEFAULT_STATE_FILENAME
+
+ try:
+ path.parent.mkdir(parents=True, exist_ok=True)
+ except OSError as exc:
+ print(f"Unable to prepare state directory: {exc}", file=sys.stderr)
+ return path
+
+
+def _utc_timestamp() -> str:
+ return datetime.now(timezone.utc).isoformat(timespec="seconds")
+
+
+def _serialize_message(message: BaseMessage) -> dict[str, Any]:
+ try:
+ data = json.loads(message.json())
+ except (AttributeError, ValueError, TypeError):
+ if hasattr(message, "dict"):
+ data = cast(Any, message).dict() # type: ignore[call-arg]
+ else:
+ data = {"content": getattr(message, "content", "")}
+ data.pop("type", None)
+ return {
+ "type": message.type,
+ "class_path": (
+ f"{message.__class__.__module__}."
+ f"{message.__class__.__name__}"
+ ),
+ "data": data,
+ }
+
+
+def _locate_message_class(class_path: str) -> type[BaseMessage] | None:
+ module_name, _, class_name = class_path.rpartition(".")
+ if not module_name or not class_name:
+ return None
+ try:
+ module = importlib.import_module(module_name)
+ except ImportError:
+ return None
+ candidate = getattr(module, class_name, None)
+ return cast(type[BaseMessage], candidate) if isinstance(candidate, type) and issubclass(candidate, BaseMessage) else None
+
+
+def _deserialize_message(payload: dict[str, Any]) -> BaseMessage | None:
+ message_type = payload.get("type")
+ data = payload.get("data") or {}
+ if not isinstance(data, dict):
+ return None
+ message_cls = (
+ MESSAGE_TYPE_REGISTRY.get(message_type)
+ if isinstance(message_type, str)
+ else None
+ )
+ if message_cls is None:
+ class_path = payload.get("class_path")
+ if isinstance(class_path, str):
+ message_cls = _locate_message_class(class_path)
+ if message_cls is None:
+ return None
+
+ data = data.copy()
+ data.pop("type", None)
+ try:
+ return message_cls(**data)
+ except (TypeError, ValueError):
+ return message_cls(content=data.get("content", ""))
+
+
+class RuntimeState:
+ """Holds runtime information shared across CLI commands."""
+
+ def __init__(self) -> None:
+ self.agent_executor: Any = None
+ self.mcp_client: Any | None = None
+ self.chat_sessions: dict[str, list[BaseMessage]] = {}
+ self.session_metadata: dict[str, dict[str, Any]] = {}
+ self.tool_summaries: list[dict[str, object]] = []
+ self.tool_map: dict[str, Any] = {}
+ self.tool_details: dict[str, dict[str, Any]] = {}
+ self.persistence_enabled: bool = not _persistence_disabled()
+ self.state_file_path: Path = _resolve_state_file_path()
+ if self.persistence_enabled:
+ self._load_persisted_sessions()
+
+ def record_message(
+ self,
+ session_id: str,
+ message: BaseMessage,
+ ) -> list[BaseMessage]:
+ history = self._ensure_session(session_id)
+ history.append(message)
+ self._touch_session(session_id, persist=True)
+ return history
+
+ def pop_last_message(self, session_id: str) -> None:
+ history = self.chat_sessions.get(session_id)
+ if not history:
+ return
+ history.pop()
+ if history:
+ self._touch_session(session_id, persist=True)
+ return
+ self.clear_session(session_id, persist=True)
+
+ def clear_session(self, session_id: str, *, persist: bool = False) -> None:
+ self.chat_sessions.pop(session_id, None)
+ self.session_metadata.pop(session_id, None)
+ if persist and self.persistence_enabled:
+ self._persist_sessions()
+
+ def serialize_session(self, session_id: str) -> dict[str, Any] | None:
+ history = self.chat_sessions.get(session_id)
+ if history is None:
+ return None
+ metadata = self.session_metadata.get(session_id, {})
+ return {
+ "session_id": session_id,
+ "metadata": {
+ "created_at": metadata.get("created_at"),
+ "updated_at": metadata.get("updated_at"),
+ "message_count": len(history),
+ },
+ "messages": [_serialize_message(m) for m in history],
+ }
+
+ def list_sessions(self) -> list[dict[str, Any]]:
+ sessions = [
+ {
+ "session_id": sid,
+ "message_count": len(hist),
+ "created_at": (
+ self.session_metadata.get(sid, {})
+ .get("created_at")
+ ),
+ "updated_at": (
+ self.session_metadata.get(sid, {})
+ .get("updated_at")
+ ),
+ }
+ for sid, hist in self.chat_sessions.items()
+ ]
+ sessions.sort(
+ key=lambda item: item.get("updated_at") or "",
+ reverse=True,
+ )
+ return sessions
+
+ def _ensure_session(self, session_id: str) -> list[BaseMessage]:
+ history = self.chat_sessions.setdefault(session_id, [])
+ if not history:
+ now = _utc_timestamp()
+ self.session_metadata[session_id] = {
+ "created_at": now,
+ "updated_at": now,
+ "message_count": 0,
+ }
+ self._persist_sessions()
+ return history
+
+ def _touch_session(self, session_id: str, *, persist: bool) -> None:
+ metadata = self.session_metadata.setdefault(session_id, {})
+ metadata.setdefault("created_at", _utc_timestamp())
+ metadata["updated_at"] = _utc_timestamp()
+ metadata["message_count"] = len(self.chat_sessions.get(session_id, []))
+ if persist and self.persistence_enabled:
+ self._persist_sessions()
+
+ def _load_persisted_sessions(self) -> None:
+ if not self.persistence_enabled:
+ return
+ path = self.state_file_path
+ if not path.exists():
+ return
+ try:
+ raw = path.read_text(encoding="utf-8")
+ payload = json.loads(raw)
+ except (OSError, json.JSONDecodeError) as exc:
+ print(
+ f"Failed to load persisted chat sessions: {exc}",
+ file=sys.stderr,
+ )
+ return
+
+ sessions = payload.get("sessions")
+ if not isinstance(sessions, dict):
+ return
+
+ for session_id, record in sessions.items():
+ if not isinstance(session_id, str) or not isinstance(record, dict):
+ continue
+ messages_payload = record.get("messages", [])
+ if not isinstance(messages_payload, list):
+ messages_payload = []
+ history: list[BaseMessage] = []
+ for message_payload in messages_payload:
+ if not isinstance(message_payload, dict):
+ continue
+ message = _deserialize_message(message_payload)
+ if message is not None:
+ history.append(message)
+ self.chat_sessions[session_id] = history
+ metadata = dict(record.get("metadata") or {})
+ metadata.setdefault("created_at", _utc_timestamp())
+ metadata.setdefault("updated_at", metadata["created_at"])
+ metadata["message_count"] = len(history)
+ self.session_metadata[session_id] = metadata
+
+ def _persist_sessions(self) -> None:
+ if not self.persistence_enabled:
+ return
+ payload = {
+ "version": 1,
+ "sessions": {
+ session_id: {
+ "messages": [
+ _serialize_message(message)
+ for message in history
+ ],
+ "metadata": {
+ **self.session_metadata.get(session_id, {}),
+ "message_count": len(history),
+ },
+ }
+ for session_id, history in self.chat_sessions.items()
+ },
+ }
+ try:
+ tmp_path = self.state_file_path.with_suffix(
+ self.state_file_path.suffix + ".tmp"
+ )
+ tmp_path.write_text(
+ json.dumps(payload, indent=2, ensure_ascii=False),
+ encoding="utf-8",
+ )
+ tmp_path.replace(self.state_file_path)
+ except OSError as exc:
+ print(f"Failed to persist chat sessions: {exc}", file=sys.stderr)
diff --git a/community_chatbot/mcp/github/utils.py b/community_chatbot/mcp/github/utils.py
new file mode 100644
index 00000000..652053bb
--- /dev/null
+++ b/community_chatbot/mcp/github/utils.py
@@ -0,0 +1,148 @@
+import json
+import os
+import sys
+import re
+from typing import Any, Callable
+
+from langchain_core.messages import BaseMessage
+
+__all__ = [
+ "sanitize_tool_name",
+ "truthy",
+ "load_json_env",
+ "build_connection_config",
+ "schema_from_model",
+ "extract_message_text",
+ "_eprint",
+]
+
+
+def sanitize_tool_name(name: str) -> str:
+ return re.sub(r"[^a-zA-Z0-9_-]+", "_", name.lower()).strip("_")
+
+
+def truthy(value: str | None) -> bool:
+ if value is None:
+ return False
+ return value.strip().lower() in {"1", "true", "t", "yes", "y", "on"}
+
+
+def load_json_env(
+ env_name: str, *, value_validator: Callable[[Any], Any] | None = None
+) -> dict[str, str]:
+ raw = os.getenv(env_name)
+ if not raw:
+ return {}
+ try:
+ parsed = json.loads(raw)
+ except json.JSONDecodeError as exc:
+ raise ValueError(
+ f"Environment variable {env_name} must contain valid JSON."
+ ) from exc
+ if not isinstance(parsed, dict):
+ raise ValueError(
+ (
+ f"Environment variable {env_name} must be a JSON object "
+ "with string keys."
+ )
+ )
+ if value_validator is None:
+ return {str(k): str(v) for k, v in parsed.items()}
+ validated: dict[str, str] = {}
+ for key, value in parsed.items():
+ validated[str(key)] = str(value_validator(value))
+ return validated
+
+
+def build_connection_config() -> dict[str, Any]:
+ server_url = os.getenv(
+ "GITHUB_MCP_SERVER_URL", "https://api.githubcopilot.com/mcp/"
+ ).strip()
+ if not server_url:
+ raise ValueError("GITHUB_MCP_SERVER_URL cannot be empty.")
+
+ if truthy(os.getenv("GITHUB_MCP_USE_READONLY_PATH")) and not \
+ server_url.endswith("/readonly"):
+ server_url = server_url.rstrip("/") + "/readonly"
+
+ transport = (
+ os.getenv("GITHUB_MCP_TRANSPORT", "streamable_http").strip().lower()
+ )
+ if transport not in {"streamable_http", "sse"}:
+ raise ValueError(
+ "Unsupported GITHUB_MCP_TRANSPORT. Use 'streamable_http' or 'sse'."
+ )
+
+ headers: dict[str, str] = {}
+ auth_token = (
+ os.getenv("GITHUB_MCP_BEARER_TOKEN")
+ or os.getenv("GITHUB_PERSONAL_ACCESS_TOKEN")
+ )
+ if auth_token:
+ headers["Authorization"] = f"Bearer {auth_token}"
+ toolsets = os.getenv("GITHUB_MCP_TOOLSETS")
+ if toolsets:
+ headers["X-MCP-Toolsets"] = toolsets.strip()
+ if truthy(os.getenv("GITHUB_MCP_READONLY")):
+ headers["X-MCP-Readonly"] = "true"
+ user_agent = os.getenv("GITHUB_MCP_USER_AGENT")
+ if user_agent:
+ headers["User-Agent"] = user_agent.strip()
+ headers.update(load_json_env("GITHUB_MCP_EXTRA_HEADERS"))
+
+ connection: dict[str, Any] = {"url": server_url, "transport": transport}
+ if headers:
+ connection["headers"] = headers
+
+ timeout = os.getenv("GITHUB_MCP_TIMEOUT_SECONDS")
+ if timeout:
+ try:
+ timeout_value = float(timeout)
+ except ValueError as exc:
+ raise ValueError(
+ "GITHUB_MCP_TIMEOUT_SECONDS must be a positive number"
+ ) from exc
+ if timeout_value <= 0:
+ raise ValueError(
+ "GITHUB_MCP_TIMEOUT_SECONDS must be greater than zero"
+ )
+ connection["timeout"] = timeout_value
+
+ return connection
+
+
+def schema_from_model(model: Any) -> dict[str, Any] | None:
+ if model is None:
+ return None
+ for attr in ("model_json_schema", "schema"):
+ schema_fn = getattr(model, attr, None)
+ if callable(schema_fn):
+ schema = schema_fn()
+ if isinstance(schema, dict):
+ return schema
+ return None
+
+
+def extract_message_text(message: BaseMessage) -> str:
+ content = message.content
+ if isinstance(content, str):
+ return content
+ if isinstance(content, list):
+ text_chunks = [
+ c.get("text", "")
+ for c in content
+ if isinstance(c, dict) and c.get("type") == "text"
+ ]
+ if text_chunks:
+ return "\n".join(text_chunks)
+ return str(content)
+
+
+def _eprint(
+ *args: object,
+ sep: str | None = None,
+ end: str | None = None,
+ flush: bool = False,
+) -> None:
+
+ print(*args, file=sys.stderr, sep=sep, end=end, flush=flush)
diff --git a/community_chatbot/mcp/requirements.txt b/community_chatbot/mcp/requirements.txt
new file mode 100644
index 00000000..a7699bc1
--- /dev/null
+++ b/community_chatbot/mcp/requirements.txt
@@ -0,0 +1,28 @@
+# LangChain
+langchain
+langchain-community
+langchain-core
+langchain-google-genai
+langchain-text-splitters
+langchain-mcp-adapters
+langsmith
+
+# LangGraph
+langgraph
+langgraph-checkpoint
+langgraph-prebuilt
+langgraph-sdk
+
+google-genai
+
+# External APIs
+pygithub
+
+# Utilities
+pydantic
+python-dotenv
+sqlalchemy
+typer
+
+# Groq
+langchain-groq
From f61cf0e4c95747c6da066f11e965d43da0c774da Mon Sep 17 00:00:00 2001
From: Raghav Gupta <142162663+Raghav-56@users.noreply.github.com>
Date: Sun, 23 Nov 2025 18:20:59 +0530
Subject: [PATCH 2/5] feat: modularize and generalize MCP code
- LLM providers moved to the llm_providers package
- common MCP code moved to the lib package
- GitHub-specific code remains in the mcp/github package
TODO: implement Slack and Jira MCP packages next
---
.../mcp/{github => }/.env.example | 0
community_chatbot/mcp/github/.gitignore | 2 -
community_chatbot/mcp/github/__init__.py | 5 +
community_chatbot/mcp/github/agent.py | 137 +-------
community_chatbot/mcp/github/commands.py | 246 ++-------------
community_chatbot/mcp/github/github_mcp.py | 32 +-
community_chatbot/mcp/lib/__init__.py | 3 +
community_chatbot/mcp/lib/base_agent.py | 188 +++++++++++
community_chatbot/mcp/lib/base_commands.py | 297 ++++++++++++++++++
.../mcp/{github => lib}/state.py | 29 +-
.../mcp/{github => lib}/utils.py | 76 +++--
.../mcp/llm_providers/__init__.py | 3 +
.../mcp/{github => llm_providers}/gemini.py | 16 +-
.../mcp/{github => llm_providers}/groq_llm.py | 15 +-
.../lightning_llm.py | 16 +-
15 files changed, 635 insertions(+), 430 deletions(-)
rename community_chatbot/mcp/{github => }/.env.example (100%)
delete mode 100644 community_chatbot/mcp/github/.gitignore
create mode 100644 community_chatbot/mcp/github/__init__.py
create mode 100644 community_chatbot/mcp/lib/__init__.py
create mode 100644 community_chatbot/mcp/lib/base_agent.py
create mode 100644 community_chatbot/mcp/lib/base_commands.py
rename community_chatbot/mcp/{github => lib}/state.py (94%)
rename community_chatbot/mcp/{github => lib}/utils.py (57%)
create mode 100644 community_chatbot/mcp/llm_providers/__init__.py
rename community_chatbot/mcp/{github => llm_providers}/gemini.py (50%)
rename community_chatbot/mcp/{github => llm_providers}/groq_llm.py (58%)
rename community_chatbot/mcp/{github => llm_providers}/lightning_llm.py (62%)
diff --git a/community_chatbot/mcp/github/.env.example b/community_chatbot/mcp/.env.example
similarity index 100%
rename from community_chatbot/mcp/github/.env.example
rename to community_chatbot/mcp/.env.example
diff --git a/community_chatbot/mcp/github/.gitignore b/community_chatbot/mcp/github/.gitignore
deleted file mode 100644
index 634f145f..00000000
--- a/community_chatbot/mcp/github/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-*.env
-__pycache__/
diff --git a/community_chatbot/mcp/github/__init__.py b/community_chatbot/mcp/github/__init__.py
new file mode 100644
index 00000000..0b10bf97
--- /dev/null
+++ b/community_chatbot/mcp/github/__init__.py
@@ -0,0 +1,5 @@
+"""GitHub MCP Agent module."""
+
+from .agent import get_github_agent
+
+__all__ = ["get_github_agent"]
diff --git a/community_chatbot/mcp/github/agent.py b/community_chatbot/mcp/github/agent.py
index eda1eeda..9f4f1cc6 100644
--- a/community_chatbot/mcp/github/agent.py
+++ b/community_chatbot/mcp/github/agent.py
@@ -1,133 +1,28 @@
import os
-from typing import Any, cast
-
-from langchain_core.messages import AIMessage, BaseMessage, SystemMessage
-from langchain_mcp_adapters.client import MultiServerMCPClient
-from langgraph.prebuilt import create_react_agent
-
-from state import RuntimeState
-from utils import (
- build_connection_config,
- sanitize_tool_name,
- schema_from_model,
-)
-
+from community_chatbot.mcp.lib.base_agent import BaseAgent
__all__ = [
- "initialize_agent",
- "ensure_agent_initialized",
- "stream_agent_response",
+ "get_github_agent",
]
-def _get_llm_provider():
-
- provider = os.getenv("LLM_PROVIDER", "lightning").lower()
-
- if provider == "lightning":
- from lightning_llm import get_llm
- elif provider == "groq":
- from groq_llm import get_llm
- elif provider == "gemini":
- from gemini import get_llm
- else:
- raise ValueError(
- f"Unsupported LLM_PROVIDER: {provider}. "
- f"Supported values: 'lightning', 'groq', 'gemini'"
- )
-
- return get_llm
-
+def get_github_agent() -> BaseAgent:
-async def initialize_agent(state: RuntimeState) -> None:
- missing_vars: list[str] = []
if not (
os.getenv("GITHUB_PERSONAL_ACCESS_TOKEN")
or os.getenv("GITHUB_MCP_BEARER_TOKEN")
):
- missing_vars.append("GITHUB_PERSONAL_ACCESS_TOKEN")
-
- if missing_vars:
- missing_str = ", ".join(sorted(set(missing_vars)))
- raise ValueError(
- "Missing required environment variables: " + missing_str
- )
-
- connection = build_connection_config()
-
- state.mcp_client = MultiServerMCPClient({"github": connection})
-
- client = state.mcp_client
- if client is None:
- raise RuntimeError("Failed to initialize MCP client.")
-
- # Get tools from MCP client - no server_name parameter needed
- tools = await client.get_tools()
-
- state.tool_summaries = []
- state.tool_map = {}
- state.tool_details = {}
-
- for tool in tools:
- original_name = tool.name
- sanitized_name = sanitize_tool_name(original_name)
-
- args_schema = schema_from_model(getattr(tool, "args_schema", None))
- metadata = getattr(tool, "metadata", {}) or {}
-
- state.tool_map[sanitized_name] = tool
- state.tool_details[sanitized_name] = {
- "name": sanitized_name,
- "original_name": original_name,
- "description": getattr(tool, "description", ""),
- "metadata": metadata,
- "args_schema": args_schema,
- }
- state.tool_summaries.append(
- {
- "name": sanitized_name,
- "original_name": original_name,
- "description": getattr(tool, "description", ""),
- }
- )
-
- # Get LLM and create agent with tools
- get_llm = _get_llm_provider()
- llm = get_llm()
-
- # Create React agent with model and tools
- # The agent will automatically bind tools to the model
- state.agent_executor = create_react_agent(llm, tools)
-
-
-async def ensure_agent_initialized(state: RuntimeState) -> None:
- if state.agent_executor is None or state.mcp_client is None:
- await initialize_agent(state)
- if state.agent_executor is None or state.mcp_client is None:
- raise RuntimeError("Agent failed to initialize")
-
-
-async def stream_agent_response(
- state: RuntimeState,
- session_history: list[BaseMessage],
-) -> AIMessage:
- if state.agent_executor is None:
- raise RuntimeError("Agent executor is not initialized.")
-
- executor = cast(Any, state.agent_executor)
-
- # Use ainvoke for async execution with the agent
- result = await executor.ainvoke({"messages": session_history})
-
- # Extract the last AI message from the result
- messages = result.get("messages", [])
- last_ai_message: AIMessage | None = None
-
- for message in reversed(messages):
- if isinstance(message, AIMessage):
- last_ai_message = message
- break
+ required_vars = [
+ "GITHUB_PERSONAL_ACCESS_TOKEN or GITHUB_MCP_BEARER_TOKEN"
+ ]
+ else:
+ required_vars = []
- if last_ai_message is None:
- raise RuntimeError("The agent did not return a response.")
- return last_ai_message
+ return BaseAgent(
+ service_name="github",
+ required_env_vars=required_vars,
+ server_url_env="GITHUB_MCP_SERVER_URL",
+ default_server_url="https://api.githubcopilot.com/mcp/",
+ token_env="GITHUB_PERSONAL_ACCESS_TOKEN",
+ bearer_token_env="GITHUB_MCP_BEARER_TOKEN",
+ )
diff --git a/community_chatbot/mcp/github/commands.py b/community_chatbot/mcp/github/commands.py
index f1673e02..74337253 100644
--- a/community_chatbot/mcp/github/commands.py
+++ b/community_chatbot/mcp/github/commands.py
@@ -1,18 +1,6 @@
-import asyncio
-import json
-import sys
-from pathlib import Path
-from typing import Any
-
-from langchain_core.messages import HumanMessage
-
-from agent import ensure_agent_initialized, stream_agent_response
-from state import RuntimeState
-from utils import (
- extract_message_text,
- sanitize_tool_name,
- _eprint,
-)
+from community_chatbot.mcp.lib import base_commands
+from community_chatbot.mcp.lib.state import RuntimeState
+from community_chatbot.mcp.github.agent import get_github_agent
__all__ = [
@@ -28,44 +16,15 @@
]
+_github_agent = get_github_agent()
async def list_tools(state: RuntimeState) -> int:
- await ensure_agent_initialized(state)
- if not state.tool_summaries:
- print("No tools available.")
- return 0
-
- for tool in state.tool_summaries:
- description = tool.get("description") or "(no description)"
- original_name = tool.get("original_name", "?")
- print(f"- {tool['name']} (original: {original_name}): {description}")
- return 0
+ return await base_commands.list_tools(state, _github_agent)
async def tool_info(state: RuntimeState, tool_identifier: str) -> int:
- await ensure_agent_initialized(state)
- detail = _find_tool_detail(state, tool_identifier)
- if detail is None:
- _eprint(
- f"Tool '{tool_identifier}' not found. "
- "Run list-tools to see available IDs.",
- )
- return 1
-
- print(f"CLI name: {detail['name']}")
- print(f"Original name: {detail['original_name']}")
- if detail.get("description"):
- print(f"Description: {detail['description']}")
- metadata = detail.get("metadata") or {}
- if metadata:
- print("Metadata:")
- for key, value in metadata.items():
- print(f" - {key}: {value}")
- args_schema = detail.get("args_schema")
- if args_schema:
- print("Args schema:", json.dumps(args_schema, indent=2))
- return 0
+ return await base_commands.tool_info(state, _github_agent, tool_identifier)
async def invoke_tool(
@@ -73,48 +32,9 @@ async def invoke_tool(
tool_identifier: str,
args_json: str | None = None,
) -> int:
- await ensure_agent_initialized(state)
- detail = _find_tool_detail(state, tool_identifier)
- if detail is None:
- _eprint(
- f"Tool '{tool_identifier}' not found. "
- "Run list-tools to see available IDs.",
- )
- return 1
-
- tool = state.tool_map.get(detail["name"])
- if tool is None:
- _eprint(f"Tool '{tool_identifier}' is not loaded in the agent.")
- return 1
-
- arguments: dict[str, Any] = {}
- if args_json:
- try:
- parsed_args = json.loads(args_json)
- except json.JSONDecodeError as exc:
- _eprint(f"Failed to parse args JSON: {exc}")
- return 1
- if not isinstance(parsed_args, dict):
- print(
- "Tool arguments must be provided as a JSON object.",
- file=sys.stderr,
- )
- return 1
- arguments = parsed_args
-
- result = await tool.ainvoke(arguments)
- if isinstance(result, tuple) and len(result) == 2:
- text_part, artifacts = result
- if text_part:
- print(text_part)
- if artifacts:
- print(
- "Artifacts:",
- json.dumps([repr(a) for a in artifacts], indent=2),
- )
- else:
- print(result)
- return 0
+ return await base_commands.invoke_tool(
+ state, _github_agent, tool_identifier, args_json
+ )
async def chat(
@@ -122,20 +42,7 @@ async def chat(
message: str,
session_id: str = "default",
) -> int:
- await ensure_agent_initialized(state)
-
- human_message = HumanMessage(content=message)
- session_history = state.record_message(session_id, human_message)
-
- try:
- last_ai_message = await stream_agent_response(state, session_history)
- state.record_message(session_id, last_ai_message)
- print(extract_message_text(last_ai_message))
- return 0
- except RuntimeError as exc:
- state.pop_last_message(session_id)
- print(str(exc), file=sys.stderr)
- return 1
+ return await base_commands.chat(state, _github_agent, message, session_id)
async def chat_loop(
@@ -145,85 +52,22 @@ async def chat_loop(
reset_command: str = "/reset",
prompt_prefix: str | None = None,
) -> int:
- await ensure_agent_initialized(state)
-
- normalized_exit = exit_command.strip().lower()
- normalized_reset = reset_command.strip().lower()
- prompt_template = prompt_prefix or f"[{session_id}]> "
-
- print("Interactive chat started. Type your message and press Enter.")
- print(
- "Use '{exit}' to quit and '{reset}' to clear the session.".format(
- exit=exit_command,
- reset=reset_command,
- )
+ return await base_commands.chat_loop(
+ state,
+ _github_agent,
+ session_id,
+ exit_command,
+ reset_command,
+ prompt_prefix,
)
- loop = asyncio.get_running_loop()
-
- while True:
- try:
- user_message = await loop.run_in_executor(
- None, input, prompt_template
- )
- except (KeyboardInterrupt, EOFError):
- print("\nExiting chat loop.")
- return 0
-
- if user_message is None:
- continue
-
- stripped = user_message.strip()
- if not stripped:
- continue
-
- lowered = stripped.lower()
- if lowered == normalized_exit:
- print("Ending chat loop.")
- return 0
-
- if lowered == normalized_reset:
- state.clear_session(session_id, persist=True)
- print(f"Session '{session_id}' reset.")
- continue
-
- human_message = HumanMessage(content=user_message)
- session_history = state.record_message(session_id, human_message)
-
- try:
- last_ai_message = await stream_agent_response(
- state, session_history
- )
- state.record_message(session_id, last_ai_message)
- print(extract_message_text(last_ai_message))
- except RuntimeError as exc:
- print(str(exc), file=sys.stderr)
- state.pop_last_message(session_id)
-
async def sessions(state: RuntimeState) -> int:
- summaries = state.list_sessions()
- if not summaries:
- print("No active sessions.")
- return 0
-
- print("Active sessions:")
- for info in summaries:
- session_id = info["session_id"]
- count = info.get("message_count", 0)
- updated = info.get("updated_at") or "unknown"
- print(f"- {session_id} ({count} messages, updated {updated})")
- return 0
+ return await base_commands.sessions(state)
async def clear_session(state: RuntimeState, session_id: str) -> int:
- if session_id in state.chat_sessions:
- state.clear_session(session_id, persist=True)
- print(f"Cleared session '{session_id}'.")
- return 0
-
- print(f"Session '{session_id}' not found.", file=sys.stderr)
- return 1
+ return await base_commands.clear_session(state, session_id)
async def export_session(
@@ -231,56 +75,8 @@ async def export_session(
session_id: str,
output_path: str | None = None,
) -> int:
- export_payload = state.serialize_session(session_id)
- if export_payload is None:
- print(
- f"Session '{session_id}' not found.",
- file=sys.stderr,
- )
- return 1
-
- payload_text = json.dumps(export_payload, indent=2, ensure_ascii=False)
-
- if not output_path:
- print(payload_text)
- return 0
-
- try:
- path = Path(output_path).expanduser()
- path.parent.mkdir(parents=True, exist_ok=True)
- path.write_text(payload_text + "\n", encoding="utf-8")
- except OSError as exc:
- _eprint(f"Failed to export session: {exc}")
- return 1
-
- print(f"Session '{session_id}' exported to '{path}'.")
- return 0
+ return await base_commands.export_session(state, session_id, output_path)
async def health(state: RuntimeState) -> int:
- status = {
- "agent_initialized": state.agent_executor is not None,
- "tools_available": len(state.tool_summaries),
- }
- if state.agent_executor is None:
- print(
- "Agent not initialized. Run a command that initializes it"
- " (e.g. list-tools)."
- )
- else:
- print("Agent initialized.")
- print(f"Tools available: {status['tools_available']}")
- return 0
-
-
-def _find_tool_detail(
- state: RuntimeState,
- identifier: str,
-) -> dict[str, Any] | None:
- normalized = sanitize_tool_name(identifier)
- if normalized in state.tool_details:
- return state.tool_details[normalized]
- for detail in state.tool_details.values():
- if detail.get("original_name") == identifier:
- return detail
- return None
+ return await base_commands.health(state)
diff --git a/community_chatbot/mcp/github/github_mcp.py b/community_chatbot/mcp/github/github_mcp.py
index 4d29f1d7..747f66d2 100644
--- a/community_chatbot/mcp/github/github_mcp.py
+++ b/community_chatbot/mcp/github/github_mcp.py
@@ -3,31 +3,34 @@
from dotenv import load_dotenv
import typer
-
-from state import RuntimeState
-import commands
+from community_chatbot.mcp.lib.state import RuntimeState
+from community_chatbot.mcp.github import commands
load_dotenv()
-state = RuntimeState()
+state = RuntimeState(service_name="github")
app = typer.Typer(help="GitHub MCP Agent CLI")
@app.command("list-tools")
def list_tools() -> int:
- """List the available MCP tools from the GitHub server."""
return asyncio.run(commands.list_tools(state))
@app.command("chat")
def chat(
- message: str = typer.Argument(..., help="Message to send to the agent."),
- session_id: str = typer.Option("default", help="Chat session identifier."),
+ message: str = typer.Argument(
+ ..., help="Message to send to the agent."
+ ),
+ session_id: str = typer.Option(
+ "default", help="Chat session identifier."
+ ),
) -> int:
- """Send a message to the agent and stream the response."""
- return asyncio.run(commands.chat(state, message=message, session_id=session_id))
+ return asyncio.run(
+ commands.chat(state, message=message, session_id=session_id)
+ )
@app.command("chat-loop")
@@ -49,7 +52,6 @@ def chat_loop(
help="Optional custom prompt prefix displayed before user input.",
),
) -> int:
- """Start an interactive multi-turn chat session."""
return asyncio.run(
commands.chat_loop(
state,
@@ -67,8 +69,9 @@ def tool_info(
..., help="Tool CLI or original name."
)
) -> int:
- """Show detailed information about a tool."""
- return asyncio.run(commands.tool_info(state, tool_identifier=tool_identifier))
+ return asyncio.run(
+ commands.tool_info(state, tool_identifier=tool_identifier)
+ )
@app.command("invoke-tool")
@@ -81,7 +84,6 @@ def invoke_tool(
help="JSON object containing arguments for the tool.",
),
) -> int:
- """Invoke a GitHub MCP tool directly."""
return asyncio.run(
commands.invoke_tool(
state,
@@ -93,7 +95,6 @@ def invoke_tool(
@app.command("sessions")
def sessions() -> int:
- """List active chat session identifiers."""
return asyncio.run(commands.sessions(state))
@@ -101,7 +102,6 @@ def sessions() -> int:
def clear_session(
session_id: str = typer.Argument(..., help="Session id to clear."),
) -> int:
- """Clear a stored chat session."""
return asyncio.run(commands.clear_session(state, session_id=session_id))
@@ -117,7 +117,6 @@ def export_session(
help="Optional path to write the exported JSON transcript.",
),
) -> int:
- """Export a chat session transcript for testing or archival."""
return asyncio.run(
commands.export_session(
state,
@@ -129,7 +128,6 @@ def export_session(
@app.command("health")
def health() -> int:
- """Show initialization status and tool count."""
return asyncio.run(commands.health(state))
diff --git a/community_chatbot/mcp/lib/__init__.py b/community_chatbot/mcp/lib/__init__.py
new file mode 100644
index 00000000..91e72762
--- /dev/null
+++ b/community_chatbot/mcp/lib/__init__.py
@@ -0,0 +1,3 @@
+"""Common modules for MCP agents."""
+
+__all__ = ["state", "utils", "base_agent", "base_commands"]
diff --git a/community_chatbot/mcp/lib/base_agent.py b/community_chatbot/mcp/lib/base_agent.py
new file mode 100644
index 00000000..6697c355
--- /dev/null
+++ b/community_chatbot/mcp/lib/base_agent.py
@@ -0,0 +1,188 @@
+import os
+from typing import Any, cast
+
+from langchain_core.messages import AIMessage, BaseMessage
+from langchain_mcp_adapters.client import MultiServerMCPClient
+from langgraph.prebuilt import create_react_agent
+
+from .state import RuntimeState
+from .utils import (
+ build_connection_config,
+ sanitize_tool_name,
+ schema_from_model,
+)
+
+
+__all__ = [
+ "BaseAgent",
+ "initialize_agent",
+ "ensure_agent_initialized",
+ "stream_agent_response",
+]
+
+
+def _get_llm_provider():
+ from community_chatbot.mcp.llm_providers import gemini
+ from community_chatbot.mcp.llm_providers import lightning_llm
+ from community_chatbot.mcp.llm_providers import groq_llm
+
+ provider = os.getenv("LLM_PROVIDER", "gemini").lower()
+
+ if provider == "lightning":
+ return lightning_llm.get_llm
+ elif provider == "groq":
+ return groq_llm.get_llm
+ elif provider == "gemini":
+ return gemini.get_llm
+ else:
+ raise ValueError(
+ f"Unsupported LLM_PROVIDER: {provider}. "
+ f"Supported values: 'lightning', 'groq', 'gemini'"
+ )
+
+
+class BaseAgent:
+ """Base class for MCP agents providing common functionality."""
+
+ def __init__(
+ self,
+ service_name: str,
+ required_env_vars: list[str] | None = None,
+ server_url_env: str | None = None,
+ default_server_url: str | None = None,
+ token_env: str | None = None,
+ bearer_token_env: str | None = None,
+ ):
+ """Initialize base agent with service-specific configuration.
+
+ Args:
+ service_name: Name of the service (e.g., "github", "jira", "slack")
+ required_env_vars: List of required environment variables
+ server_url_env: Environment variable name for server URL
+ default_server_url: Default server URL if not specified
+ token_env: Primary token environment variable name
+ bearer_token_env: Optional bearer token environment variable name
+ """
+ self.service_name = service_name
+ self.required_env_vars = required_env_vars or []
+ self.server_url_env = server_url_env
+ self.default_server_url = default_server_url
+ self.token_env = token_env
+ self.bearer_token_env = bearer_token_env
+
+ def validate_environment(self) -> None:
+ """Validate required environment variables."""
+ missing_vars: list[str] = []
+ for var in self.required_env_vars:
+ if not os.getenv(var):
+ missing_vars.append(var)
+
+ if missing_vars:
+ missing_str = ", ".join(sorted(set(missing_vars)))
+ raise ValueError(
+ "Missing required environment variables: " + missing_str
+ )
+
+ def get_connection_config(self) -> dict[str, Any]:
+ """Get the connection configuration for this service."""
+ return build_connection_config(
+ service_name=self.service_name,
+ server_url_env=self.server_url_env or f"{self.service_name.upper()}_MCP_SERVER_URL",
+ default_server_url=self.default_server_url or f"https://api.{self.service_name}.com/mcp/",
+ token_env=self.token_env or f"{self.service_name.upper()}_PERSONAL_ACCESS_TOKEN",
+ bearer_token_env=self.bearer_token_env,
+ )
+
+ async def initialize(self, state: RuntimeState) -> None:
+ """Initialize the agent with MCP client and tools."""
+ self.validate_environment()
+
+ connection = self.get_connection_config()
+ state.mcp_client = MultiServerMCPClient({self.service_name: connection})
+
+ client = state.mcp_client
+ if client is None:
+ raise RuntimeError("Failed to initialize MCP client.")
+
+ # Get tools from MCP client
+ tools = await client.get_tools()
+
+ state.tool_summaries = []
+ state.tool_map = {}
+ state.tool_details = {}
+
+ for tool in tools:
+ original_name = tool.name
+ sanitized_name = sanitize_tool_name(original_name)
+
+ args_schema = schema_from_model(getattr(tool, "args_schema", None))
+ metadata = getattr(tool, "metadata", {}) or {}
+
+ state.tool_map[sanitized_name] = tool
+ state.tool_details[sanitized_name] = {
+ "name": sanitized_name,
+ "original_name": original_name,
+ "description": getattr(tool, "description", ""),
+ "metadata": metadata,
+ "args_schema": args_schema,
+ }
+ state.tool_summaries.append(
+ {
+ "name": sanitized_name,
+ "original_name": original_name,
+ "description": getattr(tool, "description", ""),
+ }
+ )
+
+ # Get LLM and create agent with tools
+ get_llm = _get_llm_provider()
+ llm = get_llm()
+
+ # Create React agent with model and tools
+ state.agent_executor = create_react_agent(llm, tools)
+
+
+async def initialize_agent(
+ state: RuntimeState,
+ agent: BaseAgent,
+) -> None:
+    """Asynchronously initialize an agent using the BaseAgent class."""
+ await agent.initialize(state)
+
+
+async def ensure_agent_initialized(
+ state: RuntimeState,
+ agent: BaseAgent,
+) -> None:
+ """Ensure agent is initialized, initializing if necessary."""
+ if state.agent_executor is None or state.mcp_client is None:
+ await initialize_agent(state, agent)
+ if state.agent_executor is None or state.mcp_client is None:
+ raise RuntimeError("Agent failed to initialize")
+
+
+async def stream_agent_response(
+ state: RuntimeState,
+ session_history: list[BaseMessage],
+) -> AIMessage:
+ """Stream agent response for a given session history."""
+ if state.agent_executor is None:
+ raise RuntimeError("Agent executor is not initialized.")
+
+ executor = cast(Any, state.agent_executor)
+
+ # Use ainvoke for async execution with the agent
+ result = await executor.ainvoke({"messages": session_history})
+
+ # Extract the last AI message from the result
+ messages = result.get("messages", [])
+ last_ai_message: AIMessage | None = None
+
+ for message in reversed(messages):
+ if isinstance(message, AIMessage):
+ last_ai_message = message
+ break
+
+ if last_ai_message is None:
+ raise RuntimeError("The agent did not return a response.")
+ return last_ai_message
diff --git a/community_chatbot/mcp/lib/base_commands.py b/community_chatbot/mcp/lib/base_commands.py
new file mode 100644
index 00000000..42af9d5f
--- /dev/null
+++ b/community_chatbot/mcp/lib/base_commands.py
@@ -0,0 +1,297 @@
+import asyncio
+import json
+import sys
+from pathlib import Path
+from typing import Any
+
+from langchain_core.messages import HumanMessage
+
+from .base_agent import BaseAgent, ensure_agent_initialized, stream_agent_response
+from .state import RuntimeState
+from .utils import (
+ extract_message_text,
+ sanitize_tool_name,
+ _eprint,
+)
+
+
+__all__ = [
+ "list_tools",
+ "tool_info",
+ "invoke_tool",
+ "chat",
+ "chat_loop",
+ "sessions",
+ "clear_session",
+ "export_session",
+ "health",
+]
+
+
+async def list_tools(state: RuntimeState, agent: BaseAgent) -> int:
+ """List available MCP tools."""
+ await ensure_agent_initialized(state, agent)
+ if not state.tool_summaries:
+ print("No tools available.")
+ return 0
+
+ for tool in state.tool_summaries:
+ description = tool.get("description") or "(no description)"
+ original_name = tool.get("original_name", "?")
+ print(f"- {tool['name']} (original: {original_name}): {description}")
+ return 0
+
+
+async def tool_info(state: RuntimeState, agent: BaseAgent, tool_identifier: str) -> int:
+ """Show detailed information about a specific tool."""
+ await ensure_agent_initialized(state, agent)
+ detail = _find_tool_detail(state, tool_identifier)
+ if detail is None:
+ _eprint(
+ f"Tool '{tool_identifier}' not found. "
+ "Run list-tools to see available IDs.",
+ )
+ return 1
+
+ print(f"CLI name: {detail['name']}")
+ print(f"Original name: {detail['original_name']}")
+ if detail.get("description"):
+ print(f"Description: {detail['description']}")
+ metadata = detail.get("metadata") or {}
+ if metadata:
+ print("Metadata:")
+ for key, value in metadata.items():
+ print(f" - {key}: {value}")
+ args_schema = detail.get("args_schema")
+ if args_schema:
+ print("Args schema:", json.dumps(args_schema, indent=2))
+ return 0
+
+
+def _find_tool_detail(
+ state: RuntimeState,
+ identifier: str,
+) -> dict[str, Any] | None:
+ """Find tool details by identifier (sanitized or original name)."""
+ normalized = sanitize_tool_name(identifier)
+ if normalized in state.tool_details:
+ return state.tool_details[normalized]
+ for detail in state.tool_details.values():
+ if detail.get("original_name") == identifier:
+ return detail
+ return None
+
+
+async def invoke_tool(
+ state: RuntimeState,
+ agent: BaseAgent,
+ tool_identifier: str,
+ args_json: str | None = None,
+) -> int:
+ """Invoke a tool directly with provided arguments."""
+ await ensure_agent_initialized(state, agent)
+ detail = _find_tool_detail(state, tool_identifier)
+ if detail is None:
+ _eprint(
+ f"Tool '{tool_identifier}' not found. "
+ "Run list-tools to see available IDs.",
+ )
+ return 1
+
+ tool = state.tool_map.get(detail["name"])
+ if tool is None:
+ _eprint(f"Tool '{tool_identifier}' is not loaded in the agent.")
+ return 1
+
+ arguments: dict[str, Any] = {}
+ if args_json:
+ try:
+ parsed_args = json.loads(args_json)
+ except json.JSONDecodeError as exc:
+ _eprint(f"Failed to parse args JSON: {exc}")
+ return 1
+ if not isinstance(parsed_args, dict):
+ print(
+ "Tool arguments must be provided as a JSON object.",
+ file=sys.stderr,
+ )
+ return 1
+ arguments = parsed_args
+
+ result = await tool.ainvoke(arguments)
+ if isinstance(result, tuple) and len(result) == 2:
+ text_part, artifacts = result
+ if text_part:
+ print(text_part)
+ if artifacts:
+ print(
+ "Artifacts:",
+ json.dumps([repr(a) for a in artifacts], indent=2),
+ )
+ else:
+ print(result)
+ return 0
+
+
+async def chat(
+ state: RuntimeState,
+ agent: BaseAgent,
+ message: str,
+ session_id: str = "default",
+) -> int:
+ """Send a message to the agent and receive a response."""
+ await ensure_agent_initialized(state, agent)
+
+ human_message = HumanMessage(content=message)
+ session_history = state.record_message(session_id, human_message)
+
+ try:
+ last_ai_message = await stream_agent_response(state, session_history)
+ state.record_message(session_id, last_ai_message)
+ print(extract_message_text(last_ai_message))
+ return 0
+ except RuntimeError as exc:
+ state.pop_last_message(session_id)
+ print(str(exc), file=sys.stderr)
+ return 1
+
+
+async def chat_loop(
+ state: RuntimeState,
+ agent: BaseAgent,
+ session_id: str = "default",
+ exit_command: str = "/exit",
+ reset_command: str = "/reset",
+ prompt_prefix: str | None = None,
+) -> int:
+ """Start an interactive chat session."""
+ await ensure_agent_initialized(state, agent)
+
+ normalized_exit = exit_command.strip().lower()
+ normalized_reset = reset_command.strip().lower()
+ prompt_template = prompt_prefix or f"[{session_id}]> "
+
+ print("Interactive chat started. Type your message and press Enter.")
+ print(
+ "Use '{exit}' to quit and '{reset}' to clear the session.".format(
+ exit=exit_command,
+ reset=reset_command,
+ )
+ )
+
+ loop = asyncio.get_running_loop()
+
+ while True:
+ try:
+ user_message = await loop.run_in_executor(
+ None, input, prompt_template
+ )
+ except (KeyboardInterrupt, EOFError):
+ print("\nExiting chat loop.")
+ return 0
+
+ if user_message is None:
+ continue
+
+ stripped = user_message.strip()
+ if not stripped:
+ continue
+
+ lowered = stripped.lower()
+ if lowered == normalized_exit:
+ print("Ending chat loop.")
+ return 0
+
+ if lowered == normalized_reset:
+ state.clear_session(session_id, persist=True)
+ print(f"Session '{session_id}' reset.")
+ continue
+
+ human_message = HumanMessage(content=user_message)
+ session_history = state.record_message(session_id, human_message)
+
+ try:
+ last_ai_message = await stream_agent_response(
+ state, session_history
+ )
+ state.record_message(session_id, last_ai_message)
+ print(extract_message_text(last_ai_message))
+ except RuntimeError as exc:
+ print(str(exc), file=sys.stderr)
+ state.pop_last_message(session_id)
+
+
+async def sessions(state: RuntimeState) -> int:
+ """List all active chat sessions."""
+ summaries = state.list_sessions()
+ if not summaries:
+ print("No active sessions.")
+ return 0
+
+ print("Active sessions:")
+ for info in summaries:
+ session_id = info["session_id"]
+ count = info.get("message_count", 0)
+ updated = info.get("updated_at") or "unknown"
+ print(f"- {session_id} ({count} messages, updated {updated})")
+ return 0
+
+
+async def clear_session(state: RuntimeState, session_id: str) -> int:
+ """Clear a specific chat session."""
+ if session_id in state.chat_sessions:
+ state.clear_session(session_id, persist=True)
+ print(f"Cleared session '{session_id}'.")
+ return 0
+
+ print(f"Session '{session_id}' not found.", file=sys.stderr)
+ return 1
+
+
+async def export_session(
+ state: RuntimeState,
+ session_id: str,
+ output_path: str | None = None,
+) -> int:
+ """Export a chat session to JSON format."""
+ export_payload = state.serialize_session(session_id)
+ if export_payload is None:
+ print(
+ f"Session '{session_id}' not found.",
+ file=sys.stderr,
+ )
+ return 1
+
+ payload_text = json.dumps(export_payload, indent=2, ensure_ascii=False)
+
+ if not output_path:
+ print(payload_text)
+ return 0
+
+ try:
+ path = Path(output_path).expanduser()
+ path.parent.mkdir(parents=True, exist_ok=True)
+ path.write_text(payload_text + "\n", encoding="utf-8")
+ except OSError as exc:
+ _eprint(f"Failed to export session: {exc}")
+ return 1
+
+ print(f"Session '{session_id}' exported to '{path}'.")
+ return 0
+
+
+async def health(state: RuntimeState) -> int:
+ """Show agent health status."""
+ status = {
+ "agent_initialized": state.agent_executor is not None,
+ "tools_available": len(state.tool_summaries),
+ }
+ if state.agent_executor is None:
+ print(
+ "Agent not initialized. Run a command that initializes it"
+ " (e.g. list-tools)."
+ )
+ else:
+ print("Agent initialized.")
+ print(f"Tools available: {status['tools_available']}")
+ return 0
diff --git a/community_chatbot/mcp/github/state.py b/community_chatbot/mcp/lib/state.py
similarity index 94%
rename from community_chatbot/mcp/github/state.py
rename to community_chatbot/mcp/lib/state.py
index 108bec7c..b5185878 100644
--- a/community_chatbot/mcp/github/state.py
+++ b/community_chatbot/mcp/lib/state.py
@@ -7,26 +7,26 @@
from typing import Any, cast
from langchain_core.messages import (
- AIMessage,
BaseMessage,
+ AIMessage,
HumanMessage,
SystemMessage,
ToolMessage,
+ ChatMessage,
+ FunctionMessage
)
-from langchain_core.messages import ChatMessage, FunctionMessage
-
-from utils import truthy
+from .utils import truthy
__all__ = ["RuntimeState", "MESSAGE_TYPE_REGISTRY"]
-_STATE_FILE_ENV = "GITHUB_MCP_STATE_FILE"
-_STATE_DIR_ENV = "GITHUB_MCP_STATE_DIR"
-_STATE_DISABLE_ENV = "GITHUB_MCP_DISABLE_PERSISTENCE"
-_DEFAULT_STATE_FILENAME = "github_mcp_sessions.json"
-_DEFAULT_STATE_SUBDIR = ".github_mcp"
+_STATE_FILE_ENV = "MCP_STATE_FILE"
+_STATE_DIR_ENV = "MCP_STATE_DIR"
+_STATE_DISABLE_ENV = "MCP_DISABLE_PERSISTENCE"
+_DEFAULT_STATE_FILENAME = "mcp_sessions.json"
+_DEFAULT_STATE_SUBDIR = ".mcp"
MESSAGE_TYPE_REGISTRY: dict[str, type[BaseMessage]] = {
"ai": AIMessage,
@@ -43,7 +43,7 @@ def _persistence_disabled() -> bool:
return truthy(os.getenv(_STATE_DISABLE_ENV))
-def _resolve_state_file_path() -> Path:
+def _resolve_state_file_path(service_name: str = "mcp") -> Path:
if _persistence_disabled():
return Path(os.devnull)
@@ -56,7 +56,7 @@ def _resolve_state_file_path() -> Path:
base_path = Path(base_dir).expanduser()
else:
base_path = Path.home() / _DEFAULT_STATE_SUBDIR
- path = base_path / _DEFAULT_STATE_FILENAME
+ path = base_path / f"{service_name}_sessions.json"
try:
path.parent.mkdir(parents=True, exist_ok=True)
@@ -74,7 +74,7 @@ def _serialize_message(message: BaseMessage) -> dict[str, Any]:
data = json.loads(message.json())
except (AttributeError, ValueError, TypeError):
if hasattr(message, "dict"):
- data = cast(Any, message).dict() # type: ignore[call-arg]
+ data = cast(Any, message).dict()
else:
data = {"content": getattr(message, "content", "")}
data.pop("type", None)
@@ -128,7 +128,8 @@ def _deserialize_message(payload: dict[str, Any]) -> BaseMessage | None:
class RuntimeState:
"""Holds runtime information shared across CLI commands."""
- def __init__(self) -> None:
+ def __init__(self, service_name: str = "mcp") -> None:
+ self.service_name = service_name
self.agent_executor: Any = None
self.mcp_client: Any | None = None
self.chat_sessions: dict[str, list[BaseMessage]] = {}
@@ -137,7 +138,7 @@ def __init__(self) -> None:
self.tool_map: dict[str, Any] = {}
self.tool_details: dict[str, dict[str, Any]] = {}
self.persistence_enabled: bool = not _persistence_disabled()
- self.state_file_path: Path = _resolve_state_file_path()
+ self.state_file_path: Path = _resolve_state_file_path(service_name)
if self.persistence_enabled:
self._load_persisted_sessions()
diff --git a/community_chatbot/mcp/github/utils.py b/community_chatbot/mcp/lib/utils.py
similarity index 57%
rename from community_chatbot/mcp/github/utils.py
rename to community_chatbot/mcp/lib/utils.py
index 652053bb..d5a10b8e 100644
--- a/community_chatbot/mcp/github/utils.py
+++ b/community_chatbot/mcp/lib/utils.py
@@ -41,10 +41,10 @@ def load_json_env(
) from exc
if not isinstance(parsed, dict):
raise ValueError(
- (
- f"Environment variable {env_name} must be a JSON object "
- "with string keys."
- )
+ (
+ f"Environment variable {env_name} must be a JSON object "
+ "with string keys."
+ )
)
if value_validator is None:
return {str(k): str(v) for k, v in parsed.items()}
@@ -54,57 +54,85 @@ def load_json_env(
return validated
-def build_connection_config() -> dict[str, Any]:
- server_url = os.getenv(
- "GITHUB_MCP_SERVER_URL", "https://api.githubcopilot.com/mcp/"
- ).strip()
+def build_connection_config(
+ service_name: str = "github",
+ server_url_env: str = "GITHUB_MCP_SERVER_URL",
+ default_server_url: str = "https://api.githubcopilot.com/mcp/",
+ token_env: str = "GITHUB_PERSONAL_ACCESS_TOKEN",
+ bearer_token_env: str | None = "GITHUB_MCP_BEARER_TOKEN",
+) -> dict[str, Any]:
+ """
+ Build connection configuration for MCP services.
+
+ Args:
+ service_name: Name of the service (for logging/display)
+ server_url_env: Environment variable name for server URL
+ default_server_url: Default server URL if not specified
+ token_env: Primary token environment variable name
+ bearer_token_env: Optional bearer token environment variable name
+ """
+ server_url = os.getenv(server_url_env, default_server_url).strip()
if not server_url:
- raise ValueError("GITHUB_MCP_SERVER_URL cannot be empty.")
+ raise ValueError(f"{server_url_env} cannot be empty.")
- if truthy(os.getenv("GITHUB_MCP_USE_READONLY_PATH")) and not \
- server_url.endswith("/readonly"):
+ # Check for readonly path configuration
+ readonly_path_env = f"{service_name.upper()}_MCP_USE_READONLY_PATH"
+ if truthy(os.getenv(readonly_path_env)) and not server_url.endswith("/readonly"):
server_url = server_url.rstrip("/") + "/readonly"
+ transport_env = f"{service_name.upper()}_MCP_TRANSPORT"
transport = (
- os.getenv("GITHUB_MCP_TRANSPORT", "streamable_http").strip().lower()
+ os.getenv(transport_env, "streamable_http").strip().lower()
)
if transport not in {"streamable_http", "sse"}:
raise ValueError(
- "Unsupported GITHUB_MCP_TRANSPORT. Use 'streamable_http' or 'sse'."
+ f"Unsupported {transport_env}. Use 'streamable_http' or 'sse'."
)
headers: dict[str, str] = {}
- auth_token = (
- os.getenv("GITHUB_MCP_BEARER_TOKEN")
- or os.getenv("GITHUB_PERSONAL_ACCESS_TOKEN")
- )
+ auth_token = os.getenv(bearer_token_env) if bearer_token_env else None
+ if not auth_token:
+ auth_token = os.getenv(token_env)
if auth_token:
headers["Authorization"] = f"Bearer {auth_token}"
- toolsets = os.getenv("GITHUB_MCP_TOOLSETS")
+
+ # Check for toolsets configuration
+ toolsets_env = f"{service_name.upper()}_MCP_TOOLSETS"
+ toolsets = os.getenv(toolsets_env)
if toolsets:
headers["X-MCP-Toolsets"] = toolsets.strip()
- if truthy(os.getenv("GITHUB_MCP_READONLY")):
+
+ # Check for readonly flag
+ readonly_env = f"{service_name.upper()}_MCP_READONLY"
+ if truthy(os.getenv(readonly_env)):
headers["X-MCP-Readonly"] = "true"
- user_agent = os.getenv("GITHUB_MCP_USER_AGENT")
+
+ # Check for user agent
+ user_agent_env = f"{service_name.upper()}_MCP_USER_AGENT"
+ user_agent = os.getenv(user_agent_env)
if user_agent:
headers["User-Agent"] = user_agent.strip()
- headers.update(load_json_env("GITHUB_MCP_EXTRA_HEADERS"))
+
+ # Check for extra headers
+ extra_headers_env = f"{service_name.upper()}_MCP_EXTRA_HEADERS"
+ headers.update(load_json_env(extra_headers_env))
connection: dict[str, Any] = {"url": server_url, "transport": transport}
if headers:
connection["headers"] = headers
- timeout = os.getenv("GITHUB_MCP_TIMEOUT_SECONDS")
+ timeout_env = f"{service_name.upper()}_MCP_TIMEOUT_SECONDS"
+ timeout = os.getenv(timeout_env)
if timeout:
try:
timeout_value = float(timeout)
except ValueError as exc:
raise ValueError(
- "GITHUB_MCP_TIMEOUT_SECONDS must be a positive number"
+ f"{timeout_env} must be a positive number"
) from exc
if timeout_value <= 0:
raise ValueError(
- "GITHUB_MCP_TIMEOUT_SECONDS must be greater than zero"
+ f"{timeout_env} must be greater than zero"
)
connection["timeout"] = timeout_value
diff --git a/community_chatbot/mcp/llm_providers/__init__.py b/community_chatbot/mcp/llm_providers/__init__.py
new file mode 100644
index 00000000..95b40e88
--- /dev/null
+++ b/community_chatbot/mcp/llm_providers/__init__.py
@@ -0,0 +1,3 @@
+"""LLM provider modules for MCP agents."""
+
+__all__ = ["gemini", "groq_llm", "lightning_llm"]
diff --git a/community_chatbot/mcp/github/gemini.py b/community_chatbot/mcp/llm_providers/gemini.py
similarity index 50%
rename from community_chatbot/mcp/github/gemini.py
rename to community_chatbot/mcp/llm_providers/gemini.py
index 212e75a6..c883733f 100644
--- a/community_chatbot/mcp/github/gemini.py
+++ b/community_chatbot/mcp/llm_providers/gemini.py
@@ -1,22 +1,22 @@
import os
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain_core.language_models import BaseChatModel
+from pydantic import SecretStr
def get_llm() -> BaseChatModel:
- """
- Returns a configured LangChain chat model instance for Gemini.
- Raises ValueError if required environment variables are missing.
- """
+
api_key = os.getenv("GOOGLE_API_KEY")
if not api_key:
- raise ValueError("Missing required environment variable: GOOGLE_API_KEY")
+ raise ValueError(
+ "Missing required environment variable: GOOGLE_API_KEY"
+ )
- model_id = os.getenv("GITHUB_MCP_MODEL", "gemini-2.5-pro").strip()
+ model_id = os.getenv("MODEL", "models/gemini-3-pro-preview").strip()
if not model_id:
raise ValueError("Model ID cannot be empty.")
return ChatGoogleGenerativeAI(
model=model_id,
- api_key=api_key,
- streaming=True,
+ api_key=SecretStr(api_key),
+ disable_streaming="tool_calling"
)
diff --git a/community_chatbot/mcp/github/groq_llm.py b/community_chatbot/mcp/llm_providers/groq_llm.py
similarity index 58%
rename from community_chatbot/mcp/github/groq_llm.py
rename to community_chatbot/mcp/llm_providers/groq_llm.py
index f3cb790f..6b052c68 100644
--- a/community_chatbot/mcp/github/groq_llm.py
+++ b/community_chatbot/mcp/llm_providers/groq_llm.py
@@ -1,15 +1,17 @@
import os
+
+from pydantic import SecretStr
from langchain_groq import ChatGroq
from langchain_core.language_models import BaseChatModel
+
def get_llm() -> BaseChatModel:
- """
- Returns a configured LangChain chat model instance for Groq.
- Raises ValueError if required environment variables are missing.
- """
+
api_key = os.getenv("GROQ_API_KEY")
if not api_key:
- raise ValueError("Missing required environment variable: GROQ_API_KEY")
+ raise ValueError(
+ "Missing required environment variable: GROQ_API_KEY"
+ )
model_id = os.getenv("MODEL", "llama-3.1-8b-instant").strip()
if not model_id:
@@ -17,6 +19,5 @@ def get_llm() -> BaseChatModel:
return ChatGroq(
model=model_id,
- api_key=api_key,
- streaming=True
+ api_key=SecretStr(api_key),
)
diff --git a/community_chatbot/mcp/github/lightning_llm.py b/community_chatbot/mcp/llm_providers/lightning_llm.py
similarity index 62%
rename from community_chatbot/mcp/github/lightning_llm.py
rename to community_chatbot/mcp/llm_providers/lightning_llm.py
index 6d2e3b75..00ceb759 100644
--- a/community_chatbot/mcp/github/lightning_llm.py
+++ b/community_chatbot/mcp/llm_providers/lightning_llm.py
@@ -1,9 +1,11 @@
import os
from langchain_openai import ChatOpenAI
from langchain_core.language_models import BaseChatModel
+from pydantic import SecretStr
def get_llm() -> BaseChatModel:
+
api_key = os.getenv("LIGHTNING_API_KEY")
if not api_key:
raise ValueError(
@@ -16,24 +18,14 @@ def get_llm() -> BaseChatModel:
"Missing required environment variable: LIGHTNING_BASE_URL"
)
- # Model name from your Lightning AI deployment
- # Can be overridden via environment variable
model_id = os.getenv(
"MODEL", "meta-llama/Llama-3.3-70B-Instruct"
).strip()
if not model_id:
raise ValueError("Model ID cannot be empty.")
- # Optional: timeout and max_retries for robustness
- timeout = int(os.getenv("LIGHTNING_TIMEOUT", "60"))
- max_retries = int(os.getenv("LIGHTNING_MAX_RETRIES", "3"))
-
return ChatOpenAI(
- api_key=api_key,
+ api_key=SecretStr(api_key),
base_url=base_url,
- model=model_id,
- temperature=0.7,
- streaming=True,
- timeout=timeout,
- max_retries=max_retries,
+ model=model_id
)
From 82fed731784fda42124afb9fae969655a5bbeadf Mon Sep 17 00:00:00 2001
From: Raghav Gupta <142162663+Raghav-56@users.noreply.github.com>
Date: Mon, 24 Nov 2025 22:11:33 +0530
Subject: [PATCH 3/5] feat(mcp): openMF#46 Add Slack MCP integration
- use https://github.com/korotovsky/slack-mcp-server
---
community_chatbot/mcp/.env.example | 20 ---
community_chatbot/mcp/.gitignore | 2 -
community_chatbot/mcp_impl/.env.example | 66 ++++++++
community_chatbot/mcp_impl/.gitignore | 51 +++++++
community_chatbot/mcp_impl/README.md | 0
.../{mcp => mcp_impl}/github/__init__.py | 0
.../{mcp => mcp_impl}/github/agent.py | 2 +-
.../{mcp => mcp_impl}/github/commands.py | 6 +-
.../{mcp => mcp_impl}/github/github_mcp.py | 4 +-
.../{mcp => mcp_impl}/lib/__init__.py | 0
.../{mcp => mcp_impl}/lib/base_agent.py | 6 +-
.../{mcp => mcp_impl}/lib/base_commands.py | 0
.../{mcp => mcp_impl}/lib/state.py | 0
.../{mcp => mcp_impl}/lib/utils.py | 0
.../llm_providers/__init__.py | 0
.../{mcp => mcp_impl}/llm_providers/gemini.py | 0
.../llm_providers/groq_llm.py | 0
.../llm_providers/lightning_llm.py | 0
community_chatbot/mcp_impl/main.py | 29 ++++
community_chatbot/mcp_impl/package.json | 13 ++
community_chatbot/mcp_impl/pyproject.toml | 28 ++++
.../{mcp => mcp_impl}/requirements.txt | 2 +
community_chatbot/mcp_impl/slack/README.md | 68 +++++++++
community_chatbot/mcp_impl/slack/__init__.py | 5 +
community_chatbot/mcp_impl/slack/agent.py | 141 +++++++++++++++++
community_chatbot/mcp_impl/slack/commands.py | 82 ++++++++++
community_chatbot/mcp_impl/slack/slack_mcp.py | 144 ++++++++++++++++++
27 files changed, 638 insertions(+), 31 deletions(-)
delete mode 100644 community_chatbot/mcp/.env.example
delete mode 100644 community_chatbot/mcp/.gitignore
create mode 100644 community_chatbot/mcp_impl/.env.example
create mode 100644 community_chatbot/mcp_impl/.gitignore
create mode 100644 community_chatbot/mcp_impl/README.md
rename community_chatbot/{mcp => mcp_impl}/github/__init__.py (100%)
rename community_chatbot/{mcp => mcp_impl}/github/agent.py (91%)
rename community_chatbot/{mcp => mcp_impl}/github/commands.py (91%)
rename community_chatbot/{mcp => mcp_impl}/github/github_mcp.py (96%)
rename community_chatbot/{mcp => mcp_impl}/lib/__init__.py (100%)
rename community_chatbot/{mcp => mcp_impl}/lib/base_agent.py (97%)
rename community_chatbot/{mcp => mcp_impl}/lib/base_commands.py (100%)
rename community_chatbot/{mcp => mcp_impl}/lib/state.py (100%)
rename community_chatbot/{mcp => mcp_impl}/lib/utils.py (100%)
rename community_chatbot/{mcp => mcp_impl}/llm_providers/__init__.py (100%)
rename community_chatbot/{mcp => mcp_impl}/llm_providers/gemini.py (100%)
rename community_chatbot/{mcp => mcp_impl}/llm_providers/groq_llm.py (100%)
rename community_chatbot/{mcp => mcp_impl}/llm_providers/lightning_llm.py (100%)
create mode 100644 community_chatbot/mcp_impl/main.py
create mode 100644 community_chatbot/mcp_impl/package.json
create mode 100644 community_chatbot/mcp_impl/pyproject.toml
rename community_chatbot/{mcp => mcp_impl}/requirements.txt (92%)
create mode 100644 community_chatbot/mcp_impl/slack/README.md
create mode 100644 community_chatbot/mcp_impl/slack/__init__.py
create mode 100644 community_chatbot/mcp_impl/slack/agent.py
create mode 100644 community_chatbot/mcp_impl/slack/commands.py
create mode 100644 community_chatbot/mcp_impl/slack/slack_mcp.py
diff --git a/community_chatbot/mcp/.env.example b/community_chatbot/mcp/.env.example
deleted file mode 100644
index faf0086a..00000000
--- a/community_chatbot/mcp/.env.example
+++ /dev/null
@@ -1,20 +0,0 @@
-GITHUB_PERSONAL_ACCESS_TOKEN=your_github_token_here
-# OPENAI_API_KEY=your_openai_api_key_here
-
-GOOGLE_API_KEY=your_google_api_key_here
-
-GROQ_API_KEY=your_groq_api_key_here
-
-LLM_PROVIDER=gemini
-
-LIGHTNING_API_KEY=your_lightning_api_key_here
-
-# Format(for custom models): https://lightning.ai/raghav-56/model-apis/models/your-model-id
-LIGHTNING_BASE_URL=https://lightning.ai/api/v1
-
-# Model name (optional)
-MODEL="models/gemini-3-pro-preview"
-
-GITHUB_APP_PRIVATE_KEY="your_github_app_private_key_here"
-
-MCP_LOG_LEVEL=debug
diff --git a/community_chatbot/mcp/.gitignore b/community_chatbot/mcp/.gitignore
deleted file mode 100644
index 634f145f..00000000
--- a/community_chatbot/mcp/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-*.env
-__pycache__/
diff --git a/community_chatbot/mcp_impl/.env.example b/community_chatbot/mcp_impl/.env.example
new file mode 100644
index 00000000..d499a11c
--- /dev/null
+++ b/community_chatbot/mcp_impl/.env.example
@@ -0,0 +1,66 @@
+GITHUB_PERSONAL_ACCESS_TOKEN=your_github_token_here
+# OPENAI_API_KEY=your_openai_api_key_here
+
+GOOGLE_API_KEY=your_google_api_key_here
+
+GROQ_API_KEY=your_groq_api_key_here
+
+LLM_PROVIDER=gemini
+
+LIGHTNING_API_KEY=your_lightning_api_key_here
+
+# Format (for custom models): https://lightning.ai/raghav-56/model-apis/models/your-model-id
+LIGHTNING_BASE_URL=https://lightning.ai/api/v1
+
+# Model name (optional)
+MODEL="models/gemini-3-pro-preview"
+
+GITHUB_APP_PRIVATE_KEY="your_github_app_private_key_here"
+
+MCP_LOG_LEVEL=debug
+
+# Slack MCP Configuration
+
+# Authentication via OAuth token (recommended: more secure and does not expire frequently).
+# Other authentication methods are also available; see the documentation for details.
+SLACK_MCP_XOXP_TOKEN=xoxp-your-token-here
+
+
+# Enable message posting (set to 'true' for all channels, or comma-separated channel IDs)
+SLACK_MCP_ADD_MESSAGE_TOOL=true
+
+# Transport Configuration (Optional)
+# Options: stdio (default, runs npx subprocess), sse, streamable_http
+# SLACK_MCP_TRANSPORT=stdio
+
+# Server Connection (Required for SSE/HTTP transports only)
+# SLACK_MCP_SERVER_URL=http://127.0.0.1:13080/sse
+# SLACK_MCP_API_KEY=your-api-key-here
+
+# Cache Configuration (Optional)
+# Paths to cache files for users and channels
+# SLACK_MCP_USERS_CACHE=.users_cache.json
+# SLACK_MCP_CHANNELS_CACHE=.channels_cache_v2.json
+
+# Message Posting Controls (Optional - disabled by default for safety)
+# Options:
+# - Empty/not set: Posting disabled (default)
+# - "true": Enable posting to all channels
+# - "C123,C456": Whitelist specific channel IDs (comma-separated)
+# - "!C123,C456": Blacklist specific channel IDs (allow all except these)
+# SLACK_MCP_ADD_MESSAGE_TOOL=
+
+# Auto-mark posted messages as read (Optional)
+# SLACK_MCP_ADD_MESSAGE_MARK=true
+
+# Link unfurling control (Optional)
+# Options:
+# - Empty/not set: Unfurling disabled (default)
+# - "true": Enable unfurling for all domains
+# - "github.com,slack.com": Whitelist specific domains (comma-separated)
+# SLACK_MCP_ADD_MESSAGE_UNFURLING=
+
+# Enterprise Configuration (Optional - for custom Slack environments)
+# SLACK_MCP_PROXY=http://proxy.example.com:8080
+# SLACK_MCP_USER_AGENT=CustomAgent/1.0
+# SLACK_MCP_CUSTOM_TLS=true
diff --git a/community_chatbot/mcp_impl/.gitignore b/community_chatbot/mcp_impl/.gitignore
new file mode 100644
index 00000000..b46cdcbb
--- /dev/null
+++ b/community_chatbot/mcp_impl/.gitignore
@@ -0,0 +1,51 @@
+*.env
+
+# Python
+__pycache__/
+*.py[cod]
+*$py.class
+*.so
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+.venv/
+venv/
+ENV/
+env/
+
+# uv
+uv.lock
+.python-version
+
+# IDEs
+.vscode/
+
+# Node
+node_modules/
+package-lock.json
+npm-debug.log*
+yarn-debug.log*
+yarn-error.log*
+.pnpm-debug.log*
+
+# Testing
+.pytest_cache/
+.coverage
+htmlcov/
+*.cover
+
+# Logs
+*.log
+
diff --git a/community_chatbot/mcp_impl/README.md b/community_chatbot/mcp_impl/README.md
new file mode 100644
index 00000000..e69de29b
diff --git a/community_chatbot/mcp/github/__init__.py b/community_chatbot/mcp_impl/github/__init__.py
similarity index 100%
rename from community_chatbot/mcp/github/__init__.py
rename to community_chatbot/mcp_impl/github/__init__.py
diff --git a/community_chatbot/mcp/github/agent.py b/community_chatbot/mcp_impl/github/agent.py
similarity index 91%
rename from community_chatbot/mcp/github/agent.py
rename to community_chatbot/mcp_impl/github/agent.py
index 9f4f1cc6..db31ae69 100644
--- a/community_chatbot/mcp/github/agent.py
+++ b/community_chatbot/mcp_impl/github/agent.py
@@ -1,5 +1,5 @@
import os
-from community_chatbot.mcp.lib.base_agent import BaseAgent
+from lib.base_agent import BaseAgent
__all__ = [
"get_github_agent",
diff --git a/community_chatbot/mcp/github/commands.py b/community_chatbot/mcp_impl/github/commands.py
similarity index 91%
rename from community_chatbot/mcp/github/commands.py
rename to community_chatbot/mcp_impl/github/commands.py
index 74337253..7f10a559 100644
--- a/community_chatbot/mcp/github/commands.py
+++ b/community_chatbot/mcp_impl/github/commands.py
@@ -1,6 +1,6 @@
-from community_chatbot.mcp.lib import base_commands
-from community_chatbot.mcp.lib.state import RuntimeState
-from community_chatbot.mcp.github.agent import get_github_agent
+from lib import base_commands
+from lib.state import RuntimeState
+from github.agent import get_github_agent
__all__ = [
diff --git a/community_chatbot/mcp/github/github_mcp.py b/community_chatbot/mcp_impl/github/github_mcp.py
similarity index 96%
rename from community_chatbot/mcp/github/github_mcp.py
rename to community_chatbot/mcp_impl/github/github_mcp.py
index 747f66d2..495e5e85 100644
--- a/community_chatbot/mcp/github/github_mcp.py
+++ b/community_chatbot/mcp_impl/github/github_mcp.py
@@ -3,8 +3,8 @@
from dotenv import load_dotenv
import typer
-from community_chatbot.mcp.lib.state import RuntimeState
-from community_chatbot.mcp.github import commands
+from lib.state import RuntimeState
+from github import commands
load_dotenv()
diff --git a/community_chatbot/mcp/lib/__init__.py b/community_chatbot/mcp_impl/lib/__init__.py
similarity index 100%
rename from community_chatbot/mcp/lib/__init__.py
rename to community_chatbot/mcp_impl/lib/__init__.py
diff --git a/community_chatbot/mcp/lib/base_agent.py b/community_chatbot/mcp_impl/lib/base_agent.py
similarity index 97%
rename from community_chatbot/mcp/lib/base_agent.py
rename to community_chatbot/mcp_impl/lib/base_agent.py
index 6697c355..a0ef32de 100644
--- a/community_chatbot/mcp/lib/base_agent.py
+++ b/community_chatbot/mcp_impl/lib/base_agent.py
@@ -22,9 +22,9 @@
def _get_llm_provider():
- from community_chatbot.mcp.llm_providers import gemini
- from community_chatbot.mcp.llm_providers import lightning_llm
- from community_chatbot.mcp.llm_providers import groq_llm
+ from llm_providers import gemini
+ from llm_providers import lightning_llm
+ from llm_providers import groq_llm
provider = os.getenv("LLM_PROVIDER", "gemini").lower()
diff --git a/community_chatbot/mcp/lib/base_commands.py b/community_chatbot/mcp_impl/lib/base_commands.py
similarity index 100%
rename from community_chatbot/mcp/lib/base_commands.py
rename to community_chatbot/mcp_impl/lib/base_commands.py
diff --git a/community_chatbot/mcp/lib/state.py b/community_chatbot/mcp_impl/lib/state.py
similarity index 100%
rename from community_chatbot/mcp/lib/state.py
rename to community_chatbot/mcp_impl/lib/state.py
diff --git a/community_chatbot/mcp/lib/utils.py b/community_chatbot/mcp_impl/lib/utils.py
similarity index 100%
rename from community_chatbot/mcp/lib/utils.py
rename to community_chatbot/mcp_impl/lib/utils.py
diff --git a/community_chatbot/mcp/llm_providers/__init__.py b/community_chatbot/mcp_impl/llm_providers/__init__.py
similarity index 100%
rename from community_chatbot/mcp/llm_providers/__init__.py
rename to community_chatbot/mcp_impl/llm_providers/__init__.py
diff --git a/community_chatbot/mcp/llm_providers/gemini.py b/community_chatbot/mcp_impl/llm_providers/gemini.py
similarity index 100%
rename from community_chatbot/mcp/llm_providers/gemini.py
rename to community_chatbot/mcp_impl/llm_providers/gemini.py
diff --git a/community_chatbot/mcp/llm_providers/groq_llm.py b/community_chatbot/mcp_impl/llm_providers/groq_llm.py
similarity index 100%
rename from community_chatbot/mcp/llm_providers/groq_llm.py
rename to community_chatbot/mcp_impl/llm_providers/groq_llm.py
diff --git a/community_chatbot/mcp/llm_providers/lightning_llm.py b/community_chatbot/mcp_impl/llm_providers/lightning_llm.py
similarity index 100%
rename from community_chatbot/mcp/llm_providers/lightning_llm.py
rename to community_chatbot/mcp_impl/llm_providers/lightning_llm.py
diff --git a/community_chatbot/mcp_impl/main.py b/community_chatbot/mcp_impl/main.py
new file mode 100644
index 00000000..510e0b9d
--- /dev/null
+++ b/community_chatbot/mcp_impl/main.py
@@ -0,0 +1,29 @@
+import sys
+from pathlib import Path
+
+import typer
+from github import github_mcp
+from slack import slack_mcp
+
+sys.path.insert(0, str(Path(__file__).parent))
+
+
+app = typer.Typer(
+ help="Community AI MCP Agent - Unified access to GitHub, Slack, and more"
+)
+
+app.add_typer(
+ github_mcp.app,
+ name="github",
+ help="GitHub MCP Agent - Interact with GitHub repositories"
+)
+
+app.add_typer(
+ slack_mcp.app,
+ name="slack",
+ help="Slack MCP Agent - Interact with Slack workspaces"
+)
+
+
+if __name__ == "__main__":
+ app()
diff --git a/community_chatbot/mcp_impl/package.json b/community_chatbot/mcp_impl/package.json
new file mode 100644
index 00000000..1ddc4ba3
--- /dev/null
+++ b/community_chatbot/mcp_impl/package.json
@@ -0,0 +1,13 @@
+{
+ "name": "community-mcp-impl",
+ "version": "1.0.0",
+ "description": "MCP Implementation for Community AI Agents",
+ "private": true,
+ "dependencies": {
+ "slack-mcp-server": "latest",
+ "slack-mcp-server-windows-amd64": "1.1.26"
+ },
+ "engines": {
+ "node": ">=18.0.0"
+ }
+}
diff --git a/community_chatbot/mcp_impl/pyproject.toml b/community_chatbot/mcp_impl/pyproject.toml
new file mode 100644
index 00000000..5f0a8c0c
--- /dev/null
+++ b/community_chatbot/mcp_impl/pyproject.toml
@@ -0,0 +1,28 @@
+[project]
+name = "mcp_impl"
+version = "0.1.0"
+description = "Community AI MCP Agent"
+readme = "README.md"
+requires-python = ">=3.12"
+dependencies = [
+ "langchain",
+ "langchain-community",
+ "langchain-core",
+ "langchain-google-genai",
+ "langchain-text-splitters",
+ "langchain-mcp-adapters",
+ "langsmith",
+ "langgraph",
+ "langgraph-checkpoint",
+ "langgraph-prebuilt",
+ "langgraph-sdk",
+ "google-genai",
+ "pygithub",
+ "slack-sdk",
+ "pydantic",
+ "python-dotenv",
+ "sqlalchemy",
+ "typer",
+ "langchain-groq",
+ "langchain-openai>=1.1.0",
+]
diff --git a/community_chatbot/mcp/requirements.txt b/community_chatbot/mcp_impl/requirements.txt
similarity index 92%
rename from community_chatbot/mcp/requirements.txt
rename to community_chatbot/mcp_impl/requirements.txt
index a7699bc1..6c0956dc 100644
--- a/community_chatbot/mcp/requirements.txt
+++ b/community_chatbot/mcp_impl/requirements.txt
@@ -3,6 +3,7 @@ langchain
langchain-community
langchain-core
langchain-google-genai
+langchain-openai
langchain-text-splitters
langchain-mcp-adapters
langsmith
@@ -17,6 +18,7 @@ google-genai
# External APIs
pygithub
+slack-sdk
# Utilities
pydantic
diff --git a/community_chatbot/mcp_impl/slack/README.md b/community_chatbot/mcp_impl/slack/README.md
new file mode 100644
index 00000000..ee1cb7da
--- /dev/null
+++ b/community_chatbot/mcp_impl/slack/README.md
@@ -0,0 +1,68 @@
+# Slack MCP Transport Configuration
+
+## Option 1: stdio Transport (Default - Recommended for Getting Started)
+
+Runs the Slack MCP server as a subprocess using npx.
+
+**Configuration** (`.env`):
+```env
+SLACK_MCP_TRANSPORT=stdio
+# Authentication tokens (see above)
+SLACK_MCP_XOXP_TOKEN=xoxp-your-token-here
+```
+
+**Requirements**:
+- Node.js and npm installed
+- `slack-mcp-server` installed locally via `npm install` (declared in `package.json`)
+
+**How it works**:
+- Agent spawns the locally installed `node_modules/.bin/slack-mcp-server --transport stdio`
+- Server runs as subprocess, terminated when agent stops
+- No manual server management needed
+
+## Option 2: SSE Transport (Server-Sent Events)
+
+Connect to a separately running Slack MCP server via HTTP.
+
+**Start the server** (in a separate terminal):
+```bash
+# Using npx
+npx -y slack-mcp-server@latest --transport sse
+
+# Or using Docker
+docker run -d -p 13080:13080 \
+ -e SLACK_MCP_XOXP_TOKEN=xoxp-... \
+ ghcr.io/korotovsky/slack-mcp-server \
+ mcp-server --transport sse
+```
+
+**Configuration** (`.env`):
+```env
+SLACK_MCP_TRANSPORT=sse
+SLACK_MCP_SERVER_URL=http://127.0.0.1:13080/sse
+SLACK_MCP_API_KEY=your-api-key-here # Optional, for authentication
+# Authentication tokens
+SLACK_MCP_XOXP_TOKEN=xoxp-your-token-here
+```
+
+**When to use**:
+- Multiple clients connecting to same server
+- Remote server deployment
+- Better debugging (server logs separate from agent)
+
+## Option 3: Docker Compose Deployment
+
+For production or team usage:
+
+```bash
+cd community_chatbot/mcp_impl/slack
+wget -O docker-compose.yml https://github.com/korotovsky/slack-mcp-server/releases/latest/download/docker-compose.yml
+wget -O .env https://github.com/korotovsky/slack-mcp-server/releases/latest/download/default.env.dist
+
+# Edit .env with your tokens
+nano .env
+
+# Start services
+docker network create app-tier
+docker-compose up -d
+```
\ No newline at end of file
diff --git a/community_chatbot/mcp_impl/slack/__init__.py b/community_chatbot/mcp_impl/slack/__init__.py
new file mode 100644
index 00000000..11e07583
--- /dev/null
+++ b/community_chatbot/mcp_impl/slack/__init__.py
@@ -0,0 +1,5 @@
+from .agent import get_slack_agent
+
+__all__ = [
+ "get_slack_agent",
+]
diff --git a/community_chatbot/mcp_impl/slack/agent.py b/community_chatbot/mcp_impl/slack/agent.py
new file mode 100644
index 00000000..9a40119e
--- /dev/null
+++ b/community_chatbot/mcp_impl/slack/agent.py
@@ -0,0 +1,141 @@
+import os
+from typing import Any
+from lib.base_agent import BaseAgent
+
+__all__ = [
+ "get_slack_agent",
+]
+
+
+def get_slack_agent() -> BaseAgent:
+ """
+ Create and configure a Slack MCP agent.
+
+ Supports multiple authentication modes:
+ - XOXP (OAuth token): Recommended, more secure
+ - XOXC/XOXD (Browser tokens): Stealth mode, no additional permissions
+
+ Supports multiple transports:
+    - stdio: Default, runs server as subprocess (local node_modules/.bin/slack-mcp-server)
+ - sse: Server-Sent Events, connects to separate running server
+ - streamable_http: HTTP transport for remote servers
+ """
+
+ # Check for authentication tokens
+ has_xoxp = bool(os.getenv("SLACK_MCP_XOXP_TOKEN"))
+ has_xoxc = bool(os.getenv("SLACK_MCP_XOXC_TOKEN"))
+ has_xoxd = bool(os.getenv("SLACK_MCP_XOXD_TOKEN"))
+
+ # Validate authentication
+ if not has_xoxp and not (has_xoxc and has_xoxd):
+ required_vars = [
+ "SLACK_MCP_XOXP_TOKEN or (SLACK_MCP_XOXC_TOKEN and SLACK_MCP_XOXD_TOKEN)"
+ ]
+ else:
+ required_vars = []
+
+ # Determine transport and connection configuration
+ transport = os.getenv("SLACK_MCP_TRANSPORT", "stdio").lower()
+
+ if transport == "stdio":
+ # stdio transport: Run npx slack-mcp-server as subprocess
+ return _create_stdio_agent(required_vars)
+ elif transport in ("sse", "streamable_http"):
+ # SSE/HTTP transport: Connect to running server
+ return _create_http_agent(required_vars, transport)
+ else:
+ raise ValueError(
+ f"Unsupported SLACK_MCP_TRANSPORT: {transport}. "
+ f"Supported values: 'stdio', 'sse', 'streamable_http'"
+ )
+
+
+def _create_stdio_agent(required_vars: list[str]) -> BaseAgent:
+ """Create Slack agent with stdio transport (npx subprocess)."""
+
+ class SlackStdioAgent(BaseAgent):
+ def get_connection_config(self) -> dict[str, Any]:
+ """Build stdio connection config for local slack-mcp-server."""
+ # Pass authentication tokens to the server
+ env_vars = {
+ "PATH": os.environ.get("PATH", ""),
+ "SLACK_MCP_XOXP_TOKEN": os.getenv(
+ "SLACK_MCP_XOXP_TOKEN", ""
+ ),
+ "SLACK_MCP_XOXC_TOKEN": os.getenv(
+ "SLACK_MCP_XOXC_TOKEN", ""
+ ),
+ "SLACK_MCP_XOXD_TOKEN": os.getenv(
+ "SLACK_MCP_XOXD_TOKEN", ""
+ ),
+ "SLACK_MCP_ADD_MESSAGE_TOOL": os.getenv(
+ "SLACK_MCP_ADD_MESSAGE_TOOL", ""
+ ),
+ }
+
+ # Use local node_modules binary (installed via package.json)
+ import pathlib
+ project_root = pathlib.Path(__file__).parent.parent
+ local_bin = (
+ project_root / "node_modules" / ".bin"
+ / "slack-mcp-server.cmd"
+ )
+
+ return {
+ "transport": "stdio",
+ "command": str(local_bin),
+ "args": ["--transport", "stdio"],
+ "env": env_vars,
+ }
+
+ return SlackStdioAgent(
+ service_name="slack",
+ required_env_vars=required_vars,
+ )
+
+
+def _create_http_agent(required_vars: list[str], transport: str) -> BaseAgent:
+ """Create Slack agent with SSE/HTTP transport."""
+
+ class SlackHttpAgent(BaseAgent):
+ def __init__(self, transport_type: str, **kwargs):
+ super().__init__(**kwargs)
+ self.transport_type = transport_type
+
+ def validate_environment(self) -> None:
+ """Validate environment for HTTP/SSE transport."""
+ super().validate_environment()
+
+ # For HTTP/SSE, we need server URL
+ if not os.getenv("SLACK_MCP_SERVER_URL"):
+ raise ValueError(
+ "SLACK_MCP_SERVER_URL is required for SSE/HTTP transport. "
+ "Example: http://127.0.0.1:13080/sse"
+ )
+
+ def get_connection_config(self) -> dict[str, Any]:
+ """Build HTTP/SSE connection config."""
+ from lib.utils import build_connection_config
+
+ server_url = os.getenv("SLACK_MCP_SERVER_URL", "http://127.0.0.1:13080/sse")
+
+ # Build base config
+ config = build_connection_config(
+ service_name="slack",
+ server_url_env="SLACK_MCP_SERVER_URL",
+ default_server_url=server_url,
+ token_env=None, # Not used for HTTP/SSE
+ bearer_token_env="SLACK_MCP_API_KEY",
+ )
+
+ # Override transport if using SSE
+ if self.transport_type == "sse":
+ config["transport"] = "sse"
+
+ return config
+
+ return SlackHttpAgent(
+ transport_type=transport,
+ service_name="slack",
+ required_env_vars=required_vars,
+ )
diff --git a/community_chatbot/mcp_impl/slack/commands.py b/community_chatbot/mcp_impl/slack/commands.py
new file mode 100644
index 00000000..50af10ec
--- /dev/null
+++ b/community_chatbot/mcp_impl/slack/commands.py
@@ -0,0 +1,82 @@
+from lib import base_commands
+from lib.state import RuntimeState
+from slack.agent import get_slack_agent
+
+
+__all__ = [
+ "list_tools",
+ "tool_info",
+ "invoke_tool",
+ "chat",
+ "chat_loop",
+ "sessions",
+ "clear_session",
+ "export_session",
+ "health",
+]
+
+
+_slack_agent = get_slack_agent()
+
+
+async def list_tools(state: RuntimeState) -> int:
+ return await base_commands.list_tools(state, _slack_agent)
+
+
+async def tool_info(state: RuntimeState, tool_identifier: str) -> int:
+ return await base_commands.tool_info(state, _slack_agent, tool_identifier)
+
+
+async def invoke_tool(
+ state: RuntimeState,
+ tool_identifier: str,
+ args_json: str | None = None,
+) -> int:
+ return await base_commands.invoke_tool(
+ state, _slack_agent, tool_identifier, args_json
+ )
+
+
+async def chat(
+ state: RuntimeState,
+ message: str,
+ session_id: str = "default",
+) -> int:
+ return await base_commands.chat(state, _slack_agent, message, session_id)
+
+
+async def chat_loop(
+ state: RuntimeState,
+ session_id: str = "default",
+ exit_command: str = "/exit",
+ reset_command: str = "/reset",
+ prompt_prefix: str | None = None,
+) -> int:
+ return await base_commands.chat_loop(
+ state,
+ _slack_agent,
+ session_id,
+ exit_command,
+ reset_command,
+ prompt_prefix,
+ )
+
+
+async def sessions(state: RuntimeState) -> int:
+ return await base_commands.sessions(state)
+
+
+async def clear_session(state: RuntimeState, session_id: str) -> int:
+ return await base_commands.clear_session(state, session_id)
+
+
+async def export_session(
+ state: RuntimeState,
+ session_id: str,
+ output_path: str | None = None,
+) -> int:
+ return await base_commands.export_session(state, session_id, output_path)
+
+
+async def health(state: RuntimeState) -> int:
+ return await base_commands.health(state)
diff --git a/community_chatbot/mcp_impl/slack/slack_mcp.py b/community_chatbot/mcp_impl/slack/slack_mcp.py
new file mode 100644
index 00000000..5de8a9e6
--- /dev/null
+++ b/community_chatbot/mcp_impl/slack/slack_mcp.py
@@ -0,0 +1,144 @@
+import asyncio
+
+from dotenv import load_dotenv
+import typer
+
+from lib.state import RuntimeState
+from slack import commands
+
+
+load_dotenv()
+
+state = RuntimeState(service_name="slack")
+
+app = typer.Typer(help="Slack MCP Agent CLI")
+
+
+@app.command("list-tools")
+def list_tools() -> int:
+ """List all available Slack MCP tools."""
+ return asyncio.run(commands.list_tools(state))
+
+
+@app.command("chat")
+def chat(
+ message: str = typer.Argument(
+ ..., help="Message to send to the agent."
+ ),
+ session_id: str = typer.Option(
+ "default", help="Chat session identifier."
+ ),
+) -> int:
+ """Send a single message to the Slack agent."""
+ return asyncio.run(
+ commands.chat(state, message=message, session_id=session_id)
+ )
+
+
+@app.command("chat-loop")
+def chat_loop(
+ session_id: str = typer.Option(
+ "default",
+ help="Chat session identifier to use for the loop.",
+ ),
+ exit_command: str = typer.Option(
+ "/exit",
+ help="Command typed alone on a line to end the loop.",
+ ),
+ reset_command: str = typer.Option(
+ "/reset",
+ help="Command typed alone on a line to reset the session history.",
+ ),
+ prompt_prefix: str | None = typer.Option(
+ None,
+ help="Optional custom prompt prefix displayed before user input.",
+ ),
+) -> int:
+ """Start an interactive chat session with the Slack agent."""
+ return asyncio.run(
+ commands.chat_loop(
+ state,
+ session_id=session_id,
+ exit_command=exit_command,
+ reset_command=reset_command,
+ prompt_prefix=prompt_prefix,
+ )
+ )
+
+
+@app.command("tool-info")
+def tool_info(
+ tool_identifier: str = typer.Argument(
+ ..., help="Tool CLI or original name."
+ )
+) -> int:
+ """Show detailed information about a specific tool."""
+ return asyncio.run(
+ commands.tool_info(state, tool_identifier=tool_identifier)
+ )
+
+
+@app.command("invoke-tool")
+def invoke_tool(
+ tool_identifier: str = typer.Argument(
+ ..., help="Tool CLI or original name to invoke."
+ ),
+ args_json: str = typer.Option(
+ "{}",
+ help="JSON object containing arguments for the tool.",
+ ),
+) -> int:
+ """Directly invoke a Slack MCP tool with JSON arguments."""
+ return asyncio.run(
+ commands.invoke_tool(
+ state,
+ tool_identifier=tool_identifier,
+ args_json=args_json,
+ )
+ )
+
+
+@app.command("sessions")
+def sessions() -> int:
+ """List all active chat sessions."""
+ return asyncio.run(commands.sessions(state))
+
+
+@app.command("clear-session")
+def clear_session(
+ session_id: str = typer.Argument(..., help="Session id to clear."),
+) -> int:
+ """Clear a specific chat session."""
+ return asyncio.run(commands.clear_session(state, session_id=session_id))
+
+
+@app.command("export-session")
+def export_session(
+ session_id: str = typer.Argument(
+ ..., help="Session id to export."
+ ),
+ output_path: str | None = typer.Option(
+ None,
+ "--output",
+ "-o",
+ help="Optional path to write the exported JSON transcript.",
+ ),
+) -> int:
+ """Export a chat session to JSON format."""
+ return asyncio.run(
+ commands.export_session(
+ state,
+ session_id=session_id,
+ output_path=output_path,
+ )
+ )
+
+
+@app.command("health")
+def health() -> int:
+ """Check the health status of the Slack agent."""
+ return asyncio.run(commands.health(state))
+
+
+if __name__ == "__main__":
+ app()
From 8c7f59f1b3d4f7f36b0630f45f9da34e1fa1dd89 Mon Sep 17 00:00:00 2001
From: Raghav Gupta <142162663+Raghav-56@users.noreply.github.com>
Date: Sun, 18 Jan 2026 15:12:22 +0530
Subject: [PATCH 4/5] feat(mcp): openMF#46 Add Jira MCP integration - Use
https://github.com/sooperset/mcp-atlassian - Refactor and organize agents
into 'agents' directory - Add Readme
---
community_chatbot/mcp_impl/.env.example | 144 +++++++++----
community_chatbot/mcp_impl/README.md | 195 ++++++++++++++++++
community_chatbot/mcp_impl/agents/__init__.py | 5 +
.../agent.py => agents/github_agent.py} | 5 +-
.../mcp_impl/agents/jira_agent.py | 174 ++++++++++++++++
.../mcp_impl/agents/slack_agent.py | 136 ++++++++++++
community_chatbot/mcp_impl/github/__init__.py | 5 -
community_chatbot/mcp_impl/github/commands.py | 82 --------
.../mcp_impl/github/github_mcp.py | 135 ------------
community_chatbot/mcp_impl/lib/__init__.py | 2 +-
community_chatbot/mcp_impl/lib/base_mcp.py | 88 ++++++++
.../mcp_impl/lib/base_transport.py | 54 +++++
community_chatbot/mcp_impl/main.py | 23 ++-
community_chatbot/mcp_impl/package.json | 10 +-
community_chatbot/mcp_impl/slack/README.md | 68 ------
community_chatbot/mcp_impl/slack/__init__.py | 5 -
community_chatbot/mcp_impl/slack/agent.py | 141 -------------
community_chatbot/mcp_impl/slack/commands.py | 82 --------
community_chatbot/mcp_impl/slack/slack_mcp.py | 144 -------------
19 files changed, 773 insertions(+), 725 deletions(-)
create mode 100644 community_chatbot/mcp_impl/agents/__init__.py
rename community_chatbot/mcp_impl/{github/agent.py => agents/github_agent.py} (94%)
create mode 100644 community_chatbot/mcp_impl/agents/jira_agent.py
create mode 100644 community_chatbot/mcp_impl/agents/slack_agent.py
delete mode 100644 community_chatbot/mcp_impl/github/__init__.py
delete mode 100644 community_chatbot/mcp_impl/github/commands.py
delete mode 100644 community_chatbot/mcp_impl/github/github_mcp.py
create mode 100644 community_chatbot/mcp_impl/lib/base_mcp.py
create mode 100644 community_chatbot/mcp_impl/lib/base_transport.py
delete mode 100644 community_chatbot/mcp_impl/slack/README.md
delete mode 100644 community_chatbot/mcp_impl/slack/__init__.py
delete mode 100644 community_chatbot/mcp_impl/slack/agent.py
delete mode 100644 community_chatbot/mcp_impl/slack/commands.py
delete mode 100644 community_chatbot/mcp_impl/slack/slack_mcp.py
diff --git a/community_chatbot/mcp_impl/.env.example b/community_chatbot/mcp_impl/.env.example
index d499a11c..ea8e1fd8 100644
--- a/community_chatbot/mcp_impl/.env.example
+++ b/community_chatbot/mcp_impl/.env.example
@@ -1,66 +1,122 @@
-GITHUB_PERSONAL_ACCESS_TOKEN=your_github_token_here
-# OPENAI_API_KEY=your_openai_api_key_here
+# LLM Provider Configuration (Required)
-GOOGLE_API_KEY=your_google_api_key_here
+# Choose your LLM provider: gemini, groq, openai, or lightning
+LLM_PROVIDER=gemini
-GROQ_API_KEY=your_groq_api_key_here
+# API Keys (provide based on your LLM_PROVIDER choice)
+GOOGLE_API_KEY=your_google_api_key_here
+# GROQ_API_KEY=your_groq_api_key_here
+# OPENAI_API_KEY=your_openai_api_key_here
+# LIGHTNING_API_KEY=your_lightning_api_key_here
-LLM_PROVIDER=gemini
+# Optional: Custom model name
+# MODEL=models/gemini-2.0-flash-exp
-LIGHTNING_API_KEY=your_lightning_api_key_here
+# Optional: Lightning AI custom endpoint
+# LIGHTNING_BASE_URL=https://lightning.ai/api/v1
-# Format(for custom models): https://lightning.ai/raghav-56/model-apis/models/your-model-id
-LIGHTNING_BASE_URL=https://lightning.ai/api/v1
+# GitHub Agent Configuration
-# Model name (optional)
-MODEL="models/gemini-3-pro-preview"
+# Required: GitHub Personal Access Token
+# Create at: https://github.com/settings/tokens
+GITHUB_PERSONAL_ACCESS_TOKEN=your_github_token_here
-GITHUB_APP_PRIVATE_KEY="your_github_app_private_key_here"
+# Optional: GitHub MCP Server Configuration
+# GITHUB_MCP_BEARER_TOKEN=your_bearer_token
+# GITHUB_MCP_SERVER_URL=https://api.githubcopilot.com/mcp/
-MCP_LOG_LEVEL=debug
+# Slack Agent Configuration
+# See: https://github.com/korotovsky/slack-mcp-server
-# Slack MCP Configuration
+# Authentication: Choose ONE of these three methods:
-# Authentication via OAuth Token (Recommended - more secure, doesn't expire frequently)
-# there are other methods also available, see documentation for details
+# Method 1: User OAuth Token (Recommended - full access)
+# Create at: https://api.slack.com/apps
SLACK_MCP_XOXP_TOKEN=xoxp-your-token-here
+# Method 2: Bot Token (Limited access - invited channels only, no search)
+# SLACK_MCP_XOXB_TOKEN=xoxb-your-bot-token-here
-# Enable message posting (set to 'true' for all channels, or comma-separated channel IDs)
-SLACK_MCP_ADD_MESSAGE_TOOL=true
+# Method 3: Browser Tokens (Stealth mode - no permissions needed)
+# Extract from browser DevTools while logged into Slack
+# SLACK_MCP_XOXC_TOKEN=xoxc-your-browser-token-here
+# SLACK_MCP_XOXD_TOKEN=xoxd-your-browser-cookie-here
-# Transport Configuration (Optional)
-# Options: stdio (default, runs npx subprocess), sse, streamable_http
-# SLACK_MCP_TRANSPORT=stdio
+# Optional: Enable message posting
+# Options: empty/false (disabled), true (all channels), or comma-separated channel IDs
+# Use ! prefix to allow all EXCEPT specified channels: !C123,C456
+# SLACK_MCP_ADD_MESSAGE_TOOL=false
+
+# Optional: Auto-mark sent messages as read
+# SLACK_MCP_ADD_MESSAGE_MARK=true
-# Server Connection (Required for SSE/HTTP transports only)
+# Optional: Transport Configuration
+# SLACK_MCP_TRANSPORT=stdio
# SLACK_MCP_SERVER_URL=http://127.0.0.1:13080/sse
# SLACK_MCP_API_KEY=your-api-key-here
+# SLACK_MCP_HOST=127.0.0.1
+# SLACK_MCP_PORT=13080
-# Cache Configuration (Optional)
-# Paths to cache files for users and channels
-# SLACK_MCP_USERS_CACHE=.users_cache.json
-# SLACK_MCP_CHANNELS_CACHE=.channels_cache_v2.json
+# Optional: Enterprise Slack settings
+# SLACK_MCP_USER_AGENT=your-browser-user-agent
+# SLACK_MCP_CUSTOM_TLS=true
+# SLACK_MCP_PROXY=http://proxy.example.com:8080
-# Message Posting Controls (Optional - disabled by default for safety)
-# Options:
-# - Empty/not set: Posting disabled (default)
-# - "true": Enable posting to all channels
-# - "C123,C456": Whitelist specific channel IDs (comma-separated)
-# - "!C123,C456": Blacklist specific channel IDs (allow all except these)
-# SLACK_MCP_ADD_MESSAGE_TOOL=
+# Optional: Cache configuration
+# SLACK_MCP_USERS_CACHE=/path/to/users_cache.json
+# SLACK_MCP_CHANNELS_CACHE=/path/to/channels_cache_v2.json
-# Auto-mark posted messages as read (Optional)
-# SLACK_MCP_ADD_MESSAGE_MARK=true
+# Optional: Logging
+# SLACK_MCP_LOG_LEVEL=info
-# Link unfurling control (Optional)
-# Options:
-# - Empty/not set: Unfurling disabled (default)
-# - "true": Enable unfurling for all domains
-# - "github.com,slack.com": Whitelist specific domains (comma-separated)
-# SLACK_MCP_ADD_MESSAGE_UNFURLING=
+# Jira Agent Configuration
+# Uses mcp-atlassian package via uvx (Python)
+# See: https://github.com/sooperset/mcp-atlassian
+# Configured for: https://mifosforge.jira.com
-# Enterprise Configuration (Optional - for custom Slack environments)
-# SLACK_MCP_PROXY=http://proxy.example.com:8080
-# SLACK_MCP_USER_AGENT=CustomAgent/1.0
-# SLACK_MCP_CUSTOM_TLS=true
+# Required: Jira Instance URL
+JIRA_URL=https://mifosforge.jira.com
+
+# Authentication: Choose ONE method
+
+# Method 1: API Token (Recommended for Cloud)
+# Create at: https://id.atlassian.com/manage-profile/security/api-tokens
+JIRA_USERNAME=your.email@company.com
+JIRA_API_TOKEN=your_jira_api_token_here
+
+# Method 2: Personal Access Token (Server/Data Center)
+# Go to: Profile → Personal Access Tokens → Create token
+# JIRA_PERSONAL_TOKEN=your_personal_access_token_here
+
+# Optional: SSL verification (set to false for self-signed certs)
+# JIRA_SSL_VERIFY=true
+
+# Optional: Project filtering (limit to specific projects)
+# JIRA_PROJECTS_FILTER=MIFOS,FINERACT
+
+# Optional: Read-only mode (disable write operations)
+# READ_ONLY_MODE=true
+
+# Optional: Enable only specific tools
+# ENABLED_TOOLS=jira_search,jira_get_issue
+
+# Optional: Transport Configuration
+# Default: stdio (uses uvx mcp-atlassian)
+# JIRA_MCP_TRANSPORT=stdio
+
+# Optional: Use Docker instead of uvx
+# JIRA_MCP_USE_DOCKER=false
+# JIRA_MCP_DOCKER_IMAGE=ghcr.io/sooperset/mcp-atlassian:latest
+
+# Optional: Proxy settings
+# HTTP_PROXY=http://proxy.example.com:8080
+# HTTPS_PROXY=http://proxy.example.com:8080
+# NO_PROXY=localhost,127.0.0.1
+
+# Optional: Custom HTTP headers (for corporate environments)
+# Format: key=value,key2=value2
+# JIRA_CUSTOM_HEADERS=X-Custom-Header=value
+
+# Optional: Logging
+# MCP_VERBOSE=true
+# MCP_VERY_VERBOSE=false
diff --git a/community_chatbot/mcp_impl/README.md b/community_chatbot/mcp_impl/README.md
index e69de29b..f663e78b 100644
--- a/community_chatbot/mcp_impl/README.md
+++ b/community_chatbot/mcp_impl/README.md
@@ -0,0 +1,195 @@
+# Community AI MCP Agent
+
+A unified Python CLI for interacting with GitHub, Slack, and Jira using the Model Context Protocol (MCP) and LangChain.
+
+## Features
+
+- **GitHub Agent**: Repository management, issues, PRs, and code operations
+- **Slack Agent**: Channel management, messaging, and workspace interactions
+- **Jira Agent**: Issue tracking, project management, and workflow automation
+- **Multi-LLM Support**: Works with Gemini, Groq, OpenAI, and Lightning AI
+- **Extensible Architecture**: Built on LangChain and LangGraph for easy customization
+
+## Quick Start
+
+### 1. Installation
+
+```bash
+# From the repository root, enter the MCP implementation directory
+cd community_chatbot/mcp_impl
+
+# Install Python dependencies (using uv or pip)
+uv sync
+# or
+pip install -r requirements.txt
+
+# Install Node.js dependencies for Slack agent (requires Node.js)
+cd agents
+npm install
+cd ..
+```
+
+### 2. Configuration
+
+Copy the example environment file and configure your credentials:
+
+```bash
+cp .env.example .env
+```
+
+**Minimum required configuration:**
+
+```env
+# Choose your LLM provider
+LLM_PROVIDER=gemini
+GOOGLE_API_KEY=your_key_here
+
+# Enable agents as needed
+GITHUB_PERSONAL_ACCESS_TOKEN=your_token_here
+SLACK_MCP_XOXP_TOKEN=xoxp-your-token-here
+JIRA_URL=https://your-company.atlassian.net
+JIRA_USERNAME=your.email@company.com
+JIRA_API_TOKEN=your_token_here
+```
+
+### 3. Usage
+
+```bash
+# Run the GitHub agent
+python main.py github
+
+# Run the Slack agent
+python main.py slack
+
+# Run the Jira agent
+python main.py jira
+
+# Get help for any agent
+python main.py github --help
+```
+
+## Agent Details
+
+### GitHub Agent
+
+Interact with GitHub repositories using your personal access token.
+
+**Required:**
+
+- `GITHUB_PERSONAL_ACCESS_TOKEN` - [Create here](https://github.com/settings/tokens)
+
+**Capabilities:**
+
+- Repository operations (create, clone, search)
+- Issue and PR management
+- Code search and file operations
+- Workflow automation
+
+### Slack Agent
+
+Connect to Slack workspaces and manage communications.
+
+**Required:**
+
+- `SLACK_MCP_XOXP_TOKEN` - [Create Slack app](https://api.slack.com/apps)
+
+**Capabilities:**
+
+- Channel and user management
+- Message posting and retrieval
+- Workspace information
+
+**Optional:** Enable message posting with `SLACK_MCP_ADD_MESSAGE_TOOL=true`
+
+### Jira Agent
+
+Manage Jira projects and issues programmatically.
+
+**Required:**
+
+- `JIRA_URL` - Your Jira instance URL
+- `JIRA_USERNAME` - Your email
+- `JIRA_API_TOKEN` - [Create here](https://id.atlassian.com/manage-profile/security/api-tokens)
+
+**Capabilities:**
+
+- Issue CRUD operations
+- Project and sprint management
+- Advanced JQL searches
+- Custom field handling
+
+## Implementation Details
+
+- **MCP integration:** Agents use an internal MCP-based client flow implemented in [community_chatbot/mcp_impl/lib/base_agent.py](community_chatbot/mcp_impl/lib/base_agent.py). The CLI shell for each agent is created with the helper in [community_chatbot/mcp_impl/lib/base_mcp.py](community_chatbot/mcp_impl/lib/base_mcp.py) which exposes commands like `list-tools`, `chat`, `invoke-tool`, and `health`.
+
+- **MCP client used:** The code constructs a `MultiServerMCPClient` (from the `langchain_mcp_adapters` package) inside `BaseAgent.initialize()` to discover and load remote MCP tools. Tools discovered from the MCP endpoints are converted into LangGraph/LangChain-compatible tool definitions and used to create a React-style agent via `langgraph.prebuilt.create_react_agent`.
+
+- **Transport options:** Agents support multiple transport modes:
+  - `stdio` — runs a local process (an `npx`/`uvx` package or a Docker image) and communicates over stdio. Examples:
+ - Slack: runs `npx slack-mcp-server --transport stdio` (see [community_chatbot/mcp_impl/agents/slack_agent.py](community_chatbot/mcp_impl/agents/slack_agent.py)).
+    - Jira: can run `ghcr.io/sooperset/mcp-atlassian:latest` (Docker) or `uvx mcp-atlassian` (see [community_chatbot/mcp_impl/agents/jira_agent.py](community_chatbot/mcp_impl/agents/jira_agent.py)).
+ - `sse` / `streamable_http` — connects to an HTTP/SSE MCP server endpoint. Default example endpoints used by the code:
+ - Slack HTTP default: `http://127.0.0.1:13080/sse`
+    - Jira HTTP default: `http://127.0.0.1:8000/sse`
+ The HTTP transport builder is implemented in [community_chatbot/mcp_impl/lib/base_transport.py](community_chatbot/mcp_impl/lib/base_transport.py).
+
+- **LLM & agent creation:** The LLM provider is chosen by `LLM_PROVIDER` (see [community_chatbot/mcp_impl/lib/base_agent.py](community_chatbot/mcp_impl/lib/base_agent.py)) and the repository includes provider adapters in [community_chatbot/mcp_impl/llm_providers/](community_chatbot/mcp_impl/llm_providers/) (Gemini, Groq, Lightning). The selected LLM is passed into `create_react_agent` alongside the loaded MCP tools to form the agent executor.
+
+### GitHub Agent
+
+- `https://github.com/github/github-mcp-server` — GitHub MCP server repository.
+- `https://api.githubcopilot.com/mcp/` — default GitHub MCP endpoint used as the service URL in `agents/github_agent.py`.
+- `langchain_mcp_adapters` — MCP client package used via `MultiServerMCPClient` (see `lib/base_agent.py`).
+- `langgraph` / `langchain` — used to create the React-style agent (`langgraph.prebuilt.create_react_agent`).
+
+### Slack Agent
+
+- `https://github.com/korotovsky/slack-mcp-server` — Slack MCP server repository.
+- `npx slack-mcp-server` — npm package invoked in `agents/slack_agent.py` when using the `stdio` transport (runs `slack-mcp-server --transport stdio`).
+- `SLACK_*` environment variables (tokens) map to Slack credentials; see the standard Slack developer docs for how to obtain them.
+- Default local HTTP/SSE endpoint in code: `http://127.0.0.1:13080/sse` (used when `SLACK_MCP_TRANSPORT` is set to `sse`/`streamable_http`).
+
+### Jira Agent
+
+- `https://github.com/sooperset/mcp-atlassian` — Jira MCP server repository.
+- `ghcr.io/sooperset/mcp-atlassian:latest` — Docker image referenced in `agents/jira_agent.py` for the `stdio` Docker transport.
+- `uvx mcp-atlassian` — Python package runner invocation used for `stdio` (non-Docker) mode.
+- Jira API tokens can be created at https://id.atlassian.com/manage-profile/security/api-tokens.
+- Default local HTTP/SSE endpoint in code: `http://127.0.0.1:8000/sse` (used when `JIRA_MCP_TRANSPORT` is `sse`/`streamable_http`).
+
+## Project Structure
+
+```
+mcp_impl/
+├── agents/ # Agent implementations
+│ ├── github_agent.py
+│ ├── jira_agent.py
+│ └── slack_agent.py
+├── lib/ # Core library
+│ ├── base_agent.py # Base agent class
+│ ├── base_mcp.py # MCP integration
+│ └── utils.py # Utilities
+├── llm_providers/ # LLM provider implementations
+│ ├── gemini.py
+│ ├── groq_llm.py
+│ ├── lightning_llm.py
+│ └── __init__.py
+├── main.py # CLI entry point
+├── .env.example # Configuration template
+└── requirements.txt # Python dependencies
+```
+
+## Requirements
+
+- Python >= 3.12
+- Valid API keys for your chosen LLM provider
+- Agent-specific credentials (GitHub token, Slack token, Jira credentials)
+
+## Getting API Keys
+
+- **GitHub**: [Settings → Developer settings → Personal access tokens](https://github.com/settings/tokens)
+- **Slack**: [Create a Slack app](https://api.slack.com/apps) → Install to workspace → Copy OAuth token
+- **Jira**: [Account security → API tokens](https://id.atlassian.com/manage-profile/security/api-tokens)
+- **Gemini**: [Google AI Studio](https://aistudio.google.com/app/apikey)
+- **Groq**: [Groq Console](https://console.groq.com/keys)
+- **OpenAI**: [OpenAI API Keys](https://platform.openai.com/api-keys)
diff --git a/community_chatbot/mcp_impl/agents/__init__.py b/community_chatbot/mcp_impl/agents/__init__.py
new file mode 100644
index 00000000..44063ea9
--- /dev/null
+++ b/community_chatbot/mcp_impl/agents/__init__.py
@@ -0,0 +1,5 @@
+from .github_agent import get_github_agent
+from .jira_agent import get_jira_agent
+from .slack_agent import get_slack_agent
+
+__all__ = ["get_github_agent", "get_jira_agent", "get_slack_agent"]
diff --git a/community_chatbot/mcp_impl/github/agent.py b/community_chatbot/mcp_impl/agents/github_agent.py
similarity index 94%
rename from community_chatbot/mcp_impl/github/agent.py
rename to community_chatbot/mcp_impl/agents/github_agent.py
index db31ae69..b24812bc 100644
--- a/community_chatbot/mcp_impl/github/agent.py
+++ b/community_chatbot/mcp_impl/agents/github_agent.py
@@ -1,13 +1,10 @@
import os
from lib.base_agent import BaseAgent
-__all__ = [
- "get_github_agent",
-]
+__all__ = ["get_github_agent"]
def get_github_agent() -> BaseAgent:
-
if not (
os.getenv("GITHUB_PERSONAL_ACCESS_TOKEN")
or os.getenv("GITHUB_MCP_BEARER_TOKEN")
diff --git a/community_chatbot/mcp_impl/agents/jira_agent.py b/community_chatbot/mcp_impl/agents/jira_agent.py
new file mode 100644
index 00000000..5d612a25
--- /dev/null
+++ b/community_chatbot/mcp_impl/agents/jira_agent.py
@@ -0,0 +1,174 @@
+import os
+from typing import Any
+from lib.base_agent import BaseAgent
+from lib.base_transport import StdioTransportMixin, HttpTransportMixin
+
+__all__ = ["get_jira_agent"]
+
+# Environment variables for Jira MCP (mcp-atlassian package)
+# See: https://github.com/sooperset/mcp-atlassian
+# Configured for Jira-only usage (no Confluence)
+JIRA_ENV_VARS = [
+ # Connection settings
+ "JIRA_URL", # Jira instance URL
+ # Cloud authentication (API Token)
+ "JIRA_USERNAME", # Email for Cloud
+ "JIRA_API_TOKEN", # API token from id.atlassian.com
+ # Server/Data Center authentication (PAT)
+ "JIRA_PERSONAL_TOKEN", # Personal Access Token
+ # SSL and proxy settings
+ "JIRA_SSL_VERIFY", # SSL verification (true/false)
+ "HTTP_PROXY", # HTTP proxy URL
+ "HTTPS_PROXY", # HTTPS proxy URL
+ "JIRA_HTTPS_PROXY", # Jira-specific HTTPS proxy
+ "NO_PROXY", # Hosts to bypass proxy
+ "SOCKS_PROXY", # SOCKS proxy URL
+ # Custom headers (for corporate environments)
+ "JIRA_CUSTOM_HEADERS", # Format: key=value,key2=value2
+ # Filtering and access control
+ "JIRA_PROJECTS_FILTER", # Limit to specific projects
+ "ENABLED_TOOLS", # Enable specific tools only
+ "READ_ONLY_MODE", # Disable write operations
+ # Server options
+ "TRANSPORT", # Transport type for the server
+ "PORT", # Port for HTTP transports (default: 8000)
+ "HOST", # Host for HTTP transports (default: 0.0.0.0)
+ "STATELESS", # Enable stateless mode
+ # Logging
+ "MCP_VERBOSE", # Enable verbose logging
+ "MCP_VERY_VERBOSE", # Enable debug logging
+ "MCP_LOGGING_STDOUT", # Log to stdout instead of stderr
+]
+
+
+def get_jira_agent() -> BaseAgent:
+ """Create and return a Jira MCP agent.
+
+ Uses mcp-atlassian package via uvx (Python).
+ Configured for Jira-only access (no Confluence).
+
+ Supports two authentication methods:
+ 1. API Token (Cloud): JIRA_USERNAME + JIRA_API_TOKEN
+ 2. Personal Access Token (Server/DC): JIRA_PERSONAL_TOKEN
+
+ Transport options:
+ - stdio (default): Direct subprocess communication
+ - sse: Server-Sent Events over HTTP
+ - streamable_http: HTTP transport
+ """
+ has_url = bool(os.getenv("JIRA_URL"))
+ has_api_token = bool(
+ os.getenv("JIRA_USERNAME") and os.getenv("JIRA_API_TOKEN")
+ )
+ has_pat = bool(os.getenv("JIRA_PERSONAL_TOKEN"))
+
+ # Build list of missing required variables
+ required_vars = []
+ if not has_url:
+ required_vars.append("JIRA_URL")
+ if not has_api_token and not has_pat:
+ required_vars.append(
+ "(JIRA_USERNAME and JIRA_API_TOKEN) or JIRA_PERSONAL_TOKEN"
+ )
+
+ transport = os.getenv("JIRA_MCP_TRANSPORT", "stdio").lower()
+
+ if transport == "stdio":
+ return _create_stdio_agent(required_vars)
+ elif transport in ("sse", "streamable_http"):
+ return _create_http_agent(required_vars, transport)
+ else:
+ raise ValueError(
+ f"Unsupported JIRA_MCP_TRANSPORT: {transport}. "
+ f"Use 'stdio', 'sse', or 'streamable_http'"
+ )
+
+
+def _create_stdio_agent(required_vars: list[str]) -> BaseAgent:
+ """Create a Jira agent using stdio transport (uvx subprocess)."""
+
+ class JiraStdioAgent(BaseAgent, StdioTransportMixin):
+ def get_connection_config(self) -> dict[str, Any]:
+ use_docker = os.getenv("JIRA_MCP_USE_DOCKER", "").lower()
+ if use_docker in ("true", "1", "yes"):
+ return self._get_docker_config()
+ return self._get_uvx_config()
+
+ def _get_docker_config(self) -> dict[str, Any]:
+ """Build Docker configuration for mcp-atlassian."""
+ docker_args = ["run", "-i", "--rm"]
+
+ # Forward all configured Jira environment variables
+ for var in JIRA_ENV_VARS:
+ if os.getenv(var):
+ docker_args.extend(["-e", var])
+
+ image = os.getenv(
+ "JIRA_MCP_DOCKER_IMAGE",
+ "ghcr.io/sooperset/mcp-atlassian:latest",
+ )
+ docker_args.append(image)
+
+ # Build env dict for subprocess
+ env = {var: os.getenv(var, "") for var in JIRA_ENV_VARS}
+ env["PATH"] = os.environ.get("PATH", "")
+
+ return self.build_stdio_config("docker", docker_args, env)
+
+ def _get_uvx_config(self) -> dict[str, Any]:
+ """Build uvx configuration for mcp-atlassian.
+
+ Uses uvx (Python package runner) instead of npx.
+ Official usage: uvx mcp-atlassian
+ """
+ # Build environment with all Jira variables
+ env: dict[str, str] = {
+ "PATH": os.environ.get("PATH", ""),
+ }
+
+ # Forward all configured Jira environment variables
+ for var in JIRA_ENV_VARS:
+ value = os.getenv(var)
+ if value:
+ env[var] = value
+
+ # Use uvx to run mcp-atlassian
+ # Note: Use --python=3.12 if Python 3.14+ causes issues
+ return self.build_stdio_config(
+ "uvx",
+ ["mcp-atlassian"],
+ env
+ )
+
+ return JiraStdioAgent(
+ service_name="jira", required_env_vars=required_vars
+ )
+
+
+def _create_http_agent(required_vars: list[str], transport: str) -> BaseAgent:
+ """Create a Jira agent using SSE or HTTP transport.
+
+ Requires a running mcp-atlassian server.
+    Default: http://127.0.0.1:8000/sse
+ """
+
+ class JiraHttpAgent(BaseAgent, HttpTransportMixin):
+ def __init__(self, transport_type: str, **kwargs):
+ super().__init__(**kwargs)
+ self.transport_type = transport_type
+
+ def get_connection_config(self) -> dict[str, Any]:
+ # Get server URL from environment or use default
+ host = os.getenv("HOST", "127.0.0.1")
+ port = os.getenv("PORT", "8000")
+ default_url = f"http://{host}:{port}/sse"
+
+ return self.build_http_config(
+ "jira", default_url, self.transport_type
+ )
+
+ return JiraHttpAgent(
+ transport_type=transport,
+ service_name="jira",
+ required_env_vars=required_vars,
+ )
diff --git a/community_chatbot/mcp_impl/agents/slack_agent.py b/community_chatbot/mcp_impl/agents/slack_agent.py
new file mode 100644
index 00000000..8fc2570c
--- /dev/null
+++ b/community_chatbot/mcp_impl/agents/slack_agent.py
@@ -0,0 +1,136 @@
+import os
+from typing import Any
+from lib.base_agent import BaseAgent
+from lib.base_transport import StdioTransportMixin, HttpTransportMixin
+
+__all__ = ["get_slack_agent"]
+
+# All environment variables supported by slack-mcp-server
+# See: https://github.com/korotovsky/slack-mcp-server
+SLACK_ENV_VARS = [
+ # Authentication tokens (one required)
+ "SLACK_MCP_XOXP_TOKEN", # User OAuth token (xoxp-...)
+ "SLACK_MCP_XOXB_TOKEN", # Bot token (xoxb-...) - limited
+ "SLACK_MCP_XOXC_TOKEN", # Browser token (xoxc-...)
+ "SLACK_MCP_XOXD_TOKEN", # Browser cookie d (xoxd-...)
+ # Server configuration
+ "SLACK_MCP_PORT", # Port for SSE/HTTP (default: 13080)
+ "SLACK_MCP_HOST", # Host for SSE/HTTP (default: 127.0.0.1)
+ "SLACK_MCP_API_KEY", # Bearer token for SSE/HTTP transports
+ # Proxy and network settings
+ "SLACK_MCP_PROXY", # Proxy URL for outgoing requests
+ "SLACK_MCP_USER_AGENT", # Custom User-Agent (Enterprise)
+ "SLACK_MCP_CUSTOM_TLS", # Custom TLS (Enterprise Slack)
+ # TLS/SSL settings
+ "SLACK_MCP_SERVER_CA", # Path to CA certificate
+ "SLACK_MCP_SERVER_CA_TOOLKIT", # HTTPToolkit CA
+ "SLACK_MCP_SERVER_CA_INSECURE", # Trust insecure (NOT RECOMMENDED)
+ # Message posting settings
+ "SLACK_MCP_ADD_MESSAGE_TOOL", # Enable posting
+ "SLACK_MCP_ADD_MESSAGE_MARK", # Auto-mark as read
+ "SLACK_MCP_ADD_MESSAGE_UNFURLING", # Enable link unfurling
+ # Cache configuration
+ "SLACK_MCP_USERS_CACHE", # Path to users cache file
+ "SLACK_MCP_CHANNELS_CACHE", # Path to channels cache
+ # Logging
+ "SLACK_MCP_LOG_LEVEL", # debug, info, warn, error
+]
+
+
+def get_slack_agent() -> BaseAgent:
+ """Create and return a Slack MCP agent.
+
+ Supports three authentication methods:
+ 1. XOXP token (User OAuth token)
+ 2. XOXB token (Bot token - limited access, no search)
+ 3. XOXC + XOXD tokens (Browser tokens - stealth mode)
+
+ Transport options:
+ - stdio (default): Direct subprocess communication
+ - sse: Server-Sent Events over HTTP
+ - streamable_http: HTTP transport
+ """
+ has_xoxp = bool(os.getenv("SLACK_MCP_XOXP_TOKEN"))
+ has_xoxb = bool(os.getenv("SLACK_MCP_XOXB_TOKEN"))
+ has_xoxc = bool(os.getenv("SLACK_MCP_XOXC_TOKEN"))
+ has_xoxd = bool(os.getenv("SLACK_MCP_XOXD_TOKEN"))
+
+ # Validate authentication: need xoxp OR xoxb OR (xoxc AND xoxd)
+ if not has_xoxp and not has_xoxb and not (has_xoxc and has_xoxd):
+ required_vars = [
+ "SLACK_MCP_XOXP_TOKEN or SLACK_MCP_XOXB_TOKEN or "
+ "(SLACK_MCP_XOXC_TOKEN and SLACK_MCP_XOXD_TOKEN)"
+ ]
+ else:
+ required_vars = []
+
+ transport = os.getenv("SLACK_MCP_TRANSPORT", "stdio").lower()
+
+ if transport == "stdio":
+ return _create_stdio_agent(required_vars)
+ elif transport in ("sse", "streamable_http"):
+ return _create_http_agent(required_vars, transport)
+ else:
+ raise ValueError(
+ f"Unsupported SLACK_MCP_TRANSPORT: {transport}. "
+ f"Use 'stdio', 'sse', or 'streamable_http'"
+ )
+
+
+def _create_stdio_agent(required_vars: list[str]) -> BaseAgent:
+ """Create a Slack agent using stdio transport (npx subprocess)."""
+
+ class SlackStdioAgent(BaseAgent, StdioTransportMixin):
+ def get_connection_config(self) -> dict[str, Any]:
+ # Build environment with all supported Slack MCP variables
+ env: dict[str, str] = {
+ "PATH": os.environ.get("PATH", ""),
+ }
+
+ # Forward all configured Slack environment variables
+ for var in SLACK_ENV_VARS:
+ value = os.getenv(var)
+ if value:
+ env[var] = value
+
+ # Use npx with -y flag to auto-confirm install
+ return self.build_stdio_config(
+ "npx",
+ ["-y", "slack-mcp-server@latest", "--transport", "stdio"],
+ env
+ )
+
+ return SlackStdioAgent(
+ service_name="slack", required_env_vars=required_vars
+ )
+
+
+def _create_http_agent(required_vars: list[str], transport: str) -> BaseAgent:
+ """Create a Slack agent using SSE or HTTP transport.
+
+ Requires SLACK_MCP_SERVER_URL to be set, pointing to a running
+ slack-mcp-server instance (e.g., http://127.0.0.1:13080/sse).
+
+ Optionally use SLACK_MCP_API_KEY for authentication.
+ """
+
+ class SlackHttpAgent(BaseAgent, HttpTransportMixin):
+ def __init__(self, transport_type: str, **kwargs):
+ super().__init__(**kwargs)
+ self.transport_type = transport_type
+
+ def get_connection_config(self) -> dict[str, Any]:
+ # Get server URL from environment or use default
+ host = os.getenv("SLACK_MCP_HOST", "127.0.0.1")
+ port = os.getenv("SLACK_MCP_PORT", "13080")
+ default_url = f"http://{host}:{port}/sse"
+
+ return self.build_http_config(
+ "slack", default_url, self.transport_type
+ )
+
+ return SlackHttpAgent(
+ transport_type=transport,
+ service_name="slack",
+ required_env_vars=required_vars,
+ )
diff --git a/community_chatbot/mcp_impl/github/__init__.py b/community_chatbot/mcp_impl/github/__init__.py
deleted file mode 100644
index 0b10bf97..00000000
--- a/community_chatbot/mcp_impl/github/__init__.py
+++ /dev/null
@@ -1,5 +0,0 @@
-"""GitHub MCP Agent module."""
-
-from .agent import get_github_agent
-
-__all__ = ["get_github_agent"]
diff --git a/community_chatbot/mcp_impl/github/commands.py b/community_chatbot/mcp_impl/github/commands.py
deleted file mode 100644
index 7f10a559..00000000
--- a/community_chatbot/mcp_impl/github/commands.py
+++ /dev/null
@@ -1,82 +0,0 @@
-from lib import base_commands
-from lib.state import RuntimeState
-from github.agent import get_github_agent
-
-
-__all__ = [
- "list_tools",
- "tool_info",
- "invoke_tool",
- "chat",
- "chat_loop",
- "sessions",
- "clear_session",
- "export_session",
- "health",
-]
-
-
-_github_agent = get_github_agent()
-
-
-async def list_tools(state: RuntimeState) -> int:
- return await base_commands.list_tools(state, _github_agent)
-
-
-async def tool_info(state: RuntimeState, tool_identifier: str) -> int:
- return await base_commands.tool_info(state, _github_agent, tool_identifier)
-
-
-async def invoke_tool(
- state: RuntimeState,
- tool_identifier: str,
- args_json: str | None = None,
-) -> int:
- return await base_commands.invoke_tool(
- state, _github_agent, tool_identifier, args_json
- )
-
-
-async def chat(
- state: RuntimeState,
- message: str,
- session_id: str = "default",
-) -> int:
- return await base_commands.chat(state, _github_agent, message, session_id)
-
-
-async def chat_loop(
- state: RuntimeState,
- session_id: str = "default",
- exit_command: str = "/exit",
- reset_command: str = "/reset",
- prompt_prefix: str | None = None,
-) -> int:
- return await base_commands.chat_loop(
- state,
- _github_agent,
- session_id,
- exit_command,
- reset_command,
- prompt_prefix,
- )
-
-
-async def sessions(state: RuntimeState) -> int:
- return await base_commands.sessions(state)
-
-
-async def clear_session(state: RuntimeState, session_id: str) -> int:
- return await base_commands.clear_session(state, session_id)
-
-
-async def export_session(
- state: RuntimeState,
- session_id: str,
- output_path: str | None = None,
-) -> int:
- return await base_commands.export_session(state, session_id, output_path)
-
-
-async def health(state: RuntimeState) -> int:
- return await base_commands.health(state)
diff --git a/community_chatbot/mcp_impl/github/github_mcp.py b/community_chatbot/mcp_impl/github/github_mcp.py
deleted file mode 100644
index 495e5e85..00000000
--- a/community_chatbot/mcp_impl/github/github_mcp.py
+++ /dev/null
@@ -1,135 +0,0 @@
-import asyncio
-
-from dotenv import load_dotenv
-import typer
-
-from lib.state import RuntimeState
-from github import commands
-
-
-load_dotenv()
-
-state = RuntimeState(service_name="github")
-
-app = typer.Typer(help="GitHub MCP Agent CLI")
-
-
-@app.command("list-tools")
-def list_tools() -> int:
- return asyncio.run(commands.list_tools(state))
-
-
-@app.command("chat")
-def chat(
- message: str = typer.Argument(
- ..., help="Message to send to the agent."
- ),
- session_id: str = typer.Option(
- "default", help="Chat session identifier."
- ),
-) -> int:
- return asyncio.run(
- commands.chat(state, message=message, session_id=session_id)
- )
-
-
-@app.command("chat-loop")
-def chat_loop(
- session_id: str = typer.Option(
- "default",
- help="Chat session identifier to use for the loop.",
- ),
- exit_command: str = typer.Option(
- "/exit",
- help="Command typed alone on a line to end the loop.",
- ),
- reset_command: str = typer.Option(
- "/reset",
- help="Command typed alone on a line to reset the session history.",
- ),
- prompt_prefix: str | None = typer.Option(
- None,
- help="Optional custom prompt prefix displayed before user input.",
- ),
-) -> int:
- return asyncio.run(
- commands.chat_loop(
- state,
- session_id=session_id,
- exit_command=exit_command,
- reset_command=reset_command,
- prompt_prefix=prompt_prefix,
- )
- )
-
-
-@app.command("tool-info")
-def tool_info(
- tool_identifier: str = typer.Argument(
- ..., help="Tool CLI or original name."
- )
-) -> int:
- return asyncio.run(
- commands.tool_info(state, tool_identifier=tool_identifier)
- )
-
-
-@app.command("invoke-tool")
-def invoke_tool(
- tool_identifier: str = typer.Argument(
- ..., help="Tool CLI or original name to invoke."
- ),
- args_json: str = typer.Option(
- "{}",
- help="JSON object containing arguments for the tool.",
- ),
-) -> int:
- return asyncio.run(
- commands.invoke_tool(
- state,
- tool_identifier=tool_identifier,
- args_json=args_json,
- )
- )
-
-
-@app.command("sessions")
-def sessions() -> int:
- return asyncio.run(commands.sessions(state))
-
-
-@app.command("clear-session")
-def clear_session(
- session_id: str = typer.Argument(..., help="Session id to clear."),
-) -> int:
- return asyncio.run(commands.clear_session(state, session_id=session_id))
-
-
-@app.command("export-session")
-def export_session(
- session_id: str = typer.Argument(
- ..., help="Session id to export."
- ),
- output_path: str | None = typer.Option(
- None,
- "--output",
- "-o",
- help="Optional path to write the exported JSON transcript.",
- ),
-) -> int:
- return asyncio.run(
- commands.export_session(
- state,
- session_id=session_id,
- output_path=output_path,
- )
- )
-
-
-@app.command("health")
-def health() -> int:
- return asyncio.run(commands.health(state))
-
-
-if __name__ == "__main__":
- app()
diff --git a/community_chatbot/mcp_impl/lib/__init__.py b/community_chatbot/mcp_impl/lib/__init__.py
index 91e72762..d0e38ee6 100644
--- a/community_chatbot/mcp_impl/lib/__init__.py
+++ b/community_chatbot/mcp_impl/lib/__init__.py
@@ -1,3 +1,3 @@
"""Common modules for MCP agents."""
-__all__ = ["state", "utils", "base_agent", "base_commands"]
+__all__ = ["state", "utils", "base_agent", "base_commands", "base_mcp", "base_transport"]
diff --git a/community_chatbot/mcp_impl/lib/base_mcp.py b/community_chatbot/mcp_impl/lib/base_mcp.py
new file mode 100644
index 00000000..cc58790f
--- /dev/null
+++ b/community_chatbot/mcp_impl/lib/base_mcp.py
@@ -0,0 +1,88 @@
+import asyncio
+import typer
+from typing import Callable
+
+from .base_agent import BaseAgent
+from .state import RuntimeState
+from . import base_commands
+
+
+__all__ = ["create_mcp_cli"]
+
+
+def create_mcp_cli(  # Build a Typer sub-CLI exposing the standard MCP agent commands for one service
+    service_name: str,  # names the RuntimeState and seeds the default description
+    agent_factory: Callable[[], BaseAgent],  # called once, eagerly, when the CLI is constructed
+    description: str | None = None,  # falls back to "<Service> MCP Agent CLI"
+) -> typer.Typer:
+    if description is None:
+        description = f"{service_name.title()} MCP Agent CLI"
+
+    state = RuntimeState(service_name=service_name)  # shared by every command registered below
+    agent = agent_factory()  # single agent instance captured by all command closures
+
+    app = typer.Typer(help=description)
+
+    @app.command("list-tools")
+    def list_tools() -> int:  # each command bridges Typer's sync entry point to async base_commands via asyncio.run
+        return asyncio.run(base_commands.list_tools(state, agent))
+
+    @app.command("chat")
+    def chat(
+        message: str = typer.Argument(..., help="Message to send to the agent."),
+        session_id: str = typer.Option("default", help="Chat session identifier."),
+    ) -> int:
+        return asyncio.run(base_commands.chat(state, agent, message, session_id))
+
+    @app.command("chat-loop")
+    def chat_loop(
+        session_id: str = typer.Option("default", help="Chat session identifier."),
+        exit_command: str = typer.Option("/exit", help="Command to end the loop."),
+        reset_command: str = typer.Option("/reset", help="Command to reset session."),
+        prompt_prefix: str | None = typer.Option(None, help="Custom prompt prefix."),
+    ) -> int:
+        return asyncio.run(
+            base_commands.chat_loop(
+                state, agent, session_id, exit_command, reset_command, prompt_prefix
+            )
+        )
+
+    @app.command("tool-info")
+    def tool_info(
+        tool_identifier: str = typer.Argument(..., help="Tool CLI or original name.")
+    ) -> int:
+        return asyncio.run(base_commands.tool_info(state, agent, tool_identifier))
+
+    @app.command("invoke-tool")
+    def invoke_tool(
+        tool_identifier: str = typer.Argument(..., help="Tool to invoke."),
+        args_json: str = typer.Option("{}", help="JSON arguments for the tool."),
+    ) -> int:
+        return asyncio.run(
+            base_commands.invoke_tool(state, agent, tool_identifier, args_json)
+        )
+
+    @app.command("sessions")
+    def sessions() -> int:  # session commands operate on state only; the agent is not involved
+        return asyncio.run(base_commands.sessions(state))
+
+    @app.command("clear-session")
+    def clear_session(
+        session_id: str = typer.Argument(..., help="Session id to clear."),
+    ) -> int:
+        return asyncio.run(base_commands.clear_session(state, session_id))
+
+    @app.command("export-session")
+    def export_session(
+        session_id: str = typer.Argument(..., help="Session id to export."),
+        output_path: str | None = typer.Option(None, "--output", "-o", help="Output path."),
+    ) -> int:
+        return asyncio.run(
+            base_commands.export_session(state, session_id, output_path)
+        )
+
+    @app.command("health")
+    def health() -> int:
+        return asyncio.run(base_commands.health(state))
+
+    return app  # caller mounts this via app.add_typer(...)
diff --git a/community_chatbot/mcp_impl/lib/base_transport.py b/community_chatbot/mcp_impl/lib/base_transport.py
new file mode 100644
index 00000000..3d07bd4e
--- /dev/null
+++ b/community_chatbot/mcp_impl/lib/base_transport.py
@@ -0,0 +1,54 @@
+import os
+from typing import Any
+
+
+__all__ = [
+ "StdioTransportMixin",
+ "HttpTransportMixin",
+]
+
+
+class StdioTransportMixin:  # helper mixin for agents whose MCP server runs as a local subprocess
+    def build_stdio_config(
+        self,
+        command: str,  # executable to spawn (e.g. "npx")
+        args: list[str],  # argv passed to the command
+        env_vars: dict[str, str],  # environment handed to the subprocess
+    ) -> dict[str, Any]:
+        return {  # connection-config dict shape returned by get_connection_config implementations
+            "transport": "stdio",
+            "command": command,
+            "args": args,
+            "env": env_vars,
+        }
+
+
+class HttpTransportMixin:  # helper mixin for agents that connect to an already-running MCP server over HTTP/SSE
+    def build_http_config(
+        self,
+        service_name: str,  # upper-cased to derive the env var names below
+        default_url: str,  # presumably only consulted when the URL env var is unset, which the check below precludes — verify
+        transport_type: str = "streamable_http",
+    ) -> dict[str, Any]:
+        from .utils import build_connection_config  # local import; presumably avoids a module-level cycle — confirm
+
+        server_url_env = f"{service_name.upper()}_MCP_SERVER_URL"
+        api_key_env = f"{service_name.upper()}_MCP_API_KEY"
+
+        if not os.getenv(server_url_env):  # remote transports require an explicit server URL
+            raise ValueError(
+                f"{server_url_env} is required for {transport_type}"
+            )
+
+        config = build_connection_config(
+            service_name=service_name,
+            server_url_env=server_url_env,
+            default_server_url=default_url,
+            token_env=api_key_env or f"{service_name.upper()}_TOKEN",  # NOTE(review): api_key_env is always a non-empty string, so the _TOKEN fallback is dead — confirm intent
+            bearer_token_env=api_key_env,
+        )
+
+        if transport_type == "sse":  # override; build_connection_config presumably defaults to streamable_http — confirm
+            config["transport"] = "sse"
+
+        return config
diff --git a/community_chatbot/mcp_impl/main.py b/community_chatbot/mcp_impl/main.py
index 510e0b9d..028a08c4 100644
--- a/community_chatbot/mcp_impl/main.py
+++ b/community_chatbot/mcp_impl/main.py
@@ -2,28 +2,37 @@
from pathlib import Path
import typer
-from github import github_mcp
-from slack import slack_mcp
+from dotenv import load_dotenv
+
+load_dotenv()
sys.path.insert(0, str(Path(__file__).parent))
+from lib.base_mcp import create_mcp_cli
+from agents import get_github_agent, get_jira_agent, get_slack_agent
+
app = typer.Typer(
help="Community AI MCP Agent - Unified access to GitHub, Slack, and more"
)
app.add_typer(
- github_mcp.app,
+ create_mcp_cli("github", get_github_agent, "GitHub MCP Agent CLI"),
name="github",
- help="GitHub MCP Agent - Interact with GitHub repositories"
+ help="GitHub MCP Agent - Interact with GitHub repositories",
)
app.add_typer(
- slack_mcp.app,
- name="slack",
- help="Slack MCP Agent - Interact with Slack workspaces"
+ create_mcp_cli("jira", get_jira_agent, "Jira MCP Agent CLI"),
+ name="jira",
+ help="Jira MCP Agent - Interact with Jira projects",
)
+app.add_typer(
+ create_mcp_cli("slack", get_slack_agent, "Slack MCP Agent CLI"),
+ name="slack",
+ help="Slack MCP Agent - Interact with Slack workspaces",
+)
if __name__ == "__main__":
app()
diff --git a/community_chatbot/mcp_impl/package.json b/community_chatbot/mcp_impl/package.json
index 1ddc4ba3..e207da29 100644
--- a/community_chatbot/mcp_impl/package.json
+++ b/community_chatbot/mcp_impl/package.json
@@ -1,13 +1,9 @@
{
- "name": "community-mcp-impl",
+ "name": "mcp-agents",
"version": "1.0.0",
- "description": "MCP Implementation for Community AI Agents",
+ "description": "Node.js MCP server dependencies for Slack and other stdio-based agents",
"private": true,
"dependencies": {
- "slack-mcp-server": "latest",
- "slack-mcp-server-windows-amd64": "1.1.26"
- },
- "engines": {
- "node": ">=18.0.0"
+ "slack-mcp-server": "latest"
}
}
diff --git a/community_chatbot/mcp_impl/slack/README.md b/community_chatbot/mcp_impl/slack/README.md
deleted file mode 100644
index ee1cb7da..00000000
--- a/community_chatbot/mcp_impl/slack/README.md
+++ /dev/null
@@ -1,68 +0,0 @@
-# Info on Transport Configuration
-
-## Option 1: stdio Transport (Default - Recommended for Getting Started)
-
-Runs the Slack MCP server as a subprocess using npx.
-
-**Configuration** (`.env`):
-```env
-SLACK_MCP_TRANSPORT=stdio
-# Authentication tokens (see above)
-SLACK_MCP_XOXP_TOKEN=xoxp-your-token-here
-```
-
-**Requirements**:
-- Node.js and npx installed
-- Internet connection (to download slack-mcp-server@latest)
-
-**How it works**:
-- Agent automatically spawns `npx -y slack-mcp-server@latest --transport stdio`
-- Server runs as subprocess, terminated when agent stops
-- No manual server management needed
-
-## Option 2: SSE Transport (Server-Sent Events)
-
-Connect to a separately running Slack MCP server via HTTP.
-
-**Start the server** (in a separate terminal):
-```bash
-# Using npx
-npx -y slack-mcp-server@latest --transport sse
-
-# Or using Docker
-docker run -d -p 13080:13080 \
- -e SLACK_MCP_XOXP_TOKEN=xoxp-... \
- ghcr.io/korotovsky/slack-mcp-server \
- mcp-server --transport sse
-```
-
-**Configuration** (`.env`):
-```env
-SLACK_MCP_TRANSPORT=sse
-SLACK_MCP_SERVER_URL=http://127.0.0.1:13080/sse
-SLACK_MCP_API_KEY=your-api-key-here # Optional, for authentication
-# Authentication tokens
-SLACK_MCP_XOXP_TOKEN=xoxp-your-token-here
-```
-
-**When to use**:
-- Multiple clients connecting to same server
-- Remote server deployment
-- Better debugging (server logs separate from agent)
-
-## Option 3: Docker Compose Deployment
-
-For production or team usage:
-
-```bash
-cd community_chatbot/mcp/slack
-wget -O docker-compose.yml https://github.com/korotovsky/slack-mcp-server/releases/latest/download/docker-compose.yml
-wget -O .env https://github.com/korotovsky/slack-mcp-server/releases/latest/download/default.env.dist
-
-# Edit .env with your tokens
-nano .env
-
-# Start services
-docker network create app-tier
-docker-compose up -d
-```
\ No newline at end of file
diff --git a/community_chatbot/mcp_impl/slack/__init__.py b/community_chatbot/mcp_impl/slack/__init__.py
deleted file mode 100644
index 11e07583..00000000
--- a/community_chatbot/mcp_impl/slack/__init__.py
+++ /dev/null
@@ -1,5 +0,0 @@
-from .agent import get_slack_agent
-
-__all__ = [
- "get_slack_agent",
-]
diff --git a/community_chatbot/mcp_impl/slack/agent.py b/community_chatbot/mcp_impl/slack/agent.py
deleted file mode 100644
index 9a40119e..00000000
--- a/community_chatbot/mcp_impl/slack/agent.py
+++ /dev/null
@@ -1,141 +0,0 @@
-import os
-from typing import Any
-from lib.base_agent import BaseAgent
-
-__all__ = [
- "get_slack_agent",
-]
-
-
-def get_slack_agent() -> BaseAgent:
- """
- Create and configure a Slack MCP agent.
-
- Supports multiple authentication modes:
- - XOXP (OAuth token): Recommended, more secure
- - XOXC/XOXD (Browser tokens): Stealth mode, no additional permissions
-
- Supports multiple transports:
- - stdio: Default, runs server as subprocess (npx slack-mcp-server@latest)
- - sse: Server-Sent Events, connects to separate running server
- - streamable_http: HTTP transport for remote servers
- """
-
- # Check for authentication tokens
- has_xoxp = bool(os.getenv("SLACK_MCP_XOXP_TOKEN"))
- has_xoxc = bool(os.getenv("SLACK_MCP_XOXC_TOKEN"))
- has_xoxd = bool(os.getenv("SLACK_MCP_XOXD_TOKEN"))
-
- # Validate authentication
- if not has_xoxp and not (has_xoxc and has_xoxd):
- required_vars = [
- "SLACK_MCP_XOXP_TOKEN or (SLACK_MCP_XOXC_TOKEN and SLACK_MCP_XOXD_TOKEN)"
- ]
- else:
- required_vars = []
-
- # Determine transport and connection configuration
- transport = os.getenv("SLACK_MCP_TRANSPORT", "stdio").lower()
-
- if transport == "stdio":
- # stdio transport: Run npx slack-mcp-server as subprocess
- return _create_stdio_agent(required_vars)
- elif transport in ("sse", "streamable_http"):
- # SSE/HTTP transport: Connect to running server
- return _create_http_agent(required_vars, transport)
- else:
- raise ValueError(
- f"Unsupported SLACK_MCP_TRANSPORT: {transport}. "
- f"Supported values: 'stdio', 'sse', 'streamable_http'"
- )
-
-
-def _create_stdio_agent(required_vars: list[str]) -> BaseAgent:
- """Create Slack agent with stdio transport (npx subprocess)."""
-
- class SlackStdioAgent(BaseAgent):
- def get_connection_config(self) -> dict[str, Any]:
- """Build stdio connection config for local slack-mcp-server."""
- # Pass authentication tokens to the server
- env_vars = {
- "PATH": os.environ.get("PATH", ""),
- "SLACK_MCP_XOXP_TOKEN": os.getenv(
- "SLACK_MCP_XOXP_TOKEN", ""
- ),
- "SLACK_MCP_XOXC_TOKEN": os.getenv(
- "SLACK_MCP_XOXC_TOKEN", ""
- ),
- "SLACK_MCP_XOXD_TOKEN": os.getenv(
- "SLACK_MCP_XOXD_TOKEN", ""
- ),
- "SLACK_MCP_ADD_MESSAGE_TOOL": os.getenv(
- "SLACK_MCP_ADD_MESSAGE_TOOL", ""
- ),
- }
-
- # Use local node_modules binary (installed via package.json)
- import pathlib
- project_root = pathlib.Path(__file__).parent.parent
- local_bin = (
- project_root / "node_modules" / ".bin"
- / "slack-mcp-server.cmd"
- )
-
- return {
- "transport": "stdio",
- "command": str(local_bin),
- "args": ["--transport", "stdio"],
- "env": env_vars,
- }
-
- return SlackStdioAgent(
- service_name="slack",
- required_env_vars=required_vars,
- )
-
-
-def _create_http_agent(required_vars: list[str], transport: str) -> BaseAgent:
- """Create Slack agent with SSE/HTTP transport."""
-
- class SlackHttpAgent(BaseAgent):
- def __init__(self, transport_type: str, **kwargs):
- super().__init__(**kwargs)
- self.transport_type = transport_type
-
- def validate_environment(self) -> None:
- """Validate environment for HTTP/SSE transport."""
- super().validate_environment()
-
- # For HTTP/SSE, we need server URL
- if not os.getenv("SLACK_MCP_SERVER_URL"):
- raise ValueError(
- "SLACK_MCP_SERVER_URL is required for SSE/HTTP transport. "
- "Example: http://127.0.0.1:13080/sse"
- )
-
- def get_connection_config(self) -> dict[str, Any]:
- """Build HTTP/SSE connection config."""
- from lib.utils import build_connection_config
-
- server_url = os.getenv("SLACK_MCP_SERVER_URL", "http://127.0.0.1:13080/sse")
-
- # Build base config
- config = build_connection_config(
- service_name="slack",
- server_url_env="SLACK_MCP_SERVER_URL",
- default_server_url=server_url,
- token_env=None, # Not used for HTTP/SSE
- bearer_token_env="SLACK_MCP_API_KEY",
- )
-
- # Override transport if using SSE
- if self.transport_type == "sse":
- config["transport"] = "sse"
-
- return config
-
- return SlackHttpAgent(
- transport_type=transport,
- service_name="slack",
- required_env_vars=required_vars,
- )
diff --git a/community_chatbot/mcp_impl/slack/commands.py b/community_chatbot/mcp_impl/slack/commands.py
deleted file mode 100644
index 50af10ec..00000000
--- a/community_chatbot/mcp_impl/slack/commands.py
+++ /dev/null
@@ -1,82 +0,0 @@
-from lib import base_commands
-from lib.state import RuntimeState
-from slack.agent import get_slack_agent
-
-
-__all__ = [
- "list_tools",
- "tool_info",
- "invoke_tool",
- "chat",
- "chat_loop",
- "sessions",
- "clear_session",
- "export_session",
- "health",
-]
-
-
-_slack_agent = get_slack_agent()
-
-
-async def list_tools(state: RuntimeState) -> int:
- return await base_commands.list_tools(state, _slack_agent)
-
-
-async def tool_info(state: RuntimeState, tool_identifier: str) -> int:
- return await base_commands.tool_info(state, _slack_agent, tool_identifier)
-
-
-async def invoke_tool(
- state: RuntimeState,
- tool_identifier: str,
- args_json: str | None = None,
-) -> int:
- return await base_commands.invoke_tool(
- state, _slack_agent, tool_identifier, args_json
- )
-
-
-async def chat(
- state: RuntimeState,
- message: str,
- session_id: str = "default",
-) -> int:
- return await base_commands.chat(state, _slack_agent, message, session_id)
-
-
-async def chat_loop(
- state: RuntimeState,
- session_id: str = "default",
- exit_command: str = "/exit",
- reset_command: str = "/reset",
- prompt_prefix: str | None = None,
-) -> int:
- return await base_commands.chat_loop(
- state,
- _slack_agent,
- session_id,
- exit_command,
- reset_command,
- prompt_prefix,
- )
-
-
-async def sessions(state: RuntimeState) -> int:
- return await base_commands.sessions(state)
-
-
-async def clear_session(state: RuntimeState, session_id: str) -> int:
- return await base_commands.clear_session(state, session_id)
-
-
-async def export_session(
- state: RuntimeState,
- session_id: str,
- output_path: str | None = None,
-) -> int:
- return await base_commands.export_session(state, session_id, output_path)
-
-
-async def health(state: RuntimeState) -> int:
- return await base_commands.health(state)
diff --git a/community_chatbot/mcp_impl/slack/slack_mcp.py b/community_chatbot/mcp_impl/slack/slack_mcp.py
deleted file mode 100644
index 5de8a9e6..00000000
--- a/community_chatbot/mcp_impl/slack/slack_mcp.py
+++ /dev/null
@@ -1,144 +0,0 @@
-import asyncio
-
-from dotenv import load_dotenv
-import typer
-
-from lib.state import RuntimeState
-from slack import commands
-
-
-load_dotenv()
-
-state = RuntimeState(service_name="slack")
-
-app = typer.Typer(help="Slack MCP Agent CLI")
-
-
-@app.command("list-tools")
-def list_tools() -> int:
- """List all available Slack MCP tools."""
- return asyncio.run(commands.list_tools(state))
-
-
-@app.command("chat")
-def chat(
- message: str = typer.Argument(
- ..., help="Message to send to the agent."
- ),
- session_id: str = typer.Option(
- "default", help="Chat session identifier."
- ),
-) -> int:
- """Send a single message to the Slack agent."""
- return asyncio.run(
- commands.chat(state, message=message, session_id=session_id)
- )
-
-
-@app.command("chat-loop")
-def chat_loop(
- session_id: str = typer.Option(
- "default",
- help="Chat session identifier to use for the loop.",
- ),
- exit_command: str = typer.Option(
- "/exit",
- help="Command typed alone on a line to end the loop.",
- ),
- reset_command: str = typer.Option(
- "/reset",
- help="Command typed alone on a line to reset the session history.",
- ),
- prompt_prefix: str | None = typer.Option(
- None,
- help="Optional custom prompt prefix displayed before user input.",
- ),
-) -> int:
- """Start an interactive chat session with the Slack agent."""
- return asyncio.run(
- commands.chat_loop(
- state,
- session_id=session_id,
- exit_command=exit_command,
- reset_command=reset_command,
- prompt_prefix=prompt_prefix,
- )
- )
-
-
-@app.command("tool-info")
-def tool_info(
- tool_identifier: str = typer.Argument(
- ..., help="Tool CLI or original name."
- )
-) -> int:
- """Show detailed information about a specific tool."""
- return asyncio.run(
- commands.tool_info(state, tool_identifier=tool_identifier)
- )
-
-
-@app.command("invoke-tool")
-def invoke_tool(
- tool_identifier: str = typer.Argument(
- ..., help="Tool CLI or original name to invoke."
- ),
- args_json: str = typer.Option(
- "{}",
- help="JSON object containing arguments for the tool.",
- ),
-) -> int:
- """Directly invoke a Slack MCP tool with JSON arguments."""
- return asyncio.run(
- commands.invoke_tool(
- state,
- tool_identifier=tool_identifier,
- args_json=args_json,
- )
- )
-
-
-@app.command("sessions")
-def sessions() -> int:
- """List all active chat sessions."""
- return asyncio.run(commands.sessions(state))
-
-
-@app.command("clear-session")
-def clear_session(
- session_id: str = typer.Argument(..., help="Session id to clear."),
-) -> int:
- """Clear a specific chat session."""
- return asyncio.run(commands.clear_session(state, session_id=session_id))
-
-
-@app.command("export-session")
-def export_session(
- session_id: str = typer.Argument(
- ..., help="Session id to export."
- ),
- output_path: str | None = typer.Option(
- None,
- "--output",
- "-o",
- help="Optional path to write the exported JSON transcript.",
- ),
-) -> int:
- """Export a chat session to JSON format."""
- return asyncio.run(
- commands.export_session(
- state,
- session_id=session_id,
- output_path=output_path,
- )
- )
-
-
-@app.command("health")
-def health() -> int:
- """Check the health status of the Slack agent."""
- return asyncio.run(commands.health(state))
-
-
-if __name__ == "__main__":
- app()
From a4da9206bee3ad312f6ee4892562a503a654ddd9 Mon Sep 17 00:00:00 2001
From: Raghav Gupta <142162663+Raghav-56@users.noreply.github.com>
Date: Tue, 20 Jan 2026 22:30:48 +0530
Subject: [PATCH 5/5] chore: openMF#46 rename community_chatbot/mcp_impl to community_mcp_servers
---
.../mcp_impl => community_mcp_servers}/.env.example | 0
{community_chatbot/mcp_impl => community_mcp_servers}/.gitignore | 0
{community_chatbot/mcp_impl => community_mcp_servers}/README.md | 0
.../mcp_impl => community_mcp_servers}/agents/__init__.py | 0
.../mcp_impl => community_mcp_servers}/agents/github_agent.py | 0
.../mcp_impl => community_mcp_servers}/agents/jira_agent.py | 0
.../mcp_impl => community_mcp_servers}/agents/slack_agent.py | 0
.../mcp_impl => community_mcp_servers}/lib/__init__.py | 0
.../mcp_impl => community_mcp_servers}/lib/base_agent.py | 0
.../mcp_impl => community_mcp_servers}/lib/base_commands.py | 0
.../mcp_impl => community_mcp_servers}/lib/base_mcp.py | 0
.../mcp_impl => community_mcp_servers}/lib/base_transport.py | 0
.../mcp_impl => community_mcp_servers}/lib/state.py | 0
.../mcp_impl => community_mcp_servers}/lib/utils.py | 0
.../mcp_impl => community_mcp_servers}/llm_providers/__init__.py | 0
.../mcp_impl => community_mcp_servers}/llm_providers/gemini.py | 0
.../mcp_impl => community_mcp_servers}/llm_providers/groq_llm.py | 0
.../llm_providers/lightning_llm.py | 0
{community_chatbot/mcp_impl => community_mcp_servers}/main.py | 0
.../mcp_impl => community_mcp_servers}/package.json | 0
.../mcp_impl => community_mcp_servers}/pyproject.toml | 0
.../mcp_impl => community_mcp_servers}/requirements.txt | 0
22 files changed, 0 insertions(+), 0 deletions(-)
rename {community_chatbot/mcp_impl => community_mcp_servers}/.env.example (100%)
rename {community_chatbot/mcp_impl => community_mcp_servers}/.gitignore (100%)
rename {community_chatbot/mcp_impl => community_mcp_servers}/README.md (100%)
rename {community_chatbot/mcp_impl => community_mcp_servers}/agents/__init__.py (100%)
rename {community_chatbot/mcp_impl => community_mcp_servers}/agents/github_agent.py (100%)
rename {community_chatbot/mcp_impl => community_mcp_servers}/agents/jira_agent.py (100%)
rename {community_chatbot/mcp_impl => community_mcp_servers}/agents/slack_agent.py (100%)
rename {community_chatbot/mcp_impl => community_mcp_servers}/lib/__init__.py (100%)
rename {community_chatbot/mcp_impl => community_mcp_servers}/lib/base_agent.py (100%)
rename {community_chatbot/mcp_impl => community_mcp_servers}/lib/base_commands.py (100%)
rename {community_chatbot/mcp_impl => community_mcp_servers}/lib/base_mcp.py (100%)
rename {community_chatbot/mcp_impl => community_mcp_servers}/lib/base_transport.py (100%)
rename {community_chatbot/mcp_impl => community_mcp_servers}/lib/state.py (100%)
rename {community_chatbot/mcp_impl => community_mcp_servers}/lib/utils.py (100%)
rename {community_chatbot/mcp_impl => community_mcp_servers}/llm_providers/__init__.py (100%)
rename {community_chatbot/mcp_impl => community_mcp_servers}/llm_providers/gemini.py (100%)
rename {community_chatbot/mcp_impl => community_mcp_servers}/llm_providers/groq_llm.py (100%)
rename {community_chatbot/mcp_impl => community_mcp_servers}/llm_providers/lightning_llm.py (100%)
rename {community_chatbot/mcp_impl => community_mcp_servers}/main.py (100%)
rename {community_chatbot/mcp_impl => community_mcp_servers}/package.json (100%)
rename {community_chatbot/mcp_impl => community_mcp_servers}/pyproject.toml (100%)
rename {community_chatbot/mcp_impl => community_mcp_servers}/requirements.txt (100%)
diff --git a/community_chatbot/mcp_impl/.env.example b/community_mcp_servers/.env.example
similarity index 100%
rename from community_chatbot/mcp_impl/.env.example
rename to community_mcp_servers/.env.example
diff --git a/community_chatbot/mcp_impl/.gitignore b/community_mcp_servers/.gitignore
similarity index 100%
rename from community_chatbot/mcp_impl/.gitignore
rename to community_mcp_servers/.gitignore
diff --git a/community_chatbot/mcp_impl/README.md b/community_mcp_servers/README.md
similarity index 100%
rename from community_chatbot/mcp_impl/README.md
rename to community_mcp_servers/README.md
diff --git a/community_chatbot/mcp_impl/agents/__init__.py b/community_mcp_servers/agents/__init__.py
similarity index 100%
rename from community_chatbot/mcp_impl/agents/__init__.py
rename to community_mcp_servers/agents/__init__.py
diff --git a/community_chatbot/mcp_impl/agents/github_agent.py b/community_mcp_servers/agents/github_agent.py
similarity index 100%
rename from community_chatbot/mcp_impl/agents/github_agent.py
rename to community_mcp_servers/agents/github_agent.py
diff --git a/community_chatbot/mcp_impl/agents/jira_agent.py b/community_mcp_servers/agents/jira_agent.py
similarity index 100%
rename from community_chatbot/mcp_impl/agents/jira_agent.py
rename to community_mcp_servers/agents/jira_agent.py
diff --git a/community_chatbot/mcp_impl/agents/slack_agent.py b/community_mcp_servers/agents/slack_agent.py
similarity index 100%
rename from community_chatbot/mcp_impl/agents/slack_agent.py
rename to community_mcp_servers/agents/slack_agent.py
diff --git a/community_chatbot/mcp_impl/lib/__init__.py b/community_mcp_servers/lib/__init__.py
similarity index 100%
rename from community_chatbot/mcp_impl/lib/__init__.py
rename to community_mcp_servers/lib/__init__.py
diff --git a/community_chatbot/mcp_impl/lib/base_agent.py b/community_mcp_servers/lib/base_agent.py
similarity index 100%
rename from community_chatbot/mcp_impl/lib/base_agent.py
rename to community_mcp_servers/lib/base_agent.py
diff --git a/community_chatbot/mcp_impl/lib/base_commands.py b/community_mcp_servers/lib/base_commands.py
similarity index 100%
rename from community_chatbot/mcp_impl/lib/base_commands.py
rename to community_mcp_servers/lib/base_commands.py
diff --git a/community_chatbot/mcp_impl/lib/base_mcp.py b/community_mcp_servers/lib/base_mcp.py
similarity index 100%
rename from community_chatbot/mcp_impl/lib/base_mcp.py
rename to community_mcp_servers/lib/base_mcp.py
diff --git a/community_chatbot/mcp_impl/lib/base_transport.py b/community_mcp_servers/lib/base_transport.py
similarity index 100%
rename from community_chatbot/mcp_impl/lib/base_transport.py
rename to community_mcp_servers/lib/base_transport.py
diff --git a/community_chatbot/mcp_impl/lib/state.py b/community_mcp_servers/lib/state.py
similarity index 100%
rename from community_chatbot/mcp_impl/lib/state.py
rename to community_mcp_servers/lib/state.py
diff --git a/community_chatbot/mcp_impl/lib/utils.py b/community_mcp_servers/lib/utils.py
similarity index 100%
rename from community_chatbot/mcp_impl/lib/utils.py
rename to community_mcp_servers/lib/utils.py
diff --git a/community_chatbot/mcp_impl/llm_providers/__init__.py b/community_mcp_servers/llm_providers/__init__.py
similarity index 100%
rename from community_chatbot/mcp_impl/llm_providers/__init__.py
rename to community_mcp_servers/llm_providers/__init__.py
diff --git a/community_chatbot/mcp_impl/llm_providers/gemini.py b/community_mcp_servers/llm_providers/gemini.py
similarity index 100%
rename from community_chatbot/mcp_impl/llm_providers/gemini.py
rename to community_mcp_servers/llm_providers/gemini.py
diff --git a/community_chatbot/mcp_impl/llm_providers/groq_llm.py b/community_mcp_servers/llm_providers/groq_llm.py
similarity index 100%
rename from community_chatbot/mcp_impl/llm_providers/groq_llm.py
rename to community_mcp_servers/llm_providers/groq_llm.py
diff --git a/community_chatbot/mcp_impl/llm_providers/lightning_llm.py b/community_mcp_servers/llm_providers/lightning_llm.py
similarity index 100%
rename from community_chatbot/mcp_impl/llm_providers/lightning_llm.py
rename to community_mcp_servers/llm_providers/lightning_llm.py
diff --git a/community_chatbot/mcp_impl/main.py b/community_mcp_servers/main.py
similarity index 100%
rename from community_chatbot/mcp_impl/main.py
rename to community_mcp_servers/main.py
diff --git a/community_chatbot/mcp_impl/package.json b/community_mcp_servers/package.json
similarity index 100%
rename from community_chatbot/mcp_impl/package.json
rename to community_mcp_servers/package.json
diff --git a/community_chatbot/mcp_impl/pyproject.toml b/community_mcp_servers/pyproject.toml
similarity index 100%
rename from community_chatbot/mcp_impl/pyproject.toml
rename to community_mcp_servers/pyproject.toml
diff --git a/community_chatbot/mcp_impl/requirements.txt b/community_mcp_servers/requirements.txt
similarity index 100%
rename from community_chatbot/mcp_impl/requirements.txt
rename to community_mcp_servers/requirements.txt