Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
36 changes: 0 additions & 36 deletions .github/workflows/community-chatbot-backend.yml

This file was deleted.

46 changes: 0 additions & 46 deletions .github/workflows/community-chatbot-frontend.yml

This file was deleted.

Binary file modified .gitignore
Binary file not shown.
127 changes: 127 additions & 0 deletions MCP_Enhancement/agent_start.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,127 @@
from fastmcp import FastMCP
import os
from dotenv import load_dotenv
from atlassian import Jira
from github import Github

# Initialize the FastMCP server that exposes the tools below.
mcp = FastMCP("Mifos Knowledge Librarian")

# Load credentials from a local .env file into the process environment.
load_dotenv()

# Initialize API clients once at import time.
# NOTE(review): os.environ[...] raises KeyError when a credential is
# missing, so the server fails fast on startup rather than mid-request.
jira_client = Jira(
    url=os.environ["JIRA_INSTANCE_URL"],
    username=os.environ["JIRA_USERNAME"],
    password=os.environ["JIRA_API_TOKEN"]
)
github_client = Github(os.environ["GITHUB_TOKEN"])

@mcp.tool()
def search_jira_tickets(query: str):
    """
    Search for Jira tickets using JQL.

    Used for tracking feature progress and identifying developer consensus.
    Returns a list of dicts (key, summary, last three comments) on success,
    or an error string on failure.
    """
    try:
        response = jira_client.jql(query, limit=5)
        summaries = []
        for issue in response['issues']:
            issue_key = issue['key']
            # The JQL result only carries key/summary; fetch the three
            # most recent comments separately for each issue.
            raw_comments = jira_client.get_issue_comments(issue_key)['comments']
            recent = [
                f"{comment['author']['displayName']}: {comment['body']}"
                for comment in raw_comments[-3:]
            ]
            summaries.append({
                "key": issue_key,
                "summary": issue['fields']['summary'],
                "comments": recent,
            })
        return summaries
    except Exception as e:
        return f"Error searching Jira: {e}"

@mcp.tool()
def get_github_pr_details(pr_number: int, repo_name: str):
    """
    Fetch description and changed files from a specific GitHub Pull Request.

    Used for synthesizing release notes. Returns a dict with the PR title,
    body and changed file names, or an error string on failure.
    """
    try:
        pull = github_client.get_repo(repo_name).get_pull(pr_number)
        changed = [changed_file.filename for changed_file in pull.get_files()]
        return {
            "title": pull.title,
            "description": pull.body,
            "changed_files": changed,
        }
    except Exception as e:
        return f"Error fetching GitHub PR: {e}"

@mcp.tool()
def generate_knowledge_summary(jira_key: str, repo_name: str = "apache/fineract"):
    """
    Takes a Jira Key, searches for a corresponding GitHub PR in a specified repo,
    and returns a combined summary of the ticket and the PR.
    Defaults to the 'apache/fineract' repository.

    Returns a dict on success, or an error string when the Jira ticket
    cannot be retrieved or an unexpected exception occurs.
    """
    try:
        # 1. Get Jira ticket info. search_jira_tickets returns an error
        # *string* (not a list) on failure, hence the isinstance check.
        jira_info_list = search_jira_tickets(f'key = {jira_key}')
        if not jira_info_list or isinstance(jira_info_list, str):
            return f"Could not retrieve Jira ticket {jira_key}."
        jira_info = jira_info_list[0]

        # 2. Find a PR in the given repo that mentions the Jira key.
        query = f'repo:{repo_name} is:pr "{jira_key}"'
        prs = github_client.search_issues(query)
        if prs.totalCount == 0:
            return {
                "jira_summary": jira_info['summary'],
                "jira_comments": jira_info['comments'],
                "github_pr": "No corresponding GitHub PR found."
            }

        # 3. Get PR details from the first match.
        pr_number = prs[0].number
        pr_details = get_github_pr_details(pr_number, repo_name)
        # get_github_pr_details returns an error *string* on failure;
        # calling .get() on it would raise AttributeError and mask the
        # real error behind a generic "knowledge summary" message.
        if isinstance(pr_details, str):
            return {
                "jira_key": jira_key,
                "jira_summary": jira_info['summary'],
                "jira_comments": jira_info['comments'],
                "github_pr": pr_details,
            }

        # 4. Combine and return.
        return {
            "jira_key": jira_key,
            "jira_summary": jira_info['summary'],
            "jira_comments": jira_info['comments'],
            "github_pr_title": pr_details.get('title'),
            "github_pr_description": pr_details.get('description'),
            "github_pr_changed_files": pr_details.get('changed_files'),
        }
    except Exception as e:
        return f"Error generating knowledge summary: {e}"

@mcp.tool()
def search_project_docs(query: str):
    """
    Search through static Mifos documentation and READMEs.
    Use this for architectural questions or project setup guides.

    Scans ./docs (or the current directory if ./docs is absent) for .md
    files containing the query (case-insensitive) and returns the first
    500 characters of each match, or a not-found message.
    """
    docs_path = "./docs"
    if not os.path.exists(docs_path):
        docs_path = "."  # Fallback: scan from the current directory

    needle = query.lower()
    results = []
    for root, dirs, files in os.walk(docs_path):
        for file in files:
            if not file.endswith(".md"):
                continue
            # A single unreadable or non-UTF-8 file must not abort the
            # whole search; decode leniently and skip files that cannot
            # be opened at all.
            try:
                with open(os.path.join(root, file), 'r', encoding='utf-8',
                          errors='replace') as f:
                    content = f.read()
            except OSError:
                continue
            if needle in content.lower():
                results.append(f"--- {file} ---\n{content[:500]}...")

    return "\n".join(results) if results else "No matching documentation found."

if __name__ == "__main__":
    # Start the MCP server when executed as a script.
    mcp.run()
Empty file added MCP_Enhancement/src/__init__.py
Empty file.
117 changes: 62 additions & 55 deletions Repo Clone Automation/repo_cloner.py
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

We aren't really using this, so there's no need to make any changes here — it's mostly legacy code, and I'm unable to understand what you are trying to change.

Copy link
Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Understood! I've reverted all changes to repo_cloner.py to keep this PR strictly focused on the FastMCP unification and avoid adding noise to legacy code. Thanks for clarifying its status.

Original file line number Diff line number Diff line change
Expand Up @@ -2,68 +2,75 @@
import time
import zipfile
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

def clone_repository(repo_url, download_dir):
    """
    Clones a GitHub repository by downloading it as a ZIP file and extracting it.

    Drives a Chrome browser via Selenium: opens the repo page, clicks the
    "Code" button, then "Download ZIP", waits up to 60 seconds for the
    download, and extracts the archive into <download_dir>/extracted.
    """
    # Ensure the download directory exists (absolute path expected).
    if not os.path.exists(download_dir):
        os.makedirs(download_dir)

    chrome_options = webdriver.ChromeOptions()
    prefs = {
        "download.default_directory": download_dir,
        "download.prompt_for_download": False,
        "download.directory_upgrade": True,
        "safebrowsing.enabled": True
    }
    chrome_options.add_experimental_option("prefs", prefs)

    # Initialize the Chrome driver (ensure chromedriver is in your PATH)
    driver = webdriver.Chrome(options=chrome_options)

    try:
        # Navigate to the GitHub repository page
        driver.get(repo_url)

        # Wait until the "Code" button is clickable and click it.
        # NOTE(review): this XPath depends on GitHub's current frontend
        # markup and may break when GitHub updates its UI.
        wait = WebDriverWait(driver, 10)
        code_button = wait.until(EC.element_to_be_clickable((By.XPATH, "//button[@data-variant='primary']//span[contains(@class, 'prc-Button-Label-pTQ')]")))
        code_button.click()

        # this is the download zip locator which is unique
        download_zip = wait.until(EC.element_to_be_clickable((By.XPATH, "//span[contains(text(), 'Download ZIP')]")))
        download_zip.click()

        # Poll the download folder until a .zip file appears or we time out.
        zip_filename = None
        timeout = 60  # seconds
        start_time = time.time()
        while time.time() - start_time < timeout:
            for filename in os.listdir(download_dir):
                if filename.endswith(".zip"):
                    zip_filename = filename
                    break
            if zip_filename:
                break
            time.sleep(1)

        if not zip_filename:
            print("Download timed out or ZIP file not found.")
        else:
            zip_path = os.path.join(download_dir, zip_filename)
            print(f"Downloaded file: {zip_path}")

            # Unzip the downloaded archive into a subfolder called "extracted"
            extract_dir = os.path.join(download_dir, "extracted")
            if not os.path.exists(extract_dir):
                os.makedirs(extract_dir)
            with zipfile.ZipFile(zip_path, 'r') as zip_ref:
                zip_ref.extractall(extract_dir)
            print(f"Extracted ZIP contents to: {extract_dir}")

    finally:
        # Always shut the browser down, even on failure.
        driver.quit()
if __name__ == "__main__":
    # Original hardcoded values
    # NOTE(review): the ?tab=readme-ov-file query string is preserved from
    # the original script; presumably GitHub ignores it — confirm.
    repo_to_clone = "https://github.com/openMF/mifos-gazelle?tab=readme-ov-file"
    download_destination = os.path.abspath("downloads")
    clone_repository(repo_to_clone, download_destination)
6 changes: 3 additions & 3 deletions Slack_scraper_bot/scripts/pii_remocval.py
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Why are you moving the imports inside the function?

Copy link
Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

You're right, thank you for pointing that out! I've moved the imports back to the top level in the latest commit to follow standard Python conventions. I had originally moved them to troubleshoot a local environment conflict, but that is no longer necessary.

Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
import scrubadub
import scrubadub_spacy
import re
import sys
import scrubadub
import scrubadub_spacy

def create_scrubber():
scrubber = scrubadub.Scrubber()
Expand All @@ -19,7 +19,7 @@ def remove_user_tags(text):
return re.sub(timestamp_user_pattern, lambda m: m.group(0).split('] User:')[0] + ']', text)

def remove_name_lines(text):
    """Remove every line containing a self-introduction phrase.

    Matches 'my name is', 'I am', or "I'm" case-insensitively and deletes
    the entire line, including a final line with no trailing newline.
    """
    # '$\n?' with re.MULTILINE also matches the last line of the text when
    # it lacks a trailing newline, which a literal '\n' would miss.
    name_pattern = r'^.*(?:my name is|I am|I\'m).*$\n?'
    return re.sub(name_pattern, '', text, flags=re.MULTILINE | re.IGNORECASE)

def process_file(input_path, output_path):
Expand Down
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.