-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy path.env.example
More file actions
105 lines (89 loc) · 4.23 KB
/
.env.example
File metadata and controls
105 lines (89 loc) · 4.23 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
# =================================================================
# DocuBot - AI Documentation Generator Configuration
# =================================================================
# =================================================================
# LLM Provider Configuration (REQUIRED)
# =================================================================
# Supported providers: openai, groq, ollama, openrouter, or custom
LLM_PROVIDER=custom
# Base URL for the LLM API
# OpenAI: https://api.openai.com/v1
# Groq: https://api.groq.com/openai/v1
# Ollama: http://localhost:11434/v1
# OpenRouter: https://openrouter.ai/api/v1
LLM_BASE_URL=https://your-llm-provider-url
# API key for the selected provider
# OpenAI: Get from https://platform.openai.com/api-keys
# Groq: Get from https://console.groq.com/keys
# Ollama: Leave empty (no key needed)
# OpenRouter: Get from https://openrouter.ai/keys
LLM_API_KEY=your-api-key
# Model name to use
# OpenAI: gpt-4o, gpt-4-turbo, gpt-4o-mini
# Groq: llama-3.2-90b-text-preview, llama-3.1-70b-versatile
# Ollama: qwen2.5:7b, llama3.1:8b, llama3.2:3b
# OpenRouter: anthropic/claude-3-haiku, google/gemini-pro
LLM_MODEL=gpt-4o
# =================================================================
# Generation Parameters
# =================================================================
# Model temperature (0.0 = deterministic, 1.0 = creative)
TEMPERATURE=0.7
# Maximum tokens per response
MAX_TOKENS=1000
# Number of retry attempts on API failures
MAX_RETRIES=3
# Request timeout in seconds (5 minutes default)
REQUEST_TIMEOUT=300
# =================================================================
# Micro-Agent Model Configuration
# =================================================================
# All agents use Qwen3-4B-Instruct (optimized SLM for code analysis)
# You can customize individual agent models if needed
CODE_EXPLORER_MODEL=Qwen/Qwen3-4B-Instruct-2507
API_REFERENCE_MODEL=Qwen/Qwen3-4B-Instruct-2507
CALL_GRAPH_MODEL=Qwen/Qwen3-4B-Instruct-2507
ERROR_ANALYSIS_MODEL=Qwen/Qwen3-4B-Instruct-2507
ENV_CONFIG_MODEL=Qwen/Qwen3-4B-Instruct-2507
DEPENDENCY_ANALYZER_MODEL=Qwen/Qwen3-4B-Instruct-2507
PLANNER_MODEL=Qwen/Qwen3-4B-Instruct-2507
MERMAID_MODEL=Qwen/Qwen3-4B-Instruct-2507
QA_VALIDATOR_MODEL=Qwen/Qwen3-4B-Instruct-2507
WRITER_MODEL=Qwen/Qwen3-4B-Instruct-2507
# =================================================================
# Repository Analysis Limits
# =================================================================
# All limits are configurable to suit your needs
TEMP_REPO_DIR=./tmp/repos
# NOTE: comments are on their own lines because some env-file parsers
# (docker --env-file, compose env_file, shell sourcing) would otherwise
# include the trailing "# ..." text in the value.
# 10 GB in bytes
MAX_REPO_SIZE=10737418240
# 1 MB in bytes
MAX_FILE_SIZE=1000000
# Maximum number of files to analyze
MAX_FILES_TO_SCAN=500
# Line budget per file (pattern_window strategy extracts ~150-300 lines focusing on key patterns)
MAX_LINES_PER_FILE=500
# =================================================================
# Agent Execution Settings
# =================================================================
# Model temperature (0.0-1.0)
AGENT_TEMPERATURE=0.7
# Maximum tokens per agent response
AGENT_MAX_TOKENS=1000
# Agent timeout in seconds (5 minutes)
AGENT_TIMEOUT=300
# =================================================================
# GitHub Integration (MCP)
# =================================================================
# Required for automatic PR creation
# Generate token at: https://github.com/settings/tokens
# Required scopes: repo (full access to repositories)
GITHUB_TOKEN=your_github_personal_access_token_here
# =================================================================
# Server Configuration
# =================================================================
API_PORT=5001
HOST=0.0.0.0
# =================================================================
# CORS Settings
# =================================================================
# Add your frontend URL if different from defaults
CORS_ORIGINS=["http://localhost:3000", "http://localhost:3001", "http://localhost:5173"]
# =================================================================
# Security Configuration
# =================================================================
# SSL Verification: Set to false only for development with self-signed certificates
VERIFY_SSL=true