-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathexample.py
More file actions
executable file
Β·150 lines (117 loc) Β· 4.61 KB
/
example.py
File metadata and controls
executable file
Β·150 lines (117 loc) Β· 4.61 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
#!/usr/bin/env python3
"""
Quick example script demonstrating LLM Playground usage.
Run this after setting up to verify everything works.
"""
import sys
from pathlib import Path
# Add project root to path
sys.path.insert(0, str(Path(__file__).parent))
from models import get_model
from logger import get_logger
from experiments.zero_shot import run_zero_shot_experiment
from experiments.few_shot import run_few_shot_experiment
from experiments.sampling_params import run_temperature_experiment
def print_section(title):
    """Print *title* framed by 80-char '=' rules, padded with blank lines."""
    rule = "=" * 80
    # Single write producing the exact same bytes as three separate prints:
    # blank line, rule, one-space-indented title, rule, trailing blank line.
    print(f"\n{rule}\n {title}\n{rule}\n")
def _example_simple_generation(model, logger):
    """Example 1: one-off text generation, printing response text and metrics."""
    print_section("Example 1: Simple Text Generation")
    prompt = "Explain what a neural network is in one sentence."
    print(f"Prompt: {prompt}\n")
    response = model.generate(
        prompt=prompt,
        temperature=0.7,
        max_tokens=100,
    )
    print(f"Response: {response.text}")
    print(f"\nMetrics: {response.total_tokens} tokens in {response.latency_ms:.0f}ms")
    # Record the interaction so it shows up in the run's log file.
    logger.log_interaction(
        prompt=prompt,
        response=response,
        parameters={"temperature": 0.7, "max_tokens": 100},
        experiment_type="example_simple",
    )


def _example_zero_shot(model, logger):
    """Example 2: zero-shot sentiment classification via the experiment helper."""
    print_section("Example 2: Zero-Shot Sentiment Analysis")
    task = 'Classify sentiment as Positive, Negative, or Neutral: "This product is amazing!"'
    print(f"Task: {task}\n")
    response = run_zero_shot_experiment(
        model=model,
        task=task,
        logger=logger,
        temperature=0.3,  # Lower for more deterministic
        max_tokens=10,
    )
    print(f"Classification: {response.text}")


def _example_temperature(model, logger):
    """Example 3: show how sampling temperature changes a creative completion."""
    print_section("Example 3: Temperature Effects")
    creative_prompt = "Write a creative opening line for a sci-fi story:"
    print(f"Prompt: {creative_prompt}")
    print("\nTesting temperatures: 0.1, 0.7, 1.5\n")
    temps = [0.1, 0.7, 1.5]
    # results: mapping of temperature -> list of responses (num_samples per temp).
    results = run_temperature_experiment(
        model=model,
        prompt=creative_prompt,
        temperatures=temps,
        logger=logger,
        max_tokens=50,
        num_samples=1,
    )
    for temp in sorted(results.keys()):
        response = results[temp][0]
        print(f"Temperature {temp}:")
        # NOTE(review): arrow glyph mangled to "β" in extraction — confirm original.
        print(f" β {response.text[:100]}...")
        print()


def _example_few_shot(model, logger):
    """Example 4: compare zero-shot vs few-shot on a borderline sentiment case."""
    print_section("Example 4: Few-Shot vs Zero-Shot")
    task = "It works fine, nothing special."
    examples = [
        ("I loved it!", "Positive"),
        ("Waste of money.", "Negative"),
    ]
    print("Examples provided:")
    for inp, out in examples:
        print(f" \"{inp}\" β {out}")
    print(f"\nTest case: \"{task}\"\n")
    zero_shot_resp, few_shot_resp = run_few_shot_experiment(
        model=model,
        task=task,
        examples=examples,
        logger=logger,
        temperature=0.3,
        max_tokens=10,
    )
    print(f"Zero-shot result: {zero_shot_resp.text}")
    print(f"Few-shot result: {few_shot_resp.text}")


def _print_summary(logger):
    """Print the closing banner, log-file location, and suggested next steps."""
    # NOTE(review): checkmark emoji mangled to "β" in extraction — confirm original.
    print_section("β Examples Complete!")
    print(f"All interactions logged to: {logger.get_log_file_path()}")
    print("\nNext steps:")
    print(" 1. Run the Streamlit app: streamlit run app.py")
    print(" 2. Try the CLI: python cli.py --help")
    print(" 3. Read CONCEPTS.md for theory")
    print(" 4. Check logs/ directory for detailed logs")
    print("\nHappy experimenting! π\n")


def main():
    """Run all example experiments against a local Ollama llama2 model.

    Connects to the model, runs four demo experiments (simple generation,
    zero-shot, temperature sweep, few-shot comparison), then prints a summary.
    On any failure, prints troubleshooting hints and exits with status 1.
    """
    # NOTE(review): emoji in this banner mangled to "π" in extraction — confirm original.
    print_section("π LLM Playground - Example Experiments")
    print("Initializing...")
    logger = get_logger()
    try:
        print("Connecting to Ollama (llama2)...")
        model = get_model("ollama", "llama2")
        print("β Connected!\n")
        _example_simple_generation(model, logger)
        _example_zero_shot(model, logger)
        _example_temperature(model, logger)
        _example_few_shot(model, logger)
        _print_summary(logger)
    except Exception as e:
        # Top-level boundary for a demo script: report, hint, and exit non-zero.
        print(f"\nβ Error: {e}")
        print("\nTroubleshooting:")
        print(" 1. Make sure Ollama is running: ollama serve")
        print(" 2. Check that llama2 is installed: ollama pull llama2")
        print(" 3. Verify connection: curl http://localhost:11434/api/tags")
        sys.exit(1)
# Script entry point: run the examples only when executed directly.
if __name__ == "__main__":
    main()