"""
analyze_results.py
Reads the latest results JSON and prints a markdown report.
Run after benchmark.py to generate findings.
"""

import glob
import json
from datetime import datetime
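
# Expected shape of a run_*.json file, inferred from how the keys are read
# below (an assumption about what benchmark.py writes, not a verified schema):
# {
#   "summary": {
#     "<technique>": {"accuracy": float, "correct": int, "total": int,
#                     "avg_latency_s": float, "avg_tokens": int}
#   },
#   "details": {
#     "<technique>": [{"input": str, "predicted": str,
#                      "expected": str, "correct": bool}, ...]
#   }
# }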


def load_latest_results(results_dir="results") -> dict:
    # Lexicographic sort picks the newest file on the assumption that
    # benchmark.py writes timestamp-ordered run_*.json filenames
    files = sorted(glob.glob(f"{results_dir}/run_*.json"))
    if not files:
        raise FileNotFoundError("No results found. Run benchmark.py first.")
    latest = files[-1]
    print(f"Loading: {latest}\n")
    with open(latest, encoding="utf-8") as f:
        return json.load(f)


def generate_markdown_report(data: dict) -> str:
    summary = data["summary"]
    details = data["details"]
    lines = []
    lines.append("# Prompt Technique Benchmark — Findings\n")
    lines.append(f"_Generated: {datetime.now().strftime('%Y-%m-%d %H:%M')}_\n")
    lines.append("---\n")

    # Summary table, sorted by descending accuracy so the best technique comes first
    lines.append("## Results Summary\n")
    lines.append("| Technique | Accuracy | Avg Latency | Avg Tokens |")
    lines.append("|-----------|----------|-------------|------------|")
    sorted_summary = sorted(summary.items(), key=lambda x: -x[1]["accuracy"])
    for technique, s in sorted_summary:
        lines.append(
            f"| `{technique}` | **{s['accuracy']}%** | {s['avg_latency_s']}s | {s['avg_tokens']} |"
        )
    lines.append("")

    # Winner callout
    winner = sorted_summary[0]
    lines.append(f"## 🏆 Winner: `{winner[0]}` at {winner[1]['accuracy']}% accuracy\n")

    # Per-technique analysis
    lines.append("## Technique-by-Technique Analysis\n")
    for technique, s in sorted_summary:
        lines.append(f"### {technique.replace('_', '-').title()}")
        lines.append(f"- Accuracy: **{s['accuracy']}%** ({s['correct']}/{s['total']})")
        lines.append(f"- Avg latency: {s['avg_latency_s']}s")
        lines.append(f"- Avg tokens: {s['avg_tokens']}")

        # List every misclassified case for this technique
        errors = [r for r in details[technique] if not r["correct"]]
        if errors:
            lines.append(f"- **Misclassifications ({len(errors)}):**")
            for e in errors:
                lines.append(
                    f'  - `"{e["input"][:50]}"` → predicted `{e["predicted"]}`, expected `{e["expected"]}`'
                )
        else:
            lines.append("- ✅ No misclassifications")
        lines.append("")

    # Key findings (left as prompts for the analyst to fill in)
    lines.append("## Key Findings\n")
    lines.append(
        "_(Fill this in after reviewing results — this is your analysis section. "
        "Example questions to answer:)_\n"
    )
    lines.append("- Which technique had the best accuracy/cost tradeoff?")
    lines.append("- Did CoT reasoning help on ambiguous cases?")
    lines.append("- Did self-consistency add value over few-shot?")
    lines.append("- Which categories were hardest to classify and why?")
    lines.append("- What would you do differently with more time?")
    lines.append("")

    # Methodology
    lines.append("## Methodology\n")
    lines.append("- **Task:** Customer support intent classification (10 classes)")
    lines.append("- **Dataset:** 10 hand-labeled test cases")
    lines.append("- **Scorer:** Exact match (label normalization applied)")
    lines.append("- **Observability:** Langfuse traces per run")
    lines.append("- **Techniques tested:**")
    lines.append("  - `zero_shot` — Direct instruction, no examples")
    lines.append("  - `few_shot` — 4 labeled examples in context")
    lines.append("  - `cot` — Step-by-step reasoning before answer")
    lines.append("  - `self_consistency` — 3 samples, majority vote")
    lines.append("")

    return "\n".join(lines)
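
# Rough skeleton of the report this emits, in section order (mirrors the
# appends above):
#   # Prompt Technique Benchmark — Findings
#   ## Results Summary            (accuracy/latency/token table)
#   ## 🏆 Winner: `<technique>` at <accuracy>% accuracy
#   ## Technique-by-Technique Analysis
#   ## Key Findings               (placeholder questions to fill in)
#   ## Methodology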


if __name__ == "__main__":
    data = load_latest_results()
    report = generate_markdown_report(data)

    # Print to terminal
    print(report)

    # Save to file; explicit UTF-8 so the emoji and em-dashes survive on
    # platforms where the default encoding is not UTF-8 (e.g. Windows)
    with open("findings.md", "w", encoding="utf-8") as f:
        f.write(report)
    print("\n✅ Report saved to findings.md")
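
# Typical workflow (assumes benchmark.py writes results/run_*.json, per the
# loader above):
#   $ python benchmark.py
#   $ python analyze_results.py   # prints the report and writes findings.md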