Skip to content

Commit 00aca4b

Browse files
feat(src): add evaluator v2
1 parent 015bc10 commit 00aca4b

1 file changed

Lines changed: 103 additions & 0 deletions

File tree

src/evaluator_v2.py

Lines changed: 103 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,103 @@
1+
"""Evaluator for epidemic-prediction-framework v2.
2+
3+
Core module implementing evaluator functionality for the
4+
epidemic prediction framework system.
5+
"""
6+
import logging
from dataclasses import dataclass, field
from datetime import datetime, timezone
from typing import Any, Dict, List, Optional
10+
11+
logger = logging.getLogger(__name__)
12+
13+
14+
@dataclass
class EvaluatorConfig:
    """Configuration for evaluator.

    Only ``batch_size`` is consulted by the code in this file
    (``Evaluator.execute``); the other knobs appear to be reserved for
    callers or future use.
    """

    # Master on/off switch — not checked anywhere in this file.
    # NOTE(review): confirm callers gate on this before execute().
    enabled: bool = True
    # Number of input items handled per batch in Evaluator.execute().
    batch_size: int = 200
    # Presumably a per-operation timeout in seconds — unused in this
    # file. TODO confirm the intended consumer.
    timeout: int = 20
    # Presumably a retry budget — unused in this file. TODO confirm.
    max_retries: int = 3
21+
22+
23+
@dataclass
class EvaluatorResult:
    """Result from evaluator execution.

    Produced by ``Evaluator.execute``; aggregates the processed items,
    per-item error messages, and run statistics for one call.
    """

    # True iff no item raised during processing (errors list is empty).
    success: bool
    # Items that were processed and passed validation.
    data: List[Dict[str, Any]] = field(default_factory=list)
    # One human-readable message per item that raised.
    errors: List[str] = field(default_factory=list)
    # Wall-clock duration of the execute() call, in milliseconds.
    duration_ms: float = 0.0
    # Run statistics: run index plus input/output/error counts.
    metadata: Dict[str, Any] = field(default_factory=dict)
31+
32+
33+
class Evaluator:
    """Primary evaluator handler for epidemic-prediction-framework.

    Provides core evaluator capabilities including batch processing,
    validation, and result aggregation.
    """

    def __init__(self, config: Optional[EvaluatorConfig] = None) -> None:
        """Create an evaluator.

        Args:
            config: Optional configuration; a default ``EvaluatorConfig``
                is used when omitted.
        """
        self.config = config or EvaluatorConfig()
        self._initialized = False
        self._run_count = 0
        # Timezone-aware start time. datetime.utcnow() is deprecated
        # (Python 3.12+) and returns naive datetimes; use an aware UTC now.
        self._start_time = datetime.now(timezone.utc)

    def initialize(self) -> None:
        """Perform idempotent one-time setup; safe to call repeatedly."""
        if self._initialized:
            return
        logger.info("Initializing evaluator for epidemic-prediction-framework")
        self._initialized = True

    def execute(self, inputs: List[Dict[str, Any]]) -> EvaluatorResult:
        """Process ``inputs`` in batches and aggregate the outcome.

        Each item is stamped by :meth:`_process_item` and kept if
        :meth:`_validate` accepts it. A failing item is recorded in
        ``errors`` without aborting the run.

        Args:
            inputs: Items to process; an empty list yields a successful,
                empty result.

        Returns:
            An :class:`EvaluatorResult` with the kept items, per-item
            error messages, elapsed milliseconds, and run counters.
        """
        self.initialize()
        self._run_count += 1
        start = datetime.now(timezone.utc)

        results: List[Dict[str, Any]] = []
        errors: List[str] = []

        for batch_start in range(0, len(inputs), self.config.batch_size):
            batch = inputs[batch_start:batch_start + self.config.batch_size]
            for item in batch:
                try:
                    processed = self._process_item(item)
                    if self._validate(processed):
                        results.append(processed)
                except Exception as e:
                    # Log the traceback but keep going: one bad item must
                    # not abort the whole run.
                    logger.exception(
                        "Failed to process item %s", item.get("id", "?")
                    )
                    errors.append(f"Item {item.get('id', '?')}: {e}")

        duration = (datetime.now(timezone.utc) - start).total_seconds() * 1000

        return EvaluatorResult(
            success=len(errors) == 0,
            data=results,
            errors=errors,
            duration_ms=duration,
            metadata={
                "run": self._run_count,
                "input_count": len(inputs),
                "output_count": len(results),
                "error_count": len(errors),
            },
        )

    def _process_item(self, item: Dict[str, Any]) -> Dict[str, Any]:
        """Return a copy of ``item`` stamped with provenance fields.

        The original mapping is not mutated. The timestamp is aware UTC
        ISO-8601 (includes a ``+00:00`` offset), replacing the deprecated
        naive ``datetime.utcnow()`` stamp.
        """
        return {
            **item,
            "processed_by": "evaluator",
            "version": 2,
            "timestamp": datetime.now(timezone.utc).isoformat(),
        }

    def _validate(self, item: Dict[str, Any]) -> bool:
        """Accept items carrying an ``id`` or a ``processed_by`` marker.

        NOTE(review): every output of ``_process_item`` sets
        ``processed_by``, so with ``or`` this check is vacuously true for
        processed items — confirm whether ``and`` (require an id) was
        intended. Behavior kept as-is.
        """
        return bool(item.get("id")) or bool(item.get("processed_by"))

    @property
    def metrics(self) -> Dict[str, Any]:
        """Return lifetime counters: runs, uptime seconds, init state."""
        uptime = (datetime.now(timezone.utc) - self._start_time).total_seconds()
        return {
            "runs": self._run_count,
            "uptime_s": uptime,
            "initialized": self._initialized,
        }

0 commit comments

Comments
 (0)