# Pipeline feedback types are plain aliases of the CoSTEER feedback types:
# this module adds no pipeline-specific feedback fields of its own.
PipelineSingleFeedback = CoSTEERSingleFeedback
PipelineMultiFeedback = CoSTEERMultiFeedback

# Placeholder markers substituted when the corresponding CSV artifact is
# absent, so before/after content comparisons never operate on None.
NO_SUB = "<No submission.csv file found.>"
NO_SCORE = "<No scores.csv file found.>"
2528class ModelDumpEvaluator (CoSTEEREvaluator ):
2629 """This evaluator assumes that it runs after the model"""
@@ -43,6 +46,7 @@ def evaluate(
4346 code = err_msg ,
4447 final_decision = False ,
4548 )
49+
4650 data_source_path = (
4751 f"{ DS_RD_SETTING .local_data_path } /{ self .scen .competition } "
4852 if self .data_type == "full"
@@ -61,12 +65,12 @@ def evaluate(
6165 submission_content_before = (
6266 (implementation .workspace_path / "submission.csv" ).read_text ()
6367 if (implementation .workspace_path / "submission.csv" ).exists ()
64- else None
68+ else NO_SUB
6569 )
6670 scores_content_before = (
6771 (implementation .workspace_path / "scores.csv" ).read_text ()
6872 if (implementation .workspace_path / "scores.csv" ).exists ()
69- else None
73+ else NO_SCORE
7074 )
7175
7276 # Remove the files submission.csv and scores.csv
@@ -103,11 +107,16 @@ def evaluate(
103107 final_decision = False ,
104108 )
105109
106- assert submission_content_before is not None
107- assert scores_content_before is not None
108-
109- submission_content_after = (implementation .workspace_path / "submission.csv" ).read_text ()
110- scores_content_after = (implementation .workspace_path / "scores.csv" ).read_text ()
110+ submission_content_after = (
111+ (implementation .workspace_path / "submission.csv" ).read_text ()
112+ if (implementation .workspace_path / "submission.csv" ).exists ()
113+ else NO_SUB
114+ )
115+ scores_content_after = (
116+ (implementation .workspace_path / "scores.csv" ).read_text ()
117+ if (implementation .workspace_path / "scores.csv" ).exists ()
118+ else NO_SCORE
119+ )
111120
112121 system_prompt = T (".prompts:dump_model_eval.system" ).r ()
113122 user_prompt = T (".prompts:dump_model_eval.user" ).r (
0 commit comments