---
# Specify one or more prompt refinement techniques to be used. If you specify more
# than one technique, all of them run on the same seed data; the result, iterations
# needed, and cost incurred for each technique are logged, along with the winning
# technique for each data instance and overall.
#
# Supported prompt refinement techniques: Basic, RecursiveEval, MedPrompt.
# Uncomment the techniques that you want to use.

############################ Critique Task Description Start ############################
prompt_technique_name: "critique_n_refine"
# unique_model_id of a model defined in llm_config.yaml
unique_model_id: gpt-4o
# Number of iterations of <mutation_rounds> rounds of task-description mutation,
# each followed by refinement of instructions
mutate_refine_iterations: 3
# Number of rounds of mutation to perform when generating different styles
mutation_rounds: 3
# Refine instructions after mutation
refine_instruction: true
# Number of iterations for refining the task description and in-context examples for few-shot
refine_task_eg_iterations: 3
# Number of prompt variations to generate in a given iteration
style_variation: 5
# Number of questions asked to the LLM in a single batch during the training step
questions_batch_size: 1
# Number of question batches that must be answered correctly for a prompt
# to be considered as performing well
min_correct_count: 3
# Maximum number of mini-batches on which to evaluate a prompt
max_eval_batches: 6
# Number of top-performing prompts to carry into the next iteration
top_n: 1
# Description of the task. This is fed into the prompt.
task_description: "You are a mathematics expert. You will be given a mathematics problem which you need to solve"
# Base instruction, in line with your dataset. This is fed into the prompt.
base_instruction: "Lets think step by step."
# Instruction specifying the answer format
answer_format: "For each question present the reasoning followed by the correct answer."
# Number of dataset samples set aside as training data. In every iteration,
# `questions_batch_size` examples are drawn from the training data with replacement.
seen_set_size: 25
# Number of examples given for few-shot prompting
few_shot_count: 5
# Number of synthetic training examples to generate
num_train_examples: 20
# Generate synthetic reasoning
generate_reasoning: true
# Generate a description of an expert who can solve the task at hand
generate_expert_identity: true
# Generate keywords that describe the intent of the task
generate_intent_keywords: false
############################ Critique Task Description End ############################