
add mmlu_instruct for 3.2

Kai Wu 2 months ago
commit 00691affbf

+ 2 - 2
end-to-end-use-cases/benchmarks/llm_eval_harness/meta_eval/eval_config.yaml

@@ -1,6 +1,6 @@
-model_name: "meta-llama/Llama-3.1-8B-Instruct" # The name of the model to evaluate. This must be a valid Meta Llama 3 based model name in the HuggingFace model hub."
+model_name: "meta-llama/Llama-3.2-3B-Instruct" # The name of the model to evaluate. This must be a valid Meta Llama 3.x based model name in the HuggingFace model hub.
 
-evals_dataset: "meta-llama/Llama-3.1-8B-Instruct-evals" # The name of the 3.1 evals dataset to evaluate, please make sure this eval dataset corresponds to the model loaded. This must be a valid dataset name in the Llama 3.x Evals collection.
+evals_dataset: "meta-llama/Llama-3.2-3B-Instruct-evals" # The name of the evals dataset to evaluate; please make sure this eval dataset corresponds to the model loaded. This must be a valid dataset name in the Llama 3.x Evals collection.
 # Must be one of the following ["meta-llama/Llama-3.1-8B-Instruct-evals","meta-llama/Llama-3.1-70B-Instruct-evals","meta-llama/Llama-3.1-405B-Instruct-evals","meta-llama/Llama-3.1-8B-evals","meta-llama/Llama-3.1-70B-evals","meta-llama/Llama-3.1-405B-evals","meta-llama/Llama-3.2-1B-evals","meta-llama/Llama-3.2-3B-evals", "meta-llama/Llama-3.2-1B-Instruct-evals", "meta-llama/Llama-3.2-3B-Instruct-evals"]
 
 tasks: "meta_instruct" # Available tasks for 3.1 instruct model: "meta_math_hard", "meta_gpqa_cot", "meta_mmlu_pro_instruct", "meta_ifeval"; or just use "meta_instruct" to run all of them.
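For reference, a minimal sketch (not part of this commit) of loading the config above with PyYAML and sanity-checking the evals_dataset choice; the relative path and the prefix-based pairing check are illustrative assumptions.

```python
import yaml

# Allowed datasets, copied from the comment in eval_config.yaml above.
ALLOWED_EVALS_DATASETS = {
    "meta-llama/Llama-3.1-8B-Instruct-evals",
    "meta-llama/Llama-3.1-70B-Instruct-evals",
    "meta-llama/Llama-3.1-405B-Instruct-evals",
    "meta-llama/Llama-3.1-8B-evals",
    "meta-llama/Llama-3.1-70B-evals",
    "meta-llama/Llama-3.1-405B-evals",
    "meta-llama/Llama-3.2-1B-evals",
    "meta-llama/Llama-3.2-3B-evals",
    "meta-llama/Llama-3.2-1B-Instruct-evals",
    "meta-llama/Llama-3.2-3B-Instruct-evals",
}

with open("eval_config.yaml") as f:  # assumed relative path
    cfg = yaml.safe_load(f)

assert cfg["evals_dataset"] in ALLOWED_EVALS_DATASETS, cfg["evals_dataset"]
# Loose naming-convention check: the evals dataset should be derived from the
# model being evaluated, e.g. Llama-3.2-3B-Instruct -> Llama-3.2-3B-Instruct-evals.
assert cfg["evals_dataset"].startswith(cfg["model_name"]), (
    "evals_dataset does not correspond to model_name"
)
```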

+ 29 - 0
end-to-end-use-cases/benchmarks/llm_eval_harness/meta_eval/meta_template/mmlu/mmlu_instruct.yaml

@@ -0,0 +1,29 @@
+task: meta_mmlu_instruct
+dataset_path: meta-llama/Llama-3.1-8B-Instruct-evals
+dataset_name: Llama-3.1-8B-Instruct-evals__mmlu__details
+test_split: latest
+output_type: generate_until
+process_docs: !function utils.process_docs_instruct
+doc_to_text: !function utils.doc_to_text_instruct
+doc_to_target: gold
+filter_list:
+  - name: "strict-match"
+    filter:
+      - function: "regex"
+        group_select: -1
+        regex_pattern: ' ([A-D])'
+      - function: "take_first"
+generation_kwargs:
+  until: []
+  do_sample: false
+  temperature: 0
+  max_gen_toks: 1024
+num_fewshot: 0
+metric_list:
+  - metric: exact_match
+    aggregation: mean
+    higher_is_better: true
+    ignore_case: true
+    ignore_punctuation: true
+metadata:
+  version: 1.0
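The strict-match filter above is what turns the model's free-form generation back into a single gradable letter for exact_match. A minimal sketch of the intended behavior, assuming lm-eval's regex-filter semantics (collect all matches of the pattern; group_select: -1 keeps the last one):

```python
import re

def strict_match(generation: str) -> str:
    # Equivalent of regex_pattern ' ([A-D])' with group_select: -1:
    # take the last space-prefixed answer letter in the generation.
    matches = re.findall(r" ([A-D])", generation)
    return matches[-1] if matches else ""

print(strict_match("It could be B, but the best answer is C."))  # -> "C"
```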

+ 3 - 3
end-to-end-use-cases/benchmarks/llm_eval_harness/meta_eval/meta_template/mmlu/mmlu.yaml

@@ -3,12 +3,12 @@ dataset_path: meta-llama/Llama-3.1-8B-evals
 dataset_name: Llama-3.1-8B-evals__mmlu__details
 test_split: latest
 output_type: multiple_choice
-process_docs: !function utils.process_docs
-doc_to_text: !function utils.doc_to_text
+process_docs: !function utils.process_docs_pretrain
+doc_to_text: !function utils.doc_to_text_pretrain
 doc_to_target: !function utils.doc_to_target
 doc_to_choice: ["A", "B", "C", "D"]
 # 5-shot prompts are already included in the dataset
 # So no need to generate
 num_fewshot: 0
 metadata:
-  version: 1.0
+  version: 1.0

+ 45 - 4
end-to-end-use-cases/benchmarks/llm_eval_harness/meta_eval/meta_template/mmlu/utils.py

@@ -1,12 +1,21 @@
 import string
+
 import datasets
 
-def doc_to_text(doc: dict) -> str:
+
+def doc_to_text_pretrain(doc: dict) -> str:
     # Strip out the last two characters, which is a space and the answer
     # E.g., "Answer: B" -> "Answer:"
     return doc["input_final_prompts"][0][:-2]
+
+
+def doc_to_text_instruct(doc: dict) -> str:
+    # Return the prompt unchanged; the answer letter is recovered from the
+    # model's generation by the strict-match filter in mmlu_instruct.yaml
+    return doc["input_final_prompts"][0]
+
 
-def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:
+def process_docs_pretrain(dataset: datasets.Dataset) -> datasets.Dataset:
     def _process_doc(doc: dict) -> dict:
         # input_correct_responses is in format of: "Answer: B"
         answer = doc["input_correct_responses"][0]
@@ -21,11 +30,43 @@ def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:
         return out_doc
 
     dataset = dataset.select_columns(
-        ["input_question", "input_correct_responses", "input_final_prompts", "is_correct", "input_question_hash",
-         "input_choice_list"])
+        [
+            "input_question",
+            "input_correct_responses",
+            "input_final_prompts",
+            "is_correct",
+            "input_question_hash",
+            "input_choice_list",
+        ]
+    )
+    dataset = dataset.rename_column("is_correct", "previously_is_correct")
+    dataset = dataset.map(_process_doc)
+    return dataset.map(_process_doc)
+
+
+def process_docs_instruct(dataset: datasets.Dataset) -> datasets.Dataset:
+    def _process_doc(doc: dict) -> dict:
+        out_doc = {
+            "problem": doc["input_question"],
+            "gold": doc["input_correct_responses"][0],
+        }
+        return out_doc
+
+    dataset = dataset.select_columns(
+        [
+            "input_question",
+            "input_correct_responses",
+            "input_final_prompts",
+            "is_correct",
+            "input_question_hash",
+            "input_choice_list",
+            "output_prediction_text",
+        ]
+    )
     dataset = dataset.rename_column("is_correct", "previously_is_correct")
     dataset = dataset.map(_process_doc)
     return dataset.map(_process_doc)
 
+
 def doc_to_target(doc: dict) -> str:
     return doc["gold"]
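For intuition, a toy example (hypothetical docs; the import path is an assumption) contrasting the two prompt builders above: the pretrain prompts end with the gold letter, which is stripped so the model must complete them, while the instruct prompts are passed through unchanged and the letter is later pulled out of the generation by the strict-match filter.

```python
from utils import doc_to_text_instruct, doc_to_text_pretrain  # assumed import path

# Hypothetical docs for illustration; real rows come from the *-evals datasets.
pretrain_doc = {"input_final_prompts": ["Question: ...\nAnswer: B"]}
instruct_doc = {"input_final_prompts": ["Question: ...\nThe best answer is"]}

print(doc_to_text_pretrain(pretrain_doc))   # "Question: ...\nAnswer:"  (gold letter stripped)
print(doc_to_text_instruct(instruct_doc))   # prompt returned unchanged
```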

+ 14 - 4
end-to-end-use-cases/benchmarks/llm_eval_harness/meta_eval/meta_template/mmlu_pro/utils.py

@@ -1,13 +1,12 @@
 import string
 
-
 import datasets
 
 
-
 def doc_to_text(doc: dict) -> str:
     return doc["input_final_prompts"][0]
 
+
 def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:
     def _process_doc(doc: dict) -> dict:
         out_doc = {
@@ -15,7 +14,18 @@ def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:
             "gold": doc["input_correct_responses"][0],
         }
         return out_doc
-    dataset = dataset.select_columns(["input_question", "input_correct_responses", "input_final_prompts", "is_correct","input_question_hash","input_choice_list","output_prediction_text"])
-    dataset = dataset.rename_column("is_correct","previously_is_correct")
+
+    dataset = dataset.select_columns(
+        [
+            "input_question",
+            "input_correct_responses",
+            "input_final_prompts",
+            "is_correct",
+            "input_question_hash",
+            "input_choice_list",
+            "output_prediction_text",
+        ]
+    )
+    dataset = dataset.rename_column("is_correct", "previously_is_correct")
     dataset = dataset.map(_process_doc)
     return dataset.map(_process_doc)

+ 2 - 2
end-to-end-use-cases/benchmarks/llm_eval_harness/meta_eval/prepare_meta_eval.py

@@ -188,7 +188,7 @@ def change_yaml(args, base_name):
     if args.evals_dataset in LLAMA_3_1_PRETRAIN_EVALS:
         meta_pretrain["task"] = ["meta_bbh", "meta_mmlu_pro_pretrain"]
     elif args.evals_dataset in LLAMA_3_2_PRETRAIN_EVALS:
-        meta_pretrain["task"] = ["meta_mmlu"]
+        meta_pretrain["task"] = ["meta_mmlu_pretrain"]
     with open(args.work_dir + "/meta_pretrain.yaml", "w") as yaml_file:
         yaml.dump(meta_pretrain, yaml_file)
 
@@ -203,7 +203,7 @@ def change_yaml(args, base_name):
             "meta_mmlu_pro_instruct",
         ]
     elif args.evals_dataset in LLAMA_3_2_INSTRUCT_EVALS:
-        meta_instruct["task"] = ["meta_mmlu", "meta_math", "meta_gpqa"]
+        meta_instruct["task"] = ["meta_mmlu_instruct", "meta_math", "meta_gpqa"]
     with open(args.work_dir + "/meta_instruct.yaml", "w") as yaml_file:
         yaml.dump(meta_instruct, yaml_file)
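For reference, a minimal sketch (not the commit's code) of the dataset-to-task mapping that change_yaml now applies: 3.1 Instruct evals keep the heavier meta_instruct set, while the 3.2 1B/3B Instruct evals get the lighter set that now includes the new meta_mmlu_instruct task. The constant names and the 3.1 task list are assumptions pieced together from the config comment and the hunks above.

```python
# Illustrative only; names mirror prepare_meta_eval.py but the lists are assumed.
LLAMA_3_1_INSTRUCT_EVALS = [
    "meta-llama/Llama-3.1-8B-Instruct-evals",
    "meta-llama/Llama-3.1-70B-Instruct-evals",
    "meta-llama/Llama-3.1-405B-Instruct-evals",
]
LLAMA_3_2_INSTRUCT_EVALS = [
    "meta-llama/Llama-3.2-1B-Instruct-evals",
    "meta-llama/Llama-3.2-3B-Instruct-evals",
]

def instruct_tasks(evals_dataset: str) -> list[str]:
    if evals_dataset in LLAMA_3_1_INSTRUCT_EVALS:
        return ["meta_ifeval", "meta_math_hard", "meta_gpqa_cot", "meta_mmlu_pro_instruct"]
    if evals_dataset in LLAMA_3_2_INSTRUCT_EVALS:
        return ["meta_mmlu_instruct", "meta_math", "meta_gpqa"]
    raise ValueError(f"unsupported instruct evals dataset: {evals_dataset}")

print(instruct_tasks("meta-llama/Llama-3.2-3B-Instruct-evals"))
# ['meta_mmlu_instruct', 'meta_math', 'meta_gpqa']
```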