@@ -33,7 +33,7 @@
"\n",
"Llama Guard is provided with a reference taxonomy explained on [this page](https://llama.meta.com/docs/model-cards-and-prompt-formats/meta-llama-guard-3), where the prompting format is also explained. \n",
"\n",
- "The functions below combine already existing [prompt formatting code in llama-recipes](https://github.com/meta-llama/llama-recipes/blob/main/src/llama_recipes/inference/prompt_format_utils.py) with custom code to aid in the custimization of the taxonomy. "
+ "The functions below combine existing [prompt formatting code in llama-recipes](https://github.com/meta-llama/llama-recipes/blob/main/src/llama_cookbook/inference/prompt_format_utils.py) with custom code to aid in customizing the taxonomy. "
]
},
{
@@ -80,7 +80,7 @@
],
"source": [
"from enum import Enum\n",
- "from llama_recipes.inference.prompt_format_utils import LLAMA_GUARD_3_CATEGORY, SafetyCategory, AgentType\n",
+ "from llama_cookbook.inference.prompt_format_utils import LLAMA_GUARD_3_CATEGORY, SafetyCategory, AgentType\n",
"from typing import List\n",
"\n",
"class LG3Cat(Enum):\n",
@@ -158,7 +158,7 @@
}
],
"source": [
- "from llama_recipes.inference.prompt_format_utils import build_custom_prompt, create_conversation, PROMPT_TEMPLATE_3, LLAMA_GUARD_3_CATEGORY_SHORT_NAME_PREFIX\n",
+ "from llama_cookbook.inference.prompt_format_utils import build_custom_prompt, create_conversation, PROMPT_TEMPLATE_3, LLAMA_GUARD_3_CATEGORY_SHORT_NAME_PREFIX\n",
"from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig\n",
"from typing import List, Tuple\n",
"from enum import Enum\n",
@@ -463,13 +463,13 @@
"\n",
"To add additional datasets\n",
"\n",
- "1. Copy llama-recipes/src/llama_recipes/datasets/toxicchat_dataset.py \n",
+ "1. Copy llama-recipes/src/llama_cookbook/datasets/toxicchat_dataset.py \n",
"2. Modify the file to change the dataset used\n",
"3. Add references to the new dataset in \n",
- " - llama-recipes/src/llama_recipes/configs/datasets.py\n",
- " - llama_recipes/datasets/__init__.py\n",
- " - llama_recipes/datasets/toxicchat_dataset.py\n",
- " - llama_recipes/utils/dataset_utils.py\n",
+ " - llama-recipes/src/llama_cookbook/configs/datasets.py\n",
+ " - llama_cookbook/datasets/__init__.py\n",
+ " - llama_cookbook/datasets/toxicchat_dataset.py\n",
+ " - llama_cookbook/utils/dataset_utils.py\n",
"\n",
"\n",
"## Evaluation\n",
@@ -484,7 +484,7 @@
"source": [
"from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig\n",
"\n",
- "from llama_recipes.inference.prompt_format_utils import build_default_prompt, create_conversation, LlamaGuardVersion\n",
+ "from llama_cookbook.inference.prompt_format_utils import build_default_prompt, create_conversation, LlamaGuardVersion\n",
"from llama.llama.generation import Llama\n",
"\n",
"from typing import List, Optional, Tuple, Dict\n",
@@ -726,7 +726,7 @@
"# \"unsafe_content\": [\"O1\"]\n",
"# }\n",
"# ```\n",
- "from llama_recipes.datasets.toxicchat_dataset import get_llamaguard_toxicchat_dataset\n",
+ "from llama_cookbook.datasets.toxicchat_dataset import get_llamaguard_toxicchat_dataset\n",
"validation_data = get_llamaguard_toxicchat_dataset(None, None, \"train\", return_jsonl = True)[0:100]\n",
"run_validation(validation_data, AgentType.USER, Type.HF, load_in_8bit = False, load_in_4bit = True)"
]
@@ -757,7 +757,7 @@
"outputs": [],
"source": [
"model_id = \"meta-llama/Llama-Guard-3-8B\"\n",
- "from llama_recipes import finetuning\n",
+ "from llama_cookbook import finetuning\n",
"\n",
"finetuning.main(\n",
" model_name = model_id,\n",