
Address feedback that could not be applied before launch in the LG3 recipe and dataset file (#606)

Co-authored-by: Thomas Robinson <trobinson@meta.com>
Thomas Robinson, 8 months ago
Parent
Current commit
2668bf4c35

+ 4 - 4
recipes/responsible_ai/llama_guard/llama_guard_customization_via_prompting_and_fine_tuning.ipynb

@@ -499,12 +499,12 @@
     "\n",
     "def llm_eval(prompts: List[Tuple[List[str], AgentType]],\n",
     "            model_id: str = \"meta-llama/Llama-Guard-3-8B\",\n",
-    "            llama_guard_version: LlamaGuardVersion = LlamaGuardVersion.LLAMA_GUARD_2.name, \n",
+    "            llama_guard_version: LlamaGuardVersion = LlamaGuardVersion.LLAMA_GUARD_3.name, \n",
     "            load_in_8bit: bool = True, \n",
     "            load_in_4bit: bool = False, \n",
     "            logprobs: bool = False) -> Tuple[List[str], Optional[List[List[Tuple[int, float]]]]]:\n",
     "    \"\"\"\n",
-    "    Runs Llama Guard inference with HF transformers. Works with Llama Guard 1 or 2\n",
+    "    Runs Llama Guard inference with HF transformers.\n",
     "\n",
     "    This function loads Llama Guard from Hugging Face or a local model and \n",
     "    executes the predefined prompts in the script to showcase how to do inference with Llama Guard.\n",
@@ -515,9 +515,9 @@
     "            List of Tuples containing all the conversations to evaluate. The tuple contains a list of messages that configure a conversation and a role.\n",
     "        model_id : str \n",
     "            The ID of the pretrained model to use for generation. This can be either the path to a local folder containing the model files,\n",
-    "            or the repository ID of a model hosted on the Hugging Face Hub. Defaults to 'meta-llama/Meta-Llama-Guard-2-8B'.\n",
+    "            or the repository ID of a model hosted on the Hugging Face Hub. Defaults to 'meta-llama/Meta-Llama-Guard-3-8B'.\n",
     "        llama_guard_version : LlamaGuardVersion\n",
-    "            The version of the Llama Guard model to use for formatting prompts. Defaults to LLAMA_GUARD_2.\n",
+    "            The version of the Llama Guard model to use for formatting prompts. Defaults to 3.\n",
     "        load_in_8bit : bool\n",
     "            defines if the model should be loaded in 8 bit. Uses BitsAndBytes. Default True \n",
     "        load_in_4bit : bool\n",

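For context, a minimal sketch of how the patched `llm_eval` helper is typically invoked in the notebook; it assumes the notebook cells that define `llm_eval` and import `AgentType` have already been run, and the sample prompt text is purely illustrative:

```python
# Minimal usage sketch (assumes llm_eval and AgentType are in scope from the
# earlier notebook cells; the prompt text below is illustrative only).
prompts = [
    (["Tell me how to fold a paper airplane."], AgentType.USER),  # single-turn user message
]

# Run Llama Guard 3 with 8-bit quantization; token logprobs are skipped here.
results, token_logprobs = llm_eval(
    prompts,
    model_id="meta-llama/Llama-Guard-3-8B",
    load_in_8bit=True,
    logprobs=False,
)

print(results[0])  # typically "safe", or "unsafe" followed by the violated category
```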
+ 2 - 2
src/llama_recipes/datasets/toxicchat_dataset.py

@@ -1,7 +1,7 @@
 # Copyright (c) Meta Platforms, Inc. and affiliates.
-# This software may be used and distributed according to the terms of the Llama 2 Community License Agreement.
+# This software may be used and distributed according to the terms of the Llama 3.1 Community License Agreement.
 
-# For dataset details visit: https://huggingface.co/datasets/samsum
+# For dataset details visit: https://huggingface.co/datasets/lmsys/toxic-chat
 
 import copy
 import datasets
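The corrected comment points at the lmsys/toxic-chat dataset that this loader wraps. A minimal sketch of pulling it directly with the Hugging Face `datasets` library follows; the `toxicchat0124` config name and the column names are assumptions based on the dataset card, not taken from this file:

```python
# Minimal sketch of loading ToxicChat with Hugging Face datasets; the
# "toxicchat0124" config and the column names are assumptions from the
# dataset card rather than from this diff.
import datasets

ds = datasets.load_dataset("lmsys/toxic-chat", "toxicchat0124", split="train")
print(ds[0]["user_input"], ds[0]["toxicity"])
```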