@@ -499,12 +499,12 @@
"\n",
"def llm_eval(prompts: List[Tuple[List[str], AgentType]],\n",
" model_id: str = \"meta-llama/Llama-Guard-3-8B\",\n",
- " llama_guard_version: LlamaGuardVersion = LlamaGuardVersion.LLAMA_GUARD_2.name, \n",
+ " llama_guard_version: LlamaGuardVersion = LlamaGuardVersion.LLAMA_GUARD_3.name, \n",
" load_in_8bit: bool = True, \n",
" load_in_4bit: bool = False, \n",
" logprobs: bool = False) -> Tuple[List[str], Optional[List[List[Tuple[int, float]]]]]:\n",
" \"\"\"\n",
- " Runs Llama Guard inference with HF transformers. Works with Llama Guard 1 or 2\n",
+ " Runs Llama Guard inference with HF transformers.\n",
"\n",
" This function loads Llama Guard from Hugging Face or a local model and \n",
" executes the predefined prompts in the script to showcase how to do inference with Llama Guard.\n",
@@ -515,9 +515,9 @@
" List of Tuples containing all the conversations to evaluate. The tuple contains a list of messages that configure a conversation and a role.\n",
" model_id : str \n",
" The ID of the pretrained model to use for generation. This can be either the path to a local folder containing the model files,\n",
- " or the repository ID of a model hosted on the Hugging Face Hub. Defaults to 'meta-llama/Meta-Llama-Guard-2-8B'.\n",
+ " or the repository ID of a model hosted on the Hugging Face Hub. Defaults to 'meta-llama/Meta-Llama-Guard-3-8B'.\n",
" llama_guard_version : LlamaGuardVersion\n",
- " The version of the Llama Guard model to use for formatting prompts. Defaults to LLAMA_GUARD_2.\n",
+ " The version of the Llama Guard model to use for formatting prompts. Defaults to 3.\n",
" load_in_8bit : bool\n",
" defines if the model should be loaded in 8 bit. Uses BitsAndBytes. Default True \n",
" load_in_4bit : bool\n",
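
For reference, a minimal call against the updated defaults might look like the sketch below. This is a usage sketch, not part of the patch: it assumes `llm_eval`, `AgentType`, and `LlamaGuardVersion` are the names defined in the notebook's earlier cells, that `AgentType.USER` is a valid enum member, and that the prompt text is purely illustrative.

```python
# Usage sketch for the patched llm_eval signature; assumptions noted inline.
# In the notebook these names are defined in earlier cells; the import below
# is a placeholder, not a real module path.
# from llama_guard_inference import llm_eval, AgentType, LlamaGuardVersion

prompts = [
    # Each tuple pairs a list of conversation turns with the role to evaluate.
    # AgentType.USER is an assumed member name; the message is illustrative.
    (["How do I safely store cleaning chemicals at home?"], AgentType.USER),
]

results, token_logprobs = llm_eval(
    prompts,
    model_id="meta-llama/Llama-Guard-3-8B",                    # new default
    llama_guard_version=LlamaGuardVersion.LLAMA_GUARD_3.name,  # new default
    load_in_8bit=True,
    logprobs=True,  # also return the per-token (index, logprob) pairs
)

for verdict in results:
    # Llama Guard answers "safe", or "unsafe" followed by the violated
    # hazard category on the next line.
    print(verdict)
```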