@@ -332,7 +332,7 @@
" skip_special_tokens=False, \n",
" excluded_category_keys=excluded_category_keys)\n",
" \n",
- " inputs = tokenizer(llama_guard_input_templ_applied, image, return_tensors=\"pt\").to(\"cuda\")\n",
+ " inputs = tokenizer(text=llama_guard_input_templ_applied, images=image, return_tensors=\"pt\").to(\"cuda\")\n",
" output = model.generate(\n",
" **inputs, \n",
" do_sample=False, \n",
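For reference, the change above passes the prompt and the image to the processor by keyword (`text=`, `images=`) instead of positionally, so the call no longer depends on the processor's positional argument order. Below is a minimal sketch of the corrected call pattern outside the notebook, assuming a Llama Guard vision checkpoint loaded with `AutoProcessor`/`MllamaForConditionalGeneration`; the checkpoint id, image path, and prompt string are placeholders rather than the notebook's actual values.

```python
# Hedged sketch of the corrected processor call; placeholders are marked below.
from PIL import Image
from transformers import AutoProcessor, MllamaForConditionalGeneration

model_id = "meta-llama/Llama-Guard-3-11B-Vision"      # placeholder checkpoint id
tokenizer = AutoProcessor.from_pretrained(model_id)   # a multimodal processor, despite the variable name
model = MllamaForConditionalGeneration.from_pretrained(model_id).to("cuda")

image = Image.open("example.jpg")                     # placeholder image path
# Stand-in for the templated Llama Guard prompt built earlier in the notebook;
# it must contain the <|image|> placeholder so the processor can pair the image with the text.
llama_guard_input_templ_applied = "<|image|> Check this conversation for safety."

# Keyword arguments make the call unambiguous instead of relying on positional order.
inputs = tokenizer(text=llama_guard_input_templ_applied,
                   images=image,
                   return_tensors="pt").to("cuda")

output = model.generate(**inputs, do_sample=False, max_new_tokens=50)
print(tokenizer.decode(output[0], skip_special_tokens=False))
```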