@@ -99,9 +99,6 @@ def main(
print("Skipping the inference as the prompt is not safe.")
sys.exit(1) # Exit the program with an error status
- if peft_model:
- model = load_peft_model(model, peft_model)
-
model.eval()
batch = tokenizer(user_prompt, padding='max_length', truncation=True,max_length=max_padding_length,return_tensors="pt")
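
For context, this hunk drops the PEFT adapter attachment from this point in `main` (after the safety check, before `model.eval()`). The removed `load_peft_model` call is, as far as the surrounding code suggests, a thin wrapper around `peft`'s `PeftModel.from_pretrained`; here is a minimal sketch of the behavior being removed, assuming a Hugging Face base model and `peft_model` being a path or hub id for the adapter checkpoint:

```python
# Minimal sketch of the helper the removed lines call (an assumption based on
# the common llama-recipes pattern, not the exact upstream implementation).
from peft import PeftModel


def load_peft_model(model, peft_model):
    # Wrap the base model with the trained adapter weights (e.g. LoRA);
    # peft_model points at the adapter checkpoint directory or hub id.
    return PeftModel.from_pretrained(model, peft_model)
```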