@@ -20,7 +20,7 @@ from transformers import LlamaTokenizer
 import json
 
 
-from llama_recipes.model_checkpointing import save_model_checkpoint, save_model_and_optimizer_sharded, save_optimizer_checkpoint
+from llama_recipes.model_checkpointing import save_model_checkpoint, save_model_and_optimizer_sharded, save_optimizer_checkpoint, save_peft_checkpoint
 from llama_recipes.policies import fpSixteen,bfSixteen, get_llama_wrapper
 from llama_recipes.utils.memory_utils import MemoryTrace
 from accelerate.utils import is_xpu_available, is_ccl_available
@@ -235,7 +235,7 @@ def train(model, train_dataloader,eval_dataloader, tokenizer, optimizer, lr_sche
                             print(f"we are about to save the PEFT modules")
                     else:
                         print(f"we are about to save the PEFT modules")
-                    model.save_pretrained(train_config.output_dir)
+                    save_peft_checkpoint(model, train_config.output_dir)
                     if train_config.enable_fsdp:
                         if rank==0:
                             print(f"PEFT modules are saved in {train_config.output_dir} directory")
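
Note: the body of `save_peft_checkpoint` is not part of these hunks; it lives in `llama_recipes.model_checkpointing` alongside `save_model_checkpoint` and the other save helpers imported above. A minimal sketch of what such a helper could look like, assuming it gathers a full state dict under FSDP before delegating to PEFT's `save_pretrained` (that FSDP handling is an assumption here, not something the diff shows):

```python
# Hypothetical sketch only: the real helper is defined in
# llama_recipes.model_checkpointing and its body is not shown in this diff.
# Assumes PyTorch >= 2.2 (torch.distributed.checkpoint.state_dict) and a
# PEFT-wrapped model that exposes save_pretrained().
from torch.distributed.checkpoint.state_dict import StateDictOptions, get_model_state_dict
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP


def save_peft_checkpoint(model, model_path):
    """Write only the PEFT adapter weights to model_path."""
    if isinstance(model, FSDP):
        # Gather a full (unsharded), CPU-offloaded state dict across ranks so
        # the saved adapter is complete, then let PEFT serialize just the
        # adapter tensors from it.
        options = StateDictOptions(full_state_dict=True, cpu_offload=True)
        state_dict = get_model_state_dict(model, options=options)
        model.save_pretrained(model_path, state_dict=state_dict)
    else:
        # Single-process / non-FSDP case: PEFT's save_pretrained is enough.
        model.save_pretrained(model_path)
```

Whatever the exact body, routing the call through `llama_recipes.model_checkpointing` keeps checkpoint-format details out of the training loop and groups the PEFT save with the existing checkpoint utilities.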