training.py 1.2 KB

# Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the Llama 2 Community License Agreement.

from dataclasses import dataclass
from typing import ClassVar


@dataclass
class train_config:
    model_name: str = "PATH/to/LLAMA/7B"
    enable_fsdp: bool = False
    run_validation: bool = True
    batch_size_training: int = 4
    num_epochs: int = 3
    num_workers_dataloader: int = 1
    lr: float = 1e-4
    weight_decay: float = 0.0
    gamma: float = 0.85
    seed: int = 42
    use_fp16: bool = False
    mixed_precision: bool = True
    val_batch_size: int = 1
    dataset: str = "samsum_dataset"
    micro_batch_size: int = 4
    peft_method: str = "lora"  # None, llama_adapter, prefix
    use_peft: bool = False
    output_dir: str = "PATH/to/save/PEFT/model"
    freeze_layers: bool = False
    num_freeze_layers: int = 1
    quantization: bool = False
    one_gpu: bool = False
    save_model: bool = True
    dist_checkpoint_root_folder: str = "PATH/to/save/FSDP/model"  # will be used if using FSDP
    dist_checkpoint_folder: str = "fine-tuned"  # will be used if using FSDP
    save_optimizer: bool = False  # will be used if using FSDP
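

# --- Usage sketch (illustrative addition, not part of the original file) ---
# Because train_config is a plain dataclass, a fine-tuning script can create an
# instance and override individual fields, for example from parsed CLI arguments.
# The overrides below (use_peft, num_epochs, batch_size_training) are arbitrary
# values chosen only to demonstrate the pattern.
if __name__ == "__main__":
    from dataclasses import asdict, replace

    cfg = train_config()
    cfg = replace(cfg, use_peft=True, num_epochs=1, batch_size_training=2)
    print(asdict(cfg))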