# Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the Llama 2 Community License Agreement.

from dataclasses import dataclass
from typing import Optional


@dataclass
class train_config:
    model_name: str = "PATH/to/LLAMA/7B"
    tokenizer_name: Optional[str] = None
    enable_fsdp: bool = False
    low_cpu_fsdp: bool = False
    run_validation: bool = True
    batch_size_training: int = 4
    batching_strategy: str = "packing"  # alternative: padding
    context_length: int = 4096
    gradient_accumulation_steps: int = 1
    gradient_clipping: bool = False
    gradient_clipping_threshold: float = 1.0
    num_epochs: int = 3
    max_train_step: int = 0
    max_eval_step: int = 0
    num_workers_dataloader: int = 1
    lr: float = 1e-4
    weight_decay: float = 0.0
    gamma: float = 0.85
    seed: int = 42
    use_fp16: bool = False
    mixed_precision: bool = True
    val_batch_size: int = 1
    dataset: str = "samsum_dataset"
    peft_method: str = "lora"  # alternatives: None, llama_adapter, prefix
    use_peft: bool = False
    output_dir: str = "PATH/to/save/PEFT/model"
    freeze_layers: bool = False
    num_freeze_layers: int = 1
    quantization: bool = False
    one_gpu: bool = False
    save_model: bool = True
    dist_checkpoint_root_folder: str = "PATH/to/save/FSDP/model"  # will be used if using FSDP
    dist_checkpoint_folder: str = "fine-tuned"  # will be used if using FSDP
    save_optimizer: bool = False  # will be used if using FSDP
    use_fast_kernels: bool = False  # Enable SDPA from PyTorch Accelerated Transformers, which makes use of Flash Attention and xFormers memory-efficient kernels
    use_wandb: bool = False  # Enable wandb for experiment tracking
    save_metrics: bool = False  # saves training metrics to a json file for later plotting
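
# --- Usage sketch (illustrative addition, not part of the upstream file) ---
# A minimal example of how this dataclass might be instantiated with a few
# defaults overridden; all other fields keep the values declared above.
# llama-recipes itself applies overrides through its own config utilities,
# so this guarded block is just an assumed demonstration.
if __name__ == "__main__":
    from dataclasses import asdict

    # Override the learning rate, epoch count, and PEFT flag at construction time.
    config = train_config(lr=2e-5, num_epochs=1, use_peft=True)

    # Dump the full resolved configuration as a plain dict for inspection.
    print(asdict(config))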
 |