
document less obvious training config parameters

Kevin Slagle, 11 months ago
commit 2f7001ef73

2 changed files with 8 additions and 8 deletions:
  1. recipes/finetuning/README.md (+4, -4)
  2. src/llama_recipes/configs/training.py (+4, -4)
recipes/finetuning/README.md (+4, -4)

@@ -27,8 +27,8 @@ It lets us specify the training settings for everything from `model_name` to `da
 ```python
     model_name: str="PATH/to/Model"
     tokenizer_name: str=None
-    enable_fsdp: bool=False
-    low_cpu_fsdp: bool=False
+    enable_fsdp: bool=False # shards model parameters, optimizer states, and gradients across DDP ranks
+    low_cpu_fsdp: bool=False # saves CPU memory by loading the pretrained model on rank 0 only
     run_validation: bool=True
     batch_size_training: int=4
     batching_strategy: str="packing" #alternative: padding
@@ -42,14 +42,14 @@ It lets us specify the training settings for everything from `model_name` to `da
     num_workers_dataloader: int=1
     lr: float=1e-4
     weight_decay: float=0.0
-    gamma: float= 0.85
+    gamma: float= 0.85 # the learning rate is multiplied by gamma at the end of each epoch
     seed: int=42
     use_fp16: bool=False
     mixed_precision: bool=True
     val_batch_size: int=1
     dataset = "samsum_dataset"
     peft_method: str = "lora" # None,llama_adapter, prefix
-    use_peft: bool=False
+    use_peft: bool=False # use parameter-efficient fine-tuning (PEFT)
     output_dir: str = "PATH/to/save/PEFT/model"
     freeze_layers: bool = False
     num_freeze_layers: int = 1
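
The `gamma` comment is easiest to read next to the scheduler it feeds. The sketch below is an assumption about typical usage rather than the repo's actual training loop: it supposes `gamma` is handed to a per-epoch `torch.optim.lr_scheduler.StepLR`, so the learning rate shrinks to 85% of its previous value after every epoch.

```python
# Illustration only (assumed, not the repo's exact training loop):
# a per-epoch StepLR multiplies the learning rate by gamma once per epoch.
import torch
from torch.optim.lr_scheduler import StepLR

model = torch.nn.Linear(8, 8)  # stand-in for the fine-tuned model
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4, weight_decay=0.0)
scheduler = StepLR(optimizer, step_size=1, gamma=0.85)  # lr, weight_decay, gamma mirror train_config

for epoch in range(3):  # num_epochs in train_config
    # ... training and validation steps for this epoch would run here ...
    scheduler.step()  # lr <- lr * gamma after each epoch
    print(f"epoch {epoch}: lr = {scheduler.get_last_lr()[0]:.2e}")
```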

src/llama_recipes/configs/training.py (+4, -4)

@@ -8,8 +8,8 @@ from dataclasses import dataclass
 class train_config:
     model_name: str="PATH/to/Model"
     tokenizer_name: str=None
-    enable_fsdp: bool=False
-    low_cpu_fsdp: bool=False
+    enable_fsdp: bool=False # shards model parameters, optimizer states, and gradients across DDP ranks
+    low_cpu_fsdp: bool=False # saves CPU memory by loading the pretrained model on rank 0 only
     run_validation: bool=True
     batch_size_training: int=4
     batching_strategy: str="packing" #alternative: padding
@@ -23,14 +23,14 @@ class train_config:
     num_workers_dataloader: int=1
     lr: float=1e-4
     weight_decay: float=0.0
-    gamma: float= 0.85
+    gamma: float= 0.85 # the learning rate is multiplied by gamma at the end of each epoch
     seed: int=42
     use_fp16: bool=False
     mixed_precision: bool=True
     val_batch_size: int=1
     dataset = "samsum_dataset"
     peft_method: str = "lora" # None, llama_adapter (Caution: llama_adapter is currently not supported with FSDP)
-    use_peft: bool=False
+    use_peft: bool=False # use parameter-efficient fine-tuning (PEFT)
     output_dir: str = "PATH/to/save/PEFT/model"
     freeze_layers: bool = False
     num_freeze_layers: int = 1
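
For completeness, a short usage sketch of the fields documented above. It only assumes that `train_config` can be imported from the module shown in this diff and that its attributes can be set directly; the names of the fields come from the dataclass itself.

```python
# Usage sketch (an assumption, not the repo's documented entry point):
# build a train_config and flip the newly documented flags.
from llama_recipes.configs.training import train_config

cfg = train_config()
cfg.enable_fsdp = True    # shard parameters, optimizer states, and gradients across DDP ranks
cfg.low_cpu_fsdp = True   # load the pretrained weights on rank 0 only to save CPU memory
cfg.use_peft = True       # train with parameter-efficient fine-tuning
cfg.peft_method = "lora"  # llama_adapter is currently not supported with FSDP
cfg.gamma = 0.9           # decay the learning rate more slowly than the 0.85 default

print(cfg.enable_fsdp, cfg.peft_method, cfg.gamma)
```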