@@ -0,0 +1,117 @@
+# Config for multi-device full finetuning in full_finetune_distributed.py
+# using a Llama3.1 70B Instruct model
+#
+# This config assumes that you've run the following command before launching
+# this run:
+#   tune download meta-llama/Meta-Llama-3.1-70B-Instruct --output-dir /tmp/Meta-Llama-3.1-70B-Instruct --ignore-patterns "original/consolidated*"
+#
+# To launch on 8 devices, run the following command from root:
+#   tune run --nproc_per_node 8 full_finetune_distributed --config llama3_1/70B_full
+#
+# You can add specific overrides through the command line. For example,
+# to override the checkpointer directory while launching training
+# you can run:
+#   tune run --nproc_per_node 8 full_finetune_distributed --config llama3_1/70B_full checkpointer.checkpoint_dir=<YOUR_CHECKPOINT_DIR>
+#
+# This config is only tested on an 8xA100 machine.
+#
+
+output_dir: /tmp/torchtune/llama3_1_70B/full # /tmp may be deleted by your system. Change it to your preference.
+seed: 69
+shuffle: True
+# Parallelism
+tensor_parallel_dim: 1
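+# With tensor_parallel_dim: 1 the tensor-parallel plan below is effectively
+# unused and training is plain FSDP data parallelism across all 8 GPUs; raising
+# it (it must evenly divide the number of devices) also shards each layer
+# across that many GPUs.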
+tensor_parallel_plan:
+  _component_: torchtune.models.llama3.base_llama_tp_plan
+
+# Tokenizer
+tokenizer:
+  _component_: torchtune.models.llama3.llama3_tokenizer
+  path: /tmp/Meta-Llama-3.1-70B-Instruct/original/tokenizer.model
+  max_seq_len: 16384
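+  # Samples longer than max_seq_len are truncated by the tokenizer; lowering
+  # this value is a cheap way to reduce activation memory if you run out of VRAM.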
+
+dataset:
+  _component_: toolcall.custom_dataset
+  #data_files: "train_data.json"
+  #split: "train"
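+  # toolcall.custom_dataset is assumed to be a user-defined builder, e.g. a
+  # custom_dataset() function in a local toolcall.py that is importable from
+  # the directory you launch tune run in; any extra keys set here (such as the
+  # commented-out data_files/split) are forwarded to it as keyword arguments.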
+
+
+# Model Arguments
+model:
+  _component_: torchtune.models.llama3_1.llama3_1_70b
+
+checkpointer:
+  _component_: torchtune.training.FullModelHFCheckpointer
+  checkpoint_dir: /tmp/Meta-Llama-3.1-70B-Instruct/
+  checkpoint_files:
+    filename_format: model-{}-of-{}.safetensors
+    max_filename: "00030"
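+    # Expands to model-00001-of-00030.safetensors through
+    # model-00030-of-00030.safetensors, i.e. the 30 HF shards in checkpoint_dir.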
+  recipe_checkpoint: null
+  output_dir: ${output_dir}
+  model_type: LLAMA3
+resume_from_checkpoint: False
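+# To continue an interrupted run from the checkpoints written under output_dir,
+# set resume_from_checkpoint: True; depending on your torchtune version,
+# recipe_checkpoint may also need to point at the saved recipe state file.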
+
+# Fine-tuning arguments
+batch_size: 2
+epochs: 1
+
+optimizer:
+  _component_: torch.optim.AdamW
+  lr: 2e-5
+  # Note: it is highly recommended to use the fused=True optimizer flag
+  # together with CPU offload for a faster optimizer step.
+  fused: False
+
+loss:
+  _component_: torchtune.modules.loss.CEWithChunkedOutputLoss
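+  # CEWithChunkedOutputLoss computes the cross-entropy over chunks of the output
+  # logits so the full [batch, seq_len, vocab_size] tensor is never materialized
+  # at once, which matters with Llama 3's ~128K-token vocabulary.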
+max_steps_per_epoch: null
+gradient_accumulation_steps: 1 # Use to increase effective batch size
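+# Effective batch size = batch_size x num_devices x gradient_accumulation_steps
+# = 2 x 8 x 1 = 16 sequences per optimizer step with the 8-GPU launch above.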
+
+
+# Training env
+device: cuda
+
+# Memory management
+enable_activation_checkpointing: True # True reduces memory
+enable_activation_offloading: False # True reduces memory
+custom_sharded_layers: ['tok_embeddings', 'output'] # Layers to shard separately (useful for large vocab size models). Lower Memory, but lower speed.
+fsdp_cpu_offload: True
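+# CPU offload keeps the FSDP-sharded parameters, gradients, and optimizer states
+# in host RAM between uses; with AdamW on a 70B model this is what lets the run
+# fit on 8x80GB GPUs in this config, at the cost of slower steps (hence the
+# fused=True note in the optimizer section above).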
+clip_grad_norm: null
+compile: False # torch.compile the model + loss, True increases speed + decreases memory
+optimizer_in_bwd: False # True saves memory. Requires gradient_accumulation_steps=1
+
+# Reduced precision
+dtype: bf16
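+# bf16 halves parameter and activation memory relative to fp32 and requires a
+# GPU with native bfloat16 support (compute capability 8.0+, e.g. A100/H100).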
+
+# Logging
+metric_logger:
+  _component_: torchtune.training.metric_logging.DiskLogger
+  log_dir: ${output_dir}/logs
+log_every_n_steps: 1
+log_peak_memory_stats: True
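+# DiskLogger writes plain-text metric logs under ${output_dir}/logs; to log to
+# Weights & Biases instead, swap the _component_ above for
+# torchtune.training.metric_logging.WandBLogger.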
+
+
+# Profiler (disabled)
+profiler:
+  _component_: torchtune.training.setup_torch_profiler
+  enabled: False
+
+  #Output directory of trace artifacts
+  output_dir: ${output_dir}/profiling_outputs
+
+  #`torch.profiler.ProfilerActivity` types to trace
+  cpu: True
+  cuda: True
+
+  #trace options passed to `torch.profiler.profile`
+  profile_memory: False
+  with_stack: False
+  record_shapes: True
+  with_flops: False
+
+  # `torch.profiler.schedule` options:
+  # wait_steps -> wait, warmup_steps -> warmup, active_steps -> active, num_cycles -> repeat
+  wait_steps: 5
+  warmup_steps: 3
+  active_steps: 2
+  num_cycles: 1
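+  # With this schedule, once enabled: True the profiler waits 5 steps, warms up
+  # for 3, then records 2 active steps, repeating the cycle num_cycles times
+  # (here: a single 2-step trace).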