# Config for multi-device full finetuning in full_finetune_distributed.py
# using a Llama3.1 70B Instruct model
#
# This config assumes that you've run the following command before launching
# this run:
#   tune download meta-llama/Meta-Llama-3.1-70B-Instruct --output-dir /tmp/Meta-Llama-3.1-70B-Instruct --ignore-patterns "original/consolidated*"
#
# To launch on 8 devices, run the following command from root:
#   tune run --nproc_per_node 8 full_finetune_distributed --config ft-config.yaml
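#
# Note: the Llama 3.1 weights are gated on the Hugging Face Hub, so the download
# step above may require an access token (e.g. `tune download ... --hf-token <TOKEN>`
# or a prior `huggingface-cli login`), assuming you have accepted the license.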

output_dir: /tmp/torchtune/llama3_1_70B/full # /tmp may be deleted by your system. Change it to your preference.
seed: 69
shuffle: True

# Parallelism
tensor_parallel_dim: 1
tensor_parallel_plan:
  _component_: torchtune.models.llama3.base_llama_tp_plan
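# With tensor_parallel_dim: 1 the plan above is effectively unused and all 8
# processes do FSDP data-parallel sharding. If you raise it, the world size
# (nproc_per_node) must be divisible by tensor_parallel_dim.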

# Tokenizer
tokenizer:
  _component_: torchtune.models.llama3.llama3_tokenizer
  path: /tmp/Meta-Llama-3.1-70B-Instruct/original/tokenizer.model
  max_seq_len: 16384
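  # Samples longer than max_seq_len are truncated; larger values increase activation
  # memory, so lower this (or set it to null for no cap) if you hit OOM.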

# Dataset
dataset:
  _component_: toolcall.custom_dataset
  #data_files: "train_data.json"
  #split: "train"
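  # `toolcall.custom_dataset` is assumed to be a local Python builder importable on the
  # PYTHONPATH that returns a torchtune dataset; any uncommented keys here (e.g.
  # data_files, split) are passed to it as keyword arguments.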

# Model Arguments
model:
  _component_: torchtune.models.llama3_1.llama3_1_70b

checkpointer:
  _component_: torchtune.training.FullModelHFCheckpointer
  checkpoint_dir: /tmp/Meta-Llama-3.1-70B-Instruct/
  checkpoint_files:
    filename_format: model-{}-of-{}.safetensors
    max_filename: "00030"
  recipe_checkpoint: null
  output_dir: ${output_dir}
  model_type: LLAMA3
resume_from_checkpoint: False
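# The checkpoint_files entry above expands to model-00001-of-00030.safetensors ...
# model-00030-of-00030.safetensors, i.e. the 30 HF shards of the 70B checkpoint;
# fine-tuned checkpoints are written to ${output_dir}.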

# Fine-tuning arguments
batch_size: 4
epochs: 30
save_every_epochs: 10
max_steps_per_epoch: null
lr_scheduler:
  _component_: torchtune.training.lr_schedulers.get_cosine_schedule_with_warmup
  num_warmup_steps: 10
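  # num_warmup_steps is counted in optimizer steps; the cosine decay is then typically
  # sized to the total number of training steps (steps per epoch * epochs).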

optimizer:
  _component_: torch.optim.AdamW
  lr: 2e-5
  # Note: highly recommended to use fused=True optimizer flag
  # with CPU offload for faster optimizer step.
  fused: False
loss:
  _component_: torchtune.modules.loss.CEWithChunkedOutputLoss
gradient_accumulation_steps: 1 # Use to increase effective batch size
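# Effective batch size = batch_size * nproc_per_node * gradient_accumulation_steps
# = 4 * 8 * 1 = 32 sequences per optimizer step with the values above.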

# Training env
device: cuda

# Memory management
enable_activation_checkpointing: True # True reduces memory
enable_activation_offloading: False # True reduces memory
#custom_sharded_layers: ['tok_embeddings', 'output'] # Layers to shard separately (useful for large vocab size models). Lower memory, but lower speed.
fsdp_cpu_offload: False
clip_grad_norm: null
compile: False # torch.compile the model + loss, True increases speed + decreases memory
optimizer_in_bwd: False # True saves memory. Requires gradient_accumulation_steps=1
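# If you run out of GPU memory at 70B scale, the usual levers here are
# enable_activation_offloading, fsdp_cpu_offload (pair it with fused=True in the
# optimizer above), and custom_sharded_layers, at some cost in speed.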

# Reduced precision
dtype: bf16
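# bf16 needs hardware support (e.g. NVIDIA Ampere or newer); fall back to fp32 on older GPUs.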

# Logging
metric_logger:
  _component_: torchtune.training.metric_logging.WandBLogger
  project: ctt
  log_dir: ${output_dir}/logs
log_every_n_steps: 1
log_peak_memory_stats: True
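# WandBLogger assumes wandb is installed and you are logged in (`pip install wandb`,
# then `wandb login`); swap in torchtune.training.metric_logging.DiskLogger to log to
# ${output_dir}/logs without an account.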

# Profiler (disabled)
profiler:
  _component_: torchtune.training.setup_torch_profiler
  enabled: False

  # Output directory of trace artifacts
  output_dir: ${output_dir}/profiling_outputs

  # `torch.profiler.ProfilerActivity` types to trace
  cpu: True
  cuda: True

  # trace options passed to `torch.profiler.profile`
  profile_memory: False
  with_stack: False
  record_shapes: True
  with_flops: False

  # `torch.profiler.schedule` options:
  # wait_steps -> wait, warmup_steps -> warmup, active_steps -> active, num_cycles -> repeat
  wait_steps: 5
  warmup_steps: 3
  active_steps: 2
  num_cycles: 1
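# To profile a run, set enabled: True above; trace artifacts are written under
# ${output_dir}/profiling_outputs and can typically be viewed with Perfetto or
# chrome://tracing.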