finetuning.py

# Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the Llama 2 Community License Agreement.
import os
from pkg_resources import packaging

import fire
import torch
import torch.optim as optim
from peft import get_peft_model, prepare_model_for_int8_training
from torch.distributed.fsdp import (
    FullyShardedDataParallel as FSDP,
)
from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload
from torch.optim.lr_scheduler import StepLR
from transformers import (
    LlamaForCausalLM,
    LlamaTokenizer,
    LlamaConfig,
)
from transformers.models.llama.modeling_llama import LlamaDecoderLayer

from llama_recipes.configs import fsdp_config, train_config
from llama_recipes.policies import AnyPrecisionAdamW, apply_fsdp_checkpointing
from llama_recipes.utils import fsdp_auto_wrap_policy
from llama_recipes.utils.config_utils import (
    update_config,
    generate_peft_config,
    generate_dataset_config,
    get_sampler_kwargs,
)
from llama_recipes.utils.dataset_utils import get_preprocessed_dataset
from llama_recipes.utils.train_utils import (
    train,
    freeze_transformer_layers,
    setup,
    setup_environ_flags,
    clear_gpu_cache,
    print_model_size,
    get_policies,
)
def main(**kwargs):
    # Update the configuration for the training and sharding process
    update_config((train_config, fsdp_config), **kwargs)

    # Set the seeds for reproducibility
    torch.cuda.manual_seed(train_config.seed)
    torch.manual_seed(train_config.seed)

    if train_config.enable_fsdp:
        setup()
        # torchrun-specific environment variables
        local_rank = int(os.environ["LOCAL_RANK"])
        rank = int(os.environ["RANK"])
        world_size = int(os.environ["WORLD_SIZE"])

    if torch.distributed.is_initialized():
        torch.cuda.set_device(local_rank)
        clear_gpu_cache(local_rank)
        setup_environ_flags(rank)

    # Load the pre-trained model and set up its configuration
    use_cache = False if train_config.enable_fsdp else None
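    # The KV cache only helps generation; during training it would just hold memory
    # and conflicts with activation checkpointing, so disable it under FSDP.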
    if train_config.enable_fsdp and train_config.low_cpu_fsdp:
        """
        For FSDP, we can save CPU memory by loading the pretrained model on rank 0 only.
        This avoids CPU OOM when loading large models like Llama 70B, where the model
        alone would consume 2+ TB of CPU memory (70B params x 4 bytes x 8 ranks).
        It adds some communication overhead and currently requires a recent nightly build.
        """
        v = packaging.version.parse(torch.__version__)
        verify_latest_nightly = v.is_devrelease and v.dev >= 20230701
        if not verify_latest_nightly:
            raise Exception("latest pytorch nightly build is required to run with low_cpu_fsdp config, "
                            "please install latest nightly.")
        if rank == 0:
            model = LlamaForCausalLM.from_pretrained(
                train_config.model_name,
                load_in_8bit=True if train_config.quantization else None,
                device_map="auto" if train_config.quantization else None,
                use_cache=use_cache,
            )
        else:
            llama_config = LlamaConfig.from_pretrained(train_config.model_name)
            llama_config.use_cache = use_cache
            with torch.device("meta"):
                model = LlamaForCausalLM(llama_config)
    else:
        model = LlamaForCausalLM.from_pretrained(
            train_config.model_name,
            load_in_8bit=True if train_config.quantization else None,
            device_map="auto" if train_config.quantization else None,
            use_cache=use_cache,
        )

    if train_config.enable_fsdp and train_config.use_fast_kernels:
        """
        For FSDP and FSDP+PEFT, setting 'use_fast_kernels' enables Flash Attention
        or xFormers memory-efficient kernels based on the hardware being used.
        This speeds up fine-tuning.
        """
        try:
            from optimum.bettertransformer import BetterTransformer
            model = BetterTransformer.transform(model)
        except ImportError:
            print("Module 'optimum' not found. Please install 'optimum' before proceeding.")

    print_model_size(model, train_config, rank if train_config.enable_fsdp else 0)

    # Prepare the model for int8 training if quantization is enabled
    if train_config.quantization:
        model = prepare_model_for_int8_training(model)
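    # prepare_model_for_int8_training (from peft) freezes the base weights, casts the
    # norm layers to fp32 for stability, and enables input gradients so gradient
    # checkpointing works with the frozen backbone.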
    # Convert the model to bfloat16 if FSDP and pure_bf16 are enabled
    if train_config.enable_fsdp and fsdp_config.pure_bf16:
        model.to(torch.bfloat16)

    # Load the tokenizer and add special tokens
    tokenizer = LlamaTokenizer.from_pretrained(train_config.model_name)
    tokenizer.add_special_tokens(
        {
            "pad_token": "<PAD>",
        }
    )
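    # NOTE: "<PAD>" is a new token, so its id lies just past the original embedding
    # table; if this pad id actually reaches the model, the embeddings likely need
    # growing first, e.g. model.resize_token_embeddings(len(tokenizer)).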
    if train_config.use_peft:
        peft_config = generate_peft_config(train_config, kwargs)
        model = get_peft_model(model, peft_config)
        model.print_trainable_parameters()

    # Set up FSDP if enable_fsdp is enabled
    if train_config.enable_fsdp:
        if not train_config.use_peft and train_config.freeze_layers:
            freeze_transformer_layers(train_config.num_freeze_layers)

        mixed_precision_policy, wrapping_policy = get_policies(fsdp_config, rank)
        my_auto_wrapping_policy = fsdp_auto_wrap_policy(model, LlamaDecoderLayer)
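        # With low_cpu_fsdp, only rank 0 holds real weights; the other ranks hold a
        # meta-device shell. param_init_fn materializes empty GPU tensors on those
        # ranks, and sync_module_states then broadcasts rank 0's weights to them.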
        model = FSDP(
            model,
            auto_wrap_policy=my_auto_wrapping_policy if train_config.use_peft else wrapping_policy,
            cpu_offload=CPUOffload(offload_params=True) if fsdp_config.fsdp_cpu_offload else None,
            mixed_precision=mixed_precision_policy if not fsdp_config.pure_bf16 else None,
            sharding_strategy=fsdp_config.sharding_strategy,
            device_id=torch.cuda.current_device(),
            limit_all_gathers=True,
            sync_module_states=train_config.low_cpu_fsdp,
            param_init_fn=lambda module: module.to_empty(device=torch.device("cuda"), recurse=False)
            if train_config.low_cpu_fsdp and rank != 0 else None,
        )
        if fsdp_config.fsdp_activation_checkpointing:
            apply_fsdp_checkpointing(model)
    elif not train_config.quantization and not train_config.enable_fsdp:
        model.to("cuda")
    dataset_config = generate_dataset_config(train_config, kwargs)

    # Load and preprocess the dataset for training and validation
    dataset_train = get_preprocessed_dataset(
        tokenizer,
        dataset_config,
        split="train",
    )
    if not train_config.enable_fsdp or rank == 0:
        print(f"--> Training Set Length = {len(dataset_train)}")

    dataset_val = get_preprocessed_dataset(
        tokenizer,
        dataset_config,
        split="test",
    )
    if not train_config.enable_fsdp or rank == 0:
        print(f"--> Validation Set Length = {len(dataset_val)}")

    train_dl_kwargs = get_sampler_kwargs(train_config, dataset_train, tokenizer, "train")
    val_dl_kwargs = get_sampler_kwargs(train_config, dataset_val, tokenizer, "val")

    # Create DataLoaders for the training and validation datasets
    train_dataloader = torch.utils.data.DataLoader(
        dataset_train,
        num_workers=train_config.num_workers_dataloader,
        pin_memory=True,
        **train_dl_kwargs,
    )

    eval_dataloader = None
    if train_config.run_validation:
        eval_dataloader = torch.utils.data.DataLoader(
            dataset_val,
            num_workers=train_config.num_workers_dataloader,
            pin_memory=True,
            **val_dl_kwargs,
        )

    # Initialize the optimizer and learning rate scheduler
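    # With pure_bf16, AnyPrecisionAdamW keeps the optimizer's momentum and variance
    # states in bfloat16 too, roughly halving optimizer memory versus fp32 AdamW at
    # some cost in update precision (Kahan summation is disabled here).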
    if fsdp_config.pure_bf16 and fsdp_config.optimizer == "anyprecision":
        optimizer = AnyPrecisionAdamW(
            model.parameters(),
            lr=train_config.lr,
            momentum_dtype=torch.bfloat16,
            variance_dtype=torch.bfloat16,
            use_kahan_summation=False,
            weight_decay=train_config.weight_decay,
        )
    else:
        optimizer = optim.AdamW(
            model.parameters(),
            lr=train_config.lr,
            weight_decay=train_config.weight_decay,
        )
    scheduler = StepLR(optimizer, step_size=1, gamma=train_config.gamma)
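    # step_size=1 decays the LR by a factor of `gamma` on every scheduler.step();
    # train() is assumed to step it once per epoch.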
    # Start the training process
    results = train(
        model,
        train_dataloader,
        eval_dataloader,
        tokenizer,
        optimizer,
        scheduler,
        train_config.gradient_accumulation_steps,
        train_config,
        fsdp_config if train_config.enable_fsdp else None,
        local_rank if train_config.enable_fsdp else None,
        rank if train_config.enable_fsdp else None,
    )
    if not train_config.enable_fsdp or rank == 0:
        for k, v in results.items():
            print(f"Key: {k}, Value: {v}")


if __name__ == "__main__":
    fire.Fire(main)
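
# Usage sketches (model path and flag values are illustrative; adjust as needed):
#
#   Single GPU, LoRA + int8 quantization:
#     python finetuning.py --model_name meta-llama/Llama-2-7b-hf \
#         --use_peft --peft_method lora --quantization
#
#   Multi-GPU FSDP via torchrun (sets the RANK/LOCAL_RANK/WORLD_SIZE env vars read above):
#     torchrun --nnodes 1 --nproc_per_node 4 finetuning.py \
#         --enable_fsdp --model_name meta-llama/Llama-2-7b-hf --pure_bf16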