finetuning.py

# Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the Llama 2 Community License Agreement.
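
# Fine-tuning entry point for Llama (text) and Mllama (vision) models.
# Supports optional PEFT adapters, FSDP/HSDP sharding, bitsandbytes
# quantization, and Weights & Biases logging, configured through the
# train_config / fsdp_config dataclasses with CLI overrides via fire.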

from collections import Counter
import os
import dataclasses
import fire
import random
import torch
import torch.optim as optim
from peft import get_peft_model, PeftModel
from torch.distributed.fsdp import (
    FullyShardedDataParallel as FSDP,
    ShardingStrategy,
)
from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload
from torch.optim.lr_scheduler import StepLR
from transformers import (
    AutoConfig,
    AutoTokenizer,
    BitsAndBytesConfig,
    AutoProcessor,
    LlamaForCausalLM,
    MllamaForConditionalGeneration,
)
from transformers.models.llama.modeling_llama import LlamaDecoderLayer
from transformers.models.mllama.modeling_mllama import (
    MllamaSelfAttentionDecoderLayer,
    MllamaCrossAttentionDecoderLayer,
    MllamaVisionEncoderLayer,
)
from llama_recipes.configs import fsdp_config as FSDP_CONFIG
from llama_recipes.configs import train_config as TRAIN_CONFIG
from llama_recipes.configs import quantization_config as QUANTIZATION_CONFIG
from llama_recipes.data.concatenator import ConcatDataset
from llama_recipes.policies import AnyPrecisionAdamW, apply_fsdp_checkpointing
from llama_recipes.utils import fsdp_auto_wrap_policy
from llama_recipes.utils.config_utils import (
    update_config,
    generate_peft_config,
    generate_dataset_config,
    get_dataloader_kwargs,
    check_fsdp_config,
)
from llama_recipes.utils.dataset_utils import get_preprocessed_dataset, get_custom_data_collator
from llama_recipes.utils.fsdp_utils import hsdp_device_mesh
from llama_recipes.utils.train_utils import (
    train,
    freeze_transformer_layers,
    setup,
    setup_environ_flags,
    clear_gpu_cache,
    print_model_size,
    get_policies,
)
from accelerate.utils import is_xpu_available
from warnings import warn


def setup_wandb(train_config, fsdp_config, **kwargs):
    try:
        import wandb
    except ImportError:
        raise ImportError(
            "You are trying to use wandb which is not currently installed. "
            "Please install it using pip install wandb"
        )
    from llama_recipes.configs import wandb_config as WANDB_CONFIG

    wandb_config = WANDB_CONFIG()
    update_config(wandb_config, **kwargs)
    init_dict = dataclasses.asdict(wandb_config)
    run = wandb.init(**init_dict)
    run.config.update(train_config)
    run.config.update(fsdp_config, allow_val_change=True)
    return run


def main(**kwargs):
    # Update the configuration for the training and sharding process
    train_config, fsdp_config = TRAIN_CONFIG(), FSDP_CONFIG()
    update_config((train_config, fsdp_config), **kwargs)

    # Set the seeds for reproducibility
    if is_xpu_available():
        torch.xpu.manual_seed(train_config.seed)
    torch.manual_seed(train_config.seed)
    random.seed(train_config.seed)

    if train_config.enable_fsdp:
        setup()
        # torchrun specific
        local_rank = int(os.environ["LOCAL_RANK"])
        rank = int(os.environ["RANK"])
        world_size = int(os.environ["WORLD_SIZE"])

    if torch.distributed.is_initialized():
        if is_xpu_available():
            torch.xpu.set_device(local_rank)
        elif torch.cuda.is_available():
            torch.cuda.set_device(local_rank)
        clear_gpu_cache(local_rank)
        setup_environ_flags(rank)

    wandb_run = None

    if train_config.use_wandb:
        if not train_config.enable_fsdp or rank == 0:
            wandb_run = setup_wandb(train_config, fsdp_config, **kwargs)

    # Setting quantization configs
    bnb_config = None
    if train_config.quantization:
        if isinstance(train_config.quantization, bool):
            warn(
                "Quantization (--quantization) is a boolean, please specify quantization as '4bit' or '8bit'. "
                "Defaulting to '8bit' but this might change in the future.",
                FutureWarning,
            )
            train_config.quantization = "8bit"

        if train_config.quantization == "8bit" and train_config.enable_fsdp:
            raise ValueError("8bit quantization is not supported with FSDP, please use 4bit quantization")

        quant_config = QUANTIZATION_CONFIG()
        update_config(quant_config, **kwargs)
        bnb_config = quant_config.create_bnb_config(train_config.quantization)

    # Load the pre-trained model and setup its configuration
    use_cache = False if train_config.enable_fsdp else None
    config = AutoConfig.from_pretrained(train_config.model_name)
    if config.model_type == "mllama":
        is_vision = True
        model = MllamaForConditionalGeneration.from_pretrained(
            train_config.model_name,
            quantization_config=bnb_config,
            attn_implementation="sdpa" if train_config.use_fast_kernels else None,
            device_map="auto" if train_config.quantization and not train_config.enable_fsdp else None,
            torch_dtype=torch.float16 if train_config.use_fp16 else torch.bfloat16,
        )
        processor = AutoProcessor.from_pretrained(
            train_config.model_name if train_config.tokenizer_name is None else train_config.tokenizer_name
        )
        processor.tokenizer.padding_side = "right"
        model.supports_gradient_checkpointing = True
        model.language_model.supports_gradient_checkpointing = True
    elif config.model_type == "llama":
        is_vision = False
        model = LlamaForCausalLM.from_pretrained(
            train_config.model_name,
            quantization_config=bnb_config,
            use_cache=use_cache,
            attn_implementation="sdpa" if train_config.use_fast_kernels else None,
            device_map="auto" if train_config.quantization and not train_config.enable_fsdp else None,
            torch_dtype=torch.float16 if train_config.use_fp16 else torch.bfloat16,
        )
    else:
        raise ValueError(f"Model type {config.model_type} is not supported. Please use llama or mllama model.")

    # Load the tokenizer and add special tokens
    tokenizer = AutoTokenizer.from_pretrained(
        train_config.model_name if train_config.tokenizer_name is None else train_config.tokenizer_name
    )
    if not tokenizer.pad_token_id:
        tokenizer.pad_token_id = tokenizer.eos_token_id

    # If there is a mismatch between tokenizer vocab size and embedding matrix,
    # throw a warning and then expand the embedding matrix
    if len(tokenizer) > model.get_input_embeddings().weight.shape[0]:
        print("WARNING: Resizing the embedding matrix to match the tokenizer vocab size.")
        model.resize_token_embeddings(len(tokenizer))

    print_model_size(model, train_config, rank if train_config.enable_fsdp else 0)

    # Convert the model to bfloat16 if fsdp and pure_bf16 is enabled
    if train_config.enable_fsdp and fsdp_config.pure_bf16 and not train_config.quantization:
        model.to(torch.bfloat16)

    if train_config.use_peft:
        # Load the pre-trained peft model checkpoint and setup its configuration
        if train_config.from_peft_checkpoint:
            model = PeftModel.from_pretrained(model, train_config.from_peft_checkpoint, is_trainable=True)
            peft_config = model.peft_config
        # Generate the peft config and start fine-tuning from original model
        else:
            peft_config = generate_peft_config(train_config, kwargs)
            model = get_peft_model(model, peft_config)
        if wandb_run:
            wandb_run.config.update(peft_config)
        model.print_trainable_parameters()

    hsdp_device_mesh_plan = None
    if fsdp_config.hsdp and fsdp_config.sharding_strategy == ShardingStrategy.HYBRID_SHARD:
        hsdp_device_mesh_plan = hsdp_device_mesh(
            replica_group_size=fsdp_config.replica_group_size,
            sharding_group_size=fsdp_config.sharding_group_size,
        )
        print("HSDP device mesh is ready")

    # Setting up FSDP if enable_fsdp is enabled
    if train_config.enable_fsdp:
        check_fsdp_config(fsdp_config)

        if not train_config.use_peft and train_config.freeze_layers:
            freeze_transformer_layers(model, train_config.num_freeze_layers)

        mixed_precision_policy, wrapping_policy = get_policies(fsdp_config, rank)
        # Create the FSDP wrapper for MllamaSelfAttentionDecoderLayer, MllamaCrossAttentionDecoderLayer
        # and MllamaVisionEncoderLayer in vision models
        if is_vision:
            my_auto_wrapping_policy = fsdp_auto_wrap_policy(
                model,
                [MllamaSelfAttentionDecoderLayer, MllamaCrossAttentionDecoderLayer, MllamaVisionEncoderLayer],
            )
        else:
            # Create the FSDP wrapper for LlamaDecoderLayer in text models
            my_auto_wrapping_policy = fsdp_auto_wrap_policy(model, [LlamaDecoderLayer])

        device_id = 0
        if is_xpu_available():
            device_id = torch.xpu.current_device()
        elif torch.cuda.is_available():
            device_id = torch.cuda.current_device()

        model = FSDP(
            model,
            auto_wrap_policy=my_auto_wrapping_policy if train_config.use_peft else wrapping_policy,
            cpu_offload=CPUOffload(offload_params=True) if fsdp_config.fsdp_cpu_offload else None,
            mixed_precision=mixed_precision_policy if not fsdp_config.pure_bf16 else None,
            sharding_strategy=fsdp_config.sharding_strategy,
            device_mesh=hsdp_device_mesh_plan,
            device_id=device_id,
            limit_all_gathers=True,
            sync_module_states=train_config.low_cpu_fsdp,
            param_init_fn=(
                (lambda module: module.to_empty(device=torch.device("cuda"), recurse=False))
                if train_config.low_cpu_fsdp and rank != 0
                else None
            ),
        )
        if fsdp_config.fsdp_activation_checkpointing:
            model.enable_input_require_grads()
            model.gradient_checkpointing_enable()
            apply_fsdp_checkpointing(model)
    elif not train_config.quantization and not train_config.enable_fsdp:
        if is_xpu_available():
            model.to("xpu:0")
        elif torch.cuda.is_available():
            model.to("cuda")

    dataset_config = generate_dataset_config(train_config, kwargs)
    if is_vision:
        dataset_processor = processor
    else:
        dataset_processor = tokenizer

    # Load and preprocess the dataset for training and validation
    dataset_train = get_preprocessed_dataset(
        dataset_processor,
        dataset_config,
        split="train",
    )
    if not train_config.enable_fsdp or rank == 0:
        print(f"--> Training Set Length = {len(dataset_train)}")

    dataset_val = get_preprocessed_dataset(
        dataset_processor,
        dataset_config,
        split="test",
    )
    if not train_config.enable_fsdp or rank == 0:
        print(f"--> Validation Set Length = {len(dataset_val)}")

    if train_config.batching_strategy == "packing":
        if is_vision:
            raise ValueError("Packing is not supported for vision datasets")
        else:
            dataset_train = ConcatDataset(dataset_train, chunk_size=train_config.context_length)

    train_dl_kwargs = get_dataloader_kwargs(train_config, dataset_train, dataset_processor, "train")
    print("length of dataset_train", len(dataset_train))

    custom_data_collator = get_custom_data_collator(dataset_processor, dataset_config)
    if custom_data_collator:
        print("custom_data_collator is used")
        train_dl_kwargs["collate_fn"] = custom_data_collator

    # Create DataLoaders for the training and validation datasets
    train_dataloader = torch.utils.data.DataLoader(
        dataset_train,
        num_workers=train_config.num_workers_dataloader,
        pin_memory=True,
        **train_dl_kwargs,
    )
    print(f"--> Num of Training Set Batches loaded = {len(train_dataloader)}")

    eval_dataloader = None
    if train_config.run_validation:
        if train_config.batching_strategy == "packing":
            if is_vision:
                raise ValueError("Packing is not supported for vision datasets")
            else:
                dataset_val = ConcatDataset(dataset_val, chunk_size=train_config.context_length)

        val_dl_kwargs = get_dataloader_kwargs(train_config, dataset_val, dataset_processor, "val")
        if custom_data_collator:
            val_dl_kwargs["collate_fn"] = custom_data_collator

        eval_dataloader = torch.utils.data.DataLoader(
            dataset_val,
            num_workers=train_config.num_workers_dataloader,
            pin_memory=True,
            **val_dl_kwargs,
        )
        if len(eval_dataloader) == 0:
            raise ValueError(
                "The eval set size is too small for the dataloader to load even one batch. Please increase the size of the eval set."
            )
        print(f"--> Num of Validation Set Batches loaded = {len(eval_dataloader)}")

    # Initialize the optimizer and learning rate scheduler
    if fsdp_config.pure_bf16 and fsdp_config.optimizer == "anyprecision":
        optimizer = AnyPrecisionAdamW(
            model.parameters(),
            lr=train_config.lr,
            momentum_dtype=torch.bfloat16,
            variance_dtype=torch.bfloat16,
            use_kahan_summation=False,
            weight_decay=train_config.weight_decay,
        )
    else:
        optimizer = optim.AdamW(
            model.parameters(),
            lr=train_config.lr,
            weight_decay=train_config.weight_decay,
        )
    scheduler = StepLR(optimizer, step_size=1, gamma=train_config.gamma)

    results = train(
        model,
        train_dataloader,
        eval_dataloader,
        tokenizer,
        optimizer,
        scheduler,
        train_config.gradient_accumulation_steps,
        train_config,
        fsdp_config if train_config.enable_fsdp else None,
        local_rank if train_config.enable_fsdp else None,
        rank if train_config.enable_fsdp else None,
        wandb_run,
    )
    if not train_config.enable_fsdp or rank == 0:
        for k, v in results.items():
            print(f"Key: {k}, Value: {v}")
        if train_config.use_wandb:
            for k, v in results.items():
                wandb_run.summary[k] = v


if __name__ == "__main__":
    fire.Fire(main)
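

# Example launches (illustrative only; model path, GPU count, PEFT method,
# and output directory are placeholders to adapt to your setup):
#
#   torchrun --nnodes 1 --nproc_per_node 4 finetuning.py \
#       --enable_fsdp --use_peft --peft_method lora \
#       --model_name /path/to/llama/model --output_dir /path/to/save/peft/model
#
# or, on a single GPU without FSDP:
#
#   python finetuning.py --use_peft --peft_method lora --quantization 4bit \
#       --model_name /path/to/llama/model --output_dir /path/to/save/peft/model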