@@ -16,7 +16,11 @@
"source": [
"## PEFT Finetuning Quick Start Notebook\n",
"\n",
- "This notebook shows how to train a Meta Llama 3 model on a single GPU (e.g. A10 with 24GB) using int8 quantization and LoRA."
+ "This notebook shows how to train a Meta Llama 3 model on a single GPU (e.g. A10 with 24GB) using int8 quantization and LoRA finetuning.\n",
+ "\n",
+ "**_Note:_** To run this notebook on a machine with less than 24GB VRAM (e.g. T4 with 15GB) the context length of the training dataset needs to be adapted.\n",
+ "We do this based on the available VRAM during execution.\n",
+ "If you run into OOM issues try to further lower the value of train_config.context_length."
]
},
{
@@ -91,7 +95,7 @@
"train_config.lr = 3e-4\n",
"train_config.use_fast_kernels = True\n",
"train_config.use_fp16 = True\n",
- "train_config.context_length = 2048\n",
+ "train_config.context_length = 1024 if torch.cuda.get_device_properties(0).total_memory < 16e9 else 2048 # T4 15GB or A10 24GB\n",
"train_config.batching_strategy = \"packing\"\n",
"train_config.output_dir = \"meta-llama-samsum\"\n",
"\n",
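For reference, the device-dependent context length can be sanity-checked as a standalone snippet; this is a minimal sketch, assuming PyTorch with a single CUDA device at index 0 (the 16e9-byte threshold mirrors the hunk above and is a heuristic, not a library constant):

    import torch

    # Query the total VRAM of the first CUDA device, in bytes.
    total_vram = torch.cuda.get_device_properties(0).total_memory

    # Mirror the notebook's heuristic: 1024 tokens below 16GB (e.g. T4 with 15GB),
    # 2048 tokens otherwise (e.g. A10 with 24GB).
    context_length = 1024 if total_vram < 16e9 else 2048
    print(f"context_length={context_length} ({total_vram / 1e9:.1f} GB VRAM)")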