Browse Source

fix output_dir

Maxime Labonne cách đây 1 năm
mục cha
commit
f0889f5fbd
1 tập tin đã thay đổi với 13 bổ sung và 2 xóa
  1. 13 2
      Fine_tune_Llama_2_in_Google_Colab.ipynb

+ 13 - 2
Fine_tune_Llama_2_in_Google_Colab.ipynb

@@ -6,7 +6,7 @@
       "provenance": [],
       "machine_shape": "hm",
       "gpuType": "V100",
-      "authorship_tag": "ABX9TyNKKJhDeFB7aXPizGqrvwhA",
+      "authorship_tag": "ABX9TyMgfvtuquE8AUCpv0te8LOT",
       "include_colab_link": true
     },
     "kernelspec": {
@@ -253,6 +253,15 @@
       "outputs": []
     },
     {
+      "cell_type": "markdown",
+      "source": [
        "There is a problem with the VRAM here despite `del model` and emptying the VRAM. You probably need to restart the notebook, re-execute the first three cells, and then execute this one. Please contact me if you have a fix!"
+      ],
+      "metadata": {
+        "id": "6WjzALHtSfdb"
+      }
+    },
+    {
       "cell_type": "code",
       "source": [
         "from numba import cuda\n",
@@ -270,11 +279,13 @@
         "        torch_dtype=torch.float16,\n",
         "        device_map=device_map,\n",
         "    )\n",
-        "    model = PeftModel.from_pretrained(base_model, \"./adapter\", offload_folder=\"/content/sample_data\")\n",
+        "    model = PeftModel.from_pretrained(base_model, output_dir, offload_folder=\"/content/sample_data\")\n",
         "    model = model.merge_and_unload()\n",
         "\n",
         "# Save merged weights and tokenizer\n",
         "model.save_pretrained(new_model, use_safetensors=True)\n",
+        "tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)\n",
+        "tokenizer.pad_token = tokenizer.eos_token\n",
         "tokenizer.save_pretrained(new_model)"
       ],
       "metadata": {