
Cleaned up notebook outputs containing references to the old repo name

Connor Treacy, 3 months ago
commit b607b7645c

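Both hunks in this commit null each cell's "execution_count" and empty its "outputs" list. A minimal sketch of how the same cleanup could be scripted with the nbformat library (the file paths are the two touched by this commit; the clear_outputs helper is illustrative, not necessarily how the author produced the change):

    # Illustrative cleanup, assuming nbformat is installed (pip install nbformat).
    import nbformat

    def clear_outputs(path: str) -> None:
        nb = nbformat.read(path, as_version=4)
        for cell in nb.cells:
            if cell.cell_type == "code":
                cell.outputs = []            # drop stored stdout/stderr logs
                cell.execution_count = None  # serializes as "execution_count": null
        nbformat.write(nb, path)

    clear_outputs("3p-integrations/togetherai/knowledge_graphs_with_structured_outputs.ipynb")
    clear_outputs("getting-started/finetuning/quickstart_peft_finetuning.ipynb")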
+ 2 - 47
3p-integrations/togetherai/knowledge_graphs_with_structured_outputs.ipynb

@@ -30,7 +30,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 1,
+   "execution_count": null,
    "metadata": {
     "colab": {
      "base_uri": "https://localhost:8080/"
@@ -38,52 +38,7 @@
     "id": "DFAjay1FZVrn",
     "outputId": "d4b17b31-c125-4de5-ad54-6d4d08d81eaa"
    },
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Requirement already satisfied: together in /Users/jeffxtang/anaconda3/envs/llama-recipes/lib/python3.10/site-packages (1.3.3)\n",
-      "Requirement already satisfied: aiohttp<4.0.0,>=3.9.3 in /Users/jeffxtang/anaconda3/envs/llama-recipes/lib/python3.10/site-packages (from together) (3.10.10)\n",
-      "Requirement already satisfied: click<9.0.0,>=8.1.7 in /Users/jeffxtang/anaconda3/envs/llama-recipes/lib/python3.10/site-packages (from together) (8.1.7)\n",
-      "Requirement already satisfied: eval-type-backport<0.3.0,>=0.1.3 in /Users/jeffxtang/anaconda3/envs/llama-recipes/lib/python3.10/site-packages (from together) (0.2.0)\n",
-      "Requirement already satisfied: filelock<4.0.0,>=3.13.1 in /Users/jeffxtang/anaconda3/envs/llama-recipes/lib/python3.10/site-packages (from together) (3.16.1)\n",
-      "Requirement already satisfied: numpy>=1.23.5 in /Users/jeffxtang/anaconda3/envs/llama-recipes/lib/python3.10/site-packages (from together) (1.26.4)\n",
-      "Requirement already satisfied: pillow<11.0.0,>=10.3.0 in /Users/jeffxtang/anaconda3/envs/llama-recipes/lib/python3.10/site-packages (from together) (10.4.0)\n",
-      "Requirement already satisfied: pyarrow>=10.0.1 in /Users/jeffxtang/anaconda3/envs/llama-recipes/lib/python3.10/site-packages (from together) (18.0.0)\n",
-      "Requirement already satisfied: pydantic<3.0.0,>=2.6.3 in /Users/jeffxtang/anaconda3/envs/llama-recipes/lib/python3.10/site-packages (from together) (2.9.2)\n",
-      "Requirement already satisfied: requests<3.0.0,>=2.31.0 in /Users/jeffxtang/anaconda3/envs/llama-recipes/lib/python3.10/site-packages (from together) (2.32.3)\n",
-      "Requirement already satisfied: rich<14.0.0,>=13.8.1 in /Users/jeffxtang/anaconda3/envs/llama-recipes/lib/python3.10/site-packages (from together) (13.9.3)\n",
-      "Requirement already satisfied: tabulate<0.10.0,>=0.9.0 in /Users/jeffxtang/anaconda3/envs/llama-recipes/lib/python3.10/site-packages (from together) (0.9.0)\n",
-      "Requirement already satisfied: tqdm<5.0.0,>=4.66.2 in /Users/jeffxtang/anaconda3/envs/llama-recipes/lib/python3.10/site-packages (from together) (4.66.6)\n",
-      "Requirement already satisfied: typer<0.13,>=0.9 in /Users/jeffxtang/anaconda3/envs/llama-recipes/lib/python3.10/site-packages (from together) (0.12.5)\n",
-      "Requirement already satisfied: aiohappyeyeballs>=2.3.0 in /Users/jeffxtang/anaconda3/envs/llama-recipes/lib/python3.10/site-packages (from aiohttp<4.0.0,>=3.9.3->together) (2.4.3)\n",
-      "Requirement already satisfied: aiosignal>=1.1.2 in /Users/jeffxtang/anaconda3/envs/llama-recipes/lib/python3.10/site-packages (from aiohttp<4.0.0,>=3.9.3->together) (1.3.1)\n",
-      "Requirement already satisfied: attrs>=17.3.0 in /Users/jeffxtang/anaconda3/envs/llama-recipes/lib/python3.10/site-packages (from aiohttp<4.0.0,>=3.9.3->together) (24.2.0)\n",
-      "Requirement already satisfied: frozenlist>=1.1.1 in /Users/jeffxtang/anaconda3/envs/llama-recipes/lib/python3.10/site-packages (from aiohttp<4.0.0,>=3.9.3->together) (1.5.0)\n",
-      "Requirement already satisfied: multidict<7.0,>=4.5 in /Users/jeffxtang/anaconda3/envs/llama-recipes/lib/python3.10/site-packages (from aiohttp<4.0.0,>=3.9.3->together) (6.1.0)\n",
-      "Requirement already satisfied: yarl<2.0,>=1.12.0 in /Users/jeffxtang/anaconda3/envs/llama-recipes/lib/python3.10/site-packages (from aiohttp<4.0.0,>=3.9.3->together) (1.17.1)\n",
-      "Requirement already satisfied: async-timeout<5.0,>=4.0 in /Users/jeffxtang/anaconda3/envs/llama-recipes/lib/python3.10/site-packages (from aiohttp<4.0.0,>=3.9.3->together) (4.0.3)\n",
-      "Requirement already satisfied: annotated-types>=0.6.0 in /Users/jeffxtang/anaconda3/envs/llama-recipes/lib/python3.10/site-packages (from pydantic<3.0.0,>=2.6.3->together) (0.7.0)\n",
-      "Requirement already satisfied: pydantic-core==2.23.4 in /Users/jeffxtang/anaconda3/envs/llama-recipes/lib/python3.10/site-packages (from pydantic<3.0.0,>=2.6.3->together) (2.23.4)\n",
-      "Requirement already satisfied: typing-extensions>=4.6.1 in /Users/jeffxtang/anaconda3/envs/llama-recipes/lib/python3.10/site-packages (from pydantic<3.0.0,>=2.6.3->together) (4.12.2)\n",
-      "Requirement already satisfied: charset-normalizer<4,>=2 in /Users/jeffxtang/anaconda3/envs/llama-recipes/lib/python3.10/site-packages (from requests<3.0.0,>=2.31.0->together) (3.4.0)\n",
-      "Requirement already satisfied: idna<4,>=2.5 in /Users/jeffxtang/anaconda3/envs/llama-recipes/lib/python3.10/site-packages (from requests<3.0.0,>=2.31.0->together) (3.10)\n",
-      "Requirement already satisfied: urllib3<3,>=1.21.1 in /Users/jeffxtang/anaconda3/envs/llama-recipes/lib/python3.10/site-packages (from requests<3.0.0,>=2.31.0->together) (2.2.3)\n",
-      "Requirement already satisfied: certifi>=2017.4.17 in /Users/jeffxtang/anaconda3/envs/llama-recipes/lib/python3.10/site-packages (from requests<3.0.0,>=2.31.0->together) (2024.8.30)\n",
-      "Requirement already satisfied: markdown-it-py>=2.2.0 in /Users/jeffxtang/anaconda3/envs/llama-recipes/lib/python3.10/site-packages (from rich<14.0.0,>=13.8.1->together) (3.0.0)\n",
-      "Requirement already satisfied: pygments<3.0.0,>=2.13.0 in /Users/jeffxtang/anaconda3/envs/llama-recipes/lib/python3.10/site-packages (from rich<14.0.0,>=13.8.1->together) (2.18.0)\n",
-      "Requirement already satisfied: shellingham>=1.3.0 in /Users/jeffxtang/anaconda3/envs/llama-recipes/lib/python3.10/site-packages (from typer<0.13,>=0.9->together) (1.5.4)\n",
-      "Requirement already satisfied: mdurl~=0.1 in /Users/jeffxtang/anaconda3/envs/llama-recipes/lib/python3.10/site-packages (from markdown-it-py>=2.2.0->rich<14.0.0,>=13.8.1->together) (0.1.2)\n",
-      "Requirement already satisfied: propcache>=0.2.0 in /Users/jeffxtang/anaconda3/envs/llama-recipes/lib/python3.10/site-packages (from yarl<2.0,>=1.12.0->aiohttp<4.0.0,>=3.9.3->together) (0.2.0)\n",
-      "Collecting graphviz\n",
-      "  Using cached graphviz-0.20.3-py3-none-any.whl.metadata (12 kB)\n",
-      "Using cached graphviz-0.20.3-py3-none-any.whl (47 kB)\n",
-      "Installing collected packages: graphviz\n",
-      "Successfully installed graphviz-0.20.3\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
     "!pip install together\n",
     "!pip install graphviz"

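The hunk above strips a long pip-install log whose site-packages paths pointed at the old llama-recipes environment. A quick, illustrative check (again using nbformat; the assertions describe the intended end state and are an assumption, not part of the commit) that every code cell in the notebook is now clean:

    # Illustrative post-cleanup check for the knowledge-graphs notebook.
    import nbformat

    nb = nbformat.read(
        "3p-integrations/togetherai/knowledge_graphs_with_structured_outputs.ipynb",
        as_version=4,
    )
    for cell in nb.cells:
        if cell.cell_type == "code":
            assert cell.outputs == []            # no stored output blobs
            assert cell.execution_count is None  # matches "execution_count": null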
+ 4 - 50
getting-started/finetuning/quickstart_peft_finetuning.ipynb

@@ -217,19 +217,9 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 4,
+   "execution_count": null,
    "metadata": {},
-   "outputs": [
-    {
-     "name": "stderr",
-     "output_type": "stream",
-     "text": [
-      "/home/ubuntu/llama-recipes/src/llama_recipes/model_checkpointing/checkpoint_handler.py:17: DeprecationWarning: `torch.distributed._shard.checkpoint` will be deprecated, use `torch.distributed.checkpoint` instead\n",
-      "  from torch.distributed._shard.checkpoint import (\n",
-      "Preprocessing dataset: 100%|██████████| 14732/14732 [00:02<00:00, 5872.02it/s]\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
     "from llama_recipes.configs.datasets import samsum_dataset\n",
     "from llama_recipes.utils.dataset_utils import get_dataloader\n",
@@ -283,45 +273,9 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 6,
+   "execution_count": null,
    "metadata": {},
-   "outputs": [
-    {
-     "name": "stderr",
-     "output_type": "stream",
-     "text": [
-      "/home/ubuntu/llama-recipes/src/llama_recipes/utils/train_utils.py:92: FutureWarning: `torch.cuda.amp.GradScaler(args...)` is deprecated. Please use `torch.amp.GradScaler('cuda', args...)` instead.\n",
-      "  scaler = torch.cuda.amp.GradScaler()\n",
-      "/home/ubuntu/miniconda3/envs/llama/lib/python3.11/site-packages/torch/cuda/memory.py:343: FutureWarning: torch.cuda.reset_max_memory_allocated now calls torch.cuda.reset_peak_memory_stats, which resets /all/ peak memory stats.\n",
-      "  warnings.warn(\n",
-      "Training Epoch: 1:   0%|\u001b[34m          \u001b[0m| 0/319 [00:00<?, ?it/s]huggingface/tokenizers: The current process just got forked, after parallelism has already been used. Disabling parallelism to avoid deadlocks...\n",
-      "To disable this warning, you can either:\n",
-      "\t- Avoid using `tokenizers` before the fork if possible\n",
-      "\t- Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false)\n",
-      "/home/ubuntu/llama-recipes/src/llama_recipes/utils/train_utils.py:151: FutureWarning: `torch.cuda.amp.autocast(args...)` is deprecated. Please use `torch.amp.autocast('cuda', args...)` instead.\n",
-      "  with autocast():\n",
-      "/home/ubuntu/miniconda3/envs/llama/lib/python3.11/site-packages/torch/_dynamo/eval_frame.py:600: UserWarning: torch.utils.checkpoint: the use_reentrant parameter should be passed explicitly. In version 2.4 we will raise an exception if use_reentrant is not passed. use_reentrant=False is recommended, but if you need to preserve the current default behavior, you can pass use_reentrant=True. Refer to docs for more details on the differences between the two variants.\n",
-      "  return fn(*args, **kwargs)\n",
-      "/home/ubuntu/miniconda3/envs/llama/lib/python3.11/site-packages/bitsandbytes/autograd/_functions.py:316: UserWarning: MatMul8bitLt: inputs will be cast from torch.float32 to float16 during quantization\n",
-      "  warnings.warn(f\"MatMul8bitLt: inputs will be cast from {A.dtype} to float16 during quantization\")\n",
-      "/home/ubuntu/miniconda3/envs/llama/lib/python3.11/site-packages/torch/utils/checkpoint.py:295: FutureWarning: `torch.cpu.amp.autocast(args...)` is deprecated. Please use `torch.amp.autocast('cpu', args...)` instead.\n",
-      "  with torch.enable_grad(), device_autocast_ctx, torch.cpu.amp.autocast(**ctx.cpu_autocast_kwargs):  # type: ignore[attr-defined]\n",
-      "Training Epoch: 1/1, step 1278/1279 completed (loss: 0.28094857931137085): : 320it [2:08:50, 24.16s/it]                      4.21s/it]  \n"
-     ]
-    },
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Max CUDA memory allocated was 15 GB\n",
-      "Max CUDA memory reserved was 16 GB\n",
-      "Peak active CUDA memory was 15 GB\n",
-      "CUDA Malloc retries : 0\n",
-      "CPU Total Peak Memory consumed during the train (max): 2 GB\n",
-      "Epoch 1: train_perplexity=1.3404, train_epoch_loss=0.2930, epoch time 7730.981359725998s\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
     "import torch.optim as optim\n",
     "from llama_recipes.utils.train_utils import train\n",