Matthias Reso 2 months ago
parent commit 0fc58c607e

+ 26 - 26
src/tests/test_batching.py

@@ -31,15 +31,15 @@ fake_samsum_dataset = 2048*[{'id': '420',
  'summary': 'Mario and Luigi are going to save the princess.'}]
 
 @pytest.mark.skip_missing_tokenizer
-@patch('llama_recipes.finetuning.train')
-@patch('llama_recipes.finetuning.AutoTokenizer')
-@patch("llama_recipes.finetuning.AutoConfig.from_pretrained")
-@patch("llama_recipes.finetuning.AutoProcessor")
-@patch("llama_recipes.finetuning.MllamaForConditionalGeneration.from_pretrained")
-@patch('llama_recipes.finetuning.LlamaForCausalLM.from_pretrained')
-@patch('llama_recipes.finetuning.optim.AdamW')
-@patch('llama_recipes.finetuning.StepLR')
-@patch('llama_recipes.datasets.samsum_dataset.datasets')
+@patch('llama_cookbook.finetuning.train')
+@patch('llama_cookbook.finetuning.AutoTokenizer')
+@patch("llama_cookbook.finetuning.AutoConfig.from_pretrained")
+@patch("llama_cookbook.finetuning.AutoProcessor")
+@patch("llama_cookbook.finetuning.MllamaForConditionalGeneration.from_pretrained")
+@patch('llama_cookbook.finetuning.LlamaForCausalLM.from_pretrained')
+@patch('llama_cookbook.finetuning.optim.AdamW')
+@patch('llama_cookbook.finetuning.StepLR')
+@patch('llama_cookbook.datasets.samsum_dataset.datasets')
 def test_packing(
     datasets,
     step_lr,
@@ -55,7 +55,7 @@ def test_packing(
     llama_version,
     model_type,
     ):
-    from llama_recipes.finetuning import main
+    from llama_cookbook.finetuning import main
 
     setup_tokenizer(tokenizer)
     setup_processor(processor)
@@ -101,21 +101,21 @@ def test_packing(
 
 
 @pytest.mark.skip_missing_tokenizer
-@patch("llama_recipes.utils.train_utils.torch.cuda.is_bf16_supported")
-@patch("llama_recipes.finetuning.torch.cuda.is_available")
-@patch('llama_recipes.finetuning.train')
-@patch('llama_recipes.finetuning.AutoTokenizer')
-@patch("llama_recipes.finetuning.AutoConfig.from_pretrained")
-@patch("llama_recipes.finetuning.AutoProcessor")
-@patch("llama_recipes.finetuning.MllamaForConditionalGeneration.from_pretrained")
-@patch('llama_recipes.finetuning.LlamaForCausalLM.from_pretrained')
-@patch('llama_recipes.finetuning.optim.AdamW')
-@patch('llama_recipes.finetuning.StepLR')
-@patch('llama_recipes.finetuning.setup')
-@patch('llama_recipes.finetuning.FSDP')
-@patch('llama_recipes.finetuning.torch.distributed.is_initialized')
-@patch('llama_recipes.utils.config_utils.dist')
-@patch('llama_recipes.datasets.samsum_dataset.datasets')
+@patch("llama_cookbook.utils.train_utils.torch.cuda.is_bf16_supported")
+@patch("llama_cookbook.finetuning.torch.cuda.is_available")
+@patch('llama_cookbook.finetuning.train')
+@patch('llama_cookbook.finetuning.AutoTokenizer')
+@patch("llama_cookbook.finetuning.AutoConfig.from_pretrained")
+@patch("llama_cookbook.finetuning.AutoProcessor")
+@patch("llama_cookbook.finetuning.MllamaForConditionalGeneration.from_pretrained")
+@patch('llama_cookbook.finetuning.LlamaForCausalLM.from_pretrained')
+@patch('llama_cookbook.finetuning.optim.AdamW')
+@patch('llama_cookbook.finetuning.StepLR')
+@patch('llama_cookbook.finetuning.setup')
+@patch('llama_cookbook.finetuning.FSDP')
+@patch('llama_cookbook.finetuning.torch.distributed.is_initialized')
+@patch('llama_cookbook.utils.config_utils.dist')
+@patch('llama_cookbook.datasets.samsum_dataset.datasets')
 def test_distributed_packing(
     datasets,
     dist,
@@ -138,7 +138,7 @@ def test_distributed_packing(
     model_type,
     ):
     import os
-    from llama_recipes.finetuning import main
+    from llama_cookbook.finetuning import main
 
     setup_tokenizer(tokenizer)
     setup_processor(processor)

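The pattern behind every changed decorator above: `unittest.mock.patch` replaces a name in the module where it is looked up, so after the `llama_recipes` → `llama_cookbook` rename each target string has to point at the new package path or the real callable would still run. A minimal standalone sketch (not part of this commit; the `main(...)` call and its arguments are omitted):

```python
from unittest.mock import patch

# patch() resolves its target lazily: it imports llama_cookbook.finetuning and
# swaps out `train` only inside that module's namespace.
with patch("llama_cookbook.finetuning.train") as train_mock:
    from llama_cookbook.finetuning import main  # sees the patched train
    # calling main(...) here would hit train_mock instead of the real training loop

assert train_mock.call_count == 0  # nothing was called in this sketch
```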
+ 1 - 1
src/tests/test_chat_completion.py

@@ -5,7 +5,7 @@ from unittest.mock import patch
 
 import pytest
 import torch
-from llama_recipes.inference.chat_utils import read_dialogs_from_file
+from llama_cookbook.inference.chat_utils import read_dialogs_from_file
 
 ROOT_DIR = Path(__file__).parents[2]
 CHAT_COMPLETION_DIR = ROOT_DIR / "getting-started/inference/local_inference/chat_completion/"

+ 41 - 41
src/tests/test_finetuning.py

@@ -9,9 +9,9 @@ from unittest.mock import patch
 import pytest
 
 import torch
-from llama_recipes.data.sampler import LengthBasedBatchSampler
+from llama_cookbook.data.sampler import LengthBasedBatchSampler
 
-from llama_recipes.finetuning import main
+from llama_cookbook.finetuning import main
 from pytest import approx
 from torch.optim import AdamW
 from torch.utils.data.dataloader import DataLoader
@@ -32,18 +32,18 @@ def get_fake_dataset():
     ]
 
 
-@patch("llama_recipes.finetuning.torch.cuda.is_available")
-@patch("llama_recipes.finetuning.train")
-@patch("llama_recipes.finetuning.MllamaForConditionalGeneration.from_pretrained")
-@patch("llama_recipes.finetuning.AutoProcessor.from_pretrained")
-@patch("llama_recipes.finetuning.LlamaForCausalLM.from_pretrained")
-@patch("llama_recipes.finetuning.AutoConfig.from_pretrained")
-@patch("llama_recipes.finetuning.AutoTokenizer.from_pretrained")
-@patch("llama_recipes.finetuning.get_preprocessed_dataset")
-@patch("llama_recipes.finetuning.generate_peft_config")
-@patch("llama_recipes.finetuning.get_peft_model")
-@patch("llama_recipes.finetuning.optim.AdamW")
-@patch("llama_recipes.finetuning.StepLR")
+@patch("llama_cookbook.finetuning.torch.cuda.is_available")
+@patch("llama_cookbook.finetuning.train")
+@patch("llama_cookbook.finetuning.MllamaForConditionalGeneration.from_pretrained")
+@patch("llama_cookbook.finetuning.AutoProcessor.from_pretrained")
+@patch("llama_cookbook.finetuning.LlamaForCausalLM.from_pretrained")
+@patch("llama_cookbook.finetuning.AutoConfig.from_pretrained")
+@patch("llama_cookbook.finetuning.AutoTokenizer.from_pretrained")
+@patch("llama_cookbook.finetuning.get_preprocessed_dataset")
+@patch("llama_cookbook.finetuning.generate_peft_config")
+@patch("llama_cookbook.finetuning.get_peft_model")
+@patch("llama_cookbook.finetuning.optim.AdamW")
+@patch("llama_cookbook.finetuning.StepLR")
 @pytest.mark.parametrize("cuda_is_available", [True, False])
 @pytest.mark.parametrize("run_validation", [True, False])
 @pytest.mark.parametrize("use_peft", [True, False])
@@ -107,15 +107,15 @@ def test_finetuning(
         assert model.return_value.to.call_count == 0
 
 
-@patch("llama_recipes.finetuning.get_peft_model")
-@patch("llama_recipes.finetuning.setup")
-@patch("llama_recipes.finetuning.train")
-@patch("llama_recipes.finetuning.MllamaForConditionalGeneration.from_pretrained")
-@patch("llama_recipes.finetuning.AutoProcessor.from_pretrained")
-@patch("llama_recipes.finetuning.LlamaForCausalLM.from_pretrained")
-@patch("llama_recipes.finetuning.AutoConfig.from_pretrained")
-@patch("llama_recipes.finetuning.AutoTokenizer.from_pretrained")
-@patch("llama_recipes.finetuning.get_preprocessed_dataset")
+@patch("llama_cookbook.finetuning.get_peft_model")
+@patch("llama_cookbook.finetuning.setup")
+@patch("llama_cookbook.finetuning.train")
+@patch("llama_cookbook.finetuning.MllamaForConditionalGeneration.from_pretrained")
+@patch("llama_cookbook.finetuning.AutoProcessor.from_pretrained")
+@patch("llama_cookbook.finetuning.LlamaForCausalLM.from_pretrained")
+@patch("llama_cookbook.finetuning.AutoConfig.from_pretrained")
+@patch("llama_cookbook.finetuning.AutoTokenizer.from_pretrained")
+@patch("llama_cookbook.finetuning.get_preprocessed_dataset")
 def test_finetuning_peft_llama_adapter(
     get_dataset,
     tokenizer,
@@ -165,15 +165,15 @@ def test_finetuning_peft_llama_adapter(
         main(**kwargs)
 
 
-@patch("llama_recipes.finetuning.train")
-@patch("llama_recipes.finetuning.MllamaForConditionalGeneration.from_pretrained")
-@patch("llama_recipes.finetuning.AutoProcessor.from_pretrained")
-@patch("llama_recipes.finetuning.LlamaForCausalLM.from_pretrained")
-@patch("llama_recipes.finetuning.AutoConfig.from_pretrained")
-@patch("llama_recipes.finetuning.AutoTokenizer.from_pretrained")
-@patch("llama_recipes.finetuning.get_preprocessed_dataset")
-@patch("llama_recipes.finetuning.get_peft_model")
-@patch("llama_recipes.finetuning.StepLR")
+@patch("llama_cookbook.finetuning.train")
+@patch("llama_cookbook.finetuning.MllamaForConditionalGeneration.from_pretrained")
+@patch("llama_cookbook.finetuning.AutoProcessor.from_pretrained")
+@patch("llama_cookbook.finetuning.LlamaForCausalLM.from_pretrained")
+@patch("llama_cookbook.finetuning.AutoConfig.from_pretrained")
+@patch("llama_cookbook.finetuning.AutoTokenizer.from_pretrained")
+@patch("llama_cookbook.finetuning.get_preprocessed_dataset")
+@patch("llama_cookbook.finetuning.get_peft_model")
+@patch("llama_cookbook.finetuning.StepLR")
 def test_finetuning_weight_decay(
     step_lr,
     get_peft_model,
@@ -210,15 +210,15 @@ def test_finetuning_weight_decay(
     assert optimizer.state_dict()["param_groups"][0]["weight_decay"] == approx(0.01)
 
 
-@patch("llama_recipes.finetuning.train")
-@patch("llama_recipes.finetuning.MllamaForConditionalGeneration.from_pretrained")
-@patch("llama_recipes.finetuning.AutoProcessor.from_pretrained")
-@patch("llama_recipes.finetuning.LlamaForCausalLM.from_pretrained")
-@patch("llama_recipes.finetuning.AutoConfig.from_pretrained")
-@patch("llama_recipes.finetuning.AutoTokenizer.from_pretrained")
-@patch("llama_recipes.finetuning.get_preprocessed_dataset")
-@patch("llama_recipes.finetuning.optim.AdamW")
-@patch("llama_recipes.finetuning.StepLR")
+@patch("llama_cookbook.finetuning.train")
+@patch("llama_cookbook.finetuning.MllamaForConditionalGeneration.from_pretrained")
+@patch("llama_cookbook.finetuning.AutoProcessor.from_pretrained")
+@patch("llama_cookbook.finetuning.LlamaForCausalLM.from_pretrained")
+@patch("llama_cookbook.finetuning.AutoConfig.from_pretrained")
+@patch("llama_cookbook.finetuning.AutoTokenizer.from_pretrained")
+@patch("llama_cookbook.finetuning.get_preprocessed_dataset")
+@patch("llama_cookbook.finetuning.optim.AdamW")
+@patch("llama_cookbook.finetuning.StepLR")
 def test_batching_strategy(
     step_lr,
     optimizer,

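Aside (not taken from the test file): the stacked `@pytest.mark.parametrize` decorators on `test_finetuning` above expand into the cartesian product of their values, so the three boolean flags alone produce 2 × 2 × 2 = 8 invocations per model/tokenizer combination. A self-contained sketch of that expansion:

```python
import pytest

@pytest.mark.parametrize("cuda_is_available", [True, False])
@pytest.mark.parametrize("run_validation", [True, False])
@pytest.mark.parametrize("use_peft", [True, False])
def test_grid(cuda_is_available, run_validation, use_peft):
    # pytest collects 8 cases here, one per combination of the three flags
    assert isinstance(cuda_is_available, bool)
```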
+ 1 - 1
src/tests/test_finetuning_data_formatter.py

@@ -5,7 +5,7 @@ from enum import Enum
 import unittest
 from typing import Optional, List
 
-from llama_recipes.data.llama_guard.finetuning_data_formatter import (
+from llama_cookbook.data.llama_guard.finetuning_data_formatter import (
     AugmentationConfigs,
     Category,
     create_formatted_finetuning_examples,

+ 3 - 3
src/tests/test_sampler.py

@@ -6,8 +6,8 @@ import pytest
 
 import torch
 
-from llama_recipes.data.sampler import LengthBasedBatchSampler
-from llama_recipes.data.sampler import DistributedLengthBasedBatchSampler
+from llama_cookbook.data.sampler import LengthBasedBatchSampler
+from llama_cookbook.data.sampler import DistributedLengthBasedBatchSampler
 
 SAMPLES = 33
 
@@ -83,4 +83,4 @@ def test_dist_batch_sampling(dataset, batch_size):
     
     assert ids_1.isdisjoint(ids_2)
     assert len(ids_1)+len(ids_2) > 0
-    assert len(ids_1)+len(ids_2) == len(dataset) // batch_size  *  batch_size 
+    assert len(ids_1)+len(ids_2) == len(dataset) // batch_size  *  batch_size 

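For reference, the arithmetic in the final assertion of `test_dist_batch_sampling`: floor-dividing by `batch_size` and multiplying back drops the ragged tail, so the two shards together cover only the samples that fill complete batches. A quick check with illustrative values (`batch_size` is parametrized in the real test; 4 is an assumed value):

```python
SAMPLES = 33                    # matches the SAMPLES constant in test_sampler.py
batch_size = 4                  # assumed value for illustration
full = SAMPLES // batch_size * batch_size
assert full == 32               # 8 complete batches
assert SAMPLES - full == 1      # the one leftover sample is dropped
```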
+ 5 - 5
src/tests/test_train_utils.py

@@ -9,7 +9,7 @@ import torch
 import os
 import shutil
 
-from llama_recipes.utils.train_utils import train
+from llama_cookbook.utils.train_utils import train
 
 TEMP_OUTPUT_DIR = os.getcwd() + "/tmp"
 
@@ -23,10 +23,10 @@ def temp_output_dir():
     shutil.rmtree(temp_output_dir)
 
 
-@patch("llama_recipes.utils.train_utils.MemoryTrace")
-@patch("llama_recipes.utils.train_utils.nullcontext")
-@patch("llama_recipes.utils.train_utils.torch.cuda.amp.GradScaler")
-@patch("llama_recipes.utils.train_utils.torch.cuda.amp.autocast")
+@patch("llama_cookbook.utils.train_utils.MemoryTrace")
+@patch("llama_cookbook.utils.train_utils.nullcontext")
+@patch("llama_cookbook.utils.train_utils.torch.cuda.amp.GradScaler")
+@patch("llama_cookbook.utils.train_utils.torch.cuda.amp.autocast")
 def test_gradient_accumulation(
     autocast,
     scaler,