Igor Kasianenko 3 months ago
parent
commit
7f5feae63b
3 files changed, 5 insertions and 5 deletions
  1. src/llama_recipes/configs/datasets.py (+1 -1)
  2. src/tests/datasets/test_custom_dataset.py (+3 -3)
  3. src/tests/test_chat_completion.py (+1 -1)

src/llama_recipes/configs/datasets.py (+1 -1)

@@ -28,7 +28,7 @@ class alpaca_dataset:
 @dataclass
 class custom_dataset:
     dataset: str = "custom_dataset"
-    file: str = "src/llama_cookbook/datasets/custom_dataset.py"
+    file: str = "getting-started/finetuning/datasets/custom_dataset.py"
     train_split: str = "train"
     test_split: str = "validation"
     data_path: str = ""
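
For reference, the path stored in custom_dataset.file is loaded at runtime by llama-recipes, which (per its custom-dataset docs) expects the module to expose a get_custom_dataset(dataset_config, tokenizer, split) function. A minimal sketch of such a module follows; the samsum dataset and the tokenization step are illustrative only, not the contents of the actual relocated file:

    # Sketch of a module matching the relocated path
    # getting-started/finetuning/datasets/custom_dataset.py.
    # The entry-point name follows the loader contract documented by
    # llama-recipes; the dataset used here is only an example.
    from datasets import load_dataset

    def get_custom_dataset(dataset_config, tokenizer, split):
        # Any Hugging Face dataset works; "samsum" is illustrative.
        dataset = load_dataset("samsum", split=split)

        def tokenize(sample):
            return tokenizer(sample["dialogue"], truncation=True)

        return dataset.map(tokenize, remove_columns=list(dataset.features))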

src/tests/datasets/test_custom_dataset.py (+3 -3)

@@ -52,7 +52,7 @@ def test_custom_dataset(step_lr, optimizer, get_model, tokenizer, train, mocker,
     kwargs = {
         "dataset": "custom_dataset",
         "model_name": llama_version,
-        "custom_dataset.file": "recipes/quickstart/finetuning/datasets/custom_dataset.py",
+        "custom_dataset.file": "getting-started/finetuning/datasets/custom_dataset.py",
         "custom_dataset.train_split": "validation",
         "batch_size_training": 2,
         "val_batch_size": 4,
@@ -111,7 +111,7 @@ def test_unknown_dataset_error(step_lr, optimizer, tokenizer, get_model, get_con
 
     kwargs = {
         "dataset": "custom_dataset",
-        "custom_dataset.file": "recipes/quickstart/finetuning/datasets/custom_dataset.py:get_unknown_dataset",
+        "custom_dataset.file": "getting-started/finetuning/datasets/custom_dataset.py:get_unknown_dataset",
         "batch_size_training": 1,
         "use_peft": False,
         }
@@ -121,7 +121,7 @@ def test_unknown_dataset_error(step_lr, optimizer, tokenizer, get_model, get_con
 @pytest.mark.skip_missing_tokenizer
 @patch('llama_recipes.finetuning.AutoTokenizer')
 def test_tokenize_dialog(tokenizer, monkeypatch, setup_tokenizer, llama_version):
-    monkeypatch.syspath_prepend("recipes/quickstart/finetuning/datasets/")
+    monkeypatch.syspath_prepend("getting-started/finetuning/datasets/")
     from custom_dataset import tokenize_dialog
 
     setup_tokenizer(tokenizer)
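
The :get_unknown_dataset suffix in the second hunk shows that custom_dataset.file accepts an optional "path.py:function" form to select a loader other than the default get_custom_dataset. A sketch of how such a spec can be resolved; the helper name and exact error handling here are assumptions, not the library's internals:

    # Hypothetical resolver for a "path.py[:function]" spec; the real
    # llama-recipes implementation may differ in its details.
    import importlib.util
    from pathlib import Path

    def resolve_loader(file_spec, default_func="get_custom_dataset"):
        # Split an optional ":func_name" suffix off the module path.
        path, _, func_name = file_spec.partition(":")
        func_name = func_name or default_func

        spec = importlib.util.spec_from_file_location(Path(path).stem, path)
        module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(module)

        if not hasattr(module, func_name):
            # Mirrors what test_unknown_dataset_error exercises: asking
            # for a function the module does not define should raise.
            raise AttributeError(f"{path} has no function {func_name!r}")
        return getattr(module, func_name)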

src/tests/test_chat_completion.py (+1 -1)

@@ -8,7 +8,7 @@ import torch
 from llama_recipes.inference.chat_utils import read_dialogs_from_file
 
 ROOT_DIR = Path(__file__).parents[2]
-CHAT_COMPLETION_DIR = ROOT_DIR / "recipes/quickstart/inference/local_inference/chat_completion/"
+CHAT_COMPLETION_DIR = ROOT_DIR / "getting-started/inference/local_inference/chat_completion/"
 
 sys.path = [CHAT_COMPLETION_DIR.as_posix()] + sys.path
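
Since ROOT_DIR is derived with Path(__file__).parents[2] (from src/tests/test_chat_completion.py up to the repository root), the updated constant only resolves if the directory actually moved on disk. A quick standalone sanity check, hypothetical and not part of the test suite:

    # Hypothetical check that the relocated directory exists before the
    # tests prepend it to sys.path; not part of the actual test file.
    from pathlib import Path

    root = Path(__file__).parents[2]
    chat_dir = root / "getting-started/inference/local_inference/chat_completion/"
    assert chat_dir.is_dir(), f"expected chat_completion assets at {chat_dir}"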