
fix more imports

Sanyam Bhutani · 3 months ago
parent
commit 0ca612fff3

+ 8 - 8
src/llama_cookbook/finetuning.py

@@ -13,29 +13,29 @@ import torch
 import torch.optim as optim
 from accelerate.utils import is_xpu_available
 
-from llama_recipes.configs import (
+from llama_cookbook.configs import (
     fsdp_config as FSDP_CONFIG,
     quantization_config as QUANTIZATION_CONFIG,
     train_config as TRAIN_CONFIG,
 )
-from llama_recipes.data.concatenator import ConcatDataset
-from llama_recipes.policies import AnyPrecisionAdamW, apply_fsdp_checkpointing
+from llama_cookbook.data.concatenator import ConcatDataset
+from llama_cookbook.policies import AnyPrecisionAdamW, apply_fsdp_checkpointing
 
-from llama_recipes.utils import fsdp_auto_wrap_policy
-from llama_recipes.utils.config_utils import (
+from llama_cookbook.utils import fsdp_auto_wrap_policy
+from llama_cookbook.utils.config_utils import (
     check_fsdp_config,
     generate_dataset_config,
     generate_peft_config,
     get_dataloader_kwargs,
     update_config,
 )
-from llama_recipes.utils.dataset_utils import (
+from llama_cookbook.utils.dataset_utils import (
     get_custom_data_collator,
     get_preprocessed_dataset,
 )
 
-from llama_recipes.utils.fsdp_utils import hsdp_device_mesh
-from llama_recipes.utils.train_utils import (
+from llama_cookbook.utils.fsdp_utils import hsdp_device_mesh
+from llama_cookbook.utils.train_utils import (
     clear_gpu_cache,
     freeze_transformer_layers,
     freeze_LLM_only,
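
The hunks above and below all make the same mechanical change: the import prefix llama_recipes becomes llama_cookbook. A minimal sketch of how such a bulk rename could be applied in one pass (a hypothetical one-off helper, not part of this commit; assumes the old prefix only appears as a module path in .py files):

# rename_imports.py -- hypothetical helper, not included in llama-cookbook.
# Rewrites the old package prefix to the new one in every .py file under src/.
from pathlib import Path

OLD, NEW = "llama_recipes.", "llama_cookbook."

for path in Path("src/llama_cookbook").rglob("*.py"):
    text = path.read_text()
    if OLD in text:
        path.write_text(text.replace(OLD, NEW))
        print(f"updated {path}")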

+ 4 - 4
src/llama_cookbook/utils/__init__.py

@@ -1,7 +1,7 @@
 # Copyright (c) Meta Platforms, Inc. and affiliates.
 # This software may be used and distributed according to the terms of the Llama 2 Community License Agreement.
 
-from llama_recipes.utils.memory_utils import MemoryTrace
-from llama_recipes.utils.dataset_utils import *
-from llama_recipes.utils.fsdp_utils import fsdp_auto_wrap_policy, hsdp_device_mesh
-from llama_recipes.utils.train_utils import *
+from llama_cookbook.utils.memory_utils import MemoryTrace
+from llama_cookbook.utils.dataset_utils import *
+from llama_cookbook.utils.fsdp_utils import fsdp_auto_wrap_policy, hsdp_device_mesh
+from llama_cookbook.utils.train_utils import *

+ 3 - 3
src/llama_cookbook/utils/config_utils.py

@@ -15,9 +15,9 @@ from peft import (
 from transformers import default_data_collator
 from transformers.data import DataCollatorForSeq2Seq
 
-from llama_recipes.configs import datasets, lora_config, llama_adapter_config, prefix_config, train_config
-from llama_recipes.data.sampler import LengthBasedBatchSampler, DistributedLengthBasedBatchSampler
-from llama_recipes.datasets import DATASET_PREPROC
+from llama_cookbook.configs import datasets, lora_config, llama_adapter_config, prefix_config, train_config
+from llama_cookbook.data.sampler import LengthBasedBatchSampler, DistributedLengthBasedBatchSampler
+from llama_cookbook.datasets import DATASET_PREPROC
 
 def update_config(config, **kwargs):
     if isinstance(config, (tuple, list)):

+ 3 - 3
src/llama_cookbook/utils/dataset_utils.py

@@ -3,9 +3,9 @@
 
 import torch
 
-from llama_recipes.data.concatenator import ConcatDataset
-from llama_recipes.datasets import DATASET_PREPROC, DATALOADER_COLLATE_FUNC
-from llama_recipes.utils.config_utils import get_dataloader_kwargs
+from llama_cookbook.data.concatenator import ConcatDataset
+from llama_cookbook.datasets import DATASET_PREPROC, DATALOADER_COLLATE_FUNC
+from llama_cookbook.utils.config_utils import get_dataloader_kwargs
 
 
 def get_preprocessed_dataset(

+ 4 - 4
src/llama_cookbook/utils/train_utils.py

@@ -20,11 +20,11 @@ from transformers import LlamaTokenizer
 import json
 
 
-from llama_recipes.model_checkpointing import save_fsdp_model_checkpoint_full, save_model_and_optimizer_sharded, save_optimizer_checkpoint, save_peft_checkpoint, save_model_checkpoint
-from llama_recipes.policies import fpSixteen,bfSixteen, get_llama_wrapper
-from llama_recipes.utils.memory_utils import MemoryTrace
+from llama_cookbook.model_checkpointing import save_fsdp_model_checkpoint_full, save_model_and_optimizer_sharded, save_optimizer_checkpoint, save_peft_checkpoint, save_model_checkpoint
+from llama_cookbook.policies import fpSixteen,bfSixteen, get_llama_wrapper
+from llama_cookbook.utils.memory_utils import MemoryTrace
 from accelerate.utils import is_xpu_available, is_ccl_available
-from llama_recipes.utils.flop_utils import FlopMeasure
+from llama_cookbook.utils.flop_utils import FlopMeasure
 def set_tokenizer_params(tokenizer: LlamaTokenizer):
     tokenizer.pad_token_id = 0
     tokenizer.padding_side = "left"
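
After the rename, the public entry points should resolve under the new package name. A quick smoke test, using only symbols that appear in the hunks above (assumes the llama-cookbook package is installed in the current environment):

# Sanity check that the renamed modules import cleanly under llama_cookbook.
from llama_cookbook.configs import train_config as TRAIN_CONFIG
from llama_cookbook.data.concatenator import ConcatDataset
from llama_cookbook.utils import fsdp_auto_wrap_policy
from llama_cookbook.utils.config_utils import get_dataloader_kwargs
from llama_cookbook.utils.train_utils import clear_gpu_cache

print("llama_cookbook imports resolve:",
      TRAIN_CONFIG, ConcatDataset, fsdp_auto_wrap_policy,
      get_dataloader_kwargs, clear_gpu_cache)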