123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081 |
- # Copyright (c) Meta Platforms, Inc. and affiliates.
- # This software may be used and distributed according to the terms of the Llama 2 Community License Agreement.
- import importlib
- from functools import partial
- from pathlib import Path
- import torch
- from llama_recipes.datasets import (
- get_grammar_dataset,
- get_alpaca_dataset,
- get_samsum_dataset,
- get_llamaguard_toxicchat_dataset,
- )
def load_module_from_py_file(py_file: str) -> object:
    """Import and return a module object from a .py file that is not on the Python path.

    The file is compiled and executed directly from disk; the resulting module
    is NOT registered in ``sys.modules``.

    Args:
        py_file: Filesystem path to a Python source file.

    Returns:
        The freshly executed module object.

    Raises:
        ImportError: If no import spec could be created for ``py_file``.
    """
    # Keep the original naming scheme (file name incl. ".py" suffix) so
    # module.__name__ stays backward-compatible for any caller that reads it.
    module_name = Path(py_file).name
    # spec_from_file_location is the documented one-step replacement for
    # manually pairing SourceFileLoader with spec_from_loader.
    spec = importlib.util.spec_from_file_location(module_name, py_file)
    if spec is None or spec.loader is None:
        # Previously this path crashed with an opaque AttributeError.
        raise ImportError(f"Could not create a module spec for {py_file}")
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return module
def get_custom_dataset(dataset_config, tokenizer, split: str):
    """Build a dataset via a user-supplied loader function in an external .py file.

    ``dataset_config.file`` is either ``"path/to/file.py"`` (the function
    ``get_custom_dataset`` inside it is used) or ``"path/to/file.py:func"``
    to pick a specific function.  The function is called with
    ``(dataset_config, tokenizer, split)`` and its result is returned.

    Raises:
        ValueError: If the referenced file does not end in ``.py``.
        FileNotFoundError: If the referenced file does not exist.
        AttributeError: Re-raised if the named function is missing (after
            printing a hint), or if the loader function itself raises one.
    """
    # Default loader name; a ":" in the spec overrides it.
    file_ref, func_name = dataset_config.file, "get_custom_dataset"
    if ":" in file_ref:
        file_ref, func_name = file_ref.split(":")

    if not file_ref.endswith(".py"):
        raise ValueError(f"Dataset file {file_ref} is not a .py file.")

    py_path = Path(file_ref)
    if not py_path.is_file():
        raise FileNotFoundError(f"Dataset py file {py_path.as_posix()} does not exist or is not a file.")

    module = load_module_from_py_file(py_path.as_posix())
    try:
        # Lookup and call stay inside one try so a missing function and an
        # AttributeError raised by the user code are handled the same way.
        return getattr(module, func_name)(dataset_config, tokenizer, split)
    except AttributeError as e:
        print(f"It seems like the given method name ({func_name}) is not present in the dataset .py file ({py_path.as_posix()}).")
        raise e
# Registry mapping a dataset name (dataset_config.dataset) to the callable
# that builds it.  Every callable shares the signature
# (dataset_config, tokenizer, split).
DATASET_PREPROC = {
    # partial() with no bound arguments was a no-op wrapper; dropped for
    # consistency with the other entries.
    "alpaca_dataset": get_alpaca_dataset,
    "grammar_dataset": get_grammar_dataset,
    "samsum_dataset": get_samsum_dataset,
    "custom_dataset": get_custom_dataset,
    "llamaguard_toxicchat_dataset": get_llamaguard_toxicchat_dataset,
}
def get_preprocessed_dataset(
    tokenizer, dataset_config, split: str = "train"
) -> torch.utils.data.Dataset:
    """Look up and build the dataset named by ``dataset_config.dataset``.

    Args:
        tokenizer: Tokenizer forwarded to the dataset preprocessing function.
        dataset_config: Config object; ``.dataset`` selects the entry in
            DATASET_PREPROC, and ``.train_split`` / ``.test_split`` name the
            dataset-specific splits.
        split: ``"train"`` selects ``train_split``; any other value selects
            ``test_split``.

    Returns:
        The dataset produced by the registered preprocessing function.

    Raises:
        NotImplementedError: If ``dataset_config.dataset`` is not registered.
    """
    if dataset_config.dataset not in DATASET_PREPROC:
        raise NotImplementedError(f"{dataset_config.dataset} is not (yet) implemented")

    # Map the generic split name onto the dataset-specific split label; the
    # original single-use nested closure is replaced by a conditional expression.
    split_name = (
        dataset_config.train_split if split == "train" else dataset_config.test_split
    )
    return DATASET_PREPROC[dataset_config.dataset](
        dataset_config,
        tokenizer,
        split_name,
    )
|