raft.py

import logging
import os
import argparse

from raft_utils import generate_questions, add_chunk_to_dataset
from format import DatasetConverter, datasetFormats, outputDatasetTypes
from config import load_config

logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
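
# High-level flow of main(), as implemented below: generate_questions() produces a list of
# questions per document chunk, add_chunk_to_dataset() assembles them into a dataset
# (an object exposing save_to_disk(), presumably a Hugging Face Dataset), the raw dataset
# is saved to the output directory, and DatasetConverter.convert() exports it in the
# requested format/type (e.g. hf/jsonl).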
def main(api_config):
    ds = None
    try:
        logging.info("Starting to generate question pairs.")
        # Generate a list of questions for each document chunk
        chunk_questions_zip = generate_questions(api_config)
        if not chunk_questions_zip:
            logging.warning("No questions generated from text. Please check the api_config or model configuration.")
            return
        logging.info(f"Successfully generated {sum([len(q) for c, q in chunk_questions_zip])} question/answer pairs.")
        # Build the dataset from the (chunk, questions) pairs and save the raw version to disk
        ds = add_chunk_to_dataset(chunk_questions_zip, api_config)
        ds.save_to_disk(args.output)  # args is the argparse namespace parsed in the __main__ block
        logging.info(f"Data successfully written to {api_config['output']}. Process completed.")
        formatter = DatasetConverter()
        # Extract format-specific params
        format_params = {}
        formatter.convert(ds=ds, format=args.output_format, output_path=args.output + "raft", output_type=args.output_type, params=format_params)
    except Exception as e:
        logging.error(f"An unexpected error occurred during the process: {e}", exc_info=True)


def parse_arguments():
    # Define command line arguments for the script
    parser = argparse.ArgumentParser(
        description="Generate RAFT question/answer/context pairs from documentation."
    )
    parser.add_argument(
        "-t", "--questions_per_chunk",
        type=int,
        default=4,
        help="Specify the number of question pairs to generate per chunk."
    )
    parser.add_argument(
        "-m", "--model",
        default="meta-llama/Meta-Llama-3-70B-Instruct",
        help="Select the model to use for generation."
    )
    parser.add_argument(
        "-c", "--config_path",
        default="./raft.yaml",
        help="Path to the configuration file that holds the system prompt, language, dataset path and number of questions."
    )
    parser.add_argument(
        "-u", "--endpoint_url",
        default="http://localhost:8001/v1",
        type=str,
        help="LLM API url for generating question/answer pairs."
    )
    parser.add_argument(
        "-k", "--api_key",
        default="EMPTY",
        type=str,
        help="LLM API key for generating question/answer pairs."
    )
    parser.add_argument("--chunk_size", type=int, default=1000, help="The size of each chunk in number of tokens")
    parser.add_argument("-o", "--output", type=str, default="./output/", help="The path at which to save the dataset")
    parser.add_argument("--output-format", type=str, default="hf", help="Format to convert the dataset to. Defaults to hf.", choices=datasetFormats)
    parser.add_argument("--output-type", type=str, default="jsonl", help="Type to export the dataset to. Defaults to jsonl.", choices=outputDatasetTypes)
    return parser.parse_args()


if __name__ == "__main__":
    logging.info("Initializing the process and loading configuration...")
    args = parse_arguments()
    api_config = load_config(args.config_path)
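    # Rough sketch of what raft.yaml is expected to contain, based on the --config_path
    # help text and the keys read from api_config below. Only num_distract_docs and
    # refusal_probability are referenced by this script; the other key names are
    # assumptions and may differ in the actual config file:
    #
    #   question_prompt_template: "..."   # assumed key: system prompt for question generation
    #   language: "English"               # assumed key
    #   data_dir: "./data"                # assumed key: path to the source documents
    #   num_distract_docs: 4              # example value
    #   refusal_probability: 0.05         # example value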
    api_config["questions_per_chunk"] = args.questions_per_chunk
    api_config["model"] = args.model
    api_config["chunk_size"] = args.chunk_size
    api_config["endpoint_url"] = args.endpoint_url
    api_config["output"] = args.output
    api_config["api_key"] = args.api_key
    # If API_KEY is defined in the system environment, it overrides the API key from the CLI/config
    if os.environ.get('API_KEY') is not None:
        api_config["api_key"] = os.environ["API_KEY"]
    logging.info(f"Configuration loaded. Generating {args.questions_per_chunk} questions per chunk using model '{args.model}'.")
    logging.info(f"Chunk size: {args.chunk_size}.")
    logging.info(f"num_distract_docs: {api_config['num_distract_docs']}, refusal_probability: {api_config['refusal_probability']}")
    logging.info(f"Will use endpoint_url: {args.endpoint_url}.")
    logging.info(f"Output will be written to {args.output}.")
    main(api_config)
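

# Example invocation, assuming an OpenAI-compatible inference server (e.g. vLLM) is already
# serving the chosen model at --endpoint_url; every flag below mirrors parse_arguments()
# and the values shown are simply the script defaults:
#
#   python raft.py \
#       --model meta-llama/Meta-Llama-3-70B-Instruct \
#       --config_path ./raft.yaml \
#       --endpoint_url http://localhost:8001/v1 \
#       --api_key EMPTY \
#       --questions_per_chunk 4 \
#       --chunk_size 1000 \
#       --output ./output/ \
#       --output-format hf \
#       --output-type jsonl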