gradio_lightrag_querying.py

import gradio as gr
import os
import logging
from lightrag import LightRAG, QueryParam
from lightrag.llm import ollama_model_complete, ollama_embedding
from lightrag.utils import EmbeddingFunc

# Define constants and create the working/output directories
WORKING_DIR = "./Legal_Documents"
OUTPUTS_DIR = os.path.join(WORKING_DIR, "../outputs/")
# PDF_PATH = "../Constituion-of-India.pdf"
output_file = os.path.join(OUTPUTS_DIR, "output_queries_2.txt")

os.makedirs(WORKING_DIR, exist_ok=True)
os.makedirs(OUTPUTS_DIR, exist_ok=True)

logging.basicConfig(format="%(levelname)s:%(message)s", level=logging.INFO)

# Initialize LightRAG with a local Ollama LLM and embedding model
rag = LightRAG(
    working_dir=WORKING_DIR,
    chunk_token_size=1200,
    llm_model_func=ollama_model_complete,
    llm_model_name="llama3.1:latest",
    llm_model_max_async=4,
    llm_model_max_token_size=32768,
    llm_model_kwargs={"host": "http://localhost:11434", "options": {"num_ctx": 32768}},
    embedding_func=EmbeddingFunc(
        embedding_dim=768,
        max_token_size=8192,
        func=lambda texts: ollama_embedding(
            texts, embed_model="nomic-embed-text", host="http://localhost:11434"
        ),
    ),
)
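
# NOTE: this script only queries an existing index. The documents under
# WORKING_DIR are assumed to have been ingested beforehand, e.g. with
# rag.insert() in a separate indexing script (hence the commented-out
# PDF_PATH above).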

def query_rag(input_text, mode):
    try:
        result = rag.query(input_text, param=QueryParam(mode=mode))
        logs = f"Query executed successfully in mode '{mode}'"
    except Exception as e:
        # Catch exceptions and log errors so the UI stays responsive
        result = "An error occurred during the query execution."
        logs = f"Error: {e}"
    return result, logs
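
# Example (hypothetical query, callable outside the UI):
#   result, logs = query_rag("What does Article 21 of the Constitution guarantee?", "hybrid")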

# Code snippet to display
code_text = """
LightRAG(
    working_dir=WORKING_DIR,
    chunk_token_size=1200,
    llm_model_func=ollama_model_complete,
    llm_model_name="llama3.1:latest",
    llm_model_max_async=4,
    llm_model_max_token_size=32768,
    llm_model_kwargs={"host": "http://localhost:11434", "options": {"num_ctx": 32768}},
    embedding_func=EmbeddingFunc(
        embedding_dim=768,
        max_token_size=8192,
        func=lambda texts: ollama_embedding(texts, embed_model="nomic-embed-text", host="http://localhost:11434"),
    ),
)
"""

# Define the Gradio interface layout
with gr.Blocks() as demo:
    gr.Markdown("<h1 style='color: white; text-align: center;'>LightRAG Gradio Demo</h1>")
    with gr.Row():
        with gr.Column(scale=1):
            query_input = gr.Textbox(label="Enter your query")
            mode_dropdown = gr.Dropdown(choices=["naive", "local", "global", "hybrid"], label="Select Query Mode")
            submit_button = gr.Button("Submit")
        with gr.Column(scale=2):
            result_output = gr.Textbox(label="LLM Response", lines=20, interactive=True)
            logs_output = gr.Textbox(label="Terminal Logs", lines=10, interactive=True)
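
    # Render the configuration snippet (`code_text` above is otherwise unused);
    # gr.Code is a standard Gradio component, used here as one way to surface it.
    gr.Code(value=code_text, language="python", label="RAG Configuration")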

    # Link button click to the query function
    submit_button.click(query_rag, inputs=[query_input, mode_dropdown], outputs=[result_output, logs_output])

# Launch Gradio interface
demo.launch()
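
# To run (assumes a local Ollama server on localhost:11434 with both models
# already pulled):
#   ollama pull llama3.1
#   ollama pull nomic-embed-text
#   python gradio_lightrag_querying.py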