# main.py
import os

from groq import Groq
from langchain.chains import ConversationChain, LLMChain
from langchain.chains.conversation.memory import ConversationBufferWindowMemory
from langchain.prompts import PromptTemplate
from langchain_core.messages import SystemMessage
from langchain_core.prompts import (
    ChatPromptTemplate,
    HumanMessagePromptTemplate,
    MessagesPlaceholder,
)
from langchain_groq import ChatGroq
  13. def main():
  14. """
  15. This function is the main entry point of the application. It sets up the Groq client, the Streamlit interface, and handles the chat interaction.
  16. """
  17. # Get Groq API key
  18. groq_api_key = os.environ['GROQ_API_KEY']
  19. model = 'llama3-8b-8192'
  20. # Initialize Groq Langchain chat object and conversation
  21. groq_chat = ChatGroq(
  22. groq_api_key=groq_api_key,
  23. model_name=model
  24. )
  25. print("Hello! I'm your friendly Groq chatbot. I can help answer your questions, provide information, or just chat. I'm also super fast! Let's start our conversation!")
  26. system_prompt = 'You are a friendly conversational chatbot'
  27. conversational_memory_length = 5 # number of previous messages the chatbot will remember during the conversation
  28. memory = ConversationBufferWindowMemory(k=conversational_memory_length, memory_key="chat_history", return_messages=True)
  29. #chat_history = []
  30. while True:
  31. user_question = input("Ask a question: ")
  32. # If the user has asked a question,
  33. if user_question:
  34. # Construct a chat prompt template using various components
  35. prompt = ChatPromptTemplate.from_messages(
  36. [
  37. SystemMessage(
  38. content=system_prompt
  39. ), # This is the persistent system prompt that is always included at the start of the chat.
  40. MessagesPlaceholder(
  41. variable_name="chat_history"
  42. ), # This placeholder will be replaced by the actual chat history during the conversation. It helps in maintaining context.
  43. HumanMessagePromptTemplate.from_template(
  44. "{human_input}"
  45. ), # This template is where the user's current input will be injected into the prompt.
  46. ]
  47. )
  48. # Create a conversation chain using the LangChain LLM (Language Learning Model)
  49. conversation = LLMChain(
  50. llm=groq_chat, # The Groq LangChain chat object initialized earlier.
  51. prompt=prompt, # The constructed prompt template.
  52. verbose=False, # TRUE Enables verbose output, which can be useful for debugging.
  53. memory=memory, # The conversational memory object that stores and manages the conversation history.
  54. )
  55. # The chatbot's answer is generated by sending the full prompt to the Groq API.
  56. response = conversation.predict(human_input=user_question)
  57. print("Chatbot:", response)
  58. if __name__ == "__main__":
  59. main()