import os

from langchain.chains import LLMChain
from langchain_core.prompts import (
    ChatPromptTemplate,
    HumanMessagePromptTemplate,
    MessagesPlaceholder,
)
from langchain_core.messages import SystemMessage
from langchain.chains.conversation.memory import ConversationBufferWindowMemory
from langchain_groq import ChatGroq
 
def main():
    """
    The main entry point of the application: sets up the Groq LangChain chat
    object and runs the terminal chat loop.
    """
    # Get the Groq API key from the environment
    groq_api_key = os.environ['GROQ_API_KEY']
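    # Note: os.environ['GROQ_API_KEY'] raises a KeyError if the variable is
    # unset, so export the key in your shell before running.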
 
    model = 'llama3-8b-8192'

    # Initialize the Groq LangChain chat object
    groq_chat = ChatGroq(
        groq_api_key=groq_api_key,
        model_name=model
    )
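    # Any other Groq-hosted model id (e.g. 'llama3-70b-8192') could be
    # substituted for the model name above.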
 
    print("Hello! I'm your friendly Groq chatbot. I can help answer your questions, provide information, or just chat. I'm also super fast! Let's start our conversation!")

    system_prompt = 'You are a friendly conversational chatbot'
    conversational_memory_length = 5  # number of previous exchanges the chatbot remembers during the conversation

    memory = ConversationBufferWindowMemory(k=conversational_memory_length, memory_key="chat_history", return_messages=True)
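    # With k=5, only the five most recent user/AI exchanges are injected back
    # into the prompt; earlier turns fall out of the window.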
 
    while True:
        user_question = input("Ask a question: ")

        # If the user has asked a question,
        if user_question:
            # Construct a chat prompt template from three components
            prompt = ChatPromptTemplate.from_messages(
                [
                    # The persistent system prompt, always included at the start of the chat.
                    SystemMessage(content=system_prompt),
                    # Placeholder replaced by the actual chat history at runtime; this maintains context.
                    MessagesPlaceholder(variable_name="chat_history"),
                    # Template into which the user's current input is injected.
                    HumanMessagePromptTemplate.from_template("{human_input}"),
                ]
            )
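            # Note: the prompt and the chain below are rebuilt on every loop
            # iteration; both could be constructed once before the loop, since
            # only the memory contents change between turns.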
 
            # Create a conversation chain using the LangChain LLM (Large Language Model)
            conversation = LLMChain(
                llm=groq_chat,    # the Groq LangChain chat object initialized earlier
                prompt=prompt,    # the constructed prompt template
                verbose=False,    # True enables verbose output, useful for debugging
                memory=memory,    # stores and manages the conversation history
            )

            # The chatbot's answer is generated by sending the full prompt to the Groq API.
            response = conversation.predict(human_input=user_question)
            print("Chatbot:", response)
 
if __name__ == "__main__":
    main()
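
For readers new to LangChain's windowed memory, the following is a minimal standalone sketch (separate from the script above, with toy inputs) of how `ConversationBufferWindowMemory` trims history to the last `k` exchanges:

```python
from langchain.chains.conversation.memory import ConversationBufferWindowMemory

# Keep only the two most recent exchanges (k=2) for demonstration.
demo_memory = ConversationBufferWindowMemory(
    k=2, memory_key="chat_history", return_messages=True
)

# Record three user/AI exchanges.
demo_memory.save_context({"input": "Hi!"}, {"output": "Hello there."})
demo_memory.save_context({"input": "What is Groq?"}, {"output": "An inference platform."})
demo_memory.save_context({"input": "Is it fast?"}, {"output": "Very fast."})

# Only the last two exchanges (four messages) survive the window.
print(demo_memory.load_memory_variables({})["chat_history"])
```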
 
 