Bläddra i källkod

Fix deprecated langchain warning for llama3

Srinidhi Viswanathan 1 månad sedan
förälder
incheckning
fa38b3742c
1 ändrade filer med 41 tillägg och 41 borttagningar
  1. 41 41
      3p-integrations/langchain/langgraph_rag_agent_local.ipynb

+ 41 - 41
3p-integrations/langchain/langgraph_rag_agent_local.ipynb

@@ -15,7 +15,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "! pip install -U langchain_community tiktoken langchainhub chromadb langchain langgraph tavily-python sentence-transformers"
+    "! pip install -U langchain-ollama langchain_community tiktoken langchainhub chromadb langchain langgraph tavily-python sentence-transformers"
    ]
   },
   {
@@ -158,27 +158,27 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "### Retrieval Grader \n",
+    "### Retrieval Grader\n",
     "\n",
     "from langchain.prompts import PromptTemplate\n",
-    "from langchain_community.chat_models import ChatOllama\n",
+    "from langchain_ollama import ChatOllama\n",
     "from langchain_core.output_parsers import JsonOutputParser\n",
     "\n",
     "# LLM\n",
     "llm = ChatOllama(model=local_llm, format=\"json\", temperature=0)\n",
     "\n",
     "prompt = PromptTemplate(\n",
-    "    template=\"\"\"You are a grader assessing relevance \n",
-    "    of a retrieved document to a user question. If the document contains keywords related to the user question, \n",
-    "    grade it as relevant. It does not need to be a stringent test. The goal is to filter out erroneous retrievals. \n",
-    "    \n",
+    "    template=\"\"\"You are a grader assessing relevance\n",
+    "    of a retrieved document to a user question. If the document contains keywords related to the user question,\n",
+    "    grade it as relevant. It does not need to be a stringent test. The goal is to filter out erroneous retrievals.\n",
+    "\n",
     "    Give a binary score 'yes' or 'no' score to indicate whether the document is relevant to the question.\n",
    "Provide the binary score as a JSON with a single key 'score' and no preamble or explanation.\n",
-    "     \n",
-    "    Here is the retrieved document: \n",
+    "\n",
+    "    Here is the retrieved document:\n",
     "    {document}\n",
-    "    \n",
-    "    Here is the user question: \n",
+    "\n",
+    "    Here is the user question:\n",
     "    {question}\n",
     "    \"\"\",\n",
     "    input_variables=[\"question\", \"document\"],\n",
@@ -206,12 +206,12 @@
     "\n",
     "# Prompt\n",
     "prompt = PromptTemplate(\n",
-    "    template=\"\"\"You are an assistant for question-answering tasks. \n",
-    "    Use the following pieces of retrieved context to answer the question. If you don't know the answer, just say that you don't know. \n",
+    "    template=\"\"\"You are an assistant for question-answering tasks.\n",
+    "    Use the following pieces of retrieved context to answer the question. If you don't know the answer, just say that you don't know.\n",
     "    Use three sentences maximum and keep the answer concise:\n",
-    "    Question: {question} \n",
-    "    Context: {context} \n",
-    "    Answer: \n",
+    "    Question: {question}\n",
+    "    Context: {context}\n",
+    "    Answer:\n",
     "    \"\"\",\n",
     "    input_variables=[\"question\", \"document\"],\n",
     ")\n",
@@ -239,22 +239,22 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "### Hallucination Grader \n",
+    "### Hallucination Grader\n",
     "\n",
     "# LLM\n",
     "llm = ChatOllama(model=local_llm, format=\"json\", temperature=0)\n",
     "\n",
     "# Prompt\n",
     "prompt = PromptTemplate(\n",
-    "    template=\"\"\"You are a grader assessing whether \n",
-    "    an answer is grounded in / supported by a set of facts. Give a binary score 'yes' or 'no' score to indicate \n",
-    "    whether the answer is grounded in / supported by a set of facts. Provide the binary score as a JSON with a \n",
+    "    template=\"\"\"You are a grader assessing whether\n",
+    "    an answer is grounded in / supported by a set of facts. Give a binary score 'yes' or 'no' score to indicate\n",
+    "    whether the answer is grounded in / supported by a set of facts. Provide the binary score as a JSON with a\n",
     "    single key 'score' and no preamble or explanation.\n",
-    "    \n",
+    "\n",
     "    Here are the facts:\n",
-    "    {documents} \n",
+    "    {documents}\n",
     "\n",
-    "    Here is the answer: \n",
+    "    Here is the answer:\n",
     "    {generation}\n",
     "    \"\"\",\n",
     "    input_variables=[\"generation\", \"documents\"],\n",
@@ -271,19 +271,19 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "### Answer Grader \n",
+    "### Answer Grader\n",
     "\n",
     "# LLM\n",
     "llm = ChatOllama(model=local_llm, format=\"json\", temperature=0)\n",
     "\n",
     "# Prompt\n",
     "prompt = PromptTemplate(\n",
-    "    template=\"\"\"You are a grader assessing whether an \n",
-    "    answer is useful to resolve a question. Give a binary score 'yes' or 'no' to indicate whether the answer is \n",
+    "    template=\"\"\"You are a grader assessing whether an\n",
+    "    answer is useful to resolve a question. Give a binary score 'yes' or 'no' to indicate whether the answer is\n",
     "    useful to resolve a question. Provide the binary score as a JSON with a single key 'score' and no preamble or explanation.\n",
-    "     \n",
+    "\n",
     "    Here is the answer:\n",
-    "    {generation} \n",
+    "    {generation}\n",
     "\n",
     "    Here is the question: {question}\n",
     "    \"\"\",\n",
@@ -311,14 +311,14 @@
     "llm = ChatOllama(model=local_llm, format=\"json\", temperature=0)\n",
     "\n",
     "prompt = PromptTemplate(\n",
-    "    template=\"\"\"You are an expert at routing a \n",
-    "    user question to a vectorstore or web search. Use the vectorstore for questions on LLM  agents, \n",
-    "    prompt engineering, and adversarial attacks. You do not need to be stringent with the keywords \n",
-    "    in the question related to these topics. Otherwise, use web-search. Give a binary choice 'web_search' \n",
-    "    or 'vectorstore' based on the question. Return the a JSON with a single key 'datasource' and \n",
-    "    no premable or explanation. \n",
-    "    \n",
-    "    Question to route: \n",
+    "    template=\"\"\"You are an expert at routing a\n",
+    "    user question to a vectorstore or web search. Use the vectorstore for questions on LLM agents,\n",
+    "    prompt engineering, and adversarial attacks. You do not need to be stringent with the keywords\n",
+    "    in the question related to these topics. Otherwise, use web-search. Give a binary choice 'web_search'\n",
+    "    or 'vectorstore' based on the question. Return a JSON with a single key 'datasource' and\n",
+    "    no preamble or explanation.\n",
+    "\n",
+    "    Question to route:\n",
     "    {question}\"\"\",\n",
     "    input_variables=[\"question\"],\n",
     ")\n",
@@ -371,7 +371,7 @@
     "        question: question\n",
     "        generation: LLM generation\n",
     "        web_search: whether to add search\n",
-    "        documents: list of documents \n",
+    "        documents: list of documents\n",
     "    \"\"\"\n",
     "    question : str\n",
     "    generation : str\n",
@@ -412,7 +412,7 @@
     "    print(\"---GENERATE---\")\n",
     "    question = state[\"question\"]\n",
     "    documents = state[\"documents\"]\n",
-    "    \n",
+    "\n",
     "    # RAG generation\n",
     "    generation = rag_chain.invoke({\"context\": documents, \"question\": question})\n",
     "    return {\"documents\": documents, \"question\": question, \"generation\": generation}\n",
@@ -432,7 +432,7 @@
     "    print(\"---CHECK DOCUMENT RELEVANCE TO QUESTION---\")\n",
     "    question = state[\"question\"]\n",
     "    documents = state[\"documents\"]\n",
-    "    \n",
+    "\n",
     "    # Score each doc\n",
     "    filtered_docs = []\n",
     "    web_search = \"No\"\n",
@@ -451,7 +451,7 @@
     "            web_search = \"Yes\"\n",
     "            continue\n",
     "    return {\"documents\": filtered_docs, \"question\": question, \"web_search\": web_search}\n",
-    "    \n",
+    "\n",
     "def web_search(state):\n",
     "    \"\"\"\n",
     "    Web search based on the question\n",
@@ -493,7 +493,7 @@
     "    print(\"---ROUTE QUESTION---\")\n",
     "    question = state[\"question\"]\n",
     "    print(question)\n",
-    "    source = question_router.invoke({\"question\": question})  \n",
+    "    source = question_router.invoke({\"question\": question})\n",
     "    print(source)\n",
     "    print(source['datasource'])\n",
     "    if source['datasource'] == 'web_search':\n",