Browse Source

Add Groq/Llama3 recipes (cookbook and command line examples) (#553)

Hamid Shojanazeri 10 months ago
parent
commit
a32e91983c
58 changed files with 8091 additions and 938 deletions
  1. 30 1
      .github/scripts/spellcheck_conf/wordlist.txt
  2. 1038 0
      recipes/llama_api_providers/Groq/groq-api-cookbook/function-calling-101-ecommerce/Function-Calling-101-Ecommerce.ipynb
  3. 41 0
      recipes/llama_api_providers/Groq/groq-api-cookbook/function-calling-101-ecommerce/customers.csv
  4. 21 0
      recipes/llama_api_providers/Groq/groq-api-cookbook/function-calling-101-ecommerce/orders.csv
  5. 21 0
      recipes/llama_api_providers/Groq/groq-api-cookbook/function-calling-101-ecommerce/products.csv
  6. 8 0
      recipes/llama_api_providers/Groq/groq-api-cookbook/json-mode-function-calling-for-sql/data/employees.csv
  7. 6 0
      recipes/llama_api_providers/Groq/groq-api-cookbook/json-mode-function-calling-for-sql/data/purchases.csv
  8. 677 0
      recipes/llama_api_providers/Groq/groq-api-cookbook/json-mode-function-calling-for-sql/json-mode-function-calling-for-sql.ipynb
  9. 7 0
      recipes/llama_api_providers/Groq/groq-api-cookbook/json-mode-function-calling-for-sql/verified-queries/employees-without-purchases.yaml
  10. 9 0
      recipes/llama_api_providers/Groq/groq-api-cookbook/json-mode-function-calling-for-sql/verified-queries/most-expensive-purchase.yaml
  11. 11 0
      recipes/llama_api_providers/Groq/groq-api-cookbook/json-mode-function-calling-for-sql/verified-queries/most-recent-purchases.yaml
  12. 6 0
      recipes/llama_api_providers/Groq/groq-api-cookbook/json-mode-function-calling-for-sql/verified-queries/number-of-teslas.yaml
  13. 639 0
      recipes/llama_api_providers/Groq/groq-api-cookbook/json-mode-social-determinants-of-health/SDOH-Json-mode.ipynb
  14. 31 0
      recipes/llama_api_providers/Groq/groq-api-cookbook/json-mode-social-determinants-of-health/clinical_notes/00456321.txt
  15. 28 0
      recipes/llama_api_providers/Groq/groq-api-cookbook/json-mode-social-determinants-of-health/clinical_notes/00567289.txt
  16. 28 0
      recipes/llama_api_providers/Groq/groq-api-cookbook/json-mode-social-determinants-of-health/clinical_notes/00678934.txt
  17. 32 0
      recipes/llama_api_providers/Groq/groq-api-cookbook/json-mode-social-determinants-of-health/clinical_notes/00785642.txt
  18. 30 0
      recipes/llama_api_providers/Groq/groq-api-cookbook/json-mode-social-determinants-of-health/clinical_notes/00893247.txt
  19. 427 0
      recipes/llama_api_providers/Groq/groq-api-cookbook/llama3-stock-market-function-calling/llama3-stock-market-function-calling.ipynb
  20. 340 0
      recipes/llama_api_providers/Groq/groq-api-cookbook/parallel-tool-use/parallel-tool-use.ipynb
  21. 2 0
      recipes/llama_api_providers/Groq/groq-api-cookbook/parallel-tool-use/requirements.txt
  22. 993 0
      recipes/llama_api_providers/Groq/groq-api-cookbook/rag-langchain-presidential-speeches/presidential_speeches.csv
  23. 664 0
      recipes/llama_api_providers/Groq/groq-api-cookbook/rag-langchain-presidential-speeches/rag-langchain-presidential-speeches.ipynb
  24. 21 0
      recipes/llama_api_providers/Groq/groq-example-templates/conversational-chatbot-langchain/README.md
  25. 74 0
      recipes/llama_api_providers/Groq/groq-example-templates/conversational-chatbot-langchain/main.py
  26. 0 0
      recipes/llama_api_providers/Groq/groq-example-templates/conversational-chatbot-langchain/requirements.txt
  27. 23 0
      recipes/llama_api_providers/Groq/groq-example-templates/crewai-agents/README.md
  28. 184 0
      recipes/llama_api_providers/Groq/groq-example-templates/crewai-agents/main.py
  29. 3 0
      recipes/llama_api_providers/Groq/groq-example-templates/crewai-agents/requirements.txt
  30. 21 0
      recipes/llama_api_providers/Groq/groq-example-templates/groq-quickstart-conversational-chatbot/README.md
  31. 38 0
      recipes/llama_api_providers/Groq/groq-example-templates/groq-quickstart-conversational-chatbot/main.py
  32. 1 0
      recipes/llama_api_providers/Groq/groq-example-templates/groq-quickstart-conversational-chatbot/requirements.txt
  33. 27 0
      recipes/llama_api_providers/Groq/groq-example-templates/groqing-the-stock-market-function-calling-llama3/README.md
  34. 139 0
      recipes/llama_api_providers/Groq/groq-example-templates/groqing-the-stock-market-function-calling-llama3/main.py
  35. 12 0
      recipes/llama_api_providers/Groq/groq-example-templates/groqing-the-stock-market-function-calling-llama3/requirements.txt
  36. 21 0
      recipes/llama_api_providers/Groq/groq-example-templates/llamachat-conversational-chatbot-with-llamaIndex/README.md
  37. 46 0
      recipes/llama_api_providers/Groq/groq-example-templates/llamachat-conversational-chatbot-with-llamaIndex/main.py
  38. 2 0
      recipes/llama_api_providers/Groq/groq-example-templates/llamachat-conversational-chatbot-with-llamaIndex/requirements.txt
  39. 33 0
      recipes/llama_api_providers/Groq/groq-example-templates/presidential-speeches-rag-with-pinecone/README.md
  40. 114 0
      recipes/llama_api_providers/Groq/groq-example-templates/presidential-speeches-rag-with-pinecone/main.py
  41. 8 0
      recipes/llama_api_providers/Groq/groq-example-templates/presidential-speeches-rag-with-pinecone/requirements.txt
  42. 57 0
      recipes/llama_api_providers/Groq/groq-example-templates/text-to-sql-json-mode/README.md
  43. 8 0
      recipes/llama_api_providers/Groq/groq-example-templates/text-to-sql-json-mode/data/employees.csv
  44. 6 0
      recipes/llama_api_providers/Groq/groq-example-templates/text-to-sql-json-mode/data/purchases.csv
  45. 145 0
      recipes/llama_api_providers/Groq/groq-example-templates/text-to-sql-json-mode/main.py
  46. 42 0
      recipes/llama_api_providers/Groq/groq-example-templates/text-to-sql-json-mode/prompts/base_prompt.txt
  47. 4 0
      recipes/llama_api_providers/Groq/groq-example-templates/text-to-sql-json-mode/requirements.txt
  48. 53 0
      recipes/llama_api_providers/Groq/groq-example-templates/verified-sql-function-calling/README.md
  49. 8 0
      recipes/llama_api_providers/Groq/groq-example-templates/verified-sql-function-calling/data/employees.csv
  50. 6 0
      recipes/llama_api_providers/Groq/groq-example-templates/verified-sql-function-calling/data/purchases.csv
  51. 158 0
      recipes/llama_api_providers/Groq/groq-example-templates/verified-sql-function-calling/main.py
  52. 9 0
      recipes/llama_api_providers/Groq/groq-example-templates/verified-sql-function-calling/requirements.txt
  53. 7 0
      recipes/llama_api_providers/Groq/groq-example-templates/verified-sql-function-calling/verified-queries/employees-without-purchases.yaml
  54. 9 0
      recipes/llama_api_providers/Groq/groq-example-templates/verified-sql-function-calling/verified-queries/most-expensive-purchase.yaml
  55. 9 0
      recipes/llama_api_providers/Groq/groq-example-templates/verified-sql-function-calling/verified-queries/most-recent-purchases.yaml
  56. 6 0
      recipes/llama_api_providers/Groq/groq-example-templates/verified-sql-function-calling/verified-queries/number-of-teslas.yaml
  57. 1708 0
      recipes/llama_api_providers/Groq/llama3_cookbook_groq.ipynb
  58. 0 937
      recipes/llama_api_providers/llama3_cookbook_groq.ipynb

+ 30 - 1
.github/scripts/spellcheck_conf/wordlist.txt

@@ -1350,4 +1350,33 @@ SalesBot
 Weaviate
 MediaGen
 SDXL
-SVD
+SVD
+DataFrame
+DuckDB
+Groq
+GroqCloud
+Replit
+Teslas
+duckdb
+teslas
+Groqs
+groq
+schemas
+Pinecone
+Pinecone's
+Repl
+docsearch
+presidental
+CrewAI
+kickstart
+DataFrames
+Groqing
+Langchain
+Plotly
+dfs
+yfinance
+Groq's
+LlamaChat
+chatbot's
+ConversationBufferWindowMemory

File diff is too large
+ 1038 - 0
recipes/llama_api_providers/Groq/groq-api-cookbook/function-calling-101-ecommerce/Function-Calling-101-Ecommerce.ipynb


+ 41 - 0
recipes/llama_api_providers/Groq/groq-api-cookbook/function-calling-101-ecommerce/customers.csv

@@ -0,0 +1,41 @@
+customer_id,name,email,address
+1,Erin Boyle MD,erin.boyle.md@example.com,"165 Brown Springs
+Michaelport, IL 60228"
+2,Matthew Saunders,matthew.saunders@example.com,"219 Steven Mountains
+Port Gabriellafort, OH 52281"
+3,Amanda Anderson,amanda.anderson@example.com,"498 Laurie Glens
+Mitchelltown, CT 93655"
+4,Julian Butler,julian.butler@example.com,"909 Rodriguez Harbors Suite 119
+New Tracyburgh, MS 15487"
+5,Zachary Mitchell MD,zachary.mitchell.md@example.com,"9087 Matthew Drives
+Caitlynshire, OR 42442"
+6,Troy Bennett,troy.bennett@example.com,"73329 Kimberly Loaf Apt. 029
+Shellyborough, TX 55939"
+7,Allison Hall,allison.hall@example.com,"210 Shannon Camp
+New Michael, MO 65990"
+8,Carolyn Davis,carolyn.davis@example.com,"64228 Carol Courts Suite 087
+New Micheleshire, MT 42516"
+9,Cindy Munoz,cindy.munoz@example.com,"1722 Christine Plaza
+Danielport, UT 12261"
+10,Tom Testuser,tom.testuser@example.com,"451 Victoria Bridge Suite 529
+Pageton, WI 27404"
+11,Charles Walker,charles.walker@example.com,"2077 Lamb Drive
+Salazarton, IN 54619"
+12,Brianna Molina,brianna.molina@example.com,"586 Khan Mills Suite 202
+Lake Dominique, VA 98527"
+13,Austin Andrade,austin.andrade@example.com,"4857 Donna Cliffs
+Floydstad, PR 82540"
+14,Brandon Andrade,brandon.andrade@example.com,"906 Olivia Motorway
+Kelleyfort, AK 48960"
+15,Diane Lam,diane.lam@example.com,"070 Eric Rapid Suite 159
+Townsendbury, MI 57664"
+16,Jason Kelly,jason.kelly@example.com,"873 Angela Track Apt. 972
+Stephenville, NV 32705"
+17,Mr. Mitchell Saunders,mr..mitchell.saunders@example.com,"USS White
+FPO AE 91058"
+18,Regina Ross,regina.ross@example.com,"91857 Wendy Place
+East Charlesshire, CA 43705"
+19,Mrs. Denise May DDS,mrs..denise.may.dds@example.com,"64590 Kathleen Cove Apt. 736
+Derrickton, AK 05935"
+20,Lisa Boyle,lisa.boyle@example.com,"USNS Russell
+FPO AE 51528"

+ 21 - 0
recipes/llama_api_providers/Groq/groq-api-cookbook/function-calling-101-ecommerce/orders.csv

@@ -0,0 +1,21 @@
+order_id,product_id,customer_id,order_date
+1,13,18,2024-02-15 15:15
+2,19,6,2024-01-03 17:43
+3,12,20,2024-03-11 1:13
+4,7,20,2024-02-04 12:04
+5,14,3,2024-05-02 17:12
+6,17,6,2024-02-12 1:46
+7,20,4,2024-02-26 2:59
+8,4,7,2024-05-02 16:51
+9,11,2,2024-01-04 11:09
+10,6,9,2024-04-09 15:04
+11,3,7,2024-02-21 21:17
+12,6,18,2024-02-21 18:50
+13,17,11,2024-05-02 16:20
+14,11,15,2024-04-20 2:49
+15,16,7,2024-01-18 1:12
+16,16,16,2024-05-03 11:20
+17,14,18,2024-03-26 22:51
+18,20,16,2024-05-07 23:25
+19,1,12,2024-05-20 12:41
+20,20,3,2024-01-17 7:25

+ 21 - 0
recipes/llama_api_providers/Groq/groq-api-cookbook/function-calling-101-ecommerce/products.csv

@@ -0,0 +1,21 @@
+product_id,name,description,price,stock_quantity
+1,Laptop,High performance laptop with 16GB RAM and 512GB SSD.,753.03,15
+2,Smartphone,Latest model smartphone with a stunning display and great camera.,398.54,59
+3,Headphones,Noise-cancelling over-ear headphones with long battery life.,889.79,97
+4,Monitor,24-inch 1080p monitor with vibrant colors and wide viewing angles.,604.44,98
+5,Keyboard,Mechanical keyboard with customizable RGB lighting.,500.24,52
+6,Mouse,Wireless mouse with ergonomic design and long battery life.,321.98,57
+7,Printer,All-in-one printer with wireless connectivity and high-quality printing.,695.29,32
+8,Tablet,Portable tablet with 10-inch display and powerful processor.,625.75,28
+9,Smartwatch,Stylish smartwatch with fitness tracking and notifications.,952.72,42
+10,Camera,Digital camera with 20MP sensor and 4K video recording.,247.93,99
+11,Speaker,Bluetooth speaker with excellent sound quality and deep bass.,896.4,32
+12,Router,Wi-Fi router with high speed and wide coverage.,976.16,59
+13,External Hard Drive,1TB external hard drive with fast data transfer speeds.,434.46,18
+14,USB Flash Drive,64GB USB flash drive with compact design and reliable storage.,991.09,77
+15,Microphone,Professional microphone with clear sound and adjustable settings.,276.23,30
+16,Webcam,HD webcam with wide-angle lens and built-in microphone.,890.39,13
+17,Drone,Compact drone with HD camera and stable flight controls.,285.93,37
+18,Projector,Portable projector with bright display and multiple connectivity options.,290.22,31
+19,Fitness Tracker,Fitness tracker with heart rate monitor and sleep tracking.,953.65,4
+20,E-Reader,Lightweight e-reader with high-resolution display and long battery life.,132.15,62
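
The Function-Calling-101-Ecommerce notebook itself is suppressed above, but these three CSVs are the data it runs against. As a rough sketch of the kind of tool such a notebook might register with the Groq API (the `get_order_details` name and schema are illustrative assumptions, not the committed code):

```python
import json

import pandas as pd

products = pd.read_csv("products.csv")
orders = pd.read_csv("orders.csv")

def get_order_details(order_id: int) -> str:
    """Return a JSON summary of an order joined with its product."""
    order = orders[orders.order_id == order_id].merge(products, on="product_id")
    if order.empty:
        return json.dumps({"error": f"no order with id {order_id}"})
    row = order.iloc[0]
    return json.dumps({
        "order_id": int(row.order_id),
        "product": row["name"],
        "price": float(row.price),
        "order_date": row.order_date,
    })

# Tool schema in the format the Groq chat completions API expects.
tools = [{
    "type": "function",
    "function": {
        "name": "get_order_details",
        "description": "Look up an order and its product by order_id",
        "parameters": {
            "type": "object",
            "properties": {"order_id": {"type": "integer"}},
            "required": ["order_id"],
        },
    },
}]
```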

+ 8 - 0
recipes/llama_api_providers/Groq/groq-api-cookbook/json-mode-function-calling-for-sql/data/employees.csv

@@ -0,0 +1,8 @@
+employee_id,name,email
+1,Richard Hendricks,richard@piedpiper.com
+2,Erlich Bachman,erlich@aviato.com
+3,Dinesh Chugtai,dinesh@piedpiper.com
+4,Bertram Gilfoyle,gilfoyle@piedpiper.com
+5,Jared Dunn,jared@piedpiper.com
+6,Monica Hall,monica@raviga.com
+7,Gavin Belson,gavin@hooli.com

+ 6 - 0
recipes/llama_api_providers/Groq/groq-api-cookbook/json-mode-function-calling-for-sql/data/purchases.csv

@@ -0,0 +1,6 @@
+purchase_id,purchase_date,product_name,employee_id,amount
+1,'2024-02-01',iPhone,1,750
+2,'2024-02-02',Tesla,2,70000
+3,'2024-02-03',Humane pin,3,500
+4,'2024-02-04',iPhone,4,700
+5,'2024-02-05',Tesla,5,75000

File diff is too large
+ 677 - 0
recipes/llama_api_providers/Groq/groq-api-cookbook/json-mode-function-calling-for-sql/json-mode-function-calling-for-sql.ipynb


+ 7 - 0
recipes/llama_api_providers/Groq/groq-api-cookbook/json-mode-function-calling-for-sql/verified-queries/employees-without-purchases.yaml

@@ -0,0 +1,7 @@
+description: Employees without a purchase since Feb 1, 2024
+sql: |
+  SELECT employees.name AS employees_without_purchases
+  FROM employees.csv AS employees
+  LEFT JOIN purchases.csv AS purchases ON employees.employee_id = purchases.employee_id
+  AND purchases.purchase_date > '2024-02-01'
+  WHERE purchases.purchase_id IS NULL

+ 9 - 0
recipes/llama_api_providers/Groq/groq-api-cookbook/json-mode-function-calling-for-sql/verified-queries/most-expensive-purchase.yaml

@@ -0,0 +1,9 @@
+description: Employee with the most expensive purchase
+sql: |
+  SELECT employees.name AS employee_name,
+        MAX(amount) AS max_purchase_amount
+  FROM purchases.csv AS purchases
+  JOIN employees.csv AS employees ON purchases.employee_id = employees.employee_id
+  GROUP BY employees.name
+  ORDER BY max_purchase_amount DESC
+  LIMIT 1

+ 11 - 0
recipes/llama_api_providers/Groq/groq-api-cookbook/json-mode-function-calling-for-sql/verified-queries/most-recent-purchases.yaml

@@ -0,0 +1,11 @@
+description: Five most recent purchases
+sql: |
+  SELECT 
+         purchases.purchase_date,
+         purchases.product_name,
+         purchases.amount,
+         employees.name
+  FROM purchases.csv AS purchases
+  JOIN employees.csv AS employees ON purchases.employee_id = employees.employee_id
+  ORDER BY purchases.purchase_date DESC
+  LIMIT 5;

+ 6 - 0
recipes/llama_api_providers/Groq/groq-api-cookbook/json-mode-function-calling-for-sql/verified-queries/number-of-teslas.yaml

@@ -0,0 +1,6 @@
+description: Number of Teslas purchased
+sql: |
+  SELECT COUNT(*) AS number_of_teslas
+  FROM purchases.csv AS p
+  JOIN employees.csv AS e ON e.employee_id = p.employee_id
+  WHERE p.product_name = 'Tesla'
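
Each of these YAML files pairs a plain-English description with a verified SQL query over the CSVs above. A minimal sketch of how one might be executed locally with DuckDB and PyYAML (rewriting the bare `employees.csv`/`purchases.csv` references to quoted file paths is an assumption; the suppressed notebook may resolve them differently):

```python
import duckdb
import yaml

with open("verified-queries/most-recent-purchases.yaml") as f:
    verified = yaml.safe_load(f)

# Quote the bare CSV references so DuckDB's replacement scan reads the files.
sql = (verified["sql"]
       .replace("employees.csv", "'data/employees.csv'")
       .replace("purchases.csv", "'data/purchases.csv'"))

print(verified["description"])
print(duckdb.sql(sql))
```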

File diff is too large
+ 639 - 0
recipes/llama_api_providers/Groq/groq-api-cookbook/json-mode-social-determinants-of-health/SDOH-Json-mode.ipynb


File diff is too large
+ 31 - 0
recipes/llama_api_providers/Groq/groq-api-cookbook/json-mode-social-determinants-of-health/clinical_notes/00456321.txt


File diff is too large
+ 28 - 0
recipes/llama_api_providers/Groq/groq-api-cookbook/json-mode-social-determinants-of-health/clinical_notes/00567289.txt


File diff is too large
+ 28 - 0
recipes/llama_api_providers/Groq/groq-api-cookbook/json-mode-social-determinants-of-health/clinical_notes/00678934.txt


+ 32 - 0
recipes/llama_api_providers/Groq/groq-api-cookbook/json-mode-social-determinants-of-health/clinical_notes/00785642.txt

@@ -0,0 +1,32 @@
+**Date:** March 28, 2024
+
+**Patient:** Brian Lee, 55 years old
+
+**MRN:** 00785642
+
+**Chief Complaint:** "I've been having trouble managing my blood sugar levels."
+
+**History of Present Illness:** The patient is a 55-year-old with a known diagnosis of Type 2 Diabetes Mellitus, presenting with difficulty in managing blood sugar levels over the past month. Reports fluctuating blood sugar readings despite adherence to prescribed diet and medication. The patient expresses a desire to avoid any complications associated with poor diabetes management.
+
+**Past Medical History:** Type 2 Diabetes Mellitus, controlled hypertension
+
+**Social History:**
+The patient is a self-employed graphic designer, working from a home office. They describe their work as fulfilling and report a stable income. They own a home in a well-regarded neighborhood, noting its quiet and safe environment. The patient has a supportive spouse and a close circle of friends, often participating in social gatherings and community events.
+
+The patient completed a bachelor's degree in graphic design and continues to take online courses to stay updated in their field. They have reliable transportation, a recent model car, ensuring timely access to healthcare appointments. The patient is an active member of a local walking group, which meets thrice a week for exercise and socialization.
+
+Nutritionally, the patient is mindful of their diet, focusing on low-glycemic foods, and has not faced issues with food security. They have comprehensive health insurance coverage through a private provider, with satisfactory benefits that cover their medical needs, including diabetes management.
+
+**Review of Systems:** Reports consistent adherence to diabetic diet and medication regimen. Denies any episodes of hypoglycemia or diabetic ketoacidosis.
+
+**Physical Examination:**
+- General: Well-nourished and well-kept appearance. Alert and oriented.
+- Vitals: BP 130/80, HR 72, Temp 98.6°F, Resp 14/min
+
+**Assessment/Plan:**
+- Review current diabetes management plan and consider medication adjustments.
+- Recommend continuous glucose monitoring (CGM) to better understand glucose patterns and variability.
+- Encourage continued engagement with community exercise groups and dietary mindfulness.
+- Schedule a follow-up appointment in 3 months or sooner if glucose management issues persist.
+
+**Comments:** The patient demonstrates a proactive approach to managing their diabetes, supported by a stable and healthy social environment. Continued focus on lifestyle modification and close monitoring of blood sugar levels are key to preventing complications.
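
The SDOH-Json-mode.ipynb diff is suppressed, but clinical notes like the one above are its inputs. A sketch of how Groq's JSON mode might be used to extract social determinants from such a note (the key names in the system prompt are illustrative assumptions, not the notebook's actual schema):

```python
import json
import os

from groq import Groq

client = Groq(api_key=os.environ["GROQ_API_KEY"])

with open("clinical_notes/00785642.txt") as f:
    note = f.read()

# response_format forces the model to emit a single valid JSON object.
completion = client.chat.completions.create(
    model="llama3-70b-8192",
    response_format={"type": "json_object"},
    messages=[
        {
            "role": "system",
            "content": "Extract social determinants of health from the clinical "
                       "note as JSON with keys: employment_status, housing, "
                       "transportation, social_support, food_security.",
        },
        {"role": "user", "content": note},
    ],
)

print(json.dumps(json.loads(completion.choices[0].message.content), indent=2))
```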

File diff is too large
+ 30 - 0
recipes/llama_api_providers/Groq/groq-api-cookbook/json-mode-social-determinants-of-health/clinical_notes/00893247.txt


File diff is too large
+ 427 - 0
recipes/llama_api_providers/Groq/groq-api-cookbook/llama3-stock-market-function-calling/llama3-stock-market-function-calling.ipynb


+ 340 - 0
recipes/llama_api_providers/Groq/groq-api-cookbook/parallel-tool-use/parallel-tool-use.ipynb

@@ -0,0 +1,340 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "id": "104f2b97-f9bb-4dcc-a4c8-099710768851",
+   "metadata": {},
+   "source": [
+    "# Parallel Tool use"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "f8dc57b6-2c48-4ee3-bb2c-25441274ed2f",
+   "metadata": {},
+   "source": [
+    "### Setup"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "e70814b4",
+   "metadata": {},
+   "source": [
+    "Make sure you have `ipykernel` and `pip` pre-installed"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "962ae5e2",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "%pip install -r requirements.txt"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "id": "e21816b3",
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "'Groq API key configured: gsk_7FdrzM...'"
+      ]
+     },
+     "execution_count": 1,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "import os\n",
+    "import json\n",
+    "\n",
+    "from groq import Groq\n",
+    "from dotenv import load_dotenv\n",
+    "\n",
+    "load_dotenv()\n",
+    "\"Groq API key configured: \" + os.environ[\"GROQ_API_KEY\"][:10] + \"...\""
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "7f7c9c55-e925-4cc1-89f2-58237acf14a4",
+   "metadata": {},
+   "source": [
+    "We will use the ```llama3-70b-8192``` model in this demo. Note that you will need a Groq API Key to proceed and can create an account [here](https://console.groq.com/) to generate one for free. Only Llama 3 models support parallel tool use at this time (05/07/2024).\n",
+    "\n",
+    "We recommend using the 70B Llama 3 model, 8B has subpar consistency."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "id": "0cca781b-1950-4167-b36a-c1099d6b3b00",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "client = Groq(api_key=os.getenv(\"GROQ_API_KEY\"))\n",
+    "model = \"llama3-70b-8192\""
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "2c23ec2b",
+   "metadata": {},
+   "source": [
+    "Let's define a dummy function we can invoke in our tool use loop"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "id": "f2ce18dc",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def get_weather(city: str):\n",
+    "    if city == \"Madrid\":\n",
+    "        return 35\n",
+    "    elif city == \"San Francisco\":\n",
+    "        return 18\n",
+    "    elif city == \"Paris\":\n",
+    "        return 20\n",
+    "    else:\n",
+    "        return 15"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "a37e3c92",
+   "metadata": {},
+   "source": [
+    "Now we define our messages and tools and run the completion request."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "id": "6b454910-4352-40cc-b9b2-cc79edabd7c1",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "messages = [\n",
+    "    {\"role\": \"system\", \"content\": \"\"\"You are a helpful assistant.\"\"\"},\n",
+    "    {\n",
+    "        \"role\": \"user\",\n",
+    "        \"content\": \"What is the weather in Paris, Tokyo and Madrid?\",\n",
+    "    },\n",
+    "]\n",
+    "tools = [\n",
+    "    {\n",
+    "        \"type\": \"function\",\n",
+    "        \"function\": {\n",
+    "            \"name\": \"get_weather\",\n",
+    "            \"description\": \"Returns the weather in the given city in degrees Celsius\",\n",
+    "            \"parameters\": {\n",
+    "                \"type\": \"object\",\n",
+    "                \"properties\": {\n",
+    "                    \"city\": {\n",
+    "                        \"type\": \"string\",\n",
+    "                        \"description\": \"The name of the city\",\n",
+    "                    }\n",
+    "                },\n",
+    "                \"required\": [\"city\"],\n",
+    "            },\n",
+    "        },\n",
+    "    }\n",
+    "]\n",
+    "response = client.chat.completions.create(\n",
+    "    model=model, messages=messages, tools=tools, tool_choice=\"auto\", max_tokens=4096\n",
+    ")\n",
+    "\n",
+    "response_message = response.choices[0].message"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "25c2838f",
+   "metadata": {},
+   "source": [
+    "# Processing the tool calls\n",
+    "\n",
+    "Now we process the assistant message and construct the required messages to continue the conversation. \n",
+    "\n",
+    "*Including* invoking each tool_call against our actual function."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 5,
+   "id": "fe623ab9",
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "[\n",
+      "  {\n",
+      "    \"role\": \"system\",\n",
+      "    \"content\": \"You are a helpful assistant.\"\n",
+      "  },\n",
+      "  {\n",
+      "    \"role\": \"user\",\n",
+      "    \"content\": \"What is the weather in Paris, Tokyo and Madrid?\"\n",
+      "  },\n",
+      "  {\n",
+      "    \"role\": \"assistant\",\n",
+      "    \"tool_calls\": [\n",
+      "      {\n",
+      "        \"id\": \"call_5ak8\",\n",
+      "        \"function\": {\n",
+      "          \"name\": \"get_weather\",\n",
+      "          \"arguments\": \"{\\\"city\\\":\\\"Paris\\\"}\"\n",
+      "        },\n",
+      "        \"type\": \"function\"\n",
+      "      },\n",
+      "      {\n",
+      "        \"id\": \"call_zq26\",\n",
+      "        \"function\": {\n",
+      "          \"name\": \"get_weather\",\n",
+      "          \"arguments\": \"{\\\"city\\\":\\\"Tokyo\\\"}\"\n",
+      "        },\n",
+      "        \"type\": \"function\"\n",
+      "      },\n",
+      "      {\n",
+      "        \"id\": \"call_znf3\",\n",
+      "        \"function\": {\n",
+      "          \"name\": \"get_weather\",\n",
+      "          \"arguments\": \"{\\\"city\\\":\\\"Madrid\\\"}\"\n",
+      "        },\n",
+      "        \"type\": \"function\"\n",
+      "      }\n",
+      "    ]\n",
+      "  },\n",
+      "  {\n",
+      "    \"role\": \"tool\",\n",
+      "    \"content\": \"20\",\n",
+      "    \"tool_call_id\": \"call_5ak8\"\n",
+      "  },\n",
+      "  {\n",
+      "    \"role\": \"tool\",\n",
+      "    \"content\": \"15\",\n",
+      "    \"tool_call_id\": \"call_zq26\"\n",
+      "  },\n",
+      "  {\n",
+      "    \"role\": \"tool\",\n",
+      "    \"content\": \"35\",\n",
+      "    \"tool_call_id\": \"call_znf3\"\n",
+      "  }\n",
+      "]\n"
+     ]
+    }
+   ],
+   "source": [
+    "tool_calls = response_message.tool_calls\n",
+    "\n",
+    "messages.append(\n",
+    "    {\n",
+    "        \"role\": \"assistant\",\n",
+    "        \"tool_calls\": [\n",
+    "            {\n",
+    "                \"id\": tool_call.id,\n",
+    "                \"function\": {\n",
+    "                    \"name\": tool_call.function.name,\n",
+    "                    \"arguments\": tool_call.function.arguments,\n",
+    "                },\n",
+    "                \"type\": tool_call.type,\n",
+    "            }\n",
+    "            for tool_call in tool_calls\n",
+    "        ],\n",
+    "    }\n",
+    ")\n",
+    "\n",
+    "available_functions = {\n",
+    "    \"get_weather\": get_weather,\n",
+    "}\n",
+    "for tool_call in tool_calls:\n",
+    "    function_name = tool_call.function.name\n",
+    "    function_to_call = available_functions[function_name]\n",
+    "    function_args = json.loads(tool_call.function.arguments)\n",
+    "    function_response = function_to_call(**function_args)\n",
+    "\n",
+    "    # Note how we create a separate tool call message for each tool call\n",
+    "    # the model is able to discern the tool call result through the tool_call_id\n",
+    "    messages.append(\n",
+    "        {\n",
+    "            \"role\": \"tool\",\n",
+    "            \"content\": json.dumps(function_response),\n",
+    "            \"tool_call_id\": tool_call.id,\n",
+    "        }\n",
+    "    )\n",
+    "\n",
+    "print(json.dumps(messages, indent=2))"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "1abe981a",
+   "metadata": {},
+   "source": [
+    "Now we run our final completion with multiple tool call results included in the messages array.\n",
+    "\n",
+    "**Note**\n",
+    "\n",
+    "We pass the tool definitions again to help the model understand:\n",
+    "\n",
+    "1. The assistant message with the tool call\n",
+    "2. Interpret the tool results."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 6,
+   "id": "5f077df3",
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "The weather in Paris is 20°C, in Tokyo is 15°C, and in Madrid is 35°C.\n"
+     ]
+    }
+   ],
+   "source": [
+    "response = client.chat.completions.create(\n",
+    "    model=model, messages=messages, tools=tools, tool_choice=\"auto\", max_tokens=4096\n",
+    ")\n",
+    "\n",
+    "print(response.choices[0].message.content)"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3 (ipykernel)",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.10.13"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}

+ 2 - 0
recipes/llama_api_providers/Groq/groq-api-cookbook/parallel-tool-use/requirements.txt

@@ -0,0 +1,2 @@
+groq
+python-dotenv

File diff is too large
+ 993 - 0
recipes/llama_api_providers/Groq/groq-api-cookbook/rag-langchain-presidential-speeches/presidential_speeches.csv


File diff is too large
+ 664 - 0
recipes/llama_api_providers/Groq/groq-api-cookbook/rag-langchain-presidential-speeches/rag-langchain-presidential-speeches.ipynb


+ 21 - 0
recipes/llama_api_providers/Groq/groq-example-templates/conversational-chatbot-langchain/README.md

@@ -0,0 +1,21 @@
+# Groq LangChain Conversational Chatbot
+
+A simple application that allows users to interact with a conversational chatbot powered by LangChain. The application uses the Groq API to generate responses and leverages LangChain's [ConversationBufferWindowMemory](https://python.langchain.com/v0.1/docs/modules/memory/types/buffer_window/) to maintain a history of the conversation to provide context for the chatbot's responses.
+
+## Features
+
+- **Conversational Interface**: The application provides a conversational interface where users can ask questions or make statements, and the chatbot responds accordingly.
+
+- **Contextual Responses**: The application maintains a history of the conversation, which is used to provide context for the chatbot's responses.
+
+- **LangChain Integration**: The chatbot is powered by the LangChain API, which uses advanced natural language processing techniques to generate human-like responses.
+
+## Usage
+
+<!-- markdown-link-check-disable -->
+
+You will need to store a valid Groq API Key as a secret to proceed with this example. You can generate one for free [here](https://console.groq.com/keys).
+
+<!-- markdown-link-check-enable -->
+
+You can [fork and run this application on Replit](https://replit.com/@GroqCloud/Chatbot-with-Conversational-Memory-on-LangChain) or run it on the command line with `python main.py`

+ 74 - 0
recipes/llama_api_providers/Groq/groq-example-templates/conversational-chatbot-langchain/main.py

@@ -0,0 +1,74 @@
+import os
+
+from langchain.chains import LLMChain
+from langchain_core.prompts import (
+    ChatPromptTemplate,
+    HumanMessagePromptTemplate,
+    MessagesPlaceholder,
+)
+from langchain_core.messages import SystemMessage
+from langchain.chains.conversation.memory import ConversationBufferWindowMemory
+from langchain_groq import ChatGroq
+
+
+def main():
+    """
+    This function is the main entry point of the application. It sets up the Groq LangChain chat object, the conversational memory, and runs the command line chat loop.
+    """
+
+    # Get Groq API key
+    groq_api_key = os.environ['GROQ_API_KEY']
+    model = 'llama3-8b-8192'
+    # Initialize Groq Langchain chat object and conversation
+    groq_chat = ChatGroq(
+            groq_api_key=groq_api_key, 
+            model_name=model
+    )
+    
+    print("Hello! I'm your friendly Groq chatbot. I can help answer your questions, provide information, or just chat. I'm also super fast! Let's start our conversation!")
+
+    system_prompt = 'You are a friendly conversational chatbot'
+    conversational_memory_length = 5 # number of previous messages the chatbot will remember during the conversation
+
+    memory = ConversationBufferWindowMemory(k=conversational_memory_length, memory_key="chat_history", return_messages=True)
+
+
+    while True:
+        user_question = input("Ask a question: ")
+
+        # If the user has asked a question,
+        if user_question:
+
+            # Construct a chat prompt template using various components
+            prompt = ChatPromptTemplate.from_messages(
+                [
+                    SystemMessage(
+                        content=system_prompt
+                    ),  # This is the persistent system prompt that is always included at the start of the chat.
+
+                    MessagesPlaceholder(
+                        variable_name="chat_history"
+                    ),  # This placeholder will be replaced by the actual chat history during the conversation. It helps in maintaining context.
+
+                    HumanMessagePromptTemplate.from_template(
+                        "{human_input}"
+                    ),  # This template is where the user's current input will be injected into the prompt.
+                ]
+            )
+
+            # Create a conversation chain using the LangChain LLM (Large Language Model)
+            conversation = LLMChain(
+                llm=groq_chat,  # The Groq LangChain chat object initialized earlier.
+                prompt=prompt,  # The constructed prompt template.
+                verbose=False,  # Set to True to enable verbose output, which can be useful for debugging.
+                memory=memory,  # The conversational memory object that stores and manages the conversation history.
+            )
+            # The chatbot's answer is generated by sending the full prompt to the Groq API.
+            response = conversation.predict(human_input=user_question)
+            print("Chatbot:", response)
+
+if __name__ == "__main__":
+    main()

+ 0 - 0
recipes/llama_api_providers/Groq/groq-example-templates/conversational-chatbot-langchain/requirements.txt


+ 23 - 0
recipes/llama_api_providers/Groq/groq-example-templates/crewai-agents/README.md

@@ -0,0 +1,23 @@
+# CrewAI Machine Learning Assistant
+
+## Overview
+
+The [CrewAI](https://docs.crewai.com/) Machine Learning Assistant is a command line application designed to kickstart your machine learning projects. It leverages a team of AI agents to guide you through the initial steps of defining, assessing, and solving machine learning problems.
+
+## Features
+
+- **Agents**: Utilizes specialized agents to perform tasks such as problem definition, data assessment, model recommendation, and code generation, enhancing the workflow and efficiency of machine learning projects.
+
+- **CrewAI Framework**: Integrates multiple agents into a cohesive framework, enabling seamless interaction and task execution to streamline the machine learning process.
+
+- **LangChain Integration**: Incorporates LangChain to facilitate natural language processing and enhance the interaction between the user and the machine learning assistant.
+
+## Usage
+
+<!-- markdown-link-check-disable -->
+
+You will need to store a valid Groq API Key as a secret to proceed with this example. You can generate one for free [here](https://console.groq.com/keys).
+
+<!-- markdown-link-check-enable -->
+
+You can [fork and run this application on Replit](https://replit.com/@GroqCloud/CrewAI-Machine-Learning-Assistant) or run it on the command line with `python main.py`. You can place a sample .csv file in the same directory as `main.py` to give the application a head start on your ML problem. The application will output a Markdown file with Python code for your ML use case to the same directory as `main.py`.

+ 184 - 0
recipes/llama_api_providers/Groq/groq-example-templates/crewai-agents/main.py

@@ -0,0 +1,184 @@
+import pandas as pd
+import os
+from crewai import Agent, Task, Crew
+from langchain_groq import ChatGroq
+
+
+def main():
+    """
+    Main function to initialize and run the CrewAI Machine Learning Assistant.
+
+    This function sets up a machine learning assistant using the Llama 3 model with the ChatGroq API.
+    It provides a text-based interface for users to define, assess, and solve machine learning problems
+    by interacting with multiple specialized AI agents. The function outputs the results to the console 
+    and writes them to a markdown file.
+
+    Steps:
+    1. Initialize the ChatGroq API with the specified model and API key.
+    2. Display introductory text about the CrewAI Machine Learning Assistant.
+    3. Create and configure four AI agents:
+        - Problem_Definition_Agent: Clarifies the machine learning problem.
+        - Data_Assessment_Agent: Evaluates the quality and suitability of the provided data.
+        - Model_Recommendation_Agent: Suggests suitable machine learning models.
+        - Starter_Code_Generator_Agent: Generates starter Python code for the project.
+    4. Prompt the user to describe their machine learning problem.
+    5. Check if a .csv file is available in the current directory and try to read it as a DataFrame.
+    6. Define tasks for the agents based on user input and data availability.
+    7. Create a Crew instance with the agents and tasks, and run the tasks.
+    8. Print the results and write them to an output markdown file.
+    """
+
+    model = 'llama3-8b-8192'
+
+    llm = ChatGroq(
+            temperature=0, 
+            groq_api_key = os.getenv('GROQ_API_KEY'), 
+            model_name=model
+        )
+
+    print('CrewAI Machine Learning Assistant')
+    multiline_text = """
+    The CrewAI Machine Learning Assistant is designed to guide users through the process of defining, assessing, and solving machine learning problems. It leverages a team of AI agents, each with a specific role, to clarify the problem, evaluate the data, recommend suitable models, and generate starter Python code. Whether you're a seasoned data scientist or a beginner, this application provides valuable insights and a head start in your machine learning projects.
+    """
+
+    print(multiline_text)
+
+
+    Problem_Definition_Agent = Agent(
+        role='Problem_Definition_Agent',
+        goal="""clarify the machine learning problem the user wants to solve, 
+            identifying the type of problem (e.g., classification, regression) and any specific requirements.""",
+        backstory="""You are an expert in understanding and defining machine learning problems. 
+            Your goal is to extract a clear, concise problem statement from the user's input, 
+            ensuring the project starts with a solid foundation.""",
+        verbose=True,
+        allow_delegation=False,
+        llm=llm,
+    )
+
+    Data_Assessment_Agent = Agent(
+        role='Data_Assessment_Agent',
+        goal="""evaluate the data provided by the user, assessing its quality, 
+            suitability for the problem, and suggesting preprocessing steps if necessary.""",
+        backstory="""You specialize in data evaluation and preprocessing. 
+            Your task is to guide the user in preparing their dataset for the machine learning model, 
+            including suggestions for data cleaning and augmentation.""",
+        verbose=True,
+        allow_delegation=False,
+        llm=llm,
+    )
+
+    Model_Recommendation_Agent = Agent(
+        role='Model_Recommendation_Agent',
+        goal="""suggest the most suitable machine learning models based on the problem definition 
+            and data assessment, providing reasons for each recommendation.""",
+        backstory="""As an expert in machine learning algorithms, you recommend models that best fit 
+            the user's problem and data. You provide insights into why certain models may be more effective than others,
+            considering classification vs regression and supervised vs unsupervised frameworks.""",
+        verbose=True,
+        allow_delegation=False,
+        llm=llm,
+    )
+
+
+    Starter_Code_Generator_Agent = Agent(
+        role='Starter_Code_Generator_Agent',
+        goal="""generate starter Python code for the project, including data loading, 
+            model definition, and a basic training loop, based on findings from the problem definitions,
+            data assessment and model recommendation""",
+        backstory="""You are a code wizard, able to generate starter code templates that users 
+            can customize for their projects. Your goal is to give users a head start in their coding efforts.""",
+        verbose=True,
+        allow_delegation=False,
+        llm=llm,
+    )
+
+
+    user_question = input("Describe your ML problem: ")
+    data_upload = False
+    # Check if there is a .csv file in the current directory
+    if any(file.endswith(".csv") for file in os.listdir()):
+        sample_fp = [file for file in os.listdir() if file.endswith(".csv")][0]
+        try:
+            # Attempt to read the uploaded file as a DataFrame
+            df = pd.read_csv(sample_fp).head(5)
+
+            # If successful, set 'data_upload' to True
+            data_upload = True
+
+            # Display the DataFrame in the app
+            print("Data successfully uploaded and read as DataFrame:")
+            print(df)
+        except Exception as e:
+            print(f"Error reading the file: {e}")
+
+    if user_question:
+
+        task_define_problem = Task(
+        description="""Clarify and define the machine learning problem, 
+            including identifying the problem type and specific requirements.
+
+            Here is the user's problem:
+            {ml_problem}
+            """.format(ml_problem=user_question),
+        agent=Problem_Definition_Agent,
+        expected_output="A clear and concise definition of the machine learning problem."
+        )
+
+        if data_upload:
+            task_assess_data = Task(
+                description="""Evaluate the user's data for quality and suitability, 
+                suggesting preprocessing or augmentation steps if needed.
+
+                Here is a sample of the user's data:
+                {df}
+                The file name is called {uploaded_file}
+
+                """.format(df=df.head(),uploaded_file=sample_fp),
+                agent=Data_Assessment_Agent,
+                expected_output="An assessment of the data's quality and suitability, with suggestions for preprocessing or augmentation if necessary."
+            )
+        else:
+            task_assess_data = Task(
+                description="""The user has not uploaded any specific data for this problem,
+                but please go ahead and consider a hypothetical dataset that might be useful
+                for their machine learning problem. 
+                """,
+                agent=Data_Assessment_Agent,
+                expected_output="A hypothetical dataset that might be useful for the user's machine learning problem, along with any necessary preprocessing steps."
+            )
+
+        task_recommend_model = Task(
+        description="""Suggest suitable machine learning models for the defined problem 
+            and assessed data, providing rationale for each suggestion.""",
+        agent=Model_Recommendation_Agent,
+        expected_output="A list of suitable machine learning models for the defined problem and assessed data, along with the rationale for each suggestion."
+        )
+
+
+        task_generate_code = Task(
+        description="""Generate starter Python code tailored to the user's project using the model recommendation agent's recommendation(s), 
+            including snippets for package import, data handling, model definition, and training
+            """,
+        agent=Starter_Code_Generator_Agent,
+        expected_output="Python code snippets for package import, data handling, model definition, and training, tailored to the user's project, plus a brief summary of the problem and model recommendations."
+        )
+
+
+        crew = Crew(
+            agents=[Problem_Definition_Agent, Data_Assessment_Agent, Model_Recommendation_Agent,  Starter_Code_Generator_Agent], 
+            tasks=[task_define_problem, task_assess_data, task_recommend_model,  task_generate_code], 
+            verbose=False
+        )
+
+        result = crew.kickoff()
+
+        print(result)
+
+        with open('output.md', "w") as file:
+            print('\n\nThese results have been exported to output.md')
+            file.write(result)
+
+
+if __name__ == "__main__":
+    main()

+ 3 - 0
recipes/llama_api_providers/Groq/groq-example-templates/crewai-agents/requirements.txt

@@ -0,0 +1,3 @@
+crewai
+langchain_groq
+pandas

+ 21 - 0
recipes/llama_api_providers/Groq/groq-example-templates/groq-quickstart-conversational-chatbot/README.md

@@ -0,0 +1,21 @@
+# Groq Quickstart Conversational Chatbot
+
+A simple application that allows users to interact with a conversational chatbot powered by Groq. This application is designed to get users up and running quickly with building a chatbot.
+
+## Features
+
+**Conversational Interface**: Provides a simple interface where users can input text and receive responses from the chatbot.
+
+**Short Responses**: The chatbot replies with very short and concise answers, keeping interactions brief and to the point.
+
+**Groq Integration**: Utilizes the Groq API to generate responses, leveraging the power of the `llama3-70b-8192` model.
+
+## Usage
+
+<!-- markdown-link-check-disable -->
+
+You will need to store a valid Groq API Key as a secret to proceed with this example. You can generate one for free [here](https://console.groq.com/keys).
+
+<!-- markdown-link-check-enable -->
+
+You can [fork and run this application on Replit](https://replit.com/@GroqCloud/Groq-Quickstart-Conversational-Chatbot) or run it on the command line with `python main.py`.

+ 38 - 0
recipes/llama_api_providers/Groq/groq-example-templates/groq-quickstart-conversational-chatbot/main.py

@@ -0,0 +1,38 @@
+# Set GROQ_API_KEY as an environment variable (e.g. in your Replit secrets)
+
+import os
+from groq import Groq
+
+# Create the Groq client
+client = Groq(
+    api_key=os.environ.get("GROQ_API_KEY")
+)
+
+# Set the system prompt
+system_prompt = {
+    "role": "system",
+    "content":
+    "You are a helpful assistant. You reply with very short answers."
+}
+
+# Initialize the chat history
+chat_history = [system_prompt]
+
+while True:
+  # Get user input from the console
+  user_input = input("You: ")
+
+  # Append the user input to the chat history
+  chat_history.append({"role": "user", "content": user_input})
+
+  response = client.chat.completions.create(model="llama3-70b-8192",
+                                            messages=chat_history,
+                                            max_tokens=100,
+                                            temperature=1.2)
+  # Append the response to the chat history
+  chat_history.append({
+      "role": "assistant",
+      "content": response.choices[0].message.content
+  })
+  # Print the response
+  print("Assistant:", response.choices[0].message.content)

+ 1 - 0
recipes/llama_api_providers/Groq/groq-example-templates/groq-quickstart-conversational-chatbot/requirements.txt

@@ -0,0 +1 @@
+groq

+ 27 - 0
recipes/llama_api_providers/Groq/groq-example-templates/groqing-the-stock-market-function-calling-llama3/README.md

@@ -0,0 +1,27 @@
+# 'Groqing the Stock Market' with Llama 3 Function Calling
+
+This is a simple application that leverages the yfinance API to provide insights into stocks and their prices. The application uses the Llama 3 model on Groq in conjunction with LangChain to call functions based on the user prompt.
+
+## Key Functions
+
+- **get_stock_info(symbol, key)**: This function fetches various information about a given stock symbol. The information can be anything from the company's address to its financial ratios. The 'key' parameter specifies the type of information to fetch.
+
+- **get_historical_price(symbol, start_date, end_date)**: This function fetches the historical stock prices for a given symbol from a specified start date to an end date. The returned data is a DataFrame with the date and closing price of the stock.
+
+- **plot_price_over_time(historical_price_dfs)**: This function takes a list of DataFrames (each containing historical price data for a stock) and plots the prices over time using Plotly. The plot is saved to the same directory as the app.
+
+- **call_functions(llm_with_tools, user_prompt)**: This function takes the user's question, invokes the appropriate tool (either get_stock_info or get_historical_price), and generates a response. If the user asked for historical prices, it also calls plot_price_over_time to generate a plot.
+
+## Function Calling
+
+The function calling in this application is handled by the Groq API, abstracted with LangChain. When the user asks a question, the application invokes the appropriate tool with parameters based on the user's question. The tool's output is then used to generate a response.
+
+## Usage
+
+<!-- markdown-link-check-disable -->
+
+You will need to store a valid Groq API Key as a secret to proceed with this example. You can generate one for free [here](https://console.groq.com/keys).
+
+<!-- markdown-link-check-enable -->
+
+You can [fork and run this application on Replit](https://replit.com/@GroqCloud/Groqing-the-Stock-Market-Function-Calling-with-Llama3) or run it on the command line with `python main.py`.
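
The committed main.py diff is suppressed below. As a rough sketch of what the `get_historical_price` helper described above might look like with yfinance (the body is an assumption, not the committed implementation):

```python
import pandas as pd
import yfinance as yf

def get_historical_price(symbol: str, start_date: str, end_date: str) -> pd.DataFrame:
    """Fetch daily closing prices for `symbol` between two ISO dates."""
    history = yf.Ticker(symbol).history(start=start_date, end=end_date)
    # Keep just the date and closing price, as the README describes.
    df = history[["Close"]].reset_index()
    df.columns = ["Date", symbol]
    return df

# e.g. get_historical_price("AAPL", "2024-01-01", "2024-05-01")
```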

File diff is too large
+ 139 - 0
recipes/llama_api_providers/Groq/groq-example-templates/groqing-the-stock-market-function-calling-llama3/main.py


+ 12 - 0
recipes/llama_api_providers/Groq/groq-example-templates/groqing-the-stock-market-function-calling-llama3/requirements.txt

@@ -0,0 +1,12 @@
+streamlit
+pandas
+numpy
+groq
+langchain_community
+langchain_groq
+yfinance
+plotly
+langchain_core
+nbformat>=4.2.0
+ipython
+kaleido

+ 21 - 0
recipes/llama_api_providers/Groq/groq-example-templates/llamachat-conversational-chatbot-with-llamaIndex/README.md

@@ -0,0 +1,21 @@
+# LlamaChat: Conversational Chatbot with LlamaIndex and Llama3
+
+A simple application that allows users to interact with a conversational chatbot powered by the LlamaIndex framework and Meta's Llama3 model. The application uses the Groq API to generate responses and supports different modes of interaction, including simple chat, streaming chat, and customizable chat with system prompts.
+
+## Features
+
+**LlamaIndex**: The application uses LlamaIndex to manage and generate responses, leveraging the power of Groq's language model.
+
+**Simple Chat**: Generates responses based on user input using the Groq API with LlamaIndex.
+
+**Streaming Chat**: Provides real-time streaming responses for user input.
+
+**Customizable Chat**: Allows for chat customization by setting a system prompt to guide the chatbot's responses.
+
+## Usage
+
+<!-- markdown-link-check-disable -->
+
+You will need to store a valid Groq API Key as a secret to proceed with this example. You can generate one for free [here](https://console.groq.com/keys).
+
+<!-- markdown-link-check-enable -->

+ 46 - 0
recipes/llama_api_providers/Groq/groq-example-templates/llamachat-conversational-chatbot-with-llamaIndex/main.py

@@ -0,0 +1,46 @@
+from llama_index.llms.groq import Groq
+from llama_index.core.llms import ChatMessage
+
+llm = Groq(model="llama3-8b-8192")
+
+
+system_prompt = 'You are a friendly but highly sarcastic chatbot assistant'
+
+while True:
+    # Get the user's question
+    user_input = input("User: ")
+
+    ##################################
+    # Simple Chat
+    ##################################
+    print('Simple Chat:\n\n')
+    response = llm.complete(user_input)
+    print(response)
+
+
+    ##################################
+    # Streaming Chat
+    ##################################
+    stream_response = llm.stream_complete(
+        user_input
+    )
+    print('\n\nStreaming Chat:\n')
+    for t in stream_response:
+        print(t.delta, end="")
+
+
+    ##################################
+    # Customizable Chat
+    ##################################
+    messages = [
+        ChatMessage(role="system", content=system_prompt),
+        ChatMessage(role="user", content=user_input),
+    ]
+    print('\n\nChat with System Prompt:\n')
+    response_with_system_prompt = llm.chat(messages)
+
+    print(response_with_system_prompt)
+
+

+ 2 - 0
recipes/llama_api_providers/Groq/groq-example-templates/llamachat-conversational-chatbot-with-llamaIndex/requirements.txt

@@ -0,0 +1,2 @@
+llama_index
+llama-index-llms-groq

+ 33 - 0
recipes/llama_api_providers/Groq/groq-example-templates/presidential-speeches-rag-with-pinecone/README.md

@@ -0,0 +1,33 @@
+# Presidential Speeches RAG with Pinecone
+
+This repository contains a command line application that allows users to ask questions about US presidential speeches by applying Retrieval-Augmented Generation (RAG) over a Pinecone vector database. The application uses RAG to answer the user's question by retrieving the most relevant presidential speeches and using them to ground the LLM response.
+
+## Features
+
+- **RAG (Retrieval-Augmented Generation)**: Enhances the generation of responses by integrating retrieval-based methods. This feature allows the system to fetch relevant information from a large corpus of data, providing more accurate and contextually appropriate answers by combining retrieved content with generative capabilities.
+
+- **Vector Databases (Pinecone)**: Integrates with Pinecone to store and manage vector embeddings efficiently. Pinecone's high-performance vector database allows for fast and scalable similarity searches, enabling quick retrieval of relevant data for various machine learning and AI applications.
+
+- **LangChain Integration**: Leverages LangChain to facilitate natural language processing tasks. LangChain enhances the interaction between the user and the system by providing robust language modeling capabilities, ensuring seamless and intuitive communication.
+
+## Code Overview
+
+The main script of the application is [main.py](./main.py). Here's a brief overview of its main functions:
+
+- `get_relevant_excerpts(user_question, docsearch)`: This function takes a user's question and a Pinecone vector store as input, performs a similarity search on the vector store using the user's question, and returns the most relevant excerpts from presidential speeches.
+
+- `presidential_speech_chat_completion(client, model, user_question, relevant_excerpts)`: This function takes a Groq client, a model name, the user's question, and relevant excerpts from presidential speeches as input. It generates a response to the user's question grounded in those excerpts.
+
+## Usage
+
+<!-- markdown-link-check-disable -->
+
+You will need to store a valid Groq API Key as a secret to proceed with this example outside of this Repl. You can generate one for free [here](https://console.groq.com/keys).
+
+<!-- markdown-link-check-enable -->
+
+You will also need your own [Pinecone](https://www.pinecone.io/) index with presidential speech embeddings to run this code locally. You can create a Pinecone API key and one index for a small project for free on their Starter plan, and visit [this Cookbook post](https://github.com/groq/groq-api-cookbook/blob/dan/replit-conversion/presidential-speeches-rag/presidential-speeches-rag.ipynb) for more info on RAG and a guide to uploading these embeddings to a vector database.
+
+You can [fork and run this application on Replit](https://replit.com/@GroqCloud/Presidential-Speeches-RAG-with-Pinecone) or run it on the command line with `python main.py`.

+ 114 - 0
recipes/llama_api_providers/Groq/groq-example-templates/presidential-speeches-rag-with-pinecone/main.py

@@ -0,0 +1,114 @@
+from groq import Groq
+from pinecone import Pinecone
+import os
+
+from langchain_community.embeddings.sentence_transformer import SentenceTransformerEmbeddings
+from langchain_pinecone import PineconeVectorStore
+
+
+def get_relevant_excerpts(user_question, docsearch):
+    """
+    This function retrieves the most relevant excerpts from presidential speeches based on the user's question.
+    Parameters:
+    user_question (str): The question asked by the user.
+    docsearch (PineconeVectorStore): The Pinecone vector store containing the presidential speeches.
+    Returns:
+    str: A string containing the most relevant excerpts from presidential speeches.
+    """
+
+    # Perform a similarity search on the Pinecone vector store using the user's question
+    relevant_docs = docsearch.similarity_search(user_question)
+
+    # Extract the page content from the top 3 most relevant documents and join them into a single string
+    relevant_excerpts = '\n\n------------------------------------------------------\n\n'.join([doc.page_content for doc in relevant_docs[:3]])
+
+    return relevant_excerpts
+
+
+def presidential_speech_chat_completion(client, model, user_question, relevant_excerpts):
+    """
+    This function generates a response to the user's question using a pre-trained model.
+    Parameters:
+    client (Groq): The Groq client used to interact with the pre-trained model.
+    model (str): The name of the pre-trained model.
+    user_question (str): The question asked by the user.
+    relevant_excerpts (str): A string containing the most relevant excerpts from presidential speeches.
+    Returns:
+    str: A string containing the response to the user's question.
+    """
+
+    # Define the system prompt
+    system_prompt = '''
+    You are a presidential historian. Given the user's question and relevant excerpts from 
+    presidential speeches, answer the question by including direct quotes from presidential speeches. 
+    When using a quote, cite the speech that it was from (ignoring the chunk).
+    '''
+
+    # Generate a response to the user's question using the pre-trained model
+    chat_completion = client.chat.completions.create(
+        messages = [
+            {
+                "role": "system",
+                "content":  system_prompt
+            },
+            {
+                "role": "user",
+                "content": "User Question: " + user_question + "\n\nRelevant Speech Exerpt(s):\n\n" + relevant_excerpts,
+            }
+        ],
+        model = model
+    )
+
+    # Extract the response from the chat completion
+    response = chat_completion.choices[0].message.content
+
+    return response
+
+
+def main():
+    """
+    This is the main function that runs the application. It initializes the Groq client and the SentenceTransformer model,
+    gets user input from the command line, retrieves relevant excerpts from presidential speeches based on the user's question,
+    generates a response to the user's question using a pre-trained model, and displays the response.
+    """
+
+    model = 'llama3-8b-8192'
+
+    embedding_function = SentenceTransformerEmbeddings(model_name="all-MiniLM-L6-v2")
+
+    # Initialize the Groq client
+    groq_api_key = os.getenv('GROQ_API_KEY')
+    pinecone_api_key=os.getenv('PINECONE_API_KEY')
+    pinecone_index_name = "presidential-speeches"
+    client = Groq(
+        api_key=groq_api_key
+    )
+
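+    # Note: PineconeVectorStore reads PINECONE_API_KEY from the environment, so the key must be set even though only the Groq client takes its key explicitly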
+    pc = Pinecone(api_key = pinecone_api_key)
+    docsearch = PineconeVectorStore(index_name=pinecone_index_name, embedding=embedding_function)
+
+    # Display the title and introduction of the application
+    print("Presidential Speeches RAG")
+    multiline_text = """
+    Welcome! Ask questions about U.S. presidents, like "What were George Washington's views on democracy?" or "What did Abraham Lincoln say about national unity?". The app matches your question to relevant excerpts from presidential speeches and generates a response using a pre-trained model.
+    """
+
+    print(multiline_text)
+
+
+    while True:
+        # Get the user's question
+        user_question = input("Ask a question about a US president: ")
+
+        if user_question:
+            relevant_excerpts = get_relevant_excerpts(user_question, docsearch)
+            response = presidential_speech_chat_completion(client, model, user_question, relevant_excerpts)
+            print(response)
+
+
+
+if __name__ == "__main__":
+    main()

+ 8 - 0
recipes/llama_api_providers/Groq/groq-example-templates/presidential-speeches-rag-with-pinecone/requirements.txt

@@ -0,0 +1,8 @@
+pandas
+numpy
+groq
+langchain_community
+langchain_pinecone
+transformers
+scikit-learn
+sentence-transformers

+ 57 - 0
recipes/llama_api_providers/Groq/groq-example-templates/text-to-sql-json-mode/README.md

@@ -0,0 +1,57 @@
+# DuckDB Text-to-SQL with JSON Mode
+
+A command line application that allows users to ask questions about their DuckDB data. The application leverages Groq API's JSON mode to generate SQL queries based on the user's questions and execute them on a DuckDB database.
+
+## Features
+
+- **Text-to-SQL**: The application uses natural language processing to convert user questions into SQL queries, making it easy for users to query their data without knowing SQL.
+
+- **JSON mode**: A feature that constrains the LLM to respond strictly with structured JSON output, provided we supply it with the desired format.
+
+- **Data Summarization**: After executing a SQL query, the application uses the AI to summarize the resulting data in relation to the user's original question.
+
+## Data
+
+The application queries data from two CSV files located in the `data` folder:
+
+- `employees.csv`: Contains employee data including their ID, full name, and email address.
+
+- `purchases.csv`: Records purchase details including purchase ID, date, associated employee ID, amount, and product name.
+
+## Prompts
+
+The base prompt for the AI is stored in a text file in the `prompts` folder:
+
+- `base_prompt.txt`
+
+A well-crafted system prompt is essential for building a functional Text-to-SQL application. Ours serves three purposes:
+
+1. Provide the metadata schemas for our database tables
+2. Indicate any relevant context or tips for querying the DuckDB language or our database schema specifically
+3. Define our desired JSON output (note that to use JSON mode, we must include 'JSON' in the prompt)
+
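+For illustration, a minimal JSON-mode request with the Groq Python SDK might look like this (note that the prompt both mentions JSON and spells out the desired shape):
+
+```python
+from groq import Groq
+
+client = Groq()  # reads GROQ_API_KEY from the environment
+completion = client.chat.completions.create(
+    model="llama3-70b-8192",
+    messages=[{
+        "role": "user",
+        "content": 'How many employees are there? Respond as a valid JSON document: {"sql": "<query here>"}',
+    }],
+    response_format={"type": "json_object"},  # enables JSON mode
+)
+print(completion.choices[0].message.content)
+```
+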
+## Functions
+
+- `chat_with_groq()`: Sends a prompt to the Groq API and returns the AI's response.
+- `execute_duckdb_query()`: Executes a SQL query on a DuckDB database and returns the result.
+- `get_summarization()`: Generates a prompt for the AI to summarize the data resulting from a SQL query.
+
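+Roughly, one turn of the conversation chains these helpers together (a sketch; `client`, `base_prompt`, and `user_question` are set up as in `main.py`):
+
+```python
+import json
+
+full_prompt = base_prompt.format(user_question=user_question)
+llm_response = chat_with_groq(client, full_prompt, "llama3-70b-8192", {"type": "json_object"})
+result_json = json.loads(llm_response)
+if "sql" in result_json:
+    results_df = execute_duckdb_query(result_json["sql"])
+    print(get_summarization(client, user_question, results_df, "llama3-70b-8192"))
+```
+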
+## Usage
+
+<!-- markdown-link-check-disable -->
+
+You will need to store a valid Groq API Key as a secret to proceed with this example. You can generate one for free [here](https://console.groq.com/keys).
+
+<!-- markdown-link-check-enable -->
+
+You can [fork and run this application on Replit](https://replit.com/@GroqCloud/Building-a-Text-to-SQL-app-with-Groqs-JSON-mode) or run it on the command line with `python main.py`.
+
+## Customizing with Your Own Data
+
+This application is designed to be flexible and can be easily customized to work with your own data. If you want to use your own data, follow these steps:
+
+1. **Replace the CSV files**: The application queries data from two CSV files located in the `data` folder: `employees.csv` and `purchases.csv`. Replace these files with your own CSV files.
+
+2. **Modify the base prompt**: The base prompt for the AI, stored in the `prompts` folder as `base_prompt.txt`, contains specific information about the data metadata. Modify this prompt to match the structure and content of your own data. Make sure to accurately describe the tables, columns, and any specific rules or tips for querying your dataset.
+
+By following these steps, you can tailor the DuckDB Query Generator to your own data and use cases. Feel free to experiment and build off this repository to create your own powerful data querying applications.

+ 8 - 0
recipes/llama_api_providers/Groq/groq-example-templates/text-to-sql-json-mode/data/employees.csv

@@ -0,0 +1,8 @@
+employee_id,name,email
+1,Richard Hendricks,richard@piedpiper.com
+2,Erlich Bachman,erlich@aviato.com
+3,Dinesh Chugtai,dinesh@piedpiper.com
+4,Bertram Gilfoyle,gilfoyle@piedpiper.com
+5,Jared Dunn,jared@piedpiper.com
+6,Monica Hall,monica@raviga.com
+7,Gavin Belson,gavin@hooli.com

+ 6 - 0
recipes/llama_api_providers/Groq/groq-example-templates/text-to-sql-json-mode/data/purchases.csv

@@ -0,0 +1,6 @@
+purchase_id,purchase_date,product_name,employee_id,amount
+1,'2024-02-01',iPhone,1,750
+2,'2024-02-02',Tesla,2,70000
+3,'2024-02-03',Humane pin,3,500
+4,'2024-02-04',iPhone,4,700
+5,'2024-02-05',Tesla,5,75000

+ 145 - 0
recipes/llama_api_providers/Groq/groq-example-templates/text-to-sql-json-mode/main.py

@@ -0,0 +1,145 @@
+import os
+from groq import Groq
+import json
+import duckdb
+import sqlparse
+
+def chat_with_groq(client, prompt, model, response_format):
+    """
+    This function sends a prompt to the Groq API and retrieves the AI's response.
+
+    Parameters:
+    client (Groq): The Groq API client.
+    prompt (str): The prompt to send to the AI.
+    model (str): The AI model to use for the response.
+    response_format (dict): The format of the response. 
+        If response_format is a dictionary with {"type": "json_object"}, it configures JSON mode.
+
+    Returns:
+    str: The content of the AI's response.
+    """
+    
+    completion = client.chat.completions.create(
+        model=model,
+        messages=[
+            {
+                "role": "user",
+                "content": prompt
+            }
+        ],
+        response_format=response_format
+    )
+
+    return completion.choices[0].message.content
+
+
+def execute_duckdb_query(query):
+    """
+    This function executes a SQL query on a DuckDB database and returns the result.
+
+    Parameters:
+    query (str): The SQL query to execute.
+
+    Returns:
+    DataFrame: The result of the query as a pandas DataFrame.
+    """
+    original_cwd = os.getcwd()
+    os.chdir('data')
+
+    try:
+        conn = duckdb.connect(database=':memory:', read_only=False)
+        query_result = conn.execute(query).fetchdf().reset_index(drop=True)
+    finally:
+        os.chdir(original_cwd)
+
+    return query_result
+
+
+def get_summarization(client, user_question, df, model):
+    """
+    This function generates a summarization prompt based on the user's question and the resulting data. 
+    It then sends this summarization prompt to the Groq API and retrieves the AI's response.
+
+    Parameters:
+    client (Groq): The Groq API client.
+    user_question (str): The user's question.
+    df (DataFrame): The DataFrame resulting from the SQL query.
+    model (str): The AI model to use for the response.
+    
+    Returns:
+    str: The content of the AI's response to the summarization prompt.
+    """
+    prompt = '''
+    A user asked the following question pertaining to local database tables:
+    
+    {user_question}
+    
+    To answer the question, a dataframe was returned:
+    
+    Dataframe:
+    {df}
+    
+    In a few sentences, summarize the data in the table as it pertains to the original user question. Avoid qualifiers like "based on the data" and do not comment on the structure or metadata of the table itself.
+    '''.format(user_question = user_question, df = df)
+    
+    # Response format is None, so the summary is returned as plain text rather than JSON
+    return chat_with_groq(client, prompt, model, None)
+
+def main():
+    """
+    The main function of the application. It handles user input, controls the flow of the application, 
+    and initiates a conversation in the command line.
+    """
+
+    model = "llama3-70b-8192"
+
+    # Get the Groq API key and create a Groq client
+    groq_api_key = os.getenv('GROQ_API_KEY')
+    client = Groq(
+        api_key=groq_api_key
+    )
+
+    print("Welcome to the DuckDB Query Generator!")
+    print("You can ask questions about the data in the 'employees.csv' and 'purchases.csv' files.")
+
+    # Load the base prompt
+    with open('prompts/base_prompt.txt', 'r') as file:
+        base_prompt = file.read()
+
+    while True:
+        # Get the user's question
+        user_question = input("Ask a question: ")
+
+        if user_question:
+            # Generate the full prompt for the AI
+            full_prompt = base_prompt.format(user_question=user_question)
+
+            # Get the AI's response. Call with '{"type": "json_object"}' to use JSON mode
+            llm_response = chat_with_groq(client, full_prompt, model, {"type": "json_object"})
+
+            result_json = json.loads(llm_response)
+            if 'sql' in result_json:
+                sql_query = result_json['sql']
+                results_df = execute_duckdb_query(sql_query)
+
+                formatted_sql_query = sqlparse.format(sql_query, reindent=True, keyword_case='upper')
+
+                print("```sql\n" + formatted_sql_query + "\n```")
+                print(results_df.to_markdown(index=False))
+
+                summarization = get_summarization(client, user_question, results_df, model)
+                print(summarization)
+            elif 'error' in result_json:
+                print("ERROR:", 'Could not generate valid SQL for this question')
+                print(result_json['error'])
+
+if __name__ == "__main__":
+    main()

+ 42 - 0
recipes/llama_api_providers/Groq/groq-example-templates/text-to-sql-json-mode/prompts/base_prompt.txt

@@ -0,0 +1,42 @@
+You are Groq Advisor, and you are tasked with generating SQL queries for DuckDB based on user questions about data stored in two tables derived from CSV files:
+
+Table: employees.csv
+Columns:
+employee_id (INTEGER): A unique identifier for each employee.
+name (VARCHAR): The full name of the employee.
+email (VARCHAR): The employee's email address.
+
+Table: purchases.csv
+Columns:
+purchase_id (INTEGER): A unique identifier for each purchase.
+purchase_date (DATE): The date of the purchase.
+employee_id (INTEGER): References the employee_id from the employees table, indicating which employee made the purchase.
+amount (FLOAT): The monetary value of the purchase.
+product_name (STRING): The name of the product purchased.
+
+Given a user's question about this data, write a valid DuckDB SQL query that accurately extracts or calculates the requested information from these tables and adheres to SQL best practices for DuckDB, optimizing for readability and performance where applicable.
+
+Here are some tips for writing DuckDB queries:
+* DuckDB syntax requires querying from the .csv file itself, i.e. employees.csv and purchases.csv. For example: SELECT * FROM employees.csv as employees
+* All tables referenced MUST be aliased
+* DuckDB does not implicitly include a GROUP BY clause
+* CURRENT_DATE gets today's date
+* Aggregated fields like COUNT(*) must be appropriately named
+
+And some rules for querying the dataset:
+* Never include employee_id in the output - show employee name instead
+
+Also note that:
+* Valid values for product_name include 'Tesla','iPhone' and 'Humane pin'
+
+
+Question:
+--------
+{user_question}
+--------
+Reminder: Generate a DuckDB SQL query to answer the question:
+* respond as a valid JSON Document
+* [Best] If the question can be answered with the available tables: {{"sql": <sql here>}} 
+* If the question cannot be answered with the available tables: {{"error": <explanation here>}}
+* Ensure that the entire output is returned on only one single line
+* Keep your query as simple and straightforward as possible; do not use subqueries

+ 4 - 0
recipes/llama_api_providers/Groq/groq-example-templates/text-to-sql-json-mode/requirements.txt

@@ -0,0 +1,4 @@
+duckdb
+groq
+sqlparse
+pandas
+tabulate

+ 53 - 0
recipes/llama_api_providers/Groq/groq-example-templates/verified-sql-function-calling/README.md

@@ -0,0 +1,53 @@
+# Executing Verified Queries with Function Calling
+
+A command line application that allows users to ask questions about their DuckDB data using the Groq API. The application uses function calling to find the most similar pre-verified query to the user's question, execute it against the data, and return the results.
+
+## Features
+
+- **Function Calling**: The application uses function calling to match the user's question to the most relevant pre-verified SQL query.
+
+- **SQL Execution**: The application executes the selected SQL query on a DuckDB database and displays the result.
+
+## Functions
+
+- `get_verified_queries(directory_path)`: Reads YAML files from the specified directory and loads the verified SQL queries and their descriptions.
+
+- `execute_duckdb_query_function_calling(query_name, verified_queries_dict)`: Executes the provided SQL query using DuckDB and returns the result as a DataFrame.
+
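+Together, the retrieval step boils down to something like this (a sketch using one of the bundled query names; run from the template root so the `data` folder is found):
+
+```python
+verified_queries_dict = get_verified_queries("verified-queries/")
+results_df = execute_duckdb_query_function_calling("most-recent-purchases", verified_queries_dict)
+print(results_df)
+```
+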
+## Data
+
+The application queries data from CSV files located in the data folder:
+
+- `employees.csv`: Contains employee data including their ID, full name, and email address.
+
+- `purchases.csv`: Records purchase details including purchase ID, date, associated employee ID, amount, and product name.
+
+## Verified Queries
+
+The verified SQL queries and their descriptions are stored in YAML files located in the `verified-queries` folder. Descriptions are used to semantically map prompts to queries:
+
+- `most-recent-purchases.yaml`: Returns the 5 most recent purchases
+
+- `most-expensive-purchase.yaml`: Finds the most expensive purchase
+
+- `number-of-teslas.yaml`: Counts the number of Teslas purchased
+
+- `employees-without-purchases.yaml`: Gets employees without any recent purchases
+
+## Usage
+
+<!-- markdown-link-check-disable -->
+
+You will need to store a valid Groq API Key as a secret to proceed with this example. You can generate one for free [here](https://console.groq.com/keys).
+
+<!-- markdown-link-check-enable -->
+
+You can [fork and run this application on Replit](https://replit.com/@GroqCloud/Execute-Verified-SQL-Queries-with-Function-Calling) or run it on the command line with `python main.py`.
+
+## Customizing with Your Own Data
+
+This application is designed to be flexible and can be easily customized to work with your own data. If you want to use your own data, follow these steps:
+
+1. **Replace the CSV files**: The application queries data from CSV files located in the `data` folder. Replace these files with your own CSV files.
+
+2. **Modify the verified queries**: The verified SQL queries and their descriptions are stored in YAML files located in the `verified-queries` folder. Replace these files with your own verified SQL queries and descriptions.

+ 8 - 0
recipes/llama_api_providers/Groq/groq-example-templates/verified-sql-function-calling/data/employees.csv

@@ -0,0 +1,8 @@
+employee_id,name,email
+1,Richard Hendricks,richard@piedpiper.com
+2,Erlich Bachman,erlich@aviato.com
+3,Dinesh Chugtai,dinesh@piedpiper.com
+4,Bertram Gilfoyle,gilfoyle@piedpiper.com
+5,Jared Dunn,jared@piedpiper.com
+6,Monica Hall,monica@raviga.com
+7,Gavin Belson,gavin@hooli.com

+ 6 - 0
recipes/llama_api_providers/Groq/groq-example-templates/verified-sql-function-calling/data/purchases.csv

@@ -0,0 +1,6 @@
+purchase_id,purchase_date,product_name,employee_id,amount
+1,'2024-02-01',iPhone,1,750
+2,'2024-02-02',Tesla,2,70000
+3,'2024-02-03',Humane pin,3,500
+4,'2024-02-04',iPhone,4,700
+5,'2024-02-05',Tesla,5,75000

+ 158 - 0
recipes/llama_api_providers/Groq/groq-example-templates/verified-sql-function-calling/main.py

@@ -0,0 +1,158 @@
+import os
+from groq import Groq
+import duckdb
+import yaml
+import glob
+import json
+
+def get_verified_queries(directory_path):
+    """
+    Reads YAML files from the specified directory, loads the verified SQL queries and their descriptions,
+    and stores them in a dictionary.
+
+    Parameters:
+        directory_path (str): The path to the directory containing the YAML files with verified queries.
+
+    Returns:
+        dict: A dictionary where the keys are the names of the YAML files (without the directory path and file extension)
+              and the values are the parsed content of the YAML files.
+    """
+    verified_queries_yaml_files = glob.glob(os.path.join(directory_path, '*.yaml'))
+    verified_queries_dict = {}
+    for file in verified_queries_yaml_files:
+        with open(file, 'r') as stream:
+            try:
+                file_name = file[len(directory_path):-5]  # strip the directory prefix and the '.yaml' extension
+                verified_queries_dict[file_name] = yaml.safe_load(stream)
+            except yaml.YAMLError:
+                continue  # skip files that fail to parse
+        
+    return verified_queries_dict
+
+
+def execute_duckdb_query_function_calling(query_name,verified_queries_dict):
+    """
+    Executes a SQL query from the verified queries dictionary using DuckDB and returns the result as a DataFrame.
+
+    Parameters:
+        query_name (str): The name of the query to be executed, corresponding to a key in the verified queries dictionary.
+        verified_queries_dict (dict): A dictionary containing verified queries, where the keys are query names and the values
+                                      are dictionaries with query details including the SQL statement.
+
+    Returns:
+        pandas.DataFrame: The result of the executed query as a DataFrame.
+    """
+    
+    original_cwd = os.getcwd()
+    os.chdir('data')
+
+    query = verified_queries_dict[query_name]['sql']
+    
+    try:
+        conn = duckdb.connect(database=':memory:', read_only=False)
+        query_result = conn.execute(query).fetchdf().reset_index(drop=True)
+    finally:
+        os.chdir(original_cwd)
+
+    return query_result
+
+
+model = "llama3-8b-8192"
+
+# Initialize the Groq client
+groq_api_key = os.getenv('GROQ_API_KEY')
+client = Groq(
+    api_key=groq_api_key
+)
+
+directory_path = 'verified-queries/'
+verified_queries_dict = get_verified_queries(directory_path)
+
+# Display the title and introduction of the application
+multiline_text = """
+Welcome! Ask questions about employee data or purchase details, like "Show the 5 most recent purchases" or "What was the most expensive purchase?". The app matches your question to pre-verified SQL queries for accurate results.
+"""
+
+print(multiline_text)
+
+    
+while True:
+    # Get user input from the console
+    user_input = input("You: ")
+
+    
+    # Simplify verified_queries_dict to just show query name and description
+    query_description_mapping = {key: subdict['description'] for key, subdict in verified_queries_dict.items()}
+    
+    # Step 1: send the conversation and available functions to the model
+    # Define the messages to be sent to the Groq API
+    messages = [
+        {
+            "role": "system",
+            "content": '''You are a function calling LLM that uses the data extracted from the execute_duckdb_query_function_calling function to answer questions around a DuckDB dataset.
+
+            Extract the query_name parameter from this mapping by finding the one whose description best matches the user's question: 
+            {query_description_mapping}
+            '''.format(query_description_mapping=query_description_mapping)
+        },
+        {
+            "role": "user",
+            "content": user_input,
+        }
+    ]
+
+    # Define the tool (function) to be used by the Groq API
+    tools = [
+        {
+            "type": "function",
+            "function": {
+                "name": "execute_duckdb_query_function_calling",
+                "description": "Executes a verified DuckDB SQL Query",
+                "parameters": {
+                    "type": "object",
+                    "properties": {
+                        "query_name": {
+                            "type": "string",
+                            "description": "The name of the verified query (i.e. 'most-recent-purchases')",
+                        }
+                    },
+                    "required": ["query_name"],
+                },
+            },
+        }
+    ]
+
+    # Send the conversation and available functions to the Groq API
+    response = client.chat.completions.create(
+        model=model,
+        messages=messages,
+        tools=tools,
+        tool_choice="auto",  
+        max_tokens=4096
+    )
+
+    # Extract the response message and any tool calls from the response
+    response_message = response.choices[0].message
+    tool_calls = response_message.tool_calls or []  # may be None if the model answers without calling a tool
+
+    # Define a dictionary of available functions
+    available_functions = {
+        "execute_duckdb_query_function_calling": execute_duckdb_query_function_calling,
+    }
+
+    # Iterate over the tool calls in the response
+    for tool_call in tool_calls:
+        function_name = tool_call.function.name  # Get the function name
+        function_to_call = available_functions[function_name]  # Get the function to call
+        function_args = json.loads(tool_call.function.arguments)  # Parse the function arguments
+        print('Query found: ', function_args.get("query_name"))
+        
+        # Call the function with the provided arguments
+        function_response = function_to_call(
+            query_name=function_args.get("query_name"),
+            verified_queries_dict=verified_queries_dict
+        )
+
+        # Print the function response (query result)
+        print(function_response)
+

+ 9 - 0
recipes/llama_api_providers/Groq/groq-example-templates/verified-sql-function-calling/requirements.txt

@@ -0,0 +1,9 @@
+groq
+sentence-transformers
+langchain_community
+scikit-learn
+numpy
+duckdb
+pyyaml
+sqlparse
+tabulate

+ 7 - 0
recipes/llama_api_providers/Groq/groq-example-templates/verified-sql-function-calling/verified-queries/employees-without-purchases.yaml

@@ -0,0 +1,7 @@
+description: Employees without a purchase since Feb 1, 2024
+sql: |
+  SELECT employees.name as employees_without_purchases
+  FROM employees.csv AS employees
+  LEFT JOIN purchases.csv AS purchases ON employees.employee_id = purchases.employee_id
+  AND purchases.purchase_date > '2024-02-01'
+  WHERE purchases.purchase_id IS NULL

+ 9 - 0
recipes/llama_api_providers/Groq/groq-example-templates/verified-sql-function-calling/verified-queries/most-expensive-purchase.yaml

@@ -0,0 +1,9 @@
+description: Employee with the most expensive purchase
+sql: |
+  SELECT employees.name AS employee_name,
+        MAX(amount) AS max_purchase_amount
+  FROM purchases.csv AS purchases
+  JOIN employees.csv AS employees ON purchases.employee_id = employees.employee_id
+  GROUP BY employees.name
+  ORDER BY max_purchase_amount DESC
+  LIMIT 1

+ 9 - 0
recipes/llama_api_providers/Groq/groq-example-templates/verified-sql-function-calling/verified-queries/most-recent-purchases.yaml

@@ -0,0 +1,9 @@
+description: Five most recent purchases
+sql: |
+  SELECT purchases.product_name,
+         purchases.amount,
+         employees.name
+  FROM purchases.csv AS purchases
+  JOIN employees.csv AS employees ON purchases.employee_id = employees.employee_id
+  ORDER BY purchases.purchase_date DESC
+  LIMIT 5;

+ 6 - 0
recipes/llama_api_providers/Groq/groq-example-templates/verified-sql-function-calling/verified-queries/number-of-teslas.yaml

@@ -0,0 +1,6 @@
+description: Number of Teslas purchased
+sql: |
+  SELECT COUNT(*) as number_of_teslas
+  FROM purchases.csv AS p
+  JOIN employees.csv AS e ON e.employee_id = p.employee_id
+  WHERE p.product_name = 'Tesla'

The diff file is too large
+ 1708 - 0
recipes/llama_api_providers/Groq/llama3_cookbook_groq.ipynb


+ 0 - 937
recipes/llama_api_providers/llama3_cookbook_groq.ipynb

@@ -1,937 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "markdown",
-   "id": "09211e76-286f-4b12-acd7-cfb082dc2d66",
-   "metadata": {},
-   "source": [
-    "# Llama 3 Cookbook with LlamaIndex and Groq\n",
-    "\n",
-    "<a href=\"https://colab.research.google.com/github/meta-llama/llama-recipes/blob/main/recipes/llama_api_providers/llama3_cookbook_groq.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>\n",
-    "\n",
-    "Meta developed and released the Meta [Llama 3](https://ai.meta.com/blog/meta-llama-3/) family of large language models (LLMs), a collection of pretrained and instruction tuned generative text models in 8 and 70B sizes. The Llama 3 instruction tuned models are optimized for dialogue use cases and outperform many of the available open source chat models on common industry benchmarks.\n",
-    "\n",
-    "In this notebook, we demonstrate how to use Llama 3 with LlamaIndex for a comprehensive set of use cases. \n",
-    "1. Basic completion / chat \n",
-    "2. Basic RAG (Vector Search, Summarization)\n",
-    "3. Advanced RAG (Routing)\n",
-    "4. Text-to-SQL \n",
-    "5. Structured Data Extraction\n",
-    "6. Chat Engine + Memory\n",
-    "7. Agents\n",
-    "\n",
-    "\n",
-    "We use Llama3-8B and Llama3-70B through [Groq](https://groq.com) - you can sign up there to get a free trial API key."
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "de2901c0-e20d-48e5-9385-dbca2258c564",
-   "metadata": {},
-   "source": [
-    "## Installation and Setup"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "bcf643ac-b025-4812-aaed-f8f85d1ba505",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "!pip install llama-index\n",
-    "!pip install llama-index-llms-groq\n",
-    "!pip install llama-index-embeddings-huggingface\n",
-    "!pip install llama-parse"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "641fa5c8-d63e-47f8-b5bc-ebf994f6e314",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "import nest_asyncio\n",
-    "\n",
-    "nest_asyncio.apply()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "1714ea83-6cd4-44bb-b53f-4499126c3809",
-   "metadata": {},
-   "source": [
-    "### Setup LLM using Groq\n",
-    "\n",
-    "To use [Groq](https://groq.com), you need to make sure that `GROQ_API_KEY` is specified as an environment variable."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "5d46440c",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "import os\n",
-    "\n",
-    "os.environ[\"GROQ_API_KEY\"] = \"YOUR_GROQ_API_KEY\""
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "d5256970-eba4-499a-b438-8766a290a61a",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "from llama_index.llms.groq import Groq\n",
-    "\n",
-    "llm = Groq(model=\"llama3-8b-8192\")\n",
-    "llm_70b = Groq(model=\"llama3-70b-8192\")"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "41c3f154-d345-465d-8eed-63b99adbd3ca",
-   "metadata": {},
-   "source": [
-    "### Setup Embedding Model"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "0cda736d-e414-44e3-8c15-6be49f5f0282",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "from llama_index.embeddings.huggingface import HuggingFaceEmbedding\n",
-    "\n",
-    "embed_model = HuggingFaceEmbedding(model_name=\"BAAI/bge-small-en-v1.5\")"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "3625cf29-7c56-475a-8efd-fbe8ffce194d",
-   "metadata": {},
-   "source": [
-    "### Define Global Settings Configuration\n",
-    "\n",
-    "In LlamaIndex, you can define global settings so you don't have to pass the LLM / embedding model objects everywhere."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "be3565d1-cc5b-4149-ad5a-7be8f7818e0c",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "from llama_index.core import Settings\n",
-    "\n",
-    "Settings.llm = llm\n",
-    "Settings.embed_model = embed_model"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "42449b68-47f5-40cf-9207-191307b25e8e",
-   "metadata": {},
-   "source": [
-    "### Download Data\n",
-    "\n",
-    "Here you'll download data that's used in section 2 and onwards.\n",
-    "\n",
-    "We'll download some articles on Kendrick, Drake, and their beef (as of May 2024)."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "59b18640-cdfa-42c1-ab53-115983c1fdc4",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "!mkdir data\n",
-    "!wget \"https://www.dropbox.com/scl/fi/t1soxfjdp0v44an6sdymd/drake_kendrick_beef.pdf?rlkey=u9546ymb7fj8lk2v64r6p5r5k&st=wjzzrgil&dl=1\" -O data/drake_kendrick_beef.pdf\n",
-    "!wget \"https://www.dropbox.com/scl/fi/nts3n64s6kymner2jppd6/drake.pdf?rlkey=hksirpqwzlzqoejn55zemk6ld&st=mohyfyh4&dl=1\" -O data/drake.pdf\n",
-    "!wget \"https://www.dropbox.com/scl/fi/8ax2vnoebhmy44bes2n1d/kendrick.pdf?rlkey=fhxvn94t5amdqcv9vshifd3hj&st=dxdtytn6&dl=1\" -O data/kendrick.pdf"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "9edee491-05f8-4fbb-9394-baa82f1e5087",
-   "metadata": {},
-   "source": [
-    "### Load Data\n",
-    "\n",
-    "We load data using LlamaParse by default, but you can also choose to opt for our free pypdf reader (in SimpleDirectoryReader by default) if you don't have an account! \n",
-    "\n",
-    "1. LlamaParse: Signup for an account here: cloud.llamaindex.ai. You get 1k free pages a day, and paid plan is 7k free pages + 0.3c per additional page. LlamaParse is a good option if you want to parse complex documents, like PDFs with charts, tables, and more. \n",
-    "\n",
-    "2. Default PDF Parser (In `SimpleDirectoryReader`). If you don't want to signup for an account / use a PDF service, just use the default PyPDF reader bundled in our file loader. It's a good choice for getting started!"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "b648635a-2672-407f-bae6-01660e5426d7",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# Uncomment this code if you want to use LlamaParse\n",
-    "# from llama_parse import LlamaParse\n",
-    "\n",
-    "# docs_kendrick = LlamaParse(result_type=\"text\").load_data(\"./data/kendrick.pdf\")\n",
-    "# docs_drake = LlamaParse(result_type=\"text\").load_data(\"./data/drake.pdf\")\n",
-    "# docs_both = LlamaParse(result_type=\"text\").load_data(\n",
-    "#     \"./data/drake_kendrick_beef.pdf\"\n",
-    "# )\n",
-    "\n",
-    "# Uncomment this code if you want to use SimpleDirectoryReader / default PDF Parser\n",
-    "# from llama_index.core import SimpleDirectoryReader\n",
-    "\n",
-    "# docs_kendrick = SimpleDirectoryReader(input_files=[\"data/kendrick.pdf\"]).load_data()\n",
-    "# docs_drake = SimpleDirectoryReader(input_files=[\"data/drake.pdf\"]).load_data()\n",
-    "# docs_both = SimpleDirectoryReader(input_files=[\"data/drake_kendrick_beef.pdf\"]).load_data()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "071a8f44-2765-4d57-b8da-15d3c718874d",
-   "metadata": {},
-   "source": [
-    "## 1. Basic Completion and Chat"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "c0b1ace8-32fb-46b2-a065-8817ddc0310b",
-   "metadata": {},
-   "source": [
-    "### Call complete with a prompt"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "a2db43f9-74af-453c-9f83-8db0379c3302",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "response = llm.complete(\"do you like drake or kendrick better?\")\n",
-    "\n",
-    "print(response)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "89326153-e2d2-4136-8193-fb27d20670c3",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "stream_response = llm.stream_complete(\n",
-    "    \"you're a drake fan. tell me why you like drake more than kendrick\"\n",
-    ")\n",
-    "\n",
-    "for t in stream_response:\n",
-    "    print(t.delta, end=\"\")"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "a4558339-c8a1-4d26-a430-eb71768b5351",
-   "metadata": {},
-   "source": [
-    "### Call chat with a list of messages"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "5f393031-f743-4a28-a122-71817e3fbd1b",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "from llama_index.core.llms import ChatMessage\n",
-    "\n",
-    "messages = [\n",
-    "    ChatMessage(role=\"system\", content=\"You are Kendrick.\"),\n",
-    "    ChatMessage(role=\"user\", content=\"Write a verse.\"),\n",
-    "]\n",
-    "response = llm.chat(messages)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "8e9551fc-0efc-4671-bc57-339121004c39",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "print(response)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "6a67a33d-fe7d-4381-983f-ca3a6945995d",
-   "metadata": {},
-   "source": [
-    "## 2. Basic RAG (Vector Search, Summarization)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "c104a0c5-e43b-475b-9fa6-186906c1f327",
-   "metadata": {},
-   "source": [
-    "### Basic RAG (Vector Search)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "216787b7-e40a-43fc-a4ca-c43cb798ce9e",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "from llama_index.core import VectorStoreIndex\n",
-    "\n",
-    "index = VectorStoreIndex.from_documents(docs_both)\n",
-    "query_engine = index.as_query_engine(similarity_top_k=3)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "a854e9d3-70f1-4927-a2f6-59e90c31f2f0",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "response = query_engine.query(\"Tell me about family matters\")"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "da796970-bc38-4cb4-9d32-ebd1b71d4bdc",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "print(str(response))"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "eff935b7-4f37-4758-8997-82fb0852e732",
-   "metadata": {},
-   "source": [
-    "### Basic RAG (Summarization)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "dfe72300-7a38-453e-b1f2-bc1c00a01ff7",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "from llama_index.core import SummaryIndex\n",
-    "\n",
-    "summary_index = SummaryIndex.from_documents(docs_both)\n",
-    "summary_engine = summary_index.as_query_engine()"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "178f1f12-51f7-4b45-9346-c16ed12b3b8d",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "response = summary_engine.query(\n",
-    "    \"Given your assessment of this article, who won the beef?\"\n",
-    ")"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "b8125382-d576-4b99-a0da-2fbb71a5b19b",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "print(str(response))"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "68918eb6-f1e6-460c-b1d5-fb49c3fed4b8",
-   "metadata": {},
-   "source": [
-    "## 3. Advanced RAG (Routing)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "94fd7097-0287-4522-8e43-3e088291fa8a",
-   "metadata": {},
-   "source": [
-    "### Build a Router that can choose whether to do vector search or summarization"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "3949dd41-e9a1-47f6-900f-4f987cad3f84",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "from llama_index.core.tools import QueryEngineTool, ToolMetadata\n",
-    "\n",
-    "vector_tool = QueryEngineTool(\n",
-    "    index.as_query_engine(),\n",
-    "    metadata=ToolMetadata(\n",
-    "        name=\"vector_search\",\n",
-    "        description=\"Useful for searching for specific facts.\",\n",
-    "    ),\n",
-    ")\n",
-    "\n",
-    "summary_tool = QueryEngineTool(\n",
-    "    index.as_query_engine(response_mode=\"tree_summarize\"),\n",
-    "    metadata=ToolMetadata(\n",
-    "        name=\"summary\",\n",
-    "        description=\"Useful for summarizing an entire document.\",\n",
-    "    ),\n",
-    ")"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "d063d07b-c03e-4b26-8556-e3c058d2fd52",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "from llama_index.core.query_engine import RouterQueryEngine\n",
-    "\n",
-    "query_engine = RouterQueryEngine.from_defaults(\n",
-    "    [vector_tool, summary_tool], select_multi=False, verbose=True, llm=llm_70b\n",
-    ")\n",
-    "\n",
-    "response = query_engine.query(\n",
-    "    \"Tell me about the song meet the grahams - why is it significant\"\n",
-    ")"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "396aad75-5a71-4bd9-a760-7f13fe223079",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "print(response)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "a795f0bc-e871-4580-8983-6fb27d421fc5",
-   "metadata": {},
-   "source": [
-    "## 4. Text-to-SQL \n",
-    "\n",
-    "Here, we download and use a sample SQLite database with 11 tables, with various info about music, playlists, and customers. We will limit to a select few tables for this test."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "a5096501-92c3-41af-a871-ade869d710fb",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "!wget \"https://www.sqlitetutorial.net/wp-content/uploads/2018/03/chinook.zip\" -O \"./data/chinook.zip\"\n",
-    "!unzip \"./data/chinook.zip\""
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "d4db989e-c18d-4416-928e-7be4ead4d869",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "from sqlalchemy import (\n",
-    "    create_engine,\n",
-    "    MetaData,\n",
-    "    Table,\n",
-    "    Column,\n",
-    "    String,\n",
-    "    Integer,\n",
-    "    select,\n",
-    "    column,\n",
-    ")\n",
-    "\n",
-    "engine = create_engine(\"sqlite:///chinook.db\")"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "bf6ed233-0ea3-4d4f-8c33-5b6d558b89b9",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "from llama_index.core import SQLDatabase\n",
-    "\n",
-    "sql_database = SQLDatabase(engine)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "debae423-1004-40f6-9356-e1c3add4d965",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "from llama_index.core.indices.struct_store import NLSQLTableQueryEngine\n",
-    "\n",
-    "query_engine = NLSQLTableQueryEngine(\n",
-    "    sql_database=sql_database,\n",
-    "    tables=[\"albums\", \"tracks\", \"artists\"],\n",
-    "    llm=llm_70b,\n",
-    ")"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "a65ecd70-09c4-4872-b712-3a8235d03db2",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "response = query_engine.query(\"What are some albums?\")\n",
-    "\n",
-    "print(response)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "c12b93ef-d6d1-4d15-9cb2-343070f72851",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "response = query_engine.query(\"What are some artists? Limit it to 5.\")\n",
-    "\n",
-    "print(response)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "2c243d38-c6ac-445c-b9d4-53a9ae013b7b",
-   "metadata": {},
-   "source": [
-    "This last query should be a more complex join"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "553741c2-1050-445d-979a-ae2150ee3248",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "response = query_engine.query(\n",
-    "    \"What are some tracks from the artist AC/DC? Limit it to 3\"\n",
-    ")\n",
-    "\n",
-    "print(response)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "300689d7-9e67-4404-9898-27404ee6d4b5",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "print(response.metadata[\"sql_query\"])"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "1419fe67-aa6a-47db-88cd-9bb251c15615",
-   "metadata": {},
-   "source": [
-    "## 5. Structured Data Extraction\n",
-    "\n",
-    "An important use case for function calling is extracting structured objects. LlamaIndex provides an intuitive interface for this through `structured_predict` - simply define the target Pydantic class (can be nested), and given a prompt, we extract out the desired object.\n",
-    "\n",
-    "**NOTE**: Since there's no native function calling support with Llama3, the structured extraction is performed by prompting the LLM + output parsing."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "4432f35a-5f29-45e9-a928-32e6d77b158e",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "from llama_index.llms.groq import Groq\n",
-    "from llama_index.core.prompts import PromptTemplate\n",
-    "from pydantic import BaseModel\n",
-    "\n",
-    "\n",
-    "class Restaurant(BaseModel):\n",
-    "    \"\"\"A restaurant with name, city, and cuisine.\"\"\"\n",
-    "\n",
-    "    name: str\n",
-    "    city: str\n",
-    "    cuisine: str\n",
-    "\n",
-    "\n",
-    "llm = Groq(model=\"llama3-8b-8192\", pydantic_program_mode=\"llm\")\n",
-    "prompt_tmpl = PromptTemplate(\n",
-    "    \"Generate a restaurant in a given city {city_name}\"\n",
-    ")"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "2c451f52-a051-4ba2-a683-0c1fd258d986",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "restaurant_obj = llm.structured_predict(\n",
-    "    Restaurant, prompt_tmpl, city_name=\"Miami\"\n",
-    ")\n",
-    "print(restaurant_obj)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "839018a9-b65f-4824-83f7-2e4e52b55c5d",
-   "metadata": {},
-   "source": [
-    "## 6. Adding Chat History to RAG (Chat Engine)\n",
-    "\n",
-    "In this section we create a stateful chatbot from a RAG pipeline, with our chat engine abstraction.\n",
-    "\n",
-    "Unlike a stateless query engine, the chat engine maintains conversation history (through a memory module like buffer memory). It performs retrieval given a condensed question, and feeds the condensed question + context + chat history into the final LLM prompt.\n",
-    "\n",
-    "Related resource: https://docs.llamaindex.ai/en/stable/examples/chat_engine/chat_engine_condense_plus_context/"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "27e56315-9513-4b32-bf9a-ce97c3ab52df",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "from llama_index.core.memory import ChatMemoryBuffer\n",
-    "from llama_index.core.chat_engine import CondensePlusContextChatEngine\n",
-    "\n",
-    "memory = ChatMemoryBuffer.from_defaults(token_limit=3900)\n",
-    "\n",
-    "chat_engine = CondensePlusContextChatEngine.from_defaults(\n",
-    "    index.as_retriever(),\n",
-    "    memory=memory,\n",
-    "    llm=llm,\n",
-    "    context_prompt=(\n",
-    "        \"You are a chatbot, able to have normal interactions, as well as talk\"\n",
-    "        \" about the Kendrick and Drake beef.\"\n",
-    "        \"Here are the relevant documents for the context:\\n\"\n",
-    "        \"{context_str}\"\n",
-    "        \"\\nInstruction: Use the previous chat history, or the context above, to interact and help the user.\"\n",
-    "    ),\n",
-    "    verbose=True,\n",
-    ")"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "b24524d2-fdce-4237-8ecc-67f139302303",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "response = chat_engine.chat(\n",
-    "    \"Tell me about the songs Drake released in the beef.\"\n",
-    ")\n",
-    "print(str(response))"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "f9a87a16-2864-4c48-95e7-a2103e119242",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "response = chat_engine.chat(\"What about Kendrick?\")\n",
-    "print(str(response))"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "a7fa07ed-58f0-445e-bbd3-4ad8bac6598e",
-   "metadata": {},
-   "source": [
-    "## 7. Agents\n",
-    "\n",
-    "Here we build agents with Llama 3. We perform RAG over simple functions as well as the documents above."
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "aa98d735-5d43-413f-aab3-fc3adeed81b1",
-   "metadata": {},
-   "source": [
-    "### Agents And Tools"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "fb73a01f-8a2e-4dd6-91f8-710c92b81c56",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "import json\n",
-    "from typing import Sequence, List\n",
-    "\n",
-    "from llama_index.core.llms import ChatMessage\n",
-    "from llama_index.core.tools import BaseTool, FunctionTool\n",
-    "from llama_index.core.agent import ReActAgent\n",
-    "\n",
-    "import nest_asyncio\n",
-    "\n",
-    "nest_asyncio.apply()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "efbee832-9786-4551-93f2-01ee90fa0f4d",
-   "metadata": {},
-   "source": [
-    "### Define Tools"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "b2058b36-8053-4dc8-9218-c286702ecf66",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "def multiply(a: int, b: int) -> int:\n",
-    "    \"\"\"Multiple two integers and returns the result integer\"\"\"\n",
-    "    return a * b\n",
-    "\n",
-    "\n",
-    "def add(a: int, b: int) -> int:\n",
-    "    \"\"\"Add two integers and returns the result integer\"\"\"\n",
-    "    return a + b\n",
-    "\n",
-    "\n",
-    "def subtract(a: int, b: int) -> int:\n",
-    "    \"\"\"Subtract two integers and returns the result integer\"\"\"\n",
-    "    return a - b\n",
-    "\n",
-    "\n",
-    "def divide(a: int, b: int) -> int:\n",
-    "    \"\"\"Divides two integers and returns the result integer\"\"\"\n",
-    "    return a / b\n",
-    "\n",
-    "\n",
-    "multiply_tool = FunctionTool.from_defaults(fn=multiply)\n",
-    "add_tool = FunctionTool.from_defaults(fn=add)\n",
-    "subtract_tool = FunctionTool.from_defaults(fn=subtract)\n",
-    "divide_tool = FunctionTool.from_defaults(fn=divide)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "22d7d4dc-e2ce-402c-9350-0e7010d0080c",
-   "metadata": {},
-   "source": [
-    "### ReAct Agent"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "72a48053-e30d-4884-bcac-80752047d940",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "agent = ReActAgent.from_tools(\n",
-    "    [multiply_tool, add_tool, subtract_tool, divide_tool],\n",
-    "    llm=llm_70b,\n",
-    "    verbose=True,\n",
-    ")"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "7ada828a-3b05-4fc1-90e8-986c5607ae61",
-   "metadata": {},
-   "source": [
-    "### Querying"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "9c0b1e56-d9f7-4615-a15a-c91fea1adb00",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "response = agent.chat(\"What is (121 + 2) * 5?\")\n",
-    "print(str(response))"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "67ce45f6-bdd4-42aa-8f74-43a50f14094e",
-   "metadata": {},
-   "source": [
-    "### ReAct Agent With RAG QueryEngine Tools"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "97fce5f1-eacf-4ecc-9e83-072e74d3a2a9",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "from llama_index.core import (\n",
-    "    SimpleDirectoryReader,\n",
-    "    VectorStoreIndex,\n",
-    "    StorageContext,\n",
-    "    load_index_from_storage,\n",
-    ")\n",
-    "\n",
-    "from llama_index.core.tools import QueryEngineTool, ToolMetadata"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "23963d00-e3d2-4ce1-9ac3-aa486bf4b1a5",
-   "metadata": {},
-   "source": [
-    "### Create ReAct Agent using RAG QueryEngine Tools"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "1844dbbd-477c-4c4d-bb18-2c2e16a75a50",
-   "metadata": {},
-   "source": [
-    "This may take 4 minutes to run:"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "66ab1e60-3374-4eb9-b7dc-c28db3b47c51",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "drake_index = VectorStoreIndex.from_documents(docs_drake)\n",
-    "drake_query_engine = drake_index.as_query_engine(similarity_top_k=3)\n",
-    "\n",
-    "kendrick_index = VectorStoreIndex.from_documents(docs_kendrick)\n",
-    "kendrick_query_engine = kendrick_index.as_query_engine(similarity_top_k=3)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "0e241fe9-f390-4be5-b3c4-da4f56db01ef",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "drake_tool = QueryEngineTool(\n",
-    "    drake_index.as_query_engine(),\n",
-    "    metadata=ToolMetadata(\n",
-    "        name=\"drake_search\",\n",
-    "        description=\"Useful for searching over Drake's life.\",\n",
-    "    ),\n",
-    ")\n",
-    "\n",
-    "kendrick_tool = QueryEngineTool(\n",
-    "    kendrick_index.as_query_engine(),\n",
-    "    metadata=ToolMetadata(\n",
-    "        name=\"kendrick_search\",\n",
-    "        description=\"Useful for searching over Kendrick's life.\",\n",
-    "    ),\n",
-    ")\n",
-    "\n",
-    "query_engine_tools = [drake_tool, kendrick_tool]"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "b922feac-b221-4737-92c6-e63eeab4eab7",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "agent = ReActAgent.from_tools(\n",
-    "    query_engine_tools,\n",
-    "    llm=llm_70b,\n",
-    "    verbose=True,\n",
-    ")"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "7e38edc8-47f8-4f1a-ad87-bc3a9e31a65e",
-   "metadata": {},
-   "source": [
-    "### Querying"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "035c2c8b-5a5e-4df0-a423-4c2d6054f457",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "response = agent.chat(\"Tell me about how Kendrick and Drake grew up\")\n",
-    "print(str(response))"
-   ]
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": "Python 3 (ipykernel)",
-   "language": "python",
-   "name": "python3"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 3
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": "3.10.14"
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 5
-}