Browse Source

IBM watsonx.ai notebook: add "Open in Colab" badge and clean up cells

Yuce Dincer 2 months ago
parent
commit
14e3f97b8b
1 changed files with 28 additions and 22 deletions
  1. 28 22
      3p-integrations/ibm/Get Started with watsonx.ai & Llama.ipynb

+ 28 - 22
3p-integrations/ibm/Get Started with watsonx.ai & Llama.ipynb

@@ -2,6 +2,14 @@
  "cells": [
   {
    "cell_type": "markdown",
+   "id": "35a81947",
+   "metadata": {},
+   "source": [
+    "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/meta-llama/llama-cookbook/blob/ibm-wxai/3p-integrations/ibm/Get%20Started%20with%20watsonx.ai%20%26%20Llama.ipynb)\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
    "id": "83a07847-b672-4a88-9a6d-fdae11bb1efa",
    "metadata": {},
    "source": [
@@ -36,19 +44,19 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 1,
+   "execution_count": null,
    "id": "8abda17e-2849-4ad2-9cef-e2a1dd0b5827",
    "metadata": {},
    "outputs": [],
    "source": [
-    "# # # Install required packages\n",
+    "# Install required packages\n",
     "!pip install ibm-watsonx-ai\n",
     "!pip install python-dotenv pandas"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 3,
+   "execution_count": null,
    "id": "bbc27c29-3848-41bd-8d60-71c450408371",
    "metadata": {},
    "outputs": [],
@@ -58,7 +66,6 @@
     "from dotenv import load_dotenv\n",
     "from ibm_watsonx_ai import APIClient, Credentials\n",
     "from ibm_watsonx_ai.foundation_models import Model\n",
-    "from ibm_watsonx_ai.metanames import GenTextParamsMetaNames as GenParams\n",
     "from ibm_watsonx_ai.foundation_models.utils.enums import ModelTypes\n",
     "import pandas as pd\n",
     "import json"
@@ -116,7 +123,7 @@
     "    project_id = os.getenv(\"PROJECT_ID\")\n",
     "except KeyError:\n",
     "    project_id = input(\"Please enter your project_id (hit enter): \")\n",
-    "    \n",
+    "\n",
     "print(\"Credentials configured successfully!\")"
    ]
   },
@@ -272,7 +279,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 13,
+   "execution_count": null,
    "id": "199d4189-dc80-49db-a901-ebe7e82437de",
    "metadata": {},
    "outputs": [],
@@ -280,8 +287,8 @@
     "from ibm_watsonx_ai.foundation_models import ModelInference\n",
     "\n",
     "model = ModelInference(\n",
-    "    model_id=model_id, \n",
-    "    params=params, \n",
+    "    model_id=model_id,\n",
+    "    params=params,\n",
     "    credentials=credentials,\n",
     "    project_id=project_id)"
    ]
@@ -452,14 +459,14 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 18,
+   "execution_count": null,
    "id": "1c6f1a12-6943-428d-833a-46aaac498cb3",
    "metadata": {},
    "outputs": [],
    "source": [
     "def email_assistant(context, tone=\"professional\"):\n",
     "    \"\"\"Generate email responses based on context and tone\"\"\"\n",
-    "    \n",
+    "\n",
     "    messages = [\n",
     "    {\"role\": \"system\", \"content\": \"You are a helpful assistant.\"},\n",
     "    {\"role\": \"user\", \"content\": f\"\"\"\n",
@@ -467,7 +474,7 @@
     "    Context: {context}\n",
     "    Email Response:\"\"\"}\n",
     "     ]\n",
-    "    \n",
+    "\n",
     "    params = TextChatParameters(\n",
     "        temperature=0.5,\n",
     "        max_tokens=250\n",
@@ -479,10 +486,10 @@
     "        credentials=credentials,\n",
     "        project_id=project_id\n",
     "    )\n",
-    "    \n",
+    "\n",
     "    response = model.chat(messages=messages)\n",
     "    clean_response = response[\"choices\"][0][\"message\"][\"content\"]\n",
-    "    \n",
+    "\n",
     "    return clean_response\n"
    ]
   },
@@ -529,34 +536,34 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 20,
+   "execution_count": null,
    "id": "856c6538-28fb-41f8-81f9-df0b38e47ce7",
    "metadata": {},
    "outputs": [],
    "source": [
     "def generate_docstring(code):\n",
     "    \"\"\"Generate documentation for code snippets\"\"\"\n",
-    "    \n",
+    "\n",
     "    messages = [\n",
     "    {\"role\": \"system\", \"content\": \"You are a helpful assistant.\"},\n",
     "    {\"role\": \"user\", \"content\": f\"\"\"\n",
     "    Generate a comprehensive docstring for this Python function:\n",
     "    {code}\n",
-    "    \n",
+    "\n",
     "    Include:\n",
     "    - Description\n",
     "    - Parameters\n",
     "    - Returns\n",
     "    - Example usage\n",
-    "    \n",
+    "\n",
     "    Docstring:\"\"\"}\n",
     "    ]\n",
-    "   \n",
+    "\n",
     "    params = TextChatParameters(\n",
     "        temperature=0.5,\n",
     "        max_tokens=1000\n",
     "    )\n",
-    "    \n",
+    "\n",
     "    model = ModelInference(\n",
     "        model_id=model_id,\n",
     "        params=params,\n",
@@ -566,9 +573,8 @@
     "\n",
     "    response = model.chat(messages=messages)\n",
     "    clean_response = response[\"choices\"][0][\"message\"][\"content\"]\n",
-    "    \n",
-    "    return clean_response\n",
-    "\n"
+    "\n",
+    "    return clean_response\n"
    ]
   },
   {