@@ -65,14 +65,6 @@
    ]
   },
   {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "d5d854f2-a6f9-4107-a687-492d6eba2003",
-   "metadata": {},
-   "outputs": [],
-   "source": []
-  },
-  {
    "cell_type": "markdown",
    "id": "8f8d6bb9-96cb-44ea-b39c-1c957c7b1963",
    "metadata": {},
@@ -131,7 +123,7 @@
     " project_id = os.getenv(\"PROJECT_ID\")\n",
     "except KeyError:\n",
     " project_id = input(\"Please enter your project_id (hit enter): \")\n",
-    " \n",
+    "\n",
     "print(\"Credentials configured successfully!\")"
    ]
   },
@@ -331,7 +323,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 13,
+   "execution_count": null,
    "id": "199d4189-dc80-49db-a901-ebe7e82437de",
    "metadata": {},
    "outputs": [],
@@ -339,8 +331,8 @@
     "from ibm_watsonx_ai.foundation_models import ModelInference\n",
     "\n",
     "model = ModelInference(\n",
-    " model_id=model_id, \n",
-    " params=params, \n",
+    " model_id=model_id,\n",
+    " params=params,\n",
     " credentials=credentials,\n",
     " project_id=project_id)"
    ]
@@ -511,14 +503,14 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 18,
+   "execution_count": null,
    "id": "1c6f1a12-6943-428d-833a-46aaac498cb3",
    "metadata": {},
    "outputs": [],
    "source": [
     "def email_assistant(context, tone=\"professional\"):\n",
     " \"\"\"Generate email responses based on context and tone\"\"\"\n",
-    " \n",
+    "\n",
     " messages = [\n",
     " {\"role\": \"system\", \"content\": \"You are a helpful assistant.\"},\n",
     " {\"role\": \"user\", \"content\": f\"\"\"\n",
@@ -526,7 +518,7 @@
     " Context: {context}\n",
     " Email Response:\"\"\"}\n",
     " ]\n",
-    " \n",
+    "\n",
     " params = TextChatParameters(\n",
     " temperature=0.5,\n",
     " max_tokens=250\n",
@@ -538,10 +530,10 @@
     " credentials=credentials,\n",
     " project_id=project_id\n",
     " )\n",
-    " \n",
+    "\n",
     " response = model.chat(messages=messages)\n",
     " clean_response = response[\"choices\"][0][\"message\"][\"content\"]\n",
-    " \n",
+    "\n",
     " return clean_response\n"
    ]
   },
@@ -588,34 +580,34 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 20,
+   "execution_count": null,
    "id": "856c6538-28fb-41f8-81f9-df0b38e47ce7",
    "metadata": {},
    "outputs": [],
    "source": [
     "def generate_docstring(code):\n",
     " \"\"\"Generate documentation for code snippets\"\"\"\n",
-    " \n",
+    "\n",
     " messages = [\n",
     " {\"role\": \"system\", \"content\": \"You are a helpful assistant.\"},\n",
     " {\"role\": \"user\", \"content\": f\"\"\"\n",
     " Generate a comprehensive docstring for this Python function:\n",
     " {code}\n",
-    " \n",
+    "\n",
     " Include:\n",
     " - Description\n",
     " - Parameters\n",
     " - Returns\n",
     " - Example usage\n",
-    " \n",
+    "\n",
     " Docstring:\"\"\"}\n",
     " ]\n",
-    " \n",
+    "\n",
     " params = TextChatParameters(\n",
     " temperature=0.5,\n",
     " max_tokens=1000\n",
     " )\n",
-    " \n",
+    "\n",
     " model = ModelInference(\n",
     " model_id=model_id,\n",
     " params=params,\n",
@@ -625,9 +617,8 @@
     "\n",
     " response = model.chat(messages=messages)\n",
     " clean_response = response[\"choices\"][0][\"message\"][\"content\"]\n",
-    " \n",
-    " return clean_response\n",
-    "\n"
+    "\n",
+    " return clean_response\n"
    ]
   },
   {