@@ -0,0 +1,78 @@
+import os
+from typing import Annotated
+
+from dotenv import load_dotenv
+from homeassistant_api import Client
+from langchain_core.prompts import PromptTemplate
+from langchain_core.tools import tool
+from langchain_openai import ChatOpenAI
+
+from .prompts import choose_sensor_template
+
+load_dotenv()
+client = Client(os.environ["HA_URL"], os.environ["HA_API_KEY"])
+
+
+def get_all_entities():
+    """Return the entity_id and friendly_name of every Home Assistant sensor."""
+    all_entities = client.get_entities()["sensor"].entities
+    final_entities = []
+
+    for entity in all_entities.values():
+        final_entities.append(
+            {
+                "entity_id": entity.state.entity_id,
+                "friendly_name": entity.state.attributes["friendly_name"],
+            }
+        )
+
+    return final_entities
+
+
+def get_sensor_state(entity_id: Annotated[str, "The ID of the sensor"]):
+    """Return the current value and unit of measurement of a single sensor."""
+    sensor = client.get_entity(entity_id=entity_id)
+
+    return {
+        "value": sensor.state.state,
+        "unit_of_measurement": sensor.state.attributes.get("unit_of_measurement"),
+    }
+
+
+# Tool to dynamically choose the best sensor based on user input
+@tool
+def select_and_get_sensor_value(
+    description: Annotated[str, "Description of the sensor you need"]
+):
+    """
+    Select the sensor that best matches the given description and return its current state or value.
+    Use this when the user asks for data related to sensors.
+    """
+    sensors = get_all_entities()
+
+    # Prepare sensor details for the LLM to analyze
+    sensor_info = "\n".join(
+        [
+            f"- entity_id: {sensor['entity_id']}, friendly_name: {sensor['friendly_name']}"
+            for sensor in sensors
+        ]
+    )
+
+    # Ask the LLM to pick the best match; its reply is used directly as the entity_id
+    llm = ChatOpenAI(model="gpt-4o-mini", temperature=0)
+    prompt = PromptTemplate(
+        template=choose_sensor_template, input_variables=["sensor_info", "description"]
+    )
+    llm_chain = prompt | llm
+
+    selected_sensor = llm_chain.invoke(
+        {"sensor_info": sensor_info, "description": description}
+    )
+
+    # Now retrieve the value of the selected sensor
+    sensor_value = get_sensor_state(selected_sensor.content.strip())
+
+    return sensor_value
+
+
+toolkit = [select_and_get_sensor_value]
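The file imports `choose_sensor_template` from `.prompts`, which is not part of this diff. Because the model's reply (`selected_sensor.content.strip()`) is passed straight to `get_sensor_state`, the template has to make the model answer with an entity_id and nothing else, and it must expose the `sensor_info` and `description` variables. A minimal sketch of what such a template might look like (the actual wording in `prompts.py` may differ):

```python
# Hypothetical stand-in for choose_sensor_template; the real template lives in
# prompts.py and is not shown in this diff.
choose_sensor_template = """You are selecting a Home Assistant sensor.

Available sensors:
{sensor_info}

The user is asking about: {description}

Reply with the entity_id of the single best-matching sensor and nothing else."""
```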
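Nothing in this diff consumes `toolkit`; the agent that uses it lives elsewhere in the project. As a rough sketch, the list could be bound to a chat model with LangChain's `bind_tools` (the module path and the example query below are illustrative, not taken from this repo):

```python
# Illustrative wiring only; the real agent setup is outside this diff.
from langchain_openai import ChatOpenAI

from .tools import toolkit  # assumed module name for the file added above

llm = ChatOpenAI(model="gpt-4o-mini", temperature=0)
llm_with_tools = llm.bind_tools(toolkit)

# The model is expected to emit a tool call for select_and_get_sensor_value;
# executing that call (e.g. inside an agent loop) is up to the caller.
response = llm_with_tools.invoke("What is the temperature in the living room?")
print(response.tool_calls)
```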