Browse Source

initial commit

Radu Boncea cách đây 2 năm
commit
79d244535a

+ 319 - 0
.gitignore

@@ -0,0 +1,319 @@
+### macOS ###
+# General
+.DS_Store
+.AppleDouble
+.LSOverride
+
+# Icon must end with two \r
+Icon
+
+
+# Thumbnails
+._*
+
+# Files that might appear in the root of a volume
+.DocumentRevisions-V100
+.fseventsd
+.Spotlight-V100
+.TemporaryItems
+.Trashes
+.VolumeIcon.icns
+.com.apple.timemachine.donotpresent
+
+# Directories potentially created on remote AFP share
+.AppleDB
+.AppleDesktop
+Network Trash Folder
+Temporary Items
+.apdisk
+
+### macOS Patch ###
+# iCloud generated files
+*.icloud
+
+### PyCharm+all ###
+# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio, WebStorm and Rider
+# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
+
+# User-specific stuff
+.idea/**/workspace.xml
+.idea/**/tasks.xml
+.idea/**/usage.statistics.xml
+.idea/**/dictionaries
+.idea/**/shelf
+
+# AWS User-specific
+.idea/**/aws.xml
+
+# Generated files
+.idea/**/contentModel.xml
+
+# Sensitive or high-churn files
+.idea/**/dataSources/
+.idea/**/dataSources.ids
+.idea/**/dataSources.local.xml
+.idea/**/sqlDataSources.xml
+.idea/**/dynamic.xml
+.idea/**/uiDesigner.xml
+.idea/**/dbnavigator.xml
+
+# Gradle
+.idea/**/gradle.xml
+.idea/**/libraries
+
+# Gradle and Maven with auto-import
+# When using Gradle or Maven with auto-import, you should exclude module files,
+# since they will be recreated, and may cause churn.  Uncomment if using
+# auto-import.
+# .idea/artifacts
+# .idea/compiler.xml
+# .idea/jarRepositories.xml
+# .idea/modules.xml
+# .idea/*.iml
+# .idea/modules
+# *.iml
+# *.ipr
+
+# CMake
+cmake-build-*/
+
+# Mongo Explorer plugin
+.idea/**/mongoSettings.xml
+
+# File-based project format
+*.iws
+
+# IntelliJ
+out/
+
+# mpeltonen/sbt-idea plugin
+.idea_modules/
+
+# JIRA plugin
+atlassian-ide-plugin.xml
+
+# Cursive Clojure plugin
+.idea/replstate.xml
+
+# SonarLint plugin
+.idea/sonarlint/
+
+# Crashlytics plugin (for Android Studio and IntelliJ)
+com_crashlytics_export_strings.xml
+crashlytics.properties
+crashlytics-build.properties
+fabric.properties
+
+# Editor-based Rest Client
+.idea/httpRequests
+
+# Android studio 3.1+ serialized cache file
+.idea/caches/build_file_checksums.ser
+
+### PyCharm+all Patch ###
+# Ignore everything but code style settings and run configurations
+# that are supposed to be shared within teams.
+
+.idea/*
+
+!.idea/codeStyles
+!.idea/runConfigurations
+
+### Python ###
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+#  Usually these files are written by a python script from a template
+#  before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.pytest_cache/
+cover/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+.pybuilder/
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+#   For a library or package, you might want to ignore these files since the code is
+#   intended to run in multiple environments; otherwise, check them in:
+# .python-version
+
+# pipenv
+#   According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+#   However, in case of collaboration, if having platform-specific dependencies or dependencies
+#   having no cross-platform support, pipenv may install dependencies that don't work, or not
+#   install all needed dependencies.
+#Pipfile.lock
+
+# poetry
+#   Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+#   This is especially recommended for binary packages to ensure reproducibility, and is more
+#   commonly ignored for libraries.
+#   https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+#poetry.lock
+
+# pdm
+#   Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+#pdm.lock
+#   pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+#   in version control.
+#   https://pdm.fming.dev/#use-with-ide
+.pdm.toml
+
+# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+
+# pytype static type analyzer
+.pytype/
+
+# Cython debug symbols
+cython_debug/
+
+# PyCharm
+#  JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+#  be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+#  and can be added to the global gitignore or merged into this file.  For a more nuclear
+#  option (not recommended) you can uncomment the following to ignore the entire idea folder.
+#.idea/
+
+### Python Patch ###
+# Poetry local configuration file - https://python-poetry.org/docs/configuration/#local-configuration
+poetry.toml
+
+# ruff
+.ruff_cache/
+
+# LSP config files
+pyrightconfig.json
+
+### VisualStudioCode ###
+.vscode/*
+!.vscode/settings.json
+!.vscode/tasks.json
+!.vscode/launch.json
+!.vscode/extensions.json
+!.vscode/*.code-snippets
+
+# Local History for Visual Studio Code
+.history/
+
+# Built Visual Studio Code Extensions
+*.vsix
+
+### VisualStudioCode Patch ###
+# Ignore all local history of files
+.history
+.ionide
+
+
+### Project specific ###
+*/.chroma/*
+jupyterhub-proxy.pid
+jupyterhub_cookie_secret
+jupyterhub.sqlite
+.vscode/settings.json

+ 0 - 0
agents/__init__.py


+ 54 - 0
agents/ha.py

@@ -0,0 +1,54 @@
+import os
+from langchain.chat_models import ChatOpenAI
+from langchain.chains.conversation.memory import ConversationBufferWindowMemory
+from langchain.agents import initialize_agent
+from typing import List, Any
+
+
class HAAgent(object):
    """Conversational Home Assistant agent backed by an OpenAI chat model.

    Wraps a LangChain conversational ReAct agent with a small windowed chat
    memory and an optional set of tools.  The underlying agent is built
    lazily on first use.
    """

    def __init__(self, tools: List[Any] = None):
        # None (or an empty list) means "no tools".
        self.tools = tools if tools else list()

        # Deterministic, short replies: temperature 0, 100-token budget.
        self.llm = ChatOpenAI(
            temperature=0,
            model_name='gpt-3.5-turbo',
            max_tokens=100,
        )
        self.llm.openai_api_key = os.environ.get('OPENAI_API_KEY', '')

        # Keep only the last 5 exchanges in conversational context.
        self.memory = ConversationBufferWindowMemory(
            memory_key='chat_history',
            k=5,
            return_messages=True
        )

        self.sys_msg = """Assistant is a large language model trained by OpenAI.
        Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.
        Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.
        Overall, Assistant is a powerful system that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.
        """

        # Built on demand by get_agent().
        self.agent = None

    def get_agent(self):
        """Create the underlying LangChain agent on first use and cache it."""
        if self.agent is None:
            self.agent = initialize_agent(
                agent='chat-conversational-react-description',
                system_message=self.sys_msg,
                tools=self.tools,
                llm=self.llm,
                verbose=True,
                max_iterations=3,
                early_stopping_method='generate',
                memory=self.memory
            )
        return self.agent

    def run(self, input):
        """Send one user utterance through the agent and return its reply."""
        return self.get_agent().run({"input": input})

+ 108 - 0
hachat.py

@@ -0,0 +1,108 @@
"""Voice/text REPL that chats with a Home Assistant-aware LangChain agent."""
from dotenv import dotenv_values, load_dotenv
import os
import speech_recognition as sr
from utils.prompt import Prompt
from langchain import LLMMathChain, SerpAPIWrapper
from langchain.chains import RetrievalQA
from langchain.llms import OpenAI
from langchain.indexes import VectorstoreIndexCreator
from langchain.document_loaders import TextLoader
from langchain.utilities import OpenWeatherMapAPIWrapper
from prompt_toolkit.key_binding import KeyBindings
from models.ha import ChatHA

from tools.ha import (
    HALightControl,
    HATempHumReading,
    HAGeolocation,
    HASensorReading
)

from langchain.agents import Tool

# Pull API keys (OPENAI_API_KEY, SERPAPI_API_KEY, HA_*) from a local .env.
load_dotenv()

prompt = Prompt('hachat')

search = SerpAPIWrapper()
weather = OpenWeatherMapAPIWrapper()

# Conversational agent with web search, weather and Home Assistant tools.
chat = ChatHA(tools=[
    Tool(
        name="Search",
        func=search.run,
        description="useful for when you need to answer questions about current events"
    ),
    Tool(
        name="Weather",
        func=weather.run,
        description="useful for when you need to search for weather information"
    ),
    HALightControl(),
    HATempHumReading(),
    HAGeolocation(),
    HASensorReading(),
])


if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser(description='Chat with the AI')
    # BUG FIX: the original used `type=bool`, which argparse applies as
    # bool(<string>) -- so any supplied value, even "False", parsed as True.
    # A store_true flag is the correct way to express an on/off switch and is
    # strictly more predictable: `--novoice` disables voice, absence keeps it.
    parser.add_argument('--novoice', action='store_true',
                        help='novoice: disable voice input')
    args = parser.parse_args()

    input_mode = 'text'
    if not args.novoice:
        input_mode = prompt.get_input_mode()
        if not input_mode:
            prompt.print("You're an imbecile...I shall decide for you", tag='warning')
            input_mode = 'text'

    prompt.print(f"Input mode is {input_mode}")

    if input_mode == 'voice':
        # One recognizer instance shared across the REPL loop.
        r = sr.Recognizer()

    while True:
        # Renamed from `input` to avoid shadowing the builtin.
        user_input = None

        if input_mode == 'voice':
            prompt.print("Listening...")
            with sr.Microphone() as source:
                audio = r.listen(source)
            try:
                # Romanian speech recognition via the Google Cloud backend.
                user_input = r.recognize_google_cloud(audio, language='ro-RO')
                print(f"{user_input}")
            except sr.UnknownValueError:
                prompt.print("Chat could not understand audio", tag='error')
            except sr.RequestError as e:
                prompt.print("Chat error; {0}".format(e), tag='error')
        else:
            user_input = prompt.get().strip()

        if not user_input:
            continue

        if user_input == 'exit':
            break

        try:
            result = chat.run(user_input)
            print("\n\nAI:>{}\n".format(result))
        except Exception as e:
            # Best-effort REPL: surface the error and keep the loop alive.
            print("\n\nSystem:>{}\n".format(e))

+ 202 - 0
icichat.py

@@ -0,0 +1,202 @@
+import os
+import re
+from typing import Callable, List, Union
+from dotenv import dotenv_values, load_dotenv
+from utils.prompt import Prompt
+from langchain import SerpAPIWrapper
+from langchain.agents import Tool
+from langchain.vectorstores import FAISS
+from langchain.embeddings import OpenAIEmbeddings
+from langchain.schema import Document
+from langchain.agents import Tool, AgentExecutor, LLMSingleActionAgent, AgentOutputParser
+from langchain.prompts import StringPromptTemplate
+from langchain import OpenAI, SerpAPIWrapper, LLMChain
+from langchain.schema import AgentAction, AgentFinish
+from langchain.memory import ConversationBufferMemory
+from langchain.agents import initialize_agent
+from langchain.chat_models import ChatOpenAI
+from langchain.chains.conversation.memory import ConversationBufferWindowMemory
+
+
+from tools.ha import HALightControlTool
+
# Load API keys (OPENAI_API_KEY, SERPAPI_API_KEY, ...) from a local .env file.
load_dotenv()

# Interactive terminal prompt used by the REPL at the bottom of this file.
cli_prompt = Prompt('ici-bot')

search = SerpAPIWrapper()

# Full tool inventory; ChatBot later narrows this per-query by retrieving
# over the tool descriptions (see ChatBot._get_tools).
ALL_TOOLS =[
        Tool(
            name = "Search",
            func=search.run,
            description=""" Useful for when you need to answer questions about current events on the internet
            Use it only when explicitly asked to search on internet. 
            """
        ),
        HALightControlTool(),
    ]


# ReAct-style prompt skeleton; {tools}, {tool_names}, {input} and
# {agent_scratchpad} are filled in by CustomPromptTemplate.format().
template = """Answer the following questions as best you can. You have access to the following tools:

{tools}

Use the following format:

Question: the input question you must answer
Thought: you should always think about what to do
Action: the action to take, should be one of [{tool_names}]
Action Input: the input to the action
Observation: the result of the action
... (this Thought/Action/Action Input/Observation can repeat N times)
Thought: I now know the final answer
Final Answer: the final answer to the original input question

Begin!

Question: {input}
{agent_scratchpad}"""
+
+
class CustomPromptTemplate(StringPromptTemplate):
    """Prompt template that injects a dynamically retrieved tool list.

    `tools_getter` maps the user input to the subset of tools relevant to it;
    the template is then filled with those tools' names and descriptions plus
    an agent scratchpad rebuilt from the intermediate (action, observation)
    steps.
    """

    template: str
    tools_getter: Callable

    def format(self, **kwargs) -> str:
        # Rebuild the scratchpad from the agent's intermediate steps.
        steps = kwargs.pop("intermediate_steps")
        scratchpad_parts = []
        for action, observation in steps:
            scratchpad_parts.append(action.log)
            scratchpad_parts.append(f"\nObservation: {observation}\nThought: ")
        kwargs["agent_scratchpad"] = "".join(scratchpad_parts)

        # Offer only the tools relevant to this particular input.
        relevant = self.tools_getter(kwargs["input"])
        kwargs["tools"] = "\n".join(
            f"{tool.name}: {tool.description}" for tool in relevant
        )
        kwargs["tool_names"] = ", ".join(tool.name for tool in relevant)
        return self.template.format(**kwargs)
+
+
class CustomOutputParser(AgentOutputParser):
    """Parse a ReAct-formatted LLM completion into an action or a finish."""

    def parse(self, llm_output: str) -> Union[AgentAction, AgentFinish]:
        # A "Final Answer:" marker terminates the agent loop.
        if "Final Answer:" in llm_output:
            answer = llm_output.split("Final Answer:")[-1].strip()
            return AgentFinish(
                return_values={"output": answer},
                log=llm_output,
            )

        # Otherwise we expect an "Action: ... / Action Input: ..." pair.
        match = re.search(
            r"Action\s*\d*\s*:(.*?)\nAction\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)",
            llm_output,
            re.DOTALL,
        )
        if match is None:
            raise ValueError(f"Could not parse LLM output: `{llm_output}`")

        tool_name = match.group(1).strip()
        tool_input = match.group(2).strip(" ").strip('"')
        return AgentAction(tool=tool_name, tool_input=tool_input, log=llm_output)
+    
+
class ChatBot(object):
    """ReAct chat agent whose tool set is narrowed per-query by retrieval.

    Every tool description is embedded (OpenAI embeddings) into an in-memory
    FAISS store; for each user input only the most relevant tools are exposed
    to the LLM via CustomPromptTemplate.
    """

    def __init__(self,
                 tools: List[Tool],
                 model_name: str = "gpt-3.5-turbo",
                 model_temperature: float = 0,
                 model_max_tokens: int = 500,
                 verbose: bool = False):
        """Build the tool-description vector store and the LLM chain.

        Parameters:
            tools: complete tool inventory the agent may draw from.
            model_name: OpenAI chat model identifier.
            model_temperature: sampling temperature (0 = deterministic).
            model_max_tokens: completion token budget per call.
            verbose: if True, the AgentExecutor logs its reasoning trace.
        """
        self.sys_msg = """Assistant is a large language model trained by OpenAI.
        Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.
        Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.
        Overall, Assistant is a powerful system that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.
        """

        self.tools = tools
        self.verbose = verbose

        # One document per tool; the "index" metadata maps a retrieved
        # document back to its tool in self.tools.
        docs = [
            Document(
                page_content=t.description,
                metadata={"index": i}) for i, t in enumerate(tools)
        ]

        vector_store = FAISS.from_documents(docs, OpenAIEmbeddings())
        self.retriever = vector_store.as_retriever()

        self.prompt = CustomPromptTemplate(
            template=template,
            tools_getter=self._get_tools,
            input_variables=["input", "intermediate_steps"]
        )

        self.output_parser = CustomOutputParser()

        self.llm = ChatOpenAI(
            temperature=model_temperature,
            max_tokens=model_max_tokens,
            model_name=model_name,
        )

        self.llm_chain = LLMChain(llm=self.llm, prompt=self.prompt)

    def _get_tools(self, query: str):
        """Return the tools whose descriptions best match `query`."""
        docs = self.retriever.get_relevant_documents(query)
        return [self.tools[d.metadata["index"]] for d in docs]

    def _get_agent(self, input: str):
        """Build a single-action agent restricted to tools relevant to `input`.

        NOTE(review): `memory` is created fresh on every call, so no
        conversational state actually survives between turns -- confirm
        whether persistent memory was intended here.
        """
        memory = ConversationBufferWindowMemory(
            memory_key='chat_history',
            k=10,
            return_messages=True
        )

        tool_names = [tool.name for tool in self._get_tools(input)]

        return LLMSingleActionAgent(
            llm_chain=self.llm_chain,
            output_parser=self.output_parser,
            stop=["\nObservation:"],
            allowed_tools=tool_names,
            memory=memory,
            max_iterations=3,
        )

    def run(self, input: str):
        """Run one user input through the agent and return its final answer.

        BUG FIX: the original discarded the executor's result (implicitly
        returning None); the value is now returned.  Backward compatible for
        callers that ignored the return value.
        """
        agent_executor = AgentExecutor.from_agent_and_tools(
            agent=self._get_agent(input),
            tools=self.tools,
            verbose=self.verbose
        )
        return agent_executor.run(input)
+
+
if __name__ == '__main__':
    bot = ChatBot(ALL_TOOLS, verbose=True)
    # Minimal REPL: read a line, run it through the agent, stop on 'exit'.
    while True:
        user_text = cli_prompt.get().strip()
        if user_text == 'exit':
            break
        try:
            bot.run(user_text)
        except Exception as e:
            # Keep the loop alive on any agent failure; just report it.
            print("\n\nSystem:>{}\n".format(e))
+
+   

+ 0 - 0
models/__init__.py


+ 53 - 0
models/ha.py

@@ -0,0 +1,53 @@
+import os
+from langchain.chat_models import ChatOpenAI
+from langchain.chains.conversation.memory import ConversationBufferWindowMemory
+from langchain.agents import initialize_agent
+from typing import List, Any
+
class ChatHA(object):
    """Conversational Home Assistant chat model wrapper.

    Same shape as agents.ha.HAAgent, but with a larger reply budget
    (500 tokens) and a longer memory window (last 10 exchanges).
    """

    def __init__(self, tools: List[Any] = None):
        # None (or an empty list) means "no tools".
        self.tools = tools if tools else list()

        # Deterministic output; up to 500 tokens per reply.
        self.llm = ChatOpenAI(
            temperature=0,
            model_name='gpt-3.5-turbo',
            max_tokens=500,
        )
        self.llm.openai_api_key = os.environ.get('OPENAI_API_KEY', '')

        # Remember the last 10 exchanges of the conversation.
        self.memory = ConversationBufferWindowMemory(
            memory_key='chat_history',
            k=10,
            return_messages=True
        )

        self.sys_msg = """Assistant is a large language model trained by OpenAI.
        Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.
        Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.
        Overall, Assistant is a powerful system that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.
        """

        # Built on demand by get_agent().
        self.agent = None

    def get_agent(self):
        """Create the underlying LangChain agent on first use and cache it."""
        if self.agent is None:
            self.agent = initialize_agent(
                agent='chat-conversational-react-description',
                system_message=self.sys_msg,
                tools=self.tools,
                llm=self.llm,
                verbose=True,
                max_iterations=3,
                early_stopping_method='generate',
                memory=self.memory
            )
        return self.agent

    def run(self, input):
        """Send one user utterance through the agent and return its reply."""
        return self.get_agent().run({"input": input})

Những thay đổi đã bị hủy bỏ vì nó quá lớn
+ 26 - 0
notebooks/documents/state_of_union_1.txt


Những thay đổi đã bị hủy bỏ vì nó quá lớn
+ 143 - 0
notebooks/documents/state_of_union_2.txt


+ 0 - 0
notebooks/documents/state_of_union_3.txt


Những thay đổi đã bị hủy bỏ vì nó quá lớn
+ 113 - 0
notebooks/ha.ipynb


Những thay đổi đã bị hủy bỏ vì nó quá lớn
+ 226 - 0
notebooks/hachat.ipynb


Những thay đổi đã bị hủy bỏ vì nó quá lớn
+ 152 - 0
notebooks/index.ipynb


Những thay đổi đã bị hủy bỏ vì nó quá lớn
+ 110 - 0
notebooks/structured_tool_agent.ipynb


Những thay đổi đã bị hủy bỏ vì nó quá lớn
+ 359 - 0
notebooks/tool_retriever.ipynb


+ 15 - 0
playground/speech2text.py

@@ -0,0 +1,15 @@
"""Capture one utterance from the microphone and transcribe it."""
import speech_recognition as sr

# Obtain audio from the default microphone.
r = sr.Recognizer()
with sr.Microphone() as source:
    print("Say something!")
    audio = r.listen(source)

# Recognize speech using the Google Cloud Speech API.
# BUG FIX: the original comments and user-facing messages said "Sphinx"
# while the code actually calls recognize_google_cloud(); the messages now
# match the backend being used.
try:
    print("Google Cloud thinks you said " + r.recognize_google_cloud(audio))
except sr.UnknownValueError:
    print("Google Cloud could not understand audio")
except sr.RequestError as e:
    print("Google Cloud error; {0}".format(e))

+ 70 - 0
requirements.txt

@@ -0,0 +1,70 @@
+aiohttp==3.8.4
+aiosignal==1.3.1
+appnope @ file:///home/conda/feedstock_root/build_artifacts/appnope_1649077682618/work
+asttokens @ file:///home/conda/feedstock_root/build_artifacts/asttokens_1670263926556/work
+async-timeout==4.0.2
+attrs==23.1.0
+backcall @ file:///home/conda/feedstock_root/build_artifacts/backcall_1592338393461/work
+backports.functools-lru-cache @ file:///home/conda/feedstock_root/build_artifacts/backports.functools_lru_cache_1618230623929/work
+certifi==2022.12.7
+charset-normalizer==3.1.0
+comm @ file:///home/conda/feedstock_root/build_artifacts/comm_1679481329611/work
+dataclasses-json==0.5.7
+debugpy @ file:///Users/runner/miniforge3/conda-bld/debugpy_1680755647138/work
+decorator @ file:///home/conda/feedstock_root/build_artifacts/decorator_1641555617451/work
+executing @ file:///home/conda/feedstock_root/build_artifacts/executing_1667317341051/work
+filelock==3.12.0
+frozenlist==1.3.3
+fsspec==2023.4.0
+greenlet==2.0.2
+huggingface-hub==0.14.1
+idna==3.4
+importlib-metadata @ file:///home/conda/feedstock_root/build_artifacts/importlib-metadata_1682176699712/work
+ipykernel @ file:///Users/runner/miniforge3/conda-bld/ipykernel_1679336661730/work
+ipython @ file:///Users/runner/miniforge3/conda-bld/ipython_1680185563420/work
+jedi @ file:///home/conda/feedstock_root/build_artifacts/jedi_1669134318875/work
+jupyter_client @ file:///home/conda/feedstock_root/build_artifacts/jupyter_client_1681432441054/work
+jupyter_core @ file:///Users/runner/miniforge3/conda-bld/jupyter_core_1678994447003/work
+langchain==0.0.151
+marshmallow==3.19.0
+marshmallow-enum==1.5.1
+matplotlib-inline @ file:///home/conda/feedstock_root/build_artifacts/matplotlib-inline_1660814786464/work
+multidict==6.0.4
+mypy-extensions==1.0.0
+nest-asyncio @ file:///home/conda/feedstock_root/build_artifacts/nest-asyncio_1664684991461/work
+numexpr==2.8.4
+numpy==1.24.3
+openai==0.27.5
+openapi-schema-pydantic==1.2.4
+packaging @ file:///home/conda/feedstock_root/build_artifacts/packaging_1681337016113/work
+parso @ file:///home/conda/feedstock_root/build_artifacts/parso_1638334955874/work
+pexpect @ file:///home/conda/feedstock_root/build_artifacts/pexpect_1667297516076/work
+pickleshare @ file:///home/conda/feedstock_root/build_artifacts/pickleshare_1602536217715/work
+platformdirs @ file:///home/conda/feedstock_root/build_artifacts/platformdirs_1682644429438/work
+prompt-toolkit @ file:///home/conda/feedstock_root/build_artifacts/prompt-toolkit_1677600924538/work
+psutil @ file:///Users/runner/miniforge3/conda-bld/psutil_1681775164927/work
+ptyprocess @ file:///home/conda/feedstock_root/build_artifacts/ptyprocess_1609419310487/work/dist/ptyprocess-0.7.0-py2.py3-none-any.whl
+pure-eval @ file:///home/conda/feedstock_root/build_artifacts/pure_eval_1642875951954/work
+pydantic==1.10.7
+Pygments @ file:///home/conda/feedstock_root/build_artifacts/pygments_1681904169130/work
+python-dateutil @ file:///home/conda/feedstock_root/build_artifacts/python-dateutil_1626286286081/work
+python-dotenv==1.0.0
+PyYAML==6.0
+pyzmq @ file:///Users/runner/miniforge3/conda-bld/pyzmq_1679317044518/work
+regex==2023.3.23
+requests==2.29.0
+six @ file:///home/conda/feedstock_root/build_artifacts/six_1620240208055/work
+SQLAlchemy==2.0.11
+stack-data @ file:///home/conda/feedstock_root/build_artifacts/stack_data_1669632077133/work
+tenacity==8.2.2
+tokenizers==0.13.3
+tornado @ file:///Users/runner/miniforge3/conda-bld/tornado_1681817627536/work
+tqdm==4.65.0
+traitlets @ file:///home/conda/feedstock_root/build_artifacts/traitlets_1675110562325/work
+transformers==4.28.1
+typing-inspect==0.8.0
+typing_extensions @ file:///home/conda/feedstock_root/build_artifacts/typing_extensions_1678559861143/work
+urllib3==1.26.15
+wcwidth @ file:///home/conda/feedstock_root/build_artifacts/wcwidth_1673864653149/work
+yarl==1.9.2
+zipp @ file:///home/conda/feedstock_root/build_artifacts/zipp_1677313463193/work

+ 0 - 0
tools/__init__.py


+ 95 - 0
tools/ha.py

@@ -0,0 +1,95 @@
import os
from homeassistant_api import Client
from langchain.tools import BaseTool
from homeassistant_api.errors import EndpointNotFoundError
# Shared Home Assistant REST client, configured via environment variables:
#   HA_URL     - base URL of the Home Assistant instance (default: localhost)
#   HA_API_KEY - long-lived access token (default: empty string)
ha_client = Client(
    os.environ.get('HA_URL', 'http://localhost:8123'),
    os.environ.get('HA_API_KEY', '')
)
+
+
class HALightControl(BaseTool):
    """LangChain tool that turns a Home Assistant light on or off."""

    name = "Control the lights in the room"
    description  = (
        "use this tool when you need to turn on or off the light in the room. "
        "given the light entity name and an action like turnon or turnoff, this tool will turn the lights on or off. "
        "To use the tool you must provide exactly two of the following parameters "
        "['entity_name', 'action']"
    )
    return_direct = False

    def _run(
        self,
        action: str,
        entity_name: str,
    ):
        """Apply `action` ('turnon' / 'turnoff') to `light.<entity_name>`.

        Returns a human-readable status string.  FIX: the original silently
        returned "" for a missing entity name or an unrecognized action,
        which gives the calling agent nothing to react to; explicit messages
        are returned instead.
        """
        if not entity_name:
            return "No light entity name was provided"

        light = ha_client.get_domain("light")
        entity = "light.{entity_name}".format(entity_name=entity_name)
        if action == "turnon":
            light.turn_on(entity_id=entity)
            return "{entity_name} turned on".format(entity_name=entity_name)
        if action == "turnoff":
            light.turn_off(entity_id=entity)
            return "{entity_name} turned off".format(entity_name=entity_name)
        return "Unknown action '{action}'; expected 'turnon' or 'turnoff'".format(action=action)

    def _arun(self, query: str):
        raise NotImplementedError("This tool does not support async")
+
+
class HASensorReading(BaseTool):
    """Generic Home Assistant sensor-state reader."""

    name = "Home Assistant Sensor Reading Tool"
    description  = (
        "use this tool when you need to read data from a sensor."
        "To use the tool you must provide exactly the following parameter "
        "'entity_name'"
    )
    return_direct = False

    def _get_entity_name(self, entity_name: str):
        # Qualify the name with the "sensor." domain unless already present.
        if entity_name.startswith("sensor."):
            return entity_name
        return f"sensor.{entity_name}"

    def _run(
        self,
        entity_name: str,
    ):
        """Return the current state of the given sensor entity.

        Errors are reported as human-readable strings rather than raised, so
        the calling agent can read and recover from them.
        """
        entity = self._get_entity_name(entity_name)
        try:
            state = ha_client.get_entity(entity_id=entity).get_state()
        except EndpointNotFoundError:
            return "No sensor found with name {entity_name}".format(entity_name=entity_name)
        except Exception:
            return "An error occurred while trying to get the sensor {entity_name}".format(entity_name=entity_name)
        return state

    def _arun(self, query: str):
        raise NotImplementedError("This tool does not support async")
+    
+
class HATempHumReading(HASensorReading):
    # Thin specialization of HASensorReading: identical behavior, but the
    # name/description steer the agent toward temperature/humidity queries.
    name = "Room temperature and humidity readings"
    description  = (
        "use this tool when you need to get room temperature and humidity readings. "
        "To use the tool you must provide exactly the following parameter "
        "'entity_name'"
    )
+    
+
class HAGeolocation(HASensorReading):
    # Reads the "<name>_geocoded_location" sensor for a tracked entity;
    # presumably exposed by Home Assistant's mobile-app integration -- verify
    # against the target HA instance.
    name = "Home Assistant Geolocation Tool"
    description  = (
        "use this tool when you need to get geolocation of an entity, person, object. "
        "To use the tool you must provide exactly the following parameter "
        "'entity_name'"
    )

    def _get_entity_name(self, entity_name: str):
        # Unlike the base class, always builds the geocoded-location entity id.
        # NOTE(review): a name already carrying the "sensor." prefix is not
        # handled here, unlike in HASensorReading._get_entity_name.
        return "sensor.{entity_name}_geocoded_location".format(entity_name=entity_name)
+

+ 23 - 0
tools/index.py

@@ -0,0 +1,23 @@
+import os
+from langchain.tools import BaseTool
+
+
+
class QAIndexedDocuments(BaseTool):
    """Question-answering tool that queries a pre-built document index.

    NOTE(review): ``index`` is neither defined nor imported in this module;
    ``_run`` will raise NameError unless it is provided at module level
    elsewhere — confirm where the index object is meant to come from.
    """

    name = "Question Answering Tool for Indexed Documents"
    description = (
        "use this tool when asked to search and find answers in documents. "
        "If explicitly asked to search in indexed documents, this tool will search in the indexed documents. "
        # Fixed grammar of the agent-facing prompt text ("one parameters" -> "one parameter").
        "To use the tool you must provide exactly one parameter as follows "
        "['question']"
    )
    # The answer (with sources) is returned directly to the user.
    return_direct = True

    def _run(
        self,
        question: str,
    ):
        """Query the document index and return the answer together with its sources."""
        return index.query_with_sources(question)

    def _arun(self, query: str):
        """Async execution is not supported by this tool."""
        raise NotImplementedError("This tool does not support async")

+ 0 - 0
utils/__init__.py


+ 118 - 0
utils/prompt.py

@@ -0,0 +1,118 @@
+import dotenv
+import getpass
+import platform
+import os
+from prompt_toolkit.shortcuts import prompt
+from prompt_toolkit import print_formatted_text, HTML
+from prompt_toolkit.styles import Style
+from prompt_toolkit.cursor_shapes import CursorShape
+from prompt_toolkit.shortcuts import button_dialog
+from prompt_toolkit.application import run_in_terminal
+from prompt_toolkit.key_binding import KeyBindings
+from prompt_toolkit import PromptSession
+from prompt_toolkit.history import FileHistory
+
+
# Load environment variables from .env before any PROMPT_THEME lookups below.
# (Removed a commented-out KeyBindings experiment that was never wired up.)
dotenv.load_dotenv()
+
+
class PromptTheme:
    """Prompt-toolkit message fragments and colour style for the shell prompt.

    The palette can be switched with the ``PROMPT_THEME`` environment
    variable; currently only ``dark_plus`` is recognised, which changes the
    ``warning`` colours. Unknown theme names fall back to the base palette.
    """

    # Base palette shared by every theme.
    _BASE_STYLE = {
        '': '#ffffff',
        'username': '#884444',
        'at': '#00aa00',
        'colon': '#ffffff',
        'pound': '#00aa00',
        'appname': '#00ffff bg:#444400',
        'path': 'ansicyan underline',
        'info': 'black bg:white',
        'warning': 'yellow bg:white',
        'error': 'red bg:white',
        'bottom-toolbar': '#ffffff bg:#333333',
    }

    # Per-theme overrides applied on top of the base palette.
    _THEME_OVERRIDES = {
        'dark_plus': {'warning': 'red bg:yellow'},
    }

    def __init__(self, appname):
        # "user@host:appname# " styled prompt fragments.
        self.message = [
            ('class:username', getpass.getuser()),
            ('class:at',       '@'),
            ('class:host',     platform.node()),
            ('class:colon',    ':'),
            ('class:appname',  appname),
            ('class:pound',    '# '),
        ]
        # Build the style once: base palette plus any overrides for the
        # selected theme (previously the whole dict was duplicated per theme).
        style_dict = dict(self._BASE_STYLE)
        theme = os.getenv('PROMPT_THEME')
        if theme:
            style_dict.update(self._THEME_OVERRIDES.get(theme, {}))
        self.style = Style.from_dict(style_dict)
+
+
+
class Prompt:
    """Interactive terminal prompt with theming, history and a bottom toolbar."""

    def __init__(self, appname='chitchat'):
        """Set up the themed prompt session backed by a persistent history file."""
        theme = PromptTheme(appname)
        self.message = theme.message
        self.style = theme.style
        self.bottom_toolbar_text = ' Press [Ctrl+X] to exit '

        # Ensure the history file exists. Append mode never truncates, which
        # avoids the exists()-then-open('w') race of the original that could
        # wipe an existing history.
        history_file = os.path.expanduser('~/.aiassistant_history')
        with open(history_file, 'a'):
            pass

        self.session = PromptSession(history=FileHistory(history_file))

    def bottom_toolbar(self):
        """Return the styled fragments rendered in the bottom toolbar."""
        return [('class:bottom-toolbar', f"@aiassistant:~ {self.bottom_toolbar_text}")]

    def print(self, message, tag='info'):
        """Print *message* styled by *tag* ('info', 'warning', 'error', ...).

        NOTE(review): *message* is interpolated into HTML markup unescaped;
        text containing '<' or '&' will break the formatting — confirm inputs.
        """
        print_formatted_text(HTML(f"<{tag}>{message}</{tag}>"), style=self.style)

    def get_input_mode(self):
        """Ask the user to pick an input mode; returns 'text', 'voice' or None."""
        return button_dialog(
            title='Input Mode',
            text='What input mode do you want to use?',
            buttons=[
                ('Text', 'text'),
                ('Voice', 'voice'),
                ('Surprise me', None),
            ],
        ).run()

    def get(self):
        """Read one line of user input from the themed prompt session."""
        return self.session.prompt(
            self.message,
            style=self.style,
            cursor=CursorShape.BLINKING_BLOCK,
            bottom_toolbar=self.bottom_toolbar,
        )

Những thay đổi đã bị hủy bỏ vì nó quá lớn
+ 26 - 0
vectore_documents/administrative.txt


Những thay đổi đã bị hủy bỏ vì nó quá lớn
+ 143 - 0
vectore_documents/rotld.txt