Radu Boncea 1 рік тому
коміт
f23f0fe75c
5 змінених файлів з 111 додано та 0 видалено
  1. 5 0
      .gitignore
  2. 5 0
      OAI_CONFIG_LIST.json
  3. 34 0
      agentbuilder.py
  4. 52 0
      requirements.txt
  5. 15 0
      test.py

+ 5 - 0
.gitignore

@@ -0,0 +1,5 @@
+.DS_Store
+.env
+.venv
+.cache/*
+coding/*

+ 5 - 0
OAI_CONFIG_LIST.json

@@ -0,0 +1,5 @@
+[
+    {
+        "model": "gpt-4-1106-preview"
+    }
+]

+ 34 - 0
agentbuilder.py

@@ -0,0 +1,34 @@
import autogen
from dotenv import load_dotenv
from autogen.agentchat.contrib.agent_builder import AgentBuilder

# Pull environment variables (e.g. the OpenAI API key) into the process.
load_dotenv()

config_path = "OAI_CONFIG_LIST.json"
config_list = autogen.config_list_from_json(config_path)

# Temperature 0 keeps agent behaviour as deterministic as the backend allows.
default_llm_config = {"temperature": 0}

# Build a team of agents appropriate for the task described below.
builder = AgentBuilder(
    config_path=config_path,
    builder_model="gpt-4-1106-preview",
    agent_model="gpt-4-1106-preview",
)

building_task = (
    "Find a paper on arxiv by programming, and analyze its application in some "
    "domain. For example, find a latest paper about gpt-4 on arxiv and find its "
    "potential applications in software."
)

agent_list, agent_configs = builder.build(building_task, default_llm_config, coding=True)
+
def start_task(execution_task: str, agent_list: list, llm_config: dict, max_round: int = 12):
    """Run *execution_task* through a group chat of the built agents.

    Args:
        execution_task: Natural-language task handed to the agent group.
        agent_list: Agents produced by ``AgentBuilder.build``; the first
            agent in the list initiates the conversation.
        llm_config: Extra LLM options merged into the manager's config
            (e.g. ``{"temperature": 0}``).
        max_round: Maximum number of group-chat rounds before the chat
            stops (previously hard-coded to 12; default preserves that).
    """
    # Reads the module-level ``config_path`` and restricts the loaded
    # endpoints to the model the agents were built for.
    config_list = autogen.config_list_from_json(
        config_path, filter_dict={"model": ["gpt-4-1106-preview"]}
    )

    group_chat = autogen.GroupChat(agents=agent_list, messages=[], max_round=max_round)
    manager = autogen.GroupChatManager(
        groupchat=group_chat, llm_config={"config_list": config_list, **llm_config}
    )
    # The first built agent kicks off the conversation with the task.
    agent_list[0].initiate_chat(manager, message=execution_task)
+
# Kick off the concrete research task with the freshly built agents.
execution_task = "Find a recent paper about gpt-4 on arxiv and find its potential applications in software."
start_task(
    execution_task=execution_task,
    agent_list=agent_list,
    llm_config=default_llm_config,
)

+ 52 - 0
requirements.txt

@@ -0,0 +1,52 @@
+annotated-types==0.6.0
+anyio==4.2.0
+appdirs==1.4.4
+arxiv==2.1.0
+beautifulsoup4==4.12.2
+certifi==2023.11.17
+charset-normalizer==3.3.2
+contourpy==1.2.0
+cycler==0.12.1
+diskcache==5.6.3
+distro==1.9.0
+exceptiongroup==1.2.0
+feedparser==6.0.10
+FLAML==2.1.1
+fonttools==4.47.0
+frozendict==2.3.10
+h11==0.14.0
+html5lib==1.1
+httpcore==1.0.2
+httpx==0.26.0
+idna==3.6
+kiwisolver==1.4.5
+lxml==5.0.0
+matplotlib==3.8.2
+multitasking==0.0.11
+numpy==1.26.2
+openai==1.6.1
+packaging==23.2
+pandas==2.1.4
+peewee==3.17.0
+Pillow==10.1.0
+pyautogen==0.2.2
+pydantic==2.5.3
+pydantic_core==2.14.6
+pyparsing==3.1.1
+python-dateutil==2.8.2
+python-dotenv==1.0.0
+pytz==2023.3.post1
+regex==2023.12.25
+requests==2.31.0
+sgmllib3k==1.0.0
+six==1.16.0
+sniffio==1.3.0
+soupsieve==2.5
+termcolor==2.4.0
+tiktoken==0.5.2
+tqdm==4.66.1
+typing_extensions==4.9.0
+tzdata==2023.4
+urllib3==2.1.0
+webencodings==0.5.1
+yfinance==0.2.33

+ 15 - 0
test.py

@@ -0,0 +1,15 @@
from dotenv import load_dotenv
from autogen import AssistantAgent, UserProxyAgent, config_list_from_json

# Pull environment variables (e.g. OPENAI_API_KEY) from a local .env file.
load_dotenv()

# Load LLM inference endpoints from an env variable or a file.
# See https://microsoft.github.io/autogen/docs/FAQ#set-your-api-endpoints
# and OAI_CONFIG_LIST_sample
# FIX: the config file committed alongside this script is named
# "OAI_CONFIG_LIST.json"; looking up bare "OAI_CONFIG_LIST" only resolves if
# an environment variable of that exact name is set, so point at the file.
config_list = config_list_from_json(env_or_file="OAI_CONFIG_LIST.json")
# You can also set config_list directly as a list, for example, config_list = [{'model': 'gpt-4', 'api_key': '<your OpenAI API key here>'},]
assistant = AssistantAgent("assistant", llm_config={"config_list": config_list})
# NOTE(review): code suggested by the assistant runs locally in ./coding
# (gitignored); consider a docker-based code_execution_config for isolation.
user_proxy = UserProxyAgent("user_proxy", code_execution_config={"work_dir": "coding"})
user_proxy.initiate_chat(assistant, message="Plot a chart of NVDA and TESLA stock price change YTD.")
# This initiates an automated chat between the two agents to solve the task