"""Minimal AutoGen two-agent demo.

An ``AssistantAgent`` (LLM-backed) and a ``UserProxyAgent`` (which can
execute the code the assistant writes) collaborate automatically on a
plotting task.
"""
from dotenv import load_dotenv
from autogen import AssistantAgent, UserProxyAgent, config_list_from_json


def main() -> None:
    """Run an automated assistant / user-proxy chat to solve the task."""
    # Pull API keys / endpoints from a local .env file into the environment.
    load_dotenv()

    # Load LLM inference endpoints from an env variable or a file.
    # See https://microsoft.github.io/autogen/docs/FAQ#set-your-api-endpoints
    # and OAI_CONFIG_LIST_sample.
    # You can also set config_list directly as a list, for example:
    # config_list = [{"model": "gpt-4", "api_key": "<your OpenAI API key here>"}]
    config_list = config_list_from_json(env_or_file="OAI_CONFIG_LIST")

    assistant = AssistantAgent("assistant", llm_config={"config_list": config_list})
    # The user proxy runs any code the assistant produces inside ./coding.
    user_proxy = UserProxyAgent("user_proxy", code_execution_config={"work_dir": "coding"})

    # This initiates an automated chat between the two agents to solve the task.
    user_proxy.initiate_chat(
        assistant,
        message="Plot a chart of NVDA and TESLA stock price change YTD.",
    )


if __name__ == "__main__":
    main()