# Smolagents demo: a manager agent delegates to a worker agent that
# fetches data from a local server at http://localhost:3000.
import json
import os
from typing import Optional
from urllib.parse import quote

import requests
from dotenv import load_dotenv
from huggingface_hub import login

from smolagents import CodeAgent, HfApiModel, LiteLLMModel, tool
from smolagents.agents import ToolCallingAgent
|
|
# Load environment variables (API_KEY) from the local .env file.
load_dotenv()

api_key = os.environ.get('API_KEY')
if not api_key:
    # Fail fast with a clear message instead of an opaque login/auth error
    # later when the Hugging Face API is first contacted.
    raise RuntimeError("API_KEY is not set; add it to your .env file.")

# Authenticate with the Hugging Face Hub so HfApiModel can be used.
login(api_key)

# Select LLM engine to use!
model = HfApiModel()
# Alternative: a local/remote OpenAI-compatible server via LiteLLM.
# model = LiteLLMModel(
#     model_id="ollama_chat/llama3.1",
#     api_base="http://localhost:11434",  # replace with remote open-ai compatible server if necessary
#     # api_key="your-api-key",  # replace with API key if necessary
#     # num_ctx=8192,  # ollama default is 2048 which will often fail horribly. 8192 works for easy tasks, more is better. Check https://huggingface.co/spaces/NyxKrage/LLM-Model-VRAM-Calculator to calculate how much VRAM this will need for the selected model.
# )
|
|
|
|
|
|
@tool
def get_data(query: str) -> str:
    """
    Fetches data for *query* from the local data server.

    Args:
        query: Endpoint/path fragment appended to the server base URL
            (e.g. "latest_data"). URL-encoded before the request is made.

    Returns:
        str: The server's JSON response serialized as a string, or an
        error message of the form "Error fetching data: ..." if the
        request failed.
    """
    # Quote the query so special characters cannot break the URL path.
    url = f"http://localhost:3000/{quote(query)}"

    try:
        # Timeout prevents the agent from hanging forever if the server
        # is unreachable (requests has no default timeout).
        response = requests.get(url, timeout=10)
        response.raise_for_status()

        data = response.json()

        # Serialize to str to honor the declared return type; smolagents
        # tools are expected to return strings.
        return json.dumps(data)
    except requests.exceptions.RequestException as e:
        # Best-effort tool: report the failure as text so the agent can
        # reason about it rather than crashing the run.
        return f"Error fetching data: {e}"
|
|
|
|
|
|
# result = get_data('latest_data')
|
|
# print(result)
|
|
|
|
@tool
def get_latest_data() -> str:
    """
    Fetches the latest data entry from the local data server.

    Returns:
        str: The server's JSON response serialized as a string, or an
        error message of the form "Error fetching data: ..." if the
        request failed.
    """
    # Fixed endpoint — no parameters, so no f-string/encoding needed.
    url = "http://localhost:3000/latest_data"

    try:
        # Timeout prevents the agent from hanging forever if the server
        # is unreachable (requests has no default timeout).
        response = requests.get(url, timeout=10)
        response.raise_for_status()

        data = response.json()

        # Serialize to str to honor the declared return type; smolagents
        # tools are expected to return strings.
        return json.dumps(data)
    except requests.exceptions.RequestException as e:
        # Best-effort tool: report the failure as text so the agent can
        # reason about it rather than crashing the run.
        return f"Error fetching data: {e}"
|
|
|
|
# result = get_latest_data()
|
|
# print(result)
|
|
|
|
|
|
# Worker agent exposing the data-fetching tools. Register BOTH tools:
# get_data was previously defined but never attached to any agent, so
# arbitrary-endpoint queries were impossible.
get_data_agent = ToolCallingAgent(
    tools=[get_data, get_latest_data],
    model=model,
    name="get_data_agent",
    description="This is an agent that gets data from the server.",
)

# Manager agent: no tools of its own, delegates retrieval to the worker.
manager_agent = CodeAgent(
    tools=[],
    model=model,
    managed_agents=[get_data_agent],
)

# Guard the run so importing this module doesn't trigger an agent run.
if __name__ == "__main__":
    manager_agent.run("What is the value x in the latest data?")