# SmolAgent_Tutorial/URL_Database_project/ai_agent/app.py
# Last modified: 2025-03-03 16:35:52 +08:00
import json
import os
from typing import Optional

import requests
from dotenv import load_dotenv
from huggingface_hub import login
from smolagents import CodeAgent, HfApiModel, LiteLLMModel, tool
from smolagents.agents import ToolCallingAgent
# load .env file
load_dotenv()
api_key = os.environ.get('API_KEY')
#print(api_key)
login(api_key)
# Select LLM engine to use!
model = HfApiModel()
# model = LiteLLMModel(
# model_id="ollama_chat/llama3.1",
# api_base="http://localhost:11434", # replace with remote open-ai compatible server if necessary
# #api_key="your-api-key", # replace with API key if necessary
# #num_ctx=8192, # ollama default is 2048 which will often fail horribly. 8192 works for easy tasks, more is better. Check https://huggingface.co/spaces/NyxKrage/LLM-Model-VRAM-Calculator to calculate how much VRAM this will need for the selected model.
# )
@tool
def get_data(query: str) -> str:
"""
Fetches a data from the server.
Args:
query: data
Returns:
str: database data in JSON format
Raises:
requests.exceptions.RequestException: If there is an issue with the HTTP request.
"""
url = f"http://localhost:3000/{query}"
try:
response = requests.get(url)
response.raise_for_status()
data = response.json()
return data
except requests.exceptions.RequestException as e:
return f"Error fetching data: {e}"
# result = get_data('latest_data')
# print(result)
@tool
def get_latest_data() -> str:
"""
Fetches a data from the server.
Args:
query: data
Returns:
str: database data
Raises:
requests.exceptions.RequestException: If there is an issue with the HTTP request.
"""
url = f"http://localhost:3000/latest_data"
try:
response = requests.get(url)
response.raise_for_status()
data = response.json()
return data
except requests.exceptions.RequestException as e:
return f"Error fetching data: {e}"
# result = get_latest_data()
# print(result)
agent = ToolCallingAgent(tools=[get_data], model=model)
# agent = CodeAgent(tools=[get_latest_data], model=model)
agent.run("What is the latest data?")
# agent.run("What is the value x in the latest data?")