diff --git a/T2_tools_define/random_fact.py b/T2_tools_define/random_fact.py
new file mode 100644
index 0000000..4719d8c
--- /dev/null
+++ b/T2_tools_define/random_fact.py
@@ -0,0 +1,53 @@
+from typing import Optional
+import requests
+
+from smolagents.agents import ToolCallingAgent
+from smolagents import CodeAgent, HfApiModel, tool
+from huggingface_hub import login
+from smolagents import LiteLLMModel
+
+from dotenv import load_dotenv
+import os
+
+# load .env file
+load_dotenv()
+
+api_key = os.environ.get('API_KEY')
+
+#print(api_key)
+login(api_key)
+
+
+# Select the LLM engine to use!
+model = HfApiModel()
+# model = LiteLLMModel(
+#     model_id="ollama_chat/llama3.1",
+#     api_base="http://localhost:11434",  # replace with a remote OpenAI-compatible server if necessary
+#     # api_key="your-api-key",  # replace with API key if necessary
+#     # num_ctx=8192,  # ollama's default is 2048, which will often fail horribly; 8192 works for easy tasks, more is better. Check https://huggingface.co/spaces/NyxKrage/LLM-Model-VRAM-Calculator to calculate how much VRAM this will need for the selected model.
+# )
+
+
+@tool
+def get_random_fact() -> str:
+    """
+    Fetches a random fact from the "uselessfacts.jsph.pl" API.
+    Returns:
+        str: A string containing the random fact, or an error message if the request fails.
+    """
+    url = "https://uselessfacts.jsph.pl/random.json?language=en"
+
+    try:
+        response = requests.get(url, timeout=10)  # timeout so the agent never hangs on a slow response
+        response.raise_for_status()
+
+        data = response.json()
+
+        return f"Random Fact: {data['text']}"
+    except requests.exceptions.RequestException as e:
+        return f"Error fetching random fact: {str(e)}"
+
+agent = ToolCallingAgent(tools=[get_random_fact], model=model)
+# agent = CodeAgent(tools=[get_random_fact], model=model)
+
+agent.run("Tell me a random fact!")
\ No newline at end of file
diff --git a/T2_tools_define/tools.py b/T2_tools_define/weather_tools.py
similarity index 100%
rename from T2_tools_define/tools.py
rename to T2_tools_define/weather_tools.py
diff --git a/T2_tools_define/wiki_tools.py b/T2_tools_define/wiki_tools.py
new file mode 100644
index 0000000..425cef2
--- /dev/null
+++ b/T2_tools_define/wiki_tools.py
@@ -0,0 +1,61 @@
+from typing import Optional
+import requests
+
+from smolagents.agents import ToolCallingAgent
+from smolagents import CodeAgent, HfApiModel, tool
+from huggingface_hub import login
+from smolagents import LiteLLMModel
+
+from dotenv import load_dotenv
+import os
+
+# load .env file
+load_dotenv()
+
+api_key = os.environ.get('API_KEY')
+
+#print(api_key)
+login(api_key)
+
+
+# Select the LLM engine to use!
+model = HfApiModel()
+# model = LiteLLMModel(
+#     model_id="ollama_chat/llama3.1",
+#     api_base="http://localhost:11434",  # replace with a remote OpenAI-compatible server if necessary
+#     # api_key="your-api-key",  # replace with API key if necessary
+#     # num_ctx=8192,  # ollama's default is 2048, which will often fail horribly; 8192 works for easy tasks, more is better. Check https://huggingface.co/spaces/NyxKrage/LLM-Model-VRAM-Calculator to calculate how much VRAM this will need for the selected model.
+# )
+
+
+@tool
+def search_wikipedia(query: str) -> str:
+    """
+    Fetches a summary of a Wikipedia page for a given query.
+    Args:
+        query: The search term to look up on Wikipedia.
+    Returns:
+        str: A summary of the Wikipedia page if successful, or an error message if the request fails.
+    Note:
+        Request failures are caught and returned as an error message rather than raised.
+ """ + url = f"https://en.wikipedia.org/api/rest_v1/page/summary/{query}" + + try: + response = requests.get(url) + response.raise_for_status() + + data = response.json() + title = data["title"] + extract = data["extract"] + + return f"Summary for {title}: {extract}" + + except requests.exceptions.RequestException as e: + return f"Error fetching Wikipedia data: {str(e)}" + + +agent = ToolCallingAgent(tools=[search_wikipedia], model=model) +# agent = CodeAgent(tools=[get_weather], model=model) + +agent.run("who is the director of the movie inception?") \ No newline at end of file