from smolagents import CodeAgent, DuckDuckGoSearchTool, HfApiModel, load_tool, tool
from typing import Dict
import datetime
import requests
import pytz
import yaml
from tools.final_answer import FinalAnswerTool
from Gradio_UI import GradioUI
@tool
def calculate_bandwidth(users: int, usage: Dict[str, int]) -> float:
    """Calculate the recommended internet speed based on user inputs.
    Args:
        users: The total number of users requiring internet access.
        usage: A dictionary with usage categories as keys and the number of users per category as values.
            Expected keys are:
            - "browsing": Number of users browsing the web.
            - "video_call": Number of users on video calls.
            - "hd_streaming": Number of users streaming in HD.
            - "4k_streaming": Number of users streaming in 4K.
            - "gaming": Number of users gaming online.
            - "remote_work": Number of users working remotely.
    """
    # Per-user bandwidth requirement for each activity, in Mbps.
    usage_requirements = {
        "browsing": 1,
        "video_call": 2,
        "hd_streaming": 5,
        "4k_streaming": 25,
        "gaming": 10,
        "remote_work": 3,
    }
    # Sum the requirements for the activities reported in `usage`; the per-activity
    # counts drive the estimate (the `users` total is informational only).
    total_bandwidth = sum(usage_requirements[activity] * usage.get(activity, 0) for activity in usage_requirements)
    overhead = 1.2  # 20% overhead for a seamless experience
    total_bandwidth_with_overhead = total_bandwidth * overhead
    return round(total_bandwidth_with_overhead, 2)
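# A quick illustrative check of the bandwidth math (assumed example inputs, not part of the app flow):
# 2 users browsing + 1 user streaming in 4K -> (2 * 1 + 1 * 25) Mbps = 27 Mbps,
# and applying the 20% overhead gives 27 * 1.2 = 32.4 Mbps.
# calculate_bandwidth(3, {"browsing": 2, "4k_streaming": 1})  # -> 32.4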

final_answer = FinalAnswerTool()
# duck_duck_go_search = DuckDuckGoSearchTool()

# If the agent does not answer, the model may be overloaded; use another model or the
# following Hugging Face endpoint, which also serves Qwen2.5 Coder:
# model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud'
model = HfApiModel(
    max_tokens=2096,
    temperature=0.5,
    model_id='Qwen/Qwen2.5-Coder-32B-Instruct',  # this model may be overloaded at times
    custom_role_conversions=None,
)
# Import a tool from the Hub (loaded here but not registered with the agent below).
image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)

with open("prompts.yaml", 'r') as stream:
    prompt_templates = yaml.safe_load(stream)
agent = CodeAgent(
    model=model,
    tools=[final_answer, calculate_bandwidth],  # internet bandwidth calculator tool
    max_steps=6,
    verbosity_level=1,
    grammar=None,
    planning_interval=None,
    name=None,
    description=None,
    prompt_templates=prompt_templates
)

GradioUI(agent).launch()