from smolagents import CodeAgent, DuckDuckGoSearchTool, HfApiModel, load_tool, Tool
from smolagents.default_tools import FinalAnswerTool, VisitWebpageTool
from Gradio_UI import GradioUI
# from langchain.agents import load_tools
import datetime
import yaml
from typing import Optional

# Custom tools (wolfram_alpha, get_weather, get_joke, search_wikipedia,
# get_current_time_in_timezone) are defined in my_tools.py
from my_tools import *

# Free Distill-R1 model:
#   'deepseek-ai/DeepSeek-R1-Distill-Qwen-32B'
# If the agent does not answer, the model is overloaded; switch to another model
# or to the original Hugging Face endpoint serving Qwen2.5 Coder:
#   'Qwen/Qwen2.5-Coder-32B-Instruct'
model = HfApiModel(
    max_tokens=15000,
    temperature=0.5,
    model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud',  # Endpoint serving 'Qwen/Qwen2.5-Coder-32B-Instruct'
    custom_role_conversions=None,
)

# Initialize tools
final_answer = FinalAnswerTool()
image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)
search_tool = DuckDuckGoSearchTool()
visit_webpage_tool = VisitWebpageTool()
# wolfram_tool = Tool.from_langchain(load_tools(["wolfram-alpha"])[0])

# Load the agent's prompt templates
with open("prompts.yaml", 'r') as stream:
    prompt_templates = yaml.safe_load(stream)

agent = CodeAgent(
    model=model,
    tools=[
        final_answer,
        # wolfram_tool,
        wolfram_alpha,
        search_tool,
        image_generation_tool,
        get_weather,
        get_joke,
        search_wikipedia,
        get_current_time_in_timezone,
        visit_webpage_tool,
    ],
    max_steps=10,
    verbosity_level=2,
    grammar=None,
    planning_interval=3,
    name=None,
    description=None,
    prompt_templates=prompt_templates,
)

GradioUI(agent).launch()