Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -36,16 +36,72 @@ def get_current_time_in_timezone(timezone: str) -> str:
|
|
36 |
|
37 |
final_answer = FinalAnswerTool()
|
38 |
|
|
|
|
|
39 |
# If the agent does not answer, the model is overloaded, please use another model or the following Hugging Face Endpoint that also contains qwen2.5 coder:
|
40 |
# model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud'
|
41 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
42 |
model = HfApiModel(
|
43 |
-
max_tokens=
|
44 |
-
temperature=0.5,
|
45 |
-
model_id='
|
46 |
-
|
|
|
|
|
|
|
47 |
)
|
48 |
|
|
|
|
|
|
|
49 |
|
50 |
# Import tool from Hub
|
51 |
image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)
|
|
|
36 |
|
37 |
final_answer = FinalAnswerTool()  # tool the agent invokes to emit its final answer
|
38 |
|
39 |
+
############# MODEL SELECTION ################################################
|
40 |
+
|
41 |
# If the agent does not answer, the model is overloaded, please use another model or the following Hugging Face Endpoint that also contains qwen2.5 coder:
|
42 |
# model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud'
|
43 |
|
44 |
+
# model = HfApiModel(
|
45 |
+
# max_tokens=2096,
|
46 |
+
# temperature=0.5,
|
47 |
+
# model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud',# it is possible that this model may be overloaded
|
48 |
+
# custom_role_conversions=None,
|
49 |
+
# )
|
50 |
+
|
51 |
+
# Candidate models, probed in order until one answers (see get_available_model).
# Previously tried alternatives, kept for reference:
#   'https://wxknx1kg971u7k1n.us-east-1.aws.endpoints.huggingface.cloud/'
#   'https://jc26mwg228mkj8dw.us-east-1.aws.endpoints.huggingface.cloud/'
#   'https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud'
#   'meta-llama/Llama-3.2-1B-Instruct'  # did a poor job matching questions to tools
MODEL_IDS = [
    'Qwen/Qwen2.5-Coder-32B-Instruct',
    'Qwen/Qwen2.5-Coder-14B-Instruct',
    'Qwen/Qwen2.5-Coder-7B-Instruct',
    'Qwen/Qwen2.5-Coder-3B-Instruct',
    'Qwen/Qwen2.5-Coder-1.5B-Instruct',
    # Add here whichever model is working for you
]
|
63 |
+
|
64 |
+
def is_model_overloaded(model_url):
    """Verify if the model is overloaded by doing a test call.

    Sends a tiny POST probe to *model_url* and treats HTTP 503 (Service
    Unavailable), 404 (Not Found) and 424 (Failed Dependency) — as well as
    any transport-level error — as "overloaded/unavailable".

    NOTE(review): relies on module-level `requests` and `verbose` defined
    earlier in the file — confirm both are in scope.

    Returns:
        bool: True when the model should be skipped, False when it answered.
    """
    try:
        # Fix: a timeout is required — without one a hung endpoint would
        # block app startup indefinitely.
        response = requests.post(model_url, json={"inputs": "Test"}, timeout=10)
        if verbose:
            print(response.status_code)
        # 503 = overloaded, 404 = not found, 424 = failed dependency.
        return response.status_code in (503, 404, 424)
    except requests.RequestException:
        return True  # any request error counts as overloaded/unavailable
79 |
+
|
80 |
+
def get_available_model():
    """Select the first model available from the list.

    Probes each entry of MODEL_IDS in order and returns the first one that
    is not overloaded; if every probe fails, falls back to MODEL_IDS[0].
    """
    fallback = MODEL_IDS[0]
    for candidate in MODEL_IDS:
        print("trying", candidate)
        if is_model_overloaded(candidate):
            continue
        return candidate
    # All candidates failed their probe: use the first model by default.
    return fallback
|
87 |
+
|
88 |
+
if verbose:
    print("Checking available models.")

# Pick whichever entry of MODEL_IDS answered the availability probe.
selected_model_id = get_available_model()

model = HfApiModel(
    max_tokens=1048,
    temperature=0.5,
    model_id=selected_model_id,  # model selected automatically from the list
    custom_role_conversions=None,
)
|
101 |
|
102 |
+
############# END: MODEL SELECTION ################################################
|
103 |
+
|
104 |
+
|
105 |
|
106 |
# Import tool from Hub
|
107 |
image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)
|