AMfeta99 committed
Commit e3a4c52 · verified · 1 Parent(s): 8ac45ff

Update app.py

Files changed (1): app.py (+21, -9)
app.py CHANGED
@@ -7,15 +7,26 @@ from diffusers import DiffusionPipeline
 import torch
 from smolagents import OpenAIServerModel
 
-from huggingface_hub import login
 import os
+from huggingface_hub import login
 
-token = os.environ.get("HF_TOKEN")
+openai_key = os.environ.get("OPENAI_API_KEY")
+hf_token = os.environ.get("HF_TOKEN")
+
+if hf_token:
+    login(token=hf_token)
+else:
+    print("Warning: HF_TOKEN not set.")
 
-if token:
-    login(token=token)
+if openai_key:
+    # Example of how to use the OpenAI API key
+    print("OpenAI API key is set")
 else:
-    print("Warning: HF_TOKEN not set. You may not be able to access private models or tools.")
+    print("Warning: OPENAI_API_KEY not set.")
+
+print("HF_TOKEN set?", "Yes" if hf_token else "No")
+print("OPENAI_API_KEY set?", "Yes" if openai_key else "No")
+
 
 
 # =========================================================
@@ -83,13 +94,13 @@ image_generation_tool = Tool.from_space(
 search_tool = DuckDuckGoSearchTool()
 #llm_engine = InferenceClientModel("Qwen/Qwen2.5-72B-Instruct")
 
-llm_engine = InferenceClientModel("Qwen/Qwen2.5-Coder-32B-Instruct")
+#llm_engine = InferenceClientModel("Qwen/Qwen2.5-Coder-32B-Instruct")
 
 # Initialize the OpenAI model with smolagents
 llm_engine = OpenAIServerModel(
     model_id="gpt-4o-mini",  # Example: adjust to the OpenAI model you want to use
     api_base="https://api.openai.com/v1",
-    api_key=os.environ.get("OPENAI_API_KEY")
+    api_key=openai_key
 )
 
 
@@ -101,6 +112,7 @@ agent = CodeAgent(tools=[image_generation_tool, search_tool], model=llm_engine)
 
 from PIL import Image
 
+
 def generate_object_history(object_name):
     images = []
     prompts = generate_prompts_for_object(object_name)
@@ -118,8 +130,8 @@ def generate_object_history(object_name):
         result = agent.run(
             general_instruction,
             additional_args={"prompt": prompt,
-                             "width": 256,  # specify width
-                             "height": 256,  # specify height
+                             "width": 256,  # specify width
+                             "height": 256,  # specify height
                              "seed": 0,  # optional seed
                              "randomize_seed": False,  # optional
                              "num_inference_steps": 4  # optional