Ryukijano commited on
Commit
6f54de8
1 Parent(s): dff7251

change the app

Browse files
Files changed (1) hide show
  1. app.py +15 -13
app.py CHANGED
@@ -1,6 +1,9 @@
 
 
 
1
  import gradio as gr
2
  import os
3
- import spaces
4
  from transformers import GemmaTokenizer, AutoModelForCausalLM
5
  from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
6
  from threading import Thread
@@ -16,8 +19,8 @@ DESCRIPTION = '''
16
  <a style="display:inline-block" href="https://research.nvidia.com/labs/toronto-ai/LLaMA-Mesh/"><img src='https://img.shields.io/badge/public_website-8A2BE2'></a>
17
  <a style="display:inline-block; margin-left: .5em" href="https://github.com/nv-tlabs/LLaMA-Mesh"><img src='https://img.shields.io/github/stars/nv-tlabs/LLaMA-Mesh?style=social'/></a>
18
  </div>
19
- <p>LLaMA-Mesh: Unifying 3D Mesh Generation with Language Models.<a style="display:inline-block" href="https://research.nvidia.com/labs/toronto-ai/LLaMA-Mesh/">[Project Page]</a> <a style="display:inline-block" href="https://github.com/nv-tlabs/LLaMA-Mesh">[Code]</a></p>
20
- <p> Notice: (1) This demo supports up to 4096 tokens due to computational limits, while our full model supports 8k tokens. This limitation may result in incomplete generated meshes. To experience the full 8k token context, please run our model locally.</p>
21
  <p>(2) We only support generating a single mesh per dialog round. To generate another mesh, click the "clear" button and start a new dialog.</p>
22
  <p>(3) If the LLM refuses to generate a 3D mesh, try adding more explicit instructions to the prompt, such as "create a 3D model of a table <strong>in OBJ format</strong>." A more effective approach is to request the mesh generation at the start of the dialog.</p>
23
  </div>
@@ -75,7 +78,7 @@ def apply_gradient_color(mesh_text):
75
  str: Path to the GLB file with gradient colors applied.
76
  """
77
  # Load the mesh
78
- temp_file = tempfile.NamedTemporaryFile(suffix=f"", delete=False).name#"temp_mesh.obj"
79
  with open(temp_file+".obj", "w") as f:
80
  f.write(mesh_text)
81
  # return temp_file
@@ -114,7 +117,7 @@ def visualize_mesh(mesh_text):
114
  f.write(mesh_text)
115
  return temp_file
116
 
117
- @spaces.GPU(duration=120)
118
  def chat_llama3_8b(message: str,
119
  history: list,
120
  temperature: float,
@@ -138,9 +141,7 @@ def chat_llama3_8b(message: str,
138
  input_ids = tokenizer.apply_chat_template(conversation, return_tensors="pt").to(model.device)
139
 
140
  streamer = TextIteratorStreamer(tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True)
141
- # print(max_new_tokens)
142
- max_new_tokens=4096
143
- temperature=0.9
144
  generate_kwargs = dict(
145
  input_ids= input_ids,
146
  streamer=streamer,
@@ -181,16 +182,14 @@ with gr.Blocks(fill_height=True, css=css) as demo:
181
  gr.Slider(minimum=0,
182
  maximum=1,
183
  step=0.1,
184
- value=0.9,
185
  label="Temperature",
186
- interactive = False,
187
  render=False),
188
  gr.Slider(minimum=128,
189
- maximum=4096,
190
  step=1,
191
  value=4096,
192
  label="Max new tokens",
193
- interactive = False,
194
  render=False),
195
  ],
196
  examples=[
@@ -237,4 +236,7 @@ with gr.Blocks(fill_height=True, css=css) as demo:
237
 
238
  if __name__ == "__main__":
239
  demo.launch()
240
-
 
 
 
 
1
+ import os
2
+ os.environ["TOKENIZERS_PARALLELISM"] = "false"
3
+
4
  import gradio as gr
5
  import os
6
+ # import spaces
7
  from transformers import GemmaTokenizer, AutoModelForCausalLM
8
  from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
9
  from threading import Thread
 
19
  <a style="display:inline-block" href="https://research.nvidia.com/labs/toronto-ai/LLaMA-Mesh/"><img src='https://img.shields.io/badge/public_website-8A2BE2'></a>
20
  <a style="display:inline-block; margin-left: .5em" href="https://github.com/nv-tlabs/LLaMA-Mesh"><img src='https://img.shields.io/github/stars/nv-tlabs/LLaMA-Mesh?style=social'/></a>
21
  </div>
22
+ <p>LLaMA-Mesh: Unifying 3D Mesh Generation with Language Models. <a style="display:inline-block" href="https://research.nvidia.com/labs/toronto-ai/LLaMA-Mesh/">[Project Page]</a> <a style="display:inline-block" href="https://github.com/nv-tlabs/LLaMA-Mesh">[Code]</a></p>
23
 + <p> Notice: (1) The default token length is 4096. If you observe incomplete generated meshes, try to increase the maximum token length to 8192.</p>
24
  <p>(2) We only support generating a single mesh per dialog round. To generate another mesh, click the "clear" button and start a new dialog.</p>
25
  <p>(3) If the LLM refuses to generate a 3D mesh, try adding more explicit instructions to the prompt, such as "create a 3D model of a table <strong>in OBJ format</strong>." A more effective approach is to request the mesh generation at the start of the dialog.</p>
26
  </div>
 
78
  str: Path to the GLB file with gradient colors applied.
79
  """
80
  # Load the mesh
81
+ temp_file = tempfile.NamedTemporaryFile(suffix=f"", delete=False).name
82
  with open(temp_file+".obj", "w") as f:
83
  f.write(mesh_text)
84
  # return temp_file
 
117
  f.write(mesh_text)
118
  return temp_file
119
 
120
+ # @spaces.GPU(duration=120)
121
  def chat_llama3_8b(message: str,
122
  history: list,
123
  temperature: float,
 
141
  input_ids = tokenizer.apply_chat_template(conversation, return_tensors="pt").to(model.device)
142
 
143
  streamer = TextIteratorStreamer(tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True)
144
+
 
 
145
  generate_kwargs = dict(
146
  input_ids= input_ids,
147
  streamer=streamer,
 
182
  gr.Slider(minimum=0,
183
  maximum=1,
184
  step=0.1,
185
+ value=0.95,
186
  label="Temperature",
 
187
  render=False),
188
  gr.Slider(minimum=128,
189
+ maximum=8192,
190
  step=1,
191
  value=4096,
192
  label="Max new tokens",
 
193
  render=False),
194
  ],
195
  examples=[
 
236
 
237
  if __name__ == "__main__":
238
  demo.launch()
239
+
240
+
241
+
242
+