sobarine committed on
Commit
a6e9a02
·
verified ·
1 Parent(s): 40eb565

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +96 -0
app.py ADDED
@@ -0,0 +1,96 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ from transformers import AutoTokenizer, AutoModelForCausalLM, TextIteratorStreamer
3
+ import google.generativeai as genai
4
+ from threading import Thread
5
+ import trimesh
6
+ import numpy as np
7
+ import tempfile
8
+ import os
9
+
10
# Configure the Gemini API key securely from Streamlit's secrets.
# NOTE(review): genai is configured here but never called anywhere below —
# presumably reserved for a future feature; confirm before removing.
genai.configure(api_key=st.secrets["GOOGLE_API_KEY"])

# Expose the Hugging Face token through the environment so that
# `from_pretrained` can download the (gated) model weights.
os.environ["HF_TOKEN"] = st.secrets["HF_TOKEN"]

# Load the LLaMA-Mesh model and tokenizer once at module import time.
# `device_map="auto"` lets accelerate place the weights on the available
# device(s); `low_cpu_mem_usage=True` reduces peak host memory during load.
model_path = "Zhengyi/LLaMA-Mesh"
tokenizer = AutoTokenizer.from_pretrained(model_path)
model = AutoModelForCausalLM.from_pretrained(model_path, device_map="auto", low_cpu_mem_usage=True)
# Stop generation at either the model's EOS token or the Llama-3 style
# end-of-turn marker "<|eot_id|>".
terminators = [tokenizer.eos_token_id, tokenizer.convert_tokens_to_ids("<|eot_id|>")]
21
+
22
def generate_mesh(prompt, temperature=0.9, max_new_tokens=4096):
    """Generate mesh text (expected Wavefront-OBJ content) from a prompt.

    Runs LLaMA-Mesh generation in a background thread and drains the
    token streamer into a single string.

    Args:
        prompt: Natural-language description of the desired 3D model.
        temperature: Sampling temperature; 0 selects greedy decoding.
        max_new_tokens: Upper bound on the number of generated tokens.

    Returns:
        The concatenated generated text.
    """
    conversation = [{"role": "user", "content": prompt}]
    input_ids = tokenizer.apply_chat_template(conversation, return_tensors="pt").to(model.device)

    # skip_prompt avoids echoing the input back; the timeout guards against
    # a stalled generation thread blocking the consuming loop forever.
    streamer = TextIteratorStreamer(tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True)
    generate_kwargs = dict(
        input_ids=input_ids,
        streamer=streamer,
        max_new_tokens=max_new_tokens,
        do_sample=True,
        temperature=temperature,
        eos_token_id=terminators,
    )

    # Greedy decoding: transformers warns on / rejects temperature == 0 when
    # sampling is enabled, so disable sampling AND drop the temperature key
    # (the original passed temperature=0 through, triggering that check).
    if temperature == 0:
        generate_kwargs["do_sample"] = False
        generate_kwargs.pop("temperature")

    worker = Thread(target=model.generate, kwargs=generate_kwargs)
    worker.start()

    outputs = []
    for text in streamer:
        outputs.append(text)
    # The streamer is exhausted only after generation ends, but join anyway
    # so the worker thread is fully finished before we return.
    worker.join()
    return "".join(outputs)
46
+
47
def apply_gradient_color(mesh_text):
    """Color a mesh with a red-to-blue gradient along Y and export as GLB.

    Args:
        mesh_text: Wavefront OBJ file content as a string.

    Returns:
        Filesystem path of the written binary-glTF (.glb) file.

    Raises:
        ValueError: If trimesh cannot parse the OBJ content.
    """
    # Write the OBJ text to a temp file so trimesh can parse it. mkstemp
    # avoids the stray zero-byte file the original left behind via
    # NamedTemporaryFile(delete=False), and we remove the .obj afterwards.
    fd, obj_path = tempfile.mkstemp(suffix=".obj")
    try:
        with os.fdopen(fd, "w") as f:
            f.write(mesh_text)
        mesh = trimesh.load_mesh(obj_path, file_type='obj')
    finally:
        os.remove(obj_path)  # only needed for parsing

    vertices = mesh.vertices
    y_values = vertices[:, 1]

    # Normalize Y into [0, 1]; guard the flat-mesh case (zero Y extent),
    # which previously divided by zero and produced NaN colors.
    y_span = y_values.max() - y_values.min()
    if y_span > 0:
        y_normalized = (y_values - y_values.min()) / y_span
    else:
        y_normalized = np.zeros_like(y_values)

    # RGBA per vertex: red grows with height, blue shrinks, full opacity.
    colors = np.zeros((len(vertices), 4))
    colors[:, 0] = y_normalized
    colors[:, 2] = 1 - y_normalized
    colors[:, 3] = 1.0
    mesh.visual.vertex_colors = colors

    # Reuse the (now-deleted) temp path's stem for the GLB output.
    glb_path = obj_path[:-4] + ".glb"
    with open(glb_path, "wb") as f:
        f.write(trimesh.exchange.gltf.export_glb(mesh))
    return glb_path
69
+
70
# ---------------------------------------------------------------------------
# Streamlit UI: prompt in, generated OBJ text plus downloadable GLB out.
# ---------------------------------------------------------------------------
st.title("Ever AI - 3D CAD Model Generator")
st.write("Use generative AI to create 3D CAD models based on your prompt.")

prompt = st.text_input("Enter your prompt:", "Create a 3D model of a house.")

if st.button("Generate CAD Model"):
    try:
        mesh_obj_text = generate_mesh(prompt)

        # Persist the raw OBJ text beside the app for later inspection.
        with open("generated_model.obj", "w") as obj_file:
            obj_file.write(mesh_obj_text)

        st.write("CAD Model Generated:")
        st.code(mesh_obj_text, language='plaintext')

        # Convert to a vertex-colored GLB and offer it for download.
        glb_path = apply_gradient_color(mesh_obj_text)
        with open(glb_path, "rb") as glb_file:
            st.download_button(
                label="Download GLB File",
                data=glb_file,
                file_name="generated_model.glb",
                mime="application/octet-stream",
            )
    except Exception as e:
        st.error(f"Error: {e}")