removed logger
app.py CHANGED
@@ -1,19 +1,15 @@
 import torch
-import logging
 from diffusers import DiffusionPipeline
 import gradio as gr
 import numpy as np
-# from PIL import Image, ImageDraw
-import threading
 import openai
 import os
 import spaces
 import base64
-import traceback
 
 # Setup logging
-logging.basicConfig(level=logging.DEBUG)
-logger = logging.getLogger(__name__)
+# logging.basicConfig(level=logging.DEBUG)
+# logger = logging.getLogger(__name__)
 
 # Retrieve the OpenAI API key from the environment
 API_KEY = os.getenv('OPEN_AI_API_KEYS')
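Note: os.getenv('OPEN_AI_API_KEYS') quietly returns None when the secret is missing, and the failure only surfaces later as an authentication error inside generation. A minimal fail-fast guard, as a sketch (the variable name matches the file; the check itself is an addition):

API_KEY = os.getenv('OPEN_AI_API_KEYS')
if API_KEY is None:
    # Surface a missing Space secret immediately rather than as an
    # OpenAI auth error in the middle of a chat request.
    raise RuntimeError("OPEN_AI_API_KEYS is not set for this Space.")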
@@ -25,12 +21,6 @@ DESCRIPTION = '''
 <p style="text-align: center;">For Instructions on how to use the models <a href="https://huggingface.co/spaces/sandz7/chimera/blob/main/README.md"><b>view this</b></a></p>
 </div>
 '''
-# DESCRIPTION = '''
-# <div>
-# <h1 style="text-align: center;">Chimera Image Generation</h1>
-# <p>This contains a Stable Diffusor from <a href="https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0"><b>stabilityai/stable-diffusion-xl-base-1.0</b></a> and a Multimodal from <a href="https://huggingface.co/xtuner/llava-llama-3-8b-v1_1-transformers"><b>xtuner/llava-llama-3-8b-v1_1-transformers</b></a></p>
-# </div>
-# '''
 
 # load both base and refiner
 base = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, use_safetensors=True, variant="fp16").to("cuda:0")
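The Space loads the base and refiner as two full pipelines on cuda:0. For reference, a common diffusers pattern (not what this file does) lets the refiner reuse the base pipeline's second text encoder and VAE to cut memory; a sketch under that assumption:

import torch
from diffusers import DiffusionPipeline

base = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    torch_dtype=torch.float16, use_safetensors=True, variant="fp16",
).to("cuda:0")

# Reuse the base's text_encoder_2 and VAE instead of loading second copies.
refiner = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-refiner-1.0",
    text_encoder_2=base.text_encoder_2,
    vae=base.vae,
    torch_dtype=torch.float16, use_safetensors=True, variant="fp16",
).to("cuda:0")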
@@ -42,10 +32,6 @@ refiner = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-ref
                                             variant="fp16").to("cuda:0")
 
 chat_mode = {}
-# class ChatMode:
-#     def __init__(self):
-#         self.modes = {}
-#         self.current_mode = None
 
 def encode_image(image_path):
     chat_mode["the_mode"] = "diffusing"
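Only the first line of encode_image survives in the hunk. Given how generation consumes its result below, the body is presumably a straight base64 read of the file; a sketch, assuming exactly that:

import base64

def encode_image(image_path):
    chat_mode["the_mode"] = "diffusing"
    # Read the file bytes and return them base64-encoded, the form the
    # OpenAI image message ("data:image/jpeg;base64,...") expects.
    with open(image_path, "rb") as f:
        return base64.b64encode(f.read()).decode("utf-8")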
@@ -72,7 +58,6 @@ def generation(message, history):
 
     if image_path is None:
         chat_mode["mode"] = "text"
-        # input_prompt = message if isinstance(message, str) else message.get("text", "")
         client = openai.OpenAI(api_key=API_KEY)
         stream = client.chat.completions.create(
             model="gpt-3.5-turbo",
@@ -83,7 +68,6 @@ def generation(message, history):
         return stream
     else:
         chat_mode["mode"] = "image"
-        # input_prompt = message if isinstance(message, str) else message.get("text", "")
         base64_image = encode_image(image_path=image_path)
         client = openai.OpenAI(api_key=API_KEY)
         stream = client.chat.completions.create(
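The image branch feeds base64_image into a streamed chat completion, but the hunk cuts off before the messages payload. The usual shape of such a call, assuming a vision-capable model and the data-URL content format (the model choice and prompt text here are illustrative, not taken from the file):

client = openai.OpenAI(api_key=API_KEY)
stream = client.chat.completions.create(
    model="gpt-4o",  # assumption: the text branch uses gpt-3.5-turbo, which cannot take images
    messages=[{
        "role": "user",
        "content": [
            {"type": "text", "text": "Describe this image."},
            {"type": "image_url",
             "image_url": {"url": f"data:image/jpeg;base64,{base64_image}"}},
        ],
    }],
    stream=True,
)
for chunk in stream:
    # Each streamed chunk carries an incremental piece of the reply.
    if chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="")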
@@ -110,50 +94,21 @@ def diffusing(prompt: str,
     """
 
     # Generate image based on text
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-        image = refiner(
-            prompt=prompt,
-            num_inference_steps=n_steps,
-            denoising_start=denoising,
-            image=image_base
-        ).images[0]
-
-        logger.debug("Refined image generated successfully.")
-
-        return image
-
-    except Exception as e:
-        logger.error(f"Error in diffusing: {str(e)}")
-        logger.error(traceback.format_exc())
-        raise
-    # image_base = base(
-    #     prompt=prompt,
-    #     num_inference_steps=n_steps,
-    #     denoising_end=denoising,
-    #     output_type="latent"
-    # ).images
-    # image = refiner(
-    #     prompt=prompt,
-    #     num_inference_steps=n_steps,
-    #     denoising_start=denoising,
-    #     image=image_base
-    # ).images[0]
-
-    # return image
+    image_base = base(
+        prompt=prompt,
+        num_inference_steps=n_steps,
+        denoising_end=denoising,
+        output_type="latent"
+    ).images
+
+    image = refiner(
+        prompt=prompt,
+        num_inference_steps=n_steps,
+        denoising_start=denoising,
+        image=image_base
+    ).images[0]
+
+    return image
 
 def check_cuda_availability():
     if torch.cuda.is_available():
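The uncommented block is the SDXL ensemble-of-experts handoff: the base pipeline denoises the first fraction of the schedule (denoising_end) and returns latents, and the refiner resumes at that same fraction (denoising_start) to finish the image. For example, assuming diffusing is called with n_steps=40 and denoising=0.8, the base runs roughly the first 32 steps and the refiner the final 8:

# Hypothetical call; the real defaults for n_steps and denoising are not
# visible in this hunk.
image = diffusing(
    "an astronaut riding a green horse",
    n_steps=40,     # total steps shared across both experts
    denoising=0.8,  # base covers [0, 0.8) of the schedule, refiner [0.8, 1.0]
)
image.save("output.png")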
@@ -175,7 +130,6 @@ def bot_comms(message, history):
         message = {"text": message}
 
     if message["text"] == "check cuda":
-        logger.debug("Checking CUDA availability.")
         yield check_cuda_availability()
         return
 
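check_cuda_availability is truncated at its first line in both hunks. A minimal sketch consistent with how bot_comms yields its result as a chat message (the exact wording is an assumption):

def check_cuda_availability():
    if torch.cuda.is_available():
        # Report the device the pipelines were placed on.
        return f"CUDA is available: {torch.cuda.get_device_name(0)}"
    return "CUDA is not available."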