import torch
import logging
import os
import threading

import gradio as gr
import numpy as np
import openai
import spaces
from PIL import Image, ImageDraw
from diffusers import DiffusionPipeline
# TextIteratorStreamer, AutoProcessor, LlavaForConditionalGeneration, numpy,
# PIL, and threading are only needed by the commented-out LLaVA path below.
from transformers import TextIteratorStreamer, AutoProcessor, LlavaForConditionalGeneration

# Setup logging
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)

# Retrieve the OpenAI API key from the environment
API_KEY = os.getenv('OPEN_AI_API_KEYS')
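# NOTE: if OPEN_AI_API_KEYS is unset, openai.OpenAI(api_key=None) falls back to
# the OPENAI_API_KEY environment variable and raises at client creation if that
# is also missing, so the chat path fails fast rather than mid-stream.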

DESCRIPTION = '''
<div>
<h1 style="text-align: center;">Chimera Image Generation</h1>
<p style="text-align: center;">This contains a Stable Diffusor from <a href="https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0"><b>stabilityai/stable-diffusion-xl-base-1.0</b></a></p>
</div>
'''
# DESCRIPTION = '''
# <div>
# <h1 style="text-align: center;">Chimera Image Generation</h1>
# <p>This contains a Stable Diffusor from <a href="https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0"><b>stabilityai/stable-diffusion-xl-base-1.0</b></a> and a Multimodal from <a href="https://huggingface.co/xtuner/llava-llama-3-8b-v1_1-transformers"><b>xtuner/llava-llama-3-8b-v1_1-transformers</b></a></p>
# </div>
# '''

# Initialize the models
# llava_model = LlavaForConditionalGeneration.from_pretrained(
#     "xtuner/llava-llama-3-8b-v1_1-transformers",
#     torch_dtype=torch.float16,
#     low_cpu_mem_usage=True,
# )

# llava_model.to("cuda:0")

# processor = AutoProcessor.from_pretrained("xtuner/llava-llama-3-8b-v1_1-transformers")

# llava_model.generation_config.eos_token_id = 128009

# # Initialize Stable Diffusion pipelines
# base = DiffusionPipeline.from_pretrained(
#     "stabilityai/stable-diffusion-xl-base-1.0",
#     torch_dtype=torch.float16,
#     variant="fp16",
#     use_safetensors=True,
# )
# base.to('cuda')

# refiner = DiffusionPipeline.from_pretrained(
#     "stabilityai/stable-diffusion-xl-base-1.0",
#     text_encoder_2=base.text_encoder_2,
#     vae=base.vae,
#     torch_dtype=torch.float16,
#     use_safetensors=True,
#     variant="fp16",
# )
# refiner.to('cuda')

# load both base and refiner
base = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, use_safetensors=True, variant="fp16").to("cuda:0")
refiner = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0",
                                            text_encoder_2=base.text_encoder_2,
                                            vae=base.vae,
                                            torch_dtype=torch.float16,
                                            use_safetensors=True,
                                            variant="fp16").to("cuda:0")

class ModeManager:
    """Tracks the current UI mode; defaults to "chatting" until one is set."""

    def __init__(self):
        self.mode = None

    def set_mode(self, mode):
        self.mode = mode

    def get_mode(self):
        return self.mode if self.mode is not None else "chatting"

mode_manager = ModeManager()
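# mode_manager is only consulted by the commented-out imagery/chatting switch
# below; the live UI simply exposes image generation and chat side by side.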

def multimodal_and_generation(message, history):
    """
    Generates a response based on the input message and optionally an image.
    """
    # image_path = None
    # if "files" in message and message["files"]:
    #     if type(message["files"][-1]) == dict:
    #         image_path = message["files"][-1]["path"]
    #     else:
    #         image_path = message["files"][-1]
    # else:
    #     for hist in history:
    #         if type(hist[0]) == tuple:
    #             image_path = hist[0][0]

    # if image_path is None:
    input_prompt = message if isinstance(message, str) else message.get("text", "")
    client = openai.OpenAI(api_key=API_KEY)
    stream = client.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "system", "content": "You are a helpful assistant called 'chimera'."},
                    {"role": "user", "content": input_prompt}],
        stream=True,
    )
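    # stream=True makes the client return an iterator of ChatCompletionChunk
    # objects; bot_comms consumes it incrementally to stream tokens to the UI.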
    return stream
    # else:
    #     prompt = f"user\n\n<image>\n{message['text']}assistant\n\n"
    #     image = Image.open(image_path)
    #     inputs = processor(prompt, image, return_tensors='pt').to(0, torch.float16)
    #     streamer = TextIteratorStreamer(processor.tokenizer, **{"skip_special_tokens": False, "skip_prompt": True})
    #     generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=1024, do_sample=False)

    #     thread = threading.Thread(target=llava_model.generate, kwargs=generation_kwargs)
    #     thread.start()

    #     return streamer

# Diffusion entry point: turns a text prompt into an image on the GPU.
# spaces.GPU(duration=120) requests a ZeroGPU worker for up to 120s per call.
@spaces.GPU(duration=120)
def diffusing(prompt: str,
              history=None):
    """
    Runs the SDXL base pipeline for the first 80% of the denoising steps and
    hands the latents to the refiner for the final 20%. `history` is unused;
    the live UI passes only the prompt.
    """

    # n_steps = int(n_steps)

    # Generate image based on text
    image_base = base(
        prompt=prompt,
        num_inference_steps=40,
        denoising_end=0.8,
        output_type="latent"
    ).images
    image = refiner(
        prompt=prompt,
        num_inference_steps=40,
        denoising_start=0.8,
        image=image_base
    ).images[0]

    return image
    
def check_cuda_availability():
    if torch.cuda.is_available():
        return f"GPU: {torch.cuda.get_device_name(0)}"
    else:
        return "No CUDA device found."

mode = ""
# logger.debug(f"\n\nthis is the mode before calling it in bot_comms: {mode}\n\n")
    
# Image created from diffusing
image_created = {}

@spaces.GPU(duration=120)
def bot_comms(message, history):
    """
    Handles communication between Gradio and the models.
    """

    # Ensure message is a dictionary.
    if not isinstance(message, dict):
        message = {"text": message}

    if message["text"] == "check cuda":
        logger.debug("Checking CUDA availability.")
        return check_cuda_availability()
    
    # if message["text"] == "imagery":
    #     logger.debug("Switching to imagery mode.")
    #     # mode_manager.set_mode("imagery")
    #     mode += "imagery"
    #     # logger.debug(f"\nimagery mode: {mode}\n")
    #     return "Imagery On! Type your prompt to make the image πŸ–ΌοΈ"
    
    # if message["text"] == "chatting":
    #     logger.debug("Switching to chatting mode.")
    #     # mode_manager.set_mode("chatting")
    #     mode += "chatting"
    #     # logger.debug(f"\nchatting mode: {mode}\n")
    #     return "Imagery Off. Ask me any questions. β˜„οΈ"
    
    # if mode == "imagery":
    #     logger.debug("Processing imagery prompt.")
    #     if isinstance(message, dict) and "text" in message:
    #         message = message["text"]
    #     image = diffusing(message)
    #     # mode_gradio("imagery")
    #     image_created["Image"] = image
    #     return image
        
    buffer = ""
    gpt_outputs = []
    # if mode == "chatting" or mode == "":
    # logger.debug("On chatting or no mode.\n\n")
    stream = multimodal_and_generation(message, history)
    mode += "chatting"
    for chunk in stream:
        if chunk is not None and hasattr(chunk.choices[0].delta, "content"):
            logger.debug("\n\nFound the chunk in stream for gpt-3.5\n\n")
            text = chunk.choices[0].delta.content
            if text:
                gpt_outputs.append(text)
                buffer += text
            yield "".join(gpt_outputs)

chatbot = gr.Chatbot(height=600, label="Chimera AI")
# chat_input = gr.MultimodalTextbox(interactive=True, file_types=["images"], placeholder="Enter your question or upload an image.", show_label=False)

# with gr.Blocks(fill_height=True) as demo:
#     gr.Markdown(DESCRIPTION)
    
#     # image_output = gr.Image(type="pil", label="Generated Image")
    
#     # def process_response(message, history):
#     #     response = bot_comms(message, history)
#     #     if isinstance(response, tuple) and len(response) == 2:
#     #         text, image = response
#     #         return text, image
#     #     return response, None

#     # chatbot_output = gr.Chatbot(height=600, label="Chimera AI")

#     # chat_input.submit(process_response, inputs=[chat_input, chatbot], outputs=[chatbot_output, image_output])
#     if mode_manager.get_mode() == "imagery":
#     #     # Ensure's a unique block ID for image output
#         gr.Interface(
#             fn=diffusing,
#             inputs="text",
#             outputs="image",
#             fill_height=True,
#         )
#     #     with gr.Blocks():
#     #         gr.Interface(
#     #             fn=diffusing,
#     #             inputs='text',
#     #             outputs='image',
#     #             fill_height=True,
#     #         )
#     #         # Customize chatinterface to handle tuples
#     #         # def custom_fn(*args, **kwargs):
#     #         #     result = list(bot_comms(*args, **kwargs))
#     #         #     output = []
#     #         #     for item in result:
#     #         #         if isinstance(item, tuple) and isinstance(item[1], Image.Image):
#     #         #             output.append((item[0], None))
#     #         #             output.append((None, item[1]))
#     #         #         else:
#     #         #             output.append(item)
#     #         #     return output
#     # else:
#     #     # Unique block ID for chat interface
#     #     with gr.Blocks():
#     #         gr.ChatInterface(
#     #             fn=bot_comms,
#     #             chatbot=chatbot,
#     #             fill_height=True,
#     #             multimodal=True,
#     #             textbox=chat_input,
#     #         )

# if __name__ == "__main__":
#     demo.launch()

with gr.Blocks(fill_height=True) as demo:
    with gr.Row():
        with gr.Column():
            gr.Markdown(DESCRIPTION)
            image_prompt = gr.Textbox(label="Image Prompt")
            output_image = gr.Image(label="Generated Image")
            generate_image_button = gr.Button("Generate Image")
            generate_image_button.click(fn=diffusing, inputs=image_prompt, outputs=output_image)
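            # The click event passes the textbox value to diffusing() and
            # renders the returned PIL image in the gr.Image component.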

        with gr.Column():
            gr.Markdown('''
<div>
<h1 style="text-align: center;">Chimera Text Generation</h1>
<p style="text-align: center;">This contains a Generative LLM from <a href="https://openai.com/"><b>Open AI</b></a> called GPT-3.5-Turbo</p>
</div>
''')
            # text_prompt = gr.Textbox(label="Text Prompt")
            # text_output = gr.Textbox(label="Generated Text")
            # generate_text_button = gr.Button("Generated Text")
            # generate_text_button.click(fn=bot_comms, inputs=text_prompt, outputs=text_output)
            chat = gr.ChatInterface(fn=bot_comms)

    # gr.Markdown(DESCRIPTION)
    # if mode == "chatting":
    #     gr.Interface(
    #         fn=bot_comms,
    #         inputs="text",
    #         outputs="text",
    #         fill_height=True,
    #     )
    # g
    #     # logger.debug(f"\n|now on chat interface|\n")
    #     gr.ChatInterface(
    #         fn=bot_comms,
    #         chatbot=chatbot,
    #         fill_height=True,
    #         multimodal=True,
    #         textbox=chat_input,
    #     )

if __name__ == "__main__":
    demo.launch()