import base64
import concurrent.futures
import io
import json
import os
import random

import gradio as gr
import requests
import spaces
import torch
from openai import OpenAI
from PIL import Image

from .models import IMAGE_GENERATION_MODELS, VIDEO_GENERATION_MODELS, B2I_MODELS, load_pipeline
from serve.upload import get_random_mscoco_prompt, get_random_video_prompt, get_ssh_random_video_prompt, get_ssh_random_image_prompt
from serve.constants import SSH_CACHE_OPENSOURCE, SSH_CACHE_ADVANCE, SSH_CACHE_PIKA, SSH_CACHE_SORA, SSH_CACHE_IMAGE

class ModelManager:
    def __init__(self):
        self.model_ig_list = IMAGE_GENERATION_MODELS
        self.model_ie_list = []  # IMAGE_EDITION_MODELS (image editing currently disabled)
        self.model_vg_list = VIDEO_GENERATION_MODELS
        self.model_b2i_list = B2I_MODELS
        self.loaded_models = {}  # model_name -> pipeline cache

    def load_model_pipe(self, model_name):
        # Lazily load a pipeline on first use and cache it for later requests.
        if model_name not in self.loaded_models:
            pipe = load_pipeline(model_name)
            self.loaded_models[model_name] = pipe
        else:
            pipe = self.loaded_models[model_name]
        return pipe
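
    # Note: `loaded_models` is a plain dict with no locking, while the
    # ThreadPoolExecutor fan-outs below call load_model_pipe concurrently, so
    # two threads can race to load the same pipeline twice. Harmless beyond
    # wasted time and memory, but a threading.Lock around the cache check
    # would make the lazy load safe under concurrency.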

    def generate_image_ig(self, prompt, model_name):
        pipe = self.load_model_pipe(model_name)
        if 'Stable-cascade' not in model_name:
            result = pipe(prompt=prompt).images[0]
        else:
            # Stable Cascade is a two-stage pipeline: the prior produces image
            # embeddings, which the decoder turns into the final image.
            prior, decoder = pipe
            prior.enable_model_cpu_offload()
            prior_output = prior(
                prompt=prompt,
                height=512,
                width=512,
                negative_prompt='',
                guidance_scale=4.0,
                num_images_per_prompt=1,
                num_inference_steps=20
            )
            decoder.enable_model_cpu_offload()
            result = decoder(
                image_embeddings=prior_output.image_embeddings.to(torch.float16),
                prompt=prompt,
                negative_prompt='',
                guidance_scale=0.0,
                output_type="pil",
                num_inference_steps=10
            ).images[0]
        return result
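
    # Assumption made explicit: for "Stable-cascade" entries, load_pipeline is
    # expected to return a (prior, decoder) tuple (e.g. diffusers'
    # StableCascadePriorPipeline and StableCascadeDecoderPipeline); for every
    # other model, a single callable pipeline whose output exposes `.images`.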

    def generate_image_ig_api(self, prompt, model_name):
        pipe = self.load_model_pipe(model_name)
        result = pipe(prompt=prompt)
        return result

    def generate_image_ig_parallel_anony(self, prompt, model_A, model_B, model_C, model_D):
        if model_A == "" and model_B == "" and model_C == "" and model_D == "":
            from .matchmaker import matchmaker
            # Model indices currently excluded from matchmaking.
            not_run = [20, 21, 22, 25, 26, 30]
            model_ids = matchmaker(num_players=len(self.model_ig_list), not_run=not_run)
            print(model_ids)
            model_names = [self.model_ig_list[i] for i in model_ids]
            print(model_names)
        else:
            model_names = [model_A, model_B, model_C, model_D]

        # Run all four models concurrently, dispatching locally hosted
        # "huggingface" pipelines and API-backed models to different generators.
        with concurrent.futures.ThreadPoolExecutor() as executor:
            futures = [executor.submit(self.generate_image_ig, prompt, model) if model.startswith("huggingface")
                       else executor.submit(self.generate_image_ig_api, prompt, model) for model in model_names]
            results = [future.result() for future in futures]

        return results[0], results[1], results[2], results[3], \
            model_names[0], model_names[1], model_names[2], model_names[3]
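
    # The matchmaker contract is assumed, not defined in this file: given the
    # pool size and excluded indices, it returns four distinct indices to
    # battle. A hypothetical stand-in for local testing (not the project's
    # real implementation) could be:
    #
    #   def matchmaker(num_players, not_run=()):
    #       pool = [i for i in range(num_players) if i not in not_run]
    #       return random.sample(pool, 4)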

    def generate_image_b2i(self, prompt, grounding_instruction, bbox, model_name):
        pipe = self.load_model_pipe(model_name)
        if model_name == "local_MIGC_b2i":
            from model_bbox.MIGC.inference_single_image import inference_image
            result = inference_image(pipe, prompt, grounding_instruction, bbox)
        elif model_name == "huggingface_ReCo_b2i":
            from model_bbox.ReCo.inference import inference_image
            result = inference_image(pipe, prompt, grounding_instruction, bbox)
        else:
            # Fail loudly instead of returning an unbound `result`.
            raise ValueError(f"Unsupported box-to-image model: {model_name}")
        return result

    def generate_image_b2i_parallel_anony(self, prompt, grounding_instruction, bbox, model_A, model_B, model_C, model_D):
        if model_A == "" and model_B == "" and model_C == "" and model_D == "":
            from .matchmaker import matchmaker
            not_run = []
            # model_ids = matchmaker(num_players=len(self.model_b2i_list), not_run=not_run)
            model_ids = [0, 1]  # matchmaking disabled: always compare the first two b2i models
            print(model_ids)
            model_names = [self.model_b2i_list[i] for i in model_ids]
            print(model_names)
        else:
            model_names = [model_A, model_B, model_C, model_D]

        with concurrent.futures.ThreadPoolExecutor() as executor:
            futures = [executor.submit(self.generate_image_b2i, prompt, grounding_instruction, bbox, model) for model in model_names]
            results = [future.result() for future in futures]

        # Pad to four slots so the UI always receives four image/name pairs.
        # Bounds-check against len(results), which exists in both branches;
        # model_ids is only defined in the anonymous branch.
        blank_image = None
        final_results = []
        final_model_names = []
        for i in range(4):
            if i < len(results):
                # A model ran for this slot: return its output and name.
                final_results.append(results[i])
                final_model_names.append(model_names[i])
            else:
                # No result for this slot: return a blank image and empty name.
                final_results.append(blank_image)
                final_model_names.append("")

        return final_results[0], final_results[1], final_results[2], final_results[3], \
            final_model_names[0], final_model_names[1], final_model_names[2], final_model_names[3]

    def generate_image_ig_cache_anony(self, model_A, model_B, model_C, model_D):
        if model_A == "" and model_B == "" and model_C == "" and model_D == "":
            from .matchmaker import matchmaker
            not_run = [20, 21, 22]
            model_ids = matchmaker(num_players=len(self.model_ig_list), not_run=not_run)
            print(model_ids)
            model_names = [self.model_ig_list[i] for i in model_ids]
            print(model_names)
        else:
            model_names = [model_A, model_B, model_C, model_D]

        # Serve pre-generated images from the SSH cache instead of running models.
        root_dir = SSH_CACHE_IMAGE
        local_dir = "./cache_image"
        if not os.path.exists(local_dir):
            os.makedirs(local_dir)
        prompt, results = get_ssh_random_image_prompt(root_dir, local_dir, model_names)

        return results[0], results[1], results[2], results[3], \
            model_names[0], model_names[1], model_names[2], model_names[3], prompt

    def generate_video_vg_parallel_anony(self, model_A, model_B, model_C, model_D):
        if model_A == "" and model_B == "" and model_C == "" and model_D == "":
            # model_names = random.sample(self.model_vg_list, 4)
            from .matchmaker_video import matchmaker_video
            model_ids = matchmaker_video(num_players=len(self.model_vg_list))
            print(model_ids)
            model_names = [self.model_vg_list[i] for i in model_ids]
            print(model_names)
        else:
            model_names = [model_A, model_B, model_C, model_D]

        # Pick the SSH cache directory matching the sampled models; if several
        # special-cased models appear, the last match in the list wins.
        root_dir = SSH_CACHE_OPENSOURCE
        for name in model_names:
            if "Runway-Gen3" in name or "Runway-Gen2" in name or "Pika-v1.0" in name:
                root_dir = SSH_CACHE_ADVANCE
            elif "Pika-beta" in name:
                root_dir = SSH_CACHE_PIKA
            elif "Sora" in name and "OpenSora" not in name:
                root_dir = SSH_CACHE_SORA

        local_dir = "./cache_video"
        if not os.path.exists(local_dir):
            os.makedirs(local_dir)
        prompt, results = get_ssh_random_video_prompt(root_dir, local_dir, model_names)
        cache_dir = local_dir

        return results[0], results[1], results[2], results[3], \
            model_names[0], model_names[1], model_names[2], model_names[3], prompt, cache_dir

    def generate_image_ig_museum_parallel_anony(self, model_A, model_B, model_C, model_D):
        if model_A == "" and model_B == "" and model_C == "" and model_D == "":
            # model_names = random.sample(self.model_ig_list, 4)
            from .matchmaker import matchmaker
            model_ids = matchmaker(num_players=len(self.model_ig_list))
            print(model_ids)
            model_names = [self.model_ig_list[i] for i in model_ids]
            print(model_names)
        else:
            model_names = [model_A, model_B, model_C, model_D]

        # Museum mode: draw a random MS-COCO caption instead of a user prompt.
        prompt = get_random_mscoco_prompt()
        print(prompt)

        with concurrent.futures.ThreadPoolExecutor() as executor:
            futures = [executor.submit(self.generate_image_ig, prompt, model) if model.startswith("huggingface")
                       else executor.submit(self.generate_image_ig_api, prompt, model) for model in model_names]
            results = [future.result() for future in futures]

        return results[0], results[1], results[2], results[3], \
            model_names[0], model_names[1], model_names[2], model_names[3], prompt

    def generate_image_ig_parallel(self, prompt, model_A, model_B):
        model_names = [model_A, model_B]
        with concurrent.futures.ThreadPoolExecutor() as executor:
            # Note: this path dispatches local models on an "imagenhub" prefix,
            # unlike the anonymous battles above, which check for "huggingface".
            futures = [executor.submit(self.generate_image_ig, prompt, model) if model.startswith("imagenhub")
                       else executor.submit(self.generate_image_ig_api, prompt, model) for model in model_names]
            results = [future.result() for future in futures]
        return results[0], results[1]

    def generate_image_ie(self, textbox_source, textbox_target, textbox_instruct, source_image, model_name):
        pipe = self.load_model_pipe(model_name)
        result = pipe(src_image=source_image, src_prompt=textbox_source, target_prompt=textbox_target, instruct_prompt=textbox_instruct)
        return result

    def generate_image_ie_parallel(self, textbox_source, textbox_target, textbox_instruct, source_image, model_A, model_B):
        model_names = [model_A, model_B]
        with concurrent.futures.ThreadPoolExecutor() as executor:
            futures = [
                executor.submit(self.generate_image_ie, textbox_source, textbox_target, textbox_instruct, source_image, model)
                for model in model_names]
            results = [future.result() for future in futures]
        return results[0], results[1]

    def generate_image_ie_parallel_anony(self, textbox_source, textbox_target, textbox_instruct, source_image, model_A, model_B):
        if model_A == "" and model_B == "":
            # Note: model_ie_list is currently empty, so anonymous editing
            # battles will fail here until IMAGE_EDITION_MODELS is re-enabled.
            model_names = random.sample(self.model_ie_list, 2)
        else:
            model_names = [model_A, model_B]
        with concurrent.futures.ThreadPoolExecutor() as executor:
            futures = [executor.submit(self.generate_image_ie, textbox_source, textbox_target, textbox_instruct, source_image, model) for model in model_names]
            results = [future.result() for future in futures]
        return results[0], results[1], model_names[0], model_names[1]
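
# Usage sketch (illustrative only; the model names below are placeholders,
# not guaranteed entries of IMAGE_GENERATION_MODELS):
#
#   manager = ModelManager()
#
#   # Fixed head-to-head comparison with explicit models:
#   img_a, img_b = manager.generate_image_ig_parallel(
#       "a red bicycle leaning against a brick wall",
#       "imagenhub_SD_generation",   # hypothetical locally hosted pipeline
#       "openai_Dalle3_generation",  # hypothetical API-backed model
#   )
#
#   # Anonymous four-way battle; empty strings let the matchmaker pick:
#   out = manager.generate_image_ig_parallel_anony("a red bicycle", "", "", "")
#   images, names = out[:4], out[4:]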