import gradio as gr
import requests
import os
import re
import random
from words import *
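# NOTE (assumption): words.py is not shown here; it is assumed to export the word
# lists used below: constants (consonant strings), vowels, seperators, races,
# classes, colors, outfits, weapons, objects.
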
from base64 import b64decode
from PIL import Image
import io
import numpy as np

# GPT-J-6B API
API_URL = "https://api-inference.huggingface.co/models/EleutherAI/gpt-j-6B"
#HF_TOKEN = os.environ["HF_TOKEN"]
#headers = {"Authorization": f"Bearer {HF_TOKEN}"}

prompt = """ | |
Bilbo is a hobbit rogue who wears a brown cloak and carries a ring. | |
Bremen is a human wizard, he wears a blue robe and carries a wand. | |
""" | |
examples = [["river"], ["night"], ["trees"],["table"],["laughs"]] | |
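# These examples were intended for gr.Interface-style example inputs; the
# corresponding examples= wiring in the Blocks UI below is commented out,
# so this list is currently unused.
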
def npc_randomize():
    # name is a random combination of syllables
    name = ""
    for i in range(random.randint(2, 4)):
        name += random.choice(constants)
        name += random.choice(vowels)
        if random.random() < 0.5:
            name += random.choice(constants)
        if random.random() < 0.1:
            name += random.choice(seperators)
    # capitalize first letter
    name = name[0].upper() + name[1:]
    race = random.choice(races)
    characterClass = random.choice(classes)
    pronoun = random.choices(["he", "she", "they"], weights=[0.45, 0.45, 0.1], k=1)[0]
    return name, race, characterClass, pronoun

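# Illustrative example only (actual values depend on the lists in words.py):
#   npc_randomize() -> ("Tarel", "dwarf", "bard", "he")
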
def genericDescription():
    desc = " wears a {color} {outfit}".format(color=random.choice(colors), outfit=random.choice(outfits))
    if random.random() < 0.5:
        desc += " and a {color} {outfit}".format(color=random.choice(colors), outfit=random.choice(outfits))
    if random.random() < 0.5:
        desc += " and carries a {weapon}".format(weapon=random.choice(weapons))
    elif random.random() < 0.5:
        desc += " and carries a {weapon} and a {object}".format(weapon=random.choice(weapons), object=random.choice(objects))
    else:
        desc += " and carries two {weapon}s".format(weapon=random.choice(weapons))
    return desc

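# Illustrative example only (word choices come from words.py):
#   genericDescription() -> " wears a red cloak and carries a sword"
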
def npc_generate(name, race, characterClass, pronoun):
    desc = "{name} is a {race} {characterClass}, {pronoun}".format(name=name, race=race, characterClass=characterClass, pronoun=pronoun)
    p = prompt + "\n" + desc
    print(f"*****Inside npc_generate - prompt is: {p}")
    json_ = {
        "inputs": p,
        "parameters": {
            "top_p": 0.9,
            "temperature": 1.1,
            "max_new_tokens": 50,
            "return_full_text": False,
        },
    }
    #response = requests.post(API_URL, headers=headers, json=json_)
    response = requests.post(API_URL, json=json_)
    output = response.json()
    print(f"If there was an error, the reason is: {output}")
    # error handling: the Inference API returns a dict with an "error" key on failure
    if "error" in output:
        print("using fallback description method!")
        # fallback method
        longDescription = genericDescription()
    else:
        output_tmp = output[0]['generated_text']
        print(f"GPT-J response without splits is: {output_tmp}")
        if "\n\n" not in output_tmp:
            if output_tmp.find('.') != -1:
                idx = output_tmp.find('.')
                longDescription = output_tmp[:idx+1]
            else:
                idx = output_tmp.rfind('\n')
                longDescription = output_tmp[:idx]
        else:
            longDescription = output_tmp.split("\n\n")[0]  # +"."
        longDescription = longDescription.replace('?', '')
    print(f"longDescription being returned is: {longDescription}")
    return desc + longDescription

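# Illustrative usage of the two helpers together (outside the Blocks UI):
#   name, race, cls, pronoun = npc_randomize()
#   description = npc_generate(name, race, cls, pronoun)
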
def desc_to_image(desc):
    print("*****Inside desc_to_image")
    desc = " ".join(desc.split('\n'))
    desc = desc + ", character art, concept art, artstation"
    steps, width, height, images, diversity = '50', '256', '256', '1', 15
    iface = gr.Interface.load("spaces/multimodalart/latentdiffusion")
    print("loaded latent diffusion interface:", iface, dir(iface))
    # strip characters the downstream space may not handle
    prompt = re.sub(r'[^a-zA-Z0-9 ,.]', '', desc)
    print("sanitized prompt:", prompt)
    img = iface(prompt, steps, width, height, images, diversity)[0]
    return img

def desc_to_image_dalle(desc):
    print("*****Inside desc_to_image_dalle")
    desc = " ".join(desc.split('\n'))
    desc = desc + ", character art, concept art, artstation"
    #iface = gr.Interface.load("huggingface/flax-community/dalle-mini")  # this isn't a real interface
    iface = gr.Interface.load("spaces/multimodalart/rudalle")
    print("loaded rudalle interface:", iface, dir(iface))
    # strip characters the downstream space may not handle
    prompt = re.sub(r'[^a-zA-Z0-9 ,.]', '', desc)
    print("sanitized prompt:", prompt)
    model = 'Realism'
    aspect_ratio = 'Square'
    #img = iface(desc, model, aspect_ratio)[0]
    result = iface(prompt, aspect_ratio, model)
    print(f"result is: {result}")
    return result[0]

def desc_to_image_cf(desc):
    cf = gr.Interface.load("spaces/Gradio-Blocks/clip-guided-faces")
    print("loaded clip-guided-faces interface:", cf)
    text = desc
    init_image = None
    skip_timesteps = 0
    clip_guidance_scale = 600
    tv_scale = 0
    range_scale = 0
    init_scale = 0
    seed = 0
    image_prompts = None
    timestep_respacing = 25
    cutn = 16
    im_prompt_weight = 1
    result = cf.fns[0].fn(text, init_image, skip_timesteps, clip_guidance_scale, tv_scale, range_scale, init_scale, seed, image_prompts, timestep_respacing, cutn, im_prompt_weight)
    # convert result from a data URL to a numpy image
    img = result[0]
    header, encoded = img.split(",", 1)
    data = b64decode(encoded)
    image = Image.open(io.BytesIO(data))
    image_np = np.array(image)
    return image_np

demo = gr.Blocks()

with demo:
    gr.Markdown("<h1><center>NPC Generator</center></h1>")
    gr.Markdown(
        "Based on the <a href=https://huggingface.co/spaces/Gradio-Blocks/GPTJ6B_Poetry_LatentDiff_Illustration>Gradio poetry generator</a>."
        "<div>First, enter a name, race and class (or generate them randomly).</div>"
        "<div>Next, use GPT-J to generate a short description.</div>"
        "<div>Finally, generate an illustration 🎨 provided by the <a href=https://huggingface.co/spaces/multimodalart/latentdiffusion>Latent Diffusion model</a>.</div>"
        #"<div>Or using the <a href=https://huggingface.co/spaces/multimodalart/rudalle>Rudalle model</a>.</div>"
        "<div>Or using <a href=https://huggingface.co/spaces/Gradio-Blocks/clip-guided-faces>clip-guided faces</a>.</div>"
    )
    with gr.Row():
        b0 = gr.Button("Randomize name, race and class")
        b1 = gr.Button("Generate NPC Description")
        b2 = gr.Button("Generate Portrait (latent diffusion)")
        b3 = gr.Button("Generate Portrait (clip-faces)")
    with gr.Row():
        input_name = gr.Textbox(label="name", placeholder="Drizzt")
        input_race = gr.Textbox(label="race", placeholder="dark elf")
        input_class = gr.Textbox(label="class", placeholder="ranger")
        input_pronoun = gr.Textbox(label="pronoun", placeholder="he")
    with gr.Row():
        desc_txt = gr.Textbox(label="description", lines=7)
        output_image = gr.Image(label="portrait", type="filepath", shape=(256, 256))

    b0.click(npc_randomize, inputs=[], outputs=[input_name, input_race, input_class, input_pronoun])
    b1.click(npc_generate, inputs=[input_name, input_race, input_class, input_pronoun], outputs=desc_txt)
    b2.click(desc_to_image, desc_txt, output_image)
    b3.click(desc_to_image_cf, desc_txt, output_image)
    #examples=examples

demo.launch(enable_queue=True, debug=True)