import gradio as gr
import requests
import os
import re
import random
from words import *
from base64 import b64decode
from PIL import Image
import io
import numpy as np
# GPT-J-6B API
API_URL = "https://api-inference.huggingface.co/models/EleutherAI/gpt-j-6B"
#HF_TOKEN = os.environ["HF_TOKEN"]
#headers = {"Authorization": f"Bearer {HF_TOKEN}"}
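# NOTE: without the Authorization header the request goes to the anonymous,
# rate-limited Inference API endpoint; to authenticate, set an HF_TOKEN secret
# for the Space and uncomment the two lines above.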
prompt = """
Bilbo is a hobbit rogue who wears a brown cloak and carries a ring.
Bremen is a human wizard, he wears a blue robe and carries a wand.
"""
examples = [["river"], ["night"], ["trees"],["table"],["laughs"]]

def npc_randomize():
    # name is a random combination of consonant/vowel syllables;
    # constants (consonants), vowels and seperators are word lists from words.py
    name = ""
    for i in range(random.randint(2, 4)):
        name += random.choice(constants)
        name += random.choice(vowels)
        if random.random() < 0.5:
            name += random.choice(constants)
        if random.random() < 0.1:
            name += random.choice(seperators)
    # capitalize first letter
    name = name[0].upper() + name[1:]
    race = random.choice(races)
    characterClass = random.choice(classes)
    pronoun = random.choices(["he", "she", "they"], weights=[0.45, 0.45, 0.1], k=1)[0]
    return name, race, characterClass, pronoun

def genericDescription():
    # fallback description used when the GPT-J API call fails
    desc = " wears a {color} {outfit}".format(color=random.choice(colors), outfit=random.choice(outfits))
    if random.random() < 0.5:
        desc += " and a {color} {outfit}".format(color=random.choice(colors), outfit=random.choice(outfits))
    if random.random() < 0.5:
        desc += " and carries a {weapon}".format(weapon=random.choice(weapons))
    elif random.random() < 0.5:
        desc += " and carries a {weapon} and a {object}".format(weapon=random.choice(weapons), object=random.choice(objects))
    else:
        desc += " and carries two {weapon}s".format(weapon=random.choice(weapons))
    return desc

def npc_generate(name, race, characterClass, pronoun):
    # seed sentence in the same style as the few-shot prompt; GPT-J continues it
    desc = "{name} is a {race} {characterClass}, {pronoun}".format(name=name, race=race, characterClass=characterClass, pronoun=pronoun)
    p = prompt + "\n" + desc
    print(f"*****Inside npc_generate - Prompt is :{p}")
    json_ = {"inputs": p,
             "parameters":
             {
                 "top_p": 0.9,
                 "temperature": 1.1,
                 "max_new_tokens": 50,
                 "return_full_text": False,
             }}
    #response = requests.post(API_URL, headers=headers, json=json_)
    response = requests.post(API_URL, json=json_)
    output = response.json()
    print(f"Raw API response: {output}")
    # error handling: the Inference API returns a dict with an "error" key on failure
    if "error" in output:
        print("using fallback description method!")
        # fallback method
        longDescription = genericDescription()
    else:
        output_tmp = output[0]['generated_text']
        print(f"GPTJ response without splits is: {output_tmp}")
        if "\n\n" not in output_tmp:
            if output_tmp.find('.') != -1:
                # keep everything up to the first sentence end
                idx = output_tmp.find('.')
                longDescription = output_tmp[:idx+1]
            else:
                idx = output_tmp.rfind('\n')
                longDescription = output_tmp[:idx]
        else:
            # keep only the first paragraph of the continuation
            longDescription = output_tmp.split("\n\n")[0]  # +"."
        longDescription = longDescription.replace('?', '')
    print(f"longDescription being returned is: {longDescription}")
    return desc + longDescription
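
# The desc_to_image* helpers below all follow the same pattern: load another public
# Space with gr.Interface.load("spaces/<owner>/<name>") and call the returned
# interface like a function (or its underlying fn), so the portrait is generated
# remotely in that Space rather than in this one.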

def desc_to_image(desc):
    print("*****Inside desc_to_image")
    desc = " ".join(desc.split('\n'))
    # style keywords nudge the model toward character portraits
    desc = desc + ", character art, concept art, artstation"
    steps, width, height, images, diversity = '50', '256', '256', '1', 15
    iface = gr.Interface.load("spaces/multimodalart/latentdiffusion")
    print("loaded remote interface:", iface)
    # strip characters the remote Space may not handle
    prompt_clean = re.sub(r'[^a-zA-Z0-9 ,.]', '', desc)
    print("prompt:", prompt_clean)
    img = iface(prompt_clean, steps, width, height, images, diversity)[0]
    return img

def desc_to_image_dalle(desc):
    # alternative portrait generator via the ruDALL-E Space (not wired to a button below)
    print("*****Inside desc_to_image_dalle")
    desc = " ".join(desc.split('\n'))
    desc = desc + ", character art, concept art, artstation"
    steps, width, height, images, diversity = '50', '256', '256', '1', 15  # unused by the rudalle call
    #iface = gr.Interface.load("huggingface/flax-community/dalle-mini")  # this isn't a real interface
    iface = gr.Interface.load("spaces/multimodalart/rudalle")
    print("loaded remote interface:", iface)
    prompt_clean = re.sub(r'[^a-zA-Z0-9 ,.]', '', desc)
    print("prompt:", prompt_clean)
    model = 'Realism'
    aspect_ratio = 'Square'
    #img = iface(desc, model, aspect_ratio)[0]
    result = iface(prompt_clean, aspect_ratio, model)
    print(f"result is: {result}")
    return result[0]

def desc_to_image_cf(desc):
    # portrait via the clip-guided-faces Space, calling its prediction function directly
    cf = gr.Interface.load("spaces/Gradio-Blocks/clip-guided-faces")
    print("loaded remote interface:", cf)
    text = desc
    init_image = None
    skip_timesteps = 0
    clip_guidance_scale = 600
    tv_scale = 0
    range_scale = 0
    init_scale = 0
    seed = 0
    image_prompts = None
    timestep_respacing = 25
    cutn = 16
    im_prompt_weight = 1
    result = cf.fns[0].fn(text, init_image, skip_timesteps, clip_guidance_scale, tv_scale, range_scale,
                          init_scale, seed, image_prompts, timestep_respacing, cutn, im_prompt_weight)
    # convert result from a base64 data URL to a numpy image
    img = result[0]
    header, encoded = img.split(",", 1)
    data = b64decode(encoded)
    image = Image.open(io.BytesIO(data))
    image_np = np.array(image)
    return image_np
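
# Blocks UI: one row of buttons, one row of character fields, and one row with the
# description box and portrait image. Each Button.click() at the bottom binds one of
# the functions above to its input and output components.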
demo = gr.Blocks()

with demo:
    gr.Markdown("<h1><center>NPC Generator</center></h1>")
    gr.Markdown(
        "Based on the <a href=https://huggingface.co/spaces/Gradio-Blocks/GPTJ6B_Poetry_LatentDiff_Illustration>Gradio poetry generator</a>."
        "<div>First, input a name, race and class (or generate them randomly).</div>"
        "<div>Next, use GPT-J to generate a short description.</div>"
        "<div>Finally, generate an illustration 🎨 provided by the <a href=https://huggingface.co/spaces/multimodalart/latentdiffusion>Latent Diffusion model</a>.</div>"
        #"<div>Or using the <a href=https://huggingface.co/spaces/multimodalart/rudalle>Rudalle model</a>.</div>"
        "<div>Or using <a href=https://huggingface.co/spaces/Gradio-Blocks/clip-guided-faces>clip-guided faces</a>.</div>"
    )
    with gr.Row():
        b0 = gr.Button("Randomize name, race and class")
        b1 = gr.Button("Generate NPC Description")
        b2 = gr.Button("Generate Portrait (latent diffusion)")
        b3 = gr.Button("Generate Portrait (clip-faces)")
    with gr.Row():
        input_name = gr.Textbox(label="name", placeholder="Drizzt")
        input_race = gr.Textbox(label="race", placeholder="dark elf")
        input_class = gr.Textbox(label="class", placeholder="ranger")
        input_pronoun = gr.Textbox(label="pronoun", placeholder="he")
    with gr.Row():
        desc_txt = gr.Textbox(label="description", lines=7)
        output_image = gr.Image(label="portrait", type="filepath", shape=(256, 256))
    b0.click(npc_randomize, inputs=[], outputs=[input_name, input_race, input_class, input_pronoun])
    b1.click(npc_generate, inputs=[input_name, input_race, input_class, input_pronoun], outputs=desc_txt)
    b2.click(desc_to_image, desc_txt, output_image)
    b3.click(desc_to_image_cf, desc_txt, output_image)
    #examples=examples

demo.launch(enable_queue=True, debug=True)