import subprocess
import os
import gradio as gr
import torch
# Report whether a GPU is available (the device value is only printed here;
# generation itself happens in the subprocess scripts called below)
if torch.cuda.is_available():
    device = "cuda"
    print("Using GPU")
else:
    device = "cpu"
    print("Using CPU")
subprocess.run(["git", "clone", "https://github.com/Nick088Official/Stable_Diffusion_Finetuned_Minecraft_Skin_Generator.git"])
os.chdir("Stable_Diffusion_Finetuned_Minecraft_Skin_Generator")
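# Layout assumed by run_inference below (inferred from this file, not verified against the repo):
#   Python_Scripts/minecraft-skins.py       - Stable Diffusion 2 fine-tune
#   Python_Scripts/minecraft-skins-sdxl.py  - SDXL fine-tune
#   output_minecraft_skins/                 - directory the scripts write the generated skin into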
def run_inference(prompt, stable_diffusion_model, num_inference_steps, guidance_scale, num_images_per_prompt, model_precision_type, output_image_name, verbose):
    # "2" selects the Stable Diffusion 2 fine-tune, anything else the SDXL fine-tune
    if stable_diffusion_model == '2':
        sd_model = "minecraft-skins"
    else:
        sd_model = "minecraft-skins-sdxl"
    # Pass arguments as a list (no shell) so the prompt needs no manual quoting
    command = ["python", f"Python_Scripts/{sd_model}.py", prompt, str(num_inference_steps),
               str(guidance_scale), str(num_images_per_prompt), model_precision_type, output_image_name]
    if verbose:
        command.append("--verbose")
    subprocess.run(command, check=True)
    # The generator scripts write the finished skin into output_minecraft_skins/
    return os.path.join("output_minecraft_skins", output_image_name)
# Define the Gradio UI components wired into the Interface below
prompt_input = gr.Textbox(label="Your Prompt", info="What the Minecraft Skin should look like")
stable_diffusion_model_input = gr.Dropdown(['2', 'xl'], value="xl", label="Stable Diffusion Model", info="Choose which Stable Diffusion Model to use, xl understands prompts better")
num_inference_steps_input = gr.Number(value=25, minimum=1, precision=0, label="Number of Inference Steps", info="The number of denoising steps; more steps usually give a higher quality image at the cost of slower inference")
guidance_scale_input = gr.Number(value=7.5, minimum=0.1, label="Guidance Scale", info="How closely the generated image adheres to the prompt")
num_images_per_prompt_input = gr.Number(value=1, minimum=1, precision=0, label="Number of Images per Prompt", info="The number of images to make with the prompt")
model_precision_type_input = gr.Dropdown(["fp16", "fp32"], value="fp16", label="Model Precision Type", info="The precision type to load the model, like fp16 which is faster, or fp32 which gives better results")
output_image_name_input = gr.Textbox(label="Output Image Name", info="The name of the file of the output image skin, keep the .png", value="output-skin.png")
verbose_input = gr.Checkbox(label="Verbose Output", info="Produce more detailed output while running", value=False)
# One example row per Interface input: prompt, model, steps, guidance, images, precision, output name, verbose
examples = [
    [
        "A man in a purple suit wearing a tophat.",
        "xl",
        25,
        7.5,
        1,
        "fp16",
        "output.png",
        False
    ]
]
# Create the Gradio interface
gr.Interface(
    fn=run_inference,
    inputs=[
        prompt_input,
        stable_diffusion_model_input,
        num_inference_steps_input,
        guidance_scale_input,
        num_images_per_prompt_input,
        model_precision_type_input,
        output_image_name_input,
        verbose_input
    ],
    outputs=gr.Image(label="Generated Image"),
    title="Minecraft Skin Generator",
    description="Make AI generated Minecraft Skins by a Finetuned Stable Diffusion Version!<br>Model used: https://github.com/Nick088Official/Stable_Diffusion_Finetuned_Minecraft_Skin_Generator<br>Hugging Face Space made by [Nick088](https://linktr.ee/Nick088)",
    examples=examples,
).launch(show_api=False, share=True)
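# Illustration only (not executed): with the example row above, run_inference builds and runs
# roughly the equivalent of
#   python Python_Scripts/minecraft-skins-sdxl.py "A man in a purple suit wearing a tophat." 25 7.5 1 fp16 output.png
# and returns output_minecraft_skins/output.png for the gr.Image output to display.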