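# app.py - Gradio app for a Hugging Face Space that generates Minecraft skins
# with a finetuned Stable Diffusion model (cloned from the repo below).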
import subprocess
import os
import gradio as gr
import torch
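# Select the compute device: use the GPU when CUDA is available, otherwise fall back to the CPU.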
if torch.cuda.is_available():
    device = "cuda"
    print("Using GPU")
else:
    device = "cpu"
    print("Using CPU")
subprocess.run(["git", "clone", "https://github.com/Nick088Official/Stable_Diffusion_Finetuned_Minecraft_Skin_Generator.git"])
os.chdir("Stable_Diffusion_Finetuned_Minecraft_Skin_Generator")
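# run_inference shells out to the chosen model's command-line script and returns the
# path of the generated skin, building a command like (hypothetical values):
#   python Python_Scripts/minecraft-skins-sdxl.py 'a knight in golden armor' 25 7.5 1 fp16 42 output-skin.png --verbose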
def run_inference(prompt, stable_diffusion_model, num_inference_steps, guidance_scale, num_images_per_prompt, model_precision_type, seed, output_image_name, verbose):
    # Pick the inference script matching the selected model ("2" or "xl")
    if stable_diffusion_model == '2':
        sd_model = "minecraft-skins"
    else:
        sd_model = "minecraft-skins-sdxl"
    command = f"python Python_Scripts/{sd_model}.py '{prompt}' {num_inference_steps} {guidance_scale} {num_images_per_prompt} {model_precision_type} {seed} {output_image_name} {'--verbose' if verbose else ''}"
    os.system(command)
    return os.path.join("output_minecraft_skins", output_image_name)
# Define Gradio UI components
prompt_input = gr.Textbox(label="Your Prompt", info="What the Minecraft Skin should look like")
stable_diffusion_model_input = gr.Dropdown(['2', 'xl'], value="xl", label="Stable Diffusion Model", info="Choose which Stable Diffusion model to use; xl understands prompts better")
num_inference_steps_input = gr.Number(label="Number of Inference Steps", precision=0, value=25, info="The number of denoising steps. More steps usually lead to a higher quality image at the cost of slower inference")
guidance_scale_input = gr.Number(minimum=0.1, value=7.5, label="Guidance Scale", info="How closely the image should follow the prompt; higher values stick more closely to the prompt")
num_images_per_prompt_input = gr.Number(minimum=1, value=1, precision=0, label="Number of Images per Prompt", info="The number of images to make with the prompt")
model_precision_type_input = gr.Dropdown(["fp16", "fp32"], value="fp16", label="Model Precision Type", info="The precision to load the model in: fp16 is faster, fp32 gives better results")
seed_input = gr.Number(value=42, label="Seed", info="A starting point to initiate generation, put 0 for a random one")
output_image_name_input = gr.Textbox(label="Output Image Name", info="The file name of the output skin image; keep the .png extension", value="output-skin.png")
verbose_input = gr.Checkbox(label="Verbose Output", info="Produce more detailed output while running", value=False)
# Create the Gradio interface
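# Each input component maps, in order, to the corresponding run_inference argument.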
gr.Interface(
    fn=run_inference,
    inputs=[
        prompt_input,
        stable_diffusion_model_input,
        num_inference_steps_input,
        guidance_scale_input,
        num_images_per_prompt_input,
        model_precision_type_input,
        seed_input,
        output_image_name_input,
        verbose_input,
    ],
    outputs=gr.Image(label="Generated Minecraft Skin Image Asset"),
    title="Minecraft Skin Generator",
    description="Make AI-generated Minecraft skins with a finetuned Stable Diffusion model!<br>Model used: https://github.com/Nick088Official/Stable_Diffusion_Finetuned_Minecraft_Skin_Generator<br>Hugging Face Space made by [Nick088](https://linktr.ee/Nick088)",
).launch(show_api=False, share=True)