Spaces: Running on Zero
Upload app.py
app.py CHANGED
@@ -37,10 +37,10 @@ MAX_SEED = np.iinfo(np.int32).max
 MAX_IMAGE_SIZE = 1216
 
 css = """
-#container { margin: 0 auto; }
-#col-container { margin: 0 auto; }
-#result { max-width: 520px; max-height: 520px; width: 520px; height: 520px; align: center; }
-#model-info { text-align: center; }
+#container { margin: 0 auto; !important; }
+#col-container { margin: 0 auto; !important; }
+#result { display: block; max-width: 520px; max-height: 520px; width: 520px; height: 520px; align: center; !important; }
+#model-info { text-align: center; !important; }
 """
 
 with gr.Blocks(css=css, fill_width=True, elem_id="container") as demo:
@@ -75,17 +75,16 @@ with gr.Blocks(css=css, fill_width=True, elem_id="container") as demo:
             num_inference_steps = gr.Slider(label="Number of inference steps", minimum=1, maximum=100, step=1, value=28)
 
         with gr.Row():
-
-
-
-            model_detail = gr.Checkbox(label="Show detail of model in list", value=False)
+            model_name = gr.Dropdown(label="Model", info="You can enter a huggingface model repo_id to want to use.", choices=get_diffusers_model_list(), value=get_diffusers_model_list()[0], allow_custom_value=True, interactive=True)
+            model_info = gr.Markdown(elem_id="model-info")
+            model_detail = gr.Checkbox(label="Show detail of model in list", value=False)
 
         with gr.Row():
             sampler = gr.Dropdown(label="Sampler", choices=get_samplers(), value="Euler a")
             vae_model = gr.Dropdown(label="VAE Model", choices=get_vaes(), value=get_vaes()[0])
 
-        with gr.
-            with gr.
+        with gr.Accordion("LoRA", open=True, visible=True):
+            with gr.Row():
                 with gr.Group():
                     lora1 = gr.Dropdown(label="LoRA 1", choices=get_all_lora_tupled_list(), value="", allow_custom_value=True)
                     lora1_wt = gr.Slider(minimum=-2, maximum=2, step=0.01, value=1.00, label="LoRA 1: weight")
@@ -136,9 +135,9 @@ with gr.Blocks(css=css, fill_width=True, elem_id="container") as demo:
             recom_prompt = gr.Checkbox(label="Recommended prompt", value=True)
             quality_selector = gr.Radio(label="Quality Tag Presets", interactive=True, choices=list(preset_quality.keys()), value="None")
             style_selector = gr.Radio(label="Style Presets", interactive=True, choices=list(preset_styles.keys()), value="None")
-
-        with gr.
-            with gr.
+
+        with gr.Accordion("Translation Settings", open=False):
+            with gr.Row():
                 chatbot = gr.Chatbot(likeable=False, render_markdown=False, visible=False) # component for auto-translation
                 chat_model = gr.Dropdown(choices=get_dolphin_models(), value=get_dolphin_models()[0][1], allow_custom_value=True, label="Model")
                 chat_model_info = gr.Markdown(value=get_dolphin_model_info(get_dolphin_models()[0][1]), label="Model info")
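
For context, a minimal, self-contained sketch of the layout pattern this commit introduces: CSS injected through gr.Blocks(css=...), a component targeted by its elem_id, and settings grouped as Accordion > Row > Group. This is only an illustration, assuming Gradio 4.x; the choice lists below are placeholders for the app's own helpers (get_diffusers_model_list(), get_all_lora_tupled_list()), which are not shown in this diff.

import gradio as gr

# Placeholder CSS; the app's version also centers #container/#col-container and sizes #result.
css = """
#model-info { text-align: center; }
"""

with gr.Blocks(css=css) as demo:
    with gr.Row():
        # Placeholder choices stand in for get_diffusers_model_list().
        model_name = gr.Dropdown(label="Model", choices=["model-a", "model-b"],
                                 value="model-a", allow_custom_value=True, interactive=True)
        model_info = gr.Markdown(value="Model details appear here.", elem_id="model-info")
        model_detail = gr.Checkbox(label="Show detail of model in list", value=False)

    # Collapsible section, open by default, mirroring the "LoRA" accordion in the diff.
    with gr.Accordion("LoRA", open=True):
        with gr.Row():
            with gr.Group():
                # Placeholder choices stand in for get_all_lora_tupled_list().
                lora1 = gr.Dropdown(label="LoRA 1", choices=["", "lora-a", "lora-b"],
                                    value="", allow_custom_value=True)
                lora1_wt = gr.Slider(minimum=-2, maximum=2, step=0.01, value=1.00,
                                     label="LoRA 1: weight")

if __name__ == "__main__":
    demo.launch()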