using config for device selection
Browse files- app.py +4 -3
- appConfig.json +3 -0
app.py
CHANGED
@@ -52,6 +52,7 @@ model_configs = appConfig.get("models", {})
|
|
52 |
models = list(model_configs.keys())
|
53 |
scheduler_configs = appConfig.get("schedulers", {})
|
54 |
schedulers = list(scheduler_configs.keys())
|
|
|
55 |
|
56 |
device = None
|
57 |
variant = None
|
@@ -266,7 +267,7 @@ with gr.Blocks() as demo:
|
|
266 |
gr.Markdown("## Image Generation")
|
267 |
gr.Markdown("### Device specific settings")
|
268 |
with gr.Row():
|
269 |
-
|
270 |
rg_data_type = gr.Radio(label="Data Type:", value=data_type, choices=["bfloat16", "float16"], info="`bfloat16` is not supported on MPS devices right now; half-precision weights will save GPU memory, see https://huggingface.co/docs/diffusers/main/en/optimization/fp16")
|
271 |
rg_allow_tensorfloat32 = gr.Radio(label="Allow TensorFloat32:", value=allow_tensorfloat32, choices=[True, False], info="is not supported on MPS devices right now; using TensorFloat-32 is faster, but results in slightly less accurate computations, see https://huggingface.co/docs/diffusers/main/en/optimization/fp16 ")
|
272 |
rg_variant = gr.Radio(label="Variant:", value=variant, choices=["fp16", None], info="Using half-precision weights will save GPU memory; not all models support that, see https://huggingface.co/docs/diffusers/main/en/optimization/fp16 ")
|
@@ -307,7 +308,7 @@ with gr.Blocks() as demo:
|
|
307 |
el_result = gr.Image()
|
308 |
txt_code = gr.Code(get_sorted_code(), label="Code")
|
309 |
|
310 |
-
|
311 |
rg_data_type.change(data_type_change, inputs=[rg_data_type], outputs=[txt_code])
|
312 |
rg_allow_tensorfloat32.change(tensorfloat32_change, inputs=[rg_allow_tensorfloat32], outputs=[txt_code])
|
313 |
rg_variant.change(variant_change, inputs=[rg_variant], outputs=[txt_code])
|
@@ -317,7 +318,7 @@ with gr.Blocks() as demo:
|
|
317 |
dd_schedulers.change(schedulers_change, inputs=[dd_schedulers], outputs=[txt_code, txt_scheduler])
|
318 |
btn_start_pipeline.click(start_pipeline, inputs=[
|
319 |
dd_models,
|
320 |
-
|
321 |
rg_use_safetensors,
|
322 |
rg_data_type,
|
323 |
rg_variant,
|
|
|
52 |
models = list(model_configs.keys())
|
53 |
scheduler_configs = appConfig.get("schedulers", {})
|
54 |
schedulers = list(scheduler_configs.keys())
|
55 |
+
devices = appConfig.get("devices", [])
|
56 |
|
57 |
device = None
|
58 |
variant = None
|
|
|
267 |
gr.Markdown("## Image Generation")
|
268 |
gr.Markdown("### Device specific settings")
|
269 |
with gr.Row():
|
270 |
+
el_devices = gr.Dropdown(label="Device:", value=device, choices=devices, filterable=True, multiselect=False, allow_custom_value=True)
|
271 |
rg_data_type = gr.Radio(label="Data Type:", value=data_type, choices=["bfloat16", "float16"], info="`bfloat16` is not supported on MPS devices right now; half-precision weights will save GPU memory, see https://huggingface.co/docs/diffusers/main/en/optimization/fp16")
|
272 |
rg_allow_tensorfloat32 = gr.Radio(label="Allow TensorFloat32:", value=allow_tensorfloat32, choices=[True, False], info="is not supported on MPS devices right now; using TensorFloat-32 is faster, but results in slightly less accurate computations, see https://huggingface.co/docs/diffusers/main/en/optimization/fp16 ")
|
273 |
rg_variant = gr.Radio(label="Variant:", value=variant, choices=["fp16", None], info="Using half-precision weights will save GPU memory; not all models support that, see https://huggingface.co/docs/diffusers/main/en/optimization/fp16 ")
|
|
|
308 |
el_result = gr.Image()
|
309 |
txt_code = gr.Code(get_sorted_code(), label="Code")
|
310 |
|
311 |
+
el_devices.change(device_change, inputs=[el_devices], outputs=[txt_code])
|
312 |
rg_data_type.change(data_type_change, inputs=[rg_data_type], outputs=[txt_code])
|
313 |
rg_allow_tensorfloat32.change(tensorfloat32_change, inputs=[rg_allow_tensorfloat32], outputs=[txt_code])
|
314 |
rg_variant.change(variant_change, inputs=[rg_variant], outputs=[txt_code])
|
|
|
318 |
dd_schedulers.change(schedulers_change, inputs=[dd_schedulers], outputs=[txt_code, txt_scheduler])
|
319 |
btn_start_pipeline.click(start_pipeline, inputs=[
|
320 |
dd_models,
|
321 |
+
el_devices,
|
322 |
rg_use_safetensors,
|
323 |
rg_data_type,
|
324 |
rg_variant,
|
appConfig.json
CHANGED
@@ -11,6 +11,9 @@
|
|
11 |
}
|
12 |
|
13 |
},
|
|
|
|
|
|
|
14 |
"schedulers": {
|
15 |
"DDPMScheduler": "This is DDPM",
|
16 |
"DDIMScheduler": "This is DDIM",
|
|
|
11 |
}
|
12 |
|
13 |
},
|
14 |
+
"devices": [
|
15 |
+
"cpu", "cuda", "ipu", "xpu", "mkldnn", "opengl", "opencl", "ideep", "hip", "ve", "fpga", "ort", "xla", "lazy", "vulkan", "mps", "meta", "hpu", "mtia", "privateuseone", "gpu"
|
16 |
+
],
|
17 |
"schedulers": {
|
18 |
"DDPMScheduler": "This is DDPM",
|
19 |
"DDIMScheduler": "This is DDIM",
|