salomonsky committed
Commit d95dbe9
1 Parent(s): 0e11554

Update app.py

Files changed (1)
  1. app.py +27 -35
app.py CHANGED
@@ -1,5 +1,4 @@
 import os
-import torch
 import gradio as gr
 import numpy as np
 import random
@@ -12,20 +11,20 @@ from PIL import Image
 from gradio_client import Client, handle_file
 from huggingface_hub import login
 from gradio_imageslider import ImageSlider
-from gfpgan.utils import GFPGANer
+
 
 MAX_SEED = np.iinfo(np.int32).max
 HF_TOKEN = os.environ.get("HF_TOKEN")
 HF_TOKEN_UPSCALER = os.environ.get("HF_TOKEN_UPSCALER")
 
-if not os.path.exists('GFPGANv1.4.pth'):
-    os.system("wget https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.4.pth -P .")
 
-device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
-model_path = 'GFPGANv1.4.pth'
-gfpgan = GFPGANer(model_path=model_path, upscale_factor=4, arch='clean', channel_multiplier=2, model_name='GPFGAN', device=device)
+def enable_lora(lora_add, basemodel):
+    """Habilita o deshabilita LoRA según la opción seleccionada"""
+    return basemodel if not lora_add else lora_add
+
 
 async def generate_image(prompt, model, lora_word, width, height, scales, steps, seed):
+    """Genera una imagen utilizando el modelo seleccionado"""
     try:
         if seed == -1:
             seed = random.randint(0, MAX_SEED)
@@ -35,27 +34,23 @@ async def generate_image(prompt, model, lora_word, width, height, scales, steps, seed):
         image = await client.text_to_image(prompt=text, height=height, width=width, guidance_scale=scales, num_inference_steps=steps, model=model)
         return image, seed
     except Exception as e:
-        print(f"Error generating image: {e}")
+        print(f"Error generando imagen: {e}")
         return None, None
 
-def get_upscale_gfpgan(prompt, img_path):
-    try:
-        img = gfpgan.enhance(img_path)
-        return img
-    except Exception as e:
-        print(f"Error upscale image: {e}")
-        return None
 
 def get_upscale_finegrain(prompt, img_path, upscale_factor):
+    """Escala una imagen utilizando FineGrain"""
     try:
         client = Client("finegrain/finegrain-image-enhancer", hf_token=HF_TOKEN_UPSCALER)
         result = client.predict(input_image=handle_file(img_path), prompt=prompt, negative_prompt="", seed=42, upscale_factor=upscale_factor, controlnet_scale=0.6, controlnet_decay=1, condition_scale=6, tile_width=112, tile_height=144, denoise_strength=0.35, num_inference_steps=18, solver="DDIM", api_name="/process")
         return result[1]
     except Exception as e:
-        print(f"Error upscale image: {e}")
+        print(f"Error escalando imagen: {e}")
         return None
 
-async def gen(prompt, basemodel, width, height, scales, steps, seed, upscale_factor, process_upscale, lora_model, process_lora, upscale_model):
+
+async def gen(prompt, basemodel, width, height, scales, steps, seed, upscale_factor, process_upscale, lora_model, process_lora):
+    """Función principal que genera y escala la imagen"""
     model = enable_lora(lora_model, basemodel) if process_lora else basemodel
     image, seed = await generate_image(prompt, model, "", width, height, scales, steps, seed)
     if image is None:
@@ -65,20 +60,23 @@ async def gen(prompt, basemodel, width, height, scales, steps, seed, upscale_factor, process_upscale, lora_model, process_lora, upscale_model):
     image.save(image_path, format="JPEG")
 
     if process_upscale:
-        if upscale_model == "GPFGAN":
-            upscale_image = get_upscale_gfpgan(prompt, image_path)
-        elif upscale_model == "Finegrain":
-            upscale_image = get_upscale_finegrain(prompt, image_path, upscale_factor)
-        upscale_image_path = "upscale_image.jpg"
-        upscale_image.save(upscale_image_path, format="JPEG")
-        return [image_path, upscale_image_path]
+        upscale_image_path = get_upscale_finegrain(prompt, image_path, upscale_factor)
+        if upscale_image_path is not None:
+            upscale_image = Image.open(upscale_image_path)
+            upscale_image.save("upscale_image.jpg", format="JPEG")
+            return [image_path, "upscale_image.jpg"]
+        else:
+            print("Error: La ruta de la imagen escalada es None")
+            return [image_path, image_path]
     else:
         return [image_path, image_path]
 
+
 css = """
 #col-container{ margin: 0 auto; max-width: 1024px;}
 """
 
+
 with gr.Blocks(css=css, theme="Nymbo/Nymbo_Theme") as demo:
     with gr.Column(elem_id="col-container"):
         with gr.Row():
@@ -91,20 +89,14 @@ with gr.Blocks(css=css, theme="Nymbo/Nymbo_Theme") as demo:
             process_lora = gr.Checkbox(label="Procesar LORA")
            process_upscale = gr.Checkbox(label="Procesar Escalador")
            upscale_factor = gr.Radio(label="Factor de Escala", choices=[2, 4, 8], value=2)
-            upscale_model = gr.Radio(label="Modelo de Escalado", choices=["GPFGAN", "Finegrain"], value="GPFGAN")
 
         with gr.Accordion(label="Opciones Avanzadas", open=False):
-            width = gr.Slider(label="Ancho", minimum=512, maximum=1280, step=8, value=512)
-            height = gr.Slider(label="Alto", minimum=512, maximum=1280, step=8, value=512)
+            width = gr.Slider(label="Ancho", minimum=512, maximum=1280, step=8, value=1280)
+            height = gr.Slider(label="Alto", minimum=512, maximum=1280, step=8, value=768)
             scales = gr.Slider(label="Escalado", minimum=1, maximum=20, step=1, value=10)
             steps = gr.Slider(label="Pasos", minimum=1, maximum=100, step=1, value=20)
             seed = gr.Number(label="Semilla", value=-1)
-
+
         btn = gr.Button("Generar")
-        btn.click(
-            fn=gen,
-            inputs=[prompt, basemodel_choice, width, height, scales, steps, seed, upscale_factor, process_upscale, lora_model_choice, process_lora, upscale_model,],
-            outputs=output_res,
-        )
-
-demo.launch()
+        btn.click(fn=gen, inputs=[prompt, basemodel_choice, width, height, scales, steps, seed, upscale_factor, process_upscale, lora_model_choice, process_lora], outputs=output_res,)
+demo.launch()
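
For context on the new upscale path, below is a minimal standalone sketch of the same gradio_client call that get_upscale_finegrain now wraps. The Space name, endpoint, and parameter values mirror the diff above; the input path and prompt are placeholders, not values from the Space, so treat this as an illustration rather than part of the commit.

import os
from gradio_client import Client, handle_file

# Same Space and /process endpoint used by get_upscale_finegrain in this commit.
client = Client("finegrain/finegrain-image-enhancer", hf_token=os.environ.get("HF_TOKEN_UPSCALER"))
result = client.predict(
    input_image=handle_file("image.jpg"),   # placeholder input path
    prompt="a placeholder prompt",          # placeholder prompt text
    negative_prompt="",
    seed=42,
    upscale_factor=2,
    controlnet_scale=0.6,
    controlnet_decay=1,
    condition_scale=6,
    tile_width=112,
    tile_height=144,
    denoise_strength=0.35,
    num_inference_steps=18,
    solver="DDIM",
    api_name="/process",
)
print(result[1])  # result[1] is the upscaled-image path that gen() re-saves as "upscale_image.jpg"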
 
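For reference, the behavior of the enable_lora helper added in this commit is easiest to see in isolation. The function body is copied from the diff; the repository identifiers in the calls below are placeholders for illustration only. Note that gen() only invokes it when the "Procesar LORA" checkbox is enabled.

def enable_lora(lora_add, basemodel):
    """Habilita o deshabilita LoRA según la opción seleccionada"""
    return basemodel if not lora_add else lora_add

# Placeholder model identifiers, for illustration only:
print(enable_lora("", "org/base-model"))               # -> "org/base-model" (no LoRA selected)
print(enable_lora("org/some-lora", "org/base-model"))  # -> "org/some-lora" (LoRA repo takes over)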