salomonsky committed
Commit 219d097
1 Parent(s): e3be785

Update app.py

Files changed (1)
  1. app.py +20 -13
app.py CHANGED
@@ -13,31 +13,33 @@ from huggingface_hub import login
 from gradio_imageslider import ImageSlider
 
 
+# Initial configuration
 translator = Translator()
-HF_TOKEN = os.environ.get("HF_TOKEN", None)
+HF_TOKEN = os.environ.get("HF_TOKEN")
 basemodel = "black-forest-labs/FLUX.1-schnell"
 MAX_SEED = np.iinfo(np.int32).max
-CSS = "footer { visibility: hidden; }"
-JS = "function () { gradioURL = window.location.href; if (!gradioURL.endsWith('?__theme=dark')) { window.location.replace(gradioURL + '?__theme=dark'); } }"
 
 
+# Function to enable LoRA
 def enable_lora(lora_add):
     return basemodel if not lora_add else lora_add
 
 
+# Async function to generate images
 async def generate_image(prompt, model, lora_word, width, height, scales, steps, seed):
-    if seed == -1:
-        seed = random.randint(0, MAX_SEED)
-    seed = int(seed)
-    text = str(translator.translate(prompt, 'English')) + "," + lora_word
-    client = AsyncInferenceClient()
-    try:
+    try:
+        if seed == -1:
+            seed = random.randint(0, MAX_SEED)
+        seed = int(seed)
+        text = str(translator.translate(prompt, 'English')) + "," + lora_word
+        client = AsyncInferenceClient()
         image = await client.text_to_image(prompt=text, height=height, width=width, guidance_scale=scales, num_inference_steps=steps, model=model)
+        return image, seed
     except Exception as e:
-        raise gr.Error(f"Error in {e}")
-    return image, seed
+        raise gr.Error(f"Error: {e}")
 
 
+# Async function to generate images and apply upscaling
 async def gen(prompt, lora_add, lora_word, width, height, scales, steps, seed, upscale_factor, process_upscale):
     model = enable_lora(lora_add)
     image, seed = await generate_image(prompt, model, lora_word, width, height, scales, steps, seed)
@@ -52,12 +54,14 @@ async def gen(prompt, lora_add, lora_word, width, height, scales, steps, seed, u
     return [image_path, upscale_image]
 
 
+# Function to apply upscaling with Finegrain
 def get_upscale_finegrain(prompt, img_path, upscale_factor):
     client = Client("finegrain/finegrain-image-enhancer")
     result = client.predict(input_image=handle_file(img_path), prompt=prompt, negative_prompt="", seed=42, upscale_factor=upscale_factor, controlnet_scale=0.6, controlnet_decay=1, condition_scale=6, tile_width=112, tile_height=144, denoise_strength=0.35, num_inference_steps=18, solver="DDIM", api_name="/process")
     return result[1]
 
 
+# CSS configuration
 css = """
 #col-container{
     margin: 0 auto;
@@ -65,7 +69,6 @@ css = """
 }
 """
 
-
 with gr.Blocks(css=css) as demo:
     with gr.Column(elem_id="col-container"):
         gr.Markdown("# Flux Upscaled")
@@ -94,4 +97,8 @@ with gr.Blocks(css=css) as demo:
         fn=gen,
        inputs=[prompt, lora_add, lora_word, width, height, scales, steps, seed, upscale_factor, process_upscale],
        outputs=[output_res]
-    )
+    )
+
+
+# Launch the application
+demo.launch()
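
Note on the HF_TOKEN change: the new code still reads the token from the environment, but the hunks shown here never pass it to AsyncInferenceClient, so requests would go out unauthenticated unless the token is picked up elsewhere in the file. A minimal standalone sketch of forwarding it, assuming the standard huggingface_hub token parameter and illustrative prompt, size, and step values (none of which come from the commit):

import os
import asyncio
from huggingface_hub import AsyncInferenceClient

HF_TOKEN = os.environ.get("HF_TOKEN")        # same lookup as in the commit; None if unset
MODEL = "black-forest-labs/FLUX.1-schnell"   # basemodel from the commit

async def sample_call():
    # token= is the standard huggingface_hub client argument; the commit itself
    # does not show the token being forwarded.
    client = AsyncInferenceClient(token=HF_TOKEN)
    image = await client.text_to_image(
        prompt="a lighthouse at dawn, photorealistic",  # illustrative prompt
        height=1024,
        width=1024,
        guidance_scale=3.5,       # illustrative values, not taken from the commit
        num_inference_steps=4,
        model=MODEL,
    )
    image.save("out.png")  # text_to_image returns a PIL image

asyncio.run(sample_call())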
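
For readability, the single-line client.predict call inside get_upscale_finegrain, rewritten as a standalone usage sketch with the same parameter values; the input path, prompt, and upscale factor are placeholders, and result[1] is the value the function returns:

from gradio_client import Client, handle_file

client = Client("finegrain/finegrain-image-enhancer")
result = client.predict(
    input_image=handle_file("input.png"),  # placeholder local image path
    prompt="a lighthouse at dawn",         # placeholder prompt
    negative_prompt="",
    seed=42,
    upscale_factor=2,                      # the app passes its upscale_factor input here
    controlnet_scale=0.6,
    controlnet_decay=1,
    condition_scale=6,
    tile_width=112,
    tile_height=144,
    denoise_strength=0.35,
    num_inference_steps=18,
    solver="DDIM",
    api_name="/process",
)
print(result[1])  # second element of the result, which get_upscale_finegrain returns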