salomonsky committed on
Commit
9a11c4c
1 Parent(s): e3c6864

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +38 -51
app.py CHANGED
@@ -6,7 +6,6 @@ from huggingface_hub import AsyncInferenceClient, InferenceClient
6
  from PIL import Image
7
  from gradio_client import Client, handle_file
8
  from gradio_imageslider import ImageSlider
9
- import asyncio
10
 
11
  MAX_SEED = np.iinfo(np.int32).max
12
  HF_TOKEN = os.environ.get("HF_TOKEN")
@@ -38,40 +37,37 @@ def get_upscale_finegrain(prompt, img_path, upscale_factor):
38
  return None
39
 
40
  async def gen(prompt, basemodel, width, height, scales, steps, seed, upscale_factor, process_upscale, lora_model, process_lora):
41
- try:
42
- model = enable_lora(lora_model, basemodel) if process_lora else basemodel
43
-
44
- improved_prompt = await improve_prompt(prompt)
45
- combined_prompt = f"{prompt} {improved_prompt}"
46
 
47
- image, seed = await generate_image(combined_prompt, model, "", width, height, scales, steps, seed)
48
-
49
- if isinstance(image, str) and image.startswith("Error"):
50
- return [image, None, "", "No generando"]
51
-
52
- image_path = "temp_image.jpg"
53
- image.save(image_path, format="JPEG")
54
-
55
- if process_upscale:
56
- upscale_image_path = get_upscale_finegrain(combined_prompt, image_path, upscale_factor)
57
- if upscale_image_path is not None:
58
- upscale_image = Image.open(upscale_image_path)
59
- upscale_image.save("upscale_image.jpg", format="JPEG")
60
- return [image_path, "upscale_image.jpg", "", "Generando..."]
61
- else:
62
- return [image_path, image_path, "", "Generando..."]
63
  else:
64
- return [image_path, image_path, "", "Generando..."]
65
- except Exception as e:
66
- return [f"Error: {e}", None, "", "No generando"]
67
 
68
  async def improve_prompt(prompt):
69
  try:
70
- instruction = "improve this idea and describe in English a detailed img2vid prompt in a single paragraph of up to 200 characters, developing atmosphere, characters, lighting, and cameras."
71
- formatted_prompt = f"{prompt}: {instruction}"
72
- response = llm_client.text_generation(formatted_prompt, max_new_tokens=200)
73
  improved_text = response['generated_text'].strip() if 'generated_text' in response else response.strip()
74
-
75
  return improved_text
76
  except Exception as e:
77
  return f"Error mejorando el prompt: {e}"
@@ -80,10 +76,6 @@ css = """
80
  #col-container{ margin: 0 auto; max-width: 1024px;}
81
  """
82
 
83
- def improve_prompt_wrapper(prompt):
84
- improved_text = asyncio.run(improve_prompt(prompt))
85
- return prompt, improved_text
86
-
87
  with gr.Blocks(css=css, theme="Nymbo/Nymbo_Theme") as demo:
88
  with gr.Column(elem_id="col-container"):
89
  with gr.Row():
@@ -98,25 +90,20 @@ with gr.Blocks(css=css, theme="Nymbo/Nymbo_Theme") as demo:
98
  process_lora = gr.Checkbox(label="Procesar LORA")
99
  process_upscale = gr.Checkbox(label="Procesar Escalador")
100
 
 
 
101
  improved_prompt = gr.Textbox(label="Prompt Mejorado", interactive=False)
102
- improve_btn = gr.Button("Mejora mi prompt")
103
 
104
- improve_btn.click(fn=improve_prompt_wrapper, inputs=[prompt], outputs=[prompt, improved_prompt])
 
105
 
106
- generating_label = gr.Label(label="No generando")
 
 
 
 
 
 
107
  btn = gr.Button("Generar")
108
-
109
- width = gr.Slider(label="Ancho", minimum=512, maximum=1280, step=8, value=1280)
110
- height = gr.Slider(label="Alto", minimum=512, maximum=1280, step=8, value=768)
111
- upscale_factor = gr.Radio(label="Factor de Escala", choices=[2, 4, 8], value=2)
112
- scales = gr.Slider(label="Escalado", minimum=1, maximum=20, step=1, value=10)
113
- steps = gr.Slider(label="Pasos", minimum=1, maximum=100, step=1, value=20)
114
- seed = gr.Number(label="Semilla", value=-1)
115
-
116
- btn.click(
117
- fn=gen,
118
- inputs=[prompt, basemodel_choice, width, height, scales, steps, seed, upscale_factor, process_upscale, lora_model_choice, process_lora],
119
- outputs=[output_res, prompt, generating_label]
120
- )
121
-
122
- demo.launch()
 
6
  from PIL import Image
7
  from gradio_client import Client, handle_file
8
  from gradio_imageslider import ImageSlider
 
9
 
10
  MAX_SEED = np.iinfo(np.int32).max
11
  HF_TOKEN = os.environ.get("HF_TOKEN")
 
37
  return None
38
 
39
  async def gen(prompt, basemodel, width, height, scales, steps, seed, upscale_factor, process_upscale, lora_model, process_lora):
40
+ model = enable_lora(lora_model, basemodel) if process_lora else basemodel
41
+
42
+ improved_prompt = await improve_prompt(prompt)
43
+ combined_prompt = f"{prompt} {improved_prompt}"
 
44
 
45
+ image, seed = await generate_image(combined_prompt, model, "", width, height, scales, steps, seed)
46
+
47
+ if isinstance(image, str) and image.startswith("Error"):
48
+ return [image, None]
49
+
50
+ image_path = "temp_image.jpg"
51
+ image.save(image_path, format="JPEG")
52
+
53
+ if process_upscale:
54
+ upscale_image_path = get_upscale_finegrain(combined_prompt, image_path, upscale_factor)
55
+ if upscale_image_path is not None:
56
+ upscale_image = Image.open(upscale_image_path)
57
+ upscale_image.save("upscale_image.jpg", format="JPEG")
58
+ return [image_path, "upscale_image.jpg"]
 
 
59
  else:
60
+ return [image_path, image_path]
61
+ else:
62
+ return [image_path, image_path]
63
 
64
  async def improve_prompt(prompt):
65
  try:
66
+ instruction = "Improve and translate this prompt into English, adding detailed descriptions of style, cinematography, cameras, atmosphere, and lighting for the best quality, up to 200 words."
67
+ formatted_prompt = f"{instruction}: {prompt}"
68
+ response = llm_client.text_generation(formatted_prompt, max_new_tokens=300) # Allowing more tokens for detailed description
69
  improved_text = response['generated_text'].strip() if 'generated_text' in response else response.strip()
70
+
71
  return improved_text
72
  except Exception as e:
73
  return f"Error mejorando el prompt: {e}"
 
76
  #col-container{ margin: 0 auto; max-width: 1024px;}
77
  """
78
 
 
 
 
 
79
  with gr.Blocks(css=css, theme="Nymbo/Nymbo_Theme") as demo:
80
  with gr.Column(elem_id="col-container"):
81
  with gr.Row():
 
90
  process_lora = gr.Checkbox(label="Procesar LORA")
91
  process_upscale = gr.Checkbox(label="Procesar Escalador")
92
 
93
+ upscale_factor = gr.Radio(label="Factor de Escala", choices=[2, 4, 8], value=2)
94
+
95
  improved_prompt = gr.Textbox(label="Prompt Mejorado", interactive=False)
 
96
 
97
+ improve_btn = gr.Button("Mejora mi prompt")
98
+ improve_btn.click(fn=improve_prompt, inputs=[prompt], outputs=improved_prompt)
99
 
100
+ with gr.Accordion(label="Opciones Avanzadas", open=False):
101
+ width = gr.Slider(label="Ancho", minimum=512, maximum=1280, step=8, value=1280)
102
+ height = gr.Slider(label="Alto", minimum=512, maximum=1280, step=8, value=768)
103
+ scales = gr.Slider(label="Escalado", minimum=1, maximum=20, step=1, value=10)
104
+ steps = gr.Slider(label="Pasos", minimum=1, maximum=100, step=1, value=20)
105
+ seed = gr.Number(label="Semilla", value=-1)
106
+
107
  btn = gr.Button("Generar")
108
+ btn.click(fn=gen, inputs=[prompt, basemodel_choice, width, height, scales, steps, seed, upscale_factor, process_upscale, lora_model_choice, process_lora], outputs=output_res)
109
+ demo.launch()