salomonsky committed
Commit 429404e
Parent: 33c6619

Update app.py

Files changed (1): app.py (+8, -6)
app.py CHANGED
@@ -57,10 +57,10 @@ def save_prompt(prompt_text, seed):
         st.error(f"Error al guardar el prompt: {e}")
     return None
 
-async def gen(prompt, basemodel, width, height, scales, steps, seed, upscale_factor, process_upscale, process_enhancer):
+async def gen(prompt, basemodel, width, height, scales, steps, seed, upscale_factor, process_upscale, process_enhancer, language):
     combined_prompt = prompt
     if process_enhancer:
-        improved_prompt = await improve_prompt(prompt)
+        improved_prompt = await improve_prompt(prompt, language)
         combined_prompt = f"{prompt} {improved_prompt}"
 
     if seed == -1:
@@ -92,11 +92,12 @@ async def gen(prompt, basemodel, width, height, scales, steps, seed, upscale_fac
     progress_bar.progress(100)
     return [str(image_path), str(prompt_file_path)]
 
-async def improve_prompt(prompt):
+async def improve_prompt(prompt, language):
     try:
         instruction_en = "With this idea, describe in English a detailed txt2img prompt in 500 characters at most, add illumination, atmosphere, cinematic elements, and characters..."
         instruction_es = "Con esta idea, describe en español un prompt detallado de txt2img en un máximo de 500 caracteres, añadiendo iluminación, atmósfera, elementos cinematográficos y personajes..."
-        formatted_prompt = f"{prompt}: {instruction_en} {instruction_es}"
+        instruction = instruction_en if language == "en" else instruction_es
+        formatted_prompt = f"{prompt}: {instruction}"
         response = llm_client.text_generation(formatted_prompt, max_new_tokens=300)
         improved_text = response['generated_text'].strip() if 'generated_text' in response else response.strip()
         return improved_text[:300] if len(improved_text) > 300 else improved_text
@@ -145,6 +146,7 @@ def main():
     scales = st.sidebar.slider("Escalado", 1, 20, 10)
     steps = st.sidebar.slider("Pasos", 1, 100, 20)
     seed = st.sidebar.number_input("Semilla", value=-1)
+    language = st.sidebar.selectbox("Idioma", ["es", "en"])
 
     if format_option == "9:16":
         width = 720
@@ -155,7 +157,7 @@ def main():
 
     if st.sidebar.button("Generar Imagen"):
         with st.spinner("Mejorando y generando imagen..."):
-            result = asyncio.run(gen(prompt, basemodel, width, height, scales, steps, seed, upscale_factor, process_upscale, process_enhancer))
+            result = asyncio.run(gen(prompt, basemodel, width, height, scales, steps, seed, upscale_factor, process_upscale, process_enhancer, language))
             image_paths = result[0]
             prompt_file = result[1]
 
@@ -197,4 +199,4 @@ def main():
             st.error(f"Error al borrar la imagen o prompt: {e}")
 
 if __name__ == "__main__":
-    main()
+    main()
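
For context, the change threads a new `language` value from the sidebar ("es" or "en") through `gen` into `improve_prompt`, which now picks a single instruction instead of appending both languages to the prompt. Below is a minimal sketch of that flow; `fake_text_generation` is a hypothetical stand-in for the app's `llm_client.text_generation` call, and the Streamlit UI and image-generation steps are omitted.

```python
# Illustrative sketch only, not the full app.py.
import asyncio

INSTRUCTION_EN = "With this idea, describe in English a detailed txt2img prompt..."
INSTRUCTION_ES = "Con esta idea, describe en español un prompt detallado de txt2img..."

def fake_text_generation(formatted_prompt: str, max_new_tokens: int = 300) -> str:
    # Hypothetical stand-in for llm_client.text_generation in app.py.
    return f"[model output for: {formatted_prompt[:60]}...]"

async def improve_prompt(prompt: str, language: str) -> str:
    # Pick the instruction matching the sidebar selection ("en" or "es").
    instruction = INSTRUCTION_EN if language == "en" else INSTRUCTION_ES
    formatted_prompt = f"{prompt}: {instruction}"
    improved_text = fake_text_generation(formatted_prompt, max_new_tokens=300).strip()
    return improved_text[:300]  # same 300-character cap as the app

async def gen(prompt: str, language: str, process_enhancer: bool = True) -> str:
    combined_prompt = prompt
    if process_enhancer:
        improved_prompt = await improve_prompt(prompt, language)
        combined_prompt = f"{prompt} {improved_prompt}"
    return combined_prompt

if __name__ == "__main__":
    # Mirrors the Streamlit handler, which wraps the coroutine in asyncio.run().
    print(asyncio.run(gen("un faro en una tormenta", language="es")))
```

Keeping the instruction choice inside `improve_prompt` means callers only need to forward the two-letter language code selected in the sidebar.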