salomonsky committed
Commit cfc9459
Parent: 69ea3c5

Update app.py

Files changed (1)
  1. app.py +10 -5
app.py CHANGED

@@ -42,7 +42,6 @@ def get_upscale_finegrain(prompt, img_path, upscale_factor):
     except Exception as e:
         return None
 
-# Función para guardar el prompt
 def save_prompt(prompt_text, seed):
     try:
         prompt_file_path = DATA_PATH / f"prompt_{seed}.txt"
@@ -53,7 +52,6 @@ def save_prompt(prompt_text, seed):
         st.error(f"Error al guardar el prompt: {e}")
         return None
 
-# Función principal de generación de imágenes
 async def gen(prompt, basemodel, width, height, scales, steps, seed, upscale_factor, process_upscale, lora_model, process_lora):
     model = enable_lora(lora_model, basemodel) if process_lora else basemodel
     improved_prompt = await improve_prompt(prompt)
@@ -90,7 +88,7 @@ async def gen(prompt, basemodel, width, height, scales, steps, seed, upscale_fac
 
 async def improve_prompt(prompt):
     try:
-        instruction = ("With this idea, describe in English a detailed txt2img prompt...")
+        instruction = ("With this idea, describe in English a detailed txt2img prompt in 200 character at most...")
         formatted_prompt = f"{prompt}: {instruction}"
         response = llm_client.text_generation(formatted_prompt, max_new_tokens=200)
         improved_text = response['generated_text'].strip() if 'generated_text' in response else response.strip()
@@ -98,6 +96,15 @@ async def improve_prompt(prompt):
     except Exception as e:
         return f"Error mejorando el prompt: {e}"
 
+def save_image(image, seed):
+    try:
+        image_path = DATA_PATH / f"image_{seed}.jpg"
+        image.save(image_path, format="JPEG")
+        return image_path
+    except Exception as e:
+        st.error(f"Error al guardar la imagen: {e}")
+        return None
+
 def get_storage():
     files = [{"name": str(file.resolve()), "size": file.stat().st_size} for file in DATA_PATH.glob("*.jpg") if file.is_file()]
     usage = sum([f['size'] for f in files])
@@ -123,12 +130,10 @@ def run_gen():
     prompt_to_use = st.session_state.get('improved_prompt', prompt)
     return loop.run_until_complete(gen(prompt_to_use, basemodel, width, height, scales, steps, seed, upscale_factor, process_upscale, lora_model, process_lora))
 
-# Configuración de la página y sidebar
 st.set_page_config(layout="wide")
 st.title("Generador de Imágenes FLUX")
 prompt = st.sidebar.text_input("Descripción de la imagen", max_chars=200)
 
-# Opciones avanzadas
 with st.sidebar.expander("Opciones avanzadas", expanded=False):
     basemodel = st.selectbox("Modelo Base", ["black-forest-labs/FLUX.1-schnell", "black-forest-labs/FLUX.1-DEV"])
     lora_model = st.selectbox("LORA Realismo", ["Shakker-Labs/FLUX.1-dev-LoRA-add-details", "XLabs-AI/flux-RealismLora"])
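
For context, below is a minimal, self-contained sketch of the save_image pattern this commit introduces, runnable outside Streamlit. The Pillow import, the placeholder image, the DATA_PATH value, and the print-based error reporting are assumptions made for illustration only; the app defines its own DATA_PATH and reports errors through st.error.

# Hypothetical usage sketch, not part of the commit: exercise the save_image
# pattern with a placeholder Pillow image.
from pathlib import Path
from PIL import Image

DATA_PATH = Path("./data")                      # assumed location for this sketch
DATA_PATH.mkdir(parents=True, exist_ok=True)

def save_image(image, seed):
    try:
        image_path = DATA_PATH / f"image_{seed}.jpg"
        image.save(image_path, format="JPEG")   # JPEG output matches the *.jpg glob used by get_storage()
        return image_path
    except Exception as e:
        print(f"Error al guardar la imagen: {e}")   # the app uses st.error here
        return None

if __name__ == "__main__":
    demo = Image.new("RGB", (64, 64), color="gray")  # placeholder image
    print(save_image(demo, 12345))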