salomonsky committed on
Commit
980ffaa
1 Parent(s): a9f1e60

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +15 -20
app.py CHANGED
@@ -10,7 +10,8 @@ import asyncio
10
  from concurrent.futures import ThreadPoolExecutor
11
 
12
  MAX_SEED = np.iinfo(np.int32).max
13
- HF_TOKEN = os.environ.get("HF_TOKEN_UPSCALER")
 
14
  client = AsyncInferenceClient()
15
  llm_client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
16
  DATA_PATH = Path("./data")
@@ -23,9 +24,6 @@ def run_async(func):
23
  result = loop.run_in_executor(executor, func)
24
  return loop.run_until_complete(result)
25
 
26
- def enable_lora(lora_add, basemodel):
27
- return lora_add if lora_add else basemodel
28
-
29
  async def generate_image(combined_prompt, model, width, height, scales, steps, seed):
30
  try:
31
  if seed == -1:
@@ -59,10 +57,8 @@ def save_prompt(prompt_text, seed):
59
  st.error(f"Error al guardar el prompt: {e}")
60
  return None
61
 
62
- async def gen(prompt, basemodel, width, height, scales, steps, seed, upscale_factor, process_upscale, lora_model, process_lora, process_enhancer):
63
- model = enable_lora(lora_model, basemodel) if process_lora else basemodel
64
  combined_prompt = prompt
65
-
66
  if process_enhancer:
67
  improved_prompt = await improve_prompt(prompt)
68
  combined_prompt = f"{prompt} {improved_prompt}"
@@ -71,7 +67,7 @@ async def gen(prompt, basemodel, width, height, scales, steps, seed, upscale_fac
71
  seed = random.randint(0, MAX_SEED)
72
  seed = int(seed)
73
  progress_bar = st.progress(0)
74
- image, seed = await generate_image(combined_prompt, model, width, height, scales, steps, seed)
75
  progress_bar.progress(50)
76
 
77
  if isinstance(image, str) and image.startswith("Error"):
@@ -98,11 +94,12 @@ async def gen(prompt, basemodel, width, height, scales, steps, seed, upscale_fac
98
 
99
  async def improve_prompt(prompt):
100
  try:
101
- instruction = ("With this idea, describe in English a detailed txt2img prompt in 500 characters at most, add ilumination, admosphere, cinematic and characters...")
102
- formatted_prompt = f"{prompt}: {instruction}"
103
- response = llm_client.text_generation(formatted_prompt, max_new_tokens=500)
 
104
  improved_text = response['generated_text'].strip() if 'generated_text' in response else response.strip()
105
- return improved_text[:500] if len(improved_text) > 500 else improved_text
106
  except Exception as e:
107
  return f"Error mejorando el prompt: {e}"
108
 
@@ -137,15 +134,13 @@ def delete_image(image_path):
137
 
138
  def main():
139
  st.set_page_config(layout="wide")
140
- st.title("FLUX +prompt/enhancer +upscaler +LORA")
141
-
142
- prompt = st.sidebar.text_input("Descripción de la imagen", max_chars=500)
143
- process_enhancer = st.sidebar.checkbox("Mejorar Prompt", value=False)
144
  basemodel = st.sidebar.selectbox("Modelo Base", ["black-forest-labs/FLUX.1-schnell", "black-forest-labs/FLUX.1-DEV"])
145
- lora_model = st.sidebar.selectbox("LORA Realismo", ["Shakker-Labs/FLUX.1-dev-LoRA-add-details", "XLabs-AI/flux-RealismLora"])
146
  format_option = st.sidebar.selectbox("Formato", ["9:16", "16:9"])
147
- process_lora = st.sidebar.checkbox("Procesar LORA", value=False)
148
- process_upscale = st.sidebar.checkbox("Procesar Escalador", value=False)
149
  upscale_factor = st.sidebar.selectbox("Factor de Escala", [2, 4, 8], index=0)
150
  scales = st.sidebar.slider("Escalado", 1, 20, 10)
151
  steps = st.sidebar.slider("Pasos", 1, 100, 20)
@@ -160,7 +155,7 @@ def main():
160
 
161
  if st.sidebar.button("Generar Imagen"):
162
  with st.spinner("Mejorando y generando imagen..."):
163
- result = asyncio.run(gen(prompt, basemodel, width, height, scales, steps, seed, upscale_factor, process_upscale, lora_model, process_lora, process_enhancer))
164
  image_paths = result[0]
165
  prompt_file = result[1]
166
 
 
10
  from concurrent.futures import ThreadPoolExecutor
11
 
12
  MAX_SEED = np.iinfo(np.int32).max
13
+ HF_TOKEN = os.environ.get("HF_TOKEN")
14
+ HF_TOKEN_UPSCALER = os.environ.get("HF_TOKEN_UPSCALER")
15
  client = AsyncInferenceClient()
16
  llm_client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
17
  DATA_PATH = Path("./data")
 
24
  result = loop.run_in_executor(executor, func)
25
  return loop.run_until_complete(result)
26
 
 
 
 
27
  async def generate_image(combined_prompt, model, width, height, scales, steps, seed):
28
  try:
29
  if seed == -1:
 
57
  st.error(f"Error al guardar el prompt: {e}")
58
  return None
59
 
60
+ async def gen(prompt, basemodel, width, height, scales, steps, seed, upscale_factor, process_upscale, process_enhancer):
 
61
  combined_prompt = prompt
 
62
  if process_enhancer:
63
  improved_prompt = await improve_prompt(prompt)
64
  combined_prompt = f"{prompt} {improved_prompt}"
 
67
  seed = random.randint(0, MAX_SEED)
68
  seed = int(seed)
69
  progress_bar = st.progress(0)
70
+ image, seed = await generate_image(combined_prompt, basemodel, width, height, scales, steps, seed)
71
  progress_bar.progress(50)
72
 
73
  if isinstance(image, str) and image.startswith("Error"):
 
94
 
95
  async def improve_prompt(prompt):
96
  try:
97
+ instruction_en = "With this idea, describe in English a detailed txt2img prompt in 500 characters at most, add illumination, atmosphere, cinematic elements, and characters..."
98
+ instruction_es = "Con esta idea, describe en español un prompt detallado de txt2img en un máximo de 500 caracteres, añadiendo iluminación, atmósfera, elementos cinematográficos y personajes..."
99
+ formatted_prompt = f"{prompt}: {instruction_en} {instruction_es}"
100
+ response = llm_client.text_generation(formatted_prompt, max_new_tokens=300)
101
  improved_text = response['generated_text'].strip() if 'generated_text' in response else response.strip()
102
+ return improved_text[:300] if len(improved_text) > 300 else improved_text
103
  except Exception as e:
104
  return f"Error mejorando el prompt: {e}"
105
 
 
134
 
135
  def main():
136
  st.set_page_config(layout="wide")
137
+ st.title("FLUX with prompt enhancer and upscaler")
138
+
139
+ prompt = st.sidebar.text_input("Descripción de la imagen", max_chars=200)
140
+ process_enhancer = st.sidebar.checkbox("Mejorar Prompt", value=True)
141
  basemodel = st.sidebar.selectbox("Modelo Base", ["black-forest-labs/FLUX.1-schnell", "black-forest-labs/FLUX.1-DEV"])
 
142
  format_option = st.sidebar.selectbox("Formato", ["9:16", "16:9"])
143
+ process_upscale = st.sidebar.checkbox("Procesar Escalador", value=True)
 
144
  upscale_factor = st.sidebar.selectbox("Factor de Escala", [2, 4, 8], index=0)
145
  scales = st.sidebar.slider("Escalado", 1, 20, 10)
146
  steps = st.sidebar.slider("Pasos", 1, 100, 20)
 
155
 
156
  if st.sidebar.button("Generar Imagen"):
157
  with st.spinner("Mejorando y generando imagen..."):
158
+ result = asyncio.run(gen(prompt, basemodel, width, height, scales, steps, seed, upscale_factor, process_upscale, process_enhancer))
159
  image_paths = result[0]
160
  prompt_file = result[1]
161