salomonsky committed on
Commit
7bf5a19
1 Parent(s): 3a060b2

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +73 -76
app.py CHANGED
@@ -9,13 +9,16 @@ from gradio_client import Client, handle_file
9
  import asyncio
10
  from concurrent.futures import ThreadPoolExecutor
11
  import yaml
 
 
12
 
 
13
  try:
14
  with open("config.yaml", "r") as file:
15
  credentials = yaml.safe_load(file)
16
  except Exception as e:
17
  st.error(f"Error al cargar el archivo de configuración: {e}")
18
- credentials = {"username": "", "password": ""}
19
 
20
  MAX_SEED = np.iinfo(np.int32).max
21
  HF_TOKEN_UPSCALER = os.environ.get("HF_TOKEN_UPSCALER")
@@ -32,10 +35,8 @@ def run_async(func):
32
  return loop.run_until_complete(result)
33
 
34
  async def generate_image(combined_prompt, model, width, height, scales, steps, seed):
 
35
  try:
36
- if seed == -1:
37
- seed = random.randint(0, MAX_SEED)
38
- seed = int(seed)
39
  image = await client.text_to_image(
40
  prompt=combined_prompt, height=height, width=width, guidance_scale=scales,
41
  num_inference_steps=steps, model=model
@@ -51,7 +52,7 @@ def get_upscale_finegrain(prompt, img_path, upscale_factor):
51
  input_image=handle_file(img_path), prompt=prompt, upscale_factor=upscale_factor
52
  )
53
  return result[1] if isinstance(result, list) and len(result) > 1 else None
54
- except Exception as e:
55
  return None
56
 
57
  def save_prompt(prompt_text, seed):
@@ -65,15 +66,10 @@ def save_prompt(prompt_text, seed):
65
  return None
66
 
67
  async def gen(prompt, basemodel, width, height, scales, steps, seed, upscale_factor, process_upscale, process_enhancer, language):
68
- combined_prompt = prompt
69
- if process_enhancer:
70
- improved_prompt = await improve_prompt(prompt, language)
71
- combined_prompt = f"{prompt} {improved_prompt}"
72
-
73
- if seed == -1:
74
- seed = random.randint(0, MAX_SEED)
75
- seed = int(seed)
76
  progress_bar = st.progress(0)
 
77
  image, seed = await generate_image(combined_prompt, basemodel, width, height, scales, steps, seed)
78
  progress_bar.progress(50)
79
 
@@ -87,29 +83,26 @@ async def gen(prompt, basemodel, width, height, scales, steps, seed, upscale_fac
87
  if process_upscale:
88
  upscale_image_path = get_upscale_finegrain(combined_prompt, image_path, upscale_factor)
89
  if upscale_image_path:
90
- upscale_image = Image.open(upscale_image_path)
91
- upscale_image.save(DATA_PATH / f"upscale_image_{seed}.jpg", format="JPEG")
92
  progress_bar.progress(100)
93
- image_path.unlink()
94
  return [str(DATA_PATH / f"upscale_image_{seed}.jpg"), str(prompt_file_path)]
95
- else:
96
- progress_bar.empty()
97
- return [str(image_path), str(prompt_file_path)]
98
- else:
99
- progress_bar.progress(100)
100
- return [str(image_path), str(prompt_file_path)]
101
 
102
  async def improve_prompt(prompt, language):
103
- try:
104
- instruction_en = "With this idea, describe in English a detailed txt2img prompt in 500 characters at most, add illumination, atmosphere, cinematic elements, and characters if needed..."
105
- instruction_es = "Con esta idea, describe en español un prompt detallado de txt2img en un máximo de 500 caracteres, con iluminación, atmósfera, elementos cinematográficos y en su caso personajes..."
106
- instruction = instruction_en if language == "en" else instruction_es
107
- formatted_prompt = f"{prompt}: {instruction}"
108
- response = llm_client.text_generation(formatted_prompt, max_new_tokens=500)
109
- improved_text = response['generated_text'].strip() if 'generated_text' in response else response.strip()
110
- return improved_text[:500] if len(improved_text) > 500 else improved_text
111
- except Exception as e:
112
- return f"Error mejorando el prompt: {e}"
 
113
 
114
  def save_image(image, seed):
115
  try:
@@ -123,8 +116,8 @@ def save_image(image, seed):
123
  def get_storage():
124
  files = [file for file in DATA_PATH.glob("*.jpg") if file.is_file()]
125
  files.sort(key=lambda x: x.stat().st_mtime, reverse=True)
126
- usage = sum([file.stat().st_size for file in files])
127
- return [str(file.resolve()) for file in files], f"Uso total: {usage/(1024.0 ** 3):.3f}GB"
128
 
129
  def get_prompts():
130
  prompt_files = [file for file in DATA_PATH.glob("*.txt") if file.is_file()]
@@ -142,8 +135,31 @@ def delete_image(image_path):
142
 
143
  def swap_faces(image_path):
144
  try:
145
- swapped_image_path = image_path # Simulación del swap; aquí debes implementar la lógica real.
146
- return swapped_image_path
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
147
  except Exception as e:
148
  st.error(f"Error en el face swap: {e}")
149
  return None
@@ -161,54 +177,35 @@ def main():
161
  steps = st.sidebar.slider("Pasos", 1, 100, 20)
162
  seed = st.sidebar.number_input("Semilla", value=-1)
163
 
164
- if format_option == "9:16":
165
- width = 720
166
- height = 1280
167
- else:
168
- width = 1280
169
- height = 720
170
 
171
  if st.sidebar.button("Generar Imagen"):
172
  with st.spinner("Mejorando y generando imagen..."):
173
  result = asyncio.run(gen(prompt, basemodel, width, height, scales, steps, seed, upscale_factor, process_upscale, process_enhancer, language))
174
- image_paths = result[0]
175
- prompt_file = result[1]
176
 
177
  st.write(f"Image paths: {image_paths}")
178
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
179
  if image_paths:
180
- if Path(image_paths).exists():
181
- st.image(image_paths, caption="Imagen Generada")
182
- else:
183
- st.error("El archivo de imagen no existe.")
184
-
185
- if prompt_file and Path(prompt_file).exists():
186
- prompt_text = Path(prompt_file).read_text()
187
- st.write(f"Prompt utilizado: {prompt_text}")
188
- else:
189
- st.write("El archivo del prompt no está disponible.")
190
-
191
- files, usage = get_storage()
192
- st.text(usage)
193
- cols = st.columns(6)
194
- prompts = get_prompts()
195
-
196
- for idx, file in enumerate(files):
197
- with cols[idx % 6]:
198
- image = Image.open(file)
199
- prompt_file = prompts.get(Path(file).stem.replace("image_", ""), None)
200
- prompt_text = Path(prompt_file).read_text() if prompt_file else "No disponible"
201
-
202
- st.image(image, caption=f"Imagen {idx+1}")
203
- st.write(f"Prompt: {prompt_text}")
204
-
205
- if st.button(f"Borrar Imagen {idx+1}", key=f"delete_{idx+1}"):
206
- delete_image(file)
207
-
208
- if st.button(f"Swap Face {idx+1}", key=f"swap_{idx+1}"):
209
- swapped_image_path = swap_faces(file)
210
- if swapped_image_path:
211
- st.image(swapped_image_path, caption=f"Imagen {idx+1} con Face Swap")
212
 
213
  if __name__ == "__main__":
214
  main()
 
9
  import asyncio
10
  from concurrent.futures import ThreadPoolExecutor
11
  import yaml
12
+ import cv2
13
+ import dlib
14
 
15
+ # Cargar configuración
16
  try:
17
  with open("config.yaml", "r") as file:
18
  credentials = yaml.safe_load(file)
19
  except Exception as e:
20
  st.error(f"Error al cargar el archivo de configuración: {e}")
21
+ credentials = {"username": "", "password": ""}
22
 
23
  MAX_SEED = np.iinfo(np.int32).max
24
  HF_TOKEN_UPSCALER = os.environ.get("HF_TOKEN_UPSCALER")
 
35
  return loop.run_until_complete(result)
36
 
37
  async def generate_image(combined_prompt, model, width, height, scales, steps, seed):
38
+ seed = int(seed) if seed != -1 else random.randint(0, MAX_SEED)
39
  try:
 
 
 
40
  image = await client.text_to_image(
41
  prompt=combined_prompt, height=height, width=width, guidance_scale=scales,
42
  num_inference_steps=steps, model=model
 
52
  input_image=handle_file(img_path), prompt=prompt, upscale_factor=upscale_factor
53
  )
54
  return result[1] if isinstance(result, list) and len(result) > 1 else None
55
+ except Exception:
56
  return None
57
 
58
  def save_prompt(prompt_text, seed):
 
66
  return None
67
 
68
  async def gen(prompt, basemodel, width, height, scales, steps, seed, upscale_factor, process_upscale, process_enhancer, language):
69
+ combined_prompt = f"{prompt} {await improve_prompt(prompt, language) if process_enhancer else ''}".strip()
70
+ seed = int(seed) if seed != -1 else random.randint(0, MAX_SEED)
 
 
 
 
 
 
71
  progress_bar = st.progress(0)
72
+
73
  image, seed = await generate_image(combined_prompt, basemodel, width, height, scales, steps, seed)
74
  progress_bar.progress(50)
75
 
 
83
  if process_upscale:
84
  upscale_image_path = get_upscale_finegrain(combined_prompt, image_path, upscale_factor)
85
  if upscale_image_path:
86
+ Image.open(upscale_image_path).save(DATA_PATH / f"upscale_image_{seed}.jpg", format="JPEG")
 
87
  progress_bar.progress(100)
88
+ image_path.unlink()
89
  return [str(DATA_PATH / f"upscale_image_{seed}.jpg"), str(prompt_file_path)]
90
+
91
+ progress_bar.progress(100)
92
+ return [str(image_path), str(prompt_file_path)]
 
 
 
93
 
94
async def improve_prompt(prompt, language):
    """Expand *prompt* into a detailed txt2img prompt via the LLM client.

    Parameters:
        prompt: The user's base idea.
        language: "es" selects the Spanish instruction text; any other
            value selects the English one.

    Returns:
        The model's improved prompt, stripped and truncated to 500 characters.
    """
    instruction = (
        "Con esta idea, describe en español un prompt detallado de txt2img en un máximo de 500 caracteres, "
        "con iluminación, atmósfera, elementos cinematográficos y en su caso personajes..."
        if language == "es" else
        "With this idea, describe in English a detailed txt2img prompt in 500 characters at most, "
        "add illumination, atmosphere, cinematic elements, and characters if needed..."
    )
    formatted_prompt = f"{prompt}: {instruction}"
    response = llm_client.text_generation(formatted_prompt, max_new_tokens=500)
    # BUG FIX: the previous code tested `'generated_text' in response`, which is
    # a *substring* search when `response` is a plain string (the common return
    # type of text_generation). A prompt containing "generated_text" would then
    # hit `str.get` and raise AttributeError. Dispatch on the actual type.
    if isinstance(response, dict):
        improved_text = response.get('generated_text', '').strip()
    else:
        improved_text = str(response).strip()
    # Enforce the 500-character contract regardless of model output length.
    return improved_text[:500]
106
 
107
  def save_image(image, seed):
108
  try:
 
116
  def get_storage():
117
  files = [file for file in DATA_PATH.glob("*.jpg") if file.is_file()]
118
  files.sort(key=lambda x: x.stat().st_mtime, reverse=True)
119
+ usage = sum(file.stat().st_size for file in files)
120
+ return [str(file.resolve()) for file in files], f"Uso total: {usage / (1024.0 ** 3):.3f}GB"
121
 
122
  def get_prompts():
123
  prompt_files = [file for file in DATA_PATH.glob("*.txt") if file.is_file()]
 
135
 
136
def swap_faces(image_path):
    """Naively swap the two faces detected in the image at *image_path*.

    Requires exactly two detected faces. The raw pixels inside each face's
    convex-hull region are copied onto the other region (no alignment or
    warping is performed). The result is written next to the other outputs
    as ``swapped_image_<stem>.jpg``.

    Returns:
        The swapped image path as a string, or None on any failure
        (an error is shown in the Streamlit UI).
    """
    try:
        img = cv2.imread(str(image_path))
        face_detector = dlib.get_frontal_face_detector()
        landmark_model = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
        detections = face_detector(img)

        if len(detections) != 2:
            st.error("Se necesitan exactamente dos caras para realizar el intercambio.")
            return None

        # 68-point landmark coordinates for each face, as (N, 2) arrays.
        shapes = (landmark_model(img, detections[0]), landmark_model(img, detections[1]))
        coords = [np.array([[pt.x, pt.y] for pt in s.parts()]) for s in shapes]

        # Binary convex-hull masks delimiting each face region.
        masks = []
        for pts in coords:
            region_mask = np.zeros(img.shape[:2], dtype=np.uint8)
            cv2.fillConvexPoly(region_mask, cv2.convexHull(pts), 255)
            masks.append(region_mask)

        # Snapshot both face regions BEFORE mutating the image, then copy
        # each region's pixels into the other. NOTE(review): this is a raw
        # pixel copy — the faces are not warped to fit each other's hulls.
        face_a = cv2.bitwise_and(img, img, mask=masks[0])
        face_b = cv2.bitwise_and(img, img, mask=masks[1])
        img[masks[0] == 255] = face_b[masks[0] == 255]
        img[masks[1] == 255] = face_a[masks[1] == 255]

        out_path = DATA_PATH / f"swapped_image_{Path(image_path).stem}.jpg"
        cv2.imwrite(str(out_path), img)
        return str(out_path)
    except Exception as e:
        st.error(f"Error en el face swap: {e}")
        return None
 
177
  steps = st.sidebar.slider("Pasos", 1, 100, 20)
178
  seed = st.sidebar.number_input("Semilla", value=-1)
179
 
180
+ width, height = (720, 1280) if format_option == "9:16" else (1280, 720)
 
 
 
 
 
181
 
182
  if st.sidebar.button("Generar Imagen"):
183
  with st.spinner("Mejorando y generando imagen..."):
184
  result = asyncio.run(gen(prompt, basemodel, width, height, scales, steps, seed, upscale_factor, process_upscale, process_enhancer, language))
185
+ image_paths, prompt_file = result[0], result[1]
 
186
 
187
  st.write(f"Image paths: {image_paths}")
188
 
189
+ if image_paths and Path(image_paths).exists():
190
+ st.image(image_paths, caption="Imagen generada", use_column_width=True)
191
+ if prompt_file and Path(prompt_file).exists():
192
+ with open(prompt_file, "r") as file:
193
+ st.text_area("Prompt utilizado", file.read(), height=150)
194
+
195
+ st.sidebar.header("Galería de Imágenes")
196
+ image_storage, usage = get_storage()
197
+ st.sidebar.write(usage)
198
+ for img_path in image_storage:
199
+ st.sidebar.image(img_path, width=100)
200
+
201
+ if st.sidebar.button("Borrar Imagen"):
202
+ delete_image(image_paths)
203
+
204
+ if st.sidebar.button("Intercambiar Caras"):
205
  if image_paths:
206
+ swapped_path = swap_faces(image_paths)
207
+ if swapped_path:
208
+ st.image(swapped_path, caption="Imagen con caras intercambiadas", use_column_width=True)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
209
 
210
  if __name__ == "__main__":
211
  main()