Spaces:
Runtime error
Runtime error
Badr AlKhamissi
committed on
Commit
•
8bffede
1
Parent(s):
6e51c75
changed description and made GIF faster
Browse files
app.py
CHANGED
@@ -55,10 +55,10 @@ import warnings
|
|
55 |
TITLE="""<h1 style="font-size: 42px;" align="center">Word-To-Image: Morphing Arabic Text to a Visual Representation</h1>"""
|
56 |
|
57 |
|
58 |
-
DESCRIPTION="""This demo builds on the [Word-As-Image for Semantic Typography](https://wordasimage.github.io/Word-As-Image-Page/) work to support Arabic fonts and morphing whole words
|
59 |
|
60 |
# DESCRIPTION += '\n<p>This demo is licensed under a <a rel="license" href="http://creativecommons.org/licenses/by-sa/4.0/"> Creative Commons Attribution-ShareAlike 4.0 International License</a>.</p>'
|
61 |
-
DESCRIPTION += '\n<p>For faster inference without waiting in queue, you can <a href="https://colab.research.google.com/drive/1wobOAsnLpkIzaRxG5yac8NcV7iCrlycP"><img style="display: inline; margin-top: 0em; margin-bottom: 0em" src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a></p>'
|
62 |
|
63 |
if (SPACE_ID := os.getenv('SPACE_ID')) is not None:
|
64 |
DESCRIPTION = DESCRIPTION.replace("</p>", " ")
|
@@ -213,6 +213,7 @@ def run_main_app(semantic_concept, word, prompt_suffix, font_name, num_steps, se
|
|
213 |
# training loop
|
214 |
t_range = tqdm(range(num_iter))
|
215 |
gif_frames = []
|
|
|
216 |
for step in t_range:
|
217 |
optim.zero_grad()
|
218 |
|
@@ -224,7 +225,9 @@ def run_main_app(semantic_concept, word, prompt_suffix, font_name, num_steps, se
|
|
224 |
img = img[:, :, 3:4] * img[:, :, :3] + torch.ones(img.shape[0], img.shape[1], 3, device=device) * (
|
225 |
1 - img[:, :, 3:4])
|
226 |
img = img[:, :, :3]
|
227 |
-
|
|
|
|
|
228 |
|
229 |
|
230 |
filename = os.path.join(cfg.experiment_dir, "video-svg", f"iter{step:04d}.svg")
|
@@ -365,10 +368,10 @@ with gr.Blocks() as demo:
|
|
365 |
with gr.Row():
|
366 |
# examples
|
367 |
examples = [
|
368 |
-
["قطة", "Cat",
|
369 |
-
|
370 |
-
|
371 |
-
|
372 |
]
|
373 |
demo.queue(max_size=10, concurrency_count=1)
|
374 |
gr.Examples(examples=examples,
|
|
|
55 |
TITLE="""<h1 style="font-size: 42px;" align="center">Word-To-Image: Morphing Arabic Text to a Visual Representation</h1>"""
|
56 |
|
57 |
|
58 |
+
DESCRIPTION="""This demo builds on the [Word-As-Image for Semantic Typography](https://wordasimage.github.io/Word-As-Image-Page/) work to support Arabic fonts and morphing whole words and phrases to a visual representation of a semantic concept. This is part of an ongoing effort with the [ARBML](https://arbml.github.io/website/) community to build open-source Arabic tools using machine learning."""
|
59 |
|
60 |
# DESCRIPTION += '\n<p>This demo is licensed under a <a rel="license" href="http://creativecommons.org/licenses/by-sa/4.0/"> Creative Commons Attribution-ShareAlike 4.0 International License</a>.</p>'
|
61 |
+
DESCRIPTION += '\n<p>Note: it takes about 5 minutes for 500 iterations to generate the final GIF. For faster inference without waiting in queue, you can <a href="https://colab.research.google.com/drive/1wobOAsnLpkIzaRxG5yac8NcV7iCrlycP"><img style="display: inline; margin-top: 0em; margin-bottom: 0em" src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a></p>'
|
62 |
|
63 |
if (SPACE_ID := os.getenv('SPACE_ID')) is not None:
|
64 |
DESCRIPTION = DESCRIPTION.replace("</p>", " ")
|
|
|
213 |
# training loop
|
214 |
t_range = tqdm(range(num_iter))
|
215 |
gif_frames = []
|
216 |
+
skip = 5
|
217 |
for step in t_range:
|
218 |
optim.zero_grad()
|
219 |
|
|
|
225 |
img = img[:, :, 3:4] * img[:, :, :3] + torch.ones(img.shape[0], img.shape[1], 3, device=device) * (
|
226 |
1 - img[:, :, 3:4])
|
227 |
img = img[:, :, :3]
|
228 |
+
|
229 |
+
if step % skip == 0:
|
230 |
+
gif_frames += [img.detach().cpu().numpy()*255]
|
231 |
|
232 |
|
233 |
filename = os.path.join(cfg.experiment_dir, "video-svg", f"iter{step:04d}.svg")
|
|
|
368 |
with gr.Row():
|
369 |
# examples
|
370 |
examples = [
|
371 |
+
["قطة", "Cat", 250, 42],
|
372 |
+
["جمل جميل", "Horse", 250, 42],
|
373 |
+
["كلب", "Dog", 250, 42],
|
374 |
+
["أخطبوط", "Octopus", 250, 42],
|
375 |
]
|
376 |
demo.queue(max_size=10, concurrency_count=1)
|
377 |
gr.Examples(examples=examples,
|