Spaces: jeremyLE-Ekimetrics (Running)

Commit 62bf5f4, committed by jeremyLE-Ekimetrics
1 Parent(s): b703681

fix

Files changed:
- __pycache__/text.cpython-310.pyc +0 -0
- main.py +94 -72
- style.css +4 -0
- text.py +19 -0
__pycache__/text.cpython-310.pyc
ADDED
Binary file (1.13 kB)
main.py
CHANGED
@@ -4,48 +4,58 @@ import numpy as np
 from openai import OpenAI
 import os

-
-
 import streamlit as st
 from PIL import Image
 from diffusers import AutoPipelineForText2Image
 import random
+
+client = OpenAI()
 @st.cache_data(ttl=3600)
-def get_prompt_to_guess():
-    prompts = [
-        "Une cascade lumineuse dans une forêt enchantée",
-        "Un coucher de soleil sur une plage déserte",
-        "Un champ de tulipes multicolores à perte de vue",
-        "Un château perché sur une montagne majestueuse",
-        "Une ville futuriste illuminée par des néons",
-        "Une forêt brumeuse où les arbres semblent danser",
-        "Une soirée magique dans le ciel étoilé",
-        "Une bibliothèque remplie de livres flottants",
-        "Un paysage hivernal avec des arbres enneigés",
-        "Une ville suspendue dans les nuages",
-        "Un pont de cristal au-dessus d'une cascade étincelante",
-        "Un champ de coquelicots sous un ciel bleu azur",
-        "Un bateau en papier naviguant sur une rivière magique",
-        "Un jardin secret rempli de fleurs exotiques",
-        "Une île déserte entourée d'une mer turquoise",
-        "Une montgolfière survolant un paysage onirique",
-        "Un champ de lavande embaumant l'air",
-        "Un petit village entouré de montagnes enneigées",
-        "Une forêt tropicale avec des plantes géantes",
-        "Un phare solitaire sur une falaise abrupte",
-        "Un arc-en-ciel se reflétant dans un lac calme",
-        "Une cabane en bois cachée au milieu des arbres",
-        "Un champ de tournesols sous un soleil éclatant",
-        "Une ville médiévale entourée de murailles imposantes",
-        "Un château de glace scintillant dans la nuit",
-        "Un chemin de pierres menant à un endroit mystérieux",
-        "Une rue animée remplie de cafés et de terrasses",
-        "Une cascade gelée dans un paysage d'hiver",
-        "Un jardin japonais paisible avec un petit étang",
-        "Une aurore boréale éblouissante embrassant le ciel étoilé",
-    ]
-    return random.choice(prompts)
-    random_prompt = ["
+def get_prompt_to_guess(index):
+    # prompts = [
+    #     "Une cascade lumineuse dans une forêt enchantée",
+    #     "Un coucher de soleil sur une plage déserte",
+    #     "Un champ de tulipes multicolores à perte de vue",
+    #     "Un château perché sur une montagne majestueuse",
+    #     "Une ville futuriste illuminée par des néons",
+    #     "Une forêt brumeuse où les arbres semblent danser",
+    #     "Une soirée magique dans le ciel étoilé",
+    #     "Une bibliothèque remplie de livres flottants",
+    #     "Un paysage hivernal avec des arbres enneigés",
+    #     "Une ville suspendue dans les nuages",
+    #     "Un pont de cristal au-dessus d'une cascade étincelante",
+    #     "Un champ de coquelicots sous un ciel bleu azur",
+    #     "Un bateau en papier naviguant sur une rivière magique",
+    #     "Un jardin secret rempli de fleurs exotiques",
+    #     "Une île déserte entourée d'une mer turquoise",
+    #     "Une montgolfière survolant un paysage onirique",
+    #     "Un champ de lavande embaumant l'air",
+    #     "Un petit village entouré de montagnes enneigées",
+    #     "Une forêt tropicale avec des plantes géantes",
+    #     "Un phare solitaire sur une falaise abrupte",
+    #     "Un arc-en-ciel se reflétant dans un lac calme",
+    #     "Une cabane en bois cachée au milieu des arbres",
+    #     "Un champ de tournesols sous un soleil éclatant",
+    #     "Une ville médiévale entourée de murailles imposantes",
+    #     "Un château de glace scintillant dans la nuit",
+    #     "Un chemin de pierres menant à un endroit mystérieux",
+    #     "Une rue animée remplie de cafés et de terrasses",
+    #     "Une cascade gelée dans un paysage d'hiver",
+    #     "Un jardin japonais paisible avec un petit étang",
+    #     "Une aurore boréale éblouissante embrassant le ciel étoilé",
+    # ]
+    # return random.choice(prompts)
+    random_prompt = ["arbre",
+                     "écologie",
+                     "chat",
+                     "chien",
+                     "consultant",
+                     "artificial intelligence",
+                     "beauté",
+                     "immeuble",
+                     "plage",
+                     "cyborg",
+                     "futuriste"]
     response = client.chat.completions.create(
         model="gpt-3.5-turbo",
         messages=[
@@ -64,57 +74,69 @@ def get_model():
 def generate_image(_pipe, prompt):
     return _pipe(prompt=prompt, num_inference_steps=1, guidance_scale=0.0, seed=1).images[0]

-if "ask_answer" not in st.session_state:
-    st.session_state["ask_answer"] = False
-
-if "testing" not in st.session_state:
-    st.session_state["testing"] = False
-
-if "submit_guess" not in st.session_state:
-    st.session_state["submit_guess"] = False
-
-if "real_ask_answer" not in st.session_state:
-    st.session_state["real_ask_answer"] = False
-
 def check_prompt(prompt, prompt_to_guess):
     return prompt.strip() == prompt_to_guess.strip()

 pipe = get_model()
-
-
-
+
+with open("style.css") as f:
+    st.markdown('<style>{}</style>'.format(f.read()), unsafe_allow_html=True)
+
+from text import compare_text, format_text_html
+if "guess_number" not in st.session_state:
+    st.session_state["guess_number"] = 0

 st.title("Guess the prompt by Ekimetrics")
-st.
-
-
-
+st.markdown("""
+Game developed by Jeremy LE from Ekimetrics to test and play with the new SDXL Turbo model from stability.ai\n
+Rules: \n
+- guess the prompt (in French, with no mistakes) that generates the left image with the SDXL Turbo model\n
+- use the testing side to help you guess the prompt by trying out candidates\n
+- If a word is **correct** and **at the right place in the sentence**, the word is shown in :green[green]\n
+- If a word is **correct** but **not** at the right place in the sentence, the word is shown in :gray[gray]\n
+- If a word is **incorrect**, the word is shown in :red[red]\n
+**Disclosure**: this runs on CPU, so generation is quite slow (even with SDXL Turbo); it takes approx. 40s.
+""")
+
+next_guess = st.button("click here for next guess", use_container_width=True)
+if next_guess:
+    st.session_state["guess_number"] += 1
+    print("getting next prompt")
+    print(st.session_state["guess_number"])
+prompt = get_prompt_to_guess(st.session_state["guess_number"])
+
+col_1, col_2 = st.columns([1,1])
 with col_1:
     st.header("GUESS THE PROMPT")
-    guessed_prompt = st.
-
-    if
+    guessed_prompt = st.text_input("Input your guess prompt")
+    submit_guess = st.button("guess the prompt", use_container_width=True, type="primary")
+    if submit_guess:
         if check_prompt(guessed_prompt, prompt):
-            st.
+            st.markdown("Good prompt! Test again in 1h or click on next guess!")
         else:
-            st.
-
-
-
-
+            st.markdown("Wrong prompt!")
+            compare_dict = compare_text(guessed_prompt, prompt)
+            st.markdown(format_text_html(compare_dict))
+    get_answer = st.button("get the answer", use_container_width=True)
+    if get_answer:
+        st.markdown(f"Cheater! But here is the prompt: \n**{prompt}**")


-if "testing" not in st.session_state:
-    st.session_state["testing"] = False
-
 with col_2:
     st.header("TEST THE PROMPT")
-    testing_prompt = st.
-
+    testing_prompt = st.text_input("Input your testing prompt")
+    test_prompt = st.button("test prompt", use_container_width=True, type="primary")
+
+with col_1:
+    im_to_guess = generate_image(pipe, prompt)
+    h, w = im_to_guess.size
+    st.image(im_to_guess)
+
+with col_2:
+    if test_prompt:
         im = generate_image(pipe, testing_prompt)
         st.session_state["testing"] = False
     else:
         im = np.zeros([h,w,3])
-    st.session_state["testing"] = st.button("test the prompt")
     st.image(im)
-
+
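A note on the main.py change: get_prompt_to_guess now takes an index argument, and st.cache_data keys its cache on the function's arguments, so a new prompt is only generated when guess_number is bumped by the "next guess" button (or when the 1-hour TTL expires). Below is a minimal sketch of that mechanism, not the app's code; the prompt generator is a placeholder because the gpt-3.5-turbo messages payload is outside this diff.

# Sketch only: shows how the index argument drives st.cache_data.
# Identical arguments return the cached prompt; bumping guess_number
# (or waiting out ttl=3600) forces a fresh generation.
import random
import streamlit as st

THEMES = ["arbre", "écologie", "chat"]  # abbreviated copy of random_prompt

@st.cache_data(ttl=3600)
def get_prompt_to_guess(index):
    # Placeholder for the real call that asks gpt-3.5-turbo to write a short
    # French prompt around a randomly chosen theme word.
    return f"Un prompt autour de « {random.choice(THEMES)} »"

if "guess_number" not in st.session_state:
    st.session_state["guess_number"] = 0
if st.button("click here for next guess"):
    st.session_state["guess_number"] += 1
st.write(get_prompt_to_guess(st.session_state["guess_number"]))

The hunk header also references get_model(), whose body is not part of this diff. A hypothetical version consistent with generate_image (single step, guidance 0.0, CPU-only as stated in the disclosure) could look like the sketch below; the model id and dtype are assumptions, not code from this commit.

# Hypothetical get_model() sketch; the actual implementation is not shown in
# this commit. SDXL Turbo targets 1-step, guidance-free sampling, which
# matches num_inference_steps=1 and guidance_scale=0.0 in generate_image.
import torch
from diffusers import AutoPipelineForText2Image

def get_model():
    return AutoPipelineForText2Image.from_pretrained(
        "stabilityai/sdxl-turbo",
        torch_dtype=torch.float32,  # float32 since the Space runs on CPU
    )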
style.css
ADDED
@@ -0,0 +1,4 @@
+.fullScreenFrame > div {
+    display: flex;
+    justify-content: center;
+}
text.py
ADDED
@@ -0,0 +1,19 @@
+def compare_text(text_to_guess, text_to_match):
+    words_to_match = text_to_match.lower().split(" ")
+    words_to_guess = text_to_guess.strip().split(" ")
+    return [
+        [word, word.lower() in words_to_match, i < len(words_to_match) and word.lower() == words_to_match[i]] for i, word in enumerate(words_to_guess)
+    ]
+
+def format_one_word(word, in_sentence, correct_place):
+    if in_sentence and correct_place:
+        return f"**:green[{word}]**"
+    elif in_sentence:
+        return f"**:gray[{word}]**"
+    else:
+        return f"**:red[{word}]**"
+
+def format_text_html(compare_text_dict):
+    return " ".join([
+        format_one_word(word, in_sentence, correct_place) for word, in_sentence, correct_place in compare_text_dict
+    ])
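To see how these helpers produce the green/gray/red feedback described in the rules, here is a quick illustrative run; the guess and target sentences are made-up examples, not prompts from the app.

# Illustrative usage of the new text.py helpers.
from text import compare_text, format_text_html

target = "Un chien sur la plage"  # hidden prompt (hypothetical)
guess = "Un chat plage"           # player's guess (hypothetical)

marks = compare_text(guess, target)
# [['Un', True, True], ['chat', False, False], ['plage', True, False]]
print(format_text_html(marks))
# **:green[Un]** **:red[chat]** **:gray[plage]**

Note that compare_text returns a list of [word, in_sentence, correct_place] triples (despite the compare_text_dict parameter name in format_text_html); both sides are lowercased for the comparison, while each guessed word keeps its original casing in the colored output.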