Commit · 2826168
1 Parent(s): 7ac8e11
Commit message: "perchè un'altra televisione è impossibile.. viva lammerda" (Italian: "because another kind of television is impossible.. long live the crap")

Files changed:
- app.py (+3 -9)
- config/config.yaml (+1 -1)
- src/utils.py (+10 -23)
app.py CHANGED

@@ -13,13 +13,12 @@ def main():
     options = ['1', '2', '3', '4']
     class_names = config['dataset'][config['dataset']['name']]['class_names']
     data_dir = os.path.join(config['dataset']['path'], config['dataset']['name'])
-    id_generator = UserID()
 
     with gr.Blocks(theme=gr.themes.Glass(), css=css) as demo:
         # Main App Components
         title = gr.Markdown("# Saliency evaluation - experiment 1")
         user_state = gr.State(0)
-        user_id = gr.State(
+        #user_id = gr.State(load_global_variable())
         answers = gr.State([])
 
         with gr.Row():
@@ -122,17 +121,13 @@ def main():
             dropdown3 = gr.Dropdown(choices=options, label="sidu")
             dropdown4 = gr.Dropdown(choices=options, label="rise")
             return dropdown1, dropdown2, dropdown3, dropdown4
-
-        def init(request: gr.Request):
-            user_id.value = id_generator.increment()
-            return user_id
 
         def redirect():
             pass
 
         def register_answers(answers):
             experiment_dir = config['results']['exp1_dir']
-            save_results(
+            save_results( experiment_dir, answers)
 
         def add_answer(dropdown1,dropdown2,dropdown3,dropdown4, answers):
             rank = [dropdown1,dropdown2,dropdown3,dropdown4]
@@ -172,8 +167,7 @@ def main():
         ).then(
             redirect, js="window.location = 'https://marcoparola.github.io/saliency-evaluation-app/end'")
 
-        demo.load(
-
+        demo.load()
     demo.launch()
 
 if __name__ == "__main__":
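Net effect of the app.py hunks: the UserID generator and the init(request) callback previously hooked up through demo.load(...) are removed, user_id becomes a commented-out gr.State, and register_answers now simply forwards the collected answers to save_results(experiment_dir, answers). A minimal sketch of that call path follows; the import path, config loading, and button wiring are illustrative assumptions and are not shown in this diff:

import gradio as gr
import yaml

# Assumptions for this sketch: config is loaded the same way utils.py loads it,
# and save_results is importable from src/utils.py.
config = yaml.safe_load(open("./config/config.yaml"))
from src.utils import save_results

def register_answers(answers):
    experiment_dir = config['results']['exp1_dir']
    save_results(experiment_dir, answers)   # per-user folder is now derived inside save_results

with gr.Blocks() as demo:
    answers = gr.State([])                  # filled as the user ranks images
    submit_btn = gr.Button("Submit")        # hypothetical button name
    submit_btn.click(register_answers, inputs=[answers], outputs=[])

demo.launch()

This keeps all user-identification logic inside utils.py, so the app no longer needs shared mutable state (UserID/init) just to label a session.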
config/config.yaml CHANGED

@@ -15,7 +15,7 @@ dataset:
   name: intel_image
   path: data
   intel_image:
-    n_classes:
+    n_classes: 6
     class_names: ['BUILDING', 'FOREST', 'GLACIER', 'MOUNTAIN', 'SEA', 'STREET']
   imagenette:
     n_classes: 10
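The YAML change only fills in the missing class count for intel_image (6, matching the six entries of class_names). A small sanity-check sketch, using the same access pattern app.py already uses for this section of the config:

import yaml

config = yaml.safe_load(open("./config/config.yaml"))

name = config['dataset']['name']              # "intel_image"
dataset_cfg = config['dataset'][name]
class_names = dataset_cfg['class_names']      # ['BUILDING', ..., 'STREET']

# After this commit the declared count and the label list agree:
assert dataset_cfg['n_classes'] == len(class_names) == 6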
src/utils.py CHANGED

@@ -3,6 +3,7 @@ import pandas as pd
 from huggingface_hub import HfApi, HfFolder
 import yaml
 import numpy as np
+import time
 
 config = yaml.safe_load(open("./config/config.yaml"))
 
@@ -26,42 +27,28 @@ def load_example_images(class_idx, data_dir, max_images=16):
     images = [os.path.join(path, images[id]) for id in ids]
     return images
 
-
 # Function to load words based on global variable
 def load_words(idx):
     words = [f"word_{idx}_{i}" for i in range(20)]
     return words
 
 # Function to save results and increment global variable
-def save_results(
-
-
-    # convert answers (list of list) to a pandas dataframe
-    df = pd.DataFrame(answers, columns=config['saliency_methods'])
+def save_results(experiment_dir, answers):
+    user_id = time.time()
+    folder = os.path.join(config['results']['save_dir'], experiment_dir, str(user_id))
     if not os.path.exists(folder):
         os.makedirs(folder)
-    df.to_csv(os.path.join(folder, 'results.csv'), index=False)
-    print(f"Results saved to {folder}", df)
-
-    '''
-    filename = "results.txt"
-    print('ooooooo', global_counter)
-    print(dropdowns)
-    str_dropdowns = str(dropdowns)
-    # remove the curly braces
-    dropdowns = str_dropdowns[1:-1]
-    # split by comma and select the number contained in the string
-    dropdowns = [r.split(":")[1].strip().replace("'", "") for r in dropdowns.split(",")]
 
-
-
-    f.
-
+    # answers is a list of lists
+    for idx, answer in enumerate(answers):
+        filename = os.path.join(folder, f"results_{idx}.txt")
+        with open(filename, 'w') as f:
+            f.write("\n".join(answer))
 
     # Upload the file to Hugging Face Hub
     api = HfApi()
     token = os.getenv("HUGGINGFACE_TOKEN")
-    #
+    #token = HfFolder.get_token()
 
     if not token:
         print("Token not found. Please login to Hugging Face.")
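The rewritten save_results drops the pandas/CSV path and the commented-out dropdown parsing: it derives a per-user folder from time.time() and writes one plain-text file per answer before attempting the Hub upload. A hedged usage sketch; the example answers and the import path are illustrative assumptions, and the save_dir/exp1_dir values come from config.yaml (not shown in this diff):

import yaml
from src.utils import save_results   # import path assumed for this sketch

config = yaml.safe_load(open("./config/config.yaml"))

# One inner list per evaluated image: the user's ranking of the four
# saliency-method dropdowns (the last two are labelled "sidu" and "rise" in app.py).
answers = [
    ['1', '3', '2', '4'],
    ['2', '1', '4', '3'],
]

# Writes results_0.txt and results_1.txt under
# <results.save_dir>/<results.exp1_dir>/<time.time()>/ and then tries the
# Hugging Face Hub upload, which needs HUGGINGFACE_TOKEN in the environment.
save_results(config['results']['exp1_dir'], answers)

Note that time.time() returns a sub-second float, so concurrent users are unlikely (though not guaranteed) to collide on the same folder name.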