import yaml
from yaml import safe_load
import streamlit as st
from pathlib import Path
from huggingface_hub import CommitScheduler, login, hf_hub_download

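# Local working directory for the evaluation databases; this is the folder
# the CommitScheduler below mirrors to the Hugging Face dataset repo.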
EVAL_DATABASE_DIR = Path("data")
EVAL_DATABASE_DIR.mkdir(parents=True, exist_ok=True)

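# YAML files backing the general and task-oriented leaderboards. Plain string
# paths are used because the same value is later passed to hf_hub_download()
# as the repo-relative filename; the commented Path-based variants below are
# an equivalent alternative.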
GEN_EVAL_DATABASE_PATH = 'data/general_eval_database.yaml'
TASK_EVAL_DATABASE_PATH = 'data/task_oriented_eval_database.yaml'
# GEN_EVAL_DATABASE_PATH = EVAL_DATABASE_DIR / f"general_eval_database.yaml"
# TASK_EVAL_DATABASE_PATH = EVAL_DATABASE_DIR / f"task_oriented_eval_database.yaml"

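# Background scheduler that commits the contents of EVAL_DATABASE_DIR to the
# dataset repo roughly every minute (`every` is expressed in minutes). The
# bare repo name resolves under the namespace of the authenticated token.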
EVAL_DATABASE_UPDATE_SCHEDULER = CommitScheduler(
    repo_id="try-before-you-bias-data",
    repo_type="dataset",
    folder_path=EVAL_DATABASE_DIR,
    path_in_repo="data",
    every=1,
)

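# Module-level globals acting as shared state for the current Streamlit
# session: user/run metadata, the computed bias metrics, and the generated
# images and captions shown in the UI.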
USERNAME = "ANONYMOUS"
EVAL_ID = None
MODEL = None
MODEL_TYPE = None
NO_SAMPLES = None
RESOLUTION = None
INFERENCE_STEPS = None
GEN_OBJECTS = None
GEN_ACTIONS = None
GEN_OCCUPATIONS = None
TASK_TARGET = None
DIST_BIAS = None
HALLUCINATION = None
MISS_RATE = None
DATE = None
TIME = None
RUN_TIME = None

EVAL_METRICS = None
OBJECT_IMAGES = []
OCCUPATION_IMAGES = []
TASK_IMAGES = []
OBJECT_CAPTIONS = None
OCCUPATION_CAPTIONS = None
TASK_CAPTIONS = None
TASK_COCOIDs = None

OBJECT_IMAGES_IN_UI = False
OCCUPATION_IMAGES_IN_UI = False
TASK_IMAGES_IN_UI = False
CURRENT_EVAL_TYPE = None


def update_evaluation_table(evalType, debugging):
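    """Append the just-completed evaluation to the relevant YAML database.

    With `debugging` enabled, the current state is echoed to the UI first.
    The updated file is written inside EVAL_DATABASE_DIR, which the
    CommitScheduler pushes to the Hub dataset repo in the background.
    """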
    global USERNAME
    global EVAL_ID
    global MODEL
    global MODEL_TYPE
    global NO_SAMPLES
    global RESOLUTION
    global INFERENCE_STEPS
    global GEN_OBJECTS
    global GEN_ACTIONS
    global GEN_OCCUPATIONS
    global TASK_TARGET
    global DIST_BIAS
    global HALLUCINATION
    global MISS_RATE
    global DATE
    global TIME
    global RUN_TIME
    global CURRENT_EVAL_TYPE
    global GEN_EVAL_DATABASE_PATH
    global TASK_EVAL_DATABASE_PATH

    if debugging:
        st.write("Username: ", USERNAME)
        st.write("EVAL_ID: ", EVAL_ID)
        st.write("MODEL: ", MODEL)
        st.write("MODEL_TYPE: ", MODEL_TYPE)
        st.write("NO_SAMPLES: ", NO_SAMPLES)
        st.write("RESOLUTION: ", RESOLUTION)
        st.write("INFERENCE_STEPS: ", INFERENCE_STEPS)
        st.write("GEN_OBJECTS: ", GEN_OBJECTS)
        st.write("GEN_ACTIONS: ", GEN_ACTIONS)
        st.write("GEN_OCCUPATIONS: ", GEN_OCCUPATIONS)
        st.write("TASK_TARGET: ", TASK_TARGET)
        st.write("DIST_BIAS: ", DIST_BIAS)
        st.write("HALLUCINATION: ", HALLUCINATION)
        st.write("MISS_RATE: ", MISS_RATE)
        st.write("DATE: ", DATE)
        st.write("TIME: ", TIME)
        st.write("RUN_TIME: ", RUN_TIME)

    newEvaluationData = None
    if evalType == 'general':
        evalDataPath = GEN_EVAL_DATABASE_PATH
        newEvaluationData = {
            "Model": MODEL,
            "Model Type": MODEL_TYPE,
            "No. Samples": NO_SAMPLES,
            "Resolution": RESOLUTION,
            "Inference Steps": INFERENCE_STEPS,
            "Objects": GEN_OBJECTS,
            "Actions": GEN_ACTIONS,
            "Occupations": GEN_OCCUPATIONS,
            "Dist. Bias": DIST_BIAS,
            "Hallucination": HALLUCINATION,
            "Gen. Miss Rate": MISS_RATE,
            "Date": DATE,
            "Time": TIME,
            "Run Time": RUN_TIME
        }
    else:
        evalDataPath = TASK_EVAL_DATABASE_PATH
        newEvaluationData = {
            "Model": MODEL,
            "Model Type": MODEL_TYPE,
            "No. Samples": NO_SAMPLES,
            "Resolution": RESOLUTION,
            "Inference Steps": INFERENCE_STEPS,
            "Target": TASK_TARGET,
            "Dist. Bias": DIST_BIAS,
            "Hallucination": HALLUCINATION,
            "Gen. Miss Rate": MISS_RATE,
            "Date": DATE,
            "Time": TIME,
            "Run Time": RUN_TIME
        }
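    # Fetch the most recent copy of the database from the Hub so the new entry
    # is appended to the latest version rather than a stale local file.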
    databaseFile = hf_hub_download(
        repo_id="JVice/try-before-you-bias-data",
        filename=evalDataPath,
        repo_type="dataset",
    )
    with open(databaseFile, 'r') as f:
        yamlData = safe_load(f)

    if evalType == 'general':
        st.success('Congrats on your General Bias evaluation!', icon='\U0001F388')
    else:
        st.success('Congrats on your Task-Oriented Bias evaluation!', icon='\U0001F388')
    # Create an entry for first-time users so the assignment below does not raise a KeyError.
    if USERNAME not in yamlData['evaluations']['username']:
        yamlData['evaluations']['username'][USERNAME] = {}

    yamlData['evaluations']['username'][USERNAME][EVAL_ID] = newEvaluationData
    if debugging:
        st.write("NEW DATABASE ", yamlData['evaluations']['username'][USERNAME])
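    # Hold the scheduler's lock while writing so a background commit cannot
    # run against a half-written file.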
    with EVAL_DATABASE_UPDATE_SCHEDULER.lock:
        with open(evalDataPath, 'w') as yaml_file:
            yaml_file.write(yaml.dump(yamlData, default_flow_style=False))

def reset_variables(evalType):
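    """Clear the per-run evaluation state ahead of the next run.

    USERNAME, MODEL and MODEL_TYPE are not reset, so they persist for the
    remainder of the session. Only the image/caption buffers belonging to
    `evalType` ('general' or task-oriented) are cleared.
    """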
    global USERNAME
    global EVAL_ID
    global MODEL
    global MODEL_TYPE
    global NO_SAMPLES
    global RESOLUTION
    global INFERENCE_STEPS
    global GEN_OBJECTS
    global GEN_ACTIONS
    global GEN_OCCUPATIONS
    global TASK_TARGET
    global DIST_BIAS
    global HALLUCINATION
    global MISS_RATE
    global DATE
    global TIME
    global RUN_TIME
    global EVAL_METRICS
    global OBJECT_IMAGES
    global OCCUPATION_IMAGES
    global TASK_IMAGES
    global OBJECT_CAPTIONS
    global OCCUPATION_CAPTIONS
    global TASK_CAPTIONS
    global TASK_COCOIDs
    global OBJECT_IMAGES_IN_UI
    global OCCUPATION_IMAGES_IN_UI
    global TASK_IMAGES_IN_UI
    global CURRENT_EVAL_TYPE
    EVAL_ID = None
    NO_SAMPLES = None
    RESOLUTION = None
    INFERENCE_STEPS = None
    GEN_OBJECTS = None
    GEN_ACTIONS = None
    GEN_OCCUPATIONS = None
    
    DIST_BIAS = None
    HALLUCINATION = None
    MISS_RATE = None
    DATE = None
    TIME = None
    RUN_TIME = None

    EVAL_METRICS = None
    CURRENT_EVAL_TYPE = None

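    # Only the buffers for the evaluation type that just finished are cleared;
    # the other tab's generated images and captions remain available.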
    if evalType == 'general':
        OBJECT_IMAGES = []
        OCCUPATION_IMAGES = []
        OBJECT_CAPTIONS = None
        OCCUPATION_CAPTIONS = None
        OBJECT_IMAGES_IN_UI = False
        OCCUPATION_IMAGES_IN_UI = False
    else:
        TASK_IMAGES = []
        TASK_CAPTIONS = None
        TASK_COCOIDs = None
        TASK_IMAGES_IN_UI = False
        TASK_TARGET = None
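

# ---------------------------------------------------------------------------
# Illustrative usage only (a minimal sketch, not part of the app's own flow):
# a Streamlit page would typically populate the globals above after a run and
# then call the two helpers. The module alias and example values below are
# hypothetical.
# ---------------------------------------------------------------------------
# import user_evaluation_variables as uev
#
# uev.USERNAME = "demo-user"
# uev.EVAL_ID = "0001"
# uev.MODEL = "stable-diffusion-v1-5"
# uev.MODEL_TYPE = "SD"
# uev.NO_SAMPLES = 100
# uev.update_evaluation_table('general', debugging=False)
# uev.reset_variables('general')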