downloadable json having the right format
Files changed:
- .gitignore +2 -0
- Pipfile +11 -0
- README.md +2 -0
- app.py +9 -5
- config.py +5 -2
- services/huggingface.py +57 -32
- services/json_generator.py +243 -30
- ui/form_components.py +207 -132
- utils/validation.py +6 -3
.gitignore
ADDED
@@ -0,0 +1,2 @@
+__pycache__/
+test.json
Pipfile
ADDED
@@ -0,0 +1,11 @@
+[[source]]
+url = "https://pypi.org/simple"
+verify_ssl = true
+name = "pypi"
+
+[packages]
+
+[dev-packages]
+
+[requires]
+python_version = "3.12"
README.md
CHANGED
@@ -12,3 +12,5 @@ short_description: Create a report in BoAmps format
 ---
 
 Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+
+This project was initiated by a group of students from Sud Telecom Paris.
app.py
CHANGED
@@ -16,21 +16,24 @@ from ui.form_components import (
 # Initialize Hugging Face
 init_huggingface()
 
+
 def handle_submit(*inputs):
     message, file_output, json_output = generate_json(*inputs)
+
     # Check if the message indicates validation failure
     if message.startswith("The following fields are required"):
         return message, file_output, json_output
+
     # If validation passed, proceed to update_dataset
     update_output = update_dataset(json_output)
+    print(json_output)
     return update_output, file_output, json_output
 
+
 # Create Gradio interface
 with gr.Blocks() as demo:
     gr.Markdown("## Data Collection Form")
+    gr.Markdown("Welcome to this Huggingface space, where you can create a report on the energy consumption of an AI task in BoAmps format, by filling in a form.")
 
     # Create form tabs
     header_components = create_header_tab()
@@ -46,12 +49,13 @@ with gr.Blocks() as demo:
     # Submit and Download Buttons
     submit_button = gr.Button("Submit")
     output = gr.Textbox(label="Output", lines=1)
+    # I don't understand why the file is empty ???
     json_output = gr.Textbox(visible=False)
     file_output = gr.File(label="Downloadable JSON")
 
     # Event Handlers
     submit_button.click(
         handle_submit,
         inputs=[
             *header_components,
             *task_components,
@@ -67,4 +71,4 @@ with gr.Blocks() as demo:
 )
 
 if __name__ == "__main__":
     demo.launch()
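A note on the handler above: handle_submit only pushes to the leaderboard dataset when generate_json did not return a validation error, so the error text reaches the output textbox unchanged. A minimal, self-contained sketch of that short-circuit pattern (the stubbed generate/update callables here are placeholders, not the project's real functions):

def handle_submit_sketch(generate, update, *inputs):
    # generate returns (message, file_path, json_str); update pushes json_str to the dataset.
    message, file_path, json_str = generate(*inputs)
    if message.startswith("The following fields are required"):
        # Validation failed: surface the message and skip the dataset update.
        return message, file_path, json_str
    return update(json_str), file_path, json_str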
config.py
CHANGED
@@ -5,12 +5,13 @@ HF_TOKEN = os.environ.get("HF_TOKEN")
 DATASET_NAME = "soprasteria/BoAmps_leaderboard"
 
 # Form Field Configurations
+MANDATORY_SECTIONS = ["task", "measures", "infrastructure", "environment"]
 OBLIGATORY_FIELDS = [
     "formatVersion", "reportId", "reportStatus", "confidentialityLevel",
     "taskType", "taskFamily", "taskStage", "algorithmName", "dataType",
     "volume", "volumeUnit", "nbRequest", "measurementMethod", "unit",
+    "powerConsumption", "os", "language", "infraType", "componentType",
+    "nbComponent", "country", "hashAlgorithm", "cryptographicAlgorithm", "value"
 ]
 
 # Dropdown Options
@@ -18,6 +19,8 @@ REPORT_STATUS_OPTIONS = ["draft", "final", "corrective", "$other"]
 CONFIDENTIALITY_LEVELS = ["public", "internal", "confidential", "secret"]
 DATA_TYPES = ["tabular", "audio", "boolean",
               "image", "video", "object", "text", "$other"]
+DATA_UNITS = ['kilobyte', 'megabyte', 'gigabyte', 'terabyte',
+              'petabyte', 'exabyte', 'zettabyte', 'yottabyte']
 ACCURACY_LEVELS = ["veryPoor", "poor", "average", "good", "veryGood"]
 MEASUREMENT_UNITS = ["Wh", "kWh", "MWh", "GWh", "kJoule", "MJoule", "GJoule", "TJoule", "PJoule",
                      "BTU", "kiloFLOPS", "megaFLOPS", "gigaFLOPS", "teraFLOPS", "petaFLOPS",
services/huggingface.py
CHANGED
@@ -3,12 +3,15 @@ from datasets import load_dataset, Dataset, concatenate_datasets
 import json
 from config import HF_TOKEN, DATASET_NAME
 
+
 def init_huggingface():
     """Initialize Hugging Face authentication."""
     if HF_TOKEN is None:
+        raise ValueError(
+            "Hugging Face token not found in environment variables.")
     login(token=HF_TOKEN)
 
+
 def update_dataset(json_data):
     """Update the Hugging Face dataset with new data."""
     if json_data is None or json_data.startswith("The following fields are required"):
@@ -18,7 +21,9 @@ def update_dataset(json_data):
         data = json.loads(json_data)
     except json.JSONDecodeError:
         return "Invalid JSON data. Please ensure all required fields are filled correctly."
+
+    # data flattening and saving to dataset
+    """
     try:
         dataset = load_dataset(DATASET_NAME, split="train")
     except:
@@ -26,19 +31,22 @@
 
     new_data = create_flattened_data(data)
     new_dataset = Dataset.from_dict(new_data)
+
     if len(dataset) > 0:
         updated_dataset = concatenate_datasets([dataset, new_dataset])
     else:
         updated_dataset = new_dataset
 
     updated_dataset.push_to_hub(DATASET_NAME)
+    """
     return "Data submitted successfully and dataset updated!"
 
+
 def create_flattened_data(data):
     """Create a flattened data structure for the dataset."""
     # Handle hyperparameters
+    hyperparameters = data.get("task", {}).get("algorithms", [{}])[
+        0].get("hyperparameters", {}).get("values", [])
 
     # Process hyperparameters
     hyperparameter_names = []
@@ -48,16 +56,19 @@
         hyperparameter_names.append(hp["name"])
         hyperparameter_values.append(str(hp["value"]))
 
+    hyperparameter_name_str = ", ".join(
+        hyperparameter_names) if hyperparameter_names else None
+    hyperparameter_value_str = ", ".join(
+        hyperparameter_values) if hyperparameter_values else None
 
     # Handle inference properties
+    inference_props = data.get("task", {}).get(
+        "dataset", {}).get("inferenceProperties", [])
 
     # Process inference properties
     inference_data = []
     for props in inference_props:
         if props:
             inference_data.append({
                 "nbRequest": props.get("nbRequest"),
                 "nbTokensInput": props.get("nbTokensInput"),
@@ -68,19 +79,26 @@
                 "cache": props.get("cache")
             })
 
+    nbRequest_str = ", ".join([str(p["nbRequest"]) for p in inference_data if p.get(
+        "nbRequest")]) if inference_data else None
+    nbTokensInput_str = ", ".join([str(p["nbTokensInput"]) for p in inference_data if p.get(
+        "nbTokensInput")]) if inference_data else None
+    nbWordsInput_str = ", ".join([str(p["nbWordsInput"]) for p in inference_data if p.get(
+        "nbWordsInput")]) if inference_data else None
+    nbTokensOutput_str = ", ".join([str(p["nbTokensOutput"]) for p in inference_data if p.get(
+        "nbTokensOutput")]) if inference_data else None
+    nbWordsOutput_str = ", ".join([str(p["nbWordsOutput"]) for p in inference_data if p.get(
+        "nbWordsOutput")]) if inference_data else None
+    contextWindowSize_str = ", ".join([str(p["contextWindowSize"]) for p in inference_data if p.get(
+        "contextWindowSize")]) if inference_data else None
+    cache_str = ", ".join([str(p["cache"]) for p in inference_data if p.get(
+        "cache")]) if inference_data else None
 
     # Handle components
     components = data.get("infrastructure", {}).get("components", [])
     component_data = []
     for comp in components:
         if comp:
             component_data.append({
                 "componentName": comp.get("componentName"),
                 "nbComponent": comp.get("nbComponent"),
@@ -91,13 +109,20 @@
                 "share": comp.get("share")
             })
 
+    componentName_str = ", ".join([str(p["componentName"]) for p in component_data if p.get(
+        "componentName")]) if component_data else None
+    nbComponent_str = ", ".join([str(p["nbComponent"]) for p in component_data if p.get(
+        "nbComponent")]) if component_data else None
+    memorySize_str = ", ".join([str(p["memorySize"]) for p in component_data if p.get(
+        "memorySize")]) if component_data else None
+    manufacturer_infra_str = ", ".join([str(p["manufacturer"]) for p in component_data if p.get(
+        "manufacturer")]) if component_data else None
+    family_str = ", ".join([str(p["family"]) for p in component_data if p.get(
+        "family")]) if component_data else None
+    series_str = ", ".join([str(p["series"]) for p in component_data if p.get(
+        "series")]) if component_data else None
+    share_str = ", ".join([str(p["share"]) for p in component_data if p.get(
+        "share")]) if component_data else None
 
     return {
         # Header
@@ -112,7 +137,7 @@
         "publisher_projectName": [data["header"]["publisher"]["projectName"]],
         "publisher_confidentialityLevel": [data["header"]["publisher"]["confidentialityLevel"]],
         "publisher_publicKey": [data["header"]["publisher"]["publicKey"]],
+
         # Task
         "taskType": [data["task"]["taskType"]],
         "taskFamily": [data["task"]["taskFamily"]],
@@ -143,7 +168,7 @@
         "owner": [data["task"]["dataset"][0]["owner"]],
         "measuredAccuracy": [data["task"]["measuredAccuracy"]],
         "estimatedAccuracy": [data["task"]["estimatedAccuracy"]],
+
         # Measures
         "measurementMethod": [data["measures"][0]["measurementMethod"]],
         "manufacturer": [data["measures"][0]["manufacturer"]],
@@ -159,16 +184,16 @@
         "powerConsumption": [data["measures"][0]["powerConsumption"]],
         "measurementDuration": [data["measures"][0]["measurementDuration"]],
         "measurementDateTime": [data["measures"][0]["measurementDateTime"]],
+
         # System
         "os": [data["system"]["os"]],
         "distribution": [data["system"]["distribution"]],
         "distributionVersion": [data["system"]["distributionVersion"]],
+
         # Software
         "language": [data["software"]["language"]],
         "version_software": [data["software"]["version"]],
+
         # Infrastructure
         "infraType": [data["infrastructure"]["infraType"]],
         "cloudProvider": [data["infrastructure"]["cloudProvider"]],
@@ -180,7 +205,7 @@
         "family": [family_str],
         "series": [series_str],
         "share": [share_str],
+
         # Environment
         "country": [data["environment"]["country"]],
         "latitude": [data["environment"]["latitude"]],
@@ -189,12 +214,12 @@
         "powerSupplierType": [data["environment"]["powerSupplierType"]],
         "powerSource": [data["environment"]["powerSource"]],
         "powerSourceCarbonIntensity": [data["environment"]["powerSourceCarbonIntensity"]],
+
         # Quality
         "quality": [data["quality"]],
+
         # Hash
         "hashAlgorithm": [data["$hash"]["hashAlgorithm"]],
         "cryptographicAlgorithm": [data["$hash"]["cryptographicAlgorithm"]],
         "value": [data["$hash"]["ecryptedValue"]]
     }
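create_flattened_data turns the list-valued parts of a report (hyperparameters, inference properties, infrastructure components) into comma-separated strings so that each report becomes a single flat row for the dataset. A small illustration of that join pattern on made-up component data (the values below are hypothetical, not from the repository):

components = [
    {"componentName": "NVIDIA GeForce GTX 1080 Ti", "nbComponent": 1},
    {"componentName": "Intel Xeon", "nbComponent": 2},
]
# Same pattern as the *_str variables in create_flattened_data.
componentName_str = ", ".join(
    str(c["componentName"]) for c in components if c.get("componentName")
) if components else None
nbComponent_str = ", ".join(
    str(c["nbComponent"]) for c in components if c.get("nbComponent")
) if components else None
# componentName_str -> "NVIDIA GeForce GTX 1080 Ti, Intel Xeon"
# nbComponent_str   -> "1, 2"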
services/json_generator.py
CHANGED
@@ -3,6 +3,7 @@ import tempfile
 from datetime import datetime
 from utils.validation import validate_obligatory_fields
 
+
 def generate_json(
     # Header
     licensing, formatVersion, formatVersionSpecificationUri, reportId, reportDatetime, reportStatus,
@@ -21,7 +22,7 @@ def generate_json(
     # Software
     language, version_software,
     # Infrastructure
-    infraType, cloudProvider, cloudInstance, componentName, nbComponent, memorySize, manufacturer_infra, family, series, share,
+    infraType, cloudProvider, cloudInstance, componentName, componentType, nbComponent, memorySize, manufacturer_infra, family, series, share,
     # Environment
     country, latitude, longitude, location, powerSupplierType, powerSource, powerSourceCarbonIntensity,
     # Quality
@@ -30,6 +31,7 @@ def generate_json(
     hashAlgorithm, cryptographicAlgorithm, value_hash
 ):
     """Generate JSON data from form inputs."""
+    # TO CHANGE
     # Process hyperparameters
     hyperparameters = []
     max_length = max(len(hyperparameter_names), len(hyperparameter_values))
@@ -38,35 +40,245 @@ def generate_json(
             "name": hyperparameter_names[i] if i < len(hyperparameter_names) and hyperparameter_names[i] else "",
             "value": hyperparameter_values[i] if i < len(hyperparameter_values) and hyperparameter_values[i] else ""
         })
+
     # Process inference properties
     inference_props_list = []
+    max_length = max(len(nbRequest), len(nbTokensInput), len(nbWordsInput), len(
+        nbTokensOutput), len(nbWordsOutput), len(contextWindowSize), len(cache))
     for i in range(max_length):
+        inference_props = {}
+        if i < len(nbRequest) and nbRequest[i]:
+            inference_props["nbRequest"] = nbRequest[i]
+        if i < len(nbTokensInput) and nbTokensInput[i]:
+            inference_props["nbTokensInput"] = nbTokensInput[i]
+        if i < len(nbWordsInput) and nbWordsInput[i]:
+            inference_props["nbWordsInput"] = nbWordsInput[i]
+        if i < len(nbTokensOutput) and nbTokensOutput[i]:
+            inference_props["nbTokensOutput"] = nbTokensOutput[i]
+        if i < len(nbWordsOutput) and nbWordsOutput[i]:
+            inference_props["nbWordsOutput"] = nbWordsOutput[i]
+        if i < len(contextWindowSize) and contextWindowSize[i]:
+            inference_props["contextWindowSize"] = contextWindowSize[i]
+        if i < len(cache) and cache[i]:
+            inference_props["cache"] = cache[i]
+        inference_props_list.append(inference_props)
 
     # Process components
     components_list = []
+    max_length = max(len(componentName), len(componentType), len(nbComponent), len(memorySize), len(
+        manufacturer_infra), len(family), len(series), len(share))
     for i in range(max_length):
+        component = {}
+        if i < len(componentName) and componentName[i]:
+            component["componentName"] = componentName[i]
+        if i < len(componentType) and componentType[i]:
+            component["componentType"] = componentType[i]
+        if i < len(nbComponent) and nbComponent[i]:
+            component["nbComponent"] = nbComponent[i]
+        if i < len(memorySize) and memorySize[i]:
+            component["memorySize"] = memorySize[i]
+        if i < len(manufacturer_infra) and manufacturer_infra[i]:
+            component["manufacturer"] = manufacturer_infra[i]
+        if i < len(family) and family[i]:
+            component["family"] = family[i]
+        if i < len(series) and series[i]:
+            component["series"] = series[i]
+        if i < len(share) and share[i]:
+            component["share"] = share[i]
+        components_list.append(component)
+
+    # process report
+    report = {}
+
+    # Process header
+    header = {}
+    if licensing:
+        header["licensing"] = licensing
+    if formatVersion:
+        header["formatVersion"] = formatVersion
+    if formatVersionSpecificationUri:
+        header["formatVersionSpecificationUri"] = formatVersionSpecificationUri
+    if reportId:
+        header["reportId"] = reportId
+    if reportDatetime:
+        header["reportDatetime"] = reportDatetime or datetime.now().isoformat()
+    if reportStatus:
+        header["reportStatus"] = reportStatus
+
+    publisher = {}
+    if publisher_name:
+        publisher["name"] = publisher_name
+    if publisher_division:
+        publisher["division"] = publisher_division
+    if publisher_projectName:
+        publisher["projectName"] = publisher_projectName
+    if publisher_confidentialityLevel:
+        publisher["confidentialityLevel"] = publisher_confidentialityLevel
+    if publisher_publicKey:
+        publisher["publicKey"] = publisher_publicKey
+
+    if publisher:
+        header["publisher"] = publisher
+
+    if header:
+        report["header"] = header
+
+    # proceed task
+
+    # proceed algorithm
+    algorithm = {}
+    if algorithmName:
+        algorithm["algorithmName"] = algorithmName
+    if framework:
+        algorithm["framework"] = framework
+    if frameworkVersion:
+        algorithm["frameworkVersion"] = frameworkVersion
+    if classPath:
+        algorithm["classPath"] = classPath
+    if hyperparameters:
+        algorithm["hyperparameters"] = hyperparameters
+    if quantization:
+        algorithm["quantization"] = quantization
+
+    # proceed dataset
+    dataset = {}
+    if dataType:
+        dataset["dataType"] = dataType
+    if fileType:
+        dataset["fileType"] = fileType
+    if volume:
+        dataset["volume"] = volume
+    if volumeUnit:
+        dataset["volumeUnit"] = volumeUnit
+    if items:
+        dataset["items"] = items
+    if shape_item:
+        dataset["shape"] = [{"item": shape_item}]
+    if inference_props_list:
+        dataset["inferenceProperties"] = inference_props_list
+    if source:
+        dataset["source"] = source
+    if sourceUri:
+        dataset["sourceUri"] = sourceUri
+    if owner:
+        dataset["owner"] = owner
+
+    # proceed all task
+    task = {}
+    if taskType:
+        task["taskType"] = taskType
+    if taskFamily:
+        task["taskFamily"] = taskFamily
+    if taskStage:
+        task["taskStage"] = taskStage
+    if algorithm:
+        task["algorithms"] = [algorithm]
+    if dataset:
+        task["dataset"] = [dataset]
+    if measuredAccuracy:
+        task["measuredAccuracy"] = measuredAccuracy
+    if estimatedAccuracy:
+        task["estimatedAccuracy"] = estimatedAccuracy
+    report["task"] = task
+
+    # proceed measures
+    measures = {}
+    if measurementMethod:
+        measures["measurementMethod"] = measurementMethod
+    if manufacturer:
+        measures["manufacturer"] = manufacturer
+    if version:
+        measures["version"] = version
+    if cpuTrackingMode:
+        measures["cpuTrackingMode"] = cpuTrackingMode
+    if gpuTrackingMode:
+        measures["gpuTrackingMode"] = gpuTrackingMode
+    if averageUtilizationCpu:
+        measures["averageUtilizationCpu"] = averageUtilizationCpu
+    if averageUtilizationGpu:
+        measures["averageUtilizationGpu"] = averageUtilizationGpu
+    if serverSideInference:
+        measures["serverSideInference"] = serverSideInference
+    if unit:
+        measures["unit"] = unit
+    if powerCalibrationMeasurement:
+        measures["powerCalibrationMeasurement"] = powerCalibrationMeasurement
+    if durationCalibrationMeasurement:
+        measures["durationCalibrationMeasurement"] = durationCalibrationMeasurement
+    if powerConsumption:
+        measures["powerConsumption"] = powerConsumption
+    if measurementDuration:
+        measures["measurementDuration"] = measurementDuration
+    if measurementDateTime:
+        measures["measurementDateTime"] = measurementDateTime
+    report["measures"] = [measures]
+
+    # proceed system
+    system = {}
+    if os:
+        system["os"] = os
+    if distribution:
+        system["distribution"] = distribution
+    if distributionVersion:
+        system["distributionVersion"] = distributionVersion
+    if system:
+        report["system"] = system
+
+    # proceed software
+    software = {}
+    if language:
+        software["language"] = language
+    if version_software:
+        software["version"] = version_software
+    if software:
+        report["software"] = software
+
+    # proceed infrastructure
+    infrastructure = {}
+    if infraType:
+        infrastructure["infraType"] = infraType
+    if cloudProvider:
+        infrastructure["cloudProvider"] = cloudProvider
+    if cloudInstance:
+        infrastructure["cloudInstance"] = cloudInstance
+    if components_list:
+        infrastructure["components"] = components_list
+    report["infrastructure"] = infrastructure
+
+    # proceed environment
+    environment = {}
+    if country:
+        environment["country"] = country
+    if latitude:
+        environment["latitude"] = latitude
+    if longitude:
+        environment["longitude"] = longitude
+    if location:
+        environment["location"] = location
+    if powerSupplierType:
+        environment["powerSupplierType"] = powerSupplierType
+    if powerSource:
+        environment["powerSource"] = powerSource
+    if powerSourceCarbonIntensity:
+        environment["powerSourceCarbonIntensity"] = powerSourceCarbonIntensity
+    report["environment"] = environment
+
+    # proceed quality
+    if quality:
+        report["quality"] = quality
+
+    # proceed hash
+    hash = {}
+    if hashAlgorithm:
+        hash["hashAlgorithm"] = hashAlgorithm
+    if cryptographicAlgorithm:
+        hash["cryptographicAlgorithm"] = cryptographicAlgorithm
+    if value_hash:
+        hash["value"] = value_hash
+    if hash:
+        report["$hash"] = hash
+
+    """
     data = {
         "header": {
             "licensing": licensing,
@@ -170,16 +382,17 @@ def generate_json(
             "ecryptedValue": value_hash
         }
     }
+    """
 
     # Validate obligatory fields
+    is_valid, message = validate_obligatory_fields(report)
     if not is_valid:
         return message, None, ""
 
     # Create the JSON string
-    json_str = json.dumps(data, indent=4)
+    json_str = json.dumps(report)
+    print(json_str)
     # Create and save the JSON file
+    with tempfile.NamedTemporaryFile(mode='w', prefix="report", delete=False, suffix='.json') as file:
+        json.dump(report, file, indent=4)
+        return message, file.name, json_str
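The part of this change that actually produces the downloadable JSON is the tail of generate_json: the report dict is written to a named temporary file and the file's path is returned, which the gr.File component in app.py can then offer for download. A minimal sketch of that pattern in isolation (the report content below is a placeholder):

import json
import tempfile

report = {"header": {"reportId": "example-report"}}  # placeholder content

with tempfile.NamedTemporaryFile(mode="w", prefix="report", suffix=".json", delete=False) as file:
    json.dump(report, file, indent=4)          # written to disk, flushed when the block exits
    path = file.name                           # this path is what gets handed to the gr.File output

json_str = json.dumps(report)                  # the string kept in the hidden textbox for update_dataset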
ui/form_components.py
CHANGED
@@ -1,12 +1,13 @@
 import gradio as gr
 from config import (
     REPORT_STATUS_OPTIONS, CONFIDENTIALITY_LEVELS, DATA_TYPES,
-    ACCURACY_LEVELS, MEASUREMENT_UNITS, INFRA_TYPES,
+    DATA_UNITS, ACCURACY_LEVELS, MEASUREMENT_UNITS, INFRA_TYPES,
     POWER_SUPPLIER_TYPES, POWER_SOURCES, QUALITY_LEVELS,
     HASH_ALGORITHMS, CRYPTO_ALGORITHMS, CACHE_OPTIONS
 )
 
+
+def create_dynamic_section(section_name, fields_config, initial_count=1, layout="row"):
     # State management
     count_state = gr.State(value=initial_count+1)
     field_states = [gr.State([]) for _ in fields_config]
@@ -33,12 +34,12 @@
     def render_dynamic_section(count):
         nonlocal all_components
         all_components = []
+
         for i in range(count):
             with (gr.Row() if layout == "row" else gr.Column()):
                 row_components = []
                 field_refs = []  # To store references to current row's components
+
                 for field_idx, config in enumerate(fields_config):
                     component = config["type"](
                         label=f"{config['label']} {i + 1}",
@@ -46,7 +47,7 @@
                         **config.get("kwargs", {})
                     )
                     row_components.append(component)
+                    field_refs.append(component)
 
                     # Create change event with ALL current field values
                     component.change(
@@ -54,7 +55,7 @@
                         inputs=[*field_states, *field_refs, gr.State(i)],
                         outputs=field_states
                     )
+
                 # Remove button
                 remove_btn = gr.Button("❌", variant="secondary")
                 remove_btn.click(
@@ -66,63 +67,82 @@
                     outputs=[count_state, *field_states]
                 )
                 row_components.append(remove_btn)
+
                 all_components.extend(row_components)
         return all_components
 
     # Initialize with initial count
     render_dynamic_section(count=initial_count)
+
     add_btn = gr.Button(f"Add {section_name}")
     add_btn.click(lambda x: x + 1, count_state, count_state)
 
     return (count_state, *field_states, add_btn)
 
+
 def create_header_tab():
     """Create the header tab components."""
     with gr.Tab("Header"):
+        licensing = gr.Textbox(
+            label="Licensing", info="(the type of licensing applicable for the sharing of the report)")
+        formatVersion = gr.Textbox(
+            label="Format Version", info="Required field<br>(the version of the specification of this set of schemas defining the report's fields)")
+        formatVersionSpecificationUri = gr.Textbox(
+            label="Format Version Specification URI", info="(the URI of the present specification of this set of schemas)")
+        reportId = gr.Textbox(
+            label="Report ID", info="Required field<br>(the unique identifier of this report, preferably as a uuid4 string)")
+        reportDatetime = gr.Textbox(
+            label="Report Datetime", info="(the publishing date of this report in format YYYY-MM-DD HH:MM:SS)")
         reportStatus = gr.Dropdown(value=None,
+                                   label="Report Status",
+                                   choices=REPORT_STATUS_OPTIONS,
+                                   info="Required field<br>(the status of this report)"
+                                   )
+
         with gr.Accordion("Publisher"):
+            publisher_name = gr.Textbox(
+                label="Name", info="(name of the organization)")
+            publisher_division = gr.Textbox(
+                label="Division", info="(name of the publishing department within the organization)")
+            publisher_projectName = gr.Textbox(
+                label="Project Name", info="(name of the publishing project within the organization)")
             publisher_confidentialityLevel = gr.Dropdown(value=None,
+                                                         label="Confidentiality Level",
+                                                         choices=CONFIDENTIALITY_LEVELS,
+                                                         info="Required field<br>(the confidentiality of the report)"
+                                                         )
+            publisher_publicKey = gr.Textbox(
+                label="Public Key", info="(the cryptographic public key to check the identity of the publishing organization)")
+
         return [
             licensing, formatVersion, formatVersionSpecificationUri, reportId,
             reportDatetime, reportStatus, publisher_name, publisher_division,
             publisher_projectName, publisher_confidentialityLevel, publisher_publicKey
         ]
 
+
 def create_task_tab():
     """Create the task tab components."""
     with gr.Tab("Task"):
+        taskType = gr.Textbox(
+            label="Task Type", info="Required field<br>(type of the computing task of machine learning, example : datacreation, preprocessing, supervisedLearning, unsupervisedLearning, semiSupervisedLearning ...)")
+        taskFamily = gr.Textbox(
+            label="Task Family", info="Required field<br>(the family of task performed, example : classification, regression, chatbot, summarization, keyword extraction, image recognition...)")
+        taskStage = gr.Textbox(
+            label="Task Stage", info="Required field<br>(stage of the task, example: training, finetuning, reinforcement, inference, rag...)")
+
         with gr.Accordion("Algorithms"):
+            algorithmName = gr.Textbox(
+                label="Algorithm Name", info="Required field<br>(the case-sensitive common name of the algorithm, example: randomForest, svm, xgboost...)")
+            framework = gr.Textbox(
+                label="Framework", info="(the common name of the software framework implementing the algorithm)")
+            frameworkVersion = gr.Textbox(
+                label="Framework Version", info="(the version of the software framework)")
+            classPath = gr.Textbox(
+                label="Class Path", info="(the full class path of the algorithm within the framework)")
+            tuning_method = gr.Textbox(
+                label="Tuning Method", info="(the method of hyperparameters tuning used (if any), example: gridSearch, randomizedSearch...)")
+
             with gr.Accordion("Hyperparameters"):
                 _, hyperparameter_names, hyperparameter_values, add_btn = create_dynamic_section(
                     section_name="Hyperparameter",
@@ -143,51 +163,65 @@ def create_task_tab():
                     initial_count=0,
                 )
 
+            quantization = gr.Textbox(
+                label="Quantization", info="(the data weights (in bits) obtained thanks to the quantization, example: 2, 8, 16...)")
+
         with gr.Accordion("Dataset"):
             dataType = gr.Dropdown(value=None,
+                                   label="Data Type",
+                                   choices=DATA_TYPES,
+                                   info="Required field<br>(the nature of the data)"
+                                   )
+            fileType = gr.Textbox(
+                label="File Type", info="(the file type of the dataset)")
+            volume = gr.Number(value=lambda: None,
+                               label="Volume", info="Required field<br>(the size of the dataset)")
+            volumeUnit = gr.Dropdown(value=None,
+                                     label="Volume Unit",
+                                     choices=DATA_UNITS,
+                                     info="Required field<br>(the unit of the size)")
+            items = gr.Number(value=lambda: None,
+                              label="Items", info="(the number of items in the dataset)")
+            shape_item = gr.Number(value=lambda: None,
+                                   label="Shape Item", info="(the shape of each dataset item)")
+
             with gr.Accordion("Inference Properties"):
+                _, nbRequest, nbTokensInput, nbWordsInput, nbTokensOutput, nbWordsOutput, contextWindowSize, cache, add_inference_btn = create_dynamic_section(
                     section_name="Inference Property",
                     fields_config=[
                         {
+                            "type": gr.Number,
+                            "value": lambda: None,
                             "label": "Number of Requests",
                             "info": "Required field<br>(the number of requests the measure corresponds to)",
                         },
                         {
+                            "type": gr.Number,
+                            "value": lambda: None,
                            "label": "Number of Tokens Input",
                             "info": "(the number of tokens in the input)",
                         },
                         {
+                            "type": gr.Number,
+                            "value": lambda: None,
                             "label": "Number of Words Input",
                             "info": "(the number of words in the input)",
                         },
                         {
+                            "type": gr.Number,
+                            "value": lambda: None,
                             "label": "Number of Tokens Output",
                             "info": "(the number of tokens in the output)",
                         },
                         {
+                            "type": gr.Number,
+                            "value": lambda: None,
                             "label": "Number of Words Output",
                             "info": "(the number of words in the output)",
                         },
                         {
+                            "type": gr.Number,
+                            "value": lambda: None,
                             "label": "Context Window Size",
                             "info": "(the number of tokens kept in memory)",
                         },
@@ -201,19 +235,23 @@ def create_task_tab():
                     initial_count=0,
                     layout="column"
                 )
+
+            source = gr.Textbox(
+                label="Source", info="(the kind of source of the dataset)")
+            sourceUri = gr.Textbox(
+                label="Source URI", info="(the URI of the dataset)")
+            owner = gr.Textbox(
+                label="Owner", info="(the owner of the dataset)")
 
         with gr.Row():
+            measuredAccuracy = gr.Number(value=lambda: None,
+                                         label="Measured Accuracy", info="(the measured accuracy of your model (between 0 and 1))")
             estimatedAccuracy = gr.Dropdown(value=None,
+                                            label="Estimated Accuracy",
+                                            choices=ACCURACY_LEVELS,
+                                            info="(estimated accuracy assessment)"
+                                            )
+
         return [
             taskType, taskFamily, taskStage, algorithmName, framework,
             frameworkVersion, classPath, tuning_method, hyperparameter_names, hyperparameter_values,
@@ -223,28 +261,42 @@ def create_task_tab():
             cache, source, sourceUri, owner, measuredAccuracy, estimatedAccuracy
         ]
 
+
 def create_measures_tab():
     """Create the measures tab components."""
     with gr.Tab("Measures"):
+        measurementMethod = gr.Textbox(
+            label="Measurement Method", info="Required field<br>(the method used to perform the energy or FLOPS measure)")
+        manufacturer = gr.Textbox(
+            label="Manufacturer", info="(the builder of the measuring tool)")
+        version = gr.Textbox(
+            label="Version", info="(the version of the measuring tool)")
+        cpuTrackingMode = gr.Textbox(
+            label="CPU Tracking Mode", info="(the method used to track CPU consumption)")
+        gpuTrackingMode = gr.Textbox(
+            label="GPU Tracking Mode", info="(the method used to track GPU consumption)")
+        averageUtilizationCpu = gr.Number(value=lambda: None,
+                                          label="Average Utilization CPU", info="(the average percentage of CPU use)")
+        averageUtilizationGpu = gr.Number(value=lambda: None,
+                                          label="Average Utilization GPU", info="(the average percentage of GPU use)")
+        serverSideInference = gr.Textbox(
+            label="Server Side Inference", info="(inference server consumption estimation)")
         unit = gr.Dropdown(value=None,
+                           label="Unit",
+                           choices=MEASUREMENT_UNITS,
+                           info="Required field<br>(the unit of power consumption measure)"
+                           )
+        powerCalibrationMeasurement = gr.Number(value=lambda: None,
+                                                label="Power Calibration Measurement", info="(power consumed during calibration)")
+        durationCalibrationMeasurement = gr.Number(value=lambda: None,
+                                                   label="Duration Calibration Measurement", info="(duration of calibration in seconds)")
+        powerConsumption = gr.Number(value=lambda: None,
+                                     label="Power Consumption", info="Required field<br>(the power consumption measure)")
+        measurementDuration = gr.Number(value=lambda: None,
+                                        label="Measurement Duration", info="(the duration of measurement in seconds)")
+        measurementDateTime = gr.Textbox(
+            label="Measurement DateTime", info="(when measurement began)")
+
         return [
             measurementMethod, manufacturer, version, cpuTrackingMode,
             gpuTrackingMode, averageUtilizationCpu, averageUtilizationGpu,
@@ -253,49 +305,66 @@ def create_measures_tab():
             measurementDuration, measurementDateTime
         ]
 
+
 def create_system_tab():
     """Create the system tab components."""
     with gr.Tab("System"):
+        os = gr.Textbox(
+            label="OS", info="Required field<br>(name of the operating system)")
+        distribution = gr.Textbox(
+            label="Distribution", info="(distribution of the operating system)")
+        distributionVersion = gr.Textbox(
+            label="Distribution Version", info="(distribution version)")
+
        return [os, distribution, distributionVersion]
 
+
 def create_software_tab():
     """Create the software tab components."""
     with gr.Tab("Software"):
+        language = gr.Textbox(
+            label="Language", info="Required field<br>(programming language information)")
+        version_software = gr.Textbox(
+            label="Version", info="(version of the programming language)")
+
         return [language, version_software]
 
+
 def create_infrastructure_tab():
     """Create the infrastructure tab components."""
     with gr.Tab("Infrastructure"):
         infraType = gr.Dropdown(value=None,
+                                label="Infrastructure Type",
+                                choices=INFRA_TYPES,
+                                info="Required field<br>(the type of infrastructure used)"
+                                )
+        cloudProvider = gr.Textbox(
+            label="Cloud Provider", info="(name of your cloud provider)")
+        cloudInstance = gr.Textbox(
+            label="Cloud Instance", info="(name of your cloud instance)")
         with gr.Accordion("Components"):
+            _, componentName, componentType, nbComponent, memorySize, manufacturer_infra, family, series, share, add_component_btn = create_dynamic_section(
                 section_name="Component",
                 fields_config=[
                     {
                         "type": gr.Textbox,
                         "label": "Component Name",
+                        "info": "(the name of this subsystem part of your infrastructure, example returned by codecarbon: 1 x NVIDIA GeForce GTX 1080 Ti)",
                     },
                     {
                         "type": gr.Textbox,
+                        "label": "Component Type",
+                        "info": "Required field<br>(the type of this subsystem part of your infrastructure, example: cpu, gpu, ram, hdd, sdd...)",
+                    },
+                    {
+                        "type": gr.Number,
+                        "value": lambda: None,
                         "label": "Number of Components",
                         "info": "Required field<br>(number of items of this component)",
                     },
                     {
+                        "type": gr.Number,
+                        "value": lambda: None,
                         "label": "Memory Size",
                         "info": "(size of memory in Gbytes)",
                     },
@@ -315,7 +384,8 @@ def create_infrastructure_tab():
                        "info": "(series of this component)",
                     },
                     {
+                        "type": gr.Number,
+                        "value": lambda: None,
                         "label": "Share",
                         "info": "(percentage of equipment used)",
                     }
@@ -323,61 +393,66 @@ def create_infrastructure_tab():
                 initial_count=0,
                 layout="column"
             )
+
         return [
+            infraType, cloudProvider, cloudInstance, componentName, componentType,
             nbComponent, memorySize, manufacturer_infra, family,
             series, share
         ]
 
+
 def create_environment_tab():
     """Create the environment tab components."""
     with gr.Tab("Environment"):
         country = gr.Textbox(label="Country", info="Required field")
+        latitude = gr.Number(label="Latitude", value=lambda: None)
+        longitude = gr.Number(label="Longitude", value=lambda: None)
         location = gr.Textbox(label="Location")
+        powerSupplierType = gr.Dropdown(value=lambda: None,
+                                        label="Power Supplier Type",
+                                        choices=POWER_SUPPLIER_TYPES,
+                                        info="(the type of power supplier)"
+                                        )
         powerSource = gr.Dropdown(value=None,
+                                  label="Power Source",
+                                  choices=POWER_SOURCES,
+                                  info="(the source of power)"
+                                  )
+        powerSourceCarbonIntensity = gr.Number(value=lambda: None,
+                                               label="Power Source Carbon Intensity")
+
         return [
             country, latitude, longitude, location,
             powerSupplierType, powerSource, powerSourceCarbonIntensity
         ]
 
+
 def create_quality_tab():
     """Create the quality tab components."""
     with gr.Tab("Quality"):
         quality = gr.Dropdown(value=None,
+                              label="Quality",
+                              choices=QUALITY_LEVELS,
+                              info="(the quality of the information provided)"
+                              )
+
         return [quality]
 
+
 def create_hash_tab():
     """Create the hash tab components."""
     with gr.Tab("Hash"):
         hashAlgorithm = gr.Dropdown(value=None,
+                                    label="Hash Algorithm",
+                                    choices=HASH_ALGORITHMS,
+                                    info="Required field<br>(the hash function to apply)"
+                                    )
         cryptographicAlgorithm = gr.Dropdown(value=None,
+                                             label="Cryptographic Algorithm",
+                                             choices=CRYPTO_ALGORITHMS,
+                                             info="Required field<br>(the public key function to apply)"
+                                             )
+        value_hash = gr.Textbox(
+            label="Value", info="Required field<br>(encrypted value of the hash)")
+
+        return [hashAlgorithm, cryptographicAlgorithm, value_hash]
utils/validation.py
CHANGED
@@ -1,5 +1,6 @@
 from config import OBLIGATORY_FIELDS
 
+
 def validate_obligatory_fields(data):
     """Validate that all required fields are present in the data."""
     def find_field(d, field):
@@ -17,13 +18,15 @@ def validate_obligatory_fields(data):
             if result is not None:
                 return result
         return None
 
     missing_fields = []
     for field in OBLIGATORY_FIELDS:
+        # if the field is mandatory, check if it is inside a mandatory section
+
         value = find_field(data, field)
         if not value and value != 0:  # Allow 0 as a valid value
             missing_fields.append(field)
 
     if missing_fields:
         return False, f"The following fields are required: {', '.join(missing_fields)}"
 
     return True, "All required fields are filled."