soury committed on
Commit
66ac827
·
1 Parent(s): 8a15dbd

load json results into a dataset

Browse files
app.py CHANGED
@@ -25,10 +25,16 @@ def handle_submit(*inputs):
25
  if message.startswith("The following fields are required"):
26
  return message, file_output, json_output
27
 
 
 
 
 
 
 
 
28
  # If validation passed, proceed to update_dataset
29
  update_output = update_dataset(json_output)
30
- print(json_output)
31
- return update_output, file_output, json_output
32
 
33
 
34
  # Create Gradio interface
@@ -47,11 +53,12 @@ with gr.Blocks(css_paths=css_path) as app:
47
  quality_components = create_quality_tab()
48
 
49
  # Submit and Download Buttons
50
- submit_button = gr.Button("Submit")
51
  output = gr.Textbox(label="Output", lines=1)
52
- # je comprend pas pq le fichier est vide ???
53
  json_output = gr.Textbox(visible=False)
54
  file_output = gr.File(label="Downloadable JSON")
 
 
55
 
56
  # Event Handlers
57
  submit_button.click(
@@ -66,7 +73,15 @@ with gr.Blocks(css_paths=css_path) as app:
66
  *environment_components,
67
  *quality_components,
68
  ],
69
- outputs=[output, file_output, json_output]
 
 
 
 
 
 
 
 
70
  )
71
 
72
  if __name__ == "__main__":
 
25
  if message.startswith("The following fields are required"):
26
  return message, file_output, json_output
27
 
28
+ publish_button = gr.Button(
29
+ "Share your data to the public repository", interactive=True, elem_classes="pubbutton")
30
+
31
+ return "Report sucessefully created", file_output, json_output, publish_button
32
+
33
+
34
+ def handle_publi(json_output):
35
  # If validation passed, proceed to update_dataset
36
  update_output = update_dataset(json_output)
37
+ return update_output
 
38
 
39
 
40
  # Create Gradio interface
 
53
  quality_components = create_quality_tab()
54
 
55
  # Submit and Download Buttons
56
+ submit_button = gr.Button("Submit", elem_classes="subbutton")
57
  output = gr.Textbox(label="Output", lines=1)
 
58
  json_output = gr.Textbox(visible=False)
59
  file_output = gr.File(label="Downloadable JSON")
60
+ publish_button = gr.Button(
61
+ "Share your data to the public repository", interactive=False, elem_classes="pubbutton")
62
 
63
  # Event Handlers
64
  submit_button.click(
 
73
  *environment_components,
74
  *quality_components,
75
  ],
76
+ outputs=[output, file_output, json_output, publish_button]
77
+ )
78
+ # Event Handlers
79
+ publish_button.click(
80
+ handle_publi,
81
+ inputs=[
82
+ json_output
83
+ ],
84
+ outputs=[output]
85
  )
86
 
87
  if __name__ == "__main__":
assets/styles/app.css CHANGED
@@ -34,11 +34,25 @@ div {
34
  color: var(--primary-600);
35
  font-weight: bold;
36
  }
 
 
 
 
 
37
  .mandatory_field label > span{
38
  color: var(--primary-600);
39
  font-weight: bold !important;
40
  }
41
- .mandatory_field div > span{
42
  color: var(--primary-600);
43
  font-weight: bold !important;
44
  }
 
 
 
 
 
 
 
 
 
 
34
  color: var(--primary-600);
35
  font-weight: bold;
36
  }
37
+ /* for gr.Accordion objects */
38
+ #mandatory_part button > span {
39
+ color: var(--primary-600);
40
+ font-weight: bold !important;
41
+ }
42
  .mandatory_field label > span{
43
  color: var(--primary-600);
44
  font-weight: bold !important;
45
  }
46
+ .mandatory_field div > span {
47
  color: var(--primary-600);
48
  font-weight: bold !important;
49
  }
50
+ /* submission button */
51
+ .subbutton{
52
+ background-color: var(--primary-400);
53
+ }
54
+ /* publish button */
55
+ .pubbutton{
56
+ background-color: #65db86;
57
+ }
58
+
src/services/huggingface.py CHANGED
@@ -1,6 +1,7 @@
1
- import json
2
  from huggingface_hub import login
3
- from src.services.util import HF_TOKEN
 
 
4
 
5
 
6
  def init_huggingface():
@@ -21,10 +22,9 @@ def update_dataset(json_data):
21
  except json.JSONDecodeError:
22
  return "Invalid JSON data. Please ensure all required fields are filled correctly."
23
 
24
- # data flattening and saving to dataset
25
- """
26
  try:
27
  dataset = load_dataset(DATASET_NAME, split="train")
 
28
  except:
29
  dataset = Dataset.from_dict({})
30
 
@@ -32,15 +32,198 @@ def update_dataset(json_data):
32
  new_dataset = Dataset.from_dict(new_data)
33
 
34
  if len(dataset) > 0:
 
 
 
 
35
  updated_dataset = concatenate_datasets([dataset, new_dataset])
36
  else:
37
  updated_dataset = new_dataset
38
 
39
  updated_dataset.push_to_hub(DATASET_NAME)
40
- """
41
  return "Data submitted successfully and dataset updated!"
42
 
43
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
44
  def create_flattened_data(data):
45
  out = {}
46
 
@@ -58,3 +241,4 @@ def create_flattened_data(data):
58
 
59
  flatten(data)
60
  return out
 
 
 
1
  from huggingface_hub import login
2
+ from datasets import load_dataset, Dataset, concatenate_datasets
3
+ import json
4
+ from src.services.util import HF_TOKEN, DATASET_NAME
5
 
6
 
7
  def init_huggingface():
 
22
  except json.JSONDecodeError:
23
  return "Invalid JSON data. Please ensure all required fields are filled correctly."
24
 
 
 
25
  try:
26
  dataset = load_dataset(DATASET_NAME, split="train")
27
+ print(dataset)
28
  except:
29
  dataset = Dataset.from_dict({})
30
 
 
32
  new_dataset = Dataset.from_dict(new_data)
33
 
34
  if len(dataset) > 0:
35
+ print("dataset intitial")
36
+ print(dataset)
37
+ print("data to add ")
38
+ print(new_dataset)
39
  updated_dataset = concatenate_datasets([dataset, new_dataset])
40
  else:
41
  updated_dataset = new_dataset
42
 
43
  updated_dataset.push_to_hub(DATASET_NAME)
 
44
  return "Data submitted successfully and dataset updated!"
45
 
46
 
47
+ def create_flattened_data(data):
48
+ """Create a flattened data structure for the algorithms."""
49
+ # Handle algorithms
50
+ algorithms = data.get("task", {}).get("algorithms", [])
51
+ fields = ["trainingType", "algorithmType", "algorithmName", "algorithmUri", "foundationModelName", "foundationModelUri",
52
+ "parametersNumber", "framework", "frameworkVersion", "classPath", "layersNumber", "epochsNumber", "optimizer", "quantization"]
53
+ """Create a flattened data structure for the algorithms."""
54
+ algorithms_data = {field: "| ".join(str(algo.get(
55
+ field)) for algo in algorithms if algo.get(field)) or None for field in fields}
56
+ trainingType_str = algorithms_data["trainingType"]
57
+ algorithmType_str = algorithms_data["algorithmType"]
58
+ algorithmName_str = algorithms_data["algorithmName"]
59
+ algorithmUri_str = algorithms_data["algorithmUri"]
60
+ foundationModelName_str = algorithms_data["foundationModelName"]
61
+ foundationModelUri_str = algorithms_data["foundationModelUri"]
62
+ parametersNumber_str = algorithms_data["parametersNumber"]
63
+ framework_str = algorithms_data["framework"]
64
+ frameworkVersion_str = algorithms_data["frameworkVersion"]
65
+ classPath_str = algorithms_data["classPath"]
66
+ layersNumber_str = algorithms_data["layersNumber"]
67
+ epochsNumber_str = algorithms_data["epochsNumber"]
68
+ optimizer_str = algorithms_data["optimizer"]
69
+ quantization_str = algorithms_data["quantization"]
70
+
71
+ """Create a flattened data structure for the dataset."""
72
+ # Handle dataset
73
+ dataset = data.get("task", {}).get("dataset", [])
74
+ fields = ["dataUsage", "dataType", "dataFormat", "dataSize",
75
+ "dataQuantity", "shape", "source", "sourceUri", "owner"]
76
+ """Create a flattened data structure for the dataset."""
77
+ dataset_data = {field: "| ".join(
78
+ str(d.get(field)) for d in dataset if d.get(field)) or None for field in fields}
79
+ dataUsage_str = dataset_data["dataUsage"]
80
+ dataType_str = dataset_data["dataType"]
81
+ dataFormat_str = dataset_data["dataFormat"]
82
+ dataSize_str = dataset_data["dataSize"]
83
+ dataQuantity_str = dataset_data["dataQuantity"]
84
+ shape_str = dataset_data["shape"]
85
+ source_str = dataset_data["source"]
86
+ sourceUri_str = dataset_data["sourceUri"]
87
+ owner_str = dataset_data["owner"]
88
+
89
+ """Create a flattened data structure for the measures."""
90
+ # Handle measures
91
+ measures = data.get("measures", [])
92
+ fields = ["measurementMethod", "manufacturer", "version", "cpuTrackingMode", "gpuTrackingMode", "averageUtilizationCpu", "averageUtilizationGpu",
93
+ "powerCalibrationMeasurement", "durationCalibrationMeasurement", "powerConsumption", "measurementDuration", "measurementDateTime"]
94
+ """Create a flattened data structure for the measures."""
95
+ measures_data = {field: "| ".join(str(measure.get(
96
+ field)) for measure in measures if measure.get(field)) or None for field in fields}
97
+ measurementMethod_str = measures_data["measurementMethod"]
98
+ manufacturer_str = measures_data["manufacturer"]
99
+ version_str = measures_data["version"]
100
+ cpuTrackingMode_str = measures_data["cpuTrackingMode"]
101
+ gpuTrackingMode_str = measures_data["gpuTrackingMode"]
102
+ averageUtilizationCpu_str = measures_data["averageUtilizationCpu"]
103
+ averageUtilizationGpu_str = measures_data["averageUtilizationGpu"]
104
+ powerCalibrationMeasurement_str = measures_data["powerCalibrationMeasurement"]
105
+ durationCalibrationMeasurement_str = measures_data["durationCalibrationMeasurement"]
106
+ powerConsumption_str = measures_data["powerConsumption"]
107
+ measurementDuration_str = measures_data["measurementDuration"]
108
+ measurementDateTime_str = measures_data["measurementDateTime"]
109
+
110
+ # Handle components
111
+ components = data.get("infrastructure", {}).get("components", [])
112
+ fields = ["componentName", "componentType", "nbComponent", "memorySize",
113
+ "manufacturer", "family", "series", "share"]
114
+
115
+ # Generate concatenated strings for each field
116
+ component_data = {field: "| ".join(str(comp.get(
117
+ field)) for comp in components if comp.get(field)) or None for field in fields}
118
+
119
+ componentName_str = component_data["componentName"]
120
+ componentType_str = component_data["componentType"]
121
+ nbComponent_str = component_data["nbComponent"]
122
+ memorySize_str = component_data["memorySize"]
123
+ manufacturer_infra_str = component_data["manufacturer"]
124
+ family_str = component_data["family"]
125
+ series_str = component_data["series"]
126
+ share_str = component_data["share"]
127
+
128
+ return {
129
+ # Header
130
+ "licensing": [data.get("header", {}).get("licensing", "")],
131
+ "formatVersion": [data.get("header", {}).get("formatVersion", "")],
132
+ "formatVersionSpecificationUri": [data.get("header", {}).get("formatVersionSpecificationUri", "")],
133
+ "reportId": [data.get("header", {}).get("reportId", "")],
134
+ "reportDatetime": [data.get("header", {}).get("reportDatetime", "")],
135
+ "reportStatus": [data.get("header", {}).get("reportStatus", "")],
136
+ "publisher_name": [data.get("header", {}).get("publisher", {}).get("name", "")],
137
+ "publisher_division": [data.get("header", {}).get("publisher", {}).get("division", "")],
138
+ "publisher_projectName": [data.get("header", {}).get("publisher", {}).get("projectName", "")],
139
+ "publisher_confidentialityLevel": [data.get("header", {}).get("publisher", {}).get("confidentialityLevel", "")],
140
+ "publisher_publicKey": [data.get("header", {}).get("publisher", {}).get("publicKey", "")],
141
+
142
+ # Task
143
+ "taskStage": [data.get("task", {}).get("taskStage", "")],
144
+ "taskFamily": [data.get("task", {}).get("taskFamily", "")],
145
+ "nbRequest": [data.get("task", {}).get("nbRequest", "")],
146
+ # Algorithms
147
+ "trainingType": [trainingType_str],
148
+ "algorithmType": [algorithmType_str],
149
+ "algorithmName": [algorithmName_str],
150
+ "algorithmUri": [algorithmUri_str],
151
+ "foundationModelName": [foundationModelName_str],
152
+ "foundationModelUri": [foundationModelUri_str],
153
+ "parametersNumber": [parametersNumber_str],
154
+ "framework": [framework_str],
155
+ "frameworkVersion": [frameworkVersion_str],
156
+ "classPath": [classPath_str],
157
+ "layersNumber": [layersNumber_str],
158
+ "epochsNumber": [epochsNumber_str],
159
+ "optimizer": [optimizer_str],
160
+ "quantization": [quantization_str],
161
+ # Dataset
162
+ "dataUsage": [dataUsage_str],
163
+ "dataType": [dataType_str],
164
+ "dataFormat": [dataFormat_str],
165
+ "dataSize": [dataSize_str],
166
+ "dataQuantity": [dataQuantity_str],
167
+ "shape": [shape_str],
168
+ "source": [source_str],
169
+ "sourceUri": [sourceUri_str],
170
+ "owner": [owner_str],
171
+ "measuredAccuracy": [data.get("task", {}).get("measuredAccuracy", "")],
172
+ "estimatedAccuracy": [data.get("task", {}).get("estimatedAccuracy", "")],
173
+ "taskDescription": [data.get("task", {}).get("taskDescription", "")],
174
+
175
+ # Measures
176
+ "measurementMethod": [measurementMethod_str],
177
+ "manufacturer": [manufacturer_str],
178
+ "version": [version_str],
179
+ "cpuTrackingMode": [cpuTrackingMode_str],
180
+ "gpuTrackingMode": [gpuTrackingMode_str],
181
+ "averageUtilizationCpu": [averageUtilizationCpu_str],
182
+ "averageUtilizationGpu": [averageUtilizationGpu_str],
183
+ "powerCalibrationMeasurement": [powerCalibrationMeasurement_str],
184
+ "durationCalibrationMeasurement": [durationCalibrationMeasurement_str],
185
+ "powerConsumption": [powerConsumption_str],
186
+ "measurementDuration": [measurementDuration_str],
187
+ "measurementDateTime": [measurementDateTime_str],
188
+
189
+ # System
190
+ "os": [data.get("system", {}).get("os", "")],
191
+ "distribution": [data.get("system", {}).get("distribution", "")],
192
+ "distributionVersion": [data.get("system", {}).get("distributionVersion", "")],
193
+
194
+ # Software
195
+ "language": [data.get("software", {}).get("language", "")],
196
+ "version_software": [data.get("software", {}).get("version_software", "")],
197
+
198
+ # Infrastructure
199
+ "infraType": [data.get("infrastructure", {}).get("infra_type", "")],
200
+ "cloudProvider": [data.get("infrastructure", {}).get("cloudProvider", "")],
201
+ "cloudInstance": [data.get("infrastructure", {}).get("cloudInstance", "")],
202
+ "cloudService": [data.get("infrastructure", {}).get("cloudService", "")],
203
+ "componentName": [componentName_str],
204
+ "componentType": [componentType_str],
205
+ "nbComponent": [nbComponent_str],
206
+ "memorySize": [memorySize_str],
207
+ "manufacturer_infra": [manufacturer_infra_str],
208
+ "family": [family_str],
209
+ "series": [series_str],
210
+ "share": [share_str],
211
+
212
+ # Environment
213
+ "country": [data.get("environment", {}).get("country", "")],
214
+ "latitude": [data.get("environment", {}).get("latitude", "")],
215
+ "longitude": [data.get("environment", {}).get("longitude", "")],
216
+ "location": [data.get("environment", {}).get("location", "")],
217
+ "powerSupplierType": [data.get("environment", {}).get("powerSupplierType", "")],
218
+ "powerSource": [data.get("environment", {}).get("powerSource", "")],
219
+ "powerSourceCarbonIntensity": [data.get("environment", {}).get("powerSourceCarbonIntensity", "")],
220
+
221
+ # Quality
222
+ "quality": [data.get("quality", "null")],
223
+ }
224
+
225
+
226
+ """
227
  def create_flattened_data(data):
228
  out = {}
229
 
 
241
 
242
  flatten(data)
243
  return out
244
+ """
src/services/util.py CHANGED
@@ -17,10 +17,20 @@ OBLIGATORY_FIELDS = [
17
  REPORT_STATUS_OPTIONS = ["draft", "final", "corrective", "other"]
18
  CONFIDENTIALITY_LEVELS = ["public", "internal", "confidential", "secret"]
19
  DATA_USAGE_OPTIONS = ["input", "output"]
20
- DATA_FORMAT = ["3gp", "3gpp", "3gpp2", "8svx", "aa", "aac", "aax", "act", "afdesign", "afphoto", "ai", "aiff", "alac", "amr", "amv", "ape", "arrow", "asf", "au", "avi", "avif", "awb", "bmp", "bpg", "cd5", "cda", "cdr", "cgm", "clip", "cpt", "csv", "deep", "dirac", "divx", "drawingml", "drw", "dss", "dvf", "ecw", "eps", "fits", "flac", "flif", "flv", "flvf4v", "gem", "gerber", "gif", "gle", "gsm", "heif", "hp-gl", "html", "hvif", "ico", "iklax", "ilbm", "img", "ivs", "jpeg", "json", "kra", "lottie", "m4a", "m4b", "m4p", "m4v", "mathml", "matroska", "mdp", "mmf", "movpkg", "mp3", "mpc", "mpeg1",
21
- "mpeg2", "mpeg4", "msv", "mxf", "naplps", "netpbm", "nmf", "nrrd", "nsv", "odg", "ods", "ogg", "opus", "pam", "parquet", "pbm", "pcx", "pdf", "pdn", "pgf", "pgm", "pgml", "pict", "plbm", "png", "pnm", "postscript", "ppm", "psd", "psp", "pstricks", "qcc", "quicktime", "ra", "raw", "realmedia", "regis", "rf64", "roq", "sai", "sgi", "sid", "sql", "sln", "svg", "svi", "swf", "text", "tga", "tiff", "tinyvg", "tta", "vicar", "vivoactive", "vml", "vob", "voc", "vox", "wav", "webm", "webp", "wma", "wmf", "wmv", "wv", "xaml", "xar", "xcf", "xisf", "xls", "xlsx", "xml", "xps", "yaml", "other"]
22
  DATA_TYPES = ["tabular", "audio", "boolean", "image",
23
  "video", "object", "text", "token", "word", "other"]
 
 
 
 
 
 
 
 
 
 
 
 
24
  DATA_SOURCE = ["public", "private", "other"]
25
  ACCURACY_LEVELS = ["veryPoor", "poor", "average", "good", "veryGood"]
26
  MEASUREMENT_UNITS = ["Wh", "kWh", "MWh", "GWh", "kJoule", "MJoule", "GJoule", "TJoule", "PJoule",
@@ -31,7 +41,3 @@ POWER_SUPPLIER_TYPES = ["public", "private", "internal", "other"]
31
  POWER_SOURCES = ["solar", "wind", "nuclear",
32
  "hydroelectric", "gas", "coal", "other"]
33
  QUALITY_LEVELS = ["high", "medium", "low"]
34
- HASH_ALGORITHMS = ["MD5", "RIPEMD-128", "RIPEMD-160", "RIPEMD-256", "RIPEMD-320",
35
- "SHA-1", "SHA-224", "SHA256", "SHA-384", "SHA-512"]
36
- CRYPTO_ALGORITHMS = ["RSA", "DSA", "ECDSA", "EDDSA"]
37
- CACHE_OPTIONS = ["true", "false"]
 
17
  REPORT_STATUS_OPTIONS = ["draft", "final", "corrective", "other"]
18
  CONFIDENTIALITY_LEVELS = ["public", "internal", "confidential", "secret"]
19
  DATA_USAGE_OPTIONS = ["input", "output"]
 
 
20
  DATA_TYPES = ["tabular", "audio", "boolean", "image",
21
  "video", "object", "text", "token", "word", "other"]
22
+ DATA_FORMAT = ["3gp", "3gpp", "3gpp2", "8svx", "aa", "aac", "aax", "act", "afdesign", "afphoto", "ai", "aiff", "alac",
23
+ "amr", "amv", "ape", "arrow", "asf", "au", "avi", "avif", "awb", "bmp", "bpg", "cd5", "cda", "cdr",
24
+ "cgm", "clip", "cpt", "csv", "deep", "dirac", "divx", "drawingml", "drw", "dss", "dvf", "ecw", "eps",
25
+ "fits", "flac", "flif", "flv", "flvf4v", "gem", "gerber", "gif", "gle", "gsm", "heif", "hp-gl", "html", "hvif",
26
+ "ico", "iklax", "ilbm", "img", "ivs", "jpeg", "json", "kra", "lottie", "m4a", "m4b", "m4p", "m4v", "mathml",
27
+ "matroska", "mdp", "mmf", "movpkg", "mp3", "mpc", "mpeg1", "mpeg2", "mpeg4", "msv", "mxf", "naplps", "netpbm",
28
+ "nmf", "nrrd", "nsv", "odg", "ods", "ogg", "opus", "pam", "parquet", "pbm", "pcx", "pdf", "pdn", "pgf", "pgm",
29
+ "pgml", "pict", "plbm", "png", "pnm", "postscript", "ppm", "psd", "psp", "pstricks", "qcc", "quicktime", "ra",
30
+ "raw", "realmedia", "regis", "rf64", "roq", "sai", "sgi", "sid", "sql", "sln", "svg", "svi", "swf", "text", "tga",
31
+ "tiff", "tinyvg", "tta", "vicar", "vivoactive", "vml", "vob", "voc", "vox", "wav", "webm", "webp", "wma", "wmf",
32
+ "wmv", "wv", "xaml", "xar", "xcf", "xisf", "xls", "xlsx", "xml", "xps", "yaml", "other"]
33
+
34
  DATA_SOURCE = ["public", "private", "other"]
35
  ACCURACY_LEVELS = ["veryPoor", "poor", "average", "good", "veryGood"]
36
  MEASUREMENT_UNITS = ["Wh", "kWh", "MWh", "GWh", "kJoule", "MJoule", "GJoule", "TJoule", "PJoule",
 
41
  POWER_SOURCES = ["solar", "wind", "nuclear",
42
  "hydroelectric", "gas", "coal", "other"]
43
  QUALITY_LEVELS = ["high", "medium", "low"]
 
 
 
 
src/ui/form_components.py CHANGED
@@ -3,8 +3,7 @@ from src.services.util import (
3
  REPORT_STATUS_OPTIONS, CONFIDENTIALITY_LEVELS, DATA_USAGE_OPTIONS, DATA_FORMAT,
4
  DATA_TYPES, DATA_SOURCE,
5
  ACCURACY_LEVELS, INFRA_TYPES,
6
- POWER_SUPPLIER_TYPES, POWER_SOURCES, QUALITY_LEVELS,
7
- HASH_ALGORITHMS, CRYPTO_ALGORITHMS
8
  )
9
 
10
 
@@ -144,7 +143,7 @@ def create_header_tab():
144
  reportId = gr.Textbox(
145
  label="Report ID", info="(the unique identifier of this report, preferably as a uuid4 string)")
146
  reportDatetime = gr.Textbox(
147
- label="Report Datetime", info="(Required field<br>the publishing date of this report in format YYYY-MM-DD HH:MM:SS)", elem_classes="mandatory_field")
148
  reportStatus = gr.Dropdown(value=None,
149
  label="Report Status",
150
  choices=REPORT_STATUS_OPTIONS,
@@ -185,9 +184,9 @@ def create_task_tab():
185
  label="Number of Requests", info="(if inference stage, the number of requests the measure corresponds to, 0 or empty if you're not measuring the inference stage)",
186
  value=lambda: None, minimum=0)
187
 
188
- with gr.Accordion("Algorithms"):
189
  _, trainingType, algorithmType, algorithmName, algorithmUri, foundationModelName, foundationModelUri, parametersNumber, framework, frameworkVersion, classPath, layersNumber, epochsNumber, optimizer, quantization, add_algorithm_btn = create_dynamic_section(
190
- section_name="Algorithms",
191
  fields_config=[
192
  {
193
  "type": gr.Textbox,
@@ -202,7 +201,7 @@ def create_task_tab():
202
  {
203
  "type": gr.Textbox,
204
  "label": "Algorithm Name",
205
- "info": "(the case-sensitive common name of the algorithm, example: randomForest, svm, xgboost...)",
206
  },
207
  {
208
  "type": gr.Textbox,
@@ -264,9 +263,9 @@ def create_task_tab():
264
  layout="column"
265
  )
266
 
267
- with gr.Accordion("Dataset"):
268
  _, dataUsage, dataType, dataFormat, dataSize, dataQuantity, shape, source, sourceUri, owner, add_dataset_btn = create_dynamic_section(
269
- section_name="Dataset",
270
  fields_config=[
271
  {
272
  "type": gr.Dropdown,
@@ -353,12 +352,12 @@ def create_measures_tab():
353
  with gr.Tab("Measures", elem_id="mandatory_part"):
354
  with gr.Accordion("Measures"):
355
  _, measurementMethod, manufacturer, version, cpuTrackingMode, gpuTrackingMode, averageUtilizationCpu, averageUtilizationGpu, powerCalibrationMeasurement, durationCalibrationMeasurement, powerConsumption, measurementDuration, measurementDateTime, add_measurement_btn = create_dynamic_section(
356
- section_name="Measures",
357
  fields_config=[
358
  {
359
  "type": gr.Textbox,
360
  "label": "Method of measurement",
361
- "info": "Required field<br>(the energy measure obtained from software and/or hardware tools, for a computing task)",
362
  "elem_classes": "mandatory_field",
363
  },
364
  {
@@ -451,9 +450,9 @@ def create_software_tab():
451
  """Create the software tab components."""
452
  with gr.Tab("Software"):
453
  language = gr.Textbox(
454
- label="Language", info="Required field<br>(programming language information)", elem_classes="mandatory_field")
455
  version_software = gr.Textbox(
456
- label="Version", info="(version of the programming language)")
457
 
458
  return [language, version_software]
459
 
@@ -473,9 +472,9 @@ def create_infrastructure_tab():
473
  label="Cloud Instance", info="(If you are on a cloud vm, the name of your cloud instance, for example : a1.large, dasv4-type2...)")
474
  cloudService = gr.Textbox(
475
  label="Cloud Service", info="(If you are using an AI cloud service, the name of your cloud service, for example : openAI service...)")
476
- with gr.Accordion("Components"):
477
  _, componentName, componentType, nbComponent, memorySize, manufacturer_infra, family, series, share, add_component_btn = create_dynamic_section(
478
- section_name="Component",
479
  fields_config=[
480
 
481
  {
@@ -566,7 +565,7 @@ def create_quality_tab():
566
  quality = gr.Dropdown(value=None,
567
  label="Quality",
568
  choices=QUALITY_LEVELS,
569
- info="(the quality of the information provided)"
570
  )
571
 
572
  return [quality]
 
3
  REPORT_STATUS_OPTIONS, CONFIDENTIALITY_LEVELS, DATA_USAGE_OPTIONS, DATA_FORMAT,
4
  DATA_TYPES, DATA_SOURCE,
5
  ACCURACY_LEVELS, INFRA_TYPES,
6
+ POWER_SUPPLIER_TYPES, POWER_SOURCES, QUALITY_LEVELS
 
7
  )
8
 
9
 
 
143
  reportId = gr.Textbox(
144
  label="Report ID", info="(the unique identifier of this report, preferably as a uuid4 string)")
145
  reportDatetime = gr.Textbox(
146
+ label="Report Datetime", info="Required field<br>(the publishing date of this report in format YYYY-MM-DD HH:MM:SS)", elem_classes="mandatory_field")
147
  reportStatus = gr.Dropdown(value=None,
148
  label="Report Status",
149
  choices=REPORT_STATUS_OPTIONS,
 
184
  label="Number of Requests", info="(if inference stage, the number of requests the measure corresponds to, 0 or empty if you're not measuring the inference stage)",
185
  value=lambda: None, minimum=0)
186
 
187
+ with gr.Accordion("Algorithms", elem_id="mandatory_part"):
188
  _, trainingType, algorithmType, algorithmName, algorithmUri, foundationModelName, foundationModelUri, parametersNumber, framework, frameworkVersion, classPath, layersNumber, epochsNumber, optimizer, quantization, add_algorithm_btn = create_dynamic_section(
189
+ section_name="algorithm",
190
  fields_config=[
191
  {
192
  "type": gr.Textbox,
 
201
  {
202
  "type": gr.Textbox,
203
  "label": "Algorithm Name",
204
+ "info": "(the case-sensitive common name of the algorithm, example: randomForest, naive bayes, cnn, rnn, transformers, if you are directly using a foundation model, let it empty and fill the field foundationModelName...)",
205
  },
206
  {
207
  "type": gr.Textbox,
 
263
  layout="column"
264
  )
265
 
266
+ with gr.Accordion("Dataset", elem_id="mandatory_part"):
267
  _, dataUsage, dataType, dataFormat, dataSize, dataQuantity, shape, source, sourceUri, owner, add_dataset_btn = create_dynamic_section(
268
+ section_name="dataset",
269
  fields_config=[
270
  {
271
  "type": gr.Dropdown,
 
352
  with gr.Tab("Measures", elem_id="mandatory_part"):
353
  with gr.Accordion("Measures"):
354
  _, measurementMethod, manufacturer, version, cpuTrackingMode, gpuTrackingMode, averageUtilizationCpu, averageUtilizationGpu, powerCalibrationMeasurement, durationCalibrationMeasurement, powerConsumption, measurementDuration, measurementDateTime, add_measurement_btn = create_dynamic_section(
355
+ section_name="measure",
356
  fields_config=[
357
  {
358
  "type": gr.Textbox,
359
  "label": "Method of measurement",
360
+ "info": "Required field<br>(the method used to perform the energy measure, example: codecarbon, carbonai, flops-compute, wattmeter...)",
361
  "elem_classes": "mandatory_field",
362
  },
363
  {
 
450
  """Create the software tab components."""
451
  with gr.Tab("Software"):
452
  language = gr.Textbox(
453
+ label="Language", info="Required field<br>(name of the programming language used, example : c, java, julia, python...)", elem_classes="mandatory_field")
454
  version_software = gr.Textbox(
455
+ label="Version", info="(version of the programming language used)")
456
 
457
  return [language, version_software]
458
 
 
472
  label="Cloud Instance", info="(If you are on a cloud vm, the name of your cloud instance, for example : a1.large, dasv4-type2...)")
473
  cloudService = gr.Textbox(
474
  label="Cloud Service", info="(If you are using an AI cloud service, the name of your cloud service, for example : openAI service...)")
475
+ with gr.Accordion("Components", elem_id="mandatory_part"):
476
  _, componentName, componentType, nbComponent, memorySize, manufacturer_infra, family, series, share, add_component_btn = create_dynamic_section(
477
+ section_name="component",
478
  fields_config=[
479
 
480
  {
 
565
  quality = gr.Dropdown(value=None,
566
  label="Quality",
567
  choices=QUALITY_LEVELS,
568
+ info="(the quality of the information you provided, 3 possibilities : high (percentage error +/-10%), medium (percentage error +/-25%), low (percentage error +/-50%))"
569
  )
570
 
571
  return [quality]