soury commited on
Commit
c9117ba
·
1 Parent(s): 7bd4b6e

update form to the new data model

Browse files
Pipfile.lock ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_meta": {
3
+ "hash": {
4
+ "sha256": "702ad05de9bc9de99a4807c8dde1686f31e0041d7b5f6f6b74861195a52110f5"
5
+ },
6
+ "pipfile-spec": 6,
7
+ "requires": {
8
+ "python_version": "3.12"
9
+ },
10
+ "sources": [
11
+ {
12
+ "name": "pypi",
13
+ "url": "https://pypi.org/simple",
14
+ "verify_ssl": true
15
+ }
16
+ ]
17
+ },
18
+ "default": {},
19
+ "develop": {}
20
+ }
assets/styles/app.css ADDED
@@ -0,0 +1,44 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Theme customization */
2
+ :root, :root .dark {
3
+ --primary-50: #fef2f2;
4
+ --primary-100: #fee2e2;
5
+ --primary-200: #fecaca;
6
+ --primary-300: #fca5a5;
7
+ --primary-400: #f87171;
8
+ --primary-500: #ef4444;
9
+ --primary-600: #dc2626;
10
+ --primary-700: #b91c1c;
11
+ --primary-800: #991b1b;
12
+ --primary-900: #7f1d1d;
13
+ --primary-950: #450a0a;
14
+
15
+ --neutral-50: #f6f2fa;
16
+ --neutral-100: #f2eff6;
17
+ --neutral-200: #e4e1e8;
18
+ --neutral-300: #d5d2d9;
19
+ --neutral-400: #bfbcc2;
20
+ --neutral-500: #78777a;
21
+ --neutral-600: #5a595c;
22
+ --neutral-700: #444345;
23
+ --neutral-800: #282829;
24
+ --neutral-900: #1c1b1c;
25
+ --neutral-950: #121112;
26
+ }
27
+
28
+ /* Override the default grey background color */
29
+ div {
30
+ background : white;
31
+ }
32
+
33
+ #mandatory_part-button{
34
+ color: var(--primary-600);
35
+ font-weight: bold;
36
+ }
37
+ .mandatory_field label > span{
38
+ color: var(--primary-600);
39
+ font-weight: bold !important;
40
+ }
41
+ .mandatory_field div > span{
42
+ color: var(--primary-600);
43
+ font-weight: bold !important;
44
+ }
{utils → assets/utils}/validation.py RENAMED
@@ -1,4 +1,4 @@
1
- from config import OBLIGATORY_FIELDS
2
 
3
 
4
  def validate_obligatory_fields(data):
@@ -20,6 +20,7 @@ def validate_obligatory_fields(data):
20
  return None
21
 
22
  missing_fields = []
 
23
  for field in OBLIGATORY_FIELDS:
24
  # if the field is mandatory, check if it is inside a mandatory section
25
 
 
1
+ from src.services.util import OBLIGATORY_FIELDS
2
 
3
 
4
  def validate_obligatory_fields(data):
 
20
  return None
21
 
22
  missing_fields = []
23
+
24
  for field in OBLIGATORY_FIELDS:
25
  # if the field is mandatory, check if it is inside a mandatory section
26
 
main.py ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ from src.app import app
2
+
3
+ print("Launching BoAmps form")
4
+ app.launch()
services/json_generator.py DELETED
@@ -1,398 +0,0 @@
1
- import json
2
- import tempfile
3
- from datetime import datetime
4
- from utils.validation import validate_obligatory_fields
5
-
6
-
7
- def generate_json(
8
- # Header
9
- licensing, formatVersion, formatVersionSpecificationUri, reportId, reportDatetime, reportStatus,
10
- publisher_name, publisher_division, publisher_projectName, publisher_confidentialityLevel, publisher_publicKey,
11
- # Task
12
- taskType, taskFamily, taskStage, algorithmName, framework, frameworkVersion, classPath, tuning_method,
13
- hyperparameter_names, hyperparameter_values, quantization, dataType, fileType, volume, volumeUnit, items,
14
- shape_item, nbRequest, nbTokensInput, nbWordsInput, nbTokensOutput, nbWordsOutput, contextWindowSize, cache,
15
- source, sourceUri, owner, measuredAccuracy, estimatedAccuracy,
16
- # Measures
17
- measurementMethod, manufacturer, version, cpuTrackingMode, gpuTrackingMode, averageUtilizationCpu,
18
- averageUtilizationGpu, serverSideInference, unit, powerCalibrationMeasurement, durationCalibrationMeasurement,
19
- powerConsumption, measurementDuration, measurementDateTime,
20
- # System
21
- os, distribution, distributionVersion,
22
- # Software
23
- language, version_software,
24
- # Infrastructure
25
- infraType, cloudProvider, cloudInstance, componentName, componentType, nbComponent, memorySize, manufacturer_infra, family, series, share,
26
- # Environment
27
- country, latitude, longitude, location, powerSupplierType, powerSource, powerSourceCarbonIntensity,
28
- # Quality
29
- quality,
30
- # Hash
31
- hashAlgorithm, cryptographicAlgorithm, value_hash
32
- ):
33
- """Generate JSON data from form inputs."""
34
- # TO CHANGE
35
- # Process hyperparameters
36
- hyperparameters = []
37
- max_length = max(len(hyperparameter_names), len(hyperparameter_values))
38
- for i in range(max_length):
39
- hyperparameters.append({
40
- "name": hyperparameter_names[i] if i < len(hyperparameter_names) and hyperparameter_names[i] else "",
41
- "value": hyperparameter_values[i] if i < len(hyperparameter_values) and hyperparameter_values[i] else ""
42
- })
43
-
44
- # Process inference properties
45
- inference_props_list = []
46
- max_length = max(len(nbRequest), len(nbTokensInput), len(nbWordsInput), len(
47
- nbTokensOutput), len(nbWordsOutput), len(contextWindowSize), len(cache))
48
- for i in range(max_length):
49
- inference_props = {}
50
- if i < len(nbRequest) and nbRequest[i]:
51
- inference_props["nbRequest"] = nbRequest[i]
52
- if i < len(nbTokensInput) and nbTokensInput[i]:
53
- inference_props["nbTokensInput"] = nbTokensInput[i]
54
- if i < len(nbWordsInput) and nbWordsInput[i]:
55
- inference_props["nbWordsInput"] = nbWordsInput[i]
56
- if i < len(nbTokensOutput) and nbTokensOutput[i]:
57
- inference_props["nbTokensOutput"] = nbTokensOutput[i]
58
- if i < len(nbWordsOutput) and nbWordsOutput[i]:
59
- inference_props["nbWordsOutput"] = nbWordsOutput[i]
60
- if i < len(contextWindowSize) and contextWindowSize[i]:
61
- inference_props["contextWindowSize"] = contextWindowSize[i]
62
- if i < len(cache) and cache[i]:
63
- inference_props["cache"] = cache[i]
64
- inference_props_list.append(inference_props)
65
-
66
- # Process components
67
- components_list = []
68
- max_length = max(len(componentName), len(componentType), len(nbComponent), len(memorySize), len(
69
- manufacturer_infra), len(family), len(series), len(share))
70
- for i in range(max_length):
71
- component = {}
72
- if i < len(componentName) and componentName[i]:
73
- component["componentName"] = componentName[i]
74
- if i < len(componentType) and componentType[i]:
75
- component["componentType"] = componentType[i]
76
- if i < len(nbComponent) and nbComponent[i]:
77
- component["nbComponent"] = nbComponent[i]
78
- if i < len(memorySize) and memorySize[i]:
79
- component["memorySize"] = memorySize[i]
80
- if i < len(manufacturer_infra) and manufacturer_infra[i]:
81
- component["manufacturer"] = manufacturer_infra[i]
82
- if i < len(family) and family[i]:
83
- component["family"] = family[i]
84
- if i < len(series) and series[i]:
85
- component["series"] = series[i]
86
- if i < len(share) and share[i]:
87
- component["share"] = share[i]
88
- components_list.append(component)
89
-
90
- # process report
91
- report = {}
92
-
93
- # Process header
94
- header = {}
95
- if licensing:
96
- header["licensing"] = licensing
97
- if formatVersion:
98
- header["formatVersion"] = formatVersion
99
- if formatVersionSpecificationUri:
100
- header["formatVersionSpecificationUri"] = formatVersionSpecificationUri
101
- if reportId:
102
- header["reportId"] = reportId
103
- if reportDatetime:
104
- header["reportDatetime"] = reportDatetime or datetime.now().isoformat()
105
- if reportStatus:
106
- header["reportStatus"] = reportStatus
107
-
108
- publisher = {}
109
- if publisher_name:
110
- publisher["name"] = publisher_name
111
- if publisher_division:
112
- publisher["division"] = publisher_division
113
- if publisher_projectName:
114
- publisher["projectName"] = publisher_projectName
115
- if publisher_confidentialityLevel:
116
- publisher["confidentialityLevel"] = publisher_confidentialityLevel
117
- if publisher_publicKey:
118
- publisher["publicKey"] = publisher_publicKey
119
-
120
- if publisher:
121
- header["publisher"] = publisher
122
-
123
- if header:
124
- report["header"] = header
125
-
126
- # proceed task
127
-
128
- # proceed algorithm
129
- algorithm = {}
130
- if algorithmName:
131
- algorithm["algorithmName"] = algorithmName
132
- if framework:
133
- algorithm["framework"] = framework
134
- if frameworkVersion:
135
- algorithm["frameworkVersion"] = frameworkVersion
136
- if classPath:
137
- algorithm["classPath"] = classPath
138
- if hyperparameters:
139
- algorithm["hyperparameters"] = hyperparameters
140
- if quantization:
141
- algorithm["quantization"] = quantization
142
-
143
- # proceed dataset
144
- dataset = {}
145
- if dataType:
146
- dataset["dataType"] = dataType
147
- if fileType:
148
- dataset["fileType"] = fileType
149
- if volume:
150
- dataset["volume"] = volume
151
- if volumeUnit:
152
- dataset["volumeUnit"] = volumeUnit
153
- if items:
154
- dataset["items"] = items
155
- if shape_item:
156
- dataset["shape"] = [{"item": shape_item}]
157
- if inference_props_list:
158
- dataset["inferenceProperties"] = inference_props_list
159
- if source:
160
- dataset["source"] = source
161
- if sourceUri:
162
- dataset["sourceUri"] = sourceUri
163
- if owner:
164
- dataset["owner"] = owner
165
-
166
- # proceed all task
167
- task = {}
168
- if taskType:
169
- task["taskType"] = taskType
170
- if taskFamily:
171
- task["taskFamily"] = taskFamily
172
- if taskStage:
173
- task["taskStage"] = taskStage
174
- if algorithm:
175
- task["algorithms"] = [algorithm]
176
- if dataset:
177
- task["dataset"] = [dataset]
178
- if measuredAccuracy:
179
- task["measuredAccuracy"] = measuredAccuracy
180
- if estimatedAccuracy:
181
- task["estimatedAccuracy"] = estimatedAccuracy
182
- report["task"] = task
183
-
184
- # proceed measures
185
- measures = {}
186
- if measurementMethod:
187
- measures["measurementMethod"] = measurementMethod
188
- if manufacturer:
189
- measures["manufacturer"] = manufacturer
190
- if version:
191
- measures["version"] = version
192
- if cpuTrackingMode:
193
- measures["cpuTrackingMode"] = cpuTrackingMode
194
- if gpuTrackingMode:
195
- measures["gpuTrackingMode"] = gpuTrackingMode
196
- if averageUtilizationCpu:
197
- measures["averageUtilizationCpu"] = averageUtilizationCpu
198
- if averageUtilizationGpu:
199
- measures["averageUtilizationGpu"] = averageUtilizationGpu
200
- if serverSideInference:
201
- measures["serverSideInference"] = serverSideInference
202
- if unit:
203
- measures["unit"] = unit
204
- if powerCalibrationMeasurement:
205
- measures["powerCalibrationMeasurement"] = powerCalibrationMeasurement
206
- if durationCalibrationMeasurement:
207
- measures["durationCalibrationMeasurement"] = durationCalibrationMeasurement
208
- if powerConsumption:
209
- measures["powerConsumption"] = powerConsumption
210
- if measurementDuration:
211
- measures["measurementDuration"] = measurementDuration
212
- if measurementDateTime:
213
- measures["measurementDateTime"] = measurementDateTime
214
- report["measures"] = [measures]
215
-
216
- # proceed system
217
- system = {}
218
- if os:
219
- system["os"] = os
220
- if distribution:
221
- system["distribution"] = distribution
222
- if distributionVersion:
223
- system["distributionVersion"] = distributionVersion
224
- if system:
225
- report["system"] = system
226
-
227
- # proceed software
228
- software = {}
229
- if language:
230
- software["language"] = language
231
- if version_software:
232
- software["version"] = version_software
233
- if software:
234
- report["software"] = software
235
-
236
- # proceed infrastructure
237
- infrastructure = {}
238
- if infraType:
239
- infrastructure["infraType"] = infraType
240
- if cloudProvider:
241
- infrastructure["cloudProvider"] = cloudProvider
242
- if cloudInstance:
243
- infrastructure["cloudInstance"] = cloudInstance
244
- if components_list:
245
- infrastructure["components"] = components_list
246
- report["infrastructure"] = infrastructure
247
-
248
- # proceed environment
249
- environment = {}
250
- if country:
251
- environment["country"] = country
252
- if latitude:
253
- environment["latitude"] = latitude
254
- if longitude:
255
- environment["longitude"] = longitude
256
- if location:
257
- environment["location"] = location
258
- if powerSupplierType:
259
- environment["powerSupplierType"] = powerSupplierType
260
- if powerSource:
261
- environment["powerSource"] = powerSource
262
- if powerSourceCarbonIntensity:
263
- environment["powerSourceCarbonIntensity"] = powerSourceCarbonIntensity
264
- report["environment"] = environment
265
-
266
- # proceed quality
267
- if quality:
268
- report["quality"] = quality
269
-
270
- # proceed hash
271
- hash = {}
272
- if hashAlgorithm:
273
- hash["hashAlgorithm"] = hashAlgorithm
274
- if cryptographicAlgorithm:
275
- hash["cryptographicAlgorithm"] = cryptographicAlgorithm
276
- if value_hash:
277
- hash["value"] = value_hash
278
- if hash:
279
- report["$hash"] = hash
280
-
281
- """
282
- data = {
283
- "header": {
284
- "licensing": licensing,
285
- "formatVersion": formatVersion,
286
- "formatVersionSpecificationUri": formatVersionSpecificationUri,
287
- "reportId": reportId,
288
- "reportDatetime": reportDatetime or datetime.now().isoformat(),
289
- "reportStatus": reportStatus,
290
- "publisher": {
291
- "name": publisher_name,
292
- "division": publisher_division,
293
- "projectName": publisher_projectName,
294
- "confidentialityLevel": publisher_confidentialityLevel,
295
- "publicKey": publisher_publicKey
296
- }
297
- },
298
- "task": {
299
- "taskType": taskType,
300
- "taskFamily": taskFamily,
301
- "taskStage": taskStage,
302
- "algorithms": [
303
- {
304
- "algorithmName": algorithmName,
305
- "framework": framework,
306
- "frameworkVersion": frameworkVersion,
307
- "classPath": classPath,
308
- "hyperparameters": {
309
- "tuning_method": tuning_method,
310
- "values": hyperparameters,
311
- },
312
- "quantization": quantization
313
- }
314
- ],
315
- "dataset": [
316
- {
317
- "dataType": dataType,
318
- "fileType": fileType,
319
- "volume": volume,
320
- "volumeUnit": volumeUnit,
321
- "items": items,
322
- "shape": [
323
- {
324
- "item": shape_item
325
- }
326
- ],
327
- "inferenceProperties": inference_props_list,
328
- "source": source,
329
- "sourceUri": sourceUri,
330
- "owner": owner
331
- }
332
- ],
333
- "measuredAccuracy": measuredAccuracy,
334
- "estimatedAccuracy": estimatedAccuracy
335
- },
336
- "measures": [
337
- {
338
- "measurementMethod": measurementMethod,
339
- "manufacturer": manufacturer,
340
- "version": version,
341
- "cpuTrackingMode": cpuTrackingMode,
342
- "gpuTrackingMode": gpuTrackingMode,
343
- "averageUtilizationCpu": averageUtilizationCpu,
344
- "averageUtilizationGpu": averageUtilizationGpu,
345
- "serverSideInference": serverSideInference,
346
- "unit": unit,
347
- "powerCalibrationMeasurement": powerCalibrationMeasurement,
348
- "durationCalibrationMeasurement": durationCalibrationMeasurement,
349
- "powerConsumption": powerConsumption,
350
- "measurementDuration": measurementDuration,
351
- "measurementDateTime": measurementDateTime
352
- }
353
- ],
354
- "system": {
355
- "os": os,
356
- "distribution": distribution,
357
- "distributionVersion": distributionVersion
358
- },
359
- "software": {
360
- "language": language,
361
- "version": version_software
362
- },
363
- "infrastructure": {
364
- "infraType": infraType,
365
- "cloudProvider": cloudProvider,
366
- "cloudInstance": cloudInstance,
367
- "components": components_list
368
- },
369
- "environment": {
370
- "country": country,
371
- "latitude": latitude,
372
- "longitude": longitude,
373
- "location": location,
374
- "powerSupplierType": powerSupplierType,
375
- "powerSource": powerSource,
376
- "powerSourceCarbonIntensity": powerSourceCarbonIntensity
377
- },
378
- "quality": quality,
379
- "$hash": {
380
- "hashAlgorithm": hashAlgorithm,
381
- "cryptographicAlgorithm": cryptographicAlgorithm,
382
- "ecryptedValue": value_hash
383
- }
384
- }
385
- """
386
-
387
- # Validate obligatory fields
388
- is_valid, message = validate_obligatory_fields(report)
389
- if not is_valid:
390
- return message, None, ""
391
-
392
- # Create the JSON string
393
- json_str = json.dumps(report)
394
- print(json_str)
395
- # Create and save the JSON file
396
- with tempfile.NamedTemporaryFile(mode='w', prefix="report", delete=False, suffix='.json') as file:
397
- json.dump(report, file, indent=4)
398
- return message, file.name, json_str
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
app.py → src/app.py RENAMED
@@ -1,7 +1,8 @@
1
  import gradio as gr
2
- from services.huggingface import init_huggingface, update_dataset
3
- from services.json_generator import generate_json
4
- from ui.form_components import (
 
5
  create_header_tab,
6
  create_task_tab,
7
  create_measures_tab,
@@ -12,6 +13,7 @@ from ui.form_components import (
12
  create_quality_tab,
13
  create_hash_tab
14
  )
 
15
 
16
  # Initialize Hugging Face
17
  init_huggingface()
@@ -31,7 +33,7 @@ def handle_submit(*inputs):
31
 
32
 
33
  # Create Gradio interface
34
- with gr.Blocks() as demo:
35
  gr.Markdown("## Data Collection Form")
36
  gr.Markdown("Welcome to this Huggingface space, where you can create a report on the energy consumption of an AI task in BoAmps format, by filling in a form.")
37
 
@@ -69,6 +71,3 @@ with gr.Blocks() as demo:
69
  ],
70
  outputs=[output, file_output, json_output]
71
  )
72
-
73
- if __name__ == "__main__":
74
- demo.launch()
 
1
  import gradio as gr
2
+ from os import path
3
+ from src.services.huggingface import init_huggingface, update_dataset
4
+ from src.services.json_generator import generate_json
5
+ from src.ui.form_components import (
6
  create_header_tab,
7
  create_task_tab,
8
  create_measures_tab,
 
13
  create_quality_tab,
14
  create_hash_tab
15
  )
16
+ css_path = path.join(path.dirname(__file__), "../assets/styles/app.css")
17
 
18
  # Initialize Hugging Face
19
  init_huggingface()
 
33
 
34
 
35
  # Create Gradio interface
36
+ with gr.Blocks(css_paths=css_path) as app:
37
  gr.Markdown("## Data Collection Form")
38
  gr.Markdown("Welcome to this Huggingface space, where you can create a report on the energy consumption of an AI task in BoAmps format, by filling in a form.")
39
 
 
71
  ],
72
  outputs=[output, file_output, json_output]
73
  )
 
 
 
{services → src/services}/huggingface.py RENAMED
@@ -1,7 +1,6 @@
1
- from huggingface_hub import login
2
- from datasets import load_dataset, Dataset, concatenate_datasets
3
  import json
4
- from config import HF_TOKEN, DATASET_NAME
 
5
 
6
 
7
  def init_huggingface():
@@ -44,22 +43,6 @@ def update_dataset(json_data):
44
 
45
  def create_flattened_data(data):
46
  """Create a flattened data structure for the dataset."""
47
- # Handle hyperparameters
48
- hyperparameters = data.get("task", {}).get("algorithms", [{}])[
49
- 0].get("hyperparameters", {}).get("values", [])
50
-
51
- # Process hyperparameters
52
- hyperparameter_names = []
53
- hyperparameter_values = []
54
- for hp in hyperparameters:
55
- if "name" in hp and "value" in hp: # Match the keys used in JSON
56
- hyperparameter_names.append(hp["name"])
57
- hyperparameter_values.append(str(hp["value"]))
58
-
59
- hyperparameter_name_str = ", ".join(
60
- hyperparameter_names) if hyperparameter_names else None
61
- hyperparameter_value_str = ", ".join(
62
- hyperparameter_values) if hyperparameter_values else None
63
 
64
  # Handle inference properties
65
  inference_props = data.get("task", {}).get(
@@ -139,16 +122,12 @@ def create_flattened_data(data):
139
  "publisher_publicKey": [data["header"]["publisher"]["publicKey"]],
140
 
141
  # Task
142
- "taskType": [data["task"]["taskType"]],
143
  "taskFamily": [data["task"]["taskFamily"]],
144
  "taskStage": [data["task"]["taskStage"]],
145
  "algorithmName": [data["task"]["algorithms"][0]["algorithmName"]],
146
  "framework": [data["task"]["algorithms"][0]["framework"]],
147
  "frameworkVersion": [data["task"]["algorithms"][0]["frameworkVersion"]],
148
  "classPath": [data["task"]["algorithms"][0]["classPath"]],
149
- "tuning_method": [data["task"]["algorithms"][0]["hyperparameters"]["tuning_method"]],
150
- "hyperparameterName": [hyperparameter_name_str],
151
- "hyperparameterValue": [hyperparameter_value_str],
152
  "quantization": [data["task"]["algorithms"][0]["quantization"]],
153
  "dataType": [data["task"]["dataset"][0]["dataType"]],
154
  "fileType": [data["task"]["dataset"][0]["fileType"]],
 
 
 
1
  import json
2
+ from huggingface_hub import login
3
+ from src.services.util import HF_TOKEN
4
 
5
 
6
  def init_huggingface():
 
43
 
44
  def create_flattened_data(data):
45
  """Create a flattened data structure for the dataset."""
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
46
 
47
  # Handle inference properties
48
  inference_props = data.get("task", {}).get(
 
122
  "publisher_publicKey": [data["header"]["publisher"]["publicKey"]],
123
 
124
  # Task
 
125
  "taskFamily": [data["task"]["taskFamily"]],
126
  "taskStage": [data["task"]["taskStage"]],
127
  "algorithmName": [data["task"]["algorithms"][0]["algorithmName"]],
128
  "framework": [data["task"]["algorithms"][0]["framework"]],
129
  "frameworkVersion": [data["task"]["algorithms"][0]["frameworkVersion"]],
130
  "classPath": [data["task"]["algorithms"][0]["classPath"]],
 
 
 
131
  "quantization": [data["task"]["algorithms"][0]["quantization"]],
132
  "dataType": [data["task"]["dataset"][0]["dataType"]],
133
  "fileType": [data["task"]["dataset"][0]["fileType"]],
src/services/json_generator.py ADDED
@@ -0,0 +1,235 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import tempfile
3
+ from datetime import datetime
4
+ from assets.utils.validation import validate_obligatory_fields
5
+
6
+
7
+ def generate_json(
8
+ # Header
9
+ licensing, formatVersion, formatVersionSpecificationUri, reportId,
10
+ reportDatetime, reportStatus, publisher_name, publisher_division,
11
+ publisher_projectName, publisher_confidentialityLevel, publisher_publicKey,
12
+ # Task
13
+ taskFamily, taskStage, nbRequest,
14
+ trainingType, algorithmType, algorithmName, algorithmUri, foundationModelName, foundationModelUri, parametersNumber, framework, frameworkVersion, classPath, layersNumber, epochsNumber, optimizer, quantization,
15
+ dataUsage, dataType, dataFormat, dataSize, dataQuantity, shape, source, sourceUri, owner,
16
+ measuredAccuracy, estimatedAccuracy,
17
+ # Measures
18
+ measurementMethod, manufacturer, version, cpuTrackingMode, gpuTrackingMode,
19
+ averageUtilizationCpu, averageUtilizationGpu, powerCalibrationMeasurement,
20
+ durationCalibrationMeasurement, powerConsumption,
21
+ measurementDuration, measurementDateTime,
22
+ # System
23
+ os, distribution, distributionVersion,
24
+ # Software
25
+ language, version_software,
26
+ # Infrastructure
27
+ infraType, cloudProvider, cloudInstance, cloudService, componentName, componentType,
28
+ nbComponent, memorySize, manufacturer_infra, family,
29
+ series, share,
30
+ # Environment
31
+ country, latitude, longitude, location,
32
+ powerSupplierType, powerSource, powerSourceCarbonIntensity,
33
+ # Quality
34
+ quality,
35
+ # Hash
36
+ hashAlgorithm, cryptographicAlgorithm, value_hash
37
+ ):
38
+ """Generate JSON data from form inputs."""
39
+ # Process algorithms
40
+ algorithms_list = []
41
+ algorithm_fields = {"trainingType": trainingType, "algorithmType": algorithmType, "algorithmName": algorithmName, "algorithmUri": algorithmUri, "foundationModelName": foundationModelName, "foundationModelUri": foundationModelUri,
42
+ "parametersNumber": parametersNumber, "framework": framework, "frameworkVersion": frameworkVersion, "classPath": classPath, "layersNumber": layersNumber, "epochsNumber": epochsNumber, "optimizer": optimizer, "quantization": quantization}
43
+ nb_algo = 0
44
+ # FIXME: this doesn't work (author note — logic below needs review)
45
+ for f in algorithm_fields:
46
+ nb_algo = max(nb_algo, len(algorithm_fields[f]))
47
+ for i in range(nb_algo):
48
+ algortithm = {}
49
+ for f in algorithm_fields:
50
+ if i < len(algorithm_fields[f]) and algorithm_fields[f][i]:
51
+ algortithm[f] = algorithm_fields[f][i]
52
+ algorithms_list.append(algortithm)
53
+
54
+ # Process dataset
55
+ dataset_list = []
56
+ dataset_fields = {"dataUsage": dataUsage, "dataType": dataType, "dataFormat": dataFormat, "dataSize": dataSize,
57
+ "dataQuantity": dataQuantity, "shape": shape, "source": source, "sourceUri": sourceUri, "owner": owner}
58
+ nb_data = 0
59
+ for f in dataset_fields:
60
+ nb_data = max(nb_data, len(dataset_fields[f]))
61
+ for i in range(nb_data):
62
+ data = {}
63
+ for f in dataset_fields:
64
+ if i < len(dataset_fields[f]) and dataset_fields[f][i]:
65
+ data[f] = dataset_fields[f][i]
66
+ dataset_list.append(data)
67
+
68
+ # Process measures
69
+ measures_list = []
70
+ measure_fields = {"measurementMethod": measurementMethod, "manufacturer": manufacturer, "version": version, "cpuTrackingMode": cpuTrackingMode,
71
+ "gpuTrackingMode": gpuTrackingMode, "averageUtilizationCpu": averageUtilizationCpu, "averageUtilizationGpu": averageUtilizationGpu,
72
+ "powerCalibrationMeasurement": powerCalibrationMeasurement, "durationCalibrationMeasurement": durationCalibrationMeasurement,
73
+ "powerConsumption": powerConsumption, "measurementDuration": measurementDuration, "measurementDateTime": measurementDateTime}
74
+ nb_measures = 0
75
+ for f in measure_fields:
76
+ nb_measures = max(nb_measures, len(measure_fields[f]))
77
+ for i in range(nb_measures):
78
+ measure = {}
79
+ for f in measure_fields:
80
+ if i < len(measure_fields[f]) and measure_fields[f][i]:
81
+ measure[f] = measure_fields[f][i]
82
+ measures_list.append(measure)
83
+
84
+ # Process components
85
+ components_list = []
86
+ component_fields = {"componentName": componentName, "componentType": componentType, "nbComponent": nbComponent,
87
+ "memorySize": memorySize, "manufacturer_infra": manufacturer_infra, "family": family,
88
+ "series": series, "share": share}
89
+ nb_components = 0
90
+ for f in component_fields:
91
+ nb_components = max(nb_components, len(component_fields[f]))
92
+ for i in range(nb_components):
93
+ component = {}
94
+ for f in component_fields:
95
+ if i < len(component_fields[f]) and component_fields[f][i]:
96
+ component[f] = component_fields[f][i]
97
+ components_list.append(component)
98
+
99
+ # process report
100
+ report = {}
101
+
102
+ # Process header
103
+ header = {}
104
+ if licensing:
105
+ header["licensing"] = licensing
106
+ if formatVersion:
107
+ header["formatVersion"] = formatVersion
108
+ if formatVersionSpecificationUri:
109
+ header["formatVersionSpecificationUri"] = formatVersionSpecificationUri
110
+ if reportId:
111
+ header["reportId"] = reportId
112
+ if reportDatetime:
113
+ header["reportDatetime"] = reportDatetime or datetime.now().isoformat()
114
+ if reportStatus:
115
+ header["reportStatus"] = reportStatus
116
+
117
+ publisher = {}
118
+ if publisher_name:
119
+ publisher["name"] = publisher_name
120
+ if publisher_division:
121
+ publisher["division"] = publisher_division
122
+ if publisher_projectName:
123
+ publisher["projectName"] = publisher_projectName
124
+ if publisher_confidentialityLevel:
125
+ publisher["confidentialityLevel"] = publisher_confidentialityLevel
126
+ if publisher_publicKey:
127
+ publisher["publicKey"] = publisher_publicKey
128
+
129
+ if publisher:
130
+ header["publisher"] = publisher
131
+
132
+ if header:
133
+ report["header"] = header
134
+
135
+ # proceed task
136
+ task = {}
137
+ if taskStage:
138
+ task["taskStage"] = taskStage
139
+ if taskFamily:
140
+ task["taskFamily"] = taskFamily
141
+ if nbRequest:
142
+ task["nbRequest"] = nbRequest
143
+ if algorithms_list:
144
+ task["algorithms"] = algorithms_list
145
+ if dataset_list:
146
+ task["dataset"] = dataset_list
147
+ if measuredAccuracy:
148
+ task["measuredAccuracy"] = measuredAccuracy
149
+ if estimatedAccuracy:
150
+ task["estimatedAccuracy"] = estimatedAccuracy
151
+ report["task"] = task
152
+
153
+ # proceed measures
154
+ if measures_list:
155
+ report["measures"] = measures_list
156
+
157
+ # proceed system
158
+ system = {}
159
+ if os:
160
+ system["os"] = os
161
+ if distribution:
162
+ system["distribution"] = distribution
163
+ if distributionVersion:
164
+ system["distributionVersion"] = distributionVersion
165
+ if system:
166
+ report["system"] = system
167
+
168
+ # proceed software
169
+ software = {}
170
+ if language:
171
+ software["language"] = language
172
+ if version_software:
173
+ software["version"] = version_software
174
+ if software:
175
+ report["software"] = software
176
+
177
+ # proceed infrastructure
178
+ infrastructure = {}
179
+ if infraType:
180
+ infrastructure["infraType"] = infraType
181
+ if cloudProvider:
182
+ infrastructure["cloudProvider"] = cloudProvider
183
+ if cloudInstance:
184
+ infrastructure["cloudInstance"] = cloudInstance
185
+ if cloudService:
186
+ infrastructure["cloudService"] = cloudService
187
+ if components_list:
188
+ infrastructure["components"] = components_list
189
+ report["infrastructure"] = infrastructure
190
+
191
+ # proceed environment
192
+ environment = {}
193
+ if country:
194
+ environment["country"] = country
195
+ if latitude:
196
+ environment["latitude"] = latitude
197
+ if longitude:
198
+ environment["longitude"] = longitude
199
+ if location:
200
+ environment["location"] = location
201
+ if powerSupplierType:
202
+ environment["powerSupplierType"] = powerSupplierType
203
+ if powerSource:
204
+ environment["powerSource"] = powerSource
205
+ if powerSourceCarbonIntensity:
206
+ environment["powerSourceCarbonIntensity"] = powerSourceCarbonIntensity
207
+ if environment:
208
+ report["environment"] = environment
209
+
210
+ # proceed quality
211
+ if quality:
212
+ report["quality"] = quality
213
+
214
+ # proceed hash
215
+ hash = {}
216
+ if hashAlgorithm:
217
+ hash["hashAlgorithm"] = hashAlgorithm
218
+ if cryptographicAlgorithm:
219
+ hash["cryptographicAlgorithm"] = cryptographicAlgorithm
220
+ if value_hash:
221
+ hash["value_hash"] = value_hash
222
+ if hash:
223
+ report["hash"] = hash
224
+
225
+ # Validate obligatory fields
226
+ is_valid, message = validate_obligatory_fields(report)
227
+ if not is_valid:
228
+ return message, None, ""
229
+ # Create the JSON string
230
+ json_str = json.dumps(report)
231
+ print(json_str)
232
+ # Create and save the JSON file
233
+ with tempfile.NamedTemporaryFile(mode='w', prefix="report", delete=False, suffix='.json') as file:
234
+ json.dump(report, file, indent=4)
235
+ return message, file.name, json_str
config.py → src/services/util.py RENAMED
@@ -5,30 +5,31 @@ HF_TOKEN = os.environ.get("HF_TOKEN")
5
  DATASET_NAME = "soprasteria/BoAmps_leaderboard"
6
 
7
  # Form Field Configurations
8
- MANDATORY_SECTIONS = ["task", "measures", "infrastructure", "environment"]
 
9
  OBLIGATORY_FIELDS = [
10
- "formatVersion", "reportId", "reportStatus", "confidentialityLevel",
11
- "taskType", "taskFamily", "taskStage", "algorithmName", "dataType",
12
- "volume", "volumeUnit", "nbRequest", "measurementMethod", "unit",
13
- "powerConsumption", "os", "language", "infraType", "componentType",
14
- "nbComponent", "country", "hashAlgorithm", "cryptographicAlgorithm", "value"
15
  ]
16
 
17
  # Dropdown Options
18
- REPORT_STATUS_OPTIONS = ["draft", "final", "corrective", "$other"]
19
  CONFIDENTIALITY_LEVELS = ["public", "internal", "confidential", "secret"]
20
- DATA_TYPES = ["tabular", "audio", "boolean",
21
- "image", "video", "object", "text", "$other"]
22
- DATA_UNITS = ['kilobyte', 'megabyte', 'gigabyte', 'terabyte',
23
- 'petabyte', 'exabyte', 'zettabyte', 'yottabyte']
 
 
24
  ACCURACY_LEVELS = ["veryPoor", "poor", "average", "good", "veryGood"]
25
  MEASUREMENT_UNITS = ["Wh", "kWh", "MWh", "GWh", "kJoule", "MJoule", "GJoule", "TJoule", "PJoule",
26
  "BTU", "kiloFLOPS", "megaFLOPS", "gigaFLOPS", "teraFLOPS", "petaFLOPS",
27
  "exaFLOPS", "zettaFLOPS", "yottaFLOPS"]
28
- INFRA_TYPES = ["publicCloud", "privateCloud", "onPremise", "$other"]
29
- POWER_SUPPLIER_TYPES = ["public", "private", "internal", "$other"]
30
  POWER_SOURCES = ["solar", "wind", "nuclear",
31
- "hydroelectric", "gas", "coal", "$other"]
32
  QUALITY_LEVELS = ["high", "medium", "low"]
33
  HASH_ALGORITHMS = ["MD5", "RIPEMD-128", "RIPEMD-160", "RIPEMD-256", "RIPEMD-320",
34
  "SHA-1", "SHA-224", "SHA256", "SHA-384", "SHA-512"]
 
5
  DATASET_NAME = "soprasteria/BoAmps_leaderboard"
6
 
7
  # Form Field Configurations
8
+ # not used and verified for now
9
+ MANDATORY_SECTIONS = ["task", "measures", "infrastructure"]
10
  OBLIGATORY_FIELDS = [
11
+ "taskStage", "taskFamily", "dataUsage", "dataType",
12
+ "measurementMethod", "powerConsumption", "infraType", "componentType",
13
+ "nbComponent"
 
 
14
  ]
15
 
16
  # Dropdown Options
17
+ REPORT_STATUS_OPTIONS = ["draft", "final", "corrective", "other"]
18
  CONFIDENTIALITY_LEVELS = ["public", "internal", "confidential", "secret"]
19
+ DATA_USAGE_OPTIONS = ["input", "output"]
20
+ DATA_FORMAT = ["3gp", "3gpp", "3gpp2", "8svx", "aa", "aac", "aax", "act", "afdesign", "afphoto", "ai", "aiff", "alac", "amr", "amv", "ape", "arrow", "asf", "au", "avi", "avif", "awb", "bmp", "bpg", "cd5", "cda", "cdr", "cgm", "clip", "cpt", "csv", "deep", "dirac", "divx", "drawingml", "drw", "dss", "dvf", "ecw", "eps", "fits", "flac", "flif", "flv", "flvf4v", "gem", "gerber", "gif", "gle", "gsm", "heif", "hp-gl", "html", "hvif", "ico", "iklax", "ilbm", "img", "ivs", "jpeg", "json", "kra", "lottie", "m4a", "m4b", "m4p", "m4v", "mathml", "matroska", "mdp", "mmf", "movpkg", "mp3", "mpc", "mpeg1",
21
+ "mpeg2", "mpeg4", "msv", "mxf", "naplps", "netpbm", "nmf", "nrrd", "nsv", "odg", "ods", "ogg", "opus", "pam", "parquet", "pbm", "pcx", "pdf", "pdn", "pgf", "pgm", "pgml", "pict", "plbm", "png", "pnm", "postscript", "ppm", "psd", "psp", "pstricks", "qcc", "quicktime", "ra", "raw", "realmedia", "regis", "rf64", "roq", "sai", "sgi", "sid", "sql", "sln", "svg", "svi", "swf", "text", "tga", "tiff", "tinyvg", "tta", "vicar", "vivoactive", "vml", "vob", "voc", "vox", "wav", "webm", "webp", "wma", "wmf", "wmv", "wv", "xaml", "xar", "xcf", "xisf", "xls", "xlsx", "xml", "xps", "yaml", "other"]
22
+ DATA_TYPES = ["tabular", "audio", "boolean", "image",
23
+ "video", "object", "text", "token", "word", "other"]
24
+ DATA_SOURCE = ["public", "private", "other"]
25
  ACCURACY_LEVELS = ["veryPoor", "poor", "average", "good", "veryGood"]
26
  MEASUREMENT_UNITS = ["Wh", "kWh", "MWh", "GWh", "kJoule", "MJoule", "GJoule", "TJoule", "PJoule",
27
  "BTU", "kiloFLOPS", "megaFLOPS", "gigaFLOPS", "teraFLOPS", "petaFLOPS",
28
  "exaFLOPS", "zettaFLOPS", "yottaFLOPS"]
29
+ INFRA_TYPES = ["publicCloud", "privateCloud", "onPremise", "other"]
30
+ POWER_SUPPLIER_TYPES = ["public", "private", "internal", "other"]
31
  POWER_SOURCES = ["solar", "wind", "nuclear",
32
+ "hydroelectric", "gas", "coal", "other"]
33
  QUALITY_LEVELS = ["high", "medium", "low"]
34
  HASH_ALGORITHMS = ["MD5", "RIPEMD-128", "RIPEMD-160", "RIPEMD-256", "RIPEMD-320",
35
  "SHA-1", "SHA-224", "SHA256", "SHA-384", "SHA-512"]
{ui → src/ui}/form_components.py RENAMED
@@ -1,9 +1,10 @@
1
  import gradio as gr
2
- from config import (
3
- REPORT_STATUS_OPTIONS, CONFIDENTIALITY_LEVELS, DATA_TYPES,
4
- DATA_UNITS, ACCURACY_LEVELS, MEASUREMENT_UNITS, INFRA_TYPES,
 
5
  POWER_SUPPLIER_TYPES, POWER_SOURCES, QUALITY_LEVELS,
6
- HASH_ALGORITHMS, CRYPTO_ALGORITHMS, CACHE_OPTIONS
7
  )
8
 
9
 
@@ -42,9 +43,11 @@ def create_dynamic_section(section_name, fields_config, initial_count=1, layout=
42
 
43
  for field_idx, config in enumerate(fields_config):
44
  component = config["type"](
45
- label=f"{config['label']} {i + 1}",
46
  info=config.get("info", ""),
47
- **config.get("kwargs", {})
 
 
48
  )
49
  row_components.append(component)
50
  field_refs.append(component)
@@ -86,17 +89,17 @@ def create_header_tab():
86
  licensing = gr.Textbox(
87
  label="Licensing", info="(the type of licensing applicable for the sharing of the report)")
88
  formatVersion = gr.Textbox(
89
- label="Format Version", info="Required field<br>(the version of the specification of this set of schemas defining the report's fields)")
90
  formatVersionSpecificationUri = gr.Textbox(
91
  label="Format Version Specification URI", info="(the URI of the present specification of this set of schemas)")
92
  reportId = gr.Textbox(
93
- label="Report ID", info="Required field<br>(the unique identifier of this report, preferably as a uuid4 string)")
94
  reportDatetime = gr.Textbox(
95
- label="Report Datetime", info="(the publishing date of this report in format YYYY-MM-DD HH:MM:SS)")
96
  reportStatus = gr.Dropdown(value=None,
97
  label="Report Status",
98
  choices=REPORT_STATUS_OPTIONS,
99
- info="Required field<br>(the status of this report)"
100
  )
101
 
102
  with gr.Accordion("Publisher"):
@@ -109,7 +112,8 @@ def create_header_tab():
109
  publisher_confidentialityLevel = gr.Dropdown(value=None,
110
  label="Confidentiality Level",
111
  choices=CONFIDENTIALITY_LEVELS,
112
- info="Required field<br>(the confidentiality of the report)"
 
113
  )
114
  publisher_publicKey = gr.Textbox(
115
  label="Public Key", info="(the cryptographic public key to check the identity of the publishing organization)")
@@ -123,125 +127,157 @@ def create_header_tab():
123
 
124
  def create_task_tab():
125
  """Create the task tab components."""
126
- with gr.Tab("Task"):
127
- taskType = gr.Textbox(
128
- label="Task Type", info="Required field<br>(type of the computing task of machine learning, example : datacreation, preprocessing, supervisedLearning, unsupervisedLearning, semiSupervisedLearning ...)")
129
- taskFamily = gr.Textbox(
130
- label="Task Family", info="Required field<br>(the family of task performed, example : classification, regression, chatbot, summarization, keyword extraction, image recognition...)")
131
  taskStage = gr.Textbox(
132
- label="Task Stage", info="Required field<br>(stage of the task, example: training, finetuning, reinforcement, inference, rag...)")
 
 
 
 
 
133
 
134
  with gr.Accordion("Algorithms"):
135
- algorithmName = gr.Textbox(
136
- label="Algorithm Name", info="Required field<br>(the case-sensitive common name of the algorithm, example: randomForest, svm, xgboost...)")
137
- framework = gr.Textbox(
138
- label="Framework", info="(the common name of the software framework implementing the algorithm)")
139
- frameworkVersion = gr.Textbox(
140
- label="Framework Version", info="(the version of the software framework)")
141
- classPath = gr.Textbox(
142
- label="Class Path", info="(the full class path of the algorithm within the framework)")
143
- tuning_method = gr.Textbox(
144
- label="Tuning Method", info="(the method of hyperparameters tuning used (if any), example: gridSearch, randomizedSearch...)")
145
-
146
- with gr.Accordion("Hyperparameters"):
147
- _, hyperparameter_names, hyperparameter_values, add_btn = create_dynamic_section(
148
- section_name="Hyperparameter",
149
- fields_config=[
150
- {
151
- "type": gr.Textbox,
152
- "label": "Hyperparameter Name",
153
- "info": "(name of the hyperparameter)",
154
- "kwargs": {"interactive": True}
155
- },
156
- {
157
- "type": gr.Textbox,
158
- "label": "Hyperparameter Value",
159
- "info": "(value of the hyperparameter)",
160
- "kwargs": {"placeholder": "Enter value..."}
161
- }
162
- ],
163
- initial_count=0,
164
- )
165
-
166
- quantization = gr.Textbox(
167
- label="Quantization", info="(the data weights (in bits) obtained thanks to the quantization, example: 2, 8, 16...)")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
168
 
169
  with gr.Accordion("Dataset"):
170
- dataType = gr.Dropdown(value=None,
171
- label="Data Type",
172
- choices=DATA_TYPES,
173
- info="Required field<br>(the nature of the data)"
174
- )
175
- fileType = gr.Textbox(
176
- label="File Type", info="(the file type of the dataset)")
177
- volume = gr.Number(value=lambda: None,
178
- label="Volume", info="Required field<br>(the size of the dataset)")
179
- volumeUnit = gr.Dropdown(value=None,
180
- label="Volume Unit",
181
- choices=DATA_UNITS,
182
- info="Required field<br>(the unit of the size)")
183
- items = gr.Number(value=lambda: None,
184
- label="Items", info="(the number of items in the dataset)")
185
- shape_item = gr.Number(value=lambda: None,
186
- label="Shape Item", info="(the shape of each dataset item)")
187
-
188
- with gr.Accordion("Inference Properties"):
189
- _, nbRequest, nbTokensInput, nbWordsInput, nbTokensOutput, nbWordsOutput, contextWindowSize, cache, add_inference_btn = create_dynamic_section(
190
- section_name="Inference Property",
191
- fields_config=[
192
- {
193
- "type": gr.Number,
194
- "value": lambda: None,
195
- "label": "Number of Requests",
196
- "info": "Required field<br>(the number of requests the measure corresponds to)",
197
- },
198
- {
199
- "type": gr.Number,
200
- "value": lambda: None,
201
- "label": "Number of Tokens Input",
202
- "info": "(the number of tokens in the input)",
203
- },
204
- {
205
- "type": gr.Number,
206
- "value": lambda: None,
207
- "label": "Number of Words Input",
208
- "info": "(the number of words in the input)",
209
- },
210
- {
211
- "type": gr.Number,
212
- "value": lambda: None,
213
- "label": "Number of Tokens Output",
214
- "info": "(the number of tokens in the output)",
215
- },
216
- {
217
- "type": gr.Number,
218
- "value": lambda: None,
219
- "label": "Number of Words Output",
220
- "info": "(the number of words in the output)",
221
- },
222
- {
223
- "type": gr.Number,
224
- "value": lambda: None,
225
- "label": "Context Window Size",
226
- "info": "(the number of tokens kept in memory)",
227
- },
228
- {
229
- "type": gr.Dropdown,
230
- "label": "Cache",
231
- "info": "(the presence of a cache function)",
232
- "kwargs": {"choices": CACHE_OPTIONS, "value": None}
233
- }
234
- ],
235
- initial_count=0,
236
- layout="column"
237
- )
238
-
239
- source = gr.Textbox(
240
- label="Source", info="(the kind of source of the dataset)")
241
- sourceUri = gr.Textbox(
242
- label="Source URI", info="(the URI of the dataset)")
243
- owner = gr.Textbox(
244
- label="Owner", info="(the owner of the dataset)")
245
 
246
  with gr.Row():
247
  measuredAccuracy = gr.Number(value=lambda: None,
@@ -252,65 +288,105 @@ def create_task_tab():
252
  info="(estimated accuracy assessment)"
253
  )
254
 
255
- return [
256
- taskType, taskFamily, taskStage, algorithmName, framework,
257
- frameworkVersion, classPath, tuning_method, hyperparameter_names, hyperparameter_values,
258
- quantization, dataType, fileType, volume,
259
- volumeUnit, items, shape_item, nbRequest, nbTokensInput,
260
- nbWordsInput, nbTokensOutput, nbWordsOutput, contextWindowSize,
261
- cache, source, sourceUri, owner, measuredAccuracy, estimatedAccuracy
262
- ]
263
 
264
 
265
  def create_measures_tab():
266
  """Create the measures tab components."""
267
- with gr.Tab("Measures"):
268
- measurementMethod = gr.Textbox(
269
- label="Measurement Method", info="Required field<br>(the method used to perform the energy or FLOPS measure)")
270
- manufacturer = gr.Textbox(
271
- label="Manufacturer", info="(the builder of the measuring tool)")
272
- version = gr.Textbox(
273
- label="Version", info="(the version of the measuring tool)")
274
- cpuTrackingMode = gr.Textbox(
275
- label="CPU Tracking Mode", info="(the method used to track CPU consumption)")
276
- gpuTrackingMode = gr.Textbox(
277
- label="GPU Tracking Mode", info="(the method used to track GPU consumption)")
278
- averageUtilizationCpu = gr.Number(value=lambda: None,
279
- label="Average Utilization CPU", info="(the average percentage of CPU use)")
280
- averageUtilizationGpu = gr.Number(value=lambda: None,
281
- label="Average Utilization GPU", info="(the average percentage of GPU use)")
282
- serverSideInference = gr.Textbox(
283
- label="Server Side Inference", info="(inference server consumption estimation)")
284
- unit = gr.Dropdown(value=None,
285
- label="Unit",
286
- choices=MEASUREMENT_UNITS,
287
- info="Required field<br>(the unit of power consumption measure)"
288
- )
289
- powerCalibrationMeasurement = gr.Number(value=lambda: None,
290
- label="Power Calibration Measurement", info="(power consumed during calibration)")
291
- durationCalibrationMeasurement = gr.Number(value=lambda: None,
292
- label="Duration Calibration Measurement", info="(duration of calibration in seconds)")
293
- powerConsumption = gr.Number(value=lambda: None,
294
- label="Power Consumption", info="Required field<br>(the power consumption measure)")
295
- measurementDuration = gr.Number(value=lambda: None,
296
- label="Measurement Duration", info="(the duration of measurement in seconds)")
297
- measurementDateTime = gr.Textbox(
298
- label="Measurement DateTime", info="(when measurement began)")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
299
 
300
- return [
301
- measurementMethod, manufacturer, version, cpuTrackingMode,
302
- gpuTrackingMode, averageUtilizationCpu, averageUtilizationGpu,
303
- serverSideInference, unit, powerCalibrationMeasurement,
304
- durationCalibrationMeasurement, powerConsumption,
305
- measurementDuration, measurementDateTime
306
- ]
307
 
308
 
309
  def create_system_tab():
310
  """Create the system tab components."""
311
  with gr.Tab("System"):
312
  os = gr.Textbox(
313
- label="OS", info="Required field<br>(name of the operating system)")
314
  distribution = gr.Textbox(
315
  label="Distribution", info="(distribution of the operating system)")
316
  distributionVersion = gr.Textbox(
@@ -323,7 +399,7 @@ def create_software_tab():
323
  """Create the software tab components."""
324
  with gr.Tab("Software"):
325
  language = gr.Textbox(
326
- label="Language", info="Required field<br>(programming language information)")
327
  version_software = gr.Textbox(
328
  label="Version", info="(version of the programming language)")
329
 
@@ -332,20 +408,24 @@ def create_software_tab():
332
 
333
  def create_infrastructure_tab():
334
  """Create the infrastructure tab components."""
335
- with gr.Tab("Infrastructure"):
336
  infraType = gr.Dropdown(value=None,
337
  label="Infrastructure Type",
338
  choices=INFRA_TYPES,
339
- info="Required field<br>(the type of infrastructure used)"
 
340
  )
341
  cloudProvider = gr.Textbox(
342
- label="Cloud Provider", info="(name of your cloud provider)")
343
  cloudInstance = gr.Textbox(
344
- label="Cloud Instance", info="(name of your cloud instance)")
 
 
345
  with gr.Accordion("Components"):
346
  _, componentName, componentType, nbComponent, memorySize, manufacturer_infra, family, series, share, add_component_btn = create_dynamic_section(
347
  section_name="Component",
348
  fields_config=[
 
349
  {
350
  "type": gr.Textbox,
351
  "label": "Component Name",
@@ -355,39 +435,38 @@ def create_infrastructure_tab():
355
  "type": gr.Textbox,
356
  "label": "Component Type",
357
  "info": "Required field<br>(the type of this subsystem part of your infrastructure, example: cpu, gpu, ram, hdd, sdd...)",
 
358
  },
359
  {
360
  "type": gr.Number,
361
- "value": lambda: None,
362
  "label": "Number of Components",
363
- "info": "Required field<br>(number of items of this component)",
 
364
  },
365
  {
366
  "type": gr.Number,
367
- "value": lambda: None,
368
  "label": "Memory Size",
369
- "info": "(size of memory in Gbytes)",
370
  },
371
  {
372
  "type": gr.Textbox,
373
  "label": "Manufacturer",
374
- "info": "(name of the manufacturer)",
375
  },
376
  {
377
  "type": gr.Textbox,
378
  "label": "Family",
379
- "info": "(family of this component)",
380
  },
381
  {
382
  "type": gr.Textbox,
383
  "label": "Series",
384
- "info": "(series of this component)",
385
  },
386
  {
387
  "type": gr.Number,
388
- "value": lambda: None,
389
  "label": "Share",
390
- "info": "(percentage of equipment used)",
391
  }
392
  ],
393
  initial_count=0,
@@ -395,7 +474,7 @@ def create_infrastructure_tab():
395
  )
396
 
397
  return [
398
- infraType, cloudProvider, cloudInstance, componentName, componentType,
399
  nbComponent, memorySize, manufacturer_infra, family,
400
  series, share
401
  ]
@@ -404,10 +483,12 @@ def create_infrastructure_tab():
404
  def create_environment_tab():
405
  """Create the environment tab components."""
406
  with gr.Tab("Environment"):
407
- country = gr.Textbox(label="Country", info="Required field")
 
408
  latitude = gr.Number(label="Latitude", value=lambda: None)
409
  longitude = gr.Number(label="Longitude", value=lambda: None)
410
- location = gr.Textbox(label="Location")
 
411
  powerSupplierType = gr.Dropdown(value=lambda: None,
412
  label="Power Supplier Type",
413
  choices=POWER_SUPPLIER_TYPES,
@@ -445,14 +526,16 @@ def create_hash_tab():
445
  hashAlgorithm = gr.Dropdown(value=None,
446
  label="Hash Algorithm",
447
  choices=HASH_ALGORITHMS,
448
- info="Required field<br>(the hash function to apply)"
 
449
  )
450
  cryptographicAlgorithm = gr.Dropdown(value=None,
451
  label="Cryptographic Algorithm",
452
  choices=CRYPTO_ALGORITHMS,
453
- info="Required field<br>(the public key function to apply)"
 
454
  )
455
  value_hash = gr.Textbox(
456
- label="Value", info="Required field<br>(encrypted value of the hash)")
457
 
458
  return [hashAlgorithm, cryptographicAlgorithm, value_hash]
 
1
  import gradio as gr
2
+ from src.services.util import (
3
+ REPORT_STATUS_OPTIONS, CONFIDENTIALITY_LEVELS, DATA_USAGE_OPTIONS, DATA_FORMAT,
4
+ DATA_TYPES, DATA_SOURCE,
5
+ ACCURACY_LEVELS, INFRA_TYPES,
6
  POWER_SUPPLIER_TYPES, POWER_SOURCES, QUALITY_LEVELS,
7
+ HASH_ALGORITHMS, CRYPTO_ALGORITHMS
8
  )
9
 
10
 
 
43
 
44
  for field_idx, config in enumerate(fields_config):
45
  component = config["type"](
46
+ label=f"{config['label']} ({section_name}{i + 1})",
47
  info=config.get("info", ""),
48
+ value=config.get("value", ""),
49
+ **config.get("kwargs", {}),
50
+ elem_classes=config.get("elem_classes", "")
51
  )
52
  row_components.append(component)
53
  field_refs.append(component)
 
89
  licensing = gr.Textbox(
90
  label="Licensing", info="(the type of licensing applicable for the sharing of the report)")
91
  formatVersion = gr.Textbox(
92
+ label="Format Version", info="(the version of the specification of this set of schemas defining the report's fields)")
93
  formatVersionSpecificationUri = gr.Textbox(
94
  label="Format Version Specification URI", info="(the URI of the present specification of this set of schemas)")
95
  reportId = gr.Textbox(
96
+ label="Report ID", info="(the unique identifier of this report, preferably as a uuid4 string)")
97
  reportDatetime = gr.Textbox(
98
+ label="Report Datetime", info="(Required field<br>the publishing date of this report in format YYYY-MM-DD HH:MM:SS)", elem_classes="mandatory_field")
99
  reportStatus = gr.Dropdown(value=None,
100
  label="Report Status",
101
  choices=REPORT_STATUS_OPTIONS,
102
+ info="(the status of this report)"
103
  )
104
 
105
  with gr.Accordion("Publisher"):
 
112
  publisher_confidentialityLevel = gr.Dropdown(value=None,
113
  label="Confidentiality Level",
114
  choices=CONFIDENTIALITY_LEVELS,
115
+ info="Required field<br>(the confidentiality of the report)",
116
+ elem_classes="mandatory_field"
117
  )
118
  publisher_publicKey = gr.Textbox(
119
  label="Public Key", info="(the cryptographic public key to check the identity of the publishing organization)")
 
127
 
128
  def create_task_tab():
129
  """Create the task tab components."""
130
+ with gr.Tab("Task", elem_id="mandatory_part"):
 
 
 
 
131
  taskStage = gr.Textbox(
132
+ label="Task Stage", info="Required field<br>(stage of the task, example: datacreation, preprocessing, training, finetuning, inference, retraining..., add a + between stages if several but we do recommand to measure each step independantly)", elem_classes="mandatory_field")
133
+ taskFamily = gr.Textbox(
134
+ label="Task Family", info="Required field<br>(the family of task you are running, e.g. text classification, image generation, speech recognition, robotics navigation...)", elem_classes="mandatory_field")
135
+ nbRequest = gr.Number(
136
+ label="Number of Requests", info="(if inference stage, the number of requests the measure corresponds to, 0 or empty if you're not measuring the inference stage)",
137
+ value=lambda: None, minimum=0)
138
 
139
  with gr.Accordion("Algorithms"):
140
+ _, trainingType, algorithmType, algorithmName, algorithmUri, foundationModelName, foundationModelUri, parametersNumber, framework, frameworkVersion, classPath, layersNumber, epochsNumber, optimizer, quantization, add_algorithm_btn = create_dynamic_section(
141
+ section_name="Algorithms",
142
+ fields_config=[
143
+ {
144
+ "type": gr.Textbox,
145
+ "label": "Type of training",
146
+ "info": "(if applicable, type of training (if the stage corresponds to a training) : supervisedLearning, unsupervisedLearning, semiSupervisedLearning, reinforcementLearning, transferLearning ...)",
147
+ },
148
+ {
149
+ "type": gr.Textbox,
150
+ "label": "Type of algorithm",
151
+ "info": "(the type of algorithm used, example : embeddings creation, rag, nlp, neural network, llm...)",
152
+ },
153
+ {
154
+ "type": gr.Textbox,
155
+ "label": "Algorithm Name",
156
+ "info": "(the case-sensitive common name of the algorithm, example: randomForest, svm, xgboost...)",
157
+ },
158
+ {
159
+ "type": gr.Textbox,
160
+ "label": "Algorithm Uri",
161
+ "info": "(the URI of the model, if publicly available)",
162
+ },
163
+ {
164
+ "type": gr.Textbox,
165
+ "label": "Foundation Model Name",
166
+ "info": "(if a foundation model is used, its case-sensitive common name, example: llama3.1-8b, gpt4-o...)",
167
+ },
168
+ {
169
+ "type": gr.Textbox,
170
+ "label": "Foundation Model Uri",
171
+ "info": "(the URI of the foundation model, if publicly available)",
172
+ },
173
+ {
174
+ "type": gr.Number,
175
+ "label": "Number of parameters",
176
+ "info": "(if applicable, number of billions of total parameters of your model, e.g. 8 for llama3.1-8b)",
177
+ },
178
+ {
179
+ "type": gr.Textbox,
180
+ "label": "Framework",
181
+ "info": "(the common name of the software framework implementing the algorithm, if any)",
182
+ },
183
+ {
184
+ "type": gr.Textbox,
185
+ "label": "frameworkVersion",
186
+ "info": "(the version of the software framework implementing the algorithm, if any)",
187
+ },
188
+ {
189
+ "type": gr.Textbox,
190
+ "label": "classPath",
191
+ "info": "(the full class path of the algorithm within the framework, with elements separated by dots)",
192
+ },
193
+ {
194
+ "type": gr.Number,
195
+ "label": "Number of layers in the network",
196
+ "info": "(if deep learning, precise the number of layers in your network)",
197
+ },
198
+ {
199
+ "type": gr.Number,
200
+ "label": "Number of epochs",
201
+ "info": "(if training, the number of complete passes through the training dataset)",
202
+ },
203
+ {
204
+ "type": gr.Textbox,
205
+ "label": "optimizer",
206
+ "info": "(the algorithm used to optimize the models weights, e.g. gridSearch, lora, adam)",
207
+ },
208
+ {
209
+ "type": gr.Textbox,
210
+ "label": "quantization",
211
+ "info": "(the type of quantization used : fp32, fp16, b16, int8 ...)",
212
+ }
213
+ ],
214
+ initial_count=0,
215
+ layout="column"
216
+ )
217
 
218
  with gr.Accordion("Dataset"):
219
+ _, dataUsage, dataType, dataFormat, dataSize, dataQuantity, shape, source, sourceUri, owner, add_dataset_btn = create_dynamic_section(
220
+ section_name="Dataset",
221
+ fields_config=[
222
+ {
223
+ "type": gr.Dropdown,
224
+ "label": "Data Usage",
225
+ "info": "Required field<br>(the use of the dataset: is it used as model input or output ?)",
226
+ "value": None,
227
+ "kwargs": {"choices": DATA_USAGE_OPTIONS},
228
+ "elem_classes": "mandatory_field",
229
+ },
230
+ {
231
+ "type": gr.Dropdown,
232
+ "label": "Data Type",
233
+ "info": "Required field<br>(the nature of the data used)",
234
+ "value": None,
235
+ "kwargs": {"choices": DATA_TYPES},
236
+ "elem_classes": "mandatory_field",
237
+ },
238
+ {
239
+ "type": gr.Dropdown,
240
+ "label": "Data Format",
241
+ "info": "(if the data is passed in the form of a file, what format is the data in?)",
242
+ "value": None,
243
+ "kwargs": {"choices": DATA_FORMAT}
244
+ },
245
+ {
246
+ "type": gr.Number,
247
+ "label": "Data Size",
248
+ "info": "(the size of the dataset (in Go), if small quantity just fill the field quantity)",
249
+ },
250
+ {
251
+ "type": gr.Number,
252
+ "label": "Data Quantity",
253
+ "info": "(the number of data in the dataset, e.g. 3 (images, audio or tokens))",
254
+ },
255
+ {
256
+ "type": gr.Textbox,
257
+ "label": "Data shape",
258
+ "info": "(the shape of your dataset, can be found with X.shape with dataframes, e.g. (12, 1000) for a 2D table with 12 columns and 1000 rows)",
259
+ },
260
+ {
261
+ "type": gr.Dropdown,
262
+ "label": "Data source",
263
+ "info": "(the kind of source of the dataset)",
264
+ "value": None,
265
+ "kwargs": {"choices": DATA_SOURCE}
266
+ },
267
+ {
268
+ "type": gr.Textbox,
269
+ "label": "Source Uri",
270
+ "info": "(the URI of the dataset if available)",
271
+ },
272
+ {
273
+ "type": gr.Textbox,
274
+ "label": "Owner",
275
+ "info": "(the owner of the dataset if available)",
276
+ }
277
+ ],
278
+ initial_count=0,
279
+ layout="column"
280
+ )
 
 
 
 
 
 
 
 
 
 
 
 
 
281
 
282
  with gr.Row():
283
  measuredAccuracy = gr.Number(value=lambda: None,
 
288
  info="(estimated accuracy assessment)"
289
  )
290
 
291
+ return [
292
+ taskFamily, taskStage, nbRequest,
293
+ trainingType, algorithmType, algorithmName, algorithmUri, foundationModelName, foundationModelUri, parametersNumber, framework, frameworkVersion, classPath, layersNumber, epochsNumber, optimizer, quantization,
294
+ dataUsage, dataType, dataFormat, dataSize, dataQuantity, shape, source, sourceUri, owner,
295
+ measuredAccuracy, estimatedAccuracy
296
+ ]
 
 
297
 
298
 
299
  def create_measures_tab():
300
  """Create the measures tab components."""
301
+ with gr.Tab("Measures", elem_id="mandatory_part"):
302
+ with gr.Accordion("Measures"):
303
+ _, measurementMethod, manufacturer, version, cpuTrackingMode, gpuTrackingMode, averageUtilizationCpu, averageUtilizationGpu, powerCalibrationMeasurement, durationCalibrationMeasurement, powerConsumption, measurementDuration, measurementDateTime, add_measurement_btn = create_dynamic_section(
304
+ section_name="Measures",
305
+ fields_config=[
306
+ {
307
+ "type": gr.Textbox,
308
+ "label": "Method of measurement",
309
+ "info": "Required field<br>(the energy measure obtained from software and/or hardware tools, for a computing task)",
310
+ "elem_classes": "mandatory_field",
311
+ },
312
+ {
313
+ "type": gr.Textbox,
314
+ "label": "Manufacturer",
315
+ "info": "(the builder of the measuring tool, if the measurement method is wattmeter)",
316
+ },
317
+ {
318
+ "type": gr.Textbox,
319
+ "label": "Version of the measurement tool",
320
+ "info": "(the version of the measuring tool, if any)",
321
+ },
322
+ {
323
+ "type": gr.Textbox,
324
+ "label": "CPU tracking mode",
325
+ "info": "(the method used to track the consumption of the CPU, example: constant, rapl...)",
326
+ },
327
+ {
328
+ "type": gr.Textbox,
329
+ "label": "GPU tracking mode",
330
+ "info": "(the method used to track the consumption of the GPU, example: constant, nvml...)",
331
+ },
332
+ {
333
+ "type": gr.Number,
334
+ "label": "Average CPU Utilization",
335
+ "info": "(the average percentage of use of the CPU during the task, for example: 0.5 if your CPU load was 50% on average)",
336
+ "minimum": 0,
337
+ "maximum": 1
338
+ },
339
+ {
340
+ "type": gr.Number,
341
+ "label": "Average GPU Utilization",
342
+ "info": "(the average percentage of use of the GPU during the task, for example: 0.8 if your GPU load was 80% on average)",
343
+ "minimum": 0,
344
+ "maximum": 1
345
+ },
346
+ {
347
+ "type": gr.Number,
348
+ "label": "Power calibration measurement",
349
+ "info": "(the power consumed (in kWh) during the calibration measure if any (to isolate the initial consumption of the hardware))",
350
+ },
351
+ {
352
+ "type": gr.Number,
353
+ "label": "Duration calibration measurement",
354
+ "info": "(the duration of the calibration if any (in seconds))",
355
+ },
356
+ {
357
+ "type": gr.Number,
358
+ "label": "Power consumption",
359
+ "info": "Required field<br>(the power consumption measure of the computing task (in kWh))",
360
+ "elem_classes": "mandatory_field",
361
+ },
362
+ {
363
+ "type": gr.Number,
364
+ "label": "Measurement Duration",
365
+ "info": "(the duration of the measurement (in seconds))",
366
+ },
367
+ {
368
+ "type": gr.Textbox,
369
+ "label": "Measurement date time",
370
+ "info": "(the date when the measurement began, in format YYYY-MM-DD HH:MM:SS)",
371
+ }
372
+ ],
373
+ initial_count=0,
374
+ layout="column"
375
+ )
376
 
377
+ return [
378
+ measurementMethod, manufacturer, version, cpuTrackingMode, gpuTrackingMode,
379
+ averageUtilizationCpu, averageUtilizationGpu, powerCalibrationMeasurement,
380
+ durationCalibrationMeasurement, powerConsumption,
381
+ measurementDuration, measurementDateTime
382
+ ]
 
383
 
384
 
385
  def create_system_tab():
386
  """Create the system tab components."""
387
  with gr.Tab("System"):
388
  os = gr.Textbox(
389
+ label="OS", info="Required field<br>(name of the operating system)", elem_classes="mandatory_field")
390
  distribution = gr.Textbox(
391
  label="Distribution", info="(distribution of the operating system)")
392
  distributionVersion = gr.Textbox(
 
399
  """Create the software tab components."""
400
  with gr.Tab("Software"):
401
  language = gr.Textbox(
402
+ label="Language", info="Required field<br>(programming language information)", elem_classes="mandatory_field")
403
  version_software = gr.Textbox(
404
  label="Version", info="(version of the programming language)")
405
 
 
408
 
409
  def create_infrastructure_tab():
410
  """Create the infrastructure tab components."""
411
+ with gr.Tab("Infrastructure", elem_id="mandatory_part"):
412
  infraType = gr.Dropdown(value=None,
413
  label="Infrastructure Type",
414
  choices=INFRA_TYPES,
415
+ info="Required field<br>(the type of infrastructure used)",
416
+ elem_classes="mandatory_field"
417
  )
418
  cloudProvider = gr.Textbox(
419
+ label="Cloud Provider", info="(If you are on the cloud, the name of your cloud provider, for example : aws, azure, google, ovh...)")
420
  cloudInstance = gr.Textbox(
421
+ label="Cloud Instance", info="(If you are on a cloud vm, the name of your cloud instance, for example : a1.large, dasv4-type2...)")
422
+ cloudService = gr.Textbox(
423
+ label="Cloud Service", info="(If you are using an AI cloud service, the name of your cloud service, for example : openAI service...)")
424
  with gr.Accordion("Components"):
425
  _, componentName, componentType, nbComponent, memorySize, manufacturer_infra, family, series, share, add_component_btn = create_dynamic_section(
426
  section_name="Component",
427
  fields_config=[
428
+
429
  {
430
  "type": gr.Textbox,
431
  "label": "Component Name",
 
435
  "type": gr.Textbox,
436
  "label": "Component Type",
437
  "info": "Required field<br>(the type of this subsystem part of your infrastructure, example: cpu, gpu, ram, hdd, sdd...)",
438
+ "elem_classes": "mandatory_field",
439
  },
440
  {
441
  "type": gr.Number,
 
442
  "label": "Number of Components",
443
+ "info": "Required field<br>(the number of items of this component in your infrastructure, if you have 1 RAM of 32Go, fill 1 here and 32 inside memorySize)",
444
+ "elem_classes": "mandatory_field",
445
  },
446
  {
447
  "type": gr.Number,
 
448
  "label": "Memory Size",
449
+ "info": "(the size of the memory of the component in Gbytes, useful to detail the memory associated to ONE of your gpus for example (if we want the total memory, we will multiply the memorySize by nbComponent). If the component is CPU do not fill the RAM size here, create another component for RAM, this field is for the embeded memory of a component.)",
450
  },
451
  {
452
  "type": gr.Textbox,
453
  "label": "Manufacturer",
454
+ "info": "(the name of the manufacturer, example: nvidia)",
455
  },
456
  {
457
  "type": gr.Textbox,
458
  "label": "Family",
459
+ "info": "(the family of this component, example: geforce)",
460
  },
461
  {
462
  "type": gr.Textbox,
463
  "label": "Series",
464
+ "info": "(the series of this component, example: gtx1080)",
465
  },
466
  {
467
  "type": gr.Number,
 
468
  "label": "Share",
469
+ "info": "(the percentage of the physical equipment used by the task, this sharing property should be set to 1 by default (if no share) and otherwise to the correct percentage, e.g. 0.5 if you share half-time.)",
470
  }
471
  ],
472
  initial_count=0,
 
474
  )
475
 
476
  return [
477
+ infraType, cloudProvider, cloudInstance, cloudService, componentName, componentType,
478
  nbComponent, memorySize, manufacturer_infra, family,
479
  series, share
480
  ]
 
483
  def create_environment_tab():
484
  """Create the environment tab components."""
485
  with gr.Tab("Environment"):
486
+ country = gr.Textbox(
487
+ label="Country", info="Required field", elem_classes="mandatory_field")
488
  latitude = gr.Number(label="Latitude", value=lambda: None)
489
  longitude = gr.Number(label="Longitude", value=lambda: None)
490
+ location = gr.Textbox(
491
+ label="Location", info="(more precise location like city, region or datacenter name)")
492
  powerSupplierType = gr.Dropdown(value=lambda: None,
493
  label="Power Supplier Type",
494
  choices=POWER_SUPPLIER_TYPES,
 
526
  hashAlgorithm = gr.Dropdown(value=None,
527
  label="Hash Algorithm",
528
  choices=HASH_ALGORITHMS,
529
+ info="Required field<br>(the hash function to apply)",
530
+ elem_classes="mandatory_field"
531
  )
532
  cryptographicAlgorithm = gr.Dropdown(value=None,
533
  label="Cryptographic Algorithm",
534
  choices=CRYPTO_ALGORITHMS,
535
+ info="Required field<br>(the public key function to apply)",
536
+ elem_classes="mandatory_field"
537
  )
538
  value_hash = gr.Textbox(
539
+ label="Value", info="Required field<br>(encrypted value of the hash)", elem_classes="mandatory_field")
540
 
541
  return [hashAlgorithm, cryptographicAlgorithm, value_hash]