The full dataset viewer is not available; only a preview of the rows is shown below.
The dataset generation failed
Error code: DatasetGenerationError
Exception: ArrowNotImplementedError
Message: Cannot write struct type 'model_kwargs' with no child field to Parquet. Consider adding a dummy child field.
Traceback:

    Traceback (most recent call last):
      File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/builder.py", line 1870, in _prepare_split_single
        writer.write_table(table)
      File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/arrow_writer.py", line 620, in write_table
        self._build_writer(inferred_schema=pa_table.schema)
      File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/arrow_writer.py", line 441, in _build_writer
        self.pa_writer = self._WRITER_CLASS(self.stream, schema)
      File "/src/services/worker/.venv/lib/python3.9/site-packages/pyarrow/parquet/core.py", line 1010, in __init__
        self.writer = _parquet.ParquetWriter(
      File "pyarrow/_parquet.pyx", line 2157, in pyarrow._parquet.ParquetWriter.__cinit__
      File "pyarrow/error.pxi", line 154, in pyarrow.lib.pyarrow_internal_check_status
      File "pyarrow/error.pxi", line 91, in pyarrow.lib.check_status
    pyarrow.lib.ArrowNotImplementedError: Cannot write struct type 'model_kwargs' with no child field to Parquet. Consider adding a dummy child field.

    During handling of the above exception, another exception occurred:

    Traceback (most recent call last):
      File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/builder.py", line 1886, in _prepare_split_single
        num_examples, num_bytes = writer.finalize()
      File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/arrow_writer.py", line 639, in finalize
        self._build_writer(self.schema)
      File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/arrow_writer.py", line 441, in _build_writer
        self.pa_writer = self._WRITER_CLASS(self.stream, schema)
      File "/src/services/worker/.venv/lib/python3.9/site-packages/pyarrow/parquet/core.py", line 1010, in __init__
        self.writer = _parquet.ParquetWriter(
      File "pyarrow/_parquet.pyx", line 2157, in pyarrow._parquet.ParquetWriter.__cinit__
      File "pyarrow/error.pxi", line 154, in pyarrow.lib.pyarrow_internal_check_status
      File "pyarrow/error.pxi", line 91, in pyarrow.lib.check_status
    pyarrow.lib.ArrowNotImplementedError: Cannot write struct type 'model_kwargs' with no child field to Parquet. Consider adding a dummy child field.

    The above exception was the direct cause of the following exception:

    Traceback (most recent call last):
      File "/src/services/worker/src/worker/job_runners/config/parquet_and_info.py", line 1417, in compute_config_parquet_and_info_response
        parquet_operations = convert_to_parquet(builder)
      File "/src/services/worker/src/worker/job_runners/config/parquet_and_info.py", line 1049, in convert_to_parquet
        builder.download_and_prepare(
      File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/builder.py", line 924, in download_and_prepare
        self._download_and_prepare(
      File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/builder.py", line 1000, in _download_and_prepare
        self._prepare_split(split_generator, **prepare_split_kwargs)
      File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/builder.py", line 1741, in _prepare_split
        for job_id, done, content in self._prepare_split_single(
      File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/builder.py", line 1897, in _prepare_split_single
        raise DatasetGenerationError("An error occurred while generating the dataset") from e
    datasets.exceptions.DatasetGenerationError: An error occurred while generating the dataset
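The root cause is visible in the rows below: several dict columns (`model_kwargs`, `processor_kwargs`, `torch_compile_config`, and similar) are empty in every row, so Arrow infers a struct type with no child fields, which the Parquet writer refuses to serialize. The sketch below reproduces the error and the dummy-child workaround the message suggests; the field and file names are illustrative and this is not what the viewer does internally.

```python
import pyarrow as pa
import pyarrow.parquet as pq

# A dict column that is {} in every row is inferred as a struct with no
# child fields, e.g. the "model_kwargs" column of this dataset.
empty_struct = pa.array([{}], type=pa.struct([]))
table = pa.table({"model_kwargs": empty_struct})

try:
    pq.write_table(table, "repro.parquet")
except pa.ArrowNotImplementedError as err:
    # "Cannot write struct type 'model_kwargs' with no child field to Parquet. ..."
    print(err)

# Workaround suggested by the error message (illustrative field name):
# give the struct a dummy child so Parquet has something to serialize.
patched = pa.array(
    [{"_dummy": None}],
    type=pa.struct([("_dummy", pa.string())]),
)
pq.write_table(pa.table({"model_kwargs": patched}), "patched.parquet")
```

At the `datasets` level, another way to sidestep this is to serialize such always-empty dict fields to JSON strings before pushing, so the inferred schema never contains a child-less struct.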
Preview columns and their types:

| column | type |
|---|---|
| config | dict |
| report | dict |
| name | string |
| backend | dict |
| scenario | dict |
| launcher | dict |
| environment | dict |
| print_report | bool |
| log_report | bool |
| load_model | dict |
| forward | dict |
Row 1 populates only the `config` and `report` columns.

config:
{
"name": "test_api_push_to_hub_mixin",
"backend": {
"name": "pytorch",
"version": "2.5.1+cpu",
"_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
"model": "google-bert/bert-base-uncased",
"processor": "google-bert/bert-base-uncased",
"task": "fill-mask",
"library": "transformers",
"model_type": "bert",
"device": "cpu",
"device_ids": null,
"seed": 42,
"inter_op_num_threads": null,
"intra_op_num_threads": null,
"model_kwargs": {},
"processor_kwargs": {},
"no_weights": false,
"device_map": null,
"torch_dtype": null,
"eval_mode": true,
"to_bettertransformer": false,
"low_cpu_mem_usage": null,
"attn_implementation": null,
"cache_implementation": null,
"autocast_enabled": false,
"autocast_dtype": null,
"torch_compile": false,
"torch_compile_target": "forward",
"torch_compile_config": {},
"quantization_scheme": null,
"quantization_config": {},
"deepspeed_inference": false,
"deepspeed_inference_config": {},
"peft_type": null,
"peft_config": {}
},
"scenario": {
"name": "inference",
"_target_": "optimum_benchmark.scenarios.inference.scenario.InferenceScenario",
"iterations": 1,
"duration": 1,
"warmup_runs": 1,
"input_shapes": {
"batch_size": 2,
"sequence_length": 16,
"num_choices": 2
},
"new_tokens": null,
"memory": true,
"latency": true,
"energy": false,
"forward_kwargs": {},
"generate_kwargs": {},
"call_kwargs": {}
},
"launcher": {
"name": "process",
"_target_": "optimum_benchmark.launchers.process.launcher.ProcessLauncher",
"device_isolation": false,
"device_isolation_action": null,
"numactl": false,
"numactl_kwargs": {},
"start_method": "spawn"
},
"environment": {
"cpu": " AMD EPYC 7763 64-Core Processor",
"cpu_count": 4,
"cpu_ram_mb": 16766.779392,
"system": "Linux",
"machine": "x86_64",
"platform": "Linux-6.8.0-1017-azure-x86_64-with-glibc2.39",
"processor": "x86_64",
"python_version": "3.12.8",
"optimum_benchmark_version": "0.5.0.dev0",
"optimum_benchmark_commit": "7cec62e016d76fe612308e4c2c074fc7f09289fd",
"transformers_version": "4.47.0",
"transformers_commit": null,
"accelerate_version": "1.2.1",
"accelerate_commit": null,
"diffusers_version": "0.31.0",
"diffusers_commit": null,
"optimum_version": null,
"optimum_commit": null,
"timm_version": "1.0.12",
"timm_commit": null,
"peft_version": null,
"peft_commit": null
},
"print_report": true,
"log_report": true
}

report:
{
"load_model": {
"memory": {
"unit": "MB",
"max_ram": 597.929984,
"max_global_vram": null,
"max_process_vram": null,
"max_reserved": null,
"max_allocated": null
},
"latency": {
"unit": "s",
"values": [
2.512950135000011
],
"count": 1,
"total": 2.512950135000011,
"mean": 2.512950135000011,
"p50": 2.512950135000011,
"p90": 2.512950135000011,
"p95": 2.512950135000011,
"p99": 2.512950135000011,
"stdev": 0,
"stdev_": 0
},
"throughput": null,
"energy": null,
"efficiency": null
},
"forward": {
"memory": {
"unit": "MB",
"max_ram": 962.818048,
"max_global_vram": null,
"max_process_vram": null,
"max_reserved": null,
"max_allocated": null
},
"latency": {
"unit": "s",
"values": [
0.19753662300001906,
0.20275870399999008,
0.1957810019999897,
0.1170311679999827,
0.08497341000000347,
0.08282356300000515,
0.08215677499998719,
0.08206344100000251
],
"count": 8,
"total": 1.0451246859999799,
"mean": 0.13064058574999748,
"p50": 0.10100228899999308,
"p90": 0.19910324730001036,
"p95": 0.20093097565000023,
"p99": 0.2023931583299921,
"stdev": 0.053836523804653015,
"stdev_": 41.209646677241764
},
"throughput": {
"unit": "samples/s",
"value": 15.30917814336299
},
"energy": null,
"efficiency": null
}
}
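The aggregate latency fields in the `forward` entry can be reproduced from the raw per-call values. The sketch below assumes numpy's default linear-interpolation percentiles, that `stdev_` is the coefficient of variation in percent, and that throughput is batch_size × calls / total latency; all three assumptions match the numbers reported above.

```python
import numpy as np

# Raw forward latencies (seconds) copied from the report above.
values = np.array([
    0.19753662300001906, 0.20275870399999008, 0.1957810019999897,
    0.1170311679999827, 0.08497341000000347, 0.08282356300000515,
    0.08215677499998719, 0.08206344100000251,
])

total = values.sum()                 # ≈ 1.045125 s
mean = values.mean()                 # ≈ 0.130641 s
p50, p90, p95, p99 = np.percentile(values, [50, 90, 95, 99])
stdev = values.std(ddof=0)           # ≈ 0.053837 s (population stdev)
stdev_pct = 100 * stdev / mean       # ≈ 41.21, the report's "stdev_"

# Assumed throughput formula, consistent with the reported 15.309 samples/s:
batch_size = 2                       # from scenario.input_shapes
throughput = batch_size * len(values) / total

print(mean, p50, p90, stdev_pct, throughput)
```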
Rows 2 and 3 populate the remaining flattened columns with identical values: row 2 carries `name` ("test_api_push_to_hub_mixin"), `backend`, `scenario`, `launcher`, `environment`, `print_report` (true) and `log_report` (true), repeating the corresponding entries of `config` above verbatim, while row 3 carries `load_model` and `forward`, repeating the corresponding entries of `report` above verbatim.
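For context, a config/report pair with this shape is what optimum-benchmark's Python API produces and pushes to the Hub through its push-to-Hub mixin (hence the row name `test_api_push_to_hub_mixin`). The sketch below mirrors the values in the preview (CPU PyTorch backend, google-bert/bert-base-uncased, inference scenario, process launcher); the class names follow the optimum-benchmark README, and the target repository id is a placeholder rather than this dataset's actual repo.

```python
from optimum_benchmark import (
    Benchmark,
    BenchmarkConfig,
    InferenceConfig,
    ProcessConfig,
    PyTorchConfig,
)

# Mirror the backend/scenario/launcher values shown in the preview row.
benchmark_config = BenchmarkConfig(
    name="test_api_push_to_hub_mixin",
    backend=PyTorchConfig(model="google-bert/bert-base-uncased", device="cpu"),
    scenario=InferenceConfig(memory=True, latency=True),
    launcher=ProcessConfig(start_method="spawn"),
)

benchmark_report = Benchmark.launch(benchmark_config)

# Assumption: both the config and the report expose push_to_hub via a shared
# mixin, which is what populates a dataset like this one. "username/benchmarks"
# is a placeholder repo id.
benchmark_config.push_to_hub("username/benchmarks")
benchmark_report.push_to_hub("username/benchmarks")
```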