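# Hydra multirun config snapshot (output_subdir: .hydra) for an optimum-benchmark run
# of the `pytorch_generate` generation benchmark on google/gemma-2b at transformers
# commit bd5091df8db7cea1a9f94f797fc11487f840ade1. The sweep covers
# backend.cache_implementation={null,static} x backend.torch_compile={false,true};
# the resolved values below correspond to the cache_implementation=static,
# torch_compile=true point of that sweep.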
hydra:
  run:
    dir: _benchmark/tmpjivn_1kh/commit=bd5091df8db7cea1a9f94f797fc11487f840ade1/${hydra.job.override_dirname}
  sweep:
    dir: _benchmark/tmpjivn_1kh/commit=bd5091df8db7cea1a9f94f797fc11487f840ade1
    subdir: ${hydra.job.override_dirname}
  launcher:
    _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
  sweeper:
    _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
    max_batch_size: null
    params: null
  help:
    app_name: ${hydra.job.name}
    header: '${hydra.help.app_name} is powered by Hydra.

      '
    footer: 'Powered by Hydra (https://hydra.cc)

      Use --hydra-help to view Hydra specific help

      '
    template: '${hydra.help.header}

      == Configuration groups ==

      Compose your configuration from those groups (group=option)


      $APP_CONFIG_GROUPS


      == Config ==

      Override anything in the config (foo.bar=value)


      $CONFIG


      ${hydra.help.footer}

      '
  hydra_help:
    template: 'Hydra (${hydra.runtime.version})

      See https://hydra.cc for more info.


      == Flags ==

      $FLAGS_HELP


      == Configuration groups ==

      Compose your configuration from those groups (For example, append hydra/job_logging=disabled
      to command line)


      $HYDRA_CONFIG_GROUPS


      Use ''--cfg hydra'' to Show the Hydra config.

      '
    hydra_help: ???
  hydra_logging:
    version: 1
    root:
      level: ERROR
    disable_existing_loggers: true
  job_logging:
    version: 1
    root:
      level: ERROR
    disable_existing_loggers: true
  env: {}
  mode: MULTIRUN
  searchpath: []
  callbacks: {}
  output_subdir: .hydra
  overrides:
    hydra:
    - hydra/job_logging=disabled
    - hydra/hydra_logging=disabled
    - hydra.sweep.dir=_benchmark/tmpjivn_1kh/commit\=bd5091df8db7cea1a9f94f797fc11487f840ade1
    - hydra.run.dir=_benchmark/tmpjivn_1kh/commit\=bd5091df8db7cea1a9f94f797fc11487f840ade1/${hydra.job.override_dirname}
    - hydra.mode=MULTIRUN
    task:
    - backend.model=google/gemma-2b
    - backend.cache_implementation=null,static
    - backend.torch_compile=false,true
  job:
    name: cli
    chdir: true
    override_dirname: backend.cache_implementation=null,static,backend.model=google/gemma-2b,backend.torch_compile=false,true
    id: ???
    num: ???
    config_name: generation
    env_set:
      OVERRIDE_BENCHMARKS: '1'
      LOG_LEVEL: WARN
    env_copy: []
    config:
      override_dirname:
        kv_sep: '='
        item_sep: ','
        exclude_keys: []
  runtime:
    version: 1.3.2
    version_base: '1.3'
    cwd: /transformers
    config_sources:
    - path: hydra.conf
      schema: pkg
      provider: hydra
    - path: optimum_benchmark
      schema: pkg
      provider: main
    - path: /transformers/benchmark/config
      schema: file
      provider: command-line
    - path: ''
      schema: structured
      provider: schema
    output_dir: ???
    choices:
      backend: pytorch
      launcher: process
      scenario: inference
      hydra/env: default
      hydra/callbacks: null
      hydra/job_logging: disabled
      hydra/hydra_logging: disabled
      hydra/hydra_help: default
      hydra/help: default
      hydra/sweeper: basic
      hydra/launcher: basic
      hydra/output: default
  verbose: false
name: pytorch_generate
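# Backend: PyTorch 2.3.0+cu121 running google/gemma-2b on CUDA device 0 in float16,
# with a static KV cache and torch.compile (inductor backend, reduce-overhead mode, fullgraph).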
backend:
  name: pytorch
  version: 2.3.0+cu121
  _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend
  task: null
  library: null
  model: google/gemma-2b
  processor: null
  device: cuda
  device_ids: '0'
  seed: 42
  inter_op_num_threads: null
  intra_op_num_threads: null
  model_kwargs: {}
  processor_kwargs: {}
  hub_kwargs: {}
  no_weights: true
  device_map: null
  torch_dtype: float16
  eval_mode: true
  to_bettertransformer: false
  low_cpu_mem_usage: null
  attn_implementation: null
  cache_implementation: static
  autocast_enabled: false
  autocast_dtype: null
  torch_compile: true
  torch_compile_target: forward
  torch_compile_config:
    backend: inductor
    mode: reduce-overhead
    fullgraph: true
  quantization_scheme: null
  quantization_config: {}
  deepspeed_inference: false
  deepspeed_inference_config: {}
  peft_type: null
  peft_config: {}
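# Scenario: latency and memory measurement of greedy generation
# (batch_size=1, sequence_length=7, 128 new tokens, 10 warmup runs, 2 iterations).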
scenario:
  name: inference
  _target_: optimum_benchmark.scenarios.inference.scenario.InferenceScenario
  iterations: 2
  duration: 0
  warmup_runs: 10
  input_shapes:
    batch_size: 1
    sequence_length: 7
  new_tokens: null
  latency: true
  memory: true
  energy: false
  forward_kwargs: {}
  generate_kwargs:
    max_new_tokens: 128
    min_new_tokens: 128
    do_sample: false
  call_kwargs: {}
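# Launcher: the benchmark runs in a spawned subprocess with GPU device isolation (action: warn).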
launcher:
  name: process
  _target_: optimum_benchmark.launchers.process.launcher.ProcessLauncher
  device_isolation: true
  device_isolation_action: warn
  start_method: spawn
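# Environment: host hardware and library versions recorded at benchmark time.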
environment:
  cpu: ' AMD EPYC 7R32'
  cpu_count: 16
  cpu_ram_mb: 66697.29792
  system: Linux
  machine: x86_64
  platform: Linux-5.10.217-205.860.amzn2.x86_64-x86_64-with-glibc2.29
  processor: x86_64
  python_version: 3.8.10
  gpu:
  - NVIDIA A10G
  gpu_count: 1
  gpu_vram_mb: 24146608128
  optimum_benchmark_version: 0.2.1
  optimum_benchmark_commit: null
  transformers_version: 4.42.0.dev0
  transformers_commit: bd5091df8db7cea1a9f94f797fc11487f840ade1
  accelerate_version: 0.31.0.dev0
  accelerate_commit: null
  diffusers_version: null
  diffusers_commit: null
  optimum_version: 1.21.0.dev0
  optimum_commit: null
  timm_version: 0.9.16
  timm_commit: null
  peft_version: 0.11.2.dev0
  peft_commit: null