QinLiuNLP committed on
Commit 3718184
1 Parent(s): 3f8b62b

Model save

README.md ADDED
@@ -0,0 +1,60 @@
+ ---
+ license: llama3
+ base_model: Jackie999/llama3-sudo-5epochs-tofu_full_sft
+ tags:
+ - trl
+ - dpo
+ - generated_from_trainer
+ model-index:
+ - name: llama3-sudo-dpo-10epochs-forget10mix400-1sft-2fullpara-1e-5
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # llama3-sudo-dpo-10epochs-forget10mix400-1sft-2fullpara-1e-5
+
+ This model is a fine-tuned version of [Jackie999/llama3-sudo-5epochs-tofu_full_sft](https://huggingface.co/Jackie999/llama3-sudo-5epochs-tofu_full_sft) on an unknown dataset.
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 1e-05
+ - train_batch_size: 4
+ - eval_batch_size: 8
+ - seed: 42
+ - distributed_type: multi-GPU
+ - num_devices: 4
+ - gradient_accumulation_steps: 4
+ - total_train_batch_size: 64
+ - total_eval_batch_size: 32
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: cosine
+ - lr_scheduler_warmup_ratio: 0.1
+ - num_epochs: 10
+
+ ### Training results
+
+
+
+ ### Framework versions
+
+ - Transformers 4.44.0
+ - Pytorch 2.1.2
+ - Datasets 3.0.0
+ - Tokenizers 0.19.1
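
The hyperparameters listed in the README above correspond to a TRL DPO run on top of the SFT checkpoint. The sketch below is a hypothetical reconstruction, not the training script from this commit: it assumes a TRL version that provides `DPOConfig`, and the dataset contents, DPO beta, and precision flags are not recorded in the card, so they are placeholders.

```python
# Hypothetical reconstruction of the training setup described in the model card.
# Only the hyperparameters listed in the README are taken from this commit; the
# dataset below is a placeholder for the preference data referenced in the model name.
from datasets import Dataset
from transformers import AutoModelForCausalLM, AutoTokenizer
from trl import DPOConfig, DPOTrainer

base = "Jackie999/llama3-sudo-5epochs-tofu_full_sft"
tokenizer = AutoTokenizer.from_pretrained(base)
model = AutoModelForCausalLM.from_pretrained(base)
ref_model = AutoModelForCausalLM.from_pretrained(base)  # frozen reference policy for DPO

# Placeholder preference pairs; the real "forget10mix400" data is not part of this repo.
train_dataset = Dataset.from_dict({
    "prompt": ["Example question?"],
    "chosen": ["Preferred answer."],
    "rejected": ["Dispreferred answer."],
})

args = DPOConfig(
    output_dir="llama3-sudo-dpo-10epochs-forget10mix400-1sft-2fullpara-1e-5",
    learning_rate=1e-5,
    per_device_train_batch_size=4,   # x 4 GPUs x grad-accum 4 = total train batch 64
    per_device_eval_batch_size=8,
    gradient_accumulation_steps=4,
    num_train_epochs=10,
    lr_scheduler_type="cosine",
    warmup_ratio=0.1,
    seed=42,
)

trainer = DPOTrainer(
    model=model,
    ref_model=ref_model,
    args=args,
    train_dataset=train_dataset,
    tokenizer=tokenizer,
)
trainer.train()
```
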
all_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "epoch": 10.0,
+   "total_flos": 0.0,
+   "train_loss": 0.048202857348136605,
+   "train_runtime": 22140.6758,
+   "train_samples": 1600,
+   "train_samples_per_second": 0.723,
+   "train_steps_per_second": 0.011
+ }
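
The throughput figures in `all_results.json` are consistent with the hyperparameters in the card: 1600 samples x 10 epochs = 16,000 processed samples, and 16,000 / 64 (total train batch size) = 250 optimizer steps. A quick check, as a sketch:

```python
# Sanity check of the reported throughput (values copied from all_results.json).
train_samples, num_epochs, runtime_s, global_steps = 1600, 10, 22140.6758, 250

print(train_samples * num_epochs / runtime_s)  # ~0.723 -> matches "train_samples_per_second"
print(global_steps / runtime_s)                # ~0.0113 -> reported (rounded) as 0.011 "train_steps_per_second"
```
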
generation_config.json ADDED
@@ -0,0 +1,12 @@
+ {
+   "bos_token_id": 128000,
+   "do_sample": true,
+   "eos_token_id": [
+     128001,
+     128009
+   ],
+   "max_length": 4096,
+   "temperature": 0.6,
+   "top_p": 0.9,
+   "transformers_version": "4.44.0"
+ }
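
Once the model is loaded from the Hub, `generate()` picks up these defaults from `generation_config.json` automatically. A minimal sketch, assuming the repository id below (the namespace is not shown in this commit view):

```python
# Sketch of inference with the committed generation defaults
# (do_sample=True, temperature=0.6, top_p=0.9, max_length=4096, eos ids 128001/128009).
# The repository id is an assumption; substitute the actual namespace.
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "<namespace>/llama3-sudo-dpo-10epochs-forget10mix400-1sft-2fullpara-1e-5"
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(repo_id)

inputs = tokenizer("Hello, how are you?", return_tensors="pt")
output_ids = model.generate(**inputs)  # sampling parameters come from generation_config.json
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```
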
train_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "epoch": 10.0,
+   "total_flos": 0.0,
+   "train_loss": 0.048202857348136605,
+   "train_runtime": 22140.6758,
+   "train_samples": 1600,
+   "train_samples_per_second": 0.723,
+   "train_steps_per_second": 0.011
+ }
trainer_state.json ADDED
@@ -0,0 +1,432 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 10.0,
+   "eval_steps": 1000,
+   "global_step": 250,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.04,
+       "grad_norm": 5.49031856887175,
+       "learning_rate": 4.0000000000000003e-07,
+       "logits/chosen": -1.73323655128479,
+       "logits/rejected": -1.963712453842163,
+       "logps/chosen": -64.71795654296875,
+       "logps/rejected": -92.56527709960938,
+       "loss": 0.6931,
+       "rewards/accuracies": 0.0,
+       "rewards/chosen": 0.0,
+       "rewards/margins": 0.0,
+       "rewards/rejected": 0.0,
+       "step": 1
+     },
+     {
+       "epoch": 0.4,
+       "grad_norm": 1.9493782971816778,
+       "learning_rate": 4.000000000000001e-06,
+       "logits/chosen": -1.7276591062545776,
+       "logits/rejected": -1.90531325340271,
+       "logps/chosen": -72.89968872070312,
+       "logps/rejected": -117.47108459472656,
+       "loss": 0.6205,
+       "rewards/accuracies": 0.6180555820465088,
+       "rewards/chosen": -0.09507845342159271,
+       "rewards/margins": 0.2017170786857605,
+       "rewards/rejected": -0.296795517206192,
+       "step": 10
+     },
+     {
+       "epoch": 0.8,
+       "grad_norm": 3.7554212607527964,
+       "learning_rate": 8.000000000000001e-06,
+       "logits/chosen": -1.4812278747558594,
+       "logits/rejected": -1.7664066553115845,
+       "logps/chosen": -64.52765655517578,
+       "logps/rejected": -272.7065734863281,
+       "loss": 0.3953,
+       "rewards/accuracies": 0.8374999761581421,
+       "rewards/chosen": -0.0347316637635231,
+       "rewards/margins": 1.7828114032745361,
+       "rewards/rejected": -1.8175432682037354,
+       "step": 20
+     },
+     {
+       "epoch": 1.2,
+       "grad_norm": 6.377266655785215,
+       "learning_rate": 9.987820251299121e-06,
+       "logits/chosen": -1.6151340007781982,
+       "logits/rejected": -1.9460217952728271,
+       "logps/chosen": -143.10971069335938,
+       "logps/rejected": -631.4593505859375,
+       "loss": 0.1279,
+       "rewards/accuracies": 0.981249988079071,
+       "rewards/chosen": -0.8146063685417175,
+       "rewards/margins": 4.596449851989746,
+       "rewards/rejected": -5.4110565185546875,
+       "step": 30
+     },
+     {
+       "epoch": 1.6,
+       "grad_norm": 0.5750895192621723,
+       "learning_rate": 9.890738003669029e-06,
+       "logits/chosen": -1.6781848669052124,
+       "logits/rejected": -2.1725101470947266,
+       "logps/chosen": -153.36740112304688,
+       "logps/rejected": -981.5718994140625,
+       "loss": 0.0112,
+       "rewards/accuracies": 1.0,
+       "rewards/chosen": -0.9592069387435913,
+       "rewards/margins": 7.915135383605957,
+       "rewards/rejected": -8.87434196472168,
+       "step": 40
+     },
+     {
+       "epoch": 2.0,
+       "grad_norm": 15.42824961576695,
+       "learning_rate": 9.698463103929542e-06,
+       "logits/chosen": -2.032032012939453,
+       "logits/rejected": -2.422545909881592,
+       "logps/chosen": -291.60833740234375,
+       "logps/rejected": -1471.12451171875,
+       "loss": 0.0265,
+       "rewards/accuracies": 0.9937499761581421,
+       "rewards/chosen": -2.294036388397217,
+       "rewards/margins": 11.53666877746582,
+       "rewards/rejected": -13.830705642700195,
+       "step": 50
+     },
+     {
+       "epoch": 2.4,
+       "grad_norm": 0.04134383974122578,
+       "learning_rate": 9.414737964294636e-06,
+       "logits/chosen": -1.654057264328003,
+       "logits/rejected": -2.1446919441223145,
+       "logps/chosen": -305.9764709472656,
+       "logps/rejected": -1340.496826171875,
+       "loss": 0.0101,
+       "rewards/accuracies": 1.0,
+       "rewards/chosen": -2.476349353790283,
+       "rewards/margins": 9.981760025024414,
+       "rewards/rejected": -12.458109855651855,
+       "step": 60
+     },
+     {
+       "epoch": 2.8,
+       "grad_norm": 0.08981946731483229,
+       "learning_rate": 9.045084971874738e-06,
+       "logits/chosen": -1.8004175424575806,
+       "logits/rejected": -2.220939874649048,
+       "logps/chosen": -351.9838562011719,
+       "logps/rejected": -1534.653564453125,
+       "loss": 0.0004,
+       "rewards/accuracies": 1.0,
+       "rewards/chosen": -2.8868424892425537,
+       "rewards/margins": 11.563664436340332,
+       "rewards/rejected": -14.450506210327148,
+       "step": 70
+     },
+     {
+       "epoch": 3.2,
+       "grad_norm": 0.004562484598212617,
+       "learning_rate": 8.596699001693257e-06,
+       "logits/chosen": -1.955934762954712,
+       "logits/rejected": -2.4210100173950195,
+       "logps/chosen": -273.1143493652344,
+       "logps/rejected": -1595.3917236328125,
+       "loss": 0.0003,
+       "rewards/accuracies": 1.0,
+       "rewards/chosen": -2.1271727085113525,
+       "rewards/margins": 12.920974731445312,
+       "rewards/rejected": -15.048149108886719,
+       "step": 80
+     },
+     {
+       "epoch": 3.6,
+       "grad_norm": 0.1380036178242026,
+       "learning_rate": 8.078307376628292e-06,
+       "logits/chosen": -2.363739252090454,
+       "logits/rejected": -2.916693925857544,
+       "logps/chosen": -290.6210021972656,
+       "logps/rejected": -1820.208251953125,
+       "loss": 0.0001,
+       "rewards/accuracies": 1.0,
+       "rewards/chosen": -2.2858262062072754,
+       "rewards/margins": 15.026013374328613,
+       "rewards/rejected": -17.311840057373047,
+       "step": 90
+     },
+     {
+       "epoch": 4.0,
+       "grad_norm": 0.027548373376636768,
+       "learning_rate": 7.500000000000001e-06,
+       "logits/chosen": -1.8597825765609741,
+       "logits/rejected": -2.5159268379211426,
+       "logps/chosen": -259.3269348144531,
+       "logps/rejected": -1694.156005859375,
+       "loss": 0.0036,
+       "rewards/accuracies": 1.0,
+       "rewards/chosen": -1.9733333587646484,
+       "rewards/margins": 14.052156448364258,
+       "rewards/rejected": -16.025489807128906,
+       "step": 100
+     },
+     {
+       "epoch": 4.4,
+       "grad_norm": 0.002852087112507335,
+       "learning_rate": 6.873032967079562e-06,
+       "logits/chosen": -1.1590913534164429,
+       "logits/rejected": -1.9738283157348633,
+       "logps/chosen": -243.7049560546875,
+       "logps/rejected": -1585.557373046875,
+       "loss": 0.0008,
+       "rewards/accuracies": 1.0,
+       "rewards/chosen": -1.8130344152450562,
+       "rewards/margins": 13.158452033996582,
+       "rewards/rejected": -14.97148609161377,
+       "step": 110
+     },
+     {
+       "epoch": 4.8,
+       "grad_norm": 0.050302608930815555,
+       "learning_rate": 6.209609477998339e-06,
+       "logits/chosen": -1.1821445226669312,
+       "logits/rejected": -2.0527586936950684,
+       "logps/chosen": -265.8364562988281,
+       "logps/rejected": -1697.0533447265625,
+       "loss": 0.0002,
+       "rewards/accuracies": 1.0,
+       "rewards/chosen": -2.049001693725586,
+       "rewards/margins": 14.01880168914795,
+       "rewards/rejected": -16.06780242919922,
+       "step": 120
+     },
+     {
+       "epoch": 5.2,
+       "grad_norm": 0.002082230483938349,
+       "learning_rate": 5.522642316338268e-06,
+       "logits/chosen": -1.4268571138381958,
+       "logits/rejected": -2.196958303451538,
+       "logps/chosen": -261.50567626953125,
+       "logps/rejected": -1654.785888671875,
+       "loss": 0.0002,
+       "rewards/accuracies": 1.0,
+       "rewards/chosen": -2.0303101539611816,
+       "rewards/margins": 13.576850891113281,
+       "rewards/rejected": -15.607162475585938,
+       "step": 130
+     },
+     {
+       "epoch": 5.6,
+       "grad_norm": 0.0019893945992160438,
+       "learning_rate": 4.825502516487497e-06,
+       "logits/chosen": -1.7991822957992554,
+       "logits/rejected": -2.550363063812256,
+       "logps/chosen": -319.41717529296875,
+       "logps/rejected": -1847.844970703125,
+       "loss": 0.0001,
+       "rewards/accuracies": 1.0,
+       "rewards/chosen": -2.583193302154541,
+       "rewards/margins": 14.985641479492188,
+       "rewards/rejected": -17.568836212158203,
+       "step": 140
+     },
+     {
+       "epoch": 6.0,
+       "grad_norm": 0.0015741333192918123,
+       "learning_rate": 4.131759111665349e-06,
+       "logits/chosen": -1.9366722106933594,
+       "logits/rejected": -2.7011678218841553,
+       "logps/chosen": -317.91302490234375,
+       "logps/rejected": -1850.040771484375,
+       "loss": 0.0001,
+       "rewards/accuracies": 1.0,
+       "rewards/chosen": -2.5857417583465576,
+       "rewards/margins": 15.00117015838623,
+       "rewards/rejected": -17.586910247802734,
+       "step": 150
+     },
+     {
+       "epoch": 6.4,
+       "grad_norm": 0.0011046666078130404,
+       "learning_rate": 3.4549150281252635e-06,
+       "logits/chosen": -1.9223568439483643,
+       "logits/rejected": -2.68572735786438,
+       "logps/chosen": -357.874755859375,
+       "logps/rejected": -2010.9976806640625,
+       "loss": 0.0001,
+       "rewards/accuracies": 1.0,
+       "rewards/chosen": -2.9552831649780273,
+       "rewards/margins": 16.265609741210938,
+       "rewards/rejected": -19.22089195251465,
+       "step": 160
+     },
+     {
+       "epoch": 6.8,
+       "grad_norm": 0.0008850683483477801,
+       "learning_rate": 2.8081442660546126e-06,
+       "logits/chosen": -1.939117431640625,
+       "logits/rejected": -2.6780524253845215,
+       "logps/chosen": -341.6114196777344,
+       "logps/rejected": -1941.843017578125,
+       "loss": 0.0,
+       "rewards/accuracies": 1.0,
+       "rewards/chosen": -2.7991585731506348,
+       "rewards/margins": 15.720464706420898,
+       "rewards/rejected": -18.519622802734375,
+       "step": 170
+     },
+     {
+       "epoch": 7.2,
+       "grad_norm": 0.0006990799912761066,
+       "learning_rate": 2.204035482646267e-06,
+       "logits/chosen": -2.032839298248291,
+       "logits/rejected": -2.8106019496917725,
+       "logps/chosen": -401.89849853515625,
+       "logps/rejected": -2151.74072265625,
+       "loss": 0.0,
+       "rewards/accuracies": 1.0,
+       "rewards/chosen": -3.3739535808563232,
+       "rewards/margins": 17.275264739990234,
+       "rewards/rejected": -20.649219512939453,
+       "step": 180
+     },
+     {
+       "epoch": 7.6,
+       "grad_norm": 0.0009810237591607713,
+       "learning_rate": 1.6543469682057105e-06,
+       "logits/chosen": -2.005420446395874,
+       "logits/rejected": -2.788722038269043,
+       "logps/chosen": -371.88824462890625,
+       "logps/rejected": -2029.203125,
+       "loss": 0.0,
+       "rewards/accuracies": 1.0,
+       "rewards/chosen": -3.0968661308288574,
+       "rewards/margins": 16.302059173583984,
+       "rewards/rejected": -19.398929595947266,
+       "step": 190
+     },
+     {
+       "epoch": 8.0,
+       "grad_norm": 0.005038481428283606,
+       "learning_rate": 1.1697777844051105e-06,
+       "logits/chosen": -1.6506569385528564,
+       "logits/rejected": -2.4032554626464844,
+       "logps/chosen": -272.77264404296875,
+       "logps/rejected": -1795.190185546875,
+       "loss": 0.0,
+       "rewards/accuracies": 1.0,
+       "rewards/chosen": -2.1385960578918457,
+       "rewards/margins": 14.876760482788086,
+       "rewards/rejected": -17.01535987854004,
+       "step": 200
+     },
+     {
+       "epoch": 8.4,
+       "grad_norm": 0.0007269378594182772,
+       "learning_rate": 7.597595192178702e-07,
+       "logits/chosen": -1.87616765499115,
+       "logits/rejected": -2.6102375984191895,
+       "logps/chosen": -321.3392639160156,
+       "logps/rejected": -1848.580810546875,
+       "loss": 0.0,
+       "rewards/accuracies": 1.0,
+       "rewards/chosen": -2.6000113487243652,
+       "rewards/margins": 14.976901054382324,
+       "rewards/rejected": -17.576915740966797,
+       "step": 210
+     },
+     {
+       "epoch": 8.8,
+       "grad_norm": 0.0007139184876414333,
+       "learning_rate": 4.322727117869951e-07,
+       "logits/chosen": -1.9712779521942139,
+       "logits/rejected": -2.749927043914795,
+       "logps/chosen": -361.2076416015625,
+       "logps/rejected": -1984.336669921875,
+       "loss": 0.0,
+       "rewards/accuracies": 1.0,
+       "rewards/chosen": -2.9964027404785156,
+       "rewards/margins": 15.949694633483887,
+       "rewards/rejected": -18.946096420288086,
+       "step": 220
+     },
+     {
+       "epoch": 9.2,
+       "grad_norm": 0.0009109490304217519,
+       "learning_rate": 1.9369152030840553e-07,
+       "logits/chosen": -1.8861472606658936,
+       "logits/rejected": -2.6229307651519775,
+       "logps/chosen": -340.4961853027344,
+       "logps/rejected": -1894.4420166015625,
+       "loss": 0.0,
+       "rewards/accuracies": 1.0,
+       "rewards/chosen": -2.806462287902832,
+       "rewards/margins": 15.230853080749512,
+       "rewards/rejected": -18.037317276000977,
+       "step": 230
+     },
+     {
+       "epoch": 9.6,
+       "grad_norm": 0.0007160536583349626,
+       "learning_rate": 4.865965629214819e-08,
+       "logits/chosen": -1.8993374109268188,
+       "logits/rejected": -2.6610684394836426,
+       "logps/chosen": -320.7115783691406,
+       "logps/rejected": -1872.178466796875,
+       "loss": 0.0,
+       "rewards/accuracies": 1.0,
+       "rewards/chosen": -2.6092982292175293,
+       "rewards/margins": 15.210273742675781,
+       "rewards/rejected": -17.81957244873047,
+       "step": 240
+     },
+     {
+       "epoch": 10.0,
+       "grad_norm": 0.0006100741232287624,
+       "learning_rate": 0.0,
+       "logits/chosen": -1.9146867990493774,
+       "logits/rejected": -2.670241117477417,
+       "logps/chosen": -317.877685546875,
+       "logps/rejected": -1852.078857421875,
+       "loss": 0.0,
+       "rewards/accuracies": 1.0,
+       "rewards/chosen": -2.5683541297912598,
+       "rewards/margins": 15.033134460449219,
+       "rewards/rejected": -17.60148811340332,
+       "step": 250
+     },
+     {
+       "epoch": 10.0,
+       "step": 250,
+       "total_flos": 0.0,
+       "train_loss": 0.048202857348136605,
+       "train_runtime": 22140.6758,
+       "train_samples_per_second": 0.723,
+       "train_steps_per_second": 0.011
+     }
+   ],
+   "logging_steps": 10,
+   "max_steps": 250,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 10,
+   "save_steps": 1000,
+   "stateful_callbacks": {
+     "TrainerControl": {
+       "args": {
+         "should_epoch_stop": false,
+         "should_evaluate": false,
+         "should_log": false,
+         "should_save": true,
+         "should_training_stop": true
+       },
+       "attributes": {}
+     }
+   },
+   "total_flos": 0.0,
+   "train_batch_size": 4,
+   "trial_name": null,
+   "trial_params": null
+ }
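
The `log_history` above traces the DPO run: the reward margin grows from 0.0 at step 1 to about 15 by step 250 while the training loss collapses toward 0. A small sketch for reading the file back, assuming `trainer_state.json` sits in the current directory:

```python
# Sketch: summarize the DPO log_history from the committed trainer_state.json.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

for entry in state["log_history"]:
    if "rewards/margins" in entry:  # the final summary entry has no reward keys
        print(f'step {entry["step"]:>3}: loss={entry["loss"]:.4f} '
              f'margin={entry["rewards/margins"]:.2f} '
              f'acc={entry["rewards/accuracies"]:.2f}')
```
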