asahi417 committed
Commit 644bf8b · 1 Parent(s): 92b61fd
Files changed (1)
  1. training_scripts/finetune_t5.py +197 -217

training_scripts/finetune_t5.py CHANGED
@@ -9,10 +9,9 @@ import os
  import argparse
  import gc
  from glob import glob
- from typing import List, Set, Dict
+ from typing import List, Set
  from shutil import copyfile
  from statistics import mean
- from itertools import product
  from distutils.dir_util import copy_tree

  import torch
@@ -49,6 +48,7 @@ def load_model(
      param = {'config': config, 'use_auth_token': use_auth_token, 'low_cpu_mem_usage': low_cpu_mem_usage}
      return model_class(model_name, **param)

+
  def train(
          model_name: str,
          model_low_cpu_mem_usage: bool,
@@ -59,7 +59,7 @@ def train(
          random_seed: int,
          use_auth_token: bool):
      """Fine-tune seq2seq model."""
-     logging.info(f'[CONFIG]\n\t *LM: {model_name}, \n\t *Data: {dataset} ({dataset_name})')
+     logging.info(f'[TRAIN]\n\t *LM: {model_name}, \n\t *Data: {dataset} ({dataset_name})')
      output_dir = f'ckpt/{os.path.basename(model_name)}.{os.path.basename(dataset)}.{dataset_name}'

      tokenizer = transformers.AutoTokenizer.from_pretrained(model_name, use_auth_token=use_auth_token)
@@ -73,7 +73,7 @@
      for n, lr_tmp in enumerate(_LR):
          logging.info(f"[TRAIN {n}/{len(_LR)}] lr: {lr_tmp}")
          output_dir_tmp = f"{output_dir}/model_lr_{lr_tmp}"
-         if os.path.exists(f"{output_dir_tmp}/eval_results.json"):
+         if os.path.exists(f"{output_dir_tmp}/pytorch_model.bin"):
              continue
          model = load_model(
              model_name=model_name, use_auth_token=use_auth_token, low_cpu_mem_usage=model_low_cpu_mem_usage
@@ -103,198 +103,159 @@ def train(
          del model
          gc.collect()
          torch.cuda.empty_cache()
- #
- #
- # def get_f1_score(references: List[Set[str]], predictions: List[Set[str]]) -> float:
- #     scores = []
- #     for g, r in zip(references, predictions):
- #         tp = len(set(g).intersection(set(r)))
- #         fp = len([_g for _g in g if _g not in r])
- #         fn = len([_r for _r in r if _r not in g])
- #         if tp == 0:
- #             f1 = 0
- #         else:
- #             f1 = 2 * tp / (2 * tp + fp + fn)
- #         scores.append(f1)
- #     return mean(scores)
- #
- #
- #
- #
- # def evaluate(
- #         model_path: str,
- #         batch_eval: int,
- #         dataset_column_text: str,
- #         dataset_instance,
- #         dataset_split_test: str,
- #         dataset_column_label: str,
- # ):
- #     prediction_file = f'{model_path}/prediction.{dataset_name}.{}.txt'
- #     input_data = [i[dataset_column_text] for i in dataset_instance[dataset_split_test]]
- #     if not os.path.exists():
- #         pipe = pipeline(
- #             'text2text-generation',
- #             model=model_path,
- #             device='cuda:0' if torch.cuda.is_available() else 'cpu',
- #         )
- #         output = pipe(input_data, batch_size=batch_eval)
- #         output = [i['generated_text'] for i in output]
- #         with open(f'{model_path}/prediction_test.txt', 'w') as f:
- #             f.write('\n'.join(output))
- #     with open(f'{model_path}/prediction_test.txt') as f:
- #         output = [set(i.split(',')) for i in f.read().split('\n')]
- #     dataset_tmp = dataset_instance[dataset_split_test]
- #     label_list = dataset_tmp[dataset_column_label]
- #     _references = [
- #         set([_l for __i, _l in zip(_i[dataset_column_label], label_list) if __i == 1]) for _i in dataset_tmp
- #     ]
- #     eval_metric = get_f1_score(_references, output)
- #     eval_metric[f'f1/{dataset}/{dataset_name}'] = eval_metric.pop('f1')
- #     logging.info(json.dumps(eval_metric, indent=4))
- #     with open(f'{model_path}/evaluation_metrics.json', 'w') as f:
- #         json.dump(eval_metric, f)
- #
- #
- # def train(
- #         model_name: str,
- #         model_low_cpu_mem_usage: bool,
- #         dataset: str,
- #         dataset_name: str,
- #         dataset_column_label: str,
- #         dataset_column_text: str,
- #         dataset_split_train: str,
- #         dataset_split_validation: str,
- #         dataset_split_test: str,
- #         lr: List,
- #         epoch: List,
- #         batch: List,
- #         down_sample_train: int,
- #         down_sample_validation: int,
- #         random_seed: int,
- #         use_auth_token: bool,
- #         output_dir: str,
- #         model_alias: str,
- #         model_organization: str,
- #         skip_train: bool = False,
- #         skip_test: bool = False,
- #         skip_upload: bool = False,
- #         batch_eval: int = None):
- #     """Fine-tune seq2seq model."""
- #     logging.info(f'[CONFIG]\n\t *LM: {model_name}, \n\t *Data: {dataset} ({dataset_name})')
- #     if not output_dir:
- #         output_dir = f'ckpt/{os.path.basename(model_name)}.{os.path.basename(dataset)}.{dataset_name}'
- #     # dataset process
- #     tokenizer = transformers.AutoTokenizer.from_pretrained(model_name, use_auth_token=use_auth_token)
- #     dataset_split = {
- #         'train': dataset_split_train,
- #         'validation': dataset_split_validation
- #     }
- #     dataset_instance = load_dataset(dataset, dataset_name, use_auth_token=use_auth_token)
- #     tokenized_dataset = {}
- #     for s, s_dataset in zip(['train', 'validation'], [dataset_split_train, dataset_split_validation]):
- #         tokenized_dataset[s] = []
- #         for i in dataset_instance[s_dataset]:
- #             model_inputs = tokenizer(i[dataset_column_text], truncation=True)
- #             model_inputs['labels'] = tokenizer(text_target=i[dataset_column_label], truncation=True)['input_ids']
- #             tokenized_dataset[s].append(model_inputs)
- #
- #     if not skip_train:
- #         lr = [1e-6, 1e-5, 1e-4] if lr is None else lr
- #         batch = [32] if not batch else batch
- #         epoch = [3, 5] if not epoch else epoch
- #         batch_eval = min(batch) if not batch_eval else batch_eval
- #         for n, (lr_tmp, batch_tmp, epoch_tmp) in enumerate(product(lr, batch, epoch)):
- #             logging.info(f"[TRAIN {n}/{len(lr) * len(batch) * len(epoch)}] lr: {lr_tmp}, batch: {batch_tmp}")
- #             output_dir_tmp = f"{output_dir}/model_lr_{lr_tmp}_batch_{batch_tmp}_epoch_{epoch_tmp}"
- #             if os.path.exists(f"{output_dir_tmp}/eval_results.json"):
- #                 continue
- #             model = load_model(
- #                 model_name=model_name, use_auth_token=use_auth_token, low_cpu_mem_usage=model_low_cpu_mem_usage
- #             )
- #             trainer = Seq2SeqTrainer(
- #                 model=model,
- #                 args=Seq2SeqTrainingArguments(
- #                     num_train_epochs=epoch_tmp,
- #                     learning_rate=lr_tmp,
- #                     output_dir=output_dir_tmp,
- #                     evaluation_strategy="no",
- #                     seed=random_seed,
- #                     per_device_train_batch_size=batch_tmp,
- #                 ),
- #                 data_collator=transformers.DataCollatorForSeq2Seq(tokenizer, model=model),
- #                 train_dataset=tokenized_dataset['train_ds'],
- #             )
- #             # train
- #             result = trainer.train()
- #             trainer.log_metrics("train", result.metrics)
- #             trainer.save_metrics("train", result.metrics)
- #             # clean up memory
- #             trainer.save_model()
- #             trainer.save_state()
- #             del trainer
- #             del model
- #             gc.collect()
- #             torch.cuda.empty_cache()
- #
- #         model_score = []
- #         for eval_file in glob(f"{output_dir}/model_*/eval_results.json"):
- #             with open(eval_file) as f:
- #                 results = json.load(f)
- #             model_score.append([os.path.dirname(eval_file), results['eval_loss'], results['eval_f1']])
- #         logging.info("Search Result")
- #         for i in model_score:
- #             logging.info(i)
- #         max_metric = max(model_score, key=lambda x: x[2])
- #         if len([i for i in model_score if i[2] == max_metric]) > 1:
- #             best_model = sorted(model_score, key=lambda x: x[1])[0][0]
- #         else:
- #             best_model = sorted(model_score, key=lambda x: x[2])[-1][0]
- #         copy_tree(best_model, f'{output_dir}/best_model')
- #         tokenizer.save_pretrained(f'{output_dir}/best_model')
- #     else:
- #         logging.info('skip hyperparameter search & model training (already done)')
- #
- #     # get metric on the test set
- #     if not skip_test:
- #         logging.info('run evaluation on test set')
- #     if not skip_upload:
- #         assert model_alias is not None and model_organization is not None,\
- #             'model_organization must be specified when model_alias is specified'
- #         logging.info('uploading to huggingface')
- #         args = {'use_auth_token': use_auth_token, 'organization': model_organization}
- #         model = load_model(model_name=f'{output_dir}/best_model')
- #         model.push_to_hub(model_alias, **args)
- #         tokenizer.push_to_hub(model_alias, **args)
- #         repo = Repository(model_alias, f'{model_organization}/{model_alias}')
- #         if os.path.exists(f'{output_dir}/best_model/prediction_test.txt'):
- #             copyfile(f'{output_dir}/best_model/prediction_test.txt', f'{model_alias}/prediction_test.txt')
- #         if os.path.exists(f'{output_dir}/best_model/evaluation_metrics.json'):
- #             copyfile(f'{output_dir}/best_model/evaluation_metrics.json', f'{model_alias}/evaluation_metrics.json')
- #         sample = [i[dataset_column_text] for i in dataset_instance[dataset_split_train]]
- #         sample = [i for i in sample if '"' not in i and "'" not in i][:3]
- #         widget = '\n'.join([f"- text: '{t}'\n example_title: example {_n + 1}" for _n, t in enumerate(sample)])
- #         with open(f'{model_alias}/README.md', 'w') as f:
- #             f.write(f"""
- # ---
- # widget:
- # {widget}
- # ---
- #
- # # {model_organization}/{model_alias}
- #
- # This is [{model_name}](https://huggingface.co/{model_name}) fine-tuned on [{dataset} ({dataset_name})](https://huggingface.co/datasets/{dataset}).
- #
- # ### Usage
- #
- # ```python
- # from transformers import pipeline
- #
- # pipe = pipeline('text2text-generation', model='{model_organization}/{model_alias}')
- # output = pipe('{sample[0]}')
- # ```
- # """)
- #         repo.push_to_hub()
- #
+
+     for model_path in glob(f"{output_dir}/*/*"):
+         tokenizer.save_pretrained(model_path)
+
+
+ def get_f1_score(references: List[Set[str]], predictions: List[Set[str]]) -> float:
+     scores = []
+     for g, r in zip(references, predictions):
+         tp = len(set(g).intersection(set(r)))
+         fp = len([_g for _g in g if _g not in r])
+         fn = len([_r for _r in r if _r not in g])
+         f1 = 0 if tp == 0 else 2 * tp / (2 * tp + fp + fn)
+         scores.append(f1)
+     return mean(scores)
+
+
+ def get_metric(
+         prediction_file: str,
+         metric_file: str,
+         model_path: str,
+         data: List[str],
+         label: List[str]) -> float:
+     if os.path.exists(metric_file):
+         with open(metric_file) as f:
+             eval_metric = json.load(f)
+         return eval_metric['f1']
+     if not os.path.exists(prediction_file):
+         pipe = pipeline(
+             'text2text-generation',
+             model=model_path,
+             device='cuda:0' if torch.cuda.is_available() else 'cpu',
+         )
+         output = pipe(data, batch_size=_BATCH)
+         output = [i['generated_text'] for i in output]
+         with open(prediction_file, 'w') as f:
+             f.write('\n'.join(output))
+     with open(prediction_file) as f:
+         output = [set(i.split(',')) for i in f.read().split('\n')]
+     label = [set(i.split(',')) for i in label]
+     eval_metric = {'f1': get_f1_score(label, output)}
+     logging.info(json.dumps(eval_metric, indent=4))
+     with open(metric_file, 'w') as f:
+         json.dump(eval_metric, f)
+     return eval_metric['f1']
+
+
+ def validate(
+         model_name: str,
+         dataset: str,
+         dataset_name: str,
+         dataset_column_text: str,
+         use_auth_token: bool,
+         dataset_column_label: str):
+     logging.info(f'[VALIDATE]\n\t *LM: {model_name}, \n\t *Data: {dataset} ({dataset_name})')
+     output_dir = f'ckpt/{os.path.basename(model_name)}.{os.path.basename(dataset)}.{dataset_name}'
+     dataset_instance = load_dataset(dataset, dataset_name, split='validation', use_auth_token=use_auth_token)
+     label = [i[dataset_column_label] for i in dataset_instance]
+     data = [i[dataset_column_text] for i in dataset_instance]
+     model_score = []
+     for model_path in glob(f"{output_dir}/*/*"):
+         prediction_file = f"{model_path}/prediction.validate.{dataset}.{dataset_name}.txt"
+         metric_file = f"{model_path}/metric.validate.{dataset}.{dataset_name}.json"
+         metric = get_metric(
+             prediction_file=prediction_file,
+             metric_file=metric_file,
+             model_path=model_path,
+             label=label,
+             data=data
+         )
+         model_score.append([model_path, metric])
+     model_score = sorted(model_score, key=lambda x: x[1])
+     logging.info('Validation Result')
+     for k, v in model_score:
+         logging.info(f'{k}: {v}')
+     best_model = model_score[-1][0]
+     best_model_path = f'{output_dir}/best_model'
+     copy_tree(best_model, best_model_path)
+
+
+ def test(
+         model_name: str,
+         dataset: str,
+         dataset_name: str,
+         dataset_column_text: str,
+         use_auth_token: bool,
+         dataset_column_label: str):
+     logging.info(f'[TEST]\n\t *LM: {model_name}, \n\t *Data: {dataset} ({dataset_name})')
+     output_dir = f'ckpt/{os.path.basename(model_name)}.{os.path.basename(dataset)}.{dataset_name}'
+     dataset_instance = load_dataset(dataset, dataset_name, split='test', use_auth_token=use_auth_token)
+     label = [i[dataset_column_label] for i in dataset_instance]
+     data = [i[dataset_column_text] for i in dataset_instance]
+     model_path = f'{output_dir}/best_model'
+     prediction_file = f"{model_path}/prediction.{dataset}.{dataset_name}.txt"
+     metric_file = f"{model_path}/metric.{dataset}.{dataset_name}.json"
+     metric = get_metric(
+         prediction_file=prediction_file,
+         metric_file=metric_file,
+         model_path=model_path,
+         label=label,
+         data=data
+     )
+     logging.info(f'Test Result: {metric}')
+
+
+ def upload(
+         model_name: str,
+         dataset: str,
+         dataset_name: str,
+         dataset_column_text: str,
+         use_auth_token: bool,
+         output_dir: str = None,  # unused at the call site; recomputed below
+         model_alias: str = None,
+         model_organization: str = None):
+     assert model_alias is not None and model_organization is not None,\
+         'model_organization must be specified when model_alias is specified'
+     logging.info('uploading to huggingface')
+     output_dir = f'ckpt/{os.path.basename(model_name)}.{os.path.basename(dataset)}.{dataset_name}'
+     args = {'use_auth_token': use_auth_token, 'organization': model_organization}
+     model = load_model(model_name=f'{output_dir}/best_model')
+     tokenizer = transformers.AutoTokenizer.from_pretrained(model_name, use_auth_token=use_auth_token)
+     model.push_to_hub(model_alias, **args)
+     tokenizer.push_to_hub(model_alias, **args)
+     repo = Repository(model_alias, f'{model_organization}/{model_alias}')
+     if os.path.exists(f'{output_dir}/best_model/prediction_test.txt'):
+         copyfile(f'{output_dir}/best_model/prediction_test.txt', f'{model_alias}/prediction_test.txt')
+     if os.path.exists(f'{output_dir}/best_model/evaluation_metrics.json'):
+         copyfile(f'{output_dir}/best_model/evaluation_metrics.json', f'{model_alias}/evaluation_metrics.json')
+     dataset_instance = load_dataset(dataset, dataset_name, split='test', use_auth_token=use_auth_token)
+     sample = [i[dataset_column_text] for i in dataset_instance]
+     sample = [i for i in sample if '"' not in i and "'" not in i][:3]
+     widget = '\n'.join([f"- text: '{t}'\n example_title: example {_n + 1}" for _n, t in enumerate(sample)])
+     with open(f'{model_alias}/README.md', 'w') as f:
+         f.write(f"""
+ ---
+ widget:
+ {widget}
+ ---
+
+ # {model_organization}/{model_alias}
+
+ This is [{model_name}](https://huggingface.co/{model_name}) fine-tuned on [{dataset} ({dataset_name})](https://huggingface.co/datasets/{dataset}).
+
+ ### Usage
+
+ ```python
+ from transformers import pipeline
+
+ pipe = pipeline('text2text-generation', model='{model_organization}/{model_alias}')
+ output = pipe('{sample[0]}')
+ ```
+ """)
+     repo.push_to_hub()
+

  if __name__ == '__main__':
      # arguments
@@ -306,33 +267,52 @@ if __name__ == '__main__':
      parser.add_argument('--dataset-name', default='ja', type=str)
      parser.add_argument('--dataset-column-label', default='label_name_flatten', type=str)
      parser.add_argument('--dataset-column-text', default='text', type=str)
-     parser.add_argument('--dataset-split-train', default='train', type=str)
-     parser.add_argument('--dataset-split-validation', default='validation', type=str)
-     parser.add_argument('--dataset-split-test', default='test', type=str)
-     parser.add_argument('--lr', nargs='+', default=None, type=float)
-     parser.add_argument('--epoch', nargs='+', default=None, type=int)
-     parser.add_argument('--batch', nargs='+', default=None, type=int)
-     parser.add_argument('--batch-eval', type=int, default=None)
-     parser.add_argument('--down-sample-train', default=None, type=int)
-     parser.add_argument('--down-sample-validation', default=200, type=int)
      parser.add_argument('--random-seed', default=42, type=int)
      parser.add_argument('--use-auth-token', action='store_true')
-     parser.add_argument('--eval-steps', default=100, type=int)
-     parser.add_argument('--output-dir', default=None, type=str)
      parser.add_argument('--model-alias', default=None, type=str)
      parser.add_argument('--model-organization', default=None, type=str)
      parser.add_argument('--skip-train', action='store_true')
+     parser.add_argument('--skip-validate', action='store_true')
      parser.add_argument('--skip-test', action='store_true')
      parser.add_argument('--skip-upload', action='store_true')
      opt = parser.parse_args()

-     train(
-         model_name=opt.model_name,
-         model_low_cpu_mem_usage=opt.low_cpu_mem_usage,
-         dataset=opt.dataset,
-         dataset_name=opt.dataset_name,
-         dataset_column_label=opt.dataset_column_label,
-         dataset_column_text=opt.dataset_column_text,
-         random_seed=opt.random_seed,
-         use_auth_token=opt.use_auth_token,
-     )
+     if not opt.skip_train:
+         train(
+             model_name=opt.model_name,
+             model_low_cpu_mem_usage=opt.low_cpu_mem_usage,
+             dataset=opt.dataset,
+             dataset_name=opt.dataset_name,
+             dataset_column_label=opt.dataset_column_label,
+             dataset_column_text=opt.dataset_column_text,
+             random_seed=opt.random_seed,
+             use_auth_token=opt.use_auth_token,
+         )
+     if not opt.skip_validate:
+         validate(
+             model_name=opt.model_name,
+             dataset=opt.dataset,
+             dataset_name=opt.dataset_name,
+             dataset_column_label=opt.dataset_column_label,
+             dataset_column_text=opt.dataset_column_text,
+             use_auth_token=opt.use_auth_token
+         )
+     if not opt.skip_test:
+         test(
+             model_name=opt.model_name,
+             dataset=opt.dataset,
+             dataset_name=opt.dataset_name,
+             dataset_column_label=opt.dataset_column_label,
+             dataset_column_text=opt.dataset_column_text,
+             use_auth_token=opt.use_auth_token
+         )
+     if not opt.skip_upload:
+         upload(
+             model_name=opt.model_name,
+             dataset=opt.dataset,
+             dataset_name=opt.dataset_name,
+             dataset_column_text=opt.dataset_column_text,
+             use_auth_token=opt.use_auth_token,
+             model_alias=opt.model_alias,
+             model_organization=opt.model_organization
+         )
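For readers skimming the diff: the refactored evaluation treats each example's comma-separated labels as a set, scores it with a per-example F1, and averages over the dataset (`get_f1_score` in the hunk above). A self-contained sketch with made-up label sets, for illustration only:

```python
from statistics import mean
from typing import List, Set


def get_f1_score(references: List[Set[str]], predictions: List[Set[str]]) -> float:
    # Same computation as the helper added in this commit: per-example F1
    # over label sets, averaged across examples. The fp/fn variable names
    # are swapped relative to convention, but F1 only depends on their sum,
    # so the result is unchanged.
    scores = []
    for g, r in zip(references, predictions):
        tp = len(set(g).intersection(set(r)))
        fp = len([_g for _g in g if _g not in r])
        fn = len([_r for _r in r if _r not in g])
        f1 = 0 if tp == 0 else 2 * tp / (2 * tp + fp + fn)
        scores.append(f1)
    return mean(scores)


# Hypothetical labels for illustration; not drawn from the dataset.
references = [{'sports', 'politics'}, {'economy'}]
predictions = [{'sports'}, {'economy', 'politics'}]
print(get_f1_score(references, predictions))  # (2/3 + 2/3) / 2 = 0.666...
```

`get_metric` wraps this score in two layers of caching, which is what makes re-running validation over many checkpoints cheap: an existing metric JSON short-circuits everything, and predictions are regenerated only when the prediction file is missing. A stripped-down sketch of that pattern; the `predict` and `score` callables are illustrative stand-ins (not names from the commit) for the `transformers` pipeline call and the F1 above:

```python
import json
import os
from typing import Callable, List


def cached_metric(metric_file: str, prediction_file: str,
                  predict: Callable[[], List[str]],
                  score: Callable[[List[str]], float]) -> float:
    # 1) Metric already computed for this model/dataset pair: return it.
    if os.path.exists(metric_file):
        with open(metric_file) as f:
            return json.load(f)['f1']
    # 2) Predictions cached as plain text (one per line): skip generation.
    if not os.path.exists(prediction_file):
        with open(prediction_file, 'w') as f:
            f.write('\n'.join(predict()))
    with open(prediction_file) as f:
        predictions = f.read().split('\n')
    eval_metric = {'f1': score(predictions)}
    with open(metric_file, 'w') as f:
        json.dump(eval_metric, f)
    return eval_metric['f1']
```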