nlparabic committed
Commit 1f8d6fb
1 Parent(s): 2b4e8d7

Training in progress, epoch 3

Files changed (3)
  1. egy_training_log.txt +288 -0
  2. model.safetensors +1 -1
  3. training_args.bin +1 -1
egy_training_log.txt CHANGED
@@ -286,3 +286,291 @@ INFO:root:Epoch 1.0: Train Loss = None, Eval Loss = None
  INFO:absl:Using default tokenizer.
  INFO:root:Epoch 2.0: Train Loss = 0.2232, Eval Loss = 12.67569351196289
  INFO:absl:Using default tokenizer.
+ WARNING:__main__:Process rank: 0, device: cuda:0, n_gpu: 1, distributed training: False, 16-bits training: False
+ INFO:__main__:Training/evaluation parameters TrainingArguments(
+ _n_gpu=1,
+ accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None, 'use_configured_state': False},
+ adafactor=False,
+ adam_beta1=0.9,
+ adam_beta2=0.999,
+ adam_epsilon=1e-08,
+ auto_find_batch_size=False,
+ batch_eval_metrics=False,
+ bf16=False,
+ bf16_full_eval=False,
+ data_seed=None,
+ dataloader_drop_last=False,
+ dataloader_num_workers=0,
+ dataloader_persistent_workers=False,
+ dataloader_pin_memory=True,
+ dataloader_prefetch_factor=None,
+ ddp_backend=None,
+ ddp_broadcast_buffers=None,
+ ddp_bucket_cap_mb=None,
+ ddp_find_unused_parameters=None,
+ ddp_timeout=1800,
+ debug=[],
+ deepspeed=None,
+ disable_tqdm=False,
+ dispatch_batches=None,
+ do_eval=True,
+ do_predict=False,
+ do_train=True,
+ eval_accumulation_steps=None,
+ eval_delay=0,
+ eval_do_concat_batches=True,
+ eval_on_start=False,
+ eval_steps=None,
+ eval_strategy=IntervalStrategy.EPOCH,
+ eval_use_gather_object=False,
+ evaluation_strategy=epoch,
+ fp16=False,
+ fp16_backend=auto,
+ fp16_full_eval=False,
+ fp16_opt_level=O1,
+ fsdp=[],
+ fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False},
+ fsdp_min_num_params=0,
+ fsdp_transformer_layer_cls_to_wrap=None,
+ full_determinism=False,
+ gradient_accumulation_steps=1,
+ gradient_checkpointing=False,
+ gradient_checkpointing_kwargs=None,
+ greater_is_better=False,
+ group_by_length=False,
+ half_precision_backend=auto,
+ hub_always_push=False,
+ hub_model_id=None,
+ hub_private_repo=False,
+ hub_strategy=HubStrategy.EVERY_SAVE,
+ hub_token=<HUB_TOKEN>,
+ ignore_data_skip=False,
+ include_inputs_for_metrics=False,
+ include_num_input_tokens_seen=False,
+ include_tokens_per_second=False,
+ jit_mode_eval=False,
+ label_names=None,
+ label_smoothing_factor=0.0,
+ learning_rate=5e-05,
+ length_column_name=length,
+ load_best_model_at_end=True,
+ local_rank=0,
+ log_level=passive,
+ log_level_replica=warning,
+ log_on_each_node=True,
+ logging_dir=/home/iais_marenpielka/Bouthaina/res_nw_cls_all_aragpt2-large/runs/Sep28_19-32-44_lmgpu-node-09,
+ logging_first_step=False,
+ logging_nan_inf_filter=True,
+ logging_steps=500,
+ logging_strategy=IntervalStrategy.EPOCH,
+ lr_scheduler_kwargs={},
+ lr_scheduler_type=SchedulerType.LINEAR,
+ max_grad_norm=1.0,
+ max_steps=-1,
+ metric_for_best_model=loss,
+ mp_parameters=,
+ neftune_noise_alpha=None,
+ no_cuda=False,
+ num_train_epochs=20.0,
+ optim=OptimizerNames.ADAMW_TORCH,
+ optim_args=None,
+ optim_target_modules=None,
+ output_dir=/home/iais_marenpielka/Bouthaina/res_nw_cls_all_aragpt2-large,
+ overwrite_output_dir=False,
+ past_index=-1,
+ per_device_eval_batch_size=4,
+ per_device_train_batch_size=4,
+ prediction_loss_only=False,
+ push_to_hub=True,
+ push_to_hub_model_id=None,
+ push_to_hub_organization=None,
+ push_to_hub_token=<PUSH_TO_HUB_TOKEN>,
+ ray_scope=last,
+ remove_unused_columns=True,
+ report_to=[],
+ restore_callback_states_from_checkpoint=False,
+ resume_from_checkpoint=None,
+ run_name=/home/iais_marenpielka/Bouthaina/res_nw_cls_all_aragpt2-large,
+ save_on_each_node=False,
+ save_only_model=False,
+ save_safetensors=True,
+ save_steps=500,
+ save_strategy=IntervalStrategy.EPOCH,
+ save_total_limit=None,
+ seed=42,
+ skip_memory_metrics=True,
+ split_batches=None,
+ tf32=None,
+ torch_compile=False,
+ torch_compile_backend=None,
+ torch_compile_mode=None,
+ torch_empty_cache_steps=None,
+ torchdynamo=None,
+ tpu_metrics_debug=False,
+ tpu_num_cores=None,
+ use_cpu=False,
+ use_ipex=False,
+ use_legacy_prediction_loop=False,
+ use_mps_device=False,
+ warmup_ratio=0.0,
+ warmup_steps=500,
+ weight_decay=0.0,
+ )
+ INFO:__main__:Checkpoint detected, resuming training at /home/iais_marenpielka/Bouthaina/res_nw_cls_all_aragpt2-large/checkpoint-67504. To avoid this behavior, change the `--output_dir` or add `--overwrite_output_dir` to train from scratch.
+ INFO:datasets.builder:Using custom data configuration default-9b1924bdbebfdb40
+ INFO:datasets.info:Loading Dataset Infos from /home/iais_marenpielka/Bouthaina/miniconda3/lib/python3.12/site-packages/datasets/packaged_modules/text
+ INFO:datasets.builder:Overwrite dataset info from restored data version if exists.
+ INFO:datasets.info:Loading Dataset info from /home/iais_marenpielka/.cache/huggingface/datasets/text/default-9b1924bdbebfdb40/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101
+ INFO:datasets.builder:Found cached dataset text (/home/iais_marenpielka/.cache/huggingface/datasets/text/default-9b1924bdbebfdb40/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101)
+ INFO:datasets.info:Loading Dataset info from /home/iais_marenpielka/.cache/huggingface/datasets/text/default-9b1924bdbebfdb40/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101
+ INFO:datasets.arrow_dataset:Loading cached processed dataset at /home/iais_marenpielka/.cache/huggingface/datasets/text/default-9b1924bdbebfdb40/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101/cache-b1d5dd2f7f488bc8.arrow
+ INFO:datasets.arrow_dataset:Loading cached processed dataset at /home/iais_marenpielka/.cache/huggingface/datasets/text/default-9b1924bdbebfdb40/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101/cache-6440d45a974a1bab.arrow
+ WARNING:__main__:The tokenizer picked seems to have a very large `model_max_length` (1000000000000000019884624838656). Using block_size=1024 instead. You can change that default value by passing --block_size xxx.
+ INFO:datasets.arrow_dataset:Loading cached processed dataset at /home/iais_marenpielka/.cache/huggingface/datasets/text/default-9b1924bdbebfdb40/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101/cache-e36ec5529056b6c9.arrow
+ INFO:datasets.arrow_dataset:Loading cached processed dataset at /home/iais_marenpielka/.cache/huggingface/datasets/text/default-9b1924bdbebfdb40/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101/cache-e1a99260ee54f3bd.arrow
+ WARNING:accelerate.utils.other:Detected kernel version 5.4.0, which is below the recommended minimum of 5.5.0; this can cause the process to hang. It is recommended to upgrade the kernel to the minimum version or higher.
+ WARNING:__main__:Process rank: 0, device: cuda:0, n_gpu: 1, distributed training: False, 16-bits training: False
+ INFO:__main__:Training/evaluation parameters TrainingArguments(
+ _n_gpu=1,
+ accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None, 'use_configured_state': False},
+ adafactor=False,
+ adam_beta1=0.9,
+ adam_beta2=0.999,
+ adam_epsilon=1e-08,
+ auto_find_batch_size=False,
+ batch_eval_metrics=False,
+ bf16=False,
+ bf16_full_eval=False,
+ data_seed=None,
+ dataloader_drop_last=False,
+ dataloader_num_workers=0,
+ dataloader_persistent_workers=False,
+ dataloader_pin_memory=True,
+ dataloader_prefetch_factor=None,
+ ddp_backend=None,
+ ddp_broadcast_buffers=None,
+ ddp_bucket_cap_mb=None,
+ ddp_find_unused_parameters=None,
+ ddp_timeout=1800,
+ debug=[],
+ deepspeed=None,
+ disable_tqdm=False,
+ dispatch_batches=None,
+ do_eval=True,
+ do_predict=False,
+ do_train=True,
+ eval_accumulation_steps=None,
+ eval_delay=0,
+ eval_do_concat_batches=True,
+ eval_on_start=False,
+ eval_steps=None,
+ eval_strategy=IntervalStrategy.EPOCH,
+ eval_use_gather_object=False,
+ evaluation_strategy=epoch,
+ fp16=False,
+ fp16_backend=auto,
+ fp16_full_eval=False,
+ fp16_opt_level=O1,
+ fsdp=[],
+ fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False},
+ fsdp_min_num_params=0,
+ fsdp_transformer_layer_cls_to_wrap=None,
+ full_determinism=False,
+ gradient_accumulation_steps=1,
+ gradient_checkpointing=False,
+ gradient_checkpointing_kwargs=None,
+ greater_is_better=False,
+ group_by_length=False,
+ half_precision_backend=auto,
+ hub_always_push=False,
+ hub_model_id=None,
+ hub_private_repo=False,
+ hub_strategy=HubStrategy.EVERY_SAVE,
+ hub_token=<HUB_TOKEN>,
+ ignore_data_skip=False,
+ include_inputs_for_metrics=False,
+ include_num_input_tokens_seen=False,
+ include_tokens_per_second=False,
+ jit_mode_eval=False,
+ label_names=None,
+ label_smoothing_factor=0.0,
+ learning_rate=5e-05,
+ length_column_name=length,
+ load_best_model_at_end=True,
+ local_rank=0,
+ log_level=passive,
+ log_level_replica=warning,
+ log_on_each_node=True,
+ logging_dir=/home/iais_marenpielka/Bouthaina/res_nw_cls_all_aragpt2-large/runs/Sep28_20-13-40_lmgpu-node-09,
+ logging_first_step=False,
+ logging_nan_inf_filter=True,
+ logging_steps=500,
+ logging_strategy=IntervalStrategy.EPOCH,
+ lr_scheduler_kwargs={},
+ lr_scheduler_type=SchedulerType.LINEAR,
+ max_grad_norm=1.0,
+ max_steps=-1,
+ metric_for_best_model=loss,
+ mp_parameters=,
+ neftune_noise_alpha=None,
+ no_cuda=False,
+ num_train_epochs=20.0,
+ optim=OptimizerNames.ADAMW_TORCH,
+ optim_args=None,
+ optim_target_modules=None,
+ output_dir=/home/iais_marenpielka/Bouthaina/res_nw_cls_all_aragpt2-large,
+ overwrite_output_dir=False,
+ past_index=-1,
+ per_device_eval_batch_size=4,
+ per_device_train_batch_size=4,
+ prediction_loss_only=False,
+ push_to_hub=True,
+ push_to_hub_model_id=None,
+ push_to_hub_organization=None,
+ push_to_hub_token=<PUSH_TO_HUB_TOKEN>,
+ ray_scope=last,
+ remove_unused_columns=True,
+ report_to=[],
+ restore_callback_states_from_checkpoint=False,
+ resume_from_checkpoint=None,
+ run_name=/home/iais_marenpielka/Bouthaina/res_nw_cls_all_aragpt2-large,
+ save_on_each_node=False,
+ save_only_model=False,
+ save_safetensors=True,
+ save_steps=500,
+ save_strategy=IntervalStrategy.EPOCH,
+ save_total_limit=None,
+ seed=42,
+ skip_memory_metrics=True,
+ split_batches=None,
+ tf32=None,
+ torch_compile=False,
+ torch_compile_backend=None,
+ torch_compile_mode=None,
+ torch_empty_cache_steps=None,
+ torchdynamo=None,
+ tpu_metrics_debug=False,
+ tpu_num_cores=None,
+ use_cpu=False,
+ use_ipex=False,
+ use_legacy_prediction_loop=False,
+ use_mps_device=False,
+ warmup_ratio=0.0,
+ warmup_steps=500,
+ weight_decay=0.0,
+ )
+ INFO:__main__:Checkpoint detected, resuming training at /home/iais_marenpielka/Bouthaina/res_nw_cls_all_aragpt2-large/checkpoint-67504. To avoid this behavior, change the `--output_dir` or add `--overwrite_output_dir` to train from scratch.
+ INFO:datasets.builder:Using custom data configuration default-9b1924bdbebfdb40
+ INFO:datasets.info:Loading Dataset Infos from /home/iais_marenpielka/Bouthaina/miniconda3/lib/python3.12/site-packages/datasets/packaged_modules/text
+ INFO:datasets.builder:Overwrite dataset info from restored data version if exists.
+ INFO:datasets.info:Loading Dataset info from /home/iais_marenpielka/.cache/huggingface/datasets/text/default-9b1924bdbebfdb40/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101
+ INFO:datasets.builder:Found cached dataset text (/home/iais_marenpielka/.cache/huggingface/datasets/text/default-9b1924bdbebfdb40/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101)
+ INFO:datasets.info:Loading Dataset info from /home/iais_marenpielka/.cache/huggingface/datasets/text/default-9b1924bdbebfdb40/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101
+ INFO:datasets.arrow_dataset:Loading cached processed dataset at /home/iais_marenpielka/.cache/huggingface/datasets/text/default-9b1924bdbebfdb40/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101/cache-b1d5dd2f7f488bc8.arrow
+ INFO:datasets.arrow_dataset:Loading cached processed dataset at /home/iais_marenpielka/.cache/huggingface/datasets/text/default-9b1924bdbebfdb40/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101/cache-6440d45a974a1bab.arrow
+ WARNING:__main__:The tokenizer picked seems to have a very large `model_max_length` (1000000000000000019884624838656). Using block_size=1024 instead. You can change that default value by passing --block_size xxx.
+ INFO:datasets.arrow_dataset:Loading cached processed dataset at /home/iais_marenpielka/.cache/huggingface/datasets/text/default-9b1924bdbebfdb40/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101/cache-e36ec5529056b6c9.arrow
+ INFO:datasets.arrow_dataset:Loading cached processed dataset at /home/iais_marenpielka/.cache/huggingface/datasets/text/default-9b1924bdbebfdb40/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101/cache-e1a99260ee54f3bd.arrow
+ WARNING:accelerate.utils.other:Detected kernel version 5.4.0, which is below the recommended minimum of 5.5.0; this can cause the process to hang. It is recommended to upgrade the kernel to the minimum version or higher.
+ INFO:root:Epoch 3.0: Train Loss = 0.1946, Eval Loss = 11.576802253723145
+ INFO:absl:Using default tokenizer.
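
Note: the TrainingArguments dump added above is the standard `transformers` repr of the run configuration, printed twice because the script was launched twice (the two `logging_dir` timestamps differ). A minimal sketch of an equivalent configuration in Python follows; the values are copied from the log, but the snippet itself is an illustration, not the author's actual training script:

```python
# Sketch reconstructing the key TrainingArguments values from the log above.
# Values are taken verbatim from the dump; the surrounding code is hypothetical.
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="/home/iais_marenpielka/Bouthaina/res_nw_cls_all_aragpt2-large",
    do_train=True,
    do_eval=True,
    eval_strategy="epoch",          # newer transformers; older releases call this evaluation_strategy
    logging_strategy="epoch",
    save_strategy="epoch",
    learning_rate=5e-05,
    warmup_steps=500,
    num_train_epochs=20.0,
    per_device_train_batch_size=4,
    per_device_eval_batch_size=4,
    load_best_model_at_end=True,    # paired with metric_for_best_model="loss"
    metric_for_best_model="loss",
    greater_is_better=False,
    push_to_hub=True,
    seed=42,
)
```

Because `output_dir` already contains `checkpoint-67504` and `overwrite_output_dir=False`, the script's last-checkpoint detection (as in the Hugging Face example scripts) hands that checkpoint to `Trainer.train(resume_from_checkpoint=...)`, which is what the "Checkpoint detected, resuming training" line reports.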
model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:788494a5d6419659a7a4b57a698635ce7473c8cee2b6a49023094972a20fb51e
+ oid sha256:4e918727f0918bdd64eedd2980444f27a1c250115db0c31bd204c924886d7325
  size 3166581272
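
The pointer update above means the ~3.2 GB safetensors weights file was replaced with the epoch-3 weights. A minimal sketch of loading the updated checkpoint from the Hub; the repo id is an assumption inferred from the committer name and the `output_dir` basename, not something stated in the diff:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

# Assumed repo id (committer namespace + output_dir basename); adjust as needed.
repo_id = "nlparabic/res_nw_cls_all_aragpt2-large"

tokenizer = AutoTokenizer.from_pretrained(repo_id)
# Weights are stored as model.safetensors (save_safetensors=True in the log);
# some AraGPT2-large derivatives may also need trust_remote_code=True.
model = AutoModelForCausalLM.from_pretrained(repo_id)
```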
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:56beb4bcaa10817b08989fea279e49aee4c357c4ae969f6bcf3954a73d8a204a
+ oid sha256:51e2f3b4431a06c299541bdd6783c47ed92974324b81b08990711d02af6a5fba
  size 5240
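
Both binary entries in this commit are Git LFS pointer files: `oid sha256:...` is the SHA-256 digest of the actual file contents and `size` is its byte length, so only the small pointer changes in the diff. A short sketch for checking a downloaded file against its pointer (the local path is a placeholder):

```python
# Verify a downloaded LFS file against the sha256 oid in its pointer.
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream the file in 1 MiB chunks and return its hex SHA-256 digest."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

# New oid for training_args.bin from the diff above.
expected = "51e2f3b4431a06c299541bdd6783c47ed92974324b81b08990711d02af6a5fba"
assert sha256_of("training_args.bin") == expected  # placeholder local path
```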