Thecoder3281f committed
Commit 55dc60f · verified · 1 Parent(s): ff023fe

End of training

README.md ADDED
@@ -0,0 +1,90 @@
---
library_name: peft
language:
- en
license: apache-2.0
base_model: openai/whisper-small
tags:
- generated_from_trainer
datasets:
- mesolitica/IMDA-TTS
metrics:
- wer
model-index:
- name: Whisper Small NSC small (1000 steps) - Jarrett Er
  results:
  - task:
      type: automatic-speech-recognition
      name: Automatic Speech Recognition
    dataset:
      name: NSC Small section
      type: mesolitica/IMDA-TTS
      config: default
      split: train
      args: 'config: en, split: train'
    metrics:
    - type: wer
      value: 3.123272526257601
      name: Wer
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->

# Whisper Small NSC small (1000 steps) - Jarrett Er

This model is a fine-tuned version of [Thecoder3281f/whisper-small-hi-commonvoice17-1000](https://huggingface.co/Thecoder3281f/whisper-small-hi-commonvoice17-1000) on the NSC Small section dataset.
It achieves the following results on the evaluation set:
- Loss: 0.0676
- Wer: 3.1233

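Since this commit adds only a PEFT LoRA adapter (see `adapter_config.json` and `adapter_model.safetensors` below), inference requires attaching the adapter to the base Whisper checkpoint. A minimal sketch, not taken from the training code; the adapter repo id and the audio are placeholders:

```python
# Minimal inference sketch (assumptions: "<this-adapter-repo>" is a placeholder
# for the repository this commit belongs to; `audio` is placeholder silence).
import numpy as np
import torch
from peft import PeftModel
from transformers import WhisperForConditionalGeneration, WhisperProcessor

base = WhisperForConditionalGeneration.from_pretrained("openai/whisper-small")
model = PeftModel.from_pretrained(base, "Thecoder3281f/<this-adapter-repo>")
processor = WhisperProcessor.from_pretrained("openai/whisper-small")

audio = np.zeros(16000, dtype=np.float32)  # replace with 16 kHz mono speech
inputs = processor(audio, sampling_rate=16000, return_tensors="pt")
with torch.no_grad():
    ids = model.generate(input_features=inputs.input_features)
print(processor.batch_decode(ids, skip_special_tokens=True)[0])
```
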
## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 0.0001
- train_batch_size: 16
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 100
- training_steps: 1000
- mixed_precision_training: Native AMP

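In `transformers` terms, the list above maps onto a `Seq2SeqTrainingArguments` roughly as sketched below. This is a reconstruction, not the actual training script, and the output directory name is a placeholder:

```python
# Sketch of Seq2SeqTrainingArguments matching the hyperparameters listed above
# (output_dir is a placeholder; Adam betas/epsilon are the library defaults).
from transformers import Seq2SeqTrainingArguments

training_args = Seq2SeqTrainingArguments(
    output_dir="whisper-small-nsc-lora",  # placeholder
    learning_rate=1e-4,
    per_device_train_batch_size=16,
    per_device_eval_batch_size=8,
    seed=42,
    lr_scheduler_type="linear",
    warmup_steps=100,
    max_steps=1000,
    fp16=True,  # "Native AMP" mixed precision
)
```
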
### Training results

| Training Loss | Epoch | Step | Validation Loss | Wer |
|:-------------:|:------:|:----:|:---------------:|:------:|
| 0.0806 | 0.2941 | 100 | 0.0737 | 3.4549 |
| 0.0618 | 0.5882 | 200 | 0.0690 | 3.2062 |
| 0.0689 | 0.8824 | 300 | 0.0655 | 3.0265 |
| 0.0385 | 1.1765 | 400 | 0.0652 | 3.1509 |
| 0.0441 | 1.4706 | 500 | 0.0653 | 3.1647 |
| 0.0389 | 1.7647 | 600 | 0.0652 | 3.0404 |
| 0.032 | 2.0588 | 700 | 0.0646 | 3.1786 |
| 0.0264 | 2.3529 | 800 | 0.0672 | 3.1095 |
| 0.0307 | 2.6471 | 900 | 0.0672 | 3.1647 |
| 0.0266 | 2.9412 | 1000 | 0.0676 | 3.1233 |

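The Wer column is the word error rate reported as a percentage. The evaluation code is not part of this commit; a minimal sketch with the `evaluate` library, using placeholder transcripts:

```python
# Sketch of computing a WER figure like the ones in the table above
# (assumption: the card reports 100 * word error rate).
import evaluate

wer_metric = evaluate.load("wer")
predictions = ["the quick brown fox"]        # placeholder model transcripts
references = ["the quick brown fox jumps"]   # placeholder reference transcripts
print(100 * wer_metric.compute(predictions=predictions, references=references))
```
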
### Framework versions

- PEFT 0.14.0
- Transformers 4.45.2
- Pytorch 2.5.1+cu124
- Datasets 3.2.1.dev0
- Tokenizers 0.20.3

adapter_config.json ADDED
@@ -0,0 +1,39 @@
{
  "alpha_pattern": {},
  "auto_mapping": {
    "base_model_class": "WhisperForConditionalGeneration",
    "parent_library": "transformers.models.whisper.modeling_whisper"
  },
  "base_model_name_or_path": "openai/whisper-small",
  "bias": "none",
  "eva_config": null,
  "exclude_modules": null,
  "fan_in_fan_out": false,
  "inference_mode": true,
  "init_lora_weights": true,
  "layer_replication": null,
  "layers_pattern": null,
  "layers_to_transform": null,
  "loftq_config": {},
  "lora_alpha": 64,
  "lora_bias": false,
  "lora_dropout": 0.1,
  "megatron_config": null,
  "megatron_core": "megatron.core",
  "modules_to_save": null,
  "peft_type": "LORA",
  "r": 8,
  "rank_pattern": {},
  "revision": null,
  "target_modules": [
    "fc1",
    "k_proj",
    "out_proj",
    "v_proj",
    "fc2",
    "q_proj"
  ],
  "task_type": null,
  "use_dora": false,
  "use_rslora": false
}
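This adapter configuration corresponds to a `peft.LoraConfig` roughly as sketched below; it is a reconstruction from the JSON above, not the original training code:

```python
# Sketch of the LoRA setup described by adapter_config.json: rank-8 adapters
# with alpha 64 and dropout 0.1 on the attention and MLP projections.
from peft import LoraConfig, get_peft_model
from transformers import WhisperForConditionalGeneration

lora_config = LoraConfig(
    r=8,
    lora_alpha=64,
    lora_dropout=0.1,
    bias="none",
    target_modules=["fc1", "k_proj", "out_proj", "v_proj", "fc2", "q_proj"],
)

base = WhisperForConditionalGeneration.from_pretrained("openai/whisper-small")
model = get_peft_model(base, lora_config)
model.print_trainable_parameters()  # only the LoRA matrices are trainable
```
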
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a2d0412db683bfe3e68f5a8aa1e09ace80a9d5fc57d426e6df718593ce03c2dd
size 13028552
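The adapter weights are stored as a Git LFS pointer: `oid` is the SHA-256 of the real weight file and `size` its byte count. A small sketch for checking a downloaded copy against the pointer (the local path is an assumption):

```python
# Verify a locally downloaded adapter_model.safetensors against the LFS pointer.
import hashlib

def file_sha256(path: str) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest()

# Expected: a2d0412db683bfe3e68f5a8aa1e09ace80a9d5fc57d426e6df718593ce03c2dd
print(file_sha256("adapter_model.safetensors"))
```
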
preprocessor_config.json ADDED
@@ -0,0 +1,14 @@
{
  "chunk_length": 30,
  "feature_extractor_type": "WhisperFeatureExtractor",
  "feature_size": 80,
  "hop_length": 160,
  "n_fft": 400,
  "n_samples": 480000,
  "nb_max_frames": 3000,
  "padding_side": "right",
  "padding_value": 0.0,
  "processor_class": "WhisperProcessor",
  "return_attention_mask": false,
  "sampling_rate": 16000
}
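These are the standard Whisper front-end settings: 30-second chunks of 16 kHz audio (480000 samples) turned into 80-channel log-Mel features with a 400-point FFT and 160-sample hop, giving 3000 frames. A minimal sketch of applying them, with placeholder audio:

```python
# Sketch of the feature extraction described by preprocessor_config.json.
import numpy as np
from transformers import WhisperFeatureExtractor

feature_extractor = WhisperFeatureExtractor.from_pretrained("openai/whisper-small")
audio = np.zeros(16000, dtype=np.float32)  # placeholder: 1 s of silence at 16 kHz

features = feature_extractor(audio, sampling_rate=16000, return_tensors="pt")
print(features.input_features.shape)  # (1, 80, 3000): padded to 30 s of log-Mel frames
```
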
runs/Dec23_10-02-24_jarrett-a100/events.out.tfevents.1734948152.jarrett-a100.7018.0 ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:870362fbb042321561f1f7343771ebcba4899cc8c7fcbf0a3ba997307d044aa1
size 18696
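This is the TensorBoard event log written during training, also committed as an LFS pointer. One way to inspect it from Python, as a sketch (the exact scalar tags the Trainer logged may differ):

```python
# Sketch: list the scalar tags in the committed TensorBoard event file.
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

ea = EventAccumulator("runs/Dec23_10-02-24_jarrett-a100")
ea.Reload()
print(ea.Tags()["scalars"])  # tag names logged by the Trainer (e.g. loss, eval metrics)
```
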
training_args.bin ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:43e77425e2de299a1c8a8689e2f63f88f809dbff9b35b57a485c783112e2de64
size 5368
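training_args.bin is the pickled training-arguments object the Trainer saves alongside a run. A sketch for inspecting it locally; recent PyTorch needs `weights_only=False` because this is a pickled Python object, not a tensor file:

```python
# Sketch: inspect the saved training arguments after downloading the file.
import torch

args = torch.load("training_args.bin", weights_only=False)
print(args.learning_rate, args.max_steps, args.per_device_train_batch_size)
```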